{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 1000,
  "global_step": 15175,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00019774569903104609,
      "grad_norm": 8.964256286621094,
      "learning_rate": 0.0,
      "loss": 3.9486,
      "step": 1
    },
    {
      "epoch": 0.19774569903104608,
      "grad_norm": 6.0791192054748535,
      "learning_rate": 3.2921403855659913e-06,
      "loss": 3.172,
      "step": 1000
    },
    {
      "epoch": 0.19774569903104608,
      "eval_cosine_accuracy": 0.9392154812812805,
      "eval_loss": 0.5803369879722595,
      "eval_runtime": 34.8035,
      "eval_samples_per_second": 273.219,
      "eval_steps_per_second": 2.155,
      "step": 1000
    },
    {
      "epoch": 0.39549139806209216,
      "grad_norm": 6.649482250213623,
      "learning_rate": 6.58757620695337e-06,
      "loss": 2.6021,
      "step": 2000
    },
    {
      "epoch": 0.39549139806209216,
      "eval_cosine_accuracy": 0.9489957094192505,
      "eval_loss": 0.5286197662353516,
      "eval_runtime": 33.7422,
      "eval_samples_per_second": 281.813,
      "eval_steps_per_second": 2.223,
      "step": 2000
    },
    {
      "epoch": 0.5932370970931382,
      "grad_norm": 8.011848449707031,
      "learning_rate": 9.88301202834075e-06,
      "loss": 2.1529,
      "step": 3000
    },
    {
      "epoch": 0.5932370970931382,
      "eval_cosine_accuracy": 0.9544641971588135,
      "eval_loss": 0.49919888377189636,
      "eval_runtime": 33.725,
      "eval_samples_per_second": 281.957,
      "eval_steps_per_second": 2.224,
      "step": 3000
    },
    {
      "epoch": 0.7909827961241843,
      "grad_norm": 8.981022834777832,
      "learning_rate": 1.317515241390674e-05,
      "loss": 1.3847,
      "step": 4000
    },
    {
      "epoch": 0.7909827961241843,
      "eval_cosine_accuracy": 0.9546745419502258,
      "eval_loss": 0.47944125533103943,
      "eval_runtime": 33.7995,
      "eval_samples_per_second": 281.335,
      "eval_steps_per_second": 2.219,
      "step": 4000
    },
    {
      "epoch": 0.9887284951552304,
      "grad_norm": 12.642546653747559,
      "learning_rate": 1.647058823529412e-05,
      "loss": 0.9942,
      "step": 5000
    },
    {
      "epoch": 0.9887284951552304,
      "eval_cosine_accuracy": 0.9547796845436096,
      "eval_loss": 0.4432453513145447,
      "eval_runtime": 33.82,
      "eval_samples_per_second": 281.165,
      "eval_steps_per_second": 2.218,
      "step": 5000
    },
    {
      "epoch": 1.1864004744020558,
      "grad_norm": 6.489721298217773,
      "learning_rate": 1.976272862086011e-05,
      "loss": 1.4574,
      "step": 6000
    },
    {
      "epoch": 1.1864004744020558,
      "eval_cosine_accuracy": 0.9597223401069641,
      "eval_loss": 0.43778446316719055,
      "eval_runtime": 35.2925,
      "eval_samples_per_second": 269.434,
      "eval_steps_per_second": 2.125,
      "step": 6000
    },
    {
      "epoch": 1.3840679976279897,
      "grad_norm": 11.65429973602295,
      "learning_rate": 1.923536439665472e-05,
      "loss": 1.3286,
      "step": 7000
    },
    {
      "epoch": 1.3840679976279897,
      "eval_cosine_accuracy": 0.9628772735595703,
      "eval_loss": 0.4299427270889282,
      "eval_runtime": 33.6219,
      "eval_samples_per_second": 282.822,
      "eval_steps_per_second": 2.231,
      "step": 7000
    },
    {
      "epoch": 1.5817355208539237,
      "grad_norm": 6.698935508728027,
      "learning_rate": 1.8411403617187826e-05,
      "loss": 1.2024,
      "step": 8000
    },
    {
      "epoch": 1.5817355208539237,
      "eval_cosine_accuracy": 0.9645599126815796,
      "eval_loss": 0.41794154047966003,
      "eval_runtime": 33.7437,
      "eval_samples_per_second": 281.801,
      "eval_steps_per_second": 2.223,
      "step": 8000
    },
    {
      "epoch": 1.7794030440798578,
      "grad_norm": 6.8305864334106445,
      "learning_rate": 1.758909075927986e-05,
      "loss": 1.1554,
      "step": 9000
    },
    {
      "epoch": 1.7794030440798578,
      "eval_cosine_accuracy": 0.9647701978683472,
      "eval_loss": 0.4171123504638672,
      "eval_runtime": 33.5248,
      "eval_samples_per_second": 283.64,
      "eval_steps_per_second": 2.237,
      "step": 9000
    },
    {
      "epoch": 1.9770705673057916,
      "grad_norm": 6.222408771514893,
      "learning_rate": 1.676512997981296e-05,
      "loss": 1.0769,
      "step": 10000
    },
    {
      "epoch": 1.9770705673057916,
      "eval_cosine_accuracy": 0.9635082483291626,
      "eval_loss": 0.41735684871673584,
      "eval_runtime": 33.8215,
      "eval_samples_per_second": 281.152,
      "eval_steps_per_second": 2.218,
      "step": 10000
    },
    {
      "epoch": 2.1747380905317257,
      "grad_norm": 6.215972423553467,
      "learning_rate": 1.5941169200346065e-05,
      "loss": 0.9984,
      "step": 11000
    },
    {
      "epoch": 2.1747380905317257,
      "eval_cosine_accuracy": 0.967714786529541,
      "eval_loss": 0.4162609279155731,
      "eval_runtime": 34.4291,
      "eval_samples_per_second": 276.191,
      "eval_steps_per_second": 2.178,
      "step": 11000
    },
    {
      "epoch": 2.3724056137576595,
      "grad_norm": 6.266003608703613,
      "learning_rate": 1.5117208420879169e-05,
      "loss": 0.9714,
      "step": 12000
    },
    {
      "epoch": 2.3724056137576595,
      "eval_cosine_accuracy": 0.9676096439361572,
      "eval_loss": 0.4025992751121521,
      "eval_runtime": 33.6112,
      "eval_samples_per_second": 282.912,
      "eval_steps_per_second": 2.231,
      "step": 12000
    },
    {
      "epoch": 2.5700731369835936,
      "grad_norm": 6.162975788116455,
      "learning_rate": 1.4294071602191736e-05,
      "loss": 0.9208,
      "step": 13000
    },
    {
      "epoch": 2.5700731369835936,
      "eval_cosine_accuracy": 0.9673992991447449,
      "eval_loss": 0.4087478220462799,
      "eval_runtime": 34.1669,
      "eval_samples_per_second": 278.31,
      "eval_steps_per_second": 2.195,
      "step": 13000
    },
    {
      "epoch": 2.767740660209528,
      "grad_norm": 5.935854911804199,
      "learning_rate": 1.347011082272484e-05,
      "loss": 0.9027,
      "step": 14000
    },
    {
      "epoch": 2.767740660209528,
      "eval_cosine_accuracy": 0.9681354761123657,
      "eval_loss": 0.39745473861694336,
      "eval_runtime": 33.8947,
      "eval_samples_per_second": 280.545,
      "eval_steps_per_second": 2.213,
      "step": 14000
    },
    {
      "epoch": 2.9654081834354615,
      "grad_norm": 5.407319068908691,
      "learning_rate": 1.2646150043257941e-05,
      "loss": 0.8854,
      "step": 15000
    },
    {
      "epoch": 2.9654081834354615,
      "eval_cosine_accuracy": 0.9680302739143372,
      "eval_loss": 0.40181729197502136,
      "eval_runtime": 33.9454,
      "eval_samples_per_second": 280.126,
      "eval_steps_per_second": 2.209,
      "step": 15000
    }
  ],
  "logging_steps": 1000,
  "max_steps": 30342,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": null
}