{
  "best_global_step": 150,
  "best_metric": 0.3620273470878601,
  "best_model_checkpoint": "/content/output/checkpoint-150",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13333333333333333,
      "grad_norm": 1.7940553426742554,
      "learning_rate": 5.917159763313609e-06,
      "loss": 1.3972,
      "mean_token_accuracy": 0.7618847399950027,
      "num_tokens": 31152.0,
      "step": 10
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 1.0511088371276855,
      "learning_rate": 1.1834319526627219e-05,
      "loss": 1.2288,
      "mean_token_accuracy": 0.7673537939786911,
      "num_tokens": 61773.0,
      "step": 20
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.8096975088119507,
      "learning_rate": 1.7751479289940828e-05,
      "loss": 0.9255,
      "mean_token_accuracy": 0.7948055118322372,
      "num_tokens": 94758.0,
      "step": 30
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 0.6213255524635315,
      "learning_rate": 2.3668639053254438e-05,
      "loss": 0.7366,
      "mean_token_accuracy": 0.8212289035320282,
      "num_tokens": 125915.0,
      "step": 40
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.42887863516807556,
      "learning_rate": 2.958579881656805e-05,
      "loss": 0.5556,
      "mean_token_accuracy": 0.8463607966899872,
      "num_tokens": 159364.0,
      "step": 50
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.5108550786972046,
      "learning_rate": 3.5502958579881656e-05,
      "loss": 0.4676,
      "mean_token_accuracy": 0.8614094287157059,
      "num_tokens": 190374.0,
      "step": 60
    },
    {
      "epoch": 0.9333333333333333,
      "grad_norm": 0.4310983121395111,
      "learning_rate": 4.142011834319527e-05,
      "loss": 0.4214,
      "mean_token_accuracy": 0.8710317760705948,
      "num_tokens": 222156.0,
      "step": 70
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.4043419659137726,
      "eval_mean_token_accuracy": 0.8734077858924866,
      "eval_num_tokens": 238011.0,
      "eval_runtime": 50.8925,
      "eval_samples_per_second": 1.965,
      "eval_steps_per_second": 0.491,
      "step": 75
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 0.35767048597335815,
      "learning_rate": 4.7337278106508875e-05,
      "loss": 0.3901,
      "mean_token_accuracy": 0.8787548273801804,
      "num_tokens": 255825.0,
      "step": 80
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.3684692680835724,
      "learning_rate": 5.3254437869822495e-05,
      "loss": 0.3956,
      "mean_token_accuracy": 0.8787799149751663,
      "num_tokens": 287370.0,
      "step": 90
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.35148611664772034,
      "learning_rate": 5.91715976331361e-05,
      "loss": 0.3822,
      "mean_token_accuracy": 0.8814697057008744,
      "num_tokens": 315883.0,
      "step": 100
    },
    {
      "epoch": 1.4666666666666668,
      "grad_norm": 0.39074671268463135,
      "learning_rate": 6.50887573964497e-05,
      "loss": 0.3703,
      "mean_token_accuracy": 0.8836676925420761,
      "num_tokens": 347324.0,
      "step": 110
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.38130292296409607,
      "learning_rate": 7.100591715976331e-05,
      "loss": 0.3762,
      "mean_token_accuracy": 0.8851484537124634,
      "num_tokens": 379374.0,
      "step": 120
    },
    {
      "epoch": 1.7333333333333334,
      "grad_norm": 0.4091486632823944,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.3446,
      "mean_token_accuracy": 0.8894414573907852,
      "num_tokens": 411843.0,
      "step": 130
    },
    {
      "epoch": 1.8666666666666667,
      "grad_norm": 0.361017644405365,
      "learning_rate": 8.284023668639054e-05,
      "loss": 0.3494,
      "mean_token_accuracy": 0.8915452778339386,
      "num_tokens": 444017.0,
      "step": 140
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.3487047851085663,
      "learning_rate": 8.875739644970414e-05,
      "loss": 0.3674,
      "mean_token_accuracy": 0.8846240967512131,
      "num_tokens": 476022.0,
      "step": 150
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.3620273470878601,
      "eval_mean_token_accuracy": 0.8875497984886169,
      "eval_num_tokens": 476022.0,
      "eval_runtime": 50.8961,
      "eval_samples_per_second": 1.965,
      "eval_steps_per_second": 0.491,
      "step": 150
    }
  ],
  "logging_steps": 10,
  "max_steps": 1125,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.842489979664384e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}