{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 192,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.078125,
      "grad_norm": 0.7274424433708191,
      "learning_rate": 7.5e-06,
      "loss": 1.867,
      "step": 5
    },
    {
      "epoch": 0.15625,
      "grad_norm": 0.6130610704421997,
      "learning_rate": 1.6875e-05,
      "loss": 1.8588,
      "step": 10
    },
    {
      "epoch": 0.234375,
      "grad_norm": 0.5408557057380676,
      "learning_rate": 2.625e-05,
      "loss": 1.8381,
      "step": 15
    },
    {
      "epoch": 0.3125,
      "grad_norm": 0.48840343952178955,
      "learning_rate": 2.999279188735074e-05,
      "loss": 1.7762,
      "step": 20
    },
    {
      "epoch": 0.390625,
      "grad_norm": 0.5034699440002441,
      "learning_rate": 2.994876739510005e-05,
      "loss": 1.6663,
      "step": 25
    },
    {
      "epoch": 0.46875,
      "grad_norm": 0.4756667912006378,
      "learning_rate": 2.9864840289257616e-05,
      "loss": 1.6195,
      "step": 30
    },
    {
      "epoch": 0.546875,
      "grad_norm": 0.49448809027671814,
      "learning_rate": 2.9741234595710393e-05,
      "loss": 1.6162,
      "step": 35
    },
    {
      "epoch": 0.625,
      "grad_norm": 0.5317527651786804,
      "learning_rate": 2.9578280254051174e-05,
      "loss": 1.5056,
      "step": 40
    },
    {
      "epoch": 0.703125,
      "grad_norm": 0.5216270685195923,
      "learning_rate": 2.9376412236873792e-05,
      "loss": 1.476,
      "step": 45
    },
    {
      "epoch": 0.78125,
      "grad_norm": 0.6141660809516907,
      "learning_rate": 2.9136169388704557e-05,
      "loss": 1.4277,
      "step": 50
    },
    {
      "epoch": 0.859375,
      "grad_norm": 0.6590550541877747,
      "learning_rate": 2.8858192987669303e-05,
      "loss": 1.3739,
      "step": 55
    },
    {
      "epoch": 0.9375,
      "grad_norm": 0.6383962631225586,
      "learning_rate": 2.8543225033735316e-05,
      "loss": 1.347,
      "step": 60
    },
    {
      "epoch": 1.015625,
      "grad_norm": 0.9010741114616394,
      "learning_rate": 2.8192106268097336e-05,
      "loss": 1.2718,
      "step": 65
    },
    {
      "epoch": 1.09375,
      "grad_norm": 1.0943779945373535,
      "learning_rate": 2.780577392899446e-05,
      "loss": 1.1758,
      "step": 70
    },
    {
      "epoch": 1.171875,
      "grad_norm": 1.015796422958374,
      "learning_rate": 2.7385259249948338e-05,
      "loss": 1.1385,
      "step": 75
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.8987584710121155,
      "learning_rate": 2.693168470710059e-05,
      "loss": 1.1287,
      "step": 80
    },
    {
      "epoch": 1.328125,
      "grad_norm": 1.0710234642028809,
      "learning_rate": 2.6446261022997098e-05,
      "loss": 1.0745,
      "step": 85
    },
    {
      "epoch": 1.40625,
      "grad_norm": 1.4526638984680176,
      "learning_rate": 2.593028393481692e-05,
      "loss": 1.0067,
      "step": 90
    },
    {
      "epoch": 1.484375,
      "grad_norm": 1.084017038345337,
      "learning_rate": 2.5385130735672442e-05,
      "loss": 1.0116,
      "step": 95
    },
    {
      "epoch": 1.5625,
      "grad_norm": 1.2450714111328125,
      "learning_rate": 2.4812256598212946e-05,
      "loss": 0.9489,
      "step": 100
    },
    {
      "epoch": 1.640625,
      "grad_norm": 1.2581003904342651,
      "learning_rate": 2.4213190690345018e-05,
      "loss": 0.9191,
      "step": 105
    },
    {
      "epoch": 1.71875,
      "grad_norm": 1.2668284177780151,
      "learning_rate": 2.3589532093438104e-05,
      "loss": 0.8674,
      "step": 110
    },
    {
      "epoch": 1.796875,
      "grad_norm": 1.346251368522644,
      "learning_rate": 2.2942945533910633e-05,
      "loss": 0.8263,
      "step": 115
    },
    {
      "epoch": 1.875,
      "grad_norm": 1.4039610624313354,
      "learning_rate": 2.2275156939590395e-05,
      "loss": 0.8105,
      "step": 120
    },
    {
      "epoch": 1.953125,
      "grad_norm": 1.3616975545883179,
      "learning_rate": 2.1587948832710557e-05,
      "loss": 0.8055,
      "step": 125
    },
    {
      "epoch": 2.03125,
      "grad_norm": 1.530532956123352,
      "learning_rate": 2.0883155571838692e-05,
      "loss": 0.7202,
      "step": 130
    },
    {
      "epoch": 2.109375,
      "grad_norm": 1.4595762491226196,
      "learning_rate": 2.016265845543958e-05,
      "loss": 0.661,
      "step": 135
    },
    {
      "epoch": 2.1875,
      "grad_norm": 1.6068073511123657,
      "learning_rate": 1.94283807001417e-05,
      "loss": 0.6393,
      "step": 140
    },
    {
      "epoch": 2.265625,
      "grad_norm": 1.4308488368988037,
      "learning_rate": 1.8682282307111988e-05,
      "loss": 0.6115,
      "step": 145
    },
    {
      "epoch": 2.34375,
      "grad_norm": 1.6304932832717896,
      "learning_rate": 1.7926354830241928e-05,
      "loss": 0.6226,
      "step": 150
    },
    {
      "epoch": 2.421875,
      "grad_norm": 1.6512377262115479,
      "learning_rate": 1.7162616060110202e-05,
      "loss": 0.583,
      "step": 155
    },
    {
      "epoch": 2.5,
      "grad_norm": 1.6636899709701538,
      "learning_rate": 1.639310463791205e-05,
      "loss": 0.5627,
      "step": 160
    },
    {
      "epoch": 2.578125,
      "grad_norm": 1.67765474319458,
      "learning_rate": 1.5619874613732198e-05,
      "loss": 0.6028,
      "step": 165
    },
    {
      "epoch": 2.65625,
      "grad_norm": 1.6269663572311401,
      "learning_rate": 1.4844989963686993e-05,
      "loss": 0.5336,
      "step": 170
    },
    {
      "epoch": 2.734375,
      "grad_norm": 1.7632614374160767,
      "learning_rate": 1.4070519080571082e-05,
      "loss": 0.5299,
      "step": 175
    },
    {
      "epoch": 2.8125,
      "grad_norm": 1.7652994394302368,
      "learning_rate": 1.3298529252714685e-05,
      "loss": 0.5322,
      "step": 180
    },
    {
      "epoch": 2.890625,
      "grad_norm": 2.1501216888427734,
      "learning_rate": 1.2531081145788989e-05,
      "loss": 0.5275,
      "step": 185
    },
    {
      "epoch": 2.96875,
      "grad_norm": 2.2576866149902344,
      "learning_rate": 1.1770223302289385e-05,
      "loss": 0.5388,
      "step": 190
    }
  ],
  "logging_steps": 5,
  "max_steps": 320,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 2000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.2719784862325146e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}