{
  "best_metric": 0.10648240149021149,
  "best_model_checkpoint": "./beans_outputs/checkpoint-260",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 325,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 1.5408769845962524,
      "learning_rate": 1.9384615384615386e-05,
      "loss": 1.0251,
      "step": 10
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 1.687791347503662,
      "learning_rate": 1.876923076923077e-05,
      "loss": 0.8722,
      "step": 20
    },
    {
      "epoch": 0.46153846153846156,
      "grad_norm": 1.5239825248718262,
      "learning_rate": 1.8153846153846155e-05,
      "loss": 0.779,
      "step": 30
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 1.423100471496582,
      "learning_rate": 1.753846153846154e-05,
      "loss": 0.6294,
      "step": 40
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 2.351439952850342,
      "learning_rate": 1.6923076923076924e-05,
      "loss": 0.5649,
      "step": 50
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 1.5229129791259766,
      "learning_rate": 1.630769230769231e-05,
      "loss": 0.4659,
      "step": 60
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9624060150375939,
      "eval_loss": 0.3747750222682953,
      "eval_runtime": 0.9731,
      "eval_samples_per_second": 136.679,
      "eval_steps_per_second": 9.249,
      "step": 65
    },
    {
      "epoch": 1.0769230769230769,
      "grad_norm": 1.3030115365982056,
      "learning_rate": 1.5692307692307693e-05,
      "loss": 0.3978,
      "step": 70
    },
    {
      "epoch": 1.2307692307692308,
      "grad_norm": 1.1350892782211304,
      "learning_rate": 1.5076923076923078e-05,
      "loss": 0.3449,
      "step": 80
    },
    {
      "epoch": 1.3846153846153846,
      "grad_norm": 1.3598690032958984,
      "learning_rate": 1.4461538461538462e-05,
      "loss": 0.3157,
      "step": 90
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 0.7663145661354065,
      "learning_rate": 1.3846153846153847e-05,
      "loss": 0.2961,
      "step": 100
    },
    {
      "epoch": 1.6923076923076923,
      "grad_norm": 4.763614177703857,
      "learning_rate": 1.3230769230769231e-05,
      "loss": 0.2683,
      "step": 110
    },
    {
      "epoch": 1.8461538461538463,
      "grad_norm": 4.330443382263184,
      "learning_rate": 1.2615384615384616e-05,
      "loss": 0.2818,
      "step": 120
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.8338944315910339,
      "learning_rate": 1.2e-05,
      "loss": 0.2039,
      "step": 130
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9774436090225563,
      "eval_loss": 0.18510694801807404,
      "eval_runtime": 0.9351,
      "eval_samples_per_second": 142.238,
      "eval_steps_per_second": 9.625,
      "step": 130
    },
    {
      "epoch": 2.1538461538461537,
      "grad_norm": 1.1333366632461548,
      "learning_rate": 1.1384615384615385e-05,
      "loss": 0.1953,
      "step": 140
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 2.3248729705810547,
      "learning_rate": 1.076923076923077e-05,
      "loss": 0.188,
      "step": 150
    },
    {
      "epoch": 2.4615384615384617,
      "grad_norm": 2.4981637001037598,
      "learning_rate": 1.0153846153846154e-05,
      "loss": 0.2092,
      "step": 160
    },
    {
      "epoch": 2.6153846153846154,
      "grad_norm": 1.3927757740020752,
      "learning_rate": 9.53846153846154e-06,
      "loss": 0.188,
      "step": 170
    },
    {
      "epoch": 2.769230769230769,
      "grad_norm": 2.8940329551696777,
      "learning_rate": 8.923076923076925e-06,
      "loss": 0.1883,
      "step": 180
    },
    {
      "epoch": 2.9230769230769234,
      "grad_norm": 1.4261060953140259,
      "learning_rate": 8.307692307692309e-06,
      "loss": 0.1747,
      "step": 190
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9774436090225563,
      "eval_loss": 0.13086195290088654,
      "eval_runtime": 2.0456,
      "eval_samples_per_second": 65.018,
      "eval_steps_per_second": 4.4,
      "step": 195
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 10.818984031677246,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.192,
      "step": 200
    },
    {
      "epoch": 3.230769230769231,
      "grad_norm": 3.111773729324341,
      "learning_rate": 7.076923076923078e-06,
      "loss": 0.1913,
      "step": 210
    },
    {
      "epoch": 3.3846153846153846,
      "grad_norm": 3.3806750774383545,
      "learning_rate": 6.461538461538463e-06,
      "loss": 0.1438,
      "step": 220
    },
    {
      "epoch": 3.5384615384615383,
      "grad_norm": 2.405831813812256,
      "learning_rate": 5.846153846153847e-06,
      "loss": 0.1329,
      "step": 230
    },
    {
      "epoch": 3.6923076923076925,
      "grad_norm": 1.5929791927337646,
      "learning_rate": 5.230769230769232e-06,
      "loss": 0.1325,
      "step": 240
    },
    {
      "epoch": 3.8461538461538463,
      "grad_norm": 0.5674996972084045,
      "learning_rate": 4.615384615384616e-06,
      "loss": 0.1503,
      "step": 250
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.8114491105079651,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.1496,
      "step": 260
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9849624060150376,
      "eval_loss": 0.10648240149021149,
      "eval_runtime": 1.9512,
      "eval_samples_per_second": 68.162,
      "eval_steps_per_second": 4.612,
      "step": 260
    },
    {
      "epoch": 4.153846153846154,
      "grad_norm": 0.8715596795082092,
      "learning_rate": 3.384615384615385e-06,
      "loss": 0.161,
      "step": 270
    },
    {
      "epoch": 4.3076923076923075,
      "grad_norm": 1.114379644393921,
      "learning_rate": 2.7692307692307697e-06,
      "loss": 0.1433,
      "step": 280
    },
    {
      "epoch": 4.461538461538462,
      "grad_norm": 0.40282967686653137,
      "learning_rate": 2.153846153846154e-06,
      "loss": 0.1505,
      "step": 290
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 1.5655503273010254,
      "learning_rate": 1.5384615384615387e-06,
      "loss": 0.1304,
      "step": 300
    },
    {
      "epoch": 4.769230769230769,
      "grad_norm": 0.8997613787651062,
      "learning_rate": 9.230769230769232e-07,
      "loss": 0.127,
      "step": 310
    },
    {
      "epoch": 4.923076923076923,
      "grad_norm": 0.5467543005943298,
      "learning_rate": 3.0769230769230774e-07,
      "loss": 0.1125,
      "step": 320
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9774436090225563,
      "eval_loss": 0.11632176488637924,
      "eval_runtime": 2.1915,
      "eval_samples_per_second": 60.69,
      "eval_steps_per_second": 4.107,
      "step": 325
    },
    {
      "epoch": 5.0,
      "step": 325,
      "total_flos": 4.006371770595533e+17,
      "train_loss": 0.2948218886668865,
      "train_runtime": 123.6687,
      "train_samples_per_second": 41.805,
      "train_steps_per_second": 2.628
    }
  ],
  "logging_steps": 10,
  "max_steps": 325,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.006371770595533e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}