{
  "best_metric": 0.042105622589588165,
  "best_model_checkpoint": "./beans_outputs/checkpoint-130",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 650,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07692307692307693,
      "grad_norm": 112.06184387207031,
      "learning_rate": 1.9692307692307696e-05,
      "loss": 1.363,
      "step": 10
    },
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 60.377403259277344,
      "learning_rate": 1.9384615384615386e-05,
      "loss": 0.8057,
      "step": 20
    },
    {
      "epoch": 0.23076923076923078,
      "grad_norm": 186.28451538085938,
      "learning_rate": 1.907692307692308e-05,
      "loss": 0.2844,
      "step": 30
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 129.29640197753906,
      "learning_rate": 1.876923076923077e-05,
      "loss": 0.2606,
      "step": 40
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 95.92760467529297,
      "learning_rate": 1.8461538461538465e-05,
      "loss": 0.369,
      "step": 50
    },
    {
      "epoch": 0.46153846153846156,
      "grad_norm": 270.521728515625,
      "learning_rate": 1.8153846153846155e-05,
      "loss": 0.6975,
      "step": 60
    },
    {
      "epoch": 0.5384615384615384,
      "grad_norm": 18.815027236938477,
      "learning_rate": 1.784615384615385e-05,
      "loss": 0.519,
      "step": 70
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 28.882125854492188,
      "learning_rate": 1.753846153846154e-05,
      "loss": 0.181,
      "step": 80
    },
    {
      "epoch": 0.6923076923076923,
      "grad_norm": 25.98147964477539,
      "learning_rate": 1.7230769230769234e-05,
      "loss": 0.3703,
      "step": 90
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 65.80670928955078,
      "learning_rate": 1.6923076923076924e-05,
      "loss": 0.3021,
      "step": 100
    },
    {
      "epoch": 0.8461538461538461,
      "grad_norm": 588.9821166992188,
      "learning_rate": 1.6615384615384618e-05,
      "loss": 0.3162,
      "step": 110
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 172.91128540039062,
      "learning_rate": 1.630769230769231e-05,
      "loss": 0.4984,
      "step": 120
    },
    {
      "epoch": 1.0,
      "grad_norm": 148.1098175048828,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.4416,
      "step": 130
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9774436090225563,
      "eval_loss": 0.042105622589588165,
      "eval_runtime": 4.8662,
      "eval_samples_per_second": 27.331,
      "eval_steps_per_second": 3.493,
      "step": 130
    },
    {
      "epoch": 1.0769230769230769,
      "grad_norm": 90.7206802368164,
      "learning_rate": 1.5692307692307693e-05,
      "loss": 0.1361,
      "step": 140
    },
    {
      "epoch": 1.1538461538461537,
      "grad_norm": 52.01783752441406,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.415,
      "step": 150
    },
    {
      "epoch": 1.2307692307692308,
      "grad_norm": 265.2178649902344,
      "learning_rate": 1.5076923076923078e-05,
      "loss": 1.0526,
      "step": 160
    },
    {
      "epoch": 1.3076923076923077,
      "grad_norm": 19.775461196899414,
      "learning_rate": 1.4769230769230772e-05,
      "loss": 0.4827,
      "step": 170
    },
    {
      "epoch": 1.3846153846153846,
      "grad_norm": 88.16533660888672,
      "learning_rate": 1.4461538461538462e-05,
      "loss": 0.2066,
      "step": 180
    },
    {
      "epoch": 1.4615384615384617,
      "grad_norm": 209.10418701171875,
      "learning_rate": 1.4153846153846156e-05,
      "loss": 0.9121,
      "step": 190
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 19.684755325317383,
      "learning_rate": 1.3846153846153847e-05,
      "loss": 0.8565,
      "step": 200
    },
    {
      "epoch": 1.6153846153846154,
      "grad_norm": 35.34351348876953,
      "learning_rate": 1.353846153846154e-05,
      "loss": 0.2954,
      "step": 210
    },
    {
      "epoch": 1.6923076923076923,
      "grad_norm": 0.2399674952030182,
      "learning_rate": 1.3230769230769231e-05,
      "loss": 0.4924,
      "step": 220
    },
    {
      "epoch": 1.7692307692307692,
      "grad_norm": 79.6393051147461,
      "learning_rate": 1.2923076923076925e-05,
      "loss": 0.3515,
      "step": 230
    },
    {
      "epoch": 1.8461538461538463,
      "grad_norm": 68.64823913574219,
      "learning_rate": 1.2615384615384616e-05,
      "loss": 0.5329,
      "step": 240
    },
    {
      "epoch": 1.9230769230769231,
      "grad_norm": 27.282691955566406,
      "learning_rate": 1.230769230769231e-05,
      "loss": 0.2026,
      "step": 250
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.0012617758475244045,
      "learning_rate": 1.2e-05,
      "loss": 0.228,
      "step": 260
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8872180451127819,
      "eval_loss": 0.5106561183929443,
      "eval_runtime": 4.3946,
      "eval_samples_per_second": 30.265,
      "eval_steps_per_second": 3.868,
      "step": 260
    },
    {
      "epoch": 2.076923076923077,
      "grad_norm": 5.975718975067139,
      "learning_rate": 1.1692307692307694e-05,
      "loss": 0.4097,
      "step": 270
    },
    {
      "epoch": 2.1538461538461537,
      "grad_norm": 0.03580649197101593,
      "learning_rate": 1.1384615384615385e-05,
      "loss": 0.5936,
      "step": 280
    },
    {
      "epoch": 2.230769230769231,
      "grad_norm": 86.95597839355469,
      "learning_rate": 1.1076923076923079e-05,
      "loss": 0.3961,
      "step": 290
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 59.9096794128418,
      "learning_rate": 1.076923076923077e-05,
      "loss": 0.2056,
      "step": 300
    },
    {
      "epoch": 2.3846153846153846,
      "grad_norm": 247.10830688476562,
      "learning_rate": 1.0461538461538463e-05,
      "loss": 0.4483,
      "step": 310
    },
    {
      "epoch": 2.4615384615384617,
      "grad_norm": 0.2814353406429291,
      "learning_rate": 1.0153846153846154e-05,
      "loss": 0.7666,
      "step": 320
    },
    {
      "epoch": 2.5384615384615383,
      "grad_norm": 49.798301696777344,
      "learning_rate": 9.846153846153848e-06,
      "loss": 0.3381,
      "step": 330
    },
    {
      "epoch": 2.6153846153846154,
      "grad_norm": 13.517538070678711,
      "learning_rate": 9.53846153846154e-06,
      "loss": 0.2543,
      "step": 340
    },
    {
      "epoch": 2.6923076923076925,
      "grad_norm": 0.05923676863312721,
      "learning_rate": 9.230769230769232e-06,
      "loss": 0.201,
      "step": 350
    },
    {
      "epoch": 2.769230769230769,
      "grad_norm": 490.6665954589844,
      "learning_rate": 8.923076923076925e-06,
      "loss": 0.3604,
      "step": 360
    },
    {
      "epoch": 2.8461538461538463,
      "grad_norm": 135.96426391601562,
      "learning_rate": 8.615384615384617e-06,
      "loss": 0.3943,
      "step": 370
    },
    {
      "epoch": 2.9230769230769234,
      "grad_norm": 14.018547058105469,
      "learning_rate": 8.307692307692309e-06,
      "loss": 0.087,
      "step": 380
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.0006046479684300721,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.2485,
      "step": 390
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9548872180451128,
      "eval_loss": 0.10911013185977936,
      "eval_runtime": 4.6673,
      "eval_samples_per_second": 28.496,
      "eval_steps_per_second": 3.642,
      "step": 390
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 82.9144515991211,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.5248,
      "step": 400
    },
    {
      "epoch": 3.1538461538461537,
      "grad_norm": 277.6345520019531,
      "learning_rate": 7.384615384615386e-06,
      "loss": 0.5708,
      "step": 410
    },
    {
      "epoch": 3.230769230769231,
      "grad_norm": 56.93598937988281,
      "learning_rate": 7.076923076923078e-06,
      "loss": 0.1575,
      "step": 420
    },
    {
      "epoch": 3.3076923076923075,
      "grad_norm": 0.0031539953779429197,
      "learning_rate": 6.76923076923077e-06,
      "loss": 0.0855,
      "step": 430
    },
    {
      "epoch": 3.3846153846153846,
      "grad_norm": 0.7423490881919861,
      "learning_rate": 6.461538461538463e-06,
      "loss": 0.168,
      "step": 440
    },
    {
      "epoch": 3.4615384615384617,
      "grad_norm": 120.9870834350586,
      "learning_rate": 6.153846153846155e-06,
      "loss": 0.191,
      "step": 450
    },
    {
      "epoch": 3.5384615384615383,
      "grad_norm": 296.5716552734375,
      "learning_rate": 5.846153846153847e-06,
      "loss": 0.2306,
      "step": 460
    },
    {
      "epoch": 3.6153846153846154,
      "grad_norm": 7.56702995300293,
      "learning_rate": 5.538461538461539e-06,
      "loss": 0.1797,
      "step": 470
    },
    {
      "epoch": 3.6923076923076925,
      "grad_norm": 93.5758285522461,
      "learning_rate": 5.230769230769232e-06,
      "loss": 0.1971,
      "step": 480
    },
    {
      "epoch": 3.769230769230769,
      "grad_norm": 0.010949734598398209,
      "learning_rate": 4.923076923076924e-06,
      "loss": 0.0764,
      "step": 490
    },
    {
      "epoch": 3.8461538461538463,
      "grad_norm": 0.016034867614507675,
      "learning_rate": 4.615384615384616e-06,
      "loss": 0.2204,
      "step": 500
    },
    {
      "epoch": 3.9230769230769234,
      "grad_norm": 0.0015761979157105088,
      "learning_rate": 4.307692307692308e-06,
      "loss": 0.1723,
      "step": 510
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.00017710862448439002,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.2278,
      "step": 520
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9774436090225563,
      "eval_loss": 0.11477751284837723,
      "eval_runtime": 4.5357,
      "eval_samples_per_second": 29.323,
      "eval_steps_per_second": 3.748,
      "step": 520
    },
    {
      "epoch": 4.076923076923077,
      "grad_norm": 2.7093148231506348,
      "learning_rate": 3.692307692307693e-06,
      "loss": 0.0931,
      "step": 530
    },
    {
      "epoch": 4.153846153846154,
      "grad_norm": 87.03694915771484,
      "learning_rate": 3.384615384615385e-06,
      "loss": 0.2926,
      "step": 540
    },
    {
      "epoch": 4.230769230769231,
      "grad_norm": 11.575035095214844,
      "learning_rate": 3.0769230769230774e-06,
      "loss": 0.1522,
      "step": 550
    },
    {
      "epoch": 4.3076923076923075,
      "grad_norm": 55.532737731933594,
      "learning_rate": 2.7692307692307697e-06,
      "loss": 0.0306,
      "step": 560
    },
    {
      "epoch": 4.384615384615385,
      "grad_norm": 8.849506378173828,
      "learning_rate": 2.461538461538462e-06,
      "loss": 0.1612,
      "step": 570
    },
    {
      "epoch": 4.461538461538462,
      "grad_norm": 0.044292520731687546,
      "learning_rate": 2.153846153846154e-06,
      "loss": 0.0723,
      "step": 580
    },
    {
      "epoch": 4.538461538461538,
      "grad_norm": 105.56414794921875,
      "learning_rate": 1.8461538461538465e-06,
      "loss": 0.0192,
      "step": 590
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 0.03247995674610138,
      "learning_rate": 1.5384615384615387e-06,
      "loss": 0.2548,
      "step": 600
    },
    {
      "epoch": 4.6923076923076925,
      "grad_norm": 0.052367787808179855,
      "learning_rate": 1.230769230769231e-06,
      "loss": 0.0246,
      "step": 610
    },
    {
      "epoch": 4.769230769230769,
      "grad_norm": 0.00981516856700182,
      "learning_rate": 9.230769230769232e-07,
      "loss": 0.0353,
      "step": 620
    },
    {
      "epoch": 4.846153846153846,
      "grad_norm": 0.01593305729329586,
      "learning_rate": 6.153846153846155e-07,
      "loss": 0.0982,
      "step": 630
    },
    {
      "epoch": 4.923076923076923,
      "grad_norm": 0.21991655230522156,
      "learning_rate": 3.0769230769230774e-07,
      "loss": 0.0059,
      "step": 640
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.0014734750147908926,
      "learning_rate": 0.0,
      "loss": 0.3263,
      "step": 650
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9849624060150376,
      "eval_loss": 0.10819273442029953,
      "eval_runtime": 4.5937,
      "eval_samples_per_second": 28.953,
      "eval_steps_per_second": 3.701,
      "step": 650
    },
    {
      "epoch": 5.0,
      "step": 650,
      "total_flos": 5.2806343514259456e+17,
      "train_loss": 0.3360688577305812,
      "train_runtime": 518.6256,
      "train_samples_per_second": 9.969,
      "train_steps_per_second": 1.253
    }
  ],
  "logging_steps": 10,
  "max_steps": 650,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.2806343514259456e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}