{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7777777777777778,
  "eval_steps": 50,
  "global_step": 700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0011111111111111111,
      "grad_norm": 234.0,
      "learning_rate": 0.0,
      "loss": 34.6327,
      "step": 1
    },
    {
      "epoch": 0.011111111111111112,
      "grad_norm": 196.0,
      "learning_rate": 9e-06,
      "loss": 32.4195,
      "step": 10
    },
    {
      "epoch": 0.022222222222222223,
      "grad_norm": 66.875,
      "learning_rate": 1.9e-05,
      "loss": 14.1658,
      "step": 20
    },
    {
      "epoch": 0.03333333333333333,
      "grad_norm": 0.9267578125,
      "learning_rate": 2.9e-05,
      "loss": 0.4885,
      "step": 30
    },
    {
      "epoch": 0.044444444444444446,
      "grad_norm": 0.4169921875,
      "learning_rate": 3.9000000000000006e-05,
      "loss": 0.3108,
      "step": 40
    },
    {
      "epoch": 0.05555555555555555,
      "grad_norm": 0.41357421875,
      "learning_rate": 4.9e-05,
      "loss": 0.2811,
      "step": 50
    },
    {
      "epoch": 0.05555555555555555,
      "eval_loss": 0.14292466640472412,
      "eval_runtime": 449.904,
      "eval_samples_per_second": 4.001,
      "eval_steps_per_second": 2.0,
      "step": 50
    },
    {
      "epoch": 0.06666666666666667,
      "grad_norm": 0.359619140625,
      "learning_rate": 4.9742857142857145e-05,
      "loss": 0.2584,
      "step": 60
    },
    {
      "epoch": 0.07777777777777778,
      "grad_norm": 0.431640625,
      "learning_rate": 4.9457142857142854e-05,
      "loss": 0.2643,
      "step": 70
    },
    {
      "epoch": 0.08888888888888889,
      "grad_norm": 0.46923828125,
      "learning_rate": 4.917142857142858e-05,
      "loss": 0.2923,
      "step": 80
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.449951171875,
      "learning_rate": 4.888571428571429e-05,
      "loss": 0.29,
      "step": 90
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 0.3896484375,
      "learning_rate": 4.86e-05,
      "loss": 0.2609,
      "step": 100
    },
    {
      "epoch": 0.1111111111111111,
      "eval_loss": 0.13491107523441315,
      "eval_runtime": 431.8863,
      "eval_samples_per_second": 4.168,
      "eval_steps_per_second": 2.084,
      "step": 100
    },
    {
      "epoch": 0.12222222222222222,
      "grad_norm": 0.3828125,
      "learning_rate": 4.831428571428572e-05,
      "loss": 0.2863,
      "step": 110
    },
    {
      "epoch": 0.13333333333333333,
      "grad_norm": 0.603515625,
      "learning_rate": 4.802857142857143e-05,
      "loss": 0.2647,
      "step": 120
    },
    {
      "epoch": 0.14444444444444443,
      "grad_norm": 0.439208984375,
      "learning_rate": 4.7742857142857144e-05,
      "loss": 0.2706,
      "step": 130
    },
    {
      "epoch": 0.15555555555555556,
      "grad_norm": 0.50634765625,
      "learning_rate": 4.745714285714286e-05,
      "loss": 0.2356,
      "step": 140
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 0.513671875,
      "learning_rate": 4.717142857142857e-05,
      "loss": 0.2469,
      "step": 150
    },
    {
      "epoch": 0.16666666666666666,
      "eval_loss": 0.12212829291820526,
      "eval_runtime": 424.7761,
      "eval_samples_per_second": 4.238,
      "eval_steps_per_second": 2.119,
      "step": 150
    },
    {
      "epoch": 0.17777777777777778,
      "grad_norm": 0.48095703125,
      "learning_rate": 4.6885714285714285e-05,
      "loss": 0.2469,
      "step": 160
    },
    {
      "epoch": 0.18888888888888888,
      "grad_norm": 0.4775390625,
      "learning_rate": 4.660000000000001e-05,
      "loss": 0.2566,
      "step": 170
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.416259765625,
      "learning_rate": 4.631428571428572e-05,
      "loss": 0.2263,
      "step": 180
    },
    {
      "epoch": 0.2111111111111111,
      "grad_norm": 0.47900390625,
      "learning_rate": 4.602857142857143e-05,
      "loss": 0.2505,
      "step": 190
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 0.3916015625,
      "learning_rate": 4.574285714285714e-05,
      "loss": 0.2232,
      "step": 200
    },
    {
      "epoch": 0.2222222222222222,
      "eval_loss": 0.11928859353065491,
      "eval_runtime": 432.6807,
      "eval_samples_per_second": 4.16,
      "eval_steps_per_second": 2.08,
      "step": 200
    },
    {
      "epoch": 0.23333333333333334,
      "grad_norm": 0.48046875,
      "learning_rate": 4.545714285714286e-05,
      "loss": 0.2394,
      "step": 210
    },
    {
      "epoch": 0.24444444444444444,
      "grad_norm": 0.40771484375,
      "learning_rate": 4.5171428571428575e-05,
      "loss": 0.226,
      "step": 220
    },
    {
      "epoch": 0.25555555555555554,
      "grad_norm": 0.476318359375,
      "learning_rate": 4.4885714285714284e-05,
      "loss": 0.2628,
      "step": 230
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 0.443603515625,
      "learning_rate": 4.46e-05,
      "loss": 0.25,
      "step": 240
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 0.468994140625,
      "learning_rate": 4.4314285714285716e-05,
      "loss": 0.2663,
      "step": 250
    },
    {
      "epoch": 0.2777777777777778,
      "eval_loss": 0.11743941903114319,
      "eval_runtime": 463.571,
      "eval_samples_per_second": 3.883,
      "eval_steps_per_second": 1.941,
      "step": 250
    },
    {
      "epoch": 0.28888888888888886,
      "grad_norm": 0.5185546875,
      "learning_rate": 4.402857142857143e-05,
      "loss": 0.2446,
      "step": 260
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.43994140625,
      "learning_rate": 4.374285714285715e-05,
      "loss": 0.257,
      "step": 270
    },
    {
      "epoch": 0.3111111111111111,
      "grad_norm": 0.447021484375,
      "learning_rate": 4.345714285714286e-05,
      "loss": 0.2292,
      "step": 280
    },
    {
      "epoch": 0.32222222222222224,
      "grad_norm": 0.372314453125,
      "learning_rate": 4.317142857142857e-05,
      "loss": 0.2314,
      "step": 290
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 0.4365234375,
      "learning_rate": 4.288571428571429e-05,
      "loss": 0.2288,
      "step": 300
    },
    {
      "epoch": 0.3333333333333333,
      "eval_loss": 0.11491398513317108,
      "eval_runtime": 454.5263,
      "eval_samples_per_second": 3.96,
      "eval_steps_per_second": 1.98,
      "step": 300
    },
    {
      "epoch": 0.34444444444444444,
      "grad_norm": 0.3798828125,
      "learning_rate": 4.26e-05,
      "loss": 0.23,
      "step": 310
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 0.431396484375,
      "learning_rate": 4.2314285714285715e-05,
      "loss": 0.2208,
      "step": 320
    },
    {
      "epoch": 0.36666666666666664,
      "grad_norm": 0.369384765625,
      "learning_rate": 4.202857142857143e-05,
      "loss": 0.213,
      "step": 330
    },
    {
      "epoch": 0.37777777777777777,
      "grad_norm": 0.46826171875,
      "learning_rate": 4.174285714285715e-05,
      "loss": 0.2453,
      "step": 340
    },
    {
      "epoch": 0.3888888888888889,
      "grad_norm": 0.485107421875,
      "learning_rate": 4.145714285714286e-05,
      "loss": 0.2454,
      "step": 350
    },
    {
      "epoch": 0.3888888888888889,
      "eval_loss": 0.11370400339365005,
      "eval_runtime": 429.4907,
      "eval_samples_per_second": 4.191,
      "eval_steps_per_second": 2.096,
      "step": 350
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.42919921875,
      "learning_rate": 4.117142857142857e-05,
      "loss": 0.245,
      "step": 360
    },
    {
      "epoch": 0.4111111111111111,
      "grad_norm": 0.38427734375,
      "learning_rate": 4.088571428571429e-05,
      "loss": 0.2255,
      "step": 370
    },
    {
      "epoch": 0.4222222222222222,
      "grad_norm": 0.295654296875,
      "learning_rate": 4.0600000000000004e-05,
      "loss": 0.2266,
      "step": 380
    },
    {
      "epoch": 0.43333333333333335,
      "grad_norm": 0.437744140625,
      "learning_rate": 4.0314285714285714e-05,
      "loss": 0.2438,
      "step": 390
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.4521484375,
      "learning_rate": 4.002857142857143e-05,
      "loss": 0.2135,
      "step": 400
    },
    {
      "epoch": 0.4444444444444444,
      "eval_loss": 0.11205437779426575,
      "eval_runtime": 431.8391,
      "eval_samples_per_second": 4.168,
      "eval_steps_per_second": 2.084,
      "step": 400
    },
    {
      "epoch": 0.45555555555555555,
      "grad_norm": 0.33154296875,
      "learning_rate": 3.9742857142857146e-05,
      "loss": 0.2081,
      "step": 410
    },
    {
      "epoch": 0.4666666666666667,
      "grad_norm": 0.35595703125,
      "learning_rate": 3.945714285714286e-05,
      "loss": 0.2369,
      "step": 420
    },
    {
      "epoch": 0.4777777777777778,
      "grad_norm": 0.5166015625,
      "learning_rate": 3.917142857142858e-05,
      "loss": 0.2116,
      "step": 430
    },
    {
      "epoch": 0.4888888888888889,
      "grad_norm": 0.403076171875,
      "learning_rate": 3.888571428571429e-05,
      "loss": 0.2159,
      "step": 440
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.349609375,
      "learning_rate": 3.86e-05,
      "loss": 0.208,
      "step": 450
    },
    {
      "epoch": 0.5,
      "eval_loss": 0.11105828732252121,
      "eval_runtime": 468.2112,
      "eval_samples_per_second": 3.844,
      "eval_steps_per_second": 1.922,
      "step": 450
    },
    {
      "epoch": 0.5111111111111111,
      "grad_norm": 0.4482421875,
      "learning_rate": 3.831428571428571e-05,
      "loss": 0.2459,
      "step": 460
    },
    {
      "epoch": 0.5222222222222223,
      "grad_norm": 0.365478515625,
      "learning_rate": 3.802857142857143e-05,
      "loss": 0.2239,
      "step": 470
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 0.397216796875,
      "learning_rate": 3.7742857142857145e-05,
      "loss": 0.2396,
      "step": 480
    },
    {
      "epoch": 0.5444444444444444,
      "grad_norm": 0.384765625,
      "learning_rate": 3.745714285714286e-05,
      "loss": 0.2263,
      "step": 490
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 0.403564453125,
      "learning_rate": 3.717142857142858e-05,
      "loss": 0.2225,
      "step": 500
    },
    {
      "epoch": 0.5555555555555556,
      "eval_loss": 0.11006490141153336,
      "eval_runtime": 450.548,
      "eval_samples_per_second": 3.995,
      "eval_steps_per_second": 1.998,
      "step": 500
    },
    {
      "epoch": 0.5666666666666667,
      "grad_norm": 0.4404296875,
      "learning_rate": 3.688571428571429e-05,
      "loss": 0.2238,
      "step": 510
    },
    {
      "epoch": 0.5777777777777777,
      "grad_norm": 0.354248046875,
      "learning_rate": 3.66e-05,
      "loss": 0.2032,
      "step": 520
    },
    {
      "epoch": 0.5888888888888889,
      "grad_norm": 0.344970703125,
      "learning_rate": 3.631428571428572e-05,
      "loss": 0.222,
      "step": 530
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.3740234375,
      "learning_rate": 3.602857142857143e-05,
      "loss": 0.2329,
      "step": 540
    },
    {
      "epoch": 0.6111111111111112,
      "grad_norm": 0.3486328125,
      "learning_rate": 3.574285714285714e-05,
      "loss": 0.2188,
      "step": 550
    },
    {
      "epoch": 0.6111111111111112,
      "eval_loss": 0.10909327119588852,
      "eval_runtime": 435.9811,
      "eval_samples_per_second": 4.129,
      "eval_steps_per_second": 2.064,
      "step": 550
    },
    {
      "epoch": 0.6222222222222222,
      "grad_norm": 0.425537109375,
      "learning_rate": 3.545714285714286e-05,
      "loss": 0.215,
      "step": 560
    },
    {
      "epoch": 0.6333333333333333,
      "grad_norm": 0.41357421875,
      "learning_rate": 3.517142857142857e-05,
      "loss": 0.242,
      "step": 570
    },
    {
      "epoch": 0.6444444444444445,
      "grad_norm": 0.482666015625,
      "learning_rate": 3.488571428571429e-05,
      "loss": 0.2034,
      "step": 580
    },
    {
      "epoch": 0.6555555555555556,
      "grad_norm": 0.49658203125,
      "learning_rate": 3.46e-05,
      "loss": 0.2241,
      "step": 590
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.42041015625,
      "learning_rate": 3.431428571428572e-05,
      "loss": 0.2477,
      "step": 600
    },
    {
      "epoch": 0.6666666666666666,
      "eval_loss": 0.10835430771112442,
      "eval_runtime": 451.0734,
      "eval_samples_per_second": 3.99,
      "eval_steps_per_second": 1.995,
      "step": 600
    },
    {
      "epoch": 0.6777777777777778,
      "grad_norm": 0.372314453125,
      "learning_rate": 3.402857142857143e-05,
      "loss": 0.2293,
      "step": 610
    },
    {
      "epoch": 0.6888888888888889,
      "grad_norm": 0.387939453125,
      "learning_rate": 3.374285714285714e-05,
      "loss": 0.246,
      "step": 620
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.426025390625,
      "learning_rate": 3.345714285714286e-05,
      "loss": 0.2,
      "step": 630
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 0.380126953125,
      "learning_rate": 3.3171428571428574e-05,
      "loss": 0.2184,
      "step": 640
    },
    {
      "epoch": 0.7222222222222222,
      "grad_norm": 0.393310546875,
      "learning_rate": 3.2885714285714284e-05,
      "loss": 0.2092,
      "step": 650
    },
    {
      "epoch": 0.7222222222222222,
      "eval_loss": 0.10704999417066574,
      "eval_runtime": 434.4458,
      "eval_samples_per_second": 4.143,
      "eval_steps_per_second": 2.072,
      "step": 650
    },
    {
      "epoch": 0.7333333333333333,
      "grad_norm": 0.37841796875,
      "learning_rate": 3.26e-05,
      "loss": 0.2078,
      "step": 660
    },
    {
      "epoch": 0.7444444444444445,
      "grad_norm": 0.400634765625,
      "learning_rate": 3.2314285714285716e-05,
      "loss": 0.2358,
      "step": 670
    },
    {
      "epoch": 0.7555555555555555,
      "grad_norm": 0.38037109375,
      "learning_rate": 3.202857142857143e-05,
      "loss": 0.2226,
      "step": 680
    },
    {
      "epoch": 0.7666666666666667,
      "grad_norm": 0.4248046875,
      "learning_rate": 3.174285714285715e-05,
      "loss": 0.2351,
      "step": 690
    },
    {
      "epoch": 0.7777777777777778,
      "grad_norm": 0.51953125,
      "learning_rate": 3.145714285714286e-05,
      "loss": 0.2525,
      "step": 700
    },
    {
      "epoch": 0.7777777777777778,
      "eval_loss": 0.10690909624099731,
      "eval_runtime": 432.8965,
      "eval_samples_per_second": 4.158,
      "eval_steps_per_second": 2.079,
      "step": 700
    }
  ],
  "logging_steps": 10,
  "max_steps": 1800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2387832165981747e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}