{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.98989898989899,
  "eval_steps": 20,
  "global_step": 111,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05387205387205387,
      "grad_norm": 30.91269874572754,
      "learning_rate": 0.0001,
      "loss": 9.4247,
      "step": 2
    },
    {
      "epoch": 0.10774410774410774,
      "grad_norm": 17.899776458740234,
      "learning_rate": 9.818181818181818e-05,
      "loss": 8.2334,
      "step": 4
    },
    {
      "epoch": 0.16161616161616163,
      "grad_norm": 11.289294242858887,
      "learning_rate": 9.636363636363637e-05,
      "loss": 6.7049,
      "step": 6
    },
    {
      "epoch": 0.21548821548821548,
      "grad_norm": 13.52355670928955,
      "learning_rate": 9.454545454545455e-05,
      "loss": 6.0078,
      "step": 8
    },
    {
      "epoch": 0.26936026936026936,
      "grad_norm": 6.83944845199585,
      "learning_rate": 9.272727272727273e-05,
      "loss": 5.4206,
      "step": 10
    },
    {
      "epoch": 0.32323232323232326,
      "grad_norm": 5.749248504638672,
      "learning_rate": 9.090909090909092e-05,
      "loss": 5.0018,
      "step": 12
    },
    {
      "epoch": 0.3771043771043771,
      "grad_norm": 6.119119644165039,
      "learning_rate": 8.90909090909091e-05,
      "loss": 4.7585,
      "step": 14
    },
    {
      "epoch": 0.43097643097643096,
      "grad_norm": 5.070394992828369,
      "learning_rate": 8.727272727272727e-05,
      "loss": 4.4387,
      "step": 16
    },
    {
      "epoch": 0.48484848484848486,
      "grad_norm": 5.378049850463867,
      "learning_rate": 8.545454545454545e-05,
      "loss": 4.1869,
      "step": 18
    },
    {
      "epoch": 0.5387205387205387,
      "grad_norm": 6.204185962677002,
      "learning_rate": 8.363636363636364e-05,
      "loss": 4.0268,
      "step": 20
    },
    {
      "epoch": 0.5387205387205387,
      "eval_loss": 3.4558703899383545,
      "eval_runtime": 8.1602,
      "eval_samples_per_second": 145.708,
      "eval_steps_per_second": 18.259,
      "step": 20
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 4.224447727203369,
      "learning_rate": 8.181818181818183e-05,
      "loss": 3.8953,
      "step": 22
    },
    {
      "epoch": 0.6464646464646465,
      "grad_norm": 3.6069717407226562,
      "learning_rate": 8e-05,
      "loss": 3.8231,
      "step": 24
    },
    {
      "epoch": 0.7003367003367004,
      "grad_norm": 2.707709550857544,
      "learning_rate": 7.818181818181818e-05,
      "loss": 3.7116,
      "step": 26
    },
    {
      "epoch": 0.7542087542087542,
      "grad_norm": 3.31360125541687,
      "learning_rate": 7.636363636363637e-05,
      "loss": 3.5999,
      "step": 28
    },
    {
      "epoch": 0.8080808080808081,
      "grad_norm": 2.9818968772888184,
      "learning_rate": 7.454545454545455e-05,
      "loss": 3.5958,
      "step": 30
    },
    {
      "epoch": 0.8619528619528619,
      "grad_norm": 2.68215274810791,
      "learning_rate": 7.272727272727273e-05,
      "loss": 3.5518,
      "step": 32
    },
    {
      "epoch": 0.9158249158249159,
      "grad_norm": 1.9956828355789185,
      "learning_rate": 7.090909090909092e-05,
      "loss": 3.4949,
      "step": 34
    },
    {
      "epoch": 0.9696969696969697,
      "grad_norm": 1.9735620021820068,
      "learning_rate": 6.90909090909091e-05,
      "loss": 3.4185,
      "step": 36
    },
    {
      "epoch": 1.0235690235690236,
      "grad_norm": 1.8688853979110718,
      "learning_rate": 6.727272727272727e-05,
      "loss": 3.4284,
      "step": 38
    },
    {
      "epoch": 1.0774410774410774,
      "grad_norm": 1.9668887853622437,
      "learning_rate": 6.545454545454546e-05,
      "loss": 3.3888,
      "step": 40
    },
    {
      "epoch": 1.0774410774410774,
      "eval_loss": 3.2203786373138428,
      "eval_runtime": 8.1727,
      "eval_samples_per_second": 145.484,
      "eval_steps_per_second": 18.231,
      "step": 40
    },
    {
      "epoch": 1.1313131313131313,
      "grad_norm": 1.8594480752944946,
      "learning_rate": 6.363636363636364e-05,
      "loss": 3.3016,
      "step": 42
    },
    {
      "epoch": 1.1851851851851851,
      "grad_norm": 2.0851259231567383,
      "learning_rate": 6.181818181818182e-05,
      "loss": 3.2937,
      "step": 44
    },
    {
      "epoch": 1.239057239057239,
      "grad_norm": 2.022775411605835,
      "learning_rate": 6e-05,
      "loss": 3.3097,
      "step": 46
    },
    {
      "epoch": 1.2929292929292928,
      "grad_norm": 1.7249592542648315,
      "learning_rate": 5.818181818181818e-05,
      "loss": 3.2856,
      "step": 48
    },
    {
      "epoch": 1.3468013468013469,
      "grad_norm": 1.894808292388916,
      "learning_rate": 5.636363636363636e-05,
      "loss": 3.2579,
      "step": 50
    },
    {
      "epoch": 1.4006734006734007,
      "grad_norm": 2.0816519260406494,
      "learning_rate": 5.4545454545454546e-05,
      "loss": 3.2534,
      "step": 52
    },
    {
      "epoch": 1.4545454545454546,
      "grad_norm": 1.8632744550704956,
      "learning_rate": 5.272727272727272e-05,
      "loss": 3.2358,
      "step": 54
    },
    {
      "epoch": 1.5084175084175084,
      "grad_norm": 1.8851394653320312,
      "learning_rate": 5.090909090909091e-05,
      "loss": 3.2256,
      "step": 56
    },
    {
      "epoch": 1.5622895622895623,
      "grad_norm": 1.7529875040054321,
      "learning_rate": 4.909090909090909e-05,
      "loss": 3.2148,
      "step": 58
    },
    {
      "epoch": 1.6161616161616161,
      "grad_norm": 1.8536018133163452,
      "learning_rate": 4.7272727272727275e-05,
      "loss": 3.2608,
      "step": 60
    },
    {
      "epoch": 1.6161616161616161,
      "eval_loss": 3.1798043251037598,
      "eval_runtime": 8.1729,
      "eval_samples_per_second": 145.48,
      "eval_steps_per_second": 18.231,
      "step": 60
    },
    {
      "epoch": 1.67003367003367,
      "grad_norm": 1.9621059894561768,
      "learning_rate": 4.545454545454546e-05,
      "loss": 3.2202,
      "step": 62
    },
    {
      "epoch": 1.723905723905724,
      "grad_norm": 1.9148567914962769,
      "learning_rate": 4.3636363636363636e-05,
      "loss": 3.2282,
      "step": 64
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 1.7804056406021118,
      "learning_rate": 4.181818181818182e-05,
      "loss": 3.2654,
      "step": 66
    },
    {
      "epoch": 1.8316498316498318,
      "grad_norm": 1.8891396522521973,
      "learning_rate": 4e-05,
      "loss": 3.1924,
      "step": 68
    },
    {
      "epoch": 1.8855218855218854,
      "grad_norm": 1.8429640531539917,
      "learning_rate": 3.818181818181819e-05,
      "loss": 3.1756,
      "step": 70
    },
    {
      "epoch": 1.9393939393939394,
      "grad_norm": 1.795774221420288,
      "learning_rate": 3.6363636363636364e-05,
      "loss": 3.2097,
      "step": 72
    },
    {
      "epoch": 1.9932659932659933,
      "grad_norm": 1.8442139625549316,
      "learning_rate": 3.454545454545455e-05,
      "loss": 3.2271,
      "step": 74
    },
    {
      "epoch": 2.047138047138047,
      "grad_norm": 1.6865243911743164,
      "learning_rate": 3.272727272727273e-05,
      "loss": 3.1261,
      "step": 76
    },
    {
      "epoch": 2.101010101010101,
      "grad_norm": 1.6551978588104248,
      "learning_rate": 3.090909090909091e-05,
      "loss": 3.101,
      "step": 78
    },
    {
      "epoch": 2.154882154882155,
      "grad_norm": 1.7607771158218384,
      "learning_rate": 2.909090909090909e-05,
      "loss": 3.0763,
      "step": 80
    },
    {
      "epoch": 2.154882154882155,
      "eval_loss": 3.162412405014038,
      "eval_runtime": 8.1804,
      "eval_samples_per_second": 145.348,
      "eval_steps_per_second": 18.214,
      "step": 80
    },
    {
      "epoch": 2.208754208754209,
      "grad_norm": 1.720595359802246,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 3.1138,
      "step": 82
    },
    {
      "epoch": 2.2626262626262625,
      "grad_norm": 1.7866605520248413,
      "learning_rate": 2.5454545454545454e-05,
      "loss": 3.1239,
      "step": 84
    },
    {
      "epoch": 2.3164983164983166,
      "grad_norm": 1.7226550579071045,
      "learning_rate": 2.3636363636363637e-05,
      "loss": 3.0789,
      "step": 86
    },
    {
      "epoch": 2.3703703703703702,
      "grad_norm": 1.8532516956329346,
      "learning_rate": 2.1818181818181818e-05,
      "loss": 3.1286,
      "step": 88
    },
    {
      "epoch": 2.4242424242424243,
      "grad_norm": 1.7198731899261475,
      "learning_rate": 2e-05,
      "loss": 3.1123,
      "step": 90
    },
    {
      "epoch": 2.478114478114478,
      "grad_norm": 1.8436646461486816,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 3.0712,
      "step": 92
    },
    {
      "epoch": 2.531986531986532,
      "grad_norm": 1.811488151550293,
      "learning_rate": 1.6363636363636366e-05,
      "loss": 3.0727,
      "step": 94
    },
    {
      "epoch": 2.5858585858585856,
      "grad_norm": 1.7093764543533325,
      "learning_rate": 1.4545454545454545e-05,
      "loss": 3.0966,
      "step": 96
    },
    {
      "epoch": 2.6397306397306397,
      "grad_norm": 1.8103761672973633,
      "learning_rate": 1.2727272727272727e-05,
      "loss": 3.084,
      "step": 98
    },
    {
      "epoch": 2.6936026936026938,
      "grad_norm": 1.8916672468185425,
      "learning_rate": 1.0909090909090909e-05,
      "loss": 3.0912,
      "step": 100
    },
    {
      "epoch": 2.6936026936026938,
      "eval_loss": 3.153109550476074,
      "eval_runtime": 8.1741,
      "eval_samples_per_second": 145.459,
      "eval_steps_per_second": 18.228,
      "step": 100
    },
    {
      "epoch": 2.7474747474747474,
      "grad_norm": 1.7815495729446411,
      "learning_rate": 9.090909090909091e-06,
      "loss": 3.0743,
      "step": 102
    },
    {
      "epoch": 2.8013468013468015,
      "grad_norm": 1.8348023891448975,
      "learning_rate": 7.272727272727272e-06,
      "loss": 3.092,
      "step": 104
    },
    {
      "epoch": 2.855218855218855,
      "grad_norm": 1.807790756225586,
      "learning_rate": 5.4545454545454545e-06,
      "loss": 3.0913,
      "step": 106
    },
    {
      "epoch": 2.909090909090909,
      "grad_norm": 1.7084001302719116,
      "learning_rate": 3.636363636363636e-06,
      "loss": 3.07,
      "step": 108
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 1.719460129737854,
      "learning_rate": 1.818181818181818e-06,
      "loss": 3.0812,
      "step": 110
    },
    {
      "epoch": 2.98989898989899,
      "step": 111,
      "total_flos": 1.389220151427072e+16,
      "train_loss": 3.7213772073522344,
      "train_runtime": 747.8744,
      "train_samples_per_second": 38.144,
      "train_steps_per_second": 0.148
    },
    {
      "epoch": 2.98989898989899,
      "eval_loss": 3.1522939205169678,
      "eval_runtime": 8.1573,
      "eval_samples_per_second": 145.759,
      "eval_steps_per_second": 18.266,
      "step": 111
    },
    {
      "epoch": 2.98989898989899,
      "eval_loss": 3.1559650897979736,
      "eval_runtime": 8.1292,
      "eval_samples_per_second": 146.262,
      "eval_steps_per_second": 18.329,
      "step": 111
    }
  ],
  "logging_steps": 2,
  "max_steps": 111,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.389220151427072e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}