{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.875912408759124,
  "eval_steps": 1000,
  "global_step": 51,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.058394160583941604,
      "grad_norm": 4.656792954985782,
      "learning_rate": 0.0,
      "loss": 0.4427,
      "step": 1
    },
    {
      "epoch": 0.11678832116788321,
      "grad_norm": 4.86013370097108,
      "learning_rate": 1.6666666666666665e-07,
      "loss": 0.4479,
      "step": 2
    },
    {
      "epoch": 0.17518248175182483,
      "grad_norm": 4.890457886287774,
      "learning_rate": 3.333333333333333e-07,
      "loss": 0.4409,
      "step": 3
    },
    {
      "epoch": 0.23357664233576642,
      "grad_norm": 4.716105876070575,
      "learning_rate": 5e-07,
      "loss": 0.4431,
      "step": 4
    },
    {
      "epoch": 0.291970802919708,
      "grad_norm": 4.662120140818256,
      "learning_rate": 6.666666666666666e-07,
      "loss": 0.4399,
      "step": 5
    },
    {
      "epoch": 0.35036496350364965,
      "grad_norm": 4.595422118216669,
      "learning_rate": 8.333333333333333e-07,
      "loss": 0.4329,
      "step": 6
    },
    {
      "epoch": 0.40875912408759124,
      "grad_norm": 4.226730555329321,
      "learning_rate": 1e-06,
      "loss": 0.4309,
      "step": 7
    },
    {
      "epoch": 0.46715328467153283,
      "grad_norm": 4.171716322640935,
      "learning_rate": 9.98782025129912e-07,
      "loss": 0.4376,
      "step": 8
    },
    {
      "epoch": 0.5255474452554745,
      "grad_norm": 2.797761384697659,
      "learning_rate": 9.95134034370785e-07,
      "loss": 0.4133,
      "step": 9
    },
    {
      "epoch": 0.583941605839416,
      "grad_norm": 2.5830664759555444,
      "learning_rate": 9.890738003669027e-07,
      "loss": 0.406,
      "step": 10
    },
    {
      "epoch": 0.6423357664233577,
      "grad_norm": 2.3850526224388444,
      "learning_rate": 9.806308479691594e-07,
      "loss": 0.4163,
      "step": 11
    },
    {
      "epoch": 0.7007299270072993,
      "grad_norm": 2.3089181989651593,
      "learning_rate": 9.698463103929541e-07,
      "loss": 0.4158,
      "step": 12
    },
    {
      "epoch": 0.7591240875912408,
      "grad_norm": 2.134247320958559,
      "learning_rate": 9.567727288213004e-07,
      "loss": 0.4014,
      "step": 13
    },
    {
      "epoch": 0.8175182481751825,
      "grad_norm": 2.136565413176427,
      "learning_rate": 9.414737964294634e-07,
      "loss": 0.4,
      "step": 14
    },
    {
      "epoch": 0.8759124087591241,
      "grad_norm": 2.091829552338069,
      "learning_rate": 9.240240480782129e-07,
      "loss": 0.3939,
      "step": 15
    },
    {
      "epoch": 0.9343065693430657,
      "grad_norm": 2.0557987105811537,
      "learning_rate": 9.045084971874737e-07,
      "loss": 0.3961,
      "step": 16
    },
    {
      "epoch": 0.9927007299270073,
      "grad_norm": 1.9838338116938496,
      "learning_rate": 8.83022221559489e-07,
      "loss": 0.3936,
      "step": 17
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.9838338116938496,
      "learning_rate": 8.596699001693255e-07,
      "loss": 0.3953,
      "step": 18
    },
    {
      "epoch": 1.0583941605839415,
      "grad_norm": 3.7195402419448547,
      "learning_rate": 8.34565303179429e-07,
      "loss": 0.3889,
      "step": 19
    },
    {
      "epoch": 1.1167883211678833,
      "grad_norm": 2.0185142045130724,
      "learning_rate": 8.07830737662829e-07,
      "loss": 0.3945,
      "step": 20
    },
    {
      "epoch": 1.1751824817518248,
      "grad_norm": 1.776183202633764,
      "learning_rate": 7.795964517353733e-07,
      "loss": 0.3792,
      "step": 21
    },
    {
      "epoch": 1.2335766423357664,
      "grad_norm": 1.3920276083517107,
      "learning_rate": 7.5e-07,
      "loss": 0.3656,
      "step": 22
    },
    {
      "epoch": 1.2919708029197081,
      "grad_norm": 1.2863840146844603,
      "learning_rate": 7.191855733945386e-07,
      "loss": 0.3661,
      "step": 23
    },
    {
      "epoch": 1.3503649635036497,
      "grad_norm": 1.2539879708687325,
      "learning_rate": 6.87303296707956e-07,
      "loss": 0.3597,
      "step": 24
    },
    {
      "epoch": 1.4087591240875912,
      "grad_norm": 1.166120242178893,
      "learning_rate": 6.545084971874736e-07,
      "loss": 0.3516,
      "step": 25
    },
    {
      "epoch": 1.4671532846715327,
      "grad_norm": 1.1488662726861754,
      "learning_rate": 6.209609477998338e-07,
      "loss": 0.3565,
      "step": 26
    },
    {
      "epoch": 1.5255474452554745,
      "grad_norm": 1.1401441355458104,
      "learning_rate": 5.868240888334652e-07,
      "loss": 0.3593,
      "step": 27
    },
    {
      "epoch": 1.583941605839416,
      "grad_norm": 1.1098917532126118,
      "learning_rate": 5.522642316338268e-07,
      "loss": 0.3553,
      "step": 28
    },
    {
      "epoch": 1.6423357664233578,
      "grad_norm": 1.0608300492190985,
      "learning_rate": 5.174497483512505e-07,
      "loss": 0.3468,
      "step": 29
    },
    {
      "epoch": 1.7007299270072993,
      "grad_norm": 1.0513121541995292,
      "learning_rate": 4.825502516487496e-07,
      "loss": 0.3512,
      "step": 30
    },
    {
      "epoch": 1.7591240875912408,
      "grad_norm": 1.0321496985153071,
      "learning_rate": 4.477357683661733e-07,
      "loss": 0.3462,
      "step": 31
    },
    {
      "epoch": 1.8175182481751824,
      "grad_norm": 0.9664124061777448,
      "learning_rate": 4.131759111665348e-07,
      "loss": 0.3453,
      "step": 32
    },
    {
      "epoch": 1.8759124087591241,
      "grad_norm": 0.9864302519260517,
      "learning_rate": 3.790390522001662e-07,
      "loss": 0.3548,
      "step": 33
    },
    {
      "epoch": 1.9343065693430657,
      "grad_norm": 0.9284627064839345,
      "learning_rate": 3.454915028125263e-07,
      "loss": 0.3369,
      "step": 34
    },
    {
      "epoch": 1.9927007299270074,
      "grad_norm": 0.9113524544646622,
      "learning_rate": 3.1269670329204393e-07,
      "loss": 0.3452,
      "step": 35
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.9113524544646622,
      "learning_rate": 2.808144266054612e-07,
      "loss": 0.3717,
      "step": 36
    },
    {
      "epoch": 2.0583941605839415,
      "grad_norm": 1.606883667124614,
      "learning_rate": 2.500000000000001e-07,
      "loss": 0.3407,
      "step": 37
    },
    {
      "epoch": 2.116788321167883,
      "grad_norm": 0.8931807344006792,
      "learning_rate": 2.2040354826462664e-07,
      "loss": 0.3421,
      "step": 38
    },
    {
      "epoch": 2.1751824817518246,
      "grad_norm": 0.9177610844452588,
      "learning_rate": 1.9216926233717084e-07,
      "loss": 0.3475,
      "step": 39
    },
    {
      "epoch": 2.2335766423357666,
      "grad_norm": 0.8877519183136245,
      "learning_rate": 1.6543469682057104e-07,
      "loss": 0.3428,
      "step": 40
    },
    {
      "epoch": 2.291970802919708,
      "grad_norm": 0.8786726057000499,
      "learning_rate": 1.4033009983067452e-07,
      "loss": 0.3369,
      "step": 41
    },
    {
      "epoch": 2.3503649635036497,
      "grad_norm": 0.8740350065054695,
      "learning_rate": 1.1697777844051104e-07,
      "loss": 0.3432,
      "step": 42
    },
    {
      "epoch": 2.408759124087591,
      "grad_norm": 0.8767663235746571,
      "learning_rate": 9.549150281252632e-08,
      "loss": 0.3448,
      "step": 43
    },
    {
      "epoch": 2.4671532846715327,
      "grad_norm": 0.8554900558425669,
      "learning_rate": 7.597595192178702e-08,
      "loss": 0.3366,
      "step": 44
    },
    {
      "epoch": 2.5255474452554747,
      "grad_norm": 0.8931022618355365,
      "learning_rate": 5.8526203570536504e-08,
      "loss": 0.3413,
      "step": 45
    },
    {
      "epoch": 2.5839416058394162,
      "grad_norm": 0.8653492886520165,
      "learning_rate": 4.322727117869951e-08,
      "loss": 0.3427,
      "step": 46
    },
    {
      "epoch": 2.6423357664233578,
      "grad_norm": 0.8828223465494373,
      "learning_rate": 3.015368960704584e-08,
      "loss": 0.3401,
      "step": 47
    },
    {
      "epoch": 2.7007299270072993,
      "grad_norm": 0.8436117670189993,
      "learning_rate": 1.936915203084055e-08,
      "loss": 0.344,
      "step": 48
    },
    {
      "epoch": 2.759124087591241,
      "grad_norm": 0.870664029954303,
      "learning_rate": 1.0926199633097154e-08,
      "loss": 0.3367,
      "step": 49
    },
    {
      "epoch": 2.8175182481751824,
      "grad_norm": 0.8533210535483198,
      "learning_rate": 4.865965629214819e-09,
      "loss": 0.3326,
      "step": 50
    },
    {
      "epoch": 2.875912408759124,
      "grad_norm": 0.8603385788586924,
      "learning_rate": 1.217974870087901e-09,
      "loss": 0.3353,
      "step": 51
    }
  ],
  "logging_steps": 1,
  "max_steps": 51,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 121368125374464.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}