{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9968051118210862,
  "eval_steps": 500,
  "global_step": 78,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012779552715654952,
      "grad_norm": 0.37965271626197744,
      "learning_rate": 0.0,
      "loss": 0.3599,
      "step": 1
    },
    {
      "epoch": 0.025559105431309903,
      "grad_norm": 0.3736706075404897,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.378,
      "step": 2
    },
    {
      "epoch": 0.038338658146964855,
      "grad_norm": 0.37890493762604477,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.3825,
      "step": 3
    },
    {
      "epoch": 0.051118210862619806,
      "grad_norm": 0.40372911477846174,
      "learning_rate": 1e-05,
      "loss": 0.3788,
      "step": 4
    },
    {
      "epoch": 0.06389776357827476,
      "grad_norm": 0.37907950078708796,
      "learning_rate": 9.995614150494293e-06,
      "loss": 0.3732,
      "step": 5
    },
    {
      "epoch": 0.07667731629392971,
      "grad_norm": 0.45474683275444566,
      "learning_rate": 9.982464296247523e-06,
      "loss": 0.3713,
      "step": 6
    },
    {
      "epoch": 0.08945686900958466,
      "grad_norm": 0.38965745007242497,
      "learning_rate": 9.960573506572391e-06,
      "loss": 0.3721,
      "step": 7
    },
    {
      "epoch": 0.10223642172523961,
      "grad_norm": 0.3588543347048909,
      "learning_rate": 9.929980185352525e-06,
      "loss": 0.3677,
      "step": 8
    },
    {
      "epoch": 0.11501597444089456,
      "grad_norm": 0.3676288354917835,
      "learning_rate": 9.890738003669029e-06,
      "loss": 0.3682,
      "step": 9
    },
    {
      "epoch": 0.12779552715654952,
      "grad_norm": 0.366572371724228,
      "learning_rate": 9.842915805643156e-06,
      "loss": 0.3711,
      "step": 10
    },
    {
      "epoch": 0.14057507987220447,
      "grad_norm": 0.3967011359602009,
      "learning_rate": 9.786597487660336e-06,
      "loss": 0.3834,
      "step": 11
    },
    {
      "epoch": 0.15335463258785942,
      "grad_norm": 0.3728090092074529,
      "learning_rate": 9.721881851187406e-06,
      "loss": 0.3692,
      "step": 12
    },
    {
      "epoch": 0.16613418530351437,
      "grad_norm": 0.3632162211431165,
      "learning_rate": 9.648882429441258e-06,
      "loss": 0.3513,
      "step": 13
    },
    {
      "epoch": 0.17891373801916932,
      "grad_norm": 0.3778373601791395,
      "learning_rate": 9.567727288213005e-06,
      "loss": 0.371,
      "step": 14
    },
    {
      "epoch": 0.19169329073482427,
      "grad_norm": 0.3646460111888152,
      "learning_rate": 9.478558801197065e-06,
      "loss": 0.3694,
      "step": 15
    },
    {
      "epoch": 0.20447284345047922,
      "grad_norm": 0.34205692388708364,
      "learning_rate": 9.381533400219319e-06,
      "loss": 0.3573,
      "step": 16
    },
    {
      "epoch": 0.21725239616613418,
      "grad_norm": 0.36225959033997773,
      "learning_rate": 9.276821300802535e-06,
      "loss": 0.3564,
      "step": 17
    },
    {
      "epoch": 0.23003194888178913,
      "grad_norm": 0.37050465607039806,
      "learning_rate": 9.164606203550498e-06,
      "loss": 0.3734,
      "step": 18
    },
    {
      "epoch": 0.24281150159744408,
      "grad_norm": 0.36600323916056005,
      "learning_rate": 9.045084971874738e-06,
      "loss": 0.3611,
      "step": 19
    },
    {
      "epoch": 0.25559105431309903,
      "grad_norm": 0.3919776608648141,
      "learning_rate": 8.9184672866292e-06,
      "loss": 0.3698,
      "step": 20
    },
    {
      "epoch": 0.268370607028754,
      "grad_norm": 0.35347876739340284,
      "learning_rate": 8.784975278258783e-06,
      "loss": 0.3615,
      "step": 21
    },
    {
      "epoch": 0.28115015974440893,
      "grad_norm": 0.3590738074772562,
      "learning_rate": 8.644843137107058e-06,
      "loss": 0.3713,
      "step": 22
    },
    {
      "epoch": 0.2939297124600639,
      "grad_norm": 0.36434445447386,
      "learning_rate": 8.498316702566828e-06,
      "loss": 0.3662,
      "step": 23
    },
    {
      "epoch": 0.30670926517571884,
      "grad_norm": 0.36248143685760353,
      "learning_rate": 8.345653031794292e-06,
      "loss": 0.3593,
      "step": 24
    },
    {
      "epoch": 0.3194888178913738,
      "grad_norm": 0.35575162986957903,
      "learning_rate": 8.18711994874345e-06,
      "loss": 0.3543,
      "step": 25
    },
    {
      "epoch": 0.33226837060702874,
      "grad_norm": 0.36303944788148274,
      "learning_rate": 8.022995574311876e-06,
      "loss": 0.3727,
      "step": 26
    },
    {
      "epoch": 0.3450479233226837,
      "grad_norm": 0.3921031488253169,
      "learning_rate": 7.85356783842216e-06,
      "loss": 0.3585,
      "step": 27
    },
    {
      "epoch": 0.35782747603833864,
      "grad_norm": 0.3496025366873796,
      "learning_rate": 7.679133974894984e-06,
      "loss": 0.363,
      "step": 28
    },
    {
      "epoch": 0.3706070287539936,
      "grad_norm": 0.3742172743264075,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.3539,
      "step": 29
    },
    {
      "epoch": 0.38338658146964855,
      "grad_norm": 0.44014907339205533,
      "learning_rate": 7.31648017559931e-06,
      "loss": 0.3655,
      "step": 30
    },
    {
      "epoch": 0.3961661341853035,
      "grad_norm": 0.3305493435139563,
      "learning_rate": 7.128896457825364e-06,
      "loss": 0.3657,
      "step": 31
    },
    {
      "epoch": 0.40894568690095845,
      "grad_norm": 0.35909874221936344,
      "learning_rate": 6.9375779322605154e-06,
      "loss": 0.3652,
      "step": 32
    },
    {
      "epoch": 0.4217252396166134,
      "grad_norm": 0.3365061176374655,
      "learning_rate": 6.7428602366090764e-06,
      "loss": 0.3607,
      "step": 33
    },
    {
      "epoch": 0.43450479233226835,
      "grad_norm": 0.3779276077542422,
      "learning_rate": 6.545084971874738e-06,
      "loss": 0.3446,
      "step": 34
    },
    {
      "epoch": 0.4472843450479233,
      "grad_norm": 0.3651665352306785,
      "learning_rate": 6.344599103076329e-06,
      "loss": 0.3555,
      "step": 35
    },
    {
      "epoch": 0.46006389776357826,
      "grad_norm": 0.37043471461258853,
      "learning_rate": 6.141754350553279e-06,
      "loss": 0.3569,
      "step": 36
    },
    {
      "epoch": 0.4728434504792332,
      "grad_norm": 0.36813379072727104,
      "learning_rate": 5.936906572928625e-06,
      "loss": 0.3677,
      "step": 37
    },
    {
      "epoch": 0.48562300319488816,
      "grad_norm": 0.4483809967394267,
      "learning_rate": 5.730415142812059e-06,
      "loss": 0.3593,
      "step": 38
    },
    {
      "epoch": 0.4984025559105431,
      "grad_norm": 0.34177088967681896,
      "learning_rate": 5.522642316338268e-06,
      "loss": 0.3602,
      "step": 39
    },
    {
      "epoch": 0.5111821086261981,
      "grad_norm": 0.35781000352243253,
      "learning_rate": 5.3139525976465675e-06,
      "loss": 0.3551,
      "step": 40
    },
    {
      "epoch": 0.5239616613418531,
      "grad_norm": 0.35871882162000174,
      "learning_rate": 5.1047120994167855e-06,
      "loss": 0.3563,
      "step": 41
    },
    {
      "epoch": 0.536741214057508,
      "grad_norm": 0.36917420614177304,
      "learning_rate": 4.895287900583216e-06,
      "loss": 0.3559,
      "step": 42
    },
    {
      "epoch": 0.549520766773163,
      "grad_norm": 0.38499006672877384,
      "learning_rate": 4.686047402353433e-06,
      "loss": 0.366,
      "step": 43
    },
    {
      "epoch": 0.5623003194888179,
      "grad_norm": 0.33608612010402134,
      "learning_rate": 4.477357683661734e-06,
      "loss": 0.3492,
      "step": 44
    },
    {
      "epoch": 0.5750798722044729,
      "grad_norm": 0.33229601499781103,
      "learning_rate": 4.269584857187942e-06,
      "loss": 0.3563,
      "step": 45
    },
    {
      "epoch": 0.5878594249201278,
      "grad_norm": 0.3776421423395802,
      "learning_rate": 4.063093427071376e-06,
      "loss": 0.3625,
      "step": 46
    },
    {
      "epoch": 0.6006389776357828,
      "grad_norm": 0.3180983877094481,
      "learning_rate": 3.8582456494467214e-06,
      "loss": 0.3666,
      "step": 47
    },
    {
      "epoch": 0.6134185303514377,
      "grad_norm": 0.36287999152044925,
      "learning_rate": 3.655400896923672e-06,
      "loss": 0.3598,
      "step": 48
    },
    {
      "epoch": 0.6261980830670927,
      "grad_norm": 0.3244108095244616,
      "learning_rate": 3.4549150281252635e-06,
      "loss": 0.3576,
      "step": 49
    },
    {
      "epoch": 0.6389776357827476,
      "grad_norm": 0.3631094081661346,
      "learning_rate": 3.2571397633909252e-06,
      "loss": 0.3531,
      "step": 50
    },
    {
      "epoch": 0.6517571884984026,
      "grad_norm": 0.3727645091131634,
      "learning_rate": 3.0624220677394854e-06,
      "loss": 0.3568,
      "step": 51
    },
    {
      "epoch": 0.6645367412140575,
      "grad_norm": 0.38196517296559107,
      "learning_rate": 2.871103542174637e-06,
      "loss": 0.3637,
      "step": 52
    },
    {
      "epoch": 0.6773162939297125,
      "grad_norm": 0.3476139702225886,
      "learning_rate": 2.683519824400693e-06,
      "loss": 0.3602,
      "step": 53
    },
    {
      "epoch": 0.6900958466453674,
      "grad_norm": 0.3735680719951074,
      "learning_rate": 2.5000000000000015e-06,
      "loss": 0.3654,
      "step": 54
    },
    {
      "epoch": 0.7028753993610224,
      "grad_norm": 0.36691452194460167,
      "learning_rate": 2.320866025105016e-06,
      "loss": 0.3525,
      "step": 55
    },
    {
      "epoch": 0.7156549520766773,
      "grad_norm": 0.356375157502457,
      "learning_rate": 2.146432161577842e-06,
      "loss": 0.3604,
      "step": 56
    },
    {
      "epoch": 0.7284345047923323,
      "grad_norm": 0.33769185121287565,
      "learning_rate": 1.977004425688126e-06,
      "loss": 0.3474,
      "step": 57
    },
    {
      "epoch": 0.7412140575079872,
      "grad_norm": 0.3863085150751754,
      "learning_rate": 1.8128800512565514e-06,
      "loss": 0.3637,
      "step": 58
    },
    {
      "epoch": 0.7539936102236422,
      "grad_norm": 0.3309629514569372,
      "learning_rate": 1.6543469682057105e-06,
      "loss": 0.3568,
      "step": 59
    },
    {
      "epoch": 0.7667731629392971,
      "grad_norm": 0.3994851800797164,
      "learning_rate": 1.5016832974331725e-06,
      "loss": 0.3625,
      "step": 60
    },
    {
      "epoch": 0.7795527156549521,
      "grad_norm": 0.3376669486785409,
      "learning_rate": 1.3551568628929434e-06,
      "loss": 0.3602,
      "step": 61
    },
    {
      "epoch": 0.792332268370607,
      "grad_norm": 0.35127015707304343,
      "learning_rate": 1.2150247217412186e-06,
      "loss": 0.3479,
      "step": 62
    },
    {
      "epoch": 0.805111821086262,
      "grad_norm": 0.34795059814422313,
      "learning_rate": 1.0815327133708015e-06,
      "loss": 0.362,
      "step": 63
    },
    {
      "epoch": 0.8178913738019169,
      "grad_norm": 0.31935427422139573,
      "learning_rate": 9.549150281252633e-07,
      "loss": 0.3517,
      "step": 64
    },
    {
      "epoch": 0.8306709265175719,
      "grad_norm": 0.3551869591988346,
      "learning_rate": 8.353937964495029e-07,
      "loss": 0.3626,
      "step": 65
    },
    {
      "epoch": 0.8434504792332268,
      "grad_norm": 0.3348505850619627,
      "learning_rate": 7.23178699197467e-07,
      "loss": 0.3571,
      "step": 66
    },
    {
      "epoch": 0.8562300319488818,
      "grad_norm": 0.33688290789611175,
      "learning_rate": 6.184665997806832e-07,
      "loss": 0.3654,
      "step": 67
    },
    {
      "epoch": 0.8690095846645367,
      "grad_norm": 0.35195820197977745,
      "learning_rate": 5.214411988029355e-07,
      "loss": 0.3583,
      "step": 68
    },
    {
      "epoch": 0.8817891373801917,
      "grad_norm": 0.3371110567104658,
      "learning_rate": 4.322727117869951e-07,
      "loss": 0.3554,
      "step": 69
    },
    {
      "epoch": 0.8945686900958466,
      "grad_norm": 0.36762422097879016,
      "learning_rate": 3.511175705587433e-07,
      "loss": 0.3549,
      "step": 70
    },
    {
      "epoch": 0.9073482428115016,
      "grad_norm": 0.3725211087794039,
      "learning_rate": 2.7811814881259503e-07,
      "loss": 0.3678,
      "step": 71
    },
    {
      "epoch": 0.9201277955271565,
      "grad_norm": 0.37170196569211045,
      "learning_rate": 2.134025123396638e-07,
      "loss": 0.3564,
      "step": 72
    },
    {
      "epoch": 0.9329073482428115,
      "grad_norm": 0.3328627066146425,
      "learning_rate": 1.5708419435684463e-07,
      "loss": 0.3454,
      "step": 73
    },
    {
      "epoch": 0.9456869009584664,
      "grad_norm": 0.3524868947579366,
      "learning_rate": 1.0926199633097156e-07,
      "loss": 0.3638,
      "step": 74
    },
    {
      "epoch": 0.9584664536741214,
      "grad_norm": 0.3369712810359636,
      "learning_rate": 7.001981464747565e-08,
      "loss": 0.3584,
      "step": 75
    },
    {
      "epoch": 0.9712460063897763,
      "grad_norm": 0.33126087012419436,
      "learning_rate": 3.9426493427611177e-08,
      "loss": 0.3551,
      "step": 76
    },
    {
      "epoch": 0.9840255591054313,
      "grad_norm": 0.37584666495722696,
      "learning_rate": 1.753570375247815e-08,
      "loss": 0.3587,
      "step": 77
    },
    {
      "epoch": 0.9968051118210862,
      "grad_norm": 0.3399855800542146,
      "learning_rate": 4.385849505708084e-09,
      "loss": 0.3542,
      "step": 78
    },
    {
      "epoch": 0.9968051118210862,
      "step": 78,
      "total_flos": 104106607771648.0,
      "train_loss": 0.3616702079008787,
      "train_runtime": 488.541,
      "train_samples_per_second": 20.469,
      "train_steps_per_second": 0.16
    }
  ],
  "logging_steps": 1,
  "max_steps": 78,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 40,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 104106607771648.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}