{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9968051118210862,
  "eval_steps": 500,
  "global_step": 78,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012779552715654952,
      "grad_norm": 0.39771989742585373,
      "learning_rate": 0.0,
      "loss": 0.3513,
      "step": 1
    },
    {
      "epoch": 0.025559105431309903,
      "grad_norm": 0.3503927382842784,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.3453,
      "step": 2
    },
    {
      "epoch": 0.038338658146964855,
      "grad_norm": 0.3541513294750479,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.3579,
      "step": 3
    },
    {
      "epoch": 0.051118210862619806,
      "grad_norm": 0.3793453302754602,
      "learning_rate": 1e-05,
      "loss": 0.3449,
      "step": 4
    },
    {
      "epoch": 0.06389776357827476,
      "grad_norm": 0.3507061000992864,
      "learning_rate": 9.995614150494293e-06,
      "loss": 0.3494,
      "step": 5
    },
    {
      "epoch": 0.07667731629392971,
      "grad_norm": 0.3834964826170654,
      "learning_rate": 9.982464296247523e-06,
      "loss": 0.3496,
      "step": 6
    },
    {
      "epoch": 0.08945686900958466,
      "grad_norm": 0.30842125424492345,
      "learning_rate": 9.960573506572391e-06,
      "loss": 0.3437,
      "step": 7
    },
    {
      "epoch": 0.10223642172523961,
      "grad_norm": 0.36593539875619124,
      "learning_rate": 9.929980185352525e-06,
      "loss": 0.3561,
      "step": 8
    },
    {
      "epoch": 0.11501597444089456,
      "grad_norm": 0.3498410702922384,
      "learning_rate": 9.890738003669029e-06,
      "loss": 0.3552,
      "step": 9
    },
    {
      "epoch": 0.12779552715654952,
      "grad_norm": 0.3412875396513364,
      "learning_rate": 9.842915805643156e-06,
      "loss": 0.3429,
      "step": 10
    },
    {
      "epoch": 0.14057507987220447,
      "grad_norm": 0.35588346335375753,
      "learning_rate": 9.786597487660336e-06,
      "loss": 0.3514,
      "step": 11
    },
    {
      "epoch": 0.15335463258785942,
      "grad_norm": 0.3504178411262019,
      "learning_rate": 9.721881851187406e-06,
      "loss": 0.34,
      "step": 12
    },
    {
      "epoch": 0.16613418530351437,
      "grad_norm": 0.35881741829545866,
      "learning_rate": 9.648882429441258e-06,
      "loss": 0.3389,
      "step": 13
    },
    {
      "epoch": 0.17891373801916932,
      "grad_norm": 0.3691885354718765,
      "learning_rate": 9.567727288213005e-06,
      "loss": 0.3452,
      "step": 14
    },
    {
      "epoch": 0.19169329073482427,
      "grad_norm": 0.3604109009057351,
      "learning_rate": 9.478558801197065e-06,
      "loss": 0.3537,
      "step": 15
    },
    {
      "epoch": 0.20447284345047922,
      "grad_norm": 0.3578608086185542,
      "learning_rate": 9.381533400219319e-06,
      "loss": 0.3521,
      "step": 16
    },
    {
      "epoch": 0.21725239616613418,
      "grad_norm": 0.34007722342016206,
      "learning_rate": 9.276821300802535e-06,
      "loss": 0.3452,
      "step": 17
    },
    {
      "epoch": 0.23003194888178913,
      "grad_norm": 0.34585441677753703,
      "learning_rate": 9.164606203550498e-06,
      "loss": 0.3411,
      "step": 18
    },
    {
      "epoch": 0.24281150159744408,
      "grad_norm": 0.3939125640814021,
      "learning_rate": 9.045084971874738e-06,
      "loss": 0.3395,
      "step": 19
    },
    {
      "epoch": 0.25559105431309903,
      "grad_norm": 0.36175304863323837,
      "learning_rate": 8.9184672866292e-06,
      "loss": 0.3339,
      "step": 20
    },
    {
      "epoch": 0.268370607028754,
      "grad_norm": 0.35275928384567523,
      "learning_rate": 8.784975278258783e-06,
      "loss": 0.3367,
      "step": 21
    },
    {
      "epoch": 0.28115015974440893,
      "grad_norm": 0.3250827185486952,
      "learning_rate": 8.644843137107058e-06,
      "loss": 0.3369,
      "step": 22
    },
    {
      "epoch": 0.2939297124600639,
      "grad_norm": 0.5054419109369158,
      "learning_rate": 8.498316702566828e-06,
      "loss": 0.34,
      "step": 23
    },
    {
      "epoch": 0.30670926517571884,
      "grad_norm": 0.3266776805331227,
      "learning_rate": 8.345653031794292e-06,
      "loss": 0.3306,
      "step": 24
    },
    {
      "epoch": 0.3194888178913738,
      "grad_norm": 0.32890587767024576,
      "learning_rate": 8.18711994874345e-06,
      "loss": 0.3356,
      "step": 25
    },
    {
      "epoch": 0.33226837060702874,
      "grad_norm": 0.3221665538699026,
      "learning_rate": 8.022995574311876e-06,
      "loss": 0.3375,
      "step": 26
    },
    {
      "epoch": 0.3450479233226837,
      "grad_norm": 0.3465083152803515,
      "learning_rate": 7.85356783842216e-06,
      "loss": 0.3431,
      "step": 27
    },
    {
      "epoch": 0.35782747603833864,
      "grad_norm": 0.43310871763536735,
      "learning_rate": 7.679133974894984e-06,
      "loss": 0.3386,
      "step": 28
    },
    {
      "epoch": 0.3706070287539936,
      "grad_norm": 0.32877502257390834,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.3298,
      "step": 29
    },
    {
      "epoch": 0.38338658146964855,
      "grad_norm": 0.3292494934921218,
      "learning_rate": 7.31648017559931e-06,
      "loss": 0.3448,
      "step": 30
    },
    {
      "epoch": 0.3961661341853035,
      "grad_norm": 0.3199663189062581,
      "learning_rate": 7.128896457825364e-06,
      "loss": 0.3457,
      "step": 31
    },
    {
      "epoch": 0.40894568690095845,
      "grad_norm": 0.3233128445798616,
      "learning_rate": 6.9375779322605154e-06,
      "loss": 0.3434,
      "step": 32
    },
    {
      "epoch": 0.4217252396166134,
      "grad_norm": 0.3285261067465233,
      "learning_rate": 6.7428602366090764e-06,
      "loss": 0.3383,
      "step": 33
    },
    {
      "epoch": 0.43450479233226835,
      "grad_norm": 0.35326735190011854,
      "learning_rate": 6.545084971874738e-06,
      "loss": 0.3381,
      "step": 34
    },
    {
      "epoch": 0.4472843450479233,
      "grad_norm": 0.3666756609109888,
      "learning_rate": 6.344599103076329e-06,
      "loss": 0.3448,
      "step": 35
    },
    {
      "epoch": 0.46006389776357826,
      "grad_norm": 0.3305434346853474,
      "learning_rate": 6.141754350553279e-06,
      "loss": 0.3421,
      "step": 36
    },
    {
      "epoch": 0.4728434504792332,
      "grad_norm": 0.3323636206886762,
      "learning_rate": 5.936906572928625e-06,
      "loss": 0.3278,
      "step": 37
    },
    {
      "epoch": 0.48562300319488816,
      "grad_norm": 0.35794677827451554,
      "learning_rate": 5.730415142812059e-06,
      "loss": 0.3459,
      "step": 38
    },
    {
      "epoch": 0.4984025559105431,
      "grad_norm": 0.3679618475733093,
      "learning_rate": 5.522642316338268e-06,
      "loss": 0.35,
      "step": 39
    },
    {
      "epoch": 0.5111821086261981,
      "grad_norm": 0.37138496480270594,
      "learning_rate": 5.3139525976465675e-06,
      "loss": 0.3402,
      "step": 40
    },
    {
      "epoch": 0.5239616613418531,
      "grad_norm": 0.416006703048718,
      "learning_rate": 5.1047120994167855e-06,
      "loss": 0.3379,
      "step": 41
    },
    {
      "epoch": 0.536741214057508,
      "grad_norm": 0.3204081349960758,
      "learning_rate": 4.895287900583216e-06,
      "loss": 0.3203,
      "step": 42
    },
    {
      "epoch": 0.549520766773163,
      "grad_norm": 0.397613745358975,
      "learning_rate": 4.686047402353433e-06,
      "loss": 0.3415,
      "step": 43
    },
    {
      "epoch": 0.5623003194888179,
      "grad_norm": 0.3060040830145125,
      "learning_rate": 4.477357683661734e-06,
      "loss": 0.3357,
      "step": 44
    },
    {
      "epoch": 0.5750798722044729,
      "grad_norm": 0.3011008775846613,
      "learning_rate": 4.269584857187942e-06,
      "loss": 0.3302,
      "step": 45
    },
    {
      "epoch": 0.5878594249201278,
      "grad_norm": 0.38677720697029094,
      "learning_rate": 4.063093427071376e-06,
      "loss": 0.3401,
      "step": 46
    },
    {
      "epoch": 0.6006389776357828,
      "grad_norm": 0.34738743193162697,
      "learning_rate": 3.8582456494467214e-06,
      "loss": 0.3365,
      "step": 47
    },
    {
      "epoch": 0.6134185303514377,
      "grad_norm": 0.3408367792504418,
      "learning_rate": 3.655400896923672e-06,
      "loss": 0.3413,
      "step": 48
    },
    {
      "epoch": 0.6261980830670927,
      "grad_norm": 0.31282832243354336,
      "learning_rate": 3.4549150281252635e-06,
      "loss": 0.3351,
      "step": 49
    },
    {
      "epoch": 0.6389776357827476,
      "grad_norm": 0.5037224278122471,
      "learning_rate": 3.2571397633909252e-06,
      "loss": 0.3269,
      "step": 50
    },
    {
      "epoch": 0.6517571884984026,
      "grad_norm": 0.3187609984988368,
      "learning_rate": 3.0624220677394854e-06,
      "loss": 0.343,
      "step": 51
    },
    {
      "epoch": 0.6645367412140575,
      "grad_norm": 0.3246183978936672,
      "learning_rate": 2.871103542174637e-06,
      "loss": 0.3388,
      "step": 52
    },
    {
      "epoch": 0.6773162939297125,
      "grad_norm": 0.3474514194400151,
      "learning_rate": 2.683519824400693e-06,
      "loss": 0.3391,
      "step": 53
    },
    {
      "epoch": 0.6900958466453674,
      "grad_norm": 0.36714846974157134,
      "learning_rate": 2.5000000000000015e-06,
      "loss": 0.3441,
      "step": 54
    },
    {
      "epoch": 0.7028753993610224,
      "grad_norm": 0.3338959751720368,
      "learning_rate": 2.320866025105016e-06,
      "loss": 0.3461,
      "step": 55
    },
    {
      "epoch": 0.7156549520766773,
      "grad_norm": 0.33002449328764283,
      "learning_rate": 2.146432161577842e-06,
      "loss": 0.3335,
      "step": 56
    },
    {
      "epoch": 0.7284345047923323,
      "grad_norm": 0.36330675360870346,
      "learning_rate": 1.977004425688126e-06,
      "loss": 0.3324,
      "step": 57
    },
    {
      "epoch": 0.7412140575079872,
      "grad_norm": 0.33953008325956463,
      "learning_rate": 1.8128800512565514e-06,
      "loss": 0.3475,
      "step": 58
    },
    {
      "epoch": 0.7539936102236422,
      "grad_norm": 0.3067527038125689,
      "learning_rate": 1.6543469682057105e-06,
      "loss": 0.3379,
      "step": 59
    },
    {
      "epoch": 0.7667731629392971,
      "grad_norm": 0.3300789753114584,
      "learning_rate": 1.5016832974331725e-06,
      "loss": 0.3462,
      "step": 60
    },
    {
      "epoch": 0.7795527156549521,
      "grad_norm": 0.33631904568049786,
      "learning_rate": 1.3551568628929434e-06,
      "loss": 0.3434,
      "step": 61
    },
    {
      "epoch": 0.792332268370607,
      "grad_norm": 0.3556419263507287,
      "learning_rate": 1.2150247217412186e-06,
      "loss": 0.3357,
      "step": 62
    },
    {
      "epoch": 0.805111821086262,
      "grad_norm": 0.326809046715247,
      "learning_rate": 1.0815327133708015e-06,
      "loss": 0.3426,
      "step": 63
    },
    {
      "epoch": 0.8178913738019169,
      "grad_norm": 0.3582138331441028,
      "learning_rate": 9.549150281252633e-07,
      "loss": 0.3402,
      "step": 64
    },
    {
      "epoch": 0.8306709265175719,
      "grad_norm": 0.34620512842283024,
      "learning_rate": 8.353937964495029e-07,
      "loss": 0.3455,
      "step": 65
    },
    {
      "epoch": 0.8434504792332268,
      "grad_norm": 0.3788440935963394,
      "learning_rate": 7.23178699197467e-07,
      "loss": 0.3339,
      "step": 66
    },
    {
      "epoch": 0.8562300319488818,
      "grad_norm": 0.34598146714632005,
      "learning_rate": 6.184665997806832e-07,
      "loss": 0.3432,
      "step": 67
    },
    {
      "epoch": 0.8690095846645367,
      "grad_norm": 0.3826176987474288,
      "learning_rate": 5.214411988029355e-07,
      "loss": 0.345,
      "step": 68
    },
    {
      "epoch": 0.8817891373801917,
      "grad_norm": 0.34238202630181,
      "learning_rate": 4.322727117869951e-07,
      "loss": 0.325,
      "step": 69
    },
    {
      "epoch": 0.8945686900958466,
      "grad_norm": 0.35033619456877874,
      "learning_rate": 3.511175705587433e-07,
      "loss": 0.3396,
      "step": 70
    },
    {
      "epoch": 0.9073482428115016,
      "grad_norm": 0.3573085481244407,
      "learning_rate": 2.7811814881259503e-07,
      "loss": 0.3305,
      "step": 71
    },
    {
      "epoch": 0.9201277955271565,
      "grad_norm": 0.3628016685745722,
      "learning_rate": 2.134025123396638e-07,
      "loss": 0.3395,
      "step": 72
    },
    {
      "epoch": 0.9329073482428115,
      "grad_norm": 0.3811711581587498,
      "learning_rate": 1.5708419435684463e-07,
      "loss": 0.3388,
      "step": 73
    },
    {
      "epoch": 0.9456869009584664,
      "grad_norm": 0.36389313722045097,
      "learning_rate": 1.0926199633097156e-07,
      "loss": 0.3445,
      "step": 74
    },
    {
      "epoch": 0.9584664536741214,
      "grad_norm": 0.319653351469741,
      "learning_rate": 7.001981464747565e-08,
      "loss": 0.3439,
      "step": 75
    },
    {
      "epoch": 0.9712460063897763,
      "grad_norm": 0.332617621032464,
      "learning_rate": 3.9426493427611177e-08,
      "loss": 0.3351,
      "step": 76
    },
    {
      "epoch": 0.9840255591054313,
      "grad_norm": 0.31908764512170884,
      "learning_rate": 1.753570375247815e-08,
      "loss": 0.3395,
      "step": 77
    },
    {
      "epoch": 0.9968051118210862,
      "grad_norm": 0.32698909544304744,
      "learning_rate": 4.385849505708084e-09,
      "loss": 0.3368,
      "step": 78
    },
    {
      "epoch": 0.9968051118210862,
      "step": 78,
      "total_flos": 104051217661952.0,
      "train_loss": 0.34081209813937163,
      "train_runtime": 482.4252,
      "train_samples_per_second": 20.729,
      "train_steps_per_second": 0.162
    }
  ],
  "logging_steps": 1,
  "max_steps": 78,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 40,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 104051217661952.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}