{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.01297016861219196,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00025940337224383917,
      "grad_norm": 13.49365234375,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 1.5584,
      "step": 10
    },
    {
      "epoch": 0.0005188067444876783,
      "grad_norm": 2.6006040573120117,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.7111,
      "step": 20
    },
    {
      "epoch": 0.0007782101167315176,
      "grad_norm": 4.435553073883057,
      "learning_rate": 4.8e-05,
      "loss": 0.3076,
      "step": 30
    },
    {
      "epoch": 0.0010376134889753567,
      "grad_norm": 1.1146177053451538,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.1826,
      "step": 40
    },
    {
      "epoch": 0.0012970168612191958,
      "grad_norm": 0.8707008957862854,
      "learning_rate": 8e-05,
      "loss": 0.1341,
      "step": 50
    },
    {
      "epoch": 0.0015564202334630351,
      "grad_norm": 1.7411352396011353,
      "learning_rate": 9.6e-05,
      "loss": 0.1144,
      "step": 60
    },
    {
      "epoch": 0.0018158236057068742,
      "grad_norm": 0.9691908955574036,
      "learning_rate": 0.00011200000000000001,
      "loss": 0.0831,
      "step": 70
    },
    {
      "epoch": 0.0020752269779507134,
      "grad_norm": 1.0398532152175903,
      "learning_rate": 0.00012800000000000002,
      "loss": 0.0801,
      "step": 80
    },
    {
      "epoch": 0.0023346303501945525,
      "grad_norm": 0.5252112746238708,
      "learning_rate": 0.000144,
      "loss": 0.081,
      "step": 90
    },
    {
      "epoch": 0.0025940337224383916,
      "grad_norm": 0.6552278995513916,
      "learning_rate": 0.00016,
      "loss": 0.0821,
      "step": 100
    },
    {
      "epoch": 0.0028534370946822307,
      "grad_norm": 0.6302046775817871,
      "learning_rate": 0.00017600000000000002,
      "loss": 0.0749,
      "step": 110
    },
    {
      "epoch": 0.0031128404669260703,
      "grad_norm": 0.7439587712287903,
      "learning_rate": 0.000192,
      "loss": 0.0647,
      "step": 120
    },
    {
      "epoch": 0.0033722438391699094,
      "grad_norm": 6.8584771156311035,
      "learning_rate": 0.0001999978128380225,
      "loss": 0.0726,
      "step": 130
    },
    {
      "epoch": 0.0036316472114137485,
      "grad_norm": 2.014613389968872,
      "learning_rate": 0.0001999803161162393,
      "loss": 0.0936,
      "step": 140
    },
    {
      "epoch": 0.0038910505836575876,
      "grad_norm": 1.3758050203323364,
      "learning_rate": 0.00019994532573409262,
      "loss": 0.1089,
      "step": 150
    },
    {
      "epoch": 0.004150453955901427,
      "grad_norm": 1.3027769327163696,
      "learning_rate": 0.00019989284781388617,
      "loss": 0.0772,
      "step": 160
    },
    {
      "epoch": 0.004409857328145266,
      "grad_norm": 1.021938443183899,
      "learning_rate": 0.00019982289153773646,
      "loss": 0.0695,
      "step": 170
    },
    {
      "epoch": 0.004669260700389105,
      "grad_norm": 0.8069638609886169,
      "learning_rate": 0.00019973546914596623,
      "loss": 0.0624,
      "step": 180
    },
    {
      "epoch": 0.004928664072632944,
      "grad_norm": 0.5198134779930115,
      "learning_rate": 0.00019963059593496268,
      "loss": 0.0481,
      "step": 190
    },
    {
      "epoch": 0.005188067444876783,
      "grad_norm": 0.39965012669563293,
      "learning_rate": 0.00019950829025450114,
      "loss": 0.0493,
      "step": 200
    },
    {
      "epoch": 0.005447470817120622,
      "grad_norm": 0.8152652978897095,
      "learning_rate": 0.0001993685735045343,
      "loss": 0.0477,
      "step": 210
    },
    {
      "epoch": 0.005706874189364461,
      "grad_norm": 0.7083427309989929,
      "learning_rate": 0.0001992114701314478,
      "loss": 0.0501,
      "step": 220
    },
    {
      "epoch": 0.0059662775616083005,
      "grad_norm": 0.690096914768219,
      "learning_rate": 0.000199037007623783,
      "loss": 0.0509,
      "step": 230
    },
    {
      "epoch": 0.0062256809338521405,
      "grad_norm": 0.871699869632721,
      "learning_rate": 0.00019884521650742715,
      "loss": 0.0464,
      "step": 240
    },
    {
      "epoch": 0.00648508430609598,
      "grad_norm": 0.6445639133453369,
      "learning_rate": 0.00019863613034027224,
      "loss": 0.0462,
      "step": 250
    },
    {
      "epoch": 0.006744487678339819,
      "grad_norm": 0.4920782148838043,
      "learning_rate": 0.0001984097857063434,
      "loss": 0.0425,
      "step": 260
    },
    {
      "epoch": 0.007003891050583658,
      "grad_norm": 0.8624788522720337,
      "learning_rate": 0.0001981662222093976,
      "loss": 0.0471,
      "step": 270
    },
    {
      "epoch": 0.007263294422827497,
      "grad_norm": 0.5704030990600586,
      "learning_rate": 0.00019790548246599447,
      "loss": 0.0571,
      "step": 280
    },
    {
      "epoch": 0.007522697795071336,
      "grad_norm": 0.569247841835022,
      "learning_rate": 0.00019762761209803927,
      "loss": 0.0531,
      "step": 290
    },
    {
      "epoch": 0.007782101167315175,
      "grad_norm": 0.7414583563804626,
      "learning_rate": 0.0001973326597248006,
      "loss": 0.0455,
      "step": 300
    },
    {
      "epoch": 0.008041504539559013,
      "grad_norm": 0.42905348539352417,
      "learning_rate": 0.00019702067695440332,
      "loss": 0.0542,
      "step": 310
    },
    {
      "epoch": 0.008300907911802853,
      "grad_norm": 0.6132651567459106,
      "learning_rate": 0.00019669171837479873,
      "loss": 0.0439,
      "step": 320
    },
    {
      "epoch": 0.008560311284046693,
      "grad_norm": 0.44733384251594543,
      "learning_rate": 0.00019634584154421317,
      "loss": 0.0429,
      "step": 330
    },
    {
      "epoch": 0.008819714656290532,
      "grad_norm": 0.516659677028656,
      "learning_rate": 0.00019598310698107702,
      "loss": 0.0407,
      "step": 340
    },
    {
      "epoch": 0.009079118028534372,
      "grad_norm": 0.2340136468410492,
      "learning_rate": 0.00019560357815343577,
      "loss": 0.036,
      "step": 350
    },
    {
      "epoch": 0.00933852140077821,
      "grad_norm": 0.6629723906517029,
      "learning_rate": 0.00019520732146784491,
      "loss": 0.041,
      "step": 360
    },
    {
      "epoch": 0.00959792477302205,
      "grad_norm": 0.3777199387550354,
      "learning_rate": 0.0001947944062577507,
      "loss": 0.0414,
      "step": 370
    },
    {
      "epoch": 0.009857328145265888,
      "grad_norm": 0.44298338890075684,
      "learning_rate": 0.00019436490477135878,
      "loss": 0.0371,
      "step": 380
    },
    {
      "epoch": 0.010116731517509728,
      "grad_norm": 0.4530090093612671,
      "learning_rate": 0.00019391889215899299,
      "loss": 0.037,
      "step": 390
    },
    {
      "epoch": 0.010376134889753566,
      "grad_norm": 0.36142367124557495,
      "learning_rate": 0.0001934564464599461,
      "loss": 0.0387,
      "step": 400
    },
    {
      "epoch": 0.010635538261997406,
      "grad_norm": 0.36559927463531494,
      "learning_rate": 0.00019297764858882514,
      "loss": 0.0386,
      "step": 410
    },
    {
      "epoch": 0.010894941634241245,
      "grad_norm": 0.47660204768180847,
      "learning_rate": 0.00019248258232139388,
      "loss": 0.0316,
      "step": 420
    },
    {
      "epoch": 0.011154345006485085,
      "grad_norm": 0.2614981234073639,
      "learning_rate": 0.00019197133427991436,
      "loss": 0.0417,
      "step": 430
    },
    {
      "epoch": 0.011413748378728923,
      "grad_norm": 0.44811561703681946,
      "learning_rate": 0.00019144399391799043,
      "loss": 0.036,
      "step": 440
    },
    {
      "epoch": 0.011673151750972763,
      "grad_norm": 0.4213508367538452,
      "learning_rate": 0.00019090065350491626,
      "loss": 0.0399,
      "step": 450
    },
    {
      "epoch": 0.011932555123216601,
      "grad_norm": 0.29146406054496765,
      "learning_rate": 0.0001903414081095315,
      "loss": 0.0376,
      "step": 460
    },
    {
      "epoch": 0.012191958495460441,
      "grad_norm": 0.2714293897151947,
      "learning_rate": 0.00018976635558358722,
      "loss": 0.0362,
      "step": 470
    },
    {
      "epoch": 0.012451361867704281,
      "grad_norm": 0.3767367899417877,
      "learning_rate": 0.00018917559654462474,
      "loss": 0.0364,
      "step": 480
    },
    {
      "epoch": 0.01271076523994812,
      "grad_norm": 0.36578992009162903,
      "learning_rate": 0.00018856923435837022,
      "loss": 0.0327,
      "step": 490
    },
    {
      "epoch": 0.01297016861219196,
      "grad_norm": 0.32976096868515015,
      "learning_rate": 0.0001879473751206489,
      "loss": 0.04,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.5729978171392e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}