{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.994106090373281,
  "eval_steps": 500,
  "global_step": 381,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07858546168958742,
      "grad_norm": 5.295495045457736,
      "learning_rate": 2.564102564102564e-06,
      "loss": 1.1348,
      "num_input_tokens_seen": 285944,
      "step": 10
    },
    {
      "epoch": 0.15717092337917485,
      "grad_norm": 1.3364250221425176,
      "learning_rate": 5.128205128205128e-06,
      "loss": 0.9328,
      "num_input_tokens_seen": 553248,
      "step": 20
    },
    {
      "epoch": 0.2357563850687623,
      "grad_norm": 0.8492034246167627,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.8484,
      "num_input_tokens_seen": 822480,
      "step": 30
    },
    {
      "epoch": 0.3143418467583497,
      "grad_norm": 0.7234244773864981,
      "learning_rate": 9.999789047591563e-06,
      "loss": 0.758,
      "num_input_tokens_seen": 1085968,
      "step": 40
    },
    {
      "epoch": 0.3929273084479371,
      "grad_norm": 0.7012241271754472,
      "learning_rate": 9.974496289936769e-06,
      "loss": 0.7506,
      "num_input_tokens_seen": 1332664,
      "step": 50
    },
    {
      "epoch": 0.4715127701375246,
      "grad_norm": 0.6727005740081735,
      "learning_rate": 9.90725746626209e-06,
      "loss": 0.7311,
      "num_input_tokens_seen": 1605712,
      "step": 60
    },
    {
      "epoch": 0.550098231827112,
      "grad_norm": 0.5942119076165618,
      "learning_rate": 9.798639549376946e-06,
      "loss": 0.6815,
      "num_input_tokens_seen": 1896456,
      "step": 70
    },
    {
      "epoch": 0.6286836935166994,
      "grad_norm": 0.7307490789781241,
      "learning_rate": 9.64955842986544e-06,
      "loss": 0.7245,
      "num_input_tokens_seen": 2161224,
      "step": 80
    },
    {
      "epoch": 0.7072691552062869,
      "grad_norm": 0.5604571056904298,
      "learning_rate": 9.461271193091971e-06,
      "loss": 0.6255,
      "num_input_tokens_seen": 2467224,
      "step": 90
    },
    {
      "epoch": 0.7858546168958742,
      "grad_norm": 0.6954309102689223,
      "learning_rate": 9.23536551917611e-06,
      "loss": 0.6309,
      "num_input_tokens_seen": 2743848,
      "step": 100
    },
    {
      "epoch": 0.8644400785854617,
      "grad_norm": 0.6105791636001258,
      "learning_rate": 8.973746295318499e-06,
      "loss": 0.6661,
      "num_input_tokens_seen": 3015224,
      "step": 110
    },
    {
      "epoch": 0.9430255402750491,
      "grad_norm": 0.7555612161614201,
      "learning_rate": 8.67861955336566e-06,
      "loss": 0.6815,
      "num_input_tokens_seen": 3274408,
      "step": 120
    },
    {
      "epoch": 1.0216110019646365,
      "grad_norm": 0.736180742069797,
      "learning_rate": 8.352473868055746e-06,
      "loss": 0.6733,
      "num_input_tokens_seen": 3552384,
      "step": 130
    },
    {
      "epoch": 1.1001964636542239,
      "grad_norm": 0.8186129592841361,
      "learning_rate": 7.998059372799409e-06,
      "loss": 0.545,
      "num_input_tokens_seen": 3820496,
      "step": 140
    },
    {
      "epoch": 1.1787819253438114,
      "grad_norm": 0.7297158276181345,
      "learning_rate": 7.61836456993939e-06,
      "loss": 0.5028,
      "num_input_tokens_seen": 4086592,
      "step": 150
    },
    {
      "epoch": 1.2573673870333988,
      "grad_norm": 0.7309791930989968,
      "learning_rate": 7.2165911310299305e-06,
      "loss": 0.5087,
      "num_input_tokens_seen": 4355776,
      "step": 160
    },
    {
      "epoch": 1.3359528487229864,
      "grad_norm": 0.7564499779548668,
      "learning_rate": 6.796126899625688e-06,
      "loss": 0.49,
      "num_input_tokens_seen": 4634424,
      "step": 170
    },
    {
      "epoch": 1.4145383104125737,
      "grad_norm": 0.7980536006544474,
      "learning_rate": 6.360517324226676e-06,
      "loss": 0.4941,
      "num_input_tokens_seen": 4909728,
      "step": 180
    },
    {
      "epoch": 1.493123772102161,
      "grad_norm": 0.7591010900995777,
      "learning_rate": 5.913435562263036e-06,
      "loss": 0.4711,
      "num_input_tokens_seen": 5195200,
      "step": 190
    },
    {
      "epoch": 1.5717092337917484,
      "grad_norm": 0.7101983358318353,
      "learning_rate": 5.458651507209518e-06,
      "loss": 0.497,
      "num_input_tokens_seen": 5462608,
      "step": 200
    },
    {
      "epoch": 1.650294695481336,
      "grad_norm": 0.8796806516365058,
      "learning_rate": 5e-06,
      "loss": 0.482,
      "num_input_tokens_seen": 5730248,
      "step": 210
    },
    {
      "epoch": 1.7288801571709234,
      "grad_norm": 0.8085067044469503,
      "learning_rate": 4.541348492790482e-06,
      "loss": 0.5014,
      "num_input_tokens_seen": 5990552,
      "step": 220
    },
    {
      "epoch": 1.807465618860511,
      "grad_norm": 0.7890332198315568,
      "learning_rate": 4.0865644377369666e-06,
      "loss": 0.4757,
      "num_input_tokens_seen": 6273288,
      "step": 230
    },
    {
      "epoch": 1.8860510805500983,
      "grad_norm": 0.7795005553479045,
      "learning_rate": 3.639482675773324e-06,
      "loss": 0.4583,
      "num_input_tokens_seen": 6553400,
      "step": 240
    },
    {
      "epoch": 1.9646365422396856,
      "grad_norm": 0.7390457626908427,
      "learning_rate": 3.203873100374314e-06,
      "loss": 0.4617,
      "num_input_tokens_seen": 6836072,
      "step": 250
    },
    {
      "epoch": 2.043222003929273,
      "grad_norm": 0.8497968488938652,
      "learning_rate": 2.783408868970071e-06,
      "loss": 0.4566,
      "num_input_tokens_seen": 7093808,
      "step": 260
    },
    {
      "epoch": 2.1218074656188604,
      "grad_norm": 0.7260683682391704,
      "learning_rate": 2.381635430060611e-06,
      "loss": 0.3637,
      "num_input_tokens_seen": 7368800,
      "step": 270
    },
    {
      "epoch": 2.2003929273084477,
      "grad_norm": 0.8975430950896504,
      "learning_rate": 2.0019406272005913e-06,
      "loss": 0.3459,
      "num_input_tokens_seen": 7655808,
      "step": 280
    },
    {
      "epoch": 2.2789783889980355,
      "grad_norm": 0.7999294865618197,
      "learning_rate": 1.6475261319442553e-06,
      "loss": 0.3553,
      "num_input_tokens_seen": 7911600,
      "step": 290
    },
    {
      "epoch": 2.357563850687623,
      "grad_norm": 1.08697856574494,
      "learning_rate": 1.321380446634342e-06,
      "loss": 0.3451,
      "num_input_tokens_seen": 8192536,
      "step": 300
    },
    {
      "epoch": 2.43614931237721,
      "grad_norm": 0.9266005735156818,
      "learning_rate": 1.026253704681502e-06,
      "loss": 0.3735,
      "num_input_tokens_seen": 8465008,
      "step": 310
    },
    {
      "epoch": 2.5147347740667976,
      "grad_norm": 0.988052963356121,
      "learning_rate": 7.646344808238904e-07,
      "loss": 0.373,
      "num_input_tokens_seen": 8732552,
      "step": 320
    },
    {
      "epoch": 2.593320235756385,
      "grad_norm": 0.7077241690549176,
      "learning_rate": 5.387288069080298e-07,
      "loss": 0.3504,
      "num_input_tokens_seen": 9006928,
      "step": 330
    },
    {
      "epoch": 2.6719056974459727,
      "grad_norm": 0.8753220952555882,
      "learning_rate": 3.504415701345615e-07,
      "loss": 0.3255,
      "num_input_tokens_seen": 9272760,
      "step": 340
    },
    {
      "epoch": 2.75049115913556,
      "grad_norm": 0.8961502686499399,
      "learning_rate": 2.0136045062305543e-07,
      "loss": 0.337,
      "num_input_tokens_seen": 9539904,
      "step": 350
    },
    {
      "epoch": 2.8290766208251474,
      "grad_norm": 0.8575326308820346,
      "learning_rate": 9.274253373791064e-08,
      "loss": 0.381,
      "num_input_tokens_seen": 9813344,
      "step": 360
    },
    {
      "epoch": 2.907662082514735,
      "grad_norm": 0.729386140153712,
      "learning_rate": 2.55037100632316e-08,
      "loss": 0.3531,
      "num_input_tokens_seen": 10090560,
      "step": 370
    },
    {
      "epoch": 2.986247544204322,
      "grad_norm": 0.7450157172804136,
      "learning_rate": 2.1095240843815868e-10,
      "loss": 0.359,
      "num_input_tokens_seen": 10366800,
      "step": 380
    },
    {
      "epoch": 2.994106090373281,
      "num_input_tokens_seen": 10403944,
      "step": 381,
      "total_flos": 16608737034240.0,
      "train_loss": 0.5381014986926802,
      "train_runtime": 3056.3101,
      "train_samples_per_second": 1.996,
      "train_steps_per_second": 0.125
    }
  ],
  "logging_steps": 10,
  "max_steps": 381,
  "num_input_tokens_seen": 10403944,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 16608737034240.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}