{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 375,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "grad_norm": 1.7801982164382935,
      "learning_rate": 5.263157894736842e-06,
      "loss": 0.5021,
      "step": 10
    },
    {
      "epoch": 0.16,
      "grad_norm": 1.8954057693481445,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 0.4477,
      "step": 20
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.959135890007019,
      "learning_rate": 1.578947368421053e-05,
      "loss": 0.4523,
      "step": 30
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.6955620646476746,
      "learning_rate": 1.9998261969639324e-05,
      "loss": 0.453,
      "step": 40
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.48524388670921326,
      "learning_rate": 1.9937494319239112e-05,
      "loss": 0.4317,
      "step": 50
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.5131641030311584,
      "learning_rate": 1.979042835741503e-05,
      "loss": 0.4387,
      "step": 60
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.6963252425193787,
      "learning_rate": 1.9558341221417744e-05,
      "loss": 0.4648,
      "step": 70
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.5048828125,
      "learning_rate": 1.9243248381877605e-05,
      "loss": 0.4563,
      "step": 80
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.5360320210456848,
      "learning_rate": 1.8847886140232438e-05,
      "loss": 0.4356,
      "step": 90
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.5510643124580383,
      "learning_rate": 1.8375687866379988e-05,
      "loss": 0.4599,
      "step": 100
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.4948376715183258,
      "learning_rate": 1.7830754182909985e-05,
      "loss": 0.4438,
      "step": 110
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.43106070160865784,
      "learning_rate": 1.721781735483921e-05,
      "loss": 0.4399,
      "step": 120
    },
    {
      "epoch": 1.04,
      "grad_norm": 0.44680503010749817,
      "learning_rate": 1.654220019409317e-05,
      "loss": 0.3948,
      "step": 130
    },
    {
      "epoch": 1.12,
      "grad_norm": 0.8041722178459167,
      "learning_rate": 1.580976983561235e-05,
      "loss": 0.3355,
      "step": 140
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.4531875252723694,
      "learning_rate": 1.5026886786496624e-05,
      "loss": 0.3182,
      "step": 150
    },
    {
      "epoch": 1.28,
      "grad_norm": 0.3980570137500763,
      "learning_rate": 1.4200349690650654e-05,
      "loss": 0.3274,
      "step": 160
    },
    {
      "epoch": 1.3599999999999999,
      "grad_norm": 0.44365376234054565,
      "learning_rate": 1.3337336288600297e-05,
      "loss": 0.3524,
      "step": 170
    },
    {
      "epoch": 1.44,
      "grad_norm": 0.3755331039428711,
      "learning_rate": 1.24453410851916e-05,
      "loss": 0.3114,
      "step": 180
    },
    {
      "epoch": 1.52,
      "grad_norm": 0.4399131238460541,
      "learning_rate": 1.1532110266473026e-05,
      "loss": 0.3151,
      "step": 190
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.4257424473762512,
      "learning_rate": 1.0605574430949983e-05,
      "loss": 0.3391,
      "step": 200
    },
    {
      "epoch": 1.6800000000000002,
      "grad_norm": 0.5761308073997498,
      "learning_rate": 9.673779719380967e-06,
      "loss": 0.3313,
      "step": 210
    },
    {
      "epoch": 1.76,
      "grad_norm": 0.4401087462902069,
      "learning_rate": 8.744817941191862e-06,
      "loss": 0.3081,
      "step": 220
    },
    {
      "epoch": 1.8399999999999999,
      "grad_norm": 0.37950068712234497,
      "learning_rate": 7.826756304298428e-06,
      "loss": 0.3324,
      "step": 230
    },
    {
      "epoch": 1.92,
      "grad_norm": 0.4223324954509735,
      "learning_rate": 6.92756735857107e-06,
      "loss": 0.3175,
      "step": 240
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.4112967848777771,
      "learning_rate": 6.0550597613206205e-06,
      "loss": 0.3204,
      "step": 250
    },
    {
      "epoch": 2.08,
      "grad_norm": 0.5121579766273499,
      "learning_rate": 5.216810466045448e-06,
      "loss": 0.2403,
      "step": 260
    },
    {
      "epoch": 2.16,
      "grad_norm": 0.5039759874343872,
      "learning_rate": 4.420098923320378e-06,
      "loss": 0.2426,
      "step": 270
    },
    {
      "epoch": 2.24,
      "grad_norm": 0.44253629446029663,
      "learning_rate": 3.671843865234238e-06,
      "loss": 0.2264,
      "step": 280
    },
    {
      "epoch": 2.32,
      "grad_norm": 0.35495468974113464,
      "learning_rate": 2.978543222347076e-06,
      "loss": 0.2603,
      "step": 290
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.38393262028694153,
      "learning_rate": 2.346217694934847e-06,
      "loss": 0.2436,
      "step": 300
    },
    {
      "epoch": 2.48,
      "grad_norm": 0.4627029299736023,
      "learning_rate": 1.7803584685552877e-06,
      "loss": 0.2437,
      "step": 310
    },
    {
      "epoch": 2.56,
      "grad_norm": 0.40824005007743835,
      "learning_rate": 1.2858795279787517e-06,
      "loss": 0.2365,
      "step": 320
    },
    {
      "epoch": 2.64,
      "grad_norm": 0.3864591419696808,
      "learning_rate": 8.670749835951964e-07,
      "loss": 0.246,
      "step": 330
    },
    {
      "epoch": 2.7199999999999998,
      "grad_norm": 0.4451102316379547,
      "learning_rate": 5.275817808796013e-07,
      "loss": 0.2507,
      "step": 340
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.41204768419265747,
      "learning_rate": 2.703481167509281e-07,
      "loss": 0.225,
      "step": 350
    },
    {
      "epoch": 2.88,
      "grad_norm": 0.43420955538749695,
      "learning_rate": 9.760783710056176e-08,
      "loss": 0.2448,
      "step": 360
    },
    {
      "epoch": 2.96,
      "grad_norm": 0.40928831696510315,
      "learning_rate": 1.0861037824896337e-08,
      "loss": 0.2144,
      "step": 370
    },
    {
      "epoch": 3.0,
      "step": 375,
      "total_flos": 74595673178112.0,
      "train_loss": 0.33862359380722046,
      "train_runtime": 6722.8271,
      "train_samples_per_second": 0.446,
      "train_steps_per_second": 0.056
    }
  ],
  "logging_steps": 10,
  "max_steps": 375,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 74595673178112.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|