{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 3160,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.31695721077654515,
      "grad_norm": 110.09585571289062,
      "learning_rate": 6.265822784810128e-06,
      "loss": 3.1045,
      "step": 100
    },
    {
      "epoch": 0.6339144215530903,
      "grad_norm": 89.5663833618164,
      "learning_rate": 1.2594936708860761e-05,
      "loss": 1.4141,
      "step": 200
    },
    {
      "epoch": 0.9508716323296355,
      "grad_norm": 51.678932189941406,
      "learning_rate": 1.8924050632911394e-05,
      "loss": 0.8833,
      "step": 300
    },
    {
      "epoch": 1.266244057052298,
      "grad_norm": 35.33523178100586,
      "learning_rate": 1.9416315049226445e-05,
      "loss": 0.6702,
      "step": 400
    },
    {
      "epoch": 1.583201267828843,
      "grad_norm": 64.43267822265625,
      "learning_rate": 1.871308016877637e-05,
      "loss": 0.4852,
      "step": 500
    },
    {
      "epoch": 1.9001584786053882,
      "grad_norm": 69.72541046142578,
      "learning_rate": 1.8009845288326303e-05,
      "loss": 0.3471,
      "step": 600
    },
    {
      "epoch": 2.215530903328051,
      "grad_norm": 65.43933868408203,
      "learning_rate": 1.7306610407876232e-05,
      "loss": 0.2796,
      "step": 700
    },
    {
      "epoch": 2.532488114104596,
      "grad_norm": 43.50911331176758,
      "learning_rate": 1.6603375527426162e-05,
      "loss": 0.2656,
      "step": 800
    },
    {
      "epoch": 2.849445324881141,
      "grad_norm": 47.55598449707031,
      "learning_rate": 1.590014064697609e-05,
      "loss": 0.1962,
      "step": 900
    },
    {
      "epoch": 3.1648177496038037,
      "grad_norm": 40.16280746459961,
      "learning_rate": 1.519690576652602e-05,
      "loss": 0.1511,
      "step": 1000
    },
    {
      "epoch": 3.4817749603803487,
      "grad_norm": 18.966760635375977,
      "learning_rate": 1.449367088607595e-05,
      "loss": 0.124,
      "step": 1100
    },
    {
      "epoch": 3.7987321711568938,
      "grad_norm": 17.122596740722656,
      "learning_rate": 1.379043600562588e-05,
      "loss": 0.1295,
      "step": 1200
    },
    {
      "epoch": 4.114104595879557,
      "grad_norm": 14.107647895812988,
      "learning_rate": 1.308720112517581e-05,
      "loss": 0.12,
      "step": 1300
    },
    {
      "epoch": 4.431061806656102,
      "grad_norm": 47.62938690185547,
      "learning_rate": 1.238396624472574e-05,
      "loss": 0.09,
      "step": 1400
    },
    {
      "epoch": 4.748019017432647,
      "grad_norm": 6.217677116394043,
      "learning_rate": 1.1680731364275668e-05,
      "loss": 0.0798,
      "step": 1500
    },
    {
      "epoch": 5.063391442155309,
      "grad_norm": 2.919074773788452,
      "learning_rate": 1.0977496483825597e-05,
      "loss": 0.0574,
      "step": 1600
    },
    {
      "epoch": 5.380348652931854,
      "grad_norm": 37.143314361572266,
      "learning_rate": 1.0274261603375528e-05,
      "loss": 0.0481,
      "step": 1700
    },
    {
      "epoch": 5.697305863708399,
      "grad_norm": 2.6090009212493896,
      "learning_rate": 9.571026722925458e-06,
      "loss": 0.0566,
      "step": 1800
    },
    {
      "epoch": 6.0126782884310614,
      "grad_norm": 2.3183188438415527,
      "learning_rate": 8.867791842475387e-06,
      "loss": 0.0221,
      "step": 1900
    },
    {
      "epoch": 6.329635499207607,
      "grad_norm": 46.22297668457031,
      "learning_rate": 8.164556962025318e-06,
      "loss": 0.0271,
      "step": 2000
    },
    {
      "epoch": 6.646592709984152,
      "grad_norm": 0.24476002156734467,
      "learning_rate": 7.461322081575246e-06,
      "loss": 0.0226,
      "step": 2100
    },
    {
      "epoch": 6.9635499207606975,
      "grad_norm": 0.4829137921333313,
      "learning_rate": 6.758087201125176e-06,
      "loss": 0.0187,
      "step": 2200
    },
    {
      "epoch": 7.27892234548336,
      "grad_norm": 1.0153768062591553,
      "learning_rate": 6.0548523206751065e-06,
      "loss": 0.0149,
      "step": 2300
    },
    {
      "epoch": 7.595879556259905,
      "grad_norm": 0.03896712511777878,
      "learning_rate": 5.351617440225036e-06,
      "loss": 0.0241,
      "step": 2400
    },
    {
      "epoch": 7.91283676703645,
      "grad_norm": 0.04027330502867699,
      "learning_rate": 4.648382559774965e-06,
      "loss": 0.0067,
      "step": 2500
    },
    {
      "epoch": 8.228209191759113,
      "grad_norm": 12.497308731079102,
      "learning_rate": 3.945147679324895e-06,
      "loss": 0.0056,
      "step": 2600
    },
    {
      "epoch": 8.545166402535658,
      "grad_norm": 0.2126639187335968,
      "learning_rate": 3.2419127988748244e-06,
      "loss": 0.0092,
      "step": 2700
    },
    {
      "epoch": 8.862123613312203,
      "grad_norm": 0.03942370414733887,
      "learning_rate": 2.538677918424754e-06,
      "loss": 0.008,
      "step": 2800
    },
    {
      "epoch": 9.177496038034866,
      "grad_norm": 0.03362393379211426,
      "learning_rate": 1.8354430379746838e-06,
      "loss": 0.0034,
      "step": 2900
    },
    {
      "epoch": 9.49445324881141,
      "grad_norm": 0.02909662574529648,
      "learning_rate": 1.1322081575246132e-06,
      "loss": 0.0028,
      "step": 3000
    },
    {
      "epoch": 9.811410459587956,
      "grad_norm": 0.2622971832752228,
      "learning_rate": 4.289732770745429e-07,
      "loss": 0.0022,
      "step": 3100
    },
    {
      "epoch": 10.0,
      "step": 3160,
      "total_flos": 1.0318820217244877e+19,
      "train_loss": 0.2743822718157044,
      "train_runtime": 4080.2158,
      "train_samples_per_second": 24.744,
      "train_steps_per_second": 0.774
    }
  ],
  "logging_steps": 100,
  "max_steps": 3160,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0318820217244877e+19,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}