{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 32568,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 4.923237533775485e-05,
      "loss": 0.008,
      "step": 500
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.8464750675509704e-05,
      "loss": 0.0007,
      "step": 1000
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.769712601326456e-05,
      "loss": 0.0008,
      "step": 1500
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.6929501351019406e-05,
      "loss": 0.0003,
      "step": 2000
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.616187668877426e-05,
      "loss": 0.0004,
      "step": 2500
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.539425202652911e-05,
      "loss": 0.0001,
      "step": 3000
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.4626627364283966e-05,
      "loss": 0.0007,
      "step": 3500
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.3859002702038817e-05,
      "loss": 0.0004,
      "step": 4000
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.309137803979367e-05,
      "loss": 0.0006,
      "step": 4500
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.232375337754851e-05,
      "loss": 0.0009,
      "step": 5000
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.155612871530337e-05,
      "loss": 0.0,
      "step": 5500
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.078850405305822e-05,
      "loss": 0.0,
      "step": 6000
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.002087939081307e-05,
      "loss": 0.0,
      "step": 6500
    },
    {
      "epoch": 0.64,
      "learning_rate": 3.925325472856792e-05,
      "loss": 0.0,
      "step": 7000
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.848563006632277e-05,
      "loss": 0.0,
      "step": 7500
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.7718005404077624e-05,
      "loss": 0.0,
      "step": 8000
    },
    {
      "epoch": 0.78,
      "learning_rate": 3.6950380741832475e-05,
      "loss": 0.0001,
      "step": 8500
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.6182756079587326e-05,
      "loss": 0.0008,
      "step": 9000
    },
    {
      "epoch": 0.88,
      "learning_rate": 3.541513141734218e-05,
      "loss": 0.0,
      "step": 9500
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.4647506755097034e-05,
      "loss": 0.0001,
      "step": 10000
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.3879882092851885e-05,
      "loss": 0.0,
      "step": 10500
    },
    {
      "epoch": 1.01,
      "learning_rate": 3.311225743060673e-05,
      "loss": 0.0,
      "step": 11000
    },
    {
      "epoch": 1.06,
      "learning_rate": 3.234463276836158e-05,
      "loss": 0.0,
      "step": 11500
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.157700810611644e-05,
      "loss": 0.0,
      "step": 12000
    },
    {
      "epoch": 1.15,
      "learning_rate": 3.080938344387129e-05,
      "loss": 0.0,
      "step": 12500
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.004175878162614e-05,
      "loss": 0.0,
      "step": 13000
    },
    {
      "epoch": 1.24,
      "learning_rate": 2.927413411938099e-05,
      "loss": 0.0,
      "step": 13500
    },
    {
      "epoch": 1.29,
      "learning_rate": 2.8506509457135838e-05,
      "loss": 0.0,
      "step": 14000
    },
    {
      "epoch": 1.34,
      "learning_rate": 2.773888479489069e-05,
      "loss": 0.0,
      "step": 14500
    },
    {
      "epoch": 1.38,
      "learning_rate": 2.6971260132645544e-05,
      "loss": 0.0,
      "step": 15000
    },
    {
      "epoch": 1.43,
      "learning_rate": 2.6203635470400394e-05,
      "loss": 0.0,
      "step": 15500
    },
    {
      "epoch": 1.47,
      "learning_rate": 2.543601080815525e-05,
      "loss": 0.0,
      "step": 16000
    },
    {
      "epoch": 1.52,
      "learning_rate": 2.4668386145910096e-05,
      "loss": 0.0,
      "step": 16500
    },
    {
      "epoch": 1.57,
      "learning_rate": 2.390076148366495e-05,
      "loss": 0.0,
      "step": 17000
    },
    {
      "epoch": 1.61,
      "learning_rate": 2.3133136821419798e-05,
      "loss": 0.0,
      "step": 17500
    },
    {
      "epoch": 1.66,
      "learning_rate": 2.2365512159174652e-05,
      "loss": 0.0,
      "step": 18000
    },
    {
      "epoch": 1.7,
      "learning_rate": 2.1597887496929503e-05,
      "loss": 0.0,
      "step": 18500
    },
    {
      "epoch": 1.75,
      "learning_rate": 2.0830262834684354e-05,
      "loss": 0.0,
      "step": 19000
    },
    {
      "epoch": 1.8,
      "learning_rate": 2.0062638172439205e-05,
      "loss": 0.0,
      "step": 19500
    },
    {
      "epoch": 1.84,
      "learning_rate": 1.9295013510194056e-05,
      "loss": 0.0,
      "step": 20000
    },
    {
      "epoch": 1.89,
      "learning_rate": 1.8527388847948907e-05,
      "loss": 0.0,
      "step": 20500
    },
    {
      "epoch": 1.93,
      "learning_rate": 1.7759764185703758e-05,
      "loss": 0.0,
      "step": 21000
    },
    {
      "epoch": 1.98,
      "learning_rate": 1.6992139523458612e-05,
      "loss": 0.0,
      "step": 21500
    },
    {
      "epoch": 2.03,
      "learning_rate": 1.622451486121346e-05,
      "loss": 0.0,
      "step": 22000
    },
    {
      "epoch": 2.07,
      "learning_rate": 1.5456890198968314e-05,
      "loss": 0.0,
      "step": 22500
    },
    {
      "epoch": 2.12,
      "learning_rate": 1.4689265536723165e-05,
      "loss": 0.0,
      "step": 23000
    },
    {
      "epoch": 2.16,
      "learning_rate": 1.3921640874478014e-05,
      "loss": 0.0,
      "step": 23500
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.3154016212232867e-05,
      "loss": 0.0,
      "step": 24000
    },
    {
      "epoch": 2.26,
      "learning_rate": 1.238639154998772e-05,
      "loss": 0.0,
      "step": 24500
    },
    {
      "epoch": 2.3,
      "learning_rate": 1.161876688774257e-05,
      "loss": 0.0,
      "step": 25000
    },
    {
      "epoch": 2.35,
      "learning_rate": 1.0851142225497421e-05,
      "loss": 0.0,
      "step": 25500
    },
    {
      "epoch": 2.39,
      "learning_rate": 1.0083517563252274e-05,
      "loss": 0.0002,
      "step": 26000
    },
    {
      "epoch": 2.44,
      "learning_rate": 9.315892901007125e-06,
      "loss": 0.0,
      "step": 26500
    },
    {
      "epoch": 2.49,
      "learning_rate": 8.548268238761976e-06,
      "loss": 0.0,
      "step": 27000
    },
    {
      "epoch": 2.53,
      "learning_rate": 7.780643576516827e-06,
      "loss": 0.0,
      "step": 27500
    },
    {
      "epoch": 2.58,
      "learning_rate": 7.0130189142716784e-06,
      "loss": 0.0,
      "step": 28000
    },
    {
      "epoch": 2.63,
      "learning_rate": 6.245394252026529e-06,
      "loss": 0.0,
      "step": 28500
    },
    {
      "epoch": 2.67,
      "learning_rate": 5.47776958978138e-06,
      "loss": 0.0,
      "step": 29000
    },
    {
      "epoch": 2.72,
      "learning_rate": 4.710144927536232e-06,
      "loss": 0.0,
      "step": 29500
    },
    {
      "epoch": 2.76,
      "learning_rate": 3.942520265291084e-06,
      "loss": 0.0,
      "step": 30000
    },
    {
      "epoch": 2.81,
      "learning_rate": 3.1748956030459347e-06,
      "loss": 0.0,
      "step": 30500
    },
    {
      "epoch": 2.86,
      "learning_rate": 2.407270940800786e-06,
      "loss": 0.0,
      "step": 31000
    },
    {
      "epoch": 2.9,
      "learning_rate": 1.6396462785556374e-06,
      "loss": 0.0,
      "step": 31500
    },
    {
      "epoch": 2.95,
      "learning_rate": 8.720216163104889e-07,
      "loss": 0.0,
      "step": 32000
    },
    {
      "epoch": 2.99,
      "learning_rate": 1.0439695406534021e-07,
      "loss": 0.0,
      "step": 32500
    },
    {
      "epoch": 3.0,
      "step": 32568,
      "total_flos": 550963785498624.0,
      "train_loss": 0.00022073277012470594,
      "train_runtime": 10077.5092,
      "train_samples_per_second": 12.927,
      "train_steps_per_second": 3.232
    }
  ],
  "max_steps": 32568,
  "num_train_epochs": 3,
  "total_flos": 550963785498624.0,
  "trial_name": null,
  "trial_params": null
}