{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "global_step": 3322,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 2e-05,
      "loss": 5.0874,
      "step": 100
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.995250208167269e-05,
      "loss": 3.8099,
      "step": 200
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.9810459537139835e-05,
      "loss": 3.7186,
      "step": 300
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.9575221711437283e-05,
      "loss": 3.6782,
      "step": 400
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.924902326597158e-05,
      "loss": 3.6658,
      "step": 500
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.8834962950166965e-05,
      "loss": 3.6644,
      "step": 600
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.8336974164635978e-05,
      "loss": 3.6594,
      "step": 700
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.7759787595511233e-05,
      "loss": 3.6464,
      "step": 800
    },
    {
      "epoch": 0.54,
      "learning_rate": 1.7108886274896706e-05,
      "loss": 3.6364,
      "step": 900
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.639045349434554e-05,
      "loss": 3.6319,
      "step": 1000
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.5611314066164592e-05,
      "loss": 3.6096,
      "step": 1100
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.4778869490538927e-05,
      "loss": 3.6131,
      "step": 1200
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.3901027644361556e-05,
      "loss": 3.6074,
      "step": 1300
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.2986127659695292e-05,
      "loss": 3.6134,
      "step": 1400
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.2042860705489998e-05,
      "loss": 3.6074,
      "step": 1500
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.1080187425096018e-05,
      "loss": 3.6057,
      "step": 1600
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.010725281388296e-05,
      "loss": 3.4945,
      "step": 1700
    },
    {
      "epoch": 1.08,
      "learning_rate": 9.133299345591059e-06,
      "loss": 3.3152,
      "step": 1800
    },
    {
      "epoch": 1.14,
      "learning_rate": 8.16757917267863e-06,
      "loss": 3.3171,
      "step": 1900
    },
    {
      "epoch": 1.2,
      "learning_rate": 7.219266234725672e-06,
      "loss": 3.322,
      "step": 2000
    },
    {
      "epoch": 1.26,
      "learning_rate": 6.2973691098273205e-06,
      "loss": 3.3089,
      "step": 2100
    },
    {
      "epoch": 1.32,
      "learning_rate": 5.410645436852491e-06,
      "loss": 3.3124,
      "step": 2200
    },
    {
      "epoch": 1.38,
      "learning_rate": 4.567518721520754e-06,
      "loss": 3.3064,
      "step": 2300
    },
    {
      "epoch": 1.44,
      "learning_rate": 3.775998316604984e-06,
      "loss": 3.2992,
      "step": 2400
    },
    {
      "epoch": 1.51,
      "learning_rate": 3.0436033364146135e-06,
      "loss": 3.2997,
      "step": 2500
    },
    {
      "epoch": 1.57,
      "learning_rate": 2.377291228340114e-06,
      "loss": 3.2948,
      "step": 2600
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.7833916799994533e-06,
      "loss": 3.2986,
      "step": 2700
    },
    {
      "epoch": 1.69,
      "learning_rate": 1.2675464898409772e-06,
      "loss": 3.2966,
      "step": 2800
    },
    {
      "epoch": 1.75,
      "learning_rate": 8.346559724070181e-07,
      "loss": 3.2939,
      "step": 2900
    },
    {
      "epoch": 1.81,
      "learning_rate": 4.888324073859274e-07,
      "loss": 3.2912,
      "step": 3000
    },
    {
      "epoch": 1.87,
      "learning_rate": 2.3336097466711283e-07,
      "loss": 3.2915,
      "step": 3100
    },
    {
      "epoch": 1.93,
      "learning_rate": 7.066854649982025e-08,
      "loss": 3.2873,
      "step": 3200
    },
    {
      "epoch": 1.99,
      "learning_rate": 2.3006332171626733e-09,
      "loss": 3.2845,
      "step": 3300
    },
    {
      "epoch": 2.0,
      "step": 3322,
      "total_flos": 345709784072192.0,
      "train_loss": 3.518749882412703,
      "train_runtime": 8860.4079,
      "train_samples_per_second": 47.988,
      "train_steps_per_second": 0.375
    }
  ],
  "max_steps": 3322,
  "num_train_epochs": 2,
  "total_flos": 345709784072192.0,
  "trial_name": null,
  "trial_params": null
}