{
  "best_metric": 3.3389594554901123,
  "best_model_checkpoint": "output/travis-scott/checkpoint-174",
  "epoch": 1.0,
  "global_step": 174,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 0.0001369206553164936,
      "loss": 3.9488,
      "step": 5
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00013608489629373538,
      "loss": 3.7969,
      "step": 10
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00013469952948681868,
      "loss": 3.6955,
      "step": 15
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00013277583754449623,
      "loss": 3.6337,
      "step": 20
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00013032948732148497,
      "loss": 3.5633,
      "step": 25
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001273804022850966,
      "loss": 3.4878,
      "step": 30
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001239526002553348,
      "loss": 3.6424,
      "step": 35
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0001200739977999292,
      "loss": 3.5744,
      "step": 40
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00011577618287734484,
      "loss": 3.4553,
      "step": 45
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0001110941575793986,
      "loss": 3.5316,
      "step": 50
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00010606605306862761,
      "loss": 3.4538,
      "step": 55
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00010073281903200561,
      "loss": 3.2755,
      "step": 60
    },
    {
      "epoch": 0.37,
      "learning_rate": 9.513789018014557e-05,
      "loss": 3.2518,
      "step": 65
    },
    {
      "epoch": 0.4,
      "learning_rate": 8.932683250807268e-05,
      "loss": 3.4163,
      "step": 70
    },
    {
      "epoch": 0.43,
      "learning_rate": 8.334697219847626e-05,
      "loss": 3.3031,
      "step": 75
    },
    {
      "epoch": 0.46,
      "learning_rate": 7.724701018971185e-05,
      "loss": 3.2611,
      "step": 80
    },
    {
      "epoch": 0.49,
      "learning_rate": 7.107662554757363e-05,
      "loss": 3.3722,
      "step": 85
    },
    {
      "epoch": 0.52,
      "learning_rate": 6.488607087104036e-05,
      "loss": 3.3329,
      "step": 90
    },
    {
      "epoch": 0.55,
      "learning_rate": 5.872576302707542e-05,
      "loss": 3.3299,
      "step": 95
    },
    {
      "epoch": 0.57,
      "learning_rate": 5.264587254760233e-05,
      "loss": 3.3178,
      "step": 100
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.6695915032671784e-05,
      "loss": 3.4824,
      "step": 105
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.092434788749914e-05,
      "loss": 3.412,
      "step": 110
    },
    {
      "epoch": 0.66,
      "learning_rate": 3.537817567760932e-05,
      "loss": 3.121,
      "step": 115
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.0102567316140575e-05,
      "loss": 3.3823,
      "step": 120
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.5140488200994943e-05,
      "loss": 3.217,
      "step": 125
    },
    {
      "epoch": 0.75,
      "learning_rate": 2.0532350297768717e-05,
      "loss": 3.2794,
      "step": 130
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.6315683018244145e-05,
      "loss": 3.232,
      "step": 135
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.2524827574860022e-05,
      "loss": 3.2429,
      "step": 140
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.190657300387505e-06,
      "loss": 3.2955,
      "step": 145
    },
    {
      "epoch": 0.86,
      "learning_rate": 6.340326210572357e-06,
      "loss": 3.2237,
      "step": 150
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.9970478574895515e-06,
      "loss": 3.2312,
      "step": 155
    },
    {
      "epoch": 0.92,
      "learning_rate": 2.179906274664157e-06,
      "loss": 3.3031,
      "step": 160
    },
    {
      "epoch": 0.95,
      "learning_rate": 9.037005536513067e-07,
      "loss": 3.4051,
      "step": 165
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.7882431786864473e-07,
      "loss": 3.4258,
      "step": 170
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.3389594554901123,
      "eval_runtime": 11.911,
      "eval_samples_per_second": 20.989,
      "eval_steps_per_second": 2.687,
      "step": 174
    }
  ],
  "max_steps": 174,
  "num_train_epochs": 1,
  "total_flos": 181597962240000.0,
  "trial_name": null,
  "trial_params": null
}