{
  "best_metric": 0.20292561469032058,
  "best_model_checkpoint": "models/ArtFair/Hank-Green-326-timecoded/checkpoint-978",
  "epoch": 0.9979591836734694,
  "eval_steps": 326,
  "global_step": 978,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 1.1965811965811965e-07,
      "loss": 3.8774,
      "step": 16
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.393162393162393e-07,
      "loss": 3.7909,
      "step": 32
    },
    {
      "epoch": 0.05,
      "learning_rate": 3.7606837606837604e-07,
      "loss": 3.4601,
      "step": 48
    },
    {
      "epoch": 0.07,
      "learning_rate": 5.128205128205127e-07,
      "loss": 3.3684,
      "step": 64
    },
    {
      "epoch": 0.08,
      "learning_rate": 6.495726495726495e-07,
      "loss": 2.3532,
      "step": 80
    },
    {
      "epoch": 0.1,
      "learning_rate": 7.863247863247862e-07,
      "loss": 2.3592,
      "step": 96
    },
    {
      "epoch": 0.11,
      "learning_rate": 9.230769230769231e-07,
      "loss": 1.7341,
      "step": 112
    },
    {
      "epoch": 0.13,
      "learning_rate": 9.99837673007631e-07,
      "loss": 1.4398,
      "step": 128
    },
    {
      "epoch": 0.15,
      "learning_rate": 9.982484597964214e-07,
      "loss": 1.3764,
      "step": 144
    },
    {
      "epoch": 0.16,
      "learning_rate": 9.949694232523914e-07,
      "loss": 1.1601,
      "step": 160
    },
    {
      "epoch": 0.18,
      "learning_rate": 9.900116843180192e-07,
      "loss": 1.1587,
      "step": 176
    },
    {
      "epoch": 0.2,
      "learning_rate": 9.833920573010362e-07,
      "loss": 1.0174,
      "step": 192
    },
    {
      "epoch": 0.21,
      "learning_rate": 9.75132992848238e-07,
      "loss": 0.725,
      "step": 208
    },
    {
      "epoch": 0.23,
      "learning_rate": 9.652625018035032e-07,
      "loss": 0.5014,
      "step": 224
    },
    {
      "epoch": 0.24,
      "learning_rate": 9.538140602082435e-07,
      "loss": 0.55,
      "step": 240
    },
    {
      "epoch": 0.26,
      "learning_rate": 9.408264957664926e-07,
      "loss": 0.4288,
      "step": 256
    },
    {
      "epoch": 0.28,
      "learning_rate": 9.263438561596806e-07,
      "loss": 0.467,
      "step": 272
    },
    {
      "epoch": 0.29,
      "learning_rate": 9.104152596577132e-07,
      "loss": 0.4463,
      "step": 288
    },
    {
      "epoch": 0.31,
      "learning_rate": 8.930947285330108e-07,
      "loss": 0.3361,
      "step": 304
    },
    {
      "epoch": 0.33,
      "learning_rate": 8.744410058424878e-07,
      "loss": 0.3554,
      "step": 320
    },
    {
      "epoch": 0.33,
      "eval_loss": 0.7186310291290283,
      "eval_raw_wer": 0.2878929349517585,
      "eval_runtime": 477.7988,
      "eval_samples_per_second": 0.82,
      "eval_steps_per_second": 0.82,
      "eval_wer": 0.21568627450980393,
      "step": 326
    },
    {
      "epoch": 0.34,
      "learning_rate": 8.545173561988624e-07,
      "loss": 0.3713,
      "step": 336
    },
    {
      "epoch": 0.36,
      "learning_rate": 8.333913512069848e-07,
      "loss": 0.4327,
      "step": 352
    },
    {
      "epoch": 0.38,
      "learning_rate": 8.111346402928848e-07,
      "loss": 0.4631,
      "step": 368
    },
    {
      "epoch": 0.39,
      "learning_rate": 7.878227077027753e-07,
      "loss": 0.3439,
      "step": 384
    },
    {
      "epoch": 0.41,
      "learning_rate": 7.635346164961587e-07,
      "loss": 0.4046,
      "step": 400
    },
    {
      "epoch": 0.42,
      "learning_rate": 7.383527404012899e-07,
      "loss": 0.2902,
      "step": 416
    },
    {
      "epoch": 0.44,
      "learning_rate": 7.123624844424145e-07,
      "loss": 0.4497,
      "step": 432
    },
    {
      "epoch": 0.46,
      "learning_rate": 6.856519952862844e-07,
      "loss": 0.2758,
      "step": 448
    },
    {
      "epoch": 0.47,
      "learning_rate": 6.583118622903166e-07,
      "loss": 0.4701,
      "step": 464
    },
    {
      "epoch": 0.49,
      "learning_rate": 6.304348102663004e-07,
      "loss": 0.3829,
      "step": 480
    },
    {
      "epoch": 0.51,
      "learning_rate": 6.021153850016527e-07,
      "loss": 0.3568,
      "step": 496
    },
    {
      "epoch": 0.52,
      "learning_rate": 5.734496326047821e-07,
      "loss": 0.4111,
      "step": 512
    },
    {
      "epoch": 0.54,
      "learning_rate": 5.445347737620766e-07,
      "loss": 0.5163,
      "step": 528
    },
    {
      "epoch": 0.56,
      "learning_rate": 5.154688740112749e-07,
      "loss": 0.502,
      "step": 544
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.86350511149504e-07,
      "loss": 0.325,
      "step": 560
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.5727844090397133e-07,
      "loss": 0.3893,
      "step": 576
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.2835126199920545e-07,
      "loss": 0.4843,
      "step": 592
    },
    {
      "epoch": 0.62,
      "learning_rate": 3.996670817567741e-07,
      "loss": 0.4143,
      "step": 608
    },
    {
      "epoch": 0.64,
      "learning_rate": 3.713231833616096e-07,
      "loss": 0.3176,
      "step": 624
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.434156959234152e-07,
      "loss": 0.4304,
      "step": 640
    },
    {
      "epoch": 0.67,
      "eval_loss": 0.6797897219657898,
      "eval_raw_wer": 0.2773109243697479,
      "eval_runtime": 485.642,
      "eval_samples_per_second": 0.807,
      "eval_steps_per_second": 0.807,
      "eval_wer": 0.20385932150638034,
      "step": 652
    },
    {
      "epoch": 0.67,
      "learning_rate": 3.1603926845215e-07,
      "loss": 0.4579,
      "step": 656
    },
    {
      "epoch": 0.69,
      "learning_rate": 2.8928674885331293e-07,
      "loss": 0.4704,
      "step": 672
    },
    {
      "epoch": 0.7,
      "learning_rate": 2.632488690317219e-07,
      "loss": 0.4061,
      "step": 688
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.380139371717667e-07,
      "loss": 0.3334,
      "step": 704
    },
    {
      "epoch": 0.73,
      "learning_rate": 2.136675382377774e-07,
      "loss": 0.3986,
      "step": 720
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.9029224371026699e-07,
      "loss": 0.4019,
      "step": 736
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.6796733154248855e-07,
      "loss": 0.4271,
      "step": 752
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.467685172870794e-07,
      "loss": 0.4094,
      "step": 768
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.26767697304686e-07,
      "loss": 0.3653,
      "step": 784
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.0803270492548155e-07,
      "loss": 0.3873,
      "step": 800
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.062708039056515e-08,
      "loss": 0.4225,
      "step": 816
    },
    {
      "epoch": 0.85,
      "learning_rate": 7.46098553534884e-08,
      "loss": 0.4128,
      "step": 832
    },
    {
      "epoch": 0.87,
      "learning_rate": 6.00353526727821e-08,
      "loss": 0.3909,
      "step": 848
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.695300217448944e-08,
      "loss": 0.3881,
      "step": 864
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.540717300955431e-08,
      "loss": 0.3468,
      "step": 880
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.543702317462676e-08,
      "loss": 0.3516,
      "step": 896
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.7076366706642587e-08,
      "loss": 0.3635,
      "step": 912
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.035355900158863e-08,
      "loss": 0.3995,
      "step": 928
    },
    {
      "epoch": 0.96,
      "learning_rate": 5.29140064639827e-09,
      "loss": 0.4308,
      "step": 944
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.907060090135082e-09,
      "loss": 0.3883,
      "step": 960
    },
    {
      "epoch": 1.0,
      "learning_rate": 2.1201541672644097e-10,
      "loss": 0.4248,
      "step": 976
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.6699328422546387,
      "eval_raw_wer": 0.27388733271086213,
      "eval_runtime": 482.3136,
      "eval_samples_per_second": 0.813,
      "eval_steps_per_second": 0.813,
      "eval_wer": 0.20292561469032058,
      "step": 978
    }
  ],
  "logging_steps": 16,
  "max_steps": 980,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 326,
  "total_flos": 8.3058450333696e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}