{
  "best_metric": 0.21568627450980393,
  "best_model_checkpoint": "models/ArtFair/Hank-Green-326-timecoded/checkpoint-326",
  "epoch": 0.3326530612244898,
  "eval_steps": 326,
  "global_step": 326,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 1.1965811965811965e-07,
      "loss": 3.8774,
      "step": 16
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.393162393162393e-07,
      "loss": 3.7909,
      "step": 32
    },
    {
      "epoch": 0.05,
      "learning_rate": 3.7606837606837604e-07,
      "loss": 3.4601,
      "step": 48
    },
    {
      "epoch": 0.07,
      "learning_rate": 5.128205128205127e-07,
      "loss": 3.3684,
      "step": 64
    },
    {
      "epoch": 0.08,
      "learning_rate": 6.495726495726495e-07,
      "loss": 2.3532,
      "step": 80
    },
    {
      "epoch": 0.1,
      "learning_rate": 7.863247863247862e-07,
      "loss": 2.3592,
      "step": 96
    },
    {
      "epoch": 0.11,
      "learning_rate": 9.230769230769231e-07,
      "loss": 1.7341,
      "step": 112
    },
    {
      "epoch": 0.13,
      "learning_rate": 9.99837673007631e-07,
      "loss": 1.4398,
      "step": 128
    },
    {
      "epoch": 0.15,
      "learning_rate": 9.982484597964214e-07,
      "loss": 1.3764,
      "step": 144
    },
    {
      "epoch": 0.16,
      "learning_rate": 9.949694232523914e-07,
      "loss": 1.1601,
      "step": 160
    },
    {
      "epoch": 0.18,
      "learning_rate": 9.900116843180192e-07,
      "loss": 1.1587,
      "step": 176
    },
    {
      "epoch": 0.2,
      "learning_rate": 9.833920573010362e-07,
      "loss": 1.0174,
      "step": 192
    },
    {
      "epoch": 0.21,
      "learning_rate": 9.75132992848238e-07,
      "loss": 0.725,
      "step": 208
    },
    {
      "epoch": 0.23,
      "learning_rate": 9.652625018035032e-07,
      "loss": 0.5014,
      "step": 224
    },
    {
      "epoch": 0.24,
      "learning_rate": 9.538140602082435e-07,
      "loss": 0.55,
      "step": 240
    },
    {
      "epoch": 0.26,
      "learning_rate": 9.408264957664926e-07,
      "loss": 0.4288,
      "step": 256
    },
    {
      "epoch": 0.28,
      "learning_rate": 9.263438561596806e-07,
      "loss": 0.467,
      "step": 272
    },
    {
      "epoch": 0.29,
      "learning_rate": 9.104152596577132e-07,
      "loss": 0.4463,
      "step": 288
    },
    {
      "epoch": 0.31,
      "learning_rate": 8.930947285330108e-07,
      "loss": 0.3361,
      "step": 304
    },
    {
      "epoch": 0.33,
      "learning_rate": 8.744410058424878e-07,
      "loss": 0.3554,
      "step": 320
    },
    {
      "epoch": 0.33,
      "eval_loss": 0.7186310291290283,
      "eval_raw_wer": 0.2878929349517585,
      "eval_runtime": 477.7988,
      "eval_samples_per_second": 0.82,
      "eval_steps_per_second": 0.82,
      "eval_wer": 0.21568627450980393,
      "step": 326
    }
  ],
  "logging_steps": 16,
  "max_steps": 980,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 326,
  "total_flos": 2.7686150111232e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}