{
"best_metric": 0.20385932150638034,
"best_model_checkpoint": "models/ArtFair/Hank-Green-326-timecoded/checkpoint-652",
"epoch": 0.6653061224489796,
"eval_steps": 326,
"global_step": 652,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 1.1965811965811965e-07,
"loss": 3.8774,
"step": 16
},
{
"epoch": 0.03,
"learning_rate": 2.393162393162393e-07,
"loss": 3.7909,
"step": 32
},
{
"epoch": 0.05,
"learning_rate": 3.7606837606837604e-07,
"loss": 3.4601,
"step": 48
},
{
"epoch": 0.07,
"learning_rate": 5.128205128205127e-07,
"loss": 3.3684,
"step": 64
},
{
"epoch": 0.08,
"learning_rate": 6.495726495726495e-07,
"loss": 2.3532,
"step": 80
},
{
"epoch": 0.1,
"learning_rate": 7.863247863247862e-07,
"loss": 2.3592,
"step": 96
},
{
"epoch": 0.11,
"learning_rate": 9.230769230769231e-07,
"loss": 1.7341,
"step": 112
},
{
"epoch": 0.13,
"learning_rate": 9.99837673007631e-07,
"loss": 1.4398,
"step": 128
},
{
"epoch": 0.15,
"learning_rate": 9.982484597964214e-07,
"loss": 1.3764,
"step": 144
},
{
"epoch": 0.16,
"learning_rate": 9.949694232523914e-07,
"loss": 1.1601,
"step": 160
},
{
"epoch": 0.18,
"learning_rate": 9.900116843180192e-07,
"loss": 1.1587,
"step": 176
},
{
"epoch": 0.2,
"learning_rate": 9.833920573010362e-07,
"loss": 1.0174,
"step": 192
},
{
"epoch": 0.21,
"learning_rate": 9.75132992848238e-07,
"loss": 0.725,
"step": 208
},
{
"epoch": 0.23,
"learning_rate": 9.652625018035032e-07,
"loss": 0.5014,
"step": 224
},
{
"epoch": 0.24,
"learning_rate": 9.538140602082435e-07,
"loss": 0.55,
"step": 240
},
{
"epoch": 0.26,
"learning_rate": 9.408264957664926e-07,
"loss": 0.4288,
"step": 256
},
{
"epoch": 0.28,
"learning_rate": 9.263438561596806e-07,
"loss": 0.467,
"step": 272
},
{
"epoch": 0.29,
"learning_rate": 9.104152596577132e-07,
"loss": 0.4463,
"step": 288
},
{
"epoch": 0.31,
"learning_rate": 8.930947285330108e-07,
"loss": 0.3361,
"step": 304
},
{
"epoch": 0.33,
"learning_rate": 8.744410058424878e-07,
"loss": 0.3554,
"step": 320
},
{
"epoch": 0.33,
"eval_loss": 0.7186310291290283,
"eval_raw_wer": 0.2878929349517585,
"eval_runtime": 477.7988,
"eval_samples_per_second": 0.82,
"eval_steps_per_second": 0.82,
"eval_wer": 0.21568627450980393,
"step": 326
},
{
"epoch": 0.34,
"learning_rate": 8.545173561988624e-07,
"loss": 0.3713,
"step": 336
},
{
"epoch": 0.36,
"learning_rate": 8.333913512069848e-07,
"loss": 0.4327,
"step": 352
},
{
"epoch": 0.38,
"learning_rate": 8.111346402928848e-07,
"loss": 0.4631,
"step": 368
},
{
"epoch": 0.39,
"learning_rate": 7.878227077027753e-07,
"loss": 0.3439,
"step": 384
},
{
"epoch": 0.41,
"learning_rate": 7.635346164961587e-07,
"loss": 0.4046,
"step": 400
},
{
"epoch": 0.42,
"learning_rate": 7.383527404012899e-07,
"loss": 0.2902,
"step": 416
},
{
"epoch": 0.44,
"learning_rate": 7.123624844424145e-07,
"loss": 0.4497,
"step": 432
},
{
"epoch": 0.46,
"learning_rate": 6.856519952862844e-07,
"loss": 0.2758,
"step": 448
},
{
"epoch": 0.47,
"learning_rate": 6.583118622903166e-07,
"loss": 0.4701,
"step": 464
},
{
"epoch": 0.49,
"learning_rate": 6.304348102663004e-07,
"loss": 0.3829,
"step": 480
},
{
"epoch": 0.51,
"learning_rate": 6.021153850016527e-07,
"loss": 0.3568,
"step": 496
},
{
"epoch": 0.52,
"learning_rate": 5.734496326047821e-07,
"loss": 0.4111,
"step": 512
},
{
"epoch": 0.54,
"learning_rate": 5.445347737620766e-07,
"loss": 0.5163,
"step": 528
},
{
"epoch": 0.56,
"learning_rate": 5.154688740112749e-07,
"loss": 0.502,
"step": 544
},
{
"epoch": 0.57,
"learning_rate": 4.86350511149504e-07,
"loss": 0.325,
"step": 560
},
{
"epoch": 0.59,
"learning_rate": 4.5727844090397133e-07,
"loss": 0.3893,
"step": 576
},
{
"epoch": 0.6,
"learning_rate": 4.2835126199920545e-07,
"loss": 0.4843,
"step": 592
},
{
"epoch": 0.62,
"learning_rate": 3.996670817567741e-07,
"loss": 0.4143,
"step": 608
},
{
"epoch": 0.64,
"learning_rate": 3.713231833616096e-07,
"loss": 0.3176,
"step": 624
},
{
"epoch": 0.65,
"learning_rate": 3.434156959234152e-07,
"loss": 0.4304,
"step": 640
},
{
"epoch": 0.67,
"eval_loss": 0.6797897219657898,
"eval_raw_wer": 0.2773109243697479,
"eval_runtime": 485.642,
"eval_samples_per_second": 0.807,
"eval_steps_per_second": 0.807,
"eval_wer": 0.20385932150638034,
"step": 652
}
],
"logging_steps": 16,
"max_steps": 980,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 326,
"total_flos": 5.5372300222464e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}