{
  "best_global_step": 500,
  "best_metric": 19.59291781998664,
  "best_model_checkpoint": "./SALAMA_C3/checkpoint-500",
  "epoch": 0.8375209380234506,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03350083752093802,
      "grad_norm": 11.757207870483398,
      "learning_rate": 6.333333333333334e-07,
      "loss": 0.5112,
      "step": 20
    },
    {
      "epoch": 0.06700167504187604,
      "grad_norm": 10.197953224182129,
      "learning_rate": 1.3e-06,
      "loss": 0.5283,
      "step": 40
    },
    {
      "epoch": 0.10050251256281408,
      "grad_norm": 12.811074256896973,
      "learning_rate": 1.9666666666666668e-06,
      "loss": 0.474,
      "step": 60
    },
    {
      "epoch": 0.13400335008375208,
      "grad_norm": 8.559342384338379,
      "learning_rate": 2.6e-06,
      "loss": 0.424,
      "step": 80
    },
    {
      "epoch": 0.16750418760469013,
      "grad_norm": 6.9629225730896,
      "learning_rate": 3.266666666666667e-06,
      "loss": 0.4465,
      "step": 100
    },
    {
      "epoch": 0.20100502512562815,
      "grad_norm": 7.833058834075928,
      "learning_rate": 3.9333333333333335e-06,
      "loss": 0.3764,
      "step": 120
    },
    {
      "epoch": 0.23450586264656617,
      "grad_norm": 6.882424831390381,
      "learning_rate": 4.600000000000001e-06,
      "loss": 0.3705,
      "step": 140
    },
    {
      "epoch": 0.26800670016750416,
      "grad_norm": 5.4183244705200195,
      "learning_rate": 5.2666666666666665e-06,
      "loss": 0.4116,
      "step": 160
    },
    {
      "epoch": 0.3015075376884422,
      "grad_norm": 6.103787899017334,
      "learning_rate": 5.933333333333335e-06,
      "loss": 0.3903,
      "step": 180
    },
    {
      "epoch": 0.33500837520938026,
      "grad_norm": 6.084160804748535,
      "learning_rate": 6.600000000000001e-06,
      "loss": 0.3794,
      "step": 200
    },
    {
      "epoch": 0.3685092127303183,
      "grad_norm": 4.531406879425049,
      "learning_rate": 7.266666666666668e-06,
      "loss": 0.3904,
      "step": 220
    },
    {
      "epoch": 0.4020100502512563,
      "grad_norm": 6.702854156494141,
      "learning_rate": 7.933333333333334e-06,
      "loss": 0.3672,
      "step": 240
    },
    {
      "epoch": 0.4355108877721943,
      "grad_norm": 6.386377811431885,
      "learning_rate": 8.6e-06,
      "loss": 0.3379,
      "step": 260
    },
    {
      "epoch": 0.46901172529313234,
      "grad_norm": 5.33281135559082,
      "learning_rate": 9.266666666666667e-06,
      "loss": 0.4102,
      "step": 280
    },
    {
      "epoch": 0.5025125628140703,
      "grad_norm": 6.031435012817383,
      "learning_rate": 9.933333333333334e-06,
      "loss": 0.3815,
      "step": 300
    },
    {
      "epoch": 0.5360134003350083,
      "grad_norm": 4.8638176918029785,
      "learning_rate": 9.968253968253969e-06,
      "loss": 0.358,
      "step": 320
    },
    {
      "epoch": 0.5695142378559463,
      "grad_norm": 5.352113723754883,
      "learning_rate": 9.932980599647268e-06,
      "loss": 0.3578,
      "step": 340
    },
    {
      "epoch": 0.6030150753768844,
      "grad_norm": 5.560739040374756,
      "learning_rate": 9.897707231040565e-06,
      "loss": 0.3795,
      "step": 360
    },
    {
      "epoch": 0.6365159128978225,
      "grad_norm": 6.186940670013428,
      "learning_rate": 9.862433862433864e-06,
      "loss": 0.3852,
      "step": 380
    },
    {
      "epoch": 0.6700167504187605,
      "grad_norm": 5.37507438659668,
      "learning_rate": 9.827160493827161e-06,
      "loss": 0.3761,
      "step": 400
    },
    {
      "epoch": 0.7035175879396985,
      "grad_norm": 6.373449802398682,
      "learning_rate": 9.79188712522046e-06,
      "loss": 0.3674,
      "step": 420
    },
    {
      "epoch": 0.7370184254606366,
      "grad_norm": 5.702625274658203,
      "learning_rate": 9.756613756613757e-06,
      "loss": 0.4013,
      "step": 440
    },
    {
      "epoch": 0.7705192629815746,
      "grad_norm": 5.5675153732299805,
      "learning_rate": 9.721340388007056e-06,
      "loss": 0.3495,
      "step": 460
    },
    {
      "epoch": 0.8040201005025126,
      "grad_norm": 6.296374320983887,
      "learning_rate": 9.686067019400353e-06,
      "loss": 0.3704,
      "step": 480
    },
    {
      "epoch": 0.8375209380234506,
      "grad_norm": 4.830463886260986,
      "learning_rate": 9.650793650793652e-06,
      "loss": 0.36,
      "step": 500
    },
    {
      "epoch": 0.8375209380234506,
      "eval_loss": 0.27764827013015747,
      "eval_runtime": 1752.3826,
      "eval_samples_per_second": 2.725,
      "eval_steps_per_second": 0.341,
      "eval_wer": 19.59291781998664,
      "step": 500
    }
  ],
  "logging_steps": 20,
  "max_steps": 5970,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.61736640512e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}