{
  "best_global_step": 1200,
  "best_metric": 37.091943373052636,
  "best_model_checkpoint": "./HAUSA_B/checkpoint-1200",
  "epoch": 1.6242383209207854,
  "eval_steps": 200,
  "global_step": 1200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.027081922816519974,
      "grad_norm": 106.59215545654297,
      "learning_rate": 8.000000000000001e-07,
      "loss": 6.1915,
      "step": 20
    },
    {
      "epoch": 0.05416384563303995,
      "grad_norm": 30.563518524169922,
      "learning_rate": 1.8000000000000001e-06,
      "loss": 4.5886,
      "step": 40
    },
    {
      "epoch": 0.08124576844955991,
      "grad_norm": 16.528457641601562,
      "learning_rate": 2.8000000000000003e-06,
      "loss": 3.0338,
      "step": 60
    },
    {
      "epoch": 0.1083276912660799,
      "grad_norm": 13.852096557617188,
      "learning_rate": 3.8000000000000005e-06,
      "loss": 2.3188,
      "step": 80
    },
    {
      "epoch": 0.13540961408259986,
      "grad_norm": 13.284646987915039,
      "learning_rate": 4.800000000000001e-06,
      "loss": 1.9725,
      "step": 100
    },
    {
      "epoch": 0.16249153689911983,
      "grad_norm": 13.212055206298828,
      "learning_rate": 5.8e-06,
      "loss": 1.7839,
      "step": 120
    },
    {
      "epoch": 0.1895734597156398,
      "grad_norm": 12.006990432739258,
      "learning_rate": 6.800000000000001e-06,
      "loss": 1.5991,
      "step": 140
    },
    {
      "epoch": 0.2166553825321598,
      "grad_norm": 12.490514755249023,
      "learning_rate": 7.800000000000002e-06,
      "loss": 1.4411,
      "step": 160
    },
    {
      "epoch": 0.24373730534867977,
      "grad_norm": 11.587646484375,
      "learning_rate": 8.8e-06,
      "loss": 1.3487,
      "step": 180
    },
    {
      "epoch": 0.2708192281651997,
      "grad_norm": 11.574933052062988,
      "learning_rate": 9.800000000000001e-06,
      "loss": 1.2119,
      "step": 200
    },
    {
      "epoch": 0.2708192281651997,
      "eval_loss": 1.1157827377319336,
      "eval_runtime": 1601.2991,
      "eval_samples_per_second": 3.688,
      "eval_steps_per_second": 0.462,
      "eval_wer": 64.22461160550755,
      "step": 200
    },
    {
      "epoch": 0.2979011509817197,
      "grad_norm": 8.368565559387207,
      "learning_rate": 9.920556107249256e-06,
      "loss": 0.9452,
      "step": 220
    },
    {
      "epoch": 0.32498307379823965,
      "grad_norm": 7.2616682052612305,
      "learning_rate": 9.821251241310825e-06,
      "loss": 0.9298,
      "step": 240
    },
    {
      "epoch": 0.35206499661475965,
      "grad_norm": 7.266394138336182,
      "learning_rate": 9.721946375372395e-06,
      "loss": 0.8567,
      "step": 260
    },
    {
      "epoch": 0.3791469194312796,
      "grad_norm": 8.120067596435547,
      "learning_rate": 9.622641509433963e-06,
      "loss": 0.8251,
      "step": 280
    },
    {
      "epoch": 0.4062288422477996,
      "grad_norm": 7.483547687530518,
      "learning_rate": 9.523336643495532e-06,
      "loss": 0.7774,
      "step": 300
    },
    {
      "epoch": 0.4333107650643196,
      "grad_norm": 7.521896839141846,
      "learning_rate": 9.4240317775571e-06,
      "loss": 0.7266,
      "step": 320
    },
    {
      "epoch": 0.46039268788083954,
      "grad_norm": 7.073266506195068,
      "learning_rate": 9.32472691161867e-06,
      "loss": 0.7248,
      "step": 340
    },
    {
      "epoch": 0.48747461069735953,
      "grad_norm": 6.335423469543457,
      "learning_rate": 9.22542204568024e-06,
      "loss": 0.7151,
      "step": 360
    },
    {
      "epoch": 0.5145565335138795,
      "grad_norm": 6.936922550201416,
      "learning_rate": 9.126117179741808e-06,
      "loss": 0.6881,
      "step": 380
    },
    {
      "epoch": 0.5416384563303994,
      "grad_norm": 7.596807479858398,
      "learning_rate": 9.026812313803377e-06,
      "loss": 0.6995,
      "step": 400
    },
    {
      "epoch": 0.5416384563303994,
      "eval_loss": 0.6352065801620483,
      "eval_runtime": 1597.3635,
      "eval_samples_per_second": 3.697,
      "eval_steps_per_second": 0.463,
      "eval_wer": 51.63653601672089,
      "step": 400
    },
    {
      "epoch": 0.5687203791469194,
      "grad_norm": 6.981812477111816,
      "learning_rate": 8.927507447864945e-06,
      "loss": 0.6795,
      "step": 420
    },
    {
      "epoch": 0.5958023019634394,
      "grad_norm": 6.481506824493408,
      "learning_rate": 8.828202581926516e-06,
      "loss": 0.6643,
      "step": 440
    },
    {
      "epoch": 0.6228842247799594,
      "grad_norm": 6.537086009979248,
      "learning_rate": 8.728897715988084e-06,
      "loss": 0.6459,
      "step": 460
    },
    {
      "epoch": 0.6499661475964793,
      "grad_norm": 6.739567756652832,
      "learning_rate": 8.629592850049653e-06,
      "loss": 0.6736,
      "step": 480
    },
    {
      "epoch": 0.6770480704129993,
      "grad_norm": 7.422546863555908,
      "learning_rate": 8.530287984111221e-06,
      "loss": 0.6591,
      "step": 500
    },
    {
      "epoch": 0.7041299932295193,
      "grad_norm": 5.7051215171813965,
      "learning_rate": 8.430983118172792e-06,
      "loss": 0.6442,
      "step": 520
    },
    {
      "epoch": 0.7312119160460393,
      "grad_norm": 7.166143417358398,
      "learning_rate": 8.33167825223436e-06,
      "loss": 0.6326,
      "step": 540
    },
    {
      "epoch": 0.7582938388625592,
      "grad_norm": 7.759460926055908,
      "learning_rate": 8.232373386295929e-06,
      "loss": 0.6328,
      "step": 560
    },
    {
      "epoch": 0.7853757616790792,
      "grad_norm": 5.876537799835205,
      "learning_rate": 8.133068520357497e-06,
      "loss": 0.5945,
      "step": 580
    },
    {
      "epoch": 0.8124576844955992,
      "grad_norm": 6.475106716156006,
      "learning_rate": 8.033763654419066e-06,
      "loss": 0.6038,
      "step": 600
    },
    {
      "epoch": 0.8124576844955992,
      "eval_loss": 0.5318673849105835,
      "eval_runtime": 1614.0202,
      "eval_samples_per_second": 3.659,
      "eval_steps_per_second": 0.458,
      "eval_wer": 44.61419121291129,
      "step": 600
    },
    {
      "epoch": 0.8395396073121192,
      "grad_norm": 6.8348259925842285,
      "learning_rate": 7.934458788480636e-06,
      "loss": 0.6046,
      "step": 620
    },
    {
      "epoch": 0.8666215301286392,
      "grad_norm": 6.060784339904785,
      "learning_rate": 7.835153922542206e-06,
      "loss": 0.6055,
      "step": 640
    },
    {
      "epoch": 0.8937034529451591,
      "grad_norm": 6.01262903213501,
      "learning_rate": 7.735849056603775e-06,
      "loss": 0.6139,
      "step": 660
    },
    {
      "epoch": 0.9207853757616791,
      "grad_norm": 6.706854820251465,
      "learning_rate": 7.636544190665344e-06,
      "loss": 0.5789,
      "step": 680
    },
    {
      "epoch": 0.9478672985781991,
      "grad_norm": 6.464681625366211,
      "learning_rate": 7.537239324726913e-06,
      "loss": 0.5857,
      "step": 700
    },
    {
      "epoch": 0.9749492213947191,
      "grad_norm": 7.8035478591918945,
      "learning_rate": 7.437934458788482e-06,
      "loss": 0.5739,
      "step": 720
    },
    {
      "epoch": 1.001354096140826,
      "grad_norm": 5.574532985687256,
      "learning_rate": 7.33862959285005e-06,
      "loss": 0.5443,
      "step": 740
    },
    {
      "epoch": 1.028436018957346,
      "grad_norm": 5.275153160095215,
      "learning_rate": 7.23932472691162e-06,
      "loss": 0.4705,
      "step": 760
    },
    {
      "epoch": 1.055517941773866,
      "grad_norm": 5.722527027130127,
      "learning_rate": 7.140019860973188e-06,
      "loss": 0.4692,
      "step": 780
    },
    {
      "epoch": 1.0825998645903858,
      "grad_norm": 5.501762866973877,
      "learning_rate": 7.040714995034758e-06,
      "loss": 0.4791,
      "step": 800
    },
    {
      "epoch": 1.0825998645903858,
      "eval_loss": 0.46895861625671387,
      "eval_runtime": 1599.6376,
      "eval_samples_per_second": 3.691,
      "eval_steps_per_second": 0.462,
      "eval_wer": 40.9726561658299,
      "step": 800
    },
    {
      "epoch": 1.1096817874069058,
      "grad_norm": 5.207645893096924,
      "learning_rate": 6.941410129096326e-06,
      "loss": 0.4569,
      "step": 820
    },
    {
      "epoch": 1.1367637102234258,
      "grad_norm": 5.584475517272949,
      "learning_rate": 6.842105263157896e-06,
      "loss": 0.4262,
      "step": 840
    },
    {
      "epoch": 1.1638456330399458,
      "grad_norm": 5.881894588470459,
      "learning_rate": 6.742800397219464e-06,
      "loss": 0.4533,
      "step": 860
    },
    {
      "epoch": 1.1909275558564658,
      "grad_norm": 6.939334869384766,
      "learning_rate": 6.643495531281034e-06,
      "loss": 0.4675,
      "step": 880
    },
    {
      "epoch": 1.2180094786729858,
      "grad_norm": 5.780360698699951,
      "learning_rate": 6.544190665342602e-06,
      "loss": 0.4238,
      "step": 900
    },
    {
      "epoch": 1.2450914014895058,
      "grad_norm": 5.867160797119141,
      "learning_rate": 6.444885799404172e-06,
      "loss": 0.444,
      "step": 920
    },
    {
      "epoch": 1.2721733243060258,
      "grad_norm": 6.121824741363525,
      "learning_rate": 6.34558093346574e-06,
      "loss": 0.4333,
      "step": 940
    },
    {
      "epoch": 1.2992552471225456,
      "grad_norm": 5.132157802581787,
      "learning_rate": 6.24627606752731e-06,
      "loss": 0.4544,
      "step": 960
    },
    {
      "epoch": 1.3263371699390656,
      "grad_norm": 6.384315013885498,
      "learning_rate": 6.146971201588878e-06,
      "loss": 0.4332,
      "step": 980
    },
    {
      "epoch": 1.3534190927555856,
      "grad_norm": 4.54695987701416,
      "learning_rate": 6.047666335650447e-06,
      "loss": 0.4416,
      "step": 1000
    },
    {
      "epoch": 1.3534190927555856,
      "eval_loss": 0.43043428659439087,
      "eval_runtime": 1614.3899,
      "eval_samples_per_second": 3.658,
      "eval_steps_per_second": 0.458,
      "eval_wer": 38.00771402098731,
      "step": 1000
    },
    {
      "epoch": 1.3805010155721056,
      "grad_norm": 4.973900318145752,
      "learning_rate": 5.948361469712016e-06,
      "loss": 0.4401,
      "step": 1020
    },
    {
      "epoch": 1.4075829383886256,
      "grad_norm": 5.952788352966309,
      "learning_rate": 5.849056603773585e-06,
      "loss": 0.4865,
      "step": 1040
    },
    {
      "epoch": 1.4346648612051456,
      "grad_norm": 6.601942539215088,
      "learning_rate": 5.749751737835154e-06,
      "loss": 0.4435,
      "step": 1060
    },
    {
      "epoch": 1.4617467840216656,
      "grad_norm": 6.17143440246582,
      "learning_rate": 5.650446871896723e-06,
      "loss": 0.451,
      "step": 1080
    },
    {
      "epoch": 1.4888287068381856,
      "grad_norm": 5.782886981964111,
      "learning_rate": 5.551142005958292e-06,
      "loss": 0.4311,
      "step": 1100
    },
    {
      "epoch": 1.5159106296547056,
      "grad_norm": 5.734127998352051,
      "learning_rate": 5.451837140019861e-06,
      "loss": 0.4632,
      "step": 1120
    },
    {
      "epoch": 1.5429925524712256,
      "grad_norm": 5.601761341094971,
      "learning_rate": 5.35253227408143e-06,
      "loss": 0.424,
      "step": 1140
    },
    {
      "epoch": 1.5700744752877456,
      "grad_norm": 5.866110801696777,
      "learning_rate": 5.253227408142999e-06,
      "loss": 0.4273,
      "step": 1160
    },
    {
      "epoch": 1.5971563981042654,
      "grad_norm": 5.120361328125,
      "learning_rate": 5.153922542204568e-06,
      "loss": 0.4297,
      "step": 1180
    },
    {
      "epoch": 1.6242383209207854,
      "grad_norm": 5.093082427978516,
      "learning_rate": 5.054617676266137e-06,
      "loss": 0.4321,
      "step": 1200
    },
    {
      "epoch": 1.6242383209207854,
      "eval_loss": 0.3993258476257324,
      "eval_runtime": 1619.3754,
      "eval_samples_per_second": 3.646,
      "eval_steps_per_second": 0.456,
      "eval_wer": 37.091943373052636,
      "step": 1200
    }
  ],
  "logging_steps": 20,
  "max_steps": 2214,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.107359898107904e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}