{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 1000.0,
  "global_step": 5495,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09099181073703366,
      "grad_norm": 1.4607406854629517,
      "learning_rate": 1.701414099755401e-05,
      "loss": 0.2386,
      "step": 100
    },
    {
      "epoch": 0.18198362147406733,
      "grad_norm": 1.313841462135315,
      "learning_rate": 1.6698772304364868e-05,
      "loss": 0.2229,
      "step": 200
    },
    {
      "epoch": 0.272975432211101,
      "grad_norm": 0.9963859915733337,
      "learning_rate": 1.638340361117573e-05,
      "loss": 0.2372,
      "step": 300
    },
    {
      "epoch": 0.36396724294813465,
      "grad_norm": 1.5595107078552246,
      "learning_rate": 1.6068034917986592e-05,
      "loss": 0.2349,
      "step": 400
    },
    {
      "epoch": 0.4549590536851683,
      "grad_norm": 3.309925079345703,
      "learning_rate": 1.5752666224797455e-05,
      "loss": 0.2498,
      "step": 500
    },
    {
      "epoch": 0.545950864422202,
      "grad_norm": 0.9195685982704163,
      "learning_rate": 1.5437297531608317e-05,
      "loss": 0.2376,
      "step": 600
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 1.338065266609192,
      "learning_rate": 1.5121928838419178e-05,
      "loss": 0.2341,
      "step": 700
    },
    {
      "epoch": 0.7279344858962693,
      "grad_norm": 1.154276728630066,
      "learning_rate": 1.480656014523004e-05,
      "loss": 0.2339,
      "step": 800
    },
    {
      "epoch": 0.818926296633303,
      "grad_norm": 1.0532206296920776,
      "learning_rate": 1.4491191452040903e-05,
      "loss": 0.2229,
      "step": 900
    },
    {
      "epoch": 0.9099181073703366,
      "grad_norm": 1.547398567199707,
      "learning_rate": 1.4175822758851763e-05,
      "loss": 0.2145,
      "step": 1000
    },
    {
      "epoch": 0.9099181073703366,
      "eval_cer": 0.1716800867787559,
      "eval_loss": 0.24501025676727295,
      "eval_runtime": 16.5978,
      "eval_samples_per_second": 30.124,
      "eval_steps_per_second": 0.964,
      "eval_wer": 0.3676761958675347,
      "step": 1000
    },
    {
      "epoch": 1.0009099181073704,
      "grad_norm": 1.0578970909118652,
      "learning_rate": 1.3860454065662626e-05,
      "loss": 0.2136,
      "step": 1100
    },
    {
      "epoch": 1.091901728844404,
      "grad_norm": 1.3032770156860352,
      "learning_rate": 1.3545085372473488e-05,
      "loss": 0.1984,
      "step": 1200
    },
    {
      "epoch": 1.1828935395814377,
      "grad_norm": 1.5720305442810059,
      "learning_rate": 1.322971667928435e-05,
      "loss": 0.2046,
      "step": 1300
    },
    {
      "epoch": 1.2738853503184713,
      "grad_norm": 2.217538595199585,
      "learning_rate": 1.291434798609521e-05,
      "loss": 0.2015,
      "step": 1400
    },
    {
      "epoch": 1.364877161055505,
      "grad_norm": 1.1916780471801758,
      "learning_rate": 1.2598979292906072e-05,
      "loss": 0.2089,
      "step": 1500
    },
    {
      "epoch": 1.4558689717925386,
      "grad_norm": 2.570552110671997,
      "learning_rate": 1.2283610599716934e-05,
      "loss": 0.1944,
      "step": 1600
    },
    {
      "epoch": 1.5468607825295724,
      "grad_norm": 1.21017324924469,
      "learning_rate": 1.1968241906527795e-05,
      "loss": 0.2065,
      "step": 1700
    },
    {
      "epoch": 1.6378525932666061,
      "grad_norm": 3.1120645999908447,
      "learning_rate": 1.1652873213338657e-05,
      "loss": 0.2049,
      "step": 1800
    },
    {
      "epoch": 1.7288444040036397,
      "grad_norm": 0.8226349353790283,
      "learning_rate": 1.133750452014952e-05,
      "loss": 0.1993,
      "step": 1900
    },
    {
      "epoch": 1.8198362147406733,
      "grad_norm": 1.935673475265503,
      "learning_rate": 1.1022135826960382e-05,
      "loss": 0.2083,
      "step": 2000
    },
    {
      "epoch": 1.8198362147406733,
      "eval_cer": 0.17084567566439984,
      "eval_loss": 0.2324480563402176,
      "eval_runtime": 16.6022,
      "eval_samples_per_second": 30.116,
      "eval_steps_per_second": 0.964,
      "eval_wer": 0.3656948768751769,
      "step": 2000
    },
    {
      "epoch": 1.910828025477707,
      "grad_norm": 1.1601260900497437,
      "learning_rate": 1.0706767133771243e-05,
      "loss": 0.2007,
      "step": 2100
    },
    {
      "epoch": 2.001819836214741,
      "grad_norm": 0.8161017298698425,
      "learning_rate": 1.0391398440582105e-05,
      "loss": 0.2127,
      "step": 2200
    },
    {
      "epoch": 2.092811646951774,
      "grad_norm": 0.8181062340736389,
      "learning_rate": 1.0076029747392968e-05,
      "loss": 0.1941,
      "step": 2300
    },
    {
      "epoch": 2.183803457688808,
      "grad_norm": 0.8099838495254517,
      "learning_rate": 9.760661054203828e-06,
      "loss": 0.1963,
      "step": 2400
    },
    {
      "epoch": 2.2747952684258417,
      "grad_norm": 1.2429511547088623,
      "learning_rate": 9.445292361014689e-06,
      "loss": 0.2037,
      "step": 2500
    },
    {
      "epoch": 2.3657870791628755,
      "grad_norm": 1.5289260149002075,
      "learning_rate": 9.129923667825551e-06,
      "loss": 0.1806,
      "step": 2600
    },
    {
      "epoch": 2.4567788898999092,
      "grad_norm": 0.9370952844619751,
      "learning_rate": 8.814554974636414e-06,
      "loss": 0.1993,
      "step": 2700
    },
    {
      "epoch": 2.5477707006369426,
      "grad_norm": 2.0104141235351562,
      "learning_rate": 8.499186281447274e-06,
      "loss": 0.1794,
      "step": 2800
    },
    {
      "epoch": 2.6387625113739763,
      "grad_norm": 1.7518079280853271,
      "learning_rate": 8.183817588258137e-06,
      "loss": 0.1729,
      "step": 2900
    },
    {
      "epoch": 2.72975432211101,
      "grad_norm": 0.9197149276733398,
      "learning_rate": 7.868448895069e-06,
      "loss": 0.1853,
      "step": 3000
    },
    {
      "epoch": 2.72975432211101,
      "eval_cer": 0.16817556009846052,
      "eval_loss": 0.2309054285287857,
      "eval_runtime": 16.5274,
      "eval_samples_per_second": 30.253,
      "eval_steps_per_second": 0.968,
      "eval_wer": 0.35833569204641946,
      "step": 3000
    },
    {
      "epoch": 2.8207461328480434,
      "grad_norm": 2.1217689514160156,
      "learning_rate": 7.553080201879861e-06,
      "loss": 0.1936,
      "step": 3100
    },
    {
      "epoch": 2.911737943585077,
      "grad_norm": 1.075772762298584,
      "learning_rate": 7.237711508690722e-06,
      "loss": 0.1808,
      "step": 3200
    },
    {
      "epoch": 3.002729754322111,
      "grad_norm": 1.0478922128677368,
      "learning_rate": 6.922342815501585e-06,
      "loss": 0.1843,
      "step": 3300
    },
    {
      "epoch": 3.0937215650591448,
      "grad_norm": 0.6994670033454895,
      "learning_rate": 6.606974122312446e-06,
      "loss": 0.1702,
      "step": 3400
    },
    {
      "epoch": 3.1847133757961785,
      "grad_norm": 1.5650808811187744,
      "learning_rate": 6.291605429123308e-06,
      "loss": 0.1746,
      "step": 3500
    },
    {
      "epoch": 3.275705186533212,
      "grad_norm": 1.1467134952545166,
      "learning_rate": 5.976236735934169e-06,
      "loss": 0.1715,
      "step": 3600
    },
    {
      "epoch": 3.3666969972702456,
      "grad_norm": 1.9923007488250732,
      "learning_rate": 5.660868042745032e-06,
      "loss": 0.1764,
      "step": 3700
    },
    {
      "epoch": 3.4576888080072794,
      "grad_norm": 0.8818196654319763,
      "learning_rate": 5.345499349555893e-06,
      "loss": 0.1889,
      "step": 3800
    },
    {
      "epoch": 3.548680618744313,
      "grad_norm": 2.67970609664917,
      "learning_rate": 5.030130656366755e-06,
      "loss": 0.1768,
      "step": 3900
    },
    {
      "epoch": 3.6396724294813465,
      "grad_norm": 4.243306636810303,
      "learning_rate": 4.714761963177616e-06,
      "loss": 0.1872,
      "step": 4000
    },
    {
      "epoch": 3.6396724294813465,
      "eval_cer": 0.16892653010138095,
      "eval_loss": 0.23469558358192444,
      "eval_runtime": 16.4113,
      "eval_samples_per_second": 30.467,
      "eval_steps_per_second": 0.975,
      "eval_wer": 0.35578828191338807,
      "step": 4000
    },
    {
      "epoch": 3.7306642402183803,
      "grad_norm": 1.6732439994812012,
      "learning_rate": 4.399393269988479e-06,
      "loss": 0.1762,
      "step": 4100
    },
    {
      "epoch": 3.821656050955414,
      "grad_norm": 0.6598711609840393,
      "learning_rate": 4.08402457679934e-06,
      "loss": 0.1587,
      "step": 4200
    },
    {
      "epoch": 3.912647861692448,
      "grad_norm": 1.5305451154708862,
      "learning_rate": 3.768655883610202e-06,
      "loss": 0.1748,
      "step": 4300
    },
    {
      "epoch": 4.003639672429482,
      "grad_norm": 1.1548172235488892,
      "learning_rate": 3.453287190421064e-06,
      "loss": 0.1652,
      "step": 4400
    },
    {
      "epoch": 4.094631483166515,
      "grad_norm": 1.0717195272445679,
      "learning_rate": 3.1379184972319254e-06,
      "loss": 0.1682,
      "step": 4500
    },
    {
      "epoch": 4.185623293903548,
      "grad_norm": 2.524381637573242,
      "learning_rate": 2.8225498040427873e-06,
      "loss": 0.172,
      "step": 4600
    },
    {
      "epoch": 4.276615104640582,
      "grad_norm": 0.7992689609527588,
      "learning_rate": 2.507181110853649e-06,
      "loss": 0.1756,
      "step": 4700
    },
    {
      "epoch": 4.367606915377616,
      "grad_norm": 1.247957468032837,
      "learning_rate": 2.191812417664511e-06,
      "loss": 0.1731,
      "step": 4800
    },
    {
      "epoch": 4.45859872611465,
      "grad_norm": 0.6922060251235962,
      "learning_rate": 1.8764437244753726e-06,
      "loss": 0.1819,
      "step": 4900
    },
    {
      "epoch": 4.549590536851683,
      "grad_norm": 1.0490756034851074,
      "learning_rate": 1.5610750312862342e-06,
      "loss": 0.17,
      "step": 5000
    },
    {
      "epoch": 4.549590536851683,
      "eval_cer": 0.16867620676707415,
      "eval_loss": 0.23541562259197235,
      "eval_runtime": 16.4749,
      "eval_samples_per_second": 30.349,
      "eval_steps_per_second": 0.971,
      "eval_wer": 0.3560713274837249,
      "step": 5000
    },
    {
      "epoch": 4.640582347588717,
      "grad_norm": 2.3390719890594482,
      "learning_rate": 1.245706338097096e-06,
      "loss": 0.1811,
      "step": 5100
    },
    {
      "epoch": 4.731574158325751,
      "grad_norm": 2.068753242492676,
      "learning_rate": 9.303376449079578e-07,
      "loss": 0.1715,
      "step": 5200
    },
    {
      "epoch": 4.822565969062785,
      "grad_norm": 4.086577415466309,
      "learning_rate": 6.149689517188195e-07,
      "loss": 0.1703,
      "step": 5300
    },
    {
      "epoch": 4.9135577797998184,
      "grad_norm": 1.0212068557739258,
      "learning_rate": 2.9960025852968135e-07,
      "loss": 0.1761,
      "step": 5400
    },
    {
      "epoch": 5.0,
      "step": 5495,
      "total_flos": 4.052184710714386e+19,
      "train_loss": 0.19382703171956528,
      "train_runtime": 8779.1622,
      "train_samples_per_second": 20.016,
      "train_steps_per_second": 0.626
    }
  ],
  "logging_steps": 100,
  "max_steps": 5495,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.052184710714386e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}