{
  "best_global_step": 1000,
  "best_metric": 1.1414633216773222,
  "best_model_checkpoint": "./SALAMA_C6/checkpoint-1000",
  "epoch": 0.6807351940095303,
  "eval_steps": 1000,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013614703880190605,
      "grad_norm": 0.12137346714735031,
      "learning_rate": 3.8e-07,
      "loss": 0.008,
      "step": 20
    },
    {
      "epoch": 0.02722940776038121,
      "grad_norm": 0.114188052713871,
      "learning_rate": 7.8e-07,
      "loss": 0.0051,
      "step": 40
    },
    {
      "epoch": 0.04084411164057182,
      "grad_norm": 1.0347418785095215,
      "learning_rate": 1.1800000000000001e-06,
      "loss": 0.0065,
      "step": 60
    },
    {
      "epoch": 0.05445881552076242,
      "grad_norm": 1.6876862049102783,
      "learning_rate": 1.5800000000000001e-06,
      "loss": 0.0086,
      "step": 80
    },
    {
      "epoch": 0.06807351940095303,
      "grad_norm": 0.44940465688705444,
      "learning_rate": 1.98e-06,
      "loss": 0.0058,
      "step": 100
    },
    {
      "epoch": 0.08168822328114364,
      "grad_norm": 0.12510177493095398,
      "learning_rate": 2.38e-06,
      "loss": 0.007,
      "step": 120
    },
    {
      "epoch": 0.09530292716133425,
      "grad_norm": 0.20447663962841034,
      "learning_rate": 2.7800000000000005e-06,
      "loss": 0.0076,
      "step": 140
    },
    {
      "epoch": 0.10891763104152484,
      "grad_norm": 0.5220006108283997,
      "learning_rate": 3.1800000000000005e-06,
      "loss": 0.0049,
      "step": 160
    },
    {
      "epoch": 0.12253233492171545,
      "grad_norm": 1.6316051483154297,
      "learning_rate": 3.58e-06,
      "loss": 0.0056,
      "step": 180
    },
    {
      "epoch": 0.13614703880190607,
      "grad_norm": 0.33280882239341736,
      "learning_rate": 3.980000000000001e-06,
      "loss": 0.0046,
      "step": 200
    },
    {
      "epoch": 0.14976174268209666,
      "grad_norm": 1.9523682594299316,
      "learning_rate": 4.38e-06,
      "loss": 0.0059,
      "step": 220
    },
    {
      "epoch": 0.16337644656228728,
      "grad_norm": 2.4847404956817627,
      "learning_rate": 4.78e-06,
      "loss": 0.0102,
      "step": 240
    },
    {
      "epoch": 0.17699115044247787,
      "grad_norm": 1.8841696977615356,
      "learning_rate": 5.18e-06,
      "loss": 0.0102,
      "step": 260
    },
    {
      "epoch": 0.1906058543226685,
      "grad_norm": 0.3495821952819824,
      "learning_rate": 5.580000000000001e-06,
      "loss": 0.0112,
      "step": 280
    },
    {
      "epoch": 0.2042205582028591,
      "grad_norm": 0.7233176231384277,
      "learning_rate": 5.98e-06,
      "loss": 0.0099,
      "step": 300
    },
    {
      "epoch": 0.21783526208304968,
      "grad_norm": 1.7731997966766357,
      "learning_rate": 6.380000000000001e-06,
      "loss": 0.0101,
      "step": 320
    },
    {
      "epoch": 0.2314499659632403,
      "grad_norm": 1.6149709224700928,
      "learning_rate": 6.780000000000001e-06,
      "loss": 0.0158,
      "step": 340
    },
    {
      "epoch": 0.2450646698434309,
      "grad_norm": 1.7614096403121948,
      "learning_rate": 7.180000000000001e-06,
      "loss": 0.0149,
      "step": 360
    },
    {
      "epoch": 0.2586793737236215,
      "grad_norm": 1.063245177268982,
      "learning_rate": 7.58e-06,
      "loss": 0.0081,
      "step": 380
    },
    {
      "epoch": 0.27229407760381213,
      "grad_norm": 2.128750801086426,
      "learning_rate": 7.980000000000002e-06,
      "loss": 0.0101,
      "step": 400
    },
    {
      "epoch": 0.2859087814840027,
      "grad_norm": 4.583016872406006,
      "learning_rate": 8.380000000000001e-06,
      "loss": 0.0161,
      "step": 420
    },
    {
      "epoch": 0.2995234853641933,
      "grad_norm": 2.7598137855529785,
      "learning_rate": 8.78e-06,
      "loss": 0.0155,
      "step": 440
    },
    {
      "epoch": 0.3131381892443839,
      "grad_norm": 1.4036035537719727,
      "learning_rate": 9.180000000000002e-06,
      "loss": 0.0126,
      "step": 460
    },
    {
      "epoch": 0.32675289312457456,
      "grad_norm": 2.3361454010009766,
      "learning_rate": 9.58e-06,
      "loss": 0.0182,
      "step": 480
    },
    {
      "epoch": 0.34036759700476515,
      "grad_norm": 4.159565448760986,
      "learning_rate": 9.980000000000001e-06,
      "loss": 0.0132,
      "step": 500
    },
    {
      "epoch": 0.35398230088495575,
      "grad_norm": 2.0892586708068848,
      "learning_rate": 9.95136933708728e-06,
      "loss": 0.0185,
      "step": 520
    },
    {
      "epoch": 0.36759700476514634,
      "grad_norm": 4.004888534545898,
      "learning_rate": 9.900179165600206e-06,
      "loss": 0.012,
      "step": 540
    },
    {
      "epoch": 0.381211708645337,
      "grad_norm": 2.4637577533721924,
      "learning_rate": 9.848988994113131e-06,
      "loss": 0.0162,
      "step": 560
    },
    {
      "epoch": 0.3948264125255276,
      "grad_norm": 2.228325128555298,
      "learning_rate": 9.797798822626056e-06,
      "loss": 0.031,
      "step": 580
    },
    {
      "epoch": 0.4084411164057182,
      "grad_norm": 2.1333324909210205,
      "learning_rate": 9.746608651138983e-06,
      "loss": 0.0158,
      "step": 600
    },
    {
      "epoch": 0.42205582028590877,
      "grad_norm": 3.556786060333252,
      "learning_rate": 9.695418479651908e-06,
      "loss": 0.0141,
      "step": 620
    },
    {
      "epoch": 0.43567052416609936,
      "grad_norm": 1.8913724422454834,
      "learning_rate": 9.644228308164833e-06,
      "loss": 0.0187,
      "step": 640
    },
    {
      "epoch": 0.44928522804629,
      "grad_norm": 2.2834181785583496,
      "learning_rate": 9.59303813667776e-06,
      "loss": 0.0195,
      "step": 660
    },
    {
      "epoch": 0.4628999319264806,
      "grad_norm": 1.7960888147354126,
      "learning_rate": 9.541847965190683e-06,
      "loss": 0.0184,
      "step": 680
    },
    {
      "epoch": 0.4765146358066712,
      "grad_norm": 3.7277791500091553,
      "learning_rate": 9.49065779370361e-06,
      "loss": 0.0147,
      "step": 700
    },
    {
      "epoch": 0.4901293396868618,
      "grad_norm": 3.077613353729248,
      "learning_rate": 9.439467622216535e-06,
      "loss": 0.0191,
      "step": 720
    },
    {
      "epoch": 0.5037440435670524,
      "grad_norm": 3.0859522819519043,
      "learning_rate": 9.38827745072946e-06,
      "loss": 0.0207,
      "step": 740
    },
    {
      "epoch": 0.517358747447243,
      "grad_norm": 3.4750730991363525,
      "learning_rate": 9.337087279242385e-06,
      "loss": 0.0219,
      "step": 760
    },
    {
      "epoch": 0.5309734513274337,
      "grad_norm": 3.5851595401763916,
      "learning_rate": 9.285897107755312e-06,
      "loss": 0.0177,
      "step": 780
    },
    {
      "epoch": 0.5445881552076243,
      "grad_norm": 1.2438093423843384,
      "learning_rate": 9.234706936268237e-06,
      "loss": 0.0163,
      "step": 800
    },
    {
      "epoch": 0.5582028590878149,
      "grad_norm": 2.504617691040039,
      "learning_rate": 9.183516764781162e-06,
      "loss": 0.0175,
      "step": 820
    },
    {
      "epoch": 0.5718175629680055,
      "grad_norm": 2.9373462200164795,
      "learning_rate": 9.132326593294089e-06,
      "loss": 0.0188,
      "step": 840
    },
    {
      "epoch": 0.585432266848196,
      "grad_norm": 1.7320044040679932,
      "learning_rate": 9.081136421807014e-06,
      "loss": 0.0216,
      "step": 860
    },
    {
      "epoch": 0.5990469707283866,
      "grad_norm": 3.027221202850342,
      "learning_rate": 9.02994625031994e-06,
      "loss": 0.0211,
      "step": 880
    },
    {
      "epoch": 0.6126616746085772,
      "grad_norm": 2.3604636192321777,
      "learning_rate": 8.978756078832866e-06,
      "loss": 0.0224,
      "step": 900
    },
    {
      "epoch": 0.6262763784887678,
      "grad_norm": 3.7307496070861816,
      "learning_rate": 8.92756590734579e-06,
      "loss": 0.016,
      "step": 920
    },
    {
      "epoch": 0.6398910823689585,
      "grad_norm": 3.3601698875427246,
      "learning_rate": 8.876375735858716e-06,
      "loss": 0.0216,
      "step": 940
    },
    {
      "epoch": 0.6535057862491491,
      "grad_norm": 2.8738534450531006,
      "learning_rate": 8.825185564371641e-06,
      "loss": 0.0219,
      "step": 960
    },
    {
      "epoch": 0.6671204901293397,
      "grad_norm": 3.381775379180908,
      "learning_rate": 8.773995392884566e-06,
      "loss": 0.0166,
      "step": 980
    },
    {
      "epoch": 0.6807351940095303,
      "grad_norm": 0.9475630521774292,
      "learning_rate": 8.722805221397493e-06,
      "loss": 0.0148,
      "step": 1000
    },
    {
      "epoch": 0.6807351940095303,
      "eval_loss": 0.01398420799523592,
      "eval_runtime": 4342.5429,
      "eval_samples_per_second": 2.706,
      "eval_steps_per_second": 0.338,
      "eval_wer": 1.1414633216773222,
      "step": 1000
    }
  ],
  "logging_steps": 20,
  "max_steps": 4407,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.23473281024e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
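
This is the `trainer_state.json` written by the Hugging Face `Trainer` at its first eval/save point (step 1000 of 4407); `best_metric` mirrors `eval_wer` here. A minimal sketch of how the file might be inspected offline, using only the Python standard library; the path below is taken from `best_model_checkpoint` above and is otherwise illustrative:

```python
import json

# Load the Trainer state saved alongside the checkpoint
# (path taken from "best_model_checkpoint"; adjust as needed).
with open("./SALAMA_C6/checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# log_history interleaves training-loss entries and evaluation entries;
# split them by the keys each entry carries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best checkpoint: {state['best_model_checkpoint']}")
print(f"best metric (eval_wer): {state['best_metric']:.4f}")
print(f"progress: step {state['global_step']} / {state['max_steps']}")
for e in eval_logs:
    print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}, "
          f"eval_wer={e['eval_wer']:.4f}")
```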