{
  "best_global_step": 1800,
  "best_metric": 2.1885855197906494,
  "best_model_checkpoint": "./outputs/checkpoint-1800",
  "epoch": 2.6058631921824107,
  "eval_steps": 200,
  "global_step": 2400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05428881650380022,
      "grad_norm": 0.3356933295726776,
      "learning_rate": 0.000196,
      "loss": 2.9766,
      "step": 50
    },
    {
      "epoch": 0.10857763300760044,
      "grad_norm": 0.5300453305244446,
      "learning_rate": 0.00019983906674307943,
      "loss": 2.4625,
      "step": 100
    },
    {
      "epoch": 0.16286644951140064,
      "grad_norm": 0.4830726385116577,
      "learning_rate": 0.00019934360560412188,
      "loss": 2.3984,
      "step": 150
    },
    {
      "epoch": 0.21715526601520088,
      "grad_norm": 0.6850009560585022,
      "learning_rate": 0.000198515209997625,
      "loss": 2.3727,
      "step": 200
    },
    {
      "epoch": 0.21715526601520088,
      "eval_loss": 2.348525047302246,
      "eval_runtime": 9.0868,
      "eval_samples_per_second": 66.25,
      "eval_steps_per_second": 2.091,
      "step": 200
    },
    {
      "epoch": 0.2714440825190011,
      "grad_norm": 0.6019847393035889,
      "learning_rate": 0.00019735665616115814,
      "loss": 2.3382,
      "step": 250
    },
    {
      "epoch": 0.3257328990228013,
      "grad_norm": 0.7136948704719543,
      "learning_rate": 0.00019587182680566505,
      "loss": 2.3352,
      "step": 300
    },
    {
      "epoch": 0.3800217155266015,
      "grad_norm": 0.6682165861129761,
      "learning_rate": 0.00019406569810316855,
      "loss": 2.3282,
      "step": 350
    },
    {
      "epoch": 0.43431053203040176,
      "grad_norm": 0.6841819882392883,
      "learning_rate": 0.00019194432300991294,
      "loss": 2.3176,
      "step": 400
    },
    {
      "epoch": 0.43431053203040176,
      "eval_loss": 2.2951860427856445,
      "eval_runtime": 7.346,
      "eval_samples_per_second": 81.949,
      "eval_steps_per_second": 2.586,
      "step": 400
    },
    {
      "epoch": 0.48859934853420195,
      "grad_norm": 0.6011854410171509,
      "learning_rate": 0.00018951481098083322,
      "loss": 2.2901,
      "step": 450
    },
    {
      "epoch": 0.5428881650380022,
      "grad_norm": 0.6126257181167603,
      "learning_rate": 0.00018678530414333524,
      "loss": 2.3023,
      "step": 500
    },
    {
      "epoch": 0.5971769815418024,
      "grad_norm": 0.6901931762695312,
      "learning_rate": 0.00018376495001023636,
      "loss": 2.2943,
      "step": 550
    },
    {
      "epoch": 0.6514657980456026,
      "grad_norm": 0.6403626203536987,
      "learning_rate": 0.00018046387082331516,
      "loss": 2.2696,
      "step": 600
    },
    {
      "epoch": 0.6514657980456026,
      "eval_loss": 2.2626566886901855,
      "eval_runtime": 7.335,
      "eval_samples_per_second": 82.072,
      "eval_steps_per_second": 2.59,
      "step": 600
    },
    {
      "epoch": 0.7057546145494028,
      "grad_norm": 0.6320544481277466,
      "learning_rate": 0.00017689312963021044,
      "loss": 2.2678,
      "step": 650
    },
    {
      "epoch": 0.760043431053203,
      "grad_norm": 0.7033542990684509,
      "learning_rate": 0.00017306469320835707,
      "loss": 2.2667,
      "step": 700
    },
    {
      "epoch": 0.8143322475570033,
      "grad_norm": 0.6568920612335205,
      "learning_rate": 0.00016899139196021397,
      "loss": 2.2727,
      "step": 750
    },
    {
      "epoch": 0.8686210640608035,
      "grad_norm": 0.7634288668632507,
      "learning_rate": 0.00016468687691418833,
      "loss": 2.2482,
      "step": 800
    },
    {
      "epoch": 0.8686210640608035,
      "eval_loss": 2.2396719455718994,
      "eval_runtime": 7.3506,
      "eval_samples_per_second": 81.898,
      "eval_steps_per_second": 2.585,
      "step": 800
    },
    {
      "epoch": 0.9229098805646037,
      "grad_norm": 0.6467435359954834,
      "learning_rate": 0.0001601655739753613,
      "loss": 2.2523,
      "step": 850
    },
    {
      "epoch": 0.9771986970684039,
      "grad_norm": 0.6962546706199646,
      "learning_rate": 0.0001554426355793363,
      "loss": 2.2509,
      "step": 900
    },
    {
      "epoch": 1.0314875135722041,
      "grad_norm": 0.6648530960083008,
      "learning_rate": 0.0001505338899112344,
      "loss": 2.2103,
      "step": 950
    },
    {
      "epoch": 1.0857763300760044,
      "grad_norm": 0.9022650122642517,
      "learning_rate": 0.00014545578786002178,
      "loss": 2.1862,
      "step": 1000
    },
    {
      "epoch": 1.0857763300760044,
      "eval_loss": 2.22625994682312,
      "eval_runtime": 7.3813,
      "eval_samples_per_second": 81.558,
      "eval_steps_per_second": 2.574,
      "step": 1000
    },
    {
      "epoch": 1.1400651465798046,
      "grad_norm": 0.788972795009613,
      "learning_rate": 0.00014022534788594312,
      "loss": 2.2099,
      "step": 1050
    },
    {
      "epoch": 1.1943539630836049,
      "grad_norm": 0.7912983894348145,
      "learning_rate": 0.00013486009898582889,
      "loss": 2.1799,
      "step": 1100
    },
    {
      "epoch": 1.248642779587405,
      "grad_norm": 0.766243040561676,
      "learning_rate": 0.00012937802194741994,
      "loss": 2.2058,
      "step": 1150
    },
    {
      "epoch": 1.3029315960912053,
      "grad_norm": 0.8190643191337585,
      "learning_rate": 0.00012379748908958562,
      "loss": 2.2021,
      "step": 1200
    },
    {
      "epoch": 1.3029315960912053,
      "eval_loss": 2.21370530128479,
      "eval_runtime": 7.3565,
      "eval_samples_per_second": 81.833,
      "eval_steps_per_second": 2.583,
      "step": 1200
    },
    {
      "epoch": 1.3572204125950054,
      "grad_norm": 0.824234664440155,
      "learning_rate": 0.00011813720269038712,
      "loss": 2.1931,
      "step": 1250
    },
    {
      "epoch": 1.4115092290988056,
      "grad_norm": 0.8461474776268005,
      "learning_rate": 0.00011241613230933439,
      "loss": 2.1755,
      "step": 1300
    },
    {
      "epoch": 1.4657980456026058,
      "grad_norm": 0.8048629760742188,
      "learning_rate": 0.00010665345121389123,
      "loss": 2.186,
      "step": 1350
    },
    {
      "epoch": 1.520086862106406,
      "grad_norm": 0.7822412848472595,
      "learning_rate": 0.00010086847212328548,
      "loss": 2.1949,
      "step": 1400
    },
    {
      "epoch": 1.520086862106406,
      "eval_loss": 2.2031383514404297,
      "eval_runtime": 7.3421,
      "eval_samples_per_second": 81.993,
      "eval_steps_per_second": 2.588,
      "step": 1400
    },
    {
      "epoch": 1.5743756786102063,
      "grad_norm": 0.8551859855651855,
      "learning_rate": 9.508058248496825e-05,
      "loss": 2.193,
      "step": 1450
    },
    {
      "epoch": 1.6286644951140063,
      "grad_norm": 0.8906627893447876,
      "learning_rate": 8.930917950063358e-05,
      "loss": 2.1834,
      "step": 1500
    },
    {
      "epoch": 1.6829533116178066,
      "grad_norm": 0.7810714840888977,
      "learning_rate": 8.357360511954789e-05,
      "loss": 2.1665,
      "step": 1550
    },
    {
      "epoch": 1.7372421281216068,
      "grad_norm": 0.8593874573707581,
      "learning_rate": 7.789308121704968e-05,
      "loss": 2.1698,
      "step": 1600
    },
    {
      "epoch": 1.7372421281216068,
      "eval_loss": 2.1958162784576416,
      "eval_runtime": 7.3308,
      "eval_samples_per_second": 82.119,
      "eval_steps_per_second": 2.592,
      "step": 1600
    },
    {
      "epoch": 1.791530944625407,
      "grad_norm": 0.8802404403686523,
      "learning_rate": 7.228664517545785e-05,
      "loss": 2.1577,
      "step": 1650
    },
    {
      "epoch": 1.8458197611292073,
      "grad_norm": 0.8591893911361694,
      "learning_rate": 6.677308608327948e-05,
      "loss": 2.161,
      "step": 1700
    },
    {
      "epoch": 1.9001085776330076,
      "grad_norm": 0.8191171884536743,
      "learning_rate": 6.137088176653491e-05,
      "loss": 2.178,
      "step": 1750
    },
    {
      "epoch": 1.9543973941368078,
      "grad_norm": 0.9448741674423218,
      "learning_rate": 5.609813686323086e-05,
      "loss": 2.1746,
      "step": 1800
    },
    {
      "epoch": 1.9543973941368078,
      "eval_loss": 2.1885855197906494,
      "eval_runtime": 7.3549,
      "eval_samples_per_second": 81.85,
      "eval_steps_per_second": 2.583,
      "step": 1800
    },
    {
      "epoch": 2.008686210640608,
      "grad_norm": 0.9358265399932861,
      "learning_rate": 5.097252214851365e-05,
      "loss": 2.165,
      "step": 1850
    },
    {
      "epoch": 2.0629750271444083,
      "grad_norm": 1.0949631929397583,
      "learning_rate": 4.601121531384579e-05,
      "loss": 2.1348,
      "step": 1900
    },
    {
      "epoch": 2.1172638436482085,
      "grad_norm": 1.3909953832626343,
      "learning_rate": 4.1230843398675555e-05,
      "loss": 2.1399,
      "step": 1950
    },
    {
      "epoch": 2.1715526601520088,
      "grad_norm": 1.2448049783706665,
      "learning_rate": 3.664742706752925e-05,
      "loss": 2.1417,
      "step": 2000
    },
    {
      "epoch": 2.1715526601520088,
      "eval_loss": 2.194857120513916,
      "eval_runtime": 7.3786,
      "eval_samples_per_second": 81.587,
      "eval_steps_per_second": 2.575,
      "step": 2000
    },
    {
      "epoch": 2.225841476655809,
      "grad_norm": 1.2955018281936646,
      "learning_rate": 3.227632691927414e-05,
      "loss": 2.1308,
      "step": 2050
    },
    {
      "epoch": 2.2801302931596092,
      "grad_norm": 1.0340995788574219,
      "learning_rate": 2.8132192008487768e-05,
      "loss": 2.129,
      "step": 2100
    },
    {
      "epoch": 2.3344191096634095,
      "grad_norm": 1.2585651874542236,
      "learning_rate": 2.4228910751455625e-05,
      "loss": 2.1403,
      "step": 2150
    },
    {
      "epoch": 2.3887079261672097,
      "grad_norm": 1.2523435354232788,
      "learning_rate": 2.0579564381328775e-05,
      "loss": 2.1197,
      "step": 2200
    },
    {
      "epoch": 2.3887079261672097,
      "eval_loss": 2.190831422805786,
      "eval_runtime": 7.3454,
      "eval_samples_per_second": 81.956,
      "eval_steps_per_second": 2.587,
      "step": 2200
    },
    {
      "epoch": 2.44299674267101,
      "grad_norm": 1.4523568153381348,
      "learning_rate": 1.7196383108428315e-05,
      "loss": 2.1359,
      "step": 2250
    },
    {
      "epoch": 2.49728555917481,
      "grad_norm": 1.3263523578643799,
      "learning_rate": 1.4090705132619631e-05,
      "loss": 2.1206,
      "step": 2300
    },
    {
      "epoch": 2.5515743756786105,
      "grad_norm": 1.4148005247116089,
      "learning_rate": 1.1272938645118803e-05,
      "loss": 2.1314,
      "step": 2350
    },
    {
      "epoch": 2.6058631921824107,
      "grad_norm": 1.9476280212402344,
      "learning_rate": 8.752526947077045e-06,
      "loss": 2.1254,
      "step": 2400
    },
    {
      "epoch": 2.6058631921824107,
      "eval_loss": 2.189617395401001,
      "eval_runtime": 7.3524,
      "eval_samples_per_second": 81.878,
      "eval_steps_per_second": 2.584,
      "step": 2400
    }
  ],
  "logging_steps": 50,
  "max_steps": 2763,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 3
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.723721683521372e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}