{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 37500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 30.94066619873047,
      "learning_rate": 6.666666666666667e-06,
      "loss": 2.5555,
      "step": 500
    },
    {
      "epoch": 0.08,
      "grad_norm": 31.642507553100586,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 1.5341,
      "step": 1000
    },
    {
      "epoch": 0.12,
      "grad_norm": 15.755248069763184,
      "learning_rate": 2e-05,
      "loss": 0.8868,
      "step": 1500
    },
    {
      "epoch": 0.16,
      "grad_norm": 16.84243392944336,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.7249,
      "step": 2000
    },
    {
      "epoch": 0.2,
      "grad_norm": 16.464881896972656,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.6703,
      "step": 2500
    },
    {
      "epoch": 0.24,
      "grad_norm": 22.622730255126953,
      "learning_rate": 4e-05,
      "loss": 0.6409,
      "step": 3000
    },
    {
      "epoch": 0.28,
      "grad_norm": 36.4688720703125,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.622,
      "step": 3500
    },
    {
      "epoch": 0.32,
      "grad_norm": 26.15373420715332,
      "learning_rate": 4.9993231029486544e-05,
      "loss": 0.6081,
      "step": 4000
    },
    {
      "epoch": 0.36,
      "grad_norm": 18.611602783203125,
      "learning_rate": 4.993910125649561e-05,
      "loss": 0.5778,
      "step": 4500
    },
    {
      "epoch": 0.4,
      "grad_norm": 8.996405601501465,
      "learning_rate": 4.983095894354858e-05,
      "loss": 0.5566,
      "step": 5000
    },
    {
      "epoch": 0.44,
      "grad_norm": 9.431114196777344,
      "learning_rate": 4.966903830281449e-05,
      "loss": 0.5342,
      "step": 5500
    },
    {
      "epoch": 0.48,
      "grad_norm": 7.389405250549316,
      "learning_rate": 4.9453690018345144e-05,
      "loss": 0.5065,
      "step": 6000
    },
    {
      "epoch": 0.52,
      "grad_norm": 31.81819725036621,
      "learning_rate": 4.9185380486571595e-05,
      "loss": 0.4933,
      "step": 6500
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.20836065709590912,
      "learning_rate": 4.88646908061933e-05,
      "loss": 0.4857,
      "step": 7000
    },
    {
      "epoch": 0.6,
      "grad_norm": 17.081571578979492,
      "learning_rate": 4.849231551964771e-05,
      "loss": 0.4848,
      "step": 7500
    },
    {
      "epoch": 0.64,
      "grad_norm": 18.848472595214844,
      "learning_rate": 4.806906110888606e-05,
      "loss": 0.4648,
      "step": 8000
    },
    {
      "epoch": 0.68,
      "grad_norm": 28.703367233276367,
      "learning_rate": 4.759584424871302e-05,
      "loss": 0.4274,
      "step": 8500
    },
    {
      "epoch": 0.72,
      "grad_norm": 25.15323829650879,
      "learning_rate": 4.707368982147318e-05,
      "loss": 0.4581,
      "step": 9000
    },
    {
      "epoch": 0.76,
      "grad_norm": 24.141220092773438,
      "learning_rate": 4.650372869738414e-05,
      "loss": 0.4331,
      "step": 9500
    },
    {
      "epoch": 0.8,
      "grad_norm": 11.99329662322998,
      "learning_rate": 4.588719528532342e-05,
      "loss": 0.4188,
      "step": 10000
    },
    {
      "epoch": 0.84,
      "grad_norm": 5.699385166168213,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.4158,
      "step": 10500
    },
    {
      "epoch": 0.88,
      "grad_norm": 22.17208480834961,
      "learning_rate": 4.4519850666916484e-05,
      "loss": 0.4045,
      "step": 11000
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.43230658769607544,
      "learning_rate": 4.377200082453749e-05,
      "loss": 0.3884,
      "step": 11500
    },
    {
      "epoch": 0.96,
      "grad_norm": 11.466931343078613,
      "learning_rate": 4.2983495008466276e-05,
      "loss": 0.3896,
      "step": 12000
    },
    {
      "epoch": 1.0,
      "grad_norm": 15.537298202514648,
      "learning_rate": 4.215604094671835e-05,
      "loss": 0.3913,
      "step": 12500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.887,
      "eval_loss": 0.3431779742240906,
      "eval_runtime": 577.7398,
      "eval_samples_per_second": 17.309,
      "eval_steps_per_second": 2.164,
      "step": 12500
    },
    {
      "epoch": 1.04,
      "grad_norm": 23.692087173461914,
      "learning_rate": 4.129143072053638e-05,
      "loss": 0.3064,
      "step": 13000
    },
    {
      "epoch": 1.08,
      "grad_norm": 0.12528562545776367,
      "learning_rate": 4.039153688314145e-05,
      "loss": 0.301,
      "step": 13500
    },
    {
      "epoch": 1.12,
      "grad_norm": 42.4860725402832,
      "learning_rate": 3.945830840419966e-05,
      "loss": 0.3009,
      "step": 14000
    },
    {
      "epoch": 1.16,
      "grad_norm": 15.097381591796875,
      "learning_rate": 3.8493766448787825e-05,
      "loss": 0.3001,
      "step": 14500
    },
    {
      "epoch": 1.2,
      "grad_norm": 16.759035110473633,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.2997,
      "step": 15000
    },
    {
      "epoch": 1.24,
      "grad_norm": 17.78322410583496,
      "learning_rate": 3.6479161334675296e-05,
      "loss": 0.3082,
      "step": 15500
    },
    {
      "epoch": 1.28,
      "grad_norm": 20.591352462768555,
      "learning_rate": 3.543346136204545e-05,
      "loss": 0.2789,
      "step": 16000
    },
    {
      "epoch": 1.32,
      "grad_norm": 2.979491949081421,
      "learning_rate": 3.436516483539781e-05,
      "loss": 0.299,
      "step": 16500
    },
    {
      "epoch": 1.3599999999999999,
      "grad_norm": 21.6214542388916,
      "learning_rate": 3.327658544712395e-05,
      "loss": 0.2841,
      "step": 17000
    },
    {
      "epoch": 1.4,
      "grad_norm": 12.33953857421875,
      "learning_rate": 3.217008081777726e-05,
      "loss": 0.2803,
      "step": 17500
    },
    {
      "epoch": 1.44,
      "grad_norm": 1.7421214580535889,
      "learning_rate": 3.104804738999169e-05,
      "loss": 0.2799,
      "step": 18000
    },
    {
      "epoch": 1.48,
      "grad_norm": 3.7507829666137695,
      "learning_rate": 2.9912915238320754e-05,
      "loss": 0.2969,
      "step": 18500
    },
    {
      "epoch": 1.52,
      "grad_norm": 0.3172661364078522,
      "learning_rate": 2.876714280623708e-05,
      "loss": 0.2839,
      "step": 19000
    },
    {
      "epoch": 1.56,
      "grad_norm": 0.3152337968349457,
      "learning_rate": 2.761321158169134e-05,
      "loss": 0.2934,
      "step": 19500
    },
    {
      "epoch": 1.6,
      "grad_norm": 6.9163289070129395,
      "learning_rate": 2.6453620722761896e-05,
      "loss": 0.2889,
      "step": 20000
    },
    {
      "epoch": 1.6400000000000001,
      "grad_norm": 28.51948356628418,
      "learning_rate": 2.5290881645034932e-05,
      "loss": 0.2709,
      "step": 20500
    },
    {
      "epoch": 1.6800000000000002,
      "grad_norm": 6.519890308380127,
      "learning_rate": 2.4127512582437485e-05,
      "loss": 0.2621,
      "step": 21000
    },
    {
      "epoch": 1.72,
      "grad_norm": 0.005845916923135519,
      "learning_rate": 2.2966033133303545e-05,
      "loss": 0.2588,
      "step": 21500
    },
    {
      "epoch": 1.76,
      "grad_norm": 0.0003709389711730182,
      "learning_rate": 2.1808958803485136e-05,
      "loss": 0.2677,
      "step": 22000
    },
    {
      "epoch": 1.8,
      "grad_norm": 1.661258339881897,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 0.26,
      "step": 22500
    },
    {
      "epoch": 1.8399999999999999,
      "grad_norm": 20.34541130065918,
      "learning_rate": 1.9518034395302414e-05,
      "loss": 0.2442,
      "step": 23000
    },
    {
      "epoch": 1.88,
      "grad_norm": 0.12481857091188431,
      "learning_rate": 1.838914594906995e-05,
      "loss": 0.2599,
      "step": 23500
    },
    {
      "epoch": 1.92,
      "grad_norm": 12.972709655761719,
      "learning_rate": 1.7274575140626318e-05,
      "loss": 0.2513,
      "step": 24000
    },
    {
      "epoch": 1.96,
      "grad_norm": 15.054149627685547,
      "learning_rate": 1.617673588215328e-05,
      "loss": 0.2497,
      "step": 24500
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.9500556588172913,
      "learning_rate": 1.509800584902108e-05,
      "loss": 0.2686,
      "step": 25000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9154,
      "eval_loss": 0.3040485382080078,
      "eval_runtime": 578.6501,
      "eval_samples_per_second": 17.282,
      "eval_steps_per_second": 2.16,
      "step": 25000
    },
    {
      "epoch": 2.04,
      "grad_norm": 3.1150968074798584,
      "learning_rate": 1.4040721330273062e-05,
      "loss": 0.1121,
      "step": 25500
    },
    {
      "epoch": 2.08,
      "grad_norm": 1.2094102203263901e-05,
      "learning_rate": 1.3007172168743854e-05,
      "loss": 0.1066,
      "step": 26000
    },
    {
      "epoch": 2.12,
      "grad_norm": 0.005309335887432098,
      "learning_rate": 1.1999596801769616e-05,
      "loss": 0.104,
      "step": 26500
    },
    {
      "epoch": 2.16,
      "grad_norm": 0.26751643419265747,
      "learning_rate": 1.1020177413231334e-05,
      "loss": 0.1105,
      "step": 27000
    },
    {
      "epoch": 2.2,
      "grad_norm": 20.066680908203125,
      "learning_rate": 1.0071035207430352e-05,
      "loss": 0.1103,
      "step": 27500
    },
    {
      "epoch": 2.24,
      "grad_norm": 9.758234977722168,
      "learning_rate": 9.154225815032242e-06,
      "loss": 0.1216,
      "step": 28000
    },
    {
      "epoch": 2.2800000000000002,
      "grad_norm": 39.75688934326172,
      "learning_rate": 8.271734841028553e-06,
      "loss": 0.121,
      "step": 28500
    },
    {
      "epoch": 2.32,
      "grad_norm": 8.768061637878418,
      "learning_rate": 7.4254735643584564e-06,
      "loss": 0.0926,
      "step": 29000
    },
    {
      "epoch": 2.36,
      "grad_norm": 1.5957878828048706,
      "learning_rate": 6.617274798504286e-06,
      "loss": 0.1115,
      "step": 29500
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.007041580975055695,
      "learning_rate": 5.848888922025553e-06,
      "loss": 0.113,
      "step": 30000
    },
    {
      "epoch": 2.44,
      "grad_norm": 0.0010586841963231564,
      "learning_rate": 5.121980087628803e-06,
      "loss": 0.1154,
      "step": 30500
    },
    {
      "epoch": 2.48,
      "grad_norm": 10.376145362854004,
      "learning_rate": 4.438122617983443e-06,
      "loss": 0.117,
      "step": 31000
    },
    {
      "epoch": 2.52,
      "grad_norm": 5.163309992894938e-07,
      "learning_rate": 3.798797596089351e-06,
      "loss": 0.0858,
      "step": 31500
    },
    {
      "epoch": 2.56,
      "grad_norm": 8.937009260989726e-05,
      "learning_rate": 3.205389657580943e-06,
      "loss": 0.0908,
      "step": 32000
    },
    {
      "epoch": 2.6,
      "grad_norm": 65.54885864257812,
      "learning_rate": 2.659183991914696e-06,
      "loss": 0.1016,
      "step": 32500
    },
    {
      "epoch": 2.64,
      "grad_norm": 9.665397556091193e-06,
      "learning_rate": 2.1613635589349756e-06,
      "loss": 0.081,
      "step": 33000
    },
    {
      "epoch": 2.68,
      "grad_norm": 0.019450731575489044,
      "learning_rate": 1.713006526846439e-06,
      "loss": 0.1056,
      "step": 33500
    },
    {
      "epoch": 2.7199999999999998,
      "grad_norm": 0.04092613607645035,
      "learning_rate": 1.31508393714177e-06,
      "loss": 0.1185,
      "step": 34000
    },
    {
      "epoch": 2.76,
      "grad_norm": 0.00013055396266281605,
      "learning_rate": 9.684576015420278e-07,
      "loss": 0.1084,
      "step": 34500
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.004797887057065964,
      "learning_rate": 6.738782355044049e-07,
      "loss": 0.0973,
      "step": 35000
    },
    {
      "epoch": 2.84,
      "grad_norm": 0.0011987409088760614,
      "learning_rate": 4.319838323396691e-07,
      "loss": 0.0883,
      "step": 35500
    },
    {
      "epoch": 2.88,
      "grad_norm": 4.8637772124493495e-05,
      "learning_rate": 2.4329828146074095e-07,
      "loss": 0.0966,
      "step": 36000
    },
    {
      "epoch": 2.92,
      "grad_norm": 69.71770477294922,
      "learning_rate": 1.0823023375489127e-07,
      "loss": 0.0953,
      "step": 36500
    },
    {
      "epoch": 2.96,
      "grad_norm": 3.9461236000061035,
      "learning_rate": 2.7072216536885853e-08,
      "loss": 0.0754,
      "step": 37000
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.002531010191887617,
      "learning_rate": 0.0,
      "loss": 0.0937,
      "step": 37500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9197,
      "eval_loss": 0.49928906559944153,
      "eval_runtime": 577.5752,
      "eval_samples_per_second": 17.314,
      "eval_steps_per_second": 2.164,
      "step": 37500
    }
  ],
  "logging_steps": 500,
  "max_steps": 37500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}