{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.058997050147493,
  "eval_steps": 50,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11799410029498525,
      "grad_norm": 0.20195358991622925,
      "learning_rate": 0.0003,
      "loss": 1.9738,
      "step": 10
    },
    {
      "epoch": 0.2359882005899705,
      "grad_norm": 0.1426803171634674,
      "learning_rate": 0.0002963855421686747,
      "loss": 1.5063,
      "step": 20
    },
    {
      "epoch": 0.35398230088495575,
      "grad_norm": 0.15137845277786255,
      "learning_rate": 0.0002927710843373494,
      "loss": 1.3522,
      "step": 30
    },
    {
      "epoch": 0.471976401179941,
      "grad_norm": 0.14357119798660278,
      "learning_rate": 0.0002891566265060241,
      "loss": 1.2434,
      "step": 40
    },
    {
      "epoch": 0.5899705014749262,
      "grad_norm": 0.13860304653644562,
      "learning_rate": 0.00028554216867469873,
      "loss": 1.1816,
      "step": 50
    },
    {
      "epoch": 0.5899705014749262,
      "eval_loss": 1.159853458404541,
      "eval_runtime": 18.5086,
      "eval_samples_per_second": 8.861,
      "eval_steps_per_second": 0.756,
      "step": 50
    },
    {
      "epoch": 0.7079646017699115,
      "grad_norm": 0.13697050511837006,
      "learning_rate": 0.0002819277108433735,
      "loss": 1.1465,
      "step": 60
    },
    {
      "epoch": 0.8259587020648967,
      "grad_norm": 0.12234006822109222,
      "learning_rate": 0.0002783132530120482,
      "loss": 1.1006,
      "step": 70
    },
    {
      "epoch": 0.943952802359882,
      "grad_norm": 0.1310320794582367,
      "learning_rate": 0.00027469879518072284,
      "loss": 1.0819,
      "step": 80
    },
    {
      "epoch": 1.0589970501474926,
      "grad_norm": 0.12331829220056534,
      "learning_rate": 0.0002710843373493976,
      "loss": 1.044,
      "step": 90
    },
    {
      "epoch": 1.176991150442478,
      "grad_norm": 0.11900211870670319,
      "learning_rate": 0.00026746987951807225,
      "loss": 1.0244,
      "step": 100
    },
    {
      "epoch": 1.176991150442478,
      "eval_loss": 1.0301724672317505,
      "eval_runtime": 18.427,
      "eval_samples_per_second": 8.9,
      "eval_steps_per_second": 0.76,
      "step": 100
    },
    {
      "epoch": 1.294985250737463,
      "grad_norm": 0.13372938334941864,
      "learning_rate": 0.00026385542168674695,
      "loss": 1.0084,
      "step": 110
    },
    {
      "epoch": 1.4129793510324484,
      "grad_norm": 0.12052427977323532,
      "learning_rate": 0.00026024096385542165,
      "loss": 0.9994,
      "step": 120
    },
    {
      "epoch": 1.5309734513274336,
      "grad_norm": 0.11335684359073639,
      "learning_rate": 0.00025662650602409636,
      "loss": 0.9798,
      "step": 130
    },
    {
      "epoch": 1.648967551622419,
      "grad_norm": 0.13653819262981415,
      "learning_rate": 0.00025301204819277106,
      "loss": 0.9601,
      "step": 140
    },
    {
      "epoch": 1.7669616519174043,
      "grad_norm": 0.11394950747489929,
      "learning_rate": 0.00024939759036144576,
      "loss": 0.9586,
      "step": 150
    },
    {
      "epoch": 1.7669616519174043,
      "eval_loss": 0.9602091908454895,
      "eval_runtime": 18.4269,
      "eval_samples_per_second": 8.9,
      "eval_steps_per_second": 0.76,
      "step": 150
    },
    {
      "epoch": 1.8849557522123894,
      "grad_norm": 0.1154383197426796,
      "learning_rate": 0.00024578313253012046,
      "loss": 0.95,
      "step": 160
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.12450100481510162,
      "learning_rate": 0.00024216867469879517,
      "loss": 0.9272,
      "step": 170
    },
    {
      "epoch": 2.117994100294985,
      "grad_norm": 0.10410638153553009,
      "learning_rate": 0.00023855421686746987,
      "loss": 0.9071,
      "step": 180
    },
    {
      "epoch": 2.2359882005899703,
      "grad_norm": 0.12171204388141632,
      "learning_rate": 0.00023493975903614455,
      "loss": 0.9032,
      "step": 190
    },
    {
      "epoch": 2.353982300884956,
      "grad_norm": 0.12321081757545471,
      "learning_rate": 0.00023132530120481928,
      "loss": 0.8935,
      "step": 200
    },
    {
      "epoch": 2.353982300884956,
      "eval_loss": 0.9177303910255432,
      "eval_runtime": 18.4239,
      "eval_samples_per_second": 8.902,
      "eval_steps_per_second": 0.76,
      "step": 200
    },
    {
      "epoch": 2.471976401179941,
      "grad_norm": 0.10513285547494888,
      "learning_rate": 0.00022771084337349395,
      "loss": 0.8834,
      "step": 210
    },
    {
      "epoch": 2.589970501474926,
      "grad_norm": 0.12400174885988235,
      "learning_rate": 0.00022409638554216866,
      "loss": 0.8927,
      "step": 220
    },
    {
      "epoch": 2.7079646017699117,
      "grad_norm": 0.12554600834846497,
      "learning_rate": 0.00022048192771084336,
      "loss": 0.8787,
      "step": 230
    },
    {
      "epoch": 2.825958702064897,
      "grad_norm": 0.11129195988178253,
      "learning_rate": 0.00021686746987951806,
      "loss": 0.8715,
      "step": 240
    },
    {
      "epoch": 2.943952802359882,
      "grad_norm": 0.09794709086418152,
      "learning_rate": 0.00021325301204819274,
      "loss": 0.8739,
      "step": 250
    },
    {
      "epoch": 2.943952802359882,
      "eval_loss": 0.8860040307044983,
      "eval_runtime": 18.4949,
      "eval_samples_per_second": 8.867,
      "eval_steps_per_second": 0.757,
      "step": 250
    },
    {
      "epoch": 3.0589970501474926,
      "grad_norm": 0.10913146287202835,
      "learning_rate": 0.00020963855421686747,
      "loss": 0.8555,
      "step": 260
    },
    {
      "epoch": 3.1769911504424777,
      "grad_norm": 0.10241620987653732,
      "learning_rate": 0.00020602409638554214,
      "loss": 0.8415,
      "step": 270
    },
    {
      "epoch": 3.2949852507374633,
      "grad_norm": 0.10102570056915283,
      "learning_rate": 0.00020240963855421685,
      "loss": 0.8389,
      "step": 280
    },
    {
      "epoch": 3.4129793510324484,
      "grad_norm": 0.11023970693349838,
      "learning_rate": 0.00019879518072289155,
      "loss": 0.8326,
      "step": 290
    },
    {
      "epoch": 3.5309734513274336,
      "grad_norm": 0.11006706953048706,
      "learning_rate": 0.00019518072289156625,
      "loss": 0.8233,
      "step": 300
    },
    {
      "epoch": 3.5309734513274336,
      "eval_loss": 0.8637099862098694,
      "eval_runtime": 18.4424,
      "eval_samples_per_second": 8.893,
      "eval_steps_per_second": 0.759,
      "step": 300
    },
    {
      "epoch": 3.6489675516224187,
      "grad_norm": 0.1085994690656662,
      "learning_rate": 0.00019156626506024093,
      "loss": 0.8246,
      "step": 310
    },
    {
      "epoch": 3.7669616519174043,
      "grad_norm": 0.10803534090518951,
      "learning_rate": 0.00018795180722891566,
      "loss": 0.826,
      "step": 320
    },
    {
      "epoch": 3.8849557522123894,
      "grad_norm": 0.1013474091887474,
      "learning_rate": 0.00018433734939759034,
      "loss": 0.8286,
      "step": 330
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.11949065327644348,
      "learning_rate": 0.00018072289156626507,
      "loss": 0.822,
      "step": 340
    },
    {
      "epoch": 4.117994100294985,
      "grad_norm": 0.10211551189422607,
      "learning_rate": 0.00017710843373493974,
      "loss": 0.7956,
      "step": 350
    },
    {
      "epoch": 4.117994100294985,
      "eval_loss": 0.8463084697723389,
      "eval_runtime": 18.4912,
      "eval_samples_per_second": 8.869,
      "eval_steps_per_second": 0.757,
      "step": 350
    },
    {
      "epoch": 4.23598820058997,
      "grad_norm": 0.09929126501083374,
      "learning_rate": 0.00017349397590361444,
      "loss": 0.7919,
      "step": 360
    },
    {
      "epoch": 4.353982300884955,
      "grad_norm": 0.10217483341693878,
      "learning_rate": 0.00016987951807228915,
      "loss": 0.795,
      "step": 370
    },
    {
      "epoch": 4.4719764011799406,
      "grad_norm": 0.10732585191726685,
      "learning_rate": 0.00016626506024096385,
      "loss": 0.7951,
      "step": 380
    },
    {
      "epoch": 4.589970501474927,
      "grad_norm": 0.09474306553602219,
      "learning_rate": 0.00016265060240963853,
      "loss": 0.7932,
      "step": 390
    },
    {
      "epoch": 4.707964601769912,
      "grad_norm": 0.10201520472764969,
      "learning_rate": 0.00015903614457831326,
      "loss": 0.7859,
      "step": 400
    },
    {
      "epoch": 4.707964601769912,
      "eval_loss": 0.8334778547286987,
      "eval_runtime": 18.4535,
      "eval_samples_per_second": 8.887,
      "eval_steps_per_second": 0.759,
      "step": 400
    },
    {
      "epoch": 4.825958702064897,
      "grad_norm": 0.09956536442041397,
      "learning_rate": 0.00015542168674698793,
      "loss": 0.7908,
      "step": 410
    },
    {
      "epoch": 4.943952802359882,
      "grad_norm": 0.10082168132066727,
      "learning_rate": 0.00015180722891566264,
      "loss": 0.783,
      "step": 420
    },
    {
      "epoch": 5.058997050147493,
      "grad_norm": 0.10280101001262665,
      "learning_rate": 0.00014819277108433734,
      "loss": 0.7728,
      "step": 430
    },
    {
      "epoch": 5.176991150442478,
      "grad_norm": 0.09640956670045853,
      "learning_rate": 0.00014457831325301204,
      "loss": 0.7627,
      "step": 440
    },
    {
      "epoch": 5.294985250737463,
      "grad_norm": 0.09960771352052689,
      "learning_rate": 0.00014096385542168674,
      "loss": 0.7631,
      "step": 450
    },
    {
      "epoch": 5.294985250737463,
      "eval_loss": 0.8229261040687561,
      "eval_runtime": 18.4466,
      "eval_samples_per_second": 8.891,
      "eval_steps_per_second": 0.759,
      "step": 450
    },
    {
      "epoch": 5.412979351032448,
      "grad_norm": 0.10458113998174667,
      "learning_rate": 0.00013734939759036142,
      "loss": 0.7589,
      "step": 460
    },
    {
      "epoch": 5.530973451327434,
      "grad_norm": 0.10363869369029999,
      "learning_rate": 0.00013373493975903612,
      "loss": 0.7628,
      "step": 470
    },
    {
      "epoch": 5.648967551622419,
      "grad_norm": 0.09949172288179398,
      "learning_rate": 0.00013012048192771083,
      "loss": 0.7581,
      "step": 480
    },
    {
      "epoch": 5.766961651917404,
      "grad_norm": 0.10134359449148178,
      "learning_rate": 0.00012650602409638553,
      "loss": 0.7591,
      "step": 490
    },
    {
      "epoch": 5.88495575221239,
      "grad_norm": 0.09729685634374619,
      "learning_rate": 0.00012289156626506023,
      "loss": 0.7549,
      "step": 500
    },
    {
      "epoch": 5.88495575221239,
      "eval_loss": 0.8134584426879883,
      "eval_runtime": 18.5585,
      "eval_samples_per_second": 8.837,
      "eval_steps_per_second": 0.754,
      "step": 500
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.12621940672397614,
      "learning_rate": 0.00011927710843373494,
      "loss": 0.754,
      "step": 510
    },
    {
      "epoch": 6.117994100294985,
      "grad_norm": 0.10083900392055511,
      "learning_rate": 0.00011566265060240964,
      "loss": 0.7309,
      "step": 520
    },
    {
      "epoch": 6.23598820058997,
      "grad_norm": 0.09549740701913834,
      "learning_rate": 0.00011204819277108433,
      "loss": 0.7376,
      "step": 530
    },
    {
      "epoch": 6.353982300884955,
      "grad_norm": 0.09781886637210846,
      "learning_rate": 0.00010843373493975903,
      "loss": 0.7336,
      "step": 540
    },
    {
      "epoch": 6.4719764011799406,
      "grad_norm": 0.0949656218290329,
      "learning_rate": 0.00010481927710843373,
      "loss": 0.7375,
      "step": 550
    },
    {
      "epoch": 6.4719764011799406,
      "eval_loss": 0.8066837191581726,
      "eval_runtime": 18.4545,
      "eval_samples_per_second": 8.887,
      "eval_steps_per_second": 0.759,
      "step": 550
    },
    {
      "epoch": 6.589970501474927,
      "grad_norm": 0.09802760183811188,
      "learning_rate": 0.00010120481927710842,
      "loss": 0.736,
      "step": 560
    },
    {
      "epoch": 6.707964601769912,
      "grad_norm": 0.09498999267816544,
      "learning_rate": 9.759036144578313e-05,
      "loss": 0.7334,
      "step": 570
    },
    {
      "epoch": 6.825958702064897,
      "grad_norm": 0.10631827265024185,
      "learning_rate": 9.397590361445783e-05,
      "loss": 0.7336,
      "step": 580
    },
    {
      "epoch": 6.943952802359882,
      "grad_norm": 0.09911607205867767,
      "learning_rate": 9.036144578313253e-05,
      "loss": 0.7333,
      "step": 590
    },
    {
      "epoch": 7.058997050147493,
      "grad_norm": 0.09343602508306503,
      "learning_rate": 8.674698795180722e-05,
      "loss": 0.7224,
      "step": 600
    },
    {
      "epoch": 7.058997050147493,
      "eval_loss": 0.8018268346786499,
      "eval_runtime": 18.4713,
      "eval_samples_per_second": 8.879,
      "eval_steps_per_second": 0.758,
      "step": 600
    }
  ],
  "logging_steps": 10,
  "max_steps": 840,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 120,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0093778203412791e+19,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}