{
  "best_global_step": 3415,
  "best_metric": 0.6445827232796486,
  "best_model_checkpoint": "./saved_models/checkpoint-3415",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 3415,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07320644216691069,
      "grad_norm": 9.181918144226074,
      "learning_rate": 1.971303074670571e-05,
      "loss": 0.8158,
      "step": 50
    },
    {
      "epoch": 0.14641288433382138,
      "grad_norm": 10.360370635986328,
      "learning_rate": 1.942020497803807e-05,
      "loss": 0.7489,
      "step": 100
    },
    {
      "epoch": 0.21961932650073207,
      "grad_norm": 5.784631252288818,
      "learning_rate": 1.9127379209370426e-05,
      "loss": 0.7137,
      "step": 150
    },
    {
      "epoch": 0.29282576866764276,
      "grad_norm": 6.866419315338135,
      "learning_rate": 1.8834553440702785e-05,
      "loss": 0.7168,
      "step": 200
    },
    {
      "epoch": 0.36603221083455345,
      "grad_norm": 12.543447494506836,
      "learning_rate": 1.854172767203514e-05,
      "loss": 0.7341,
      "step": 250
    },
    {
      "epoch": 0.43923865300146414,
      "grad_norm": 5.011623382568359,
      "learning_rate": 1.8248901903367496e-05,
      "loss": 0.7068,
      "step": 300
    },
    {
      "epoch": 0.5124450951683748,
      "grad_norm": 8.569839477539062,
      "learning_rate": 1.7956076134699855e-05,
      "loss": 0.702,
      "step": 350
    },
    {
      "epoch": 0.5856515373352855,
      "grad_norm": 4.254383087158203,
      "learning_rate": 1.766325036603221e-05,
      "loss": 0.6995,
      "step": 400
    },
    {
      "epoch": 0.6588579795021962,
      "grad_norm": 5.654411792755127,
      "learning_rate": 1.737042459736457e-05,
      "loss": 0.6922,
      "step": 450
    },
    {
      "epoch": 0.7320644216691069,
      "grad_norm": 4.375262260437012,
      "learning_rate": 1.7077598828696925e-05,
      "loss": 0.6808,
      "step": 500
    },
    {
      "epoch": 0.8052708638360175,
      "grad_norm": 4.428417205810547,
      "learning_rate": 1.6784773060029284e-05,
      "loss": 0.7041,
      "step": 550
    },
    {
      "epoch": 0.8784773060029283,
      "grad_norm": 7.5497612953186035,
      "learning_rate": 1.649194729136164e-05,
      "loss": 0.6856,
      "step": 600
    },
    {
      "epoch": 0.9516837481698389,
      "grad_norm": 6.1082658767700195,
      "learning_rate": 1.6199121522694e-05,
      "loss": 0.6843,
      "step": 650
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5728404099560761,
      "eval_loss": 0.6616985201835632,
      "eval_runtime": 9.1365,
      "eval_samples_per_second": 299.02,
      "eval_steps_per_second": 4.706,
      "step": 683
    },
    {
      "epoch": 1.0248901903367496,
      "grad_norm": 7.093502521514893,
      "learning_rate": 1.5906295754026355e-05,
      "loss": 0.6763,
      "step": 700
    },
    {
      "epoch": 1.0980966325036603,
      "grad_norm": 7.498149394989014,
      "learning_rate": 1.5613469985358714e-05,
      "loss": 0.6657,
      "step": 750
    },
    {
      "epoch": 1.171303074670571,
      "grad_norm": 4.592875957489014,
      "learning_rate": 1.532064421669107e-05,
      "loss": 0.6725,
      "step": 800
    },
    {
      "epoch": 1.2445095168374818,
      "grad_norm": 3.1138663291931152,
      "learning_rate": 1.5027818448023428e-05,
      "loss": 0.6793,
      "step": 850
    },
    {
      "epoch": 1.3177159590043923,
      "grad_norm": 5.978513240814209,
      "learning_rate": 1.4734992679355784e-05,
      "loss": 0.6698,
      "step": 900
    },
    {
      "epoch": 1.390922401171303,
      "grad_norm": 7.4365339279174805,
      "learning_rate": 1.4442166910688143e-05,
      "loss": 0.6803,
      "step": 950
    },
    {
      "epoch": 1.4641288433382138,
      "grad_norm": 4.018332004547119,
      "learning_rate": 1.4149341142020499e-05,
      "loss": 0.6619,
      "step": 1000
    },
    {
      "epoch": 1.5373352855051245,
      "grad_norm": 3.3087430000305176,
      "learning_rate": 1.3856515373352856e-05,
      "loss": 0.6647,
      "step": 1050
    },
    {
      "epoch": 1.610541727672035,
      "grad_norm": 3.5859272480010986,
      "learning_rate": 1.3563689604685213e-05,
      "loss": 0.6641,
      "step": 1100
    },
    {
      "epoch": 1.6837481698389458,
      "grad_norm": 3.992220163345337,
      "learning_rate": 1.327086383601757e-05,
      "loss": 0.6625,
      "step": 1150
    },
    {
      "epoch": 1.7569546120058566,
      "grad_norm": 5.505192756652832,
      "learning_rate": 1.2978038067349928e-05,
      "loss": 0.653,
      "step": 1200
    },
    {
      "epoch": 1.830161054172767,
      "grad_norm": 3.4128470420837402,
      "learning_rate": 1.2685212298682286e-05,
      "loss": 0.6467,
      "step": 1250
    },
    {
      "epoch": 1.903367496339678,
      "grad_norm": 3.6012327671051025,
      "learning_rate": 1.2392386530014641e-05,
      "loss": 0.6585,
      "step": 1300
    },
    {
      "epoch": 1.9765739385065886,
      "grad_norm": 2.7060303688049316,
      "learning_rate": 1.2099560761347e-05,
      "loss": 0.6403,
      "step": 1350
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5904099560761347,
      "eval_loss": 0.6402733325958252,
      "eval_runtime": 9.1043,
      "eval_samples_per_second": 300.077,
      "eval_steps_per_second": 4.723,
      "step": 1366
    },
    {
      "epoch": 2.049780380673499,
      "grad_norm": 8.067757606506348,
      "learning_rate": 1.1806734992679356e-05,
      "loss": 0.641,
      "step": 1400
    },
    {
      "epoch": 2.12298682284041,
      "grad_norm": 6.0310540199279785,
      "learning_rate": 1.1513909224011715e-05,
      "loss": 0.639,
      "step": 1450
    },
    {
      "epoch": 2.1961932650073206,
      "grad_norm": 8.116523742675781,
      "learning_rate": 1.122108345534407e-05,
      "loss": 0.6338,
      "step": 1500
    },
    {
      "epoch": 2.269399707174231,
      "grad_norm": 2.589458703994751,
      "learning_rate": 1.092825768667643e-05,
      "loss": 0.6359,
      "step": 1550
    },
    {
      "epoch": 2.342606149341142,
      "grad_norm": 3.3060238361358643,
      "learning_rate": 1.0635431918008785e-05,
      "loss": 0.6387,
      "step": 1600
    },
    {
      "epoch": 2.4158125915080526,
      "grad_norm": 4.6258134841918945,
      "learning_rate": 1.0342606149341143e-05,
      "loss": 0.6279,
      "step": 1650
    },
    {
      "epoch": 2.4890190336749636,
      "grad_norm": 5.477193832397461,
      "learning_rate": 1.00497803806735e-05,
      "loss": 0.6455,
      "step": 1700
    },
    {
      "epoch": 2.562225475841874,
      "grad_norm": 4.201015949249268,
      "learning_rate": 9.756954612005857e-06,
      "loss": 0.6321,
      "step": 1750
    },
    {
      "epoch": 2.6354319180087846,
      "grad_norm": 4.213687896728516,
      "learning_rate": 9.464128843338215e-06,
      "loss": 0.6319,
      "step": 1800
    },
    {
      "epoch": 2.7086383601756956,
      "grad_norm": 15.101090431213379,
      "learning_rate": 9.171303074670572e-06,
      "loss": 0.6306,
      "step": 1850
    },
    {
      "epoch": 2.781844802342606,
      "grad_norm": 3.422281265258789,
      "learning_rate": 8.87847730600293e-06,
      "loss": 0.6436,
      "step": 1900
    },
    {
      "epoch": 2.855051244509517,
      "grad_norm": 8.450922012329102,
      "learning_rate": 8.585651537335287e-06,
      "loss": 0.6355,
      "step": 1950
    },
    {
      "epoch": 2.9282576866764276,
      "grad_norm": 7.880731105804443,
      "learning_rate": 8.292825768667644e-06,
      "loss": 0.6226,
      "step": 2000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6284773060029283,
      "eval_loss": 0.6120732426643372,
      "eval_runtime": 9.1117,
      "eval_samples_per_second": 299.834,
      "eval_steps_per_second": 4.719,
      "step": 2049
    },
    {
      "epoch": 3.001464128843338,
      "grad_norm": 3.401750326156616,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.6201,
      "step": 2050
    },
    {
      "epoch": 3.074670571010249,
      "grad_norm": 3.4573326110839844,
      "learning_rate": 7.707174231332359e-06,
      "loss": 0.6152,
      "step": 2100
    },
    {
      "epoch": 3.1478770131771596,
      "grad_norm": 3.4153525829315186,
      "learning_rate": 7.414348462664715e-06,
      "loss": 0.6124,
      "step": 2150
    },
    {
      "epoch": 3.22108345534407,
      "grad_norm": 3.386751651763916,
      "learning_rate": 7.1215226939970725e-06,
      "loss": 0.6098,
      "step": 2200
    },
    {
      "epoch": 3.294289897510981,
      "grad_norm": 8.25568962097168,
      "learning_rate": 6.82869692532943e-06,
      "loss": 0.6131,
      "step": 2250
    },
    {
      "epoch": 3.3674963396778916,
      "grad_norm": 3.146071195602417,
      "learning_rate": 6.535871156661787e-06,
      "loss": 0.6109,
      "step": 2300
    },
    {
      "epoch": 3.440702781844802,
      "grad_norm": 3.564626455307007,
      "learning_rate": 6.2430453879941446e-06,
      "loss": 0.6024,
      "step": 2350
    },
    {
      "epoch": 3.513909224011713,
      "grad_norm": 4.476875305175781,
      "learning_rate": 5.950219619326502e-06,
      "loss": 0.6121,
      "step": 2400
    },
    {
      "epoch": 3.5871156661786237,
      "grad_norm": 4.083089828491211,
      "learning_rate": 5.657393850658858e-06,
      "loss": 0.6155,
      "step": 2450
    },
    {
      "epoch": 3.660322108345534,
      "grad_norm": 4.565993785858154,
      "learning_rate": 5.364568081991216e-06,
      "loss": 0.6037,
      "step": 2500
    },
    {
      "epoch": 3.733528550512445,
      "grad_norm": 3.9050488471984863,
      "learning_rate": 5.071742313323573e-06,
      "loss": 0.6008,
      "step": 2550
    },
    {
      "epoch": 3.8067349926793557,
      "grad_norm": 3.331878185272217,
      "learning_rate": 4.77891654465593e-06,
      "loss": 0.5896,
      "step": 2600
    },
    {
      "epoch": 3.8799414348462666,
      "grad_norm": 7.776970863342285,
      "learning_rate": 4.486090775988287e-06,
      "loss": 0.608,
      "step": 2650
    },
    {
      "epoch": 3.953147877013177,
      "grad_norm": 4.320408821105957,
      "learning_rate": 4.193265007320644e-06,
      "loss": 0.5978,
      "step": 2700
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.637628111273792,
      "eval_loss": 0.604773759841919,
      "eval_runtime": 9.1056,
      "eval_samples_per_second": 300.036,
      "eval_steps_per_second": 4.722,
      "step": 2732
    },
    {
      "epoch": 4.026354319180088,
      "grad_norm": 4.932640075683594,
      "learning_rate": 3.900439238653002e-06,
      "loss": 0.592,
      "step": 2750
    },
    {
      "epoch": 4.099560761346998,
      "grad_norm": 4.4933929443359375,
      "learning_rate": 3.607613469985359e-06,
      "loss": 0.6146,
      "step": 2800
    },
    {
      "epoch": 4.172767203513909,
      "grad_norm": 5.861272811889648,
      "learning_rate": 3.314787701317716e-06,
      "loss": 0.5881,
      "step": 2850
    },
    {
      "epoch": 4.24597364568082,
      "grad_norm": 2.9945833683013916,
      "learning_rate": 3.0219619326500732e-06,
      "loss": 0.5937,
      "step": 2900
    },
    {
      "epoch": 4.31918008784773,
      "grad_norm": 5.6117658615112305,
      "learning_rate": 2.7291361639824306e-06,
      "loss": 0.5811,
      "step": 2950
    },
    {
      "epoch": 4.392386530014641,
      "grad_norm": 3.8142032623291016,
      "learning_rate": 2.436310395314788e-06,
      "loss": 0.5561,
      "step": 3000
    },
    {
      "epoch": 4.465592972181552,
      "grad_norm": 3.9350202083587646,
      "learning_rate": 2.1434846266471453e-06,
      "loss": 0.581,
      "step": 3050
    },
    {
      "epoch": 4.538799414348462,
      "grad_norm": 5.021022796630859,
      "learning_rate": 1.8506588579795024e-06,
      "loss": 0.581,
      "step": 3100
    },
    {
      "epoch": 4.612005856515373,
      "grad_norm": 4.0799760818481445,
      "learning_rate": 1.5578330893118595e-06,
      "loss": 0.5673,
      "step": 3150
    },
    {
      "epoch": 4.685212298682284,
      "grad_norm": 3.7053580284118652,
      "learning_rate": 1.2650073206442169e-06,
      "loss": 0.5759,
      "step": 3200
    },
    {
      "epoch": 4.758418740849194,
      "grad_norm": 6.692492961883545,
      "learning_rate": 9.72181551976574e-07,
      "loss": 0.572,
      "step": 3250
    },
    {
      "epoch": 4.831625183016105,
      "grad_norm": 3.743006467819214,
      "learning_rate": 6.793557833089313e-07,
      "loss": 0.5728,
      "step": 3300
    },
    {
      "epoch": 4.904831625183016,
      "grad_norm": 4.34169340133667,
      "learning_rate": 3.865300146412885e-07,
      "loss": 0.5952,
      "step": 3350
    },
    {
      "epoch": 4.978038067349927,
      "grad_norm": 6.44805383682251,
      "learning_rate": 9.370424597364569e-08,
      "loss": 0.5987,
      "step": 3400
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6445827232796486,
      "eval_loss": 0.5975594520568848,
      "eval_runtime": 9.1017,
      "eval_samples_per_second": 300.165,
      "eval_steps_per_second": 4.724,
      "step": 3415
    }
  ],
  "logging_steps": 50,
  "max_steps": 3415,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.33697966920192e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}