{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 492,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04065040650406504,
      "grad_norm": 14.707608222961426,
      "learning_rate": 9.000000000000001e-07,
      "loss": 0.9613,
      "step": 10
    },
    {
      "epoch": 0.08130081300813008,
      "grad_norm": 12.035386085510254,
      "learning_rate": 1.9000000000000002e-06,
      "loss": 0.8509,
      "step": 20
    },
    {
      "epoch": 0.12195121951219512,
      "grad_norm": 18.44657325744629,
      "learning_rate": 2.9e-06,
      "loss": 0.9369,
      "step": 30
    },
    {
      "epoch": 0.16260162601626016,
      "grad_norm": 12.941998481750488,
      "learning_rate": 3.900000000000001e-06,
      "loss": 0.896,
      "step": 40
    },
    {
      "epoch": 0.2032520325203252,
      "grad_norm": 15.150432586669922,
      "learning_rate": 4.9000000000000005e-06,
      "loss": 0.9821,
      "step": 50
    },
    {
      "epoch": 0.24390243902439024,
      "grad_norm": 18.638032913208008,
      "learning_rate": 5.9e-06,
      "loss": 0.9666,
      "step": 60
    },
    {
      "epoch": 0.2845528455284553,
      "grad_norm": 15.954939842224121,
      "learning_rate": 6.9e-06,
      "loss": 0.9019,
      "step": 70
    },
    {
      "epoch": 0.3252032520325203,
      "grad_norm": 11.114273071289062,
      "learning_rate": 7.9e-06,
      "loss": 0.8321,
      "step": 80
    },
    {
      "epoch": 0.36585365853658536,
      "grad_norm": 15.285880088806152,
      "learning_rate": 8.900000000000001e-06,
      "loss": 0.7198,
      "step": 90
    },
    {
      "epoch": 0.4065040650406504,
      "grad_norm": 17.8586483001709,
      "learning_rate": 9.9e-06,
      "loss": 0.7806,
      "step": 100
    },
    {
      "epoch": 0.44715447154471544,
      "grad_norm": 11.217978477478027,
      "learning_rate": 9.961864406779662e-06,
      "loss": 1.0024,
      "step": 110
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 7.002477169036865,
      "learning_rate": 9.91949152542373e-06,
      "loss": 0.8086,
      "step": 120
    },
    {
      "epoch": 0.5284552845528455,
      "grad_norm": 13.211562156677246,
      "learning_rate": 9.877118644067798e-06,
      "loss": 0.8958,
      "step": 130
    },
    {
      "epoch": 0.5691056910569106,
      "grad_norm": 11.169763565063477,
      "learning_rate": 9.834745762711865e-06,
      "loss": 0.8316,
      "step": 140
    },
    {
      "epoch": 0.6097560975609756,
      "grad_norm": 15.369425773620605,
      "learning_rate": 9.792372881355933e-06,
      "loss": 0.675,
      "step": 150
    },
    {
      "epoch": 0.6504065040650406,
      "grad_norm": 11.003032684326172,
      "learning_rate": 9.75e-06,
      "loss": 0.7512,
      "step": 160
    },
    {
      "epoch": 0.6910569105691057,
      "grad_norm": 16.466957092285156,
      "learning_rate": 9.707627118644068e-06,
      "loss": 0.8546,
      "step": 170
    },
    {
      "epoch": 0.7317073170731707,
      "grad_norm": 13.067832946777344,
      "learning_rate": 9.665254237288136e-06,
      "loss": 0.7497,
      "step": 180
    },
    {
      "epoch": 0.7723577235772358,
      "grad_norm": 9.530882835388184,
      "learning_rate": 9.622881355932205e-06,
      "loss": 0.6136,
      "step": 190
    },
    {
      "epoch": 0.8130081300813008,
      "grad_norm": 15.063409805297852,
      "learning_rate": 9.580508474576273e-06,
      "loss": 0.6073,
      "step": 200
    },
    {
      "epoch": 0.8536585365853658,
      "grad_norm": 9.07528305053711,
      "learning_rate": 9.53813559322034e-06,
      "loss": 0.7295,
      "step": 210
    },
    {
      "epoch": 0.8943089430894309,
      "grad_norm": 14.199564933776855,
      "learning_rate": 9.495762711864408e-06,
      "loss": 0.596,
      "step": 220
    },
    {
      "epoch": 0.9349593495934959,
      "grad_norm": 16.03732681274414,
      "learning_rate": 9.453389830508474e-06,
      "loss": 0.6922,
      "step": 230
    },
    {
      "epoch": 0.975609756097561,
      "grad_norm": 17.533695220947266,
      "learning_rate": 9.411016949152543e-06,
      "loss": 0.7484,
      "step": 240
    },
    {
      "epoch": 1.0,
      "eval_general_loss": 0.754059910774231,
      "eval_general_runtime": 254.3068,
      "eval_general_samples_per_second": 3.598,
      "eval_general_steps_per_second": 0.9,
      "step": 246
    },
    {
      "epoch": 1.0,
      "eval_code_loss": 0.7234187722206116,
      "eval_code_runtime": 296.8568,
      "eval_code_samples_per_second": 3.092,
      "eval_code_steps_per_second": 0.775,
      "step": 246
    },
    {
      "epoch": 1.0,
      "eval_stem_loss": 0.7939386367797852,
      "eval_stem_runtime": 250.6578,
      "eval_stem_samples_per_second": 3.646,
      "eval_stem_steps_per_second": 0.914,
      "step": 246
    },
    {
      "epoch": 1.016260162601626,
      "grad_norm": 16.589147567749023,
      "learning_rate": 9.368644067796611e-06,
      "loss": 0.7309,
      "step": 250
    },
    {
      "epoch": 1.056910569105691,
      "grad_norm": 8.124390602111816,
      "learning_rate": 9.32627118644068e-06,
      "loss": 0.5937,
      "step": 260
    },
    {
      "epoch": 1.0975609756097562,
      "grad_norm": 13.223992347717285,
      "learning_rate": 9.283898305084746e-06,
      "loss": 0.6961,
      "step": 270
    },
    {
      "epoch": 1.1382113821138211,
      "grad_norm": 13.791741371154785,
      "learning_rate": 9.241525423728814e-06,
      "loss": 0.5783,
      "step": 280
    },
    {
      "epoch": 1.1788617886178863,
      "grad_norm": 16.130605697631836,
      "learning_rate": 9.199152542372882e-06,
      "loss": 0.6318,
      "step": 290
    },
    {
      "epoch": 1.2195121951219512,
      "grad_norm": 14.941628456115723,
      "learning_rate": 9.15677966101695e-06,
      "loss": 0.6163,
      "step": 300
    },
    {
      "epoch": 1.2601626016260163,
      "grad_norm": 6.407144546508789,
      "learning_rate": 9.114406779661017e-06,
      "loss": 0.5745,
      "step": 310
    },
    {
      "epoch": 1.3008130081300813,
      "grad_norm": 7.032970905303955,
      "learning_rate": 9.072033898305086e-06,
      "loss": 0.474,
      "step": 320
    },
    {
      "epoch": 1.3414634146341464,
      "grad_norm": 14.255295753479004,
      "learning_rate": 9.029661016949152e-06,
      "loss": 0.6833,
      "step": 330
    },
    {
      "epoch": 1.3821138211382114,
      "grad_norm": 19.13692855834961,
      "learning_rate": 8.98728813559322e-06,
      "loss": 0.5369,
      "step": 340
    },
    {
      "epoch": 1.4227642276422765,
      "grad_norm": 9.524161338806152,
      "learning_rate": 8.944915254237289e-06,
      "loss": 0.6925,
      "step": 350
    },
    {
      "epoch": 1.4634146341463414,
      "grad_norm": 12.59819221496582,
      "learning_rate": 8.902542372881357e-06,
      "loss": 0.572,
      "step": 360
    },
    {
      "epoch": 1.5040650406504064,
      "grad_norm": 7.560878753662109,
      "learning_rate": 8.860169491525424e-06,
      "loss": 0.5101,
      "step": 370
    },
    {
      "epoch": 1.5447154471544715,
      "grad_norm": 10.911989212036133,
      "learning_rate": 8.817796610169492e-06,
      "loss": 0.7357,
      "step": 380
    },
    {
      "epoch": 1.5853658536585367,
      "grad_norm": 13.019627571105957,
      "learning_rate": 8.77542372881356e-06,
      "loss": 0.7094,
      "step": 390
    },
    {
      "epoch": 1.6260162601626016,
      "grad_norm": 5.221607685089111,
      "learning_rate": 8.733050847457629e-06,
      "loss": 0.5762,
      "step": 400
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 9.154648780822754,
      "learning_rate": 8.690677966101695e-06,
      "loss": 0.6516,
      "step": 410
    },
    {
      "epoch": 1.7073170731707317,
      "grad_norm": 8.04185962677002,
      "learning_rate": 8.648305084745763e-06,
      "loss": 0.5457,
      "step": 420
    },
    {
      "epoch": 1.7479674796747968,
      "grad_norm": 12.428257942199707,
      "learning_rate": 8.60593220338983e-06,
      "loss": 0.6458,
      "step": 430
    },
    {
      "epoch": 1.7886178861788617,
      "grad_norm": 6.699453353881836,
      "learning_rate": 8.563559322033898e-06,
      "loss": 0.6026,
      "step": 440
    },
    {
      "epoch": 1.8292682926829267,
      "grad_norm": 5.6174774169921875,
      "learning_rate": 8.521186440677967e-06,
      "loss": 0.6756,
      "step": 450
    },
    {
      "epoch": 1.8699186991869918,
      "grad_norm": 12.72862720489502,
      "learning_rate": 8.478813559322035e-06,
      "loss": 0.5932,
      "step": 460
    },
    {
      "epoch": 1.910569105691057,
      "grad_norm": 4.651763439178467,
      "learning_rate": 8.436440677966102e-06,
      "loss": 0.418,
      "step": 470
    },
    {
      "epoch": 1.951219512195122,
      "grad_norm": 16.044872283935547,
      "learning_rate": 8.39406779661017e-06,
      "loss": 0.5756,
      "step": 480
    },
    {
      "epoch": 1.9918699186991868,
      "grad_norm": 10.388751029968262,
      "learning_rate": 8.351694915254238e-06,
      "loss": 0.6304,
      "step": 490
    },
    {
      "epoch": 2.0,
      "eval_general_loss": 0.6887519359588623,
      "eval_general_runtime": 255.0576,
      "eval_general_samples_per_second": 3.587,
      "eval_general_steps_per_second": 0.898,
      "step": 492
    },
    {
      "epoch": 2.0,
      "eval_code_loss": 0.5912412405014038,
      "eval_code_runtime": 297.8316,
      "eval_code_samples_per_second": 3.082,
      "eval_code_steps_per_second": 0.772,
      "step": 492
    },
    {
      "epoch": 2.0,
      "eval_stem_loss": 0.7283160090446472,
      "eval_stem_runtime": 251.4929,
      "eval_stem_samples_per_second": 3.634,
      "eval_stem_steps_per_second": 0.911,
      "step": 492
    }
  ],
  "logging_steps": 10,
  "max_steps": 2460,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2172218885557453e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}