{
  "best_global_step": 225,
  "best_metric": 0.36194586753845215,
  "best_model_checkpoint": "/content/output/checkpoint-225",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 450,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13333333333333333,
      "grad_norm": 1.7940553426742554,
      "learning_rate": 5.917159763313609e-06,
      "loss": 1.3972,
      "mean_token_accuracy": 0.7618847399950027,
      "num_tokens": 31152.0,
      "step": 10
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 1.0511088371276855,
      "learning_rate": 1.1834319526627219e-05,
      "loss": 1.2288,
      "mean_token_accuracy": 0.7673537939786911,
      "num_tokens": 61773.0,
      "step": 20
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.8096975088119507,
      "learning_rate": 1.7751479289940828e-05,
      "loss": 0.9255,
      "mean_token_accuracy": 0.7948055118322372,
      "num_tokens": 94758.0,
      "step": 30
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 0.6213255524635315,
      "learning_rate": 2.3668639053254438e-05,
      "loss": 0.7366,
      "mean_token_accuracy": 0.8212289035320282,
      "num_tokens": 125915.0,
      "step": 40
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.42887863516807556,
      "learning_rate": 2.958579881656805e-05,
      "loss": 0.5556,
      "mean_token_accuracy": 0.8463607966899872,
      "num_tokens": 159364.0,
      "step": 50
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.5108550786972046,
      "learning_rate": 3.5502958579881656e-05,
      "loss": 0.4676,
      "mean_token_accuracy": 0.8614094287157059,
      "num_tokens": 190374.0,
      "step": 60
    },
    {
      "epoch": 0.9333333333333333,
      "grad_norm": 0.4310983121395111,
      "learning_rate": 4.142011834319527e-05,
      "loss": 0.4214,
      "mean_token_accuracy": 0.8710317760705948,
      "num_tokens": 222156.0,
      "step": 70
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.4043419659137726,
      "eval_mean_token_accuracy": 0.8734077858924866,
      "eval_num_tokens": 238011.0,
      "eval_runtime": 50.8925,
      "eval_samples_per_second": 1.965,
      "eval_steps_per_second": 0.491,
      "step": 75
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 0.35767048597335815,
      "learning_rate": 4.7337278106508875e-05,
      "loss": 0.3901,
      "mean_token_accuracy": 0.8787548273801804,
      "num_tokens": 255825.0,
      "step": 80
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.3684692680835724,
      "learning_rate": 5.3254437869822495e-05,
      "loss": 0.3956,
      "mean_token_accuracy": 0.8787799149751663,
      "num_tokens": 287370.0,
      "step": 90
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.35148611664772034,
      "learning_rate": 5.91715976331361e-05,
      "loss": 0.3822,
      "mean_token_accuracy": 0.8814697057008744,
      "num_tokens": 315883.0,
      "step": 100
    },
    {
      "epoch": 1.4666666666666668,
      "grad_norm": 0.39074671268463135,
      "learning_rate": 6.50887573964497e-05,
      "loss": 0.3703,
      "mean_token_accuracy": 0.8836676925420761,
      "num_tokens": 347324.0,
      "step": 110
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.38130292296409607,
      "learning_rate": 7.100591715976331e-05,
      "loss": 0.3762,
      "mean_token_accuracy": 0.8851484537124634,
      "num_tokens": 379374.0,
      "step": 120
    },
    {
      "epoch": 1.7333333333333334,
      "grad_norm": 0.4091486632823944,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.3446,
      "mean_token_accuracy": 0.8894414573907852,
      "num_tokens": 411843.0,
      "step": 130
    },
    {
      "epoch": 1.8666666666666667,
      "grad_norm": 0.361017644405365,
      "learning_rate": 8.284023668639054e-05,
      "loss": 0.3494,
      "mean_token_accuracy": 0.8915452778339386,
      "num_tokens": 444017.0,
      "step": 140
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.3487047851085663,
      "learning_rate": 8.875739644970414e-05,
      "loss": 0.3674,
      "mean_token_accuracy": 0.8846240967512131,
      "num_tokens": 476022.0,
      "step": 150
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.3620273470878601,
      "eval_mean_token_accuracy": 0.8875497984886169,
      "eval_num_tokens": 476022.0,
      "eval_runtime": 50.8961,
      "eval_samples_per_second": 1.965,
      "eval_steps_per_second": 0.491,
      "step": 150
    },
    {
      "epoch": 2.1333333333333333,
      "grad_norm": 0.480814665555954,
      "learning_rate": 9.467455621301775e-05,
      "loss": 0.2974,
      "mean_token_accuracy": 0.9050114572048187,
      "num_tokens": 508445.0,
      "step": 160
    },
    {
      "epoch": 2.2666666666666666,
      "grad_norm": 0.40775951743125916,
      "learning_rate": 9.989539748953976e-05,
      "loss": 0.2896,
      "mean_token_accuracy": 0.9084265738725662,
      "num_tokens": 541530.0,
      "step": 170
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.5174453854560852,
      "learning_rate": 9.884937238493725e-05,
      "loss": 0.29,
      "mean_token_accuracy": 0.9073476493358612,
      "num_tokens": 572212.0,
      "step": 180
    },
    {
      "epoch": 2.533333333333333,
      "grad_norm": 0.5889921188354492,
      "learning_rate": 9.780334728033474e-05,
      "loss": 0.2828,
      "mean_token_accuracy": 0.909446981549263,
      "num_tokens": 603269.0,
      "step": 190
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.5635319352149963,
      "learning_rate": 9.675732217573223e-05,
      "loss": 0.2744,
      "mean_token_accuracy": 0.9101487964391708,
      "num_tokens": 635792.0,
      "step": 200
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.609230637550354,
      "learning_rate": 9.571129707112972e-05,
      "loss": 0.2863,
      "mean_token_accuracy": 0.9104649156332016,
      "num_tokens": 666470.0,
      "step": 210
    },
    {
      "epoch": 2.9333333333333336,
      "grad_norm": 0.5209991931915283,
      "learning_rate": 9.46652719665272e-05,
      "loss": 0.3027,
      "mean_token_accuracy": 0.9050683736801147,
      "num_tokens": 699119.0,
      "step": 220
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.36194586753845215,
      "eval_mean_token_accuracy": 0.8886684679985046,
      "eval_num_tokens": 714033.0,
      "eval_runtime": 50.8962,
      "eval_samples_per_second": 1.965,
      "eval_steps_per_second": 0.491,
      "step": 225
    },
    {
      "epoch": 3.066666666666667,
      "grad_norm": 0.5214265584945679,
      "learning_rate": 9.361924686192469e-05,
      "loss": 0.2371,
      "mean_token_accuracy": 0.9248212277889252,
      "num_tokens": 729520.0,
      "step": 230
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.7459234595298767,
      "learning_rate": 9.257322175732218e-05,
      "loss": 0.1874,
      "mean_token_accuracy": 0.939895498752594,
      "num_tokens": 759510.0,
      "step": 240
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 0.614969789981842,
      "learning_rate": 9.152719665271967e-05,
      "loss": 0.1754,
      "mean_token_accuracy": 0.943790751695633,
      "num_tokens": 791875.0,
      "step": 250
    },
    {
      "epoch": 3.466666666666667,
      "grad_norm": 0.8585807085037231,
      "learning_rate": 9.048117154811716e-05,
      "loss": 0.1794,
      "mean_token_accuracy": 0.9401535421609879,
      "num_tokens": 821467.0,
      "step": 260
    },
    {
      "epoch": 3.6,
      "grad_norm": 0.6798614263534546,
      "learning_rate": 8.943514644351465e-05,
      "loss": 0.1754,
      "mean_token_accuracy": 0.9439484149217605,
      "num_tokens": 854000.0,
      "step": 270
    },
    {
      "epoch": 3.7333333333333334,
      "grad_norm": 0.6594717502593994,
      "learning_rate": 8.838912133891214e-05,
      "loss": 0.179,
      "mean_token_accuracy": 0.9433883309364319,
      "num_tokens": 885296.0,
      "step": 280
    },
    {
      "epoch": 3.8666666666666667,
      "grad_norm": 0.6909582614898682,
      "learning_rate": 8.734309623430963e-05,
      "loss": 0.1855,
      "mean_token_accuracy": 0.9395292073488235,
      "num_tokens": 919387.0,
      "step": 290
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.7755655646324158,
      "learning_rate": 8.629707112970712e-05,
      "loss": 0.1851,
      "mean_token_accuracy": 0.9409447878599166,
      "num_tokens": 952044.0,
      "step": 300
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.39551886916160583,
      "eval_mean_token_accuracy": 0.887341833114624,
      "eval_num_tokens": 952044.0,
      "eval_runtime": 50.8937,
      "eval_samples_per_second": 1.965,
      "eval_steps_per_second": 0.491,
      "step": 300
    },
    {
      "epoch": 4.133333333333334,
      "grad_norm": 0.6951614022254944,
      "learning_rate": 8.525104602510461e-05,
      "loss": 0.0942,
      "mean_token_accuracy": 0.9724623709917068,
      "num_tokens": 984335.0,
      "step": 310
    },
    {
      "epoch": 4.266666666666667,
      "grad_norm": 0.6803505420684814,
      "learning_rate": 8.42050209205021e-05,
      "loss": 0.1073,
      "mean_token_accuracy": 0.9653927534818649,
      "num_tokens": 1014854.0,
      "step": 320
    },
    {
      "epoch": 4.4,
      "grad_norm": 0.7284913063049316,
      "learning_rate": 8.315899581589958e-05,
      "loss": 0.1054,
      "mean_token_accuracy": 0.9662943929433823,
      "num_tokens": 1047986.0,
      "step": 330
    },
    {
      "epoch": 4.533333333333333,
      "grad_norm": 0.6803147792816162,
      "learning_rate": 8.211297071129707e-05,
      "loss": 0.1065,
      "mean_token_accuracy": 0.9682819366455078,
      "num_tokens": 1079663.0,
      "step": 340
    },
    {
      "epoch": 4.666666666666667,
      "grad_norm": 0.6864665150642395,
      "learning_rate": 8.106694560669456e-05,
      "loss": 0.1085,
      "mean_token_accuracy": 0.9646553307771683,
      "num_tokens": 1109495.0,
      "step": 350
    },
    {
      "epoch": 4.8,
      "grad_norm": 0.7594568729400635,
      "learning_rate": 8.002092050209205e-05,
      "loss": 0.1079,
      "mean_token_accuracy": 0.9674631506204605,
      "num_tokens": 1141598.0,
      "step": 360
    },
    {
      "epoch": 4.933333333333334,
      "grad_norm": 0.6360507607460022,
      "learning_rate": 7.897489539748954e-05,
      "loss": 0.118,
      "mean_token_accuracy": 0.9628556370735168,
      "num_tokens": 1172834.0,
      "step": 370
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.45760437846183777,
      "eval_mean_token_accuracy": 0.8874933505058289,
      "eval_num_tokens": 1190055.0,
      "eval_runtime": 50.9015,
      "eval_samples_per_second": 1.965,
      "eval_steps_per_second": 0.491,
      "step": 375
    },
    {
      "epoch": 5.066666666666666,
      "grad_norm": 0.4860494136810303,
      "learning_rate": 7.792887029288704e-05,
      "loss": 0.0845,
      "mean_token_accuracy": 0.9751860290765763,
      "num_tokens": 1206554.0,
      "step": 380
    },
    {
      "epoch": 5.2,
      "grad_norm": 0.6970198750495911,
      "learning_rate": 7.688284518828453e-05,
      "loss": 0.065,
      "mean_token_accuracy": 0.9810720026493073,
      "num_tokens": 1237792.0,
      "step": 390
    },
    {
      "epoch": 5.333333333333333,
      "grad_norm": 0.8283060193061829,
      "learning_rate": 7.583682008368202e-05,
      "loss": 0.0663,
      "mean_token_accuracy": 0.9797625124454499,
      "num_tokens": 1270203.0,
      "step": 400
    },
    {
      "epoch": 5.466666666666667,
      "grad_norm": 0.8080848455429077,
      "learning_rate": 7.479079497907951e-05,
      "loss": 0.0712,
      "mean_token_accuracy": 0.9780115723609925,
      "num_tokens": 1301414.0,
      "step": 410
    },
    {
      "epoch": 5.6,
      "grad_norm": 0.7461147904396057,
      "learning_rate": 7.3744769874477e-05,
      "loss": 0.0678,
      "mean_token_accuracy": 0.9800621330738067,
      "num_tokens": 1332421.0,
      "step": 420
    },
    {
      "epoch": 5.733333333333333,
      "grad_norm": 0.5786488056182861,
      "learning_rate": 7.269874476987449e-05,
      "loss": 0.067,
      "mean_token_accuracy": 0.980746528506279,
      "num_tokens": 1365542.0,
      "step": 430
    },
    {
      "epoch": 5.866666666666667,
      "grad_norm": 0.6561855673789978,
      "learning_rate": 7.165271966527197e-05,
      "loss": 0.0702,
      "mean_token_accuracy": 0.9795888513326645,
      "num_tokens": 1396981.0,
      "step": 440
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.8029749989509583,
      "learning_rate": 7.060669456066946e-05,
      "loss": 0.0721,
      "mean_token_accuracy": 0.9787283718585968,
      "num_tokens": 1428066.0,
      "step": 450
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.5095443725585938,
      "eval_mean_token_accuracy": 0.8847648048400879,
      "eval_num_tokens": 1428066.0,
      "eval_runtime": 50.9006,
      "eval_samples_per_second": 1.965,
      "eval_steps_per_second": 0.491,
      "step": 450
    }
  ],
  "logging_steps": 10,
  "max_steps": 1125,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.525609704189133e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|