{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 13.26530612244898,
  "eval_steps": 50,
  "global_step": 1300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5102040816326531,
      "grad_norm": 1.334796667098999,
      "learning_rate": 4.872448979591837e-06,
      "loss": 0.6699,
      "step": 50
    },
    {
      "epoch": 0.5102040816326531,
      "eval_accuracy": 0.7743589743589744,
      "eval_loss": 0.6341359615325928,
      "eval_runtime": 0.8219,
      "eval_samples_per_second": 237.259,
      "eval_steps_per_second": 15.817,
      "step": 50
    },
    {
      "epoch": 1.0204081632653061,
      "grad_norm": 2.8748810291290283,
      "learning_rate": 4.744897959183674e-06,
      "loss": 0.5575,
      "step": 100
    },
    {
      "epoch": 1.0204081632653061,
      "eval_accuracy": 0.8102564102564103,
      "eval_loss": 0.4748656451702118,
      "eval_runtime": 1.104,
      "eval_samples_per_second": 176.629,
      "eval_steps_per_second": 11.775,
      "step": 100
    },
    {
      "epoch": 1.5306122448979593,
      "grad_norm": 2.5201377868652344,
      "learning_rate": 4.617346938775511e-06,
      "loss": 0.4066,
      "step": 150
    },
    {
      "epoch": 1.5306122448979593,
      "eval_accuracy": 0.8769230769230769,
      "eval_loss": 0.3459306061267853,
      "eval_runtime": 0.8488,
      "eval_samples_per_second": 229.747,
      "eval_steps_per_second": 15.316,
      "step": 150
    },
    {
      "epoch": 2.0408163265306123,
      "grad_norm": 4.418571949005127,
      "learning_rate": 4.489795918367348e-06,
      "loss": 0.3036,
      "step": 200
    },
    {
      "epoch": 2.0408163265306123,
      "eval_accuracy": 0.882051282051282,
      "eval_loss": 0.2921195328235626,
      "eval_runtime": 0.9044,
      "eval_samples_per_second": 215.622,
      "eval_steps_per_second": 14.375,
      "step": 200
    },
    {
      "epoch": 2.5510204081632653,
      "grad_norm": 5.748416423797607,
      "learning_rate": 4.362244897959184e-06,
      "loss": 0.269,
      "step": 250
    },
    {
      "epoch": 2.5510204081632653,
      "eval_accuracy": 0.8974358974358975,
      "eval_loss": 0.2766072154045105,
      "eval_runtime": 0.8653,
      "eval_samples_per_second": 225.36,
      "eval_steps_per_second": 15.024,
      "step": 250
    },
    {
      "epoch": 3.061224489795918,
      "grad_norm": 4.963164329528809,
      "learning_rate": 4.234693877551021e-06,
      "loss": 0.2127,
      "step": 300
    },
    {
      "epoch": 3.061224489795918,
      "eval_accuracy": 0.9179487179487179,
      "eval_loss": 0.258162260055542,
      "eval_runtime": 0.9368,
      "eval_samples_per_second": 208.163,
      "eval_steps_per_second": 13.878,
      "step": 300
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 10.84053897857666,
      "learning_rate": 4.107142857142857e-06,
      "loss": 0.2124,
      "step": 350
    },
    {
      "epoch": 3.571428571428571,
      "eval_accuracy": 0.8974358974358975,
      "eval_loss": 0.25368446111679077,
      "eval_runtime": 0.8378,
      "eval_samples_per_second": 232.739,
      "eval_steps_per_second": 15.516,
      "step": 350
    },
    {
      "epoch": 4.081632653061225,
      "grad_norm": 5.123897075653076,
      "learning_rate": 3.979591836734694e-06,
      "loss": 0.2242,
      "step": 400
    },
    {
      "epoch": 4.081632653061225,
      "eval_accuracy": 0.8923076923076924,
      "eval_loss": 0.2503947615623474,
      "eval_runtime": 0.9683,
      "eval_samples_per_second": 201.381,
      "eval_steps_per_second": 13.425,
      "step": 400
    },
    {
      "epoch": 4.591836734693878,
      "grad_norm": 2.4097230434417725,
      "learning_rate": 3.852040816326531e-06,
      "loss": 0.1759,
      "step": 450
    },
    {
      "epoch": 4.591836734693878,
      "eval_accuracy": 0.9076923076923077,
      "eval_loss": 0.24156133830547333,
      "eval_runtime": 0.8601,
      "eval_samples_per_second": 226.723,
      "eval_steps_per_second": 15.115,
      "step": 450
    },
    {
      "epoch": 5.1020408163265305,
      "grad_norm": 6.135026454925537,
      "learning_rate": 3.724489795918368e-06,
      "loss": 0.1813,
      "step": 500
    },
    {
      "epoch": 5.1020408163265305,
      "eval_accuracy": 0.9179487179487179,
      "eval_loss": 0.23144249618053436,
      "eval_runtime": 0.9317,
      "eval_samples_per_second": 209.289,
      "eval_steps_per_second": 13.953,
      "step": 500
    },
    {
      "epoch": 5.612244897959184,
      "grad_norm": 9.705278396606445,
      "learning_rate": 3.5969387755102043e-06,
      "loss": 0.1518,
      "step": 550
    },
    {
      "epoch": 5.612244897959184,
      "eval_accuracy": 0.9230769230769231,
      "eval_loss": 0.22450213134288788,
      "eval_runtime": 0.8405,
      "eval_samples_per_second": 232.006,
      "eval_steps_per_second": 15.467,
      "step": 550
    },
    {
      "epoch": 6.122448979591836,
      "grad_norm": 2.7998666763305664,
      "learning_rate": 3.469387755102041e-06,
      "loss": 0.1545,
      "step": 600
    },
    {
      "epoch": 6.122448979591836,
      "eval_accuracy": 0.9230769230769231,
      "eval_loss": 0.21868816018104553,
      "eval_runtime": 0.8426,
      "eval_samples_per_second": 231.425,
      "eval_steps_per_second": 15.428,
      "step": 600
    },
    {
      "epoch": 6.63265306122449,
      "grad_norm": 8.062090873718262,
      "learning_rate": 3.341836734693878e-06,
      "loss": 0.1517,
      "step": 650
    },
    {
      "epoch": 6.63265306122449,
      "eval_accuracy": 0.9282051282051282,
      "eval_loss": 0.2160363644361496,
      "eval_runtime": 0.8448,
      "eval_samples_per_second": 230.829,
      "eval_steps_per_second": 15.389,
      "step": 650
    },
    {
      "epoch": 7.142857142857143,
      "grad_norm": 5.271668910980225,
      "learning_rate": 3.2142857142857147e-06,
      "loss": 0.1617,
      "step": 700
    },
    {
      "epoch": 7.142857142857143,
      "eval_accuracy": 0.9384615384615385,
      "eval_loss": 0.20777937769889832,
      "eval_runtime": 0.8491,
      "eval_samples_per_second": 229.651,
      "eval_steps_per_second": 15.31,
      "step": 700
    },
    {
      "epoch": 7.653061224489796,
      "grad_norm": 2.642810821533203,
      "learning_rate": 3.086734693877551e-06,
      "loss": 0.1166,
      "step": 750
    },
    {
      "epoch": 7.653061224489796,
      "eval_accuracy": 0.9333333333333333,
      "eval_loss": 0.20337507128715515,
      "eval_runtime": 0.9559,
      "eval_samples_per_second": 203.995,
      "eval_steps_per_second": 13.6,
      "step": 750
    },
    {
      "epoch": 8.16326530612245,
      "grad_norm": 1.7500669956207275,
      "learning_rate": 2.959183673469388e-06,
      "loss": 0.1634,
      "step": 800
    },
    {
      "epoch": 8.16326530612245,
      "eval_accuracy": 0.9384615384615385,
      "eval_loss": 0.19573524594306946,
      "eval_runtime": 0.8288,
      "eval_samples_per_second": 235.271,
      "eval_steps_per_second": 15.685,
      "step": 800
    },
    {
      "epoch": 8.673469387755102,
      "grad_norm": 3.396003007888794,
      "learning_rate": 2.8316326530612247e-06,
      "loss": 0.1187,
      "step": 850
    },
    {
      "epoch": 8.673469387755102,
      "eval_accuracy": 0.9384615384615385,
      "eval_loss": 0.19067615270614624,
      "eval_runtime": 0.9462,
      "eval_samples_per_second": 206.094,
      "eval_steps_per_second": 13.74,
      "step": 850
    },
    {
      "epoch": 9.183673469387756,
      "grad_norm": 9.532454490661621,
      "learning_rate": 2.7040816326530615e-06,
      "loss": 0.1349,
      "step": 900
    },
    {
      "epoch": 9.183673469387756,
      "eval_accuracy": 0.9487179487179487,
      "eval_loss": 0.18987035751342773,
      "eval_runtime": 0.8345,
      "eval_samples_per_second": 233.672,
      "eval_steps_per_second": 15.578,
      "step": 900
    },
    {
      "epoch": 9.693877551020408,
      "grad_norm": 0.2874181568622589,
      "learning_rate": 2.576530612244898e-06,
      "loss": 0.1177,
      "step": 950
    },
    {
      "epoch": 9.693877551020408,
      "eval_accuracy": 0.9487179487179487,
      "eval_loss": 0.18763838708400726,
      "eval_runtime": 0.8969,
      "eval_samples_per_second": 217.409,
      "eval_steps_per_second": 14.494,
      "step": 950
    },
    {
      "epoch": 10.204081632653061,
      "grad_norm": 1.572355031967163,
      "learning_rate": 2.4489795918367347e-06,
      "loss": 0.1046,
      "step": 1000
    },
    {
      "epoch": 10.204081632653061,
      "eval_accuracy": 0.9487179487179487,
      "eval_loss": 0.1864548921585083,
      "eval_runtime": 0.8419,
      "eval_samples_per_second": 231.606,
      "eval_steps_per_second": 15.44,
      "step": 1000
    },
    {
      "epoch": 10.714285714285714,
      "grad_norm": 0.1485631763935089,
      "learning_rate": 2.321428571428572e-06,
      "loss": 0.0985,
      "step": 1050
    },
    {
      "epoch": 10.714285714285714,
      "eval_accuracy": 0.9487179487179487,
      "eval_loss": 0.19090034067630768,
      "eval_runtime": 0.8347,
      "eval_samples_per_second": 233.62,
      "eval_steps_per_second": 15.575,
      "step": 1050
    },
    {
      "epoch": 11.224489795918368,
      "grad_norm": 7.8412184715271,
      "learning_rate": 2.1938775510204083e-06,
      "loss": 0.1113,
      "step": 1100
    },
    {
      "epoch": 11.224489795918368,
      "eval_accuracy": 0.9384615384615385,
      "eval_loss": 0.19337205588817596,
      "eval_runtime": 0.879,
      "eval_samples_per_second": 221.845,
      "eval_steps_per_second": 14.79,
      "step": 1100
    },
    {
      "epoch": 11.73469387755102,
      "grad_norm": 10.776495933532715,
      "learning_rate": 2.066326530612245e-06,
      "loss": 0.1057,
      "step": 1150
    },
    {
      "epoch": 11.73469387755102,
      "eval_accuracy": 0.9538461538461539,
      "eval_loss": 0.18780122697353363,
      "eval_runtime": 0.902,
      "eval_samples_per_second": 216.18,
      "eval_steps_per_second": 14.412,
      "step": 1150
    },
    {
      "epoch": 12.244897959183673,
      "grad_norm": 4.360813140869141,
      "learning_rate": 1.938775510204082e-06,
      "loss": 0.1037,
      "step": 1200
    },
    {
      "epoch": 12.244897959183673,
      "eval_accuracy": 0.9487179487179487,
      "eval_loss": 0.2044650912284851,
      "eval_runtime": 0.9494,
      "eval_samples_per_second": 205.399,
      "eval_steps_per_second": 13.693,
      "step": 1200
    },
    {
      "epoch": 12.755102040816327,
      "grad_norm": 0.4971785545349121,
      "learning_rate": 1.8112244897959187e-06,
      "loss": 0.0828,
      "step": 1250
    },
    {
      "epoch": 12.755102040816327,
      "eval_accuracy": 0.9538461538461539,
      "eval_loss": 0.18574653565883636,
      "eval_runtime": 0.8198,
      "eval_samples_per_second": 237.876,
      "eval_steps_per_second": 15.858,
      "step": 1250
    },
    {
      "epoch": 13.26530612244898,
      "grad_norm": 8.4419584274292,
      "learning_rate": 1.6836734693877552e-06,
      "loss": 0.1279,
      "step": 1300
    },
    {
      "epoch": 13.26530612244898,
      "eval_accuracy": 0.9538461538461539,
      "eval_loss": 0.18369780480861664,
      "eval_runtime": 0.9156,
      "eval_samples_per_second": 212.964,
      "eval_steps_per_second": 14.198,
      "step": 1300
    }
  ],
  "logging_steps": 50,
  "max_steps": 1960,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "total_flos": 331983999062976.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}