{
  "best_metric": 1.1743316650390625,
  "best_model_checkpoint": "/gscratch/stf/seunguk/dipika/olmo-code-sft/train/result_outputs/13b_py23_mix_10k_normal/allenai_OLMo-2-1124-13B-Instruct/r64_lr1.5e-05/checkpoint-429",
  "epoch": 2.9861224489795917,
  "eval_steps": 39,
  "global_step": 459,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006530612244897959,
      "grad_norm": 7.480628490447998,
      "learning_rate": 3.0000000000000004e-07,
      "loss": 15.3347,
      "step": 1
    },
    {
      "epoch": 0.0653061224489796,
      "grad_norm": 7.086081504821777,
      "learning_rate": 3e-06,
      "loss": 16.3175,
      "step": 10
    },
    {
      "epoch": 0.1306122448979592,
      "grad_norm": 6.222248077392578,
      "learning_rate": 6e-06,
      "loss": 16.0446,
      "step": 20
    },
    {
      "epoch": 0.19591836734693877,
      "grad_norm": 5.059980392456055,
      "learning_rate": 9e-06,
      "loss": 14.2873,
      "step": 30
    },
    {
      "epoch": 0.2546938775510204,
      "eval_loss": 1.4279829263687134,
      "eval_runtime": 10.3422,
      "eval_samples_per_second": 9.669,
      "eval_steps_per_second": 1.257,
      "step": 39
    },
    {
      "epoch": 0.2612244897959184,
      "grad_norm": 2.272027015686035,
      "learning_rate": 1.2e-05,
      "loss": 12.1039,
      "step": 40
    },
    {
      "epoch": 0.32653061224489793,
      "grad_norm": 1.4014880657196045,
      "learning_rate": 1.5e-05,
      "loss": 10.8603,
      "step": 50
    },
    {
      "epoch": 0.39183673469387753,
      "grad_norm": 1.2100738286972046,
      "learning_rate": 1.4977885819099278e-05,
      "loss": 10.3019,
      "step": 60
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 0.9245325922966003,
      "learning_rate": 1.4911673686262952e-05,
      "loss": 10.208,
      "step": 70
    },
    {
      "epoch": 0.5093877551020408,
      "eval_loss": 1.2185193300247192,
      "eval_runtime": 10.2655,
      "eval_samples_per_second": 9.741,
      "eval_steps_per_second": 1.266,
      "step": 78
    },
    {
      "epoch": 0.5224489795918368,
      "grad_norm": 1.0301765203475952,
      "learning_rate": 1.4801754062046587e-05,
      "loss": 9.9344,
      "step": 80
    },
    {
      "epoch": 0.5877551020408164,
      "grad_norm": 0.9953351020812988,
      "learning_rate": 1.4648775155104705e-05,
      "loss": 9.8284,
      "step": 90
    },
    {
      "epoch": 0.6530612244897959,
      "grad_norm": 0.9446775913238525,
      "learning_rate": 1.4453639099629869e-05,
      "loss": 9.8364,
      "step": 100
    },
    {
      "epoch": 0.7183673469387755,
      "grad_norm": 0.8912749290466309,
      "learning_rate": 1.4217496635363684e-05,
      "loss": 9.8114,
      "step": 110
    },
    {
      "epoch": 0.7640816326530612,
      "eval_loss": 1.1940735578536987,
      "eval_runtime": 10.2644,
      "eval_samples_per_second": 9.742,
      "eval_steps_per_second": 1.267,
      "step": 117
    },
    {
      "epoch": 0.7836734693877551,
      "grad_norm": 0.8845490217208862,
      "learning_rate": 1.3941740321552318e-05,
      "loss": 9.7705,
      "step": 120
    },
    {
      "epoch": 0.8489795918367347,
      "grad_norm": 0.9408980011940002,
      "learning_rate": 1.3627996324864611e-05,
      "loss": 9.6758,
      "step": 130
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 1.036733865737915,
      "learning_rate": 1.3278114829700362e-05,
      "loss": 9.7541,
      "step": 140
    },
    {
      "epoch": 0.9795918367346939,
      "grad_norm": 1.0284887552261353,
      "learning_rate": 1.2894159127440344e-05,
      "loss": 9.4729,
      "step": 150
    },
    {
      "epoch": 1.0130612244897959,
      "eval_loss": 1.1869996786117554,
      "eval_runtime": 10.2878,
      "eval_samples_per_second": 9.72,
      "eval_steps_per_second": 1.264,
      "step": 156
    },
    {
      "epoch": 1.0391836734693878,
      "grad_norm": 0.9137061238288879,
      "learning_rate": 1.2478393448979922e-05,
      "loss": 8.7202,
      "step": 160
    },
    {
      "epoch": 1.1044897959183673,
      "grad_norm": 1.0339038372039795,
      "learning_rate": 1.2033269612299312e-05,
      "loss": 9.5115,
      "step": 170
    },
    {
      "epoch": 1.169795918367347,
      "grad_norm": 1.0830352306365967,
      "learning_rate": 1.1561412563811198e-05,
      "loss": 9.5251,
      "step": 180
    },
    {
      "epoch": 1.2351020408163265,
      "grad_norm": 1.210103988647461,
      "learning_rate": 1.1065604898750127e-05,
      "loss": 9.6413,
      "step": 190
    },
    {
      "epoch": 1.2677551020408164,
      "eval_loss": 1.1819005012512207,
      "eval_runtime": 10.2481,
      "eval_samples_per_second": 9.758,
      "eval_steps_per_second": 1.269,
      "step": 195
    },
    {
      "epoch": 1.3004081632653062,
      "grad_norm": 1.1372894048690796,
      "learning_rate": 1.0548770451888665e-05,
      "loss": 9.3296,
      "step": 200
    },
    {
      "epoch": 1.3657142857142857,
      "grad_norm": 1.081299901008606,
      "learning_rate": 1.0013957055347779e-05,
      "loss": 9.5166,
      "step": 210
    },
    {
      "epoch": 1.4310204081632654,
      "grad_norm": 1.2336442470550537,
      "learning_rate": 9.464318565180596e-06,
      "loss": 9.5273,
      "step": 220
    },
    {
      "epoch": 1.4963265306122449,
      "grad_norm": 1.3089706897735596,
      "learning_rate": 8.903096262720867e-06,
      "loss": 9.6093,
      "step": 230
    },
    {
      "epoch": 1.5224489795918368,
      "eval_loss": 1.1787770986557007,
      "eval_runtime": 10.2953,
      "eval_samples_per_second": 9.713,
      "eval_steps_per_second": 1.263,
      "step": 234
    },
    {
      "epoch": 1.5616326530612246,
      "grad_norm": 1.2751823663711548,
      "learning_rate": 8.333599740374487e-06,
      "loss": 9.5248,
      "step": 240
    },
    {
      "epoch": 1.626938775510204,
      "grad_norm": 1.049634575843811,
      "learning_rate": 7.75918738457279e-06,
      "loss": 9.3375,
      "step": 250
    },
    {
      "epoch": 1.6922448979591835,
      "grad_norm": 1.1573090553283691,
      "learning_rate": 7.183246570981859e-06,
      "loss": 9.488,
      "step": 260
    },
    {
      "epoch": 1.7575510204081632,
      "grad_norm": 1.1605769395828247,
      "learning_rate": 6.609173688758989e-06,
      "loss": 9.456,
      "step": 270
    },
    {
      "epoch": 1.7771428571428571,
      "eval_loss": 1.1771632432937622,
      "eval_runtime": 10.3065,
      "eval_samples_per_second": 9.703,
      "eval_steps_per_second": 1.261,
      "step": 273
    },
    {
      "epoch": 1.822857142857143,
      "grad_norm": 1.0704485177993774,
      "learning_rate": 6.0403541116555636e-06,
      "loss": 9.6155,
      "step": 280
    },
    {
      "epoch": 1.8881632653061224,
      "grad_norm": 1.5852781534194946,
      "learning_rate": 5.480142234079027e-06,
      "loss": 9.4657,
      "step": 290
    },
    {
      "epoch": 1.953469387755102,
      "grad_norm": 1.571425437927246,
      "learning_rate": 4.9318416898436404e-06,
      "loss": 9.5001,
      "step": 300
    },
    {
      "epoch": 2.013061224489796,
      "grad_norm": 1.1965842247009277,
      "learning_rate": 4.398685870262254e-06,
      "loss": 8.5998,
      "step": 310
    },
    {
      "epoch": 2.0261224489795917,
      "eval_loss": 1.1757442951202393,
      "eval_runtime": 10.261,
      "eval_samples_per_second": 9.746,
      "eval_steps_per_second": 1.267,
      "step": 312
    },
    {
      "epoch": 2.0783673469387756,
      "grad_norm": 1.2646270990371704,
      "learning_rate": 3.883818856466194e-06,
      "loss": 9.2906,
      "step": 320
    },
    {
      "epoch": 2.143673469387755,
      "grad_norm": 1.1598410606384277,
      "learning_rate": 3.390276878397574e-06,
      "loss": 9.339,
      "step": 330
    },
    {
      "epoch": 2.2089795918367345,
      "grad_norm": 1.4158833026885986,
      "learning_rate": 2.9209704098124204e-06,
      "loss": 9.5879,
      "step": 340
    },
    {
      "epoch": 2.2742857142857145,
      "grad_norm": 1.4064640998840332,
      "learning_rate": 2.47866700488251e-06,
      "loss": 9.1889,
      "step": 350
    },
    {
      "epoch": 2.280816326530612,
      "eval_loss": 1.1747936010360718,
      "eval_runtime": 10.2783,
      "eval_samples_per_second": 9.729,
      "eval_steps_per_second": 1.265,
      "step": 351
    },
    {
      "epoch": 2.339591836734694,
      "grad_norm": 1.3816022872924805,
      "learning_rate": 2.0659749776104147e-06,
      "loss": 9.1374,
      "step": 360
    },
    {
      "epoch": 2.4048979591836734,
      "grad_norm": 1.2557651996612549,
      "learning_rate": 1.6853280203020998e-06,
      "loss": 9.4392,
      "step": 370
    },
    {
      "epoch": 2.470204081632653,
      "grad_norm": 1.3306424617767334,
      "learning_rate": 1.3389708518037574e-06,
      "loss": 9.3487,
      "step": 380
    },
    {
      "epoch": 2.535510204081633,
      "grad_norm": 1.1443352699279785,
      "learning_rate": 1.0289459801368406e-06,
      "loss": 9.4177,
      "step": 390
    },
    {
      "epoch": 2.535510204081633,
      "eval_loss": 1.1745020151138306,
      "eval_runtime": 10.2669,
      "eval_samples_per_second": 9.74,
      "eval_steps_per_second": 1.266,
      "step": 390
    },
    {
      "epoch": 2.6008163265306123,
      "grad_norm": 1.2281705141067505,
      "learning_rate": 7.570816575935527e-07,
      "loss": 9.3764,
      "step": 400
    },
    {
      "epoch": 2.666122448979592,
      "grad_norm": 1.2790119647979736,
      "learning_rate": 5.249810993230036e-07,
      "loss": 9.4371,
      "step": 410
    },
    {
      "epoch": 2.7314285714285713,
      "grad_norm": 1.3635507822036743,
      "learning_rate": 3.3401302898726395e-07,
      "loss": 9.4412,
      "step": 420
    },
    {
      "epoch": 2.790204081632653,
      "eval_loss": 1.1743316650390625,
      "eval_runtime": 10.2825,
      "eval_samples_per_second": 9.725,
      "eval_steps_per_second": 1.264,
      "step": 429
    },
    {
      "epoch": 2.796734693877551,
      "grad_norm": 1.5212961435317993,
      "learning_rate": 1.853036072406436e-07,
      "loss": 9.2948,
      "step": 430
    },
    {
      "epoch": 2.8620408163265307,
      "grad_norm": 1.4816234111785889,
      "learning_rate": 7.972979063091468e-08,
      "loss": 9.5499,
      "step": 440
    },
    {
      "epoch": 2.92734693877551,
      "grad_norm": 1.343505620956421,
      "learning_rate": 1.7914160085782116e-08,
      "loss": 9.3699,
      "step": 450
    }
  ],
  "logging_steps": 10,
  "max_steps": 459,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 39,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 10,
        "early_stopping_threshold": 0.001
      },
      "attributes": {
        "early_stopping_patience_counter": 3
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.675106681184322e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}