{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.931506849315069,
  "eval_steps": 500,
  "global_step": 180,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "grad_norm": 227.0,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 25.8078,
      "step": 1
    },
    {
      "epoch": 0.14,
      "grad_norm": 191.0,
      "learning_rate": 5.555555555555556e-05,
      "loss": 29.6305,
      "step": 5
    },
    {
      "epoch": 0.27,
      "grad_norm": 27.75,
      "learning_rate": 0.00011111111111111112,
      "loss": 17.0724,
      "step": 10
    },
    {
      "epoch": 0.41,
      "grad_norm": 13.875,
      "learning_rate": 0.0001666666666666667,
      "loss": 13.9336,
      "step": 15
    },
    {
      "epoch": 0.55,
      "grad_norm": 8.0,
      "learning_rate": 0.00019992479525042303,
      "loss": 12.2567,
      "step": 20
    },
    {
      "epoch": 0.68,
      "grad_norm": 25.125,
      "learning_rate": 0.00019908004033648453,
      "loss": 9.9926,
      "step": 25
    },
    {
      "epoch": 0.82,
      "grad_norm": 4.3125,
      "learning_rate": 0.00019730448705798239,
      "loss": 3.3192,
      "step": 30
    },
    {
      "epoch": 0.96,
      "grad_norm": 1.9375,
      "learning_rate": 0.00019461481568757506,
      "loss": 1.5404,
      "step": 35
    },
    {
      "epoch": 0.99,
      "eval_loss": 1.504757046699524,
      "eval_runtime": 1.175,
      "eval_samples_per_second": 1.702,
      "eval_steps_per_second": 0.851,
      "step": 36
    },
    {
      "epoch": 1.1,
      "grad_norm": 1.4453125,
      "learning_rate": 0.0001910362940966147,
      "loss": 1.3549,
      "step": 40
    },
    {
      "epoch": 1.23,
      "grad_norm": 1.3984375,
      "learning_rate": 0.00018660254037844388,
      "loss": 1.1182,
      "step": 45
    },
    {
      "epoch": 1.37,
      "grad_norm": 2.296875,
      "learning_rate": 0.00018135520702629675,
      "loss": 1.0879,
      "step": 50
    },
    {
      "epoch": 1.51,
      "grad_norm": 0.70703125,
      "learning_rate": 0.00017534358963276607,
      "loss": 0.9894,
      "step": 55
    },
    {
      "epoch": 1.64,
      "grad_norm": 4.03125,
      "learning_rate": 0.0001686241637868734,
      "loss": 1.0143,
      "step": 60
    },
    {
      "epoch": 1.78,
      "grad_norm": 0.609375,
      "learning_rate": 0.0001612600545193203,
      "loss": 0.941,
      "step": 65
    },
    {
      "epoch": 1.92,
      "grad_norm": 1.40625,
      "learning_rate": 0.00015332044328016914,
      "loss": 0.9147,
      "step": 70
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.232730746269226,
      "eval_runtime": 1.0872,
      "eval_samples_per_second": 1.84,
      "eval_steps_per_second": 0.92,
      "step": 73
    },
    {
      "epoch": 2.05,
      "grad_norm": 1.171875,
      "learning_rate": 0.00014487991802004623,
      "loss": 0.8644,
      "step": 75
    },
    {
      "epoch": 2.19,
      "grad_norm": 1.75,
      "learning_rate": 0.00013601777248047105,
      "loss": 0.8647,
      "step": 80
    },
    {
      "epoch": 2.33,
      "grad_norm": 2.140625,
      "learning_rate": 0.00012681726127606376,
      "loss": 0.7439,
      "step": 85
    },
    {
      "epoch": 2.47,
      "grad_norm": 1.5078125,
      "learning_rate": 0.00011736481776669306,
      "loss": 0.7543,
      "step": 90
    },
    {
      "epoch": 2.6,
      "grad_norm": 2.78125,
      "learning_rate": 0.0001077492420671931,
      "loss": 0.768,
      "step": 95
    },
    {
      "epoch": 2.74,
      "grad_norm": 0.98046875,
      "learning_rate": 9.806086682281758e-05,
      "loss": 0.7375,
      "step": 100
    },
    {
      "epoch": 2.88,
      "grad_norm": 1.8203125,
      "learning_rate": 8.839070858747697e-05,
      "loss": 0.7658,
      "step": 105
    },
    {
      "epoch": 2.99,
      "eval_loss": 1.1765708923339844,
      "eval_runtime": 1.0876,
      "eval_samples_per_second": 1.839,
      "eval_steps_per_second": 0.919,
      "step": 109
    },
    {
      "epoch": 3.01,
      "grad_norm": 1.09375,
      "learning_rate": 7.882961277705895e-05,
      "loss": 0.7233,
      "step": 110
    },
    {
      "epoch": 3.15,
      "grad_norm": 6.0,
      "learning_rate": 6.94674002304887e-05,
      "loss": 0.6432,
      "step": 115
    },
    {
      "epoch": 3.29,
      "grad_norm": 0.66796875,
      "learning_rate": 6.039202339608432e-05,
      "loss": 0.6253,
      "step": 120
    },
    {
      "epoch": 3.42,
      "grad_norm": 0.6015625,
      "learning_rate": 5.168874007033615e-05,
      "loss": 0.6256,
      "step": 125
    },
    {
      "epoch": 3.56,
      "grad_norm": 0.9453125,
      "learning_rate": 4.343931245134616e-05,
      "loss": 0.6338,
      "step": 130
    },
    {
      "epoch": 3.7,
      "grad_norm": 1.203125,
      "learning_rate": 3.5721239031346066e-05,
      "loss": 0.5985,
      "step": 135
    },
    {
      "epoch": 3.84,
      "grad_norm": 0.6328125,
      "learning_rate": 2.8607026544210114e-05,
      "loss": 0.6229,
      "step": 140
    },
    {
      "epoch": 3.97,
      "grad_norm": 0.61328125,
      "learning_rate": 2.2163508807583998e-05,
      "loss": 0.6657,
      "step": 145
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.166442632675171,
      "eval_runtime": 1.1331,
      "eval_samples_per_second": 1.765,
      "eval_steps_per_second": 0.883,
      "step": 146
    },
    {
      "epoch": 4.11,
      "grad_norm": 0.5546875,
      "learning_rate": 1.6451218858706374e-05,
      "loss": 0.6001,
      "step": 150
    },
    {
      "epoch": 4.25,
      "grad_norm": 0.5703125,
      "learning_rate": 1.1523820282334219e-05,
      "loss": 0.5512,
      "step": 155
    },
    {
      "epoch": 4.38,
      "grad_norm": 0.60546875,
      "learning_rate": 7.427603073110967e-06,
      "loss": 0.5623,
      "step": 160
    },
    {
      "epoch": 4.52,
      "grad_norm": 0.5390625,
      "learning_rate": 4.20104876845111e-06,
      "loss": 0.5772,
      "step": 165
    },
    {
      "epoch": 4.66,
      "grad_norm": 0.55078125,
      "learning_rate": 1.874468937261531e-06,
      "loss": 0.5458,
      "step": 170
    },
    {
      "epoch": 4.79,
      "grad_norm": 0.640625,
      "learning_rate": 4.6972042068341714e-07,
      "loss": 0.5804,
      "step": 175
    },
    {
      "epoch": 4.93,
      "grad_norm": 0.51953125,
      "learning_rate": 0.0,
      "loss": 0.5601,
      "step": 180
    },
    {
      "epoch": 4.93,
      "eval_loss": 1.1870766878128052,
      "eval_runtime": 1.0847,
      "eval_samples_per_second": 1.844,
      "eval_steps_per_second": 0.922,
      "step": 180
    },
    {
      "epoch": 4.93,
      "step": 180,
      "total_flos": 1.3804098843312128e+17,
      "train_loss": 3.0282001667552523,
      "train_runtime": 1292.0645,
      "train_samples_per_second": 1.13,
      "train_steps_per_second": 0.139
    }
  ],
  "logging_steps": 5,
  "max_steps": 180,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 100,
  "total_flos": 1.3804098843312128e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}