{
  "best_metric": 2.166038751602173,
  "best_model_checkpoint": "/content/models/EleutherAI-pythia-1-4b-deduped/checkpoint-976",
  "epoch": 5.0,
  "eval_steps": 122,
  "global_step": 2435,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.25,
      "grad_norm": 6.129247188568115,
      "learning_rate": 4.7494866529774126e-05,
      "loss": 2.4532,
      "step": 122
    },
    {
      "epoch": 0.25,
      "eval_loss": 2.365628957748413,
      "eval_runtime": 4.5744,
      "eval_samples_per_second": 44.815,
      "eval_steps_per_second": 5.684,
      "step": 122
    },
    {
      "epoch": 0.5,
      "grad_norm": 5.094282627105713,
      "learning_rate": 4.498973305954826e-05,
      "loss": 2.2648,
      "step": 244
    },
    {
      "epoch": 0.5,
      "eval_loss": 2.2924389839172363,
      "eval_runtime": 4.5814,
      "eval_samples_per_second": 44.746,
      "eval_steps_per_second": 5.675,
      "step": 244
    },
    {
      "epoch": 0.75,
      "grad_norm": 4.384838104248047,
      "learning_rate": 4.248459958932238e-05,
      "loss": 2.181,
      "step": 366
    },
    {
      "epoch": 0.75,
      "eval_loss": 2.2324371337890625,
      "eval_runtime": 4.5735,
      "eval_samples_per_second": 44.824,
      "eval_steps_per_second": 5.685,
      "step": 366
    },
    {
      "epoch": 1.0,
      "grad_norm": 4.523687839508057,
      "learning_rate": 3.997946611909651e-05,
      "loss": 2.1045,
      "step": 488
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.1804869174957275,
      "eval_runtime": 4.5794,
      "eval_samples_per_second": 44.766,
      "eval_steps_per_second": 5.678,
      "step": 488
    },
    {
      "epoch": 1.25,
      "grad_norm": 4.680737495422363,
      "learning_rate": 3.7474332648870635e-05,
      "loss": 1.787,
      "step": 610
    },
    {
      "epoch": 1.25,
      "eval_loss": 2.2023770809173584,
      "eval_runtime": 4.5821,
      "eval_samples_per_second": 44.739,
      "eval_steps_per_second": 5.674,
      "step": 610
    },
    {
      "epoch": 1.5,
      "grad_norm": 4.793215751647949,
      "learning_rate": 3.4969199178644766e-05,
      "loss": 1.7681,
      "step": 732
    },
    {
      "epoch": 1.5,
      "eval_loss": 2.1931827068328857,
      "eval_runtime": 4.5812,
      "eval_samples_per_second": 44.748,
      "eval_steps_per_second": 5.675,
      "step": 732
    },
    {
      "epoch": 1.75,
      "grad_norm": 4.8442912101745605,
      "learning_rate": 3.2464065708418896e-05,
      "loss": 1.7573,
      "step": 854
    },
    {
      "epoch": 1.75,
      "eval_loss": 2.1689414978027344,
      "eval_runtime": 4.5805,
      "eval_samples_per_second": 44.755,
      "eval_steps_per_second": 5.676,
      "step": 854
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.6165990829467773,
      "learning_rate": 2.995893223819302e-05,
      "loss": 1.7568,
      "step": 976
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.166038751602173,
      "eval_runtime": 4.5798,
      "eval_samples_per_second": 44.761,
      "eval_steps_per_second": 5.677,
      "step": 976
    },
    {
      "epoch": 2.25,
      "grad_norm": 5.473954677581787,
      "learning_rate": 2.7453798767967147e-05,
      "loss": 1.1741,
      "step": 1098
    },
    {
      "epoch": 2.25,
      "eval_loss": 2.411022663116455,
      "eval_runtime": 4.5809,
      "eval_samples_per_second": 44.751,
      "eval_steps_per_second": 5.676,
      "step": 1098
    },
    {
      "epoch": 2.51,
      "grad_norm": 5.729491233825684,
      "learning_rate": 2.4948665297741274e-05,
      "loss": 1.2087,
      "step": 1220
    },
    {
      "epoch": 2.51,
      "eval_loss": 2.4238908290863037,
      "eval_runtime": 4.6456,
      "eval_samples_per_second": 44.128,
      "eval_steps_per_second": 5.597,
      "step": 1220
    },
    {
      "epoch": 2.76,
      "grad_norm": 12.393416404724121,
      "learning_rate": 2.24435318275154e-05,
      "loss": 1.2292,
      "step": 1342
    },
    {
      "epoch": 2.76,
      "eval_loss": 2.4039969444274902,
      "eval_runtime": 4.6482,
      "eval_samples_per_second": 44.103,
      "eval_steps_per_second": 5.594,
      "step": 1342
    },
    {
      "epoch": 3.01,
      "grad_norm": 4.7261643409729,
      "learning_rate": 1.993839835728953e-05,
      "loss": 1.1914,
      "step": 1464
    },
    {
      "epoch": 3.01,
      "eval_loss": 2.4631035327911377,
      "eval_runtime": 4.6443,
      "eval_samples_per_second": 44.14,
      "eval_steps_per_second": 5.598,
      "step": 1464
    },
    {
      "epoch": 3.26,
      "grad_norm": 6.772904872894287,
      "learning_rate": 1.7433264887063656e-05,
      "loss": 0.6141,
      "step": 1586
    },
    {
      "epoch": 3.26,
      "eval_loss": 2.874403953552246,
      "eval_runtime": 4.6544,
      "eval_samples_per_second": 44.045,
      "eval_steps_per_second": 5.586,
      "step": 1586
    },
    {
      "epoch": 3.51,
      "grad_norm": 6.660802364349365,
      "learning_rate": 1.4928131416837782e-05,
      "loss": 0.599,
      "step": 1708
    },
    {
      "epoch": 3.51,
      "eval_loss": 2.881758451461792,
      "eval_runtime": 4.6621,
      "eval_samples_per_second": 43.972,
      "eval_steps_per_second": 5.577,
      "step": 1708
    },
    {
      "epoch": 3.76,
      "grad_norm": 5.723119735717773,
      "learning_rate": 1.242299794661191e-05,
      "loss": 0.58,
      "step": 1830
    },
    {
      "epoch": 3.76,
      "eval_loss": 2.8967392444610596,
      "eval_runtime": 4.644,
      "eval_samples_per_second": 44.143,
      "eval_steps_per_second": 5.599,
      "step": 1830
    },
    {
      "epoch": 4.01,
      "grad_norm": 3.4636168479919434,
      "learning_rate": 9.917864476386038e-06,
      "loss": 0.5557,
      "step": 1952
    },
    {
      "epoch": 4.01,
      "eval_loss": 2.9618570804595947,
      "eval_runtime": 4.6496,
      "eval_samples_per_second": 44.09,
      "eval_steps_per_second": 5.592,
      "step": 1952
    },
    {
      "epoch": 4.26,
      "grad_norm": 4.451182842254639,
      "learning_rate": 7.412731006160165e-06,
      "loss": 0.2127,
      "step": 2074
    },
    {
      "epoch": 4.26,
      "eval_loss": 3.1594464778900146,
      "eval_runtime": 4.6569,
      "eval_samples_per_second": 44.021,
      "eval_steps_per_second": 5.583,
      "step": 2074
    },
    {
      "epoch": 4.51,
      "grad_norm": 4.451918601989746,
      "learning_rate": 4.907597535934292e-06,
      "loss": 0.2068,
      "step": 2196
    },
    {
      "epoch": 4.51,
      "eval_loss": 3.1875267028808594,
      "eval_runtime": 4.6447,
      "eval_samples_per_second": 44.137,
      "eval_steps_per_second": 5.598,
      "step": 2196
    },
    {
      "epoch": 4.76,
      "grad_norm": 5.166147232055664,
      "learning_rate": 2.402464065708419e-06,
      "loss": 0.1983,
      "step": 2318
    },
    {
      "epoch": 4.76,
      "eval_loss": 3.2017271518707275,
      "eval_runtime": 4.6518,
      "eval_samples_per_second": 44.069,
      "eval_steps_per_second": 5.589,
      "step": 2318
    },
    {
      "epoch": 5.0,
      "step": 2435,
      "total_flos": 7348458970152960.0,
      "train_loss": 1.2034366779993202,
      "train_runtime": 2011.972,
      "train_samples_per_second": 3.626,
      "train_steps_per_second": 1.21
    }
  ],
  "logging_steps": 122,
  "max_steps": 2435,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 122,
  "total_flos": 7348458970152960.0,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null
}
|
|