{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 220,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022779043280182234,
      "grad_norm": 1.0761960744857788,
      "learning_rate": 2.181818181818182e-06,
      "loss": 1.4131,
      "step": 5
    },
    {
      "epoch": 0.04555808656036447,
      "grad_norm": 0.7671697735786438,
      "learning_rate": 4.90909090909091e-06,
      "loss": 1.3993,
      "step": 10
    },
    {
      "epoch": 0.0683371298405467,
      "grad_norm": 0.5325023531913757,
      "learning_rate": 7.636363636363636e-06,
      "loss": 1.3575,
      "step": 15
    },
    {
      "epoch": 0.09111617312072894,
      "grad_norm": 0.5349851846694946,
      "learning_rate": 1.0363636363636364e-05,
      "loss": 1.3727,
      "step": 20
    },
    {
      "epoch": 0.11389521640091116,
      "grad_norm": 0.5694962739944458,
      "learning_rate": 1.309090909090909e-05,
      "loss": 1.3057,
      "step": 25
    },
    {
      "epoch": 0.1366742596810934,
      "grad_norm": 0.541211724281311,
      "learning_rate": 1.5818181818181818e-05,
      "loss": 1.2344,
      "step": 30
    },
    {
      "epoch": 0.15945330296127563,
      "grad_norm": 0.5115331411361694,
      "learning_rate": 1.8545454545454545e-05,
      "loss": 1.3365,
      "step": 35
    },
    {
      "epoch": 0.18223234624145787,
      "grad_norm": 0.46041619777679443,
      "learning_rate": 2.1272727272727273e-05,
      "loss": 1.2517,
      "step": 40
    },
    {
      "epoch": 0.20501138952164008,
      "grad_norm": 0.4588950276374817,
      "learning_rate": 2.4e-05,
      "loss": 1.233,
      "step": 45
    },
    {
      "epoch": 0.22779043280182232,
      "grad_norm": 0.571067750453949,
      "learning_rate": 2.6727272727272728e-05,
      "loss": 1.1614,
      "step": 50
    },
    {
      "epoch": 0.2505694760820046,
      "grad_norm": 0.47105151414871216,
      "learning_rate": 2.9454545454545456e-05,
      "loss": 1.1789,
      "step": 55
    },
    {
      "epoch": 0.2733485193621868,
      "grad_norm": 0.5167170763015747,
      "learning_rate": 2.9998915465986464e-05,
      "loss": 1.1674,
      "step": 60
    },
    {
      "epoch": 0.296127562642369,
      "grad_norm": 0.5363845229148865,
      "learning_rate": 2.999450981533838e-05,
      "loss": 1.1646,
      "step": 65
    },
    {
      "epoch": 0.31890660592255127,
      "grad_norm": 0.5026624798774719,
      "learning_rate": 2.9986716259340288e-05,
      "loss": 1.1268,
      "step": 70
    },
    {
      "epoch": 0.3416856492027335,
      "grad_norm": 0.46400442719459534,
      "learning_rate": 2.9975536558892034e-05,
      "loss": 1.0501,
      "step": 75
    },
    {
      "epoch": 0.36446469248291574,
      "grad_norm": 0.6948593258857727,
      "learning_rate": 2.9960973239969295e-05,
      "loss": 1.0567,
      "step": 80
    },
    {
      "epoch": 0.38724373576309795,
      "grad_norm": 0.6658697724342346,
      "learning_rate": 2.9943029593052822e-05,
      "loss": 1.1199,
      "step": 85
    },
    {
      "epoch": 0.41002277904328016,
      "grad_norm": 0.5796182155609131,
      "learning_rate": 2.992170967238502e-05,
      "loss": 1.0407,
      "step": 90
    },
    {
      "epoch": 0.4328018223234624,
      "grad_norm": 0.5862748622894287,
      "learning_rate": 2.9897018295053883e-05,
      "loss": 1.0524,
      "step": 95
    },
    {
      "epoch": 0.45558086560364464,
      "grad_norm": 0.714149534702301,
      "learning_rate": 2.9868961039904628e-05,
      "loss": 1.0367,
      "step": 100
    },
    {
      "epoch": 0.4783599088838269,
      "grad_norm": 0.7176491618156433,
      "learning_rate": 2.983754424627919e-05,
      "loss": 1.0013,
      "step": 105
    },
    {
      "epoch": 0.5011389521640092,
      "grad_norm": 0.6981073617935181,
      "learning_rate": 2.9802775012583884e-05,
      "loss": 1.0079,
      "step": 110
    },
    {
      "epoch": 0.5239179954441914,
      "grad_norm": 0.5984258651733398,
      "learning_rate": 2.9764661194685583e-05,
      "loss": 0.975,
      "step": 115
    },
    {
      "epoch": 0.5466970387243736,
      "grad_norm": 0.7986994981765747,
      "learning_rate": 2.972321140413672e-05,
      "loss": 0.9417,
      "step": 120
    },
    {
      "epoch": 0.5694760820045558,
      "grad_norm": 0.6552079916000366,
      "learning_rate": 2.9678435006229585e-05,
      "loss": 0.9208,
      "step": 125
    },
    {
      "epoch": 0.592255125284738,
      "grad_norm": 0.7615071535110474,
      "learning_rate": 2.9630342117880293e-05,
      "loss": 0.9208,
      "step": 130
    },
    {
      "epoch": 0.6150341685649203,
      "grad_norm": 0.73598313331604,
      "learning_rate": 2.957894360534295e-05,
      "loss": 0.9459,
      "step": 135
    },
    {
      "epoch": 0.6378132118451025,
      "grad_norm": 0.9029515981674194,
      "learning_rate": 2.9524251081754475e-05,
      "loss": 0.9338,
      "step": 140
    },
    {
      "epoch": 0.6605922551252847,
      "grad_norm": 0.8563511967658997,
      "learning_rate": 2.9466276904510713e-05,
      "loss": 0.8437,
      "step": 145
    },
    {
      "epoch": 0.683371298405467,
      "grad_norm": 0.8966438174247742,
      "learning_rate": 2.9405034172474363e-05,
      "loss": 0.8995,
      "step": 150
    },
    {
      "epoch": 0.7061503416856492,
      "grad_norm": 0.9085351228713989,
      "learning_rate": 2.9340536723015367e-05,
      "loss": 0.8988,
      "step": 155
    },
    {
      "epoch": 0.7289293849658315,
      "grad_norm": 0.920749843120575,
      "learning_rate": 2.927279912888447e-05,
      "loss": 0.8457,
      "step": 160
    },
    {
      "epoch": 0.7517084282460137,
      "grad_norm": 0.844048023223877,
      "learning_rate": 2.920183669492061e-05,
      "loss": 0.8699,
      "step": 165
    },
    {
      "epoch": 0.7744874715261959,
      "grad_norm": 1.1362037658691406,
      "learning_rate": 2.9127665454592872e-05,
      "loss": 0.8561,
      "step": 170
    },
    {
      "epoch": 0.7972665148063781,
      "grad_norm": 0.9490585923194885,
      "learning_rate": 2.9050302166377858e-05,
      "loss": 0.8016,
      "step": 175
    },
    {
      "epoch": 0.8200455580865603,
      "grad_norm": 1.047890543937683,
      "learning_rate": 2.896976430997323e-05,
      "loss": 0.8047,
      "step": 180
    },
    {
      "epoch": 0.8428246013667426,
      "grad_norm": 1.252584457397461,
      "learning_rate": 2.8886070082348268e-05,
      "loss": 0.8087,
      "step": 185
    },
    {
      "epoch": 0.8656036446469249,
      "grad_norm": 1.0744786262512207,
      "learning_rate": 2.879923839363242e-05,
      "loss": 0.7823,
      "step": 190
    },
    {
      "epoch": 0.8883826879271071,
      "grad_norm": 0.9743952751159668,
      "learning_rate": 2.870928886284267e-05,
      "loss": 0.727,
      "step": 195
    },
    {
      "epoch": 0.9111617312072893,
      "grad_norm": 1.029543399810791,
      "learning_rate": 2.8616241813450755e-05,
      "loss": 0.7648,
      "step": 200
    },
    {
      "epoch": 0.9339407744874715,
      "grad_norm": 1.031134009361267,
      "learning_rate": 2.852011826879125e-05,
      "loss": 0.7444,
      "step": 205
    },
    {
      "epoch": 0.9567198177676538,
      "grad_norm": 1.1572555303573608,
      "learning_rate": 2.8420939947311454e-05,
      "loss": 0.7408,
      "step": 210
    },
    {
      "epoch": 0.979498861047836,
      "grad_norm": 1.0397342443466187,
      "learning_rate": 2.8318729257664265e-05,
      "loss": 0.7015,
      "step": 215
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.4852145910263062,
      "learning_rate": 2.821350929364512e-05,
      "loss": 0.6917,
      "step": 220
    }
  ],
  "logging_steps": 5,
  "max_steps": 1100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 2000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.180798646723543e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}