{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 264,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07575757575757576,
      "grad_norm": 0.12508434057235718,
      "learning_rate": 4.829545454545455e-05,
      "loss": 2.6859,
      "step": 10
    },
    {
      "epoch": 0.15151515151515152,
      "grad_norm": 0.1292639672756195,
      "learning_rate": 4.6401515151515154e-05,
      "loss": 2.6835,
      "step": 20
    },
    {
      "epoch": 0.22727272727272727,
      "grad_norm": 0.1345479041337967,
      "learning_rate": 4.450757575757576e-05,
      "loss": 2.5523,
      "step": 30
    },
    {
      "epoch": 0.30303030303030304,
      "grad_norm": 0.17236831784248352,
      "learning_rate": 4.261363636363637e-05,
      "loss": 2.663,
      "step": 40
    },
    {
      "epoch": 0.3787878787878788,
      "grad_norm": 0.16812169551849365,
      "learning_rate": 4.071969696969698e-05,
      "loss": 2.6315,
      "step": 50
    },
    {
      "epoch": 0.45454545454545453,
      "grad_norm": 0.16568826138973236,
      "learning_rate": 3.8825757575757574e-05,
      "loss": 2.4627,
      "step": 60
    },
    {
      "epoch": 0.5303030303030303,
      "grad_norm": 0.22369684278964996,
      "learning_rate": 3.6931818181818184e-05,
      "loss": 2.5355,
      "step": 70
    },
    {
      "epoch": 0.6060606060606061,
      "grad_norm": 0.20267294347286224,
      "learning_rate": 3.5037878787878794e-05,
      "loss": 2.4826,
      "step": 80
    },
    {
      "epoch": 0.6818181818181818,
      "grad_norm": 0.1907360702753067,
      "learning_rate": 3.314393939393939e-05,
      "loss": 2.4922,
      "step": 90
    },
    {
      "epoch": 0.7575757575757576,
      "grad_norm": 0.18876223266124725,
      "learning_rate": 3.125e-05,
      "loss": 2.4565,
      "step": 100
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 0.19660402834415436,
      "learning_rate": 2.935606060606061e-05,
      "loss": 2.4516,
      "step": 110
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 0.18837645649909973,
      "learning_rate": 2.746212121212121e-05,
      "loss": 2.4103,
      "step": 120
    },
    {
      "epoch": 0.9848484848484849,
      "grad_norm": 0.1770673394203186,
      "learning_rate": 2.5568181818181817e-05,
      "loss": 2.405,
      "step": 130
    },
    {
      "epoch": 1.0606060606060606,
      "grad_norm": 0.18203772604465485,
      "learning_rate": 2.3674242424242428e-05,
      "loss": 2.4113,
      "step": 140
    },
    {
      "epoch": 1.1363636363636362,
      "grad_norm": 0.18428196012973785,
      "learning_rate": 2.178030303030303e-05,
      "loss": 2.417,
      "step": 150
    },
    {
      "epoch": 1.2121212121212122,
      "grad_norm": 0.21885260939598083,
      "learning_rate": 1.9886363636363638e-05,
      "loss": 2.4421,
      "step": 160
    },
    {
      "epoch": 1.2878787878787878,
      "grad_norm": 0.18416795134544373,
      "learning_rate": 1.7992424242424244e-05,
      "loss": 2.3115,
      "step": 170
    },
    {
      "epoch": 1.3636363636363638,
      "grad_norm": 0.22103261947631836,
      "learning_rate": 1.6098484848484848e-05,
      "loss": 2.3108,
      "step": 180
    },
    {
      "epoch": 1.4393939393939394,
      "grad_norm": 0.20306836068630219,
      "learning_rate": 1.4204545454545456e-05,
      "loss": 2.3548,
      "step": 190
    },
    {
      "epoch": 1.5151515151515151,
      "grad_norm": 0.20151984691619873,
      "learning_rate": 1.2310606060606061e-05,
      "loss": 2.3711,
      "step": 200
    },
    {
      "epoch": 1.5909090909090908,
      "grad_norm": 0.20869004726409912,
      "learning_rate": 1.0416666666666668e-05,
      "loss": 2.352,
      "step": 210
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.24070830643177032,
      "learning_rate": 8.522727272727273e-06,
      "loss": 2.2323,
      "step": 220
    },
    {
      "epoch": 1.7424242424242424,
      "grad_norm": 0.24936740100383759,
      "learning_rate": 6.628787878787879e-06,
      "loss": 2.2505,
      "step": 230
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 0.21129560470581055,
      "learning_rate": 4.734848484848485e-06,
      "loss": 2.2958,
      "step": 240
    },
    {
      "epoch": 1.893939393939394,
      "grad_norm": 0.1849289834499359,
      "learning_rate": 2.840909090909091e-06,
      "loss": 2.2544,
      "step": 250
    },
    {
      "epoch": 1.9696969696969697,
      "grad_norm": 0.1718548685312271,
      "learning_rate": 9.469696969696971e-07,
      "loss": 2.286,
      "step": 260
    }
  ],
  "logging_steps": 10,
  "max_steps": 264,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 30,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3535405225869312.0,
  "train_batch_size": 10,
  "trial_name": null,
  "trial_params": null
}