{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 0,
  "global_step": 17,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.058823529411764705,
      "grad_norm": 27.23881721496582,
      "learning_rate": 1e-05,
      "loss": 2.4516,
      "step": 1
    },
    {
      "epoch": 0.11764705882352941,
      "grad_norm": 13.277213096618652,
      "learning_rate": 9.91486549841951e-06,
      "loss": 1.821,
      "step": 2
    },
    {
      "epoch": 0.17647058823529413,
      "grad_norm": 8.688852310180664,
      "learning_rate": 9.66236114702178e-06,
      "loss": 1.3026,
      "step": 3
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 6.421550750732422,
      "learning_rate": 9.251085678648072e-06,
      "loss": 1.0329,
      "step": 4
    },
    {
      "epoch": 0.29411764705882354,
      "grad_norm": 4.951260566711426,
      "learning_rate": 8.695044586103297e-06,
      "loss": 1.1477,
      "step": 5
    },
    {
      "epoch": 0.35294117647058826,
      "grad_norm": 4.49473762512207,
      "learning_rate": 8.013173181896283e-06,
      "loss": 0.8971,
      "step": 6
    },
    {
      "epoch": 0.4117647058823529,
      "grad_norm": 2.0649447441101074,
      "learning_rate": 7.2286917788826926e-06,
      "loss": 0.9146,
      "step": 7
    },
    {
      "epoch": 0.47058823529411764,
      "grad_norm": 1.6849682331085205,
      "learning_rate": 6.368314950360416e-06,
      "loss": 0.7985,
      "step": 8
    },
    {
      "epoch": 0.5294117647058824,
      "grad_norm": 2.4519593715667725,
      "learning_rate": 5.46134179731651e-06,
      "loss": 0.994,
      "step": 9
    },
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 1.6040832996368408,
      "learning_rate": 4.53865820268349e-06,
      "loss": 0.8472,
      "step": 10
    },
    {
      "epoch": 0.6470588235294118,
      "grad_norm": 1.4992567300796509,
      "learning_rate": 3.6316850496395863e-06,
      "loss": 0.8321,
      "step": 11
    },
    {
      "epoch": 0.7058823529411765,
      "grad_norm": 1.186579704284668,
      "learning_rate": 2.771308221117309e-06,
      "loss": 0.7723,
      "step": 12
    },
    {
      "epoch": 0.7647058823529411,
      "grad_norm": 0.9862204194068909,
      "learning_rate": 1.9868268181037186e-06,
      "loss": 0.7068,
      "step": 13
    },
    {
      "epoch": 0.8235294117647058,
      "grad_norm": 1.18026602268219,
      "learning_rate": 1.3049554138967052e-06,
      "loss": 0.811,
      "step": 14
    },
    {
      "epoch": 0.8823529411764706,
      "grad_norm": 0.8639672994613647,
      "learning_rate": 7.489143213519301e-07,
      "loss": 0.8248,
      "step": 15
    },
    {
      "epoch": 0.9411764705882353,
      "grad_norm": 1.0533088445663452,
      "learning_rate": 3.3763885297822153e-07,
      "loss": 0.8128,
      "step": 16
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.8004978895187378,
      "learning_rate": 8.513450158049109e-08,
      "loss": 0.734,
      "step": 17
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 17,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 0,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0006846020937318e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}