{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.013021237017516665,
  "eval_steps": 3,
  "global_step": 21,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006200589055960316,
      "grad_norm": 0.5875879526138306,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.179,
      "step": 1
    },
    {
      "epoch": 0.0012401178111920632,
      "grad_norm": 0.2559925317764282,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.9296,
      "step": 2
    },
    {
      "epoch": 0.0018601767167880949,
      "grad_norm": 0.27975356578826904,
      "learning_rate": 1.2e-05,
      "loss": 0.8212,
      "step": 3
    },
    {
      "epoch": 0.0018601767167880949,
      "eval_loss": 1.0227885246276855,
      "eval_runtime": 47.9985,
      "eval_samples_per_second": 2.083,
      "eval_steps_per_second": 2.083,
      "step": 3
    },
    {
      "epoch": 0.0024802356223841263,
      "grad_norm": 0.4077853858470917,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.9553,
      "step": 4
    },
    {
      "epoch": 0.0031002945279801583,
      "grad_norm": 0.3481099307537079,
      "learning_rate": 2e-05,
      "loss": 0.8071,
      "step": 5
    },
    {
      "epoch": 0.0037203534335761897,
      "grad_norm": 0.33538541197776794,
      "learning_rate": 1.9200000000000003e-05,
      "loss": 0.9046,
      "step": 6
    },
    {
      "epoch": 0.0037203534335761897,
      "eval_loss": 1.0122885704040527,
      "eval_runtime": 48.1202,
      "eval_samples_per_second": 2.078,
      "eval_steps_per_second": 2.078,
      "step": 6
    },
    {
      "epoch": 0.004340412339172222,
      "grad_norm": 0.2848486602306366,
      "learning_rate": 1.8400000000000003e-05,
      "loss": 0.8963,
      "step": 7
    },
    {
      "epoch": 0.004960471244768253,
      "grad_norm": 0.29836800694465637,
      "learning_rate": 1.76e-05,
      "loss": 0.9869,
      "step": 8
    },
    {
      "epoch": 0.005580530150364285,
      "grad_norm": 0.22438423335552216,
      "learning_rate": 1.6800000000000002e-05,
      "loss": 0.8704,
      "step": 9
    },
    {
      "epoch": 0.005580530150364285,
      "eval_loss": 0.996995210647583,
      "eval_runtime": 48.1511,
      "eval_samples_per_second": 2.077,
      "eval_steps_per_second": 2.077,
      "step": 9
    },
    {
      "epoch": 0.0062005890559603165,
      "grad_norm": 0.26462429761886597,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 1.0479,
      "step": 10
    },
    {
      "epoch": 0.0068206479615563476,
      "grad_norm": 0.2650766968727112,
      "learning_rate": 1.5200000000000002e-05,
      "loss": 0.8352,
      "step": 11
    },
    {
      "epoch": 0.0074407068671523795,
      "grad_norm": 0.3595620095729828,
      "learning_rate": 1.4400000000000001e-05,
      "loss": 1.1297,
      "step": 12
    },
    {
      "epoch": 0.0074407068671523795,
      "eval_loss": 0.9821679592132568,
      "eval_runtime": 48.205,
      "eval_samples_per_second": 2.074,
      "eval_steps_per_second": 2.074,
      "step": 12
    },
    {
      "epoch": 0.008060765772748411,
      "grad_norm": 0.2854643166065216,
      "learning_rate": 1.3600000000000002e-05,
      "loss": 0.9925,
      "step": 13
    },
    {
      "epoch": 0.008680824678344443,
      "grad_norm": 0.21441830694675446,
      "learning_rate": 1.2800000000000001e-05,
      "loss": 0.8125,
      "step": 14
    },
    {
      "epoch": 0.009300883583940475,
      "grad_norm": 0.21920695900917053,
      "learning_rate": 1.2e-05,
      "loss": 0.8518,
      "step": 15
    },
    {
      "epoch": 0.009300883583940475,
      "eval_loss": 0.9693244695663452,
      "eval_runtime": 47.8177,
      "eval_samples_per_second": 2.091,
      "eval_steps_per_second": 2.091,
      "step": 15
    },
    {
      "epoch": 0.009920942489536505,
      "grad_norm": 0.20094920694828033,
      "learning_rate": 1.1200000000000001e-05,
      "loss": 0.8003,
      "step": 16
    },
    {
      "epoch": 0.010541001395132537,
      "grad_norm": 0.21595342457294464,
      "learning_rate": 1.04e-05,
      "loss": 0.8492,
      "step": 17
    },
    {
      "epoch": 0.01116106030072857,
      "grad_norm": 0.20084670186042786,
      "learning_rate": 9.600000000000001e-06,
      "loss": 0.8238,
      "step": 18
    },
    {
      "epoch": 0.01116106030072857,
      "eval_loss": 0.9594716429710388,
      "eval_runtime": 47.8775,
      "eval_samples_per_second": 2.089,
      "eval_steps_per_second": 2.089,
      "step": 18
    },
    {
      "epoch": 0.011781119206324601,
      "grad_norm": 0.22359193861484528,
      "learning_rate": 8.8e-06,
      "loss": 0.8032,
      "step": 19
    },
    {
      "epoch": 0.012401178111920633,
      "grad_norm": 0.16142494976520538,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.6508,
      "step": 20
    },
    {
      "epoch": 0.013021237017516665,
      "grad_norm": 0.2063056379556656,
      "learning_rate": 7.2000000000000005e-06,
      "loss": 0.9097,
      "step": 21
    },
    {
      "epoch": 0.013021237017516665,
      "eval_loss": 0.9518673419952393,
      "eval_runtime": 48.16,
      "eval_samples_per_second": 2.076,
      "eval_steps_per_second": 2.076,
      "step": 21
    }
  ],
  "logging_steps": 1,
  "max_steps": 30,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.561430264020992e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}