{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.982264665757162,
  "eval_steps": 500,
  "global_step": 135,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10914051841746249,
      "grad_norm": 6.1484925203115175,
      "learning_rate": 7.1428571428571436e-06,
      "loss": 1.0901,
      "step": 5
    },
    {
      "epoch": 0.21828103683492497,
      "grad_norm": 2.324275786006338,
      "learning_rate": 9.765625e-06,
      "loss": 0.9406,
      "step": 10
    },
    {
      "epoch": 0.3274215552523875,
      "grad_norm": 0.6415172627953729,
      "learning_rate": 9.375000000000001e-06,
      "loss": 0.7957,
      "step": 15
    },
    {
      "epoch": 0.43656207366984995,
      "grad_norm": 0.4845948931255211,
      "learning_rate": 8.984375000000002e-06,
      "loss": 0.7404,
      "step": 20
    },
    {
      "epoch": 0.5457025920873124,
      "grad_norm": 0.3598623633567546,
      "learning_rate": 8.59375e-06,
      "loss": 0.7189,
      "step": 25
    },
    {
      "epoch": 0.654843110504775,
      "grad_norm": 0.30031722762348606,
      "learning_rate": 8.203125000000001e-06,
      "loss": 0.6963,
      "step": 30
    },
    {
      "epoch": 0.7639836289222374,
      "grad_norm": 0.2330482524880791,
      "learning_rate": 7.8125e-06,
      "loss": 0.6861,
      "step": 35
    },
    {
      "epoch": 0.8731241473396999,
      "grad_norm": 0.21252046989518256,
      "learning_rate": 7.421875000000001e-06,
      "loss": 0.6751,
      "step": 40
    },
    {
      "epoch": 0.9822646657571623,
      "grad_norm": 0.18439306771068548,
      "learning_rate": 7.031250000000001e-06,
      "loss": 0.6671,
      "step": 45
    },
    {
      "epoch": 1.1091405184174625,
      "grad_norm": 0.21673658085207242,
      "learning_rate": 6.6406250000000005e-06,
      "loss": 0.7934,
      "step": 50
    },
    {
      "epoch": 1.218281036834925,
      "grad_norm": 0.17692058577888592,
      "learning_rate": 6.25e-06,
      "loss": 0.6539,
      "step": 55
    },
    {
      "epoch": 1.3274215552523874,
      "grad_norm": 0.15664201090967206,
      "learning_rate": 5.859375e-06,
      "loss": 0.6488,
      "step": 60
    },
    {
      "epoch": 1.43656207366985,
      "grad_norm": 0.16039341987077393,
      "learning_rate": 5.468750000000001e-06,
      "loss": 0.647,
      "step": 65
    },
    {
      "epoch": 1.5457025920873124,
      "grad_norm": 0.1553501322829955,
      "learning_rate": 5.078125000000001e-06,
      "loss": 0.6406,
      "step": 70
    },
    {
      "epoch": 1.654843110504775,
      "grad_norm": 0.15129870114630797,
      "learning_rate": 4.6875000000000004e-06,
      "loss": 0.6414,
      "step": 75
    },
    {
      "epoch": 1.7639836289222375,
      "grad_norm": 0.1504937417964536,
      "learning_rate": 4.296875e-06,
      "loss": 0.636,
      "step": 80
    },
    {
      "epoch": 1.8731241473396998,
      "grad_norm": 0.15448731815105268,
      "learning_rate": 3.90625e-06,
      "loss": 0.6341,
      "step": 85
    },
    {
      "epoch": 1.9822646657571623,
      "grad_norm": 0.14809996235477288,
      "learning_rate": 3.5156250000000003e-06,
      "loss": 0.6382,
      "step": 90
    },
    {
      "epoch": 2.1091405184174623,
      "grad_norm": 0.1441343536579204,
      "learning_rate": 3.125e-06,
      "loss": 0.759,
      "step": 95
    },
    {
      "epoch": 2.218281036834925,
      "grad_norm": 0.15235408715037718,
      "learning_rate": 2.7343750000000004e-06,
      "loss": 0.6286,
      "step": 100
    },
    {
      "epoch": 2.3274215552523874,
      "grad_norm": 0.14978591450309872,
      "learning_rate": 2.3437500000000002e-06,
      "loss": 0.6299,
      "step": 105
    },
    {
      "epoch": 2.43656207366985,
      "grad_norm": 0.1546941133240721,
      "learning_rate": 1.953125e-06,
      "loss": 0.624,
      "step": 110
    },
    {
      "epoch": 2.5457025920873124,
      "grad_norm": 0.1514364862595183,
      "learning_rate": 1.5625e-06,
      "loss": 0.6263,
      "step": 115
    },
    {
      "epoch": 2.6548431105047747,
      "grad_norm": 0.15180937374140135,
      "learning_rate": 1.1718750000000001e-06,
      "loss": 0.6243,
      "step": 120
    },
    {
      "epoch": 2.7639836289222375,
      "grad_norm": 0.14125409946816633,
      "learning_rate": 7.8125e-07,
      "loss": 0.6256,
      "step": 125
    },
    {
      "epoch": 2.8731241473397,
      "grad_norm": 0.14292141087504207,
      "learning_rate": 3.90625e-07,
      "loss": 0.6263,
      "step": 130
    },
    {
      "epoch": 2.982264665757162,
      "grad_norm": 0.13803009452826046,
      "learning_rate": 0.0,
      "loss": 0.6246,
      "step": 135
    },
    {
      "epoch": 2.982264665757162,
      "step": 135,
      "total_flos": 247159395975168.0,
      "train_loss": 0.6930544199766936,
      "train_runtime": 2626.8342,
      "train_samples_per_second": 107.049,
      "train_steps_per_second": 0.051
    }
  ],
  "logging_steps": 5,
  "max_steps": 135,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 247159395975168.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}