{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9904761904761905,
  "eval_steps": 500,
  "global_step": 195,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0761904761904762,
      "grad_norm": 0.2635799050331116,
      "learning_rate": 4.9918932703355256e-05,
      "loss": 0.3154,
      "num_input_tokens_seen": 211280,
      "step": 5
    },
    {
      "epoch": 0.1523809523809524,
      "grad_norm": 0.2279728353023529,
      "learning_rate": 4.967625656594782e-05,
      "loss": 0.2409,
      "num_input_tokens_seen": 419136,
      "step": 10
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 0.22582456469535828,
      "learning_rate": 4.92735454356513e-05,
      "loss": 0.1711,
      "num_input_tokens_seen": 631632,
      "step": 15
    },
    {
      "epoch": 0.3047619047619048,
      "grad_norm": 0.14574189484119415,
      "learning_rate": 4.8713411048678635e-05,
      "loss": 0.1228,
      "num_input_tokens_seen": 836896,
      "step": 20
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 0.12243571132421494,
      "learning_rate": 4.799948609147061e-05,
      "loss": 0.0945,
      "num_input_tokens_seen": 1042416,
      "step": 25
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 0.12587733566761017,
      "learning_rate": 4.713640064133025e-05,
      "loss": 0.0873,
      "num_input_tokens_seen": 1244384,
      "step": 30
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 0.09762891381978989,
      "learning_rate": 4.6129752138594874e-05,
      "loss": 0.0635,
      "num_input_tokens_seen": 1445168,
      "step": 35
    },
    {
      "epoch": 0.6095238095238096,
      "grad_norm": 0.10953030735254288,
      "learning_rate": 4.498606908508754e-05,
      "loss": 0.0784,
      "num_input_tokens_seen": 1651760,
      "step": 40
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 0.12629230320453644,
      "learning_rate": 4.371276870427753e-05,
      "loss": 0.0634,
      "num_input_tokens_seen": 1849920,
      "step": 45
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 0.12405364215373993,
      "learning_rate": 4.231810883773999e-05,
      "loss": 0.0715,
      "num_input_tokens_seen": 2056592,
      "step": 50
    },
    {
      "epoch": 0.8380952380952381,
      "grad_norm": 0.07898106426000595,
      "learning_rate": 4.0811134389884433e-05,
      "loss": 0.0588,
      "num_input_tokens_seen": 2265056,
      "step": 55
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 0.10841359943151474,
      "learning_rate": 3.920161866827889e-05,
      "loss": 0.0634,
      "num_input_tokens_seen": 2471328,
      "step": 60
    },
    {
      "epoch": 0.9904761904761905,
      "grad_norm": 0.1293160319328308,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.0582,
      "num_input_tokens_seen": 2676640,
      "step": 65
    },
    {
      "epoch": 1.0761904761904761,
      "grad_norm": 0.11754991114139557,
      "learning_rate": 3.5717314035076355e-05,
      "loss": 0.0687,
      "num_input_tokens_seen": 2897712,
      "step": 70
    },
    {
      "epoch": 1.1523809523809523,
      "grad_norm": 0.10079122334718704,
      "learning_rate": 3.386512217606339e-05,
      "loss": 0.0539,
      "num_input_tokens_seen": 3089808,
      "step": 75
    },
    {
      "epoch": 1.2285714285714286,
      "grad_norm": 0.10278293490409851,
      "learning_rate": 3.195543659791132e-05,
      "loss": 0.0545,
      "num_input_tokens_seen": 3296576,
      "step": 80
    },
    {
      "epoch": 1.3047619047619048,
      "grad_norm": 0.10142832249403,
      "learning_rate": 3.0000642344401113e-05,
      "loss": 0.0446,
      "num_input_tokens_seen": 3508416,
      "step": 85
    },
    {
      "epoch": 1.380952380952381,
      "grad_norm": 0.21511930227279663,
      "learning_rate": 2.8013417006383076e-05,
      "loss": 0.0535,
      "num_input_tokens_seen": 3706512,
      "step": 90
    },
    {
      "epoch": 1.457142857142857,
      "grad_norm": 0.10380080342292786,
      "learning_rate": 2.600664850273538e-05,
      "loss": 0.0488,
      "num_input_tokens_seen": 3913120,
      "step": 95
    },
    {
      "epoch": 1.5333333333333332,
      "grad_norm": 0.11608559638261795,
      "learning_rate": 2.399335149726463e-05,
      "loss": 0.044,
      "num_input_tokens_seen": 4121248,
      "step": 100
    },
    {
      "epoch": 1.6095238095238096,
      "grad_norm": 0.11765897274017334,
      "learning_rate": 2.1986582993616926e-05,
      "loss": 0.0479,
      "num_input_tokens_seen": 4327824,
      "step": 105
    },
    {
      "epoch": 1.6857142857142857,
      "grad_norm": 0.0989590659737587,
      "learning_rate": 1.9999357655598893e-05,
      "loss": 0.0441,
      "num_input_tokens_seen": 4537936,
      "step": 110
    },
    {
      "epoch": 1.7619047619047619,
      "grad_norm": 0.1432141810655594,
      "learning_rate": 1.8044563402088684e-05,
      "loss": 0.0428,
      "num_input_tokens_seen": 4744432,
      "step": 115
    },
    {
      "epoch": 1.8380952380952382,
      "grad_norm": 0.08667314797639847,
      "learning_rate": 1.613487782393661e-05,
      "loss": 0.0425,
      "num_input_tokens_seen": 4955296,
      "step": 120
    },
    {
      "epoch": 1.9142857142857141,
      "grad_norm": 0.12118078023195267,
      "learning_rate": 1.4282685964923642e-05,
      "loss": 0.0437,
      "num_input_tokens_seen": 5154784,
      "step": 125
    },
    {
      "epoch": 1.9904761904761905,
      "grad_norm": 0.09234391897916794,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.039,
      "num_input_tokens_seen": 5358160,
      "step": 130
    },
    {
      "epoch": 2.0761904761904764,
      "grad_norm": 0.08275337517261505,
      "learning_rate": 1.0798381331721109e-05,
      "loss": 0.0465,
      "num_input_tokens_seen": 5577808,
      "step": 135
    },
    {
      "epoch": 2.1523809523809523,
      "grad_norm": 0.08790527284145355,
      "learning_rate": 9.18886561011557e-06,
      "loss": 0.0451,
      "num_input_tokens_seen": 5775552,
      "step": 140
    },
    {
      "epoch": 2.2285714285714286,
      "grad_norm": 0.0865679457783699,
      "learning_rate": 7.681891162260015e-06,
      "loss": 0.0385,
      "num_input_tokens_seen": 5986112,
      "step": 145
    },
    {
      "epoch": 2.3047619047619046,
      "grad_norm": 0.09322898089885712,
      "learning_rate": 6.28723129572247e-06,
      "loss": 0.0506,
      "num_input_tokens_seen": 6185568,
      "step": 150
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 0.13761946558952332,
      "learning_rate": 5.013930914912476e-06,
      "loss": 0.0471,
      "num_input_tokens_seen": 6389392,
      "step": 155
    },
    {
      "epoch": 2.4571428571428573,
      "grad_norm": 0.1628292351961136,
      "learning_rate": 3.8702478614051355e-06,
      "loss": 0.0383,
      "num_input_tokens_seen": 6594256,
      "step": 160
    },
    {
      "epoch": 2.533333333333333,
      "grad_norm": 0.09071007370948792,
      "learning_rate": 2.8635993586697553e-06,
      "loss": 0.036,
      "num_input_tokens_seen": 6802240,
      "step": 165
    },
    {
      "epoch": 2.6095238095238096,
      "grad_norm": 0.09868919104337692,
      "learning_rate": 2.0005139085293945e-06,
      "loss": 0.0381,
      "num_input_tokens_seen": 7004816,
      "step": 170
    },
    {
      "epoch": 2.685714285714286,
      "grad_norm": 0.11255531758069992,
      "learning_rate": 1.286588951321363e-06,
      "loss": 0.0415,
      "num_input_tokens_seen": 7208608,
      "step": 175
    },
    {
      "epoch": 2.761904761904762,
      "grad_norm": 0.09013009071350098,
      "learning_rate": 7.264545643486997e-07,
      "loss": 0.0397,
      "num_input_tokens_seen": 7418320,
      "step": 180
    },
    {
      "epoch": 2.8380952380952382,
      "grad_norm": 0.10500302165746689,
      "learning_rate": 3.237434340521789e-07,
      "loss": 0.0421,
      "num_input_tokens_seen": 7633248,
      "step": 185
    },
    {
      "epoch": 2.914285714285714,
      "grad_norm": 0.09550434350967407,
      "learning_rate": 8.106729664475176e-08,
      "loss": 0.0378,
      "num_input_tokens_seen": 7838576,
      "step": 190
    },
    {
      "epoch": 2.9904761904761905,
      "grad_norm": 0.10578631609678268,
      "learning_rate": 0.0,
      "loss": 0.047,
      "num_input_tokens_seen": 8039616,
      "step": 195
    },
    {
      "epoch": 2.9904761904761905,
      "num_input_tokens_seen": 8039616,
      "step": 195,
      "total_flos": 3.746792787278561e+17,
      "train_loss": 0.06834620604148278,
      "train_runtime": 3970.1665,
      "train_samples_per_second": 0.793,
      "train_steps_per_second": 0.049
    }
  ],
  "logging_steps": 5,
  "max_steps": 195,
  "num_input_tokens_seen": 8039616,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.746792787278561e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}