{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.588235294117647,
  "eval_steps": 500,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09803921568627451,
      "grad_norm": 1.4192003011703491,
      "learning_rate": 5.0000000000000004e-08,
      "loss": 1.0705,
      "step": 1
    },
    {
      "epoch": 0.19607843137254902,
      "grad_norm": 1.474342703819275,
      "learning_rate": 1.0000000000000001e-07,
      "loss": 1.0765,
      "step": 2
    },
    {
      "epoch": 0.29411764705882354,
      "grad_norm": 1.4589208364486694,
      "learning_rate": 1.5000000000000002e-07,
      "loss": 1.0742,
      "step": 3
    },
    {
      "epoch": 0.39215686274509803,
      "grad_norm": 1.4159084558486938,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 1.0596,
      "step": 4
    },
    {
      "epoch": 0.49019607843137253,
      "grad_norm": 1.4747488498687744,
      "learning_rate": 2.5000000000000004e-07,
      "loss": 1.1013,
      "step": 5
    },
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 1.4644396305084229,
      "learning_rate": 3.0000000000000004e-07,
      "loss": 1.0602,
      "step": 6
    },
    {
      "epoch": 0.6862745098039216,
      "grad_norm": 1.438894510269165,
      "learning_rate": 3.5000000000000004e-07,
      "loss": 1.0712,
      "step": 7
    },
    {
      "epoch": 0.7843137254901961,
      "grad_norm": 1.4930068254470825,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 1.1223,
      "step": 8
    },
    {
      "epoch": 0.8823529411764706,
      "grad_norm": 1.4825564622879028,
      "learning_rate": 4.5000000000000003e-07,
      "loss": 1.0844,
      "step": 9
    },
    {
      "epoch": 0.9803921568627451,
      "grad_norm": 1.4220027923583984,
      "learning_rate": 5.000000000000001e-07,
      "loss": 1.0679,
      "step": 10
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.4220027923583984,
      "learning_rate": 5.5e-07,
      "loss": 1.1218,
      "step": 11
    },
    {
      "epoch": 1.0980392156862746,
      "grad_norm": 2.6347763538360596,
      "learning_rate": 6.000000000000001e-07,
      "loss": 1.0891,
      "step": 12
    },
    {
      "epoch": 1.196078431372549,
      "grad_norm": 1.4003781080245972,
      "learning_rate": 6.5e-07,
      "loss": 1.0658,
      "step": 13
    },
    {
      "epoch": 1.2941176470588236,
      "grad_norm": 1.3445370197296143,
      "learning_rate": 7.000000000000001e-07,
      "loss": 1.0734,
      "step": 14
    },
    {
      "epoch": 1.392156862745098,
      "grad_norm": 1.3266352415084839,
      "learning_rate": 7.5e-07,
      "loss": 1.0526,
      "step": 15
    },
    {
      "epoch": 1.4901960784313726,
      "grad_norm": 1.3781611919403076,
      "learning_rate": 8.000000000000001e-07,
      "loss": 1.0934,
      "step": 16
    },
    {
      "epoch": 1.5882352941176472,
      "grad_norm": 1.2347468137741089,
      "learning_rate": 8.500000000000001e-07,
      "loss": 1.0504,
      "step": 17
    },
    {
      "epoch": 1.6862745098039216,
      "grad_norm": 1.1824800968170166,
      "learning_rate": 9.000000000000001e-07,
      "loss": 1.0538,
      "step": 18
    },
    {
      "epoch": 1.784313725490196,
      "grad_norm": 1.1078494787216187,
      "learning_rate": 9.500000000000001e-07,
      "loss": 1.0615,
      "step": 19
    },
    {
      "epoch": 1.8823529411764706,
      "grad_norm": 1.069743037223816,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 1.0463,
      "step": 20
    },
    {
      "epoch": 1.9803921568627452,
      "grad_norm": 1.0157521963119507,
      "learning_rate": 1.0500000000000001e-06,
      "loss": 1.0517,
      "step": 21
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.0157521963119507,
      "learning_rate": 1.1e-06,
      "loss": 0.9878,
      "step": 22
    },
    {
      "epoch": 2.0980392156862746,
      "grad_norm": 1.621635913848877,
      "learning_rate": 1.1500000000000002e-06,
      "loss": 1.0295,
      "step": 23
    },
    {
      "epoch": 2.196078431372549,
      "grad_norm": 0.8550276756286621,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 1.0034,
      "step": 24
    },
    {
      "epoch": 2.2941176470588234,
      "grad_norm": 0.7927238941192627,
      "learning_rate": 1.25e-06,
      "loss": 1.0505,
      "step": 25
    },
    {
      "epoch": 2.392156862745098,
      "grad_norm": 0.75254225730896,
      "learning_rate": 1.3e-06,
      "loss": 1.013,
      "step": 26
    },
    {
      "epoch": 2.4901960784313726,
      "grad_norm": 0.6724694967269897,
      "learning_rate": 1.3500000000000002e-06,
      "loss": 1.0046,
      "step": 27
    },
    {
      "epoch": 2.588235294117647,
      "grad_norm": 0.6393210291862488,
      "learning_rate": 1.4000000000000001e-06,
      "loss": 1.0386,
      "step": 28
    },
    {
      "epoch": 2.686274509803922,
      "grad_norm": 0.6001161336898804,
      "learning_rate": 1.45e-06,
      "loss": 1.0182,
      "step": 29
    },
    {
      "epoch": 2.784313725490196,
      "grad_norm": 0.5612940788269043,
      "learning_rate": 1.5e-06,
      "loss": 1.0176,
      "step": 30
    },
    {
      "epoch": 2.8823529411764706,
      "grad_norm": 0.5290960073471069,
      "learning_rate": 1.5500000000000002e-06,
      "loss": 1.0033,
      "step": 31
    },
    {
      "epoch": 2.980392156862745,
      "grad_norm": 0.5201172232627869,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.9783,
      "step": 32
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.5201172232627869,
      "learning_rate": 1.6500000000000003e-06,
      "loss": 0.9665,
      "step": 33
    },
    {
      "epoch": 3.0980392156862746,
      "grad_norm": 0.9337953925132751,
      "learning_rate": 1.7000000000000002e-06,
      "loss": 1.0076,
      "step": 34
    },
    {
      "epoch": 3.196078431372549,
      "grad_norm": 0.4734288156032562,
      "learning_rate": 1.75e-06,
      "loss": 0.9988,
      "step": 35
    },
    {
      "epoch": 3.2941176470588234,
      "grad_norm": 0.476301372051239,
      "learning_rate": 1.8000000000000001e-06,
      "loss": 0.9936,
      "step": 36
    },
    {
      "epoch": 3.392156862745098,
      "grad_norm": 0.45640113949775696,
      "learning_rate": 1.85e-06,
      "loss": 0.9691,
      "step": 37
    },
    {
      "epoch": 3.4901960784313726,
      "grad_norm": 0.4331822693347931,
      "learning_rate": 1.9000000000000002e-06,
      "loss": 0.9833,
      "step": 38
    },
    {
      "epoch": 3.588235294117647,
      "grad_norm": 0.42620590329170227,
      "learning_rate": 1.9500000000000004e-06,
      "loss": 0.9741,
      "step": 39
    },
    {
      "epoch": 3.686274509803922,
      "grad_norm": 0.40999871492385864,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.9658,
      "step": 40
    },
    {
      "epoch": 3.784313725490196,
      "grad_norm": 0.41157710552215576,
      "learning_rate": 2.05e-06,
      "loss": 0.9523,
      "step": 41
    },
    {
      "epoch": 3.8823529411764706,
      "grad_norm": 0.3891652822494507,
      "learning_rate": 2.1000000000000002e-06,
      "loss": 0.9338,
      "step": 42
    },
    {
      "epoch": 3.980392156862745,
      "grad_norm": 0.35877248644828796,
      "learning_rate": 2.15e-06,
      "loss": 0.9807,
      "step": 43
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.35877248644828796,
      "learning_rate": 2.2e-06,
      "loss": 0.9931,
      "step": 44
    },
    {
      "epoch": 4.098039215686274,
      "grad_norm": 0.713307797908783,
      "learning_rate": 2.25e-06,
      "loss": 0.9579,
      "step": 45
    },
    {
      "epoch": 4.196078431372549,
      "grad_norm": 0.33867117762565613,
      "learning_rate": 2.3000000000000004e-06,
      "loss": 0.9524,
      "step": 46
    },
    {
      "epoch": 4.294117647058823,
      "grad_norm": 0.32721665501594543,
      "learning_rate": 2.35e-06,
      "loss": 0.9497,
      "step": 47
    },
    {
      "epoch": 4.392156862745098,
      "grad_norm": 0.31279879808425903,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.9235,
      "step": 48
    },
    {
      "epoch": 4.490196078431373,
      "grad_norm": 0.29656586050987244,
      "learning_rate": 2.4500000000000003e-06,
      "loss": 0.9663,
      "step": 49
    },
    {
      "epoch": 4.588235294117647,
      "grad_norm": 0.29088088870048523,
      "learning_rate": 2.5e-06,
      "loss": 0.9475,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 60,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.494623005722542e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|