{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 327,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09216589861751152,
      "grad_norm": 4.813406044219536,
      "learning_rate": 2.7272727272727272e-06,
      "loss": 1.0405,
      "step": 10
    },
    {
      "epoch": 0.18433179723502305,
      "grad_norm": 3.345813622526326,
      "learning_rate": 5.7575757575757586e-06,
      "loss": 0.8743,
      "step": 20
    },
    {
      "epoch": 0.2764976958525346,
      "grad_norm": 2.6033242291153043,
      "learning_rate": 8.787878787878788e-06,
      "loss": 0.7349,
      "step": 30
    },
    {
      "epoch": 0.3686635944700461,
      "grad_norm": 2.2863115914441297,
      "learning_rate": 9.989726963751683e-06,
      "loss": 0.6659,
      "step": 40
    },
    {
      "epoch": 0.4608294930875576,
      "grad_norm": 1.9600220783574478,
      "learning_rate": 9.927100106776213e-06,
      "loss": 0.6552,
      "step": 50
    },
    {
      "epoch": 0.5529953917050692,
      "grad_norm": 2.083222914335424,
      "learning_rate": 9.808267184205182e-06,
      "loss": 0.6342,
      "step": 60
    },
    {
      "epoch": 0.6451612903225806,
      "grad_norm": 1.934166098672146,
      "learning_rate": 9.63458378673011e-06,
      "loss": 0.6231,
      "step": 70
    },
    {
      "epoch": 0.7373271889400922,
      "grad_norm": 2.0320677586555442,
      "learning_rate": 9.408031213740045e-06,
      "loss": 0.6379,
      "step": 80
    },
    {
      "epoch": 0.8294930875576036,
      "grad_norm": 1.762386820568039,
      "learning_rate": 9.131193871579975e-06,
      "loss": 0.6172,
      "step": 90
    },
    {
      "epoch": 0.9216589861751152,
      "grad_norm": 2.3287514936815636,
      "learning_rate": 8.807229791845673e-06,
      "loss": 0.61,
      "step": 100
    },
    {
      "epoch": 1.0092165898617511,
      "grad_norm": 1.9587757019311305,
      "learning_rate": 8.439834606028594e-06,
      "loss": 0.5518,
      "step": 110
    },
    {
      "epoch": 1.1013824884792627,
      "grad_norm": 2.040723898419053,
      "learning_rate": 8.033199387471278e-06,
      "loss": 0.4937,
      "step": 120
    },
    {
      "epoch": 1.1935483870967742,
      "grad_norm": 2.0478555793736994,
      "learning_rate": 7.591962841552627e-06,
      "loss": 0.4606,
      "step": 130
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 1.549664911700247,
      "learning_rate": 7.121158389495187e-06,
      "loss": 0.437,
      "step": 140
    },
    {
      "epoch": 1.3778801843317972,
      "grad_norm": 1.741081014100674,
      "learning_rate": 6.626156749437736e-06,
      "loss": 0.4504,
      "step": 150
    },
    {
      "epoch": 1.4700460829493087,
      "grad_norm": 1.6376309277667895,
      "learning_rate": 6.112604669781572e-06,
      "loss": 0.4344,
      "step": 160
    },
    {
      "epoch": 1.5622119815668203,
      "grad_norm": 1.82869067685848,
      "learning_rate": 5.586360513712011e-06,
      "loss": 0.4316,
      "step": 170
    },
    {
      "epoch": 1.6543778801843319,
      "grad_norm": 1.653596930112548,
      "learning_rate": 5.053427429716867e-06,
      "loss": 0.4355,
      "step": 180
    },
    {
      "epoch": 1.7465437788018434,
      "grad_norm": 1.7262059807509964,
      "learning_rate": 4.5198848704615915e-06,
      "loss": 0.4458,
      "step": 190
    },
    {
      "epoch": 1.838709677419355,
      "grad_norm": 1.9825171755467725,
      "learning_rate": 3.991819241221836e-06,
      "loss": 0.4712,
      "step": 200
    },
    {
      "epoch": 1.9308755760368663,
      "grad_norm": 2.864225291883103,
      "learning_rate": 3.475254469003865e-06,
      "loss": 0.462,
      "step": 210
    },
    {
      "epoch": 2.0184331797235022,
      "grad_norm": 2.4466235742871714,
      "learning_rate": 2.976083284388031e-06,
      "loss": 0.3874,
      "step": 220
    },
    {
      "epoch": 2.110599078341014,
      "grad_norm": 1.6714740269383614,
      "learning_rate": 2.5000000000000015e-06,
      "loss": 0.2984,
      "step": 230
    },
    {
      "epoch": 2.2027649769585254,
      "grad_norm": 2.659423207900044,
      "learning_rate": 2.0524355524417017e-06,
      "loss": 0.3083,
      "step": 240
    },
    {
      "epoch": 2.294930875576037,
      "grad_norm": 1.455707642538854,
      "learning_rate": 1.6384955486934157e-06,
      "loss": 0.3068,
      "step": 250
    },
    {
      "epoch": 2.3870967741935485,
      "grad_norm": 1.6701598764906809,
      "learning_rate": 1.2629020237248241e-06,
      "loss": 0.3212,
      "step": 260
    },
    {
      "epoch": 2.47926267281106,
      "grad_norm": 1.593065323038753,
      "learning_rate": 9.299395737170758e-07,
      "loss": 0.2936,
      "step": 270
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 1.5938273754182704,
      "learning_rate": 6.43406479383053e-07,
      "loss": 0.3342,
      "step": 280
    },
    {
      "epoch": 2.6635944700460827,
      "grad_norm": 1.507075362888062,
      "learning_rate": 4.0657137694820826e-07,
      "loss": 0.3142,
      "step": 290
    },
    {
      "epoch": 2.7557603686635943,
      "grad_norm": 1.639819132890916,
      "learning_rate": 2.2213597106929608e-07,
      "loss": 0.31,
      "step": 300
    },
    {
      "epoch": 2.847926267281106,
      "grad_norm": 1.6048123491091828,
      "learning_rate": 9.22042150446728e-08,
      "loss": 0.3163,
      "step": 310
    },
    {
      "epoch": 2.9400921658986174,
      "grad_norm": 1.7667300318705759,
      "learning_rate": 1.8258309893965375e-08,
      "loss": 0.299,
      "step": 320
    },
    {
      "epoch": 3.0,
      "step": 327,
      "total_flos": 55031842603008.0,
      "train_loss": 0.4859297319289741,
      "train_runtime": 794.9666,
      "train_samples_per_second": 26.111,
      "train_steps_per_second": 0.411
    }
  ],
  "logging_steps": 10,
  "max_steps": 327,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 55031842603008.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}