{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 327,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09216589861751152,
      "grad_norm": 4.912416428745171,
      "learning_rate": 2.7272727272727272e-06,
      "loss": 0.8529,
      "step": 10
    },
    {
      "epoch": 0.18433179723502305,
      "grad_norm": 2.1443951476400227,
      "learning_rate": 5.7575757575757586e-06,
      "loss": 0.6404,
      "step": 20
    },
    {
      "epoch": 0.2764976958525346,
      "grad_norm": 1.658722389064836,
      "learning_rate": 8.787878787878788e-06,
      "loss": 0.5326,
      "step": 30
    },
    {
      "epoch": 0.3686635944700461,
      "grad_norm": 1.5618873833827294,
      "learning_rate": 9.989726963751683e-06,
      "loss": 0.4925,
      "step": 40
    },
    {
      "epoch": 0.4608294930875576,
      "grad_norm": 1.4925009785754748,
      "learning_rate": 9.927100106776213e-06,
      "loss": 0.4799,
      "step": 50
    },
    {
      "epoch": 0.5529953917050692,
      "grad_norm": 1.322188289534146,
      "learning_rate": 9.808267184205182e-06,
      "loss": 0.4592,
      "step": 60
    },
    {
      "epoch": 0.6451612903225806,
      "grad_norm": 1.4524544851906915,
      "learning_rate": 9.63458378673011e-06,
      "loss": 0.455,
      "step": 70
    },
    {
      "epoch": 0.7373271889400922,
      "grad_norm": 1.400758206854473,
      "learning_rate": 9.408031213740045e-06,
      "loss": 0.4417,
      "step": 80
    },
    {
      "epoch": 0.8294930875576036,
      "grad_norm": 1.393729749690361,
      "learning_rate": 9.131193871579975e-06,
      "loss": 0.4463,
      "step": 90
    },
    {
      "epoch": 0.9216589861751152,
      "grad_norm": 1.4880361350108735,
      "learning_rate": 8.807229791845673e-06,
      "loss": 0.4508,
      "step": 100
    },
    {
      "epoch": 1.0092165898617511,
      "grad_norm": 1.299774055064554,
      "learning_rate": 8.439834606028594e-06,
      "loss": 0.4024,
      "step": 110
    },
    {
      "epoch": 1.1013824884792627,
      "grad_norm": 1.298198957499482,
      "learning_rate": 8.033199387471278e-06,
      "loss": 0.3197,
      "step": 120
    },
    {
      "epoch": 1.1935483870967742,
      "grad_norm": 1.3650000791809225,
      "learning_rate": 7.591962841552627e-06,
      "loss": 0.3219,
      "step": 130
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 1.434265135129886,
      "learning_rate": 7.121158389495187e-06,
      "loss": 0.3195,
      "step": 140
    },
    {
      "epoch": 1.3778801843317972,
      "grad_norm": 1.495992136082361,
      "learning_rate": 6.626156749437736e-06,
      "loss": 0.3175,
      "step": 150
    },
    {
      "epoch": 1.4700460829493087,
      "grad_norm": 1.3523477092551237,
      "learning_rate": 6.112604669781572e-06,
      "loss": 0.3065,
      "step": 160
    },
    {
      "epoch": 1.5622119815668203,
      "grad_norm": 1.2695226148335443,
      "learning_rate": 5.586360513712011e-06,
      "loss": 0.3157,
      "step": 170
    },
    {
      "epoch": 1.6543778801843319,
      "grad_norm": 1.2984094624484053,
      "learning_rate": 5.053427429716867e-06,
      "loss": 0.311,
      "step": 180
    },
    {
      "epoch": 1.7465437788018434,
      "grad_norm": 1.465758872280537,
      "learning_rate": 4.5198848704615915e-06,
      "loss": 0.3131,
      "step": 190
    },
    {
      "epoch": 1.838709677419355,
      "grad_norm": 1.3727910421793548,
      "learning_rate": 3.991819241221836e-06,
      "loss": 0.2933,
      "step": 200
    },
    {
      "epoch": 1.9308755760368663,
      "grad_norm": 1.2600662931978257,
      "learning_rate": 3.475254469003865e-06,
      "loss": 0.2828,
      "step": 210
    },
    {
      "epoch": 2.0184331797235022,
      "grad_norm": 1.2847409994029446,
      "learning_rate": 2.976083284388031e-06,
      "loss": 0.2656,
      "step": 220
    },
    {
      "epoch": 2.110599078341014,
      "grad_norm": 1.4825208018701794,
      "learning_rate": 2.5000000000000015e-06,
      "loss": 0.1944,
      "step": 230
    },
    {
      "epoch": 2.2027649769585254,
      "grad_norm": 1.4346771852112512,
      "learning_rate": 2.0524355524417017e-06,
      "loss": 0.1971,
      "step": 240
    },
    {
      "epoch": 2.294930875576037,
      "grad_norm": 1.2773781048743025,
      "learning_rate": 1.6384955486934157e-06,
      "loss": 0.1971,
      "step": 250
    },
    {
      "epoch": 2.3870967741935485,
      "grad_norm": 1.3513953567558703,
      "learning_rate": 1.2629020237248241e-06,
      "loss": 0.1934,
      "step": 260
    },
    {
      "epoch": 2.47926267281106,
      "grad_norm": 1.1153786023075227,
      "learning_rate": 9.299395737170758e-07,
      "loss": 0.1975,
      "step": 270
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 1.255516210757092,
      "learning_rate": 6.43406479383053e-07,
      "loss": 0.2041,
      "step": 280
    },
    {
      "epoch": 2.6635944700460827,
      "grad_norm": 1.2757448362037527,
      "learning_rate": 4.0657137694820826e-07,
      "loss": 0.1979,
      "step": 290
    },
    {
      "epoch": 2.7557603686635943,
      "grad_norm": 1.218519400711122,
      "learning_rate": 2.2213597106929608e-07,
      "loss": 0.1919,
      "step": 300
    },
    {
      "epoch": 2.847926267281106,
      "grad_norm": 1.2928947540930629,
      "learning_rate": 9.22042150446728e-08,
      "loss": 0.1897,
      "step": 310
    },
    {
      "epoch": 2.9400921658986174,
      "grad_norm": 1.2273178341773785,
      "learning_rate": 1.8258309893965375e-08,
      "loss": 0.1877,
      "step": 320
    },
    {
      "epoch": 3.0,
      "step": 327,
      "total_flos": 56487017447424.0,
      "train_loss": 0.33897173769248007,
      "train_runtime": 796.6909,
      "train_samples_per_second": 26.058,
      "train_steps_per_second": 0.41
    }
  ],
  "logging_steps": 10,
  "max_steps": 327,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 56487017447424.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}