{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.70873786407767,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.24271844660194175,
      "grad_norm": 2.8362834453582764,
      "learning_rate": 4.960355987055016e-05,
      "loss": 6.0742,
      "step": 50
    },
    {
      "epoch": 0.4854368932038835,
      "grad_norm": 2.9484496116638184,
      "learning_rate": 4.9199029126213595e-05,
      "loss": 5.7218,
      "step": 100
    },
    {
      "epoch": 0.7281553398058253,
      "grad_norm": 3.542572259902954,
      "learning_rate": 4.879449838187702e-05,
      "loss": 5.4227,
      "step": 150
    },
    {
      "epoch": 0.970873786407767,
      "grad_norm": 4.634098052978516,
      "learning_rate": 4.8389967637540455e-05,
      "loss": 5.1655,
      "step": 200
    },
    {
      "epoch": 1.2135922330097086,
      "grad_norm": 5.699330806732178,
      "learning_rate": 4.798543689320388e-05,
      "loss": 4.8009,
      "step": 250
    },
    {
      "epoch": 1.4563106796116505,
      "grad_norm": 5.711933135986328,
      "learning_rate": 4.7580906148867315e-05,
      "loss": 4.607,
      "step": 300
    },
    {
      "epoch": 1.6990291262135924,
      "grad_norm": 5.113691806793213,
      "learning_rate": 4.717637540453075e-05,
      "loss": 4.462,
      "step": 350
    },
    {
      "epoch": 1.941747572815534,
      "grad_norm": 5.632521152496338,
      "learning_rate": 4.6771844660194174e-05,
      "loss": 4.3695,
      "step": 400
    },
    {
      "epoch": 2.1844660194174756,
      "grad_norm": 5.428906440734863,
      "learning_rate": 4.636731391585761e-05,
      "loss": 4.1549,
      "step": 450
    },
    {
      "epoch": 2.4271844660194173,
      "grad_norm": 5.037013530731201,
      "learning_rate": 4.596278317152104e-05,
      "loss": 4.0921,
      "step": 500
    },
    {
      "epoch": 2.6699029126213594,
      "grad_norm": 5.231846809387207,
      "learning_rate": 4.555825242718447e-05,
      "loss": 4.0662,
      "step": 550
    },
    {
      "epoch": 2.912621359223301,
      "grad_norm": 5.506278038024902,
      "learning_rate": 4.51537216828479e-05,
      "loss": 4.0403,
      "step": 600
    },
    {
      "epoch": 3.1553398058252426,
      "grad_norm": 5.410216331481934,
      "learning_rate": 4.4749190938511334e-05,
      "loss": 3.8375,
      "step": 650
    },
    {
      "epoch": 3.3980582524271843,
      "grad_norm": 5.637699127197266,
      "learning_rate": 4.434466019417476e-05,
      "loss": 3.7473,
      "step": 700
    },
    {
      "epoch": 3.6407766990291264,
      "grad_norm": 5.575273513793945,
      "learning_rate": 4.3940129449838194e-05,
      "loss": 3.7835,
      "step": 750
    },
    {
      "epoch": 3.883495145631068,
      "grad_norm": 5.737198352813721,
      "learning_rate": 4.353559870550162e-05,
      "loss": 3.7919,
      "step": 800
    },
    {
      "epoch": 4.12621359223301,
      "grad_norm": 5.645532608032227,
      "learning_rate": 4.313106796116505e-05,
      "loss": 3.6425,
      "step": 850
    },
    {
      "epoch": 4.368932038834951,
      "grad_norm": 6.308927059173584,
      "learning_rate": 4.272653721682848e-05,
      "loss": 3.4785,
      "step": 900
    },
    {
      "epoch": 4.611650485436893,
      "grad_norm": 5.863094329833984,
      "learning_rate": 4.232200647249191e-05,
      "loss": 3.5268,
      "step": 950
    },
    {
      "epoch": 4.854368932038835,
      "grad_norm": 6.120258331298828,
      "learning_rate": 4.191747572815534e-05,
      "loss": 3.5296,
      "step": 1000
    },
    {
      "epoch": 5.097087378640777,
      "grad_norm": 6.231515884399414,
      "learning_rate": 4.1512944983818774e-05,
      "loss": 3.3802,
      "step": 1050
    },
    {
      "epoch": 5.339805825242719,
      "grad_norm": 6.211188316345215,
      "learning_rate": 4.11084142394822e-05,
      "loss": 3.2148,
      "step": 1100
    },
    {
      "epoch": 5.58252427184466,
      "grad_norm": 6.8676934242248535,
      "learning_rate": 4.0703883495145634e-05,
      "loss": 3.2111,
      "step": 1150
    },
    {
      "epoch": 5.825242718446602,
      "grad_norm": 6.851133346557617,
      "learning_rate": 4.029935275080906e-05,
      "loss": 3.2771,
      "step": 1200
    },
    {
      "epoch": 6.067961165048544,
      "grad_norm": 6.708765506744385,
      "learning_rate": 3.9894822006472494e-05,
      "loss": 3.1587,
      "step": 1250
    },
    {
      "epoch": 6.310679611650485,
      "grad_norm": 7.136553764343262,
      "learning_rate": 3.949029126213593e-05,
      "loss": 2.9192,
      "step": 1300
    },
    {
      "epoch": 6.553398058252427,
      "grad_norm": 7.370823383331299,
      "learning_rate": 3.9085760517799354e-05,
      "loss": 2.922,
      "step": 1350
    },
    {
      "epoch": 6.796116504854369,
      "grad_norm": 7.630361557006836,
      "learning_rate": 3.868122977346279e-05,
      "loss": 2.9417,
      "step": 1400
    },
    {
      "epoch": 7.038834951456311,
      "grad_norm": 7.765511989593506,
      "learning_rate": 3.827669902912622e-05,
      "loss": 2.8669,
      "step": 1450
    },
    {
      "epoch": 7.281553398058253,
      "grad_norm": 7.85439920425415,
      "learning_rate": 3.787216828478965e-05,
      "loss": 2.5412,
      "step": 1500
    },
    {
      "epoch": 7.524271844660194,
      "grad_norm": 8.064071655273438,
      "learning_rate": 3.746763754045307e-05,
      "loss": 2.6131,
      "step": 1550
    },
    {
      "epoch": 7.766990291262136,
      "grad_norm": 8.692522048950195,
      "learning_rate": 3.7063106796116507e-05,
      "loss": 2.6356,
      "step": 1600
    },
    {
      "epoch": 8.009708737864077,
      "grad_norm": 8.012410163879395,
      "learning_rate": 3.665857605177993e-05,
      "loss": 2.599,
      "step": 1650
    },
    {
      "epoch": 8.25242718446602,
      "grad_norm": 8.6799898147583,
      "learning_rate": 3.6254045307443366e-05,
      "loss": 2.1992,
      "step": 1700
    },
    {
      "epoch": 8.495145631067961,
      "grad_norm": 8.963507652282715,
      "learning_rate": 3.584951456310679e-05,
      "loss": 2.2701,
      "step": 1750
    },
    {
      "epoch": 8.737864077669903,
      "grad_norm": 9.833822250366211,
      "learning_rate": 3.5444983818770226e-05,
      "loss": 2.2837,
      "step": 1800
    },
    {
      "epoch": 8.980582524271846,
      "grad_norm": 9.47242259979248,
      "learning_rate": 3.504045307443366e-05,
      "loss": 2.2854,
      "step": 1850
    },
    {
      "epoch": 9.223300970873787,
      "grad_norm": 8.856402397155762,
      "learning_rate": 3.4635922330097086e-05,
      "loss": 1.8862,
      "step": 1900
    },
    {
      "epoch": 9.466019417475728,
      "grad_norm": 9.948341369628906,
      "learning_rate": 3.423139158576052e-05,
      "loss": 1.922,
      "step": 1950
    },
    {
      "epoch": 9.70873786407767,
      "grad_norm": 10.28939151763916,
      "learning_rate": 3.382686084142395e-05,
      "loss": 1.9622,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 6180,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 928706985984000.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}