{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 411,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.072992700729927,
      "grad_norm": 7.62998519999138,
      "learning_rate": 3.4615384615384617e-06,
      "loss": 2.1931,
      "num_tokens": 2861698.0,
      "step": 10
    },
    {
      "epoch": 0.145985401459854,
      "grad_norm": 2.0378838645741393,
      "learning_rate": 4.997196729961109e-06,
      "loss": 2.0271,
      "num_tokens": 5725093.0,
      "step": 20
    },
    {
      "epoch": 0.21897810218978103,
      "grad_norm": 1.1687767688324222,
      "learning_rate": 4.980088396483145e-06,
      "loss": 1.9229,
      "num_tokens": 8584884.0,
      "step": 30
    },
    {
      "epoch": 0.291970802919708,
      "grad_norm": 1.5019234268738317,
      "learning_rate": 4.947535513144287e-06,
      "loss": 1.899,
      "num_tokens": 11447171.0,
      "step": 40
    },
    {
      "epoch": 0.36496350364963503,
      "grad_norm": 1.3274194647163335,
      "learning_rate": 4.899740800392143e-06,
      "loss": 1.8861,
      "num_tokens": 14305298.0,
      "step": 50
    },
    {
      "epoch": 0.43795620437956206,
      "grad_norm": 1.3351216353908748,
      "learning_rate": 4.837001895933996e-06,
      "loss": 1.8724,
      "num_tokens": 17165371.0,
      "step": 60
    },
    {
      "epoch": 0.5109489051094891,
      "grad_norm": 1.3694945668597924,
      "learning_rate": 4.75970950122206e-06,
      "loss": 1.864,
      "num_tokens": 20024286.0,
      "step": 70
    },
    {
      "epoch": 0.583941605839416,
      "grad_norm": 1.3674205399654282,
      "learning_rate": 4.668344948391787e-06,
      "loss": 1.8498,
      "num_tokens": 22880217.0,
      "step": 80
    },
    {
      "epoch": 0.656934306569343,
      "grad_norm": 1.377613524812094,
      "learning_rate": 4.563477202804941e-06,
      "loss": 1.8502,
      "num_tokens": 25733499.0,
      "step": 90
    },
    {
      "epoch": 0.7299270072992701,
      "grad_norm": 1.4034954365891135,
      "learning_rate": 4.445759319863825e-06,
      "loss": 1.848,
      "num_tokens": 28583434.0,
      "step": 100
    },
    {
      "epoch": 0.8029197080291971,
      "grad_norm": 1.412142777635451,
      "learning_rate": 4.315924378161599e-06,
      "loss": 1.8352,
      "num_tokens": 31446481.0,
      "step": 110
    },
    {
      "epoch": 0.8759124087591241,
      "grad_norm": 1.3940616426918506,
      "learning_rate": 4.174780914294638e-06,
      "loss": 1.8398,
      "num_tokens": 34303388.0,
      "step": 120
    },
    {
      "epoch": 0.948905109489051,
      "grad_norm": 1.3677454956298989,
      "learning_rate": 4.023207887766292e-06,
      "loss": 1.8312,
      "num_tokens": 37155157.0,
      "step": 130
    },
    {
      "epoch": 1.0218978102189782,
      "grad_norm": 1.370013187149163,
      "learning_rate": 3.862149207337677e-06,
      "loss": 1.8195,
      "num_tokens": 40016949.0,
      "step": 140
    },
    {
      "epoch": 1.094890510948905,
      "grad_norm": 1.3453807513004084,
      "learning_rate": 3.6926078529122245e-06,
      "loss": 1.8123,
      "num_tokens": 42870291.0,
      "step": 150
    },
    {
      "epoch": 1.167883211678832,
      "grad_norm": 1.3275053674064692,
      "learning_rate": 3.5156396295594732e-06,
      "loss": 1.7984,
      "num_tokens": 45727372.0,
      "step": 160
    },
    {
      "epoch": 1.2408759124087592,
      "grad_norm": 1.2934233224716136,
      "learning_rate": 3.332346592574404e-06,
      "loss": 1.7999,
      "num_tokens": 48577368.0,
      "step": 170
    },
    {
      "epoch": 1.313868613138686,
      "grad_norm": 1.2551658629308613,
      "learning_rate": 3.1438701845172427e-06,
      "loss": 1.7963,
      "num_tokens": 51434586.0,
      "step": 180
    },
    {
      "epoch": 1.3868613138686132,
      "grad_norm": 1.213032797696571,
      "learning_rate": 2.9513841269722554e-06,
      "loss": 1.7881,
      "num_tokens": 54297424.0,
      "step": 190
    },
    {
      "epoch": 1.4598540145985401,
      "grad_norm": 1.156968122738453,
      "learning_rate": 2.756087111291533e-06,
      "loss": 1.7897,
      "num_tokens": 57162459.0,
      "step": 200
    },
    {
      "epoch": 1.5328467153284673,
      "grad_norm": 1.1249392481057738,
      "learning_rate": 2.5591953338415597e-06,
      "loss": 1.7849,
      "num_tokens": 60018675.0,
      "step": 210
    },
    {
      "epoch": 1.6058394160583942,
      "grad_norm": 1.0994749587493056,
      "learning_rate": 2.36193492223873e-06,
      "loss": 1.7775,
      "num_tokens": 62878668.0,
      "step": 220
    },
    {
      "epoch": 1.6788321167883211,
      "grad_norm": 1.0637208182057718,
      "learning_rate": 2.1655342997388027e-06,
      "loss": 1.7909,
      "num_tokens": 65740651.0,
      "step": 230
    },
    {
      "epoch": 1.7518248175182483,
      "grad_norm": 1.044107008859833,
      "learning_rate": 1.9712165353304528e-06,
      "loss": 1.7756,
      "num_tokens": 68601510.0,
      "step": 240
    },
    {
      "epoch": 1.8248175182481752,
      "grad_norm": 0.9925194390180851,
      "learning_rate": 1.7801917271720841e-06,
      "loss": 1.7832,
      "num_tokens": 71458963.0,
      "step": 250
    },
    {
      "epoch": 1.897810218978102,
      "grad_norm": 0.955995300227254,
      "learning_rate": 1.593649466803439e-06,
      "loss": 1.7844,
      "num_tokens": 74316993.0,
      "step": 260
    },
    {
      "epoch": 1.9708029197080292,
      "grad_norm": 0.917851333269758,
      "learning_rate": 1.412751431060518e-06,
      "loss": 1.7762,
      "num_tokens": 77175911.0,
      "step": 270
    },
    {
      "epoch": 2.0437956204379564,
      "grad_norm": 0.8904733879800006,
      "learning_rate": 1.2386241478270566e-06,
      "loss": 1.7703,
      "num_tokens": 80032620.0,
      "step": 280
    },
    {
      "epoch": 2.116788321167883,
      "grad_norm": 0.8877855846920832,
      "learning_rate": 1.0723519806732633e-06,
      "loss": 1.7644,
      "num_tokens": 82889866.0,
      "step": 290
    },
    {
      "epoch": 2.18978102189781,
      "grad_norm": 0.8648908773622401,
      "learning_rate": 9.149703760694034e-07,
      "loss": 1.7676,
      "num_tokens": 85743417.0,
      "step": 300
    },
    {
      "epoch": 2.2627737226277373,
      "grad_norm": 0.8480142852639106,
      "learning_rate": 7.674594152266707e-07,
      "loss": 1.7678,
      "num_tokens": 88603527.0,
      "step": 310
    },
    {
      "epoch": 2.335766423357664,
      "grad_norm": 0.8220641591692829,
      "learning_rate": 6.307377107207275e-07,
      "loss": 1.7687,
      "num_tokens": 91459160.0,
      "step": 320
    },
    {
      "epoch": 2.408759124087591,
      "grad_norm": 0.8343743852690421,
      "learning_rate": 5.056566859062018e-07,
      "loss": 1.7634,
      "num_tokens": 94315375.0,
      "step": 330
    },
    {
      "epoch": 2.4817518248175183,
      "grad_norm": 0.8234329687161832,
      "learning_rate": 3.9299527274662353e-07,
      "loss": 1.7634,
      "num_tokens": 97180408.0,
      "step": 340
    },
    {
      "epoch": 2.554744525547445,
      "grad_norm": 0.8056748923441135,
      "learning_rate": 2.934550610786327e-07,
      "loss": 1.7609,
      "num_tokens": 100041451.0,
      "step": 350
    },
    {
      "epoch": 2.627737226277372,
      "grad_norm": 0.8157145557692418,
      "learning_rate": 2.0765592951802804e-07,
      "loss": 1.7654,
      "num_tokens": 102901398.0,
      "step": 360
    },
    {
      "epoch": 2.7007299270072993,
      "grad_norm": 0.8132927418871645,
      "learning_rate": 1.361321852158326e-07,
      "loss": 1.7625,
      "num_tokens": 105764937.0,
      "step": 370
    },
    {
      "epoch": 2.7737226277372264,
      "grad_norm": 0.7925420271726725,
      "learning_rate": 7.932923650373209e-08,
      "loss": 1.7645,
      "num_tokens": 108617497.0,
      "step": 380
    },
    {
      "epoch": 2.846715328467153,
      "grad_norm": 0.8078926286548843,
      "learning_rate": 3.7600819149724024e-08,
      "loss": 1.7616,
      "num_tokens": 111469621.0,
      "step": 390
    },
    {
      "epoch": 2.9197080291970803,
      "grad_norm": 0.8084587727297771,
      "learning_rate": 1.1206793497235413e-08,
      "loss": 1.7543,
      "num_tokens": 114331353.0,
      "step": 400
    },
    {
      "epoch": 2.9927007299270074,
      "grad_norm": 0.8140377250304603,
      "learning_rate": 3.11526205856727e-10,
      "loss": 1.7653,
      "num_tokens": 117187018.0,
      "step": 410
    },
    {
      "epoch": 3.0,
      "num_tokens": 117474320.0,
      "step": 411,
      "total_flos": 56147696418816.0,
      "train_loss": 1.8167425843630973,
      "train_runtime": 1364.5021,
      "train_samples_per_second": 153.902,
      "train_steps_per_second": 0.301
    }
  ],
  "logging_steps": 10,
  "max_steps": 411,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 56147696418816.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}