{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8820286659316428,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022050716648291068,
      "grad_norm": 75.05713653564453,
      "learning_rate": 9.782608695652175e-07,
      "loss": 3.2801,
      "step": 10
    },
    {
      "epoch": 0.044101433296582136,
      "grad_norm": 23.524181365966797,
      "learning_rate": 2.065217391304348e-06,
      "loss": 1.3223,
      "step": 20
    },
    {
      "epoch": 0.06615214994487321,
      "grad_norm": 6.137115478515625,
      "learning_rate": 3.152173913043479e-06,
      "loss": 0.1773,
      "step": 30
    },
    {
      "epoch": 0.08820286659316427,
      "grad_norm": 4.109879016876221,
      "learning_rate": 4.239130434782609e-06,
      "loss": 0.0903,
      "step": 40
    },
    {
      "epoch": 0.11025358324145534,
      "grad_norm": 4.534224987030029,
      "learning_rate": 4.999333020279094e-06,
      "loss": 0.0765,
      "step": 50
    },
    {
      "epoch": 0.13230429988974643,
      "grad_norm": 1.7704753875732422,
      "learning_rate": 4.987485500883267e-06,
      "loss": 0.0485,
      "step": 60
    },
    {
      "epoch": 0.1543550165380375,
      "grad_norm": 3.437958002090454,
      "learning_rate": 4.960897034310215e-06,
      "loss": 0.0476,
      "step": 70
    },
    {
      "epoch": 0.17640573318632854,
      "grad_norm": 2.1654245853424072,
      "learning_rate": 4.919725184968307e-06,
      "loss": 0.0328,
      "step": 80
    },
    {
      "epoch": 0.19845644983461963,
      "grad_norm": 2.156630516052246,
      "learning_rate": 4.864213939025955e-06,
      "loss": 0.0399,
      "step": 90
    },
    {
      "epoch": 0.2205071664829107,
      "grad_norm": 2.317758083343506,
      "learning_rate": 4.794692258538973e-06,
      "loss": 0.0422,
      "step": 100
    },
    {
      "epoch": 0.24255788313120177,
      "grad_norm": 1.3408821821212769,
      "learning_rate": 4.711572132007139e-06,
      "loss": 0.0373,
      "step": 110
    },
    {
      "epoch": 0.26460859977949286,
      "grad_norm": 1.124430775642395,
      "learning_rate": 4.615346132912444e-06,
      "loss": 0.0371,
      "step": 120
    },
    {
      "epoch": 0.2866593164277839,
      "grad_norm": 1.6655385494232178,
      "learning_rate": 4.506584500707229e-06,
      "loss": 0.034,
      "step": 130
    },
    {
      "epoch": 0.308710033076075,
      "grad_norm": 1.0551161766052246,
      "learning_rate": 4.385931761550411e-06,
      "loss": 0.0345,
      "step": 140
    },
    {
      "epoch": 0.33076074972436603,
      "grad_norm": 0.7072309851646423,
      "learning_rate": 4.254102908817454e-06,
      "loss": 0.0287,
      "step": 150
    },
    {
      "epoch": 0.3528114663726571,
      "grad_norm": 0.7073365449905396,
      "learning_rate": 4.111879166018561e-06,
      "loss": 0.0229,
      "step": 160
    },
    {
      "epoch": 0.3748621830209482,
      "grad_norm": 1.0040053129196167,
      "learning_rate": 3.960103357234192e-06,
      "loss": 0.0193,
      "step": 170
    },
    {
      "epoch": 0.39691289966923926,
      "grad_norm": 0.8540388345718384,
      "learning_rate": 3.799674912502946e-06,
      "loss": 0.0288,
      "step": 180
    },
    {
      "epoch": 0.4189636163175303,
      "grad_norm": 1.2971080541610718,
      "learning_rate": 3.63154453776006e-06,
      "loss": 0.022,
      "step": 190
    },
    {
      "epoch": 0.4410143329658214,
      "grad_norm": 0.6404117345809937,
      "learning_rate": 3.4567085809127247e-06,
      "loss": 0.0268,
      "step": 200
    },
    {
      "epoch": 0.46306504961411243,
      "grad_norm": 0.40261635184288025,
      "learning_rate": 3.2762031274390876e-06,
      "loss": 0.0192,
      "step": 210
    },
    {
      "epoch": 0.48511576626240355,
      "grad_norm": 0.9443170428276062,
      "learning_rate": 3.091097860500683e-06,
      "loss": 0.0208,
      "step": 220
    },
    {
      "epoch": 0.5071664829106945,
      "grad_norm": 0.6959162950515747,
      "learning_rate": 2.9024897219535326e-06,
      "loss": 0.0168,
      "step": 230
    },
    {
      "epoch": 0.5292171995589857,
      "grad_norm": 0.8389456868171692,
      "learning_rate": 2.7114964118230352e-06,
      "loss": 0.0177,
      "step": 240
    },
    {
      "epoch": 0.5512679162072768,
      "grad_norm": 0.3119961619377136,
      "learning_rate": 2.519249764765047e-06,
      "loss": 0.0143,
      "step": 250
    },
    {
      "epoch": 0.5733186328555678,
      "grad_norm": 1.0202538967132568,
      "learning_rate": 2.3268890427645213e-06,
      "loss": 0.0141,
      "step": 260
    },
    {
      "epoch": 0.5953693495038589,
      "grad_norm": 0.7544236779212952,
      "learning_rate": 2.1355541838194797e-06,
      "loss": 0.015,
      "step": 270
    },
    {
      "epoch": 0.61742006615215,
      "grad_norm": 1.0978893041610718,
      "learning_rate": 1.946379046618894e-06,
      "loss": 0.0114,
      "step": 280
    },
    {
      "epoch": 0.639470782800441,
      "grad_norm": 0.6933329701423645,
      "learning_rate": 1.7604846912468243e-06,
      "loss": 0.0111,
      "step": 290
    },
    {
      "epoch": 0.6615214994487321,
      "grad_norm": 0.8670783638954163,
      "learning_rate": 1.5789727357316426e-06,
      "loss": 0.0115,
      "step": 300
    },
    {
      "epoch": 0.6835722160970231,
      "grad_norm": 0.3083396553993225,
      "learning_rate": 1.40291882780972e-06,
      "loss": 0.0085,
      "step": 310
    },
    {
      "epoch": 0.7056229327453142,
      "grad_norm": 0.7997369170188904,
      "learning_rate": 1.233366270590202e-06,
      "loss": 0.0098,
      "step": 320
    },
    {
      "epoch": 0.7276736493936052,
      "grad_norm": 0.3282625377178192,
      "learning_rate": 1.0713198398954382e-06,
      "loss": 0.009,
      "step": 330
    },
    {
      "epoch": 0.7497243660418964,
      "grad_norm": 0.31581273674964905,
      "learning_rate": 9.177398299157989e-07,
      "loss": 0.0103,
      "step": 340
    },
    {
      "epoch": 0.7717750826901875,
      "grad_norm": 0.05854691565036774,
      "learning_rate": 7.735363624645712e-07,
      "loss": 0.0097,
      "step": 350
    },
    {
      "epoch": 0.7938257993384785,
      "grad_norm": 0.2609463334083557,
      "learning_rate": 6.395639935565412e-07,
      "loss": 0.0139,
      "step": 360
    },
    {
      "epoch": 0.8158765159867696,
      "grad_norm": 1.644800066947937,
      "learning_rate": 5.166166492719124e-07,
      "loss": 0.0134,
      "step": 370
    },
    {
      "epoch": 0.8379272326350606,
      "grad_norm": 0.7762562036514282,
      "learning_rate": 4.0542292091585447e-07,
      "loss": 0.0099,
      "step": 380
    },
    {
      "epoch": 0.8599779492833517,
      "grad_norm": 0.766238272190094,
      "learning_rate": 3.066417473547667e-07,
      "loss": 0.0122,
      "step": 390
    },
    {
      "epoch": 0.8820286659316428,
      "grad_norm": 0.5094723701477051,
      "learning_rate": 2.2085851011591831e-07,
      "loss": 0.0081,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 454,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.986534988442501e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}