{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.6,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 0.4841386675834656,
      "learning_rate": 4.997807075247146e-05,
      "loss": 0.688,
      "num_input_tokens_seen": 46776,
      "step": 5
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.3365215063095093,
      "learning_rate": 4.991232148123761e-05,
      "loss": 0.763,
      "num_input_tokens_seen": 85136,
      "step": 10
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.5198595523834229,
      "learning_rate": 4.980286753286195e-05,
      "loss": 0.6882,
      "num_input_tokens_seen": 126584,
      "step": 15
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.40807807445526123,
      "learning_rate": 4.964990092676263e-05,
      "loss": 0.6951,
      "num_input_tokens_seen": 167968,
      "step": 20
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.439756840467453,
      "learning_rate": 4.9453690018345144e-05,
      "loss": 0.5008,
      "num_input_tokens_seen": 206832,
      "step": 25
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.45932796597480774,
      "learning_rate": 4.9214579028215776e-05,
      "loss": 0.542,
      "num_input_tokens_seen": 243656,
      "step": 30
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.3281826376914978,
      "learning_rate": 4.893298743830168e-05,
      "loss": 0.5369,
      "num_input_tokens_seen": 284016,
      "step": 35
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.2551577389240265,
      "learning_rate": 4.860940925593703e-05,
      "loss": 0.4948,
      "num_input_tokens_seen": 327408,
      "step": 40
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.5936368107795715,
      "learning_rate": 4.8244412147206284e-05,
      "loss": 0.5244,
      "num_input_tokens_seen": 363376,
      "step": 45
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.2480110377073288,
      "learning_rate": 4.783863644106502e-05,
      "loss": 0.421,
      "num_input_tokens_seen": 398840,
      "step": 50
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.4601157307624817,
      "learning_rate": 4.7392794005985326e-05,
      "loss": 0.4517,
      "num_input_tokens_seen": 436136,
      "step": 55
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.6387960314750671,
      "learning_rate": 4.690766700109659e-05,
      "loss": 0.4661,
      "num_input_tokens_seen": 475856,
      "step": 60
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.4365377128124237,
      "learning_rate": 4.638410650401267e-05,
      "loss": 0.4928,
      "num_input_tokens_seen": 514496,
      "step": 65
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.2992519438266754,
      "learning_rate": 4.5823031017752485e-05,
      "loss": 0.5424,
      "num_input_tokens_seen": 554424,
      "step": 70
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.828787088394165,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.5419,
      "num_input_tokens_seen": 593264,
      "step": 75
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.2943418622016907,
      "learning_rate": 4.4592336433146e-05,
      "loss": 0.4558,
      "num_input_tokens_seen": 630264,
      "step": 80
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.3707512617111206,
      "learning_rate": 4.3924876391293915e-05,
      "loss": 0.5656,
      "num_input_tokens_seen": 668864,
      "step": 85
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.4809654653072357,
      "learning_rate": 4.3224215685535294e-05,
      "loss": 0.4832,
      "num_input_tokens_seen": 712504,
      "step": 90
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.516292929649353,
      "learning_rate": 4.249158351283414e-05,
      "loss": 0.4626,
      "num_input_tokens_seen": 748872,
      "step": 95
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.5604212284088135,
      "learning_rate": 4.172826515897146e-05,
      "loss": 0.4837,
      "num_input_tokens_seen": 788408,
      "step": 100
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.7355071902275085,
      "learning_rate": 4.093559974371725e-05,
      "loss": 0.5144,
      "num_input_tokens_seen": 828448,
      "step": 105
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.46999862790107727,
      "learning_rate": 4.011497787155938e-05,
      "loss": 0.493,
      "num_input_tokens_seen": 864680,
      "step": 110
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.5488362312316895,
      "learning_rate": 3.92678391921108e-05,
      "loss": 0.4083,
      "num_input_tokens_seen": 902568,
      "step": 115
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.5053719282150269,
      "learning_rate": 3.8395669874474915e-05,
      "loss": 0.5172,
      "num_input_tokens_seen": 944752,
      "step": 120
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.44234323501586914,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.5843,
      "num_input_tokens_seen": 988656,
      "step": 125
    },
    {
      "epoch": 1.04,
      "grad_norm": 0.4518623352050781,
      "learning_rate": 3.6582400877996546e-05,
      "loss": 0.4567,
      "num_input_tokens_seen": 1028584,
      "step": 130
    },
    {
      "epoch": 1.08,
      "grad_norm": 0.39792686700820923,
      "learning_rate": 3.564448228912682e-05,
      "loss": 0.418,
      "num_input_tokens_seen": 1065104,
      "step": 135
    },
    {
      "epoch": 1.12,
      "grad_norm": 0.5141780376434326,
      "learning_rate": 3.4687889661302576e-05,
      "loss": 0.3785,
      "num_input_tokens_seen": 1106744,
      "step": 140
    },
    {
      "epoch": 1.16,
      "grad_norm": 0.5755515098571777,
      "learning_rate": 3.3714301183045385e-05,
      "loss": 0.4097,
      "num_input_tokens_seen": 1140672,
      "step": 145
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.4626426696777344,
      "learning_rate": 3.272542485937369e-05,
      "loss": 0.4507,
      "num_input_tokens_seen": 1177280,
      "step": 150
    },
    {
      "epoch": 1.24,
      "grad_norm": 0.49699896574020386,
      "learning_rate": 3.172299551538164e-05,
      "loss": 0.368,
      "num_input_tokens_seen": 1215744,
      "step": 155
    },
    {
      "epoch": 1.28,
      "grad_norm": 0.5842998027801514,
      "learning_rate": 3.0708771752766394e-05,
      "loss": 0.4301,
      "num_input_tokens_seen": 1257040,
      "step": 160
    },
    {
      "epoch": 1.32,
      "grad_norm": 0.46173322200775146,
      "learning_rate": 2.9684532864643122e-05,
      "loss": 0.4488,
      "num_input_tokens_seen": 1295992,
      "step": 165
    },
    {
      "epoch": 1.3599999999999999,
      "grad_norm": 0.4926168918609619,
      "learning_rate": 2.8652075714060295e-05,
      "loss": 0.4075,
      "num_input_tokens_seen": 1334672,
      "step": 170
    },
    {
      "epoch": 1.4,
      "grad_norm": 0.5110897421836853,
      "learning_rate": 2.761321158169134e-05,
      "loss": 0.4991,
      "num_input_tokens_seen": 1379280,
      "step": 175
    },
    {
      "epoch": 1.44,
      "grad_norm": 0.33892518281936646,
      "learning_rate": 2.656976298823284e-05,
      "loss": 0.4894,
      "num_input_tokens_seen": 1419704,
      "step": 180
    },
    {
      "epoch": 1.48,
      "grad_norm": 0.46542254090309143,
      "learning_rate": 2.5523560497083926e-05,
      "loss": 0.4967,
      "num_input_tokens_seen": 1459256,
      "step": 185
    },
    {
      "epoch": 1.52,
      "grad_norm": 0.9100029468536377,
      "learning_rate": 2.447643950291608e-05,
      "loss": 0.5297,
      "num_input_tokens_seen": 1503576,
      "step": 190
    },
    {
      "epoch": 1.56,
      "grad_norm": 0.7347203493118286,
      "learning_rate": 2.3430237011767167e-05,
      "loss": 0.3939,
      "num_input_tokens_seen": 1539904,
      "step": 195
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.6078445315361023,
      "learning_rate": 2.238678841830867e-05,
      "loss": 0.461,
      "num_input_tokens_seen": 1580560,
      "step": 200
    }
  ],
  "logging_steps": 5,
  "max_steps": 375,
  "num_input_tokens_seen": 1580560,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.724464885419213e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}