{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.864,
  "eval_steps": 500,
  "global_step": 155,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16,
      "grad_norm": 0.04806969687342644,
      "learning_rate": 6.249999999999999e-07,
      "loss": 1.1372,
      "mean_token_accuracy": 0.7175553351640701,
      "step": 5
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.04902997612953186,
      "learning_rate": 9.99543333708549e-07,
      "loss": 1.1327,
      "mean_token_accuracy": 0.7127479765564203,
      "step": 10
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.052965614944696426,
      "learning_rate": 9.944154131125642e-07,
      "loss": 1.1403,
      "mean_token_accuracy": 0.7101998340338469,
      "step": 15
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.049492210149765015,
      "learning_rate": 9.836474315195147e-07,
      "loss": 1.1492,
      "mean_token_accuracy": 0.7093485150486231,
      "step": 20
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.04770048335194588,
      "learning_rate": 9.673622250534155e-07,
      "loss": 1.0896,
      "mean_token_accuracy": 0.722656001150608,
      "step": 25
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.04960792139172554,
      "learning_rate": 9.457455677726447e-07,
      "loss": 1.1031,
      "mean_token_accuracy": 0.7249287318438291,
      "step": 30
    },
    {
      "epoch": 1.096,
      "grad_norm": 0.05211903899908066,
      "learning_rate": 9.190440524459202e-07,
      "loss": 1.0969,
      "mean_token_accuracy": 0.7173009343883571,
      "step": 35
    },
    {
      "epoch": 1.256,
      "grad_norm": 0.05225418508052826,
      "learning_rate": 8.875622775367259e-07,
      "loss": 1.1024,
      "mean_token_accuracy": 0.7184804223477841,
      "step": 40
    },
    {
      "epoch": 1.416,
      "grad_norm": 0.053441036492586136,
      "learning_rate": 8.516593724857597e-07,
      "loss": 1.1062,
      "mean_token_accuracy": 0.7180147383362054,
      "step": 45
    },
    {
      "epoch": 1.576,
      "grad_norm": 0.05642622709274292,
      "learning_rate": 8.117449009293668e-07,
      "loss": 1.1143,
      "mean_token_accuracy": 0.7181682541966439,
      "step": 50
    },
    {
      "epoch": 1.736,
      "grad_norm": 0.06203881651163101,
      "learning_rate": 7.682741885881314e-07,
      "loss": 1.1619,
      "mean_token_accuracy": 0.7082567475736141,
      "step": 55
    },
    {
      "epoch": 1.896,
      "grad_norm": 0.05656283721327782,
      "learning_rate": 7.217431291229067e-07,
      "loss": 1.1153,
      "mean_token_accuracy": 0.722714689001441,
      "step": 60
    },
    {
      "epoch": 2.032,
      "grad_norm": 0.061042506247758865,
      "learning_rate": 6.726825272106538e-07,
      "loss": 1.1493,
      "mean_token_accuracy": 0.7165276228505022,
      "step": 65
    },
    {
      "epoch": 2.192,
      "grad_norm": 0.058021657168865204,
      "learning_rate": 6.216520433716544e-07,
      "loss": 1.1358,
      "mean_token_accuracy": 0.7132049109786749,
      "step": 70
    },
    {
      "epoch": 2.352,
      "grad_norm": 0.059838637709617615,
      "learning_rate": 5.69233809622687e-07,
      "loss": 1.1148,
      "mean_token_accuracy": 0.7178624272346497,
      "step": 75
    },
    {
      "epoch": 2.512,
      "grad_norm": 0.06404729187488556,
      "learning_rate": 5.160257887858277e-07,
      "loss": 1.1569,
      "mean_token_accuracy": 0.7101449474692345,
      "step": 80
    },
    {
      "epoch": 2.672,
      "grad_norm": 0.06028318777680397,
      "learning_rate": 4.626349532067879e-07,
      "loss": 1.0867,
      "mean_token_accuracy": 0.7239396564662457,
      "step": 85
    },
    {
      "epoch": 2.832,
      "grad_norm": 0.06384854018688202,
      "learning_rate": 4.096703606968006e-07,
      "loss": 1.1188,
      "mean_token_accuracy": 0.7177162211388349,
      "step": 90
    },
    {
      "epoch": 2.992,
      "grad_norm": 0.06171684339642525,
      "learning_rate": 3.577362066844838e-07,
      "loss": 1.1188,
      "mean_token_accuracy": 0.7161729197949172,
      "step": 95
    },
    {
      "epoch": 3.128,
      "grad_norm": 0.0635930597782135,
      "learning_rate": 3.0742493183550454e-07,
      "loss": 1.0999,
      "mean_token_accuracy": 0.714051765992361,
      "step": 100
    },
    {
      "epoch": 3.288,
      "grad_norm": 0.06290465593338013,
      "learning_rate": 2.593104637651087e-07,
      "loss": 1.138,
      "mean_token_accuracy": 0.7161103874444962,
      "step": 105
    },
    {
      "epoch": 3.448,
      "grad_norm": 0.06601794064044952,
      "learning_rate": 2.1394166993891526e-07,
      "loss": 1.1073,
      "mean_token_accuracy": 0.7206942658871412,
      "step": 110
    },
    {
      "epoch": 3.608,
      "grad_norm": 0.06451869755983353,
      "learning_rate": 1.7183609644824092e-07,
      "loss": 1.105,
      "mean_token_accuracy": 0.7173311490565538,
      "step": 115
    },
    {
      "epoch": 3.768,
      "grad_norm": 0.06503637880086899,
      "learning_rate": 1.3347406408508694e-07,
      "loss": 1.1026,
      "mean_token_accuracy": 0.7226938724517822,
      "step": 120
    },
    {
      "epoch": 3.928,
      "grad_norm": 0.06369519233703613,
      "learning_rate": 9.929318906602174e-08,
      "loss": 1.136,
      "mean_token_accuracy": 0.7119275834411383,
      "step": 125
    },
    {
      "epoch": 4.064,
      "grad_norm": 0.0652015432715416,
      "learning_rate": 6.968339090999186e-08,
      "loss": 1.1041,
      "mean_token_accuracy": 0.7163285718244665,
      "step": 130
    },
    {
      "epoch": 4.224,
      "grad_norm": 0.06359368562698364,
      "learning_rate": 4.498244441786675e-08,
      "loss": 1.1359,
      "mean_token_accuracy": 0.7156530544161797,
      "step": 135
    },
    {
      "epoch": 4.384,
      "grad_norm": 0.06807275116443634,
      "learning_rate": 2.547212649466568e-08,
      "loss": 1.1229,
      "mean_token_accuracy": 0.7130052808672189,
      "step": 140
    },
    {
      "epoch": 4.5440000000000005,
      "grad_norm": 0.06558381021022797,
      "learning_rate": 1.1375001769727999e-08,
      "loss": 1.0983,
      "mean_token_accuracy": 0.7218218572437763,
      "step": 145
    },
    {
      "epoch": 4.704,
      "grad_norm": 0.06619936972856522,
      "learning_rate": 2.851883682973233e-09,
      "loss": 1.1149,
      "mean_token_accuracy": 0.7202201712876558,
      "step": 150
    },
    {
      "epoch": 4.864,
      "grad_norm": 0.0672445297241211,
      "learning_rate": 0.0,
      "loss": 1.0885,
      "mean_token_accuracy": 0.7229270905256271,
      "step": 155
    }
  ],
  "logging_steps": 5,
  "max_steps": 155,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.40617510529582e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}