{
  "best_global_step": 158,
  "best_metric": 0.9977549332388042,
  "best_model_checkpoint": "ckpt/checkpoint-158",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 158,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08917197452229299,
      "grad_norm": 8.361231803894043,
      "learning_rate": 5.2173913043478265e-06,
      "loss": 1.0968,
      "step": 7
    },
    {
      "epoch": 0.17834394904458598,
      "grad_norm": 17.40561866760254,
      "learning_rate": 1.1304347826086957e-05,
      "loss": 0.8108,
      "step": 14
    },
    {
      "epoch": 0.267515923566879,
      "grad_norm": 97.01270294189453,
      "learning_rate": 1.739130434782609e-05,
      "loss": 0.5853,
      "step": 21
    },
    {
      "epoch": 0.35668789808917195,
      "grad_norm": 3.1324217319488525,
      "learning_rate": 1.9626168224299065e-05,
      "loss": 0.3892,
      "step": 28
    },
    {
      "epoch": 0.445859872611465,
      "grad_norm": 1.4563965797424316,
      "learning_rate": 1.8971962616822433e-05,
      "loss": 0.1388,
      "step": 35
    },
    {
      "epoch": 0.535031847133758,
      "grad_norm": 2.76413631439209,
      "learning_rate": 1.8317757009345797e-05,
      "loss": 0.089,
      "step": 42
    },
    {
      "epoch": 0.6242038216560509,
      "grad_norm": 0.3331431448459625,
      "learning_rate": 1.766355140186916e-05,
      "loss": 0.0415,
      "step": 49
    },
    {
      "epoch": 0.7133757961783439,
      "grad_norm": 0.1350180059671402,
      "learning_rate": 1.7009345794392526e-05,
      "loss": 0.0251,
      "step": 56
    },
    {
      "epoch": 0.802547770700637,
      "grad_norm": 2.5682196617126465,
      "learning_rate": 1.635514018691589e-05,
      "loss": 0.0202,
      "step": 63
    },
    {
      "epoch": 0.89171974522293,
      "grad_norm": 0.08181392401456833,
      "learning_rate": 1.5700934579439254e-05,
      "loss": 0.0297,
      "step": 70
    },
    {
      "epoch": 0.9808917197452229,
      "grad_norm": 6.680551052093506,
      "learning_rate": 1.5046728971962619e-05,
      "loss": 0.0258,
      "step": 77
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9971641261963843,
      "eval_f1": 0.9971641134681397,
      "eval_loss": 0.01230512373149395,
      "eval_runtime": 565.2201,
      "eval_samples_per_second": 29.946,
      "eval_steps_per_second": 0.469,
      "step": 79
    },
    {
      "epoch": 1.0636942675159236,
      "grad_norm": 2.224886417388916,
      "learning_rate": 1.4392523364485981e-05,
      "loss": 0.0253,
      "step": 84
    },
    {
      "epoch": 1.1528662420382165,
      "grad_norm": 3.0570154190063477,
      "learning_rate": 1.3738317757009347e-05,
      "loss": 0.0058,
      "step": 91
    },
    {
      "epoch": 1.2420382165605095,
      "grad_norm": 2.5847160816192627,
      "learning_rate": 1.308411214953271e-05,
      "loss": 0.0248,
      "step": 98
    },
    {
      "epoch": 1.3312101910828025,
      "grad_norm": 0.09943564236164093,
      "learning_rate": 1.2429906542056076e-05,
      "loss": 0.0072,
      "step": 105
    },
    {
      "epoch": 1.4203821656050954,
      "grad_norm": 0.05498664453625679,
      "learning_rate": 1.177570093457944e-05,
      "loss": 0.0038,
      "step": 112
    },
    {
      "epoch": 1.5095541401273884,
      "grad_norm": 0.04277191683650017,
      "learning_rate": 1.1121495327102804e-05,
      "loss": 0.0233,
      "step": 119
    },
    {
      "epoch": 1.5987261146496814,
      "grad_norm": 0.03371246159076691,
      "learning_rate": 1.0467289719626168e-05,
      "loss": 0.0124,
      "step": 126
    },
    {
      "epoch": 1.6878980891719744,
      "grad_norm": 2.893360137939453,
      "learning_rate": 9.813084112149533e-06,
      "loss": 0.0031,
      "step": 133
    },
    {
      "epoch": 1.7770700636942676,
      "grad_norm": 0.38178956508636475,
      "learning_rate": 9.158878504672899e-06,
      "loss": 0.0189,
      "step": 140
    },
    {
      "epoch": 1.8662420382165605,
      "grad_norm": 3.5433061122894287,
      "learning_rate": 8.504672897196263e-06,
      "loss": 0.0108,
      "step": 147
    },
    {
      "epoch": 1.9554140127388535,
      "grad_norm": 3.068488121032715,
      "learning_rate": 7.850467289719627e-06,
      "loss": 0.0227,
      "step": 154
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9977549332388042,
      "eval_f1": 0.9977549332388042,
      "eval_loss": 0.00890457071363926,
      "eval_runtime": 628.1179,
      "eval_samples_per_second": 26.947,
      "eval_steps_per_second": 0.422,
      "step": 158
    }
  ],
  "logging_steps": 7,
  "max_steps": 237,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1315567088640000.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}