{
  "best_metric": 1.0832968950271606,
  "best_model_checkpoint": "Tether2/checkpoint-774",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 774,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.046511627906976744,
      "grad_norm": 3.96823787689209,
      "learning_rate": 7.692307692307694e-06,
      "loss": 3.2522,
      "step": 12
    },
    {
      "epoch": 0.09302325581395349,
      "grad_norm": 5.03609561920166,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 3.2157,
      "step": 24
    },
    {
      "epoch": 0.13953488372093023,
      "grad_norm": 6.75431489944458,
      "learning_rate": 2.307692307692308e-05,
      "loss": 3.1114,
      "step": 36
    },
    {
      "epoch": 0.18604651162790697,
      "grad_norm": 22.154630661010742,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 2.9807,
      "step": 48
    },
    {
      "epoch": 0.23255813953488372,
      "grad_norm": 9.958882331848145,
      "learning_rate": 3.846153846153846e-05,
      "loss": 2.8858,
      "step": 60
    },
    {
      "epoch": 0.27906976744186046,
      "grad_norm": 8.20565128326416,
      "learning_rate": 4.615384615384616e-05,
      "loss": 2.7925,
      "step": 72
    },
    {
      "epoch": 0.32558139534883723,
      "grad_norm": 9.204078674316406,
      "learning_rate": 4.9568965517241384e-05,
      "loss": 2.8233,
      "step": 84
    },
    {
      "epoch": 0.37209302325581395,
      "grad_norm": 10.973845481872559,
      "learning_rate": 4.870689655172414e-05,
      "loss": 2.8384,
      "step": 96
    },
    {
      "epoch": 0.4186046511627907,
      "grad_norm": 9.10534381866455,
      "learning_rate": 4.78448275862069e-05,
      "loss": 2.6588,
      "step": 108
    },
    {
      "epoch": 0.46511627906976744,
      "grad_norm": 7.395918846130371,
      "learning_rate": 4.698275862068966e-05,
      "loss": 2.5407,
      "step": 120
    },
    {
      "epoch": 0.5116279069767442,
      "grad_norm": 12.590022087097168,
      "learning_rate": 4.612068965517242e-05,
      "loss": 2.4686,
      "step": 132
    },
    {
      "epoch": 0.5581395348837209,
      "grad_norm": 8.628605842590332,
      "learning_rate": 4.5258620689655176e-05,
      "loss": 2.3784,
      "step": 144
    },
    {
      "epoch": 0.6046511627906976,
      "grad_norm": 12.470245361328125,
      "learning_rate": 4.4396551724137933e-05,
      "loss": 2.2395,
      "step": 156
    },
    {
      "epoch": 0.6511627906976745,
      "grad_norm": 10.687325477600098,
      "learning_rate": 4.353448275862069e-05,
      "loss": 2.3603,
      "step": 168
    },
    {
      "epoch": 0.6976744186046512,
      "grad_norm": 12.147785186767578,
      "learning_rate": 4.267241379310345e-05,
      "loss": 2.3779,
      "step": 180
    },
    {
      "epoch": 0.7441860465116279,
      "grad_norm": 8.938596725463867,
      "learning_rate": 4.1810344827586205e-05,
      "loss": 2.0315,
      "step": 192
    },
    {
      "epoch": 0.7906976744186046,
      "grad_norm": 16.89861488342285,
      "learning_rate": 4.094827586206897e-05,
      "loss": 1.8416,
      "step": 204
    },
    {
      "epoch": 0.8372093023255814,
      "grad_norm": 25.249252319335938,
      "learning_rate": 4.0086206896551726e-05,
      "loss": 2.2595,
      "step": 216
    },
    {
      "epoch": 0.8837209302325582,
      "grad_norm": 19.04043960571289,
      "learning_rate": 3.922413793103448e-05,
      "loss": 2.0284,
      "step": 228
    },
    {
      "epoch": 0.9302325581395349,
      "grad_norm": 19.253995895385742,
      "learning_rate": 3.8362068965517246e-05,
      "loss": 1.9197,
      "step": 240
    },
    {
      "epoch": 0.9767441860465116,
      "grad_norm": 11.264032363891602,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.9238,
      "step": 252
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5155038759689923,
      "eval_f1_macro": 0.25891991433400224,
      "eval_f1_micro": 0.5155038759689923,
      "eval_f1_weighted": 0.46535411334465737,
      "eval_loss": 1.7225582599639893,
      "eval_precision_macro": 0.2992259890948827,
      "eval_precision_micro": 0.5155038759689923,
      "eval_precision_weighted": 0.4992061498287774,
      "eval_recall_macro": 0.27495621077215004,
      "eval_recall_micro": 0.5155038759689923,
      "eval_recall_weighted": 0.5155038759689923,
      "eval_runtime": 69.7638,
      "eval_samples_per_second": 7.396,
      "eval_steps_per_second": 0.473,
      "step": 258
    },
    {
      "epoch": 1.0232558139534884,
      "grad_norm": 11.877735137939453,
      "learning_rate": 3.663793103448276e-05,
      "loss": 1.6197,
      "step": 264
    },
    {
      "epoch": 1.069767441860465,
      "grad_norm": 15.443757057189941,
      "learning_rate": 3.5775862068965524e-05,
      "loss": 1.7034,
      "step": 276
    },
    {
      "epoch": 1.1162790697674418,
      "grad_norm": 20.39283561706543,
      "learning_rate": 3.4913793103448275e-05,
      "loss": 1.4275,
      "step": 288
    },
    {
      "epoch": 1.1627906976744187,
      "grad_norm": 21.803442001342773,
      "learning_rate": 3.405172413793103e-05,
      "loss": 1.4556,
      "step": 300
    },
    {
      "epoch": 1.2093023255813953,
      "grad_norm": 28.840351104736328,
      "learning_rate": 3.3189655172413796e-05,
      "loss": 1.6046,
      "step": 312
    },
    {
      "epoch": 1.255813953488372,
      "grad_norm": 12.907221794128418,
      "learning_rate": 3.232758620689655e-05,
      "loss": 1.579,
      "step": 324
    },
    {
      "epoch": 1.302325581395349,
      "grad_norm": 14.40406322479248,
      "learning_rate": 3.146551724137931e-05,
      "loss": 1.385,
      "step": 336
    },
    {
      "epoch": 1.3488372093023255,
      "grad_norm": 18.017547607421875,
      "learning_rate": 3.060344827586207e-05,
      "loss": 1.4536,
      "step": 348
    },
    {
      "epoch": 1.3953488372093024,
      "grad_norm": 39.87833023071289,
      "learning_rate": 2.974137931034483e-05,
      "loss": 1.5883,
      "step": 360
    },
    {
      "epoch": 1.441860465116279,
      "grad_norm": 19.605663299560547,
      "learning_rate": 2.8879310344827588e-05,
      "loss": 1.5453,
      "step": 372
    },
    {
      "epoch": 1.4883720930232558,
      "grad_norm": 12.39608383178711,
      "learning_rate": 2.8017241379310345e-05,
      "loss": 1.3432,
      "step": 384
    },
    {
      "epoch": 1.5348837209302326,
      "grad_norm": 20.467025756835938,
      "learning_rate": 2.7155172413793105e-05,
      "loss": 1.4855,
      "step": 396
    },
    {
      "epoch": 1.5813953488372094,
      "grad_norm": 12.10073184967041,
      "learning_rate": 2.6293103448275862e-05,
      "loss": 1.1955,
      "step": 408
    },
    {
      "epoch": 1.627906976744186,
      "grad_norm": 21.128217697143555,
      "learning_rate": 2.543103448275862e-05,
      "loss": 1.3656,
      "step": 420
    },
    {
      "epoch": 1.6744186046511627,
      "grad_norm": 27.748998641967773,
      "learning_rate": 2.456896551724138e-05,
      "loss": 1.403,
      "step": 432
    },
    {
      "epoch": 1.7209302325581395,
      "grad_norm": 33.165916442871094,
      "learning_rate": 2.370689655172414e-05,
      "loss": 1.2075,
      "step": 444
    },
    {
      "epoch": 1.7674418604651163,
      "grad_norm": 23.034250259399414,
      "learning_rate": 2.2844827586206897e-05,
      "loss": 1.3793,
      "step": 456
    },
    {
      "epoch": 1.8139534883720931,
      "grad_norm": 26.05168342590332,
      "learning_rate": 2.1982758620689654e-05,
      "loss": 1.1603,
      "step": 468
    },
    {
      "epoch": 1.8604651162790697,
      "grad_norm": 30.397899627685547,
      "learning_rate": 2.1120689655172415e-05,
      "loss": 1.246,
      "step": 480
    },
    {
      "epoch": 1.9069767441860463,
      "grad_norm": 26.444442749023438,
      "learning_rate": 2.0258620689655172e-05,
      "loss": 1.5224,
      "step": 492
    },
    {
      "epoch": 1.9534883720930232,
      "grad_norm": 12.157238006591797,
      "learning_rate": 1.9396551724137932e-05,
      "loss": 1.2578,
      "step": 504
    },
    {
      "epoch": 2.0,
      "grad_norm": 7.6723833084106445,
      "learning_rate": 1.8534482758620693e-05,
      "loss": 1.0881,
      "step": 516
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6337209302325582,
      "eval_f1_macro": 0.38147868610651525,
      "eval_f1_micro": 0.6337209302325582,
      "eval_f1_weighted": 0.606852249582538,
      "eval_loss": 1.1795549392700195,
      "eval_precision_macro": 0.4036122122616632,
      "eval_precision_micro": 0.6337209302325582,
      "eval_precision_weighted": 0.6131562523466949,
      "eval_recall_macro": 0.39422572999423094,
      "eval_recall_micro": 0.6337209302325582,
      "eval_recall_weighted": 0.6337209302325582,
      "eval_runtime": 71.7188,
      "eval_samples_per_second": 7.195,
      "eval_steps_per_second": 0.46,
      "step": 516
    },
    {
      "epoch": 2.046511627906977,
      "grad_norm": 24.36311149597168,
      "learning_rate": 1.767241379310345e-05,
      "loss": 0.9298,
      "step": 528
    },
    {
      "epoch": 2.0930232558139537,
      "grad_norm": 16.763669967651367,
      "learning_rate": 1.6810344827586207e-05,
      "loss": 0.9922,
      "step": 540
    },
    {
      "epoch": 2.13953488372093,
      "grad_norm": 26.640941619873047,
      "learning_rate": 1.5948275862068967e-05,
      "loss": 0.9947,
      "step": 552
    },
    {
      "epoch": 2.186046511627907,
      "grad_norm": 34.96234893798828,
      "learning_rate": 1.5086206896551724e-05,
      "loss": 0.8946,
      "step": 564
    },
    {
      "epoch": 2.2325581395348837,
      "grad_norm": 24.793346405029297,
      "learning_rate": 1.4224137931034485e-05,
      "loss": 0.9729,
      "step": 576
    },
    {
      "epoch": 2.2790697674418605,
      "grad_norm": 8.405088424682617,
      "learning_rate": 1.336206896551724e-05,
      "loss": 0.8749,
      "step": 588
    },
    {
      "epoch": 2.3255813953488373,
      "grad_norm": 25.962703704833984,
      "learning_rate": 1.25e-05,
      "loss": 0.9156,
      "step": 600
    },
    {
      "epoch": 2.3720930232558137,
      "grad_norm": 5.019805431365967,
      "learning_rate": 1.163793103448276e-05,
      "loss": 0.7937,
      "step": 612
    },
    {
      "epoch": 2.4186046511627906,
      "grad_norm": 28.168231964111328,
      "learning_rate": 1.0775862068965516e-05,
      "loss": 0.9856,
      "step": 624
    },
    {
      "epoch": 2.4651162790697674,
      "grad_norm": 21.698854446411133,
      "learning_rate": 9.913793103448277e-06,
      "loss": 0.7219,
      "step": 636
    },
    {
      "epoch": 2.511627906976744,
      "grad_norm": 18.83635139465332,
      "learning_rate": 9.051724137931036e-06,
      "loss": 0.9052,
      "step": 648
    },
    {
      "epoch": 2.558139534883721,
      "grad_norm": 14.555943489074707,
      "learning_rate": 8.189655172413793e-06,
      "loss": 0.8442,
      "step": 660
    },
    {
      "epoch": 2.604651162790698,
      "grad_norm": 28.036245346069336,
      "learning_rate": 7.3275862068965514e-06,
      "loss": 0.8283,
      "step": 672
    },
    {
      "epoch": 2.6511627906976747,
      "grad_norm": 17.01114845275879,
      "learning_rate": 6.465517241379311e-06,
      "loss": 0.8232,
      "step": 684
    },
    {
      "epoch": 2.697674418604651,
      "grad_norm": 19.991779327392578,
      "learning_rate": 5.603448275862069e-06,
      "loss": 0.7628,
      "step": 696
    },
    {
      "epoch": 2.744186046511628,
      "grad_norm": 26.620668411254883,
      "learning_rate": 4.741379310344828e-06,
      "loss": 0.7945,
      "step": 708
    },
    {
      "epoch": 2.7906976744186047,
      "grad_norm": 16.948158264160156,
      "learning_rate": 3.8793103448275865e-06,
      "loss": 0.7642,
      "step": 720
    },
    {
      "epoch": 2.8372093023255816,
      "grad_norm": 5.877094745635986,
      "learning_rate": 3.017241379310345e-06,
      "loss": 0.6525,
      "step": 732
    },
    {
      "epoch": 2.883720930232558,
      "grad_norm": 9.920417785644531,
      "learning_rate": 2.1551724137931035e-06,
      "loss": 0.7873,
      "step": 744
    },
    {
      "epoch": 2.9302325581395348,
      "grad_norm": 25.48910140991211,
      "learning_rate": 1.293103448275862e-06,
      "loss": 0.689,
      "step": 756
    },
    {
      "epoch": 2.9767441860465116,
      "grad_norm": 11.988759994506836,
      "learning_rate": 4.3103448275862073e-07,
      "loss": 0.7837,
      "step": 768
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6531007751937985,
      "eval_f1_macro": 0.41533378210893174,
      "eval_f1_micro": 0.6531007751937985,
      "eval_f1_weighted": 0.6340857128792562,
      "eval_loss": 1.0832968950271606,
      "eval_precision_macro": 0.43774810378738616,
      "eval_precision_micro": 0.6531007751937985,
      "eval_precision_weighted": 0.6386639752005375,
      "eval_recall_macro": 0.4299005706500963,
      "eval_recall_micro": 0.6531007751937985,
      "eval_recall_weighted": 0.6531007751937985,
      "eval_runtime": 74.0841,
      "eval_samples_per_second": 6.965,
      "eval_steps_per_second": 0.445,
      "step": 774
    }
  ],
  "logging_steps": 12,
  "max_steps": 774,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.01
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 406791552950784.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}