{
  "best_global_step": 8271,
  "best_metric": 0.8720680522378279,
  "best_model_checkpoint": "outputs/hate-speech-detection/vihate-t5/checkpoint-8271",
  "epoch": 9.0,
  "eval_steps": 500,
  "global_step": 8271,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.998911860718172,
      "grad_norm": 4.305943965911865,
      "learning_rate": 1.9996091589167148e-05,
      "loss": 0.4014,
      "step": 918
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8637209302325581,
      "eval_f1": 0.8576683113718502,
      "eval_loss": 0.3640185594558716,
      "eval_precision": 0.8555800625765106,
      "eval_recall": 0.8637209302325581,
      "eval_runtime": 21.0759,
      "eval_samples_per_second": 204.025,
      "eval_steps_per_second": 6.405,
      "step": 919
    },
    {
      "epoch": 1.997823721436344,
      "grad_norm": 5.548094272613525,
      "learning_rate": 1.9982377977480694e-05,
      "loss": 0.2814,
      "step": 1836
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8716279069767442,
      "eval_f1": 0.8649693053637155,
      "eval_loss": 0.3619186580181122,
      "eval_precision": 0.8605904079045565,
      "eval_recall": 0.8716279069767442,
      "eval_runtime": 21.0838,
      "eval_samples_per_second": 203.948,
      "eval_steps_per_second": 6.403,
      "step": 1838
    },
    {
      "epoch": 2.996735582154516,
      "grad_norm": 8.380730628967285,
      "learning_rate": 1.9958812963917564e-05,
      "loss": 0.191,
      "step": 2754
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.87,
      "eval_f1": 0.8628937123739281,
      "eval_loss": 0.4661368429660797,
      "eval_precision": 0.8588361297253848,
      "eval_recall": 0.87,
      "eval_runtime": 21.0807,
      "eval_samples_per_second": 203.978,
      "eval_steps_per_second": 6.404,
      "step": 2757
    },
    {
      "epoch": 3.995647442872688,
      "grad_norm": 7.982128143310547,
      "learning_rate": 1.992541980430111e-05,
      "loss": 0.1179,
      "step": 3672
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8588372093023255,
      "eval_f1": 0.8598944782360041,
      "eval_loss": 0.555857241153717,
      "eval_precision": 0.8611954815216634,
      "eval_recall": 0.8588372093023255,
      "eval_runtime": 21.0689,
      "eval_samples_per_second": 204.092,
      "eval_steps_per_second": 6.408,
      "step": 3676
    },
    {
      "epoch": 4.99455930359086,
      "grad_norm": 18.193098068237305,
      "learning_rate": 1.9882231453648264e-05,
      "loss": 0.0823,
      "step": 4590
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8693023255813953,
      "eval_f1": 0.8623605411584486,
      "eval_loss": 0.8255857229232788,
      "eval_precision": 0.8584079420364616,
      "eval_recall": 0.8693023255813953,
      "eval_runtime": 21.1139,
      "eval_samples_per_second": 203.657,
      "eval_steps_per_second": 6.394,
      "step": 4595
    },
    {
      "epoch": 5.993471164309032,
      "grad_norm": 18.74983787536621,
      "learning_rate": 1.982929053364693e-05,
      "loss": 0.0574,
      "step": 5508
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8744186046511628,
      "eval_f1": 0.8661137464376901,
      "eval_loss": 1.1137067079544067,
      "eval_precision": 0.8612640241302489,
      "eval_recall": 0.8744186046511628,
      "eval_runtime": 21.0981,
      "eval_samples_per_second": 203.81,
      "eval_steps_per_second": 6.399,
      "step": 5514
    },
    {
      "epoch": 6.992383025027204,
      "grad_norm": 26.94858169555664,
      "learning_rate": 1.9766649290593513e-05,
      "loss": 0.0508,
      "step": 6426
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8586046511627907,
      "eval_f1": 0.8607428207518112,
      "eval_loss": 1.1956874132156372,
      "eval_precision": 0.8631659432487684,
      "eval_recall": 0.8586046511627907,
      "eval_runtime": 21.1012,
      "eval_samples_per_second": 203.78,
      "eval_steps_per_second": 6.398,
      "step": 6433
    },
    {
      "epoch": 7.991294885745376,
      "grad_norm": 0.0075432900339365005,
      "learning_rate": 1.9694369543832138e-05,
      "loss": 0.0413,
      "step": 7344
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8758139534883721,
      "eval_f1": 0.8665994257720897,
      "eval_loss": 1.3685424327850342,
      "eval_precision": 0.8617321415270166,
      "eval_recall": 0.8758139534883721,
      "eval_runtime": 21.0755,
      "eval_samples_per_second": 204.029,
      "eval_steps_per_second": 6.406,
      "step": 7352
    },
    {
      "epoch": 8.990206746463548,
      "grad_norm": 0.007036261726170778,
      "learning_rate": 1.9612522624746426e-05,
      "loss": 0.0345,
      "step": 8262
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.881860465116279,
      "eval_f1": 0.8720680522378279,
      "eval_loss": 1.4441194534301758,
      "eval_precision": 0.8677296023078702,
      "eval_recall": 0.881860465116279,
      "eval_runtime": 21.0711,
      "eval_samples_per_second": 204.071,
      "eval_steps_per_second": 6.407,
      "step": 8271
    }
  ],
  "logging_steps": 918,
  "max_steps": 91900,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}
|
|