{
  "best_metric": 0.03730461373925209,
  "best_model_checkpoint": "doc-topic-model_eval-03_train-00/checkpoint-14000",
  "epoch": 9.368836291913215,
  "eval_steps": 1000,
  "global_step": 19000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2465483234714004,
      "grad_norm": 0.32839319109916687,
      "learning_rate": 1.9950690335305722e-05,
      "loss": 0.1668,
      "step": 500
    },
    {
      "epoch": 0.4930966469428008,
      "grad_norm": 0.4088590443134308,
      "learning_rate": 1.9901380670611442e-05,
      "loss": 0.0929,
      "step": 1000
    },
    {
      "epoch": 0.4930966469428008,
      "eval_accuracy": 0.981439120143997,
      "eval_f1": 0.0,
      "eval_loss": 0.09096558392047882,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.2873,
      "eval_samples_per_second": 660.032,
      "eval_steps_per_second": 2.604,
      "step": 1000
    },
    {
      "epoch": 0.7396449704142012,
      "grad_norm": 0.33391374349594116,
      "learning_rate": 1.9852071005917162e-05,
      "loss": 0.0874,
      "step": 1500
    },
    {
      "epoch": 0.9861932938856016,
      "grad_norm": 0.3162483870983124,
      "learning_rate": 1.980276134122288e-05,
      "loss": 0.0785,
      "step": 2000
    },
    {
      "epoch": 0.9861932938856016,
      "eval_accuracy": 0.981439120143997,
      "eval_f1": 0.0,
      "eval_loss": 0.07048904895782471,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.2928,
      "eval_samples_per_second": 659.735,
      "eval_steps_per_second": 2.603,
      "step": 2000
    },
    {
      "epoch": 1.232741617357002,
      "grad_norm": 0.31390729546546936,
      "learning_rate": 1.9753451676528602e-05,
      "loss": 0.0684,
      "step": 2500
    },
    {
      "epoch": 1.4792899408284024,
      "grad_norm": 0.40165144205093384,
      "learning_rate": 1.9704142011834322e-05,
      "loss": 0.0622,
      "step": 3000
    },
    {
      "epoch": 1.4792899408284024,
      "eval_accuracy": 0.9822838272602293,
      "eval_f1": 0.10407110091743119,
      "eval_loss": 0.057403650134801865,
      "eval_precision": 0.8481308411214953,
      "eval_recall": 0.05543677458766035,
      "eval_runtime": 14.5777,
      "eval_samples_per_second": 556.328,
      "eval_steps_per_second": 2.195,
      "step": 3000
    },
    {
      "epoch": 1.725838264299803,
      "grad_norm": 0.4135937988758087,
      "learning_rate": 1.965483234714004e-05,
      "loss": 0.0584,
      "step": 3500
    },
    {
      "epoch": 1.972386587771203,
      "grad_norm": 0.324859082698822,
      "learning_rate": 1.9605522682445763e-05,
      "loss": 0.0542,
      "step": 4000
    },
    {
      "epoch": 1.972386587771203,
      "eval_accuracy": 0.984075286647675,
      "eval_f1": 0.32589392848572113,
      "eval_loss": 0.050089504569768906,
      "eval_precision": 0.7603583426651735,
      "eval_recall": 0.2073915699450214,
      "eval_runtime": 14.5532,
      "eval_samples_per_second": 557.265,
      "eval_steps_per_second": 2.199,
      "step": 4000
    },
    {
      "epoch": 2.2189349112426036,
      "grad_norm": 0.3279891312122345,
      "learning_rate": 1.9556213017751483e-05,
      "loss": 0.0488,
      "step": 4500
    },
    {
      "epoch": 2.465483234714004,
      "grad_norm": 0.4424910843372345,
      "learning_rate": 1.95069033530572e-05,
      "loss": 0.048,
      "step": 5000
    },
    {
      "epoch": 2.465483234714004,
      "eval_accuracy": 0.9851410915996995,
      "eval_f1": 0.42064544650751545,
      "eval_loss": 0.04619205370545387,
      "eval_precision": 0.7612,
      "eval_recall": 0.2906230910201588,
      "eval_runtime": 14.7109,
      "eval_samples_per_second": 551.293,
      "eval_steps_per_second": 2.175,
      "step": 5000
    },
    {
      "epoch": 2.712031558185404,
      "grad_norm": 0.3125520646572113,
      "learning_rate": 1.9457593688362923e-05,
      "loss": 0.0453,
      "step": 5500
    },
    {
      "epoch": 2.9585798816568047,
      "grad_norm": 0.4686921536922455,
      "learning_rate": 1.940828402366864e-05,
      "loss": 0.0436,
      "step": 6000
    },
    {
      "epoch": 2.9585798816568047,
      "eval_accuracy": 0.985964539308644,
      "eval_f1": 0.5017859838003723,
      "eval_loss": 0.043464988470077515,
      "eval_precision": 0.7354372511428993,
      "eval_recall": 0.38080329871716556,
      "eval_runtime": 14.5175,
      "eval_samples_per_second": 558.638,
      "eval_steps_per_second": 2.204,
      "step": 6000
    },
    {
      "epoch": 3.2051282051282053,
      "grad_norm": 0.3562796711921692,
      "learning_rate": 1.935897435897436e-05,
      "loss": 0.0412,
      "step": 6500
    },
    {
      "epoch": 3.4516765285996054,
      "grad_norm": 0.24857360124588013,
      "learning_rate": 1.930966469428008e-05,
      "loss": 0.0384,
      "step": 7000
    },
    {
      "epoch": 3.4516765285996054,
      "eval_accuracy": 0.9862848477117792,
      "eval_f1": 0.533571118715959,
      "eval_loss": 0.04163265973329544,
      "eval_precision": 0.7234348451182852,
      "eval_recall": 0.42264813683567504,
      "eval_runtime": 14.4229,
      "eval_samples_per_second": 562.3,
      "eval_steps_per_second": 2.219,
      "step": 7000
    },
    {
      "epoch": 3.698224852071006,
      "grad_norm": 0.47940972447395325,
      "learning_rate": 1.92603550295858e-05,
      "loss": 0.0394,
      "step": 7500
    },
    {
      "epoch": 3.9447731755424065,
      "grad_norm": 0.39106041193008423,
      "learning_rate": 1.921104536489152e-05,
      "loss": 0.0385,
      "step": 8000
    },
    {
      "epoch": 3.9447731755424065,
      "eval_accuracy": 0.9865073628413906,
      "eval_f1": 0.5278714540765721,
      "eval_loss": 0.04007096588611603,
      "eval_precision": 0.7529711375212224,
      "eval_recall": 0.4063836285888821,
      "eval_runtime": 14.4262,
      "eval_samples_per_second": 562.172,
      "eval_steps_per_second": 2.218,
      "step": 8000
    },
    {
      "epoch": 4.191321499013807,
      "grad_norm": 0.4470931887626648,
      "learning_rate": 1.916173570019724e-05,
      "loss": 0.0342,
      "step": 8500
    },
    {
      "epoch": 4.437869822485207,
      "grad_norm": 0.33709824085235596,
      "learning_rate": 1.911242603550296e-05,
      "loss": 0.0343,
      "step": 9000
    },
    {
      "epoch": 4.437869822485207,
      "eval_accuracy": 0.9867497200844707,
      "eval_f1": 0.5560146269649048,
      "eval_loss": 0.03989700973033905,
      "eval_precision": 0.7353347569400829,
      "eval_recall": 0.4470067196090409,
      "eval_runtime": 14.3883,
      "eval_samples_per_second": 563.652,
      "eval_steps_per_second": 2.224,
      "step": 9000
    },
    {
      "epoch": 4.684418145956608,
      "grad_norm": 0.3310143053531647,
      "learning_rate": 1.906311637080868e-05,
      "loss": 0.0343,
      "step": 9500
    },
    {
      "epoch": 4.930966469428008,
      "grad_norm": 0.5156289935112,
      "learning_rate": 1.90138067061144e-05,
      "loss": 0.0343,
      "step": 10000
    },
    {
      "epoch": 4.930966469428008,
      "eval_accuracy": 0.9871649871734909,
      "eval_f1": 0.5751548132857947,
      "eval_loss": 0.03866345435380936,
      "eval_precision": 0.745742092457421,
      "eval_recall": 0.46808185705558947,
      "eval_runtime": 14.4627,
      "eval_samples_per_second": 560.752,
      "eval_steps_per_second": 2.213,
      "step": 10000
    },
    {
      "epoch": 5.177514792899408,
      "grad_norm": 0.3030126988887787,
      "learning_rate": 1.896459566074951e-05,
      "loss": 0.0314,
      "step": 10500
    },
    {
      "epoch": 5.424063116370808,
      "grad_norm": 0.49180835485458374,
      "learning_rate": 1.891528599605523e-05,
      "loss": 0.0304,
      "step": 11000
    },
    {
      "epoch": 5.424063116370808,
      "eval_accuracy": 0.9870048329719234,
      "eval_f1": 0.5786111494094398,
      "eval_loss": 0.03876357153058052,
      "eval_precision": 0.7266535842087036,
      "eval_recall": 0.4806811240073305,
      "eval_runtime": 14.5882,
      "eval_samples_per_second": 555.927,
      "eval_steps_per_second": 2.194,
      "step": 11000
    },
    {
      "epoch": 5.670611439842209,
      "grad_norm": 0.4685238003730774,
      "learning_rate": 1.886597633136095e-05,
      "loss": 0.0297,
      "step": 11500
    },
    {
      "epoch": 5.9171597633136095,
      "grad_norm": 0.46912193298339844,
      "learning_rate": 1.881666666666667e-05,
      "loss": 0.0299,
      "step": 12000
    },
    {
      "epoch": 5.9171597633136095,
      "eval_accuracy": 0.9874016752412943,
      "eval_f1": 0.6033290195903431,
      "eval_loss": 0.03738298639655113,
      "eval_precision": 0.7258670675399979,
      "eval_recall": 0.516188149053146,
      "eval_runtime": 14.6172,
      "eval_samples_per_second": 554.827,
      "eval_steps_per_second": 2.189,
      "step": 12000
    },
    {
      "epoch": 6.16370808678501,
      "grad_norm": 0.3353117108345032,
      "learning_rate": 1.876735700197239e-05,
      "loss": 0.0276,
      "step": 12500
    },
    {
      "epoch": 6.410256410256411,
      "grad_norm": 0.2552751898765564,
      "learning_rate": 1.8718145956607497e-05,
      "loss": 0.0265,
      "step": 13000
    },
    {
      "epoch": 6.410256410256411,
      "eval_accuracy": 0.9873634083081764,
      "eval_f1": 0.6095980383571241,
      "eval_loss": 0.0379130057990551,
      "eval_precision": 0.7145350030794498,
      "eval_recall": 0.5315363469761759,
      "eval_runtime": 14.5578,
      "eval_samples_per_second": 557.089,
      "eval_steps_per_second": 2.198,
      "step": 13000
    },
    {
      "epoch": 6.65680473372781,
      "grad_norm": 0.2560911774635315,
      "learning_rate": 1.8668836291913217e-05,
      "loss": 0.0273,
      "step": 13500
    },
    {
      "epoch": 6.903353057199211,
      "grad_norm": 0.4542127549648285,
      "learning_rate": 1.8619526627218937e-05,
      "loss": 0.0261,
      "step": 14000
    },
    {
      "epoch": 6.903353057199211,
      "eval_accuracy": 0.9875434046232124,
      "eval_f1": 0.607195530726257,
      "eval_loss": 0.03730461373925209,
      "eval_precision": 0.732083198620541,
      "eval_recall": 0.5187080024434942,
      "eval_runtime": 14.4386,
      "eval_samples_per_second": 561.69,
      "eval_steps_per_second": 2.216,
      "step": 14000
    },
    {
      "epoch": 7.149901380670611,
      "grad_norm": 0.2716215252876282,
      "learning_rate": 1.8570216962524657e-05,
      "loss": 0.025,
      "step": 14500
    },
    {
      "epoch": 7.396449704142012,
      "grad_norm": 0.2959195077419281,
      "learning_rate": 1.8520907297830377e-05,
      "loss": 0.0236,
      "step": 15000
    },
    {
      "epoch": 7.396449704142012,
      "eval_accuracy": 0.9876227730770866,
      "eval_f1": 0.6189624329159212,
      "eval_loss": 0.03789689019322395,
      "eval_precision": 0.7220808307034511,
      "eval_recall": 0.5416157605375688,
      "eval_runtime": 14.4749,
      "eval_samples_per_second": 560.279,
      "eval_steps_per_second": 2.211,
      "step": 15000
    },
    {
      "epoch": 7.642998027613412,
      "grad_norm": 0.44261014461517334,
      "learning_rate": 1.8471696252465485e-05,
      "loss": 0.0225,
      "step": 15500
    },
    {
      "epoch": 7.889546351084813,
      "grad_norm": 0.721012532711029,
      "learning_rate": 1.8422386587771205e-05,
      "loss": 0.0236,
      "step": 16000
    },
    {
      "epoch": 7.889546351084813,
      "eval_accuracy": 0.9877744235157391,
      "eval_f1": 0.620234216782601,
      "eval_loss": 0.03789624571800232,
      "eval_precision": 0.7323767935121647,
      "eval_recall": 0.5378741600488699,
      "eval_runtime": 14.3724,
      "eval_samples_per_second": 564.276,
      "eval_steps_per_second": 2.226,
      "step": 16000
    },
    {
      "epoch": 8.136094674556213,
      "grad_norm": 0.2590983510017395,
      "learning_rate": 1.8373076923076926e-05,
      "loss": 0.0219,
      "step": 16500
    },
    {
      "epoch": 8.382642998027613,
      "grad_norm": 0.6246200203895569,
      "learning_rate": 1.8323767258382646e-05,
      "loss": 0.0215,
      "step": 17000
    },
    {
      "epoch": 8.382642998027613,
      "eval_accuracy": 0.9877148971753335,
      "eval_f1": 0.6290018832391714,
      "eval_loss": 0.03818012773990631,
      "eval_precision": 0.7156213478768991,
      "eval_recall": 0.561087354917532,
      "eval_runtime": 14.4199,
      "eval_samples_per_second": 562.417,
      "eval_steps_per_second": 2.219,
      "step": 17000
    },
    {
      "epoch": 8.629191321499015,
      "grad_norm": 0.46473428606987,
      "learning_rate": 1.8274457593688362e-05,
      "loss": 0.0204,
      "step": 17500
    },
    {
      "epoch": 8.875739644970414,
      "grad_norm": 0.5553109049797058,
      "learning_rate": 1.822524654832347e-05,
      "loss": 0.0216,
      "step": 18000
    },
    {
      "epoch": 8.875739644970414,
      "eval_accuracy": 0.9877404084640787,
      "eval_f1": 0.6304682159945317,
      "eval_loss": 0.03833911567926407,
      "eval_precision": 0.7155740884406516,
      "eval_recall": 0.5634544899205864,
      "eval_runtime": 14.4815,
      "eval_samples_per_second": 560.025,
      "eval_steps_per_second": 2.21,
      "step": 18000
    },
    {
      "epoch": 9.122287968441814,
      "grad_norm": 0.3819935917854309,
      "learning_rate": 1.8175936883629194e-05,
      "loss": 0.0194,
      "step": 18500
    },
    {
      "epoch": 9.368836291913215,
      "grad_norm": 0.5073143243789673,
      "learning_rate": 1.8126627218934914e-05,
      "loss": 0.0177,
      "step": 19000
    },
    {
      "epoch": 9.368836291913215,
      "eval_accuracy": 0.9878481227943365,
      "eval_f1": 0.6344956944326029,
      "eval_loss": 0.038566723465919495,
      "eval_precision": 0.7182011194750049,
      "eval_recall": 0.568265119120342,
      "eval_runtime": 14.4157,
      "eval_samples_per_second": 562.582,
      "eval_steps_per_second": 2.22,
      "step": 19000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 430075849523328.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}