{
  "best_metric": 0.037335559725761414,
  "best_model_checkpoint": "doc-topic-model_eval-00_train-01/checkpoint-15000",
  "epoch": 9.861932938856016,
  "eval_steps": 1000,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2465483234714004,
      "grad_norm": 0.32877257466316223,
      "learning_rate": 1.9950690335305722e-05,
      "loss": 0.166,
      "step": 500
    },
    {
      "epoch": 0.4930966469428008,
      "grad_norm": 0.3466370701789856,
      "learning_rate": 1.9901380670611442e-05,
      "loss": 0.0936,
      "step": 1000
    },
    {
      "epoch": 0.4930966469428008,
      "eval_accuracy": 0.9814660487265615,
      "eval_f1": 0.0,
      "eval_loss": 0.08824141323566437,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 11.7844,
      "eval_samples_per_second": 688.2,
      "eval_steps_per_second": 2.715,
      "step": 1000
    },
    {
      "epoch": 0.7396449704142012,
      "grad_norm": 0.4002707004547119,
      "learning_rate": 1.9852071005917162e-05,
      "loss": 0.0851,
      "step": 1500
    },
    {
      "epoch": 0.9861932938856016,
      "grad_norm": 0.3627968430519104,
      "learning_rate": 1.980276134122288e-05,
      "loss": 0.0754,
      "step": 2000
    },
    {
      "epoch": 0.9861932938856016,
      "eval_accuracy": 0.9814674660203807,
      "eval_f1": 0.0006114338122898196,
      "eval_loss": 0.06823527067899704,
      "eval_precision": 0.5714285714285714,
      "eval_recall": 0.0003058805536438021,
      "eval_runtime": 14.2712,
      "eval_samples_per_second": 568.277,
      "eval_steps_per_second": 2.242,
      "step": 2000
    },
    {
      "epoch": 1.232741617357002,
      "grad_norm": 0.44398266077041626,
      "learning_rate": 1.9753451676528602e-05,
      "loss": 0.0668,
      "step": 2500
    },
    {
      "epoch": 1.4792899408284024,
      "grad_norm": 0.4649547040462494,
      "learning_rate": 1.9704142011834322e-05,
      "loss": 0.0614,
      "step": 3000
    },
    {
      "epoch": 1.4792899408284024,
      "eval_accuracy": 0.9824312258174243,
      "eval_f1": 0.14628099173553719,
      "eval_loss": 0.05610091611742973,
      "eval_precision": 0.735966735966736,
      "eval_recall": 0.08121128699242945,
      "eval_runtime": 14.1573,
      "eval_samples_per_second": 572.849,
      "eval_steps_per_second": 2.26,
      "step": 3000
    },
    {
      "epoch": 1.725838264299803,
      "grad_norm": 0.3433184325695038,
      "learning_rate": 1.965483234714004e-05,
      "loss": 0.057,
      "step": 3500
    },
    {
      "epoch": 1.972386587771203,
      "grad_norm": 0.38863804936408997,
      "learning_rate": 1.9605522682445763e-05,
      "loss": 0.053,
      "step": 4000
    },
    {
      "epoch": 1.972386587771203,
      "eval_accuracy": 0.9842269370863274,
      "eval_f1": 0.32069828480742235,
      "eval_loss": 0.049950942397117615,
      "eval_precision": 0.794615849969752,
      "eval_recall": 0.20088705360556702,
      "eval_runtime": 14.2839,
      "eval_samples_per_second": 567.773,
      "eval_steps_per_second": 2.24,
      "step": 4000
    },
    {
      "epoch": 2.2189349112426036,
      "grad_norm": 0.2720654606819153,
      "learning_rate": 1.9556213017751483e-05,
      "loss": 0.0492,
      "step": 4500
    },
    {
      "epoch": 2.465483234714004,
      "grad_norm": 0.4366288483142853,
      "learning_rate": 1.95069033530572e-05,
      "loss": 0.0477,
      "step": 5000
    },
    {
      "epoch": 2.465483234714004,
      "eval_accuracy": 0.9852785691001602,
      "eval_f1": 0.44534629145084637,
      "eval_loss": 0.04629644379019737,
      "eval_precision": 0.7380530973451327,
      "eval_recall": 0.3188804771736637,
      "eval_runtime": 14.1895,
      "eval_samples_per_second": 571.549,
      "eval_steps_per_second": 2.255,
      "step": 5000
    },
    {
      "epoch": 2.712031558185404,
      "grad_norm": 0.48470863699913025,
      "learning_rate": 1.9457593688362923e-05,
      "loss": 0.0456,
      "step": 5500
    },
    {
      "epoch": 2.9585798816568047,
      "grad_norm": 0.4175472855567932,
      "learning_rate": 1.940828402366864e-05,
      "loss": 0.0445,
      "step": 6000
    },
    {
      "epoch": 2.9585798816568047,
      "eval_accuracy": 0.9859120994373344,
      "eval_f1": 0.483153078202995,
      "eval_loss": 0.04348384588956833,
      "eval_precision": 0.7548334687246141,
      "eval_recall": 0.3552802630572761,
      "eval_runtime": 14.1524,
      "eval_samples_per_second": 573.047,
      "eval_steps_per_second": 2.261,
      "step": 6000
    },
    {
      "epoch": 3.2051282051282053,
      "grad_norm": 0.31389564275741577,
      "learning_rate": 1.935897435897436e-05,
      "loss": 0.0417,
      "step": 6500
    },
    {
      "epoch": 3.4516765285996054,
      "grad_norm": 0.3301604390144348,
      "learning_rate": 1.930966469428008e-05,
      "loss": 0.0385,
      "step": 7000
    },
    {
      "epoch": 3.4516765285996054,
      "eval_accuracy": 0.9865385433054127,
      "eval_f1": 0.5405823739963239,
      "eval_loss": 0.040967244654893875,
      "eval_precision": 0.7355535079636699,
      "eval_recall": 0.4273151334403915,
      "eval_runtime": 14.195,
      "eval_samples_per_second": 571.327,
      "eval_steps_per_second": 2.254,
      "step": 7000
    },
    {
      "epoch": 3.698224852071006,
      "grad_norm": 0.4754754900932312,
      "learning_rate": 1.92603550295858e-05,
      "loss": 0.0395,
      "step": 7500
    },
    {
      "epoch": 3.9447731755424065,
      "grad_norm": 0.5182355046272278,
      "learning_rate": 1.921104536489152e-05,
      "loss": 0.0384,
      "step": 8000
    },
    {
      "epoch": 3.9447731755424065,
      "eval_accuracy": 0.9867227915019062,
      "eval_f1": 0.5643195981769138,
      "eval_loss": 0.03995988145470619,
      "eval_precision": 0.7201186943620178,
      "eval_recall": 0.4639443297392368,
      "eval_runtime": 14.2921,
      "eval_samples_per_second": 567.448,
      "eval_steps_per_second": 2.239,
      "step": 8000
    },
    {
      "epoch": 4.191321499013807,
      "grad_norm": 0.3854539394378662,
      "learning_rate": 1.916173570019724e-05,
      "loss": 0.0353,
      "step": 8500
    },
    {
      "epoch": 4.437869822485207,
      "grad_norm": 0.36082467436790466,
      "learning_rate": 1.911242603550296e-05,
      "loss": 0.0347,
      "step": 9000
    },
    {
      "epoch": 4.437869822485207,
      "eval_accuracy": 0.987001998384285,
      "eval_f1": 0.5796011918404768,
      "eval_loss": 0.0386175811290741,
      "eval_precision": 0.7235065232318608,
      "eval_recall": 0.48344421503402923,
      "eval_runtime": 14.1561,
      "eval_samples_per_second": 572.899,
      "eval_steps_per_second": 2.261,
      "step": 9000
    },
    {
      "epoch": 4.684418145956608,
      "grad_norm": 0.5347279906272888,
      "learning_rate": 1.906311637080868e-05,
      "loss": 0.035,
      "step": 9500
    },
    {
      "epoch": 4.930966469428008,
      "grad_norm": 0.3976365029811859,
      "learning_rate": 1.90138067061144e-05,
      "loss": 0.0336,
      "step": 10000
    },
    {
      "epoch": 4.930966469428008,
      "eval_accuracy": 0.9872712842099296,
      "eval_f1": 0.5971019694046925,
      "eval_loss": 0.038116298615932465,
      "eval_precision": 0.7222704579986976,
      "eval_recall": 0.5089087711248758,
      "eval_runtime": 14.154,
      "eval_samples_per_second": 572.984,
      "eval_steps_per_second": 2.261,
      "step": 10000
    },
    {
      "epoch": 5.177514792899408,
      "grad_norm": 0.2203921228647232,
      "learning_rate": 1.896459566074951e-05,
      "loss": 0.0309,
      "step": 10500
    },
    {
      "epoch": 5.424063116370808,
      "grad_norm": 0.42421755194664,
      "learning_rate": 1.891528599605523e-05,
      "loss": 0.0299,
      "step": 11000
    },
    {
      "epoch": 5.424063116370808,
      "eval_accuracy": 0.9875249798035631,
      "eval_f1": 0.5941160195517846,
      "eval_loss": 0.037353336811065674,
      "eval_precision": 0.7482866767336508,
      "eval_recall": 0.4926206316433433,
      "eval_runtime": 14.1941,
      "eval_samples_per_second": 571.362,
      "eval_steps_per_second": 2.254,
      "step": 11000
    },
    {
      "epoch": 5.670611439842209,
      "grad_norm": 0.321264386177063,
      "learning_rate": 1.886597633136095e-05,
      "loss": 0.0314,
      "step": 11500
    },
    {
      "epoch": 5.9171597633136095,
      "grad_norm": 0.4067309498786926,
      "learning_rate": 1.881666666666667e-05,
      "loss": 0.0299,
      "step": 12000
    },
    {
      "epoch": 5.9171597633136095,
      "eval_accuracy": 0.9873520699576229,
      "eval_f1": 0.5978006129439336,
      "eval_loss": 0.03748217597603798,
      "eval_precision": 0.7279113159916585,
      "eval_recall": 0.5071499579414239,
      "eval_runtime": 14.2272,
      "eval_samples_per_second": 570.035,
      "eval_steps_per_second": 2.249,
      "step": 12000
    },
    {
      "epoch": 6.16370808678501,
      "grad_norm": 0.24124543368816376,
      "learning_rate": 1.8767455621301777e-05,
      "loss": 0.0272,
      "step": 12500
    },
    {
      "epoch": 6.410256410256411,
      "grad_norm": 0.35210439562797546,
      "learning_rate": 1.8718145956607497e-05,
      "loss": 0.0265,
      "step": 13000
    },
    {
      "epoch": 6.410256410256411,
      "eval_accuracy": 0.9873733293649106,
      "eval_f1": 0.6035334431044457,
      "eval_loss": 0.03765496611595154,
      "eval_precision": 0.7218437300404513,
      "eval_recall": 0.5185440085646555,
      "eval_runtime": 14.1601,
      "eval_samples_per_second": 572.737,
      "eval_steps_per_second": 2.26,
      "step": 13000
    },
    {
      "epoch": 6.65680473372781,
      "grad_norm": 0.24084919691085815,
      "learning_rate": 1.8668836291913217e-05,
      "loss": 0.026,
      "step": 13500
    },
    {
      "epoch": 6.903353057199211,
      "grad_norm": 0.2715472877025604,
      "learning_rate": 1.8619526627218937e-05,
      "loss": 0.0271,
      "step": 14000
    },
    {
      "epoch": 6.903353057199211,
      "eval_accuracy": 0.9872089232818856,
      "eval_f1": 0.6060500240080318,
      "eval_loss": 0.037930626422166824,
      "eval_precision": 0.7060618388934092,
      "eval_recall": 0.5308557008488185,
      "eval_runtime": 14.2508,
      "eval_samples_per_second": 569.091,
      "eval_steps_per_second": 2.245,
      "step": 14000
    },
    {
      "epoch": 7.149901380670611,
      "grad_norm": 0.4131653904914856,
      "learning_rate": 1.8570315581854045e-05,
      "loss": 0.0245,
      "step": 14500
    },
    {
      "epoch": 7.396449704142012,
      "grad_norm": 0.4167553186416626,
      "learning_rate": 1.8521005917159765e-05,
      "loss": 0.0229,
      "step": 15000
    },
    {
      "epoch": 7.396449704142012,
      "eval_accuracy": 0.9876766302422155,
      "eval_f1": 0.6253931325664556,
      "eval_loss": 0.037335559725761414,
      "eval_precision": 0.7162028813893823,
      "eval_recall": 0.5550202645866789,
      "eval_runtime": 14.1623,
      "eval_samples_per_second": 572.647,
      "eval_steps_per_second": 2.26,
      "step": 15000
    },
    {
      "epoch": 7.642998027613412,
      "grad_norm": 0.2288493514060974,
      "learning_rate": 1.8471696252465485e-05,
      "loss": 0.0236,
      "step": 15500
    },
    {
      "epoch": 7.889546351084813,
      "grad_norm": 0.43076005578041077,
      "learning_rate": 1.8422386587771205e-05,
      "loss": 0.0245,
      "step": 16000
    },
    {
      "epoch": 7.889546351084813,
      "eval_accuracy": 0.9878849724336353,
      "eval_f1": 0.6295076282940361,
      "eval_loss": 0.037781719118356705,
      "eval_precision": 0.7265632816408204,
      "eval_recall": 0.5553261451403227,
      "eval_runtime": 14.1262,
      "eval_samples_per_second": 574.112,
      "eval_steps_per_second": 2.265,
      "step": 16000
    },
    {
      "epoch": 8.136094674556213,
      "grad_norm": 0.20288299024105072,
      "learning_rate": 1.8373076923076926e-05,
      "loss": 0.0214,
      "step": 16500
    },
    {
      "epoch": 8.382642998027613,
      "grad_norm": 0.5671559572219849,
      "learning_rate": 1.8323865877712033e-05,
      "loss": 0.0205,
      "step": 17000
    },
    {
      "epoch": 8.382642998027613,
      "eval_accuracy": 0.9875915926130646,
      "eval_f1": 0.6300443693217832,
      "eval_loss": 0.03755596652626991,
      "eval_precision": 0.7040989799773328,
      "eval_recall": 0.5700848818536361,
      "eval_runtime": 14.1084,
      "eval_samples_per_second": 574.834,
      "eval_steps_per_second": 2.268,
      "step": 17000
    },
    {
      "epoch": 8.629191321499015,
      "grad_norm": 0.2839369475841522,
      "learning_rate": 1.8274556213017754e-05,
      "loss": 0.0208,
      "step": 17500
    },
    {
      "epoch": 8.875739644970414,
      "grad_norm": 0.5970295667648315,
      "learning_rate": 1.822524654832347e-05,
      "loss": 0.0213,
      "step": 18000
    },
    {
      "epoch": 8.875739644970414,
      "eval_accuracy": 0.9877545814022705,
      "eval_f1": 0.6302636083533036,
      "eval_loss": 0.038452088832855225,
      "eval_precision": 0.7155767175201633,
      "eval_recall": 0.5631260992582396,
      "eval_runtime": 14.1115,
      "eval_samples_per_second": 574.709,
      "eval_steps_per_second": 2.268,
      "step": 18000
    },
    {
      "epoch": 9.122287968441814,
      "grad_norm": 0.6406568288803101,
      "learning_rate": 1.8175936883629194e-05,
      "loss": 0.0198,
      "step": 18500
    },
    {
      "epoch": 9.368836291913215,
      "grad_norm": 0.6294471621513367,
      "learning_rate": 1.8126725838264302e-05,
      "loss": 0.0183,
      "step": 19000
    },
    {
      "epoch": 9.368836291913215,
      "eval_accuracy": 0.9877602505775472,
      "eval_f1": 0.6299597223412461,
      "eval_loss": 0.0388527438044548,
      "eval_precision": 0.71640191014521,
      "eval_recall": 0.5621319874588973,
      "eval_runtime": 14.3395,
      "eval_samples_per_second": 565.571,
      "eval_steps_per_second": 2.232,
      "step": 19000
    },
    {
      "epoch": 9.615384615384615,
      "grad_norm": 0.3235713541507721,
      "learning_rate": 1.8077416173570022e-05,
      "loss": 0.0183,
      "step": 19500
    },
    {
      "epoch": 9.861932938856016,
      "grad_norm": 0.3706725239753723,
      "learning_rate": 1.802810650887574e-05,
      "loss": 0.0182,
      "step": 20000
    },
    {
      "epoch": 9.861932938856016,
      "eval_accuracy": 0.9877574159899088,
      "eval_f1": 0.6321124361158432,
      "eval_loss": 0.03977630287408829,
      "eval_precision": 0.7133519177160434,
      "eval_recall": 0.5674848971476638,
      "eval_runtime": 14.1791,
      "eval_samples_per_second": 571.968,
      "eval_steps_per_second": 2.257,
      "step": 20000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 452312037970056.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}