{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 285,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.017543859649122806,
      "grad_norm": 1.052474021911621,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 1.3678,
      "step": 5
    },
    {
      "epoch": 0.03508771929824561,
      "grad_norm": 0.8925581574440002,
      "learning_rate": 3.75e-06,
      "loss": 1.341,
      "step": 10
    },
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 0.5862356424331665,
      "learning_rate": 5.833333333333334e-06,
      "loss": 1.3559,
      "step": 15
    },
    {
      "epoch": 0.07017543859649122,
      "grad_norm": 0.605962872505188,
      "learning_rate": 7.916666666666667e-06,
      "loss": 1.3164,
      "step": 20
    },
    {
      "epoch": 0.08771929824561403,
      "grad_norm": 0.719713568687439,
      "learning_rate": 9.999999999999999e-06,
      "loss": 1.2669,
      "step": 25
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 0.7025077939033508,
      "learning_rate": 1.2083333333333333e-05,
      "loss": 1.2975,
      "step": 30
    },
    {
      "epoch": 0.12280701754385964,
      "grad_norm": 0.6195961833000183,
      "learning_rate": 1.4166666666666666e-05,
      "loss": 1.2555,
      "step": 35
    },
    {
      "epoch": 0.14035087719298245,
      "grad_norm": 0.5059925317764282,
      "learning_rate": 1.625e-05,
      "loss": 1.2483,
      "step": 40
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 0.44754377007484436,
      "learning_rate": 1.8333333333333336e-05,
      "loss": 1.1871,
      "step": 45
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 0.4830501675605774,
      "learning_rate": 2.0416666666666667e-05,
      "loss": 1.2557,
      "step": 50
    },
    {
      "epoch": 0.19298245614035087,
      "grad_norm": 0.5137682557106018,
      "learning_rate": 2.25e-05,
      "loss": 1.2327,
      "step": 55
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 0.6771454811096191,
      "learning_rate": 2.4583333333333332e-05,
      "loss": 1.163,
      "step": 60
    },
    {
      "epoch": 0.22807017543859648,
      "grad_norm": 0.47523486614227295,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 1.2184,
      "step": 65
    },
    {
      "epoch": 0.24561403508771928,
      "grad_norm": 0.5108222365379333,
      "learning_rate": 2.875e-05,
      "loss": 1.1653,
      "step": 70
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 0.5787661075592041,
      "learning_rate": 2.9999838257323155e-05,
      "loss": 1.1365,
      "step": 75
    },
    {
      "epoch": 0.2807017543859649,
      "grad_norm": 0.5562297701835632,
      "learning_rate": 2.999801869226695e-05,
      "loss": 1.1322,
      "step": 80
    },
    {
      "epoch": 0.2982456140350877,
      "grad_norm": 0.49115923047065735,
      "learning_rate": 2.9994177629874796e-05,
      "loss": 1.1604,
      "step": 85
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 0.5512995719909668,
      "learning_rate": 2.998831558786157e-05,
      "loss": 1.1242,
      "step": 90
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 0.5950692892074585,
      "learning_rate": 2.9980433356338452e-05,
      "loss": 1.1483,
      "step": 95
    },
    {
      "epoch": 0.3508771929824561,
      "grad_norm": 0.5971149802207947,
      "learning_rate": 2.997053199770644e-05,
      "loss": 1.0541,
      "step": 100
    },
    {
      "epoch": 0.3684210526315789,
      "grad_norm": 0.6665179133415222,
      "learning_rate": 2.9958612846513155e-05,
      "loss": 1.0925,
      "step": 105
    },
    {
      "epoch": 0.38596491228070173,
      "grad_norm": 0.6457589864730835,
      "learning_rate": 2.994467750927295e-05,
      "loss": 1.0314,
      "step": 110
    },
    {
      "epoch": 0.40350877192982454,
      "grad_norm": 0.6131024956703186,
      "learning_rate": 2.99287278642504e-05,
      "loss": 1.0386,
      "step": 115
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 0.7078121900558472,
      "learning_rate": 2.991076606120712e-05,
      "loss": 1.0386,
      "step": 120
    },
    {
      "epoch": 0.43859649122807015,
      "grad_norm": 0.6109601259231567,
      "learning_rate": 2.989079452111202e-05,
      "loss": 1.0023,
      "step": 125
    },
    {
      "epoch": 0.45614035087719296,
      "grad_norm": 0.6941441297531128,
      "learning_rate": 2.9868815935815e-05,
      "loss": 0.9594,
      "step": 130
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 0.7532424330711365,
      "learning_rate": 2.9844833267684125e-05,
      "loss": 0.9634,
      "step": 135
    },
    {
      "epoch": 0.49122807017543857,
      "grad_norm": 0.6735473275184631,
      "learning_rate": 2.981884974920635e-05,
      "loss": 0.9899,
      "step": 140
    },
    {
      "epoch": 0.5087719298245614,
      "grad_norm": 0.6795721054077148,
      "learning_rate": 2.9790868882551826e-05,
      "loss": 0.9987,
      "step": 145
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 0.8002466559410095,
      "learning_rate": 2.9760894439101858e-05,
      "loss": 0.9458,
      "step": 150
    },
    {
      "epoch": 0.543859649122807,
      "grad_norm": 0.7615883946418762,
      "learning_rate": 2.97289304589406e-05,
      "loss": 0.9223,
      "step": 155
    },
    {
      "epoch": 0.5614035087719298,
      "grad_norm": 0.7222945094108582,
      "learning_rate": 2.96949812503105e-05,
      "loss": 0.9075,
      "step": 160
    },
    {
      "epoch": 0.5789473684210527,
      "grad_norm": 0.7469078302383423,
      "learning_rate": 2.9659051389031616e-05,
      "loss": 0.9037,
      "step": 165
    },
    {
      "epoch": 0.5964912280701754,
      "grad_norm": 0.7908621430397034,
      "learning_rate": 2.9621145717884875e-05,
      "loss": 0.8794,
      "step": 170
    },
    {
      "epoch": 0.6140350877192983,
      "grad_norm": 0.8037143349647522,
      "learning_rate": 2.9581269345959334e-05,
      "loss": 0.8801,
      "step": 175
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 0.8847508430480957,
      "learning_rate": 2.953942764796357e-05,
      "loss": 0.9097,
      "step": 180
    },
    {
      "epoch": 0.6491228070175439,
      "grad_norm": 0.8524879217147827,
      "learning_rate": 2.9495626263501234e-05,
      "loss": 0.858,
      "step": 185
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.7811633944511414,
      "learning_rate": 2.9449871096310937e-05,
      "loss": 0.8413,
      "step": 190
    },
    {
      "epoch": 0.6842105263157895,
      "grad_norm": 0.797070324420929,
      "learning_rate": 2.940216831347051e-05,
      "loss": 0.8822,
      "step": 195
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 0.8127879500389099,
      "learning_rate": 2.935252434456578e-05,
      "loss": 0.8048,
      "step": 200
    },
    {
      "epoch": 0.7192982456140351,
      "grad_norm": 0.8690300583839417,
      "learning_rate": 2.9300945880823957e-05,
      "loss": 0.8857,
      "step": 205
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 0.9427959322929382,
      "learning_rate": 2.9247439874211792e-05,
      "loss": 0.8438,
      "step": 210
    },
    {
      "epoch": 0.7543859649122807,
      "grad_norm": 0.9349603652954102,
      "learning_rate": 2.919201353649851e-05,
      "loss": 0.8569,
      "step": 215
    },
    {
      "epoch": 0.7719298245614035,
      "grad_norm": 1.0479152202606201,
      "learning_rate": 2.9134674338283824e-05,
      "loss": 0.8157,
      "step": 220
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 0.8828158974647522,
      "learning_rate": 2.9075430007990978e-05,
      "loss": 0.7808,
      "step": 225
    },
    {
      "epoch": 0.8070175438596491,
      "grad_norm": 1.0486538410186768,
      "learning_rate": 2.9014288530825118e-05,
      "loss": 0.7673,
      "step": 230
    },
    {
      "epoch": 0.8245614035087719,
      "grad_norm": 0.9675076603889465,
      "learning_rate": 2.8951258147696973e-05,
      "loss": 0.7512,
      "step": 235
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.9661276936531067,
      "learning_rate": 2.8886347354112122e-05,
      "loss": 0.7903,
      "step": 240
    },
    {
      "epoch": 0.8596491228070176,
      "grad_norm": 1.0554648637771606,
      "learning_rate": 2.8819564899025953e-05,
      "loss": 0.7277,
      "step": 245
    },
    {
      "epoch": 0.8771929824561403,
      "grad_norm": 0.8913574814796448,
      "learning_rate": 2.875091978366441e-05,
      "loss": 0.7391,
      "step": 250
    },
    {
      "epoch": 0.8947368421052632,
      "grad_norm": 0.9491333961486816,
      "learning_rate": 2.8680421260310794e-05,
      "loss": 0.7534,
      "step": 255
    },
    {
      "epoch": 0.9122807017543859,
      "grad_norm": 0.9836607575416565,
      "learning_rate": 2.8608078831058683e-05,
      "loss": 0.7263,
      "step": 260
    },
    {
      "epoch": 0.9298245614035088,
      "grad_norm": 0.9923145771026611,
      "learning_rate": 2.8533902246531206e-05,
      "loss": 0.7526,
      "step": 265
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 1.0657049417495728,
      "learning_rate": 2.8457901504566824e-05,
      "loss": 0.6987,
      "step": 270
    },
    {
      "epoch": 0.9649122807017544,
      "grad_norm": 0.9763380289077759,
      "learning_rate": 2.838008684887176e-05,
      "loss": 0.672,
      "step": 275
    },
    {
      "epoch": 0.9824561403508771,
      "grad_norm": 1.1240227222442627,
      "learning_rate": 2.8300468767639306e-05,
      "loss": 0.7127,
      "step": 280
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.302412748336792,
      "learning_rate": 2.8219057992136215e-05,
      "loss": 0.7443,
      "step": 285
    }
  ],
  "logging_steps": 5,
  "max_steps": 1425,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 2000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.977535706711982e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}