{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 37500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 7.915888786315918,
      "learning_rate": 4.947192513368984e-05,
      "loss": 4.0126,
      "step": 500
    },
    {
      "epoch": 0.08,
      "grad_norm": 3.184572219848633,
      "learning_rate": 4.880347593582888e-05,
      "loss": 0.6684,
      "step": 1000
    },
    {
      "epoch": 0.12,
      "grad_norm": 5.76563835144043,
      "learning_rate": 4.813502673796791e-05,
      "loss": 0.5942,
      "step": 1500
    },
    {
      "epoch": 0.16,
      "grad_norm": 1.7340965270996094,
      "learning_rate": 4.7466577540106955e-05,
      "loss": 0.5723,
      "step": 2000
    },
    {
      "epoch": 0.2,
      "grad_norm": 1.790732502937317,
      "learning_rate": 4.679812834224599e-05,
      "loss": 0.5391,
      "step": 2500
    },
    {
      "epoch": 0.24,
      "grad_norm": 2.157850742340088,
      "learning_rate": 4.612967914438503e-05,
      "loss": 0.5188,
      "step": 3000
    },
    {
      "epoch": 0.28,
      "grad_norm": 1.461838960647583,
      "learning_rate": 4.5461229946524065e-05,
      "loss": 0.5113,
      "step": 3500
    },
    {
      "epoch": 0.32,
      "grad_norm": 1.106745719909668,
      "learning_rate": 4.47927807486631e-05,
      "loss": 0.4956,
      "step": 4000
    },
    {
      "epoch": 0.36,
      "grad_norm": 1.2794815301895142,
      "learning_rate": 4.412433155080214e-05,
      "loss": 0.4906,
      "step": 4500
    },
    {
      "epoch": 0.4,
      "grad_norm": 1.2381389141082764,
      "learning_rate": 4.345588235294118e-05,
      "loss": 0.4717,
      "step": 5000
    },
    {
      "epoch": 0.44,
      "grad_norm": 2.9455885887145996,
      "learning_rate": 4.278743315508021e-05,
      "loss": 0.4721,
      "step": 5500
    },
    {
      "epoch": 0.48,
      "grad_norm": 1.1448231935501099,
      "learning_rate": 4.2118983957219257e-05,
      "loss": 0.4745,
      "step": 6000
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.9075339436531067,
      "learning_rate": 4.145053475935829e-05,
      "loss": 0.4597,
      "step": 6500
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.962452232837677,
      "learning_rate": 4.078208556149733e-05,
      "loss": 0.4583,
      "step": 7000
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.7801158428192139,
      "learning_rate": 4.011363636363637e-05,
      "loss": 0.4594,
      "step": 7500
    },
    {
      "epoch": 0.64,
      "grad_norm": 1.1708343029022217,
      "learning_rate": 3.9445187165775404e-05,
      "loss": 0.45,
      "step": 8000
    },
    {
      "epoch": 0.68,
      "grad_norm": 1.1591224670410156,
      "learning_rate": 3.877807486631016e-05,
      "loss": 0.45,
      "step": 8500
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.8542613387107849,
      "learning_rate": 3.81096256684492e-05,
      "loss": 0.4433,
      "step": 9000
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.8478009700775146,
      "learning_rate": 3.744117647058823e-05,
      "loss": 0.4437,
      "step": 9500
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.9733747839927673,
      "learning_rate": 3.677272727272728e-05,
      "loss": 0.4362,
      "step": 10000
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.7879180312156677,
      "learning_rate": 3.610427807486631e-05,
      "loss": 0.4324,
      "step": 10500
    },
    {
      "epoch": 0.88,
      "grad_norm": 1.3161998987197876,
      "learning_rate": 3.543582887700535e-05,
      "loss": 0.4354,
      "step": 11000
    },
    {
      "epoch": 0.92,
      "grad_norm": 1.153984785079956,
      "learning_rate": 3.476737967914439e-05,
      "loss": 0.4215,
      "step": 11500
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.7850612998008728,
      "learning_rate": 3.4098930481283425e-05,
      "loss": 0.4298,
      "step": 12000
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.3549928665161133,
      "learning_rate": 3.343181818181818e-05,
      "loss": 0.4245,
      "step": 12500
    },
    {
      "epoch": 1.04,
      "grad_norm": 0.8405876755714417,
      "learning_rate": 3.2763368983957224e-05,
      "loss": 0.4204,
      "step": 13000
    },
    {
      "epoch": 1.08,
      "grad_norm": 1.5088772773742676,
      "learning_rate": 3.2094919786096254e-05,
      "loss": 0.4218,
      "step": 13500
    },
    {
      "epoch": 1.12,
      "grad_norm": 1.469650149345398,
      "learning_rate": 3.14264705882353e-05,
      "loss": 0.4074,
      "step": 14000
    },
    {
      "epoch": 1.16,
      "grad_norm": 0.8406803607940674,
      "learning_rate": 3.075802139037433e-05,
      "loss": 0.4173,
      "step": 14500
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.754941999912262,
      "learning_rate": 3.0090909090909093e-05,
      "loss": 0.4095,
      "step": 15000
    },
    {
      "epoch": 1.24,
      "grad_norm": 0.7950364947319031,
      "learning_rate": 2.9422459893048134e-05,
      "loss": 0.4053,
      "step": 15500
    },
    {
      "epoch": 1.28,
      "grad_norm": 0.8804301023483276,
      "learning_rate": 2.8754010695187167e-05,
      "loss": 0.4117,
      "step": 16000
    },
    {
      "epoch": 1.32,
      "grad_norm": 1.4785621166229248,
      "learning_rate": 2.8085561497326207e-05,
      "loss": 0.4068,
      "step": 16500
    },
    {
      "epoch": 1.3599999999999999,
      "grad_norm": 1.0254385471343994,
      "learning_rate": 2.7419786096256685e-05,
      "loss": 0.4032,
      "step": 17000
    },
    {
      "epoch": 1.4,
      "grad_norm": 0.9251853823661804,
      "learning_rate": 2.6751336898395725e-05,
      "loss": 0.4023,
      "step": 17500
    },
    {
      "epoch": 1.44,
      "grad_norm": 2.7361536026000977,
      "learning_rate": 2.608288770053476e-05,
      "loss": 0.3997,
      "step": 18000
    },
    {
      "epoch": 1.48,
      "grad_norm": 0.9054092764854431,
      "learning_rate": 2.54144385026738e-05,
      "loss": 0.3981,
      "step": 18500
    },
    {
      "epoch": 1.52,
      "grad_norm": 0.8799841403961182,
      "learning_rate": 2.4745989304812836e-05,
      "loss": 0.3943,
      "step": 19000
    },
    {
      "epoch": 1.56,
      "grad_norm": 0.7638904452323914,
      "learning_rate": 2.4077540106951873e-05,
      "loss": 0.3962,
      "step": 19500
    },
    {
      "epoch": 1.6,
      "grad_norm": 1.0725598335266113,
      "learning_rate": 2.341042780748663e-05,
      "loss": 0.3918,
      "step": 20000
    },
    {
      "epoch": 1.6400000000000001,
      "grad_norm": 0.7163866758346558,
      "learning_rate": 2.274197860962567e-05,
      "loss": 0.3987,
      "step": 20500
    },
    {
      "epoch": 1.6800000000000002,
      "grad_norm": 1.1992738246917725,
      "learning_rate": 2.2073529411764705e-05,
      "loss": 0.3983,
      "step": 21000
    },
    {
      "epoch": 1.72,
      "grad_norm": 0.9536983966827393,
      "learning_rate": 2.1405080213903746e-05,
      "loss": 0.3961,
      "step": 21500
    },
    {
      "epoch": 1.76,
      "grad_norm": 0.8780732154846191,
      "learning_rate": 2.0736631016042782e-05,
      "loss": 0.3926,
      "step": 22000
    },
    {
      "epoch": 1.8,
      "grad_norm": 1.201428771018982,
      "learning_rate": 2.006818181818182e-05,
      "loss": 0.3903,
      "step": 22500
    },
    {
      "epoch": 1.8399999999999999,
      "grad_norm": 0.9713455438613892,
      "learning_rate": 1.9399732620320856e-05,
      "loss": 0.3977,
      "step": 23000
    },
    {
      "epoch": 1.88,
      "grad_norm": 1.4893653392791748,
      "learning_rate": 1.8731283422459893e-05,
      "loss": 0.3923,
      "step": 23500
    },
    {
      "epoch": 1.92,
      "grad_norm": 0.8749503493309021,
      "learning_rate": 1.8062834224598933e-05,
      "loss": 0.3868,
      "step": 24000
    },
    {
      "epoch": 1.96,
      "grad_norm": 1.0122476816177368,
      "learning_rate": 1.739438502673797e-05,
      "loss": 0.3879,
      "step": 24500
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.9151819944381714,
      "learning_rate": 1.6725935828877007e-05,
      "loss": 0.3878,
      "step": 25000
    },
    {
      "epoch": 2.04,
      "grad_norm": 0.9272295832633972,
      "learning_rate": 1.6057486631016044e-05,
      "loss": 0.3826,
      "step": 25500
    },
    {
      "epoch": 2.08,
      "grad_norm": 0.9182801842689514,
      "learning_rate": 1.5391711229946525e-05,
      "loss": 0.382,
      "step": 26000
    },
    {
      "epoch": 2.12,
      "grad_norm": 1.7542533874511719,
      "learning_rate": 1.4723262032085564e-05,
      "loss": 0.3786,
      "step": 26500
    },
    {
      "epoch": 2.16,
      "grad_norm": 1.5916212797164917,
      "learning_rate": 1.4054812834224599e-05,
      "loss": 0.3812,
      "step": 27000
    },
    {
      "epoch": 2.2,
      "grad_norm": 0.9425985813140869,
      "learning_rate": 1.3386363636363636e-05,
      "loss": 0.3828,
      "step": 27500
    },
    {
      "epoch": 2.24,
      "grad_norm": 0.9243144989013672,
      "learning_rate": 1.2717914438502672e-05,
      "loss": 0.3807,
      "step": 28000
    },
    {
      "epoch": 2.2800000000000002,
      "grad_norm": 1.0092005729675293,
      "learning_rate": 1.2049465240641713e-05,
      "loss": 0.3714,
      "step": 28500
    },
    {
      "epoch": 2.32,
      "grad_norm": 0.8653339743614197,
      "learning_rate": 1.138101604278075e-05,
      "loss": 0.3792,
      "step": 29000
    },
    {
      "epoch": 2.36,
      "grad_norm": 0.9960777163505554,
      "learning_rate": 1.0712566844919786e-05,
      "loss": 0.369,
      "step": 29500
    },
    {
      "epoch": 2.4,
      "grad_norm": 1.3958770036697388,
      "learning_rate": 1.0045454545454545e-05,
      "loss": 0.3742,
      "step": 30000
    },
    {
      "epoch": 2.44,
      "grad_norm": 0.6805775761604309,
      "learning_rate": 9.377005347593582e-06,
      "loss": 0.3766,
      "step": 30500
    },
    {
      "epoch": 2.48,
      "grad_norm": 2.525830030441284,
      "learning_rate": 8.70855614973262e-06,
      "loss": 0.3781,
      "step": 31000
    },
    {
      "epoch": 2.52,
      "grad_norm": 1.4676679372787476,
      "learning_rate": 8.040106951871658e-06,
      "loss": 0.3758,
      "step": 31500
    },
    {
      "epoch": 2.56,
      "grad_norm": 1.1790502071380615,
      "learning_rate": 7.371657754010695e-06,
      "loss": 0.3756,
      "step": 32000
    },
    {
      "epoch": 2.6,
      "grad_norm": 0.7830659747123718,
      "learning_rate": 6.704545454545455e-06,
      "loss": 0.3786,
      "step": 32500
    },
    {
      "epoch": 2.64,
      "grad_norm": 0.960568904876709,
      "learning_rate": 6.038770053475936e-06,
      "loss": 0.3778,
      "step": 33000
    },
    {
      "epoch": 2.68,
      "grad_norm": 1.0638457536697388,
      "learning_rate": 5.370320855614974e-06,
      "loss": 0.3826,
      "step": 33500
    },
    {
      "epoch": 2.7199999999999998,
      "grad_norm": 1.3440908193588257,
      "learning_rate": 4.701871657754011e-06,
      "loss": 0.3697,
      "step": 34000
    },
    {
      "epoch": 2.76,
      "grad_norm": 0.6467347145080566,
      "learning_rate": 4.0334224598930485e-06,
      "loss": 0.38,
      "step": 34500
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.818299412727356,
      "learning_rate": 3.364973262032086e-06,
      "loss": 0.3677,
      "step": 35000
    },
    {
      "epoch": 2.84,
      "grad_norm": 1.8121366500854492,
      "learning_rate": 2.696524064171123e-06,
      "loss": 0.3777,
      "step": 35500
    },
    {
      "epoch": 2.88,
      "grad_norm": 0.7342619895935059,
      "learning_rate": 2.0280748663101604e-06,
      "loss": 0.3709,
      "step": 36000
    },
    {
      "epoch": 2.92,
      "grad_norm": 0.925221860408783,
      "learning_rate": 1.3596256684491979e-06,
      "loss": 0.3709,
      "step": 36500
    },
    {
      "epoch": 2.96,
      "grad_norm": 0.8312897682189941,
      "learning_rate": 6.911764705882354e-07,
      "loss": 0.3725,
      "step": 37000
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.9641203880310059,
      "learning_rate": 2.272727272727273e-08,
      "loss": 0.3679,
      "step": 37500
    }
  ],
  "logging_steps": 500,
  "max_steps": 37500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 30000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.083549155328e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}