{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.792079207920792,
  "eval_steps": 500,
  "global_step": 36,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07920792079207921,
      "grad_norm": 161.63536071777344,
      "learning_rate": 2e-05,
      "loss": 6.492,
      "step": 1
    },
    {
      "epoch": 0.15841584158415842,
      "grad_norm": 159.5704345703125,
      "learning_rate": 4e-05,
      "loss": 6.4968,
      "step": 2
    },
    {
      "epoch": 0.2376237623762376,
      "grad_norm": 142.9398193359375,
      "learning_rate": 6e-05,
      "loss": 5.6031,
      "step": 3
    },
    {
      "epoch": 0.31683168316831684,
      "grad_norm": 171.34854125976562,
      "learning_rate": 8e-05,
      "loss": 4.0835,
      "step": 4
    },
    {
      "epoch": 0.39603960396039606,
      "grad_norm": 124.95561218261719,
      "learning_rate": 0.0001,
      "loss": 2.2652,
      "step": 5
    },
    {
      "epoch": 0.4752475247524752,
      "grad_norm": 202.09657287597656,
      "learning_rate": 0.00012,
      "loss": 1.3066,
      "step": 6
    },
    {
      "epoch": 0.5544554455445545,
      "grad_norm": 89.74258422851562,
      "learning_rate": 0.00014,
      "loss": 0.6958,
      "step": 7
    },
    {
      "epoch": 0.6336633663366337,
      "grad_norm": 20.369661331176758,
      "learning_rate": 0.00016,
      "loss": 0.336,
      "step": 8
    },
    {
      "epoch": 0.7128712871287128,
      "grad_norm": 4.07184362411499,
      "learning_rate": 0.00018,
      "loss": 0.173,
      "step": 9
    },
    {
      "epoch": 0.7920792079207921,
      "grad_norm": 0.5548842549324036,
      "learning_rate": 0.0002,
      "loss": 0.1229,
      "step": 10
    },
    {
      "epoch": 0.8712871287128713,
      "grad_norm": 0.37989184260368347,
      "learning_rate": 0.00019230769230769233,
      "loss": 0.1154,
      "step": 11
    },
    {
      "epoch": 0.9504950495049505,
      "grad_norm": 10.629559516906738,
      "learning_rate": 0.00018461538461538463,
      "loss": 0.1451,
      "step": 12
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.5450623631477356,
      "learning_rate": 0.00017692307692307693,
      "loss": 0.124,
      "step": 13
    },
    {
      "epoch": 1.0792079207920793,
      "grad_norm": 1.0018385648727417,
      "learning_rate": 0.00016923076923076923,
      "loss": 0.1142,
      "step": 14
    },
    {
      "epoch": 1.1584158415841583,
      "grad_norm": 3.8120734691619873,
      "learning_rate": 0.00016153846153846155,
      "loss": 0.0983,
      "step": 15
    },
    {
      "epoch": 1.2376237623762376,
      "grad_norm": 0.4333324432373047,
      "learning_rate": 0.00015384615384615385,
      "loss": 0.1196,
      "step": 16
    },
    {
      "epoch": 1.316831683168317,
      "grad_norm": 0.46964749693870544,
      "learning_rate": 0.00014615384615384615,
      "loss": 0.1008,
      "step": 17
    },
    {
      "epoch": 1.396039603960396,
      "grad_norm": 0.4365217983722687,
      "learning_rate": 0.00013846153846153847,
      "loss": 0.0951,
      "step": 18
    },
    {
      "epoch": 1.4752475247524752,
      "grad_norm": 7.36062479019165,
      "learning_rate": 0.00013076923076923077,
      "loss": 0.1117,
      "step": 19
    },
    {
      "epoch": 1.5544554455445545,
      "grad_norm": 0.9376835823059082,
      "learning_rate": 0.0001230769230769231,
      "loss": 0.0908,
      "step": 20
    },
    {
      "epoch": 1.6336633663366338,
      "grad_norm": 1.0286779403686523,
      "learning_rate": 0.00011538461538461538,
      "loss": 0.1072,
      "step": 21
    },
    {
      "epoch": 1.7128712871287128,
      "grad_norm": 0.8556565642356873,
      "learning_rate": 0.0001076923076923077,
      "loss": 0.1045,
      "step": 22
    },
    {
      "epoch": 1.7920792079207921,
      "grad_norm": 0.6213693618774414,
      "learning_rate": 0.0001,
      "loss": 0.0943,
      "step": 23
    },
    {
      "epoch": 1.8712871287128712,
      "grad_norm": 0.1333772838115692,
      "learning_rate": 9.230769230769232e-05,
      "loss": 0.0882,
      "step": 24
    },
    {
      "epoch": 1.9504950495049505,
      "grad_norm": 1.6561765670776367,
      "learning_rate": 8.461538461538461e-05,
      "loss": 0.0956,
      "step": 25
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.7169074416160583,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.1031,
      "step": 26
    },
    {
      "epoch": 2.0792079207920793,
      "grad_norm": 0.19848042726516724,
      "learning_rate": 6.923076923076924e-05,
      "loss": 0.1033,
      "step": 27
    },
    {
      "epoch": 2.1584158415841586,
      "grad_norm": 0.3161543309688568,
      "learning_rate": 6.153846153846155e-05,
      "loss": 0.0973,
      "step": 28
    },
    {
      "epoch": 2.237623762376238,
      "grad_norm": 0.32770031690597534,
      "learning_rate": 5.384615384615385e-05,
      "loss": 0.0925,
      "step": 29
    },
    {
      "epoch": 2.3168316831683167,
      "grad_norm": 0.3246991038322449,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.0838,
      "step": 30
    },
    {
      "epoch": 2.396039603960396,
      "grad_norm": 0.3104535937309265,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.0823,
      "step": 31
    },
    {
      "epoch": 2.4752475247524752,
      "grad_norm": 0.2570880055427551,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 0.0796,
      "step": 32
    },
    {
      "epoch": 2.5544554455445545,
      "grad_norm": 0.20462781190872192,
      "learning_rate": 2.307692307692308e-05,
      "loss": 0.0937,
      "step": 33
    },
    {
      "epoch": 2.633663366336634,
      "grad_norm": 0.12997423112392426,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.081,
      "step": 34
    },
    {
      "epoch": 2.7128712871287126,
      "grad_norm": 0.12393197417259216,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.096,
      "step": 35
    },
    {
      "epoch": 2.792079207920792,
      "grad_norm": 0.11649560928344727,
      "learning_rate": 0.0,
      "loss": 0.082,
      "step": 36
    }
  ],
  "logging_steps": 1,
  "max_steps": 36,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6165670693699584.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}