{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 190,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.026385224274406333,
      "grad_norm": 107.23584747314453,
      "learning_rate": 4.210526315789474e-07,
      "loss": 5.622,
      "step": 5
    },
    {
      "epoch": 0.052770448548812667,
      "grad_norm": 77.56803894042969,
      "learning_rate": 9.473684210526317e-07,
      "loss": 5.3488,
      "step": 10
    },
    {
      "epoch": 0.079155672823219,
      "grad_norm": 60.997257232666016,
      "learning_rate": 1.4736842105263159e-06,
      "loss": 4.9812,
      "step": 15
    },
    {
      "epoch": 0.10554089709762533,
      "grad_norm": 33.36190414428711,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 4.4552,
      "step": 20
    },
    {
      "epoch": 0.13192612137203166,
      "grad_norm": 18.245769500732422,
      "learning_rate": 2.5263157894736844e-06,
      "loss": 4.0648,
      "step": 25
    },
    {
      "epoch": 0.158311345646438,
      "grad_norm": 15.86693000793457,
      "learning_rate": 3.052631578947369e-06,
      "loss": 3.6069,
      "step": 30
    },
    {
      "epoch": 0.18469656992084432,
      "grad_norm": 11.502686500549316,
      "learning_rate": 3.578947368421053e-06,
      "loss": 3.6728,
      "step": 35
    },
    {
      "epoch": 0.21108179419525067,
      "grad_norm": 16.517114639282227,
      "learning_rate": 4.105263157894737e-06,
      "loss": 3.7924,
      "step": 40
    },
    {
      "epoch": 0.23746701846965698,
      "grad_norm": 12.67491340637207,
      "learning_rate": 4.631578947368421e-06,
      "loss": 3.3256,
      "step": 45
    },
    {
      "epoch": 0.2638522427440633,
      "grad_norm": 11.452499389648438,
      "learning_rate": 5.157894736842106e-06,
      "loss": 3.2939,
      "step": 50
    },
    {
      "epoch": 0.29023746701846964,
      "grad_norm": 13.192882537841797,
      "learning_rate": 5.68421052631579e-06,
      "loss": 3.0344,
      "step": 55
    },
    {
      "epoch": 0.316622691292876,
      "grad_norm": 11.595597267150879,
      "learning_rate": 6.2105263157894745e-06,
      "loss": 2.7082,
      "step": 60
    },
    {
      "epoch": 0.34300791556728233,
      "grad_norm": 12.450468063354492,
      "learning_rate": 6.736842105263158e-06,
      "loss": 2.9807,
      "step": 65
    },
    {
      "epoch": 0.36939313984168864,
      "grad_norm": 13.777202606201172,
      "learning_rate": 7.263157894736843e-06,
      "loss": 2.8359,
      "step": 70
    },
    {
      "epoch": 0.39577836411609496,
      "grad_norm": 13.03334903717041,
      "learning_rate": 7.789473684210526e-06,
      "loss": 2.5446,
      "step": 75
    },
    {
      "epoch": 0.42216358839050133,
      "grad_norm": 17.208057403564453,
      "learning_rate": 8.315789473684212e-06,
      "loss": 2.5668,
      "step": 80
    },
    {
      "epoch": 0.44854881266490765,
      "grad_norm": 15.075675010681152,
      "learning_rate": 8.842105263157895e-06,
      "loss": 2.3511,
      "step": 85
    },
    {
      "epoch": 0.47493403693931396,
      "grad_norm": 14.769970893859863,
      "learning_rate": 9.36842105263158e-06,
      "loss": 2.5317,
      "step": 90
    },
    {
      "epoch": 0.5013192612137203,
      "grad_norm": 11.59249210357666,
      "learning_rate": 9.894736842105264e-06,
      "loss": 2.2466,
      "step": 95
    },
    {
      "epoch": 0.5277044854881267,
      "grad_norm": 15.899335861206055,
      "learning_rate": 9.999459967758384e-06,
      "loss": 2.1577,
      "step": 100
    },
    {
      "epoch": 0.554089709762533,
      "grad_norm": 18.471529006958008,
      "learning_rate": 9.99726628670463e-06,
      "loss": 1.9901,
      "step": 105
    },
    {
      "epoch": 0.5804749340369393,
      "grad_norm": 13.399049758911133,
      "learning_rate": 9.993385944658086e-06,
      "loss": 1.9487,
      "step": 110
    },
    {
      "epoch": 0.6068601583113457,
      "grad_norm": 11.711898803710938,
      "learning_rate": 9.987820251299121e-06,
      "loss": 1.6927,
      "step": 115
    },
    {
      "epoch": 0.633245382585752,
      "grad_norm": 20.446998596191406,
      "learning_rate": 9.980571085142381e-06,
      "loss": 1.7541,
      "step": 120
    },
    {
      "epoch": 0.6596306068601583,
      "grad_norm": 10.907441139221191,
      "learning_rate": 9.971640892902742e-06,
      "loss": 1.7187,
      "step": 125
    },
    {
      "epoch": 0.6860158311345647,
      "grad_norm": 13.693713188171387,
      "learning_rate": 9.961032688669519e-06,
      "loss": 1.6546,
      "step": 130
    },
    {
      "epoch": 0.712401055408971,
      "grad_norm": 9.666728019714355,
      "learning_rate": 9.94875005288915e-06,
      "loss": 1.6036,
      "step": 135
    },
    {
      "epoch": 0.7387862796833773,
      "grad_norm": 12.30069637298584,
      "learning_rate": 9.934797131156745e-06,
      "loss": 1.3259,
      "step": 140
    },
    {
      "epoch": 0.7651715039577837,
      "grad_norm": 10.972068786621094,
      "learning_rate": 9.919178632816864e-06,
      "loss": 1.3776,
      "step": 145
    },
    {
      "epoch": 0.7915567282321899,
      "grad_norm": 13.099720001220703,
      "learning_rate": 9.901899829374048e-06,
      "loss": 1.2886,
      "step": 150
    },
    {
      "epoch": 0.8179419525065963,
      "grad_norm": 9.663094520568848,
      "learning_rate": 9.88296655271359e-06,
      "loss": 1.133,
      "step": 155
    },
    {
      "epoch": 0.8443271767810027,
      "grad_norm": 12.018184661865234,
      "learning_rate": 9.862385193133181e-06,
      "loss": 1.0579,
      "step": 160
    },
    {
      "epoch": 0.8707124010554089,
      "grad_norm": 14.370702743530273,
      "learning_rate": 9.840162697186075e-06,
      "loss": 0.8807,
      "step": 165
    },
    {
      "epoch": 0.8970976253298153,
      "grad_norm": 10.248346328735352,
      "learning_rate": 9.81630656533651e-06,
      "loss": 0.8824,
      "step": 170
    },
    {
      "epoch": 0.9234828496042217,
      "grad_norm": 12.581779479980469,
      "learning_rate": 9.79082484942818e-06,
      "loss": 0.8748,
      "step": 175
    },
    {
      "epoch": 0.9498680738786279,
      "grad_norm": 13.555765151977539,
      "learning_rate": 9.763726149966596e-06,
      "loss": 0.8773,
      "step": 180
    },
    {
      "epoch": 0.9762532981530343,
      "grad_norm": 10.211019515991211,
      "learning_rate": 9.735019613216281e-06,
      "loss": 0.6417,
      "step": 185
    },
    {
      "epoch": 1.0,
      "grad_norm": 7.966390609741211,
      "learning_rate": 9.704714928113743e-06,
      "loss": 0.7403,
      "step": 190
    }
  ],
  "logging_steps": 5,
  "max_steps": 950,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.590152188672082e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}