{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.7037037037037037,
  "eval_steps": 200,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.009259259259259259,
      "eval_loss": 2.042346239089966,
      "eval_runtime": 28.0222,
      "eval_samples_per_second": 53.565,
      "eval_steps_per_second": 6.709,
      "step": 1
    },
    {
      "epoch": 0.09259259259259259,
      "grad_norm": 2.78125,
      "learning_rate": 6.666666666666667e-05,
      "loss": 1.8234,
      "step": 10
    },
    {
      "epoch": 0.18518518518518517,
      "grad_norm": 1.640625,
      "learning_rate": 0.00013333333333333334,
      "loss": 1.8256,
      "step": 20
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 1.625,
      "learning_rate": 0.0002,
      "loss": 1.8582,
      "step": 30
    },
    {
      "epoch": 0.37037037037037035,
      "grad_norm": 1.46875,
      "learning_rate": 0.00019984815164333163,
      "loss": 1.9652,
      "step": 40
    },
    {
      "epoch": 0.46296296296296297,
      "grad_norm": 1.3203125,
      "learning_rate": 0.00019939306773179497,
      "loss": 1.9901,
      "step": 50
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 1.3515625,
      "learning_rate": 0.00019863613034027224,
      "loss": 2.0195,
      "step": 60
    },
    {
      "epoch": 0.6481481481481481,
      "grad_norm": 1.3828125,
      "learning_rate": 0.00019757963826274357,
      "loss": 2.0798,
      "step": 70
    },
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 1.3671875,
      "learning_rate": 0.00019622680003092503,
      "loss": 2.1091,
      "step": 80
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 1.3984375,
      "learning_rate": 0.00019458172417006347,
      "loss": 2.0961,
      "step": 90
    },
    {
      "epoch": 0.9259259259259259,
      "grad_norm": 1.3125,
      "learning_rate": 0.00019264940672148018,
      "loss": 2.1084,
      "step": 100
    },
    {
      "epoch": 1.0185185185185186,
      "grad_norm": 2.203125,
      "learning_rate": 0.00019043571606975777,
      "loss": 1.8677,
      "step": 110
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 1.421875,
      "learning_rate": 0.0001879473751206489,
      "loss": 0.9568,
      "step": 120
    },
    {
      "epoch": 1.2037037037037037,
      "grad_norm": 1.3359375,
      "learning_rate": 0.00018519194088383273,
      "loss": 0.9149,
      "step": 130
    },
    {
      "epoch": 1.2962962962962963,
      "grad_norm": 1.296875,
      "learning_rate": 0.0001821777815225245,
      "loss": 0.9166,
      "step": 140
    },
    {
      "epoch": 1.3888888888888888,
      "grad_norm": 1.203125,
      "learning_rate": 0.00017891405093963938,
      "loss": 0.9197,
      "step": 150
    },
    {
      "epoch": 1.4814814814814814,
      "grad_norm": 1.2578125,
      "learning_rate": 0.00017541066097768963,
      "loss": 0.9485,
      "step": 160
    },
    {
      "epoch": 1.574074074074074,
      "grad_norm": 1.1328125,
      "learning_rate": 0.00017167825131684513,
      "loss": 0.9374,
      "step": 170
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 1.203125,
      "learning_rate": 0.00016772815716257412,
      "loss": 0.9767,
      "step": 180
    },
    {
      "epoch": 1.7592592592592593,
      "grad_norm": 1.21875,
      "learning_rate": 0.00016357237482099684,
      "loss": 0.9682,
      "step": 190
    },
    {
      "epoch": 1.8518518518518519,
      "grad_norm": 1.2109375,
      "learning_rate": 0.00015922352526649803,
      "loss": 0.9898,
      "step": 200
    },
    {
      "epoch": 1.8518518518518519,
      "eval_loss": 2.0737812519073486,
      "eval_runtime": 26.099,
      "eval_samples_per_second": 57.512,
      "eval_steps_per_second": 7.203,
      "step": 200
    },
    {
      "epoch": 1.9444444444444444,
      "grad_norm": 1.21875,
      "learning_rate": 0.00015469481581224272,
      "loss": 0.9473,
      "step": 210
    },
    {
      "epoch": 2.037037037037037,
      "grad_norm": 1.1015625,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.7034,
      "step": 220
    },
    {
      "epoch": 2.1296296296296298,
      "grad_norm": 0.92578125,
      "learning_rate": 0.00014515333583108896,
      "loss": 0.2726,
      "step": 230
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 0.953125,
      "learning_rate": 0.00014016954246529696,
      "loss": 0.2504,
      "step": 240
    },
    {
      "epoch": 2.314814814814815,
      "grad_norm": 0.93359375,
      "learning_rate": 0.00013506375551927547,
      "loss": 0.2544,
      "step": 250
    },
    {
      "epoch": 2.4074074074074074,
      "grad_norm": 0.8359375,
      "learning_rate": 0.00012985148110016947,
      "loss": 0.2437,
      "step": 260
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.796875,
      "learning_rate": 0.00012454854871407994,
      "loss": 0.2416,
      "step": 270
    },
    {
      "epoch": 2.5925925925925926,
      "grad_norm": 0.84765625,
      "learning_rate": 0.00011917106319237386,
      "loss": 0.2402,
      "step": 280
    },
    {
      "epoch": 2.685185185185185,
      "grad_norm": 0.8515625,
      "learning_rate": 0.00011373535578184082,
      "loss": 0.2521,
      "step": 290
    },
    {
      "epoch": 2.7777777777777777,
      "grad_norm": 0.73828125,
      "learning_rate": 0.00010825793454723325,
      "loss": 0.2415,
      "step": 300
    },
    {
      "epoch": 2.8703703703703702,
      "grad_norm": 0.84375,
      "learning_rate": 0.00010275543423681621,
      "loss": 0.2333,
      "step": 310
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 0.71484375,
      "learning_rate": 9.724456576318381e-05,
      "loss": 0.2251,
      "step": 320
    },
    {
      "epoch": 3.0555555555555554,
      "grad_norm": 0.46875,
      "learning_rate": 9.174206545276677e-05,
      "loss": 0.1187,
      "step": 330
    },
    {
      "epoch": 3.148148148148148,
      "grad_norm": 0.423828125,
      "learning_rate": 8.626464421815919e-05,
      "loss": 0.0489,
      "step": 340
    },
    {
      "epoch": 3.240740740740741,
      "grad_norm": 0.333984375,
      "learning_rate": 8.082893680762619e-05,
      "loss": 0.0581,
      "step": 350
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 0.376953125,
      "learning_rate": 7.54514512859201e-05,
      "loss": 0.0472,
      "step": 360
    },
    {
      "epoch": 3.425925925925926,
      "grad_norm": 0.408203125,
      "learning_rate": 7.014851889983057e-05,
      "loss": 0.0478,
      "step": 370
    },
    {
      "epoch": 3.5185185185185186,
      "grad_norm": 0.4375,
      "learning_rate": 6.493624448072457e-05,
      "loss": 0.0588,
      "step": 380
    },
    {
      "epoch": 3.611111111111111,
      "grad_norm": 0.3515625,
      "learning_rate": 5.983045753470308e-05,
      "loss": 0.0446,
      "step": 390
    },
    {
      "epoch": 3.7037037037037037,
      "grad_norm": 0.357421875,
      "learning_rate": 5.484666416891109e-05,
      "loss": 0.0379,
      "step": 400
    },
    {
      "epoch": 3.7037037037037037,
      "eval_loss": 2.7180562019348145,
      "eval_runtime": 26.1711,
      "eval_samples_per_second": 57.353,
      "eval_steps_per_second": 7.183,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 600,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.5981391044870144e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}