{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 34595,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07226477814713109,
      "grad_norm": 22.313980102539062,
      "learning_rate": 2.9566411331117216e-05,
      "loss": 3.3429,
      "step": 500
    },
    {
      "epoch": 0.14452955629426217,
      "grad_norm": 8.817935943603516,
      "learning_rate": 2.9132822662234428e-05,
      "loss": 1.7422,
      "step": 1000
    },
    {
      "epoch": 0.21679433444139326,
      "grad_norm": 96.14582061767578,
      "learning_rate": 2.8699233993351643e-05,
      "loss": 1.1619,
      "step": 1500
    },
    {
      "epoch": 0.28905911258852435,
      "grad_norm": 4.378906726837158,
      "learning_rate": 2.8265645324468854e-05,
      "loss": 1.0505,
      "step": 2000
    },
    {
      "epoch": 0.36132389073565546,
      "grad_norm": 4.367362022399902,
      "learning_rate": 2.783205665558607e-05,
      "loss": 0.9841,
      "step": 2500
    },
    {
      "epoch": 0.4335886688827865,
      "grad_norm": 342.0606689453125,
      "learning_rate": 2.739846798670328e-05,
      "loss": 0.861,
      "step": 3000
    },
    {
      "epoch": 0.5058534470299176,
      "grad_norm": 23.573060989379883,
      "learning_rate": 2.6964879317820496e-05,
      "loss": 0.8402,
      "step": 3500
    },
    {
      "epoch": 0.5781182251770487,
      "grad_norm": 42.652748107910156,
      "learning_rate": 2.6531290648937708e-05,
      "loss": 0.82,
      "step": 4000
    },
    {
      "epoch": 0.6503830033241798,
      "grad_norm": 46.95698928833008,
      "learning_rate": 2.6097701980054923e-05,
      "loss": 0.77,
      "step": 4500
    },
    {
      "epoch": 0.7226477814713109,
      "grad_norm": 44.56264877319336,
      "learning_rate": 2.5664113311172135e-05,
      "loss": 0.8083,
      "step": 5000
    },
    {
      "epoch": 0.794912559618442,
      "grad_norm": 80.95307159423828,
      "learning_rate": 2.523052464228935e-05,
      "loss": 0.8054,
      "step": 5500
    },
    {
      "epoch": 0.867177337765573,
      "grad_norm": 57.8803825378418,
      "learning_rate": 2.4796935973406565e-05,
      "loss": 0.7206,
      "step": 6000
    },
    {
      "epoch": 0.9394421159127041,
      "grad_norm": 17.753629684448242,
      "learning_rate": 2.4363347304523777e-05,
      "loss": 0.7892,
      "step": 6500
    },
    {
      "epoch": 1.0117068940598353,
      "grad_norm": 33.004276275634766,
      "learning_rate": 2.392975863564099e-05,
      "loss": 0.7122,
      "step": 7000
    },
    {
      "epoch": 1.0839716722069663,
      "grad_norm": 0.034875206649303436,
      "learning_rate": 2.34961699667582e-05,
      "loss": 0.4892,
      "step": 7500
    },
    {
      "epoch": 1.1562364503540974,
      "grad_norm": 26.65053367614746,
      "learning_rate": 2.3062581297875416e-05,
      "loss": 0.4959,
      "step": 8000
    },
    {
      "epoch": 1.2285012285012284,
      "grad_norm": 0.3080042898654938,
      "learning_rate": 2.2628992628992627e-05,
      "loss": 0.5166,
      "step": 8500
    },
    {
      "epoch": 1.3007660066483595,
      "grad_norm": 47.525901794433594,
      "learning_rate": 2.2195403960109842e-05,
      "loss": 0.4875,
      "step": 9000
    },
    {
      "epoch": 1.3730307847954908,
      "grad_norm": 2.4876151084899902,
      "learning_rate": 2.1761815291227054e-05,
      "loss": 0.4864,
      "step": 9500
    },
    {
      "epoch": 1.4452955629426218,
      "grad_norm": 0.8099455833435059,
      "learning_rate": 2.132822662234427e-05,
      "loss": 0.4836,
      "step": 10000
    },
    {
      "epoch": 1.517560341089753,
      "grad_norm": 9.619250297546387,
      "learning_rate": 2.089463795346148e-05,
      "loss": 0.5483,
      "step": 10500
    },
    {
      "epoch": 1.589825119236884,
      "grad_norm": 0.05848146229982376,
      "learning_rate": 2.0461049284578696e-05,
      "loss": 0.546,
      "step": 11000
    },
    {
      "epoch": 1.662089897384015,
      "grad_norm": 42.597862243652344,
      "learning_rate": 2.002746061569591e-05,
      "loss": 0.5164,
      "step": 11500
    },
    {
      "epoch": 1.734354675531146,
      "grad_norm": 47.1865119934082,
      "learning_rate": 1.9593871946813123e-05,
      "loss": 0.5734,
      "step": 12000
    },
    {
      "epoch": 1.8066194536782771,
      "grad_norm": 23.797060012817383,
      "learning_rate": 1.9160283277930338e-05,
      "loss": 0.5089,
      "step": 12500
    },
    {
      "epoch": 1.8788842318254084,
      "grad_norm": 0.4582177996635437,
      "learning_rate": 1.872669460904755e-05,
      "loss": 0.5102,
      "step": 13000
    },
    {
      "epoch": 1.9511490099725393,
      "grad_norm": 75.59239196777344,
      "learning_rate": 1.8293105940164765e-05,
      "loss": 0.5414,
      "step": 13500
    },
    {
      "epoch": 2.0234137881196705,
      "grad_norm": 27.790529251098633,
      "learning_rate": 1.7859517271281977e-05,
      "loss": 0.436,
      "step": 14000
    },
    {
      "epoch": 2.0956785662668014,
      "grad_norm": 0.07247856259346008,
      "learning_rate": 1.7425928602399192e-05,
      "loss": 0.3202,
      "step": 14500
    },
    {
      "epoch": 2.1679433444139327,
      "grad_norm": 6.203830242156982,
      "learning_rate": 1.6992339933516404e-05,
      "loss": 0.2893,
      "step": 15000
    },
    {
      "epoch": 2.240208122561064,
      "grad_norm": 0.011503485031425953,
      "learning_rate": 1.655875126463362e-05,
      "loss": 0.321,
      "step": 15500
    },
    {
      "epoch": 2.312472900708195,
      "grad_norm": 0.3793994188308716,
      "learning_rate": 1.6125162595750834e-05,
      "loss": 0.3078,
      "step": 16000
    },
    {
      "epoch": 2.384737678855326,
      "grad_norm": 42.47032165527344,
      "learning_rate": 1.5691573926868046e-05,
      "loss": 0.2855,
      "step": 16500
    },
    {
      "epoch": 2.457002457002457,
      "grad_norm": 73.11065673828125,
      "learning_rate": 1.5257985257985259e-05,
      "loss": 0.3357,
      "step": 17000
    },
    {
      "epoch": 2.529267235149588,
      "grad_norm": 0.03242184966802597,
      "learning_rate": 1.482439658910247e-05,
      "loss": 0.3016,
      "step": 17500
    },
    {
      "epoch": 2.601532013296719,
      "grad_norm": 0.020083703100681305,
      "learning_rate": 1.4390807920219684e-05,
      "loss": 0.3017,
      "step": 18000
    },
    {
      "epoch": 2.6737967914438503,
      "grad_norm": 12.890129089355469,
      "learning_rate": 1.39572192513369e-05,
      "loss": 0.3065,
      "step": 18500
    },
    {
      "epoch": 2.7460615695909816,
      "grad_norm": 0.07345220446586609,
      "learning_rate": 1.3523630582454113e-05,
      "loss": 0.338,
      "step": 19000
    },
    {
      "epoch": 2.8183263477381124,
      "grad_norm": 37.88114547729492,
      "learning_rate": 1.3090041913571326e-05,
      "loss": 0.313,
      "step": 19500
    },
    {
      "epoch": 2.8905911258852437,
      "grad_norm": 0.04430060461163521,
      "learning_rate": 1.265645324468854e-05,
      "loss": 0.2948,
      "step": 20000
    },
    {
      "epoch": 2.9628559040323745,
      "grad_norm": 0.06189202517271042,
      "learning_rate": 1.2222864575805753e-05,
      "loss": 0.2838,
      "step": 20500
    },
    {
      "epoch": 3.035120682179506,
      "grad_norm": 0.015777448192238808,
      "learning_rate": 1.1789275906922966e-05,
      "loss": 0.2385,
      "step": 21000
    },
    {
      "epoch": 3.1073854603266367,
      "grad_norm": 0.02253924310207367,
      "learning_rate": 1.135568723804018e-05,
      "loss": 0.1713,
      "step": 21500
    },
    {
      "epoch": 3.179650238473768,
      "grad_norm": 0.014288770034909248,
      "learning_rate": 1.0922098569157393e-05,
      "loss": 0.1905,
      "step": 22000
    },
    {
      "epoch": 3.2519150166208988,
      "grad_norm": 0.1594344526529312,
      "learning_rate": 1.0488509900274607e-05,
      "loss": 0.1578,
      "step": 22500
    },
    {
      "epoch": 3.32417979476803,
      "grad_norm": 0.011662287637591362,
      "learning_rate": 1.0054921231391818e-05,
      "loss": 0.1585,
      "step": 23000
    },
    {
      "epoch": 3.3964445729151613,
      "grad_norm": 0.02400069124996662,
      "learning_rate": 9.621332562509032e-06,
      "loss": 0.1732,
      "step": 23500
    },
    {
      "epoch": 3.468709351062292,
      "grad_norm": 0.07058549672365189,
      "learning_rate": 9.187743893626247e-06,
      "loss": 0.1597,
      "step": 24000
    },
    {
      "epoch": 3.5409741292094234,
      "grad_norm": 0.04298946261405945,
      "learning_rate": 8.75415522474346e-06,
      "loss": 0.1542,
      "step": 24500
    },
    {
      "epoch": 3.6132389073565543,
      "grad_norm": 0.005008801817893982,
      "learning_rate": 8.320566555860674e-06,
      "loss": 0.1514,
      "step": 25000
    },
    {
      "epoch": 3.6855036855036856,
      "grad_norm": 0.09852935373783112,
      "learning_rate": 7.886977886977887e-06,
      "loss": 0.1706,
      "step": 25500
    },
    {
      "epoch": 3.757768463650817,
      "grad_norm": 140.0009765625,
      "learning_rate": 7.453389218095101e-06,
      "loss": 0.1442,
      "step": 26000
    },
    {
      "epoch": 3.8300332417979477,
      "grad_norm": 0.0774250477552414,
      "learning_rate": 7.019800549212314e-06,
      "loss": 0.1743,
      "step": 26500
    },
    {
      "epoch": 3.9022980199450785,
      "grad_norm": 0.045473262667655945,
      "learning_rate": 6.586211880329528e-06,
      "loss": 0.1855,
      "step": 27000
    },
    {
      "epoch": 3.97456279809221,
      "grad_norm": 0.022451166063547134,
      "learning_rate": 6.152623211446741e-06,
      "loss": 0.1622,
      "step": 27500
    },
    {
      "epoch": 4.046827576239341,
      "grad_norm": 0.014400485903024673,
      "learning_rate": 5.7190345425639545e-06,
      "loss": 0.0775,
      "step": 28000
    },
    {
      "epoch": 4.119092354386472,
      "grad_norm": 0.025828225538134575,
      "learning_rate": 5.285445873681168e-06,
      "loss": 0.0835,
      "step": 28500
    },
    {
      "epoch": 4.191357132533603,
      "grad_norm": 0.048525627702474594,
      "learning_rate": 4.851857204798381e-06,
      "loss": 0.073,
      "step": 29000
    },
    {
      "epoch": 4.263621910680734,
      "grad_norm": 0.01667250506579876,
      "learning_rate": 4.418268535915595e-06,
      "loss": 0.074,
      "step": 29500
    },
    {
      "epoch": 4.335886688827865,
      "grad_norm": 0.01051491778343916,
      "learning_rate": 3.984679867032808e-06,
      "loss": 0.0839,
      "step": 30000
    },
    {
      "epoch": 4.408151466974997,
      "grad_norm": 0.02080466039478779,
      "learning_rate": 3.5510911981500216e-06,
      "loss": 0.1082,
      "step": 30500
    },
    {
      "epoch": 4.480416245122128,
      "grad_norm": 0.0018432741053402424,
      "learning_rate": 3.117502529267235e-06,
      "loss": 0.0994,
      "step": 31000
    },
    {
      "epoch": 4.552681023269258,
      "grad_norm": 0.2674684524536133,
      "learning_rate": 2.683913860384449e-06,
      "loss": 0.121,
      "step": 31500
    },
    {
      "epoch": 4.62494580141639,
      "grad_norm": 0.0021196361631155014,
      "learning_rate": 2.250325191501662e-06,
      "loss": 0.0784,
      "step": 32000
    },
    {
      "epoch": 4.697210579563521,
      "grad_norm": 0.010477827861905098,
      "learning_rate": 1.8167365226188756e-06,
      "loss": 0.0836,
      "step": 32500
    },
    {
      "epoch": 4.769475357710652,
      "grad_norm": 0.002533436520025134,
      "learning_rate": 1.383147853736089e-06,
      "loss": 0.0633,
      "step": 33000
    },
    {
      "epoch": 4.841740135857783,
      "grad_norm": 0.0024999654851853848,
      "learning_rate": 9.495591848533026e-07,
      "loss": 0.0588,
      "step": 33500
    },
    {
      "epoch": 4.914004914004914,
      "grad_norm": 0.0020577749237418175,
      "learning_rate": 5.15970515970516e-07,
      "loss": 0.0794,
      "step": 34000
    },
    {
      "epoch": 4.986269692152045,
      "grad_norm": 0.19813172519207,
      "learning_rate": 8.238184708772944e-08,
      "loss": 0.0608,
      "step": 34500
    },
    {
      "epoch": 5.0,
      "step": 34595,
      "total_flos": 3.6156938713344e+16,
      "train_loss": 0.43488813713979024,
      "train_runtime": 4328.4475,
      "train_samples_per_second": 31.969,
      "train_steps_per_second": 7.992
    }
  ],
  "logging_steps": 500,
  "max_steps": 34595,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.6156938713344e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}