{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 822,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0364963503649635,
      "grad_norm": 6.935588591840994,
      "learning_rate": 5e-06,
      "loss": 1.0327,
      "step": 10
    },
    {
      "epoch": 0.072992700729927,
      "grad_norm": 2.670201985668935,
      "learning_rate": 5e-06,
      "loss": 0.9089,
      "step": 20
    },
    {
      "epoch": 0.10948905109489052,
      "grad_norm": 6.488843905015272,
      "learning_rate": 5e-06,
      "loss": 0.8647,
      "step": 30
    },
    {
      "epoch": 0.145985401459854,
      "grad_norm": 1.750141549981801,
      "learning_rate": 5e-06,
      "loss": 0.8455,
      "step": 40
    },
    {
      "epoch": 0.18248175182481752,
      "grad_norm": 1.7426579902019592,
      "learning_rate": 5e-06,
      "loss": 0.8261,
      "step": 50
    },
    {
      "epoch": 0.21897810218978103,
      "grad_norm": 1.5150538320225337,
      "learning_rate": 5e-06,
      "loss": 0.8215,
      "step": 60
    },
    {
      "epoch": 0.25547445255474455,
      "grad_norm": 1.34752537394588,
      "learning_rate": 5e-06,
      "loss": 0.806,
      "step": 70
    },
    {
      "epoch": 0.291970802919708,
      "grad_norm": 0.8501658634743531,
      "learning_rate": 5e-06,
      "loss": 0.7944,
      "step": 80
    },
    {
      "epoch": 0.3284671532846715,
      "grad_norm": 1.1940581870093743,
      "learning_rate": 5e-06,
      "loss": 0.7843,
      "step": 90
    },
    {
      "epoch": 0.36496350364963503,
      "grad_norm": 1.5996734385655718,
      "learning_rate": 5e-06,
      "loss": 0.7782,
      "step": 100
    },
    {
      "epoch": 0.40145985401459855,
      "grad_norm": 1.0860044183217865,
      "learning_rate": 5e-06,
      "loss": 0.7704,
      "step": 110
    },
    {
      "epoch": 0.43795620437956206,
      "grad_norm": 0.6827596601808271,
      "learning_rate": 5e-06,
      "loss": 0.7721,
      "step": 120
    },
    {
      "epoch": 0.4744525547445255,
      "grad_norm": 0.8446630134792682,
      "learning_rate": 5e-06,
      "loss": 0.7694,
      "step": 130
    },
    {
      "epoch": 0.5109489051094891,
      "grad_norm": 0.6527357476166321,
      "learning_rate": 5e-06,
      "loss": 0.7607,
      "step": 140
    },
    {
      "epoch": 0.5474452554744526,
      "grad_norm": 0.8473812345074847,
      "learning_rate": 5e-06,
      "loss": 0.7654,
      "step": 150
    },
    {
      "epoch": 0.583941605839416,
      "grad_norm": 0.680871697828666,
      "learning_rate": 5e-06,
      "loss": 0.7569,
      "step": 160
    },
    {
      "epoch": 0.6204379562043796,
      "grad_norm": 0.7106980775523508,
      "learning_rate": 5e-06,
      "loss": 0.7558,
      "step": 170
    },
    {
      "epoch": 0.656934306569343,
      "grad_norm": 0.6257154252846203,
      "learning_rate": 5e-06,
      "loss": 0.7555,
      "step": 180
    },
    {
      "epoch": 0.6934306569343066,
      "grad_norm": 0.8083982567358466,
      "learning_rate": 5e-06,
      "loss": 0.7514,
      "step": 190
    },
    {
      "epoch": 0.7299270072992701,
      "grad_norm": 0.8444839075154996,
      "learning_rate": 5e-06,
      "loss": 0.7498,
      "step": 200
    },
    {
      "epoch": 0.7664233576642335,
      "grad_norm": 0.7002950498691304,
      "learning_rate": 5e-06,
      "loss": 0.747,
      "step": 210
    },
    {
      "epoch": 0.8029197080291971,
      "grad_norm": 0.8747715499460583,
      "learning_rate": 5e-06,
      "loss": 0.7455,
      "step": 220
    },
    {
      "epoch": 0.8394160583941606,
      "grad_norm": 0.8840789126221499,
      "learning_rate": 5e-06,
      "loss": 0.7437,
      "step": 230
    },
    {
      "epoch": 0.8759124087591241,
      "grad_norm": 0.6784150124578601,
      "learning_rate": 5e-06,
      "loss": 0.7421,
      "step": 240
    },
    {
      "epoch": 0.9124087591240876,
      "grad_norm": 0.6074960130500583,
      "learning_rate": 5e-06,
      "loss": 0.7423,
      "step": 250
    },
    {
      "epoch": 0.948905109489051,
      "grad_norm": 0.6838717489885743,
      "learning_rate": 5e-06,
      "loss": 0.7427,
      "step": 260
    },
    {
      "epoch": 0.9854014598540146,
      "grad_norm": 0.6526592018350256,
      "learning_rate": 5e-06,
      "loss": 0.7416,
      "step": 270
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.7413060665130615,
      "eval_runtime": 26.3056,
      "eval_samples_per_second": 279.864,
      "eval_steps_per_second": 1.102,
      "step": 274
    },
    {
      "epoch": 1.0218978102189782,
      "grad_norm": 0.8154022012318396,
      "learning_rate": 5e-06,
      "loss": 0.7114,
      "step": 280
    },
    {
      "epoch": 1.0583941605839415,
      "grad_norm": 0.8045050782757963,
      "learning_rate": 5e-06,
      "loss": 0.6861,
      "step": 290
    },
    {
      "epoch": 1.094890510948905,
      "grad_norm": 0.6684516327458488,
      "learning_rate": 5e-06,
      "loss": 0.6847,
      "step": 300
    },
    {
      "epoch": 1.1313868613138687,
      "grad_norm": 0.6133382471958105,
      "learning_rate": 5e-06,
      "loss": 0.6888,
      "step": 310
    },
    {
      "epoch": 1.167883211678832,
      "grad_norm": 0.6070090310932738,
      "learning_rate": 5e-06,
      "loss": 0.6864,
      "step": 320
    },
    {
      "epoch": 1.2043795620437956,
      "grad_norm": 0.6691556220694154,
      "learning_rate": 5e-06,
      "loss": 0.6858,
      "step": 330
    },
    {
      "epoch": 1.2408759124087592,
      "grad_norm": 0.7214448473176965,
      "learning_rate": 5e-06,
      "loss": 0.6888,
      "step": 340
    },
    {
      "epoch": 1.2773722627737225,
      "grad_norm": 0.6313497992938364,
      "learning_rate": 5e-06,
      "loss": 0.6895,
      "step": 350
    },
    {
      "epoch": 1.313868613138686,
      "grad_norm": 0.6011076115864562,
      "learning_rate": 5e-06,
      "loss": 0.6894,
      "step": 360
    },
    {
      "epoch": 1.3503649635036497,
      "grad_norm": 0.6417225834212155,
      "learning_rate": 5e-06,
      "loss": 0.6842,
      "step": 370
    },
    {
      "epoch": 1.3868613138686132,
      "grad_norm": 0.8658988992278465,
      "learning_rate": 5e-06,
      "loss": 0.6865,
      "step": 380
    },
    {
      "epoch": 1.4233576642335766,
      "grad_norm": 1.1272922956683207,
      "learning_rate": 5e-06,
      "loss": 0.6866,
      "step": 390
    },
    {
      "epoch": 1.4598540145985401,
      "grad_norm": 0.5914462231869502,
      "learning_rate": 5e-06,
      "loss": 0.6865,
      "step": 400
    },
    {
      "epoch": 1.4963503649635037,
      "grad_norm": 0.5668202854702565,
      "learning_rate": 5e-06,
      "loss": 0.6846,
      "step": 410
    },
    {
      "epoch": 1.5328467153284673,
      "grad_norm": 0.6635467334798604,
      "learning_rate": 5e-06,
      "loss": 0.6921,
      "step": 420
    },
    {
      "epoch": 1.5693430656934306,
      "grad_norm": 0.7643912156465277,
      "learning_rate": 5e-06,
      "loss": 0.6875,
      "step": 430
    },
    {
      "epoch": 1.6058394160583942,
      "grad_norm": 0.6238580970561005,
      "learning_rate": 5e-06,
      "loss": 0.6863,
      "step": 440
    },
    {
      "epoch": 1.6423357664233578,
      "grad_norm": 0.6822599541559131,
      "learning_rate": 5e-06,
      "loss": 0.6846,
      "step": 450
    },
    {
      "epoch": 1.6788321167883211,
      "grad_norm": 0.6619644198552357,
      "learning_rate": 5e-06,
      "loss": 0.6852,
      "step": 460
    },
    {
      "epoch": 1.7153284671532847,
      "grad_norm": 0.5553682134217856,
      "learning_rate": 5e-06,
      "loss": 0.6903,
      "step": 470
    },
    {
      "epoch": 1.7518248175182483,
      "grad_norm": 0.68779697897221,
      "learning_rate": 5e-06,
      "loss": 0.6857,
      "step": 480
    },
    {
      "epoch": 1.7883211678832116,
      "grad_norm": 0.6635201246353984,
      "learning_rate": 5e-06,
      "loss": 0.6875,
      "step": 490
    },
    {
      "epoch": 1.8248175182481752,
      "grad_norm": 0.6121605469304142,
      "learning_rate": 5e-06,
      "loss": 0.6868,
      "step": 500
    },
    {
      "epoch": 1.8613138686131387,
      "grad_norm": 0.619764705154293,
      "learning_rate": 5e-06,
      "loss": 0.6823,
      "step": 510
    },
    {
      "epoch": 1.897810218978102,
      "grad_norm": 0.5832433162588191,
      "learning_rate": 5e-06,
      "loss": 0.681,
      "step": 520
    },
    {
      "epoch": 1.9343065693430657,
      "grad_norm": 0.7973902031324107,
      "learning_rate": 5e-06,
      "loss": 0.6801,
      "step": 530
    },
    {
      "epoch": 1.9708029197080292,
      "grad_norm": 0.6524573193193096,
      "learning_rate": 5e-06,
      "loss": 0.6856,
      "step": 540
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.7306408882141113,
      "eval_runtime": 29.0253,
      "eval_samples_per_second": 253.641,
      "eval_steps_per_second": 0.999,
      "step": 548
    },
    {
      "epoch": 2.0072992700729926,
      "grad_norm": 1.0397746521566786,
      "learning_rate": 5e-06,
      "loss": 0.6708,
      "step": 550
    },
    {
      "epoch": 2.0437956204379564,
      "grad_norm": 0.7448998852970502,
      "learning_rate": 5e-06,
      "loss": 0.6358,
      "step": 560
    },
    {
      "epoch": 2.0802919708029197,
      "grad_norm": 0.821170183708594,
      "learning_rate": 5e-06,
      "loss": 0.6306,
      "step": 570
    },
    {
      "epoch": 2.116788321167883,
      "grad_norm": 0.8192091109666185,
      "learning_rate": 5e-06,
      "loss": 0.6289,
      "step": 580
    },
    {
      "epoch": 2.153284671532847,
      "grad_norm": 0.6531060593153705,
      "learning_rate": 5e-06,
      "loss": 0.6303,
      "step": 590
    },
    {
      "epoch": 2.18978102189781,
      "grad_norm": 0.6207596231842625,
      "learning_rate": 5e-06,
      "loss": 0.6291,
      "step": 600
    },
    {
      "epoch": 2.2262773722627736,
      "grad_norm": 0.6299880389445941,
      "learning_rate": 5e-06,
      "loss": 0.6266,
      "step": 610
    },
    {
      "epoch": 2.2627737226277373,
      "grad_norm": 0.7577058406502929,
      "learning_rate": 5e-06,
      "loss": 0.6316,
      "step": 620
    },
    {
      "epoch": 2.2992700729927007,
      "grad_norm": 0.7972966242533703,
      "learning_rate": 5e-06,
      "loss": 0.6324,
      "step": 630
    },
    {
      "epoch": 2.335766423357664,
      "grad_norm": 0.6748643973886247,
      "learning_rate": 5e-06,
      "loss": 0.6276,
      "step": 640
    },
    {
      "epoch": 2.372262773722628,
      "grad_norm": 0.6136288389697012,
      "learning_rate": 5e-06,
      "loss": 0.6376,
      "step": 650
    },
    {
      "epoch": 2.408759124087591,
      "grad_norm": 0.6659808481432584,
      "learning_rate": 5e-06,
      "loss": 0.6287,
      "step": 660
    },
    {
      "epoch": 2.445255474452555,
      "grad_norm": 0.6729354487925674,
      "learning_rate": 5e-06,
      "loss": 0.6355,
      "step": 670
    },
    {
      "epoch": 2.4817518248175183,
      "grad_norm": 0.5975205704498788,
      "learning_rate": 5e-06,
      "loss": 0.6361,
      "step": 680
    },
    {
      "epoch": 2.5182481751824817,
      "grad_norm": 0.6572927598737819,
      "learning_rate": 5e-06,
      "loss": 0.6377,
      "step": 690
    },
    {
      "epoch": 2.554744525547445,
      "grad_norm": 0.5847912296345943,
      "learning_rate": 5e-06,
      "loss": 0.6348,
      "step": 700
    },
    {
      "epoch": 2.591240875912409,
      "grad_norm": 0.5992831189054557,
      "learning_rate": 5e-06,
      "loss": 0.6409,
      "step": 710
    },
    {
      "epoch": 2.627737226277372,
      "grad_norm": 0.6534494990778235,
      "learning_rate": 5e-06,
      "loss": 0.6295,
      "step": 720
    },
    {
      "epoch": 2.664233576642336,
      "grad_norm": 0.7980552337710113,
      "learning_rate": 5e-06,
      "loss": 0.6353,
      "step": 730
    },
    {
      "epoch": 2.7007299270072993,
      "grad_norm": 0.5906002533417896,
      "learning_rate": 5e-06,
      "loss": 0.6363,
      "step": 740
    },
    {
      "epoch": 2.7372262773722627,
      "grad_norm": 0.6317871178225047,
      "learning_rate": 5e-06,
      "loss": 0.6349,
      "step": 750
    },
    {
      "epoch": 2.7737226277372264,
      "grad_norm": 0.5701689110159228,
      "learning_rate": 5e-06,
      "loss": 0.6407,
      "step": 760
    },
    {
      "epoch": 2.81021897810219,
      "grad_norm": 0.7789184105520879,
      "learning_rate": 5e-06,
      "loss": 0.631,
      "step": 770
    },
    {
      "epoch": 2.846715328467153,
      "grad_norm": 0.7395144400572116,
      "learning_rate": 5e-06,
      "loss": 0.6298,
      "step": 780
    },
    {
      "epoch": 2.883211678832117,
      "grad_norm": 0.7833250133924173,
      "learning_rate": 5e-06,
      "loss": 0.6325,
      "step": 790
    },
    {
      "epoch": 2.9197080291970803,
      "grad_norm": 0.750312659575106,
      "learning_rate": 5e-06,
      "loss": 0.6425,
      "step": 800
    },
    {
      "epoch": 2.9562043795620436,
      "grad_norm": 0.6284417569809958,
      "learning_rate": 5e-06,
      "loss": 0.6323,
      "step": 810
    },
    {
      "epoch": 2.9927007299270074,
      "grad_norm": 0.5631024071115539,
      "learning_rate": 5e-06,
      "loss": 0.6383,
      "step": 820
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.7370563745498657,
      "eval_runtime": 26.6576,
      "eval_samples_per_second": 276.169,
      "eval_steps_per_second": 1.088,
      "step": 822
    },
    {
      "epoch": 3.0,
      "step": 822,
      "total_flos": 1376880615751680.0,
      "train_loss": 0.7024027848765798,
      "train_runtime": 5290.9875,
      "train_samples_per_second": 79.308,
      "train_steps_per_second": 0.155
    }
  ],
  "logging_steps": 10,
  "max_steps": 822,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1376880615751680.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}