{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 822,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0364963503649635,
      "grad_norm": 6.242694070966219,
      "learning_rate": 5e-06,
      "loss": 1.0388,
      "step": 10
    },
    {
      "epoch": 0.072992700729927,
      "grad_norm": 6.140265839791405,
      "learning_rate": 5e-06,
      "loss": 0.9087,
      "step": 20
    },
    {
      "epoch": 0.10948905109489052,
      "grad_norm": 2.0729558067197855,
      "learning_rate": 5e-06,
      "loss": 0.8718,
      "step": 30
    },
    {
      "epoch": 0.145985401459854,
      "grad_norm": 2.0048530569345027,
      "learning_rate": 5e-06,
      "loss": 0.8466,
      "step": 40
    },
    {
      "epoch": 0.18248175182481752,
      "grad_norm": 1.2903801782452229,
      "learning_rate": 5e-06,
      "loss": 0.8272,
      "step": 50
    },
    {
      "epoch": 0.21897810218978103,
      "grad_norm": 1.9320500006452086,
      "learning_rate": 5e-06,
      "loss": 0.8176,
      "step": 60
    },
    {
      "epoch": 0.25547445255474455,
      "grad_norm": 1.3130529470395709,
      "learning_rate": 5e-06,
      "loss": 0.8039,
      "step": 70
    },
    {
      "epoch": 0.291970802919708,
      "grad_norm": 1.1106618130287023,
      "learning_rate": 5e-06,
      "loss": 0.7935,
      "step": 80
    },
    {
      "epoch": 0.3284671532846715,
      "grad_norm": 0.9675468601929832,
      "learning_rate": 5e-06,
      "loss": 0.78,
      "step": 90
    },
    {
      "epoch": 0.36496350364963503,
      "grad_norm": 0.6972886370921517,
      "learning_rate": 5e-06,
      "loss": 0.7753,
      "step": 100
    },
    {
      "epoch": 0.40145985401459855,
      "grad_norm": 0.6872017378102949,
      "learning_rate": 5e-06,
      "loss": 0.7722,
      "step": 110
    },
    {
      "epoch": 0.43795620437956206,
      "grad_norm": 0.6670734728993309,
      "learning_rate": 5e-06,
      "loss": 0.7673,
      "step": 120
    },
    {
      "epoch": 0.4744525547445255,
      "grad_norm": 0.6673276704616633,
      "learning_rate": 5e-06,
      "loss": 0.7643,
      "step": 130
    },
    {
      "epoch": 0.5109489051094891,
      "grad_norm": 0.7068821433326216,
      "learning_rate": 5e-06,
      "loss": 0.7637,
      "step": 140
    },
    {
      "epoch": 0.5474452554744526,
      "grad_norm": 0.8071055608747135,
      "learning_rate": 5e-06,
      "loss": 0.7604,
      "step": 150
    },
    {
      "epoch": 0.583941605839416,
      "grad_norm": 0.6868966364491296,
      "learning_rate": 5e-06,
      "loss": 0.7522,
      "step": 160
    },
    {
      "epoch": 0.6204379562043796,
      "grad_norm": 0.9124104678824304,
      "learning_rate": 5e-06,
      "loss": 0.7547,
      "step": 170
    },
    {
      "epoch": 0.656934306569343,
      "grad_norm": 0.6792590544713151,
      "learning_rate": 5e-06,
      "loss": 0.7562,
      "step": 180
    },
    {
      "epoch": 0.6934306569343066,
      "grad_norm": 0.617023036902581,
      "learning_rate": 5e-06,
      "loss": 0.756,
      "step": 190
    },
    {
      "epoch": 0.7299270072992701,
      "grad_norm": 0.6319144841610138,
      "learning_rate": 5e-06,
      "loss": 0.751,
      "step": 200
    },
    {
      "epoch": 0.7664233576642335,
      "grad_norm": 0.6320543677861779,
      "learning_rate": 5e-06,
      "loss": 0.748,
      "step": 210
    },
    {
      "epoch": 0.8029197080291971,
      "grad_norm": 0.6210695741046822,
      "learning_rate": 5e-06,
      "loss": 0.7489,
      "step": 220
    },
    {
      "epoch": 0.8394160583941606,
      "grad_norm": 0.7516050498994822,
      "learning_rate": 5e-06,
      "loss": 0.7453,
      "step": 230
    },
    {
      "epoch": 0.8759124087591241,
      "grad_norm": 0.5948362298346876,
      "learning_rate": 5e-06,
      "loss": 0.7444,
      "step": 240
    },
    {
      "epoch": 0.9124087591240876,
      "grad_norm": 0.7329567143609019,
      "learning_rate": 5e-06,
      "loss": 0.7444,
      "step": 250
    },
    {
      "epoch": 0.948905109489051,
      "grad_norm": 0.5724746440812548,
      "learning_rate": 5e-06,
      "loss": 0.7372,
      "step": 260
    },
    {
      "epoch": 0.9854014598540146,
      "grad_norm": 0.5915513551849372,
      "learning_rate": 5e-06,
      "loss": 0.7414,
      "step": 270
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.737393319606781,
      "eval_runtime": 26.5119,
      "eval_samples_per_second": 278.403,
      "eval_steps_per_second": 1.094,
      "step": 274
    },
    {
      "epoch": 1.0218978102189782,
      "grad_norm": 1.1407224725998888,
      "learning_rate": 5e-06,
      "loss": 0.707,
      "step": 280
    },
    {
      "epoch": 1.0583941605839415,
      "grad_norm": 0.8137371947663772,
      "learning_rate": 5e-06,
      "loss": 0.6965,
      "step": 290
    },
    {
      "epoch": 1.094890510948905,
      "grad_norm": 0.7624656836561821,
      "learning_rate": 5e-06,
      "loss": 0.6929,
      "step": 300
    },
    {
      "epoch": 1.1313868613138687,
      "grad_norm": 0.6731270507290879,
      "learning_rate": 5e-06,
      "loss": 0.6907,
      "step": 310
    },
    {
      "epoch": 1.167883211678832,
      "grad_norm": 0.6477624974220811,
      "learning_rate": 5e-06,
      "loss": 0.6883,
      "step": 320
    },
    {
      "epoch": 1.2043795620437956,
      "grad_norm": 0.6806120359954909,
      "learning_rate": 5e-06,
      "loss": 0.6896,
      "step": 330
    },
    {
      "epoch": 1.2408759124087592,
      "grad_norm": 0.6534094167466027,
      "learning_rate": 5e-06,
      "loss": 0.6834,
      "step": 340
    },
    {
      "epoch": 1.2773722627737225,
      "grad_norm": 0.7053154028357804,
      "learning_rate": 5e-06,
      "loss": 0.6938,
      "step": 350
    },
    {
      "epoch": 1.313868613138686,
      "grad_norm": 0.5843975789107667,
      "learning_rate": 5e-06,
      "loss": 0.6905,
      "step": 360
    },
    {
      "epoch": 1.3503649635036497,
      "grad_norm": 0.6607671591107314,
      "learning_rate": 5e-06,
      "loss": 0.6887,
      "step": 370
    },
    {
      "epoch": 1.3868613138686132,
      "grad_norm": 0.6948472684652132,
      "learning_rate": 5e-06,
      "loss": 0.6863,
      "step": 380
    },
    {
      "epoch": 1.4233576642335766,
      "grad_norm": 0.6575033962434685,
      "learning_rate": 5e-06,
      "loss": 0.6886,
      "step": 390
    },
    {
      "epoch": 1.4598540145985401,
      "grad_norm": 0.5872317321934506,
      "learning_rate": 5e-06,
      "loss": 0.6868,
      "step": 400
    },
    {
      "epoch": 1.4963503649635037,
      "grad_norm": 0.5826458330462528,
      "learning_rate": 5e-06,
      "loss": 0.6895,
      "step": 410
    },
    {
      "epoch": 1.5328467153284673,
      "grad_norm": 0.6464190966828731,
      "learning_rate": 5e-06,
      "loss": 0.6836,
      "step": 420
    },
    {
      "epoch": 1.5693430656934306,
      "grad_norm": 0.7159795940614451,
      "learning_rate": 5e-06,
      "loss": 0.6834,
      "step": 430
    },
    {
      "epoch": 1.6058394160583942,
      "grad_norm": 0.74415061125301,
      "learning_rate": 5e-06,
      "loss": 0.6922,
      "step": 440
    },
    {
      "epoch": 1.6423357664233578,
      "grad_norm": 0.5766064803057743,
      "learning_rate": 5e-06,
      "loss": 0.6848,
      "step": 450
    },
    {
      "epoch": 1.6788321167883211,
      "grad_norm": 0.6927585803335585,
      "learning_rate": 5e-06,
      "loss": 0.6839,
      "step": 460
    },
    {
      "epoch": 1.7153284671532847,
      "grad_norm": 0.551695429707459,
      "learning_rate": 5e-06,
      "loss": 0.6859,
      "step": 470
    },
    {
      "epoch": 1.7518248175182483,
      "grad_norm": 0.6002867275771926,
      "learning_rate": 5e-06,
      "loss": 0.6872,
      "step": 480
    },
    {
      "epoch": 1.7883211678832116,
      "grad_norm": 0.5786825602682476,
      "learning_rate": 5e-06,
      "loss": 0.6882,
      "step": 490
    },
    {
      "epoch": 1.8248175182481752,
      "grad_norm": 0.6235940806026485,
      "learning_rate": 5e-06,
      "loss": 0.6869,
      "step": 500
    },
    {
      "epoch": 1.8613138686131387,
      "grad_norm": 0.6421720075253999,
      "learning_rate": 5e-06,
      "loss": 0.6869,
      "step": 510
    },
    {
      "epoch": 1.897810218978102,
      "grad_norm": 0.6399996565023783,
      "learning_rate": 5e-06,
      "loss": 0.683,
      "step": 520
    },
    {
      "epoch": 1.9343065693430657,
      "grad_norm": 0.6345252795641959,
      "learning_rate": 5e-06,
      "loss": 0.683,
      "step": 530
    },
    {
      "epoch": 1.9708029197080292,
      "grad_norm": 0.6174798987748756,
      "learning_rate": 5e-06,
      "loss": 0.6788,
      "step": 540
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.724261999130249,
      "eval_runtime": 29.7165,
      "eval_samples_per_second": 248.38,
      "eval_steps_per_second": 0.976,
      "step": 548
    },
    {
      "epoch": 2.0072992700729926,
      "grad_norm": 0.9503885013468557,
      "learning_rate": 5e-06,
      "loss": 0.6733,
      "step": 550
    },
    {
      "epoch": 2.0437956204379564,
      "grad_norm": 0.7079333930728279,
      "learning_rate": 5e-06,
      "loss": 0.6294,
      "step": 560
    },
    {
      "epoch": 2.0802919708029197,
      "grad_norm": 0.8055321987953024,
      "learning_rate": 5e-06,
      "loss": 0.6282,
      "step": 570
    },
    {
      "epoch": 2.116788321167883,
      "grad_norm": 0.7013752206943129,
      "learning_rate": 5e-06,
      "loss": 0.6346,
      "step": 580
    },
    {
      "epoch": 2.153284671532847,
      "grad_norm": 0.795586932183297,
      "learning_rate": 5e-06,
      "loss": 0.6309,
      "step": 590
    },
    {
      "epoch": 2.18978102189781,
      "grad_norm": 0.6072532710663935,
      "learning_rate": 5e-06,
      "loss": 0.6288,
      "step": 600
    },
    {
      "epoch": 2.2262773722627736,
      "grad_norm": 0.7308694689768187,
      "learning_rate": 5e-06,
      "loss": 0.6335,
      "step": 610
    },
    {
      "epoch": 2.2627737226277373,
      "grad_norm": 0.8888891048172273,
      "learning_rate": 5e-06,
      "loss": 0.6347,
      "step": 620
    },
    {
      "epoch": 2.2992700729927007,
      "grad_norm": 0.5694467380633018,
      "learning_rate": 5e-06,
      "loss": 0.6358,
      "step": 630
    },
    {
      "epoch": 2.335766423357664,
      "grad_norm": 0.6996612017894321,
      "learning_rate": 5e-06,
      "loss": 0.6331,
      "step": 640
    },
    {
      "epoch": 2.372262773722628,
      "grad_norm": 0.6798927912381371,
      "learning_rate": 5e-06,
      "loss": 0.6355,
      "step": 650
    },
    {
      "epoch": 2.408759124087591,
      "grad_norm": 0.7832060679040136,
      "learning_rate": 5e-06,
      "loss": 0.6306,
      "step": 660
    },
    {
      "epoch": 2.445255474452555,
      "grad_norm": 0.8518259483264891,
      "learning_rate": 5e-06,
      "loss": 0.6329,
      "step": 670
    },
    {
      "epoch": 2.4817518248175183,
      "grad_norm": 0.8847565076314947,
      "learning_rate": 5e-06,
      "loss": 0.6355,
      "step": 680
    },
    {
      "epoch": 2.5182481751824817,
      "grad_norm": 0.8790006051896251,
      "learning_rate": 5e-06,
      "loss": 0.6341,
      "step": 690
    },
    {
      "epoch": 2.554744525547445,
      "grad_norm": 0.7205003318722193,
      "learning_rate": 5e-06,
      "loss": 0.6353,
      "step": 700
    },
    {
      "epoch": 2.591240875912409,
      "grad_norm": 0.6695065635591323,
      "learning_rate": 5e-06,
      "loss": 0.6336,
      "step": 710
    },
    {
      "epoch": 2.627737226277372,
      "grad_norm": 0.701791622519896,
      "learning_rate": 5e-06,
      "loss": 0.6392,
      "step": 720
    },
    {
      "epoch": 2.664233576642336,
      "grad_norm": 0.6510579026114841,
      "learning_rate": 5e-06,
      "loss": 0.6325,
      "step": 730
    },
    {
      "epoch": 2.7007299270072993,
      "grad_norm": 0.6391591776977092,
      "learning_rate": 5e-06,
      "loss": 0.6379,
      "step": 740
    },
    {
      "epoch": 2.7372262773722627,
      "grad_norm": 0.6379410196843575,
      "learning_rate": 5e-06,
      "loss": 0.6428,
      "step": 750
    },
    {
      "epoch": 2.7737226277372264,
      "grad_norm": 0.5663117162856989,
      "learning_rate": 5e-06,
      "loss": 0.6365,
      "step": 760
    },
    {
      "epoch": 2.81021897810219,
      "grad_norm": 0.5976599060822565,
      "learning_rate": 5e-06,
      "loss": 0.6337,
      "step": 770
    },
    {
      "epoch": 2.846715328467153,
      "grad_norm": 0.5764241288666703,
      "learning_rate": 5e-06,
      "loss": 0.6378,
      "step": 780
    },
    {
      "epoch": 2.883211678832117,
      "grad_norm": 0.5923449235752416,
      "learning_rate": 5e-06,
      "loss": 0.6414,
      "step": 790
    },
    {
      "epoch": 2.9197080291970803,
      "grad_norm": 0.7012989231730122,
      "learning_rate": 5e-06,
      "loss": 0.6389,
      "step": 800
    },
    {
      "epoch": 2.9562043795620436,
      "grad_norm": 0.669400873506838,
      "learning_rate": 5e-06,
      "loss": 0.6429,
      "step": 810
    },
    {
      "epoch": 2.9927007299270074,
      "grad_norm": 0.6342356482957713,
      "learning_rate": 5e-06,
      "loss": 0.6398,
      "step": 820
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.7273038625717163,
      "eval_runtime": 26.4167,
      "eval_samples_per_second": 279.406,
      "eval_steps_per_second": 1.098,
      "step": 822
    },
    {
      "epoch": 3.0,
      "step": 822,
      "total_flos": 1376880615751680.0,
      "train_loss": 0.7032751636783572,
      "train_runtime": 5303.7541,
      "train_samples_per_second": 79.315,
      "train_steps_per_second": 0.155
    }
  ],
  "logging_steps": 10,
  "max_steps": 822,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1376880615751680.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}