{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 822,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0364963503649635,
      "grad_norm": 3.2654903919327523,
      "learning_rate": 5e-06,
      "loss": 1.0419,
      "step": 10
    },
    {
      "epoch": 0.072992700729927,
      "grad_norm": 1.5482985205580984,
      "learning_rate": 5e-06,
      "loss": 0.9117,
      "step": 20
    },
    {
      "epoch": 0.10948905109489052,
      "grad_norm": 1.2069486973191534,
      "learning_rate": 5e-06,
      "loss": 0.8752,
      "step": 30
    },
    {
      "epoch": 0.145985401459854,
      "grad_norm": 1.2169049310832125,
      "learning_rate": 5e-06,
      "loss": 0.8479,
      "step": 40
    },
    {
      "epoch": 0.18248175182481752,
      "grad_norm": 1.0461259998073966,
      "learning_rate": 5e-06,
      "loss": 0.8276,
      "step": 50
    },
    {
      "epoch": 0.21897810218978103,
      "grad_norm": 0.9136364083656688,
      "learning_rate": 5e-06,
      "loss": 0.8144,
      "step": 60
    },
    {
      "epoch": 0.25547445255474455,
      "grad_norm": 1.0016252258412965,
      "learning_rate": 5e-06,
      "loss": 0.8029,
      "step": 70
    },
    {
      "epoch": 0.291970802919708,
      "grad_norm": 0.7256070366121048,
      "learning_rate": 5e-06,
      "loss": 0.7984,
      "step": 80
    },
    {
      "epoch": 0.3284671532846715,
      "grad_norm": 0.8059560568156144,
      "learning_rate": 5e-06,
      "loss": 0.7845,
      "step": 90
    },
    {
      "epoch": 0.36496350364963503,
      "grad_norm": 1.0375230892576275,
      "learning_rate": 5e-06,
      "loss": 0.784,
      "step": 100
    },
    {
      "epoch": 0.40145985401459855,
      "grad_norm": 0.6429193748930873,
      "learning_rate": 5e-06,
      "loss": 0.7786,
      "step": 110
    },
    {
      "epoch": 0.43795620437956206,
      "grad_norm": 0.6387684294515616,
      "learning_rate": 5e-06,
      "loss": 0.7741,
      "step": 120
    },
    {
      "epoch": 0.4744525547445255,
      "grad_norm": 0.7647783012503196,
      "learning_rate": 5e-06,
      "loss": 0.7706,
      "step": 130
    },
    {
      "epoch": 0.5109489051094891,
      "grad_norm": 0.6954646892322683,
      "learning_rate": 5e-06,
      "loss": 0.7693,
      "step": 140
    },
    {
      "epoch": 0.5474452554744526,
      "grad_norm": 0.7090301419858328,
      "learning_rate": 5e-06,
      "loss": 0.7698,
      "step": 150
    },
    {
      "epoch": 0.583941605839416,
      "grad_norm": 0.8080543613629317,
      "learning_rate": 5e-06,
      "loss": 0.7595,
      "step": 160
    },
    {
      "epoch": 0.6204379562043796,
      "grad_norm": 0.6419738319193917,
      "learning_rate": 5e-06,
      "loss": 0.7614,
      "step": 170
    },
    {
      "epoch": 0.656934306569343,
      "grad_norm": 0.5918340383797932,
      "learning_rate": 5e-06,
      "loss": 0.7541,
      "step": 180
    },
    {
      "epoch": 0.6934306569343066,
      "grad_norm": 1.152710341207945,
      "learning_rate": 5e-06,
      "loss": 0.7573,
      "step": 190
    },
    {
      "epoch": 0.7299270072992701,
      "grad_norm": 0.6391926393585599,
      "learning_rate": 5e-06,
      "loss": 0.7582,
      "step": 200
    },
    {
      "epoch": 0.7664233576642335,
      "grad_norm": 0.5988044301788384,
      "learning_rate": 5e-06,
      "loss": 0.7582,
      "step": 210
    },
    {
      "epoch": 0.8029197080291971,
      "grad_norm": 0.6020411724470022,
      "learning_rate": 5e-06,
      "loss": 0.7519,
      "step": 220
    },
    {
      "epoch": 0.8394160583941606,
      "grad_norm": 0.6220550687844844,
      "learning_rate": 5e-06,
      "loss": 0.7551,
      "step": 230
    },
    {
      "epoch": 0.8759124087591241,
      "grad_norm": 0.6082664111510806,
      "learning_rate": 5e-06,
      "loss": 0.7519,
      "step": 240
    },
    {
      "epoch": 0.9124087591240876,
      "grad_norm": 0.7929440801500307,
      "learning_rate": 5e-06,
      "loss": 0.7471,
      "step": 250
    },
    {
      "epoch": 0.948905109489051,
      "grad_norm": 0.8557482541404012,
      "learning_rate": 5e-06,
      "loss": 0.7471,
      "step": 260
    },
    {
      "epoch": 0.9854014598540146,
      "grad_norm": 0.5725008171409565,
      "learning_rate": 5e-06,
      "loss": 0.7448,
      "step": 270
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.7464940547943115,
      "eval_runtime": 193.5595,
      "eval_samples_per_second": 38.143,
      "eval_steps_per_second": 0.599,
      "step": 274
    },
    {
      "epoch": 1.0218978102189782,
      "grad_norm": 0.8391272846123186,
      "learning_rate": 5e-06,
      "loss": 0.7201,
      "step": 280
    },
    {
      "epoch": 1.0583941605839415,
      "grad_norm": 0.8538310326832037,
      "learning_rate": 5e-06,
      "loss": 0.6987,
      "step": 290
    },
    {
      "epoch": 1.094890510948905,
      "grad_norm": 0.6827445863139939,
      "learning_rate": 5e-06,
      "loss": 0.695,
      "step": 300
    },
    {
      "epoch": 1.1313868613138687,
      "grad_norm": 0.5882300886638387,
      "learning_rate": 5e-06,
      "loss": 0.6922,
      "step": 310
    },
    {
      "epoch": 1.167883211678832,
      "grad_norm": 0.9284544165624958,
      "learning_rate": 5e-06,
      "loss": 0.7005,
      "step": 320
    },
    {
      "epoch": 1.2043795620437956,
      "grad_norm": 0.7696038972626205,
      "learning_rate": 5e-06,
      "loss": 0.696,
      "step": 330
    },
    {
      "epoch": 1.2408759124087592,
      "grad_norm": 0.626331783091493,
      "learning_rate": 5e-06,
      "loss": 0.6961,
      "step": 340
    },
    {
      "epoch": 1.2773722627737225,
      "grad_norm": 0.6263624432151553,
      "learning_rate": 5e-06,
      "loss": 0.6955,
      "step": 350
    },
    {
      "epoch": 1.313868613138686,
      "grad_norm": 0.8006137682650242,
      "learning_rate": 5e-06,
      "loss": 0.6967,
      "step": 360
    },
    {
      "epoch": 1.3503649635036497,
      "grad_norm": 0.5880134638823885,
      "learning_rate": 5e-06,
      "loss": 0.702,
      "step": 370
    },
    {
      "epoch": 1.3868613138686132,
      "grad_norm": 0.647442922917404,
      "learning_rate": 5e-06,
      "loss": 0.7,
      "step": 380
    },
    {
      "epoch": 1.4233576642335766,
      "grad_norm": 0.6033654206417786,
      "learning_rate": 5e-06,
      "loss": 0.6935,
      "step": 390
    },
    {
      "epoch": 1.4598540145985401,
      "grad_norm": 0.842826594179501,
      "learning_rate": 5e-06,
      "loss": 0.6925,
      "step": 400
    },
    {
      "epoch": 1.4963503649635037,
      "grad_norm": 0.6487088398169627,
      "learning_rate": 5e-06,
      "loss": 0.6987,
      "step": 410
    },
    {
      "epoch": 1.5328467153284673,
      "grad_norm": 0.644019689180568,
      "learning_rate": 5e-06,
      "loss": 0.6975,
      "step": 420
    },
    {
      "epoch": 1.5693430656934306,
      "grad_norm": 0.7554928160990546,
      "learning_rate": 5e-06,
      "loss": 0.6955,
      "step": 430
    },
    {
      "epoch": 1.6058394160583942,
      "grad_norm": 0.6360155091593863,
      "learning_rate": 5e-06,
      "loss": 0.6947,
      "step": 440
    },
    {
      "epoch": 1.6423357664233578,
      "grad_norm": 0.7649060131558615,
      "learning_rate": 5e-06,
      "loss": 0.6979,
      "step": 450
    },
    {
      "epoch": 1.6788321167883211,
      "grad_norm": 0.7060249710510591,
      "learning_rate": 5e-06,
      "loss": 0.6887,
      "step": 460
    },
    {
      "epoch": 1.7153284671532847,
      "grad_norm": 0.614705856783836,
      "learning_rate": 5e-06,
      "loss": 0.6925,
      "step": 470
    },
    {
      "epoch": 1.7518248175182483,
      "grad_norm": 0.6665801961837532,
      "learning_rate": 5e-06,
      "loss": 0.6939,
      "step": 480
    },
    {
      "epoch": 1.7883211678832116,
      "grad_norm": 0.6137773686568165,
      "learning_rate": 5e-06,
      "loss": 0.6968,
      "step": 490
    },
    {
      "epoch": 1.8248175182481752,
      "grad_norm": 0.597915675609789,
      "learning_rate": 5e-06,
      "loss": 0.6903,
      "step": 500
    },
    {
      "epoch": 1.8613138686131387,
      "grad_norm": 0.5853039235207544,
      "learning_rate": 5e-06,
      "loss": 0.6919,
      "step": 510
    },
    {
      "epoch": 1.897810218978102,
      "grad_norm": 0.6252196584141081,
      "learning_rate": 5e-06,
      "loss": 0.6917,
      "step": 520
    },
    {
      "epoch": 1.9343065693430657,
      "grad_norm": 0.6355616628686108,
      "learning_rate": 5e-06,
      "loss": 0.6947,
      "step": 530
    },
    {
      "epoch": 1.9708029197080292,
      "grad_norm": 0.7084920207276291,
      "learning_rate": 5e-06,
      "loss": 0.6909,
      "step": 540
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.7327279448509216,
      "eval_runtime": 184.0166,
      "eval_samples_per_second": 40.121,
      "eval_steps_per_second": 0.63,
      "step": 548
    },
    {
      "epoch": 2.0072992700729926,
      "grad_norm": 0.8657992423171681,
      "learning_rate": 5e-06,
      "loss": 0.6891,
      "step": 550
    },
    {
      "epoch": 2.0437956204379564,
      "grad_norm": 0.7941631908501828,
      "learning_rate": 5e-06,
      "loss": 0.6428,
      "step": 560
    },
    {
      "epoch": 2.0802919708029197,
      "grad_norm": 0.9214323090929812,
      "learning_rate": 5e-06,
      "loss": 0.6418,
      "step": 570
    },
    {
      "epoch": 2.116788321167883,
      "grad_norm": 0.7759251168034266,
      "learning_rate": 5e-06,
      "loss": 0.6411,
      "step": 580
    },
    {
      "epoch": 2.153284671532847,
      "grad_norm": 0.7159794483510337,
      "learning_rate": 5e-06,
      "loss": 0.6429,
      "step": 590
    },
    {
      "epoch": 2.18978102189781,
      "grad_norm": 0.7275049985326262,
      "learning_rate": 5e-06,
      "loss": 0.6418,
      "step": 600
    },
    {
      "epoch": 2.2262773722627736,
      "grad_norm": 0.5798767855808342,
      "learning_rate": 5e-06,
      "loss": 0.6455,
      "step": 610
    },
    {
      "epoch": 2.2627737226277373,
      "grad_norm": 0.5604983214077892,
      "learning_rate": 5e-06,
      "loss": 0.64,
      "step": 620
    },
    {
      "epoch": 2.2992700729927007,
      "grad_norm": 0.6059873724806449,
      "learning_rate": 5e-06,
      "loss": 0.6424,
      "step": 630
    },
    {
      "epoch": 2.335766423357664,
      "grad_norm": 0.8067784530621691,
      "learning_rate": 5e-06,
      "loss": 0.6422,
      "step": 640
    },
    {
      "epoch": 2.372262773722628,
      "grad_norm": 0.7136802882114688,
      "learning_rate": 5e-06,
      "loss": 0.6437,
      "step": 650
    },
    {
      "epoch": 2.408759124087591,
      "grad_norm": 0.6102999029962267,
      "learning_rate": 5e-06,
      "loss": 0.6458,
      "step": 660
    },
    {
      "epoch": 2.445255474452555,
      "grad_norm": 0.614293225765085,
      "learning_rate": 5e-06,
      "loss": 0.6452,
      "step": 670
    },
    {
      "epoch": 2.4817518248175183,
      "grad_norm": 0.600198323252072,
      "learning_rate": 5e-06,
      "loss": 0.644,
      "step": 680
    },
    {
      "epoch": 2.5182481751824817,
      "grad_norm": 0.7644298597279312,
      "learning_rate": 5e-06,
      "loss": 0.6491,
      "step": 690
    },
    {
      "epoch": 2.554744525547445,
      "grad_norm": 0.778090825027965,
      "learning_rate": 5e-06,
      "loss": 0.6431,
      "step": 700
    },
    {
      "epoch": 2.591240875912409,
      "grad_norm": 0.6476324319228641,
      "learning_rate": 5e-06,
      "loss": 0.6473,
      "step": 710
    },
    {
      "epoch": 2.627737226277372,
      "grad_norm": 0.5998032151877339,
      "learning_rate": 5e-06,
      "loss": 0.6465,
      "step": 720
    },
    {
      "epoch": 2.664233576642336,
      "grad_norm": 0.5955651290954712,
      "learning_rate": 5e-06,
      "loss": 0.6456,
      "step": 730
    },
    {
      "epoch": 2.7007299270072993,
      "grad_norm": 0.6010226042821943,
      "learning_rate": 5e-06,
      "loss": 0.6442,
      "step": 740
    },
    {
      "epoch": 2.7372262773722627,
      "grad_norm": 0.821931399421341,
      "learning_rate": 5e-06,
      "loss": 0.6459,
      "step": 750
    },
    {
      "epoch": 2.7737226277372264,
      "grad_norm": 0.6668282736525819,
      "learning_rate": 5e-06,
      "loss": 0.6503,
      "step": 760
    },
    {
      "epoch": 2.81021897810219,
      "grad_norm": 0.7099179596842483,
      "learning_rate": 5e-06,
      "loss": 0.6473,
      "step": 770
    },
    {
      "epoch": 2.846715328467153,
      "grad_norm": 0.6671120507448849,
      "learning_rate": 5e-06,
      "loss": 0.6426,
      "step": 780
    },
    {
      "epoch": 2.883211678832117,
      "grad_norm": 0.6360799683340312,
      "learning_rate": 5e-06,
      "loss": 0.6473,
      "step": 790
    },
    {
      "epoch": 2.9197080291970803,
      "grad_norm": 0.7340811126545655,
      "learning_rate": 5e-06,
      "loss": 0.651,
      "step": 800
    },
    {
      "epoch": 2.9562043795620436,
      "grad_norm": 0.618110632863654,
      "learning_rate": 5e-06,
      "loss": 0.6448,
      "step": 810
    },
    {
      "epoch": 2.9927007299270074,
      "grad_norm": 0.7521256358398457,
      "learning_rate": 5e-06,
      "loss": 0.6423,
      "step": 820
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.7341417074203491,
      "eval_runtime": 183.6218,
      "eval_samples_per_second": 40.208,
      "eval_steps_per_second": 0.632,
      "step": 822
    },
    {
      "epoch": 3.0,
      "step": 822,
      "total_flos": 1376671236096000.0,
      "train_loss": 0.7106172601962032,
      "train_runtime": 27349.7837,
      "train_samples_per_second": 15.387,
      "train_steps_per_second": 0.03
    }
  ],
  "logging_steps": 10,
  "max_steps": 822,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1376671236096000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}