{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.999068850303993,
  "eval_steps": 500,
  "global_step": 570,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 3.5087719298245615e-06,
      "loss": 1.2582,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.7543859649122806e-05,
      "loss": 1.2488,
      "step": 5
    },
    {
      "epoch": 0.02,
      "learning_rate": 3.508771929824561e-05,
      "loss": 1.2359,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 5.2631578947368424e-05,
      "loss": 1.2016,
      "step": 15
    },
    {
      "epoch": 0.04,
      "learning_rate": 7.017543859649122e-05,
      "loss": 1.155,
      "step": 20
    },
    {
      "epoch": 0.04,
      "learning_rate": 8.771929824561403e-05,
      "loss": 1.1231,
      "step": 25
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00010526315789473685,
      "loss": 1.1019,
      "step": 30
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00012280701754385965,
      "loss": 1.0873,
      "step": 35
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00014035087719298245,
      "loss": 1.0644,
      "step": 40
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00015789473684210527,
      "loss": 1.0655,
      "step": 45
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00017543859649122806,
      "loss": 1.0574,
      "step": 50
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019298245614035088,
      "loss": 1.0447,
      "step": 55
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019998312416333227,
      "loss": 1.0356,
      "step": 60
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001998800146766861,
      "loss": 1.0223,
      "step": 65
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019968326771610797,
      "loss": 1.031,
      "step": 70
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019939306773179497,
      "loss": 1.0194,
      "step": 75
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019900968678611666,
      "loss": 1.0126,
      "step": 80
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019853348429855672,
      "loss": 1.0082,
      "step": 85
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0001979649067087574,
      "loss": 1.0099,
      "step": 90
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019730448705798239,
      "loss": 1.0018,
      "step": 95
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019655284448939094,
      "loss": 1.0068,
      "step": 100
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019571068366759143,
      "loss": 1.0011,
      "step": 105
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00019477879411801844,
      "loss": 1.0074,
      "step": 110
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00019375804948675306,
      "loss": 0.9902,
      "step": 115
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00019264940672148018,
      "loss": 0.996,
      "step": 120
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00019145390517435012,
      "loss": 1.0023,
      "step": 125
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00019017266562758659,
      "loss": 1.0005,
      "step": 130
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018880688924275378,
      "loss": 0.9882,
      "step": 135
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018735785643466784,
      "loss": 0.9905,
      "step": 140
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018582692567100867,
      "loss": 0.9949,
      "step": 145
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018421553219875658,
      "loss": 0.9976,
      "step": 150
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018252518669864936,
      "loss": 0.9847,
      "step": 155
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001807574738689193,
      "loss": 0.9881,
      "step": 160
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00017891405093963938,
      "loss": 0.9812,
      "step": 165
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00017699664611907072,
      "loss": 1.0059,
      "step": 170
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001750070569734681,
      "loss": 0.9895,
      "step": 175
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0001729471487418621,
      "loss": 0.9902,
      "step": 180
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00017081885258739846,
      "loss": 0.9846,
      "step": 185
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0001686241637868734,
      "loss": 0.9848,
      "step": 190
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00016636513986016213,
      "loss": 0.9778,
      "step": 195
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00016404389864129533,
      "loss": 0.9812,
      "step": 200
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00016166261629298995,
      "loss": 1.0017,
      "step": 205
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00015922352526649803,
      "loss": 0.9853,
      "step": 210
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00015672891220868432,
      "loss": 0.9849,
      "step": 215
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00015418111581829574,
      "loss": 0.9827,
      "step": 220
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00015158252465343242,
      "loss": 0.9908,
      "step": 225
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00014893557489227517,
      "loss": 0.9773,
      "step": 230
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00014624274804916958,
      "loss": 0.988,
      "step": 235
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00014350656864820733,
      "loss": 0.9897,
      "step": 240
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00014072960185648577,
      "loss": 0.9757,
      "step": 245
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00013791445107926478,
      "loss": 0.9731,
      "step": 250
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00013506375551927547,
      "loss": 0.9815,
      "step": 255
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00013218018770246858,
      "loss": 0.9755,
      "step": 260
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0001292664509725226,
      "loss": 0.9746,
      "step": 265
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00012632527695645993,
      "loss": 0.9744,
      "step": 270
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00012335942300374788,
      "loss": 0.9807,
      "step": 275
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00012037166960128443,
      "loss": 0.9888,
      "step": 280
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00011736481776669306,
      "loss": 0.9848,
      "step": 285
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00011434168642236964,
      "loss": 0.9797,
      "step": 290
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00011130510975274409,
      "loss": 0.9799,
      "step": 295
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00010825793454723325,
      "loss": 0.9939,
      "step": 300
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00010520301753137724,
      "loss": 0.9693,
      "step": 305
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00010214322268866032,
      "loss": 0.9846,
      "step": 310
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.908141857552737e-05,
      "loss": 0.9754,
      "step": 315
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.602047563211359e-05,
      "loss": 0.9912,
      "step": 320
    },
    {
      "epoch": 0.57,
      "learning_rate": 9.296326349120785e-05,
      "loss": 0.9778,
      "step": 325
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.991264828797319e-05,
      "loss": 0.9828,
      "step": 330
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.687148997294621e-05,
      "loss": 0.9717,
      "step": 335
    },
    {
      "epoch": 0.6,
      "learning_rate": 8.384263963083453e-05,
      "loss": 0.9907,
      "step": 340
    },
    {
      "epoch": 0.6,
      "learning_rate": 8.082893680762619e-05,
      "loss": 0.9787,
      "step": 345
    },
    {
      "epoch": 0.61,
      "learning_rate": 7.783320684851614e-05,
      "loss": 0.9793,
      "step": 350
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.485825824914659e-05,
      "loss": 0.9698,
      "step": 355
    },
    {
      "epoch": 0.63,
      "learning_rate": 7.190688002264308e-05,
      "loss": 0.9792,
      "step": 360
    },
    {
      "epoch": 0.64,
      "learning_rate": 6.898183908491617e-05,
      "loss": 0.9821,
      "step": 365
    },
    {
      "epoch": 0.65,
      "learning_rate": 6.608587766067852e-05,
      "loss": 0.9796,
      "step": 370
    },
    {
      "epoch": 0.66,
      "learning_rate": 6.322171071261071e-05,
      "loss": 0.9832,
      "step": 375
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.039202339608432e-05,
      "loss": 0.9853,
      "step": 380
    },
    {
      "epoch": 0.67,
      "learning_rate": 5.7599468541830356e-05,
      "loss": 0.9659,
      "step": 385
    },
    {
      "epoch": 0.68,
      "learning_rate": 5.484666416891109e-05,
      "loss": 0.971,
      "step": 390
    },
    {
      "epoch": 0.69,
      "learning_rate": 5.2136191030328455e-05,
      "loss": 0.9705,
      "step": 395
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.9470590193569044e-05,
      "loss": 0.9829,
      "step": 400
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.685236065835443e-05,
      "loss": 0.9638,
      "step": 405
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.4283957013829846e-05,
      "loss": 0.9592,
      "step": 410
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.176778713738787e-05,
      "loss": 0.9758,
      "step": 415
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.9306209937284346e-05,
      "loss": 0.9843,
      "step": 420
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.69015331411628e-05,
      "loss": 0.979,
      "step": 425
    },
    {
      "epoch": 0.75,
      "learning_rate": 3.455601113256073e-05,
      "loss": 0.9816,
      "step": 430
    },
    {
      "epoch": 0.76,
      "learning_rate": 3.227184283742591e-05,
      "loss": 0.9885,
      "step": 435
    },
    {
      "epoch": 0.77,
      "learning_rate": 3.0051169662624225e-05,
      "loss": 0.9757,
      "step": 440
    },
    {
      "epoch": 0.78,
      "learning_rate": 2.789607348837153e-05,
      "loss": 0.9678,
      "step": 445
    },
    {
      "epoch": 0.79,
      "learning_rate": 2.5808574716471856e-05,
      "loss": 0.9757,
      "step": 450
    },
    {
      "epoch": 0.8,
      "learning_rate": 2.379063037619146e-05,
      "loss": 0.979,
      "step": 455
    },
    {
      "epoch": 0.81,
      "learning_rate": 2.184413228954468e-05,
      "loss": 0.9789,
      "step": 460
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.9970905297711606e-05,
      "loss": 0.9728,
      "step": 465
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.8172705550250092e-05,
      "loss": 0.9863,
      "step": 470
    },
    {
      "epoch": 0.83,
      "learning_rate": 1.6451218858706374e-05,
      "loss": 0.9887,
      "step": 475
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.4808059116167305e-05,
      "loss": 0.9824,
      "step": 480
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.3244766784236307e-05,
      "loss": 0.9743,
      "step": 485
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.176280744885121e-05,
      "loss": 0.9859,
      "step": 490
    },
    {
      "epoch": 0.87,
      "learning_rate": 1.0363570446297999e-05,
      "loss": 0.9732,
      "step": 495
    },
    {
      "epoch": 0.88,
      "learning_rate": 9.048367560708604e-06,
      "loss": 0.9768,
      "step": 500
    },
    {
      "epoch": 0.89,
      "learning_rate": 7.818431794263836e-06,
      "loss": 0.9708,
      "step": 505
    },
    {
      "epoch": 0.89,
      "learning_rate": 6.674916211254289e-06,
      "loss": 0.9679,
      "step": 510
    },
    {
      "epoch": 0.9,
      "learning_rate": 5.618892857083069e-06,
      "loss": 0.9739,
      "step": 515
    },
    {
      "epoch": 0.91,
      "learning_rate": 4.65135175322361e-06,
      "loss": 0.9795,
      "step": 520
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.7731999690749585e-06,
      "loss": 0.9785,
      "step": 525
    },
    {
      "epoch": 0.93,
      "learning_rate": 2.9852607715846193e-06,
      "loss": 0.9904,
      "step": 530
    },
    {
      "epoch": 0.94,
      "learning_rate": 2.288272853436013e-06,
      "loss": 0.9778,
      "step": 535
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.6828896405244988e-06,
      "loss": 0.972,
      "step": 540
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.1696786793707781e-06,
      "loss": 0.9647,
      "step": 545
    },
    {
      "epoch": 0.96,
      "learning_rate": 7.491211050462798e-07,
      "loss": 0.9865,
      "step": 550
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.216111901092501e-07,
      "loss": 0.9742,
      "step": 555
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.8745597497433765e-07,
      "loss": 0.9649,
      "step": 560
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.687498006236135e-08,
      "loss": 0.9745,
      "step": 565
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0,
      "loss": 0.9771,
      "step": 570
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.981906533241272,
      "eval_runtime": 2107.2433,
      "eval_samples_per_second": 7.672,
      "eval_steps_per_second": 0.24,
      "step": 570
    },
    {
      "epoch": 1.0,
      "step": 570,
      "total_flos": 1.1919078088043921e+19,
      "train_loss": 0.9983171429550438,
      "train_runtime": 52319.2765,
      "train_samples_per_second": 2.792,
      "train_steps_per_second": 0.011
    }
  ],
  "logging_steps": 5,
  "max_steps": 570,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 1.1919078088043921e+19,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}