{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 395,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012658227848101266,
      "grad_norm": 6.695639272540547,
      "learning_rate": 5.000000000000001e-07,
      "loss": 1.1195,
      "step": 1
    },
    {
      "epoch": 0.06329113924050633,
      "grad_norm": 13.683866082279918,
      "learning_rate": 2.5e-06,
      "loss": 1.0968,
      "step": 5
    },
    {
      "epoch": 0.12658227848101267,
      "grad_norm": 2.7914110872997275,
      "learning_rate": 5e-06,
      "loss": 1.0868,
      "step": 10
    },
    {
      "epoch": 0.189873417721519,
      "grad_norm": 2.2869962494889995,
      "learning_rate": 7.500000000000001e-06,
      "loss": 1.0771,
      "step": 15
    },
    {
      "epoch": 0.25316455696202533,
      "grad_norm": 2.315595057148159,
      "learning_rate": 1e-05,
      "loss": 1.0546,
      "step": 20
    },
    {
      "epoch": 0.31645569620253167,
      "grad_norm": 1.9266353743480364,
      "learning_rate": 1.25e-05,
      "loss": 1.084,
      "step": 25
    },
    {
      "epoch": 0.379746835443038,
      "grad_norm": 2.150532379708014,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 1.0654,
      "step": 30
    },
    {
      "epoch": 0.4430379746835443,
      "grad_norm": 2.1018953026935905,
      "learning_rate": 1.7500000000000002e-05,
      "loss": 1.0817,
      "step": 35
    },
    {
      "epoch": 0.5063291139240507,
      "grad_norm": 2.3849545878679272,
      "learning_rate": 2e-05,
      "loss": 1.0971,
      "step": 40
    },
    {
      "epoch": 0.569620253164557,
      "grad_norm": 2.345688862339495,
      "learning_rate": 1.9990212265199738e-05,
      "loss": 1.086,
      "step": 45
    },
    {
      "epoch": 0.6329113924050633,
      "grad_norm": 2.5866807131537906,
      "learning_rate": 1.996086822074945e-05,
      "loss": 1.0656,
      "step": 50
    },
    {
      "epoch": 0.6962025316455697,
      "grad_norm": 2.4392627537159655,
      "learning_rate": 1.9912025308994146e-05,
      "loss": 1.1059,
      "step": 55
    },
    {
      "epoch": 0.759493670886076,
      "grad_norm": 2.2706485443813276,
      "learning_rate": 1.9843779142227258e-05,
      "loss": 1.0973,
      "step": 60
    },
    {
      "epoch": 0.8227848101265823,
      "grad_norm": 2.3020097653483496,
      "learning_rate": 1.975626331552507e-05,
      "loss": 1.1083,
      "step": 65
    },
    {
      "epoch": 0.8860759493670886,
      "grad_norm": 2.0670888705685955,
      "learning_rate": 1.96496491452281e-05,
      "loss": 1.1039,
      "step": 70
    },
    {
      "epoch": 0.9493670886075949,
      "grad_norm": 2.371742516361207,
      "learning_rate": 1.9524145333581315e-05,
      "loss": 1.0895,
      "step": 75
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.7223408222198486,
      "eval_runtime": 213.2984,
      "eval_samples_per_second": 47.375,
      "eval_steps_per_second": 0.741,
      "step": 79
    },
    {
      "epoch": 1.0126582278481013,
      "grad_norm": 8.106924593883994,
      "learning_rate": 1.9379997560189677e-05,
      "loss": 1.0198,
      "step": 80
    },
    {
      "epoch": 1.0759493670886076,
      "grad_norm": 3.486817193319529,
      "learning_rate": 1.9217488001088784e-05,
      "loss": 0.6812,
      "step": 85
    },
    {
      "epoch": 1.139240506329114,
      "grad_norm": 2.921219837737029,
      "learning_rate": 1.903693477637204e-05,
      "loss": 0.6683,
      "step": 90
    },
    {
      "epoch": 1.2025316455696202,
      "grad_norm": 3.1078224489011683,
      "learning_rate": 1.883869132745561e-05,
      "loss": 0.653,
      "step": 95
    },
    {
      "epoch": 1.2658227848101267,
      "grad_norm": 2.8280645620621874,
      "learning_rate": 1.862314572520028e-05,
      "loss": 0.6501,
      "step": 100
    },
    {
      "epoch": 1.3291139240506329,
      "grad_norm": 2.920193489057834,
      "learning_rate": 1.8390719910244487e-05,
      "loss": 0.647,
      "step": 105
    },
    {
      "epoch": 1.3924050632911391,
      "grad_norm": 2.647561794905359,
      "learning_rate": 1.8141868867035745e-05,
      "loss": 0.6531,
      "step": 110
    },
    {
      "epoch": 1.4556962025316456,
      "grad_norm": 2.7660605803019447,
      "learning_rate": 1.7877079733177185e-05,
      "loss": 0.6232,
      "step": 115
    },
    {
      "epoch": 1.518987341772152,
      "grad_norm": 2.497219995540892,
      "learning_rate": 1.759687084583285e-05,
      "loss": 0.6401,
      "step": 120
    },
    {
      "epoch": 1.5822784810126582,
      "grad_norm": 2.447623483254921,
      "learning_rate": 1.7301790727058344e-05,
      "loss": 0.6592,
      "step": 125
    },
    {
      "epoch": 1.6455696202531644,
      "grad_norm": 2.4783693483832563,
      "learning_rate": 1.6992417010043144e-05,
      "loss": 0.6441,
      "step": 130
    },
    {
      "epoch": 1.7088607594936709,
      "grad_norm": 2.348228472338975,
      "learning_rate": 1.666935530836651e-05,
      "loss": 0.6439,
      "step": 135
    },
    {
      "epoch": 1.7721518987341773,
      "grad_norm": 2.316917620320839,
      "learning_rate": 1.6333238030480473e-05,
      "loss": 0.6572,
      "step": 140
    },
    {
      "epoch": 1.8354430379746836,
      "grad_norm": 2.614503810192861,
      "learning_rate": 1.5984723141740578e-05,
      "loss": 0.6764,
      "step": 145
    },
    {
      "epoch": 1.8987341772151898,
      "grad_norm": 2.452736422029602,
      "learning_rate": 1.562449287640781e-05,
      "loss": 0.6555,
      "step": 150
    },
    {
      "epoch": 1.9620253164556962,
      "grad_norm": 2.737510300725156,
      "learning_rate": 1.5253252402142989e-05,
      "loss": 0.6454,
      "step": 155
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.33173683285713196,
      "eval_runtime": 213.2106,
      "eval_samples_per_second": 47.394,
      "eval_steps_per_second": 0.741,
      "step": 158
    },
    {
      "epoch": 2.0253164556962027,
      "grad_norm": 3.9057198522311167,
      "learning_rate": 1.4871728439607967e-05,
      "loss": 0.5134,
      "step": 160
    },
    {
      "epoch": 2.088607594936709,
      "grad_norm": 3.4868328352161817,
      "learning_rate": 1.4480667839875786e-05,
      "loss": 0.3096,
      "step": 165
    },
    {
      "epoch": 2.151898734177215,
      "grad_norm": 2.9717493172755014,
      "learning_rate": 1.408083612243465e-05,
      "loss": 0.2909,
      "step": 170
    },
    {
      "epoch": 2.2151898734177213,
      "grad_norm": 2.642444961402327,
      "learning_rate": 1.367301597664757e-05,
      "loss": 0.2837,
      "step": 175
    },
    {
      "epoch": 2.278481012658228,
      "grad_norm": 2.3997899647768346,
      "learning_rate": 1.3258005729601178e-05,
      "loss": 0.2818,
      "step": 180
    },
    {
      "epoch": 2.3417721518987342,
      "grad_norm": 2.7423823494323774,
      "learning_rate": 1.2836617783342968e-05,
      "loss": 0.2907,
      "step": 185
    },
    {
      "epoch": 2.4050632911392404,
      "grad_norm": 2.3807259553082276,
      "learning_rate": 1.2409677024566145e-05,
      "loss": 0.2872,
      "step": 190
    },
    {
      "epoch": 2.4683544303797467,
      "grad_norm": 2.336572941961749,
      "learning_rate": 1.1978019209855174e-05,
      "loss": 0.2866,
      "step": 195
    },
    {
      "epoch": 2.5316455696202533,
      "grad_norm": 2.2601309613900757,
      "learning_rate": 1.1542489329653024e-05,
      "loss": 0.2945,
      "step": 200
    },
    {
      "epoch": 2.5949367088607596,
      "grad_norm": 2.3050926972713968,
      "learning_rate": 1.11039399541527e-05,
      "loss": 0.2937,
      "step": 205
    },
    {
      "epoch": 2.6582278481012658,
      "grad_norm": 2.221130883158219,
      "learning_rate": 1.066322956435104e-05,
      "loss": 0.2961,
      "step": 210
    },
    {
      "epoch": 2.721518987341772,
      "grad_norm": 2.270387701106379,
      "learning_rate": 1.022122087153187e-05,
      "loss": 0.2965,
      "step": 215
    },
    {
      "epoch": 2.7848101265822782,
      "grad_norm": 2.279056997659604,
      "learning_rate": 9.778779128468133e-06,
      "loss": 0.2964,
      "step": 220
    },
    {
      "epoch": 2.848101265822785,
      "grad_norm": 2.1417041666216696,
      "learning_rate": 9.336770435648963e-06,
      "loss": 0.2931,
      "step": 225
    },
    {
      "epoch": 2.911392405063291,
      "grad_norm": 2.439196726609028,
      "learning_rate": 8.896060045847305e-06,
      "loss": 0.2977,
      "step": 230
    },
    {
      "epoch": 2.9746835443037973,
      "grad_norm": 2.2397521488796417,
      "learning_rate": 8.457510670346976e-06,
      "loss": 0.2926,
      "step": 235
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.12934371829032898,
      "eval_runtime": 213.237,
      "eval_samples_per_second": 47.389,
      "eval_steps_per_second": 0.741,
      "step": 237
    },
    {
      "epoch": 3.037974683544304,
      "grad_norm": 2.2704813832171142,
      "learning_rate": 8.021980790144828e-06,
      "loss": 0.199,
      "step": 240
    },
    {
      "epoch": 3.1012658227848102,
      "grad_norm": 2.132226843273487,
      "learning_rate": 7.590322975433857e-06,
      "loss": 0.1128,
      "step": 245
    },
    {
      "epoch": 3.1645569620253164,
      "grad_norm": 2.0782142465832236,
      "learning_rate": 7.163382216657033e-06,
      "loss": 0.1129,
      "step": 250
    },
    {
      "epoch": 3.2278481012658227,
      "grad_norm": 1.761669140987703,
      "learning_rate": 6.741994270398826e-06,
      "loss": 0.1099,
      "step": 255
    },
    {
      "epoch": 3.291139240506329,
      "grad_norm": 1.717634933034142,
      "learning_rate": 6.326984023352435e-06,
      "loss": 0.1086,
      "step": 260
    },
    {
      "epoch": 3.3544303797468356,
      "grad_norm": 1.6608157157785954,
      "learning_rate": 5.919163877565351e-06,
      "loss": 0.1068,
      "step": 265
    },
    {
      "epoch": 3.4177215189873418,
      "grad_norm": 1.6064024695135044,
      "learning_rate": 5.519332160124215e-06,
      "loss": 0.1101,
      "step": 270
    },
    {
      "epoch": 3.481012658227848,
      "grad_norm": 1.6349645931027217,
      "learning_rate": 5.128271560392037e-06,
      "loss": 0.1074,
      "step": 275
    },
    {
      "epoch": 3.5443037974683547,
      "grad_norm": 1.7679069294086043,
      "learning_rate": 4.746747597857014e-06,
      "loss": 0.1118,
      "step": 280
    },
    {
      "epoch": 3.607594936708861,
      "grad_norm": 1.5530405205049536,
      "learning_rate": 4.375507123592194e-06,
      "loss": 0.1077,
      "step": 285
    },
    {
      "epoch": 3.670886075949367,
      "grad_norm": 1.5871889705183277,
      "learning_rate": 4.015276858259427e-06,
      "loss": 0.1082,
      "step": 290
    },
    {
      "epoch": 3.7341772151898733,
      "grad_norm": 1.5985959451025067,
      "learning_rate": 3.6667619695195287e-06,
      "loss": 0.1063,
      "step": 295
    },
    {
      "epoch": 3.7974683544303796,
      "grad_norm": 1.567040797684349,
      "learning_rate": 3.330644691633492e-06,
      "loss": 0.1067,
      "step": 300
    },
    {
      "epoch": 3.8607594936708862,
      "grad_norm": 1.6098560275600409,
      "learning_rate": 3.00758298995686e-06,
      "loss": 0.1045,
      "step": 305
    },
    {
      "epoch": 3.9240506329113924,
      "grad_norm": 1.495424841878041,
      "learning_rate": 2.698209272941659e-06,
      "loss": 0.1076,
      "step": 310
    },
    {
      "epoch": 3.9873417721518987,
      "grad_norm": 1.4612235257248143,
      "learning_rate": 2.403129154167153e-06,
      "loss": 0.1048,
      "step": 315
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.054249346256256104,
      "eval_runtime": 213.4038,
      "eval_samples_per_second": 47.352,
      "eval_steps_per_second": 0.74,
      "step": 316
    },
    {
      "epoch": 4.050632911392405,
      "grad_norm": 0.8970304833218358,
      "learning_rate": 2.1229202668228197e-06,
      "loss": 0.0643,
      "step": 320
    },
    {
      "epoch": 4.113924050632911,
      "grad_norm": 1.3031778574429476,
      "learning_rate": 1.8581311329642592e-06,
      "loss": 0.0496,
      "step": 325
    },
    {
      "epoch": 4.177215189873418,
      "grad_norm": 1.0494986965992847,
      "learning_rate": 1.609280089755515e-06,
      "loss": 0.0498,
      "step": 330
    },
    {
      "epoch": 4.2405063291139244,
      "grad_norm": 0.8871889072165642,
      "learning_rate": 1.3768542747997215e-06,
      "loss": 0.048,
      "step": 335
    },
    {
      "epoch": 4.30379746835443,
      "grad_norm": 0.8947823783464494,
      "learning_rate": 1.161308672544389e-06,
      "loss": 0.0491,
      "step": 340
    },
    {
      "epoch": 4.367088607594937,
      "grad_norm": 0.8681171283598373,
      "learning_rate": 9.630652236279626e-07,
      "loss": 0.0467,
      "step": 345
    },
    {
      "epoch": 4.430379746835443,
      "grad_norm": 0.8483515397273593,
      "learning_rate": 7.825119989112173e-07,
      "loss": 0.0474,
      "step": 350
    },
    {
      "epoch": 4.493670886075949,
      "grad_norm": 0.883073997111795,
      "learning_rate": 6.200024398103255e-07,
      "loss": 0.0461,
      "step": 355
    },
    {
      "epoch": 4.556962025316456,
      "grad_norm": 0.8423665781515933,
      "learning_rate": 4.7585466641868696e-07,
      "loss": 0.0474,
      "step": 360
    },
    {
      "epoch": 4.620253164556962,
      "grad_norm": 1.0038851569615406,
      "learning_rate": 3.5035085477190143e-07,
      "loss": 0.0476,
      "step": 365
    },
    {
      "epoch": 4.6835443037974684,
      "grad_norm": 0.9089402076181121,
      "learning_rate": 2.4373668447493225e-07,
      "loss": 0.0467,
      "step": 370
    },
    {
      "epoch": 4.746835443037975,
      "grad_norm": 0.8530919153878059,
      "learning_rate": 1.562208577727442e-07,
      "loss": 0.0469,
      "step": 375
    },
    {
      "epoch": 4.810126582278481,
      "grad_norm": 0.8298393668849166,
      "learning_rate": 8.797469100585432e-08,
      "loss": 0.0481,
      "step": 380
    },
    {
      "epoch": 4.8734177215189876,
      "grad_norm": 0.986632852960438,
      "learning_rate": 3.913177925055189e-08,
      "loss": 0.0471,
      "step": 385
    },
    {
      "epoch": 4.936708860759493,
      "grad_norm": 0.9075462948662969,
      "learning_rate": 9.78773480026396e-09,
      "loss": 0.0474,
      "step": 390
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.7480159011876264,
      "learning_rate": 0.0,
      "loss": 0.0465,
      "step": 395
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.039588429033756256,
      "eval_runtime": 213.3054,
      "eval_samples_per_second": 47.373,
      "eval_steps_per_second": 0.741,
      "step": 395
    },
    {
      "epoch": 5.0,
      "step": 395,
      "total_flos": 165305238159360.0,
      "train_loss": 0.43836132314386245,
      "train_runtime": 6288.8643,
      "train_samples_per_second": 8.034,
      "train_steps_per_second": 0.063
    }
  ],
  "logging_steps": 5,
  "max_steps": 395,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 165305238159360.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}