{ "best_metric": 1.82498932, "best_model_checkpoint": "/scratch/AzureNfsServer_INPUT1/vc_data/users/tabhishek/research/Multimodal-AS/ms-swift-chatas/output/MiniCPM-V-2_6/v21-20250405-130436/checkpoint-92000", "epoch": 3.9415620581808835, "eval_steps": 500, "global_step": 92000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 4.284306584979221e-05, "grad_norm": 7.625639915466309, "learning_rate": 9.999999998188407e-05, "loss": 4.53970193862915, "memory(GiB)": 21.94, "step": 1, "token_acc": 0.32653061224489793, "train_speed(iter/s)": 0.208536 }, { "epoch": 0.00021421532924896104, "grad_norm": 5.20388650894165, "learning_rate": 9.999999954710156e-05, "loss": 3.9903435707092285, "memory(GiB)": 21.94, "step": 5, "token_acc": 0.3639705882352941, "train_speed(iter/s)": 0.515927 }, { "epoch": 0.0004284306584979221, "grad_norm": 3.545041799545288, "learning_rate": 9.99999981884062e-05, "loss": 3.3040283203125, "memory(GiB)": 25.61, "step": 10, "token_acc": 0.37003058103975534, "train_speed(iter/s)": 0.586145 }, { "epoch": 0.0006426459877468832, "grad_norm": 3.9932162761688232, "learning_rate": 9.999999592391398e-05, "loss": 2.845155715942383, "memory(GiB)": 36.71, "step": 15, "token_acc": 0.41946308724832215, "train_speed(iter/s)": 0.539159 }, { "epoch": 0.0008568613169958442, "grad_norm": 4.196536540985107, "learning_rate": 9.999999275362494e-05, "loss": 2.8200477600097655, "memory(GiB)": 36.71, "step": 20, "token_acc": 0.4645161290322581, "train_speed(iter/s)": 0.562687 }, { "epoch": 0.0010710766462448053, "grad_norm": 4.950491428375244, "learning_rate": 9.999998867753912e-05, "loss": 2.75195198059082, "memory(GiB)": 42.56, "step": 25, "token_acc": 0.42450142450142453, "train_speed(iter/s)": 0.565555 }, { "epoch": 0.0012852919754937663, "grad_norm": 4.6271185874938965, "learning_rate": 9.999998369565659e-05, "loss": 2.716029167175293, "memory(GiB)": 42.56, "step": 30, "token_acc": 
0.4627831715210356, "train_speed(iter/s)": 0.582636 }, { "epoch": 0.0014995073047427273, "grad_norm": 4.945891857147217, "learning_rate": 9.999997780797748e-05, "loss": 2.7633424758911134, "memory(GiB)": 42.56, "step": 35, "token_acc": 0.4453125, "train_speed(iter/s)": 0.5888 }, { "epoch": 0.0017137226339916883, "grad_norm": 4.44663667678833, "learning_rate": 9.999997101450185e-05, "loss": 2.8328454971313475, "memory(GiB)": 42.56, "step": 40, "token_acc": 0.43478260869565216, "train_speed(iter/s)": 0.598157 }, { "epoch": 0.0019279379632406496, "grad_norm": 4.237065315246582, "learning_rate": 9.999996331522983e-05, "loss": 2.7573263168334963, "memory(GiB)": 42.58, "step": 45, "token_acc": 0.4262295081967213, "train_speed(iter/s)": 0.620172 }, { "epoch": 0.0021421532924896106, "grad_norm": 4.663661479949951, "learning_rate": 9.99999547101616e-05, "loss": 2.653850555419922, "memory(GiB)": 42.58, "step": 50, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.616742 }, { "epoch": 0.002356368621738572, "grad_norm": 5.980763912200928, "learning_rate": 9.999994519929725e-05, "loss": 3.0387969970703126, "memory(GiB)": 42.58, "step": 55, "token_acc": 0.4241573033707865, "train_speed(iter/s)": 0.629764 }, { "epoch": 0.0025705839509875326, "grad_norm": 3.7131495475769043, "learning_rate": 9.9999934782637e-05, "loss": 2.8447484970092773, "memory(GiB)": 42.58, "step": 60, "token_acc": 0.41946308724832215, "train_speed(iter/s)": 0.633625 }, { "epoch": 0.002784799280236494, "grad_norm": 3.5584945678710938, "learning_rate": 9.999992346018105e-05, "loss": 3.0613651275634766, "memory(GiB)": 42.58, "step": 65, "token_acc": 0.383399209486166, "train_speed(iter/s)": 0.645472 }, { "epoch": 0.0029990146094854547, "grad_norm": 3.7531204223632812, "learning_rate": 9.999991123192957e-05, "loss": 2.817147636413574, "memory(GiB)": 42.58, "step": 70, "token_acc": 0.4173441734417344, "train_speed(iter/s)": 0.654331 }, { "epoch": 0.003213229938734416, "grad_norm": 4.05025577545166, 
"learning_rate": 9.99998980978828e-05, "loss": 2.5642572402954102, "memory(GiB)": 42.58, "step": 75, "token_acc": 0.4602649006622517, "train_speed(iter/s)": 0.661372 }, { "epoch": 0.0034274452679833767, "grad_norm": 5.255312919616699, "learning_rate": 9.999988405804095e-05, "loss": 2.7653505325317385, "memory(GiB)": 42.58, "step": 80, "token_acc": 0.46545454545454545, "train_speed(iter/s)": 0.657687 }, { "epoch": 0.003641660597232338, "grad_norm": 4.263673305511475, "learning_rate": 9.999986911240431e-05, "loss": 2.60424690246582, "memory(GiB)": 42.58, "step": 85, "token_acc": 0.4766666666666667, "train_speed(iter/s)": 0.655605 }, { "epoch": 0.003855875926481299, "grad_norm": 4.209721088409424, "learning_rate": 9.999985326097314e-05, "loss": 2.5450958251953124, "memory(GiB)": 42.58, "step": 90, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.660847 }, { "epoch": 0.00407009125573026, "grad_norm": 2.9156081676483154, "learning_rate": 9.999983650374773e-05, "loss": 2.552067756652832, "memory(GiB)": 42.58, "step": 95, "token_acc": 0.5055762081784386, "train_speed(iter/s)": 0.656301 }, { "epoch": 0.004284306584979221, "grad_norm": 4.166343688964844, "learning_rate": 9.999981884072838e-05, "loss": 2.516210746765137, "memory(GiB)": 42.58, "step": 100, "token_acc": 0.4702194357366771, "train_speed(iter/s)": 0.654271 }, { "epoch": 0.004498521914228182, "grad_norm": 4.864845275878906, "learning_rate": 9.999980027191539e-05, "loss": 2.740213394165039, "memory(GiB)": 42.58, "step": 105, "token_acc": 0.4594594594594595, "train_speed(iter/s)": 0.651683 }, { "epoch": 0.004712737243477144, "grad_norm": 3.30230450630188, "learning_rate": 9.999978079730912e-05, "loss": 2.784230422973633, "memory(GiB)": 42.58, "step": 110, "token_acc": 0.4370860927152318, "train_speed(iter/s)": 0.653332 }, { "epoch": 0.0049269525727261045, "grad_norm": 5.137172698974609, "learning_rate": 9.999976041690993e-05, "loss": 2.6812156677246093, "memory(GiB)": 42.58, "step": 115, "token_acc": 
0.4577922077922078, "train_speed(iter/s)": 0.648901 }, { "epoch": 0.005141167901975065, "grad_norm": 2.3764455318450928, "learning_rate": 9.999973913071817e-05, "loss": 2.2821023941040037, "memory(GiB)": 42.58, "step": 120, "token_acc": 0.4889502762430939, "train_speed(iter/s)": 0.651609 }, { "epoch": 0.005355383231224026, "grad_norm": 4.399145126342773, "learning_rate": 9.999971693873423e-05, "loss": 2.346055793762207, "memory(GiB)": 48.47, "step": 125, "token_acc": 0.549407114624506, "train_speed(iter/s)": 0.650906 }, { "epoch": 0.005569598560472988, "grad_norm": 4.965338706970215, "learning_rate": 9.999969384095851e-05, "loss": 2.51334228515625, "memory(GiB)": 48.47, "step": 130, "token_acc": 0.48534201954397393, "train_speed(iter/s)": 0.654818 }, { "epoch": 0.0057838138897219485, "grad_norm": 4.984715461730957, "learning_rate": 9.999966983739143e-05, "loss": 2.7321386337280273, "memory(GiB)": 54.62, "step": 135, "token_acc": 0.45980707395498394, "train_speed(iter/s)": 0.65167 }, { "epoch": 0.005998029218970909, "grad_norm": 2.647524356842041, "learning_rate": 9.999964492803344e-05, "loss": 2.463837814331055, "memory(GiB)": 54.62, "step": 140, "token_acc": 0.4511784511784512, "train_speed(iter/s)": 0.652934 }, { "epoch": 0.006212244548219871, "grad_norm": 3.5858829021453857, "learning_rate": 9.999961911288497e-05, "loss": 2.8040050506591796, "memory(GiB)": 54.62, "step": 145, "token_acc": 0.3652173913043478, "train_speed(iter/s)": 0.647263 }, { "epoch": 0.006426459877468832, "grad_norm": 3.6278693675994873, "learning_rate": 9.99995923919465e-05, "loss": 2.5209497451782226, "memory(GiB)": 54.62, "step": 150, "token_acc": 0.4744525547445255, "train_speed(iter/s)": 0.641669 }, { "epoch": 0.006640675206717793, "grad_norm": 7.316452503204346, "learning_rate": 9.99995647652185e-05, "loss": 2.7946905136108398, "memory(GiB)": 54.62, "step": 155, "token_acc": 0.45151515151515154, "train_speed(iter/s)": 0.64344 }, { "epoch": 0.006854890535966753, "grad_norm": 
4.288392066955566, "learning_rate": 9.99995362327015e-05, "loss": 2.671181297302246, "memory(GiB)": 54.62, "step": 160, "token_acc": 0.46689895470383275, "train_speed(iter/s)": 0.646759 }, { "epoch": 0.007069105865215715, "grad_norm": 3.2627146244049072, "learning_rate": 9.999950679439598e-05, "loss": 2.7744508743286134, "memory(GiB)": 54.62, "step": 165, "token_acc": 0.46564885496183206, "train_speed(iter/s)": 0.650156 }, { "epoch": 0.007283321194464676, "grad_norm": 3.9834187030792236, "learning_rate": 9.99994764503025e-05, "loss": 2.7649890899658205, "memory(GiB)": 54.62, "step": 170, "token_acc": 0.43853820598006643, "train_speed(iter/s)": 0.654038 }, { "epoch": 0.007497536523713637, "grad_norm": 3.8143532276153564, "learning_rate": 9.99994452004216e-05, "loss": 2.45929012298584, "memory(GiB)": 54.62, "step": 175, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.656756 }, { "epoch": 0.007711751852962598, "grad_norm": 4.415961265563965, "learning_rate": 9.999941304475385e-05, "loss": 2.5884830474853517, "memory(GiB)": 54.62, "step": 180, "token_acc": 0.42953020134228187, "train_speed(iter/s)": 0.656934 }, { "epoch": 0.007925967182211559, "grad_norm": 2.5476198196411133, "learning_rate": 9.999937998329982e-05, "loss": 2.4622991561889647, "memory(GiB)": 54.62, "step": 185, "token_acc": 0.48286604361370716, "train_speed(iter/s)": 0.655316 }, { "epoch": 0.00814018251146052, "grad_norm": 2.9496631622314453, "learning_rate": 9.999934601606014e-05, "loss": 2.293362045288086, "memory(GiB)": 54.62, "step": 190, "token_acc": 0.4984025559105431, "train_speed(iter/s)": 0.653966 }, { "epoch": 0.00835439784070948, "grad_norm": 3.5460686683654785, "learning_rate": 9.999931114303538e-05, "loss": 2.6138561248779295, "memory(GiB)": 54.62, "step": 195, "token_acc": 0.4734848484848485, "train_speed(iter/s)": 0.656576 }, { "epoch": 0.008568613169958442, "grad_norm": 2.7859184741973877, "learning_rate": 9.99992753642262e-05, "loss": 2.6532058715820312, "memory(GiB)": 54.62, 
"step": 200, "token_acc": 0.4517241379310345, "train_speed(iter/s)": 0.656408 }, { "epoch": 0.008782828499207404, "grad_norm": 3.4940712451934814, "learning_rate": 9.999923867963326e-05, "loss": 2.578100395202637, "memory(GiB)": 54.62, "step": 205, "token_acc": 0.4492307692307692, "train_speed(iter/s)": 0.659795 }, { "epoch": 0.008997043828456364, "grad_norm": 5.422061443328857, "learning_rate": 9.999920108925719e-05, "loss": 2.7045495986938475, "memory(GiB)": 54.62, "step": 210, "token_acc": 0.4461538461538462, "train_speed(iter/s)": 0.658649 }, { "epoch": 0.009211259157705326, "grad_norm": 5.320497035980225, "learning_rate": 9.99991625930987e-05, "loss": 2.6211620330810548, "memory(GiB)": 54.62, "step": 215, "token_acc": 0.5210084033613446, "train_speed(iter/s)": 0.654396 }, { "epoch": 0.009425474486954287, "grad_norm": 2.4012420177459717, "learning_rate": 9.999912319115848e-05, "loss": 2.7152809143066405, "memory(GiB)": 54.62, "step": 220, "token_acc": 0.4554794520547945, "train_speed(iter/s)": 0.657236 }, { "epoch": 0.009639689816203247, "grad_norm": 4.434166431427002, "learning_rate": 9.999908288343722e-05, "loss": 2.8006513595581053, "memory(GiB)": 54.62, "step": 225, "token_acc": 0.46645367412140576, "train_speed(iter/s)": 0.660922 }, { "epoch": 0.009853905145452209, "grad_norm": 3.1756229400634766, "learning_rate": 9.999904166993568e-05, "loss": 2.3892099380493166, "memory(GiB)": 54.62, "step": 230, "token_acc": 0.49829351535836175, "train_speed(iter/s)": 0.663631 }, { "epoch": 0.010068120474701169, "grad_norm": 3.3113858699798584, "learning_rate": 9.999899955065461e-05, "loss": 2.8283599853515624, "memory(GiB)": 54.62, "step": 235, "token_acc": 0.42474916387959866, "train_speed(iter/s)": 0.666381 }, { "epoch": 0.01028233580395013, "grad_norm": 2.924527883529663, "learning_rate": 9.999895652559475e-05, "loss": 2.326822280883789, "memory(GiB)": 54.62, "step": 240, "token_acc": 0.525691699604743, "train_speed(iter/s)": 0.668084 }, { "epoch": 
0.010496551133199092, "grad_norm": 3.118058681488037, "learning_rate": 9.999891259475688e-05, "loss": 2.5372035980224608, "memory(GiB)": 54.62, "step": 245, "token_acc": 0.4219269102990033, "train_speed(iter/s)": 0.669834 }, { "epoch": 0.010710766462448052, "grad_norm": 4.129244327545166, "learning_rate": 9.999886775814182e-05, "loss": 2.2587318420410156, "memory(GiB)": 54.62, "step": 250, "token_acc": 0.47703180212014135, "train_speed(iter/s)": 0.66787 }, { "epoch": 0.010924981791697014, "grad_norm": null, "learning_rate": 9.999883123669073e-05, "loss": 2.6220046997070314, "memory(GiB)": 54.62, "step": 255, "token_acc": 0.4859154929577465, "train_speed(iter/s)": 0.667623 }, { "epoch": 0.011139197120945975, "grad_norm": 3.6966710090637207, "learning_rate": 9.999878476967874e-05, "loss": 2.7363780975341796, "memory(GiB)": 54.62, "step": 260, "token_acc": 0.5, "train_speed(iter/s)": 0.670321 }, { "epoch": 0.011353412450194935, "grad_norm": 5.121838092803955, "learning_rate": 9.99987373968919e-05, "loss": 2.3083774566650392, "memory(GiB)": 54.62, "step": 265, "token_acc": 0.5072463768115942, "train_speed(iter/s)": 0.668567 }, { "epoch": 0.011567627779443897, "grad_norm": 5.225976943969727, "learning_rate": 9.999868911833099e-05, "loss": 2.6572502136230467, "memory(GiB)": 54.62, "step": 270, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.669083 }, { "epoch": 0.011781843108692859, "grad_norm": 4.280091762542725, "learning_rate": 9.999863993399693e-05, "loss": 2.5873235702514648, "memory(GiB)": 54.62, "step": 275, "token_acc": 0.47232472324723246, "train_speed(iter/s)": 0.669342 }, { "epoch": 0.011996058437941819, "grad_norm": 3.6461284160614014, "learning_rate": 9.999858984389064e-05, "loss": 2.5865501403808593, "memory(GiB)": 54.62, "step": 280, "token_acc": 0.4577259475218659, "train_speed(iter/s)": 0.667201 }, { "epoch": 0.01221027376719078, "grad_norm": 3.871371269226074, "learning_rate": 9.999853884801296e-05, "loss": 2.322113037109375,
"memory(GiB)": 54.62, "step": 285, "token_acc": 0.4983277591973244, "train_speed(iter/s)": 0.666331 }, { "epoch": 0.012424489096439742, "grad_norm": 2.4409549236297607, "learning_rate": 9.999848694636485e-05, "loss": 2.3678287506103515, "memory(GiB)": 54.62, "step": 290, "token_acc": 0.49609375, "train_speed(iter/s)": 0.667182 }, { "epoch": 0.012638704425688702, "grad_norm": 2.7017650604248047, "learning_rate": 9.999843413894724e-05, "loss": 2.7274572372436525, "memory(GiB)": 54.62, "step": 295, "token_acc": 0.48184818481848185, "train_speed(iter/s)": 0.667983 }, { "epoch": 0.012852919754937664, "grad_norm": 2.8204598426818848, "learning_rate": 9.999838042576112e-05, "loss": 2.6898830413818358, "memory(GiB)": 54.62, "step": 300, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.671622 }, { "epoch": 0.013067135084186625, "grad_norm": 2.6833009719848633, "learning_rate": 9.999832580680741e-05, "loss": 2.4890445709228515, "memory(GiB)": 54.62, "step": 305, "token_acc": 0.4627831715210356, "train_speed(iter/s)": 0.669381 }, { "epoch": 0.013281350413435585, "grad_norm": 4.885848045349121, "learning_rate": 9.999827028208714e-05, "loss": 2.480109214782715, "memory(GiB)": 54.62, "step": 310, "token_acc": 0.47989276139410186, "train_speed(iter/s)": 0.669192 }, { "epoch": 0.013495565742684547, "grad_norm": 3.4065232276916504, "learning_rate": 9.99982138516013e-05, "loss": 2.5165855407714846, "memory(GiB)": 54.62, "step": 315, "token_acc": 0.45714285714285713, "train_speed(iter/s)": 0.667829 }, { "epoch": 0.013709781071933507, "grad_norm": 3.0110745429992676, "learning_rate": 9.999815651535092e-05, "loss": 2.398674201965332, "memory(GiB)": 54.62, "step": 320, "token_acc": 0.46511627906976744, "train_speed(iter/s)": 0.667807 }, { "epoch": 0.013923996401182468, "grad_norm": 2.7631678581237793, "learning_rate": 9.999809827333702e-05, "loss": 2.3853338241577147, "memory(GiB)": 54.62, "step": 325, "token_acc": 0.45098039215686275, "train_speed(iter/s)": 0.669119 }, { 
"epoch": 0.01413821173043143, "grad_norm": 2.528550386428833, "learning_rate": 9.999803912556067e-05, "loss": 2.740047645568848, "memory(GiB)": 54.62, "step": 330, "token_acc": 0.4326923076923077, "train_speed(iter/s)": 0.671952 }, { "epoch": 0.01435242705968039, "grad_norm": 5.426364898681641, "learning_rate": 9.999797907202294e-05, "loss": 2.466338539123535, "memory(GiB)": 54.62, "step": 335, "token_acc": 0.4962962962962963, "train_speed(iter/s)": 0.672593 }, { "epoch": 0.014566642388929352, "grad_norm": 2.1086597442626953, "learning_rate": 9.999791811272492e-05, "loss": 2.5259525299072267, "memory(GiB)": 54.62, "step": 340, "token_acc": 0.45819397993311034, "train_speed(iter/s)": 0.671649 }, { "epoch": 0.014780857718178313, "grad_norm": 2.207831382751465, "learning_rate": 9.999785624766771e-05, "loss": 2.6511865615844727, "memory(GiB)": 54.62, "step": 345, "token_acc": 0.44155844155844154, "train_speed(iter/s)": 0.671845 }, { "epoch": 0.014995073047427273, "grad_norm": 2.1674392223358154, "learning_rate": 9.999779347685243e-05, "loss": 2.401983451843262, "memory(GiB)": 54.62, "step": 350, "token_acc": 0.42201834862385323, "train_speed(iter/s)": 0.674779 }, { "epoch": 0.015209288376676235, "grad_norm": 3.1113524436950684, "learning_rate": 9.999772980028022e-05, "loss": 2.5977630615234375, "memory(GiB)": 54.62, "step": 355, "token_acc": 0.4222972972972973, "train_speed(iter/s)": 0.676891 }, { "epoch": 0.015423503705925197, "grad_norm": 3.635969877243042, "learning_rate": 9.999766521795224e-05, "loss": 2.5196908950805663, "memory(GiB)": 54.62, "step": 360, "token_acc": 0.4867549668874172, "train_speed(iter/s)": 0.675459 }, { "epoch": 0.015637719035174157, "grad_norm": 3.2260451316833496, "learning_rate": 9.999759972986965e-05, "loss": 2.681817054748535, "memory(GiB)": 54.62, "step": 365, "token_acc": 0.4557823129251701, "train_speed(iter/s)": 0.674594 }, { "epoch": 0.015851934364423118, "grad_norm": 3.9605650901794434, "learning_rate": 9.999753333603363e-05, 
"loss": 2.42960147857666, "memory(GiB)": 54.62, "step": 370, "token_acc": 0.48444444444444446, "train_speed(iter/s)": 0.6741 }, { "epoch": 0.01606614969367208, "grad_norm": 3.511993646621704, "learning_rate": 9.99974660364454e-05, "loss": 2.342910385131836, "memory(GiB)": 54.62, "step": 375, "token_acc": 0.4943820224719101, "train_speed(iter/s)": 0.674014 }, { "epoch": 0.01628036502292104, "grad_norm": 2.512105941772461, "learning_rate": 9.999741154463405e-05, "loss": 2.835100555419922, "memory(GiB)": 54.62, "step": 380, "token_acc": 0.40707964601769914, "train_speed(iter/s)": 0.673183 }, { "epoch": 0.01649458035217, "grad_norm": 2.7781546115875244, "learning_rate": 9.999734261469491e-05, "loss": 2.627513122558594, "memory(GiB)": 54.62, "step": 385, "token_acc": 0.48787878787878786, "train_speed(iter/s)": 0.674221 }, { "epoch": 0.01670879568141896, "grad_norm": 3.1425349712371826, "learning_rate": 9.9997272779007e-05, "loss": 2.3999704360961913, "memory(GiB)": 54.62, "step": 390, "token_acc": 0.5071428571428571, "train_speed(iter/s)": 0.671498 }, { "epoch": 0.016923011010667923, "grad_norm": 2.835977792739868, "learning_rate": 9.999720203757161e-05, "loss": 2.679454803466797, "memory(GiB)": 54.62, "step": 395, "token_acc": 0.44805194805194803, "train_speed(iter/s)": 0.672867 }, { "epoch": 0.017137226339916885, "grad_norm": 2.85933256149292, "learning_rate": 9.999713039038998e-05, "loss": 2.575173187255859, "memory(GiB)": 54.62, "step": 400, "token_acc": 0.4872611464968153, "train_speed(iter/s)": 0.67352 }, { "epoch": 0.017351441669165846, "grad_norm": 4.236718654632568, "learning_rate": 9.999705783746345e-05, "loss": 2.747630500793457, "memory(GiB)": 54.62, "step": 405, "token_acc": 0.4187725631768953, "train_speed(iter/s)": 0.673184 }, { "epoch": 0.017565656998414808, "grad_norm": 4.214564800262451, "learning_rate": 9.999698437879332e-05, "loss": 2.2102886199951173, "memory(GiB)": 54.62, "step": 410, "token_acc": 0.5469387755102041, "train_speed(iter/s)": 0.672753 
}, { "epoch": 0.017779872327663766, "grad_norm": 4.460990905761719, "learning_rate": 9.99969100143809e-05, "loss": 2.4747161865234375, "memory(GiB)": 54.62, "step": 415, "token_acc": 0.49372384937238495, "train_speed(iter/s)": 0.673898 }, { "epoch": 0.017994087656912728, "grad_norm": 3.6805615425109863, "learning_rate": 9.999683474422757e-05, "loss": 2.404896545410156, "memory(GiB)": 54.62, "step": 420, "token_acc": 0.532, "train_speed(iter/s)": 0.673866 }, { "epoch": 0.01820830298616169, "grad_norm": 2.9253950119018555, "learning_rate": 9.999675856833468e-05, "loss": 2.8021387100219726, "memory(GiB)": 54.62, "step": 425, "token_acc": 0.4463667820069204, "train_speed(iter/s)": 0.673428 }, { "epoch": 0.01842251831541065, "grad_norm": 3.4196600914001465, "learning_rate": 9.999668148670359e-05, "loss": 2.6317535400390626, "memory(GiB)": 54.62, "step": 430, "token_acc": 0.4925925925925926, "train_speed(iter/s)": 0.673082 }, { "epoch": 0.018636733644659613, "grad_norm": 3.0155811309814453, "learning_rate": 9.999660349933573e-05, "loss": 2.573602294921875, "memory(GiB)": 54.62, "step": 435, "token_acc": 0.4413793103448276, "train_speed(iter/s)": 0.673917 }, { "epoch": 0.018850948973908575, "grad_norm": 4.0063090324401855, "learning_rate": 9.999652460623247e-05, "loss": 2.378360557556152, "memory(GiB)": 54.62, "step": 440, "token_acc": 0.5233333333333333, "train_speed(iter/s)": 0.674478 }, { "epoch": 0.019065164303157533, "grad_norm": 4.487542152404785, "learning_rate": 9.99964448073953e-05, "loss": 2.721476745605469, "memory(GiB)": 54.62, "step": 445, "token_acc": 0.42771084337349397, "train_speed(iter/s)": 0.674409 }, { "epoch": 0.019279379632406494, "grad_norm": 2.6629483699798584, "learning_rate": 9.999636410282562e-05, "loss": 2.3547346115112306, "memory(GiB)": 54.62, "step": 450, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.673459 }, { "epoch": 0.019493594961655456, "grad_norm": 2.6754276752471924, "learning_rate": 9.99962824925249e-05, "loss": 
2.4752368927001953, "memory(GiB)": 54.62, "step": 455, "token_acc": 0.4713804713804714, "train_speed(iter/s)": 0.673246 }, { "epoch": 0.019707810290904418, "grad_norm": 3.5499701499938965, "learning_rate": 9.999619997649463e-05, "loss": 2.503587341308594, "memory(GiB)": 54.62, "step": 460, "token_acc": 0.45302013422818793, "train_speed(iter/s)": 0.672905 }, { "epoch": 0.01992202562015338, "grad_norm": 2.996770143508911, "learning_rate": 9.999611655473629e-05, "loss": 2.2446775436401367, "memory(GiB)": 54.62, "step": 465, "token_acc": 0.5035971223021583, "train_speed(iter/s)": 0.673948 }, { "epoch": 0.020136240949402338, "grad_norm": 2.829357385635376, "learning_rate": 9.999603222725141e-05, "loss": 2.8194032669067384, "memory(GiB)": 54.62, "step": 470, "token_acc": 0.46296296296296297, "train_speed(iter/s)": 0.675521 }, { "epoch": 0.0203504562786513, "grad_norm": 2.510809898376465, "learning_rate": 9.999594699404149e-05, "loss": 2.712614059448242, "memory(GiB)": 54.62, "step": 475, "token_acc": 0.419672131147541, "train_speed(iter/s)": 0.674181 }, { "epoch": 0.02056467160790026, "grad_norm": 2.4910449981689453, "learning_rate": 9.99958608551081e-05, "loss": 2.460338592529297, "memory(GiB)": 54.62, "step": 480, "token_acc": 0.49615384615384617, "train_speed(iter/s)": 0.673516 }, { "epoch": 0.020778886937149223, "grad_norm": 3.070723056793213, "learning_rate": 9.999577381045277e-05, "loss": 2.447463035583496, "memory(GiB)": 54.62, "step": 485, "token_acc": 0.4628099173553719, "train_speed(iter/s)": 0.674167 }, { "epoch": 0.020993102266398184, "grad_norm": 2.5210344791412354, "learning_rate": 9.999568586007713e-05, "loss": 2.576895904541016, "memory(GiB)": 54.62, "step": 490, "token_acc": 0.49825783972125437, "train_speed(iter/s)": 0.674562 }, { "epoch": 0.021207317595647146, "grad_norm": 4.2215776443481445, "learning_rate": 9.999559700398272e-05, "loss": 2.4142438888549806, "memory(GiB)": 54.62, "step": 495, "token_acc": 0.47560975609756095, "train_speed(iter/s)": 
0.671813 }, { "epoch": 0.021421532924896104, "grad_norm": 3.7115979194641113, "learning_rate": 9.999550724217117e-05, "loss": 2.518465995788574, "memory(GiB)": 54.62, "step": 500, "token_acc": 0.4694533762057878, "train_speed(iter/s)": 0.67127 }, { "epoch": 0.021421532924896104, "eval_loss": 2.1348280906677246, "eval_runtime": 17.4659, "eval_samples_per_second": 5.725, "eval_steps_per_second": 5.725, "eval_token_acc": 0.5140562248995983, "step": 500 }, { "epoch": 0.021635748254145066, "grad_norm": 2.713134527206421, "learning_rate": 9.999541657464412e-05, "loss": 2.5341829299926757, "memory(GiB)": 54.62, "step": 505, "token_acc": 0.5137795275590551, "train_speed(iter/s)": 0.65248 }, { "epoch": 0.021849963583394028, "grad_norm": 3.5443172454833984, "learning_rate": 9.999532500140318e-05, "loss": 2.5370044708251953, "memory(GiB)": 54.62, "step": 510, "token_acc": 0.4732142857142857, "train_speed(iter/s)": 0.653484 }, { "epoch": 0.02206417891264299, "grad_norm": 2.5254018306732178, "learning_rate": 9.999523252245005e-05, "loss": 2.472306823730469, "memory(GiB)": 54.62, "step": 515, "token_acc": 0.4585987261146497, "train_speed(iter/s)": 0.654336 }, { "epoch": 0.02227839424189195, "grad_norm": 5.1634745597839355, "learning_rate": 9.999513913778637e-05, "loss": 2.8752260208129883, "memory(GiB)": 54.62, "step": 520, "token_acc": 0.44964028776978415, "train_speed(iter/s)": 0.654709 }, { "epoch": 0.022492609571140913, "grad_norm": 2.286238670349121, "learning_rate": 9.999504484741385e-05, "loss": 2.3559764862060546, "memory(GiB)": 54.62, "step": 525, "token_acc": 0.4785276073619632, "train_speed(iter/s)": 0.655281 }, { "epoch": 0.02270682490038987, "grad_norm": 2.240556478500366, "learning_rate": 9.99949496513342e-05, "loss": 2.4075477600097654, "memory(GiB)": 54.62, "step": 530, "token_acc": 0.5241157556270096, "train_speed(iter/s)": 0.655813 }, { "epoch": 0.022921040229638832, "grad_norm": 4.718363285064697, "learning_rate": 9.999485354954913e-05, "loss": 
2.3385526657104494, "memory(GiB)": 54.62, "step": 535, "token_acc": 0.4766666666666667, "train_speed(iter/s)": 0.656362 }, { "epoch": 0.023135255558887794, "grad_norm": 2.759511709213257, "learning_rate": 9.999475654206038e-05, "loss": 2.6456804275512695, "memory(GiB)": 54.62, "step": 540, "token_acc": 0.46124031007751937, "train_speed(iter/s)": 0.656833 }, { "epoch": 0.023349470888136756, "grad_norm": 2.4608898162841797, "learning_rate": 9.999465862886974e-05, "loss": 2.423737716674805, "memory(GiB)": 54.62, "step": 545, "token_acc": 0.48338368580060426, "train_speed(iter/s)": 0.657799 }, { "epoch": 0.023563686217385717, "grad_norm": 3.447770833969116, "learning_rate": 9.999455980997894e-05, "loss": 2.196798896789551, "memory(GiB)": 54.62, "step": 550, "token_acc": 0.5107142857142857, "train_speed(iter/s)": 0.658656 }, { "epoch": 0.023777901546634676, "grad_norm": 4.860440731048584, "learning_rate": 9.99944600853898e-05, "loss": 2.427891159057617, "memory(GiB)": 54.62, "step": 555, "token_acc": 0.4979253112033195, "train_speed(iter/s)": 0.657783 }, { "epoch": 0.023992116875883637, "grad_norm": 2.6826884746551514, "learning_rate": 9.999435945510411e-05, "loss": 2.706648826599121, "memory(GiB)": 54.62, "step": 560, "token_acc": 0.4895833333333333, "train_speed(iter/s)": 0.658999 }, { "epoch": 0.0242063322051326, "grad_norm": 2.974804401397705, "learning_rate": 9.999425791912371e-05, "loss": 2.470328521728516, "memory(GiB)": 54.62, "step": 565, "token_acc": 0.49096385542168675, "train_speed(iter/s)": 0.659905 }, { "epoch": 0.02442054753438156, "grad_norm": 2.8333287239074707, "learning_rate": 9.999415547745042e-05, "loss": 2.44503116607666, "memory(GiB)": 54.62, "step": 570, "token_acc": 0.4766081871345029, "train_speed(iter/s)": 0.658592 }, { "epoch": 0.024634762863630522, "grad_norm": 2.881246566772461, "learning_rate": 9.99940521300861e-05, "loss": 2.5349912643432617, "memory(GiB)": 54.62, "step": 575, "token_acc": 0.4750830564784053, "train_speed(iter/s)": 
0.65918 }, { "epoch": 0.024848978192879484, "grad_norm": 4.399308204650879, "learning_rate": 9.999394787703265e-05, "loss": 2.7249982833862303, "memory(GiB)": 54.62, "step": 580, "token_acc": 0.48562300319488816, "train_speed(iter/s)": 0.660012 }, { "epoch": 0.025063193522128442, "grad_norm": 3.3873698711395264, "learning_rate": 9.999384271829191e-05, "loss": 2.322029876708984, "memory(GiB)": 54.62, "step": 585, "token_acc": 0.506578947368421, "train_speed(iter/s)": 0.659779 }, { "epoch": 0.025277408851377404, "grad_norm": 3.7013094425201416, "learning_rate": 9.999373665386583e-05, "loss": 2.522063446044922, "memory(GiB)": 54.62, "step": 590, "token_acc": 0.45364238410596025, "train_speed(iter/s)": 0.661777 }, { "epoch": 0.025491624180626365, "grad_norm": 1.9532318115234375, "learning_rate": 9.999362968375631e-05, "loss": 2.7114368438720704, "memory(GiB)": 54.62, "step": 595, "token_acc": 0.47023809523809523, "train_speed(iter/s)": 0.661638 }, { "epoch": 0.025705839509875327, "grad_norm": 2.8351223468780518, "learning_rate": 9.999352180796529e-05, "loss": 2.5519989013671873, "memory(GiB)": 54.62, "step": 600, "token_acc": 0.45185185185185184, "train_speed(iter/s)": 0.662545 }, { "epoch": 0.02592005483912429, "grad_norm": 3.3854715824127197, "learning_rate": 9.999341302649472e-05, "loss": 2.6731903076171877, "memory(GiB)": 54.62, "step": 605, "token_acc": 0.45364238410596025, "train_speed(iter/s)": 0.663403 }, { "epoch": 0.02613427016837325, "grad_norm": 3.8070197105407715, "learning_rate": 9.999330333934656e-05, "loss": 2.660052490234375, "memory(GiB)": 54.62, "step": 610, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.661214 }, { "epoch": 0.02634848549762221, "grad_norm": 3.3281681537628174, "learning_rate": 9.999319274652283e-05, "loss": 3.0363183975219727, "memory(GiB)": 54.62, "step": 615, "token_acc": 0.3971830985915493, "train_speed(iter/s)": 0.661549 }, { "epoch": 0.02656270082687117, "grad_norm": 2.5218842029571533, "learning_rate": 
9.999308124802552e-05, "loss": 2.3714529037475587, "memory(GiB)": 54.62, "step": 620, "token_acc": 0.5475285171102662, "train_speed(iter/s)": 0.662824 }, { "epoch": 0.026776916156120132, "grad_norm": 2.465986967086792, "learning_rate": 9.999296884385665e-05, "loss": 2.549900245666504, "memory(GiB)": 54.62, "step": 625, "token_acc": 0.44921875, "train_speed(iter/s)": 0.663331 }, { "epoch": 0.026991131485369094, "grad_norm": 3.2634215354919434, "learning_rate": 9.999285553401824e-05, "loss": 2.3942180633544923, "memory(GiB)": 54.62, "step": 630, "token_acc": 0.5189393939393939, "train_speed(iter/s)": 0.664543 }, { "epoch": 0.027205346814618055, "grad_norm": 3.6681692600250244, "learning_rate": 9.999274131851235e-05, "loss": 2.477688217163086, "memory(GiB)": 54.62, "step": 635, "token_acc": 0.5163934426229508, "train_speed(iter/s)": 0.66445 }, { "epoch": 0.027419562143867014, "grad_norm": 4.23026180267334, "learning_rate": 9.999262619734107e-05, "loss": 2.567488098144531, "memory(GiB)": 54.62, "step": 640, "token_acc": 0.46405228758169936, "train_speed(iter/s)": 0.665675 }, { "epoch": 0.027633777473115975, "grad_norm": 2.5809476375579834, "learning_rate": 9.999251017050645e-05, "loss": 2.1524738311767577, "memory(GiB)": 54.62, "step": 645, "token_acc": 0.5415162454873647, "train_speed(iter/s)": 0.664963 }, { "epoch": 0.027847992802364937, "grad_norm": 2.3652126789093018, "learning_rate": 9.999239323801062e-05, "loss": 2.3844228744506837, "memory(GiB)": 54.62, "step": 650, "token_acc": 0.4955223880597015, "train_speed(iter/s)": 0.665689 }, { "epoch": 0.0280622081316139, "grad_norm": 2.8527274131774902, "learning_rate": 9.99922753998557e-05, "loss": 2.913397216796875, "memory(GiB)": 54.62, "step": 655, "token_acc": 0.4404332129963899, "train_speed(iter/s)": 0.665988 }, { "epoch": 0.02827642346086286, "grad_norm": 2.336909055709839, "learning_rate": 9.99921566560438e-05, "loss": 2.418985366821289, "memory(GiB)": 54.7, "step": 660, "token_acc": 0.5, "train_speed(iter/s)": 
0.665262 }, { "epoch": 0.028490638790111822, "grad_norm": 2.619492292404175, "learning_rate": 9.99920370065771e-05, "loss": 2.2974922180175783, "memory(GiB)": 54.7, "step": 665, "token_acc": 0.4772727272727273, "train_speed(iter/s)": 0.665654 }, { "epoch": 0.02870485411936078, "grad_norm": 5.417760848999023, "learning_rate": 9.999191645145774e-05, "loss": 2.483477783203125, "memory(GiB)": 54.7, "step": 670, "token_acc": 0.4692556634304207, "train_speed(iter/s)": 0.665573 }, { "epoch": 0.028919069448609742, "grad_norm": 2.639275074005127, "learning_rate": 9.999179499068794e-05, "loss": 2.4502771377563475, "memory(GiB)": 54.7, "step": 675, "token_acc": 0.49624060150375937, "train_speed(iter/s)": 0.665015 }, { "epoch": 0.029133284777858703, "grad_norm": 3.1233108043670654, "learning_rate": 9.999167262426985e-05, "loss": 2.630961608886719, "memory(GiB)": 54.7, "step": 680, "token_acc": 0.4392156862745098, "train_speed(iter/s)": 0.665586 }, { "epoch": 0.029347500107107665, "grad_norm": 2.8898186683654785, "learning_rate": 9.999154935220573e-05, "loss": 2.5132280349731446, "memory(GiB)": 59.76, "step": 685, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.665273 }, { "epoch": 0.029561715436356627, "grad_norm": 1.9830567836761475, "learning_rate": 9.99914251744978e-05, "loss": 2.278265953063965, "memory(GiB)": 59.76, "step": 690, "token_acc": 0.496875, "train_speed(iter/s)": 0.666071 }, { "epoch": 0.02977593076560559, "grad_norm": 3.743652105331421, "learning_rate": 9.99913000911483e-05, "loss": 2.4120044708251953, "memory(GiB)": 59.76, "step": 695, "token_acc": 0.5018587360594795, "train_speed(iter/s)": 0.665654 }, { "epoch": 0.029990146094854547, "grad_norm": 2.4133541584014893, "learning_rate": 9.99911741021595e-05, "loss": 2.3855106353759767, "memory(GiB)": 59.76, "step": 700, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.665041 }, { "epoch": 0.03020436142410351, "grad_norm": 2.676162004470825, "learning_rate": 9.999104720753368e-05, "loss": 
2.4674989700317385, "memory(GiB)": 59.76, "step": 705, "token_acc": 0.4721311475409836, "train_speed(iter/s)": 0.665502 }, { "epoch": 0.03041857675335247, "grad_norm": 4.716123104095459, "learning_rate": 9.999091940727317e-05, "loss": 2.3293735504150392, "memory(GiB)": 59.76, "step": 710, "token_acc": 0.5057471264367817, "train_speed(iter/s)": 0.666688 }, { "epoch": 0.03063279208260143, "grad_norm": 2.947115659713745, "learning_rate": 9.999079070138024e-05, "loss": 2.39759521484375, "memory(GiB)": 59.76, "step": 715, "token_acc": 0.5019455252918288, "train_speed(iter/s)": 0.667292 }, { "epoch": 0.030847007411850393, "grad_norm": 3.618253707885742, "learning_rate": 9.999066108985724e-05, "loss": 2.3360755920410154, "memory(GiB)": 59.76, "step": 720, "token_acc": 0.5, "train_speed(iter/s)": 0.667571 }, { "epoch": 0.03106122274109935, "grad_norm": 2.906533718109131, "learning_rate": 9.999053057270652e-05, "loss": 2.5201339721679688, "memory(GiB)": 59.76, "step": 725, "token_acc": 0.4803370786516854, "train_speed(iter/s)": 0.667929 }, { "epoch": 0.03127543807034831, "grad_norm": 3.5709686279296875, "learning_rate": 9.999039914993044e-05, "loss": 2.4805870056152344, "memory(GiB)": 59.76, "step": 730, "token_acc": 0.4935064935064935, "train_speed(iter/s)": 0.66862 }, { "epoch": 0.03148965339959728, "grad_norm": 4.1522674560546875, "learning_rate": 9.99902668215314e-05, "loss": 2.6245189666748048, "memory(GiB)": 59.76, "step": 735, "token_acc": 0.4602076124567474, "train_speed(iter/s)": 0.669648 }, { "epoch": 0.031703868728846236, "grad_norm": 2.847503423690796, "learning_rate": 9.999013358751176e-05, "loss": 2.003310775756836, "memory(GiB)": 59.76, "step": 740, "token_acc": 0.5501730103806228, "train_speed(iter/s)": 0.670108 }, { "epoch": 0.031918084058095195, "grad_norm": 3.1880505084991455, "learning_rate": 9.998999944787398e-05, "loss": 2.2458026885986326, "memory(GiB)": 59.76, "step": 745, "token_acc": 0.4967532467532468, "train_speed(iter/s)": 0.669516 }, { "epoch": 
0.03213229938734416, "grad_norm": 6.351881504058838, "learning_rate": 9.998986440262045e-05, "loss": 2.447923469543457, "memory(GiB)": 59.76, "step": 750, "token_acc": 0.47410358565737054, "train_speed(iter/s)": 0.668711 }, { "epoch": 0.03234651471659312, "grad_norm": 3.2980964183807373, "learning_rate": 9.998972845175365e-05, "loss": 2.977781295776367, "memory(GiB)": 59.76, "step": 755, "token_acc": 0.43636363636363634, "train_speed(iter/s)": 0.668911 }, { "epoch": 0.03256073004584208, "grad_norm": 3.4381752014160156, "learning_rate": 9.998959159527602e-05, "loss": 2.8471588134765624, "memory(GiB)": 59.76, "step": 760, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.668621 }, { "epoch": 0.03277494537509104, "grad_norm": 4.565474510192871, "learning_rate": 9.998945383319003e-05, "loss": 2.383611297607422, "memory(GiB)": 59.76, "step": 765, "token_acc": 0.5041322314049587, "train_speed(iter/s)": 0.668596 }, { "epoch": 0.03298916070434, "grad_norm": 2.661597728729248, "learning_rate": 9.998931516549819e-05, "loss": 2.784406852722168, "memory(GiB)": 59.76, "step": 770, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.669038 }, { "epoch": 0.033203376033588965, "grad_norm": 2.217081308364868, "learning_rate": 9.998917559220302e-05, "loss": 2.6375885009765625, "memory(GiB)": 59.76, "step": 775, "token_acc": 0.4417808219178082, "train_speed(iter/s)": 0.66933 }, { "epoch": 0.03341759136283792, "grad_norm": 2.3345634937286377, "learning_rate": 9.998903511330705e-05, "loss": 2.688210678100586, "memory(GiB)": 59.76, "step": 780, "token_acc": 0.45714285714285713, "train_speed(iter/s)": 0.669004 }, { "epoch": 0.03363180669208689, "grad_norm": 3.5489377975463867, "learning_rate": 9.998889372881279e-05, "loss": 2.261855125427246, "memory(GiB)": 59.76, "step": 785, "token_acc": 0.5132450331125827, "train_speed(iter/s)": 0.669469 }, { "epoch": 0.033846022021335846, "grad_norm": 2.9521610736846924, "learning_rate": 9.998875143872284e-05, "loss": 2.395612335205078, 
"memory(GiB)": 59.76, "step": 790, "token_acc": 0.45149253731343286, "train_speed(iter/s)": 0.668923 }, { "epoch": 0.034060237350584804, "grad_norm": 2.4507229328155518, "learning_rate": 9.998860824303978e-05, "loss": 2.6755353927612306, "memory(GiB)": 59.76, "step": 795, "token_acc": 0.4339152119700748, "train_speed(iter/s)": 0.668939 }, { "epoch": 0.03427445267983377, "grad_norm": 2.489990711212158, "learning_rate": 9.998846414176616e-05, "loss": 2.4865036010742188, "memory(GiB)": 59.76, "step": 800, "token_acc": 0.49433962264150944, "train_speed(iter/s)": 0.668719 }, { "epoch": 0.03448866800908273, "grad_norm": 2.4918060302734375, "learning_rate": 9.998831913490464e-05, "loss": 2.6527965545654295, "memory(GiB)": 59.76, "step": 805, "token_acc": 0.43686006825938567, "train_speed(iter/s)": 0.670108 }, { "epoch": 0.03470288333833169, "grad_norm": 2.666670799255371, "learning_rate": 9.998817322245781e-05, "loss": 2.443550682067871, "memory(GiB)": 59.76, "step": 810, "token_acc": 0.5058823529411764, "train_speed(iter/s)": 0.671247 }, { "epoch": 0.03491709866758065, "grad_norm": 2.545412063598633, "learning_rate": 9.998802640442835e-05, "loss": 2.3376110076904295, "memory(GiB)": 59.76, "step": 815, "token_acc": 0.45987654320987653, "train_speed(iter/s)": 0.670626 }, { "epoch": 0.035131313996829616, "grad_norm": 3.0486607551574707, "learning_rate": 9.998787868081889e-05, "loss": 2.7098230361938476, "memory(GiB)": 59.76, "step": 820, "token_acc": 0.40634920634920635, "train_speed(iter/s)": 0.671038 }, { "epoch": 0.035345529326078574, "grad_norm": 3.0863606929779053, "learning_rate": 9.99877300516321e-05, "loss": 2.544964599609375, "memory(GiB)": 59.76, "step": 825, "token_acc": 0.4472843450479233, "train_speed(iter/s)": 0.671846 }, { "epoch": 0.03555974465532753, "grad_norm": 3.861515998840332, "learning_rate": 9.998758051687072e-05, "loss": 2.5315826416015623, "memory(GiB)": 59.76, "step": 830, "token_acc": 0.4558303886925795, "train_speed(iter/s)": 0.672291 }, { 
"epoch": 0.0357739599845765, "grad_norm": 1.9491697549819946, "learning_rate": 9.998743007653739e-05, "loss": 2.773722457885742, "memory(GiB)": 59.76, "step": 835, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.672645 }, { "epoch": 0.035988175313825456, "grad_norm": 3.7462189197540283, "learning_rate": 9.998727873063489e-05, "loss": 2.2591730117797852, "memory(GiB)": 59.76, "step": 840, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.672293 }, { "epoch": 0.03620239064307442, "grad_norm": 2.4758565425872803, "learning_rate": 9.998712647916594e-05, "loss": 2.335140419006348, "memory(GiB)": 59.76, "step": 845, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.672059 }, { "epoch": 0.03641660597232338, "grad_norm": 3.2883007526397705, "learning_rate": 9.998697332213331e-05, "loss": 2.5856307983398437, "memory(GiB)": 59.76, "step": 850, "token_acc": 0.4387755102040816, "train_speed(iter/s)": 0.671385 }, { "epoch": 0.03663082130157234, "grad_norm": 3.326568841934204, "learning_rate": 9.998681925953977e-05, "loss": 2.3719156265258787, "memory(GiB)": 59.76, "step": 855, "token_acc": 0.5444015444015444, "train_speed(iter/s)": 0.671611 }, { "epoch": 0.0368450366308213, "grad_norm": 3.948533296585083, "learning_rate": 9.998666429138809e-05, "loss": 2.473469924926758, "memory(GiB)": 59.76, "step": 860, "token_acc": 0.5372168284789643, "train_speed(iter/s)": 0.671995 }, { "epoch": 0.03705925196007026, "grad_norm": 3.5285654067993164, "learning_rate": 9.998650841768111e-05, "loss": 2.3927053451538085, "memory(GiB)": 59.76, "step": 865, "token_acc": 0.5, "train_speed(iter/s)": 0.671999 }, { "epoch": 0.037273467289319226, "grad_norm": 2.4442574977874756, "learning_rate": 9.998635163842164e-05, "loss": 2.467591667175293, "memory(GiB)": 59.76, "step": 870, "token_acc": 0.48828125, "train_speed(iter/s)": 0.672502 }, { "epoch": 0.037487682618568184, "grad_norm": 2.8511624336242676, "learning_rate": 9.99861939536125e-05, "loss": 2.4073339462280274, 
"memory(GiB)": 59.76, "step": 875, "token_acc": 0.4793103448275862, "train_speed(iter/s)": 0.67271 }, { "epoch": 0.03770189794781715, "grad_norm": 2.356694459915161, "learning_rate": 9.99860353632566e-05, "loss": 2.422062301635742, "memory(GiB)": 59.76, "step": 880, "token_acc": 0.4735376044568245, "train_speed(iter/s)": 0.672496 }, { "epoch": 0.03791611327706611, "grad_norm": 2.3313121795654297, "learning_rate": 9.998587586735675e-05, "loss": 2.607916069030762, "memory(GiB)": 59.76, "step": 885, "token_acc": 0.4894366197183099, "train_speed(iter/s)": 0.672579 }, { "epoch": 0.038130328606315066, "grad_norm": 2.416323661804199, "learning_rate": 9.998571546591587e-05, "loss": 2.4503864288330077, "memory(GiB)": 59.76, "step": 890, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.673801 }, { "epoch": 0.03834454393556403, "grad_norm": 3.3315584659576416, "learning_rate": 9.998555415893686e-05, "loss": 2.568191146850586, "memory(GiB)": 59.76, "step": 895, "token_acc": 0.49049429657794674, "train_speed(iter/s)": 0.673057 }, { "epoch": 0.03855875926481299, "grad_norm": 2.814087152481079, "learning_rate": 9.998539194642266e-05, "loss": 2.4763540267944335, "memory(GiB)": 59.76, "step": 900, "token_acc": 0.4891640866873065, "train_speed(iter/s)": 0.672251 }, { "epoch": 0.038772974594061954, "grad_norm": 2.854053258895874, "learning_rate": 9.99852288283762e-05, "loss": 2.4887420654296877, "memory(GiB)": 59.76, "step": 905, "token_acc": 0.4662756598240469, "train_speed(iter/s)": 0.671275 }, { "epoch": 0.03898718992331091, "grad_norm": 3.2843821048736572, "learning_rate": 9.998506480480043e-05, "loss": 2.4509937286376955, "memory(GiB)": 59.76, "step": 910, "token_acc": 0.4533762057877814, "train_speed(iter/s)": 0.671682 }, { "epoch": 0.03920140525255987, "grad_norm": 2.675539255142212, "learning_rate": 9.99848998756983e-05, "loss": 2.5020095825195314, "memory(GiB)": 59.76, "step": 915, "token_acc": 0.4631578947368421, "train_speed(iter/s)": 0.671313 }, { "epoch": 
0.039415620581808836, "grad_norm": 3.138305187225342, "learning_rate": 9.998473404107284e-05, "loss": 2.3701812744140627, "memory(GiB)": 59.76, "step": 920, "token_acc": 0.47183098591549294, "train_speed(iter/s)": 0.67109 }, { "epoch": 0.039629835911057794, "grad_norm": 2.742785930633545, "learning_rate": 9.998456730092703e-05, "loss": 2.768656539916992, "memory(GiB)": 59.76, "step": 925, "token_acc": 0.40129449838187703, "train_speed(iter/s)": 0.671117 }, { "epoch": 0.03984405124030676, "grad_norm": 2.2914512157440186, "learning_rate": 9.998439965526388e-05, "loss": 2.447503852844238, "memory(GiB)": 59.76, "step": 930, "token_acc": 0.4309210526315789, "train_speed(iter/s)": 0.67166 }, { "epoch": 0.04005826656955572, "grad_norm": 2.6796770095825195, "learning_rate": 9.998423110408644e-05, "loss": 2.3164072036743164, "memory(GiB)": 59.76, "step": 935, "token_acc": 0.4885245901639344, "train_speed(iter/s)": 0.671809 }, { "epoch": 0.040272481898804675, "grad_norm": 2.3415634632110596, "learning_rate": 9.998406164739778e-05, "loss": 2.454269599914551, "memory(GiB)": 59.76, "step": 940, "token_acc": 0.46742209631728043, "train_speed(iter/s)": 0.671517 }, { "epoch": 0.04048669722805364, "grad_norm": 2.89662766456604, "learning_rate": 9.998389128520095e-05, "loss": 2.8207130432128906, "memory(GiB)": 59.76, "step": 945, "token_acc": 0.4258064516129032, "train_speed(iter/s)": 0.672135 }, { "epoch": 0.0407009125573026, "grad_norm": 2.85054874420166, "learning_rate": 9.998372001749904e-05, "loss": 2.4974027633666993, "memory(GiB)": 59.76, "step": 950, "token_acc": 0.48928571428571427, "train_speed(iter/s)": 0.672395 }, { "epoch": 0.040915127886551564, "grad_norm": 2.201915740966797, "learning_rate": 9.998354784429515e-05, "loss": 2.3969764709472656, "memory(GiB)": 59.76, "step": 955, "token_acc": 0.4816053511705686, "train_speed(iter/s)": 0.672687 }, { "epoch": 0.04112934321580052, "grad_norm": 2.655164957046509, "learning_rate": 9.998337476559241e-05, "loss": 
2.5222347259521483, "memory(GiB)": 59.76, "step": 960, "token_acc": 0.4498567335243553, "train_speed(iter/s)": 0.673155 }, { "epoch": 0.04134355854504949, "grad_norm": 3.314729928970337, "learning_rate": 9.998320078139393e-05, "loss": 2.396233558654785, "memory(GiB)": 59.76, "step": 965, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.673738 }, { "epoch": 0.041557773874298445, "grad_norm": 2.6908535957336426, "learning_rate": 9.998302589170289e-05, "loss": 2.5164920806884767, "memory(GiB)": 59.76, "step": 970, "token_acc": 0.5020746887966805, "train_speed(iter/s)": 0.674459 }, { "epoch": 0.041771989203547404, "grad_norm": 5.003871917724609, "learning_rate": 9.998285009652246e-05, "loss": 2.4057552337646486, "memory(GiB)": 59.76, "step": 975, "token_acc": 0.5063694267515924, "train_speed(iter/s)": 0.67468 }, { "epoch": 0.04198620453279637, "grad_norm": 2.4903035163879395, "learning_rate": 9.99826733958558e-05, "loss": 2.5561473846435545, "memory(GiB)": 59.76, "step": 980, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.674505 }, { "epoch": 0.04220041986204533, "grad_norm": 3.275430202484131, "learning_rate": 9.998249578970613e-05, "loss": 2.8397144317626952, "memory(GiB)": 59.76, "step": 985, "token_acc": 0.423841059602649, "train_speed(iter/s)": 0.675278 }, { "epoch": 0.04241463519129429, "grad_norm": 3.6849966049194336, "learning_rate": 9.998231727807665e-05, "loss": 2.625379180908203, "memory(GiB)": 59.76, "step": 990, "token_acc": 0.488135593220339, "train_speed(iter/s)": 0.675591 }, { "epoch": 0.04262885052054325, "grad_norm": 3.3670542240142822, "learning_rate": 9.998213786097062e-05, "loss": 2.3797945022583007, "memory(GiB)": 59.76, "step": 995, "token_acc": 0.458041958041958, "train_speed(iter/s)": 0.676077 }, { "epoch": 0.04284306584979221, "grad_norm": 2.597395658493042, "learning_rate": 9.998195753839127e-05, "loss": 2.752528190612793, "memory(GiB)": 59.76, "step": 1000, "token_acc": 0.4489795918367347, "train_speed(iter/s)": 0.676568 
}, { "epoch": 0.04284306584979221, "eval_loss": 2.2045421600341797, "eval_runtime": 16.253, "eval_samples_per_second": 6.153, "eval_steps_per_second": 6.153, "eval_token_acc": 0.4915492957746479, "step": 1000 }, { "epoch": 0.043057281179041174, "grad_norm": 2.3584344387054443, "learning_rate": 9.998177631034187e-05, "loss": 2.561487579345703, "memory(GiB)": 59.76, "step": 1005, "token_acc": 0.4847328244274809, "train_speed(iter/s)": 0.667789 }, { "epoch": 0.04327149650829013, "grad_norm": 2.7300567626953125, "learning_rate": 9.99815941768257e-05, "loss": 2.3087539672851562, "memory(GiB)": 59.76, "step": 1010, "token_acc": 0.5140845070422535, "train_speed(iter/s)": 0.667689 }, { "epoch": 0.0434857118375391, "grad_norm": 2.6863367557525635, "learning_rate": 9.998141113784609e-05, "loss": 2.6526983261108397, "memory(GiB)": 59.76, "step": 1015, "token_acc": 0.45985401459854014, "train_speed(iter/s)": 0.668338 }, { "epoch": 0.043699927166788055, "grad_norm": 2.5612528324127197, "learning_rate": 9.998122719340632e-05, "loss": 2.6215803146362306, "memory(GiB)": 59.76, "step": 1020, "token_acc": 0.43859649122807015, "train_speed(iter/s)": 0.668771 }, { "epoch": 0.04391414249603701, "grad_norm": 2.65720272064209, "learning_rate": 9.998104234350971e-05, "loss": 2.4114349365234373, "memory(GiB)": 59.76, "step": 1025, "token_acc": 0.4645390070921986, "train_speed(iter/s)": 0.669303 }, { "epoch": 0.04412835782528598, "grad_norm": 2.431424140930176, "learning_rate": 9.998085658815966e-05, "loss": 2.638285446166992, "memory(GiB)": 59.76, "step": 1030, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.66976 }, { "epoch": 0.04434257315453494, "grad_norm": 3.133423328399658, "learning_rate": 9.998066992735949e-05, "loss": 2.6178911209106444, "memory(GiB)": 59.76, "step": 1035, "token_acc": 0.43416370106761565, "train_speed(iter/s)": 0.670092 }, { "epoch": 0.0445567884837839, "grad_norm": 2.7107956409454346, "learning_rate": 9.99804823611126e-05, "loss": 2.2540332794189455, 
"memory(GiB)": 59.76, "step": 1040, "token_acc": 0.5275862068965518, "train_speed(iter/s)": 0.670057 }, { "epoch": 0.04477100381303286, "grad_norm": 2.9061596393585205, "learning_rate": 9.998029388942239e-05, "loss": 2.5759124755859375, "memory(GiB)": 59.76, "step": 1045, "token_acc": 0.46885245901639344, "train_speed(iter/s)": 0.669303 }, { "epoch": 0.044985219142281825, "grad_norm": 3.928257942199707, "learning_rate": 9.998010451229227e-05, "loss": 2.6677423477172852, "memory(GiB)": 59.76, "step": 1050, "token_acc": 0.4448979591836735, "train_speed(iter/s)": 0.669774 }, { "epoch": 0.04519943447153078, "grad_norm": 2.717076063156128, "learning_rate": 9.997991422972568e-05, "loss": 2.3865373611450194, "memory(GiB)": 60.19, "step": 1055, "token_acc": 0.48299319727891155, "train_speed(iter/s)": 0.668954 }, { "epoch": 0.04541364980077974, "grad_norm": 2.9345760345458984, "learning_rate": 9.997972304172605e-05, "loss": 2.6539567947387694, "memory(GiB)": 60.19, "step": 1060, "token_acc": 0.4738955823293173, "train_speed(iter/s)": 0.668613 }, { "epoch": 0.04562786513002871, "grad_norm": 2.6845927238464355, "learning_rate": 9.997953094829686e-05, "loss": 2.2777278900146483, "memory(GiB)": 60.19, "step": 1065, "token_acc": 0.5363984674329502, "train_speed(iter/s)": 0.668925 }, { "epoch": 0.045842080459277665, "grad_norm": 2.971059799194336, "learning_rate": 9.997933794944157e-05, "loss": 2.3380268096923826, "memory(GiB)": 60.19, "step": 1070, "token_acc": 0.49814126394052044, "train_speed(iter/s)": 0.66957 }, { "epoch": 0.04605629578852663, "grad_norm": 3.8125662803649902, "learning_rate": 9.99791440451637e-05, "loss": 2.453446388244629, "memory(GiB)": 60.19, "step": 1075, "token_acc": 0.5369649805447471, "train_speed(iter/s)": 0.669531 }, { "epoch": 0.04627051111777559, "grad_norm": 2.4771504402160645, "learning_rate": 9.997894923546674e-05, "loss": 2.509562683105469, "memory(GiB)": 60.19, "step": 1080, "token_acc": 0.4573170731707317, "train_speed(iter/s)": 0.669747 }, { 
"epoch": 0.046484726447024546, "grad_norm": 3.3682193756103516, "learning_rate": 9.997875352035424e-05, "loss": 2.747494697570801, "memory(GiB)": 60.19, "step": 1085, "token_acc": 0.4117647058823529, "train_speed(iter/s)": 0.669829 }, { "epoch": 0.04669894177627351, "grad_norm": 4.381232738494873, "learning_rate": 9.997855689982973e-05, "loss": 2.5145332336425783, "memory(GiB)": 60.19, "step": 1090, "token_acc": 0.47468354430379744, "train_speed(iter/s)": 0.669394 }, { "epoch": 0.04691315710552247, "grad_norm": 2.048034429550171, "learning_rate": 9.99783593738968e-05, "loss": 2.438359832763672, "memory(GiB)": 60.19, "step": 1095, "token_acc": 0.486013986013986, "train_speed(iter/s)": 0.66983 }, { "epoch": 0.047127372434771435, "grad_norm": 2.477473258972168, "learning_rate": 9.997816094255897e-05, "loss": 2.488171195983887, "memory(GiB)": 60.19, "step": 1100, "token_acc": 0.4843205574912892, "train_speed(iter/s)": 0.669035 }, { "epoch": 0.04734158776402039, "grad_norm": 2.6468658447265625, "learning_rate": 9.99779616058199e-05, "loss": 2.2476085662841796, "memory(GiB)": 60.19, "step": 1105, "token_acc": 0.5220883534136547, "train_speed(iter/s)": 0.669257 }, { "epoch": 0.04755580309326935, "grad_norm": 2.1623973846435547, "learning_rate": 9.997776136368315e-05, "loss": 2.4350830078125, "memory(GiB)": 60.19, "step": 1110, "token_acc": 0.4804270462633452, "train_speed(iter/s)": 0.66921 }, { "epoch": 0.047770018422518316, "grad_norm": 3.878840208053589, "learning_rate": 9.99775602161524e-05, "loss": 2.6180423736572265, "memory(GiB)": 60.19, "step": 1115, "token_acc": 0.4674922600619195, "train_speed(iter/s)": 0.669817 }, { "epoch": 0.047984233751767275, "grad_norm": 3.14975905418396, "learning_rate": 9.997735816323123e-05, "loss": 2.525720977783203, "memory(GiB)": 60.19, "step": 1120, "token_acc": 0.46953405017921146, "train_speed(iter/s)": 0.670207 }, { "epoch": 0.04819844908101624, "grad_norm": 2.040764093399048, "learning_rate": 9.997715520492334e-05, "loss": 
2.156782531738281, "memory(GiB)": 60.19, "step": 1125, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.670046 }, { "epoch": 0.0484126644102652, "grad_norm": 6.068016529083252, "learning_rate": 9.997695134123241e-05, "loss": 2.5941583633422853, "memory(GiB)": 60.19, "step": 1130, "token_acc": 0.44011976047904194, "train_speed(iter/s)": 0.670001 }, { "epoch": 0.04862687973951416, "grad_norm": 2.5243539810180664, "learning_rate": 9.997674657216214e-05, "loss": 2.5929119110107424, "memory(GiB)": 60.19, "step": 1135, "token_acc": 0.42574257425742573, "train_speed(iter/s)": 0.67049 }, { "epoch": 0.04884109506876312, "grad_norm": 2.9502015113830566, "learning_rate": 9.99765408977162e-05, "loss": 2.7510013580322266, "memory(GiB)": 60.19, "step": 1140, "token_acc": 0.46774193548387094, "train_speed(iter/s)": 0.670243 }, { "epoch": 0.04905531039801208, "grad_norm": 3.494006395339966, "learning_rate": 9.997633431789836e-05, "loss": 2.610700225830078, "memory(GiB)": 60.19, "step": 1145, "token_acc": 0.48626373626373626, "train_speed(iter/s)": 0.670529 }, { "epoch": 0.049269525727261045, "grad_norm": 2.4302144050598145, "learning_rate": 9.997612683271232e-05, "loss": 2.722503662109375, "memory(GiB)": 60.19, "step": 1150, "token_acc": 0.45977011494252873, "train_speed(iter/s)": 0.670719 }, { "epoch": 0.04948374105651, "grad_norm": 2.8347177505493164, "learning_rate": 9.997591844216187e-05, "loss": 2.6660179138183593, "memory(GiB)": 60.19, "step": 1155, "token_acc": 0.436950146627566, "train_speed(iter/s)": 0.670063 }, { "epoch": 0.04969795638575897, "grad_norm": 3.047088861465454, "learning_rate": 9.997570914625079e-05, "loss": 2.5812347412109373, "memory(GiB)": 60.19, "step": 1160, "token_acc": 0.4708029197080292, "train_speed(iter/s)": 0.670409 }, { "epoch": 0.049912171715007926, "grad_norm": 3.585158348083496, "learning_rate": 9.997549894498284e-05, "loss": 2.555681037902832, "memory(GiB)": 60.19, "step": 1165, "token_acc": 0.44642857142857145, 
"train_speed(iter/s)": 0.670867 }, { "epoch": 0.050126387044256884, "grad_norm": 3.5295934677124023, "learning_rate": 9.997528783836185e-05, "loss": 2.5314117431640626, "memory(GiB)": 60.19, "step": 1170, "token_acc": 0.4868913857677903, "train_speed(iter/s)": 0.670866 }, { "epoch": 0.05034060237350585, "grad_norm": 3.659979820251465, "learning_rate": 9.997507582639165e-05, "loss": 2.5530046463012694, "memory(GiB)": 60.19, "step": 1175, "token_acc": 0.5085470085470085, "train_speed(iter/s)": 0.67107 }, { "epoch": 0.05055481770275481, "grad_norm": 2.9115400314331055, "learning_rate": 9.997486290907606e-05, "loss": 2.4083274841308593, "memory(GiB)": 60.19, "step": 1180, "token_acc": 0.5210084033613446, "train_speed(iter/s)": 0.671476 }, { "epoch": 0.05076903303200377, "grad_norm": 2.809870719909668, "learning_rate": 9.997464908641896e-05, "loss": 2.5525539398193358, "memory(GiB)": 60.19, "step": 1185, "token_acc": 0.4896551724137931, "train_speed(iter/s)": 0.671813 }, { "epoch": 0.05098324836125273, "grad_norm": 2.6464269161224365, "learning_rate": 9.997443435842419e-05, "loss": 2.3779880523681642, "memory(GiB)": 60.19, "step": 1190, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.671911 }, { "epoch": 0.05119746369050169, "grad_norm": 2.5989644527435303, "learning_rate": 9.997421872509568e-05, "loss": 2.6515920639038084, "memory(GiB)": 60.19, "step": 1195, "token_acc": 0.484472049689441, "train_speed(iter/s)": 0.671922 }, { "epoch": 0.051411679019750654, "grad_norm": 2.899158239364624, "learning_rate": 9.997400218643731e-05, "loss": 2.5386390686035156, "memory(GiB)": 60.19, "step": 1200, "token_acc": 0.49236641221374045, "train_speed(iter/s)": 0.672221 }, { "epoch": 0.05162589434899961, "grad_norm": 2.3768391609191895, "learning_rate": 9.997378474245303e-05, "loss": 2.526541519165039, "memory(GiB)": 60.19, "step": 1205, "token_acc": 0.4902597402597403, "train_speed(iter/s)": 0.672421 }, { "epoch": 0.05184010967824858, "grad_norm": 3.3173210620880127, 
"learning_rate": 9.997356639314673e-05, "loss": 2.4409332275390625, "memory(GiB)": 60.19, "step": 1210, "token_acc": 0.4717741935483871, "train_speed(iter/s)": 0.67237 }, { "epoch": 0.052054325007497536, "grad_norm": 2.6563098430633545, "learning_rate": 9.997334713852241e-05, "loss": 2.634078025817871, "memory(GiB)": 60.19, "step": 1215, "token_acc": 0.4632768361581921, "train_speed(iter/s)": 0.671947 }, { "epoch": 0.0522685403367465, "grad_norm": 3.4604620933532715, "learning_rate": 9.997312697858403e-05, "loss": 2.560551643371582, "memory(GiB)": 60.19, "step": 1220, "token_acc": 0.5018315018315018, "train_speed(iter/s)": 0.671158 }, { "epoch": 0.05248275566599546, "grad_norm": 2.619927406311035, "learning_rate": 9.997290591333557e-05, "loss": 2.7530437469482423, "memory(GiB)": 60.19, "step": 1225, "token_acc": 0.4532967032967033, "train_speed(iter/s)": 0.671598 }, { "epoch": 0.05269697099524442, "grad_norm": 2.332350969314575, "learning_rate": 9.997268394278106e-05, "loss": 2.2954320907592773, "memory(GiB)": 60.19, "step": 1230, "token_acc": 0.4795539033457249, "train_speed(iter/s)": 0.671699 }, { "epoch": 0.05291118632449338, "grad_norm": 3.381535530090332, "learning_rate": 9.997246106692448e-05, "loss": 2.474668502807617, "memory(GiB)": 60.19, "step": 1235, "token_acc": 0.5168195718654435, "train_speed(iter/s)": 0.671373 }, { "epoch": 0.05312540165374234, "grad_norm": 3.2394087314605713, "learning_rate": 9.99722372857699e-05, "loss": 2.216074752807617, "memory(GiB)": 60.19, "step": 1240, "token_acc": 0.5, "train_speed(iter/s)": 0.672002 }, { "epoch": 0.053339616982991306, "grad_norm": 2.657513380050659, "learning_rate": 9.997201259932135e-05, "loss": 2.2786205291748045, "memory(GiB)": 60.19, "step": 1245, "token_acc": 0.49310344827586206, "train_speed(iter/s)": 0.671972 }, { "epoch": 0.053553832312240264, "grad_norm": 2.6649532318115234, "learning_rate": 9.997178700758293e-05, "loss": 2.3548254013061523, "memory(GiB)": 60.19, "step": 1250, "token_acc": 
0.5229357798165137, "train_speed(iter/s)": 0.672083 }, { "epoch": 0.05376804764148922, "grad_norm": 2.594942092895508, "learning_rate": 9.99715605105587e-05, "loss": 2.331675910949707, "memory(GiB)": 60.19, "step": 1255, "token_acc": 0.48828125, "train_speed(iter/s)": 0.671889 }, { "epoch": 0.05398226297073819, "grad_norm": 2.5154826641082764, "learning_rate": 9.997133310825278e-05, "loss": 2.61854248046875, "memory(GiB)": 60.19, "step": 1260, "token_acc": 0.45, "train_speed(iter/s)": 0.671425 }, { "epoch": 0.054196478299987146, "grad_norm": 3.0075607299804688, "learning_rate": 9.997110480066929e-05, "loss": 2.349809455871582, "memory(GiB)": 60.19, "step": 1265, "token_acc": 0.4755700325732899, "train_speed(iter/s)": 0.671057 }, { "epoch": 0.05441069362923611, "grad_norm": 2.7061071395874023, "learning_rate": 9.997087558781236e-05, "loss": 2.431550979614258, "memory(GiB)": 60.19, "step": 1270, "token_acc": 0.4753521126760563, "train_speed(iter/s)": 0.670772 }, { "epoch": 0.05462490895848507, "grad_norm": 2.7159698009490967, "learning_rate": 9.997064546968613e-05, "loss": 2.5076034545898436, "memory(GiB)": 60.19, "step": 1275, "token_acc": 0.5, "train_speed(iter/s)": 0.671013 }, { "epoch": 0.05483912428773403, "grad_norm": 2.6523561477661133, "learning_rate": 9.997041444629478e-05, "loss": 2.55328369140625, "memory(GiB)": 60.19, "step": 1280, "token_acc": 0.47796610169491527, "train_speed(iter/s)": 0.670886 }, { "epoch": 0.05505333961698299, "grad_norm": 8.06808853149414, "learning_rate": 9.997018251764251e-05, "loss": 2.4689090728759764, "memory(GiB)": 60.19, "step": 1285, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.671038 }, { "epoch": 0.05526755494623195, "grad_norm": 2.3894896507263184, "learning_rate": 9.99699496837335e-05, "loss": 2.5448007583618164, "memory(GiB)": 60.19, "step": 1290, "token_acc": 0.47572815533980584, "train_speed(iter/s)": 0.670387 }, { "epoch": 0.055481770275480916, "grad_norm": 2.5673248767852783, "learning_rate": 
9.996971594457198e-05, "loss": 2.215565490722656, "memory(GiB)": 62.08, "step": 1295, "token_acc": 0.5049180327868853, "train_speed(iter/s)": 0.670039 }, { "epoch": 0.055695985604729874, "grad_norm": 2.911127805709839, "learning_rate": 9.996948130016216e-05, "loss": 2.502191925048828, "memory(GiB)": 62.08, "step": 1300, "token_acc": 0.44108761329305135, "train_speed(iter/s)": 0.669743 }, { "epoch": 0.05591020093397884, "grad_norm": 2.2582786083221436, "learning_rate": 9.996924575050834e-05, "loss": 2.50824089050293, "memory(GiB)": 62.08, "step": 1305, "token_acc": 0.475, "train_speed(iter/s)": 0.669919 }, { "epoch": 0.0561244162632278, "grad_norm": 3.2510359287261963, "learning_rate": 9.996900929561475e-05, "loss": 2.386900520324707, "memory(GiB)": 62.08, "step": 1310, "token_acc": 0.49808429118773945, "train_speed(iter/s)": 0.670155 }, { "epoch": 0.056338631592476755, "grad_norm": 2.2602782249450684, "learning_rate": 9.996877193548568e-05, "loss": 2.4196496963500977, "memory(GiB)": 62.08, "step": 1315, "token_acc": 0.5089605734767025, "train_speed(iter/s)": 0.670226 }, { "epoch": 0.05655284692172572, "grad_norm": 3.0289127826690674, "learning_rate": 9.996853367012543e-05, "loss": 2.63345947265625, "memory(GiB)": 62.08, "step": 1320, "token_acc": 0.4485049833887043, "train_speed(iter/s)": 0.670156 }, { "epoch": 0.05676706225097468, "grad_norm": 3.2797915935516357, "learning_rate": 9.996829449953831e-05, "loss": 2.394532012939453, "memory(GiB)": 62.08, "step": 1325, "token_acc": 0.5076335877862596, "train_speed(iter/s)": 0.669931 }, { "epoch": 0.056981277580223644, "grad_norm": 3.009809970855713, "learning_rate": 9.996805442372867e-05, "loss": 2.410894203186035, "memory(GiB)": 62.08, "step": 1330, "token_acc": 0.4805194805194805, "train_speed(iter/s)": 0.669814 }, { "epoch": 0.0571954929094726, "grad_norm": 3.040433406829834, "learning_rate": 9.996781344270085e-05, "loss": 2.539087104797363, "memory(GiB)": 62.08, "step": 1335, "token_acc": 0.4657534246575342, 
"train_speed(iter/s)": 0.669997 }, { "epoch": 0.05740970823872156, "grad_norm": 2.3869171142578125, "learning_rate": 9.996757155645923e-05, "loss": 2.6084232330322266, "memory(GiB)": 62.08, "step": 1340, "token_acc": 0.4722222222222222, "train_speed(iter/s)": 0.670527 }, { "epoch": 0.057623923567970525, "grad_norm": 2.979222297668457, "learning_rate": 9.996732876500816e-05, "loss": 2.5135498046875, "memory(GiB)": 62.08, "step": 1345, "token_acc": 0.4781021897810219, "train_speed(iter/s)": 0.670221 }, { "epoch": 0.057838138897219483, "grad_norm": 3.5038297176361084, "learning_rate": 9.996708506835206e-05, "loss": 2.1399913787841798, "memory(GiB)": 62.08, "step": 1350, "token_acc": 0.531496062992126, "train_speed(iter/s)": 0.670095 }, { "epoch": 0.05805235422646845, "grad_norm": 2.5690457820892334, "learning_rate": 9.996684046649533e-05, "loss": 2.7772762298583986, "memory(GiB)": 62.08, "step": 1355, "token_acc": 0.4216417910447761, "train_speed(iter/s)": 0.670683 }, { "epoch": 0.05826656955571741, "grad_norm": 2.074791431427002, "learning_rate": 9.996659495944244e-05, "loss": 2.5677734375, "memory(GiB)": 62.08, "step": 1360, "token_acc": 0.4808259587020649, "train_speed(iter/s)": 0.67142 }, { "epoch": 0.058480784884966365, "grad_norm": 3.475457191467285, "learning_rate": 9.99663485471978e-05, "loss": 2.6990060806274414, "memory(GiB)": 62.08, "step": 1365, "token_acc": 0.44366197183098594, "train_speed(iter/s)": 0.671442 }, { "epoch": 0.05869500021421533, "grad_norm": 2.8692872524261475, "learning_rate": 9.996610122976586e-05, "loss": 2.603835678100586, "memory(GiB)": 62.08, "step": 1370, "token_acc": 0.44642857142857145, "train_speed(iter/s)": 0.672025 }, { "epoch": 0.05890921554346429, "grad_norm": 2.537635326385498, "learning_rate": 9.996585300715116e-05, "loss": 2.615510177612305, "memory(GiB)": 62.08, "step": 1375, "token_acc": 0.432258064516129, "train_speed(iter/s)": 0.6719 }, { "epoch": 0.059123430872713253, "grad_norm": 2.406095027923584, "learning_rate": 
9.996560387935814e-05, "loss": 2.3799766540527343, "memory(GiB)": 62.08, "step": 1380, "token_acc": 0.4865771812080537, "train_speed(iter/s)": 0.671782 }, { "epoch": 0.05933764620196221, "grad_norm": 3.7213551998138428, "learning_rate": 9.996535384639132e-05, "loss": 2.4646766662597654, "memory(GiB)": 62.08, "step": 1385, "token_acc": 0.44981412639405205, "train_speed(iter/s)": 0.671825 }, { "epoch": 0.05955186153121118, "grad_norm": 3.1454155445098877, "learning_rate": 9.996510290825526e-05, "loss": 2.4328941345214843, "memory(GiB)": 62.08, "step": 1390, "token_acc": 0.5051546391752577, "train_speed(iter/s)": 0.671796 }, { "epoch": 0.059766076860460135, "grad_norm": 3.143669605255127, "learning_rate": 9.99648510649545e-05, "loss": 2.357086753845215, "memory(GiB)": 62.08, "step": 1395, "token_acc": 0.46835443037974683, "train_speed(iter/s)": 0.671925 }, { "epoch": 0.05998029218970909, "grad_norm": 3.094806432723999, "learning_rate": 9.996459831649358e-05, "loss": 2.658376693725586, "memory(GiB)": 62.08, "step": 1400, "token_acc": 0.4794952681388013, "train_speed(iter/s)": 0.671743 }, { "epoch": 0.06019450751895806, "grad_norm": 2.9284210205078125, "learning_rate": 9.996434466287709e-05, "loss": 2.2916034698486327, "memory(GiB)": 62.08, "step": 1405, "token_acc": 0.48846153846153845, "train_speed(iter/s)": 0.672059 }, { "epoch": 0.06040872284820702, "grad_norm": 2.807147264480591, "learning_rate": 9.996409010410963e-05, "loss": 2.8911386489868165, "memory(GiB)": 62.08, "step": 1410, "token_acc": 0.44964028776978415, "train_speed(iter/s)": 0.672051 }, { "epoch": 0.06062293817745598, "grad_norm": 2.9061167240142822, "learning_rate": 9.99638346401958e-05, "loss": 2.6157636642456055, "memory(GiB)": 62.08, "step": 1415, "token_acc": 0.46229508196721314, "train_speed(iter/s)": 0.671772 }, { "epoch": 0.06083715350670494, "grad_norm": 2.8162505626678467, "learning_rate": 9.996357827114026e-05, "loss": 2.5498115539550783, "memory(GiB)": 62.08, "step": 1420, "token_acc": 
0.4628975265017668, "train_speed(iter/s)": 0.671152 }, { "epoch": 0.0610513688359539, "grad_norm": 2.886957883834839, "learning_rate": 9.99633209969476e-05, "loss": 2.717921257019043, "memory(GiB)": 62.08, "step": 1425, "token_acc": 0.46078431372549017, "train_speed(iter/s)": 0.671334 }, { "epoch": 0.06126558416520286, "grad_norm": 3.2202048301696777, "learning_rate": 9.996306281762253e-05, "loss": 2.40112190246582, "memory(GiB)": 62.08, "step": 1430, "token_acc": 0.4738955823293173, "train_speed(iter/s)": 0.672075 }, { "epoch": 0.06147979949445182, "grad_norm": 3.2302770614624023, "learning_rate": 9.99628037331697e-05, "loss": 2.9304536819458007, "memory(GiB)": 62.08, "step": 1435, "token_acc": 0.47035573122529645, "train_speed(iter/s)": 0.672575 }, { "epoch": 0.06169401482370079, "grad_norm": 2.8925600051879883, "learning_rate": 9.996254374359381e-05, "loss": 2.2827171325683593, "memory(GiB)": 62.08, "step": 1440, "token_acc": 0.49280575539568344, "train_speed(iter/s)": 0.672519 }, { "epoch": 0.061908230152949745, "grad_norm": 2.924095869064331, "learning_rate": 9.996228284889958e-05, "loss": 2.4667312622070314, "memory(GiB)": 62.08, "step": 1445, "token_acc": 0.46579804560260585, "train_speed(iter/s)": 0.671823 }, { "epoch": 0.0621224454821987, "grad_norm": 2.4367902278900146, "learning_rate": 9.996202104909173e-05, "loss": 2.754535102844238, "memory(GiB)": 62.08, "step": 1450, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.672026 }, { "epoch": 0.06233666081144767, "grad_norm": 3.305656909942627, "learning_rate": 9.996175834417499e-05, "loss": 2.2638355255126954, "memory(GiB)": 62.08, "step": 1455, "token_acc": 0.4580152671755725, "train_speed(iter/s)": 0.672523 }, { "epoch": 0.06255087614069663, "grad_norm": 2.486118793487549, "learning_rate": 9.996149473415413e-05, "loss": 2.3501266479492187, "memory(GiB)": 62.08, "step": 1460, "token_acc": 0.5162337662337663, "train_speed(iter/s)": 0.672261 }, { "epoch": 0.06276509146994559, "grad_norm": 
2.0524351596832275, "learning_rate": 9.996123021903391e-05, "loss": 2.788173866271973, "memory(GiB)": 62.08, "step": 1465, "token_acc": 0.4448979591836735, "train_speed(iter/s)": 0.672416 }, { "epoch": 0.06297930679919456, "grad_norm": 2.73917555809021, "learning_rate": 9.996096479881918e-05, "loss": 2.4650707244873047, "memory(GiB)": 62.08, "step": 1470, "token_acc": 0.43137254901960786, "train_speed(iter/s)": 0.672821 }, { "epoch": 0.06319352212844351, "grad_norm": 2.8177645206451416, "learning_rate": 9.996069847351467e-05, "loss": 2.744881439208984, "memory(GiB)": 62.08, "step": 1475, "token_acc": 0.4631578947368421, "train_speed(iter/s)": 0.673268 }, { "epoch": 0.06340773745769247, "grad_norm": 3.4885752201080322, "learning_rate": 9.996043124312525e-05, "loss": 2.659830093383789, "memory(GiB)": 62.08, "step": 1480, "token_acc": 0.476038338658147, "train_speed(iter/s)": 0.673054 }, { "epoch": 0.06362195278694144, "grad_norm": 2.8074963092803955, "learning_rate": 9.996016310765574e-05, "loss": 2.74228515625, "memory(GiB)": 62.08, "step": 1485, "token_acc": 0.45525291828793774, "train_speed(iter/s)": 0.673274 }, { "epoch": 0.06383616811619039, "grad_norm": 2.769364833831787, "learning_rate": 9.995989406711103e-05, "loss": 2.7691680908203127, "memory(GiB)": 62.08, "step": 1490, "token_acc": 0.40634005763688763, "train_speed(iter/s)": 0.672995 }, { "epoch": 0.06405038344543935, "grad_norm": 2.6080892086029053, "learning_rate": 9.995962412149598e-05, "loss": 2.064108467102051, "memory(GiB)": 62.08, "step": 1495, "token_acc": 0.5458333333333333, "train_speed(iter/s)": 0.672727 }, { "epoch": 0.06426459877468832, "grad_norm": 2.435908079147339, "learning_rate": 9.995935327081544e-05, "loss": 2.250269889831543, "memory(GiB)": 62.08, "step": 1500, "token_acc": 0.4981549815498155, "train_speed(iter/s)": 0.672934 }, { "epoch": 0.06426459877468832, "eval_loss": 2.0458316802978516, "eval_runtime": 16.4887, "eval_samples_per_second": 6.065, "eval_steps_per_second": 6.065, 
"eval_token_acc": 0.5043604651162791, "step": 1500 }, { "epoch": 0.06447881410393727, "grad_norm": 3.5918869972229004, "learning_rate": 9.995908151507438e-05, "loss": 2.744732666015625, "memory(GiB)": 62.08, "step": 1505, "token_acc": 0.4900523560209424, "train_speed(iter/s)": 0.667255 }, { "epoch": 0.06469302943318624, "grad_norm": 3.1082215309143066, "learning_rate": 9.995880885427766e-05, "loss": 2.5567893981933594, "memory(GiB)": 62.08, "step": 1510, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.667551 }, { "epoch": 0.0649072447624352, "grad_norm": 2.5517847537994385, "learning_rate": 9.995853528843029e-05, "loss": 2.5150590896606446, "memory(GiB)": 62.08, "step": 1515, "token_acc": 0.4672897196261682, "train_speed(iter/s)": 0.668014 }, { "epoch": 0.06512146009168417, "grad_norm": 3.0190160274505615, "learning_rate": 9.995826081753716e-05, "loss": 2.352939796447754, "memory(GiB)": 62.08, "step": 1520, "token_acc": 0.49696969696969695, "train_speed(iter/s)": 0.66864 }, { "epoch": 0.06533567542093312, "grad_norm": 4.760136604309082, "learning_rate": 9.995798544160328e-05, "loss": 2.703257751464844, "memory(GiB)": 62.08, "step": 1525, "token_acc": 0.45112781954887216, "train_speed(iter/s)": 0.669168 }, { "epoch": 0.06554989075018208, "grad_norm": 3.123629093170166, "learning_rate": 9.995770916063362e-05, "loss": 2.4061119079589846, "memory(GiB)": 62.08, "step": 1530, "token_acc": 0.46441947565543074, "train_speed(iter/s)": 0.669489 }, { "epoch": 0.06576410607943105, "grad_norm": 2.626248598098755, "learning_rate": 9.995743197463322e-05, "loss": 2.6470973968505858, "memory(GiB)": 62.08, "step": 1535, "token_acc": 0.5, "train_speed(iter/s)": 0.670088 }, { "epoch": 0.06597832140868, "grad_norm": 2.611569404602051, "learning_rate": 9.995715388360706e-05, "loss": 2.6195886611938475, "memory(GiB)": 62.08, "step": 1540, "token_acc": 0.48201438848920863, "train_speed(iter/s)": 0.670739 }, { "epoch": 0.06619253673792896, "grad_norm": 2.563127279281616, 
"learning_rate": 9.995687488756019e-05, "loss": 2.584370422363281, "memory(GiB)": 62.08, "step": 1545, "token_acc": 0.46875, "train_speed(iter/s)": 0.671003 }, { "epoch": 0.06640675206717793, "grad_norm": 3.8594701290130615, "learning_rate": 9.995659498649768e-05, "loss": 2.4714147567749025, "memory(GiB)": 62.08, "step": 1550, "token_acc": 0.4854014598540146, "train_speed(iter/s)": 0.671192 }, { "epoch": 0.0666209673964269, "grad_norm": 2.988906145095825, "learning_rate": 9.995631418042457e-05, "loss": 2.5522512435913085, "memory(GiB)": 62.08, "step": 1555, "token_acc": 0.47601476014760147, "train_speed(iter/s)": 0.671213 }, { "epoch": 0.06683518272567585, "grad_norm": 3.0993826389312744, "learning_rate": 9.995603246934598e-05, "loss": 2.3006717681884767, "memory(GiB)": 62.08, "step": 1560, "token_acc": 0.5067264573991032, "train_speed(iter/s)": 0.671241 }, { "epoch": 0.06704939805492481, "grad_norm": 3.2872073650360107, "learning_rate": 9.9955749853267e-05, "loss": 2.3663681030273436, "memory(GiB)": 62.08, "step": 1565, "token_acc": 0.49122807017543857, "train_speed(iter/s)": 0.671057 }, { "epoch": 0.06726361338417378, "grad_norm": 2.3361270427703857, "learning_rate": 9.995546633219275e-05, "loss": 2.684951400756836, "memory(GiB)": 62.08, "step": 1570, "token_acc": 0.468503937007874, "train_speed(iter/s)": 0.670914 }, { "epoch": 0.06747782871342273, "grad_norm": 2.5829453468322754, "learning_rate": 9.995518190612836e-05, "loss": 2.3161155700683596, "memory(GiB)": 62.08, "step": 1575, "token_acc": 0.4869281045751634, "train_speed(iter/s)": 0.670783 }, { "epoch": 0.06769204404267169, "grad_norm": 2.8307249546051025, "learning_rate": 9.995489657507899e-05, "loss": 2.7056182861328124, "memory(GiB)": 62.08, "step": 1580, "token_acc": 0.40397350993377484, "train_speed(iter/s)": 0.670104 }, { "epoch": 0.06790625937192066, "grad_norm": 2.4905078411102295, "learning_rate": 9.995461033904981e-05, "loss": 2.2495498657226562, "memory(GiB)": 62.08, "step": 1585, "token_acc": 
0.5265151515151515, "train_speed(iter/s)": 0.669832 }, { "epoch": 0.06812047470116961, "grad_norm": 5.014430999755859, "learning_rate": 9.995432319804599e-05, "loss": 2.262774658203125, "memory(GiB)": 62.08, "step": 1590, "token_acc": 0.46691176470588236, "train_speed(iter/s)": 0.66998 }, { "epoch": 0.06833469003041857, "grad_norm": 2.732738494873047, "learning_rate": 9.995403515207275e-05, "loss": 2.476114845275879, "memory(GiB)": 62.08, "step": 1595, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.669241 }, { "epoch": 0.06854890535966754, "grad_norm": 2.9734950065612793, "learning_rate": 9.995374620113532e-05, "loss": 2.672146034240723, "memory(GiB)": 62.08, "step": 1600, "token_acc": 0.472, "train_speed(iter/s)": 0.669246 }, { "epoch": 0.0687631206889165, "grad_norm": 2.7754881381988525, "learning_rate": 9.99534563452389e-05, "loss": 2.220382308959961, "memory(GiB)": 62.08, "step": 1605, "token_acc": 0.53125, "train_speed(iter/s)": 0.66892 }, { "epoch": 0.06897733601816546, "grad_norm": 3.4146907329559326, "learning_rate": 9.995316558438875e-05, "loss": 2.4165258407592773, "memory(GiB)": 62.08, "step": 1610, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.668262 }, { "epoch": 0.06919155134741442, "grad_norm": 3.948826789855957, "learning_rate": 9.995287391859016e-05, "loss": 2.432681846618652, "memory(GiB)": 62.08, "step": 1615, "token_acc": 0.4898785425101215, "train_speed(iter/s)": 0.667992 }, { "epoch": 0.06940576667666339, "grad_norm": 3.2957024574279785, "learning_rate": 9.99525813478484e-05, "loss": 2.6522598266601562, "memory(GiB)": 62.08, "step": 1620, "token_acc": 0.4759036144578313, "train_speed(iter/s)": 0.668228 }, { "epoch": 0.06961998200591234, "grad_norm": 3.3527417182922363, "learning_rate": 9.995228787216876e-05, "loss": 2.587006378173828, "memory(GiB)": 62.08, "step": 1625, "token_acc": 0.4728682170542636, "train_speed(iter/s)": 0.668318 }, { "epoch": 0.0698341973351613, "grad_norm": 3.3110463619232178, "learning_rate": 
9.995199349155658e-05, "loss": 2.384866714477539, "memory(GiB)": 62.08, "step": 1630, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.668958 }, { "epoch": 0.07004841266441027, "grad_norm": 4.07573127746582, "learning_rate": 9.995169820601715e-05, "loss": 2.278519058227539, "memory(GiB)": 62.08, "step": 1635, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.668284 }, { "epoch": 0.07026262799365923, "grad_norm": 2.6557915210723877, "learning_rate": 9.995140201555589e-05, "loss": 2.6987606048583985, "memory(GiB)": 62.08, "step": 1640, "token_acc": 0.4459016393442623, "train_speed(iter/s)": 0.668806 }, { "epoch": 0.07047684332290818, "grad_norm": 2.091798782348633, "learning_rate": 9.99511049201781e-05, "loss": 2.5233959197998046, "memory(GiB)": 62.08, "step": 1645, "token_acc": 0.48562300319488816, "train_speed(iter/s)": 0.668955 }, { "epoch": 0.07069105865215715, "grad_norm": 2.6455535888671875, "learning_rate": 9.995080691988919e-05, "loss": 2.3498918533325197, "memory(GiB)": 62.08, "step": 1650, "token_acc": 0.4432624113475177, "train_speed(iter/s)": 0.669049 }, { "epoch": 0.07090527398140611, "grad_norm": 2.727426290512085, "learning_rate": 9.995050801469454e-05, "loss": 2.607665252685547, "memory(GiB)": 62.08, "step": 1655, "token_acc": 0.46075085324232085, "train_speed(iter/s)": 0.669158 }, { "epoch": 0.07111948931065507, "grad_norm": 3.681596279144287, "learning_rate": 9.995020820459959e-05, "loss": 2.458875846862793, "memory(GiB)": 62.08, "step": 1660, "token_acc": 0.49429657794676807, "train_speed(iter/s)": 0.66955 }, { "epoch": 0.07133370463990403, "grad_norm": 2.3505640029907227, "learning_rate": 9.994990748960977e-05, "loss": 2.4793670654296873, "memory(GiB)": 62.08, "step": 1665, "token_acc": 0.5056603773584906, "train_speed(iter/s)": 0.669458 }, { "epoch": 0.071547919969153, "grad_norm": 2.668290615081787, "learning_rate": 9.994960586973053e-05, "loss": 2.2742605209350586, "memory(GiB)": 62.08, "step": 1670, "token_acc": 
0.4884488448844885, "train_speed(iter/s)": 0.669388 }, { "epoch": 0.07176213529840195, "grad_norm": 3.1762750148773193, "learning_rate": 9.994930334496731e-05, "loss": 2.5433868408203124, "memory(GiB)": 62.08, "step": 1675, "token_acc": 0.4618055555555556, "train_speed(iter/s)": 0.669517 }, { "epoch": 0.07197635062765091, "grad_norm": 5.277214527130127, "learning_rate": 9.994899991532559e-05, "loss": 2.2180200576782227, "memory(GiB)": 62.08, "step": 1680, "token_acc": 0.5204460966542751, "train_speed(iter/s)": 0.669321 }, { "epoch": 0.07219056595689988, "grad_norm": 3.0805087089538574, "learning_rate": 9.994869558081089e-05, "loss": 2.1837461471557615, "memory(GiB)": 62.08, "step": 1685, "token_acc": 0.5410447761194029, "train_speed(iter/s)": 0.669167 }, { "epoch": 0.07240478128614884, "grad_norm": 3.432307720184326, "learning_rate": 9.994839034142872e-05, "loss": 2.428533172607422, "memory(GiB)": 62.08, "step": 1690, "token_acc": 0.49508196721311476, "train_speed(iter/s)": 0.669145 }, { "epoch": 0.0726189966153978, "grad_norm": 7.242842674255371, "learning_rate": 9.99480841971846e-05, "loss": 2.5630365371704102, "memory(GiB)": 62.08, "step": 1695, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.668737 }, { "epoch": 0.07283321194464676, "grad_norm": 2.459641456604004, "learning_rate": 9.994777714808408e-05, "loss": 2.474386787414551, "memory(GiB)": 62.08, "step": 1700, "token_acc": 0.45390070921985815, "train_speed(iter/s)": 0.668654 }, { "epoch": 0.07304742727389572, "grad_norm": 2.7535133361816406, "learning_rate": 9.994746919413272e-05, "loss": 2.4410839080810547, "memory(GiB)": 62.08, "step": 1705, "token_acc": 0.4470588235294118, "train_speed(iter/s)": 0.667609 }, { "epoch": 0.07326164260314467, "grad_norm": 2.9960460662841797, "learning_rate": 9.994716033533611e-05, "loss": 2.699314498901367, "memory(GiB)": 62.08, "step": 1710, "token_acc": 0.44947735191637633, "train_speed(iter/s)": 0.667795 }, { "epoch": 0.07347585793239364, "grad_norm": 
3.373379945755005, "learning_rate": 9.994685057169982e-05, "loss": 2.5919544219970705, "memory(GiB)": 62.08, "step": 1715, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.667778 }, { "epoch": 0.0736900732616426, "grad_norm": 4.009030818939209, "learning_rate": 9.994653990322949e-05, "loss": 2.3067211151123046, "memory(GiB)": 62.08, "step": 1720, "token_acc": 0.5157232704402516, "train_speed(iter/s)": 0.668029 }, { "epoch": 0.07390428859089157, "grad_norm": 2.8195645809173584, "learning_rate": 9.994622832993072e-05, "loss": 2.292642593383789, "memory(GiB)": 62.08, "step": 1725, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.66832 }, { "epoch": 0.07411850392014052, "grad_norm": 2.673375368118286, "learning_rate": 9.994591585180919e-05, "loss": 2.450043487548828, "memory(GiB)": 62.08, "step": 1730, "token_acc": 0.4629156010230179, "train_speed(iter/s)": 0.668398 }, { "epoch": 0.07433271924938949, "grad_norm": 2.700171709060669, "learning_rate": 9.994560246887055e-05, "loss": 2.5470809936523438, "memory(GiB)": 62.08, "step": 1735, "token_acc": 0.45422535211267606, "train_speed(iter/s)": 0.668733 }, { "epoch": 0.07454693457863845, "grad_norm": 4.7529377937316895, "learning_rate": 9.994528818112044e-05, "loss": 2.309577751159668, "memory(GiB)": 62.08, "step": 1740, "token_acc": 0.4921259842519685, "train_speed(iter/s)": 0.669074 }, { "epoch": 0.0747611499078874, "grad_norm": 2.556361675262451, "learning_rate": 9.99449729885646e-05, "loss": 2.5201221466064454, "memory(GiB)": 62.08, "step": 1745, "token_acc": 0.444794952681388, "train_speed(iter/s)": 0.669454 }, { "epoch": 0.07497536523713637, "grad_norm": 3.804819107055664, "learning_rate": 9.994465689120871e-05, "loss": 2.404924011230469, "memory(GiB)": 62.08, "step": 1750, "token_acc": 0.46017699115044247, "train_speed(iter/s)": 0.669897 }, { "epoch": 0.07518958056638533, "grad_norm": 2.8151257038116455, "learning_rate": 9.994433988905851e-05, "loss": 2.528926467895508, "memory(GiB)": 62.08, 
"step": 1755, "token_acc": 0.49328859060402686, "train_speed(iter/s)": 0.669972 }, { "epoch": 0.0754037958956343, "grad_norm": 4.093504428863525, "learning_rate": 9.994402198211977e-05, "loss": 2.8784461975097657, "memory(GiB)": 62.08, "step": 1760, "token_acc": 0.4652014652014652, "train_speed(iter/s)": 0.670159 }, { "epoch": 0.07561801122488325, "grad_norm": 3.871126413345337, "learning_rate": 9.994370317039819e-05, "loss": 2.3623729705810548, "memory(GiB)": 62.08, "step": 1765, "token_acc": 0.4549019607843137, "train_speed(iter/s)": 0.670542 }, { "epoch": 0.07583222655413221, "grad_norm": 2.630730390548706, "learning_rate": 9.994338345389958e-05, "loss": 2.366230010986328, "memory(GiB)": 62.08, "step": 1770, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.670454 }, { "epoch": 0.07604644188338118, "grad_norm": 4.943000793457031, "learning_rate": 9.994306283262973e-05, "loss": 2.2440671920776367, "memory(GiB)": 62.08, "step": 1775, "token_acc": 0.45051194539249145, "train_speed(iter/s)": 0.670327 }, { "epoch": 0.07626065721263013, "grad_norm": 3.5374796390533447, "learning_rate": 9.994274130659445e-05, "loss": 2.488209915161133, "memory(GiB)": 62.08, "step": 1780, "token_acc": 0.47318611987381703, "train_speed(iter/s)": 0.670141 }, { "epoch": 0.0764748725418791, "grad_norm": 2.1456375122070312, "learning_rate": 9.994241887579956e-05, "loss": 2.380768394470215, "memory(GiB)": 62.08, "step": 1785, "token_acc": 0.48405797101449277, "train_speed(iter/s)": 0.670085 }, { "epoch": 0.07668908787112806, "grad_norm": 2.91194486618042, "learning_rate": 9.994209554025091e-05, "loss": 2.5074436187744142, "memory(GiB)": 62.08, "step": 1790, "token_acc": 0.49809885931558934, "train_speed(iter/s)": 0.670371 }, { "epoch": 0.07690330320037701, "grad_norm": 3.127539873123169, "learning_rate": 9.994177129995434e-05, "loss": 2.7101768493652343, "memory(GiB)": 62.08, "step": 1795, "token_acc": 0.42296918767507, "train_speed(iter/s)": 0.670785 }, { "epoch": 
0.07711751852962598, "grad_norm": 2.23175311088562, "learning_rate": 9.994144615491573e-05, "loss": 2.5108612060546873, "memory(GiB)": 62.08, "step": 1800, "token_acc": 0.46283783783783783, "train_speed(iter/s)": 0.670564 }, { "epoch": 0.07733173385887494, "grad_norm": 2.916194200515747, "learning_rate": 9.994112010514099e-05, "loss": 2.4505380630493163, "memory(GiB)": 62.08, "step": 1805, "token_acc": 0.48589341692789967, "train_speed(iter/s)": 0.670879 }, { "epoch": 0.07754594918812391, "grad_norm": 3.4708456993103027, "learning_rate": 9.994079315063598e-05, "loss": 2.482617950439453, "memory(GiB)": 62.08, "step": 1810, "token_acc": 0.47735191637630664, "train_speed(iter/s)": 0.671234 }, { "epoch": 0.07776016451737286, "grad_norm": 4.478388786315918, "learning_rate": 9.994046529140668e-05, "loss": 2.5403558731079103, "memory(GiB)": 62.08, "step": 1815, "token_acc": 0.4823529411764706, "train_speed(iter/s)": 0.671099 }, { "epoch": 0.07797437984662182, "grad_norm": 2.2479166984558105, "learning_rate": 9.9940136527459e-05, "loss": 2.6675041198730467, "memory(GiB)": 62.08, "step": 1820, "token_acc": 0.4604105571847507, "train_speed(iter/s)": 0.670939 }, { "epoch": 0.07818859517587079, "grad_norm": 2.4138505458831787, "learning_rate": 9.993980685879888e-05, "loss": 2.3011489868164063, "memory(GiB)": 62.08, "step": 1825, "token_acc": 0.5018181818181818, "train_speed(iter/s)": 0.670859 }, { "epoch": 0.07840281050511974, "grad_norm": 3.0831246376037598, "learning_rate": 9.993947628543234e-05, "loss": 2.412697410583496, "memory(GiB)": 62.08, "step": 1830, "token_acc": 0.4982456140350877, "train_speed(iter/s)": 0.670737 }, { "epoch": 0.0786170258343687, "grad_norm": 3.140913248062134, "learning_rate": 9.993914480736532e-05, "loss": 2.542030906677246, "memory(GiB)": 62.08, "step": 1835, "token_acc": 0.47794117647058826, "train_speed(iter/s)": 0.670626 }, { "epoch": 0.07883124116361767, "grad_norm": 2.660673141479492, "learning_rate": 9.993881242460387e-05, "loss": 
2.697663688659668, "memory(GiB)": 62.08, "step": 1840, "token_acc": 0.45896656534954405, "train_speed(iter/s)": 0.671031 }, { "epoch": 0.07904545649286664, "grad_norm": 2.4171459674835205, "learning_rate": 9.993847913715396e-05, "loss": 2.378768539428711, "memory(GiB)": 62.08, "step": 1845, "token_acc": 0.45918367346938777, "train_speed(iter/s)": 0.671175 }, { "epoch": 0.07925967182211559, "grad_norm": 2.8504576683044434, "learning_rate": 9.993814494502167e-05, "loss": 2.3774698257446287, "memory(GiB)": 62.08, "step": 1850, "token_acc": 0.4876543209876543, "train_speed(iter/s)": 0.671372 }, { "epoch": 0.07947388715136455, "grad_norm": 2.5263428688049316, "learning_rate": 9.993780984821304e-05, "loss": 2.5043601989746094, "memory(GiB)": 62.08, "step": 1855, "token_acc": 0.48857142857142855, "train_speed(iter/s)": 0.671504 }, { "epoch": 0.07968810248061352, "grad_norm": 3.234280824661255, "learning_rate": 9.993747384673412e-05, "loss": 2.8189712524414063, "memory(GiB)": 62.08, "step": 1860, "token_acc": 0.4129692832764505, "train_speed(iter/s)": 0.671662 }, { "epoch": 0.07990231780986247, "grad_norm": 2.904743194580078, "learning_rate": 9.993713694059103e-05, "loss": 2.5983158111572267, "memory(GiB)": 62.08, "step": 1865, "token_acc": 0.46551724137931033, "train_speed(iter/s)": 0.672154 }, { "epoch": 0.08011653313911143, "grad_norm": 3.1607019901275635, "learning_rate": 9.993679912978986e-05, "loss": 2.6634859085083007, "memory(GiB)": 62.08, "step": 1870, "token_acc": 0.45584045584045585, "train_speed(iter/s)": 0.67188 }, { "epoch": 0.0803307484683604, "grad_norm": 2.3440043926239014, "learning_rate": 9.993646041433673e-05, "loss": 2.470184326171875, "memory(GiB)": 62.08, "step": 1875, "token_acc": 0.4690909090909091, "train_speed(iter/s)": 0.671802 }, { "epoch": 0.08054496379760935, "grad_norm": 2.703098773956299, "learning_rate": 9.993612079423776e-05, "loss": 1.9990102767944335, "memory(GiB)": 62.08, "step": 1880, "token_acc": 0.5450643776824035, 
"train_speed(iter/s)": 0.671967 }, { "epoch": 0.08075917912685832, "grad_norm": 2.7990875244140625, "learning_rate": 9.993578026949913e-05, "loss": 2.420424461364746, "memory(GiB)": 62.08, "step": 1885, "token_acc": 0.5201612903225806, "train_speed(iter/s)": 0.672326 }, { "epoch": 0.08097339445610728, "grad_norm": 2.924644708633423, "learning_rate": 9.9935438840127e-05, "loss": 2.4880294799804688, "memory(GiB)": 62.08, "step": 1890, "token_acc": 0.4886731391585761, "train_speed(iter/s)": 0.672196 }, { "epoch": 0.08118760978535625, "grad_norm": 3.4448885917663574, "learning_rate": 9.993509650612756e-05, "loss": 2.313882827758789, "memory(GiB)": 62.08, "step": 1895, "token_acc": 0.46179401993355484, "train_speed(iter/s)": 0.672191 }, { "epoch": 0.0814018251146052, "grad_norm": 2.1969821453094482, "learning_rate": 9.993475326750699e-05, "loss": 2.3656490325927733, "memory(GiB)": 62.08, "step": 1900, "token_acc": 0.4861111111111111, "train_speed(iter/s)": 0.672082 }, { "epoch": 0.08161604044385416, "grad_norm": 3.645211935043335, "learning_rate": 9.993440912427153e-05, "loss": 2.165558433532715, "memory(GiB)": 62.08, "step": 1905, "token_acc": 0.5073529411764706, "train_speed(iter/s)": 0.672314 }, { "epoch": 0.08183025577310313, "grad_norm": 2.9676480293273926, "learning_rate": 9.993406407642739e-05, "loss": 2.3555530548095702, "memory(GiB)": 62.08, "step": 1910, "token_acc": 0.5137614678899083, "train_speed(iter/s)": 0.672687 }, { "epoch": 0.08204447110235208, "grad_norm": 2.8690192699432373, "learning_rate": 9.993371812398085e-05, "loss": 2.5066350936889648, "memory(GiB)": 62.08, "step": 1915, "token_acc": 0.45806451612903226, "train_speed(iter/s)": 0.672807 }, { "epoch": 0.08225868643160104, "grad_norm": 3.239920139312744, "learning_rate": 9.993337126693815e-05, "loss": 2.2675743103027344, "memory(GiB)": 62.08, "step": 1920, "token_acc": 0.5, "train_speed(iter/s)": 0.673133 }, { "epoch": 0.08247290176085001, "grad_norm": 2.6506307125091553, "learning_rate": 
9.99330235053056e-05, "loss": 2.2746198654174803, "memory(GiB)": 62.08, "step": 1925, "token_acc": 0.5195729537366548, "train_speed(iter/s)": 0.672529 }, { "epoch": 0.08268711709009897, "grad_norm": 2.8396055698394775, "learning_rate": 9.993267483908951e-05, "loss": 2.6333240509033202, "memory(GiB)": 62.08, "step": 1930, "token_acc": 0.4707792207792208, "train_speed(iter/s)": 0.672538 }, { "epoch": 0.08290133241934793, "grad_norm": 2.6648175716400146, "learning_rate": 9.993232526829615e-05, "loss": 2.720189666748047, "memory(GiB)": 62.08, "step": 1935, "token_acc": 0.40828402366863903, "train_speed(iter/s)": 0.672669 }, { "epoch": 0.08311554774859689, "grad_norm": 2.6782596111297607, "learning_rate": 9.993197479293188e-05, "loss": 2.391793060302734, "memory(GiB)": 62.08, "step": 1940, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.672651 }, { "epoch": 0.08332976307784586, "grad_norm": 2.078641414642334, "learning_rate": 9.993162341300304e-05, "loss": 2.3523494720458986, "memory(GiB)": 62.08, "step": 1945, "token_acc": 0.5261437908496732, "train_speed(iter/s)": 0.672533 }, { "epoch": 0.08354397840709481, "grad_norm": 3.456526756286621, "learning_rate": 9.993127112851602e-05, "loss": 2.4426565170288086, "memory(GiB)": 62.08, "step": 1950, "token_acc": 0.47592067988668557, "train_speed(iter/s)": 0.672413 }, { "epoch": 0.08375819373634377, "grad_norm": 2.7578341960906982, "learning_rate": 9.993091793947717e-05, "loss": 2.3445858001708983, "memory(GiB)": 62.08, "step": 1955, "token_acc": 0.5, "train_speed(iter/s)": 0.671736 }, { "epoch": 0.08397240906559274, "grad_norm": 3.314490556716919, "learning_rate": 9.993056384589291e-05, "loss": 2.411545181274414, "memory(GiB)": 62.08, "step": 1960, "token_acc": 0.48226950354609927, "train_speed(iter/s)": 0.671717 }, { "epoch": 0.08418662439484169, "grad_norm": 4.860118389129639, "learning_rate": 9.993020884776965e-05, "loss": 2.6107282638549805, "memory(GiB)": 62.08, "step": 1965, "token_acc": 0.4517241379310345, 
"train_speed(iter/s)": 0.672129 }, { "epoch": 0.08440083972409065, "grad_norm": 2.8466436862945557, "learning_rate": 9.992985294511382e-05, "loss": 2.4900835037231444, "memory(GiB)": 62.08, "step": 1970, "token_acc": 0.47191011235955055, "train_speed(iter/s)": 0.672312 }, { "epoch": 0.08461505505333962, "grad_norm": 2.512146234512329, "learning_rate": 9.992949613793184e-05, "loss": 2.299394226074219, "memory(GiB)": 62.08, "step": 1975, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.672329 }, { "epoch": 0.08482927038258858, "grad_norm": 2.282844305038452, "learning_rate": 9.992913842623022e-05, "loss": 2.4595977783203127, "memory(GiB)": 62.08, "step": 1980, "token_acc": 0.4925373134328358, "train_speed(iter/s)": 0.672463 }, { "epoch": 0.08504348571183754, "grad_norm": 3.174105405807495, "learning_rate": 9.992877981001543e-05, "loss": 2.3917869567871093, "memory(GiB)": 62.08, "step": 1985, "token_acc": 0.493006993006993, "train_speed(iter/s)": 0.671828 }, { "epoch": 0.0852577010410865, "grad_norm": 2.6068506240844727, "learning_rate": 9.992842028929395e-05, "loss": 2.4587467193603514, "memory(GiB)": 62.08, "step": 1990, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.671701 }, { "epoch": 0.08547191637033547, "grad_norm": 3.223079204559326, "learning_rate": 9.992805986407227e-05, "loss": 2.4878026962280275, "memory(GiB)": 62.08, "step": 1995, "token_acc": 0.444, "train_speed(iter/s)": 0.671534 }, { "epoch": 0.08568613169958442, "grad_norm": 3.0564956665039062, "learning_rate": 9.992769853435698e-05, "loss": 2.3672372817993166, "memory(GiB)": 62.08, "step": 2000, "token_acc": 0.4756944444444444, "train_speed(iter/s)": 0.671425 }, { "epoch": 0.08568613169958442, "eval_loss": 2.1064274311065674, "eval_runtime": 17.415, "eval_samples_per_second": 5.742, "eval_steps_per_second": 5.742, "eval_token_acc": 0.4818731117824773, "step": 2000 }, { "epoch": 0.08590034702883338, "grad_norm": 3.1396045684814453, "learning_rate": 9.992733630015459e-05, "loss": 
2.511952590942383, "memory(GiB)": 62.08, "step": 2005, "token_acc": 0.4823906083244397, "train_speed(iter/s)": 0.667207 }, { "epoch": 0.08611456235808235, "grad_norm": 2.5622663497924805, "learning_rate": 9.992697316147167e-05, "loss": 2.782255172729492, "memory(GiB)": 62.08, "step": 2010, "token_acc": 0.4342857142857143, "train_speed(iter/s)": 0.666911 }, { "epoch": 0.08632877768733131, "grad_norm": 3.8597793579101562, "learning_rate": 9.992660911831479e-05, "loss": 2.335088920593262, "memory(GiB)": 62.08, "step": 2015, "token_acc": 0.5129032258064516, "train_speed(iter/s)": 0.667125 }, { "epoch": 0.08654299301658026, "grad_norm": 3.805985689163208, "learning_rate": 9.992624417069056e-05, "loss": 2.4520233154296873, "memory(GiB)": 62.08, "step": 2020, "token_acc": 0.5102739726027398, "train_speed(iter/s)": 0.667368 }, { "epoch": 0.08675720834582923, "grad_norm": 2.6948208808898926, "learning_rate": 9.992587831860558e-05, "loss": 2.4801986694335936, "memory(GiB)": 62.08, "step": 2025, "token_acc": 0.47678018575851394, "train_speed(iter/s)": 0.667461 }, { "epoch": 0.0869714236750782, "grad_norm": 2.154984712600708, "learning_rate": 9.992551156206646e-05, "loss": 2.3856121063232423, "memory(GiB)": 62.08, "step": 2030, "token_acc": 0.5075075075075075, "train_speed(iter/s)": 0.66722 }, { "epoch": 0.08718563900432715, "grad_norm": 2.3231728076934814, "learning_rate": 9.99251439010799e-05, "loss": 2.6646020889282225, "memory(GiB)": 62.08, "step": 2035, "token_acc": 0.4298507462686567, "train_speed(iter/s)": 0.667472 }, { "epoch": 0.08739985433357611, "grad_norm": 5.406282901763916, "learning_rate": 9.992477533565249e-05, "loss": 2.738014793395996, "memory(GiB)": 62.08, "step": 2040, "token_acc": 0.4166666666666667, "train_speed(iter/s)": 0.667667 }, { "epoch": 0.08761406966282508, "grad_norm": 2.76774525642395, "learning_rate": 9.992440586579095e-05, "loss": 2.4926883697509767, "memory(GiB)": 62.08, "step": 2045, "token_acc": 0.4823529411764706, "train_speed(iter/s)": 
0.667316 }, { "epoch": 0.08782828499207403, "grad_norm": 3.222432851791382, "learning_rate": 9.992403549150198e-05, "loss": 2.618966484069824, "memory(GiB)": 62.08, "step": 2050, "token_acc": 0.47257383966244726, "train_speed(iter/s)": 0.667699 }, { "epoch": 0.08804250032132299, "grad_norm": 2.485558032989502, "learning_rate": 9.992366421279227e-05, "loss": 2.417836380004883, "memory(GiB)": 62.08, "step": 2055, "token_acc": 0.46853146853146854, "train_speed(iter/s)": 0.667981 }, { "epoch": 0.08825671565057196, "grad_norm": 2.328907012939453, "learning_rate": 9.992329202966855e-05, "loss": 2.5770538330078123, "memory(GiB)": 62.08, "step": 2060, "token_acc": 0.4249084249084249, "train_speed(iter/s)": 0.668 }, { "epoch": 0.08847093097982092, "grad_norm": 2.651778221130371, "learning_rate": 9.992291894213755e-05, "loss": 2.7708965301513673, "memory(GiB)": 62.08, "step": 2065, "token_acc": 0.4916387959866221, "train_speed(iter/s)": 0.66809 }, { "epoch": 0.08868514630906987, "grad_norm": 2.51782488822937, "learning_rate": 9.992254495020605e-05, "loss": 2.84723014831543, "memory(GiB)": 62.08, "step": 2070, "token_acc": 0.3987538940809969, "train_speed(iter/s)": 0.667881 }, { "epoch": 0.08889936163831884, "grad_norm": 3.2636728286743164, "learning_rate": 9.992217005388083e-05, "loss": 2.4627412796020507, "memory(GiB)": 62.08, "step": 2075, "token_acc": 0.4605809128630705, "train_speed(iter/s)": 0.667932 }, { "epoch": 0.0891135769675678, "grad_norm": 2.229637622833252, "learning_rate": 9.992179425316865e-05, "loss": 2.6077369689941405, "memory(GiB)": 62.08, "step": 2080, "token_acc": 0.5147540983606558, "train_speed(iter/s)": 0.668324 }, { "epoch": 0.08932779229681675, "grad_norm": 2.5455949306488037, "learning_rate": 9.992141754807635e-05, "loss": 2.5436119079589843, "memory(GiB)": 62.08, "step": 2085, "token_acc": 0.4755244755244755, "train_speed(iter/s)": 0.668517 }, { "epoch": 0.08954200762606572, "grad_norm": 2.143475294113159, "learning_rate": 9.992103993861072e-05, 
"loss": 2.3562198638916017, "memory(GiB)": 62.08, "step": 2090, "token_acc": 0.45098039215686275, "train_speed(iter/s)": 0.668831 }, { "epoch": 0.08975622295531469, "grad_norm": 2.652015447616577, "learning_rate": 9.992066142477865e-05, "loss": 2.3004201889038085, "memory(GiB)": 62.08, "step": 2095, "token_acc": 0.5163398692810458, "train_speed(iter/s)": 0.669209 }, { "epoch": 0.08997043828456365, "grad_norm": 2.6156704425811768, "learning_rate": 9.992028200658695e-05, "loss": 2.5154596328735352, "memory(GiB)": 62.08, "step": 2100, "token_acc": 0.44648318042813456, "train_speed(iter/s)": 0.669295 }, { "epoch": 0.0901846536138126, "grad_norm": 3.571282386779785, "learning_rate": 9.991990168404251e-05, "loss": 2.7117691040039062, "memory(GiB)": 62.08, "step": 2105, "token_acc": 0.43986254295532645, "train_speed(iter/s)": 0.669112 }, { "epoch": 0.09039886894306157, "grad_norm": 3.11337947845459, "learning_rate": 9.991952045715223e-05, "loss": 2.235332489013672, "memory(GiB)": 62.08, "step": 2110, "token_acc": 0.5019455252918288, "train_speed(iter/s)": 0.669223 }, { "epoch": 0.09061308427231053, "grad_norm": 3.7965586185455322, "learning_rate": 9.991913832592301e-05, "loss": 2.7633831024169924, "memory(GiB)": 62.08, "step": 2115, "token_acc": 0.4326923076923077, "train_speed(iter/s)": 0.669441 }, { "epoch": 0.09082729960155948, "grad_norm": 5.137047290802002, "learning_rate": 9.991875529036178e-05, "loss": 2.7278547286987305, "memory(GiB)": 62.08, "step": 2120, "token_acc": 0.4591439688715953, "train_speed(iter/s)": 0.669521 }, { "epoch": 0.09104151493080845, "grad_norm": 4.598978519439697, "learning_rate": 9.991837135047546e-05, "loss": 2.6067222595214843, "memory(GiB)": 62.08, "step": 2125, "token_acc": 0.43356643356643354, "train_speed(iter/s)": 0.669422 }, { "epoch": 0.09125573026005741, "grad_norm": 3.278451919555664, "learning_rate": 9.991798650627103e-05, "loss": 2.582527732849121, "memory(GiB)": 62.08, "step": 2130, "token_acc": 0.49032258064516127, 
"train_speed(iter/s)": 0.669577 }, { "epoch": 0.09146994558930636, "grad_norm": 2.8943259716033936, "learning_rate": 9.991760075775543e-05, "loss": 2.4733627319335936, "memory(GiB)": 62.08, "step": 2135, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.669418 }, { "epoch": 0.09168416091855533, "grad_norm": 4.239304065704346, "learning_rate": 9.991721410493566e-05, "loss": 2.5262250900268555, "memory(GiB)": 62.08, "step": 2140, "token_acc": 0.49019607843137253, "train_speed(iter/s)": 0.669507 }, { "epoch": 0.0918983762478043, "grad_norm": 2.400482416152954, "learning_rate": 9.991682654781874e-05, "loss": 2.3551408767700197, "memory(GiB)": 62.08, "step": 2145, "token_acc": 0.5, "train_speed(iter/s)": 0.669441 }, { "epoch": 0.09211259157705326, "grad_norm": 4.095634460449219, "learning_rate": 9.991643808641168e-05, "loss": 2.596730041503906, "memory(GiB)": 62.08, "step": 2150, "token_acc": 0.4489051094890511, "train_speed(iter/s)": 0.669338 }, { "epoch": 0.09232680690630221, "grad_norm": 4.361393928527832, "learning_rate": 9.99160487207215e-05, "loss": 1.9146673202514648, "memory(GiB)": 62.08, "step": 2155, "token_acc": 0.5535714285714286, "train_speed(iter/s)": 0.66931 }, { "epoch": 0.09254102223555118, "grad_norm": 3.1221532821655273, "learning_rate": 9.991565845075531e-05, "loss": 2.2350826263427734, "memory(GiB)": 62.08, "step": 2160, "token_acc": 0.5018867924528302, "train_speed(iter/s)": 0.669429 }, { "epoch": 0.09275523756480014, "grad_norm": 3.0400044918060303, "learning_rate": 9.991526727652012e-05, "loss": 2.1149791717529296, "memory(GiB)": 62.08, "step": 2165, "token_acc": 0.5467625899280576, "train_speed(iter/s)": 0.669223 }, { "epoch": 0.09296945289404909, "grad_norm": 4.837386131286621, "learning_rate": 9.991487519802305e-05, "loss": 2.5002403259277344, "memory(GiB)": 62.08, "step": 2170, "token_acc": 0.46774193548387094, "train_speed(iter/s)": 0.669287 }, { "epoch": 0.09318366822329806, "grad_norm": 2.547358274459839, "learning_rate": 
9.991448221527118e-05, "loss": 2.62548885345459, "memory(GiB)": 62.08, "step": 2175, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.669059 }, { "epoch": 0.09339788355254702, "grad_norm": 3.7802135944366455, "learning_rate": 9.991408832827165e-05, "loss": 2.4411396026611327, "memory(GiB)": 62.08, "step": 2180, "token_acc": 0.4967105263157895, "train_speed(iter/s)": 0.668975 }, { "epoch": 0.09361209888179599, "grad_norm": 3.4469079971313477, "learning_rate": 9.991369353703159e-05, "loss": 2.757773017883301, "memory(GiB)": 62.08, "step": 2185, "token_acc": 0.48771929824561405, "train_speed(iter/s)": 0.669193 }, { "epoch": 0.09382631421104494, "grad_norm": 2.058375120162964, "learning_rate": 9.991329784155814e-05, "loss": 2.60113525390625, "memory(GiB)": 62.08, "step": 2190, "token_acc": 0.45980707395498394, "train_speed(iter/s)": 0.669211 }, { "epoch": 0.0940405295402939, "grad_norm": 2.959352970123291, "learning_rate": 9.991290124185849e-05, "loss": 2.2702400207519533, "memory(GiB)": 62.08, "step": 2195, "token_acc": 0.4860557768924303, "train_speed(iter/s)": 0.66905 }, { "epoch": 0.09425474486954287, "grad_norm": 2.496218204498291, "learning_rate": 9.991250373793979e-05, "loss": 2.5608407974243166, "memory(GiB)": 62.08, "step": 2200, "token_acc": 0.46875, "train_speed(iter/s)": 0.668523 }, { "epoch": 0.09446896019879182, "grad_norm": 2.4014649391174316, "learning_rate": 9.991210532980928e-05, "loss": 2.401562309265137, "memory(GiB)": 62.08, "step": 2205, "token_acc": 0.5159010600706714, "train_speed(iter/s)": 0.668584 }, { "epoch": 0.09468317552804079, "grad_norm": 2.1165730953216553, "learning_rate": 9.991170601747415e-05, "loss": 2.1930515289306642, "memory(GiB)": 62.08, "step": 2210, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.668209 }, { "epoch": 0.09489739085728975, "grad_norm": 5.0294976234436035, "learning_rate": 9.991130580094164e-05, "loss": 2.267447853088379, "memory(GiB)": 62.08, "step": 2215, "token_acc": 0.4977973568281938, 
"train_speed(iter/s)": 0.668254 }, { "epoch": 0.0951116061865387, "grad_norm": 2.5726239681243896, "learning_rate": 9.991090468021901e-05, "loss": 2.4473901748657227, "memory(GiB)": 62.08, "step": 2220, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.668048 }, { "epoch": 0.09532582151578767, "grad_norm": 3.861633062362671, "learning_rate": 9.991050265531354e-05, "loss": 2.6857021331787108, "memory(GiB)": 62.08, "step": 2225, "token_acc": 0.43014705882352944, "train_speed(iter/s)": 0.668196 }, { "epoch": 0.09554003684503663, "grad_norm": 2.436746835708618, "learning_rate": 9.991009972623248e-05, "loss": 2.184605026245117, "memory(GiB)": 62.08, "step": 2230, "token_acc": 0.55078125, "train_speed(iter/s)": 0.667803 }, { "epoch": 0.0957542521742856, "grad_norm": 2.819638967514038, "learning_rate": 9.990969589298314e-05, "loss": 2.267256736755371, "memory(GiB)": 62.08, "step": 2235, "token_acc": 0.49361702127659574, "train_speed(iter/s)": 0.667972 }, { "epoch": 0.09596846750353455, "grad_norm": 2.90053129196167, "learning_rate": 9.990929115557283e-05, "loss": 2.3271121978759766, "memory(GiB)": 62.08, "step": 2240, "token_acc": 0.4901315789473684, "train_speed(iter/s)": 0.668267 }, { "epoch": 0.09618268283278351, "grad_norm": 2.4649670124053955, "learning_rate": 9.990888551400893e-05, "loss": 2.5394672393798827, "memory(GiB)": 62.08, "step": 2245, "token_acc": 0.45964912280701753, "train_speed(iter/s)": 0.668621 }, { "epoch": 0.09639689816203248, "grad_norm": 2.296980857849121, "learning_rate": 9.990847896829872e-05, "loss": 2.536738967895508, "memory(GiB)": 62.08, "step": 2250, "token_acc": 0.4819277108433735, "train_speed(iter/s)": 0.668576 }, { "epoch": 0.09661111349128143, "grad_norm": 3.112304449081421, "learning_rate": 9.99080715184496e-05, "loss": 2.1698066711425783, "memory(GiB)": 62.08, "step": 2255, "token_acc": 0.5457875457875457, "train_speed(iter/s)": 0.668643 }, { "epoch": 0.0968253288205304, "grad_norm": 2.8515870571136475, "learning_rate": 
9.990766316446894e-05, "loss": 2.4119930267333984, "memory(GiB)": 62.08, "step": 2260, "token_acc": 0.5, "train_speed(iter/s)": 0.668419 }, { "epoch": 0.09703954414977936, "grad_norm": 3.1210060119628906, "learning_rate": 9.990725390636416e-05, "loss": 2.4250999450683595, "memory(GiB)": 62.08, "step": 2265, "token_acc": 0.5058479532163743, "train_speed(iter/s)": 0.668149 }, { "epoch": 0.09725375947902833, "grad_norm": 2.4688773155212402, "learning_rate": 9.990684374414266e-05, "loss": 2.473614501953125, "memory(GiB)": 62.08, "step": 2270, "token_acc": 0.5179153094462541, "train_speed(iter/s)": 0.668161 }, { "epoch": 0.09746797480827728, "grad_norm": 3.1536428928375244, "learning_rate": 9.990643267781186e-05, "loss": 2.686799240112305, "memory(GiB)": 62.08, "step": 2275, "token_acc": 0.45614035087719296, "train_speed(iter/s)": 0.668135 }, { "epoch": 0.09768219013752624, "grad_norm": 3.232783317565918, "learning_rate": 9.990602070737921e-05, "loss": 2.6377933502197264, "memory(GiB)": 62.08, "step": 2280, "token_acc": 0.47678018575851394, "train_speed(iter/s)": 0.668367 }, { "epoch": 0.09789640546677521, "grad_norm": 2.6704068183898926, "learning_rate": 9.990560783285219e-05, "loss": 2.8402381896972657, "memory(GiB)": 62.08, "step": 2285, "token_acc": 0.4781144781144781, "train_speed(iter/s)": 0.668252 }, { "epoch": 0.09811062079602416, "grad_norm": 3.1785013675689697, "learning_rate": 9.990519405423825e-05, "loss": 2.4841730117797853, "memory(GiB)": 62.08, "step": 2290, "token_acc": 0.4980392156862745, "train_speed(iter/s)": 0.66827 }, { "epoch": 0.09832483612527312, "grad_norm": 5.719506740570068, "learning_rate": 9.990477937154495e-05, "loss": 2.2180728912353516, "memory(GiB)": 62.08, "step": 2295, "token_acc": 0.4854368932038835, "train_speed(iter/s)": 0.667963 }, { "epoch": 0.09853905145452209, "grad_norm": 3.4427566528320312, "learning_rate": 9.990436378477972e-05, "loss": 2.367195892333984, "memory(GiB)": 62.08, "step": 2300, "token_acc": 0.5, 
"train_speed(iter/s)": 0.667732 }, { "epoch": 0.09875326678377104, "grad_norm": 2.817991018295288, "learning_rate": 9.990394729395013e-05, "loss": 2.9096824645996096, "memory(GiB)": 62.08, "step": 2305, "token_acc": 0.44368600682593856, "train_speed(iter/s)": 0.667982 }, { "epoch": 0.09896748211302, "grad_norm": 3.200643539428711, "learning_rate": 9.990352989906372e-05, "loss": 2.529336357116699, "memory(GiB)": 62.08, "step": 2310, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.668385 }, { "epoch": 0.09918169744226897, "grad_norm": 2.3672897815704346, "learning_rate": 9.990311160012806e-05, "loss": 2.4922863006591798, "memory(GiB)": 62.08, "step": 2315, "token_acc": 0.47530864197530864, "train_speed(iter/s)": 0.668361 }, { "epoch": 0.09939591277151794, "grad_norm": 2.843379497528076, "learning_rate": 9.990269239715072e-05, "loss": 2.565679931640625, "memory(GiB)": 62.08, "step": 2320, "token_acc": 0.5039370078740157, "train_speed(iter/s)": 0.668376 }, { "epoch": 0.09961012810076689, "grad_norm": 2.8930537700653076, "learning_rate": 9.990227229013928e-05, "loss": 2.4800676345825194, "memory(GiB)": 62.08, "step": 2325, "token_acc": 0.521875, "train_speed(iter/s)": 0.668269 }, { "epoch": 0.09982434343001585, "grad_norm": 3.6717114448547363, "learning_rate": 9.990185127910139e-05, "loss": 2.1751049041748045, "memory(GiB)": 62.08, "step": 2330, "token_acc": 0.5330739299610895, "train_speed(iter/s)": 0.668169 }, { "epoch": 0.10003855875926482, "grad_norm": 2.9315974712371826, "learning_rate": 9.990142936404463e-05, "loss": 2.4037967681884767, "memory(GiB)": 62.08, "step": 2335, "token_acc": 0.45121951219512196, "train_speed(iter/s)": 0.668293 }, { "epoch": 0.10025277408851377, "grad_norm": 2.758157968521118, "learning_rate": 9.990100654497668e-05, "loss": 2.584405708312988, "memory(GiB)": 62.08, "step": 2340, "token_acc": 0.43537414965986393, "train_speed(iter/s)": 0.668406 }, { "epoch": 0.10046698941776273, "grad_norm": 3.0440850257873535, "learning_rate": 
9.990058282190519e-05, "loss": 2.6194637298583983, "memory(GiB)": 62.08, "step": 2345, "token_acc": 0.4755244755244755, "train_speed(iter/s)": 0.66832 }, { "epoch": 0.1006812047470117, "grad_norm": 2.2723042964935303, "learning_rate": 9.990015819483782e-05, "loss": 2.533182144165039, "memory(GiB)": 62.08, "step": 2350, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.668355 }, { "epoch": 0.10089542007626066, "grad_norm": 3.972580671310425, "learning_rate": 9.989973266378228e-05, "loss": 2.405169677734375, "memory(GiB)": 62.08, "step": 2355, "token_acc": 0.483974358974359, "train_speed(iter/s)": 0.668549 }, { "epoch": 0.10110963540550962, "grad_norm": 2.9747161865234375, "learning_rate": 9.989930622874626e-05, "loss": 2.5566192626953126, "memory(GiB)": 62.08, "step": 2360, "token_acc": 0.5096774193548387, "train_speed(iter/s)": 0.66853 }, { "epoch": 0.10132385073475858, "grad_norm": 2.7100515365600586, "learning_rate": 9.989887888973753e-05, "loss": 2.353483963012695, "memory(GiB)": 62.08, "step": 2365, "token_acc": 0.4879032258064516, "train_speed(iter/s)": 0.668499 }, { "epoch": 0.10153806606400755, "grad_norm": 2.5462841987609863, "learning_rate": 9.989845064676376e-05, "loss": 2.5327020645141602, "memory(GiB)": 62.08, "step": 2370, "token_acc": 0.49324324324324326, "train_speed(iter/s)": 0.668344 }, { "epoch": 0.1017522813932565, "grad_norm": 2.869344472885132, "learning_rate": 9.989802149983277e-05, "loss": 2.522291564941406, "memory(GiB)": 62.08, "step": 2375, "token_acc": 0.48562300319488816, "train_speed(iter/s)": 0.668343 }, { "epoch": 0.10196649672250546, "grad_norm": 2.7484230995178223, "learning_rate": 9.989759144895231e-05, "loss": 2.198067474365234, "memory(GiB)": 62.08, "step": 2380, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.668573 }, { "epoch": 0.10218071205175443, "grad_norm": 2.3788564205169678, "learning_rate": 9.989716049413018e-05, "loss": 2.4426498413085938, "memory(GiB)": 62.08, "step": 2385, "token_acc": 
0.5173611111111112, "train_speed(iter/s)": 0.668781 }, { "epoch": 0.10239492738100338, "grad_norm": 2.468919038772583, "learning_rate": 9.989672863537416e-05, "loss": 2.2644405364990234, "memory(GiB)": 62.08, "step": 2390, "token_acc": 0.5143884892086331, "train_speed(iter/s)": 0.669021 }, { "epoch": 0.10260914271025234, "grad_norm": 3.194448947906494, "learning_rate": 9.989629587269212e-05, "loss": 2.357087326049805, "memory(GiB)": 62.08, "step": 2395, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.669295 }, { "epoch": 0.10282335803950131, "grad_norm": 3.447255849838257, "learning_rate": 9.989586220609185e-05, "loss": 2.5594247817993163, "memory(GiB)": 62.08, "step": 2400, "token_acc": 0.43686006825938567, "train_speed(iter/s)": 0.669114 }, { "epoch": 0.10303757336875027, "grad_norm": 2.297830104827881, "learning_rate": 9.989542763558123e-05, "loss": 2.080608940124512, "memory(GiB)": 62.08, "step": 2405, "token_acc": 0.5354609929078015, "train_speed(iter/s)": 0.669139 }, { "epoch": 0.10325178869799922, "grad_norm": 2.3720905780792236, "learning_rate": 9.989499216116815e-05, "loss": 2.538391876220703, "memory(GiB)": 62.08, "step": 2410, "token_acc": 0.47896440129449835, "train_speed(iter/s)": 0.668986 }, { "epoch": 0.10346600402724819, "grad_norm": 3.3464269638061523, "learning_rate": 9.989455578286048e-05, "loss": 2.4057973861694335, "memory(GiB)": 62.08, "step": 2415, "token_acc": 0.48562300319488816, "train_speed(iter/s)": 0.669284 }, { "epoch": 0.10368021935649716, "grad_norm": 3.312866449356079, "learning_rate": 9.989411850066612e-05, "loss": 2.8301311492919923, "memory(GiB)": 62.08, "step": 2420, "token_acc": 0.40716612377850164, "train_speed(iter/s)": 0.669259 }, { "epoch": 0.1038944346857461, "grad_norm": 3.197211742401123, "learning_rate": 9.9893680314593e-05, "loss": 2.3642478942871095, "memory(GiB)": 62.08, "step": 2425, "token_acc": 0.5266903914590747, "train_speed(iter/s)": 0.669438 }, { "epoch": 0.10410865001499507, "grad_norm": 
3.3186089992523193, "learning_rate": 9.989324122464906e-05, "loss": 2.40118293762207, "memory(GiB)": 62.08, "step": 2430, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.669252 }, { "epoch": 0.10432286534424404, "grad_norm": 2.0158777236938477, "learning_rate": 9.989280123084225e-05, "loss": 2.528182601928711, "memory(GiB)": 62.08, "step": 2435, "token_acc": 0.4781021897810219, "train_speed(iter/s)": 0.669279 }, { "epoch": 0.104537080673493, "grad_norm": 5.134706974029541, "learning_rate": 9.989236033318054e-05, "loss": 2.5194232940673826, "memory(GiB)": 62.08, "step": 2440, "token_acc": 0.48214285714285715, "train_speed(iter/s)": 0.669394 }, { "epoch": 0.10475129600274195, "grad_norm": 3.7575483322143555, "learning_rate": 9.989191853167193e-05, "loss": 2.450284957885742, "memory(GiB)": 62.08, "step": 2445, "token_acc": 0.5343511450381679, "train_speed(iter/s)": 0.669533 }, { "epoch": 0.10496551133199092, "grad_norm": 3.6576898097991943, "learning_rate": 9.98914758263244e-05, "loss": 2.4680801391601563, "memory(GiB)": 62.08, "step": 2450, "token_acc": 0.5, "train_speed(iter/s)": 0.669872 }, { "epoch": 0.10517972666123988, "grad_norm": 2.297675132751465, "learning_rate": 9.989103221714598e-05, "loss": 2.584751319885254, "memory(GiB)": 62.08, "step": 2455, "token_acc": 0.4943820224719101, "train_speed(iter/s)": 0.669902 }, { "epoch": 0.10539394199048883, "grad_norm": 2.1622402667999268, "learning_rate": 9.989058770414472e-05, "loss": 2.77056999206543, "memory(GiB)": 62.08, "step": 2460, "token_acc": 0.4273255813953488, "train_speed(iter/s)": 0.669919 }, { "epoch": 0.1056081573197378, "grad_norm": 4.2315449714660645, "learning_rate": 9.989014228732867e-05, "loss": 2.5497060775756837, "memory(GiB)": 62.08, "step": 2465, "token_acc": 0.45357142857142857, "train_speed(iter/s)": 0.670285 }, { "epoch": 0.10582237264898676, "grad_norm": 2.645474433898926, "learning_rate": 9.988969596670587e-05, "loss": 2.540960693359375, "memory(GiB)": 62.08, "step": 2470, 
"token_acc": 0.47766323024054985, "train_speed(iter/s)": 0.670511 }, { "epoch": 0.10603658797823572, "grad_norm": 2.9432601928710938, "learning_rate": 9.988924874228445e-05, "loss": 2.2218852996826173, "memory(GiB)": 62.08, "step": 2475, "token_acc": 0.5426621160409556, "train_speed(iter/s)": 0.670651 }, { "epoch": 0.10625080330748468, "grad_norm": 2.8866937160491943, "learning_rate": 9.988880061407248e-05, "loss": 2.6382110595703123, "memory(GiB)": 62.08, "step": 2480, "token_acc": 0.4653846153846154, "train_speed(iter/s)": 0.670641 }, { "epoch": 0.10646501863673365, "grad_norm": 4.484962463378906, "learning_rate": 9.988835158207808e-05, "loss": 2.404461669921875, "memory(GiB)": 62.08, "step": 2485, "token_acc": 0.4868913857677903, "train_speed(iter/s)": 0.670917 }, { "epoch": 0.10667923396598261, "grad_norm": 2.76291823387146, "learning_rate": 9.98879016463094e-05, "loss": 2.357646942138672, "memory(GiB)": 62.08, "step": 2490, "token_acc": 0.4630225080385852, "train_speed(iter/s)": 0.670973 }, { "epoch": 0.10689344929523156, "grad_norm": 2.7526121139526367, "learning_rate": 9.988745080677457e-05, "loss": 2.5151836395263674, "memory(GiB)": 62.08, "step": 2495, "token_acc": 0.47468354430379744, "train_speed(iter/s)": 0.67106 }, { "epoch": 0.10710766462448053, "grad_norm": 2.690180540084839, "learning_rate": 9.988699906348179e-05, "loss": 2.315745735168457, "memory(GiB)": 62.08, "step": 2500, "token_acc": 0.49050632911392406, "train_speed(iter/s)": 0.67111 }, { "epoch": 0.10710766462448053, "eval_loss": 2.057887077331543, "eval_runtime": 17.0596, "eval_samples_per_second": 5.862, "eval_steps_per_second": 5.862, "eval_token_acc": 0.5138121546961326, "step": 2500 }, { "epoch": 0.1073218799537295, "grad_norm": 2.408210277557373, "learning_rate": 9.988654641643922e-05, "loss": 2.642716407775879, "memory(GiB)": 62.08, "step": 2505, "token_acc": 0.4971857410881801, "train_speed(iter/s)": 0.667884 }, { "epoch": 0.10753609528297844, "grad_norm": 3.3187525272369385, 
"learning_rate": 9.988609286565505e-05, "loss": 2.6019662857055663, "memory(GiB)": 62.08, "step": 2510, "token_acc": 0.44510385756676557, "train_speed(iter/s)": 0.66786 }, { "epoch": 0.10775031061222741, "grad_norm": 3.4024367332458496, "learning_rate": 9.988563841113752e-05, "loss": 2.4778200149536134, "memory(GiB)": 62.08, "step": 2515, "token_acc": 0.4779874213836478, "train_speed(iter/s)": 0.66758 }, { "epoch": 0.10796452594147637, "grad_norm": 3.398893117904663, "learning_rate": 9.988518305289487e-05, "loss": 2.483729934692383, "memory(GiB)": 62.08, "step": 2520, "token_acc": 0.46747967479674796, "train_speed(iter/s)": 0.667835 }, { "epoch": 0.10817874127072534, "grad_norm": 2.7132668495178223, "learning_rate": 9.988472679093532e-05, "loss": 2.2600784301757812, "memory(GiB)": 62.08, "step": 2525, "token_acc": 0.5090252707581228, "train_speed(iter/s)": 0.66795 }, { "epoch": 0.10839295659997429, "grad_norm": 2.158993721008301, "learning_rate": 9.988426962526714e-05, "loss": 2.5033313751220705, "memory(GiB)": 62.08, "step": 2530, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.667997 }, { "epoch": 0.10860717192922326, "grad_norm": 3.3815083503723145, "learning_rate": 9.988381155589863e-05, "loss": 2.805323028564453, "memory(GiB)": 62.08, "step": 2535, "token_acc": 0.44966442953020136, "train_speed(iter/s)": 0.667836 }, { "epoch": 0.10882138725847222, "grad_norm": 2.912166118621826, "learning_rate": 9.988335258283808e-05, "loss": 2.5988533020019533, "memory(GiB)": 62.08, "step": 2540, "token_acc": 0.5096153846153846, "train_speed(iter/s)": 0.668178 }, { "epoch": 0.10903560258772117, "grad_norm": 2.603644847869873, "learning_rate": 9.988289270609378e-05, "loss": 2.4706710815429687, "memory(GiB)": 62.08, "step": 2545, "token_acc": 0.48036253776435045, "train_speed(iter/s)": 0.668249 }, { "epoch": 0.10924981791697014, "grad_norm": 2.9291741847991943, "learning_rate": 9.988243192567412e-05, "loss": 2.7912731170654297, "memory(GiB)": 62.08, "step": 2550, 
"token_acc": 0.4423791821561338, "train_speed(iter/s)": 0.667978 }, { "epoch": 0.1094640332462191, "grad_norm": 3.09578537940979, "learning_rate": 9.988197024158739e-05, "loss": 2.4379711151123047, "memory(GiB)": 62.08, "step": 2555, "token_acc": 0.49514563106796117, "train_speed(iter/s)": 0.668094 }, { "epoch": 0.10967824857546805, "grad_norm": 2.5501980781555176, "learning_rate": 9.988150765384199e-05, "loss": 2.5668521881103517, "memory(GiB)": 62.08, "step": 2560, "token_acc": 0.45614035087719296, "train_speed(iter/s)": 0.668359 }, { "epoch": 0.10989246390471702, "grad_norm": 2.9856860637664795, "learning_rate": 9.988104416244627e-05, "loss": 2.6207595825195313, "memory(GiB)": 62.08, "step": 2565, "token_acc": 0.45483870967741935, "train_speed(iter/s)": 0.66849 }, { "epoch": 0.11010667923396598, "grad_norm": 2.9302260875701904, "learning_rate": 9.988057976740865e-05, "loss": 2.5396299362182617, "memory(GiB)": 62.08, "step": 2570, "token_acc": 0.4548736462093863, "train_speed(iter/s)": 0.668468 }, { "epoch": 0.11032089456321495, "grad_norm": 2.4155399799346924, "learning_rate": 9.988011446873753e-05, "loss": 2.5557533264160157, "memory(GiB)": 62.08, "step": 2575, "token_acc": 0.42805755395683454, "train_speed(iter/s)": 0.668552 }, { "epoch": 0.1105351098924639, "grad_norm": 2.603672504425049, "learning_rate": 9.987964826644137e-05, "loss": 2.6039249420166017, "memory(GiB)": 62.08, "step": 2580, "token_acc": 0.47096774193548385, "train_speed(iter/s)": 0.66825 }, { "epoch": 0.11074932522171287, "grad_norm": 2.6391966342926025, "learning_rate": 9.987918116052856e-05, "loss": 2.726082611083984, "memory(GiB)": 62.08, "step": 2585, "token_acc": 0.4481792717086835, "train_speed(iter/s)": 0.66853 }, { "epoch": 0.11096354055096183, "grad_norm": 3.559931993484497, "learning_rate": 9.98787131510076e-05, "loss": 2.6304935455322265, "memory(GiB)": 62.08, "step": 2590, "token_acc": 0.4310850439882698, "train_speed(iter/s)": 0.668397 }, { "epoch": 0.11117775588021078, 
"grad_norm": 2.884880781173706, "learning_rate": 9.987824423788696e-05, "loss": 2.412160301208496, "memory(GiB)": 62.08, "step": 2595, "token_acc": 0.45722713864306785, "train_speed(iter/s)": 0.668696 }, { "epoch": 0.11139197120945975, "grad_norm": 2.5291242599487305, "learning_rate": 9.987777442117516e-05, "loss": 2.1115390777587892, "memory(GiB)": 62.08, "step": 2600, "token_acc": 0.575, "train_speed(iter/s)": 0.668835 }, { "epoch": 0.11160618653870871, "grad_norm": 3.246129274368286, "learning_rate": 9.987730370088067e-05, "loss": 2.403610610961914, "memory(GiB)": 62.08, "step": 2605, "token_acc": 0.5051194539249146, "train_speed(iter/s)": 0.669032 }, { "epoch": 0.11182040186795768, "grad_norm": 3.0517494678497314, "learning_rate": 9.987683207701203e-05, "loss": 2.537582206726074, "memory(GiB)": 62.08, "step": 2610, "token_acc": 0.5092592592592593, "train_speed(iter/s)": 0.669082 }, { "epoch": 0.11203461719720663, "grad_norm": 3.888883590698242, "learning_rate": 9.987635954957779e-05, "loss": 2.8429187774658202, "memory(GiB)": 62.08, "step": 2615, "token_acc": 0.4268292682926829, "train_speed(iter/s)": 0.669057 }, { "epoch": 0.1122488325264556, "grad_norm": 3.1613359451293945, "learning_rate": 9.987588611858651e-05, "loss": 2.1996318817138674, "memory(GiB)": 62.08, "step": 2620, "token_acc": 0.5035714285714286, "train_speed(iter/s)": 0.669238 }, { "epoch": 0.11246304785570456, "grad_norm": 2.750520944595337, "learning_rate": 9.987541178404678e-05, "loss": 2.2156503677368162, "memory(GiB)": 62.08, "step": 2625, "token_acc": 0.5647058823529412, "train_speed(iter/s)": 0.66914 }, { "epoch": 0.11267726318495351, "grad_norm": 3.5535526275634766, "learning_rate": 9.987493654596716e-05, "loss": 2.4923984527587892, "memory(GiB)": 62.08, "step": 2630, "token_acc": 0.4649122807017544, "train_speed(iter/s)": 0.669208 }, { "epoch": 0.11289147851420248, "grad_norm": 4.135267734527588, "learning_rate": 9.987446040435628e-05, "loss": 2.4980499267578127, "memory(GiB)": 62.08, 
"step": 2635, "token_acc": 0.49, "train_speed(iter/s)": 0.66883 }, { "epoch": 0.11310569384345144, "grad_norm": 3.1250054836273193, "learning_rate": 9.987398335922278e-05, "loss": 2.3371206283569337, "memory(GiB)": 62.08, "step": 2640, "token_acc": 0.49834983498349833, "train_speed(iter/s)": 0.668683 }, { "epoch": 0.11331990917270039, "grad_norm": 3.352205514907837, "learning_rate": 9.987350541057526e-05, "loss": 2.384830665588379, "memory(GiB)": 62.08, "step": 2645, "token_acc": 0.48014440433212996, "train_speed(iter/s)": 0.669002 }, { "epoch": 0.11353412450194936, "grad_norm": 3.66373872756958, "learning_rate": 9.987302655842242e-05, "loss": 2.280124855041504, "memory(GiB)": 62.08, "step": 2650, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.669327 }, { "epoch": 0.11374833983119832, "grad_norm": 2.7396178245544434, "learning_rate": 9.987254680277291e-05, "loss": 2.3722984313964846, "memory(GiB)": 62.08, "step": 2655, "token_acc": 0.5037593984962406, "train_speed(iter/s)": 0.669368 }, { "epoch": 0.11396255516044729, "grad_norm": 2.864933729171753, "learning_rate": 9.987206614363545e-05, "loss": 2.60021915435791, "memory(GiB)": 62.08, "step": 2660, "token_acc": 0.45263157894736844, "train_speed(iter/s)": 0.669486 }, { "epoch": 0.11417677048969624, "grad_norm": 3.715085983276367, "learning_rate": 9.98715845810187e-05, "loss": 2.5667160034179686, "memory(GiB)": 62.08, "step": 2665, "token_acc": 0.44370860927152317, "train_speed(iter/s)": 0.669259 }, { "epoch": 0.1143909858189452, "grad_norm": 2.474193811416626, "learning_rate": 9.987110211493145e-05, "loss": 2.4101192474365236, "memory(GiB)": 62.08, "step": 2670, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.669519 }, { "epoch": 0.11460520114819417, "grad_norm": 2.716444730758667, "learning_rate": 9.987061874538237e-05, "loss": 2.6981679916381838, "memory(GiB)": 62.08, "step": 2675, "token_acc": 0.439873417721519, "train_speed(iter/s)": 0.669543 }, { "epoch": 0.11481941647744312, "grad_norm": 
2.608539342880249, "learning_rate": 9.987013447238026e-05, "loss": 2.243803596496582, "memory(GiB)": 62.08, "step": 2680, "token_acc": 0.5070028011204482, "train_speed(iter/s)": 0.669508 }, { "epoch": 0.11503363180669209, "grad_norm": 2.551276922225952, "learning_rate": 9.986964929593387e-05, "loss": 2.5599231719970703, "memory(GiB)": 62.08, "step": 2685, "token_acc": 0.45, "train_speed(iter/s)": 0.669426 }, { "epoch": 0.11524784713594105, "grad_norm": 3.8161163330078125, "learning_rate": 9.986916321605202e-05, "loss": 2.405989646911621, "memory(GiB)": 62.08, "step": 2690, "token_acc": 0.4910394265232975, "train_speed(iter/s)": 0.669538 }, { "epoch": 0.11546206246519002, "grad_norm": 3.8306596279144287, "learning_rate": 9.98686762327435e-05, "loss": 2.5316497802734377, "memory(GiB)": 62.43, "step": 2695, "token_acc": 0.4676258992805755, "train_speed(iter/s)": 0.669411 }, { "epoch": 0.11567627779443897, "grad_norm": 9.099365234375, "learning_rate": 9.986818834601713e-05, "loss": 2.675355911254883, "memory(GiB)": 62.43, "step": 2700, "token_acc": 0.42641509433962266, "train_speed(iter/s)": 0.669414 }, { "epoch": 0.11589049312368793, "grad_norm": 4.2578654289245605, "learning_rate": 9.986769955588173e-05, "loss": 2.374826431274414, "memory(GiB)": 62.43, "step": 2705, "token_acc": 0.5, "train_speed(iter/s)": 0.669519 }, { "epoch": 0.1161047084529369, "grad_norm": 2.9745025634765625, "learning_rate": 9.986720986234617e-05, "loss": 2.177010917663574, "memory(GiB)": 62.43, "step": 2710, "token_acc": 0.5340909090909091, "train_speed(iter/s)": 0.669579 }, { "epoch": 0.11631892378218585, "grad_norm": 3.0933594703674316, "learning_rate": 9.986671926541934e-05, "loss": 2.4364709854125977, "memory(GiB)": 62.43, "step": 2715, "token_acc": 0.49159663865546216, "train_speed(iter/s)": 0.669492 }, { "epoch": 0.11653313911143481, "grad_norm": 2.8535258769989014, "learning_rate": 9.98662277651101e-05, "loss": 2.7496196746826174, "memory(GiB)": 62.43, "step": 2720, "token_acc": 
0.4724137931034483, "train_speed(iter/s)": 0.669715 }, { "epoch": 0.11674735444068378, "grad_norm": 3.8174185752868652, "learning_rate": 9.986573536142738e-05, "loss": 2.4013139724731447, "memory(GiB)": 66.08, "step": 2725, "token_acc": 0.48363636363636364, "train_speed(iter/s)": 0.669641 }, { "epoch": 0.11696156976993273, "grad_norm": 2.801776170730591, "learning_rate": 9.986524205438009e-05, "loss": 2.3755876541137697, "memory(GiB)": 66.08, "step": 2730, "token_acc": 0.4882943143812709, "train_speed(iter/s)": 0.669764 }, { "epoch": 0.1171757850991817, "grad_norm": 3.4247519969940186, "learning_rate": 9.986474784397714e-05, "loss": 2.4827011108398436, "memory(GiB)": 66.08, "step": 2735, "token_acc": 0.46558704453441296, "train_speed(iter/s)": 0.669911 }, { "epoch": 0.11739000042843066, "grad_norm": 3.570073127746582, "learning_rate": 9.986425273022751e-05, "loss": 2.371577835083008, "memory(GiB)": 66.08, "step": 2740, "token_acc": 0.5355648535564853, "train_speed(iter/s)": 0.670024 }, { "epoch": 0.11760421575767963, "grad_norm": 3.328667402267456, "learning_rate": 9.986375671314018e-05, "loss": 2.573904037475586, "memory(GiB)": 66.08, "step": 2745, "token_acc": 0.4158415841584158, "train_speed(iter/s)": 0.670052 }, { "epoch": 0.11781843108692858, "grad_norm": 4.461948394775391, "learning_rate": 9.98632597927241e-05, "loss": 2.6201332092285154, "memory(GiB)": 66.08, "step": 2750, "token_acc": 0.4549019607843137, "train_speed(iter/s)": 0.670185 }, { "epoch": 0.11803264641617754, "grad_norm": 3.6352243423461914, "learning_rate": 9.986276196898831e-05, "loss": 2.5963104248046873, "memory(GiB)": 66.08, "step": 2755, "token_acc": 0.4632352941176471, "train_speed(iter/s)": 0.670312 }, { "epoch": 0.11824686174542651, "grad_norm": 2.9755876064300537, "learning_rate": 9.986226324194181e-05, "loss": 2.48937931060791, "memory(GiB)": 66.08, "step": 2760, "token_acc": 0.48828125, "train_speed(iter/s)": 0.670456 }, { "epoch": 0.11846107707467546, "grad_norm": 2.3914437294006348, 
"learning_rate": 9.986176361159363e-05, "loss": 2.3909881591796873, "memory(GiB)": 66.08, "step": 2765, "token_acc": 0.4794520547945205, "train_speed(iter/s)": 0.670288 }, { "epoch": 0.11867529240392442, "grad_norm": 3.013758659362793, "learning_rate": 9.986126307795285e-05, "loss": 2.4065021514892577, "memory(GiB)": 66.08, "step": 2770, "token_acc": 0.47419354838709676, "train_speed(iter/s)": 0.670304 }, { "epoch": 0.11888950773317339, "grad_norm": 3.056481122970581, "learning_rate": 9.98607616410285e-05, "loss": 2.0187286376953124, "memory(GiB)": 66.08, "step": 2775, "token_acc": 0.5605536332179931, "train_speed(iter/s)": 0.670286 }, { "epoch": 0.11910372306242235, "grad_norm": 2.7224576473236084, "learning_rate": 9.986025930082968e-05, "loss": 2.266440582275391, "memory(GiB)": 66.08, "step": 2780, "token_acc": 0.49097472924187724, "train_speed(iter/s)": 0.670442 }, { "epoch": 0.1193179383916713, "grad_norm": 2.7179033756256104, "learning_rate": 9.985975605736548e-05, "loss": 2.104292297363281, "memory(GiB)": 66.08, "step": 2785, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.670618 }, { "epoch": 0.11953215372092027, "grad_norm": 2.4229719638824463, "learning_rate": 9.985925191064503e-05, "loss": 2.56536750793457, "memory(GiB)": 66.08, "step": 2790, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.670538 }, { "epoch": 0.11974636905016924, "grad_norm": 2.3360157012939453, "learning_rate": 9.985874686067746e-05, "loss": 2.7768264770507813, "memory(GiB)": 66.08, "step": 2795, "token_acc": 0.44314868804664725, "train_speed(iter/s)": 0.670403 }, { "epoch": 0.11996058437941819, "grad_norm": 2.7829129695892334, "learning_rate": 9.985824090747193e-05, "loss": 2.6082586288452148, "memory(GiB)": 66.08, "step": 2800, "token_acc": 0.4752475247524752, "train_speed(iter/s)": 0.670003 }, { "epoch": 0.12017479970866715, "grad_norm": 3.011054754257202, "learning_rate": 9.98577340510376e-05, "loss": 2.333896446228027, "memory(GiB)": 66.08, "step": 2805, 
"token_acc": 0.4728434504792332, "train_speed(iter/s)": 0.670021 }, { "epoch": 0.12038901503791612, "grad_norm": 3.8261592388153076, "learning_rate": 9.985722629138364e-05, "loss": 2.5458593368530273, "memory(GiB)": 66.08, "step": 2810, "token_acc": 0.4714285714285714, "train_speed(iter/s)": 0.67002 }, { "epoch": 0.12060323036716507, "grad_norm": 4.254089832305908, "learning_rate": 9.985671762851925e-05, "loss": 2.5783035278320314, "memory(GiB)": 66.08, "step": 2815, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.669909 }, { "epoch": 0.12081744569641403, "grad_norm": 2.918055772781372, "learning_rate": 9.985620806245365e-05, "loss": 2.4485660552978517, "memory(GiB)": 66.08, "step": 2820, "token_acc": 0.45317220543806647, "train_speed(iter/s)": 0.670017 }, { "epoch": 0.121031661025663, "grad_norm": 4.152498245239258, "learning_rate": 9.985569759319607e-05, "loss": 2.649592971801758, "memory(GiB)": 66.08, "step": 2825, "token_acc": 0.4796747967479675, "train_speed(iter/s)": 0.669998 }, { "epoch": 0.12124587635491196, "grad_norm": 3.001735210418701, "learning_rate": 9.985518622075577e-05, "loss": 2.1934080123901367, "memory(GiB)": 66.08, "step": 2830, "token_acc": 0.4979757085020243, "train_speed(iter/s)": 0.670199 }, { "epoch": 0.12146009168416091, "grad_norm": 2.9338431358337402, "learning_rate": 9.985467394514201e-05, "loss": 2.82198486328125, "memory(GiB)": 66.08, "step": 2835, "token_acc": 0.44904458598726116, "train_speed(iter/s)": 0.670106 }, { "epoch": 0.12167430701340988, "grad_norm": 2.5356791019439697, "learning_rate": 9.985416076636404e-05, "loss": 2.343532943725586, "memory(GiB)": 66.08, "step": 2840, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.670158 }, { "epoch": 0.12188852234265884, "grad_norm": 2.981173276901245, "learning_rate": 9.98536466844312e-05, "loss": 2.4047046661376954, "memory(GiB)": 66.08, "step": 2845, "token_acc": 0.5, "train_speed(iter/s)": 0.670101 }, { "epoch": 0.1221027376719078, "grad_norm": 
3.2995007038116455, "learning_rate": 9.985313169935278e-05, "loss": 2.3912094116210936, "memory(GiB)": 66.08, "step": 2850, "token_acc": 0.4861111111111111, "train_speed(iter/s)": 0.670056 }, { "epoch": 0.12231695300115676, "grad_norm": 2.3125061988830566, "learning_rate": 9.98526158111381e-05, "loss": 2.447707748413086, "memory(GiB)": 66.08, "step": 2855, "token_acc": 0.5016611295681063, "train_speed(iter/s)": 0.670009 }, { "epoch": 0.12253116833040573, "grad_norm": 2.7092461585998535, "learning_rate": 9.985209901979653e-05, "loss": 2.381534767150879, "memory(GiB)": 66.08, "step": 2860, "token_acc": 0.525096525096525, "train_speed(iter/s)": 0.66996 }, { "epoch": 0.12274538365965469, "grad_norm": 5.66828727722168, "learning_rate": 9.985158132533743e-05, "loss": 2.644837760925293, "memory(GiB)": 66.08, "step": 2865, "token_acc": 0.43343653250773995, "train_speed(iter/s)": 0.66968 }, { "epoch": 0.12295959898890364, "grad_norm": 3.4533677101135254, "learning_rate": 9.985106272777017e-05, "loss": 2.3789649963378907, "memory(GiB)": 66.08, "step": 2870, "token_acc": 0.516728624535316, "train_speed(iter/s)": 0.669558 }, { "epoch": 0.12317381431815261, "grad_norm": 2.8379127979278564, "learning_rate": 9.985054322710412e-05, "loss": 2.317238998413086, "memory(GiB)": 66.08, "step": 2875, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.66954 }, { "epoch": 0.12338802964740157, "grad_norm": 3.5253405570983887, "learning_rate": 9.985002282334875e-05, "loss": 2.4376941680908204, "memory(GiB)": 66.08, "step": 2880, "token_acc": 0.4750733137829912, "train_speed(iter/s)": 0.669789 }, { "epoch": 0.12360224497665052, "grad_norm": 2.6690433025360107, "learning_rate": 9.984950151651342e-05, "loss": 2.378250503540039, "memory(GiB)": 66.08, "step": 2885, "token_acc": 0.4742268041237113, "train_speed(iter/s)": 0.669787 }, { "epoch": 0.12381646030589949, "grad_norm": 2.8828558921813965, "learning_rate": 9.984897930660763e-05, "loss": 2.5237968444824217, "memory(GiB)": 66.08, 
"step": 2890, "token_acc": 0.47266881028938906, "train_speed(iter/s)": 0.66992 }, { "epoch": 0.12403067563514845, "grad_norm": 2.8241629600524902, "learning_rate": 9.98484561936408e-05, "loss": 2.519863319396973, "memory(GiB)": 66.08, "step": 2895, "token_acc": 0.5, "train_speed(iter/s)": 0.670275 }, { "epoch": 0.1242448909643974, "grad_norm": 4.203672885894775, "learning_rate": 9.984793217762244e-05, "loss": 2.528842735290527, "memory(GiB)": 66.08, "step": 2900, "token_acc": 0.4766666666666667, "train_speed(iter/s)": 0.670293 }, { "epoch": 0.12445910629364637, "grad_norm": 3.335637092590332, "learning_rate": 9.984740725856202e-05, "loss": 2.516367721557617, "memory(GiB)": 66.08, "step": 2905, "token_acc": 0.4425087108013937, "train_speed(iter/s)": 0.670284 }, { "epoch": 0.12467332162289534, "grad_norm": 3.1598997116088867, "learning_rate": 9.984688143646905e-05, "loss": 2.443208122253418, "memory(GiB)": 66.08, "step": 2910, "token_acc": 0.4901315789473684, "train_speed(iter/s)": 0.67029 }, { "epoch": 0.1248875369521443, "grad_norm": 3.065880060195923, "learning_rate": 9.984635471135308e-05, "loss": 2.4740364074707033, "memory(GiB)": 66.08, "step": 2915, "token_acc": 0.5, "train_speed(iter/s)": 0.670436 }, { "epoch": 0.12510175228139325, "grad_norm": 2.6338140964508057, "learning_rate": 9.984582708322364e-05, "loss": 2.212269401550293, "memory(GiB)": 66.08, "step": 2920, "token_acc": 0.5051903114186851, "train_speed(iter/s)": 0.670729 }, { "epoch": 0.1253159676106422, "grad_norm": 4.3427863121032715, "learning_rate": 9.984529855209027e-05, "loss": 2.4167739868164064, "memory(GiB)": 66.08, "step": 2925, "token_acc": 0.5204918032786885, "train_speed(iter/s)": 0.670943 }, { "epoch": 0.12553018293989118, "grad_norm": 2.2759909629821777, "learning_rate": 9.984476911796254e-05, "loss": 2.595112419128418, "memory(GiB)": 66.08, "step": 2930, "token_acc": 0.4723926380368098, "train_speed(iter/s)": 0.671195 }, { "epoch": 0.12574439826914013, "grad_norm": 2.3348591327667236, 
"learning_rate": 9.984423878085007e-05, "loss": 2.3663543701171874, "memory(GiB)": 66.08, "step": 2935, "token_acc": 0.48951048951048953, "train_speed(iter/s)": 0.67124 }, { "epoch": 0.1259586135983891, "grad_norm": 4.052712440490723, "learning_rate": 9.984370754076248e-05, "loss": 2.5901403427124023, "memory(GiB)": 66.08, "step": 2940, "token_acc": 0.5109489051094891, "train_speed(iter/s)": 0.671588 }, { "epoch": 0.12617282892763806, "grad_norm": 3.4286954402923584, "learning_rate": 9.984317539770935e-05, "loss": 2.591845703125, "memory(GiB)": 66.08, "step": 2945, "token_acc": 0.47017543859649125, "train_speed(iter/s)": 0.671631 }, { "epoch": 0.12638704425688702, "grad_norm": 3.693775177001953, "learning_rate": 9.984264235170034e-05, "loss": 2.441693878173828, "memory(GiB)": 66.08, "step": 2950, "token_acc": 0.4895833333333333, "train_speed(iter/s)": 0.671721 }, { "epoch": 0.126601259586136, "grad_norm": 2.4676268100738525, "learning_rate": 9.984210840274511e-05, "loss": 2.314929389953613, "memory(GiB)": 66.08, "step": 2955, "token_acc": 0.5, "train_speed(iter/s)": 0.671678 }, { "epoch": 0.12681547491538495, "grad_norm": 4.411177158355713, "learning_rate": 9.984157355085334e-05, "loss": 2.7116958618164064, "memory(GiB)": 66.08, "step": 2960, "token_acc": 0.4143835616438356, "train_speed(iter/s)": 0.671737 }, { "epoch": 0.1270296902446339, "grad_norm": 3.8246188163757324, "learning_rate": 9.98410377960347e-05, "loss": 2.436906623840332, "memory(GiB)": 66.08, "step": 2965, "token_acc": 0.4795539033457249, "train_speed(iter/s)": 0.671779 }, { "epoch": 0.12724390557388288, "grad_norm": 2.853074550628662, "learning_rate": 9.984050113829891e-05, "loss": 2.616805839538574, "memory(GiB)": 66.08, "step": 2970, "token_acc": 0.46830985915492956, "train_speed(iter/s)": 0.671885 }, { "epoch": 0.12745812090313183, "grad_norm": 2.331411838531494, "learning_rate": 9.983996357765569e-05, "loss": 2.4394195556640623, "memory(GiB)": 66.08, "step": 2975, "token_acc": 
0.5058823529411764, "train_speed(iter/s)": 0.672123 }, { "epoch": 0.12767233623238078, "grad_norm": 2.355579137802124, "learning_rate": 9.983942511411477e-05, "loss": 2.3416091918945314, "memory(GiB)": 66.08, "step": 2980, "token_acc": 0.5033112582781457, "train_speed(iter/s)": 0.672226 }, { "epoch": 0.12788655156162976, "grad_norm": 6.936763763427734, "learning_rate": 9.983888574768592e-05, "loss": 2.4875770568847657, "memory(GiB)": 66.08, "step": 2985, "token_acc": 0.48013245033112584, "train_speed(iter/s)": 0.672432 }, { "epoch": 0.1281007668908787, "grad_norm": 3.149160146713257, "learning_rate": 9.98383454783789e-05, "loss": 2.5040380477905275, "memory(GiB)": 66.08, "step": 2990, "token_acc": 0.49808429118773945, "train_speed(iter/s)": 0.672549 }, { "epoch": 0.12831498222012766, "grad_norm": 4.383707523345947, "learning_rate": 9.983780430620348e-05, "loss": 2.365958404541016, "memory(GiB)": 66.08, "step": 2995, "token_acc": 0.4721189591078067, "train_speed(iter/s)": 0.672571 }, { "epoch": 0.12852919754937664, "grad_norm": 2.713146209716797, "learning_rate": 9.983726223116952e-05, "loss": 2.5325057983398436, "memory(GiB)": 66.08, "step": 3000, "token_acc": 0.4573170731707317, "train_speed(iter/s)": 0.67275 }, { "epoch": 0.12852919754937664, "eval_loss": 1.9310146570205688, "eval_runtime": 17.5355, "eval_samples_per_second": 5.703, "eval_steps_per_second": 5.703, "eval_token_acc": 0.5396113602391629, "step": 3000 }, { "epoch": 0.1287434128786256, "grad_norm": 3.2930822372436523, "learning_rate": 9.983671925328677e-05, "loss": 2.4476316452026365, "memory(GiB)": 66.08, "step": 3005, "token_acc": 0.5207423580786026, "train_speed(iter/s)": 0.669826 }, { "epoch": 0.12895762820787454, "grad_norm": 2.5567922592163086, "learning_rate": 9.983617537256511e-05, "loss": 2.4383981704711912, "memory(GiB)": 66.08, "step": 3010, "token_acc": 0.45422535211267606, "train_speed(iter/s)": 0.669929 }, { "epoch": 0.12917184353712352, "grad_norm": 2.6370906829833984, "learning_rate": 
9.983563058901439e-05, "loss": 2.5067350387573244, "memory(GiB)": 66.08, "step": 3015, "token_acc": 0.4831804281345566, "train_speed(iter/s)": 0.669792 }, { "epoch": 0.12938605886637247, "grad_norm": 3.8966708183288574, "learning_rate": 9.983508490264445e-05, "loss": 2.470986175537109, "memory(GiB)": 66.08, "step": 3020, "token_acc": 0.468, "train_speed(iter/s)": 0.670065 }, { "epoch": 0.12960027419562145, "grad_norm": 2.7694473266601562, "learning_rate": 9.983453831346524e-05, "loss": 2.5541128158569335, "memory(GiB)": 66.08, "step": 3025, "token_acc": 0.45345345345345345, "train_speed(iter/s)": 0.67027 }, { "epoch": 0.1298144895248704, "grad_norm": 3.5106961727142334, "learning_rate": 9.983399082148659e-05, "loss": 2.433011054992676, "memory(GiB)": 66.08, "step": 3030, "token_acc": 0.47840531561461797, "train_speed(iter/s)": 0.670181 }, { "epoch": 0.13002870485411935, "grad_norm": 2.3883659839630127, "learning_rate": 9.983344242671845e-05, "loss": 2.5926025390625, "memory(GiB)": 66.08, "step": 3035, "token_acc": 0.47017543859649125, "train_speed(iter/s)": 0.670371 }, { "epoch": 0.13024292018336833, "grad_norm": 2.5490992069244385, "learning_rate": 9.983289312917076e-05, "loss": 2.3195415496826173, "memory(GiB)": 66.08, "step": 3040, "token_acc": 0.5018181818181818, "train_speed(iter/s)": 0.670607 }, { "epoch": 0.13045713551261728, "grad_norm": 4.106140613555908, "learning_rate": 9.983234292885346e-05, "loss": 2.647145080566406, "memory(GiB)": 66.08, "step": 3045, "token_acc": 0.4451219512195122, "train_speed(iter/s)": 0.670692 }, { "epoch": 0.13067135084186624, "grad_norm": 2.7559683322906494, "learning_rate": 9.983179182577651e-05, "loss": 2.107599449157715, "memory(GiB)": 66.08, "step": 3050, "token_acc": 0.5141843971631206, "train_speed(iter/s)": 0.67072 }, { "epoch": 0.13088556617111521, "grad_norm": 3.347252607345581, "learning_rate": 9.983123981994992e-05, "loss": 2.6149673461914062, "memory(GiB)": 66.08, "step": 3055, "token_acc": 0.4671814671814672, 
"train_speed(iter/s)": 0.671006 }, { "epoch": 0.13109978150036417, "grad_norm": 2.879178762435913, "learning_rate": 9.983068691138367e-05, "loss": 2.71195068359375, "memory(GiB)": 66.08, "step": 3060, "token_acc": 0.4529616724738676, "train_speed(iter/s)": 0.671102 }, { "epoch": 0.13131399682961312, "grad_norm": 3.128089666366577, "learning_rate": 9.98301331000878e-05, "loss": 2.501625823974609, "memory(GiB)": 66.08, "step": 3065, "token_acc": 0.46788990825688076, "train_speed(iter/s)": 0.671054 }, { "epoch": 0.1315282121588621, "grad_norm": 2.935476779937744, "learning_rate": 9.98295783860723e-05, "loss": 2.3040229797363283, "memory(GiB)": 66.08, "step": 3070, "token_acc": 0.4956268221574344, "train_speed(iter/s)": 0.671097 }, { "epoch": 0.13174242748811105, "grad_norm": 3.6376712322235107, "learning_rate": 9.982902276934725e-05, "loss": 2.5316064834594725, "memory(GiB)": 66.08, "step": 3075, "token_acc": 0.451505016722408, "train_speed(iter/s)": 0.671161 }, { "epoch": 0.13195664281736, "grad_norm": 4.3408403396606445, "learning_rate": 9.98284662499227e-05, "loss": 2.4949504852294924, "memory(GiB)": 66.08, "step": 3080, "token_acc": 0.4980544747081712, "train_speed(iter/s)": 0.671095 }, { "epoch": 0.13217085814660898, "grad_norm": 2.8479056358337402, "learning_rate": 9.982790882780876e-05, "loss": 2.486361503601074, "memory(GiB)": 66.08, "step": 3085, "token_acc": 0.4249084249084249, "train_speed(iter/s)": 0.671296 }, { "epoch": 0.13238507347585793, "grad_norm": 2.5000171661376953, "learning_rate": 9.98273505030155e-05, "loss": 2.5438875198364257, "memory(GiB)": 66.08, "step": 3090, "token_acc": 0.4555256064690027, "train_speed(iter/s)": 0.671363 }, { "epoch": 0.13259928880510688, "grad_norm": 3.524142026901245, "learning_rate": 9.982679127555306e-05, "loss": 2.2062376022338865, "memory(GiB)": 66.08, "step": 3095, "token_acc": 0.5366795366795367, "train_speed(iter/s)": 0.671503 }, { "epoch": 0.13281350413435586, "grad_norm": 2.6530601978302, "learning_rate": 
9.982623114543152e-05, "loss": 2.556437301635742, "memory(GiB)": 66.08, "step": 3100, "token_acc": 0.4236111111111111, "train_speed(iter/s)": 0.671685 }, { "epoch": 0.1330277194636048, "grad_norm": 2.68041729927063, "learning_rate": 9.982567011266108e-05, "loss": 2.710948181152344, "memory(GiB)": 66.08, "step": 3105, "token_acc": 0.4533333333333333, "train_speed(iter/s)": 0.671782 }, { "epoch": 0.1332419347928538, "grad_norm": 2.46380352973938, "learning_rate": 9.98251081772519e-05, "loss": 2.489961624145508, "memory(GiB)": 66.08, "step": 3110, "token_acc": 0.49855907780979825, "train_speed(iter/s)": 0.67185 }, { "epoch": 0.13345615012210274, "grad_norm": 3.072028875350952, "learning_rate": 9.982454533921412e-05, "loss": 2.2892168045043944, "memory(GiB)": 66.08, "step": 3115, "token_acc": 0.5, "train_speed(iter/s)": 0.671818 }, { "epoch": 0.1336703654513517, "grad_norm": 3.2767722606658936, "learning_rate": 9.9823981598558e-05, "loss": 2.525812339782715, "memory(GiB)": 66.08, "step": 3120, "token_acc": 0.4659090909090909, "train_speed(iter/s)": 0.671955 }, { "epoch": 0.13388458078060067, "grad_norm": 2.5438930988311768, "learning_rate": 9.982341695529367e-05, "loss": 2.568071746826172, "memory(GiB)": 66.08, "step": 3125, "token_acc": 0.5052631578947369, "train_speed(iter/s)": 0.672123 }, { "epoch": 0.13409879610984962, "grad_norm": 2.883554458618164, "learning_rate": 9.982285140943142e-05, "loss": 2.524260139465332, "memory(GiB)": 66.08, "step": 3130, "token_acc": 0.45791245791245794, "train_speed(iter/s)": 0.672221 }, { "epoch": 0.13431301143909857, "grad_norm": 3.1501333713531494, "learning_rate": 9.98222849609815e-05, "loss": 2.197132682800293, "memory(GiB)": 66.08, "step": 3135, "token_acc": 0.5071942446043165, "train_speed(iter/s)": 0.672344 }, { "epoch": 0.13452722676834755, "grad_norm": 2.7207987308502197, "learning_rate": 9.982171760995412e-05, "loss": 2.537405776977539, "memory(GiB)": 66.08, "step": 3140, "token_acc": 0.4525993883792049, 
"train_speed(iter/s)": 0.672352 }, { "epoch": 0.1347414420975965, "grad_norm": 3.552762031555176, "learning_rate": 9.98211493563596e-05, "loss": 2.2295379638671875, "memory(GiB)": 66.08, "step": 3145, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 0.672442 }, { "epoch": 0.13495565742684545, "grad_norm": 3.26625919342041, "learning_rate": 9.982058020020823e-05, "loss": 2.398886299133301, "memory(GiB)": 66.08, "step": 3150, "token_acc": 0.4921135646687697, "train_speed(iter/s)": 0.672536 }, { "epoch": 0.13516987275609443, "grad_norm": 3.9686412811279297, "learning_rate": 9.982001014151032e-05, "loss": 2.704905128479004, "memory(GiB)": 66.08, "step": 3155, "token_acc": 0.4470198675496689, "train_speed(iter/s)": 0.672336 }, { "epoch": 0.13538408808534338, "grad_norm": 2.36649227142334, "learning_rate": 9.981943918027617e-05, "loss": 2.7010194778442385, "memory(GiB)": 66.08, "step": 3160, "token_acc": 0.44041450777202074, "train_speed(iter/s)": 0.672463 }, { "epoch": 0.13559830341459234, "grad_norm": 3.14225697517395, "learning_rate": 9.981886731651614e-05, "loss": 2.274305725097656, "memory(GiB)": 66.08, "step": 3165, "token_acc": 0.4969512195121951, "train_speed(iter/s)": 0.672471 }, { "epoch": 0.13581251874384132, "grad_norm": 3.8941915035247803, "learning_rate": 9.981829455024063e-05, "loss": 2.6403829574584963, "memory(GiB)": 66.08, "step": 3170, "token_acc": 0.45, "train_speed(iter/s)": 0.67287 }, { "epoch": 0.13602673407309027, "grad_norm": 3.2528886795043945, "learning_rate": 9.981772088145995e-05, "loss": 2.4642412185668947, "memory(GiB)": 66.08, "step": 3175, "token_acc": 0.483974358974359, "train_speed(iter/s)": 0.67299 }, { "epoch": 0.13624094940233922, "grad_norm": 2.7473347187042236, "learning_rate": 9.981714631018455e-05, "loss": 2.489777755737305, "memory(GiB)": 66.08, "step": 3180, "token_acc": 0.506896551724138, "train_speed(iter/s)": 0.672959 }, { "epoch": 0.1364551647315882, "grad_norm": 3.198948621749878, "learning_rate": 
9.98165708364248e-05, "loss": 2.6748119354248048, "memory(GiB)": 66.08, "step": 3185, "token_acc": 0.421875, "train_speed(iter/s)": 0.673048 }, { "epoch": 0.13666938006083715, "grad_norm": 2.9391751289367676, "learning_rate": 9.981599446019112e-05, "loss": 2.452955436706543, "memory(GiB)": 66.08, "step": 3190, "token_acc": 0.46629213483146065, "train_speed(iter/s)": 0.673244 }, { "epoch": 0.13688359539008613, "grad_norm": 2.9633681774139404, "learning_rate": 9.981541718149399e-05, "loss": 2.7320032119750977, "memory(GiB)": 66.08, "step": 3195, "token_acc": 0.4673913043478261, "train_speed(iter/s)": 0.673368 }, { "epoch": 0.13709781071933508, "grad_norm": 3.479557752609253, "learning_rate": 9.981483900034384e-05, "loss": 2.6045103073120117, "memory(GiB)": 66.08, "step": 3200, "token_acc": 0.4562043795620438, "train_speed(iter/s)": 0.67328 }, { "epoch": 0.13731202604858403, "grad_norm": 2.8672525882720947, "learning_rate": 9.981425991675116e-05, "loss": 2.4178558349609376, "memory(GiB)": 66.08, "step": 3205, "token_acc": 0.5181159420289855, "train_speed(iter/s)": 0.67331 }, { "epoch": 0.137526241377833, "grad_norm": 3.1268529891967773, "learning_rate": 9.981367993072643e-05, "loss": 2.5971961975097657, "memory(GiB)": 66.08, "step": 3210, "token_acc": 0.47692307692307695, "train_speed(iter/s)": 0.673105 }, { "epoch": 0.13774045670708196, "grad_norm": 2.908989429473877, "learning_rate": 9.981309904228014e-05, "loss": 2.273652458190918, "memory(GiB)": 66.08, "step": 3215, "token_acc": 0.47843137254901963, "train_speed(iter/s)": 0.673276 }, { "epoch": 0.1379546720363309, "grad_norm": 4.220602989196777, "learning_rate": 9.981251725142285e-05, "loss": 2.474073791503906, "memory(GiB)": 66.08, "step": 3220, "token_acc": 0.46206896551724136, "train_speed(iter/s)": 0.67323 }, { "epoch": 0.1381688873655799, "grad_norm": 2.9157259464263916, "learning_rate": 9.981193455816508e-05, "loss": 2.2235231399536133, "memory(GiB)": 66.08, "step": 3225, "token_acc": 0.5444839857651246, 
"train_speed(iter/s)": 0.673333 }, { "epoch": 0.13838310269482884, "grad_norm": 5.645750522613525, "learning_rate": 9.981135096251739e-05, "loss": 2.265762710571289, "memory(GiB)": 66.08, "step": 3230, "token_acc": 0.5034722222222222, "train_speed(iter/s)": 0.673283 }, { "epoch": 0.1385973180240778, "grad_norm": 3.218817949295044, "learning_rate": 9.981076646449034e-05, "loss": 2.5655691146850588, "memory(GiB)": 66.08, "step": 3235, "token_acc": 0.5062893081761006, "train_speed(iter/s)": 0.673239 }, { "epoch": 0.13881153335332677, "grad_norm": 4.30263090133667, "learning_rate": 9.981018106409454e-05, "loss": 2.576963996887207, "memory(GiB)": 66.08, "step": 3240, "token_acc": 0.4865771812080537, "train_speed(iter/s)": 0.673111 }, { "epoch": 0.13902574868257572, "grad_norm": 3.9758152961730957, "learning_rate": 9.980959476134058e-05, "loss": 2.317298698425293, "memory(GiB)": 66.08, "step": 3245, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.672884 }, { "epoch": 0.13923996401182467, "grad_norm": 2.6875133514404297, "learning_rate": 9.980900755623907e-05, "loss": 2.203076934814453, "memory(GiB)": 66.08, "step": 3250, "token_acc": 0.5241379310344828, "train_speed(iter/s)": 0.672874 }, { "epoch": 0.13945417934107365, "grad_norm": 3.1011769771575928, "learning_rate": 9.980841944880069e-05, "loss": 2.296177291870117, "memory(GiB)": 66.08, "step": 3255, "token_acc": 0.5163636363636364, "train_speed(iter/s)": 0.67278 }, { "epoch": 0.1396683946703226, "grad_norm": 2.027205228805542, "learning_rate": 9.980783043903606e-05, "loss": 2.4704776763916017, "memory(GiB)": 66.08, "step": 3260, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.672861 }, { "epoch": 0.13988260999957156, "grad_norm": 3.114534616470337, "learning_rate": 9.980724052695584e-05, "loss": 2.5387853622436523, "memory(GiB)": 66.08, "step": 3265, "token_acc": 0.4519230769230769, "train_speed(iter/s)": 0.672977 }, { "epoch": 0.14009682532882053, "grad_norm": 2.2828752994537354, "learning_rate": 
9.980664971257077e-05, "loss": 2.478097343444824, "memory(GiB)": 66.08, "step": 3270, "token_acc": 0.5, "train_speed(iter/s)": 0.673185 }, { "epoch": 0.14031104065806949, "grad_norm": 3.177186965942383, "learning_rate": 9.98060579958915e-05, "loss": 2.4642093658447264, "memory(GiB)": 66.08, "step": 3275, "token_acc": 0.48623853211009177, "train_speed(iter/s)": 0.673262 }, { "epoch": 0.14052525598731846, "grad_norm": 2.938109874725342, "learning_rate": 9.980546537692876e-05, "loss": 2.405270576477051, "memory(GiB)": 66.08, "step": 3280, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.67354 }, { "epoch": 0.14073947131656742, "grad_norm": 4.008969306945801, "learning_rate": 9.98048718556933e-05, "loss": 2.3953346252441405, "memory(GiB)": 66.08, "step": 3285, "token_acc": 0.4942528735632184, "train_speed(iter/s)": 0.673717 }, { "epoch": 0.14095368664581637, "grad_norm": 2.522555351257324, "learning_rate": 9.980427743219586e-05, "loss": 2.577300262451172, "memory(GiB)": 66.08, "step": 3290, "token_acc": 0.4457831325301205, "train_speed(iter/s)": 0.67395 }, { "epoch": 0.14116790197506535, "grad_norm": 3.4770243167877197, "learning_rate": 9.980368210644722e-05, "loss": 2.4622529983520507, "memory(GiB)": 66.08, "step": 3295, "token_acc": 0.5, "train_speed(iter/s)": 0.674031 }, { "epoch": 0.1413821173043143, "grad_norm": 5.0451250076293945, "learning_rate": 9.980308587845816e-05, "loss": 2.8392377853393556, "memory(GiB)": 66.08, "step": 3300, "token_acc": 0.49034749034749037, "train_speed(iter/s)": 0.67401 }, { "epoch": 0.14159633263356325, "grad_norm": 3.231117010116577, "learning_rate": 9.980248874823948e-05, "loss": 2.492647171020508, "memory(GiB)": 66.08, "step": 3305, "token_acc": 0.4518950437317784, "train_speed(iter/s)": 0.674191 }, { "epoch": 0.14181054796281223, "grad_norm": 3.5153865814208984, "learning_rate": 9.9801890715802e-05, "loss": 2.4350595474243164, "memory(GiB)": 66.08, "step": 3310, "token_acc": 0.5080385852090032, "train_speed(iter/s)": 
0.674277 }, { "epoch": 0.14202476329206118, "grad_norm": 3.027200937271118, "learning_rate": 9.980129178115653e-05, "loss": 2.5067649841308595, "memory(GiB)": 66.08, "step": 3315, "token_acc": 0.45692883895131087, "train_speed(iter/s)": 0.673913 }, { "epoch": 0.14223897862131013, "grad_norm": 2.4559576511383057, "learning_rate": 9.980069194431397e-05, "loss": 2.553415870666504, "memory(GiB)": 66.08, "step": 3320, "token_acc": 0.42168674698795183, "train_speed(iter/s)": 0.674042 }, { "epoch": 0.1424531939505591, "grad_norm": 2.6163811683654785, "learning_rate": 9.980009120528515e-05, "loss": 2.384198760986328, "memory(GiB)": 66.08, "step": 3325, "token_acc": 0.5141843971631206, "train_speed(iter/s)": 0.67396 }, { "epoch": 0.14266740927980806, "grad_norm": 2.5201408863067627, "learning_rate": 9.979948956408096e-05, "loss": 2.637347602844238, "memory(GiB)": 66.08, "step": 3330, "token_acc": 0.46827794561933533, "train_speed(iter/s)": 0.673922 }, { "epoch": 0.142881624609057, "grad_norm": 2.3287878036499023, "learning_rate": 9.979888702071229e-05, "loss": 2.4675678253173827, "memory(GiB)": 66.08, "step": 3335, "token_acc": 0.48297213622291024, "train_speed(iter/s)": 0.674301 }, { "epoch": 0.143095839938306, "grad_norm": 2.6578218936920166, "learning_rate": 9.979828357519007e-05, "loss": 2.2502630233764647, "memory(GiB)": 66.08, "step": 3340, "token_acc": 0.47601476014760147, "train_speed(iter/s)": 0.674369 }, { "epoch": 0.14331005526755494, "grad_norm": 3.60438871383667, "learning_rate": 9.979767922752525e-05, "loss": 2.6308883666992187, "memory(GiB)": 66.08, "step": 3345, "token_acc": 0.43492063492063493, "train_speed(iter/s)": 0.674159 }, { "epoch": 0.1435242705968039, "grad_norm": 3.83205509185791, "learning_rate": 9.979707397772873e-05, "loss": 2.474849510192871, "memory(GiB)": 66.08, "step": 3350, "token_acc": 0.4524590163934426, "train_speed(iter/s)": 0.674472 }, { "epoch": 0.14373848592605287, "grad_norm": 2.7548282146453857, "learning_rate": 
9.979646782581153e-05, "loss": 2.3720842361450196, "memory(GiB)": 66.08, "step": 3355, "token_acc": 0.4759036144578313, "train_speed(iter/s)": 0.674578 }, { "epoch": 0.14395270125530182, "grad_norm": 3.6524100303649902, "learning_rate": 9.979586077178458e-05, "loss": 2.425167465209961, "memory(GiB)": 66.08, "step": 3360, "token_acc": 0.5167785234899329, "train_speed(iter/s)": 0.674746 }, { "epoch": 0.1441669165845508, "grad_norm": 5.2183613777160645, "learning_rate": 9.979525281565891e-05, "loss": 2.6274032592773438, "memory(GiB)": 66.08, "step": 3365, "token_acc": 0.40468227424749165, "train_speed(iter/s)": 0.674933 }, { "epoch": 0.14438113191379975, "grad_norm": 2.713819742202759, "learning_rate": 9.979464395744551e-05, "loss": 2.6825632095336913, "memory(GiB)": 66.08, "step": 3370, "token_acc": 0.4421965317919075, "train_speed(iter/s)": 0.674989 }, { "epoch": 0.1445953472430487, "grad_norm": 2.4464988708496094, "learning_rate": 9.979403419715544e-05, "loss": 2.7259237289428713, "memory(GiB)": 66.08, "step": 3375, "token_acc": 0.43870967741935485, "train_speed(iter/s)": 0.675097 }, { "epoch": 0.14480956257229768, "grad_norm": 3.536633253097534, "learning_rate": 9.979342353479972e-05, "loss": 2.3539953231811523, "memory(GiB)": 66.08, "step": 3380, "token_acc": 0.5114503816793893, "train_speed(iter/s)": 0.67512 }, { "epoch": 0.14502377790154664, "grad_norm": 2.8293545246124268, "learning_rate": 9.979281197038943e-05, "loss": 2.36797981262207, "memory(GiB)": 66.08, "step": 3385, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.675085 }, { "epoch": 0.1452379932307956, "grad_norm": 2.8072891235351562, "learning_rate": 9.979219950393564e-05, "loss": 2.1941812515258787, "memory(GiB)": 66.08, "step": 3390, "token_acc": 0.5368098159509203, "train_speed(iter/s)": 0.675115 }, { "epoch": 0.14545220856004457, "grad_norm": 3.244205951690674, "learning_rate": 9.979158613544943e-05, "loss": 2.290471649169922, "memory(GiB)": 66.08, "step": 3395, "token_acc": 
0.49606299212598426, "train_speed(iter/s)": 0.675034 }, { "epoch": 0.14566642388929352, "grad_norm": 2.744187116622925, "learning_rate": 9.979097186494195e-05, "loss": 2.2969547271728517, "memory(GiB)": 66.08, "step": 3400, "token_acc": 0.4781144781144781, "train_speed(iter/s)": 0.675225 }, { "epoch": 0.14588063921854247, "grad_norm": 3.6334445476531982, "learning_rate": 9.97903566924243e-05, "loss": 2.5638803482055663, "memory(GiB)": 66.08, "step": 3405, "token_acc": 0.48828125, "train_speed(iter/s)": 0.675304 }, { "epoch": 0.14609485454779145, "grad_norm": 3.880031108856201, "learning_rate": 9.978974061790764e-05, "loss": 2.4868843078613283, "memory(GiB)": 66.08, "step": 3410, "token_acc": 0.45555555555555555, "train_speed(iter/s)": 0.675347 }, { "epoch": 0.1463090698770404, "grad_norm": 3.143080949783325, "learning_rate": 9.978912364140311e-05, "loss": 2.593165969848633, "memory(GiB)": 66.08, "step": 3415, "token_acc": 0.4825174825174825, "train_speed(iter/s)": 0.675494 }, { "epoch": 0.14652328520628935, "grad_norm": 5.640478610992432, "learning_rate": 9.97885057629219e-05, "loss": 2.3622629165649416, "memory(GiB)": 66.08, "step": 3420, "token_acc": 0.49382716049382713, "train_speed(iter/s)": 0.675526 }, { "epoch": 0.14673750053553833, "grad_norm": 2.162328004837036, "learning_rate": 9.97878869824752e-05, "loss": 2.2938053131103517, "memory(GiB)": 66.08, "step": 3425, "token_acc": 0.5083612040133779, "train_speed(iter/s)": 0.67555 }, { "epoch": 0.14695171586478728, "grad_norm": 3.031773090362549, "learning_rate": 9.978726730007424e-05, "loss": 2.36429443359375, "memory(GiB)": 66.08, "step": 3430, "token_acc": 0.48756218905472637, "train_speed(iter/s)": 0.675582 }, { "epoch": 0.14716593119403626, "grad_norm": 2.6687443256378174, "learning_rate": 9.978664671573021e-05, "loss": 2.3895545959472657, "memory(GiB)": 66.08, "step": 3435, "token_acc": 0.5017543859649123, "train_speed(iter/s)": 0.675532 }, { "epoch": 0.1473801465232852, "grad_norm": 3.0625689029693604, 
"learning_rate": 9.978602522945437e-05, "loss": 2.7431121826171876, "memory(GiB)": 66.08, "step": 3440, "token_acc": 0.45054945054945056, "train_speed(iter/s)": 0.675565 }, { "epoch": 0.14759436185253416, "grad_norm": 2.796128511428833, "learning_rate": 9.978540284125799e-05, "loss": 2.390386390686035, "memory(GiB)": 66.08, "step": 3445, "token_acc": 0.49185667752442996, "train_speed(iter/s)": 0.675824 }, { "epoch": 0.14780857718178314, "grad_norm": 2.619216203689575, "learning_rate": 9.978477955115234e-05, "loss": 2.3830778121948244, "memory(GiB)": 66.08, "step": 3450, "token_acc": 0.5173501577287066, "train_speed(iter/s)": 0.675969 }, { "epoch": 0.1480227925110321, "grad_norm": 2.8409273624420166, "learning_rate": 9.97841553591487e-05, "loss": 2.606410598754883, "memory(GiB)": 66.08, "step": 3455, "token_acc": 0.44482758620689655, "train_speed(iter/s)": 0.675838 }, { "epoch": 0.14823700784028104, "grad_norm": 2.779782295227051, "learning_rate": 9.978353026525838e-05, "loss": 2.426848602294922, "memory(GiB)": 66.08, "step": 3460, "token_acc": 0.5140562248995983, "train_speed(iter/s)": 0.675816 }, { "epoch": 0.14845122316953002, "grad_norm": 2.6998980045318604, "learning_rate": 9.978290426949271e-05, "loss": 2.438062286376953, "memory(GiB)": 66.08, "step": 3465, "token_acc": 0.5229007633587787, "train_speed(iter/s)": 0.675763 }, { "epoch": 0.14866543849877897, "grad_norm": 2.4258036613464355, "learning_rate": 9.978227737186303e-05, "loss": 2.3536455154418947, "memory(GiB)": 66.08, "step": 3470, "token_acc": 0.5444839857651246, "train_speed(iter/s)": 0.675714 }, { "epoch": 0.14887965382802792, "grad_norm": 2.6167373657226562, "learning_rate": 9.978164957238069e-05, "loss": 2.2665355682373045, "memory(GiB)": 66.08, "step": 3475, "token_acc": 0.48056537102473496, "train_speed(iter/s)": 0.675567 }, { "epoch": 0.1490938691572769, "grad_norm": 3.133150339126587, "learning_rate": 9.978102087105709e-05, "loss": 2.326369285583496, "memory(GiB)": 66.08, "step": 3480, 
"token_acc": 0.4817073170731707, "train_speed(iter/s)": 0.675452 }, { "epoch": 0.14930808448652585, "grad_norm": 2.4560134410858154, "learning_rate": 9.978039126790359e-05, "loss": 2.7176990509033203, "memory(GiB)": 66.08, "step": 3485, "token_acc": 0.4440677966101695, "train_speed(iter/s)": 0.675501 }, { "epoch": 0.1495222998157748, "grad_norm": 2.5334649085998535, "learning_rate": 9.97797607629316e-05, "loss": 2.5066354751586912, "memory(GiB)": 66.08, "step": 3490, "token_acc": 0.4460227272727273, "train_speed(iter/s)": 0.675836 }, { "epoch": 0.14973651514502379, "grad_norm": 2.4759418964385986, "learning_rate": 9.977912935615255e-05, "loss": 2.321665573120117, "memory(GiB)": 66.08, "step": 3495, "token_acc": 0.5144694533762058, "train_speed(iter/s)": 0.675923 }, { "epoch": 0.14995073047427274, "grad_norm": 3.058236837387085, "learning_rate": 9.977849704757787e-05, "loss": 2.1650741577148436, "memory(GiB)": 66.08, "step": 3500, "token_acc": 0.5028901734104047, "train_speed(iter/s)": 0.675865 }, { "epoch": 0.14995073047427274, "eval_loss": 2.081510543823242, "eval_runtime": 17.4435, "eval_samples_per_second": 5.733, "eval_steps_per_second": 5.733, "eval_token_acc": 0.506056527590848, "step": 3500 }, { "epoch": 0.1501649458035217, "grad_norm": 3.068471670150757, "learning_rate": 9.9777863837219e-05, "loss": 2.157304382324219, "memory(GiB)": 66.08, "step": 3505, "token_acc": 0.5092783505154639, "train_speed(iter/s)": 0.673419 }, { "epoch": 0.15037916113277067, "grad_norm": 2.4109959602355957, "learning_rate": 9.977722972508747e-05, "loss": 2.645260238647461, "memory(GiB)": 66.08, "step": 3510, "token_acc": 0.4387755102040816, "train_speed(iter/s)": 0.673435 }, { "epoch": 0.15059337646201962, "grad_norm": 3.260352849960327, "learning_rate": 9.977659471119471e-05, "loss": 2.2500682830810548, "memory(GiB)": 66.08, "step": 3515, "token_acc": 0.5020080321285141, "train_speed(iter/s)": 0.673419 }, { "epoch": 0.1508075917912686, "grad_norm": 3.359271764755249, 
"learning_rate": 9.977595879555224e-05, "loss": 2.5575416564941404, "memory(GiB)": 66.08, "step": 3520, "token_acc": 0.49206349206349204, "train_speed(iter/s)": 0.673525 }, { "epoch": 0.15102180712051755, "grad_norm": 3.4993200302124023, "learning_rate": 9.977532197817158e-05, "loss": 2.2911861419677733, "memory(GiB)": 66.08, "step": 3525, "token_acc": 0.5076335877862596, "train_speed(iter/s)": 0.673728 }, { "epoch": 0.1512360224497665, "grad_norm": 4.294642925262451, "learning_rate": 9.977468425906428e-05, "loss": 2.2393001556396483, "memory(GiB)": 66.08, "step": 3530, "token_acc": 0.47560975609756095, "train_speed(iter/s)": 0.67385 }, { "epoch": 0.15145023777901548, "grad_norm": 2.5273807048797607, "learning_rate": 9.977404563824188e-05, "loss": 2.707880401611328, "memory(GiB)": 66.08, "step": 3535, "token_acc": 0.4657534246575342, "train_speed(iter/s)": 0.674029 }, { "epoch": 0.15166445310826443, "grad_norm": 2.74422550201416, "learning_rate": 9.977340611571595e-05, "loss": 2.5067506790161134, "memory(GiB)": 66.08, "step": 3540, "token_acc": 0.4885245901639344, "train_speed(iter/s)": 0.673876 }, { "epoch": 0.15187866843751338, "grad_norm": 3.0281453132629395, "learning_rate": 9.977276569149807e-05, "loss": 2.531802749633789, "memory(GiB)": 66.08, "step": 3545, "token_acc": 0.5016181229773463, "train_speed(iter/s)": 0.674145 }, { "epoch": 0.15209288376676236, "grad_norm": 2.462907552719116, "learning_rate": 9.977212436559986e-05, "loss": 2.4649074554443358, "memory(GiB)": 66.08, "step": 3550, "token_acc": 0.461038961038961, "train_speed(iter/s)": 0.674115 }, { "epoch": 0.1523070990960113, "grad_norm": 3.022834062576294, "learning_rate": 9.977148213803291e-05, "loss": 2.314411163330078, "memory(GiB)": 66.08, "step": 3555, "token_acc": 0.4843205574912892, "train_speed(iter/s)": 0.674154 }, { "epoch": 0.15252131442526026, "grad_norm": 4.180974006652832, "learning_rate": 9.97708390088089e-05, "loss": 2.320064735412598, "memory(GiB)": 66.08, "step": 3560, "token_acc": 
0.5228215767634855, "train_speed(iter/s)": 0.674342 }, { "epoch": 0.15273552975450924, "grad_norm": 3.6872169971466064, "learning_rate": 9.977019497793942e-05, "loss": 2.4808958053588865, "memory(GiB)": 66.08, "step": 3565, "token_acc": 0.477124183006536, "train_speed(iter/s)": 0.674166 }, { "epoch": 0.1529497450837582, "grad_norm": 2.72527813911438, "learning_rate": 9.97695500454362e-05, "loss": 2.546189308166504, "memory(GiB)": 66.08, "step": 3570, "token_acc": 0.5095541401273885, "train_speed(iter/s)": 0.673997 }, { "epoch": 0.15316396041300714, "grad_norm": 4.46050500869751, "learning_rate": 9.976890421131087e-05, "loss": 2.6079328536987303, "memory(GiB)": 66.08, "step": 3575, "token_acc": 0.44, "train_speed(iter/s)": 0.673787 }, { "epoch": 0.15337817574225612, "grad_norm": 2.6307482719421387, "learning_rate": 9.976825747557516e-05, "loss": 2.476240348815918, "memory(GiB)": 66.08, "step": 3580, "token_acc": 0.4787234042553192, "train_speed(iter/s)": 0.673823 }, { "epoch": 0.15359239107150507, "grad_norm": 5.192610263824463, "learning_rate": 9.976760983824079e-05, "loss": 2.4180938720703127, "memory(GiB)": 66.08, "step": 3585, "token_acc": 0.42911877394636017, "train_speed(iter/s)": 0.673638 }, { "epoch": 0.15380660640075403, "grad_norm": 5.075532913208008, "learning_rate": 9.976696129931948e-05, "loss": 2.3770755767822265, "memory(GiB)": 66.08, "step": 3590, "token_acc": 0.4981549815498155, "train_speed(iter/s)": 0.673582 }, { "epoch": 0.154020821730003, "grad_norm": 4.3526411056518555, "learning_rate": 9.976631185882297e-05, "loss": 2.416814994812012, "memory(GiB)": 66.08, "step": 3595, "token_acc": 0.48134328358208955, "train_speed(iter/s)": 0.673649 }, { "epoch": 0.15423503705925196, "grad_norm": 2.8748972415924072, "learning_rate": 9.976566151676303e-05, "loss": 2.415551948547363, "memory(GiB)": 66.08, "step": 3600, "token_acc": 0.4891304347826087, "train_speed(iter/s)": 0.673386 }, { "epoch": 0.15444925238850093, "grad_norm": 3.201450824737549, 
"learning_rate": 9.976501027315146e-05, "loss": 2.6040138244628905, "memory(GiB)": 66.08, "step": 3605, "token_acc": 0.47540983606557374, "train_speed(iter/s)": 0.673419 }, { "epoch": 0.1546634677177499, "grad_norm": 7.936071395874023, "learning_rate": 9.976435812800005e-05, "loss": 2.083328628540039, "memory(GiB)": 66.08, "step": 3610, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.673297 }, { "epoch": 0.15487768304699884, "grad_norm": 3.400327444076538, "learning_rate": 9.97637050813206e-05, "loss": 2.402189826965332, "memory(GiB)": 66.08, "step": 3615, "token_acc": 0.46229508196721314, "train_speed(iter/s)": 0.673358 }, { "epoch": 0.15509189837624782, "grad_norm": 2.942707061767578, "learning_rate": 9.976305113312497e-05, "loss": 2.4854368209838866, "memory(GiB)": 66.08, "step": 3620, "token_acc": 0.5401459854014599, "train_speed(iter/s)": 0.673564 }, { "epoch": 0.15530611370549677, "grad_norm": 3.568793296813965, "learning_rate": 9.976239628342496e-05, "loss": 2.507552719116211, "memory(GiB)": 66.08, "step": 3625, "token_acc": 0.46441947565543074, "train_speed(iter/s)": 0.673536 }, { "epoch": 0.15552032903474572, "grad_norm": 1.975095510482788, "learning_rate": 9.976174053223247e-05, "loss": 2.3219831466674803, "memory(GiB)": 66.08, "step": 3630, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.673296 }, { "epoch": 0.1557345443639947, "grad_norm": 2.8084235191345215, "learning_rate": 9.976108387955938e-05, "loss": 2.4703983306884765, "memory(GiB)": 66.08, "step": 3635, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.673226 }, { "epoch": 0.15594875969324365, "grad_norm": 2.4559309482574463, "learning_rate": 9.976042632541756e-05, "loss": 2.4644561767578126, "memory(GiB)": 66.08, "step": 3640, "token_acc": 0.4707792207792208, "train_speed(iter/s)": 0.673217 }, { "epoch": 0.1561629750224926, "grad_norm": 3.574251651763916, "learning_rate": 9.975976786981895e-05, "loss": 2.2877758026123045, "memory(GiB)": 66.08, "step": 3645, 
"token_acc": 0.5083612040133779, "train_speed(iter/s)": 0.673257 }, { "epoch": 0.15637719035174158, "grad_norm": 4.539845943450928, "learning_rate": 9.975910851277546e-05, "loss": 2.487049865722656, "memory(GiB)": 66.08, "step": 3650, "token_acc": 0.4646153846153846, "train_speed(iter/s)": 0.673068 }, { "epoch": 0.15659140568099053, "grad_norm": 2.6751949787139893, "learning_rate": 9.975844825429904e-05, "loss": 2.69995174407959, "memory(GiB)": 66.08, "step": 3655, "token_acc": 0.45652173913043476, "train_speed(iter/s)": 0.672928 }, { "epoch": 0.15680562101023948, "grad_norm": 3.111477851867676, "learning_rate": 9.975778709440167e-05, "loss": 2.365696334838867, "memory(GiB)": 66.08, "step": 3660, "token_acc": 0.4549019607843137, "train_speed(iter/s)": 0.672782 }, { "epoch": 0.15701983633948846, "grad_norm": 2.515302896499634, "learning_rate": 9.975712503309529e-05, "loss": 2.505581855773926, "memory(GiB)": 66.08, "step": 3665, "token_acc": 0.5, "train_speed(iter/s)": 0.672707 }, { "epoch": 0.1572340516687374, "grad_norm": 2.8717424869537354, "learning_rate": 9.975646207039192e-05, "loss": 2.3996294021606444, "memory(GiB)": 66.08, "step": 3670, "token_acc": 0.47735191637630664, "train_speed(iter/s)": 0.672626 }, { "epoch": 0.15744826699798636, "grad_norm": 2.3637607097625732, "learning_rate": 9.975579820630357e-05, "loss": 2.0933155059814452, "memory(GiB)": 66.08, "step": 3675, "token_acc": 0.5131086142322098, "train_speed(iter/s)": 0.672585 }, { "epoch": 0.15766248232723534, "grad_norm": 2.5624704360961914, "learning_rate": 9.975513344084223e-05, "loss": 2.1567874908447267, "memory(GiB)": 66.08, "step": 3680, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.67281 }, { "epoch": 0.1578766976564843, "grad_norm": 2.634147882461548, "learning_rate": 9.975446777402002e-05, "loss": 2.7107702255249024, "memory(GiB)": 66.08, "step": 3685, "token_acc": 0.46254071661237783, "train_speed(iter/s)": 0.672948 }, { "epoch": 0.15809091298573327, "grad_norm": 
2.333737850189209, "learning_rate": 9.975380120584893e-05, "loss": 2.697900390625, "memory(GiB)": 66.08, "step": 3690, "token_acc": 0.41830065359477125, "train_speed(iter/s)": 0.672907 }, { "epoch": 0.15830512831498222, "grad_norm": 2.4810445308685303, "learning_rate": 9.975313373634106e-05, "loss": 2.379864501953125, "memory(GiB)": 66.08, "step": 3695, "token_acc": 0.45819397993311034, "train_speed(iter/s)": 0.672925 }, { "epoch": 0.15851934364423118, "grad_norm": 3.5084640979766846, "learning_rate": 9.97524653655085e-05, "loss": 2.381480407714844, "memory(GiB)": 66.08, "step": 3700, "token_acc": 0.5342465753424658, "train_speed(iter/s)": 0.672718 }, { "epoch": 0.15873355897348015, "grad_norm": 2.693776845932007, "learning_rate": 9.975179609336336e-05, "loss": 2.467207908630371, "memory(GiB)": 66.08, "step": 3705, "token_acc": 0.49140893470790376, "train_speed(iter/s)": 0.672866 }, { "epoch": 0.1589477743027291, "grad_norm": 2.508188247680664, "learning_rate": 9.975112591991776e-05, "loss": 2.5250904083251955, "memory(GiB)": 67.19, "step": 3710, "token_acc": 0.48942598187311176, "train_speed(iter/s)": 0.672562 }, { "epoch": 0.15916198963197806, "grad_norm": 3.35979962348938, "learning_rate": 9.975045484518385e-05, "loss": 1.8914100646972656, "memory(GiB)": 67.19, "step": 3715, "token_acc": 0.5550239234449761, "train_speed(iter/s)": 0.672448 }, { "epoch": 0.15937620496122704, "grad_norm": 3.151909589767456, "learning_rate": 9.974978286917378e-05, "loss": 2.5239688873291017, "memory(GiB)": 67.19, "step": 3720, "token_acc": 0.478125, "train_speed(iter/s)": 0.672444 }, { "epoch": 0.159590420290476, "grad_norm": 3.6484310626983643, "learning_rate": 9.974910999189971e-05, "loss": 2.3025287628173827, "memory(GiB)": 67.19, "step": 3725, "token_acc": 0.5, "train_speed(iter/s)": 0.672524 }, { "epoch": 0.15980463561972494, "grad_norm": 3.504997968673706, "learning_rate": 9.974843621337386e-05, "loss": 2.315679359436035, "memory(GiB)": 67.19, "step": 3730, "token_acc": 
0.4738562091503268, "train_speed(iter/s)": 0.672627 }, { "epoch": 0.16001885094897392, "grad_norm": 2.7569580078125, "learning_rate": 9.974776153360841e-05, "loss": 2.341358757019043, "memory(GiB)": 67.19, "step": 3735, "token_acc": 0.45695364238410596, "train_speed(iter/s)": 0.672703 }, { "epoch": 0.16023306627822287, "grad_norm": 3.5364999771118164, "learning_rate": 9.97470859526156e-05, "loss": 2.784396934509277, "memory(GiB)": 67.19, "step": 3740, "token_acc": 0.4169278996865204, "train_speed(iter/s)": 0.672901 }, { "epoch": 0.16044728160747182, "grad_norm": 3.1889431476593018, "learning_rate": 9.974640947040766e-05, "loss": 2.3586273193359375, "memory(GiB)": 67.19, "step": 3745, "token_acc": 0.5186567164179104, "train_speed(iter/s)": 0.672944 }, { "epoch": 0.1606614969367208, "grad_norm": 3.0101819038391113, "learning_rate": 9.974573208699685e-05, "loss": 2.537346076965332, "memory(GiB)": 67.19, "step": 3750, "token_acc": 0.4716312056737589, "train_speed(iter/s)": 0.673054 }, { "epoch": 0.16087571226596975, "grad_norm": 2.8173859119415283, "learning_rate": 9.974505380239543e-05, "loss": 2.342061233520508, "memory(GiB)": 67.19, "step": 3755, "token_acc": 0.5149501661129569, "train_speed(iter/s)": 0.673221 }, { "epoch": 0.1610899275952187, "grad_norm": 3.6014180183410645, "learning_rate": 9.97443746166157e-05, "loss": 2.5509862899780273, "memory(GiB)": 67.19, "step": 3760, "token_acc": 0.459915611814346, "train_speed(iter/s)": 0.673345 }, { "epoch": 0.16130414292446768, "grad_norm": 3.008511781692505, "learning_rate": 9.974369452966997e-05, "loss": 2.4623600006103517, "memory(GiB)": 67.19, "step": 3765, "token_acc": 0.5041322314049587, "train_speed(iter/s)": 0.673389 }, { "epoch": 0.16151835825371663, "grad_norm": 2.943246364593506, "learning_rate": 9.974301354157052e-05, "loss": 2.708440971374512, "memory(GiB)": 67.19, "step": 3770, "token_acc": 0.44402985074626866, "train_speed(iter/s)": 0.673556 }, { "epoch": 0.1617325735829656, "grad_norm": 
11.854976654052734, "learning_rate": 9.974233165232974e-05, "loss": 2.5070056915283203, "memory(GiB)": 67.19, "step": 3775, "token_acc": 0.48627450980392156, "train_speed(iter/s)": 0.673739 }, { "epoch": 0.16194678891221456, "grad_norm": 2.9836513996124268, "learning_rate": 9.974164886195996e-05, "loss": 2.4914649963378905, "memory(GiB)": 67.19, "step": 3780, "token_acc": 0.5149700598802395, "train_speed(iter/s)": 0.673812 }, { "epoch": 0.1621610042414635, "grad_norm": 3.079115152359009, "learning_rate": 9.974096517047355e-05, "loss": 2.8901777267456055, "memory(GiB)": 67.19, "step": 3785, "token_acc": 0.41369047619047616, "train_speed(iter/s)": 0.673798 }, { "epoch": 0.1623752195707125, "grad_norm": 3.3935163021087646, "learning_rate": 9.974028057788288e-05, "loss": 2.384649467468262, "memory(GiB)": 67.19, "step": 3790, "token_acc": 0.5236363636363637, "train_speed(iter/s)": 0.673678 }, { "epoch": 0.16258943489996144, "grad_norm": 4.03995943069458, "learning_rate": 9.973959508420038e-05, "loss": 2.4713096618652344, "memory(GiB)": 67.19, "step": 3795, "token_acc": 0.505338078291815, "train_speed(iter/s)": 0.673555 }, { "epoch": 0.1628036502292104, "grad_norm": 2.9743566513061523, "learning_rate": 9.973890868943845e-05, "loss": 2.5042997360229493, "memory(GiB)": 67.19, "step": 3800, "token_acc": 0.5148514851485149, "train_speed(iter/s)": 0.673552 }, { "epoch": 0.16301786555845937, "grad_norm": 2.3469510078430176, "learning_rate": 9.973822139360953e-05, "loss": 2.397290802001953, "memory(GiB)": 67.19, "step": 3805, "token_acc": 0.5143603133159269, "train_speed(iter/s)": 0.673776 }, { "epoch": 0.16323208088770833, "grad_norm": 2.376286268234253, "learning_rate": 9.973753319672608e-05, "loss": 2.4628828048706053, "memory(GiB)": 67.19, "step": 3810, "token_acc": 0.43670886075949367, "train_speed(iter/s)": 0.673694 }, { "epoch": 0.16344629621695728, "grad_norm": 2.7534143924713135, "learning_rate": 9.973684409880055e-05, "loss": 2.7191226959228514, "memory(GiB)": 67.19, 
"step": 3815, "token_acc": 0.4738675958188153, "train_speed(iter/s)": 0.673654 }, { "epoch": 0.16366051154620626, "grad_norm": 3.671224594116211, "learning_rate": 9.973615409984544e-05, "loss": 2.109539604187012, "memory(GiB)": 67.19, "step": 3820, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.673544 }, { "epoch": 0.1638747268754552, "grad_norm": 4.168557643890381, "learning_rate": 9.973546319987324e-05, "loss": 2.660366249084473, "memory(GiB)": 67.19, "step": 3825, "token_acc": 0.4482758620689655, "train_speed(iter/s)": 0.673271 }, { "epoch": 0.16408894220470416, "grad_norm": 2.43388032913208, "learning_rate": 9.973477139889646e-05, "loss": 2.5762609481811523, "memory(GiB)": 67.19, "step": 3830, "token_acc": 0.4690265486725664, "train_speed(iter/s)": 0.67311 }, { "epoch": 0.16430315753395314, "grad_norm": 2.9415574073791504, "learning_rate": 9.973407869692765e-05, "loss": 2.7139434814453125, "memory(GiB)": 67.19, "step": 3835, "token_acc": 0.4982456140350877, "train_speed(iter/s)": 0.673038 }, { "epoch": 0.1645173728632021, "grad_norm": 3.1328985691070557, "learning_rate": 9.973338509397934e-05, "loss": 2.556234931945801, "memory(GiB)": 67.19, "step": 3840, "token_acc": 0.5035211267605634, "train_speed(iter/s)": 0.672794 }, { "epoch": 0.16473158819245104, "grad_norm": 2.5356268882751465, "learning_rate": 9.973269059006411e-05, "loss": 2.431744194030762, "memory(GiB)": 67.19, "step": 3845, "token_acc": 0.5018050541516246, "train_speed(iter/s)": 0.672853 }, { "epoch": 0.16494580352170002, "grad_norm": 3.052919387817383, "learning_rate": 9.973199518519456e-05, "loss": 2.495331573486328, "memory(GiB)": 67.19, "step": 3850, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.672798 }, { "epoch": 0.16516001885094897, "grad_norm": 4.516727924346924, "learning_rate": 9.973129887938324e-05, "loss": 2.5343982696533205, "memory(GiB)": 67.19, "step": 3855, "token_acc": 0.4532871972318339, "train_speed(iter/s)": 0.672897 }, { "epoch": 0.16537423418019795, 
"grad_norm": 3.304514169692993, "learning_rate": 9.973060167264279e-05, "loss": 2.2083709716796873, "memory(GiB)": 67.19, "step": 3860, "token_acc": 0.5191489361702127, "train_speed(iter/s)": 0.673122 }, { "epoch": 0.1655884495094469, "grad_norm": 2.4885690212249756, "learning_rate": 9.972990356498583e-05, "loss": 2.5585777282714846, "memory(GiB)": 67.19, "step": 3865, "token_acc": 0.4750733137829912, "train_speed(iter/s)": 0.673284 }, { "epoch": 0.16580266483869585, "grad_norm": 3.488416910171509, "learning_rate": 9.972920455642506e-05, "loss": 2.3803855895996096, "memory(GiB)": 67.19, "step": 3870, "token_acc": 0.5226480836236934, "train_speed(iter/s)": 0.672955 }, { "epoch": 0.16601688016794483, "grad_norm": 2.8538742065429688, "learning_rate": 9.972850464697306e-05, "loss": 2.681456756591797, "memory(GiB)": 67.19, "step": 3875, "token_acc": 0.4451219512195122, "train_speed(iter/s)": 0.673087 }, { "epoch": 0.16623109549719378, "grad_norm": 4.022404193878174, "learning_rate": 9.972780383664256e-05, "loss": 2.667791175842285, "memory(GiB)": 67.19, "step": 3880, "token_acc": 0.47214076246334313, "train_speed(iter/s)": 0.672976 }, { "epoch": 0.16644531082644273, "grad_norm": 4.527102947235107, "learning_rate": 9.972710212544623e-05, "loss": 2.471010208129883, "memory(GiB)": 67.19, "step": 3885, "token_acc": 0.484375, "train_speed(iter/s)": 0.673 }, { "epoch": 0.1666595261556917, "grad_norm": 3.0169601440429688, "learning_rate": 9.972639951339681e-05, "loss": 2.5410039901733397, "memory(GiB)": 67.19, "step": 3890, "token_acc": 0.5058823529411764, "train_speed(iter/s)": 0.672958 }, { "epoch": 0.16687374148494066, "grad_norm": 2.5208728313446045, "learning_rate": 9.972569600050704e-05, "loss": 2.413898468017578, "memory(GiB)": 67.19, "step": 3895, "token_acc": 0.5, "train_speed(iter/s)": 0.673031 }, { "epoch": 0.16708795681418961, "grad_norm": 2.280541181564331, "learning_rate": 9.97249915867896e-05, "loss": 2.2859512329101563, "memory(GiB)": 67.19, "step": 3900, 
"token_acc": 0.5036231884057971, "train_speed(iter/s)": 0.673169 }, { "epoch": 0.1673021721434386, "grad_norm": 2.7530629634857178, "learning_rate": 9.972428627225732e-05, "loss": 2.4183631896972657, "memory(GiB)": 67.19, "step": 3905, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.673029 }, { "epoch": 0.16751638747268754, "grad_norm": 2.252941846847534, "learning_rate": 9.972358005692293e-05, "loss": 2.263197898864746, "memory(GiB)": 67.19, "step": 3910, "token_acc": 0.5273972602739726, "train_speed(iter/s)": 0.672907 }, { "epoch": 0.1677306028019365, "grad_norm": 2.98268461227417, "learning_rate": 9.972287294079925e-05, "loss": 2.591441535949707, "memory(GiB)": 67.19, "step": 3915, "token_acc": 0.4822485207100592, "train_speed(iter/s)": 0.672985 }, { "epoch": 0.16794481813118547, "grad_norm": 2.491884231567383, "learning_rate": 9.972216492389907e-05, "loss": 2.2288835525512694, "memory(GiB)": 67.19, "step": 3920, "token_acc": 0.49466192170818507, "train_speed(iter/s)": 0.672976 }, { "epoch": 0.16815903346043443, "grad_norm": 2.8835837841033936, "learning_rate": 9.972145600623526e-05, "loss": 2.471266174316406, "memory(GiB)": 67.19, "step": 3925, "token_acc": 0.5276872964169381, "train_speed(iter/s)": 0.673002 }, { "epoch": 0.16837324878968338, "grad_norm": 2.9481897354125977, "learning_rate": 9.972074618782061e-05, "loss": 2.582161712646484, "memory(GiB)": 67.19, "step": 3930, "token_acc": 0.45901639344262296, "train_speed(iter/s)": 0.673035 }, { "epoch": 0.16858746411893236, "grad_norm": 2.6544642448425293, "learning_rate": 9.9720035468668e-05, "loss": 2.3904319763183595, "memory(GiB)": 67.19, "step": 3935, "token_acc": 0.5084033613445378, "train_speed(iter/s)": 0.672796 }, { "epoch": 0.1688016794481813, "grad_norm": 2.621678113937378, "learning_rate": 9.971932384879031e-05, "loss": 2.2702865600585938, "memory(GiB)": 67.19, "step": 3940, "token_acc": 0.5050505050505051, "train_speed(iter/s)": 0.672561 }, { "epoch": 0.1690158947774303, "grad_norm": 
3.1523549556732178, "learning_rate": 9.971861132820043e-05, "loss": 2.7577884674072264, "memory(GiB)": 67.19, "step": 3945, "token_acc": 0.4681528662420382, "train_speed(iter/s)": 0.672618 }, { "epoch": 0.16923011010667924, "grad_norm": 3.0845956802368164, "learning_rate": 9.971789790691127e-05, "loss": 2.4669849395751955, "memory(GiB)": 67.19, "step": 3950, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 0.672699 }, { "epoch": 0.1694443254359282, "grad_norm": 2.4096791744232178, "learning_rate": 9.971718358493575e-05, "loss": 2.5033151626586916, "memory(GiB)": 67.19, "step": 3955, "token_acc": 0.5016501650165016, "train_speed(iter/s)": 0.672716 }, { "epoch": 0.16965854076517717, "grad_norm": 2.6301116943359375, "learning_rate": 9.971646836228681e-05, "loss": 2.338091278076172, "memory(GiB)": 67.19, "step": 3960, "token_acc": 0.4984126984126984, "train_speed(iter/s)": 0.672685 }, { "epoch": 0.16987275609442612, "grad_norm": 2.9007487297058105, "learning_rate": 9.97157522389774e-05, "loss": 2.354108428955078, "memory(GiB)": 67.19, "step": 3965, "token_acc": 0.48846153846153845, "train_speed(iter/s)": 0.672792 }, { "epoch": 0.17008697142367507, "grad_norm": 2.292775869369507, "learning_rate": 9.97150352150205e-05, "loss": 2.411166191101074, "memory(GiB)": 67.19, "step": 3970, "token_acc": 0.4819277108433735, "train_speed(iter/s)": 0.672708 }, { "epoch": 0.17030118675292405, "grad_norm": 2.7134456634521484, "learning_rate": 9.97143172904291e-05, "loss": 2.3742923736572266, "memory(GiB)": 67.19, "step": 3975, "token_acc": 0.49624060150375937, "train_speed(iter/s)": 0.672593 }, { "epoch": 0.170515402082173, "grad_norm": 3.119363307952881, "learning_rate": 9.971359846521621e-05, "loss": 2.5269580841064454, "memory(GiB)": 67.19, "step": 3980, "token_acc": 0.4539877300613497, "train_speed(iter/s)": 0.672753 }, { "epoch": 0.17072961741142195, "grad_norm": 2.019948959350586, "learning_rate": 9.971287873939487e-05, "loss": 2.5574167251586912, "memory(GiB)": 67.19, 
"step": 3985, "token_acc": 0.4604904632152589, "train_speed(iter/s)": 0.672818 }, { "epoch": 0.17094383274067093, "grad_norm": 2.7385263442993164, "learning_rate": 9.971215811297807e-05, "loss": 2.489386558532715, "memory(GiB)": 67.19, "step": 3990, "token_acc": 0.4618181818181818, "train_speed(iter/s)": 0.672987 }, { "epoch": 0.17115804806991988, "grad_norm": 3.694329261779785, "learning_rate": 9.97114365859789e-05, "loss": 2.422863006591797, "memory(GiB)": 67.19, "step": 3995, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.673054 }, { "epoch": 0.17137226339916883, "grad_norm": 3.436460256576538, "learning_rate": 9.971071415841042e-05, "loss": 2.5885005950927735, "memory(GiB)": 67.19, "step": 4000, "token_acc": 0.44047619047619047, "train_speed(iter/s)": 0.673156 }, { "epoch": 0.17137226339916883, "eval_loss": 2.292482376098633, "eval_runtime": 16.1418, "eval_samples_per_second": 6.195, "eval_steps_per_second": 6.195, "eval_token_acc": 0.4992947813822285, "step": 4000 }, { "epoch": 0.1715864787284178, "grad_norm": 3.1398797035217285, "learning_rate": 9.970999083028574e-05, "loss": 2.485906982421875, "memory(GiB)": 67.19, "step": 4005, "token_acc": 0.4841997961264016, "train_speed(iter/s)": 0.671108 }, { "epoch": 0.17180069405766676, "grad_norm": 2.8661303520202637, "learning_rate": 9.970926660161793e-05, "loss": 2.1872432708740233, "memory(GiB)": 67.19, "step": 4010, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.671102 }, { "epoch": 0.17201490938691572, "grad_norm": 2.739384889602661, "learning_rate": 9.970854147242011e-05, "loss": 2.729199981689453, "memory(GiB)": 67.19, "step": 4015, "token_acc": 0.46831955922865015, "train_speed(iter/s)": 0.671034 }, { "epoch": 0.1722291247161647, "grad_norm": 3.0590195655822754, "learning_rate": 9.970781544270544e-05, "loss": 2.3504426956176756, "memory(GiB)": 67.19, "step": 4020, "token_acc": 0.5125, "train_speed(iter/s)": 0.67118 }, { "epoch": 0.17244334004541365, "grad_norm": 3.622129440307617, 
"learning_rate": 9.970708851248707e-05, "loss": 2.599656867980957, "memory(GiB)": 67.19, "step": 4025, "token_acc": 0.4721189591078067, "train_speed(iter/s)": 0.671278 }, { "epoch": 0.17265755537466262, "grad_norm": 2.5134334564208984, "learning_rate": 9.970636068177817e-05, "loss": 2.6992071151733397, "memory(GiB)": 67.19, "step": 4030, "token_acc": 0.4766666666666667, "train_speed(iter/s)": 0.671342 }, { "epoch": 0.17287177070391158, "grad_norm": 3.5382673740386963, "learning_rate": 9.970563195059191e-05, "loss": 2.3722578048706056, "memory(GiB)": 67.19, "step": 4035, "token_acc": 0.4740740740740741, "train_speed(iter/s)": 0.671251 }, { "epoch": 0.17308598603316053, "grad_norm": 3.355583667755127, "learning_rate": 9.97049023189415e-05, "loss": 2.344722557067871, "memory(GiB)": 67.19, "step": 4040, "token_acc": 0.5407725321888412, "train_speed(iter/s)": 0.671156 }, { "epoch": 0.1733002013624095, "grad_norm": 3.725024461746216, "learning_rate": 9.970417178684014e-05, "loss": 2.3765237808227537, "memory(GiB)": 67.19, "step": 4045, "token_acc": 0.4781021897810219, "train_speed(iter/s)": 0.671206 }, { "epoch": 0.17351441669165846, "grad_norm": 2.9616334438323975, "learning_rate": 9.970344035430111e-05, "loss": 2.7336519241333006, "memory(GiB)": 67.19, "step": 4050, "token_acc": 0.47038327526132406, "train_speed(iter/s)": 0.671241 }, { "epoch": 0.1737286320209074, "grad_norm": 3.8308634757995605, "learning_rate": 9.97027080213376e-05, "loss": 2.6969770431518554, "memory(GiB)": 67.19, "step": 4055, "token_acc": 0.451505016722408, "train_speed(iter/s)": 0.671358 }, { "epoch": 0.1739428473501564, "grad_norm": 2.459043025970459, "learning_rate": 9.970197478796295e-05, "loss": 2.4009067535400392, "memory(GiB)": 67.19, "step": 4060, "token_acc": 0.49310344827586206, "train_speed(iter/s)": 0.67152 }, { "epoch": 0.17415706267940534, "grad_norm": 4.018019199371338, "learning_rate": 9.970124065419038e-05, "loss": 2.5007354736328127, "memory(GiB)": 67.19, "step": 4065, 
"token_acc": 0.4886731391585761, "train_speed(iter/s)": 0.671493 }, { "epoch": 0.1743712780086543, "grad_norm": 4.258320331573486, "learning_rate": 9.97005056200332e-05, "loss": 2.5925296783447265, "memory(GiB)": 67.19, "step": 4070, "token_acc": 0.4664310954063604, "train_speed(iter/s)": 0.671685 }, { "epoch": 0.17458549333790327, "grad_norm": 3.0373311042785645, "learning_rate": 9.969976968550477e-05, "loss": 2.270153617858887, "memory(GiB)": 67.19, "step": 4075, "token_acc": 0.5038461538461538, "train_speed(iter/s)": 0.671445 }, { "epoch": 0.17479970866715222, "grad_norm": 2.673405408859253, "learning_rate": 9.969903285061835e-05, "loss": 2.427581787109375, "memory(GiB)": 67.19, "step": 4080, "token_acc": 0.494949494949495, "train_speed(iter/s)": 0.671378 }, { "epoch": 0.17501392399640117, "grad_norm": 2.682447671890259, "learning_rate": 9.969829511538735e-05, "loss": 2.283497619628906, "memory(GiB)": 67.19, "step": 4085, "token_acc": 0.4868913857677903, "train_speed(iter/s)": 0.671492 }, { "epoch": 0.17522813932565015, "grad_norm": 2.867434024810791, "learning_rate": 9.969755647982512e-05, "loss": 2.430913543701172, "memory(GiB)": 67.19, "step": 4090, "token_acc": 0.4673202614379085, "train_speed(iter/s)": 0.671369 }, { "epoch": 0.1754423546548991, "grad_norm": 3.8648855686187744, "learning_rate": 9.969681694394503e-05, "loss": 2.004003715515137, "memory(GiB)": 67.19, "step": 4095, "token_acc": 0.5426356589147286, "train_speed(iter/s)": 0.671404 }, { "epoch": 0.17565656998414805, "grad_norm": 3.4996700286865234, "learning_rate": 9.969607650776047e-05, "loss": 2.8070907592773438, "memory(GiB)": 67.19, "step": 4100, "token_acc": 0.43304843304843305, "train_speed(iter/s)": 0.67138 }, { "epoch": 0.17587078531339703, "grad_norm": 3.3026936054229736, "learning_rate": 9.969533517128488e-05, "loss": 2.556264877319336, "memory(GiB)": 67.19, "step": 4105, "token_acc": 0.46048109965635736, "train_speed(iter/s)": 0.671476 }, { "epoch": 0.17608500064264598, "grad_norm": 
2.6184520721435547, "learning_rate": 9.969459293453167e-05, "loss": 2.509219169616699, "memory(GiB)": 67.19, "step": 4110, "token_acc": 0.4394366197183099, "train_speed(iter/s)": 0.671632 }, { "epoch": 0.17629921597189496, "grad_norm": 4.0030436515808105, "learning_rate": 9.969384979751428e-05, "loss": 2.321974182128906, "memory(GiB)": 67.19, "step": 4115, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.671742 }, { "epoch": 0.1765134313011439, "grad_norm": 2.910367965698242, "learning_rate": 9.969310576024619e-05, "loss": 2.2034950256347656, "memory(GiB)": 67.19, "step": 4120, "token_acc": 0.5259515570934256, "train_speed(iter/s)": 0.671861 }, { "epoch": 0.17672764663039287, "grad_norm": 2.813751220703125, "learning_rate": 9.969236082274089e-05, "loss": 2.5612873077392577, "memory(GiB)": 67.19, "step": 4125, "token_acc": 0.49829351535836175, "train_speed(iter/s)": 0.671887 }, { "epoch": 0.17694186195964184, "grad_norm": 3.449091911315918, "learning_rate": 9.969161498501185e-05, "loss": 2.387898826599121, "memory(GiB)": 67.19, "step": 4130, "token_acc": 0.49050632911392406, "train_speed(iter/s)": 0.671929 }, { "epoch": 0.1771560772888908, "grad_norm": 3.1564762592315674, "learning_rate": 9.969086824707259e-05, "loss": 2.4266990661621093, "memory(GiB)": 67.19, "step": 4135, "token_acc": 0.5300751879699248, "train_speed(iter/s)": 0.671993 }, { "epoch": 0.17737029261813975, "grad_norm": 4.393965244293213, "learning_rate": 9.969012060893663e-05, "loss": 2.788972091674805, "memory(GiB)": 67.19, "step": 4140, "token_acc": 0.45151515151515154, "train_speed(iter/s)": 0.672068 }, { "epoch": 0.17758450794738873, "grad_norm": 2.3296995162963867, "learning_rate": 9.968937207061752e-05, "loss": 2.489764595031738, "memory(GiB)": 67.19, "step": 4145, "token_acc": 0.4625, "train_speed(iter/s)": 0.672101 }, { "epoch": 0.17779872327663768, "grad_norm": 2.8701016902923584, "learning_rate": 9.968862263212884e-05, "loss": 2.492778778076172, "memory(GiB)": 67.19, "step": 4150, 
"token_acc": 0.4908424908424908, "train_speed(iter/s)": 0.672248 }, { "epoch": 0.17801293860588663, "grad_norm": 3.361177921295166, "learning_rate": 9.968787229348413e-05, "loss": 2.3730987548828124, "memory(GiB)": 67.19, "step": 4155, "token_acc": 0.47686832740213525, "train_speed(iter/s)": 0.672214 }, { "epoch": 0.1782271539351356, "grad_norm": 2.8162992000579834, "learning_rate": 9.968712105469702e-05, "loss": 2.3246862411499025, "memory(GiB)": 67.19, "step": 4160, "token_acc": 0.5034013605442177, "train_speed(iter/s)": 0.67254 }, { "epoch": 0.17844136926438456, "grad_norm": 2.309089422225952, "learning_rate": 9.96863689157811e-05, "loss": 2.365972137451172, "memory(GiB)": 67.19, "step": 4165, "token_acc": 0.49221183800623053, "train_speed(iter/s)": 0.672677 }, { "epoch": 0.1786555845936335, "grad_norm": 3.0407557487487793, "learning_rate": 9.968561587674998e-05, "loss": 2.651225280761719, "memory(GiB)": 67.19, "step": 4170, "token_acc": 0.42441860465116277, "train_speed(iter/s)": 0.672827 }, { "epoch": 0.1788697999228825, "grad_norm": 2.6054587364196777, "learning_rate": 9.968486193761734e-05, "loss": 2.2500789642333983, "memory(GiB)": 67.19, "step": 4175, "token_acc": 0.5091463414634146, "train_speed(iter/s)": 0.672898 }, { "epoch": 0.17908401525213144, "grad_norm": 2.8234951496124268, "learning_rate": 9.968410709839679e-05, "loss": 2.7208541870117187, "memory(GiB)": 67.19, "step": 4180, "token_acc": 0.4740484429065744, "train_speed(iter/s)": 0.673039 }, { "epoch": 0.1792982305813804, "grad_norm": 3.664280414581299, "learning_rate": 9.968335135910207e-05, "loss": 2.27041015625, "memory(GiB)": 67.19, "step": 4185, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.672748 }, { "epoch": 0.17951244591062937, "grad_norm": 2.7444984912872314, "learning_rate": 9.96825947197468e-05, "loss": 2.6142337799072264, "memory(GiB)": 67.19, "step": 4190, "token_acc": 0.49390243902439024, "train_speed(iter/s)": 0.67273 }, { "epoch": 0.17972666123987832, "grad_norm": 
2.820375442504883, "learning_rate": 9.968183718034474e-05, "loss": 2.46221981048584, "memory(GiB)": 67.19, "step": 4195, "token_acc": 0.47297297297297297, "train_speed(iter/s)": 0.672526 }, { "epoch": 0.1799408765691273, "grad_norm": 2.575134038925171, "learning_rate": 9.96810787409096e-05, "loss": 2.0202733993530275, "memory(GiB)": 67.19, "step": 4200, "token_acc": 0.5324232081911263, "train_speed(iter/s)": 0.672542 }, { "epoch": 0.18015509189837625, "grad_norm": 2.680696725845337, "learning_rate": 9.96803194014551e-05, "loss": 2.476537322998047, "memory(GiB)": 67.19, "step": 4205, "token_acc": 0.48905109489051096, "train_speed(iter/s)": 0.672417 }, { "epoch": 0.1803693072276252, "grad_norm": 4.251725673675537, "learning_rate": 9.9679559161995e-05, "loss": 2.477271842956543, "memory(GiB)": 67.19, "step": 4210, "token_acc": 0.4763636363636364, "train_speed(iter/s)": 0.672514 }, { "epoch": 0.18058352255687418, "grad_norm": 3.320899724960327, "learning_rate": 9.96787980225431e-05, "loss": 2.1524932861328123, "memory(GiB)": 67.19, "step": 4215, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.672446 }, { "epoch": 0.18079773788612313, "grad_norm": 5.199128150939941, "learning_rate": 9.967803598311317e-05, "loss": 2.1825115203857424, "memory(GiB)": 67.19, "step": 4220, "token_acc": 0.5444015444015444, "train_speed(iter/s)": 0.672238 }, { "epoch": 0.18101195321537208, "grad_norm": 3.1099085807800293, "learning_rate": 9.967727304371901e-05, "loss": 2.4627498626708983, "memory(GiB)": 67.19, "step": 4225, "token_acc": 0.495114006514658, "train_speed(iter/s)": 0.672407 }, { "epoch": 0.18122616854462106, "grad_norm": 3.1534292697906494, "learning_rate": 9.967650920437445e-05, "loss": 2.2110824584960938, "memory(GiB)": 67.19, "step": 4230, "token_acc": 0.5292207792207793, "train_speed(iter/s)": 0.672509 }, { "epoch": 0.18144038387387001, "grad_norm": 2.6780667304992676, "learning_rate": 9.967574446509333e-05, "loss": 2.1388694763183596, "memory(GiB)": 67.19, "step": 
4235, "token_acc": 0.5175097276264592, "train_speed(iter/s)": 0.672549 }, { "epoch": 0.18165459920311897, "grad_norm": 3.2276129722595215, "learning_rate": 9.96749788258895e-05, "loss": 2.2397588729858398, "memory(GiB)": 67.19, "step": 4240, "token_acc": 0.5234375, "train_speed(iter/s)": 0.672476 }, { "epoch": 0.18186881453236795, "grad_norm": 2.5548911094665527, "learning_rate": 9.967421228677683e-05, "loss": 2.6952980041503904, "memory(GiB)": 67.19, "step": 4245, "token_acc": 0.46283783783783783, "train_speed(iter/s)": 0.672395 }, { "epoch": 0.1820830298616169, "grad_norm": 3.3792881965637207, "learning_rate": 9.96734448477692e-05, "loss": 2.551762008666992, "memory(GiB)": 67.19, "step": 4250, "token_acc": 0.44672131147540983, "train_speed(iter/s)": 0.672492 }, { "epoch": 0.18229724519086585, "grad_norm": 2.6717939376831055, "learning_rate": 9.967267650888051e-05, "loss": 2.5040817260742188, "memory(GiB)": 67.19, "step": 4255, "token_acc": 0.4738562091503268, "train_speed(iter/s)": 0.672491 }, { "epoch": 0.18251146052011483, "grad_norm": 2.4311113357543945, "learning_rate": 9.96719072701247e-05, "loss": 2.538188934326172, "memory(GiB)": 67.19, "step": 4260, "token_acc": 0.4605263157894737, "train_speed(iter/s)": 0.67254 }, { "epoch": 0.18272567584936378, "grad_norm": 3.3721587657928467, "learning_rate": 9.967113713151568e-05, "loss": 2.7364097595214845, "memory(GiB)": 67.19, "step": 4265, "token_acc": 0.4552238805970149, "train_speed(iter/s)": 0.672415 }, { "epoch": 0.18293989117861273, "grad_norm": 2.8227972984313965, "learning_rate": 9.967036609306744e-05, "loss": 2.478546905517578, "memory(GiB)": 67.19, "step": 4270, "token_acc": 0.508833922261484, "train_speed(iter/s)": 0.672353 }, { "epoch": 0.1831541065078617, "grad_norm": 4.088844299316406, "learning_rate": 9.966959415479391e-05, "loss": 2.1547264099121093, "memory(GiB)": 67.19, "step": 4275, "token_acc": 0.5451505016722408, "train_speed(iter/s)": 0.672293 }, { "epoch": 0.18336832183711066, "grad_norm": 
3.8799946308135986, "learning_rate": 9.96688213167091e-05, "loss": 2.5156301498413085, "memory(GiB)": 67.19, "step": 4280, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.672375 }, { "epoch": 0.18358253716635964, "grad_norm": 3.420128583908081, "learning_rate": 9.966804757882697e-05, "loss": 2.9878259658813477, "memory(GiB)": 67.19, "step": 4285, "token_acc": 0.44150943396226416, "train_speed(iter/s)": 0.672545 }, { "epoch": 0.1837967524956086, "grad_norm": 3.0997698307037354, "learning_rate": 9.96672729411616e-05, "loss": 2.7165599822998048, "memory(GiB)": 67.19, "step": 4290, "token_acc": 0.4657534246575342, "train_speed(iter/s)": 0.672614 }, { "epoch": 0.18401096782485754, "grad_norm": 2.8408637046813965, "learning_rate": 9.966649740372696e-05, "loss": 2.642026901245117, "memory(GiB)": 67.19, "step": 4295, "token_acc": 0.44876325088339225, "train_speed(iter/s)": 0.672458 }, { "epoch": 0.18422518315410652, "grad_norm": 3.433830738067627, "learning_rate": 9.966572096653715e-05, "loss": 2.382908058166504, "memory(GiB)": 67.19, "step": 4300, "token_acc": 0.5155038759689923, "train_speed(iter/s)": 0.672438 }, { "epoch": 0.18443939848335547, "grad_norm": 2.6205811500549316, "learning_rate": 9.966494362960622e-05, "loss": 2.5267038345336914, "memory(GiB)": 67.19, "step": 4305, "token_acc": 0.4793103448275862, "train_speed(iter/s)": 0.672525 }, { "epoch": 0.18465361381260442, "grad_norm": 4.583723068237305, "learning_rate": 9.966416539294821e-05, "loss": 2.3202796936035157, "memory(GiB)": 67.19, "step": 4310, "token_acc": 0.5041322314049587, "train_speed(iter/s)": 0.672383 }, { "epoch": 0.1848678291418534, "grad_norm": 2.2419281005859375, "learning_rate": 9.96633862565773e-05, "loss": 2.401456832885742, "memory(GiB)": 67.19, "step": 4315, "token_acc": 0.498567335243553, "train_speed(iter/s)": 0.672417 }, { "epoch": 0.18508204447110235, "grad_norm": 5.192488670349121, "learning_rate": 9.966260622050753e-05, "loss": 2.549943542480469, "memory(GiB)": 67.19, 
"step": 4320, "token_acc": 0.4758364312267658, "train_speed(iter/s)": 0.67239 }, { "epoch": 0.1852962598003513, "grad_norm": 2.5122017860412598, "learning_rate": 9.966182528475306e-05, "loss": 2.7038852691650392, "memory(GiB)": 67.19, "step": 4325, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.672499 }, { "epoch": 0.18551047512960028, "grad_norm": 2.758509635925293, "learning_rate": 9.966104344932805e-05, "loss": 2.2761005401611327, "memory(GiB)": 67.19, "step": 4330, "token_acc": 0.5096525096525096, "train_speed(iter/s)": 0.672511 }, { "epoch": 0.18572469045884923, "grad_norm": 2.733562707901001, "learning_rate": 9.966026071424665e-05, "loss": 2.4114519119262696, "memory(GiB)": 67.19, "step": 4335, "token_acc": 0.48134328358208955, "train_speed(iter/s)": 0.672596 }, { "epoch": 0.18593890578809819, "grad_norm": 3.088937759399414, "learning_rate": 9.965947707952304e-05, "loss": 2.411067581176758, "memory(GiB)": 67.19, "step": 4340, "token_acc": 0.46855345911949686, "train_speed(iter/s)": 0.672662 }, { "epoch": 0.18615312111734716, "grad_norm": 2.9057092666625977, "learning_rate": 9.965869254517141e-05, "loss": 2.1918716430664062, "memory(GiB)": 67.19, "step": 4345, "token_acc": 0.5302013422818792, "train_speed(iter/s)": 0.672581 }, { "epoch": 0.18636733644659612, "grad_norm": 2.829373359680176, "learning_rate": 9.9657907111206e-05, "loss": 2.305065727233887, "memory(GiB)": 67.19, "step": 4350, "token_acc": 0.49337748344370863, "train_speed(iter/s)": 0.672497 }, { "epoch": 0.18658155177584507, "grad_norm": 3.8140032291412354, "learning_rate": 9.965712077764102e-05, "loss": 2.258814811706543, "memory(GiB)": 67.19, "step": 4355, "token_acc": 0.5209125475285171, "train_speed(iter/s)": 0.672434 }, { "epoch": 0.18679576710509405, "grad_norm": 2.838838815689087, "learning_rate": 9.965633354449072e-05, "loss": 2.4059066772460938, "memory(GiB)": 67.19, "step": 4360, "token_acc": 0.45051194539249145, "train_speed(iter/s)": 0.672451 }, { "epoch": 0.187009982434343, 
"grad_norm": 2.595965623855591, "learning_rate": 9.965554541176933e-05, "loss": 2.480194664001465, "memory(GiB)": 67.19, "step": 4365, "token_acc": 0.4575757575757576, "train_speed(iter/s)": 0.672412 }, { "epoch": 0.18722419776359198, "grad_norm": 3.4535207748413086, "learning_rate": 9.965475637949115e-05, "loss": 2.2897567749023438, "memory(GiB)": 67.19, "step": 4370, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.672531 }, { "epoch": 0.18743841309284093, "grad_norm": 2.1209211349487305, "learning_rate": 9.965396644767051e-05, "loss": 2.2965677261352537, "memory(GiB)": 67.19, "step": 4375, "token_acc": 0.48, "train_speed(iter/s)": 0.672462 }, { "epoch": 0.18765262842208988, "grad_norm": 4.435836315155029, "learning_rate": 9.965317561632167e-05, "loss": 2.462802505493164, "memory(GiB)": 67.19, "step": 4380, "token_acc": 0.44912280701754387, "train_speed(iter/s)": 0.672486 }, { "epoch": 0.18786684375133886, "grad_norm": 3.0744080543518066, "learning_rate": 9.965238388545897e-05, "loss": 2.6146013259887697, "memory(GiB)": 67.19, "step": 4385, "token_acc": 0.4721311475409836, "train_speed(iter/s)": 0.672625 }, { "epoch": 0.1880810590805878, "grad_norm": 2.5276336669921875, "learning_rate": 9.965159125509677e-05, "loss": 2.733717346191406, "memory(GiB)": 67.19, "step": 4390, "token_acc": 0.4440789473684211, "train_speed(iter/s)": 0.672574 }, { "epoch": 0.18829527440983676, "grad_norm": 4.414692401885986, "learning_rate": 9.96507977252494e-05, "loss": 2.175972747802734, "memory(GiB)": 67.19, "step": 4395, "token_acc": 0.5020746887966805, "train_speed(iter/s)": 0.672757 }, { "epoch": 0.18850948973908574, "grad_norm": 3.570499897003174, "learning_rate": 9.965000329593129e-05, "loss": 2.593744659423828, "memory(GiB)": 67.19, "step": 4400, "token_acc": 0.5058365758754864, "train_speed(iter/s)": 0.672932 }, { "epoch": 0.1887237050683347, "grad_norm": 3.49418044090271, "learning_rate": 9.964920796715678e-05, "loss": 2.2305395126342775, "memory(GiB)": 67.19, "step": 
4405, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.672915 }, { "epoch": 0.18893792039758364, "grad_norm": 2.992095470428467, "learning_rate": 9.964841173894027e-05, "loss": 2.7123130798339843, "memory(GiB)": 67.19, "step": 4410, "token_acc": 0.4332129963898917, "train_speed(iter/s)": 0.672827 }, { "epoch": 0.18915213572683262, "grad_norm": 3.451171875, "learning_rate": 9.964761461129623e-05, "loss": 2.4757308959960938, "memory(GiB)": 67.19, "step": 4415, "token_acc": 0.47962382445141066, "train_speed(iter/s)": 0.6729 }, { "epoch": 0.18936635105608157, "grad_norm": 3.2778003215789795, "learning_rate": 9.964681658423907e-05, "loss": 2.3115615844726562, "memory(GiB)": 67.19, "step": 4420, "token_acc": 0.5551020408163265, "train_speed(iter/s)": 0.672924 }, { "epoch": 0.18958056638533052, "grad_norm": 3.4944615364074707, "learning_rate": 9.964601765778325e-05, "loss": 2.8523370742797853, "memory(GiB)": 67.19, "step": 4425, "token_acc": 0.4369747899159664, "train_speed(iter/s)": 0.672956 }, { "epoch": 0.1897947817145795, "grad_norm": 2.801302433013916, "learning_rate": 9.964521783194325e-05, "loss": 2.2089780807495116, "memory(GiB)": 67.19, "step": 4430, "token_acc": 0.5190615835777126, "train_speed(iter/s)": 0.672858 }, { "epoch": 0.19000899704382845, "grad_norm": 2.461693048477173, "learning_rate": 9.964441710673356e-05, "loss": 2.739231300354004, "memory(GiB)": 67.19, "step": 4435, "token_acc": 0.4418604651162791, "train_speed(iter/s)": 0.672985 }, { "epoch": 0.1902232123730774, "grad_norm": 2.730754852294922, "learning_rate": 9.96436154821687e-05, "loss": 2.063222885131836, "memory(GiB)": 67.19, "step": 4440, "token_acc": 0.5280898876404494, "train_speed(iter/s)": 0.673107 }, { "epoch": 0.19043742770232638, "grad_norm": 2.4430320262908936, "learning_rate": 9.964281295826316e-05, "loss": 2.2907867431640625, "memory(GiB)": 67.19, "step": 4445, "token_acc": 0.5311355311355311, "train_speed(iter/s)": 0.673034 }, { "epoch": 0.19065164303157534, "grad_norm": 
2.809645891189575, "learning_rate": 9.964200953503149e-05, "loss": 2.4561931610107424, "memory(GiB)": 67.19, "step": 4450, "token_acc": 0.484, "train_speed(iter/s)": 0.673138 }, { "epoch": 0.19086585836082431, "grad_norm": 2.758549451828003, "learning_rate": 9.964120521248825e-05, "loss": 2.6815372467041017, "memory(GiB)": 67.19, "step": 4455, "token_acc": 0.4716312056737589, "train_speed(iter/s)": 0.673019 }, { "epoch": 0.19108007369007327, "grad_norm": 2.743807792663574, "learning_rate": 9.964039999064803e-05, "loss": 2.371870422363281, "memory(GiB)": 67.19, "step": 4460, "token_acc": 0.5, "train_speed(iter/s)": 0.673167 }, { "epoch": 0.19129428901932222, "grad_norm": 2.493312358856201, "learning_rate": 9.963959386952537e-05, "loss": 2.313666343688965, "memory(GiB)": 67.19, "step": 4465, "token_acc": 0.5344827586206896, "train_speed(iter/s)": 0.67303 }, { "epoch": 0.1915085043485712, "grad_norm": 3.418942928314209, "learning_rate": 9.963878684913492e-05, "loss": 2.7530651092529297, "memory(GiB)": 67.19, "step": 4470, "token_acc": 0.44921875, "train_speed(iter/s)": 0.672954 }, { "epoch": 0.19172271967782015, "grad_norm": 3.462233781814575, "learning_rate": 9.963797892949128e-05, "loss": 2.424669075012207, "memory(GiB)": 67.19, "step": 4475, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.672956 }, { "epoch": 0.1919369350070691, "grad_norm": 3.126887083053589, "learning_rate": 9.963717011060909e-05, "loss": 2.332469177246094, "memory(GiB)": 67.19, "step": 4480, "token_acc": 0.5115511551155115, "train_speed(iter/s)": 0.673019 }, { "epoch": 0.19215115033631808, "grad_norm": 3.817655086517334, "learning_rate": 9.9636360392503e-05, "loss": 2.334208679199219, "memory(GiB)": 67.19, "step": 4485, "token_acc": 0.4734982332155477, "train_speed(iter/s)": 0.673087 }, { "epoch": 0.19236536566556703, "grad_norm": 2.566608428955078, "learning_rate": 9.963554977518766e-05, "loss": 2.3025609970092775, "memory(GiB)": 67.19, "step": 4490, "token_acc": 0.47151898734177217, 
"train_speed(iter/s)": 0.673085 }, { "epoch": 0.19257958099481598, "grad_norm": 2.923079252243042, "learning_rate": 9.96347382586778e-05, "loss": 2.5662418365478517, "memory(GiB)": 67.19, "step": 4495, "token_acc": 0.42696629213483145, "train_speed(iter/s)": 0.673259 }, { "epoch": 0.19279379632406496, "grad_norm": 2.6888010501861572, "learning_rate": 9.963392584298808e-05, "loss": 2.453485298156738, "memory(GiB)": 67.19, "step": 4500, "token_acc": 0.46835443037974683, "train_speed(iter/s)": 0.673284 }, { "epoch": 0.19279379632406496, "eval_loss": 2.1749465465545654, "eval_runtime": 17.3318, "eval_samples_per_second": 5.77, "eval_steps_per_second": 5.77, "eval_token_acc": 0.4942233632862644, "step": 4500 }, { "epoch": 0.1930080116533139, "grad_norm": 4.176661014556885, "learning_rate": 9.963311252813323e-05, "loss": 2.20910530090332, "memory(GiB)": 67.19, "step": 4505, "token_acc": 0.504835589941973, "train_speed(iter/s)": 0.671246 }, { "epoch": 0.19322222698256286, "grad_norm": 3.4262707233428955, "learning_rate": 9.9632298314128e-05, "loss": 2.389870452880859, "memory(GiB)": 67.19, "step": 4510, "token_acc": 0.49606299212598426, "train_speed(iter/s)": 0.67125 }, { "epoch": 0.19343644231181184, "grad_norm": 3.676064968109131, "learning_rate": 9.963148320098711e-05, "loss": 2.442768859863281, "memory(GiB)": 67.19, "step": 4515, "token_acc": 0.4806451612903226, "train_speed(iter/s)": 0.671258 }, { "epoch": 0.1936506576410608, "grad_norm": 2.503544569015503, "learning_rate": 9.963066718872536e-05, "loss": 2.194026756286621, "memory(GiB)": 67.19, "step": 4520, "token_acc": 0.5301204819277109, "train_speed(iter/s)": 0.671363 }, { "epoch": 0.19386487297030974, "grad_norm": 3.1876323223114014, "learning_rate": 9.962985027735749e-05, "loss": 2.659541130065918, "memory(GiB)": 67.19, "step": 4525, "token_acc": 0.44375, "train_speed(iter/s)": 0.671381 }, { "epoch": 0.19407908829955872, "grad_norm": 2.8209469318389893, "learning_rate": 9.962903246689836e-05, "loss": 
2.559197998046875, "memory(GiB)": 67.19, "step": 4530, "token_acc": 0.453781512605042, "train_speed(iter/s)": 0.671465 }, { "epoch": 0.19429330362880767, "grad_norm": 2.700697898864746, "learning_rate": 9.962821375736274e-05, "loss": 2.3180011749267577, "memory(GiB)": 67.19, "step": 4535, "token_acc": 0.47962382445141066, "train_speed(iter/s)": 0.671489 }, { "epoch": 0.19450751895805665, "grad_norm": 2.865915536880493, "learning_rate": 9.962739414876545e-05, "loss": 2.482251739501953, "memory(GiB)": 67.19, "step": 4540, "token_acc": 0.49324324324324326, "train_speed(iter/s)": 0.671594 }, { "epoch": 0.1947217342873056, "grad_norm": 2.790904998779297, "learning_rate": 9.962657364112139e-05, "loss": 2.265231132507324, "memory(GiB)": 67.19, "step": 4545, "token_acc": 0.5074074074074074, "train_speed(iter/s)": 0.67172 }, { "epoch": 0.19493594961655455, "grad_norm": 3.731797695159912, "learning_rate": 9.96257522344454e-05, "loss": 2.417080116271973, "memory(GiB)": 67.19, "step": 4550, "token_acc": 0.5387323943661971, "train_speed(iter/s)": 0.671704 }, { "epoch": 0.19515016494580353, "grad_norm": 5.141436576843262, "learning_rate": 9.962492992875232e-05, "loss": 2.37097225189209, "memory(GiB)": 67.19, "step": 4555, "token_acc": 0.43700787401574803, "train_speed(iter/s)": 0.67188 }, { "epoch": 0.19536438027505248, "grad_norm": 3.3176183700561523, "learning_rate": 9.96241067240571e-05, "loss": 2.298516845703125, "memory(GiB)": 67.19, "step": 4560, "token_acc": 0.5182926829268293, "train_speed(iter/s)": 0.67188 }, { "epoch": 0.19557859560430144, "grad_norm": 3.0943844318389893, "learning_rate": 9.962328262037463e-05, "loss": 2.3411138534545897, "memory(GiB)": 67.19, "step": 4565, "token_acc": 0.5182724252491694, "train_speed(iter/s)": 0.671807 }, { "epoch": 0.19579281093355042, "grad_norm": 2.8087007999420166, "learning_rate": 9.962262269016789e-05, "loss": 2.5655860900878906, "memory(GiB)": 67.19, "step": 4570, "token_acc": 0.49280575539568344, "train_speed(iter/s)": 
0.671928 }, { "epoch": 0.19600702626279937, "grad_norm": 2.698927164077759, "learning_rate": 9.962179696834601e-05, "loss": 2.4067989349365235, "memory(GiB)": 67.19, "step": 4575, "token_acc": 0.4882943143812709, "train_speed(iter/s)": 0.67206 }, { "epoch": 0.19622124159204832, "grad_norm": 4.36581563949585, "learning_rate": 9.962097034757876e-05, "loss": 2.377519989013672, "memory(GiB)": 67.19, "step": 4580, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.672088 }, { "epoch": 0.1964354569212973, "grad_norm": 2.640573501586914, "learning_rate": 9.962014282788106e-05, "loss": 2.560517120361328, "memory(GiB)": 67.19, "step": 4585, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.672158 }, { "epoch": 0.19664967225054625, "grad_norm": 2.4994711875915527, "learning_rate": 9.961931440926793e-05, "loss": 2.2758983612060546, "memory(GiB)": 67.19, "step": 4590, "token_acc": 0.528169014084507, "train_speed(iter/s)": 0.672218 }, { "epoch": 0.1968638875797952, "grad_norm": 3.500687837600708, "learning_rate": 9.961848509175437e-05, "loss": 2.3117816925048826, "memory(GiB)": 67.19, "step": 4595, "token_acc": 0.5315985130111525, "train_speed(iter/s)": 0.672229 }, { "epoch": 0.19707810290904418, "grad_norm": 3.7745392322540283, "learning_rate": 9.96176548753554e-05, "loss": 2.590357208251953, "memory(GiB)": 67.19, "step": 4600, "token_acc": 0.45794392523364486, "train_speed(iter/s)": 0.672129 }, { "epoch": 0.19729231823829313, "grad_norm": 3.3574306964874268, "learning_rate": 9.961682376008608e-05, "loss": 2.242618751525879, "memory(GiB)": 67.19, "step": 4605, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.672089 }, { "epoch": 0.19750653356754208, "grad_norm": 2.875380039215088, "learning_rate": 9.961599174596145e-05, "loss": 2.4952503204345704, "memory(GiB)": 67.19, "step": 4610, "token_acc": 0.4721311475409836, "train_speed(iter/s)": 0.672046 }, { "epoch": 0.19772074889679106, "grad_norm": 2.774953842163086, "learning_rate": 9.961515883299659e-05, 
"loss": 2.137435722351074, "memory(GiB)": 67.19, "step": 4615, "token_acc": 0.540650406504065, "train_speed(iter/s)": 0.672031 }, { "epoch": 0.19793496422604, "grad_norm": 11.446446418762207, "learning_rate": 9.961432502120658e-05, "loss": 2.8100231170654295, "memory(GiB)": 67.19, "step": 4620, "token_acc": 0.4114441416893733, "train_speed(iter/s)": 0.672168 }, { "epoch": 0.198149179555289, "grad_norm": 2.9779915809631348, "learning_rate": 9.961349031060651e-05, "loss": 2.2796855926513673, "memory(GiB)": 67.19, "step": 4625, "token_acc": 0.5015197568389058, "train_speed(iter/s)": 0.672177 }, { "epoch": 0.19836339488453794, "grad_norm": 3.273287296295166, "learning_rate": 9.961265470121155e-05, "loss": 2.589596748352051, "memory(GiB)": 67.19, "step": 4630, "token_acc": 0.46441947565543074, "train_speed(iter/s)": 0.672258 }, { "epoch": 0.1985776102137869, "grad_norm": 3.6384689807891846, "learning_rate": 9.961181819303679e-05, "loss": 2.1086093902587892, "memory(GiB)": 67.19, "step": 4635, "token_acc": 0.5439330543933054, "train_speed(iter/s)": 0.672119 }, { "epoch": 0.19879182554303587, "grad_norm": 3.716012954711914, "learning_rate": 9.961098078609743e-05, "loss": 2.5139461517333985, "memory(GiB)": 67.19, "step": 4640, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.672228 }, { "epoch": 0.19900604087228482, "grad_norm": 2.9546546936035156, "learning_rate": 9.961014248040859e-05, "loss": 2.5547813415527343, "memory(GiB)": 67.19, "step": 4645, "token_acc": 0.5036764705882353, "train_speed(iter/s)": 0.672124 }, { "epoch": 0.19922025620153377, "grad_norm": 2.8029098510742188, "learning_rate": 9.96093032759855e-05, "loss": 2.295742416381836, "memory(GiB)": 67.19, "step": 4650, "token_acc": 0.5040983606557377, "train_speed(iter/s)": 0.672097 }, { "epoch": 0.19943447153078275, "grad_norm": 2.6839447021484375, "learning_rate": 9.960846317284334e-05, "loss": 2.496154022216797, "memory(GiB)": 67.19, "step": 4655, "token_acc": 0.5, "train_speed(iter/s)": 0.672185 }, 
{ "epoch": 0.1996486868600317, "grad_norm": 3.446319341659546, "learning_rate": 9.960762217099732e-05, "loss": 2.3334724426269533, "memory(GiB)": 67.19, "step": 4660, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.672216 }, { "epoch": 0.19986290218928066, "grad_norm": 2.9213075637817383, "learning_rate": 9.96067802704627e-05, "loss": 2.39153995513916, "memory(GiB)": 67.19, "step": 4665, "token_acc": 0.4810810810810811, "train_speed(iter/s)": 0.672314 }, { "epoch": 0.20007711751852963, "grad_norm": 3.6098530292510986, "learning_rate": 9.960593747125471e-05, "loss": 2.815430450439453, "memory(GiB)": 67.19, "step": 4670, "token_acc": 0.4743202416918429, "train_speed(iter/s)": 0.672407 }, { "epoch": 0.20029133284777859, "grad_norm": 3.7466931343078613, "learning_rate": 9.960509377338864e-05, "loss": 2.5094205856323244, "memory(GiB)": 67.19, "step": 4675, "token_acc": 0.5059171597633136, "train_speed(iter/s)": 0.672448 }, { "epoch": 0.20050554817702754, "grad_norm": 5.42350435256958, "learning_rate": 9.960424917687976e-05, "loss": 2.3676509857177734, "memory(GiB)": 67.19, "step": 4680, "token_acc": 0.5320754716981132, "train_speed(iter/s)": 0.672499 }, { "epoch": 0.20071976350627652, "grad_norm": 2.8334665298461914, "learning_rate": 9.960340368174336e-05, "loss": 2.578531837463379, "memory(GiB)": 67.19, "step": 4685, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.672497 }, { "epoch": 0.20093397883552547, "grad_norm": 3.5987281799316406, "learning_rate": 9.960255728799479e-05, "loss": 2.4617425918579103, "memory(GiB)": 67.19, "step": 4690, "token_acc": 0.5363984674329502, "train_speed(iter/s)": 0.672301 }, { "epoch": 0.20114819416477442, "grad_norm": 3.0655648708343506, "learning_rate": 9.960170999564935e-05, "loss": 2.4906326293945313, "memory(GiB)": 67.19, "step": 4695, "token_acc": 0.4866666666666667, "train_speed(iter/s)": 0.671977 }, { "epoch": 0.2013624094940234, "grad_norm": 2.8287124633789062, "learning_rate": 9.960086180472242e-05, 
"loss": 2.581980895996094, "memory(GiB)": 67.19, "step": 4700, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.671891 }, { "epoch": 0.20157662482327235, "grad_norm": 3.240068197250366, "learning_rate": 9.960001271522932e-05, "loss": 2.550166130065918, "memory(GiB)": 67.19, "step": 4705, "token_acc": 0.4853801169590643, "train_speed(iter/s)": 0.671876 }, { "epoch": 0.20179084015252133, "grad_norm": 5.442161560058594, "learning_rate": 9.959916272718549e-05, "loss": 2.1744117736816406, "memory(GiB)": 67.19, "step": 4710, "token_acc": 0.5378151260504201, "train_speed(iter/s)": 0.671881 }, { "epoch": 0.20200505548177028, "grad_norm": 2.5032851696014404, "learning_rate": 9.959831184060628e-05, "loss": 2.220820999145508, "memory(GiB)": 67.19, "step": 4715, "token_acc": 0.5177865612648221, "train_speed(iter/s)": 0.671898 }, { "epoch": 0.20221927081101923, "grad_norm": 3.099259614944458, "learning_rate": 9.959746005550715e-05, "loss": 2.14715576171875, "memory(GiB)": 67.19, "step": 4720, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.671901 }, { "epoch": 0.2024334861402682, "grad_norm": 3.2869009971618652, "learning_rate": 9.959660737190348e-05, "loss": 2.4206979751586912, "memory(GiB)": 67.19, "step": 4725, "token_acc": 0.46619217081850534, "train_speed(iter/s)": 0.672014 }, { "epoch": 0.20264770146951716, "grad_norm": 3.4130806922912598, "learning_rate": 9.959575378981075e-05, "loss": 2.1810192108154296, "memory(GiB)": 67.19, "step": 4730, "token_acc": 0.49458483754512633, "train_speed(iter/s)": 0.672144 }, { "epoch": 0.2028619167987661, "grad_norm": 3.5726165771484375, "learning_rate": 9.959489930924442e-05, "loss": 2.369894790649414, "memory(GiB)": 67.19, "step": 4735, "token_acc": 0.49491525423728816, "train_speed(iter/s)": 0.672146 }, { "epoch": 0.2030761321280151, "grad_norm": 3.044041633605957, "learning_rate": 9.959404393021996e-05, "loss": 2.3076412200927736, "memory(GiB)": 67.19, "step": 4740, "token_acc": 0.5525291828793775, 
"train_speed(iter/s)": 0.672219 }, { "epoch": 0.20329034745726404, "grad_norm": 2.766611099243164, "learning_rate": 9.959318765275287e-05, "loss": 2.4682071685791014, "memory(GiB)": 67.19, "step": 4745, "token_acc": 0.4828897338403042, "train_speed(iter/s)": 0.672285 }, { "epoch": 0.203504562786513, "grad_norm": 2.493941307067871, "learning_rate": 9.959233047685865e-05, "loss": 2.4926990509033202, "memory(GiB)": 67.19, "step": 4750, "token_acc": 0.5, "train_speed(iter/s)": 0.672271 }, { "epoch": 0.20371877811576197, "grad_norm": 3.2087442874908447, "learning_rate": 9.959147240255287e-05, "loss": 2.5178003311157227, "memory(GiB)": 67.19, "step": 4755, "token_acc": 0.46366782006920415, "train_speed(iter/s)": 0.67226 }, { "epoch": 0.20393299344501092, "grad_norm": 3.189117431640625, "learning_rate": 9.959061342985104e-05, "loss": 2.2299591064453126, "memory(GiB)": 67.19, "step": 4760, "token_acc": 0.5113636363636364, "train_speed(iter/s)": 0.672301 }, { "epoch": 0.20414720877425988, "grad_norm": 4.084780216217041, "learning_rate": 9.958975355876871e-05, "loss": 2.626809310913086, "memory(GiB)": 67.19, "step": 4765, "token_acc": 0.4724137931034483, "train_speed(iter/s)": 0.672456 }, { "epoch": 0.20436142410350885, "grad_norm": 2.9554357528686523, "learning_rate": 9.958889278932148e-05, "loss": 2.2537815093994142, "memory(GiB)": 67.19, "step": 4770, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.672507 }, { "epoch": 0.2045756394327578, "grad_norm": 3.260382652282715, "learning_rate": 9.958803112152495e-05, "loss": 2.2856861114501954, "memory(GiB)": 67.19, "step": 4775, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.672538 }, { "epoch": 0.20478985476200676, "grad_norm": 2.7335333824157715, "learning_rate": 9.958716855539472e-05, "loss": 2.6145076751708984, "memory(GiB)": 67.19, "step": 4780, "token_acc": 0.45426829268292684, "train_speed(iter/s)": 0.672488 }, { "epoch": 0.20500407009125574, "grad_norm": 3.536878824234009, "learning_rate": 
9.958630509094641e-05, "loss": 2.71905517578125, "memory(GiB)": 67.19, "step": 4785, "token_acc": 0.45936395759717313, "train_speed(iter/s)": 0.67262 }, { "epoch": 0.2052182854205047, "grad_norm": 3.262451171875, "learning_rate": 9.958544072819567e-05, "loss": 2.254521942138672, "memory(GiB)": 67.19, "step": 4790, "token_acc": 0.5120274914089347, "train_speed(iter/s)": 0.672867 }, { "epoch": 0.20543250074975367, "grad_norm": 3.3073525428771973, "learning_rate": 9.958457546715815e-05, "loss": 2.5042890548706054, "memory(GiB)": 67.19, "step": 4795, "token_acc": 0.46394984326018807, "train_speed(iter/s)": 0.672703 }, { "epoch": 0.20564671607900262, "grad_norm": 2.742593288421631, "learning_rate": 9.958370930784954e-05, "loss": 2.632832908630371, "memory(GiB)": 67.19, "step": 4800, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.672619 }, { "epoch": 0.20586093140825157, "grad_norm": 2.607511281967163, "learning_rate": 9.958284225028552e-05, "loss": 2.2217290878295897, "memory(GiB)": 67.19, "step": 4805, "token_acc": 0.5338983050847458, "train_speed(iter/s)": 0.672637 }, { "epoch": 0.20607514673750055, "grad_norm": 2.654388666152954, "learning_rate": 9.95819742944818e-05, "loss": 2.5491193771362304, "memory(GiB)": 67.19, "step": 4810, "token_acc": 0.4603174603174603, "train_speed(iter/s)": 0.672678 }, { "epoch": 0.2062893620667495, "grad_norm": 2.9136593341827393, "learning_rate": 9.958110544045409e-05, "loss": 2.5055973052978517, "memory(GiB)": 67.19, "step": 4815, "token_acc": 0.4713804713804714, "train_speed(iter/s)": 0.672836 }, { "epoch": 0.20650357739599845, "grad_norm": 3.5905168056488037, "learning_rate": 9.958023568821816e-05, "loss": 2.5747825622558596, "memory(GiB)": 67.19, "step": 4820, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.672863 }, { "epoch": 0.20671779272524743, "grad_norm": 2.6100876331329346, "learning_rate": 9.957936503778975e-05, "loss": 2.718444061279297, "memory(GiB)": 67.19, "step": 4825, "token_acc": 
0.43884892086330934, "train_speed(iter/s)": 0.672936 }, { "epoch": 0.20693200805449638, "grad_norm": 3.0207958221435547, "learning_rate": 9.957849348918462e-05, "loss": 2.1165285110473633, "memory(GiB)": 67.19, "step": 4830, "token_acc": 0.5284552845528455, "train_speed(iter/s)": 0.67303 }, { "epoch": 0.20714622338374533, "grad_norm": 5.058740615844727, "learning_rate": 9.95776210424186e-05, "loss": 2.3623401641845705, "memory(GiB)": 67.19, "step": 4835, "token_acc": 0.4923547400611621, "train_speed(iter/s)": 0.6729 }, { "epoch": 0.2073604387129943, "grad_norm": 2.741086721420288, "learning_rate": 9.957674769750745e-05, "loss": 2.402373123168945, "memory(GiB)": 67.19, "step": 4840, "token_acc": 0.4690909090909091, "train_speed(iter/s)": 0.672747 }, { "epoch": 0.20757465404224326, "grad_norm": 2.4803571701049805, "learning_rate": 9.957587345446699e-05, "loss": 2.564596939086914, "memory(GiB)": 67.19, "step": 4845, "token_acc": 0.4854368932038835, "train_speed(iter/s)": 0.672807 }, { "epoch": 0.2077888693714922, "grad_norm": 2.7365150451660156, "learning_rate": 9.95749983133131e-05, "loss": 2.5362539291381836, "memory(GiB)": 67.19, "step": 4850, "token_acc": 0.5259259259259259, "train_speed(iter/s)": 0.672923 }, { "epoch": 0.2080030847007412, "grad_norm": 2.720745801925659, "learning_rate": 9.95741222740616e-05, "loss": 2.437957763671875, "memory(GiB)": 67.19, "step": 4855, "token_acc": 0.4627831715210356, "train_speed(iter/s)": 0.6729 }, { "epoch": 0.20821730002999014, "grad_norm": 3.338287115097046, "learning_rate": 9.957324533672838e-05, "loss": 2.476558876037598, "memory(GiB)": 67.19, "step": 4860, "token_acc": 0.4888888888888889, "train_speed(iter/s)": 0.672919 }, { "epoch": 0.2084315153592391, "grad_norm": 2.7502269744873047, "learning_rate": 9.957236750132931e-05, "loss": 2.3024709701538084, "memory(GiB)": 67.19, "step": 4865, "token_acc": 0.5373134328358209, "train_speed(iter/s)": 0.672851 }, { "epoch": 0.20864573068848807, "grad_norm": 3.6600964069366455, 
"learning_rate": 9.957148876788031e-05, "loss": 2.551706886291504, "memory(GiB)": 67.19, "step": 4870, "token_acc": 0.48201438848920863, "train_speed(iter/s)": 0.672492 }, { "epoch": 0.20885994601773702, "grad_norm": 2.9787049293518066, "learning_rate": 9.957060913639728e-05, "loss": 2.777752685546875, "memory(GiB)": 67.19, "step": 4875, "token_acc": 0.4358974358974359, "train_speed(iter/s)": 0.672546 }, { "epoch": 0.209074161346986, "grad_norm": 2.492277145385742, "learning_rate": 9.956972860689617e-05, "loss": 2.463638496398926, "memory(GiB)": 67.19, "step": 4880, "token_acc": 0.4688427299703264, "train_speed(iter/s)": 0.672449 }, { "epoch": 0.20928837667623496, "grad_norm": 2.5369248390197754, "learning_rate": 9.956884717939291e-05, "loss": 2.363514518737793, "memory(GiB)": 67.19, "step": 4885, "token_acc": 0.49691358024691357, "train_speed(iter/s)": 0.672417 }, { "epoch": 0.2095025920054839, "grad_norm": 3.5330264568328857, "learning_rate": 9.956796485390351e-05, "loss": 2.521170425415039, "memory(GiB)": 67.19, "step": 4890, "token_acc": 0.46120689655172414, "train_speed(iter/s)": 0.672255 }, { "epoch": 0.20971680733473289, "grad_norm": 2.957160472869873, "learning_rate": 9.95670816304439e-05, "loss": 2.4532941818237304, "memory(GiB)": 67.19, "step": 4895, "token_acc": 0.5309446254071661, "train_speed(iter/s)": 0.672406 }, { "epoch": 0.20993102266398184, "grad_norm": 3.0819239616394043, "learning_rate": 9.956619750903013e-05, "loss": 2.7181575775146483, "memory(GiB)": 67.19, "step": 4900, "token_acc": 0.4259818731117825, "train_speed(iter/s)": 0.672379 }, { "epoch": 0.2101452379932308, "grad_norm": 3.2676475048065186, "learning_rate": 9.95653124896782e-05, "loss": 2.6237161636352537, "memory(GiB)": 67.19, "step": 4905, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.672374 }, { "epoch": 0.21035945332247977, "grad_norm": 2.7783710956573486, "learning_rate": 9.956442657240412e-05, "loss": 2.3715396881103517, "memory(GiB)": 67.19, "step": 4910, 
"token_acc": 0.48188405797101447, "train_speed(iter/s)": 0.672467 }, { "epoch": 0.21057366865172872, "grad_norm": 2.946526288986206, "learning_rate": 9.956353975722396e-05, "loss": 2.61602897644043, "memory(GiB)": 67.19, "step": 4915, "token_acc": 0.43558282208588955, "train_speed(iter/s)": 0.672336 }, { "epoch": 0.21078788398097767, "grad_norm": 2.831653118133545, "learning_rate": 9.956265204415378e-05, "loss": 2.3882850646972655, "memory(GiB)": 67.19, "step": 4920, "token_acc": 0.4755700325732899, "train_speed(iter/s)": 0.672309 }, { "epoch": 0.21100209931022665, "grad_norm": 3.583360433578491, "learning_rate": 9.95617634332097e-05, "loss": 2.46600399017334, "memory(GiB)": 67.19, "step": 4925, "token_acc": 0.4693140794223827, "train_speed(iter/s)": 0.672333 }, { "epoch": 0.2112163146394756, "grad_norm": 2.5871193408966064, "learning_rate": 9.956087392440774e-05, "loss": 2.560070037841797, "memory(GiB)": 67.19, "step": 4930, "token_acc": 0.42990654205607476, "train_speed(iter/s)": 0.67226 }, { "epoch": 0.21143052996872455, "grad_norm": 2.999296188354492, "learning_rate": 9.955998351776407e-05, "loss": 2.372940254211426, "memory(GiB)": 67.19, "step": 4935, "token_acc": 0.49458483754512633, "train_speed(iter/s)": 0.672233 }, { "epoch": 0.21164474529797353, "grad_norm": 2.7741715908050537, "learning_rate": 9.955909221329481e-05, "loss": 2.249554443359375, "memory(GiB)": 67.19, "step": 4940, "token_acc": 0.48188405797101447, "train_speed(iter/s)": 0.672179 }, { "epoch": 0.21185896062722248, "grad_norm": 5.999745845794678, "learning_rate": 9.95582000110161e-05, "loss": 2.227683258056641, "memory(GiB)": 67.19, "step": 4945, "token_acc": 0.5058823529411764, "train_speed(iter/s)": 0.672236 }, { "epoch": 0.21207317595647143, "grad_norm": 3.3391711711883545, "learning_rate": 9.955730691094411e-05, "loss": 2.7980560302734374, "memory(GiB)": 67.19, "step": 4950, "token_acc": 0.39862542955326463, "train_speed(iter/s)": 0.672144 }, { "epoch": 0.2122873912857204, "grad_norm": 
2.208739757537842, "learning_rate": 9.955641291309501e-05, "loss": 2.2307716369628907, "memory(GiB)": 67.19, "step": 4955, "token_acc": 0.5318559556786704, "train_speed(iter/s)": 0.672168 }, { "epoch": 0.21250160661496936, "grad_norm": 3.6390066146850586, "learning_rate": 9.955551801748502e-05, "loss": 2.4982677459716798, "memory(GiB)": 67.19, "step": 4960, "token_acc": 0.5365853658536586, "train_speed(iter/s)": 0.672212 }, { "epoch": 0.21271582194421834, "grad_norm": 2.812426805496216, "learning_rate": 9.955462222413032e-05, "loss": 2.4700206756591796, "memory(GiB)": 67.19, "step": 4965, "token_acc": 0.45666666666666667, "train_speed(iter/s)": 0.672308 }, { "epoch": 0.2129300372734673, "grad_norm": 3.8603172302246094, "learning_rate": 9.955372553304716e-05, "loss": 2.3830652236938477, "memory(GiB)": 67.19, "step": 4970, "token_acc": 0.5043988269794721, "train_speed(iter/s)": 0.672203 }, { "epoch": 0.21314425260271624, "grad_norm": 3.1509180068969727, "learning_rate": 9.955282794425178e-05, "loss": 2.402687644958496, "memory(GiB)": 67.19, "step": 4975, "token_acc": 0.5095057034220533, "train_speed(iter/s)": 0.672214 }, { "epoch": 0.21335846793196522, "grad_norm": 3.8266818523406982, "learning_rate": 9.955192945776041e-05, "loss": 2.4434600830078126, "memory(GiB)": 67.19, "step": 4980, "token_acc": 0.492, "train_speed(iter/s)": 0.672247 }, { "epoch": 0.21357268326121417, "grad_norm": 3.5446314811706543, "learning_rate": 9.95510300735894e-05, "loss": 2.630116271972656, "memory(GiB)": 67.19, "step": 4985, "token_acc": 0.45075757575757575, "train_speed(iter/s)": 0.67235 }, { "epoch": 0.21378689859046313, "grad_norm": 3.3105409145355225, "learning_rate": 9.955012979175496e-05, "loss": 2.3800146102905275, "memory(GiB)": 67.19, "step": 4990, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.672307 }, { "epoch": 0.2140011139197121, "grad_norm": 3.9127554893493652, "learning_rate": 9.954922861227344e-05, "loss": 2.662403678894043, "memory(GiB)": 67.19, "step": 4995, 
"token_acc": 0.45478723404255317, "train_speed(iter/s)": 0.672421 }, { "epoch": 0.21421532924896106, "grad_norm": 3.430070161819458, "learning_rate": 9.954832653516119e-05, "loss": 2.1952314376831055, "memory(GiB)": 67.19, "step": 5000, "token_acc": 0.5, "train_speed(iter/s)": 0.672256 }, { "epoch": 0.21421532924896106, "eval_loss": 2.1047840118408203, "eval_runtime": 16.6213, "eval_samples_per_second": 6.016, "eval_steps_per_second": 6.016, "eval_token_acc": 0.5050215208034433, "step": 5000 }, { "epoch": 0.21442954457821, "grad_norm": 2.762073040008545, "learning_rate": 9.95474235604345e-05, "loss": 2.761393737792969, "memory(GiB)": 67.19, "step": 5005, "token_acc": 0.48895582329317266, "train_speed(iter/s)": 0.67054 }, { "epoch": 0.214643759907459, "grad_norm": 2.688809394836426, "learning_rate": 9.954651968810976e-05, "loss": 2.332743453979492, "memory(GiB)": 67.19, "step": 5010, "token_acc": 0.46757679180887374, "train_speed(iter/s)": 0.670413 }, { "epoch": 0.21485797523670794, "grad_norm": 3.4272477626800537, "learning_rate": 9.954561491820333e-05, "loss": 2.350129318237305, "memory(GiB)": 67.19, "step": 5015, "token_acc": 0.4879032258064516, "train_speed(iter/s)": 0.670311 }, { "epoch": 0.2150721905659569, "grad_norm": 3.9761886596679688, "learning_rate": 9.954470925073163e-05, "loss": 2.699973297119141, "memory(GiB)": 67.19, "step": 5020, "token_acc": 0.4732824427480916, "train_speed(iter/s)": 0.670112 }, { "epoch": 0.21528640589520587, "grad_norm": 3.2756330966949463, "learning_rate": 9.954380268571104e-05, "loss": 2.40860595703125, "memory(GiB)": 67.19, "step": 5025, "token_acc": 0.4825174825174825, "train_speed(iter/s)": 0.670034 }, { "epoch": 0.21550062122445482, "grad_norm": 3.044018268585205, "learning_rate": 9.954289522315796e-05, "loss": 2.498831367492676, "memory(GiB)": 67.19, "step": 5030, "token_acc": 0.4865771812080537, "train_speed(iter/s)": 0.670032 }, { "epoch": 0.21571483655370377, "grad_norm": 2.7460691928863525, "learning_rate": 
9.954198686308889e-05, "loss": 2.5906227111816404, "memory(GiB)": 67.19, "step": 5035, "token_acc": 0.4537313432835821, "train_speed(iter/s)": 0.669936 }, { "epoch": 0.21592905188295275, "grad_norm": 3.3187577724456787, "learning_rate": 9.954107760552024e-05, "loss": 2.6672618865966795, "memory(GiB)": 67.19, "step": 5040, "token_acc": 0.48299319727891155, "train_speed(iter/s)": 0.67 }, { "epoch": 0.2161432672122017, "grad_norm": 2.6315128803253174, "learning_rate": 9.95401674504685e-05, "loss": 2.73840217590332, "memory(GiB)": 67.19, "step": 5045, "token_acc": 0.43768115942028984, "train_speed(iter/s)": 0.669933 }, { "epoch": 0.21635748254145068, "grad_norm": 3.817004680633545, "learning_rate": 9.953925639795014e-05, "loss": 2.355721092224121, "memory(GiB)": 67.19, "step": 5050, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.669919 }, { "epoch": 0.21657169787069963, "grad_norm": 2.452704429626465, "learning_rate": 9.95383444479817e-05, "loss": 2.40594367980957, "memory(GiB)": 67.19, "step": 5055, "token_acc": 0.4734982332155477, "train_speed(iter/s)": 0.669779 }, { "epoch": 0.21678591319994858, "grad_norm": 3.0223190784454346, "learning_rate": 9.953743160057966e-05, "loss": 2.4220767974853517, "memory(GiB)": 67.19, "step": 5060, "token_acc": 0.5091575091575091, "train_speed(iter/s)": 0.669894 }, { "epoch": 0.21700012852919756, "grad_norm": 2.8007099628448486, "learning_rate": 9.95365178557606e-05, "loss": 2.3771030426025392, "memory(GiB)": 67.19, "step": 5065, "token_acc": 0.4923547400611621, "train_speed(iter/s)": 0.670024 }, { "epoch": 0.2172143438584465, "grad_norm": 2.5367653369903564, "learning_rate": 9.953560321354102e-05, "loss": 2.012766456604004, "memory(GiB)": 67.19, "step": 5070, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.669951 }, { "epoch": 0.21742855918769546, "grad_norm": 2.878032684326172, "learning_rate": 9.953468767393754e-05, "loss": 2.0757747650146485, "memory(GiB)": 67.19, "step": 5075, "token_acc": 0.523121387283237, 
"train_speed(iter/s)": 0.669767 }, { "epoch": 0.21764277451694444, "grad_norm": 6.163705825805664, "learning_rate": 9.953377123696673e-05, "loss": 2.495429039001465, "memory(GiB)": 67.19, "step": 5080, "token_acc": 0.45555555555555555, "train_speed(iter/s)": 0.669676 }, { "epoch": 0.2178569898461934, "grad_norm": 3.3716659545898438, "learning_rate": 9.953285390264519e-05, "loss": 2.3220081329345703, "memory(GiB)": 67.19, "step": 5085, "token_acc": 0.5146443514644351, "train_speed(iter/s)": 0.669629 }, { "epoch": 0.21807120517544235, "grad_norm": 5.297702789306641, "learning_rate": 9.953193567098952e-05, "loss": 2.5566858291625976, "memory(GiB)": 67.19, "step": 5090, "token_acc": 0.4563758389261745, "train_speed(iter/s)": 0.669734 }, { "epoch": 0.21828542050469132, "grad_norm": 3.6507503986358643, "learning_rate": 9.953101654201638e-05, "loss": 2.7595706939697267, "memory(GiB)": 67.19, "step": 5095, "token_acc": 0.41638225255972694, "train_speed(iter/s)": 0.669695 }, { "epoch": 0.21849963583394028, "grad_norm": 2.653899669647217, "learning_rate": 9.953009651574241e-05, "loss": 2.5358442306518554, "memory(GiB)": 67.19, "step": 5100, "token_acc": 0.4707379134860051, "train_speed(iter/s)": 0.669756 }, { "epoch": 0.21871385116318923, "grad_norm": 2.743530511856079, "learning_rate": 9.952917559218427e-05, "loss": 2.49549560546875, "memory(GiB)": 67.19, "step": 5105, "token_acc": 0.4723127035830619, "train_speed(iter/s)": 0.669794 }, { "epoch": 0.2189280664924382, "grad_norm": 4.448938846588135, "learning_rate": 9.952825377135866e-05, "loss": 2.3936946868896483, "memory(GiB)": 67.19, "step": 5110, "token_acc": 0.47653429602888087, "train_speed(iter/s)": 0.669834 }, { "epoch": 0.21914228182168716, "grad_norm": 3.522733449935913, "learning_rate": 9.952733105328227e-05, "loss": 2.526842498779297, "memory(GiB)": 67.19, "step": 5115, "token_acc": 0.44376899696048633, "train_speed(iter/s)": 0.669829 }, { "epoch": 0.2193564971509361, "grad_norm": 4.173617362976074, 
"learning_rate": 9.952640743797181e-05, "loss": 2.4389888763427736, "memory(GiB)": 67.19, "step": 5120, "token_acc": 0.4727272727272727, "train_speed(iter/s)": 0.669809 }, { "epoch": 0.2195707124801851, "grad_norm": 2.9809443950653076, "learning_rate": 9.952548292544404e-05, "loss": 2.4550390243530273, "memory(GiB)": 67.19, "step": 5125, "token_acc": 0.4670846394984326, "train_speed(iter/s)": 0.669712 }, { "epoch": 0.21978492780943404, "grad_norm": 2.752281427383423, "learning_rate": 9.952455751571567e-05, "loss": 2.2868846893310546, "memory(GiB)": 67.19, "step": 5130, "token_acc": 0.5418502202643172, "train_speed(iter/s)": 0.669745 }, { "epoch": 0.21999914313868302, "grad_norm": 2.514512300491333, "learning_rate": 9.952363120880348e-05, "loss": 2.5827484130859375, "memory(GiB)": 67.19, "step": 5135, "token_acc": 0.46283783783783783, "train_speed(iter/s)": 0.669688 }, { "epoch": 0.22021335846793197, "grad_norm": 3.1921868324279785, "learning_rate": 9.952270400472428e-05, "loss": 2.631655693054199, "memory(GiB)": 67.19, "step": 5140, "token_acc": 0.46332046332046334, "train_speed(iter/s)": 0.66976 }, { "epoch": 0.22042757379718092, "grad_norm": 2.6012723445892334, "learning_rate": 9.952177590349481e-05, "loss": 2.295444297790527, "memory(GiB)": 67.19, "step": 5145, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.669891 }, { "epoch": 0.2206417891264299, "grad_norm": 2.702033519744873, "learning_rate": 9.952084690513193e-05, "loss": 2.2083484649658205, "memory(GiB)": 67.19, "step": 5150, "token_acc": 0.494949494949495, "train_speed(iter/s)": 0.670054 }, { "epoch": 0.22085600445567885, "grad_norm": 3.0651586055755615, "learning_rate": 9.951991700965245e-05, "loss": 2.669190788269043, "memory(GiB)": 67.19, "step": 5155, "token_acc": 0.47474747474747475, "train_speed(iter/s)": 0.670203 }, { "epoch": 0.2210702197849278, "grad_norm": 3.696831464767456, "learning_rate": 9.951898621707324e-05, "loss": 2.7062063217163086, "memory(GiB)": 67.19, "step": 5160, 
"token_acc": 0.49328859060402686, "train_speed(iter/s)": 0.67011 }, { "epoch": 0.22128443511417678, "grad_norm": 2.649965763092041, "learning_rate": 9.95180545274111e-05, "loss": 2.204037666320801, "memory(GiB)": 67.19, "step": 5165, "token_acc": 0.5, "train_speed(iter/s)": 0.669948 }, { "epoch": 0.22149865044342573, "grad_norm": 3.721789598464966, "learning_rate": 9.951712194068298e-05, "loss": 2.381745147705078, "memory(GiB)": 67.19, "step": 5170, "token_acc": 0.4876325088339223, "train_speed(iter/s)": 0.66978 }, { "epoch": 0.22171286577267468, "grad_norm": 3.197411298751831, "learning_rate": 9.951618845690573e-05, "loss": 2.7393783569335937, "memory(GiB)": 67.19, "step": 5175, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.669887 }, { "epoch": 0.22192708110192366, "grad_norm": 3.0175955295562744, "learning_rate": 9.95152540760963e-05, "loss": 2.0102302551269533, "memory(GiB)": 67.19, "step": 5180, "token_acc": 0.5375, "train_speed(iter/s)": 0.669889 }, { "epoch": 0.2221412964311726, "grad_norm": 2.4198293685913086, "learning_rate": 9.951431879827158e-05, "loss": 2.6120182037353517, "memory(GiB)": 67.19, "step": 5185, "token_acc": 0.496551724137931, "train_speed(iter/s)": 0.669976 }, { "epoch": 0.22235551176042156, "grad_norm": 3.086341381072998, "learning_rate": 9.951338262344852e-05, "loss": 2.6753076553344726, "memory(GiB)": 67.19, "step": 5190, "token_acc": 0.45374449339207046, "train_speed(iter/s)": 0.670168 }, { "epoch": 0.22256972708967054, "grad_norm": 3.259769916534424, "learning_rate": 9.95124455516441e-05, "loss": 2.6303930282592773, "memory(GiB)": 67.19, "step": 5195, "token_acc": 0.432258064516129, "train_speed(iter/s)": 0.670177 }, { "epoch": 0.2227839424189195, "grad_norm": 3.817173719406128, "learning_rate": 9.951150758287526e-05, "loss": 2.3444581985473634, "memory(GiB)": 67.19, "step": 5200, "token_acc": 0.45614035087719296, "train_speed(iter/s)": 0.670245 }, { "epoch": 0.22299815774816845, "grad_norm": 3.764359951019287, 
"learning_rate": 9.951056871715902e-05, "loss": 2.337006950378418, "memory(GiB)": 67.19, "step": 5205, "token_acc": 0.5018450184501845, "train_speed(iter/s)": 0.670267 }, { "epoch": 0.22321237307741743, "grad_norm": 3.176863193511963, "learning_rate": 9.950962895451241e-05, "loss": 2.5429636001586915, "memory(GiB)": 67.19, "step": 5210, "token_acc": 0.52, "train_speed(iter/s)": 0.670179 }, { "epoch": 0.22342658840666638, "grad_norm": 3.1224536895751953, "learning_rate": 9.950868829495242e-05, "loss": 2.3335533142089844, "memory(GiB)": 67.19, "step": 5215, "token_acc": 0.498220640569395, "train_speed(iter/s)": 0.670212 }, { "epoch": 0.22364080373591536, "grad_norm": 3.739410877227783, "learning_rate": 9.950774673849608e-05, "loss": 2.7070388793945312, "memory(GiB)": 67.19, "step": 5220, "token_acc": 0.4166666666666667, "train_speed(iter/s)": 0.670341 }, { "epoch": 0.2238550190651643, "grad_norm": 2.6491880416870117, "learning_rate": 9.950680428516046e-05, "loss": 2.5278579711914064, "memory(GiB)": 67.19, "step": 5225, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.67035 }, { "epoch": 0.22406923439441326, "grad_norm": 2.7800819873809814, "learning_rate": 9.950586093496266e-05, "loss": 2.6464176177978516, "memory(GiB)": 67.19, "step": 5230, "token_acc": 0.43214285714285716, "train_speed(iter/s)": 0.670309 }, { "epoch": 0.22428344972366224, "grad_norm": 2.693230390548706, "learning_rate": 9.950491668791975e-05, "loss": 2.4318777084350587, "memory(GiB)": 67.19, "step": 5235, "token_acc": 0.4647058823529412, "train_speed(iter/s)": 0.670245 }, { "epoch": 0.2244976650529112, "grad_norm": 2.9078562259674072, "learning_rate": 9.950397154404882e-05, "loss": 2.4723291397094727, "memory(GiB)": 67.19, "step": 5240, "token_acc": 0.44366197183098594, "train_speed(iter/s)": 0.670253 }, { "epoch": 0.22471188038216014, "grad_norm": 2.538776159286499, "learning_rate": 9.950302550336701e-05, "loss": 2.3289207458496093, "memory(GiB)": 67.19, "step": 5245, "token_acc": 
0.44966442953020136, "train_speed(iter/s)": 0.670194 }, { "epoch": 0.22492609571140912, "grad_norm": 2.6165571212768555, "learning_rate": 9.950207856589146e-05, "loss": 2.5356719970703123, "memory(GiB)": 67.19, "step": 5250, "token_acc": 0.4704225352112676, "train_speed(iter/s)": 0.670177 }, { "epoch": 0.22514031104065807, "grad_norm": 3.8649091720581055, "learning_rate": 9.950113073163932e-05, "loss": 2.530033302307129, "memory(GiB)": 67.19, "step": 5255, "token_acc": 0.4811320754716981, "train_speed(iter/s)": 0.670287 }, { "epoch": 0.22535452636990702, "grad_norm": 2.686535120010376, "learning_rate": 9.950018200062776e-05, "loss": 2.446687126159668, "memory(GiB)": 67.19, "step": 5260, "token_acc": 0.45794392523364486, "train_speed(iter/s)": 0.670361 }, { "epoch": 0.225568741699156, "grad_norm": 6.003696918487549, "learning_rate": 9.949923237287397e-05, "loss": 2.811530113220215, "memory(GiB)": 67.19, "step": 5265, "token_acc": 0.41896024464831805, "train_speed(iter/s)": 0.670441 }, { "epoch": 0.22578295702840495, "grad_norm": 3.4984548091888428, "learning_rate": 9.949828184839516e-05, "loss": 2.7172412872314453, "memory(GiB)": 67.19, "step": 5270, "token_acc": 0.4426751592356688, "train_speed(iter/s)": 0.670439 }, { "epoch": 0.2259971723576539, "grad_norm": 2.441460132598877, "learning_rate": 9.949733042720853e-05, "loss": 2.728480911254883, "memory(GiB)": 67.19, "step": 5275, "token_acc": 0.4673202614379085, "train_speed(iter/s)": 0.670403 }, { "epoch": 0.22621138768690288, "grad_norm": 3.646390199661255, "learning_rate": 9.949637810933132e-05, "loss": 2.7090682983398438, "memory(GiB)": 67.19, "step": 5280, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.670491 }, { "epoch": 0.22642560301615183, "grad_norm": 2.682464599609375, "learning_rate": 9.94954248947808e-05, "loss": 2.5448713302612305, "memory(GiB)": 67.19, "step": 5285, "token_acc": 0.4744744744744745, "train_speed(iter/s)": 0.67059 }, { "epoch": 0.22663981834540078, "grad_norm": 
3.1652143001556396, "learning_rate": 9.949447078357423e-05, "loss": 2.1262636184692383, "memory(GiB)": 67.19, "step": 5290, "token_acc": 0.548951048951049, "train_speed(iter/s)": 0.670491 }, { "epoch": 0.22685403367464976, "grad_norm": 3.484757661819458, "learning_rate": 9.949351577572888e-05, "loss": 2.362611198425293, "memory(GiB)": 67.19, "step": 5295, "token_acc": 0.5056179775280899, "train_speed(iter/s)": 0.670592 }, { "epoch": 0.22706824900389871, "grad_norm": 2.7999987602233887, "learning_rate": 9.949255987126207e-05, "loss": 2.4324228286743166, "memory(GiB)": 67.19, "step": 5300, "token_acc": 0.43859649122807015, "train_speed(iter/s)": 0.670701 }, { "epoch": 0.2272824643331477, "grad_norm": 3.3186962604522705, "learning_rate": 9.949160307019112e-05, "loss": 2.559764862060547, "memory(GiB)": 67.19, "step": 5305, "token_acc": 0.4539249146757679, "train_speed(iter/s)": 0.670624 }, { "epoch": 0.22749667966239664, "grad_norm": 3.3010764122009277, "learning_rate": 9.949064537253334e-05, "loss": 2.405417251586914, "memory(GiB)": 67.19, "step": 5310, "token_acc": 0.49612403100775193, "train_speed(iter/s)": 0.670753 }, { "epoch": 0.2277108949916456, "grad_norm": 4.7398762702941895, "learning_rate": 9.94896867783061e-05, "loss": 2.771778869628906, "memory(GiB)": 67.19, "step": 5315, "token_acc": 0.4644808743169399, "train_speed(iter/s)": 0.67059 }, { "epoch": 0.22792511032089458, "grad_norm": 5.012249946594238, "learning_rate": 9.948872728752675e-05, "loss": 2.308175468444824, "memory(GiB)": 67.19, "step": 5320, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.670659 }, { "epoch": 0.22813932565014353, "grad_norm": 2.888132095336914, "learning_rate": 9.948776690021269e-05, "loss": 2.4445032119750976, "memory(GiB)": 67.19, "step": 5325, "token_acc": 0.47101449275362317, "train_speed(iter/s)": 0.670809 }, { "epoch": 0.22835354097939248, "grad_norm": 3.3942320346832275, "learning_rate": 9.94868056163813e-05, "loss": 2.696196746826172, "memory(GiB)": 67.19, 
"step": 5330, "token_acc": 0.46613545816733065, "train_speed(iter/s)": 0.670836 }, { "epoch": 0.22856775630864146, "grad_norm": 3.487609624862671, "learning_rate": 9.948584343605e-05, "loss": 2.3359947204589844, "memory(GiB)": 67.19, "step": 5335, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.67094 }, { "epoch": 0.2287819716378904, "grad_norm": 3.065173864364624, "learning_rate": 9.948488035923625e-05, "loss": 2.412432861328125, "memory(GiB)": 67.19, "step": 5340, "token_acc": 0.5, "train_speed(iter/s)": 0.671082 }, { "epoch": 0.22899618696713936, "grad_norm": 3.01356840133667, "learning_rate": 9.948391638595746e-05, "loss": 2.471114921569824, "memory(GiB)": 67.19, "step": 5345, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.671188 }, { "epoch": 0.22921040229638834, "grad_norm": 3.721825122833252, "learning_rate": 9.94829515162311e-05, "loss": 2.3906164169311523, "memory(GiB)": 67.19, "step": 5350, "token_acc": 0.4937888198757764, "train_speed(iter/s)": 0.671272 }, { "epoch": 0.2294246176256373, "grad_norm": 2.819566488265991, "learning_rate": 9.948198575007466e-05, "loss": 2.350813293457031, "memory(GiB)": 67.19, "step": 5355, "token_acc": 0.49693251533742333, "train_speed(iter/s)": 0.671354 }, { "epoch": 0.22963883295488624, "grad_norm": 4.477334499359131, "learning_rate": 9.948101908750563e-05, "loss": 2.5362844467163086, "memory(GiB)": 67.19, "step": 5360, "token_acc": 0.4542483660130719, "train_speed(iter/s)": 0.67141 }, { "epoch": 0.22985304828413522, "grad_norm": 3.0770599842071533, "learning_rate": 9.94800515285415e-05, "loss": 2.357835388183594, "memory(GiB)": 67.19, "step": 5365, "token_acc": 0.50814332247557, "train_speed(iter/s)": 0.671491 }, { "epoch": 0.23006726361338417, "grad_norm": 3.3739044666290283, "learning_rate": 9.947908307319984e-05, "loss": 2.216343879699707, "memory(GiB)": 67.19, "step": 5370, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 0.671358 }, { "epoch": 0.23028147894263312, "grad_norm": 
2.6843607425689697, "learning_rate": 9.947811372149817e-05, "loss": 2.6763986587524413, "memory(GiB)": 67.19, "step": 5375, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.671468 }, { "epoch": 0.2304956942718821, "grad_norm": 2.6137588024139404, "learning_rate": 9.947714347345407e-05, "loss": 2.375712013244629, "memory(GiB)": 67.19, "step": 5380, "token_acc": 0.5019455252918288, "train_speed(iter/s)": 0.671394 }, { "epoch": 0.23070990960113105, "grad_norm": 3.0009491443634033, "learning_rate": 9.947617232908509e-05, "loss": 2.450156021118164, "memory(GiB)": 67.19, "step": 5385, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.67146 }, { "epoch": 0.23092412493038003, "grad_norm": 2.6285736560821533, "learning_rate": 9.947520028840884e-05, "loss": 2.310775947570801, "memory(GiB)": 67.19, "step": 5390, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.671341 }, { "epoch": 0.23113834025962898, "grad_norm": 2.913930892944336, "learning_rate": 9.947422735144293e-05, "loss": 2.3016378402709963, "memory(GiB)": 67.19, "step": 5395, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.671343 }, { "epoch": 0.23135255558887793, "grad_norm": 3.2839109897613525, "learning_rate": 9.947325351820498e-05, "loss": 2.4929677963256838, "memory(GiB)": 67.19, "step": 5400, "token_acc": 0.4594594594594595, "train_speed(iter/s)": 0.671358 }, { "epoch": 0.2315667709181269, "grad_norm": 3.484921455383301, "learning_rate": 9.947227878871261e-05, "loss": 2.4358951568603517, "memory(GiB)": 67.19, "step": 5405, "token_acc": 0.48928571428571427, "train_speed(iter/s)": 0.671242 }, { "epoch": 0.23178098624737586, "grad_norm": 2.503357410430908, "learning_rate": 9.947130316298354e-05, "loss": 2.2998445510864256, "memory(GiB)": 67.19, "step": 5410, "token_acc": 0.5358361774744027, "train_speed(iter/s)": 0.671261 }, { "epoch": 0.23199520157662482, "grad_norm": 2.9136557579040527, "learning_rate": 9.947032664103537e-05, "loss": 2.5696895599365233, "memory(GiB)": 67.19, 
"step": 5415, "token_acc": 0.5049833887043189, "train_speed(iter/s)": 0.671286 }, { "epoch": 0.2322094169058738, "grad_norm": 3.0783519744873047, "learning_rate": 9.946934922288584e-05, "loss": 2.3691829681396483, "memory(GiB)": 67.19, "step": 5420, "token_acc": 0.5138461538461538, "train_speed(iter/s)": 0.671379 }, { "epoch": 0.23242363223512275, "grad_norm": 3.611137866973877, "learning_rate": 9.946837090855264e-05, "loss": 2.04869384765625, "memory(GiB)": 67.19, "step": 5425, "token_acc": 0.5247148288973384, "train_speed(iter/s)": 0.671478 }, { "epoch": 0.2326378475643717, "grad_norm": 3.1266565322875977, "learning_rate": 9.94673916980535e-05, "loss": 2.7548011779785155, "memory(GiB)": 67.19, "step": 5430, "token_acc": 0.42857142857142855, "train_speed(iter/s)": 0.671503 }, { "epoch": 0.23285206289362068, "grad_norm": 2.5576226711273193, "learning_rate": 9.946641159140617e-05, "loss": 2.4732933044433594, "memory(GiB)": 67.19, "step": 5435, "token_acc": 0.48580441640378547, "train_speed(iter/s)": 0.671494 }, { "epoch": 0.23306627822286963, "grad_norm": 2.884782075881958, "learning_rate": 9.946543058862836e-05, "loss": 2.3279661178588866, "memory(GiB)": 67.19, "step": 5440, "token_acc": 0.526813880126183, "train_speed(iter/s)": 0.671614 }, { "epoch": 0.23328049355211858, "grad_norm": 3.079118490219116, "learning_rate": 9.946444868973789e-05, "loss": 2.7978992462158203, "memory(GiB)": 67.19, "step": 5445, "token_acc": 0.47509578544061304, "train_speed(iter/s)": 0.671709 }, { "epoch": 0.23349470888136756, "grad_norm": 3.043212890625, "learning_rate": 9.946346589475253e-05, "loss": 2.6162723541259765, "memory(GiB)": 67.19, "step": 5450, "token_acc": 0.4806201550387597, "train_speed(iter/s)": 0.671855 }, { "epoch": 0.2337089242106165, "grad_norm": 2.6556529998779297, "learning_rate": 9.946248220369008e-05, "loss": 2.455010414123535, "memory(GiB)": 67.19, "step": 5455, "token_acc": 0.5015673981191222, "train_speed(iter/s)": 0.671815 }, { "epoch": 0.23392313953986546, 
"grad_norm": 3.7936203479766846, "learning_rate": 9.946149761656837e-05, "loss": 2.450431060791016, "memory(GiB)": 67.19, "step": 5460, "token_acc": 0.4856115107913669, "train_speed(iter/s)": 0.67185 }, { "epoch": 0.23413735486911444, "grad_norm": 3.0269968509674072, "learning_rate": 9.946051213340524e-05, "loss": 2.172398567199707, "memory(GiB)": 67.19, "step": 5465, "token_acc": 0.4942528735632184, "train_speed(iter/s)": 0.67192 }, { "epoch": 0.2343515701983634, "grad_norm": 3.261636972427368, "learning_rate": 9.94595257542185e-05, "loss": 2.3572444915771484, "memory(GiB)": 67.19, "step": 5470, "token_acc": 0.4931972789115646, "train_speed(iter/s)": 0.671812 }, { "epoch": 0.23456578552761237, "grad_norm": 2.793675661087036, "learning_rate": 9.945853847902608e-05, "loss": 2.678486633300781, "memory(GiB)": 67.19, "step": 5475, "token_acc": 0.46296296296296297, "train_speed(iter/s)": 0.671865 }, { "epoch": 0.23478000085686132, "grad_norm": 3.158224582672119, "learning_rate": 9.945755030784585e-05, "loss": 2.3488317489624024, "memory(GiB)": 67.19, "step": 5480, "token_acc": 0.486646884272997, "train_speed(iter/s)": 0.671904 }, { "epoch": 0.23499421618611027, "grad_norm": 3.354581594467163, "learning_rate": 9.945656124069569e-05, "loss": 2.262125015258789, "memory(GiB)": 67.19, "step": 5485, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.671896 }, { "epoch": 0.23520843151535925, "grad_norm": 3.5666487216949463, "learning_rate": 9.945557127759352e-05, "loss": 2.3397216796875, "memory(GiB)": 67.19, "step": 5490, "token_acc": 0.5060240963855421, "train_speed(iter/s)": 0.671919 }, { "epoch": 0.2354226468446082, "grad_norm": 3.3139426708221436, "learning_rate": 9.94545804185573e-05, "loss": 2.4512989044189455, "memory(GiB)": 67.19, "step": 5495, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.671866 }, { "epoch": 0.23563686217385715, "grad_norm": 2.950448751449585, "learning_rate": 9.945358866360496e-05, "loss": 2.6113452911376953, "memory(GiB)": 
67.19, "step": 5500, "token_acc": 0.5, "train_speed(iter/s)": 0.671666 }, { "epoch": 0.23563686217385715, "eval_loss": 1.9379009008407593, "eval_runtime": 17.7847, "eval_samples_per_second": 5.623, "eval_steps_per_second": 5.623, "eval_token_acc": 0.5244565217391305, "step": 5500 }, { "epoch": 0.23585107750310613, "grad_norm": 3.907750129699707, "learning_rate": 9.945259601275447e-05, "loss": 2.598211479187012, "memory(GiB)": 67.19, "step": 5505, "token_acc": 0.507218479307026, "train_speed(iter/s)": 0.669811 }, { "epoch": 0.23606529283235508, "grad_norm": 5.64682674407959, "learning_rate": 9.945160246602381e-05, "loss": 2.650316619873047, "memory(GiB)": 67.19, "step": 5510, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.67001 }, { "epoch": 0.23627950816160403, "grad_norm": 3.1282474994659424, "learning_rate": 9.945060802343098e-05, "loss": 2.675784111022949, "memory(GiB)": 67.19, "step": 5515, "token_acc": 0.4578313253012048, "train_speed(iter/s)": 0.670028 }, { "epoch": 0.23649372349085301, "grad_norm": 3.131618022918701, "learning_rate": 9.9449612684994e-05, "loss": 2.632151412963867, "memory(GiB)": 67.19, "step": 5520, "token_acc": 0.45977011494252873, "train_speed(iter/s)": 0.669955 }, { "epoch": 0.23670793882010197, "grad_norm": 2.605466842651367, "learning_rate": 9.944861645073089e-05, "loss": 2.8973365783691407, "memory(GiB)": 67.19, "step": 5525, "token_acc": 0.42138364779874216, "train_speed(iter/s)": 0.66981 }, { "epoch": 0.23692215414935092, "grad_norm": 3.5129358768463135, "learning_rate": 9.944761932065971e-05, "loss": 2.2458173751831056, "memory(GiB)": 67.19, "step": 5530, "token_acc": 0.5815899581589958, "train_speed(iter/s)": 0.669883 }, { "epoch": 0.2371363694785999, "grad_norm": 2.743661403656006, "learning_rate": 9.944662129479852e-05, "loss": 2.412124252319336, "memory(GiB)": 67.19, "step": 5535, "token_acc": 0.46438746438746437, "train_speed(iter/s)": 0.669876 }, { "epoch": 0.23735058480784885, "grad_norm": 3.059570789337158, 
"learning_rate": 9.944562237316541e-05, "loss": 2.5504940032958983, "memory(GiB)": 67.19, "step": 5540, "token_acc": 0.4271186440677966, "train_speed(iter/s)": 0.66993 }, { "epoch": 0.2375648001370978, "grad_norm": 3.183274030685425, "learning_rate": 9.944462255577846e-05, "loss": 2.4396358489990235, "memory(GiB)": 67.19, "step": 5545, "token_acc": 0.5066225165562914, "train_speed(iter/s)": 0.669786 }, { "epoch": 0.23777901546634678, "grad_norm": 2.5840725898742676, "learning_rate": 9.944362184265578e-05, "loss": 2.5324626922607423, "memory(GiB)": 67.19, "step": 5550, "token_acc": 0.4479166666666667, "train_speed(iter/s)": 0.669725 }, { "epoch": 0.23799323079559573, "grad_norm": 3.2040252685546875, "learning_rate": 9.944262023381553e-05, "loss": 2.736400604248047, "memory(GiB)": 67.19, "step": 5555, "token_acc": 0.43322475570032576, "train_speed(iter/s)": 0.669619 }, { "epoch": 0.2382074461248447, "grad_norm": 3.0236642360687256, "learning_rate": 9.944161772927582e-05, "loss": 2.7456539154052733, "memory(GiB)": 67.19, "step": 5560, "token_acc": 0.4755700325732899, "train_speed(iter/s)": 0.669657 }, { "epoch": 0.23842166145409366, "grad_norm": 3.2785253524780273, "learning_rate": 9.944061432905483e-05, "loss": 2.4398197174072265, "memory(GiB)": 67.19, "step": 5565, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.669697 }, { "epoch": 0.2386358767833426, "grad_norm": 3.1230456829071045, "learning_rate": 9.943961003317073e-05, "loss": 2.4546770095825194, "memory(GiB)": 67.19, "step": 5570, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.669612 }, { "epoch": 0.2388500921125916, "grad_norm": 3.1643223762512207, "learning_rate": 9.943860484164172e-05, "loss": 2.3466285705566405, "memory(GiB)": 67.19, "step": 5575, "token_acc": 0.49843260188087773, "train_speed(iter/s)": 0.669762 }, { "epoch": 0.23906430744184054, "grad_norm": 2.890522003173828, "learning_rate": 9.9437598754486e-05, "loss": 2.6326900482177735, "memory(GiB)": 67.19, "step": 5580, 
"token_acc": 0.4482758620689655, "train_speed(iter/s)": 0.669774 }, { "epoch": 0.2392785227710895, "grad_norm": 2.728153705596924, "learning_rate": 9.943659177172181e-05, "loss": 2.8163002014160154, "memory(GiB)": 67.19, "step": 5585, "token_acc": 0.46381578947368424, "train_speed(iter/s)": 0.669842 }, { "epoch": 0.23949273810033847, "grad_norm": 3.147416591644287, "learning_rate": 9.943558389336738e-05, "loss": 2.319766807556152, "memory(GiB)": 67.19, "step": 5590, "token_acc": 0.493368700265252, "train_speed(iter/s)": 0.669917 }, { "epoch": 0.23970695342958742, "grad_norm": 3.0032737255096436, "learning_rate": 9.943457511944096e-05, "loss": 2.7745208740234375, "memory(GiB)": 67.19, "step": 5595, "token_acc": 0.43, "train_speed(iter/s)": 0.669914 }, { "epoch": 0.23992116875883637, "grad_norm": 3.0923826694488525, "learning_rate": 9.943356544996087e-05, "loss": 2.448986625671387, "memory(GiB)": 67.19, "step": 5600, "token_acc": 0.5317460317460317, "train_speed(iter/s)": 0.66996 }, { "epoch": 0.24013538408808535, "grad_norm": 2.6228675842285156, "learning_rate": 9.943255488494534e-05, "loss": 2.3052946090698243, "memory(GiB)": 67.19, "step": 5605, "token_acc": 0.5063291139240507, "train_speed(iter/s)": 0.669902 }, { "epoch": 0.2403495994173343, "grad_norm": 2.315650701522827, "learning_rate": 9.943154342441272e-05, "loss": 2.280658721923828, "memory(GiB)": 67.19, "step": 5610, "token_acc": 0.4789156626506024, "train_speed(iter/s)": 0.670003 }, { "epoch": 0.24056381474658325, "grad_norm": 2.453479051589966, "learning_rate": 9.943053106838132e-05, "loss": 2.463773727416992, "memory(GiB)": 67.19, "step": 5615, "token_acc": 0.5345345345345346, "train_speed(iter/s)": 0.670047 }, { "epoch": 0.24077803007583223, "grad_norm": 3.1275675296783447, "learning_rate": 9.942951781686947e-05, "loss": 2.552057647705078, "memory(GiB)": 67.19, "step": 5620, "token_acc": 0.4619883040935672, "train_speed(iter/s)": 0.670023 }, { "epoch": 0.24099224540508118, "grad_norm": 
2.2267634868621826, "learning_rate": 9.942850366989556e-05, "loss": 2.170100975036621, "memory(GiB)": 67.19, "step": 5625, "token_acc": 0.5288135593220339, "train_speed(iter/s)": 0.67013 }, { "epoch": 0.24120646073433014, "grad_norm": 2.9502344131469727, "learning_rate": 9.942748862747791e-05, "loss": 2.316722869873047, "memory(GiB)": 67.19, "step": 5630, "token_acc": 0.5338645418326693, "train_speed(iter/s)": 0.670234 }, { "epoch": 0.24142067606357911, "grad_norm": 2.970048427581787, "learning_rate": 9.942647268963496e-05, "loss": 2.374821662902832, "memory(GiB)": 67.19, "step": 5635, "token_acc": 0.4875, "train_speed(iter/s)": 0.670347 }, { "epoch": 0.24163489139282807, "grad_norm": 4.103857517242432, "learning_rate": 9.942545585638507e-05, "loss": 2.496773910522461, "memory(GiB)": 67.19, "step": 5640, "token_acc": 0.48125, "train_speed(iter/s)": 0.670299 }, { "epoch": 0.24184910672207705, "grad_norm": 3.3767659664154053, "learning_rate": 9.942443812774669e-05, "loss": 2.337641716003418, "memory(GiB)": 67.19, "step": 5645, "token_acc": 0.5076335877862596, "train_speed(iter/s)": 0.670229 }, { "epoch": 0.242063322051326, "grad_norm": 2.7206177711486816, "learning_rate": 9.942341950373825e-05, "loss": 2.1446966171264648, "memory(GiB)": 67.19, "step": 5650, "token_acc": 0.5183946488294314, "train_speed(iter/s)": 0.670382 }, { "epoch": 0.24227753738057495, "grad_norm": 3.5111870765686035, "learning_rate": 9.942239998437822e-05, "loss": 2.3069849014282227, "memory(GiB)": 67.19, "step": 5655, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.670389 }, { "epoch": 0.24249175270982393, "grad_norm": 3.2904629707336426, "learning_rate": 9.942137956968502e-05, "loss": 2.6453521728515623, "memory(GiB)": 67.19, "step": 5660, "token_acc": 0.4263322884012539, "train_speed(iter/s)": 0.670462 }, { "epoch": 0.24270596803907288, "grad_norm": 2.6270244121551514, "learning_rate": 9.94203582596772e-05, "loss": 2.332375717163086, "memory(GiB)": 67.19, "step": 5665, "token_acc": 
0.512987012987013, "train_speed(iter/s)": 0.670542 }, { "epoch": 0.24292018336832183, "grad_norm": 2.7852680683135986, "learning_rate": 9.941933605437322e-05, "loss": 2.4694820404052735, "memory(GiB)": 67.19, "step": 5670, "token_acc": 0.46296296296296297, "train_speed(iter/s)": 0.670572 }, { "epoch": 0.2431343986975708, "grad_norm": 2.759566068649292, "learning_rate": 9.941831295379159e-05, "loss": 2.2453895568847657, "memory(GiB)": 67.19, "step": 5675, "token_acc": 0.5306859205776173, "train_speed(iter/s)": 0.670587 }, { "epoch": 0.24334861402681976, "grad_norm": 3.075831651687622, "learning_rate": 9.941728895795088e-05, "loss": 2.1050392150878907, "memory(GiB)": 67.19, "step": 5680, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.670683 }, { "epoch": 0.2435628293560687, "grad_norm": 3.4029083251953125, "learning_rate": 9.941626406686962e-05, "loss": 2.450748825073242, "memory(GiB)": 67.19, "step": 5685, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670478 }, { "epoch": 0.2437770446853177, "grad_norm": 2.7752010822296143, "learning_rate": 9.941523828056638e-05, "loss": 2.4953533172607423, "memory(GiB)": 67.19, "step": 5690, "token_acc": 0.4624277456647399, "train_speed(iter/s)": 0.670386 }, { "epoch": 0.24399126001456664, "grad_norm": 3.6393651962280273, "learning_rate": 9.941421159905975e-05, "loss": 2.8754741668701174, "memory(GiB)": 67.19, "step": 5695, "token_acc": 0.45054945054945056, "train_speed(iter/s)": 0.670555 }, { "epoch": 0.2442054753438156, "grad_norm": 3.7535791397094727, "learning_rate": 9.941318402236831e-05, "loss": 2.168903350830078, "memory(GiB)": 67.19, "step": 5700, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.670421 }, { "epoch": 0.24441969067306457, "grad_norm": 2.610297918319702, "learning_rate": 9.94121555505107e-05, "loss": 2.6091793060302733, "memory(GiB)": 67.19, "step": 5705, "token_acc": 0.4565826330532213, "train_speed(iter/s)": 0.670447 }, { "epoch": 0.24463390600231352, "grad_norm": 
3.653465986251831, "learning_rate": 9.941112618350553e-05, "loss": 2.242985725402832, "memory(GiB)": 67.19, "step": 5710, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.670431 }, { "epoch": 0.24484812133156247, "grad_norm": 2.861609697341919, "learning_rate": 9.941009592137146e-05, "loss": 2.7549507141113283, "memory(GiB)": 67.19, "step": 5715, "token_acc": 0.4823529411764706, "train_speed(iter/s)": 0.670458 }, { "epoch": 0.24506233666081145, "grad_norm": 3.537522077560425, "learning_rate": 9.940906476412716e-05, "loss": 2.855455780029297, "memory(GiB)": 67.19, "step": 5720, "token_acc": 0.4269005847953216, "train_speed(iter/s)": 0.670585 }, { "epoch": 0.2452765519900604, "grad_norm": 3.1694741249084473, "learning_rate": 9.940803271179129e-05, "loss": 2.477086639404297, "memory(GiB)": 67.19, "step": 5725, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.67046 }, { "epoch": 0.24549076731930938, "grad_norm": 3.9724864959716797, "learning_rate": 9.940699976438257e-05, "loss": 2.1926671981811525, "memory(GiB)": 67.19, "step": 5730, "token_acc": 0.516260162601626, "train_speed(iter/s)": 0.670432 }, { "epoch": 0.24570498264855833, "grad_norm": 4.499468803405762, "learning_rate": 9.940596592191968e-05, "loss": 2.4580255508422852, "memory(GiB)": 67.19, "step": 5735, "token_acc": 0.4907749077490775, "train_speed(iter/s)": 0.670442 }, { "epoch": 0.24591919797780729, "grad_norm": 3.3424932956695557, "learning_rate": 9.94049311844214e-05, "loss": 2.50787467956543, "memory(GiB)": 67.19, "step": 5740, "token_acc": 0.4919093851132686, "train_speed(iter/s)": 0.670364 }, { "epoch": 0.24613341330705626, "grad_norm": 4.338657855987549, "learning_rate": 9.940389555190643e-05, "loss": 2.8347541809082033, "memory(GiB)": 67.19, "step": 5745, "token_acc": 0.42948717948717946, "train_speed(iter/s)": 0.670438 }, { "epoch": 0.24634762863630522, "grad_norm": 2.853929281234741, "learning_rate": 9.940285902439356e-05, "loss": 2.1891889572143555, "memory(GiB)": 67.19, "step": 
5750, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.670414 }, { "epoch": 0.24656184396555417, "grad_norm": 3.9248576164245605, "learning_rate": 9.940182160190155e-05, "loss": 2.5297405242919924, "memory(GiB)": 67.19, "step": 5755, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.670333 }, { "epoch": 0.24677605929480315, "grad_norm": 3.3086366653442383, "learning_rate": 9.94007832844492e-05, "loss": 2.532232666015625, "memory(GiB)": 67.19, "step": 5760, "token_acc": 0.4479495268138801, "train_speed(iter/s)": 0.670442 }, { "epoch": 0.2469902746240521, "grad_norm": 3.206134080886841, "learning_rate": 9.93997440720553e-05, "loss": 2.791141891479492, "memory(GiB)": 67.19, "step": 5765, "token_acc": 0.45993031358885017, "train_speed(iter/s)": 0.670483 }, { "epoch": 0.24720448995330105, "grad_norm": 3.7310783863067627, "learning_rate": 9.939870396473871e-05, "loss": 2.3589033126831054, "memory(GiB)": 67.19, "step": 5770, "token_acc": 0.5463576158940397, "train_speed(iter/s)": 0.670382 }, { "epoch": 0.24741870528255003, "grad_norm": 3.8692471981048584, "learning_rate": 9.939766296251827e-05, "loss": 2.5894989013671874, "memory(GiB)": 67.19, "step": 5775, "token_acc": 0.46735395189003437, "train_speed(iter/s)": 0.670451 }, { "epoch": 0.24763292061179898, "grad_norm": 2.387263536453247, "learning_rate": 9.939662106541281e-05, "loss": 2.497370147705078, "memory(GiB)": 67.19, "step": 5780, "token_acc": 0.48231511254019294, "train_speed(iter/s)": 0.670426 }, { "epoch": 0.24784713594104793, "grad_norm": 4.022426605224609, "learning_rate": 9.939557827344121e-05, "loss": 2.3339887619018556, "memory(GiB)": 67.19, "step": 5785, "token_acc": 0.4934640522875817, "train_speed(iter/s)": 0.670482 }, { "epoch": 0.2480613512702969, "grad_norm": 2.726962089538574, "learning_rate": 9.93945345866224e-05, "loss": 2.586861419677734, "memory(GiB)": 67.19, "step": 5790, "token_acc": 0.4462809917355372, "train_speed(iter/s)": 0.670586 }, { "epoch": 0.24827556659954586, 
"grad_norm": 2.665339708328247, "learning_rate": 9.939349000497524e-05, "loss": 2.6320335388183596, "memory(GiB)": 67.19, "step": 5795, "token_acc": 0.4440993788819876, "train_speed(iter/s)": 0.670609 }, { "epoch": 0.2484897819287948, "grad_norm": 3.028201103210449, "learning_rate": 9.939244452851869e-05, "loss": 2.536259078979492, "memory(GiB)": 67.19, "step": 5800, "token_acc": 0.4812286689419795, "train_speed(iter/s)": 0.670595 }, { "epoch": 0.2487039972580438, "grad_norm": 3.66929292678833, "learning_rate": 9.939139815727168e-05, "loss": 2.372038650512695, "memory(GiB)": 67.19, "step": 5805, "token_acc": 0.49107142857142855, "train_speed(iter/s)": 0.6706 }, { "epoch": 0.24891821258729274, "grad_norm": 2.791670799255371, "learning_rate": 9.939035089125313e-05, "loss": 2.539892578125, "memory(GiB)": 67.19, "step": 5810, "token_acc": 0.42567567567567566, "train_speed(iter/s)": 0.67068 }, { "epoch": 0.24913242791654172, "grad_norm": 3.7498884201049805, "learning_rate": 9.938930273048206e-05, "loss": 2.635009002685547, "memory(GiB)": 67.19, "step": 5815, "token_acc": 0.44363636363636366, "train_speed(iter/s)": 0.670757 }, { "epoch": 0.24934664324579067, "grad_norm": 2.9439868927001953, "learning_rate": 9.938825367497745e-05, "loss": 2.1953405380249023, "memory(GiB)": 67.19, "step": 5820, "token_acc": 0.4657534246575342, "train_speed(iter/s)": 0.670825 }, { "epoch": 0.24956085857503962, "grad_norm": 3.5225532054901123, "learning_rate": 9.938720372475827e-05, "loss": 2.438524055480957, "memory(GiB)": 67.19, "step": 5825, "token_acc": 0.46923076923076923, "train_speed(iter/s)": 0.670964 }, { "epoch": 0.2497750739042886, "grad_norm": 2.6804685592651367, "learning_rate": 9.938615287984359e-05, "loss": 2.379603958129883, "memory(GiB)": 67.19, "step": 5830, "token_acc": 0.4692556634304207, "train_speed(iter/s)": 0.670985 }, { "epoch": 0.24998928923353755, "grad_norm": 3.4680981636047363, "learning_rate": 9.938510114025241e-05, "loss": 2.4590696334838866, "memory(GiB)": 
67.19, "step": 5835, "token_acc": 0.4732142857142857, "train_speed(iter/s)": 0.671024 }, { "epoch": 0.2502035045627865, "grad_norm": 3.616011142730713, "learning_rate": 9.938404850600377e-05, "loss": 2.414872169494629, "memory(GiB)": 67.19, "step": 5840, "token_acc": 0.46984126984126984, "train_speed(iter/s)": 0.671076 }, { "epoch": 0.2504177198920355, "grad_norm": 2.5880253314971924, "learning_rate": 9.93829949771168e-05, "loss": 2.656060981750488, "memory(GiB)": 67.19, "step": 5845, "token_acc": 0.4059701492537313, "train_speed(iter/s)": 0.671121 }, { "epoch": 0.2506319352212844, "grad_norm": 2.7191386222839355, "learning_rate": 9.938194055361056e-05, "loss": 2.507341766357422, "memory(GiB)": 67.19, "step": 5850, "token_acc": 0.4642857142857143, "train_speed(iter/s)": 0.671077 }, { "epoch": 0.2508461505505334, "grad_norm": 3.1355059146881104, "learning_rate": 9.938088523550412e-05, "loss": 2.53778076171875, "memory(GiB)": 67.19, "step": 5855, "token_acc": 0.4376899696048632, "train_speed(iter/s)": 0.671099 }, { "epoch": 0.25106036587978237, "grad_norm": 3.18672776222229, "learning_rate": 9.937982902281662e-05, "loss": 2.448589324951172, "memory(GiB)": 67.19, "step": 5860, "token_acc": 0.4662576687116564, "train_speed(iter/s)": 0.671141 }, { "epoch": 0.25127458120903134, "grad_norm": 3.1273300647735596, "learning_rate": 9.93787719155672e-05, "loss": 2.180960273742676, "memory(GiB)": 67.19, "step": 5865, "token_acc": 0.54, "train_speed(iter/s)": 0.671267 }, { "epoch": 0.25148879653828027, "grad_norm": 2.552206516265869, "learning_rate": 9.937771391377501e-05, "loss": 2.5839500427246094, "memory(GiB)": 67.19, "step": 5870, "token_acc": 0.4617737003058104, "train_speed(iter/s)": 0.671308 }, { "epoch": 0.25170301186752925, "grad_norm": 3.5302839279174805, "learning_rate": 9.937665501745921e-05, "loss": 2.597468948364258, "memory(GiB)": 67.19, "step": 5875, "token_acc": 0.4414715719063545, "train_speed(iter/s)": 0.671416 }, { "epoch": 0.2519172271967782, "grad_norm": 
3.1669251918792725, "learning_rate": 9.937559522663899e-05, "loss": 2.596678352355957, "memory(GiB)": 67.19, "step": 5880, "token_acc": 0.46204620462046203, "train_speed(iter/s)": 0.671532 }, { "epoch": 0.25213144252602715, "grad_norm": 2.5170443058013916, "learning_rate": 9.937453454133355e-05, "loss": 2.3539052963256837, "memory(GiB)": 67.19, "step": 5885, "token_acc": 0.47796610169491527, "train_speed(iter/s)": 0.671383 }, { "epoch": 0.25234565785527613, "grad_norm": 3.134406805038452, "learning_rate": 9.937347296156207e-05, "loss": 2.3334835052490233, "memory(GiB)": 67.19, "step": 5890, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.671572 }, { "epoch": 0.2525598731845251, "grad_norm": 3.3271865844726562, "learning_rate": 9.937241048734383e-05, "loss": 2.3542545318603514, "memory(GiB)": 67.19, "step": 5895, "token_acc": 0.5115511551155115, "train_speed(iter/s)": 0.671473 }, { "epoch": 0.25277408851377403, "grad_norm": 3.38163423538208, "learning_rate": 9.937134711869806e-05, "loss": 2.5367355346679688, "memory(GiB)": 67.19, "step": 5900, "token_acc": 0.4588235294117647, "train_speed(iter/s)": 0.671539 }, { "epoch": 0.252988303843023, "grad_norm": 2.1123416423797607, "learning_rate": 9.937028285564402e-05, "loss": 2.280523490905762, "memory(GiB)": 67.19, "step": 5905, "token_acc": 0.5080385852090032, "train_speed(iter/s)": 0.671442 }, { "epoch": 0.253202519172272, "grad_norm": 2.6805200576782227, "learning_rate": 9.9369217698201e-05, "loss": 1.9719470977783202, "memory(GiB)": 67.19, "step": 5910, "token_acc": 0.5570175438596491, "train_speed(iter/s)": 0.671408 }, { "epoch": 0.2534167345015209, "grad_norm": 3.171529769897461, "learning_rate": 9.936815164638829e-05, "loss": 2.5804161071777343, "memory(GiB)": 67.19, "step": 5915, "token_acc": 0.46710526315789475, "train_speed(iter/s)": 0.671526 }, { "epoch": 0.2536309498307699, "grad_norm": 4.135290622711182, "learning_rate": 9.93670847002252e-05, "loss": 2.4509979248046876, "memory(GiB)": 67.19, 
"step": 5920, "token_acc": 0.44594594594594594, "train_speed(iter/s)": 0.671649 }, { "epoch": 0.25384516516001887, "grad_norm": 2.5340240001678467, "learning_rate": 9.936601685973106e-05, "loss": 2.105211067199707, "memory(GiB)": 67.19, "step": 5925, "token_acc": 0.5246478873239436, "train_speed(iter/s)": 0.671684 }, { "epoch": 0.2540593804892678, "grad_norm": 2.6837270259857178, "learning_rate": 9.93649481249252e-05, "loss": 2.4366683959960938, "memory(GiB)": 67.19, "step": 5930, "token_acc": 0.4555160142348754, "train_speed(iter/s)": 0.671579 }, { "epoch": 0.2542735958185168, "grad_norm": 2.9403910636901855, "learning_rate": 9.936387849582702e-05, "loss": 2.484181594848633, "memory(GiB)": 67.19, "step": 5935, "token_acc": 0.5075757575757576, "train_speed(iter/s)": 0.671632 }, { "epoch": 0.25448781114776575, "grad_norm": 2.916747808456421, "learning_rate": 9.936280797245586e-05, "loss": 2.2568714141845705, "memory(GiB)": 67.19, "step": 5940, "token_acc": 0.5186567164179104, "train_speed(iter/s)": 0.671651 }, { "epoch": 0.2547020264770147, "grad_norm": 2.9824984073638916, "learning_rate": 9.936173655483114e-05, "loss": 2.517163848876953, "memory(GiB)": 67.19, "step": 5945, "token_acc": 0.4647887323943662, "train_speed(iter/s)": 0.67157 }, { "epoch": 0.25491624180626365, "grad_norm": 3.656463861465454, "learning_rate": 9.936066424297223e-05, "loss": 2.4902322769165037, "memory(GiB)": 67.19, "step": 5950, "token_acc": 0.4831081081081081, "train_speed(iter/s)": 0.671464 }, { "epoch": 0.25513045713551263, "grad_norm": 2.9755659103393555, "learning_rate": 9.93595910368986e-05, "loss": 2.3990013122558596, "memory(GiB)": 67.19, "step": 5955, "token_acc": 0.5285171102661597, "train_speed(iter/s)": 0.671541 }, { "epoch": 0.25534467246476156, "grad_norm": 2.9402966499328613, "learning_rate": 9.935851693662968e-05, "loss": 2.538032341003418, "memory(GiB)": 67.19, "step": 5960, "token_acc": 0.45806451612903226, "train_speed(iter/s)": 0.67157 }, { "epoch": 0.25555888779401054, 
"grad_norm": 4.241426467895508, "learning_rate": 9.935744194218492e-05, "loss": 2.442582130432129, "memory(GiB)": 67.19, "step": 5965, "token_acc": 0.4509090909090909, "train_speed(iter/s)": 0.671544 }, { "epoch": 0.2557731031232595, "grad_norm": 3.1661572456359863, "learning_rate": 9.935636605358381e-05, "loss": 2.6642026901245117, "memory(GiB)": 67.19, "step": 5970, "token_acc": 0.43037974683544306, "train_speed(iter/s)": 0.671596 }, { "epoch": 0.25598731845250844, "grad_norm": 3.367205858230591, "learning_rate": 9.93552892708458e-05, "loss": 2.453078842163086, "memory(GiB)": 67.19, "step": 5975, "token_acc": 0.5018867924528302, "train_speed(iter/s)": 0.671566 }, { "epoch": 0.2562015337817574, "grad_norm": 2.710610866546631, "learning_rate": 9.935421159399046e-05, "loss": 2.5525981903076174, "memory(GiB)": 67.19, "step": 5980, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.671576 }, { "epoch": 0.2564157491110064, "grad_norm": 3.276658535003662, "learning_rate": 9.935313302303726e-05, "loss": 2.823476028442383, "memory(GiB)": 67.19, "step": 5985, "token_acc": 0.4201954397394137, "train_speed(iter/s)": 0.671629 }, { "epoch": 0.2566299644402553, "grad_norm": 3.2621877193450928, "learning_rate": 9.935205355800576e-05, "loss": 2.4306278228759766, "memory(GiB)": 67.19, "step": 5990, "token_acc": 0.49328859060402686, "train_speed(iter/s)": 0.671555 }, { "epoch": 0.2568441797695043, "grad_norm": 3.246954917907715, "learning_rate": 9.935097319891551e-05, "loss": 2.488150405883789, "memory(GiB)": 67.19, "step": 5995, "token_acc": 0.4318936877076412, "train_speed(iter/s)": 0.671381 }, { "epoch": 0.2570583950987533, "grad_norm": 2.68526029586792, "learning_rate": 9.93498919457861e-05, "loss": 2.538178825378418, "memory(GiB)": 67.19, "step": 6000, "token_acc": 0.48366013071895425, "train_speed(iter/s)": 0.671431 }, { "epoch": 0.2570583950987533, "eval_loss": 2.0633950233459473, "eval_runtime": 17.7324, "eval_samples_per_second": 5.639, "eval_steps_per_second": 
5.639, "eval_token_acc": 0.5135135135135135, "step": 6000 }, { "epoch": 0.2572726104280022, "grad_norm": 3.659397840499878, "learning_rate": 9.93488097986371e-05, "loss": 2.71431941986084, "memory(GiB)": 67.19, "step": 6005, "token_acc": 0.5, "train_speed(iter/s)": 0.669893 }, { "epoch": 0.2574868257572512, "grad_norm": 2.6122591495513916, "learning_rate": 9.934772675748809e-05, "loss": 2.544088935852051, "memory(GiB)": 67.19, "step": 6010, "token_acc": 0.4501510574018127, "train_speed(iter/s)": 0.669955 }, { "epoch": 0.25770104108650016, "grad_norm": 2.6281495094299316, "learning_rate": 9.934664282235875e-05, "loss": 2.3810117721557615, "memory(GiB)": 67.19, "step": 6015, "token_acc": 0.4845360824742268, "train_speed(iter/s)": 0.670093 }, { "epoch": 0.2579152564157491, "grad_norm": 3.1313962936401367, "learning_rate": 9.934555799326866e-05, "loss": 2.3000654220581054, "memory(GiB)": 67.19, "step": 6020, "token_acc": 0.49110320284697506, "train_speed(iter/s)": 0.670084 }, { "epoch": 0.25812947174499806, "grad_norm": 3.9865033626556396, "learning_rate": 9.934447227023751e-05, "loss": 2.4337289810180662, "memory(GiB)": 67.19, "step": 6025, "token_acc": 0.45517241379310347, "train_speed(iter/s)": 0.66992 }, { "epoch": 0.25834368707424704, "grad_norm": 3.275233507156372, "learning_rate": 9.934338565328496e-05, "loss": 2.67127685546875, "memory(GiB)": 67.19, "step": 6030, "token_acc": 0.48580441640378547, "train_speed(iter/s)": 0.670019 }, { "epoch": 0.258557902403496, "grad_norm": 3.433689594268799, "learning_rate": 9.934229814243069e-05, "loss": 2.281266784667969, "memory(GiB)": 67.19, "step": 6035, "token_acc": 0.5173913043478261, "train_speed(iter/s)": 0.670023 }, { "epoch": 0.25877211773274494, "grad_norm": 3.3389620780944824, "learning_rate": 9.934120973769441e-05, "loss": 2.550152397155762, "memory(GiB)": 67.19, "step": 6040, "token_acc": 0.4626865671641791, "train_speed(iter/s)": 0.670009 }, { "epoch": 0.2589863330619939, "grad_norm": 3.1772286891937256, 
"learning_rate": 9.934012043909582e-05, "loss": 2.3307891845703126, "memory(GiB)": 67.19, "step": 6045, "token_acc": 0.4935064935064935, "train_speed(iter/s)": 0.670032 }, { "epoch": 0.2592005483912429, "grad_norm": 4.636223793029785, "learning_rate": 9.933903024665465e-05, "loss": 2.253623199462891, "memory(GiB)": 67.19, "step": 6050, "token_acc": 0.5040650406504065, "train_speed(iter/s)": 0.670062 }, { "epoch": 0.2594147637204918, "grad_norm": 2.7363836765289307, "learning_rate": 9.933793916039068e-05, "loss": 2.332748031616211, "memory(GiB)": 67.19, "step": 6055, "token_acc": 0.5148148148148148, "train_speed(iter/s)": 0.670152 }, { "epoch": 0.2596289790497408, "grad_norm": 4.228798866271973, "learning_rate": 9.933684718032365e-05, "loss": 2.6105607986450194, "memory(GiB)": 67.19, "step": 6060, "token_acc": 0.4712230215827338, "train_speed(iter/s)": 0.670254 }, { "epoch": 0.2598431943789898, "grad_norm": 2.2220633029937744, "learning_rate": 9.933575430647336e-05, "loss": 2.2462528228759764, "memory(GiB)": 67.19, "step": 6065, "token_acc": 0.5037037037037037, "train_speed(iter/s)": 0.670311 }, { "epoch": 0.2600574097082387, "grad_norm": 2.8242459297180176, "learning_rate": 9.93346605388596e-05, "loss": 2.591744804382324, "memory(GiB)": 67.19, "step": 6070, "token_acc": 0.44837758112094395, "train_speed(iter/s)": 0.670285 }, { "epoch": 0.2602716250374877, "grad_norm": 2.6867644786834717, "learning_rate": 9.933356587750217e-05, "loss": 2.4192764282226564, "memory(GiB)": 67.19, "step": 6075, "token_acc": 0.47790055248618785, "train_speed(iter/s)": 0.670359 }, { "epoch": 0.26048584036673667, "grad_norm": 3.08266282081604, "learning_rate": 9.933247032242095e-05, "loss": 2.548043632507324, "memory(GiB)": 67.19, "step": 6080, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.670379 }, { "epoch": 0.2607000556959856, "grad_norm": 2.7657470703125, "learning_rate": 9.933137387363572e-05, "loss": 2.421766662597656, "memory(GiB)": 67.19, "step": 6085, "token_acc": 
0.45514950166112955, "train_speed(iter/s)": 0.670369 }, { "epoch": 0.26091427102523457, "grad_norm": 3.1873230934143066, "learning_rate": 9.933027653116639e-05, "loss": 2.5296783447265625, "memory(GiB)": 67.19, "step": 6090, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.670482 }, { "epoch": 0.26112848635448355, "grad_norm": 3.0284504890441895, "learning_rate": 9.932917829503281e-05, "loss": 2.347423553466797, "memory(GiB)": 67.19, "step": 6095, "token_acc": 0.4924812030075188, "train_speed(iter/s)": 0.670494 }, { "epoch": 0.26134270168373247, "grad_norm": 3.100322723388672, "learning_rate": 9.93280791652549e-05, "loss": 2.1221591949462892, "memory(GiB)": 67.19, "step": 6100, "token_acc": 0.4981132075471698, "train_speed(iter/s)": 0.670546 }, { "epoch": 0.26155691701298145, "grad_norm": 2.9375627040863037, "learning_rate": 9.932697914185258e-05, "loss": 2.4180038452148436, "memory(GiB)": 67.19, "step": 6105, "token_acc": 0.5038759689922481, "train_speed(iter/s)": 0.670621 }, { "epoch": 0.26177113234223043, "grad_norm": 3.3333592414855957, "learning_rate": 9.932587822484574e-05, "loss": 2.0699974060058595, "memory(GiB)": 67.19, "step": 6110, "token_acc": 0.5075187969924813, "train_speed(iter/s)": 0.670632 }, { "epoch": 0.26198534767147935, "grad_norm": 3.19366455078125, "learning_rate": 9.932477641425435e-05, "loss": 2.5250791549682616, "memory(GiB)": 67.19, "step": 6115, "token_acc": 0.4608150470219436, "train_speed(iter/s)": 0.67061 }, { "epoch": 0.26219956300072833, "grad_norm": 2.8740508556365967, "learning_rate": 9.932367371009837e-05, "loss": 2.6763378143310548, "memory(GiB)": 67.19, "step": 6120, "token_acc": 0.4646840148698885, "train_speed(iter/s)": 0.670689 }, { "epoch": 0.2624137783299773, "grad_norm": 3.3891000747680664, "learning_rate": 9.932257011239776e-05, "loss": 2.339823341369629, "memory(GiB)": 67.19, "step": 6125, "token_acc": 0.495114006514658, "train_speed(iter/s)": 0.670562 }, { "epoch": 0.26262799365922623, "grad_norm": 
3.1714837551116943, "learning_rate": 9.932146562117253e-05, "loss": 2.3294490814208983, "memory(GiB)": 67.19, "step": 6130, "token_acc": 0.49230769230769234, "train_speed(iter/s)": 0.670603 }, { "epoch": 0.2628422089884752, "grad_norm": 3.426358222961426, "learning_rate": 9.932036023644268e-05, "loss": 2.214742660522461, "memory(GiB)": 67.19, "step": 6135, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.670714 }, { "epoch": 0.2630564243177242, "grad_norm": 3.0455899238586426, "learning_rate": 9.931925395822823e-05, "loss": 2.455543327331543, "memory(GiB)": 67.19, "step": 6140, "token_acc": 0.4586206896551724, "train_speed(iter/s)": 0.670803 }, { "epoch": 0.2632706396469731, "grad_norm": 2.627584457397461, "learning_rate": 9.931814678654925e-05, "loss": 2.2080816268920898, "memory(GiB)": 67.19, "step": 6145, "token_acc": 0.5164179104477612, "train_speed(iter/s)": 0.670746 }, { "epoch": 0.2634848549762221, "grad_norm": 4.24057149887085, "learning_rate": 9.931703872142576e-05, "loss": 2.572409248352051, "memory(GiB)": 67.19, "step": 6150, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670711 }, { "epoch": 0.2636990703054711, "grad_norm": 3.3679146766662598, "learning_rate": 9.931592976287786e-05, "loss": 2.212027359008789, "memory(GiB)": 67.19, "step": 6155, "token_acc": 0.5183673469387755, "train_speed(iter/s)": 0.670729 }, { "epoch": 0.26391328563472, "grad_norm": 2.851391553878784, "learning_rate": 9.931481991092563e-05, "loss": 2.3245351791381834, "memory(GiB)": 67.19, "step": 6160, "token_acc": 0.5, "train_speed(iter/s)": 0.670799 }, { "epoch": 0.264127500963969, "grad_norm": 4.272902488708496, "learning_rate": 9.931370916558917e-05, "loss": 2.634746551513672, "memory(GiB)": 67.19, "step": 6165, "token_acc": 0.45993031358885017, "train_speed(iter/s)": 0.670823 }, { "epoch": 0.26434171629321795, "grad_norm": 3.0601553916931152, "learning_rate": 9.931259752688862e-05, "loss": 2.41424446105957, "memory(GiB)": 67.19, "step": 6170, "token_acc": 
0.47368421052631576, "train_speed(iter/s)": 0.670873 }, { "epoch": 0.2645559316224669, "grad_norm": 2.968268632888794, "learning_rate": 9.931148499484409e-05, "loss": 2.528482437133789, "memory(GiB)": 67.19, "step": 6175, "token_acc": 0.4711864406779661, "train_speed(iter/s)": 0.67087 }, { "epoch": 0.26477014695171586, "grad_norm": 3.840956926345825, "learning_rate": 9.931037156947576e-05, "loss": 2.2978960037231446, "memory(GiB)": 67.19, "step": 6180, "token_acc": 0.4789272030651341, "train_speed(iter/s)": 0.67075 }, { "epoch": 0.26498436228096484, "grad_norm": 2.6214661598205566, "learning_rate": 9.93092572508038e-05, "loss": 2.3453628540039064, "memory(GiB)": 67.19, "step": 6185, "token_acc": 0.47878787878787876, "train_speed(iter/s)": 0.670679 }, { "epoch": 0.26519857761021376, "grad_norm": 4.858556747436523, "learning_rate": 9.930814203884837e-05, "loss": 2.4102848052978514, "memory(GiB)": 67.19, "step": 6190, "token_acc": 0.4852459016393443, "train_speed(iter/s)": 0.670705 }, { "epoch": 0.26541279293946274, "grad_norm": 3.3622994422912598, "learning_rate": 9.93070259336297e-05, "loss": 2.2799158096313477, "memory(GiB)": 67.19, "step": 6195, "token_acc": 0.479020979020979, "train_speed(iter/s)": 0.670748 }, { "epoch": 0.2656270082687117, "grad_norm": 3.0771024227142334, "learning_rate": 9.930590893516802e-05, "loss": 2.6559621810913088, "memory(GiB)": 67.19, "step": 6200, "token_acc": 0.4247787610619469, "train_speed(iter/s)": 0.670612 }, { "epoch": 0.2658412235979607, "grad_norm": 3.5348591804504395, "learning_rate": 9.930479104348352e-05, "loss": 2.5447761535644533, "memory(GiB)": 67.19, "step": 6205, "token_acc": 0.4478021978021978, "train_speed(iter/s)": 0.670705 }, { "epoch": 0.2660554389272096, "grad_norm": 2.905714511871338, "learning_rate": 9.93036722585965e-05, "loss": 2.333863639831543, "memory(GiB)": 67.19, "step": 6210, "token_acc": 0.49829351535836175, "train_speed(iter/s)": 0.670544 }, { "epoch": 0.2662696542564586, "grad_norm": 
2.6849002838134766, "learning_rate": 9.930255258052719e-05, "loss": 2.453709602355957, "memory(GiB)": 67.19, "step": 6215, "token_acc": 0.48606811145510836, "train_speed(iter/s)": 0.670611 }, { "epoch": 0.2664838695857076, "grad_norm": 4.103528022766113, "learning_rate": 9.93014320092959e-05, "loss": 2.2948444366455076, "memory(GiB)": 67.19, "step": 6220, "token_acc": 0.508833922261484, "train_speed(iter/s)": 0.67054 }, { "epoch": 0.2666980849149565, "grad_norm": 4.403364658355713, "learning_rate": 9.930031054492292e-05, "loss": 2.2805776596069336, "memory(GiB)": 67.19, "step": 6225, "token_acc": 0.5247524752475248, "train_speed(iter/s)": 0.670487 }, { "epoch": 0.2669123002442055, "grad_norm": 3.307145118713379, "learning_rate": 9.929918818742856e-05, "loss": 2.6212640762329102, "memory(GiB)": 67.19, "step": 6230, "token_acc": 0.484472049689441, "train_speed(iter/s)": 0.67044 }, { "epoch": 0.26712651557345446, "grad_norm": 2.8111953735351562, "learning_rate": 9.929806493683317e-05, "loss": 2.466044235229492, "memory(GiB)": 67.19, "step": 6235, "token_acc": 0.49683544303797467, "train_speed(iter/s)": 0.67052 }, { "epoch": 0.2673407309027034, "grad_norm": 2.5320358276367188, "learning_rate": 9.929694079315708e-05, "loss": 2.4283578872680662, "memory(GiB)": 67.19, "step": 6240, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.670603 }, { "epoch": 0.26755494623195236, "grad_norm": 3.595914602279663, "learning_rate": 9.929581575642067e-05, "loss": 2.4557626724243162, "memory(GiB)": 67.19, "step": 6245, "token_acc": 0.5125, "train_speed(iter/s)": 0.670666 }, { "epoch": 0.26776916156120134, "grad_norm": 3.0700294971466064, "learning_rate": 9.929468982664431e-05, "loss": 2.7152030944824217, "memory(GiB)": 67.19, "step": 6250, "token_acc": 0.48014440433212996, "train_speed(iter/s)": 0.670803 }, { "epoch": 0.26798337689045026, "grad_norm": 3.7278475761413574, "learning_rate": 9.92935630038484e-05, "loss": 2.4897531509399413, "memory(GiB)": 67.19, "step": 6255, 
"token_acc": 0.4627450980392157, "train_speed(iter/s)": 0.670701 }, { "epoch": 0.26819759221969924, "grad_norm": 3.4078333377838135, "learning_rate": 9.929243528805336e-05, "loss": 2.8401010513305662, "memory(GiB)": 67.19, "step": 6260, "token_acc": 0.4755700325732899, "train_speed(iter/s)": 0.670804 }, { "epoch": 0.2684118075489482, "grad_norm": 2.627333641052246, "learning_rate": 9.929130667927962e-05, "loss": 2.137273406982422, "memory(GiB)": 67.19, "step": 6265, "token_acc": 0.5018315018315018, "train_speed(iter/s)": 0.670744 }, { "epoch": 0.26862602287819715, "grad_norm": 3.0421411991119385, "learning_rate": 9.929017717754761e-05, "loss": 2.518071937561035, "memory(GiB)": 67.19, "step": 6270, "token_acc": 0.4634146341463415, "train_speed(iter/s)": 0.670697 }, { "epoch": 0.2688402382074461, "grad_norm": 2.795443296432495, "learning_rate": 9.92890467828778e-05, "loss": 2.6138532638549803, "memory(GiB)": 67.19, "step": 6275, "token_acc": 0.4797047970479705, "train_speed(iter/s)": 0.670544 }, { "epoch": 0.2690544535366951, "grad_norm": 3.1054840087890625, "learning_rate": 9.928791549529067e-05, "loss": 2.6055953979492186, "memory(GiB)": 67.19, "step": 6280, "token_acc": 0.4763636363636364, "train_speed(iter/s)": 0.670587 }, { "epoch": 0.269268668865944, "grad_norm": 3.030182123184204, "learning_rate": 9.928678331480672e-05, "loss": 2.5214115142822267, "memory(GiB)": 67.19, "step": 6285, "token_acc": 0.4584717607973422, "train_speed(iter/s)": 0.67058 }, { "epoch": 0.269482884195193, "grad_norm": 4.35249662399292, "learning_rate": 9.928565024144646e-05, "loss": 2.5968414306640626, "memory(GiB)": 67.19, "step": 6290, "token_acc": 0.449438202247191, "train_speed(iter/s)": 0.670651 }, { "epoch": 0.269697099524442, "grad_norm": 2.626840829849243, "learning_rate": 9.92845162752304e-05, "loss": 2.8533451080322267, "memory(GiB)": 67.19, "step": 6295, "token_acc": 0.46715328467153283, "train_speed(iter/s)": 0.670701 }, { "epoch": 0.2699113148536909, "grad_norm": 
3.5085959434509277, "learning_rate": 9.928338141617911e-05, "loss": 2.2845857620239256, "memory(GiB)": 67.19, "step": 6300, "token_acc": 0.4909090909090909, "train_speed(iter/s)": 0.670594 }, { "epoch": 0.2701255301829399, "grad_norm": 3.8836958408355713, "learning_rate": 9.928224566431313e-05, "loss": 2.5409244537353515, "memory(GiB)": 67.19, "step": 6305, "token_acc": 0.4745762711864407, "train_speed(iter/s)": 0.670737 }, { "epoch": 0.27033974551218887, "grad_norm": 3.3889541625976562, "learning_rate": 9.928110901965305e-05, "loss": 2.56276912689209, "memory(GiB)": 67.19, "step": 6310, "token_acc": 0.44745762711864406, "train_speed(iter/s)": 0.670813 }, { "epoch": 0.2705539608414378, "grad_norm": 2.7845733165740967, "learning_rate": 9.927997148221943e-05, "loss": 2.3797286987304687, "memory(GiB)": 67.19, "step": 6315, "token_acc": 0.5043731778425656, "train_speed(iter/s)": 0.670831 }, { "epoch": 0.27076817617068677, "grad_norm": 3.9696414470672607, "learning_rate": 9.92788330520329e-05, "loss": 2.3844070434570312, "memory(GiB)": 67.19, "step": 6320, "token_acc": 0.4668769716088328, "train_speed(iter/s)": 0.670904 }, { "epoch": 0.27098239149993575, "grad_norm": 3.6059558391571045, "learning_rate": 9.927769372911409e-05, "loss": 2.5204544067382812, "memory(GiB)": 67.19, "step": 6325, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.671051 }, { "epoch": 0.27119660682918467, "grad_norm": 2.7572977542877197, "learning_rate": 9.927655351348363e-05, "loss": 2.3121789932250976, "memory(GiB)": 67.19, "step": 6330, "token_acc": 0.49454545454545457, "train_speed(iter/s)": 0.67099 }, { "epoch": 0.27141082215843365, "grad_norm": 7.311095237731934, "learning_rate": 9.927541240516219e-05, "loss": 2.681496429443359, "memory(GiB)": 67.19, "step": 6335, "token_acc": 0.45255474452554745, "train_speed(iter/s)": 0.671073 }, { "epoch": 0.27162503748768263, "grad_norm": 2.965527296066284, "learning_rate": 9.927427040417041e-05, "loss": 2.301124382019043, "memory(GiB)": 67.19, 
"step": 6340, "token_acc": 0.48986486486486486, "train_speed(iter/s)": 0.670955 }, { "epoch": 0.27183925281693155, "grad_norm": 3.8398168087005615, "learning_rate": 9.9273127510529e-05, "loss": 2.6060218811035156, "memory(GiB)": 67.19, "step": 6345, "token_acc": 0.4628975265017668, "train_speed(iter/s)": 0.670955 }, { "epoch": 0.27205346814618053, "grad_norm": 2.8517279624938965, "learning_rate": 9.927198372425867e-05, "loss": 2.34141845703125, "memory(GiB)": 67.19, "step": 6350, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.670974 }, { "epoch": 0.2722676834754295, "grad_norm": 3.255017042160034, "learning_rate": 9.927083904538013e-05, "loss": 2.307905387878418, "memory(GiB)": 67.19, "step": 6355, "token_acc": 0.4788732394366197, "train_speed(iter/s)": 0.670902 }, { "epoch": 0.27248189880467844, "grad_norm": 2.6947731971740723, "learning_rate": 9.926969347391413e-05, "loss": 2.335915184020996, "memory(GiB)": 67.19, "step": 6360, "token_acc": 0.5265017667844523, "train_speed(iter/s)": 0.670861 }, { "epoch": 0.2726961141339274, "grad_norm": 3.937990665435791, "learning_rate": 9.926854700988141e-05, "loss": 2.33672981262207, "memory(GiB)": 67.19, "step": 6365, "token_acc": 0.5111111111111111, "train_speed(iter/s)": 0.670869 }, { "epoch": 0.2729103294631764, "grad_norm": 3.0313591957092285, "learning_rate": 9.926739965330274e-05, "loss": 2.298262023925781, "memory(GiB)": 67.19, "step": 6370, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.67095 }, { "epoch": 0.27312454479242537, "grad_norm": 3.649174213409424, "learning_rate": 9.926625140419891e-05, "loss": 2.220107078552246, "memory(GiB)": 67.19, "step": 6375, "token_acc": 0.5062761506276151, "train_speed(iter/s)": 0.670953 }, { "epoch": 0.2733387601216743, "grad_norm": 2.977591037750244, "learning_rate": 9.926510226259071e-05, "loss": 2.1839679718017577, "memory(GiB)": 67.19, "step": 6380, "token_acc": 0.5402298850574713, "train_speed(iter/s)": 0.67091 }, { "epoch": 0.2735529754509233, 
"grad_norm": 3.468757152557373, "learning_rate": 9.926395222849899e-05, "loss": 2.4196762084960937, "memory(GiB)": 67.19, "step": 6385, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.670986 }, { "epoch": 0.27376719078017225, "grad_norm": 3.103123903274536, "learning_rate": 9.926280130194456e-05, "loss": 2.5028125762939455, "memory(GiB)": 67.19, "step": 6390, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670956 }, { "epoch": 0.2739814061094212, "grad_norm": 2.692829132080078, "learning_rate": 9.926164948294825e-05, "loss": 2.523530387878418, "memory(GiB)": 67.19, "step": 6395, "token_acc": 0.483271375464684, "train_speed(iter/s)": 0.671051 }, { "epoch": 0.27419562143867016, "grad_norm": 4.021646499633789, "learning_rate": 9.926049677153097e-05, "loss": 2.6054241180419924, "memory(GiB)": 67.19, "step": 6400, "token_acc": 0.4626865671641791, "train_speed(iter/s)": 0.671141 }, { "epoch": 0.27440983676791914, "grad_norm": 2.891818046569824, "learning_rate": 9.925934316771357e-05, "loss": 2.442857551574707, "memory(GiB)": 67.19, "step": 6405, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.671238 }, { "epoch": 0.27462405209716806, "grad_norm": 3.0829896926879883, "learning_rate": 9.925818867151697e-05, "loss": 2.5162397384643556, "memory(GiB)": 67.19, "step": 6410, "token_acc": 0.4720496894409938, "train_speed(iter/s)": 0.671318 }, { "epoch": 0.27483826742641704, "grad_norm": 3.7627644538879395, "learning_rate": 9.925703328296205e-05, "loss": 2.2796072006225585, "memory(GiB)": 67.19, "step": 6415, "token_acc": 0.4921875, "train_speed(iter/s)": 0.671239 }, { "epoch": 0.275052482755666, "grad_norm": 3.4618959426879883, "learning_rate": 9.92558770020698e-05, "loss": 2.6606863021850584, "memory(GiB)": 67.19, "step": 6420, "token_acc": 0.43214285714285716, "train_speed(iter/s)": 0.671279 }, { "epoch": 0.27526669808491494, "grad_norm": 2.614047050476074, "learning_rate": 9.92547198288611e-05, "loss": 2.23645076751709, "memory(GiB)": 67.19, 
"step": 6425, "token_acc": 0.521875, "train_speed(iter/s)": 0.671316 }, { "epoch": 0.2754809134141639, "grad_norm": 2.9025990962982178, "learning_rate": 9.925356176335696e-05, "loss": 2.341520309448242, "memory(GiB)": 67.19, "step": 6430, "token_acc": 0.5252525252525253, "train_speed(iter/s)": 0.671319 }, { "epoch": 0.2756951287434129, "grad_norm": 2.9788014888763428, "learning_rate": 9.925240280557835e-05, "loss": 2.5566946029663087, "memory(GiB)": 67.19, "step": 6435, "token_acc": 0.5047021943573667, "train_speed(iter/s)": 0.671222 }, { "epoch": 0.2759093440726618, "grad_norm": 3.6458005905151367, "learning_rate": 9.925124295554627e-05, "loss": 2.4559221267700195, "memory(GiB)": 67.19, "step": 6440, "token_acc": 0.4794952681388013, "train_speed(iter/s)": 0.671235 }, { "epoch": 0.2761235594019108, "grad_norm": 3.059964179992676, "learning_rate": 9.925008221328169e-05, "loss": 2.447853851318359, "memory(GiB)": 67.19, "step": 6445, "token_acc": 0.48333333333333334, "train_speed(iter/s)": 0.671281 }, { "epoch": 0.2763377747311598, "grad_norm": 2.484778881072998, "learning_rate": 9.92489205788057e-05, "loss": 2.3540830612182617, "memory(GiB)": 67.19, "step": 6450, "token_acc": 0.49264705882352944, "train_speed(iter/s)": 0.67134 }, { "epoch": 0.2765519900604087, "grad_norm": 3.699434995651245, "learning_rate": 9.92477580521393e-05, "loss": 2.4021766662597654, "memory(GiB)": 67.19, "step": 6455, "token_acc": 0.47540983606557374, "train_speed(iter/s)": 0.671297 }, { "epoch": 0.2767662053896577, "grad_norm": 2.379530191421509, "learning_rate": 9.924659463330357e-05, "loss": 2.3009145736694334, "memory(GiB)": 67.19, "step": 6460, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.67117 }, { "epoch": 0.27698042071890666, "grad_norm": 2.994231939315796, "learning_rate": 9.924543032231957e-05, "loss": 2.5209503173828125, "memory(GiB)": 67.19, "step": 6465, "token_acc": 0.492, "train_speed(iter/s)": 0.671248 }, { "epoch": 0.2771946360481556, "grad_norm": 
3.9000296592712402, "learning_rate": 9.924426511920841e-05, "loss": 2.853534126281738, "memory(GiB)": 67.19, "step": 6470, "token_acc": 0.45075757575757575, "train_speed(iter/s)": 0.671352 }, { "epoch": 0.27740885137740456, "grad_norm": 3.548353672027588, "learning_rate": 9.924309902399121e-05, "loss": 2.423238754272461, "memory(GiB)": 67.19, "step": 6475, "token_acc": 0.4854014598540146, "train_speed(iter/s)": 0.671409 }, { "epoch": 0.27762306670665354, "grad_norm": 3.779263734817505, "learning_rate": 9.924193203668906e-05, "loss": 2.233998489379883, "memory(GiB)": 67.19, "step": 6480, "token_acc": 0.5096525096525096, "train_speed(iter/s)": 0.671261 }, { "epoch": 0.27783728203590247, "grad_norm": 3.333043336868286, "learning_rate": 9.92407641573231e-05, "loss": 2.418519973754883, "memory(GiB)": 67.19, "step": 6485, "token_acc": 0.4882943143812709, "train_speed(iter/s)": 0.671205 }, { "epoch": 0.27805149736515145, "grad_norm": 3.8956384658813477, "learning_rate": 9.923959538591454e-05, "loss": 2.2504987716674805, "memory(GiB)": 67.19, "step": 6490, "token_acc": 0.5267489711934157, "train_speed(iter/s)": 0.671215 }, { "epoch": 0.2782657126944004, "grad_norm": 2.885819911956787, "learning_rate": 9.92384257224845e-05, "loss": 2.2309322357177734, "memory(GiB)": 67.19, "step": 6495, "token_acc": 0.5311475409836065, "train_speed(iter/s)": 0.671291 }, { "epoch": 0.27847992802364935, "grad_norm": 3.580110549926758, "learning_rate": 9.92372551670542e-05, "loss": 2.4329208374023437, "memory(GiB)": 67.19, "step": 6500, "token_acc": 0.49834983498349833, "train_speed(iter/s)": 0.671316 }, { "epoch": 0.27847992802364935, "eval_loss": 2.1332826614379883, "eval_runtime": 17.3227, "eval_samples_per_second": 5.773, "eval_steps_per_second": 5.773, "eval_token_acc": 0.4780821917808219, "step": 6500 }, { "epoch": 0.2786941433528983, "grad_norm": 2.920062303543091, "learning_rate": 9.923608371964483e-05, "loss": 2.4092817306518555, "memory(GiB)": 67.19, "step": 6505, "token_acc": 
0.47002854424357754, "train_speed(iter/s)": 0.669973 }, { "epoch": 0.2789083586821473, "grad_norm": 4.95303201675415, "learning_rate": 9.923491138027761e-05, "loss": 2.8816396713256838, "memory(GiB)": 67.19, "step": 6510, "token_acc": 0.436, "train_speed(iter/s)": 0.670066 }, { "epoch": 0.27912257401139623, "grad_norm": 2.584017038345337, "learning_rate": 9.923373814897379e-05, "loss": 2.530047607421875, "memory(GiB)": 67.19, "step": 6515, "token_acc": 0.4645161290322581, "train_speed(iter/s)": 0.670097 }, { "epoch": 0.2793367893406452, "grad_norm": 2.605271577835083, "learning_rate": 9.923256402575462e-05, "loss": 2.3762889862060548, "memory(GiB)": 67.19, "step": 6520, "token_acc": 0.5, "train_speed(iter/s)": 0.670126 }, { "epoch": 0.2795510046698942, "grad_norm": 5.620890140533447, "learning_rate": 9.923138901064138e-05, "loss": 2.6468746185302736, "memory(GiB)": 67.19, "step": 6525, "token_acc": 0.49375, "train_speed(iter/s)": 0.670213 }, { "epoch": 0.2797652199991431, "grad_norm": 3.9624850749969482, "learning_rate": 9.923021310365532e-05, "loss": 2.1906450271606444, "memory(GiB)": 67.19, "step": 6530, "token_acc": 0.509090909090909, "train_speed(iter/s)": 0.670295 }, { "epoch": 0.2799794353283921, "grad_norm": 2.7806620597839355, "learning_rate": 9.922903630481778e-05, "loss": 2.2542291641235352, "memory(GiB)": 67.19, "step": 6535, "token_acc": 0.5, "train_speed(iter/s)": 0.670327 }, { "epoch": 0.28019365065764107, "grad_norm": 3.0153584480285645, "learning_rate": 9.922785861415007e-05, "loss": 2.364406967163086, "memory(GiB)": 67.19, "step": 6540, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.670113 }, { "epoch": 0.28040786598689005, "grad_norm": 3.948495626449585, "learning_rate": 9.922668003167351e-05, "loss": 2.4811578750610352, "memory(GiB)": 67.19, "step": 6545, "token_acc": 0.45733788395904434, "train_speed(iter/s)": 0.670204 }, { "epoch": 0.28062208131613897, "grad_norm": 4.9813432693481445, "learning_rate": 9.922550055740947e-05, "loss": 
2.666337013244629, "memory(GiB)": 67.19, "step": 6550, "token_acc": 0.47157190635451507, "train_speed(iter/s)": 0.670153 }, { "epoch": 0.28083629664538795, "grad_norm": 3.1142938137054443, "learning_rate": 9.922432019137932e-05, "loss": 2.338174057006836, "memory(GiB)": 67.19, "step": 6555, "token_acc": 0.47509578544061304, "train_speed(iter/s)": 0.670213 }, { "epoch": 0.28105051197463693, "grad_norm": 2.9438583850860596, "learning_rate": 9.922313893360442e-05, "loss": 2.5705142974853517, "memory(GiB)": 67.19, "step": 6560, "token_acc": 0.48923076923076925, "train_speed(iter/s)": 0.670265 }, { "epoch": 0.28126472730388585, "grad_norm": 3.329268455505371, "learning_rate": 9.92219567841062e-05, "loss": 2.6900564193725587, "memory(GiB)": 67.19, "step": 6565, "token_acc": 0.4246575342465753, "train_speed(iter/s)": 0.670293 }, { "epoch": 0.28147894263313483, "grad_norm": 2.7769222259521484, "learning_rate": 9.922077374290604e-05, "loss": 2.7440265655517577, "memory(GiB)": 67.19, "step": 6570, "token_acc": 0.4468864468864469, "train_speed(iter/s)": 0.67023 }, { "epoch": 0.2816931579623838, "grad_norm": 3.3156254291534424, "learning_rate": 9.921958981002539e-05, "loss": 2.598683738708496, "memory(GiB)": 67.19, "step": 6575, "token_acc": 0.47194719471947194, "train_speed(iter/s)": 0.670189 }, { "epoch": 0.28190737329163273, "grad_norm": 3.191049098968506, "learning_rate": 9.921840498548571e-05, "loss": 2.207708740234375, "memory(GiB)": 67.19, "step": 6580, "token_acc": 0.4981684981684982, "train_speed(iter/s)": 0.670163 }, { "epoch": 0.2821215886208817, "grad_norm": 3.9691989421844482, "learning_rate": 9.921721926930845e-05, "loss": 2.4970945358276366, "memory(GiB)": 67.19, "step": 6585, "token_acc": 0.4808362369337979, "train_speed(iter/s)": 0.670267 }, { "epoch": 0.2823358039501307, "grad_norm": 3.787238121032715, "learning_rate": 9.921603266151509e-05, "loss": 2.3811182022094726, "memory(GiB)": 67.19, "step": 6590, "token_acc": 0.5089605734767025, "train_speed(iter/s)": 
0.670203 }, { "epoch": 0.2825500192793796, "grad_norm": 3.394822359085083, "learning_rate": 9.921484516212713e-05, "loss": 2.275802230834961, "memory(GiB)": 67.19, "step": 6595, "token_acc": 0.475, "train_speed(iter/s)": 0.670259 }, { "epoch": 0.2827642346086286, "grad_norm": 3.2803292274475098, "learning_rate": 9.92136567711661e-05, "loss": 2.2597427368164062, "memory(GiB)": 67.19, "step": 6600, "token_acc": 0.5265151515151515, "train_speed(iter/s)": 0.67003 }, { "epoch": 0.2829784499378776, "grad_norm": 2.515254259109497, "learning_rate": 9.92124674886535e-05, "loss": 2.728364181518555, "memory(GiB)": 67.19, "step": 6605, "token_acc": 0.44482758620689655, "train_speed(iter/s)": 0.670134 }, { "epoch": 0.2831926652671265, "grad_norm": 3.0427942276000977, "learning_rate": 9.921127731461089e-05, "loss": 2.5692312240600588, "memory(GiB)": 67.19, "step": 6610, "token_acc": 0.5194805194805194, "train_speed(iter/s)": 0.670083 }, { "epoch": 0.2834068805963755, "grad_norm": 3.23686146736145, "learning_rate": 9.921008624905981e-05, "loss": 2.5018978118896484, "memory(GiB)": 67.19, "step": 6615, "token_acc": 0.46864686468646866, "train_speed(iter/s)": 0.670054 }, { "epoch": 0.28362109592562446, "grad_norm": 2.7456471920013428, "learning_rate": 9.920889429202187e-05, "loss": 2.364276885986328, "memory(GiB)": 67.19, "step": 6620, "token_acc": 0.49666666666666665, "train_speed(iter/s)": 0.670046 }, { "epoch": 0.2838353112548734, "grad_norm": 3.906085968017578, "learning_rate": 9.920770144351863e-05, "loss": 2.2782384872436525, "memory(GiB)": 67.19, "step": 6625, "token_acc": 0.4483985765124555, "train_speed(iter/s)": 0.670011 }, { "epoch": 0.28404952658412236, "grad_norm": 3.2476227283477783, "learning_rate": 9.920650770357174e-05, "loss": 2.2528228759765625, "memory(GiB)": 67.19, "step": 6630, "token_acc": 0.527972027972028, "train_speed(iter/s)": 0.670153 }, { "epoch": 0.28426374191337134, "grad_norm": 4.146799564361572, "learning_rate": 9.92053130722028e-05, "loss": 
2.345270347595215, "memory(GiB)": 67.19, "step": 6635, "token_acc": 0.5, "train_speed(iter/s)": 0.670265 }, { "epoch": 0.28447795724262026, "grad_norm": 2.8446097373962402, "learning_rate": 9.920411754943344e-05, "loss": 2.4751943588256835, "memory(GiB)": 67.19, "step": 6640, "token_acc": 0.4812286689419795, "train_speed(iter/s)": 0.670263 }, { "epoch": 0.28469217257186924, "grad_norm": 2.8367233276367188, "learning_rate": 9.920292113528535e-05, "loss": 2.3251014709472657, "memory(GiB)": 67.19, "step": 6645, "token_acc": 0.5163934426229508, "train_speed(iter/s)": 0.670253 }, { "epoch": 0.2849063879011182, "grad_norm": 3.344069480895996, "learning_rate": 9.920172382978019e-05, "loss": 2.4065895080566406, "memory(GiB)": 67.19, "step": 6650, "token_acc": 0.5018181818181818, "train_speed(iter/s)": 0.670235 }, { "epoch": 0.28512060323036714, "grad_norm": 3.293288230895996, "learning_rate": 9.920052563293964e-05, "loss": 2.1727882385253907, "memory(GiB)": 67.19, "step": 6655, "token_acc": 0.5133079847908745, "train_speed(iter/s)": 0.670308 }, { "epoch": 0.2853348185596161, "grad_norm": 3.581721544265747, "learning_rate": 9.91993265447854e-05, "loss": 2.6540048599243162, "memory(GiB)": 67.19, "step": 6660, "token_acc": 0.46715328467153283, "train_speed(iter/s)": 0.670371 }, { "epoch": 0.2855490338888651, "grad_norm": 2.522601842880249, "learning_rate": 9.919812656533922e-05, "loss": 2.518966484069824, "memory(GiB)": 67.19, "step": 6665, "token_acc": 0.46853146853146854, "train_speed(iter/s)": 0.670249 }, { "epoch": 0.285763249218114, "grad_norm": 3.0716075897216797, "learning_rate": 9.919692569462283e-05, "loss": 2.426186752319336, "memory(GiB)": 67.19, "step": 6670, "token_acc": 0.48314606741573035, "train_speed(iter/s)": 0.670184 }, { "epoch": 0.285977464547363, "grad_norm": 4.478176593780518, "learning_rate": 9.919572393265798e-05, "loss": 2.787647247314453, "memory(GiB)": 67.19, "step": 6675, "token_acc": 0.46215139442231074, "train_speed(iter/s)": 0.670269 }, { 
"epoch": 0.286191679876612, "grad_norm": 5.1279449462890625, "learning_rate": 9.919452127946645e-05, "loss": 2.7297496795654297, "memory(GiB)": 67.19, "step": 6680, "token_acc": 0.43558282208588955, "train_speed(iter/s)": 0.670405 }, { "epoch": 0.2864058952058609, "grad_norm": 3.042219400405884, "learning_rate": 9.919331773507e-05, "loss": 2.458685874938965, "memory(GiB)": 67.19, "step": 6685, "token_acc": 0.48494983277591974, "train_speed(iter/s)": 0.670313 }, { "epoch": 0.2866201105351099, "grad_norm": 4.060826778411865, "learning_rate": 9.919211329949046e-05, "loss": 2.3756431579589843, "memory(GiB)": 67.19, "step": 6690, "token_acc": 0.54, "train_speed(iter/s)": 0.670261 }, { "epoch": 0.28683432586435886, "grad_norm": 3.140038251876831, "learning_rate": 9.919090797274963e-05, "loss": 2.4229846954345704, "memory(GiB)": 67.19, "step": 6695, "token_acc": 0.4722222222222222, "train_speed(iter/s)": 0.670328 }, { "epoch": 0.2870485411936078, "grad_norm": 3.7527496814727783, "learning_rate": 9.918970175486939e-05, "loss": 2.2477067947387694, "memory(GiB)": 67.19, "step": 6700, "token_acc": 0.50199203187251, "train_speed(iter/s)": 0.670399 }, { "epoch": 0.28726275652285677, "grad_norm": 4.748992919921875, "learning_rate": 9.918849464587152e-05, "loss": 2.806142807006836, "memory(GiB)": 67.19, "step": 6705, "token_acc": 0.4695121951219512, "train_speed(iter/s)": 0.670377 }, { "epoch": 0.28747697185210574, "grad_norm": 3.429586172103882, "learning_rate": 9.918728664577795e-05, "loss": 2.2130603790283203, "memory(GiB)": 67.19, "step": 6710, "token_acc": 0.5250836120401338, "train_speed(iter/s)": 0.670216 }, { "epoch": 0.2876911871813547, "grad_norm": 3.424736976623535, "learning_rate": 9.918607775461054e-05, "loss": 2.545282745361328, "memory(GiB)": 67.19, "step": 6715, "token_acc": 0.47249190938511326, "train_speed(iter/s)": 0.670209 }, { "epoch": 0.28790540251060365, "grad_norm": 3.4023218154907227, "learning_rate": 9.918486797239119e-05, "loss": 2.5125009536743166, 
"memory(GiB)": 67.19, "step": 6720, "token_acc": 0.475, "train_speed(iter/s)": 0.67021 }, { "epoch": 0.2881196178398526, "grad_norm": 3.273606061935425, "learning_rate": 9.918365729914181e-05, "loss": 2.454229164123535, "memory(GiB)": 67.19, "step": 6725, "token_acc": 0.519298245614035, "train_speed(iter/s)": 0.670195 }, { "epoch": 0.2883338331691016, "grad_norm": 4.31986665725708, "learning_rate": 9.918268811901544e-05, "loss": 2.4272117614746094, "memory(GiB)": 67.19, "step": 6730, "token_acc": 0.4950166112956811, "train_speed(iter/s)": 0.670273 }, { "epoch": 0.28854804849835053, "grad_norm": 3.52988338470459, "learning_rate": 9.91814758419673e-05, "loss": 2.4125633239746094, "memory(GiB)": 67.19, "step": 6735, "token_acc": 0.4652777777777778, "train_speed(iter/s)": 0.670218 }, { "epoch": 0.2887622638275995, "grad_norm": 3.1080565452575684, "learning_rate": 9.918026267395059e-05, "loss": 2.2443700790405274, "memory(GiB)": 67.19, "step": 6740, "token_acc": 0.49577464788732395, "train_speed(iter/s)": 0.670126 }, { "epoch": 0.2889764791568485, "grad_norm": 3.051196813583374, "learning_rate": 9.917904861498729e-05, "loss": 2.4140998840332033, "memory(GiB)": 67.19, "step": 6745, "token_acc": 0.4671280276816609, "train_speed(iter/s)": 0.670121 }, { "epoch": 0.2891906944860974, "grad_norm": 2.6533422470092773, "learning_rate": 9.91778336650994e-05, "loss": 2.483243942260742, "memory(GiB)": 67.19, "step": 6750, "token_acc": 0.4858156028368794, "train_speed(iter/s)": 0.670185 }, { "epoch": 0.2894049098153464, "grad_norm": 4.004144191741943, "learning_rate": 9.917661782430891e-05, "loss": 2.3402841567993162, "memory(GiB)": 67.19, "step": 6755, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.670302 }, { "epoch": 0.28961912514459537, "grad_norm": 2.587442398071289, "learning_rate": 9.917540109263785e-05, "loss": 2.266551971435547, "memory(GiB)": 67.19, "step": 6760, "token_acc": 0.4894894894894895, "train_speed(iter/s)": 0.670342 }, { "epoch": 0.2898333404738443, 
"grad_norm": 2.7115397453308105, "learning_rate": 9.917418347010828e-05, "loss": 2.283537673950195, "memory(GiB)": 67.19, "step": 6765, "token_acc": 0.5139318885448917, "train_speed(iter/s)": 0.670325 }, { "epoch": 0.29004755580309327, "grad_norm": 3.3596560955047607, "learning_rate": 9.917296495674224e-05, "loss": 2.37354736328125, "memory(GiB)": 67.19, "step": 6770, "token_acc": 0.4525316455696203, "train_speed(iter/s)": 0.670422 }, { "epoch": 0.29026177113234225, "grad_norm": 4.666598320007324, "learning_rate": 9.917174555256183e-05, "loss": 2.4234710693359376, "memory(GiB)": 67.19, "step": 6775, "token_acc": 0.45588235294117646, "train_speed(iter/s)": 0.670501 }, { "epoch": 0.2904759864615912, "grad_norm": 3.612027168273926, "learning_rate": 9.91705252575891e-05, "loss": 2.288817596435547, "memory(GiB)": 67.19, "step": 6780, "token_acc": 0.5182481751824818, "train_speed(iter/s)": 0.670607 }, { "epoch": 0.29069020179084015, "grad_norm": 4.145589828491211, "learning_rate": 9.916930407184619e-05, "loss": 2.208659362792969, "memory(GiB)": 67.19, "step": 6785, "token_acc": 0.5168918918918919, "train_speed(iter/s)": 0.670619 }, { "epoch": 0.29090441712008913, "grad_norm": 3.2951595783233643, "learning_rate": 9.91680819953552e-05, "loss": 2.162725067138672, "memory(GiB)": 67.19, "step": 6790, "token_acc": 0.5, "train_speed(iter/s)": 0.67061 }, { "epoch": 0.29111863244933806, "grad_norm": 3.310253620147705, "learning_rate": 9.916685902813831e-05, "loss": 2.565390968322754, "memory(GiB)": 67.19, "step": 6795, "token_acc": 0.46308724832214765, "train_speed(iter/s)": 0.670543 }, { "epoch": 0.29133284777858703, "grad_norm": 2.921598434448242, "learning_rate": 9.916563517021761e-05, "loss": 2.3334192276000976, "memory(GiB)": 67.19, "step": 6800, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.670535 }, { "epoch": 0.291547063107836, "grad_norm": 4.096884727478027, "learning_rate": 9.916441042161536e-05, "loss": 2.098414993286133, "memory(GiB)": 67.19, "step": 6805, 
"token_acc": 0.5, "train_speed(iter/s)": 0.670411 }, { "epoch": 0.29176127843708494, "grad_norm": 4.022857666015625, "learning_rate": 9.916318478235364e-05, "loss": 2.3314035415649412, "memory(GiB)": 67.19, "step": 6810, "token_acc": 0.48375451263537905, "train_speed(iter/s)": 0.670413 }, { "epoch": 0.2919754937663339, "grad_norm": 3.296471357345581, "learning_rate": 9.916195825245473e-05, "loss": 2.519729232788086, "memory(GiB)": 67.19, "step": 6815, "token_acc": 0.4511784511784512, "train_speed(iter/s)": 0.670495 }, { "epoch": 0.2921897090955829, "grad_norm": 2.683727741241455, "learning_rate": 9.916073083194084e-05, "loss": 2.3619367599487306, "memory(GiB)": 67.19, "step": 6820, "token_acc": 0.5203252032520326, "train_speed(iter/s)": 0.670551 }, { "epoch": 0.2924039244248318, "grad_norm": 3.2527060508728027, "learning_rate": 9.915950252083418e-05, "loss": 2.7691862106323244, "memory(GiB)": 67.19, "step": 6825, "token_acc": 0.445993031358885, "train_speed(iter/s)": 0.670639 }, { "epoch": 0.2926181397540808, "grad_norm": 2.8739094734191895, "learning_rate": 9.915827331915701e-05, "loss": 2.5261035919189454, "memory(GiB)": 67.19, "step": 6830, "token_acc": 0.45993031358885017, "train_speed(iter/s)": 0.6706 }, { "epoch": 0.2928323550833298, "grad_norm": 3.013627052307129, "learning_rate": 9.915704322693162e-05, "loss": 2.3504507064819338, "memory(GiB)": 67.19, "step": 6835, "token_acc": 0.5340136054421769, "train_speed(iter/s)": 0.670737 }, { "epoch": 0.2930465704125787, "grad_norm": 4.116211414337158, "learning_rate": 9.915581224418028e-05, "loss": 2.2292417526245116, "memory(GiB)": 67.19, "step": 6840, "token_acc": 0.5236363636363637, "train_speed(iter/s)": 0.670614 }, { "epoch": 0.2932607857418277, "grad_norm": 3.072404146194458, "learning_rate": 9.915458037092529e-05, "loss": 2.5297618865966798, "memory(GiB)": 67.19, "step": 6845, "token_acc": 0.476038338658147, "train_speed(iter/s)": 0.670673 }, { "epoch": 0.29347500107107666, "grad_norm": 4.236630439758301, 
"learning_rate": 9.915334760718895e-05, "loss": 2.2322486877441405, "memory(GiB)": 67.19, "step": 6850, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.670692 }, { "epoch": 0.2936892164003256, "grad_norm": 3.858764171600342, "learning_rate": 9.915211395299362e-05, "loss": 2.481222152709961, "memory(GiB)": 67.19, "step": 6855, "token_acc": 0.5503597122302158, "train_speed(iter/s)": 0.670624 }, { "epoch": 0.29390343172957456, "grad_norm": 2.883390426635742, "learning_rate": 9.915087940836163e-05, "loss": 2.2945510864257814, "memory(GiB)": 67.19, "step": 6860, "token_acc": 0.5, "train_speed(iter/s)": 0.670644 }, { "epoch": 0.29411764705882354, "grad_norm": 3.0342118740081787, "learning_rate": 9.914964397331537e-05, "loss": 2.334218406677246, "memory(GiB)": 67.19, "step": 6865, "token_acc": 0.4813664596273292, "train_speed(iter/s)": 0.670684 }, { "epoch": 0.2943318623880725, "grad_norm": 3.6026065349578857, "learning_rate": 9.914840764787719e-05, "loss": 2.7833351135253905, "memory(GiB)": 67.19, "step": 6870, "token_acc": 0.44518272425249167, "train_speed(iter/s)": 0.670777 }, { "epoch": 0.29454607771732144, "grad_norm": 3.062800168991089, "learning_rate": 9.914717043206949e-05, "loss": 2.3819564819335937, "memory(GiB)": 67.19, "step": 6875, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.670774 }, { "epoch": 0.2947602930465704, "grad_norm": 6.184975624084473, "learning_rate": 9.91459323259147e-05, "loss": 2.7061721801757814, "memory(GiB)": 67.19, "step": 6880, "token_acc": 0.44573643410852715, "train_speed(iter/s)": 0.670836 }, { "epoch": 0.2949745083758194, "grad_norm": 3.268098831176758, "learning_rate": 9.914469332943526e-05, "loss": 2.5459129333496096, "memory(GiB)": 67.19, "step": 6885, "token_acc": 0.47419354838709676, "train_speed(iter/s)": 0.670855 }, { "epoch": 0.2951887237050683, "grad_norm": 2.8003385066986084, "learning_rate": 9.914345344265359e-05, "loss": 2.412988471984863, "memory(GiB)": 67.19, "step": 6890, "token_acc": 
0.48148148148148145, "train_speed(iter/s)": 0.670921 }, { "epoch": 0.2954029390343173, "grad_norm": 3.133434772491455, "learning_rate": 9.914221266559215e-05, "loss": 2.34747200012207, "memory(GiB)": 67.19, "step": 6895, "token_acc": 0.5208333333333334, "train_speed(iter/s)": 0.671034 }, { "epoch": 0.2956171543635663, "grad_norm": 3.116382122039795, "learning_rate": 9.914097099827345e-05, "loss": 2.4815036773681642, "memory(GiB)": 67.19, "step": 6900, "token_acc": 0.47384615384615386, "train_speed(iter/s)": 0.671076 }, { "epoch": 0.2958313696928152, "grad_norm": 3.297598361968994, "learning_rate": 9.913972844071992e-05, "loss": 2.4146432876586914, "memory(GiB)": 67.19, "step": 6905, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.671084 }, { "epoch": 0.2960455850220642, "grad_norm": 3.4180123805999756, "learning_rate": 9.913848499295415e-05, "loss": 2.6431838989257814, "memory(GiB)": 67.19, "step": 6910, "token_acc": 0.4391691394658754, "train_speed(iter/s)": 0.671101 }, { "epoch": 0.29625980035131316, "grad_norm": 2.597280740737915, "learning_rate": 9.913724065499862e-05, "loss": 2.3134889602661133, "memory(GiB)": 67.19, "step": 6915, "token_acc": 0.5268456375838926, "train_speed(iter/s)": 0.670948 }, { "epoch": 0.2964740156805621, "grad_norm": 2.9564208984375, "learning_rate": 9.913624454371273e-05, "loss": 2.7695072174072264, "memory(GiB)": 67.19, "step": 6920, "token_acc": 0.44932432432432434, "train_speed(iter/s)": 0.670936 }, { "epoch": 0.29668823100981107, "grad_norm": 3.1329848766326904, "learning_rate": 9.913499860347246e-05, "loss": 2.485313606262207, "memory(GiB)": 67.19, "step": 6925, "token_acc": 0.4725274725274725, "train_speed(iter/s)": 0.670918 }, { "epoch": 0.29690244633906004, "grad_norm": 3.726516008377075, "learning_rate": 9.91337517731056e-05, "loss": 2.3873992919921876, "memory(GiB)": 67.19, "step": 6930, "token_acc": 0.5042016806722689, "train_speed(iter/s)": 0.670986 }, { "epoch": 0.29711666166830897, "grad_norm": 3.8126220703125, 
"learning_rate": 9.913250405263474e-05, "loss": 2.5157522201538085, "memory(GiB)": 67.19, "step": 6935, "token_acc": 0.4246575342465753, "train_speed(iter/s)": 0.670869 }, { "epoch": 0.29733087699755795, "grad_norm": 2.283294916152954, "learning_rate": 9.913125544208248e-05, "loss": 2.5165050506591795, "memory(GiB)": 67.19, "step": 6940, "token_acc": 0.4553072625698324, "train_speed(iter/s)": 0.670869 }, { "epoch": 0.2975450923268069, "grad_norm": 2.8899455070495605, "learning_rate": 9.913000594147144e-05, "loss": 2.5611406326293946, "memory(GiB)": 67.19, "step": 6945, "token_acc": 0.46105919003115264, "train_speed(iter/s)": 0.670924 }, { "epoch": 0.29775930765605585, "grad_norm": 3.0144400596618652, "learning_rate": 9.912875555082425e-05, "loss": 2.7340061187744142, "memory(GiB)": 67.19, "step": 6950, "token_acc": 0.44868035190615835, "train_speed(iter/s)": 0.670826 }, { "epoch": 0.29797352298530483, "grad_norm": 3.028301954269409, "learning_rate": 9.912750427016356e-05, "loss": 2.5597633361816405, "memory(GiB)": 67.19, "step": 6955, "token_acc": 0.4461538461538462, "train_speed(iter/s)": 0.670865 }, { "epoch": 0.2981877383145538, "grad_norm": 3.183939218521118, "learning_rate": 9.912625209951206e-05, "loss": 2.3630428314208984, "memory(GiB)": 67.19, "step": 6960, "token_acc": 0.486013986013986, "train_speed(iter/s)": 0.670901 }, { "epoch": 0.29840195364380273, "grad_norm": 2.5386645793914795, "learning_rate": 9.91249990388924e-05, "loss": 2.1736286163330076, "memory(GiB)": 67.19, "step": 6965, "token_acc": 0.4965277777777778, "train_speed(iter/s)": 0.670855 }, { "epoch": 0.2986161689730517, "grad_norm": 4.65431022644043, "learning_rate": 9.912374508832732e-05, "loss": 2.4352893829345703, "memory(GiB)": 67.19, "step": 6970, "token_acc": 0.47183098591549294, "train_speed(iter/s)": 0.670901 }, { "epoch": 0.2988303843023007, "grad_norm": 2.95918607711792, "learning_rate": 9.912249024783951e-05, "loss": 2.543866729736328, "memory(GiB)": 67.19, "step": 6975, 
"token_acc": 0.5249169435215947, "train_speed(iter/s)": 0.670978 }, { "epoch": 0.2990445996315496, "grad_norm": 3.2675411701202393, "learning_rate": 9.91212345174517e-05, "loss": 2.3380205154418947, "memory(GiB)": 67.19, "step": 6980, "token_acc": 0.46258503401360546, "train_speed(iter/s)": 0.670899 }, { "epoch": 0.2992588149607986, "grad_norm": 6.563283920288086, "learning_rate": 9.911997789718666e-05, "loss": 2.3654958724975588, "memory(GiB)": 67.19, "step": 6985, "token_acc": 0.5, "train_speed(iter/s)": 0.670962 }, { "epoch": 0.29947303029004757, "grad_norm": 2.9125683307647705, "learning_rate": 9.911872038706713e-05, "loss": 2.335659217834473, "memory(GiB)": 67.19, "step": 6990, "token_acc": 0.5, "train_speed(iter/s)": 0.671002 }, { "epoch": 0.2996872456192965, "grad_norm": 3.964625358581543, "learning_rate": 9.911746198711591e-05, "loss": 2.3326045989990236, "memory(GiB)": 67.19, "step": 6995, "token_acc": 0.5211726384364821, "train_speed(iter/s)": 0.670961 }, { "epoch": 0.2999014609485455, "grad_norm": 4.470602035522461, "learning_rate": 9.911620269735578e-05, "loss": 2.1036319732666016, "memory(GiB)": 67.19, "step": 7000, "token_acc": 0.5677655677655677, "train_speed(iter/s)": 0.670738 }, { "epoch": 0.2999014609485455, "eval_loss": 2.033358573913574, "eval_runtime": 16.7045, "eval_samples_per_second": 5.986, "eval_steps_per_second": 5.986, "eval_token_acc": 0.4918032786885246, "step": 7000 }, { "epoch": 0.30011567627779445, "grad_norm": 2.6826415061950684, "learning_rate": 9.911494251780957e-05, "loss": 2.5160987854003904, "memory(GiB)": 67.19, "step": 7005, "token_acc": 0.49107981220657276, "train_speed(iter/s)": 0.669501 }, { "epoch": 0.3003298916070434, "grad_norm": 3.352229595184326, "learning_rate": 9.911368144850011e-05, "loss": 2.5109615325927734, "memory(GiB)": 67.19, "step": 7010, "token_acc": 0.4876325088339223, "train_speed(iter/s)": 0.669462 }, { "epoch": 0.30054410693629235, "grad_norm": 2.8053078651428223, "learning_rate": 
9.911241948945022e-05, "loss": 2.26534366607666, "memory(GiB)": 67.19, "step": 7015, "token_acc": 0.5367647058823529, "train_speed(iter/s)": 0.669475 }, { "epoch": 0.30075832226554133, "grad_norm": 3.265393018722534, "learning_rate": 9.91111566406828e-05, "loss": 2.796454429626465, "memory(GiB)": 67.19, "step": 7020, "token_acc": 0.46474358974358976, "train_speed(iter/s)": 0.669416 }, { "epoch": 0.30097253759479026, "grad_norm": 3.7021243572235107, "learning_rate": 9.91098929022207e-05, "loss": 2.4036380767822267, "memory(GiB)": 67.19, "step": 7025, "token_acc": 0.4626865671641791, "train_speed(iter/s)": 0.669309 }, { "epoch": 0.30118675292403924, "grad_norm": 3.311532497406006, "learning_rate": 9.910862827408682e-05, "loss": 2.5302433013916015, "memory(GiB)": 67.19, "step": 7030, "token_acc": 0.47384615384615386, "train_speed(iter/s)": 0.669355 }, { "epoch": 0.3014009682532882, "grad_norm": 3.666045904159546, "learning_rate": 9.910736275630408e-05, "loss": 2.3162635803222655, "memory(GiB)": 67.19, "step": 7035, "token_acc": 0.5055762081784386, "train_speed(iter/s)": 0.669402 }, { "epoch": 0.3016151835825372, "grad_norm": 3.4124722480773926, "learning_rate": 9.910609634889538e-05, "loss": 2.9133934020996093, "memory(GiB)": 67.19, "step": 7040, "token_acc": 0.4054982817869416, "train_speed(iter/s)": 0.669458 }, { "epoch": 0.3018293989117861, "grad_norm": 3.1408746242523193, "learning_rate": 9.91048290518837e-05, "loss": 2.6678754806518556, "memory(GiB)": 67.19, "step": 7045, "token_acc": 0.4691780821917808, "train_speed(iter/s)": 0.669423 }, { "epoch": 0.3020436142410351, "grad_norm": 2.8260726928710938, "learning_rate": 9.910356086529196e-05, "loss": 2.6880874633789062, "memory(GiB)": 67.19, "step": 7050, "token_acc": 0.4931972789115646, "train_speed(iter/s)": 0.669518 }, { "epoch": 0.3022578295702841, "grad_norm": 3.4117326736450195, "learning_rate": 9.910229178914317e-05, "loss": 2.4889015197753905, "memory(GiB)": 67.19, "step": 7055, "token_acc": 
0.45787545787545786, "train_speed(iter/s)": 0.66945 }, { "epoch": 0.302472044899533, "grad_norm": 3.9196245670318604, "learning_rate": 9.910102182346029e-05, "loss": 2.2049480438232423, "memory(GiB)": 67.19, "step": 7060, "token_acc": 0.46887966804979253, "train_speed(iter/s)": 0.669384 }, { "epoch": 0.302686260228782, "grad_norm": 3.130417823791504, "learning_rate": 9.909975096826634e-05, "loss": 2.4074729919433593, "memory(GiB)": 67.19, "step": 7065, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.669414 }, { "epoch": 0.30290047555803096, "grad_norm": 2.8544399738311768, "learning_rate": 9.909847922358432e-05, "loss": 2.540211868286133, "memory(GiB)": 67.19, "step": 7070, "token_acc": 0.48242811501597443, "train_speed(iter/s)": 0.669419 }, { "epoch": 0.3031146908872799, "grad_norm": 2.832824945449829, "learning_rate": 9.909720658943733e-05, "loss": 2.636820602416992, "memory(GiB)": 67.19, "step": 7075, "token_acc": 0.4954128440366973, "train_speed(iter/s)": 0.669372 }, { "epoch": 0.30332890621652886, "grad_norm": 3.1478168964385986, "learning_rate": 9.909593306584837e-05, "loss": 2.224959945678711, "memory(GiB)": 67.19, "step": 7080, "token_acc": 0.5379061371841155, "train_speed(iter/s)": 0.669491 }, { "epoch": 0.30354312154577784, "grad_norm": 2.579474449157715, "learning_rate": 9.909465865284052e-05, "loss": 2.4127967834472654, "memory(GiB)": 67.19, "step": 7085, "token_acc": 0.4657534246575342, "train_speed(iter/s)": 0.669553 }, { "epoch": 0.30375733687502676, "grad_norm": 2.57029128074646, "learning_rate": 9.909338335043688e-05, "loss": 2.5218538284301757, "memory(GiB)": 67.19, "step": 7090, "token_acc": 0.5036764705882353, "train_speed(iter/s)": 0.669632 }, { "epoch": 0.30397155220427574, "grad_norm": 4.0566606521606445, "learning_rate": 9.909210715866055e-05, "loss": 2.598582077026367, "memory(GiB)": 67.19, "step": 7095, "token_acc": 0.4507042253521127, "train_speed(iter/s)": 0.669545 }, { "epoch": 0.3041857675335247, "grad_norm": 
3.520559787750244, "learning_rate": 9.909083007753464e-05, "loss": 2.5786819458007812, "memory(GiB)": 67.19, "step": 7100, "token_acc": 0.45918367346938777, "train_speed(iter/s)": 0.669404 }, { "epoch": 0.30439998286277364, "grad_norm": 2.4375321865081787, "learning_rate": 9.90895521070823e-05, "loss": 2.6758262634277346, "memory(GiB)": 67.19, "step": 7105, "token_acc": 0.44984802431610943, "train_speed(iter/s)": 0.669426 }, { "epoch": 0.3046141981920226, "grad_norm": 3.736503839492798, "learning_rate": 9.908827324732667e-05, "loss": 2.616665840148926, "memory(GiB)": 67.19, "step": 7110, "token_acc": 0.46178343949044587, "train_speed(iter/s)": 0.669475 }, { "epoch": 0.3048284135212716, "grad_norm": 4.126852035522461, "learning_rate": 9.908699349829091e-05, "loss": 2.2423542022705076, "memory(GiB)": 67.19, "step": 7115, "token_acc": 0.5195729537366548, "train_speed(iter/s)": 0.669458 }, { "epoch": 0.3050426288505205, "grad_norm": 4.113752365112305, "learning_rate": 9.908571285999824e-05, "loss": 2.610172080993652, "memory(GiB)": 67.19, "step": 7120, "token_acc": 0.4536082474226804, "train_speed(iter/s)": 0.669554 }, { "epoch": 0.3052568441797695, "grad_norm": 3.541069746017456, "learning_rate": 9.908443133247182e-05, "loss": 2.548431396484375, "memory(GiB)": 67.19, "step": 7125, "token_acc": 0.4668769716088328, "train_speed(iter/s)": 0.669504 }, { "epoch": 0.3054710595090185, "grad_norm": 3.123814344406128, "learning_rate": 9.908314891573489e-05, "loss": 2.5031496047973634, "memory(GiB)": 67.19, "step": 7130, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.669568 }, { "epoch": 0.3056852748382674, "grad_norm": 2.811424493789673, "learning_rate": 9.908186560981066e-05, "loss": 2.6688262939453127, "memory(GiB)": 67.19, "step": 7135, "token_acc": 0.4452296819787986, "train_speed(iter/s)": 0.669584 }, { "epoch": 0.3058994901675164, "grad_norm": 3.272369623184204, "learning_rate": 9.908058141472239e-05, "loss": 2.4289731979370117, "memory(GiB)": 67.19, "step": 
7140, "token_acc": 0.5042372881355932, "train_speed(iter/s)": 0.669634 }, { "epoch": 0.30611370549676536, "grad_norm": 4.186466217041016, "learning_rate": 9.907929633049336e-05, "loss": 2.499521827697754, "memory(GiB)": 67.19, "step": 7145, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 0.669727 }, { "epoch": 0.3063279208260143, "grad_norm": 3.5716562271118164, "learning_rate": 9.907801035714684e-05, "loss": 2.5643733978271483, "memory(GiB)": 67.19, "step": 7150, "token_acc": 0.4608150470219436, "train_speed(iter/s)": 0.669707 }, { "epoch": 0.30654213615526327, "grad_norm": 3.551788806915283, "learning_rate": 9.907672349470612e-05, "loss": 2.6086002349853517, "memory(GiB)": 67.19, "step": 7155, "token_acc": 0.46864686468646866, "train_speed(iter/s)": 0.669632 }, { "epoch": 0.30675635148451225, "grad_norm": 2.956149101257324, "learning_rate": 9.90754357431945e-05, "loss": 2.3329021453857424, "memory(GiB)": 67.19, "step": 7160, "token_acc": 0.5265017667844523, "train_speed(iter/s)": 0.669692 }, { "epoch": 0.30697056681376117, "grad_norm": 3.3988404273986816, "learning_rate": 9.907414710263534e-05, "loss": 2.6363054275512696, "memory(GiB)": 67.19, "step": 7165, "token_acc": 0.4756554307116105, "train_speed(iter/s)": 0.66978 }, { "epoch": 0.30718478214301015, "grad_norm": 4.011634349822998, "learning_rate": 9.907285757305198e-05, "loss": 2.8551801681518554, "memory(GiB)": 67.19, "step": 7170, "token_acc": 0.4404432132963989, "train_speed(iter/s)": 0.669846 }, { "epoch": 0.30739899747225913, "grad_norm": 3.854325294494629, "learning_rate": 9.907156715446775e-05, "loss": 2.3869781494140625, "memory(GiB)": 67.19, "step": 7175, "token_acc": 0.46613545816733065, "train_speed(iter/s)": 0.669863 }, { "epoch": 0.30761321280150805, "grad_norm": 3.1368021965026855, "learning_rate": 9.907027584690605e-05, "loss": 2.761881637573242, "memory(GiB)": 67.19, "step": 7180, "token_acc": 0.44107744107744107, "train_speed(iter/s)": 0.669914 }, { "epoch": 0.30782742813075703, 
"grad_norm": 4.091172218322754, "learning_rate": 9.906898365039027e-05, "loss": 2.21445369720459, "memory(GiB)": 67.19, "step": 7185, "token_acc": 0.5372549019607843, "train_speed(iter/s)": 0.669975 }, { "epoch": 0.308041643460006, "grad_norm": 2.655813217163086, "learning_rate": 9.906769056494384e-05, "loss": 2.1572187423706053, "memory(GiB)": 67.19, "step": 7190, "token_acc": 0.5140845070422535, "train_speed(iter/s)": 0.670003 }, { "epoch": 0.30825585878925493, "grad_norm": 2.569230079650879, "learning_rate": 9.906639659059015e-05, "loss": 2.4179325103759766, "memory(GiB)": 67.19, "step": 7195, "token_acc": 0.46621621621621623, "train_speed(iter/s)": 0.670066 }, { "epoch": 0.3084700741185039, "grad_norm": 3.164658546447754, "learning_rate": 9.906510172735266e-05, "loss": 2.164547157287598, "memory(GiB)": 67.19, "step": 7200, "token_acc": 0.5145631067961165, "train_speed(iter/s)": 0.670111 }, { "epoch": 0.3086842894477529, "grad_norm": 3.2999770641326904, "learning_rate": 9.906380597525484e-05, "loss": 2.2644674301147463, "memory(GiB)": 67.19, "step": 7205, "token_acc": 0.4529616724738676, "train_speed(iter/s)": 0.670164 }, { "epoch": 0.30889850477700187, "grad_norm": 3.359179735183716, "learning_rate": 9.906250933432013e-05, "loss": 2.4085996627807615, "memory(GiB)": 67.19, "step": 7210, "token_acc": 0.45652173913043476, "train_speed(iter/s)": 0.670261 }, { "epoch": 0.3091127201062508, "grad_norm": 3.424672842025757, "learning_rate": 9.906121180457204e-05, "loss": 2.489736557006836, "memory(GiB)": 67.19, "step": 7215, "token_acc": 0.5193798449612403, "train_speed(iter/s)": 0.670196 }, { "epoch": 0.3093269354354998, "grad_norm": 4.659539699554443, "learning_rate": 9.905991338603409e-05, "loss": 2.790024185180664, "memory(GiB)": 67.19, "step": 7220, "token_acc": 0.4375, "train_speed(iter/s)": 0.670294 }, { "epoch": 0.30954115076474875, "grad_norm": 2.5066139698028564, "learning_rate": 9.905861407872977e-05, "loss": 2.4868356704711916, "memory(GiB)": 67.19, "step": 
7225, "token_acc": 0.4873417721518987, "train_speed(iter/s)": 0.670378 }, { "epoch": 0.3097553660939977, "grad_norm": 3.2782788276672363, "learning_rate": 9.905731388268265e-05, "loss": 2.709016036987305, "memory(GiB)": 67.19, "step": 7230, "token_acc": 0.45, "train_speed(iter/s)": 0.670301 }, { "epoch": 0.30996958142324665, "grad_norm": 3.1090643405914307, "learning_rate": 9.905601279791626e-05, "loss": 2.4162521362304688, "memory(GiB)": 67.19, "step": 7235, "token_acc": 0.46511627906976744, "train_speed(iter/s)": 0.670259 }, { "epoch": 0.31018379675249563, "grad_norm": 3.1300554275512695, "learning_rate": 9.905471082445419e-05, "loss": 2.4192245483398436, "memory(GiB)": 67.19, "step": 7240, "token_acc": 0.510548523206751, "train_speed(iter/s)": 0.67025 }, { "epoch": 0.31039801208174456, "grad_norm": 3.4237983226776123, "learning_rate": 9.905340796232e-05, "loss": 2.446399116516113, "memory(GiB)": 67.19, "step": 7245, "token_acc": 0.47575757575757577, "train_speed(iter/s)": 0.670292 }, { "epoch": 0.31061222741099354, "grad_norm": 4.6566901206970215, "learning_rate": 9.905210421153732e-05, "loss": 2.2187782287597657, "memory(GiB)": 67.19, "step": 7250, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.670294 }, { "epoch": 0.3108264427402425, "grad_norm": 3.166969060897827, "learning_rate": 9.905079957212975e-05, "loss": 2.478166198730469, "memory(GiB)": 67.19, "step": 7255, "token_acc": 0.47333333333333333, "train_speed(iter/s)": 0.670393 }, { "epoch": 0.31104065806949144, "grad_norm": 2.8044095039367676, "learning_rate": 9.904949404412094e-05, "loss": 1.961330032348633, "memory(GiB)": 67.19, "step": 7260, "token_acc": 0.56640625, "train_speed(iter/s)": 0.670424 }, { "epoch": 0.3112548733987404, "grad_norm": 2.5872204303741455, "learning_rate": 9.904818762753454e-05, "loss": 2.4789203643798827, "memory(GiB)": 67.19, "step": 7265, "token_acc": 0.486404833836858, "train_speed(iter/s)": 0.670496 }, { "epoch": 0.3114690887279894, "grad_norm": 2.9022319316864014, 
"learning_rate": 9.904688032239419e-05, "loss": 2.4004196166992187, "memory(GiB)": 67.19, "step": 7270, "token_acc": 0.4879518072289157, "train_speed(iter/s)": 0.670476 }, { "epoch": 0.3116833040572383, "grad_norm": 3.2639713287353516, "learning_rate": 9.904557212872361e-05, "loss": 2.54599609375, "memory(GiB)": 67.19, "step": 7275, "token_acc": 0.4749034749034749, "train_speed(iter/s)": 0.670553 }, { "epoch": 0.3118975193864873, "grad_norm": 2.419386148452759, "learning_rate": 9.904426304654648e-05, "loss": 2.046927642822266, "memory(GiB)": 67.19, "step": 7280, "token_acc": 0.5304659498207885, "train_speed(iter/s)": 0.670612 }, { "epoch": 0.3121117347157363, "grad_norm": 3.6702795028686523, "learning_rate": 9.904295307588651e-05, "loss": 2.206985282897949, "memory(GiB)": 67.19, "step": 7285, "token_acc": 0.5149253731343284, "train_speed(iter/s)": 0.670638 }, { "epoch": 0.3123259500449852, "grad_norm": 3.767210006713867, "learning_rate": 9.904164221676745e-05, "loss": 2.3558170318603517, "memory(GiB)": 67.19, "step": 7290, "token_acc": 0.5019011406844106, "train_speed(iter/s)": 0.670704 }, { "epoch": 0.3125401653742342, "grad_norm": 4.803961753845215, "learning_rate": 9.904033046921303e-05, "loss": 2.221796226501465, "memory(GiB)": 67.19, "step": 7295, "token_acc": 0.5220588235294118, "train_speed(iter/s)": 0.670692 }, { "epoch": 0.31275438070348316, "grad_norm": 4.208157062530518, "learning_rate": 9.903901783324702e-05, "loss": 2.4879798889160156, "memory(GiB)": 67.19, "step": 7300, "token_acc": 0.4713804713804714, "train_speed(iter/s)": 0.67082 }, { "epoch": 0.3129685960327321, "grad_norm": 4.070925235748291, "learning_rate": 9.90377043088932e-05, "loss": 2.3279258728027346, "memory(GiB)": 67.19, "step": 7305, "token_acc": 0.46311475409836067, "train_speed(iter/s)": 0.670822 }, { "epoch": 0.31318281136198106, "grad_norm": 3.6437387466430664, "learning_rate": 9.903638989617537e-05, "loss": 2.350263977050781, "memory(GiB)": 67.19, "step": 7310, "token_acc": 
0.5560344827586207, "train_speed(iter/s)": 0.670799 }, { "epoch": 0.31339702669123004, "grad_norm": 4.176362037658691, "learning_rate": 9.903507459511733e-05, "loss": 2.5828678131103517, "memory(GiB)": 67.19, "step": 7315, "token_acc": 0.5032679738562091, "train_speed(iter/s)": 0.670878 }, { "epoch": 0.31361124202047896, "grad_norm": 5.083299160003662, "learning_rate": 9.903375840574291e-05, "loss": 2.3583532333374024, "memory(GiB)": 67.19, "step": 7320, "token_acc": 0.49691358024691357, "train_speed(iter/s)": 0.670958 }, { "epoch": 0.31382545734972794, "grad_norm": 4.049952030181885, "learning_rate": 9.903244132807597e-05, "loss": 2.422024154663086, "memory(GiB)": 67.19, "step": 7325, "token_acc": 0.4784172661870504, "train_speed(iter/s)": 0.670913 }, { "epoch": 0.3140396726789769, "grad_norm": 4.5283732414245605, "learning_rate": 9.903112336214035e-05, "loss": 2.7275859832763674, "memory(GiB)": 67.19, "step": 7330, "token_acc": 0.4217687074829932, "train_speed(iter/s)": 0.670997 }, { "epoch": 0.31425388800822585, "grad_norm": 3.6189143657684326, "learning_rate": 9.902980450795996e-05, "loss": 2.5850439071655273, "memory(GiB)": 67.19, "step": 7335, "token_acc": 0.425, "train_speed(iter/s)": 0.670971 }, { "epoch": 0.3144681033374748, "grad_norm": 2.746814489364624, "learning_rate": 9.902848476555864e-05, "loss": 2.180904579162598, "memory(GiB)": 67.19, "step": 7340, "token_acc": 0.5035714285714286, "train_speed(iter/s)": 0.670955 }, { "epoch": 0.3146823186667238, "grad_norm": 5.914935111999512, "learning_rate": 9.902716413496034e-05, "loss": 2.441696548461914, "memory(GiB)": 67.19, "step": 7345, "token_acc": 0.49458483754512633, "train_speed(iter/s)": 0.670969 }, { "epoch": 0.3148965339959727, "grad_norm": 3.8913350105285645, "learning_rate": 9.902584261618896e-05, "loss": 2.683534622192383, "memory(GiB)": 67.19, "step": 7350, "token_acc": 0.47651006711409394, "train_speed(iter/s)": 0.67104 }, { "epoch": 0.3151107493252217, "grad_norm": 2.7983133792877197, 
"learning_rate": 9.902452020926845e-05, "loss": 2.355704689025879, "memory(GiB)": 67.19, "step": 7355, "token_acc": 0.5, "train_speed(iter/s)": 0.671079 }, { "epoch": 0.3153249646544707, "grad_norm": 4.490823745727539, "learning_rate": 9.902319691422277e-05, "loss": 2.513704299926758, "memory(GiB)": 67.19, "step": 7360, "token_acc": 0.47419354838709676, "train_speed(iter/s)": 0.671124 }, { "epoch": 0.3155391799837196, "grad_norm": 3.435727119445801, "learning_rate": 9.902187273107591e-05, "loss": 2.5696733474731444, "memory(GiB)": 67.19, "step": 7365, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.671155 }, { "epoch": 0.3157533953129686, "grad_norm": 2.6785948276519775, "learning_rate": 9.902054765985182e-05, "loss": 2.167158508300781, "memory(GiB)": 67.19, "step": 7370, "token_acc": 0.5071942446043165, "train_speed(iter/s)": 0.671145 }, { "epoch": 0.31596761064221757, "grad_norm": 3.0598134994506836, "learning_rate": 9.901922170057452e-05, "loss": 2.314278221130371, "memory(GiB)": 67.19, "step": 7375, "token_acc": 0.4856115107913669, "train_speed(iter/s)": 0.671237 }, { "epoch": 0.31618182597146655, "grad_norm": 3.6211743354797363, "learning_rate": 9.901789485326804e-05, "loss": 2.3622505187988283, "memory(GiB)": 67.19, "step": 7380, "token_acc": 0.4921135646687697, "train_speed(iter/s)": 0.671322 }, { "epoch": 0.31639604130071547, "grad_norm": 3.8881473541259766, "learning_rate": 9.901656711795641e-05, "loss": 2.646750259399414, "memory(GiB)": 67.19, "step": 7385, "token_acc": 0.4744744744744745, "train_speed(iter/s)": 0.671259 }, { "epoch": 0.31661025662996445, "grad_norm": 3.1443397998809814, "learning_rate": 9.90152384946637e-05, "loss": 2.7128501892089845, "memory(GiB)": 67.19, "step": 7390, "token_acc": 0.4339080459770115, "train_speed(iter/s)": 0.671252 }, { "epoch": 0.3168244719592134, "grad_norm": 3.592219829559326, "learning_rate": 9.901390898341397e-05, "loss": 2.646365928649902, "memory(GiB)": 67.19, "step": 7395, "token_acc": 
0.48026315789473684, "train_speed(iter/s)": 0.671308 }, { "epoch": 0.31703868728846235, "grad_norm": 2.757309675216675, "learning_rate": 9.901257858423127e-05, "loss": 2.4971076965332033, "memory(GiB)": 67.19, "step": 7400, "token_acc": 0.4395973154362416, "train_speed(iter/s)": 0.671234 }, { "epoch": 0.31725290261771133, "grad_norm": 3.0087790489196777, "learning_rate": 9.901124729713975e-05, "loss": 2.670252799987793, "memory(GiB)": 67.19, "step": 7405, "token_acc": 0.44036697247706424, "train_speed(iter/s)": 0.671228 }, { "epoch": 0.3174671179469603, "grad_norm": 2.8662173748016357, "learning_rate": 9.900991512216351e-05, "loss": 2.379194641113281, "memory(GiB)": 67.19, "step": 7410, "token_acc": 0.501628664495114, "train_speed(iter/s)": 0.671265 }, { "epoch": 0.31768133327620923, "grad_norm": 4.0007004737854, "learning_rate": 9.900858205932668e-05, "loss": 2.5556177139282226, "memory(GiB)": 67.19, "step": 7415, "token_acc": 0.46551724137931033, "train_speed(iter/s)": 0.671284 }, { "epoch": 0.3178955486054582, "grad_norm": 2.7973594665527344, "learning_rate": 9.900724810865341e-05, "loss": 2.396091842651367, "memory(GiB)": 67.19, "step": 7420, "token_acc": 0.4902597402597403, "train_speed(iter/s)": 0.671282 }, { "epoch": 0.3181097639347072, "grad_norm": 3.3081114292144775, "learning_rate": 9.900591327016786e-05, "loss": 2.483126640319824, "memory(GiB)": 67.19, "step": 7425, "token_acc": 0.49206349206349204, "train_speed(iter/s)": 0.671267 }, { "epoch": 0.3183239792639561, "grad_norm": 2.9700000286102295, "learning_rate": 9.900457754389422e-05, "loss": 2.469610595703125, "memory(GiB)": 67.19, "step": 7430, "token_acc": 0.4984894259818731, "train_speed(iter/s)": 0.671209 }, { "epoch": 0.3185381945932051, "grad_norm": 3.3257575035095215, "learning_rate": 9.900324092985671e-05, "loss": 2.4569414138793944, "memory(GiB)": 67.19, "step": 7435, "token_acc": 0.4632352941176471, "train_speed(iter/s)": 0.671229 }, { "epoch": 0.31875240992245407, "grad_norm": 
2.8589999675750732, "learning_rate": 9.900190342807951e-05, "loss": 2.34259033203125, "memory(GiB)": 67.19, "step": 7440, "token_acc": 0.49137931034482757, "train_speed(iter/s)": 0.671325 }, { "epoch": 0.318966625251703, "grad_norm": 3.9965453147888184, "learning_rate": 9.900056503858685e-05, "loss": 2.420806884765625, "memory(GiB)": 67.19, "step": 7445, "token_acc": 0.5140562248995983, "train_speed(iter/s)": 0.671377 }, { "epoch": 0.319180840580952, "grad_norm": 2.923738479614258, "learning_rate": 9.8999225761403e-05, "loss": 2.2898447036743166, "memory(GiB)": 67.19, "step": 7450, "token_acc": 0.5524475524475524, "train_speed(iter/s)": 0.671379 }, { "epoch": 0.31939505591020095, "grad_norm": 3.0846238136291504, "learning_rate": 9.899788559655221e-05, "loss": 2.6504730224609374, "memory(GiB)": 67.19, "step": 7455, "token_acc": 0.488135593220339, "train_speed(iter/s)": 0.671429 }, { "epoch": 0.3196092712394499, "grad_norm": 4.041626930236816, "learning_rate": 9.899654454405876e-05, "loss": 2.402034950256348, "memory(GiB)": 67.19, "step": 7460, "token_acc": 0.4982078853046595, "train_speed(iter/s)": 0.671512 }, { "epoch": 0.31982348656869886, "grad_norm": 4.410295009613037, "learning_rate": 9.899520260394695e-05, "loss": 2.4959793090820312, "memory(GiB)": 67.19, "step": 7465, "token_acc": 0.48253968253968255, "train_speed(iter/s)": 0.671628 }, { "epoch": 0.32003770189794783, "grad_norm": 4.616373538970947, "learning_rate": 9.899385977624107e-05, "loss": 2.6598434448242188, "memory(GiB)": 67.19, "step": 7470, "token_acc": 0.4637096774193548, "train_speed(iter/s)": 0.671627 }, { "epoch": 0.32025191722719676, "grad_norm": 2.9590632915496826, "learning_rate": 9.899251606096546e-05, "loss": 2.3807600021362303, "memory(GiB)": 67.19, "step": 7475, "token_acc": 0.450354609929078, "train_speed(iter/s)": 0.671661 }, { "epoch": 0.32046613255644574, "grad_norm": 2.9903788566589355, "learning_rate": 9.899117145814448e-05, "loss": 2.2958347320556642, "memory(GiB)": 67.19, "step": 
7480, "token_acc": 0.49603174603174605, "train_speed(iter/s)": 0.671613 }, { "epoch": 0.3206803478856947, "grad_norm": 2.663029432296753, "learning_rate": 9.898982596780244e-05, "loss": 2.622235107421875, "memory(GiB)": 67.19, "step": 7485, "token_acc": 0.47468354430379744, "train_speed(iter/s)": 0.671621 }, { "epoch": 0.32089456321494364, "grad_norm": 2.8783061504364014, "learning_rate": 9.898847958996377e-05, "loss": 2.864510345458984, "memory(GiB)": 67.19, "step": 7490, "token_acc": 0.44857142857142857, "train_speed(iter/s)": 0.671689 }, { "epoch": 0.3211087785441926, "grad_norm": 3.8442561626434326, "learning_rate": 9.898713232465283e-05, "loss": 2.4017988204956056, "memory(GiB)": 67.19, "step": 7495, "token_acc": 0.48859934853420195, "train_speed(iter/s)": 0.671723 }, { "epoch": 0.3213229938734416, "grad_norm": 3.0953550338745117, "learning_rate": 9.898578417189403e-05, "loss": 2.4230777740478517, "memory(GiB)": 67.19, "step": 7500, "token_acc": 0.49838187702265374, "train_speed(iter/s)": 0.671873 }, { "epoch": 0.3213229938734416, "eval_loss": 1.9333804845809937, "eval_runtime": 17.4211, "eval_samples_per_second": 5.74, "eval_steps_per_second": 5.74, "eval_token_acc": 0.49710982658959535, "step": 7500 }, { "epoch": 0.3215372092026905, "grad_norm": 4.161280155181885, "learning_rate": 9.89844351317118e-05, "loss": 2.440923500061035, "memory(GiB)": 67.19, "step": 7505, "token_acc": 0.49085365853658536, "train_speed(iter/s)": 0.670699 }, { "epoch": 0.3217514245319395, "grad_norm": 2.402984380722046, "learning_rate": 9.89830852041306e-05, "loss": 2.4922840118408205, "memory(GiB)": 67.19, "step": 7510, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.670698 }, { "epoch": 0.3219656398611885, "grad_norm": 3.1181530952453613, "learning_rate": 9.898173438917485e-05, "loss": 2.113861846923828, "memory(GiB)": 67.19, "step": 7515, "token_acc": 0.5149253731343284, "train_speed(iter/s)": 0.670711 }, { "epoch": 0.3221798551904374, "grad_norm": 3.23179292678833, 
"learning_rate": 9.8980382686869e-05, "loss": 2.904591751098633, "memory(GiB)": 67.19, "step": 7520, "token_acc": 0.4281150159744409, "train_speed(iter/s)": 0.670672 }, { "epoch": 0.3223940705196864, "grad_norm": 3.50475811958313, "learning_rate": 9.89790300972376e-05, "loss": 2.9717540740966797, "memory(GiB)": 67.19, "step": 7525, "token_acc": 0.42628205128205127, "train_speed(iter/s)": 0.670643 }, { "epoch": 0.32260828584893536, "grad_norm": 3.655829668045044, "learning_rate": 9.897767662030512e-05, "loss": 2.4917875289916993, "memory(GiB)": 67.19, "step": 7530, "token_acc": 0.4752851711026616, "train_speed(iter/s)": 0.670608 }, { "epoch": 0.3228225011781843, "grad_norm": 2.5003721714019775, "learning_rate": 9.897632225609607e-05, "loss": 2.472139358520508, "memory(GiB)": 67.19, "step": 7535, "token_acc": 0.5033112582781457, "train_speed(iter/s)": 0.670713 }, { "epoch": 0.32303671650743326, "grad_norm": 2.987637758255005, "learning_rate": 9.897496700463502e-05, "loss": 2.6971258163452148, "memory(GiB)": 67.19, "step": 7540, "token_acc": 0.4558011049723757, "train_speed(iter/s)": 0.670786 }, { "epoch": 0.32325093183668224, "grad_norm": 4.126799583435059, "learning_rate": 9.897361086594649e-05, "loss": 2.511674690246582, "memory(GiB)": 67.19, "step": 7545, "token_acc": 0.4536082474226804, "train_speed(iter/s)": 0.670745 }, { "epoch": 0.3234651471659312, "grad_norm": 2.6155948638916016, "learning_rate": 9.897225384005507e-05, "loss": 2.220904731750488, "memory(GiB)": 67.19, "step": 7550, "token_acc": 0.5114754098360655, "train_speed(iter/s)": 0.670785 }, { "epoch": 0.32367936249518015, "grad_norm": 2.7728652954101562, "learning_rate": 9.897089592698532e-05, "loss": 2.2497238159179687, "memory(GiB)": 67.19, "step": 7555, "token_acc": 0.49375, "train_speed(iter/s)": 0.670647 }, { "epoch": 0.3238935778244291, "grad_norm": 3.524749517440796, "learning_rate": 9.896953712676184e-05, "loss": 2.568111801147461, "memory(GiB)": 67.19, "step": 7560, "token_acc": 0.5, 
"train_speed(iter/s)": 0.670702 }, { "epoch": 0.3241077931536781, "grad_norm": 3.2622082233428955, "learning_rate": 9.896817743940928e-05, "loss": 2.4240951538085938, "memory(GiB)": 67.19, "step": 7565, "token_acc": 0.5145985401459854, "train_speed(iter/s)": 0.670759 }, { "epoch": 0.324322008482927, "grad_norm": 3.549494743347168, "learning_rate": 9.896681686495224e-05, "loss": 2.3687089920043944, "memory(GiB)": 67.19, "step": 7570, "token_acc": 0.5344827586206896, "train_speed(iter/s)": 0.670701 }, { "epoch": 0.324536223812176, "grad_norm": 3.915717124938965, "learning_rate": 9.896545540341538e-05, "loss": 2.2158599853515626, "memory(GiB)": 67.19, "step": 7575, "token_acc": 0.5209125475285171, "train_speed(iter/s)": 0.670586 }, { "epoch": 0.324750439141425, "grad_norm": 3.462482213973999, "learning_rate": 9.896409305482336e-05, "loss": 2.5582361221313477, "memory(GiB)": 67.19, "step": 7580, "token_acc": 0.4774436090225564, "train_speed(iter/s)": 0.670581 }, { "epoch": 0.3249646544706739, "grad_norm": 3.4790914058685303, "learning_rate": 9.896272981920087e-05, "loss": 2.673177146911621, "memory(GiB)": 67.19, "step": 7585, "token_acc": 0.436426116838488, "train_speed(iter/s)": 0.670639 }, { "epoch": 0.3251788697999229, "grad_norm": 2.6290128231048584, "learning_rate": 9.89613656965726e-05, "loss": 2.4897693634033202, "memory(GiB)": 67.19, "step": 7590, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670626 }, { "epoch": 0.32539308512917187, "grad_norm": 3.4451444149017334, "learning_rate": 9.896000068696325e-05, "loss": 2.4265689849853516, "memory(GiB)": 67.19, "step": 7595, "token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.670685 }, { "epoch": 0.3256073004584208, "grad_norm": 3.9849839210510254, "learning_rate": 9.895863479039756e-05, "loss": 2.3652626037597657, "memory(GiB)": 67.19, "step": 7600, "token_acc": 0.556910569105691, "train_speed(iter/s)": 0.670732 }, { "epoch": 0.32582151578766977, "grad_norm": 4.188665390014648, "learning_rate": 
9.895726800690028e-05, "loss": 2.292842483520508, "memory(GiB)": 67.19, "step": 7605, "token_acc": 0.4878048780487805, "train_speed(iter/s)": 0.670746 }, { "epoch": 0.32603573111691875, "grad_norm": 3.416762590408325, "learning_rate": 9.895590033649616e-05, "loss": 2.429154968261719, "memory(GiB)": 67.19, "step": 7610, "token_acc": 0.4831932773109244, "train_speed(iter/s)": 0.670808 }, { "epoch": 0.32624994644616767, "grad_norm": 3.92926025390625, "learning_rate": 9.895453177920997e-05, "loss": 2.337320327758789, "memory(GiB)": 67.19, "step": 7615, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.670659 }, { "epoch": 0.32646416177541665, "grad_norm": 2.95041561126709, "learning_rate": 9.895316233506653e-05, "loss": 2.2848642349243162, "memory(GiB)": 67.19, "step": 7620, "token_acc": 0.5138461538461538, "train_speed(iter/s)": 0.670664 }, { "epoch": 0.32667837710466563, "grad_norm": 3.2018256187438965, "learning_rate": 9.895179200409063e-05, "loss": 2.377381134033203, "memory(GiB)": 67.19, "step": 7625, "token_acc": 0.4657534246575342, "train_speed(iter/s)": 0.670599 }, { "epoch": 0.32689259243391455, "grad_norm": 2.4134716987609863, "learning_rate": 9.895042078630709e-05, "loss": 2.440427780151367, "memory(GiB)": 67.19, "step": 7630, "token_acc": 0.5075528700906344, "train_speed(iter/s)": 0.670696 }, { "epoch": 0.32710680776316353, "grad_norm": 3.2491233348846436, "learning_rate": 9.894904868174076e-05, "loss": 2.3536708831787108, "memory(GiB)": 67.19, "step": 7635, "token_acc": 0.47653429602888087, "train_speed(iter/s)": 0.670648 }, { "epoch": 0.3273210230924125, "grad_norm": 3.519399642944336, "learning_rate": 9.89476756904165e-05, "loss": 2.4032428741455076, "memory(GiB)": 67.19, "step": 7640, "token_acc": 0.4982456140350877, "train_speed(iter/s)": 0.670669 }, { "epoch": 0.32753523842166143, "grad_norm": 4.8981614112854, "learning_rate": 9.894630181235917e-05, "loss": 2.652790832519531, "memory(GiB)": 67.19, "step": 7645, "token_acc": 0.4355300859598854, 
"train_speed(iter/s)": 0.670725 }, { "epoch": 0.3277494537509104, "grad_norm": 3.11238169670105, "learning_rate": 9.894492704759369e-05, "loss": 2.3487165451049803, "memory(GiB)": 67.19, "step": 7650, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.670759 }, { "epoch": 0.3279636690801594, "grad_norm": 3.3200769424438477, "learning_rate": 9.894355139614493e-05, "loss": 2.495069885253906, "memory(GiB)": 67.19, "step": 7655, "token_acc": 0.49523809523809526, "train_speed(iter/s)": 0.67074 }, { "epoch": 0.3281778844094083, "grad_norm": 3.8951992988586426, "learning_rate": 9.89421748580378e-05, "loss": 2.6704227447509767, "memory(GiB)": 67.19, "step": 7660, "token_acc": 0.48134328358208955, "train_speed(iter/s)": 0.670709 }, { "epoch": 0.3283920997386573, "grad_norm": 2.917707681655884, "learning_rate": 9.894079743329729e-05, "loss": 2.6422853469848633, "memory(GiB)": 67.19, "step": 7665, "token_acc": 0.44642857142857145, "train_speed(iter/s)": 0.670769 }, { "epoch": 0.3286063150679063, "grad_norm": 2.966890811920166, "learning_rate": 9.893941912194831e-05, "loss": 2.5227174758911133, "memory(GiB)": 67.19, "step": 7670, "token_acc": 0.49517684887459806, "train_speed(iter/s)": 0.670711 }, { "epoch": 0.3288205303971552, "grad_norm": 3.1786088943481445, "learning_rate": 9.893803992401586e-05, "loss": 2.557907485961914, "memory(GiB)": 67.19, "step": 7675, "token_acc": 0.4852941176470588, "train_speed(iter/s)": 0.670764 }, { "epoch": 0.3290347457264042, "grad_norm": 3.381103992462158, "learning_rate": 9.893665983952489e-05, "loss": 2.0514410018920897, "memory(GiB)": 67.19, "step": 7680, "token_acc": 0.544891640866873, "train_speed(iter/s)": 0.670843 }, { "epoch": 0.32924896105565316, "grad_norm": 3.5123279094696045, "learning_rate": 9.893527886850044e-05, "loss": 2.494339179992676, "memory(GiB)": 67.19, "step": 7685, "token_acc": 0.4708171206225681, "train_speed(iter/s)": 0.670841 }, { "epoch": 0.3294631763849021, "grad_norm": 2.5725467205047607, "learning_rate": 
9.89338970109675e-05, "loss": 2.9913883209228516, "memory(GiB)": 67.19, "step": 7690, "token_acc": 0.4290322580645161, "train_speed(iter/s)": 0.670892 }, { "epoch": 0.32967739171415106, "grad_norm": 3.28409481048584, "learning_rate": 9.893251426695111e-05, "loss": 2.5258338928222654, "memory(GiB)": 67.19, "step": 7695, "token_acc": 0.4765625, "train_speed(iter/s)": 0.670849 }, { "epoch": 0.32989160704340004, "grad_norm": 5.153290748596191, "learning_rate": 9.893113063647632e-05, "loss": 2.5060176849365234, "memory(GiB)": 67.19, "step": 7700, "token_acc": 0.4981684981684982, "train_speed(iter/s)": 0.670825 }, { "epoch": 0.33010582237264896, "grad_norm": 2.9158575534820557, "learning_rate": 9.89297461195682e-05, "loss": 2.4518112182617187, "memory(GiB)": 67.19, "step": 7705, "token_acc": 0.48514851485148514, "train_speed(iter/s)": 0.670818 }, { "epoch": 0.33032003770189794, "grad_norm": 3.156184434890747, "learning_rate": 9.892836071625182e-05, "loss": 2.7760528564453124, "memory(GiB)": 67.19, "step": 7710, "token_acc": 0.4070175438596491, "train_speed(iter/s)": 0.670892 }, { "epoch": 0.3305342530311469, "grad_norm": 3.291501522064209, "learning_rate": 9.89269744265523e-05, "loss": 2.469478225708008, "memory(GiB)": 67.19, "step": 7715, "token_acc": 0.48518518518518516, "train_speed(iter/s)": 0.670938 }, { "epoch": 0.3307484683603959, "grad_norm": 3.4521632194519043, "learning_rate": 9.892558725049474e-05, "loss": 2.669778060913086, "memory(GiB)": 67.19, "step": 7720, "token_acc": 0.49280575539568344, "train_speed(iter/s)": 0.671003 }, { "epoch": 0.3309626836896448, "grad_norm": 2.600888967514038, "learning_rate": 9.892419918810426e-05, "loss": 2.4603347778320312, "memory(GiB)": 67.19, "step": 7725, "token_acc": 0.45364238410596025, "train_speed(iter/s)": 0.671072 }, { "epoch": 0.3311768990188938, "grad_norm": 4.628243923187256, "learning_rate": 9.892281023940602e-05, "loss": 2.291423797607422, "memory(GiB)": 67.19, "step": 7730, "token_acc": 0.5512820512820513, 
"train_speed(iter/s)": 0.671061 }, { "epoch": 0.3313911143481428, "grad_norm": 3.345651149749756, "learning_rate": 9.892142040442518e-05, "loss": 2.206598091125488, "memory(GiB)": 67.19, "step": 7735, "token_acc": 0.5451127819548872, "train_speed(iter/s)": 0.670905 }, { "epoch": 0.3316053296773917, "grad_norm": 2.808683395385742, "learning_rate": 9.892002968318692e-05, "loss": 2.2907455444335936, "memory(GiB)": 67.19, "step": 7740, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.670934 }, { "epoch": 0.3318195450066407, "grad_norm": 3.726792812347412, "learning_rate": 9.891863807571644e-05, "loss": 2.6355607986450194, "memory(GiB)": 67.19, "step": 7745, "token_acc": 0.45962732919254656, "train_speed(iter/s)": 0.670955 }, { "epoch": 0.33203376033588966, "grad_norm": 3.3030240535736084, "learning_rate": 9.891724558203893e-05, "loss": 2.513561248779297, "memory(GiB)": 67.19, "step": 7750, "token_acc": 0.4783950617283951, "train_speed(iter/s)": 0.670997 }, { "epoch": 0.3322479756651386, "grad_norm": 3.4052436351776123, "learning_rate": 9.891585220217964e-05, "loss": 2.267999839782715, "memory(GiB)": 67.19, "step": 7755, "token_acc": 0.5264900662251656, "train_speed(iter/s)": 0.670943 }, { "epoch": 0.33246219099438756, "grad_norm": 3.2978439331054688, "learning_rate": 9.891445793616378e-05, "loss": 2.433144760131836, "memory(GiB)": 67.19, "step": 7760, "token_acc": 0.49853372434017595, "train_speed(iter/s)": 0.670951 }, { "epoch": 0.33267640632363654, "grad_norm": 4.423701286315918, "learning_rate": 9.891306278401665e-05, "loss": 2.6026098251342775, "memory(GiB)": 67.19, "step": 7765, "token_acc": 0.43006993006993005, "train_speed(iter/s)": 0.670913 }, { "epoch": 0.33289062165288547, "grad_norm": 3.8620896339416504, "learning_rate": 9.891166674576349e-05, "loss": 2.384119987487793, "memory(GiB)": 67.19, "step": 7770, "token_acc": 0.4559386973180077, "train_speed(iter/s)": 0.670915 }, { "epoch": 0.33310483698213444, "grad_norm": 2.3438103199005127, 
"learning_rate": 9.891026982142962e-05, "loss": 2.4818435668945313, "memory(GiB)": 67.19, "step": 7775, "token_acc": 0.4912891986062718, "train_speed(iter/s)": 0.670919 }, { "epoch": 0.3333190523113834, "grad_norm": 2.9537453651428223, "learning_rate": 9.890887201104032e-05, "loss": 2.427325439453125, "memory(GiB)": 67.19, "step": 7780, "token_acc": 0.4591439688715953, "train_speed(iter/s)": 0.670919 }, { "epoch": 0.33353326764063235, "grad_norm": 3.0323874950408936, "learning_rate": 9.890747331462092e-05, "loss": 2.390580940246582, "memory(GiB)": 67.19, "step": 7785, "token_acc": 0.5089285714285714, "train_speed(iter/s)": 0.670878 }, { "epoch": 0.3337474829698813, "grad_norm": 3.036379814147949, "learning_rate": 9.890607373219676e-05, "loss": 2.353002738952637, "memory(GiB)": 67.19, "step": 7790, "token_acc": 0.45806451612903226, "train_speed(iter/s)": 0.67093 }, { "epoch": 0.3339616982991303, "grad_norm": 2.1143767833709717, "learning_rate": 9.89046732637932e-05, "loss": 2.438159942626953, "memory(GiB)": 67.19, "step": 7795, "token_acc": 0.4738372093023256, "train_speed(iter/s)": 0.67099 }, { "epoch": 0.33417591362837923, "grad_norm": 2.7656705379486084, "learning_rate": 9.890327190943561e-05, "loss": 2.21838493347168, "memory(GiB)": 67.19, "step": 7800, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.670966 }, { "epoch": 0.3343901289576282, "grad_norm": 3.7313003540039062, "learning_rate": 9.890186966914938e-05, "loss": 2.235016441345215, "memory(GiB)": 67.19, "step": 7805, "token_acc": 0.4965753424657534, "train_speed(iter/s)": 0.670817 }, { "epoch": 0.3346043442868772, "grad_norm": 2.4622561931610107, "learning_rate": 9.89004665429599e-05, "loss": 2.465885543823242, "memory(GiB)": 67.19, "step": 7810, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.670861 }, { "epoch": 0.3348185596161261, "grad_norm": 2.446173906326294, "learning_rate": 9.88990625308926e-05, "loss": 2.255339813232422, "memory(GiB)": 67.19, "step": 7815, "token_acc": 
0.519298245614035, "train_speed(iter/s)": 0.670783 }, { "epoch": 0.3350327749453751, "grad_norm": 3.0277984142303467, "learning_rate": 9.889765763297291e-05, "loss": 2.581073188781738, "memory(GiB)": 67.19, "step": 7820, "token_acc": 0.4696969696969697, "train_speed(iter/s)": 0.670726 }, { "epoch": 0.33524699027462407, "grad_norm": 2.4128825664520264, "learning_rate": 9.889625184922628e-05, "loss": 2.1229631423950197, "memory(GiB)": 67.19, "step": 7825, "token_acc": 0.5207547169811321, "train_speed(iter/s)": 0.670805 }, { "epoch": 0.335461205603873, "grad_norm": 3.318272829055786, "learning_rate": 9.889484517967818e-05, "loss": 2.7129119873046874, "memory(GiB)": 67.19, "step": 7830, "token_acc": 0.4511494252873563, "train_speed(iter/s)": 0.670872 }, { "epoch": 0.33567542093312197, "grad_norm": 2.976301670074463, "learning_rate": 9.889343762435409e-05, "loss": 2.432292175292969, "memory(GiB)": 67.19, "step": 7835, "token_acc": 0.47107438016528924, "train_speed(iter/s)": 0.670859 }, { "epoch": 0.33588963626237095, "grad_norm": 3.012356996536255, "learning_rate": 9.88920291832795e-05, "loss": 2.807279586791992, "memory(GiB)": 67.19, "step": 7840, "token_acc": 0.44360902255639095, "train_speed(iter/s)": 0.670967 }, { "epoch": 0.3361038515916199, "grad_norm": 3.905215263366699, "learning_rate": 9.889061985647996e-05, "loss": 2.391712951660156, "memory(GiB)": 67.19, "step": 7845, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.671028 }, { "epoch": 0.33631806692086885, "grad_norm": 3.5512430667877197, "learning_rate": 9.888920964398099e-05, "loss": 2.5768613815307617, "memory(GiB)": 67.19, "step": 7850, "token_acc": 0.4763231197771588, "train_speed(iter/s)": 0.67108 }, { "epoch": 0.33653228225011783, "grad_norm": 4.463839530944824, "learning_rate": 9.88877985458081e-05, "loss": 2.4469720840454103, "memory(GiB)": 67.19, "step": 7855, "token_acc": 0.4699248120300752, "train_speed(iter/s)": 0.671122 }, { "epoch": 0.33674649757936675, "grad_norm": 
3.539891242980957, "learning_rate": 9.888638656198688e-05, "loss": 2.3411462783813475, "memory(GiB)": 67.19, "step": 7860, "token_acc": 0.5, "train_speed(iter/s)": 0.671125 }, { "epoch": 0.33696071290861573, "grad_norm": 3.232584238052368, "learning_rate": 9.88849736925429e-05, "loss": 2.2898542404174806, "memory(GiB)": 67.19, "step": 7865, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.6712 }, { "epoch": 0.3371749282378647, "grad_norm": 2.7938339710235596, "learning_rate": 9.888355993750178e-05, "loss": 2.5197858810424805, "memory(GiB)": 67.19, "step": 7870, "token_acc": 0.45925925925925926, "train_speed(iter/s)": 0.671139 }, { "epoch": 0.33738914356711364, "grad_norm": 3.1709108352661133, "learning_rate": 9.888214529688912e-05, "loss": 2.237970161437988, "memory(GiB)": 67.19, "step": 7875, "token_acc": 0.5309734513274337, "train_speed(iter/s)": 0.671069 }, { "epoch": 0.3376033588963626, "grad_norm": 3.145932674407959, "learning_rate": 9.888072977073053e-05, "loss": 2.226058578491211, "memory(GiB)": 67.19, "step": 7880, "token_acc": 0.5342019543973942, "train_speed(iter/s)": 0.671099 }, { "epoch": 0.3378175742256116, "grad_norm": 3.1344921588897705, "learning_rate": 9.887931335905168e-05, "loss": 2.262767028808594, "memory(GiB)": 67.19, "step": 7885, "token_acc": 0.5049180327868853, "train_speed(iter/s)": 0.671116 }, { "epoch": 0.3380317895548606, "grad_norm": 3.1318485736846924, "learning_rate": 9.887789606187819e-05, "loss": 2.240114212036133, "memory(GiB)": 67.19, "step": 7890, "token_acc": 0.48322147651006714, "train_speed(iter/s)": 0.671182 }, { "epoch": 0.3382460048841095, "grad_norm": 2.8570163249969482, "learning_rate": 9.887647787923578e-05, "loss": 2.2744539260864256, "memory(GiB)": 67.19, "step": 7895, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.671221 }, { "epoch": 0.3384602202133585, "grad_norm": 3.798799753189087, "learning_rate": 9.887505881115013e-05, "loss": 2.451969528198242, "memory(GiB)": 67.19, "step": 7900, 
"token_acc": 0.4801587301587302, "train_speed(iter/s)": 0.671214 }, { "epoch": 0.33867443554260745, "grad_norm": 2.6038239002227783, "learning_rate": 9.887363885764693e-05, "loss": 2.287862014770508, "memory(GiB)": 67.19, "step": 7905, "token_acc": 0.44983818770226536, "train_speed(iter/s)": 0.671086 }, { "epoch": 0.3388886508718564, "grad_norm": 4.941615104675293, "learning_rate": 9.887221801875192e-05, "loss": 2.4421760559082033, "memory(GiB)": 67.19, "step": 7910, "token_acc": 0.49264705882352944, "train_speed(iter/s)": 0.671086 }, { "epoch": 0.33910286620110536, "grad_norm": 3.1293323040008545, "learning_rate": 9.887079629449083e-05, "loss": 2.5570556640625, "memory(GiB)": 67.19, "step": 7915, "token_acc": 0.44904458598726116, "train_speed(iter/s)": 0.671071 }, { "epoch": 0.33931708153035434, "grad_norm": 3.990152597427368, "learning_rate": 9.886937368488942e-05, "loss": 2.458730125427246, "memory(GiB)": 67.19, "step": 7920, "token_acc": 0.4635036496350365, "train_speed(iter/s)": 0.671091 }, { "epoch": 0.33953129685960326, "grad_norm": 4.218891143798828, "learning_rate": 9.886795018997347e-05, "loss": 2.2066822052001953, "memory(GiB)": 67.19, "step": 7925, "token_acc": 0.5245901639344263, "train_speed(iter/s)": 0.671064 }, { "epoch": 0.33974551218885224, "grad_norm": 3.2260847091674805, "learning_rate": 9.886652580976876e-05, "loss": 2.309098815917969, "memory(GiB)": 67.19, "step": 7930, "token_acc": 0.4909090909090909, "train_speed(iter/s)": 0.671086 }, { "epoch": 0.3399597275181012, "grad_norm": 3.5041511058807373, "learning_rate": 9.886510054430108e-05, "loss": 2.847075653076172, "memory(GiB)": 67.19, "step": 7935, "token_acc": 0.46959459459459457, "train_speed(iter/s)": 0.671053 }, { "epoch": 0.34017394284735014, "grad_norm": 3.4642233848571777, "learning_rate": 9.886367439359627e-05, "loss": 2.3711166381835938, "memory(GiB)": 67.19, "step": 7940, "token_acc": 0.4937106918238994, "train_speed(iter/s)": 0.671085 }, { "epoch": 0.3403881581765991, "grad_norm": 
3.305156707763672, "learning_rate": 9.886224735768017e-05, "loss": 2.6505760192871093, "memory(GiB)": 67.19, "step": 7945, "token_acc": 0.4349315068493151, "train_speed(iter/s)": 0.671046 }, { "epoch": 0.3406023735058481, "grad_norm": 4.1039323806762695, "learning_rate": 9.886081943657862e-05, "loss": 2.336368179321289, "memory(GiB)": 67.19, "step": 7950, "token_acc": 0.49843260188087773, "train_speed(iter/s)": 0.671035 }, { "epoch": 0.340816588835097, "grad_norm": 3.5364856719970703, "learning_rate": 9.885939063031748e-05, "loss": 2.592477035522461, "memory(GiB)": 67.19, "step": 7955, "token_acc": 0.47953216374269003, "train_speed(iter/s)": 0.671075 }, { "epoch": 0.341030804164346, "grad_norm": 2.96055006980896, "learning_rate": 9.885796093892266e-05, "loss": 2.3093944549560548, "memory(GiB)": 67.19, "step": 7960, "token_acc": 0.4804270462633452, "train_speed(iter/s)": 0.671145 }, { "epoch": 0.341245019493595, "grad_norm": 3.1250505447387695, "learning_rate": 9.885653036242004e-05, "loss": 2.479862594604492, "memory(GiB)": 67.19, "step": 7965, "token_acc": 0.48514851485148514, "train_speed(iter/s)": 0.671135 }, { "epoch": 0.3414592348228439, "grad_norm": 3.304014205932617, "learning_rate": 9.885509890083555e-05, "loss": 2.7080368041992187, "memory(GiB)": 67.19, "step": 7970, "token_acc": 0.461038961038961, "train_speed(iter/s)": 0.671127 }, { "epoch": 0.3416734501520929, "grad_norm": 3.649683713912964, "learning_rate": 9.88536665541951e-05, "loss": 2.1716211318969725, "memory(GiB)": 67.19, "step": 7975, "token_acc": 0.4967948717948718, "train_speed(iter/s)": 0.671238 }, { "epoch": 0.34188766548134186, "grad_norm": 4.186813831329346, "learning_rate": 9.885223332252464e-05, "loss": 2.32647819519043, "memory(GiB)": 67.19, "step": 7980, "token_acc": 0.460431654676259, "train_speed(iter/s)": 0.671318 }, { "epoch": 0.3421018808105908, "grad_norm": 4.293606758117676, "learning_rate": 9.885079920585017e-05, "loss": 2.5686647415161135, "memory(GiB)": 67.19, "step": 7985, 
"token_acc": 0.45724907063197023, "train_speed(iter/s)": 0.671388 }, { "epoch": 0.34231609613983977, "grad_norm": 4.346506595611572, "learning_rate": 9.884936420419763e-05, "loss": 2.6401132583618163, "memory(GiB)": 67.19, "step": 7990, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.671279 }, { "epoch": 0.34253031146908874, "grad_norm": 3.2772867679595947, "learning_rate": 9.884792831759305e-05, "loss": 2.2676572799682617, "memory(GiB)": 67.19, "step": 7995, "token_acc": 0.4937106918238994, "train_speed(iter/s)": 0.671328 }, { "epoch": 0.34274452679833767, "grad_norm": 2.7742745876312256, "learning_rate": 9.884649154606242e-05, "loss": 2.4371006011962892, "memory(GiB)": 67.19, "step": 8000, "token_acc": 0.47440273037542663, "train_speed(iter/s)": 0.671363 }, { "epoch": 0.34274452679833767, "eval_loss": 2.0627593994140625, "eval_runtime": 17.2488, "eval_samples_per_second": 5.797, "eval_steps_per_second": 5.797, "eval_token_acc": 0.5094086021505376, "step": 8000 }, { "epoch": 0.34295874212758665, "grad_norm": 3.557868242263794, "learning_rate": 9.884505388963176e-05, "loss": 2.298310470581055, "memory(GiB)": 67.19, "step": 8005, "token_acc": 0.5048262548262549, "train_speed(iter/s)": 0.670019 }, { "epoch": 0.3431729574568356, "grad_norm": 3.698568820953369, "learning_rate": 9.884361534832716e-05, "loss": 2.644573211669922, "memory(GiB)": 67.19, "step": 8010, "token_acc": 0.41578947368421054, "train_speed(iter/s)": 0.669965 }, { "epoch": 0.34338717278608455, "grad_norm": 3.113436698913574, "learning_rate": 9.884217592217461e-05, "loss": 2.3371089935302733, "memory(GiB)": 67.19, "step": 8015, "token_acc": 0.5130111524163569, "train_speed(iter/s)": 0.670068 }, { "epoch": 0.34360138811533353, "grad_norm": 3.312652349472046, "learning_rate": 9.884073561120026e-05, "loss": 2.592789077758789, "memory(GiB)": 67.19, "step": 8020, "token_acc": 0.46078431372549017, "train_speed(iter/s)": 0.669975 }, { "epoch": 0.3438156034445825, "grad_norm": 4.408972263336182, 
"learning_rate": 9.883929441543014e-05, "loss": 2.2480117797851564, "memory(GiB)": 67.19, "step": 8025, "token_acc": 0.5181159420289855, "train_speed(iter/s)": 0.669994 }, { "epoch": 0.34402981877383143, "grad_norm": 4.474485397338867, "learning_rate": 9.88378523348904e-05, "loss": 2.1747146606445313, "memory(GiB)": 67.19, "step": 8030, "token_acc": 0.5467625899280576, "train_speed(iter/s)": 0.670042 }, { "epoch": 0.3442440341030804, "grad_norm": 4.812404632568359, "learning_rate": 9.883640936960716e-05, "loss": 2.588013458251953, "memory(GiB)": 67.19, "step": 8035, "token_acc": 0.4233576642335766, "train_speed(iter/s)": 0.670072 }, { "epoch": 0.3444582494323294, "grad_norm": 2.807459592819214, "learning_rate": 9.883496551960654e-05, "loss": 2.3840349197387694, "memory(GiB)": 67.19, "step": 8040, "token_acc": 0.47988505747126436, "train_speed(iter/s)": 0.669958 }, { "epoch": 0.3446724647615783, "grad_norm": 3.83442759513855, "learning_rate": 9.88335207849147e-05, "loss": 2.147307205200195, "memory(GiB)": 67.19, "step": 8045, "token_acc": 0.5309090909090909, "train_speed(iter/s)": 0.669977 }, { "epoch": 0.3448866800908273, "grad_norm": 4.037838459014893, "learning_rate": 9.883207516555784e-05, "loss": 2.6220733642578127, "memory(GiB)": 67.19, "step": 8050, "token_acc": 0.4507462686567164, "train_speed(iter/s)": 0.670039 }, { "epoch": 0.34510089542007627, "grad_norm": 3.314868211746216, "learning_rate": 9.883062866156213e-05, "loss": 2.5837419509887694, "memory(GiB)": 67.19, "step": 8055, "token_acc": 0.45, "train_speed(iter/s)": 0.670153 }, { "epoch": 0.34531511074932525, "grad_norm": 3.850893259048462, "learning_rate": 9.882918127295376e-05, "loss": 2.8404125213623046, "memory(GiB)": 67.19, "step": 8060, "token_acc": 0.4563758389261745, "train_speed(iter/s)": 0.67017 }, { "epoch": 0.3455293260785742, "grad_norm": 2.565079927444458, "learning_rate": 9.882773299975897e-05, "loss": 2.6271484375, "memory(GiB)": 67.19, "step": 8065, "token_acc": 0.4696969696969697, 
"train_speed(iter/s)": 0.670141 }, { "epoch": 0.34574354140782315, "grad_norm": 3.026597261428833, "learning_rate": 9.8826283842004e-05, "loss": 2.342300605773926, "memory(GiB)": 67.19, "step": 8070, "token_acc": 0.4808362369337979, "train_speed(iter/s)": 0.670246 }, { "epoch": 0.34595775673707213, "grad_norm": 2.390862464904785, "learning_rate": 9.882483379971509e-05, "loss": 2.179221343994141, "memory(GiB)": 67.19, "step": 8075, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.670243 }, { "epoch": 0.34617197206632105, "grad_norm": 3.799018144607544, "learning_rate": 9.882338287291851e-05, "loss": 2.6694801330566404, "memory(GiB)": 67.19, "step": 8080, "token_acc": 0.48951048951048953, "train_speed(iter/s)": 0.670236 }, { "epoch": 0.34638618739557003, "grad_norm": 4.54005765914917, "learning_rate": 9.882193106164055e-05, "loss": 2.606907844543457, "memory(GiB)": 67.19, "step": 8085, "token_acc": 0.45588235294117646, "train_speed(iter/s)": 0.670187 }, { "epoch": 0.346600402724819, "grad_norm": 2.5567805767059326, "learning_rate": 9.882047836590752e-05, "loss": 2.616243362426758, "memory(GiB)": 67.19, "step": 8090, "token_acc": 0.43823529411764706, "train_speed(iter/s)": 0.670147 }, { "epoch": 0.34681461805406794, "grad_norm": 4.002796649932861, "learning_rate": 9.881902478574571e-05, "loss": 2.2678653717041017, "memory(GiB)": 67.19, "step": 8095, "token_acc": 0.5407407407407407, "train_speed(iter/s)": 0.670111 }, { "epoch": 0.3470288333833169, "grad_norm": 4.280463218688965, "learning_rate": 9.88175703211815e-05, "loss": 2.0644330978393555, "memory(GiB)": 67.19, "step": 8100, "token_acc": 0.5, "train_speed(iter/s)": 0.669985 }, { "epoch": 0.3472430487125659, "grad_norm": 4.067556381225586, "learning_rate": 9.88161149722412e-05, "loss": 2.349827003479004, "memory(GiB)": 67.19, "step": 8105, "token_acc": 0.49377593360995853, "train_speed(iter/s)": 0.669951 }, { "epoch": 0.3474572640418148, "grad_norm": 5.02705192565918, "learning_rate": 
9.881465873895116e-05, "loss": 2.523386001586914, "memory(GiB)": 67.19, "step": 8110, "token_acc": 0.47794117647058826, "train_speed(iter/s)": 0.669969 }, { "epoch": 0.3476714793710638, "grad_norm": 4.622784614562988, "learning_rate": 9.881320162133781e-05, "loss": 2.497629165649414, "memory(GiB)": 67.19, "step": 8115, "token_acc": 0.5214521452145214, "train_speed(iter/s)": 0.670037 }, { "epoch": 0.3478856947003128, "grad_norm": 3.3779289722442627, "learning_rate": 9.881174361942751e-05, "loss": 2.501748275756836, "memory(GiB)": 67.19, "step": 8120, "token_acc": 0.49038461538461536, "train_speed(iter/s)": 0.670075 }, { "epoch": 0.3480999100295617, "grad_norm": 3.3554701805114746, "learning_rate": 9.881028473324669e-05, "loss": 2.6123594284057616, "memory(GiB)": 67.19, "step": 8125, "token_acc": 0.5273311897106109, "train_speed(iter/s)": 0.670139 }, { "epoch": 0.3483141253588107, "grad_norm": 3.550994396209717, "learning_rate": 9.880882496282176e-05, "loss": 2.433490180969238, "memory(GiB)": 67.19, "step": 8130, "token_acc": 0.47265625, "train_speed(iter/s)": 0.670096 }, { "epoch": 0.34852834068805966, "grad_norm": 3.881026268005371, "learning_rate": 9.88073643081792e-05, "loss": 2.549307632446289, "memory(GiB)": 67.19, "step": 8135, "token_acc": 0.48562300319488816, "train_speed(iter/s)": 0.670135 }, { "epoch": 0.3487425560173086, "grad_norm": 2.878720998764038, "learning_rate": 9.880590276934543e-05, "loss": 2.449875068664551, "memory(GiB)": 67.19, "step": 8140, "token_acc": 0.47305389221556887, "train_speed(iter/s)": 0.670241 }, { "epoch": 0.34895677134655756, "grad_norm": 3.008204460144043, "learning_rate": 9.880444034634698e-05, "loss": 2.2278377532958986, "memory(GiB)": 67.25, "step": 8145, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.67021 }, { "epoch": 0.34917098667580654, "grad_norm": 3.1828789710998535, "learning_rate": 9.880297703921027e-05, "loss": 2.4043960571289062, "memory(GiB)": 67.25, "step": 8150, "token_acc": 0.4753521126760563, 
"train_speed(iter/s)": 0.670227 }, { "epoch": 0.34938520200505546, "grad_norm": 2.7902493476867676, "learning_rate": 9.880151284796187e-05, "loss": 2.728541946411133, "memory(GiB)": 67.25, "step": 8155, "token_acc": 0.49266862170087977, "train_speed(iter/s)": 0.670155 }, { "epoch": 0.34959941733430444, "grad_norm": 3.896953821182251, "learning_rate": 9.880004777262829e-05, "loss": 2.3543991088867187, "memory(GiB)": 67.25, "step": 8160, "token_acc": 0.48863636363636365, "train_speed(iter/s)": 0.670309 }, { "epoch": 0.3498136326635534, "grad_norm": 3.541951894760132, "learning_rate": 9.879858181323607e-05, "loss": 2.456346130371094, "memory(GiB)": 67.25, "step": 8165, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.67033 }, { "epoch": 0.35002784799280234, "grad_norm": 4.730434894561768, "learning_rate": 9.879711496981174e-05, "loss": 2.753558349609375, "memory(GiB)": 68.52, "step": 8170, "token_acc": 0.4413793103448276, "train_speed(iter/s)": 0.670355 }, { "epoch": 0.3502420633220513, "grad_norm": 4.823009967803955, "learning_rate": 9.879564724238193e-05, "loss": 2.4662410736083986, "memory(GiB)": 68.52, "step": 8175, "token_acc": 0.468, "train_speed(iter/s)": 0.670418 }, { "epoch": 0.3504562786513003, "grad_norm": 3.401663303375244, "learning_rate": 9.879417863097318e-05, "loss": 2.4626575469970704, "memory(GiB)": 68.52, "step": 8180, "token_acc": 0.49097472924187724, "train_speed(iter/s)": 0.670376 }, { "epoch": 0.3506704939805492, "grad_norm": 4.214817047119141, "learning_rate": 9.879270913561209e-05, "loss": 2.587025451660156, "memory(GiB)": 68.52, "step": 8185, "token_acc": 0.45222929936305734, "train_speed(iter/s)": 0.670428 }, { "epoch": 0.3508847093097982, "grad_norm": 3.298600673675537, "learning_rate": 9.879123875632534e-05, "loss": 2.3884489059448244, "memory(GiB)": 68.52, "step": 8190, "token_acc": 0.4831804281345566, "train_speed(iter/s)": 0.670398 }, { "epoch": 0.3510989246390472, "grad_norm": 3.4742770195007324, "learning_rate": 
9.878976749313951e-05, "loss": 2.42202091217041, "memory(GiB)": 68.52, "step": 8195, "token_acc": 0.484251968503937, "train_speed(iter/s)": 0.670441 }, { "epoch": 0.3513131399682961, "grad_norm": 2.6566267013549805, "learning_rate": 9.878829534608127e-05, "loss": 2.2544689178466797, "memory(GiB)": 68.52, "step": 8200, "token_acc": 0.5247524752475248, "train_speed(iter/s)": 0.670426 }, { "epoch": 0.3515273552975451, "grad_norm": 3.2724082469940186, "learning_rate": 9.878682231517731e-05, "loss": 1.9147798538208007, "memory(GiB)": 68.52, "step": 8205, "token_acc": 0.5864661654135338, "train_speed(iter/s)": 0.670384 }, { "epoch": 0.35174157062679406, "grad_norm": 3.6434731483459473, "learning_rate": 9.878534840045428e-05, "loss": 2.3791332244873047, "memory(GiB)": 68.52, "step": 8210, "token_acc": 0.45918367346938777, "train_speed(iter/s)": 0.670406 }, { "epoch": 0.351955785956043, "grad_norm": 3.081589698791504, "learning_rate": 9.878387360193891e-05, "loss": 2.677153205871582, "memory(GiB)": 68.52, "step": 8215, "token_acc": 0.453125, "train_speed(iter/s)": 0.670537 }, { "epoch": 0.35217000128529197, "grad_norm": 2.8252241611480713, "learning_rate": 9.87823979196579e-05, "loss": 2.4002986907958985, "memory(GiB)": 68.52, "step": 8220, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.670607 }, { "epoch": 0.35238421661454095, "grad_norm": 2.3668084144592285, "learning_rate": 9.8780921353638e-05, "loss": 2.50281982421875, "memory(GiB)": 68.52, "step": 8225, "token_acc": 0.4584615384615385, "train_speed(iter/s)": 0.670678 }, { "epoch": 0.3525984319437899, "grad_norm": 4.121401309967041, "learning_rate": 9.877944390390594e-05, "loss": 2.205881881713867, "memory(GiB)": 68.52, "step": 8230, "token_acc": 0.5019455252918288, "train_speed(iter/s)": 0.670666 }, { "epoch": 0.35281264727303885, "grad_norm": 3.335205554962158, "learning_rate": 9.87779655704885e-05, "loss": 2.4440656661987306, "memory(GiB)": 68.52, "step": 8235, "token_acc": 0.4774436090225564, 
"train_speed(iter/s)": 0.670635 }, { "epoch": 0.3530268626022878, "grad_norm": 3.4495413303375244, "learning_rate": 9.877648635341245e-05, "loss": 2.4860855102539063, "memory(GiB)": 68.52, "step": 8240, "token_acc": 0.4713656387665198, "train_speed(iter/s)": 0.670723 }, { "epoch": 0.3532410779315368, "grad_norm": 4.408472061157227, "learning_rate": 9.877500625270459e-05, "loss": 2.5301673889160154, "memory(GiB)": 68.52, "step": 8245, "token_acc": 0.45390070921985815, "train_speed(iter/s)": 0.670696 }, { "epoch": 0.35345529326078573, "grad_norm": 9.570392608642578, "learning_rate": 9.877352526839174e-05, "loss": 2.2119693756103516, "memory(GiB)": 68.52, "step": 8250, "token_acc": 0.5413533834586466, "train_speed(iter/s)": 0.670628 }, { "epoch": 0.3536695085900347, "grad_norm": 2.703714609146118, "learning_rate": 9.877204340050075e-05, "loss": 2.2290172576904297, "memory(GiB)": 68.52, "step": 8255, "token_acc": 0.5136054421768708, "train_speed(iter/s)": 0.670609 }, { "epoch": 0.3538837239192837, "grad_norm": 3.1032795906066895, "learning_rate": 9.87705606490584e-05, "loss": 2.303827667236328, "memory(GiB)": 68.52, "step": 8260, "token_acc": 0.5112781954887218, "train_speed(iter/s)": 0.670691 }, { "epoch": 0.3540979392485326, "grad_norm": 2.905134439468384, "learning_rate": 9.876907701409164e-05, "loss": 2.4978485107421875, "memory(GiB)": 68.52, "step": 8265, "token_acc": 0.44366197183098594, "train_speed(iter/s)": 0.670747 }, { "epoch": 0.3543121545777816, "grad_norm": 2.486205577850342, "learning_rate": 9.876759249562727e-05, "loss": 2.51983585357666, "memory(GiB)": 68.52, "step": 8270, "token_acc": 0.4876325088339223, "train_speed(iter/s)": 0.670682 }, { "epoch": 0.35452636990703057, "grad_norm": 3.213646173477173, "learning_rate": 9.876610709369221e-05, "loss": 2.4617176055908203, "memory(GiB)": 68.52, "step": 8275, "token_acc": 0.5152838427947598, "train_speed(iter/s)": 0.670718 }, { "epoch": 0.3547405852362795, "grad_norm": 2.937588691711426, "learning_rate": 
9.876462080831338e-05, "loss": 2.375570869445801, "memory(GiB)": 68.52, "step": 8280, "token_acc": 0.501577287066246, "train_speed(iter/s)": 0.670563 }, { "epoch": 0.35495480056552847, "grad_norm": 3.1224544048309326, "learning_rate": 9.876313363951772e-05, "loss": 2.6287521362304687, "memory(GiB)": 68.52, "step": 8285, "token_acc": 0.45, "train_speed(iter/s)": 0.670576 }, { "epoch": 0.35516901589477745, "grad_norm": 3.356656789779663, "learning_rate": 9.876164558733213e-05, "loss": 2.6564233779907225, "memory(GiB)": 68.52, "step": 8290, "token_acc": 0.44375, "train_speed(iter/s)": 0.670457 }, { "epoch": 0.3553832312240264, "grad_norm": 3.0417897701263428, "learning_rate": 9.87601566517836e-05, "loss": 2.374237823486328, "memory(GiB)": 68.52, "step": 8295, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.67052 }, { "epoch": 0.35559744655327535, "grad_norm": 3.465109348297119, "learning_rate": 9.875866683289907e-05, "loss": 2.494786262512207, "memory(GiB)": 68.52, "step": 8300, "token_acc": 0.47079037800687284, "train_speed(iter/s)": 0.670541 }, { "epoch": 0.35581166188252433, "grad_norm": 5.128468036651611, "learning_rate": 9.875717613070558e-05, "loss": 2.3856826782226563, "memory(GiB)": 68.52, "step": 8305, "token_acc": 0.46830985915492956, "train_speed(iter/s)": 0.670626 }, { "epoch": 0.35602587721177326, "grad_norm": 3.255760908126831, "learning_rate": 9.875598293298644e-05, "loss": 2.304008674621582, "memory(GiB)": 68.52, "step": 8310, "token_acc": 0.5343511450381679, "train_speed(iter/s)": 0.670604 }, { "epoch": 0.35624009254102224, "grad_norm": 3.4222664833068848, "learning_rate": 9.875449064090483e-05, "loss": 2.0133094787597656, "memory(GiB)": 68.52, "step": 8315, "token_acc": 0.5642201834862385, "train_speed(iter/s)": 0.67067 }, { "epoch": 0.3564543078702712, "grad_norm": 3.2084081172943115, "learning_rate": 9.875299746558988e-05, "loss": 2.57504940032959, "memory(GiB)": 68.52, "step": 8320, "token_acc": 0.48857142857142855, 
"train_speed(iter/s)": 0.670658 }, { "epoch": 0.35666852319952014, "grad_norm": 3.5362820625305176, "learning_rate": 9.875150340706864e-05, "loss": 2.3436826705932616, "memory(GiB)": 68.52, "step": 8325, "token_acc": 0.48109965635738833, "train_speed(iter/s)": 0.670626 }, { "epoch": 0.3568827385287691, "grad_norm": 2.5247209072113037, "learning_rate": 9.875000846536818e-05, "loss": 2.549205207824707, "memory(GiB)": 68.52, "step": 8330, "token_acc": 0.44846796657381616, "train_speed(iter/s)": 0.670613 }, { "epoch": 0.3570969538580181, "grad_norm": 3.496807336807251, "learning_rate": 9.874851264051559e-05, "loss": 2.4903820037841795, "memory(GiB)": 68.52, "step": 8335, "token_acc": 0.4351851851851852, "train_speed(iter/s)": 0.670645 }, { "epoch": 0.357311169187267, "grad_norm": 3.514702796936035, "learning_rate": 9.874701593253797e-05, "loss": 2.6538389205932615, "memory(GiB)": 68.52, "step": 8340, "token_acc": 0.4691689008042895, "train_speed(iter/s)": 0.67065 }, { "epoch": 0.357525384516516, "grad_norm": 3.9462075233459473, "learning_rate": 9.874551834146242e-05, "loss": 2.4121728897094727, "memory(GiB)": 68.52, "step": 8345, "token_acc": 0.48854961832061067, "train_speed(iter/s)": 0.670614 }, { "epoch": 0.357739599845765, "grad_norm": 4.116827011108398, "learning_rate": 9.874401986731609e-05, "loss": 2.208618927001953, "memory(GiB)": 68.52, "step": 8350, "token_acc": 0.5220588235294118, "train_speed(iter/s)": 0.670642 }, { "epoch": 0.3579538151750139, "grad_norm": 5.604356288909912, "learning_rate": 9.87425205101261e-05, "loss": 2.5268482208251952, "memory(GiB)": 68.52, "step": 8355, "token_acc": 0.47101449275362317, "train_speed(iter/s)": 0.67062 }, { "epoch": 0.3581680305042629, "grad_norm": 3.3616621494293213, "learning_rate": 9.874102026991964e-05, "loss": 2.5725582122802733, "memory(GiB)": 68.52, "step": 8360, "token_acc": 0.4855072463768116, "train_speed(iter/s)": 0.670659 }, { "epoch": 0.35838224583351186, "grad_norm": 3.1126697063446045, "learning_rate": 
9.873951914672386e-05, "loss": 2.3739292144775392, "memory(GiB)": 68.52, "step": 8365, "token_acc": 0.4797507788161994, "train_speed(iter/s)": 0.670683 }, { "epoch": 0.3585964611627608, "grad_norm": 3.8024203777313232, "learning_rate": 9.873801714056599e-05, "loss": 2.212778091430664, "memory(GiB)": 68.52, "step": 8370, "token_acc": 0.5107142857142857, "train_speed(iter/s)": 0.67068 }, { "epoch": 0.35881067649200976, "grad_norm": 2.9633848667144775, "learning_rate": 9.87365142514732e-05, "loss": 2.5407447814941406, "memory(GiB)": 68.52, "step": 8375, "token_acc": 0.47580645161290325, "train_speed(iter/s)": 0.670647 }, { "epoch": 0.35902489182125874, "grad_norm": 3.626485824584961, "learning_rate": 9.873501047947274e-05, "loss": 2.615427017211914, "memory(GiB)": 68.52, "step": 8380, "token_acc": 0.43670886075949367, "train_speed(iter/s)": 0.670662 }, { "epoch": 0.35923910715050766, "grad_norm": 3.9686553478240967, "learning_rate": 9.873350582459184e-05, "loss": 2.2121963500976562, "memory(GiB)": 68.52, "step": 8385, "token_acc": 0.5152542372881356, "train_speed(iter/s)": 0.670659 }, { "epoch": 0.35945332247975664, "grad_norm": 3.84218430519104, "learning_rate": 9.873200028685778e-05, "loss": 2.6304391860961913, "memory(GiB)": 68.52, "step": 8390, "token_acc": 0.46774193548387094, "train_speed(iter/s)": 0.670686 }, { "epoch": 0.3596675378090056, "grad_norm": 3.4530415534973145, "learning_rate": 9.873049386629782e-05, "loss": 2.354531097412109, "memory(GiB)": 68.52, "step": 8395, "token_acc": 0.5147058823529411, "train_speed(iter/s)": 0.670734 }, { "epoch": 0.3598817531382546, "grad_norm": 3.4919118881225586, "learning_rate": 9.872898656293925e-05, "loss": 2.266070747375488, "memory(GiB)": 70.96, "step": 8400, "token_acc": 0.5124223602484472, "train_speed(iter/s)": 0.670668 }, { "epoch": 0.3600959684675035, "grad_norm": 3.064103603363037, "learning_rate": 9.872747837680938e-05, "loss": 2.474532890319824, "memory(GiB)": 70.96, "step": 8405, "token_acc": 
0.4580152671755725, "train_speed(iter/s)": 0.670667 }, { "epoch": 0.3603101837967525, "grad_norm": 3.198772668838501, "learning_rate": 9.872596930793551e-05, "loss": 2.3331613540649414, "memory(GiB)": 70.96, "step": 8410, "token_acc": 0.4725609756097561, "train_speed(iter/s)": 0.670726 }, { "epoch": 0.3605243991260015, "grad_norm": 3.734410524368286, "learning_rate": 9.872445935634502e-05, "loss": 2.432604217529297, "memory(GiB)": 70.96, "step": 8415, "token_acc": 0.4798657718120805, "train_speed(iter/s)": 0.670673 }, { "epoch": 0.3607386144552504, "grad_norm": 3.06530499458313, "learning_rate": 9.872294852206523e-05, "loss": 2.2956125259399416, "memory(GiB)": 70.96, "step": 8420, "token_acc": 0.4831081081081081, "train_speed(iter/s)": 0.670542 }, { "epoch": 0.3609528297844994, "grad_norm": 2.906522750854492, "learning_rate": 9.872143680512353e-05, "loss": 2.3963207244873046, "memory(GiB)": 70.96, "step": 8425, "token_acc": 0.4813664596273292, "train_speed(iter/s)": 0.670543 }, { "epoch": 0.36116704511374836, "grad_norm": 14.132458686828613, "learning_rate": 9.87199242055473e-05, "loss": 2.8360162734985352, "memory(GiB)": 70.96, "step": 8430, "token_acc": 0.4652567975830816, "train_speed(iter/s)": 0.67051 }, { "epoch": 0.3613812604429973, "grad_norm": 3.3452394008636475, "learning_rate": 9.871841072336393e-05, "loss": 2.3384531021118162, "memory(GiB)": 70.96, "step": 8435, "token_acc": 0.5410447761194029, "train_speed(iter/s)": 0.670557 }, { "epoch": 0.36159547577224627, "grad_norm": 2.5722086429595947, "learning_rate": 9.871689635860085e-05, "loss": 2.3986942291259767, "memory(GiB)": 70.96, "step": 8440, "token_acc": 0.48226950354609927, "train_speed(iter/s)": 0.670577 }, { "epoch": 0.36180969110149525, "grad_norm": 3.406531810760498, "learning_rate": 9.87153811112855e-05, "loss": 2.7823633193969726, "memory(GiB)": 70.96, "step": 8445, "token_acc": 0.4671280276816609, "train_speed(iter/s)": 0.670563 }, { "epoch": 0.36202390643074417, "grad_norm": 
4.336486339569092, "learning_rate": 9.87138649814453e-05, "loss": 2.3874481201171873, "memory(GiB)": 70.96, "step": 8450, "token_acc": 0.47720364741641336, "train_speed(iter/s)": 0.670597 }, { "epoch": 0.36223812175999315, "grad_norm": 3.304215669631958, "learning_rate": 9.871234796910776e-05, "loss": 2.38768310546875, "memory(GiB)": 70.96, "step": 8455, "token_acc": 0.43728813559322033, "train_speed(iter/s)": 0.670643 }, { "epoch": 0.3624523370892421, "grad_norm": 3.423144578933716, "learning_rate": 9.871083007430033e-05, "loss": 2.6033416748046876, "memory(GiB)": 70.96, "step": 8460, "token_acc": 0.4808362369337979, "train_speed(iter/s)": 0.670578 }, { "epoch": 0.36266655241849105, "grad_norm": 2.7056376934051514, "learning_rate": 9.870931129705053e-05, "loss": 2.4396480560302733, "memory(GiB)": 70.96, "step": 8465, "token_acc": 0.4967741935483871, "train_speed(iter/s)": 0.670574 }, { "epoch": 0.36288076774774003, "grad_norm": 4.034947395324707, "learning_rate": 9.870779163738585e-05, "loss": 2.412648010253906, "memory(GiB)": 70.96, "step": 8470, "token_acc": 0.46178343949044587, "train_speed(iter/s)": 0.670632 }, { "epoch": 0.363094983076989, "grad_norm": 3.330928325653076, "learning_rate": 9.870627109533384e-05, "loss": 2.687074661254883, "memory(GiB)": 70.96, "step": 8475, "token_acc": 0.45263157894736844, "train_speed(iter/s)": 0.670589 }, { "epoch": 0.36330919840623793, "grad_norm": 2.6154885292053223, "learning_rate": 9.870474967092204e-05, "loss": 2.360422897338867, "memory(GiB)": 70.96, "step": 8480, "token_acc": 0.49504950495049505, "train_speed(iter/s)": 0.670635 }, { "epoch": 0.3635234137354869, "grad_norm": 3.5561563968658447, "learning_rate": 9.8703227364178e-05, "loss": 2.4160070419311523, "memory(GiB)": 70.96, "step": 8485, "token_acc": 0.48338368580060426, "train_speed(iter/s)": 0.670609 }, { "epoch": 0.3637376290647359, "grad_norm": 3.0959270000457764, "learning_rate": 9.870170417512934e-05, "loss": 2.400890350341797, "memory(GiB)": 70.96, 
"step": 8490, "token_acc": 0.48464163822525597, "train_speed(iter/s)": 0.670593 }, { "epoch": 0.3639518443939848, "grad_norm": 3.061124086380005, "learning_rate": 9.87001801038036e-05, "loss": 2.5184213638305666, "memory(GiB)": 70.96, "step": 8495, "token_acc": 0.4627831715210356, "train_speed(iter/s)": 0.670614 }, { "epoch": 0.3641660597232338, "grad_norm": 3.606372356414795, "learning_rate": 9.86986551502284e-05, "loss": 2.613388442993164, "memory(GiB)": 70.96, "step": 8500, "token_acc": 0.4574468085106383, "train_speed(iter/s)": 0.670621 }, { "epoch": 0.3641660597232338, "eval_loss": 2.201573371887207, "eval_runtime": 16.9804, "eval_samples_per_second": 5.889, "eval_steps_per_second": 5.889, "eval_token_acc": 0.4696132596685083, "step": 8500 }, { "epoch": 0.36438027505248277, "grad_norm": 3.197000503540039, "learning_rate": 9.86971293144314e-05, "loss": 2.4157384872436523, "memory(GiB)": 70.96, "step": 8505, "token_acc": 0.46099290780141844, "train_speed(iter/s)": 0.669548 }, { "epoch": 0.3645944903817317, "grad_norm": 3.077733278274536, "learning_rate": 9.869560259644021e-05, "loss": 2.3754192352294923, "memory(GiB)": 70.96, "step": 8510, "token_acc": 0.5100286532951289, "train_speed(iter/s)": 0.669445 }, { "epoch": 0.3648087057109807, "grad_norm": 3.8776328563690186, "learning_rate": 9.869407499628251e-05, "loss": 2.2794233322143556, "memory(GiB)": 70.96, "step": 8515, "token_acc": 0.483271375464684, "train_speed(iter/s)": 0.669459 }, { "epoch": 0.36502292104022965, "grad_norm": 2.9109854698181152, "learning_rate": 9.869254651398596e-05, "loss": 2.5435108184814452, "memory(GiB)": 70.96, "step": 8520, "token_acc": 0.46827794561933533, "train_speed(iter/s)": 0.669362 }, { "epoch": 0.3652371363694786, "grad_norm": 3.4610042572021484, "learning_rate": 9.869101714957825e-05, "loss": 2.0194021224975587, "memory(GiB)": 70.96, "step": 8525, "token_acc": 0.5763358778625954, "train_speed(iter/s)": 0.66943 }, { "epoch": 0.36545135169872756, "grad_norm": 
4.758779525756836, "learning_rate": 9.868948690308708e-05, "loss": 2.4513923645019533, "memory(GiB)": 70.96, "step": 8530, "token_acc": 0.46273291925465837, "train_speed(iter/s)": 0.669424 }, { "epoch": 0.36566556702797653, "grad_norm": 11.627847671508789, "learning_rate": 9.868795577454019e-05, "loss": 2.756961441040039, "memory(GiB)": 70.96, "step": 8535, "token_acc": 0.4485049833887043, "train_speed(iter/s)": 0.669409 }, { "epoch": 0.36587978235722546, "grad_norm": 3.2000443935394287, "learning_rate": 9.868642376396531e-05, "loss": 2.574630546569824, "memory(GiB)": 70.96, "step": 8540, "token_acc": 0.48307692307692307, "train_speed(iter/s)": 0.66945 }, { "epoch": 0.36609399768647444, "grad_norm": 2.871514081954956, "learning_rate": 9.868489087139017e-05, "loss": 2.5973699569702147, "memory(GiB)": 70.96, "step": 8545, "token_acc": 0.4340659340659341, "train_speed(iter/s)": 0.669516 }, { "epoch": 0.3663082130157234, "grad_norm": 3.181042432785034, "learning_rate": 9.868335709684259e-05, "loss": 2.3207006454467773, "memory(GiB)": 70.96, "step": 8550, "token_acc": 0.5105633802816901, "train_speed(iter/s)": 0.669492 }, { "epoch": 0.36652242834497234, "grad_norm": 5.337417125701904, "learning_rate": 9.868182244035032e-05, "loss": 2.2662994384765627, "memory(GiB)": 70.96, "step": 8555, "token_acc": 0.4777327935222672, "train_speed(iter/s)": 0.66961 }, { "epoch": 0.3667366436742213, "grad_norm": 3.9199817180633545, "learning_rate": 9.868028690194115e-05, "loss": 2.6968454360961913, "memory(GiB)": 70.96, "step": 8560, "token_acc": 0.47796610169491527, "train_speed(iter/s)": 0.669631 }, { "epoch": 0.3669508590034703, "grad_norm": 5.221424102783203, "learning_rate": 9.867875048164294e-05, "loss": 2.2991079330444335, "memory(GiB)": 70.96, "step": 8565, "token_acc": 0.5047923322683706, "train_speed(iter/s)": 0.669656 }, { "epoch": 0.3671650743327193, "grad_norm": 3.5174453258514404, "learning_rate": 9.86772131794835e-05, "loss": 2.368901824951172, "memory(GiB)": 70.96, 
"step": 8570, "token_acc": 0.5015290519877675, "train_speed(iter/s)": 0.669594 }, { "epoch": 0.3673792896619682, "grad_norm": 2.933610439300537, "learning_rate": 9.867567499549067e-05, "loss": 2.3261745452880858, "memory(GiB)": 70.96, "step": 8575, "token_acc": 0.46551724137931033, "train_speed(iter/s)": 0.669554 }, { "epoch": 0.3675935049912172, "grad_norm": 3.777057409286499, "learning_rate": 9.867413592969232e-05, "loss": 2.346425247192383, "memory(GiB)": 70.96, "step": 8580, "token_acc": 0.48375451263537905, "train_speed(iter/s)": 0.669575 }, { "epoch": 0.36780772032046616, "grad_norm": 3.40524959564209, "learning_rate": 9.867259598211635e-05, "loss": 2.549146270751953, "memory(GiB)": 70.96, "step": 8585, "token_acc": 0.46178343949044587, "train_speed(iter/s)": 0.669438 }, { "epoch": 0.3680219356497151, "grad_norm": 3.0235185623168945, "learning_rate": 9.867105515279065e-05, "loss": 2.1929742813110353, "memory(GiB)": 70.96, "step": 8590, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.66932 }, { "epoch": 0.36823615097896406, "grad_norm": 5.271905899047852, "learning_rate": 9.866951344174311e-05, "loss": 2.0736595153808595, "memory(GiB)": 70.96, "step": 8595, "token_acc": 0.543778801843318, "train_speed(iter/s)": 0.669283 }, { "epoch": 0.36845036630821304, "grad_norm": 4.538204193115234, "learning_rate": 9.86679708490017e-05, "loss": 2.439841461181641, "memory(GiB)": 70.96, "step": 8600, "token_acc": 0.47586206896551725, "train_speed(iter/s)": 0.669297 }, { "epoch": 0.36866458163746196, "grad_norm": 2.8118693828582764, "learning_rate": 9.866642737459433e-05, "loss": 2.30004768371582, "memory(GiB)": 70.96, "step": 8605, "token_acc": 0.4748201438848921, "train_speed(iter/s)": 0.66921 }, { "epoch": 0.36887879696671094, "grad_norm": 3.2119970321655273, "learning_rate": 9.866488301854898e-05, "loss": 2.2337223052978517, "memory(GiB)": 70.96, "step": 8610, "token_acc": 0.46567164179104475, "train_speed(iter/s)": 0.66924 }, { "epoch": 0.3690930122959599, 
"grad_norm": 2.793152332305908, "learning_rate": 9.866333778089363e-05, "loss": 2.3135244369506838, "memory(GiB)": 70.96, "step": 8615, "token_acc": 0.5062111801242236, "train_speed(iter/s)": 0.669163 }, { "epoch": 0.36930722762520884, "grad_norm": 3.8639023303985596, "learning_rate": 9.866179166165624e-05, "loss": 2.412774848937988, "memory(GiB)": 70.96, "step": 8620, "token_acc": 0.49363057324840764, "train_speed(iter/s)": 0.669171 }, { "epoch": 0.3695214429544578, "grad_norm": 3.659006118774414, "learning_rate": 9.866024466086487e-05, "loss": 2.571263885498047, "memory(GiB)": 70.96, "step": 8625, "token_acc": 0.503731343283582, "train_speed(iter/s)": 0.669116 }, { "epoch": 0.3697356582837068, "grad_norm": 3.6560750007629395, "learning_rate": 9.865869677854751e-05, "loss": 2.411839485168457, "memory(GiB)": 70.96, "step": 8630, "token_acc": 0.550314465408805, "train_speed(iter/s)": 0.669127 }, { "epoch": 0.3699498736129557, "grad_norm": 3.8659815788269043, "learning_rate": 9.865714801473223e-05, "loss": 2.8004261016845704, "memory(GiB)": 70.96, "step": 8635, "token_acc": 0.45364238410596025, "train_speed(iter/s)": 0.669155 }, { "epoch": 0.3701640889422047, "grad_norm": 3.100468873977661, "learning_rate": 9.865559836944705e-05, "loss": 2.5620471954345705, "memory(GiB)": 70.96, "step": 8640, "token_acc": 0.45484949832775917, "train_speed(iter/s)": 0.669226 }, { "epoch": 0.3703783042714537, "grad_norm": 3.842144012451172, "learning_rate": 9.865404784272007e-05, "loss": 2.452947425842285, "memory(GiB)": 70.96, "step": 8645, "token_acc": 0.46808510638297873, "train_speed(iter/s)": 0.669305 }, { "epoch": 0.3705925196007026, "grad_norm": 4.0782647132873535, "learning_rate": 9.865249643457936e-05, "loss": 2.4452487945556642, "memory(GiB)": 70.96, "step": 8650, "token_acc": 0.4944237918215613, "train_speed(iter/s)": 0.669407 }, { "epoch": 0.3708067349299516, "grad_norm": 3.38061261177063, "learning_rate": 9.865094414505305e-05, "loss": 2.5185012817382812, "memory(GiB)": 
70.96, "step": 8655, "token_acc": 0.5154639175257731, "train_speed(iter/s)": 0.669406 }, { "epoch": 0.37102095025920057, "grad_norm": 3.401034116744995, "learning_rate": 9.864939097416926e-05, "loss": 2.485024642944336, "memory(GiB)": 70.96, "step": 8660, "token_acc": 0.4631578947368421, "train_speed(iter/s)": 0.669392 }, { "epoch": 0.3712351655884495, "grad_norm": 4.041676998138428, "learning_rate": 9.864783692195609e-05, "loss": 2.567208099365234, "memory(GiB)": 70.96, "step": 8665, "token_acc": 0.4919093851132686, "train_speed(iter/s)": 0.669394 }, { "epoch": 0.37144938091769847, "grad_norm": 3.6840648651123047, "learning_rate": 9.864628198844173e-05, "loss": 2.701060104370117, "memory(GiB)": 70.96, "step": 8670, "token_acc": 0.4847328244274809, "train_speed(iter/s)": 0.669325 }, { "epoch": 0.37166359624694745, "grad_norm": 3.6139888763427734, "learning_rate": 9.864472617365434e-05, "loss": 2.456930923461914, "memory(GiB)": 70.96, "step": 8675, "token_acc": 0.4539877300613497, "train_speed(iter/s)": 0.669357 }, { "epoch": 0.37187781157619637, "grad_norm": 2.9743590354919434, "learning_rate": 9.86431694776221e-05, "loss": 2.5969640731811525, "memory(GiB)": 70.96, "step": 8680, "token_acc": 0.4584527220630373, "train_speed(iter/s)": 0.669314 }, { "epoch": 0.37209202690544535, "grad_norm": 3.3988406658172607, "learning_rate": 9.864161190037322e-05, "loss": 2.338835906982422, "memory(GiB)": 70.96, "step": 8685, "token_acc": 0.46747967479674796, "train_speed(iter/s)": 0.669238 }, { "epoch": 0.37230624223469433, "grad_norm": 3.2744128704071045, "learning_rate": 9.864005344193591e-05, "loss": 2.558338165283203, "memory(GiB)": 70.96, "step": 8690, "token_acc": 0.4570552147239264, "train_speed(iter/s)": 0.669251 }, { "epoch": 0.37252045756394325, "grad_norm": 3.6731247901916504, "learning_rate": 9.86384941023384e-05, "loss": 2.2265233993530273, "memory(GiB)": 70.96, "step": 8695, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.669284 }, { "epoch": 
0.37273467289319223, "grad_norm": 2.7715375423431396, "learning_rate": 9.863693388160895e-05, "loss": 2.210174560546875, "memory(GiB)": 70.96, "step": 8700, "token_acc": 0.5130718954248366, "train_speed(iter/s)": 0.669337 }, { "epoch": 0.3729488882224412, "grad_norm": 3.0338134765625, "learning_rate": 9.86353727797758e-05, "loss": 2.347762680053711, "memory(GiB)": 70.96, "step": 8705, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.669374 }, { "epoch": 0.37316310355169013, "grad_norm": 4.801391124725342, "learning_rate": 9.863381079686727e-05, "loss": 2.5435914993286133, "memory(GiB)": 70.96, "step": 8710, "token_acc": 0.45569620253164556, "train_speed(iter/s)": 0.669388 }, { "epoch": 0.3733773188809391, "grad_norm": 2.7209599018096924, "learning_rate": 9.863224793291161e-05, "loss": 2.510660743713379, "memory(GiB)": 70.96, "step": 8715, "token_acc": 0.4894366197183099, "train_speed(iter/s)": 0.669442 }, { "epoch": 0.3735915342101881, "grad_norm": 4.6386284828186035, "learning_rate": 9.863068418793718e-05, "loss": 2.2692630767822264, "memory(GiB)": 70.96, "step": 8720, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.66951 }, { "epoch": 0.373805749539437, "grad_norm": 3.270098924636841, "learning_rate": 9.862911956197227e-05, "loss": 2.2659271240234373, "memory(GiB)": 70.96, "step": 8725, "token_acc": 0.5094339622641509, "train_speed(iter/s)": 0.669318 }, { "epoch": 0.374019964868686, "grad_norm": 3.271124839782715, "learning_rate": 9.862755405504527e-05, "loss": 2.3771286010742188, "memory(GiB)": 70.96, "step": 8730, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.669299 }, { "epoch": 0.374234180197935, "grad_norm": 3.5544490814208984, "learning_rate": 9.862598766718449e-05, "loss": 2.3711219787597657, "memory(GiB)": 70.96, "step": 8735, "token_acc": 0.4867549668874172, "train_speed(iter/s)": 0.669336 }, { "epoch": 0.37444839552718395, "grad_norm": 2.830521821975708, "learning_rate": 9.862442039841833e-05, "loss": 2.383512496948242, 
"memory(GiB)": 70.96, "step": 8740, "token_acc": 0.5359712230215827, "train_speed(iter/s)": 0.669267 }, { "epoch": 0.3746626108564329, "grad_norm": 3.4700372219085693, "learning_rate": 9.862285224877518e-05, "loss": 2.634149932861328, "memory(GiB)": 70.96, "step": 8745, "token_acc": 0.4597315436241611, "train_speed(iter/s)": 0.669152 }, { "epoch": 0.37487682618568186, "grad_norm": 3.5255963802337646, "learning_rate": 9.862128321828345e-05, "loss": 2.6619775772094725, "memory(GiB)": 70.96, "step": 8750, "token_acc": 0.4921875, "train_speed(iter/s)": 0.66919 }, { "epoch": 0.37509104151493083, "grad_norm": 6.263245582580566, "learning_rate": 9.861971330697157e-05, "loss": 2.573495864868164, "memory(GiB)": 70.96, "step": 8755, "token_acc": 0.44884488448844884, "train_speed(iter/s)": 0.66928 }, { "epoch": 0.37530525684417976, "grad_norm": 2.8159148693084717, "learning_rate": 9.861814251486796e-05, "loss": 2.2308156967163084, "memory(GiB)": 70.96, "step": 8760, "token_acc": 0.5340501792114696, "train_speed(iter/s)": 0.669386 }, { "epoch": 0.37551947217342874, "grad_norm": 3.6543807983398438, "learning_rate": 9.861657084200112e-05, "loss": 2.427849006652832, "memory(GiB)": 70.96, "step": 8765, "token_acc": 0.475, "train_speed(iter/s)": 0.669398 }, { "epoch": 0.3757336875026777, "grad_norm": 2.90189266204834, "learning_rate": 9.861499828839946e-05, "loss": 2.5050405502319335, "memory(GiB)": 70.96, "step": 8770, "token_acc": 0.4774436090225564, "train_speed(iter/s)": 0.669305 }, { "epoch": 0.37594790283192664, "grad_norm": 3.245464563369751, "learning_rate": 9.861342485409152e-05, "loss": 2.369827651977539, "memory(GiB)": 70.96, "step": 8775, "token_acc": 0.5292096219931272, "train_speed(iter/s)": 0.669354 }, { "epoch": 0.3761621181611756, "grad_norm": 2.9431371688842773, "learning_rate": 9.861185053910577e-05, "loss": 2.3939916610717775, "memory(GiB)": 70.96, "step": 8780, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.669421 }, { "epoch": 0.3763763334904246, 
"grad_norm": 3.0735106468200684, "learning_rate": 9.861027534347075e-05, "loss": 2.748754119873047, "memory(GiB)": 70.96, "step": 8785, "token_acc": 0.470404984423676, "train_speed(iter/s)": 0.66945 }, { "epoch": 0.3765905488196735, "grad_norm": 3.814927339553833, "learning_rate": 9.860869926721502e-05, "loss": 2.397251510620117, "memory(GiB)": 70.96, "step": 8790, "token_acc": 0.4675324675324675, "train_speed(iter/s)": 0.669501 }, { "epoch": 0.3768047641489225, "grad_norm": 2.765106439590454, "learning_rate": 9.860712231036709e-05, "loss": 2.400067901611328, "memory(GiB)": 70.96, "step": 8795, "token_acc": 0.519163763066202, "train_speed(iter/s)": 0.669458 }, { "epoch": 0.3770189794781715, "grad_norm": 4.404904842376709, "learning_rate": 9.860554447295553e-05, "loss": 2.237331581115723, "memory(GiB)": 70.96, "step": 8800, "token_acc": 0.4882154882154882, "train_speed(iter/s)": 0.669407 }, { "epoch": 0.3772331948074204, "grad_norm": 3.0513081550598145, "learning_rate": 9.860396575500894e-05, "loss": 2.4816650390625, "memory(GiB)": 70.96, "step": 8805, "token_acc": 0.4945054945054945, "train_speed(iter/s)": 0.669363 }, { "epoch": 0.3774474101366694, "grad_norm": 3.334895372390747, "learning_rate": 9.860238615655591e-05, "loss": 2.7315101623535156, "memory(GiB)": 70.96, "step": 8810, "token_acc": 0.5020746887966805, "train_speed(iter/s)": 0.669404 }, { "epoch": 0.37766162546591836, "grad_norm": 4.270054340362549, "learning_rate": 9.860080567762508e-05, "loss": 2.620533561706543, "memory(GiB)": 70.96, "step": 8815, "token_acc": 0.4900662251655629, "train_speed(iter/s)": 0.669441 }, { "epoch": 0.3778758407951673, "grad_norm": 3.9127535820007324, "learning_rate": 9.859922431824505e-05, "loss": 2.6712823867797852, "memory(GiB)": 70.96, "step": 8820, "token_acc": 0.4919614147909968, "train_speed(iter/s)": 0.669476 }, { "epoch": 0.37809005612441626, "grad_norm": 3.792829751968384, "learning_rate": 9.859764207844448e-05, "loss": 2.123395538330078, "memory(GiB)": 70.96, 
"step": 8825, "token_acc": 0.5266903914590747, "train_speed(iter/s)": 0.669533 }, { "epoch": 0.37830427145366524, "grad_norm": 3.165712594985962, "learning_rate": 9.859605895825205e-05, "loss": 2.3723114013671873, "memory(GiB)": 70.96, "step": 8830, "token_acc": 0.48562300319488816, "train_speed(iter/s)": 0.669555 }, { "epoch": 0.37851848678291417, "grad_norm": 3.5984551906585693, "learning_rate": 9.859447495769641e-05, "loss": 2.6811933517456055, "memory(GiB)": 70.96, "step": 8835, "token_acc": 0.420863309352518, "train_speed(iter/s)": 0.669548 }, { "epoch": 0.37873270211216314, "grad_norm": 2.4983556270599365, "learning_rate": 9.859289007680628e-05, "loss": 2.5407352447509766, "memory(GiB)": 70.96, "step": 8840, "token_acc": 0.46285714285714286, "train_speed(iter/s)": 0.669505 }, { "epoch": 0.3789469174414121, "grad_norm": 2.8056726455688477, "learning_rate": 9.859130431561035e-05, "loss": 2.426786422729492, "memory(GiB)": 70.96, "step": 8845, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.669529 }, { "epoch": 0.37916113277066105, "grad_norm": 3.893275499343872, "learning_rate": 9.858971767413738e-05, "loss": 2.3847286224365236, "memory(GiB)": 70.96, "step": 8850, "token_acc": 0.5, "train_speed(iter/s)": 0.669458 }, { "epoch": 0.37937534809991, "grad_norm": 3.441770553588867, "learning_rate": 9.858813015241608e-05, "loss": 2.3654335021972654, "memory(GiB)": 70.96, "step": 8855, "token_acc": 0.49836065573770494, "train_speed(iter/s)": 0.669506 }, { "epoch": 0.379589563429159, "grad_norm": 2.318171739578247, "learning_rate": 9.858654175047523e-05, "loss": 2.430364227294922, "memory(GiB)": 70.96, "step": 8860, "token_acc": 0.475, "train_speed(iter/s)": 0.669531 }, { "epoch": 0.37980377875840793, "grad_norm": 3.4516313076019287, "learning_rate": 9.858495246834358e-05, "loss": 2.587477684020996, "memory(GiB)": 70.96, "step": 8865, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.669466 }, { "epoch": 0.3800179940876569, "grad_norm": 
3.4905712604522705, "learning_rate": 9.858336230604996e-05, "loss": 2.4709054946899416, "memory(GiB)": 70.96, "step": 8870, "token_acc": 0.5208333333333334, "train_speed(iter/s)": 0.669422 }, { "epoch": 0.3802322094169059, "grad_norm": 3.4414587020874023, "learning_rate": 9.858177126362315e-05, "loss": 2.495382881164551, "memory(GiB)": 70.96, "step": 8875, "token_acc": 0.4401294498381877, "train_speed(iter/s)": 0.669445 }, { "epoch": 0.3804464247461548, "grad_norm": 3.6015231609344482, "learning_rate": 9.858017934109198e-05, "loss": 2.210086441040039, "memory(GiB)": 70.96, "step": 8880, "token_acc": 0.5092936802973977, "train_speed(iter/s)": 0.669482 }, { "epoch": 0.3806606400754038, "grad_norm": 2.787656307220459, "learning_rate": 9.857858653848529e-05, "loss": 2.4884151458740233, "memory(GiB)": 70.96, "step": 8885, "token_acc": 0.4394904458598726, "train_speed(iter/s)": 0.669506 }, { "epoch": 0.38087485540465277, "grad_norm": 4.356506824493408, "learning_rate": 9.857699285583195e-05, "loss": 2.6710485458374023, "memory(GiB)": 70.96, "step": 8890, "token_acc": 0.45695364238410596, "train_speed(iter/s)": 0.6695 }, { "epoch": 0.3810890707339017, "grad_norm": 3.1482093334198, "learning_rate": 9.857539829316079e-05, "loss": 2.5126842498779296, "memory(GiB)": 70.96, "step": 8895, "token_acc": 0.4542372881355932, "train_speed(iter/s)": 0.66946 }, { "epoch": 0.38130328606315067, "grad_norm": 3.148987293243408, "learning_rate": 9.857380285050073e-05, "loss": 2.8706146240234376, "memory(GiB)": 70.96, "step": 8900, "token_acc": 0.4304635761589404, "train_speed(iter/s)": 0.669468 }, { "epoch": 0.38151750139239965, "grad_norm": 3.324942111968994, "learning_rate": 9.857220652788067e-05, "loss": 2.8134029388427733, "memory(GiB)": 70.96, "step": 8905, "token_acc": 0.42452830188679247, "train_speed(iter/s)": 0.669499 }, { "epoch": 0.38173171672164863, "grad_norm": 2.8856494426727295, "learning_rate": 9.857060932532953e-05, "loss": 2.645358657836914, "memory(GiB)": 70.96, "step": 
8910, "token_acc": 0.5050847457627119, "train_speed(iter/s)": 0.669545 }, { "epoch": 0.38194593205089755, "grad_norm": 3.7645862102508545, "learning_rate": 9.856901124287623e-05, "loss": 2.477504920959473, "memory(GiB)": 70.96, "step": 8915, "token_acc": 0.4641509433962264, "train_speed(iter/s)": 0.66946 }, { "epoch": 0.38216014738014653, "grad_norm": 2.6719613075256348, "learning_rate": 9.856741228054973e-05, "loss": 2.4770111083984374, "memory(GiB)": 70.96, "step": 8920, "token_acc": 0.4850498338870432, "train_speed(iter/s)": 0.669463 }, { "epoch": 0.3823743627093955, "grad_norm": 3.2652769088745117, "learning_rate": 9.8565812438379e-05, "loss": 2.432328224182129, "memory(GiB)": 70.96, "step": 8925, "token_acc": 0.4812286689419795, "train_speed(iter/s)": 0.669519 }, { "epoch": 0.38258857803864443, "grad_norm": 3.3041036128997803, "learning_rate": 9.856421171639302e-05, "loss": 2.3656427383422853, "memory(GiB)": 70.96, "step": 8930, "token_acc": 0.5092936802973977, "train_speed(iter/s)": 0.669574 }, { "epoch": 0.3828027933678934, "grad_norm": 4.058431625366211, "learning_rate": 9.856261011462078e-05, "loss": 2.3372488021850586, "memory(GiB)": 70.96, "step": 8935, "token_acc": 0.5298245614035088, "train_speed(iter/s)": 0.669599 }, { "epoch": 0.3830170086971424, "grad_norm": 4.175623893737793, "learning_rate": 9.856100763309131e-05, "loss": 2.275884437561035, "memory(GiB)": 70.96, "step": 8940, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.669652 }, { "epoch": 0.3832312240263913, "grad_norm": 2.900660514831543, "learning_rate": 9.855940427183364e-05, "loss": 2.417108154296875, "memory(GiB)": 70.96, "step": 8945, "token_acc": 0.46885245901639344, "train_speed(iter/s)": 0.669673 }, { "epoch": 0.3834454393556403, "grad_norm": 3.262465238571167, "learning_rate": 9.855780003087679e-05, "loss": 2.242995834350586, "memory(GiB)": 70.96, "step": 8950, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.66974 }, { "epoch": 0.3836596546848893, "grad_norm": 
3.034620761871338, "learning_rate": 9.855619491024986e-05, "loss": 2.3592050552368162, "memory(GiB)": 70.96, "step": 8955, "token_acc": 0.50390625, "train_speed(iter/s)": 0.669768 }, { "epoch": 0.3838738700141382, "grad_norm": 3.2289133071899414, "learning_rate": 9.855458890998189e-05, "loss": 2.4620922088623045, "memory(GiB)": 70.96, "step": 8960, "token_acc": 0.488135593220339, "train_speed(iter/s)": 0.669814 }, { "epoch": 0.3840880853433872, "grad_norm": 3.2391433715820312, "learning_rate": 9.855298203010201e-05, "loss": 2.457451057434082, "memory(GiB)": 70.96, "step": 8965, "token_acc": 0.5175097276264592, "train_speed(iter/s)": 0.669846 }, { "epoch": 0.38430230067263615, "grad_norm": 3.1695289611816406, "learning_rate": 9.855137427063931e-05, "loss": 2.5303953170776365, "memory(GiB)": 70.96, "step": 8970, "token_acc": 0.47266881028938906, "train_speed(iter/s)": 0.669898 }, { "epoch": 0.3845165160018851, "grad_norm": 3.618049383163452, "learning_rate": 9.854976563162293e-05, "loss": 2.2859432220458986, "memory(GiB)": 70.96, "step": 8975, "token_acc": 0.5063291139240507, "train_speed(iter/s)": 0.669914 }, { "epoch": 0.38473073133113406, "grad_norm": 3.2388687133789062, "learning_rate": 9.854815611308198e-05, "loss": 2.359181785583496, "memory(GiB)": 70.96, "step": 8980, "token_acc": 0.4860557768924303, "train_speed(iter/s)": 0.669952 }, { "epoch": 0.38494494666038304, "grad_norm": 6.349316596984863, "learning_rate": 9.854654571504565e-05, "loss": 2.3214977264404295, "memory(GiB)": 70.96, "step": 8985, "token_acc": 0.5048543689320388, "train_speed(iter/s)": 0.669994 }, { "epoch": 0.38515916198963196, "grad_norm": 4.419235706329346, "learning_rate": 9.854493443754311e-05, "loss": 2.651328468322754, "memory(GiB)": 70.96, "step": 8990, "token_acc": 0.4725274725274725, "train_speed(iter/s)": 0.670077 }, { "epoch": 0.38537337731888094, "grad_norm": 2.5488972663879395, "learning_rate": 9.854332228060354e-05, "loss": 2.315779113769531, "memory(GiB)": 70.96, "step": 
8995, "token_acc": 0.5091383812010444, "train_speed(iter/s)": 0.670059 }, { "epoch": 0.3855875926481299, "grad_norm": 4.30565881729126, "learning_rate": 9.854170924425614e-05, "loss": 2.5382511138916017, "memory(GiB)": 70.96, "step": 9000, "token_acc": 0.4790874524714829, "train_speed(iter/s)": 0.670036 }, { "epoch": 0.3855875926481299, "eval_loss": 2.2508585453033447, "eval_runtime": 16.2445, "eval_samples_per_second": 6.156, "eval_steps_per_second": 6.156, "eval_token_acc": 0.5, "step": 9000 }, { "epoch": 0.38580180797737884, "grad_norm": 3.9848239421844482, "learning_rate": 9.854009532853016e-05, "loss": 2.6507326126098634, "memory(GiB)": 70.96, "step": 9005, "token_acc": 0.48893166506256014, "train_speed(iter/s)": 0.669226 }, { "epoch": 0.3860160233066278, "grad_norm": 3.8726720809936523, "learning_rate": 9.853848053345481e-05, "loss": 2.5601593017578126, "memory(GiB)": 70.96, "step": 9010, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.669266 }, { "epoch": 0.3862302386358768, "grad_norm": 2.7780873775482178, "learning_rate": 9.853686485905935e-05, "loss": 2.6886682510375977, "memory(GiB)": 70.96, "step": 9015, "token_acc": 0.4426229508196721, "train_speed(iter/s)": 0.669296 }, { "epoch": 0.3864444539651257, "grad_norm": 4.223822116851807, "learning_rate": 9.853524830537307e-05, "loss": 2.4455450057983397, "memory(GiB)": 70.96, "step": 9020, "token_acc": 0.4980694980694981, "train_speed(iter/s)": 0.669256 }, { "epoch": 0.3866586692943747, "grad_norm": 3.2612924575805664, "learning_rate": 9.85336308724252e-05, "loss": 2.558125305175781, "memory(GiB)": 70.96, "step": 9025, "token_acc": 0.4954128440366973, "train_speed(iter/s)": 0.669185 }, { "epoch": 0.3868728846236237, "grad_norm": 3.1777329444885254, "learning_rate": 9.853201256024512e-05, "loss": 2.708434295654297, "memory(GiB)": 70.96, "step": 9030, "token_acc": 0.4482758620689655, "train_speed(iter/s)": 0.669196 }, { "epoch": 0.3870870999528726, "grad_norm": 3.1718506813049316, "learning_rate": 
9.853039336886207e-05, "loss": 2.490852165222168, "memory(GiB)": 70.96, "step": 9035, "token_acc": 0.5, "train_speed(iter/s)": 0.669189 }, { "epoch": 0.3873013152821216, "grad_norm": 3.4429855346679688, "learning_rate": 9.852877329830544e-05, "loss": 2.283290481567383, "memory(GiB)": 70.96, "step": 9040, "token_acc": 0.4876325088339223, "train_speed(iter/s)": 0.6692 }, { "epoch": 0.38751553061137056, "grad_norm": 4.140913963317871, "learning_rate": 9.852715234860454e-05, "loss": 2.4481348037719726, "memory(GiB)": 70.96, "step": 9045, "token_acc": 0.45787545787545786, "train_speed(iter/s)": 0.669212 }, { "epoch": 0.3877297459406195, "grad_norm": 3.750577926635742, "learning_rate": 9.852553051978877e-05, "loss": 2.548078727722168, "memory(GiB)": 70.96, "step": 9050, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.669233 }, { "epoch": 0.38794396126986846, "grad_norm": 4.844271183013916, "learning_rate": 9.852390781188749e-05, "loss": 2.4159717559814453, "memory(GiB)": 70.96, "step": 9055, "token_acc": 0.47950819672131145, "train_speed(iter/s)": 0.669264 }, { "epoch": 0.38815817659911744, "grad_norm": 3.6852705478668213, "learning_rate": 9.852228422493011e-05, "loss": 2.5666788101196287, "memory(GiB)": 70.96, "step": 9060, "token_acc": 0.4696485623003195, "train_speed(iter/s)": 0.669242 }, { "epoch": 0.38837239192836637, "grad_norm": 3.9005939960479736, "learning_rate": 9.852065975894601e-05, "loss": 2.3970775604248047, "memory(GiB)": 70.96, "step": 9065, "token_acc": 0.4375, "train_speed(iter/s)": 0.669146 }, { "epoch": 0.38858660725761535, "grad_norm": 2.7409958839416504, "learning_rate": 9.851903441396466e-05, "loss": 2.610875701904297, "memory(GiB)": 70.96, "step": 9070, "token_acc": 0.45714285714285713, "train_speed(iter/s)": 0.669136 }, { "epoch": 0.3888008225868643, "grad_norm": 3.2850100994110107, "learning_rate": 9.851740819001549e-05, "loss": 2.536575508117676, "memory(GiB)": 70.96, "step": 9075, "token_acc": 0.4850498338870432, 
"train_speed(iter/s)": 0.669183 }, { "epoch": 0.3890150379161133, "grad_norm": 2.967456579208374, "learning_rate": 9.851578108712795e-05, "loss": 2.5232475280761717, "memory(GiB)": 70.96, "step": 9080, "token_acc": 0.44966442953020136, "train_speed(iter/s)": 0.669249 }, { "epoch": 0.38922925324536223, "grad_norm": 3.4353878498077393, "learning_rate": 9.851415310533151e-05, "loss": 2.6445648193359377, "memory(GiB)": 70.96, "step": 9085, "token_acc": 0.4524590163934426, "train_speed(iter/s)": 0.669273 }, { "epoch": 0.3894434685746112, "grad_norm": 4.724151134490967, "learning_rate": 9.85125242446557e-05, "loss": 2.221598434448242, "memory(GiB)": 70.96, "step": 9090, "token_acc": 0.52, "train_speed(iter/s)": 0.669354 }, { "epoch": 0.3896576839038602, "grad_norm": 4.207653045654297, "learning_rate": 9.851089450513e-05, "loss": 2.2282176971435548, "memory(GiB)": 70.96, "step": 9095, "token_acc": 0.5153846153846153, "train_speed(iter/s)": 0.669477 }, { "epoch": 0.3898718992331091, "grad_norm": 2.8178882598876953, "learning_rate": 9.850926388678393e-05, "loss": 2.303899955749512, "memory(GiB)": 70.96, "step": 9100, "token_acc": 0.4897959183673469, "train_speed(iter/s)": 0.669533 }, { "epoch": 0.3900861145623581, "grad_norm": 3.9012675285339355, "learning_rate": 9.850763238964705e-05, "loss": 2.266895866394043, "memory(GiB)": 70.96, "step": 9105, "token_acc": 0.5148148148148148, "train_speed(iter/s)": 0.669434 }, { "epoch": 0.39030032989160707, "grad_norm": 2.787470579147339, "learning_rate": 9.85060000137489e-05, "loss": 2.682663917541504, "memory(GiB)": 70.96, "step": 9110, "token_acc": 0.4524714828897338, "train_speed(iter/s)": 0.669488 }, { "epoch": 0.390514545220856, "grad_norm": 3.1452434062957764, "learning_rate": 9.850436675911905e-05, "loss": 2.5089725494384765, "memory(GiB)": 70.96, "step": 9115, "token_acc": 0.5033557046979866, "train_speed(iter/s)": 0.669566 }, { "epoch": 0.39072876055010497, "grad_norm": 4.789245128631592, "learning_rate": 
9.85027326257871e-05, "loss": 2.408730316162109, "memory(GiB)": 70.96, "step": 9120, "token_acc": 0.5257352941176471, "train_speed(iter/s)": 0.669633 }, { "epoch": 0.39094297587935395, "grad_norm": 3.343635082244873, "learning_rate": 9.850109761378266e-05, "loss": 2.574104881286621, "memory(GiB)": 70.96, "step": 9125, "token_acc": 0.45151515151515154, "train_speed(iter/s)": 0.669559 }, { "epoch": 0.3911571912086029, "grad_norm": 3.9554662704467773, "learning_rate": 9.849946172313533e-05, "loss": 2.410190391540527, "memory(GiB)": 70.96, "step": 9130, "token_acc": 0.496875, "train_speed(iter/s)": 0.669561 }, { "epoch": 0.39137140653785185, "grad_norm": 3.3222837448120117, "learning_rate": 9.849782495387476e-05, "loss": 2.5264495849609374, "memory(GiB)": 70.96, "step": 9135, "token_acc": 0.4575757575757576, "train_speed(iter/s)": 0.669584 }, { "epoch": 0.39158562186710083, "grad_norm": 3.4488720893859863, "learning_rate": 9.849618730603059e-05, "loss": 2.2178266525268553, "memory(GiB)": 70.96, "step": 9140, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.669687 }, { "epoch": 0.39179983719634975, "grad_norm": 3.901923894882202, "learning_rate": 9.84945487796325e-05, "loss": 2.3302146911621096, "memory(GiB)": 70.96, "step": 9145, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.669701 }, { "epoch": 0.39201405252559873, "grad_norm": 4.9036383628845215, "learning_rate": 9.849290937471017e-05, "loss": 2.646915054321289, "memory(GiB)": 70.96, "step": 9150, "token_acc": 0.4397163120567376, "train_speed(iter/s)": 0.66979 }, { "epoch": 0.3922282678548477, "grad_norm": 3.0255484580993652, "learning_rate": 9.849126909129328e-05, "loss": 2.3957893371582033, "memory(GiB)": 70.96, "step": 9155, "token_acc": 0.4589041095890411, "train_speed(iter/s)": 0.66977 }, { "epoch": 0.39244248318409664, "grad_norm": 6.534266948699951, "learning_rate": 9.848962792941158e-05, "loss": 2.292491149902344, "memory(GiB)": 70.96, "step": 9160, "token_acc": 0.5367647058823529, 
"train_speed(iter/s)": 0.669879 }, { "epoch": 0.3926566985133456, "grad_norm": 3.996215343475342, "learning_rate": 9.848798588909478e-05, "loss": 2.3992282867431642, "memory(GiB)": 70.96, "step": 9165, "token_acc": 0.4407894736842105, "train_speed(iter/s)": 0.669934 }, { "epoch": 0.3928709138425946, "grad_norm": 4.259632587432861, "learning_rate": 9.848634297037261e-05, "loss": 2.7571319580078124, "memory(GiB)": 70.96, "step": 9170, "token_acc": 0.4152823920265781, "train_speed(iter/s)": 0.669984 }, { "epoch": 0.3930851291718435, "grad_norm": 3.6715610027313232, "learning_rate": 9.848469917327487e-05, "loss": 2.4442115783691407, "memory(GiB)": 70.96, "step": 9175, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.669999 }, { "epoch": 0.3932993445010925, "grad_norm": 2.5918970108032227, "learning_rate": 9.848305449783132e-05, "loss": 2.4979719161987304, "memory(GiB)": 70.96, "step": 9180, "token_acc": 0.48348348348348347, "train_speed(iter/s)": 0.670056 }, { "epoch": 0.3935135598303415, "grad_norm": 2.72308087348938, "learning_rate": 9.848140894407176e-05, "loss": 2.362245559692383, "memory(GiB)": 70.96, "step": 9185, "token_acc": 0.4766355140186916, "train_speed(iter/s)": 0.670088 }, { "epoch": 0.3937277751595904, "grad_norm": 3.13216495513916, "learning_rate": 9.8479762512026e-05, "loss": 2.4450860977172852, "memory(GiB)": 70.96, "step": 9190, "token_acc": 0.504950495049505, "train_speed(iter/s)": 0.670084 }, { "epoch": 0.3939419904888394, "grad_norm": 3.0398004055023193, "learning_rate": 9.847811520172385e-05, "loss": 2.0587717056274415, "memory(GiB)": 70.96, "step": 9195, "token_acc": 0.5464684014869888, "train_speed(iter/s)": 0.670102 }, { "epoch": 0.39415620581808836, "grad_norm": 2.4632604122161865, "learning_rate": 9.847646701319519e-05, "loss": 2.5268728256225588, "memory(GiB)": 70.96, "step": 9200, "token_acc": 0.42990654205607476, "train_speed(iter/s)": 0.670138 }, { "epoch": 0.3943704211473373, "grad_norm": 3.3514020442962646, "learning_rate": 
9.847481794646984e-05, "loss": 2.4244211196899412, "memory(GiB)": 70.96, "step": 9205, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.670124 }, { "epoch": 0.39458463647658626, "grad_norm": 3.8796401023864746, "learning_rate": 9.847316800157769e-05, "loss": 2.591960906982422, "memory(GiB)": 70.96, "step": 9210, "token_acc": 0.45977011494252873, "train_speed(iter/s)": 0.670133 }, { "epoch": 0.39479885180583524, "grad_norm": 4.14222526550293, "learning_rate": 9.847151717854863e-05, "loss": 2.311723518371582, "memory(GiB)": 70.96, "step": 9215, "token_acc": 0.4873417721518987, "train_speed(iter/s)": 0.670159 }, { "epoch": 0.39501306713508416, "grad_norm": 2.944640636444092, "learning_rate": 9.846986547741256e-05, "loss": 2.192682647705078, "memory(GiB)": 70.96, "step": 9220, "token_acc": 0.5270758122743683, "train_speed(iter/s)": 0.670192 }, { "epoch": 0.39522728246433314, "grad_norm": 3.5746099948883057, "learning_rate": 9.846821289819943e-05, "loss": 2.486053466796875, "memory(GiB)": 70.96, "step": 9225, "token_acc": 0.4863013698630137, "train_speed(iter/s)": 0.670282 }, { "epoch": 0.3954414977935821, "grad_norm": 3.281283140182495, "learning_rate": 9.846655944093915e-05, "loss": 2.5794818878173826, "memory(GiB)": 70.96, "step": 9230, "token_acc": 0.4786885245901639, "train_speed(iter/s)": 0.670173 }, { "epoch": 0.39565571312283104, "grad_norm": 4.1005072593688965, "learning_rate": 9.846490510566167e-05, "loss": 2.227798843383789, "memory(GiB)": 70.96, "step": 9235, "token_acc": 0.4754601226993865, "train_speed(iter/s)": 0.670256 }, { "epoch": 0.39586992845208, "grad_norm": 3.7227025032043457, "learning_rate": 9.846324989239697e-05, "loss": 2.463117218017578, "memory(GiB)": 70.96, "step": 9240, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 0.670346 }, { "epoch": 0.396084143781329, "grad_norm": 2.727323532104492, "learning_rate": 9.846159380117504e-05, "loss": 2.4035552978515624, "memory(GiB)": 70.96, "step": 9245, "token_acc": 
0.5015576323987538, "train_speed(iter/s)": 0.670344 }, { "epoch": 0.396298359110578, "grad_norm": 3.3202311992645264, "learning_rate": 9.845993683202588e-05, "loss": 2.2299354553222654, "memory(GiB)": 70.96, "step": 9250, "token_acc": 0.4924812030075188, "train_speed(iter/s)": 0.670187 }, { "epoch": 0.3965125744398269, "grad_norm": 3.0986382961273193, "learning_rate": 9.84582789849795e-05, "loss": 2.1950145721435548, "memory(GiB)": 70.96, "step": 9255, "token_acc": 0.5298507462686567, "train_speed(iter/s)": 0.670197 }, { "epoch": 0.3967267897690759, "grad_norm": 5.583611488342285, "learning_rate": 9.845662026006595e-05, "loss": 2.399874687194824, "memory(GiB)": 70.96, "step": 9260, "token_acc": 0.4809384164222874, "train_speed(iter/s)": 0.67023 }, { "epoch": 0.39694100509832486, "grad_norm": 3.2464230060577393, "learning_rate": 9.845496065731526e-05, "loss": 2.0935001373291016, "memory(GiB)": 70.96, "step": 9265, "token_acc": 0.4888888888888889, "train_speed(iter/s)": 0.670186 }, { "epoch": 0.3971552204275738, "grad_norm": 3.7659859657287598, "learning_rate": 9.845330017675749e-05, "loss": 2.4113229751586913, "memory(GiB)": 70.96, "step": 9270, "token_acc": 0.4867924528301887, "train_speed(iter/s)": 0.670143 }, { "epoch": 0.39736943575682276, "grad_norm": 3.1539950370788574, "learning_rate": 9.845163881842276e-05, "loss": 2.3731632232666016, "memory(GiB)": 70.96, "step": 9275, "token_acc": 0.49019607843137253, "train_speed(iter/s)": 0.670192 }, { "epoch": 0.39758365108607174, "grad_norm": 3.39998459815979, "learning_rate": 9.844997658234112e-05, "loss": 2.3211475372314454, "memory(GiB)": 70.96, "step": 9280, "token_acc": 0.5286195286195287, "train_speed(iter/s)": 0.670163 }, { "epoch": 0.39779786641532067, "grad_norm": 3.692470073699951, "learning_rate": 9.844831346854271e-05, "loss": 2.3826221466064452, "memory(GiB)": 70.96, "step": 9285, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.670134 }, { "epoch": 0.39801208174456965, "grad_norm": 
3.4480478763580322, "learning_rate": 9.844664947705766e-05, "loss": 2.517098617553711, "memory(GiB)": 70.96, "step": 9290, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.670197 }, { "epoch": 0.3982262970738186, "grad_norm": 4.278590202331543, "learning_rate": 9.844498460791611e-05, "loss": 2.1444042205810545, "memory(GiB)": 70.96, "step": 9295, "token_acc": 0.5252100840336135, "train_speed(iter/s)": 0.670156 }, { "epoch": 0.39844051240306755, "grad_norm": 3.075646162033081, "learning_rate": 9.844331886114821e-05, "loss": 2.6863376617431642, "memory(GiB)": 70.96, "step": 9300, "token_acc": 0.4394904458598726, "train_speed(iter/s)": 0.670134 }, { "epoch": 0.3986547277323165, "grad_norm": 3.977525472640991, "learning_rate": 9.844165223678414e-05, "loss": 2.6361820220947267, "memory(GiB)": 70.96, "step": 9305, "token_acc": 0.4950166112956811, "train_speed(iter/s)": 0.670162 }, { "epoch": 0.3988689430615655, "grad_norm": 3.515155553817749, "learning_rate": 9.843998473485412e-05, "loss": 2.5129642486572266, "memory(GiB)": 70.96, "step": 9310, "token_acc": 0.4828767123287671, "train_speed(iter/s)": 0.670157 }, { "epoch": 0.39908315839081443, "grad_norm": 4.198483943939209, "learning_rate": 9.843831635538832e-05, "loss": 2.4317535400390624, "memory(GiB)": 70.96, "step": 9315, "token_acc": 0.4773662551440329, "train_speed(iter/s)": 0.670146 }, { "epoch": 0.3992973737200634, "grad_norm": 3.5568878650665283, "learning_rate": 9.843664709841698e-05, "loss": 2.441328239440918, "memory(GiB)": 70.96, "step": 9320, "token_acc": 0.48615384615384616, "train_speed(iter/s)": 0.670151 }, { "epoch": 0.3995115890493124, "grad_norm": 3.3332793712615967, "learning_rate": 9.843497696397035e-05, "loss": 2.363911247253418, "memory(GiB)": 70.96, "step": 9325, "token_acc": 0.4899598393574297, "train_speed(iter/s)": 0.670134 }, { "epoch": 0.3997258043785613, "grad_norm": 3.0102357864379883, "learning_rate": 9.843330595207867e-05, "loss": 2.1540950775146483, "memory(GiB)": 70.96, 
"step": 9330, "token_acc": 0.5091575091575091, "train_speed(iter/s)": 0.670127 }, { "epoch": 0.3999400197078103, "grad_norm": 5.879663467407227, "learning_rate": 9.843163406277221e-05, "loss": 2.3470737457275392, "memory(GiB)": 70.96, "step": 9335, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.670118 }, { "epoch": 0.40015423503705927, "grad_norm": 3.347581386566162, "learning_rate": 9.842996129608129e-05, "loss": 2.6645084381103517, "memory(GiB)": 70.96, "step": 9340, "token_acc": 0.4537313432835821, "train_speed(iter/s)": 0.670069 }, { "epoch": 0.4003684503663082, "grad_norm": 3.8423662185668945, "learning_rate": 9.842828765203617e-05, "loss": 2.398000717163086, "memory(GiB)": 70.96, "step": 9345, "token_acc": 0.48091603053435117, "train_speed(iter/s)": 0.669987 }, { "epoch": 0.40058266569555717, "grad_norm": 2.949366807937622, "learning_rate": 9.842661313066719e-05, "loss": 2.5471607208251954, "memory(GiB)": 70.96, "step": 9350, "token_acc": 0.48125, "train_speed(iter/s)": 0.669979 }, { "epoch": 0.40079688102480615, "grad_norm": 3.4353435039520264, "learning_rate": 9.84249377320047e-05, "loss": 2.196564865112305, "memory(GiB)": 70.96, "step": 9355, "token_acc": 0.5113636363636364, "train_speed(iter/s)": 0.67005 }, { "epoch": 0.4010110963540551, "grad_norm": 4.258780479431152, "learning_rate": 9.842326145607903e-05, "loss": 2.3884925842285156, "memory(GiB)": 70.96, "step": 9360, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.670024 }, { "epoch": 0.40122531168330405, "grad_norm": 2.9348130226135254, "learning_rate": 9.842158430292054e-05, "loss": 2.510984420776367, "memory(GiB)": 70.96, "step": 9365, "token_acc": 0.484375, "train_speed(iter/s)": 0.670025 }, { "epoch": 0.40143952701255303, "grad_norm": 3.068787097930908, "learning_rate": 9.841990627255964e-05, "loss": 2.3204761505126954, "memory(GiB)": 70.96, "step": 9370, "token_acc": 0.5047318611987381, "train_speed(iter/s)": 0.669953 }, { "epoch": 0.40165374234180196, "grad_norm": 
3.1317009925842285, "learning_rate": 9.841822736502671e-05, "loss": 2.7769390106201173, "memory(GiB)": 70.96, "step": 9375, "token_acc": 0.4425087108013937, "train_speed(iter/s)": 0.670014 }, { "epoch": 0.40186795767105093, "grad_norm": 4.796217918395996, "learning_rate": 9.841654758035218e-05, "loss": 2.4983354568481446, "memory(GiB)": 70.96, "step": 9380, "token_acc": 0.4485049833887043, "train_speed(iter/s)": 0.669954 }, { "epoch": 0.4020821730002999, "grad_norm": 4.337300777435303, "learning_rate": 9.841486691856647e-05, "loss": 2.3120628356933595, "memory(GiB)": 70.96, "step": 9385, "token_acc": 0.5672268907563025, "train_speed(iter/s)": 0.669877 }, { "epoch": 0.40229638832954884, "grad_norm": 3.1434738636016846, "learning_rate": 9.841318537970003e-05, "loss": 2.452569007873535, "memory(GiB)": 70.96, "step": 9390, "token_acc": 0.4964788732394366, "train_speed(iter/s)": 0.669955 }, { "epoch": 0.4025106036587978, "grad_norm": 3.4035141468048096, "learning_rate": 9.841150296378332e-05, "loss": 2.379144477844238, "memory(GiB)": 70.96, "step": 9395, "token_acc": 0.4794007490636704, "train_speed(iter/s)": 0.669943 }, { "epoch": 0.4027248189880468, "grad_norm": 3.245485782623291, "learning_rate": 9.840981967084682e-05, "loss": 2.394492530822754, "memory(GiB)": 70.96, "step": 9400, "token_acc": 0.5049180327868853, "train_speed(iter/s)": 0.669914 }, { "epoch": 0.4029390343172957, "grad_norm": 3.2188515663146973, "learning_rate": 9.840813550092101e-05, "loss": 2.540700149536133, "memory(GiB)": 70.96, "step": 9405, "token_acc": 0.4697508896797153, "train_speed(iter/s)": 0.66994 }, { "epoch": 0.4031532496465447, "grad_norm": 3.3072807788848877, "learning_rate": 9.840645045403644e-05, "loss": 2.2159690856933594, "memory(GiB)": 70.96, "step": 9410, "token_acc": 0.5433962264150943, "train_speed(iter/s)": 0.669977 }, { "epoch": 0.4033674649757937, "grad_norm": 3.5079829692840576, "learning_rate": 9.840476453022361e-05, "loss": 2.3363153457641603, "memory(GiB)": 70.96, "step": 
9415, "token_acc": 0.48606811145510836, "train_speed(iter/s)": 0.669986 }, { "epoch": 0.40358168030504266, "grad_norm": 2.7949886322021484, "learning_rate": 9.840307772951304e-05, "loss": 2.275368309020996, "memory(GiB)": 70.96, "step": 9420, "token_acc": 0.5017182130584192, "train_speed(iter/s)": 0.669908 }, { "epoch": 0.4037958956342916, "grad_norm": 3.4426748752593994, "learning_rate": 9.840139005193531e-05, "loss": 2.314261627197266, "memory(GiB)": 70.96, "step": 9425, "token_acc": 0.5253164556962026, "train_speed(iter/s)": 0.669878 }, { "epoch": 0.40401011096354056, "grad_norm": 2.804164171218872, "learning_rate": 9.839970149752102e-05, "loss": 2.3219329833984377, "memory(GiB)": 70.96, "step": 9430, "token_acc": 0.5089605734767025, "train_speed(iter/s)": 0.669891 }, { "epoch": 0.40422432629278954, "grad_norm": 3.6822149753570557, "learning_rate": 9.839801206630073e-05, "loss": 2.4183563232421874, "memory(GiB)": 70.96, "step": 9435, "token_acc": 0.4626865671641791, "train_speed(iter/s)": 0.669872 }, { "epoch": 0.40443854162203846, "grad_norm": 4.216433048248291, "learning_rate": 9.839632175830504e-05, "loss": 2.4957557678222657, "memory(GiB)": 70.96, "step": 9440, "token_acc": 0.483271375464684, "train_speed(iter/s)": 0.669916 }, { "epoch": 0.40465275695128744, "grad_norm": 2.3051204681396484, "learning_rate": 9.839463057356459e-05, "loss": 2.2006675720214846, "memory(GiB)": 70.96, "step": 9445, "token_acc": 0.49712643678160917, "train_speed(iter/s)": 0.669873 }, { "epoch": 0.4048669722805364, "grad_norm": 3.9550886154174805, "learning_rate": 9.839293851211e-05, "loss": 2.5844322204589845, "memory(GiB)": 70.96, "step": 9450, "token_acc": 0.4478114478114478, "train_speed(iter/s)": 0.669902 }, { "epoch": 0.40508118760978534, "grad_norm": 4.1418890953063965, "learning_rate": 9.839124557397195e-05, "loss": 2.3645256042480467, "memory(GiB)": 70.96, "step": 9455, "token_acc": 0.4880952380952381, "train_speed(iter/s)": 0.669972 }, { "epoch": 0.4052954029390343, 
"grad_norm": 3.7454280853271484, "learning_rate": 9.838955175918108e-05, "loss": 2.387799072265625, "memory(GiB)": 70.96, "step": 9460, "token_acc": 0.48863636363636365, "train_speed(iter/s)": 0.670005 }, { "epoch": 0.4055096182682833, "grad_norm": 3.042099952697754, "learning_rate": 9.838785706776808e-05, "loss": 2.3956367492675783, "memory(GiB)": 70.96, "step": 9465, "token_acc": 0.4966887417218543, "train_speed(iter/s)": 0.669991 }, { "epoch": 0.4057238335975322, "grad_norm": 4.755323886871338, "learning_rate": 9.838616149976367e-05, "loss": 2.4374732971191406, "memory(GiB)": 70.96, "step": 9470, "token_acc": 0.4807692307692308, "train_speed(iter/s)": 0.670016 }, { "epoch": 0.4059380489267812, "grad_norm": 3.157120943069458, "learning_rate": 9.838446505519853e-05, "loss": 2.4731801986694335, "memory(GiB)": 70.96, "step": 9475, "token_acc": 0.46686746987951805, "train_speed(iter/s)": 0.670018 }, { "epoch": 0.4061522642560302, "grad_norm": 3.6192803382873535, "learning_rate": 9.838276773410344e-05, "loss": 2.501349449157715, "memory(GiB)": 70.96, "step": 9480, "token_acc": 0.4261744966442953, "train_speed(iter/s)": 0.670045 }, { "epoch": 0.4063664795852791, "grad_norm": 3.790822982788086, "learning_rate": 9.838106953650912e-05, "loss": 2.5028156280517577, "memory(GiB)": 70.96, "step": 9485, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.670105 }, { "epoch": 0.4065806949145281, "grad_norm": 3.5197300910949707, "learning_rate": 9.837937046244635e-05, "loss": 2.2201587677001955, "memory(GiB)": 70.96, "step": 9490, "token_acc": 0.4847328244274809, "train_speed(iter/s)": 0.670131 }, { "epoch": 0.40679491024377706, "grad_norm": 3.2107949256896973, "learning_rate": 9.837767051194589e-05, "loss": 2.4222074508666993, "memory(GiB)": 70.96, "step": 9495, "token_acc": 0.4529616724738676, "train_speed(iter/s)": 0.670115 }, { "epoch": 0.407009125573026, "grad_norm": 2.5821540355682373, "learning_rate": 9.837596968503854e-05, "loss": 2.325736427307129, "memory(GiB)": 
70.96, "step": 9500, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.67008 }, { "epoch": 0.407009125573026, "eval_loss": 2.1936752796173096, "eval_runtime": 17.1614, "eval_samples_per_second": 5.827, "eval_steps_per_second": 5.827, "eval_token_acc": 0.48880597014925375, "step": 9500 }, { "epoch": 0.40722334090227497, "grad_norm": 3.6913692951202393, "learning_rate": 9.837426798175514e-05, "loss": 2.5483219146728517, "memory(GiB)": 70.96, "step": 9505, "token_acc": 0.4815481548154815, "train_speed(iter/s)": 0.66913 }, { "epoch": 0.40743755623152395, "grad_norm": 4.175588607788086, "learning_rate": 9.83725654021265e-05, "loss": 2.6253520965576174, "memory(GiB)": 70.96, "step": 9510, "token_acc": 0.4870848708487085, "train_speed(iter/s)": 0.669131 }, { "epoch": 0.40765177156077287, "grad_norm": 3.9089949131011963, "learning_rate": 9.837086194618344e-05, "loss": 2.552115249633789, "memory(GiB)": 70.96, "step": 9515, "token_acc": 0.47335423197492166, "train_speed(iter/s)": 0.669009 }, { "epoch": 0.40786598689002185, "grad_norm": 3.885526180267334, "learning_rate": 9.836915761395685e-05, "loss": 2.3022476196289063, "memory(GiB)": 70.96, "step": 9520, "token_acc": 0.4909090909090909, "train_speed(iter/s)": 0.668947 }, { "epoch": 0.4080802022192708, "grad_norm": 3.3557472229003906, "learning_rate": 9.836745240547758e-05, "loss": 2.3124919891357423, "memory(GiB)": 70.96, "step": 9525, "token_acc": 0.43824701195219123, "train_speed(iter/s)": 0.668973 }, { "epoch": 0.40829441754851975, "grad_norm": 3.1050713062286377, "learning_rate": 9.836574632077655e-05, "loss": 2.235109329223633, "memory(GiB)": 70.96, "step": 9530, "token_acc": 0.5305343511450382, "train_speed(iter/s)": 0.66892 }, { "epoch": 0.40850863287776873, "grad_norm": 3.2920374870300293, "learning_rate": 9.836403935988465e-05, "loss": 2.6214458465576174, "memory(GiB)": 70.96, "step": 9535, "token_acc": 0.4440894568690096, "train_speed(iter/s)": 0.668942 }, { "epoch": 0.4087228482070177, "grad_norm": 
3.3590948581695557, "learning_rate": 9.836233152283282e-05, "loss": 2.438225746154785, "memory(GiB)": 70.96, "step": 9540, "token_acc": 0.4788273615635179, "train_speed(iter/s)": 0.668905 }, { "epoch": 0.40893706353626663, "grad_norm": 3.5652427673339844, "learning_rate": 9.836062280965197e-05, "loss": 2.5664674758911135, "memory(GiB)": 70.96, "step": 9545, "token_acc": 0.43853820598006643, "train_speed(iter/s)": 0.668869 }, { "epoch": 0.4091512788655156, "grad_norm": 3.6805777549743652, "learning_rate": 9.835891322037308e-05, "loss": 2.4403993606567385, "memory(GiB)": 70.96, "step": 9550, "token_acc": 0.5, "train_speed(iter/s)": 0.668891 }, { "epoch": 0.4093654941947646, "grad_norm": 2.8743674755096436, "learning_rate": 9.835720275502712e-05, "loss": 2.4508209228515625, "memory(GiB)": 70.96, "step": 9555, "token_acc": 0.4520123839009288, "train_speed(iter/s)": 0.668952 }, { "epoch": 0.4095797095240135, "grad_norm": 3.117770195007324, "learning_rate": 9.835549141364506e-05, "loss": 2.5060348510742188, "memory(GiB)": 70.96, "step": 9560, "token_acc": 0.4850498338870432, "train_speed(iter/s)": 0.668994 }, { "epoch": 0.4097939248532625, "grad_norm": 3.08426833152771, "learning_rate": 9.835377919625792e-05, "loss": 2.618630027770996, "memory(GiB)": 70.96, "step": 9565, "token_acc": 0.43462897526501765, "train_speed(iter/s)": 0.668914 }, { "epoch": 0.41000814018251147, "grad_norm": 3.2033205032348633, "learning_rate": 9.83520661028967e-05, "loss": 2.3279325485229494, "memory(GiB)": 70.96, "step": 9570, "token_acc": 0.5016722408026756, "train_speed(iter/s)": 0.668943 }, { "epoch": 0.4102223555117604, "grad_norm": 3.3103368282318115, "learning_rate": 9.835035213359245e-05, "loss": 2.460597801208496, "memory(GiB)": 70.96, "step": 9575, "token_acc": 0.4790996784565916, "train_speed(iter/s)": 0.66896 }, { "epoch": 0.4104365708410094, "grad_norm": 3.2215206623077393, "learning_rate": 9.834863728837622e-05, "loss": 2.3461532592773438, "memory(GiB)": 70.96, "step": 9580, 
"token_acc": 0.5292096219931272, "train_speed(iter/s)": 0.668969 }, { "epoch": 0.41065078617025835, "grad_norm": 3.259474515914917, "learning_rate": 9.834692156727906e-05, "loss": 2.3624431610107424, "memory(GiB)": 70.96, "step": 9585, "token_acc": 0.540785498489426, "train_speed(iter/s)": 0.669028 }, { "epoch": 0.41086500149950733, "grad_norm": 2.8796722888946533, "learning_rate": 9.834520497033206e-05, "loss": 2.266351890563965, "memory(GiB)": 70.96, "step": 9590, "token_acc": 0.5, "train_speed(iter/s)": 0.669018 }, { "epoch": 0.41107921682875626, "grad_norm": 3.185962677001953, "learning_rate": 9.834348749756634e-05, "loss": 2.4413402557373045, "memory(GiB)": 70.96, "step": 9595, "token_acc": 0.46579804560260585, "train_speed(iter/s)": 0.669046 }, { "epoch": 0.41129343215800523, "grad_norm": 3.317958354949951, "learning_rate": 9.834176914901298e-05, "loss": 2.290129280090332, "memory(GiB)": 70.96, "step": 9600, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.669025 }, { "epoch": 0.4115076474872542, "grad_norm": 3.248084783554077, "learning_rate": 9.834004992470312e-05, "loss": 2.2721799850463866, "memory(GiB)": 70.96, "step": 9605, "token_acc": 0.4906832298136646, "train_speed(iter/s)": 0.669043 }, { "epoch": 0.41172186281650314, "grad_norm": 3.576434373855591, "learning_rate": 9.833832982466792e-05, "loss": 2.500484275817871, "memory(GiB)": 70.96, "step": 9610, "token_acc": 0.49838187702265374, "train_speed(iter/s)": 0.66907 }, { "epoch": 0.4119360781457521, "grad_norm": 2.9177169799804688, "learning_rate": 9.833660884893853e-05, "loss": 2.610107421875, "memory(GiB)": 70.96, "step": 9615, "token_acc": 0.44966442953020136, "train_speed(iter/s)": 0.669038 }, { "epoch": 0.4121502934750011, "grad_norm": 5.019582271575928, "learning_rate": 9.833488699754612e-05, "loss": 2.151416778564453, "memory(GiB)": 70.96, "step": 9620, "token_acc": 0.5133079847908745, "train_speed(iter/s)": 0.669055 }, { "epoch": 0.41236450880425, "grad_norm": 4.384885787963867, 
"learning_rate": 9.833316427052189e-05, "loss": 2.4538516998291016, "memory(GiB)": 70.96, "step": 9625, "token_acc": 0.5059760956175299, "train_speed(iter/s)": 0.668967 }, { "epoch": 0.412578724133499, "grad_norm": 3.171250581741333, "learning_rate": 9.833144066789706e-05, "loss": 2.32763786315918, "memory(GiB)": 70.96, "step": 9630, "token_acc": 0.46557377049180326, "train_speed(iter/s)": 0.669044 }, { "epoch": 0.412792939462748, "grad_norm": 3.407620429992676, "learning_rate": 9.832971618970284e-05, "loss": 2.0717119216918944, "memory(GiB)": 70.96, "step": 9635, "token_acc": 0.550185873605948, "train_speed(iter/s)": 0.669125 }, { "epoch": 0.4130071547919969, "grad_norm": 3.5942587852478027, "learning_rate": 9.832799083597046e-05, "loss": 2.403942680358887, "memory(GiB)": 70.96, "step": 9640, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.669172 }, { "epoch": 0.4132213701212459, "grad_norm": 3.8447418212890625, "learning_rate": 9.83262646067312e-05, "loss": 2.5205032348632814, "memory(GiB)": 70.96, "step": 9645, "token_acc": 0.4751131221719457, "train_speed(iter/s)": 0.669226 }, { "epoch": 0.41343558545049486, "grad_norm": 3.7392115592956543, "learning_rate": 9.832453750201633e-05, "loss": 2.601795196533203, "memory(GiB)": 70.96, "step": 9650, "token_acc": 0.45517241379310347, "train_speed(iter/s)": 0.66929 }, { "epoch": 0.4136498007797438, "grad_norm": 3.2309932708740234, "learning_rate": 9.832280952185711e-05, "loss": 2.330088806152344, "memory(GiB)": 70.96, "step": 9655, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.669381 }, { "epoch": 0.41386401610899276, "grad_norm": 3.5177197456359863, "learning_rate": 9.832108066628489e-05, "loss": 2.65786190032959, "memory(GiB)": 70.96, "step": 9660, "token_acc": 0.4603174603174603, "train_speed(iter/s)": 0.66939 }, { "epoch": 0.41407823143824174, "grad_norm": 3.1408402919769287, "learning_rate": 9.831935093533096e-05, "loss": 2.6233894348144533, "memory(GiB)": 70.96, "step": 9665, "token_acc": 
0.45723684210526316, "train_speed(iter/s)": 0.669368 }, { "epoch": 0.41429244676749066, "grad_norm": 3.259899377822876, "learning_rate": 9.831762032902665e-05, "loss": 2.4050518035888673, "memory(GiB)": 70.96, "step": 9670, "token_acc": 0.45425867507886436, "train_speed(iter/s)": 0.66942 }, { "epoch": 0.41450666209673964, "grad_norm": 3.3889524936676025, "learning_rate": 9.831588884740332e-05, "loss": 2.3198551177978515, "memory(GiB)": 70.96, "step": 9675, "token_acc": 0.49206349206349204, "train_speed(iter/s)": 0.669338 }, { "epoch": 0.4147208774259886, "grad_norm": 3.392216682434082, "learning_rate": 9.831415649049237e-05, "loss": 2.3732032775878906, "memory(GiB)": 70.96, "step": 9680, "token_acc": 0.48188405797101447, "train_speed(iter/s)": 0.669381 }, { "epoch": 0.41493509275523754, "grad_norm": 2.9937593936920166, "learning_rate": 9.83124232583251e-05, "loss": 2.1655139923095703, "memory(GiB)": 70.96, "step": 9685, "token_acc": 0.5625, "train_speed(iter/s)": 0.6693 }, { "epoch": 0.4151493080844865, "grad_norm": 3.6588802337646484, "learning_rate": 9.8310689150933e-05, "loss": 2.380560874938965, "memory(GiB)": 70.96, "step": 9690, "token_acc": 0.5351170568561873, "train_speed(iter/s)": 0.669308 }, { "epoch": 0.4153635234137355, "grad_norm": 3.5199170112609863, "learning_rate": 9.830895416834745e-05, "loss": 2.474161148071289, "memory(GiB)": 70.96, "step": 9695, "token_acc": 0.44947735191637633, "train_speed(iter/s)": 0.669374 }, { "epoch": 0.4155777387429844, "grad_norm": 3.2991745471954346, "learning_rate": 9.830721831059986e-05, "loss": 2.7805130004882814, "memory(GiB)": 70.96, "step": 9700, "token_acc": 0.42902208201892744, "train_speed(iter/s)": 0.669398 }, { "epoch": 0.4157919540722334, "grad_norm": 3.3807597160339355, "learning_rate": 9.83054815777217e-05, "loss": 2.5031482696533205, "memory(GiB)": 70.96, "step": 9705, "token_acc": 0.4728434504792332, "train_speed(iter/s)": 0.669355 }, { "epoch": 0.4160061694014824, "grad_norm": 3.5021812915802, 
"learning_rate": 9.830374396974442e-05, "loss": 2.53304443359375, "memory(GiB)": 70.96, "step": 9710, "token_acc": 0.45294117647058824, "train_speed(iter/s)": 0.669356 }, { "epoch": 0.4162203847307313, "grad_norm": 2.8501946926116943, "learning_rate": 9.830200548669952e-05, "loss": 2.387770652770996, "memory(GiB)": 70.96, "step": 9715, "token_acc": 0.4635036496350365, "train_speed(iter/s)": 0.669313 }, { "epoch": 0.4164346000599803, "grad_norm": 3.041288375854492, "learning_rate": 9.830026612861847e-05, "loss": 2.407380485534668, "memory(GiB)": 70.96, "step": 9720, "token_acc": 0.49853372434017595, "train_speed(iter/s)": 0.669322 }, { "epoch": 0.41664881538922927, "grad_norm": 3.7580134868621826, "learning_rate": 9.829852589553278e-05, "loss": 2.7045467376708983, "memory(GiB)": 70.96, "step": 9725, "token_acc": 0.49085365853658536, "train_speed(iter/s)": 0.669262 }, { "epoch": 0.4168630307184782, "grad_norm": 3.7203164100646973, "learning_rate": 9.829678478747402e-05, "loss": 2.626369667053223, "memory(GiB)": 70.96, "step": 9730, "token_acc": 0.4601226993865031, "train_speed(iter/s)": 0.669352 }, { "epoch": 0.41707724604772717, "grad_norm": 3.307079792022705, "learning_rate": 9.829504280447366e-05, "loss": 2.351698875427246, "memory(GiB)": 70.96, "step": 9735, "token_acc": 0.503448275862069, "train_speed(iter/s)": 0.669375 }, { "epoch": 0.41729146137697615, "grad_norm": 3.8782505989074707, "learning_rate": 9.829329994656332e-05, "loss": 2.092409133911133, "memory(GiB)": 70.96, "step": 9740, "token_acc": 0.5075187969924813, "train_speed(iter/s)": 0.669447 }, { "epoch": 0.41750567670622507, "grad_norm": 3.755429744720459, "learning_rate": 9.829155621377455e-05, "loss": 2.115167236328125, "memory(GiB)": 70.96, "step": 9745, "token_acc": 0.5487012987012987, "train_speed(iter/s)": 0.6695 }, { "epoch": 0.41771989203547405, "grad_norm": 3.3256399631500244, "learning_rate": 9.828981160613892e-05, "loss": 2.2228845596313476, "memory(GiB)": 70.96, "step": 9750, "token_acc": 
0.5483870967741935, "train_speed(iter/s)": 0.669567 }, { "epoch": 0.41793410736472303, "grad_norm": 4.952759265899658, "learning_rate": 9.828806612368806e-05, "loss": 2.524056816101074, "memory(GiB)": 70.96, "step": 9755, "token_acc": 0.5084033613445378, "train_speed(iter/s)": 0.669604 }, { "epoch": 0.418148322693972, "grad_norm": 4.147083282470703, "learning_rate": 9.828631976645359e-05, "loss": 2.3649175643920897, "memory(GiB)": 70.96, "step": 9760, "token_acc": 0.45925925925925926, "train_speed(iter/s)": 0.669563 }, { "epoch": 0.41836253802322093, "grad_norm": 3.320779800415039, "learning_rate": 9.828457253446714e-05, "loss": 2.5686115264892577, "memory(GiB)": 70.96, "step": 9765, "token_acc": 0.4573170731707317, "train_speed(iter/s)": 0.669445 }, { "epoch": 0.4185767533524699, "grad_norm": 3.840336799621582, "learning_rate": 9.828282442776036e-05, "loss": 2.437259864807129, "memory(GiB)": 70.96, "step": 9770, "token_acc": 0.5037593984962406, "train_speed(iter/s)": 0.669425 }, { "epoch": 0.4187909686817189, "grad_norm": 3.1854681968688965, "learning_rate": 9.828107544636491e-05, "loss": 2.393703269958496, "memory(GiB)": 70.96, "step": 9775, "token_acc": 0.5098039215686274, "train_speed(iter/s)": 0.669476 }, { "epoch": 0.4190051840109678, "grad_norm": 3.3214805126190186, "learning_rate": 9.82793255903125e-05, "loss": 2.2702632904052735, "memory(GiB)": 70.96, "step": 9780, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.669394 }, { "epoch": 0.4192193993402168, "grad_norm": 3.2827773094177246, "learning_rate": 9.827757485963482e-05, "loss": 2.4125627517700194, "memory(GiB)": 70.96, "step": 9785, "token_acc": 0.4930555555555556, "train_speed(iter/s)": 0.669252 }, { "epoch": 0.41943361466946577, "grad_norm": 3.102537155151367, "learning_rate": 9.827582325436358e-05, "loss": 2.203380584716797, "memory(GiB)": 70.96, "step": 9790, "token_acc": 0.5241635687732342, "train_speed(iter/s)": 0.669248 }, { "epoch": 0.4196478299987147, "grad_norm": 3.352046251296997, 
"learning_rate": 9.827407077453052e-05, "loss": 2.5030216217041015, "memory(GiB)": 70.96, "step": 9795, "token_acc": 0.463768115942029, "train_speed(iter/s)": 0.669243 }, { "epoch": 0.4198620453279637, "grad_norm": 4.78767204284668, "learning_rate": 9.827231742016738e-05, "loss": 2.6636219024658203, "memory(GiB)": 70.96, "step": 9800, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.669235 }, { "epoch": 0.42007626065721265, "grad_norm": 2.6865532398223877, "learning_rate": 9.827056319130592e-05, "loss": 2.156420135498047, "memory(GiB)": 70.96, "step": 9805, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.669248 }, { "epoch": 0.4202904759864616, "grad_norm": 3.583724021911621, "learning_rate": 9.826880808797794e-05, "loss": 2.2697629928588867, "memory(GiB)": 70.96, "step": 9810, "token_acc": 0.515748031496063, "train_speed(iter/s)": 0.669281 }, { "epoch": 0.42050469131571055, "grad_norm": 3.3312065601348877, "learning_rate": 9.82670521102152e-05, "loss": 2.297399139404297, "memory(GiB)": 70.96, "step": 9815, "token_acc": 0.4946236559139785, "train_speed(iter/s)": 0.669339 }, { "epoch": 0.42071890664495953, "grad_norm": 3.4038803577423096, "learning_rate": 9.826529525804956e-05, "loss": 2.315891647338867, "memory(GiB)": 70.96, "step": 9820, "token_acc": 0.45918367346938777, "train_speed(iter/s)": 0.669424 }, { "epoch": 0.42093312197420846, "grad_norm": 4.156130790710449, "learning_rate": 9.826353753151281e-05, "loss": 2.526551055908203, "memory(GiB)": 70.96, "step": 9825, "token_acc": 0.45652173913043476, "train_speed(iter/s)": 0.669458 }, { "epoch": 0.42114733730345744, "grad_norm": 3.8361265659332275, "learning_rate": 9.826177893063679e-05, "loss": 2.511208152770996, "memory(GiB)": 70.96, "step": 9830, "token_acc": 0.4624624624624625, "train_speed(iter/s)": 0.66949 }, { "epoch": 0.4213615526327064, "grad_norm": 3.0611250400543213, "learning_rate": 9.826001945545339e-05, "loss": 2.9104755401611326, "memory(GiB)": 70.96, "step": 9835, "token_acc": 
0.4276923076923077, "train_speed(iter/s)": 0.669505 }, { "epoch": 0.42157576796195534, "grad_norm": 3.70986008644104, "learning_rate": 9.825825910599445e-05, "loss": 2.320789909362793, "memory(GiB)": 70.96, "step": 9840, "token_acc": 0.5018181818181818, "train_speed(iter/s)": 0.669543 }, { "epoch": 0.4217899832912043, "grad_norm": 4.210616111755371, "learning_rate": 9.825649788229189e-05, "loss": 2.1509571075439453, "memory(GiB)": 70.96, "step": 9845, "token_acc": 0.5335570469798657, "train_speed(iter/s)": 0.669513 }, { "epoch": 0.4220041986204533, "grad_norm": 3.294579029083252, "learning_rate": 9.82547357843776e-05, "loss": 2.269580841064453, "memory(GiB)": 70.96, "step": 9850, "token_acc": 0.5406360424028268, "train_speed(iter/s)": 0.669558 }, { "epoch": 0.4222184139497022, "grad_norm": 3.2648141384124756, "learning_rate": 9.82529728122835e-05, "loss": 2.5068626403808594, "memory(GiB)": 70.96, "step": 9855, "token_acc": 0.4511784511784512, "train_speed(iter/s)": 0.669556 }, { "epoch": 0.4224326292789512, "grad_norm": 3.1119613647460938, "learning_rate": 9.825120896604152e-05, "loss": 2.3195175170898437, "memory(GiB)": 70.96, "step": 9860, "token_acc": 0.49122807017543857, "train_speed(iter/s)": 0.669529 }, { "epoch": 0.4226468446082002, "grad_norm": 3.7663543224334717, "learning_rate": 9.824944424568366e-05, "loss": 2.6084869384765623, "memory(GiB)": 70.96, "step": 9865, "token_acc": 0.4520547945205479, "train_speed(iter/s)": 0.66958 }, { "epoch": 0.4228610599374491, "grad_norm": 2.7600269317626953, "learning_rate": 9.824767865124182e-05, "loss": 2.4612985610961915, "memory(GiB)": 70.96, "step": 9870, "token_acc": 0.48638132295719844, "train_speed(iter/s)": 0.669585 }, { "epoch": 0.4230752752666981, "grad_norm": 3.039036512374878, "learning_rate": 9.824591218274803e-05, "loss": 2.570370674133301, "memory(GiB)": 70.96, "step": 9875, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.669574 }, { "epoch": 0.42328949059594706, "grad_norm": 2.9275667667388916, 
"learning_rate": 9.824414484023429e-05, "loss": 2.739590072631836, "memory(GiB)": 70.96, "step": 9880, "token_acc": 0.42894736842105263, "train_speed(iter/s)": 0.669524 }, { "epoch": 0.423503705925196, "grad_norm": 3.287209987640381, "learning_rate": 9.82423766237326e-05, "loss": 2.3812797546386717, "memory(GiB)": 70.96, "step": 9885, "token_acc": 0.47843137254901963, "train_speed(iter/s)": 0.66957 }, { "epoch": 0.42371792125444496, "grad_norm": 2.9857449531555176, "learning_rate": 9.824060753327503e-05, "loss": 2.6109731674194334, "memory(GiB)": 70.96, "step": 9890, "token_acc": 0.4506172839506173, "train_speed(iter/s)": 0.669641 }, { "epoch": 0.42393213658369394, "grad_norm": 2.7316277027130127, "learning_rate": 9.823883756889359e-05, "loss": 2.4499528884887694, "memory(GiB)": 70.96, "step": 9895, "token_acc": 0.4658385093167702, "train_speed(iter/s)": 0.669664 }, { "epoch": 0.42414635191294286, "grad_norm": 2.6538009643554688, "learning_rate": 9.823706673062034e-05, "loss": 2.4069557189941406, "memory(GiB)": 70.96, "step": 9900, "token_acc": 0.46779661016949153, "train_speed(iter/s)": 0.669718 }, { "epoch": 0.42436056724219184, "grad_norm": 3.9792914390563965, "learning_rate": 9.823529501848738e-05, "loss": 2.3269323348999023, "memory(GiB)": 70.96, "step": 9905, "token_acc": 0.4931972789115646, "train_speed(iter/s)": 0.669749 }, { "epoch": 0.4245747825714408, "grad_norm": 3.4364631175994873, "learning_rate": 9.823352243252681e-05, "loss": 2.2729866027832033, "memory(GiB)": 70.96, "step": 9910, "token_acc": 0.51, "train_speed(iter/s)": 0.66978 }, { "epoch": 0.42478899790068975, "grad_norm": 3.4183928966522217, "learning_rate": 9.823174897277073e-05, "loss": 2.502919006347656, "memory(GiB)": 70.96, "step": 9915, "token_acc": 0.4438040345821326, "train_speed(iter/s)": 0.669841 }, { "epoch": 0.4250032132299387, "grad_norm": 2.9404749870300293, "learning_rate": 9.822997463925127e-05, "loss": 2.6388212203979493, "memory(GiB)": 70.96, "step": 9920, "token_acc": 
0.5050847457627119, "train_speed(iter/s)": 0.669848 }, { "epoch": 0.4252174285591877, "grad_norm": 3.3003897666931152, "learning_rate": 9.822819943200059e-05, "loss": 2.497814750671387, "memory(GiB)": 70.96, "step": 9925, "token_acc": 0.48172757475083056, "train_speed(iter/s)": 0.669831 }, { "epoch": 0.4254316438884367, "grad_norm": 3.49035382270813, "learning_rate": 9.822642335105082e-05, "loss": 2.3132457733154297, "memory(GiB)": 70.96, "step": 9930, "token_acc": 0.5175718849840255, "train_speed(iter/s)": 0.669683 }, { "epoch": 0.4256458592176856, "grad_norm": 2.579679012298584, "learning_rate": 9.822464639643417e-05, "loss": 2.851132392883301, "memory(GiB)": 70.96, "step": 9935, "token_acc": 0.44696969696969696, "train_speed(iter/s)": 0.6697 }, { "epoch": 0.4258600745469346, "grad_norm": 4.032270908355713, "learning_rate": 9.822286856818279e-05, "loss": 2.3242382049560546, "memory(GiB)": 70.96, "step": 9940, "token_acc": 0.5120967741935484, "train_speed(iter/s)": 0.669733 }, { "epoch": 0.42607428987618357, "grad_norm": 4.147198677062988, "learning_rate": 9.822108986632892e-05, "loss": 2.322846603393555, "memory(GiB)": 70.96, "step": 9945, "token_acc": 0.4594594594594595, "train_speed(iter/s)": 0.669783 }, { "epoch": 0.4262885052054325, "grad_norm": 5.130254745483398, "learning_rate": 9.821931029090476e-05, "loss": 2.485670280456543, "memory(GiB)": 70.96, "step": 9950, "token_acc": 0.46710526315789475, "train_speed(iter/s)": 0.66979 }, { "epoch": 0.42650272053468147, "grad_norm": 3.4683587551116943, "learning_rate": 9.821752984194256e-05, "loss": 2.1967327117919924, "memory(GiB)": 70.96, "step": 9955, "token_acc": 0.5209790209790209, "train_speed(iter/s)": 0.669781 }, { "epoch": 0.42671693586393045, "grad_norm": 2.7954978942871094, "learning_rate": 9.821574851947459e-05, "loss": 2.4174137115478516, "memory(GiB)": 70.96, "step": 9960, "token_acc": 0.5120481927710844, "train_speed(iter/s)": 0.669836 }, { "epoch": 0.42693115119317937, "grad_norm": 
3.3770837783813477, "learning_rate": 9.82139663235331e-05, "loss": 2.360455131530762, "memory(GiB)": 70.96, "step": 9965, "token_acc": 0.504225352112676, "train_speed(iter/s)": 0.669851 }, { "epoch": 0.42714536652242835, "grad_norm": 4.063412666320801, "learning_rate": 9.82121832541504e-05, "loss": 2.4613142013549805, "memory(GiB)": 70.96, "step": 9970, "token_acc": 0.4601226993865031, "train_speed(iter/s)": 0.669775 }, { "epoch": 0.42735958185167733, "grad_norm": 3.8781652450561523, "learning_rate": 9.821039931135874e-05, "loss": 2.2493587493896485, "memory(GiB)": 70.96, "step": 9975, "token_acc": 0.4859437751004016, "train_speed(iter/s)": 0.669725 }, { "epoch": 0.42757379718092625, "grad_norm": 2.98833966255188, "learning_rate": 9.82086144951905e-05, "loss": 2.4193227767944334, "memory(GiB)": 70.96, "step": 9980, "token_acc": 0.468503937007874, "train_speed(iter/s)": 0.669738 }, { "epoch": 0.42778801251017523, "grad_norm": 3.585679769515991, "learning_rate": 9.820682880567797e-05, "loss": 2.604385566711426, "memory(GiB)": 70.96, "step": 9985, "token_acc": 0.43636363636363634, "train_speed(iter/s)": 0.669734 }, { "epoch": 0.4280022278394242, "grad_norm": 4.176783084869385, "learning_rate": 9.820504224285351e-05, "loss": 2.2855579376220705, "memory(GiB)": 70.96, "step": 9990, "token_acc": 0.49356223175965663, "train_speed(iter/s)": 0.669774 }, { "epoch": 0.42821644316867313, "grad_norm": 4.120275020599365, "learning_rate": 9.82032548067495e-05, "loss": 2.4272697448730467, "memory(GiB)": 70.96, "step": 9995, "token_acc": 0.5054545454545455, "train_speed(iter/s)": 0.669764 }, { "epoch": 0.4284306584979221, "grad_norm": 4.98108434677124, "learning_rate": 9.82014664973983e-05, "loss": 2.3838287353515626, "memory(GiB)": 70.96, "step": 10000, "token_acc": 0.5, "train_speed(iter/s)": 0.669783 }, { "epoch": 0.4284306584979221, "eval_loss": 2.2467904090881348, "eval_runtime": 17.3749, "eval_samples_per_second": 5.755, "eval_steps_per_second": 5.755, "eval_token_acc": 
0.48188405797101447, "step": 10000 }, { "epoch": 0.4286448738271711, "grad_norm": 2.8009254932403564, "learning_rate": 9.819967731483233e-05, "loss": 2.922917938232422, "memory(GiB)": 70.96, "step": 10005, "token_acc": 0.47137745974955275, "train_speed(iter/s)": 0.668918 }, { "epoch": 0.42885908915642, "grad_norm": 3.565535545349121, "learning_rate": 9.819788725908399e-05, "loss": 2.497084045410156, "memory(GiB)": 70.96, "step": 10010, "token_acc": 0.4813753581661891, "train_speed(iter/s)": 0.668939 }, { "epoch": 0.429073304485669, "grad_norm": 3.262392520904541, "learning_rate": 9.819609633018571e-05, "loss": 2.373646926879883, "memory(GiB)": 70.96, "step": 10015, "token_acc": 0.5189393939393939, "train_speed(iter/s)": 0.66893 }, { "epoch": 0.429287519814918, "grad_norm": 2.857902765274048, "learning_rate": 9.819430452816992e-05, "loss": 2.572442626953125, "memory(GiB)": 70.96, "step": 10020, "token_acc": 0.4617737003058104, "train_speed(iter/s)": 0.669026 }, { "epoch": 0.4295017351441669, "grad_norm": 3.6625616550445557, "learning_rate": 9.81925118530691e-05, "loss": 2.4894920349121095, "memory(GiB)": 70.96, "step": 10025, "token_acc": 0.5, "train_speed(iter/s)": 0.669065 }, { "epoch": 0.4297159504734159, "grad_norm": 4.643939018249512, "learning_rate": 9.819071830491573e-05, "loss": 2.451943016052246, "memory(GiB)": 70.96, "step": 10030, "token_acc": 0.4540059347181009, "train_speed(iter/s)": 0.669055 }, { "epoch": 0.42993016580266485, "grad_norm": 2.8141725063323975, "learning_rate": 9.818892388374229e-05, "loss": 2.4887081146240235, "memory(GiB)": 70.96, "step": 10035, "token_acc": 0.4644808743169399, "train_speed(iter/s)": 0.668956 }, { "epoch": 0.4301443811319138, "grad_norm": 4.2787322998046875, "learning_rate": 9.818712858958128e-05, "loss": 2.65103702545166, "memory(GiB)": 70.96, "step": 10040, "token_acc": 0.44482758620689655, "train_speed(iter/s)": 0.668984 }, { "epoch": 0.43035859646116276, "grad_norm": 4.315406322479248, "learning_rate": 
9.818533242246523e-05, "loss": 2.4753744125366213, "memory(GiB)": 70.96, "step": 10045, "token_acc": 0.473015873015873, "train_speed(iter/s)": 0.668981 }, { "epoch": 0.43057281179041174, "grad_norm": 3.1121110916137695, "learning_rate": 9.818353538242668e-05, "loss": 2.3572097778320313, "memory(GiB)": 70.96, "step": 10050, "token_acc": 0.501779359430605, "train_speed(iter/s)": 0.669028 }, { "epoch": 0.43078702711966066, "grad_norm": 4.522809028625488, "learning_rate": 9.818173746949819e-05, "loss": 2.3863750457763673, "memory(GiB)": 70.96, "step": 10055, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.66911 }, { "epoch": 0.43100124244890964, "grad_norm": 4.091737270355225, "learning_rate": 9.817993868371234e-05, "loss": 2.514369583129883, "memory(GiB)": 70.96, "step": 10060, "token_acc": 0.44813278008298757, "train_speed(iter/s)": 0.669242 }, { "epoch": 0.4312154577781586, "grad_norm": 2.833693504333496, "learning_rate": 9.817813902510169e-05, "loss": 2.5433271408081053, "memory(GiB)": 70.96, "step": 10065, "token_acc": 0.5030674846625767, "train_speed(iter/s)": 0.669282 }, { "epoch": 0.43142967310740754, "grad_norm": 2.997699499130249, "learning_rate": 9.817633849369887e-05, "loss": 2.35372257232666, "memory(GiB)": 70.96, "step": 10070, "token_acc": 0.5298507462686567, "train_speed(iter/s)": 0.669227 }, { "epoch": 0.4316438884366565, "grad_norm": 3.68801212310791, "learning_rate": 9.817453708953647e-05, "loss": 2.654384994506836, "memory(GiB)": 70.96, "step": 10075, "token_acc": 0.4, "train_speed(iter/s)": 0.669208 }, { "epoch": 0.4318581037659055, "grad_norm": 3.6207876205444336, "learning_rate": 9.817273481264715e-05, "loss": 2.408601760864258, "memory(GiB)": 70.96, "step": 10080, "token_acc": 0.48464163822525597, "train_speed(iter/s)": 0.669224 }, { "epoch": 0.4320723190951544, "grad_norm": 3.207230806350708, "learning_rate": 9.817093166306355e-05, "loss": 2.4172431945800783, "memory(GiB)": 70.96, "step": 10085, "token_acc": 0.46551724137931033, 
"train_speed(iter/s)": 0.669233 }, { "epoch": 0.4322865344244034, "grad_norm": 3.02983021736145, "learning_rate": 9.816912764081831e-05, "loss": 2.4996402740478514, "memory(GiB)": 70.96, "step": 10090, "token_acc": 0.48089171974522293, "train_speed(iter/s)": 0.669266 }, { "epoch": 0.4325007497536524, "grad_norm": 3.30538010597229, "learning_rate": 9.816732274594417e-05, "loss": 2.371418571472168, "memory(GiB)": 70.96, "step": 10095, "token_acc": 0.4913494809688581, "train_speed(iter/s)": 0.669338 }, { "epoch": 0.43271496508290136, "grad_norm": 4.274293422698975, "learning_rate": 9.816551697847378e-05, "loss": 2.1719905853271486, "memory(GiB)": 70.96, "step": 10100, "token_acc": 0.53125, "train_speed(iter/s)": 0.669443 }, { "epoch": 0.4329291804121503, "grad_norm": 3.4497854709625244, "learning_rate": 9.816371033843986e-05, "loss": 2.410537338256836, "memory(GiB)": 70.96, "step": 10105, "token_acc": 0.444, "train_speed(iter/s)": 0.669528 }, { "epoch": 0.43314339574139926, "grad_norm": 3.1986804008483887, "learning_rate": 9.816190282587516e-05, "loss": 2.3680572509765625, "memory(GiB)": 70.96, "step": 10110, "token_acc": 0.5, "train_speed(iter/s)": 0.669569 }, { "epoch": 0.43335761107064824, "grad_norm": 2.8722143173217773, "learning_rate": 9.81600944408124e-05, "loss": 2.394104766845703, "memory(GiB)": 70.96, "step": 10115, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.669508 }, { "epoch": 0.43357182639989716, "grad_norm": 30.437744140625, "learning_rate": 9.815828518328435e-05, "loss": 2.234060859680176, "memory(GiB)": 70.96, "step": 10120, "token_acc": 0.47232472324723246, "train_speed(iter/s)": 0.669526 }, { "epoch": 0.43378604172914614, "grad_norm": 4.657374858856201, "learning_rate": 9.81564750533238e-05, "loss": 2.1972509384155274, "memory(GiB)": 70.96, "step": 10125, "token_acc": 0.5278969957081545, "train_speed(iter/s)": 0.669519 }, { "epoch": 0.4340002570583951, "grad_norm": 5.375337600708008, "learning_rate": 9.815466405096352e-05, "loss": 
2.84020881652832, "memory(GiB)": 70.96, "step": 10130, "token_acc": 0.42366412213740456, "train_speed(iter/s)": 0.669555 }, { "epoch": 0.43421447238764405, "grad_norm": 3.4197025299072266, "learning_rate": 9.815285217623633e-05, "loss": 2.351426124572754, "memory(GiB)": 70.96, "step": 10135, "token_acc": 0.5099601593625498, "train_speed(iter/s)": 0.669584 }, { "epoch": 0.434428687716893, "grad_norm": 3.0659449100494385, "learning_rate": 9.815103942917506e-05, "loss": 2.505033493041992, "memory(GiB)": 70.96, "step": 10140, "token_acc": 0.46757679180887374, "train_speed(iter/s)": 0.669622 }, { "epoch": 0.434642903046142, "grad_norm": 3.13999342918396, "learning_rate": 9.814922580981254e-05, "loss": 2.2825302124023437, "memory(GiB)": 70.96, "step": 10145, "token_acc": 0.515358361774744, "train_speed(iter/s)": 0.669652 }, { "epoch": 0.4348571183753909, "grad_norm": 3.3501083850860596, "learning_rate": 9.814741131818162e-05, "loss": 2.2230430603027345, "memory(GiB)": 70.96, "step": 10150, "token_acc": 0.49264705882352944, "train_speed(iter/s)": 0.669679 }, { "epoch": 0.4350713337046399, "grad_norm": 4.741246223449707, "learning_rate": 9.814559595431517e-05, "loss": 2.492266082763672, "memory(GiB)": 70.96, "step": 10155, "token_acc": 0.47035573122529645, "train_speed(iter/s)": 0.669702 }, { "epoch": 0.4352855490338889, "grad_norm": 2.686434745788574, "learning_rate": 9.81437797182461e-05, "loss": 2.4050697326660155, "memory(GiB)": 70.96, "step": 10160, "token_acc": 0.48493975903614456, "train_speed(iter/s)": 0.669723 }, { "epoch": 0.4354997643631378, "grad_norm": 2.8792057037353516, "learning_rate": 9.81419626100073e-05, "loss": 2.1177499771118162, "memory(GiB)": 70.96, "step": 10165, "token_acc": 0.5048543689320388, "train_speed(iter/s)": 0.669754 }, { "epoch": 0.4357139796923868, "grad_norm": 3.0149574279785156, "learning_rate": 9.814014462963167e-05, "loss": 2.2628883361816405, "memory(GiB)": 70.96, "step": 10170, "token_acc": 0.5233333333333333, 
"train_speed(iter/s)": 0.66974 }, { "epoch": 0.43592819502163577, "grad_norm": 2.817704677581787, "learning_rate": 9.813832577715216e-05, "loss": 2.2243673324584963, "memory(GiB)": 70.96, "step": 10175, "token_acc": 0.4859437751004016, "train_speed(iter/s)": 0.66978 }, { "epoch": 0.4361424103508847, "grad_norm": 2.8366944789886475, "learning_rate": 9.813650605260174e-05, "loss": 2.1281320571899416, "memory(GiB)": 70.96, "step": 10180, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.669855 }, { "epoch": 0.43635662568013367, "grad_norm": 4.06430196762085, "learning_rate": 9.813468545601334e-05, "loss": 2.3797149658203125, "memory(GiB)": 70.96, "step": 10185, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.669879 }, { "epoch": 0.43657084100938265, "grad_norm": 4.9898881912231445, "learning_rate": 9.813286398741998e-05, "loss": 2.603183555603027, "memory(GiB)": 70.96, "step": 10190, "token_acc": 0.4900662251655629, "train_speed(iter/s)": 0.669966 }, { "epoch": 0.43678505633863157, "grad_norm": 4.7249579429626465, "learning_rate": 9.813104164685462e-05, "loss": 2.447932815551758, "memory(GiB)": 70.96, "step": 10195, "token_acc": 0.4962686567164179, "train_speed(iter/s)": 0.669869 }, { "epoch": 0.43699927166788055, "grad_norm": 2.9879329204559326, "learning_rate": 9.81292184343503e-05, "loss": 2.4267398834228517, "memory(GiB)": 70.96, "step": 10200, "token_acc": 0.4885245901639344, "train_speed(iter/s)": 0.669815 }, { "epoch": 0.43721348699712953, "grad_norm": 2.7617428302764893, "learning_rate": 9.812739434994002e-05, "loss": 2.357366752624512, "memory(GiB)": 70.96, "step": 10205, "token_acc": 0.5317725752508361, "train_speed(iter/s)": 0.669784 }, { "epoch": 0.43742770232637845, "grad_norm": 2.8701295852661133, "learning_rate": 9.812556939365687e-05, "loss": 2.624532699584961, "memory(GiB)": 70.96, "step": 10210, "token_acc": 0.48872180451127817, "train_speed(iter/s)": 0.669724 }, { "epoch": 0.43764191765562743, "grad_norm": 3.2337124347686768, 
"learning_rate": 9.812374356553386e-05, "loss": 2.3834766387939452, "memory(GiB)": 70.96, "step": 10215, "token_acc": 0.4852941176470588, "train_speed(iter/s)": 0.669695 }, { "epoch": 0.4378561329848764, "grad_norm": 2.904106378555298, "learning_rate": 9.81219168656041e-05, "loss": 2.39101505279541, "memory(GiB)": 70.96, "step": 10220, "token_acc": 0.5523465703971119, "train_speed(iter/s)": 0.669799 }, { "epoch": 0.43807034831412534, "grad_norm": 2.9516355991363525, "learning_rate": 9.812008929390069e-05, "loss": 2.3084941864013673, "memory(GiB)": 70.96, "step": 10225, "token_acc": 0.5322033898305085, "train_speed(iter/s)": 0.669783 }, { "epoch": 0.4382845636433743, "grad_norm": 4.197055339813232, "learning_rate": 9.81182608504567e-05, "loss": 2.3920566558837892, "memory(GiB)": 70.96, "step": 10230, "token_acc": 0.5017543859649123, "train_speed(iter/s)": 0.66979 }, { "epoch": 0.4384987789726233, "grad_norm": 3.780120849609375, "learning_rate": 9.81164315353053e-05, "loss": 2.585233688354492, "memory(GiB)": 70.96, "step": 10235, "token_acc": 0.46984126984126984, "train_speed(iter/s)": 0.669777 }, { "epoch": 0.4387129943018722, "grad_norm": 3.818474531173706, "learning_rate": 9.811460134847959e-05, "loss": 2.607882499694824, "memory(GiB)": 70.96, "step": 10240, "token_acc": 0.46441947565543074, "train_speed(iter/s)": 0.669867 }, { "epoch": 0.4389272096311212, "grad_norm": 3.075921058654785, "learning_rate": 9.811277029001274e-05, "loss": 2.2039913177490233, "memory(GiB)": 70.96, "step": 10245, "token_acc": 0.5381679389312977, "train_speed(iter/s)": 0.669954 }, { "epoch": 0.4391414249603702, "grad_norm": 3.6380820274353027, "learning_rate": 9.811093835993792e-05, "loss": 2.3243064880371094, "memory(GiB)": 70.96, "step": 10250, "token_acc": 0.4785992217898833, "train_speed(iter/s)": 0.669965 }, { "epoch": 0.4393556402896191, "grad_norm": 3.5001296997070312, "learning_rate": 9.810910555828833e-05, "loss": 2.539554214477539, "memory(GiB)": 70.96, "step": 10255, 
"token_acc": 0.47076023391812866, "train_speed(iter/s)": 0.669946 }, { "epoch": 0.4395698556188681, "grad_norm": 2.8056480884552, "learning_rate": 9.810727188509716e-05, "loss": 2.722810745239258, "memory(GiB)": 70.96, "step": 10260, "token_acc": 0.4166666666666667, "train_speed(iter/s)": 0.669912 }, { "epoch": 0.43978407094811706, "grad_norm": 3.211515426635742, "learning_rate": 9.810543734039763e-05, "loss": 2.316847038269043, "memory(GiB)": 70.96, "step": 10265, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.669889 }, { "epoch": 0.43999828627736604, "grad_norm": 3.125701904296875, "learning_rate": 9.810360192422298e-05, "loss": 2.643931198120117, "memory(GiB)": 70.96, "step": 10270, "token_acc": 0.44376899696048633, "train_speed(iter/s)": 0.669891 }, { "epoch": 0.44021250160661496, "grad_norm": 2.95233154296875, "learning_rate": 9.810176563660644e-05, "loss": 2.363847351074219, "memory(GiB)": 70.96, "step": 10275, "token_acc": 0.5131086142322098, "train_speed(iter/s)": 0.669952 }, { "epoch": 0.44042671693586394, "grad_norm": 3.4371960163116455, "learning_rate": 9.809992847758132e-05, "loss": 2.241470146179199, "memory(GiB)": 70.96, "step": 10280, "token_acc": 0.484149855907781, "train_speed(iter/s)": 0.669991 }, { "epoch": 0.4406409322651129, "grad_norm": 3.5429012775421143, "learning_rate": 9.809809044718085e-05, "loss": 2.6959171295166016, "memory(GiB)": 70.96, "step": 10285, "token_acc": 0.47840531561461797, "train_speed(iter/s)": 0.670053 }, { "epoch": 0.44085514759436184, "grad_norm": 3.086937427520752, "learning_rate": 9.809625154543836e-05, "loss": 2.655058670043945, "memory(GiB)": 70.96, "step": 10290, "token_acc": 0.4671280276816609, "train_speed(iter/s)": 0.670028 }, { "epoch": 0.4410693629236108, "grad_norm": 4.9631266593933105, "learning_rate": 9.809441177238716e-05, "loss": 2.629162406921387, "memory(GiB)": 70.96, "step": 10295, "token_acc": 0.45723684210526316, "train_speed(iter/s)": 0.670001 }, { "epoch": 0.4412835782528598, 
"grad_norm": 3.112591028213501, "learning_rate": 9.809257112806056e-05, "loss": 2.239999008178711, "memory(GiB)": 70.96, "step": 10300, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.670098 }, { "epoch": 0.4414977935821087, "grad_norm": 2.87636137008667, "learning_rate": 9.809072961249194e-05, "loss": 2.4665878295898436, "memory(GiB)": 70.96, "step": 10305, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.670135 }, { "epoch": 0.4417120089113577, "grad_norm": 5.610252380371094, "learning_rate": 9.808888722571464e-05, "loss": 2.539797782897949, "memory(GiB)": 70.96, "step": 10310, "token_acc": 0.45874587458745875, "train_speed(iter/s)": 0.670142 }, { "epoch": 0.4419262242406067, "grad_norm": 3.0601766109466553, "learning_rate": 9.808704396776203e-05, "loss": 2.4991905212402346, "memory(GiB)": 70.96, "step": 10315, "token_acc": 0.48518518518518516, "train_speed(iter/s)": 0.670127 }, { "epoch": 0.4421404395698556, "grad_norm": 3.695058584213257, "learning_rate": 9.80851998386675e-05, "loss": 2.7010303497314454, "memory(GiB)": 70.96, "step": 10320, "token_acc": 0.4642857142857143, "train_speed(iter/s)": 0.670181 }, { "epoch": 0.4423546548991046, "grad_norm": 4.261292934417725, "learning_rate": 9.808335483846447e-05, "loss": 2.532793426513672, "memory(GiB)": 70.96, "step": 10325, "token_acc": 0.46938775510204084, "train_speed(iter/s)": 0.670204 }, { "epoch": 0.44256887022835356, "grad_norm": 3.1355183124542236, "learning_rate": 9.808150896718636e-05, "loss": 2.0100933074951173, "memory(GiB)": 70.96, "step": 10330, "token_acc": 0.5448028673835126, "train_speed(iter/s)": 0.670287 }, { "epoch": 0.4427830855576025, "grad_norm": 3.082307815551758, "learning_rate": 9.807966222486663e-05, "loss": 2.3071949005126955, "memory(GiB)": 70.96, "step": 10335, "token_acc": 0.48507462686567165, "train_speed(iter/s)": 0.670347 }, { "epoch": 0.44299730088685146, "grad_norm": 3.1425533294677734, "learning_rate": 9.807781461153868e-05, "loss": 2.6272560119628907, 
"memory(GiB)": 70.96, "step": 10340, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.670468 }, { "epoch": 0.44321151621610044, "grad_norm": 3.895122528076172, "learning_rate": 9.807596612723606e-05, "loss": 2.4535602569580077, "memory(GiB)": 70.96, "step": 10345, "token_acc": 0.46325878594249204, "train_speed(iter/s)": 0.670545 }, { "epoch": 0.44342573154534937, "grad_norm": 2.4967727661132812, "learning_rate": 9.807411677199219e-05, "loss": 2.447506904602051, "memory(GiB)": 70.96, "step": 10350, "token_acc": 0.5169230769230769, "train_speed(iter/s)": 0.670595 }, { "epoch": 0.44363994687459835, "grad_norm": 2.7393178939819336, "learning_rate": 9.807226654584061e-05, "loss": 2.309465026855469, "memory(GiB)": 70.96, "step": 10355, "token_acc": 0.5537848605577689, "train_speed(iter/s)": 0.670602 }, { "epoch": 0.4438541622038473, "grad_norm": 5.018656253814697, "learning_rate": 9.807041544881481e-05, "loss": 2.438285255432129, "memory(GiB)": 70.96, "step": 10360, "token_acc": 0.48055555555555557, "train_speed(iter/s)": 0.670687 }, { "epoch": 0.44406837753309625, "grad_norm": 3.5095250606536865, "learning_rate": 9.806856348094835e-05, "loss": 2.697486686706543, "memory(GiB)": 70.96, "step": 10365, "token_acc": 0.46206896551724136, "train_speed(iter/s)": 0.670671 }, { "epoch": 0.4442825928623452, "grad_norm": 2.820688247680664, "learning_rate": 9.806671064227477e-05, "loss": 2.2378585815429686, "memory(GiB)": 70.96, "step": 10370, "token_acc": 0.50920245398773, "train_speed(iter/s)": 0.670757 }, { "epoch": 0.4444968081915942, "grad_norm": 3.1066715717315674, "learning_rate": 9.806485693282764e-05, "loss": 2.358298492431641, "memory(GiB)": 70.96, "step": 10375, "token_acc": 0.5066666666666667, "train_speed(iter/s)": 0.670772 }, { "epoch": 0.44471102352084313, "grad_norm": 3.488586187362671, "learning_rate": 9.806300235264053e-05, "loss": 2.279074859619141, "memory(GiB)": 70.96, "step": 10380, "token_acc": 0.48736462093862815, "train_speed(iter/s)": 0.670803 }, { 
"epoch": 0.4449252388500921, "grad_norm": 4.040151596069336, "learning_rate": 9.806114690174706e-05, "loss": 2.2614772796630858, "memory(GiB)": 70.96, "step": 10385, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.670807 }, { "epoch": 0.4451394541793411, "grad_norm": 4.6390557289123535, "learning_rate": 9.805929058018081e-05, "loss": 2.5165882110595703, "memory(GiB)": 70.96, "step": 10390, "token_acc": 0.46473029045643155, "train_speed(iter/s)": 0.670773 }, { "epoch": 0.44535366950859, "grad_norm": 2.9593658447265625, "learning_rate": 9.805743338797544e-05, "loss": 2.565419578552246, "memory(GiB)": 70.96, "step": 10395, "token_acc": 0.45170454545454547, "train_speed(iter/s)": 0.670735 }, { "epoch": 0.445567884837839, "grad_norm": 3.1710734367370605, "learning_rate": 9.805557532516458e-05, "loss": 2.587040901184082, "memory(GiB)": 70.96, "step": 10400, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.670753 }, { "epoch": 0.44578210016708797, "grad_norm": 2.8081166744232178, "learning_rate": 9.805371639178189e-05, "loss": 2.479160499572754, "memory(GiB)": 70.96, "step": 10405, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.670724 }, { "epoch": 0.4459963154963369, "grad_norm": 3.687185525894165, "learning_rate": 9.805185658786104e-05, "loss": 2.363906478881836, "memory(GiB)": 70.96, "step": 10410, "token_acc": 0.48638132295719844, "train_speed(iter/s)": 0.670774 }, { "epoch": 0.44621053082558587, "grad_norm": 5.829984188079834, "learning_rate": 9.804999591343575e-05, "loss": 2.394560623168945, "memory(GiB)": 70.96, "step": 10415, "token_acc": 0.49615384615384617, "train_speed(iter/s)": 0.670738 }, { "epoch": 0.44642474615483485, "grad_norm": 3.582037925720215, "learning_rate": 9.804813436853971e-05, "loss": 2.408504104614258, "memory(GiB)": 70.96, "step": 10420, "token_acc": 0.4326241134751773, "train_speed(iter/s)": 0.670707 }, { "epoch": 0.4466389614840838, "grad_norm": 3.2329177856445312, "learning_rate": 9.804627195320663e-05, "loss": 
2.200254440307617, "memory(GiB)": 70.96, "step": 10425, "token_acc": 0.5209125475285171, "train_speed(iter/s)": 0.670717 }, { "epoch": 0.44685317681333275, "grad_norm": 3.0610618591308594, "learning_rate": 9.804440866747028e-05, "loss": 2.7420928955078123, "memory(GiB)": 70.96, "step": 10430, "token_acc": 0.46200607902735563, "train_speed(iter/s)": 0.670705 }, { "epoch": 0.44706739214258173, "grad_norm": 2.9604344367980957, "learning_rate": 9.804254451136439e-05, "loss": 2.1925643920898437, "memory(GiB)": 70.96, "step": 10435, "token_acc": 0.5532646048109966, "train_speed(iter/s)": 0.670677 }, { "epoch": 0.4472816074718307, "grad_norm": 2.976569414138794, "learning_rate": 9.804067948492273e-05, "loss": 2.286078453063965, "memory(GiB)": 70.96, "step": 10440, "token_acc": 0.5302013422818792, "train_speed(iter/s)": 0.670748 }, { "epoch": 0.44749582280107963, "grad_norm": 2.9632558822631836, "learning_rate": 9.80388135881791e-05, "loss": 2.5060522079467775, "memory(GiB)": 70.96, "step": 10445, "token_acc": 0.4808259587020649, "train_speed(iter/s)": 0.670735 }, { "epoch": 0.4477100381303286, "grad_norm": 4.030147552490234, "learning_rate": 9.80369468211673e-05, "loss": 2.2793048858642577, "memory(GiB)": 70.96, "step": 10450, "token_acc": 0.5045592705167173, "train_speed(iter/s)": 0.670725 }, { "epoch": 0.4479242534595776, "grad_norm": 3.752089500427246, "learning_rate": 9.803507918392114e-05, "loss": 2.5686677932739257, "memory(GiB)": 70.96, "step": 10455, "token_acc": 0.45126353790613716, "train_speed(iter/s)": 0.670714 }, { "epoch": 0.4481384687888265, "grad_norm": 6.51938009262085, "learning_rate": 9.803321067647446e-05, "loss": 2.3866573333740235, "memory(GiB)": 70.96, "step": 10460, "token_acc": 0.5032051282051282, "train_speed(iter/s)": 0.670813 }, { "epoch": 0.4483526841180755, "grad_norm": 3.975426435470581, "learning_rate": 9.803134129886112e-05, "loss": 2.2696475982666016, "memory(GiB)": 70.96, "step": 10465, "token_acc": 0.5294117647058824, 
"train_speed(iter/s)": 0.670925 }, { "epoch": 0.4485668994473245, "grad_norm": 4.05512809753418, "learning_rate": 9.802947105111498e-05, "loss": 2.431774139404297, "memory(GiB)": 70.96, "step": 10470, "token_acc": 0.4794520547945205, "train_speed(iter/s)": 0.670829 }, { "epoch": 0.4487811147765734, "grad_norm": 3.1671104431152344, "learning_rate": 9.80275999332699e-05, "loss": 2.7258066177368163, "memory(GiB)": 70.96, "step": 10475, "token_acc": 0.4198717948717949, "train_speed(iter/s)": 0.67085 }, { "epoch": 0.4489953301058224, "grad_norm": 3.4529616832733154, "learning_rate": 9.80257279453598e-05, "loss": 2.3905717849731447, "memory(GiB)": 70.96, "step": 10480, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.670882 }, { "epoch": 0.44920954543507136, "grad_norm": 2.9623308181762695, "learning_rate": 9.802385508741858e-05, "loss": 2.5024555206298826, "memory(GiB)": 70.96, "step": 10485, "token_acc": 0.48955223880597015, "train_speed(iter/s)": 0.670799 }, { "epoch": 0.4494237607643203, "grad_norm": 2.9322779178619385, "learning_rate": 9.802198135948019e-05, "loss": 2.4901782989501955, "memory(GiB)": 70.96, "step": 10490, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.670859 }, { "epoch": 0.44963797609356926, "grad_norm": 3.42293381690979, "learning_rate": 9.802010676157854e-05, "loss": 2.4197666168212892, "memory(GiB)": 70.96, "step": 10495, "token_acc": 0.5048231511254019, "train_speed(iter/s)": 0.670865 }, { "epoch": 0.44985219142281824, "grad_norm": 4.417413234710693, "learning_rate": 9.801823129374763e-05, "loss": 2.578774833679199, "memory(GiB)": 70.96, "step": 10500, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670881 }, { "epoch": 0.44985219142281824, "eval_loss": 2.0750763416290283, "eval_runtime": 16.2302, "eval_samples_per_second": 6.161, "eval_steps_per_second": 6.161, "eval_token_acc": 0.5118733509234829, "step": 10500 }, { "epoch": 0.45006640675206716, "grad_norm": 3.348151445388794, "learning_rate": 
9.801635495602141e-05, "loss": 2.423817253112793, "memory(GiB)": 70.96, "step": 10505, "token_acc": 0.48161764705882354, "train_speed(iter/s)": 0.670078 }, { "epoch": 0.45028062208131614, "grad_norm": 2.935509443283081, "learning_rate": 9.801447774843386e-05, "loss": 2.565122604370117, "memory(GiB)": 70.96, "step": 10510, "token_acc": 0.4811320754716981, "train_speed(iter/s)": 0.670071 }, { "epoch": 0.4504948374105651, "grad_norm": 4.367796421051025, "learning_rate": 9.801259967101903e-05, "loss": 2.1632770538330077, "memory(GiB)": 70.96, "step": 10515, "token_acc": 0.5088757396449705, "train_speed(iter/s)": 0.670118 }, { "epoch": 0.45070905273981404, "grad_norm": 4.70820951461792, "learning_rate": 9.80107207238109e-05, "loss": 2.4507976531982423, "memory(GiB)": 70.96, "step": 10520, "token_acc": 0.44285714285714284, "train_speed(iter/s)": 0.670082 }, { "epoch": 0.450923268069063, "grad_norm": 3.607633352279663, "learning_rate": 9.800884090684352e-05, "loss": 2.862200927734375, "memory(GiB)": 70.96, "step": 10525, "token_acc": 0.43548387096774194, "train_speed(iter/s)": 0.670094 }, { "epoch": 0.451137483398312, "grad_norm": 3.4564359188079834, "learning_rate": 9.800696022015097e-05, "loss": 2.5655555725097656, "memory(GiB)": 70.96, "step": 10530, "token_acc": 0.4721189591078067, "train_speed(iter/s)": 0.67008 }, { "epoch": 0.4513516987275609, "grad_norm": 3.1131529808044434, "learning_rate": 9.80050786637673e-05, "loss": 2.5044654846191405, "memory(GiB)": 70.96, "step": 10535, "token_acc": 0.45104895104895104, "train_speed(iter/s)": 0.669975 }, { "epoch": 0.4515659140568099, "grad_norm": 3.717230796813965, "learning_rate": 9.800319623772658e-05, "loss": 2.2952911376953127, "memory(GiB)": 70.96, "step": 10540, "token_acc": 0.5171339563862928, "train_speed(iter/s)": 0.670009 }, { "epoch": 0.4517801293860589, "grad_norm": 4.713624954223633, "learning_rate": 9.800131294206294e-05, "loss": 2.209516716003418, "memory(GiB)": 70.96, "step": 10545, "token_acc": 
0.5374449339207048, "train_speed(iter/s)": 0.670077 }, { "epoch": 0.4519943447153078, "grad_norm": 3.7325806617736816, "learning_rate": 9.799942877681049e-05, "loss": 2.6672121047973634, "memory(GiB)": 70.96, "step": 10550, "token_acc": 0.46689895470383275, "train_speed(iter/s)": 0.670147 }, { "epoch": 0.4522085600445568, "grad_norm": 5.7237372398376465, "learning_rate": 9.799754374200336e-05, "loss": 2.7222532272338866, "memory(GiB)": 70.96, "step": 10555, "token_acc": 0.4738562091503268, "train_speed(iter/s)": 0.670173 }, { "epoch": 0.45242277537380576, "grad_norm": 3.1947712898254395, "learning_rate": 9.79956578376757e-05, "loss": 2.5703529357910155, "memory(GiB)": 70.96, "step": 10560, "token_acc": 0.45098039215686275, "train_speed(iter/s)": 0.67015 }, { "epoch": 0.4526369907030547, "grad_norm": 3.659655809402466, "learning_rate": 9.799377106386167e-05, "loss": 2.657380294799805, "memory(GiB)": 70.96, "step": 10565, "token_acc": 0.45751633986928103, "train_speed(iter/s)": 0.670149 }, { "epoch": 0.45285120603230367, "grad_norm": 4.11981725692749, "learning_rate": 9.799188342059547e-05, "loss": 2.5080217361450194, "memory(GiB)": 70.96, "step": 10570, "token_acc": 0.4910394265232975, "train_speed(iter/s)": 0.670173 }, { "epoch": 0.45306542136155264, "grad_norm": 4.095824241638184, "learning_rate": 9.798999490791126e-05, "loss": 2.304286575317383, "memory(GiB)": 70.96, "step": 10575, "token_acc": 0.5, "train_speed(iter/s)": 0.670198 }, { "epoch": 0.45327963669080157, "grad_norm": 2.6521387100219727, "learning_rate": 9.798810552584328e-05, "loss": 2.607553482055664, "memory(GiB)": 70.96, "step": 10580, "token_acc": 0.48081841432225064, "train_speed(iter/s)": 0.670251 }, { "epoch": 0.45349385202005055, "grad_norm": 3.173173427581787, "learning_rate": 9.798621527442576e-05, "loss": 2.5740020751953123, "memory(GiB)": 70.96, "step": 10585, "token_acc": 0.4598337950138504, "train_speed(iter/s)": 0.670214 }, { "epoch": 0.4537080673492995, "grad_norm": 2.8894150257110596, 
"learning_rate": 9.798432415369294e-05, "loss": 2.5940584182739257, "memory(GiB)": 70.96, "step": 10590, "token_acc": 0.44871794871794873, "train_speed(iter/s)": 0.670251 }, { "epoch": 0.45392228267854845, "grad_norm": 2.8778278827667236, "learning_rate": 9.798243216367907e-05, "loss": 2.468179702758789, "memory(GiB)": 70.96, "step": 10595, "token_acc": 0.4419475655430712, "train_speed(iter/s)": 0.670257 }, { "epoch": 0.45413649800779743, "grad_norm": 3.5521066188812256, "learning_rate": 9.798053930441842e-05, "loss": 2.5057979583740235, "memory(GiB)": 70.96, "step": 10600, "token_acc": 0.4632352941176471, "train_speed(iter/s)": 0.670332 }, { "epoch": 0.4543507133370464, "grad_norm": 3.0317230224609375, "learning_rate": 9.79786455759453e-05, "loss": 2.6186363220214846, "memory(GiB)": 70.96, "step": 10605, "token_acc": 0.45901639344262296, "train_speed(iter/s)": 0.670274 }, { "epoch": 0.4545649286662954, "grad_norm": 3.1500649452209473, "learning_rate": 9.7976750978294e-05, "loss": 2.3170963287353517, "memory(GiB)": 70.96, "step": 10610, "token_acc": 0.4781144781144781, "train_speed(iter/s)": 0.670281 }, { "epoch": 0.4547791439955443, "grad_norm": 2.841967821121216, "learning_rate": 9.797485551149885e-05, "loss": 2.4307273864746093, "memory(GiB)": 70.96, "step": 10615, "token_acc": 0.45878136200716846, "train_speed(iter/s)": 0.670299 }, { "epoch": 0.4549933593247933, "grad_norm": 4.084149360656738, "learning_rate": 9.79729591755942e-05, "loss": 2.5626779556274415, "memory(GiB)": 70.96, "step": 10620, "token_acc": 0.4279661016949153, "train_speed(iter/s)": 0.670216 }, { "epoch": 0.45520757465404227, "grad_norm": 3.058648109436035, "learning_rate": 9.797106197061439e-05, "loss": 2.493616485595703, "memory(GiB)": 70.96, "step": 10625, "token_acc": 0.46710526315789475, "train_speed(iter/s)": 0.67023 }, { "epoch": 0.4554217899832912, "grad_norm": 3.536144733428955, "learning_rate": 9.796916389659379e-05, "loss": 2.3819768905639647, "memory(GiB)": 70.96, "step": 10630, 
"token_acc": 0.46607669616519176, "train_speed(iter/s)": 0.670204 }, { "epoch": 0.45563600531254017, "grad_norm": 3.7561962604522705, "learning_rate": 9.796726495356678e-05, "loss": 2.7064849853515627, "memory(GiB)": 70.96, "step": 10635, "token_acc": 0.44223107569721115, "train_speed(iter/s)": 0.670181 }, { "epoch": 0.45585022064178915, "grad_norm": 3.382385015487671, "learning_rate": 9.796536514156778e-05, "loss": 2.5042446136474608, "memory(GiB)": 70.96, "step": 10640, "token_acc": 0.4985507246376812, "train_speed(iter/s)": 0.670294 }, { "epoch": 0.4560644359710381, "grad_norm": 3.1854336261749268, "learning_rate": 9.796346446063118e-05, "loss": 2.464067840576172, "memory(GiB)": 70.96, "step": 10645, "token_acc": 0.4888888888888889, "train_speed(iter/s)": 0.670179 }, { "epoch": 0.45627865130028705, "grad_norm": 3.3332505226135254, "learning_rate": 9.796156291079143e-05, "loss": 2.25284481048584, "memory(GiB)": 70.96, "step": 10650, "token_acc": 0.5016393442622951, "train_speed(iter/s)": 0.670161 }, { "epoch": 0.45649286662953603, "grad_norm": 3.9039533138275146, "learning_rate": 9.795966049208301e-05, "loss": 2.4619077682495116, "memory(GiB)": 70.96, "step": 10655, "token_acc": 0.4577922077922078, "train_speed(iter/s)": 0.670196 }, { "epoch": 0.45670708195878496, "grad_norm": 3.7268660068511963, "learning_rate": 9.795775720454032e-05, "loss": 2.346849060058594, "memory(GiB)": 70.96, "step": 10660, "token_acc": 0.4965986394557823, "train_speed(iter/s)": 0.670245 }, { "epoch": 0.45692129728803393, "grad_norm": 4.400223731994629, "learning_rate": 9.795585304819788e-05, "loss": 2.4498619079589843, "memory(GiB)": 70.96, "step": 10665, "token_acc": 0.4610169491525424, "train_speed(iter/s)": 0.67021 }, { "epoch": 0.4571355126172829, "grad_norm": 2.9821560382843018, "learning_rate": 9.795394802309015e-05, "loss": 2.7382085800170897, "memory(GiB)": 70.96, "step": 10670, "token_acc": 0.4326241134751773, "train_speed(iter/s)": 0.670261 }, { "epoch": 0.45734972794653184, 
"grad_norm": 4.838650703430176, "learning_rate": 9.795204212925169e-05, "loss": 2.64306526184082, "memory(GiB)": 70.96, "step": 10675, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.670286 }, { "epoch": 0.4575639432757808, "grad_norm": 3.485173225402832, "learning_rate": 9.7950135366717e-05, "loss": 2.370104217529297, "memory(GiB)": 70.96, "step": 10680, "token_acc": 0.5040322580645161, "train_speed(iter/s)": 0.670319 }, { "epoch": 0.4577781586050298, "grad_norm": 3.746811628341675, "learning_rate": 9.794822773552063e-05, "loss": 2.722137451171875, "memory(GiB)": 70.96, "step": 10685, "token_acc": 0.4367469879518072, "train_speed(iter/s)": 0.670405 }, { "epoch": 0.4579923739342787, "grad_norm": 3.197300434112549, "learning_rate": 9.794631923569714e-05, "loss": 2.5418073654174806, "memory(GiB)": 70.96, "step": 10690, "token_acc": 0.4807692307692308, "train_speed(iter/s)": 0.670448 }, { "epoch": 0.4582065892635277, "grad_norm": 4.44451904296875, "learning_rate": 9.794440986728109e-05, "loss": 2.363534164428711, "memory(GiB)": 70.96, "step": 10695, "token_acc": 0.5033783783783784, "train_speed(iter/s)": 0.670411 }, { "epoch": 0.4584208045927767, "grad_norm": 4.011234760284424, "learning_rate": 9.794249963030709e-05, "loss": 2.5575185775756837, "memory(GiB)": 70.96, "step": 10700, "token_acc": 0.49038461538461536, "train_speed(iter/s)": 0.670482 }, { "epoch": 0.4586350199220256, "grad_norm": 3.4776341915130615, "learning_rate": 9.794058852480972e-05, "loss": 2.0762813568115233, "memory(GiB)": 70.96, "step": 10705, "token_acc": 0.5670498084291188, "train_speed(iter/s)": 0.670499 }, { "epoch": 0.4588492352512746, "grad_norm": 2.5043370723724365, "learning_rate": 9.793867655082362e-05, "loss": 2.3389608383178713, "memory(GiB)": 70.96, "step": 10710, "token_acc": 0.4854111405835544, "train_speed(iter/s)": 0.670444 }, { "epoch": 0.45906345058052356, "grad_norm": 2.529435157775879, "learning_rate": 9.793676370838344e-05, "loss": 2.439559555053711, "memory(GiB)": 
70.96, "step": 10715, "token_acc": 0.49698795180722893, "train_speed(iter/s)": 0.670426 }, { "epoch": 0.4592776659097725, "grad_norm": 3.29689621925354, "learning_rate": 9.79348499975238e-05, "loss": 2.4777738571166994, "memory(GiB)": 70.96, "step": 10720, "token_acc": 0.49044585987261147, "train_speed(iter/s)": 0.670379 }, { "epoch": 0.45949188123902146, "grad_norm": 3.5442614555358887, "learning_rate": 9.79329354182794e-05, "loss": 2.3924522399902344, "memory(GiB)": 70.96, "step": 10725, "token_acc": 0.4892086330935252, "train_speed(iter/s)": 0.670407 }, { "epoch": 0.45970609656827044, "grad_norm": 3.1348772048950195, "learning_rate": 9.79310199706849e-05, "loss": 2.6817947387695313, "memory(GiB)": 70.96, "step": 10730, "token_acc": 0.4873417721518987, "train_speed(iter/s)": 0.670392 }, { "epoch": 0.45992031189751936, "grad_norm": 2.9337968826293945, "learning_rate": 9.792910365477501e-05, "loss": 2.3727724075317385, "memory(GiB)": 70.96, "step": 10735, "token_acc": 0.4702194357366771, "train_speed(iter/s)": 0.670432 }, { "epoch": 0.46013452722676834, "grad_norm": 4.002341270446777, "learning_rate": 9.792718647058446e-05, "loss": 2.3996501922607423, "memory(GiB)": 70.96, "step": 10740, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.670375 }, { "epoch": 0.4603487425560173, "grad_norm": 4.20035982131958, "learning_rate": 9.792526841814795e-05, "loss": 2.6734586715698243, "memory(GiB)": 70.96, "step": 10745, "token_acc": 0.42996742671009774, "train_speed(iter/s)": 0.670369 }, { "epoch": 0.46056295788526624, "grad_norm": 3.069949150085449, "learning_rate": 9.792334949750027e-05, "loss": 2.1475042343139648, "memory(GiB)": 70.96, "step": 10750, "token_acc": 0.5173745173745173, "train_speed(iter/s)": 0.670346 }, { "epoch": 0.4607771732145152, "grad_norm": 2.828881025314331, "learning_rate": 9.792142970867613e-05, "loss": 2.492085266113281, "memory(GiB)": 70.96, "step": 10755, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.670333 }, { "epoch": 
0.4609913885437642, "grad_norm": 3.9835212230682373, "learning_rate": 9.791950905171034e-05, "loss": 2.342617416381836, "memory(GiB)": 70.96, "step": 10760, "token_acc": 0.5078740157480315, "train_speed(iter/s)": 0.670353 }, { "epoch": 0.4612056038730131, "grad_norm": 3.6550374031066895, "learning_rate": 9.791758752663771e-05, "loss": 2.1300643920898437, "memory(GiB)": 70.96, "step": 10765, "token_acc": 0.5375939849624061, "train_speed(iter/s)": 0.670382 }, { "epoch": 0.4614198192022621, "grad_norm": 3.3546488285064697, "learning_rate": 9.791566513349302e-05, "loss": 2.451016426086426, "memory(GiB)": 70.96, "step": 10770, "token_acc": 0.49206349206349204, "train_speed(iter/s)": 0.670352 }, { "epoch": 0.4616340345315111, "grad_norm": 2.716428279876709, "learning_rate": 9.791374187231111e-05, "loss": 2.1266408920288087, "memory(GiB)": 70.96, "step": 10775, "token_acc": 0.5476190476190477, "train_speed(iter/s)": 0.670352 }, { "epoch": 0.46184824986076006, "grad_norm": 3.077094554901123, "learning_rate": 9.791181774312681e-05, "loss": 2.2627580642700194, "memory(GiB)": 70.96, "step": 10780, "token_acc": 0.5166666666666667, "train_speed(iter/s)": 0.670369 }, { "epoch": 0.462062465190009, "grad_norm": 3.4765660762786865, "learning_rate": 9.790989274597499e-05, "loss": 2.346372604370117, "memory(GiB)": 70.96, "step": 10785, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.670418 }, { "epoch": 0.46227668051925797, "grad_norm": 4.069040298461914, "learning_rate": 9.790796688089052e-05, "loss": 2.6434139251708983, "memory(GiB)": 70.96, "step": 10790, "token_acc": 0.4456140350877193, "train_speed(iter/s)": 0.670459 }, { "epoch": 0.46249089584850694, "grad_norm": 3.747215509414673, "learning_rate": 9.79060401479083e-05, "loss": 2.3846275329589846, "memory(GiB)": 70.96, "step": 10795, "token_acc": 0.5035714285714286, "train_speed(iter/s)": 0.670483 }, { "epoch": 0.46270511117775587, "grad_norm": 2.9044225215911865, "learning_rate": 9.790411254706322e-05, "loss": 
2.225391960144043, "memory(GiB)": 70.96, "step": 10800, "token_acc": 0.5193548387096775, "train_speed(iter/s)": 0.670479 }, { "epoch": 0.46291932650700485, "grad_norm": 4.3086700439453125, "learning_rate": 9.79021840783902e-05, "loss": 2.131510543823242, "memory(GiB)": 70.96, "step": 10805, "token_acc": 0.5057915057915058, "train_speed(iter/s)": 0.670539 }, { "epoch": 0.4631335418362538, "grad_norm": 4.4400715827941895, "learning_rate": 9.790025474192416e-05, "loss": 2.3758304595947264, "memory(GiB)": 70.96, "step": 10810, "token_acc": 0.48507462686567165, "train_speed(iter/s)": 0.6705 }, { "epoch": 0.46334775716550275, "grad_norm": 3.1213808059692383, "learning_rate": 9.78983245377001e-05, "loss": 2.413017463684082, "memory(GiB)": 70.96, "step": 10815, "token_acc": 0.46397694524495675, "train_speed(iter/s)": 0.670451 }, { "epoch": 0.46356197249475173, "grad_norm": 3.2221291065216064, "learning_rate": 9.789639346575294e-05, "loss": 2.4601417541503907, "memory(GiB)": 70.96, "step": 10820, "token_acc": 0.4659090909090909, "train_speed(iter/s)": 0.670506 }, { "epoch": 0.4637761878240007, "grad_norm": 3.9711105823516846, "learning_rate": 9.789446152611769e-05, "loss": 2.479313087463379, "memory(GiB)": 70.96, "step": 10825, "token_acc": 0.4816053511705686, "train_speed(iter/s)": 0.670509 }, { "epoch": 0.46399040315324963, "grad_norm": 3.140664577484131, "learning_rate": 9.789252871882934e-05, "loss": 2.2726844787597655, "memory(GiB)": 70.96, "step": 10830, "token_acc": 0.4485294117647059, "train_speed(iter/s)": 0.670456 }, { "epoch": 0.4642046184824986, "grad_norm": 3.358374834060669, "learning_rate": 9.789059504392293e-05, "loss": 2.355047607421875, "memory(GiB)": 70.96, "step": 10835, "token_acc": 0.4819672131147541, "train_speed(iter/s)": 0.670495 }, { "epoch": 0.4644188338117476, "grad_norm": 3.8328680992126465, "learning_rate": 9.788866050143344e-05, "loss": 2.314116096496582, "memory(GiB)": 70.96, "step": 10840, "token_acc": 0.53125, "train_speed(iter/s)": 
0.670545 }, { "epoch": 0.4646330491409965, "grad_norm": 3.711651563644409, "learning_rate": 9.788672509139593e-05, "loss": 2.656549835205078, "memory(GiB)": 70.96, "step": 10845, "token_acc": 0.4779874213836478, "train_speed(iter/s)": 0.6706 }, { "epoch": 0.4648472644702455, "grad_norm": 2.522218942642212, "learning_rate": 9.78847888138455e-05, "loss": 2.398153877258301, "memory(GiB)": 70.96, "step": 10850, "token_acc": 0.44, "train_speed(iter/s)": 0.67064 }, { "epoch": 0.46506147979949447, "grad_norm": 4.136328220367432, "learning_rate": 9.788285166881718e-05, "loss": 2.652349853515625, "memory(GiB)": 70.96, "step": 10855, "token_acc": 0.4513888888888889, "train_speed(iter/s)": 0.670654 }, { "epoch": 0.4652756951287434, "grad_norm": 3.531116247177124, "learning_rate": 9.78809136563461e-05, "loss": 2.5151878356933595, "memory(GiB)": 70.96, "step": 10860, "token_acc": 0.4536741214057508, "train_speed(iter/s)": 0.670682 }, { "epoch": 0.4654899104579924, "grad_norm": 3.15735125541687, "learning_rate": 9.787897477646735e-05, "loss": 2.558992385864258, "memory(GiB)": 70.96, "step": 10865, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.670723 }, { "epoch": 0.46570412578724135, "grad_norm": 2.7888646125793457, "learning_rate": 9.787703502921604e-05, "loss": 2.4058313369750977, "memory(GiB)": 70.96, "step": 10870, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.670774 }, { "epoch": 0.4659183411164903, "grad_norm": 2.8875792026519775, "learning_rate": 9.787509441462734e-05, "loss": 2.3231821060180664, "memory(GiB)": 70.96, "step": 10875, "token_acc": 0.5335570469798657, "train_speed(iter/s)": 0.670787 }, { "epoch": 0.46613255644573925, "grad_norm": 3.9656143188476562, "learning_rate": 9.78731529327364e-05, "loss": 2.4166645050048827, "memory(GiB)": 70.96, "step": 10880, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.670872 }, { "epoch": 0.46634677177498823, "grad_norm": 2.9607017040252686, "learning_rate": 9.787121058357839e-05, "loss": 
2.4237775802612305, "memory(GiB)": 70.96, "step": 10885, "token_acc": 0.4696485623003195, "train_speed(iter/s)": 0.670936 }, { "epoch": 0.46656098710423716, "grad_norm": 3.627021074295044, "learning_rate": 9.786926736718848e-05, "loss": 2.3302345275878906, "memory(GiB)": 70.96, "step": 10890, "token_acc": 0.522633744855967, "train_speed(iter/s)": 0.67094 }, { "epoch": 0.46677520243348614, "grad_norm": 2.50978946685791, "learning_rate": 9.786732328360189e-05, "loss": 2.1946834564208983, "memory(GiB)": 70.96, "step": 10895, "token_acc": 0.5359712230215827, "train_speed(iter/s)": 0.670972 }, { "epoch": 0.4669894177627351, "grad_norm": 3.179630994796753, "learning_rate": 9.786537833285385e-05, "loss": 2.5887964248657225, "memory(GiB)": 70.96, "step": 10900, "token_acc": 0.45993031358885017, "train_speed(iter/s)": 0.670933 }, { "epoch": 0.46720363309198404, "grad_norm": 3.1275086402893066, "learning_rate": 9.786343251497956e-05, "loss": 2.6077152252197267, "memory(GiB)": 70.96, "step": 10905, "token_acc": 0.4743202416918429, "train_speed(iter/s)": 0.670935 }, { "epoch": 0.467417848421233, "grad_norm": 3.1334383487701416, "learning_rate": 9.786148583001431e-05, "loss": 2.745540428161621, "memory(GiB)": 70.96, "step": 10910, "token_acc": 0.4305177111716621, "train_speed(iter/s)": 0.670969 }, { "epoch": 0.467632063750482, "grad_norm": 3.0224080085754395, "learning_rate": 9.785953827799332e-05, "loss": 2.4191825866699217, "memory(GiB)": 70.96, "step": 10915, "token_acc": 0.49415204678362573, "train_speed(iter/s)": 0.670987 }, { "epoch": 0.4678462790797309, "grad_norm": 2.5128729343414307, "learning_rate": 9.785758985895192e-05, "loss": 2.3306774139404296, "memory(GiB)": 70.96, "step": 10920, "token_acc": 0.49491525423728816, "train_speed(iter/s)": 0.67099 }, { "epoch": 0.4680604944089799, "grad_norm": 4.351485729217529, "learning_rate": 9.785564057292538e-05, "loss": 2.3570146560668945, "memory(GiB)": 70.96, "step": 10925, "token_acc": 0.49809885931558934, 
"train_speed(iter/s)": 0.671014 }, { "epoch": 0.4682747097382289, "grad_norm": 3.6385915279388428, "learning_rate": 9.785369041994904e-05, "loss": 2.431440734863281, "memory(GiB)": 70.96, "step": 10930, "token_acc": 0.4935897435897436, "train_speed(iter/s)": 0.67108 }, { "epoch": 0.4684889250674778, "grad_norm": 3.9105401039123535, "learning_rate": 9.785173940005816e-05, "loss": 2.39694881439209, "memory(GiB)": 70.96, "step": 10935, "token_acc": 0.49266862170087977, "train_speed(iter/s)": 0.671004 }, { "epoch": 0.4687031403967268, "grad_norm": 2.9918272495269775, "learning_rate": 9.784978751328817e-05, "loss": 2.470682907104492, "memory(GiB)": 70.96, "step": 10940, "token_acc": 0.49393939393939396, "train_speed(iter/s)": 0.671036 }, { "epoch": 0.46891735572597576, "grad_norm": 5.589733600616455, "learning_rate": 9.784783475967438e-05, "loss": 2.6905038833618162, "memory(GiB)": 70.96, "step": 10945, "token_acc": 0.4339622641509434, "train_speed(iter/s)": 0.671052 }, { "epoch": 0.46913157105522474, "grad_norm": 3.6536457538604736, "learning_rate": 9.784588113925218e-05, "loss": 2.018789863586426, "memory(GiB)": 70.96, "step": 10950, "token_acc": 0.541958041958042, "train_speed(iter/s)": 0.671123 }, { "epoch": 0.46934578638447366, "grad_norm": 3.1465446949005127, "learning_rate": 9.784392665205695e-05, "loss": 2.6836212158203123, "memory(GiB)": 70.96, "step": 10955, "token_acc": 0.47129909365558914, "train_speed(iter/s)": 0.671043 }, { "epoch": 0.46956000171372264, "grad_norm": 4.094723224639893, "learning_rate": 9.784197129812411e-05, "loss": 2.1817134857177733, "memory(GiB)": 70.96, "step": 10960, "token_acc": 0.5503875968992248, "train_speed(iter/s)": 0.671012 }, { "epoch": 0.4697742170429716, "grad_norm": 3.3635685443878174, "learning_rate": 9.78400150774891e-05, "loss": 2.3056312561035157, "memory(GiB)": 70.96, "step": 10965, "token_acc": 0.47706422018348627, "train_speed(iter/s)": 0.670988 }, { "epoch": 0.46998843237222054, "grad_norm": 3.194026231765747, 
"learning_rate": 9.783805799018733e-05, "loss": 2.3683853149414062, "memory(GiB)": 70.96, "step": 10970, "token_acc": 0.4863013698630137, "train_speed(iter/s)": 0.671027 }, { "epoch": 0.4702026477014695, "grad_norm": 3.621450662612915, "learning_rate": 9.783610003625425e-05, "loss": 2.437575912475586, "memory(GiB)": 70.96, "step": 10975, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.67109 }, { "epoch": 0.4704168630307185, "grad_norm": 4.147749423980713, "learning_rate": 9.783414121572536e-05, "loss": 2.439041519165039, "memory(GiB)": 70.96, "step": 10980, "token_acc": 0.47297297297297297, "train_speed(iter/s)": 0.671013 }, { "epoch": 0.4706310783599674, "grad_norm": 4.286925315856934, "learning_rate": 9.783218152863611e-05, "loss": 2.274593162536621, "memory(GiB)": 70.96, "step": 10985, "token_acc": 0.5092936802973977, "train_speed(iter/s)": 0.671002 }, { "epoch": 0.4708452936892164, "grad_norm": 4.465156555175781, "learning_rate": 9.783022097502204e-05, "loss": 2.3895387649536133, "memory(GiB)": 70.96, "step": 10990, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.670998 }, { "epoch": 0.4710595090184654, "grad_norm": 5.350874900817871, "learning_rate": 9.782825955491865e-05, "loss": 2.3263729095458983, "memory(GiB)": 70.96, "step": 10995, "token_acc": 0.5391304347826087, "train_speed(iter/s)": 0.670977 }, { "epoch": 0.4712737243477143, "grad_norm": 3.7932639122009277, "learning_rate": 9.782629726836146e-05, "loss": 2.3303646087646483, "memory(GiB)": 70.96, "step": 11000, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.671016 }, { "epoch": 0.4712737243477143, "eval_loss": 2.0288000106811523, "eval_runtime": 16.9199, "eval_samples_per_second": 5.91, "eval_steps_per_second": 5.91, "eval_token_acc": 0.5129682997118156, "step": 11000 }, { "epoch": 0.4714879396769633, "grad_norm": 5.008099555969238, "learning_rate": 9.782433411538604e-05, "loss": 2.475352478027344, "memory(GiB)": 70.96, "step": 11005, "token_acc": 0.5068205666316894, 
"train_speed(iter/s)": 0.670258 }, { "epoch": 0.47170215500621226, "grad_norm": 3.2325427532196045, "learning_rate": 9.782237009602793e-05, "loss": 2.3267135620117188, "memory(GiB)": 70.96, "step": 11010, "token_acc": 0.5097276264591439, "train_speed(iter/s)": 0.670226 }, { "epoch": 0.4719163703354612, "grad_norm": 4.752223491668701, "learning_rate": 9.782040521032274e-05, "loss": 2.518596649169922, "memory(GiB)": 70.96, "step": 11015, "token_acc": 0.47232472324723246, "train_speed(iter/s)": 0.670252 }, { "epoch": 0.47213058566471017, "grad_norm": 3.3004212379455566, "learning_rate": 9.781843945830604e-05, "loss": 2.3534284591674806, "memory(GiB)": 70.96, "step": 11020, "token_acc": 0.5082508250825083, "train_speed(iter/s)": 0.670304 }, { "epoch": 0.47234480099395915, "grad_norm": 3.3478143215179443, "learning_rate": 9.781647284001347e-05, "loss": 2.5016462326049806, "memory(GiB)": 70.96, "step": 11025, "token_acc": 0.49498327759197325, "train_speed(iter/s)": 0.670334 }, { "epoch": 0.47255901632320807, "grad_norm": 4.939833641052246, "learning_rate": 9.781450535548063e-05, "loss": 2.197698211669922, "memory(GiB)": 70.96, "step": 11030, "token_acc": 0.5080645161290323, "train_speed(iter/s)": 0.670404 }, { "epoch": 0.47277323165245705, "grad_norm": 3.469306468963623, "learning_rate": 9.781253700474317e-05, "loss": 2.6934478759765623, "memory(GiB)": 70.96, "step": 11035, "token_acc": 0.4485049833887043, "train_speed(iter/s)": 0.670453 }, { "epoch": 0.47298744698170603, "grad_norm": 3.126739263534546, "learning_rate": 9.781056778783675e-05, "loss": 2.4143470764160155, "memory(GiB)": 70.96, "step": 11040, "token_acc": 0.4793103448275862, "train_speed(iter/s)": 0.67047 }, { "epoch": 0.47320166231095495, "grad_norm": 3.6309328079223633, "learning_rate": 9.780859770479703e-05, "loss": 2.543191146850586, "memory(GiB)": 70.96, "step": 11045, "token_acc": 0.45787545787545786, "train_speed(iter/s)": 0.670492 }, { "epoch": 0.47341587764020393, "grad_norm": 2.980456829071045, 
"learning_rate": 9.780662675565972e-05, "loss": 2.272580146789551, "memory(GiB)": 70.96, "step": 11050, "token_acc": 0.511400651465798, "train_speed(iter/s)": 0.670526 }, { "epoch": 0.4736300929694529, "grad_norm": 3.264503002166748, "learning_rate": 9.780465494046054e-05, "loss": 2.272992706298828, "memory(GiB)": 70.96, "step": 11055, "token_acc": 0.5350553505535055, "train_speed(iter/s)": 0.67043 }, { "epoch": 0.47384430829870183, "grad_norm": 3.7948694229125977, "learning_rate": 9.780268225923517e-05, "loss": 2.5330474853515623, "memory(GiB)": 70.96, "step": 11060, "token_acc": 0.4570446735395189, "train_speed(iter/s)": 0.670429 }, { "epoch": 0.4740585236279508, "grad_norm": 5.409688949584961, "learning_rate": 9.780070871201939e-05, "loss": 2.4277162551879883, "memory(GiB)": 70.96, "step": 11065, "token_acc": 0.4624505928853755, "train_speed(iter/s)": 0.670405 }, { "epoch": 0.4742727389571998, "grad_norm": 3.342125654220581, "learning_rate": 9.779873429884891e-05, "loss": 2.3130260467529298, "memory(GiB)": 70.96, "step": 11070, "token_acc": 0.4835164835164835, "train_speed(iter/s)": 0.670426 }, { "epoch": 0.4744869542864487, "grad_norm": 4.014329433441162, "learning_rate": 9.779675901975953e-05, "loss": 2.842173194885254, "memory(GiB)": 70.96, "step": 11075, "token_acc": 0.4746376811594203, "train_speed(iter/s)": 0.670464 }, { "epoch": 0.4747011696156977, "grad_norm": 4.433996200561523, "learning_rate": 9.779478287478703e-05, "loss": 2.563043975830078, "memory(GiB)": 70.96, "step": 11080, "token_acc": 0.47924528301886793, "train_speed(iter/s)": 0.670536 }, { "epoch": 0.4749153849449467, "grad_norm": 3.0410661697387695, "learning_rate": 9.779280586396719e-05, "loss": 2.3749671936035157, "memory(GiB)": 70.96, "step": 11085, "token_acc": 0.5064516129032258, "train_speed(iter/s)": 0.670517 }, { "epoch": 0.4751296002741956, "grad_norm": 3.4332475662231445, "learning_rate": 9.779082798733584e-05, "loss": 2.7084012985229493, "memory(GiB)": 70.96, "step": 11090, 
"token_acc": 0.42244224422442245, "train_speed(iter/s)": 0.670567 }, { "epoch": 0.4753438156034446, "grad_norm": 3.3581252098083496, "learning_rate": 9.778884924492882e-05, "loss": 2.5263437271118163, "memory(GiB)": 70.96, "step": 11095, "token_acc": 0.4594594594594595, "train_speed(iter/s)": 0.670553 }, { "epoch": 0.47555803093269355, "grad_norm": 3.3327605724334717, "learning_rate": 9.778686963678195e-05, "loss": 2.172041320800781, "memory(GiB)": 70.96, "step": 11100, "token_acc": 0.5448717948717948, "train_speed(iter/s)": 0.670509 }, { "epoch": 0.4757722462619425, "grad_norm": 3.28818416595459, "learning_rate": 9.778488916293112e-05, "loss": 2.2872541427612303, "memory(GiB)": 70.96, "step": 11105, "token_acc": 0.509090909090909, "train_speed(iter/s)": 0.670528 }, { "epoch": 0.47598646159119146, "grad_norm": 4.2927937507629395, "learning_rate": 9.77829078234122e-05, "loss": 2.3000114440917967, "memory(GiB)": 70.96, "step": 11110, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.670493 }, { "epoch": 0.47620067692044044, "grad_norm": 3.6900906562805176, "learning_rate": 9.778092561826106e-05, "loss": 2.4621603012084963, "memory(GiB)": 70.96, "step": 11115, "token_acc": 0.5015974440894568, "train_speed(iter/s)": 0.67048 }, { "epoch": 0.4764148922496894, "grad_norm": 2.8569319248199463, "learning_rate": 9.777894254751366e-05, "loss": 2.2854427337646483, "memory(GiB)": 70.96, "step": 11120, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.670482 }, { "epoch": 0.47662910757893834, "grad_norm": 2.989565134048462, "learning_rate": 9.777695861120588e-05, "loss": 2.516901969909668, "memory(GiB)": 70.96, "step": 11125, "token_acc": 0.459375, "train_speed(iter/s)": 0.670426 }, { "epoch": 0.4768433229081873, "grad_norm": 3.3408544063568115, "learning_rate": 9.777497380937368e-05, "loss": 2.385581207275391, "memory(GiB)": 70.96, "step": 11130, "token_acc": 0.4924812030075188, "train_speed(iter/s)": 0.670473 }, { "epoch": 0.4770575382374363, "grad_norm": 
3.8890154361724854, "learning_rate": 9.7772988142053e-05, "loss": 2.269021224975586, "memory(GiB)": 70.96, "step": 11135, "token_acc": 0.4942084942084942, "train_speed(iter/s)": 0.670453 }, { "epoch": 0.4772717535666852, "grad_norm": 5.429127216339111, "learning_rate": 9.777100160927983e-05, "loss": 2.4159021377563477, "memory(GiB)": 70.96, "step": 11140, "token_acc": 0.5055762081784386, "train_speed(iter/s)": 0.670514 }, { "epoch": 0.4774859688959342, "grad_norm": 3.3032171726226807, "learning_rate": 9.776901421109017e-05, "loss": 2.325882911682129, "memory(GiB)": 70.96, "step": 11145, "token_acc": 0.5436241610738255, "train_speed(iter/s)": 0.670507 }, { "epoch": 0.4777001842251832, "grad_norm": 3.301853895187378, "learning_rate": 9.776702594751999e-05, "loss": 2.3286178588867186, "memory(GiB)": 70.96, "step": 11150, "token_acc": 0.48627450980392156, "train_speed(iter/s)": 0.670508 }, { "epoch": 0.4779143995544321, "grad_norm": 3.929898977279663, "learning_rate": 9.776503681860534e-05, "loss": 2.5789770126342773, "memory(GiB)": 70.96, "step": 11155, "token_acc": 0.49454545454545457, "train_speed(iter/s)": 0.670486 }, { "epoch": 0.4781286148836811, "grad_norm": 3.099777936935425, "learning_rate": 9.776304682438223e-05, "loss": 2.5424240112304686, "memory(GiB)": 70.96, "step": 11160, "token_acc": 0.5034965034965035, "train_speed(iter/s)": 0.67044 }, { "epoch": 0.47834283021293006, "grad_norm": 5.1722917556762695, "learning_rate": 9.776105596488671e-05, "loss": 2.418440246582031, "memory(GiB)": 70.96, "step": 11165, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.670368 }, { "epoch": 0.478557045542179, "grad_norm": 3.546506881713867, "learning_rate": 9.775906424015489e-05, "loss": 2.1651948928833007, "memory(GiB)": 70.96, "step": 11170, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.670395 }, { "epoch": 0.47877126087142796, "grad_norm": 4.127270221710205, "learning_rate": 9.775707165022279e-05, "loss": 2.403122329711914, "memory(GiB)": 70.96, 
"step": 11175, "token_acc": 0.5229007633587787, "train_speed(iter/s)": 0.670421 }, { "epoch": 0.47898547620067694, "grad_norm": 3.7600390911102295, "learning_rate": 9.775507819512655e-05, "loss": 2.5579586029052734, "memory(GiB)": 70.96, "step": 11180, "token_acc": 0.48253968253968255, "train_speed(iter/s)": 0.670432 }, { "epoch": 0.47919969152992586, "grad_norm": 3.312432289123535, "learning_rate": 9.775308387490227e-05, "loss": 2.729108428955078, "memory(GiB)": 70.96, "step": 11185, "token_acc": 0.46540880503144655, "train_speed(iter/s)": 0.670442 }, { "epoch": 0.47941390685917484, "grad_norm": 4.3301849365234375, "learning_rate": 9.775108868958606e-05, "loss": 2.4489410400390623, "memory(GiB)": 70.96, "step": 11190, "token_acc": 0.46905537459283386, "train_speed(iter/s)": 0.670467 }, { "epoch": 0.4796281221884238, "grad_norm": 4.976790904998779, "learning_rate": 9.774909263921412e-05, "loss": 2.8241718292236326, "memory(GiB)": 70.96, "step": 11195, "token_acc": 0.425, "train_speed(iter/s)": 0.670441 }, { "epoch": 0.47984233751767275, "grad_norm": 3.7450668811798096, "learning_rate": 9.774709572382254e-05, "loss": 2.2791942596435546, "memory(GiB)": 70.96, "step": 11200, "token_acc": 0.5361216730038023, "train_speed(iter/s)": 0.670436 }, { "epoch": 0.4800565528469217, "grad_norm": 2.817652463912964, "learning_rate": 9.774509794344756e-05, "loss": 2.1706172943115236, "memory(GiB)": 70.96, "step": 11205, "token_acc": 0.5203488372093024, "train_speed(iter/s)": 0.670468 }, { "epoch": 0.4802707681761707, "grad_norm": 3.0622715950012207, "learning_rate": 9.774309929812533e-05, "loss": 2.238755989074707, "memory(GiB)": 70.96, "step": 11210, "token_acc": 0.5291666666666667, "train_speed(iter/s)": 0.670509 }, { "epoch": 0.4804849835054196, "grad_norm": 3.259493350982666, "learning_rate": 9.774109978789207e-05, "loss": 2.6292972564697266, "memory(GiB)": 70.96, "step": 11215, "token_acc": 0.4755700325732899, "train_speed(iter/s)": 0.670517 }, { "epoch": 0.4806991988346686, 
"grad_norm": 3.8139588832855225, "learning_rate": 9.7739099412784e-05, "loss": 2.410477638244629, "memory(GiB)": 70.96, "step": 11220, "token_acc": 0.5032679738562091, "train_speed(iter/s)": 0.67054 }, { "epoch": 0.4809134141639176, "grad_norm": 3.4635331630706787, "learning_rate": 9.773709817283736e-05, "loss": 2.56038875579834, "memory(GiB)": 70.96, "step": 11225, "token_acc": 0.4349315068493151, "train_speed(iter/s)": 0.670609 }, { "epoch": 0.4811276294931665, "grad_norm": 4.089176177978516, "learning_rate": 9.773509606808842e-05, "loss": 2.416499710083008, "memory(GiB)": 70.96, "step": 11230, "token_acc": 0.4840764331210191, "train_speed(iter/s)": 0.670644 }, { "epoch": 0.4813418448224155, "grad_norm": 3.4787404537200928, "learning_rate": 9.773309309857341e-05, "loss": 2.197915458679199, "memory(GiB)": 70.96, "step": 11235, "token_acc": 0.5330578512396694, "train_speed(iter/s)": 0.670641 }, { "epoch": 0.48155606015166447, "grad_norm": 3.3253490924835205, "learning_rate": 9.773108926432865e-05, "loss": 2.2451522827148436, "memory(GiB)": 70.96, "step": 11240, "token_acc": 0.5159235668789809, "train_speed(iter/s)": 0.670643 }, { "epoch": 0.4817702754809134, "grad_norm": 2.4065685272216797, "learning_rate": 9.772908456539046e-05, "loss": 2.439311408996582, "memory(GiB)": 70.96, "step": 11245, "token_acc": 0.4906166219839142, "train_speed(iter/s)": 0.670689 }, { "epoch": 0.48198449081016237, "grad_norm": 3.5473833084106445, "learning_rate": 9.772707900179509e-05, "loss": 2.315314292907715, "memory(GiB)": 70.96, "step": 11250, "token_acc": 0.4844961240310077, "train_speed(iter/s)": 0.670624 }, { "epoch": 0.48219870613941135, "grad_norm": 3.5796360969543457, "learning_rate": 9.772507257357893e-05, "loss": 2.3012008666992188, "memory(GiB)": 70.96, "step": 11255, "token_acc": 0.5052264808362369, "train_speed(iter/s)": 0.670638 }, { "epoch": 0.48241292146866027, "grad_norm": 3.313584089279175, "learning_rate": 9.772306528077833e-05, "loss": 2.098519515991211, 
"memory(GiB)": 70.96, "step": 11260, "token_acc": 0.5393258426966292, "train_speed(iter/s)": 0.670625 }, { "epoch": 0.48262713679790925, "grad_norm": 5.314758777618408, "learning_rate": 9.77210571234296e-05, "loss": 2.5806922912597656, "memory(GiB)": 70.96, "step": 11265, "token_acc": 0.43986254295532645, "train_speed(iter/s)": 0.670592 }, { "epoch": 0.48284135212715823, "grad_norm": 2.761502981185913, "learning_rate": 9.771904810156917e-05, "loss": 2.3753582000732423, "memory(GiB)": 70.96, "step": 11270, "token_acc": 0.4885057471264368, "train_speed(iter/s)": 0.670601 }, { "epoch": 0.48305556745640715, "grad_norm": 3.714533567428589, "learning_rate": 9.771703821523341e-05, "loss": 2.287105941772461, "memory(GiB)": 70.96, "step": 11275, "token_acc": 0.506896551724138, "train_speed(iter/s)": 0.670567 }, { "epoch": 0.48326978278565613, "grad_norm": 2.8723106384277344, "learning_rate": 9.771502746445875e-05, "loss": 2.7451892852783204, "memory(GiB)": 70.96, "step": 11280, "token_acc": 0.4448051948051948, "train_speed(iter/s)": 0.670643 }, { "epoch": 0.4834839981149051, "grad_norm": 3.715618133544922, "learning_rate": 9.771301584928161e-05, "loss": 2.2175294876098635, "memory(GiB)": 70.96, "step": 11285, "token_acc": 0.5296610169491526, "train_speed(iter/s)": 0.670614 }, { "epoch": 0.4836982134441541, "grad_norm": 3.689887046813965, "learning_rate": 9.771100336973843e-05, "loss": 2.539676475524902, "memory(GiB)": 70.96, "step": 11290, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.670722 }, { "epoch": 0.483912428773403, "grad_norm": 3.1056745052337646, "learning_rate": 9.770899002586567e-05, "loss": 2.3107376098632812, "memory(GiB)": 70.96, "step": 11295, "token_acc": 0.45652173913043476, "train_speed(iter/s)": 0.670698 }, { "epoch": 0.484126644102652, "grad_norm": 2.4962055683135986, "learning_rate": 9.770697581769979e-05, "loss": 2.341014862060547, "memory(GiB)": 70.96, "step": 11300, "token_acc": 0.5064102564102564, "train_speed(iter/s)": 0.670712 }, { 
"epoch": 0.48434085943190097, "grad_norm": 2.8055078983306885, "learning_rate": 9.770496074527729e-05, "loss": 2.472004508972168, "memory(GiB)": 70.96, "step": 11305, "token_acc": 0.4600760456273764, "train_speed(iter/s)": 0.670749 }, { "epoch": 0.4845550747611499, "grad_norm": 5.361180782318115, "learning_rate": 9.770294480863468e-05, "loss": 2.4845727920532226, "memory(GiB)": 70.96, "step": 11310, "token_acc": 0.4859437751004016, "train_speed(iter/s)": 0.670811 }, { "epoch": 0.4847692900903989, "grad_norm": 5.262938976287842, "learning_rate": 9.770092800780847e-05, "loss": 2.538748359680176, "memory(GiB)": 70.96, "step": 11315, "token_acc": 0.44964028776978415, "train_speed(iter/s)": 0.670767 }, { "epoch": 0.48498350541964785, "grad_norm": 3.6894404888153076, "learning_rate": 9.769891034283522e-05, "loss": 2.728185272216797, "memory(GiB)": 70.96, "step": 11320, "token_acc": 0.4337748344370861, "train_speed(iter/s)": 0.670788 }, { "epoch": 0.4851977207488968, "grad_norm": 2.793410539627075, "learning_rate": 9.769689181375146e-05, "loss": 2.3987430572509765, "memory(GiB)": 70.96, "step": 11325, "token_acc": 0.5, "train_speed(iter/s)": 0.670865 }, { "epoch": 0.48541193607814576, "grad_norm": 3.4134020805358887, "learning_rate": 9.769487242059374e-05, "loss": 2.6145477294921875, "memory(GiB)": 70.96, "step": 11330, "token_acc": 0.4473684210526316, "train_speed(iter/s)": 0.670916 }, { "epoch": 0.48562615140739473, "grad_norm": 6.997649669647217, "learning_rate": 9.769285216339868e-05, "loss": 2.5864402770996096, "memory(GiB)": 70.96, "step": 11335, "token_acc": 0.5019455252918288, "train_speed(iter/s)": 0.670953 }, { "epoch": 0.48584036673664366, "grad_norm": 3.655623435974121, "learning_rate": 9.769083104220286e-05, "loss": 2.6888208389282227, "memory(GiB)": 70.96, "step": 11340, "token_acc": 0.4294294294294294, "train_speed(iter/s)": 0.670991 }, { "epoch": 0.48605458206589264, "grad_norm": 3.7572827339172363, "learning_rate": 9.76888090570429e-05, "loss": 
2.6821968078613283, "memory(GiB)": 70.96, "step": 11345, "token_acc": 0.48201438848920863, "train_speed(iter/s)": 0.671062 }, { "epoch": 0.4862687973951416, "grad_norm": 3.6303226947784424, "learning_rate": 9.768678620795543e-05, "loss": 2.0088485717773437, "memory(GiB)": 70.96, "step": 11350, "token_acc": 0.5381818181818182, "train_speed(iter/s)": 0.671138 }, { "epoch": 0.48648301272439054, "grad_norm": 3.984602689743042, "learning_rate": 9.768476249497709e-05, "loss": 2.548391342163086, "memory(GiB)": 70.96, "step": 11355, "token_acc": 0.44106463878326996, "train_speed(iter/s)": 0.671193 }, { "epoch": 0.4866972280536395, "grad_norm": 5.2873334884643555, "learning_rate": 9.768273791814455e-05, "loss": 2.5648191452026365, "memory(GiB)": 70.96, "step": 11360, "token_acc": 0.4673913043478261, "train_speed(iter/s)": 0.671246 }, { "epoch": 0.4869114433828885, "grad_norm": 4.323720455169678, "learning_rate": 9.768071247749448e-05, "loss": 2.6302562713623048, "memory(GiB)": 70.96, "step": 11365, "token_acc": 0.43059490084985835, "train_speed(iter/s)": 0.671299 }, { "epoch": 0.4871256587121374, "grad_norm": 4.277100563049316, "learning_rate": 9.767868617306357e-05, "loss": 2.4829029083251952, "memory(GiB)": 70.96, "step": 11370, "token_acc": 0.49019607843137253, "train_speed(iter/s)": 0.671366 }, { "epoch": 0.4873398740413864, "grad_norm": 3.2613778114318848, "learning_rate": 9.767665900488853e-05, "loss": 2.7258838653564452, "memory(GiB)": 70.96, "step": 11375, "token_acc": 0.45821325648414984, "train_speed(iter/s)": 0.671427 }, { "epoch": 0.4875540893706354, "grad_norm": 3.1743083000183105, "learning_rate": 9.76746309730061e-05, "loss": 2.575848197937012, "memory(GiB)": 70.96, "step": 11380, "token_acc": 0.47277936962750716, "train_speed(iter/s)": 0.671469 }, { "epoch": 0.4877683046998843, "grad_norm": 2.8643274307250977, "learning_rate": 9.767260207745301e-05, "loss": 2.6386093139648437, "memory(GiB)": 70.96, "step": 11385, "token_acc": 0.4375, "train_speed(iter/s)": 
0.671529 }, { "epoch": 0.4879825200291333, "grad_norm": 3.88551664352417, "learning_rate": 9.767057231826601e-05, "loss": 2.3088842391967774, "memory(GiB)": 70.96, "step": 11390, "token_acc": 0.5052631578947369, "train_speed(iter/s)": 0.671445 }, { "epoch": 0.48819673535838226, "grad_norm": 4.247511863708496, "learning_rate": 9.766854169548187e-05, "loss": 2.626754379272461, "memory(GiB)": 70.96, "step": 11395, "token_acc": 0.4637223974763407, "train_speed(iter/s)": 0.671419 }, { "epoch": 0.4884109506876312, "grad_norm": 4.046696186065674, "learning_rate": 9.766651020913739e-05, "loss": 2.424673652648926, "memory(GiB)": 70.96, "step": 11400, "token_acc": 0.4562043795620438, "train_speed(iter/s)": 0.671417 }, { "epoch": 0.48862516601688016, "grad_norm": 4.507302761077881, "learning_rate": 9.766447785926936e-05, "loss": 2.787455177307129, "memory(GiB)": 70.96, "step": 11405, "token_acc": 0.4343065693430657, "train_speed(iter/s)": 0.671353 }, { "epoch": 0.48883938134612914, "grad_norm": 3.4490091800689697, "learning_rate": 9.766244464591459e-05, "loss": 2.4201017379760743, "memory(GiB)": 70.96, "step": 11410, "token_acc": 0.46503496503496505, "train_speed(iter/s)": 0.671363 }, { "epoch": 0.48905359667537807, "grad_norm": 3.9869205951690674, "learning_rate": 9.766041056910993e-05, "loss": 2.501797103881836, "memory(GiB)": 70.96, "step": 11415, "token_acc": 0.49206349206349204, "train_speed(iter/s)": 0.671316 }, { "epoch": 0.48926781200462705, "grad_norm": 2.652609348297119, "learning_rate": 9.765837562889221e-05, "loss": 2.6695301055908205, "memory(GiB)": 70.96, "step": 11420, "token_acc": 0.43717277486910994, "train_speed(iter/s)": 0.671359 }, { "epoch": 0.489482027333876, "grad_norm": 3.4380362033843994, "learning_rate": 9.765633982529833e-05, "loss": 2.3222227096557617, "memory(GiB)": 70.96, "step": 11425, "token_acc": 0.5368421052631579, "train_speed(iter/s)": 0.671464 }, { "epoch": 0.48969624266312495, "grad_norm": 3.352386951446533, "learning_rate": 
9.765430315836513e-05, "loss": 2.6127304077148437, "memory(GiB)": 70.96, "step": 11430, "token_acc": 0.45182724252491696, "train_speed(iter/s)": 0.671497 }, { "epoch": 0.4899104579923739, "grad_norm": 3.624906539916992, "learning_rate": 9.765226562812955e-05, "loss": 2.3564516067504884, "memory(GiB)": 70.96, "step": 11435, "token_acc": 0.517799352750809, "train_speed(iter/s)": 0.671544 }, { "epoch": 0.4901246733216229, "grad_norm": 3.4108266830444336, "learning_rate": 9.765022723462844e-05, "loss": 2.4818511962890626, "memory(GiB)": 70.96, "step": 11440, "token_acc": 0.44176706827309237, "train_speed(iter/s)": 0.671609 }, { "epoch": 0.49033888865087183, "grad_norm": 3.746577501296997, "learning_rate": 9.76481879778988e-05, "loss": 2.430887985229492, "memory(GiB)": 70.96, "step": 11445, "token_acc": 0.478134110787172, "train_speed(iter/s)": 0.671558 }, { "epoch": 0.4905531039801208, "grad_norm": 3.8634214401245117, "learning_rate": 9.764614785797752e-05, "loss": 2.108672523498535, "memory(GiB)": 70.96, "step": 11450, "token_acc": 0.5205992509363296, "train_speed(iter/s)": 0.671506 }, { "epoch": 0.4907673193093698, "grad_norm": 3.339914083480835, "learning_rate": 9.764410687490158e-05, "loss": 2.5074909210205076, "memory(GiB)": 70.96, "step": 11455, "token_acc": 0.49169435215946844, "train_speed(iter/s)": 0.671579 }, { "epoch": 0.49098153463861877, "grad_norm": 2.5724902153015137, "learning_rate": 9.764206502870793e-05, "loss": 2.3733083724975588, "memory(GiB)": 70.96, "step": 11460, "token_acc": 0.4788273615635179, "train_speed(iter/s)": 0.671524 }, { "epoch": 0.4911957499678677, "grad_norm": 3.685976982116699, "learning_rate": 9.764002231943361e-05, "loss": 2.4384929656982424, "memory(GiB)": 70.96, "step": 11465, "token_acc": 0.5047619047619047, "train_speed(iter/s)": 0.671606 }, { "epoch": 0.49140996529711667, "grad_norm": 3.529841661453247, "learning_rate": 9.76379787471156e-05, "loss": 2.640331268310547, "memory(GiB)": 70.96, "step": 11470, "token_acc": 
0.47491638795986624, "train_speed(iter/s)": 0.671663 }, { "epoch": 0.49162418062636565, "grad_norm": 2.805180549621582, "learning_rate": 9.76359343117909e-05, "loss": 2.1975221633911133, "memory(GiB)": 70.96, "step": 11475, "token_acc": 0.5282392026578073, "train_speed(iter/s)": 0.671683 }, { "epoch": 0.49183839595561457, "grad_norm": 4.036559104919434, "learning_rate": 9.763388901349655e-05, "loss": 2.7198554992675783, "memory(GiB)": 70.96, "step": 11480, "token_acc": 0.46229508196721314, "train_speed(iter/s)": 0.671741 }, { "epoch": 0.49205261128486355, "grad_norm": 3.3460164070129395, "learning_rate": 9.763184285226963e-05, "loss": 2.4471660614013673, "memory(GiB)": 70.96, "step": 11485, "token_acc": 0.48314606741573035, "train_speed(iter/s)": 0.671757 }, { "epoch": 0.49226682661411253, "grad_norm": 4.41947078704834, "learning_rate": 9.76297958281472e-05, "loss": 2.5696426391601563, "memory(GiB)": 70.96, "step": 11490, "token_acc": 0.47307692307692306, "train_speed(iter/s)": 0.671783 }, { "epoch": 0.49248104194336145, "grad_norm": 3.560063123703003, "learning_rate": 9.762774794116634e-05, "loss": 2.7335336685180662, "memory(GiB)": 70.96, "step": 11495, "token_acc": 0.4584837545126354, "train_speed(iter/s)": 0.671814 }, { "epoch": 0.49269525727261043, "grad_norm": 3.4642293453216553, "learning_rate": 9.762569919136414e-05, "loss": 2.2819475173950194, "memory(GiB)": 70.96, "step": 11500, "token_acc": 0.5058365758754864, "train_speed(iter/s)": 0.671854 }, { "epoch": 0.49269525727261043, "eval_loss": 1.98979914188385, "eval_runtime": 16.7683, "eval_samples_per_second": 5.964, "eval_steps_per_second": 5.964, "eval_token_acc": 0.4969097651421508, "step": 11500 }, { "epoch": 0.4929094726018594, "grad_norm": 3.0996620655059814, "learning_rate": 9.762364957877773e-05, "loss": 2.7822025299072264, "memory(GiB)": 70.96, "step": 11505, "token_acc": 0.4817127564674398, "train_speed(iter/s)": 0.671129 }, { "epoch": 0.49312368793110833, "grad_norm": 7.469452857971191, 
"learning_rate": 9.762159910344421e-05, "loss": 2.2955625534057615, "memory(GiB)": 70.96, "step": 11510, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.671175 }, { "epoch": 0.4933379032603573, "grad_norm": 3.866818904876709, "learning_rate": 9.761954776540078e-05, "loss": 2.56738166809082, "memory(GiB)": 70.96, "step": 11515, "token_acc": 0.5, "train_speed(iter/s)": 0.671137 }, { "epoch": 0.4935521185896063, "grad_norm": 3.2097911834716797, "learning_rate": 9.761749556468455e-05, "loss": 2.3960693359375, "memory(GiB)": 70.96, "step": 11520, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.671113 }, { "epoch": 0.4937663339188552, "grad_norm": 3.0682806968688965, "learning_rate": 9.761544250133273e-05, "loss": 2.643154335021973, "memory(GiB)": 70.96, "step": 11525, "token_acc": 0.4131944444444444, "train_speed(iter/s)": 0.67119 }, { "epoch": 0.4939805492481042, "grad_norm": 3.004641532897949, "learning_rate": 9.76133885753825e-05, "loss": 2.44574089050293, "memory(GiB)": 70.96, "step": 11530, "token_acc": 0.47674418604651164, "train_speed(iter/s)": 0.671145 }, { "epoch": 0.4941947645773532, "grad_norm": 3.2730555534362793, "learning_rate": 9.761133378687108e-05, "loss": 2.5174175262451173, "memory(GiB)": 70.96, "step": 11535, "token_acc": 0.4652014652014652, "train_speed(iter/s)": 0.671204 }, { "epoch": 0.4944089799066021, "grad_norm": 2.412814140319824, "learning_rate": 9.760927813583567e-05, "loss": 2.2270946502685547, "memory(GiB)": 70.96, "step": 11540, "token_acc": 0.5302013422818792, "train_speed(iter/s)": 0.671241 }, { "epoch": 0.4946231952358511, "grad_norm": 3.481163740158081, "learning_rate": 9.760722162231352e-05, "loss": 2.338092231750488, "memory(GiB)": 70.96, "step": 11545, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.6712 }, { "epoch": 0.49483741056510006, "grad_norm": 4.35977840423584, "learning_rate": 9.760516424634192e-05, "loss": 2.5853553771972657, "memory(GiB)": 70.96, "step": 11550, "token_acc": 
0.4788732394366197, "train_speed(iter/s)": 0.671268 }, { "epoch": 0.495051625894349, "grad_norm": 3.10810923576355, "learning_rate": 9.76031060079581e-05, "loss": 2.36778564453125, "memory(GiB)": 70.96, "step": 11555, "token_acc": 0.5045592705167173, "train_speed(iter/s)": 0.67134 }, { "epoch": 0.49526584122359796, "grad_norm": 3.0141046047210693, "learning_rate": 9.760104690719935e-05, "loss": 2.55081787109375, "memory(GiB)": 70.96, "step": 11560, "token_acc": 0.44660194174757284, "train_speed(iter/s)": 0.671371 }, { "epoch": 0.49548005655284694, "grad_norm": 3.7118265628814697, "learning_rate": 9.759898694410299e-05, "loss": 2.785566711425781, "memory(GiB)": 70.96, "step": 11565, "token_acc": 0.4234527687296417, "train_speed(iter/s)": 0.67136 }, { "epoch": 0.49569427188209586, "grad_norm": 3.7362916469573975, "learning_rate": 9.759692611870632e-05, "loss": 2.4925086975097654, "memory(GiB)": 70.96, "step": 11570, "token_acc": 0.47985347985347987, "train_speed(iter/s)": 0.671433 }, { "epoch": 0.49590848721134484, "grad_norm": 4.35245943069458, "learning_rate": 9.759486443104668e-05, "loss": 2.518222999572754, "memory(GiB)": 70.96, "step": 11575, "token_acc": 0.47854785478547857, "train_speed(iter/s)": 0.67145 }, { "epoch": 0.4961227025405938, "grad_norm": 3.4143974781036377, "learning_rate": 9.759280188116144e-05, "loss": 2.126613235473633, "memory(GiB)": 70.96, "step": 11580, "token_acc": 0.5361216730038023, "train_speed(iter/s)": 0.67146 }, { "epoch": 0.49633691786984274, "grad_norm": 3.4673383235931396, "learning_rate": 9.759073846908796e-05, "loss": 2.2209230422973634, "memory(GiB)": 70.96, "step": 11585, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.671495 }, { "epoch": 0.4965511331990917, "grad_norm": 3.5280873775482178, "learning_rate": 9.758867419486357e-05, "loss": 2.381216812133789, "memory(GiB)": 70.96, "step": 11590, "token_acc": 0.5266666666666666, "train_speed(iter/s)": 0.671453 }, { "epoch": 0.4967653485283407, "grad_norm": 
3.6560542583465576, "learning_rate": 9.758660905852574e-05, "loss": 2.449675369262695, "memory(GiB)": 70.96, "step": 11595, "token_acc": 0.47384615384615386, "train_speed(iter/s)": 0.671408 }, { "epoch": 0.4969795638575896, "grad_norm": 4.2714762687683105, "learning_rate": 9.758454306011182e-05, "loss": 2.479916000366211, "memory(GiB)": 70.96, "step": 11600, "token_acc": 0.48363636363636364, "train_speed(iter/s)": 0.671436 }, { "epoch": 0.4971937791868386, "grad_norm": 4.463154315948486, "learning_rate": 9.758247619965928e-05, "loss": 2.5692766189575194, "memory(GiB)": 70.96, "step": 11605, "token_acc": 0.4276094276094276, "train_speed(iter/s)": 0.671464 }, { "epoch": 0.4974079945160876, "grad_norm": 3.511709690093994, "learning_rate": 9.758040847720555e-05, "loss": 2.174623489379883, "memory(GiB)": 70.96, "step": 11610, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.671505 }, { "epoch": 0.4976222098453365, "grad_norm": 3.84507155418396, "learning_rate": 9.757833989278809e-05, "loss": 2.545375442504883, "memory(GiB)": 70.96, "step": 11615, "token_acc": 0.4597014925373134, "train_speed(iter/s)": 0.671521 }, { "epoch": 0.4978364251745855, "grad_norm": 3.6996641159057617, "learning_rate": 9.757627044644435e-05, "loss": 2.554524612426758, "memory(GiB)": 70.96, "step": 11620, "token_acc": 0.4495114006514658, "train_speed(iter/s)": 0.671564 }, { "epoch": 0.49805064050383446, "grad_norm": 3.0332226753234863, "learning_rate": 9.757420013821186e-05, "loss": 2.6606483459472656, "memory(GiB)": 70.96, "step": 11625, "token_acc": 0.43508771929824563, "train_speed(iter/s)": 0.671643 }, { "epoch": 0.49826485583308344, "grad_norm": 2.7529006004333496, "learning_rate": 9.75721289681281e-05, "loss": 2.6505611419677733, "memory(GiB)": 70.96, "step": 11630, "token_acc": 0.4840764331210191, "train_speed(iter/s)": 0.671707 }, { "epoch": 0.49847907116233237, "grad_norm": 3.0629796981811523, "learning_rate": 9.75700569362306e-05, "loss": 2.2996726989746095, "memory(GiB)": 
70.96, "step": 11635, "token_acc": 0.5015873015873016, "train_speed(iter/s)": 0.67165 }, { "epoch": 0.49869328649158134, "grad_norm": 2.75620436668396, "learning_rate": 9.75679840425569e-05, "loss": 2.182806968688965, "memory(GiB)": 70.96, "step": 11640, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.671619 }, { "epoch": 0.4989075018208303, "grad_norm": 3.493074417114258, "learning_rate": 9.756591028714452e-05, "loss": 2.199009132385254, "memory(GiB)": 70.96, "step": 11645, "token_acc": 0.52734375, "train_speed(iter/s)": 0.671634 }, { "epoch": 0.49912171715007925, "grad_norm": 2.70379900932312, "learning_rate": 9.756383567003107e-05, "loss": 2.6208141326904295, "memory(GiB)": 70.96, "step": 11650, "token_acc": 0.4470899470899471, "train_speed(iter/s)": 0.671618 }, { "epoch": 0.4993359324793282, "grad_norm": 3.6728596687316895, "learning_rate": 9.756176019125413e-05, "loss": 2.2626819610595703, "memory(GiB)": 70.96, "step": 11655, "token_acc": 0.4793650793650794, "train_speed(iter/s)": 0.671561 }, { "epoch": 0.4995501478085772, "grad_norm": 3.8879878520965576, "learning_rate": 9.755968385085128e-05, "loss": 2.672029495239258, "memory(GiB)": 70.96, "step": 11660, "token_acc": 0.4421052631578947, "train_speed(iter/s)": 0.671548 }, { "epoch": 0.49976436313782613, "grad_norm": 3.260103225708008, "learning_rate": 9.755760664886014e-05, "loss": 2.3320512771606445, "memory(GiB)": 70.96, "step": 11665, "token_acc": 0.49019607843137253, "train_speed(iter/s)": 0.671521 }, { "epoch": 0.4999785784670751, "grad_norm": 3.1637163162231445, "learning_rate": 9.755552858531833e-05, "loss": 2.2825332641601563, "memory(GiB)": 70.96, "step": 11670, "token_acc": 0.49387755102040815, "train_speed(iter/s)": 0.671565 }, { "epoch": 0.5001927937963241, "grad_norm": 4.2561163902282715, "learning_rate": 9.755344966026354e-05, "loss": 2.414881134033203, "memory(GiB)": 70.96, "step": 11675, "token_acc": 0.5051546391752577, "train_speed(iter/s)": 0.671616 }, { "epoch": 
0.500407009125573, "grad_norm": 3.005034923553467, "learning_rate": 9.755136987373338e-05, "loss": 2.6874128341674806, "memory(GiB)": 70.96, "step": 11680, "token_acc": 0.4954954954954955, "train_speed(iter/s)": 0.671531 }, { "epoch": 0.5006212244548219, "grad_norm": 5.192647457122803, "learning_rate": 9.754928922576555e-05, "loss": 2.362274742126465, "memory(GiB)": 70.96, "step": 11685, "token_acc": 0.46551724137931033, "train_speed(iter/s)": 0.671497 }, { "epoch": 0.500835439784071, "grad_norm": 4.370744705200195, "learning_rate": 9.754720771639773e-05, "loss": 2.5067775726318358, "memory(GiB)": 70.96, "step": 11690, "token_acc": 0.45864661654135336, "train_speed(iter/s)": 0.671514 }, { "epoch": 0.5010496551133199, "grad_norm": 3.598534345626831, "learning_rate": 9.754512534566767e-05, "loss": 2.476156997680664, "memory(GiB)": 70.96, "step": 11695, "token_acc": 0.46825396825396826, "train_speed(iter/s)": 0.671553 }, { "epoch": 0.5012638704425688, "grad_norm": 3.3036139011383057, "learning_rate": 9.754304211361304e-05, "loss": 2.186275100708008, "memory(GiB)": 70.96, "step": 11700, "token_acc": 0.46875, "train_speed(iter/s)": 0.671539 }, { "epoch": 0.5014780857718178, "grad_norm": 4.2818284034729, "learning_rate": 9.754095802027161e-05, "loss": 2.7241695404052733, "memory(GiB)": 70.96, "step": 11705, "token_acc": 0.4110787172011662, "train_speed(iter/s)": 0.671561 }, { "epoch": 0.5016923011010668, "grad_norm": 4.107313632965088, "learning_rate": 9.753887306568113e-05, "loss": 2.469835662841797, "memory(GiB)": 70.96, "step": 11710, "token_acc": 0.4699248120300752, "train_speed(iter/s)": 0.671562 }, { "epoch": 0.5019065164303158, "grad_norm": 3.038407802581787, "learning_rate": 9.753678724987936e-05, "loss": 2.6726047515869142, "memory(GiB)": 70.96, "step": 11715, "token_acc": 0.4378698224852071, "train_speed(iter/s)": 0.671577 }, { "epoch": 0.5021207317595647, "grad_norm": 4.07322883605957, "learning_rate": 9.75347005729041e-05, "loss": 2.330135726928711, 
"memory(GiB)": 70.96, "step": 11720, "token_acc": 0.5022026431718062, "train_speed(iter/s)": 0.67152 }, { "epoch": 0.5023349470888137, "grad_norm": 3.404280662536621, "learning_rate": 9.753261303479315e-05, "loss": 2.3159816741943358, "memory(GiB)": 70.96, "step": 11725, "token_acc": 0.4981684981684982, "train_speed(iter/s)": 0.671522 }, { "epoch": 0.5025491624180627, "grad_norm": 3.364905834197998, "learning_rate": 9.753052463558432e-05, "loss": 2.3998672485351564, "memory(GiB)": 70.96, "step": 11730, "token_acc": 0.5095785440613027, "train_speed(iter/s)": 0.671556 }, { "epoch": 0.5027633777473116, "grad_norm": 3.5712087154388428, "learning_rate": 9.752843537531546e-05, "loss": 2.1709514617919923, "memory(GiB)": 70.96, "step": 11735, "token_acc": 0.5192307692307693, "train_speed(iter/s)": 0.671585 }, { "epoch": 0.5029775930765605, "grad_norm": 3.8973891735076904, "learning_rate": 9.75263452540244e-05, "loss": 2.561374473571777, "memory(GiB)": 70.96, "step": 11740, "token_acc": 0.4440993788819876, "train_speed(iter/s)": 0.671636 }, { "epoch": 0.5031918084058096, "grad_norm": 3.2387967109680176, "learning_rate": 9.752425427174901e-05, "loss": 2.6258686065673826, "memory(GiB)": 70.96, "step": 11745, "token_acc": 0.4575645756457565, "train_speed(iter/s)": 0.671722 }, { "epoch": 0.5034060237350585, "grad_norm": 4.214389324188232, "learning_rate": 9.752216242852719e-05, "loss": 2.5318254470825194, "memory(GiB)": 70.96, "step": 11750, "token_acc": 0.4895833333333333, "train_speed(iter/s)": 0.671746 }, { "epoch": 0.5036202390643074, "grad_norm": 3.114840269088745, "learning_rate": 9.75200697243968e-05, "loss": 2.4871505737304687, "memory(GiB)": 70.96, "step": 11755, "token_acc": 0.4384057971014493, "train_speed(iter/s)": 0.671806 }, { "epoch": 0.5038344543935565, "grad_norm": 3.0748908519744873, "learning_rate": 9.751797615939577e-05, "loss": 2.955985450744629, "memory(GiB)": 70.96, "step": 11760, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.671804 }, { 
"epoch": 0.5040486697228054, "grad_norm": 3.239408254623413, "learning_rate": 9.751588173356204e-05, "loss": 2.1640485763549804, "memory(GiB)": 70.96, "step": 11765, "token_acc": 0.540785498489426, "train_speed(iter/s)": 0.671811 }, { "epoch": 0.5042628850520543, "grad_norm": 3.623112678527832, "learning_rate": 9.751378644693352e-05, "loss": 2.601653289794922, "memory(GiB)": 70.96, "step": 11770, "token_acc": 0.4664429530201342, "train_speed(iter/s)": 0.671788 }, { "epoch": 0.5044771003813033, "grad_norm": 3.7787749767303467, "learning_rate": 9.75116902995482e-05, "loss": 2.3172653198242186, "memory(GiB)": 70.96, "step": 11775, "token_acc": 0.4962121212121212, "train_speed(iter/s)": 0.671783 }, { "epoch": 0.5046913157105523, "grad_norm": 2.841041326522827, "learning_rate": 9.750959329144404e-05, "loss": 2.308502197265625, "memory(GiB)": 70.96, "step": 11780, "token_acc": 0.5378787878787878, "train_speed(iter/s)": 0.671846 }, { "epoch": 0.5049055310398012, "grad_norm": 4.76384973526001, "learning_rate": 9.750749542265902e-05, "loss": 2.286239814758301, "memory(GiB)": 70.96, "step": 11785, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.671853 }, { "epoch": 0.5051197463690502, "grad_norm": 3.3599021434783936, "learning_rate": 9.750539669323117e-05, "loss": 2.419381332397461, "memory(GiB)": 70.96, "step": 11790, "token_acc": 0.4627831715210356, "train_speed(iter/s)": 0.671851 }, { "epoch": 0.5053339616982991, "grad_norm": 4.607595920562744, "learning_rate": 9.75032971031985e-05, "loss": 2.3142301559448244, "memory(GiB)": 70.96, "step": 11795, "token_acc": 0.5610687022900763, "train_speed(iter/s)": 0.671886 }, { "epoch": 0.5055481770275481, "grad_norm": 3.1492886543273926, "learning_rate": 9.750119665259903e-05, "loss": 2.6179967880249024, "memory(GiB)": 70.96, "step": 11800, "token_acc": 0.451505016722408, "train_speed(iter/s)": 0.671888 }, { "epoch": 0.5057623923567971, "grad_norm": 2.9072799682617188, "learning_rate": 9.749909534147081e-05, "loss": 
2.3408893585205077, "memory(GiB)": 70.96, "step": 11805, "token_acc": 0.5241635687732342, "train_speed(iter/s)": 0.671896 }, { "epoch": 0.505976607686046, "grad_norm": 4.864026069641113, "learning_rate": 9.749699316985193e-05, "loss": 2.5260183334350588, "memory(GiB)": 70.96, "step": 11810, "token_acc": 0.49480968858131485, "train_speed(iter/s)": 0.671888 }, { "epoch": 0.5061908230152949, "grad_norm": 3.1911208629608154, "learning_rate": 9.749489013778047e-05, "loss": 2.737284469604492, "memory(GiB)": 70.96, "step": 11815, "token_acc": 0.4440677966101695, "train_speed(iter/s)": 0.671904 }, { "epoch": 0.506405038344544, "grad_norm": 3.846933364868164, "learning_rate": 9.749278624529452e-05, "loss": 2.5153583526611327, "memory(GiB)": 70.96, "step": 11820, "token_acc": 0.4634146341463415, "train_speed(iter/s)": 0.671979 }, { "epoch": 0.5066192536737929, "grad_norm": 3.431077241897583, "learning_rate": 9.749068149243219e-05, "loss": 2.011977195739746, "memory(GiB)": 70.96, "step": 11825, "token_acc": 0.5219123505976095, "train_speed(iter/s)": 0.671974 }, { "epoch": 0.5068334690030418, "grad_norm": 3.54508113861084, "learning_rate": 9.748857587923162e-05, "loss": 2.332306480407715, "memory(GiB)": 70.96, "step": 11830, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.671975 }, { "epoch": 0.5070476843322909, "grad_norm": 3.8444228172302246, "learning_rate": 9.748646940573094e-05, "loss": 2.5568960189819334, "memory(GiB)": 70.96, "step": 11835, "token_acc": 0.48, "train_speed(iter/s)": 0.671996 }, { "epoch": 0.5072618996615398, "grad_norm": 3.755079746246338, "learning_rate": 9.748436207196834e-05, "loss": 2.6938024520874024, "memory(GiB)": 70.96, "step": 11840, "token_acc": 0.46303501945525294, "train_speed(iter/s)": 0.671934 }, { "epoch": 0.5074761149907887, "grad_norm": 5.010995864868164, "learning_rate": 9.748225387798195e-05, "loss": 2.3141338348388674, "memory(GiB)": 70.96, "step": 11845, "token_acc": 0.4944237918215613, "train_speed(iter/s)": 0.671971 }, { 
"epoch": 0.5076903303200377, "grad_norm": 2.8776865005493164, "learning_rate": 9.748014482381e-05, "loss": 2.805523681640625, "memory(GiB)": 70.96, "step": 11850, "token_acc": 0.4241573033707865, "train_speed(iter/s)": 0.672002 }, { "epoch": 0.5079045456492867, "grad_norm": 3.160721778869629, "learning_rate": 9.74780349094907e-05, "loss": 2.6301082611083983, "memory(GiB)": 70.96, "step": 11855, "token_acc": 0.43359375, "train_speed(iter/s)": 0.672033 }, { "epoch": 0.5081187609785356, "grad_norm": 2.874326705932617, "learning_rate": 9.747592413506224e-05, "loss": 2.52685661315918, "memory(GiB)": 70.96, "step": 11860, "token_acc": 0.4793103448275862, "train_speed(iter/s)": 0.672065 }, { "epoch": 0.5083329763077846, "grad_norm": 3.2768406867980957, "learning_rate": 9.747381250056289e-05, "loss": 2.250773811340332, "memory(GiB)": 70.96, "step": 11865, "token_acc": 0.5101214574898786, "train_speed(iter/s)": 0.672098 }, { "epoch": 0.5085471916370335, "grad_norm": 5.283932685852051, "learning_rate": 9.74717000060309e-05, "loss": 2.5849531173706053, "memory(GiB)": 70.96, "step": 11870, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.672103 }, { "epoch": 0.5087614069662825, "grad_norm": 2.8483963012695312, "learning_rate": 9.746958665150451e-05, "loss": 2.306132507324219, "memory(GiB)": 70.96, "step": 11875, "token_acc": 0.5230125523012552, "train_speed(iter/s)": 0.67212 }, { "epoch": 0.5089756222955315, "grad_norm": 2.4900388717651367, "learning_rate": 9.746747243702206e-05, "loss": 2.5632017135620115, "memory(GiB)": 70.96, "step": 11880, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.672132 }, { "epoch": 0.5091898376247804, "grad_norm": 2.8381292819976807, "learning_rate": 9.746535736262178e-05, "loss": 2.65728645324707, "memory(GiB)": 70.96, "step": 11885, "token_acc": 0.4479166666666667, "train_speed(iter/s)": 0.672134 }, { "epoch": 0.5094040529540294, "grad_norm": 3.2776148319244385, "learning_rate": 9.746324142834205e-05, "loss": 
2.6329319000244142, "memory(GiB)": 70.96, "step": 11890, "token_acc": 0.5125, "train_speed(iter/s)": 0.672156 }, { "epoch": 0.5096182682832784, "grad_norm": 3.6265859603881836, "learning_rate": 9.746112463422118e-05, "loss": 2.191189193725586, "memory(GiB)": 70.96, "step": 11895, "token_acc": 0.5048543689320388, "train_speed(iter/s)": 0.672151 }, { "epoch": 0.5098324836125273, "grad_norm": 3.798255443572998, "learning_rate": 9.74590069802975e-05, "loss": 2.4250215530395507, "memory(GiB)": 70.96, "step": 11900, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.672071 }, { "epoch": 0.5100466989417762, "grad_norm": 3.5782434940338135, "learning_rate": 9.74568884666094e-05, "loss": 2.5564056396484376, "memory(GiB)": 70.96, "step": 11905, "token_acc": 0.47, "train_speed(iter/s)": 0.672119 }, { "epoch": 0.5102609142710253, "grad_norm": 4.446252822875977, "learning_rate": 9.745476909319524e-05, "loss": 2.521225357055664, "memory(GiB)": 70.96, "step": 11910, "token_acc": 0.458955223880597, "train_speed(iter/s)": 0.67218 }, { "epoch": 0.5104751296002742, "grad_norm": 3.4304893016815186, "learning_rate": 9.745264886009344e-05, "loss": 2.3020057678222656, "memory(GiB)": 70.96, "step": 11915, "token_acc": 0.5015873015873016, "train_speed(iter/s)": 0.672123 }, { "epoch": 0.5106893449295231, "grad_norm": 3.9463894367218018, "learning_rate": 9.745052776734236e-05, "loss": 2.544127655029297, "memory(GiB)": 70.96, "step": 11920, "token_acc": 0.5, "train_speed(iter/s)": 0.672102 }, { "epoch": 0.5109035602587721, "grad_norm": 3.426011562347412, "learning_rate": 9.744840581498048e-05, "loss": 2.217929649353027, "memory(GiB)": 70.96, "step": 11925, "token_acc": 0.511326860841424, "train_speed(iter/s)": 0.672195 }, { "epoch": 0.5111177755880211, "grad_norm": 3.1584346294403076, "learning_rate": 9.744628300304621e-05, "loss": 2.6890316009521484, "memory(GiB)": 70.96, "step": 11930, "token_acc": 0.42528735632183906, "train_speed(iter/s)": 0.672048 }, { "epoch": 0.51133199091727, 
"grad_norm": 3.7205450534820557, "learning_rate": 9.744415933157803e-05, "loss": 2.236942481994629, "memory(GiB)": 70.96, "step": 11935, "token_acc": 0.5491071428571429, "train_speed(iter/s)": 0.672066 }, { "epoch": 0.511546206246519, "grad_norm": 3.7603542804718018, "learning_rate": 9.744203480061438e-05, "loss": 2.6115562438964846, "memory(GiB)": 70.96, "step": 11940, "token_acc": 0.4669260700389105, "train_speed(iter/s)": 0.672109 }, { "epoch": 0.511760421575768, "grad_norm": 4.3333635330200195, "learning_rate": 9.743990941019377e-05, "loss": 2.5463272094726563, "memory(GiB)": 70.96, "step": 11945, "token_acc": 0.475, "train_speed(iter/s)": 0.672108 }, { "epoch": 0.5119746369050169, "grad_norm": 3.3250784873962402, "learning_rate": 9.74377831603547e-05, "loss": 2.5103775024414063, "memory(GiB)": 70.96, "step": 11950, "token_acc": 0.4769736842105263, "train_speed(iter/s)": 0.672144 }, { "epoch": 0.5121888522342659, "grad_norm": 3.6476376056671143, "learning_rate": 9.743565605113568e-05, "loss": 2.7391963958740235, "memory(GiB)": 70.96, "step": 11955, "token_acc": 0.4144486692015209, "train_speed(iter/s)": 0.672185 }, { "epoch": 0.5124030675635148, "grad_norm": 2.623647689819336, "learning_rate": 9.743352808257527e-05, "loss": 2.400534820556641, "memory(GiB)": 70.96, "step": 11960, "token_acc": 0.5273224043715847, "train_speed(iter/s)": 0.672183 }, { "epoch": 0.5126172828927638, "grad_norm": 4.0741424560546875, "learning_rate": 9.743139925471198e-05, "loss": 2.390140914916992, "memory(GiB)": 70.96, "step": 11965, "token_acc": 0.487012987012987, "train_speed(iter/s)": 0.672148 }, { "epoch": 0.5128314982220128, "grad_norm": 3.6700408458709717, "learning_rate": 9.742926956758442e-05, "loss": 2.5202564239501952, "memory(GiB)": 70.96, "step": 11970, "token_acc": 0.4746376811594203, "train_speed(iter/s)": 0.672182 }, { "epoch": 0.5130457135512617, "grad_norm": 3.5423269271850586, "learning_rate": 9.742713902123113e-05, "loss": 2.598152923583984, "memory(GiB)": 70.96, 
"step": 11975, "token_acc": 0.4722222222222222, "train_speed(iter/s)": 0.672255 }, { "epoch": 0.5132599288805106, "grad_norm": 4.165224552154541, "learning_rate": 9.742500761569074e-05, "loss": 2.4544532775878904, "memory(GiB)": 70.96, "step": 11980, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.672257 }, { "epoch": 0.5134741442097597, "grad_norm": 2.9423718452453613, "learning_rate": 9.742287535100184e-05, "loss": 2.120349884033203, "memory(GiB)": 70.96, "step": 11985, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.672286 }, { "epoch": 0.5136883595390086, "grad_norm": 3.215383291244507, "learning_rate": 9.742074222720308e-05, "loss": 2.6314504623413084, "memory(GiB)": 70.96, "step": 11990, "token_acc": 0.45394736842105265, "train_speed(iter/s)": 0.672338 }, { "epoch": 0.5139025748682575, "grad_norm": 3.77051043510437, "learning_rate": 9.741860824433308e-05, "loss": 2.3997764587402344, "memory(GiB)": 70.96, "step": 11995, "token_acc": 0.5149253731343284, "train_speed(iter/s)": 0.672289 }, { "epoch": 0.5141167901975066, "grad_norm": 4.20968770980835, "learning_rate": 9.741647340243051e-05, "loss": 2.4207815170288085, "memory(GiB)": 70.96, "step": 12000, "token_acc": 0.5, "train_speed(iter/s)": 0.672356 }, { "epoch": 0.5141167901975066, "eval_loss": 2.1269047260284424, "eval_runtime": 17.0228, "eval_samples_per_second": 5.874, "eval_steps_per_second": 5.874, "eval_token_acc": 0.4930747922437673, "step": 12000 }, { "epoch": 0.5143310055267555, "grad_norm": 3.669879198074341, "learning_rate": 9.741433770153404e-05, "loss": 2.2929067611694336, "memory(GiB)": 70.96, "step": 12005, "token_acc": 0.5015739769150053, "train_speed(iter/s)": 0.671635 }, { "epoch": 0.5145452208560044, "grad_norm": 2.880828857421875, "learning_rate": 9.741220114168237e-05, "loss": 2.423558807373047, "memory(GiB)": 70.96, "step": 12010, "token_acc": 0.4943820224719101, "train_speed(iter/s)": 0.671605 }, { "epoch": 0.5147594361852534, "grad_norm": 3.7935469150543213, 
"learning_rate": 9.74100637229142e-05, "loss": 2.289639472961426, "memory(GiB)": 70.96, "step": 12015, "token_acc": 0.5155038759689923, "train_speed(iter/s)": 0.671635 }, { "epoch": 0.5149736515145024, "grad_norm": 3.6788864135742188, "learning_rate": 9.740792544526824e-05, "loss": 2.2823429107666016, "memory(GiB)": 70.96, "step": 12020, "token_acc": 0.47796610169491527, "train_speed(iter/s)": 0.671673 }, { "epoch": 0.5151878668437513, "grad_norm": 10.5322265625, "learning_rate": 9.740578630878326e-05, "loss": 2.6182092666625976, "memory(GiB)": 70.96, "step": 12025, "token_acc": 0.4627450980392157, "train_speed(iter/s)": 0.67176 }, { "epoch": 0.5154020821730003, "grad_norm": 3.030780076980591, "learning_rate": 9.740364631349799e-05, "loss": 2.2544593811035156, "memory(GiB)": 70.96, "step": 12030, "token_acc": 0.5488215488215489, "train_speed(iter/s)": 0.671786 }, { "epoch": 0.5156162975022492, "grad_norm": 3.8789446353912354, "learning_rate": 9.740150545945118e-05, "loss": 2.5687545776367187, "memory(GiB)": 70.96, "step": 12035, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.671743 }, { "epoch": 0.5158305128314982, "grad_norm": 4.042695045471191, "learning_rate": 9.739936374668164e-05, "loss": 2.5039596557617188, "memory(GiB)": 70.96, "step": 12040, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.671749 }, { "epoch": 0.5160447281607472, "grad_norm": 3.071727991104126, "learning_rate": 9.739722117522816e-05, "loss": 2.47239990234375, "memory(GiB)": 70.96, "step": 12045, "token_acc": 0.48161764705882354, "train_speed(iter/s)": 0.671724 }, { "epoch": 0.5162589434899961, "grad_norm": 4.15148401260376, "learning_rate": 9.739507774512957e-05, "loss": 2.420054244995117, "memory(GiB)": 70.96, "step": 12050, "token_acc": 0.4742268041237113, "train_speed(iter/s)": 0.671716 }, { "epoch": 0.5164731588192452, "grad_norm": 3.2118430137634277, "learning_rate": 9.739293345642466e-05, "loss": 2.3337732315063477, "memory(GiB)": 70.96, "step": 12055, "token_acc": 
0.48905109489051096, "train_speed(iter/s)": 0.671648 }, { "epoch": 0.5166873741484941, "grad_norm": 3.5726511478424072, "learning_rate": 9.739078830915233e-05, "loss": 2.722496223449707, "memory(GiB)": 70.96, "step": 12060, "token_acc": 0.42857142857142855, "train_speed(iter/s)": 0.671687 }, { "epoch": 0.516901589477743, "grad_norm": 3.862825632095337, "learning_rate": 9.73886423033514e-05, "loss": 2.6900283813476564, "memory(GiB)": 70.96, "step": 12065, "token_acc": 0.45016077170418006, "train_speed(iter/s)": 0.67165 }, { "epoch": 0.517115804806992, "grad_norm": 2.3708295822143555, "learning_rate": 9.738649543906075e-05, "loss": 2.4820722579956054, "memory(GiB)": 70.96, "step": 12070, "token_acc": 0.4913494809688581, "train_speed(iter/s)": 0.671642 }, { "epoch": 0.517330020136241, "grad_norm": 3.3082656860351562, "learning_rate": 9.738434771631931e-05, "loss": 2.500839424133301, "memory(GiB)": 70.96, "step": 12075, "token_acc": 0.5114285714285715, "train_speed(iter/s)": 0.671603 }, { "epoch": 0.5175442354654899, "grad_norm": 2.9773192405700684, "learning_rate": 9.738219913516594e-05, "loss": 2.365749168395996, "memory(GiB)": 70.96, "step": 12080, "token_acc": 0.501779359430605, "train_speed(iter/s)": 0.671627 }, { "epoch": 0.5177584507947389, "grad_norm": 3.859179735183716, "learning_rate": 9.738004969563959e-05, "loss": 2.162284278869629, "memory(GiB)": 70.96, "step": 12085, "token_acc": 0.4811715481171548, "train_speed(iter/s)": 0.671641 }, { "epoch": 0.5179726661239878, "grad_norm": 3.2009332180023193, "learning_rate": 9.73778993977792e-05, "loss": 2.394257354736328, "memory(GiB)": 70.96, "step": 12090, "token_acc": 0.5141843971631206, "train_speed(iter/s)": 0.671679 }, { "epoch": 0.5181868814532368, "grad_norm": 4.4186882972717285, "learning_rate": 9.737574824162371e-05, "loss": 2.6934043884277346, "memory(GiB)": 70.96, "step": 12095, "token_acc": 0.44266666666666665, "train_speed(iter/s)": 0.671708 }, { "epoch": 0.5184010967824858, "grad_norm": 
2.7586309909820557, "learning_rate": 9.73735962272121e-05, "loss": 2.3119365692138674, "memory(GiB)": 70.96, "step": 12100, "token_acc": 0.5016949152542373, "train_speed(iter/s)": 0.671663 }, { "epoch": 0.5186153121117347, "grad_norm": 6.891289234161377, "learning_rate": 9.737144335458335e-05, "loss": 2.741316223144531, "memory(GiB)": 70.96, "step": 12105, "token_acc": 0.5093457943925234, "train_speed(iter/s)": 0.67165 }, { "epoch": 0.5188295274409837, "grad_norm": 4.119211673736572, "learning_rate": 9.736928962377647e-05, "loss": 2.323820877075195, "memory(GiB)": 70.96, "step": 12110, "token_acc": 0.4889867841409692, "train_speed(iter/s)": 0.671647 }, { "epoch": 0.5190437427702327, "grad_norm": 5.328267574310303, "learning_rate": 9.736713503483048e-05, "loss": 2.3114843368530273, "memory(GiB)": 70.96, "step": 12115, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.671679 }, { "epoch": 0.5192579580994816, "grad_norm": 2.5743446350097656, "learning_rate": 9.73649795877844e-05, "loss": 2.174683380126953, "memory(GiB)": 70.96, "step": 12120, "token_acc": 0.5544217687074829, "train_speed(iter/s)": 0.671728 }, { "epoch": 0.5194721734287305, "grad_norm": 4.279867649078369, "learning_rate": 9.736282328267727e-05, "loss": 2.4429492950439453, "memory(GiB)": 70.96, "step": 12125, "token_acc": 0.5051546391752577, "train_speed(iter/s)": 0.671625 }, { "epoch": 0.5196863887579796, "grad_norm": 3.3920419216156006, "learning_rate": 9.73606661195482e-05, "loss": 2.5384950637817383, "memory(GiB)": 70.96, "step": 12130, "token_acc": 0.4735099337748344, "train_speed(iter/s)": 0.671621 }, { "epoch": 0.5199006040872285, "grad_norm": 3.1182358264923096, "learning_rate": 9.735850809843621e-05, "loss": 2.4610370635986327, "memory(GiB)": 70.96, "step": 12135, "token_acc": 0.45874587458745875, "train_speed(iter/s)": 0.671651 }, { "epoch": 0.5201148194164774, "grad_norm": 2.996783494949341, "learning_rate": 9.735634921938042e-05, "loss": 2.783457565307617, "memory(GiB)": 70.96, 
"step": 12140, "token_acc": 0.47038327526132406, "train_speed(iter/s)": 0.671627 }, { "epoch": 0.5203290347457264, "grad_norm": 3.4172911643981934, "learning_rate": 9.735418948241994e-05, "loss": 2.51238956451416, "memory(GiB)": 70.96, "step": 12145, "token_acc": 0.5068493150684932, "train_speed(iter/s)": 0.671558 }, { "epoch": 0.5205432500749754, "grad_norm": 3.2329673767089844, "learning_rate": 9.73520288875939e-05, "loss": 2.3028804779052736, "memory(GiB)": 70.96, "step": 12150, "token_acc": 0.5, "train_speed(iter/s)": 0.671569 }, { "epoch": 0.5207574654042243, "grad_norm": 3.0043928623199463, "learning_rate": 9.734986743494143e-05, "loss": 2.6034313201904298, "memory(GiB)": 70.96, "step": 12155, "token_acc": 0.46178343949044587, "train_speed(iter/s)": 0.671547 }, { "epoch": 0.5209716807334733, "grad_norm": 2.8481452465057373, "learning_rate": 9.73477051245017e-05, "loss": 2.5820232391357423, "memory(GiB)": 70.96, "step": 12160, "token_acc": 0.4634920634920635, "train_speed(iter/s)": 0.671496 }, { "epoch": 0.5211858960627223, "grad_norm": 4.90792989730835, "learning_rate": 9.734554195631386e-05, "loss": 2.0471920013427733, "memory(GiB)": 70.96, "step": 12165, "token_acc": 0.5119047619047619, "train_speed(iter/s)": 0.671522 }, { "epoch": 0.5214001113919712, "grad_norm": 3.325274705886841, "learning_rate": 9.734337793041715e-05, "loss": 2.4434118270874023, "memory(GiB)": 70.96, "step": 12170, "token_acc": 0.47315436241610737, "train_speed(iter/s)": 0.671464 }, { "epoch": 0.5216143267212202, "grad_norm": 3.5566022396087646, "learning_rate": 9.734121304685071e-05, "loss": 2.2689205169677735, "memory(GiB)": 70.96, "step": 12175, "token_acc": 0.4910394265232975, "train_speed(iter/s)": 0.67146 }, { "epoch": 0.5218285420504691, "grad_norm": 3.308190107345581, "learning_rate": 9.733904730565379e-05, "loss": 2.294184684753418, "memory(GiB)": 70.96, "step": 12180, "token_acc": 0.49848024316109424, "train_speed(iter/s)": 0.671447 }, { "epoch": 0.5220427573797181, 
"grad_norm": 3.1434335708618164, "learning_rate": 9.733688070686562e-05, "loss": 2.5018524169921874, "memory(GiB)": 70.96, "step": 12185, "token_acc": 0.4811320754716981, "train_speed(iter/s)": 0.671426 }, { "epoch": 0.5222569727089671, "grad_norm": 4.432194709777832, "learning_rate": 9.733471325052545e-05, "loss": 2.4944921493530274, "memory(GiB)": 70.96, "step": 12190, "token_acc": 0.46229508196721314, "train_speed(iter/s)": 0.671475 }, { "epoch": 0.522471188038216, "grad_norm": 3.712569236755371, "learning_rate": 9.733254493667255e-05, "loss": 2.350665473937988, "memory(GiB)": 70.96, "step": 12195, "token_acc": 0.5318471337579618, "train_speed(iter/s)": 0.6714 }, { "epoch": 0.5226854033674649, "grad_norm": 2.839815616607666, "learning_rate": 9.73303757653462e-05, "loss": 2.1981760025024415, "memory(GiB)": 70.96, "step": 12200, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.671435 }, { "epoch": 0.522899618696714, "grad_norm": 4.287315845489502, "learning_rate": 9.732820573658569e-05, "loss": 2.481785202026367, "memory(GiB)": 70.96, "step": 12205, "token_acc": 0.46545454545454545, "train_speed(iter/s)": 0.671431 }, { "epoch": 0.5231138340259629, "grad_norm": 3.287285804748535, "learning_rate": 9.732603485043033e-05, "loss": 2.237906265258789, "memory(GiB)": 70.96, "step": 12210, "token_acc": 0.5207547169811321, "train_speed(iter/s)": 0.671461 }, { "epoch": 0.5233280493552118, "grad_norm": 2.946629762649536, "learning_rate": 9.732386310691946e-05, "loss": 2.4969554901123048, "memory(GiB)": 70.96, "step": 12215, "token_acc": 0.5048231511254019, "train_speed(iter/s)": 0.67154 }, { "epoch": 0.5235422646844609, "grad_norm": 3.0513482093811035, "learning_rate": 9.732169050609241e-05, "loss": 2.223262405395508, "memory(GiB)": 70.96, "step": 12220, "token_acc": 0.5233333333333333, "train_speed(iter/s)": 0.671488 }, { "epoch": 0.5237564800137098, "grad_norm": 2.616330146789551, "learning_rate": 9.731951704798857e-05, "loss": 2.186240386962891, "memory(GiB)": 
70.96, "step": 12225, "token_acc": 0.5150501672240803, "train_speed(iter/s)": 0.671431 }, { "epoch": 0.5239706953429587, "grad_norm": 3.280501365661621, "learning_rate": 9.731734273264725e-05, "loss": 2.4076332092285155, "memory(GiB)": 70.96, "step": 12230, "token_acc": 0.49174917491749176, "train_speed(iter/s)": 0.671438 }, { "epoch": 0.5241849106722077, "grad_norm": 3.0515525341033936, "learning_rate": 9.73151675601079e-05, "loss": 2.3317035675048827, "memory(GiB)": 70.96, "step": 12235, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.671432 }, { "epoch": 0.5243991260014567, "grad_norm": 2.8457136154174805, "learning_rate": 9.731299153040991e-05, "loss": 2.5471105575561523, "memory(GiB)": 70.96, "step": 12240, "token_acc": 0.471976401179941, "train_speed(iter/s)": 0.671362 }, { "epoch": 0.5246133413307056, "grad_norm": 3.393770456314087, "learning_rate": 9.731081464359268e-05, "loss": 2.1279468536376953, "memory(GiB)": 70.96, "step": 12245, "token_acc": 0.5183946488294314, "train_speed(iter/s)": 0.671417 }, { "epoch": 0.5248275566599546, "grad_norm": 3.206292152404785, "learning_rate": 9.730863689969567e-05, "loss": 2.8119693756103517, "memory(GiB)": 70.96, "step": 12250, "token_acc": 0.4290657439446367, "train_speed(iter/s)": 0.671462 }, { "epoch": 0.5250417719892035, "grad_norm": 4.448146820068359, "learning_rate": 9.730645829875833e-05, "loss": 2.5847307205200196, "memory(GiB)": 70.96, "step": 12255, "token_acc": 0.4867924528301887, "train_speed(iter/s)": 0.671534 }, { "epoch": 0.5252559873184525, "grad_norm": 3.1026620864868164, "learning_rate": 9.730427884082012e-05, "loss": 2.5502899169921873, "memory(GiB)": 70.96, "step": 12260, "token_acc": 0.46308724832214765, "train_speed(iter/s)": 0.67152 }, { "epoch": 0.5254702026477015, "grad_norm": 4.051071643829346, "learning_rate": 9.730209852592052e-05, "loss": 2.233402633666992, "memory(GiB)": 70.96, "step": 12265, "token_acc": 0.49794238683127573, "train_speed(iter/s)": 0.671578 }, { "epoch": 
0.5256844179769504, "grad_norm": 3.7050957679748535, "learning_rate": 9.729991735409902e-05, "loss": 2.6410106658935546, "memory(GiB)": 70.96, "step": 12270, "token_acc": 0.44666666666666666, "train_speed(iter/s)": 0.671616 }, { "epoch": 0.5258986333061993, "grad_norm": 4.7275800704956055, "learning_rate": 9.729773532539515e-05, "loss": 2.509653854370117, "memory(GiB)": 70.96, "step": 12275, "token_acc": 0.43769968051118213, "train_speed(iter/s)": 0.671578 }, { "epoch": 0.5261128486354484, "grad_norm": 5.416829586029053, "learning_rate": 9.729555243984845e-05, "loss": 2.4708648681640626, "memory(GiB)": 70.96, "step": 12280, "token_acc": 0.464, "train_speed(iter/s)": 0.67159 }, { "epoch": 0.5263270639646973, "grad_norm": 3.3354151248931885, "learning_rate": 9.729336869749843e-05, "loss": 2.0962102890014647, "memory(GiB)": 70.96, "step": 12285, "token_acc": 0.5265306122448979, "train_speed(iter/s)": 0.671604 }, { "epoch": 0.5265412792939462, "grad_norm": 3.580101251602173, "learning_rate": 9.729118409838469e-05, "loss": 2.3784162521362306, "memory(GiB)": 70.96, "step": 12290, "token_acc": 0.487012987012987, "train_speed(iter/s)": 0.671618 }, { "epoch": 0.5267554946231953, "grad_norm": 3.413634777069092, "learning_rate": 9.72889986425468e-05, "loss": 2.3421772003173826, "memory(GiB)": 70.96, "step": 12295, "token_acc": 0.48514851485148514, "train_speed(iter/s)": 0.671634 }, { "epoch": 0.5269697099524442, "grad_norm": 3.804788827896118, "learning_rate": 9.72868123300243e-05, "loss": 2.3444976806640625, "memory(GiB)": 70.96, "step": 12300, "token_acc": 0.5221843003412969, "train_speed(iter/s)": 0.671601 }, { "epoch": 0.5271839252816931, "grad_norm": 3.999389886856079, "learning_rate": 9.728462516085685e-05, "loss": 2.312881660461426, "memory(GiB)": 70.96, "step": 12305, "token_acc": 0.5311355311355311, "train_speed(iter/s)": 0.671556 }, { "epoch": 0.5273981406109421, "grad_norm": 4.326322555541992, "learning_rate": 9.728243713508407e-05, "loss": 2.5691722869873046, 
"memory(GiB)": 70.96, "step": 12310, "token_acc": 0.46984126984126984, "train_speed(iter/s)": 0.67157 }, { "epoch": 0.5276123559401911, "grad_norm": 3.997467279434204, "learning_rate": 9.728024825274558e-05, "loss": 2.4152902603149413, "memory(GiB)": 70.96, "step": 12315, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.671552 }, { "epoch": 0.52782657126944, "grad_norm": 4.198988914489746, "learning_rate": 9.727805851388105e-05, "loss": 2.294603157043457, "memory(GiB)": 70.96, "step": 12320, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.671587 }, { "epoch": 0.528040786598689, "grad_norm": 3.495198965072632, "learning_rate": 9.727586791853013e-05, "loss": 2.143914222717285, "memory(GiB)": 70.96, "step": 12325, "token_acc": 0.539622641509434, "train_speed(iter/s)": 0.67162 }, { "epoch": 0.528255001927938, "grad_norm": 3.2490100860595703, "learning_rate": 9.727367646673251e-05, "loss": 2.4131101608276366, "memory(GiB)": 70.96, "step": 12330, "token_acc": 0.4721311475409836, "train_speed(iter/s)": 0.671551 }, { "epoch": 0.5284692172571869, "grad_norm": 4.007875919342041, "learning_rate": 9.727148415852791e-05, "loss": 2.2186269760131836, "memory(GiB)": 70.96, "step": 12335, "token_acc": 0.5458333333333333, "train_speed(iter/s)": 0.671581 }, { "epoch": 0.5286834325864359, "grad_norm": 3.218609094619751, "learning_rate": 9.726929099395602e-05, "loss": 2.441402053833008, "memory(GiB)": 70.96, "step": 12340, "token_acc": 0.5606060606060606, "train_speed(iter/s)": 0.671528 }, { "epoch": 0.5288976479156848, "grad_norm": 3.1849374771118164, "learning_rate": 9.726709697305658e-05, "loss": 2.5119003295898437, "memory(GiB)": 70.96, "step": 12345, "token_acc": 0.477124183006536, "train_speed(iter/s)": 0.671543 }, { "epoch": 0.5291118632449338, "grad_norm": 4.1494927406311035, "learning_rate": 9.726490209586934e-05, "loss": 2.3221553802490233, "memory(GiB)": 70.96, "step": 12350, "token_acc": 0.48226950354609927, "train_speed(iter/s)": 0.671519 }, { "epoch": 
0.5293260785741828, "grad_norm": 3.0146970748901367, "learning_rate": 9.726270636243406e-05, "loss": 2.4325170516967773, "memory(GiB)": 70.96, "step": 12355, "token_acc": 0.47419354838709676, "train_speed(iter/s)": 0.671533 }, { "epoch": 0.5295402939034317, "grad_norm": 4.86696195602417, "learning_rate": 9.726050977279052e-05, "loss": 2.3697465896606444, "memory(GiB)": 70.96, "step": 12360, "token_acc": 0.48412698412698413, "train_speed(iter/s)": 0.671552 }, { "epoch": 0.5297545092326806, "grad_norm": 4.071893692016602, "learning_rate": 9.725831232697851e-05, "loss": 2.4490741729736327, "memory(GiB)": 70.96, "step": 12365, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.671488 }, { "epoch": 0.5299687245619297, "grad_norm": 3.8273701667785645, "learning_rate": 9.725611402503784e-05, "loss": 2.4645843505859375, "memory(GiB)": 70.96, "step": 12370, "token_acc": 0.4783950617283951, "train_speed(iter/s)": 0.671522 }, { "epoch": 0.5301829398911786, "grad_norm": 3.204495429992676, "learning_rate": 9.725391486700833e-05, "loss": 2.602070426940918, "memory(GiB)": 70.96, "step": 12375, "token_acc": 0.4456140350877193, "train_speed(iter/s)": 0.671534 }, { "epoch": 0.5303971552204275, "grad_norm": 4.117837429046631, "learning_rate": 9.725171485292983e-05, "loss": 2.480088996887207, "memory(GiB)": 70.96, "step": 12380, "token_acc": 0.46037735849056605, "train_speed(iter/s)": 0.671607 }, { "epoch": 0.5306113705496766, "grad_norm": 3.09335994720459, "learning_rate": 9.724951398284219e-05, "loss": 2.3886436462402343, "memory(GiB)": 70.96, "step": 12385, "token_acc": 0.5104477611940299, "train_speed(iter/s)": 0.671555 }, { "epoch": 0.5308255858789255, "grad_norm": 3.153211832046509, "learning_rate": 9.724731225678529e-05, "loss": 2.238887977600098, "memory(GiB)": 70.96, "step": 12390, "token_acc": 0.5050167224080268, "train_speed(iter/s)": 0.67156 }, { "epoch": 0.5310398012081745, "grad_norm": 4.4789605140686035, "learning_rate": 9.724510967479898e-05, "loss": 
2.6587547302246093, "memory(GiB)": 70.96, "step": 12395, "token_acc": 0.42948717948717946, "train_speed(iter/s)": 0.671572 }, { "epoch": 0.5312540165374234, "grad_norm": 4.375490665435791, "learning_rate": 9.724290623692322e-05, "loss": 2.5554744720458986, "memory(GiB)": 70.96, "step": 12400, "token_acc": 0.4752186588921283, "train_speed(iter/s)": 0.671584 }, { "epoch": 0.5314682318666724, "grad_norm": 4.527034282684326, "learning_rate": 9.724070194319787e-05, "loss": 2.1249984741210937, "memory(GiB)": 70.96, "step": 12405, "token_acc": 0.5253623188405797, "train_speed(iter/s)": 0.671546 }, { "epoch": 0.5316824471959214, "grad_norm": 3.690420627593994, "learning_rate": 9.723849679366291e-05, "loss": 2.595631790161133, "memory(GiB)": 70.96, "step": 12410, "token_acc": 0.42452830188679247, "train_speed(iter/s)": 0.67151 }, { "epoch": 0.5318966625251703, "grad_norm": 5.624110221862793, "learning_rate": 9.723629078835825e-05, "loss": 2.7069950103759766, "memory(GiB)": 70.96, "step": 12415, "token_acc": 0.44363636363636366, "train_speed(iter/s)": 0.671522 }, { "epoch": 0.5321108778544192, "grad_norm": 4.107304096221924, "learning_rate": 9.72340839273239e-05, "loss": 2.688669776916504, "memory(GiB)": 70.96, "step": 12420, "token_acc": 0.43283582089552236, "train_speed(iter/s)": 0.671425 }, { "epoch": 0.5323250931836683, "grad_norm": 2.990659713745117, "learning_rate": 9.723187621059977e-05, "loss": 2.5702465057373045, "memory(GiB)": 70.96, "step": 12425, "token_acc": 0.46417445482866043, "train_speed(iter/s)": 0.671478 }, { "epoch": 0.5325393085129172, "grad_norm": 4.797845840454102, "learning_rate": 9.722966763822591e-05, "loss": 2.512091636657715, "memory(GiB)": 70.96, "step": 12430, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.671493 }, { "epoch": 0.5327535238421661, "grad_norm": 3.160922050476074, "learning_rate": 9.722745821024233e-05, "loss": 2.5451787948608398, "memory(GiB)": 70.96, "step": 12435, "token_acc": 0.4726027397260274, 
"train_speed(iter/s)": 0.671527 }, { "epoch": 0.5329677391714152, "grad_norm": 3.584589719772339, "learning_rate": 9.722524792668903e-05, "loss": 2.435988426208496, "memory(GiB)": 70.96, "step": 12440, "token_acc": 0.4807017543859649, "train_speed(iter/s)": 0.671487 }, { "epoch": 0.5331819545006641, "grad_norm": 3.440222978591919, "learning_rate": 9.722303678760607e-05, "loss": 2.608650779724121, "memory(GiB)": 70.96, "step": 12445, "token_acc": 0.4931972789115646, "train_speed(iter/s)": 0.671515 }, { "epoch": 0.533396169829913, "grad_norm": 3.451423168182373, "learning_rate": 9.72208247930335e-05, "loss": 2.6108522415161133, "memory(GiB)": 70.96, "step": 12450, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.671541 }, { "epoch": 0.533610385159162, "grad_norm": 5.316859245300293, "learning_rate": 9.721861194301139e-05, "loss": 2.714408111572266, "memory(GiB)": 70.96, "step": 12455, "token_acc": 0.4785992217898833, "train_speed(iter/s)": 0.671494 }, { "epoch": 0.533824600488411, "grad_norm": 3.4276015758514404, "learning_rate": 9.721639823757982e-05, "loss": 2.4112060546875, "memory(GiB)": 70.96, "step": 12460, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.671507 }, { "epoch": 0.5340388158176599, "grad_norm": 3.429535388946533, "learning_rate": 9.721418367677891e-05, "loss": 2.4774444580078123, "memory(GiB)": 70.96, "step": 12465, "token_acc": 0.44879518072289154, "train_speed(iter/s)": 0.671529 }, { "epoch": 0.5342530311469089, "grad_norm": 3.9712960720062256, "learning_rate": 9.721196826064877e-05, "loss": 2.558765411376953, "memory(GiB)": 70.96, "step": 12470, "token_acc": 0.4512987012987013, "train_speed(iter/s)": 0.671534 }, { "epoch": 0.5344672464761578, "grad_norm": 3.8747024536132812, "learning_rate": 9.720975198922955e-05, "loss": 2.6692298889160155, "memory(GiB)": 70.96, "step": 12475, "token_acc": 0.4485981308411215, "train_speed(iter/s)": 0.671488 }, { "epoch": 0.5346814618054068, "grad_norm": 3.3861637115478516, "learning_rate": 
9.720753486256138e-05, "loss": 2.6611825942993166, "memory(GiB)": 70.96, "step": 12480, "token_acc": 0.436241610738255, "train_speed(iter/s)": 0.671458 }, { "epoch": 0.5348956771346558, "grad_norm": 2.727444648742676, "learning_rate": 9.720531688068441e-05, "loss": 2.597918701171875, "memory(GiB)": 70.96, "step": 12485, "token_acc": 0.46439628482972134, "train_speed(iter/s)": 0.671487 }, { "epoch": 0.5351098924639047, "grad_norm": 3.1630325317382812, "learning_rate": 9.720309804363888e-05, "loss": 2.386673164367676, "memory(GiB)": 70.96, "step": 12490, "token_acc": 0.45195729537366547, "train_speed(iter/s)": 0.671455 }, { "epoch": 0.5353241077931536, "grad_norm": 2.8171167373657227, "learning_rate": 9.720087835146492e-05, "loss": 2.42587890625, "memory(GiB)": 70.96, "step": 12495, "token_acc": 0.49185667752442996, "train_speed(iter/s)": 0.671435 }, { "epoch": 0.5355383231224027, "grad_norm": 4.110315322875977, "learning_rate": 9.719865780420278e-05, "loss": 2.342955207824707, "memory(GiB)": 70.96, "step": 12500, "token_acc": 0.4844961240310077, "train_speed(iter/s)": 0.671441 }, { "epoch": 0.5355383231224027, "eval_loss": 2.1968681812286377, "eval_runtime": 16.877, "eval_samples_per_second": 5.925, "eval_steps_per_second": 5.925, "eval_token_acc": 0.4863582443653618, "step": 12500 }, { "epoch": 0.5357525384516516, "grad_norm": 3.6428465843200684, "learning_rate": 9.719643640189267e-05, "loss": 2.5546783447265624, "memory(GiB)": 70.96, "step": 12505, "token_acc": 0.484629294755877, "train_speed(iter/s)": 0.670695 }, { "epoch": 0.5359667537809005, "grad_norm": 2.9372663497924805, "learning_rate": 9.719421414457485e-05, "loss": 2.8018268585205077, "memory(GiB)": 70.96, "step": 12510, "token_acc": 0.42735042735042733, "train_speed(iter/s)": 0.670721 }, { "epoch": 0.5361809691101496, "grad_norm": 4.879366397857666, "learning_rate": 9.719199103228957e-05, "loss": 2.3622379302978516, "memory(GiB)": 70.96, "step": 12515, "token_acc": 0.5045454545454545, 
"train_speed(iter/s)": 0.670741 }, { "epoch": 0.5363951844393985, "grad_norm": 5.525900840759277, "learning_rate": 9.71897670650771e-05, "loss": 2.6776992797851564, "memory(GiB)": 70.96, "step": 12520, "token_acc": 0.4180602006688963, "train_speed(iter/s)": 0.670749 }, { "epoch": 0.5366093997686474, "grad_norm": 3.044114112854004, "learning_rate": 9.718754224297774e-05, "loss": 2.6459072113037108, "memory(GiB)": 70.96, "step": 12525, "token_acc": 0.46488294314381273, "train_speed(iter/s)": 0.67076 }, { "epoch": 0.5368236150978964, "grad_norm": 2.8675167560577393, "learning_rate": 9.718531656603177e-05, "loss": 2.215106201171875, "memory(GiB)": 70.96, "step": 12530, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 0.670826 }, { "epoch": 0.5370378304271454, "grad_norm": 3.452678680419922, "learning_rate": 9.718309003427955e-05, "loss": 2.432029151916504, "memory(GiB)": 70.96, "step": 12535, "token_acc": 0.5214723926380368, "train_speed(iter/s)": 0.670834 }, { "epoch": 0.5372520457563943, "grad_norm": 3.475168466567993, "learning_rate": 9.718086264776136e-05, "loss": 2.4084291458129883, "memory(GiB)": 70.96, "step": 12540, "token_acc": 0.4879725085910653, "train_speed(iter/s)": 0.670854 }, { "epoch": 0.5374662610856433, "grad_norm": 4.922580242156982, "learning_rate": 9.71786344065176e-05, "loss": 2.487571144104004, "memory(GiB)": 70.96, "step": 12545, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.670882 }, { "epoch": 0.5376804764148922, "grad_norm": 3.931931495666504, "learning_rate": 9.717640531058862e-05, "loss": 2.559335136413574, "memory(GiB)": 70.96, "step": 12550, "token_acc": 0.4639175257731959, "train_speed(iter/s)": 0.670946 }, { "epoch": 0.5378946917441412, "grad_norm": 3.3877367973327637, "learning_rate": 9.717417536001481e-05, "loss": 2.4456777572631836, "memory(GiB)": 70.96, "step": 12555, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.670961 }, { "epoch": 0.5381089070733902, "grad_norm": 3.6914610862731934, "learning_rate": 
9.717194455483656e-05, "loss": 2.4200439453125, "memory(GiB)": 70.96, "step": 12560, "token_acc": 0.4847457627118644, "train_speed(iter/s)": 0.670979 }, { "epoch": 0.5383231224026391, "grad_norm": 2.760577917098999, "learning_rate": 9.716971289509429e-05, "loss": 2.6617687225341795, "memory(GiB)": 70.96, "step": 12565, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.670988 }, { "epoch": 0.538537337731888, "grad_norm": 4.7350640296936035, "learning_rate": 9.716748038082842e-05, "loss": 2.701291084289551, "memory(GiB)": 70.96, "step": 12570, "token_acc": 0.4485981308411215, "train_speed(iter/s)": 0.670969 }, { "epoch": 0.5387515530611371, "grad_norm": 4.137800216674805, "learning_rate": 9.716524701207938e-05, "loss": 2.426679992675781, "memory(GiB)": 70.96, "step": 12575, "token_acc": 0.49142857142857144, "train_speed(iter/s)": 0.67094 }, { "epoch": 0.538965768390386, "grad_norm": 2.99727463722229, "learning_rate": 9.716301278888764e-05, "loss": 2.589098358154297, "memory(GiB)": 70.96, "step": 12580, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.670948 }, { "epoch": 0.5391799837196349, "grad_norm": 2.6242926120758057, "learning_rate": 9.71607777112937e-05, "loss": 2.5576091766357423, "memory(GiB)": 70.96, "step": 12585, "token_acc": 0.44510385756676557, "train_speed(iter/s)": 0.670959 }, { "epoch": 0.539394199048884, "grad_norm": 3.112011432647705, "learning_rate": 9.715854177933803e-05, "loss": 2.4222732543945313, "memory(GiB)": 70.96, "step": 12590, "token_acc": 0.49458483754512633, "train_speed(iter/s)": 0.670945 }, { "epoch": 0.5396084143781329, "grad_norm": 2.760246992111206, "learning_rate": 9.715630499306114e-05, "loss": 2.435091972351074, "memory(GiB)": 70.96, "step": 12595, "token_acc": 0.4788273615635179, "train_speed(iter/s)": 0.670879 }, { "epoch": 0.5398226297073818, "grad_norm": 3.6166834831237793, "learning_rate": 9.715406735250354e-05, "loss": 2.5751476287841797, "memory(GiB)": 70.96, "step": 12600, "token_acc": 
0.4864864864864865, "train_speed(iter/s)": 0.670935 }, { "epoch": 0.5400368450366309, "grad_norm": 3.320732831954956, "learning_rate": 9.715182885770577e-05, "loss": 2.526177978515625, "memory(GiB)": 70.96, "step": 12605, "token_acc": 0.4696485623003195, "train_speed(iter/s)": 0.670922 }, { "epoch": 0.5402510603658798, "grad_norm": 3.9432642459869385, "learning_rate": 9.714958950870841e-05, "loss": 2.362362098693848, "memory(GiB)": 70.96, "step": 12610, "token_acc": 0.5137254901960784, "train_speed(iter/s)": 0.670894 }, { "epoch": 0.5404652756951287, "grad_norm": 2.678616523742676, "learning_rate": 9.714734930555198e-05, "loss": 2.5205591201782225, "memory(GiB)": 70.96, "step": 12615, "token_acc": 0.47096774193548385, "train_speed(iter/s)": 0.670907 }, { "epoch": 0.5406794910243777, "grad_norm": 3.935473680496216, "learning_rate": 9.71451082482771e-05, "loss": 2.2504011154174806, "memory(GiB)": 70.96, "step": 12620, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.670951 }, { "epoch": 0.5408937063536267, "grad_norm": 4.5033392906188965, "learning_rate": 9.714286633692437e-05, "loss": 2.4674150466918947, "memory(GiB)": 70.96, "step": 12625, "token_acc": 0.5247524752475248, "train_speed(iter/s)": 0.670975 }, { "epoch": 0.5411079216828756, "grad_norm": 3.5573582649230957, "learning_rate": 9.714062357153437e-05, "loss": 2.3407829284667967, "memory(GiB)": 70.96, "step": 12630, "token_acc": 0.5181159420289855, "train_speed(iter/s)": 0.670938 }, { "epoch": 0.5413221370121246, "grad_norm": 3.825624942779541, "learning_rate": 9.713837995214778e-05, "loss": 2.4965337753295898, "memory(GiB)": 70.96, "step": 12635, "token_acc": 0.45244956772334294, "train_speed(iter/s)": 0.670946 }, { "epoch": 0.5415363523413735, "grad_norm": 3.3189821243286133, "learning_rate": 9.71361354788052e-05, "loss": 2.4229948043823244, "memory(GiB)": 70.96, "step": 12640, "token_acc": 0.4533333333333333, "train_speed(iter/s)": 0.670941 }, { "epoch": 0.5417505676706225, "grad_norm": 
3.1770641803741455, "learning_rate": 9.713389015154731e-05, "loss": 2.1095003128051757, "memory(GiB)": 70.96, "step": 12645, "token_acc": 0.5471014492753623, "train_speed(iter/s)": 0.670829 }, { "epoch": 0.5419647829998715, "grad_norm": 3.241055965423584, "learning_rate": 9.713164397041478e-05, "loss": 2.5140636444091795, "memory(GiB)": 70.96, "step": 12650, "token_acc": 0.4805194805194805, "train_speed(iter/s)": 0.670858 }, { "epoch": 0.5421789983291204, "grad_norm": 4.31465482711792, "learning_rate": 9.712939693544832e-05, "loss": 2.293639373779297, "memory(GiB)": 70.96, "step": 12655, "token_acc": 0.4872881355932203, "train_speed(iter/s)": 0.670865 }, { "epoch": 0.5423932136583693, "grad_norm": 3.455073595046997, "learning_rate": 9.712714904668863e-05, "loss": 2.336151123046875, "memory(GiB)": 70.96, "step": 12660, "token_acc": 0.5158227848101266, "train_speed(iter/s)": 0.670913 }, { "epoch": 0.5426074289876184, "grad_norm": 3.231691360473633, "learning_rate": 9.71249003041764e-05, "loss": 2.3948415756225585, "memory(GiB)": 70.96, "step": 12665, "token_acc": 0.4754601226993865, "train_speed(iter/s)": 0.670881 }, { "epoch": 0.5428216443168673, "grad_norm": 3.9911155700683594, "learning_rate": 9.712265070795242e-05, "loss": 2.2884464263916016, "memory(GiB)": 70.96, "step": 12670, "token_acc": 0.5098814229249012, "train_speed(iter/s)": 0.670924 }, { "epoch": 0.5430358596461162, "grad_norm": 3.3460774421691895, "learning_rate": 9.71204002580574e-05, "loss": 2.1863842010498047, "memory(GiB)": 70.96, "step": 12675, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.670928 }, { "epoch": 0.5432500749753653, "grad_norm": 3.453981637954712, "learning_rate": 9.711814895453213e-05, "loss": 2.2533592224121093, "memory(GiB)": 70.96, "step": 12680, "token_acc": 0.5420875420875421, "train_speed(iter/s)": 0.670954 }, { "epoch": 0.5434642903046142, "grad_norm": 3.716118335723877, "learning_rate": 9.71158967974174e-05, "loss": 2.1419240951538088, "memory(GiB)": 70.96, 
"step": 12685, "token_acc": 0.48905109489051096, "train_speed(iter/s)": 0.67092 }, { "epoch": 0.5436785056338631, "grad_norm": 3.396984577178955, "learning_rate": 9.711364378675399e-05, "loss": 2.3307422637939452, "memory(GiB)": 70.96, "step": 12690, "token_acc": 0.5227272727272727, "train_speed(iter/s)": 0.670897 }, { "epoch": 0.5438927209631121, "grad_norm": 2.7456510066986084, "learning_rate": 9.711138992258273e-05, "loss": 2.447982406616211, "memory(GiB)": 70.96, "step": 12695, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.670901 }, { "epoch": 0.5441069362923611, "grad_norm": 4.159516334533691, "learning_rate": 9.710913520494444e-05, "loss": 2.3996280670166015, "memory(GiB)": 70.96, "step": 12700, "token_acc": 0.5033783783783784, "train_speed(iter/s)": 0.670968 }, { "epoch": 0.54432115162161, "grad_norm": 3.218975067138672, "learning_rate": 9.710687963387999e-05, "loss": 2.3888084411621096, "memory(GiB)": 70.96, "step": 12705, "token_acc": 0.4811320754716981, "train_speed(iter/s)": 0.670916 }, { "epoch": 0.544535366950859, "grad_norm": 3.421766757965088, "learning_rate": 9.710462320943021e-05, "loss": 2.4934343338012694, "memory(GiB)": 70.96, "step": 12710, "token_acc": 0.4714828897338403, "train_speed(iter/s)": 0.670889 }, { "epoch": 0.544749582280108, "grad_norm": 3.9249792098999023, "learning_rate": 9.710236593163599e-05, "loss": 2.414321517944336, "memory(GiB)": 70.96, "step": 12715, "token_acc": 0.4608433734939759, "train_speed(iter/s)": 0.670903 }, { "epoch": 0.5449637976093569, "grad_norm": 3.2157537937164307, "learning_rate": 9.710010780053826e-05, "loss": 2.3841312408447264, "memory(GiB)": 70.96, "step": 12720, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.670938 }, { "epoch": 0.5451780129386059, "grad_norm": 10.708207130432129, "learning_rate": 9.709784881617786e-05, "loss": 2.3472585678100586, "memory(GiB)": 70.96, "step": 12725, "token_acc": 0.5254901960784314, "train_speed(iter/s)": 0.67098 }, { "epoch": 0.5453922282678548, 
"grad_norm": 3.6865103244781494, "learning_rate": 9.709558897859576e-05, "loss": 2.605635070800781, "memory(GiB)": 70.96, "step": 12730, "token_acc": 0.43843843843843844, "train_speed(iter/s)": 0.670985 }, { "epoch": 0.5456064435971039, "grad_norm": 3.7043440341949463, "learning_rate": 9.709332828783289e-05, "loss": 2.464032745361328, "memory(GiB)": 70.96, "step": 12735, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.67094 }, { "epoch": 0.5458206589263528, "grad_norm": 3.2070651054382324, "learning_rate": 9.709106674393018e-05, "loss": 2.5200658798217774, "memory(GiB)": 70.96, "step": 12740, "token_acc": 0.46607669616519176, "train_speed(iter/s)": 0.6709 }, { "epoch": 0.5460348742556017, "grad_norm": 5.224951267242432, "learning_rate": 9.708880434692864e-05, "loss": 2.5884613037109374, "memory(GiB)": 70.96, "step": 12745, "token_acc": 0.48863636363636365, "train_speed(iter/s)": 0.67097 }, { "epoch": 0.5462490895848507, "grad_norm": 3.581069231033325, "learning_rate": 9.708654109686923e-05, "loss": 2.3108392715454102, "memory(GiB)": 70.96, "step": 12750, "token_acc": 0.5050505050505051, "train_speed(iter/s)": 0.670958 }, { "epoch": 0.5464633049140997, "grad_norm": 3.6627185344696045, "learning_rate": 9.708427699379297e-05, "loss": 2.408483123779297, "memory(GiB)": 70.96, "step": 12755, "token_acc": 0.5019157088122606, "train_speed(iter/s)": 0.670954 }, { "epoch": 0.5466775202433486, "grad_norm": 19.66691780090332, "learning_rate": 9.708201203774086e-05, "loss": 2.3690008163452148, "memory(GiB)": 70.96, "step": 12760, "token_acc": 0.5314685314685315, "train_speed(iter/s)": 0.670934 }, { "epoch": 0.5468917355725976, "grad_norm": 5.151769638061523, "learning_rate": 9.70797462287539e-05, "loss": 2.5613136291503906, "memory(GiB)": 70.96, "step": 12765, "token_acc": 0.45263157894736844, "train_speed(iter/s)": 0.670968 }, { "epoch": 0.5471059509018465, "grad_norm": 3.1889171600341797, "learning_rate": 9.707747956687322e-05, "loss": 2.4280181884765626, 
"memory(GiB)": 70.96, "step": 12770, "token_acc": 0.47745358090185674, "train_speed(iter/s)": 0.670934 }, { "epoch": 0.5473201662310955, "grad_norm": 3.012986898422241, "learning_rate": 9.70752120521398e-05, "loss": 2.7071086883544924, "memory(GiB)": 70.96, "step": 12775, "token_acc": 0.4416403785488959, "train_speed(iter/s)": 0.670908 }, { "epoch": 0.5475343815603445, "grad_norm": 3.331071615219116, "learning_rate": 9.707294368459479e-05, "loss": 2.6185302734375, "memory(GiB)": 70.96, "step": 12780, "token_acc": 0.4495114006514658, "train_speed(iter/s)": 0.670897 }, { "epoch": 0.5477485968895934, "grad_norm": 4.294422626495361, "learning_rate": 9.707067446427922e-05, "loss": 2.330148696899414, "memory(GiB)": 70.96, "step": 12785, "token_acc": 0.5370370370370371, "train_speed(iter/s)": 0.670964 }, { "epoch": 0.5479628122188424, "grad_norm": 4.463483810424805, "learning_rate": 9.706840439123423e-05, "loss": 2.481465530395508, "memory(GiB)": 70.96, "step": 12790, "token_acc": 0.4859154929577465, "train_speed(iter/s)": 0.670882 }, { "epoch": 0.5481770275480914, "grad_norm": 3.6532979011535645, "learning_rate": 9.706613346550095e-05, "loss": 2.4882898330688477, "memory(GiB)": 70.96, "step": 12795, "token_acc": 0.5047619047619047, "train_speed(iter/s)": 0.67093 }, { "epoch": 0.5483912428773403, "grad_norm": 2.7584171295166016, "learning_rate": 9.70638616871205e-05, "loss": 2.343748092651367, "memory(GiB)": 70.96, "step": 12800, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.670963 }, { "epoch": 0.5486054582065892, "grad_norm": 3.1365137100219727, "learning_rate": 9.706158905613405e-05, "loss": 2.825952339172363, "memory(GiB)": 70.96, "step": 12805, "token_acc": 0.45930232558139533, "train_speed(iter/s)": 0.670995 }, { "epoch": 0.5488196735358383, "grad_norm": 3.318006753921509, "learning_rate": 9.705931557258277e-05, "loss": 2.4085596084594725, "memory(GiB)": 70.96, "step": 12810, "token_acc": 0.5201342281879194, "train_speed(iter/s)": 0.67095 }, { "epoch": 
0.5490338888650872, "grad_norm": 3.6406424045562744, "learning_rate": 9.705704123650785e-05, "loss": 2.8114700317382812, "memory(GiB)": 70.96, "step": 12815, "token_acc": 0.4387755102040816, "train_speed(iter/s)": 0.670993 }, { "epoch": 0.5492481041943361, "grad_norm": 3.1571011543273926, "learning_rate": 9.705476604795048e-05, "loss": 2.5785125732421874, "memory(GiB)": 70.96, "step": 12820, "token_acc": 0.4694533762057878, "train_speed(iter/s)": 0.671 }, { "epoch": 0.5494623195235852, "grad_norm": 4.162564277648926, "learning_rate": 9.705249000695186e-05, "loss": 2.4689817428588867, "memory(GiB)": 70.96, "step": 12825, "token_acc": 0.4807692307692308, "train_speed(iter/s)": 0.671025 }, { "epoch": 0.5496765348528341, "grad_norm": 3.503831148147583, "learning_rate": 9.705021311355327e-05, "loss": 2.7262689590454103, "memory(GiB)": 70.96, "step": 12830, "token_acc": 0.47419354838709676, "train_speed(iter/s)": 0.670972 }, { "epoch": 0.549890750182083, "grad_norm": 3.087116241455078, "learning_rate": 9.704793536779593e-05, "loss": 2.4183589935302736, "memory(GiB)": 70.96, "step": 12835, "token_acc": 0.45195729537366547, "train_speed(iter/s)": 0.670938 }, { "epoch": 0.550104965511332, "grad_norm": 3.028451919555664, "learning_rate": 9.70456567697211e-05, "loss": 2.2244056701660155, "memory(GiB)": 70.96, "step": 12840, "token_acc": 0.512987012987013, "train_speed(iter/s)": 0.670962 }, { "epoch": 0.550319180840581, "grad_norm": 6.700645923614502, "learning_rate": 9.704337731937007e-05, "loss": 2.6434148788452148, "memory(GiB)": 70.96, "step": 12845, "token_acc": 0.4519774011299435, "train_speed(iter/s)": 0.670996 }, { "epoch": 0.5505333961698299, "grad_norm": 3.896732807159424, "learning_rate": 9.704109701678413e-05, "loss": 2.5907403945922853, "memory(GiB)": 70.96, "step": 12850, "token_acc": 0.4672897196261682, "train_speed(iter/s)": 0.670996 }, { "epoch": 0.5507476114990789, "grad_norm": 4.313056945800781, "learning_rate": 9.703881586200458e-05, "loss": 
2.45629825592041, "memory(GiB)": 70.96, "step": 12855, "token_acc": 0.4953560371517028, "train_speed(iter/s)": 0.67109 }, { "epoch": 0.5509618268283278, "grad_norm": 3.757662534713745, "learning_rate": 9.703653385507276e-05, "loss": 2.4364452362060547, "memory(GiB)": 70.96, "step": 12860, "token_acc": 0.4931972789115646, "train_speed(iter/s)": 0.671117 }, { "epoch": 0.5511760421575768, "grad_norm": 3.1793651580810547, "learning_rate": 9.703425099603001e-05, "loss": 2.2105339050292967, "memory(GiB)": 70.96, "step": 12865, "token_acc": 0.5412186379928315, "train_speed(iter/s)": 0.671119 }, { "epoch": 0.5513902574868258, "grad_norm": 3.2843546867370605, "learning_rate": 9.703196728491768e-05, "loss": 2.436798858642578, "memory(GiB)": 70.96, "step": 12870, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.67117 }, { "epoch": 0.5516044728160747, "grad_norm": 2.849519968032837, "learning_rate": 9.702968272177714e-05, "loss": 2.1427099227905275, "memory(GiB)": 70.96, "step": 12875, "token_acc": 0.4777777777777778, "train_speed(iter/s)": 0.671148 }, { "epoch": 0.5518186881453236, "grad_norm": 2.9836037158966064, "learning_rate": 9.702739730664978e-05, "loss": 2.3987247467041017, "memory(GiB)": 70.96, "step": 12880, "token_acc": 0.4875, "train_speed(iter/s)": 0.671076 }, { "epoch": 0.5520329034745727, "grad_norm": 3.596587896347046, "learning_rate": 9.7025111039577e-05, "loss": 2.3644598007202147, "memory(GiB)": 70.96, "step": 12885, "token_acc": 0.46689895470383275, "train_speed(iter/s)": 0.670995 }, { "epoch": 0.5522471188038216, "grad_norm": 4.081578254699707, "learning_rate": 9.702282392060022e-05, "loss": 2.4542938232421876, "memory(GiB)": 70.96, "step": 12890, "token_acc": 0.5051194539249146, "train_speed(iter/s)": 0.670994 }, { "epoch": 0.5524613341330705, "grad_norm": null, "learning_rate": 9.702099361207577e-05, "loss": 2.2540447235107424, "memory(GiB)": 70.96, "step": 12895, "token_acc": 0.5166051660516605, "train_speed(iter/s)": 0.671023 }, { 
"epoch": 0.5526755494623196, "grad_norm": 2.9147026538848877, "learning_rate": 9.701870495977621e-05, "loss": 2.5122413635253906, "memory(GiB)": 70.96, "step": 12900, "token_acc": 0.48502994011976047, "train_speed(iter/s)": 0.671021 }, { "epoch": 0.5528897647915685, "grad_norm": 3.504514694213867, "learning_rate": 9.701641545568871e-05, "loss": 2.4861602783203125, "memory(GiB)": 70.96, "step": 12905, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.671069 }, { "epoch": 0.5531039801208174, "grad_norm": 3.5131545066833496, "learning_rate": 9.701412509985474e-05, "loss": 2.532360076904297, "memory(GiB)": 70.96, "step": 12910, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.671103 }, { "epoch": 0.5533181954500664, "grad_norm": 3.0047388076782227, "learning_rate": 9.701183389231578e-05, "loss": 2.8158437728881838, "memory(GiB)": 70.96, "step": 12915, "token_acc": 0.4378531073446328, "train_speed(iter/s)": 0.671135 }, { "epoch": 0.5535324107793154, "grad_norm": 3.8658251762390137, "learning_rate": 9.700954183311338e-05, "loss": 2.5483381271362306, "memory(GiB)": 70.96, "step": 12920, "token_acc": 0.5074626865671642, "train_speed(iter/s)": 0.671169 }, { "epoch": 0.5537466261085643, "grad_norm": 3.4577407836914062, "learning_rate": 9.700724892228899e-05, "loss": 2.469626617431641, "memory(GiB)": 70.96, "step": 12925, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.671208 }, { "epoch": 0.5539608414378133, "grad_norm": 4.737183570861816, "learning_rate": 9.700495515988422e-05, "loss": 2.3770330429077147, "memory(GiB)": 70.96, "step": 12930, "token_acc": 0.4840989399293286, "train_speed(iter/s)": 0.671205 }, { "epoch": 0.5541750567670622, "grad_norm": 5.079617023468018, "learning_rate": 9.700266054594058e-05, "loss": 2.6897985458374025, "memory(GiB)": 70.96, "step": 12935, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.671187 }, { "epoch": 0.5543892720963112, "grad_norm": 3.5794501304626465, "learning_rate": 9.700036508049966e-05, 
"loss": 2.3038496017456054, "memory(GiB)": 70.96, "step": 12940, "token_acc": 0.49074074074074076, "train_speed(iter/s)": 0.67122 }, { "epoch": 0.5546034874255602, "grad_norm": 4.051178455352783, "learning_rate": 9.699806876360303e-05, "loss": 2.8253082275390624, "memory(GiB)": 70.96, "step": 12945, "token_acc": 0.4375, "train_speed(iter/s)": 0.671291 }, { "epoch": 0.5548177027548091, "grad_norm": 4.184239864349365, "learning_rate": 9.69957715952923e-05, "loss": 2.6320623397827148, "memory(GiB)": 70.96, "step": 12950, "token_acc": 0.48502994011976047, "train_speed(iter/s)": 0.671315 }, { "epoch": 0.555031918084058, "grad_norm": 3.693824052810669, "learning_rate": 9.699347357560906e-05, "loss": 2.778511619567871, "memory(GiB)": 70.96, "step": 12955, "token_acc": 0.42805755395683454, "train_speed(iter/s)": 0.671308 }, { "epoch": 0.5552461334133071, "grad_norm": 3.1837563514709473, "learning_rate": 9.6991174704595e-05, "loss": 2.487303352355957, "memory(GiB)": 70.96, "step": 12960, "token_acc": 0.5170940170940171, "train_speed(iter/s)": 0.671302 }, { "epoch": 0.555460348742556, "grad_norm": 3.616550922393799, "learning_rate": 9.69888749822917e-05, "loss": 2.760121536254883, "memory(GiB)": 70.96, "step": 12965, "token_acc": 0.41304347826086957, "train_speed(iter/s)": 0.671286 }, { "epoch": 0.5556745640718049, "grad_norm": 4.297040939331055, "learning_rate": 9.698657440874086e-05, "loss": 2.307792091369629, "memory(GiB)": 70.96, "step": 12970, "token_acc": 0.47346938775510206, "train_speed(iter/s)": 0.671329 }, { "epoch": 0.555888779401054, "grad_norm": 2.5621819496154785, "learning_rate": 9.698427298398415e-05, "loss": 2.495350646972656, "memory(GiB)": 70.96, "step": 12975, "token_acc": 0.4574468085106383, "train_speed(iter/s)": 0.671281 }, { "epoch": 0.5561029947303029, "grad_norm": 3.3177835941314697, "learning_rate": 9.698197070806326e-05, "loss": 2.489113616943359, "memory(GiB)": 70.96, "step": 12980, "token_acc": 0.45627376425855515, "train_speed(iter/s)": 
0.671269 }, { "epoch": 0.5563172100595518, "grad_norm": 3.8160462379455566, "learning_rate": 9.697966758101989e-05, "loss": 2.3984893798828124, "memory(GiB)": 70.96, "step": 12985, "token_acc": 0.49458483754512633, "train_speed(iter/s)": 0.671273 }, { "epoch": 0.5565314253888008, "grad_norm": 3.4352052211761475, "learning_rate": 9.697736360289577e-05, "loss": 2.8580650329589843, "memory(GiB)": 70.96, "step": 12990, "token_acc": 0.44, "train_speed(iter/s)": 0.671308 }, { "epoch": 0.5567456407180498, "grad_norm": 3.768639087677002, "learning_rate": 9.697505877373265e-05, "loss": 2.517253303527832, "memory(GiB)": 70.96, "step": 12995, "token_acc": 0.44727272727272727, "train_speed(iter/s)": 0.671262 }, { "epoch": 0.5569598560472987, "grad_norm": 3.163280487060547, "learning_rate": 9.697275309357228e-05, "loss": 2.32583065032959, "memory(GiB)": 70.96, "step": 13000, "token_acc": 0.5164835164835165, "train_speed(iter/s)": 0.67126 }, { "epoch": 0.5569598560472987, "eval_loss": 2.191995620727539, "eval_runtime": 17.008, "eval_samples_per_second": 5.88, "eval_steps_per_second": 5.88, "eval_token_acc": 0.4755905511811024, "step": 13000 }, { "epoch": 0.5571740713765477, "grad_norm": 3.0864644050598145, "learning_rate": 9.697044656245642e-05, "loss": 2.3197484970092774, "memory(GiB)": 70.96, "step": 13005, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.670586 }, { "epoch": 0.5573882867057967, "grad_norm": 3.890629768371582, "learning_rate": 9.696813918042684e-05, "loss": 2.5603610992431642, "memory(GiB)": 70.96, "step": 13010, "token_acc": 0.4921875, "train_speed(iter/s)": 0.670629 }, { "epoch": 0.5576025020350456, "grad_norm": 2.990838050842285, "learning_rate": 9.696583094752538e-05, "loss": 2.1887706756591796, "memory(GiB)": 70.96, "step": 13015, "token_acc": 0.52, "train_speed(iter/s)": 0.670656 }, { "epoch": 0.5578167173642946, "grad_norm": 3.2997426986694336, "learning_rate": 9.696352186379382e-05, "loss": 2.3144805908203123, "memory(GiB)": 70.96, "step": 
13020, "token_acc": 0.49158249158249157, "train_speed(iter/s)": 0.670699 }, { "epoch": 0.5580309326935435, "grad_norm": 3.0322911739349365, "learning_rate": 9.696121192927402e-05, "loss": 2.4262212753295898, "memory(GiB)": 70.96, "step": 13025, "token_acc": 0.46060606060606063, "train_speed(iter/s)": 0.670725 }, { "epoch": 0.5582451480227925, "grad_norm": 3.596090316772461, "learning_rate": 9.69589011440078e-05, "loss": 2.424026107788086, "memory(GiB)": 70.96, "step": 13030, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.670726 }, { "epoch": 0.5584593633520415, "grad_norm": 4.881282329559326, "learning_rate": 9.695658950803706e-05, "loss": 2.4459733963012695, "memory(GiB)": 70.96, "step": 13035, "token_acc": 0.4842105263157895, "train_speed(iter/s)": 0.670749 }, { "epoch": 0.5586735786812904, "grad_norm": 3.0204155445098877, "learning_rate": 9.695427702140363e-05, "loss": 2.515864944458008, "memory(GiB)": 70.96, "step": 13040, "token_acc": 0.45787545787545786, "train_speed(iter/s)": 0.670804 }, { "epoch": 0.5588877940105393, "grad_norm": 3.4418656826019287, "learning_rate": 9.695196368414942e-05, "loss": 2.406956100463867, "memory(GiB)": 70.96, "step": 13045, "token_acc": 0.462406015037594, "train_speed(iter/s)": 0.67085 }, { "epoch": 0.5591020093397884, "grad_norm": 4.476402759552002, "learning_rate": 9.694964949631636e-05, "loss": 2.284962272644043, "memory(GiB)": 70.96, "step": 13050, "token_acc": 0.5, "train_speed(iter/s)": 0.670877 }, { "epoch": 0.5593162246690373, "grad_norm": 2.8172662258148193, "learning_rate": 9.694733445794634e-05, "loss": 2.654646301269531, "memory(GiB)": 70.96, "step": 13055, "token_acc": 0.4696969696969697, "train_speed(iter/s)": 0.670897 }, { "epoch": 0.5595304399982862, "grad_norm": 3.476093053817749, "learning_rate": 9.694501856908133e-05, "loss": 2.6521879196166993, "memory(GiB)": 70.96, "step": 13060, "token_acc": 0.44932432432432434, "train_speed(iter/s)": 0.670945 }, { "epoch": 0.5597446553275353, "grad_norm": 
3.1778690814971924, "learning_rate": 9.694270182976329e-05, "loss": 2.69094181060791, "memory(GiB)": 70.96, "step": 13065, "token_acc": 0.41454545454545455, "train_speed(iter/s)": 0.670966 }, { "epoch": 0.5599588706567842, "grad_norm": 3.190843343734741, "learning_rate": 9.694038424003414e-05, "loss": 2.597814178466797, "memory(GiB)": 70.96, "step": 13070, "token_acc": 0.45565749235474007, "train_speed(iter/s)": 0.670993 }, { "epoch": 0.5601730859860332, "grad_norm": 3.1475296020507812, "learning_rate": 9.693806579993589e-05, "loss": 2.242331695556641, "memory(GiB)": 70.96, "step": 13075, "token_acc": 0.5168539325842697, "train_speed(iter/s)": 0.671005 }, { "epoch": 0.5603873013152821, "grad_norm": 4.288150787353516, "learning_rate": 9.693574650951056e-05, "loss": 2.6213340759277344, "memory(GiB)": 70.96, "step": 13080, "token_acc": 0.4807692307692308, "train_speed(iter/s)": 0.671015 }, { "epoch": 0.5606015166445311, "grad_norm": 3.268763303756714, "learning_rate": 9.693342636880016e-05, "loss": 2.576343536376953, "memory(GiB)": 70.96, "step": 13085, "token_acc": 0.49836065573770494, "train_speed(iter/s)": 0.671013 }, { "epoch": 0.5608157319737801, "grad_norm": 3.2837836742401123, "learning_rate": 9.69311053778467e-05, "loss": 2.422634315490723, "memory(GiB)": 70.96, "step": 13090, "token_acc": 0.48344370860927155, "train_speed(iter/s)": 0.671017 }, { "epoch": 0.561029947303029, "grad_norm": 3.766542434692383, "learning_rate": 9.692878353669225e-05, "loss": 2.594505500793457, "memory(GiB)": 70.96, "step": 13095, "token_acc": 0.44891640866873067, "train_speed(iter/s)": 0.671032 }, { "epoch": 0.5612441626322779, "grad_norm": 5.508364677429199, "learning_rate": 9.692646084537887e-05, "loss": 2.6225685119628905, "memory(GiB)": 70.96, "step": 13100, "token_acc": 0.4623287671232877, "train_speed(iter/s)": 0.671052 }, { "epoch": 0.561458377961527, "grad_norm": 3.66971755027771, "learning_rate": 9.692413730394862e-05, "loss": 2.6848297119140625, "memory(GiB)": 70.96, 
"step": 13105, "token_acc": 0.4707792207792208, "train_speed(iter/s)": 0.671116 }, { "epoch": 0.5616725932907759, "grad_norm": 4.967896938323975, "learning_rate": 9.692181291244359e-05, "loss": 2.4213748931884767, "memory(GiB)": 70.96, "step": 13110, "token_acc": 0.47157190635451507, "train_speed(iter/s)": 0.671106 }, { "epoch": 0.5618868086200248, "grad_norm": 3.5490846633911133, "learning_rate": 9.691948767090593e-05, "loss": 2.4134260177612306, "memory(GiB)": 70.96, "step": 13115, "token_acc": 0.47101449275362317, "train_speed(iter/s)": 0.671176 }, { "epoch": 0.5621010239492739, "grad_norm": 3.4405152797698975, "learning_rate": 9.691716157937772e-05, "loss": 2.338612174987793, "memory(GiB)": 70.96, "step": 13120, "token_acc": 0.4863013698630137, "train_speed(iter/s)": 0.671169 }, { "epoch": 0.5623152392785228, "grad_norm": 4.085056781768799, "learning_rate": 9.691483463790112e-05, "loss": 2.46023006439209, "memory(GiB)": 70.96, "step": 13125, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.67116 }, { "epoch": 0.5625294546077717, "grad_norm": 4.490726470947266, "learning_rate": 9.69125068465183e-05, "loss": 2.7236351013183593, "memory(GiB)": 70.96, "step": 13130, "token_acc": 0.45161290322580644, "train_speed(iter/s)": 0.671184 }, { "epoch": 0.5627436699370207, "grad_norm": 4.149823188781738, "learning_rate": 9.691017820527139e-05, "loss": 2.446076774597168, "memory(GiB)": 70.96, "step": 13135, "token_acc": 0.48372093023255813, "train_speed(iter/s)": 0.671196 }, { "epoch": 0.5629578852662697, "grad_norm": 4.027271747589111, "learning_rate": 9.69078487142026e-05, "loss": 2.519632911682129, "memory(GiB)": 70.96, "step": 13140, "token_acc": 0.4349442379182156, "train_speed(iter/s)": 0.671176 }, { "epoch": 0.5631721005955186, "grad_norm": 3.8462235927581787, "learning_rate": 9.690551837335414e-05, "loss": 2.4285568237304687, "memory(GiB)": 70.96, "step": 13145, "token_acc": 0.49242424242424243, "train_speed(iter/s)": 0.671215 }, { "epoch": 
0.5633863159247676, "grad_norm": 4.2352399826049805, "learning_rate": 9.690318718276819e-05, "loss": 2.244552993774414, "memory(GiB)": 70.96, "step": 13150, "token_acc": 0.5039370078740157, "train_speed(iter/s)": 0.671219 }, { "epoch": 0.5636005312540165, "grad_norm": 2.726295232772827, "learning_rate": 9.690085514248701e-05, "loss": 2.1911857604980467, "memory(GiB)": 70.96, "step": 13155, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.671225 }, { "epoch": 0.5638147465832655, "grad_norm": 2.802579402923584, "learning_rate": 9.689852225255284e-05, "loss": 2.2720703125, "memory(GiB)": 70.96, "step": 13160, "token_acc": 0.501628664495114, "train_speed(iter/s)": 0.67125 }, { "epoch": 0.5640289619125145, "grad_norm": 4.165881156921387, "learning_rate": 9.689618851300797e-05, "loss": 2.0761905670166017, "memory(GiB)": 70.96, "step": 13165, "token_acc": 0.5211726384364821, "train_speed(iter/s)": 0.671271 }, { "epoch": 0.5642431772417634, "grad_norm": 3.5631983280181885, "learning_rate": 9.689385392389463e-05, "loss": 2.437246322631836, "memory(GiB)": 70.96, "step": 13170, "token_acc": 0.4820359281437126, "train_speed(iter/s)": 0.671281 }, { "epoch": 0.5644573925710124, "grad_norm": 3.454388380050659, "learning_rate": 9.689151848525515e-05, "loss": 2.530527114868164, "memory(GiB)": 70.96, "step": 13175, "token_acc": 0.49538461538461537, "train_speed(iter/s)": 0.671283 }, { "epoch": 0.5646716079002614, "grad_norm": 3.8704276084899902, "learning_rate": 9.688918219713181e-05, "loss": 2.5351226806640623, "memory(GiB)": 70.96, "step": 13180, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.671327 }, { "epoch": 0.5648858232295103, "grad_norm": 4.1539764404296875, "learning_rate": 9.688684505956697e-05, "loss": 2.307172393798828, "memory(GiB)": 70.96, "step": 13185, "token_acc": 0.5079365079365079, "train_speed(iter/s)": 0.671362 }, { "epoch": 0.5651000385587592, "grad_norm": 3.3093652725219727, "learning_rate": 9.688450707260293e-05, "loss": 
2.373029899597168, "memory(GiB)": 70.96, "step": 13190, "token_acc": 0.4979253112033195, "train_speed(iter/s)": 0.671366 }, { "epoch": 0.5653142538880083, "grad_norm": 3.8667373657226562, "learning_rate": 9.688216823628207e-05, "loss": 2.138393783569336, "memory(GiB)": 70.96, "step": 13195, "token_acc": 0.4935064935064935, "train_speed(iter/s)": 0.67139 }, { "epoch": 0.5655284692172572, "grad_norm": 3.689976930618286, "learning_rate": 9.687982855064676e-05, "loss": 2.4945724487304686, "memory(GiB)": 70.96, "step": 13200, "token_acc": 0.47633136094674555, "train_speed(iter/s)": 0.671395 }, { "epoch": 0.5657426845465061, "grad_norm": 4.3154520988464355, "learning_rate": 9.687748801573937e-05, "loss": 2.4408851623535157, "memory(GiB)": 70.96, "step": 13205, "token_acc": 0.5018050541516246, "train_speed(iter/s)": 0.671452 }, { "epoch": 0.5659568998757551, "grad_norm": 3.8110268115997314, "learning_rate": 9.687514663160231e-05, "loss": 2.241567611694336, "memory(GiB)": 70.96, "step": 13210, "token_acc": 0.5542635658914729, "train_speed(iter/s)": 0.671477 }, { "epoch": 0.5661711152050041, "grad_norm": 3.5934786796569824, "learning_rate": 9.6872804398278e-05, "loss": 2.7520050048828124, "memory(GiB)": 70.96, "step": 13215, "token_acc": 0.4098939929328622, "train_speed(iter/s)": 0.671467 }, { "epoch": 0.566385330534253, "grad_norm": 5.366454124450684, "learning_rate": 9.687046131580886e-05, "loss": 2.1906455993652343, "memory(GiB)": 70.96, "step": 13220, "token_acc": 0.5213675213675214, "train_speed(iter/s)": 0.671523 }, { "epoch": 0.566599545863502, "grad_norm": 4.746337890625, "learning_rate": 9.686811738423736e-05, "loss": 2.455821990966797, "memory(GiB)": 70.96, "step": 13225, "token_acc": 0.4880546075085324, "train_speed(iter/s)": 0.671524 }, { "epoch": 0.566813761192751, "grad_norm": 3.3974034786224365, "learning_rate": 9.686577260360592e-05, "loss": 2.3129831314086915, "memory(GiB)": 70.96, "step": 13230, "token_acc": 0.548951048951049, "train_speed(iter/s)": 
0.671585 }, { "epoch": 0.5670279765219999, "grad_norm": 2.5857913494110107, "learning_rate": 9.686342697395707e-05, "loss": 2.3512943267822264, "memory(GiB)": 70.96, "step": 13235, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.671566 }, { "epoch": 0.5672421918512489, "grad_norm": 2.7485973834991455, "learning_rate": 9.686108049533328e-05, "loss": 2.2629356384277344, "memory(GiB)": 70.96, "step": 13240, "token_acc": 0.5111111111111111, "train_speed(iter/s)": 0.671499 }, { "epoch": 0.5674564071804978, "grad_norm": 3.329097032546997, "learning_rate": 9.685873316777705e-05, "loss": 2.1757436752319337, "memory(GiB)": 70.96, "step": 13245, "token_acc": 0.5197368421052632, "train_speed(iter/s)": 0.671445 }, { "epoch": 0.5676706225097468, "grad_norm": 3.204904556274414, "learning_rate": 9.685638499133091e-05, "loss": 2.478514289855957, "memory(GiB)": 70.96, "step": 13250, "token_acc": 0.46621621621621623, "train_speed(iter/s)": 0.671458 }, { "epoch": 0.5678848378389958, "grad_norm": 4.25732421875, "learning_rate": 9.68540359660374e-05, "loss": 2.600332260131836, "memory(GiB)": 70.96, "step": 13255, "token_acc": 0.4879725085910653, "train_speed(iter/s)": 0.671495 }, { "epoch": 0.5680990531682447, "grad_norm": 5.033324241638184, "learning_rate": 9.685168609193909e-05, "loss": 2.502394104003906, "memory(GiB)": 70.96, "step": 13260, "token_acc": 0.5257352941176471, "train_speed(iter/s)": 0.6715 }, { "epoch": 0.5683132684974936, "grad_norm": 2.5284016132354736, "learning_rate": 9.684933536907852e-05, "loss": 2.139469528198242, "memory(GiB)": 70.96, "step": 13265, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.671443 }, { "epoch": 0.5685274838267427, "grad_norm": 4.068824291229248, "learning_rate": 9.68469837974983e-05, "loss": 2.720599555969238, "memory(GiB)": 70.96, "step": 13270, "token_acc": 0.4306569343065693, "train_speed(iter/s)": 0.671498 }, { "epoch": 0.5687416991559916, "grad_norm": 3.84326171875, "learning_rate": 9.684463137724101e-05, "loss": 
2.63958740234375, "memory(GiB)": 70.96, "step": 13275, "token_acc": 0.4842105263157895, "train_speed(iter/s)": 0.671513 }, { "epoch": 0.5689559144852405, "grad_norm": 2.9387311935424805, "learning_rate": 9.684227810834929e-05, "loss": 2.574586296081543, "memory(GiB)": 70.96, "step": 13280, "token_acc": 0.44673539518900346, "train_speed(iter/s)": 0.67153 }, { "epoch": 0.5691701298144896, "grad_norm": 3.6215882301330566, "learning_rate": 9.683992399086577e-05, "loss": 2.4241214752197267, "memory(GiB)": 70.96, "step": 13285, "token_acc": 0.4885245901639344, "train_speed(iter/s)": 0.671561 }, { "epoch": 0.5693843451437385, "grad_norm": 3.1258976459503174, "learning_rate": 9.683756902483308e-05, "loss": 2.2803394317626955, "memory(GiB)": 70.96, "step": 13290, "token_acc": 0.5405405405405406, "train_speed(iter/s)": 0.671577 }, { "epoch": 0.5695985604729874, "grad_norm": 3.4912729263305664, "learning_rate": 9.683521321029388e-05, "loss": 2.2406803131103517, "memory(GiB)": 70.96, "step": 13295, "token_acc": 0.5473251028806584, "train_speed(iter/s)": 0.671643 }, { "epoch": 0.5698127758022364, "grad_norm": 2.978515386581421, "learning_rate": 9.683285654729086e-05, "loss": 2.1770387649536134, "memory(GiB)": 70.96, "step": 13300, "token_acc": 0.5259938837920489, "train_speed(iter/s)": 0.671588 }, { "epoch": 0.5700269911314854, "grad_norm": 2.742246627807617, "learning_rate": 9.683049903586671e-05, "loss": 2.5594406127929688, "memory(GiB)": 70.96, "step": 13305, "token_acc": 0.4826254826254826, "train_speed(iter/s)": 0.671623 }, { "epoch": 0.5702412064607343, "grad_norm": 3.725783348083496, "learning_rate": 9.682814067606414e-05, "loss": 2.331492805480957, "memory(GiB)": 70.96, "step": 13310, "token_acc": 0.47950819672131145, "train_speed(iter/s)": 0.671661 }, { "epoch": 0.5704554217899833, "grad_norm": 3.6204493045806885, "learning_rate": 9.682578146792589e-05, "loss": 2.3182645797729493, "memory(GiB)": 70.96, "step": 13315, "token_acc": 0.5, "train_speed(iter/s)": 0.671691 }, 
{ "epoch": 0.5706696371192322, "grad_norm": 3.204521894454956, "learning_rate": 9.682342141149467e-05, "loss": 2.4664770126342774, "memory(GiB)": 70.96, "step": 13320, "token_acc": 0.488, "train_speed(iter/s)": 0.671659 }, { "epoch": 0.5708838524484812, "grad_norm": 4.287425518035889, "learning_rate": 9.682106050681324e-05, "loss": 2.5208108901977537, "memory(GiB)": 70.96, "step": 13325, "token_acc": 0.4826254826254826, "train_speed(iter/s)": 0.671651 }, { "epoch": 0.5710980677777302, "grad_norm": 3.620542526245117, "learning_rate": 9.681869875392439e-05, "loss": 2.698353958129883, "memory(GiB)": 70.96, "step": 13330, "token_acc": 0.4272151898734177, "train_speed(iter/s)": 0.671732 }, { "epoch": 0.5713122831069791, "grad_norm": 3.0553202629089355, "learning_rate": 9.681633615287088e-05, "loss": 2.5742847442626955, "memory(GiB)": 70.96, "step": 13335, "token_acc": 0.5, "train_speed(iter/s)": 0.671785 }, { "epoch": 0.571526498436228, "grad_norm": 5.946050643920898, "learning_rate": 9.681397270369553e-05, "loss": 2.6383819580078125, "memory(GiB)": 70.96, "step": 13340, "token_acc": 0.4807017543859649, "train_speed(iter/s)": 0.671778 }, { "epoch": 0.5717407137654771, "grad_norm": 3.2847776412963867, "learning_rate": 9.681160840644115e-05, "loss": 2.4610113143920898, "memory(GiB)": 70.96, "step": 13345, "token_acc": 0.5220338983050847, "train_speed(iter/s)": 0.671747 }, { "epoch": 0.571954929094726, "grad_norm": 4.233858585357666, "learning_rate": 9.680924326115058e-05, "loss": 2.3931093215942383, "memory(GiB)": 70.96, "step": 13350, "token_acc": 0.5036764705882353, "train_speed(iter/s)": 0.671687 }, { "epoch": 0.5721691444239749, "grad_norm": 2.9596168994903564, "learning_rate": 9.680687726786663e-05, "loss": 2.168690872192383, "memory(GiB)": 70.96, "step": 13355, "token_acc": 0.5053003533568905, "train_speed(iter/s)": 0.671737 }, { "epoch": 0.572383359753224, "grad_norm": 2.77896785736084, "learning_rate": 9.680451042663221e-05, "loss": 2.64947452545166, 
"memory(GiB)": 70.96, "step": 13360, "token_acc": 0.44477611940298506, "train_speed(iter/s)": 0.671691 }, { "epoch": 0.5725975750824729, "grad_norm": 3.7463109493255615, "learning_rate": 9.680214273749019e-05, "loss": 2.508728790283203, "memory(GiB)": 70.96, "step": 13365, "token_acc": 0.46875, "train_speed(iter/s)": 0.671735 }, { "epoch": 0.5728117904117218, "grad_norm": 3.6089279651641846, "learning_rate": 9.679977420048343e-05, "loss": 2.5387847900390623, "memory(GiB)": 70.96, "step": 13370, "token_acc": 0.46956521739130436, "train_speed(iter/s)": 0.671755 }, { "epoch": 0.5730260057409708, "grad_norm": 4.3692240715026855, "learning_rate": 9.679740481565486e-05, "loss": 2.5786882400512696, "memory(GiB)": 70.96, "step": 13375, "token_acc": 0.4564459930313589, "train_speed(iter/s)": 0.671773 }, { "epoch": 0.5732402210702198, "grad_norm": 2.829357624053955, "learning_rate": 9.679503458304741e-05, "loss": 2.579438018798828, "memory(GiB)": 70.96, "step": 13380, "token_acc": 0.4793103448275862, "train_speed(iter/s)": 0.671815 }, { "epoch": 0.5734544363994687, "grad_norm": 4.707968711853027, "learning_rate": 9.679266350270402e-05, "loss": 2.320212173461914, "memory(GiB)": 70.96, "step": 13385, "token_acc": 0.49038461538461536, "train_speed(iter/s)": 0.671798 }, { "epoch": 0.5736686517287177, "grad_norm": 3.2898976802825928, "learning_rate": 9.679029157466761e-05, "loss": 2.409801483154297, "memory(GiB)": 70.96, "step": 13390, "token_acc": 0.4748201438848921, "train_speed(iter/s)": 0.671832 }, { "epoch": 0.5738828670579667, "grad_norm": 3.139374017715454, "learning_rate": 9.678791879898119e-05, "loss": 2.3907825469970705, "memory(GiB)": 70.96, "step": 13395, "token_acc": 0.5098684210526315, "train_speed(iter/s)": 0.671786 }, { "epoch": 0.5740970823872156, "grad_norm": 3.2911152839660645, "learning_rate": 9.678554517568773e-05, "loss": 2.3508682250976562, "memory(GiB)": 70.96, "step": 13400, "token_acc": 0.49242424242424243, "train_speed(iter/s)": 0.671727 }, { "epoch": 
0.5743112977164646, "grad_norm": 3.6293413639068604, "learning_rate": 9.678317070483023e-05, "loss": 2.4895580291748045, "memory(GiB)": 70.96, "step": 13405, "token_acc": 0.4664310954063604, "train_speed(iter/s)": 0.67173 }, { "epoch": 0.5745255130457135, "grad_norm": 3.5715322494506836, "learning_rate": 9.678079538645171e-05, "loss": 2.1936277389526366, "memory(GiB)": 70.96, "step": 13410, "token_acc": 0.5046728971962616, "train_speed(iter/s)": 0.671733 }, { "epoch": 0.5747397283749626, "grad_norm": 2.8633642196655273, "learning_rate": 9.677841922059518e-05, "loss": 2.4213687896728517, "memory(GiB)": 70.96, "step": 13415, "token_acc": 0.4970414201183432, "train_speed(iter/s)": 0.671729 }, { "epoch": 0.5749539437042115, "grad_norm": 3.088806629180908, "learning_rate": 9.677604220730373e-05, "loss": 2.5697341918945313, "memory(GiB)": 70.96, "step": 13420, "token_acc": 0.46504559270516715, "train_speed(iter/s)": 0.671735 }, { "epoch": 0.5751681590334604, "grad_norm": 4.104569435119629, "learning_rate": 9.677366434662039e-05, "loss": 2.3160505294799805, "memory(GiB)": 70.96, "step": 13425, "token_acc": 0.48863636363636365, "train_speed(iter/s)": 0.671782 }, { "epoch": 0.5753823743627094, "grad_norm": 3.5325539112091064, "learning_rate": 9.677128563858823e-05, "loss": 2.598938751220703, "memory(GiB)": 70.96, "step": 13430, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.671777 }, { "epoch": 0.5755965896919584, "grad_norm": 4.124508857727051, "learning_rate": 9.676890608325036e-05, "loss": 2.8133663177490233, "memory(GiB)": 70.96, "step": 13435, "token_acc": 0.42424242424242425, "train_speed(iter/s)": 0.671836 }, { "epoch": 0.5758108050212073, "grad_norm": 3.103529691696167, "learning_rate": 9.676652568064989e-05, "loss": 2.406601142883301, "memory(GiB)": 70.96, "step": 13440, "token_acc": 0.5079365079365079, "train_speed(iter/s)": 0.671836 }, { "epoch": 0.5760250203504563, "grad_norm": 3.1293468475341797, "learning_rate": 9.676414443082992e-05, "loss": 
2.5070932388305662, "memory(GiB)": 70.96, "step": 13445, "token_acc": 0.46846846846846846, "train_speed(iter/s)": 0.671853 }, { "epoch": 0.5762392356797053, "grad_norm": 3.6459808349609375, "learning_rate": 9.676176233383362e-05, "loss": 2.742408561706543, "memory(GiB)": 70.96, "step": 13450, "token_acc": 0.46394984326018807, "train_speed(iter/s)": 0.671879 }, { "epoch": 0.5764534510089542, "grad_norm": 3.2342681884765625, "learning_rate": 9.675937938970414e-05, "loss": 2.3668704986572267, "memory(GiB)": 70.96, "step": 13455, "token_acc": 0.48773006134969327, "train_speed(iter/s)": 0.671943 }, { "epoch": 0.5766676663382032, "grad_norm": 3.1033236980438232, "learning_rate": 9.675699559848463e-05, "loss": 2.2838165283203127, "memory(GiB)": 70.96, "step": 13460, "token_acc": 0.5053003533568905, "train_speed(iter/s)": 0.671907 }, { "epoch": 0.5768818816674521, "grad_norm": 3.8382866382598877, "learning_rate": 9.675461096021828e-05, "loss": 2.4727672576904296, "memory(GiB)": 70.96, "step": 13465, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.671899 }, { "epoch": 0.5770960969967011, "grad_norm": 3.4773409366607666, "learning_rate": 9.67522254749483e-05, "loss": 2.4258447647094727, "memory(GiB)": 70.96, "step": 13470, "token_acc": 0.5220883534136547, "train_speed(iter/s)": 0.67192 }, { "epoch": 0.5773103123259501, "grad_norm": 4.5595703125, "learning_rate": 9.674983914271788e-05, "loss": 2.326015090942383, "memory(GiB)": 70.96, "step": 13475, "token_acc": 0.48736462093862815, "train_speed(iter/s)": 0.671926 }, { "epoch": 0.577524527655199, "grad_norm": 3.6071040630340576, "learning_rate": 9.674745196357028e-05, "loss": 2.493094825744629, "memory(GiB)": 70.96, "step": 13480, "token_acc": 0.47987616099071206, "train_speed(iter/s)": 0.671917 }, { "epoch": 0.5777387429844479, "grad_norm": 3.5528013706207275, "learning_rate": 9.674506393754875e-05, "loss": 2.361952018737793, "memory(GiB)": 70.96, "step": 13485, "token_acc": 0.5081967213114754, 
"train_speed(iter/s)": 0.671949 }, { "epoch": 0.577952958313697, "grad_norm": 5.001256942749023, "learning_rate": 9.67426750646965e-05, "loss": 2.5745338439941405, "memory(GiB)": 70.96, "step": 13490, "token_acc": 0.4525316455696203, "train_speed(iter/s)": 0.67198 }, { "epoch": 0.5781671736429459, "grad_norm": 3.4810850620269775, "learning_rate": 9.674028534505687e-05, "loss": 2.4267032623291014, "memory(GiB)": 70.96, "step": 13495, "token_acc": 0.5162337662337663, "train_speed(iter/s)": 0.671954 }, { "epoch": 0.5783813889721948, "grad_norm": 4.128421783447266, "learning_rate": 9.673789477867312e-05, "loss": 2.3817010879516602, "memory(GiB)": 70.96, "step": 13500, "token_acc": 0.4927007299270073, "train_speed(iter/s)": 0.671947 }, { "epoch": 0.5783813889721948, "eval_loss": 2.1024131774902344, "eval_runtime": 15.8656, "eval_samples_per_second": 6.303, "eval_steps_per_second": 6.303, "eval_token_acc": 0.468586387434555, "step": 13500 }, { "epoch": 0.5785956043014439, "grad_norm": 3.36653208732605, "learning_rate": 9.673550336558857e-05, "loss": 2.5229360580444338, "memory(GiB)": 70.96, "step": 13505, "token_acc": 0.4735322425409047, "train_speed(iter/s)": 0.67129 }, { "epoch": 0.5788098196306928, "grad_norm": 4.816285610198975, "learning_rate": 9.673311110584653e-05, "loss": 2.4343948364257812, "memory(GiB)": 70.96, "step": 13510, "token_acc": 0.44666666666666666, "train_speed(iter/s)": 0.671205 }, { "epoch": 0.5790240349599417, "grad_norm": 3.5357937812805176, "learning_rate": 9.673071799949032e-05, "loss": 2.656909942626953, "memory(GiB)": 70.96, "step": 13515, "token_acc": 0.41964285714285715, "train_speed(iter/s)": 0.671219 }, { "epoch": 0.5792382502891907, "grad_norm": 3.183867931365967, "learning_rate": 9.672832404656335e-05, "loss": 2.495861053466797, "memory(GiB)": 70.96, "step": 13520, "token_acc": 0.475, "train_speed(iter/s)": 0.671209 }, { "epoch": 0.5794524656184397, "grad_norm": 3.464318037033081, "learning_rate": 9.672592924710894e-05, "loss": 
2.4655670166015624, "memory(GiB)": 70.96, "step": 13525, "token_acc": 0.47692307692307695, "train_speed(iter/s)": 0.671196 }, { "epoch": 0.5796666809476886, "grad_norm": 4.192771911621094, "learning_rate": 9.672353360117048e-05, "loss": 2.3901365280151365, "memory(GiB)": 70.96, "step": 13530, "token_acc": 0.5119453924914675, "train_speed(iter/s)": 0.671213 }, { "epoch": 0.5798808962769376, "grad_norm": 3.934281587600708, "learning_rate": 9.67211371087914e-05, "loss": 2.054254150390625, "memory(GiB)": 70.96, "step": 13535, "token_acc": 0.5431034482758621, "train_speed(iter/s)": 0.671253 }, { "epoch": 0.5800951116061865, "grad_norm": 2.4905846118927, "learning_rate": 9.67187397700151e-05, "loss": 2.3366363525390623, "memory(GiB)": 70.96, "step": 13540, "token_acc": 0.49299719887955185, "train_speed(iter/s)": 0.671202 }, { "epoch": 0.5803093269354355, "grad_norm": 3.8755316734313965, "learning_rate": 9.671634158488496e-05, "loss": 2.6274913787841796, "memory(GiB)": 70.96, "step": 13545, "token_acc": 0.4556213017751479, "train_speed(iter/s)": 0.671234 }, { "epoch": 0.5805235422646845, "grad_norm": 3.388216733932495, "learning_rate": 9.671394255344451e-05, "loss": 2.0301143646240236, "memory(GiB)": 70.96, "step": 13550, "token_acc": 0.5492957746478874, "train_speed(iter/s)": 0.671154 }, { "epoch": 0.5807377575939334, "grad_norm": 4.617889881134033, "learning_rate": 9.671154267573714e-05, "loss": 2.2168060302734376, "memory(GiB)": 70.96, "step": 13555, "token_acc": 0.4811715481171548, "train_speed(iter/s)": 0.671104 }, { "epoch": 0.5809519729231823, "grad_norm": 2.9949560165405273, "learning_rate": 9.670914195180639e-05, "loss": 2.4222972869873045, "memory(GiB)": 70.96, "step": 13560, "token_acc": 0.4764705882352941, "train_speed(iter/s)": 0.671083 }, { "epoch": 0.5811661882524314, "grad_norm": 4.655354022979736, "learning_rate": 9.670674038169568e-05, "loss": 2.252206230163574, "memory(GiB)": 70.96, "step": 13565, "token_acc": 0.5, "train_speed(iter/s)": 0.671066 }, { 
"epoch": 0.5813804035816803, "grad_norm": 2.5706522464752197, "learning_rate": 9.670433796544858e-05, "loss": 2.263293647766113, "memory(GiB)": 70.96, "step": 13570, "token_acc": 0.4936708860759494, "train_speed(iter/s)": 0.67098 }, { "epoch": 0.5815946189109292, "grad_norm": 3.0630180835723877, "learning_rate": 9.670193470310857e-05, "loss": 2.3844860076904295, "memory(GiB)": 70.96, "step": 13575, "token_acc": 0.49240121580547114, "train_speed(iter/s)": 0.670956 }, { "epoch": 0.5818088342401783, "grad_norm": 3.6220762729644775, "learning_rate": 9.66995305947192e-05, "loss": 2.271818733215332, "memory(GiB)": 70.96, "step": 13580, "token_acc": 0.49586776859504134, "train_speed(iter/s)": 0.670985 }, { "epoch": 0.5820230495694272, "grad_norm": 4.290626525878906, "learning_rate": 9.669712564032404e-05, "loss": 2.4585296630859377, "memory(GiB)": 70.96, "step": 13585, "token_acc": 0.4865771812080537, "train_speed(iter/s)": 0.671018 }, { "epoch": 0.5822372648986761, "grad_norm": 5.6848626136779785, "learning_rate": 9.669471983996663e-05, "loss": 2.567498207092285, "memory(GiB)": 70.96, "step": 13590, "token_acc": 0.4567901234567901, "train_speed(iter/s)": 0.671049 }, { "epoch": 0.5824514802279251, "grad_norm": 3.180720090866089, "learning_rate": 9.669231319369059e-05, "loss": 2.280058479309082, "memory(GiB)": 70.96, "step": 13595, "token_acc": 0.5030674846625767, "train_speed(iter/s)": 0.671082 }, { "epoch": 0.5826656955571741, "grad_norm": 3.813692569732666, "learning_rate": 9.668990570153946e-05, "loss": 2.190080261230469, "memory(GiB)": 70.96, "step": 13600, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.671071 }, { "epoch": 0.582879910886423, "grad_norm": 3.4887847900390625, "learning_rate": 9.668749736355692e-05, "loss": 2.3196449279785156, "memory(GiB)": 70.96, "step": 13605, "token_acc": 0.48363636363636364, "train_speed(iter/s)": 0.671071 }, { "epoch": 0.583094126215672, "grad_norm": 4.189397811889648, "learning_rate": 9.668508817978657e-05, "loss": 
2.6158731460571287, "memory(GiB)": 70.96, "step": 13610, "token_acc": 0.4448051948051948, "train_speed(iter/s)": 0.671038 }, { "epoch": 0.583308341544921, "grad_norm": 2.963919162750244, "learning_rate": 9.668267815027203e-05, "loss": 2.3477264404296876, "memory(GiB)": 70.96, "step": 13615, "token_acc": 0.47703180212014135, "train_speed(iter/s)": 0.671055 }, { "epoch": 0.5835225568741699, "grad_norm": 3.259685754776001, "learning_rate": 9.668026727505699e-05, "loss": 2.5957530975341796, "memory(GiB)": 70.96, "step": 13620, "token_acc": 0.5209790209790209, "train_speed(iter/s)": 0.67108 }, { "epoch": 0.5837367722034189, "grad_norm": 3.9432358741760254, "learning_rate": 9.667785555418514e-05, "loss": 2.73629150390625, "memory(GiB)": 70.96, "step": 13625, "token_acc": 0.48089171974522293, "train_speed(iter/s)": 0.671134 }, { "epoch": 0.5839509875326678, "grad_norm": 2.824023723602295, "learning_rate": 9.667544298770013e-05, "loss": 2.699026870727539, "memory(GiB)": 70.96, "step": 13630, "token_acc": 0.4334470989761092, "train_speed(iter/s)": 0.671089 }, { "epoch": 0.5841652028619168, "grad_norm": 3.320723295211792, "learning_rate": 9.667302957564569e-05, "loss": 2.4522220611572267, "memory(GiB)": 70.96, "step": 13635, "token_acc": 0.50199203187251, "train_speed(iter/s)": 0.671148 }, { "epoch": 0.5843794181911658, "grad_norm": 5.915102005004883, "learning_rate": 9.667061531806552e-05, "loss": 2.144303321838379, "memory(GiB)": 70.96, "step": 13640, "token_acc": 0.5546875, "train_speed(iter/s)": 0.671178 }, { "epoch": 0.5845936335204147, "grad_norm": 3.394317626953125, "learning_rate": 9.666820021500341e-05, "loss": 2.579201889038086, "memory(GiB)": 70.96, "step": 13645, "token_acc": 0.48286604361370716, "train_speed(iter/s)": 0.671173 }, { "epoch": 0.5848078488496636, "grad_norm": 4.347491264343262, "learning_rate": 9.666578426650303e-05, "loss": 2.210639762878418, "memory(GiB)": 70.96, "step": 13650, "token_acc": 0.5167785234899329, "train_speed(iter/s)": 0.67119 }, { 
"epoch": 0.5850220641789127, "grad_norm": 3.4670002460479736, "learning_rate": 9.666336747260823e-05, "loss": 2.2195178985595705, "memory(GiB)": 70.96, "step": 13655, "token_acc": 0.5163636363636364, "train_speed(iter/s)": 0.671165 }, { "epoch": 0.5852362795081616, "grad_norm": 4.464306354522705, "learning_rate": 9.666094983336273e-05, "loss": 2.1650436401367186, "memory(GiB)": 70.96, "step": 13660, "token_acc": 0.5018050541516246, "train_speed(iter/s)": 0.671154 }, { "epoch": 0.5854504948374105, "grad_norm": 4.002740383148193, "learning_rate": 9.665853134881035e-05, "loss": 2.585171699523926, "memory(GiB)": 70.96, "step": 13665, "token_acc": 0.4746376811594203, "train_speed(iter/s)": 0.671137 }, { "epoch": 0.5856647101666596, "grad_norm": 3.031026840209961, "learning_rate": 9.665611201899493e-05, "loss": 2.2314121246337892, "memory(GiB)": 70.96, "step": 13670, "token_acc": 0.5257731958762887, "train_speed(iter/s)": 0.671168 }, { "epoch": 0.5858789254959085, "grad_norm": 3.382448673248291, "learning_rate": 9.665369184396027e-05, "loss": 2.3663711547851562, "memory(GiB)": 70.96, "step": 13675, "token_acc": 0.5120967741935484, "train_speed(iter/s)": 0.671142 }, { "epoch": 0.5860931408251574, "grad_norm": 3.6227197647094727, "learning_rate": 9.665127082375019e-05, "loss": 2.382856559753418, "memory(GiB)": 70.96, "step": 13680, "token_acc": 0.4896551724137931, "train_speed(iter/s)": 0.671137 }, { "epoch": 0.5863073561544064, "grad_norm": 3.09609317779541, "learning_rate": 9.66488489584086e-05, "loss": 2.4339622497558593, "memory(GiB)": 70.96, "step": 13685, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.671129 }, { "epoch": 0.5865215714836554, "grad_norm": 2.9214696884155273, "learning_rate": 9.664642624797935e-05, "loss": 2.358022689819336, "memory(GiB)": 70.96, "step": 13690, "token_acc": 0.5032258064516129, "train_speed(iter/s)": 0.671128 }, { "epoch": 0.5867357868129043, "grad_norm": 3.981327772140503, "learning_rate": 9.664400269250632e-05, "loss": 
2.549778938293457, "memory(GiB)": 70.96, "step": 13695, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.671188 }, { "epoch": 0.5869500021421533, "grad_norm": 3.5507493019104004, "learning_rate": 9.664157829203344e-05, "loss": 2.5796075820922852, "memory(GiB)": 70.96, "step": 13700, "token_acc": 0.4542124542124542, "train_speed(iter/s)": 0.671144 }, { "epoch": 0.5871642174714022, "grad_norm": 2.819627285003662, "learning_rate": 9.66391530466046e-05, "loss": 2.547530746459961, "memory(GiB)": 70.96, "step": 13705, "token_acc": 0.4645390070921986, "train_speed(iter/s)": 0.671177 }, { "epoch": 0.5873784328006512, "grad_norm": 3.25469708442688, "learning_rate": 9.663672695626376e-05, "loss": 2.50134334564209, "memory(GiB)": 70.96, "step": 13710, "token_acc": 0.5080906148867314, "train_speed(iter/s)": 0.671182 }, { "epoch": 0.5875926481299002, "grad_norm": 3.0855963230133057, "learning_rate": 9.663430002105487e-05, "loss": 2.3799812316894533, "memory(GiB)": 70.96, "step": 13715, "token_acc": 0.5030487804878049, "train_speed(iter/s)": 0.671155 }, { "epoch": 0.5878068634591491, "grad_norm": 4.58825159072876, "learning_rate": 9.663187224102189e-05, "loss": 2.4117332458496095, "memory(GiB)": 70.96, "step": 13720, "token_acc": 0.5127272727272727, "train_speed(iter/s)": 0.6711 }, { "epoch": 0.588021078788398, "grad_norm": 2.9395809173583984, "learning_rate": 9.662944361620878e-05, "loss": 2.4830097198486327, "memory(GiB)": 70.96, "step": 13725, "token_acc": 0.48253968253968255, "train_speed(iter/s)": 0.671106 }, { "epoch": 0.5882352941176471, "grad_norm": 2.859607696533203, "learning_rate": 9.662701414665956e-05, "loss": 2.2820693969726564, "memory(GiB)": 70.96, "step": 13730, "token_acc": 0.4748427672955975, "train_speed(iter/s)": 0.671061 }, { "epoch": 0.588449509446896, "grad_norm": 6.042450904846191, "learning_rate": 9.662458383241824e-05, "loss": 2.6544731140136717, "memory(GiB)": 70.96, "step": 13735, "token_acc": 0.47560975609756095, "train_speed(iter/s)": 
0.671065 }, { "epoch": 0.588663724776145, "grad_norm": 3.291386842727661, "learning_rate": 9.662215267352886e-05, "loss": 2.44286994934082, "memory(GiB)": 70.96, "step": 13740, "token_acc": 0.4591549295774648, "train_speed(iter/s)": 0.67111 }, { "epoch": 0.588877940105394, "grad_norm": 3.077509880065918, "learning_rate": 9.661972067003544e-05, "loss": 2.770157814025879, "memory(GiB)": 70.96, "step": 13745, "token_acc": 0.4267515923566879, "train_speed(iter/s)": 0.671113 }, { "epoch": 0.5890921554346429, "grad_norm": 3.425266742706299, "learning_rate": 9.661728782198205e-05, "loss": 2.6892866134643554, "memory(GiB)": 70.96, "step": 13750, "token_acc": 0.4155844155844156, "train_speed(iter/s)": 0.671089 }, { "epoch": 0.5893063707638919, "grad_norm": 5.2887043952941895, "learning_rate": 9.661485412941275e-05, "loss": 2.2013486862182616, "memory(GiB)": 70.96, "step": 13755, "token_acc": 0.49814126394052044, "train_speed(iter/s)": 0.671106 }, { "epoch": 0.5895205860931408, "grad_norm": 4.2217793464660645, "learning_rate": 9.661241959237166e-05, "loss": 2.4108642578125, "memory(GiB)": 70.96, "step": 13760, "token_acc": 0.4945054945054945, "train_speed(iter/s)": 0.671106 }, { "epoch": 0.5897348014223898, "grad_norm": 3.2668917179107666, "learning_rate": 9.660998421090285e-05, "loss": 2.2287235260009766, "memory(GiB)": 70.96, "step": 13765, "token_acc": 0.5051546391752577, "train_speed(iter/s)": 0.671157 }, { "epoch": 0.5899490167516388, "grad_norm": 3.507328748703003, "learning_rate": 9.660754798505045e-05, "loss": 2.6092891693115234, "memory(GiB)": 70.96, "step": 13770, "token_acc": 0.4575757575757576, "train_speed(iter/s)": 0.671171 }, { "epoch": 0.5901632320808877, "grad_norm": 3.2184996604919434, "learning_rate": 9.660511091485861e-05, "loss": 2.365248107910156, "memory(GiB)": 70.96, "step": 13775, "token_acc": 0.5, "train_speed(iter/s)": 0.671138 }, { "epoch": 0.5903774474101366, "grad_norm": 3.568713903427124, "learning_rate": 9.660267300037146e-05, "loss": 
2.350955772399902, "memory(GiB)": 70.96, "step": 13780, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.671119 }, { "epoch": 0.5905916627393857, "grad_norm": 4.975555419921875, "learning_rate": 9.660023424163316e-05, "loss": 2.487325668334961, "memory(GiB)": 70.96, "step": 13785, "token_acc": 0.49193548387096775, "train_speed(iter/s)": 0.671135 }, { "epoch": 0.5908058780686346, "grad_norm": 3.4533748626708984, "learning_rate": 9.659779463868792e-05, "loss": 2.3922977447509766, "memory(GiB)": 70.96, "step": 13790, "token_acc": 0.52, "train_speed(iter/s)": 0.671095 }, { "epoch": 0.5910200933978835, "grad_norm": 3.843086004257202, "learning_rate": 9.659535419157991e-05, "loss": 2.4281362533569335, "memory(GiB)": 70.96, "step": 13795, "token_acc": 0.4740484429065744, "train_speed(iter/s)": 0.671111 }, { "epoch": 0.5912343087271326, "grad_norm": 3.1093945503234863, "learning_rate": 9.659291290035335e-05, "loss": 2.2243907928466795, "memory(GiB)": 70.96, "step": 13800, "token_acc": 0.5476190476190477, "train_speed(iter/s)": 0.671087 }, { "epoch": 0.5914485240563815, "grad_norm": 3.1994946002960205, "learning_rate": 9.659047076505245e-05, "loss": 2.3236228942871096, "memory(GiB)": 70.96, "step": 13805, "token_acc": 0.5074183976261127, "train_speed(iter/s)": 0.671072 }, { "epoch": 0.5916627393856304, "grad_norm": 2.9996755123138428, "learning_rate": 9.65880277857215e-05, "loss": 2.3493373870849608, "memory(GiB)": 70.96, "step": 13810, "token_acc": 0.4758364312267658, "train_speed(iter/s)": 0.671043 }, { "epoch": 0.5918769547148794, "grad_norm": 2.808246612548828, "learning_rate": 9.658558396240469e-05, "loss": 2.364798355102539, "memory(GiB)": 70.96, "step": 13815, "token_acc": 0.4735202492211838, "train_speed(iter/s)": 0.671032 }, { "epoch": 0.5920911700441284, "grad_norm": 3.1532535552978516, "learning_rate": 9.658313929514635e-05, "loss": 2.37437744140625, "memory(GiB)": 70.96, "step": 13820, "token_acc": 0.5, "train_speed(iter/s)": 0.671028 }, { "epoch": 
0.5923053853733773, "grad_norm": 3.368961811065674, "learning_rate": 9.658069378399073e-05, "loss": 2.497292900085449, "memory(GiB)": 70.96, "step": 13825, "token_acc": 0.45483870967741935, "train_speed(iter/s)": 0.671013 }, { "epoch": 0.5925196007026263, "grad_norm": 3.2421514987945557, "learning_rate": 9.657824742898214e-05, "loss": 2.7619565963745116, "memory(GiB)": 70.96, "step": 13830, "token_acc": 0.43859649122807015, "train_speed(iter/s)": 0.671067 }, { "epoch": 0.5927338160318752, "grad_norm": 3.176614999771118, "learning_rate": 9.657580023016492e-05, "loss": 2.5403724670410157, "memory(GiB)": 70.96, "step": 13835, "token_acc": 0.438871473354232, "train_speed(iter/s)": 0.671064 }, { "epoch": 0.5929480313611242, "grad_norm": 2.751211166381836, "learning_rate": 9.657335218758338e-05, "loss": 2.292893981933594, "memory(GiB)": 70.96, "step": 13840, "token_acc": 0.4965753424657534, "train_speed(iter/s)": 0.671059 }, { "epoch": 0.5931622466903732, "grad_norm": 3.0442817211151123, "learning_rate": 9.657090330128187e-05, "loss": 2.285953903198242, "memory(GiB)": 70.96, "step": 13845, "token_acc": 0.44528301886792454, "train_speed(iter/s)": 0.671117 }, { "epoch": 0.5933764620196221, "grad_norm": 2.8931171894073486, "learning_rate": 9.656845357130477e-05, "loss": 2.437261772155762, "memory(GiB)": 70.96, "step": 13850, "token_acc": 0.4728434504792332, "train_speed(iter/s)": 0.671069 }, { "epoch": 0.593590677348871, "grad_norm": 3.4012742042541504, "learning_rate": 9.656600299769645e-05, "loss": 2.543768119812012, "memory(GiB)": 70.96, "step": 13855, "token_acc": 0.450199203187251, "train_speed(iter/s)": 0.671059 }, { "epoch": 0.5938048926781201, "grad_norm": 4.175589084625244, "learning_rate": 9.656355158050129e-05, "loss": 2.4896013259887697, "memory(GiB)": 70.96, "step": 13860, "token_acc": 0.4900662251655629, "train_speed(iter/s)": 0.671029 }, { "epoch": 0.594019108007369, "grad_norm": 3.0972297191619873, "learning_rate": 9.656109931976372e-05, "loss": 
2.3072729110717773, "memory(GiB)": 70.96, "step": 13865, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.67102 }, { "epoch": 0.5942333233366179, "grad_norm": 3.8801262378692627, "learning_rate": 9.655864621552814e-05, "loss": 2.2957719802856444, "memory(GiB)": 70.96, "step": 13870, "token_acc": 0.5087108013937283, "train_speed(iter/s)": 0.671068 }, { "epoch": 0.594447538665867, "grad_norm": 6.248198509216309, "learning_rate": 9.655619226783904e-05, "loss": 2.7749008178710937, "memory(GiB)": 70.96, "step": 13875, "token_acc": 0.46387832699619774, "train_speed(iter/s)": 0.671033 }, { "epoch": 0.5946617539951159, "grad_norm": 4.098964691162109, "learning_rate": 9.655373747674083e-05, "loss": 2.3723005294799804, "memory(GiB)": 70.96, "step": 13880, "token_acc": 0.46808510638297873, "train_speed(iter/s)": 0.671085 }, { "epoch": 0.5948759693243648, "grad_norm": 4.420795917510986, "learning_rate": 9.6551281842278e-05, "loss": 2.6686887741088867, "memory(GiB)": 70.96, "step": 13885, "token_acc": 0.4626865671641791, "train_speed(iter/s)": 0.671093 }, { "epoch": 0.5950901846536139, "grad_norm": 4.1121296882629395, "learning_rate": 9.654882536449502e-05, "loss": 2.255261993408203, "memory(GiB)": 70.96, "step": 13890, "token_acc": 0.49049429657794674, "train_speed(iter/s)": 0.671151 }, { "epoch": 0.5953043999828628, "grad_norm": 4.856686592102051, "learning_rate": 9.65463680434364e-05, "loss": 2.4807567596435547, "memory(GiB)": 70.96, "step": 13895, "token_acc": 0.4379310344827586, "train_speed(iter/s)": 0.671127 }, { "epoch": 0.5955186153121117, "grad_norm": 2.6945114135742188, "learning_rate": 9.654390987914668e-05, "loss": 2.4470476150512694, "memory(GiB)": 70.96, "step": 13900, "token_acc": 0.4937106918238994, "train_speed(iter/s)": 0.671143 }, { "epoch": 0.5957328306413607, "grad_norm": 3.661461114883423, "learning_rate": 9.654145087167034e-05, "loss": 2.4619686126708986, "memory(GiB)": 70.96, "step": 13905, "token_acc": 0.4728434504792332, 
"train_speed(iter/s)": 0.671164 }, { "epoch": 0.5959470459706097, "grad_norm": 2.4979124069213867, "learning_rate": 9.6538991021052e-05, "loss": 2.5190670013427736, "memory(GiB)": 70.96, "step": 13910, "token_acc": 0.4664429530201342, "train_speed(iter/s)": 0.671131 }, { "epoch": 0.5961612612998586, "grad_norm": 3.1079249382019043, "learning_rate": 9.653653032733614e-05, "loss": 2.6458616256713867, "memory(GiB)": 70.96, "step": 13915, "token_acc": 0.42727272727272725, "train_speed(iter/s)": 0.671152 }, { "epoch": 0.5963754766291076, "grad_norm": 3.5497610569000244, "learning_rate": 9.65340687905674e-05, "loss": 2.770955467224121, "memory(GiB)": 70.96, "step": 13920, "token_acc": 0.4755244755244755, "train_speed(iter/s)": 0.671199 }, { "epoch": 0.5965896919583565, "grad_norm": 3.316673994064331, "learning_rate": 9.653160641079036e-05, "loss": 2.343073272705078, "memory(GiB)": 70.96, "step": 13925, "token_acc": 0.48333333333333334, "train_speed(iter/s)": 0.671157 }, { "epoch": 0.5968039072876055, "grad_norm": 3.0172672271728516, "learning_rate": 9.65291431880496e-05, "loss": 2.3453975677490235, "memory(GiB)": 70.96, "step": 13930, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.671163 }, { "epoch": 0.5970181226168545, "grad_norm": 5.117130279541016, "learning_rate": 9.652667912238976e-05, "loss": 2.196124267578125, "memory(GiB)": 70.96, "step": 13935, "token_acc": 0.46956521739130436, "train_speed(iter/s)": 0.671176 }, { "epoch": 0.5972323379461034, "grad_norm": 4.053595542907715, "learning_rate": 9.652421421385549e-05, "loss": 2.26232795715332, "memory(GiB)": 70.96, "step": 13940, "token_acc": 0.48, "train_speed(iter/s)": 0.671188 }, { "epoch": 0.5974465532753523, "grad_norm": 2.7509748935699463, "learning_rate": 9.652174846249144e-05, "loss": 2.204214096069336, "memory(GiB)": 70.96, "step": 13945, "token_acc": 0.5096774193548387, "train_speed(iter/s)": 0.671231 }, { "epoch": 0.5976607686046014, "grad_norm": 3.4984352588653564, "learning_rate": 
9.651928186834227e-05, "loss": 2.4658685684204102, "memory(GiB)": 70.96, "step": 13950, "token_acc": 0.476, "train_speed(iter/s)": 0.671299 }, { "epoch": 0.5978749839338503, "grad_norm": 2.79132080078125, "learning_rate": 9.651681443145268e-05, "loss": 2.2971778869628907, "memory(GiB)": 70.96, "step": 13955, "token_acc": 0.5204460966542751, "train_speed(iter/s)": 0.671343 }, { "epoch": 0.5980891992630992, "grad_norm": 3.2428221702575684, "learning_rate": 9.651434615186735e-05, "loss": 2.1026542663574217, "memory(GiB)": 70.96, "step": 13960, "token_acc": 0.5101214574898786, "train_speed(iter/s)": 0.671369 }, { "epoch": 0.5983034145923483, "grad_norm": 4.7299933433532715, "learning_rate": 9.6511877029631e-05, "loss": 2.7278158187866213, "memory(GiB)": 70.96, "step": 13965, "token_acc": 0.43656716417910446, "train_speed(iter/s)": 0.671392 }, { "epoch": 0.5985176299215972, "grad_norm": 4.708878040313721, "learning_rate": 9.650940706478838e-05, "loss": 2.158191108703613, "memory(GiB)": 70.96, "step": 13970, "token_acc": 0.5818965517241379, "train_speed(iter/s)": 0.671402 }, { "epoch": 0.5987318452508461, "grad_norm": 3.3591206073760986, "learning_rate": 9.650693625738422e-05, "loss": 2.7386280059814454, "memory(GiB)": 70.96, "step": 13975, "token_acc": 0.44, "train_speed(iter/s)": 0.671439 }, { "epoch": 0.5989460605800951, "grad_norm": 3.343594789505005, "learning_rate": 9.650446460746327e-05, "loss": 2.263881301879883, "memory(GiB)": 70.96, "step": 13980, "token_acc": 0.5057471264367817, "train_speed(iter/s)": 0.671529 }, { "epoch": 0.5991602759093441, "grad_norm": 3.0630719661712646, "learning_rate": 9.650199211507034e-05, "loss": 2.4292348861694335, "memory(GiB)": 70.96, "step": 13985, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.67152 }, { "epoch": 0.599374491238593, "grad_norm": 3.1703312397003174, "learning_rate": 9.649951878025019e-05, "loss": 2.605620765686035, "memory(GiB)": 70.96, "step": 13990, "token_acc": 0.4353932584269663, 
"train_speed(iter/s)": 0.671566 }, { "epoch": 0.599588706567842, "grad_norm": 3.9119808673858643, "learning_rate": 9.649704460304762e-05, "loss": 2.350077247619629, "memory(GiB)": 70.96, "step": 13995, "token_acc": 0.4528301886792453, "train_speed(iter/s)": 0.671545 }, { "epoch": 0.599802921897091, "grad_norm": 2.9158051013946533, "learning_rate": 9.649456958350749e-05, "loss": 2.5540714263916016, "memory(GiB)": 70.96, "step": 14000, "token_acc": 0.44333333333333336, "train_speed(iter/s)": 0.671529 }, { "epoch": 0.599802921897091, "eval_loss": 2.0684680938720703, "eval_runtime": 17.0649, "eval_samples_per_second": 5.86, "eval_steps_per_second": 5.86, "eval_token_acc": 0.48418156808803303, "step": 14000 }, { "epoch": 0.6000171372263399, "grad_norm": 3.743403196334839, "learning_rate": 9.649209372167461e-05, "loss": 2.201283645629883, "memory(GiB)": 70.96, "step": 14005, "token_acc": 0.4891640866873065, "train_speed(iter/s)": 0.67094 }, { "epoch": 0.6002313525555889, "grad_norm": 3.248814105987549, "learning_rate": 9.648961701759384e-05, "loss": 2.1848825454711913, "memory(GiB)": 70.96, "step": 14010, "token_acc": 0.5147679324894515, "train_speed(iter/s)": 0.670912 }, { "epoch": 0.6004455678848378, "grad_norm": 4.001185417175293, "learning_rate": 9.648713947131006e-05, "loss": 2.5701244354248045, "memory(GiB)": 70.96, "step": 14015, "token_acc": 0.47265625, "train_speed(iter/s)": 0.670934 }, { "epoch": 0.6006597832140868, "grad_norm": 4.059508800506592, "learning_rate": 9.648466108286813e-05, "loss": 2.499159240722656, "memory(GiB)": 70.96, "step": 14020, "token_acc": 0.4734848484848485, "train_speed(iter/s)": 0.670976 }, { "epoch": 0.6008739985433358, "grad_norm": 4.043659687042236, "learning_rate": 9.648218185231294e-05, "loss": 2.434103012084961, "memory(GiB)": 70.96, "step": 14025, "token_acc": 0.5082508250825083, "train_speed(iter/s)": 0.670941 }, { "epoch": 0.6010882138725847, "grad_norm": 3.669879674911499, "learning_rate": 9.647970177968945e-05, "loss": 
2.7039228439331056, "memory(GiB)": 70.96, "step": 14030, "token_acc": 0.44074074074074077, "train_speed(iter/s)": 0.670978 }, { "epoch": 0.6013024292018336, "grad_norm": 2.905266761779785, "learning_rate": 9.647722086504254e-05, "loss": 2.3679573059082033, "memory(GiB)": 70.96, "step": 14035, "token_acc": 0.54858934169279, "train_speed(iter/s)": 0.670991 }, { "epoch": 0.6015166445310827, "grad_norm": 3.269237756729126, "learning_rate": 9.64747391084172e-05, "loss": 2.122709846496582, "memory(GiB)": 70.96, "step": 14040, "token_acc": 0.5215686274509804, "train_speed(iter/s)": 0.670983 }, { "epoch": 0.6017308598603316, "grad_norm": 4.1175432205200195, "learning_rate": 9.647225650985834e-05, "loss": 2.143061065673828, "memory(GiB)": 70.96, "step": 14045, "token_acc": 0.5655430711610487, "train_speed(iter/s)": 0.670979 }, { "epoch": 0.6019450751895805, "grad_norm": 2.9279274940490723, "learning_rate": 9.646977306941096e-05, "loss": 2.532633399963379, "memory(GiB)": 70.96, "step": 14050, "token_acc": 0.42702702702702705, "train_speed(iter/s)": 0.670853 }, { "epoch": 0.6021592905188295, "grad_norm": 5.235960006713867, "learning_rate": 9.646728878712005e-05, "loss": 2.5179569244384767, "memory(GiB)": 70.96, "step": 14055, "token_acc": 0.4808362369337979, "train_speed(iter/s)": 0.670878 }, { "epoch": 0.6023735058480785, "grad_norm": 6.45650053024292, "learning_rate": 9.646480366303063e-05, "loss": 2.529927444458008, "memory(GiB)": 70.96, "step": 14060, "token_acc": 0.49635036496350365, "train_speed(iter/s)": 0.670885 }, { "epoch": 0.6025877211773274, "grad_norm": 3.261650562286377, "learning_rate": 9.64623176971877e-05, "loss": 2.622615432739258, "memory(GiB)": 70.96, "step": 14065, "token_acc": 0.4859550561797753, "train_speed(iter/s)": 0.670935 }, { "epoch": 0.6028019365065764, "grad_norm": 4.61467170715332, "learning_rate": 9.645983088963629e-05, "loss": 1.9437101364135743, "memory(GiB)": 70.96, "step": 14070, "token_acc": 0.5516014234875445, "train_speed(iter/s)": 
0.670961 }, { "epoch": 0.6030161518358254, "grad_norm": 4.137511253356934, "learning_rate": 9.645734324042147e-05, "loss": 2.42047119140625, "memory(GiB)": 70.96, "step": 14075, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.670896 }, { "epoch": 0.6032303671650744, "grad_norm": 4.379906177520752, "learning_rate": 9.645485474958828e-05, "loss": 2.5621311187744142, "memory(GiB)": 70.96, "step": 14080, "token_acc": 0.44696969696969696, "train_speed(iter/s)": 0.670839 }, { "epoch": 0.6034445824943233, "grad_norm": 3.686011552810669, "learning_rate": 9.645236541718183e-05, "loss": 2.436137390136719, "memory(GiB)": 70.96, "step": 14085, "token_acc": 0.4702194357366771, "train_speed(iter/s)": 0.670812 }, { "epoch": 0.6036587978235722, "grad_norm": 2.9177300930023193, "learning_rate": 9.64498752432472e-05, "loss": 2.4546600341796876, "memory(GiB)": 70.96, "step": 14090, "token_acc": 0.5072463768115942, "train_speed(iter/s)": 0.670854 }, { "epoch": 0.6038730131528213, "grad_norm": 3.292344808578491, "learning_rate": 9.64473842278295e-05, "loss": 2.526370811462402, "memory(GiB)": 70.96, "step": 14095, "token_acc": 0.4554140127388535, "train_speed(iter/s)": 0.670819 }, { "epoch": 0.6040872284820702, "grad_norm": 2.9578676223754883, "learning_rate": 9.644489237097388e-05, "loss": 2.662725257873535, "memory(GiB)": 70.96, "step": 14100, "token_acc": 0.46417445482866043, "train_speed(iter/s)": 0.670803 }, { "epoch": 0.6043014438113191, "grad_norm": 3.132725954055786, "learning_rate": 9.644239967272545e-05, "loss": 2.1504238128662108, "memory(GiB)": 70.96, "step": 14105, "token_acc": 0.5107913669064749, "train_speed(iter/s)": 0.670766 }, { "epoch": 0.6045156591405682, "grad_norm": 4.360935211181641, "learning_rate": 9.643990613312942e-05, "loss": 2.458598327636719, "memory(GiB)": 70.96, "step": 14110, "token_acc": 0.519298245614035, "train_speed(iter/s)": 0.670749 }, { "epoch": 0.6047298744698171, "grad_norm": 3.820587396621704, "learning_rate": 9.643741175223089e-05, 
"loss": 2.361704635620117, "memory(GiB)": 70.96, "step": 14115, "token_acc": 0.49158249158249157, "train_speed(iter/s)": 0.670734 }, { "epoch": 0.604944089799066, "grad_norm": 3.421715021133423, "learning_rate": 9.643491653007508e-05, "loss": 2.1644882202148437, "memory(GiB)": 70.96, "step": 14120, "token_acc": 0.5053763440860215, "train_speed(iter/s)": 0.67076 }, { "epoch": 0.605158305128315, "grad_norm": 4.98826265335083, "learning_rate": 9.643242046670722e-05, "loss": 2.5964487075805662, "memory(GiB)": 70.96, "step": 14125, "token_acc": 0.4671814671814672, "train_speed(iter/s)": 0.670795 }, { "epoch": 0.605372520457564, "grad_norm": 3.5454821586608887, "learning_rate": 9.642992356217248e-05, "loss": 2.3677549362182617, "memory(GiB)": 70.96, "step": 14130, "token_acc": 0.5285171102661597, "train_speed(iter/s)": 0.67077 }, { "epoch": 0.6055867357868129, "grad_norm": 3.3340342044830322, "learning_rate": 9.642742581651615e-05, "loss": 2.4598834991455076, "memory(GiB)": 70.96, "step": 14135, "token_acc": 0.5169491525423728, "train_speed(iter/s)": 0.670766 }, { "epoch": 0.6058009511160619, "grad_norm": 3.3365278244018555, "learning_rate": 9.642492722978343e-05, "loss": 1.9492622375488282, "memory(GiB)": 70.96, "step": 14140, "token_acc": 0.5518672199170125, "train_speed(iter/s)": 0.670748 }, { "epoch": 0.6060151664453108, "grad_norm": 2.674161911010742, "learning_rate": 9.64224278020196e-05, "loss": 2.2138460159301756, "memory(GiB)": 70.96, "step": 14145, "token_acc": 0.4745762711864407, "train_speed(iter/s)": 0.670783 }, { "epoch": 0.6062293817745598, "grad_norm": 2.9822633266448975, "learning_rate": 9.641992753326996e-05, "loss": 2.303097152709961, "memory(GiB)": 70.96, "step": 14150, "token_acc": 0.4807017543859649, "train_speed(iter/s)": 0.670741 }, { "epoch": 0.6064435971038088, "grad_norm": 3.586895227432251, "learning_rate": 9.641742642357978e-05, "loss": 2.2019464492797853, "memory(GiB)": 70.96, "step": 14155, "token_acc": 0.5355805243445693, 
"train_speed(iter/s)": 0.670731 }, { "epoch": 0.6066578124330577, "grad_norm": 6.033274173736572, "learning_rate": 9.641492447299439e-05, "loss": 2.471556854248047, "memory(GiB)": 70.96, "step": 14160, "token_acc": 0.4790874524714829, "train_speed(iter/s)": 0.670767 }, { "epoch": 0.6068720277623066, "grad_norm": 3.651273012161255, "learning_rate": 9.641242168155907e-05, "loss": 2.464060974121094, "memory(GiB)": 70.96, "step": 14165, "token_acc": 0.4774193548387097, "train_speed(iter/s)": 0.670777 }, { "epoch": 0.6070862430915557, "grad_norm": 3.13087797164917, "learning_rate": 9.640991804931921e-05, "loss": 2.2559062957763674, "memory(GiB)": 70.96, "step": 14170, "token_acc": 0.4907749077490775, "train_speed(iter/s)": 0.670784 }, { "epoch": 0.6073004584208046, "grad_norm": 3.29133939743042, "learning_rate": 9.640741357632015e-05, "loss": 2.3744365692138674, "memory(GiB)": 70.96, "step": 14175, "token_acc": 0.49382716049382713, "train_speed(iter/s)": 0.67082 }, { "epoch": 0.6075146737500535, "grad_norm": 2.8247389793395996, "learning_rate": 9.640490826260726e-05, "loss": 2.442216682434082, "memory(GiB)": 70.96, "step": 14180, "token_acc": 0.48231511254019294, "train_speed(iter/s)": 0.67086 }, { "epoch": 0.6077288890793026, "grad_norm": 4.321446418762207, "learning_rate": 9.640240210822594e-05, "loss": 2.47408447265625, "memory(GiB)": 70.96, "step": 14185, "token_acc": 0.47079037800687284, "train_speed(iter/s)": 0.670876 }, { "epoch": 0.6079431044085515, "grad_norm": 3.4694983959198, "learning_rate": 9.639989511322155e-05, "loss": 2.8986167907714844, "memory(GiB)": 70.96, "step": 14190, "token_acc": 0.43217665615141954, "train_speed(iter/s)": 0.670914 }, { "epoch": 0.6081573197378004, "grad_norm": 3.6766185760498047, "learning_rate": 9.639738727763954e-05, "loss": 2.4722921371459963, "memory(GiB)": 70.96, "step": 14195, "token_acc": 0.4651898734177215, "train_speed(iter/s)": 0.670934 }, { "epoch": 0.6083715350670494, "grad_norm": 3.994969606399536, "learning_rate": 
9.639487860152534e-05, "loss": 2.411226272583008, "memory(GiB)": 70.96, "step": 14200, "token_acc": 0.4674922600619195, "train_speed(iter/s)": 0.670887 }, { "epoch": 0.6085857503962984, "grad_norm": 3.291743755340576, "learning_rate": 9.639236908492439e-05, "loss": 2.4381494522094727, "memory(GiB)": 70.96, "step": 14205, "token_acc": 0.46418338108882523, "train_speed(iter/s)": 0.670858 }, { "epoch": 0.6087999657255473, "grad_norm": 3.7133901119232178, "learning_rate": 9.638985872788215e-05, "loss": 2.568778228759766, "memory(GiB)": 70.96, "step": 14210, "token_acc": 0.473015873015873, "train_speed(iter/s)": 0.670802 }, { "epoch": 0.6090141810547963, "grad_norm": 3.947234869003296, "learning_rate": 9.638734753044412e-05, "loss": 2.5895917892456053, "memory(GiB)": 70.96, "step": 14215, "token_acc": 0.5165562913907285, "train_speed(iter/s)": 0.670779 }, { "epoch": 0.6092283963840452, "grad_norm": 3.630869150161743, "learning_rate": 9.638483549265575e-05, "loss": 2.706243896484375, "memory(GiB)": 70.96, "step": 14220, "token_acc": 0.4281767955801105, "train_speed(iter/s)": 0.670811 }, { "epoch": 0.6094426117132942, "grad_norm": 3.519634246826172, "learning_rate": 9.638232261456259e-05, "loss": 2.4206056594848633, "memory(GiB)": 70.96, "step": 14225, "token_acc": 0.43356643356643354, "train_speed(iter/s)": 0.670833 }, { "epoch": 0.6096568270425432, "grad_norm": 4.6259870529174805, "learning_rate": 9.637980889621014e-05, "loss": 2.514453125, "memory(GiB)": 70.96, "step": 14230, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.67086 }, { "epoch": 0.6098710423717921, "grad_norm": 3.8708243370056152, "learning_rate": 9.637729433764395e-05, "loss": 2.3941570281982423, "memory(GiB)": 70.96, "step": 14235, "token_acc": 0.5190839694656488, "train_speed(iter/s)": 0.670877 }, { "epoch": 0.610085257701041, "grad_norm": 2.9373421669006348, "learning_rate": 9.637477893890956e-05, "loss": 2.345088768005371, "memory(GiB)": 70.96, "step": 14240, "token_acc": 
0.49337748344370863, "train_speed(iter/s)": 0.670808 }, { "epoch": 0.6102994730302901, "grad_norm": 3.3019044399261475, "learning_rate": 9.637226270005255e-05, "loss": 2.271257781982422, "memory(GiB)": 70.96, "step": 14245, "token_acc": 0.5154639175257731, "train_speed(iter/s)": 0.67078 }, { "epoch": 0.610513688359539, "grad_norm": 3.2005057334899902, "learning_rate": 9.636974562111852e-05, "loss": 2.2801843643188477, "memory(GiB)": 70.96, "step": 14250, "token_acc": 0.48188405797101447, "train_speed(iter/s)": 0.6708 }, { "epoch": 0.6107279036887879, "grad_norm": 4.118958950042725, "learning_rate": 9.636722770215301e-05, "loss": 2.2760395050048827, "memory(GiB)": 70.96, "step": 14255, "token_acc": 0.4608150470219436, "train_speed(iter/s)": 0.670749 }, { "epoch": 0.610942119018037, "grad_norm": 3.4684693813323975, "learning_rate": 9.63647089432017e-05, "loss": 2.5430625915527343, "memory(GiB)": 70.96, "step": 14260, "token_acc": 0.48363636363636364, "train_speed(iter/s)": 0.670743 }, { "epoch": 0.6111563343472859, "grad_norm": 3.3517231941223145, "learning_rate": 9.636218934431018e-05, "loss": 2.456259918212891, "memory(GiB)": 70.96, "step": 14265, "token_acc": 0.4981549815498155, "train_speed(iter/s)": 0.67075 }, { "epoch": 0.6113705496765348, "grad_norm": 3.6563010215759277, "learning_rate": 9.635966890552413e-05, "loss": 2.3310569763183593, "memory(GiB)": 70.96, "step": 14270, "token_acc": 0.4731182795698925, "train_speed(iter/s)": 0.670792 }, { "epoch": 0.6115847650057838, "grad_norm": 3.371795892715454, "learning_rate": 9.635714762688916e-05, "loss": 2.335804748535156, "memory(GiB)": 70.96, "step": 14275, "token_acc": 0.49280575539568344, "train_speed(iter/s)": 0.670811 }, { "epoch": 0.6117989803350328, "grad_norm": 4.405108451843262, "learning_rate": 9.6354625508451e-05, "loss": 2.26381778717041, "memory(GiB)": 70.96, "step": 14280, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.670826 }, { "epoch": 0.6120131956642817, "grad_norm": 
4.259702205657959, "learning_rate": 9.63521025502553e-05, "loss": 2.5591297149658203, "memory(GiB)": 70.96, "step": 14285, "token_acc": 0.4633431085043988, "train_speed(iter/s)": 0.670849 }, { "epoch": 0.6122274109935307, "grad_norm": 3.5332131385803223, "learning_rate": 9.634957875234779e-05, "loss": 2.382907485961914, "memory(GiB)": 70.96, "step": 14290, "token_acc": 0.5133079847908745, "train_speed(iter/s)": 0.670883 }, { "epoch": 0.6124416263227797, "grad_norm": 3.753387212753296, "learning_rate": 9.634705411477418e-05, "loss": 2.6384597778320313, "memory(GiB)": 70.96, "step": 14295, "token_acc": 0.4354243542435424, "train_speed(iter/s)": 0.670922 }, { "epoch": 0.6126558416520286, "grad_norm": 3.818192720413208, "learning_rate": 9.63445286375802e-05, "loss": 2.600748062133789, "memory(GiB)": 70.96, "step": 14300, "token_acc": 0.501779359430605, "train_speed(iter/s)": 0.670946 }, { "epoch": 0.6128700569812776, "grad_norm": 2.4029579162597656, "learning_rate": 9.634200232081161e-05, "loss": 2.6386383056640623, "memory(GiB)": 70.96, "step": 14305, "token_acc": 0.4694533762057878, "train_speed(iter/s)": 0.670979 }, { "epoch": 0.6130842723105265, "grad_norm": 3.7725956439971924, "learning_rate": 9.633947516451418e-05, "loss": 2.2229795455932617, "memory(GiB)": 70.96, "step": 14310, "token_acc": 0.5464285714285714, "train_speed(iter/s)": 0.670997 }, { "epoch": 0.6132984876397755, "grad_norm": 4.226745128631592, "learning_rate": 9.63369471687337e-05, "loss": 2.422349739074707, "memory(GiB)": 70.96, "step": 14315, "token_acc": 0.4828897338403042, "train_speed(iter/s)": 0.670942 }, { "epoch": 0.6135127029690245, "grad_norm": 4.311307907104492, "learning_rate": 9.633441833351593e-05, "loss": 2.4612590789794924, "memory(GiB)": 70.96, "step": 14320, "token_acc": 0.4892086330935252, "train_speed(iter/s)": 0.670923 }, { "epoch": 0.6137269182982734, "grad_norm": 3.0084280967712402, "learning_rate": 9.633188865890673e-05, "loss": 2.6084081649780275, "memory(GiB)": 70.96, 
"step": 14325, "token_acc": 0.45016077170418006, "train_speed(iter/s)": 0.670934 }, { "epoch": 0.6139411336275223, "grad_norm": 3.6537723541259766, "learning_rate": 9.632935814495188e-05, "loss": 2.286172294616699, "memory(GiB)": 70.96, "step": 14330, "token_acc": 0.4890282131661442, "train_speed(iter/s)": 0.6709 }, { "epoch": 0.6141553489567714, "grad_norm": 5.832762241363525, "learning_rate": 9.632682679169725e-05, "loss": 2.4348476409912108, "memory(GiB)": 70.96, "step": 14335, "token_acc": 0.4887459807073955, "train_speed(iter/s)": 0.670871 }, { "epoch": 0.6143695642860203, "grad_norm": 3.2364144325256348, "learning_rate": 9.632429459918871e-05, "loss": 2.35336856842041, "memory(GiB)": 70.96, "step": 14340, "token_acc": 0.49848024316109424, "train_speed(iter/s)": 0.670915 }, { "epoch": 0.6145837796152692, "grad_norm": 3.82326078414917, "learning_rate": 9.63217615674721e-05, "loss": 2.584588623046875, "memory(GiB)": 70.96, "step": 14345, "token_acc": 0.46715328467153283, "train_speed(iter/s)": 0.670956 }, { "epoch": 0.6147979949445183, "grad_norm": 3.300964593887329, "learning_rate": 9.631922769659333e-05, "loss": 2.3377965927124023, "memory(GiB)": 70.96, "step": 14350, "token_acc": 0.4981684981684982, "train_speed(iter/s)": 0.670959 }, { "epoch": 0.6150122102737672, "grad_norm": 4.134338855743408, "learning_rate": 9.631669298659831e-05, "loss": 2.5361167907714846, "memory(GiB)": 70.96, "step": 14355, "token_acc": 0.4785100286532951, "train_speed(iter/s)": 0.670994 }, { "epoch": 0.6152264256030161, "grad_norm": 3.5476458072662354, "learning_rate": 9.631415743753295e-05, "loss": 2.263842010498047, "memory(GiB)": 70.96, "step": 14360, "token_acc": 0.5393700787401575, "train_speed(iter/s)": 0.671039 }, { "epoch": 0.6154406409322651, "grad_norm": 4.762332916259766, "learning_rate": 9.631162104944318e-05, "loss": 2.167449951171875, "memory(GiB)": 70.96, "step": 14365, "token_acc": 0.5250836120401338, "train_speed(iter/s)": 0.671113 }, { "epoch": 0.6156548562615141, 
"grad_norm": 3.8367624282836914, "learning_rate": 9.630908382237495e-05, "loss": 2.8961980819702147, "memory(GiB)": 70.96, "step": 14370, "token_acc": 0.44554455445544555, "train_speed(iter/s)": 0.671139 }, { "epoch": 0.615869071590763, "grad_norm": 5.225412845611572, "learning_rate": 9.630654575637421e-05, "loss": 2.7726964950561523, "memory(GiB)": 70.96, "step": 14375, "token_acc": 0.42424242424242425, "train_speed(iter/s)": 0.671149 }, { "epoch": 0.616083286920012, "grad_norm": 3.5166029930114746, "learning_rate": 9.630400685148697e-05, "loss": 2.396898078918457, "memory(GiB)": 70.96, "step": 14380, "token_acc": 0.48253968253968255, "train_speed(iter/s)": 0.67115 }, { "epoch": 0.6162975022492609, "grad_norm": 5.631232738494873, "learning_rate": 9.630146710775921e-05, "loss": 2.407853889465332, "memory(GiB)": 70.96, "step": 14385, "token_acc": 0.5301204819277109, "train_speed(iter/s)": 0.671129 }, { "epoch": 0.6165117175785099, "grad_norm": 4.262017726898193, "learning_rate": 9.629892652523694e-05, "loss": 2.538730239868164, "memory(GiB)": 70.96, "step": 14390, "token_acc": 0.5272727272727272, "train_speed(iter/s)": 0.671133 }, { "epoch": 0.6167259329077589, "grad_norm": 3.7197539806365967, "learning_rate": 9.629638510396617e-05, "loss": 2.1317092895507814, "memory(GiB)": 70.96, "step": 14395, "token_acc": 0.5595667870036101, "train_speed(iter/s)": 0.671072 }, { "epoch": 0.6169401482370078, "grad_norm": 4.83473014831543, "learning_rate": 9.629384284399297e-05, "loss": 2.756025695800781, "memory(GiB)": 70.96, "step": 14400, "token_acc": 0.464, "train_speed(iter/s)": 0.67106 }, { "epoch": 0.6171543635662567, "grad_norm": 2.8709285259246826, "learning_rate": 9.629129974536337e-05, "loss": 2.185519218444824, "memory(GiB)": 70.96, "step": 14405, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.671127 }, { "epoch": 0.6173685788955058, "grad_norm": 3.821532726287842, "learning_rate": 9.628875580812344e-05, "loss": 2.310828971862793, "memory(GiB)": 70.96, 
"step": 14410, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.671095 }, { "epoch": 0.6175827942247547, "grad_norm": 3.7339494228363037, "learning_rate": 9.62862110323193e-05, "loss": 2.534266471862793, "memory(GiB)": 70.96, "step": 14415, "token_acc": 0.5228758169934641, "train_speed(iter/s)": 0.67112 }, { "epoch": 0.6177970095540037, "grad_norm": 3.1781198978424072, "learning_rate": 9.628366541799702e-05, "loss": 2.2061752319335937, "memory(GiB)": 70.96, "step": 14420, "token_acc": 0.5063291139240507, "train_speed(iter/s)": 0.671121 }, { "epoch": 0.6180112248832527, "grad_norm": 3.852355480194092, "learning_rate": 9.628111896520272e-05, "loss": 2.756174850463867, "memory(GiB)": 70.96, "step": 14425, "token_acc": 0.4485981308411215, "train_speed(iter/s)": 0.671095 }, { "epoch": 0.6182254402125016, "grad_norm": 2.956543207168579, "learning_rate": 9.627857167398253e-05, "loss": 2.7359807968139647, "memory(GiB)": 70.96, "step": 14430, "token_acc": 0.44722222222222224, "train_speed(iter/s)": 0.671123 }, { "epoch": 0.6184396555417506, "grad_norm": 3.300879955291748, "learning_rate": 9.627602354438261e-05, "loss": 2.4674896240234374, "memory(GiB)": 70.96, "step": 14435, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.671175 }, { "epoch": 0.6186538708709995, "grad_norm": 2.9767343997955322, "learning_rate": 9.627347457644911e-05, "loss": 2.0832088470458983, "memory(GiB)": 70.96, "step": 14440, "token_acc": 0.5310344827586206, "train_speed(iter/s)": 0.671181 }, { "epoch": 0.6188680862002485, "grad_norm": 3.1652402877807617, "learning_rate": 9.627092477022819e-05, "loss": 2.2299339294433596, "memory(GiB)": 70.96, "step": 14445, "token_acc": 0.4913494809688581, "train_speed(iter/s)": 0.671145 }, { "epoch": 0.6190823015294975, "grad_norm": 5.6245317459106445, "learning_rate": 9.626837412576609e-05, "loss": 2.5385931015014647, "memory(GiB)": 70.96, "step": 14450, "token_acc": 0.43973941368078173, "train_speed(iter/s)": 0.671057 }, { "epoch": 
0.6192965168587464, "grad_norm": 3.922079563140869, "learning_rate": 9.626582264310898e-05, "loss": 2.4326534271240234, "memory(GiB)": 70.96, "step": 14455, "token_acc": 0.4657039711191336, "train_speed(iter/s)": 0.671011 }, { "epoch": 0.6195107321879953, "grad_norm": 4.195559501647949, "learning_rate": 9.626327032230309e-05, "loss": 2.4123254776000977, "memory(GiB)": 70.96, "step": 14460, "token_acc": 0.47318611987381703, "train_speed(iter/s)": 0.670985 }, { "epoch": 0.6197249475172444, "grad_norm": 3.619375467300415, "learning_rate": 9.626071716339466e-05, "loss": 2.455464172363281, "memory(GiB)": 70.96, "step": 14465, "token_acc": 0.4682274247491639, "train_speed(iter/s)": 0.670984 }, { "epoch": 0.6199391628464933, "grad_norm": 3.5415608882904053, "learning_rate": 9.625816316642995e-05, "loss": 2.5194370269775392, "memory(GiB)": 70.96, "step": 14470, "token_acc": 0.5036764705882353, "train_speed(iter/s)": 0.670964 }, { "epoch": 0.6201533781757422, "grad_norm": 5.040711402893066, "learning_rate": 9.625560833145522e-05, "loss": 2.6752206802368166, "memory(GiB)": 70.96, "step": 14475, "token_acc": 0.43686006825938567, "train_speed(iter/s)": 0.670956 }, { "epoch": 0.6203675935049913, "grad_norm": 4.5338568687438965, "learning_rate": 9.625305265851674e-05, "loss": 2.300329399108887, "memory(GiB)": 70.96, "step": 14480, "token_acc": 0.4965034965034965, "train_speed(iter/s)": 0.67092 }, { "epoch": 0.6205818088342402, "grad_norm": 3.309967041015625, "learning_rate": 9.625049614766085e-05, "loss": 2.3890762329101562, "memory(GiB)": 70.96, "step": 14485, "token_acc": 0.4880546075085324, "train_speed(iter/s)": 0.67091 }, { "epoch": 0.6207960241634891, "grad_norm": 3.4029781818389893, "learning_rate": 9.62479387989338e-05, "loss": 2.368597984313965, "memory(GiB)": 70.96, "step": 14490, "token_acc": 0.4852459016393443, "train_speed(iter/s)": 0.670854 }, { "epoch": 0.6210102394927381, "grad_norm": 3.9416348934173584, "learning_rate": 9.624538061238197e-05, "loss": 
2.2695350646972656, "memory(GiB)": 70.96, "step": 14495, "token_acc": 0.4966887417218543, "train_speed(iter/s)": 0.670873 }, { "epoch": 0.6212244548219871, "grad_norm": 4.1415886878967285, "learning_rate": 9.624282158805169e-05, "loss": 2.5071874618530274, "memory(GiB)": 70.96, "step": 14500, "token_acc": 0.5121212121212121, "train_speed(iter/s)": 0.670846 }, { "epoch": 0.6212244548219871, "eval_loss": 2.3133509159088135, "eval_runtime": 17.5745, "eval_samples_per_second": 5.69, "eval_steps_per_second": 5.69, "eval_token_acc": 0.477124183006536, "step": 14500 }, { "epoch": 0.621438670151236, "grad_norm": 3.0219404697418213, "learning_rate": 9.624026172598932e-05, "loss": 2.296611022949219, "memory(GiB)": 70.96, "step": 14505, "token_acc": 0.48326771653543305, "train_speed(iter/s)": 0.670155 }, { "epoch": 0.621652885480485, "grad_norm": 3.7134881019592285, "learning_rate": 9.623770102624123e-05, "loss": 2.4434329986572267, "memory(GiB)": 70.96, "step": 14510, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.67018 }, { "epoch": 0.621867100809734, "grad_norm": 3.463866710662842, "learning_rate": 9.62351394888538e-05, "loss": 2.457599067687988, "memory(GiB)": 70.96, "step": 14515, "token_acc": 0.48534201954397393, "train_speed(iter/s)": 0.670231 }, { "epoch": 0.6220813161389829, "grad_norm": 2.9467856884002686, "learning_rate": 9.623257711387346e-05, "loss": 2.5367158889770507, "memory(GiB)": 70.96, "step": 14520, "token_acc": 0.4847457627118644, "train_speed(iter/s)": 0.670251 }, { "epoch": 0.6222955314682319, "grad_norm": 4.078629970550537, "learning_rate": 9.623001390134662e-05, "loss": 2.2289552688598633, "memory(GiB)": 70.96, "step": 14525, "token_acc": 0.4781021897810219, "train_speed(iter/s)": 0.670297 }, { "epoch": 0.6225097467974808, "grad_norm": 3.0580124855041504, "learning_rate": 9.62274498513197e-05, "loss": 2.223225975036621, "memory(GiB)": 70.96, "step": 14530, "token_acc": 0.445859872611465, "train_speed(iter/s)": 0.670316 }, { "epoch": 
0.6227239621267298, "grad_norm": 4.355770587921143, "learning_rate": 9.622488496383917e-05, "loss": 2.445241165161133, "memory(GiB)": 70.96, "step": 14535, "token_acc": 0.4891640866873065, "train_speed(iter/s)": 0.670305 }, { "epoch": 0.6229381774559788, "grad_norm": 4.001067638397217, "learning_rate": 9.622231923895149e-05, "loss": 2.6274253845214846, "memory(GiB)": 70.96, "step": 14540, "token_acc": 0.43537414965986393, "train_speed(iter/s)": 0.670293 }, { "epoch": 0.6231523927852277, "grad_norm": 3.2574448585510254, "learning_rate": 9.621975267670312e-05, "loss": 2.212502861022949, "memory(GiB)": 70.96, "step": 14545, "token_acc": 0.5367965367965368, "train_speed(iter/s)": 0.670289 }, { "epoch": 0.6233666081144766, "grad_norm": 4.661454200744629, "learning_rate": 9.621718527714057e-05, "loss": 2.4810165405273437, "memory(GiB)": 70.96, "step": 14550, "token_acc": 0.4740740740740741, "train_speed(iter/s)": 0.670182 }, { "epoch": 0.6235808234437257, "grad_norm": 2.850682497024536, "learning_rate": 9.621461704031038e-05, "loss": 2.695987319946289, "memory(GiB)": 70.96, "step": 14555, "token_acc": 0.4811320754716981, "train_speed(iter/s)": 0.670188 }, { "epoch": 0.6237950387729746, "grad_norm": 3.6249032020568848, "learning_rate": 9.621204796625902e-05, "loss": 2.4478668212890624, "memory(GiB)": 70.96, "step": 14560, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.670202 }, { "epoch": 0.6240092541022235, "grad_norm": 3.8041858673095703, "learning_rate": 9.620947805503309e-05, "loss": 2.2834550857543947, "memory(GiB)": 70.96, "step": 14565, "token_acc": 0.49140893470790376, "train_speed(iter/s)": 0.670205 }, { "epoch": 0.6242234694314726, "grad_norm": 2.9548306465148926, "learning_rate": 9.62069073066791e-05, "loss": 2.3828758239746093, "memory(GiB)": 70.96, "step": 14570, "token_acc": 0.4630225080385852, "train_speed(iter/s)": 0.670248 }, { "epoch": 0.6244376847607215, "grad_norm": 4.7907257080078125, "learning_rate": 9.620433572124363e-05, "loss": 
2.269556427001953, "memory(GiB)": 70.96, "step": 14575, "token_acc": 0.5092936802973977, "train_speed(iter/s)": 0.670203 }, { "epoch": 0.6246519000899704, "grad_norm": 2.954960346221924, "learning_rate": 9.620176329877329e-05, "loss": 2.546478271484375, "memory(GiB)": 70.96, "step": 14580, "token_acc": 0.4855072463768116, "train_speed(iter/s)": 0.670153 }, { "epoch": 0.6248661154192194, "grad_norm": 3.146740436553955, "learning_rate": 9.619919003931465e-05, "loss": 2.562275505065918, "memory(GiB)": 70.96, "step": 14585, "token_acc": 0.48464163822525597, "train_speed(iter/s)": 0.670184 }, { "epoch": 0.6250803307484684, "grad_norm": 3.1386184692382812, "learning_rate": 9.619661594291436e-05, "loss": 2.438266372680664, "memory(GiB)": 70.96, "step": 14590, "token_acc": 0.4984894259818731, "train_speed(iter/s)": 0.670179 }, { "epoch": 0.6252945460777173, "grad_norm": 3.360349655151367, "learning_rate": 9.619404100961903e-05, "loss": 2.373858642578125, "memory(GiB)": 70.96, "step": 14595, "token_acc": 0.5031847133757962, "train_speed(iter/s)": 0.670182 }, { "epoch": 0.6255087614069663, "grad_norm": 6.042630195617676, "learning_rate": 9.619146523947534e-05, "loss": 2.5275909423828127, "memory(GiB)": 70.96, "step": 14600, "token_acc": 0.42586750788643535, "train_speed(iter/s)": 0.670199 }, { "epoch": 0.6257229767362152, "grad_norm": 2.8992037773132324, "learning_rate": 9.618888863252989e-05, "loss": 2.5648393630981445, "memory(GiB)": 70.96, "step": 14605, "token_acc": 0.4617737003058104, "train_speed(iter/s)": 0.670153 }, { "epoch": 0.6259371920654642, "grad_norm": 3.348828077316284, "learning_rate": 9.61863111888294e-05, "loss": 2.1036613464355467, "memory(GiB)": 70.96, "step": 14610, "token_acc": 0.49377593360995853, "train_speed(iter/s)": 0.670168 }, { "epoch": 0.6261514073947132, "grad_norm": 3.5597825050354004, "learning_rate": 9.618373290842057e-05, "loss": 2.577318000793457, "memory(GiB)": 70.96, "step": 14615, "token_acc": 0.4918032786885246, "train_speed(iter/s)": 
0.670202 }, { "epoch": 0.6263656227239621, "grad_norm": 3.966038942337036, "learning_rate": 9.618115379135009e-05, "loss": 2.9528181076049806, "memory(GiB)": 70.96, "step": 14620, "token_acc": 0.42857142857142855, "train_speed(iter/s)": 0.670278 }, { "epoch": 0.626579838053211, "grad_norm": 3.4745030403137207, "learning_rate": 9.617857383766468e-05, "loss": 2.5099645614624024, "memory(GiB)": 70.96, "step": 14625, "token_acc": 0.4624624624624625, "train_speed(iter/s)": 0.670218 }, { "epoch": 0.6267940533824601, "grad_norm": 3.0203800201416016, "learning_rate": 9.61759930474111e-05, "loss": 2.231603240966797, "memory(GiB)": 70.96, "step": 14630, "token_acc": 0.5111111111111111, "train_speed(iter/s)": 0.670192 }, { "epoch": 0.627008268711709, "grad_norm": 3.300673007965088, "learning_rate": 9.617341142063607e-05, "loss": 2.417850112915039, "memory(GiB)": 70.96, "step": 14635, "token_acc": 0.49466192170818507, "train_speed(iter/s)": 0.670201 }, { "epoch": 0.6272224840409579, "grad_norm": 4.358852386474609, "learning_rate": 9.617082895738639e-05, "loss": 2.4961395263671875, "memory(GiB)": 70.96, "step": 14640, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670205 }, { "epoch": 0.627436699370207, "grad_norm": 3.5053348541259766, "learning_rate": 9.616824565770883e-05, "loss": 2.2345249176025392, "memory(GiB)": 70.96, "step": 14645, "token_acc": 0.5336134453781513, "train_speed(iter/s)": 0.670197 }, { "epoch": 0.6276509146994559, "grad_norm": 3.5166850090026855, "learning_rate": 9.616566152165019e-05, "loss": 2.26438045501709, "memory(GiB)": 70.96, "step": 14650, "token_acc": 0.5186567164179104, "train_speed(iter/s)": 0.67027 }, { "epoch": 0.6278651300287048, "grad_norm": 3.320265054702759, "learning_rate": 9.61630765492573e-05, "loss": 2.5829627990722654, "memory(GiB)": 70.96, "step": 14655, "token_acc": 0.4662379421221865, "train_speed(iter/s)": 0.670295 }, { "epoch": 0.6280793453579538, "grad_norm": 2.992992401123047, "learning_rate": 9.616049074057695e-05, 
"loss": 2.1794002532958983, "memory(GiB)": 70.96, "step": 14660, "token_acc": 0.5054151624548736, "train_speed(iter/s)": 0.67032 }, { "epoch": 0.6282935606872028, "grad_norm": 3.9913949966430664, "learning_rate": 9.615790409565601e-05, "loss": 2.224702072143555, "memory(GiB)": 70.96, "step": 14665, "token_acc": 0.5230769230769231, "train_speed(iter/s)": 0.67031 }, { "epoch": 0.6285077760164517, "grad_norm": 5.164859294891357, "learning_rate": 9.615531661454135e-05, "loss": 2.6824140548706055, "memory(GiB)": 70.96, "step": 14670, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.670296 }, { "epoch": 0.6287219913457007, "grad_norm": 3.0849103927612305, "learning_rate": 9.615272829727982e-05, "loss": 2.6075119018554687, "memory(GiB)": 70.96, "step": 14675, "token_acc": 0.46048109965635736, "train_speed(iter/s)": 0.670317 }, { "epoch": 0.6289362066749496, "grad_norm": 3.052422046661377, "learning_rate": 9.615013914391833e-05, "loss": 2.0833499908447264, "memory(GiB)": 70.96, "step": 14680, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.670325 }, { "epoch": 0.6291504220041986, "grad_norm": 3.58351993560791, "learning_rate": 9.614754915450378e-05, "loss": 2.687228775024414, "memory(GiB)": 70.96, "step": 14685, "token_acc": 0.43452380952380953, "train_speed(iter/s)": 0.670342 }, { "epoch": 0.6293646373334476, "grad_norm": 3.1863515377044678, "learning_rate": 9.614495832908307e-05, "loss": 2.758237838745117, "memory(GiB)": 70.96, "step": 14690, "token_acc": 0.41134751773049644, "train_speed(iter/s)": 0.670337 }, { "epoch": 0.6295788526626965, "grad_norm": 5.976968288421631, "learning_rate": 9.614236666770317e-05, "loss": 2.8333364486694337, "memory(GiB)": 70.96, "step": 14695, "token_acc": 0.45643153526970953, "train_speed(iter/s)": 0.670398 }, { "epoch": 0.6297930679919455, "grad_norm": 4.44890022277832, "learning_rate": 9.613977417041103e-05, "loss": 2.252988815307617, "memory(GiB)": 70.96, "step": 14700, "token_acc": 0.508130081300813, 
"train_speed(iter/s)": 0.670406 }, { "epoch": 0.6300072833211945, "grad_norm": 3.3888838291168213, "learning_rate": 9.613718083725356e-05, "loss": 2.5276418685913087, "memory(GiB)": 70.96, "step": 14705, "token_acc": 0.4684385382059801, "train_speed(iter/s)": 0.670386 }, { "epoch": 0.6302214986504434, "grad_norm": 3.9620473384857178, "learning_rate": 9.61345866682778e-05, "loss": 2.501216506958008, "memory(GiB)": 70.96, "step": 14710, "token_acc": 0.4725609756097561, "train_speed(iter/s)": 0.670412 }, { "epoch": 0.6304357139796923, "grad_norm": 3.283792495727539, "learning_rate": 9.613199166353073e-05, "loss": 2.5704721450805663, "memory(GiB)": 70.96, "step": 14715, "token_acc": 0.49433962264150944, "train_speed(iter/s)": 0.670334 }, { "epoch": 0.6306499293089414, "grad_norm": 4.17362117767334, "learning_rate": 9.612939582305933e-05, "loss": 2.66766357421875, "memory(GiB)": 70.96, "step": 14720, "token_acc": 0.4435483870967742, "train_speed(iter/s)": 0.670332 }, { "epoch": 0.6308641446381903, "grad_norm": 3.4237189292907715, "learning_rate": 9.612679914691067e-05, "loss": 2.47470645904541, "memory(GiB)": 70.96, "step": 14725, "token_acc": 0.48026315789473684, "train_speed(iter/s)": 0.670367 }, { "epoch": 0.6310783599674392, "grad_norm": 3.549229860305786, "learning_rate": 9.612420163513179e-05, "loss": 2.5971744537353514, "memory(GiB)": 70.96, "step": 14730, "token_acc": 0.4713375796178344, "train_speed(iter/s)": 0.670352 }, { "epoch": 0.6312925752966883, "grad_norm": 2.716902017593384, "learning_rate": 9.61216032877697e-05, "loss": 2.424894332885742, "memory(GiB)": 70.96, "step": 14735, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.670388 }, { "epoch": 0.6315067906259372, "grad_norm": 3.191157817840576, "learning_rate": 9.611900410487152e-05, "loss": 2.3026634216308595, "memory(GiB)": 70.96, "step": 14740, "token_acc": 0.4852459016393443, "train_speed(iter/s)": 0.67036 }, { "epoch": 0.6317210059551861, "grad_norm": 2.675534963607788, "learning_rate": 
9.61164040864843e-05, "loss": 2.4301694869995116, "memory(GiB)": 70.96, "step": 14745, "token_acc": 0.4961832061068702, "train_speed(iter/s)": 0.670325 }, { "epoch": 0.6319352212844351, "grad_norm": 3.4085211753845215, "learning_rate": 9.611380323265517e-05, "loss": 2.1904964447021484, "memory(GiB)": 70.96, "step": 14750, "token_acc": 0.5531135531135531, "train_speed(iter/s)": 0.670295 }, { "epoch": 0.6321494366136841, "grad_norm": 3.039344549179077, "learning_rate": 9.611120154343122e-05, "loss": 2.278696060180664, "memory(GiB)": 70.96, "step": 14755, "token_acc": 0.4634146341463415, "train_speed(iter/s)": 0.670296 }, { "epoch": 0.6323636519429331, "grad_norm": 5.452304363250732, "learning_rate": 9.610859901885963e-05, "loss": 2.5539314270019533, "memory(GiB)": 70.96, "step": 14760, "token_acc": 0.44223107569721115, "train_speed(iter/s)": 0.67029 }, { "epoch": 0.632577867272182, "grad_norm": 4.268456935882568, "learning_rate": 9.61059956589875e-05, "loss": 2.5455530166625975, "memory(GiB)": 70.96, "step": 14765, "token_acc": 0.48231511254019294, "train_speed(iter/s)": 0.670311 }, { "epoch": 0.6327920826014309, "grad_norm": 4.192315578460693, "learning_rate": 9.6103391463862e-05, "loss": 2.318006134033203, "memory(GiB)": 70.96, "step": 14770, "token_acc": 0.5, "train_speed(iter/s)": 0.67034 }, { "epoch": 0.63300629793068, "grad_norm": 4.765754699707031, "learning_rate": 9.610078643353032e-05, "loss": 2.674188232421875, "memory(GiB)": 70.96, "step": 14775, "token_acc": 0.4729241877256318, "train_speed(iter/s)": 0.67029 }, { "epoch": 0.6332205132599289, "grad_norm": 3.785626173019409, "learning_rate": 9.609818056803965e-05, "loss": 2.5985538482666017, "memory(GiB)": 70.96, "step": 14780, "token_acc": 0.501628664495114, "train_speed(iter/s)": 0.670286 }, { "epoch": 0.6334347285891778, "grad_norm": 4.2200541496276855, "learning_rate": 9.609557386743719e-05, "loss": 2.416750907897949, "memory(GiB)": 70.96, "step": 14785, "token_acc": 0.46229508196721314, 
"train_speed(iter/s)": 0.670328 }, { "epoch": 0.6336489439184269, "grad_norm": 4.294994354248047, "learning_rate": 9.609296633177018e-05, "loss": 2.413070869445801, "memory(GiB)": 70.96, "step": 14790, "token_acc": 0.44405594405594406, "train_speed(iter/s)": 0.670326 }, { "epoch": 0.6338631592476758, "grad_norm": 3.0336310863494873, "learning_rate": 9.609035796108583e-05, "loss": 2.3238037109375, "memory(GiB)": 70.96, "step": 14795, "token_acc": 0.49538461538461537, "train_speed(iter/s)": 0.670331 }, { "epoch": 0.6340773745769247, "grad_norm": 3.317519187927246, "learning_rate": 9.608774875543143e-05, "loss": 2.3094545364379884, "memory(GiB)": 70.96, "step": 14800, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.670344 }, { "epoch": 0.6342915899061737, "grad_norm": 3.3940887451171875, "learning_rate": 9.60851387148542e-05, "loss": 2.3289201736450194, "memory(GiB)": 70.96, "step": 14805, "token_acc": 0.5179282868525896, "train_speed(iter/s)": 0.670384 }, { "epoch": 0.6345058052354227, "grad_norm": 4.543389320373535, "learning_rate": 9.608252783940148e-05, "loss": 2.3214963912963866, "memory(GiB)": 70.96, "step": 14810, "token_acc": 0.45507246376811594, "train_speed(iter/s)": 0.67043 }, { "epoch": 0.6347200205646716, "grad_norm": 3.3957622051239014, "learning_rate": 9.607991612912052e-05, "loss": 2.396668243408203, "memory(GiB)": 70.96, "step": 14815, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.670428 }, { "epoch": 0.6349342358939206, "grad_norm": 3.468269109725952, "learning_rate": 9.607730358405867e-05, "loss": 2.4356220245361326, "memory(GiB)": 71.23, "step": 14820, "token_acc": 0.4807692307692308, "train_speed(iter/s)": 0.67039 }, { "epoch": 0.6351484512231695, "grad_norm": 3.205477476119995, "learning_rate": 9.607469020426322e-05, "loss": 2.6292144775390627, "memory(GiB)": 71.23, "step": 14825, "token_acc": 0.4787234042553192, "train_speed(iter/s)": 0.670432 }, { "epoch": 0.6353626665524185, "grad_norm": 4.0887932777404785, 
"learning_rate": 9.607207598978154e-05, "loss": 2.430350112915039, "memory(GiB)": 71.23, "step": 14830, "token_acc": 0.46938775510204084, "train_speed(iter/s)": 0.670409 }, { "epoch": 0.6355768818816675, "grad_norm": 4.046058654785156, "learning_rate": 9.6069460940661e-05, "loss": 2.641506385803223, "memory(GiB)": 71.23, "step": 14835, "token_acc": 0.5114503816793893, "train_speed(iter/s)": 0.670425 }, { "epoch": 0.6357910972109164, "grad_norm": 3.142002582550049, "learning_rate": 9.606684505694894e-05, "loss": 2.6276323318481447, "memory(GiB)": 71.23, "step": 14840, "token_acc": 0.4525993883792049, "train_speed(iter/s)": 0.67046 }, { "epoch": 0.6360053125401653, "grad_norm": 3.4975297451019287, "learning_rate": 9.606422833869277e-05, "loss": 2.6212711334228516, "memory(GiB)": 71.23, "step": 14845, "token_acc": 0.46017699115044247, "train_speed(iter/s)": 0.670395 }, { "epoch": 0.6362195278694144, "grad_norm": 4.4448065757751465, "learning_rate": 9.60616107859399e-05, "loss": 2.513706398010254, "memory(GiB)": 71.23, "step": 14850, "token_acc": 0.49829351535836175, "train_speed(iter/s)": 0.670349 }, { "epoch": 0.6364337431986633, "grad_norm": 3.2271604537963867, "learning_rate": 9.605899239873775e-05, "loss": 2.3228092193603516, "memory(GiB)": 71.23, "step": 14855, "token_acc": 0.5551330798479087, "train_speed(iter/s)": 0.670352 }, { "epoch": 0.6366479585279122, "grad_norm": 3.3707008361816406, "learning_rate": 9.605637317713373e-05, "loss": 2.6194339752197267, "memory(GiB)": 71.23, "step": 14860, "token_acc": 0.49834983498349833, "train_speed(iter/s)": 0.67033 }, { "epoch": 0.6368621738571613, "grad_norm": 3.580902338027954, "learning_rate": 9.605375312117531e-05, "loss": 2.3860511779785156, "memory(GiB)": 71.23, "step": 14865, "token_acc": 0.4772727272727273, "train_speed(iter/s)": 0.670364 }, { "epoch": 0.6370763891864102, "grad_norm": 4.299529552459717, "learning_rate": 9.605113223090996e-05, "loss": 2.1301265716552735, "memory(GiB)": 71.23, "step": 14870, 
"token_acc": 0.5515873015873016, "train_speed(iter/s)": 0.670313 }, { "epoch": 0.6372906045156591, "grad_norm": 3.8384971618652344, "learning_rate": 9.604851050638513e-05, "loss": 2.4696176528930662, "memory(GiB)": 71.23, "step": 14875, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.670275 }, { "epoch": 0.6375048198449081, "grad_norm": 3.117140769958496, "learning_rate": 9.604588794764834e-05, "loss": 2.313773536682129, "memory(GiB)": 71.23, "step": 14880, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.670262 }, { "epoch": 0.6377190351741571, "grad_norm": 4.907201766967773, "learning_rate": 9.60432645547471e-05, "loss": 2.5381046295166017, "memory(GiB)": 71.23, "step": 14885, "token_acc": 0.45733788395904434, "train_speed(iter/s)": 0.670297 }, { "epoch": 0.637933250503406, "grad_norm": 3.806361675262451, "learning_rate": 9.604064032772893e-05, "loss": 2.450268363952637, "memory(GiB)": 71.23, "step": 14890, "token_acc": 0.4619883040935672, "train_speed(iter/s)": 0.670346 }, { "epoch": 0.638147465832655, "grad_norm": 4.3747100830078125, "learning_rate": 9.603801526664138e-05, "loss": 2.645819091796875, "memory(GiB)": 71.23, "step": 14895, "token_acc": 0.48134328358208955, "train_speed(iter/s)": 0.670407 }, { "epoch": 0.638361681161904, "grad_norm": 4.8424811363220215, "learning_rate": 9.603538937153199e-05, "loss": 2.6034320831298827, "memory(GiB)": 71.23, "step": 14900, "token_acc": 0.4675324675324675, "train_speed(iter/s)": 0.670467 }, { "epoch": 0.6385758964911529, "grad_norm": 4.208618640899658, "learning_rate": 9.603276264244834e-05, "loss": 2.4827854156494142, "memory(GiB)": 71.23, "step": 14905, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670483 }, { "epoch": 0.6387901118204019, "grad_norm": 4.058903217315674, "learning_rate": 9.6030135079438e-05, "loss": 2.580326461791992, "memory(GiB)": 71.23, "step": 14910, "token_acc": 0.43167701863354035, "train_speed(iter/s)": 0.670443 }, { "epoch": 0.6390043271496508, "grad_norm": 
3.8490891456604004, "learning_rate": 9.60275066825486e-05, "loss": 2.455029106140137, "memory(GiB)": 71.23, "step": 14915, "token_acc": 0.49444444444444446, "train_speed(iter/s)": 0.670479 }, { "epoch": 0.6392185424788998, "grad_norm": 4.608548641204834, "learning_rate": 9.602487745182774e-05, "loss": 2.179927444458008, "memory(GiB)": 71.23, "step": 14920, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.670476 }, { "epoch": 0.6394327578081488, "grad_norm": 3.541161298751831, "learning_rate": 9.602224738732303e-05, "loss": 2.4242843627929687, "memory(GiB)": 71.23, "step": 14925, "token_acc": 0.46742209631728043, "train_speed(iter/s)": 0.670431 }, { "epoch": 0.6396469731373977, "grad_norm": 4.099557876586914, "learning_rate": 9.601961648908214e-05, "loss": 2.2667476654052736, "memory(GiB)": 71.23, "step": 14930, "token_acc": 0.5052264808362369, "train_speed(iter/s)": 0.670443 }, { "epoch": 0.6398611884666466, "grad_norm": 3.752007007598877, "learning_rate": 9.601698475715275e-05, "loss": 2.538437080383301, "memory(GiB)": 71.23, "step": 14935, "token_acc": 0.46920821114369504, "train_speed(iter/s)": 0.67043 }, { "epoch": 0.6400754037958957, "grad_norm": 3.8111612796783447, "learning_rate": 9.601435219158249e-05, "loss": 2.387489128112793, "memory(GiB)": 71.23, "step": 14940, "token_acc": 0.5, "train_speed(iter/s)": 0.670451 }, { "epoch": 0.6402896191251446, "grad_norm": 3.8173787593841553, "learning_rate": 9.601171879241909e-05, "loss": 2.3855838775634766, "memory(GiB)": 71.23, "step": 14945, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.670455 }, { "epoch": 0.6405038344543935, "grad_norm": 3.247530460357666, "learning_rate": 9.600908455971022e-05, "loss": 2.4819360733032227, "memory(GiB)": 71.23, "step": 14950, "token_acc": 0.45422535211267606, "train_speed(iter/s)": 0.670468 }, { "epoch": 0.6407180497836426, "grad_norm": 2.9155545234680176, "learning_rate": 9.600644949350363e-05, "loss": 2.5112350463867186, "memory(GiB)": 71.23, "step": 14955, 
"token_acc": 0.47333333333333333, "train_speed(iter/s)": 0.670496 }, { "epoch": 0.6409322651128915, "grad_norm": 3.616335391998291, "learning_rate": 9.600381359384707e-05, "loss": 2.245035934448242, "memory(GiB)": 71.23, "step": 14960, "token_acc": 0.5150501672240803, "train_speed(iter/s)": 0.670471 }, { "epoch": 0.6411464804421404, "grad_norm": 3.6851742267608643, "learning_rate": 9.600117686078827e-05, "loss": 2.4975122451782226, "memory(GiB)": 71.23, "step": 14965, "token_acc": 0.4672897196261682, "train_speed(iter/s)": 0.670454 }, { "epoch": 0.6413606957713894, "grad_norm": 4.143742084503174, "learning_rate": 9.599853929437498e-05, "loss": 2.1541725158691407, "memory(GiB)": 71.23, "step": 14970, "token_acc": 0.5331010452961672, "train_speed(iter/s)": 0.670454 }, { "epoch": 0.6415749111006384, "grad_norm": 3.3934617042541504, "learning_rate": 9.599590089465501e-05, "loss": 2.532093620300293, "memory(GiB)": 71.23, "step": 14975, "token_acc": 0.4774774774774775, "train_speed(iter/s)": 0.670427 }, { "epoch": 0.6417891264298873, "grad_norm": 4.00237512588501, "learning_rate": 9.599326166167615e-05, "loss": 2.6586750030517576, "memory(GiB)": 71.23, "step": 14980, "token_acc": 0.4888888888888889, "train_speed(iter/s)": 0.670432 }, { "epoch": 0.6420033417591363, "grad_norm": 2.8653228282928467, "learning_rate": 9.599062159548619e-05, "loss": 2.5537891387939453, "memory(GiB)": 71.23, "step": 14985, "token_acc": 0.5, "train_speed(iter/s)": 0.670445 }, { "epoch": 0.6422175570883852, "grad_norm": 3.4542698860168457, "learning_rate": 9.598798069613301e-05, "loss": 2.52144775390625, "memory(GiB)": 71.23, "step": 14990, "token_acc": 0.45692883895131087, "train_speed(iter/s)": 0.67045 }, { "epoch": 0.6424317724176342, "grad_norm": 3.5336062908172607, "learning_rate": 9.59853389636644e-05, "loss": 2.6251190185546873, "memory(GiB)": 71.23, "step": 14995, "token_acc": 0.4696969696969697, "train_speed(iter/s)": 0.670517 }, { "epoch": 0.6426459877468832, "grad_norm": 
4.189690589904785, "learning_rate": 9.598269639812824e-05, "loss": 2.017995262145996, "memory(GiB)": 71.23, "step": 15000, "token_acc": 0.5681818181818182, "train_speed(iter/s)": 0.670506 }, { "epoch": 0.6426459877468832, "eval_loss": 2.0140554904937744, "eval_runtime": 17.5653, "eval_samples_per_second": 5.693, "eval_steps_per_second": 5.693, "eval_token_acc": 0.5032679738562091, "step": 15000 }, { "epoch": 0.6428602030761321, "grad_norm": 3.148325204849243, "learning_rate": 9.59800529995724e-05, "loss": 2.460272789001465, "memory(GiB)": 71.23, "step": 15005, "token_acc": 0.49162011173184356, "train_speed(iter/s)": 0.669818 }, { "epoch": 0.643074418405381, "grad_norm": 4.232454776763916, "learning_rate": 9.597740876804477e-05, "loss": 2.1951625823974608, "memory(GiB)": 71.23, "step": 15010, "token_acc": 0.5388127853881278, "train_speed(iter/s)": 0.66977 }, { "epoch": 0.6432886337346301, "grad_norm": 3.1825976371765137, "learning_rate": 9.597476370359324e-05, "loss": 2.333258628845215, "memory(GiB)": 71.23, "step": 15015, "token_acc": 0.5095541401273885, "train_speed(iter/s)": 0.669808 }, { "epoch": 0.643502849063879, "grad_norm": 4.241125583648682, "learning_rate": 9.597211780626574e-05, "loss": 2.5575647354125977, "memory(GiB)": 71.23, "step": 15020, "token_acc": 0.49498327759197325, "train_speed(iter/s)": 0.66985 }, { "epoch": 0.6437170643931279, "grad_norm": 3.4830195903778076, "learning_rate": 9.596947107611021e-05, "loss": 2.64180850982666, "memory(GiB)": 71.23, "step": 15025, "token_acc": 0.47041420118343197, "train_speed(iter/s)": 0.669818 }, { "epoch": 0.643931279722377, "grad_norm": 2.6504483222961426, "learning_rate": 9.596682351317459e-05, "loss": 2.7391773223876954, "memory(GiB)": 71.23, "step": 15030, "token_acc": 0.4342105263157895, "train_speed(iter/s)": 0.669747 }, { "epoch": 0.6441454950516259, "grad_norm": 4.190567970275879, "learning_rate": 9.596417511750683e-05, "loss": 2.4943246841430664, "memory(GiB)": 71.23, "step": 15035, "token_acc": 
0.48135593220338985, "train_speed(iter/s)": 0.669819 }, { "epoch": 0.6443597103808748, "grad_norm": 3.1260645389556885, "learning_rate": 9.596152588915492e-05, "loss": 2.2433708190917967, "memory(GiB)": 71.23, "step": 15040, "token_acc": 0.48398576512455516, "train_speed(iter/s)": 0.669766 }, { "epoch": 0.6445739257101238, "grad_norm": 3.7523562908172607, "learning_rate": 9.595887582816685e-05, "loss": 2.9707746505737305, "memory(GiB)": 71.23, "step": 15045, "token_acc": 0.43859649122807015, "train_speed(iter/s)": 0.66977 }, { "epoch": 0.6447881410393728, "grad_norm": 3.567861795425415, "learning_rate": 9.595622493459065e-05, "loss": 2.2192459106445312, "memory(GiB)": 71.23, "step": 15050, "token_acc": 0.4699248120300752, "train_speed(iter/s)": 0.669732 }, { "epoch": 0.6450023563686217, "grad_norm": 3.529038906097412, "learning_rate": 9.59535732084743e-05, "loss": 2.6161964416503904, "memory(GiB)": 71.23, "step": 15055, "token_acc": 0.4902597402597403, "train_speed(iter/s)": 0.669763 }, { "epoch": 0.6452165716978707, "grad_norm": 3.4368643760681152, "learning_rate": 9.595092064986589e-05, "loss": 2.4385448455810548, "memory(GiB)": 71.23, "step": 15060, "token_acc": 0.45016077170418006, "train_speed(iter/s)": 0.669738 }, { "epoch": 0.6454307870271196, "grad_norm": 3.5690860748291016, "learning_rate": 9.594826725881342e-05, "loss": 2.2937543869018553, "memory(GiB)": 71.23, "step": 15065, "token_acc": 0.4713656387665198, "train_speed(iter/s)": 0.669742 }, { "epoch": 0.6456450023563686, "grad_norm": 4.372715473175049, "learning_rate": 9.594561303536502e-05, "loss": 2.1322036743164063, "memory(GiB)": 71.23, "step": 15070, "token_acc": 0.5060606060606061, "train_speed(iter/s)": 0.669746 }, { "epoch": 0.6458592176856176, "grad_norm": 3.4493308067321777, "learning_rate": 9.594295797956872e-05, "loss": 2.421336555480957, "memory(GiB)": 71.23, "step": 15075, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.669734 }, { "epoch": 0.6460734330148665, "grad_norm": 
3.597094774246216, "learning_rate": 9.594030209147264e-05, "loss": 2.2354286193847654, "memory(GiB)": 71.23, "step": 15080, "token_acc": 0.5284810126582279, "train_speed(iter/s)": 0.669793 }, { "epoch": 0.6462876483441155, "grad_norm": 3.7014756202697754, "learning_rate": 9.593764537112488e-05, "loss": 2.7517271041870117, "memory(GiB)": 71.23, "step": 15085, "token_acc": 0.453416149068323, "train_speed(iter/s)": 0.669794 }, { "epoch": 0.6465018636733645, "grad_norm": 5.671361923217773, "learning_rate": 9.59349878185736e-05, "loss": 2.8329889297485353, "memory(GiB)": 71.23, "step": 15090, "token_acc": 0.47038327526132406, "train_speed(iter/s)": 0.669812 }, { "epoch": 0.6467160790026134, "grad_norm": 3.7842578887939453, "learning_rate": 9.593232943386691e-05, "loss": 2.368638610839844, "memory(GiB)": 71.23, "step": 15095, "token_acc": 0.5047923322683706, "train_speed(iter/s)": 0.669818 }, { "epoch": 0.6469302943318624, "grad_norm": 4.5861616134643555, "learning_rate": 9.5929670217053e-05, "loss": 2.782765579223633, "memory(GiB)": 71.23, "step": 15100, "token_acc": 0.4311594202898551, "train_speed(iter/s)": 0.669873 }, { "epoch": 0.6471445096611114, "grad_norm": 5.892267227172852, "learning_rate": 9.592701016818001e-05, "loss": 2.6949249267578126, "memory(GiB)": 71.23, "step": 15105, "token_acc": 0.44673539518900346, "train_speed(iter/s)": 0.669895 }, { "epoch": 0.6473587249903603, "grad_norm": 3.899001121520996, "learning_rate": 9.592434928729616e-05, "loss": 2.5231847763061523, "memory(GiB)": 71.23, "step": 15110, "token_acc": 0.44551282051282054, "train_speed(iter/s)": 0.669891 }, { "epoch": 0.6475729403196093, "grad_norm": 3.54197359085083, "learning_rate": 9.592168757444964e-05, "loss": 2.5401493072509767, "memory(GiB)": 71.23, "step": 15115, "token_acc": 0.4696969696969697, "train_speed(iter/s)": 0.669952 }, { "epoch": 0.6477871556488582, "grad_norm": 3.6129772663116455, "learning_rate": 9.591902502968866e-05, "loss": 2.518663024902344, "memory(GiB)": 71.23, 
"step": 15120, "token_acc": 0.45387453874538747, "train_speed(iter/s)": 0.670009 }, { "epoch": 0.6480013709781072, "grad_norm": 3.4197893142700195, "learning_rate": 9.591636165306148e-05, "loss": 2.442755126953125, "memory(GiB)": 71.23, "step": 15125, "token_acc": 0.5062893081761006, "train_speed(iter/s)": 0.670038 }, { "epoch": 0.6482155863073562, "grad_norm": 3.583366870880127, "learning_rate": 9.591369744461633e-05, "loss": 2.4178998947143553, "memory(GiB)": 71.23, "step": 15130, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.670061 }, { "epoch": 0.6484298016366051, "grad_norm": 4.563483238220215, "learning_rate": 9.591103240440149e-05, "loss": 2.499576187133789, "memory(GiB)": 71.23, "step": 15135, "token_acc": 0.4820359281437126, "train_speed(iter/s)": 0.670032 }, { "epoch": 0.648644016965854, "grad_norm": 3.520010471343994, "learning_rate": 9.590836653246522e-05, "loss": 2.3146650314331056, "memory(GiB)": 71.23, "step": 15140, "token_acc": 0.5198675496688742, "train_speed(iter/s)": 0.670008 }, { "epoch": 0.6488582322951031, "grad_norm": 3.4386370182037354, "learning_rate": 9.590569982885582e-05, "loss": 2.3469104766845703, "memory(GiB)": 71.23, "step": 15145, "token_acc": 0.5019157088122606, "train_speed(iter/s)": 0.670064 }, { "epoch": 0.649072447624352, "grad_norm": 2.830937623977661, "learning_rate": 9.590303229362161e-05, "loss": 2.658129119873047, "memory(GiB)": 71.23, "step": 15150, "token_acc": 0.47953216374269003, "train_speed(iter/s)": 0.67007 }, { "epoch": 0.6492866629536009, "grad_norm": 3.6089134216308594, "learning_rate": 9.590036392681091e-05, "loss": 2.7502979278564452, "memory(GiB)": 71.23, "step": 15155, "token_acc": 0.45038167938931295, "train_speed(iter/s)": 0.670048 }, { "epoch": 0.64950087828285, "grad_norm": 4.499347686767578, "learning_rate": 9.589769472847208e-05, "loss": 2.3540012359619142, "memory(GiB)": 71.23, "step": 15160, "token_acc": 0.48375451263537905, "train_speed(iter/s)": 0.670014 }, { "epoch": 
0.6497150936120989, "grad_norm": 3.35601544380188, "learning_rate": 9.589502469865343e-05, "loss": 2.5166425704956055, "memory(GiB)": 71.23, "step": 15165, "token_acc": 0.5074626865671642, "train_speed(iter/s)": 0.669999 }, { "epoch": 0.6499293089413478, "grad_norm": 3.5268912315368652, "learning_rate": 9.589235383740336e-05, "loss": 2.2305498123168945, "memory(GiB)": 71.23, "step": 15170, "token_acc": 0.4745762711864407, "train_speed(iter/s)": 0.670017 }, { "epoch": 0.6501435242705969, "grad_norm": 2.432612419128418, "learning_rate": 9.588968214477024e-05, "loss": 2.287833595275879, "memory(GiB)": 71.23, "step": 15175, "token_acc": 0.49584487534626037, "train_speed(iter/s)": 0.67005 }, { "epoch": 0.6503577395998458, "grad_norm": 3.134122133255005, "learning_rate": 9.58870096208025e-05, "loss": 2.353277397155762, "memory(GiB)": 71.23, "step": 15180, "token_acc": 0.48828125, "train_speed(iter/s)": 0.670068 }, { "epoch": 0.6505719549290947, "grad_norm": 2.866798162460327, "learning_rate": 9.588433626554852e-05, "loss": 2.808859443664551, "memory(GiB)": 71.23, "step": 15185, "token_acc": 0.4712230215827338, "train_speed(iter/s)": 0.670066 }, { "epoch": 0.6507861702583437, "grad_norm": 4.546158313751221, "learning_rate": 9.588166207905674e-05, "loss": 2.283347320556641, "memory(GiB)": 71.23, "step": 15190, "token_acc": 0.5154639175257731, "train_speed(iter/s)": 0.67011 }, { "epoch": 0.6510003855875927, "grad_norm": 3.3277461528778076, "learning_rate": 9.587898706137563e-05, "loss": 2.255132293701172, "memory(GiB)": 71.23, "step": 15195, "token_acc": 0.5109489051094891, "train_speed(iter/s)": 0.670127 }, { "epoch": 0.6512146009168416, "grad_norm": 3.43892240524292, "learning_rate": 9.587631121255363e-05, "loss": 2.628762435913086, "memory(GiB)": 71.23, "step": 15200, "token_acc": 0.4358974358974359, "train_speed(iter/s)": 0.670148 }, { "epoch": 0.6514288162460906, "grad_norm": 3.5041987895965576, "learning_rate": 9.58736345326392e-05, "loss": 2.395909881591797, 
"memory(GiB)": 71.23, "step": 15205, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.670154 }, { "epoch": 0.6516430315753395, "grad_norm": 4.193615913391113, "learning_rate": 9.587095702168086e-05, "loss": 2.2736642837524412, "memory(GiB)": 71.23, "step": 15210, "token_acc": 0.508, "train_speed(iter/s)": 0.670156 }, { "epoch": 0.6518572469045885, "grad_norm": 3.679898500442505, "learning_rate": 9.586827867972709e-05, "loss": 2.2493091583251954, "memory(GiB)": 71.23, "step": 15215, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.670123 }, { "epoch": 0.6520714622338375, "grad_norm": 4.322254657745361, "learning_rate": 9.586559950682643e-05, "loss": 2.2947614669799803, "memory(GiB)": 71.23, "step": 15220, "token_acc": 0.4981684981684982, "train_speed(iter/s)": 0.670106 }, { "epoch": 0.6522856775630864, "grad_norm": 3.251950979232788, "learning_rate": 9.586291950302741e-05, "loss": 2.644721031188965, "memory(GiB)": 71.23, "step": 15225, "token_acc": 0.39634146341463417, "train_speed(iter/s)": 0.670176 }, { "epoch": 0.6524998928923353, "grad_norm": 3.206803560256958, "learning_rate": 9.586023866837859e-05, "loss": 2.7187067031860352, "memory(GiB)": 71.23, "step": 15230, "token_acc": 0.43728813559322033, "train_speed(iter/s)": 0.670144 }, { "epoch": 0.6527141082215844, "grad_norm": 2.8589789867401123, "learning_rate": 9.585755700292854e-05, "loss": 2.349597930908203, "memory(GiB)": 71.23, "step": 15235, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.670187 }, { "epoch": 0.6529283235508333, "grad_norm": 4.364107608795166, "learning_rate": 9.585487450672579e-05, "loss": 2.4091680526733397, "memory(GiB)": 71.23, "step": 15240, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.670183 }, { "epoch": 0.6531425388800822, "grad_norm": 5.052811622619629, "learning_rate": 9.5852191179819e-05, "loss": 2.770250129699707, "memory(GiB)": 71.23, "step": 15245, "token_acc": 0.40273037542662116, "train_speed(iter/s)": 0.67021 }, { "epoch": 
0.6533567542093313, "grad_norm": 4.111407279968262, "learning_rate": 9.584950702225674e-05, "loss": 2.3851375579833984, "memory(GiB)": 71.23, "step": 15250, "token_acc": 0.4869281045751634, "train_speed(iter/s)": 0.670223 }, { "epoch": 0.6535709695385802, "grad_norm": 3.3639092445373535, "learning_rate": 9.584682203408765e-05, "loss": 2.4761518478393554, "memory(GiB)": 71.23, "step": 15255, "token_acc": 0.5140845070422535, "train_speed(iter/s)": 0.670289 }, { "epoch": 0.6537851848678291, "grad_norm": 3.3901851177215576, "learning_rate": 9.584413621536037e-05, "loss": 2.5592618942260743, "memory(GiB)": 71.23, "step": 15260, "token_acc": 0.4359861591695502, "train_speed(iter/s)": 0.670281 }, { "epoch": 0.6539994001970781, "grad_norm": 3.900202989578247, "learning_rate": 9.584144956612358e-05, "loss": 2.2071929931640626, "memory(GiB)": 71.23, "step": 15265, "token_acc": 0.5141955835962145, "train_speed(iter/s)": 0.67024 }, { "epoch": 0.6542136155263271, "grad_norm": 2.7452731132507324, "learning_rate": 9.58387620864259e-05, "loss": 2.3734386444091795, "memory(GiB)": 71.23, "step": 15270, "token_acc": 0.4944237918215613, "train_speed(iter/s)": 0.67019 }, { "epoch": 0.654427830855576, "grad_norm": 3.323514699935913, "learning_rate": 9.583607377631605e-05, "loss": 2.6879430770874024, "memory(GiB)": 71.23, "step": 15275, "token_acc": 0.4452054794520548, "train_speed(iter/s)": 0.670141 }, { "epoch": 0.654642046184825, "grad_norm": 2.8440122604370117, "learning_rate": 9.583338463584272e-05, "loss": 2.427631378173828, "memory(GiB)": 71.23, "step": 15280, "token_acc": 0.5102739726027398, "train_speed(iter/s)": 0.670117 }, { "epoch": 0.654856261514074, "grad_norm": 3.6298744678497314, "learning_rate": 9.583069466505465e-05, "loss": 2.318032073974609, "memory(GiB)": 71.23, "step": 15285, "token_acc": 0.4855072463768116, "train_speed(iter/s)": 0.670125 }, { "epoch": 0.6550704768433229, "grad_norm": 3.780956745147705, "learning_rate": 9.582800386400055e-05, "loss": 
2.4963459014892577, "memory(GiB)": 71.23, "step": 15290, "token_acc": 0.45686900958466453, "train_speed(iter/s)": 0.670151 }, { "epoch": 0.6552846921725719, "grad_norm": 3.6585166454315186, "learning_rate": 9.582531223272917e-05, "loss": 2.5707836151123047, "memory(GiB)": 71.23, "step": 15295, "token_acc": 0.4811594202898551, "train_speed(iter/s)": 0.670154 }, { "epoch": 0.6554989075018208, "grad_norm": 3.447110414505005, "learning_rate": 9.582261977128925e-05, "loss": 2.2240360260009764, "memory(GiB)": 71.23, "step": 15300, "token_acc": 0.5418326693227091, "train_speed(iter/s)": 0.670137 }, { "epoch": 0.6557131228310698, "grad_norm": 3.807955265045166, "learning_rate": 9.581992647972962e-05, "loss": 2.4695207595825197, "memory(GiB)": 71.23, "step": 15305, "token_acc": 0.5062761506276151, "train_speed(iter/s)": 0.670121 }, { "epoch": 0.6559273381603188, "grad_norm": 4.612003803253174, "learning_rate": 9.581723235809902e-05, "loss": 2.334562873840332, "memory(GiB)": 71.23, "step": 15310, "token_acc": 0.4847457627118644, "train_speed(iter/s)": 0.670169 }, { "epoch": 0.6561415534895677, "grad_norm": 3.500316619873047, "learning_rate": 9.581453740644628e-05, "loss": 2.4428165435791014, "memory(GiB)": 71.23, "step": 15315, "token_acc": 0.4964788732394366, "train_speed(iter/s)": 0.670175 }, { "epoch": 0.6563557688188166, "grad_norm": 3.323315143585205, "learning_rate": 9.581184162482022e-05, "loss": 1.9701923370361327, "memory(GiB)": 71.23, "step": 15320, "token_acc": 0.5430711610486891, "train_speed(iter/s)": 0.670185 }, { "epoch": 0.6565699841480657, "grad_norm": 4.186804294586182, "learning_rate": 9.580914501326968e-05, "loss": 2.516781044006348, "memory(GiB)": 71.23, "step": 15325, "token_acc": 0.4823943661971831, "train_speed(iter/s)": 0.670204 }, { "epoch": 0.6567841994773146, "grad_norm": 3.6225509643554688, "learning_rate": 9.580644757184349e-05, "loss": 2.404816436767578, "memory(GiB)": 71.23, "step": 15330, "token_acc": 0.5054545454545455, 
"train_speed(iter/s)": 0.670242 }, { "epoch": 0.6569984148065635, "grad_norm": 5.9878315925598145, "learning_rate": 9.580374930059055e-05, "loss": 2.4230009078979493, "memory(GiB)": 71.23, "step": 15335, "token_acc": 0.4879725085910653, "train_speed(iter/s)": 0.67027 }, { "epoch": 0.6572126301358125, "grad_norm": 4.2081780433654785, "learning_rate": 9.58010501995597e-05, "loss": 2.646132469177246, "memory(GiB)": 71.23, "step": 15340, "token_acc": 0.4591194968553459, "train_speed(iter/s)": 0.67027 }, { "epoch": 0.6574268454650615, "grad_norm": 3.4387354850769043, "learning_rate": 9.579835026879988e-05, "loss": 2.737339973449707, "memory(GiB)": 71.23, "step": 15345, "token_acc": 0.4694533762057878, "train_speed(iter/s)": 0.670279 }, { "epoch": 0.6576410607943104, "grad_norm": 4.197080135345459, "learning_rate": 9.579564950835998e-05, "loss": 2.4872360229492188, "memory(GiB)": 71.23, "step": 15350, "token_acc": 0.5155038759689923, "train_speed(iter/s)": 0.670304 }, { "epoch": 0.6578552761235594, "grad_norm": 4.8169779777526855, "learning_rate": 9.579294791828893e-05, "loss": 2.4371173858642576, "memory(GiB)": 71.23, "step": 15355, "token_acc": 0.524904214559387, "train_speed(iter/s)": 0.670326 }, { "epoch": 0.6580694914528084, "grad_norm": 3.394742488861084, "learning_rate": 9.579024549863566e-05, "loss": 2.046014595031738, "memory(GiB)": 71.23, "step": 15360, "token_acc": 0.5206611570247934, "train_speed(iter/s)": 0.670347 }, { "epoch": 0.6582837067820573, "grad_norm": 2.8272464275360107, "learning_rate": 9.578754224944914e-05, "loss": 2.58487491607666, "memory(GiB)": 71.23, "step": 15365, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.670348 }, { "epoch": 0.6584979221113063, "grad_norm": 4.334183216094971, "learning_rate": 9.578483817077835e-05, "loss": 2.3170444488525392, "memory(GiB)": 71.23, "step": 15370, "token_acc": 0.4892086330935252, "train_speed(iter/s)": 0.670301 }, { "epoch": 0.6587121374405552, "grad_norm": 4.647551536560059, "learning_rate": 
9.578213326267227e-05, "loss": 2.7231819152832033, "memory(GiB)": 71.23, "step": 15375, "token_acc": 0.46742209631728043, "train_speed(iter/s)": 0.670325 }, { "epoch": 0.6589263527698042, "grad_norm": 3.5617318153381348, "learning_rate": 9.577942752517988e-05, "loss": 2.5610366821289063, "memory(GiB)": 71.23, "step": 15380, "token_acc": 0.4521452145214521, "train_speed(iter/s)": 0.670313 }, { "epoch": 0.6591405680990532, "grad_norm": 5.204919338226318, "learning_rate": 9.577672095835023e-05, "loss": 2.277140998840332, "memory(GiB)": 71.23, "step": 15385, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.670317 }, { "epoch": 0.6593547834283021, "grad_norm": 3.143406629562378, "learning_rate": 9.577401356223233e-05, "loss": 2.450775146484375, "memory(GiB)": 71.23, "step": 15390, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.670363 }, { "epoch": 0.659568998757551, "grad_norm": 3.287036180496216, "learning_rate": 9.577130533687524e-05, "loss": 2.3791553497314455, "memory(GiB)": 71.23, "step": 15395, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.670368 }, { "epoch": 0.6597832140868001, "grad_norm": 3.7904059886932373, "learning_rate": 9.576859628232802e-05, "loss": 2.4110103607177735, "memory(GiB)": 71.23, "step": 15400, "token_acc": 0.4753521126760563, "train_speed(iter/s)": 0.670341 }, { "epoch": 0.659997429416049, "grad_norm": 3.0573017597198486, "learning_rate": 9.576588639863975e-05, "loss": 2.2886995315551757, "memory(GiB)": 71.23, "step": 15405, "token_acc": 0.4740932642487047, "train_speed(iter/s)": 0.670303 }, { "epoch": 0.6602116447452979, "grad_norm": 3.8673675060272217, "learning_rate": 9.57631756858595e-05, "loss": 2.3747058868408204, "memory(GiB)": 71.23, "step": 15410, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.670311 }, { "epoch": 0.660425860074547, "grad_norm": 4.890411853790283, "learning_rate": 9.57604641440364e-05, "loss": 2.641744613647461, "memory(GiB)": 71.23, "step": 15415, "token_acc": 0.496, 
"train_speed(iter/s)": 0.67037 }, { "epoch": 0.6606400754037959, "grad_norm": 2.797257900238037, "learning_rate": 9.575775177321956e-05, "loss": 2.0168550491333006, "memory(GiB)": 71.23, "step": 15420, "token_acc": 0.5272108843537415, "train_speed(iter/s)": 0.670388 }, { "epoch": 0.6608542907330448, "grad_norm": 2.731858491897583, "learning_rate": 9.575503857345813e-05, "loss": 2.3675701141357424, "memory(GiB)": 71.23, "step": 15425, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.67041 }, { "epoch": 0.6610685060622938, "grad_norm": 4.535130500793457, "learning_rate": 9.575232454480126e-05, "loss": 2.5227584838867188, "memory(GiB)": 71.23, "step": 15430, "token_acc": 0.4925373134328358, "train_speed(iter/s)": 0.670425 }, { "epoch": 0.6612827213915428, "grad_norm": 6.610372066497803, "learning_rate": 9.574960968729809e-05, "loss": 2.3488933563232424, "memory(GiB)": 71.23, "step": 15435, "token_acc": 0.5473251028806584, "train_speed(iter/s)": 0.670463 }, { "epoch": 0.6614969367207918, "grad_norm": 3.36790132522583, "learning_rate": 9.574689400099784e-05, "loss": 2.3905847549438475, "memory(GiB)": 71.23, "step": 15440, "token_acc": 0.4935897435897436, "train_speed(iter/s)": 0.670423 }, { "epoch": 0.6617111520500407, "grad_norm": 4.398744106292725, "learning_rate": 9.574417748594968e-05, "loss": 2.652244758605957, "memory(GiB)": 71.23, "step": 15445, "token_acc": 0.4365079365079365, "train_speed(iter/s)": 0.670411 }, { "epoch": 0.6619253673792896, "grad_norm": 3.1280593872070312, "learning_rate": 9.574146014220284e-05, "loss": 2.526279258728027, "memory(GiB)": 71.23, "step": 15450, "token_acc": 0.47416413373860183, "train_speed(iter/s)": 0.670452 }, { "epoch": 0.6621395827085387, "grad_norm": 3.4262053966522217, "learning_rate": 9.573874196980656e-05, "loss": 2.6396051406860352, "memory(GiB)": 71.23, "step": 15455, "token_acc": 0.46504559270516715, "train_speed(iter/s)": 0.670439 }, { "epoch": 0.6623537980377876, "grad_norm": 3.9453866481781006, 
"learning_rate": 9.573602296881003e-05, "loss": 2.4620542526245117, "memory(GiB)": 71.23, "step": 15460, "token_acc": 0.4318936877076412, "train_speed(iter/s)": 0.670451 }, { "epoch": 0.6625680133670365, "grad_norm": 3.3483383655548096, "learning_rate": 9.573330313926257e-05, "loss": 2.599359893798828, "memory(GiB)": 71.23, "step": 15465, "token_acc": 0.468944099378882, "train_speed(iter/s)": 0.670429 }, { "epoch": 0.6627822286962856, "grad_norm": 3.4100704193115234, "learning_rate": 9.57305824812134e-05, "loss": 2.237646293640137, "memory(GiB)": 71.23, "step": 15470, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.670437 }, { "epoch": 0.6629964440255345, "grad_norm": 4.3206400871276855, "learning_rate": 9.572786099471183e-05, "loss": 2.3382984161376954, "memory(GiB)": 71.23, "step": 15475, "token_acc": 0.47509578544061304, "train_speed(iter/s)": 0.670417 }, { "epoch": 0.6632106593547834, "grad_norm": 2.908062696456909, "learning_rate": 9.572513867980716e-05, "loss": 2.4267215728759766, "memory(GiB)": 71.23, "step": 15480, "token_acc": 0.5014409221902018, "train_speed(iter/s)": 0.670456 }, { "epoch": 0.6634248746840324, "grad_norm": 2.9796714782714844, "learning_rate": 9.572241553654874e-05, "loss": 2.6060459136962892, "memory(GiB)": 71.23, "step": 15485, "token_acc": 0.45121951219512196, "train_speed(iter/s)": 0.670449 }, { "epoch": 0.6636390900132814, "grad_norm": 3.058687686920166, "learning_rate": 9.571969156498584e-05, "loss": 2.5956867218017576, "memory(GiB)": 71.23, "step": 15490, "token_acc": 0.45394736842105265, "train_speed(iter/s)": 0.670474 }, { "epoch": 0.6638533053425303, "grad_norm": 3.3509082794189453, "learning_rate": 9.571696676516786e-05, "loss": 2.6528024673461914, "memory(GiB)": 71.23, "step": 15495, "token_acc": 0.4329608938547486, "train_speed(iter/s)": 0.670461 }, { "epoch": 0.6640675206717793, "grad_norm": 4.672063827514648, "learning_rate": 9.571424113714412e-05, "loss": 2.572261428833008, "memory(GiB)": 71.23, "step": 15500, 
"token_acc": 0.48322147651006714, "train_speed(iter/s)": 0.670413 }, { "epoch": 0.6640675206717793, "eval_loss": 2.015900135040283, "eval_runtime": 17.5623, "eval_samples_per_second": 5.694, "eval_steps_per_second": 5.694, "eval_token_acc": 0.5153631284916201, "step": 15500 }, { "epoch": 0.6642817360010282, "grad_norm": 3.8162331581115723, "learning_rate": 9.571151468096403e-05, "loss": 2.4741752624511717, "memory(GiB)": 71.23, "step": 15505, "token_acc": 0.5024582104228122, "train_speed(iter/s)": 0.669784 }, { "epoch": 0.6644959513302772, "grad_norm": 3.2227795124053955, "learning_rate": 9.570878739667697e-05, "loss": 2.145673370361328, "memory(GiB)": 71.23, "step": 15510, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.669821 }, { "epoch": 0.6647101666595262, "grad_norm": 3.03668212890625, "learning_rate": 9.570605928433233e-05, "loss": 2.381586265563965, "memory(GiB)": 71.23, "step": 15515, "token_acc": 0.47468354430379744, "train_speed(iter/s)": 0.669865 }, { "epoch": 0.6649243819887751, "grad_norm": 3.333827257156372, "learning_rate": 9.570333034397958e-05, "loss": 2.4317516326904296, "memory(GiB)": 71.23, "step": 15520, "token_acc": 0.4831081081081081, "train_speed(iter/s)": 0.66989 }, { "epoch": 0.665138597318024, "grad_norm": 2.9135570526123047, "learning_rate": 9.570060057566812e-05, "loss": 2.504205894470215, "memory(GiB)": 71.23, "step": 15525, "token_acc": 0.4326923076923077, "train_speed(iter/s)": 0.669937 }, { "epoch": 0.6653528126472731, "grad_norm": 5.788404941558838, "learning_rate": 9.569786997944741e-05, "loss": 2.672800064086914, "memory(GiB)": 71.23, "step": 15530, "token_acc": 0.45222929936305734, "train_speed(iter/s)": 0.669928 }, { "epoch": 0.665567027976522, "grad_norm": 3.2442846298217773, "learning_rate": 9.56951385553669e-05, "loss": 2.386140251159668, "memory(GiB)": 71.23, "step": 15535, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.669974 }, { "epoch": 0.6657812433057709, "grad_norm": 4.719783306121826, 
"learning_rate": 9.569240630347611e-05, "loss": 2.1664899826049804, "memory(GiB)": 71.23, "step": 15540, "token_acc": 0.5159010600706714, "train_speed(iter/s)": 0.670018 }, { "epoch": 0.66599545863502, "grad_norm": 2.9904580116271973, "learning_rate": 9.568967322382449e-05, "loss": 2.4695735931396485, "memory(GiB)": 71.23, "step": 15545, "token_acc": 0.4620253164556962, "train_speed(iter/s)": 0.670048 }, { "epoch": 0.6662096739642689, "grad_norm": 3.1812658309936523, "learning_rate": 9.568693931646161e-05, "loss": 2.466103172302246, "memory(GiB)": 71.23, "step": 15550, "token_acc": 0.49034749034749037, "train_speed(iter/s)": 0.670066 }, { "epoch": 0.6664238892935178, "grad_norm": 3.980398416519165, "learning_rate": 9.568420458143696e-05, "loss": 2.319284439086914, "memory(GiB)": 71.23, "step": 15555, "token_acc": 0.46853146853146854, "train_speed(iter/s)": 0.670063 }, { "epoch": 0.6666381046227668, "grad_norm": 4.058180809020996, "learning_rate": 9.568146901880007e-05, "loss": 2.2942575454711913, "memory(GiB)": 71.23, "step": 15560, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.67009 }, { "epoch": 0.6668523199520158, "grad_norm": 3.7352981567382812, "learning_rate": 9.567873262860054e-05, "loss": 2.6768091201782225, "memory(GiB)": 71.23, "step": 15565, "token_acc": 0.44947735191637633, "train_speed(iter/s)": 0.670113 }, { "epoch": 0.6670665352812647, "grad_norm": 4.094508171081543, "learning_rate": 9.567599541088789e-05, "loss": 2.5642505645751954, "memory(GiB)": 71.23, "step": 15570, "token_acc": 0.4749034749034749, "train_speed(iter/s)": 0.670132 }, { "epoch": 0.6672807506105137, "grad_norm": 4.074856758117676, "learning_rate": 9.567325736571175e-05, "loss": 2.4921024322509764, "memory(GiB)": 71.23, "step": 15575, "token_acc": 0.46706586826347307, "train_speed(iter/s)": 0.670176 }, { "epoch": 0.6674949659397627, "grad_norm": 2.7790873050689697, "learning_rate": 9.567051849312173e-05, "loss": 2.3361927032470704, "memory(GiB)": 71.23, "step": 15580, 
"token_acc": 0.5066225165562914, "train_speed(iter/s)": 0.670198 }, { "epoch": 0.6677091812690116, "grad_norm": 3.3744852542877197, "learning_rate": 9.56677787931674e-05, "loss": 2.522237014770508, "memory(GiB)": 71.23, "step": 15585, "token_acc": 0.4717607973421927, "train_speed(iter/s)": 0.670226 }, { "epoch": 0.6679233965982606, "grad_norm": 3.3223063945770264, "learning_rate": 9.566503826589842e-05, "loss": 2.4446462631225585, "memory(GiB)": 71.23, "step": 15590, "token_acc": 0.4888888888888889, "train_speed(iter/s)": 0.670266 }, { "epoch": 0.6681376119275095, "grad_norm": 3.6960864067077637, "learning_rate": 9.566229691136444e-05, "loss": 2.8189144134521484, "memory(GiB)": 71.23, "step": 15595, "token_acc": 0.44932432432432434, "train_speed(iter/s)": 0.670283 }, { "epoch": 0.6683518272567585, "grad_norm": 4.550871849060059, "learning_rate": 9.565955472961512e-05, "loss": 2.5199398040771483, "memory(GiB)": 71.23, "step": 15600, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.670335 }, { "epoch": 0.6685660425860075, "grad_norm": 3.036888360977173, "learning_rate": 9.565681172070012e-05, "loss": 2.3533403396606447, "memory(GiB)": 71.23, "step": 15605, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.670322 }, { "epoch": 0.6687802579152564, "grad_norm": 2.9400734901428223, "learning_rate": 9.565406788466915e-05, "loss": 2.590842437744141, "memory(GiB)": 71.23, "step": 15610, "token_acc": 0.4628975265017668, "train_speed(iter/s)": 0.670344 }, { "epoch": 0.6689944732445053, "grad_norm": 2.905735492706299, "learning_rate": 9.565132322157193e-05, "loss": 2.313670539855957, "memory(GiB)": 71.23, "step": 15615, "token_acc": 0.461038961038961, "train_speed(iter/s)": 0.670336 }, { "epoch": 0.6692086885737544, "grad_norm": 4.5255208015441895, "learning_rate": 9.564857773145815e-05, "loss": 2.4212995529174806, "memory(GiB)": 71.23, "step": 15620, "token_acc": 0.47560975609756095, "train_speed(iter/s)": 0.670375 }, { "epoch": 0.6694229039030033, 
"grad_norm": 2.790297508239746, "learning_rate": 9.564583141437757e-05, "loss": 2.639586067199707, "memory(GiB)": 71.23, "step": 15625, "token_acc": 0.4074074074074074, "train_speed(iter/s)": 0.67037 }, { "epoch": 0.6696371192322522, "grad_norm": 3.8820981979370117, "learning_rate": 9.564308427037992e-05, "loss": 2.3709280014038088, "memory(GiB)": 71.23, "step": 15630, "token_acc": 0.47335423197492166, "train_speed(iter/s)": 0.670395 }, { "epoch": 0.6698513345615013, "grad_norm": 2.8240628242492676, "learning_rate": 9.564033629951498e-05, "loss": 2.419932174682617, "memory(GiB)": 71.23, "step": 15635, "token_acc": 0.4691780821917808, "train_speed(iter/s)": 0.670428 }, { "epoch": 0.6700655498907502, "grad_norm": 3.0502073764801025, "learning_rate": 9.563758750183256e-05, "loss": 2.701731491088867, "memory(GiB)": 71.23, "step": 15640, "token_acc": 0.45098039215686275, "train_speed(iter/s)": 0.670421 }, { "epoch": 0.6702797652199991, "grad_norm": 3.3868496417999268, "learning_rate": 9.563483787738241e-05, "loss": 2.389781188964844, "memory(GiB)": 71.23, "step": 15645, "token_acc": 0.44528301886792454, "train_speed(iter/s)": 0.670427 }, { "epoch": 0.6704939805492481, "grad_norm": 3.1385486125946045, "learning_rate": 9.563208742621436e-05, "loss": 2.2890886306762694, "memory(GiB)": 71.23, "step": 15650, "token_acc": 0.524390243902439, "train_speed(iter/s)": 0.670435 }, { "epoch": 0.6707081958784971, "grad_norm": 3.748811960220337, "learning_rate": 9.562933614837826e-05, "loss": 2.6112470626831055, "memory(GiB)": 71.23, "step": 15655, "token_acc": 0.4831081081081081, "train_speed(iter/s)": 0.670437 }, { "epoch": 0.670922411207746, "grad_norm": 3.303093671798706, "learning_rate": 9.562658404392392e-05, "loss": 2.3673032760620116, "memory(GiB)": 71.23, "step": 15660, "token_acc": 0.4642857142857143, "train_speed(iter/s)": 0.670421 }, { "epoch": 0.671136626536995, "grad_norm": 3.2872400283813477, "learning_rate": 9.562383111290121e-05, "loss": 2.7586894989013673, 
"memory(GiB)": 71.23, "step": 15665, "token_acc": 0.4489795918367347, "train_speed(iter/s)": 0.670385 }, { "epoch": 0.6713508418662439, "grad_norm": 2.9874706268310547, "learning_rate": 9.562107735536002e-05, "loss": 2.693702507019043, "memory(GiB)": 71.23, "step": 15670, "token_acc": 0.46397694524495675, "train_speed(iter/s)": 0.670407 }, { "epoch": 0.6715650571954929, "grad_norm": 4.232640743255615, "learning_rate": 9.56183227713502e-05, "loss": 2.518440818786621, "memory(GiB)": 71.23, "step": 15675, "token_acc": 0.432, "train_speed(iter/s)": 0.670421 }, { "epoch": 0.6717792725247419, "grad_norm": 3.03826904296875, "learning_rate": 9.561556736092166e-05, "loss": 2.753668212890625, "memory(GiB)": 71.23, "step": 15680, "token_acc": 0.4506578947368421, "train_speed(iter/s)": 0.670369 }, { "epoch": 0.6719934878539908, "grad_norm": 3.338747262954712, "learning_rate": 9.561281112412432e-05, "loss": 2.5139911651611326, "memory(GiB)": 71.23, "step": 15685, "token_acc": 0.4603658536585366, "train_speed(iter/s)": 0.670333 }, { "epoch": 0.6722077031832397, "grad_norm": 3.818324327468872, "learning_rate": 9.561005406100814e-05, "loss": 2.5754613876342773, "memory(GiB)": 71.23, "step": 15690, "token_acc": 0.43882978723404253, "train_speed(iter/s)": 0.670366 }, { "epoch": 0.6724219185124888, "grad_norm": 3.0726170539855957, "learning_rate": 9.560729617162303e-05, "loss": 2.610603141784668, "memory(GiB)": 71.23, "step": 15695, "token_acc": 0.43452380952380953, "train_speed(iter/s)": 0.67043 }, { "epoch": 0.6726361338417377, "grad_norm": 2.4690918922424316, "learning_rate": 9.560453745601898e-05, "loss": 2.3483964920043947, "memory(GiB)": 71.23, "step": 15700, "token_acc": 0.4438040345821326, "train_speed(iter/s)": 0.670425 }, { "epoch": 0.6728503491709866, "grad_norm": 3.1673450469970703, "learning_rate": 9.560177791424595e-05, "loss": 2.245326614379883, "memory(GiB)": 71.23, "step": 15705, "token_acc": 0.5265151515151515, "train_speed(iter/s)": 0.670431 }, { "epoch": 
0.6730645645002357, "grad_norm": 3.4926655292510986, "learning_rate": 9.559901754635393e-05, "loss": 2.1567779541015626, "memory(GiB)": 71.23, "step": 15710, "token_acc": 0.48046875, "train_speed(iter/s)": 0.670447 }, { "epoch": 0.6732787798294846, "grad_norm": 4.471959114074707, "learning_rate": 9.559625635239294e-05, "loss": 2.2662069320678713, "memory(GiB)": 71.23, "step": 15715, "token_acc": 0.5228758169934641, "train_speed(iter/s)": 0.670469 }, { "epoch": 0.6734929951587335, "grad_norm": 3.6414663791656494, "learning_rate": 9.559349433241298e-05, "loss": 2.44586296081543, "memory(GiB)": 71.23, "step": 15720, "token_acc": 0.4645669291338583, "train_speed(iter/s)": 0.670455 }, { "epoch": 0.6737072104879825, "grad_norm": 3.1448147296905518, "learning_rate": 9.559073148646412e-05, "loss": 2.1093267440795898, "memory(GiB)": 71.23, "step": 15725, "token_acc": 0.5370370370370371, "train_speed(iter/s)": 0.670442 }, { "epoch": 0.6739214258172315, "grad_norm": 3.788553237915039, "learning_rate": 9.558796781459639e-05, "loss": 2.3793285369873045, "memory(GiB)": 71.23, "step": 15730, "token_acc": 0.4658385093167702, "train_speed(iter/s)": 0.670455 }, { "epoch": 0.6741356411464804, "grad_norm": 5.936766147613525, "learning_rate": 9.558520331685982e-05, "loss": 2.2791362762451173, "memory(GiB)": 71.23, "step": 15735, "token_acc": 0.4954128440366973, "train_speed(iter/s)": 0.670521 }, { "epoch": 0.6743498564757294, "grad_norm": 3.7968907356262207, "learning_rate": 9.558243799330457e-05, "loss": 2.506591033935547, "memory(GiB)": 71.23, "step": 15740, "token_acc": 0.4931506849315068, "train_speed(iter/s)": 0.670482 }, { "epoch": 0.6745640718049783, "grad_norm": 3.6545474529266357, "learning_rate": 9.557967184398068e-05, "loss": 2.539352798461914, "memory(GiB)": 71.23, "step": 15745, "token_acc": 0.44744744744744747, "train_speed(iter/s)": 0.670515 }, { "epoch": 0.6747782871342273, "grad_norm": 8.570533752441406, "learning_rate": 9.557690486893829e-05, "loss": 
2.463914680480957, "memory(GiB)": 71.23, "step": 15750, "token_acc": 0.4787234042553192, "train_speed(iter/s)": 0.670573 }, { "epoch": 0.6749925024634763, "grad_norm": 3.5986766815185547, "learning_rate": 9.55741370682275e-05, "loss": 2.1047893524169923, "memory(GiB)": 71.23, "step": 15755, "token_acc": 0.5477178423236515, "train_speed(iter/s)": 0.670613 }, { "epoch": 0.6752067177927252, "grad_norm": 4.143989562988281, "learning_rate": 9.557136844189847e-05, "loss": 2.7437198638916014, "memory(GiB)": 71.23, "step": 15760, "token_acc": 0.44983818770226536, "train_speed(iter/s)": 0.670606 }, { "epoch": 0.6754209331219742, "grad_norm": 2.720583915710449, "learning_rate": 9.556859899000135e-05, "loss": 2.560778999328613, "memory(GiB)": 71.23, "step": 15765, "token_acc": 0.47202797202797203, "train_speed(iter/s)": 0.670646 }, { "epoch": 0.6756351484512232, "grad_norm": 4.504915714263916, "learning_rate": 9.55658287125863e-05, "loss": 2.4760488510131835, "memory(GiB)": 71.23, "step": 15770, "token_acc": 0.47266881028938906, "train_speed(iter/s)": 0.670672 }, { "epoch": 0.6758493637804721, "grad_norm": 3.8929483890533447, "learning_rate": 9.556305760970354e-05, "loss": 2.5241846084594726, "memory(GiB)": 71.23, "step": 15775, "token_acc": 0.5141843971631206, "train_speed(iter/s)": 0.670657 }, { "epoch": 0.6760635791097211, "grad_norm": 3.2461698055267334, "learning_rate": 9.556028568140324e-05, "loss": 2.4221704483032225, "memory(GiB)": 71.23, "step": 15780, "token_acc": 0.4925373134328358, "train_speed(iter/s)": 0.670688 }, { "epoch": 0.6762777944389701, "grad_norm": 4.687538146972656, "learning_rate": 9.555751292773562e-05, "loss": 2.3967212677001952, "memory(GiB)": 71.23, "step": 15785, "token_acc": 0.48333333333333334, "train_speed(iter/s)": 0.670742 }, { "epoch": 0.676492009768219, "grad_norm": 2.9512393474578857, "learning_rate": 9.555473934875094e-05, "loss": 2.2846405029296877, "memory(GiB)": 71.23, "step": 15790, "token_acc": 0.538961038961039, 
"train_speed(iter/s)": 0.670744 }, { "epoch": 0.676706225097468, "grad_norm": 3.4983155727386475, "learning_rate": 9.55519649444994e-05, "loss": 2.4305864334106446, "memory(GiB)": 71.23, "step": 15795, "token_acc": 0.46075085324232085, "train_speed(iter/s)": 0.670756 }, { "epoch": 0.676920440426717, "grad_norm": 3.6987080574035645, "learning_rate": 9.55491897150313e-05, "loss": 2.3739953994750977, "memory(GiB)": 71.23, "step": 15800, "token_acc": 0.49044585987261147, "train_speed(iter/s)": 0.670787 }, { "epoch": 0.6771346557559659, "grad_norm": 3.433795213699341, "learning_rate": 9.554641366039688e-05, "loss": 2.2076719284057615, "memory(GiB)": 71.23, "step": 15805, "token_acc": 0.4861111111111111, "train_speed(iter/s)": 0.670808 }, { "epoch": 0.6773488710852149, "grad_norm": 3.0623700618743896, "learning_rate": 9.554363678064646e-05, "loss": 2.262320137023926, "memory(GiB)": 71.23, "step": 15810, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.670813 }, { "epoch": 0.6775630864144638, "grad_norm": 3.9366750717163086, "learning_rate": 9.554085907583035e-05, "loss": 2.4064865112304688, "memory(GiB)": 71.23, "step": 15815, "token_acc": 0.4155844155844156, "train_speed(iter/s)": 0.670813 }, { "epoch": 0.6777773017437128, "grad_norm": 4.381114959716797, "learning_rate": 9.553808054599885e-05, "loss": 2.5613256454467774, "memory(GiB)": 71.23, "step": 15820, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.670838 }, { "epoch": 0.6779915170729618, "grad_norm": 3.5956361293792725, "learning_rate": 9.553530119120228e-05, "loss": 2.2900041580200194, "memory(GiB)": 71.23, "step": 15825, "token_acc": 0.5346534653465347, "train_speed(iter/s)": 0.670853 }, { "epoch": 0.6782057324022107, "grad_norm": 3.5865960121154785, "learning_rate": 9.553252101149104e-05, "loss": 2.3895463943481445, "memory(GiB)": 71.23, "step": 15830, "token_acc": 0.5020080321285141, "train_speed(iter/s)": 0.670811 }, { "epoch": 0.6784199477314596, "grad_norm": 3.280383825302124, 
"learning_rate": 9.552974000691546e-05, "loss": 2.3272735595703127, "memory(GiB)": 71.23, "step": 15835, "token_acc": 0.46405228758169936, "train_speed(iter/s)": 0.670879 }, { "epoch": 0.6786341630607087, "grad_norm": 3.8291471004486084, "learning_rate": 9.552695817752592e-05, "loss": 2.70672550201416, "memory(GiB)": 71.23, "step": 15840, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.670914 }, { "epoch": 0.6788483783899576, "grad_norm": 3.6124191284179688, "learning_rate": 9.552417552337284e-05, "loss": 2.455348587036133, "memory(GiB)": 71.23, "step": 15845, "token_acc": 0.49825783972125437, "train_speed(iter/s)": 0.670916 }, { "epoch": 0.6790625937192065, "grad_norm": 2.756155014038086, "learning_rate": 9.552139204450661e-05, "loss": 2.442479705810547, "memory(GiB)": 71.23, "step": 15850, "token_acc": 0.445578231292517, "train_speed(iter/s)": 0.670928 }, { "epoch": 0.6792768090484556, "grad_norm": 3.7508058547973633, "learning_rate": 9.551860774097765e-05, "loss": 2.6725786209106444, "memory(GiB)": 71.23, "step": 15855, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670938 }, { "epoch": 0.6794910243777045, "grad_norm": 4.100094318389893, "learning_rate": 9.551582261283642e-05, "loss": 1.9081653594970702, "memory(GiB)": 71.23, "step": 15860, "token_acc": 0.610655737704918, "train_speed(iter/s)": 0.670955 }, { "epoch": 0.6797052397069534, "grad_norm": 5.726300239562988, "learning_rate": 9.551303666013338e-05, "loss": 2.2385690689086912, "memory(GiB)": 71.23, "step": 15865, "token_acc": 0.5257731958762887, "train_speed(iter/s)": 0.67094 }, { "epoch": 0.6799194550362024, "grad_norm": 3.33029842376709, "learning_rate": 9.551024988291896e-05, "loss": 2.8663604736328123, "memory(GiB)": 71.23, "step": 15870, "token_acc": 0.41566265060240964, "train_speed(iter/s)": 0.670956 }, { "epoch": 0.6801336703654514, "grad_norm": 2.8917782306671143, "learning_rate": 9.55074622812437e-05, "loss": 2.130352592468262, "memory(GiB)": 71.23, "step": 15875, 
"token_acc": 0.5430711610486891, "train_speed(iter/s)": 0.670955 }, { "epoch": 0.6803478856947003, "grad_norm": 2.8947367668151855, "learning_rate": 9.550467385515805e-05, "loss": 2.3723834991455077, "memory(GiB)": 71.23, "step": 15880, "token_acc": 0.47796610169491527, "train_speed(iter/s)": 0.670958 }, { "epoch": 0.6805621010239493, "grad_norm": 4.271760940551758, "learning_rate": 9.550188460471254e-05, "loss": 2.381968307495117, "memory(GiB)": 71.23, "step": 15885, "token_acc": 0.5275590551181102, "train_speed(iter/s)": 0.670997 }, { "epoch": 0.6807763163531982, "grad_norm": 3.6142654418945312, "learning_rate": 9.549909452995773e-05, "loss": 2.6811342239379883, "memory(GiB)": 71.23, "step": 15890, "token_acc": 0.4930555555555556, "train_speed(iter/s)": 0.671008 }, { "epoch": 0.6809905316824472, "grad_norm": 3.1339540481567383, "learning_rate": 9.549630363094413e-05, "loss": 2.358026695251465, "memory(GiB)": 71.23, "step": 15895, "token_acc": 0.5271317829457365, "train_speed(iter/s)": 0.671029 }, { "epoch": 0.6812047470116962, "grad_norm": 3.234286069869995, "learning_rate": 9.549351190772231e-05, "loss": 2.6376930236816407, "memory(GiB)": 71.23, "step": 15900, "token_acc": 0.4637223974763407, "train_speed(iter/s)": 0.671066 }, { "epoch": 0.6814189623409451, "grad_norm": 4.239022254943848, "learning_rate": 9.549071936034284e-05, "loss": 2.352976989746094, "memory(GiB)": 71.23, "step": 15905, "token_acc": 0.5034722222222222, "train_speed(iter/s)": 0.67111 }, { "epoch": 0.681633177670194, "grad_norm": 2.619431257247925, "learning_rate": 9.548792598885631e-05, "loss": 2.4918590545654298, "memory(GiB)": 71.23, "step": 15910, "token_acc": 0.4934640522875817, "train_speed(iter/s)": 0.671086 }, { "epoch": 0.6818473929994431, "grad_norm": 3.640697717666626, "learning_rate": 9.548513179331335e-05, "loss": 2.4491735458374024, "memory(GiB)": 71.23, "step": 15915, "token_acc": 0.47674418604651164, "train_speed(iter/s)": 0.6711 }, { "epoch": 0.682061608328692, "grad_norm": 
5.519345283508301, "learning_rate": 9.548233677376455e-05, "loss": 2.588352584838867, "memory(GiB)": 71.23, "step": 15920, "token_acc": 0.44545454545454544, "train_speed(iter/s)": 0.671146 }, { "epoch": 0.6822758236579409, "grad_norm": 3.7859370708465576, "learning_rate": 9.547954093026056e-05, "loss": 2.325282669067383, "memory(GiB)": 71.23, "step": 15925, "token_acc": 0.5137931034482759, "train_speed(iter/s)": 0.671203 }, { "epoch": 0.68249003898719, "grad_norm": 3.7702980041503906, "learning_rate": 9.547674426285201e-05, "loss": 2.48504695892334, "memory(GiB)": 71.23, "step": 15930, "token_acc": 0.4681528662420382, "train_speed(iter/s)": 0.671112 }, { "epoch": 0.6827042543164389, "grad_norm": 3.9554009437561035, "learning_rate": 9.547394677158958e-05, "loss": 2.672492790222168, "memory(GiB)": 71.23, "step": 15935, "token_acc": 0.42450142450142453, "train_speed(iter/s)": 0.671133 }, { "epoch": 0.6829184696456878, "grad_norm": 2.9597675800323486, "learning_rate": 9.547114845652395e-05, "loss": 2.617076301574707, "memory(GiB)": 71.23, "step": 15940, "token_acc": 0.42424242424242425, "train_speed(iter/s)": 0.67111 }, { "epoch": 0.6831326849749368, "grad_norm": 3.624133348464966, "learning_rate": 9.546834931770581e-05, "loss": 2.590013885498047, "memory(GiB)": 71.23, "step": 15945, "token_acc": 0.4574780058651026, "train_speed(iter/s)": 0.671092 }, { "epoch": 0.6833469003041858, "grad_norm": 3.224522829055786, "learning_rate": 9.546554935518586e-05, "loss": 2.8841827392578123, "memory(GiB)": 71.23, "step": 15950, "token_acc": 0.4491017964071856, "train_speed(iter/s)": 0.671134 }, { "epoch": 0.6835611156334347, "grad_norm": 3.357203245162964, "learning_rate": 9.546274856901485e-05, "loss": 2.681669235229492, "memory(GiB)": 71.23, "step": 15955, "token_acc": 0.41818181818181815, "train_speed(iter/s)": 0.671043 }, { "epoch": 0.6837753309626837, "grad_norm": 3.656757354736328, "learning_rate": 9.545994695924349e-05, "loss": 2.497698211669922, "memory(GiB)": 71.23, 
"step": 15960, "token_acc": 0.46864686468646866, "train_speed(iter/s)": 0.671021 }, { "epoch": 0.6839895462919326, "grad_norm": 4.073347568511963, "learning_rate": 9.545714452592257e-05, "loss": 2.175185966491699, "memory(GiB)": 71.23, "step": 15965, "token_acc": 0.544, "train_speed(iter/s)": 0.671014 }, { "epoch": 0.6842037616211816, "grad_norm": 3.4380033016204834, "learning_rate": 9.545434126910279e-05, "loss": 2.592732810974121, "memory(GiB)": 71.23, "step": 15970, "token_acc": 0.49514563106796117, "train_speed(iter/s)": 0.671019 }, { "epoch": 0.6844179769504306, "grad_norm": 2.9370245933532715, "learning_rate": 9.545153718883502e-05, "loss": 2.1370874404907227, "memory(GiB)": 71.23, "step": 15975, "token_acc": 0.5373134328358209, "train_speed(iter/s)": 0.67103 }, { "epoch": 0.6846321922796795, "grad_norm": 3.28885817527771, "learning_rate": 9.544873228517e-05, "loss": 2.5360734939575194, "memory(GiB)": 71.23, "step": 15980, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.671029 }, { "epoch": 0.6848464076089285, "grad_norm": 3.304187536239624, "learning_rate": 9.544592655815855e-05, "loss": 2.3995601654052736, "memory(GiB)": 71.23, "step": 15985, "token_acc": 0.51, "train_speed(iter/s)": 0.670982 }, { "epoch": 0.6850606229381775, "grad_norm": 3.449781894683838, "learning_rate": 9.544312000785152e-05, "loss": 2.412211799621582, "memory(GiB)": 71.23, "step": 15990, "token_acc": 0.48514851485148514, "train_speed(iter/s)": 0.671028 }, { "epoch": 0.6852748382674264, "grad_norm": 4.338264465332031, "learning_rate": 9.544031263429974e-05, "loss": 2.278207778930664, "memory(GiB)": 71.23, "step": 15995, "token_acc": 0.4899328859060403, "train_speed(iter/s)": 0.671045 }, { "epoch": 0.6854890535966753, "grad_norm": 4.647590637207031, "learning_rate": 9.543750443755407e-05, "loss": 2.530801010131836, "memory(GiB)": 71.23, "step": 16000, "token_acc": 0.4726027397260274, "train_speed(iter/s)": 0.671042 }, { "epoch": 0.6854890535966753, "eval_loss": 
2.020615816116333, "eval_runtime": 16.994, "eval_samples_per_second": 5.884, "eval_steps_per_second": 5.884, "eval_token_acc": 0.5145348837209303, "step": 16000 }, { "epoch": 0.6857032689259244, "grad_norm": 4.0969953536987305, "learning_rate": 9.543469541766538e-05, "loss": 2.2186405181884767, "memory(GiB)": 71.23, "step": 16005, "token_acc": 0.5173824130879345, "train_speed(iter/s)": 0.670495 }, { "epoch": 0.6859174842551733, "grad_norm": 3.5528507232666016, "learning_rate": 9.543188557468456e-05, "loss": 2.2405656814575194, "memory(GiB)": 71.23, "step": 16010, "token_acc": 0.4887459807073955, "train_speed(iter/s)": 0.670523 }, { "epoch": 0.6861316995844222, "grad_norm": 3.9070863723754883, "learning_rate": 9.542907490866254e-05, "loss": 2.310932159423828, "memory(GiB)": 71.23, "step": 16015, "token_acc": 0.5075187969924813, "train_speed(iter/s)": 0.670497 }, { "epoch": 0.6863459149136713, "grad_norm": 3.6021435260772705, "learning_rate": 9.542626341965018e-05, "loss": 2.177700424194336, "memory(GiB)": 71.23, "step": 16020, "token_acc": 0.4899328859060403, "train_speed(iter/s)": 0.670516 }, { "epoch": 0.6865601302429202, "grad_norm": 4.09331750869751, "learning_rate": 9.542345110769846e-05, "loss": 2.289377784729004, "memory(GiB)": 71.23, "step": 16025, "token_acc": 0.5033112582781457, "train_speed(iter/s)": 0.670485 }, { "epoch": 0.6867743455721691, "grad_norm": 3.9696505069732666, "learning_rate": 9.54206379728583e-05, "loss": 2.6301734924316404, "memory(GiB)": 71.23, "step": 16030, "token_acc": 0.46540880503144655, "train_speed(iter/s)": 0.670489 }, { "epoch": 0.6869885609014181, "grad_norm": 2.785356044769287, "learning_rate": 9.541782401518069e-05, "loss": 2.40997200012207, "memory(GiB)": 71.23, "step": 16035, "token_acc": 0.4837662337662338, "train_speed(iter/s)": 0.670484 }, { "epoch": 0.6872027762306671, "grad_norm": 4.31049108505249, "learning_rate": 9.541500923471658e-05, "loss": 2.6312028884887697, "memory(GiB)": 71.23, "step": 16040, "token_acc": 
0.4520547945205479, "train_speed(iter/s)": 0.670498 }, { "epoch": 0.687416991559916, "grad_norm": 4.442522048950195, "learning_rate": 9.541219363151699e-05, "loss": 2.3730018615722654, "memory(GiB)": 71.23, "step": 16045, "token_acc": 0.48169014084507045, "train_speed(iter/s)": 0.670518 }, { "epoch": 0.687631206889165, "grad_norm": 7.135929107666016, "learning_rate": 9.540937720563289e-05, "loss": 2.8306039810180663, "memory(GiB)": 71.23, "step": 16050, "token_acc": 0.39846743295019155, "train_speed(iter/s)": 0.670489 }, { "epoch": 0.6878454222184139, "grad_norm": 2.77970552444458, "learning_rate": 9.540655995711534e-05, "loss": 2.3761419296264648, "memory(GiB)": 71.23, "step": 16055, "token_acc": 0.48823529411764705, "train_speed(iter/s)": 0.670473 }, { "epoch": 0.6880596375476629, "grad_norm": 5.090189456939697, "learning_rate": 9.540374188601537e-05, "loss": 2.417680549621582, "memory(GiB)": 71.23, "step": 16060, "token_acc": 0.5071090047393365, "train_speed(iter/s)": 0.670461 }, { "epoch": 0.6882738528769119, "grad_norm": 3.7535197734832764, "learning_rate": 9.540092299238401e-05, "loss": 2.3354047775268554, "memory(GiB)": 71.23, "step": 16065, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.670474 }, { "epoch": 0.6884880682061608, "grad_norm": 3.167930841445923, "learning_rate": 9.539810327627234e-05, "loss": 2.487378692626953, "memory(GiB)": 71.23, "step": 16070, "token_acc": 0.4564459930313589, "train_speed(iter/s)": 0.670506 }, { "epoch": 0.6887022835354097, "grad_norm": 2.891538143157959, "learning_rate": 9.539528273773147e-05, "loss": 2.3935626983642577, "memory(GiB)": 71.23, "step": 16075, "token_acc": 0.47962382445141066, "train_speed(iter/s)": 0.670523 }, { "epoch": 0.6889164988646588, "grad_norm": 3.3194539546966553, "learning_rate": 9.539246137681243e-05, "loss": 2.45595703125, "memory(GiB)": 71.23, "step": 16080, "token_acc": 0.4788273615635179, "train_speed(iter/s)": 0.670484 }, { "epoch": 0.6891307141939077, "grad_norm": 
2.895723819732666, "learning_rate": 9.538963919356638e-05, "loss": 2.237504768371582, "memory(GiB)": 71.23, "step": 16085, "token_acc": 0.47761194029850745, "train_speed(iter/s)": 0.670516 }, { "epoch": 0.6893449295231566, "grad_norm": 4.835536479949951, "learning_rate": 9.538681618804445e-05, "loss": 2.3143856048583986, "memory(GiB)": 71.23, "step": 16090, "token_acc": 0.47315436241610737, "train_speed(iter/s)": 0.670555 }, { "epoch": 0.6895591448524057, "grad_norm": 3.6407220363616943, "learning_rate": 9.538399236029775e-05, "loss": 2.312334442138672, "memory(GiB)": 71.23, "step": 16095, "token_acc": 0.5261324041811847, "train_speed(iter/s)": 0.67059 }, { "epoch": 0.6897733601816546, "grad_norm": 3.1895217895507812, "learning_rate": 9.538116771037749e-05, "loss": 2.590440559387207, "memory(GiB)": 71.23, "step": 16100, "token_acc": 0.46745562130177515, "train_speed(iter/s)": 0.670602 }, { "epoch": 0.6899875755109035, "grad_norm": 2.8046388626098633, "learning_rate": 9.537834223833477e-05, "loss": 2.703339385986328, "memory(GiB)": 71.23, "step": 16105, "token_acc": 0.4329501915708812, "train_speed(iter/s)": 0.670628 }, { "epoch": 0.6902017908401525, "grad_norm": 4.204694747924805, "learning_rate": 9.537551594422083e-05, "loss": 2.4184906005859377, "memory(GiB)": 71.23, "step": 16110, "token_acc": 0.479020979020979, "train_speed(iter/s)": 0.670667 }, { "epoch": 0.6904160061694015, "grad_norm": 3.6522579193115234, "learning_rate": 9.537268882808684e-05, "loss": 2.5845409393310548, "memory(GiB)": 71.23, "step": 16115, "token_acc": 0.4489051094890511, "train_speed(iter/s)": 0.670724 }, { "epoch": 0.6906302214986505, "grad_norm": 3.637504816055298, "learning_rate": 9.536986088998403e-05, "loss": 2.69039306640625, "memory(GiB)": 71.23, "step": 16120, "token_acc": 0.4300341296928328, "train_speed(iter/s)": 0.670725 }, { "epoch": 0.6908444368278994, "grad_norm": 3.912257432937622, "learning_rate": 9.536703212996364e-05, "loss": 2.631424331665039, "memory(GiB)": 71.23, 
"step": 16125, "token_acc": 0.4421768707482993, "train_speed(iter/s)": 0.67069 }, { "epoch": 0.6910586521571483, "grad_norm": 3.339104413986206, "learning_rate": 9.536420254807689e-05, "loss": 2.590072441101074, "memory(GiB)": 71.23, "step": 16130, "token_acc": 0.4806451612903226, "train_speed(iter/s)": 0.670717 }, { "epoch": 0.6912728674863974, "grad_norm": 3.6316606998443604, "learning_rate": 9.536137214437509e-05, "loss": 2.3032577514648436, "memory(GiB)": 71.23, "step": 16135, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.67072 }, { "epoch": 0.6914870828156463, "grad_norm": 3.528118133544922, "learning_rate": 9.535854091890944e-05, "loss": 2.2322036743164064, "memory(GiB)": 71.23, "step": 16140, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.670735 }, { "epoch": 0.6917012981448952, "grad_norm": 4.68072509765625, "learning_rate": 9.535570887173129e-05, "loss": 2.3074216842651367, "memory(GiB)": 71.23, "step": 16145, "token_acc": 0.48639455782312924, "train_speed(iter/s)": 0.670737 }, { "epoch": 0.6919155134741443, "grad_norm": 4.191206455230713, "learning_rate": 9.535287600289193e-05, "loss": 2.6246564865112303, "memory(GiB)": 71.23, "step": 16150, "token_acc": 0.47575757575757577, "train_speed(iter/s)": 0.670659 }, { "epoch": 0.6921297288033932, "grad_norm": 2.9362621307373047, "learning_rate": 9.535004231244267e-05, "loss": 2.7714845657348635, "memory(GiB)": 71.23, "step": 16155, "token_acc": 0.44666666666666666, "train_speed(iter/s)": 0.670667 }, { "epoch": 0.6923439441326421, "grad_norm": 3.203786849975586, "learning_rate": 9.534720780043487e-05, "loss": 2.558705139160156, "memory(GiB)": 71.23, "step": 16160, "token_acc": 0.48089171974522293, "train_speed(iter/s)": 0.670623 }, { "epoch": 0.6925581594618911, "grad_norm": 3.7447569370269775, "learning_rate": 9.534437246691983e-05, "loss": 2.5970766067504885, "memory(GiB)": 71.23, "step": 16165, "token_acc": 0.4458204334365325, "train_speed(iter/s)": 0.670623 }, { "epoch": 
0.6927723747911401, "grad_norm": 4.335312843322754, "learning_rate": 9.534153631194896e-05, "loss": 2.401152992248535, "memory(GiB)": 71.23, "step": 16170, "token_acc": 0.4641638225255973, "train_speed(iter/s)": 0.670544 }, { "epoch": 0.692986590120389, "grad_norm": 3.332698106765747, "learning_rate": 9.533869933557365e-05, "loss": 2.4054737091064453, "memory(GiB)": 71.23, "step": 16175, "token_acc": 0.4968944099378882, "train_speed(iter/s)": 0.67051 }, { "epoch": 0.693200805449638, "grad_norm": 4.290439128875732, "learning_rate": 9.533586153784524e-05, "loss": 2.621613693237305, "memory(GiB)": 71.23, "step": 16180, "token_acc": 0.42857142857142855, "train_speed(iter/s)": 0.670496 }, { "epoch": 0.693415020778887, "grad_norm": 3.2107107639312744, "learning_rate": 9.533302291881518e-05, "loss": 2.4167015075683596, "memory(GiB)": 71.23, "step": 16185, "token_acc": 0.5035211267605634, "train_speed(iter/s)": 0.670474 }, { "epoch": 0.6936292361081359, "grad_norm": 3.058537244796753, "learning_rate": 9.533018347853488e-05, "loss": 2.156382751464844, "memory(GiB)": 71.23, "step": 16190, "token_acc": 0.5291666666666667, "train_speed(iter/s)": 0.670452 }, { "epoch": 0.6938434514373849, "grad_norm": 3.7107582092285156, "learning_rate": 9.532734321705579e-05, "loss": 2.610948371887207, "memory(GiB)": 71.23, "step": 16195, "token_acc": 0.4816053511705686, "train_speed(iter/s)": 0.670478 }, { "epoch": 0.6940576667666338, "grad_norm": 3.7657229900360107, "learning_rate": 9.532450213442934e-05, "loss": 2.5094367980957033, "memory(GiB)": 71.23, "step": 16200, "token_acc": 0.4658385093167702, "train_speed(iter/s)": 0.670509 }, { "epoch": 0.6942718820958828, "grad_norm": 3.4648449420928955, "learning_rate": 9.532166023070704e-05, "loss": 2.1740814208984376, "memory(GiB)": 71.23, "step": 16205, "token_acc": 0.5, "train_speed(iter/s)": 0.670509 }, { "epoch": 0.6944860974251318, "grad_norm": 3.223705291748047, "learning_rate": 9.531881750594034e-05, "loss": 2.6849082946777343, 
"memory(GiB)": 71.23, "step": 16210, "token_acc": 0.45098039215686275, "train_speed(iter/s)": 0.670518 }, { "epoch": 0.6947003127543807, "grad_norm": 3.655050277709961, "learning_rate": 9.531597396018074e-05, "loss": 2.190512466430664, "memory(GiB)": 71.23, "step": 16215, "token_acc": 0.5, "train_speed(iter/s)": 0.670557 }, { "epoch": 0.6949145280836296, "grad_norm": 2.949561595916748, "learning_rate": 9.531312959347976e-05, "loss": 2.4350128173828125, "memory(GiB)": 71.23, "step": 16220, "token_acc": 0.501628664495114, "train_speed(iter/s)": 0.670562 }, { "epoch": 0.6951287434128787, "grad_norm": 4.547424793243408, "learning_rate": 9.531028440588894e-05, "loss": 2.4209257125854493, "memory(GiB)": 71.23, "step": 16225, "token_acc": 0.4492753623188406, "train_speed(iter/s)": 0.670553 }, { "epoch": 0.6953429587421276, "grad_norm": 3.8948566913604736, "learning_rate": 9.530743839745981e-05, "loss": 2.79797420501709, "memory(GiB)": 71.23, "step": 16230, "token_acc": 0.423728813559322, "train_speed(iter/s)": 0.670525 }, { "epoch": 0.6955571740713765, "grad_norm": 3.6377499103546143, "learning_rate": 9.530459156824394e-05, "loss": 2.3832387924194336, "memory(GiB)": 71.23, "step": 16235, "token_acc": 0.43884892086330934, "train_speed(iter/s)": 0.67056 }, { "epoch": 0.6957713894006256, "grad_norm": 3.9702210426330566, "learning_rate": 9.53017439182929e-05, "loss": 2.40826473236084, "memory(GiB)": 71.23, "step": 16240, "token_acc": 0.5114503816793893, "train_speed(iter/s)": 0.670593 }, { "epoch": 0.6959856047298745, "grad_norm": 3.1904876232147217, "learning_rate": 9.529889544765825e-05, "loss": 2.326534461975098, "memory(GiB)": 71.23, "step": 16245, "token_acc": 0.5524691358024691, "train_speed(iter/s)": 0.670635 }, { "epoch": 0.6961998200591234, "grad_norm": 3.2574384212493896, "learning_rate": 9.529604615639163e-05, "loss": 2.3758697509765625, "memory(GiB)": 71.23, "step": 16250, "token_acc": 0.4953271028037383, "train_speed(iter/s)": 0.67062 }, { "epoch": 
0.6964140353883724, "grad_norm": 3.2979063987731934, "learning_rate": 9.529319604454464e-05, "loss": 2.1052696228027346, "memory(GiB)": 71.23, "step": 16255, "token_acc": 0.5697211155378487, "train_speed(iter/s)": 0.670612 }, { "epoch": 0.6966282507176214, "grad_norm": 3.354295015335083, "learning_rate": 9.529034511216892e-05, "loss": 2.6958507537841796, "memory(GiB)": 71.23, "step": 16260, "token_acc": 0.44871794871794873, "train_speed(iter/s)": 0.67064 }, { "epoch": 0.6968424660468703, "grad_norm": 3.462165355682373, "learning_rate": 9.52874933593161e-05, "loss": 2.199583625793457, "memory(GiB)": 71.23, "step": 16265, "token_acc": 0.543859649122807, "train_speed(iter/s)": 0.670666 }, { "epoch": 0.6970566813761193, "grad_norm": 3.662590742111206, "learning_rate": 9.528464078603787e-05, "loss": 2.4430408477783203, "memory(GiB)": 71.23, "step": 16270, "token_acc": 0.45592705167173253, "train_speed(iter/s)": 0.670651 }, { "epoch": 0.6972708967053682, "grad_norm": 3.365103244781494, "learning_rate": 9.528178739238587e-05, "loss": 2.4622261047363283, "memory(GiB)": 71.23, "step": 16275, "token_acc": 0.45964912280701753, "train_speed(iter/s)": 0.670646 }, { "epoch": 0.6974851120346172, "grad_norm": 3.5399348735809326, "learning_rate": 9.527893317841183e-05, "loss": 2.617336463928223, "memory(GiB)": 71.23, "step": 16280, "token_acc": 0.4421768707482993, "train_speed(iter/s)": 0.670672 }, { "epoch": 0.6976993273638662, "grad_norm": 3.954374313354492, "learning_rate": 9.527607814416743e-05, "loss": 2.6915950775146484, "memory(GiB)": 71.23, "step": 16285, "token_acc": 0.44727272727272727, "train_speed(iter/s)": 0.670706 }, { "epoch": 0.6979135426931151, "grad_norm": 3.0392372608184814, "learning_rate": 9.52732222897044e-05, "loss": 2.4273977279663086, "memory(GiB)": 71.23, "step": 16290, "token_acc": 0.47003154574132494, "train_speed(iter/s)": 0.670693 }, { "epoch": 0.698127758022364, "grad_norm": 4.220835208892822, "learning_rate": 9.527036561507451e-05, "loss": 
2.7819353103637696, "memory(GiB)": 71.23, "step": 16295, "token_acc": 0.41846153846153844, "train_speed(iter/s)": 0.670622 }, { "epoch": 0.6983419733516131, "grad_norm": 3.6182425022125244, "learning_rate": 9.526750812032946e-05, "loss": 2.585312080383301, "memory(GiB)": 71.23, "step": 16300, "token_acc": 0.44510385756676557, "train_speed(iter/s)": 0.670649 }, { "epoch": 0.698556188680862, "grad_norm": 3.7379753589630127, "learning_rate": 9.526464980552104e-05, "loss": 2.2310775756835937, "memory(GiB)": 71.23, "step": 16305, "token_acc": 0.5504201680672269, "train_speed(iter/s)": 0.670643 }, { "epoch": 0.6987704040101109, "grad_norm": 5.73954439163208, "learning_rate": 9.5261790670701e-05, "loss": 2.3378047943115234, "memory(GiB)": 71.23, "step": 16310, "token_acc": 0.4916387959866221, "train_speed(iter/s)": 0.67067 }, { "epoch": 0.69898461933936, "grad_norm": 3.344156265258789, "learning_rate": 9.525893071592119e-05, "loss": 2.6032316207885744, "memory(GiB)": 71.23, "step": 16315, "token_acc": 0.5075757575757576, "train_speed(iter/s)": 0.670672 }, { "epoch": 0.6991988346686089, "grad_norm": 4.631258010864258, "learning_rate": 9.52560699412334e-05, "loss": 2.770992660522461, "memory(GiB)": 71.23, "step": 16320, "token_acc": 0.44299674267100975, "train_speed(iter/s)": 0.670687 }, { "epoch": 0.6994130499978578, "grad_norm": 2.3428194522857666, "learning_rate": 9.525320834668943e-05, "loss": 2.4752357482910154, "memory(GiB)": 71.23, "step": 16325, "token_acc": 0.4594594594594595, "train_speed(iter/s)": 0.670729 }, { "epoch": 0.6996272653271068, "grad_norm": 4.625335216522217, "learning_rate": 9.525034593234115e-05, "loss": 2.510259437561035, "memory(GiB)": 71.23, "step": 16330, "token_acc": 0.4589041095890411, "train_speed(iter/s)": 0.670722 }, { "epoch": 0.6998414806563558, "grad_norm": 2.6534035205841064, "learning_rate": 9.524748269824039e-05, "loss": 2.406125068664551, "memory(GiB)": 71.23, "step": 16335, "token_acc": 0.4588607594936709, "train_speed(iter/s)": 
0.670712 }, { "epoch": 0.7000556959856047, "grad_norm": 2.9685568809509277, "learning_rate": 9.524461864443906e-05, "loss": 2.201639175415039, "memory(GiB)": 71.23, "step": 16340, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.670723 }, { "epoch": 0.7002699113148537, "grad_norm": 3.932455062866211, "learning_rate": 9.5241753770989e-05, "loss": 2.5063514709472656, "memory(GiB)": 71.23, "step": 16345, "token_acc": 0.4646840148698885, "train_speed(iter/s)": 0.67071 }, { "epoch": 0.7004841266441026, "grad_norm": 4.122807502746582, "learning_rate": 9.523888807794214e-05, "loss": 2.695454406738281, "memory(GiB)": 71.23, "step": 16350, "token_acc": 0.43699731903485256, "train_speed(iter/s)": 0.670768 }, { "epoch": 0.7006983419733516, "grad_norm": 4.8540873527526855, "learning_rate": 9.523602156535038e-05, "loss": 2.4757129669189455, "memory(GiB)": 71.23, "step": 16355, "token_acc": 0.5096525096525096, "train_speed(iter/s)": 0.670778 }, { "epoch": 0.7009125573026006, "grad_norm": 3.332871198654175, "learning_rate": 9.523315423326566e-05, "loss": 2.203024673461914, "memory(GiB)": 71.23, "step": 16360, "token_acc": 0.5379746835443038, "train_speed(iter/s)": 0.670735 }, { "epoch": 0.7011267726318495, "grad_norm": 3.6396453380584717, "learning_rate": 9.523028608173992e-05, "loss": 2.702623748779297, "memory(GiB)": 71.23, "step": 16365, "token_acc": 0.4463087248322148, "train_speed(iter/s)": 0.670759 }, { "epoch": 0.7013409879610984, "grad_norm": 4.152496337890625, "learning_rate": 9.522741711082512e-05, "loss": 2.580779266357422, "memory(GiB)": 71.23, "step": 16370, "token_acc": 0.4803921568627451, "train_speed(iter/s)": 0.670788 }, { "epoch": 0.7015552032903475, "grad_norm": 5.3543500900268555, "learning_rate": 9.522454732057322e-05, "loss": 2.233481597900391, "memory(GiB)": 71.23, "step": 16375, "token_acc": 0.5150501672240803, "train_speed(iter/s)": 0.670812 }, { "epoch": 0.7017694186195964, "grad_norm": 3.2069168090820312, "learning_rate": 9.522167671103624e-05, 
"loss": 2.5372035980224608, "memory(GiB)": 71.23, "step": 16380, "token_acc": 0.48534201954397393, "train_speed(iter/s)": 0.670844 }, { "epoch": 0.7019836339488453, "grad_norm": 3.49625301361084, "learning_rate": 9.521880528226614e-05, "loss": 2.44757137298584, "memory(GiB)": 71.23, "step": 16385, "token_acc": 0.4882154882154882, "train_speed(iter/s)": 0.670868 }, { "epoch": 0.7021978492780944, "grad_norm": 3.343066930770874, "learning_rate": 9.521593303431497e-05, "loss": 2.2956159591674803, "memory(GiB)": 71.23, "step": 16390, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.670858 }, { "epoch": 0.7024120646073433, "grad_norm": 3.5646278858184814, "learning_rate": 9.521305996723478e-05, "loss": 2.4647985458374024, "memory(GiB)": 71.23, "step": 16395, "token_acc": 0.4405797101449275, "train_speed(iter/s)": 0.67089 }, { "epoch": 0.7026262799365922, "grad_norm": 3.229846954345703, "learning_rate": 9.521018608107757e-05, "loss": 2.6033613204956056, "memory(GiB)": 71.23, "step": 16400, "token_acc": 0.43086816720257237, "train_speed(iter/s)": 0.670863 }, { "epoch": 0.7028404952658412, "grad_norm": 4.448710918426514, "learning_rate": 9.520731137589543e-05, "loss": 2.3615400314331056, "memory(GiB)": 71.23, "step": 16405, "token_acc": 0.5018181818181818, "train_speed(iter/s)": 0.67087 }, { "epoch": 0.7030547105950902, "grad_norm": 3.398484468460083, "learning_rate": 9.520443585174046e-05, "loss": 2.4330684661865236, "memory(GiB)": 72.85, "step": 16410, "token_acc": 0.48028673835125446, "train_speed(iter/s)": 0.670803 }, { "epoch": 0.7032689259243391, "grad_norm": 3.0191025733947754, "learning_rate": 9.520155950866471e-05, "loss": 2.430792236328125, "memory(GiB)": 72.85, "step": 16415, "token_acc": 0.46551724137931033, "train_speed(iter/s)": 0.670773 }, { "epoch": 0.7034831412535881, "grad_norm": 3.4464962482452393, "learning_rate": 9.519868234672031e-05, "loss": 2.502975273132324, "memory(GiB)": 72.85, "step": 16420, "token_acc": 0.5057034220532319, 
"train_speed(iter/s)": 0.670768 }, { "epoch": 0.703697356582837, "grad_norm": 3.818570852279663, "learning_rate": 9.519580436595938e-05, "loss": 2.505693054199219, "memory(GiB)": 72.85, "step": 16425, "token_acc": 0.4756944444444444, "train_speed(iter/s)": 0.670782 }, { "epoch": 0.703911571912086, "grad_norm": 3.87395977973938, "learning_rate": 9.519292556643405e-05, "loss": 2.4528560638427734, "memory(GiB)": 72.85, "step": 16430, "token_acc": 0.47315436241610737, "train_speed(iter/s)": 0.670817 }, { "epoch": 0.704125787241335, "grad_norm": 3.7561676502227783, "learning_rate": 9.519004594819649e-05, "loss": 2.3546985626220702, "memory(GiB)": 72.85, "step": 16435, "token_acc": 0.5, "train_speed(iter/s)": 0.670828 }, { "epoch": 0.7043400025705839, "grad_norm": 4.2078728675842285, "learning_rate": 9.518716551129885e-05, "loss": 2.3846569061279297, "memory(GiB)": 72.85, "step": 16440, "token_acc": 0.4775641025641026, "train_speed(iter/s)": 0.670855 }, { "epoch": 0.7045542178998329, "grad_norm": 5.569983959197998, "learning_rate": 9.518428425579332e-05, "loss": 2.3422077178955076, "memory(GiB)": 72.85, "step": 16445, "token_acc": 0.47388059701492535, "train_speed(iter/s)": 0.670884 }, { "epoch": 0.7047684332290819, "grad_norm": 3.3003151416778564, "learning_rate": 9.51814021817321e-05, "loss": 2.420082664489746, "memory(GiB)": 72.85, "step": 16450, "token_acc": 0.46715328467153283, "train_speed(iter/s)": 0.670868 }, { "epoch": 0.7049826485583308, "grad_norm": 3.8136444091796875, "learning_rate": 9.517851928916739e-05, "loss": 2.430623435974121, "memory(GiB)": 72.85, "step": 16455, "token_acc": 0.4807121661721068, "train_speed(iter/s)": 0.670852 }, { "epoch": 0.7051968638875799, "grad_norm": 2.961015224456787, "learning_rate": 9.517563557815141e-05, "loss": 2.787270355224609, "memory(GiB)": 72.85, "step": 16460, "token_acc": 0.46179401993355484, "train_speed(iter/s)": 0.670858 }, { "epoch": 0.7054110792168288, "grad_norm": 3.328477144241333, "learning_rate": 
9.517275104873644e-05, "loss": 2.482010269165039, "memory(GiB)": 72.85, "step": 16465, "token_acc": 0.5194805194805194, "train_speed(iter/s)": 0.670892 }, { "epoch": 0.7056252945460777, "grad_norm": 4.200691223144531, "learning_rate": 9.516986570097467e-05, "loss": 2.4715099334716797, "memory(GiB)": 72.85, "step": 16470, "token_acc": 0.4233128834355828, "train_speed(iter/s)": 0.670908 }, { "epoch": 0.7058395098753267, "grad_norm": 3.5084710121154785, "learning_rate": 9.516697953491846e-05, "loss": 2.345102882385254, "memory(GiB)": 72.85, "step": 16475, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.670912 }, { "epoch": 0.7060537252045757, "grad_norm": 6.698154449462891, "learning_rate": 9.516409255062003e-05, "loss": 2.7229230880737303, "memory(GiB)": 72.85, "step": 16480, "token_acc": 0.450920245398773, "train_speed(iter/s)": 0.670921 }, { "epoch": 0.7062679405338246, "grad_norm": 4.329956531524658, "learning_rate": 9.516120474813168e-05, "loss": 2.528863716125488, "memory(GiB)": 72.85, "step": 16485, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.670965 }, { "epoch": 0.7064821558630736, "grad_norm": 3.2443277835845947, "learning_rate": 9.515831612750574e-05, "loss": 2.1802036285400392, "memory(GiB)": 72.85, "step": 16490, "token_acc": 0.5528169014084507, "train_speed(iter/s)": 0.670943 }, { "epoch": 0.7066963711923225, "grad_norm": 4.2786455154418945, "learning_rate": 9.515542668879455e-05, "loss": 2.038770294189453, "memory(GiB)": 72.85, "step": 16495, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.670953 }, { "epoch": 0.7069105865215715, "grad_norm": 5.12314510345459, "learning_rate": 9.515253643205045e-05, "loss": 2.2246337890625, "memory(GiB)": 72.85, "step": 16500, "token_acc": 0.5205992509363296, "train_speed(iter/s)": 0.670983 }, { "epoch": 0.7069105865215715, "eval_loss": 2.1134626865386963, "eval_runtime": 17.3636, "eval_samples_per_second": 5.759, "eval_steps_per_second": 5.759, "eval_token_acc": 0.5199449793672627, 
"step": 16500 }, { "epoch": 0.7071248018508205, "grad_norm": 3.8691842555999756, "learning_rate": 9.514964535732577e-05, "loss": 2.231495666503906, "memory(GiB)": 72.85, "step": 16505, "token_acc": 0.5230166503428012, "train_speed(iter/s)": 0.670453 }, { "epoch": 0.7073390171800694, "grad_norm": 3.559014320373535, "learning_rate": 9.514675346467295e-05, "loss": 2.49149169921875, "memory(GiB)": 72.85, "step": 16510, "token_acc": 0.46254071661237783, "train_speed(iter/s)": 0.670478 }, { "epoch": 0.7075532325093183, "grad_norm": 3.352451801300049, "learning_rate": 9.514386075414432e-05, "loss": 2.498264694213867, "memory(GiB)": 72.85, "step": 16515, "token_acc": 0.49666666666666665, "train_speed(iter/s)": 0.670469 }, { "epoch": 0.7077674478385674, "grad_norm": 3.490600824356079, "learning_rate": 9.514096722579231e-05, "loss": 2.6057281494140625, "memory(GiB)": 72.85, "step": 16520, "token_acc": 0.468944099378882, "train_speed(iter/s)": 0.670453 }, { "epoch": 0.7079816631678163, "grad_norm": 3.1694531440734863, "learning_rate": 9.513807287966932e-05, "loss": 2.331610107421875, "memory(GiB)": 72.85, "step": 16525, "token_acc": 0.49829351535836175, "train_speed(iter/s)": 0.670474 }, { "epoch": 0.7081958784970652, "grad_norm": 3.5388383865356445, "learning_rate": 9.513517771582782e-05, "loss": 2.412037467956543, "memory(GiB)": 72.85, "step": 16530, "token_acc": 0.5122699386503068, "train_speed(iter/s)": 0.670446 }, { "epoch": 0.7084100938263143, "grad_norm": 4.109991073608398, "learning_rate": 9.513228173432022e-05, "loss": 2.267598533630371, "memory(GiB)": 72.85, "step": 16535, "token_acc": 0.5015479876160991, "train_speed(iter/s)": 0.670478 }, { "epoch": 0.7086243091555632, "grad_norm": 2.9978244304656982, "learning_rate": 9.5129384935199e-05, "loss": 2.2238561630249025, "memory(GiB)": 72.85, "step": 16540, "token_acc": 0.5126582278481012, "train_speed(iter/s)": 0.670423 }, { "epoch": 0.7088385244848121, "grad_norm": 3.426783800125122, "learning_rate": 
9.512648731851663e-05, "loss": 2.459983062744141, "memory(GiB)": 72.85, "step": 16545, "token_acc": 0.49603174603174605, "train_speed(iter/s)": 0.670433 }, { "epoch": 0.7090527398140611, "grad_norm": 3.2233927249908447, "learning_rate": 9.512358888432562e-05, "loss": 2.5365234375, "memory(GiB)": 72.85, "step": 16550, "token_acc": 0.4718498659517426, "train_speed(iter/s)": 0.670437 }, { "epoch": 0.7092669551433101, "grad_norm": 2.4496967792510986, "learning_rate": 9.512068963267846e-05, "loss": 2.541972541809082, "memory(GiB)": 72.85, "step": 16555, "token_acc": 0.46474358974358976, "train_speed(iter/s)": 0.67045 }, { "epoch": 0.709481170472559, "grad_norm": 4.004792213439941, "learning_rate": 9.511778956362769e-05, "loss": 2.2730972290039064, "memory(GiB)": 72.85, "step": 16560, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.670429 }, { "epoch": 0.709695385801808, "grad_norm": 3.365309715270996, "learning_rate": 9.511488867722584e-05, "loss": 2.196044921875, "memory(GiB)": 72.85, "step": 16565, "token_acc": 0.49169435215946844, "train_speed(iter/s)": 0.670407 }, { "epoch": 0.7099096011310569, "grad_norm": 3.3918042182922363, "learning_rate": 9.511198697352545e-05, "loss": 2.5542623519897463, "memory(GiB)": 72.85, "step": 16570, "token_acc": 0.4634146341463415, "train_speed(iter/s)": 0.67042 }, { "epoch": 0.7101238164603059, "grad_norm": 4.267382621765137, "learning_rate": 9.510908445257911e-05, "loss": 2.299769973754883, "memory(GiB)": 72.85, "step": 16575, "token_acc": 0.4758364312267658, "train_speed(iter/s)": 0.670438 }, { "epoch": 0.7103380317895549, "grad_norm": 6.097955703735352, "learning_rate": 9.510618111443938e-05, "loss": 2.6042716979980467, "memory(GiB)": 72.85, "step": 16580, "token_acc": 0.47315436241610737, "train_speed(iter/s)": 0.670463 }, { "epoch": 0.7105522471188038, "grad_norm": 3.5857300758361816, "learning_rate": 9.510327695915885e-05, "loss": 2.5347455978393554, "memory(GiB)": 72.85, "step": 16585, "token_acc": 
0.48028673835125446, "train_speed(iter/s)": 0.670463 }, { "epoch": 0.7107664624480527, "grad_norm": 3.7490227222442627, "learning_rate": 9.510037198679019e-05, "loss": 2.3605026245117187, "memory(GiB)": 72.85, "step": 16590, "token_acc": 0.43609022556390975, "train_speed(iter/s)": 0.670495 }, { "epoch": 0.7109806777773018, "grad_norm": 3.1458144187927246, "learning_rate": 9.509746619738595e-05, "loss": 2.1756940841674806, "memory(GiB)": 72.85, "step": 16595, "token_acc": 0.5163636363636364, "train_speed(iter/s)": 0.67047 }, { "epoch": 0.7111948931065507, "grad_norm": 2.967731475830078, "learning_rate": 9.509455959099882e-05, "loss": 2.4393051147460936, "memory(GiB)": 72.85, "step": 16600, "token_acc": 0.45703125, "train_speed(iter/s)": 0.670493 }, { "epoch": 0.7114091084357996, "grad_norm": 2.888641119003296, "learning_rate": 9.509165216768143e-05, "loss": 2.2220037460327147, "memory(GiB)": 72.85, "step": 16605, "token_acc": 0.496551724137931, "train_speed(iter/s)": 0.670522 }, { "epoch": 0.7116233237650487, "grad_norm": 4.2639594078063965, "learning_rate": 9.508874392748646e-05, "loss": 2.3009592056274415, "memory(GiB)": 72.85, "step": 16610, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.670505 }, { "epoch": 0.7118375390942976, "grad_norm": 3.4061996936798096, "learning_rate": 9.508583487046661e-05, "loss": 2.349211502075195, "memory(GiB)": 72.85, "step": 16615, "token_acc": 0.4625, "train_speed(iter/s)": 0.670477 }, { "epoch": 0.7120517544235465, "grad_norm": 7.173224449157715, "learning_rate": 9.508292499667457e-05, "loss": 2.704397964477539, "memory(GiB)": 72.85, "step": 16620, "token_acc": 0.4404332129963899, "train_speed(iter/s)": 0.670501 }, { "epoch": 0.7122659697527955, "grad_norm": 3.790297746658325, "learning_rate": 9.508001430616304e-05, "loss": 2.5443965911865236, "memory(GiB)": 72.85, "step": 16625, "token_acc": 0.488135593220339, "train_speed(iter/s)": 0.670515 }, { "epoch": 0.7124801850820445, "grad_norm": 4.254510402679443, 
"learning_rate": 9.507710279898476e-05, "loss": 2.603601837158203, "memory(GiB)": 72.85, "step": 16630, "token_acc": 0.4744525547445255, "train_speed(iter/s)": 0.670499 }, { "epoch": 0.7126944004112934, "grad_norm": 4.236608982086182, "learning_rate": 9.507419047519248e-05, "loss": 2.2847723007202148, "memory(GiB)": 72.85, "step": 16635, "token_acc": 0.4754601226993865, "train_speed(iter/s)": 0.67052 }, { "epoch": 0.7129086157405424, "grad_norm": 3.2074952125549316, "learning_rate": 9.507127733483897e-05, "loss": 2.5759502410888673, "memory(GiB)": 72.85, "step": 16640, "token_acc": 0.4885245901639344, "train_speed(iter/s)": 0.670516 }, { "epoch": 0.7131228310697914, "grad_norm": 3.414696455001831, "learning_rate": 9.506836337797698e-05, "loss": 2.378934860229492, "memory(GiB)": 72.85, "step": 16645, "token_acc": 0.5072992700729927, "train_speed(iter/s)": 0.670522 }, { "epoch": 0.7133370463990403, "grad_norm": 4.066232204437256, "learning_rate": 9.506544860465932e-05, "loss": 2.5053630828857423, "memory(GiB)": 72.85, "step": 16650, "token_acc": 0.4852459016393443, "train_speed(iter/s)": 0.670525 }, { "epoch": 0.7135512617282893, "grad_norm": 3.803062915802002, "learning_rate": 9.506253301493876e-05, "loss": 2.2123498916625977, "memory(GiB)": 72.85, "step": 16655, "token_acc": 0.5506329113924051, "train_speed(iter/s)": 0.670556 }, { "epoch": 0.7137654770575382, "grad_norm": 3.001429557800293, "learning_rate": 9.505961660886816e-05, "loss": 2.0777217864990236, "memory(GiB)": 72.85, "step": 16660, "token_acc": 0.5535055350553506, "train_speed(iter/s)": 0.670582 }, { "epoch": 0.7139796923867872, "grad_norm": 3.7168734073638916, "learning_rate": 9.505669938650034e-05, "loss": 2.5682046890258787, "memory(GiB)": 72.85, "step": 16665, "token_acc": 0.46204620462046203, "train_speed(iter/s)": 0.670632 }, { "epoch": 0.7141939077160362, "grad_norm": 3.489006519317627, "learning_rate": 9.505378134788814e-05, "loss": 2.4154743194580077, "memory(GiB)": 72.85, "step": 16670, 
"token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.670651 }, { "epoch": 0.7144081230452851, "grad_norm": 3.8267247676849365, "learning_rate": 9.505086249308442e-05, "loss": 2.342130661010742, "memory(GiB)": 72.85, "step": 16675, "token_acc": 0.5065963060686016, "train_speed(iter/s)": 0.670663 }, { "epoch": 0.714622338374534, "grad_norm": 2.993577003479004, "learning_rate": 9.504794282214207e-05, "loss": 2.3540855407714845, "memory(GiB)": 72.85, "step": 16680, "token_acc": 0.4899328859060403, "train_speed(iter/s)": 0.670613 }, { "epoch": 0.7148365537037831, "grad_norm": 4.437579154968262, "learning_rate": 9.504502233511396e-05, "loss": 2.5509872436523438, "memory(GiB)": 72.85, "step": 16685, "token_acc": 0.45493562231759654, "train_speed(iter/s)": 0.670586 }, { "epoch": 0.715050769033032, "grad_norm": 3.2468786239624023, "learning_rate": 9.504210103205305e-05, "loss": 2.1875173568725588, "memory(GiB)": 72.85, "step": 16690, "token_acc": 0.4963235294117647, "train_speed(iter/s)": 0.670555 }, { "epoch": 0.7152649843622809, "grad_norm": 3.6768457889556885, "learning_rate": 9.50391789130122e-05, "loss": 2.8333913803100588, "memory(GiB)": 72.85, "step": 16695, "token_acc": 0.42474916387959866, "train_speed(iter/s)": 0.670595 }, { "epoch": 0.71547919969153, "grad_norm": 3.9814836978912354, "learning_rate": 9.503625597804437e-05, "loss": 2.3540470123291017, "memory(GiB)": 72.85, "step": 16700, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.670616 }, { "epoch": 0.7156934150207789, "grad_norm": 4.55074405670166, "learning_rate": 9.503333222720253e-05, "loss": 2.4425994873046877, "memory(GiB)": 72.85, "step": 16705, "token_acc": 0.5, "train_speed(iter/s)": 0.670639 }, { "epoch": 0.7159076303500278, "grad_norm": 3.2072582244873047, "learning_rate": 9.503040766053963e-05, "loss": 2.6126094818115235, "memory(GiB)": 72.85, "step": 16710, "token_acc": 0.45051194539249145, "train_speed(iter/s)": 0.67063 }, { "epoch": 0.7161218456792768, "grad_norm": 
4.973138332366943, "learning_rate": 9.502748227810865e-05, "loss": 2.4731910705566404, "memory(GiB)": 72.85, "step": 16715, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.670627 }, { "epoch": 0.7163360610085258, "grad_norm": 3.384373188018799, "learning_rate": 9.502455607996258e-05, "loss": 2.5375289916992188, "memory(GiB)": 72.85, "step": 16720, "token_acc": 0.44108761329305135, "train_speed(iter/s)": 0.670658 }, { "epoch": 0.7165502763377747, "grad_norm": 3.478512763977051, "learning_rate": 9.502162906615446e-05, "loss": 2.4579153060913086, "memory(GiB)": 72.85, "step": 16725, "token_acc": 0.45182724252491696, "train_speed(iter/s)": 0.670678 }, { "epoch": 0.7167644916670237, "grad_norm": 2.7233262062072754, "learning_rate": 9.50187012367373e-05, "loss": 2.061381721496582, "memory(GiB)": 72.85, "step": 16730, "token_acc": 0.5410447761194029, "train_speed(iter/s)": 0.670673 }, { "epoch": 0.7169787069962726, "grad_norm": 4.039930820465088, "learning_rate": 9.501577259176412e-05, "loss": 2.3425174713134767, "memory(GiB)": 72.85, "step": 16735, "token_acc": 0.47635135135135137, "train_speed(iter/s)": 0.670687 }, { "epoch": 0.7171929223255216, "grad_norm": 4.2180376052856445, "learning_rate": 9.5012843131288e-05, "loss": 2.6183547973632812, "memory(GiB)": 72.85, "step": 16740, "token_acc": 0.49050632911392406, "train_speed(iter/s)": 0.670662 }, { "epoch": 0.7174071376547706, "grad_norm": 3.983851432800293, "learning_rate": 9.500991285536198e-05, "loss": 2.4922494888305664, "memory(GiB)": 72.85, "step": 16745, "token_acc": 0.4631578947368421, "train_speed(iter/s)": 0.670683 }, { "epoch": 0.7176213529840195, "grad_norm": 3.8589632511138916, "learning_rate": 9.50069817640392e-05, "loss": 2.536539649963379, "memory(GiB)": 72.85, "step": 16750, "token_acc": 0.4809384164222874, "train_speed(iter/s)": 0.67069 }, { "epoch": 0.7178355683132684, "grad_norm": 3.3001363277435303, "learning_rate": 9.50040498573727e-05, "loss": 2.524151611328125, "memory(GiB)": 72.85, 
"step": 16755, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.670713 }, { "epoch": 0.7180497836425175, "grad_norm": 3.273837089538574, "learning_rate": 9.500111713541562e-05, "loss": 2.34970760345459, "memory(GiB)": 72.85, "step": 16760, "token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.670737 }, { "epoch": 0.7182639989717664, "grad_norm": 3.539116859436035, "learning_rate": 9.49981835982211e-05, "loss": 2.270031547546387, "memory(GiB)": 72.85, "step": 16765, "token_acc": 0.49666666666666665, "train_speed(iter/s)": 0.670749 }, { "epoch": 0.7184782143010153, "grad_norm": 3.247943639755249, "learning_rate": 9.499524924584226e-05, "loss": 2.3883947372436523, "memory(GiB)": 72.85, "step": 16770, "token_acc": 0.48322147651006714, "train_speed(iter/s)": 0.670718 }, { "epoch": 0.7186924296302644, "grad_norm": 3.5504050254821777, "learning_rate": 9.499231407833229e-05, "loss": 2.3187997817993162, "memory(GiB)": 72.85, "step": 16775, "token_acc": 0.4743083003952569, "train_speed(iter/s)": 0.670689 }, { "epoch": 0.7189066449595133, "grad_norm": 3.868877649307251, "learning_rate": 9.498937809574432e-05, "loss": 2.27774600982666, "memory(GiB)": 72.85, "step": 16780, "token_acc": 0.5032894736842105, "train_speed(iter/s)": 0.670615 }, { "epoch": 0.7191208602887622, "grad_norm": 5.279978275299072, "learning_rate": 9.498644129813158e-05, "loss": 2.4842239379882813, "memory(GiB)": 72.85, "step": 16785, "token_acc": 0.5034013605442177, "train_speed(iter/s)": 0.67062 }, { "epoch": 0.7193350756180112, "grad_norm": 4.261815547943115, "learning_rate": 9.498350368554726e-05, "loss": 2.067280387878418, "memory(GiB)": 72.85, "step": 16790, "token_acc": 0.5136186770428015, "train_speed(iter/s)": 0.670594 }, { "epoch": 0.7195492909472602, "grad_norm": 4.58241605758667, "learning_rate": 9.498056525804456e-05, "loss": 2.5848217010498047, "memory(GiB)": 72.85, "step": 16795, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.670633 }, { "epoch": 0.7197635062765092, 
"grad_norm": 3.0059146881103516, "learning_rate": 9.497762601567672e-05, "loss": 2.418684387207031, "memory(GiB)": 72.85, "step": 16800, "token_acc": 0.49834983498349833, "train_speed(iter/s)": 0.670655 }, { "epoch": 0.7199777216057581, "grad_norm": 3.8391613960266113, "learning_rate": 9.497468595849701e-05, "loss": 2.4677730560302735, "memory(GiB)": 72.85, "step": 16805, "token_acc": 0.5086505190311419, "train_speed(iter/s)": 0.670672 }, { "epoch": 0.720191936935007, "grad_norm": 2.748828887939453, "learning_rate": 9.497174508655866e-05, "loss": 2.418872833251953, "memory(GiB)": 72.85, "step": 16810, "token_acc": 0.49230769230769234, "train_speed(iter/s)": 0.670701 }, { "epoch": 0.7204061522642561, "grad_norm": 4.127662658691406, "learning_rate": 9.496880339991497e-05, "loss": 2.333638381958008, "memory(GiB)": 72.85, "step": 16815, "token_acc": 0.4725274725274725, "train_speed(iter/s)": 0.670746 }, { "epoch": 0.720620367593505, "grad_norm": 3.6744954586029053, "learning_rate": 9.496586089861923e-05, "loss": 2.43590087890625, "memory(GiB)": 72.85, "step": 16820, "token_acc": 0.4658385093167702, "train_speed(iter/s)": 0.670728 }, { "epoch": 0.7208345829227539, "grad_norm": 3.8013734817504883, "learning_rate": 9.496291758272472e-05, "loss": 2.3282772064208985, "memory(GiB)": 72.85, "step": 16825, "token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.67071 }, { "epoch": 0.721048798252003, "grad_norm": 3.667778253555298, "learning_rate": 9.495997345228477e-05, "loss": 2.0764930725097654, "memory(GiB)": 72.85, "step": 16830, "token_acc": 0.5205992509363296, "train_speed(iter/s)": 0.670753 }, { "epoch": 0.7212630135812519, "grad_norm": 4.325033187866211, "learning_rate": 9.495702850735275e-05, "loss": 2.3546401977539064, "memory(GiB)": 72.85, "step": 16835, "token_acc": 0.46598639455782315, "train_speed(iter/s)": 0.670712 }, { "epoch": 0.7214772289105008, "grad_norm": 3.715855121612549, "learning_rate": 9.495408274798198e-05, "loss": 2.4567970275878905, 
"memory(GiB)": 72.85, "step": 16840, "token_acc": 0.47266881028938906, "train_speed(iter/s)": 0.670665 }, { "epoch": 0.7216914442397498, "grad_norm": 3.628380298614502, "learning_rate": 9.495113617422583e-05, "loss": 2.299303436279297, "memory(GiB)": 72.85, "step": 16845, "token_acc": 0.5165289256198347, "train_speed(iter/s)": 0.670677 }, { "epoch": 0.7219056595689988, "grad_norm": 4.566257953643799, "learning_rate": 9.494818878613767e-05, "loss": 2.3078567504882814, "memory(GiB)": 72.85, "step": 16850, "token_acc": 0.4867924528301887, "train_speed(iter/s)": 0.67066 }, { "epoch": 0.7221198748982477, "grad_norm": 5.33034086227417, "learning_rate": 9.494524058377092e-05, "loss": 2.4600780487060545, "memory(GiB)": 72.85, "step": 16855, "token_acc": 0.5022222222222222, "train_speed(iter/s)": 0.670671 }, { "epoch": 0.7223340902274967, "grad_norm": 3.835768938064575, "learning_rate": 9.494229156717897e-05, "loss": 2.413389778137207, "memory(GiB)": 72.85, "step": 16860, "token_acc": 0.4603174603174603, "train_speed(iter/s)": 0.670707 }, { "epoch": 0.7225483055567457, "grad_norm": 4.183775901794434, "learning_rate": 9.493934173641524e-05, "loss": 2.4746822357177733, "memory(GiB)": 72.85, "step": 16865, "token_acc": 0.46, "train_speed(iter/s)": 0.670741 }, { "epoch": 0.7227625208859946, "grad_norm": 4.124482154846191, "learning_rate": 9.493639109153318e-05, "loss": 2.433829689025879, "memory(GiB)": 72.85, "step": 16870, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.670716 }, { "epoch": 0.7229767362152436, "grad_norm": 3.258138656616211, "learning_rate": 9.493343963258626e-05, "loss": 2.306768226623535, "memory(GiB)": 72.85, "step": 16875, "token_acc": 0.49809885931558934, "train_speed(iter/s)": 0.670764 }, { "epoch": 0.7231909515444925, "grad_norm": 4.007720470428467, "learning_rate": 9.49304873596279e-05, "loss": 2.5289039611816406, "memory(GiB)": 72.85, "step": 16880, "token_acc": 0.47474747474747475, "train_speed(iter/s)": 0.670792 }, { "epoch": 
0.7234051668737415, "grad_norm": 4.958920955657959, "learning_rate": 9.492753427271164e-05, "loss": 2.4976783752441407, "memory(GiB)": 72.85, "step": 16885, "token_acc": 0.46283783783783783, "train_speed(iter/s)": 0.670799 }, { "epoch": 0.7236193822029905, "grad_norm": 3.7960453033447266, "learning_rate": 9.492458037189094e-05, "loss": 2.3956941604614257, "memory(GiB)": 72.85, "step": 16890, "token_acc": 0.48554913294797686, "train_speed(iter/s)": 0.670794 }, { "epoch": 0.7238335975322394, "grad_norm": 3.291882276535034, "learning_rate": 9.492162565721933e-05, "loss": 2.564595413208008, "memory(GiB)": 72.85, "step": 16895, "token_acc": 0.498220640569395, "train_speed(iter/s)": 0.670818 }, { "epoch": 0.7240478128614883, "grad_norm": 3.253359794616699, "learning_rate": 9.491867012875033e-05, "loss": 2.2262372970581055, "memory(GiB)": 72.85, "step": 16900, "token_acc": 0.46785714285714286, "train_speed(iter/s)": 0.670846 }, { "epoch": 0.7242620281907374, "grad_norm": 4.923019886016846, "learning_rate": 9.491571378653748e-05, "loss": 2.597179412841797, "memory(GiB)": 72.85, "step": 16905, "token_acc": 0.45714285714285713, "train_speed(iter/s)": 0.670883 }, { "epoch": 0.7244762435199863, "grad_norm": 3.7680015563964844, "learning_rate": 9.491275663063434e-05, "loss": 2.453512191772461, "memory(GiB)": 72.85, "step": 16910, "token_acc": 0.4684385382059801, "train_speed(iter/s)": 0.670886 }, { "epoch": 0.7246904588492352, "grad_norm": 3.1504673957824707, "learning_rate": 9.490979866109449e-05, "loss": 2.2839401245117186, "memory(GiB)": 72.85, "step": 16915, "token_acc": 0.5419847328244275, "train_speed(iter/s)": 0.670887 }, { "epoch": 0.7249046741784843, "grad_norm": 4.042983531951904, "learning_rate": 9.49068398779715e-05, "loss": 2.2743202209472657, "memory(GiB)": 72.85, "step": 16920, "token_acc": 0.546242774566474, "train_speed(iter/s)": 0.670921 }, { "epoch": 0.7251188895077332, "grad_norm": 5.535808086395264, "learning_rate": 9.490388028131897e-05, "loss": 
2.5977333068847654, "memory(GiB)": 72.85, "step": 16925, "token_acc": 0.5, "train_speed(iter/s)": 0.670928 }, { "epoch": 0.7253331048369821, "grad_norm": 4.752384662628174, "learning_rate": 9.490091987119055e-05, "loss": 2.118843650817871, "memory(GiB)": 72.85, "step": 16930, "token_acc": 0.4957983193277311, "train_speed(iter/s)": 0.670909 }, { "epoch": 0.7255473201662311, "grad_norm": 3.50651478767395, "learning_rate": 9.489795864763982e-05, "loss": 2.3862829208374023, "memory(GiB)": 72.85, "step": 16935, "token_acc": 0.5127118644067796, "train_speed(iter/s)": 0.670901 }, { "epoch": 0.7257615354954801, "grad_norm": 3.2980387210845947, "learning_rate": 9.489499661072045e-05, "loss": 2.6889503479003904, "memory(GiB)": 72.85, "step": 16940, "token_acc": 0.4600760456273764, "train_speed(iter/s)": 0.670903 }, { "epoch": 0.725975750824729, "grad_norm": 4.095372676849365, "learning_rate": 9.489203376048612e-05, "loss": 2.4302467346191405, "memory(GiB)": 72.85, "step": 16945, "token_acc": 0.48363636363636364, "train_speed(iter/s)": 0.670924 }, { "epoch": 0.726189966153978, "grad_norm": 3.4588329792022705, "learning_rate": 9.488907009699049e-05, "loss": 2.628093719482422, "memory(GiB)": 72.85, "step": 16950, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.670929 }, { "epoch": 0.7264041814832269, "grad_norm": 3.843646287918091, "learning_rate": 9.488610562028724e-05, "loss": 2.426243209838867, "memory(GiB)": 72.85, "step": 16955, "token_acc": 0.5, "train_speed(iter/s)": 0.670941 }, { "epoch": 0.7266183968124759, "grad_norm": 3.420804977416992, "learning_rate": 9.488314033043006e-05, "loss": 2.427620697021484, "memory(GiB)": 72.85, "step": 16960, "token_acc": 0.4880952380952381, "train_speed(iter/s)": 0.670976 }, { "epoch": 0.7268326121417249, "grad_norm": 3.0811586380004883, "learning_rate": 9.488017422747271e-05, "loss": 2.3951847076416017, "memory(GiB)": 72.85, "step": 16965, "token_acc": 0.5345911949685535, "train_speed(iter/s)": 0.671012 }, { "epoch": 
0.7270468274709738, "grad_norm": 4.344085693359375, "learning_rate": 9.48772073114689e-05, "loss": 2.385565757751465, "memory(GiB)": 72.85, "step": 16970, "token_acc": 0.4808362369337979, "train_speed(iter/s)": 0.671038 }, { "epoch": 0.7272610428002227, "grad_norm": 4.04475212097168, "learning_rate": 9.487423958247239e-05, "loss": 1.8717992782592774, "memory(GiB)": 72.85, "step": 16975, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.671039 }, { "epoch": 0.7274752581294718, "grad_norm": 4.48679256439209, "learning_rate": 9.487127104053692e-05, "loss": 2.5541534423828125, "memory(GiB)": 72.85, "step": 16980, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.671027 }, { "epoch": 0.7276894734587207, "grad_norm": 3.645644426345825, "learning_rate": 9.486889562170865e-05, "loss": 2.5023782730102537, "memory(GiB)": 72.85, "step": 16985, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.671043 }, { "epoch": 0.7279036887879696, "grad_norm": 4.381438255310059, "learning_rate": 9.48659256166186e-05, "loss": 2.323589324951172, "memory(GiB)": 72.85, "step": 16990, "token_acc": 0.4730290456431535, "train_speed(iter/s)": 0.670998 }, { "epoch": 0.7281179041172187, "grad_norm": 4.555505752563477, "learning_rate": 9.486295479874025e-05, "loss": 2.3456592559814453, "memory(GiB)": 72.85, "step": 16995, "token_acc": 0.5283018867924528, "train_speed(iter/s)": 0.670991 }, { "epoch": 0.7283321194464676, "grad_norm": 4.314784049987793, "learning_rate": 9.485998316812736e-05, "loss": 2.493694877624512, "memory(GiB)": 72.85, "step": 17000, "token_acc": 0.4963235294117647, "train_speed(iter/s)": 0.671 }, { "epoch": 0.7283321194464676, "eval_loss": 2.1597466468811035, "eval_runtime": 16.9713, "eval_samples_per_second": 5.892, "eval_steps_per_second": 5.892, "eval_token_acc": 0.4909847434119279, "step": 17000 }, { "epoch": 0.7285463347757165, "grad_norm": 3.2833542823791504, "learning_rate": 9.485701072483381e-05, "loss": 2.2826507568359373, "memory(GiB)": 72.85, 
"step": 17005, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.670472 }, { "epoch": 0.7287605501049655, "grad_norm": 3.2501797676086426, "learning_rate": 9.485403746891343e-05, "loss": 2.266253852844238, "memory(GiB)": 72.85, "step": 17010, "token_acc": 0.5062761506276151, "train_speed(iter/s)": 0.670468 }, { "epoch": 0.7289747654342145, "grad_norm": 3.4188668727874756, "learning_rate": 9.485106340042008e-05, "loss": 2.27923469543457, "memory(GiB)": 72.85, "step": 17015, "token_acc": 0.5221843003412969, "train_speed(iter/s)": 0.670485 }, { "epoch": 0.7291889807634634, "grad_norm": 3.753182888031006, "learning_rate": 9.484808851940763e-05, "loss": 2.5180900573730467, "memory(GiB)": 72.85, "step": 17020, "token_acc": 0.4784172661870504, "train_speed(iter/s)": 0.670522 }, { "epoch": 0.7294031960927124, "grad_norm": 3.365077257156372, "learning_rate": 9.484511282593e-05, "loss": 2.1981988906860352, "memory(GiB)": 72.85, "step": 17025, "token_acc": 0.48985507246376814, "train_speed(iter/s)": 0.670537 }, { "epoch": 0.7296174114219613, "grad_norm": 3.1652307510375977, "learning_rate": 9.484213632004108e-05, "loss": 2.207978630065918, "memory(GiB)": 72.85, "step": 17030, "token_acc": 0.512396694214876, "train_speed(iter/s)": 0.67056 }, { "epoch": 0.7298316267512103, "grad_norm": 3.916269302368164, "learning_rate": 9.483915900179481e-05, "loss": 2.2658451080322264, "memory(GiB)": 72.85, "step": 17035, "token_acc": 0.5433962264150943, "train_speed(iter/s)": 0.670606 }, { "epoch": 0.7300458420804593, "grad_norm": 3.939194917678833, "learning_rate": 9.48361808712451e-05, "loss": 2.7248504638671873, "memory(GiB)": 72.85, "step": 17040, "token_acc": 0.43478260869565216, "train_speed(iter/s)": 0.670605 }, { "epoch": 0.7302600574097082, "grad_norm": 6.1160454750061035, "learning_rate": 9.483320192844592e-05, "loss": 2.5222295761108398, "memory(GiB)": 72.85, "step": 17045, "token_acc": 0.459214501510574, "train_speed(iter/s)": 0.670595 }, { "epoch": 0.7304742727389572, 
"grad_norm": 3.677734851837158, "learning_rate": 9.483022217345121e-05, "loss": 2.5020299911499024, "memory(GiB)": 72.85, "step": 17050, "token_acc": 0.5058365758754864, "train_speed(iter/s)": 0.670599 }, { "epoch": 0.7306884880682062, "grad_norm": 3.327484130859375, "learning_rate": 9.4827241606315e-05, "loss": 2.3380207061767577, "memory(GiB)": 72.85, "step": 17055, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.670577 }, { "epoch": 0.7309027033974551, "grad_norm": 4.930282115936279, "learning_rate": 9.482426022709123e-05, "loss": 2.560110855102539, "memory(GiB)": 72.85, "step": 17060, "token_acc": 0.4296875, "train_speed(iter/s)": 0.670558 }, { "epoch": 0.731116918726704, "grad_norm": 3.7647223472595215, "learning_rate": 9.482127803583396e-05, "loss": 2.552000617980957, "memory(GiB)": 72.85, "step": 17065, "token_acc": 0.4383116883116883, "train_speed(iter/s)": 0.670602 }, { "epoch": 0.7313311340559531, "grad_norm": 2.696387767791748, "learning_rate": 9.481829503259719e-05, "loss": 2.443585968017578, "memory(GiB)": 72.85, "step": 17070, "token_acc": 0.5048543689320388, "train_speed(iter/s)": 0.670629 }, { "epoch": 0.731545349385202, "grad_norm": 2.5997555255889893, "learning_rate": 9.481531121743494e-05, "loss": 2.6078948974609375, "memory(GiB)": 72.85, "step": 17075, "token_acc": 0.436950146627566, "train_speed(iter/s)": 0.670648 }, { "epoch": 0.7317595647144509, "grad_norm": 3.190836191177368, "learning_rate": 9.48123265904013e-05, "loss": 2.5302921295166017, "memory(GiB)": 72.85, "step": 17080, "token_acc": 0.4265232974910394, "train_speed(iter/s)": 0.670676 }, { "epoch": 0.7319737800437, "grad_norm": 3.686852216720581, "learning_rate": 9.480934115155033e-05, "loss": 2.485887145996094, "memory(GiB)": 72.85, "step": 17085, "token_acc": 0.450199203187251, "train_speed(iter/s)": 0.670731 }, { "epoch": 0.7321879953729489, "grad_norm": 2.8379297256469727, "learning_rate": 9.48063549009361e-05, "loss": 2.165671157836914, "memory(GiB)": 72.85, "step": 
17090, "token_acc": 0.55859375, "train_speed(iter/s)": 0.670774 }, { "epoch": 0.7324022107021978, "grad_norm": 4.5769548416137695, "learning_rate": 9.480336783861274e-05, "loss": 2.441422462463379, "memory(GiB)": 72.85, "step": 17095, "token_acc": 0.4575645756457565, "train_speed(iter/s)": 0.670752 }, { "epoch": 0.7326164260314468, "grad_norm": 5.508566856384277, "learning_rate": 9.480037996463433e-05, "loss": 2.543080520629883, "memory(GiB)": 72.85, "step": 17100, "token_acc": 0.49097472924187724, "train_speed(iter/s)": 0.670702 }, { "epoch": 0.7328306413606958, "grad_norm": 3.2653045654296875, "learning_rate": 9.479739127905501e-05, "loss": 2.34041805267334, "memory(GiB)": 72.85, "step": 17105, "token_acc": 0.5109489051094891, "train_speed(iter/s)": 0.67074 }, { "epoch": 0.7330448566899447, "grad_norm": 4.0771942138671875, "learning_rate": 9.479440178192892e-05, "loss": 2.2372501373291014, "memory(GiB)": 72.85, "step": 17110, "token_acc": 0.5049833887043189, "train_speed(iter/s)": 0.670675 }, { "epoch": 0.7332590720191937, "grad_norm": 3.326314687728882, "learning_rate": 9.479141147331023e-05, "loss": 2.5436241149902346, "memory(GiB)": 72.85, "step": 17115, "token_acc": 0.4748603351955307, "train_speed(iter/s)": 0.670663 }, { "epoch": 0.7334732873484426, "grad_norm": 2.9085288047790527, "learning_rate": 9.47884203532531e-05, "loss": 2.304927444458008, "memory(GiB)": 72.85, "step": 17120, "token_acc": 0.4965986394557823, "train_speed(iter/s)": 0.670694 }, { "epoch": 0.7336875026776916, "grad_norm": 3.7343575954437256, "learning_rate": 9.478542842181172e-05, "loss": 2.4610008239746093, "memory(GiB)": 72.85, "step": 17125, "token_acc": 0.45483870967741935, "train_speed(iter/s)": 0.670732 }, { "epoch": 0.7339017180069406, "grad_norm": 3.535367250442505, "learning_rate": 9.478243567904028e-05, "loss": 2.4671817779541017, "memory(GiB)": 72.85, "step": 17130, "token_acc": 0.428125, "train_speed(iter/s)": 0.670726 }, { "epoch": 0.7341159333361895, "grad_norm": 
4.380101203918457, "learning_rate": 9.477944212499304e-05, "loss": 2.3633901596069338, "memory(GiB)": 72.85, "step": 17135, "token_acc": 0.49624060150375937, "train_speed(iter/s)": 0.67073 }, { "epoch": 0.7343301486654386, "grad_norm": 2.7233803272247314, "learning_rate": 9.477644775972416e-05, "loss": 2.4965702056884767, "memory(GiB)": 72.85, "step": 17140, "token_acc": 0.4875, "train_speed(iter/s)": 0.670745 }, { "epoch": 0.7345443639946875, "grad_norm": 2.6732749938964844, "learning_rate": 9.477345258328795e-05, "loss": 2.8305080413818358, "memory(GiB)": 72.85, "step": 17145, "token_acc": 0.4652567975830816, "train_speed(iter/s)": 0.670713 }, { "epoch": 0.7347585793239364, "grad_norm": 3.324141263961792, "learning_rate": 9.477045659573864e-05, "loss": 2.4221637725830076, "memory(GiB)": 72.85, "step": 17150, "token_acc": 0.5355805243445693, "train_speed(iter/s)": 0.670694 }, { "epoch": 0.7349727946531854, "grad_norm": 3.750396966934204, "learning_rate": 9.476745979713051e-05, "loss": 2.375078582763672, "memory(GiB)": 72.85, "step": 17155, "token_acc": 0.45714285714285713, "train_speed(iter/s)": 0.670693 }, { "epoch": 0.7351870099824344, "grad_norm": 3.1061904430389404, "learning_rate": 9.476446218751786e-05, "loss": 2.734166717529297, "memory(GiB)": 72.85, "step": 17160, "token_acc": 0.45195729537366547, "train_speed(iter/s)": 0.670701 }, { "epoch": 0.7354012253116833, "grad_norm": 3.6676487922668457, "learning_rate": 9.476146376695497e-05, "loss": 2.2555574417114257, "memory(GiB)": 72.85, "step": 17165, "token_acc": 0.5269709543568465, "train_speed(iter/s)": 0.67071 }, { "epoch": 0.7356154406409323, "grad_norm": 3.3124125003814697, "learning_rate": 9.475846453549619e-05, "loss": 2.5645925521850588, "memory(GiB)": 72.85, "step": 17170, "token_acc": 0.46496815286624205, "train_speed(iter/s)": 0.670718 }, { "epoch": 0.7358296559701812, "grad_norm": 3.5435078144073486, "learning_rate": 9.475546449319583e-05, "loss": 2.3419912338256834, "memory(GiB)": 72.85, "step": 
17175, "token_acc": 0.5201238390092879, "train_speed(iter/s)": 0.670737 }, { "epoch": 0.7360438712994302, "grad_norm": 3.927755832672119, "learning_rate": 9.475246364010823e-05, "loss": 2.636320114135742, "memory(GiB)": 72.85, "step": 17180, "token_acc": 0.4809384164222874, "train_speed(iter/s)": 0.670758 }, { "epoch": 0.7362580866286792, "grad_norm": 3.6421334743499756, "learning_rate": 9.47494619762878e-05, "loss": 2.5281967163085937, "memory(GiB)": 72.85, "step": 17185, "token_acc": 0.43283582089552236, "train_speed(iter/s)": 0.6708 }, { "epoch": 0.7364723019579281, "grad_norm": 3.519326686859131, "learning_rate": 9.474645950178886e-05, "loss": 2.4638574600219725, "memory(GiB)": 72.85, "step": 17190, "token_acc": 0.46494464944649444, "train_speed(iter/s)": 0.670825 }, { "epoch": 0.736686517287177, "grad_norm": 4.873022079467773, "learning_rate": 9.474345621666586e-05, "loss": 2.3609897613525392, "memory(GiB)": 72.85, "step": 17195, "token_acc": 0.5155555555555555, "train_speed(iter/s)": 0.670825 }, { "epoch": 0.7369007326164261, "grad_norm": 3.0874600410461426, "learning_rate": 9.474045212097315e-05, "loss": 2.134842300415039, "memory(GiB)": 72.85, "step": 17200, "token_acc": 0.524904214559387, "train_speed(iter/s)": 0.670802 }, { "epoch": 0.737114947945675, "grad_norm": 3.8371503353118896, "learning_rate": 9.473744721476519e-05, "loss": 2.3965295791625976, "memory(GiB)": 72.85, "step": 17205, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.670794 }, { "epoch": 0.7373291632749239, "grad_norm": 3.565964698791504, "learning_rate": 9.47344414980964e-05, "loss": 2.7915441513061525, "memory(GiB)": 72.85, "step": 17210, "token_acc": 0.45121951219512196, "train_speed(iter/s)": 0.670777 }, { "epoch": 0.737543378604173, "grad_norm": 4.678500652313232, "learning_rate": 9.473143497102126e-05, "loss": 2.3317100524902346, "memory(GiB)": 72.85, "step": 17215, "token_acc": 0.5342960288808665, "train_speed(iter/s)": 0.670784 }, { "epoch": 0.7377575939334219, 
"grad_norm": 3.764350175857544, "learning_rate": 9.472842763359419e-05, "loss": 2.1390966415405273, "memory(GiB)": 72.85, "step": 17220, "token_acc": 0.528, "train_speed(iter/s)": 0.670792 }, { "epoch": 0.7379718092626708, "grad_norm": 3.354520082473755, "learning_rate": 9.47254194858697e-05, "loss": 2.356495666503906, "memory(GiB)": 72.85, "step": 17225, "token_acc": 0.49158249158249157, "train_speed(iter/s)": 0.670782 }, { "epoch": 0.7381860245919198, "grad_norm": 4.064976215362549, "learning_rate": 9.47224105279023e-05, "loss": 2.259383773803711, "memory(GiB)": 72.85, "step": 17230, "token_acc": 0.5113122171945701, "train_speed(iter/s)": 0.670784 }, { "epoch": 0.7384002399211688, "grad_norm": 3.6573262214660645, "learning_rate": 9.471940075974646e-05, "loss": 2.3230560302734373, "memory(GiB)": 72.85, "step": 17235, "token_acc": 0.5, "train_speed(iter/s)": 0.670825 }, { "epoch": 0.7386144552504177, "grad_norm": 3.532496213912964, "learning_rate": 9.471639018145673e-05, "loss": 2.2380954742431642, "memory(GiB)": 72.85, "step": 17240, "token_acc": 0.4826388888888889, "train_speed(iter/s)": 0.67086 }, { "epoch": 0.7388286705796667, "grad_norm": 4.114862442016602, "learning_rate": 9.471337879308765e-05, "loss": 2.4280649185180665, "memory(GiB)": 72.85, "step": 17245, "token_acc": 0.47896440129449835, "train_speed(iter/s)": 0.670875 }, { "epoch": 0.7390428859089156, "grad_norm": 3.2630858421325684, "learning_rate": 9.471036659469376e-05, "loss": 2.364945983886719, "memory(GiB)": 72.85, "step": 17250, "token_acc": 0.5032467532467533, "train_speed(iter/s)": 0.670862 }, { "epoch": 0.7392571012381646, "grad_norm": 3.5462327003479004, "learning_rate": 9.470735358632965e-05, "loss": 2.640242004394531, "memory(GiB)": 72.85, "step": 17255, "token_acc": 0.4650537634408602, "train_speed(iter/s)": 0.670879 }, { "epoch": 0.7394713165674136, "grad_norm": 3.885359048843384, "learning_rate": 9.470433976804988e-05, "loss": 2.5630672454833983, "memory(GiB)": 72.85, "step": 17260, 
"token_acc": 0.4696485623003195, "train_speed(iter/s)": 0.670874 }, { "epoch": 0.7396855318966625, "grad_norm": 3.597601890563965, "learning_rate": 9.470132513990906e-05, "loss": 2.3320472717285154, "memory(GiB)": 72.85, "step": 17265, "token_acc": 0.532051282051282, "train_speed(iter/s)": 0.670877 }, { "epoch": 0.7398997472259115, "grad_norm": 4.014153003692627, "learning_rate": 9.46983097019618e-05, "loss": 2.2208841323852537, "memory(GiB)": 72.85, "step": 17270, "token_acc": 0.5, "train_speed(iter/s)": 0.670876 }, { "epoch": 0.7401139625551605, "grad_norm": 4.03987979888916, "learning_rate": 9.469529345426273e-05, "loss": 2.2588953018188476, "memory(GiB)": 72.85, "step": 17275, "token_acc": 0.5058823529411764, "train_speed(iter/s)": 0.670913 }, { "epoch": 0.7403281778844094, "grad_norm": 3.71258807182312, "learning_rate": 9.469227639686649e-05, "loss": 2.676198196411133, "memory(GiB)": 72.85, "step": 17280, "token_acc": 0.48172757475083056, "train_speed(iter/s)": 0.670931 }, { "epoch": 0.7405423932136583, "grad_norm": 2.962296724319458, "learning_rate": 9.468925852982776e-05, "loss": 2.3921669006347654, "memory(GiB)": 72.85, "step": 17285, "token_acc": 0.5, "train_speed(iter/s)": 0.670966 }, { "epoch": 0.7407566085429074, "grad_norm": 3.92098069190979, "learning_rate": 9.468623985320115e-05, "loss": 2.046257972717285, "memory(GiB)": 72.85, "step": 17290, "token_acc": 0.5304659498207885, "train_speed(iter/s)": 0.670995 }, { "epoch": 0.7409708238721563, "grad_norm": 4.6581268310546875, "learning_rate": 9.468322036704142e-05, "loss": 2.381874847412109, "memory(GiB)": 72.85, "step": 17295, "token_acc": 0.4819277108433735, "train_speed(iter/s)": 0.670981 }, { "epoch": 0.7411850392014052, "grad_norm": 5.339822769165039, "learning_rate": 9.468020007140322e-05, "loss": 2.795334243774414, "memory(GiB)": 72.85, "step": 17300, "token_acc": 0.43573667711598746, "train_speed(iter/s)": 0.671017 }, { "epoch": 0.7413992545306543, "grad_norm": 3.399266004562378, "learning_rate": 
9.467717896634129e-05, "loss": 2.3461624145507813, "memory(GiB)": 72.85, "step": 17305, "token_acc": 0.496551724137931, "train_speed(iter/s)": 0.671035 }, { "epoch": 0.7416134698599032, "grad_norm": 3.774620294570923, "learning_rate": 9.467415705191034e-05, "loss": 2.551399040222168, "memory(GiB)": 72.85, "step": 17310, "token_acc": 0.5017301038062284, "train_speed(iter/s)": 0.671044 }, { "epoch": 0.7418276851891521, "grad_norm": 2.792794942855835, "learning_rate": 9.467113432816514e-05, "loss": 2.2069326400756837, "memory(GiB)": 72.85, "step": 17315, "token_acc": 0.5115511551155115, "train_speed(iter/s)": 0.671066 }, { "epoch": 0.7420419005184011, "grad_norm": 7.360764503479004, "learning_rate": 9.466811079516044e-05, "loss": 2.2909975051879883, "memory(GiB)": 72.85, "step": 17320, "token_acc": 0.4826254826254826, "train_speed(iter/s)": 0.67108 }, { "epoch": 0.74225611584765, "grad_norm": 3.892874002456665, "learning_rate": 9.4665086452951e-05, "loss": 2.3091270446777346, "memory(GiB)": 72.85, "step": 17325, "token_acc": 0.5265017667844523, "train_speed(iter/s)": 0.671066 }, { "epoch": 0.742470331176899, "grad_norm": 3.1411139965057373, "learning_rate": 9.466206130159163e-05, "loss": 2.4062431335449217, "memory(GiB)": 72.85, "step": 17330, "token_acc": 0.5032679738562091, "train_speed(iter/s)": 0.671081 }, { "epoch": 0.742684546506148, "grad_norm": 4.365588665008545, "learning_rate": 9.465903534113711e-05, "loss": 2.527501106262207, "memory(GiB)": 72.85, "step": 17335, "token_acc": 0.4897959183673469, "train_speed(iter/s)": 0.671113 }, { "epoch": 0.7428987618353969, "grad_norm": 2.645362377166748, "learning_rate": 9.465600857164228e-05, "loss": 2.2463314056396486, "memory(GiB)": 72.85, "step": 17340, "token_acc": 0.5033112582781457, "train_speed(iter/s)": 0.671137 }, { "epoch": 0.7431129771646459, "grad_norm": 3.344285249710083, "learning_rate": 9.465298099316195e-05, "loss": 2.3959264755249023, "memory(GiB)": 72.85, "step": 17345, "token_acc": 
0.45185185185185184, "train_speed(iter/s)": 0.671161 }, { "epoch": 0.7433271924938949, "grad_norm": 4.064239501953125, "learning_rate": 9.4649952605751e-05, "loss": 2.2651607513427736, "memory(GiB)": 72.85, "step": 17350, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.67114 }, { "epoch": 0.7435414078231438, "grad_norm": 3.519813299179077, "learning_rate": 9.464692340946426e-05, "loss": 2.158920669555664, "memory(GiB)": 72.85, "step": 17355, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.671167 }, { "epoch": 0.7437556231523927, "grad_norm": 3.5536184310913086, "learning_rate": 9.464389340435665e-05, "loss": 2.136942481994629, "memory(GiB)": 72.85, "step": 17360, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.671151 }, { "epoch": 0.7439698384816418, "grad_norm": 3.2761714458465576, "learning_rate": 9.4640862590483e-05, "loss": 2.50836181640625, "memory(GiB)": 72.85, "step": 17365, "token_acc": 0.5191740412979351, "train_speed(iter/s)": 0.671168 }, { "epoch": 0.7441840538108907, "grad_norm": 4.3689069747924805, "learning_rate": 9.463783096789826e-05, "loss": 2.4683223724365235, "memory(GiB)": 72.85, "step": 17370, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.671212 }, { "epoch": 0.7443982691401396, "grad_norm": 2.9944539070129395, "learning_rate": 9.463479853665733e-05, "loss": 2.3064926147460936, "memory(GiB)": 72.85, "step": 17375, "token_acc": 0.5063291139240507, "train_speed(iter/s)": 0.671134 }, { "epoch": 0.7446124844693887, "grad_norm": 4.2681355476379395, "learning_rate": 9.463176529681516e-05, "loss": 2.5109317779541014, "memory(GiB)": 72.85, "step": 17380, "token_acc": 0.4981684981684982, "train_speed(iter/s)": 0.671129 }, { "epoch": 0.7448266997986376, "grad_norm": 3.9456968307495117, "learning_rate": 9.462873124842668e-05, "loss": 2.364086723327637, "memory(GiB)": 72.85, "step": 17385, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.671132 }, { "epoch": 0.7450409151278865, "grad_norm": 
3.16727876663208, "learning_rate": 9.462569639154688e-05, "loss": 2.5372528076171874, "memory(GiB)": 72.85, "step": 17390, "token_acc": 0.4696969696969697, "train_speed(iter/s)": 0.671125 }, { "epoch": 0.7452551304571355, "grad_norm": 4.254570484161377, "learning_rate": 9.462266072623074e-05, "loss": 2.246072769165039, "memory(GiB)": 72.85, "step": 17395, "token_acc": 0.5036764705882353, "train_speed(iter/s)": 0.671133 }, { "epoch": 0.7454693457863845, "grad_norm": 3.0964787006378174, "learning_rate": 9.461962425253323e-05, "loss": 2.239493179321289, "memory(GiB)": 72.85, "step": 17400, "token_acc": 0.5361216730038023, "train_speed(iter/s)": 0.671164 }, { "epoch": 0.7456835611156334, "grad_norm": 4.185581207275391, "learning_rate": 9.461658697050936e-05, "loss": 2.4372093200683596, "memory(GiB)": 72.85, "step": 17405, "token_acc": 0.5167785234899329, "train_speed(iter/s)": 0.671137 }, { "epoch": 0.7458977764448824, "grad_norm": 3.0727336406707764, "learning_rate": 9.461354888021418e-05, "loss": 2.4941940307617188, "memory(GiB)": 72.85, "step": 17410, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.671147 }, { "epoch": 0.7461119917741313, "grad_norm": 4.0858988761901855, "learning_rate": 9.46105099817027e-05, "loss": 2.4077871322631834, "memory(GiB)": 72.85, "step": 17415, "token_acc": 0.46579804560260585, "train_speed(iter/s)": 0.671152 }, { "epoch": 0.7463262071033803, "grad_norm": 4.1450514793396, "learning_rate": 9.460747027502999e-05, "loss": 2.6011384963989257, "memory(GiB)": 72.85, "step": 17420, "token_acc": 0.4767025089605735, "train_speed(iter/s)": 0.671155 }, { "epoch": 0.7465404224326293, "grad_norm": 4.940799236297607, "learning_rate": 9.460442976025111e-05, "loss": 2.2652519226074217, "memory(GiB)": 72.85, "step": 17425, "token_acc": 0.543859649122807, "train_speed(iter/s)": 0.671114 }, { "epoch": 0.7467546377618782, "grad_norm": 3.6217103004455566, "learning_rate": 9.460138843742114e-05, "loss": 2.743694877624512, "memory(GiB)": 72.85, 
"step": 17430, "token_acc": 0.4432624113475177, "train_speed(iter/s)": 0.671145 }, { "epoch": 0.7469688530911271, "grad_norm": 3.8280534744262695, "learning_rate": 9.459834630659518e-05, "loss": 2.2932226181030275, "memory(GiB)": 72.85, "step": 17435, "token_acc": 0.47904191616766467, "train_speed(iter/s)": 0.671134 }, { "epoch": 0.7471830684203762, "grad_norm": 4.103742599487305, "learning_rate": 9.459530336782833e-05, "loss": 2.541160774230957, "memory(GiB)": 72.85, "step": 17440, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.671155 }, { "epoch": 0.7473972837496251, "grad_norm": 4.379855155944824, "learning_rate": 9.459225962117575e-05, "loss": 2.413958740234375, "memory(GiB)": 72.85, "step": 17445, "token_acc": 0.543046357615894, "train_speed(iter/s)": 0.671204 }, { "epoch": 0.747611499078874, "grad_norm": 4.413636207580566, "learning_rate": 9.458921506669253e-05, "loss": 2.2103246688842773, "memory(GiB)": 72.85, "step": 17450, "token_acc": 0.49825783972125437, "train_speed(iter/s)": 0.671185 }, { "epoch": 0.7478257144081231, "grad_norm": 2.7690911293029785, "learning_rate": 9.458616970443387e-05, "loss": 2.4772388458251955, "memory(GiB)": 72.85, "step": 17455, "token_acc": 0.47157190635451507, "train_speed(iter/s)": 0.671202 }, { "epoch": 0.748039929737372, "grad_norm": 2.8215370178222656, "learning_rate": 9.458312353445491e-05, "loss": 2.6445486068725588, "memory(GiB)": 72.85, "step": 17460, "token_acc": 0.4632768361581921, "train_speed(iter/s)": 0.671221 }, { "epoch": 0.7482541450666209, "grad_norm": 3.280226707458496, "learning_rate": 9.458007655681085e-05, "loss": 2.2354852676391603, "memory(GiB)": 72.85, "step": 17465, "token_acc": 0.5250836120401338, "train_speed(iter/s)": 0.671251 }, { "epoch": 0.74846836039587, "grad_norm": 2.738260507583618, "learning_rate": 9.457702877155689e-05, "loss": 2.4359485626220705, "memory(GiB)": 72.85, "step": 17470, "token_acc": 0.45871559633027525, "train_speed(iter/s)": 0.671177 }, { "epoch": 
0.7486825757251189, "grad_norm": 3.2053699493408203, "learning_rate": 9.457398017874823e-05, "loss": 2.2118366241455076, "memory(GiB)": 72.85, "step": 17475, "token_acc": 0.5413533834586466, "train_speed(iter/s)": 0.671217 }, { "epoch": 0.7488967910543679, "grad_norm": 3.181473970413208, "learning_rate": 9.45709307784401e-05, "loss": 2.498218536376953, "memory(GiB)": 72.85, "step": 17480, "token_acc": 0.495114006514658, "train_speed(iter/s)": 0.671182 }, { "epoch": 0.7491110063836168, "grad_norm": 3.4935059547424316, "learning_rate": 9.456788057068776e-05, "loss": 2.112632942199707, "memory(GiB)": 72.85, "step": 17485, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.6712 }, { "epoch": 0.7493252217128658, "grad_norm": 4.26706075668335, "learning_rate": 9.456482955554645e-05, "loss": 2.217370796203613, "memory(GiB)": 72.85, "step": 17490, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.671175 }, { "epoch": 0.7495394370421148, "grad_norm": 3.809537172317505, "learning_rate": 9.456177773307143e-05, "loss": 2.3148578643798827, "memory(GiB)": 72.85, "step": 17495, "token_acc": 0.5095057034220533, "train_speed(iter/s)": 0.671195 }, { "epoch": 0.7497536523713637, "grad_norm": 4.639438152313232, "learning_rate": 9.455872510331802e-05, "loss": 2.701299285888672, "memory(GiB)": 72.85, "step": 17500, "token_acc": 0.48333333333333334, "train_speed(iter/s)": 0.671187 }, { "epoch": 0.7497536523713637, "eval_loss": 1.9822397232055664, "eval_runtime": 17.2609, "eval_samples_per_second": 5.793, "eval_steps_per_second": 5.793, "eval_token_acc": 0.51621271076524, "step": 17500 }, { "epoch": 0.7499678677006126, "grad_norm": 3.8665335178375244, "learning_rate": 9.45556716663415e-05, "loss": 2.3165279388427735, "memory(GiB)": 72.85, "step": 17505, "token_acc": 0.5106589147286822, "train_speed(iter/s)": 0.670667 }, { "epoch": 0.7501820830298617, "grad_norm": 3.7694826126098633, "learning_rate": 9.45526174221972e-05, "loss": 2.421217346191406, "memory(GiB)": 72.85, 
"step": 17510, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.670692 }, { "epoch": 0.7503962983591106, "grad_norm": 3.1585922241210938, "learning_rate": 9.454956237094043e-05, "loss": 2.0487668991088865, "memory(GiB)": 72.85, "step": 17515, "token_acc": 0.5403508771929825, "train_speed(iter/s)": 0.670705 }, { "epoch": 0.7506105136883595, "grad_norm": 3.587907552719116, "learning_rate": 9.454650651262656e-05, "loss": 2.439474868774414, "memory(GiB)": 72.85, "step": 17520, "token_acc": 0.46619217081850534, "train_speed(iter/s)": 0.67072 }, { "epoch": 0.7508247290176085, "grad_norm": 3.779026508331299, "learning_rate": 9.454344984731094e-05, "loss": 2.335188102722168, "memory(GiB)": 72.85, "step": 17525, "token_acc": 0.5053003533568905, "train_speed(iter/s)": 0.670662 }, { "epoch": 0.7510389443468575, "grad_norm": 4.148402690887451, "learning_rate": 9.454039237504893e-05, "loss": 2.740304374694824, "memory(GiB)": 72.85, "step": 17530, "token_acc": 0.41975308641975306, "train_speed(iter/s)": 0.670665 }, { "epoch": 0.7512531596761064, "grad_norm": 3.3730266094207764, "learning_rate": 9.453733409589593e-05, "loss": 2.316074752807617, "memory(GiB)": 72.85, "step": 17535, "token_acc": 0.4726027397260274, "train_speed(iter/s)": 0.670647 }, { "epoch": 0.7514673750053554, "grad_norm": 4.073160171508789, "learning_rate": 9.453427500990734e-05, "loss": 2.4167613983154297, "memory(GiB)": 72.85, "step": 17540, "token_acc": 0.4391691394658754, "train_speed(iter/s)": 0.67068 }, { "epoch": 0.7516815903346044, "grad_norm": 3.1323962211608887, "learning_rate": 9.453121511713859e-05, "loss": 2.3883501052856446, "memory(GiB)": 72.85, "step": 17545, "token_acc": 0.4959677419354839, "train_speed(iter/s)": 0.67069 }, { "epoch": 0.7518958056638533, "grad_norm": 3.983065605163574, "learning_rate": 9.45281544176451e-05, "loss": 2.4045888900756838, "memory(GiB)": 72.85, "step": 17550, "token_acc": 0.4813664596273292, "train_speed(iter/s)": 0.670686 }, { "epoch": 0.7521100209931023, 
"grad_norm": 4.458312034606934, "learning_rate": 9.452509291148232e-05, "loss": 2.234214973449707, "memory(GiB)": 72.85, "step": 17555, "token_acc": 0.5214723926380368, "train_speed(iter/s)": 0.670697 }, { "epoch": 0.7523242363223512, "grad_norm": 7.532939910888672, "learning_rate": 9.452203059870573e-05, "loss": 2.5349571228027346, "memory(GiB)": 72.85, "step": 17560, "token_acc": 0.4847328244274809, "train_speed(iter/s)": 0.670708 }, { "epoch": 0.7525384516516002, "grad_norm": 4.221951007843018, "learning_rate": 9.451896747937077e-05, "loss": 2.3841842651367187, "memory(GiB)": 72.85, "step": 17565, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.670673 }, { "epoch": 0.7527526669808492, "grad_norm": 3.273437738418579, "learning_rate": 9.451590355353296e-05, "loss": 2.655735397338867, "memory(GiB)": 72.85, "step": 17570, "token_acc": 0.4608695652173913, "train_speed(iter/s)": 0.670686 }, { "epoch": 0.7529668823100981, "grad_norm": 2.773707151412964, "learning_rate": 9.451283882124779e-05, "loss": 2.388767623901367, "memory(GiB)": 72.85, "step": 17575, "token_acc": 0.46710526315789475, "train_speed(iter/s)": 0.670712 }, { "epoch": 0.753181097639347, "grad_norm": 4.146472454071045, "learning_rate": 9.45097732825708e-05, "loss": 2.225164794921875, "memory(GiB)": 72.85, "step": 17580, "token_acc": 0.5250836120401338, "train_speed(iter/s)": 0.670721 }, { "epoch": 0.7533953129685961, "grad_norm": 2.709883213043213, "learning_rate": 9.45067069375575e-05, "loss": 2.6661861419677733, "memory(GiB)": 72.85, "step": 17585, "token_acc": 0.42780748663101603, "train_speed(iter/s)": 0.670726 }, { "epoch": 0.753609528297845, "grad_norm": 3.6495819091796875, "learning_rate": 9.450363978626348e-05, "loss": 2.4381549835205076, "memory(GiB)": 72.85, "step": 17590, "token_acc": 0.4873417721518987, "train_speed(iter/s)": 0.67076 }, { "epoch": 0.7538237436270939, "grad_norm": 3.1763391494750977, "learning_rate": 9.450057182874426e-05, "loss": 2.6818515777587892, "memory(GiB)": 
72.85, "step": 17595, "token_acc": 0.45794392523364486, "train_speed(iter/s)": 0.670839 }, { "epoch": 0.754037958956343, "grad_norm": 4.770707607269287, "learning_rate": 9.449750306505542e-05, "loss": 2.4869970321655273, "memory(GiB)": 72.85, "step": 17600, "token_acc": 0.5020408163265306, "train_speed(iter/s)": 0.670861 }, { "epoch": 0.7542521742855919, "grad_norm": 3.0526888370513916, "learning_rate": 9.44944334952526e-05, "loss": 2.3725162506103517, "memory(GiB)": 72.85, "step": 17605, "token_acc": 0.5176848874598071, "train_speed(iter/s)": 0.670883 }, { "epoch": 0.7544663896148408, "grad_norm": 3.2148025035858154, "learning_rate": 9.449136311939137e-05, "loss": 2.234161949157715, "memory(GiB)": 72.85, "step": 17610, "token_acc": 0.513677811550152, "train_speed(iter/s)": 0.67093 }, { "epoch": 0.7546806049440898, "grad_norm": 3.9434046745300293, "learning_rate": 9.448829193752736e-05, "loss": 2.5427921295166014, "memory(GiB)": 72.85, "step": 17615, "token_acc": 0.444141689373297, "train_speed(iter/s)": 0.670944 }, { "epoch": 0.7548948202733388, "grad_norm": 3.287720203399658, "learning_rate": 9.448521994971618e-05, "loss": 2.7212013244628905, "memory(GiB)": 72.85, "step": 17620, "token_acc": 0.4459016393442623, "train_speed(iter/s)": 0.670927 }, { "epoch": 0.7551090356025877, "grad_norm": 3.85868763923645, "learning_rate": 9.448214715601354e-05, "loss": 2.50970458984375, "memory(GiB)": 72.85, "step": 17625, "token_acc": 0.437984496124031, "train_speed(iter/s)": 0.670931 }, { "epoch": 0.7553232509318367, "grad_norm": 3.554657220840454, "learning_rate": 9.447907355647507e-05, "loss": 2.735573577880859, "memory(GiB)": 72.85, "step": 17630, "token_acc": 0.4479495268138801, "train_speed(iter/s)": 0.670946 }, { "epoch": 0.7555374662610856, "grad_norm": 5.274256706237793, "learning_rate": 9.447599915115646e-05, "loss": 2.978921127319336, "memory(GiB)": 72.85, "step": 17635, "token_acc": 0.4054878048780488, "train_speed(iter/s)": 0.670951 }, { "epoch": 
0.7557516815903346, "grad_norm": 3.168999671936035, "learning_rate": 9.44729239401134e-05, "loss": 1.946356964111328, "memory(GiB)": 72.85, "step": 17640, "token_acc": 0.5595667870036101, "train_speed(iter/s)": 0.670941 }, { "epoch": 0.7559658969195836, "grad_norm": 5.558811187744141, "learning_rate": 9.446984792340161e-05, "loss": 2.206915092468262, "memory(GiB)": 72.85, "step": 17645, "token_acc": 0.5136186770428015, "train_speed(iter/s)": 0.670981 }, { "epoch": 0.7561801122488325, "grad_norm": 3.8483564853668213, "learning_rate": 9.44667711010768e-05, "loss": 2.3017295837402343, "memory(GiB)": 72.85, "step": 17650, "token_acc": 0.4745098039215686, "train_speed(iter/s)": 0.671017 }, { "epoch": 0.7563943275780814, "grad_norm": 3.915196418762207, "learning_rate": 9.446369347319473e-05, "loss": 2.281760406494141, "memory(GiB)": 72.85, "step": 17655, "token_acc": 0.5311203319502075, "train_speed(iter/s)": 0.671053 }, { "epoch": 0.7566085429073305, "grad_norm": 4.4461588859558105, "learning_rate": 9.446061503981111e-05, "loss": 2.5201915740966796, "memory(GiB)": 72.85, "step": 17660, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.671024 }, { "epoch": 0.7568227582365794, "grad_norm": 3.505291700363159, "learning_rate": 9.445753580098178e-05, "loss": 2.881251907348633, "memory(GiB)": 72.85, "step": 17665, "token_acc": 0.4475920679886686, "train_speed(iter/s)": 0.67104 }, { "epoch": 0.7570369735658283, "grad_norm": 2.863994598388672, "learning_rate": 9.445445575676248e-05, "loss": 2.7028255462646484, "memory(GiB)": 72.85, "step": 17670, "token_acc": 0.4273743016759777, "train_speed(iter/s)": 0.671019 }, { "epoch": 0.7572511888950774, "grad_norm": 3.3038573265075684, "learning_rate": 9.445137490720899e-05, "loss": 2.3582897186279297, "memory(GiB)": 72.85, "step": 17675, "token_acc": 0.5061728395061729, "train_speed(iter/s)": 0.671027 }, { "epoch": 0.7574654042243263, "grad_norm": 3.3600940704345703, "learning_rate": 9.444829325237716e-05, "loss": 
2.4532636642456054, "memory(GiB)": 72.85, "step": 17680, "token_acc": 0.5197368421052632, "train_speed(iter/s)": 0.671012 }, { "epoch": 0.7576796195535752, "grad_norm": 3.5011203289031982, "learning_rate": 9.44452107923228e-05, "loss": 2.347461700439453, "memory(GiB)": 72.85, "step": 17685, "token_acc": 0.4962686567164179, "train_speed(iter/s)": 0.670979 }, { "epoch": 0.7578938348828242, "grad_norm": 3.2771670818328857, "learning_rate": 9.444212752710175e-05, "loss": 2.484307861328125, "memory(GiB)": 72.85, "step": 17690, "token_acc": 0.5109489051094891, "train_speed(iter/s)": 0.670941 }, { "epoch": 0.7581080502120732, "grad_norm": 4.157014846801758, "learning_rate": 9.443904345676989e-05, "loss": 2.291427993774414, "memory(GiB)": 72.85, "step": 17695, "token_acc": 0.49375, "train_speed(iter/s)": 0.670972 }, { "epoch": 0.7583222655413221, "grad_norm": 3.089095115661621, "learning_rate": 9.443595858138304e-05, "loss": 2.7568302154541016, "memory(GiB)": 72.85, "step": 17700, "token_acc": 0.43521594684385384, "train_speed(iter/s)": 0.671001 }, { "epoch": 0.7585364808705711, "grad_norm": 2.717435836791992, "learning_rate": 9.443287290099712e-05, "loss": 2.691522216796875, "memory(GiB)": 72.85, "step": 17705, "token_acc": 0.445578231292517, "train_speed(iter/s)": 0.670968 }, { "epoch": 0.75875069619982, "grad_norm": 3.2655622959136963, "learning_rate": 9.442978641566805e-05, "loss": 2.2036834716796876, "memory(GiB)": 72.85, "step": 17710, "token_acc": 0.496551724137931, "train_speed(iter/s)": 0.670972 }, { "epoch": 0.758964911529069, "grad_norm": 3.6555068492889404, "learning_rate": 9.44266991254517e-05, "loss": 2.2151809692382813, "memory(GiB)": 72.85, "step": 17715, "token_acc": 0.5126050420168067, "train_speed(iter/s)": 0.670965 }, { "epoch": 0.759179126858318, "grad_norm": 3.549882650375366, "learning_rate": 9.442361103040402e-05, "loss": 2.427854537963867, "memory(GiB)": 72.85, "step": 17720, "token_acc": 0.5236486486486487, "train_speed(iter/s)": 0.670964 }, { 
"epoch": 0.7593933421875669, "grad_norm": 3.1245555877685547, "learning_rate": 9.442052213058097e-05, "loss": 2.321803665161133, "memory(GiB)": 72.85, "step": 17725, "token_acc": 0.5183673469387755, "train_speed(iter/s)": 0.670991 }, { "epoch": 0.7596075575168159, "grad_norm": 2.99067759513855, "learning_rate": 9.441743242603846e-05, "loss": 2.6437828063964846, "memory(GiB)": 72.85, "step": 17730, "token_acc": 0.43799472295514513, "train_speed(iter/s)": 0.670953 }, { "epoch": 0.7598217728460649, "grad_norm": 2.6419615745544434, "learning_rate": 9.441434191683253e-05, "loss": 2.4502330780029298, "memory(GiB)": 72.85, "step": 17735, "token_acc": 0.49085365853658536, "train_speed(iter/s)": 0.670943 }, { "epoch": 0.7600359881753138, "grad_norm": 4.132720947265625, "learning_rate": 9.441125060301912e-05, "loss": 2.375100517272949, "memory(GiB)": 72.85, "step": 17740, "token_acc": 0.4981949458483754, "train_speed(iter/s)": 0.670968 }, { "epoch": 0.7602502035045627, "grad_norm": 3.7154510021209717, "learning_rate": 9.440815848465423e-05, "loss": 2.4120361328125, "memory(GiB)": 72.85, "step": 17745, "token_acc": 0.4979757085020243, "train_speed(iter/s)": 0.670922 }, { "epoch": 0.7604644188338118, "grad_norm": 3.834376096725464, "learning_rate": 9.440506556179391e-05, "loss": 2.4874244689941407, "memory(GiB)": 72.85, "step": 17750, "token_acc": 0.4634146341463415, "train_speed(iter/s)": 0.670943 }, { "epoch": 0.7606786341630607, "grad_norm": 3.8806979656219482, "learning_rate": 9.440197183449417e-05, "loss": 2.6059391021728517, "memory(GiB)": 72.85, "step": 17755, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.670892 }, { "epoch": 0.7608928494923096, "grad_norm": 3.8003387451171875, "learning_rate": 9.439887730281105e-05, "loss": 2.462681007385254, "memory(GiB)": 72.85, "step": 17760, "token_acc": 0.48514851485148514, "train_speed(iter/s)": 0.670917 }, { "epoch": 0.7611070648215587, "grad_norm": 4.074242115020752, "learning_rate": 9.439578196680062e-05, "loss": 
2.296865463256836, "memory(GiB)": 72.85, "step": 17765, "token_acc": 0.5223367697594502, "train_speed(iter/s)": 0.670912 }, { "epoch": 0.7613212801508076, "grad_norm": 3.41316294670105, "learning_rate": 9.439268582651895e-05, "loss": 2.6916019439697267, "memory(GiB)": 72.85, "step": 17770, "token_acc": 0.4410958904109589, "train_speed(iter/s)": 0.670915 }, { "epoch": 0.7615354954800565, "grad_norm": 4.0488152503967285, "learning_rate": 9.438958888202215e-05, "loss": 2.34792423248291, "memory(GiB)": 72.85, "step": 17775, "token_acc": 0.4797507788161994, "train_speed(iter/s)": 0.670967 }, { "epoch": 0.7617497108093055, "grad_norm": 3.2065913677215576, "learning_rate": 9.438649113336629e-05, "loss": 2.399784469604492, "memory(GiB)": 72.85, "step": 17780, "token_acc": 0.48742138364779874, "train_speed(iter/s)": 0.670954 }, { "epoch": 0.7619639261385545, "grad_norm": 4.256595611572266, "learning_rate": 9.438339258060749e-05, "loss": 2.2611438751220705, "memory(GiB)": 72.85, "step": 17785, "token_acc": 0.5650557620817844, "train_speed(iter/s)": 0.670962 }, { "epoch": 0.7621781414678034, "grad_norm": 4.5098557472229, "learning_rate": 9.438029322380193e-05, "loss": 2.499238395690918, "memory(GiB)": 72.85, "step": 17790, "token_acc": 0.49206349206349204, "train_speed(iter/s)": 0.67099 }, { "epoch": 0.7623923567970524, "grad_norm": 3.14839243888855, "learning_rate": 9.43771930630057e-05, "loss": 2.142035484313965, "memory(GiB)": 72.85, "step": 17795, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.670927 }, { "epoch": 0.7626065721263013, "grad_norm": 3.4617154598236084, "learning_rate": 9.4374092098275e-05, "loss": 2.4310047149658205, "memory(GiB)": 72.85, "step": 17800, "token_acc": 0.4836795252225519, "train_speed(iter/s)": 0.670871 }, { "epoch": 0.7628207874555503, "grad_norm": 3.5173251628875732, "learning_rate": 9.4370990329666e-05, "loss": 2.2119335174560546, "memory(GiB)": 72.85, "step": 17805, "token_acc": 0.513677811550152, "train_speed(iter/s)": 0.670857 
}, { "epoch": 0.7630350027847993, "grad_norm": 3.9316353797912598, "learning_rate": 9.436788775723488e-05, "loss": 2.2381853103637694, "memory(GiB)": 72.85, "step": 17810, "token_acc": 0.4967948717948718, "train_speed(iter/s)": 0.670866 }, { "epoch": 0.7632492181140482, "grad_norm": 4.7599077224731445, "learning_rate": 9.436478438103786e-05, "loss": 2.822479820251465, "memory(GiB)": 72.85, "step": 17815, "token_acc": 0.43018867924528303, "train_speed(iter/s)": 0.670863 }, { "epoch": 0.7634634334432973, "grad_norm": 4.569836616516113, "learning_rate": 9.436168020113114e-05, "loss": 2.591939926147461, "memory(GiB)": 72.85, "step": 17820, "token_acc": 0.45794392523364486, "train_speed(iter/s)": 0.670886 }, { "epoch": 0.7636776487725462, "grad_norm": 5.38376522064209, "learning_rate": 9.435857521757098e-05, "loss": 2.4246585845947264, "memory(GiB)": 72.85, "step": 17825, "token_acc": 0.4740740740740741, "train_speed(iter/s)": 0.670898 }, { "epoch": 0.7638918641017951, "grad_norm": 3.813704252243042, "learning_rate": 9.435546943041362e-05, "loss": 2.5745140075683595, "memory(GiB)": 72.85, "step": 17830, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.670928 }, { "epoch": 0.7641060794310441, "grad_norm": 3.9788310527801514, "learning_rate": 9.435236283971531e-05, "loss": 2.4355655670166017, "memory(GiB)": 72.85, "step": 17835, "token_acc": 0.45151515151515154, "train_speed(iter/s)": 0.670957 }, { "epoch": 0.7643202947602931, "grad_norm": 3.235260248184204, "learning_rate": 9.434925544553236e-05, "loss": 2.310611343383789, "memory(GiB)": 72.85, "step": 17840, "token_acc": 0.4784172661870504, "train_speed(iter/s)": 0.67096 }, { "epoch": 0.764534510089542, "grad_norm": 3.6668355464935303, "learning_rate": 9.434614724792103e-05, "loss": 2.4512901306152344, "memory(GiB)": 72.85, "step": 17845, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.670996 }, { "epoch": 0.764748725418791, "grad_norm": 3.1829874515533447, "learning_rate": 9.434303824693765e-05, 
"loss": 2.2875720977783205, "memory(GiB)": 72.85, "step": 17850, "token_acc": 0.5150375939849624, "train_speed(iter/s)": 0.67098 }, { "epoch": 0.7649629407480399, "grad_norm": 3.8754608631134033, "learning_rate": 9.433992844263853e-05, "loss": 2.437900924682617, "memory(GiB)": 72.85, "step": 17855, "token_acc": 0.4734982332155477, "train_speed(iter/s)": 0.670989 }, { "epoch": 0.7651771560772889, "grad_norm": 3.5489673614501953, "learning_rate": 9.433681783508001e-05, "loss": 2.3253156661987306, "memory(GiB)": 72.85, "step": 17860, "token_acc": 0.5, "train_speed(iter/s)": 0.670974 }, { "epoch": 0.7653913714065379, "grad_norm": 3.1224772930145264, "learning_rate": 9.433370642431846e-05, "loss": 2.582100677490234, "memory(GiB)": 72.85, "step": 17865, "token_acc": 0.4652567975830816, "train_speed(iter/s)": 0.670964 }, { "epoch": 0.7656055867357868, "grad_norm": 5.022916316986084, "learning_rate": 9.433059421041022e-05, "loss": 2.3931354522705077, "memory(GiB)": 72.85, "step": 17870, "token_acc": 0.47232472324723246, "train_speed(iter/s)": 0.670965 }, { "epoch": 0.7658198020650357, "grad_norm": 4.616805076599121, "learning_rate": 9.432748119341168e-05, "loss": 2.4836355209350587, "memory(GiB)": 72.85, "step": 17875, "token_acc": 0.45955882352941174, "train_speed(iter/s)": 0.671002 }, { "epoch": 0.7660340173942848, "grad_norm": 3.306670904159546, "learning_rate": 9.432436737337922e-05, "loss": 2.664718437194824, "memory(GiB)": 72.85, "step": 17880, "token_acc": 0.4785276073619632, "train_speed(iter/s)": 0.671056 }, { "epoch": 0.7662482327235337, "grad_norm": 2.878769636154175, "learning_rate": 9.432125275036928e-05, "loss": 2.424881935119629, "memory(GiB)": 72.85, "step": 17885, "token_acc": 0.4745222929936306, "train_speed(iter/s)": 0.671047 }, { "epoch": 0.7664624480527826, "grad_norm": 3.6258738040924072, "learning_rate": 9.431813732443827e-05, "loss": 2.181350517272949, "memory(GiB)": 72.85, "step": 17890, "token_acc": 0.5097087378640777, "train_speed(iter/s)": 
0.671023 }, { "epoch": 0.7666766633820317, "grad_norm": 4.36349630355835, "learning_rate": 9.431502109564263e-05, "loss": 2.3195871353149413, "memory(GiB)": 72.85, "step": 17895, "token_acc": 0.47280334728033474, "train_speed(iter/s)": 0.671019 }, { "epoch": 0.7668908787112806, "grad_norm": 3.562161684036255, "learning_rate": 9.43119040640388e-05, "loss": 2.445933151245117, "memory(GiB)": 72.85, "step": 17900, "token_acc": 0.46551724137931033, "train_speed(iter/s)": 0.671051 }, { "epoch": 0.7671050940405295, "grad_norm": 3.1010990142822266, "learning_rate": 9.430878622968328e-05, "loss": 2.5871536254882814, "memory(GiB)": 72.85, "step": 17905, "token_acc": 0.44508670520231214, "train_speed(iter/s)": 0.671068 }, { "epoch": 0.7673193093697785, "grad_norm": 2.972071886062622, "learning_rate": 9.430566759263251e-05, "loss": 2.4278932571411134, "memory(GiB)": 72.85, "step": 17910, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.671026 }, { "epoch": 0.7675335246990275, "grad_norm": 3.506941795349121, "learning_rate": 9.430254815294303e-05, "loss": 2.5303354263305664, "memory(GiB)": 72.85, "step": 17915, "token_acc": 0.4790996784565916, "train_speed(iter/s)": 0.671017 }, { "epoch": 0.7677477400282764, "grad_norm": 3.2869744300842285, "learning_rate": 9.429942791067132e-05, "loss": 2.3854766845703126, "memory(GiB)": 72.85, "step": 17920, "token_acc": 0.5015576323987538, "train_speed(iter/s)": 0.671037 }, { "epoch": 0.7679619553575254, "grad_norm": 3.519836187362671, "learning_rate": 9.429630686587392e-05, "loss": 2.3367008209228515, "memory(GiB)": 72.85, "step": 17925, "token_acc": 0.4962686567164179, "train_speed(iter/s)": 0.671028 }, { "epoch": 0.7681761706867744, "grad_norm": 3.638266086578369, "learning_rate": 9.429318501860737e-05, "loss": 2.7874897003173826, "memory(GiB)": 72.85, "step": 17930, "token_acc": 0.4161290322580645, "train_speed(iter/s)": 0.671057 }, { "epoch": 0.7683903860160233, "grad_norm": 4.617353439331055, "learning_rate": 
9.429006236892821e-05, "loss": 2.534805679321289, "memory(GiB)": 72.85, "step": 17935, "token_acc": 0.4758364312267658, "train_speed(iter/s)": 0.671002 }, { "epoch": 0.7686046013452723, "grad_norm": 3.6088290214538574, "learning_rate": 9.428693891689304e-05, "loss": 2.404662322998047, "memory(GiB)": 72.85, "step": 17940, "token_acc": 0.4984520123839009, "train_speed(iter/s)": 0.671063 }, { "epoch": 0.7688188166745212, "grad_norm": 3.7424349784851074, "learning_rate": 9.428381466255842e-05, "loss": 2.4018779754638673, "memory(GiB)": 72.85, "step": 17945, "token_acc": 0.5055762081784386, "train_speed(iter/s)": 0.671066 }, { "epoch": 0.7690330320037702, "grad_norm": 4.332935333251953, "learning_rate": 9.428068960598096e-05, "loss": 2.110648536682129, "memory(GiB)": 72.85, "step": 17950, "token_acc": 0.5058823529411764, "train_speed(iter/s)": 0.671071 }, { "epoch": 0.7692472473330192, "grad_norm": 3.5080673694610596, "learning_rate": 9.427756374721726e-05, "loss": 2.3941516876220703, "memory(GiB)": 72.85, "step": 17955, "token_acc": 0.4612794612794613, "train_speed(iter/s)": 0.671086 }, { "epoch": 0.7694614626622681, "grad_norm": 3.5819971561431885, "learning_rate": 9.427443708632396e-05, "loss": 2.254843902587891, "memory(GiB)": 72.85, "step": 17960, "token_acc": 0.5, "train_speed(iter/s)": 0.671106 }, { "epoch": 0.769675677991517, "grad_norm": 3.6830971240997314, "learning_rate": 9.427130962335771e-05, "loss": 2.568659782409668, "memory(GiB)": 72.85, "step": 17965, "token_acc": 0.4776536312849162, "train_speed(iter/s)": 0.671139 }, { "epoch": 0.7698898933207661, "grad_norm": 3.367541551589966, "learning_rate": 9.426818135837513e-05, "loss": 2.270307731628418, "memory(GiB)": 72.85, "step": 17970, "token_acc": 0.4889705882352941, "train_speed(iter/s)": 0.67116 }, { "epoch": 0.770104108650015, "grad_norm": 4.056066036224365, "learning_rate": 9.426505229143294e-05, "loss": 2.4567964553833006, "memory(GiB)": 72.85, "step": 17975, "token_acc": 0.4823943661971831, 
"train_speed(iter/s)": 0.671198 }, { "epoch": 0.7703183239792639, "grad_norm": 4.1082353591918945, "learning_rate": 9.42619224225878e-05, "loss": 2.2603918075561524, "memory(GiB)": 72.85, "step": 17980, "token_acc": 0.4911242603550296, "train_speed(iter/s)": 0.671216 }, { "epoch": 0.770532539308513, "grad_norm": 3.870359420776367, "learning_rate": 9.425879175189643e-05, "loss": 2.548391342163086, "memory(GiB)": 72.85, "step": 17985, "token_acc": 0.444794952681388, "train_speed(iter/s)": 0.67119 }, { "epoch": 0.7707467546377619, "grad_norm": 5.431798934936523, "learning_rate": 9.42556602794155e-05, "loss": 2.449981689453125, "memory(GiB)": 72.85, "step": 17990, "token_acc": 0.4807121661721068, "train_speed(iter/s)": 0.671168 }, { "epoch": 0.7709609699670108, "grad_norm": 3.41998553276062, "learning_rate": 9.42525280052018e-05, "loss": 2.7926082611083984, "memory(GiB)": 72.85, "step": 17995, "token_acc": 0.42258064516129035, "train_speed(iter/s)": 0.671185 }, { "epoch": 0.7711751852962598, "grad_norm": 3.6291298866271973, "learning_rate": 9.4249394929312e-05, "loss": 2.050331687927246, "memory(GiB)": 72.85, "step": 18000, "token_acc": 0.5670103092783505, "train_speed(iter/s)": 0.671192 }, { "epoch": 0.7711751852962598, "eval_loss": 2.2075719833374023, "eval_runtime": 16.4042, "eval_samples_per_second": 6.096, "eval_steps_per_second": 6.096, "eval_token_acc": 0.476775956284153, "step": 18000 }, { "epoch": 0.7713894006255088, "grad_norm": 3.4805970191955566, "learning_rate": 9.424626105180294e-05, "loss": 2.529981231689453, "memory(GiB)": 72.85, "step": 18005, "token_acc": 0.47878787878787876, "train_speed(iter/s)": 0.670678 }, { "epoch": 0.7716036159547577, "grad_norm": 3.304752826690674, "learning_rate": 9.424312637273134e-05, "loss": 2.796855354309082, "memory(GiB)": 72.85, "step": 18010, "token_acc": 0.42771084337349397, "train_speed(iter/s)": 0.670636 }, { "epoch": 0.7718178312840067, "grad_norm": 3.4710748195648193, "learning_rate": 9.4239990892154e-05, "loss": 
2.388579177856445, "memory(GiB)": 72.85, "step": 18015, "token_acc": 0.44642857142857145, "train_speed(iter/s)": 0.670661 }, { "epoch": 0.7720320466132556, "grad_norm": 4.6247239112854, "learning_rate": 9.423685461012773e-05, "loss": 2.6498506546020506, "memory(GiB)": 72.85, "step": 18020, "token_acc": 0.47019867549668876, "train_speed(iter/s)": 0.67064 }, { "epoch": 0.7722462619425046, "grad_norm": 3.390305757522583, "learning_rate": 9.423371752670933e-05, "loss": 2.329923629760742, "memory(GiB)": 72.85, "step": 18025, "token_acc": 0.4897360703812317, "train_speed(iter/s)": 0.670637 }, { "epoch": 0.7724604772717536, "grad_norm": 4.111706733703613, "learning_rate": 9.423057964195565e-05, "loss": 2.4581043243408205, "memory(GiB)": 72.85, "step": 18030, "token_acc": 0.4775641025641026, "train_speed(iter/s)": 0.670642 }, { "epoch": 0.7726746926010025, "grad_norm": 3.0846917629241943, "learning_rate": 9.422744095592354e-05, "loss": 2.342173767089844, "memory(GiB)": 72.85, "step": 18035, "token_acc": 0.4781144781144781, "train_speed(iter/s)": 0.670666 }, { "epoch": 0.7728889079302514, "grad_norm": 5.903794288635254, "learning_rate": 9.422430146866982e-05, "loss": 2.4146503448486327, "memory(GiB)": 72.85, "step": 18040, "token_acc": 0.4664179104477612, "train_speed(iter/s)": 0.670714 }, { "epoch": 0.7731031232595005, "grad_norm": 2.668710470199585, "learning_rate": 9.422116118025143e-05, "loss": 2.2292716979980467, "memory(GiB)": 72.85, "step": 18045, "token_acc": 0.5374592833876222, "train_speed(iter/s)": 0.670684 }, { "epoch": 0.7733173385887494, "grad_norm": 3.3895297050476074, "learning_rate": 9.42180200907252e-05, "loss": 2.4136987686157227, "memory(GiB)": 72.85, "step": 18050, "token_acc": 0.46612466124661245, "train_speed(iter/s)": 0.670665 }, { "epoch": 0.7735315539179983, "grad_norm": 4.556038856506348, "learning_rate": 9.421487820014806e-05, "loss": 1.9448677062988282, "memory(GiB)": 72.85, "step": 18055, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 
0.670667 }, { "epoch": 0.7737457692472474, "grad_norm": 4.194604873657227, "learning_rate": 9.421173550857692e-05, "loss": 2.512848663330078, "memory(GiB)": 72.85, "step": 18060, "token_acc": 0.4623287671232877, "train_speed(iter/s)": 0.670606 }, { "epoch": 0.7739599845764963, "grad_norm": 3.2912230491638184, "learning_rate": 9.420859201606873e-05, "loss": 2.3476099014282226, "memory(GiB)": 72.85, "step": 18065, "token_acc": 0.5078864353312302, "train_speed(iter/s)": 0.670593 }, { "epoch": 0.7741741999057452, "grad_norm": 5.338796615600586, "learning_rate": 9.42054477226804e-05, "loss": 2.042774200439453, "memory(GiB)": 72.85, "step": 18070, "token_acc": 0.5310077519379846, "train_speed(iter/s)": 0.670593 }, { "epoch": 0.7743884152349942, "grad_norm": 3.578627824783325, "learning_rate": 9.420230262846895e-05, "loss": 2.3975139617919923, "memory(GiB)": 72.85, "step": 18075, "token_acc": 0.5109717868338558, "train_speed(iter/s)": 0.670646 }, { "epoch": 0.7746026305642432, "grad_norm": 3.924243927001953, "learning_rate": 9.41991567334913e-05, "loss": 2.3937110900878906, "memory(GiB)": 72.85, "step": 18080, "token_acc": 0.532871972318339, "train_speed(iter/s)": 0.670673 }, { "epoch": 0.7748168458934921, "grad_norm": 3.079657554626465, "learning_rate": 9.419601003780447e-05, "loss": 2.7183761596679688, "memory(GiB)": 72.85, "step": 18085, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.670704 }, { "epoch": 0.7750310612227411, "grad_norm": 2.842067241668701, "learning_rate": 9.419286254146545e-05, "loss": 2.6654563903808595, "memory(GiB)": 72.85, "step": 18090, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.670718 }, { "epoch": 0.77524527655199, "grad_norm": 4.026620388031006, "learning_rate": 9.418971424453128e-05, "loss": 2.688328170776367, "memory(GiB)": 72.85, "step": 18095, "token_acc": 0.4646840148698885, "train_speed(iter/s)": 0.670745 }, { "epoch": 0.775459491881239, "grad_norm": 4.544774532318115, "learning_rate": 9.418656514705898e-05, 
"loss": 2.113072967529297, "memory(GiB)": 72.85, "step": 18100, "token_acc": 0.5056179775280899, "train_speed(iter/s)": 0.670768 }, { "epoch": 0.775673707210488, "grad_norm": 4.03330659866333, "learning_rate": 9.418341524910561e-05, "loss": 2.456378364562988, "memory(GiB)": 72.85, "step": 18105, "token_acc": 0.4559386973180077, "train_speed(iter/s)": 0.670811 }, { "epoch": 0.7758879225397369, "grad_norm": 3.3623266220092773, "learning_rate": 9.418026455072822e-05, "loss": 2.445050048828125, "memory(GiB)": 72.85, "step": 18110, "token_acc": 0.47572815533980584, "train_speed(iter/s)": 0.67076 }, { "epoch": 0.7761021378689859, "grad_norm": 2.977806806564331, "learning_rate": 9.417711305198387e-05, "loss": 2.3469364166259767, "memory(GiB)": 72.85, "step": 18115, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.670758 }, { "epoch": 0.7763163531982349, "grad_norm": 3.8142385482788086, "learning_rate": 9.417396075292971e-05, "loss": 2.4705699920654296, "memory(GiB)": 72.85, "step": 18120, "token_acc": 0.45222929936305734, "train_speed(iter/s)": 0.670765 }, { "epoch": 0.7765305685274838, "grad_norm": 3.3038203716278076, "learning_rate": 9.417080765362279e-05, "loss": 2.3627113342285155, "memory(GiB)": 72.85, "step": 18125, "token_acc": 0.5192878338278932, "train_speed(iter/s)": 0.670729 }, { "epoch": 0.7767447838567327, "grad_norm": 4.157281398773193, "learning_rate": 9.416765375412026e-05, "loss": 2.6098678588867186, "memory(GiB)": 72.85, "step": 18130, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.670747 }, { "epoch": 0.7769589991859818, "grad_norm": 4.006180763244629, "learning_rate": 9.416449905447926e-05, "loss": 2.3254343032836915, "memory(GiB)": 72.85, "step": 18135, "token_acc": 0.4902597402597403, "train_speed(iter/s)": 0.670743 }, { "epoch": 0.7771732145152307, "grad_norm": 3.158566951751709, "learning_rate": 9.416134355475692e-05, "loss": 2.5586963653564454, "memory(GiB)": 72.85, "step": 18140, "token_acc": 0.47147147147147145, 
"train_speed(iter/s)": 0.67072 }, { "epoch": 0.7773874298444796, "grad_norm": 3.5183193683624268, "learning_rate": 9.415818725501042e-05, "loss": 2.4852537155151366, "memory(GiB)": 72.85, "step": 18145, "token_acc": 0.43884892086330934, "train_speed(iter/s)": 0.67067 }, { "epoch": 0.7776016451737287, "grad_norm": 3.4852144718170166, "learning_rate": 9.415503015529693e-05, "loss": 2.366090774536133, "memory(GiB)": 72.85, "step": 18150, "token_acc": 0.5114754098360655, "train_speed(iter/s)": 0.670696 }, { "epoch": 0.7778158605029776, "grad_norm": 2.9439074993133545, "learning_rate": 9.415187225567364e-05, "loss": 2.8003122329711916, "memory(GiB)": 72.85, "step": 18155, "token_acc": 0.4702194357366771, "train_speed(iter/s)": 0.670753 }, { "epoch": 0.7780300758322266, "grad_norm": 4.354331016540527, "learning_rate": 9.414934536007841e-05, "loss": 2.5501047134399415, "memory(GiB)": 72.85, "step": 18160, "token_acc": 0.46688741721854304, "train_speed(iter/s)": 0.67075 }, { "epoch": 0.7782442911614755, "grad_norm": 3.298708200454712, "learning_rate": 9.414618602076169e-05, "loss": 2.3409049987792967, "memory(GiB)": 72.85, "step": 18165, "token_acc": 0.48120300751879697, "train_speed(iter/s)": 0.670741 }, { "epoch": 0.7784585064907245, "grad_norm": 4.361166477203369, "learning_rate": 9.414302588169538e-05, "loss": 2.5524566650390623, "memory(GiB)": 72.85, "step": 18170, "token_acc": 0.445993031358885, "train_speed(iter/s)": 0.670751 }, { "epoch": 0.7786727218199735, "grad_norm": 3.961848497390747, "learning_rate": 9.413986494293675e-05, "loss": 2.587220001220703, "memory(GiB)": 72.85, "step": 18175, "token_acc": 0.46579804560260585, "train_speed(iter/s)": 0.67073 }, { "epoch": 0.7788869371492224, "grad_norm": 3.3345224857330322, "learning_rate": 9.413670320454307e-05, "loss": 2.1998443603515625, "memory(GiB)": 72.85, "step": 18180, "token_acc": 0.518796992481203, "train_speed(iter/s)": 0.670664 }, { "epoch": 0.7791011524784713, "grad_norm": 2.9284732341766357, 
"learning_rate": 9.413354066657159e-05, "loss": 2.5030006408691405, "memory(GiB)": 72.85, "step": 18185, "token_acc": 0.4577259475218659, "train_speed(iter/s)": 0.670614 }, { "epoch": 0.7793153678077204, "grad_norm": 4.002991676330566, "learning_rate": 9.413037732907964e-05, "loss": 2.3270172119140624, "memory(GiB)": 72.85, "step": 18190, "token_acc": 0.5164835164835165, "train_speed(iter/s)": 0.670671 }, { "epoch": 0.7795295831369693, "grad_norm": 3.6034128665924072, "learning_rate": 9.412721319212449e-05, "loss": 2.442499351501465, "memory(GiB)": 72.85, "step": 18195, "token_acc": 0.46689895470383275, "train_speed(iter/s)": 0.670707 }, { "epoch": 0.7797437984662182, "grad_norm": 4.734498500823975, "learning_rate": 9.412404825576349e-05, "loss": 2.4574804306030273, "memory(GiB)": 72.85, "step": 18200, "token_acc": 0.49794238683127573, "train_speed(iter/s)": 0.670717 }, { "epoch": 0.7799580137954673, "grad_norm": 4.167382717132568, "learning_rate": 9.412088252005396e-05, "loss": 2.6075664520263673, "memory(GiB)": 72.85, "step": 18205, "token_acc": 0.4728434504792332, "train_speed(iter/s)": 0.670741 }, { "epoch": 0.7801722291247162, "grad_norm": 2.9596965312957764, "learning_rate": 9.411771598505326e-05, "loss": 2.4034679412841795, "memory(GiB)": 72.85, "step": 18210, "token_acc": 0.48059701492537316, "train_speed(iter/s)": 0.670731 }, { "epoch": 0.7803864444539651, "grad_norm": 3.5742053985595703, "learning_rate": 9.411454865081873e-05, "loss": 2.246723937988281, "memory(GiB)": 72.85, "step": 18215, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.670776 }, { "epoch": 0.7806006597832141, "grad_norm": 4.075145244598389, "learning_rate": 9.411138051740779e-05, "loss": 2.678899955749512, "memory(GiB)": 72.85, "step": 18220, "token_acc": 0.4702194357366771, "train_speed(iter/s)": 0.670779 }, { "epoch": 0.7808148751124631, "grad_norm": 3.0733118057250977, "learning_rate": 9.410821158487779e-05, "loss": 2.3987071990966795, "memory(GiB)": 72.85, "step": 18225, 
"token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.670785 }, { "epoch": 0.781029090441712, "grad_norm": 4.31622838973999, "learning_rate": 9.410504185328618e-05, "loss": 2.2611553192138674, "memory(GiB)": 72.85, "step": 18230, "token_acc": 0.49609375, "train_speed(iter/s)": 0.670805 }, { "epoch": 0.781243305770961, "grad_norm": 3.781646490097046, "learning_rate": 9.410187132269036e-05, "loss": 2.2939430236816407, "memory(GiB)": 72.85, "step": 18235, "token_acc": 0.5049180327868853, "train_speed(iter/s)": 0.670789 }, { "epoch": 0.7814575211002099, "grad_norm": 5.285984039306641, "learning_rate": 9.409869999314776e-05, "loss": 2.6960033416748046, "memory(GiB)": 72.85, "step": 18240, "token_acc": 0.4296028880866426, "train_speed(iter/s)": 0.67084 }, { "epoch": 0.7816717364294589, "grad_norm": 3.3666329383850098, "learning_rate": 9.409552786471585e-05, "loss": 2.493722915649414, "memory(GiB)": 72.85, "step": 18245, "token_acc": 0.4723127035830619, "train_speed(iter/s)": 0.670868 }, { "epoch": 0.7818859517587079, "grad_norm": 3.919952869415283, "learning_rate": 9.40923549374521e-05, "loss": 2.345316696166992, "memory(GiB)": 72.85, "step": 18250, "token_acc": 0.46923076923076923, "train_speed(iter/s)": 0.670878 }, { "epoch": 0.7821001670879568, "grad_norm": 3.520833969116211, "learning_rate": 9.408918121141396e-05, "loss": 2.41247673034668, "memory(GiB)": 72.85, "step": 18255, "token_acc": 0.4954954954954955, "train_speed(iter/s)": 0.670938 }, { "epoch": 0.7823143824172057, "grad_norm": 3.608433961868286, "learning_rate": 9.408600668665895e-05, "loss": 2.1886001586914063, "memory(GiB)": 72.85, "step": 18260, "token_acc": 0.5381944444444444, "train_speed(iter/s)": 0.67095 }, { "epoch": 0.7825285977464548, "grad_norm": 3.5678138732910156, "learning_rate": 9.408283136324456e-05, "loss": 2.425633430480957, "memory(GiB)": 72.85, "step": 18265, "token_acc": 0.4612546125461255, "train_speed(iter/s)": 0.670989 }, { "epoch": 0.7827428130757037, "grad_norm": 
3.838510036468506, "learning_rate": 9.407965524122835e-05, "loss": 2.6147296905517576, "memory(GiB)": 72.85, "step": 18270, "token_acc": 0.4662379421221865, "train_speed(iter/s)": 0.671037 }, { "epoch": 0.7829570284049526, "grad_norm": 4.550560474395752, "learning_rate": 9.407647832066782e-05, "loss": 2.7060623168945312, "memory(GiB)": 72.85, "step": 18275, "token_acc": 0.46938775510204084, "train_speed(iter/s)": 0.671035 }, { "epoch": 0.7831712437342017, "grad_norm": 3.7682671546936035, "learning_rate": 9.407330060162055e-05, "loss": 2.2404447555541993, "memory(GiB)": 72.85, "step": 18280, "token_acc": 0.5016077170418006, "train_speed(iter/s)": 0.671037 }, { "epoch": 0.7833854590634506, "grad_norm": 3.867647171020508, "learning_rate": 9.407012208414408e-05, "loss": 2.3133865356445313, "memory(GiB)": 72.85, "step": 18285, "token_acc": 0.4967532467532468, "train_speed(iter/s)": 0.671057 }, { "epoch": 0.7835996743926995, "grad_norm": 2.7535712718963623, "learning_rate": 9.406694276829602e-05, "loss": 2.265900993347168, "memory(GiB)": 72.85, "step": 18290, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.671044 }, { "epoch": 0.7838138897219485, "grad_norm": 2.768428325653076, "learning_rate": 9.406376265413395e-05, "loss": 2.4075342178344727, "memory(GiB)": 72.85, "step": 18295, "token_acc": 0.4888268156424581, "train_speed(iter/s)": 0.671031 }, { "epoch": 0.7840281050511975, "grad_norm": 4.089361667633057, "learning_rate": 9.406058174171547e-05, "loss": 2.455349159240723, "memory(GiB)": 72.85, "step": 18300, "token_acc": 0.4931506849315068, "train_speed(iter/s)": 0.671032 }, { "epoch": 0.7842423203804464, "grad_norm": 4.314731597900391, "learning_rate": 9.405740003109824e-05, "loss": 2.696817970275879, "memory(GiB)": 72.85, "step": 18305, "token_acc": 0.4497991967871486, "train_speed(iter/s)": 0.67094 }, { "epoch": 0.7844565357096954, "grad_norm": 4.931824207305908, "learning_rate": 9.405421752233987e-05, "loss": 2.4153682708740236, "memory(GiB)": 72.85, 
"step": 18310, "token_acc": 0.5234375, "train_speed(iter/s)": 0.670955 }, { "epoch": 0.7846707510389443, "grad_norm": 4.495333671569824, "learning_rate": 9.405103421549801e-05, "loss": 2.4578907012939455, "memory(GiB)": 72.85, "step": 18315, "token_acc": 0.4576923076923077, "train_speed(iter/s)": 0.670923 }, { "epoch": 0.7848849663681933, "grad_norm": 3.5154995918273926, "learning_rate": 9.404785011063036e-05, "loss": 2.389862060546875, "memory(GiB)": 72.85, "step": 18320, "token_acc": 0.49127906976744184, "train_speed(iter/s)": 0.670899 }, { "epoch": 0.7850991816974423, "grad_norm": 3.8814194202423096, "learning_rate": 9.404466520779458e-05, "loss": 2.5453920364379883, "memory(GiB)": 72.85, "step": 18325, "token_acc": 0.47194719471947194, "train_speed(iter/s)": 0.670893 }, { "epoch": 0.7853133970266912, "grad_norm": 4.7421417236328125, "learning_rate": 9.404147950704838e-05, "loss": 2.230251121520996, "memory(GiB)": 72.85, "step": 18330, "token_acc": 0.5228215767634855, "train_speed(iter/s)": 0.670933 }, { "epoch": 0.7855276123559402, "grad_norm": 4.655930042266846, "learning_rate": 9.403829300844946e-05, "loss": 2.5517601013183593, "memory(GiB)": 72.85, "step": 18335, "token_acc": 0.4837758112094395, "train_speed(iter/s)": 0.670932 }, { "epoch": 0.7857418276851892, "grad_norm": 7.394059181213379, "learning_rate": 9.403510571205555e-05, "loss": 2.586776351928711, "memory(GiB)": 72.85, "step": 18340, "token_acc": 0.48109965635738833, "train_speed(iter/s)": 0.670984 }, { "epoch": 0.7859560430144381, "grad_norm": 3.5088422298431396, "learning_rate": 9.403191761792438e-05, "loss": 2.5822750091552735, "memory(GiB)": 72.85, "step": 18345, "token_acc": 0.4934640522875817, "train_speed(iter/s)": 0.670994 }, { "epoch": 0.786170258343687, "grad_norm": 3.7606658935546875, "learning_rate": 9.402872872611373e-05, "loss": 2.5610565185546874, "memory(GiB)": 72.85, "step": 18350, "token_acc": 0.47333333333333333, "train_speed(iter/s)": 0.671004 }, { "epoch": 0.7863844736729361, 
"grad_norm": 4.064438343048096, "learning_rate": 9.402553903668136e-05, "loss": 2.304425811767578, "memory(GiB)": 72.85, "step": 18355, "token_acc": 0.5015290519877675, "train_speed(iter/s)": 0.671014 }, { "epoch": 0.786598689002185, "grad_norm": 3.0354087352752686, "learning_rate": 9.402234854968505e-05, "loss": 2.9429731369018555, "memory(GiB)": 72.85, "step": 18360, "token_acc": 0.45051194539249145, "train_speed(iter/s)": 0.671039 }, { "epoch": 0.7868129043314339, "grad_norm": 4.131528377532959, "learning_rate": 9.40191572651826e-05, "loss": 2.8141876220703126, "memory(GiB)": 72.85, "step": 18365, "token_acc": 0.43356643356643354, "train_speed(iter/s)": 0.671047 }, { "epoch": 0.787027119660683, "grad_norm": 3.5949947834014893, "learning_rate": 9.401596518323181e-05, "loss": 2.295661735534668, "memory(GiB)": 72.85, "step": 18370, "token_acc": 0.5202702702702703, "train_speed(iter/s)": 0.67104 }, { "epoch": 0.7872413349899319, "grad_norm": 3.7996811866760254, "learning_rate": 9.401277230389054e-05, "loss": 2.396332931518555, "memory(GiB)": 72.85, "step": 18375, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.671022 }, { "epoch": 0.7874555503191808, "grad_norm": 3.0075671672821045, "learning_rate": 9.40095786272166e-05, "loss": 2.466794967651367, "memory(GiB)": 72.85, "step": 18380, "token_acc": 0.49560117302052786, "train_speed(iter/s)": 0.67099 }, { "epoch": 0.7876697656484298, "grad_norm": 4.109794616699219, "learning_rate": 9.400638415326785e-05, "loss": 2.374326705932617, "memory(GiB)": 72.85, "step": 18385, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.67098 }, { "epoch": 0.7878839809776788, "grad_norm": 4.130763530731201, "learning_rate": 9.400318888210218e-05, "loss": 2.3987497329711913, "memory(GiB)": 72.85, "step": 18390, "token_acc": 0.45318352059925093, "train_speed(iter/s)": 0.671001 }, { "epoch": 0.7880981963069277, "grad_norm": 3.3987929821014404, "learning_rate": 9.399999281377747e-05, "loss": 2.490793991088867, "memory(GiB)": 
72.85, "step": 18395, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.671043 }, { "epoch": 0.7883124116361767, "grad_norm": 4.784444332122803, "learning_rate": 9.399679594835161e-05, "loss": 2.3655170440673827, "memory(GiB)": 72.85, "step": 18400, "token_acc": 0.46863468634686345, "train_speed(iter/s)": 0.671066 }, { "epoch": 0.7885266269654256, "grad_norm": 4.007525444030762, "learning_rate": 9.399359828588253e-05, "loss": 2.4995574951171875, "memory(GiB)": 72.85, "step": 18405, "token_acc": 0.5300353356890459, "train_speed(iter/s)": 0.671084 }, { "epoch": 0.7887408422946746, "grad_norm": 5.073566436767578, "learning_rate": 9.399039982642812e-05, "loss": 2.449024772644043, "memory(GiB)": 72.85, "step": 18410, "token_acc": 0.4601449275362319, "train_speed(iter/s)": 0.671041 }, { "epoch": 0.7889550576239236, "grad_norm": 3.910858631134033, "learning_rate": 9.398720057004638e-05, "loss": 2.375087356567383, "memory(GiB)": 72.85, "step": 18415, "token_acc": 0.5095057034220533, "train_speed(iter/s)": 0.671041 }, { "epoch": 0.7891692729531725, "grad_norm": 3.188570022583008, "learning_rate": 9.398400051679521e-05, "loss": 2.6527532577514648, "memory(GiB)": 72.85, "step": 18420, "token_acc": 0.4396551724137931, "train_speed(iter/s)": 0.671022 }, { "epoch": 0.7893834882824214, "grad_norm": 2.945974349975586, "learning_rate": 9.398079966673263e-05, "loss": 2.4020339965820314, "memory(GiB)": 72.85, "step": 18425, "token_acc": 0.4778481012658228, "train_speed(iter/s)": 0.671012 }, { "epoch": 0.7895977036116705, "grad_norm": 3.964696168899536, "learning_rate": 9.397759801991661e-05, "loss": 2.4118165969848633, "memory(GiB)": 72.85, "step": 18430, "token_acc": 0.49310344827586206, "train_speed(iter/s)": 0.671024 }, { "epoch": 0.7898119189409194, "grad_norm": 3.576465368270874, "learning_rate": 9.397439557640513e-05, "loss": 2.367815399169922, "memory(GiB)": 72.85, "step": 18435, "token_acc": 0.4842105263157895, "train_speed(iter/s)": 0.671034 }, { "epoch": 
0.7900261342701683, "grad_norm": 4.576586723327637, "learning_rate": 9.397119233625624e-05, "loss": 2.402269744873047, "memory(GiB)": 72.85, "step": 18440, "token_acc": 0.4816326530612245, "train_speed(iter/s)": 0.671056 }, { "epoch": 0.7902403495994174, "grad_norm": 4.398784160614014, "learning_rate": 9.396798829952794e-05, "loss": 2.4462120056152346, "memory(GiB)": 72.85, "step": 18445, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.671072 }, { "epoch": 0.7904545649286663, "grad_norm": 3.1285815238952637, "learning_rate": 9.39647834662783e-05, "loss": 2.0665765762329102, "memory(GiB)": 72.85, "step": 18450, "token_acc": 0.5318352059925093, "train_speed(iter/s)": 0.671096 }, { "epoch": 0.7906687802579152, "grad_norm": 4.995850563049316, "learning_rate": 9.396157783656536e-05, "loss": 2.5335941314697266, "memory(GiB)": 72.85, "step": 18455, "token_acc": 0.46839080459770116, "train_speed(iter/s)": 0.671077 }, { "epoch": 0.7908829955871642, "grad_norm": 3.3789420127868652, "learning_rate": 9.39583714104472e-05, "loss": 2.591202163696289, "memory(GiB)": 72.85, "step": 18460, "token_acc": 0.4539249146757679, "train_speed(iter/s)": 0.671101 }, { "epoch": 0.7910972109164132, "grad_norm": 3.198176145553589, "learning_rate": 9.395516418798189e-05, "loss": 2.6706941604614256, "memory(GiB)": 72.85, "step": 18465, "token_acc": 0.47109826589595377, "train_speed(iter/s)": 0.671133 }, { "epoch": 0.7913114262456621, "grad_norm": 3.5155112743377686, "learning_rate": 9.395195616922756e-05, "loss": 2.4904321670532226, "memory(GiB)": 72.85, "step": 18470, "token_acc": 0.4491803278688525, "train_speed(iter/s)": 0.671127 }, { "epoch": 0.7915256415749111, "grad_norm": 3.132952928543091, "learning_rate": 9.394874735424231e-05, "loss": 2.50723876953125, "memory(GiB)": 72.85, "step": 18475, "token_acc": 0.4692737430167598, "train_speed(iter/s)": 0.67115 }, { "epoch": 0.79173985690416, "grad_norm": 2.7254979610443115, "learning_rate": 9.394553774308428e-05, "loss": 
2.3734344482421874, "memory(GiB)": 72.85, "step": 18480, "token_acc": 0.45081967213114754, "train_speed(iter/s)": 0.671226 }, { "epoch": 0.791954072233409, "grad_norm": 3.447327136993408, "learning_rate": 9.39423273358116e-05, "loss": 2.3206729888916016, "memory(GiB)": 72.85, "step": 18485, "token_acc": 0.512987012987013, "train_speed(iter/s)": 0.671241 }, { "epoch": 0.792168287562658, "grad_norm": 4.412710189819336, "learning_rate": 9.393911613248245e-05, "loss": 2.5672096252441405, "memory(GiB)": 72.85, "step": 18490, "token_acc": 0.47297297297297297, "train_speed(iter/s)": 0.671241 }, { "epoch": 0.7923825028919069, "grad_norm": 3.538182258605957, "learning_rate": 9.393590413315498e-05, "loss": 2.4399057388305665, "memory(GiB)": 72.85, "step": 18495, "token_acc": 0.49612403100775193, "train_speed(iter/s)": 0.671198 }, { "epoch": 0.792596718221156, "grad_norm": 5.7265777587890625, "learning_rate": 9.393269133788742e-05, "loss": 2.2214687347412108, "memory(GiB)": 72.85, "step": 18500, "token_acc": 0.5363984674329502, "train_speed(iter/s)": 0.671243 }, { "epoch": 0.792596718221156, "eval_loss": 2.1601500511169434, "eval_runtime": 16.704, "eval_samples_per_second": 5.987, "eval_steps_per_second": 5.987, "eval_token_acc": 0.48860759493670886, "step": 18500 }, { "epoch": 0.7928109335504049, "grad_norm": 3.1356239318847656, "learning_rate": 9.392947774673791e-05, "loss": 2.4640106201171874, "memory(GiB)": 72.85, "step": 18505, "token_acc": 0.4821264894592117, "train_speed(iter/s)": 0.670795 }, { "epoch": 0.7930251488796538, "grad_norm": 5.5854926109313965, "learning_rate": 9.392626335976472e-05, "loss": 2.459165573120117, "memory(GiB)": 72.85, "step": 18510, "token_acc": 0.5164835164835165, "train_speed(iter/s)": 0.670787 }, { "epoch": 0.7932393642089028, "grad_norm": 3.651430368423462, "learning_rate": 9.392304817702606e-05, "loss": 2.263304901123047, "memory(GiB)": 72.85, "step": 18515, "token_acc": 0.5015479876160991, "train_speed(iter/s)": 0.6708 }, { "epoch": 
0.7934535795381518, "grad_norm": 3.386854410171509, "learning_rate": 9.391983219858018e-05, "loss": 2.487293243408203, "memory(GiB)": 72.85, "step": 18520, "token_acc": 0.4707692307692308, "train_speed(iter/s)": 0.670786 }, { "epoch": 0.7936677948674007, "grad_norm": 2.910959005355835, "learning_rate": 9.391661542448533e-05, "loss": 2.347468376159668, "memory(GiB)": 72.85, "step": 18525, "token_acc": 0.49038461538461536, "train_speed(iter/s)": 0.67078 }, { "epoch": 0.7938820101966497, "grad_norm": 3.987398624420166, "learning_rate": 9.39133978547998e-05, "loss": 2.325409698486328, "memory(GiB)": 72.85, "step": 18530, "token_acc": 0.4982078853046595, "train_speed(iter/s)": 0.670802 }, { "epoch": 0.7940962255258986, "grad_norm": 3.0138769149780273, "learning_rate": 9.391017948958189e-05, "loss": 2.4375499725341796, "memory(GiB)": 72.85, "step": 18535, "token_acc": 0.47335423197492166, "train_speed(iter/s)": 0.67083 }, { "epoch": 0.7943104408551476, "grad_norm": 3.258871078491211, "learning_rate": 9.390696032888986e-05, "loss": 2.1874038696289064, "memory(GiB)": 72.85, "step": 18540, "token_acc": 0.5209790209790209, "train_speed(iter/s)": 0.670871 }, { "epoch": 0.7945246561843966, "grad_norm": 3.9597320556640625, "learning_rate": 9.390374037278207e-05, "loss": 2.3543018341064452, "memory(GiB)": 72.85, "step": 18545, "token_acc": 0.475177304964539, "train_speed(iter/s)": 0.670891 }, { "epoch": 0.7947388715136455, "grad_norm": 4.7623162269592285, "learning_rate": 9.390051962131685e-05, "loss": 2.1098758697509767, "memory(GiB)": 72.85, "step": 18550, "token_acc": 0.5324074074074074, "train_speed(iter/s)": 0.670883 }, { "epoch": 0.7949530868428945, "grad_norm": 6.1056365966796875, "learning_rate": 9.389729807455252e-05, "loss": 2.5951366424560547, "memory(GiB)": 72.85, "step": 18555, "token_acc": 0.43523316062176165, "train_speed(iter/s)": 0.670896 }, { "epoch": 0.7951673021721435, "grad_norm": 4.289394855499268, "learning_rate": 9.389407573254745e-05, "loss": 
2.850641059875488, "memory(GiB)": 72.85, "step": 18560, "token_acc": 0.4402332361516035, "train_speed(iter/s)": 0.670942 }, { "epoch": 0.7953815175013924, "grad_norm": 3.4513368606567383, "learning_rate": 9.389085259536003e-05, "loss": 2.4918491363525392, "memory(GiB)": 72.85, "step": 18565, "token_acc": 0.4967532467532468, "train_speed(iter/s)": 0.670944 }, { "epoch": 0.7955957328306413, "grad_norm": 3.947906732559204, "learning_rate": 9.388762866304864e-05, "loss": 2.35519962310791, "memory(GiB)": 72.85, "step": 18570, "token_acc": 0.54375, "train_speed(iter/s)": 0.670968 }, { "epoch": 0.7958099481598904, "grad_norm": 2.706291437149048, "learning_rate": 9.388440393567169e-05, "loss": 2.458469009399414, "memory(GiB)": 72.85, "step": 18575, "token_acc": 0.4786885245901639, "train_speed(iter/s)": 0.670969 }, { "epoch": 0.7960241634891393, "grad_norm": 3.887188673019409, "learning_rate": 9.388117841328761e-05, "loss": 2.3318603515625, "memory(GiB)": 72.85, "step": 18580, "token_acc": 0.5050505050505051, "train_speed(iter/s)": 0.670987 }, { "epoch": 0.7962383788183882, "grad_norm": 3.877317190170288, "learning_rate": 9.387795209595479e-05, "loss": 2.5432723999023437, "memory(GiB)": 72.85, "step": 18585, "token_acc": 0.4559748427672956, "train_speed(iter/s)": 0.671023 }, { "epoch": 0.7964525941476372, "grad_norm": 2.6953415870666504, "learning_rate": 9.387472498373173e-05, "loss": 2.472052574157715, "memory(GiB)": 72.85, "step": 18590, "token_acc": 0.4642857142857143, "train_speed(iter/s)": 0.671032 }, { "epoch": 0.7966668094768862, "grad_norm": 3.6074140071868896, "learning_rate": 9.387149707667687e-05, "loss": 2.2872413635253905, "memory(GiB)": 72.85, "step": 18595, "token_acc": 0.4978902953586498, "train_speed(iter/s)": 0.671029 }, { "epoch": 0.7968810248061351, "grad_norm": 2.81174373626709, "learning_rate": 9.386826837484866e-05, "loss": 2.2907629013061523, "memory(GiB)": 72.85, "step": 18600, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.671088 }, { 
"epoch": 0.7970952401353841, "grad_norm": 3.406879425048828, "learning_rate": 9.386503887830563e-05, "loss": 2.3824148178100586, "memory(GiB)": 72.85, "step": 18605, "token_acc": 0.49216300940438873, "train_speed(iter/s)": 0.671052 }, { "epoch": 0.797309455464633, "grad_norm": 8.174583435058594, "learning_rate": 9.386180858710628e-05, "loss": 2.368357849121094, "memory(GiB)": 72.85, "step": 18610, "token_acc": 0.4584837545126354, "train_speed(iter/s)": 0.671029 }, { "epoch": 0.797523670793882, "grad_norm": 3.914515972137451, "learning_rate": 9.385857750130912e-05, "loss": 2.264310836791992, "memory(GiB)": 72.85, "step": 18615, "token_acc": 0.5195729537366548, "train_speed(iter/s)": 0.671045 }, { "epoch": 0.797737886123131, "grad_norm": 4.678875923156738, "learning_rate": 9.385534562097268e-05, "loss": 2.5857122421264647, "memory(GiB)": 72.85, "step": 18620, "token_acc": 0.4620938628158845, "train_speed(iter/s)": 0.670998 }, { "epoch": 0.7979521014523799, "grad_norm": 2.9633305072784424, "learning_rate": 9.385211294615553e-05, "loss": 2.2857675552368164, "memory(GiB)": 72.85, "step": 18625, "token_acc": 0.49825783972125437, "train_speed(iter/s)": 0.670978 }, { "epoch": 0.7981663167816289, "grad_norm": 3.28332781791687, "learning_rate": 9.384887947691618e-05, "loss": 2.604303550720215, "memory(GiB)": 72.85, "step": 18630, "token_acc": 0.4828767123287671, "train_speed(iter/s)": 0.670956 }, { "epoch": 0.7983805321108779, "grad_norm": 4.045192718505859, "learning_rate": 9.384564521331327e-05, "loss": 2.755268096923828, "memory(GiB)": 72.85, "step": 18635, "token_acc": 0.46994535519125685, "train_speed(iter/s)": 0.670963 }, { "epoch": 0.7985947474401268, "grad_norm": 5.4520463943481445, "learning_rate": 9.384241015540538e-05, "loss": 2.690293312072754, "memory(GiB)": 72.85, "step": 18640, "token_acc": 0.4581673306772908, "train_speed(iter/s)": 0.670934 }, { "epoch": 0.7988089627693757, "grad_norm": 4.287193298339844, "learning_rate": 9.38391743032511e-05, "loss": 
2.385487365722656, "memory(GiB)": 72.85, "step": 18645, "token_acc": 0.4641638225255973, "train_speed(iter/s)": 0.670888 }, { "epoch": 0.7990231780986248, "grad_norm": 4.215967655181885, "learning_rate": 9.383593765690902e-05, "loss": 2.6272937774658205, "memory(GiB)": 72.85, "step": 18650, "token_acc": 0.4208754208754209, "train_speed(iter/s)": 0.670829 }, { "epoch": 0.7992373934278737, "grad_norm": 4.021899700164795, "learning_rate": 9.383270021643783e-05, "loss": 2.279988098144531, "memory(GiB)": 72.85, "step": 18655, "token_acc": 0.5040322580645161, "train_speed(iter/s)": 0.670806 }, { "epoch": 0.7994516087571226, "grad_norm": 4.432606220245361, "learning_rate": 9.382946198189615e-05, "loss": 2.6661705017089843, "memory(GiB)": 72.85, "step": 18660, "token_acc": 0.431438127090301, "train_speed(iter/s)": 0.670794 }, { "epoch": 0.7996658240863717, "grad_norm": 7.874723434448242, "learning_rate": 9.382622295334267e-05, "loss": 2.6482410430908203, "memory(GiB)": 72.85, "step": 18665, "token_acc": 0.46551724137931033, "train_speed(iter/s)": 0.670796 }, { "epoch": 0.7998800394156206, "grad_norm": 3.3361032009124756, "learning_rate": 9.382298313083604e-05, "loss": 2.1945869445800783, "memory(GiB)": 72.85, "step": 18670, "token_acc": 0.5136186770428015, "train_speed(iter/s)": 0.670827 }, { "epoch": 0.8000942547448695, "grad_norm": 3.9041929244995117, "learning_rate": 9.381974251443495e-05, "loss": 2.0964996337890627, "memory(GiB)": 72.85, "step": 18675, "token_acc": 0.5168918918918919, "train_speed(iter/s)": 0.670827 }, { "epoch": 0.8003084700741185, "grad_norm": 3.606235980987549, "learning_rate": 9.381650110419813e-05, "loss": 2.4817489624023437, "memory(GiB)": 72.85, "step": 18680, "token_acc": 0.46048109965635736, "train_speed(iter/s)": 0.670838 }, { "epoch": 0.8005226854033675, "grad_norm": 3.8425276279449463, "learning_rate": 9.381325890018428e-05, "loss": 2.457722854614258, "memory(GiB)": 72.85, "step": 18685, "token_acc": 0.49480968858131485, 
"train_speed(iter/s)": 0.670868 }, { "epoch": 0.8007369007326164, "grad_norm": 3.9779043197631836, "learning_rate": 9.381001590245215e-05, "loss": 2.475655364990234, "memory(GiB)": 72.85, "step": 18690, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.670887 }, { "epoch": 0.8009511160618654, "grad_norm": 4.133604049682617, "learning_rate": 9.380677211106049e-05, "loss": 2.2865570068359373, "memory(GiB)": 72.85, "step": 18695, "token_acc": 0.4924812030075188, "train_speed(iter/s)": 0.670919 }, { "epoch": 0.8011653313911143, "grad_norm": 4.308883190155029, "learning_rate": 9.380352752606804e-05, "loss": 2.473331069946289, "memory(GiB)": 72.85, "step": 18700, "token_acc": 0.475, "train_speed(iter/s)": 0.670964 }, { "epoch": 0.8013795467203633, "grad_norm": 3.6440281867980957, "learning_rate": 9.380028214753361e-05, "loss": 2.570382308959961, "memory(GiB)": 72.85, "step": 18705, "token_acc": 0.45907473309608543, "train_speed(iter/s)": 0.67095 }, { "epoch": 0.8015937620496123, "grad_norm": 4.051533222198486, "learning_rate": 9.379703597551599e-05, "loss": 2.706941604614258, "memory(GiB)": 72.85, "step": 18710, "token_acc": 0.42356687898089174, "train_speed(iter/s)": 0.670991 }, { "epoch": 0.8018079773788612, "grad_norm": 3.2932686805725098, "learning_rate": 9.379378901007397e-05, "loss": 2.5898553848266603, "memory(GiB)": 72.85, "step": 18715, "token_acc": 0.4713804713804714, "train_speed(iter/s)": 0.670994 }, { "epoch": 0.8020221927081101, "grad_norm": 3.9563169479370117, "learning_rate": 9.379054125126639e-05, "loss": 2.178948974609375, "memory(GiB)": 72.85, "step": 18720, "token_acc": 0.5533596837944664, "train_speed(iter/s)": 0.670977 }, { "epoch": 0.8022364080373592, "grad_norm": 8.109323501586914, "learning_rate": 9.378729269915205e-05, "loss": 2.4540422439575194, "memory(GiB)": 72.85, "step": 18725, "token_acc": 0.4523809523809524, "train_speed(iter/s)": 0.670969 }, { "epoch": 0.8024506233666081, "grad_norm": 3.5614917278289795, "learning_rate": 
9.378404335378985e-05, "loss": 2.1422075271606444, "memory(GiB)": 72.85, "step": 18730, "token_acc": 0.4910394265232975, "train_speed(iter/s)": 0.671003 }, { "epoch": 0.802664838695857, "grad_norm": 4.793461799621582, "learning_rate": 9.378079321523862e-05, "loss": 2.2876203536987303, "memory(GiB)": 72.85, "step": 18735, "token_acc": 0.5050847457627119, "train_speed(iter/s)": 0.671033 }, { "epoch": 0.8028790540251061, "grad_norm": 5.28770637512207, "learning_rate": 9.377754228355726e-05, "loss": 2.723734664916992, "memory(GiB)": 72.85, "step": 18740, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.671025 }, { "epoch": 0.803093269354355, "grad_norm": 3.754546880722046, "learning_rate": 9.377429055880465e-05, "loss": 2.504810333251953, "memory(GiB)": 72.85, "step": 18745, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.671059 }, { "epoch": 0.8033074846836039, "grad_norm": 3.8979761600494385, "learning_rate": 9.37710380410397e-05, "loss": 2.4307632446289062, "memory(GiB)": 72.85, "step": 18750, "token_acc": 0.47794117647058826, "train_speed(iter/s)": 0.671074 }, { "epoch": 0.803521700012853, "grad_norm": 3.942913770675659, "learning_rate": 9.376778473032134e-05, "loss": 2.3890533447265625, "memory(GiB)": 72.85, "step": 18755, "token_acc": 0.47191011235955055, "train_speed(iter/s)": 0.671107 }, { "epoch": 0.8037359153421019, "grad_norm": 3.2925949096679688, "learning_rate": 9.376453062670851e-05, "loss": 2.105743980407715, "memory(GiB)": 72.85, "step": 18760, "token_acc": 0.5375939849624061, "train_speed(iter/s)": 0.67113 }, { "epoch": 0.8039501306713508, "grad_norm": 3.7290139198303223, "learning_rate": 9.376127573026014e-05, "loss": 2.5025566101074217, "memory(GiB)": 72.85, "step": 18765, "token_acc": 0.4524590163934426, "train_speed(iter/s)": 0.67115 }, { "epoch": 0.8041643460005998, "grad_norm": 3.388040542602539, "learning_rate": 9.375802004103522e-05, "loss": 2.7266292572021484, "memory(GiB)": 72.85, "step": 18770, "token_acc": 
0.4533333333333333, "train_speed(iter/s)": 0.671142 }, { "epoch": 0.8043785613298488, "grad_norm": 3.605422019958496, "learning_rate": 9.375476355909271e-05, "loss": 2.4807010650634767, "memory(GiB)": 72.85, "step": 18775, "token_acc": 0.46503496503496505, "train_speed(iter/s)": 0.671165 }, { "epoch": 0.8045927766590977, "grad_norm": 3.0692825317382812, "learning_rate": 9.375150628449162e-05, "loss": 2.6068145751953127, "memory(GiB)": 72.85, "step": 18780, "token_acc": 0.46864686468646866, "train_speed(iter/s)": 0.671197 }, { "epoch": 0.8048069919883467, "grad_norm": 2.784193992614746, "learning_rate": 9.374824821729094e-05, "loss": 2.390949249267578, "memory(GiB)": 72.85, "step": 18785, "token_acc": 0.5092250922509225, "train_speed(iter/s)": 0.671177 }, { "epoch": 0.8050212073175956, "grad_norm": 3.1936848163604736, "learning_rate": 9.374498935754972e-05, "loss": 2.4753915786743166, "memory(GiB)": 72.85, "step": 18790, "token_acc": 0.5205992509363296, "train_speed(iter/s)": 0.671185 }, { "epoch": 0.8052354226468446, "grad_norm": 3.8100781440734863, "learning_rate": 9.374172970532697e-05, "loss": 2.401678466796875, "memory(GiB)": 72.85, "step": 18795, "token_acc": 0.48698884758364314, "train_speed(iter/s)": 0.671223 }, { "epoch": 0.8054496379760936, "grad_norm": 3.4259071350097656, "learning_rate": 9.373846926068176e-05, "loss": 2.6102256774902344, "memory(GiB)": 72.85, "step": 18800, "token_acc": 0.44654088050314467, "train_speed(iter/s)": 0.671227 }, { "epoch": 0.8056638533053425, "grad_norm": 3.2654953002929688, "learning_rate": 9.373520802367315e-05, "loss": 2.4872106552124023, "memory(GiB)": 72.85, "step": 18805, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.671237 }, { "epoch": 0.8058780686345914, "grad_norm": 4.0425801277160645, "learning_rate": 9.37319459943602e-05, "loss": 2.562176513671875, "memory(GiB)": 72.85, "step": 18810, "token_acc": 0.46646341463414637, "train_speed(iter/s)": 0.671221 }, { "epoch": 0.8060922839638405, "grad_norm": 
3.4383316040039062, "learning_rate": 9.372868317280206e-05, "loss": 2.5451848983764647, "memory(GiB)": 72.85, "step": 18815, "token_acc": 0.48494983277591974, "train_speed(iter/s)": 0.671198 }, { "epoch": 0.8063064992930894, "grad_norm": 3.736764907836914, "learning_rate": 9.372541955905777e-05, "loss": 2.379995918273926, "memory(GiB)": 72.85, "step": 18820, "token_acc": 0.5036496350364964, "train_speed(iter/s)": 0.671224 }, { "epoch": 0.8065207146223383, "grad_norm": 2.8450446128845215, "learning_rate": 9.37221551531865e-05, "loss": 2.479876899719238, "memory(GiB)": 72.85, "step": 18825, "token_acc": 0.4627831715210356, "train_speed(iter/s)": 0.671219 }, { "epoch": 0.8067349299515874, "grad_norm": 4.736846446990967, "learning_rate": 9.371888995524738e-05, "loss": 2.0464834213256835, "memory(GiB)": 72.85, "step": 18830, "token_acc": 0.5393586005830904, "train_speed(iter/s)": 0.671218 }, { "epoch": 0.8069491452808363, "grad_norm": 3.9550719261169434, "learning_rate": 9.371562396529955e-05, "loss": 2.4981351852416993, "memory(GiB)": 72.85, "step": 18835, "token_acc": 0.4482758620689655, "train_speed(iter/s)": 0.671237 }, { "epoch": 0.8071633606100853, "grad_norm": 4.361787796020508, "learning_rate": 9.371235718340219e-05, "loss": 2.6312114715576174, "memory(GiB)": 72.85, "step": 18840, "token_acc": 0.4735099337748344, "train_speed(iter/s)": 0.67126 }, { "epoch": 0.8073775759393342, "grad_norm": 3.9508323669433594, "learning_rate": 9.370908960961447e-05, "loss": 2.526144790649414, "memory(GiB)": 72.85, "step": 18845, "token_acc": 0.4684385382059801, "train_speed(iter/s)": 0.671265 }, { "epoch": 0.8075917912685832, "grad_norm": 4.9275007247924805, "learning_rate": 9.370582124399559e-05, "loss": 2.353806495666504, "memory(GiB)": 72.85, "step": 18850, "token_acc": 0.5018450184501845, "train_speed(iter/s)": 0.671288 }, { "epoch": 0.8078060065978322, "grad_norm": 3.631540060043335, "learning_rate": 9.370255208660475e-05, "loss": 2.62391357421875, "memory(GiB)": 72.85, 
"step": 18855, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.671297 }, { "epoch": 0.8080202219270811, "grad_norm": 3.808502435684204, "learning_rate": 9.369928213750119e-05, "loss": 2.1844053268432617, "memory(GiB)": 72.85, "step": 18860, "token_acc": 0.5099601593625498, "train_speed(iter/s)": 0.6713 }, { "epoch": 0.80823443725633, "grad_norm": 3.5936667919158936, "learning_rate": 9.369601139674414e-05, "loss": 2.4083715438842774, "memory(GiB)": 72.85, "step": 18865, "token_acc": 0.5038167938931297, "train_speed(iter/s)": 0.671291 }, { "epoch": 0.8084486525855791, "grad_norm": 3.355651378631592, "learning_rate": 9.369273986439285e-05, "loss": 2.7004281997680666, "memory(GiB)": 72.85, "step": 18870, "token_acc": 0.44554455445544555, "train_speed(iter/s)": 0.671236 }, { "epoch": 0.808662867914828, "grad_norm": 3.85564923286438, "learning_rate": 9.36894675405066e-05, "loss": 1.9189149856567382, "memory(GiB)": 72.85, "step": 18875, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.671279 }, { "epoch": 0.8088770832440769, "grad_norm": 5.6047868728637695, "learning_rate": 9.368619442514466e-05, "loss": 2.564251708984375, "memory(GiB)": 72.85, "step": 18880, "token_acc": 0.4754601226993865, "train_speed(iter/s)": 0.671289 }, { "epoch": 0.809091298573326, "grad_norm": 3.0563433170318604, "learning_rate": 9.368292051836631e-05, "loss": 2.615108299255371, "memory(GiB)": 72.85, "step": 18885, "token_acc": 0.441340782122905, "train_speed(iter/s)": 0.671306 }, { "epoch": 0.8093055139025749, "grad_norm": 3.1947193145751953, "learning_rate": 9.36796458202309e-05, "loss": 2.1965383529663085, "memory(GiB)": 72.85, "step": 18890, "token_acc": 0.47796610169491527, "train_speed(iter/s)": 0.671213 }, { "epoch": 0.8095197292318238, "grad_norm": 4.381551265716553, "learning_rate": 9.367637033079773e-05, "loss": 2.4072574615478515, "memory(GiB)": 72.85, "step": 18895, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.671252 }, { "epoch": 0.8097339445610728, 
"grad_norm": 3.4452996253967285, "learning_rate": 9.367309405012612e-05, "loss": 2.4673927307128904, "memory(GiB)": 72.85, "step": 18900, "token_acc": 0.48598130841121495, "train_speed(iter/s)": 0.671225 }, { "epoch": 0.8099481598903218, "grad_norm": 3.366070032119751, "learning_rate": 9.366981697827545e-05, "loss": 2.669246482849121, "memory(GiB)": 72.85, "step": 18905, "token_acc": 0.4273255813953488, "train_speed(iter/s)": 0.671221 }, { "epoch": 0.8101623752195707, "grad_norm": 3.6402251720428467, "learning_rate": 9.366653911530509e-05, "loss": 2.5404029846191407, "memory(GiB)": 72.85, "step": 18910, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.671183 }, { "epoch": 0.8103765905488197, "grad_norm": 3.4180636405944824, "learning_rate": 9.36632604612744e-05, "loss": 2.2979110717773437, "memory(GiB)": 72.85, "step": 18915, "token_acc": 0.5268456375838926, "train_speed(iter/s)": 0.671218 }, { "epoch": 0.8105908058780686, "grad_norm": 5.091119289398193, "learning_rate": 9.365998101624279e-05, "loss": 2.2879150390625, "memory(GiB)": 72.85, "step": 18920, "token_acc": 0.5506756756756757, "train_speed(iter/s)": 0.671206 }, { "epoch": 0.8108050212073176, "grad_norm": 4.80275297164917, "learning_rate": 9.365670078026967e-05, "loss": 2.075881576538086, "memory(GiB)": 72.85, "step": 18925, "token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.671185 }, { "epoch": 0.8110192365365666, "grad_norm": 3.97213077545166, "learning_rate": 9.365341975341446e-05, "loss": 2.2295326232910155, "memory(GiB)": 72.85, "step": 18930, "token_acc": 0.5119453924914675, "train_speed(iter/s)": 0.671225 }, { "epoch": 0.8112334518658155, "grad_norm": 3.8121469020843506, "learning_rate": 9.36501379357366e-05, "loss": 2.1729499816894533, "memory(GiB)": 72.85, "step": 18935, "token_acc": 0.4794520547945205, "train_speed(iter/s)": 0.671234 }, { "epoch": 0.8114476671950644, "grad_norm": 3.8483035564422607, "learning_rate": 9.364685532729555e-05, "loss": 2.5030139923095702, "memory(GiB)": 
72.85, "step": 18940, "token_acc": 0.46264367816091956, "train_speed(iter/s)": 0.671236 }, { "epoch": 0.8116618825243135, "grad_norm": 3.405895948410034, "learning_rate": 9.364357192815076e-05, "loss": 2.4880449295043947, "memory(GiB)": 72.85, "step": 18945, "token_acc": 0.450199203187251, "train_speed(iter/s)": 0.671258 }, { "epoch": 0.8118760978535624, "grad_norm": 3.8397414684295654, "learning_rate": 9.364028773836175e-05, "loss": 2.315484046936035, "memory(GiB)": 72.85, "step": 18950, "token_acc": 0.4882943143812709, "train_speed(iter/s)": 0.671264 }, { "epoch": 0.8120903131828113, "grad_norm": 5.687384128570557, "learning_rate": 9.363700275798797e-05, "loss": 2.438718795776367, "memory(GiB)": 72.85, "step": 18955, "token_acc": 0.4962962962962963, "train_speed(iter/s)": 0.671277 }, { "epoch": 0.8123045285120604, "grad_norm": 3.627171277999878, "learning_rate": 9.363371698708894e-05, "loss": 2.1192043304443358, "memory(GiB)": 72.85, "step": 18960, "token_acc": 0.5265017667844523, "train_speed(iter/s)": 0.671311 }, { "epoch": 0.8125187438413093, "grad_norm": 6.753027439117432, "learning_rate": 9.363043042572422e-05, "loss": 2.756243896484375, "memory(GiB)": 72.85, "step": 18965, "token_acc": 0.4264705882352941, "train_speed(iter/s)": 0.671325 }, { "epoch": 0.8127329591705582, "grad_norm": 4.764512538909912, "learning_rate": 9.362714307395331e-05, "loss": 2.2130252838134767, "memory(GiB)": 72.85, "step": 18970, "token_acc": 0.5218855218855218, "train_speed(iter/s)": 0.671294 }, { "epoch": 0.8129471744998072, "grad_norm": 4.220763206481934, "learning_rate": 9.362385493183577e-05, "loss": 2.2332813262939455, "memory(GiB)": 72.85, "step": 18975, "token_acc": 0.5368421052631579, "train_speed(iter/s)": 0.671318 }, { "epoch": 0.8131613898290562, "grad_norm": 5.823452472686768, "learning_rate": 9.362056599943119e-05, "loss": 2.0257408142089846, "memory(GiB)": 72.85, "step": 18980, "token_acc": 0.5925925925925926, "train_speed(iter/s)": 0.671301 }, { "epoch": 
0.8133756051583051, "grad_norm": 5.560976982116699, "learning_rate": 9.361727627679912e-05, "loss": 2.097158432006836, "memory(GiB)": 72.85, "step": 18985, "token_acc": 0.5040983606557377, "train_speed(iter/s)": 0.671316 }, { "epoch": 0.8135898204875541, "grad_norm": 3.8534774780273438, "learning_rate": 9.36139857639992e-05, "loss": 2.4946447372436524, "memory(GiB)": 72.85, "step": 18990, "token_acc": 0.4807017543859649, "train_speed(iter/s)": 0.67129 }, { "epoch": 0.813804035816803, "grad_norm": 4.0810933113098145, "learning_rate": 9.361069446109099e-05, "loss": 2.3499149322509765, "memory(GiB)": 72.85, "step": 18995, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.671304 }, { "epoch": 0.814018251146052, "grad_norm": 6.340082168579102, "learning_rate": 9.360740236813415e-05, "loss": 2.7759185791015626, "memory(GiB)": 72.85, "step": 19000, "token_acc": 0.45714285714285713, "train_speed(iter/s)": 0.671282 }, { "epoch": 0.814018251146052, "eval_loss": 1.9701255559921265, "eval_runtime": 16.9029, "eval_samples_per_second": 5.916, "eval_steps_per_second": 5.916, "eval_token_acc": 0.4979919678714859, "step": 19000 }, { "epoch": 0.814232466475301, "grad_norm": 3.439021348953247, "learning_rate": 9.360410948518831e-05, "loss": 2.612441635131836, "memory(GiB)": 72.85, "step": 19005, "token_acc": 0.48852772466539196, "train_speed(iter/s)": 0.670821 }, { "epoch": 0.8144466818045499, "grad_norm": 4.990092754364014, "learning_rate": 9.360081581231313e-05, "loss": 2.4854883193969726, "memory(GiB)": 72.85, "step": 19010, "token_acc": 0.476056338028169, "train_speed(iter/s)": 0.670816 }, { "epoch": 0.8146608971337989, "grad_norm": 5.537199974060059, "learning_rate": 9.359752134956824e-05, "loss": 2.7283676147460936, "memory(GiB)": 72.85, "step": 19015, "token_acc": 0.4608150470219436, "train_speed(iter/s)": 0.670858 }, { "epoch": 0.8148751124630479, "grad_norm": 3.4888076782226562, "learning_rate": 9.359422609701338e-05, "loss": 2.5058567047119142, "memory(GiB)": 72.85, 
"step": 19020, "token_acc": 0.4940239043824701, "train_speed(iter/s)": 0.670829 }, { "epoch": 0.8150893277922968, "grad_norm": 3.9473063945770264, "learning_rate": 9.359093005470822e-05, "loss": 2.2225105285644533, "memory(GiB)": 72.85, "step": 19025, "token_acc": 0.5613382899628253, "train_speed(iter/s)": 0.670857 }, { "epoch": 0.8153035431215457, "grad_norm": 3.18666672706604, "learning_rate": 9.358763322271246e-05, "loss": 2.6842618942260743, "memory(GiB)": 72.85, "step": 19030, "token_acc": 0.5, "train_speed(iter/s)": 0.670823 }, { "epoch": 0.8155177584507948, "grad_norm": 3.2981600761413574, "learning_rate": 9.358433560108585e-05, "loss": 2.510787582397461, "memory(GiB)": 72.85, "step": 19035, "token_acc": 0.4511784511784512, "train_speed(iter/s)": 0.670808 }, { "epoch": 0.8157319737800437, "grad_norm": 4.127114772796631, "learning_rate": 9.358103718988812e-05, "loss": 2.332255172729492, "memory(GiB)": 72.85, "step": 19040, "token_acc": 0.5037593984962406, "train_speed(iter/s)": 0.670838 }, { "epoch": 0.8159461891092926, "grad_norm": 3.0404183864593506, "learning_rate": 9.357773798917899e-05, "loss": 2.2215938568115234, "memory(GiB)": 72.85, "step": 19045, "token_acc": 0.5035714285714286, "train_speed(iter/s)": 0.670859 }, { "epoch": 0.8161604044385417, "grad_norm": 3.6463797092437744, "learning_rate": 9.357443799901828e-05, "loss": 2.4722862243652344, "memory(GiB)": 72.85, "step": 19050, "token_acc": 0.4381625441696113, "train_speed(iter/s)": 0.670866 }, { "epoch": 0.8163746197677906, "grad_norm": 3.1104042530059814, "learning_rate": 9.357113721946573e-05, "loss": 2.1612424850463867, "memory(GiB)": 72.85, "step": 19055, "token_acc": 0.4921135646687697, "train_speed(iter/s)": 0.670865 }, { "epoch": 0.8165888350970395, "grad_norm": 3.590822219848633, "learning_rate": 9.356783565058118e-05, "loss": 2.4371902465820314, "memory(GiB)": 72.85, "step": 19060, "token_acc": 0.456, "train_speed(iter/s)": 0.670861 }, { "epoch": 0.8168030504262885, "grad_norm": 
3.060732126235962, "learning_rate": 9.35645332924244e-05, "loss": 2.3496337890625, "memory(GiB)": 72.85, "step": 19065, "token_acc": 0.5349794238683128, "train_speed(iter/s)": 0.670845 }, { "epoch": 0.8170172657555375, "grad_norm": 4.806979179382324, "learning_rate": 9.356123014505523e-05, "loss": 2.217112159729004, "memory(GiB)": 72.85, "step": 19070, "token_acc": 0.4801762114537445, "train_speed(iter/s)": 0.670793 }, { "epoch": 0.8172314810847864, "grad_norm": 3.9634978771209717, "learning_rate": 9.355792620853352e-05, "loss": 2.288003921508789, "memory(GiB)": 72.85, "step": 19075, "token_acc": 0.5152542372881356, "train_speed(iter/s)": 0.670772 }, { "epoch": 0.8174456964140354, "grad_norm": 3.2480697631835938, "learning_rate": 9.355462148291912e-05, "loss": 2.6485664367675783, "memory(GiB)": 72.85, "step": 19080, "token_acc": 0.4870848708487085, "train_speed(iter/s)": 0.670773 }, { "epoch": 0.8176599117432843, "grad_norm": 3.317230701446533, "learning_rate": 9.355131596827189e-05, "loss": 2.5137256622314452, "memory(GiB)": 72.85, "step": 19085, "token_acc": 0.4468864468864469, "train_speed(iter/s)": 0.670685 }, { "epoch": 0.8178741270725333, "grad_norm": 3.725830078125, "learning_rate": 9.354800966465171e-05, "loss": 2.4660036087036135, "memory(GiB)": 72.85, "step": 19090, "token_acc": 0.4772727272727273, "train_speed(iter/s)": 0.670642 }, { "epoch": 0.8180883424017823, "grad_norm": 2.7493979930877686, "learning_rate": 9.35447025721185e-05, "loss": 2.1921575546264647, "memory(GiB)": 72.85, "step": 19095, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.670641 }, { "epoch": 0.8183025577310312, "grad_norm": 3.372483730316162, "learning_rate": 9.354139469073214e-05, "loss": 2.264898490905762, "memory(GiB)": 72.85, "step": 19100, "token_acc": 0.5136054421768708, "train_speed(iter/s)": 0.670628 }, { "epoch": 0.8185167730602801, "grad_norm": 7.676172256469727, "learning_rate": 9.353808602055259e-05, "loss": 2.418758010864258, "memory(GiB)": 72.85, "step": 
19105, "token_acc": 0.45660377358490567, "train_speed(iter/s)": 0.67067 }, { "epoch": 0.8187309883895292, "grad_norm": 3.2518417835235596, "learning_rate": 9.353477656163974e-05, "loss": 2.2345394134521483, "memory(GiB)": 72.85, "step": 19110, "token_acc": 0.5241635687732342, "train_speed(iter/s)": 0.670683 }, { "epoch": 0.8189452037187781, "grad_norm": 3.4270262718200684, "learning_rate": 9.35314663140536e-05, "loss": 2.4042743682861327, "memory(GiB)": 72.85, "step": 19115, "token_acc": 0.506896551724138, "train_speed(iter/s)": 0.670638 }, { "epoch": 0.819159419048027, "grad_norm": 4.096381187438965, "learning_rate": 9.35281552778541e-05, "loss": 2.126998519897461, "memory(GiB)": 72.85, "step": 19120, "token_acc": 0.5, "train_speed(iter/s)": 0.670652 }, { "epoch": 0.8193736343772761, "grad_norm": 4.921952247619629, "learning_rate": 9.352484345310123e-05, "loss": 2.379354476928711, "memory(GiB)": 72.85, "step": 19125, "token_acc": 0.44981412639405205, "train_speed(iter/s)": 0.67067 }, { "epoch": 0.819587849706525, "grad_norm": 3.870173931121826, "learning_rate": 9.3521530839855e-05, "loss": 2.462455940246582, "memory(GiB)": 72.85, "step": 19130, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.67069 }, { "epoch": 0.8198020650357739, "grad_norm": 3.9838149547576904, "learning_rate": 9.351821743817541e-05, "loss": 2.576887512207031, "memory(GiB)": 72.85, "step": 19135, "token_acc": 0.46107784431137727, "train_speed(iter/s)": 0.670658 }, { "epoch": 0.8200162803650229, "grad_norm": 3.533656120300293, "learning_rate": 9.351490324812248e-05, "loss": 2.684839057922363, "memory(GiB)": 72.85, "step": 19140, "token_acc": 0.43902439024390244, "train_speed(iter/s)": 0.670583 }, { "epoch": 0.8202304956942719, "grad_norm": 4.159970760345459, "learning_rate": 9.351158826975626e-05, "loss": 2.4302202224731446, "memory(GiB)": 72.85, "step": 19145, "token_acc": 0.5029411764705882, "train_speed(iter/s)": 0.670616 }, { "epoch": 0.8204447110235208, "grad_norm": 
3.385136365890503, "learning_rate": 9.350827250313681e-05, "loss": 2.626369857788086, "memory(GiB)": 72.85, "step": 19150, "token_acc": 0.4648648648648649, "train_speed(iter/s)": 0.670623 }, { "epoch": 0.8206589263527698, "grad_norm": 5.017786979675293, "learning_rate": 9.350495594832418e-05, "loss": 2.520590972900391, "memory(GiB)": 72.85, "step": 19155, "token_acc": 0.48507462686567165, "train_speed(iter/s)": 0.670619 }, { "epoch": 0.8208731416820187, "grad_norm": 3.2122340202331543, "learning_rate": 9.350163860537847e-05, "loss": 2.4764938354492188, "memory(GiB)": 72.85, "step": 19160, "token_acc": 0.48656716417910445, "train_speed(iter/s)": 0.670652 }, { "epoch": 0.8210873570112677, "grad_norm": 4.143450736999512, "learning_rate": 9.349832047435977e-05, "loss": 2.5135427474975587, "memory(GiB)": 72.85, "step": 19165, "token_acc": 0.4409937888198758, "train_speed(iter/s)": 0.670641 }, { "epoch": 0.8213015723405167, "grad_norm": 3.394770383834839, "learning_rate": 9.349500155532817e-05, "loss": 2.2578014373779296, "memory(GiB)": 72.85, "step": 19170, "token_acc": 0.4916387959866221, "train_speed(iter/s)": 0.670647 }, { "epoch": 0.8215157876697656, "grad_norm": 3.5227108001708984, "learning_rate": 9.349168184834385e-05, "loss": 2.3601394653320313, "memory(GiB)": 72.85, "step": 19175, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.670624 }, { "epoch": 0.8217300029990147, "grad_norm": 3.19976806640625, "learning_rate": 9.348836135346688e-05, "loss": 2.6227039337158202, "memory(GiB)": 72.85, "step": 19180, "token_acc": 0.48, "train_speed(iter/s)": 0.67065 }, { "epoch": 0.8219442183282636, "grad_norm": 3.037544012069702, "learning_rate": 9.348504007075747e-05, "loss": 2.364405059814453, "memory(GiB)": 72.85, "step": 19185, "token_acc": 0.49859943977591037, "train_speed(iter/s)": 0.670626 }, { "epoch": 0.8221584336575125, "grad_norm": 5.490344524383545, "learning_rate": 9.348171800027577e-05, "loss": 2.114413261413574, "memory(GiB)": 72.85, "step": 19190, 
"token_acc": 0.541501976284585, "train_speed(iter/s)": 0.670641 }, { "epoch": 0.8223726489867615, "grad_norm": 5.3662428855896, "learning_rate": 9.347839514208194e-05, "loss": 2.5476430892944335, "memory(GiB)": 72.85, "step": 19195, "token_acc": 0.4840989399293286, "train_speed(iter/s)": 0.670663 }, { "epoch": 0.8225868643160105, "grad_norm": 3.5917248725891113, "learning_rate": 9.347507149623621e-05, "loss": 2.4225997924804688, "memory(GiB)": 72.85, "step": 19200, "token_acc": 0.4908424908424908, "train_speed(iter/s)": 0.67064 }, { "epoch": 0.8228010796452594, "grad_norm": 4.065125942230225, "learning_rate": 9.347174706279878e-05, "loss": 2.6118030548095703, "memory(GiB)": 72.85, "step": 19205, "token_acc": 0.446875, "train_speed(iter/s)": 0.670666 }, { "epoch": 0.8230152949745084, "grad_norm": 2.914731740951538, "learning_rate": 9.346842184182985e-05, "loss": 2.317289352416992, "memory(GiB)": 72.85, "step": 19210, "token_acc": 0.5091463414634146, "train_speed(iter/s)": 0.670644 }, { "epoch": 0.8232295103037574, "grad_norm": 4.683845520019531, "learning_rate": 9.34650958333897e-05, "loss": 2.2915245056152345, "memory(GiB)": 72.85, "step": 19215, "token_acc": 0.4946236559139785, "train_speed(iter/s)": 0.670599 }, { "epoch": 0.8234437256330063, "grad_norm": 3.811002731323242, "learning_rate": 9.346176903753857e-05, "loss": 2.2286355972290037, "memory(GiB)": 72.85, "step": 19220, "token_acc": 0.48134328358208955, "train_speed(iter/s)": 0.670611 }, { "epoch": 0.8236579409622553, "grad_norm": 4.870709419250488, "learning_rate": 9.345844145433671e-05, "loss": 2.486041450500488, "memory(GiB)": 72.85, "step": 19225, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.670638 }, { "epoch": 0.8238721562915042, "grad_norm": 4.398255348205566, "learning_rate": 9.345511308384443e-05, "loss": 2.4680557250976562, "memory(GiB)": 72.85, "step": 19230, "token_acc": 0.45993031358885017, "train_speed(iter/s)": 0.670641 }, { "epoch": 0.8240863716207532, "grad_norm": 
2.952059745788574, "learning_rate": 9.3451783926122e-05, "loss": 2.6065797805786133, "memory(GiB)": 72.85, "step": 19235, "token_acc": 0.4383954154727794, "train_speed(iter/s)": 0.670654 }, { "epoch": 0.8243005869500022, "grad_norm": 3.2424426078796387, "learning_rate": 9.344845398122974e-05, "loss": 2.364938163757324, "memory(GiB)": 72.85, "step": 19240, "token_acc": 0.5093632958801498, "train_speed(iter/s)": 0.670659 }, { "epoch": 0.8245148022792511, "grad_norm": 4.461920261383057, "learning_rate": 9.344512324922799e-05, "loss": 2.4313543319702147, "memory(GiB)": 72.85, "step": 19245, "token_acc": 0.4676470588235294, "train_speed(iter/s)": 0.670638 }, { "epoch": 0.8247290176085, "grad_norm": 3.128047227859497, "learning_rate": 9.344179173017708e-05, "loss": 2.4891788482666017, "memory(GiB)": 72.85, "step": 19250, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.670658 }, { "epoch": 0.8249432329377491, "grad_norm": 2.975123167037964, "learning_rate": 9.343845942413735e-05, "loss": 2.435733413696289, "memory(GiB)": 72.85, "step": 19255, "token_acc": 0.5093167701863354, "train_speed(iter/s)": 0.670643 }, { "epoch": 0.825157448266998, "grad_norm": 3.7135064601898193, "learning_rate": 9.343512633116917e-05, "loss": 2.2968772888183593, "memory(GiB)": 72.85, "step": 19260, "token_acc": 0.5314685314685315, "train_speed(iter/s)": 0.670675 }, { "epoch": 0.8253716635962469, "grad_norm": 3.296022653579712, "learning_rate": 9.343179245133295e-05, "loss": 2.5758487701416017, "memory(GiB)": 72.85, "step": 19265, "token_acc": 0.4707692307692308, "train_speed(iter/s)": 0.670687 }, { "epoch": 0.825585878925496, "grad_norm": 2.832545042037964, "learning_rate": 9.342845778468908e-05, "loss": 2.6009634017944334, "memory(GiB)": 72.85, "step": 19270, "token_acc": 0.5016501650165016, "train_speed(iter/s)": 0.670706 }, { "epoch": 0.8258000942547449, "grad_norm": 2.8513970375061035, "learning_rate": 9.342512233129792e-05, "loss": 2.032163619995117, "memory(GiB)": 72.85, "step": 
19275, "token_acc": 0.5919732441471572, "train_speed(iter/s)": 0.670717 }, { "epoch": 0.8260143095839938, "grad_norm": 4.608035087585449, "learning_rate": 9.342178609121996e-05, "loss": 2.749506378173828, "memory(GiB)": 72.85, "step": 19280, "token_acc": 0.4774193548387097, "train_speed(iter/s)": 0.670721 }, { "epoch": 0.8262285249132428, "grad_norm": 3.3327465057373047, "learning_rate": 9.34184490645156e-05, "loss": 2.1135839462280273, "memory(GiB)": 72.85, "step": 19285, "token_acc": 0.5243055555555556, "train_speed(iter/s)": 0.670708 }, { "epoch": 0.8264427402424918, "grad_norm": 3.848008632659912, "learning_rate": 9.341511125124532e-05, "loss": 2.611906623840332, "memory(GiB)": 72.85, "step": 19290, "token_acc": 0.46779661016949153, "train_speed(iter/s)": 0.670713 }, { "epoch": 0.8266569555717407, "grad_norm": 3.173713445663452, "learning_rate": 9.341177265146956e-05, "loss": 2.2451557159423827, "memory(GiB)": 72.85, "step": 19295, "token_acc": 0.5309090909090909, "train_speed(iter/s)": 0.670708 }, { "epoch": 0.8268711709009897, "grad_norm": 3.129455804824829, "learning_rate": 9.340843326524882e-05, "loss": 2.428952217102051, "memory(GiB)": 72.85, "step": 19300, "token_acc": 0.45714285714285713, "train_speed(iter/s)": 0.670722 }, { "epoch": 0.8270853862302386, "grad_norm": 3.6701269149780273, "learning_rate": 9.340509309264358e-05, "loss": 2.4648998260498045, "memory(GiB)": 72.85, "step": 19305, "token_acc": 0.4875, "train_speed(iter/s)": 0.670673 }, { "epoch": 0.8272996015594876, "grad_norm": 3.306865930557251, "learning_rate": 9.340175213371437e-05, "loss": 2.482575607299805, "memory(GiB)": 72.85, "step": 19310, "token_acc": 0.43125, "train_speed(iter/s)": 0.670711 }, { "epoch": 0.8275138168887366, "grad_norm": 3.5221896171569824, "learning_rate": 9.339841038852171e-05, "loss": 2.290231704711914, "memory(GiB)": 72.85, "step": 19315, "token_acc": 0.49848024316109424, "train_speed(iter/s)": 0.670722 }, { "epoch": 0.8277280322179855, "grad_norm": 
3.5125980377197266, "learning_rate": 9.339506785712612e-05, "loss": 2.1382556915283204, "memory(GiB)": 72.85, "step": 19320, "token_acc": 0.54421768707483, "train_speed(iter/s)": 0.670705 }, { "epoch": 0.8279422475472344, "grad_norm": 5.358231544494629, "learning_rate": 9.339172453958817e-05, "loss": 2.2960573196411134, "memory(GiB)": 72.85, "step": 19325, "token_acc": 0.5236363636363637, "train_speed(iter/s)": 0.670735 }, { "epoch": 0.8281564628764835, "grad_norm": 3.5993659496307373, "learning_rate": 9.338838043596845e-05, "loss": 2.499937057495117, "memory(GiB)": 72.85, "step": 19330, "token_acc": 0.498371335504886, "train_speed(iter/s)": 0.670737 }, { "epoch": 0.8283706782057324, "grad_norm": 4.089853286743164, "learning_rate": 9.338503554632749e-05, "loss": 2.4606260299682616, "memory(GiB)": 72.85, "step": 19335, "token_acc": 0.5201342281879194, "train_speed(iter/s)": 0.670752 }, { "epoch": 0.8285848935349813, "grad_norm": 3.6026430130004883, "learning_rate": 9.338168987072593e-05, "loss": 2.754596710205078, "memory(GiB)": 72.85, "step": 19340, "token_acc": 0.4233576642335766, "train_speed(iter/s)": 0.670765 }, { "epoch": 0.8287991088642304, "grad_norm": 3.4468116760253906, "learning_rate": 9.337834340922436e-05, "loss": 2.5211410522460938, "memory(GiB)": 72.85, "step": 19345, "token_acc": 0.4332344213649852, "train_speed(iter/s)": 0.670772 }, { "epoch": 0.8290133241934793, "grad_norm": 3.0994646549224854, "learning_rate": 9.33749961618834e-05, "loss": 2.448387336730957, "memory(GiB)": 72.85, "step": 19350, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.670804 }, { "epoch": 0.8292275395227282, "grad_norm": 3.626842975616455, "learning_rate": 9.33716481287637e-05, "loss": 2.2619447708129883, "memory(GiB)": 72.85, "step": 19355, "token_acc": 0.49538461538461537, "train_speed(iter/s)": 0.670814 }, { "epoch": 0.8294417548519772, "grad_norm": 3.36556339263916, "learning_rate": 9.33682993099259e-05, "loss": 2.4029674530029297, "memory(GiB)": 72.85, 
"step": 19360, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.670835 }, { "epoch": 0.8296559701812262, "grad_norm": 3.363788604736328, "learning_rate": 9.336494970543068e-05, "loss": 2.286343002319336, "memory(GiB)": 72.85, "step": 19365, "token_acc": 0.5372549019607843, "train_speed(iter/s)": 0.670818 }, { "epoch": 0.8298701855104751, "grad_norm": 3.747108221054077, "learning_rate": 9.336159931533872e-05, "loss": 2.2726158142089843, "memory(GiB)": 72.85, "step": 19370, "token_acc": 0.5038461538461538, "train_speed(iter/s)": 0.670782 }, { "epoch": 0.8300844008397241, "grad_norm": 4.091557025909424, "learning_rate": 9.335824813971072e-05, "loss": 2.5095998764038088, "memory(GiB)": 72.85, "step": 19375, "token_acc": 0.45014245014245013, "train_speed(iter/s)": 0.670797 }, { "epoch": 0.830298616168973, "grad_norm": 4.838551044464111, "learning_rate": 9.335489617860737e-05, "loss": 2.0379892349243165, "memory(GiB)": 72.85, "step": 19380, "token_acc": 0.5421686746987951, "train_speed(iter/s)": 0.670819 }, { "epoch": 0.830512831498222, "grad_norm": 3.4157216548919678, "learning_rate": 9.33515434320894e-05, "loss": 2.656298828125, "memory(GiB)": 72.85, "step": 19385, "token_acc": 0.4610169491525424, "train_speed(iter/s)": 0.670746 }, { "epoch": 0.830727046827471, "grad_norm": 3.9824507236480713, "learning_rate": 9.334818990021757e-05, "loss": 2.5176651000976564, "memory(GiB)": 72.85, "step": 19390, "token_acc": 0.4894366197183099, "train_speed(iter/s)": 0.670758 }, { "epoch": 0.8309412621567199, "grad_norm": 3.7342779636383057, "learning_rate": 9.33448355830526e-05, "loss": 2.1309608459472655, "memory(GiB)": 72.85, "step": 19395, "token_acc": 0.5352564102564102, "train_speed(iter/s)": 0.670734 }, { "epoch": 0.8311554774859689, "grad_norm": 3.4733922481536865, "learning_rate": 9.334148048065529e-05, "loss": 2.377668571472168, "memory(GiB)": 72.85, "step": 19400, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.670753 }, { "epoch": 0.8313696928152179, 
"grad_norm": 3.4485177993774414, "learning_rate": 9.333812459308638e-05, "loss": 2.1981239318847656, "memory(GiB)": 72.85, "step": 19405, "token_acc": 0.5089605734767025, "train_speed(iter/s)": 0.670727 }, { "epoch": 0.8315839081444668, "grad_norm": 4.38336706161499, "learning_rate": 9.33347679204067e-05, "loss": 2.673833465576172, "memory(GiB)": 72.85, "step": 19410, "token_acc": 0.43157894736842106, "train_speed(iter/s)": 0.670664 }, { "epoch": 0.8317981234737157, "grad_norm": 4.165842533111572, "learning_rate": 9.333141046267706e-05, "loss": 2.273573875427246, "memory(GiB)": 72.85, "step": 19415, "token_acc": 0.5133333333333333, "train_speed(iter/s)": 0.670658 }, { "epoch": 0.8320123388029648, "grad_norm": 7.870602130889893, "learning_rate": 9.332805221995827e-05, "loss": 2.5058107376098633, "memory(GiB)": 72.85, "step": 19420, "token_acc": 0.44813278008298757, "train_speed(iter/s)": 0.670633 }, { "epoch": 0.8322265541322137, "grad_norm": 5.208462715148926, "learning_rate": 9.332469319231114e-05, "loss": 2.584977912902832, "memory(GiB)": 72.85, "step": 19425, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.670615 }, { "epoch": 0.8324407694614626, "grad_norm": 4.407419681549072, "learning_rate": 9.332133337979658e-05, "loss": 2.583117866516113, "memory(GiB)": 72.85, "step": 19430, "token_acc": 0.47533632286995514, "train_speed(iter/s)": 0.670591 }, { "epoch": 0.8326549847907116, "grad_norm": 3.9105756282806396, "learning_rate": 9.331797278247541e-05, "loss": 2.2178220748901367, "memory(GiB)": 72.85, "step": 19435, "token_acc": 0.5546558704453441, "train_speed(iter/s)": 0.670577 }, { "epoch": 0.8328692001199606, "grad_norm": 2.999655246734619, "learning_rate": 9.331461140040854e-05, "loss": 2.4805522918701173, "memory(GiB)": 72.85, "step": 19440, "token_acc": 0.4743202416918429, "train_speed(iter/s)": 0.670574 }, { "epoch": 0.8330834154492095, "grad_norm": 3.3506340980529785, "learning_rate": 9.331124923365685e-05, "loss": 2.654713439941406, 
"memory(GiB)": 72.85, "step": 19445, "token_acc": 0.4608695652173913, "train_speed(iter/s)": 0.670604 }, { "epoch": 0.8332976307784585, "grad_norm": 3.7228844165802, "learning_rate": 9.330788628228124e-05, "loss": 2.591746139526367, "memory(GiB)": 72.85, "step": 19450, "token_acc": 0.492, "train_speed(iter/s)": 0.670551 }, { "epoch": 0.8335118461077075, "grad_norm": 3.5126681327819824, "learning_rate": 9.330452254634265e-05, "loss": 2.5890432357788087, "memory(GiB)": 72.85, "step": 19455, "token_acc": 0.48184818481848185, "train_speed(iter/s)": 0.670545 }, { "epoch": 0.8337260614369564, "grad_norm": 3.3221004009246826, "learning_rate": 9.330115802590202e-05, "loss": 2.5951004028320312, "memory(GiB)": 72.85, "step": 19460, "token_acc": 0.4419889502762431, "train_speed(iter/s)": 0.670565 }, { "epoch": 0.8339402767662054, "grad_norm": 3.2044904232025146, "learning_rate": 9.32977927210203e-05, "loss": 2.641308403015137, "memory(GiB)": 72.85, "step": 19465, "token_acc": 0.4671280276816609, "train_speed(iter/s)": 0.670596 }, { "epoch": 0.8341544920954543, "grad_norm": 3.176605701446533, "learning_rate": 9.329442663175842e-05, "loss": 2.4032569885253907, "memory(GiB)": 72.85, "step": 19470, "token_acc": 0.44876325088339225, "train_speed(iter/s)": 0.670604 }, { "epoch": 0.8343687074247033, "grad_norm": 3.60719633102417, "learning_rate": 9.32910597581774e-05, "loss": 2.462482452392578, "memory(GiB)": 72.85, "step": 19475, "token_acc": 0.45666666666666667, "train_speed(iter/s)": 0.67063 }, { "epoch": 0.8345829227539523, "grad_norm": 6.508607387542725, "learning_rate": 9.328769210033822e-05, "loss": 2.4522085189819336, "memory(GiB)": 72.85, "step": 19480, "token_acc": 0.5047318611987381, "train_speed(iter/s)": 0.670623 }, { "epoch": 0.8347971380832012, "grad_norm": 3.077972173690796, "learning_rate": 9.328432365830191e-05, "loss": 2.3427019119262695, "memory(GiB)": 72.85, "step": 19485, "token_acc": 0.49140893470790376, "train_speed(iter/s)": 0.670647 }, { "epoch": 
0.8350113534124501, "grad_norm": 3.9148447513580322, "learning_rate": 9.328095443212945e-05, "loss": 2.5967296600341796, "memory(GiB)": 72.85, "step": 19490, "token_acc": 0.4618055555555556, "train_speed(iter/s)": 0.670596 }, { "epoch": 0.8352255687416992, "grad_norm": 3.371319532394409, "learning_rate": 9.327758442188192e-05, "loss": 2.5470550537109373, "memory(GiB)": 72.85, "step": 19495, "token_acc": 0.479020979020979, "train_speed(iter/s)": 0.670565 }, { "epoch": 0.8354397840709481, "grad_norm": 3.425157070159912, "learning_rate": 9.327421362762034e-05, "loss": 2.4316572189331054, "memory(GiB)": 72.85, "step": 19500, "token_acc": 0.5145985401459854, "train_speed(iter/s)": 0.670526 }, { "epoch": 0.8354397840709481, "eval_loss": 2.1830856800079346, "eval_runtime": 16.9865, "eval_samples_per_second": 5.887, "eval_steps_per_second": 5.887, "eval_token_acc": 0.5028409090909091, "step": 19500 }, { "epoch": 0.835653999400197, "grad_norm": 3.2914860248565674, "learning_rate": 9.327084204940577e-05, "loss": 2.615245819091797, "memory(GiB)": 72.85, "step": 19505, "token_acc": 0.486905916585839, "train_speed(iter/s)": 0.670064 }, { "epoch": 0.8358682147294461, "grad_norm": 3.1302337646484375, "learning_rate": 9.326746968729933e-05, "loss": 2.5672229766845702, "memory(GiB)": 72.85, "step": 19510, "token_acc": 0.4625668449197861, "train_speed(iter/s)": 0.670103 }, { "epoch": 0.836082430058695, "grad_norm": 3.1686692237854004, "learning_rate": 9.326409654136208e-05, "loss": 2.296581268310547, "memory(GiB)": 72.85, "step": 19515, "token_acc": 0.4772727272727273, "train_speed(iter/s)": 0.670115 }, { "epoch": 0.836296645387944, "grad_norm": 4.577554702758789, "learning_rate": 9.326072261165512e-05, "loss": 2.6471487045288087, "memory(GiB)": 72.85, "step": 19520, "token_acc": 0.46366782006920415, "train_speed(iter/s)": 0.670086 }, { "epoch": 0.8365108607171929, "grad_norm": 4.386234283447266, "learning_rate": 9.325734789823961e-05, "loss": 2.0953910827636717, "memory(GiB)": 
72.85, "step": 19525, "token_acc": 0.4959349593495935, "train_speed(iter/s)": 0.670078 }, { "epoch": 0.8367250760464419, "grad_norm": 5.760611534118652, "learning_rate": 9.325397240117665e-05, "loss": 2.385052299499512, "memory(GiB)": 72.85, "step": 19530, "token_acc": 0.46688741721854304, "train_speed(iter/s)": 0.670047 }, { "epoch": 0.8369392913756909, "grad_norm": 3.32908296585083, "learning_rate": 9.325059612052743e-05, "loss": 2.1288684844970702, "memory(GiB)": 72.85, "step": 19535, "token_acc": 0.5567010309278351, "train_speed(iter/s)": 0.670064 }, { "epoch": 0.8371535067049398, "grad_norm": 4.108778953552246, "learning_rate": 9.324721905635305e-05, "loss": 2.5956771850585936, "memory(GiB)": 72.85, "step": 19540, "token_acc": 0.4526627218934911, "train_speed(iter/s)": 0.670059 }, { "epoch": 0.8373677220341887, "grad_norm": 2.745811939239502, "learning_rate": 9.324384120871476e-05, "loss": 2.4746719360351563, "memory(GiB)": 72.85, "step": 19545, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.670065 }, { "epoch": 0.8375819373634378, "grad_norm": 3.5244479179382324, "learning_rate": 9.32404625776737e-05, "loss": 2.2574512481689455, "memory(GiB)": 72.85, "step": 19550, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.670072 }, { "epoch": 0.8377961526926867, "grad_norm": 4.961248397827148, "learning_rate": 9.32370831632911e-05, "loss": 2.5351905822753906, "memory(GiB)": 72.85, "step": 19555, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.670057 }, { "epoch": 0.8380103680219356, "grad_norm": 3.3187108039855957, "learning_rate": 9.323370296562819e-05, "loss": 2.4757320404052736, "memory(GiB)": 72.85, "step": 19560, "token_acc": 0.5064102564102564, "train_speed(iter/s)": 0.67008 }, { "epoch": 0.8382245833511847, "grad_norm": 5.002192497253418, "learning_rate": 9.32303219847462e-05, "loss": 2.3839019775390624, "memory(GiB)": 72.85, "step": 19565, "token_acc": 0.48242811501597443, "train_speed(iter/s)": 0.67007 }, { "epoch": 
0.8384387986804336, "grad_norm": 4.115938186645508, "learning_rate": 9.322694022070635e-05, "loss": 2.4087501525878907, "memory(GiB)": 72.85, "step": 19570, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.670081 }, { "epoch": 0.8386530140096825, "grad_norm": 3.199866771697998, "learning_rate": 9.322355767356993e-05, "loss": 2.4580408096313477, "memory(GiB)": 72.85, "step": 19575, "token_acc": 0.46959459459459457, "train_speed(iter/s)": 0.670071 }, { "epoch": 0.8388672293389315, "grad_norm": 3.765012264251709, "learning_rate": 9.322017434339823e-05, "loss": 2.4171112060546873, "memory(GiB)": 72.85, "step": 19580, "token_acc": 0.47157190635451507, "train_speed(iter/s)": 0.670057 }, { "epoch": 0.8390814446681805, "grad_norm": 3.9073140621185303, "learning_rate": 9.321679023025253e-05, "loss": 2.415322494506836, "memory(GiB)": 72.85, "step": 19585, "token_acc": 0.5058823529411764, "train_speed(iter/s)": 0.670053 }, { "epoch": 0.8392956599974294, "grad_norm": 3.309022903442383, "learning_rate": 9.321340533419412e-05, "loss": 2.5281919479370116, "memory(GiB)": 72.85, "step": 19590, "token_acc": 0.47043010752688175, "train_speed(iter/s)": 0.670076 }, { "epoch": 0.8395098753266784, "grad_norm": 3.328585624694824, "learning_rate": 9.321001965528436e-05, "loss": 2.5369544982910157, "memory(GiB)": 72.85, "step": 19595, "token_acc": 0.46607669616519176, "train_speed(iter/s)": 0.6701 }, { "epoch": 0.8397240906559273, "grad_norm": 5.18661642074585, "learning_rate": 9.320663319358454e-05, "loss": 2.4242879867553713, "memory(GiB)": 72.85, "step": 19600, "token_acc": 0.524822695035461, "train_speed(iter/s)": 0.670145 }, { "epoch": 0.8399383059851763, "grad_norm": 5.290574073791504, "learning_rate": 9.320324594915602e-05, "loss": 2.2745954513549806, "memory(GiB)": 72.85, "step": 19605, "token_acc": 0.5381526104417671, "train_speed(iter/s)": 0.67017 }, { "epoch": 0.8401525213144253, "grad_norm": 5.529487133026123, "learning_rate": 9.319985792206019e-05, "loss": 
2.7107624053955077, "memory(GiB)": 72.85, "step": 19610, "token_acc": 0.43902439024390244, "train_speed(iter/s)": 0.670123 }, { "epoch": 0.8403667366436742, "grad_norm": 3.9436237812042236, "learning_rate": 9.31964691123584e-05, "loss": 2.4518144607543944, "memory(GiB)": 72.85, "step": 19615, "token_acc": 0.4630225080385852, "train_speed(iter/s)": 0.670119 }, { "epoch": 0.8405809519729232, "grad_norm": 3.244680643081665, "learning_rate": 9.319307952011205e-05, "loss": 2.704593849182129, "memory(GiB)": 72.85, "step": 19620, "token_acc": 0.4565916398713826, "train_speed(iter/s)": 0.670131 }, { "epoch": 0.8407951673021722, "grad_norm": 2.9301252365112305, "learning_rate": 9.318968914538256e-05, "loss": 2.486091423034668, "memory(GiB)": 72.85, "step": 19625, "token_acc": 0.48725212464589235, "train_speed(iter/s)": 0.670169 }, { "epoch": 0.8410093826314211, "grad_norm": 4.618443965911865, "learning_rate": 9.318629798823131e-05, "loss": 2.5963771820068358, "memory(GiB)": 72.85, "step": 19630, "token_acc": 0.5, "train_speed(iter/s)": 0.670188 }, { "epoch": 0.84122359796067, "grad_norm": 4.4978227615356445, "learning_rate": 9.318290604871978e-05, "loss": 2.514069175720215, "memory(GiB)": 72.85, "step": 19635, "token_acc": 0.47653429602888087, "train_speed(iter/s)": 0.670177 }, { "epoch": 0.8414378132899191, "grad_norm": 4.999866485595703, "learning_rate": 9.317951332690938e-05, "loss": 2.807808494567871, "memory(GiB)": 72.85, "step": 19640, "token_acc": 0.46283783783783783, "train_speed(iter/s)": 0.670248 }, { "epoch": 0.841652028619168, "grad_norm": 3.624316930770874, "learning_rate": 9.317611982286161e-05, "loss": 2.4725948333740235, "memory(GiB)": 72.85, "step": 19645, "token_acc": 0.46464646464646464, "train_speed(iter/s)": 0.670255 }, { "epoch": 0.8418662439484169, "grad_norm": 3.0705478191375732, "learning_rate": 9.317272553663791e-05, "loss": 2.4294715881347657, "memory(GiB)": 72.85, "step": 19650, "token_acc": 0.47023809523809523, "train_speed(iter/s)": 0.670252 }, 
{ "epoch": 0.842080459277666, "grad_norm": 2.673926830291748, "learning_rate": 9.316933046829981e-05, "loss": 2.320955276489258, "memory(GiB)": 72.85, "step": 19655, "token_acc": 0.46607669616519176, "train_speed(iter/s)": 0.670266 }, { "epoch": 0.8422946746069149, "grad_norm": 3.3480212688446045, "learning_rate": 9.316593461790876e-05, "loss": 2.486317825317383, "memory(GiB)": 72.85, "step": 19660, "token_acc": 0.5116959064327485, "train_speed(iter/s)": 0.670289 }, { "epoch": 0.8425088899361638, "grad_norm": 4.153810501098633, "learning_rate": 9.316253798552634e-05, "loss": 2.6211997985839846, "memory(GiB)": 72.85, "step": 19665, "token_acc": 0.42574257425742573, "train_speed(iter/s)": 0.670314 }, { "epoch": 0.8427231052654128, "grad_norm": 3.1626389026641846, "learning_rate": 9.315914057121405e-05, "loss": 2.4296188354492188, "memory(GiB)": 72.85, "step": 19670, "token_acc": 0.5183823529411765, "train_speed(iter/s)": 0.67034 }, { "epoch": 0.8429373205946618, "grad_norm": 3.686988353729248, "learning_rate": 9.315574237503343e-05, "loss": 2.444879150390625, "memory(GiB)": 72.85, "step": 19675, "token_acc": 0.4603174603174603, "train_speed(iter/s)": 0.670372 }, { "epoch": 0.8431515359239107, "grad_norm": 3.575975179672241, "learning_rate": 9.315234339704607e-05, "loss": 2.507388687133789, "memory(GiB)": 72.85, "step": 19680, "token_acc": 0.4612794612794613, "train_speed(iter/s)": 0.670366 }, { "epoch": 0.8433657512531597, "grad_norm": 3.201046943664551, "learning_rate": 9.314894363731352e-05, "loss": 2.5674339294433595, "memory(GiB)": 72.85, "step": 19685, "token_acc": 0.5028248587570622, "train_speed(iter/s)": 0.670405 }, { "epoch": 0.8435799665824086, "grad_norm": 3.755218029022217, "learning_rate": 9.314554309589739e-05, "loss": 2.4011198043823243, "memory(GiB)": 72.85, "step": 19690, "token_acc": 0.48523206751054854, "train_speed(iter/s)": 0.670374 }, { "epoch": 0.8437941819116576, "grad_norm": 3.6708436012268066, "learning_rate": 9.314214177285925e-05, "loss": 
2.2263702392578124, "memory(GiB)": 72.85, "step": 19695, "token_acc": 0.49407114624505927, "train_speed(iter/s)": 0.670396 }, { "epoch": 0.8440083972409066, "grad_norm": 4.177743434906006, "learning_rate": 9.313873966826075e-05, "loss": 2.441193962097168, "memory(GiB)": 72.85, "step": 19700, "token_acc": 0.4738562091503268, "train_speed(iter/s)": 0.670399 }, { "epoch": 0.8442226125701555, "grad_norm": 2.833059549331665, "learning_rate": 9.313533678216353e-05, "loss": 2.440812873840332, "memory(GiB)": 72.85, "step": 19705, "token_acc": 0.4869791666666667, "train_speed(iter/s)": 0.670439 }, { "epoch": 0.8444368278994044, "grad_norm": 3.9899237155914307, "learning_rate": 9.31319331146292e-05, "loss": 2.6497882843017577, "memory(GiB)": 72.85, "step": 19710, "token_acc": 0.47435897435897434, "train_speed(iter/s)": 0.670428 }, { "epoch": 0.8446510432286535, "grad_norm": 3.842008590698242, "learning_rate": 9.312852866571944e-05, "loss": 2.249213218688965, "memory(GiB)": 72.85, "step": 19715, "token_acc": 0.5229007633587787, "train_speed(iter/s)": 0.670436 }, { "epoch": 0.8448652585579024, "grad_norm": 4.19020414352417, "learning_rate": 9.312512343549594e-05, "loss": 2.4451984405517577, "memory(GiB)": 72.85, "step": 19720, "token_acc": 0.48014440433212996, "train_speed(iter/s)": 0.670409 }, { "epoch": 0.8450794738871513, "grad_norm": 3.5997579097747803, "learning_rate": 9.312171742402037e-05, "loss": 2.3190093994140626, "memory(GiB)": 72.85, "step": 19725, "token_acc": 0.46105919003115264, "train_speed(iter/s)": 0.670394 }, { "epoch": 0.8452936892164004, "grad_norm": 3.234591007232666, "learning_rate": 9.311831063135443e-05, "loss": 2.490701675415039, "memory(GiB)": 72.85, "step": 19730, "token_acc": 0.4509283819628647, "train_speed(iter/s)": 0.670376 }, { "epoch": 0.8455079045456493, "grad_norm": 3.4959027767181396, "learning_rate": 9.311490305755986e-05, "loss": 2.645585632324219, "memory(GiB)": 72.85, "step": 19735, "token_acc": 0.45268542199488493, 
"train_speed(iter/s)": 0.670422 }, { "epoch": 0.8457221198748982, "grad_norm": 4.2569260597229, "learning_rate": 9.311149470269836e-05, "loss": 2.4351261138916014, "memory(GiB)": 72.85, "step": 19740, "token_acc": 0.46938775510204084, "train_speed(iter/s)": 0.670416 }, { "epoch": 0.8459363352041472, "grad_norm": 3.8961048126220703, "learning_rate": 9.31080855668317e-05, "loss": 2.491044044494629, "memory(GiB)": 72.85, "step": 19745, "token_acc": 0.48221343873517786, "train_speed(iter/s)": 0.670449 }, { "epoch": 0.8461505505333962, "grad_norm": 5.329202651977539, "learning_rate": 9.310467565002162e-05, "loss": 2.3029897689819334, "memory(GiB)": 72.85, "step": 19750, "token_acc": 0.48264984227129337, "train_speed(iter/s)": 0.670463 }, { "epoch": 0.8463647658626451, "grad_norm": 3.6743273735046387, "learning_rate": 9.310126495232991e-05, "loss": 2.459309768676758, "memory(GiB)": 72.85, "step": 19755, "token_acc": 0.44375, "train_speed(iter/s)": 0.670445 }, { "epoch": 0.8465789811918941, "grad_norm": 3.3843164443969727, "learning_rate": 9.309785347381836e-05, "loss": 2.5746021270751953, "memory(GiB)": 72.85, "step": 19760, "token_acc": 0.44126074498567336, "train_speed(iter/s)": 0.670444 }, { "epoch": 0.846793196521143, "grad_norm": 4.0995988845825195, "learning_rate": 9.309444121454877e-05, "loss": 2.6440086364746094, "memory(GiB)": 72.85, "step": 19765, "token_acc": 0.46112600536193027, "train_speed(iter/s)": 0.670439 }, { "epoch": 0.847007411850392, "grad_norm": 3.8647522926330566, "learning_rate": 9.309102817458294e-05, "loss": 2.283732223510742, "memory(GiB)": 72.85, "step": 19770, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.670462 }, { "epoch": 0.847221627179641, "grad_norm": 4.653307914733887, "learning_rate": 9.308761435398272e-05, "loss": 2.1080448150634767, "memory(GiB)": 72.85, "step": 19775, "token_acc": 0.5719844357976653, "train_speed(iter/s)": 0.670462 }, { "epoch": 0.8474358425088899, "grad_norm": 3.8940072059631348, "learning_rate": 
9.308419975280993e-05, "loss": 2.438307762145996, "memory(GiB)": 72.85, "step": 19780, "token_acc": 0.45660377358490567, "train_speed(iter/s)": 0.67048 }, { "epoch": 0.8476500578381388, "grad_norm": 3.2656233310699463, "learning_rate": 9.308078437112646e-05, "loss": 2.5301792144775392, "memory(GiB)": 72.85, "step": 19785, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.670446 }, { "epoch": 0.8478642731673879, "grad_norm": 3.6879470348358154, "learning_rate": 9.307736820899418e-05, "loss": 2.566412353515625, "memory(GiB)": 72.85, "step": 19790, "token_acc": 0.49575070821529743, "train_speed(iter/s)": 0.670397 }, { "epoch": 0.8480784884966368, "grad_norm": 3.1996312141418457, "learning_rate": 9.307395126647493e-05, "loss": 2.319231414794922, "memory(GiB)": 72.85, "step": 19795, "token_acc": 0.4907749077490775, "train_speed(iter/s)": 0.670403 }, { "epoch": 0.8482927038258857, "grad_norm": 4.778593063354492, "learning_rate": 9.307053354363068e-05, "loss": 2.4052942276000975, "memory(GiB)": 72.85, "step": 19800, "token_acc": 0.47547169811320755, "train_speed(iter/s)": 0.670433 }, { "epoch": 0.8485069191551348, "grad_norm": 2.923565626144409, "learning_rate": 9.30671150405233e-05, "loss": 2.6225326538085936, "memory(GiB)": 72.85, "step": 19805, "token_acc": 0.4281984334203655, "train_speed(iter/s)": 0.670433 }, { "epoch": 0.8487211344843837, "grad_norm": 5.5898823738098145, "learning_rate": 9.306369575721471e-05, "loss": 2.2658161163330077, "memory(GiB)": 72.85, "step": 19810, "token_acc": 0.4938650306748466, "train_speed(iter/s)": 0.670403 }, { "epoch": 0.8489353498136326, "grad_norm": 4.498488903045654, "learning_rate": 9.306027569376691e-05, "loss": 2.5110973358154296, "memory(GiB)": 72.85, "step": 19815, "token_acc": 0.47692307692307695, "train_speed(iter/s)": 0.670367 }, { "epoch": 0.8491495651428816, "grad_norm": 3.6451239585876465, "learning_rate": 9.305685485024181e-05, "loss": 2.568583106994629, "memory(GiB)": 72.85, "step": 19820, "token_acc": 
0.48046875, "train_speed(iter/s)": 0.670355 }, { "epoch": 0.8493637804721306, "grad_norm": 3.510092258453369, "learning_rate": 9.305343322670138e-05, "loss": 2.478648567199707, "memory(GiB)": 72.85, "step": 19825, "token_acc": 0.4744318181818182, "train_speed(iter/s)": 0.670355 }, { "epoch": 0.8495779958013795, "grad_norm": 3.245739459991455, "learning_rate": 9.305001082320763e-05, "loss": 2.277973175048828, "memory(GiB)": 72.85, "step": 19830, "token_acc": 0.4766666666666667, "train_speed(iter/s)": 0.67039 }, { "epoch": 0.8497922111306285, "grad_norm": 3.7211709022521973, "learning_rate": 9.304658763982255e-05, "loss": 2.3564115524291993, "memory(GiB)": 72.85, "step": 19835, "token_acc": 0.4797687861271676, "train_speed(iter/s)": 0.670351 }, { "epoch": 0.8500064264598775, "grad_norm": 3.106818437576294, "learning_rate": 9.304316367660815e-05, "loss": 2.449456787109375, "memory(GiB)": 72.85, "step": 19840, "token_acc": 0.48231511254019294, "train_speed(iter/s)": 0.670421 }, { "epoch": 0.8502206417891264, "grad_norm": 2.9165592193603516, "learning_rate": 9.303973893362646e-05, "loss": 2.3892749786376952, "memory(GiB)": 72.85, "step": 19845, "token_acc": 0.5105105105105106, "train_speed(iter/s)": 0.670409 }, { "epoch": 0.8504348571183754, "grad_norm": 3.1791815757751465, "learning_rate": 9.303631341093953e-05, "loss": 2.3225631713867188, "memory(GiB)": 72.85, "step": 19850, "token_acc": 0.49852507374631266, "train_speed(iter/s)": 0.670422 }, { "epoch": 0.8506490724476243, "grad_norm": 5.394770622253418, "learning_rate": 9.303288710860942e-05, "loss": 2.2625095367431642, "memory(GiB)": 72.85, "step": 19855, "token_acc": 0.5413223140495868, "train_speed(iter/s)": 0.670409 }, { "epoch": 0.8508632877768734, "grad_norm": 4.297464847564697, "learning_rate": 9.302946002669818e-05, "loss": 2.438614273071289, "memory(GiB)": 72.85, "step": 19860, "token_acc": 0.4497991967871486, "train_speed(iter/s)": 0.670411 }, { "epoch": 0.8510775031061223, "grad_norm": 2.8720874786376953, 
"learning_rate": 9.302603216526791e-05, "loss": 2.48120231628418, "memory(GiB)": 72.85, "step": 19865, "token_acc": 0.49363057324840764, "train_speed(iter/s)": 0.670397 }, { "epoch": 0.8512917184353712, "grad_norm": 3.8263237476348877, "learning_rate": 9.30226035243807e-05, "loss": 2.1112077713012694, "memory(GiB)": 72.85, "step": 19870, "token_acc": 0.562962962962963, "train_speed(iter/s)": 0.670405 }, { "epoch": 0.8515059337646202, "grad_norm": 2.5203864574432373, "learning_rate": 9.301917410409868e-05, "loss": 2.409856605529785, "memory(GiB)": 72.85, "step": 19875, "token_acc": 0.48, "train_speed(iter/s)": 0.670412 }, { "epoch": 0.8517201490938692, "grad_norm": 4.551082611083984, "learning_rate": 9.301574390448396e-05, "loss": 2.4128211975097655, "memory(GiB)": 72.85, "step": 19880, "token_acc": 0.49477351916376305, "train_speed(iter/s)": 0.670395 }, { "epoch": 0.8519343644231181, "grad_norm": 3.4298977851867676, "learning_rate": 9.301231292559869e-05, "loss": 2.3369705200195314, "memory(GiB)": 72.85, "step": 19885, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.670358 }, { "epoch": 0.8521485797523671, "grad_norm": 3.7409706115722656, "learning_rate": 9.300888116750501e-05, "loss": 2.574335479736328, "memory(GiB)": 72.85, "step": 19890, "token_acc": 0.47297297297297297, "train_speed(iter/s)": 0.670339 }, { "epoch": 0.852362795081616, "grad_norm": 4.323854446411133, "learning_rate": 9.300544863026511e-05, "loss": 2.4434560775756835, "memory(GiB)": 72.85, "step": 19895, "token_acc": 0.5193798449612403, "train_speed(iter/s)": 0.670344 }, { "epoch": 0.852577010410865, "grad_norm": 3.5324580669403076, "learning_rate": 9.300201531394117e-05, "loss": 2.5019062042236326, "memory(GiB)": 72.85, "step": 19900, "token_acc": 0.48375451263537905, "train_speed(iter/s)": 0.67031 }, { "epoch": 0.852791225740114, "grad_norm": 3.525831699371338, "learning_rate": 9.299858121859538e-05, "loss": 2.5930423736572266, "memory(GiB)": 72.85, "step": 19905, "token_acc": 
0.5092250922509225, "train_speed(iter/s)": 0.670298 }, { "epoch": 0.8530054410693629, "grad_norm": 3.896038770675659, "learning_rate": 9.299514634428996e-05, "loss": 2.4410959243774415, "memory(GiB)": 72.85, "step": 19910, "token_acc": 0.4562043795620438, "train_speed(iter/s)": 0.670242 }, { "epoch": 0.8532196563986119, "grad_norm": 3.8095715045928955, "learning_rate": 9.299171069108712e-05, "loss": 2.4853775024414064, "memory(GiB)": 72.85, "step": 19915, "token_acc": 0.4489795918367347, "train_speed(iter/s)": 0.670219 }, { "epoch": 0.8534338717278609, "grad_norm": 3.7169077396392822, "learning_rate": 9.298827425904913e-05, "loss": 2.266391563415527, "memory(GiB)": 72.85, "step": 19920, "token_acc": 0.48175182481751827, "train_speed(iter/s)": 0.670233 }, { "epoch": 0.8536480870571098, "grad_norm": 4.91625452041626, "learning_rate": 9.298483704823821e-05, "loss": 2.6716339111328127, "memory(GiB)": 72.85, "step": 19925, "token_acc": 0.436426116838488, "train_speed(iter/s)": 0.670246 }, { "epoch": 0.8538623023863587, "grad_norm": 3.518587827682495, "learning_rate": 9.298139905871664e-05, "loss": 2.363102149963379, "memory(GiB)": 72.85, "step": 19930, "token_acc": 0.45722713864306785, "train_speed(iter/s)": 0.670224 }, { "epoch": 0.8540765177156078, "grad_norm": 3.327817916870117, "learning_rate": 9.297796029054671e-05, "loss": 2.3340002059936524, "memory(GiB)": 72.85, "step": 19935, "token_acc": 0.4804270462633452, "train_speed(iter/s)": 0.670191 }, { "epoch": 0.8542907330448567, "grad_norm": 2.9353790283203125, "learning_rate": 9.297452074379072e-05, "loss": 2.2610153198242187, "memory(GiB)": 72.85, "step": 19940, "token_acc": 0.4657534246575342, "train_speed(iter/s)": 0.670218 }, { "epoch": 0.8545049483741056, "grad_norm": 3.312242031097412, "learning_rate": 9.297108041851096e-05, "loss": 2.1979515075683596, "memory(GiB)": 72.85, "step": 19945, "token_acc": 0.5193548387096775, "train_speed(iter/s)": 0.670206 }, { "epoch": 0.8547191637033547, "grad_norm": 
4.065151214599609, "learning_rate": 9.296763931476979e-05, "loss": 2.29984073638916, "memory(GiB)": 72.85, "step": 19950, "token_acc": 0.5078125, "train_speed(iter/s)": 0.67025 }, { "epoch": 0.8549333790326036, "grad_norm": 4.474510192871094, "learning_rate": 9.296419743262952e-05, "loss": 2.8298080444335936, "memory(GiB)": 72.85, "step": 19955, "token_acc": 0.46494464944649444, "train_speed(iter/s)": 0.670287 }, { "epoch": 0.8551475943618525, "grad_norm": 3.2882401943206787, "learning_rate": 9.29607547721525e-05, "loss": 2.341718864440918, "memory(GiB)": 72.85, "step": 19960, "token_acc": 0.5304659498207885, "train_speed(iter/s)": 0.670304 }, { "epoch": 0.8553618096911015, "grad_norm": 3.4576237201690674, "learning_rate": 9.295731133340111e-05, "loss": 2.6743927001953125, "memory(GiB)": 72.85, "step": 19965, "token_acc": 0.45, "train_speed(iter/s)": 0.670345 }, { "epoch": 0.8555760250203505, "grad_norm": 3.2433903217315674, "learning_rate": 9.295386711643772e-05, "loss": 2.24979305267334, "memory(GiB)": 72.85, "step": 19970, "token_acc": 0.5144694533762058, "train_speed(iter/s)": 0.670374 }, { "epoch": 0.8557902403495994, "grad_norm": 3.149200916290283, "learning_rate": 9.295042212132475e-05, "loss": 2.3433570861816406, "memory(GiB)": 72.85, "step": 19975, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.670395 }, { "epoch": 0.8560044556788484, "grad_norm": 4.384479522705078, "learning_rate": 9.29469763481246e-05, "loss": 2.4493682861328123, "memory(GiB)": 72.85, "step": 19980, "token_acc": 0.4676258992805755, "train_speed(iter/s)": 0.670408 }, { "epoch": 0.8562186710080973, "grad_norm": 6.219555377960205, "learning_rate": 9.294352979689966e-05, "loss": 2.1898740768432616, "memory(GiB)": 72.85, "step": 19985, "token_acc": 0.524904214559387, "train_speed(iter/s)": 0.670375 }, { "epoch": 0.8564328863373463, "grad_norm": 3.6515629291534424, "learning_rate": 9.29400824677124e-05, "loss": 2.4092876434326174, "memory(GiB)": 72.85, "step": 19990, "token_acc": 
0.5257352941176471, "train_speed(iter/s)": 0.670429 }, { "epoch": 0.8566471016665953, "grad_norm": 2.7949013710021973, "learning_rate": 9.293663436062527e-05, "loss": 2.4125431060791014, "memory(GiB)": 72.85, "step": 19995, "token_acc": 0.4657039711191336, "train_speed(iter/s)": 0.670438 }, { "epoch": 0.8568613169958442, "grad_norm": 3.677311658859253, "learning_rate": 9.293318547570073e-05, "loss": 2.4416486740112306, "memory(GiB)": 72.85, "step": 20000, "token_acc": 0.48338368580060426, "train_speed(iter/s)": 0.670415 }, { "epoch": 0.8568613169958442, "eval_loss": 2.0343167781829834, "eval_runtime": 17.0912, "eval_samples_per_second": 5.851, "eval_steps_per_second": 5.851, "eval_token_acc": 0.5215686274509804, "step": 20000 }, { "epoch": 0.8570755323250931, "grad_norm": 4.673543930053711, "learning_rate": 9.292973581300126e-05, "loss": 2.311128044128418, "memory(GiB)": 72.85, "step": 20005, "token_acc": 0.5194174757281553, "train_speed(iter/s)": 0.669997 }, { "epoch": 0.8572897476543422, "grad_norm": 3.402397394180298, "learning_rate": 9.292628537258937e-05, "loss": 2.330133819580078, "memory(GiB)": 72.85, "step": 20010, "token_acc": 0.519434628975265, "train_speed(iter/s)": 0.670016 }, { "epoch": 0.8575039629835911, "grad_norm": 3.847869396209717, "learning_rate": 9.292283415452753e-05, "loss": 2.2256614685058596, "memory(GiB)": 72.85, "step": 20015, "token_acc": 0.5337837837837838, "train_speed(iter/s)": 0.670026 }, { "epoch": 0.85771817831284, "grad_norm": 3.121363401412964, "learning_rate": 9.291938215887828e-05, "loss": 2.475783348083496, "memory(GiB)": 72.85, "step": 20020, "token_acc": 0.49498327759197325, "train_speed(iter/s)": 0.670065 }, { "epoch": 0.8579323936420891, "grad_norm": 3.9407012462615967, "learning_rate": 9.291592938570418e-05, "loss": 2.741859245300293, "memory(GiB)": 72.85, "step": 20025, "token_acc": 0.43450479233226835, "train_speed(iter/s)": 0.670021 }, { "epoch": 0.858146608971338, "grad_norm": 5.750284194946289, "learning_rate": 
9.291247583506775e-05, "loss": 2.706318664550781, "memory(GiB)": 72.85, "step": 20030, "token_acc": 0.4200626959247649, "train_speed(iter/s)": 0.67004 }, { "epoch": 0.8583608243005869, "grad_norm": 5.1829915046691895, "learning_rate": 9.290902150703158e-05, "loss": 2.558033752441406, "memory(GiB)": 72.85, "step": 20035, "token_acc": 0.508833922261484, "train_speed(iter/s)": 0.670065 }, { "epoch": 0.858575039629836, "grad_norm": 4.734776496887207, "learning_rate": 9.290556640165822e-05, "loss": 2.3141033172607424, "memory(GiB)": 72.85, "step": 20040, "token_acc": 0.5507246376811594, "train_speed(iter/s)": 0.670098 }, { "epoch": 0.8587892549590849, "grad_norm": 3.5239105224609375, "learning_rate": 9.290211051901027e-05, "loss": 2.5418926239013673, "memory(GiB)": 72.85, "step": 20045, "token_acc": 0.48563218390804597, "train_speed(iter/s)": 0.670152 }, { "epoch": 0.8590034702883338, "grad_norm": 3.1930429935455322, "learning_rate": 9.289865385915035e-05, "loss": 2.2765182495117187, "memory(GiB)": 72.85, "step": 20050, "token_acc": 0.5114285714285715, "train_speed(iter/s)": 0.670162 }, { "epoch": 0.8592176856175828, "grad_norm": 4.485609531402588, "learning_rate": 9.289519642214108e-05, "loss": 2.5075611114501952, "memory(GiB)": 72.85, "step": 20055, "token_acc": 0.47157190635451507, "train_speed(iter/s)": 0.670195 }, { "epoch": 0.8594319009468318, "grad_norm": 4.828231334686279, "learning_rate": 9.289173820804508e-05, "loss": 2.7235626220703124, "memory(GiB)": 72.85, "step": 20060, "token_acc": 0.4523809523809524, "train_speed(iter/s)": 0.670217 }, { "epoch": 0.8596461162760807, "grad_norm": 3.908292770385742, "learning_rate": 9.2888279216925e-05, "loss": 2.4738914489746096, "memory(GiB)": 72.85, "step": 20065, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670232 }, { "epoch": 0.8598603316053297, "grad_norm": 4.408742427825928, "learning_rate": 9.288481944884353e-05, "loss": 2.688981056213379, "memory(GiB)": 72.85, "step": 20070, "token_acc": 
0.4440789473684211, "train_speed(iter/s)": 0.670252 }, { "epoch": 0.8600745469345786, "grad_norm": 3.643808126449585, "learning_rate": 9.288135890386331e-05, "loss": 2.58927001953125, "memory(GiB)": 72.85, "step": 20075, "token_acc": 0.4563953488372093, "train_speed(iter/s)": 0.670277 }, { "epoch": 0.8602887622638276, "grad_norm": 4.5332841873168945, "learning_rate": 9.287789758204706e-05, "loss": 2.3463794708251955, "memory(GiB)": 72.85, "step": 20080, "token_acc": 0.5196850393700787, "train_speed(iter/s)": 0.670246 }, { "epoch": 0.8605029775930766, "grad_norm": 3.7552859783172607, "learning_rate": 9.287443548345746e-05, "loss": 2.2866752624511717, "memory(GiB)": 72.85, "step": 20085, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.670262 }, { "epoch": 0.8607171929223255, "grad_norm": 3.7116730213165283, "learning_rate": 9.287097260815724e-05, "loss": 2.479880714416504, "memory(GiB)": 72.85, "step": 20090, "token_acc": 0.49842271293375395, "train_speed(iter/s)": 0.670233 }, { "epoch": 0.8609314082515744, "grad_norm": 3.5826189517974854, "learning_rate": 9.286750895620914e-05, "loss": 2.4003219604492188, "memory(GiB)": 72.85, "step": 20095, "token_acc": 0.4684931506849315, "train_speed(iter/s)": 0.670244 }, { "epoch": 0.8611456235808235, "grad_norm": 3.6483569145202637, "learning_rate": 9.28640445276759e-05, "loss": 2.370542526245117, "memory(GiB)": 72.85, "step": 20100, "token_acc": 0.4521072796934866, "train_speed(iter/s)": 0.670241 }, { "epoch": 0.8613598389100724, "grad_norm": 4.158718585968018, "learning_rate": 9.286057932262029e-05, "loss": 2.399163818359375, "memory(GiB)": 72.85, "step": 20105, "token_acc": 0.550185873605948, "train_speed(iter/s)": 0.670291 }, { "epoch": 0.8615740542393213, "grad_norm": 3.414077043533325, "learning_rate": 9.285711334110508e-05, "loss": 2.606454849243164, "memory(GiB)": 72.85, "step": 20110, "token_acc": 0.4657039711191336, "train_speed(iter/s)": 0.670301 }, { "epoch": 0.8617882695685704, "grad_norm": 
3.682670831680298, "learning_rate": 9.285364658319305e-05, "loss": 2.3978790283203124, "memory(GiB)": 72.85, "step": 20115, "token_acc": 0.4674922600619195, "train_speed(iter/s)": 0.670313 }, { "epoch": 0.8620024848978193, "grad_norm": 3.7714579105377197, "learning_rate": 9.285017904894702e-05, "loss": 2.324006271362305, "memory(GiB)": 72.85, "step": 20120, "token_acc": 0.5060240963855421, "train_speed(iter/s)": 0.670253 }, { "epoch": 0.8622167002270682, "grad_norm": 3.8364574909210205, "learning_rate": 9.284671073842979e-05, "loss": 2.3709096908569336, "memory(GiB)": 72.85, "step": 20125, "token_acc": 0.4822485207100592, "train_speed(iter/s)": 0.67028 }, { "epoch": 0.8624309155563172, "grad_norm": 3.810469627380371, "learning_rate": 9.284324165170421e-05, "loss": 2.6732479095458985, "memory(GiB)": 72.85, "step": 20130, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.670256 }, { "epoch": 0.8626451308855662, "grad_norm": 3.276801824569702, "learning_rate": 9.283977178883312e-05, "loss": 2.64675350189209, "memory(GiB)": 72.85, "step": 20135, "token_acc": 0.4535211267605634, "train_speed(iter/s)": 0.670246 }, { "epoch": 0.8628593462148151, "grad_norm": 3.260769844055176, "learning_rate": 9.283630114987937e-05, "loss": 2.616472053527832, "memory(GiB)": 72.85, "step": 20140, "token_acc": 0.48089171974522293, "train_speed(iter/s)": 0.670261 }, { "epoch": 0.8630735615440641, "grad_norm": 3.6267247200012207, "learning_rate": 9.283282973490585e-05, "loss": 2.315013313293457, "memory(GiB)": 72.85, "step": 20145, "token_acc": 0.4794952681388013, "train_speed(iter/s)": 0.670295 }, { "epoch": 0.863287776873313, "grad_norm": 2.9902701377868652, "learning_rate": 9.282935754397543e-05, "loss": 2.422476959228516, "memory(GiB)": 72.85, "step": 20150, "token_acc": 0.49201277955271566, "train_speed(iter/s)": 0.670342 }, { "epoch": 0.863501992202562, "grad_norm": 3.5334672927856445, "learning_rate": 9.282588457715102e-05, "loss": 2.4542055130004883, "memory(GiB)": 72.85, 
"step": 20155, "token_acc": 0.5019305019305019, "train_speed(iter/s)": 0.670341 }, { "epoch": 0.863716207531811, "grad_norm": 3.957378625869751, "learning_rate": 9.282241083449555e-05, "loss": 2.509323310852051, "memory(GiB)": 72.85, "step": 20160, "token_acc": 0.47202797202797203, "train_speed(iter/s)": 0.670369 }, { "epoch": 0.8639304228610599, "grad_norm": 3.524238348007202, "learning_rate": 9.281893631607192e-05, "loss": 2.370881271362305, "memory(GiB)": 72.85, "step": 20165, "token_acc": 0.5033557046979866, "train_speed(iter/s)": 0.670295 }, { "epoch": 0.8641446381903088, "grad_norm": 5.206194877624512, "learning_rate": 9.28154610219431e-05, "loss": 2.330872344970703, "memory(GiB)": 72.85, "step": 20170, "token_acc": 0.45555555555555555, "train_speed(iter/s)": 0.67025 }, { "epoch": 0.8643588535195579, "grad_norm": 5.043061256408691, "learning_rate": 9.281198495217203e-05, "loss": 2.224467468261719, "memory(GiB)": 72.85, "step": 20175, "token_acc": 0.5136986301369864, "train_speed(iter/s)": 0.670273 }, { "epoch": 0.8645730688488068, "grad_norm": 3.4524176120758057, "learning_rate": 9.28085081068217e-05, "loss": 2.3164512634277346, "memory(GiB)": 72.85, "step": 20180, "token_acc": 0.4778156996587031, "train_speed(iter/s)": 0.670276 }, { "epoch": 0.8647872841780557, "grad_norm": 3.7757909297943115, "learning_rate": 9.280503048595509e-05, "loss": 2.494068908691406, "memory(GiB)": 72.85, "step": 20185, "token_acc": 0.47648902821316613, "train_speed(iter/s)": 0.670276 }, { "epoch": 0.8650014995073048, "grad_norm": 3.7475061416625977, "learning_rate": 9.280155208963519e-05, "loss": 2.5563276290893553, "memory(GiB)": 72.85, "step": 20190, "token_acc": 0.46296296296296297, "train_speed(iter/s)": 0.670317 }, { "epoch": 0.8652157148365537, "grad_norm": 4.189110279083252, "learning_rate": 9.279807291792504e-05, "loss": 2.5137798309326174, "memory(GiB)": 72.85, "step": 20195, "token_acc": 0.4589041095890411, "train_speed(iter/s)": 0.670316 }, { "epoch": 0.8654299301658027, 
"grad_norm": 3.1372008323669434, "learning_rate": 9.279459297088764e-05, "loss": 2.6318288803100587, "memory(GiB)": 72.85, "step": 20200, "token_acc": 0.450402144772118, "train_speed(iter/s)": 0.670312 }, { "epoch": 0.8656441454950516, "grad_norm": 3.7816810607910156, "learning_rate": 9.279111224858604e-05, "loss": 2.5188644409179686, "memory(GiB)": 72.85, "step": 20205, "token_acc": 0.4692556634304207, "train_speed(iter/s)": 0.670318 }, { "epoch": 0.8658583608243006, "grad_norm": 3.4508206844329834, "learning_rate": 9.27876307510833e-05, "loss": 2.5704723358154298, "memory(GiB)": 72.85, "step": 20210, "token_acc": 0.5225806451612903, "train_speed(iter/s)": 0.670359 }, { "epoch": 0.8660725761535496, "grad_norm": 3.6717588901519775, "learning_rate": 9.27841484784425e-05, "loss": 2.3431156158447264, "memory(GiB)": 72.85, "step": 20215, "token_acc": 0.5032258064516129, "train_speed(iter/s)": 0.670372 }, { "epoch": 0.8662867914827985, "grad_norm": 4.094695568084717, "learning_rate": 9.27806654307267e-05, "loss": 2.8902870178222657, "memory(GiB)": 72.85, "step": 20220, "token_acc": 0.46387832699619774, "train_speed(iter/s)": 0.670398 }, { "epoch": 0.8665010068120474, "grad_norm": 2.940553665161133, "learning_rate": 9.277718160799905e-05, "loss": 2.6659095764160154, "memory(GiB)": 72.85, "step": 20225, "token_acc": 0.4384858044164038, "train_speed(iter/s)": 0.670437 }, { "epoch": 0.8667152221412965, "grad_norm": 3.788738489151001, "learning_rate": 9.277369701032259e-05, "loss": 2.6991247177124023, "memory(GiB)": 72.85, "step": 20230, "token_acc": 0.4738562091503268, "train_speed(iter/s)": 0.670424 }, { "epoch": 0.8669294374705454, "grad_norm": 3.6610960960388184, "learning_rate": 9.277021163776049e-05, "loss": 2.648579216003418, "memory(GiB)": 72.85, "step": 20235, "token_acc": 0.4704225352112676, "train_speed(iter/s)": 0.670417 }, { "epoch": 0.8671436527997943, "grad_norm": 3.9282689094543457, "learning_rate": 9.276672549037589e-05, "loss": 2.364290237426758, 
"memory(GiB)": 72.85, "step": 20240, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.670367 }, { "epoch": 0.8673578681290434, "grad_norm": 3.739492416381836, "learning_rate": 9.276323856823193e-05, "loss": 2.5787721633911134, "memory(GiB)": 72.85, "step": 20245, "token_acc": 0.4738675958188153, "train_speed(iter/s)": 0.670377 }, { "epoch": 0.8675720834582923, "grad_norm": 3.070293426513672, "learning_rate": 9.275975087139181e-05, "loss": 2.2749492645263674, "memory(GiB)": 72.85, "step": 20250, "token_acc": 0.5062893081761006, "train_speed(iter/s)": 0.670389 }, { "epoch": 0.8677862987875412, "grad_norm": 2.8190340995788574, "learning_rate": 9.275626239991868e-05, "loss": 2.5591493606567384, "memory(GiB)": 72.85, "step": 20255, "token_acc": 0.478125, "train_speed(iter/s)": 0.670372 }, { "epoch": 0.8680005141167902, "grad_norm": 2.8938488960266113, "learning_rate": 9.275277315387574e-05, "loss": 2.5045467376708985, "memory(GiB)": 72.85, "step": 20260, "token_acc": 0.47883597883597884, "train_speed(iter/s)": 0.670408 }, { "epoch": 0.8682147294460392, "grad_norm": 2.858980417251587, "learning_rate": 9.274928313332621e-05, "loss": 2.3925609588623047, "memory(GiB)": 72.85, "step": 20265, "token_acc": 0.4715909090909091, "train_speed(iter/s)": 0.670403 }, { "epoch": 0.8684289447752881, "grad_norm": 3.19193172454834, "learning_rate": 9.274579233833332e-05, "loss": 2.399462127685547, "memory(GiB)": 72.85, "step": 20270, "token_acc": 0.4630681818181818, "train_speed(iter/s)": 0.670391 }, { "epoch": 0.8686431601045371, "grad_norm": 2.500861644744873, "learning_rate": 9.274230076896032e-05, "loss": 2.4694360733032226, "memory(GiB)": 72.85, "step": 20275, "token_acc": 0.4479166666666667, "train_speed(iter/s)": 0.670401 }, { "epoch": 0.868857375433786, "grad_norm": 4.041868209838867, "learning_rate": 9.273880842527042e-05, "loss": 2.2051279067993166, "memory(GiB)": 72.85, "step": 20280, "token_acc": 0.5204918032786885, "train_speed(iter/s)": 0.670419 }, { "epoch": 
0.869071590763035, "grad_norm": 3.5404677391052246, "learning_rate": 9.27353153073269e-05, "loss": 2.3082744598388674, "memory(GiB)": 72.85, "step": 20285, "token_acc": 0.46715328467153283, "train_speed(iter/s)": 0.670429 }, { "epoch": 0.869285806092284, "grad_norm": 4.51783561706543, "learning_rate": 9.27318214151931e-05, "loss": 2.143968200683594, "memory(GiB)": 72.85, "step": 20290, "token_acc": 0.5421245421245421, "train_speed(iter/s)": 0.670433 }, { "epoch": 0.8695000214215329, "grad_norm": 3.767099618911743, "learning_rate": 9.272832674893224e-05, "loss": 2.5604257583618164, "memory(GiB)": 72.85, "step": 20295, "token_acc": 0.4794007490636704, "train_speed(iter/s)": 0.670455 }, { "epoch": 0.8697142367507819, "grad_norm": 3.1452460289001465, "learning_rate": 9.272483130860765e-05, "loss": 2.3975902557373048, "memory(GiB)": 72.85, "step": 20300, "token_acc": 0.4618055555555556, "train_speed(iter/s)": 0.670451 }, { "epoch": 0.8699284520800309, "grad_norm": 3.480515241622925, "learning_rate": 9.27213350942827e-05, "loss": 2.5072534561157225, "memory(GiB)": 72.85, "step": 20305, "token_acc": 0.4728434504792332, "train_speed(iter/s)": 0.670465 }, { "epoch": 0.8701426674092798, "grad_norm": 3.822962522506714, "learning_rate": 9.271783810602065e-05, "loss": 2.4139987945556642, "memory(GiB)": 72.85, "step": 20310, "token_acc": 0.48375451263537905, "train_speed(iter/s)": 0.670458 }, { "epoch": 0.8703568827385287, "grad_norm": 3.2536449432373047, "learning_rate": 9.27143403438849e-05, "loss": 2.6305566787719727, "memory(GiB)": 72.85, "step": 20315, "token_acc": 0.44871794871794873, "train_speed(iter/s)": 0.67052 }, { "epoch": 0.8705710980677778, "grad_norm": 4.040157318115234, "learning_rate": 9.271084180793881e-05, "loss": 2.4425289154052736, "memory(GiB)": 72.85, "step": 20320, "token_acc": 0.4787234042553192, "train_speed(iter/s)": 0.670506 }, { "epoch": 0.8707853133970267, "grad_norm": 6.063348770141602, "learning_rate": 9.270734249824575e-05, "loss": 
2.4394815444946287, "memory(GiB)": 72.85, "step": 20325, "token_acc": 0.4908424908424908, "train_speed(iter/s)": 0.67054 }, { "epoch": 0.8709995287262756, "grad_norm": 4.3685455322265625, "learning_rate": 9.270384241486914e-05, "loss": 2.5416751861572267, "memory(GiB)": 72.85, "step": 20330, "token_acc": 0.5209125475285171, "train_speed(iter/s)": 0.670511 }, { "epoch": 0.8712137440555247, "grad_norm": 3.37146258354187, "learning_rate": 9.270034155787233e-05, "loss": 2.479094123840332, "memory(GiB)": 72.85, "step": 20335, "token_acc": 0.4676258992805755, "train_speed(iter/s)": 0.670543 }, { "epoch": 0.8714279593847736, "grad_norm": 5.330536842346191, "learning_rate": 9.26968399273188e-05, "loss": 2.254835510253906, "memory(GiB)": 72.85, "step": 20340, "token_acc": 0.5470085470085471, "train_speed(iter/s)": 0.670565 }, { "epoch": 0.8716421747140225, "grad_norm": 5.926287651062012, "learning_rate": 9.269333752327196e-05, "loss": 2.6560863494873046, "memory(GiB)": 72.85, "step": 20345, "token_acc": 0.4682274247491639, "train_speed(iter/s)": 0.670501 }, { "epoch": 0.8718563900432715, "grad_norm": 3.956707000732422, "learning_rate": 9.268983434579526e-05, "loss": 2.3374349594116213, "memory(GiB)": 72.85, "step": 20350, "token_acc": 0.49063670411985016, "train_speed(iter/s)": 0.670508 }, { "epoch": 0.8720706053725205, "grad_norm": 3.693394660949707, "learning_rate": 9.268633039495216e-05, "loss": 2.3752445220947265, "memory(GiB)": 72.85, "step": 20355, "token_acc": 0.49836065573770494, "train_speed(iter/s)": 0.670504 }, { "epoch": 0.8722848207017694, "grad_norm": 3.6122257709503174, "learning_rate": 9.268282567080614e-05, "loss": 2.556824493408203, "memory(GiB)": 72.85, "step": 20360, "token_acc": 0.43252595155709345, "train_speed(iter/s)": 0.670502 }, { "epoch": 0.8724990360310184, "grad_norm": 5.656414031982422, "learning_rate": 9.267932017342069e-05, "loss": 2.5422222137451174, "memory(GiB)": 72.85, "step": 20365, "token_acc": 0.43666666666666665, 
"train_speed(iter/s)": 0.670507 }, { "epoch": 0.8727132513602673, "grad_norm": 3.718993663787842, "learning_rate": 9.267581390285934e-05, "loss": 2.2315555572509767, "memory(GiB)": 72.85, "step": 20370, "token_acc": 0.5257731958762887, "train_speed(iter/s)": 0.670508 }, { "epoch": 0.8729274666895163, "grad_norm": 2.829806089401245, "learning_rate": 9.267230685918556e-05, "loss": 2.2367958068847655, "memory(GiB)": 72.85, "step": 20375, "token_acc": 0.5075757575757576, "train_speed(iter/s)": 0.670489 }, { "epoch": 0.8731416820187653, "grad_norm": 3.9553921222686768, "learning_rate": 9.266879904246293e-05, "loss": 2.363162612915039, "memory(GiB)": 72.85, "step": 20380, "token_acc": 0.4714828897338403, "train_speed(iter/s)": 0.670531 }, { "epoch": 0.8733558973480142, "grad_norm": 2.911281108856201, "learning_rate": 9.266529045275498e-05, "loss": 2.4973098754882814, "memory(GiB)": 72.85, "step": 20385, "token_acc": 0.46853146853146854, "train_speed(iter/s)": 0.670499 }, { "epoch": 0.8735701126772631, "grad_norm": 3.899282455444336, "learning_rate": 9.266178109012526e-05, "loss": 2.3084537506103517, "memory(GiB)": 72.85, "step": 20390, "token_acc": 0.48880597014925375, "train_speed(iter/s)": 0.670524 }, { "epoch": 0.8737843280065122, "grad_norm": 4.518158435821533, "learning_rate": 9.265827095463735e-05, "loss": 2.4615785598754885, "memory(GiB)": 72.85, "step": 20395, "token_acc": 0.48535564853556484, "train_speed(iter/s)": 0.670538 }, { "epoch": 0.8739985433357611, "grad_norm": 3.1549832820892334, "learning_rate": 9.265476004635486e-05, "loss": 2.2649818420410157, "memory(GiB)": 72.85, "step": 20400, "token_acc": 0.4560260586319218, "train_speed(iter/s)": 0.670518 }, { "epoch": 0.87421275866501, "grad_norm": 4.1607346534729, "learning_rate": 9.265124836534139e-05, "loss": 2.5087440490722654, "memory(GiB)": 72.85, "step": 20405, "token_acc": 0.478125, "train_speed(iter/s)": 0.670511 }, { "epoch": 0.8744269739942591, "grad_norm": 4.089958667755127, "learning_rate": 
9.264773591166052e-05, "loss": 2.703564453125, "memory(GiB)": 72.85, "step": 20410, "token_acc": 0.44816053511705684, "train_speed(iter/s)": 0.670543 }, { "epoch": 0.874641189323508, "grad_norm": 4.37449312210083, "learning_rate": 9.264422268537592e-05, "loss": 2.646206855773926, "memory(GiB)": 72.85, "step": 20415, "token_acc": 0.486013986013986, "train_speed(iter/s)": 0.670525 }, { "epoch": 0.8748554046527569, "grad_norm": 4.365012168884277, "learning_rate": 9.264070868655121e-05, "loss": 2.4014509201049803, "memory(GiB)": 72.85, "step": 20420, "token_acc": 0.4852459016393443, "train_speed(iter/s)": 0.670513 }, { "epoch": 0.8750696199820059, "grad_norm": 3.5518670082092285, "learning_rate": 9.263719391525007e-05, "loss": 2.2612051010131835, "memory(GiB)": 72.85, "step": 20425, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.670525 }, { "epoch": 0.8752838353112549, "grad_norm": 4.171792507171631, "learning_rate": 9.263367837153616e-05, "loss": 2.326763725280762, "memory(GiB)": 72.85, "step": 20430, "token_acc": 0.5093167701863354, "train_speed(iter/s)": 0.670524 }, { "epoch": 0.8754980506405038, "grad_norm": 4.128069877624512, "learning_rate": 9.263016205547318e-05, "loss": 2.3387474060058593, "memory(GiB)": 72.85, "step": 20435, "token_acc": 0.4847457627118644, "train_speed(iter/s)": 0.670554 }, { "epoch": 0.8757122659697528, "grad_norm": 3.8046886920928955, "learning_rate": 9.262664496712482e-05, "loss": 2.471019172668457, "memory(GiB)": 72.85, "step": 20440, "token_acc": 0.4662379421221865, "train_speed(iter/s)": 0.670546 }, { "epoch": 0.8759264812990017, "grad_norm": 4.799554824829102, "learning_rate": 9.262312710655481e-05, "loss": 2.4098039627075196, "memory(GiB)": 72.85, "step": 20445, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.670565 }, { "epoch": 0.8761406966282507, "grad_norm": 3.909968852996826, "learning_rate": 9.261960847382684e-05, "loss": 2.1709272384643556, "memory(GiB)": 72.85, "step": 20450, "token_acc": 
0.5582191780821918, "train_speed(iter/s)": 0.670567 }, { "epoch": 0.8763549119574997, "grad_norm": 3.3903558254241943, "learning_rate": 9.261608906900472e-05, "loss": 2.6972848892211916, "memory(GiB)": 72.85, "step": 20455, "token_acc": 0.46226415094339623, "train_speed(iter/s)": 0.670569 }, { "epoch": 0.8765691272867486, "grad_norm": 4.990915775299072, "learning_rate": 9.261256889215215e-05, "loss": 2.4003215789794923, "memory(GiB)": 72.85, "step": 20460, "token_acc": 0.49266862170087977, "train_speed(iter/s)": 0.67059 }, { "epoch": 0.8767833426159976, "grad_norm": 3.8422744274139404, "learning_rate": 9.260904794333292e-05, "loss": 2.3237958908081056, "memory(GiB)": 72.85, "step": 20465, "token_acc": 0.5049833887043189, "train_speed(iter/s)": 0.6706 }, { "epoch": 0.8769975579452466, "grad_norm": 7.439640522003174, "learning_rate": 9.260552622261081e-05, "loss": 2.180418586730957, "memory(GiB)": 72.85, "step": 20470, "token_acc": 0.5573770491803278, "train_speed(iter/s)": 0.670608 }, { "epoch": 0.8772117732744955, "grad_norm": 3.77761173248291, "learning_rate": 9.260200373004964e-05, "loss": 2.7982301712036133, "memory(GiB)": 72.85, "step": 20475, "token_acc": 0.4483985765124555, "train_speed(iter/s)": 0.670592 }, { "epoch": 0.8774259886037444, "grad_norm": 3.377702474594116, "learning_rate": 9.25984804657132e-05, "loss": 2.3241058349609376, "memory(GiB)": 72.85, "step": 20480, "token_acc": 0.517799352750809, "train_speed(iter/s)": 0.670606 }, { "epoch": 0.8776402039329935, "grad_norm": 4.556361675262451, "learning_rate": 9.259495642966534e-05, "loss": 2.3207023620605467, "memory(GiB)": 72.85, "step": 20485, "token_acc": 0.50187265917603, "train_speed(iter/s)": 0.670608 }, { "epoch": 0.8778544192622424, "grad_norm": 3.564312219619751, "learning_rate": 9.259143162196986e-05, "loss": 2.3704849243164063, "memory(GiB)": 72.85, "step": 20490, "token_acc": 0.512987012987013, "train_speed(iter/s)": 0.670611 }, { "epoch": 0.8780686345914913, "grad_norm": 
3.9602675437927246, "learning_rate": 9.258790604269065e-05, "loss": 2.2885919570922852, "memory(GiB)": 72.85, "step": 20495, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.670613 }, { "epoch": 0.8782828499207403, "grad_norm": 4.771568775177002, "learning_rate": 9.25843796918916e-05, "loss": 2.6254737854003904, "memory(GiB)": 72.85, "step": 20500, "token_acc": 0.49477351916376305, "train_speed(iter/s)": 0.670606 }, { "epoch": 0.8782828499207403, "eval_loss": 2.167361259460449, "eval_runtime": 17.2741, "eval_samples_per_second": 5.789, "eval_steps_per_second": 5.789, "eval_token_acc": 0.4933862433862434, "step": 20500 }, { "epoch": 0.8784970652499893, "grad_norm": 3.543285369873047, "learning_rate": 9.258085256963654e-05, "loss": 2.2980073928833007, "memory(GiB)": 72.85, "step": 20505, "token_acc": 0.5024390243902439, "train_speed(iter/s)": 0.67016 }, { "epoch": 0.8787112805792382, "grad_norm": 3.7669005393981934, "learning_rate": 9.257732467598939e-05, "loss": 2.4726659774780275, "memory(GiB)": 72.85, "step": 20510, "token_acc": 0.4879518072289157, "train_speed(iter/s)": 0.670143 }, { "epoch": 0.8789254959084872, "grad_norm": 3.4951250553131104, "learning_rate": 9.257379601101406e-05, "loss": 2.6809803009033204, "memory(GiB)": 72.85, "step": 20515, "token_acc": 0.45396825396825397, "train_speed(iter/s)": 0.670171 }, { "epoch": 0.8791397112377362, "grad_norm": 2.9114174842834473, "learning_rate": 9.257026657477449e-05, "loss": 2.0432952880859374, "memory(GiB)": 72.85, "step": 20520, "token_acc": 0.5242718446601942, "train_speed(iter/s)": 0.670186 }, { "epoch": 0.8793539265669851, "grad_norm": 3.8137686252593994, "learning_rate": 9.256673636733459e-05, "loss": 2.419272232055664, "memory(GiB)": 72.85, "step": 20525, "token_acc": 0.46953405017921146, "train_speed(iter/s)": 0.670186 }, { "epoch": 0.8795681418962341, "grad_norm": 6.427465438842773, "learning_rate": 9.256320538875836e-05, "loss": 2.3564929962158203, "memory(GiB)": 72.85, "step": 20530, 
"token_acc": 0.5298245614035088, "train_speed(iter/s)": 0.67021 }, { "epoch": 0.879782357225483, "grad_norm": 4.060159683227539, "learning_rate": 9.255967363910971e-05, "loss": 2.437969207763672, "memory(GiB)": 72.85, "step": 20535, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.67022 }, { "epoch": 0.8799965725547321, "grad_norm": 3.9747912883758545, "learning_rate": 9.255614111845266e-05, "loss": 2.1401161193847655, "memory(GiB)": 72.85, "step": 20540, "token_acc": 0.5271966527196653, "train_speed(iter/s)": 0.670164 }, { "epoch": 0.880210787883981, "grad_norm": 4.352922439575195, "learning_rate": 9.255260782685118e-05, "loss": 2.27252254486084, "memory(GiB)": 72.85, "step": 20545, "token_acc": 0.4975609756097561, "train_speed(iter/s)": 0.670165 }, { "epoch": 0.8804250032132299, "grad_norm": 3.2916007041931152, "learning_rate": 9.254907376436931e-05, "loss": 2.403587532043457, "memory(GiB)": 72.85, "step": 20550, "token_acc": 0.44314868804664725, "train_speed(iter/s)": 0.670144 }, { "epoch": 0.880639218542479, "grad_norm": 3.4523138999938965, "learning_rate": 9.254553893107104e-05, "loss": 2.280898666381836, "memory(GiB)": 72.85, "step": 20555, "token_acc": 0.49404761904761907, "train_speed(iter/s)": 0.670127 }, { "epoch": 0.8808534338717279, "grad_norm": 4.137212753295898, "learning_rate": 9.254200332702043e-05, "loss": 2.456137275695801, "memory(GiB)": 72.85, "step": 20560, "token_acc": 0.47038327526132406, "train_speed(iter/s)": 0.670119 }, { "epoch": 0.8810676492009768, "grad_norm": 3.640150547027588, "learning_rate": 9.253846695228152e-05, "loss": 2.28662109375, "memory(GiB)": 72.85, "step": 20565, "token_acc": 0.46885245901639344, "train_speed(iter/s)": 0.670116 }, { "epoch": 0.8812818645302258, "grad_norm": 2.857203245162964, "learning_rate": 9.253492980691838e-05, "loss": 2.104167175292969, "memory(GiB)": 72.85, "step": 20570, "token_acc": 0.5509433962264151, "train_speed(iter/s)": 0.670076 }, { "epoch": 0.8814960798594748, "grad_norm": 
5.804282188415527, "learning_rate": 9.253139189099506e-05, "loss": 2.1788673400878906, "memory(GiB)": 72.85, "step": 20575, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.670098 }, { "epoch": 0.8817102951887237, "grad_norm": 4.495368957519531, "learning_rate": 9.25278532045757e-05, "loss": 2.46580810546875, "memory(GiB)": 72.85, "step": 20580, "token_acc": 0.4805194805194805, "train_speed(iter/s)": 0.670075 }, { "epoch": 0.8819245105179727, "grad_norm": 11.359814643859863, "learning_rate": 9.25243137477244e-05, "loss": 2.1624881744384767, "memory(GiB)": 72.85, "step": 20585, "token_acc": 0.4983277591973244, "train_speed(iter/s)": 0.670114 }, { "epoch": 0.8821387258472216, "grad_norm": 3.9399895668029785, "learning_rate": 9.252077352050526e-05, "loss": 2.3334497451782226, "memory(GiB)": 72.85, "step": 20590, "token_acc": 0.4555984555984556, "train_speed(iter/s)": 0.670088 }, { "epoch": 0.8823529411764706, "grad_norm": 2.652312994003296, "learning_rate": 9.251723252298239e-05, "loss": 2.3591468811035154, "memory(GiB)": 72.85, "step": 20595, "token_acc": 0.5045317220543807, "train_speed(iter/s)": 0.670065 }, { "epoch": 0.8825671565057196, "grad_norm": 2.879958391189575, "learning_rate": 9.251369075521999e-05, "loss": 2.341073989868164, "memory(GiB)": 72.85, "step": 20600, "token_acc": 0.517799352750809, "train_speed(iter/s)": 0.670085 }, { "epoch": 0.8827813718349685, "grad_norm": 5.437633514404297, "learning_rate": 9.25101482172822e-05, "loss": 2.627121925354004, "memory(GiB)": 72.85, "step": 20605, "token_acc": 0.48134328358208955, "train_speed(iter/s)": 0.670094 }, { "epoch": 0.8829955871642174, "grad_norm": 3.8557071685791016, "learning_rate": 9.25066049092332e-05, "loss": 2.1266284942626954, "memory(GiB)": 72.85, "step": 20610, "token_acc": 0.5355805243445693, "train_speed(iter/s)": 0.670112 }, { "epoch": 0.8832098024934665, "grad_norm": 2.852768659591675, "learning_rate": 9.250306083113718e-05, "loss": 2.4493858337402346, "memory(GiB)": 72.85, "step": 
20615, "token_acc": 0.5053003533568905, "train_speed(iter/s)": 0.670142 }, { "epoch": 0.8834240178227154, "grad_norm": 3.1888158321380615, "learning_rate": 9.249951598305833e-05, "loss": 2.4015422821044923, "memory(GiB)": 72.85, "step": 20620, "token_acc": 0.5079872204472844, "train_speed(iter/s)": 0.670132 }, { "epoch": 0.8836382331519643, "grad_norm": 4.043846607208252, "learning_rate": 9.249597036506087e-05, "loss": 2.652807426452637, "memory(GiB)": 72.85, "step": 20625, "token_acc": 0.45, "train_speed(iter/s)": 0.670138 }, { "epoch": 0.8838524484812134, "grad_norm": 3.3779890537261963, "learning_rate": 9.249242397720908e-05, "loss": 2.573093032836914, "memory(GiB)": 72.85, "step": 20630, "token_acc": 0.44954128440366975, "train_speed(iter/s)": 0.670175 }, { "epoch": 0.8840666638104623, "grad_norm": 3.6690051555633545, "learning_rate": 9.248887681956713e-05, "loss": 2.402086639404297, "memory(GiB)": 72.85, "step": 20635, "token_acc": 0.5, "train_speed(iter/s)": 0.670226 }, { "epoch": 0.8842808791397112, "grad_norm": 2.80582857131958, "learning_rate": 9.248532889219934e-05, "loss": 2.431000518798828, "memory(GiB)": 72.85, "step": 20640, "token_acc": 0.4940119760479042, "train_speed(iter/s)": 0.670222 }, { "epoch": 0.8844950944689602, "grad_norm": 3.508693218231201, "learning_rate": 9.248178019516997e-05, "loss": 2.5475622177124024, "memory(GiB)": 72.85, "step": 20645, "token_acc": 0.44256756756756754, "train_speed(iter/s)": 0.670261 }, { "epoch": 0.8847093097982092, "grad_norm": 3.7064461708068848, "learning_rate": 9.247823072854329e-05, "loss": 2.349268913269043, "memory(GiB)": 72.85, "step": 20650, "token_acc": 0.5035460992907801, "train_speed(iter/s)": 0.670257 }, { "epoch": 0.8849235251274581, "grad_norm": 4.065171241760254, "learning_rate": 9.24746804923836e-05, "loss": 2.3729827880859373, "memory(GiB)": 72.85, "step": 20655, "token_acc": 0.49324324324324326, "train_speed(iter/s)": 0.670255 }, { "epoch": 0.8851377404567071, "grad_norm": 2.9633843898773193, 
"learning_rate": 9.247112948675525e-05, "loss": 2.4938756942749025, "memory(GiB)": 72.85, "step": 20660, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.670267 }, { "epoch": 0.885351955785956, "grad_norm": 3.9418771266937256, "learning_rate": 9.246757771172254e-05, "loss": 2.4219385147094727, "memory(GiB)": 72.85, "step": 20665, "token_acc": 0.46179401993355484, "train_speed(iter/s)": 0.670247 }, { "epoch": 0.885566171115205, "grad_norm": 4.315752983093262, "learning_rate": 9.246402516734981e-05, "loss": 2.3595333099365234, "memory(GiB)": 72.85, "step": 20670, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.670261 }, { "epoch": 0.885780386444454, "grad_norm": 3.6661031246185303, "learning_rate": 9.246047185370144e-05, "loss": 2.566493034362793, "memory(GiB)": 72.85, "step": 20675, "token_acc": 0.4440677966101695, "train_speed(iter/s)": 0.670282 }, { "epoch": 0.8859946017737029, "grad_norm": 5.001988410949707, "learning_rate": 9.24569177708418e-05, "loss": 2.59634952545166, "memory(GiB)": 72.85, "step": 20680, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.670248 }, { "epoch": 0.8862088171029519, "grad_norm": 3.36660099029541, "learning_rate": 9.245336291883525e-05, "loss": 2.4638015747070314, "memory(GiB)": 72.85, "step": 20685, "token_acc": 0.47794117647058826, "train_speed(iter/s)": 0.670273 }, { "epoch": 0.8864230324322009, "grad_norm": 3.8696129322052, "learning_rate": 9.244980729774621e-05, "loss": 2.558097267150879, "memory(GiB)": 72.85, "step": 20690, "token_acc": 0.49454545454545457, "train_speed(iter/s)": 0.670282 }, { "epoch": 0.8866372477614498, "grad_norm": 3.7888214588165283, "learning_rate": 9.24462509076391e-05, "loss": 2.506334686279297, "memory(GiB)": 72.85, "step": 20695, "token_acc": 0.49158249158249157, "train_speed(iter/s)": 0.670311 }, { "epoch": 0.8868514630906987, "grad_norm": 3.0634915828704834, "learning_rate": 9.244269374857833e-05, "loss": 2.4131191253662108, "memory(GiB)": 72.85, "step": 20700, 
"token_acc": 0.506896551724138, "train_speed(iter/s)": 0.670325 }, { "epoch": 0.8870656784199478, "grad_norm": 3.0039446353912354, "learning_rate": 9.243913582062836e-05, "loss": 2.4827093124389648, "memory(GiB)": 72.85, "step": 20705, "token_acc": 0.4652567975830816, "train_speed(iter/s)": 0.670295 }, { "epoch": 0.8872798937491967, "grad_norm": 3.430910110473633, "learning_rate": 9.24355771238536e-05, "loss": 2.554223823547363, "memory(GiB)": 72.85, "step": 20710, "token_acc": 0.46, "train_speed(iter/s)": 0.670337 }, { "epoch": 0.8874941090784456, "grad_norm": 3.7104623317718506, "learning_rate": 9.243201765831859e-05, "loss": 2.3053615570068358, "memory(GiB)": 72.85, "step": 20715, "token_acc": 0.48579545454545453, "train_speed(iter/s)": 0.670335 }, { "epoch": 0.8877083244076946, "grad_norm": 3.983299970626831, "learning_rate": 9.242845742408776e-05, "loss": 2.2556392669677736, "memory(GiB)": 72.85, "step": 20720, "token_acc": 0.5317725752508361, "train_speed(iter/s)": 0.670352 }, { "epoch": 0.8879225397369436, "grad_norm": 3.2910022735595703, "learning_rate": 9.242489642122561e-05, "loss": 2.3594526290893554, "memory(GiB)": 72.85, "step": 20725, "token_acc": 0.4897959183673469, "train_speed(iter/s)": 0.670335 }, { "epoch": 0.8881367550661925, "grad_norm": 8.635111808776855, "learning_rate": 9.242133464979667e-05, "loss": 2.485666275024414, "memory(GiB)": 72.85, "step": 20730, "token_acc": 0.49783549783549785, "train_speed(iter/s)": 0.670322 }, { "epoch": 0.8883509703954415, "grad_norm": 3.2503042221069336, "learning_rate": 9.241777210986548e-05, "loss": 2.5694746017456054, "memory(GiB)": 72.85, "step": 20735, "token_acc": 0.5096774193548387, "train_speed(iter/s)": 0.670312 }, { "epoch": 0.8885651857246905, "grad_norm": 3.4874942302703857, "learning_rate": 9.241420880149653e-05, "loss": 2.1373144149780274, "memory(GiB)": 72.85, "step": 20740, "token_acc": 0.5364806866952789, "train_speed(iter/s)": 0.670341 }, { "epoch": 0.8887794010539394, "grad_norm": 
4.079653739929199, "learning_rate": 9.241064472475442e-05, "loss": 2.823813629150391, "memory(GiB)": 72.85, "step": 20745, "token_acc": 0.4575757575757576, "train_speed(iter/s)": 0.670358 }, { "epoch": 0.8889936163831884, "grad_norm": 3.777827501296997, "learning_rate": 9.24070798797037e-05, "loss": 2.0942710876464843, "memory(GiB)": 72.85, "step": 20750, "token_acc": 0.5259515570934256, "train_speed(iter/s)": 0.670336 }, { "epoch": 0.8892078317124373, "grad_norm": 4.810459136962891, "learning_rate": 9.240351426640892e-05, "loss": 2.5691802978515623, "memory(GiB)": 72.85, "step": 20755, "token_acc": 0.47297297297297297, "train_speed(iter/s)": 0.67034 }, { "epoch": 0.8894220470416863, "grad_norm": 4.436393737792969, "learning_rate": 9.239994788493472e-05, "loss": 2.3411760330200195, "memory(GiB)": 72.85, "step": 20760, "token_acc": 0.5057471264367817, "train_speed(iter/s)": 0.670337 }, { "epoch": 0.8896362623709353, "grad_norm": 4.333904266357422, "learning_rate": 9.239638073534569e-05, "loss": 2.1740385055541993, "memory(GiB)": 72.85, "step": 20765, "token_acc": 0.5655430711610487, "train_speed(iter/s)": 0.670381 }, { "epoch": 0.8898504777001842, "grad_norm": 3.7615790367126465, "learning_rate": 9.239281281770644e-05, "loss": 2.7353794097900392, "memory(GiB)": 72.85, "step": 20770, "token_acc": 0.439873417721519, "train_speed(iter/s)": 0.670427 }, { "epoch": 0.8900646930294331, "grad_norm": 3.681203842163086, "learning_rate": 9.238924413208163e-05, "loss": 2.4030136108398437, "memory(GiB)": 72.85, "step": 20775, "token_acc": 0.48830409356725146, "train_speed(iter/s)": 0.670462 }, { "epoch": 0.8902789083586822, "grad_norm": 3.3934433460235596, "learning_rate": 9.238567467853589e-05, "loss": 2.3036455154418944, "memory(GiB)": 72.85, "step": 20780, "token_acc": 0.5063291139240507, "train_speed(iter/s)": 0.670438 }, { "epoch": 0.8904931236879311, "grad_norm": 3.9597156047821045, "learning_rate": 9.238210445713391e-05, "loss": 2.2908151626586912, "memory(GiB)": 72.85, 
"step": 20785, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.670466 }, { "epoch": 0.89070733901718, "grad_norm": 2.8909242153167725, "learning_rate": 9.237853346794034e-05, "loss": 2.56502685546875, "memory(GiB)": 72.85, "step": 20790, "token_acc": 0.49363057324840764, "train_speed(iter/s)": 0.670477 }, { "epoch": 0.8909215543464291, "grad_norm": 4.3378167152404785, "learning_rate": 9.237496171101987e-05, "loss": 2.4032222747802736, "memory(GiB)": 72.85, "step": 20795, "token_acc": 0.46691176470588236, "train_speed(iter/s)": 0.670469 }, { "epoch": 0.891135769675678, "grad_norm": 3.541602611541748, "learning_rate": 9.237138918643723e-05, "loss": 2.2811725616455076, "memory(GiB)": 72.85, "step": 20800, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.670485 }, { "epoch": 0.8913499850049269, "grad_norm": 4.395987033843994, "learning_rate": 9.236781589425712e-05, "loss": 2.335010528564453, "memory(GiB)": 72.85, "step": 20805, "token_acc": 0.5241379310344828, "train_speed(iter/s)": 0.670486 }, { "epoch": 0.8915642003341759, "grad_norm": 4.239588260650635, "learning_rate": 9.236424183454429e-05, "loss": 2.4657140731811524, "memory(GiB)": 72.85, "step": 20810, "token_acc": 0.4876325088339223, "train_speed(iter/s)": 0.670501 }, { "epoch": 0.8917784156634249, "grad_norm": 4.0293049812316895, "learning_rate": 9.236066700736348e-05, "loss": 2.3685043334960936, "memory(GiB)": 72.85, "step": 20815, "token_acc": 0.5241635687732342, "train_speed(iter/s)": 0.670482 }, { "epoch": 0.8919926309926738, "grad_norm": 6.95515251159668, "learning_rate": 9.235709141277944e-05, "loss": 2.5181350708007812, "memory(GiB)": 72.85, "step": 20820, "token_acc": 0.44368600682593856, "train_speed(iter/s)": 0.670501 }, { "epoch": 0.8922068463219228, "grad_norm": 5.26400899887085, "learning_rate": 9.235351505085696e-05, "loss": 2.5677558898925783, "memory(GiB)": 72.85, "step": 20825, "token_acc": 0.48188405797101447, "train_speed(iter/s)": 0.670497 }, { "epoch": 
0.8924210616511717, "grad_norm": 3.848255157470703, "learning_rate": 9.234993792166081e-05, "loss": 2.4088729858398437, "memory(GiB)": 72.85, "step": 20830, "token_acc": 0.4740484429065744, "train_speed(iter/s)": 0.670488 }, { "epoch": 0.8926352769804207, "grad_norm": 3.432089328765869, "learning_rate": 9.234636002525582e-05, "loss": 2.440348243713379, "memory(GiB)": 72.85, "step": 20835, "token_acc": 0.4894366197183099, "train_speed(iter/s)": 0.67048 }, { "epoch": 0.8928494923096697, "grad_norm": 4.621888637542725, "learning_rate": 9.23427813617068e-05, "loss": 2.1051830291748046, "memory(GiB)": 72.85, "step": 20840, "token_acc": 0.5090252707581228, "train_speed(iter/s)": 0.670503 }, { "epoch": 0.8930637076389186, "grad_norm": 3.751208543777466, "learning_rate": 9.233920193107857e-05, "loss": 2.3582508087158205, "memory(GiB)": 72.85, "step": 20845, "token_acc": 0.5285714285714286, "train_speed(iter/s)": 0.670517 }, { "epoch": 0.8932779229681675, "grad_norm": 3.5543904304504395, "learning_rate": 9.233562173343598e-05, "loss": 2.137840461730957, "memory(GiB)": 72.85, "step": 20850, "token_acc": 0.5675675675675675, "train_speed(iter/s)": 0.67052 }, { "epoch": 0.8934921382974166, "grad_norm": 3.5849673748016357, "learning_rate": 9.233204076884388e-05, "loss": 2.4445383071899416, "memory(GiB)": 72.85, "step": 20855, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.670509 }, { "epoch": 0.8937063536266655, "grad_norm": 3.854121208190918, "learning_rate": 9.232845903736716e-05, "loss": 2.4248485565185547, "memory(GiB)": 72.85, "step": 20860, "token_acc": 0.4909090909090909, "train_speed(iter/s)": 0.670517 }, { "epoch": 0.8939205689559145, "grad_norm": 3.459320068359375, "learning_rate": 9.232487653907069e-05, "loss": 2.651076889038086, "memory(GiB)": 72.85, "step": 20865, "token_acc": 0.45263157894736844, "train_speed(iter/s)": 0.670506 }, { "epoch": 0.8941347842851635, "grad_norm": 5.178680896759033, "learning_rate": 9.232129327401937e-05, "loss": 
2.4552915573120115, "memory(GiB)": 72.85, "step": 20870, "token_acc": 0.45, "train_speed(iter/s)": 0.670466 }, { "epoch": 0.8943489996144124, "grad_norm": 3.0156567096710205, "learning_rate": 9.231770924227814e-05, "loss": 2.5175962448120117, "memory(GiB)": 72.85, "step": 20875, "token_acc": 0.4690265486725664, "train_speed(iter/s)": 0.67043 }, { "epoch": 0.8945632149436614, "grad_norm": 3.8575315475463867, "learning_rate": 9.231412444391189e-05, "loss": 2.910160255432129, "memory(GiB)": 72.85, "step": 20880, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.670458 }, { "epoch": 0.8947774302729103, "grad_norm": 4.6629533767700195, "learning_rate": 9.231053887898561e-05, "loss": 2.2438501358032226, "memory(GiB)": 72.85, "step": 20885, "token_acc": 0.5261194029850746, "train_speed(iter/s)": 0.670427 }, { "epoch": 0.8949916456021593, "grad_norm": 3.6620609760284424, "learning_rate": 9.23069525475642e-05, "loss": 2.559294319152832, "memory(GiB)": 72.85, "step": 20890, "token_acc": 0.5036764705882353, "train_speed(iter/s)": 0.670421 }, { "epoch": 0.8952058609314083, "grad_norm": 3.6676793098449707, "learning_rate": 9.230336544971267e-05, "loss": 2.488569450378418, "memory(GiB)": 72.85, "step": 20895, "token_acc": 0.4613003095975232, "train_speed(iter/s)": 0.670409 }, { "epoch": 0.8954200762606572, "grad_norm": 3.874181032180786, "learning_rate": 9.2299777585496e-05, "loss": 2.691630744934082, "memory(GiB)": 72.85, "step": 20900, "token_acc": 0.4250871080139373, "train_speed(iter/s)": 0.670386 }, { "epoch": 0.8956342915899062, "grad_norm": 3.656942129135132, "learning_rate": 9.229618895497918e-05, "loss": 2.245355415344238, "memory(GiB)": 72.85, "step": 20905, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.670426 }, { "epoch": 0.8958485069191552, "grad_norm": 3.4763286113739014, "learning_rate": 9.22925995582272e-05, "loss": 2.3205211639404295, "memory(GiB)": 72.85, "step": 20910, "token_acc": 0.4485049833887043, "train_speed(iter/s)": 0.670434 }, { 
"epoch": 0.8960627222484041, "grad_norm": 3.8751156330108643, "learning_rate": 9.228900939530511e-05, "loss": 2.29049072265625, "memory(GiB)": 72.85, "step": 20915, "token_acc": 0.5131578947368421, "train_speed(iter/s)": 0.670405 }, { "epoch": 0.896276937577653, "grad_norm": 5.187064170837402, "learning_rate": 9.228541846627796e-05, "loss": 2.475773048400879, "memory(GiB)": 72.85, "step": 20920, "token_acc": 0.4837662337662338, "train_speed(iter/s)": 0.670437 }, { "epoch": 0.8964911529069021, "grad_norm": 4.197824478149414, "learning_rate": 9.228182677121077e-05, "loss": 2.760221481323242, "memory(GiB)": 72.85, "step": 20925, "token_acc": 0.4405797101449275, "train_speed(iter/s)": 0.670429 }, { "epoch": 0.896705368236151, "grad_norm": 3.685703992843628, "learning_rate": 9.227823431016864e-05, "loss": 2.638164520263672, "memory(GiB)": 72.85, "step": 20930, "token_acc": 0.48606811145510836, "train_speed(iter/s)": 0.670427 }, { "epoch": 0.8969195835653999, "grad_norm": 3.728745937347412, "learning_rate": 9.227464108321661e-05, "loss": 2.222895622253418, "memory(GiB)": 72.85, "step": 20935, "token_acc": 0.5176056338028169, "train_speed(iter/s)": 0.670424 }, { "epoch": 0.897133798894649, "grad_norm": 2.9486117362976074, "learning_rate": 9.227104709041983e-05, "loss": 2.4412857055664063, "memory(GiB)": 72.85, "step": 20940, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.670403 }, { "epoch": 0.8973480142238979, "grad_norm": 3.014286756515503, "learning_rate": 9.226745233184336e-05, "loss": 2.4154327392578123, "memory(GiB)": 72.85, "step": 20945, "token_acc": 0.46885245901639344, "train_speed(iter/s)": 0.670404 }, { "epoch": 0.8975622295531468, "grad_norm": 3.3186569213867188, "learning_rate": 9.226385680755234e-05, "loss": 2.6315980911254884, "memory(GiB)": 72.85, "step": 20950, "token_acc": 0.46229508196721314, "train_speed(iter/s)": 0.67043 }, { "epoch": 0.8977764448823958, "grad_norm": 3.79091477394104, "learning_rate": 9.226026051761192e-05, "loss": 
2.7109825134277346, "memory(GiB)": 72.85, "step": 20955, "token_acc": 0.41875, "train_speed(iter/s)": 0.670405 }, { "epoch": 0.8979906602116448, "grad_norm": 3.6348700523376465, "learning_rate": 9.225666346208724e-05, "loss": 2.546565628051758, "memory(GiB)": 72.85, "step": 20960, "token_acc": 0.4584717607973422, "train_speed(iter/s)": 0.670379 }, { "epoch": 0.8982048755408937, "grad_norm": 4.804843425750732, "learning_rate": 9.225306564104344e-05, "loss": 2.4587339401245116, "memory(GiB)": 72.85, "step": 20965, "token_acc": 0.5176056338028169, "train_speed(iter/s)": 0.670374 }, { "epoch": 0.8984190908701427, "grad_norm": 3.2126383781433105, "learning_rate": 9.224946705454573e-05, "loss": 2.4070850372314454, "memory(GiB)": 72.85, "step": 20970, "token_acc": 0.43902439024390244, "train_speed(iter/s)": 0.670399 }, { "epoch": 0.8986333061993916, "grad_norm": 2.6298904418945312, "learning_rate": 9.22458677026593e-05, "loss": 2.4786762237548827, "memory(GiB)": 72.85, "step": 20975, "token_acc": 0.4811594202898551, "train_speed(iter/s)": 0.670397 }, { "epoch": 0.8988475215286406, "grad_norm": 4.950822830200195, "learning_rate": 9.224226758544934e-05, "loss": 2.604500579833984, "memory(GiB)": 72.85, "step": 20980, "token_acc": 0.4568345323741007, "train_speed(iter/s)": 0.67037 }, { "epoch": 0.8990617368578896, "grad_norm": 4.047872066497803, "learning_rate": 9.223866670298106e-05, "loss": 2.4818037033081053, "memory(GiB)": 72.85, "step": 20985, "token_acc": 0.47315436241610737, "train_speed(iter/s)": 0.670336 }, { "epoch": 0.8992759521871385, "grad_norm": 4.480554580688477, "learning_rate": 9.223506505531973e-05, "loss": 2.408422088623047, "memory(GiB)": 72.85, "step": 20990, "token_acc": 0.4918032786885246, "train_speed(iter/s)": 0.67033 }, { "epoch": 0.8994901675163874, "grad_norm": 5.15771484375, "learning_rate": 9.223146264253057e-05, "loss": 2.7218883514404295, "memory(GiB)": 72.85, "step": 20995, "token_acc": 0.48951048951048953, "train_speed(iter/s)": 0.670351 }, { 
"epoch": 0.8997043828456365, "grad_norm": 3.0965898036956787, "learning_rate": 9.222785946467887e-05, "loss": 2.449513244628906, "memory(GiB)": 72.85, "step": 21000, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.670337 }, { "epoch": 0.8997043828456365, "eval_loss": 1.9717392921447754, "eval_runtime": 17.5143, "eval_samples_per_second": 5.71, "eval_steps_per_second": 5.71, "eval_token_acc": 0.5096296296296297, "step": 21000 }, { "epoch": 0.8999185981748854, "grad_norm": 4.122361660003662, "learning_rate": 9.22249763715963e-05, "loss": 3.0347011566162108, "memory(GiB)": 72.85, "step": 21005, "token_acc": 0.49341438703140833, "train_speed(iter/s)": 0.669937 }, { "epoch": 0.9001328135041343, "grad_norm": 4.296237468719482, "learning_rate": 9.222137181679648e-05, "loss": 2.2319971084594727, "memory(GiB)": 72.85, "step": 21010, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.669966 }, { "epoch": 0.9003470288333834, "grad_norm": 3.495149850845337, "learning_rate": 9.221776649711689e-05, "loss": 2.130527877807617, "memory(GiB)": 72.85, "step": 21015, "token_acc": 0.5275080906148867, "train_speed(iter/s)": 0.67 }, { "epoch": 0.9005612441626323, "grad_norm": 4.048348426818848, "learning_rate": 9.221416041262285e-05, "loss": 2.527081298828125, "memory(GiB)": 72.85, "step": 21020, "token_acc": 0.4674922600619195, "train_speed(iter/s)": 0.670039 }, { "epoch": 0.9007754594918812, "grad_norm": 3.619839668273926, "learning_rate": 9.22105535633797e-05, "loss": 2.217118835449219, "memory(GiB)": 72.85, "step": 21025, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.670022 }, { "epoch": 0.9009896748211302, "grad_norm": 3.71456241607666, "learning_rate": 9.220694594945278e-05, "loss": 2.4085052490234373, "memory(GiB)": 72.85, "step": 21030, "token_acc": 0.5, "train_speed(iter/s)": 0.670029 }, { "epoch": 0.9012038901503792, "grad_norm": 3.4065306186676025, "learning_rate": 9.220333757090745e-05, "loss": 2.5682544708251953, "memory(GiB)": 72.85, "step": 
21035, "token_acc": 0.509090909090909, "train_speed(iter/s)": 0.669991 }, { "epoch": 0.9014181054796281, "grad_norm": 3.8044652938842773, "learning_rate": 9.219972842780907e-05, "loss": 2.1204496383666993, "memory(GiB)": 72.85, "step": 21040, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.670022 }, { "epoch": 0.9016323208088771, "grad_norm": 2.848782539367676, "learning_rate": 9.219611852022301e-05, "loss": 2.70562858581543, "memory(GiB)": 72.85, "step": 21045, "token_acc": 0.4697986577181208, "train_speed(iter/s)": 0.670028 }, { "epoch": 0.901846536138126, "grad_norm": 3.746478319168091, "learning_rate": 9.219250784821467e-05, "loss": 2.43115177154541, "memory(GiB)": 72.85, "step": 21050, "token_acc": 0.4843304843304843, "train_speed(iter/s)": 0.670038 }, { "epoch": 0.902060751467375, "grad_norm": 3.4564390182495117, "learning_rate": 9.218889641184949e-05, "loss": 2.4760292053222654, "memory(GiB)": 72.85, "step": 21055, "token_acc": 0.4605263157894737, "train_speed(iter/s)": 0.670071 }, { "epoch": 0.902274966796624, "grad_norm": 3.9976441860198975, "learning_rate": 9.218528421119287e-05, "loss": 2.3351940155029296, "memory(GiB)": 72.85, "step": 21060, "token_acc": 0.5037878787878788, "train_speed(iter/s)": 0.670086 }, { "epoch": 0.9024891821258729, "grad_norm": 4.461452484130859, "learning_rate": 9.218167124631025e-05, "loss": 2.385093116760254, "memory(GiB)": 72.85, "step": 21065, "token_acc": 0.5035714285714286, "train_speed(iter/s)": 0.670041 }, { "epoch": 0.9027033974551218, "grad_norm": 3.8873422145843506, "learning_rate": 9.21780575172671e-05, "loss": 2.3599124908447267, "memory(GiB)": 72.85, "step": 21070, "token_acc": 0.5204918032786885, "train_speed(iter/s)": 0.670025 }, { "epoch": 0.9029176127843709, "grad_norm": 5.078805446624756, "learning_rate": 9.217444302412886e-05, "loss": 2.483306884765625, "memory(GiB)": 72.85, "step": 21075, "token_acc": 0.44966442953020136, "train_speed(iter/s)": 0.670041 }, { "epoch": 0.9031318281136198, 
"grad_norm": 3.8284473419189453, "learning_rate": 9.217082776696101e-05, "loss": 2.765123748779297, "memory(GiB)": 72.85, "step": 21080, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670016 }, { "epoch": 0.9033460434428687, "grad_norm": 4.043118000030518, "learning_rate": 9.216721174582907e-05, "loss": 2.5770198822021486, "memory(GiB)": 72.85, "step": 21085, "token_acc": 0.47808764940239046, "train_speed(iter/s)": 0.670028 }, { "epoch": 0.9035602587721178, "grad_norm": 4.504569053649902, "learning_rate": 9.216359496079851e-05, "loss": 2.5786163330078127, "memory(GiB)": 72.85, "step": 21090, "token_acc": 0.44329896907216493, "train_speed(iter/s)": 0.670019 }, { "epoch": 0.9037744741013667, "grad_norm": 3.667022466659546, "learning_rate": 9.215997741193491e-05, "loss": 2.387757682800293, "memory(GiB)": 72.85, "step": 21095, "token_acc": 0.4885245901639344, "train_speed(iter/s)": 0.670023 }, { "epoch": 0.9039886894306156, "grad_norm": 4.137155055999756, "learning_rate": 9.215635909930376e-05, "loss": 2.638009262084961, "memory(GiB)": 72.85, "step": 21100, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.670033 }, { "epoch": 0.9042029047598646, "grad_norm": 2.90501070022583, "learning_rate": 9.21527400229706e-05, "loss": 2.4293426513671874, "memory(GiB)": 72.85, "step": 21105, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.670067 }, { "epoch": 0.9044171200891136, "grad_norm": 5.031125068664551, "learning_rate": 9.214912018300103e-05, "loss": 2.3477479934692385, "memory(GiB)": 72.85, "step": 21110, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.670083 }, { "epoch": 0.9046313354183625, "grad_norm": 4.816263675689697, "learning_rate": 9.214549957946061e-05, "loss": 2.5396629333496095, "memory(GiB)": 72.85, "step": 21115, "token_acc": 0.5097402597402597, "train_speed(iter/s)": 0.670098 }, { "epoch": 0.9048455507476115, "grad_norm": 4.263367176055908, "learning_rate": 9.214187821241492e-05, "loss": 2.6262914657592775, 
"memory(GiB)": 72.85, "step": 21120, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.670112 }, { "epoch": 0.9050597660768605, "grad_norm": 3.348865509033203, "learning_rate": 9.213825608192959e-05, "loss": 2.168745422363281, "memory(GiB)": 72.85, "step": 21125, "token_acc": 0.5399361022364217, "train_speed(iter/s)": 0.670099 }, { "epoch": 0.9052739814061094, "grad_norm": 3.8589370250701904, "learning_rate": 9.213463318807021e-05, "loss": 2.158125877380371, "memory(GiB)": 72.85, "step": 21130, "token_acc": 0.5252100840336135, "train_speed(iter/s)": 0.670043 }, { "epoch": 0.9054881967353584, "grad_norm": 2.825248956680298, "learning_rate": 9.213100953090241e-05, "loss": 2.246445655822754, "memory(GiB)": 72.85, "step": 21135, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.670068 }, { "epoch": 0.9057024120646073, "grad_norm": 4.042382717132568, "learning_rate": 9.212738511049187e-05, "loss": 2.4076936721801756, "memory(GiB)": 72.85, "step": 21140, "token_acc": 0.5053763440860215, "train_speed(iter/s)": 0.67 }, { "epoch": 0.9059166273938563, "grad_norm": 3.9627652168273926, "learning_rate": 9.212375992690423e-05, "loss": 2.4117523193359376, "memory(GiB)": 72.85, "step": 21145, "token_acc": 0.4984520123839009, "train_speed(iter/s)": 0.670002 }, { "epoch": 0.9061308427231053, "grad_norm": 3.42207670211792, "learning_rate": 9.212013398020516e-05, "loss": 2.7323617935180664, "memory(GiB)": 72.85, "step": 21150, "token_acc": 0.4934640522875817, "train_speed(iter/s)": 0.670031 }, { "epoch": 0.9063450580523542, "grad_norm": 4.061954498291016, "learning_rate": 9.211650727046033e-05, "loss": 2.14135684967041, "memory(GiB)": 72.85, "step": 21155, "token_acc": 0.4962121212121212, "train_speed(iter/s)": 0.670053 }, { "epoch": 0.9065592733816031, "grad_norm": 3.341872453689575, "learning_rate": 9.211287979773548e-05, "loss": 2.289666748046875, "memory(GiB)": 72.85, "step": 21160, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.670048 }, { "epoch": 
0.9067734887108522, "grad_norm": 3.574660062789917, "learning_rate": 9.210925156209632e-05, "loss": 2.6672250747680666, "memory(GiB)": 72.85, "step": 21165, "token_acc": 0.4393939393939394, "train_speed(iter/s)": 0.670045 }, { "epoch": 0.9069877040401011, "grad_norm": 3.1435928344726562, "learning_rate": 9.210562256360855e-05, "loss": 2.27541561126709, "memory(GiB)": 72.85, "step": 21170, "token_acc": 0.508833922261484, "train_speed(iter/s)": 0.670045 }, { "epoch": 0.90720191936935, "grad_norm": 3.653836965560913, "learning_rate": 9.210199280233794e-05, "loss": 2.4454559326171874, "memory(GiB)": 72.85, "step": 21175, "token_acc": 0.4921875, "train_speed(iter/s)": 0.670069 }, { "epoch": 0.907416134698599, "grad_norm": 3.9071972370147705, "learning_rate": 9.209836227835022e-05, "loss": 2.448108100891113, "memory(GiB)": 72.85, "step": 21180, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.670086 }, { "epoch": 0.907630350027848, "grad_norm": 3.70668625831604, "learning_rate": 9.20947309917112e-05, "loss": 2.370966148376465, "memory(GiB)": 72.85, "step": 21185, "token_acc": 0.47692307692307695, "train_speed(iter/s)": 0.670104 }, { "epoch": 0.9078445653570969, "grad_norm": 3.2371912002563477, "learning_rate": 9.209109894248662e-05, "loss": 2.400463104248047, "memory(GiB)": 72.85, "step": 21190, "token_acc": 0.47109826589595377, "train_speed(iter/s)": 0.670106 }, { "epoch": 0.9080587806863459, "grad_norm": 3.5499210357666016, "learning_rate": 9.208746613074231e-05, "loss": 2.404946517944336, "memory(GiB)": 72.85, "step": 21195, "token_acc": 0.4603174603174603, "train_speed(iter/s)": 0.67009 }, { "epoch": 0.9082729960155949, "grad_norm": 3.1062536239624023, "learning_rate": 9.208383255654406e-05, "loss": 2.502149963378906, "memory(GiB)": 72.85, "step": 21200, "token_acc": 0.505338078291815, "train_speed(iter/s)": 0.670107 }, { "epoch": 0.9084872113448439, "grad_norm": 3.5825417041778564, "learning_rate": 9.20801982199577e-05, "loss": 2.502720260620117, 
"memory(GiB)": 72.85, "step": 21205, "token_acc": 0.47129909365558914, "train_speed(iter/s)": 0.670139 }, { "epoch": 0.9087014266740928, "grad_norm": 5.02797794342041, "learning_rate": 9.20765631210491e-05, "loss": 2.489698791503906, "memory(GiB)": 72.85, "step": 21210, "token_acc": 0.46200607902735563, "train_speed(iter/s)": 0.670141 }, { "epoch": 0.9089156420033417, "grad_norm": 5.029191970825195, "learning_rate": 9.207292725988405e-05, "loss": 2.677388381958008, "memory(GiB)": 72.85, "step": 21215, "token_acc": 0.45, "train_speed(iter/s)": 0.67017 }, { "epoch": 0.9091298573325908, "grad_norm": 3.6256325244903564, "learning_rate": 9.206929063652849e-05, "loss": 2.5270212173461912, "memory(GiB)": 72.85, "step": 21220, "token_acc": 0.4983164983164983, "train_speed(iter/s)": 0.670236 }, { "epoch": 0.9093440726618397, "grad_norm": 2.821667194366455, "learning_rate": 9.206565325104826e-05, "loss": 2.3103006362915037, "memory(GiB)": 72.85, "step": 21225, "token_acc": 0.5074626865671642, "train_speed(iter/s)": 0.67023 }, { "epoch": 0.9095582879910886, "grad_norm": 3.8122169971466064, "learning_rate": 9.206201510350925e-05, "loss": 2.070207405090332, "memory(GiB)": 72.85, "step": 21230, "token_acc": 0.5315985130111525, "train_speed(iter/s)": 0.670222 }, { "epoch": 0.9097725033203377, "grad_norm": 5.260936737060547, "learning_rate": 9.205837619397738e-05, "loss": 2.050741195678711, "memory(GiB)": 72.85, "step": 21235, "token_acc": 0.5490196078431373, "train_speed(iter/s)": 0.670207 }, { "epoch": 0.9099867186495866, "grad_norm": 3.49941349029541, "learning_rate": 9.205473652251858e-05, "loss": 2.234136962890625, "memory(GiB)": 72.85, "step": 21240, "token_acc": 0.48172757475083056, "train_speed(iter/s)": 0.67019 }, { "epoch": 0.9102009339788355, "grad_norm": 4.326557159423828, "learning_rate": 9.205109608919878e-05, "loss": 2.8048166275024413, "memory(GiB)": 72.85, "step": 21245, "token_acc": 0.4489795918367347, "train_speed(iter/s)": 0.67018 }, { "epoch": 
0.9104151493080845, "grad_norm": 5.233603477478027, "learning_rate": 9.20474548940839e-05, "loss": 2.3047416687011717, "memory(GiB)": 72.85, "step": 21250, "token_acc": 0.4899598393574297, "train_speed(iter/s)": 0.670186 }, { "epoch": 0.9106293646373335, "grad_norm": 2.8452208042144775, "learning_rate": 9.204381293723996e-05, "loss": 2.5168928146362304, "memory(GiB)": 72.85, "step": 21255, "token_acc": 0.4852941176470588, "train_speed(iter/s)": 0.670215 }, { "epoch": 0.9108435799665824, "grad_norm": 3.5033681392669678, "learning_rate": 9.204017021873289e-05, "loss": 2.6927511215209963, "memory(GiB)": 72.85, "step": 21260, "token_acc": 0.45161290322580644, "train_speed(iter/s)": 0.670241 }, { "epoch": 0.9110577952958314, "grad_norm": 3.7541656494140625, "learning_rate": 9.20365267386287e-05, "loss": 2.4057207107543945, "memory(GiB)": 72.85, "step": 21265, "token_acc": 0.4574468085106383, "train_speed(iter/s)": 0.67023 }, { "epoch": 0.9112720106250803, "grad_norm": 3.794952392578125, "learning_rate": 9.203288249699341e-05, "loss": 2.716959762573242, "memory(GiB)": 72.85, "step": 21270, "token_acc": 0.44223107569721115, "train_speed(iter/s)": 0.670239 }, { "epoch": 0.9114862259543293, "grad_norm": 3.8984007835388184, "learning_rate": 9.202923749389302e-05, "loss": 1.9985359191894532, "memory(GiB)": 72.85, "step": 21275, "token_acc": 0.5579399141630901, "train_speed(iter/s)": 0.670258 }, { "epoch": 0.9117004412835783, "grad_norm": 3.604973316192627, "learning_rate": 9.202559172939355e-05, "loss": 2.3652368545532227, "memory(GiB)": 72.85, "step": 21280, "token_acc": 0.49185667752442996, "train_speed(iter/s)": 0.670279 }, { "epoch": 0.9119146566128272, "grad_norm": 2.9346282482147217, "learning_rate": 9.202194520356108e-05, "loss": 2.377607536315918, "memory(GiB)": 72.85, "step": 21285, "token_acc": 0.5083056478405316, "train_speed(iter/s)": 0.670276 }, { "epoch": 0.9121288719420761, "grad_norm": 3.0939011573791504, "learning_rate": 9.201829791646165e-05, "loss": 
2.435677337646484, "memory(GiB)": 72.85, "step": 21290, "token_acc": 0.4855305466237942, "train_speed(iter/s)": 0.670289 }, { "epoch": 0.9123430872713252, "grad_norm": 3.834336519241333, "learning_rate": 9.201464986816132e-05, "loss": 2.276201057434082, "memory(GiB)": 72.85, "step": 21295, "token_acc": 0.5101351351351351, "train_speed(iter/s)": 0.670301 }, { "epoch": 0.9125573026005741, "grad_norm": 3.060943126678467, "learning_rate": 9.201100105872622e-05, "loss": 2.4668506622314452, "memory(GiB)": 72.85, "step": 21300, "token_acc": 0.4812286689419795, "train_speed(iter/s)": 0.670304 }, { "epoch": 0.912771517929823, "grad_norm": 4.295551776885986, "learning_rate": 9.200735148822241e-05, "loss": 2.4068510055541994, "memory(GiB)": 72.85, "step": 21305, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.67031 }, { "epoch": 0.9129857332590721, "grad_norm": 5.101131916046143, "learning_rate": 9.200370115671604e-05, "loss": 2.521864318847656, "memory(GiB)": 72.85, "step": 21310, "token_acc": 0.49808429118773945, "train_speed(iter/s)": 0.670277 }, { "epoch": 0.913199948588321, "grad_norm": 3.6096410751342773, "learning_rate": 9.20000500642732e-05, "loss": 2.570260238647461, "memory(GiB)": 72.85, "step": 21315, "token_acc": 0.46236559139784944, "train_speed(iter/s)": 0.670263 }, { "epoch": 0.9134141639175699, "grad_norm": 5.740509986877441, "learning_rate": 9.199639821096006e-05, "loss": 2.6441726684570312, "memory(GiB)": 72.85, "step": 21320, "token_acc": 0.4724137931034483, "train_speed(iter/s)": 0.670288 }, { "epoch": 0.913628379246819, "grad_norm": 3.9782121181488037, "learning_rate": 9.199274559684277e-05, "loss": 2.495766830444336, "memory(GiB)": 72.85, "step": 21325, "token_acc": 0.5269230769230769, "train_speed(iter/s)": 0.670285 }, { "epoch": 0.9138425945760679, "grad_norm": 2.653540849685669, "learning_rate": 9.198909222198751e-05, "loss": 2.022120475769043, "memory(GiB)": 72.85, "step": 21330, "token_acc": 0.5389830508474577, "train_speed(iter/s)": 
0.67029 }, { "epoch": 0.9140568099053168, "grad_norm": 3.122147798538208, "learning_rate": 9.198543808646045e-05, "loss": 2.357937240600586, "memory(GiB)": 72.85, "step": 21335, "token_acc": 0.5, "train_speed(iter/s)": 0.670313 }, { "epoch": 0.9142710252345658, "grad_norm": 3.0016844272613525, "learning_rate": 9.19817831903278e-05, "loss": 2.2311800003051756, "memory(GiB)": 72.85, "step": 21340, "token_acc": 0.49142857142857144, "train_speed(iter/s)": 0.670327 }, { "epoch": 0.9144852405638147, "grad_norm": 4.302616119384766, "learning_rate": 9.197812753365575e-05, "loss": 2.5339427947998048, "memory(GiB)": 72.85, "step": 21345, "token_acc": 0.47017543859649125, "train_speed(iter/s)": 0.670326 }, { "epoch": 0.9146994558930637, "grad_norm": 4.536956787109375, "learning_rate": 9.197447111651055e-05, "loss": 2.286769485473633, "memory(GiB)": 72.85, "step": 21350, "token_acc": 0.5, "train_speed(iter/s)": 0.670338 }, { "epoch": 0.9149136712223127, "grad_norm": 3.471343755722046, "learning_rate": 9.197081393895843e-05, "loss": 2.4182575225830076, "memory(GiB)": 72.85, "step": 21355, "token_acc": 0.44107744107744107, "train_speed(iter/s)": 0.670322 }, { "epoch": 0.9151278865515616, "grad_norm": 2.9810898303985596, "learning_rate": 9.196715600106564e-05, "loss": 2.261967086791992, "memory(GiB)": 72.85, "step": 21360, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.670291 }, { "epoch": 0.9153421018808106, "grad_norm": 3.5965769290924072, "learning_rate": 9.196349730289845e-05, "loss": 2.4135046005249023, "memory(GiB)": 72.85, "step": 21365, "token_acc": 0.45307443365695793, "train_speed(iter/s)": 0.670267 }, { "epoch": 0.9155563172100596, "grad_norm": 2.724778652191162, "learning_rate": 9.195983784452315e-05, "loss": 2.331862449645996, "memory(GiB)": 72.85, "step": 21370, "token_acc": 0.50814332247557, "train_speed(iter/s)": 0.670283 }, { "epoch": 0.9157705325393085, "grad_norm": 6.360398292541504, "learning_rate": 9.195617762600601e-05, "loss": 2.34149284362793, 
"memory(GiB)": 72.85, "step": 21375, "token_acc": 0.4861111111111111, "train_speed(iter/s)": 0.670296 }, { "epoch": 0.9159847478685574, "grad_norm": 4.000616550445557, "learning_rate": 9.195251664741337e-05, "loss": 2.5258541107177734, "memory(GiB)": 72.85, "step": 21380, "token_acc": 0.449438202247191, "train_speed(iter/s)": 0.670299 }, { "epoch": 0.9161989631978065, "grad_norm": 4.118985652923584, "learning_rate": 9.194885490881153e-05, "loss": 2.678347587585449, "memory(GiB)": 72.85, "step": 21385, "token_acc": 0.41292134831460675, "train_speed(iter/s)": 0.670322 }, { "epoch": 0.9164131785270554, "grad_norm": 2.765498161315918, "learning_rate": 9.194519241026684e-05, "loss": 2.270577239990234, "memory(GiB)": 72.85, "step": 21390, "token_acc": 0.49074074074074076, "train_speed(iter/s)": 0.670339 }, { "epoch": 0.9166273938563043, "grad_norm": 2.7759621143341064, "learning_rate": 9.194152915184564e-05, "loss": 2.2925884246826174, "memory(GiB)": 72.85, "step": 21395, "token_acc": 0.4376899696048632, "train_speed(iter/s)": 0.670356 }, { "epoch": 0.9168416091855534, "grad_norm": 3.2080764770507812, "learning_rate": 9.193786513361428e-05, "loss": 2.407622146606445, "memory(GiB)": 72.85, "step": 21400, "token_acc": 0.4678362573099415, "train_speed(iter/s)": 0.670328 }, { "epoch": 0.9170558245148023, "grad_norm": 3.971278190612793, "learning_rate": 9.193420035563916e-05, "loss": 2.5595367431640623, "memory(GiB)": 72.85, "step": 21405, "token_acc": 0.4206896551724138, "train_speed(iter/s)": 0.670321 }, { "epoch": 0.9172700398440512, "grad_norm": 3.067190408706665, "learning_rate": 9.193053481798667e-05, "loss": 2.454802322387695, "memory(GiB)": 72.85, "step": 21410, "token_acc": 0.47305389221556887, "train_speed(iter/s)": 0.670363 }, { "epoch": 0.9174842551733002, "grad_norm": 3.077792167663574, "learning_rate": 9.192686852072321e-05, "loss": 2.1487598419189453, "memory(GiB)": 72.85, "step": 21415, "token_acc": 0.5225225225225225, "train_speed(iter/s)": 0.670349 }, { 
"epoch": 0.9176984705025492, "grad_norm": 7.365727424621582, "learning_rate": 9.192320146391518e-05, "loss": 2.309639549255371, "memory(GiB)": 72.85, "step": 21420, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.670276 }, { "epoch": 0.9179126858317981, "grad_norm": 4.686807155609131, "learning_rate": 9.191953364762904e-05, "loss": 2.4985683441162108, "memory(GiB)": 72.85, "step": 21425, "token_acc": 0.468944099378882, "train_speed(iter/s)": 0.670276 }, { "epoch": 0.9181269011610471, "grad_norm": 3.717555046081543, "learning_rate": 9.191586507193122e-05, "loss": 2.2792856216430666, "memory(GiB)": 72.85, "step": 21430, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.670255 }, { "epoch": 0.918341116490296, "grad_norm": 5.3694071769714355, "learning_rate": 9.191219573688819e-05, "loss": 2.060452461242676, "memory(GiB)": 72.85, "step": 21435, "token_acc": 0.5211864406779662, "train_speed(iter/s)": 0.670291 }, { "epoch": 0.918555331819545, "grad_norm": 3.2545368671417236, "learning_rate": 9.190852564256641e-05, "loss": 2.273840141296387, "memory(GiB)": 72.85, "step": 21440, "token_acc": 0.4624505928853755, "train_speed(iter/s)": 0.670279 }, { "epoch": 0.918769547148794, "grad_norm": 3.303955554962158, "learning_rate": 9.190485478903238e-05, "loss": 2.2780038833618166, "memory(GiB)": 72.85, "step": 21445, "token_acc": 0.511400651465798, "train_speed(iter/s)": 0.670262 }, { "epoch": 0.9189837624780429, "grad_norm": 3.4227051734924316, "learning_rate": 9.190118317635259e-05, "loss": 2.674961471557617, "memory(GiB)": 72.85, "step": 21450, "token_acc": 0.4729241877256318, "train_speed(iter/s)": 0.670293 }, { "epoch": 0.9191979778072918, "grad_norm": 4.15801477432251, "learning_rate": 9.189751080459357e-05, "loss": 2.625298500061035, "memory(GiB)": 72.85, "step": 21455, "token_acc": 0.4281150159744409, "train_speed(iter/s)": 0.670314 }, { "epoch": 0.9194121931365409, "grad_norm": 3.2521092891693115, "learning_rate": 9.189383767382182e-05, "loss": 
2.3672922134399412, "memory(GiB)": 72.85, "step": 21460, "token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.670345 }, { "epoch": 0.9196264084657898, "grad_norm": 5.153797626495361, "learning_rate": 9.189016378410393e-05, "loss": 2.7197185516357423, "memory(GiB)": 72.85, "step": 21465, "token_acc": 0.4388059701492537, "train_speed(iter/s)": 0.670365 }, { "epoch": 0.9198406237950387, "grad_norm": 3.6282286643981934, "learning_rate": 9.188648913550641e-05, "loss": 2.7619247436523438, "memory(GiB)": 72.85, "step": 21470, "token_acc": 0.4664429530201342, "train_speed(iter/s)": 0.670351 }, { "epoch": 0.9200548391242878, "grad_norm": 4.1813578605651855, "learning_rate": 9.188281372809584e-05, "loss": 2.3110639572143556, "memory(GiB)": 72.85, "step": 21475, "token_acc": 0.46488294314381273, "train_speed(iter/s)": 0.670373 }, { "epoch": 0.9202690544535367, "grad_norm": 3.980816602706909, "learning_rate": 9.187913756193882e-05, "loss": 2.450113296508789, "memory(GiB)": 72.85, "step": 21480, "token_acc": 0.5053003533568905, "train_speed(iter/s)": 0.670389 }, { "epoch": 0.9204832697827856, "grad_norm": 4.476561069488525, "learning_rate": 9.187546063710193e-05, "loss": 2.497245025634766, "memory(GiB)": 72.85, "step": 21485, "token_acc": 0.4936708860759494, "train_speed(iter/s)": 0.670378 }, { "epoch": 0.9206974851120346, "grad_norm": 3.6565146446228027, "learning_rate": 9.18717829536518e-05, "loss": 2.3338294982910157, "memory(GiB)": 72.85, "step": 21490, "token_acc": 0.48727272727272725, "train_speed(iter/s)": 0.670382 }, { "epoch": 0.9209117004412836, "grad_norm": 3.135725259780884, "learning_rate": 9.186810451165502e-05, "loss": 2.3519351959228514, "memory(GiB)": 72.85, "step": 21495, "token_acc": 0.5127272727272727, "train_speed(iter/s)": 0.670385 }, { "epoch": 0.9211259157705325, "grad_norm": 3.411400079727173, "learning_rate": 9.186442531117828e-05, "loss": 2.302259826660156, "memory(GiB)": 72.85, "step": 21500, "token_acc": 0.46735395189003437, 
"train_speed(iter/s)": 0.670381 }, { "epoch": 0.9211259157705325, "eval_loss": 2.2381017208099365, "eval_runtime": 16.228, "eval_samples_per_second": 6.162, "eval_steps_per_second": 6.162, "eval_token_acc": 0.4811443433029909, "step": 21500 }, { "epoch": 0.9213401310997815, "grad_norm": 4.351228713989258, "learning_rate": 9.18607453522882e-05, "loss": 2.722544288635254, "memory(GiB)": 72.85, "step": 21505, "token_acc": 0.4708603145235893, "train_speed(iter/s)": 0.669975 }, { "epoch": 0.9215543464290304, "grad_norm": 3.727968215942383, "learning_rate": 9.185706463505143e-05, "loss": 2.3874826431274414, "memory(GiB)": 72.85, "step": 21510, "token_acc": 0.4980237154150198, "train_speed(iter/s)": 0.669986 }, { "epoch": 0.9217685617582794, "grad_norm": 4.279057025909424, "learning_rate": 9.185338315953468e-05, "loss": 2.5411069869995115, "memory(GiB)": 72.85, "step": 21515, "token_acc": 0.47572815533980584, "train_speed(iter/s)": 0.670002 }, { "epoch": 0.9219827770875284, "grad_norm": 3.8966493606567383, "learning_rate": 9.184970092580463e-05, "loss": 2.5094518661499023, "memory(GiB)": 72.85, "step": 21520, "token_acc": 0.47735191637630664, "train_speed(iter/s)": 0.670006 }, { "epoch": 0.9221969924167773, "grad_norm": 3.544684886932373, "learning_rate": 9.1846017933928e-05, "loss": 2.5651479721069337, "memory(GiB)": 72.85, "step": 21525, "token_acc": 0.5211726384364821, "train_speed(iter/s)": 0.669988 }, { "epoch": 0.9224112077460263, "grad_norm": 4.163408279418945, "learning_rate": 9.184233418397148e-05, "loss": 2.3289058685302733, "memory(GiB)": 72.85, "step": 21530, "token_acc": 0.5186440677966102, "train_speed(iter/s)": 0.670006 }, { "epoch": 0.9226254230752753, "grad_norm": 3.3411285877227783, "learning_rate": 9.183864967600184e-05, "loss": 2.119881439208984, "memory(GiB)": 72.85, "step": 21535, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.67002 }, { "epoch": 0.9228396384045242, "grad_norm": 3.2405505180358887, "learning_rate": 9.183496441008581e-05, 
"loss": 2.3607643127441404, "memory(GiB)": 72.85, "step": 21540, "token_acc": 0.5, "train_speed(iter/s)": 0.670047 }, { "epoch": 0.9230538537337732, "grad_norm": 3.3659186363220215, "learning_rate": 9.183127838629016e-05, "loss": 2.542988967895508, "memory(GiB)": 72.85, "step": 21545, "token_acc": 0.5090252707581228, "train_speed(iter/s)": 0.670107 }, { "epoch": 0.9232680690630222, "grad_norm": 3.6342625617980957, "learning_rate": 9.182759160468164e-05, "loss": 2.448194885253906, "memory(GiB)": 72.85, "step": 21550, "token_acc": 0.498220640569395, "train_speed(iter/s)": 0.67012 }, { "epoch": 0.9234822843922711, "grad_norm": 5.255943775177002, "learning_rate": 9.182390406532708e-05, "loss": 2.5223691940307615, "memory(GiB)": 72.85, "step": 21555, "token_acc": 0.48757763975155277, "train_speed(iter/s)": 0.670114 }, { "epoch": 0.9236964997215201, "grad_norm": 3.704568386077881, "learning_rate": 9.182021576829326e-05, "loss": 2.3344959259033202, "memory(GiB)": 72.85, "step": 21560, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.67015 }, { "epoch": 0.923910715050769, "grad_norm": 4.311225414276123, "learning_rate": 9.1816526713647e-05, "loss": 2.3449546813964846, "memory(GiB)": 72.85, "step": 21565, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.670138 }, { "epoch": 0.924124930380018, "grad_norm": 3.3337371349334717, "learning_rate": 9.181283690145514e-05, "loss": 2.4565738677978515, "memory(GiB)": 72.85, "step": 21570, "token_acc": 0.4851190476190476, "train_speed(iter/s)": 0.670127 }, { "epoch": 0.924339145709267, "grad_norm": 5.15795373916626, "learning_rate": 9.18091463317845e-05, "loss": 2.3890235900878904, "memory(GiB)": 72.85, "step": 21575, "token_acc": 0.47547169811320755, "train_speed(iter/s)": 0.670147 }, { "epoch": 0.9245533610385159, "grad_norm": 3.6154956817626953, "learning_rate": 9.180545500470197e-05, "loss": 2.6206254959106445, "memory(GiB)": 72.85, "step": 21580, "token_acc": 0.4652777777777778, "train_speed(iter/s)": 0.670148 
}, { "epoch": 0.9247675763677649, "grad_norm": 4.051456451416016, "learning_rate": 9.18017629202744e-05, "loss": 2.425581359863281, "memory(GiB)": 72.85, "step": 21585, "token_acc": 0.5033333333333333, "train_speed(iter/s)": 0.670161 }, { "epoch": 0.9249817916970139, "grad_norm": 5.836161136627197, "learning_rate": 9.179807007856867e-05, "loss": 2.663752555847168, "memory(GiB)": 72.85, "step": 21590, "token_acc": 0.4854014598540146, "train_speed(iter/s)": 0.670171 }, { "epoch": 0.9251960070262628, "grad_norm": 3.4773361682891846, "learning_rate": 9.179437647965172e-05, "loss": 2.4530059814453127, "memory(GiB)": 72.85, "step": 21595, "token_acc": 0.4409722222222222, "train_speed(iter/s)": 0.670188 }, { "epoch": 0.9254102223555117, "grad_norm": 4.270783424377441, "learning_rate": 9.179068212359041e-05, "loss": 2.6744096755981444, "memory(GiB)": 72.85, "step": 21600, "token_acc": 0.46366782006920415, "train_speed(iter/s)": 0.670167 }, { "epoch": 0.9256244376847608, "grad_norm": 3.4839940071105957, "learning_rate": 9.178698701045169e-05, "loss": 2.5680694580078125, "memory(GiB)": 72.85, "step": 21605, "token_acc": 0.4567901234567901, "train_speed(iter/s)": 0.670211 }, { "epoch": 0.9258386530140097, "grad_norm": 3.7556328773498535, "learning_rate": 9.178329114030251e-05, "loss": 2.4653411865234376, "memory(GiB)": 72.85, "step": 21610, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.670274 }, { "epoch": 0.9260528683432586, "grad_norm": 4.118699550628662, "learning_rate": 9.177959451320981e-05, "loss": 2.2874881744384767, "memory(GiB)": 72.85, "step": 21615, "token_acc": 0.5150375939849624, "train_speed(iter/s)": 0.670292 }, { "epoch": 0.9262670836725077, "grad_norm": 3.429490804672241, "learning_rate": 9.177589712924055e-05, "loss": 2.4651134490966795, "memory(GiB)": 72.85, "step": 21620, "token_acc": 0.48134328358208955, "train_speed(iter/s)": 0.67033 }, { "epoch": 0.9264812990017566, "grad_norm": 4.084052085876465, "learning_rate": 9.177219898846175e-05, 
"loss": 2.696294403076172, "memory(GiB)": 72.85, "step": 21625, "token_acc": 0.4598337950138504, "train_speed(iter/s)": 0.670359 }, { "epoch": 0.9266955143310055, "grad_norm": 8.518472671508789, "learning_rate": 9.176850009094037e-05, "loss": 2.2738311767578123, "memory(GiB)": 72.85, "step": 21630, "token_acc": 0.5081967213114754, "train_speed(iter/s)": 0.670375 }, { "epoch": 0.9269097296602545, "grad_norm": 4.267012596130371, "learning_rate": 9.176480043674343e-05, "loss": 2.386882209777832, "memory(GiB)": 72.85, "step": 21635, "token_acc": 0.4911660777385159, "train_speed(iter/s)": 0.670325 }, { "epoch": 0.9271239449895035, "grad_norm": 3.568761110305786, "learning_rate": 9.176110002593794e-05, "loss": 2.2465444564819337, "memory(GiB)": 72.85, "step": 21640, "token_acc": 0.5072463768115942, "train_speed(iter/s)": 0.670282 }, { "epoch": 0.9273381603187524, "grad_norm": 3.391522169113159, "learning_rate": 9.175739885859095e-05, "loss": 2.480437088012695, "memory(GiB)": 72.85, "step": 21645, "token_acc": 0.45674740484429066, "train_speed(iter/s)": 0.670272 }, { "epoch": 0.9275523756480014, "grad_norm": 4.483253002166748, "learning_rate": 9.175369693476951e-05, "loss": 2.3353271484375, "memory(GiB)": 72.85, "step": 21650, "token_acc": 0.4811594202898551, "train_speed(iter/s)": 0.670234 }, { "epoch": 0.9277665909772503, "grad_norm": 3.0451223850250244, "learning_rate": 9.17499942545407e-05, "loss": 2.0682960510253907, "memory(GiB)": 72.85, "step": 21655, "token_acc": 0.5247148288973384, "train_speed(iter/s)": 0.670219 }, { "epoch": 0.9279808063064993, "grad_norm": 3.3297948837280273, "learning_rate": 9.174629081797156e-05, "loss": 2.6708017349243165, "memory(GiB)": 72.85, "step": 21660, "token_acc": 0.46564885496183206, "train_speed(iter/s)": 0.670202 }, { "epoch": 0.9281950216357483, "grad_norm": 4.611396312713623, "learning_rate": 9.174258662512921e-05, "loss": 2.6569282531738283, "memory(GiB)": 72.85, "step": 21665, "token_acc": 0.4933920704845815, 
"train_speed(iter/s)": 0.670221 }, { "epoch": 0.9284092369649972, "grad_norm": 5.370468616485596, "learning_rate": 9.173888167608074e-05, "loss": 2.551813316345215, "memory(GiB)": 72.85, "step": 21670, "token_acc": 0.46360153256704983, "train_speed(iter/s)": 0.670209 }, { "epoch": 0.9286234522942461, "grad_norm": 4.337526321411133, "learning_rate": 9.173517597089328e-05, "loss": 2.187968444824219, "memory(GiB)": 72.85, "step": 21675, "token_acc": 0.5064377682403434, "train_speed(iter/s)": 0.670213 }, { "epoch": 0.9288376676234952, "grad_norm": 2.639068126678467, "learning_rate": 9.173146950963396e-05, "loss": 2.6560623168945314, "memory(GiB)": 72.85, "step": 21680, "token_acc": 0.4555256064690027, "train_speed(iter/s)": 0.670197 }, { "epoch": 0.9290518829527441, "grad_norm": 4.716750144958496, "learning_rate": 9.17277622923699e-05, "loss": 2.3626129150390627, "memory(GiB)": 72.85, "step": 21685, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.670196 }, { "epoch": 0.929266098281993, "grad_norm": 4.228027820587158, "learning_rate": 9.172405431916831e-05, "loss": 2.5742429733276366, "memory(GiB)": 72.85, "step": 21690, "token_acc": 0.46200607902735563, "train_speed(iter/s)": 0.670183 }, { "epoch": 0.9294803136112421, "grad_norm": 2.8505969047546387, "learning_rate": 9.172034559009632e-05, "loss": 2.4885812759399415, "memory(GiB)": 72.85, "step": 21695, "token_acc": 0.4652014652014652, "train_speed(iter/s)": 0.670205 }, { "epoch": 0.929694528940491, "grad_norm": 3.558950424194336, "learning_rate": 9.171663610522114e-05, "loss": 2.2663894653320313, "memory(GiB)": 72.85, "step": 21700, "token_acc": 0.49174917491749176, "train_speed(iter/s)": 0.67017 }, { "epoch": 0.9299087442697399, "grad_norm": 3.4308035373687744, "learning_rate": 9.171292586460996e-05, "loss": 2.495400047302246, "memory(GiB)": 72.85, "step": 21705, "token_acc": 0.4660493827160494, "train_speed(iter/s)": 0.670168 }, { "epoch": 0.9301229595989889, "grad_norm": 3.622570037841797, 
"learning_rate": 9.170921486833e-05, "loss": 2.3066539764404297, "memory(GiB)": 72.85, "step": 21710, "token_acc": 0.4843205574912892, "train_speed(iter/s)": 0.670165 }, { "epoch": 0.9303371749282379, "grad_norm": 3.1767196655273438, "learning_rate": 9.170550311644848e-05, "loss": 2.287833023071289, "memory(GiB)": 72.85, "step": 21715, "token_acc": 0.48172757475083056, "train_speed(iter/s)": 0.670195 }, { "epoch": 0.9305513902574868, "grad_norm": 3.531714677810669, "learning_rate": 9.170179060903265e-05, "loss": 2.5520402908325197, "memory(GiB)": 72.85, "step": 21720, "token_acc": 0.4882943143812709, "train_speed(iter/s)": 0.670212 }, { "epoch": 0.9307656055867358, "grad_norm": 3.5732264518737793, "learning_rate": 9.169807734614976e-05, "loss": 2.4017206192016602, "memory(GiB)": 72.85, "step": 21725, "token_acc": 0.5163398692810458, "train_speed(iter/s)": 0.670187 }, { "epoch": 0.9309798209159847, "grad_norm": 3.8616440296173096, "learning_rate": 9.16943633278671e-05, "loss": 2.4651300430297853, "memory(GiB)": 72.85, "step": 21730, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.670216 }, { "epoch": 0.9311940362452337, "grad_norm": 3.186054229736328, "learning_rate": 9.169064855425191e-05, "loss": 2.4164833068847655, "memory(GiB)": 72.85, "step": 21735, "token_acc": 0.504950495049505, "train_speed(iter/s)": 0.670208 }, { "epoch": 0.9314082515744827, "grad_norm": 4.466302394866943, "learning_rate": 9.168693302537155e-05, "loss": 2.570832061767578, "memory(GiB)": 72.85, "step": 21740, "token_acc": 0.46325878594249204, "train_speed(iter/s)": 0.670229 }, { "epoch": 0.9316224669037316, "grad_norm": 3.24838924407959, "learning_rate": 9.168321674129326e-05, "loss": 2.328400802612305, "memory(GiB)": 72.85, "step": 21745, "token_acc": 0.5179153094462541, "train_speed(iter/s)": 0.670284 }, { "epoch": 0.9318366822329806, "grad_norm": 3.7113888263702393, "learning_rate": 9.16794997020844e-05, "loss": 2.2660327911376954, "memory(GiB)": 72.85, "step": 21750, 
"token_acc": 0.5368421052631579, "train_speed(iter/s)": 0.670317 }, { "epoch": 0.9320508975622296, "grad_norm": 3.105623722076416, "learning_rate": 9.167578190781232e-05, "loss": 2.318818283081055, "memory(GiB)": 72.85, "step": 21755, "token_acc": 0.487012987012987, "train_speed(iter/s)": 0.670318 }, { "epoch": 0.9322651128914785, "grad_norm": 6.07889986038208, "learning_rate": 9.167206335854435e-05, "loss": 2.1014755249023436, "memory(GiB)": 72.85, "step": 21760, "token_acc": 0.5076923076923077, "train_speed(iter/s)": 0.670334 }, { "epoch": 0.9324793282207274, "grad_norm": 4.776703357696533, "learning_rate": 9.166834405434785e-05, "loss": 2.3194063186645506, "memory(GiB)": 72.85, "step": 21765, "token_acc": 0.48398576512455516, "train_speed(iter/s)": 0.670341 }, { "epoch": 0.9326935435499765, "grad_norm": 3.5968525409698486, "learning_rate": 9.166462399529021e-05, "loss": 2.578142547607422, "memory(GiB)": 72.85, "step": 21770, "token_acc": 0.4622356495468278, "train_speed(iter/s)": 0.670365 }, { "epoch": 0.9329077588792254, "grad_norm": 3.250736713409424, "learning_rate": 9.166090318143883e-05, "loss": 2.148639106750488, "memory(GiB)": 72.85, "step": 21775, "token_acc": 0.5548387096774193, "train_speed(iter/s)": 0.670366 }, { "epoch": 0.9331219742084743, "grad_norm": 4.210941791534424, "learning_rate": 9.165718161286111e-05, "loss": 2.3855669021606447, "memory(GiB)": 72.85, "step": 21780, "token_acc": 0.4948805460750853, "train_speed(iter/s)": 0.670329 }, { "epoch": 0.9333361895377233, "grad_norm": 4.349168300628662, "learning_rate": 9.165345928962446e-05, "loss": 2.3396955490112306, "memory(GiB)": 72.85, "step": 21785, "token_acc": 0.5285714285714286, "train_speed(iter/s)": 0.670347 }, { "epoch": 0.9335504048669723, "grad_norm": 3.535858154296875, "learning_rate": 9.164973621179634e-05, "loss": 2.3107444763183596, "memory(GiB)": 72.85, "step": 21790, "token_acc": 0.519650655021834, "train_speed(iter/s)": 0.670361 }, { "epoch": 0.9337646201962212, "grad_norm": 
4.204564094543457, "learning_rate": 9.164601237944415e-05, "loss": 2.4522449493408205, "memory(GiB)": 72.85, "step": 21795, "token_acc": 0.476027397260274, "train_speed(iter/s)": 0.670311 }, { "epoch": 0.9339788355254702, "grad_norm": 5.277792453765869, "learning_rate": 9.16422877926354e-05, "loss": 2.474377250671387, "memory(GiB)": 72.85, "step": 21800, "token_acc": 0.4511784511784512, "train_speed(iter/s)": 0.670292 }, { "epoch": 0.9341930508547192, "grad_norm": 5.71762228012085, "learning_rate": 9.163856245143752e-05, "loss": 2.4941057205200194, "memory(GiB)": 72.85, "step": 21805, "token_acc": 0.4681647940074906, "train_speed(iter/s)": 0.670286 }, { "epoch": 0.9344072661839681, "grad_norm": 2.9085378646850586, "learning_rate": 9.163483635591804e-05, "loss": 2.530388069152832, "memory(GiB)": 72.85, "step": 21810, "token_acc": 0.476027397260274, "train_speed(iter/s)": 0.670281 }, { "epoch": 0.9346214815132171, "grad_norm": 4.284427642822266, "learning_rate": 9.163110950614445e-05, "loss": 2.5362773895263673, "memory(GiB)": 72.85, "step": 21815, "token_acc": 0.4713804713804714, "train_speed(iter/s)": 0.670295 }, { "epoch": 0.934835696842466, "grad_norm": 3.237154960632324, "learning_rate": 9.162738190218424e-05, "loss": 2.3732139587402346, "memory(GiB)": 72.85, "step": 21820, "token_acc": 0.46296296296296297, "train_speed(iter/s)": 0.670299 }, { "epoch": 0.935049912171715, "grad_norm": 3.724945306777954, "learning_rate": 9.162365354410496e-05, "loss": 2.408334732055664, "memory(GiB)": 72.85, "step": 21825, "token_acc": 0.47157190635451507, "train_speed(iter/s)": 0.670341 }, { "epoch": 0.935264127500964, "grad_norm": 3.887888193130493, "learning_rate": 9.161992443197416e-05, "loss": 2.3774484634399413, "memory(GiB)": 72.85, "step": 21830, "token_acc": 0.5082644628099173, "train_speed(iter/s)": 0.670324 }, { "epoch": 0.9354783428302129, "grad_norm": 4.2246880531311035, "learning_rate": 9.161619456585937e-05, "loss": 2.5506481170654296, "memory(GiB)": 72.85, "step": 
21835, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.670321 }, { "epoch": 0.9356925581594618, "grad_norm": 3.2520179748535156, "learning_rate": 9.161246394582818e-05, "loss": 2.430060386657715, "memory(GiB)": 72.85, "step": 21840, "token_acc": 0.5212355212355212, "train_speed(iter/s)": 0.670295 }, { "epoch": 0.9359067734887109, "grad_norm": 4.346063613891602, "learning_rate": 9.160873257194818e-05, "loss": 2.4572444915771485, "memory(GiB)": 72.85, "step": 21845, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.670316 }, { "epoch": 0.9361209888179598, "grad_norm": 3.493626356124878, "learning_rate": 9.160500044428696e-05, "loss": 2.3818315505981444, "memory(GiB)": 72.85, "step": 21850, "token_acc": 0.4876325088339223, "train_speed(iter/s)": 0.670329 }, { "epoch": 0.9363352041472087, "grad_norm": 3.526549816131592, "learning_rate": 9.160126756291211e-05, "loss": 2.2718345642089846, "memory(GiB)": 72.85, "step": 21855, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.67032 }, { "epoch": 0.9365494194764578, "grad_norm": 4.328083038330078, "learning_rate": 9.15975339278913e-05, "loss": 2.5516969680786135, "memory(GiB)": 72.85, "step": 21860, "token_acc": 0.45907473309608543, "train_speed(iter/s)": 0.6703 }, { "epoch": 0.9367636348057067, "grad_norm": 4.792230129241943, "learning_rate": 9.159379953929213e-05, "loss": 2.7438543319702147, "memory(GiB)": 72.85, "step": 21865, "token_acc": 0.44805194805194803, "train_speed(iter/s)": 0.670305 }, { "epoch": 0.9369778501349556, "grad_norm": 3.588176727294922, "learning_rate": 9.159006439718226e-05, "loss": 2.295747184753418, "memory(GiB)": 72.85, "step": 21870, "token_acc": 0.5141843971631206, "train_speed(iter/s)": 0.670312 }, { "epoch": 0.9371920654642046, "grad_norm": 3.5078322887420654, "learning_rate": 9.158632850162935e-05, "loss": 2.478569984436035, "memory(GiB)": 72.85, "step": 21875, "token_acc": 0.46875, "train_speed(iter/s)": 0.670315 }, { "epoch": 0.9374062807934536, "grad_norm": 
4.011059761047363, "learning_rate": 9.158259185270108e-05, "loss": 2.3364545822143556, "memory(GiB)": 72.85, "step": 21880, "token_acc": 0.55, "train_speed(iter/s)": 0.670359 }, { "epoch": 0.9376204961227026, "grad_norm": 3.2190988063812256, "learning_rate": 9.157885445046519e-05, "loss": 2.612650489807129, "memory(GiB)": 72.85, "step": 21885, "token_acc": 0.47191011235955055, "train_speed(iter/s)": 0.670392 }, { "epoch": 0.9378347114519515, "grad_norm": 3.7409768104553223, "learning_rate": 9.157511629498932e-05, "loss": 2.603751373291016, "memory(GiB)": 72.85, "step": 21890, "token_acc": 0.44966442953020136, "train_speed(iter/s)": 0.670381 }, { "epoch": 0.9380489267812004, "grad_norm": 3.2766804695129395, "learning_rate": 9.157137738634122e-05, "loss": 2.3243484497070312, "memory(GiB)": 72.85, "step": 21895, "token_acc": 0.49606299212598426, "train_speed(iter/s)": 0.670384 }, { "epoch": 0.9382631421104495, "grad_norm": 4.598373889923096, "learning_rate": 9.156763772458862e-05, "loss": 2.728291320800781, "memory(GiB)": 72.85, "step": 21900, "token_acc": 0.4519230769230769, "train_speed(iter/s)": 0.670385 }, { "epoch": 0.9384773574396984, "grad_norm": 4.975515842437744, "learning_rate": 9.156389730979928e-05, "loss": 2.648921012878418, "memory(GiB)": 72.85, "step": 21905, "token_acc": 0.4561933534743202, "train_speed(iter/s)": 0.670378 }, { "epoch": 0.9386915727689473, "grad_norm": 2.9917373657226562, "learning_rate": 9.156015614204094e-05, "loss": 2.4745647430419924, "memory(GiB)": 72.85, "step": 21910, "token_acc": 0.4788273615635179, "train_speed(iter/s)": 0.670402 }, { "epoch": 0.9389057880981964, "grad_norm": 4.159672737121582, "learning_rate": 9.155641422138139e-05, "loss": 2.5232912063598634, "memory(GiB)": 72.85, "step": 21915, "token_acc": 0.46779661016949153, "train_speed(iter/s)": 0.67039 }, { "epoch": 0.9391200034274453, "grad_norm": 3.380448818206787, "learning_rate": 9.15526715478884e-05, "loss": 2.4803089141845702, "memory(GiB)": 72.85, "step": 21920, 
"token_acc": 0.4375, "train_speed(iter/s)": 0.670376 }, { "epoch": 0.9393342187566942, "grad_norm": 4.482100963592529, "learning_rate": 9.15489281216298e-05, "loss": 2.1884624481201174, "memory(GiB)": 72.85, "step": 21925, "token_acc": 0.5, "train_speed(iter/s)": 0.670365 }, { "epoch": 0.9395484340859432, "grad_norm": 3.2952210903167725, "learning_rate": 9.154518394267338e-05, "loss": 2.523224449157715, "memory(GiB)": 72.85, "step": 21930, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.670383 }, { "epoch": 0.9397626494151922, "grad_norm": 4.0957231521606445, "learning_rate": 9.154143901108699e-05, "loss": 2.5459728240966797, "memory(GiB)": 72.85, "step": 21935, "token_acc": 0.47719298245614034, "train_speed(iter/s)": 0.670383 }, { "epoch": 0.9399768647444411, "grad_norm": 3.698870897293091, "learning_rate": 9.153769332693847e-05, "loss": 2.395962142944336, "memory(GiB)": 72.85, "step": 21940, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.670398 }, { "epoch": 0.9401910800736901, "grad_norm": 3.9076781272888184, "learning_rate": 9.153394689029566e-05, "loss": 2.615176963806152, "memory(GiB)": 72.85, "step": 21945, "token_acc": 0.4315352697095436, "train_speed(iter/s)": 0.670379 }, { "epoch": 0.940405295402939, "grad_norm": 7.767866134643555, "learning_rate": 9.153019970122643e-05, "loss": 2.489019012451172, "memory(GiB)": 72.85, "step": 21950, "token_acc": 0.48322147651006714, "train_speed(iter/s)": 0.670387 }, { "epoch": 0.940619510732188, "grad_norm": 4.008657932281494, "learning_rate": 9.15264517597987e-05, "loss": 2.2723104476928713, "memory(GiB)": 72.85, "step": 21955, "token_acc": 0.5226480836236934, "train_speed(iter/s)": 0.6704 }, { "epoch": 0.940833726061437, "grad_norm": 3.359523296356201, "learning_rate": 9.152270306608031e-05, "loss": 2.484615516662598, "memory(GiB)": 72.85, "step": 21960, "token_acc": 0.48656716417910445, "train_speed(iter/s)": 0.670437 }, { "epoch": 0.9410479413906859, "grad_norm": 3.482231855392456, 
"learning_rate": 9.151895362013922e-05, "loss": 2.4281896591186523, "memory(GiB)": 72.85, "step": 21965, "token_acc": 0.48299319727891155, "train_speed(iter/s)": 0.670438 }, { "epoch": 0.9412621567199349, "grad_norm": 4.095120429992676, "learning_rate": 9.151520342204334e-05, "loss": 2.8069103240966795, "memory(GiB)": 72.85, "step": 21970, "token_acc": 0.4394904458598726, "train_speed(iter/s)": 0.670422 }, { "epoch": 0.9414763720491839, "grad_norm": 3.747591733932495, "learning_rate": 9.151145247186061e-05, "loss": 2.6042238235473634, "memory(GiB)": 72.85, "step": 21975, "token_acc": 0.4744744744744745, "train_speed(iter/s)": 0.670446 }, { "epoch": 0.9416905873784328, "grad_norm": 3.658419609069824, "learning_rate": 9.150770076965895e-05, "loss": 2.6536331176757812, "memory(GiB)": 72.85, "step": 21980, "token_acc": 0.4586894586894587, "train_speed(iter/s)": 0.670442 }, { "epoch": 0.9419048027076817, "grad_norm": 4.79494047164917, "learning_rate": 9.15039483155064e-05, "loss": 2.8869667053222656, "memory(GiB)": 72.85, "step": 21985, "token_acc": 0.4027777777777778, "train_speed(iter/s)": 0.670421 }, { "epoch": 0.9421190180369308, "grad_norm": 3.025493621826172, "learning_rate": 9.150019510947086e-05, "loss": 2.517643356323242, "memory(GiB)": 72.85, "step": 21990, "token_acc": 0.49038461538461536, "train_speed(iter/s)": 0.670397 }, { "epoch": 0.9423332333661797, "grad_norm": 4.278468608856201, "learning_rate": 9.149644115162035e-05, "loss": 2.5327726364135743, "memory(GiB)": 72.85, "step": 21995, "token_acc": 0.4756554307116105, "train_speed(iter/s)": 0.670428 }, { "epoch": 0.9425474486954286, "grad_norm": 2.617295742034912, "learning_rate": 9.149268644202289e-05, "loss": 2.454793930053711, "memory(GiB)": 72.85, "step": 22000, "token_acc": 0.4935483870967742, "train_speed(iter/s)": 0.670443 }, { "epoch": 0.9425474486954286, "eval_loss": 2.0275659561157227, "eval_runtime": 17.4801, "eval_samples_per_second": 5.721, "eval_steps_per_second": 5.721, "eval_token_acc": 
0.48413793103448277, "step": 22000 }, { "epoch": 0.9427616640246776, "grad_norm": 3.331247568130493, "learning_rate": 9.148893098074649e-05, "loss": 2.6995277404785156, "memory(GiB)": 72.85, "step": 22005, "token_acc": 0.48151750972762647, "train_speed(iter/s)": 0.67005 }, { "epoch": 0.9429758793539266, "grad_norm": 4.107296943664551, "learning_rate": 9.148517476785918e-05, "loss": 2.6450992584228517, "memory(GiB)": 72.85, "step": 22010, "token_acc": 0.45907473309608543, "train_speed(iter/s)": 0.670051 }, { "epoch": 0.9431900946831755, "grad_norm": 8.348737716674805, "learning_rate": 9.148141780342903e-05, "loss": 2.6044668197631835, "memory(GiB)": 72.85, "step": 22015, "token_acc": 0.4605809128630705, "train_speed(iter/s)": 0.670063 }, { "epoch": 0.9434043100124245, "grad_norm": 4.031294345855713, "learning_rate": 9.147766008752407e-05, "loss": 2.6795833587646483, "memory(GiB)": 72.85, "step": 22020, "token_acc": 0.5075757575757576, "train_speed(iter/s)": 0.670056 }, { "epoch": 0.9436185253416735, "grad_norm": 2.9755911827087402, "learning_rate": 9.14739016202124e-05, "loss": 2.0523738861083984, "memory(GiB)": 72.85, "step": 22025, "token_acc": 0.5396825396825397, "train_speed(iter/s)": 0.670094 }, { "epoch": 0.9438327406709224, "grad_norm": 3.1351678371429443, "learning_rate": 9.14701424015621e-05, "loss": 2.4758987426757812, "memory(GiB)": 72.85, "step": 22030, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.670036 }, { "epoch": 0.9440469560001714, "grad_norm": 3.131349563598633, "learning_rate": 9.146638243164125e-05, "loss": 2.481821632385254, "memory(GiB)": 72.85, "step": 22035, "token_acc": 0.4879725085910653, "train_speed(iter/s)": 0.670052 }, { "epoch": 0.9442611713294203, "grad_norm": 3.475125312805176, "learning_rate": 9.1462621710518e-05, "loss": 2.4257450103759766, "memory(GiB)": 72.85, "step": 22040, "token_acc": 0.47307692307692306, "train_speed(iter/s)": 0.670063 }, { "epoch": 0.9444753866586693, "grad_norm": 3.638957977294922, 
"learning_rate": 9.145886023826044e-05, "loss": 2.529573440551758, "memory(GiB)": 72.85, "step": 22045, "token_acc": 0.47924528301886793, "train_speed(iter/s)": 0.670095 }, { "epoch": 0.9446896019879183, "grad_norm": 3.7121737003326416, "learning_rate": 9.145509801493677e-05, "loss": 2.5407970428466795, "memory(GiB)": 72.85, "step": 22050, "token_acc": 0.47335423197492166, "train_speed(iter/s)": 0.670082 }, { "epoch": 0.9449038173171672, "grad_norm": 3.427503824234009, "learning_rate": 9.145133504061509e-05, "loss": 2.544826889038086, "memory(GiB)": 72.85, "step": 22055, "token_acc": 0.49044585987261147, "train_speed(iter/s)": 0.670066 }, { "epoch": 0.9451180326464161, "grad_norm": 3.5909149646759033, "learning_rate": 9.14475713153636e-05, "loss": 2.2099720001220704, "memory(GiB)": 72.85, "step": 22060, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.670058 }, { "epoch": 0.9453322479756652, "grad_norm": 4.066627502441406, "learning_rate": 9.144380683925044e-05, "loss": 2.3400739669799804, "memory(GiB)": 72.85, "step": 22065, "token_acc": 0.5094339622641509, "train_speed(iter/s)": 0.670058 }, { "epoch": 0.9455464633049141, "grad_norm": 2.811908483505249, "learning_rate": 9.144004161234388e-05, "loss": 2.3698455810546877, "memory(GiB)": 72.85, "step": 22070, "token_acc": 0.5208333333333334, "train_speed(iter/s)": 0.670051 }, { "epoch": 0.945760678634163, "grad_norm": 4.359206676483154, "learning_rate": 9.143627563471209e-05, "loss": 2.577792167663574, "memory(GiB)": 72.85, "step": 22075, "token_acc": 0.45089285714285715, "train_speed(iter/s)": 0.670077 }, { "epoch": 0.9459748939634121, "grad_norm": 3.381427526473999, "learning_rate": 9.143250890642327e-05, "loss": 2.5083641052246093, "memory(GiB)": 72.85, "step": 22080, "token_acc": 0.5, "train_speed(iter/s)": 0.67011 }, { "epoch": 0.946189109292661, "grad_norm": 5.804192066192627, "learning_rate": 9.142874142754572e-05, "loss": 2.5154258728027346, "memory(GiB)": 72.85, "step": 22085, "token_acc": 
0.47280334728033474, "train_speed(iter/s)": 0.670106 }, { "epoch": 0.9464033246219099, "grad_norm": 3.815683126449585, "learning_rate": 9.142497319814764e-05, "loss": 2.5604740142822267, "memory(GiB)": 72.85, "step": 22090, "token_acc": 0.4751552795031056, "train_speed(iter/s)": 0.670096 }, { "epoch": 0.9466175399511589, "grad_norm": 3.8307013511657715, "learning_rate": 9.142120421829729e-05, "loss": 2.4027210235595704, "memory(GiB)": 72.85, "step": 22095, "token_acc": 0.4842105263157895, "train_speed(iter/s)": 0.670122 }, { "epoch": 0.9468317552804079, "grad_norm": 4.1741251945495605, "learning_rate": 9.141743448806301e-05, "loss": 2.160361671447754, "memory(GiB)": 72.85, "step": 22100, "token_acc": 0.5435540069686411, "train_speed(iter/s)": 0.670137 }, { "epoch": 0.9470459706096568, "grad_norm": 3.636101245880127, "learning_rate": 9.141366400751301e-05, "loss": 2.386992835998535, "memory(GiB)": 72.85, "step": 22105, "token_acc": 0.4963235294117647, "train_speed(iter/s)": 0.670146 }, { "epoch": 0.9472601859389058, "grad_norm": 3.737105131149292, "learning_rate": 9.140989277671567e-05, "loss": 2.1794574737548826, "memory(GiB)": 72.85, "step": 22110, "token_acc": 0.5418326693227091, "train_speed(iter/s)": 0.670118 }, { "epoch": 0.9474744012681547, "grad_norm": 3.213210105895996, "learning_rate": 9.140612079573927e-05, "loss": 2.5023836135864257, "memory(GiB)": 72.85, "step": 22115, "token_acc": 0.506896551724138, "train_speed(iter/s)": 0.670143 }, { "epoch": 0.9476886165974037, "grad_norm": 3.1517839431762695, "learning_rate": 9.140234806465214e-05, "loss": 2.6557424545288084, "memory(GiB)": 72.85, "step": 22120, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.670149 }, { "epoch": 0.9479028319266527, "grad_norm": 3.697126626968384, "learning_rate": 9.139857458352263e-05, "loss": 2.4638189315795898, "memory(GiB)": 72.85, "step": 22125, "token_acc": 0.4721311475409836, "train_speed(iter/s)": 0.670147 }, { "epoch": 0.9481170472559016, "grad_norm": 
3.2994396686553955, "learning_rate": 9.139480035241912e-05, "loss": 2.465438461303711, "memory(GiB)": 72.85, "step": 22130, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.670129 }, { "epoch": 0.9483312625851505, "grad_norm": 2.849717140197754, "learning_rate": 9.139102537140996e-05, "loss": 2.120479965209961, "memory(GiB)": 72.85, "step": 22135, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.670163 }, { "epoch": 0.9485454779143996, "grad_norm": 3.253892421722412, "learning_rate": 9.138724964056355e-05, "loss": 2.4859031677246093, "memory(GiB)": 72.85, "step": 22140, "token_acc": 0.46496815286624205, "train_speed(iter/s)": 0.670162 }, { "epoch": 0.9487596932436485, "grad_norm": 4.253243923187256, "learning_rate": 9.13834731599483e-05, "loss": 2.3606599807739257, "memory(GiB)": 72.85, "step": 22145, "token_acc": 0.4965034965034965, "train_speed(iter/s)": 0.670163 }, { "epoch": 0.9489739085728974, "grad_norm": 3.0786991119384766, "learning_rate": 9.13796959296326e-05, "loss": 2.2784656524658202, "memory(GiB)": 72.85, "step": 22150, "token_acc": 0.5269230769230769, "train_speed(iter/s)": 0.670148 }, { "epoch": 0.9491881239021465, "grad_norm": 6.1486053466796875, "learning_rate": 9.137591794968489e-05, "loss": 2.368860054016113, "memory(GiB)": 72.85, "step": 22155, "token_acc": 0.48698884758364314, "train_speed(iter/s)": 0.670124 }, { "epoch": 0.9494023392313954, "grad_norm": 3.659550189971924, "learning_rate": 9.137213922017363e-05, "loss": 2.466214179992676, "memory(GiB)": 72.85, "step": 22160, "token_acc": 0.49122807017543857, "train_speed(iter/s)": 0.670108 }, { "epoch": 0.9496165545606443, "grad_norm": 4.582035064697266, "learning_rate": 9.136835974116724e-05, "loss": 2.484465789794922, "memory(GiB)": 72.85, "step": 22165, "token_acc": 0.4707792207792208, "train_speed(iter/s)": 0.670105 }, { "epoch": 0.9498307698898933, "grad_norm": 3.8173534870147705, "learning_rate": 9.136457951273423e-05, "loss": 2.6193389892578125, "memory(GiB)": 72.85, 
"step": 22170, "token_acc": 0.458041958041958, "train_speed(iter/s)": 0.670101 }, { "epoch": 0.9500449852191423, "grad_norm": 3.475773811340332, "learning_rate": 9.136079853494304e-05, "loss": 2.4528270721435548, "memory(GiB)": 72.85, "step": 22175, "token_acc": 0.48639455782312924, "train_speed(iter/s)": 0.670119 }, { "epoch": 0.9502592005483912, "grad_norm": 2.952626943588257, "learning_rate": 9.135701680786218e-05, "loss": 2.391707420349121, "memory(GiB)": 72.85, "step": 22180, "token_acc": 0.4797297297297297, "train_speed(iter/s)": 0.670128 }, { "epoch": 0.9504734158776402, "grad_norm": 3.777817726135254, "learning_rate": 9.135323433156018e-05, "loss": 2.50325870513916, "memory(GiB)": 72.85, "step": 22185, "token_acc": 0.4778481012658228, "train_speed(iter/s)": 0.670159 }, { "epoch": 0.9506876312068891, "grad_norm": 4.4581379890441895, "learning_rate": 9.134945110610554e-05, "loss": 2.3624265670776365, "memory(GiB)": 72.85, "step": 22190, "token_acc": 0.4882943143812709, "train_speed(iter/s)": 0.670178 }, { "epoch": 0.9509018465361381, "grad_norm": 4.0548882484436035, "learning_rate": 9.134566713156679e-05, "loss": 2.3987483978271484, "memory(GiB)": 72.85, "step": 22195, "token_acc": 0.5119453924914675, "train_speed(iter/s)": 0.670188 }, { "epoch": 0.9511160618653871, "grad_norm": 3.918518304824829, "learning_rate": 9.134188240801251e-05, "loss": 2.155735969543457, "memory(GiB)": 72.85, "step": 22200, "token_acc": 0.503448275862069, "train_speed(iter/s)": 0.67017 }, { "epoch": 0.951330277194636, "grad_norm": 3.231337070465088, "learning_rate": 9.133809693551125e-05, "loss": 2.365540885925293, "memory(GiB)": 72.85, "step": 22205, "token_acc": 0.5075757575757576, "train_speed(iter/s)": 0.670196 }, { "epoch": 0.951544492523885, "grad_norm": 5.083272457122803, "learning_rate": 9.133431071413158e-05, "loss": 2.420216751098633, "memory(GiB)": 72.85, "step": 22210, "token_acc": 0.5129032258064516, "train_speed(iter/s)": 0.670222 }, { "epoch": 0.951758707853134, 
"grad_norm": 3.5866799354553223, "learning_rate": 9.13305237439421e-05, "loss": 2.3219837188720702, "memory(GiB)": 72.85, "step": 22215, "token_acc": 0.5527426160337553, "train_speed(iter/s)": 0.670258 }, { "epoch": 0.9519729231823829, "grad_norm": 3.0939621925354004, "learning_rate": 9.13267360250114e-05, "loss": 2.278137969970703, "memory(GiB)": 72.85, "step": 22220, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.670224 }, { "epoch": 0.952187138511632, "grad_norm": 3.8709566593170166, "learning_rate": 9.132294755740814e-05, "loss": 2.784561347961426, "memory(GiB)": 72.85, "step": 22225, "token_acc": 0.4463087248322148, "train_speed(iter/s)": 0.670235 }, { "epoch": 0.9524013538408809, "grad_norm": 3.2458550930023193, "learning_rate": 9.131915834120088e-05, "loss": 2.433064079284668, "memory(GiB)": 72.85, "step": 22230, "token_acc": 0.4876325088339223, "train_speed(iter/s)": 0.670272 }, { "epoch": 0.9526155691701298, "grad_norm": 3.4524455070495605, "learning_rate": 9.131536837645833e-05, "loss": 2.5797128677368164, "memory(GiB)": 72.85, "step": 22235, "token_acc": 0.4577922077922078, "train_speed(iter/s)": 0.670281 }, { "epoch": 0.9528297844993788, "grad_norm": 3.7843515872955322, "learning_rate": 9.131157766324912e-05, "loss": 2.2130159378051757, "memory(GiB)": 72.85, "step": 22240, "token_acc": 0.49206349206349204, "train_speed(iter/s)": 0.670257 }, { "epoch": 0.9530439998286278, "grad_norm": 5.316688060760498, "learning_rate": 9.130778620164193e-05, "loss": 2.372295379638672, "memory(GiB)": 72.85, "step": 22245, "token_acc": 0.4483870967741935, "train_speed(iter/s)": 0.670265 }, { "epoch": 0.9532582151578767, "grad_norm": 5.220573425292969, "learning_rate": 9.130399399170544e-05, "loss": 2.3618146896362306, "memory(GiB)": 72.85, "step": 22250, "token_acc": 0.48598130841121495, "train_speed(iter/s)": 0.67029 }, { "epoch": 0.9534724304871257, "grad_norm": 4.261163711547852, "learning_rate": 9.130020103350836e-05, "loss": 2.5158992767333985, 
"memory(GiB)": 72.85, "step": 22255, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.670253 }, { "epoch": 0.9536866458163746, "grad_norm": 3.5698294639587402, "learning_rate": 9.12964073271194e-05, "loss": 2.282703971862793, "memory(GiB)": 72.85, "step": 22260, "token_acc": 0.49477351916376305, "train_speed(iter/s)": 0.67023 }, { "epoch": 0.9539008611456236, "grad_norm": 4.832814693450928, "learning_rate": 9.129261287260726e-05, "loss": 2.35264892578125, "memory(GiB)": 72.85, "step": 22265, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.670213 }, { "epoch": 0.9541150764748726, "grad_norm": 3.390570640563965, "learning_rate": 9.128881767004072e-05, "loss": 2.5387313842773436, "memory(GiB)": 72.85, "step": 22270, "token_acc": 0.5241379310344828, "train_speed(iter/s)": 0.670231 }, { "epoch": 0.9543292918041215, "grad_norm": 3.9219000339508057, "learning_rate": 9.12850217194885e-05, "loss": 2.3844425201416017, "memory(GiB)": 72.85, "step": 22275, "token_acc": 0.5408560311284046, "train_speed(iter/s)": 0.670237 }, { "epoch": 0.9545435071333704, "grad_norm": 3.984739065170288, "learning_rate": 9.12812250210194e-05, "loss": 2.3354610443115233, "memory(GiB)": 72.85, "step": 22280, "token_acc": 0.46886446886446886, "train_speed(iter/s)": 0.670244 }, { "epoch": 0.9547577224626195, "grad_norm": 3.0101029872894287, "learning_rate": 9.127742757470217e-05, "loss": 2.2779800415039064, "memory(GiB)": 72.85, "step": 22285, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.670216 }, { "epoch": 0.9549719377918684, "grad_norm": 3.319117784500122, "learning_rate": 9.127362938060563e-05, "loss": 2.6604116439819334, "memory(GiB)": 72.85, "step": 22290, "token_acc": 0.4451219512195122, "train_speed(iter/s)": 0.670229 }, { "epoch": 0.9551861531211173, "grad_norm": 3.632667303085327, "learning_rate": 9.126983043879857e-05, "loss": 2.665925216674805, "memory(GiB)": 72.85, "step": 22295, "token_acc": 0.4554140127388535, "train_speed(iter/s)": 0.670226 }, { 
"epoch": 0.9554003684503664, "grad_norm": 3.2163374423980713, "learning_rate": 9.126603074934982e-05, "loss": 2.271941375732422, "memory(GiB)": 72.85, "step": 22300, "token_acc": 0.5298245614035088, "train_speed(iter/s)": 0.67024 }, { "epoch": 0.9556145837796153, "grad_norm": 2.805938959121704, "learning_rate": 9.126223031232822e-05, "loss": 2.3640180587768556, "memory(GiB)": 72.85, "step": 22305, "token_acc": 0.43197278911564624, "train_speed(iter/s)": 0.670259 }, { "epoch": 0.9558287991088642, "grad_norm": 4.328995704650879, "learning_rate": 9.125842912780259e-05, "loss": 2.1890769958496095, "memory(GiB)": 72.85, "step": 22310, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.670275 }, { "epoch": 0.9560430144381132, "grad_norm": 3.924445629119873, "learning_rate": 9.125462719584183e-05, "loss": 2.161892318725586, "memory(GiB)": 72.85, "step": 22315, "token_acc": 0.5604838709677419, "train_speed(iter/s)": 0.670274 }, { "epoch": 0.9562572297673622, "grad_norm": 4.234912395477295, "learning_rate": 9.125082451651479e-05, "loss": 2.583066940307617, "memory(GiB)": 72.85, "step": 22320, "token_acc": 0.4418604651162791, "train_speed(iter/s)": 0.670297 }, { "epoch": 0.9564714450966111, "grad_norm": 10.379645347595215, "learning_rate": 9.124702108989036e-05, "loss": 2.5701162338256838, "memory(GiB)": 72.85, "step": 22325, "token_acc": 0.4731182795698925, "train_speed(iter/s)": 0.670279 }, { "epoch": 0.9566856604258601, "grad_norm": 4.2297282218933105, "learning_rate": 9.124321691603747e-05, "loss": 2.4107446670532227, "memory(GiB)": 72.85, "step": 22330, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.670258 }, { "epoch": 0.956899875755109, "grad_norm": 5.448421478271484, "learning_rate": 9.123941199502501e-05, "loss": 2.0864444732666017, "memory(GiB)": 72.85, "step": 22335, "token_acc": 0.5330882352941176, "train_speed(iter/s)": 0.670235 }, { "epoch": 0.957114091084358, "grad_norm": 4.229190349578857, "learning_rate": 9.12356063269219e-05, "loss": 
2.524475860595703, "memory(GiB)": 72.85, "step": 22340, "token_acc": 0.4651898734177215, "train_speed(iter/s)": 0.670224 }, { "epoch": 0.957328306413607, "grad_norm": 3.454861640930176, "learning_rate": 9.123179991179711e-05, "loss": 2.6568008422851563, "memory(GiB)": 72.85, "step": 22345, "token_acc": 0.46511627906976744, "train_speed(iter/s)": 0.670245 }, { "epoch": 0.9575425217428559, "grad_norm": 4.261241912841797, "learning_rate": 9.122799274971959e-05, "loss": 2.361363410949707, "memory(GiB)": 72.85, "step": 22350, "token_acc": 0.4680232558139535, "train_speed(iter/s)": 0.670244 }, { "epoch": 0.9577567370721048, "grad_norm": 3.988821029663086, "learning_rate": 9.12241848407583e-05, "loss": 2.4587425231933593, "memory(GiB)": 72.85, "step": 22355, "token_acc": 0.44545454545454544, "train_speed(iter/s)": 0.670257 }, { "epoch": 0.9579709524013539, "grad_norm": 3.324772596359253, "learning_rate": 9.122037618498225e-05, "loss": 2.165094566345215, "memory(GiB)": 72.85, "step": 22360, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.67023 }, { "epoch": 0.9581851677306028, "grad_norm": 3.0840766429901123, "learning_rate": 9.12165667824604e-05, "loss": 2.5256656646728515, "memory(GiB)": 72.85, "step": 22365, "token_acc": 0.42758620689655175, "train_speed(iter/s)": 0.670241 }, { "epoch": 0.9583993830598517, "grad_norm": 3.362562894821167, "learning_rate": 9.121275663326178e-05, "loss": 2.3990440368652344, "memory(GiB)": 72.85, "step": 22370, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.670257 }, { "epoch": 0.9586135983891008, "grad_norm": 3.4356400966644287, "learning_rate": 9.120894573745542e-05, "loss": 2.2560047149658202, "memory(GiB)": 72.85, "step": 22375, "token_acc": 0.5, "train_speed(iter/s)": 0.670227 }, { "epoch": 0.9588278137183497, "grad_norm": 3.1070120334625244, "learning_rate": 9.120513409511033e-05, "loss": 2.712571716308594, "memory(GiB)": 72.85, "step": 22380, "token_acc": 0.4189189189189189, "train_speed(iter/s)": 0.670231 }, { 
"epoch": 0.9590420290475986, "grad_norm": 4.400808811187744, "learning_rate": 9.12013217062956e-05, "loss": 2.7714187622070314, "memory(GiB)": 72.85, "step": 22385, "token_acc": 0.4489795918367347, "train_speed(iter/s)": 0.670205 }, { "epoch": 0.9592562443768476, "grad_norm": 2.776123285293579, "learning_rate": 9.119750857108027e-05, "loss": 2.033346939086914, "memory(GiB)": 72.85, "step": 22390, "token_acc": 0.5551181102362205, "train_speed(iter/s)": 0.670223 }, { "epoch": 0.9594704597060966, "grad_norm": 3.7583916187286377, "learning_rate": 9.119369468953344e-05, "loss": 2.564706039428711, "memory(GiB)": 72.85, "step": 22395, "token_acc": 0.4684385382059801, "train_speed(iter/s)": 0.670197 }, { "epoch": 0.9596846750353455, "grad_norm": 3.331238031387329, "learning_rate": 9.118988006172418e-05, "loss": 2.5190427780151365, "memory(GiB)": 72.85, "step": 22400, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.670216 }, { "epoch": 0.9598988903645945, "grad_norm": 4.375800132751465, "learning_rate": 9.11860646877216e-05, "loss": 2.563999366760254, "memory(GiB)": 72.85, "step": 22405, "token_acc": 0.46688741721854304, "train_speed(iter/s)": 0.670197 }, { "epoch": 0.9601131056938434, "grad_norm": 2.551848888397217, "learning_rate": 9.118224856759482e-05, "loss": 2.415664291381836, "memory(GiB)": 72.85, "step": 22410, "token_acc": 0.5137614678899083, "train_speed(iter/s)": 0.670204 }, { "epoch": 0.9603273210230924, "grad_norm": 3.969700813293457, "learning_rate": 9.117843170141297e-05, "loss": 2.41232852935791, "memory(GiB)": 72.85, "step": 22415, "token_acc": 0.5019011406844106, "train_speed(iter/s)": 0.67019 }, { "epoch": 0.9605415363523414, "grad_norm": 3.0748250484466553, "learning_rate": 9.117461408924521e-05, "loss": 2.4530094146728514, "memory(GiB)": 72.85, "step": 22420, "token_acc": 0.5059880239520959, "train_speed(iter/s)": 0.670192 }, { "epoch": 0.9607557516815903, "grad_norm": 4.55377721786499, "learning_rate": 9.11707957311607e-05, "loss": 
2.4172927856445314, "memory(GiB)": 72.85, "step": 22425, "token_acc": 0.48013245033112584, "train_speed(iter/s)": 0.670169 }, { "epoch": 0.9609699670108393, "grad_norm": 3.5805747509002686, "learning_rate": 9.116697662722859e-05, "loss": 2.49448299407959, "memory(GiB)": 72.85, "step": 22430, "token_acc": 0.5060240963855421, "train_speed(iter/s)": 0.670181 }, { "epoch": 0.9611841823400883, "grad_norm": 4.395144939422607, "learning_rate": 9.116315677751807e-05, "loss": 2.4341299057006838, "memory(GiB)": 72.85, "step": 22435, "token_acc": 0.4728682170542636, "train_speed(iter/s)": 0.670223 }, { "epoch": 0.9613983976693372, "grad_norm": 3.723111391067505, "learning_rate": 9.115933618209838e-05, "loss": 2.427523612976074, "memory(GiB)": 72.85, "step": 22440, "token_acc": 0.4801223241590214, "train_speed(iter/s)": 0.670215 }, { "epoch": 0.9616126129985861, "grad_norm": 3.3024802207946777, "learning_rate": 9.115551484103869e-05, "loss": 2.6554710388183596, "memory(GiB)": 72.85, "step": 22445, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.670224 }, { "epoch": 0.9618268283278352, "grad_norm": 4.280091285705566, "learning_rate": 9.115169275440825e-05, "loss": 2.5084033966064454, "memory(GiB)": 72.85, "step": 22450, "token_acc": 0.48348348348348347, "train_speed(iter/s)": 0.670211 }, { "epoch": 0.9620410436570841, "grad_norm": 3.5184292793273926, "learning_rate": 9.114786992227629e-05, "loss": 2.419570541381836, "memory(GiB)": 72.85, "step": 22455, "token_acc": 0.47249190938511326, "train_speed(iter/s)": 0.670247 }, { "epoch": 0.962255258986333, "grad_norm": 4.78488302230835, "learning_rate": 9.114404634471205e-05, "loss": 2.4411880493164064, "memory(GiB)": 72.85, "step": 22460, "token_acc": 0.49337748344370863, "train_speed(iter/s)": 0.670265 }, { "epoch": 0.962469474315582, "grad_norm": 2.95344877243042, "learning_rate": 9.114022202178483e-05, "loss": 2.3524091720581053, "memory(GiB)": 72.85, "step": 22465, "token_acc": 0.476038338658147, "train_speed(iter/s)": 
0.670292 }, { "epoch": 0.962683689644831, "grad_norm": 3.0188956260681152, "learning_rate": 9.113639695356388e-05, "loss": 2.354191780090332, "memory(GiB)": 72.85, "step": 22470, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.670311 }, { "epoch": 0.9628979049740799, "grad_norm": 3.414060115814209, "learning_rate": 9.113257114011852e-05, "loss": 2.405953598022461, "memory(GiB)": 72.85, "step": 22475, "token_acc": 0.4935483870967742, "train_speed(iter/s)": 0.67033 }, { "epoch": 0.9631121203033289, "grad_norm": 4.405368804931641, "learning_rate": 9.112874458151805e-05, "loss": 2.529093933105469, "memory(GiB)": 72.85, "step": 22480, "token_acc": 0.45686900958466453, "train_speed(iter/s)": 0.670369 }, { "epoch": 0.9633263356325779, "grad_norm": 3.759761095046997, "learning_rate": 9.112491727783179e-05, "loss": 2.49066276550293, "memory(GiB)": 72.85, "step": 22485, "token_acc": 0.47035573122529645, "train_speed(iter/s)": 0.670379 }, { "epoch": 0.9635405509618268, "grad_norm": 5.602633476257324, "learning_rate": 9.112108922912907e-05, "loss": 2.480093765258789, "memory(GiB)": 72.85, "step": 22490, "token_acc": 0.46551724137931033, "train_speed(iter/s)": 0.670378 }, { "epoch": 0.9637547662910758, "grad_norm": 3.4461281299591064, "learning_rate": 9.111726043547926e-05, "loss": 2.5111858367919924, "memory(GiB)": 72.85, "step": 22495, "token_acc": 0.47878787878787876, "train_speed(iter/s)": 0.670389 }, { "epoch": 0.9639689816203247, "grad_norm": 3.1503427028656006, "learning_rate": 9.111343089695168e-05, "loss": 2.378135871887207, "memory(GiB)": 72.85, "step": 22500, "token_acc": 0.45426829268292684, "train_speed(iter/s)": 0.670351 }, { "epoch": 0.9639689816203247, "eval_loss": 2.0239920616149902, "eval_runtime": 17.3219, "eval_samples_per_second": 5.773, "eval_steps_per_second": 5.773, "eval_token_acc": 0.5036818851251841, "step": 22500 }, { "epoch": 0.9641831969495737, "grad_norm": 3.5143346786499023, "learning_rate": 9.110960061361575e-05, "loss": 
2.411225128173828, "memory(GiB)": 72.85, "step": 22505, "token_acc": 0.49794238683127573, "train_speed(iter/s)": 0.669956 }, { "epoch": 0.9643974122788227, "grad_norm": 4.336343288421631, "learning_rate": 9.110576958554085e-05, "loss": 2.277334213256836, "memory(GiB)": 72.85, "step": 22510, "token_acc": 0.4948805460750853, "train_speed(iter/s)": 0.669982 }, { "epoch": 0.9646116276080716, "grad_norm": 3.431123733520508, "learning_rate": 9.110193781279635e-05, "loss": 2.5021366119384765, "memory(GiB)": 72.85, "step": 22515, "token_acc": 0.4695121951219512, "train_speed(iter/s)": 0.669922 }, { "epoch": 0.9648258429373205, "grad_norm": 3.032919406890869, "learning_rate": 9.109810529545171e-05, "loss": 2.4795644760131834, "memory(GiB)": 72.85, "step": 22520, "token_acc": 0.46788990825688076, "train_speed(iter/s)": 0.669943 }, { "epoch": 0.9650400582665696, "grad_norm": 4.288566589355469, "learning_rate": 9.109427203357632e-05, "loss": 2.558938217163086, "memory(GiB)": 72.85, "step": 22525, "token_acc": 0.46996466431095407, "train_speed(iter/s)": 0.669955 }, { "epoch": 0.9652542735958185, "grad_norm": 4.218137741088867, "learning_rate": 9.109043802723967e-05, "loss": 2.24482421875, "memory(GiB)": 72.85, "step": 22530, "token_acc": 0.505338078291815, "train_speed(iter/s)": 0.669962 }, { "epoch": 0.9654684889250674, "grad_norm": 4.103758811950684, "learning_rate": 9.108660327651116e-05, "loss": 2.64119873046875, "memory(GiB)": 72.85, "step": 22535, "token_acc": 0.5058823529411764, "train_speed(iter/s)": 0.669985 }, { "epoch": 0.9656827042543165, "grad_norm": 4.541025161743164, "learning_rate": 9.10827677814603e-05, "loss": 2.3040229797363283, "memory(GiB)": 72.85, "step": 22540, "token_acc": 0.5261437908496732, "train_speed(iter/s)": 0.669985 }, { "epoch": 0.9658969195835654, "grad_norm": 2.737985610961914, "learning_rate": 9.107893154215656e-05, "loss": 2.392146110534668, "memory(GiB)": 72.85, "step": 22545, "token_acc": 0.5064102564102564, "train_speed(iter/s)": 0.669984 
}, { "epoch": 0.9661111349128143, "grad_norm": 3.187033176422119, "learning_rate": 9.107509455866945e-05, "loss": 2.444240760803223, "memory(GiB)": 72.85, "step": 22550, "token_acc": 0.4652777777777778, "train_speed(iter/s)": 0.669989 }, { "epoch": 0.9663253502420633, "grad_norm": 3.9944658279418945, "learning_rate": 9.107125683106848e-05, "loss": 2.451568603515625, "memory(GiB)": 72.85, "step": 22555, "token_acc": 0.43636363636363634, "train_speed(iter/s)": 0.670028 }, { "epoch": 0.9665395655713123, "grad_norm": 3.565894603729248, "learning_rate": 9.106741835942314e-05, "loss": 2.3765985488891603, "memory(GiB)": 72.85, "step": 22560, "token_acc": 0.4946236559139785, "train_speed(iter/s)": 0.670051 }, { "epoch": 0.9667537809005613, "grad_norm": 3.1581592559814453, "learning_rate": 9.106357914380299e-05, "loss": 2.1385065078735352, "memory(GiB)": 72.85, "step": 22565, "token_acc": 0.5271565495207667, "train_speed(iter/s)": 0.670045 }, { "epoch": 0.9669679962298102, "grad_norm": 4.421570777893066, "learning_rate": 9.105973918427759e-05, "loss": 2.3042041778564455, "memory(GiB)": 72.85, "step": 22570, "token_acc": 0.4738675958188153, "train_speed(iter/s)": 0.670037 }, { "epoch": 0.9671822115590591, "grad_norm": 4.071243762969971, "learning_rate": 9.105589848091651e-05, "loss": 2.123065376281738, "memory(GiB)": 72.85, "step": 22575, "token_acc": 0.5141700404858299, "train_speed(iter/s)": 0.670097 }, { "epoch": 0.9673964268883082, "grad_norm": 3.904764175415039, "learning_rate": 9.105205703378931e-05, "loss": 2.2234119415283202, "memory(GiB)": 72.85, "step": 22580, "token_acc": 0.524390243902439, "train_speed(iter/s)": 0.670113 }, { "epoch": 0.9676106422175571, "grad_norm": 4.274250507354736, "learning_rate": 9.104821484296559e-05, "loss": 2.297161865234375, "memory(GiB)": 72.85, "step": 22585, "token_acc": 0.5059288537549407, "train_speed(iter/s)": 0.670087 }, { "epoch": 0.967824857546806, "grad_norm": 3.943835496902466, "learning_rate": 9.104437190851493e-05, "loss": 
2.281549263000488, "memory(GiB)": 72.85, "step": 22590, "token_acc": 0.5083612040133779, "train_speed(iter/s)": 0.670081 }, { "epoch": 0.9680390728760551, "grad_norm": 4.481269836425781, "learning_rate": 9.104052823050699e-05, "loss": 2.402079963684082, "memory(GiB)": 72.85, "step": 22595, "token_acc": 0.4889705882352941, "train_speed(iter/s)": 0.670104 }, { "epoch": 0.968253288205304, "grad_norm": 3.276689052581787, "learning_rate": 9.103668380901138e-05, "loss": 2.4438961029052733, "memory(GiB)": 72.85, "step": 22600, "token_acc": 0.5032467532467533, "train_speed(iter/s)": 0.670138 }, { "epoch": 0.9684675035345529, "grad_norm": 3.511566162109375, "learning_rate": 9.103283864409775e-05, "loss": 2.6972835540771483, "memory(GiB)": 72.85, "step": 22605, "token_acc": 0.44482758620689655, "train_speed(iter/s)": 0.670078 }, { "epoch": 0.9686817188638019, "grad_norm": 3.3430187702178955, "learning_rate": 9.102899273583575e-05, "loss": 2.4045671463012694, "memory(GiB)": 72.85, "step": 22610, "token_acc": 0.5, "train_speed(iter/s)": 0.670092 }, { "epoch": 0.9688959341930509, "grad_norm": 6.776773929595947, "learning_rate": 9.102514608429507e-05, "loss": 2.6103464126586915, "memory(GiB)": 72.85, "step": 22615, "token_acc": 0.42567567567567566, "train_speed(iter/s)": 0.670086 }, { "epoch": 0.9691101495222998, "grad_norm": 4.19447135925293, "learning_rate": 9.102129868954537e-05, "loss": 2.477442741394043, "memory(GiB)": 72.85, "step": 22620, "token_acc": 0.4684385382059801, "train_speed(iter/s)": 0.670039 }, { "epoch": 0.9693243648515488, "grad_norm": 5.73632287979126, "learning_rate": 9.101745055165635e-05, "loss": 2.2660675048828125, "memory(GiB)": 72.85, "step": 22625, "token_acc": 0.5502183406113537, "train_speed(iter/s)": 0.670059 }, { "epoch": 0.9695385801807977, "grad_norm": 3.8093881607055664, "learning_rate": 9.101360167069777e-05, "loss": 2.5443626403808595, "memory(GiB)": 72.85, "step": 22630, "token_acc": 0.45751633986928103, "train_speed(iter/s)": 0.670105 }, { 
"epoch": 0.9697527955100467, "grad_norm": 3.8592655658721924, "learning_rate": 9.100975204673929e-05, "loss": 2.361690711975098, "memory(GiB)": 72.85, "step": 22635, "token_acc": 0.4673913043478261, "train_speed(iter/s)": 0.670091 }, { "epoch": 0.9699670108392957, "grad_norm": 3.874363899230957, "learning_rate": 9.10059016798507e-05, "loss": 2.358974647521973, "memory(GiB)": 72.85, "step": 22640, "token_acc": 0.5215827338129496, "train_speed(iter/s)": 0.670083 }, { "epoch": 0.9701812261685446, "grad_norm": 3.3055193424224854, "learning_rate": 9.100205057010174e-05, "loss": 2.5077278137207033, "memory(GiB)": 72.85, "step": 22645, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.670126 }, { "epoch": 0.9703954414977936, "grad_norm": 3.6832120418548584, "learning_rate": 9.099819871756215e-05, "loss": 2.637652587890625, "memory(GiB)": 72.85, "step": 22650, "token_acc": 0.48059701492537316, "train_speed(iter/s)": 0.670096 }, { "epoch": 0.9706096568270426, "grad_norm": 3.1303741931915283, "learning_rate": 9.099434612230175e-05, "loss": 2.511269950866699, "memory(GiB)": 72.85, "step": 22655, "token_acc": 0.4608433734939759, "train_speed(iter/s)": 0.670133 }, { "epoch": 0.9708238721562915, "grad_norm": 3.844036340713501, "learning_rate": 9.099049278439029e-05, "loss": 2.4965726852416994, "memory(GiB)": 72.85, "step": 22660, "token_acc": 0.5, "train_speed(iter/s)": 0.67013 }, { "epoch": 0.9710380874855404, "grad_norm": 3.673778772354126, "learning_rate": 9.098663870389763e-05, "loss": 2.801707077026367, "memory(GiB)": 72.85, "step": 22665, "token_acc": 0.4594594594594595, "train_speed(iter/s)": 0.670149 }, { "epoch": 0.9712523028147895, "grad_norm": 3.6847786903381348, "learning_rate": 9.098278388089354e-05, "loss": 2.5409706115722654, "memory(GiB)": 72.85, "step": 22670, "token_acc": 0.46598639455782315, "train_speed(iter/s)": 0.670148 }, { "epoch": 0.9714665181440384, "grad_norm": 3.5909745693206787, "learning_rate": 9.097892831544789e-05, "loss": 
2.766721725463867, "memory(GiB)": 72.85, "step": 22675, "token_acc": 0.4483985765124555, "train_speed(iter/s)": 0.670143 }, { "epoch": 0.9716807334732873, "grad_norm": 4.115965366363525, "learning_rate": 9.097507200763052e-05, "loss": 2.310406303405762, "memory(GiB)": 72.85, "step": 22680, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.670143 }, { "epoch": 0.9718949488025364, "grad_norm": 3.7222957611083984, "learning_rate": 9.097121495751126e-05, "loss": 2.2669883728027345, "memory(GiB)": 72.85, "step": 22685, "token_acc": 0.5381944444444444, "train_speed(iter/s)": 0.670186 }, { "epoch": 0.9721091641317853, "grad_norm": 3.8642935752868652, "learning_rate": 9.096735716516001e-05, "loss": 2.3861467361450197, "memory(GiB)": 72.85, "step": 22690, "token_acc": 0.48846153846153845, "train_speed(iter/s)": 0.67016 }, { "epoch": 0.9723233794610342, "grad_norm": 3.439105987548828, "learning_rate": 9.096349863064666e-05, "loss": 2.4870601654052735, "memory(GiB)": 72.85, "step": 22695, "token_acc": 0.4962121212121212, "train_speed(iter/s)": 0.670177 }, { "epoch": 0.9725375947902832, "grad_norm": 4.341305255889893, "learning_rate": 9.09596393540411e-05, "loss": 2.4056663513183594, "memory(GiB)": 72.85, "step": 22700, "token_acc": 0.490272373540856, "train_speed(iter/s)": 0.670166 }, { "epoch": 0.9727518101195322, "grad_norm": 4.436673164367676, "learning_rate": 9.095577933541326e-05, "loss": 2.422800636291504, "memory(GiB)": 72.85, "step": 22705, "token_acc": 0.5231788079470199, "train_speed(iter/s)": 0.670152 }, { "epoch": 0.9729660254487811, "grad_norm": 3.127800703048706, "learning_rate": 9.095191857483305e-05, "loss": 2.2682397842407225, "memory(GiB)": 72.85, "step": 22710, "token_acc": 0.512987012987013, "train_speed(iter/s)": 0.670124 }, { "epoch": 0.9731802407780301, "grad_norm": 3.7702949047088623, "learning_rate": 9.094805707237041e-05, "loss": 2.4046545028686523, "memory(GiB)": 72.85, "step": 22715, "token_acc": 0.48736462093862815, "train_speed(iter/s)": 
0.670144 }, { "epoch": 0.973394456107279, "grad_norm": 2.6942360401153564, "learning_rate": 9.094419482809534e-05, "loss": 2.034708595275879, "memory(GiB)": 72.85, "step": 22720, "token_acc": 0.5207547169811321, "train_speed(iter/s)": 0.670112 }, { "epoch": 0.973608671436528, "grad_norm": 3.4647104740142822, "learning_rate": 9.094033184207774e-05, "loss": 2.343598175048828, "memory(GiB)": 72.85, "step": 22725, "token_acc": 0.4888888888888889, "train_speed(iter/s)": 0.670089 }, { "epoch": 0.973822886765777, "grad_norm": 7.458486557006836, "learning_rate": 9.093646811438762e-05, "loss": 2.397966766357422, "memory(GiB)": 72.85, "step": 22730, "token_acc": 0.4708029197080292, "train_speed(iter/s)": 0.670091 }, { "epoch": 0.9740371020950259, "grad_norm": 3.1650264263153076, "learning_rate": 9.0932603645095e-05, "loss": 2.4265697479248045, "memory(GiB)": 72.85, "step": 22735, "token_acc": 0.4831081081081081, "train_speed(iter/s)": 0.670077 }, { "epoch": 0.9742513174242748, "grad_norm": 5.107834815979004, "learning_rate": 9.092873843426986e-05, "loss": 2.4837724685668947, "memory(GiB)": 72.85, "step": 22740, "token_acc": 0.490625, "train_speed(iter/s)": 0.670106 }, { "epoch": 0.9744655327535239, "grad_norm": 4.220930099487305, "learning_rate": 9.092487248198222e-05, "loss": 2.598301315307617, "memory(GiB)": 72.85, "step": 22745, "token_acc": 0.43529411764705883, "train_speed(iter/s)": 0.670081 }, { "epoch": 0.9746797480827728, "grad_norm": 4.187096118927002, "learning_rate": 9.092100578830214e-05, "loss": 2.2343589782714846, "memory(GiB)": 72.85, "step": 22750, "token_acc": 0.5140845070422535, "train_speed(iter/s)": 0.670027 }, { "epoch": 0.9748939634120217, "grad_norm": 4.049689769744873, "learning_rate": 9.091713835329964e-05, "loss": 2.041005325317383, "memory(GiB)": 72.85, "step": 22755, "token_acc": 0.5363984674329502, "train_speed(iter/s)": 0.670018 }, { "epoch": 0.9751081787412708, "grad_norm": 4.257724761962891, "learning_rate": 9.091327017704479e-05, "loss": 
2.1912918090820312, "memory(GiB)": 72.85, "step": 22760, "token_acc": 0.48161764705882354, "train_speed(iter/s)": 0.670029 }, { "epoch": 0.9753223940705197, "grad_norm": 4.168087482452393, "learning_rate": 9.090940125960769e-05, "loss": 2.5339258193969725, "memory(GiB)": 72.85, "step": 22765, "token_acc": 0.48028673835125446, "train_speed(iter/s)": 0.670016 }, { "epoch": 0.9755366093997686, "grad_norm": 3.8549673557281494, "learning_rate": 9.090553160105839e-05, "loss": 2.3984766006469727, "memory(GiB)": 72.85, "step": 22770, "token_acc": 0.4639175257731959, "train_speed(iter/s)": 0.670021 }, { "epoch": 0.9757508247290176, "grad_norm": 4.553198337554932, "learning_rate": 9.090166120146702e-05, "loss": 2.594826889038086, "memory(GiB)": 72.85, "step": 22775, "token_acc": 0.4777777777777778, "train_speed(iter/s)": 0.670031 }, { "epoch": 0.9759650400582666, "grad_norm": 3.3257555961608887, "learning_rate": 9.08977900609037e-05, "loss": 2.414104461669922, "memory(GiB)": 72.85, "step": 22780, "token_acc": 0.4642857142857143, "train_speed(iter/s)": 0.670044 }, { "epoch": 0.9761792553875155, "grad_norm": 3.6928250789642334, "learning_rate": 9.089391817943853e-05, "loss": 2.3650083541870117, "memory(GiB)": 72.85, "step": 22785, "token_acc": 0.49469964664310956, "train_speed(iter/s)": 0.670074 }, { "epoch": 0.9763934707167645, "grad_norm": 3.3333864212036133, "learning_rate": 9.089004555714168e-05, "loss": 2.3068058013916017, "memory(GiB)": 72.85, "step": 22790, "token_acc": 0.4542372881355932, "train_speed(iter/s)": 0.670095 }, { "epoch": 0.9766076860460134, "grad_norm": 2.7551119327545166, "learning_rate": 9.08861721940833e-05, "loss": 2.330870246887207, "memory(GiB)": 72.85, "step": 22795, "token_acc": 0.4847560975609756, "train_speed(iter/s)": 0.670096 }, { "epoch": 0.9768219013752624, "grad_norm": 4.246376991271973, "learning_rate": 9.088229809033355e-05, "loss": 2.6948997497558596, "memory(GiB)": 72.85, "step": 22800, "token_acc": 0.4114285714285714, 
"train_speed(iter/s)": 0.67012 }, { "epoch": 0.9770361167045114, "grad_norm": 3.171186923980713, "learning_rate": 9.087842324596262e-05, "loss": 2.5669885635375977, "memory(GiB)": 72.85, "step": 22805, "token_acc": 0.4882154882154882, "train_speed(iter/s)": 0.670122 }, { "epoch": 0.9772503320337603, "grad_norm": 3.4633278846740723, "learning_rate": 9.087454766104071e-05, "loss": 2.228283500671387, "memory(GiB)": 72.85, "step": 22810, "token_acc": 0.5054545454545455, "train_speed(iter/s)": 0.670122 }, { "epoch": 0.9774645473630093, "grad_norm": 3.603775978088379, "learning_rate": 9.087067133563803e-05, "loss": 2.464664840698242, "memory(GiB)": 72.85, "step": 22815, "token_acc": 0.49280575539568344, "train_speed(iter/s)": 0.670133 }, { "epoch": 0.9776787626922583, "grad_norm": 3.053187370300293, "learning_rate": 9.086679426982479e-05, "loss": 2.4006092071533205, "memory(GiB)": 72.85, "step": 22820, "token_acc": 0.5681818181818182, "train_speed(iter/s)": 0.670098 }, { "epoch": 0.9778929780215072, "grad_norm": 3.302705764770508, "learning_rate": 9.086291646367123e-05, "loss": 2.3281200408935545, "memory(GiB)": 72.85, "step": 22825, "token_acc": 0.4855072463768116, "train_speed(iter/s)": 0.670125 }, { "epoch": 0.9781071933507561, "grad_norm": 5.200552463531494, "learning_rate": 9.085903791724761e-05, "loss": 2.420596694946289, "memory(GiB)": 72.85, "step": 22830, "token_acc": 0.49063670411985016, "train_speed(iter/s)": 0.67013 }, { "epoch": 0.9783214086800052, "grad_norm": 3.7121334075927734, "learning_rate": 9.085515863062419e-05, "loss": 2.5876930236816404, "memory(GiB)": 72.85, "step": 22835, "token_acc": 0.4353312302839117, "train_speed(iter/s)": 0.670109 }, { "epoch": 0.9785356240092541, "grad_norm": 2.8736572265625, "learning_rate": 9.085127860387126e-05, "loss": 2.4183961868286135, "memory(GiB)": 72.85, "step": 22840, "token_acc": 0.4931506849315068, "train_speed(iter/s)": 0.67013 }, { "epoch": 0.978749839338503, "grad_norm": 4.574516296386719, "learning_rate": 
9.084739783705909e-05, "loss": 2.3842342376708983, "memory(GiB)": 72.85, "step": 22845, "token_acc": 0.4674329501915709, "train_speed(iter/s)": 0.670128 }, { "epoch": 0.978964054667752, "grad_norm": 3.8152318000793457, "learning_rate": 9.084351633025798e-05, "loss": 2.3983396530151366, "memory(GiB)": 72.85, "step": 22850, "token_acc": 0.4934640522875817, "train_speed(iter/s)": 0.670148 }, { "epoch": 0.979178269997001, "grad_norm": 4.531711578369141, "learning_rate": 9.083963408353825e-05, "loss": 2.5679317474365235, "memory(GiB)": 72.85, "step": 22855, "token_acc": 0.484375, "train_speed(iter/s)": 0.670162 }, { "epoch": 0.9793924853262499, "grad_norm": 3.6201629638671875, "learning_rate": 9.083575109697027e-05, "loss": 2.3313320159912108, "memory(GiB)": 72.85, "step": 22860, "token_acc": 0.48562300319488816, "train_speed(iter/s)": 0.670181 }, { "epoch": 0.9796067006554989, "grad_norm": 4.0611772537231445, "learning_rate": 9.083186737062432e-05, "loss": 2.57796688079834, "memory(GiB)": 72.85, "step": 22865, "token_acc": 0.47896440129449835, "train_speed(iter/s)": 0.670201 }, { "epoch": 0.9798209159847479, "grad_norm": 4.318239212036133, "learning_rate": 9.082798290457081e-05, "loss": 2.580569839477539, "memory(GiB)": 72.85, "step": 22870, "token_acc": 0.5, "train_speed(iter/s)": 0.670203 }, { "epoch": 0.9800351313139968, "grad_norm": 4.576657295227051, "learning_rate": 9.082409769888008e-05, "loss": 2.5502864837646486, "memory(GiB)": 72.85, "step": 22875, "token_acc": 0.4889705882352941, "train_speed(iter/s)": 0.670235 }, { "epoch": 0.9802493466432458, "grad_norm": 4.722431659698486, "learning_rate": 9.082021175362252e-05, "loss": 2.8270530700683594, "memory(GiB)": 72.85, "step": 22880, "token_acc": 0.46984126984126984, "train_speed(iter/s)": 0.670223 }, { "epoch": 0.9804635619724947, "grad_norm": 5.002728462219238, "learning_rate": 9.081632506886854e-05, "loss": 2.2266427993774416, "memory(GiB)": 72.85, "step": 22885, "token_acc": 0.5168918918918919, 
"train_speed(iter/s)": 0.670208 }, { "epoch": 0.9806777773017437, "grad_norm": 5.829839706420898, "learning_rate": 9.081243764468854e-05, "loss": 2.4487024307250977, "memory(GiB)": 72.85, "step": 22890, "token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.670231 }, { "epoch": 0.9808919926309927, "grad_norm": 4.153658866882324, "learning_rate": 9.080854948115295e-05, "loss": 2.5346628189086915, "memory(GiB)": 72.85, "step": 22895, "token_acc": 0.48639455782312924, "train_speed(iter/s)": 0.670231 }, { "epoch": 0.9811062079602416, "grad_norm": 3.7200541496276855, "learning_rate": 9.080466057833221e-05, "loss": 2.4627891540527345, "memory(GiB)": 72.85, "step": 22900, "token_acc": 0.4897959183673469, "train_speed(iter/s)": 0.670252 }, { "epoch": 0.9813204232894907, "grad_norm": 3.782588481903076, "learning_rate": 9.080077093629675e-05, "loss": 2.4454532623291017, "memory(GiB)": 72.85, "step": 22905, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.670274 }, { "epoch": 0.9815346386187396, "grad_norm": 4.509589195251465, "learning_rate": 9.079688055511707e-05, "loss": 2.514362335205078, "memory(GiB)": 72.85, "step": 22910, "token_acc": 0.484472049689441, "train_speed(iter/s)": 0.670295 }, { "epoch": 0.9817488539479885, "grad_norm": 3.760549545288086, "learning_rate": 9.079298943486361e-05, "loss": 2.377471160888672, "memory(GiB)": 72.85, "step": 22915, "token_acc": 0.49473684210526314, "train_speed(iter/s)": 0.670333 }, { "epoch": 0.9819630692772375, "grad_norm": 5.1023406982421875, "learning_rate": 9.078909757560687e-05, "loss": 2.4864978790283203, "memory(GiB)": 72.85, "step": 22920, "token_acc": 0.4807017543859649, "train_speed(iter/s)": 0.670333 }, { "epoch": 0.9821772846064865, "grad_norm": 3.2972240447998047, "learning_rate": 9.07852049774174e-05, "loss": 2.2615699768066406, "memory(GiB)": 72.85, "step": 22925, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.670368 }, { "epoch": 0.9823914999357354, "grad_norm": 4.629004955291748, 
"learning_rate": 9.078131164036565e-05, "loss": 2.504877281188965, "memory(GiB)": 72.85, "step": 22930, "token_acc": 0.46598639455782315, "train_speed(iter/s)": 0.670372 }, { "epoch": 0.9826057152649844, "grad_norm": 3.757141351699829, "learning_rate": 9.07774175645222e-05, "loss": 2.3496822357177733, "memory(GiB)": 72.85, "step": 22935, "token_acc": 0.4931506849315068, "train_speed(iter/s)": 0.67037 }, { "epoch": 0.9828199305942333, "grad_norm": 3.1410341262817383, "learning_rate": 9.077352274995757e-05, "loss": 2.2707170486450194, "memory(GiB)": 72.85, "step": 22940, "token_acc": 0.4909090909090909, "train_speed(iter/s)": 0.670358 }, { "epoch": 0.9830341459234823, "grad_norm": 2.7746388912200928, "learning_rate": 9.076962719674233e-05, "loss": 2.478908920288086, "memory(GiB)": 72.85, "step": 22945, "token_acc": 0.4882943143812709, "train_speed(iter/s)": 0.670378 }, { "epoch": 0.9832483612527313, "grad_norm": 5.018842697143555, "learning_rate": 9.076573090494704e-05, "loss": 2.4367563247680666, "memory(GiB)": 72.85, "step": 22950, "token_acc": 0.49344978165938863, "train_speed(iter/s)": 0.670396 }, { "epoch": 0.9834625765819802, "grad_norm": 3.9854071140289307, "learning_rate": 9.076183387464232e-05, "loss": 2.4996076583862306, "memory(GiB)": 72.85, "step": 22955, "token_acc": 0.5016393442622951, "train_speed(iter/s)": 0.670416 }, { "epoch": 0.9836767919112291, "grad_norm": 5.03887414932251, "learning_rate": 9.075793610589871e-05, "loss": 2.736362838745117, "memory(GiB)": 72.85, "step": 22960, "token_acc": 0.4342105263157895, "train_speed(iter/s)": 0.670413 }, { "epoch": 0.9838910072404782, "grad_norm": 2.6615688800811768, "learning_rate": 9.075403759878687e-05, "loss": 2.3037494659423827, "memory(GiB)": 72.85, "step": 22965, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.670426 }, { "epoch": 0.9841052225697271, "grad_norm": 3.5964932441711426, "learning_rate": 9.075013835337742e-05, "loss": 2.4583717346191407, "memory(GiB)": 72.85, "step": 22970, 
"token_acc": 0.43670886075949367, "train_speed(iter/s)": 0.670452 }, { "epoch": 0.984319437898976, "grad_norm": 4.1253342628479, "learning_rate": 9.074623836974097e-05, "loss": 2.5419349670410156, "memory(GiB)": 72.85, "step": 22975, "token_acc": 0.46794871794871795, "train_speed(iter/s)": 0.670478 }, { "epoch": 0.9845336532282251, "grad_norm": 3.343578815460205, "learning_rate": 9.074233764794818e-05, "loss": 2.479625701904297, "memory(GiB)": 72.85, "step": 22980, "token_acc": 0.48464163822525597, "train_speed(iter/s)": 0.670471 }, { "epoch": 0.984747868557474, "grad_norm": 3.821505546569824, "learning_rate": 9.073843618806974e-05, "loss": 2.2998882293701173, "memory(GiB)": 72.85, "step": 22985, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.670456 }, { "epoch": 0.9849620838867229, "grad_norm": 3.772998571395874, "learning_rate": 9.073453399017631e-05, "loss": 2.6155324935913087, "memory(GiB)": 72.85, "step": 22990, "token_acc": 0.47540983606557374, "train_speed(iter/s)": 0.670447 }, { "epoch": 0.9851762992159719, "grad_norm": 3.5538158416748047, "learning_rate": 9.073063105433859e-05, "loss": 2.2530490875244142, "memory(GiB)": 72.85, "step": 22995, "token_acc": 0.5018181818181818, "train_speed(iter/s)": 0.670443 }, { "epoch": 0.9853905145452209, "grad_norm": 3.2823429107666016, "learning_rate": 9.072672738062726e-05, "loss": 2.617134666442871, "memory(GiB)": 72.85, "step": 23000, "token_acc": 0.4684385382059801, "train_speed(iter/s)": 0.670453 }, { "epoch": 0.9853905145452209, "eval_loss": 1.9776326417922974, "eval_runtime": 17.4831, "eval_samples_per_second": 5.72, "eval_steps_per_second": 5.72, "eval_token_acc": 0.5164179104477612, "step": 23000 }, { "epoch": 0.9856047298744698, "grad_norm": 3.9273557662963867, "learning_rate": 9.072282296911308e-05, "loss": 2.6335487365722656, "memory(GiB)": 72.85, "step": 23005, "token_acc": 0.49143468950749464, "train_speed(iter/s)": 0.670059 }, { "epoch": 0.9858189452037188, "grad_norm": 4.042629718780518, 
"learning_rate": 9.071891781986675e-05, "loss": 2.2042705535888674, "memory(GiB)": 72.85, "step": 23010, "token_acc": 0.5272206303724928, "train_speed(iter/s)": 0.670068 }, { "epoch": 0.9860331605329677, "grad_norm": 3.5154905319213867, "learning_rate": 9.071501193295903e-05, "loss": 2.5058218002319337, "memory(GiB)": 72.85, "step": 23015, "token_acc": 0.45396825396825397, "train_speed(iter/s)": 0.670102 }, { "epoch": 0.9862473758622167, "grad_norm": 3.7338173389434814, "learning_rate": 9.071110530846067e-05, "loss": 2.3343526840209963, "memory(GiB)": 72.85, "step": 23020, "token_acc": 0.4945054945054945, "train_speed(iter/s)": 0.670076 }, { "epoch": 0.9864615911914657, "grad_norm": 3.4439940452575684, "learning_rate": 9.070719794644245e-05, "loss": 2.5598644256591796, "memory(GiB)": 72.85, "step": 23025, "token_acc": 0.5068493150684932, "train_speed(iter/s)": 0.670097 }, { "epoch": 0.9866758065207146, "grad_norm": 3.3405210971832275, "learning_rate": 9.070328984697516e-05, "loss": 2.3271457672119142, "memory(GiB)": 72.85, "step": 23030, "token_acc": 0.5018867924528302, "train_speed(iter/s)": 0.670101 }, { "epoch": 0.9868900218499636, "grad_norm": 3.9145195484161377, "learning_rate": 9.069938101012958e-05, "loss": 2.537692642211914, "memory(GiB)": 72.85, "step": 23035, "token_acc": 0.43278688524590164, "train_speed(iter/s)": 0.670097 }, { "epoch": 0.9871042371792126, "grad_norm": 3.780580759048462, "learning_rate": 9.069547143597655e-05, "loss": 2.4938865661621095, "memory(GiB)": 72.85, "step": 23040, "token_acc": 0.4620938628158845, "train_speed(iter/s)": 0.670082 }, { "epoch": 0.9873184525084615, "grad_norm": 3.7019295692443848, "learning_rate": 9.069156112458685e-05, "loss": 2.3604251861572267, "memory(GiB)": 72.85, "step": 23045, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.670079 }, { "epoch": 0.9875326678377104, "grad_norm": 3.799929141998291, "learning_rate": 9.068765007603137e-05, "loss": 2.298074150085449, "memory(GiB)": 72.85, "step": 23050, 
"token_acc": 0.5321100917431193, "train_speed(iter/s)": 0.670066 }, { "epoch": 0.9877468831669595, "grad_norm": 3.6756410598754883, "learning_rate": 9.068373829038095e-05, "loss": 2.4760818481445312, "memory(GiB)": 72.85, "step": 23055, "token_acc": 0.4928571428571429, "train_speed(iter/s)": 0.670054 }, { "epoch": 0.9879610984962084, "grad_norm": 3.8298375606536865, "learning_rate": 9.067982576770644e-05, "loss": 2.607314109802246, "memory(GiB)": 72.85, "step": 23060, "token_acc": 0.46464646464646464, "train_speed(iter/s)": 0.67008 }, { "epoch": 0.9881753138254573, "grad_norm": 4.728804588317871, "learning_rate": 9.067591250807872e-05, "loss": 2.599417495727539, "memory(GiB)": 72.85, "step": 23065, "token_acc": 0.44919786096256686, "train_speed(iter/s)": 0.670092 }, { "epoch": 0.9883895291547063, "grad_norm": 4.372117519378662, "learning_rate": 9.067199851156869e-05, "loss": 2.9440900802612306, "memory(GiB)": 72.85, "step": 23070, "token_acc": 0.43097643097643096, "train_speed(iter/s)": 0.670086 }, { "epoch": 0.9886037444839553, "grad_norm": 4.4477691650390625, "learning_rate": 9.066808377824725e-05, "loss": 2.2871047973632814, "memory(GiB)": 72.85, "step": 23075, "token_acc": 0.5503597122302158, "train_speed(iter/s)": 0.670076 }, { "epoch": 0.9888179598132042, "grad_norm": 3.5345780849456787, "learning_rate": 9.066416830818531e-05, "loss": 1.7819013595581055, "memory(GiB)": 72.85, "step": 23080, "token_acc": 0.5952380952380952, "train_speed(iter/s)": 0.6701 }, { "epoch": 0.9890321751424532, "grad_norm": 2.9890081882476807, "learning_rate": 9.066025210145384e-05, "loss": 2.2606887817382812, "memory(GiB)": 72.85, "step": 23085, "token_acc": 0.463768115942029, "train_speed(iter/s)": 0.670139 }, { "epoch": 0.9892463904717022, "grad_norm": 3.030910015106201, "learning_rate": 9.065633515812376e-05, "loss": 2.315658950805664, "memory(GiB)": 72.85, "step": 23090, "token_acc": 0.47435897435897434, "train_speed(iter/s)": 0.670149 }, { "epoch": 0.9894606058009511, 
"grad_norm": 3.9188382625579834, "learning_rate": 9.0652417478266e-05, "loss": 2.325149917602539, "memory(GiB)": 72.85, "step": 23095, "token_acc": 0.48046875, "train_speed(iter/s)": 0.670126 }, { "epoch": 0.9896748211302001, "grad_norm": 3.5082571506500244, "learning_rate": 9.064849906195159e-05, "loss": 2.362357521057129, "memory(GiB)": 72.85, "step": 23100, "token_acc": 0.5016611295681063, "train_speed(iter/s)": 0.670102 }, { "epoch": 0.989889036459449, "grad_norm": 4.562411308288574, "learning_rate": 9.064457990925149e-05, "loss": 2.208911895751953, "memory(GiB)": 72.85, "step": 23105, "token_acc": 0.5365079365079365, "train_speed(iter/s)": 0.670111 }, { "epoch": 0.990103251788698, "grad_norm": 4.121547222137451, "learning_rate": 9.064066002023668e-05, "loss": 2.2240066528320312, "memory(GiB)": 72.85, "step": 23110, "token_acc": 0.5104602510460251, "train_speed(iter/s)": 0.670127 }, { "epoch": 0.990317467117947, "grad_norm": 3.118565082550049, "learning_rate": 9.06367393949782e-05, "loss": 2.5131557464599608, "memory(GiB)": 72.85, "step": 23115, "token_acc": 0.4874551971326165, "train_speed(iter/s)": 0.670126 }, { "epoch": 0.9905316824471959, "grad_norm": 3.1797540187835693, "learning_rate": 9.063281803354707e-05, "loss": 2.5548582077026367, "memory(GiB)": 72.85, "step": 23120, "token_acc": 0.4581005586592179, "train_speed(iter/s)": 0.670133 }, { "epoch": 0.9907458977764448, "grad_norm": 4.2426371574401855, "learning_rate": 9.062889593601432e-05, "loss": 2.726740074157715, "memory(GiB)": 72.85, "step": 23125, "token_acc": 0.45018450184501846, "train_speed(iter/s)": 0.670101 }, { "epoch": 0.9909601131056939, "grad_norm": 3.0944676399230957, "learning_rate": 9.0624973102451e-05, "loss": 2.2846240997314453, "memory(GiB)": 72.85, "step": 23130, "token_acc": 0.5073529411764706, "train_speed(iter/s)": 0.670088 }, { "epoch": 0.9911743284349428, "grad_norm": 3.594496488571167, "learning_rate": 9.062104953292819e-05, "loss": 2.561855697631836, "memory(GiB)": 72.85, 
"step": 23135, "token_acc": 0.4537037037037037, "train_speed(iter/s)": 0.670106 }, { "epoch": 0.9913885437641917, "grad_norm": 4.167648792266846, "learning_rate": 9.061712522751696e-05, "loss": 2.480657386779785, "memory(GiB)": 72.85, "step": 23140, "token_acc": 0.4631578947368421, "train_speed(iter/s)": 0.670119 }, { "epoch": 0.9916027590934408, "grad_norm": 3.925546884536743, "learning_rate": 9.06132001862884e-05, "loss": 2.305967330932617, "memory(GiB)": 72.85, "step": 23145, "token_acc": 0.5201612903225806, "train_speed(iter/s)": 0.670059 }, { "epoch": 0.9918169744226897, "grad_norm": 3.963623523712158, "learning_rate": 9.060927440931362e-05, "loss": 2.532781410217285, "memory(GiB)": 72.85, "step": 23150, "token_acc": 0.4969512195121951, "train_speed(iter/s)": 0.670082 }, { "epoch": 0.9920311897519386, "grad_norm": 4.504964351654053, "learning_rate": 9.060534789666374e-05, "loss": 2.6244335174560547, "memory(GiB)": 72.85, "step": 23155, "token_acc": 0.4792332268370607, "train_speed(iter/s)": 0.670106 }, { "epoch": 0.9922454050811876, "grad_norm": 4.829727649688721, "learning_rate": 9.06014206484099e-05, "loss": 2.339894676208496, "memory(GiB)": 72.85, "step": 23160, "token_acc": 0.5080645161290323, "train_speed(iter/s)": 0.670089 }, { "epoch": 0.9924596204104366, "grad_norm": 3.542539358139038, "learning_rate": 9.059749266462324e-05, "loss": 2.2351634979248045, "memory(GiB)": 72.85, "step": 23165, "token_acc": 0.515527950310559, "train_speed(iter/s)": 0.670088 }, { "epoch": 0.9926738357396855, "grad_norm": 3.2613754272460938, "learning_rate": 9.05935639453749e-05, "loss": 2.53098201751709, "memory(GiB)": 72.85, "step": 23170, "token_acc": 0.46688741721854304, "train_speed(iter/s)": 0.670109 }, { "epoch": 0.9928880510689345, "grad_norm": 3.682408332824707, "learning_rate": 9.058963449073607e-05, "loss": 2.7114530563354493, "memory(GiB)": 72.85, "step": 23175, "token_acc": 0.43354430379746833, "train_speed(iter/s)": 0.670135 }, { "epoch": 0.9931022663981834, 
"grad_norm": 4.96393346786499, "learning_rate": 9.058570430077795e-05, "loss": 2.199036979675293, "memory(GiB)": 72.85, "step": 23180, "token_acc": 0.4959016393442623, "train_speed(iter/s)": 0.670139 }, { "epoch": 0.9933164817274324, "grad_norm": 4.079585552215576, "learning_rate": 9.058177337557172e-05, "loss": 2.44228572845459, "memory(GiB)": 72.85, "step": 23185, "token_acc": 0.5134228187919463, "train_speed(iter/s)": 0.670143 }, { "epoch": 0.9935306970566814, "grad_norm": 3.8215537071228027, "learning_rate": 9.057784171518861e-05, "loss": 2.301752281188965, "memory(GiB)": 72.85, "step": 23190, "token_acc": 0.49393939393939396, "train_speed(iter/s)": 0.670168 }, { "epoch": 0.9937449123859303, "grad_norm": 3.1103997230529785, "learning_rate": 9.057390931969981e-05, "loss": 2.381563186645508, "memory(GiB)": 72.85, "step": 23195, "token_acc": 0.5122950819672131, "train_speed(iter/s)": 0.670178 }, { "epoch": 0.9939591277151792, "grad_norm": 3.1564550399780273, "learning_rate": 9.056997618917659e-05, "loss": 2.137060356140137, "memory(GiB)": 72.85, "step": 23200, "token_acc": 0.5239616613418531, "train_speed(iter/s)": 0.670161 }, { "epoch": 0.9941733430444283, "grad_norm": 4.543132781982422, "learning_rate": 9.056604232369019e-05, "loss": 2.541748046875, "memory(GiB)": 72.85, "step": 23205, "token_acc": 0.5087108013937283, "train_speed(iter/s)": 0.670205 }, { "epoch": 0.9943875583736772, "grad_norm": 3.4016733169555664, "learning_rate": 9.056210772331188e-05, "loss": 2.326010513305664, "memory(GiB)": 72.85, "step": 23210, "token_acc": 0.5098684210526315, "train_speed(iter/s)": 0.670197 }, { "epoch": 0.9946017737029261, "grad_norm": 3.5309548377990723, "learning_rate": 9.055817238811295e-05, "loss": 2.439906883239746, "memory(GiB)": 72.85, "step": 23215, "token_acc": 0.5020746887966805, "train_speed(iter/s)": 0.670211 }, { "epoch": 0.9948159890321752, "grad_norm": 3.4416322708129883, "learning_rate": 9.055423631816466e-05, "loss": 2.6227039337158202, "memory(GiB)": 
72.85, "step": 23220, "token_acc": 0.4668769716088328, "train_speed(iter/s)": 0.670215 }, { "epoch": 0.9950302043614241, "grad_norm": 3.516765594482422, "learning_rate": 9.055029951353835e-05, "loss": 2.6839229583740236, "memory(GiB)": 72.85, "step": 23225, "token_acc": 0.4486301369863014, "train_speed(iter/s)": 0.670196 }, { "epoch": 0.995244419690673, "grad_norm": 3.3401577472686768, "learning_rate": 9.054636197430533e-05, "loss": 2.4139019012451173, "memory(GiB)": 72.85, "step": 23230, "token_acc": 0.5, "train_speed(iter/s)": 0.670195 }, { "epoch": 0.995458635019922, "grad_norm": 3.8647115230560303, "learning_rate": 9.054242370053691e-05, "loss": 2.4760404586791993, "memory(GiB)": 72.85, "step": 23235, "token_acc": 0.4884488448844885, "train_speed(iter/s)": 0.670205 }, { "epoch": 0.995672850349171, "grad_norm": 3.1189491748809814, "learning_rate": 9.053848469230446e-05, "loss": 2.626285171508789, "memory(GiB)": 72.85, "step": 23240, "token_acc": 0.4487534626038781, "train_speed(iter/s)": 0.670196 }, { "epoch": 0.99588706567842, "grad_norm": 4.014514446258545, "learning_rate": 9.053454494967935e-05, "loss": 2.6304676055908205, "memory(GiB)": 72.85, "step": 23245, "token_acc": 0.4623955431754875, "train_speed(iter/s)": 0.670209 }, { "epoch": 0.9961012810076689, "grad_norm": 3.696443796157837, "learning_rate": 9.053060447273291e-05, "loss": 2.4965885162353514, "memory(GiB)": 72.85, "step": 23250, "token_acc": 0.5069124423963134, "train_speed(iter/s)": 0.6702 }, { "epoch": 0.9963154963369178, "grad_norm": 4.487699508666992, "learning_rate": 9.052666326153656e-05, "loss": 2.2256839752197264, "memory(GiB)": 72.85, "step": 23255, "token_acc": 0.5, "train_speed(iter/s)": 0.670207 }, { "epoch": 0.9965297116661669, "grad_norm": 3.4927546977996826, "learning_rate": 9.052272131616168e-05, "loss": 2.2704273223876954, "memory(GiB)": 72.85, "step": 23260, "token_acc": 0.4884488448844885, "train_speed(iter/s)": 0.670249 }, { "epoch": 0.9967439269954158, "grad_norm": 
5.351614952087402, "learning_rate": 9.051877863667969e-05, "loss": 2.7273616790771484, "memory(GiB)": 72.85, "step": 23265, "token_acc": 0.426056338028169, "train_speed(iter/s)": 0.670283 }, { "epoch": 0.9969581423246647, "grad_norm": 3.442117691040039, "learning_rate": 9.051483522316202e-05, "loss": 2.3968765258789064, "memory(GiB)": 72.85, "step": 23270, "token_acc": 0.5016722408026756, "train_speed(iter/s)": 0.670262 }, { "epoch": 0.9971723576539138, "grad_norm": 3.451460838317871, "learning_rate": 9.05108910756801e-05, "loss": 2.6483245849609376, "memory(GiB)": 72.85, "step": 23275, "token_acc": 0.4701492537313433, "train_speed(iter/s)": 0.670272 }, { "epoch": 0.9973865729831627, "grad_norm": 3.178690195083618, "learning_rate": 9.050694619430539e-05, "loss": 2.260745620727539, "memory(GiB)": 72.85, "step": 23280, "token_acc": 0.49363057324840764, "train_speed(iter/s)": 0.670282 }, { "epoch": 0.9976007883124116, "grad_norm": 3.2720680236816406, "learning_rate": 9.050300057910936e-05, "loss": 2.742686080932617, "memory(GiB)": 72.85, "step": 23285, "token_acc": 0.4636363636363636, "train_speed(iter/s)": 0.670311 }, { "epoch": 0.9978150036416606, "grad_norm": 5.959604740142822, "learning_rate": 9.049905423016347e-05, "loss": 2.593385696411133, "memory(GiB)": 72.85, "step": 23290, "token_acc": 0.4962121212121212, "train_speed(iter/s)": 0.670309 }, { "epoch": 0.9980292189709096, "grad_norm": 3.1537203788757324, "learning_rate": 9.049510714753922e-05, "loss": 2.5537628173828124, "memory(GiB)": 72.85, "step": 23295, "token_acc": 0.45671641791044776, "train_speed(iter/s)": 0.670333 }, { "epoch": 0.9982434343001585, "grad_norm": 3.887732982635498, "learning_rate": 9.049115933130811e-05, "loss": 2.7229598999023437, "memory(GiB)": 72.85, "step": 23300, "token_acc": 0.40390879478827363, "train_speed(iter/s)": 0.670321 }, { "epoch": 0.9984576496294075, "grad_norm": 3.1045620441436768, "learning_rate": 9.048721078154168e-05, "loss": 2.3892641067504883, "memory(GiB)": 72.85, 
"step": 23305, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.670305 }, { "epoch": 0.9986718649586565, "grad_norm": 3.4666953086853027, "learning_rate": 9.048326149831143e-05, "loss": 2.406372833251953, "memory(GiB)": 72.85, "step": 23310, "token_acc": 0.5074183976261127, "train_speed(iter/s)": 0.670268 }, { "epoch": 0.9988860802879054, "grad_norm": 4.74826717376709, "learning_rate": 9.047931148168894e-05, "loss": 2.108066749572754, "memory(GiB)": 72.85, "step": 23315, "token_acc": 0.5570175438596491, "train_speed(iter/s)": 0.670282 }, { "epoch": 0.9991002956171544, "grad_norm": 4.315427780151367, "learning_rate": 9.047536073174573e-05, "loss": 2.4804386138916015, "memory(GiB)": 72.85, "step": 23320, "token_acc": 0.46319018404907975, "train_speed(iter/s)": 0.67028 }, { "epoch": 0.9993145109464033, "grad_norm": 3.5688018798828125, "learning_rate": 9.047140924855342e-05, "loss": 2.4200885772705076, "memory(GiB)": 72.85, "step": 23325, "token_acc": 0.49101796407185627, "train_speed(iter/s)": 0.670229 }, { "epoch": 0.9995287262756523, "grad_norm": 3.8521230220794678, "learning_rate": 9.046745703218356e-05, "loss": 2.521519088745117, "memory(GiB)": 72.85, "step": 23330, "token_acc": 0.436426116838488, "train_speed(iter/s)": 0.670215 }, { "epoch": 0.9997429416049013, "grad_norm": 4.567031383514404, "learning_rate": 9.046350408270772e-05, "loss": 2.433522415161133, "memory(GiB)": 72.85, "step": 23335, "token_acc": 0.4523809523809524, "train_speed(iter/s)": 0.670222 }, { "epoch": 0.9999571569341502, "grad_norm": 4.675999164581299, "learning_rate": 9.045955040019758e-05, "loss": 2.195869255065918, "memory(GiB)": 72.85, "step": 23340, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.670267 }, { "epoch": 1.0001713722633991, "grad_norm": 3.640052318572998, "learning_rate": 9.045559598472472e-05, "loss": 2.229319190979004, "memory(GiB)": 72.85, "step": 23345, "token_acc": 0.5447470817120622, "train_speed(iter/s)": 0.670288 }, { "epoch": 1.0003855875926482, 
"grad_norm": 3.4434847831726074, "learning_rate": 9.045164083636079e-05, "loss": 2.464854621887207, "memory(GiB)": 72.85, "step": 23350, "token_acc": 0.4916387959866221, "train_speed(iter/s)": 0.670257 }, { "epoch": 1.000599802921897, "grad_norm": 3.5148022174835205, "learning_rate": 9.044768495517744e-05, "loss": 2.500771141052246, "memory(GiB)": 72.85, "step": 23355, "token_acc": 0.4636363636363636, "train_speed(iter/s)": 0.67026 }, { "epoch": 1.000814018251146, "grad_norm": 3.3085851669311523, "learning_rate": 9.044372834124632e-05, "loss": 2.539552688598633, "memory(GiB)": 72.85, "step": 23360, "token_acc": 0.4766081871345029, "train_speed(iter/s)": 0.670288 }, { "epoch": 1.001028233580395, "grad_norm": 3.784240484237671, "learning_rate": 9.043977099463914e-05, "loss": 2.1814786911010744, "memory(GiB)": 72.85, "step": 23365, "token_acc": 0.5099601593625498, "train_speed(iter/s)": 0.670308 }, { "epoch": 1.0012424489096439, "grad_norm": 3.2702598571777344, "learning_rate": 9.043581291542757e-05, "loss": 2.267334747314453, "memory(GiB)": 72.85, "step": 23370, "token_acc": 0.5189003436426117, "train_speed(iter/s)": 0.67033 }, { "epoch": 1.001456664238893, "grad_norm": 4.475317001342773, "learning_rate": 9.043185410368332e-05, "loss": 2.1675483703613283, "memory(GiB)": 72.85, "step": 23375, "token_acc": 0.5468164794007491, "train_speed(iter/s)": 0.670343 }, { "epoch": 1.001670879568142, "grad_norm": 3.1276957988739014, "learning_rate": 9.042789455947808e-05, "loss": 2.3521642684936523, "memory(GiB)": 72.85, "step": 23380, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.670361 }, { "epoch": 1.0018850948973907, "grad_norm": 3.7708394527435303, "learning_rate": 9.042393428288363e-05, "loss": 2.2131622314453123, "memory(GiB)": 72.85, "step": 23385, "token_acc": 0.5236593059936908, "train_speed(iter/s)": 0.670349 }, { "epoch": 1.0020993102266398, "grad_norm": 2.8927462100982666, "learning_rate": 9.041997327397169e-05, "loss": 2.469706344604492, "memory(GiB)": 
72.85, "step": 23390, "token_acc": 0.5073746312684366, "train_speed(iter/s)": 0.670363 }, { "epoch": 1.0023135255558888, "grad_norm": 3.7381014823913574, "learning_rate": 9.0416011532814e-05, "loss": 2.3837635040283205, "memory(GiB)": 72.85, "step": 23395, "token_acc": 0.47093023255813954, "train_speed(iter/s)": 0.670386 }, { "epoch": 1.0025277408851376, "grad_norm": 3.1490602493286133, "learning_rate": 9.041204905948236e-05, "loss": 2.493561935424805, "memory(GiB)": 72.85, "step": 23400, "token_acc": 0.45103857566765576, "train_speed(iter/s)": 0.670389 }, { "epoch": 1.0027419562143867, "grad_norm": 4.500817775726318, "learning_rate": 9.040808585404854e-05, "loss": 2.4282230377197265, "memory(GiB)": 72.85, "step": 23405, "token_acc": 0.5, "train_speed(iter/s)": 0.670401 }, { "epoch": 1.0029561715436357, "grad_norm": 3.702363967895508, "learning_rate": 9.040412191658434e-05, "loss": 2.3073657989501952, "memory(GiB)": 72.85, "step": 23410, "token_acc": 0.5244299674267101, "train_speed(iter/s)": 0.670418 }, { "epoch": 1.0031703868728847, "grad_norm": 4.505660533905029, "learning_rate": 9.040015724716157e-05, "loss": 2.3898778915405274, "memory(GiB)": 72.85, "step": 23415, "token_acc": 0.4790996784565916, "train_speed(iter/s)": 0.670389 }, { "epoch": 1.0033846022021335, "grad_norm": 5.152685642242432, "learning_rate": 9.039619184585204e-05, "loss": 2.2392501831054688, "memory(GiB)": 72.85, "step": 23420, "token_acc": 0.5092250922509225, "train_speed(iter/s)": 0.670403 }, { "epoch": 1.0035988175313826, "grad_norm": 6.042549133300781, "learning_rate": 9.039222571272763e-05, "loss": 2.1499351501464843, "memory(GiB)": 72.85, "step": 23425, "token_acc": 0.5280373831775701, "train_speed(iter/s)": 0.670378 }, { "epoch": 1.0038130328606316, "grad_norm": 4.235795974731445, "learning_rate": 9.038825884786013e-05, "loss": 2.0609409332275392, "memory(GiB)": 72.85, "step": 23430, "token_acc": 0.5376344086021505, "train_speed(iter/s)": 0.670368 }, { "epoch": 1.0040272481898804, 
"grad_norm": 3.5908737182617188, "learning_rate": 9.038429125132143e-05, "loss": 2.3870174407958986, "memory(GiB)": 72.85, "step": 23435, "token_acc": 0.5095541401273885, "train_speed(iter/s)": 0.670386 }, { "epoch": 1.0042414635191295, "grad_norm": 4.84592866897583, "learning_rate": 9.038032292318343e-05, "loss": 2.614064598083496, "memory(GiB)": 72.85, "step": 23440, "token_acc": 0.4591194968553459, "train_speed(iter/s)": 0.670344 }, { "epoch": 1.0044556788483785, "grad_norm": 3.1092324256896973, "learning_rate": 9.037635386351801e-05, "loss": 2.4842945098876954, "memory(GiB)": 72.85, "step": 23445, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.670369 }, { "epoch": 1.0046698941776273, "grad_norm": 4.6941657066345215, "learning_rate": 9.037238407239705e-05, "loss": 2.288806915283203, "memory(GiB)": 72.85, "step": 23450, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.670351 }, { "epoch": 1.0048841095068763, "grad_norm": 5.158677577972412, "learning_rate": 9.036841354989248e-05, "loss": 2.457442855834961, "memory(GiB)": 72.85, "step": 23455, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.670367 }, { "epoch": 1.0050983248361254, "grad_norm": 3.7334253787994385, "learning_rate": 9.036444229607623e-05, "loss": 2.638558578491211, "memory(GiB)": 72.85, "step": 23460, "token_acc": 0.4559748427672956, "train_speed(iter/s)": 0.67038 }, { "epoch": 1.0053125401653742, "grad_norm": 3.3027639389038086, "learning_rate": 9.036047031102024e-05, "loss": 2.2473644256591796, "memory(GiB)": 72.85, "step": 23465, "token_acc": 0.4560810810810811, "train_speed(iter/s)": 0.670368 }, { "epoch": 1.0055267554946232, "grad_norm": 3.972773313522339, "learning_rate": 9.035649759479648e-05, "loss": 2.3681201934814453, "memory(GiB)": 72.85, "step": 23470, "token_acc": 0.4694533762057878, "train_speed(iter/s)": 0.670365 }, { "epoch": 1.0057409708238723, "grad_norm": 3.8241047859191895, "learning_rate": 9.03525241474769e-05, "loss": 2.6618417739868163, 
"memory(GiB)": 72.85, "step": 23475, "token_acc": 0.468944099378882, "train_speed(iter/s)": 0.670385 }, { "epoch": 1.005955186153121, "grad_norm": 2.9879703521728516, "learning_rate": 9.034854996913349e-05, "loss": 2.5252609252929688, "memory(GiB)": 72.85, "step": 23480, "token_acc": 0.46546546546546547, "train_speed(iter/s)": 0.670365 }, { "epoch": 1.00616940148237, "grad_norm": 3.641508102416992, "learning_rate": 9.034457505983825e-05, "loss": 2.183427429199219, "memory(GiB)": 72.85, "step": 23485, "token_acc": 0.5074183976261127, "train_speed(iter/s)": 0.67037 }, { "epoch": 1.0063836168116191, "grad_norm": 3.978336811065674, "learning_rate": 9.034059941966318e-05, "loss": 2.892131805419922, "memory(GiB)": 72.85, "step": 23490, "token_acc": 0.47527472527472525, "train_speed(iter/s)": 0.670405 }, { "epoch": 1.006597832140868, "grad_norm": 4.995403289794922, "learning_rate": 9.033662304868031e-05, "loss": 2.414031982421875, "memory(GiB)": 72.85, "step": 23495, "token_acc": 0.5147679324894515, "train_speed(iter/s)": 0.670401 }, { "epoch": 1.006812047470117, "grad_norm": 5.125531196594238, "learning_rate": 9.033264594696169e-05, "loss": 2.356593132019043, "memory(GiB)": 72.85, "step": 23500, "token_acc": 0.5057034220532319, "train_speed(iter/s)": 0.670405 }, { "epoch": 1.006812047470117, "eval_loss": 1.9956026077270508, "eval_runtime": 16.6206, "eval_samples_per_second": 6.017, "eval_steps_per_second": 6.017, "eval_token_acc": 0.5138888888888888, "step": 23500 }, { "epoch": 1.007026262799366, "grad_norm": 4.045413970947266, "learning_rate": 9.032866811457935e-05, "loss": 2.342884063720703, "memory(GiB)": 72.85, "step": 23505, "token_acc": 0.5070281124497992, "train_speed(iter/s)": 0.670063 }, { "epoch": 1.0072404781286148, "grad_norm": 2.689612627029419, "learning_rate": 9.032468955160533e-05, "loss": 2.0424020767211912, "memory(GiB)": 72.85, "step": 23510, "token_acc": 0.5283018867924528, "train_speed(iter/s)": 0.670036 }, { "epoch": 1.0074546934578639, "grad_norm": 
3.598170042037964, "learning_rate": 9.032071025811175e-05, "loss": 2.116469383239746, "memory(GiB)": 72.85, "step": 23515, "token_acc": 0.5215827338129496, "train_speed(iter/s)": 0.670007 }, { "epoch": 1.007668908787113, "grad_norm": 3.1714494228363037, "learning_rate": 9.031673023417069e-05, "loss": 2.519486999511719, "memory(GiB)": 72.85, "step": 23520, "token_acc": 0.4721311475409836, "train_speed(iter/s)": 0.669976 }, { "epoch": 1.0078831241163617, "grad_norm": 3.9694840908050537, "learning_rate": 9.031274947985422e-05, "loss": 2.3346494674682616, "memory(GiB)": 72.85, "step": 23525, "token_acc": 0.5, "train_speed(iter/s)": 0.670024 }, { "epoch": 1.0080973394456108, "grad_norm": 3.8239598274230957, "learning_rate": 9.03087679952345e-05, "loss": 2.4451225280761717, "memory(GiB)": 72.85, "step": 23530, "token_acc": 0.5016722408026756, "train_speed(iter/s)": 0.670014 }, { "epoch": 1.0083115547748598, "grad_norm": 4.388778209686279, "learning_rate": 9.030478578038361e-05, "loss": 2.2854204177856445, "memory(GiB)": 72.85, "step": 23535, "token_acc": 0.5396825396825397, "train_speed(iter/s)": 0.670044 }, { "epoch": 1.0085257701041086, "grad_norm": 3.4467015266418457, "learning_rate": 9.030080283537374e-05, "loss": 2.383942413330078, "memory(GiB)": 72.85, "step": 23540, "token_acc": 0.47770700636942676, "train_speed(iter/s)": 0.670047 }, { "epoch": 1.0087399854333576, "grad_norm": 3.4266514778137207, "learning_rate": 9.029681916027701e-05, "loss": 2.2347654342651366, "memory(GiB)": 72.85, "step": 23545, "token_acc": 0.5196078431372549, "train_speed(iter/s)": 0.67004 }, { "epoch": 1.0089542007626067, "grad_norm": 4.020151615142822, "learning_rate": 9.029283475516561e-05, "loss": 2.034076690673828, "memory(GiB)": 72.85, "step": 23550, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.670039 }, { "epoch": 1.0091684160918555, "grad_norm": 3.9969730377197266, "learning_rate": 9.028884962011169e-05, "loss": 2.208098220825195, "memory(GiB)": 72.85, "step": 23555, 
"token_acc": 0.4965986394557823, "train_speed(iter/s)": 0.670057 }, { "epoch": 1.0093826314211045, "grad_norm": 3.6133105754852295, "learning_rate": 9.028486375518748e-05, "loss": 2.283215141296387, "memory(GiB)": 72.85, "step": 23560, "token_acc": 0.4892966360856269, "train_speed(iter/s)": 0.670015 }, { "epoch": 1.0095968467503535, "grad_norm": 3.9538800716400146, "learning_rate": 9.028087716046516e-05, "loss": 2.3798336029052733, "memory(GiB)": 72.85, "step": 23565, "token_acc": 0.4850498338870432, "train_speed(iter/s)": 0.670033 }, { "epoch": 1.0098110620796024, "grad_norm": 6.356213092803955, "learning_rate": 9.027688983601699e-05, "loss": 2.2217195510864256, "memory(GiB)": 72.85, "step": 23570, "token_acc": 0.5598455598455598, "train_speed(iter/s)": 0.670015 }, { "epoch": 1.0100252774088514, "grad_norm": 5.012721061706543, "learning_rate": 9.027290178191515e-05, "loss": 2.293868827819824, "memory(GiB)": 72.85, "step": 23575, "token_acc": 0.4981132075471698, "train_speed(iter/s)": 0.670031 }, { "epoch": 1.0102394927381004, "grad_norm": 3.607440710067749, "learning_rate": 9.026891299823192e-05, "loss": 2.1040103912353514, "memory(GiB)": 72.85, "step": 23580, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.67003 }, { "epoch": 1.0104537080673492, "grad_norm": 3.61287260055542, "learning_rate": 9.026492348503957e-05, "loss": 2.489266204833984, "memory(GiB)": 72.85, "step": 23585, "token_acc": 0.4925925925925926, "train_speed(iter/s)": 0.670016 }, { "epoch": 1.0106679233965983, "grad_norm": 5.777554512023926, "learning_rate": 9.026093324241035e-05, "loss": 2.105291748046875, "memory(GiB)": 72.85, "step": 23590, "token_acc": 0.5307017543859649, "train_speed(iter/s)": 0.670036 }, { "epoch": 1.0108821387258473, "grad_norm": 6.360179424285889, "learning_rate": 9.025694227041656e-05, "loss": 2.1504968643188476, "memory(GiB)": 72.85, "step": 23595, "token_acc": 0.5296052631578947, "train_speed(iter/s)": 0.670053 }, { "epoch": 1.0110963540550961, "grad_norm": 
3.4159603118896484, "learning_rate": 9.025295056913049e-05, "loss": 2.5643840789794923, "memory(GiB)": 72.85, "step": 23600, "token_acc": 0.4855072463768116, "train_speed(iter/s)": 0.670042 }, { "epoch": 1.0113105693843452, "grad_norm": 3.2307565212249756, "learning_rate": 9.024895813862446e-05, "loss": 2.2079416275024415, "memory(GiB)": 72.85, "step": 23605, "token_acc": 0.5155709342560554, "train_speed(iter/s)": 0.670069 }, { "epoch": 1.0115247847135942, "grad_norm": 4.264486312866211, "learning_rate": 9.024496497897082e-05, "loss": 2.312539482116699, "memory(GiB)": 72.85, "step": 23610, "token_acc": 0.47854785478547857, "train_speed(iter/s)": 0.670071 }, { "epoch": 1.011739000042843, "grad_norm": 3.3218588829040527, "learning_rate": 9.024097109024186e-05, "loss": 2.6078405380249023, "memory(GiB)": 72.85, "step": 23615, "token_acc": 0.5057915057915058, "train_speed(iter/s)": 0.670081 }, { "epoch": 1.011953215372092, "grad_norm": 4.695964813232422, "learning_rate": 9.023697647250995e-05, "loss": 2.0508262634277346, "memory(GiB)": 72.85, "step": 23620, "token_acc": 0.5309090909090909, "train_speed(iter/s)": 0.670088 }, { "epoch": 1.012167430701341, "grad_norm": 4.668831825256348, "learning_rate": 9.023298112584749e-05, "loss": 2.3181169509887694, "memory(GiB)": 72.85, "step": 23625, "token_acc": 0.4896551724137931, "train_speed(iter/s)": 0.67009 }, { "epoch": 1.0123816460305899, "grad_norm": 4.188012599945068, "learning_rate": 9.022898505032685e-05, "loss": 2.414878273010254, "memory(GiB)": 72.85, "step": 23630, "token_acc": 0.439873417721519, "train_speed(iter/s)": 0.670125 }, { "epoch": 1.012595861359839, "grad_norm": 3.525970458984375, "learning_rate": 9.022498824602037e-05, "loss": 2.349308395385742, "memory(GiB)": 72.85, "step": 23635, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.670137 }, { "epoch": 1.012810076689088, "grad_norm": 3.591824531555176, "learning_rate": 9.022099071300052e-05, "loss": 2.3283563613891602, "memory(GiB)": 72.85, "step": 
23640, "token_acc": 0.4915254237288136, "train_speed(iter/s)": 0.670123 }, { "epoch": 1.0130242920183368, "grad_norm": 3.556931972503662, "learning_rate": 9.021699245133967e-05, "loss": 2.303254318237305, "memory(GiB)": 72.85, "step": 23645, "token_acc": 0.47335423197492166, "train_speed(iter/s)": 0.670136 }, { "epoch": 1.0132385073475858, "grad_norm": 4.165482521057129, "learning_rate": 9.02129934611103e-05, "loss": 2.5917015075683594, "memory(GiB)": 72.85, "step": 23650, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670144 }, { "epoch": 1.0134527226768348, "grad_norm": 3.4403774738311768, "learning_rate": 9.020899374238481e-05, "loss": 2.149338150024414, "memory(GiB)": 72.85, "step": 23655, "token_acc": 0.5145631067961165, "train_speed(iter/s)": 0.670134 }, { "epoch": 1.0136669380060837, "grad_norm": 5.667196273803711, "learning_rate": 9.020499329523569e-05, "loss": 2.3482574462890624, "memory(GiB)": 72.85, "step": 23660, "token_acc": 0.51171875, "train_speed(iter/s)": 0.670166 }, { "epoch": 1.0138811533353327, "grad_norm": 4.534487724304199, "learning_rate": 9.02009921197354e-05, "loss": 2.472939872741699, "memory(GiB)": 72.85, "step": 23665, "token_acc": 0.4523809523809524, "train_speed(iter/s)": 0.670148 }, { "epoch": 1.0140953686645817, "grad_norm": 2.88771915435791, "learning_rate": 9.019699021595642e-05, "loss": 2.5311325073242186, "memory(GiB)": 72.85, "step": 23670, "token_acc": 0.46368715083798884, "train_speed(iter/s)": 0.670134 }, { "epoch": 1.0143095839938305, "grad_norm": 5.8065619468688965, "learning_rate": 9.019298758397127e-05, "loss": 2.4836841583251954, "memory(GiB)": 72.85, "step": 23675, "token_acc": 0.47410358565737054, "train_speed(iter/s)": 0.670149 }, { "epoch": 1.0145237993230796, "grad_norm": 4.630249977111816, "learning_rate": 9.018898422385243e-05, "loss": 2.221549224853516, "memory(GiB)": 72.85, "step": 23680, "token_acc": 0.5424354243542435, "train_speed(iter/s)": 0.670172 }, { "epoch": 1.0147380146523286, "grad_norm": 
3.936143159866333, "learning_rate": 9.018498013567244e-05, "loss": 2.3072887420654298, "memory(GiB)": 72.85, "step": 23685, "token_acc": 0.5018315018315018, "train_speed(iter/s)": 0.670127 }, { "epoch": 1.0149522299815774, "grad_norm": 3.4261510372161865, "learning_rate": 9.018097531950385e-05, "loss": 2.1525197982788087, "memory(GiB)": 72.85, "step": 23690, "token_acc": 0.541958041958042, "train_speed(iter/s)": 0.670106 }, { "epoch": 1.0151664453108264, "grad_norm": 4.819684028625488, "learning_rate": 9.01769697754192e-05, "loss": 2.2931940078735353, "memory(GiB)": 72.85, "step": 23695, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.67014 }, { "epoch": 1.0153806606400755, "grad_norm": 3.9616096019744873, "learning_rate": 9.017296350349105e-05, "loss": 2.078196716308594, "memory(GiB)": 72.85, "step": 23700, "token_acc": 0.5037593984962406, "train_speed(iter/s)": 0.670131 }, { "epoch": 1.0155948759693243, "grad_norm": 2.496546745300293, "learning_rate": 9.016895650379198e-05, "loss": 2.235495185852051, "memory(GiB)": 72.85, "step": 23705, "token_acc": 0.5079365079365079, "train_speed(iter/s)": 0.670147 }, { "epoch": 1.0158090912985733, "grad_norm": 4.103524684906006, "learning_rate": 9.016494877639457e-05, "loss": 2.4740310668945313, "memory(GiB)": 72.85, "step": 23710, "token_acc": 0.49538461538461537, "train_speed(iter/s)": 0.670119 }, { "epoch": 1.0160233066278224, "grad_norm": 4.056247711181641, "learning_rate": 9.016094032137144e-05, "loss": 2.398722457885742, "memory(GiB)": 72.85, "step": 23715, "token_acc": 0.5, "train_speed(iter/s)": 0.670141 }, { "epoch": 1.0162375219570712, "grad_norm": 4.741580486297607, "learning_rate": 9.015693113879521e-05, "loss": 2.140182113647461, "memory(GiB)": 72.85, "step": 23720, "token_acc": 0.540268456375839, "train_speed(iter/s)": 0.670166 }, { "epoch": 1.0164517372863202, "grad_norm": 6.22979211807251, "learning_rate": 9.01529212287385e-05, "loss": 2.4712604522705077, "memory(GiB)": 72.85, "step": 23725, 
"token_acc": 0.49538461538461537, "train_speed(iter/s)": 0.670177 }, { "epoch": 1.0166659526155692, "grad_norm": 8.721623420715332, "learning_rate": 9.014891059127395e-05, "loss": 2.360072135925293, "memory(GiB)": 72.85, "step": 23730, "token_acc": 0.5234042553191489, "train_speed(iter/s)": 0.670214 }, { "epoch": 1.016880167944818, "grad_norm": 3.50738263130188, "learning_rate": 9.014489922647423e-05, "loss": 2.3020393371582033, "memory(GiB)": 72.85, "step": 23735, "token_acc": 0.49838187702265374, "train_speed(iter/s)": 0.670212 }, { "epoch": 1.017094383274067, "grad_norm": 3.2515618801116943, "learning_rate": 9.014088713441199e-05, "loss": 2.489496612548828, "memory(GiB)": 72.85, "step": 23740, "token_acc": 0.46355685131195334, "train_speed(iter/s)": 0.67022 }, { "epoch": 1.0173085986033161, "grad_norm": 4.690944671630859, "learning_rate": 9.013687431515994e-05, "loss": 2.299686241149902, "memory(GiB)": 72.85, "step": 23745, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.670241 }, { "epoch": 1.017522813932565, "grad_norm": 4.358466148376465, "learning_rate": 9.013286076879075e-05, "loss": 2.436829948425293, "memory(GiB)": 72.85, "step": 23750, "token_acc": 0.5018450184501845, "train_speed(iter/s)": 0.670268 }, { "epoch": 1.017737029261814, "grad_norm": 3.681964874267578, "learning_rate": 9.012884649537715e-05, "loss": 2.1688259124755858, "memory(GiB)": 72.85, "step": 23755, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.670258 }, { "epoch": 1.017951244591063, "grad_norm": 4.151150703430176, "learning_rate": 9.012483149499184e-05, "loss": 2.3813232421875, "memory(GiB)": 72.85, "step": 23760, "token_acc": 0.4831081081081081, "train_speed(iter/s)": 0.670252 }, { "epoch": 1.0181654599203118, "grad_norm": 3.7897183895111084, "learning_rate": 9.012081576770757e-05, "loss": 1.911599349975586, "memory(GiB)": 72.85, "step": 23765, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.670278 }, { "epoch": 1.0183796752495609, "grad_norm": 
4.4749908447265625, "learning_rate": 9.011679931359708e-05, "loss": 2.2832340240478515, "memory(GiB)": 72.85, "step": 23770, "token_acc": 0.5291828793774319, "train_speed(iter/s)": 0.670274 }, { "epoch": 1.01859389057881, "grad_norm": 6.3284783363342285, "learning_rate": 9.011278213273315e-05, "loss": 2.115856742858887, "memory(GiB)": 72.85, "step": 23775, "token_acc": 0.5355805243445693, "train_speed(iter/s)": 0.670275 }, { "epoch": 1.0188081059080587, "grad_norm": 4.88137674331665, "learning_rate": 9.010876422518854e-05, "loss": 2.341826248168945, "memory(GiB)": 72.85, "step": 23780, "token_acc": 0.5292096219931272, "train_speed(iter/s)": 0.670292 }, { "epoch": 1.0190223212373077, "grad_norm": 3.326983690261841, "learning_rate": 9.010474559103604e-05, "loss": 2.3998172760009764, "memory(GiB)": 72.85, "step": 23785, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.670292 }, { "epoch": 1.0192365365665568, "grad_norm": 3.8893473148345947, "learning_rate": 9.010072623034845e-05, "loss": 2.0948238372802734, "memory(GiB)": 72.85, "step": 23790, "token_acc": 0.5271966527196653, "train_speed(iter/s)": 0.670298 }, { "epoch": 1.0194507518958056, "grad_norm": 4.64300537109375, "learning_rate": 9.00967061431986e-05, "loss": 2.4054250717163086, "memory(GiB)": 72.85, "step": 23795, "token_acc": 0.4984423676012461, "train_speed(iter/s)": 0.670305 }, { "epoch": 1.0196649672250546, "grad_norm": 4.915195941925049, "learning_rate": 9.009268532965929e-05, "loss": 2.2658443450927734, "memory(GiB)": 72.85, "step": 23800, "token_acc": 0.5270758122743683, "train_speed(iter/s)": 0.670273 }, { "epoch": 1.0198791825543037, "grad_norm": 4.086065292358398, "learning_rate": 9.008866378980338e-05, "loss": 2.441600227355957, "memory(GiB)": 72.85, "step": 23805, "token_acc": 0.4859154929577465, "train_speed(iter/s)": 0.670229 }, { "epoch": 1.0200933978835525, "grad_norm": 6.145879745483398, "learning_rate": 9.008464152370371e-05, "loss": 2.170720863342285, "memory(GiB)": 72.85, "step": 
23810, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.670223 }, { "epoch": 1.0203076132128015, "grad_norm": 3.72263765335083, "learning_rate": 9.008061853143318e-05, "loss": 2.237879180908203, "memory(GiB)": 72.85, "step": 23815, "token_acc": 0.48249027237354086, "train_speed(iter/s)": 0.670228 }, { "epoch": 1.0205218285420505, "grad_norm": 3.9480807781219482, "learning_rate": 9.00765948130646e-05, "loss": 2.374271011352539, "memory(GiB)": 72.85, "step": 23820, "token_acc": 0.5127388535031847, "train_speed(iter/s)": 0.670226 }, { "epoch": 1.0207360438712993, "grad_norm": 4.810409069061279, "learning_rate": 9.007257036867095e-05, "loss": 2.3002609252929687, "memory(GiB)": 72.85, "step": 23825, "token_acc": 0.5087209302325582, "train_speed(iter/s)": 0.670213 }, { "epoch": 1.0209502592005484, "grad_norm": 4.476785182952881, "learning_rate": 9.006854519832509e-05, "loss": 2.197031784057617, "memory(GiB)": 72.85, "step": 23830, "token_acc": 0.5272727272727272, "train_speed(iter/s)": 0.670217 }, { "epoch": 1.0211644745297974, "grad_norm": 3.615773916244507, "learning_rate": 9.006451930209995e-05, "loss": 2.2038097381591797, "memory(GiB)": 72.85, "step": 23835, "token_acc": 0.5048231511254019, "train_speed(iter/s)": 0.670212 }, { "epoch": 1.0213786898590462, "grad_norm": 4.149505615234375, "learning_rate": 9.006049268006844e-05, "loss": 2.247480010986328, "memory(GiB)": 72.85, "step": 23840, "token_acc": 0.5342960288808665, "train_speed(iter/s)": 0.670236 }, { "epoch": 1.0215929051882953, "grad_norm": 4.581421852111816, "learning_rate": 9.005646533230354e-05, "loss": 2.3233871459960938, "memory(GiB)": 72.85, "step": 23845, "token_acc": 0.5076335877862596, "train_speed(iter/s)": 0.670197 }, { "epoch": 1.0218071205175443, "grad_norm": 7.472766399383545, "learning_rate": 9.005243725887819e-05, "loss": 2.230320167541504, "memory(GiB)": 72.85, "step": 23850, "token_acc": 0.5015673981191222, "train_speed(iter/s)": 0.670183 }, { "epoch": 1.022021335846793, 
"grad_norm": 4.018210411071777, "learning_rate": 9.004840845986538e-05, "loss": 2.1638742446899415, "memory(GiB)": 72.85, "step": 23855, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.670163 }, { "epoch": 1.0222355511760421, "grad_norm": 3.9171881675720215, "learning_rate": 9.004437893533807e-05, "loss": 2.397225761413574, "memory(GiB)": 72.85, "step": 23860, "token_acc": 0.4447852760736196, "train_speed(iter/s)": 0.670151 }, { "epoch": 1.0224497665052912, "grad_norm": 4.238703727722168, "learning_rate": 9.004034868536929e-05, "loss": 2.4318071365356446, "memory(GiB)": 72.85, "step": 23865, "token_acc": 0.4670846394984326, "train_speed(iter/s)": 0.670156 }, { "epoch": 1.02266398183454, "grad_norm": 3.6097350120544434, "learning_rate": 9.0036317710032e-05, "loss": 2.33437557220459, "memory(GiB)": 72.85, "step": 23870, "token_acc": 0.5, "train_speed(iter/s)": 0.670168 }, { "epoch": 1.022878197163789, "grad_norm": 3.9292125701904297, "learning_rate": 9.003228600939926e-05, "loss": 2.3010013580322264, "memory(GiB)": 72.85, "step": 23875, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.670181 }, { "epoch": 1.023092412493038, "grad_norm": 4.052996635437012, "learning_rate": 9.002825358354414e-05, "loss": 2.2437000274658203, "memory(GiB)": 72.85, "step": 23880, "token_acc": 0.5186567164179104, "train_speed(iter/s)": 0.67017 }, { "epoch": 1.0233066278222869, "grad_norm": 3.567265510559082, "learning_rate": 9.002422043253962e-05, "loss": 2.1025054931640623, "memory(GiB)": 72.85, "step": 23885, "token_acc": 0.49834983498349833, "train_speed(iter/s)": 0.670136 }, { "epoch": 1.023520843151536, "grad_norm": 4.356810092926025, "learning_rate": 9.002018655645882e-05, "loss": 2.449769973754883, "memory(GiB)": 72.85, "step": 23890, "token_acc": 0.4913494809688581, "train_speed(iter/s)": 0.670148 }, { "epoch": 1.023735058480785, "grad_norm": 3.0684406757354736, "learning_rate": 9.00161519553748e-05, "loss": 2.389752197265625, "memory(GiB)": 72.85, "step": 
23895, "token_acc": 0.5, "train_speed(iter/s)": 0.670156 }, { "epoch": 1.0239492738100338, "grad_norm": 3.851580858230591, "learning_rate": 9.001211662936065e-05, "loss": 2.528121566772461, "memory(GiB)": 72.85, "step": 23900, "token_acc": 0.4792332268370607, "train_speed(iter/s)": 0.670135 }, { "epoch": 1.0241634891392828, "grad_norm": 3.2644662857055664, "learning_rate": 9.000808057848946e-05, "loss": 2.004416847229004, "memory(GiB)": 72.85, "step": 23905, "token_acc": 0.5315985130111525, "train_speed(iter/s)": 0.670149 }, { "epoch": 1.0243777044685318, "grad_norm": 3.479654312133789, "learning_rate": 9.000404380283435e-05, "loss": 2.2194421768188475, "memory(GiB)": 72.85, "step": 23910, "token_acc": 0.5189873417721519, "train_speed(iter/s)": 0.670138 }, { "epoch": 1.0245919197977806, "grad_norm": 4.728374004364014, "learning_rate": 9.000000630246848e-05, "loss": 2.313573455810547, "memory(GiB)": 72.85, "step": 23915, "token_acc": 0.4900398406374502, "train_speed(iter/s)": 0.6701 }, { "epoch": 1.0248061351270297, "grad_norm": 3.4877381324768066, "learning_rate": 8.999596807746497e-05, "loss": 2.5486244201660155, "memory(GiB)": 72.85, "step": 23920, "token_acc": 0.4679245283018868, "train_speed(iter/s)": 0.67015 }, { "epoch": 1.0250203504562787, "grad_norm": 3.207829475402832, "learning_rate": 8.999192912789697e-05, "loss": 2.5402997970581054, "memory(GiB)": 72.85, "step": 23925, "token_acc": 0.47878787878787876, "train_speed(iter/s)": 0.670156 }, { "epoch": 1.0252345657855275, "grad_norm": 4.0446271896362305, "learning_rate": 8.998788945383768e-05, "loss": 2.1397592544555666, "memory(GiB)": 72.85, "step": 23930, "token_acc": 0.5223367697594502, "train_speed(iter/s)": 0.670152 }, { "epoch": 1.0254487811147766, "grad_norm": 4.62409782409668, "learning_rate": 8.998384905536025e-05, "loss": 2.3016677856445313, "memory(GiB)": 72.85, "step": 23935, "token_acc": 0.5302013422818792, "train_speed(iter/s)": 0.670145 }, { "epoch": 1.0256629964440256, "grad_norm": 
3.4606711864471436, "learning_rate": 8.997980793253789e-05, "loss": 2.2911661148071287, "memory(GiB)": 72.85, "step": 23940, "token_acc": 0.5041666666666667, "train_speed(iter/s)": 0.670162 }, { "epoch": 1.0258772117732744, "grad_norm": 3.2648138999938965, "learning_rate": 8.99757660854438e-05, "loss": 2.1407241821289062, "memory(GiB)": 72.85, "step": 23945, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.670145 }, { "epoch": 1.0260914271025234, "grad_norm": 4.927407264709473, "learning_rate": 8.99717235141512e-05, "loss": 2.4011857986450194, "memory(GiB)": 72.85, "step": 23950, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.670127 }, { "epoch": 1.0263056424317725, "grad_norm": 5.01608419418335, "learning_rate": 8.996768021873334e-05, "loss": 2.7568763732910155, "memory(GiB)": 72.85, "step": 23955, "token_acc": 0.44666666666666666, "train_speed(iter/s)": 0.670091 }, { "epoch": 1.0265198577610213, "grad_norm": 3.8277533054351807, "learning_rate": 8.996363619926346e-05, "loss": 2.2759220123291017, "memory(GiB)": 72.85, "step": 23960, "token_acc": 0.4897959183673469, "train_speed(iter/s)": 0.670059 }, { "epoch": 1.0267340730902703, "grad_norm": 4.167908668518066, "learning_rate": 8.995959145581482e-05, "loss": 2.4069664001464846, "memory(GiB)": 72.85, "step": 23965, "token_acc": 0.514792899408284, "train_speed(iter/s)": 0.670043 }, { "epoch": 1.0269482884195194, "grad_norm": 3.554163694381714, "learning_rate": 8.995554598846069e-05, "loss": 2.306612968444824, "memory(GiB)": 72.85, "step": 23970, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.670073 }, { "epoch": 1.0271625037487682, "grad_norm": 2.7214136123657227, "learning_rate": 8.995149979727437e-05, "loss": 2.3649133682250976, "memory(GiB)": 72.85, "step": 23975, "token_acc": 0.510989010989011, "train_speed(iter/s)": 0.67007 }, { "epoch": 1.0273767190780172, "grad_norm": 3.7505531311035156, "learning_rate": 8.994745288232916e-05, "loss": 2.2577146530151366, "memory(GiB)": 72.85, 
"step": 23980, "token_acc": 0.5287356321839081, "train_speed(iter/s)": 0.670063 }, { "epoch": 1.0275909344072662, "grad_norm": 3.928755760192871, "learning_rate": 8.994340524369836e-05, "loss": 2.130649185180664, "memory(GiB)": 72.85, "step": 23985, "token_acc": 0.5183823529411765, "train_speed(iter/s)": 0.670078 }, { "epoch": 1.027805149736515, "grad_norm": 4.088456630706787, "learning_rate": 8.993935688145529e-05, "loss": 2.2886228561401367, "memory(GiB)": 72.85, "step": 23990, "token_acc": 0.5014925373134328, "train_speed(iter/s)": 0.670054 }, { "epoch": 1.028019365065764, "grad_norm": 3.1865005493164062, "learning_rate": 8.993530779567332e-05, "loss": 2.219376564025879, "memory(GiB)": 72.85, "step": 23995, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.670057 }, { "epoch": 1.0282335803950131, "grad_norm": 4.760087013244629, "learning_rate": 8.993125798642579e-05, "loss": 2.289718818664551, "memory(GiB)": 72.85, "step": 24000, "token_acc": 0.4965034965034965, "train_speed(iter/s)": 0.67001 }, { "epoch": 1.0282335803950131, "eval_loss": 2.0461559295654297, "eval_runtime": 17.0311, "eval_samples_per_second": 5.872, "eval_steps_per_second": 5.872, "eval_token_acc": 0.49855907780979825, "step": 24000 }, { "epoch": 1.028447795724262, "grad_norm": 4.139863967895508, "learning_rate": 8.992720745378605e-05, "loss": 2.432975006103516, "memory(GiB)": 72.85, "step": 24005, "token_acc": 0.49229287090558765, "train_speed(iter/s)": 0.669667 }, { "epoch": 1.028662011053511, "grad_norm": 3.178633213043213, "learning_rate": 8.99231561978275e-05, "loss": 2.4150285720825195, "memory(GiB)": 72.85, "step": 24010, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.669657 }, { "epoch": 1.02887622638276, "grad_norm": 5.256226062774658, "learning_rate": 8.991910421862352e-05, "loss": 2.3382850646972657, "memory(GiB)": 72.85, "step": 24015, "token_acc": 0.4714285714285714, "train_speed(iter/s)": 0.669617 }, { "epoch": 1.0290904417120088, "grad_norm": 3.9339993000030518, 
"learning_rate": 8.991505151624752e-05, "loss": 2.227708435058594, "memory(GiB)": 72.85, "step": 24020, "token_acc": 0.5136986301369864, "train_speed(iter/s)": 0.669622 }, { "epoch": 1.0293046570412578, "grad_norm": 4.985021114349365, "learning_rate": 8.991099809077292e-05, "loss": 2.2927417755126953, "memory(GiB)": 72.85, "step": 24025, "token_acc": 0.512, "train_speed(iter/s)": 0.669624 }, { "epoch": 1.0295188723705069, "grad_norm": 3.735914468765259, "learning_rate": 8.990694394227317e-05, "loss": 2.2036153793334963, "memory(GiB)": 72.85, "step": 24030, "token_acc": 0.5522388059701493, "train_speed(iter/s)": 0.669619 }, { "epoch": 1.0297330876997557, "grad_norm": 4.353451728820801, "learning_rate": 8.990288907082168e-05, "loss": 2.3691566467285154, "memory(GiB)": 72.85, "step": 24035, "token_acc": 0.45517241379310347, "train_speed(iter/s)": 0.669586 }, { "epoch": 1.0299473030290047, "grad_norm": 10.091327667236328, "learning_rate": 8.989883347649191e-05, "loss": 2.3088293075561523, "memory(GiB)": 72.85, "step": 24040, "token_acc": 0.48788927335640137, "train_speed(iter/s)": 0.669601 }, { "epoch": 1.0301615183582538, "grad_norm": 3.8360044956207275, "learning_rate": 8.989477715935735e-05, "loss": 2.4578083038330076, "memory(GiB)": 72.85, "step": 24045, "token_acc": 0.4840989399293286, "train_speed(iter/s)": 0.66963 }, { "epoch": 1.0303757336875026, "grad_norm": 4.062998294830322, "learning_rate": 8.989072011949148e-05, "loss": 2.5111516952514648, "memory(GiB)": 72.85, "step": 24050, "token_acc": 0.49538461538461537, "train_speed(iter/s)": 0.669664 }, { "epoch": 1.0305899490167516, "grad_norm": 4.199966907501221, "learning_rate": 8.988666235696779e-05, "loss": 2.4896453857421874, "memory(GiB)": 72.85, "step": 24055, "token_acc": 0.4888888888888889, "train_speed(iter/s)": 0.669652 }, { "epoch": 1.0308041643460006, "grad_norm": 4.407293319702148, "learning_rate": 8.98826038718598e-05, "loss": 2.4029747009277345, "memory(GiB)": 72.85, "step": 24060, "token_acc": 
0.4619883040935672, "train_speed(iter/s)": 0.669667 }, { "epoch": 1.0310183796752495, "grad_norm": 3.7141964435577393, "learning_rate": 8.987854466424103e-05, "loss": 2.451553153991699, "memory(GiB)": 72.85, "step": 24065, "token_acc": 0.43986254295532645, "train_speed(iter/s)": 0.669659 }, { "epoch": 1.0312325950044985, "grad_norm": 4.681596279144287, "learning_rate": 8.987448473418502e-05, "loss": 2.462375259399414, "memory(GiB)": 72.85, "step": 24070, "token_acc": 0.46757679180887374, "train_speed(iter/s)": 0.66962 }, { "epoch": 1.0314468103337475, "grad_norm": 3.386364698410034, "learning_rate": 8.98704240817653e-05, "loss": 2.070959281921387, "memory(GiB)": 72.85, "step": 24075, "token_acc": 0.5674603174603174, "train_speed(iter/s)": 0.669627 }, { "epoch": 1.0316610256629963, "grad_norm": 3.4633572101593018, "learning_rate": 8.986636270705545e-05, "loss": 2.1756973266601562, "memory(GiB)": 72.85, "step": 24080, "token_acc": 0.498371335504886, "train_speed(iter/s)": 0.669579 }, { "epoch": 1.0318752409922454, "grad_norm": 5.1557440757751465, "learning_rate": 8.986230061012905e-05, "loss": 2.311701202392578, "memory(GiB)": 72.85, "step": 24085, "token_acc": 0.509090909090909, "train_speed(iter/s)": 0.66962 }, { "epoch": 1.0320894563214944, "grad_norm": 3.6240315437316895, "learning_rate": 8.985823779105968e-05, "loss": 2.4244203567504883, "memory(GiB)": 72.85, "step": 24090, "token_acc": 0.5, "train_speed(iter/s)": 0.669635 }, { "epoch": 1.0323036716507432, "grad_norm": 5.14008903503418, "learning_rate": 8.985417424992093e-05, "loss": 2.2237438201904296, "memory(GiB)": 72.85, "step": 24095, "token_acc": 0.5451127819548872, "train_speed(iter/s)": 0.669632 }, { "epoch": 1.0325178869799922, "grad_norm": 4.462664604187012, "learning_rate": 8.985010998678642e-05, "loss": 2.209218215942383, "memory(GiB)": 72.85, "step": 24100, "token_acc": 0.5146443514644351, "train_speed(iter/s)": 0.6696 }, { "epoch": 1.0327321023092413, "grad_norm": 4.028770923614502, 
"learning_rate": 8.984604500172982e-05, "loss": 2.108722114562988, "memory(GiB)": 72.85, "step": 24105, "token_acc": 0.5152671755725191, "train_speed(iter/s)": 0.669638 }, { "epoch": 1.03294631763849, "grad_norm": 3.9054112434387207, "learning_rate": 8.984197929482471e-05, "loss": 2.2917572021484376, "memory(GiB)": 72.85, "step": 24110, "token_acc": 0.506896551724138, "train_speed(iter/s)": 0.669664 }, { "epoch": 1.0331605329677391, "grad_norm": 4.089771747589111, "learning_rate": 8.983791286614476e-05, "loss": 2.600506591796875, "memory(GiB)": 72.85, "step": 24115, "token_acc": 0.47639484978540775, "train_speed(iter/s)": 0.669707 }, { "epoch": 1.0333747482969882, "grad_norm": 6.925342082977295, "learning_rate": 8.983465920357246e-05, "loss": 2.6895336151123046, "memory(GiB)": 72.85, "step": 24120, "token_acc": 0.4377358490566038, "train_speed(iter/s)": 0.669705 }, { "epoch": 1.0335889636262372, "grad_norm": 4.721043109893799, "learning_rate": 8.983059147588347e-05, "loss": 2.3660884857177735, "memory(GiB)": 72.85, "step": 24125, "token_acc": 0.4895833333333333, "train_speed(iter/s)": 0.669717 }, { "epoch": 1.033803178955486, "grad_norm": 3.8558051586151123, "learning_rate": 8.982652302662596e-05, "loss": 2.6020984649658203, "memory(GiB)": 72.85, "step": 24130, "token_acc": 0.45714285714285713, "train_speed(iter/s)": 0.669685 }, { "epoch": 1.034017394284735, "grad_norm": 4.142866611480713, "learning_rate": 8.982245385587364e-05, "loss": 2.271434211730957, "memory(GiB)": 72.85, "step": 24135, "token_acc": 0.4558303886925795, "train_speed(iter/s)": 0.669722 }, { "epoch": 1.034231609613984, "grad_norm": 3.6448543071746826, "learning_rate": 8.981838396370019e-05, "loss": 2.3906089782714846, "memory(GiB)": 72.85, "step": 24140, "token_acc": 0.4981684981684982, "train_speed(iter/s)": 0.669712 }, { "epoch": 1.034445824943233, "grad_norm": 4.306787014007568, "learning_rate": 8.981431335017939e-05, "loss": 2.4881210327148438, "memory(GiB)": 72.85, "step": 24145, 
"token_acc": 0.495114006514658, "train_speed(iter/s)": 0.669738 }, { "epoch": 1.034660040272482, "grad_norm": 4.293931484222412, "learning_rate": 8.981024201538493e-05, "loss": 2.41387882232666, "memory(GiB)": 72.85, "step": 24150, "token_acc": 0.46953405017921146, "train_speed(iter/s)": 0.669734 }, { "epoch": 1.034874255601731, "grad_norm": 8.224340438842773, "learning_rate": 8.98061699593906e-05, "loss": 2.063572883605957, "memory(GiB)": 72.85, "step": 24155, "token_acc": 0.5443037974683544, "train_speed(iter/s)": 0.669742 }, { "epoch": 1.0350884709309798, "grad_norm": 2.906790018081665, "learning_rate": 8.980209718227016e-05, "loss": 2.1858184814453123, "memory(GiB)": 72.85, "step": 24160, "token_acc": 0.5399361022364217, "train_speed(iter/s)": 0.669746 }, { "epoch": 1.0353026862602288, "grad_norm": 3.3484928607940674, "learning_rate": 8.979802368409738e-05, "loss": 2.512320709228516, "memory(GiB)": 72.85, "step": 24165, "token_acc": 0.47686832740213525, "train_speed(iter/s)": 0.669792 }, { "epoch": 1.0355169015894778, "grad_norm": 4.600776195526123, "learning_rate": 8.979394946494608e-05, "loss": 2.5581884384155273, "memory(GiB)": 72.85, "step": 24170, "token_acc": 0.45768025078369906, "train_speed(iter/s)": 0.669808 }, { "epoch": 1.0357311169187267, "grad_norm": 3.662315845489502, "learning_rate": 8.978987452489005e-05, "loss": 2.3802799224853515, "memory(GiB)": 72.85, "step": 24175, "token_acc": 0.4735202492211838, "train_speed(iter/s)": 0.669816 }, { "epoch": 1.0359453322479757, "grad_norm": 3.905271291732788, "learning_rate": 8.978579886400313e-05, "loss": 2.489491271972656, "memory(GiB)": 72.85, "step": 24180, "token_acc": 0.4890282131661442, "train_speed(iter/s)": 0.669826 }, { "epoch": 1.0361595475772247, "grad_norm": 4.560506343841553, "learning_rate": 8.978172248235912e-05, "loss": 2.3825035095214844, "memory(GiB)": 72.85, "step": 24185, "token_acc": 0.5208333333333334, "train_speed(iter/s)": 0.669826 }, { "epoch": 1.0363737629064735, "grad_norm": 
3.777470588684082, "learning_rate": 8.97776453800319e-05, "loss": 2.5037086486816404, "memory(GiB)": 72.85, "step": 24190, "token_acc": 0.47023809523809523, "train_speed(iter/s)": 0.669829 }, { "epoch": 1.0365879782357226, "grad_norm": 4.145737171173096, "learning_rate": 8.977356755709531e-05, "loss": 2.1870431900024414, "memory(GiB)": 72.85, "step": 24195, "token_acc": 0.48, "train_speed(iter/s)": 0.669823 }, { "epoch": 1.0368021935649716, "grad_norm": 4.0831427574157715, "learning_rate": 8.976948901362326e-05, "loss": 2.533743667602539, "memory(GiB)": 72.85, "step": 24200, "token_acc": 0.48923076923076925, "train_speed(iter/s)": 0.669801 }, { "epoch": 1.0370164088942204, "grad_norm": 7.314621925354004, "learning_rate": 8.976540974968959e-05, "loss": 2.7062637329101564, "memory(GiB)": 72.85, "step": 24205, "token_acc": 0.42424242424242425, "train_speed(iter/s)": 0.669799 }, { "epoch": 1.0372306242234695, "grad_norm": 5.148069381713867, "learning_rate": 8.976132976536822e-05, "loss": 2.2809268951416017, "memory(GiB)": 72.85, "step": 24210, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.669795 }, { "epoch": 1.0374448395527185, "grad_norm": 5.999290466308594, "learning_rate": 8.975724906073306e-05, "loss": 2.2956264495849608, "memory(GiB)": 72.85, "step": 24215, "token_acc": 0.5481171548117155, "train_speed(iter/s)": 0.669818 }, { "epoch": 1.0376590548819673, "grad_norm": 3.5759222507476807, "learning_rate": 8.975316763585805e-05, "loss": 2.1311344146728515, "memory(GiB)": 72.85, "step": 24220, "token_acc": 0.5345911949685535, "train_speed(iter/s)": 0.669855 }, { "epoch": 1.0378732702112163, "grad_norm": 4.264134883880615, "learning_rate": 8.974908549081711e-05, "loss": 2.4600252151489257, "memory(GiB)": 72.85, "step": 24225, "token_acc": 0.47076023391812866, "train_speed(iter/s)": 0.669806 }, { "epoch": 1.0380874855404654, "grad_norm": 3.8241982460021973, "learning_rate": 8.974500262568419e-05, "loss": 2.095082092285156, "memory(GiB)": 72.85, "step": 
24230, "token_acc": 0.5390070921985816, "train_speed(iter/s)": 0.669827 }, { "epoch": 1.0383017008697142, "grad_norm": 4.408856391906738, "learning_rate": 8.974091904053328e-05, "loss": 2.4586135864257814, "memory(GiB)": 72.85, "step": 24235, "token_acc": 0.4734848484848485, "train_speed(iter/s)": 0.669823 }, { "epoch": 1.0385159161989632, "grad_norm": 3.4943418502807617, "learning_rate": 8.973683473543832e-05, "loss": 2.2726932525634767, "memory(GiB)": 72.85, "step": 24240, "token_acc": 0.5077881619937694, "train_speed(iter/s)": 0.669837 }, { "epoch": 1.0387301315282123, "grad_norm": 4.287461280822754, "learning_rate": 8.973274971047335e-05, "loss": 2.7402885437011717, "memory(GiB)": 72.85, "step": 24245, "token_acc": 0.45674740484429066, "train_speed(iter/s)": 0.669847 }, { "epoch": 1.038944346857461, "grad_norm": 3.637408971786499, "learning_rate": 8.972866396571233e-05, "loss": 2.0877174377441405, "memory(GiB)": 72.85, "step": 24250, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.669845 }, { "epoch": 1.03915856218671, "grad_norm": 4.218483924865723, "learning_rate": 8.972457750122929e-05, "loss": 2.579564094543457, "memory(GiB)": 72.85, "step": 24255, "token_acc": 0.5020080321285141, "train_speed(iter/s)": 0.669852 }, { "epoch": 1.0393727775159591, "grad_norm": 5.211498260498047, "learning_rate": 8.972049031709829e-05, "loss": 2.500564956665039, "memory(GiB)": 72.85, "step": 24260, "token_acc": 0.5073746312684366, "train_speed(iter/s)": 0.669842 }, { "epoch": 1.039586992845208, "grad_norm": 4.799142360687256, "learning_rate": 8.971640241339332e-05, "loss": 1.9518278121948243, "memory(GiB)": 72.85, "step": 24265, "token_acc": 0.5547703180212014, "train_speed(iter/s)": 0.669853 }, { "epoch": 1.039801208174457, "grad_norm": 4.478473663330078, "learning_rate": 8.971231379018848e-05, "loss": 2.0962957382202148, "memory(GiB)": 72.85, "step": 24270, "token_acc": 0.5657894736842105, "train_speed(iter/s)": 0.669869 }, { "epoch": 1.040015423503706, 
"grad_norm": 2.8175554275512695, "learning_rate": 8.97082244475578e-05, "loss": 2.3819355010986327, "memory(GiB)": 72.85, "step": 24275, "token_acc": 0.5, "train_speed(iter/s)": 0.669891 }, { "epoch": 1.0402296388329548, "grad_norm": 3.3369550704956055, "learning_rate": 8.970413438557539e-05, "loss": 2.199123191833496, "memory(GiB)": 72.85, "step": 24280, "token_acc": 0.5276752767527675, "train_speed(iter/s)": 0.66989 }, { "epoch": 1.0404438541622039, "grad_norm": 3.182513952255249, "learning_rate": 8.970004360431535e-05, "loss": 2.5870489120483398, "memory(GiB)": 72.85, "step": 24285, "token_acc": 0.4670846394984326, "train_speed(iter/s)": 0.669894 }, { "epoch": 1.040658069491453, "grad_norm": 3.813157081604004, "learning_rate": 8.969595210385177e-05, "loss": 2.6266847610473634, "memory(GiB)": 72.85, "step": 24290, "token_acc": 0.45936395759717313, "train_speed(iter/s)": 0.669908 }, { "epoch": 1.0408722848207017, "grad_norm": 4.50279426574707, "learning_rate": 8.969185988425879e-05, "loss": 2.4858325958251952, "memory(GiB)": 72.85, "step": 24295, "token_acc": 0.5064377682403434, "train_speed(iter/s)": 0.669904 }, { "epoch": 1.0410865001499507, "grad_norm": 4.608509063720703, "learning_rate": 8.968776694561055e-05, "loss": 2.3818696975708007, "memory(GiB)": 72.85, "step": 24300, "token_acc": 0.5034722222222222, "train_speed(iter/s)": 0.669933 }, { "epoch": 1.0413007154791998, "grad_norm": 5.490851879119873, "learning_rate": 8.968367328798116e-05, "loss": 2.0967498779296876, "memory(GiB)": 72.85, "step": 24305, "token_acc": 0.5143884892086331, "train_speed(iter/s)": 0.669926 }, { "epoch": 1.0415149308084486, "grad_norm": 3.58315110206604, "learning_rate": 8.967957891144479e-05, "loss": 2.4749521255493163, "memory(GiB)": 72.85, "step": 24310, "token_acc": 0.4884910485933504, "train_speed(iter/s)": 0.669932 }, { "epoch": 1.0417291461376976, "grad_norm": 3.6850545406341553, "learning_rate": 8.967548381607565e-05, "loss": 2.3373125076293944, "memory(GiB)": 72.85, 
"step": 24315, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.669932 }, { "epoch": 1.0419433614669467, "grad_norm": 3.842390298843384, "learning_rate": 8.967138800194787e-05, "loss": 2.3997257232666014, "memory(GiB)": 72.85, "step": 24320, "token_acc": 0.46646341463414637, "train_speed(iter/s)": 0.669928 }, { "epoch": 1.0421575767961955, "grad_norm": 6.62354040145874, "learning_rate": 8.966729146913571e-05, "loss": 2.0459102630615233, "memory(GiB)": 72.85, "step": 24325, "token_acc": 0.5507246376811594, "train_speed(iter/s)": 0.669872 }, { "epoch": 1.0423717921254445, "grad_norm": 5.960872173309326, "learning_rate": 8.966319421771335e-05, "loss": 2.4210887908935548, "memory(GiB)": 72.85, "step": 24330, "token_acc": 0.44904458598726116, "train_speed(iter/s)": 0.669867 }, { "epoch": 1.0425860074546935, "grad_norm": 5.518343925476074, "learning_rate": 8.965909624775501e-05, "loss": 2.086781883239746, "memory(GiB)": 72.85, "step": 24335, "token_acc": 0.5265151515151515, "train_speed(iter/s)": 0.669852 }, { "epoch": 1.0428002227839424, "grad_norm": 3.0963611602783203, "learning_rate": 8.965499755933495e-05, "loss": 2.0735549926757812, "memory(GiB)": 72.85, "step": 24340, "token_acc": 0.5220338983050847, "train_speed(iter/s)": 0.669865 }, { "epoch": 1.0430144381131914, "grad_norm": 5.008915424346924, "learning_rate": 8.96508981525274e-05, "loss": 2.4908552169799805, "memory(GiB)": 72.85, "step": 24345, "token_acc": 0.4908424908424908, "train_speed(iter/s)": 0.669867 }, { "epoch": 1.0432286534424404, "grad_norm": 4.083357334136963, "learning_rate": 8.964679802740665e-05, "loss": 2.38039608001709, "memory(GiB)": 72.85, "step": 24350, "token_acc": 0.4828767123287671, "train_speed(iter/s)": 0.669905 }, { "epoch": 1.0434428687716892, "grad_norm": 4.415421485900879, "learning_rate": 8.964269718404695e-05, "loss": 2.2344247817993166, "memory(GiB)": 72.85, "step": 24355, "token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.669922 }, { "epoch": 1.0436570841009383, 
"grad_norm": 3.2366321086883545, "learning_rate": 8.963859562252261e-05, "loss": 2.334675979614258, "memory(GiB)": 72.85, "step": 24360, "token_acc": 0.4831081081081081, "train_speed(iter/s)": 0.669895 }, { "epoch": 1.0438712994301873, "grad_norm": 3.4142754077911377, "learning_rate": 8.963449334290792e-05, "loss": 2.425305938720703, "memory(GiB)": 72.85, "step": 24365, "token_acc": 0.4492753623188406, "train_speed(iter/s)": 0.669902 }, { "epoch": 1.0440855147594361, "grad_norm": 4.698883533477783, "learning_rate": 8.96303903452772e-05, "loss": 2.3515920639038086, "memory(GiB)": 72.85, "step": 24370, "token_acc": 0.5206349206349207, "train_speed(iter/s)": 0.669918 }, { "epoch": 1.0442997300886852, "grad_norm": 5.259530544281006, "learning_rate": 8.962628662970479e-05, "loss": 2.442157173156738, "memory(GiB)": 72.85, "step": 24375, "token_acc": 0.46846846846846846, "train_speed(iter/s)": 0.669919 }, { "epoch": 1.0445139454179342, "grad_norm": 3.855620861053467, "learning_rate": 8.962218219626501e-05, "loss": 2.570150375366211, "memory(GiB)": 72.85, "step": 24380, "token_acc": 0.490625, "train_speed(iter/s)": 0.669896 }, { "epoch": 1.044728160747183, "grad_norm": 5.132887363433838, "learning_rate": 8.961807704503224e-05, "loss": 2.2798648834228517, "memory(GiB)": 72.85, "step": 24385, "token_acc": 0.48928571428571427, "train_speed(iter/s)": 0.669878 }, { "epoch": 1.044942376076432, "grad_norm": 3.2473819255828857, "learning_rate": 8.961397117608086e-05, "loss": 2.1256752014160156, "memory(GiB)": 72.85, "step": 24390, "token_acc": 0.55, "train_speed(iter/s)": 0.669846 }, { "epoch": 1.045156591405681, "grad_norm": 5.647526741027832, "learning_rate": 8.960986458948521e-05, "loss": 2.3841392517089846, "memory(GiB)": 72.85, "step": 24395, "token_acc": 0.4979253112033195, "train_speed(iter/s)": 0.669848 }, { "epoch": 1.0453708067349299, "grad_norm": 4.117587089538574, "learning_rate": 8.96057572853197e-05, "loss": 2.332962417602539, "memory(GiB)": 72.85, "step": 24400, 
"token_acc": 0.48698884758364314, "train_speed(iter/s)": 0.669821 }, { "epoch": 1.045585022064179, "grad_norm": 3.3987224102020264, "learning_rate": 8.960164926365874e-05, "loss": 2.404888153076172, "memory(GiB)": 72.85, "step": 24405, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.669874 }, { "epoch": 1.045799237393428, "grad_norm": 3.506554126739502, "learning_rate": 8.959754052457679e-05, "loss": 2.0400680541992187, "memory(GiB)": 72.85, "step": 24410, "token_acc": 0.5863453815261044, "train_speed(iter/s)": 0.66987 }, { "epoch": 1.0460134527226768, "grad_norm": 3.736159086227417, "learning_rate": 8.959343106814821e-05, "loss": 2.4157827377319334, "memory(GiB)": 72.85, "step": 24415, "token_acc": 0.5318471337579618, "train_speed(iter/s)": 0.669858 }, { "epoch": 1.0462276680519258, "grad_norm": 4.1456618309021, "learning_rate": 8.95893208944475e-05, "loss": 2.7369335174560545, "memory(GiB)": 72.85, "step": 24420, "token_acc": 0.47575757575757577, "train_speed(iter/s)": 0.669864 }, { "epoch": 1.0464418833811748, "grad_norm": 3.8810160160064697, "learning_rate": 8.958521000354911e-05, "loss": 2.4942771911621096, "memory(GiB)": 72.85, "step": 24425, "token_acc": 0.48639455782312924, "train_speed(iter/s)": 0.669882 }, { "epoch": 1.0466560987104236, "grad_norm": 3.6594362258911133, "learning_rate": 8.95810983955275e-05, "loss": 2.438639259338379, "memory(GiB)": 72.85, "step": 24430, "token_acc": 0.4804270462633452, "train_speed(iter/s)": 0.669908 }, { "epoch": 1.0468703140396727, "grad_norm": 4.897302150726318, "learning_rate": 8.957698607045716e-05, "loss": 2.57379093170166, "memory(GiB)": 72.85, "step": 24435, "token_acc": 0.4652567975830816, "train_speed(iter/s)": 0.669915 }, { "epoch": 1.0470845293689217, "grad_norm": 4.023571014404297, "learning_rate": 8.95728730284126e-05, "loss": 2.061886215209961, "memory(GiB)": 72.85, "step": 24440, "token_acc": 0.528957528957529, "train_speed(iter/s)": 0.669917 }, { "epoch": 1.0472987446981705, "grad_norm": 
3.9421627521514893, "learning_rate": 8.956875926946832e-05, "loss": 2.3962173461914062, "memory(GiB)": 72.85, "step": 24445, "token_acc": 0.4744525547445255, "train_speed(iter/s)": 0.66995 }, { "epoch": 1.0475129600274196, "grad_norm": 3.735006809234619, "learning_rate": 8.956464479369883e-05, "loss": 2.433555030822754, "memory(GiB)": 72.85, "step": 24450, "token_acc": 0.49310344827586206, "train_speed(iter/s)": 0.66993 }, { "epoch": 1.0477271753566686, "grad_norm": 3.6852169036865234, "learning_rate": 8.956052960117871e-05, "loss": 2.2904426574707033, "memory(GiB)": 72.85, "step": 24455, "token_acc": 0.5261437908496732, "train_speed(iter/s)": 0.669953 }, { "epoch": 1.0479413906859174, "grad_norm": 4.469356536865234, "learning_rate": 8.955641369198247e-05, "loss": 2.5005908966064454, "memory(GiB)": 72.85, "step": 24460, "token_acc": 0.4618055555555556, "train_speed(iter/s)": 0.669927 }, { "epoch": 1.0481556060151664, "grad_norm": 3.3384528160095215, "learning_rate": 8.955229706618469e-05, "loss": 2.202378845214844, "memory(GiB)": 72.85, "step": 24465, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.66993 }, { "epoch": 1.0483698213444155, "grad_norm": 3.986513376235962, "learning_rate": 8.954817972385996e-05, "loss": 2.655917167663574, "memory(GiB)": 72.85, "step": 24470, "token_acc": 0.4803921568627451, "train_speed(iter/s)": 0.669978 }, { "epoch": 1.0485840366736643, "grad_norm": 4.110473155975342, "learning_rate": 8.954406166508284e-05, "loss": 2.636677932739258, "memory(GiB)": 72.85, "step": 24475, "token_acc": 0.4524714828897338, "train_speed(iter/s)": 0.66999 }, { "epoch": 1.0487982520029133, "grad_norm": 4.490509510040283, "learning_rate": 8.953994288992796e-05, "loss": 2.2434762954711913, "memory(GiB)": 72.85, "step": 24480, "token_acc": 0.48502994011976047, "train_speed(iter/s)": 0.669976 }, { "epoch": 1.0490124673321624, "grad_norm": 3.612704038619995, "learning_rate": 8.95358233984699e-05, "loss": 2.2621223449707033, "memory(GiB)": 72.85, 
"step": 24485, "token_acc": 0.47335423197492166, "train_speed(iter/s)": 0.669968 }, { "epoch": 1.0492266826614112, "grad_norm": 3.441239595413208, "learning_rate": 8.953170319078333e-05, "loss": 2.141912269592285, "memory(GiB)": 72.85, "step": 24490, "token_acc": 0.540625, "train_speed(iter/s)": 0.669956 }, { "epoch": 1.0494408979906602, "grad_norm": 3.978933572769165, "learning_rate": 8.952758226694287e-05, "loss": 2.5275169372558595, "memory(GiB)": 72.85, "step": 24495, "token_acc": 0.4537037037037037, "train_speed(iter/s)": 0.66995 }, { "epoch": 1.0496551133199092, "grad_norm": 4.368017673492432, "learning_rate": 8.952346062702317e-05, "loss": 2.3126861572265627, "memory(GiB)": 72.85, "step": 24500, "token_acc": 0.47244094488188976, "train_speed(iter/s)": 0.669988 }, { "epoch": 1.0496551133199092, "eval_loss": 2.0148019790649414, "eval_runtime": 16.1671, "eval_samples_per_second": 6.185, "eval_steps_per_second": 6.185, "eval_token_acc": 0.5084485407066052, "step": 24500 }, { "epoch": 1.049869328649158, "grad_norm": 4.6572346687316895, "learning_rate": 8.951933827109892e-05, "loss": 2.759017753601074, "memory(GiB)": 72.85, "step": 24505, "token_acc": 0.48589341692789967, "train_speed(iter/s)": 0.669644 }, { "epoch": 1.050083543978407, "grad_norm": 5.381182670593262, "learning_rate": 8.951521519924478e-05, "loss": 2.458211898803711, "memory(GiB)": 72.85, "step": 24510, "token_acc": 0.44808743169398907, "train_speed(iter/s)": 0.669665 }, { "epoch": 1.0502977593076561, "grad_norm": 4.1279096603393555, "learning_rate": 8.951109141153544e-05, "loss": 2.344526481628418, "memory(GiB)": 72.85, "step": 24515, "token_acc": 0.5444444444444444, "train_speed(iter/s)": 0.669651 }, { "epoch": 1.050511974636905, "grad_norm": 4.244344711303711, "learning_rate": 8.950696690804562e-05, "loss": 2.270478439331055, "memory(GiB)": 72.85, "step": 24520, "token_acc": 0.5179856115107914, "train_speed(iter/s)": 0.669673 }, { "epoch": 1.050726189966154, "grad_norm": 7.255115032196045, 
"learning_rate": 8.950284168885003e-05, "loss": 2.4627429962158205, "memory(GiB)": 72.85, "step": 24525, "token_acc": 0.42911877394636017, "train_speed(iter/s)": 0.66962 }, { "epoch": 1.050940405295403, "grad_norm": 4.090050220489502, "learning_rate": 8.949871575402341e-05, "loss": 2.5075586318969725, "memory(GiB)": 72.85, "step": 24530, "token_acc": 0.47720364741641336, "train_speed(iter/s)": 0.669566 }, { "epoch": 1.0511546206246518, "grad_norm": 3.9561855792999268, "learning_rate": 8.94945891036405e-05, "loss": 2.336191940307617, "memory(GiB)": 72.85, "step": 24535, "token_acc": 0.5130111524163569, "train_speed(iter/s)": 0.669562 }, { "epoch": 1.0513688359539008, "grad_norm": 3.5019936561584473, "learning_rate": 8.949046173777606e-05, "loss": 2.26143856048584, "memory(GiB)": 72.85, "step": 24540, "token_acc": 0.5075757575757576, "train_speed(iter/s)": 0.669536 }, { "epoch": 1.0515830512831499, "grad_norm": 6.221694469451904, "learning_rate": 8.948633365650487e-05, "loss": 2.389867401123047, "memory(GiB)": 72.85, "step": 24545, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.669552 }, { "epoch": 1.0517972666123987, "grad_norm": 4.139732360839844, "learning_rate": 8.948220485990169e-05, "loss": 2.198739433288574, "memory(GiB)": 72.85, "step": 24550, "token_acc": 0.5072992700729927, "train_speed(iter/s)": 0.669564 }, { "epoch": 1.0520114819416477, "grad_norm": 4.132297515869141, "learning_rate": 8.947807534804134e-05, "loss": 2.1667064666748046, "memory(GiB)": 72.85, "step": 24555, "token_acc": 0.5440251572327044, "train_speed(iter/s)": 0.669586 }, { "epoch": 1.0522256972708968, "grad_norm": 4.1917290687561035, "learning_rate": 8.947394512099861e-05, "loss": 2.2235427856445313, "memory(GiB)": 72.85, "step": 24560, "token_acc": 0.4971590909090909, "train_speed(iter/s)": 0.669582 }, { "epoch": 1.0524399126001456, "grad_norm": 4.830970287322998, "learning_rate": 8.946981417884834e-05, "loss": 2.258778381347656, "memory(GiB)": 72.85, "step": 24565, 
"token_acc": 0.5033557046979866, "train_speed(iter/s)": 0.669577 }, { "epoch": 1.0526541279293946, "grad_norm": 3.359206199645996, "learning_rate": 8.946568252166538e-05, "loss": 2.53079776763916, "memory(GiB)": 72.85, "step": 24570, "token_acc": 0.4239766081871345, "train_speed(iter/s)": 0.669554 }, { "epoch": 1.0528683432586436, "grad_norm": 3.846900463104248, "learning_rate": 8.946155014952453e-05, "loss": 2.3387191772460936, "memory(GiB)": 72.85, "step": 24575, "token_acc": 0.5082508250825083, "train_speed(iter/s)": 0.669543 }, { "epoch": 1.0530825585878925, "grad_norm": 4.542093753814697, "learning_rate": 8.94574170625007e-05, "loss": 2.269932174682617, "memory(GiB)": 72.85, "step": 24580, "token_acc": 0.4803921568627451, "train_speed(iter/s)": 0.669567 }, { "epoch": 1.0532967739171415, "grad_norm": 3.7014055252075195, "learning_rate": 8.945328326066874e-05, "loss": 2.0899072647094727, "memory(GiB)": 72.85, "step": 24585, "token_acc": 0.5437956204379562, "train_speed(iter/s)": 0.669584 }, { "epoch": 1.0535109892463905, "grad_norm": 3.222262382507324, "learning_rate": 8.944914874410354e-05, "loss": 2.0958702087402346, "memory(GiB)": 72.85, "step": 24590, "token_acc": 0.5488215488215489, "train_speed(iter/s)": 0.669621 }, { "epoch": 1.0537252045756393, "grad_norm": 4.4569926261901855, "learning_rate": 8.944501351288002e-05, "loss": 2.168721008300781, "memory(GiB)": 72.85, "step": 24595, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.669607 }, { "epoch": 1.0539394199048884, "grad_norm": 3.9712440967559814, "learning_rate": 8.944087756707306e-05, "loss": 2.4123729705810546, "memory(GiB)": 72.85, "step": 24600, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.66959 }, { "epoch": 1.0541536352341374, "grad_norm": 5.735115051269531, "learning_rate": 8.943674090675759e-05, "loss": 2.230890083312988, "memory(GiB)": 72.85, "step": 24605, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.669518 }, { "epoch": 1.0543678505633862, "grad_norm": 
4.733466148376465, "learning_rate": 8.94326035320086e-05, "loss": 2.2885406494140623, "memory(GiB)": 72.85, "step": 24610, "token_acc": 0.5034246575342466, "train_speed(iter/s)": 0.669521 }, { "epoch": 1.0545820658926353, "grad_norm": 3.563413619995117, "learning_rate": 8.942846544290099e-05, "loss": 2.1875383377075197, "memory(GiB)": 72.85, "step": 24615, "token_acc": 0.5204081632653061, "train_speed(iter/s)": 0.669513 }, { "epoch": 1.0547962812218843, "grad_norm": 4.8733110427856445, "learning_rate": 8.942432663950974e-05, "loss": 2.5112775802612304, "memory(GiB)": 72.85, "step": 24620, "token_acc": 0.4642857142857143, "train_speed(iter/s)": 0.669495 }, { "epoch": 1.055010496551133, "grad_norm": 4.960849761962891, "learning_rate": 8.942018712190983e-05, "loss": 2.2905910491943358, "memory(GiB)": 72.85, "step": 24625, "token_acc": 0.5159235668789809, "train_speed(iter/s)": 0.669498 }, { "epoch": 1.0552247118803821, "grad_norm": 4.88832426071167, "learning_rate": 8.941604689017627e-05, "loss": 2.397261047363281, "memory(GiB)": 72.85, "step": 24630, "token_acc": 0.4856115107913669, "train_speed(iter/s)": 0.669491 }, { "epoch": 1.0554389272096312, "grad_norm": 3.875213861465454, "learning_rate": 8.941190594438402e-05, "loss": 2.111627769470215, "memory(GiB)": 72.85, "step": 24635, "token_acc": 0.5629370629370629, "train_speed(iter/s)": 0.669514 }, { "epoch": 1.05565314253888, "grad_norm": 4.10315465927124, "learning_rate": 8.940776428460813e-05, "loss": 2.3185394287109373, "memory(GiB)": 72.85, "step": 24640, "token_acc": 0.5019455252918288, "train_speed(iter/s)": 0.669508 }, { "epoch": 1.055867357868129, "grad_norm": 4.624300003051758, "learning_rate": 8.940362191092362e-05, "loss": 2.444966125488281, "memory(GiB)": 72.85, "step": 24645, "token_acc": 0.45054945054945056, "train_speed(iter/s)": 0.669558 }, { "epoch": 1.056081573197378, "grad_norm": 4.173475742340088, "learning_rate": 8.939947882340554e-05, "loss": 2.0577102661132813, "memory(GiB)": 72.85, "step": 
24650, "token_acc": 0.5283687943262412, "train_speed(iter/s)": 0.669559 }, { "epoch": 1.0562957885266269, "grad_norm": 4.526961803436279, "learning_rate": 8.939533502212893e-05, "loss": 2.264741134643555, "memory(GiB)": 72.85, "step": 24655, "token_acc": 0.5145985401459854, "train_speed(iter/s)": 0.669575 }, { "epoch": 1.056510003855876, "grad_norm": 4.0541839599609375, "learning_rate": 8.939119050716889e-05, "loss": 2.265817642211914, "memory(GiB)": 72.85, "step": 24660, "token_acc": 0.5134228187919463, "train_speed(iter/s)": 0.66954 }, { "epoch": 1.056724219185125, "grad_norm": 3.7219486236572266, "learning_rate": 8.938704527860047e-05, "loss": 2.569473648071289, "memory(GiB)": 72.85, "step": 24665, "token_acc": 0.47039473684210525, "train_speed(iter/s)": 0.669553 }, { "epoch": 1.0569384345143737, "grad_norm": 5.500358581542969, "learning_rate": 8.938289933649878e-05, "loss": 2.106797790527344, "memory(GiB)": 72.85, "step": 24670, "token_acc": 0.5058823529411764, "train_speed(iter/s)": 0.669528 }, { "epoch": 1.0571526498436228, "grad_norm": 3.16610050201416, "learning_rate": 8.937875268093892e-05, "loss": 2.4050601959228515, "memory(GiB)": 72.85, "step": 24675, "token_acc": 0.47513812154696133, "train_speed(iter/s)": 0.669541 }, { "epoch": 1.0573668651728718, "grad_norm": 4.976731777191162, "learning_rate": 8.937460531199602e-05, "loss": 2.3782176971435547, "memory(GiB)": 72.85, "step": 24680, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.669541 }, { "epoch": 1.0575810805021206, "grad_norm": 4.23645544052124, "learning_rate": 8.93704572297452e-05, "loss": 2.2862483978271486, "memory(GiB)": 72.85, "step": 24685, "token_acc": 0.48297213622291024, "train_speed(iter/s)": 0.669522 }, { "epoch": 1.0577952958313697, "grad_norm": 5.007875442504883, "learning_rate": 8.936630843426164e-05, "loss": 2.4015262603759764, "memory(GiB)": 72.85, "step": 24690, "token_acc": 0.4868913857677903, "train_speed(iter/s)": 0.669535 }, { "epoch": 1.0580095111606187, 
"grad_norm": 4.799905300140381, "learning_rate": 8.936215892562046e-05, "loss": 2.4475944519042967, "memory(GiB)": 72.85, "step": 24695, "token_acc": 0.5083333333333333, "train_speed(iter/s)": 0.669511 }, { "epoch": 1.0582237264898675, "grad_norm": 3.686492919921875, "learning_rate": 8.935800870389684e-05, "loss": 2.195695686340332, "memory(GiB)": 72.85, "step": 24700, "token_acc": 0.5164473684210527, "train_speed(iter/s)": 0.669508 }, { "epoch": 1.0584379418191165, "grad_norm": 4.104644775390625, "learning_rate": 8.935385776916595e-05, "loss": 2.255909538269043, "memory(GiB)": 72.85, "step": 24705, "token_acc": 0.5, "train_speed(iter/s)": 0.669505 }, { "epoch": 1.0586521571483656, "grad_norm": 3.755491256713867, "learning_rate": 8.934970612150304e-05, "loss": 2.3940174102783205, "memory(GiB)": 72.85, "step": 24710, "token_acc": 0.5029940119760479, "train_speed(iter/s)": 0.669472 }, { "epoch": 1.0588663724776144, "grad_norm": 4.050288677215576, "learning_rate": 8.93455537609833e-05, "loss": 2.280731964111328, "memory(GiB)": 72.85, "step": 24715, "token_acc": 0.47692307692307695, "train_speed(iter/s)": 0.669496 }, { "epoch": 1.0590805878068634, "grad_norm": 4.535091400146484, "learning_rate": 8.934140068768192e-05, "loss": 2.370192527770996, "memory(GiB)": 72.85, "step": 24720, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.669514 }, { "epoch": 1.0592948031361125, "grad_norm": 3.7013206481933594, "learning_rate": 8.933724690167417e-05, "loss": 2.0833511352539062, "memory(GiB)": 72.85, "step": 24725, "token_acc": 0.5287356321839081, "train_speed(iter/s)": 0.66949 }, { "epoch": 1.0595090184653613, "grad_norm": 4.758694171905518, "learning_rate": 8.933309240303529e-05, "loss": 2.157914161682129, "memory(GiB)": 72.85, "step": 24730, "token_acc": 0.5375, "train_speed(iter/s)": 0.669468 }, { "epoch": 1.0597232337946103, "grad_norm": 5.431443691253662, "learning_rate": 8.932893719184054e-05, "loss": 2.3806901931762696, "memory(GiB)": 72.85, "step": 24735, 
"token_acc": 0.45768025078369906, "train_speed(iter/s)": 0.669463 }, { "epoch": 1.0599374491238593, "grad_norm": 5.610350131988525, "learning_rate": 8.932478126816521e-05, "loss": 2.3025835037231444, "memory(GiB)": 72.85, "step": 24740, "token_acc": 0.4511784511784512, "train_speed(iter/s)": 0.669464 }, { "epoch": 1.0601516644531084, "grad_norm": 4.993907451629639, "learning_rate": 8.932062463208457e-05, "loss": 2.2898891448974608, "memory(GiB)": 72.85, "step": 24745, "token_acc": 0.5, "train_speed(iter/s)": 0.669444 }, { "epoch": 1.0603658797823572, "grad_norm": 4.04432487487793, "learning_rate": 8.931646728367394e-05, "loss": 2.340106201171875, "memory(GiB)": 72.85, "step": 24750, "token_acc": 0.47604790419161674, "train_speed(iter/s)": 0.669435 }, { "epoch": 1.0605800951116062, "grad_norm": 3.6360161304473877, "learning_rate": 8.93123092230086e-05, "loss": 2.166082000732422, "memory(GiB)": 72.85, "step": 24755, "token_acc": 0.5330188679245284, "train_speed(iter/s)": 0.669421 }, { "epoch": 1.0607943104408553, "grad_norm": 5.204171657562256, "learning_rate": 8.930815045016392e-05, "loss": 2.215592956542969, "memory(GiB)": 72.85, "step": 24760, "token_acc": 0.4931972789115646, "train_speed(iter/s)": 0.669434 }, { "epoch": 1.061008525770104, "grad_norm": 3.375192642211914, "learning_rate": 8.930399096521521e-05, "loss": 2.2588340759277346, "memory(GiB)": 72.85, "step": 24765, "token_acc": 0.4952978056426332, "train_speed(iter/s)": 0.669454 }, { "epoch": 1.061222741099353, "grad_norm": 4.293218612670898, "learning_rate": 8.929983076823784e-05, "loss": 2.638026809692383, "memory(GiB)": 72.85, "step": 24770, "token_acc": 0.4512987012987013, "train_speed(iter/s)": 0.669469 }, { "epoch": 1.0614369564286021, "grad_norm": 4.450348854064941, "learning_rate": 8.929566985930717e-05, "loss": 2.105376052856445, "memory(GiB)": 72.85, "step": 24775, "token_acc": 0.5192307692307693, "train_speed(iter/s)": 0.669496 }, { "epoch": 1.061651171757851, "grad_norm": 4.292396068572998, 
"learning_rate": 8.929150823849856e-05, "loss": 2.1308929443359377, "memory(GiB)": 72.85, "step": 24780, "token_acc": 0.5186567164179104, "train_speed(iter/s)": 0.669493 }, { "epoch": 1.0618653870871, "grad_norm": 3.839233636856079, "learning_rate": 8.928734590588746e-05, "loss": 2.249803924560547, "memory(GiB)": 72.85, "step": 24785, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.669527 }, { "epoch": 1.062079602416349, "grad_norm": 3.7259185314178467, "learning_rate": 8.928318286154919e-05, "loss": 2.553511619567871, "memory(GiB)": 72.85, "step": 24790, "token_acc": 0.46905537459283386, "train_speed(iter/s)": 0.669522 }, { "epoch": 1.0622938177455978, "grad_norm": 3.7516605854034424, "learning_rate": 8.927901910555925e-05, "loss": 2.685155487060547, "memory(GiB)": 72.85, "step": 24795, "token_acc": 0.4910394265232975, "train_speed(iter/s)": 0.669572 }, { "epoch": 1.0625080330748469, "grad_norm": 4.674527645111084, "learning_rate": 8.927485463799301e-05, "loss": 2.1320589065551756, "memory(GiB)": 72.85, "step": 24800, "token_acc": 0.5057915057915058, "train_speed(iter/s)": 0.669565 }, { "epoch": 1.062722248404096, "grad_norm": 3.6329169273376465, "learning_rate": 8.927068945892594e-05, "loss": 2.6704023361206053, "memory(GiB)": 72.85, "step": 24805, "token_acc": 0.44680851063829785, "train_speed(iter/s)": 0.669586 }, { "epoch": 1.0629364637333447, "grad_norm": 4.372280597686768, "learning_rate": 8.926652356843348e-05, "loss": 2.306835746765137, "memory(GiB)": 72.85, "step": 24810, "token_acc": 0.5283687943262412, "train_speed(iter/s)": 0.669607 }, { "epoch": 1.0631506790625938, "grad_norm": 5.231503963470459, "learning_rate": 8.926235696659113e-05, "loss": 2.3772340774536134, "memory(GiB)": 72.85, "step": 24815, "token_acc": 0.4772727272727273, "train_speed(iter/s)": 0.669608 }, { "epoch": 1.0633648943918428, "grad_norm": 3.9714269638061523, "learning_rate": 8.925818965347434e-05, "loss": 2.4630477905273436, "memory(GiB)": 72.85, "step": 24820, 
"token_acc": 0.4944649446494465, "train_speed(iter/s)": 0.669596 }, { "epoch": 1.0635791097210916, "grad_norm": 3.4537131786346436, "learning_rate": 8.925402162915862e-05, "loss": 2.4599138259887696, "memory(GiB)": 72.85, "step": 24825, "token_acc": 0.459546925566343, "train_speed(iter/s)": 0.669618 }, { "epoch": 1.0637933250503406, "grad_norm": 3.3906807899475098, "learning_rate": 8.924985289371947e-05, "loss": 2.3064300537109377, "memory(GiB)": 72.85, "step": 24830, "token_acc": 0.48026315789473684, "train_speed(iter/s)": 0.66959 }, { "epoch": 1.0640075403795897, "grad_norm": 4.286004066467285, "learning_rate": 8.924568344723243e-05, "loss": 2.135405349731445, "memory(GiB)": 72.85, "step": 24835, "token_acc": 0.5408560311284046, "train_speed(iter/s)": 0.669592 }, { "epoch": 1.0642217557088385, "grad_norm": 3.6977040767669678, "learning_rate": 8.924151328977301e-05, "loss": 2.014900779724121, "memory(GiB)": 72.85, "step": 24840, "token_acc": 0.5525291828793775, "train_speed(iter/s)": 0.669584 }, { "epoch": 1.0644359710380875, "grad_norm": 2.982491970062256, "learning_rate": 8.923734242141677e-05, "loss": 2.4868806838989257, "memory(GiB)": 72.85, "step": 24845, "token_acc": 0.5, "train_speed(iter/s)": 0.669586 }, { "epoch": 1.0646501863673365, "grad_norm": 3.9614057540893555, "learning_rate": 8.923317084223926e-05, "loss": 2.4324546813964845, "memory(GiB)": 72.85, "step": 24850, "token_acc": 0.5220883534136547, "train_speed(iter/s)": 0.669589 }, { "epoch": 1.0648644016965854, "grad_norm": 3.969945192337036, "learning_rate": 8.922899855231606e-05, "loss": 2.2925596237182617, "memory(GiB)": 72.85, "step": 24855, "token_acc": 0.5228070175438596, "train_speed(iter/s)": 0.669593 }, { "epoch": 1.0650786170258344, "grad_norm": 3.587162971496582, "learning_rate": 8.922482555172275e-05, "loss": 2.022053527832031, "memory(GiB)": 72.85, "step": 24860, "token_acc": 0.5780590717299579, "train_speed(iter/s)": 0.669606 }, { "epoch": 1.0652928323550834, "grad_norm": 
4.287528991699219, "learning_rate": 8.922065184053492e-05, "loss": 2.621006965637207, "memory(GiB)": 72.85, "step": 24865, "token_acc": 0.46863468634686345, "train_speed(iter/s)": 0.66957 }, { "epoch": 1.0655070476843322, "grad_norm": 4.192502498626709, "learning_rate": 8.92164774188282e-05, "loss": 2.3854583740234374, "memory(GiB)": 72.85, "step": 24870, "token_acc": 0.5033333333333333, "train_speed(iter/s)": 0.669558 }, { "epoch": 1.0657212630135813, "grad_norm": 3.9482128620147705, "learning_rate": 8.92123022866782e-05, "loss": 1.7389068603515625, "memory(GiB)": 72.85, "step": 24875, "token_acc": 0.6037735849056604, "train_speed(iter/s)": 0.669582 }, { "epoch": 1.0659354783428303, "grad_norm": 3.5371999740600586, "learning_rate": 8.920812644416057e-05, "loss": 2.478119468688965, "memory(GiB)": 72.85, "step": 24880, "token_acc": 0.47883597883597884, "train_speed(iter/s)": 0.66958 }, { "epoch": 1.0661496936720791, "grad_norm": 4.1243438720703125, "learning_rate": 8.920394989135094e-05, "loss": 2.336733818054199, "memory(GiB)": 72.85, "step": 24885, "token_acc": 0.4890829694323144, "train_speed(iter/s)": 0.669573 }, { "epoch": 1.0663639090013282, "grad_norm": 3.1873364448547363, "learning_rate": 8.919977262832498e-05, "loss": 2.4741842269897463, "memory(GiB)": 72.85, "step": 24890, "token_acc": 0.5125786163522013, "train_speed(iter/s)": 0.66958 }, { "epoch": 1.0665781243305772, "grad_norm": 3.740185260772705, "learning_rate": 8.919559465515837e-05, "loss": 2.1639753341674806, "memory(GiB)": 72.85, "step": 24895, "token_acc": 0.5468164794007491, "train_speed(iter/s)": 0.669593 }, { "epoch": 1.066792339659826, "grad_norm": 5.192952632904053, "learning_rate": 8.919141597192679e-05, "loss": 2.330756950378418, "memory(GiB)": 72.85, "step": 24900, "token_acc": 0.5335968379446641, "train_speed(iter/s)": 0.669579 }, { "epoch": 1.067006554989075, "grad_norm": 5.387836933135986, "learning_rate": 8.918723657870595e-05, "loss": 2.412508010864258, "memory(GiB)": 72.85, "step": 
24905, "token_acc": 0.49416342412451364, "train_speed(iter/s)": 0.669616 }, { "epoch": 1.067220770318324, "grad_norm": 3.418959379196167, "learning_rate": 8.918305647557155e-05, "loss": 2.1993070602416993, "memory(GiB)": 72.85, "step": 24910, "token_acc": 0.5231316725978647, "train_speed(iter/s)": 0.669608 }, { "epoch": 1.0674349856475729, "grad_norm": 3.2908685207366943, "learning_rate": 8.917887566259934e-05, "loss": 2.228778839111328, "memory(GiB)": 72.85, "step": 24915, "token_acc": 0.5, "train_speed(iter/s)": 0.669612 }, { "epoch": 1.067649200976822, "grad_norm": 3.582563638687134, "learning_rate": 8.917469413986504e-05, "loss": 2.202861213684082, "memory(GiB)": 72.85, "step": 24920, "token_acc": 0.541795665634675, "train_speed(iter/s)": 0.669595 }, { "epoch": 1.067863416306071, "grad_norm": 5.094380855560303, "learning_rate": 8.91705119074444e-05, "loss": 2.4222904205322267, "memory(GiB)": 72.85, "step": 24925, "token_acc": 0.4931506849315068, "train_speed(iter/s)": 0.669554 }, { "epoch": 1.0680776316353198, "grad_norm": 5.283092975616455, "learning_rate": 8.91663289654132e-05, "loss": 2.1518205642700194, "memory(GiB)": 72.85, "step": 24930, "token_acc": 0.5316455696202531, "train_speed(iter/s)": 0.669568 }, { "epoch": 1.0682918469645688, "grad_norm": 5.2186408042907715, "learning_rate": 8.916214531384719e-05, "loss": 2.133572578430176, "memory(GiB)": 72.85, "step": 24935, "token_acc": 0.5289855072463768, "train_speed(iter/s)": 0.66958 }, { "epoch": 1.0685060622938178, "grad_norm": 3.9687600135803223, "learning_rate": 8.915796095282221e-05, "loss": 2.3320615768432615, "memory(GiB)": 72.85, "step": 24940, "token_acc": 0.44972067039106145, "train_speed(iter/s)": 0.669544 }, { "epoch": 1.0687202776230667, "grad_norm": 6.120513916015625, "learning_rate": 8.915377588241401e-05, "loss": 2.0937482833862306, "memory(GiB)": 72.85, "step": 24945, "token_acc": 0.536, "train_speed(iter/s)": 0.669555 }, { "epoch": 1.0689344929523157, "grad_norm": 5.188884735107422, 
"learning_rate": 8.914959010269845e-05, "loss": 2.1741973876953127, "memory(GiB)": 72.85, "step": 24950, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.669547 }, { "epoch": 1.0691487082815647, "grad_norm": 3.894040107727051, "learning_rate": 8.914540361375132e-05, "loss": 2.4475419998168944, "memory(GiB)": 72.85, "step": 24955, "token_acc": 0.5076923076923077, "train_speed(iter/s)": 0.669555 }, { "epoch": 1.0693629236108135, "grad_norm": 5.054797649383545, "learning_rate": 8.91412164156485e-05, "loss": 2.4075092315673827, "memory(GiB)": 72.85, "step": 24960, "token_acc": 0.45714285714285713, "train_speed(iter/s)": 0.669571 }, { "epoch": 1.0695771389400626, "grad_norm": 3.304353713989258, "learning_rate": 8.913702850846581e-05, "loss": 2.3735633850097657, "memory(GiB)": 72.85, "step": 24965, "token_acc": 0.5083612040133779, "train_speed(iter/s)": 0.669574 }, { "epoch": 1.0697913542693116, "grad_norm": 3.3884098529815674, "learning_rate": 8.913283989227915e-05, "loss": 2.1314722061157227, "memory(GiB)": 72.85, "step": 24970, "token_acc": 0.5158227848101266, "train_speed(iter/s)": 0.669594 }, { "epoch": 1.0700055695985604, "grad_norm": 3.8201496601104736, "learning_rate": 8.912865056716437e-05, "loss": 2.1827070236206056, "memory(GiB)": 72.85, "step": 24975, "token_acc": 0.5211726384364821, "train_speed(iter/s)": 0.669559 }, { "epoch": 1.0702197849278094, "grad_norm": 4.704988956451416, "learning_rate": 8.91244605331974e-05, "loss": 2.3475648880004885, "memory(GiB)": 72.85, "step": 24980, "token_acc": 0.4794520547945205, "train_speed(iter/s)": 0.669551 }, { "epoch": 1.0704340002570585, "grad_norm": 3.850278615951538, "learning_rate": 8.912026979045411e-05, "loss": 2.401318359375, "memory(GiB)": 72.85, "step": 24985, "token_acc": 0.48314606741573035, "train_speed(iter/s)": 0.669578 }, { "epoch": 1.0706482155863073, "grad_norm": 4.026090621948242, "learning_rate": 8.911607833901045e-05, "loss": 2.464438247680664, "memory(GiB)": 72.85, "step": 24990, 
"token_acc": 0.4674329501915709, "train_speed(iter/s)": 0.669602 }, { "epoch": 1.0708624309155563, "grad_norm": 3.8340678215026855, "learning_rate": 8.911188617894233e-05, "loss": 2.1411470413208007, "memory(GiB)": 72.85, "step": 24995, "token_acc": 0.5171102661596958, "train_speed(iter/s)": 0.669601 }, { "epoch": 1.0710766462448054, "grad_norm": 3.014845371246338, "learning_rate": 8.91076933103257e-05, "loss": 2.2530460357666016, "memory(GiB)": 72.85, "step": 25000, "token_acc": 0.47126436781609193, "train_speed(iter/s)": 0.669602 }, { "epoch": 1.0710766462448054, "eval_loss": 2.0495495796203613, "eval_runtime": 17.567, "eval_samples_per_second": 5.692, "eval_steps_per_second": 5.692, "eval_token_acc": 0.4977973568281938, "step": 25000 }, { "epoch": 1.0712908615740542, "grad_norm": 4.369643688201904, "learning_rate": 8.910349973323651e-05, "loss": 2.294796943664551, "memory(GiB)": 72.85, "step": 25005, "token_acc": 0.49603960396039604, "train_speed(iter/s)": 0.669249 }, { "epoch": 1.0715050769033032, "grad_norm": 3.7608752250671387, "learning_rate": 8.909930544775076e-05, "loss": 2.1117828369140623, "memory(GiB)": 72.85, "step": 25010, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.669263 }, { "epoch": 1.0717192922325522, "grad_norm": 4.194964408874512, "learning_rate": 8.90951104539444e-05, "loss": 2.274433708190918, "memory(GiB)": 72.85, "step": 25015, "token_acc": 0.5, "train_speed(iter/s)": 0.669273 }, { "epoch": 1.071933507561801, "grad_norm": 4.462652206420898, "learning_rate": 8.909091475189344e-05, "loss": 2.2689231872558593, "memory(GiB)": 72.85, "step": 25020, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.669232 }, { "epoch": 1.07214772289105, "grad_norm": 5.6490912437438965, "learning_rate": 8.90867183416739e-05, "loss": 2.341214179992676, "memory(GiB)": 72.85, "step": 25025, "token_acc": 0.46757679180887374, "train_speed(iter/s)": 0.669243 }, { "epoch": 1.0723619382202991, "grad_norm": 3.817026376724243, "learning_rate": 
8.908252122336178e-05, "loss": 2.064630889892578, "memory(GiB)": 72.85, "step": 25030, "token_acc": 0.5409252669039146, "train_speed(iter/s)": 0.669262 }, { "epoch": 1.072576153549548, "grad_norm": 3.274174213409424, "learning_rate": 8.907832339703313e-05, "loss": 2.1393394470214844, "memory(GiB)": 72.85, "step": 25035, "token_acc": 0.543918918918919, "train_speed(iter/s)": 0.669248 }, { "epoch": 1.072790368878797, "grad_norm": 4.55834436416626, "learning_rate": 8.907412486276399e-05, "loss": 2.239279365539551, "memory(GiB)": 72.85, "step": 25040, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.669222 }, { "epoch": 1.073004584208046, "grad_norm": 5.008718013763428, "learning_rate": 8.906992562063043e-05, "loss": 2.355909538269043, "memory(GiB)": 72.85, "step": 25045, "token_acc": 0.5, "train_speed(iter/s)": 0.669258 }, { "epoch": 1.0732187995372948, "grad_norm": 4.085183620452881, "learning_rate": 8.90657256707085e-05, "loss": 2.3366931915283202, "memory(GiB)": 72.85, "step": 25050, "token_acc": 0.4873417721518987, "train_speed(iter/s)": 0.669274 }, { "epoch": 1.0734330148665439, "grad_norm": 4.891236305236816, "learning_rate": 8.906152501307432e-05, "loss": 2.3216325759887697, "memory(GiB)": 72.85, "step": 25055, "token_acc": 0.49070631970260226, "train_speed(iter/s)": 0.669254 }, { "epoch": 1.073647230195793, "grad_norm": 3.7943506240844727, "learning_rate": 8.905732364780398e-05, "loss": 2.3385150909423826, "memory(GiB)": 72.85, "step": 25060, "token_acc": 0.5, "train_speed(iter/s)": 0.669233 }, { "epoch": 1.0738614455250417, "grad_norm": 3.9685845375061035, "learning_rate": 8.905312157497356e-05, "loss": 2.201896858215332, "memory(GiB)": 72.85, "step": 25065, "token_acc": 0.5, "train_speed(iter/s)": 0.669244 }, { "epoch": 1.0740756608542907, "grad_norm": 4.098239421844482, "learning_rate": 8.904891879465922e-05, "loss": 2.290470504760742, "memory(GiB)": 72.85, "step": 25070, "token_acc": 0.49538461538461537, "train_speed(iter/s)": 0.669273 }, { 
"epoch": 1.0742898761835398, "grad_norm": 3.453425168991089, "learning_rate": 8.904471530693709e-05, "loss": 2.181517791748047, "memory(GiB)": 72.85, "step": 25075, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.66927 }, { "epoch": 1.0745040915127886, "grad_norm": 3.3159019947052, "learning_rate": 8.90405111118833e-05, "loss": 2.024066925048828, "memory(GiB)": 72.85, "step": 25080, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.669259 }, { "epoch": 1.0747183068420376, "grad_norm": 4.358461380004883, "learning_rate": 8.903630620957403e-05, "loss": 2.232524871826172, "memory(GiB)": 72.85, "step": 25085, "token_acc": 0.49328859060402686, "train_speed(iter/s)": 0.669277 }, { "epoch": 1.0749325221712867, "grad_norm": 3.8327558040618896, "learning_rate": 8.903210060008547e-05, "loss": 2.415380096435547, "memory(GiB)": 72.85, "step": 25090, "token_acc": 0.4542857142857143, "train_speed(iter/s)": 0.669258 }, { "epoch": 1.0751467375005355, "grad_norm": 3.6711084842681885, "learning_rate": 8.902789428349379e-05, "loss": 2.4206798553466795, "memory(GiB)": 72.85, "step": 25095, "token_acc": 0.5092348284960422, "train_speed(iter/s)": 0.669282 }, { "epoch": 1.0753609528297845, "grad_norm": 3.204024076461792, "learning_rate": 8.902368725987518e-05, "loss": 2.35156307220459, "memory(GiB)": 72.85, "step": 25100, "token_acc": 0.47096774193548385, "train_speed(iter/s)": 0.669305 }, { "epoch": 1.0755751681590335, "grad_norm": 5.4614458084106445, "learning_rate": 8.901947952930588e-05, "loss": 2.4078887939453124, "memory(GiB)": 72.85, "step": 25105, "token_acc": 0.4704225352112676, "train_speed(iter/s)": 0.669331 }, { "epoch": 1.0757893834882823, "grad_norm": 4.176644802093506, "learning_rate": 8.90152710918621e-05, "loss": 2.1159826278686524, "memory(GiB)": 72.85, "step": 25110, "token_acc": 0.5672727272727273, "train_speed(iter/s)": 0.66932 }, { "epoch": 1.0760035988175314, "grad_norm": 3.3364436626434326, "learning_rate": 8.90110619476201e-05, "loss": 
2.2628301620483398, "memory(GiB)": 72.85, "step": 25115, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.66931 }, { "epoch": 1.0762178141467804, "grad_norm": 3.722672700881958, "learning_rate": 8.900685209665609e-05, "loss": 2.2494346618652346, "memory(GiB)": 72.85, "step": 25120, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.66935 }, { "epoch": 1.0764320294760292, "grad_norm": 4.133214950561523, "learning_rate": 8.900264153904639e-05, "loss": 2.3951114654541015, "memory(GiB)": 72.85, "step": 25125, "token_acc": 0.48909657320872274, "train_speed(iter/s)": 0.669372 }, { "epoch": 1.0766462448052783, "grad_norm": 4.978464603424072, "learning_rate": 8.899843027486723e-05, "loss": 2.1095638275146484, "memory(GiB)": 72.85, "step": 25130, "token_acc": 0.5242290748898678, "train_speed(iter/s)": 0.66936 }, { "epoch": 1.0768604601345273, "grad_norm": 3.272484302520752, "learning_rate": 8.899421830419492e-05, "loss": 2.3584611892700194, "memory(GiB)": 72.85, "step": 25135, "token_acc": 0.46646341463414637, "train_speed(iter/s)": 0.669379 }, { "epoch": 1.077074675463776, "grad_norm": 4.522555828094482, "learning_rate": 8.899000562710578e-05, "loss": 2.548640251159668, "memory(GiB)": 72.85, "step": 25140, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.669405 }, { "epoch": 1.0772888907930251, "grad_norm": 3.6022369861602783, "learning_rate": 8.898579224367611e-05, "loss": 2.203339767456055, "memory(GiB)": 72.85, "step": 25145, "token_acc": 0.536741214057508, "train_speed(iter/s)": 0.669422 }, { "epoch": 1.0775031061222742, "grad_norm": 4.264606475830078, "learning_rate": 8.898157815398224e-05, "loss": 2.361963653564453, "memory(GiB)": 72.85, "step": 25150, "token_acc": 0.483974358974359, "train_speed(iter/s)": 0.669417 }, { "epoch": 1.077717321451523, "grad_norm": 4.998246669769287, "learning_rate": 8.897736335810052e-05, "loss": 2.2578399658203123, "memory(GiB)": 72.85, "step": 25155, "token_acc": 0.4890282131661442, "train_speed(iter/s)": 
0.669442 }, { "epoch": 1.077931536780772, "grad_norm": 3.795400381088257, "learning_rate": 8.897314785610728e-05, "loss": 2.4403636932373045, "memory(GiB)": 72.85, "step": 25160, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.669464 }, { "epoch": 1.078145752110021, "grad_norm": 3.7571916580200195, "learning_rate": 8.896893164807892e-05, "loss": 2.428822326660156, "memory(GiB)": 72.85, "step": 25165, "token_acc": 0.4633333333333333, "train_speed(iter/s)": 0.669468 }, { "epoch": 1.0783599674392699, "grad_norm": 3.1951427459716797, "learning_rate": 8.896471473409181e-05, "loss": 2.415924644470215, "memory(GiB)": 72.85, "step": 25170, "token_acc": 0.46488294314381273, "train_speed(iter/s)": 0.669482 }, { "epoch": 1.078574182768519, "grad_norm": 4.102923393249512, "learning_rate": 8.896049711422235e-05, "loss": 2.363081359863281, "memory(GiB)": 72.85, "step": 25175, "token_acc": 0.5056603773584906, "train_speed(iter/s)": 0.669441 }, { "epoch": 1.078788398097768, "grad_norm": 3.2982752323150635, "learning_rate": 8.895627878854692e-05, "loss": 2.269643211364746, "memory(GiB)": 72.85, "step": 25180, "token_acc": 0.5056179775280899, "train_speed(iter/s)": 0.669421 }, { "epoch": 1.0790026134270168, "grad_norm": 6.62733268737793, "learning_rate": 8.895205975714195e-05, "loss": 2.323639488220215, "memory(GiB)": 72.85, "step": 25185, "token_acc": 0.5408805031446541, "train_speed(iter/s)": 0.669419 }, { "epoch": 1.0792168287562658, "grad_norm": 4.630000114440918, "learning_rate": 8.894784002008389e-05, "loss": 2.2428251266479493, "memory(GiB)": 72.85, "step": 25190, "token_acc": 0.5187713310580204, "train_speed(iter/s)": 0.669396 }, { "epoch": 1.0794310440855148, "grad_norm": 4.564755916595459, "learning_rate": 8.894361957744917e-05, "loss": 2.5467838287353515, "memory(GiB)": 72.85, "step": 25195, "token_acc": 0.4276729559748428, "train_speed(iter/s)": 0.669386 }, { "epoch": 1.0796452594147636, "grad_norm": 3.9983069896698, "learning_rate": 8.893939842931425e-05, 
"loss": 2.2678314208984376, "memory(GiB)": 72.85, "step": 25200, "token_acc": 0.48184818481848185, "train_speed(iter/s)": 0.669395 }, { "epoch": 1.0798594747440127, "grad_norm": 3.6243984699249268, "learning_rate": 8.89351765757556e-05, "loss": 2.372212600708008, "memory(GiB)": 72.85, "step": 25205, "token_acc": 0.47904191616766467, "train_speed(iter/s)": 0.669405 }, { "epoch": 1.0800736900732617, "grad_norm": 3.9062600135803223, "learning_rate": 8.89309540168497e-05, "loss": 2.342168426513672, "memory(GiB)": 72.85, "step": 25210, "token_acc": 0.4748201438848921, "train_speed(iter/s)": 0.669424 }, { "epoch": 1.0802879054025105, "grad_norm": 5.477530479431152, "learning_rate": 8.892673075267304e-05, "loss": 2.2428722381591797, "memory(GiB)": 72.85, "step": 25215, "token_acc": 0.49612403100775193, "train_speed(iter/s)": 0.669383 }, { "epoch": 1.0805021207317596, "grad_norm": 3.908940553665161, "learning_rate": 8.892250678330216e-05, "loss": 2.275430679321289, "memory(GiB)": 72.85, "step": 25220, "token_acc": 0.5087108013937283, "train_speed(iter/s)": 0.669355 }, { "epoch": 1.0807163360610086, "grad_norm": 4.543735504150391, "learning_rate": 8.891828210881353e-05, "loss": 2.351797866821289, "memory(GiB)": 72.85, "step": 25225, "token_acc": 0.5078864353312302, "train_speed(iter/s)": 0.669356 }, { "epoch": 1.0809305513902574, "grad_norm": 4.508285045623779, "learning_rate": 8.891405672928373e-05, "loss": 2.419955253601074, "memory(GiB)": 72.85, "step": 25230, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.669372 }, { "epoch": 1.0811447667195064, "grad_norm": 5.165104866027832, "learning_rate": 8.890983064478928e-05, "loss": 2.3852115631103517, "memory(GiB)": 72.85, "step": 25235, "token_acc": 0.4931506849315068, "train_speed(iter/s)": 0.669389 }, { "epoch": 1.0813589820487555, "grad_norm": 3.4083802700042725, "learning_rate": 8.890560385540675e-05, "loss": 2.2443115234375, "memory(GiB)": 72.85, "step": 25240, "token_acc": 0.5, "train_speed(iter/s)": 0.669369 
}, { "epoch": 1.0815731973780043, "grad_norm": 3.7194149494171143, "learning_rate": 8.890137636121271e-05, "loss": 2.489371681213379, "memory(GiB)": 72.85, "step": 25245, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.669349 }, { "epoch": 1.0817874127072533, "grad_norm": 4.228519916534424, "learning_rate": 8.889714816228374e-05, "loss": 2.407736968994141, "memory(GiB)": 72.85, "step": 25250, "token_acc": 0.47648902821316613, "train_speed(iter/s)": 0.669371 }, { "epoch": 1.0820016280365023, "grad_norm": 4.682793140411377, "learning_rate": 8.889291925869646e-05, "loss": 2.363298225402832, "memory(GiB)": 72.85, "step": 25255, "token_acc": 0.4952978056426332, "train_speed(iter/s)": 0.669383 }, { "epoch": 1.0822158433657512, "grad_norm": 4.375211715698242, "learning_rate": 8.888868965052743e-05, "loss": 2.6041728973388674, "memory(GiB)": 72.85, "step": 25260, "token_acc": 0.4385026737967914, "train_speed(iter/s)": 0.669377 }, { "epoch": 1.0824300586950002, "grad_norm": 4.203679084777832, "learning_rate": 8.888445933785333e-05, "loss": 2.2117889404296873, "memory(GiB)": 72.85, "step": 25265, "token_acc": 0.5436241610738255, "train_speed(iter/s)": 0.669388 }, { "epoch": 1.0826442740242492, "grad_norm": 4.103422164916992, "learning_rate": 8.888022832075076e-05, "loss": 2.1675540924072267, "memory(GiB)": 72.85, "step": 25270, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.669363 }, { "epoch": 1.082858489353498, "grad_norm": 3.89609956741333, "learning_rate": 8.887599659929639e-05, "loss": 2.2529163360595703, "memory(GiB)": 72.85, "step": 25275, "token_acc": 0.5317919075144508, "train_speed(iter/s)": 0.669354 }, { "epoch": 1.083072704682747, "grad_norm": 3.5435566902160645, "learning_rate": 8.887176417356688e-05, "loss": 2.659628486633301, "memory(GiB)": 72.85, "step": 25280, "token_acc": 0.4323529411764706, "train_speed(iter/s)": 0.669364 }, { "epoch": 1.0832869200119961, "grad_norm": 4.556239604949951, "learning_rate": 8.88675310436389e-05, "loss": 
2.2292484283447265, "memory(GiB)": 72.85, "step": 25285, "token_acc": 0.4925925925925926, "train_speed(iter/s)": 0.669363 }, { "epoch": 1.083501135341245, "grad_norm": 4.538937091827393, "learning_rate": 8.886329720958911e-05, "loss": 2.231167221069336, "memory(GiB)": 72.85, "step": 25290, "token_acc": 0.5209125475285171, "train_speed(iter/s)": 0.669368 }, { "epoch": 1.083715350670494, "grad_norm": 4.397746562957764, "learning_rate": 8.885906267149425e-05, "loss": 2.140619468688965, "memory(GiB)": 72.85, "step": 25295, "token_acc": 0.5129151291512916, "train_speed(iter/s)": 0.669375 }, { "epoch": 1.083929565999743, "grad_norm": 9.505019187927246, "learning_rate": 8.885482742943102e-05, "loss": 2.588969039916992, "memory(GiB)": 72.85, "step": 25300, "token_acc": 0.4485294117647059, "train_speed(iter/s)": 0.669367 }, { "epoch": 1.0841437813289918, "grad_norm": 3.8227388858795166, "learning_rate": 8.885059148347615e-05, "loss": 2.3618255615234376, "memory(GiB)": 72.85, "step": 25305, "token_acc": 0.48333333333333334, "train_speed(iter/s)": 0.669364 }, { "epoch": 1.0843579966582408, "grad_norm": 5.239981174468994, "learning_rate": 8.884635483370635e-05, "loss": 2.1285205841064454, "memory(GiB)": 72.85, "step": 25310, "token_acc": 0.5086505190311419, "train_speed(iter/s)": 0.669347 }, { "epoch": 1.0845722119874899, "grad_norm": 3.5666685104370117, "learning_rate": 8.88421174801984e-05, "loss": 2.180691146850586, "memory(GiB)": 72.85, "step": 25315, "token_acc": 0.5048231511254019, "train_speed(iter/s)": 0.669348 }, { "epoch": 1.0847864273167387, "grad_norm": 3.482680320739746, "learning_rate": 8.883787942302906e-05, "loss": 2.388117790222168, "memory(GiB)": 72.85, "step": 25320, "token_acc": 0.46273291925465837, "train_speed(iter/s)": 0.669359 }, { "epoch": 1.0850006426459877, "grad_norm": 6.447813034057617, "learning_rate": 8.883364066227508e-05, "loss": 2.361981201171875, "memory(GiB)": 72.85, "step": 25325, "token_acc": 0.4854014598540146, "train_speed(iter/s)": 
0.669389 }, { "epoch": 1.0852148579752368, "grad_norm": 3.386073350906372, "learning_rate": 8.88294011980133e-05, "loss": 2.5524654388427734, "memory(GiB)": 72.85, "step": 25330, "token_acc": 0.4901315789473684, "train_speed(iter/s)": 0.669413 }, { "epoch": 1.0854290733044856, "grad_norm": 5.175686836242676, "learning_rate": 8.882516103032048e-05, "loss": 2.688035583496094, "memory(GiB)": 72.85, "step": 25335, "token_acc": 0.4447592067988669, "train_speed(iter/s)": 0.669395 }, { "epoch": 1.0856432886337346, "grad_norm": 3.312906503677368, "learning_rate": 8.882092015927344e-05, "loss": 2.1587173461914064, "memory(GiB)": 72.85, "step": 25340, "token_acc": 0.4898785425101215, "train_speed(iter/s)": 0.669355 }, { "epoch": 1.0858575039629836, "grad_norm": 5.992039680480957, "learning_rate": 8.881667858494903e-05, "loss": 2.4397411346435547, "memory(GiB)": 72.85, "step": 25345, "token_acc": 0.4946236559139785, "train_speed(iter/s)": 0.669351 }, { "epoch": 1.0860717192922325, "grad_norm": 4.138718605041504, "learning_rate": 8.881243630742406e-05, "loss": 2.4338993072509765, "memory(GiB)": 72.85, "step": 25350, "token_acc": 0.48742138364779874, "train_speed(iter/s)": 0.66934 }, { "epoch": 1.0862859346214815, "grad_norm": 5.102180004119873, "learning_rate": 8.880819332677539e-05, "loss": 2.468877410888672, "memory(GiB)": 72.85, "step": 25355, "token_acc": 0.519434628975265, "train_speed(iter/s)": 0.669338 }, { "epoch": 1.0865001499507305, "grad_norm": 4.066393852233887, "learning_rate": 8.880394964307991e-05, "loss": 2.283486747741699, "memory(GiB)": 72.85, "step": 25360, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.669343 }, { "epoch": 1.0867143652799793, "grad_norm": 4.029842376708984, "learning_rate": 8.879970525641448e-05, "loss": 2.5186134338378907, "memory(GiB)": 72.85, "step": 25365, "token_acc": 0.45294117647058824, "train_speed(iter/s)": 0.669367 }, { "epoch": 1.0869285806092284, "grad_norm": 5.613079071044922, "learning_rate": 8.879546016685599e-05, 
"loss": 2.140548324584961, "memory(GiB)": 72.85, "step": 25370, "token_acc": 0.5571955719557196, "train_speed(iter/s)": 0.669368 }, { "epoch": 1.0871427959384774, "grad_norm": 3.9483349323272705, "learning_rate": 8.879121437448134e-05, "loss": 2.4035629272460937, "memory(GiB)": 72.85, "step": 25375, "token_acc": 0.4967532467532468, "train_speed(iter/s)": 0.669388 }, { "epoch": 1.0873570112677262, "grad_norm": 4.292850017547607, "learning_rate": 8.878696787936746e-05, "loss": 2.6411903381347654, "memory(GiB)": 72.85, "step": 25380, "token_acc": 0.4735202492211838, "train_speed(iter/s)": 0.669393 }, { "epoch": 1.0875712265969752, "grad_norm": 4.5878095626831055, "learning_rate": 8.878272068159126e-05, "loss": 2.3255191802978517, "memory(GiB)": 72.85, "step": 25385, "token_acc": 0.49836065573770494, "train_speed(iter/s)": 0.669399 }, { "epoch": 1.0877854419262243, "grad_norm": 4.642908573150635, "learning_rate": 8.87784727812297e-05, "loss": 1.9965370178222657, "memory(GiB)": 72.85, "step": 25390, "token_acc": 0.48582995951417, "train_speed(iter/s)": 0.669417 }, { "epoch": 1.087999657255473, "grad_norm": 4.821013927459717, "learning_rate": 8.877422417835973e-05, "loss": 2.7959272384643556, "memory(GiB)": 72.85, "step": 25395, "token_acc": 0.47635135135135137, "train_speed(iter/s)": 0.669428 }, { "epoch": 1.0882138725847221, "grad_norm": 3.7129366397857666, "learning_rate": 8.876997487305831e-05, "loss": 2.266365814208984, "memory(GiB)": 72.85, "step": 25400, "token_acc": 0.5, "train_speed(iter/s)": 0.669435 }, { "epoch": 1.0884280879139712, "grad_norm": 4.199863433837891, "learning_rate": 8.876572486540244e-05, "loss": 2.4101287841796877, "memory(GiB)": 72.85, "step": 25405, "token_acc": 0.4696969696969697, "train_speed(iter/s)": 0.669455 }, { "epoch": 1.08864230324322, "grad_norm": 2.805032253265381, "learning_rate": 8.876147415546909e-05, "loss": 2.089629364013672, "memory(GiB)": 72.85, "step": 25410, "token_acc": 0.5335689045936396, "train_speed(iter/s)": 0.669474 
}, { "epoch": 1.088856518572469, "grad_norm": 3.818619728088379, "learning_rate": 8.875722274333528e-05, "loss": 2.123616027832031, "memory(GiB)": 72.85, "step": 25415, "token_acc": 0.5703971119133574, "train_speed(iter/s)": 0.669483 }, { "epoch": 1.089070733901718, "grad_norm": 5.575611114501953, "learning_rate": 8.875297062907801e-05, "loss": 2.4749372482299803, "memory(GiB)": 72.85, "step": 25420, "token_acc": 0.5308641975308642, "train_speed(iter/s)": 0.669452 }, { "epoch": 1.0892849492309669, "grad_norm": 4.030648231506348, "learning_rate": 8.874871781277435e-05, "loss": 2.4990272521972656, "memory(GiB)": 72.85, "step": 25425, "token_acc": 0.5042016806722689, "train_speed(iter/s)": 0.669465 }, { "epoch": 1.089499164560216, "grad_norm": 3.6642305850982666, "learning_rate": 8.87444642945013e-05, "loss": 2.376732063293457, "memory(GiB)": 72.85, "step": 25430, "token_acc": 0.48028673835125446, "train_speed(iter/s)": 0.669502 }, { "epoch": 1.089713379889465, "grad_norm": 5.702221393585205, "learning_rate": 8.874021007433593e-05, "loss": 2.5493335723876953, "memory(GiB)": 72.85, "step": 25435, "token_acc": 0.47315436241610737, "train_speed(iter/s)": 0.669509 }, { "epoch": 1.0899275952187137, "grad_norm": 4.00006103515625, "learning_rate": 8.87359551523553e-05, "loss": 2.518582344055176, "memory(GiB)": 72.85, "step": 25440, "token_acc": 0.465625, "train_speed(iter/s)": 0.669536 }, { "epoch": 1.0901418105479628, "grad_norm": 5.622408866882324, "learning_rate": 8.873169952863654e-05, "loss": 2.2508935928344727, "memory(GiB)": 72.85, "step": 25445, "token_acc": 0.53515625, "train_speed(iter/s)": 0.669526 }, { "epoch": 1.0903560258772118, "grad_norm": 4.021664142608643, "learning_rate": 8.87274432032567e-05, "loss": 2.3382951736450197, "memory(GiB)": 72.85, "step": 25450, "token_acc": 0.5338345864661654, "train_speed(iter/s)": 0.669544 }, { "epoch": 1.0905702412064606, "grad_norm": 6.988734722137451, "learning_rate": 8.872318617629287e-05, "loss": 2.4738229751586913, 
"memory(GiB)": 72.85, "step": 25455, "token_acc": 0.5052264808362369, "train_speed(iter/s)": 0.669547 }, { "epoch": 1.0907844565357097, "grad_norm": 4.084742069244385, "learning_rate": 8.871892844782223e-05, "loss": 2.3662654876708986, "memory(GiB)": 72.85, "step": 25460, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.669568 }, { "epoch": 1.0909986718649587, "grad_norm": 4.841038703918457, "learning_rate": 8.871467001792186e-05, "loss": 2.4096874237060546, "memory(GiB)": 72.85, "step": 25465, "token_acc": 0.43508771929824563, "train_speed(iter/s)": 0.66958 }, { "epoch": 1.0912128871942075, "grad_norm": 3.32814621925354, "learning_rate": 8.871041088666894e-05, "loss": 2.295318603515625, "memory(GiB)": 72.85, "step": 25470, "token_acc": 0.48363636363636364, "train_speed(iter/s)": 0.669586 }, { "epoch": 1.0914271025234565, "grad_norm": 5.318390369415283, "learning_rate": 8.870615105414059e-05, "loss": 2.5874439239501954, "memory(GiB)": 72.85, "step": 25475, "token_acc": 0.4477124183006536, "train_speed(iter/s)": 0.669607 }, { "epoch": 1.0916413178527056, "grad_norm": 4.7261643409729, "learning_rate": 8.870189052041402e-05, "loss": 2.3039779663085938, "memory(GiB)": 72.85, "step": 25480, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.669634 }, { "epoch": 1.0918555331819544, "grad_norm": 3.8388590812683105, "learning_rate": 8.86976292855664e-05, "loss": 2.1992605209350584, "memory(GiB)": 72.85, "step": 25485, "token_acc": 0.5144694533762058, "train_speed(iter/s)": 0.669609 }, { "epoch": 1.0920697485112034, "grad_norm": 5.281728267669678, "learning_rate": 8.869336734967493e-05, "loss": 2.324992561340332, "memory(GiB)": 72.85, "step": 25490, "token_acc": 0.5149253731343284, "train_speed(iter/s)": 0.669613 }, { "epoch": 1.0922839638404525, "grad_norm": 2.836423635482788, "learning_rate": 8.868910471281679e-05, "loss": 2.4680429458618165, "memory(GiB)": 72.85, "step": 25495, "token_acc": 0.4817073170731707, "train_speed(iter/s)": 0.669612 }, { 
"epoch": 1.0924981791697013, "grad_norm": 4.660342216491699, "learning_rate": 8.868484137506925e-05, "loss": 2.4916452407836913, "memory(GiB)": 72.85, "step": 25500, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.66961 }, { "epoch": 1.0924981791697013, "eval_loss": 2.029500961303711, "eval_runtime": 17.0019, "eval_samples_per_second": 5.882, "eval_steps_per_second": 5.882, "eval_token_acc": 0.46616541353383456, "step": 25500 }, { "epoch": 1.0927123944989503, "grad_norm": 4.832819938659668, "learning_rate": 8.86805773365095e-05, "loss": 2.187275505065918, "memory(GiB)": 72.85, "step": 25505, "token_acc": 0.4751640112464855, "train_speed(iter/s)": 0.66923 }, { "epoch": 1.0929266098281993, "grad_norm": 3.889902114868164, "learning_rate": 8.867631259721483e-05, "loss": 2.109565544128418, "memory(GiB)": 72.85, "step": 25510, "token_acc": 0.5491525423728814, "train_speed(iter/s)": 0.669261 }, { "epoch": 1.0931408251574484, "grad_norm": 3.7067513465881348, "learning_rate": 8.867204715726245e-05, "loss": 2.197999954223633, "memory(GiB)": 72.85, "step": 25515, "token_acc": 0.504885993485342, "train_speed(iter/s)": 0.669277 }, { "epoch": 1.0933550404866972, "grad_norm": 3.612788438796997, "learning_rate": 8.866778101672968e-05, "loss": 2.2911128997802734, "memory(GiB)": 72.85, "step": 25520, "token_acc": 0.5181518151815182, "train_speed(iter/s)": 0.669271 }, { "epoch": 1.0935692558159462, "grad_norm": 5.298141002655029, "learning_rate": 8.866351417569376e-05, "loss": 2.2173343658447267, "memory(GiB)": 72.85, "step": 25525, "token_acc": 0.5420875420875421, "train_speed(iter/s)": 0.669272 }, { "epoch": 1.0937834711451953, "grad_norm": 15.451239585876465, "learning_rate": 8.865924663423203e-05, "loss": 2.5093206405639648, "memory(GiB)": 72.85, "step": 25530, "token_acc": 0.517799352750809, "train_speed(iter/s)": 0.669291 }, { "epoch": 1.093997686474444, "grad_norm": 4.517609119415283, "learning_rate": 8.865497839242179e-05, "loss": 2.6539783477783203, "memory(GiB)": 
72.85, "step": 25535, "token_acc": 0.42950819672131146, "train_speed(iter/s)": 0.669301 }, { "epoch": 1.094211901803693, "grad_norm": 4.219753742218018, "learning_rate": 8.865070945034033e-05, "loss": 2.1798934936523438, "memory(GiB)": 72.85, "step": 25540, "token_acc": 0.5288888888888889, "train_speed(iter/s)": 0.669329 }, { "epoch": 1.0944261171329421, "grad_norm": 3.233691453933716, "learning_rate": 8.864643980806501e-05, "loss": 2.3554462432861327, "memory(GiB)": 72.85, "step": 25545, "token_acc": 0.4722222222222222, "train_speed(iter/s)": 0.669314 }, { "epoch": 1.094640332462191, "grad_norm": 5.295504570007324, "learning_rate": 8.864216946567321e-05, "loss": 2.244951629638672, "memory(GiB)": 72.85, "step": 25550, "token_acc": 0.5461254612546126, "train_speed(iter/s)": 0.669308 }, { "epoch": 1.09485454779144, "grad_norm": 4.262939929962158, "learning_rate": 8.863789842324224e-05, "loss": 2.427484321594238, "memory(GiB)": 72.85, "step": 25555, "token_acc": 0.5, "train_speed(iter/s)": 0.669306 }, { "epoch": 1.095068763120689, "grad_norm": 4.866933345794678, "learning_rate": 8.86336266808495e-05, "loss": 2.670512580871582, "memory(GiB)": 72.85, "step": 25560, "token_acc": 0.45936395759717313, "train_speed(iter/s)": 0.669314 }, { "epoch": 1.0952829784499378, "grad_norm": 3.332897424697876, "learning_rate": 8.862935423857236e-05, "loss": 2.3970005035400392, "memory(GiB)": 72.85, "step": 25565, "token_acc": 0.49491525423728816, "train_speed(iter/s)": 0.669323 }, { "epoch": 1.0954971937791869, "grad_norm": 4.366113185882568, "learning_rate": 8.862508109648823e-05, "loss": 2.444970703125, "memory(GiB)": 72.85, "step": 25570, "token_acc": 0.45422535211267606, "train_speed(iter/s)": 0.669346 }, { "epoch": 1.095711409108436, "grad_norm": 4.458312034606934, "learning_rate": 8.862080725467454e-05, "loss": 2.448086166381836, "memory(GiB)": 72.85, "step": 25575, "token_acc": 0.5346153846153846, "train_speed(iter/s)": 0.66936 }, { "epoch": 1.0959256244376847, "grad_norm": 
3.585529088973999, "learning_rate": 8.861653271320869e-05, "loss": 2.14565372467041, "memory(GiB)": 72.85, "step": 25580, "token_acc": 0.503030303030303, "train_speed(iter/s)": 0.669378 }, { "epoch": 1.0961398397669337, "grad_norm": 5.392632961273193, "learning_rate": 8.861225747216812e-05, "loss": 2.493033599853516, "memory(GiB)": 72.85, "step": 25585, "token_acc": 0.461038961038961, "train_speed(iter/s)": 0.669371 }, { "epoch": 1.0963540550961828, "grad_norm": 3.4101905822753906, "learning_rate": 8.86079815316303e-05, "loss": 2.471866798400879, "memory(GiB)": 72.85, "step": 25590, "token_acc": 0.4585987261146497, "train_speed(iter/s)": 0.669388 }, { "epoch": 1.0965682704254316, "grad_norm": 3.8324761390686035, "learning_rate": 8.860370489167267e-05, "loss": 2.426805305480957, "memory(GiB)": 72.85, "step": 25595, "token_acc": 0.5150501672240803, "train_speed(iter/s)": 0.669411 }, { "epoch": 1.0967824857546806, "grad_norm": 5.711782932281494, "learning_rate": 8.859942755237271e-05, "loss": 2.1872631072998048, "memory(GiB)": 72.85, "step": 25600, "token_acc": 0.5364963503649635, "train_speed(iter/s)": 0.669413 }, { "epoch": 1.0969967010839297, "grad_norm": 3.1554057598114014, "learning_rate": 8.85951495138079e-05, "loss": 2.36427001953125, "memory(GiB)": 72.85, "step": 25605, "token_acc": 0.47229551451187335, "train_speed(iter/s)": 0.669419 }, { "epoch": 1.0972109164131785, "grad_norm": 3.6652750968933105, "learning_rate": 8.859087077605577e-05, "loss": 2.4045522689819334, "memory(GiB)": 72.85, "step": 25610, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.669437 }, { "epoch": 1.0974251317424275, "grad_norm": 3.917619228363037, "learning_rate": 8.858659133919381e-05, "loss": 2.4722700119018555, "memory(GiB)": 72.85, "step": 25615, "token_acc": 0.5031446540880503, "train_speed(iter/s)": 0.669428 }, { "epoch": 1.0976393470716765, "grad_norm": 4.060898780822754, "learning_rate": 8.858231120329954e-05, "loss": 2.1010929107666017, "memory(GiB)": 72.85, "step": 
25620, "token_acc": 0.509090909090909, "train_speed(iter/s)": 0.66946 }, { "epoch": 1.0978535624009254, "grad_norm": 3.926894187927246, "learning_rate": 8.857803036845052e-05, "loss": 2.4670654296875, "memory(GiB)": 72.85, "step": 25625, "token_acc": 0.48355263157894735, "train_speed(iter/s)": 0.669484 }, { "epoch": 1.0980677777301744, "grad_norm": 3.898453712463379, "learning_rate": 8.857374883472428e-05, "loss": 2.5989748001098634, "memory(GiB)": 72.85, "step": 25630, "token_acc": 0.47183098591549294, "train_speed(iter/s)": 0.669456 }, { "epoch": 1.0982819930594234, "grad_norm": 3.9476358890533447, "learning_rate": 8.85694666021984e-05, "loss": 2.1207401275634767, "memory(GiB)": 72.85, "step": 25635, "token_acc": 0.5363321799307958, "train_speed(iter/s)": 0.669473 }, { "epoch": 1.0984962083886722, "grad_norm": 4.0342888832092285, "learning_rate": 8.856518367095045e-05, "loss": 2.2353466033935545, "memory(GiB)": 72.85, "step": 25640, "token_acc": 0.49504950495049505, "train_speed(iter/s)": 0.669462 }, { "epoch": 1.0987104237179213, "grad_norm": 3.3931076526641846, "learning_rate": 8.856090004105802e-05, "loss": 2.37581729888916, "memory(GiB)": 72.85, "step": 25645, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.669451 }, { "epoch": 1.0989246390471703, "grad_norm": 3.8126420974731445, "learning_rate": 8.855661571259873e-05, "loss": 2.308403968811035, "memory(GiB)": 72.85, "step": 25650, "token_acc": 0.5134328358208955, "train_speed(iter/s)": 0.66945 }, { "epoch": 1.0991388543764191, "grad_norm": 3.7972962856292725, "learning_rate": 8.855233068565016e-05, "loss": 2.297416877746582, "memory(GiB)": 72.85, "step": 25655, "token_acc": 0.5032894736842105, "train_speed(iter/s)": 0.669434 }, { "epoch": 1.0993530697056682, "grad_norm": 4.801402568817139, "learning_rate": 8.854804496028995e-05, "loss": 2.495299530029297, "memory(GiB)": 72.85, "step": 25660, "token_acc": 0.47692307692307695, "train_speed(iter/s)": 0.669423 }, { "epoch": 1.0995672850349172, 
"grad_norm": 3.933661699295044, "learning_rate": 8.854375853659575e-05, "loss": 2.695606231689453, "memory(GiB)": 72.85, "step": 25665, "token_acc": 0.479020979020979, "train_speed(iter/s)": 0.669451 }, { "epoch": 1.099781500364166, "grad_norm": 3.8879494667053223, "learning_rate": 8.853947141464521e-05, "loss": 2.276567268371582, "memory(GiB)": 72.85, "step": 25670, "token_acc": 0.539568345323741, "train_speed(iter/s)": 0.669459 }, { "epoch": 1.099995715693415, "grad_norm": 4.917379856109619, "learning_rate": 8.8535183594516e-05, "loss": 2.4769481658935546, "memory(GiB)": 72.85, "step": 25675, "token_acc": 0.48028673835125446, "train_speed(iter/s)": 0.669471 }, { "epoch": 1.100209931022664, "grad_norm": 4.222143650054932, "learning_rate": 8.853089507628578e-05, "loss": 1.9946517944335938, "memory(GiB)": 72.85, "step": 25680, "token_acc": 0.5575221238938053, "train_speed(iter/s)": 0.669483 }, { "epoch": 1.1004241463519129, "grad_norm": 4.244278907775879, "learning_rate": 8.852660586003225e-05, "loss": 2.1080141067504883, "memory(GiB)": 72.85, "step": 25685, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.669479 }, { "epoch": 1.100638361681162, "grad_norm": 4.696629524230957, "learning_rate": 8.852231594583313e-05, "loss": 2.49136962890625, "memory(GiB)": 72.85, "step": 25690, "token_acc": 0.45051194539249145, "train_speed(iter/s)": 0.66947 }, { "epoch": 1.100852577010411, "grad_norm": 4.040224552154541, "learning_rate": 8.85180253337661e-05, "loss": 2.1497989654541017, "memory(GiB)": 72.85, "step": 25695, "token_acc": 0.55, "train_speed(iter/s)": 0.669489 }, { "epoch": 1.1010667923396598, "grad_norm": 3.5935089588165283, "learning_rate": 8.851373402390891e-05, "loss": 2.274459457397461, "memory(GiB)": 72.85, "step": 25700, "token_acc": 0.5, "train_speed(iter/s)": 0.669485 }, { "epoch": 1.1012810076689088, "grad_norm": 5.382990837097168, "learning_rate": 8.85094420163393e-05, "loss": 2.1566747665405273, "memory(GiB)": 72.85, "step": 25705, "token_acc": 
0.5255972696245734, "train_speed(iter/s)": 0.669509 }, { "epoch": 1.1014952229981578, "grad_norm": 3.8669517040252686, "learning_rate": 8.850514931113504e-05, "loss": 2.7873279571533205, "memory(GiB)": 72.85, "step": 25710, "token_acc": 0.4723127035830619, "train_speed(iter/s)": 0.669524 }, { "epoch": 1.1017094383274066, "grad_norm": 3.7187411785125732, "learning_rate": 8.850085590837388e-05, "loss": 2.4706382751464844, "memory(GiB)": 72.85, "step": 25715, "token_acc": 0.5054151624548736, "train_speed(iter/s)": 0.669529 }, { "epoch": 1.1019236536566557, "grad_norm": 4.445605754852295, "learning_rate": 8.849656180813357e-05, "loss": 2.3961456298828123, "memory(GiB)": 72.85, "step": 25720, "token_acc": 0.47035573122529645, "train_speed(iter/s)": 0.669512 }, { "epoch": 1.1021378689859047, "grad_norm": 5.905172348022461, "learning_rate": 8.849226701049194e-05, "loss": 2.339490509033203, "memory(GiB)": 72.85, "step": 25725, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.669513 }, { "epoch": 1.1023520843151535, "grad_norm": 4.12897253036499, "learning_rate": 8.84879715155268e-05, "loss": 2.1464876174926757, "memory(GiB)": 72.85, "step": 25730, "token_acc": 0.5522388059701493, "train_speed(iter/s)": 0.669502 }, { "epoch": 1.1025662996444026, "grad_norm": 4.740164279937744, "learning_rate": 8.848367532331594e-05, "loss": 2.446752166748047, "memory(GiB)": 72.85, "step": 25735, "token_acc": 0.5016393442622951, "train_speed(iter/s)": 0.669525 }, { "epoch": 1.1027805149736516, "grad_norm": 3.612147569656372, "learning_rate": 8.84793784339372e-05, "loss": 2.288640594482422, "memory(GiB)": 72.85, "step": 25740, "token_acc": 0.4637223974763407, "train_speed(iter/s)": 0.669546 }, { "epoch": 1.1029947303029004, "grad_norm": 4.5307841300964355, "learning_rate": 8.847508084746843e-05, "loss": 2.4380729675292967, "memory(GiB)": 72.85, "step": 25745, "token_acc": 0.4842105263157895, "train_speed(iter/s)": 0.669564 }, { "epoch": 1.1032089456321494, "grad_norm": 
3.5589308738708496, "learning_rate": 8.847078256398746e-05, "loss": 2.2632421493530273, "memory(GiB)": 72.85, "step": 25750, "token_acc": 0.5272727272727272, "train_speed(iter/s)": 0.669593 }, { "epoch": 1.1034231609613985, "grad_norm": 4.692621231079102, "learning_rate": 8.84664835835722e-05, "loss": 2.43634147644043, "memory(GiB)": 72.85, "step": 25755, "token_acc": 0.46853146853146854, "train_speed(iter/s)": 0.669592 }, { "epoch": 1.1036373762906473, "grad_norm": 12.507830619812012, "learning_rate": 8.84621839063005e-05, "loss": 2.517874526977539, "memory(GiB)": 72.85, "step": 25760, "token_acc": 0.4723926380368098, "train_speed(iter/s)": 0.669571 }, { "epoch": 1.1038515916198963, "grad_norm": 4.344453811645508, "learning_rate": 8.845788353225023e-05, "loss": 2.265086555480957, "memory(GiB)": 72.85, "step": 25765, "token_acc": 0.47719298245614034, "train_speed(iter/s)": 0.669578 }, { "epoch": 1.1040658069491454, "grad_norm": 3.980065107345581, "learning_rate": 8.845358246149936e-05, "loss": 2.0525474548339844, "memory(GiB)": 72.85, "step": 25770, "token_acc": 0.5450980392156862, "train_speed(iter/s)": 0.669592 }, { "epoch": 1.1042800222783942, "grad_norm": 3.9971015453338623, "learning_rate": 8.844928069412576e-05, "loss": 2.3453964233398437, "memory(GiB)": 72.85, "step": 25775, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.669593 }, { "epoch": 1.1044942376076432, "grad_norm": 3.826558828353882, "learning_rate": 8.844497823020736e-05, "loss": 2.3922128677368164, "memory(GiB)": 72.85, "step": 25780, "token_acc": 0.524904214559387, "train_speed(iter/s)": 0.669605 }, { "epoch": 1.1047084529368922, "grad_norm": 4.741629600524902, "learning_rate": 8.844067506982215e-05, "loss": 2.3389251708984373, "memory(GiB)": 72.85, "step": 25785, "token_acc": 0.48184818481848185, "train_speed(iter/s)": 0.669585 }, { "epoch": 1.104922668266141, "grad_norm": 3.300173044204712, "learning_rate": 8.8436371213048e-05, "loss": 1.9823654174804688, "memory(GiB)": 72.85, 
"step": 25790, "token_acc": 0.5539033457249071, "train_speed(iter/s)": 0.669585 }, { "epoch": 1.10513688359539, "grad_norm": 3.323646068572998, "learning_rate": 8.843206665996296e-05, "loss": 2.0772594451904296, "memory(GiB)": 72.85, "step": 25795, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.669609 }, { "epoch": 1.1053510989246391, "grad_norm": 4.0968017578125, "learning_rate": 8.842776141064497e-05, "loss": 2.292657470703125, "memory(GiB)": 72.85, "step": 25800, "token_acc": 0.5285171102661597, "train_speed(iter/s)": 0.6696 }, { "epoch": 1.105565314253888, "grad_norm": 5.2219719886779785, "learning_rate": 8.842345546517205e-05, "loss": 2.437542724609375, "memory(GiB)": 72.85, "step": 25805, "token_acc": 0.463768115942029, "train_speed(iter/s)": 0.669601 }, { "epoch": 1.105779529583137, "grad_norm": 4.401686668395996, "learning_rate": 8.841914882362216e-05, "loss": 2.339700126647949, "memory(GiB)": 72.85, "step": 25810, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.669613 }, { "epoch": 1.105993744912386, "grad_norm": 4.070099830627441, "learning_rate": 8.841484148607338e-05, "loss": 2.3137210845947265, "memory(GiB)": 72.85, "step": 25815, "token_acc": 0.4878048780487805, "train_speed(iter/s)": 0.669624 }, { "epoch": 1.1062079602416348, "grad_norm": 3.5757226943969727, "learning_rate": 8.841053345260371e-05, "loss": 2.3887508392333983, "memory(GiB)": 72.85, "step": 25820, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.669634 }, { "epoch": 1.1064221755708838, "grad_norm": 6.142699718475342, "learning_rate": 8.840622472329117e-05, "loss": 2.4077852249145506, "memory(GiB)": 72.85, "step": 25825, "token_acc": 0.47474747474747475, "train_speed(iter/s)": 0.669623 }, { "epoch": 1.1066363909001329, "grad_norm": 3.779576301574707, "learning_rate": 8.840191529821386e-05, "loss": 2.210194206237793, "memory(GiB)": 72.85, "step": 25830, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.669593 }, { "epoch": 1.1068506062293817, 
"grad_norm": 3.6898834705352783, "learning_rate": 8.839760517744982e-05, "loss": 2.270800971984863, "memory(GiB)": 72.85, "step": 25835, "token_acc": 0.5155709342560554, "train_speed(iter/s)": 0.669601 }, { "epoch": 1.1070648215586307, "grad_norm": 4.068101406097412, "learning_rate": 8.839329436107714e-05, "loss": 2.233007621765137, "memory(GiB)": 72.85, "step": 25840, "token_acc": 0.5480769230769231, "train_speed(iter/s)": 0.669584 }, { "epoch": 1.1072790368878798, "grad_norm": 3.6081738471984863, "learning_rate": 8.838898284917394e-05, "loss": 1.8859800338745116, "memory(GiB)": 72.85, "step": 25845, "token_acc": 0.5772357723577236, "train_speed(iter/s)": 0.669569 }, { "epoch": 1.1074932522171286, "grad_norm": 4.049208164215088, "learning_rate": 8.838467064181828e-05, "loss": 2.0372236251831053, "memory(GiB)": 72.85, "step": 25850, "token_acc": 0.5228215767634855, "train_speed(iter/s)": 0.669586 }, { "epoch": 1.1077074675463776, "grad_norm": 4.344283103942871, "learning_rate": 8.83803577390883e-05, "loss": 2.3828361511230467, "memory(GiB)": 72.85, "step": 25855, "token_acc": 0.5241157556270096, "train_speed(iter/s)": 0.669602 }, { "epoch": 1.1079216828756266, "grad_norm": 3.4447641372680664, "learning_rate": 8.837604414106215e-05, "loss": 2.642043113708496, "memory(GiB)": 72.85, "step": 25860, "token_acc": 0.46735395189003437, "train_speed(iter/s)": 0.669601 }, { "epoch": 1.1081358982048755, "grad_norm": 3.7551348209381104, "learning_rate": 8.837172984781796e-05, "loss": 2.3746700286865234, "memory(GiB)": 72.85, "step": 25865, "token_acc": 0.501432664756447, "train_speed(iter/s)": 0.669616 }, { "epoch": 1.1083501135341245, "grad_norm": 3.8820908069610596, "learning_rate": 8.836741485943389e-05, "loss": 2.0757627487182617, "memory(GiB)": 72.85, "step": 25870, "token_acc": 0.5652173913043478, "train_speed(iter/s)": 0.669635 }, { "epoch": 1.1085643288633735, "grad_norm": 4.096611499786377, "learning_rate": 8.836309917598811e-05, "loss": 2.3134254455566405, 
"memory(GiB)": 72.85, "step": 25875, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.669667 }, { "epoch": 1.1087785441926223, "grad_norm": 4.269583225250244, "learning_rate": 8.835878279755879e-05, "loss": 2.1653087615966795, "memory(GiB)": 72.85, "step": 25880, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.669681 }, { "epoch": 1.1089927595218714, "grad_norm": 3.848276376724243, "learning_rate": 8.835446572422415e-05, "loss": 2.4724498748779298, "memory(GiB)": 72.85, "step": 25885, "token_acc": 0.47232472324723246, "train_speed(iter/s)": 0.669683 }, { "epoch": 1.1092069748511204, "grad_norm": 4.512289524078369, "learning_rate": 8.835014795606235e-05, "loss": 2.529353904724121, "memory(GiB)": 72.85, "step": 25890, "token_acc": 0.5059288537549407, "train_speed(iter/s)": 0.669669 }, { "epoch": 1.1094211901803692, "grad_norm": 4.473285675048828, "learning_rate": 8.834582949315169e-05, "loss": 2.359328842163086, "memory(GiB)": 72.85, "step": 25895, "token_acc": 0.48398576512455516, "train_speed(iter/s)": 0.66968 }, { "epoch": 1.1096354055096183, "grad_norm": 4.312793254852295, "learning_rate": 8.834151033557033e-05, "loss": 2.193083572387695, "memory(GiB)": 72.85, "step": 25900, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.669681 }, { "epoch": 1.1098496208388673, "grad_norm": 4.826013088226318, "learning_rate": 8.833719048339654e-05, "loss": 2.150822067260742, "memory(GiB)": 72.85, "step": 25905, "token_acc": 0.51931330472103, "train_speed(iter/s)": 0.669694 }, { "epoch": 1.110063836168116, "grad_norm": 4.788055896759033, "learning_rate": 8.83328699367086e-05, "loss": 2.328817939758301, "memory(GiB)": 72.85, "step": 25910, "token_acc": 0.4684931506849315, "train_speed(iter/s)": 0.669695 }, { "epoch": 1.1102780514973651, "grad_norm": 4.920809745788574, "learning_rate": 8.832854869558476e-05, "loss": 2.519141960144043, "memory(GiB)": 72.85, "step": 25915, "token_acc": 0.4053156146179402, "train_speed(iter/s)": 0.669705 }, { "epoch": 
1.1104922668266142, "grad_norm": 4.484220027923584, "learning_rate": 8.832422676010329e-05, "loss": 2.375439453125, "memory(GiB)": 72.85, "step": 25920, "token_acc": 0.46905537459283386, "train_speed(iter/s)": 0.669711 }, { "epoch": 1.110706482155863, "grad_norm": 3.884439706802368, "learning_rate": 8.83199041303425e-05, "loss": 2.3061466217041016, "memory(GiB)": 72.85, "step": 25925, "token_acc": 0.5286195286195287, "train_speed(iter/s)": 0.669704 }, { "epoch": 1.110920697485112, "grad_norm": 4.519631862640381, "learning_rate": 8.831558080638072e-05, "loss": 2.1766075134277343, "memory(GiB)": 72.85, "step": 25930, "token_acc": 0.4897260273972603, "train_speed(iter/s)": 0.6697 }, { "epoch": 1.111134912814361, "grad_norm": 4.718623638153076, "learning_rate": 8.831125678829624e-05, "loss": 2.424921989440918, "memory(GiB)": 72.85, "step": 25935, "token_acc": 0.4684385382059801, "train_speed(iter/s)": 0.669709 }, { "epoch": 1.1113491281436099, "grad_norm": 3.4263479709625244, "learning_rate": 8.83069320761674e-05, "loss": 2.3876243591308595, "memory(GiB)": 72.85, "step": 25940, "token_acc": 0.47278911564625853, "train_speed(iter/s)": 0.66972 }, { "epoch": 1.111563343472859, "grad_norm": 4.540286540985107, "learning_rate": 8.830260667007255e-05, "loss": 2.1435997009277346, "memory(GiB)": 72.85, "step": 25945, "token_acc": 0.5236051502145923, "train_speed(iter/s)": 0.669743 }, { "epoch": 1.111777558802108, "grad_norm": 3.631410837173462, "learning_rate": 8.829828057009007e-05, "loss": 2.60280818939209, "memory(GiB)": 72.85, "step": 25950, "token_acc": 0.45528455284552843, "train_speed(iter/s)": 0.669765 }, { "epoch": 1.1119917741313567, "grad_norm": 5.937687397003174, "learning_rate": 8.829395377629829e-05, "loss": 2.5742284774780275, "memory(GiB)": 72.85, "step": 25955, "token_acc": 0.47794117647058826, "train_speed(iter/s)": 0.669767 }, { "epoch": 1.1122059894606058, "grad_norm": 4.587806701660156, "learning_rate": 8.828962628877562e-05, "loss": 2.226784896850586, 
"memory(GiB)": 72.85, "step": 25960, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.669749 }, { "epoch": 1.1124202047898548, "grad_norm": 3.7266602516174316, "learning_rate": 8.828529810760044e-05, "loss": 2.281622886657715, "memory(GiB)": 72.85, "step": 25965, "token_acc": 0.5015673981191222, "train_speed(iter/s)": 0.669726 }, { "epoch": 1.1126344201191036, "grad_norm": 5.8684306144714355, "learning_rate": 8.828096923285118e-05, "loss": 2.582189750671387, "memory(GiB)": 72.85, "step": 25970, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.66968 }, { "epoch": 1.1128486354483527, "grad_norm": 4.023320198059082, "learning_rate": 8.827663966460627e-05, "loss": 2.44490966796875, "memory(GiB)": 72.85, "step": 25975, "token_acc": 0.484251968503937, "train_speed(iter/s)": 0.669708 }, { "epoch": 1.1130628507776017, "grad_norm": 3.701446294784546, "learning_rate": 8.827230940294411e-05, "loss": 1.8638202667236328, "memory(GiB)": 72.85, "step": 25980, "token_acc": 0.5647058823529412, "train_speed(iter/s)": 0.669686 }, { "epoch": 1.1132770661068505, "grad_norm": 5.01838493347168, "learning_rate": 8.826797844794315e-05, "loss": 2.3837520599365236, "memory(GiB)": 72.85, "step": 25985, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.66968 }, { "epoch": 1.1134912814360995, "grad_norm": 4.822000503540039, "learning_rate": 8.826364679968188e-05, "loss": 2.1312606811523436, "memory(GiB)": 72.85, "step": 25990, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.669688 }, { "epoch": 1.1137054967653486, "grad_norm": 4.858102321624756, "learning_rate": 8.825931445823876e-05, "loss": 2.278117561340332, "memory(GiB)": 72.85, "step": 25995, "token_acc": 0.5229681978798587, "train_speed(iter/s)": 0.669704 }, { "epoch": 1.1139197120945974, "grad_norm": 4.160287380218506, "learning_rate": 8.825498142369225e-05, "loss": 2.6999526977539063, "memory(GiB)": 72.85, "step": 26000, "token_acc": 0.44483985765124556, "train_speed(iter/s)": 0.66973 }, { "epoch": 
1.1139197120945974, "eval_loss": 1.9759092330932617, "eval_runtime": 17.4787, "eval_samples_per_second": 5.721, "eval_steps_per_second": 5.721, "eval_token_acc": 0.5040214477211796, "step": 26000 }, { "epoch": 1.1141339274238464, "grad_norm": 4.2156453132629395, "learning_rate": 8.825064769612088e-05, "loss": 2.4775154113769533, "memory(GiB)": 72.85, "step": 26005, "token_acc": 0.5004633920296571, "train_speed(iter/s)": 0.669395 }, { "epoch": 1.1143481427530955, "grad_norm": 3.9969279766082764, "learning_rate": 8.824631327560313e-05, "loss": 2.711654853820801, "memory(GiB)": 72.85, "step": 26010, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.669369 }, { "epoch": 1.1145623580823443, "grad_norm": 4.449093341827393, "learning_rate": 8.824197816221754e-05, "loss": 2.0989173889160155, "memory(GiB)": 72.85, "step": 26015, "token_acc": 0.521311475409836, "train_speed(iter/s)": 0.66937 }, { "epoch": 1.1147765734115933, "grad_norm": 5.346417427062988, "learning_rate": 8.823764235604264e-05, "loss": 2.421963691711426, "memory(GiB)": 72.85, "step": 26020, "token_acc": 0.489010989010989, "train_speed(iter/s)": 0.66933 }, { "epoch": 1.1149907887408423, "grad_norm": 4.69746208190918, "learning_rate": 8.823330585715699e-05, "loss": 2.093242645263672, "memory(GiB)": 72.85, "step": 26025, "token_acc": 0.542016806722689, "train_speed(iter/s)": 0.669337 }, { "epoch": 1.1152050040700912, "grad_norm": 4.018325328826904, "learning_rate": 8.822896866563914e-05, "loss": 2.3263067245483398, "memory(GiB)": 72.85, "step": 26030, "token_acc": 0.5091575091575091, "train_speed(iter/s)": 0.669354 }, { "epoch": 1.1154192193993402, "grad_norm": 3.315519332885742, "learning_rate": 8.822463078156765e-05, "loss": 2.1805936813354494, "memory(GiB)": 72.85, "step": 26035, "token_acc": 0.5086505190311419, "train_speed(iter/s)": 0.669343 }, { "epoch": 1.1156334347285892, "grad_norm": 3.9754817485809326, "learning_rate": 8.822029220502113e-05, "loss": 2.3238876342773436, "memory(GiB)": 72.85, 
"step": 26040, "token_acc": 0.5050505050505051, "train_speed(iter/s)": 0.66933 }, { "epoch": 1.115847650057838, "grad_norm": 3.9679887294769287, "learning_rate": 8.821595293607815e-05, "loss": 2.399368095397949, "memory(GiB)": 72.85, "step": 26045, "token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.669349 }, { "epoch": 1.116061865387087, "grad_norm": 4.782232761383057, "learning_rate": 8.821161297481732e-05, "loss": 2.2671913146972655, "memory(GiB)": 72.85, "step": 26050, "token_acc": 0.5306748466257669, "train_speed(iter/s)": 0.669316 }, { "epoch": 1.116276080716336, "grad_norm": 3.8113861083984375, "learning_rate": 8.82072723213173e-05, "loss": 2.30999641418457, "memory(GiB)": 72.85, "step": 26055, "token_acc": 0.5343511450381679, "train_speed(iter/s)": 0.669329 }, { "epoch": 1.1164902960455851, "grad_norm": 4.258012771606445, "learning_rate": 8.82029309756567e-05, "loss": 2.410386657714844, "memory(GiB)": 72.85, "step": 26060, "token_acc": 0.5102739726027398, "train_speed(iter/s)": 0.66933 }, { "epoch": 1.116704511374834, "grad_norm": 5.961940765380859, "learning_rate": 8.819858893791415e-05, "loss": 2.209736633300781, "memory(GiB)": 72.85, "step": 26065, "token_acc": 0.5076335877862596, "train_speed(iter/s)": 0.669319 }, { "epoch": 1.116918726704083, "grad_norm": 5.028115749359131, "learning_rate": 8.819424620816834e-05, "loss": 2.162643241882324, "memory(GiB)": 72.85, "step": 26070, "token_acc": 0.55, "train_speed(iter/s)": 0.669334 }, { "epoch": 1.117132942033332, "grad_norm": 3.9201905727386475, "learning_rate": 8.818990278649793e-05, "loss": 2.3318408966064452, "memory(GiB)": 72.85, "step": 26075, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.669353 }, { "epoch": 1.1173471573625808, "grad_norm": 4.9387617111206055, "learning_rate": 8.818555867298161e-05, "loss": 2.2387733459472656, "memory(GiB)": 72.85, "step": 26080, "token_acc": 0.49508196721311476, "train_speed(iter/s)": 0.669374 }, { "epoch": 1.1175613726918299, "grad_norm": 
4.414402961730957, "learning_rate": 8.818121386769806e-05, "loss": 2.378768539428711, "memory(GiB)": 72.85, "step": 26085, "token_acc": 0.5182724252491694, "train_speed(iter/s)": 0.669375 }, { "epoch": 1.117775588021079, "grad_norm": 5.052832126617432, "learning_rate": 8.817686837072602e-05, "loss": 2.7092258453369142, "memory(GiB)": 72.85, "step": 26090, "token_acc": 0.44696969696969696, "train_speed(iter/s)": 0.66939 }, { "epoch": 1.1179898033503277, "grad_norm": 5.511709213256836, "learning_rate": 8.81725221821442e-05, "loss": 2.2723567962646483, "memory(GiB)": 72.85, "step": 26095, "token_acc": 0.5299145299145299, "train_speed(iter/s)": 0.669405 }, { "epoch": 1.1182040186795768, "grad_norm": 3.7406845092773438, "learning_rate": 8.816817530203131e-05, "loss": 2.3277347564697264, "memory(GiB)": 72.85, "step": 26100, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.669439 }, { "epoch": 1.1184182340088258, "grad_norm": 4.52923583984375, "learning_rate": 8.816382773046615e-05, "loss": 2.472727584838867, "memory(GiB)": 72.85, "step": 26105, "token_acc": 0.48928571428571427, "train_speed(iter/s)": 0.669446 }, { "epoch": 1.1186324493380746, "grad_norm": 4.055858135223389, "learning_rate": 8.815947946752743e-05, "loss": 2.2147518157958985, "memory(GiB)": 72.85, "step": 26110, "token_acc": 0.5409836065573771, "train_speed(iter/s)": 0.669471 }, { "epoch": 1.1188466646673236, "grad_norm": 4.143456935882568, "learning_rate": 8.815513051329394e-05, "loss": 2.111102485656738, "memory(GiB)": 72.85, "step": 26115, "token_acc": 0.5405405405405406, "train_speed(iter/s)": 0.669477 }, { "epoch": 1.1190608799965727, "grad_norm": 4.450899124145508, "learning_rate": 8.815078086784447e-05, "loss": 2.2754022598266603, "memory(GiB)": 72.85, "step": 26120, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.669478 }, { "epoch": 1.1192750953258215, "grad_norm": 4.333767890930176, "learning_rate": 8.814643053125781e-05, "loss": 2.4363195419311525, "memory(GiB)": 72.85, 
"step": 26125, "token_acc": 0.5030487804878049, "train_speed(iter/s)": 0.669479 }, { "epoch": 1.1194893106550705, "grad_norm": 4.6560444831848145, "learning_rate": 8.81420795036128e-05, "loss": 2.4821613311767576, "memory(GiB)": 72.85, "step": 26130, "token_acc": 0.479020979020979, "train_speed(iter/s)": 0.669488 }, { "epoch": 1.1197035259843195, "grad_norm": 4.530797004699707, "learning_rate": 8.813772778498823e-05, "loss": 2.104539489746094, "memory(GiB)": 72.85, "step": 26135, "token_acc": 0.5035714285714286, "train_speed(iter/s)": 0.669502 }, { "epoch": 1.1199177413135684, "grad_norm": 3.4733853340148926, "learning_rate": 8.813337537546294e-05, "loss": 2.374750518798828, "memory(GiB)": 72.85, "step": 26140, "token_acc": 0.4862068965517241, "train_speed(iter/s)": 0.669503 }, { "epoch": 1.1201319566428174, "grad_norm": 5.61564826965332, "learning_rate": 8.812902227511578e-05, "loss": 2.5767749786376952, "memory(GiB)": 72.85, "step": 26145, "token_acc": 0.5115384615384615, "train_speed(iter/s)": 0.669518 }, { "epoch": 1.1203461719720664, "grad_norm": 5.969141006469727, "learning_rate": 8.81246684840256e-05, "loss": 2.24307861328125, "memory(GiB)": 72.85, "step": 26150, "token_acc": 0.525691699604743, "train_speed(iter/s)": 0.669513 }, { "epoch": 1.1205603873013152, "grad_norm": 4.9374799728393555, "learning_rate": 8.812031400227132e-05, "loss": 2.330775833129883, "memory(GiB)": 72.85, "step": 26155, "token_acc": 0.5093632958801498, "train_speed(iter/s)": 0.669498 }, { "epoch": 1.1207746026305643, "grad_norm": 4.000616550445557, "learning_rate": 8.811595882993177e-05, "loss": 2.423470115661621, "memory(GiB)": 72.85, "step": 26160, "token_acc": 0.501628664495114, "train_speed(iter/s)": 0.66945 }, { "epoch": 1.1209888179598133, "grad_norm": 4.936199188232422, "learning_rate": 8.811160296708589e-05, "loss": 2.61293888092041, "memory(GiB)": 72.85, "step": 26165, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.669463 }, { "epoch": 1.1212030332890621, 
"grad_norm": 4.572817802429199, "learning_rate": 8.810724641381253e-05, "loss": 2.6532272338867187, "memory(GiB)": 72.85, "step": 26170, "token_acc": 0.4641509433962264, "train_speed(iter/s)": 0.669481 }, { "epoch": 1.1214172486183112, "grad_norm": 4.198477745056152, "learning_rate": 8.81028891701907e-05, "loss": 2.2932626724243166, "memory(GiB)": 72.85, "step": 26175, "token_acc": 0.4735202492211838, "train_speed(iter/s)": 0.669466 }, { "epoch": 1.1216314639475602, "grad_norm": 6.607544898986816, "learning_rate": 8.809853123629928e-05, "loss": 2.112202835083008, "memory(GiB)": 72.85, "step": 26180, "token_acc": 0.5266666666666666, "train_speed(iter/s)": 0.669476 }, { "epoch": 1.121845679276809, "grad_norm": 3.767423152923584, "learning_rate": 8.80941726122172e-05, "loss": 2.390673065185547, "memory(GiB)": 72.85, "step": 26185, "token_acc": 0.47774480712166173, "train_speed(iter/s)": 0.669463 }, { "epoch": 1.122059894606058, "grad_norm": 3.8396975994110107, "learning_rate": 8.808981329802347e-05, "loss": 2.0183528900146483, "memory(GiB)": 72.85, "step": 26190, "token_acc": 0.5259259259259259, "train_speed(iter/s)": 0.669473 }, { "epoch": 1.122274109935307, "grad_norm": 3.479956865310669, "learning_rate": 8.808545329379704e-05, "loss": 2.4448173522949217, "memory(GiB)": 72.85, "step": 26195, "token_acc": 0.4880239520958084, "train_speed(iter/s)": 0.66949 }, { "epoch": 1.1224883252645559, "grad_norm": 3.8153469562530518, "learning_rate": 8.808109259961688e-05, "loss": 2.298037528991699, "memory(GiB)": 72.85, "step": 26200, "token_acc": 0.4891640866873065, "train_speed(iter/s)": 0.669504 }, { "epoch": 1.122702540593805, "grad_norm": 4.4014716148376465, "learning_rate": 8.807673121556202e-05, "loss": 2.428156852722168, "memory(GiB)": 72.85, "step": 26205, "token_acc": 0.47735191637630664, "train_speed(iter/s)": 0.669492 }, { "epoch": 1.122916755923054, "grad_norm": 4.102936267852783, "learning_rate": 8.807236914171147e-05, "loss": 2.387845993041992, "memory(GiB)": 
72.85, "step": 26210, "token_acc": 0.4897260273972603, "train_speed(iter/s)": 0.669483 }, { "epoch": 1.1231309712523028, "grad_norm": 4.5030059814453125, "learning_rate": 8.806800637814421e-05, "loss": 2.489422416687012, "memory(GiB)": 72.85, "step": 26215, "token_acc": 0.4612794612794613, "train_speed(iter/s)": 0.669458 }, { "epoch": 1.1233451865815518, "grad_norm": 4.655910491943359, "learning_rate": 8.806364292493932e-05, "loss": 2.5237159729003906, "memory(GiB)": 72.85, "step": 26220, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.669471 }, { "epoch": 1.1235594019108008, "grad_norm": 3.7475531101226807, "learning_rate": 8.805927878217583e-05, "loss": 2.193097686767578, "memory(GiB)": 72.85, "step": 26225, "token_acc": 0.5589225589225589, "train_speed(iter/s)": 0.669495 }, { "epoch": 1.1237736172400496, "grad_norm": 4.121120452880859, "learning_rate": 8.805491394993279e-05, "loss": 2.159084129333496, "memory(GiB)": 72.85, "step": 26230, "token_acc": 0.5646551724137931, "train_speed(iter/s)": 0.669486 }, { "epoch": 1.1239878325692987, "grad_norm": 3.1366517543792725, "learning_rate": 8.80505484282893e-05, "loss": 2.6548704147338866, "memory(GiB)": 72.85, "step": 26235, "token_acc": 0.40584415584415584, "train_speed(iter/s)": 0.669503 }, { "epoch": 1.1242020478985477, "grad_norm": 3.5146541595458984, "learning_rate": 8.804618221732443e-05, "loss": 2.0130218505859374, "memory(GiB)": 72.85, "step": 26240, "token_acc": 0.5052631578947369, "train_speed(iter/s)": 0.669509 }, { "epoch": 1.1244162632277965, "grad_norm": 3.179826021194458, "learning_rate": 8.804181531711727e-05, "loss": 2.3659502029418946, "memory(GiB)": 72.85, "step": 26245, "token_acc": 0.4597014925373134, "train_speed(iter/s)": 0.669495 }, { "epoch": 1.1246304785570456, "grad_norm": 4.070383071899414, "learning_rate": 8.803744772774694e-05, "loss": 2.534161186218262, "memory(GiB)": 72.85, "step": 26250, "token_acc": 0.4694533762057878, "train_speed(iter/s)": 0.669512 }, { "epoch": 
1.1248446938862946, "grad_norm": 4.4691057205200195, "learning_rate": 8.803307944929257e-05, "loss": 2.535188102722168, "memory(GiB)": 72.85, "step": 26255, "token_acc": 0.4594594594594595, "train_speed(iter/s)": 0.669533 }, { "epoch": 1.1250589092155434, "grad_norm": 4.101958751678467, "learning_rate": 8.802871048183328e-05, "loss": 2.270981788635254, "memory(GiB)": 72.85, "step": 26260, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 0.669533 }, { "epoch": 1.1252731245447924, "grad_norm": 4.441269397735596, "learning_rate": 8.802434082544822e-05, "loss": 2.37514591217041, "memory(GiB)": 72.85, "step": 26265, "token_acc": 0.4756944444444444, "train_speed(iter/s)": 0.669539 }, { "epoch": 1.1254873398740415, "grad_norm": 3.227839231491089, "learning_rate": 8.801997048021657e-05, "loss": 2.3643861770629884, "memory(GiB)": 72.85, "step": 26270, "token_acc": 0.4968152866242038, "train_speed(iter/s)": 0.669538 }, { "epoch": 1.1257015552032903, "grad_norm": 3.2522380352020264, "learning_rate": 8.80155994462175e-05, "loss": 2.197877311706543, "memory(GiB)": 72.85, "step": 26275, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.669555 }, { "epoch": 1.1259157705325393, "grad_norm": 5.368088245391846, "learning_rate": 8.801122772353016e-05, "loss": 2.4121707916259765, "memory(GiB)": 72.85, "step": 26280, "token_acc": 0.5098039215686274, "train_speed(iter/s)": 0.66957 }, { "epoch": 1.1261299858617884, "grad_norm": 4.014406204223633, "learning_rate": 8.800685531223378e-05, "loss": 2.439434623718262, "memory(GiB)": 72.85, "step": 26285, "token_acc": 0.46953405017921146, "train_speed(iter/s)": 0.669591 }, { "epoch": 1.1263442011910372, "grad_norm": 5.501341342926025, "learning_rate": 8.800248221240757e-05, "loss": 2.424835205078125, "memory(GiB)": 72.85, "step": 26290, "token_acc": 0.49624060150375937, "train_speed(iter/s)": 0.66961 }, { "epoch": 1.1265584165202862, "grad_norm": 3.038914918899536, "learning_rate": 8.799810842413074e-05, "loss": 
2.5422456741333006, "memory(GiB)": 72.85, "step": 26295, "token_acc": 0.4937888198757764, "train_speed(iter/s)": 0.669576 }, { "epoch": 1.1267726318495352, "grad_norm": 4.018488883972168, "learning_rate": 8.799373394748252e-05, "loss": 2.3838432312011717, "memory(GiB)": 72.85, "step": 26300, "token_acc": 0.5, "train_speed(iter/s)": 0.669548 }, { "epoch": 1.126986847178784, "grad_norm": 3.491334915161133, "learning_rate": 8.798935878254218e-05, "loss": 2.1199790954589846, "memory(GiB)": 72.85, "step": 26305, "token_acc": 0.5301587301587302, "train_speed(iter/s)": 0.669549 }, { "epoch": 1.127201062508033, "grad_norm": 3.931880235671997, "learning_rate": 8.798498292938897e-05, "loss": 2.3445724487304687, "memory(GiB)": 72.85, "step": 26310, "token_acc": 0.4924812030075188, "train_speed(iter/s)": 0.669521 }, { "epoch": 1.1274152778372821, "grad_norm": 4.687253952026367, "learning_rate": 8.798060638810216e-05, "loss": 2.125059700012207, "memory(GiB)": 72.85, "step": 26315, "token_acc": 0.5613382899628253, "train_speed(iter/s)": 0.669518 }, { "epoch": 1.127629493166531, "grad_norm": 3.5559964179992676, "learning_rate": 8.797622915876104e-05, "loss": 2.410317230224609, "memory(GiB)": 72.85, "step": 26320, "token_acc": 0.5179856115107914, "train_speed(iter/s)": 0.669511 }, { "epoch": 1.12784370849578, "grad_norm": 3.6003522872924805, "learning_rate": 8.797185124144489e-05, "loss": 2.424028205871582, "memory(GiB)": 72.85, "step": 26325, "token_acc": 0.48736462093862815, "train_speed(iter/s)": 0.669537 }, { "epoch": 1.128057923825029, "grad_norm": 3.615432024002075, "learning_rate": 8.796747263623305e-05, "loss": 2.0453617095947267, "memory(GiB)": 72.85, "step": 26330, "token_acc": 0.5735849056603773, "train_speed(iter/s)": 0.669549 }, { "epoch": 1.1282721391542778, "grad_norm": 3.2719309329986572, "learning_rate": 8.796309334320483e-05, "loss": 2.3628633499145506, "memory(GiB)": 72.85, "step": 26335, "token_acc": 0.4880952380952381, "train_speed(iter/s)": 0.669543 }, { 
"epoch": 1.1284863544835269, "grad_norm": 5.819234371185303, "learning_rate": 8.795871336243955e-05, "loss": 2.458090972900391, "memory(GiB)": 72.85, "step": 26340, "token_acc": 0.49101796407185627, "train_speed(iter/s)": 0.66951 }, { "epoch": 1.128700569812776, "grad_norm": 6.263538837432861, "learning_rate": 8.795433269401657e-05, "loss": 2.150882911682129, "memory(GiB)": 72.85, "step": 26345, "token_acc": 0.5, "train_speed(iter/s)": 0.669496 }, { "epoch": 1.1289147851420247, "grad_norm": 4.371565818786621, "learning_rate": 8.794995133801525e-05, "loss": 2.5596200942993166, "memory(GiB)": 72.85, "step": 26350, "token_acc": 0.4862068965517241, "train_speed(iter/s)": 0.669508 }, { "epoch": 1.1291290004712737, "grad_norm": 4.21018123626709, "learning_rate": 8.794556929451496e-05, "loss": 2.595623016357422, "memory(GiB)": 72.85, "step": 26355, "token_acc": 0.5029239766081871, "train_speed(iter/s)": 0.669543 }, { "epoch": 1.1293432158005228, "grad_norm": 4.0828633308410645, "learning_rate": 8.794118656359509e-05, "loss": 2.533023452758789, "memory(GiB)": 72.85, "step": 26360, "token_acc": 0.4661016949152542, "train_speed(iter/s)": 0.669562 }, { "epoch": 1.1295574311297716, "grad_norm": 4.034542083740234, "learning_rate": 8.793680314533503e-05, "loss": 2.0757204055786134, "memory(GiB)": 72.85, "step": 26365, "token_acc": 0.5289855072463768, "train_speed(iter/s)": 0.66957 }, { "epoch": 1.1297716464590206, "grad_norm": 5.512319087982178, "learning_rate": 8.793241903981421e-05, "loss": 2.3499881744384767, "memory(GiB)": 72.85, "step": 26370, "token_acc": 0.5215827338129496, "train_speed(iter/s)": 0.66958 }, { "epoch": 1.1299858617882697, "grad_norm": 5.298647403717041, "learning_rate": 8.792803424711203e-05, "loss": 2.4903657913208006, "memory(GiB)": 72.85, "step": 26375, "token_acc": 0.5193548387096775, "train_speed(iter/s)": 0.669585 }, { "epoch": 1.1302000771175185, "grad_norm": 3.5833094120025635, "learning_rate": 8.79236487673079e-05, "loss": 2.105919075012207, 
"memory(GiB)": 72.85, "step": 26380, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.669585 }, { "epoch": 1.1304142924467675, "grad_norm": 4.029695510864258, "learning_rate": 8.791926260048133e-05, "loss": 2.4392696380615235, "memory(GiB)": 72.85, "step": 26385, "token_acc": 0.48214285714285715, "train_speed(iter/s)": 0.669594 }, { "epoch": 1.1306285077760165, "grad_norm": 4.71312141418457, "learning_rate": 8.791487574671173e-05, "loss": 2.327499580383301, "memory(GiB)": 72.85, "step": 26390, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.669564 }, { "epoch": 1.1308427231052653, "grad_norm": 4.306338310241699, "learning_rate": 8.791048820607861e-05, "loss": 2.374531936645508, "memory(GiB)": 72.85, "step": 26395, "token_acc": 0.5078864353312302, "train_speed(iter/s)": 0.669566 }, { "epoch": 1.1310569384345144, "grad_norm": 3.609612464904785, "learning_rate": 8.790609997866141e-05, "loss": 2.4586484909057615, "memory(GiB)": 72.85, "step": 26400, "token_acc": 0.44966442953020136, "train_speed(iter/s)": 0.669561 }, { "epoch": 1.1312711537637634, "grad_norm": 6.195533752441406, "learning_rate": 8.790171106453966e-05, "loss": 2.5021539688110352, "memory(GiB)": 72.85, "step": 26405, "token_acc": 0.4797297297297297, "train_speed(iter/s)": 0.669566 }, { "epoch": 1.1314853690930122, "grad_norm": 5.169102668762207, "learning_rate": 8.789732146379286e-05, "loss": 2.2821617126464844, "memory(GiB)": 72.85, "step": 26410, "token_acc": 0.5369774919614148, "train_speed(iter/s)": 0.669588 }, { "epoch": 1.1316995844222613, "grad_norm": 3.9351868629455566, "learning_rate": 8.789293117650052e-05, "loss": 2.306585693359375, "memory(GiB)": 72.85, "step": 26415, "token_acc": 0.46691176470588236, "train_speed(iter/s)": 0.669579 }, { "epoch": 1.1319137997515103, "grad_norm": 3.508356809616089, "learning_rate": 8.78885402027422e-05, "loss": 2.5736104965209963, "memory(GiB)": 72.85, "step": 26420, "token_acc": 0.45098039215686275, "train_speed(iter/s)": 0.669591 }, { 
"epoch": 1.132128015080759, "grad_norm": 3.82340931892395, "learning_rate": 8.788414854259744e-05, "loss": 2.2641933441162108, "memory(GiB)": 72.85, "step": 26425, "token_acc": 0.5160256410256411, "train_speed(iter/s)": 0.66959 }, { "epoch": 1.1323422304100081, "grad_norm": 4.351130962371826, "learning_rate": 8.787975619614577e-05, "loss": 2.2885900497436524, "memory(GiB)": 72.85, "step": 26430, "token_acc": 0.49683544303797467, "train_speed(iter/s)": 0.66961 }, { "epoch": 1.1325564457392572, "grad_norm": 4.64445686340332, "learning_rate": 8.787536316346678e-05, "loss": 2.578146743774414, "memory(GiB)": 72.85, "step": 26435, "token_acc": 0.48014440433212996, "train_speed(iter/s)": 0.669622 }, { "epoch": 1.132770661068506, "grad_norm": 4.3316969871521, "learning_rate": 8.787096944464008e-05, "loss": 2.0913070678710937, "memory(GiB)": 72.85, "step": 26440, "token_acc": 0.5363321799307958, "train_speed(iter/s)": 0.669621 }, { "epoch": 1.132984876397755, "grad_norm": 4.56871223449707, "learning_rate": 8.786657503974522e-05, "loss": 2.2443588256835936, "memory(GiB)": 72.85, "step": 26445, "token_acc": 0.4789272030651341, "train_speed(iter/s)": 0.669629 }, { "epoch": 1.133199091727004, "grad_norm": 3.848968744277954, "learning_rate": 8.786217994886186e-05, "loss": 2.4038949966430665, "memory(GiB)": 72.85, "step": 26450, "token_acc": 0.5068027210884354, "train_speed(iter/s)": 0.6696 }, { "epoch": 1.1334133070562529, "grad_norm": 3.6907846927642822, "learning_rate": 8.785778417206956e-05, "loss": 2.218471717834473, "memory(GiB)": 72.85, "step": 26455, "token_acc": 0.49609375, "train_speed(iter/s)": 0.669602 }, { "epoch": 1.133627522385502, "grad_norm": 5.200262546539307, "learning_rate": 8.7853387709448e-05, "loss": 2.502408981323242, "memory(GiB)": 72.85, "step": 26460, "token_acc": 0.4675324675324675, "train_speed(iter/s)": 0.669614 }, { "epoch": 1.133841737714751, "grad_norm": 4.8646674156188965, "learning_rate": 8.784899056107681e-05, "loss": 2.2062152862548827, 
"memory(GiB)": 72.85, "step": 26465, "token_acc": 0.5436507936507936, "train_speed(iter/s)": 0.66964 }, { "epoch": 1.1340559530439998, "grad_norm": 4.231088638305664, "learning_rate": 8.784459272703565e-05, "loss": 2.1645534515380858, "memory(GiB)": 72.85, "step": 26470, "token_acc": 0.5257731958762887, "train_speed(iter/s)": 0.669676 }, { "epoch": 1.1342701683732488, "grad_norm": 3.2244386672973633, "learning_rate": 8.784019420740419e-05, "loss": 2.151260566711426, "memory(GiB)": 72.85, "step": 26475, "token_acc": 0.5032051282051282, "train_speed(iter/s)": 0.669684 }, { "epoch": 1.1344843837024978, "grad_norm": 5.910134792327881, "learning_rate": 8.783579500226212e-05, "loss": 2.2852546691894533, "memory(GiB)": 72.85, "step": 26480, "token_acc": 0.5124555160142349, "train_speed(iter/s)": 0.669702 }, { "epoch": 1.1346985990317466, "grad_norm": 3.893399953842163, "learning_rate": 8.783139511168914e-05, "loss": 2.6249202728271483, "memory(GiB)": 72.85, "step": 26485, "token_acc": 0.45075757575757575, "train_speed(iter/s)": 0.669723 }, { "epoch": 1.1349128143609957, "grad_norm": 3.8854026794433594, "learning_rate": 8.782699453576492e-05, "loss": 2.287427520751953, "memory(GiB)": 72.85, "step": 26490, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.669718 }, { "epoch": 1.1351270296902447, "grad_norm": 3.795220375061035, "learning_rate": 8.782259327456924e-05, "loss": 2.5312665939331054, "memory(GiB)": 72.85, "step": 26495, "token_acc": 0.45222929936305734, "train_speed(iter/s)": 0.669735 }, { "epoch": 1.1353412450194935, "grad_norm": 3.552849531173706, "learning_rate": 8.781819132818179e-05, "loss": 2.6393232345581055, "memory(GiB)": 72.85, "step": 26500, "token_acc": 0.4631578947368421, "train_speed(iter/s)": 0.669759 }, { "epoch": 1.1353412450194935, "eval_loss": 2.0270793437957764, "eval_runtime": 16.1491, "eval_samples_per_second": 6.192, "eval_steps_per_second": 6.192, "eval_token_acc": 0.5081967213114754, "step": 26500 }, { "epoch": 1.1355554603487426, 
"grad_norm": 2.9500629901885986, "learning_rate": 8.781378869668233e-05, "loss": 2.473474884033203, "memory(GiB)": 72.85, "step": 26505, "token_acc": 0.4961832061068702, "train_speed(iter/s)": 0.669472 }, { "epoch": 1.1357696756779916, "grad_norm": 3.907672882080078, "learning_rate": 8.780938538015061e-05, "loss": 2.227077674865723, "memory(GiB)": 72.85, "step": 26510, "token_acc": 0.5301204819277109, "train_speed(iter/s)": 0.669489 }, { "epoch": 1.1359838910072404, "grad_norm": 3.8956472873687744, "learning_rate": 8.780498137866642e-05, "loss": 2.5718662261962892, "memory(GiB)": 72.85, "step": 26515, "token_acc": 0.49291784702549574, "train_speed(iter/s)": 0.669517 }, { "epoch": 1.1361981063364894, "grad_norm": 4.040287971496582, "learning_rate": 8.780057669230951e-05, "loss": 2.1770082473754884, "memory(GiB)": 72.85, "step": 26520, "token_acc": 0.4752475247524752, "train_speed(iter/s)": 0.669537 }, { "epoch": 1.1364123216657385, "grad_norm": 6.079890727996826, "learning_rate": 8.77961713211597e-05, "loss": 2.390591049194336, "memory(GiB)": 72.85, "step": 26525, "token_acc": 0.5138461538461538, "train_speed(iter/s)": 0.669491 }, { "epoch": 1.1366265369949873, "grad_norm": 4.285965442657471, "learning_rate": 8.77917652652968e-05, "loss": 2.1150539398193358, "memory(GiB)": 72.85, "step": 26530, "token_acc": 0.5109489051094891, "train_speed(iter/s)": 0.669488 }, { "epoch": 1.1368407523242363, "grad_norm": 3.8277838230133057, "learning_rate": 8.77873585248006e-05, "loss": 2.1255720138549803, "memory(GiB)": 72.85, "step": 26535, "token_acc": 0.5451388888888888, "train_speed(iter/s)": 0.669482 }, { "epoch": 1.1370549676534853, "grad_norm": 4.006350517272949, "learning_rate": 8.778295109975098e-05, "loss": 2.1867206573486326, "memory(GiB)": 72.85, "step": 26540, "token_acc": 0.5186567164179104, "train_speed(iter/s)": 0.6695 }, { "epoch": 1.1372691829827342, "grad_norm": 4.714529514312744, "learning_rate": 8.777854299022774e-05, "loss": 2.4471115112304687, "memory(GiB)": 
72.85, "step": 26545, "token_acc": 0.47277936962750716, "train_speed(iter/s)": 0.669521 }, { "epoch": 1.1374833983119832, "grad_norm": 3.9334700107574463, "learning_rate": 8.777413419631075e-05, "loss": 2.5001096725463867, "memory(GiB)": 72.85, "step": 26550, "token_acc": 0.46200607902735563, "train_speed(iter/s)": 0.669523 }, { "epoch": 1.1376976136412322, "grad_norm": 4.369621276855469, "learning_rate": 8.776972471807989e-05, "loss": 2.5139122009277344, "memory(GiB)": 72.85, "step": 26555, "token_acc": 0.4955223880597015, "train_speed(iter/s)": 0.669516 }, { "epoch": 1.137911828970481, "grad_norm": 3.985602617263794, "learning_rate": 8.776531455561505e-05, "loss": 2.010667419433594, "memory(GiB)": 72.85, "step": 26560, "token_acc": 0.5201612903225806, "train_speed(iter/s)": 0.669551 }, { "epoch": 1.13812604429973, "grad_norm": 5.09039306640625, "learning_rate": 8.77609037089961e-05, "loss": 2.347423553466797, "memory(GiB)": 72.85, "step": 26565, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.669577 }, { "epoch": 1.1383402596289791, "grad_norm": 5.196629047393799, "learning_rate": 8.775649217830295e-05, "loss": 2.3597875595092774, "memory(GiB)": 72.85, "step": 26570, "token_acc": 0.4855072463768116, "train_speed(iter/s)": 0.669571 }, { "epoch": 1.138554474958228, "grad_norm": 3.4314424991607666, "learning_rate": 8.775207996361553e-05, "loss": 2.4826745986938477, "memory(GiB)": 72.85, "step": 26575, "token_acc": 0.5, "train_speed(iter/s)": 0.669569 }, { "epoch": 1.138768690287477, "grad_norm": 4.837031364440918, "learning_rate": 8.774766706501377e-05, "loss": 2.1824140548706055, "memory(GiB)": 72.85, "step": 26580, "token_acc": 0.5251141552511416, "train_speed(iter/s)": 0.669556 }, { "epoch": 1.138982905616726, "grad_norm": 4.0729265213012695, "learning_rate": 8.774325348257761e-05, "loss": 2.205516242980957, "memory(GiB)": 72.85, "step": 26585, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.669575 }, { "epoch": 1.1391971209459748, 
"grad_norm": 3.6301021575927734, "learning_rate": 8.7738839216387e-05, "loss": 2.114749526977539, "memory(GiB)": 72.85, "step": 26590, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.66959 }, { "epoch": 1.1394113362752238, "grad_norm": 4.599440574645996, "learning_rate": 8.773442426652192e-05, "loss": 2.4264251708984377, "memory(GiB)": 72.85, "step": 26595, "token_acc": 0.4391891891891892, "train_speed(iter/s)": 0.669585 }, { "epoch": 1.1396255516044729, "grad_norm": 4.656891345977783, "learning_rate": 8.773000863306235e-05, "loss": 2.5238590240478516, "memory(GiB)": 72.85, "step": 26600, "token_acc": 0.4962686567164179, "train_speed(iter/s)": 0.669592 }, { "epoch": 1.1398397669337217, "grad_norm": 6.057092666625977, "learning_rate": 8.772559231608829e-05, "loss": 2.215220260620117, "memory(GiB)": 72.85, "step": 26605, "token_acc": 0.5170940170940171, "train_speed(iter/s)": 0.6696 }, { "epoch": 1.1400539822629707, "grad_norm": 4.895996570587158, "learning_rate": 8.772117531567972e-05, "loss": 2.351102828979492, "memory(GiB)": 72.85, "step": 26610, "token_acc": 0.49363057324840764, "train_speed(iter/s)": 0.669628 }, { "epoch": 1.1402681975922198, "grad_norm": 3.7874691486358643, "learning_rate": 8.771675763191667e-05, "loss": 2.4789749145507813, "memory(GiB)": 72.85, "step": 26615, "token_acc": 0.4855305466237942, "train_speed(iter/s)": 0.66963 }, { "epoch": 1.1404824129214686, "grad_norm": 4.668694019317627, "learning_rate": 8.771233926487919e-05, "loss": 2.122132682800293, "memory(GiB)": 72.85, "step": 26620, "token_acc": 0.525, "train_speed(iter/s)": 0.669646 }, { "epoch": 1.1406966282507176, "grad_norm": 4.178886890411377, "learning_rate": 8.770792021464729e-05, "loss": 2.456705093383789, "memory(GiB)": 72.85, "step": 26625, "token_acc": 0.4380952380952381, "train_speed(iter/s)": 0.669625 }, { "epoch": 1.1409108435799666, "grad_norm": 5.099415302276611, "learning_rate": 8.770350048130105e-05, "loss": 2.2188531875610353, "memory(GiB)": 72.85, "step": 
26630, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.669607 }, { "epoch": 1.1411250589092155, "grad_norm": 3.8164775371551514, "learning_rate": 8.769908006492053e-05, "loss": 2.550840950012207, "memory(GiB)": 72.85, "step": 26635, "token_acc": 0.43234323432343236, "train_speed(iter/s)": 0.669607 }, { "epoch": 1.1413392742384645, "grad_norm": 5.902568817138672, "learning_rate": 8.769465896558581e-05, "loss": 2.325590896606445, "memory(GiB)": 72.85, "step": 26640, "token_acc": 0.532, "train_speed(iter/s)": 0.669584 }, { "epoch": 1.1415534895677135, "grad_norm": 4.083983898162842, "learning_rate": 8.769023718337699e-05, "loss": 2.2734432220458984, "memory(GiB)": 72.85, "step": 26645, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.669604 }, { "epoch": 1.1417677048969623, "grad_norm": 3.9488608837127686, "learning_rate": 8.768581471837416e-05, "loss": 2.048962211608887, "memory(GiB)": 72.85, "step": 26650, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.669595 }, { "epoch": 1.1419819202262114, "grad_norm": 3.9166066646575928, "learning_rate": 8.768139157065744e-05, "loss": 2.4216556549072266, "memory(GiB)": 72.85, "step": 26655, "token_acc": 0.47151898734177217, "train_speed(iter/s)": 0.669604 }, { "epoch": 1.1421961355554604, "grad_norm": 6.924411773681641, "learning_rate": 8.767696774030698e-05, "loss": 2.385462188720703, "memory(GiB)": 72.85, "step": 26660, "token_acc": 0.4708171206225681, "train_speed(iter/s)": 0.669595 }, { "epoch": 1.1424103508847092, "grad_norm": 5.535421371459961, "learning_rate": 8.767254322740288e-05, "loss": 2.196482849121094, "memory(GiB)": 72.85, "step": 26665, "token_acc": 0.5436241610738255, "train_speed(iter/s)": 0.669618 }, { "epoch": 1.1426245662139582, "grad_norm": 4.586564540863037, "learning_rate": 8.766900312569488e-05, "loss": 2.324492645263672, "memory(GiB)": 72.85, "step": 26670, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.669603 }, { "epoch": 1.1428387815432073, "grad_norm": 
5.194805145263672, "learning_rate": 8.766457738439628e-05, "loss": 2.3819822311401366, "memory(GiB)": 72.85, "step": 26675, "token_acc": 0.49800796812749004, "train_speed(iter/s)": 0.669638 }, { "epoch": 1.143052996872456, "grad_norm": 3.8122146129608154, "learning_rate": 8.766015096076854e-05, "loss": 2.558418083190918, "memory(GiB)": 72.85, "step": 26680, "token_acc": 0.4658385093167702, "train_speed(iter/s)": 0.669651 }, { "epoch": 1.1432672122017051, "grad_norm": 3.8735384941101074, "learning_rate": 8.765572385489183e-05, "loss": 2.4449644088745117, "memory(GiB)": 72.85, "step": 26685, "token_acc": 0.46178343949044587, "train_speed(iter/s)": 0.669657 }, { "epoch": 1.1434814275309542, "grad_norm": 3.795897960662842, "learning_rate": 8.765129606684635e-05, "loss": 2.045125961303711, "memory(GiB)": 72.85, "step": 26690, "token_acc": 0.5326460481099656, "train_speed(iter/s)": 0.669663 }, { "epoch": 1.143695642860203, "grad_norm": 4.514263153076172, "learning_rate": 8.764686759671235e-05, "loss": 2.305158233642578, "memory(GiB)": 72.85, "step": 26695, "token_acc": 0.4876543209876543, "train_speed(iter/s)": 0.66966 }, { "epoch": 1.143909858189452, "grad_norm": 3.473950147628784, "learning_rate": 8.764243844457e-05, "loss": 2.200082206726074, "memory(GiB)": 72.85, "step": 26700, "token_acc": 0.5381944444444444, "train_speed(iter/s)": 0.669624 }, { "epoch": 1.144124073518701, "grad_norm": 4.326805591583252, "learning_rate": 8.763800861049958e-05, "loss": 2.5277727127075194, "memory(GiB)": 72.85, "step": 26705, "token_acc": 0.4826388888888889, "train_speed(iter/s)": 0.669622 }, { "epoch": 1.1443382888479499, "grad_norm": 4.373260974884033, "learning_rate": 8.763357809458131e-05, "loss": 2.6159168243408204, "memory(GiB)": 72.85, "step": 26710, "token_acc": 0.4668769716088328, "train_speed(iter/s)": 0.669596 }, { "epoch": 1.144552504177199, "grad_norm": 4.357953071594238, "learning_rate": 8.762914689689548e-05, "loss": 2.2360227584838865, "memory(GiB)": 72.85, "step": 
26715, "token_acc": 0.4859154929577465, "train_speed(iter/s)": 0.66959 }, { "epoch": 1.144766719506448, "grad_norm": 6.504082679748535, "learning_rate": 8.762471501752236e-05, "loss": 2.567857360839844, "memory(GiB)": 72.85, "step": 26720, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.669608 }, { "epoch": 1.1449809348356967, "grad_norm": 3.4032347202301025, "learning_rate": 8.762028245654224e-05, "loss": 2.3631654739379884, "memory(GiB)": 72.85, "step": 26725, "token_acc": 0.4758364312267658, "train_speed(iter/s)": 0.669619 }, { "epoch": 1.1451951501649458, "grad_norm": 3.224942684173584, "learning_rate": 8.761584921403539e-05, "loss": 2.1082090377807616, "memory(GiB)": 72.85, "step": 26730, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.66961 }, { "epoch": 1.1454093654941948, "grad_norm": 3.719651222229004, "learning_rate": 8.761141529008216e-05, "loss": 2.5548303604125975, "memory(GiB)": 72.85, "step": 26735, "token_acc": 0.4696485623003195, "train_speed(iter/s)": 0.66964 }, { "epoch": 1.1456235808234436, "grad_norm": 3.8684186935424805, "learning_rate": 8.760698068476285e-05, "loss": 2.251201057434082, "memory(GiB)": 72.85, "step": 26740, "token_acc": 0.5017543859649123, "train_speed(iter/s)": 0.669674 }, { "epoch": 1.1458377961526927, "grad_norm": 4.267103672027588, "learning_rate": 8.760254539815782e-05, "loss": 2.1980003356933593, "memory(GiB)": 72.85, "step": 26745, "token_acc": 0.5231316725978647, "train_speed(iter/s)": 0.669668 }, { "epoch": 1.1460520114819417, "grad_norm": 3.8489673137664795, "learning_rate": 8.75981094303474e-05, "loss": 2.239213752746582, "memory(GiB)": 72.85, "step": 26750, "token_acc": 0.5052631578947369, "train_speed(iter/s)": 0.669691 }, { "epoch": 1.1462662268111905, "grad_norm": 5.22525691986084, "learning_rate": 8.759367278141195e-05, "loss": 2.683186149597168, "memory(GiB)": 72.85, "step": 26755, "token_acc": 0.4606741573033708, "train_speed(iter/s)": 0.669669 }, { "epoch": 1.1464804421404395, 
"grad_norm": 3.479088306427002, "learning_rate": 8.758923545143187e-05, "loss": 2.4548038482666015, "memory(GiB)": 72.85, "step": 26760, "token_acc": 0.5, "train_speed(iter/s)": 0.669674 }, { "epoch": 1.1466946574696886, "grad_norm": 4.208492279052734, "learning_rate": 8.758479744048751e-05, "loss": 2.7173601150512696, "memory(GiB)": 72.85, "step": 26765, "token_acc": 0.4380664652567976, "train_speed(iter/s)": 0.669713 }, { "epoch": 1.1469088727989374, "grad_norm": 5.4452972412109375, "learning_rate": 8.758035874865929e-05, "loss": 2.36290283203125, "memory(GiB)": 72.85, "step": 26770, "token_acc": 0.5018587360594795, "train_speed(iter/s)": 0.669679 }, { "epoch": 1.1471230881281864, "grad_norm": 4.982216835021973, "learning_rate": 8.757591937602762e-05, "loss": 2.4110410690307615, "memory(GiB)": 72.85, "step": 26775, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.66969 }, { "epoch": 1.1473373034574355, "grad_norm": 3.999345302581787, "learning_rate": 8.757147932267293e-05, "loss": 2.431977081298828, "memory(GiB)": 72.85, "step": 26780, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.669699 }, { "epoch": 1.1475515187866843, "grad_norm": 4.300210475921631, "learning_rate": 8.756703858867564e-05, "loss": 2.296141242980957, "memory(GiB)": 72.85, "step": 26785, "token_acc": 0.5421686746987951, "train_speed(iter/s)": 0.669696 }, { "epoch": 1.1477657341159333, "grad_norm": 4.678747177124023, "learning_rate": 8.756259717411621e-05, "loss": 2.378570556640625, "memory(GiB)": 72.85, "step": 26790, "token_acc": 0.5125448028673835, "train_speed(iter/s)": 0.669709 }, { "epoch": 1.1479799494451823, "grad_norm": 3.957632064819336, "learning_rate": 8.755815507907509e-05, "loss": 2.2410442352294924, "memory(GiB)": 72.85, "step": 26795, "token_acc": 0.501628664495114, "train_speed(iter/s)": 0.669721 }, { "epoch": 1.1481941647744311, "grad_norm": 4.692312240600586, "learning_rate": 8.755371230363276e-05, "loss": 2.050467300415039, "memory(GiB)": 72.85, "step": 
26800, "token_acc": 0.5354330708661418, "train_speed(iter/s)": 0.669736 }, { "epoch": 1.1484083801036802, "grad_norm": 4.4014363288879395, "learning_rate": 8.754926884786972e-05, "loss": 2.5367710113525392, "memory(GiB)": 72.85, "step": 26805, "token_acc": 0.5060606060606061, "train_speed(iter/s)": 0.66973 }, { "epoch": 1.1486225954329292, "grad_norm": 4.66566801071167, "learning_rate": 8.754482471186643e-05, "loss": 2.3125837326049803, "memory(GiB)": 72.85, "step": 26810, "token_acc": 0.5035714285714286, "train_speed(iter/s)": 0.669743 }, { "epoch": 1.148836810762178, "grad_norm": 4.728904724121094, "learning_rate": 8.754037989570342e-05, "loss": 2.53810977935791, "memory(GiB)": 72.85, "step": 26815, "token_acc": 0.4936708860759494, "train_speed(iter/s)": 0.669739 }, { "epoch": 1.149051026091427, "grad_norm": 3.442044734954834, "learning_rate": 8.753593439946122e-05, "loss": 2.386534881591797, "memory(GiB)": 72.85, "step": 26820, "token_acc": 0.4676258992805755, "train_speed(iter/s)": 0.669739 }, { "epoch": 1.149265241420676, "grad_norm": 4.546586990356445, "learning_rate": 8.753148822322034e-05, "loss": 2.2074253082275392, "memory(GiB)": 72.85, "step": 26825, "token_acc": 0.49812734082397003, "train_speed(iter/s)": 0.66975 }, { "epoch": 1.149479456749925, "grad_norm": 3.1555099487304688, "learning_rate": 8.752704136706135e-05, "loss": 2.1884181976318358, "memory(GiB)": 72.85, "step": 26830, "token_acc": 0.4843205574912892, "train_speed(iter/s)": 0.669766 }, { "epoch": 1.149693672079174, "grad_norm": 4.00119686126709, "learning_rate": 8.752259383106483e-05, "loss": 2.222734832763672, "memory(GiB)": 72.85, "step": 26835, "token_acc": 0.5229681978798587, "train_speed(iter/s)": 0.669805 }, { "epoch": 1.149907887408423, "grad_norm": 5.185853958129883, "learning_rate": 8.75181456153113e-05, "loss": 2.277943229675293, "memory(GiB)": 72.85, "step": 26840, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.669784 }, { "epoch": 1.1501221027376718, "grad_norm": 
4.874390602111816, "learning_rate": 8.751369671988136e-05, "loss": 2.4950931549072264, "memory(GiB)": 72.85, "step": 26845, "token_acc": 0.4826388888888889, "train_speed(iter/s)": 0.669775 }, { "epoch": 1.1503363180669208, "grad_norm": 3.0782999992370605, "learning_rate": 8.750924714485562e-05, "loss": 2.53741512298584, "memory(GiB)": 72.85, "step": 26850, "token_acc": 0.475177304964539, "train_speed(iter/s)": 0.669811 }, { "epoch": 1.1505505333961699, "grad_norm": 3.3511102199554443, "learning_rate": 8.75047968903147e-05, "loss": 2.3785242080688476, "memory(GiB)": 72.85, "step": 26855, "token_acc": 0.5, "train_speed(iter/s)": 0.669768 }, { "epoch": 1.1507647487254187, "grad_norm": 4.498587131500244, "learning_rate": 8.750034595633918e-05, "loss": 2.44051456451416, "memory(GiB)": 72.85, "step": 26860, "token_acc": 0.4742268041237113, "train_speed(iter/s)": 0.669765 }, { "epoch": 1.1509789640546677, "grad_norm": 3.478123188018799, "learning_rate": 8.749589434300973e-05, "loss": 2.1685361862182617, "memory(GiB)": 72.85, "step": 26865, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.669786 }, { "epoch": 1.1511931793839167, "grad_norm": 3.3647468090057373, "learning_rate": 8.749144205040697e-05, "loss": 2.393443489074707, "memory(GiB)": 72.85, "step": 26870, "token_acc": 0.5087108013937283, "train_speed(iter/s)": 0.669793 }, { "epoch": 1.1514073947131656, "grad_norm": 4.439180374145508, "learning_rate": 8.748698907861157e-05, "loss": 2.630526542663574, "memory(GiB)": 72.85, "step": 26875, "token_acc": 0.4491525423728814, "train_speed(iter/s)": 0.669802 }, { "epoch": 1.1516216100424146, "grad_norm": 4.357089042663574, "learning_rate": 8.74825354277042e-05, "loss": 2.428066062927246, "memory(GiB)": 72.85, "step": 26880, "token_acc": 0.47266881028938906, "train_speed(iter/s)": 0.669804 }, { "epoch": 1.1518358253716636, "grad_norm": 4.967461585998535, "learning_rate": 8.747808109776553e-05, "loss": 2.3674041748046877, "memory(GiB)": 72.85, "step": 26885, 
"token_acc": 0.4775510204081633, "train_speed(iter/s)": 0.669799 }, { "epoch": 1.1520500407009124, "grad_norm": 4.661907196044922, "learning_rate": 8.747362608887627e-05, "loss": 2.414247512817383, "memory(GiB)": 72.85, "step": 26890, "token_acc": 0.49, "train_speed(iter/s)": 0.669807 }, { "epoch": 1.1522642560301615, "grad_norm": 3.906043529510498, "learning_rate": 8.746917040111711e-05, "loss": 2.3506752014160157, "memory(GiB)": 72.85, "step": 26895, "token_acc": 0.48089171974522293, "train_speed(iter/s)": 0.669825 }, { "epoch": 1.1524784713594105, "grad_norm": 3.640537977218628, "learning_rate": 8.746471403456879e-05, "loss": 2.310944747924805, "memory(GiB)": 72.85, "step": 26900, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.669843 }, { "epoch": 1.1526926866886593, "grad_norm": 3.657132148742676, "learning_rate": 8.746025698931204e-05, "loss": 2.276624298095703, "memory(GiB)": 72.85, "step": 26905, "token_acc": 0.49635036496350365, "train_speed(iter/s)": 0.669839 }, { "epoch": 1.1529069020179084, "grad_norm": 4.297985553741455, "learning_rate": 8.745579926542757e-05, "loss": 2.41175537109375, "memory(GiB)": 72.85, "step": 26910, "token_acc": 0.483739837398374, "train_speed(iter/s)": 0.669846 }, { "epoch": 1.1531211173471574, "grad_norm": 3.3134443759918213, "learning_rate": 8.74513408629962e-05, "loss": 2.28540153503418, "memory(GiB)": 72.85, "step": 26915, "token_acc": 0.5393939393939394, "train_speed(iter/s)": 0.669875 }, { "epoch": 1.1533353326764062, "grad_norm": 4.156652450561523, "learning_rate": 8.744688178209862e-05, "loss": 2.2841381072998046, "memory(GiB)": 72.85, "step": 26920, "token_acc": 0.5171102661596958, "train_speed(iter/s)": 0.669877 }, { "epoch": 1.1535495480056552, "grad_norm": 5.021247386932373, "learning_rate": 8.744242202281565e-05, "loss": 2.6054702758789063, "memory(GiB)": 72.85, "step": 26925, "token_acc": 0.44745762711864406, "train_speed(iter/s)": 0.669895 }, { "epoch": 1.1537637633349043, "grad_norm": 3.2372887134552, 
"learning_rate": 8.74379615852281e-05, "loss": 2.223626708984375, "memory(GiB)": 72.85, "step": 26930, "token_acc": 0.5064102564102564, "train_speed(iter/s)": 0.669876 }, { "epoch": 1.1539779786641533, "grad_norm": 5.2485737800598145, "learning_rate": 8.743350046941675e-05, "loss": 2.445109176635742, "memory(GiB)": 72.85, "step": 26935, "token_acc": 0.4844290657439446, "train_speed(iter/s)": 0.669902 }, { "epoch": 1.1541921939934021, "grad_norm": 6.7358622550964355, "learning_rate": 8.742903867546242e-05, "loss": 2.583846664428711, "memory(GiB)": 72.85, "step": 26940, "token_acc": 0.45993031358885017, "train_speed(iter/s)": 0.669924 }, { "epoch": 1.1544064093226512, "grad_norm": 3.3556406497955322, "learning_rate": 8.742457620344595e-05, "loss": 2.545209503173828, "memory(GiB)": 72.85, "step": 26945, "token_acc": 0.4653179190751445, "train_speed(iter/s)": 0.669938 }, { "epoch": 1.1546206246519002, "grad_norm": 13.607524871826172, "learning_rate": 8.742011305344817e-05, "loss": 2.2991504669189453, "memory(GiB)": 72.85, "step": 26950, "token_acc": 0.5095785440613027, "train_speed(iter/s)": 0.66995 }, { "epoch": 1.154834839981149, "grad_norm": 3.7944629192352295, "learning_rate": 8.741564922554994e-05, "loss": 2.3335664749145506, "memory(GiB)": 72.85, "step": 26955, "token_acc": 0.5020576131687243, "train_speed(iter/s)": 0.669976 }, { "epoch": 1.155049055310398, "grad_norm": 3.961392641067505, "learning_rate": 8.741118471983212e-05, "loss": 2.6026584625244142, "memory(GiB)": 72.85, "step": 26960, "token_acc": 0.47770700636942676, "train_speed(iter/s)": 0.669992 }, { "epoch": 1.155263270639647, "grad_norm": 3.3495187759399414, "learning_rate": 8.74067195363756e-05, "loss": 2.596539115905762, "memory(GiB)": 72.85, "step": 26965, "token_acc": 0.5, "train_speed(iter/s)": 0.669986 }, { "epoch": 1.1554774859688959, "grad_norm": 4.292473793029785, "learning_rate": 8.740225367526126e-05, "loss": 2.7083738327026365, "memory(GiB)": 72.85, "step": 26970, "token_acc": 
0.4980694980694981, "train_speed(iter/s)": 0.669997 }, { "epoch": 1.155691701298145, "grad_norm": 5.648579120635986, "learning_rate": 8.739778713657001e-05, "loss": 2.1443519592285156, "memory(GiB)": 72.85, "step": 26975, "token_acc": 0.5211864406779662, "train_speed(iter/s)": 0.670006 }, { "epoch": 1.155905916627394, "grad_norm": 3.4327476024627686, "learning_rate": 8.739331992038277e-05, "loss": 2.3755069732666017, "memory(GiB)": 72.85, "step": 26980, "token_acc": 0.44256756756756754, "train_speed(iter/s)": 0.670028 }, { "epoch": 1.1561201319566428, "grad_norm": 3.537740468978882, "learning_rate": 8.738885202678045e-05, "loss": 2.4490102767944335, "memory(GiB)": 72.85, "step": 26985, "token_acc": 0.4769736842105263, "train_speed(iter/s)": 0.670033 }, { "epoch": 1.1563343472858918, "grad_norm": 3.5212464332580566, "learning_rate": 8.738438345584401e-05, "loss": 2.4265615463256838, "memory(GiB)": 72.85, "step": 26990, "token_acc": 0.49514563106796117, "train_speed(iter/s)": 0.670016 }, { "epoch": 1.1565485626151408, "grad_norm": 3.1266684532165527, "learning_rate": 8.73799142076544e-05, "loss": 2.5365394592285155, "memory(GiB)": 72.85, "step": 26995, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670014 }, { "epoch": 1.1567627779443896, "grad_norm": 5.885438919067383, "learning_rate": 8.737544428229258e-05, "loss": 2.485251045227051, "memory(GiB)": 72.85, "step": 27000, "token_acc": 0.49140893470790376, "train_speed(iter/s)": 0.670037 }, { "epoch": 1.1567627779443896, "eval_loss": 2.0035970211029053, "eval_runtime": 16.579, "eval_samples_per_second": 6.032, "eval_steps_per_second": 6.032, "eval_token_acc": 0.5055096418732782, "step": 27000 }, { "epoch": 1.1569769932736387, "grad_norm": 3.869210720062256, "learning_rate": 8.737097367983951e-05, "loss": 2.303303527832031, "memory(GiB)": 72.85, "step": 27005, "token_acc": 0.5044687189672294, "train_speed(iter/s)": 0.66971 }, { "epoch": 1.1571912086028877, "grad_norm": 4.025478839874268, "learning_rate": 
8.73665024003762e-05, "loss": 2.3616050720214843, "memory(GiB)": 72.85, "step": 27010, "token_acc": 0.4789272030651341, "train_speed(iter/s)": 0.6697 }, { "epoch": 1.1574054239321365, "grad_norm": 3.6226556301116943, "learning_rate": 8.736203044398366e-05, "loss": 2.3520025253295898, "memory(GiB)": 72.85, "step": 27015, "token_acc": 0.49185667752442996, "train_speed(iter/s)": 0.669736 }, { "epoch": 1.1576196392613856, "grad_norm": 3.891129493713379, "learning_rate": 8.735755781074288e-05, "loss": 2.278635597229004, "memory(GiB)": 72.85, "step": 27020, "token_acc": 0.5182481751824818, "train_speed(iter/s)": 0.669721 }, { "epoch": 1.1578338545906346, "grad_norm": 4.207469940185547, "learning_rate": 8.735308450073489e-05, "loss": 2.3670650482177735, "memory(GiB)": 72.85, "step": 27025, "token_acc": 0.49603174603174605, "train_speed(iter/s)": 0.669708 }, { "epoch": 1.1580480699198834, "grad_norm": 4.3175578117370605, "learning_rate": 8.734861051404075e-05, "loss": 2.0970247268676756, "memory(GiB)": 72.85, "step": 27030, "token_acc": 0.54296875, "train_speed(iter/s)": 0.669739 }, { "epoch": 1.1582622852491324, "grad_norm": 5.1620283126831055, "learning_rate": 8.734413585074149e-05, "loss": 2.409737396240234, "memory(GiB)": 72.85, "step": 27035, "token_acc": 0.477124183006536, "train_speed(iter/s)": 0.669749 }, { "epoch": 1.1584765005783815, "grad_norm": 4.76256799697876, "learning_rate": 8.733966051091816e-05, "loss": 2.3699506759643554, "memory(GiB)": 72.85, "step": 27040, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.669753 }, { "epoch": 1.1586907159076303, "grad_norm": 3.5507209300994873, "learning_rate": 8.733518449465187e-05, "loss": 2.3358837127685548, "memory(GiB)": 72.85, "step": 27045, "token_acc": 0.49185667752442996, "train_speed(iter/s)": 0.669766 }, { "epoch": 1.1589049312368793, "grad_norm": 4.227436542510986, "learning_rate": 8.733070780202368e-05, "loss": 2.2016670227050783, "memory(GiB)": 72.85, "step": 27050, "token_acc": 
0.5109489051094891, "train_speed(iter/s)": 0.6698 }, { "epoch": 1.1591191465661284, "grad_norm": 6.1268391609191895, "learning_rate": 8.732623043311471e-05, "loss": 2.4319229125976562, "memory(GiB)": 72.85, "step": 27055, "token_acc": 0.439873417721519, "train_speed(iter/s)": 0.6698 }, { "epoch": 1.1593333618953772, "grad_norm": 5.2327070236206055, "learning_rate": 8.732175238800606e-05, "loss": 2.3799129486083985, "memory(GiB)": 72.85, "step": 27060, "token_acc": 0.5134228187919463, "train_speed(iter/s)": 0.669805 }, { "epoch": 1.1595475772246262, "grad_norm": 4.460882186889648, "learning_rate": 8.731727366677885e-05, "loss": 2.3007492065429687, "memory(GiB)": 72.85, "step": 27065, "token_acc": 0.5160256410256411, "train_speed(iter/s)": 0.669812 }, { "epoch": 1.1597617925538752, "grad_norm": 4.694085121154785, "learning_rate": 8.731279426951422e-05, "loss": 2.426531982421875, "memory(GiB)": 72.85, "step": 27070, "token_acc": 0.47076023391812866, "train_speed(iter/s)": 0.669833 }, { "epoch": 1.159976007883124, "grad_norm": 3.771627902984619, "learning_rate": 8.730831419629332e-05, "loss": 2.171373176574707, "memory(GiB)": 72.85, "step": 27075, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.669853 }, { "epoch": 1.160190223212373, "grad_norm": 3.7478275299072266, "learning_rate": 8.73038334471973e-05, "loss": 2.2265060424804686, "memory(GiB)": 72.85, "step": 27080, "token_acc": 0.49514563106796117, "train_speed(iter/s)": 0.669855 }, { "epoch": 1.1604044385416221, "grad_norm": 4.045331954956055, "learning_rate": 8.729935202230737e-05, "loss": 2.577393341064453, "memory(GiB)": 72.85, "step": 27085, "token_acc": 0.46774193548387094, "train_speed(iter/s)": 0.669875 }, { "epoch": 1.160618653870871, "grad_norm": 3.313401460647583, "learning_rate": 8.729486992170465e-05, "loss": 2.132851982116699, "memory(GiB)": 72.85, "step": 27090, "token_acc": 0.4854111405835544, "train_speed(iter/s)": 0.669884 }, { "epoch": 1.16083286920012, "grad_norm": 4.007187366485596, 
"learning_rate": 8.729038714547043e-05, "loss": 2.534004974365234, "memory(GiB)": 72.85, "step": 27095, "token_acc": 0.4822485207100592, "train_speed(iter/s)": 0.669891 }, { "epoch": 1.161047084529369, "grad_norm": 3.9508841037750244, "learning_rate": 8.728590369368582e-05, "loss": 2.4096195220947267, "memory(GiB)": 72.85, "step": 27100, "token_acc": 0.5114754098360655, "train_speed(iter/s)": 0.66991 }, { "epoch": 1.1612612998586178, "grad_norm": 4.256785869598389, "learning_rate": 8.72814195664321e-05, "loss": 2.472469520568848, "memory(GiB)": 72.85, "step": 27105, "token_acc": 0.4732824427480916, "train_speed(iter/s)": 0.669941 }, { "epoch": 1.1614755151878668, "grad_norm": 4.102194309234619, "learning_rate": 8.72769347637905e-05, "loss": 2.4068403244018555, "memory(GiB)": 72.85, "step": 27110, "token_acc": 0.4536741214057508, "train_speed(iter/s)": 0.669971 }, { "epoch": 1.1616897305171159, "grad_norm": 4.3913774490356445, "learning_rate": 8.727244928584224e-05, "loss": 2.1235639572143556, "memory(GiB)": 72.85, "step": 27115, "token_acc": 0.5254901960784314, "train_speed(iter/s)": 0.669974 }, { "epoch": 1.1619039458463647, "grad_norm": 3.760114908218384, "learning_rate": 8.72679631326686e-05, "loss": 2.2410058975219727, "memory(GiB)": 72.85, "step": 27120, "token_acc": 0.4755244755244755, "train_speed(iter/s)": 0.66999 }, { "epoch": 1.1621181611756137, "grad_norm": 4.514386177062988, "learning_rate": 8.726347630435088e-05, "loss": 2.512576103210449, "memory(GiB)": 72.85, "step": 27125, "token_acc": 0.4507042253521127, "train_speed(iter/s)": 0.669984 }, { "epoch": 1.1623323765048628, "grad_norm": 4.23671293258667, "learning_rate": 8.72589888009703e-05, "loss": 2.655847358703613, "memory(GiB)": 72.85, "step": 27130, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.669991 }, { "epoch": 1.1625465918341116, "grad_norm": 4.6728997230529785, "learning_rate": 8.725450062260819e-05, "loss": 2.6526655197143554, "memory(GiB)": 72.85, "step": 27135, "token_acc": 
0.4618181818181818, "train_speed(iter/s)": 0.669988 }, { "epoch": 1.1627608071633606, "grad_norm": 3.8909316062927246, "learning_rate": 8.725001176934587e-05, "loss": 2.2082799911499023, "memory(GiB)": 72.85, "step": 27140, "token_acc": 0.5100671140939598, "train_speed(iter/s)": 0.670019 }, { "epoch": 1.1629750224926096, "grad_norm": 3.9431636333465576, "learning_rate": 8.72455222412646e-05, "loss": 2.560785484313965, "memory(GiB)": 72.85, "step": 27145, "token_acc": 0.46503496503496505, "train_speed(iter/s)": 0.670011 }, { "epoch": 1.1631892378218585, "grad_norm": 4.195104122161865, "learning_rate": 8.72410320384458e-05, "loss": 2.3729034423828126, "memory(GiB)": 72.85, "step": 27150, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.670007 }, { "epoch": 1.1634034531511075, "grad_norm": 3.616875171661377, "learning_rate": 8.723654116097075e-05, "loss": 2.217347526550293, "memory(GiB)": 72.85, "step": 27155, "token_acc": 0.524904214559387, "train_speed(iter/s)": 0.670001 }, { "epoch": 1.1636176684803565, "grad_norm": 5.856142520904541, "learning_rate": 8.723204960892085e-05, "loss": 2.462172508239746, "memory(GiB)": 72.85, "step": 27160, "token_acc": 0.47509578544061304, "train_speed(iter/s)": 0.670004 }, { "epoch": 1.1638318838096053, "grad_norm": 3.2189509868621826, "learning_rate": 8.722755738237742e-05, "loss": 2.2473358154296874, "memory(GiB)": 72.85, "step": 27165, "token_acc": 0.5089285714285714, "train_speed(iter/s)": 0.669984 }, { "epoch": 1.1640460991388544, "grad_norm": 3.957221031188965, "learning_rate": 8.722306448142187e-05, "loss": 2.239192581176758, "memory(GiB)": 72.85, "step": 27170, "token_acc": 0.48788927335640137, "train_speed(iter/s)": 0.669993 }, { "epoch": 1.1642603144681034, "grad_norm": 3.299194574356079, "learning_rate": 8.721857090613559e-05, "loss": 2.3733846664428713, "memory(GiB)": 72.85, "step": 27175, "token_acc": 0.4880546075085324, "train_speed(iter/s)": 0.669986 }, { "epoch": 1.1644745297973522, "grad_norm": 
4.397407054901123, "learning_rate": 8.721407665659998e-05, "loss": 2.1860443115234376, "memory(GiB)": 72.85, "step": 27180, "token_acc": 0.4946236559139785, "train_speed(iter/s)": 0.669969 }, { "epoch": 1.1646887451266013, "grad_norm": 6.666600227355957, "learning_rate": 8.720958173289647e-05, "loss": 2.5015275955200194, "memory(GiB)": 72.85, "step": 27185, "token_acc": 0.48184818481848185, "train_speed(iter/s)": 0.669977 }, { "epoch": 1.1649029604558503, "grad_norm": 5.2926859855651855, "learning_rate": 8.720508613510648e-05, "loss": 2.8380077362060545, "memory(GiB)": 72.85, "step": 27190, "token_acc": 0.44554455445544555, "train_speed(iter/s)": 0.669995 }, { "epoch": 1.165117175785099, "grad_norm": 5.371315002441406, "learning_rate": 8.720058986331145e-05, "loss": 2.550024223327637, "memory(GiB)": 72.85, "step": 27195, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.669959 }, { "epoch": 1.1653313911143481, "grad_norm": 4.41343879699707, "learning_rate": 8.719609291759285e-05, "loss": 2.15451602935791, "memory(GiB)": 72.85, "step": 27200, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.669959 }, { "epoch": 1.1655456064435972, "grad_norm": 3.7191736698150635, "learning_rate": 8.719159529803211e-05, "loss": 2.5905752182006836, "memory(GiB)": 72.85, "step": 27205, "token_acc": 0.4899328859060403, "train_speed(iter/s)": 0.669963 }, { "epoch": 1.165759821772846, "grad_norm": 5.582658767700195, "learning_rate": 8.718709700471075e-05, "loss": 2.2873550415039063, "memory(GiB)": 72.85, "step": 27210, "token_acc": 0.5191740412979351, "train_speed(iter/s)": 0.669964 }, { "epoch": 1.165974037102095, "grad_norm": 3.703333854675293, "learning_rate": 8.718259803771025e-05, "loss": 2.672417640686035, "memory(GiB)": 72.85, "step": 27215, "token_acc": 0.48013245033112584, "train_speed(iter/s)": 0.669962 }, { "epoch": 1.166188252431344, "grad_norm": 4.1372857093811035, "learning_rate": 8.717809839711208e-05, "loss": 2.416579246520996, "memory(GiB)": 72.85, 
"step": 27220, "token_acc": 0.5031847133757962, "train_speed(iter/s)": 0.669937 }, { "epoch": 1.1664024677605929, "grad_norm": 4.452701568603516, "learning_rate": 8.717359808299781e-05, "loss": 2.514178657531738, "memory(GiB)": 72.85, "step": 27225, "token_acc": 0.4610951008645533, "train_speed(iter/s)": 0.669914 }, { "epoch": 1.166616683089842, "grad_norm": 4.8850016593933105, "learning_rate": 8.716909709544893e-05, "loss": 2.663093376159668, "memory(GiB)": 72.85, "step": 27230, "token_acc": 0.44106463878326996, "train_speed(iter/s)": 0.669923 }, { "epoch": 1.166830898419091, "grad_norm": 4.8331618309021, "learning_rate": 8.716459543454699e-05, "loss": 2.462297248840332, "memory(GiB)": 72.85, "step": 27235, "token_acc": 0.43853820598006643, "train_speed(iter/s)": 0.669912 }, { "epoch": 1.1670451137483397, "grad_norm": 3.791466236114502, "learning_rate": 8.716009310037353e-05, "loss": 2.418317985534668, "memory(GiB)": 72.85, "step": 27240, "token_acc": 0.5, "train_speed(iter/s)": 0.669925 }, { "epoch": 1.1672593290775888, "grad_norm": 8.569113731384277, "learning_rate": 8.715559009301013e-05, "loss": 2.2270252227783205, "memory(GiB)": 72.85, "step": 27245, "token_acc": 0.5392491467576792, "train_speed(iter/s)": 0.66994 }, { "epoch": 1.1674735444068378, "grad_norm": 3.59828782081604, "learning_rate": 8.715108641253835e-05, "loss": 2.511278533935547, "memory(GiB)": 72.85, "step": 27250, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.669944 }, { "epoch": 1.1676877597360866, "grad_norm": 3.4054055213928223, "learning_rate": 8.714658205903981e-05, "loss": 2.1984331130981447, "memory(GiB)": 72.85, "step": 27255, "token_acc": 0.5054151624548736, "train_speed(iter/s)": 0.669942 }, { "epoch": 1.1679019750653357, "grad_norm": 4.10741662979126, "learning_rate": 8.714207703259608e-05, "loss": 2.4928855895996094, "memory(GiB)": 72.85, "step": 27260, "token_acc": 0.4925373134328358, "train_speed(iter/s)": 0.669926 }, { "epoch": 1.1681161903945847, "grad_norm": 
3.602142810821533, "learning_rate": 8.713757133328879e-05, "loss": 2.369028663635254, "memory(GiB)": 72.85, "step": 27265, "token_acc": 0.512, "train_speed(iter/s)": 0.669928 }, { "epoch": 1.1683304057238335, "grad_norm": 4.973043918609619, "learning_rate": 8.713306496119955e-05, "loss": 2.6947357177734377, "memory(GiB)": 72.85, "step": 27270, "token_acc": 0.4816326530612245, "train_speed(iter/s)": 0.669955 }, { "epoch": 1.1685446210530825, "grad_norm": 4.035355091094971, "learning_rate": 8.712855791641e-05, "loss": 2.333383560180664, "memory(GiB)": 72.85, "step": 27275, "token_acc": 0.5131578947368421, "train_speed(iter/s)": 0.669969 }, { "epoch": 1.1687588363823316, "grad_norm": 3.7961413860321045, "learning_rate": 8.712405019900179e-05, "loss": 2.5604820251464844, "memory(GiB)": 72.85, "step": 27280, "token_acc": 0.4755700325732899, "train_speed(iter/s)": 0.669996 }, { "epoch": 1.1689730517115804, "grad_norm": 3.6220085620880127, "learning_rate": 8.711954180905661e-05, "loss": 2.6715023040771486, "memory(GiB)": 72.85, "step": 27285, "token_acc": 0.45161290322580644, "train_speed(iter/s)": 0.669987 }, { "epoch": 1.1691872670408294, "grad_norm": 3.3908824920654297, "learning_rate": 8.711503274665609e-05, "loss": 2.151406669616699, "memory(GiB)": 72.85, "step": 27290, "token_acc": 0.4984126984126984, "train_speed(iter/s)": 0.670002 }, { "epoch": 1.1694014823700785, "grad_norm": 4.577834606170654, "learning_rate": 8.711052301188193e-05, "loss": 2.6367244720458984, "memory(GiB)": 72.85, "step": 27295, "token_acc": 0.4624505928853755, "train_speed(iter/s)": 0.670011 }, { "epoch": 1.1696156976993273, "grad_norm": 3.788386821746826, "learning_rate": 8.710601260481583e-05, "loss": 2.299910545349121, "memory(GiB)": 72.85, "step": 27300, "token_acc": 0.5107913669064749, "train_speed(iter/s)": 0.670052 }, { "epoch": 1.1698299130285763, "grad_norm": 4.140405178070068, "learning_rate": 8.710150152553953e-05, "loss": 2.181583786010742, "memory(GiB)": 72.85, "step": 27305, 
"token_acc": 0.5378787878787878, "train_speed(iter/s)": 0.670059 }, { "epoch": 1.1700441283578253, "grad_norm": 3.36018443107605, "learning_rate": 8.70969897741347e-05, "loss": 2.178552436828613, "memory(GiB)": 72.85, "step": 27310, "token_acc": 0.5605095541401274, "train_speed(iter/s)": 0.670065 }, { "epoch": 1.1702583436870742, "grad_norm": 2.8964884281158447, "learning_rate": 8.709247735068311e-05, "loss": 2.5056800842285156, "memory(GiB)": 72.85, "step": 27315, "token_acc": 0.46932515337423314, "train_speed(iter/s)": 0.670075 }, { "epoch": 1.1704725590163232, "grad_norm": 3.609377861022949, "learning_rate": 8.708796425526651e-05, "loss": 2.359777069091797, "memory(GiB)": 72.85, "step": 27320, "token_acc": 0.47985347985347987, "train_speed(iter/s)": 0.670075 }, { "epoch": 1.1706867743455722, "grad_norm": 3.8143534660339355, "learning_rate": 8.708345048796664e-05, "loss": 2.474045181274414, "memory(GiB)": 72.85, "step": 27325, "token_acc": 0.4953560371517028, "train_speed(iter/s)": 0.670078 }, { "epoch": 1.170900989674821, "grad_norm": 3.2626445293426514, "learning_rate": 8.707893604886526e-05, "loss": 2.477455902099609, "memory(GiB)": 72.85, "step": 27330, "token_acc": 0.509375, "train_speed(iter/s)": 0.67006 }, { "epoch": 1.17111520500407, "grad_norm": 4.1317830085754395, "learning_rate": 8.707442093804419e-05, "loss": 2.069805908203125, "memory(GiB)": 72.85, "step": 27335, "token_acc": 0.5291479820627802, "train_speed(iter/s)": 0.670071 }, { "epoch": 1.171329420333319, "grad_norm": 4.274250030517578, "learning_rate": 8.70699051555852e-05, "loss": 2.357343292236328, "memory(GiB)": 72.85, "step": 27340, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.670074 }, { "epoch": 1.171543635662568, "grad_norm": 4.49885892868042, "learning_rate": 8.706538870157011e-05, "loss": 2.4651630401611326, "memory(GiB)": 72.85, "step": 27345, "token_acc": 0.5234899328859061, "train_speed(iter/s)": 0.670075 }, { "epoch": 1.171757850991817, "grad_norm": 3.865506172180176, 
"learning_rate": 8.706087157608071e-05, "loss": 2.1966663360595704, "memory(GiB)": 72.85, "step": 27350, "token_acc": 0.5113636363636364, "train_speed(iter/s)": 0.670087 }, { "epoch": 1.171972066321066, "grad_norm": 3.758547067642212, "learning_rate": 8.705635377919888e-05, "loss": 2.6229389190673826, "memory(GiB)": 72.85, "step": 27355, "token_acc": 0.49324324324324326, "train_speed(iter/s)": 0.670102 }, { "epoch": 1.172186281650315, "grad_norm": 3.132476329803467, "learning_rate": 8.705183531100643e-05, "loss": 2.4095861434936525, "memory(GiB)": 72.85, "step": 27360, "token_acc": 0.4983922829581994, "train_speed(iter/s)": 0.670113 }, { "epoch": 1.1724004969795638, "grad_norm": 3.1497814655303955, "learning_rate": 8.704731617158522e-05, "loss": 2.201802444458008, "memory(GiB)": 72.85, "step": 27365, "token_acc": 0.5224358974358975, "train_speed(iter/s)": 0.670121 }, { "epoch": 1.1726147123088129, "grad_norm": 4.5457444190979, "learning_rate": 8.704279636101713e-05, "loss": 2.171800422668457, "memory(GiB)": 72.85, "step": 27370, "token_acc": 0.526984126984127, "train_speed(iter/s)": 0.670139 }, { "epoch": 1.172828927638062, "grad_norm": 3.6524858474731445, "learning_rate": 8.703827587938405e-05, "loss": 2.4175304412841796, "memory(GiB)": 72.85, "step": 27375, "token_acc": 0.4681528662420382, "train_speed(iter/s)": 0.67013 }, { "epoch": 1.1730431429673107, "grad_norm": 3.632796049118042, "learning_rate": 8.703375472676784e-05, "loss": 2.181477355957031, "memory(GiB)": 72.85, "step": 27380, "token_acc": 0.5, "train_speed(iter/s)": 0.670131 }, { "epoch": 1.1732573582965597, "grad_norm": 3.6593387126922607, "learning_rate": 8.702923290325042e-05, "loss": 2.470458984375, "memory(GiB)": 72.85, "step": 27385, "token_acc": 0.49174917491749176, "train_speed(iter/s)": 0.67016 }, { "epoch": 1.1734715736258088, "grad_norm": 4.214940547943115, "learning_rate": 8.702471040891371e-05, "loss": 2.5941898345947267, "memory(GiB)": 72.85, "step": 27390, "token_acc": 
0.4560260586319218, "train_speed(iter/s)": 0.670182 }, { "epoch": 1.1736857889550576, "grad_norm": 3.3071820735931396, "learning_rate": 8.702018724383965e-05, "loss": 2.184556579589844, "memory(GiB)": 72.85, "step": 27395, "token_acc": 0.5224913494809689, "train_speed(iter/s)": 0.670216 }, { "epoch": 1.1739000042843066, "grad_norm": 4.689560890197754, "learning_rate": 8.701566340811018e-05, "loss": 2.3943531036376955, "memory(GiB)": 72.85, "step": 27400, "token_acc": 0.44876325088339225, "train_speed(iter/s)": 0.670233 }, { "epoch": 1.1741142196135557, "grad_norm": 3.9742846488952637, "learning_rate": 8.701113890180721e-05, "loss": 2.447538375854492, "memory(GiB)": 72.85, "step": 27405, "token_acc": 0.488135593220339, "train_speed(iter/s)": 0.67025 }, { "epoch": 1.1743284349428045, "grad_norm": 5.111000061035156, "learning_rate": 8.700661372501276e-05, "loss": 2.507090377807617, "memory(GiB)": 72.85, "step": 27410, "token_acc": 0.46831955922865015, "train_speed(iter/s)": 0.670269 }, { "epoch": 1.1745426502720535, "grad_norm": 4.769659042358398, "learning_rate": 8.70020878778088e-05, "loss": 2.337746429443359, "memory(GiB)": 72.85, "step": 27415, "token_acc": 0.5056179775280899, "train_speed(iter/s)": 0.670278 }, { "epoch": 1.1747568656013025, "grad_norm": 4.015910625457764, "learning_rate": 8.699756136027728e-05, "loss": 2.448190689086914, "memory(GiB)": 72.85, "step": 27420, "token_acc": 0.5097276264591439, "train_speed(iter/s)": 0.670279 }, { "epoch": 1.1749710809305514, "grad_norm": 3.363961935043335, "learning_rate": 8.699303417250024e-05, "loss": 2.117427444458008, "memory(GiB)": 72.85, "step": 27425, "token_acc": 0.5448028673835126, "train_speed(iter/s)": 0.670285 }, { "epoch": 1.1751852962598004, "grad_norm": 4.270669937133789, "learning_rate": 8.69885063145597e-05, "loss": 2.350942611694336, "memory(GiB)": 72.85, "step": 27430, "token_acc": 0.5181518151815182, "train_speed(iter/s)": 0.670303 }, { "epoch": 1.1753995115890494, "grad_norm": 3.818570613861084, 
"learning_rate": 8.698397778653767e-05, "loss": 2.2047702789306642, "memory(GiB)": 72.85, "step": 27435, "token_acc": 0.5353159851301115, "train_speed(iter/s)": 0.670329 }, { "epoch": 1.1756137269182982, "grad_norm": 4.346576690673828, "learning_rate": 8.697944858851617e-05, "loss": 2.528196334838867, "memory(GiB)": 72.85, "step": 27440, "token_acc": 0.516728624535316, "train_speed(iter/s)": 0.670336 }, { "epoch": 1.1758279422475473, "grad_norm": 3.8212740421295166, "learning_rate": 8.697491872057726e-05, "loss": 2.3633939743041994, "memory(GiB)": 72.85, "step": 27445, "token_acc": 0.47003154574132494, "train_speed(iter/s)": 0.670365 }, { "epoch": 1.1760421575767963, "grad_norm": 4.795933723449707, "learning_rate": 8.697038818280303e-05, "loss": 2.3514781951904298, "memory(GiB)": 72.85, "step": 27450, "token_acc": 0.5140845070422535, "train_speed(iter/s)": 0.670349 }, { "epoch": 1.1762563729060451, "grad_norm": 3.83178448677063, "learning_rate": 8.696585697527554e-05, "loss": 2.3725822448730467, "memory(GiB)": 72.85, "step": 27455, "token_acc": 0.5059523809523809, "train_speed(iter/s)": 0.670367 }, { "epoch": 1.1764705882352942, "grad_norm": 3.6923375129699707, "learning_rate": 8.696132509807686e-05, "loss": 2.412805366516113, "memory(GiB)": 72.85, "step": 27460, "token_acc": 0.4950166112956811, "train_speed(iter/s)": 0.670404 }, { "epoch": 1.1766848035645432, "grad_norm": 4.36692476272583, "learning_rate": 8.695679255128912e-05, "loss": 2.103376770019531, "memory(GiB)": 72.85, "step": 27465, "token_acc": 0.5789473684210527, "train_speed(iter/s)": 0.670424 }, { "epoch": 1.176899018893792, "grad_norm": 4.344459056854248, "learning_rate": 8.69522593349944e-05, "loss": 2.290055847167969, "memory(GiB)": 72.85, "step": 27470, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.670452 }, { "epoch": 1.177113234223041, "grad_norm": 4.489073753356934, "learning_rate": 8.694772544927485e-05, "loss": 2.6815668106079102, "memory(GiB)": 72.85, "step": 27475, "token_acc": 
0.4722222222222222, "train_speed(iter/s)": 0.670464 }, { "epoch": 1.17732744955229, "grad_norm": 3.3612074851989746, "learning_rate": 8.694319089421258e-05, "loss": 2.2265174865722654, "memory(GiB)": 72.85, "step": 27480, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.670501 }, { "epoch": 1.1775416648815389, "grad_norm": 3.4575986862182617, "learning_rate": 8.693865566988977e-05, "loss": 2.2917015075683596, "memory(GiB)": 72.85, "step": 27485, "token_acc": 0.4981949458483754, "train_speed(iter/s)": 0.67049 }, { "epoch": 1.177755880210788, "grad_norm": 3.270615339279175, "learning_rate": 8.693411977638855e-05, "loss": 2.481495666503906, "memory(GiB)": 72.85, "step": 27490, "token_acc": 0.49693251533742333, "train_speed(iter/s)": 0.67049 }, { "epoch": 1.177970095540037, "grad_norm": 3.7881903648376465, "learning_rate": 8.69295832137911e-05, "loss": 2.514999008178711, "memory(GiB)": 72.85, "step": 27495, "token_acc": 0.48024316109422494, "train_speed(iter/s)": 0.670525 }, { "epoch": 1.1781843108692858, "grad_norm": 3.903610944747925, "learning_rate": 8.692504598217961e-05, "loss": 2.3165460586547852, "memory(GiB)": 72.85, "step": 27500, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.670513 }, { "epoch": 1.1781843108692858, "eval_loss": 2.2808847427368164, "eval_runtime": 15.6917, "eval_samples_per_second": 6.373, "eval_steps_per_second": 6.373, "eval_token_acc": 0.5006605019815059, "step": 27500 }, { "epoch": 1.1783985261985348, "grad_norm": 3.8121471405029297, "learning_rate": 8.692050808163628e-05, "loss": 2.0827075958251955, "memory(GiB)": 72.85, "step": 27505, "token_acc": 0.5095095095095095, "train_speed(iter/s)": 0.670202 }, { "epoch": 1.1786127415277838, "grad_norm": 3.175058603286743, "learning_rate": 8.69159695122433e-05, "loss": 1.9568635940551757, "memory(GiB)": 72.85, "step": 27510, "token_acc": 0.6098484848484849, "train_speed(iter/s)": 0.670187 }, { "epoch": 1.1788269568570326, "grad_norm": 3.4452195167541504, "learning_rate": 
8.69114302740829e-05, "loss": 2.090604782104492, "memory(GiB)": 72.85, "step": 27515, "token_acc": 0.5107033639143731, "train_speed(iter/s)": 0.670195 }, { "epoch": 1.1790411721862817, "grad_norm": 4.79313325881958, "learning_rate": 8.690689036723733e-05, "loss": 2.535116767883301, "memory(GiB)": 72.85, "step": 27520, "token_acc": 0.4768392370572207, "train_speed(iter/s)": 0.670213 }, { "epoch": 1.1792553875155307, "grad_norm": 4.354017734527588, "learning_rate": 8.690234979178881e-05, "loss": 2.0885543823242188, "memory(GiB)": 72.85, "step": 27525, "token_acc": 0.5472972972972973, "train_speed(iter/s)": 0.670232 }, { "epoch": 1.1794696028447795, "grad_norm": 4.1392107009887695, "learning_rate": 8.689780854781962e-05, "loss": 2.0856643676757813, "memory(GiB)": 72.85, "step": 27530, "token_acc": 0.5102880658436214, "train_speed(iter/s)": 0.670242 }, { "epoch": 1.1796838181740286, "grad_norm": 4.754573822021484, "learning_rate": 8.6893266635412e-05, "loss": 2.394754409790039, "memory(GiB)": 72.85, "step": 27535, "token_acc": 0.5360824742268041, "train_speed(iter/s)": 0.670259 }, { "epoch": 1.1798980335032776, "grad_norm": 4.426087856292725, "learning_rate": 8.688872405464825e-05, "loss": 2.3830005645751955, "memory(GiB)": 72.85, "step": 27540, "token_acc": 0.4944649446494465, "train_speed(iter/s)": 0.670248 }, { "epoch": 1.1801122488325264, "grad_norm": 5.511617660522461, "learning_rate": 8.688418080561067e-05, "loss": 2.4839456558227537, "memory(GiB)": 72.85, "step": 27545, "token_acc": 0.4803921568627451, "train_speed(iter/s)": 0.670264 }, { "epoch": 1.1803264641617754, "grad_norm": 4.3455281257629395, "learning_rate": 8.687963688838154e-05, "loss": 2.0645473480224608, "memory(GiB)": 72.85, "step": 27550, "token_acc": 0.5335689045936396, "train_speed(iter/s)": 0.670288 }, { "epoch": 1.1805406794910245, "grad_norm": 4.101743698120117, "learning_rate": 8.687509230304319e-05, "loss": 2.343110466003418, "memory(GiB)": 72.85, "step": 27555, "token_acc": 
0.49504950495049505, "train_speed(iter/s)": 0.670284 }, { "epoch": 1.1807548948202733, "grad_norm": 3.188279628753662, "learning_rate": 8.687054704967796e-05, "loss": 2.1907222747802733, "memory(GiB)": 72.85, "step": 27560, "token_acc": 0.5133079847908745, "train_speed(iter/s)": 0.670266 }, { "epoch": 1.1809691101495223, "grad_norm": 2.9696385860443115, "learning_rate": 8.686600112836818e-05, "loss": 2.634583282470703, "memory(GiB)": 72.85, "step": 27565, "token_acc": 0.4567901234567901, "train_speed(iter/s)": 0.670283 }, { "epoch": 1.1811833254787714, "grad_norm": 3.436209201812744, "learning_rate": 8.68614545391962e-05, "loss": 2.405536651611328, "memory(GiB)": 72.85, "step": 27570, "token_acc": 0.4819672131147541, "train_speed(iter/s)": 0.670303 }, { "epoch": 1.1813975408080202, "grad_norm": 3.917691230773926, "learning_rate": 8.68569072822444e-05, "loss": 2.3823652267456055, "memory(GiB)": 72.85, "step": 27575, "token_acc": 0.4750733137829912, "train_speed(iter/s)": 0.670295 }, { "epoch": 1.1816117561372692, "grad_norm": 6.308925151824951, "learning_rate": 8.685235935759516e-05, "loss": 2.3037353515625, "memory(GiB)": 72.85, "step": 27580, "token_acc": 0.5166051660516605, "train_speed(iter/s)": 0.670277 }, { "epoch": 1.1818259714665182, "grad_norm": 3.857243537902832, "learning_rate": 8.684781076533085e-05, "loss": 2.156229019165039, "memory(GiB)": 72.85, "step": 27585, "token_acc": 0.5419354838709678, "train_speed(iter/s)": 0.670254 }, { "epoch": 1.182040186795767, "grad_norm": 4.173947334289551, "learning_rate": 8.684326150553388e-05, "loss": 2.3343742370605467, "memory(GiB)": 72.85, "step": 27590, "token_acc": 0.48863636363636365, "train_speed(iter/s)": 0.670244 }, { "epoch": 1.182254402125016, "grad_norm": 3.786839246749878, "learning_rate": 8.683871157828668e-05, "loss": 2.3112552642822264, "memory(GiB)": 72.85, "step": 27595, "token_acc": 0.49498327759197325, "train_speed(iter/s)": 0.670252 }, { "epoch": 1.1824686174542651, "grad_norm": 
3.9805896282196045, "learning_rate": 8.683416098367163e-05, "loss": 2.390167808532715, "memory(GiB)": 72.85, "step": 27600, "token_acc": 0.504950495049505, "train_speed(iter/s)": 0.670266 }, { "epoch": 1.182682832783514, "grad_norm": 3.8141531944274902, "learning_rate": 8.682960972177123e-05, "loss": 2.508990478515625, "memory(GiB)": 72.85, "step": 27605, "token_acc": 0.5111111111111111, "train_speed(iter/s)": 0.67027 }, { "epoch": 1.182897048112763, "grad_norm": 13.80804443359375, "learning_rate": 8.682505779266789e-05, "loss": 2.043051910400391, "memory(GiB)": 72.85, "step": 27610, "token_acc": 0.5477178423236515, "train_speed(iter/s)": 0.670287 }, { "epoch": 1.183111263442012, "grad_norm": 4.341646671295166, "learning_rate": 8.682050519644408e-05, "loss": 2.555972099304199, "memory(GiB)": 72.85, "step": 27615, "token_acc": 0.49477351916376305, "train_speed(iter/s)": 0.670306 }, { "epoch": 1.1833254787712608, "grad_norm": 4.988469123840332, "learning_rate": 8.681595193318228e-05, "loss": 2.2078052520751954, "memory(GiB)": 72.85, "step": 27620, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.670288 }, { "epoch": 1.1835396941005099, "grad_norm": 5.198513507843018, "learning_rate": 8.681139800296498e-05, "loss": 2.2249475479125977, "memory(GiB)": 72.85, "step": 27625, "token_acc": 0.5117056856187291, "train_speed(iter/s)": 0.670298 }, { "epoch": 1.1837539094297589, "grad_norm": 4.24138879776001, "learning_rate": 8.680684340587468e-05, "loss": 2.4571937561035155, "memory(GiB)": 72.85, "step": 27630, "token_acc": 0.4936708860759494, "train_speed(iter/s)": 0.67028 }, { "epoch": 1.1839681247590077, "grad_norm": 3.372159242630005, "learning_rate": 8.680228814199387e-05, "loss": 2.11147403717041, "memory(GiB)": 72.85, "step": 27635, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.670261 }, { "epoch": 1.1841823400882567, "grad_norm": 3.8112213611602783, "learning_rate": 8.679773221140509e-05, "loss": 2.218613052368164, "memory(GiB)": 72.85, "step": 
27640, "token_acc": 0.5160349854227405, "train_speed(iter/s)": 0.670279 }, { "epoch": 1.1843965554175058, "grad_norm": 4.9857635498046875, "learning_rate": 8.679317561419087e-05, "loss": 2.2750343322753905, "memory(GiB)": 72.85, "step": 27645, "token_acc": 0.548, "train_speed(iter/s)": 0.670248 }, { "epoch": 1.1846107707467546, "grad_norm": 4.311339378356934, "learning_rate": 8.678861835043377e-05, "loss": 2.3798107147216796, "memory(GiB)": 72.85, "step": 27650, "token_acc": 0.5076452599388379, "train_speed(iter/s)": 0.670206 }, { "epoch": 1.1848249860760036, "grad_norm": 4.33917760848999, "learning_rate": 8.678406042021632e-05, "loss": 2.719565200805664, "memory(GiB)": 72.85, "step": 27655, "token_acc": 0.47038327526132406, "train_speed(iter/s)": 0.670238 }, { "epoch": 1.1850392014052527, "grad_norm": 5.801537990570068, "learning_rate": 8.677950182362114e-05, "loss": 2.338563346862793, "memory(GiB)": 72.85, "step": 27660, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.670227 }, { "epoch": 1.1852534167345015, "grad_norm": 3.8859667778015137, "learning_rate": 8.677494256073075e-05, "loss": 2.409520149230957, "memory(GiB)": 72.85, "step": 27665, "token_acc": 0.5015576323987538, "train_speed(iter/s)": 0.670247 }, { "epoch": 1.1854676320637505, "grad_norm": 2.8029215335845947, "learning_rate": 8.677038263162778e-05, "loss": 2.476744842529297, "memory(GiB)": 72.85, "step": 27670, "token_acc": 0.4699140401146132, "train_speed(iter/s)": 0.670279 }, { "epoch": 1.1856818473929995, "grad_norm": 4.452514171600342, "learning_rate": 8.676582203639485e-05, "loss": 2.2926956176757813, "memory(GiB)": 72.85, "step": 27675, "token_acc": 0.49517684887459806, "train_speed(iter/s)": 0.670292 }, { "epoch": 1.1858960627222483, "grad_norm": 4.721117973327637, "learning_rate": 8.676126077511456e-05, "loss": 2.439519691467285, "memory(GiB)": 72.85, "step": 27680, "token_acc": 0.4426229508196721, "train_speed(iter/s)": 0.670282 }, { "epoch": 1.1861102780514974, "grad_norm": 
4.83423376083374, "learning_rate": 8.675669884786954e-05, "loss": 2.4839927673339846, "memory(GiB)": 72.85, "step": 27685, "token_acc": 0.4746376811594203, "train_speed(iter/s)": 0.670301 }, { "epoch": 1.1863244933807464, "grad_norm": 4.062659740447998, "learning_rate": 8.675213625474246e-05, "loss": 2.4119745254516602, "memory(GiB)": 72.85, "step": 27690, "token_acc": 0.4731182795698925, "train_speed(iter/s)": 0.670306 }, { "epoch": 1.1865387087099952, "grad_norm": 3.25185489654541, "learning_rate": 8.674757299581594e-05, "loss": 2.405831527709961, "memory(GiB)": 72.85, "step": 27695, "token_acc": 0.49508196721311476, "train_speed(iter/s)": 0.670314 }, { "epoch": 1.1867529240392443, "grad_norm": 4.839440822601318, "learning_rate": 8.674300907117265e-05, "loss": 2.572952651977539, "memory(GiB)": 72.85, "step": 27700, "token_acc": 0.43911439114391143, "train_speed(iter/s)": 0.670313 }, { "epoch": 1.1869671393684933, "grad_norm": 5.007638454437256, "learning_rate": 8.67384444808953e-05, "loss": 2.2927852630615235, "memory(GiB)": 72.85, "step": 27705, "token_acc": 0.5189504373177842, "train_speed(iter/s)": 0.670302 }, { "epoch": 1.187181354697742, "grad_norm": 4.303777694702148, "learning_rate": 8.673387922506657e-05, "loss": 2.4305404663085937, "memory(GiB)": 72.85, "step": 27710, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.670297 }, { "epoch": 1.1873955700269911, "grad_norm": 3.5994925498962402, "learning_rate": 8.672931330376916e-05, "loss": 2.149345588684082, "memory(GiB)": 72.85, "step": 27715, "token_acc": 0.5108225108225108, "train_speed(iter/s)": 0.670294 }, { "epoch": 1.1876097853562402, "grad_norm": 3.7648138999938965, "learning_rate": 8.672474671708577e-05, "loss": 2.299795722961426, "memory(GiB)": 72.85, "step": 27720, "token_acc": 0.5153374233128835, "train_speed(iter/s)": 0.670312 }, { "epoch": 1.187824000685489, "grad_norm": 4.393477439880371, "learning_rate": 8.672017946509914e-05, "loss": 2.403790283203125, "memory(GiB)": 72.85, "step": 
27725, "token_acc": 0.4925925925925926, "train_speed(iter/s)": 0.670322 }, { "epoch": 1.188038216014738, "grad_norm": 3.983936071395874, "learning_rate": 8.671561154789202e-05, "loss": 2.4598955154418944, "memory(GiB)": 72.85, "step": 27730, "token_acc": 0.46075085324232085, "train_speed(iter/s)": 0.670356 }, { "epoch": 1.188252431343987, "grad_norm": 6.950322151184082, "learning_rate": 8.671104296554715e-05, "loss": 2.2377962112426757, "memory(GiB)": 72.85, "step": 27735, "token_acc": 0.5057915057915058, "train_speed(iter/s)": 0.670361 }, { "epoch": 1.1884666466732359, "grad_norm": 5.574502944946289, "learning_rate": 8.670647371814732e-05, "loss": 2.2417783737182617, "memory(GiB)": 72.85, "step": 27740, "token_acc": 0.509090909090909, "train_speed(iter/s)": 0.670381 }, { "epoch": 1.188680862002485, "grad_norm": 4.279617786407471, "learning_rate": 8.670190380577527e-05, "loss": 2.439286994934082, "memory(GiB)": 72.85, "step": 27745, "token_acc": 0.4645390070921986, "train_speed(iter/s)": 0.670394 }, { "epoch": 1.188895077331734, "grad_norm": 5.635440349578857, "learning_rate": 8.669733322851379e-05, "loss": 2.2688793182373046, "memory(GiB)": 72.85, "step": 27750, "token_acc": 0.502127659574468, "train_speed(iter/s)": 0.670392 }, { "epoch": 1.1891092926609828, "grad_norm": 4.209367752075195, "learning_rate": 8.669276198644572e-05, "loss": 2.420352745056152, "memory(GiB)": 72.85, "step": 27755, "token_acc": 0.4983277591973244, "train_speed(iter/s)": 0.67038 }, { "epoch": 1.1893235079902318, "grad_norm": 2.9555163383483887, "learning_rate": 8.668819007965383e-05, "loss": 2.334872817993164, "memory(GiB)": 72.85, "step": 27760, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.670394 }, { "epoch": 1.1895377233194808, "grad_norm": 4.223445892333984, "learning_rate": 8.668361750822096e-05, "loss": 2.3053064346313477, "memory(GiB)": 72.85, "step": 27765, "token_acc": 0.4542124542124542, "train_speed(iter/s)": 0.670416 }, { "epoch": 1.1897519386487296, "grad_norm": 
4.389516830444336, "learning_rate": 8.667904427222995e-05, "loss": 2.3260082244873046, "memory(GiB)": 72.85, "step": 27770, "token_acc": 0.5376712328767124, "train_speed(iter/s)": 0.670455 }, { "epoch": 1.1899661539779787, "grad_norm": 3.938511848449707, "learning_rate": 8.667447037176364e-05, "loss": 2.2631809234619142, "memory(GiB)": 72.85, "step": 27775, "token_acc": 0.5173611111111112, "train_speed(iter/s)": 0.670447 }, { "epoch": 1.1901803693072277, "grad_norm": 5.442898750305176, "learning_rate": 8.66698958069049e-05, "loss": 2.276002311706543, "memory(GiB)": 72.85, "step": 27780, "token_acc": 0.4892086330935252, "train_speed(iter/s)": 0.670456 }, { "epoch": 1.1903945846364765, "grad_norm": 3.9378907680511475, "learning_rate": 8.666532057773661e-05, "loss": 2.477119255065918, "memory(GiB)": 72.85, "step": 27785, "token_acc": 0.4574468085106383, "train_speed(iter/s)": 0.670467 }, { "epoch": 1.1906087999657256, "grad_norm": 3.6060783863067627, "learning_rate": 8.666074468434162e-05, "loss": 2.457790565490723, "memory(GiB)": 72.85, "step": 27790, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.67048 }, { "epoch": 1.1908230152949746, "grad_norm": 6.1958513259887695, "learning_rate": 8.665616812680288e-05, "loss": 2.353824424743652, "memory(GiB)": 72.85, "step": 27795, "token_acc": 0.5, "train_speed(iter/s)": 0.670487 }, { "epoch": 1.1910372306242234, "grad_norm": 3.5281872749328613, "learning_rate": 8.665159090520323e-05, "loss": 2.6472091674804688, "memory(GiB)": 72.85, "step": 27800, "token_acc": 0.447887323943662, "train_speed(iter/s)": 0.670514 }, { "epoch": 1.1912514459534724, "grad_norm": 4.591470241546631, "learning_rate": 8.664701301962566e-05, "loss": 2.648457145690918, "memory(GiB)": 72.85, "step": 27805, "token_acc": 0.44144144144144143, "train_speed(iter/s)": 0.670528 }, { "epoch": 1.1914656612827215, "grad_norm": 6.698062896728516, "learning_rate": 8.664243447015305e-05, "loss": 2.2061386108398438, "memory(GiB)": 72.85, "step": 27810, 
"token_acc": 0.5220125786163522, "train_speed(iter/s)": 0.670528 }, { "epoch": 1.1916798766119703, "grad_norm": 5.134876728057861, "learning_rate": 8.663785525686838e-05, "loss": 2.5847684860229494, "memory(GiB)": 72.85, "step": 27815, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.670519 }, { "epoch": 1.1918940919412193, "grad_norm": 3.633690595626831, "learning_rate": 8.663327537985459e-05, "loss": 2.068899726867676, "memory(GiB)": 72.85, "step": 27820, "token_acc": 0.5236486486486487, "train_speed(iter/s)": 0.670498 }, { "epoch": 1.1921083072704683, "grad_norm": 4.592554092407227, "learning_rate": 8.662869483919466e-05, "loss": 2.582809257507324, "memory(GiB)": 72.85, "step": 27825, "token_acc": 0.4607142857142857, "train_speed(iter/s)": 0.670511 }, { "epoch": 1.1923225225997172, "grad_norm": 3.208221435546875, "learning_rate": 8.662411363497155e-05, "loss": 2.3070871353149416, "memory(GiB)": 72.85, "step": 27830, "token_acc": 0.4533333333333333, "train_speed(iter/s)": 0.670502 }, { "epoch": 1.1925367379289662, "grad_norm": 5.269526481628418, "learning_rate": 8.661953176726827e-05, "loss": 2.471891403198242, "memory(GiB)": 72.85, "step": 27835, "token_acc": 0.4395973154362416, "train_speed(iter/s)": 0.670521 }, { "epoch": 1.1927509532582152, "grad_norm": 4.909334659576416, "learning_rate": 8.661494923616783e-05, "loss": 2.091400909423828, "memory(GiB)": 72.85, "step": 27840, "token_acc": 0.5481481481481482, "train_speed(iter/s)": 0.670527 }, { "epoch": 1.192965168587464, "grad_norm": 5.212004661560059, "learning_rate": 8.661036604175321e-05, "loss": 2.1863071441650392, "memory(GiB)": 72.85, "step": 27845, "token_acc": 0.5198675496688742, "train_speed(iter/s)": 0.670499 }, { "epoch": 1.193179383916713, "grad_norm": 4.7052741050720215, "learning_rate": 8.660578218410748e-05, "loss": 2.742046356201172, "memory(GiB)": 72.85, "step": 27850, "token_acc": 0.46885245901639344, "train_speed(iter/s)": 0.670501 }, { "epoch": 1.1933935992459621, "grad_norm": 
3.581967353820801, "learning_rate": 8.660119766331367e-05, "loss": 2.2494085311889647, "memory(GiB)": 72.85, "step": 27855, "token_acc": 0.5285171102661597, "train_speed(iter/s)": 0.670514 }, { "epoch": 1.193607814575211, "grad_norm": 4.448185920715332, "learning_rate": 8.659661247945483e-05, "loss": 2.3912384033203127, "memory(GiB)": 72.85, "step": 27860, "token_acc": 0.4862068965517241, "train_speed(iter/s)": 0.670534 }, { "epoch": 1.19382202990446, "grad_norm": 5.2140679359436035, "learning_rate": 8.659202663261402e-05, "loss": 2.3539426803588865, "memory(GiB)": 72.85, "step": 27865, "token_acc": 0.49264705882352944, "train_speed(iter/s)": 0.670525 }, { "epoch": 1.194036245233709, "grad_norm": 3.675539970397949, "learning_rate": 8.658744012287433e-05, "loss": 2.52828311920166, "memory(GiB)": 72.85, "step": 27870, "token_acc": 0.484251968503937, "train_speed(iter/s)": 0.670534 }, { "epoch": 1.1942504605629578, "grad_norm": 4.223853588104248, "learning_rate": 8.658285295031883e-05, "loss": 2.4470943450927733, "memory(GiB)": 72.85, "step": 27875, "token_acc": 0.5208333333333334, "train_speed(iter/s)": 0.670536 }, { "epoch": 1.1944646758922068, "grad_norm": 5.8321003913879395, "learning_rate": 8.657826511503065e-05, "loss": 2.4698081970214845, "memory(GiB)": 72.85, "step": 27880, "token_acc": 0.4789915966386555, "train_speed(iter/s)": 0.670525 }, { "epoch": 1.1946788912214559, "grad_norm": 4.0198211669921875, "learning_rate": 8.657367661709285e-05, "loss": 2.4696455001831055, "memory(GiB)": 72.85, "step": 27885, "token_acc": 0.4525316455696203, "train_speed(iter/s)": 0.670542 }, { "epoch": 1.1948931065507047, "grad_norm": 4.235238075256348, "learning_rate": 8.656908745658862e-05, "loss": 2.7236560821533202, "memory(GiB)": 72.85, "step": 27890, "token_acc": 0.47388059701492535, "train_speed(iter/s)": 0.67057 }, { "epoch": 1.1951073218799537, "grad_norm": 3.7000067234039307, "learning_rate": 8.656449763360105e-05, "loss": 2.3399600982666016, "memory(GiB)": 72.85, 
"step": 27895, "token_acc": 0.46827794561933533, "train_speed(iter/s)": 0.670605 }, { "epoch": 1.1953215372092028, "grad_norm": 3.9515883922576904, "learning_rate": 8.655990714821331e-05, "loss": 2.355699157714844, "memory(GiB)": 72.85, "step": 27900, "token_acc": 0.47703180212014135, "train_speed(iter/s)": 0.670617 }, { "epoch": 1.1955357525384516, "grad_norm": 3.7320055961608887, "learning_rate": 8.655531600050856e-05, "loss": 2.4788957595825196, "memory(GiB)": 72.85, "step": 27905, "token_acc": 0.4690909090909091, "train_speed(iter/s)": 0.670617 }, { "epoch": 1.1957499678677006, "grad_norm": 3.5896313190460205, "learning_rate": 8.655072419056997e-05, "loss": 2.4285953521728514, "memory(GiB)": 72.85, "step": 27910, "token_acc": 0.5273311897106109, "train_speed(iter/s)": 0.670606 }, { "epoch": 1.1959641831969496, "grad_norm": 4.304381847381592, "learning_rate": 8.654613171848072e-05, "loss": 2.213099479675293, "memory(GiB)": 72.85, "step": 27915, "token_acc": 0.5447154471544715, "train_speed(iter/s)": 0.670593 }, { "epoch": 1.1961783985261984, "grad_norm": 4.121492862701416, "learning_rate": 8.654153858432401e-05, "loss": 2.310099792480469, "memory(GiB)": 72.85, "step": 27920, "token_acc": 0.4983922829581994, "train_speed(iter/s)": 0.670574 }, { "epoch": 1.1963926138554475, "grad_norm": 3.770120143890381, "learning_rate": 8.653694478818304e-05, "loss": 2.192479705810547, "memory(GiB)": 72.85, "step": 27925, "token_acc": 0.5176991150442478, "train_speed(iter/s)": 0.670566 }, { "epoch": 1.1966068291846965, "grad_norm": 4.603169918060303, "learning_rate": 8.653235033014105e-05, "loss": 2.473994255065918, "memory(GiB)": 72.85, "step": 27930, "token_acc": 0.4876325088339223, "train_speed(iter/s)": 0.670568 }, { "epoch": 1.1968210445139453, "grad_norm": 5.339007377624512, "learning_rate": 8.652775521028127e-05, "loss": 2.1877832412719727, "memory(GiB)": 72.85, "step": 27935, "token_acc": 0.5207547169811321, "train_speed(iter/s)": 0.670555 }, { "epoch": 
1.1970352598431944, "grad_norm": 4.2233405113220215, "learning_rate": 8.652315942868694e-05, "loss": 2.230115509033203, "memory(GiB)": 72.85, "step": 27940, "token_acc": 0.5183946488294314, "train_speed(iter/s)": 0.670554 }, { "epoch": 1.1972494751724434, "grad_norm": 3.8436529636383057, "learning_rate": 8.65185629854413e-05, "loss": 2.437627410888672, "memory(GiB)": 72.85, "step": 27945, "token_acc": 0.4852459016393443, "train_speed(iter/s)": 0.670572 }, { "epoch": 1.1974636905016922, "grad_norm": 3.101160764694214, "learning_rate": 8.651396588062764e-05, "loss": 2.3686573028564455, "memory(GiB)": 72.85, "step": 27950, "token_acc": 0.512987012987013, "train_speed(iter/s)": 0.670585 }, { "epoch": 1.1976779058309412, "grad_norm": 3.4025449752807617, "learning_rate": 8.650936811432924e-05, "loss": 2.3976078033447266, "memory(GiB)": 72.85, "step": 27955, "token_acc": 0.46875, "train_speed(iter/s)": 0.67057 }, { "epoch": 1.1978921211601903, "grad_norm": 4.893815517425537, "learning_rate": 8.65047696866294e-05, "loss": 2.5285614013671873, "memory(GiB)": 72.85, "step": 27960, "token_acc": 0.45017182130584193, "train_speed(iter/s)": 0.670576 }, { "epoch": 1.198106336489439, "grad_norm": 4.218569755554199, "learning_rate": 8.65001705976114e-05, "loss": 2.2210880279541017, "memory(GiB)": 72.85, "step": 27965, "token_acc": 0.5141065830721003, "train_speed(iter/s)": 0.670595 }, { "epoch": 1.1983205518186881, "grad_norm": 4.31313943862915, "learning_rate": 8.649557084735855e-05, "loss": 2.343202590942383, "memory(GiB)": 72.85, "step": 27970, "token_acc": 0.49603174603174605, "train_speed(iter/s)": 0.670612 }, { "epoch": 1.1985347671479372, "grad_norm": 5.489354133605957, "learning_rate": 8.649097043595422e-05, "loss": 2.665810966491699, "memory(GiB)": 72.85, "step": 27975, "token_acc": 0.479108635097493, "train_speed(iter/s)": 0.67063 }, { "epoch": 1.198748982477186, "grad_norm": 4.300017833709717, "learning_rate": 8.648636936348173e-05, "loss": 2.2822196960449217, 
"memory(GiB)": 72.85, "step": 27980, "token_acc": 0.5176848874598071, "train_speed(iter/s)": 0.670637 }, { "epoch": 1.198963197806435, "grad_norm": 3.5966711044311523, "learning_rate": 8.648176763002442e-05, "loss": 2.2487646102905274, "memory(GiB)": 72.85, "step": 27985, "token_acc": 0.48494983277591974, "train_speed(iter/s)": 0.670645 }, { "epoch": 1.199177413135684, "grad_norm": 6.134557723999023, "learning_rate": 8.647716523566569e-05, "loss": 2.3710987091064455, "memory(GiB)": 72.85, "step": 27990, "token_acc": 0.5055762081784386, "train_speed(iter/s)": 0.670649 }, { "epoch": 1.1993916284649329, "grad_norm": 4.552657127380371, "learning_rate": 8.647256218048887e-05, "loss": 2.0804319381713867, "memory(GiB)": 72.85, "step": 27995, "token_acc": 0.5396825396825397, "train_speed(iter/s)": 0.670668 }, { "epoch": 1.199605843794182, "grad_norm": 5.178404331207275, "learning_rate": 8.646795846457738e-05, "loss": 2.3424989700317385, "memory(GiB)": 72.85, "step": 28000, "token_acc": 0.45751633986928103, "train_speed(iter/s)": 0.67065 }, { "epoch": 1.199605843794182, "eval_loss": 2.0177793502807617, "eval_runtime": 16.02, "eval_samples_per_second": 6.242, "eval_steps_per_second": 6.242, "eval_token_acc": 0.5006321112515802, "step": 28000 }, { "epoch": 1.199820059123431, "grad_norm": 5.397165298461914, "learning_rate": 8.646335408801461e-05, "loss": 2.4737899780273436, "memory(GiB)": 72.85, "step": 28005, "token_acc": 0.4946445959104187, "train_speed(iter/s)": 0.670362 }, { "epoch": 1.2000342744526797, "grad_norm": 4.680639743804932, "learning_rate": 8.645874905088399e-05, "loss": 2.144757080078125, "memory(GiB)": 72.85, "step": 28010, "token_acc": 0.5156794425087108, "train_speed(iter/s)": 0.670374 }, { "epoch": 1.2002484897819288, "grad_norm": 5.658175945281982, "learning_rate": 8.645414335326891e-05, "loss": 2.296906661987305, "memory(GiB)": 72.85, "step": 28015, "token_acc": 0.5109717868338558, "train_speed(iter/s)": 0.670386 }, { "epoch": 1.2004627051111778, 
"grad_norm": 4.207187175750732, "learning_rate": 8.644953699525284e-05, "loss": 2.3285144805908202, "memory(GiB)": 72.85, "step": 28020, "token_acc": 0.46229508196721314, "train_speed(iter/s)": 0.670398 }, { "epoch": 1.2006769204404266, "grad_norm": 3.9525723457336426, "learning_rate": 8.64449299769192e-05, "loss": 2.0825580596923827, "memory(GiB)": 72.85, "step": 28025, "token_acc": 0.543859649122807, "train_speed(iter/s)": 0.670383 }, { "epoch": 1.2008911357696757, "grad_norm": 3.9885237216949463, "learning_rate": 8.644032229835148e-05, "loss": 2.094126892089844, "memory(GiB)": 72.85, "step": 28030, "token_acc": 0.5441696113074205, "train_speed(iter/s)": 0.670348 }, { "epoch": 1.2011053510989247, "grad_norm": 4.447451591491699, "learning_rate": 8.643571395963314e-05, "loss": 3.0409122467041017, "memory(GiB)": 72.85, "step": 28035, "token_acc": 0.42485549132947975, "train_speed(iter/s)": 0.670365 }, { "epoch": 1.2013195664281735, "grad_norm": 3.9744951725006104, "learning_rate": 8.643110496084766e-05, "loss": 2.4499244689941406, "memory(GiB)": 72.85, "step": 28040, "token_acc": 0.5261627906976745, "train_speed(iter/s)": 0.670381 }, { "epoch": 1.2015337817574225, "grad_norm": 3.966182231903076, "learning_rate": 8.642649530207853e-05, "loss": 2.5556941986083985, "memory(GiB)": 72.85, "step": 28045, "token_acc": 0.5, "train_speed(iter/s)": 0.670386 }, { "epoch": 1.2017479970866716, "grad_norm": 3.403247833251953, "learning_rate": 8.642188498340926e-05, "loss": 2.122970771789551, "memory(GiB)": 72.85, "step": 28050, "token_acc": 0.5518518518518518, "train_speed(iter/s)": 0.670368 }, { "epoch": 1.2019622124159204, "grad_norm": 3.564847707748413, "learning_rate": 8.641727400492339e-05, "loss": 2.334202194213867, "memory(GiB)": 72.85, "step": 28055, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.670363 }, { "epoch": 1.2021764277451694, "grad_norm": 6.044728755950928, "learning_rate": 8.641266236670444e-05, "loss": 2.477732849121094, "memory(GiB)": 72.85, 
"step": 28060, "token_acc": 0.47555555555555556, "train_speed(iter/s)": 0.670365 }, { "epoch": 1.2023906430744185, "grad_norm": 3.501648187637329, "learning_rate": 8.640805006883595e-05, "loss": 2.385022735595703, "memory(GiB)": 72.85, "step": 28065, "token_acc": 0.4682926829268293, "train_speed(iter/s)": 0.670391 }, { "epoch": 1.2026048584036673, "grad_norm": 3.914902925491333, "learning_rate": 8.640343711140147e-05, "loss": 2.199579048156738, "memory(GiB)": 72.85, "step": 28070, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.670379 }, { "epoch": 1.2028190737329163, "grad_norm": 4.124277114868164, "learning_rate": 8.63988234944846e-05, "loss": 2.4772634506225586, "memory(GiB)": 72.85, "step": 28075, "token_acc": 0.42618384401114207, "train_speed(iter/s)": 0.670373 }, { "epoch": 1.2030332890621653, "grad_norm": 4.611649036407471, "learning_rate": 8.639420921816887e-05, "loss": 2.5292587280273438, "memory(GiB)": 72.85, "step": 28080, "token_acc": 0.4912891986062718, "train_speed(iter/s)": 0.670363 }, { "epoch": 1.2032475043914141, "grad_norm": 3.301135540008545, "learning_rate": 8.638959428253792e-05, "loss": 2.342416000366211, "memory(GiB)": 72.85, "step": 28085, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.670373 }, { "epoch": 1.2034617197206632, "grad_norm": 3.8333656787872314, "learning_rate": 8.638497868767533e-05, "loss": 2.0318078994750977, "memory(GiB)": 72.85, "step": 28090, "token_acc": 0.5608856088560885, "train_speed(iter/s)": 0.670341 }, { "epoch": 1.2036759350499122, "grad_norm": 4.8414998054504395, "learning_rate": 8.638036243366471e-05, "loss": 2.390040397644043, "memory(GiB)": 72.85, "step": 28095, "token_acc": 0.5079365079365079, "train_speed(iter/s)": 0.670378 }, { "epoch": 1.203890150379161, "grad_norm": 3.8529720306396484, "learning_rate": 8.637574552058972e-05, "loss": 2.3565746307373048, "memory(GiB)": 72.85, "step": 28100, "token_acc": 0.5144927536231884, "train_speed(iter/s)": 0.670381 }, { "epoch": 1.20410436570841, 
"grad_norm": 4.1036858558654785, "learning_rate": 8.637112794853395e-05, "loss": 2.1060201644897463, "memory(GiB)": 72.85, "step": 28105, "token_acc": 0.5150501672240803, "train_speed(iter/s)": 0.670405 }, { "epoch": 1.204318581037659, "grad_norm": 4.5461812019348145, "learning_rate": 8.636650971758108e-05, "loss": 2.4097454071044924, "memory(GiB)": 72.85, "step": 28110, "token_acc": 0.4807692307692308, "train_speed(iter/s)": 0.670418 }, { "epoch": 1.204532796366908, "grad_norm": 4.242333889007568, "learning_rate": 8.636189082781479e-05, "loss": 2.2362991333007813, "memory(GiB)": 72.85, "step": 28115, "token_acc": 0.4738562091503268, "train_speed(iter/s)": 0.670425 }, { "epoch": 1.204747011696157, "grad_norm": 3.426011562347412, "learning_rate": 8.635727127931872e-05, "loss": 2.4131580352783204, "memory(GiB)": 72.85, "step": 28120, "token_acc": 0.5125, "train_speed(iter/s)": 0.67043 }, { "epoch": 1.204961227025406, "grad_norm": 4.256217002868652, "learning_rate": 8.635265107217659e-05, "loss": 2.260051155090332, "memory(GiB)": 72.85, "step": 28125, "token_acc": 0.5224489795918368, "train_speed(iter/s)": 0.67042 }, { "epoch": 1.2051754423546548, "grad_norm": 5.056133270263672, "learning_rate": 8.634803020647208e-05, "loss": 2.2430644989013673, "memory(GiB)": 72.85, "step": 28130, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.670421 }, { "epoch": 1.2053896576839038, "grad_norm": 4.69618034362793, "learning_rate": 8.63434086822889e-05, "loss": 2.27063102722168, "memory(GiB)": 72.85, "step": 28135, "token_acc": 0.5052264808362369, "train_speed(iter/s)": 0.67042 }, { "epoch": 1.2056038730131529, "grad_norm": 4.8113908767700195, "learning_rate": 8.633878649971079e-05, "loss": 2.4196361541748046, "memory(GiB)": 72.85, "step": 28140, "token_acc": 0.4659498207885305, "train_speed(iter/s)": 0.670433 }, { "epoch": 1.2058180883424017, "grad_norm": 3.8279006481170654, "learning_rate": 8.633416365882146e-05, "loss": 2.30798225402832, "memory(GiB)": 72.85, "step": 
28145, "token_acc": 0.47714285714285715, "train_speed(iter/s)": 0.670432 }, { "epoch": 1.2060323036716507, "grad_norm": 4.271023750305176, "learning_rate": 8.632954015970466e-05, "loss": 2.4145719528198244, "memory(GiB)": 72.85, "step": 28150, "token_acc": 0.4876543209876543, "train_speed(iter/s)": 0.670414 }, { "epoch": 1.2062465190008997, "grad_norm": 4.27205228805542, "learning_rate": 8.632491600244419e-05, "loss": 2.350164794921875, "memory(GiB)": 72.85, "step": 28155, "token_acc": 0.4807017543859649, "train_speed(iter/s)": 0.670431 }, { "epoch": 1.2064607343301486, "grad_norm": 3.8945960998535156, "learning_rate": 8.632029118712378e-05, "loss": 2.098562240600586, "memory(GiB)": 72.85, "step": 28160, "token_acc": 0.5068493150684932, "train_speed(iter/s)": 0.670426 }, { "epoch": 1.2066749496593976, "grad_norm": 3.894294261932373, "learning_rate": 8.631566571382722e-05, "loss": 2.442911148071289, "memory(GiB)": 72.85, "step": 28165, "token_acc": 0.4894366197183099, "train_speed(iter/s)": 0.670436 }, { "epoch": 1.2068891649886466, "grad_norm": 4.657113552093506, "learning_rate": 8.631103958263831e-05, "loss": 2.438752365112305, "memory(GiB)": 72.85, "step": 28170, "token_acc": 0.4835164835164835, "train_speed(iter/s)": 0.670435 }, { "epoch": 1.2071033803178954, "grad_norm": 4.456425189971924, "learning_rate": 8.630641279364086e-05, "loss": 2.2354833602905275, "memory(GiB)": 72.85, "step": 28175, "token_acc": 0.4723926380368098, "train_speed(iter/s)": 0.670458 }, { "epoch": 1.2073175956471445, "grad_norm": 3.0151617527008057, "learning_rate": 8.630178534691868e-05, "loss": 2.124278259277344, "memory(GiB)": 72.85, "step": 28180, "token_acc": 0.5195729537366548, "train_speed(iter/s)": 0.670428 }, { "epoch": 1.2075318109763935, "grad_norm": 4.455056190490723, "learning_rate": 8.629715724255561e-05, "loss": 2.1639631271362303, "memory(GiB)": 72.85, "step": 28185, "token_acc": 0.5718954248366013, "train_speed(iter/s)": 0.670399 }, { "epoch": 1.2077460263056423, 
"grad_norm": 4.153079032897949, "learning_rate": 8.629252848063547e-05, "loss": 2.5207725524902345, "memory(GiB)": 72.85, "step": 28190, "token_acc": 0.46984126984126984, "train_speed(iter/s)": 0.670385 }, { "epoch": 1.2079602416348914, "grad_norm": 4.424499034881592, "learning_rate": 8.628789906124214e-05, "loss": 2.887877082824707, "memory(GiB)": 72.85, "step": 28195, "token_acc": 0.4208955223880597, "train_speed(iter/s)": 0.670383 }, { "epoch": 1.2081744569641404, "grad_norm": 3.5627472400665283, "learning_rate": 8.628326898445949e-05, "loss": 2.1934444427490236, "memory(GiB)": 72.85, "step": 28200, "token_acc": 0.535593220338983, "train_speed(iter/s)": 0.670386 }, { "epoch": 1.2083886722933892, "grad_norm": 3.7423014640808105, "learning_rate": 8.627863825037137e-05, "loss": 2.099422073364258, "memory(GiB)": 72.85, "step": 28205, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.670405 }, { "epoch": 1.2086028876226382, "grad_norm": 4.267122268676758, "learning_rate": 8.627400685906171e-05, "loss": 2.498446464538574, "memory(GiB)": 72.85, "step": 28210, "token_acc": 0.49185667752442996, "train_speed(iter/s)": 0.670399 }, { "epoch": 1.2088171029518873, "grad_norm": 4.873013973236084, "learning_rate": 8.626937481061437e-05, "loss": 2.2123954772949217, "memory(GiB)": 72.85, "step": 28215, "token_acc": 0.49491525423728816, "train_speed(iter/s)": 0.67037 }, { "epoch": 1.209031318281136, "grad_norm": 4.515688896179199, "learning_rate": 8.626474210511327e-05, "loss": 2.0589513778686523, "memory(GiB)": 72.85, "step": 28220, "token_acc": 0.5451388888888888, "train_speed(iter/s)": 0.670382 }, { "epoch": 1.2092455336103851, "grad_norm": 5.120685577392578, "learning_rate": 8.626010874264236e-05, "loss": 2.67718448638916, "memory(GiB)": 72.85, "step": 28225, "token_acc": 0.4620253164556962, "train_speed(iter/s)": 0.670366 }, { "epoch": 1.2094597489396341, "grad_norm": 4.492436408996582, "learning_rate": 8.625547472328556e-05, "loss": 2.467780876159668, "memory(GiB)": 
72.85, "step": 28230, "token_acc": 0.4939271255060729, "train_speed(iter/s)": 0.670384 }, { "epoch": 1.209673964268883, "grad_norm": 3.6760103702545166, "learning_rate": 8.625084004712684e-05, "loss": 2.2632946014404296, "memory(GiB)": 72.85, "step": 28235, "token_acc": 0.541033434650456, "train_speed(iter/s)": 0.670378 }, { "epoch": 1.209888179598132, "grad_norm": 4.385683536529541, "learning_rate": 8.624620471425014e-05, "loss": 2.124472808837891, "memory(GiB)": 72.85, "step": 28240, "token_acc": 0.5423728813559322, "train_speed(iter/s)": 0.670382 }, { "epoch": 1.210102394927381, "grad_norm": 4.885747909545898, "learning_rate": 8.624156872473942e-05, "loss": 2.2374805450439452, "memory(GiB)": 72.85, "step": 28245, "token_acc": 0.5047318611987381, "train_speed(iter/s)": 0.670384 }, { "epoch": 1.2103166102566298, "grad_norm": 3.8749842643737793, "learning_rate": 8.623693207867872e-05, "loss": 2.4347566604614257, "memory(GiB)": 72.85, "step": 28250, "token_acc": 0.5018450184501845, "train_speed(iter/s)": 0.670396 }, { "epoch": 1.2105308255858789, "grad_norm": 3.658832550048828, "learning_rate": 8.623229477615198e-05, "loss": 2.4931053161621093, "memory(GiB)": 72.85, "step": 28255, "token_acc": 0.4716417910447761, "train_speed(iter/s)": 0.670401 }, { "epoch": 1.210745040915128, "grad_norm": 4.019267559051514, "learning_rate": 8.622765681724324e-05, "loss": 2.708706283569336, "memory(GiB)": 72.85, "step": 28260, "token_acc": 0.46779661016949153, "train_speed(iter/s)": 0.670404 }, { "epoch": 1.2109592562443767, "grad_norm": 5.766371726989746, "learning_rate": 8.622301820203651e-05, "loss": 2.1440486907958984, "memory(GiB)": 72.85, "step": 28265, "token_acc": 0.5020408163265306, "train_speed(iter/s)": 0.670414 }, { "epoch": 1.2111734715736258, "grad_norm": 4.092783451080322, "learning_rate": 8.621930683739304e-05, "loss": 2.3280403137207033, "memory(GiB)": 72.85, "step": 28270, "token_acc": 0.5058823529411764, "train_speed(iter/s)": 0.670396 }, { "epoch": 
1.2113876869028748, "grad_norm": 4.175919055938721, "learning_rate": 8.621466704106171e-05, "loss": 2.372615432739258, "memory(GiB)": 72.85, "step": 28275, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.670385 }, { "epoch": 1.2116019022321236, "grad_norm": 4.019552707672119, "learning_rate": 8.62100265886677e-05, "loss": 2.5312692642211916, "memory(GiB)": 72.85, "step": 28280, "token_acc": 0.4734982332155477, "train_speed(iter/s)": 0.670432 }, { "epoch": 1.2118161175613726, "grad_norm": 3.1551711559295654, "learning_rate": 8.62053854802951e-05, "loss": 2.063238525390625, "memory(GiB)": 72.85, "step": 28285, "token_acc": 0.5324232081911263, "train_speed(iter/s)": 0.670455 }, { "epoch": 1.2120303328906217, "grad_norm": 5.377480506896973, "learning_rate": 8.620074371602798e-05, "loss": 2.2779632568359376, "memory(GiB)": 72.85, "step": 28290, "token_acc": 0.4863013698630137, "train_speed(iter/s)": 0.670424 }, { "epoch": 1.2122445482198707, "grad_norm": 4.834970474243164, "learning_rate": 8.619610129595041e-05, "loss": 2.4809123992919924, "memory(GiB)": 72.85, "step": 28295, "token_acc": 0.49491525423728816, "train_speed(iter/s)": 0.67044 }, { "epoch": 1.2124587635491195, "grad_norm": 3.9644274711608887, "learning_rate": 8.619145822014653e-05, "loss": 2.3533843994140624, "memory(GiB)": 72.85, "step": 28300, "token_acc": 0.47808764940239046, "train_speed(iter/s)": 0.67046 }, { "epoch": 1.2126729788783686, "grad_norm": 5.110656261444092, "learning_rate": 8.618681448870043e-05, "loss": 2.70762939453125, "memory(GiB)": 72.85, "step": 28305, "token_acc": 0.46956521739130436, "train_speed(iter/s)": 0.67046 }, { "epoch": 1.2128871942076176, "grad_norm": 5.722635746002197, "learning_rate": 8.618217010169623e-05, "loss": 2.3182918548583986, "memory(GiB)": 72.85, "step": 28310, "token_acc": 0.48830409356725146, "train_speed(iter/s)": 0.670474 }, { "epoch": 1.2131014095368664, "grad_norm": 4.542238712310791, "learning_rate": 8.61775250592181e-05, "loss": 
2.2196189880371096, "memory(GiB)": 72.85, "step": 28315, "token_acc": 0.4915254237288136, "train_speed(iter/s)": 0.67051 }, { "epoch": 1.2133156248661154, "grad_norm": 5.006811141967773, "learning_rate": 8.617287936135015e-05, "loss": 2.395132064819336, "memory(GiB)": 72.85, "step": 28320, "token_acc": 0.4777777777777778, "train_speed(iter/s)": 0.670483 }, { "epoch": 1.2135298401953645, "grad_norm": 4.3677191734313965, "learning_rate": 8.616823300817656e-05, "loss": 2.3761981964111327, "memory(GiB)": 72.85, "step": 28325, "token_acc": 0.5168918918918919, "train_speed(iter/s)": 0.670482 }, { "epoch": 1.2137440555246133, "grad_norm": 9.752554893493652, "learning_rate": 8.616358599978149e-05, "loss": 2.5992202758789062, "memory(GiB)": 72.85, "step": 28330, "token_acc": 0.47112462006079026, "train_speed(iter/s)": 0.670526 }, { "epoch": 1.2139582708538623, "grad_norm": 3.494947671890259, "learning_rate": 8.615893833624915e-05, "loss": 2.2906845092773436, "memory(GiB)": 72.85, "step": 28335, "token_acc": 0.5176470588235295, "train_speed(iter/s)": 0.670514 }, { "epoch": 1.2141724861831114, "grad_norm": 8.728540420532227, "learning_rate": 8.615429001766371e-05, "loss": 2.275959014892578, "memory(GiB)": 72.85, "step": 28340, "token_acc": 0.528, "train_speed(iter/s)": 0.670513 }, { "epoch": 1.2143867015123602, "grad_norm": 4.622932434082031, "learning_rate": 8.61496410441094e-05, "loss": 2.4352272033691404, "memory(GiB)": 72.85, "step": 28345, "token_acc": 0.4730878186968839, "train_speed(iter/s)": 0.670504 }, { "epoch": 1.2146009168416092, "grad_norm": 6.176684379577637, "learning_rate": 8.614499141567043e-05, "loss": 2.4480466842651367, "memory(GiB)": 72.85, "step": 28350, "token_acc": 0.5019762845849802, "train_speed(iter/s)": 0.670487 }, { "epoch": 1.2148151321708582, "grad_norm": 3.5330166816711426, "learning_rate": 8.614034113243103e-05, "loss": 2.39471435546875, "memory(GiB)": 72.85, "step": 28355, "token_acc": 0.4885057471264368, "train_speed(iter/s)": 0.670472 }, { 
"epoch": 1.215029347500107, "grad_norm": 5.344357490539551, "learning_rate": 8.613569019447545e-05, "loss": 2.568490409851074, "memory(GiB)": 72.85, "step": 28360, "token_acc": 0.5155709342560554, "train_speed(iter/s)": 0.670468 }, { "epoch": 1.215243562829356, "grad_norm": 3.330751657485962, "learning_rate": 8.613103860188795e-05, "loss": 2.320369911193848, "memory(GiB)": 72.85, "step": 28365, "token_acc": 0.48363636363636364, "train_speed(iter/s)": 0.670472 }, { "epoch": 1.2154577781586051, "grad_norm": 6.34586763381958, "learning_rate": 8.612638635475279e-05, "loss": 2.1950279235839845, "memory(GiB)": 72.85, "step": 28370, "token_acc": 0.5016611295681063, "train_speed(iter/s)": 0.670478 }, { "epoch": 1.215671993487854, "grad_norm": 3.5396811962127686, "learning_rate": 8.612173345315425e-05, "loss": 2.3704452514648438, "memory(GiB)": 72.85, "step": 28375, "token_acc": 0.473015873015873, "train_speed(iter/s)": 0.670446 }, { "epoch": 1.215886208817103, "grad_norm": 5.297982215881348, "learning_rate": 8.61170798971766e-05, "loss": 2.410622787475586, "memory(GiB)": 72.85, "step": 28380, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.670449 }, { "epoch": 1.216100424146352, "grad_norm": 4.05841064453125, "learning_rate": 8.61124256869042e-05, "loss": 2.22326774597168, "memory(GiB)": 72.85, "step": 28385, "token_acc": 0.5311572700296736, "train_speed(iter/s)": 0.670426 }, { "epoch": 1.2163146394756008, "grad_norm": 3.9757697582244873, "learning_rate": 8.610777082242133e-05, "loss": 2.263228416442871, "memory(GiB)": 72.85, "step": 28390, "token_acc": 0.5016949152542373, "train_speed(iter/s)": 0.670431 }, { "epoch": 1.2165288548048498, "grad_norm": 3.472522258758545, "learning_rate": 8.610311530381231e-05, "loss": 2.2667627334594727, "memory(GiB)": 72.85, "step": 28395, "token_acc": 0.5165562913907285, "train_speed(iter/s)": 0.67043 }, { "epoch": 1.2167430701340989, "grad_norm": 4.60237979888916, "learning_rate": 8.60984591311615e-05, "loss": 
2.2100603103637697, "memory(GiB)": 72.85, "step": 28400, "token_acc": 0.5387323943661971, "train_speed(iter/s)": 0.670439 }, { "epoch": 1.2169572854633477, "grad_norm": 5.119596481323242, "learning_rate": 8.609380230455323e-05, "loss": 2.4970991134643556, "memory(GiB)": 72.85, "step": 28405, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.670457 }, { "epoch": 1.2171715007925967, "grad_norm": 4.226705551147461, "learning_rate": 8.608914482407187e-05, "loss": 1.9388362884521484, "memory(GiB)": 72.85, "step": 28410, "token_acc": 0.5359712230215827, "train_speed(iter/s)": 0.670474 }, { "epoch": 1.2173857161218458, "grad_norm": 5.138035774230957, "learning_rate": 8.60844866898018e-05, "loss": 2.793119430541992, "memory(GiB)": 72.85, "step": 28415, "token_acc": 0.4591194968553459, "train_speed(iter/s)": 0.67046 }, { "epoch": 1.2175999314510946, "grad_norm": 5.279399871826172, "learning_rate": 8.60798279018274e-05, "loss": 2.6164663314819334, "memory(GiB)": 72.85, "step": 28420, "token_acc": 0.4628975265017668, "train_speed(iter/s)": 0.670475 }, { "epoch": 1.2178141467803436, "grad_norm": 4.624782085418701, "learning_rate": 8.607516846023309e-05, "loss": 2.4139945983886717, "memory(GiB)": 72.85, "step": 28425, "token_acc": 0.4953560371517028, "train_speed(iter/s)": 0.670491 }, { "epoch": 1.2180283621095926, "grad_norm": 5.251690864562988, "learning_rate": 8.607050836510324e-05, "loss": 2.21270751953125, "memory(GiB)": 72.85, "step": 28430, "token_acc": 0.5095541401273885, "train_speed(iter/s)": 0.670474 }, { "epoch": 1.2182425774388415, "grad_norm": 5.005397319793701, "learning_rate": 8.60658476165223e-05, "loss": 2.4858730316162108, "memory(GiB)": 72.85, "step": 28435, "token_acc": 0.4634146341463415, "train_speed(iter/s)": 0.670484 }, { "epoch": 1.2184567927680905, "grad_norm": 7.636468887329102, "learning_rate": 8.606118621457471e-05, "loss": 2.2207805633544924, "memory(GiB)": 72.85, "step": 28440, "token_acc": 0.5074074074074074, "train_speed(iter/s)": 
0.670493 }, { "epoch": 1.2186710080973395, "grad_norm": 3.4330427646636963, "learning_rate": 8.605652415934489e-05, "loss": 2.3605642318725586, "memory(GiB)": 72.85, "step": 28445, "token_acc": 0.49216300940438873, "train_speed(iter/s)": 0.670474 }, { "epoch": 1.2188852234265883, "grad_norm": 4.376817226409912, "learning_rate": 8.605186145091731e-05, "loss": 2.578536033630371, "memory(GiB)": 72.85, "step": 28450, "token_acc": 0.4379746835443038, "train_speed(iter/s)": 0.670496 }, { "epoch": 1.2190994387558374, "grad_norm": 3.9046003818511963, "learning_rate": 8.604719808937644e-05, "loss": 2.5266529083251954, "memory(GiB)": 72.85, "step": 28455, "token_acc": 0.4682274247491639, "train_speed(iter/s)": 0.670518 }, { "epoch": 1.2193136540850864, "grad_norm": 4.372311115264893, "learning_rate": 8.604253407480677e-05, "loss": 2.643987274169922, "memory(GiB)": 72.85, "step": 28460, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.670538 }, { "epoch": 1.2195278694143352, "grad_norm": 3.3572425842285156, "learning_rate": 8.603786940729278e-05, "loss": 2.1490409851074217, "memory(GiB)": 72.85, "step": 28465, "token_acc": 0.5622895622895623, "train_speed(iter/s)": 0.670534 }, { "epoch": 1.2197420847435843, "grad_norm": 4.62576961517334, "learning_rate": 8.603320408691898e-05, "loss": 2.3756460189819335, "memory(GiB)": 72.85, "step": 28470, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.670543 }, { "epoch": 1.2199563000728333, "grad_norm": 4.098677158355713, "learning_rate": 8.602853811376989e-05, "loss": 2.343211555480957, "memory(GiB)": 72.85, "step": 28475, "token_acc": 0.48297213622291024, "train_speed(iter/s)": 0.670564 }, { "epoch": 1.220170515402082, "grad_norm": 4.226718425750732, "learning_rate": 8.602387148793002e-05, "loss": 2.4693458557128904, "memory(GiB)": 72.85, "step": 28480, "token_acc": 0.47315436241610737, "train_speed(iter/s)": 0.670566 }, { "epoch": 1.2203847307313311, "grad_norm": 4.5893354415893555, "learning_rate": 
8.601920420948395e-05, "loss": 2.312918853759766, "memory(GiB)": 72.85, "step": 28485, "token_acc": 0.5141700404858299, "train_speed(iter/s)": 0.670596 }, { "epoch": 1.2205989460605802, "grad_norm": 4.20906925201416, "learning_rate": 8.601453627851619e-05, "loss": 2.2778533935546874, "memory(GiB)": 72.85, "step": 28490, "token_acc": 0.4897360703812317, "train_speed(iter/s)": 0.670585 }, { "epoch": 1.220813161389829, "grad_norm": 3.7410683631896973, "learning_rate": 8.600986769511133e-05, "loss": 2.3223196029663087, "memory(GiB)": 72.85, "step": 28495, "token_acc": 0.4973544973544973, "train_speed(iter/s)": 0.670559 }, { "epoch": 1.221027376719078, "grad_norm": 3.6638081073760986, "learning_rate": 8.600519845935393e-05, "loss": 2.7765787124633787, "memory(GiB)": 72.85, "step": 28500, "token_acc": 0.45481927710843373, "train_speed(iter/s)": 0.67053 }, { "epoch": 1.221027376719078, "eval_loss": 2.0398149490356445, "eval_runtime": 15.5616, "eval_samples_per_second": 6.426, "eval_steps_per_second": 6.426, "eval_token_acc": 0.5062761506276151, "step": 28500 }, { "epoch": 1.221241592048327, "grad_norm": 3.79596209526062, "learning_rate": 8.600052857132858e-05, "loss": 2.210581588745117, "memory(GiB)": 72.85, "step": 28505, "token_acc": 0.5062439961575408, "train_speed(iter/s)": 0.670267 }, { "epoch": 1.2214558073775759, "grad_norm": 5.504413604736328, "learning_rate": 8.599585803111989e-05, "loss": 2.201824951171875, "memory(GiB)": 72.85, "step": 28510, "token_acc": 0.5296442687747036, "train_speed(iter/s)": 0.670269 }, { "epoch": 1.221670022706825, "grad_norm": 5.129675388336182, "learning_rate": 8.599118683881247e-05, "loss": 2.235732078552246, "memory(GiB)": 72.85, "step": 28515, "token_acc": 0.5145985401459854, "train_speed(iter/s)": 0.670269 }, { "epoch": 1.221884238036074, "grad_norm": 4.13328218460083, "learning_rate": 8.598651499449091e-05, "loss": 2.4570423126220704, "memory(GiB)": 72.85, "step": 28520, "token_acc": 0.5232558139534884, "train_speed(iter/s)": 
0.670278 }, { "epoch": 1.2220984533653227, "grad_norm": 4.543712139129639, "learning_rate": 8.59818424982399e-05, "loss": 2.4662090301513673, "memory(GiB)": 72.85, "step": 28525, "token_acc": 0.5057471264367817, "train_speed(iter/s)": 0.670274 }, { "epoch": 1.2223126686945718, "grad_norm": 4.119974613189697, "learning_rate": 8.597716935014406e-05, "loss": 2.1304426193237305, "memory(GiB)": 72.85, "step": 28530, "token_acc": 0.5202702702702703, "train_speed(iter/s)": 0.670257 }, { "epoch": 1.2225268840238208, "grad_norm": 3.8904855251312256, "learning_rate": 8.597249555028804e-05, "loss": 2.4549943923950197, "memory(GiB)": 72.85, "step": 28535, "token_acc": 0.4721311475409836, "train_speed(iter/s)": 0.670221 }, { "epoch": 1.2227410993530696, "grad_norm": 4.867424964904785, "learning_rate": 8.596782109875652e-05, "loss": 2.3611143112182615, "memory(GiB)": 72.85, "step": 28540, "token_acc": 0.5390625, "train_speed(iter/s)": 0.670235 }, { "epoch": 1.2229553146823187, "grad_norm": 4.397748947143555, "learning_rate": 8.596314599563419e-05, "loss": 2.5889684677124025, "memory(GiB)": 72.85, "step": 28545, "token_acc": 0.45584045584045585, "train_speed(iter/s)": 0.670245 }, { "epoch": 1.2231695300115677, "grad_norm": 3.524688243865967, "learning_rate": 8.595847024100571e-05, "loss": 2.4082164764404297, "memory(GiB)": 72.85, "step": 28550, "token_acc": 0.5017301038062284, "train_speed(iter/s)": 0.670251 }, { "epoch": 1.2233837453408165, "grad_norm": 3.268857955932617, "learning_rate": 8.595379383495583e-05, "loss": 2.536807632446289, "memory(GiB)": 72.85, "step": 28555, "token_acc": 0.496551724137931, "train_speed(iter/s)": 0.670235 }, { "epoch": 1.2235979606700655, "grad_norm": 3.7710072994232178, "learning_rate": 8.594911677756925e-05, "loss": 2.452680206298828, "memory(GiB)": 72.85, "step": 28560, "token_acc": 0.4713375796178344, "train_speed(iter/s)": 0.670265 }, { "epoch": 1.2238121759993146, "grad_norm": 3.6299264430999756, "learning_rate": 8.594443906893068e-05, 
"loss": 2.589461898803711, "memory(GiB)": 72.85, "step": 28565, "token_acc": 0.5031055900621118, "train_speed(iter/s)": 0.670291 }, { "epoch": 1.2240263913285634, "grad_norm": 4.797063827514648, "learning_rate": 8.593976070912489e-05, "loss": 2.2430347442626952, "memory(GiB)": 72.85, "step": 28570, "token_acc": 0.5235109717868338, "train_speed(iter/s)": 0.670244 }, { "epoch": 1.2242406066578124, "grad_norm": 5.142703533172607, "learning_rate": 8.59350816982366e-05, "loss": 2.503507614135742, "memory(GiB)": 72.85, "step": 28575, "token_acc": 0.5265017667844523, "train_speed(iter/s)": 0.670254 }, { "epoch": 1.2244548219870615, "grad_norm": 4.340314865112305, "learning_rate": 8.593040203635063e-05, "loss": 2.321721076965332, "memory(GiB)": 72.85, "step": 28580, "token_acc": 0.5424836601307189, "train_speed(iter/s)": 0.670246 }, { "epoch": 1.2246690373163103, "grad_norm": 4.858875274658203, "learning_rate": 8.59257217235517e-05, "loss": 2.38746337890625, "memory(GiB)": 72.85, "step": 28585, "token_acc": 0.47703180212014135, "train_speed(iter/s)": 0.670273 }, { "epoch": 1.2248832526455593, "grad_norm": 4.769619464874268, "learning_rate": 8.592104075992462e-05, "loss": 2.389818572998047, "memory(GiB)": 72.85, "step": 28590, "token_acc": 0.4626334519572954, "train_speed(iter/s)": 0.67027 }, { "epoch": 1.2250974679748083, "grad_norm": 5.155447959899902, "learning_rate": 8.59163591455542e-05, "loss": 2.533532905578613, "memory(GiB)": 72.85, "step": 28595, "token_acc": 0.49049429657794674, "train_speed(iter/s)": 0.670277 }, { "epoch": 1.2253116833040572, "grad_norm": 3.4295594692230225, "learning_rate": 8.591167688052523e-05, "loss": 2.477693557739258, "memory(GiB)": 72.85, "step": 28600, "token_acc": 0.49842271293375395, "train_speed(iter/s)": 0.67028 }, { "epoch": 1.2255258986333062, "grad_norm": 3.93498158454895, "learning_rate": 8.590699396492255e-05, "loss": 2.3396984100341798, "memory(GiB)": 72.85, "step": 28605, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 
0.670278 }, { "epoch": 1.2257401139625552, "grad_norm": 4.281641006469727, "learning_rate": 8.590231039883099e-05, "loss": 2.305752182006836, "memory(GiB)": 72.85, "step": 28610, "token_acc": 0.5035971223021583, "train_speed(iter/s)": 0.6703 }, { "epoch": 1.225954329291804, "grad_norm": 4.032376289367676, "learning_rate": 8.58976261823354e-05, "loss": 2.194927215576172, "memory(GiB)": 72.85, "step": 28615, "token_acc": 0.5226480836236934, "train_speed(iter/s)": 0.670312 }, { "epoch": 1.226168544621053, "grad_norm": 4.019314289093018, "learning_rate": 8.589294131552063e-05, "loss": 2.306291389465332, "memory(GiB)": 72.85, "step": 28620, "token_acc": 0.5450819672131147, "train_speed(iter/s)": 0.670298 }, { "epoch": 1.226382759950302, "grad_norm": 7.202642917633057, "learning_rate": 8.588825579847157e-05, "loss": 2.375951385498047, "memory(GiB)": 72.85, "step": 28625, "token_acc": 0.49760765550239233, "train_speed(iter/s)": 0.670287 }, { "epoch": 1.226596975279551, "grad_norm": 6.6464715003967285, "learning_rate": 8.588356963127309e-05, "loss": 2.5331018447875975, "memory(GiB)": 72.85, "step": 28630, "token_acc": 0.4732824427480916, "train_speed(iter/s)": 0.670294 }, { "epoch": 1.2268111906088, "grad_norm": 4.111722469329834, "learning_rate": 8.587888281401008e-05, "loss": 2.4567163467407225, "memory(GiB)": 72.85, "step": 28635, "token_acc": 0.49221183800623053, "train_speed(iter/s)": 0.670305 }, { "epoch": 1.227025405938049, "grad_norm": 4.215019226074219, "learning_rate": 8.587419534676743e-05, "loss": 2.382282829284668, "memory(GiB)": 72.85, "step": 28640, "token_acc": 0.5068027210884354, "train_speed(iter/s)": 0.670344 }, { "epoch": 1.2272396212672978, "grad_norm": 3.6001882553100586, "learning_rate": 8.586950722963012e-05, "loss": 2.179525947570801, "memory(GiB)": 72.85, "step": 28645, "token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.670383 }, { "epoch": 1.2274538365965468, "grad_norm": 5.367768287658691, "learning_rate": 8.586481846268301e-05, "loss": 
2.5404396057128906, "memory(GiB)": 72.85, "step": 28650, "token_acc": 0.47572815533980584, "train_speed(iter/s)": 0.670366 }, { "epoch": 1.2276680519257959, "grad_norm": 4.455494403839111, "learning_rate": 8.586012904601107e-05, "loss": 2.2786222457885743, "memory(GiB)": 72.85, "step": 28655, "token_acc": 0.511400651465798, "train_speed(iter/s)": 0.670362 }, { "epoch": 1.2278822672550447, "grad_norm": 5.307365417480469, "learning_rate": 8.585543897969927e-05, "loss": 2.4590936660766602, "memory(GiB)": 72.85, "step": 28660, "token_acc": 0.46779661016949153, "train_speed(iter/s)": 0.670334 }, { "epoch": 1.2280964825842937, "grad_norm": 4.355436325073242, "learning_rate": 8.585074826383255e-05, "loss": 2.099066734313965, "memory(GiB)": 72.85, "step": 28665, "token_acc": 0.550185873605948, "train_speed(iter/s)": 0.670333 }, { "epoch": 1.2283106979135427, "grad_norm": 3.69016432762146, "learning_rate": 8.584605689849587e-05, "loss": 2.467310333251953, "memory(GiB)": 72.85, "step": 28670, "token_acc": 0.4943820224719101, "train_speed(iter/s)": 0.670328 }, { "epoch": 1.2285249132427916, "grad_norm": 4.622020721435547, "learning_rate": 8.584136488377427e-05, "loss": 2.5326864242553713, "memory(GiB)": 72.85, "step": 28675, "token_acc": 0.4675324675324675, "train_speed(iter/s)": 0.670337 }, { "epoch": 1.2287391285720406, "grad_norm": 3.958106756210327, "learning_rate": 8.583667221975272e-05, "loss": 2.328388214111328, "memory(GiB)": 72.85, "step": 28680, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.670358 }, { "epoch": 1.2289533439012896, "grad_norm": 4.668035984039307, "learning_rate": 8.583197890651623e-05, "loss": 2.2935871124267577, "memory(GiB)": 72.85, "step": 28685, "token_acc": 0.5342960288808665, "train_speed(iter/s)": 0.670383 }, { "epoch": 1.2291675592305384, "grad_norm": 4.363793849945068, "learning_rate": 8.582728494414985e-05, "loss": 2.4799272537231447, "memory(GiB)": 72.85, "step": 28690, "token_acc": 0.44368600682593856, "train_speed(iter/s)": 
0.6704 }, { "epoch": 1.2293817745597875, "grad_norm": 4.306490898132324, "learning_rate": 8.582259033273856e-05, "loss": 2.5240047454833983, "memory(GiB)": 72.85, "step": 28695, "token_acc": 0.4626334519572954, "train_speed(iter/s)": 0.670403 }, { "epoch": 1.2295959898890365, "grad_norm": 3.78707218170166, "learning_rate": 8.581789507236746e-05, "loss": 2.5935325622558594, "memory(GiB)": 72.85, "step": 28700, "token_acc": 0.44565217391304346, "train_speed(iter/s)": 0.670365 }, { "epoch": 1.2298102052182853, "grad_norm": 4.419566631317139, "learning_rate": 8.581319916312158e-05, "loss": 2.598112678527832, "memory(GiB)": 72.85, "step": 28705, "token_acc": 0.43812709030100333, "train_speed(iter/s)": 0.670373 }, { "epoch": 1.2300244205475344, "grad_norm": 4.0759782791137695, "learning_rate": 8.580850260508601e-05, "loss": 2.230238342285156, "memory(GiB)": 72.85, "step": 28710, "token_acc": 0.547244094488189, "train_speed(iter/s)": 0.670411 }, { "epoch": 1.2302386358767834, "grad_norm": 4.309594631195068, "learning_rate": 8.580380539834583e-05, "loss": 2.4720333099365233, "memory(GiB)": 72.85, "step": 28715, "token_acc": 0.5018587360594795, "train_speed(iter/s)": 0.670403 }, { "epoch": 1.2304528512060324, "grad_norm": 4.16147518157959, "learning_rate": 8.579910754298612e-05, "loss": 2.3306863784790037, "memory(GiB)": 72.85, "step": 28720, "token_acc": 0.5197368421052632, "train_speed(iter/s)": 0.670426 }, { "epoch": 1.2306670665352812, "grad_norm": 4.683584213256836, "learning_rate": 8.579440903909201e-05, "loss": 2.853437042236328, "memory(GiB)": 72.85, "step": 28725, "token_acc": 0.4166666666666667, "train_speed(iter/s)": 0.670455 }, { "epoch": 1.2308812818645303, "grad_norm": 3.561819553375244, "learning_rate": 8.578970988674859e-05, "loss": 2.4481250762939455, "memory(GiB)": 72.85, "step": 28730, "token_acc": 0.4723127035830619, "train_speed(iter/s)": 0.670435 }, { "epoch": 1.2310954971937793, "grad_norm": 3.104780435562134, "learning_rate": 8.578501008604099e-05, 
"loss": 2.256478500366211, "memory(GiB)": 72.85, "step": 28735, "token_acc": 0.4662756598240469, "train_speed(iter/s)": 0.67043 }, { "epoch": 1.2313097125230281, "grad_norm": 4.5272536277771, "learning_rate": 8.57803096370544e-05, "loss": 2.342551040649414, "memory(GiB)": 72.85, "step": 28740, "token_acc": 0.5241379310344828, "train_speed(iter/s)": 0.670445 }, { "epoch": 1.2315239278522772, "grad_norm": 4.544042110443115, "learning_rate": 8.57756085398739e-05, "loss": 2.5014999389648436, "memory(GiB)": 72.85, "step": 28745, "token_acc": 0.490625, "train_speed(iter/s)": 0.670461 }, { "epoch": 1.2317381431815262, "grad_norm": 4.666535377502441, "learning_rate": 8.577090679458469e-05, "loss": 2.343211364746094, "memory(GiB)": 72.85, "step": 28750, "token_acc": 0.5300751879699248, "train_speed(iter/s)": 0.670483 }, { "epoch": 1.231952358510775, "grad_norm": 4.801046848297119, "learning_rate": 8.576620440127197e-05, "loss": 2.2929534912109375, "memory(GiB)": 72.85, "step": 28755, "token_acc": 0.5153846153846153, "train_speed(iter/s)": 0.670504 }, { "epoch": 1.232166573840024, "grad_norm": 4.640854835510254, "learning_rate": 8.576150136002089e-05, "loss": 2.5271812438964845, "memory(GiB)": 72.85, "step": 28760, "token_acc": 0.4868913857677903, "train_speed(iter/s)": 0.670529 }, { "epoch": 1.232380789169273, "grad_norm": 4.215889930725098, "learning_rate": 8.575679767091666e-05, "loss": 2.3003448486328124, "memory(GiB)": 72.85, "step": 28765, "token_acc": 0.47794117647058826, "train_speed(iter/s)": 0.670525 }, { "epoch": 1.2325950044985219, "grad_norm": 3.7458202838897705, "learning_rate": 8.575209333404453e-05, "loss": 2.453984260559082, "memory(GiB)": 72.85, "step": 28770, "token_acc": 0.4750830564784053, "train_speed(iter/s)": 0.670563 }, { "epoch": 1.232809219827771, "grad_norm": 3.439065456390381, "learning_rate": 8.574738834948967e-05, "loss": 2.6820652008056642, "memory(GiB)": 72.85, "step": 28775, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.670531 
}, { "epoch": 1.23302343515702, "grad_norm": 5.610289096832275, "learning_rate": 8.574268271733733e-05, "loss": 2.261374664306641, "memory(GiB)": 72.85, "step": 28780, "token_acc": 0.525096525096525, "train_speed(iter/s)": 0.670549 }, { "epoch": 1.2332376504862688, "grad_norm": 4.240126132965088, "learning_rate": 8.573797643767278e-05, "loss": 2.2552717208862303, "memory(GiB)": 72.85, "step": 28785, "token_acc": 0.5034246575342466, "train_speed(iter/s)": 0.670554 }, { "epoch": 1.2334518658155178, "grad_norm": 4.121037483215332, "learning_rate": 8.573326951058125e-05, "loss": 2.4110374450683594, "memory(GiB)": 72.85, "step": 28790, "token_acc": 0.5228758169934641, "train_speed(iter/s)": 0.670569 }, { "epoch": 1.2336660811447668, "grad_norm": 4.386409759521484, "learning_rate": 8.572856193614802e-05, "loss": 2.5025325775146485, "memory(GiB)": 72.85, "step": 28795, "token_acc": 0.43343653250773995, "train_speed(iter/s)": 0.670575 }, { "epoch": 1.2338802964740156, "grad_norm": 4.539286136627197, "learning_rate": 8.572385371445837e-05, "loss": 2.2452497482299805, "memory(GiB)": 72.85, "step": 28800, "token_acc": 0.5224913494809689, "train_speed(iter/s)": 0.670581 }, { "epoch": 1.2340945118032647, "grad_norm": 4.212223529815674, "learning_rate": 8.571914484559762e-05, "loss": 2.5513824462890624, "memory(GiB)": 72.85, "step": 28805, "token_acc": 0.4592391304347826, "train_speed(iter/s)": 0.67055 }, { "epoch": 1.2343087271325137, "grad_norm": 4.880184173583984, "learning_rate": 8.571443532965103e-05, "loss": 2.321095085144043, "memory(GiB)": 72.85, "step": 28810, "token_acc": 0.49264705882352944, "train_speed(iter/s)": 0.670565 }, { "epoch": 1.2345229424617625, "grad_norm": 4.162059307098389, "learning_rate": 8.570972516670395e-05, "loss": 2.340321922302246, "memory(GiB)": 72.85, "step": 28815, "token_acc": 0.5228758169934641, "train_speed(iter/s)": 0.670582 }, { "epoch": 1.2347371577910116, "grad_norm": 4.176064491271973, "learning_rate": 8.57050143568417e-05, "loss": 
2.0984920501708983, "memory(GiB)": 72.85, "step": 28820, "token_acc": 0.5166051660516605, "train_speed(iter/s)": 0.67057 }, { "epoch": 1.2349513731202606, "grad_norm": 4.367953777313232, "learning_rate": 8.570030290014963e-05, "loss": 2.290004539489746, "memory(GiB)": 72.85, "step": 28825, "token_acc": 0.5166666666666667, "train_speed(iter/s)": 0.670576 }, { "epoch": 1.2351655884495094, "grad_norm": 3.519456386566162, "learning_rate": 8.569559079671308e-05, "loss": 2.329519844055176, "memory(GiB)": 72.85, "step": 28830, "token_acc": 0.4713804713804714, "train_speed(iter/s)": 0.67059 }, { "epoch": 1.2353798037787584, "grad_norm": 3.6124985218048096, "learning_rate": 8.569087804661742e-05, "loss": 2.2439435958862304, "memory(GiB)": 72.85, "step": 28835, "token_acc": 0.5306748466257669, "train_speed(iter/s)": 0.670598 }, { "epoch": 1.2355940191080075, "grad_norm": 3.910189390182495, "learning_rate": 8.5686164649948e-05, "loss": 2.516252899169922, "memory(GiB)": 72.85, "step": 28840, "token_acc": 0.4408284023668639, "train_speed(iter/s)": 0.670601 }, { "epoch": 1.2358082344372563, "grad_norm": 3.9639596939086914, "learning_rate": 8.568145060679025e-05, "loss": 2.414749526977539, "memory(GiB)": 72.85, "step": 28845, "token_acc": 0.5136054421768708, "train_speed(iter/s)": 0.670619 }, { "epoch": 1.2360224497665053, "grad_norm": 4.634134292602539, "learning_rate": 8.567673591722955e-05, "loss": 2.5752559661865235, "memory(GiB)": 72.85, "step": 28850, "token_acc": 0.4876325088339223, "train_speed(iter/s)": 0.670635 }, { "epoch": 1.2362366650957544, "grad_norm": 4.69074010848999, "learning_rate": 8.567202058135131e-05, "loss": 2.339922332763672, "memory(GiB)": 72.85, "step": 28855, "token_acc": 0.4861111111111111, "train_speed(iter/s)": 0.670627 }, { "epoch": 1.2364508804250032, "grad_norm": 4.9407525062561035, "learning_rate": 8.566730459924096e-05, "loss": 2.1110021591186525, "memory(GiB)": 72.85, "step": 28860, "token_acc": 0.5271317829457365, "train_speed(iter/s)": 
0.670645 }, { "epoch": 1.2366650957542522, "grad_norm": 4.594541549682617, "learning_rate": 8.566258797098392e-05, "loss": 2.1496177673339845, "memory(GiB)": 72.85, "step": 28865, "token_acc": 0.516, "train_speed(iter/s)": 0.670663 }, { "epoch": 1.2368793110835012, "grad_norm": 3.8041749000549316, "learning_rate": 8.565787069666564e-05, "loss": 2.454901123046875, "memory(GiB)": 72.85, "step": 28870, "token_acc": 0.46883468834688347, "train_speed(iter/s)": 0.670676 }, { "epoch": 1.23709352641275, "grad_norm": 4.341359615325928, "learning_rate": 8.56531527763716e-05, "loss": 2.236385726928711, "memory(GiB)": 72.85, "step": 28875, "token_acc": 0.5, "train_speed(iter/s)": 0.670663 }, { "epoch": 1.237307741741999, "grad_norm": 4.590850353240967, "learning_rate": 8.564843421018725e-05, "loss": 2.068098258972168, "memory(GiB)": 72.85, "step": 28880, "token_acc": 0.5148514851485149, "train_speed(iter/s)": 0.670659 }, { "epoch": 1.2375219570712481, "grad_norm": 6.809397220611572, "learning_rate": 8.564371499819805e-05, "loss": 1.9291509628295898, "memory(GiB)": 72.85, "step": 28885, "token_acc": 0.5669291338582677, "train_speed(iter/s)": 0.670683 }, { "epoch": 1.237736172400497, "grad_norm": 4.2096686363220215, "learning_rate": 8.563899514048954e-05, "loss": 2.5202112197875977, "memory(GiB)": 72.85, "step": 28890, "token_acc": 0.467966573816156, "train_speed(iter/s)": 0.670701 }, { "epoch": 1.237950387729746, "grad_norm": 5.528386116027832, "learning_rate": 8.56342746371472e-05, "loss": 2.506491470336914, "memory(GiB)": 72.85, "step": 28895, "token_acc": 0.48672566371681414, "train_speed(iter/s)": 0.670682 }, { "epoch": 1.238164603058995, "grad_norm": 4.1949896812438965, "learning_rate": 8.562955348825655e-05, "loss": 2.1386194229125977, "memory(GiB)": 72.85, "step": 28900, "token_acc": 0.5578512396694215, "train_speed(iter/s)": 0.670729 }, { "epoch": 1.2383788183882438, "grad_norm": 5.136402130126953, "learning_rate": 8.56248316939031e-05, "loss": 2.4303157806396483, 
"memory(GiB)": 72.85, "step": 28905, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.670715 }, { "epoch": 1.2385930337174929, "grad_norm": 5.86838960647583, "learning_rate": 8.56201092541724e-05, "loss": 2.4569820404052733, "memory(GiB)": 72.85, "step": 28910, "token_acc": 0.4712230215827338, "train_speed(iter/s)": 0.670725 }, { "epoch": 1.2388072490467419, "grad_norm": 3.4356563091278076, "learning_rate": 8.561538616915001e-05, "loss": 2.2737377166748045, "memory(GiB)": 72.85, "step": 28915, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.670719 }, { "epoch": 1.2390214643759907, "grad_norm": 4.564637184143066, "learning_rate": 8.561066243892151e-05, "loss": 2.358902359008789, "memory(GiB)": 72.85, "step": 28920, "token_acc": 0.4900398406374502, "train_speed(iter/s)": 0.670664 }, { "epoch": 1.2392356797052397, "grad_norm": 4.167521953582764, "learning_rate": 8.560593806357244e-05, "loss": 2.382468414306641, "memory(GiB)": 72.85, "step": 28925, "token_acc": 0.49615384615384617, "train_speed(iter/s)": 0.67068 }, { "epoch": 1.2394498950344888, "grad_norm": 3.293867588043213, "learning_rate": 8.56012130431884e-05, "loss": 2.2674533843994142, "memory(GiB)": 72.85, "step": 28930, "token_acc": 0.5193548387096775, "train_speed(iter/s)": 0.670701 }, { "epoch": 1.2396641103637376, "grad_norm": 4.280488014221191, "learning_rate": 8.559648737785499e-05, "loss": 2.6601531982421873, "memory(GiB)": 72.85, "step": 28935, "token_acc": 0.4845679012345679, "train_speed(iter/s)": 0.670669 }, { "epoch": 1.2398783256929866, "grad_norm": 3.8630785942077637, "learning_rate": 8.559176106765782e-05, "loss": 2.087692642211914, "memory(GiB)": 72.85, "step": 28940, "token_acc": 0.5220588235294118, "train_speed(iter/s)": 0.670669 }, { "epoch": 1.2400925410222357, "grad_norm": 3.2261126041412354, "learning_rate": 8.558703411268249e-05, "loss": 2.315913772583008, "memory(GiB)": 72.85, "step": 28945, "token_acc": 0.47419354838709676, "train_speed(iter/s)": 0.670653 }, { 
"epoch": 1.2403067563514845, "grad_norm": 4.378329277038574, "learning_rate": 8.558230651301468e-05, "loss": 2.5531116485595704, "memory(GiB)": 72.85, "step": 28950, "token_acc": 0.48355263157894735, "train_speed(iter/s)": 0.670674 }, { "epoch": 1.2405209716807335, "grad_norm": 4.1805949211120605, "learning_rate": 8.557757826874002e-05, "loss": 2.330048942565918, "memory(GiB)": 72.85, "step": 28955, "token_acc": 0.4911660777385159, "train_speed(iter/s)": 0.670701 }, { "epoch": 1.2407351870099825, "grad_norm": 4.032968997955322, "learning_rate": 8.557284937994413e-05, "loss": 2.493879699707031, "memory(GiB)": 72.85, "step": 28960, "token_acc": 0.504297994269341, "train_speed(iter/s)": 0.670696 }, { "epoch": 1.2409494023392313, "grad_norm": 3.7592828273773193, "learning_rate": 8.556811984671272e-05, "loss": 1.9268880844116212, "memory(GiB)": 72.85, "step": 28965, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.670696 }, { "epoch": 1.2411636176684804, "grad_norm": 5.406024932861328, "learning_rate": 8.556338966913144e-05, "loss": 2.1064498901367186, "memory(GiB)": 72.85, "step": 28970, "token_acc": 0.5215827338129496, "train_speed(iter/s)": 0.670695 }, { "epoch": 1.2413778329977294, "grad_norm": 4.380596160888672, "learning_rate": 8.5558658847286e-05, "loss": 2.2874992370605467, "memory(GiB)": 72.85, "step": 28975, "token_acc": 0.5107142857142857, "train_speed(iter/s)": 0.670666 }, { "epoch": 1.2415920483269782, "grad_norm": 3.27313232421875, "learning_rate": 8.555392738126209e-05, "loss": 2.6317937850952147, "memory(GiB)": 72.85, "step": 28980, "token_acc": 0.4641638225255973, "train_speed(iter/s)": 0.670636 }, { "epoch": 1.2418062636562273, "grad_norm": 3.9793097972869873, "learning_rate": 8.554919527114545e-05, "loss": 2.236166763305664, "memory(GiB)": 72.85, "step": 28985, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.670634 }, { "epoch": 1.2420204789854763, "grad_norm": 3.827012538909912, "learning_rate": 8.554446251702177e-05, "loss": 
2.0336986541748048, "memory(GiB)": 72.85, "step": 28990, "token_acc": 0.5676691729323309, "train_speed(iter/s)": 0.670658 }, { "epoch": 1.242234694314725, "grad_norm": 4.559518337249756, "learning_rate": 8.553972911897683e-05, "loss": 2.446842384338379, "memory(GiB)": 72.85, "step": 28995, "token_acc": 0.48031496062992124, "train_speed(iter/s)": 0.670662 }, { "epoch": 1.2424489096439741, "grad_norm": 7.38093376159668, "learning_rate": 8.553499507709635e-05, "loss": 2.8237485885620117, "memory(GiB)": 72.85, "step": 29000, "token_acc": 0.4578313253012048, "train_speed(iter/s)": 0.670679 }, { "epoch": 1.2424489096439741, "eval_loss": 1.9993805885314941, "eval_runtime": 15.4662, "eval_samples_per_second": 6.466, "eval_steps_per_second": 6.466, "eval_token_acc": 0.49934469200524245, "step": 29000 }, { "epoch": 1.2426631249732232, "grad_norm": 3.5714077949523926, "learning_rate": 8.553026039146608e-05, "loss": 2.3419872283935548, "memory(GiB)": 72.85, "step": 29005, "token_acc": 0.5019193857965452, "train_speed(iter/s)": 0.670376 }, { "epoch": 1.242877340302472, "grad_norm": 5.322798252105713, "learning_rate": 8.552552506217185e-05, "loss": 2.3924163818359374, "memory(GiB)": 72.85, "step": 29010, "token_acc": 0.5243055555555556, "train_speed(iter/s)": 0.670386 }, { "epoch": 1.243091555631721, "grad_norm": 6.474551200866699, "learning_rate": 8.552078908929938e-05, "loss": 2.50323429107666, "memory(GiB)": 72.85, "step": 29015, "token_acc": 0.46381578947368424, "train_speed(iter/s)": 0.670419 }, { "epoch": 1.24330577096097, "grad_norm": 3.903468608856201, "learning_rate": 8.55160524729345e-05, "loss": 2.1812395095825194, "memory(GiB)": 72.85, "step": 29020, "token_acc": 0.5370370370370371, "train_speed(iter/s)": 0.670452 }, { "epoch": 1.2435199862902189, "grad_norm": 5.32007360458374, "learning_rate": 8.551131521316301e-05, "loss": 2.6665933609008787, "memory(GiB)": 72.85, "step": 29025, "token_acc": 0.42857142857142855, "train_speed(iter/s)": 0.670481 }, { "epoch": 
1.243734201619468, "grad_norm": 4.059370994567871, "learning_rate": 8.550657731007075e-05, "loss": 2.4207801818847656, "memory(GiB)": 72.85, "step": 29030, "token_acc": 0.5150602409638554, "train_speed(iter/s)": 0.670505 }, { "epoch": 1.243948416948717, "grad_norm": 3.9680871963500977, "learning_rate": 8.550183876374351e-05, "loss": 2.6220514297485353, "memory(GiB)": 72.85, "step": 29035, "token_acc": 0.4753521126760563, "train_speed(iter/s)": 0.670503 }, { "epoch": 1.2441626322779658, "grad_norm": 6.481157302856445, "learning_rate": 8.549709957426716e-05, "loss": 2.2301185607910154, "memory(GiB)": 72.85, "step": 29040, "token_acc": 0.5257731958762887, "train_speed(iter/s)": 0.670516 }, { "epoch": 1.2443768476072148, "grad_norm": 3.2486062049865723, "learning_rate": 8.549235974172758e-05, "loss": 2.5005313873291017, "memory(GiB)": 72.85, "step": 29045, "token_acc": 0.4740484429065744, "train_speed(iter/s)": 0.670511 }, { "epoch": 1.2445910629364638, "grad_norm": 3.5010225772857666, "learning_rate": 8.548761926621058e-05, "loss": 2.331307601928711, "memory(GiB)": 72.85, "step": 29050, "token_acc": 0.49242424242424243, "train_speed(iter/s)": 0.670526 }, { "epoch": 1.2448052782657126, "grad_norm": 3.779597282409668, "learning_rate": 8.548287814780207e-05, "loss": 2.4384660720825195, "memory(GiB)": 72.85, "step": 29055, "token_acc": 0.4940239043824701, "train_speed(iter/s)": 0.670486 }, { "epoch": 1.2450194935949617, "grad_norm": 4.133913516998291, "learning_rate": 8.547813638658795e-05, "loss": 2.317573356628418, "memory(GiB)": 72.85, "step": 29060, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.670497 }, { "epoch": 1.2452337089242107, "grad_norm": 5.389011859893799, "learning_rate": 8.54733939826541e-05, "loss": 2.106742095947266, "memory(GiB)": 72.85, "step": 29065, "token_acc": 0.528, "train_speed(iter/s)": 0.670498 }, { "epoch": 1.2454479242534595, "grad_norm": 3.735682249069214, "learning_rate": 8.546865093608645e-05, "loss": 2.5429248809814453, 
"memory(GiB)": 72.85, "step": 29070, "token_acc": 0.4845360824742268, "train_speed(iter/s)": 0.670476 }, { "epoch": 1.2456621395827085, "grad_norm": 4.055561065673828, "learning_rate": 8.546390724697091e-05, "loss": 2.404839515686035, "memory(GiB)": 72.85, "step": 29075, "token_acc": 0.4675324675324675, "train_speed(iter/s)": 0.670475 }, { "epoch": 1.2458763549119576, "grad_norm": 4.869115352630615, "learning_rate": 8.545916291539342e-05, "loss": 2.3309356689453127, "memory(GiB)": 72.85, "step": 29080, "token_acc": 0.49818181818181817, "train_speed(iter/s)": 0.670469 }, { "epoch": 1.2460905702412064, "grad_norm": 4.48916482925415, "learning_rate": 8.545441794143993e-05, "loss": 2.6300514221191404, "memory(GiB)": 72.85, "step": 29085, "token_acc": 0.4604519774011299, "train_speed(iter/s)": 0.67051 }, { "epoch": 1.2463047855704554, "grad_norm": 4.242565631866455, "learning_rate": 8.544967232519641e-05, "loss": 2.235183525085449, "memory(GiB)": 72.85, "step": 29090, "token_acc": 0.4852941176470588, "train_speed(iter/s)": 0.670539 }, { "epoch": 1.2465190008997045, "grad_norm": 4.4422831535339355, "learning_rate": 8.544492606674882e-05, "loss": 2.7006465911865236, "memory(GiB)": 72.85, "step": 29095, "token_acc": 0.47735191637630664, "train_speed(iter/s)": 0.670553 }, { "epoch": 1.2467332162289533, "grad_norm": 3.99996018409729, "learning_rate": 8.544017916618312e-05, "loss": 2.183906364440918, "memory(GiB)": 72.85, "step": 29100, "token_acc": 0.5305343511450382, "train_speed(iter/s)": 0.67054 }, { "epoch": 1.2469474315582023, "grad_norm": 4.353458881378174, "learning_rate": 8.543543162358535e-05, "loss": 2.336251449584961, "memory(GiB)": 72.85, "step": 29105, "token_acc": 0.4727272727272727, "train_speed(iter/s)": 0.670514 }, { "epoch": 1.2471616468874513, "grad_norm": 3.0416505336761475, "learning_rate": 8.54306834390415e-05, "loss": 2.066351890563965, "memory(GiB)": 72.85, "step": 29110, "token_acc": 0.5525291828793775, "train_speed(iter/s)": 0.670505 }, { "epoch": 
1.2473758622167002, "grad_norm": 5.64004373550415, "learning_rate": 8.542593461263757e-05, "loss": 2.434557342529297, "memory(GiB)": 72.85, "step": 29115, "token_acc": 0.4575645756457565, "train_speed(iter/s)": 0.670522 }, { "epoch": 1.2475900775459492, "grad_norm": 4.081405162811279, "learning_rate": 8.54211851444596e-05, "loss": 2.546645164489746, "memory(GiB)": 72.85, "step": 29120, "token_acc": 0.4723127035830619, "train_speed(iter/s)": 0.670517 }, { "epoch": 1.2478042928751982, "grad_norm": 3.7409958839416504, "learning_rate": 8.541643503459365e-05, "loss": 2.381618690490723, "memory(GiB)": 72.85, "step": 29125, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.670512 }, { "epoch": 1.248018508204447, "grad_norm": 3.507423162460327, "learning_rate": 8.541168428312574e-05, "loss": 2.2766117095947265, "memory(GiB)": 72.85, "step": 29130, "token_acc": 0.5303643724696356, "train_speed(iter/s)": 0.670525 }, { "epoch": 1.248232723533696, "grad_norm": 3.8705272674560547, "learning_rate": 8.540693289014194e-05, "loss": 2.6182899475097656, "memory(GiB)": 72.85, "step": 29135, "token_acc": 0.4867549668874172, "train_speed(iter/s)": 0.670531 }, { "epoch": 1.248446938862945, "grad_norm": 4.2997636795043945, "learning_rate": 8.540218085572836e-05, "loss": 2.2726066589355467, "memory(GiB)": 72.85, "step": 29140, "token_acc": 0.53515625, "train_speed(iter/s)": 0.67052 }, { "epoch": 1.248661154192194, "grad_norm": 4.736447334289551, "learning_rate": 8.539742817997106e-05, "loss": 2.2842384338378907, "memory(GiB)": 72.85, "step": 29145, "token_acc": 0.555984555984556, "train_speed(iter/s)": 0.670517 }, { "epoch": 1.248875369521443, "grad_norm": 3.0577616691589355, "learning_rate": 8.539267486295615e-05, "loss": 2.4902408599853514, "memory(GiB)": 72.85, "step": 29150, "token_acc": 0.4523809523809524, "train_speed(iter/s)": 0.670534 }, { "epoch": 1.249089584850692, "grad_norm": 4.697091579437256, "learning_rate": 8.538792090476972e-05, "loss": 2.2127960205078123, 
"memory(GiB)": 72.85, "step": 29155, "token_acc": 0.5198412698412699, "train_speed(iter/s)": 0.67055 }, { "epoch": 1.2493038001799408, "grad_norm": 4.10744571685791, "learning_rate": 8.53831663054979e-05, "loss": 2.3365413665771486, "memory(GiB)": 72.85, "step": 29160, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.670566 }, { "epoch": 1.2495180155091898, "grad_norm": 4.902934551239014, "learning_rate": 8.537841106522684e-05, "loss": 2.4780391693115233, "memory(GiB)": 72.85, "step": 29165, "token_acc": 0.4713375796178344, "train_speed(iter/s)": 0.670596 }, { "epoch": 1.2497322308384389, "grad_norm": 4.00508451461792, "learning_rate": 8.537365518404268e-05, "loss": 1.9421001434326173, "memory(GiB)": 72.85, "step": 29170, "token_acc": 0.5573122529644269, "train_speed(iter/s)": 0.670622 }, { "epoch": 1.2499464461676877, "grad_norm": 4.621702194213867, "learning_rate": 8.536889866203158e-05, "loss": 2.4918745040893553, "memory(GiB)": 72.85, "step": 29175, "token_acc": 0.4528301886792453, "train_speed(iter/s)": 0.670606 }, { "epoch": 1.2501606614969367, "grad_norm": 6.237338542938232, "learning_rate": 8.536414149927968e-05, "loss": 2.219707489013672, "memory(GiB)": 72.85, "step": 29180, "token_acc": 0.5186915887850467, "train_speed(iter/s)": 0.670604 }, { "epoch": 1.2503748768261858, "grad_norm": 3.2461490631103516, "learning_rate": 8.535938369587319e-05, "loss": 2.3038518905639647, "memory(GiB)": 72.85, "step": 29185, "token_acc": 0.5285714285714286, "train_speed(iter/s)": 0.670591 }, { "epoch": 1.2505890921554346, "grad_norm": 3.5546603202819824, "learning_rate": 8.535462525189831e-05, "loss": 2.2100685119628904, "memory(GiB)": 72.85, "step": 29190, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.670573 }, { "epoch": 1.2508033074846836, "grad_norm": 4.466154098510742, "learning_rate": 8.534986616744121e-05, "loss": 2.5325128555297853, "memory(GiB)": 72.85, "step": 29195, "token_acc": 0.4878048780487805, "train_speed(iter/s)": 0.670564 }, { 
"epoch": 1.2510175228139326, "grad_norm": 2.7880451679229736, "learning_rate": 8.534510644258813e-05, "loss": 2.198301887512207, "memory(GiB)": 72.85, "step": 29200, "token_acc": 0.5173611111111112, "train_speed(iter/s)": 0.670591 }, { "epoch": 1.2512317381431814, "grad_norm": 4.853797435760498, "learning_rate": 8.53403460774253e-05, "loss": 2.1949228286743163, "memory(GiB)": 72.85, "step": 29205, "token_acc": 0.519163763066202, "train_speed(iter/s)": 0.670604 }, { "epoch": 1.2514459534724305, "grad_norm": 2.9368317127227783, "learning_rate": 8.533558507203893e-05, "loss": 2.6123111724853514, "memory(GiB)": 72.85, "step": 29210, "token_acc": 0.4692737430167598, "train_speed(iter/s)": 0.670609 }, { "epoch": 1.2516601688016795, "grad_norm": 3.4071288108825684, "learning_rate": 8.533082342651528e-05, "loss": 2.4739532470703125, "memory(GiB)": 72.85, "step": 29215, "token_acc": 0.5016181229773463, "train_speed(iter/s)": 0.670638 }, { "epoch": 1.2518743841309283, "grad_norm": 4.588313579559326, "learning_rate": 8.532606114094065e-05, "loss": 2.5182121276855467, "memory(GiB)": 72.85, "step": 29220, "token_acc": 0.4813664596273292, "train_speed(iter/s)": 0.670636 }, { "epoch": 1.2520885994601774, "grad_norm": 4.272294521331787, "learning_rate": 8.532129821540127e-05, "loss": 2.2385631561279298, "memory(GiB)": 72.85, "step": 29225, "token_acc": 0.5261324041811847, "train_speed(iter/s)": 0.670644 }, { "epoch": 1.2523028147894264, "grad_norm": 4.347289562225342, "learning_rate": 8.531653464998345e-05, "loss": 2.075936508178711, "memory(GiB)": 72.85, "step": 29230, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.670667 }, { "epoch": 1.2525170301186752, "grad_norm": 4.6911940574646, "learning_rate": 8.531177044477346e-05, "loss": 2.409967231750488, "memory(GiB)": 72.85, "step": 29235, "token_acc": 0.5324675324675324, "train_speed(iter/s)": 0.670658 }, { "epoch": 1.2527312454479242, "grad_norm": 5.15477991104126, "learning_rate": 8.530700559985763e-05, "loss": 
2.641266632080078, "memory(GiB)": 72.85, "step": 29240, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.670671 }, { "epoch": 1.2529454607771733, "grad_norm": 3.9897499084472656, "learning_rate": 8.530224011532228e-05, "loss": 2.1028121948242187, "memory(GiB)": 72.85, "step": 29245, "token_acc": 0.553030303030303, "train_speed(iter/s)": 0.670688 }, { "epoch": 1.253159676106422, "grad_norm": 5.607504367828369, "learning_rate": 8.529747399125372e-05, "loss": 2.42663688659668, "memory(GiB)": 72.85, "step": 29250, "token_acc": 0.4820846905537459, "train_speed(iter/s)": 0.670691 }, { "epoch": 1.2533738914356711, "grad_norm": 4.507942199707031, "learning_rate": 8.529270722773832e-05, "loss": 2.2555000305175783, "memory(GiB)": 72.85, "step": 29255, "token_acc": 0.4927007299270073, "train_speed(iter/s)": 0.670688 }, { "epoch": 1.2535881067649202, "grad_norm": 4.194817543029785, "learning_rate": 8.528793982486241e-05, "loss": 2.39263801574707, "memory(GiB)": 72.85, "step": 29260, "token_acc": 0.5016393442622951, "train_speed(iter/s)": 0.670681 }, { "epoch": 1.253802322094169, "grad_norm": 4.3479766845703125, "learning_rate": 8.528317178271237e-05, "loss": 2.607854461669922, "memory(GiB)": 72.85, "step": 29265, "token_acc": 0.44089456869009586, "train_speed(iter/s)": 0.670707 }, { "epoch": 1.254016537423418, "grad_norm": 3.513761281967163, "learning_rate": 8.527840310137458e-05, "loss": 2.581480598449707, "memory(GiB)": 72.85, "step": 29270, "token_acc": 0.45084745762711864, "train_speed(iter/s)": 0.670716 }, { "epoch": 1.254230752752667, "grad_norm": 4.0748209953308105, "learning_rate": 8.527363378093543e-05, "loss": 2.667904281616211, "memory(GiB)": 72.85, "step": 29275, "token_acc": 0.4605678233438486, "train_speed(iter/s)": 0.670698 }, { "epoch": 1.2544449680819159, "grad_norm": 5.427794456481934, "learning_rate": 8.526886382148129e-05, "loss": 2.405181884765625, "memory(GiB)": 72.85, "step": 29280, "token_acc": 0.498220640569395, "train_speed(iter/s)": 
0.670716 }, { "epoch": 1.254659183411165, "grad_norm": 4.7168145179748535, "learning_rate": 8.526409322309862e-05, "loss": 2.1241588592529297, "memory(GiB)": 72.85, "step": 29285, "token_acc": 0.5394736842105263, "train_speed(iter/s)": 0.670726 }, { "epoch": 1.254873398740414, "grad_norm": 4.076179504394531, "learning_rate": 8.525932198587381e-05, "loss": 2.421693229675293, "memory(GiB)": 72.85, "step": 29290, "token_acc": 0.4887459807073955, "train_speed(iter/s)": 0.670752 }, { "epoch": 1.2550876140696627, "grad_norm": 4.820228576660156, "learning_rate": 8.525455010989331e-05, "loss": 2.4121047973632814, "memory(GiB)": 72.85, "step": 29295, "token_acc": 0.5363321799307958, "train_speed(iter/s)": 0.670748 }, { "epoch": 1.2553018293989118, "grad_norm": 3.7061333656311035, "learning_rate": 8.524977759524356e-05, "loss": 2.305202674865723, "memory(GiB)": 72.85, "step": 29300, "token_acc": 0.5049833887043189, "train_speed(iter/s)": 0.670786 }, { "epoch": 1.2555160447281608, "grad_norm": 3.860668182373047, "learning_rate": 8.524500444201104e-05, "loss": 2.385336685180664, "memory(GiB)": 72.85, "step": 29305, "token_acc": 0.4855072463768116, "train_speed(iter/s)": 0.670772 }, { "epoch": 1.2557302600574096, "grad_norm": 4.685576438903809, "learning_rate": 8.524023065028219e-05, "loss": 2.1060260772705077, "memory(GiB)": 72.85, "step": 29310, "token_acc": 0.546875, "train_speed(iter/s)": 0.670782 }, { "epoch": 1.2559444753866587, "grad_norm": 3.926962375640869, "learning_rate": 8.52354562201435e-05, "loss": 2.2419206619262697, "memory(GiB)": 72.85, "step": 29315, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.670788 }, { "epoch": 1.2561586907159077, "grad_norm": 5.129450798034668, "learning_rate": 8.523068115168147e-05, "loss": 2.392062187194824, "memory(GiB)": 72.85, "step": 29320, "token_acc": 0.49019607843137253, "train_speed(iter/s)": 0.670795 }, { "epoch": 1.2563729060451565, "grad_norm": 5.441232204437256, "learning_rate": 8.522590544498259e-05, "loss": 
2.4016448974609377, "memory(GiB)": 72.85, "step": 29325, "token_acc": 0.49615384615384617, "train_speed(iter/s)": 0.670781 }, { "epoch": 1.2565871213744055, "grad_norm": 4.700616359710693, "learning_rate": 8.52211291001334e-05, "loss": 2.2357305526733398, "memory(GiB)": 72.85, "step": 29330, "token_acc": 0.5169811320754717, "train_speed(iter/s)": 0.67077 }, { "epoch": 1.2568013367036546, "grad_norm": 3.792017936706543, "learning_rate": 8.521635211722041e-05, "loss": 2.467333984375, "memory(GiB)": 72.85, "step": 29335, "token_acc": 0.46710526315789475, "train_speed(iter/s)": 0.670758 }, { "epoch": 1.2570155520329034, "grad_norm": 3.8151204586029053, "learning_rate": 8.521157449633017e-05, "loss": 2.3744234085083007, "memory(GiB)": 72.85, "step": 29340, "token_acc": 0.498371335504886, "train_speed(iter/s)": 0.670761 }, { "epoch": 1.2572297673621524, "grad_norm": 3.651313543319702, "learning_rate": 8.520679623754924e-05, "loss": 2.1955570220947265, "memory(GiB)": 72.85, "step": 29345, "token_acc": 0.49324324324324326, "train_speed(iter/s)": 0.670731 }, { "epoch": 1.2574439826914015, "grad_norm": 4.181849479675293, "learning_rate": 8.520201734096414e-05, "loss": 2.2644321441650392, "memory(GiB)": 72.85, "step": 29350, "token_acc": 0.4852941176470588, "train_speed(iter/s)": 0.670729 }, { "epoch": 1.2576581980206503, "grad_norm": 3.745603322982788, "learning_rate": 8.51972378066615e-05, "loss": 2.1357547760009767, "memory(GiB)": 72.85, "step": 29355, "token_acc": 0.515358361774744, "train_speed(iter/s)": 0.670705 }, { "epoch": 1.2578724133498993, "grad_norm": 4.604321002960205, "learning_rate": 8.519245763472786e-05, "loss": 2.474528121948242, "memory(GiB)": 72.85, "step": 29360, "token_acc": 0.49523809523809526, "train_speed(iter/s)": 0.670699 }, { "epoch": 1.2580866286791483, "grad_norm": 3.858668804168701, "learning_rate": 8.518767682524985e-05, "loss": 2.691478729248047, "memory(GiB)": 72.85, "step": 29365, "token_acc": 0.4425287356321839, "train_speed(iter/s)": 
0.670716 }, { "epoch": 1.2583008440083971, "grad_norm": 4.636096954345703, "learning_rate": 8.518289537831407e-05, "loss": 2.316040802001953, "memory(GiB)": 72.85, "step": 29370, "token_acc": 0.4725609756097561, "train_speed(iter/s)": 0.67072 }, { "epoch": 1.2585150593376462, "grad_norm": 3.808232069015503, "learning_rate": 8.517811329400713e-05, "loss": 2.1796405792236326, "memory(GiB)": 72.85, "step": 29375, "token_acc": 0.573943661971831, "train_speed(iter/s)": 0.670736 }, { "epoch": 1.2587292746668952, "grad_norm": 4.3780198097229, "learning_rate": 8.517333057241565e-05, "loss": 2.715861129760742, "memory(GiB)": 72.85, "step": 29380, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.670738 }, { "epoch": 1.258943489996144, "grad_norm": 2.9618117809295654, "learning_rate": 8.516854721362632e-05, "loss": 2.3491859436035156, "memory(GiB)": 72.85, "step": 29385, "token_acc": 0.5083056478405316, "train_speed(iter/s)": 0.670754 }, { "epoch": 1.259157705325393, "grad_norm": 3.7572011947631836, "learning_rate": 8.516376321772575e-05, "loss": 2.277543067932129, "memory(GiB)": 72.85, "step": 29390, "token_acc": 0.5316901408450704, "train_speed(iter/s)": 0.670765 }, { "epoch": 1.259371920654642, "grad_norm": 4.866521835327148, "learning_rate": 8.515897858480064e-05, "loss": 2.3297374725341795, "memory(GiB)": 72.85, "step": 29395, "token_acc": 0.47645429362880887, "train_speed(iter/s)": 0.670769 }, { "epoch": 1.259586135983891, "grad_norm": 4.139445781707764, "learning_rate": 8.515419331493763e-05, "loss": 2.2652362823486327, "memory(GiB)": 72.85, "step": 29400, "token_acc": 0.4891640866873065, "train_speed(iter/s)": 0.67077 }, { "epoch": 1.25980035131314, "grad_norm": 3.7596185207366943, "learning_rate": 8.514940740822343e-05, "loss": 2.4030622482299804, "memory(GiB)": 72.85, "step": 29405, "token_acc": 0.5, "train_speed(iter/s)": 0.67078 }, { "epoch": 1.260014566642389, "grad_norm": 3.6818652153015137, "learning_rate": 8.514462086474474e-05, "loss": 
2.4798202514648438, "memory(GiB)": 72.85, "step": 29410, "token_acc": 0.4523809523809524, "train_speed(iter/s)": 0.670802 }, { "epoch": 1.2602287819716378, "grad_norm": 4.947805404663086, "learning_rate": 8.513983368458831e-05, "loss": 2.3319820404052733, "memory(GiB)": 72.85, "step": 29415, "token_acc": 0.45307443365695793, "train_speed(iter/s)": 0.670758 }, { "epoch": 1.2604429973008868, "grad_norm": 4.8030924797058105, "learning_rate": 8.51350458678408e-05, "loss": 2.1845388412475586, "memory(GiB)": 72.85, "step": 29420, "token_acc": 0.5355805243445693, "train_speed(iter/s)": 0.67077 }, { "epoch": 1.2606572126301359, "grad_norm": 4.099498748779297, "learning_rate": 8.513025741458897e-05, "loss": 2.647276496887207, "memory(GiB)": 72.85, "step": 29425, "token_acc": 0.4652777777777778, "train_speed(iter/s)": 0.670787 }, { "epoch": 1.2608714279593847, "grad_norm": 4.603829860687256, "learning_rate": 8.512546832491958e-05, "loss": 2.3608463287353514, "memory(GiB)": 72.85, "step": 29430, "token_acc": 0.5020080321285141, "train_speed(iter/s)": 0.670786 }, { "epoch": 1.2610856432886337, "grad_norm": 3.8536365032196045, "learning_rate": 8.512067859891939e-05, "loss": 2.0950464248657226, "memory(GiB)": 72.85, "step": 29435, "token_acc": 0.531055900621118, "train_speed(iter/s)": 0.670805 }, { "epoch": 1.2612998586178827, "grad_norm": 4.685035228729248, "learning_rate": 8.511588823667516e-05, "loss": 2.152060127258301, "memory(GiB)": 72.85, "step": 29440, "token_acc": 0.5361216730038023, "train_speed(iter/s)": 0.670788 }, { "epoch": 1.2615140739471316, "grad_norm": 4.316427707672119, "learning_rate": 8.511109723827366e-05, "loss": 2.430608367919922, "memory(GiB)": 72.85, "step": 29445, "token_acc": 0.4935897435897436, "train_speed(iter/s)": 0.670808 }, { "epoch": 1.2617282892763806, "grad_norm": 3.808300495147705, "learning_rate": 8.51063056038017e-05, "loss": 2.470743179321289, "memory(GiB)": 72.85, "step": 29450, "token_acc": 0.4589041095890411, "train_speed(iter/s)": 
0.670817 }, { "epoch": 1.2619425046056296, "grad_norm": 3.90875506401062, "learning_rate": 8.51015133333461e-05, "loss": 2.359955978393555, "memory(GiB)": 72.85, "step": 29455, "token_acc": 0.4755244755244755, "train_speed(iter/s)": 0.670817 }, { "epoch": 1.2621567199348784, "grad_norm": 4.48732328414917, "learning_rate": 8.509672042699364e-05, "loss": 2.5372695922851562, "memory(GiB)": 72.85, "step": 29460, "token_acc": 0.48, "train_speed(iter/s)": 0.670822 }, { "epoch": 1.2623709352641275, "grad_norm": 4.214877605438232, "learning_rate": 8.509192688483116e-05, "loss": 2.5331228256225584, "memory(GiB)": 72.85, "step": 29465, "token_acc": 0.41605839416058393, "train_speed(iter/s)": 0.670831 }, { "epoch": 1.2625851505933765, "grad_norm": 7.111804485321045, "learning_rate": 8.508713270694552e-05, "loss": 2.4731597900390625, "memory(GiB)": 72.85, "step": 29470, "token_acc": 0.48226950354609927, "train_speed(iter/s)": 0.670831 }, { "epoch": 1.2627993659226253, "grad_norm": 4.748203754425049, "learning_rate": 8.508233789342355e-05, "loss": 2.74530086517334, "memory(GiB)": 72.85, "step": 29475, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.670834 }, { "epoch": 1.2630135812518744, "grad_norm": 3.7853145599365234, "learning_rate": 8.507754244435212e-05, "loss": 2.3218482971191405, "memory(GiB)": 72.85, "step": 29480, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.670827 }, { "epoch": 1.2632277965811234, "grad_norm": 4.900112628936768, "learning_rate": 8.507274635981811e-05, "loss": 2.5175336837768554, "memory(GiB)": 72.85, "step": 29485, "token_acc": 0.42810457516339867, "train_speed(iter/s)": 0.670829 }, { "epoch": 1.2634420119103722, "grad_norm": 4.231743335723877, "learning_rate": 8.506794963990838e-05, "loss": 2.122623825073242, "memory(GiB)": 72.85, "step": 29490, "token_acc": 0.5331230283911672, "train_speed(iter/s)": 0.670828 }, { "epoch": 1.2636562272396212, "grad_norm": 3.8845009803771973, "learning_rate": 8.506315228470986e-05, "loss": 
2.303715133666992, "memory(GiB)": 72.85, "step": 29495, "token_acc": 0.5034965034965035, "train_speed(iter/s)": 0.670814 }, { "epoch": 1.2638704425688703, "grad_norm": 4.717406749725342, "learning_rate": 8.505835429430946e-05, "loss": 2.413782501220703, "memory(GiB)": 72.85, "step": 29500, "token_acc": 0.5152838427947598, "train_speed(iter/s)": 0.670832 }, { "epoch": 1.2638704425688703, "eval_loss": 2.0089194774627686, "eval_runtime": 16.6038, "eval_samples_per_second": 6.023, "eval_steps_per_second": 6.023, "eval_token_acc": 0.522038567493113, "step": 29500 }, { "epoch": 1.264084657898119, "grad_norm": 4.44148063659668, "learning_rate": 8.505355566879407e-05, "loss": 2.359353256225586, "memory(GiB)": 72.85, "step": 29505, "token_acc": 0.515748031496063, "train_speed(iter/s)": 0.670544 }, { "epoch": 1.2642988732273681, "grad_norm": 3.7270636558532715, "learning_rate": 8.504875640825061e-05, "loss": 2.196018409729004, "memory(GiB)": 72.85, "step": 29510, "token_acc": 0.5111111111111111, "train_speed(iter/s)": 0.670518 }, { "epoch": 1.2645130885566171, "grad_norm": 5.296363353729248, "learning_rate": 8.504395651276609e-05, "loss": 2.479753112792969, "memory(GiB)": 72.85, "step": 29515, "token_acc": 0.4527027027027027, "train_speed(iter/s)": 0.670537 }, { "epoch": 1.264727303885866, "grad_norm": 3.795776844024658, "learning_rate": 8.50391559824274e-05, "loss": 2.376881790161133, "memory(GiB)": 72.85, "step": 29520, "token_acc": 0.4851190476190476, "train_speed(iter/s)": 0.670517 }, { "epoch": 1.264941519215115, "grad_norm": 4.82700252532959, "learning_rate": 8.503435481732153e-05, "loss": 2.4636077880859375, "memory(GiB)": 72.85, "step": 29525, "token_acc": 0.48554913294797686, "train_speed(iter/s)": 0.670525 }, { "epoch": 1.265155734544364, "grad_norm": 4.344024658203125, "learning_rate": 8.502955301753548e-05, "loss": 2.5588302612304688, "memory(GiB)": 72.85, "step": 29530, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.67054 }, { "epoch": 
1.2653699498736128, "grad_norm": 3.9074740409851074, "learning_rate": 8.502475058315619e-05, "loss": 2.333202934265137, "memory(GiB)": 72.85, "step": 29535, "token_acc": 0.5096153846153846, "train_speed(iter/s)": 0.670523 }, { "epoch": 1.2655841652028619, "grad_norm": 4.308223247528076, "learning_rate": 8.501994751427072e-05, "loss": 2.332021141052246, "memory(GiB)": 72.85, "step": 29540, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.670515 }, { "epoch": 1.265798380532111, "grad_norm": 3.4620604515075684, "learning_rate": 8.501514381096601e-05, "loss": 2.16663818359375, "memory(GiB)": 72.85, "step": 29545, "token_acc": 0.535593220338983, "train_speed(iter/s)": 0.670511 }, { "epoch": 1.2660125958613597, "grad_norm": 4.378659725189209, "learning_rate": 8.501033947332915e-05, "loss": 2.4758590698242187, "memory(GiB)": 72.85, "step": 29550, "token_acc": 0.48344370860927155, "train_speed(iter/s)": 0.670493 }, { "epoch": 1.2662268111906088, "grad_norm": 5.052291393280029, "learning_rate": 8.500553450144713e-05, "loss": 2.1480369567871094, "memory(GiB)": 72.85, "step": 29555, "token_acc": 0.5338078291814946, "train_speed(iter/s)": 0.670511 }, { "epoch": 1.2664410265198578, "grad_norm": 4.2989654541015625, "learning_rate": 8.500072889540702e-05, "loss": 2.4450902938842773, "memory(GiB)": 72.85, "step": 29560, "token_acc": 0.5033783783783784, "train_speed(iter/s)": 0.670528 }, { "epoch": 1.2666552418491066, "grad_norm": 5.289300918579102, "learning_rate": 8.499592265529588e-05, "loss": 2.296860122680664, "memory(GiB)": 72.85, "step": 29565, "token_acc": 0.5378787878787878, "train_speed(iter/s)": 0.670541 }, { "epoch": 1.2668694571783556, "grad_norm": 4.375695705413818, "learning_rate": 8.499111578120078e-05, "loss": 2.329682540893555, "memory(GiB)": 72.85, "step": 29570, "token_acc": 0.5340909090909091, "train_speed(iter/s)": 0.670561 }, { "epoch": 1.2670836725076047, "grad_norm": 5.61409854888916, "learning_rate": 8.498630827320877e-05, "loss": 
2.164872741699219, "memory(GiB)": 72.85, "step": 29575, "token_acc": 0.5144927536231884, "train_speed(iter/s)": 0.67057 }, { "epoch": 1.2672978878368535, "grad_norm": 5.703190326690674, "learning_rate": 8.498150013140698e-05, "loss": 2.374569892883301, "memory(GiB)": 72.85, "step": 29580, "token_acc": 0.5355805243445693, "train_speed(iter/s)": 0.670568 }, { "epoch": 1.2675121031661025, "grad_norm": 3.4593617916107178, "learning_rate": 8.49766913558825e-05, "loss": 2.459248161315918, "memory(GiB)": 72.85, "step": 29585, "token_acc": 0.4735099337748344, "train_speed(iter/s)": 0.670559 }, { "epoch": 1.2677263184953516, "grad_norm": 3.4466521739959717, "learning_rate": 8.497188194672244e-05, "loss": 2.6456634521484377, "memory(GiB)": 72.85, "step": 29590, "token_acc": 0.42048517520215634, "train_speed(iter/s)": 0.670556 }, { "epoch": 1.2679405338246004, "grad_norm": 4.981774806976318, "learning_rate": 8.496707190401394e-05, "loss": 2.533096122741699, "memory(GiB)": 72.85, "step": 29595, "token_acc": 0.4463667820069204, "train_speed(iter/s)": 0.670551 }, { "epoch": 1.2681547491538494, "grad_norm": 4.141618251800537, "learning_rate": 8.496226122784413e-05, "loss": 2.4391780853271485, "memory(GiB)": 72.85, "step": 29600, "token_acc": 0.49169435215946844, "train_speed(iter/s)": 0.670569 }, { "epoch": 1.2683689644830984, "grad_norm": 3.433145761489868, "learning_rate": 8.495744991830016e-05, "loss": 2.3109498977661134, "memory(GiB)": 72.85, "step": 29605, "token_acc": 0.5120967741935484, "train_speed(iter/s)": 0.670605 }, { "epoch": 1.2685831798123473, "grad_norm": 4.200360298156738, "learning_rate": 8.495263797546919e-05, "loss": 2.0787565231323244, "memory(GiB)": 72.85, "step": 29610, "token_acc": 0.5622317596566524, "train_speed(iter/s)": 0.670596 }, { "epoch": 1.2687973951415963, "grad_norm": 4.329577445983887, "learning_rate": 8.49478253994384e-05, "loss": 2.4701976776123047, "memory(GiB)": 72.85, "step": 29615, "token_acc": 0.4597315436241611, "train_speed(iter/s)": 
0.670586 }, { "epoch": 1.2690116104708453, "grad_norm": 3.919633150100708, "learning_rate": 8.494301219029495e-05, "loss": 2.48515625, "memory(GiB)": 72.85, "step": 29620, "token_acc": 0.46099290780141844, "train_speed(iter/s)": 0.670612 }, { "epoch": 1.2692258258000941, "grad_norm": 3.3142876625061035, "learning_rate": 8.493819834812607e-05, "loss": 2.2811120986938476, "memory(GiB)": 72.85, "step": 29625, "token_acc": 0.4784172661870504, "train_speed(iter/s)": 0.670626 }, { "epoch": 1.2694400411293432, "grad_norm": 3.994901418685913, "learning_rate": 8.493338387301895e-05, "loss": 2.462966537475586, "memory(GiB)": 72.85, "step": 29630, "token_acc": 0.5058823529411764, "train_speed(iter/s)": 0.670626 }, { "epoch": 1.2696542564585922, "grad_norm": 4.400965690612793, "learning_rate": 8.492856876506082e-05, "loss": 2.3434965133666994, "memory(GiB)": 72.85, "step": 29635, "token_acc": 0.504950495049505, "train_speed(iter/s)": 0.670618 }, { "epoch": 1.269868471787841, "grad_norm": 3.9088408946990967, "learning_rate": 8.492375302433887e-05, "loss": 2.3744789123535157, "memory(GiB)": 72.85, "step": 29640, "token_acc": 0.4697508896797153, "train_speed(iter/s)": 0.670586 }, { "epoch": 1.27008268711709, "grad_norm": 3.079373598098755, "learning_rate": 8.49189366509404e-05, "loss": 2.356658172607422, "memory(GiB)": 72.85, "step": 29645, "token_acc": 0.52, "train_speed(iter/s)": 0.670589 }, { "epoch": 1.270296902446339, "grad_norm": 4.293788909912109, "learning_rate": 8.491411964495264e-05, "loss": 2.595718765258789, "memory(GiB)": 72.85, "step": 29650, "token_acc": 0.47202797202797203, "train_speed(iter/s)": 0.670575 }, { "epoch": 1.270511117775588, "grad_norm": 3.904979944229126, "learning_rate": 8.490930200646283e-05, "loss": 2.3759702682495116, "memory(GiB)": 72.85, "step": 29655, "token_acc": 0.46886446886446886, "train_speed(iter/s)": 0.670596 }, { "epoch": 1.270725333104837, "grad_norm": 3.9976959228515625, "learning_rate": 8.490448373555828e-05, "loss": 
2.230230522155762, "memory(GiB)": 72.85, "step": 29660, "token_acc": 0.5100671140939598, "train_speed(iter/s)": 0.670606 }, { "epoch": 1.270939548434086, "grad_norm": 5.568293571472168, "learning_rate": 8.489966483232626e-05, "loss": 2.3627681732177734, "memory(GiB)": 72.85, "step": 29665, "token_acc": 0.4591194968553459, "train_speed(iter/s)": 0.670624 }, { "epoch": 1.2711537637633348, "grad_norm": 4.519097805023193, "learning_rate": 8.489484529685407e-05, "loss": 2.2575008392333986, "memory(GiB)": 72.85, "step": 29670, "token_acc": 0.5226586102719033, "train_speed(iter/s)": 0.670631 }, { "epoch": 1.2713679790925838, "grad_norm": 3.226100444793701, "learning_rate": 8.489002512922903e-05, "loss": 2.492184066772461, "memory(GiB)": 72.85, "step": 29675, "token_acc": 0.4789272030651341, "train_speed(iter/s)": 0.67065 }, { "epoch": 1.2715821944218328, "grad_norm": 3.1604559421539307, "learning_rate": 8.488520432953848e-05, "loss": 2.227401351928711, "memory(GiB)": 72.85, "step": 29680, "token_acc": 0.5298245614035088, "train_speed(iter/s)": 0.670668 }, { "epoch": 1.2717964097510817, "grad_norm": 5.3343634605407715, "learning_rate": 8.488038289786969e-05, "loss": 2.252358818054199, "memory(GiB)": 72.85, "step": 29685, "token_acc": 0.5254901960784314, "train_speed(iter/s)": 0.670681 }, { "epoch": 1.2720106250803307, "grad_norm": 5.65110969543457, "learning_rate": 8.487556083431005e-05, "loss": 2.343344306945801, "memory(GiB)": 72.85, "step": 29690, "token_acc": 0.5096153846153846, "train_speed(iter/s)": 0.670702 }, { "epoch": 1.2722248404095797, "grad_norm": 3.963519334793091, "learning_rate": 8.487073813894692e-05, "loss": 2.4651880264282227, "memory(GiB)": 72.85, "step": 29695, "token_acc": 0.49390243902439024, "train_speed(iter/s)": 0.670705 }, { "epoch": 1.2724390557388285, "grad_norm": 3.5277929306030273, "learning_rate": 8.486591481186765e-05, "loss": 2.2869754791259767, "memory(GiB)": 72.85, "step": 29700, "token_acc": 0.4738675958188153, "train_speed(iter/s)": 
0.670689 }, { "epoch": 1.2726532710680776, "grad_norm": 6.1757683753967285, "learning_rate": 8.486109085315963e-05, "loss": 2.353869819641113, "memory(GiB)": 72.85, "step": 29705, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.670666 }, { "epoch": 1.2728674863973266, "grad_norm": 3.7843170166015625, "learning_rate": 8.485626626291025e-05, "loss": 2.1670452117919923, "memory(GiB)": 72.85, "step": 29710, "token_acc": 0.5165562913907285, "train_speed(iter/s)": 0.670686 }, { "epoch": 1.2730817017265754, "grad_norm": 5.331122398376465, "learning_rate": 8.48514410412069e-05, "loss": 2.065746879577637, "memory(GiB)": 72.85, "step": 29715, "token_acc": 0.5299145299145299, "train_speed(iter/s)": 0.670684 }, { "epoch": 1.2732959170558245, "grad_norm": 3.54829740524292, "learning_rate": 8.484661518813701e-05, "loss": 2.302295112609863, "memory(GiB)": 72.85, "step": 29720, "token_acc": 0.5480427046263345, "train_speed(iter/s)": 0.670686 }, { "epoch": 1.2735101323850735, "grad_norm": 4.863493919372559, "learning_rate": 8.4841788703788e-05, "loss": 2.4423105239868166, "memory(GiB)": 72.85, "step": 29725, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.670688 }, { "epoch": 1.2737243477143223, "grad_norm": 5.278687477111816, "learning_rate": 8.483696158824729e-05, "loss": 2.5305686950683595, "memory(GiB)": 72.85, "step": 29730, "token_acc": 0.47586206896551725, "train_speed(iter/s)": 0.670672 }, { "epoch": 1.2739385630435713, "grad_norm": 3.631648302078247, "learning_rate": 8.483213384160234e-05, "loss": 2.433330535888672, "memory(GiB)": 72.85, "step": 29735, "token_acc": 0.49, "train_speed(iter/s)": 0.670683 }, { "epoch": 1.2741527783728204, "grad_norm": 3.4320833683013916, "learning_rate": 8.482730546394062e-05, "loss": 2.273563575744629, "memory(GiB)": 72.85, "step": 29740, "token_acc": 0.5145631067961165, "train_speed(iter/s)": 0.670677 }, { "epoch": 1.2743669937020692, "grad_norm": 5.170049667358398, "learning_rate": 8.48224764553496e-05, "loss": 
2.2933712005615234, "memory(GiB)": 72.85, "step": 29745, "token_acc": 0.47988505747126436, "train_speed(iter/s)": 0.670686 }, { "epoch": 1.2745812090313182, "grad_norm": 3.1765356063842773, "learning_rate": 8.481764681591674e-05, "loss": 2.6043140411376955, "memory(GiB)": 72.85, "step": 29750, "token_acc": 0.48632218844984804, "train_speed(iter/s)": 0.670709 }, { "epoch": 1.2747954243605673, "grad_norm": 4.193886756896973, "learning_rate": 8.481281654572955e-05, "loss": 2.1058221817016602, "memory(GiB)": 72.85, "step": 29755, "token_acc": 0.5047318611987381, "train_speed(iter/s)": 0.67069 }, { "epoch": 1.275009639689816, "grad_norm": 5.543620586395264, "learning_rate": 8.480798564487553e-05, "loss": 2.454357147216797, "memory(GiB)": 72.85, "step": 29760, "token_acc": 0.45425867507886436, "train_speed(iter/s)": 0.670679 }, { "epoch": 1.275223855019065, "grad_norm": 4.632687568664551, "learning_rate": 8.480315411344219e-05, "loss": 2.3207496643066405, "memory(GiB)": 72.85, "step": 29765, "token_acc": 0.5174825174825175, "train_speed(iter/s)": 0.670704 }, { "epoch": 1.2754380703483141, "grad_norm": 5.408384323120117, "learning_rate": 8.479832195151707e-05, "loss": 2.2285634994506838, "memory(GiB)": 72.85, "step": 29770, "token_acc": 0.5056179775280899, "train_speed(iter/s)": 0.670729 }, { "epoch": 1.2756522856775632, "grad_norm": 4.0010600090026855, "learning_rate": 8.479348915918771e-05, "loss": 2.4917423248291017, "memory(GiB)": 72.85, "step": 29775, "token_acc": 0.5, "train_speed(iter/s)": 0.670715 }, { "epoch": 1.275866501006812, "grad_norm": 3.8575263023376465, "learning_rate": 8.478865573654166e-05, "loss": 2.193657875061035, "memory(GiB)": 72.85, "step": 29780, "token_acc": 0.4965753424657534, "train_speed(iter/s)": 0.670695 }, { "epoch": 1.276080716336061, "grad_norm": 4.3006978034973145, "learning_rate": 8.478382168366647e-05, "loss": 2.2070428848266603, "memory(GiB)": 72.85, "step": 29785, "token_acc": 0.4880546075085324, "train_speed(iter/s)": 0.670713 }, { 
"epoch": 1.27629493166531, "grad_norm": 4.115438461303711, "learning_rate": 8.477898700064972e-05, "loss": 2.3245403289794924, "memory(GiB)": 72.85, "step": 29790, "token_acc": 0.4840989399293286, "train_speed(iter/s)": 0.67074 }, { "epoch": 1.2765091469945589, "grad_norm": 4.247696876525879, "learning_rate": 8.477415168757899e-05, "loss": 2.0123586654663086, "memory(GiB)": 72.85, "step": 29795, "token_acc": 0.5853658536585366, "train_speed(iter/s)": 0.670749 }, { "epoch": 1.276723362323808, "grad_norm": 5.272927284240723, "learning_rate": 8.476931574454188e-05, "loss": 2.3167205810546876, "memory(GiB)": 72.85, "step": 29800, "token_acc": 0.45195729537366547, "train_speed(iter/s)": 0.670744 }, { "epoch": 1.276937577653057, "grad_norm": 3.6412200927734375, "learning_rate": 8.4764479171626e-05, "loss": 2.439450263977051, "memory(GiB)": 72.85, "step": 29805, "token_acc": 0.4574780058651026, "train_speed(iter/s)": 0.670741 }, { "epoch": 1.2771517929823057, "grad_norm": 3.7937331199645996, "learning_rate": 8.475964196891898e-05, "loss": 2.540605926513672, "memory(GiB)": 72.85, "step": 29810, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.670747 }, { "epoch": 1.2773660083115548, "grad_norm": 4.397784233093262, "learning_rate": 8.475480413650842e-05, "loss": 2.244602012634277, "memory(GiB)": 72.85, "step": 29815, "token_acc": 0.49411764705882355, "train_speed(iter/s)": 0.670719 }, { "epoch": 1.2775802236408038, "grad_norm": 3.516864061355591, "learning_rate": 8.474996567448201e-05, "loss": 2.4734722137451173, "memory(GiB)": 72.85, "step": 29820, "token_acc": 0.45938375350140054, "train_speed(iter/s)": 0.670721 }, { "epoch": 1.2777944389700526, "grad_norm": 3.232645034790039, "learning_rate": 8.474512658292734e-05, "loss": 2.3524694442749023, "memory(GiB)": 72.85, "step": 29825, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.670723 }, { "epoch": 1.2780086542993017, "grad_norm": 4.037967205047607, "learning_rate": 8.474028686193214e-05, "loss": 
2.062612533569336, "memory(GiB)": 72.85, "step": 29830, "token_acc": 0.5563139931740614, "train_speed(iter/s)": 0.670733 }, { "epoch": 1.2782228696285507, "grad_norm": 4.1755051612854, "learning_rate": 8.473544651158404e-05, "loss": 2.298407554626465, "memory(GiB)": 72.85, "step": 29835, "token_acc": 0.5082508250825083, "train_speed(iter/s)": 0.67074 }, { "epoch": 1.2784370849577995, "grad_norm": 2.742119789123535, "learning_rate": 8.473060553197073e-05, "loss": 2.105813980102539, "memory(GiB)": 72.85, "step": 29840, "token_acc": 0.5363321799307958, "train_speed(iter/s)": 0.670742 }, { "epoch": 1.2786513002870485, "grad_norm": 4.786521911621094, "learning_rate": 8.472576392317994e-05, "loss": 2.5731988906860352, "memory(GiB)": 72.85, "step": 29845, "token_acc": 0.436426116838488, "train_speed(iter/s)": 0.670744 }, { "epoch": 1.2788655156162976, "grad_norm": 3.9956040382385254, "learning_rate": 8.472092168529935e-05, "loss": 2.479897308349609, "memory(GiB)": 72.85, "step": 29850, "token_acc": 0.5, "train_speed(iter/s)": 0.670746 }, { "epoch": 1.2790797309455464, "grad_norm": 4.217756748199463, "learning_rate": 8.47160788184167e-05, "loss": 2.4661380767822267, "memory(GiB)": 72.85, "step": 29855, "token_acc": 0.5245398773006135, "train_speed(iter/s)": 0.670712 }, { "epoch": 1.2792939462747954, "grad_norm": 4.052262783050537, "learning_rate": 8.47112353226197e-05, "loss": 2.489189338684082, "memory(GiB)": 72.85, "step": 29860, "token_acc": 0.4709897610921502, "train_speed(iter/s)": 0.670695 }, { "epoch": 1.2795081616040445, "grad_norm": 3.879957675933838, "learning_rate": 8.470639119799613e-05, "loss": 2.4183429718017577, "memory(GiB)": 72.85, "step": 29865, "token_acc": 0.49226006191950467, "train_speed(iter/s)": 0.670713 }, { "epoch": 1.2797223769332933, "grad_norm": 7.326960563659668, "learning_rate": 8.470154644463371e-05, "loss": 2.471711349487305, "memory(GiB)": 72.85, "step": 29870, "token_acc": 0.4186991869918699, "train_speed(iter/s)": 0.670727 }, { "epoch": 
1.2799365922625423, "grad_norm": 4.781050205230713, "learning_rate": 8.469670106262025e-05, "loss": 2.0661617279052735, "memory(GiB)": 72.85, "step": 29875, "token_acc": 0.5587044534412956, "train_speed(iter/s)": 0.67071 }, { "epoch": 1.2801508075917913, "grad_norm": 4.382388114929199, "learning_rate": 8.469185505204347e-05, "loss": 2.254822540283203, "memory(GiB)": 72.85, "step": 29880, "token_acc": 0.46774193548387094, "train_speed(iter/s)": 0.670706 }, { "epoch": 1.2803650229210402, "grad_norm": 4.978410720825195, "learning_rate": 8.46870084129912e-05, "loss": 2.2522565841674806, "memory(GiB)": 72.85, "step": 29885, "token_acc": 0.517799352750809, "train_speed(iter/s)": 0.670718 }, { "epoch": 1.2805792382502892, "grad_norm": 6.184935569763184, "learning_rate": 8.468216114555125e-05, "loss": 2.1838716506958007, "memory(GiB)": 72.85, "step": 29890, "token_acc": 0.5124555160142349, "train_speed(iter/s)": 0.670714 }, { "epoch": 1.2807934535795382, "grad_norm": 5.939759731292725, "learning_rate": 8.46773132498114e-05, "loss": 2.5701873779296873, "memory(GiB)": 72.85, "step": 29895, "token_acc": 0.4685714285714286, "train_speed(iter/s)": 0.670701 }, { "epoch": 1.281007668908787, "grad_norm": 3.9197471141815186, "learning_rate": 8.46724647258595e-05, "loss": 2.3065811157226563, "memory(GiB)": 72.85, "step": 29900, "token_acc": 0.5241635687732342, "train_speed(iter/s)": 0.670679 }, { "epoch": 1.281221884238036, "grad_norm": 6.306802749633789, "learning_rate": 8.466761557378337e-05, "loss": 2.5342735290527343, "memory(GiB)": 72.85, "step": 29905, "token_acc": 0.4935483870967742, "train_speed(iter/s)": 0.670661 }, { "epoch": 1.281436099567285, "grad_norm": 3.283576726913452, "learning_rate": 8.466276579367088e-05, "loss": 2.556218719482422, "memory(GiB)": 72.85, "step": 29910, "token_acc": 0.4447852760736196, "train_speed(iter/s)": 0.670682 }, { "epoch": 1.281650314896534, "grad_norm": 4.10752010345459, "learning_rate": 8.465791538560983e-05, "loss": 2.30550594329834, 
"memory(GiB)": 72.85, "step": 29915, "token_acc": 0.501466275659824, "train_speed(iter/s)": 0.670717 }, { "epoch": 1.281864530225783, "grad_norm": 4.969217300415039, "learning_rate": 8.465306434968817e-05, "loss": 2.4337425231933594, "memory(GiB)": 72.85, "step": 29920, "token_acc": 0.4870848708487085, "train_speed(iter/s)": 0.670685 }, { "epoch": 1.282078745555032, "grad_norm": 3.252267837524414, "learning_rate": 8.464821268599373e-05, "loss": 2.4037010192871096, "memory(GiB)": 72.85, "step": 29925, "token_acc": 0.4942528735632184, "train_speed(iter/s)": 0.670682 }, { "epoch": 1.2822929608842808, "grad_norm": 3.788663387298584, "learning_rate": 8.46433603946144e-05, "loss": 2.1392585754394533, "memory(GiB)": 72.85, "step": 29930, "token_acc": 0.5136186770428015, "train_speed(iter/s)": 0.670702 }, { "epoch": 1.2825071762135298, "grad_norm": 4.891904354095459, "learning_rate": 8.463850747563812e-05, "loss": 2.38546199798584, "memory(GiB)": 72.85, "step": 29935, "token_acc": 0.48161764705882354, "train_speed(iter/s)": 0.670703 }, { "epoch": 1.2827213915427789, "grad_norm": 4.890442848205566, "learning_rate": 8.463365392915279e-05, "loss": 2.600347900390625, "memory(GiB)": 72.85, "step": 29940, "token_acc": 0.487012987012987, "train_speed(iter/s)": 0.670682 }, { "epoch": 1.282935606872028, "grad_norm": 5.434875011444092, "learning_rate": 8.462879975524631e-05, "loss": 2.269286346435547, "memory(GiB)": 72.85, "step": 29945, "token_acc": 0.5017667844522968, "train_speed(iter/s)": 0.670668 }, { "epoch": 1.2831498222012767, "grad_norm": 4.4030375480651855, "learning_rate": 8.462394495400664e-05, "loss": 2.4017074584960936, "memory(GiB)": 72.85, "step": 29950, "token_acc": 0.4886731391585761, "train_speed(iter/s)": 0.670661 }, { "epoch": 1.2833640375305257, "grad_norm": 4.449587821960449, "learning_rate": 8.461908952552173e-05, "loss": 2.309645080566406, "memory(GiB)": 72.85, "step": 29955, "token_acc": 0.47653429602888087, "train_speed(iter/s)": 0.670659 }, { "epoch": 
1.2835782528597748, "grad_norm": 4.917846202850342, "learning_rate": 8.461423346987954e-05, "loss": 2.1858903884887697, "memory(GiB)": 72.85, "step": 29960, "token_acc": 0.53125, "train_speed(iter/s)": 0.670628 }, { "epoch": 1.2837924681890236, "grad_norm": 4.265270709991455, "learning_rate": 8.460937678716804e-05, "loss": 2.2368465423583985, "memory(GiB)": 72.85, "step": 29965, "token_acc": 0.4930555555555556, "train_speed(iter/s)": 0.670648 }, { "epoch": 1.2840066835182726, "grad_norm": 4.7376790046691895, "learning_rate": 8.460451947747521e-05, "loss": 2.5397329330444336, "memory(GiB)": 72.85, "step": 29970, "token_acc": 0.4584837545126354, "train_speed(iter/s)": 0.670655 }, { "epoch": 1.2842208988475217, "grad_norm": 4.539063930511475, "learning_rate": 8.459966154088905e-05, "loss": 2.336394691467285, "memory(GiB)": 72.85, "step": 29975, "token_acc": 0.5215686274509804, "train_speed(iter/s)": 0.670647 }, { "epoch": 1.2844351141767705, "grad_norm": 5.492408752441406, "learning_rate": 8.459480297749757e-05, "loss": 2.0177896499633787, "memory(GiB)": 72.85, "step": 29980, "token_acc": 0.5257352941176471, "train_speed(iter/s)": 0.670667 }, { "epoch": 1.2846493295060195, "grad_norm": 3.8661282062530518, "learning_rate": 8.458994378738878e-05, "loss": 2.0703033447265624, "memory(GiB)": 72.85, "step": 29985, "token_acc": 0.5463576158940397, "train_speed(iter/s)": 0.670683 }, { "epoch": 1.2848635448352685, "grad_norm": 4.178402900695801, "learning_rate": 8.45850839706507e-05, "loss": 2.544852066040039, "memory(GiB)": 72.85, "step": 29990, "token_acc": 0.45819397993311034, "train_speed(iter/s)": 0.67068 }, { "epoch": 1.2850777601645174, "grad_norm": 3.4775140285491943, "learning_rate": 8.458022352737138e-05, "loss": 2.3517759323120115, "memory(GiB)": 72.85, "step": 29995, "token_acc": 0.49544072948328266, "train_speed(iter/s)": 0.67069 }, { "epoch": 1.2852919754937664, "grad_norm": 3.658792734146118, "learning_rate": 8.457536245763889e-05, "loss": 2.1962745666503904, 
"memory(GiB)": 72.85, "step": 30000, "token_acc": 0.5119453924914675, "train_speed(iter/s)": 0.670693 }, { "epoch": 1.2852919754937664, "eval_loss": 2.0824782848358154, "eval_runtime": 16.0466, "eval_samples_per_second": 6.232, "eval_steps_per_second": 6.232, "eval_token_acc": 0.48297604035308955, "step": 30000 }, { "epoch": 1.2855061908230154, "grad_norm": 3.8011057376861572, "learning_rate": 8.457050076154125e-05, "loss": 2.464375305175781, "memory(GiB)": 72.85, "step": 30005, "token_acc": 0.48338081671415006, "train_speed(iter/s)": 0.670424 }, { "epoch": 1.2857204061522642, "grad_norm": 3.9497244358062744, "learning_rate": 8.456563843916658e-05, "loss": 2.2215248107910157, "memory(GiB)": 72.85, "step": 30010, "token_acc": 0.44876325088339225, "train_speed(iter/s)": 0.670422 }, { "epoch": 1.2859346214815133, "grad_norm": 5.492932319641113, "learning_rate": 8.456077549060293e-05, "loss": 2.203835678100586, "memory(GiB)": 72.85, "step": 30015, "token_acc": 0.5035460992907801, "train_speed(iter/s)": 0.670452 }, { "epoch": 1.2861488368107623, "grad_norm": 3.9491307735443115, "learning_rate": 8.455591191593843e-05, "loss": 2.561419677734375, "memory(GiB)": 72.85, "step": 30020, "token_acc": 0.45257452574525747, "train_speed(iter/s)": 0.670472 }, { "epoch": 1.2863630521400111, "grad_norm": 3.9663166999816895, "learning_rate": 8.455104771526114e-05, "loss": 2.465948295593262, "memory(GiB)": 72.85, "step": 30025, "token_acc": 0.5, "train_speed(iter/s)": 0.670449 }, { "epoch": 1.2865772674692602, "grad_norm": 4.19925594329834, "learning_rate": 8.454618288865923e-05, "loss": 2.202913284301758, "memory(GiB)": 72.85, "step": 30030, "token_acc": 0.49224806201550386, "train_speed(iter/s)": 0.670421 }, { "epoch": 1.2867914827985092, "grad_norm": 3.9125027656555176, "learning_rate": 8.454131743622081e-05, "loss": 2.0888437271118163, "memory(GiB)": 72.85, "step": 30035, "token_acc": 0.5265017667844523, "train_speed(iter/s)": 0.670443 }, { "epoch": 1.287005698127758, "grad_norm": 
3.7869527339935303, "learning_rate": 8.453645135803403e-05, "loss": 2.4375991821289062, "memory(GiB)": 72.85, "step": 30040, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.670409 }, { "epoch": 1.287219913457007, "grad_norm": 4.674441337585449, "learning_rate": 8.453158465418702e-05, "loss": 2.45717830657959, "memory(GiB)": 72.85, "step": 30045, "token_acc": 0.47854785478547857, "train_speed(iter/s)": 0.670424 }, { "epoch": 1.287434128786256, "grad_norm": 4.191596508026123, "learning_rate": 8.452671732476797e-05, "loss": 2.1591489791870115, "memory(GiB)": 72.85, "step": 30050, "token_acc": 0.5216049382716049, "train_speed(iter/s)": 0.670428 }, { "epoch": 1.2876483441155049, "grad_norm": 3.888339042663574, "learning_rate": 8.452184936986505e-05, "loss": 2.655838203430176, "memory(GiB)": 72.85, "step": 30055, "token_acc": 0.4896142433234421, "train_speed(iter/s)": 0.67044 }, { "epoch": 1.287862559444754, "grad_norm": 4.509603500366211, "learning_rate": 8.451698078956643e-05, "loss": 2.0828346252441405, "memory(GiB)": 72.85, "step": 30060, "token_acc": 0.5412844036697247, "train_speed(iter/s)": 0.670424 }, { "epoch": 1.288076774774003, "grad_norm": 4.797011852264404, "learning_rate": 8.451211158396033e-05, "loss": 2.311946487426758, "memory(GiB)": 72.85, "step": 30065, "token_acc": 0.4746376811594203, "train_speed(iter/s)": 0.670409 }, { "epoch": 1.2882909901032518, "grad_norm": 4.7675862312316895, "learning_rate": 8.450724175313495e-05, "loss": 2.3470361709594725, "memory(GiB)": 72.85, "step": 30070, "token_acc": 0.5053763440860215, "train_speed(iter/s)": 0.670412 }, { "epoch": 1.2885052054325008, "grad_norm": 3.072643280029297, "learning_rate": 8.450237129717852e-05, "loss": 2.2448619842529296, "memory(GiB)": 72.85, "step": 30075, "token_acc": 0.5016501650165016, "train_speed(iter/s)": 0.670432 }, { "epoch": 1.2887194207617498, "grad_norm": 5.231781005859375, "learning_rate": 8.449750021617926e-05, "loss": 2.262044143676758, "memory(GiB)": 72.85, "step": 
30080, "token_acc": 0.5252918287937743, "train_speed(iter/s)": 0.670425 }, { "epoch": 1.2889336360909986, "grad_norm": 3.4464550018310547, "learning_rate": 8.449262851022544e-05, "loss": 2.5354902267456056, "memory(GiB)": 72.85, "step": 30085, "token_acc": 0.46216216216216216, "train_speed(iter/s)": 0.670392 }, { "epoch": 1.2891478514202477, "grad_norm": 5.076977252960205, "learning_rate": 8.448775617940529e-05, "loss": 2.2903411865234373, "memory(GiB)": 72.85, "step": 30090, "token_acc": 0.4867549668874172, "train_speed(iter/s)": 0.670393 }, { "epoch": 1.2893620667494967, "grad_norm": 4.7468342781066895, "learning_rate": 8.448288322380709e-05, "loss": 1.9730754852294923, "memory(GiB)": 72.85, "step": 30095, "token_acc": 0.5605536332179931, "train_speed(iter/s)": 0.670403 }, { "epoch": 1.2895762820787455, "grad_norm": 4.053619384765625, "learning_rate": 8.447800964351908e-05, "loss": 2.1707687377929688, "memory(GiB)": 72.85, "step": 30100, "token_acc": 0.549520766773163, "train_speed(iter/s)": 0.670428 }, { "epoch": 1.2897904974079946, "grad_norm": 4.798998832702637, "learning_rate": 8.447313543862962e-05, "loss": 2.0605087280273438, "memory(GiB)": 72.85, "step": 30105, "token_acc": 0.5547703180212014, "train_speed(iter/s)": 0.670429 }, { "epoch": 1.2900047127372436, "grad_norm": 2.8571512699127197, "learning_rate": 8.446826060922695e-05, "loss": 2.262802505493164, "memory(GiB)": 72.85, "step": 30110, "token_acc": 0.5175718849840255, "train_speed(iter/s)": 0.670415 }, { "epoch": 1.2902189280664924, "grad_norm": 3.385033369064331, "learning_rate": 8.446338515539942e-05, "loss": 2.2537574768066406, "memory(GiB)": 72.85, "step": 30115, "token_acc": 0.5420560747663551, "train_speed(iter/s)": 0.670399 }, { "epoch": 1.2904331433957414, "grad_norm": 4.642463684082031, "learning_rate": 8.445850907723533e-05, "loss": 2.0058416366577148, "memory(GiB)": 72.85, "step": 30120, "token_acc": 0.5283018867924528, "train_speed(iter/s)": 0.670398 }, { "epoch": 1.2906473587249905, 
"grad_norm": 4.561819076538086, "learning_rate": 8.445363237482304e-05, "loss": 2.2406990051269533, "memory(GiB)": 72.85, "step": 30125, "token_acc": 0.5287356321839081, "train_speed(iter/s)": 0.670405 }, { "epoch": 1.2908615740542393, "grad_norm": 3.682042360305786, "learning_rate": 8.444875504825087e-05, "loss": 2.6465564727783204, "memory(GiB)": 72.85, "step": 30130, "token_acc": 0.4745222929936306, "train_speed(iter/s)": 0.670399 }, { "epoch": 1.2910757893834883, "grad_norm": 3.1460015773773193, "learning_rate": 8.44438770976072e-05, "loss": 2.164988899230957, "memory(GiB)": 72.85, "step": 30135, "token_acc": 0.4913494809688581, "train_speed(iter/s)": 0.670436 }, { "epoch": 1.2912900047127374, "grad_norm": 6.029977798461914, "learning_rate": 8.443899852298034e-05, "loss": 2.724040412902832, "memory(GiB)": 72.85, "step": 30140, "token_acc": 0.48417721518987344, "train_speed(iter/s)": 0.670444 }, { "epoch": 1.2915042200419862, "grad_norm": 4.089663982391357, "learning_rate": 8.443411932445875e-05, "loss": 2.080008697509766, "memory(GiB)": 72.85, "step": 30145, "token_acc": 0.5462555066079295, "train_speed(iter/s)": 0.670442 }, { "epoch": 1.2917184353712352, "grad_norm": 4.408427715301514, "learning_rate": 8.44292395021308e-05, "loss": 2.306582450866699, "memory(GiB)": 72.85, "step": 30150, "token_acc": 0.5017301038062284, "train_speed(iter/s)": 0.670448 }, { "epoch": 1.2919326507004842, "grad_norm": 4.794512748718262, "learning_rate": 8.442435905608486e-05, "loss": 2.051478958129883, "memory(GiB)": 72.85, "step": 30155, "token_acc": 0.5321428571428571, "train_speed(iter/s)": 0.670471 }, { "epoch": 1.292146866029733, "grad_norm": 3.5266947746276855, "learning_rate": 8.441947798640934e-05, "loss": 2.439689064025879, "memory(GiB)": 72.85, "step": 30160, "token_acc": 0.4952681388012618, "train_speed(iter/s)": 0.670472 }, { "epoch": 1.292361081358982, "grad_norm": 4.983579635620117, "learning_rate": 8.441459629319273e-05, "loss": 2.5789007186889648, "memory(GiB)": 
72.85, "step": 30165, "token_acc": 0.4576271186440678, "train_speed(iter/s)": 0.670486 }, { "epoch": 1.2925752966882311, "grad_norm": 4.01373815536499, "learning_rate": 8.440971397652341e-05, "loss": 2.4345046997070314, "memory(GiB)": 72.85, "step": 30170, "token_acc": 0.5050505050505051, "train_speed(iter/s)": 0.670507 }, { "epoch": 1.29278951201748, "grad_norm": 3.468358278274536, "learning_rate": 8.440483103648983e-05, "loss": 2.195797157287598, "memory(GiB)": 72.85, "step": 30175, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.670512 }, { "epoch": 1.293003727346729, "grad_norm": 4.729220867156982, "learning_rate": 8.439994747318047e-05, "loss": 2.0749597549438477, "memory(GiB)": 72.85, "step": 30180, "token_acc": 0.5207667731629393, "train_speed(iter/s)": 0.670523 }, { "epoch": 1.293217942675978, "grad_norm": 3.935926914215088, "learning_rate": 8.439506328668379e-05, "loss": 2.1384220123291016, "memory(GiB)": 72.85, "step": 30185, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.670531 }, { "epoch": 1.2934321580052268, "grad_norm": 4.3849334716796875, "learning_rate": 8.439017847708828e-05, "loss": 2.4710437774658205, "memory(GiB)": 72.85, "step": 30190, "token_acc": 0.5095785440613027, "train_speed(iter/s)": 0.670541 }, { "epoch": 1.2936463733344759, "grad_norm": 4.3495707511901855, "learning_rate": 8.438529304448241e-05, "loss": 2.4897878646850584, "memory(GiB)": 72.85, "step": 30195, "token_acc": 0.5088967971530249, "train_speed(iter/s)": 0.670526 }, { "epoch": 1.2938605886637249, "grad_norm": 4.03389835357666, "learning_rate": 8.438040698895473e-05, "loss": 2.2771795272827147, "memory(GiB)": 72.85, "step": 30200, "token_acc": 0.5146579804560261, "train_speed(iter/s)": 0.670536 }, { "epoch": 1.2940748039929737, "grad_norm": 6.028606414794922, "learning_rate": 8.43755203105937e-05, "loss": 2.347586441040039, "memory(GiB)": 72.85, "step": 30205, "token_acc": 0.4419475655430712, "train_speed(iter/s)": 0.670576 }, { "epoch": 
1.2942890193222227, "grad_norm": 4.751256942749023, "learning_rate": 8.437063300948789e-05, "loss": 1.7580718994140625, "memory(GiB)": 72.85, "step": 30210, "token_acc": 0.5912408759124088, "train_speed(iter/s)": 0.670565 }, { "epoch": 1.2945032346514718, "grad_norm": 3.566877841949463, "learning_rate": 8.43657450857258e-05, "loss": 2.3308994293212892, "memory(GiB)": 72.85, "step": 30215, "token_acc": 0.48846153846153845, "train_speed(iter/s)": 0.67055 }, { "epoch": 1.2947174499807206, "grad_norm": 4.283622741699219, "learning_rate": 8.436085653939602e-05, "loss": 2.1997844696044924, "memory(GiB)": 72.85, "step": 30220, "token_acc": 0.5167785234899329, "train_speed(iter/s)": 0.670538 }, { "epoch": 1.2949316653099696, "grad_norm": 4.80388879776001, "learning_rate": 8.435596737058709e-05, "loss": 2.164881706237793, "memory(GiB)": 72.85, "step": 30225, "token_acc": 0.4772727272727273, "train_speed(iter/s)": 0.670541 }, { "epoch": 1.2951458806392186, "grad_norm": 3.946333885192871, "learning_rate": 8.435107757938755e-05, "loss": 2.3167219161987305, "memory(GiB)": 72.85, "step": 30230, "token_acc": 0.4755244755244755, "train_speed(iter/s)": 0.670553 }, { "epoch": 1.2953600959684675, "grad_norm": 4.26747989654541, "learning_rate": 8.434618716588606e-05, "loss": 1.9865524291992187, "memory(GiB)": 72.85, "step": 30235, "token_acc": 0.548951048951049, "train_speed(iter/s)": 0.670546 }, { "epoch": 1.2955743112977165, "grad_norm": 4.208287715911865, "learning_rate": 8.434129613017115e-05, "loss": 2.3744232177734377, "memory(GiB)": 72.85, "step": 30240, "token_acc": 0.5083612040133779, "train_speed(iter/s)": 0.67056 }, { "epoch": 1.2957885266269655, "grad_norm": 4.977441787719727, "learning_rate": 8.433640447233144e-05, "loss": 2.131130599975586, "memory(GiB)": 72.85, "step": 30245, "token_acc": 0.4961832061068702, "train_speed(iter/s)": 0.670527 }, { "epoch": 1.2960027419562143, "grad_norm": 4.110190391540527, "learning_rate": 8.433151219245557e-05, "loss": 2.367254638671875, 
"memory(GiB)": 72.85, "step": 30250, "token_acc": 0.4942528735632184, "train_speed(iter/s)": 0.670531 }, { "epoch": 1.2962169572854634, "grad_norm": 4.596216201782227, "learning_rate": 8.432661929063213e-05, "loss": 2.82930965423584, "memory(GiB)": 72.85, "step": 30255, "token_acc": 0.42704626334519574, "train_speed(iter/s)": 0.670528 }, { "epoch": 1.2964311726147124, "grad_norm": 4.073469161987305, "learning_rate": 8.43217257669498e-05, "loss": 2.2125011444091798, "memory(GiB)": 72.85, "step": 30260, "token_acc": 0.4983277591973244, "train_speed(iter/s)": 0.670519 }, { "epoch": 1.2966453879439612, "grad_norm": 4.758541584014893, "learning_rate": 8.43168316214972e-05, "loss": 2.4642444610595704, "memory(GiB)": 72.85, "step": 30265, "token_acc": 0.4788732394366197, "train_speed(iter/s)": 0.670546 }, { "epoch": 1.2968596032732103, "grad_norm": 3.309112310409546, "learning_rate": 8.431193685436301e-05, "loss": 2.546609878540039, "memory(GiB)": 72.85, "step": 30270, "token_acc": 0.44808743169398907, "train_speed(iter/s)": 0.67056 }, { "epoch": 1.2970738186024593, "grad_norm": 3.897979259490967, "learning_rate": 8.430704146563589e-05, "loss": 2.071503829956055, "memory(GiB)": 72.85, "step": 30275, "token_acc": 0.5367647058823529, "train_speed(iter/s)": 0.670566 }, { "epoch": 1.297288033931708, "grad_norm": 3.2487871646881104, "learning_rate": 8.430214545540454e-05, "loss": 2.1856409072875977, "memory(GiB)": 72.85, "step": 30280, "token_acc": 0.5304054054054054, "train_speed(iter/s)": 0.670566 }, { "epoch": 1.2975022492609571, "grad_norm": 3.7466979026794434, "learning_rate": 8.429724882375763e-05, "loss": 2.186947250366211, "memory(GiB)": 72.85, "step": 30285, "token_acc": 0.5328467153284672, "train_speed(iter/s)": 0.670563 }, { "epoch": 1.2977164645902062, "grad_norm": 5.0291032791137695, "learning_rate": 8.42923515707839e-05, "loss": 2.643162727355957, "memory(GiB)": 72.85, "step": 30290, "token_acc": 0.4394904458598726, "train_speed(iter/s)": 0.670582 }, { "epoch": 
1.297930679919455, "grad_norm": 3.4496676921844482, "learning_rate": 8.428745369657205e-05, "loss": 2.5263654708862306, "memory(GiB)": 72.85, "step": 30295, "token_acc": 0.4597014925373134, "train_speed(iter/s)": 0.670597 }, { "epoch": 1.298144895248704, "grad_norm": 3.54581356048584, "learning_rate": 8.42825552012108e-05, "loss": 2.131982421875, "memory(GiB)": 72.85, "step": 30300, "token_acc": 0.580952380952381, "train_speed(iter/s)": 0.670606 }, { "epoch": 1.298359110577953, "grad_norm": 4.3012542724609375, "learning_rate": 8.427765608478892e-05, "loss": 2.374094581604004, "memory(GiB)": 72.85, "step": 30305, "token_acc": 0.45387453874538747, "train_speed(iter/s)": 0.670586 }, { "epoch": 1.2985733259072019, "grad_norm": 5.428466320037842, "learning_rate": 8.427275634739514e-05, "loss": 2.3685062408447264, "memory(GiB)": 72.85, "step": 30310, "token_acc": 0.5245901639344263, "train_speed(iter/s)": 0.670594 }, { "epoch": 1.298787541236451, "grad_norm": 4.221463680267334, "learning_rate": 8.426785598911822e-05, "loss": 2.425282669067383, "memory(GiB)": 72.85, "step": 30315, "token_acc": 0.49337748344370863, "train_speed(iter/s)": 0.670596 }, { "epoch": 1.2990017565657, "grad_norm": 4.793148994445801, "learning_rate": 8.426295501004696e-05, "loss": 2.5559768676757812, "memory(GiB)": 72.85, "step": 30320, "token_acc": 0.4874551971326165, "train_speed(iter/s)": 0.670587 }, { "epoch": 1.2992159718949488, "grad_norm": 3.8687212467193604, "learning_rate": 8.425805341027011e-05, "loss": 2.047863006591797, "memory(GiB)": 72.85, "step": 30325, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.670598 }, { "epoch": 1.2994301872241978, "grad_norm": 4.130486488342285, "learning_rate": 8.425315118987649e-05, "loss": 2.4253868103027343, "memory(GiB)": 72.85, "step": 30330, "token_acc": 0.5362903225806451, "train_speed(iter/s)": 0.670621 }, { "epoch": 1.2996444025534468, "grad_norm": 4.546720504760742, "learning_rate": 8.424824834895491e-05, "loss": 2.4234718322753905, 
"memory(GiB)": 72.85, "step": 30335, "token_acc": 0.46540880503144655, "train_speed(iter/s)": 0.670618 }, { "epoch": 1.2998586178826956, "grad_norm": 4.225708961486816, "learning_rate": 8.424334488759418e-05, "loss": 2.349911117553711, "memory(GiB)": 72.85, "step": 30340, "token_acc": 0.49185667752442996, "train_speed(iter/s)": 0.670636 }, { "epoch": 1.3000728332119447, "grad_norm": 3.802523374557495, "learning_rate": 8.423844080588314e-05, "loss": 2.228878402709961, "memory(GiB)": 72.85, "step": 30345, "token_acc": 0.5047923322683706, "train_speed(iter/s)": 0.670644 }, { "epoch": 1.3002870485411937, "grad_norm": 4.642494201660156, "learning_rate": 8.423353610391063e-05, "loss": 2.4572784423828127, "memory(GiB)": 72.85, "step": 30350, "token_acc": 0.49848024316109424, "train_speed(iter/s)": 0.670632 }, { "epoch": 1.3005012638704425, "grad_norm": 3.988197088241577, "learning_rate": 8.422863078176548e-05, "loss": 2.482484817504883, "memory(GiB)": 72.85, "step": 30355, "token_acc": 0.4792332268370607, "train_speed(iter/s)": 0.67066 }, { "epoch": 1.3007154791996915, "grad_norm": 3.3890719413757324, "learning_rate": 8.422372483953659e-05, "loss": 2.3808481216430666, "memory(GiB)": 72.85, "step": 30360, "token_acc": 0.49258160237388726, "train_speed(iter/s)": 0.670694 }, { "epoch": 1.3009296945289406, "grad_norm": 4.537890911102295, "learning_rate": 8.421881827731281e-05, "loss": 2.066646957397461, "memory(GiB)": 72.85, "step": 30365, "token_acc": 0.5538461538461539, "train_speed(iter/s)": 0.670702 }, { "epoch": 1.3011439098581894, "grad_norm": 3.4681923389434814, "learning_rate": 8.421391109518305e-05, "loss": 1.9421968460083008, "memory(GiB)": 72.85, "step": 30370, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.670714 }, { "epoch": 1.3013581251874384, "grad_norm": 8.606400489807129, "learning_rate": 8.42090032932362e-05, "loss": 2.3710493087768554, "memory(GiB)": 72.85, "step": 30375, "token_acc": 0.4965986394557823, "train_speed(iter/s)": 0.670747 }, { 
"epoch": 1.3015723405166875, "grad_norm": 4.328911304473877, "learning_rate": 8.420409487156115e-05, "loss": 2.282195472717285, "memory(GiB)": 72.85, "step": 30380, "token_acc": 0.5212765957446809, "train_speed(iter/s)": 0.670756 }, { "epoch": 1.3017865558459363, "grad_norm": 3.75217604637146, "learning_rate": 8.419918583024685e-05, "loss": 2.5765892028808595, "memory(GiB)": 72.85, "step": 30385, "token_acc": 0.4820846905537459, "train_speed(iter/s)": 0.670725 }, { "epoch": 1.3020007711751853, "grad_norm": 6.573220252990723, "learning_rate": 8.419427616938219e-05, "loss": 2.866390037536621, "memory(GiB)": 72.85, "step": 30390, "token_acc": 0.4340836012861736, "train_speed(iter/s)": 0.670752 }, { "epoch": 1.3022149865044343, "grad_norm": 4.7548508644104, "learning_rate": 8.418936588905614e-05, "loss": 2.538233184814453, "memory(GiB)": 72.85, "step": 30395, "token_acc": 0.493006993006993, "train_speed(iter/s)": 0.670753 }, { "epoch": 1.3024292018336832, "grad_norm": 4.55478048324585, "learning_rate": 8.418445498935769e-05, "loss": 2.1647306442260743, "memory(GiB)": 72.85, "step": 30400, "token_acc": 0.5487364620938628, "train_speed(iter/s)": 0.670789 }, { "epoch": 1.3026434171629322, "grad_norm": 4.102353096008301, "learning_rate": 8.417954347037575e-05, "loss": 2.848897933959961, "memory(GiB)": 72.85, "step": 30405, "token_acc": 0.46689895470383275, "train_speed(iter/s)": 0.670798 }, { "epoch": 1.3028576324921812, "grad_norm": 3.7585971355438232, "learning_rate": 8.417463133219931e-05, "loss": 2.286564826965332, "memory(GiB)": 72.85, "step": 30410, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.670806 }, { "epoch": 1.30307184782143, "grad_norm": 3.664386034011841, "learning_rate": 8.416971857491738e-05, "loss": 2.6168487548828123, "memory(GiB)": 72.85, "step": 30415, "token_acc": 0.4392523364485981, "train_speed(iter/s)": 0.670811 }, { "epoch": 1.303286063150679, "grad_norm": 5.795607566833496, "learning_rate": 8.416480519861894e-05, "loss": 
2.6050603866577147, "memory(GiB)": 72.85, "step": 30420, "token_acc": 0.4811594202898551, "train_speed(iter/s)": 0.670808 }, { "epoch": 1.303500278479928, "grad_norm": 3.0888712406158447, "learning_rate": 8.4159891203393e-05, "loss": 2.3461484909057617, "memory(GiB)": 72.85, "step": 30425, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.670777 }, { "epoch": 1.303714493809177, "grad_norm": 3.8487486839294434, "learning_rate": 8.415497658932859e-05, "loss": 2.430509567260742, "memory(GiB)": 72.85, "step": 30430, "token_acc": 0.49324324324324326, "train_speed(iter/s)": 0.670768 }, { "epoch": 1.303928709138426, "grad_norm": 3.711930751800537, "learning_rate": 8.415006135651473e-05, "loss": 2.1617835998535155, "memory(GiB)": 72.85, "step": 30435, "token_acc": 0.5367647058823529, "train_speed(iter/s)": 0.670778 }, { "epoch": 1.304142924467675, "grad_norm": 3.1705515384674072, "learning_rate": 8.414514550504049e-05, "loss": 2.2531196594238283, "memory(GiB)": 72.85, "step": 30440, "token_acc": 0.5037037037037037, "train_speed(iter/s)": 0.670787 }, { "epoch": 1.3043571397969238, "grad_norm": 4.429086208343506, "learning_rate": 8.41402290349949e-05, "loss": 2.642892837524414, "memory(GiB)": 72.85, "step": 30445, "token_acc": 0.4840764331210191, "train_speed(iter/s)": 0.670789 }, { "epoch": 1.3045713551261728, "grad_norm": 4.814577102661133, "learning_rate": 8.413531194646704e-05, "loss": 2.395440864562988, "memory(GiB)": 72.85, "step": 30450, "token_acc": 0.49122807017543857, "train_speed(iter/s)": 0.670781 }, { "epoch": 1.3047855704554219, "grad_norm": 4.756682395935059, "learning_rate": 8.413039423954598e-05, "loss": 2.4321908950805664, "memory(GiB)": 72.85, "step": 30455, "token_acc": 0.5177865612648221, "train_speed(iter/s)": 0.670794 }, { "epoch": 1.3049997857846707, "grad_norm": 2.80421781539917, "learning_rate": 8.412547591432081e-05, "loss": 2.420383262634277, "memory(GiB)": 72.85, "step": 30460, "token_acc": 0.4818181818181818, "train_speed(iter/s)": 
0.670791 }, { "epoch": 1.3052140011139197, "grad_norm": 6.865057945251465, "learning_rate": 8.412055697088062e-05, "loss": 2.6925991058349608, "memory(GiB)": 72.85, "step": 30465, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.670805 }, { "epoch": 1.3054282164431688, "grad_norm": 4.021371364593506, "learning_rate": 8.411563740931454e-05, "loss": 2.007830047607422, "memory(GiB)": 72.85, "step": 30470, "token_acc": 0.5221238938053098, "train_speed(iter/s)": 0.67081 }, { "epoch": 1.3056424317724176, "grad_norm": 4.487924098968506, "learning_rate": 8.411071722971169e-05, "loss": 2.3528297424316404, "memory(GiB)": 72.85, "step": 30475, "token_acc": 0.5159010600706714, "train_speed(iter/s)": 0.670794 }, { "epoch": 1.3058566471016666, "grad_norm": 3.733137607574463, "learning_rate": 8.41057964321612e-05, "loss": 1.9850578308105469, "memory(GiB)": 72.85, "step": 30480, "token_acc": 0.5367647058823529, "train_speed(iter/s)": 0.670774 }, { "epoch": 1.3060708624309156, "grad_norm": 4.101669788360596, "learning_rate": 8.410087501675221e-05, "loss": 2.447156524658203, "memory(GiB)": 72.85, "step": 30485, "token_acc": 0.5175718849840255, "train_speed(iter/s)": 0.670787 }, { "epoch": 1.3062850777601644, "grad_norm": 5.723836898803711, "learning_rate": 8.409595298357389e-05, "loss": 2.1012767791748046, "memory(GiB)": 72.85, "step": 30490, "token_acc": 0.5064377682403434, "train_speed(iter/s)": 0.670779 }, { "epoch": 1.3064992930894135, "grad_norm": 3.3147590160369873, "learning_rate": 8.409103033271538e-05, "loss": 2.42468376159668, "memory(GiB)": 72.85, "step": 30495, "token_acc": 0.453416149068323, "train_speed(iter/s)": 0.670792 }, { "epoch": 1.3067135084186625, "grad_norm": 4.607506275177002, "learning_rate": 8.408610706426588e-05, "loss": 2.545652389526367, "memory(GiB)": 72.85, "step": 30500, "token_acc": 0.45864661654135336, "train_speed(iter/s)": 0.670777 }, { "epoch": 1.3067135084186625, "eval_loss": 2.164261817932129, "eval_runtime": 16.3148, 
"eval_samples_per_second": 6.129, "eval_steps_per_second": 6.129, "eval_token_acc": 0.4804045512010114, "step": 30500 }, { "epoch": 1.3069277237479113, "grad_norm": 5.072554588317871, "learning_rate": 8.408118317831457e-05, "loss": 2.257181930541992, "memory(GiB)": 72.85, "step": 30505, "token_acc": 0.4944954128440367, "train_speed(iter/s)": 0.670489 }, { "epoch": 1.3071419390771604, "grad_norm": 4.157049179077148, "learning_rate": 8.407625867495067e-05, "loss": 2.0869293212890625, "memory(GiB)": 72.85, "step": 30510, "token_acc": 0.5284552845528455, "train_speed(iter/s)": 0.6705 }, { "epoch": 1.3073561544064094, "grad_norm": 3.818243980407715, "learning_rate": 8.407133355426338e-05, "loss": 2.2993459701538086, "memory(GiB)": 72.85, "step": 30515, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.670513 }, { "epoch": 1.3075703697356582, "grad_norm": 4.477462291717529, "learning_rate": 8.406640781634191e-05, "loss": 2.537556457519531, "memory(GiB)": 72.85, "step": 30520, "token_acc": 0.4931506849315068, "train_speed(iter/s)": 0.670548 }, { "epoch": 1.3077845850649072, "grad_norm": 3.2611494064331055, "learning_rate": 8.40614814612755e-05, "loss": 2.2332229614257812, "memory(GiB)": 72.85, "step": 30525, "token_acc": 0.5148514851485149, "train_speed(iter/s)": 0.67054 }, { "epoch": 1.3079988003941563, "grad_norm": 4.692071437835693, "learning_rate": 8.405655448915341e-05, "loss": 2.7218212127685546, "memory(GiB)": 72.85, "step": 30530, "token_acc": 0.45938375350140054, "train_speed(iter/s)": 0.670531 }, { "epoch": 1.308213015723405, "grad_norm": 4.506800651550293, "learning_rate": 8.40516269000649e-05, "loss": 2.290547752380371, "memory(GiB)": 72.85, "step": 30535, "token_acc": 0.5550847457627118, "train_speed(iter/s)": 0.670555 }, { "epoch": 1.3084272310526541, "grad_norm": 3.7874577045440674, "learning_rate": 8.404669869409923e-05, "loss": 2.2175308227539063, "memory(GiB)": 72.85, "step": 30540, "token_acc": 0.5149501661129569, "train_speed(iter/s)": 0.670519 
}, { "epoch": 1.3086414463819032, "grad_norm": 4.981441974639893, "learning_rate": 8.404176987134565e-05, "loss": 2.5166933059692385, "memory(GiB)": 72.85, "step": 30545, "token_acc": 0.484472049689441, "train_speed(iter/s)": 0.670537 }, { "epoch": 1.308855661711152, "grad_norm": 3.336853504180908, "learning_rate": 8.403684043189348e-05, "loss": 2.1722707748413086, "memory(GiB)": 72.85, "step": 30550, "token_acc": 0.5214723926380368, "train_speed(iter/s)": 0.670534 }, { "epoch": 1.309069877040401, "grad_norm": 3.7377521991729736, "learning_rate": 8.4031910375832e-05, "loss": 2.5011978149414062, "memory(GiB)": 72.85, "step": 30555, "token_acc": 0.47633136094674555, "train_speed(iter/s)": 0.670546 }, { "epoch": 1.30928409236965, "grad_norm": 3.207735538482666, "learning_rate": 8.402697970325057e-05, "loss": 2.534108352661133, "memory(GiB)": 72.85, "step": 30560, "token_acc": 0.4801223241590214, "train_speed(iter/s)": 0.670557 }, { "epoch": 1.3094983076988989, "grad_norm": 4.565073013305664, "learning_rate": 8.402204841423847e-05, "loss": 2.348562812805176, "memory(GiB)": 72.85, "step": 30565, "token_acc": 0.4840989399293286, "train_speed(iter/s)": 0.670544 }, { "epoch": 1.309712523028148, "grad_norm": 3.688495397567749, "learning_rate": 8.401711650888506e-05, "loss": 2.1749752044677733, "memory(GiB)": 72.85, "step": 30570, "token_acc": 0.5050847457627119, "train_speed(iter/s)": 0.670543 }, { "epoch": 1.309926738357397, "grad_norm": 3.6629860401153564, "learning_rate": 8.401218398727966e-05, "loss": 2.4217645645141603, "memory(GiB)": 72.85, "step": 30575, "token_acc": 0.5170278637770898, "train_speed(iter/s)": 0.670561 }, { "epoch": 1.3101409536866457, "grad_norm": 4.730736255645752, "learning_rate": 8.400725084951164e-05, "loss": 2.5724819183349608, "memory(GiB)": 72.85, "step": 30580, "token_acc": 0.44621513944223107, "train_speed(iter/s)": 0.670567 }, { "epoch": 1.3103551690158948, "grad_norm": 3.8955025672912598, "learning_rate": 8.400231709567037e-05, "loss": 
2.0116724014282226, "memory(GiB)": 72.85, "step": 30585, "token_acc": 0.556, "train_speed(iter/s)": 0.670584 }, { "epoch": 1.3105693843451438, "grad_norm": 4.628363132476807, "learning_rate": 8.399738272584524e-05, "loss": 2.4365720748901367, "memory(GiB)": 72.85, "step": 30590, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.670576 }, { "epoch": 1.3107835996743926, "grad_norm": 3.328345537185669, "learning_rate": 8.399244774012562e-05, "loss": 2.051371383666992, "memory(GiB)": 72.85, "step": 30595, "token_acc": 0.5386996904024768, "train_speed(iter/s)": 0.670594 }, { "epoch": 1.3109978150036417, "grad_norm": 4.277751445770264, "learning_rate": 8.398751213860093e-05, "loss": 2.100655746459961, "memory(GiB)": 72.85, "step": 30600, "token_acc": 0.5399239543726235, "train_speed(iter/s)": 0.670594 }, { "epoch": 1.3112120303328907, "grad_norm": 6.01072359085083, "learning_rate": 8.398257592136057e-05, "loss": 2.2073925018310545, "memory(GiB)": 72.85, "step": 30605, "token_acc": 0.4885245901639344, "train_speed(iter/s)": 0.670568 }, { "epoch": 1.3114262456621395, "grad_norm": 5.383959770202637, "learning_rate": 8.397763908849397e-05, "loss": 2.6709293365478515, "memory(GiB)": 72.85, "step": 30610, "token_acc": 0.4612903225806452, "train_speed(iter/s)": 0.670591 }, { "epoch": 1.3116404609913885, "grad_norm": 4.068572044372559, "learning_rate": 8.397270164009057e-05, "loss": 2.7231557846069334, "memory(GiB)": 72.85, "step": 30615, "token_acc": 0.4530386740331492, "train_speed(iter/s)": 0.670594 }, { "epoch": 1.3118546763206376, "grad_norm": 4.936639785766602, "learning_rate": 8.396776357623979e-05, "loss": 2.392185592651367, "memory(GiB)": 72.85, "step": 30620, "token_acc": 0.5015873015873016, "train_speed(iter/s)": 0.670594 }, { "epoch": 1.3120688916498864, "grad_norm": 4.040285587310791, "learning_rate": 8.396282489703113e-05, "loss": 2.3531509399414063, "memory(GiB)": 72.85, "step": 30625, "token_acc": 0.5433962264150943, "train_speed(iter/s)": 0.670572 }, { 
"epoch": 1.3122831069791354, "grad_norm": 4.444121837615967, "learning_rate": 8.395788560255401e-05, "loss": 2.402959442138672, "memory(GiB)": 72.85, "step": 30630, "token_acc": 0.5, "train_speed(iter/s)": 0.670596 }, { "epoch": 1.3124973223083845, "grad_norm": 4.387836933135986, "learning_rate": 8.395294569289797e-05, "loss": 2.1648962020874025, "memory(GiB)": 72.85, "step": 30635, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.670617 }, { "epoch": 1.3127115376376333, "grad_norm": 3.5398571491241455, "learning_rate": 8.394800516815245e-05, "loss": 2.5575414657592774, "memory(GiB)": 72.85, "step": 30640, "token_acc": 0.48056537102473496, "train_speed(iter/s)": 0.670641 }, { "epoch": 1.3129257529668823, "grad_norm": 5.015799522399902, "learning_rate": 8.394306402840699e-05, "loss": 2.4072608947753906, "memory(GiB)": 72.85, "step": 30645, "token_acc": 0.46443514644351463, "train_speed(iter/s)": 0.67066 }, { "epoch": 1.3131399682961313, "grad_norm": 6.20443868637085, "learning_rate": 8.393812227375107e-05, "loss": 2.593037796020508, "memory(GiB)": 72.85, "step": 30650, "token_acc": 0.47384615384615386, "train_speed(iter/s)": 0.670671 }, { "epoch": 1.3133541836253801, "grad_norm": 4.778703689575195, "learning_rate": 8.39331799042742e-05, "loss": 2.4968828201293944, "memory(GiB)": 72.85, "step": 30655, "token_acc": 0.4691358024691358, "train_speed(iter/s)": 0.670672 }, { "epoch": 1.3135683989546292, "grad_norm": 4.454440593719482, "learning_rate": 8.392823692006598e-05, "loss": 2.363800048828125, "memory(GiB)": 72.85, "step": 30660, "token_acc": 0.5201238390092879, "train_speed(iter/s)": 0.670662 }, { "epoch": 1.3137826142838782, "grad_norm": 7.746145248413086, "learning_rate": 8.392329332121591e-05, "loss": 2.1426467895507812, "memory(GiB)": 72.85, "step": 30665, "token_acc": 0.5283687943262412, "train_speed(iter/s)": 0.670651 }, { "epoch": 1.313996829613127, "grad_norm": 4.468807697296143, "learning_rate": 8.391834910781357e-05, "loss": 2.3547338485717773, 
"memory(GiB)": 72.85, "step": 30670, "token_acc": 0.5112540192926045, "train_speed(iter/s)": 0.670634 }, { "epoch": 1.314211044942376, "grad_norm": 3.2481942176818848, "learning_rate": 8.391340427994852e-05, "loss": 2.2494564056396484, "memory(GiB)": 72.85, "step": 30675, "token_acc": 0.5093167701863354, "train_speed(iter/s)": 0.670626 }, { "epoch": 1.314425260271625, "grad_norm": 5.300195217132568, "learning_rate": 8.390845883771032e-05, "loss": 2.5508460998535156, "memory(GiB)": 72.85, "step": 30680, "token_acc": 0.45110410094637227, "train_speed(iter/s)": 0.670642 }, { "epoch": 1.314639475600874, "grad_norm": 4.699094295501709, "learning_rate": 8.390351278118859e-05, "loss": 2.207568359375, "memory(GiB)": 72.85, "step": 30685, "token_acc": 0.5, "train_speed(iter/s)": 0.67064 }, { "epoch": 1.314853690930123, "grad_norm": 5.075193881988525, "learning_rate": 8.389856611047291e-05, "loss": 2.3154748916625976, "memory(GiB)": 72.85, "step": 30690, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.67066 }, { "epoch": 1.315067906259372, "grad_norm": 3.132152795791626, "learning_rate": 8.389361882565292e-05, "loss": 2.50683650970459, "memory(GiB)": 72.85, "step": 30695, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.670658 }, { "epoch": 1.3152821215886208, "grad_norm": 4.341196537017822, "learning_rate": 8.388867092681822e-05, "loss": 2.258316993713379, "memory(GiB)": 72.85, "step": 30700, "token_acc": 0.5166666666666667, "train_speed(iter/s)": 0.670652 }, { "epoch": 1.3154963369178698, "grad_norm": 3.691038131713867, "learning_rate": 8.388372241405846e-05, "loss": 2.31786994934082, "memory(GiB)": 72.85, "step": 30705, "token_acc": 0.47717842323651455, "train_speed(iter/s)": 0.670652 }, { "epoch": 1.3157105522471189, "grad_norm": 4.51615571975708, "learning_rate": 8.387877328746329e-05, "loss": 2.2459508895874025, "memory(GiB)": 72.85, "step": 30710, "token_acc": 0.4882154882154882, "train_speed(iter/s)": 0.670645 }, { "epoch": 1.3159247675763677, 
"grad_norm": 4.252839088439941, "learning_rate": 8.387382354712236e-05, "loss": 2.378538131713867, "memory(GiB)": 72.85, "step": 30715, "token_acc": 0.49070631970260226, "train_speed(iter/s)": 0.670637 }, { "epoch": 1.3161389829056167, "grad_norm": 3.575915575027466, "learning_rate": 8.386887319312533e-05, "loss": 2.1832727432250976, "memory(GiB)": 72.85, "step": 30720, "token_acc": 0.5095057034220533, "train_speed(iter/s)": 0.670618 }, { "epoch": 1.3163531982348657, "grad_norm": 5.209935665130615, "learning_rate": 8.38639222255619e-05, "loss": 2.5154985427856444, "memory(GiB)": 72.85, "step": 30725, "token_acc": 0.48161764705882354, "train_speed(iter/s)": 0.670602 }, { "epoch": 1.3165674135641146, "grad_norm": 4.450595855712891, "learning_rate": 8.385897064452174e-05, "loss": 2.272617149353027, "memory(GiB)": 72.85, "step": 30730, "token_acc": 0.4897360703812317, "train_speed(iter/s)": 0.670607 }, { "epoch": 1.3167816288933636, "grad_norm": 3.320239305496216, "learning_rate": 8.385401845009457e-05, "loss": 2.7877920150756834, "memory(GiB)": 72.85, "step": 30735, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.670628 }, { "epoch": 1.3169958442226126, "grad_norm": 4.180581092834473, "learning_rate": 8.38490656423701e-05, "loss": 2.4064701080322264, "memory(GiB)": 72.85, "step": 30740, "token_acc": 0.4937888198757764, "train_speed(iter/s)": 0.670646 }, { "epoch": 1.3172100595518614, "grad_norm": 3.932450771331787, "learning_rate": 8.384411222143805e-05, "loss": 2.5002819061279298, "memory(GiB)": 72.85, "step": 30745, "token_acc": 0.4622356495468278, "train_speed(iter/s)": 0.670652 }, { "epoch": 1.3174242748811105, "grad_norm": 4.122936248779297, "learning_rate": 8.383915818738816e-05, "loss": 2.4066162109375, "memory(GiB)": 72.85, "step": 30750, "token_acc": 0.46229508196721314, "train_speed(iter/s)": 0.670645 }, { "epoch": 1.3176384902103595, "grad_norm": 5.104159355163574, "learning_rate": 8.383420354031019e-05, "loss": 1.9368556976318358, "memory(GiB)": 
72.85, "step": 30755, "token_acc": 0.5179153094462541, "train_speed(iter/s)": 0.670658 }, { "epoch": 1.3178527055396083, "grad_norm": 5.984591960906982, "learning_rate": 8.382924828029385e-05, "loss": 2.3311321258544924, "memory(GiB)": 72.85, "step": 30760, "token_acc": 0.547244094488189, "train_speed(iter/s)": 0.670676 }, { "epoch": 1.3180669208688574, "grad_norm": 4.436121940612793, "learning_rate": 8.382429240742897e-05, "loss": 2.395725631713867, "memory(GiB)": 72.85, "step": 30765, "token_acc": 0.4945054945054945, "train_speed(iter/s)": 0.670684 }, { "epoch": 1.3182811361981064, "grad_norm": 4.319125175476074, "learning_rate": 8.381933592180531e-05, "loss": 2.153542900085449, "memory(GiB)": 72.85, "step": 30770, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.67067 }, { "epoch": 1.3184953515273552, "grad_norm": 4.59865140914917, "learning_rate": 8.381437882351265e-05, "loss": 2.240996551513672, "memory(GiB)": 72.85, "step": 30775, "token_acc": 0.49140893470790376, "train_speed(iter/s)": 0.670668 }, { "epoch": 1.3187095668566042, "grad_norm": 4.2264227867126465, "learning_rate": 8.380942111264078e-05, "loss": 2.3078033447265627, "memory(GiB)": 72.85, "step": 30780, "token_acc": 0.5136054421768708, "train_speed(iter/s)": 0.670658 }, { "epoch": 1.3189237821858533, "grad_norm": 5.655858039855957, "learning_rate": 8.380446278927954e-05, "loss": 2.1583734512329102, "memory(GiB)": 72.85, "step": 30785, "token_acc": 0.5360824742268041, "train_speed(iter/s)": 0.670662 }, { "epoch": 1.319137997515102, "grad_norm": 4.288005352020264, "learning_rate": 8.379950385351875e-05, "loss": 2.304135322570801, "memory(GiB)": 72.85, "step": 30790, "token_acc": 0.5127272727272727, "train_speed(iter/s)": 0.670677 }, { "epoch": 1.3193522128443511, "grad_norm": 4.0270280838012695, "learning_rate": 8.379454430544822e-05, "loss": 2.2365886688232424, "memory(GiB)": 72.85, "step": 30795, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.670694 }, { "epoch": 
1.3195664281736001, "grad_norm": 4.284780979156494, "learning_rate": 8.378958414515786e-05, "loss": 2.4076507568359373, "memory(GiB)": 72.85, "step": 30800, "token_acc": 0.4943820224719101, "train_speed(iter/s)": 0.670708 }, { "epoch": 1.319780643502849, "grad_norm": 4.591030120849609, "learning_rate": 8.378462337273745e-05, "loss": 2.160020637512207, "memory(GiB)": 72.85, "step": 30805, "token_acc": 0.5180327868852459, "train_speed(iter/s)": 0.670695 }, { "epoch": 1.319994858832098, "grad_norm": 4.333517551422119, "learning_rate": 8.37796619882769e-05, "loss": 2.1052194595336915, "memory(GiB)": 72.85, "step": 30810, "token_acc": 0.5477941176470589, "train_speed(iter/s)": 0.6707 }, { "epoch": 1.320209074161347, "grad_norm": 4.889498710632324, "learning_rate": 8.37746999918661e-05, "loss": 2.2964157104492187, "memory(GiB)": 72.85, "step": 30815, "token_acc": 0.48638132295719844, "train_speed(iter/s)": 0.670697 }, { "epoch": 1.3204232894905958, "grad_norm": 4.365584850311279, "learning_rate": 8.37697373835949e-05, "loss": 2.306155967712402, "memory(GiB)": 72.85, "step": 30820, "token_acc": 0.4868035190615836, "train_speed(iter/s)": 0.670702 }, { "epoch": 1.3206375048198449, "grad_norm": 4.815459728240967, "learning_rate": 8.376477416355326e-05, "loss": 2.0788015365600585, "memory(GiB)": 72.85, "step": 30825, "token_acc": 0.5340136054421769, "train_speed(iter/s)": 0.670722 }, { "epoch": 1.320851720149094, "grad_norm": 4.309135437011719, "learning_rate": 8.375981033183105e-05, "loss": 2.5954868316650392, "memory(GiB)": 72.85, "step": 30830, "token_acc": 0.45799457994579945, "train_speed(iter/s)": 0.670695 }, { "epoch": 1.3210659354783427, "grad_norm": 4.591107368469238, "learning_rate": 8.37548458885182e-05, "loss": 2.187169647216797, "memory(GiB)": 72.85, "step": 30835, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.670706 }, { "epoch": 1.3212801508075918, "grad_norm": 4.534945011138916, "learning_rate": 8.374988083370465e-05, "loss": 2.4240562438964846, 
"memory(GiB)": 72.85, "step": 30840, "token_acc": 0.486013986013986, "train_speed(iter/s)": 0.670716 }, { "epoch": 1.3214943661368408, "grad_norm": 4.813765048980713, "learning_rate": 8.374491516748036e-05, "loss": 2.3484792709350586, "memory(GiB)": 72.85, "step": 30845, "token_acc": 0.5239616613418531, "train_speed(iter/s)": 0.670701 }, { "epoch": 1.3217085814660896, "grad_norm": 2.883185625076294, "learning_rate": 8.373994888993529e-05, "loss": 2.266042709350586, "memory(GiB)": 72.85, "step": 30850, "token_acc": 0.549079754601227, "train_speed(iter/s)": 0.670696 }, { "epoch": 1.3219227967953386, "grad_norm": 4.689186096191406, "learning_rate": 8.373498200115937e-05, "loss": 2.262316131591797, "memory(GiB)": 72.85, "step": 30855, "token_acc": 0.5198776758409785, "train_speed(iter/s)": 0.670707 }, { "epoch": 1.3221370121245877, "grad_norm": 4.263940334320068, "learning_rate": 8.37300145012426e-05, "loss": 2.376585578918457, "memory(GiB)": 72.85, "step": 30860, "token_acc": 0.49310344827586206, "train_speed(iter/s)": 0.670732 }, { "epoch": 1.3223512274538365, "grad_norm": 4.366884231567383, "learning_rate": 8.372504639027499e-05, "loss": 2.6254066467285155, "memory(GiB)": 72.85, "step": 30865, "token_acc": 0.44904458598726116, "train_speed(iter/s)": 0.670701 }, { "epoch": 1.3225654427830855, "grad_norm": 3.894728899002075, "learning_rate": 8.372007766834653e-05, "loss": 2.6604612350463865, "memory(GiB)": 72.85, "step": 30870, "token_acc": 0.47635135135135137, "train_speed(iter/s)": 0.670719 }, { "epoch": 1.3227796581123346, "grad_norm": 4.592299938201904, "learning_rate": 8.371510833554723e-05, "loss": 1.9677923202514649, "memory(GiB)": 72.85, "step": 30875, "token_acc": 0.5658914728682171, "train_speed(iter/s)": 0.670739 }, { "epoch": 1.3229938734415834, "grad_norm": 4.356856346130371, "learning_rate": 8.371013839196711e-05, "loss": 2.134613037109375, "memory(GiB)": 72.85, "step": 30880, "token_acc": 0.5163934426229508, "train_speed(iter/s)": 0.670742 }, { "epoch": 
1.3232080887708324, "grad_norm": 3.6926658153533936, "learning_rate": 8.370516783769621e-05, "loss": 2.488681983947754, "memory(GiB)": 72.85, "step": 30885, "token_acc": 0.4785714285714286, "train_speed(iter/s)": 0.670749 }, { "epoch": 1.3234223041000814, "grad_norm": 4.7834577560424805, "learning_rate": 8.370019667282458e-05, "loss": 2.3500139236450197, "memory(GiB)": 72.85, "step": 30890, "token_acc": 0.46441947565543074, "train_speed(iter/s)": 0.670737 }, { "epoch": 1.3236365194293302, "grad_norm": 6.79533576965332, "learning_rate": 8.369522489744228e-05, "loss": 2.41005916595459, "memory(GiB)": 72.85, "step": 30895, "token_acc": 0.49122807017543857, "train_speed(iter/s)": 0.670746 }, { "epoch": 1.3238507347585793, "grad_norm": 4.729168891906738, "learning_rate": 8.369025251163937e-05, "loss": 2.405320930480957, "memory(GiB)": 72.85, "step": 30900, "token_acc": 0.473015873015873, "train_speed(iter/s)": 0.670776 }, { "epoch": 1.3240649500878283, "grad_norm": 5.87495756149292, "learning_rate": 8.368527951550592e-05, "loss": 2.4639667510986327, "memory(GiB)": 72.85, "step": 30905, "token_acc": 0.5450643776824035, "train_speed(iter/s)": 0.670745 }, { "epoch": 1.3242791654170771, "grad_norm": 3.5308737754821777, "learning_rate": 8.368030590913204e-05, "loss": 2.624924659729004, "memory(GiB)": 72.85, "step": 30910, "token_acc": 0.45151515151515154, "train_speed(iter/s)": 0.670741 }, { "epoch": 1.3244933807463262, "grad_norm": 5.190079212188721, "learning_rate": 8.367533169260782e-05, "loss": 2.258839797973633, "memory(GiB)": 72.85, "step": 30915, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.67075 }, { "epoch": 1.3247075960755752, "grad_norm": 4.0497660636901855, "learning_rate": 8.367035686602338e-05, "loss": 2.5030593872070312, "memory(GiB)": 72.85, "step": 30920, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670762 }, { "epoch": 1.324921811404824, "grad_norm": 4.067663669586182, "learning_rate": 8.366538142946885e-05, "loss": 
2.3078645706176757, "memory(GiB)": 72.85, "step": 30925, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.670782 }, { "epoch": 1.325136026734073, "grad_norm": 4.415250778198242, "learning_rate": 8.366040538303435e-05, "loss": 2.339382553100586, "memory(GiB)": 72.85, "step": 30930, "token_acc": 0.4882154882154882, "train_speed(iter/s)": 0.670809 }, { "epoch": 1.325350242063322, "grad_norm": 6.367894172668457, "learning_rate": 8.365542872681002e-05, "loss": 2.473668098449707, "memory(GiB)": 72.85, "step": 30935, "token_acc": 0.48928571428571427, "train_speed(iter/s)": 0.670823 }, { "epoch": 1.325564457392571, "grad_norm": 4.55069637298584, "learning_rate": 8.365045146088602e-05, "loss": 2.1984619140625, "memory(GiB)": 72.85, "step": 30940, "token_acc": 0.5253623188405797, "train_speed(iter/s)": 0.670818 }, { "epoch": 1.32577867272182, "grad_norm": 3.9362423419952393, "learning_rate": 8.364547358535255e-05, "loss": 2.4354312896728514, "memory(GiB)": 72.85, "step": 30945, "token_acc": 0.4562043795620438, "train_speed(iter/s)": 0.670807 }, { "epoch": 1.325992888051069, "grad_norm": 5.016369342803955, "learning_rate": 8.364049510029974e-05, "loss": 2.2813339233398438, "memory(GiB)": 72.85, "step": 30950, "token_acc": 0.5503875968992248, "train_speed(iter/s)": 0.670782 }, { "epoch": 1.3262071033803178, "grad_norm": 3.657639503479004, "learning_rate": 8.363551600581782e-05, "loss": 2.433808708190918, "memory(GiB)": 72.85, "step": 30955, "token_acc": 0.4901315789473684, "train_speed(iter/s)": 0.67077 }, { "epoch": 1.3264213187095668, "grad_norm": 3.3486645221710205, "learning_rate": 8.363053630199698e-05, "loss": 2.3998977661132814, "memory(GiB)": 72.85, "step": 30960, "token_acc": 0.463855421686747, "train_speed(iter/s)": 0.670785 }, { "epoch": 1.3266355340388158, "grad_norm": 3.366558790206909, "learning_rate": 8.362555598892741e-05, "loss": 2.0284542083740233, "memory(GiB)": 72.85, "step": 30965, "token_acc": 0.5431654676258992, "train_speed(iter/s)": 0.670787 
}, { "epoch": 1.3268497493680647, "grad_norm": 4.692281723022461, "learning_rate": 8.362057506669935e-05, "loss": 2.615953063964844, "memory(GiB)": 72.85, "step": 30970, "token_acc": 0.4897260273972603, "train_speed(iter/s)": 0.67079 }, { "epoch": 1.3270639646973137, "grad_norm": 3.6306874752044678, "learning_rate": 8.361559353540304e-05, "loss": 2.1421327590942383, "memory(GiB)": 72.85, "step": 30975, "token_acc": 0.5397489539748954, "train_speed(iter/s)": 0.670806 }, { "epoch": 1.3272781800265627, "grad_norm": 3.994367837905884, "learning_rate": 8.361061139512873e-05, "loss": 2.478912353515625, "memory(GiB)": 72.85, "step": 30980, "token_acc": 0.4930555555555556, "train_speed(iter/s)": 0.670781 }, { "epoch": 1.3274923953558115, "grad_norm": 4.18635368347168, "learning_rate": 8.360562864596666e-05, "loss": 2.0265476226806642, "memory(GiB)": 72.85, "step": 30985, "token_acc": 0.5469387755102041, "train_speed(iter/s)": 0.670769 }, { "epoch": 1.3277066106850606, "grad_norm": 4.010317325592041, "learning_rate": 8.360064528800708e-05, "loss": 2.4675619125366213, "memory(GiB)": 72.85, "step": 30990, "token_acc": 0.4984423676012461, "train_speed(iter/s)": 0.670794 }, { "epoch": 1.3279208260143096, "grad_norm": 4.220749378204346, "learning_rate": 8.359566132134032e-05, "loss": 2.131159019470215, "memory(GiB)": 72.85, "step": 30995, "token_acc": 0.5662650602409639, "train_speed(iter/s)": 0.670786 }, { "epoch": 1.3281350413435584, "grad_norm": 4.808384895324707, "learning_rate": 8.359067674605663e-05, "loss": 2.60772762298584, "memory(GiB)": 72.85, "step": 31000, "token_acc": 0.46357615894039733, "train_speed(iter/s)": 0.670798 }, { "epoch": 1.3281350413435584, "eval_loss": 2.147235155105591, "eval_runtime": 14.7419, "eval_samples_per_second": 6.783, "eval_steps_per_second": 6.783, "eval_token_acc": 0.4948717948717949, "step": 31000 }, { "epoch": 1.3283492566728075, "grad_norm": 3.7176599502563477, "learning_rate": 8.358569156224632e-05, "loss": 2.5504032135009767, 
"memory(GiB)": 72.85, "step": 31005, "token_acc": 0.4943289224952741, "train_speed(iter/s)": 0.670538 }, { "epoch": 1.3285634720020565, "grad_norm": 4.672543525695801, "learning_rate": 8.35807057699997e-05, "loss": 2.2453174591064453, "memory(GiB)": 72.85, "step": 31010, "token_acc": 0.5035460992907801, "train_speed(iter/s)": 0.670545 }, { "epoch": 1.3287776873313053, "grad_norm": 3.578603506088257, "learning_rate": 8.357571936940709e-05, "loss": 2.21200008392334, "memory(GiB)": 72.85, "step": 31015, "token_acc": 0.47530864197530864, "train_speed(iter/s)": 0.670564 }, { "epoch": 1.3289919026605543, "grad_norm": 3.2226459980010986, "learning_rate": 8.357073236055884e-05, "loss": 2.1914108276367186, "memory(GiB)": 72.85, "step": 31020, "token_acc": 0.5288135593220339, "train_speed(iter/s)": 0.670583 }, { "epoch": 1.3292061179898034, "grad_norm": 3.9166603088378906, "learning_rate": 8.356574474354527e-05, "loss": 2.159514808654785, "memory(GiB)": 72.85, "step": 31025, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.670614 }, { "epoch": 1.3294203333190522, "grad_norm": 4.560371398925781, "learning_rate": 8.356075651845675e-05, "loss": 2.6324651718139647, "memory(GiB)": 72.85, "step": 31030, "token_acc": 0.45774647887323944, "train_speed(iter/s)": 0.670635 }, { "epoch": 1.3296345486483012, "grad_norm": 4.164619445800781, "learning_rate": 8.355576768538363e-05, "loss": 2.620716094970703, "memory(GiB)": 72.85, "step": 31035, "token_acc": 0.4633333333333333, "train_speed(iter/s)": 0.670638 }, { "epoch": 1.3298487639775503, "grad_norm": 4.7529096603393555, "learning_rate": 8.355077824441632e-05, "loss": 2.201188087463379, "memory(GiB)": 72.85, "step": 31040, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.670654 }, { "epoch": 1.330062979306799, "grad_norm": 3.110938787460327, "learning_rate": 8.354578819564519e-05, "loss": 2.549592208862305, "memory(GiB)": 72.85, "step": 31045, "token_acc": 0.5034965034965035, "train_speed(iter/s)": 0.670655 }, { 
"epoch": 1.330277194636048, "grad_norm": 4.162050247192383, "learning_rate": 8.354079753916062e-05, "loss": 2.4022750854492188, "memory(GiB)": 72.85, "step": 31050, "token_acc": 0.47416413373860183, "train_speed(iter/s)": 0.670631 }, { "epoch": 1.3304914099652971, "grad_norm": 7.410121917724609, "learning_rate": 8.353580627505305e-05, "loss": 2.383803939819336, "memory(GiB)": 72.85, "step": 31055, "token_acc": 0.5100401606425703, "train_speed(iter/s)": 0.670623 }, { "epoch": 1.330705625294546, "grad_norm": 4.147312641143799, "learning_rate": 8.353081440341288e-05, "loss": 2.506197929382324, "memory(GiB)": 72.85, "step": 31060, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.670627 }, { "epoch": 1.330919840623795, "grad_norm": 4.400981426239014, "learning_rate": 8.352582192433055e-05, "loss": 2.177347183227539, "memory(GiB)": 72.85, "step": 31065, "token_acc": 0.5136986301369864, "train_speed(iter/s)": 0.670645 }, { "epoch": 1.331134055953044, "grad_norm": 4.097431182861328, "learning_rate": 8.352082883789654e-05, "loss": 2.4411128997802733, "memory(GiB)": 72.85, "step": 31070, "token_acc": 0.4852459016393443, "train_speed(iter/s)": 0.670661 }, { "epoch": 1.3313482712822928, "grad_norm": 4.080574035644531, "learning_rate": 8.351583514420125e-05, "loss": 2.0199300765991213, "memory(GiB)": 72.85, "step": 31075, "token_acc": 0.5765124555160143, "train_speed(iter/s)": 0.670663 }, { "epoch": 1.3315624866115419, "grad_norm": 4.174803733825684, "learning_rate": 8.351084084333516e-05, "loss": 2.3057062149047853, "memory(GiB)": 72.85, "step": 31080, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.670676 }, { "epoch": 1.331776701940791, "grad_norm": 4.670220375061035, "learning_rate": 8.350584593538876e-05, "loss": 2.393305015563965, "memory(GiB)": 72.85, "step": 31085, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.670677 }, { "epoch": 1.3319909172700397, "grad_norm": 3.54693341255188, "learning_rate": 8.350085042045252e-05, "loss": 
2.494781494140625, "memory(GiB)": 72.85, "step": 31090, "token_acc": 0.48580441640378547, "train_speed(iter/s)": 0.670687 }, { "epoch": 1.3322051325992887, "grad_norm": 3.770352602005005, "learning_rate": 8.349585429861697e-05, "loss": 2.022794723510742, "memory(GiB)": 72.85, "step": 31095, "token_acc": 0.5506756756756757, "train_speed(iter/s)": 0.670681 }, { "epoch": 1.3324193479285378, "grad_norm": 4.007109642028809, "learning_rate": 8.349085756997257e-05, "loss": 2.439829444885254, "memory(GiB)": 72.85, "step": 31100, "token_acc": 0.4923547400611621, "train_speed(iter/s)": 0.670682 }, { "epoch": 1.3326335632577866, "grad_norm": 4.339457988739014, "learning_rate": 8.348586023460988e-05, "loss": 1.9823362350463867, "memory(GiB)": 72.85, "step": 31105, "token_acc": 0.549407114624506, "train_speed(iter/s)": 0.670688 }, { "epoch": 1.3328477785870356, "grad_norm": 3.6734743118286133, "learning_rate": 8.348086229261942e-05, "loss": 2.3950387954711916, "memory(GiB)": 72.85, "step": 31110, "token_acc": 0.48787878787878786, "train_speed(iter/s)": 0.670677 }, { "epoch": 1.3330619939162847, "grad_norm": 4.360872268676758, "learning_rate": 8.347586374409174e-05, "loss": 2.380532646179199, "memory(GiB)": 72.85, "step": 31115, "token_acc": 0.50187265917603, "train_speed(iter/s)": 0.670695 }, { "epoch": 1.3332762092455335, "grad_norm": 4.227655410766602, "learning_rate": 8.347086458911737e-05, "loss": 2.1290618896484377, "memory(GiB)": 72.85, "step": 31120, "token_acc": 0.5163398692810458, "train_speed(iter/s)": 0.670704 }, { "epoch": 1.3334904245747825, "grad_norm": 4.20727014541626, "learning_rate": 8.346586482778691e-05, "loss": 2.385085678100586, "memory(GiB)": 72.85, "step": 31125, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.670703 }, { "epoch": 1.3337046399040315, "grad_norm": 5.9425272941589355, "learning_rate": 8.346086446019091e-05, "loss": 2.02467041015625, "memory(GiB)": 72.85, "step": 31130, "token_acc": 0.5186721991701245, "train_speed(iter/s)": 
0.670719 }, { "epoch": 1.3339188552332806, "grad_norm": 8.082184791564941, "learning_rate": 8.345586348641993e-05, "loss": 2.496310997009277, "memory(GiB)": 72.85, "step": 31135, "token_acc": 0.4618055555555556, "train_speed(iter/s)": 0.670743 }, { "epoch": 1.3341330705625294, "grad_norm": 4.762873649597168, "learning_rate": 8.345086190656463e-05, "loss": 2.58726806640625, "memory(GiB)": 72.85, "step": 31140, "token_acc": 0.4523809523809524, "train_speed(iter/s)": 0.670739 }, { "epoch": 1.3343472858917784, "grad_norm": 4.368741512298584, "learning_rate": 8.344585972071558e-05, "loss": 2.50439453125, "memory(GiB)": 72.85, "step": 31145, "token_acc": 0.4968553459119497, "train_speed(iter/s)": 0.670763 }, { "epoch": 1.3345615012210275, "grad_norm": 5.0363359451293945, "learning_rate": 8.34408569289634e-05, "loss": 2.456914520263672, "memory(GiB)": 72.85, "step": 31150, "token_acc": 0.4842105263157895, "train_speed(iter/s)": 0.670781 }, { "epoch": 1.3347757165502763, "grad_norm": 4.143333911895752, "learning_rate": 8.343585353139873e-05, "loss": 1.8954065322875977, "memory(GiB)": 72.85, "step": 31155, "token_acc": 0.5662100456621004, "train_speed(iter/s)": 0.670801 }, { "epoch": 1.3349899318795253, "grad_norm": 3.7124247550964355, "learning_rate": 8.343084952811222e-05, "loss": 2.122026824951172, "memory(GiB)": 72.85, "step": 31160, "token_acc": 0.5210355987055016, "train_speed(iter/s)": 0.670782 }, { "epoch": 1.3352041472087743, "grad_norm": 4.542791366577148, "learning_rate": 8.34258449191945e-05, "loss": 1.9728429794311524, "memory(GiB)": 72.85, "step": 31165, "token_acc": 0.5298245614035088, "train_speed(iter/s)": 0.670782 }, { "epoch": 1.3354183625380232, "grad_norm": 4.03206729888916, "learning_rate": 8.342083970473626e-05, "loss": 2.3651424407958985, "memory(GiB)": 72.85, "step": 31170, "token_acc": 0.5054151624548736, "train_speed(iter/s)": 0.670789 }, { "epoch": 1.3356325778672722, "grad_norm": 3.817995071411133, "learning_rate": 8.341583388482812e-05, "loss": 
2.312081146240234, "memory(GiB)": 72.85, "step": 31175, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670799 }, { "epoch": 1.3358467931965212, "grad_norm": 2.6340529918670654, "learning_rate": 8.341082745956083e-05, "loss": 1.9858413696289063, "memory(GiB)": 72.85, "step": 31180, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.670784 }, { "epoch": 1.33606100852577, "grad_norm": 4.399026870727539, "learning_rate": 8.340582042902506e-05, "loss": 2.1620882034301756, "memory(GiB)": 72.85, "step": 31185, "token_acc": 0.5, "train_speed(iter/s)": 0.670778 }, { "epoch": 1.336275223855019, "grad_norm": 3.7014100551605225, "learning_rate": 8.340081279331152e-05, "loss": 2.440446472167969, "memory(GiB)": 72.85, "step": 31190, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.670754 }, { "epoch": 1.336489439184268, "grad_norm": 4.176650047302246, "learning_rate": 8.33958045525109e-05, "loss": 2.4404680252075197, "memory(GiB)": 72.85, "step": 31195, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.670769 }, { "epoch": 1.336703654513517, "grad_norm": 4.025315284729004, "learning_rate": 8.339079570671395e-05, "loss": 2.4780221939086915, "memory(GiB)": 72.85, "step": 31200, "token_acc": 0.4645161290322581, "train_speed(iter/s)": 0.67079 }, { "epoch": 1.336917869842766, "grad_norm": 3.9739372730255127, "learning_rate": 8.338578625601144e-05, "loss": 2.3894561767578124, "memory(GiB)": 72.85, "step": 31205, "token_acc": 0.4673202614379085, "train_speed(iter/s)": 0.670776 }, { "epoch": 1.337132085172015, "grad_norm": 5.055972576141357, "learning_rate": 8.338077620049407e-05, "loss": 2.3429771423339845, "memory(GiB)": 72.85, "step": 31210, "token_acc": 0.4910394265232975, "train_speed(iter/s)": 0.670782 }, { "epoch": 1.3373463005012638, "grad_norm": 5.7508015632629395, "learning_rate": 8.337576554025264e-05, "loss": 2.301859664916992, "memory(GiB)": 72.85, "step": 31215, "token_acc": 0.5017301038062284, "train_speed(iter/s)": 0.67079 }, { 
"epoch": 1.3375605158305128, "grad_norm": 4.050544738769531, "learning_rate": 8.337075427537791e-05, "loss": 1.9755935668945312, "memory(GiB)": 72.85, "step": 31220, "token_acc": 0.5344129554655871, "train_speed(iter/s)": 0.670814 }, { "epoch": 1.3377747311597619, "grad_norm": 4.561308860778809, "learning_rate": 8.336574240596067e-05, "loss": 2.201236534118652, "memory(GiB)": 72.85, "step": 31225, "token_acc": 0.5228070175438596, "train_speed(iter/s)": 0.670799 }, { "epoch": 1.3379889464890107, "grad_norm": 3.3667478561401367, "learning_rate": 8.336072993209169e-05, "loss": 2.256711959838867, "memory(GiB)": 72.85, "step": 31230, "token_acc": 0.5047619047619047, "train_speed(iter/s)": 0.670821 }, { "epoch": 1.3382031618182597, "grad_norm": 4.451312065124512, "learning_rate": 8.335571685386178e-05, "loss": 2.5055681228637696, "memory(GiB)": 72.85, "step": 31235, "token_acc": 0.4921135646687697, "train_speed(iter/s)": 0.670801 }, { "epoch": 1.3384173771475087, "grad_norm": 3.6144306659698486, "learning_rate": 8.33507031713618e-05, "loss": 2.1955190658569337, "memory(GiB)": 72.85, "step": 31240, "token_acc": 0.5150602409638554, "train_speed(iter/s)": 0.670818 }, { "epoch": 1.3386315924767576, "grad_norm": 4.487339973449707, "learning_rate": 8.334568888468253e-05, "loss": 2.3707242965698243, "memory(GiB)": 72.85, "step": 31245, "token_acc": 0.48135593220338985, "train_speed(iter/s)": 0.670824 }, { "epoch": 1.3388458078060066, "grad_norm": 5.972048759460449, "learning_rate": 8.334067399391484e-05, "loss": 2.0236587524414062, "memory(GiB)": 72.85, "step": 31250, "token_acc": 0.538135593220339, "train_speed(iter/s)": 0.670832 }, { "epoch": 1.3390600231352556, "grad_norm": 5.132779121398926, "learning_rate": 8.333565849914952e-05, "loss": 2.306101608276367, "memory(GiB)": 72.85, "step": 31255, "token_acc": 0.4955223880597015, "train_speed(iter/s)": 0.670836 }, { "epoch": 1.3392742384645044, "grad_norm": 4.583808898925781, "learning_rate": 8.333064240047752e-05, "loss": 
2.572004699707031, "memory(GiB)": 72.85, "step": 31260, "token_acc": 0.45144356955380577, "train_speed(iter/s)": 0.67085 }, { "epoch": 1.3394884537937535, "grad_norm": 3.378676176071167, "learning_rate": 8.332562569798966e-05, "loss": 2.256416130065918, "memory(GiB)": 72.85, "step": 31265, "token_acc": 0.48909657320872274, "train_speed(iter/s)": 0.670847 }, { "epoch": 1.3397026691230025, "grad_norm": 6.1157917976379395, "learning_rate": 8.332060839177683e-05, "loss": 2.517823600769043, "memory(GiB)": 72.85, "step": 31270, "token_acc": 0.4483870967741935, "train_speed(iter/s)": 0.670843 }, { "epoch": 1.3399168844522513, "grad_norm": 3.5991251468658447, "learning_rate": 8.33155904819299e-05, "loss": 2.3402257919311524, "memory(GiB)": 72.85, "step": 31275, "token_acc": 0.5155038759689923, "train_speed(iter/s)": 0.670863 }, { "epoch": 1.3401310997815004, "grad_norm": 5.144855976104736, "learning_rate": 8.33105719685398e-05, "loss": 2.3186260223388673, "memory(GiB)": 72.85, "step": 31280, "token_acc": 0.4830188679245283, "train_speed(iter/s)": 0.670877 }, { "epoch": 1.3403453151107494, "grad_norm": 4.505998611450195, "learning_rate": 8.330555285169745e-05, "loss": 2.2679330825805666, "memory(GiB)": 72.85, "step": 31285, "token_acc": 0.5136054421768708, "train_speed(iter/s)": 0.670877 }, { "epoch": 1.3405595304399982, "grad_norm": 5.341085433959961, "learning_rate": 8.330053313149378e-05, "loss": 2.3406238555908203, "memory(GiB)": 72.85, "step": 31290, "token_acc": 0.46886446886446886, "train_speed(iter/s)": 0.670878 }, { "epoch": 1.3407737457692472, "grad_norm": 3.6062402725219727, "learning_rate": 8.329551280801969e-05, "loss": 2.4058353424072267, "memory(GiB)": 72.85, "step": 31295, "token_acc": 0.48104956268221577, "train_speed(iter/s)": 0.670906 }, { "epoch": 1.3409879610984963, "grad_norm": 2.863393545150757, "learning_rate": 8.329049188136617e-05, "loss": 2.5488372802734376, "memory(GiB)": 72.85, "step": 31300, "token_acc": 0.4635416666666667, 
"train_speed(iter/s)": 0.6709 }, { "epoch": 1.3412021764277453, "grad_norm": 3.784775495529175, "learning_rate": 8.328547035162414e-05, "loss": 2.2069107055664063, "memory(GiB)": 72.85, "step": 31305, "token_acc": 0.5129682997118156, "train_speed(iter/s)": 0.670919 }, { "epoch": 1.3414163917569941, "grad_norm": 3.905428647994995, "learning_rate": 8.32804482188846e-05, "loss": 2.260313034057617, "memory(GiB)": 72.85, "step": 31310, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.670918 }, { "epoch": 1.3416306070862432, "grad_norm": 4.207884311676025, "learning_rate": 8.327542548323854e-05, "loss": 2.201919746398926, "memory(GiB)": 72.85, "step": 31315, "token_acc": 0.5075987841945289, "train_speed(iter/s)": 0.670906 }, { "epoch": 1.3418448224154922, "grad_norm": 4.240156173706055, "learning_rate": 8.32704021447769e-05, "loss": 2.2392677307128905, "memory(GiB)": 72.85, "step": 31320, "token_acc": 0.5261538461538462, "train_speed(iter/s)": 0.670914 }, { "epoch": 1.342059037744741, "grad_norm": 4.747804641723633, "learning_rate": 8.326537820359074e-05, "loss": 2.1409347534179686, "memory(GiB)": 72.85, "step": 31325, "token_acc": 0.5401459854014599, "train_speed(iter/s)": 0.670928 }, { "epoch": 1.34227325307399, "grad_norm": 3.9780170917510986, "learning_rate": 8.326035365977105e-05, "loss": 2.473677062988281, "memory(GiB)": 72.85, "step": 31330, "token_acc": 0.519434628975265, "train_speed(iter/s)": 0.670948 }, { "epoch": 1.342487468403239, "grad_norm": 4.661735534667969, "learning_rate": 8.325532851340883e-05, "loss": 2.469913101196289, "memory(GiB)": 72.85, "step": 31335, "token_acc": 0.4795539033457249, "train_speed(iter/s)": 0.670939 }, { "epoch": 1.3427016837324879, "grad_norm": 4.122978687286377, "learning_rate": 8.325030276459515e-05, "loss": 2.080935478210449, "memory(GiB)": 72.85, "step": 31340, "token_acc": 0.5404255319148936, "train_speed(iter/s)": 0.670945 }, { "epoch": 1.342915899061737, "grad_norm": 3.3196280002593994, "learning_rate": 
8.324527641342106e-05, "loss": 2.446803665161133, "memory(GiB)": 72.85, "step": 31345, "token_acc": 0.53156146179402, "train_speed(iter/s)": 0.670954 }, { "epoch": 1.343130114390986, "grad_norm": 3.7887542247772217, "learning_rate": 8.324024945997759e-05, "loss": 2.1004005432128907, "memory(GiB)": 72.85, "step": 31350, "token_acc": 0.5310344827586206, "train_speed(iter/s)": 0.670932 }, { "epoch": 1.3433443297202348, "grad_norm": 7.962434768676758, "learning_rate": 8.32352219043558e-05, "loss": 2.3387561798095704, "memory(GiB)": 72.85, "step": 31355, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.670923 }, { "epoch": 1.3435585450494838, "grad_norm": 3.59248948097229, "learning_rate": 8.32301937466468e-05, "loss": 2.5027923583984375, "memory(GiB)": 72.85, "step": 31360, "token_acc": 0.4642857142857143, "train_speed(iter/s)": 0.670953 }, { "epoch": 1.3437727603787328, "grad_norm": 3.3890562057495117, "learning_rate": 8.322516498694169e-05, "loss": 2.5367202758789062, "memory(GiB)": 72.85, "step": 31365, "token_acc": 0.4954682779456193, "train_speed(iter/s)": 0.670968 }, { "epoch": 1.3439869757079816, "grad_norm": 4.1766486167907715, "learning_rate": 8.32201356253315e-05, "loss": 2.575513458251953, "memory(GiB)": 72.85, "step": 31370, "token_acc": 0.4766081871345029, "train_speed(iter/s)": 0.670967 }, { "epoch": 1.3442011910372307, "grad_norm": 4.524115085601807, "learning_rate": 8.321510566190743e-05, "loss": 2.156215858459473, "memory(GiB)": 72.85, "step": 31375, "token_acc": 0.5018867924528302, "train_speed(iter/s)": 0.67096 }, { "epoch": 1.3444154063664797, "grad_norm": 4.415525436401367, "learning_rate": 8.321007509676055e-05, "loss": 2.3106292724609374, "memory(GiB)": 72.85, "step": 31380, "token_acc": 0.52, "train_speed(iter/s)": 0.67095 }, { "epoch": 1.3446296216957285, "grad_norm": 4.576881408691406, "learning_rate": 8.320504392998202e-05, "loss": 2.370777893066406, "memory(GiB)": 72.85, "step": 31385, "token_acc": 0.4820846905537459, 
"train_speed(iter/s)": 0.670958 }, { "epoch": 1.3448438370249776, "grad_norm": 4.438047409057617, "learning_rate": 8.320001216166296e-05, "loss": 2.320794868469238, "memory(GiB)": 72.85, "step": 31390, "token_acc": 0.5062111801242236, "train_speed(iter/s)": 0.670969 }, { "epoch": 1.3450580523542266, "grad_norm": 4.661468505859375, "learning_rate": 8.319497979189453e-05, "loss": 2.4284833908081054, "memory(GiB)": 72.85, "step": 31395, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.670961 }, { "epoch": 1.3452722676834754, "grad_norm": 3.8742763996124268, "learning_rate": 8.31899468207679e-05, "loss": 2.528006935119629, "memory(GiB)": 72.85, "step": 31400, "token_acc": 0.4564459930313589, "train_speed(iter/s)": 0.670976 }, { "epoch": 1.3454864830127244, "grad_norm": 4.26659631729126, "learning_rate": 8.318491324837426e-05, "loss": 2.439616394042969, "memory(GiB)": 72.85, "step": 31405, "token_acc": 0.49466192170818507, "train_speed(iter/s)": 0.670996 }, { "epoch": 1.3457006983419735, "grad_norm": 4.324955463409424, "learning_rate": 8.317987907480478e-05, "loss": 2.4190128326416014, "memory(GiB)": 72.85, "step": 31410, "token_acc": 0.5017667844522968, "train_speed(iter/s)": 0.670987 }, { "epoch": 1.3459149136712223, "grad_norm": 3.0103228092193604, "learning_rate": 8.317484430015067e-05, "loss": 1.9845115661621093, "memory(GiB)": 72.85, "step": 31415, "token_acc": 0.5481727574750831, "train_speed(iter/s)": 0.670997 }, { "epoch": 1.3461291290004713, "grad_norm": 4.1309075355529785, "learning_rate": 8.316980892450312e-05, "loss": 2.2453161239624024, "memory(GiB)": 72.85, "step": 31420, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.670995 }, { "epoch": 1.3463433443297204, "grad_norm": 3.695988893508911, "learning_rate": 8.316477294795338e-05, "loss": 2.306378173828125, "memory(GiB)": 72.85, "step": 31425, "token_acc": 0.4625, "train_speed(iter/s)": 0.670986 }, { "epoch": 1.3465575596589692, "grad_norm": 5.394837379455566, "learning_rate": 
8.315973637059267e-05, "loss": 2.3497196197509767, "memory(GiB)": 72.85, "step": 31430, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.670976 }, { "epoch": 1.3467717749882182, "grad_norm": 4.220078945159912, "learning_rate": 8.315469919251222e-05, "loss": 2.6392280578613283, "memory(GiB)": 72.85, "step": 31435, "token_acc": 0.4542372881355932, "train_speed(iter/s)": 0.670991 }, { "epoch": 1.3469859903174672, "grad_norm": 4.001670837402344, "learning_rate": 8.31496614138033e-05, "loss": 2.7353469848632814, "memory(GiB)": 72.85, "step": 31440, "token_acc": 0.4913294797687861, "train_speed(iter/s)": 0.670998 }, { "epoch": 1.347200205646716, "grad_norm": 4.155776500701904, "learning_rate": 8.314462303455716e-05, "loss": 2.42435359954834, "memory(GiB)": 72.85, "step": 31445, "token_acc": 0.5071428571428571, "train_speed(iter/s)": 0.671018 }, { "epoch": 1.347414420975965, "grad_norm": 4.994842052459717, "learning_rate": 8.313958405486508e-05, "loss": 2.1218671798706055, "memory(GiB)": 72.85, "step": 31450, "token_acc": 0.5063291139240507, "train_speed(iter/s)": 0.671036 }, { "epoch": 1.3476286363052141, "grad_norm": 4.117115497589111, "learning_rate": 8.313454447481836e-05, "loss": 2.350931167602539, "memory(GiB)": 72.85, "step": 31455, "token_acc": 0.496, "train_speed(iter/s)": 0.671015 }, { "epoch": 1.347842851634463, "grad_norm": 4.284056186676025, "learning_rate": 8.312950429450828e-05, "loss": 2.4453927993774416, "memory(GiB)": 72.85, "step": 31460, "token_acc": 0.46875, "train_speed(iter/s)": 0.671042 }, { "epoch": 1.348057066963712, "grad_norm": 4.291605472564697, "learning_rate": 8.312446351402614e-05, "loss": 2.3471025466918944, "memory(GiB)": 72.85, "step": 31465, "token_acc": 0.5122699386503068, "train_speed(iter/s)": 0.67103 }, { "epoch": 1.348271282292961, "grad_norm": 5.176390171051025, "learning_rate": 8.311942213346326e-05, "loss": 2.2062761306762697, "memory(GiB)": 72.85, "step": 31470, "token_acc": 0.525, "train_speed(iter/s)": 0.671024 }, { 
"epoch": 1.3484854976222098, "grad_norm": 4.153855323791504, "learning_rate": 8.311438015291101e-05, "loss": 2.1583324432373048, "memory(GiB)": 72.85, "step": 31475, "token_acc": 0.46564885496183206, "train_speed(iter/s)": 0.671027 }, { "epoch": 1.3486997129514589, "grad_norm": 3.8080368041992188, "learning_rate": 8.310933757246069e-05, "loss": 2.0962759017944337, "memory(GiB)": 72.85, "step": 31480, "token_acc": 0.5619834710743802, "train_speed(iter/s)": 0.671036 }, { "epoch": 1.3489139282807079, "grad_norm": 3.7630691528320312, "learning_rate": 8.310429439220366e-05, "loss": 2.2792816162109375, "memory(GiB)": 72.85, "step": 31485, "token_acc": 0.5477031802120141, "train_speed(iter/s)": 0.671017 }, { "epoch": 1.3491281436099567, "grad_norm": 4.947911739349365, "learning_rate": 8.309925061223127e-05, "loss": 2.377581787109375, "memory(GiB)": 72.85, "step": 31490, "token_acc": 0.5310077519379846, "train_speed(iter/s)": 0.671012 }, { "epoch": 1.3493423589392057, "grad_norm": 4.3952741622924805, "learning_rate": 8.309420623263493e-05, "loss": 2.546280860900879, "memory(GiB)": 72.85, "step": 31495, "token_acc": 0.4878048780487805, "train_speed(iter/s)": 0.671029 }, { "epoch": 1.3495565742684548, "grad_norm": 4.4532647132873535, "learning_rate": 8.308916125350598e-05, "loss": 2.408656120300293, "memory(GiB)": 72.85, "step": 31500, "token_acc": 0.46875, "train_speed(iter/s)": 0.671033 }, { "epoch": 1.3495565742684548, "eval_loss": 1.9269263744354248, "eval_runtime": 16.0601, "eval_samples_per_second": 6.227, "eval_steps_per_second": 6.227, "eval_token_acc": 0.5059347181008902, "step": 31500 }, { "epoch": 1.3497707895977036, "grad_norm": 5.061892032623291, "learning_rate": 8.308411567493584e-05, "loss": 2.176553153991699, "memory(GiB)": 72.85, "step": 31505, "token_acc": 0.516028955532575, "train_speed(iter/s)": 0.670772 }, { "epoch": 1.3499850049269526, "grad_norm": 4.940303802490234, "learning_rate": 8.30790694970159e-05, "loss": 2.483449172973633, "memory(GiB)": 72.85, 
"step": 31510, "token_acc": 0.47962382445141066, "train_speed(iter/s)": 0.670789 }, { "epoch": 1.3501992202562016, "grad_norm": 5.221519470214844, "learning_rate": 8.307402271983759e-05, "loss": 2.5714012145996095, "memory(GiB)": 72.85, "step": 31515, "token_acc": 0.4859154929577465, "train_speed(iter/s)": 0.670765 }, { "epoch": 1.3504134355854505, "grad_norm": 4.119228363037109, "learning_rate": 8.306897534349234e-05, "loss": 2.63989372253418, "memory(GiB)": 72.85, "step": 31520, "token_acc": 0.43820224719101125, "train_speed(iter/s)": 0.670769 }, { "epoch": 1.3506276509146995, "grad_norm": 4.147197246551514, "learning_rate": 8.306392736807158e-05, "loss": 2.354852485656738, "memory(GiB)": 72.85, "step": 31525, "token_acc": 0.4884488448844885, "train_speed(iter/s)": 0.670755 }, { "epoch": 1.3508418662439485, "grad_norm": 4.922616004943848, "learning_rate": 8.305887879366676e-05, "loss": 2.1456172943115233, "memory(GiB)": 72.85, "step": 31530, "token_acc": 0.5155709342560554, "train_speed(iter/s)": 0.670749 }, { "epoch": 1.3510560815731973, "grad_norm": 4.7220611572265625, "learning_rate": 8.305382962036933e-05, "loss": 2.6195329666137694, "memory(GiB)": 72.85, "step": 31535, "token_acc": 0.42209631728045327, "train_speed(iter/s)": 0.670762 }, { "epoch": 1.3512702969024464, "grad_norm": 4.581668376922607, "learning_rate": 8.304877984827078e-05, "loss": 2.6131595611572265, "memory(GiB)": 72.85, "step": 31540, "token_acc": 0.5, "train_speed(iter/s)": 0.670768 }, { "epoch": 1.3514845122316954, "grad_norm": 4.6186723709106445, "learning_rate": 8.304372947746256e-05, "loss": 2.5971059799194336, "memory(GiB)": 72.85, "step": 31545, "token_acc": 0.4982078853046595, "train_speed(iter/s)": 0.670755 }, { "epoch": 1.3516987275609442, "grad_norm": 3.8960518836975098, "learning_rate": 8.303867850803619e-05, "loss": 2.4747053146362306, "memory(GiB)": 72.85, "step": 31550, "token_acc": 0.4740061162079511, "train_speed(iter/s)": 0.670769 }, { "epoch": 1.3519129428901933, 
"grad_norm": 4.224179744720459, "learning_rate": 8.303362694008318e-05, "loss": 2.4752630233764648, "memory(GiB)": 72.85, "step": 31555, "token_acc": 0.47126436781609193, "train_speed(iter/s)": 0.670779 }, { "epoch": 1.3521271582194423, "grad_norm": 3.9871811866760254, "learning_rate": 8.302857477369503e-05, "loss": 2.301081085205078, "memory(GiB)": 72.85, "step": 31560, "token_acc": 0.4481707317073171, "train_speed(iter/s)": 0.670813 }, { "epoch": 1.352341373548691, "grad_norm": 4.775388717651367, "learning_rate": 8.302352200896326e-05, "loss": 2.063199996948242, "memory(GiB)": 72.85, "step": 31565, "token_acc": 0.5737704918032787, "train_speed(iter/s)": 0.670797 }, { "epoch": 1.3525555888779401, "grad_norm": 4.2934136390686035, "learning_rate": 8.30184686459794e-05, "loss": 2.414967727661133, "memory(GiB)": 72.85, "step": 31570, "token_acc": 0.47147147147147145, "train_speed(iter/s)": 0.670784 }, { "epoch": 1.3527698042071892, "grad_norm": 4.33308744430542, "learning_rate": 8.301341468483503e-05, "loss": 2.2307891845703125, "memory(GiB)": 72.85, "step": 31575, "token_acc": 0.5, "train_speed(iter/s)": 0.670789 }, { "epoch": 1.352984019536438, "grad_norm": 4.375946044921875, "learning_rate": 8.300836012562165e-05, "loss": 2.264063262939453, "memory(GiB)": 72.85, "step": 31580, "token_acc": 0.48013245033112584, "train_speed(iter/s)": 0.670801 }, { "epoch": 1.353198234865687, "grad_norm": 4.57374906539917, "learning_rate": 8.300330496843088e-05, "loss": 2.1782968521118162, "memory(GiB)": 72.85, "step": 31585, "token_acc": 0.5327102803738317, "train_speed(iter/s)": 0.67079 }, { "epoch": 1.353412450194936, "grad_norm": 4.406397819519043, "learning_rate": 8.299824921335428e-05, "loss": 2.255160903930664, "memory(GiB)": 72.85, "step": 31590, "token_acc": 0.5167785234899329, "train_speed(iter/s)": 0.6708 }, { "epoch": 1.3536266655241849, "grad_norm": 3.663600444793701, "learning_rate": 8.299319286048347e-05, "loss": 2.2562324523925783, "memory(GiB)": 72.85, "step": 31595, 
"token_acc": 0.501779359430605, "train_speed(iter/s)": 0.670795 }, { "epoch": 1.353840880853434, "grad_norm": 4.193398952484131, "learning_rate": 8.298813590990997e-05, "loss": 2.4701873779296877, "memory(GiB)": 72.85, "step": 31600, "token_acc": 0.45918367346938777, "train_speed(iter/s)": 0.670793 }, { "epoch": 1.354055096182683, "grad_norm": 4.060516834259033, "learning_rate": 8.298307836172548e-05, "loss": 2.3383222579956056, "memory(GiB)": 72.85, "step": 31605, "token_acc": 0.48639455782312924, "train_speed(iter/s)": 0.670805 }, { "epoch": 1.3542693115119318, "grad_norm": 5.158191680908203, "learning_rate": 8.297802021602157e-05, "loss": 2.45007209777832, "memory(GiB)": 72.85, "step": 31610, "token_acc": 0.49575070821529743, "train_speed(iter/s)": 0.670801 }, { "epoch": 1.3544835268411808, "grad_norm": 3.316481113433838, "learning_rate": 8.297296147288987e-05, "loss": 2.286603546142578, "memory(GiB)": 72.85, "step": 31615, "token_acc": 0.4873417721518987, "train_speed(iter/s)": 0.670809 }, { "epoch": 1.3546977421704298, "grad_norm": 5.685810089111328, "learning_rate": 8.296790213242208e-05, "loss": 2.5019094467163088, "memory(GiB)": 72.85, "step": 31620, "token_acc": 0.4669260700389105, "train_speed(iter/s)": 0.670829 }, { "epoch": 1.3549119574996786, "grad_norm": 3.955263137817383, "learning_rate": 8.296284219470979e-05, "loss": 2.2466012954711916, "memory(GiB)": 72.85, "step": 31625, "token_acc": 0.5461847389558233, "train_speed(iter/s)": 0.670847 }, { "epoch": 1.3551261728289277, "grad_norm": 3.8720767498016357, "learning_rate": 8.295778165984469e-05, "loss": 2.197881507873535, "memory(GiB)": 72.85, "step": 31630, "token_acc": 0.48928571428571427, "train_speed(iter/s)": 0.670847 }, { "epoch": 1.3553403881581767, "grad_norm": 5.345104217529297, "learning_rate": 8.295272052791847e-05, "loss": 2.5599296569824217, "memory(GiB)": 72.85, "step": 31635, "token_acc": 0.45555555555555555, "train_speed(iter/s)": 0.670854 }, { "epoch": 1.3555546034874255, "grad_norm": 
5.905639171600342, "learning_rate": 8.294765879902283e-05, "loss": 2.453575325012207, "memory(GiB)": 72.85, "step": 31640, "token_acc": 0.4542372881355932, "train_speed(iter/s)": 0.670844 }, { "epoch": 1.3557688188166745, "grad_norm": 4.612081527709961, "learning_rate": 8.29425964732494e-05, "loss": 2.315886688232422, "memory(GiB)": 72.85, "step": 31645, "token_acc": 0.49712643678160917, "train_speed(iter/s)": 0.670845 }, { "epoch": 1.3559830341459236, "grad_norm": 4.467816352844238, "learning_rate": 8.293753355068995e-05, "loss": 2.741871643066406, "memory(GiB)": 72.85, "step": 31650, "token_acc": 0.41785714285714287, "train_speed(iter/s)": 0.670868 }, { "epoch": 1.3561972494751724, "grad_norm": 6.522138595581055, "learning_rate": 8.293247003143617e-05, "loss": 2.2418594360351562, "memory(GiB)": 72.85, "step": 31655, "token_acc": 0.5134228187919463, "train_speed(iter/s)": 0.670872 }, { "epoch": 1.3564114648044214, "grad_norm": 3.975092887878418, "learning_rate": 8.292740591557981e-05, "loss": 2.305216598510742, "memory(GiB)": 72.85, "step": 31660, "token_acc": 0.5124555160142349, "train_speed(iter/s)": 0.670876 }, { "epoch": 1.3566256801336705, "grad_norm": 4.404365539550781, "learning_rate": 8.29223412032126e-05, "loss": 2.2800666809082033, "memory(GiB)": 72.85, "step": 31665, "token_acc": 0.49818181818181817, "train_speed(iter/s)": 0.670883 }, { "epoch": 1.3568398954629193, "grad_norm": 4.228309631347656, "learning_rate": 8.291727589442632e-05, "loss": 2.152435302734375, "memory(GiB)": 72.85, "step": 31670, "token_acc": 0.5189003436426117, "train_speed(iter/s)": 0.670898 }, { "epoch": 1.3570541107921683, "grad_norm": 4.709212303161621, "learning_rate": 8.291220998931269e-05, "loss": 2.356475067138672, "memory(GiB)": 72.85, "step": 31675, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.670895 }, { "epoch": 1.3572683261214173, "grad_norm": 3.695439577102661, "learning_rate": 8.29071434879635e-05, "loss": 2.310606002807617, "memory(GiB)": 72.85, "step": 
31680, "token_acc": 0.5193548387096775, "train_speed(iter/s)": 0.670897 }, { "epoch": 1.3574825414506662, "grad_norm": 3.0594570636749268, "learning_rate": 8.290207639047054e-05, "loss": 2.585369110107422, "memory(GiB)": 72.85, "step": 31685, "token_acc": 0.4588235294117647, "train_speed(iter/s)": 0.670896 }, { "epoch": 1.3576967567799152, "grad_norm": 3.701401948928833, "learning_rate": 8.289700869692559e-05, "loss": 2.5757055282592773, "memory(GiB)": 72.85, "step": 31690, "token_acc": 0.47854785478547857, "train_speed(iter/s)": 0.67091 }, { "epoch": 1.3579109721091642, "grad_norm": 4.289559364318848, "learning_rate": 8.289194040742048e-05, "loss": 2.3488323211669924, "memory(GiB)": 72.85, "step": 31695, "token_acc": 0.48695652173913045, "train_speed(iter/s)": 0.670893 }, { "epoch": 1.358125187438413, "grad_norm": 5.180080413818359, "learning_rate": 8.288687152204701e-05, "loss": 2.4847326278686523, "memory(GiB)": 72.85, "step": 31700, "token_acc": 0.4774436090225564, "train_speed(iter/s)": 0.670867 }, { "epoch": 1.358339402767662, "grad_norm": 6.013463973999023, "learning_rate": 8.288180204089702e-05, "loss": 2.535462188720703, "memory(GiB)": 72.85, "step": 31705, "token_acc": 0.484375, "train_speed(iter/s)": 0.670861 }, { "epoch": 1.358553618096911, "grad_norm": 3.9013476371765137, "learning_rate": 8.287673196406234e-05, "loss": 2.5356866836547853, "memory(GiB)": 72.85, "step": 31710, "token_acc": 0.47315436241610737, "train_speed(iter/s)": 0.670862 }, { "epoch": 1.35876783342616, "grad_norm": 4.128876209259033, "learning_rate": 8.28716612916348e-05, "loss": 2.2382068634033203, "memory(GiB)": 72.85, "step": 31715, "token_acc": 0.4935897435897436, "train_speed(iter/s)": 0.670863 }, { "epoch": 1.358982048755409, "grad_norm": 4.689456939697266, "learning_rate": 8.28665900237063e-05, "loss": 2.6150577545166014, "memory(GiB)": 72.85, "step": 31720, "token_acc": 0.4717607973421927, "train_speed(iter/s)": 0.670864 }, { "epoch": 1.359196264084658, "grad_norm": 
4.546947479248047, "learning_rate": 8.286151816036868e-05, "loss": 2.221078872680664, "memory(GiB)": 72.85, "step": 31725, "token_acc": 0.5232974910394266, "train_speed(iter/s)": 0.67086 }, { "epoch": 1.3594104794139068, "grad_norm": 3.4582462310791016, "learning_rate": 8.285644570171382e-05, "loss": 2.209828567504883, "memory(GiB)": 72.85, "step": 31730, "token_acc": 0.5187713310580204, "train_speed(iter/s)": 0.670887 }, { "epoch": 1.3596246947431558, "grad_norm": 3.4796929359436035, "learning_rate": 8.285137264783366e-05, "loss": 2.5132823944091798, "memory(GiB)": 72.85, "step": 31735, "token_acc": 0.46875, "train_speed(iter/s)": 0.670897 }, { "epoch": 1.3598389100724049, "grad_norm": 5.617320537567139, "learning_rate": 8.284629899882003e-05, "loss": 2.3226085662841798, "memory(GiB)": 72.85, "step": 31740, "token_acc": 0.4785100286532951, "train_speed(iter/s)": 0.670899 }, { "epoch": 1.3600531254016537, "grad_norm": 4.348715305328369, "learning_rate": 8.284122475476492e-05, "loss": 2.4389415740966798, "memory(GiB)": 72.85, "step": 31745, "token_acc": 0.48338368580060426, "train_speed(iter/s)": 0.670895 }, { "epoch": 1.3602673407309027, "grad_norm": 5.384598731994629, "learning_rate": 8.283614991576019e-05, "loss": 2.4726236343383787, "memory(GiB)": 72.85, "step": 31750, "token_acc": 0.4619883040935672, "train_speed(iter/s)": 0.670904 }, { "epoch": 1.3604815560601518, "grad_norm": 3.9662892818450928, "learning_rate": 8.28310744818978e-05, "loss": 2.0990066528320312, "memory(GiB)": 72.85, "step": 31755, "token_acc": 0.49818181818181817, "train_speed(iter/s)": 0.670889 }, { "epoch": 1.3606957713894006, "grad_norm": 4.946498394012451, "learning_rate": 8.282599845326971e-05, "loss": 2.3161067962646484, "memory(GiB)": 72.85, "step": 31760, "token_acc": 0.5138339920948617, "train_speed(iter/s)": 0.670896 }, { "epoch": 1.3609099867186496, "grad_norm": 3.7297635078430176, "learning_rate": 8.282092182996785e-05, "loss": 2.4545974731445312, "memory(GiB)": 72.85, "step": 
31765, "token_acc": 0.48727272727272725, "train_speed(iter/s)": 0.670871 }, { "epoch": 1.3611242020478986, "grad_norm": 3.50797176361084, "learning_rate": 8.281584461208421e-05, "loss": 2.0864553451538086, "memory(GiB)": 72.85, "step": 31770, "token_acc": 0.5301724137931034, "train_speed(iter/s)": 0.67088 }, { "epoch": 1.3613384173771474, "grad_norm": 3.552767276763916, "learning_rate": 8.281076679971077e-05, "loss": 1.9654769897460938, "memory(GiB)": 72.85, "step": 31775, "token_acc": 0.5776173285198556, "train_speed(iter/s)": 0.670863 }, { "epoch": 1.3615526327063965, "grad_norm": 4.062856197357178, "learning_rate": 8.28056883929395e-05, "loss": 2.562786102294922, "memory(GiB)": 72.85, "step": 31780, "token_acc": 0.45017182130584193, "train_speed(iter/s)": 0.670857 }, { "epoch": 1.3617668480356455, "grad_norm": 2.8560781478881836, "learning_rate": 8.280060939186242e-05, "loss": 2.1871969223022463, "memory(GiB)": 72.85, "step": 31785, "token_acc": 0.5076923076923077, "train_speed(iter/s)": 0.670869 }, { "epoch": 1.3619810633648943, "grad_norm": 5.354153156280518, "learning_rate": 8.279552979657152e-05, "loss": 2.3303977966308596, "memory(GiB)": 72.85, "step": 31790, "token_acc": 0.5362903225806451, "train_speed(iter/s)": 0.670882 }, { "epoch": 1.3621952786941434, "grad_norm": 5.196098804473877, "learning_rate": 8.279044960715883e-05, "loss": 2.3188461303710937, "memory(GiB)": 72.85, "step": 31795, "token_acc": 0.4627450980392157, "train_speed(iter/s)": 0.670887 }, { "epoch": 1.3624094940233924, "grad_norm": 4.818370342254639, "learning_rate": 8.278536882371639e-05, "loss": 2.402140998840332, "memory(GiB)": 72.85, "step": 31800, "token_acc": 0.4797297297297297, "train_speed(iter/s)": 0.67088 }, { "epoch": 1.3626237093526412, "grad_norm": 4.425116539001465, "learning_rate": 8.278028744633624e-05, "loss": 2.4489229202270506, "memory(GiB)": 72.85, "step": 31805, "token_acc": 0.5169811320754717, "train_speed(iter/s)": 0.6709 }, { "epoch": 1.3628379246818902, 
"grad_norm": 4.290149688720703, "learning_rate": 8.277520547511044e-05, "loss": 2.2309003829956056, "memory(GiB)": 72.85, "step": 31810, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.670887 }, { "epoch": 1.3630521400111393, "grad_norm": 5.900975227355957, "learning_rate": 8.277012291013104e-05, "loss": 2.398720359802246, "memory(GiB)": 72.85, "step": 31815, "token_acc": 0.48698884758364314, "train_speed(iter/s)": 0.670882 }, { "epoch": 1.363266355340388, "grad_norm": 3.6165716648101807, "learning_rate": 8.276503975149013e-05, "loss": 2.3149341583251952, "memory(GiB)": 72.85, "step": 31820, "token_acc": 0.46464646464646464, "train_speed(iter/s)": 0.670905 }, { "epoch": 1.3634805706696371, "grad_norm": 3.948392629623413, "learning_rate": 8.275995599927978e-05, "loss": 2.4329193115234373, "memory(GiB)": 72.85, "step": 31825, "token_acc": 0.47962382445141066, "train_speed(iter/s)": 0.670917 }, { "epoch": 1.3636947859988862, "grad_norm": 3.7181034088134766, "learning_rate": 8.27548716535921e-05, "loss": 2.260066604614258, "memory(GiB)": 72.85, "step": 31830, "token_acc": 0.5099337748344371, "train_speed(iter/s)": 0.670878 }, { "epoch": 1.363909001328135, "grad_norm": 6.302920818328857, "learning_rate": 8.274978671451919e-05, "loss": 2.5877712249755858, "memory(GiB)": 72.85, "step": 31835, "token_acc": 0.504950495049505, "train_speed(iter/s)": 0.670884 }, { "epoch": 1.364123216657384, "grad_norm": 3.5204238891601562, "learning_rate": 8.274470118215317e-05, "loss": 2.2756679534912108, "memory(GiB)": 72.85, "step": 31840, "token_acc": 0.4878048780487805, "train_speed(iter/s)": 0.670884 }, { "epoch": 1.364337431986633, "grad_norm": 4.544287204742432, "learning_rate": 8.273961505658618e-05, "loss": 2.2603986740112303, "memory(GiB)": 72.85, "step": 31845, "token_acc": 0.47440273037542663, "train_speed(iter/s)": 0.670875 }, { "epoch": 1.3645516473158819, "grad_norm": 5.6459126472473145, "learning_rate": 8.273452833791034e-05, "loss": 2.366859245300293, 
"memory(GiB)": 72.85, "step": 31850, "token_acc": 0.5035460992907801, "train_speed(iter/s)": 0.670864 }, { "epoch": 1.364765862645131, "grad_norm": 3.5176987648010254, "learning_rate": 8.272944102621782e-05, "loss": 2.423842430114746, "memory(GiB)": 72.85, "step": 31855, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.670849 }, { "epoch": 1.36498007797438, "grad_norm": 4.381810188293457, "learning_rate": 8.272435312160077e-05, "loss": 2.372970390319824, "memory(GiB)": 72.85, "step": 31860, "token_acc": 0.486013986013986, "train_speed(iter/s)": 0.670841 }, { "epoch": 1.3651942933036287, "grad_norm": 5.136582374572754, "learning_rate": 8.271926462415137e-05, "loss": 2.427252006530762, "memory(GiB)": 72.85, "step": 31865, "token_acc": 0.4520547945205479, "train_speed(iter/s)": 0.670817 }, { "epoch": 1.3654085086328778, "grad_norm": 3.197763681411743, "learning_rate": 8.271417553396179e-05, "loss": 1.9543087005615234, "memory(GiB)": 72.85, "step": 31870, "token_acc": 0.5435540069686411, "train_speed(iter/s)": 0.670782 }, { "epoch": 1.3656227239621268, "grad_norm": 3.5202689170837402, "learning_rate": 8.270908585112423e-05, "loss": 2.3529701232910156, "memory(GiB)": 72.85, "step": 31875, "token_acc": 0.48494983277591974, "train_speed(iter/s)": 0.67076 }, { "epoch": 1.3658369392913756, "grad_norm": 3.8328373432159424, "learning_rate": 8.27039955757309e-05, "loss": 2.36984806060791, "memory(GiB)": 72.85, "step": 31880, "token_acc": 0.4591439688715953, "train_speed(iter/s)": 0.670743 }, { "epoch": 1.3660511546206247, "grad_norm": 4.527298450469971, "learning_rate": 8.269890470787402e-05, "loss": 2.5692007064819338, "memory(GiB)": 72.85, "step": 31885, "token_acc": 0.471875, "train_speed(iter/s)": 0.670759 }, { "epoch": 1.3662653699498737, "grad_norm": 3.4934017658233643, "learning_rate": 8.26938132476458e-05, "loss": 2.6873500823974608, "memory(GiB)": 72.85, "step": 31890, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.670731 }, { "epoch": 
1.3664795852791225, "grad_norm": 3.711522102355957, "learning_rate": 8.268872119513849e-05, "loss": 2.3964210510253907, "memory(GiB)": 72.85, "step": 31895, "token_acc": 0.49538461538461537, "train_speed(iter/s)": 0.670727 }, { "epoch": 1.3666938006083715, "grad_norm": 4.389928340911865, "learning_rate": 8.268362855044433e-05, "loss": 2.4687618255615233, "memory(GiB)": 72.85, "step": 31900, "token_acc": 0.4482758620689655, "train_speed(iter/s)": 0.670728 }, { "epoch": 1.3669080159376206, "grad_norm": 4.8750996589660645, "learning_rate": 8.267853531365557e-05, "loss": 2.040863800048828, "memory(GiB)": 72.85, "step": 31905, "token_acc": 0.5173611111111112, "train_speed(iter/s)": 0.670686 }, { "epoch": 1.3671222312668694, "grad_norm": 5.1664137840271, "learning_rate": 8.26734414848645e-05, "loss": 2.3683338165283203, "memory(GiB)": 72.85, "step": 31910, "token_acc": 0.49158249158249157, "train_speed(iter/s)": 0.670697 }, { "epoch": 1.3673364465961184, "grad_norm": 3.8792898654937744, "learning_rate": 8.266834706416338e-05, "loss": 2.4428359985351564, "memory(GiB)": 72.85, "step": 31915, "token_acc": 0.5115511551155115, "train_speed(iter/s)": 0.670685 }, { "epoch": 1.3675506619253675, "grad_norm": 4.059250354766846, "learning_rate": 8.26632520516445e-05, "loss": 2.2341939926147463, "memory(GiB)": 72.85, "step": 31920, "token_acc": 0.5, "train_speed(iter/s)": 0.670718 }, { "epoch": 1.3677648772546163, "grad_norm": 4.176027297973633, "learning_rate": 8.265815644740017e-05, "loss": 2.2859642028808596, "memory(GiB)": 72.85, "step": 31925, "token_acc": 0.4953560371517028, "train_speed(iter/s)": 0.670718 }, { "epoch": 1.3679790925838653, "grad_norm": 5.073952674865723, "learning_rate": 8.265306025152271e-05, "loss": 2.075273895263672, "memory(GiB)": 72.85, "step": 31930, "token_acc": 0.5036496350364964, "train_speed(iter/s)": 0.670726 }, { "epoch": 1.3681933079131143, "grad_norm": 4.885994911193848, "learning_rate": 8.264796346410443e-05, "loss": 2.238574981689453, 
"memory(GiB)": 72.85, "step": 31935, "token_acc": 0.5231788079470199, "train_speed(iter/s)": 0.670748 }, { "epoch": 1.3684075232423631, "grad_norm": 4.105824947357178, "learning_rate": 8.264286608523765e-05, "loss": 2.620960998535156, "memory(GiB)": 72.85, "step": 31940, "token_acc": 0.46496815286624205, "train_speed(iter/s)": 0.670703 }, { "epoch": 1.3686217385716122, "grad_norm": 5.072221279144287, "learning_rate": 8.263776811501475e-05, "loss": 2.501393127441406, "memory(GiB)": 72.85, "step": 31945, "token_acc": 0.4478114478114478, "train_speed(iter/s)": 0.670704 }, { "epoch": 1.3688359539008612, "grad_norm": 4.608165264129639, "learning_rate": 8.263266955352806e-05, "loss": 2.0686132431030275, "memory(GiB)": 72.85, "step": 31950, "token_acc": 0.5377358490566038, "train_speed(iter/s)": 0.670684 }, { "epoch": 1.36905016923011, "grad_norm": 5.751520156860352, "learning_rate": 8.262757040086995e-05, "loss": 2.021062469482422, "memory(GiB)": 72.85, "step": 31955, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.670703 }, { "epoch": 1.369264384559359, "grad_norm": 3.6281046867370605, "learning_rate": 8.262247065713278e-05, "loss": 2.4553253173828127, "memory(GiB)": 72.85, "step": 31960, "token_acc": 0.5031446540880503, "train_speed(iter/s)": 0.670704 }, { "epoch": 1.369478599888608, "grad_norm": 4.736834526062012, "learning_rate": 8.261737032240897e-05, "loss": 2.585248565673828, "memory(GiB)": 72.85, "step": 31965, "token_acc": 0.4676923076923077, "train_speed(iter/s)": 0.670717 }, { "epoch": 1.369692815217857, "grad_norm": 4.7174530029296875, "learning_rate": 8.26122693967909e-05, "loss": 2.4330207824707033, "memory(GiB)": 72.85, "step": 31970, "token_acc": 0.48627450980392156, "train_speed(iter/s)": 0.670723 }, { "epoch": 1.369907030547106, "grad_norm": 4.962123870849609, "learning_rate": 8.260716788037096e-05, "loss": 2.2327316284179686, "memory(GiB)": 72.85, "step": 31975, "token_acc": 0.5136186770428015, "train_speed(iter/s)": 0.670704 }, { "epoch": 
1.370121245876355, "grad_norm": 5.531832218170166, "learning_rate": 8.260206577324161e-05, "loss": 2.2898208618164064, "memory(GiB)": 72.85, "step": 31980, "token_acc": 0.4892086330935252, "train_speed(iter/s)": 0.670702 }, { "epoch": 1.3703354612056038, "grad_norm": 4.282350063323975, "learning_rate": 8.259696307549523e-05, "loss": 2.238267517089844, "memory(GiB)": 72.85, "step": 31985, "token_acc": 0.49800796812749004, "train_speed(iter/s)": 0.670701 }, { "epoch": 1.3705496765348528, "grad_norm": 4.487701892852783, "learning_rate": 8.25918597872243e-05, "loss": 2.3418300628662108, "memory(GiB)": 72.85, "step": 31990, "token_acc": 0.479020979020979, "train_speed(iter/s)": 0.670719 }, { "epoch": 1.3707638918641019, "grad_norm": 4.227960109710693, "learning_rate": 8.258675590852125e-05, "loss": 2.1556888580322267, "memory(GiB)": 72.85, "step": 31995, "token_acc": 0.5323741007194245, "train_speed(iter/s)": 0.670724 }, { "epoch": 1.3709781071933507, "grad_norm": 4.482341766357422, "learning_rate": 8.258165143947855e-05, "loss": 2.2390745162963865, "memory(GiB)": 72.85, "step": 32000, "token_acc": 0.5528455284552846, "train_speed(iter/s)": 0.670706 }, { "epoch": 1.3709781071933507, "eval_loss": 2.0070526599884033, "eval_runtime": 14.2318, "eval_samples_per_second": 7.027, "eval_steps_per_second": 7.027, "eval_token_acc": 0.5094086021505376, "step": 32000 }, { "epoch": 1.3711923225225997, "grad_norm": 4.465327739715576, "learning_rate": 8.257654638018866e-05, "loss": 2.2294986724853514, "memory(GiB)": 72.85, "step": 32005, "token_acc": 0.5096899224806202, "train_speed(iter/s)": 0.67047 }, { "epoch": 1.3714065378518487, "grad_norm": 4.249327182769775, "learning_rate": 8.257144073074409e-05, "loss": 2.510961151123047, "memory(GiB)": 72.85, "step": 32010, "token_acc": 0.43804034582132567, "train_speed(iter/s)": 0.670474 }, { "epoch": 1.3716207531810976, "grad_norm": 4.061126232147217, "learning_rate": 8.256633449123732e-05, "loss": 2.3427724838256836, "memory(GiB)": 72.85, 
"step": 32015, "token_acc": 0.5322033898305085, "train_speed(iter/s)": 0.670484 }, { "epoch": 1.3718349685103466, "grad_norm": 3.8735780715942383, "learning_rate": 8.256122766176083e-05, "loss": 2.232590675354004, "memory(GiB)": 72.85, "step": 32020, "token_acc": 0.49074074074074076, "train_speed(iter/s)": 0.670461 }, { "epoch": 1.3720491838395956, "grad_norm": 5.114034175872803, "learning_rate": 8.255612024240716e-05, "loss": 2.423280143737793, "memory(GiB)": 72.85, "step": 32025, "token_acc": 0.4721311475409836, "train_speed(iter/s)": 0.670449 }, { "epoch": 1.3722633991688444, "grad_norm": 5.3832807540893555, "learning_rate": 8.255101223326884e-05, "loss": 2.310138130187988, "memory(GiB)": 72.85, "step": 32030, "token_acc": 0.4785276073619632, "train_speed(iter/s)": 0.670468 }, { "epoch": 1.3724776144980935, "grad_norm": 4.304898262023926, "learning_rate": 8.25459036344384e-05, "loss": 2.157273292541504, "memory(GiB)": 72.85, "step": 32035, "token_acc": 0.49433962264150944, "train_speed(iter/s)": 0.670453 }, { "epoch": 1.3726918298273425, "grad_norm": 3.4540958404541016, "learning_rate": 8.254079444600837e-05, "loss": 2.150146484375, "memory(GiB)": 72.85, "step": 32040, "token_acc": 0.5063694267515924, "train_speed(iter/s)": 0.670462 }, { "epoch": 1.3729060451565913, "grad_norm": 3.6178932189941406, "learning_rate": 8.253568466807135e-05, "loss": 2.4395820617675783, "memory(GiB)": 72.85, "step": 32045, "token_acc": 0.4803921568627451, "train_speed(iter/s)": 0.670482 }, { "epoch": 1.3731202604858403, "grad_norm": 4.534244537353516, "learning_rate": 8.253057430071987e-05, "loss": 2.342867851257324, "memory(GiB)": 72.85, "step": 32050, "token_acc": 0.486284289276808, "train_speed(iter/s)": 0.670472 }, { "epoch": 1.3733344758150894, "grad_norm": 5.672849655151367, "learning_rate": 8.252546334404652e-05, "loss": 2.2042314529418947, "memory(GiB)": 72.85, "step": 32055, "token_acc": 0.539622641509434, "train_speed(iter/s)": 0.670475 }, { "epoch": 1.3735486911443382, 
"grad_norm": 3.812840461730957, "learning_rate": 8.252035179814388e-05, "loss": 2.0858755111694336, "memory(GiB)": 72.85, "step": 32060, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.67045 }, { "epoch": 1.3737629064735872, "grad_norm": 5.08303689956665, "learning_rate": 8.251523966310457e-05, "loss": 2.520725631713867, "memory(GiB)": 72.85, "step": 32065, "token_acc": 0.5109489051094891, "train_speed(iter/s)": 0.670454 }, { "epoch": 1.3739771218028363, "grad_norm": 4.029460906982422, "learning_rate": 8.251012693902118e-05, "loss": 2.144116973876953, "memory(GiB)": 72.85, "step": 32070, "token_acc": 0.5469255663430421, "train_speed(iter/s)": 0.670451 }, { "epoch": 1.374191337132085, "grad_norm": 4.27755880355835, "learning_rate": 8.250501362598635e-05, "loss": 2.4358057022094726, "memory(GiB)": 72.85, "step": 32075, "token_acc": 0.45695364238410596, "train_speed(iter/s)": 0.670439 }, { "epoch": 1.3744055524613341, "grad_norm": 3.371558904647827, "learning_rate": 8.249989972409271e-05, "loss": 2.645787239074707, "memory(GiB)": 72.85, "step": 32080, "token_acc": 0.5035460992907801, "train_speed(iter/s)": 0.67044 }, { "epoch": 1.3746197677905831, "grad_norm": 4.313518524169922, "learning_rate": 8.249478523343291e-05, "loss": 2.667113494873047, "memory(GiB)": 72.85, "step": 32085, "token_acc": 0.4697508896797153, "train_speed(iter/s)": 0.670441 }, { "epoch": 1.374833983119832, "grad_norm": 4.181258678436279, "learning_rate": 8.248967015409956e-05, "loss": 2.195412254333496, "memory(GiB)": 72.85, "step": 32090, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.670451 }, { "epoch": 1.375048198449081, "grad_norm": 4.522584915161133, "learning_rate": 8.248455448618538e-05, "loss": 2.089267921447754, "memory(GiB)": 72.85, "step": 32095, "token_acc": 0.5433962264150943, "train_speed(iter/s)": 0.670451 }, { "epoch": 1.37526241377833, "grad_norm": 3.4430291652679443, "learning_rate": 8.247943822978302e-05, "loss": 2.1678356170654296, "memory(GiB)": 72.85, 
"step": 32100, "token_acc": 0.535483870967742, "train_speed(iter/s)": 0.670476 }, { "epoch": 1.3754766291075788, "grad_norm": 5.114493370056152, "learning_rate": 8.247432138498518e-05, "loss": 2.280723762512207, "memory(GiB)": 72.85, "step": 32105, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.670467 }, { "epoch": 1.3756908444368279, "grad_norm": 4.678249359130859, "learning_rate": 8.246920395188453e-05, "loss": 2.528839874267578, "memory(GiB)": 72.85, "step": 32110, "token_acc": 0.4497991967871486, "train_speed(iter/s)": 0.670475 }, { "epoch": 1.375905059766077, "grad_norm": 5.02876091003418, "learning_rate": 8.24640859305738e-05, "loss": 2.1712482452392576, "memory(GiB)": 72.85, "step": 32115, "token_acc": 0.49825783972125437, "train_speed(iter/s)": 0.670452 }, { "epoch": 1.3761192750953257, "grad_norm": 3.048959493637085, "learning_rate": 8.245896732114569e-05, "loss": 2.313570976257324, "memory(GiB)": 72.85, "step": 32120, "token_acc": 0.5290322580645161, "train_speed(iter/s)": 0.670463 }, { "epoch": 1.3763334904245748, "grad_norm": 4.328846454620361, "learning_rate": 8.245384812369294e-05, "loss": 2.204569625854492, "memory(GiB)": 72.85, "step": 32125, "token_acc": 0.5209790209790209, "train_speed(iter/s)": 0.670473 }, { "epoch": 1.3765477057538238, "grad_norm": 3.422299385070801, "learning_rate": 8.24487283383083e-05, "loss": 2.5420202255249023, "memory(GiB)": 72.85, "step": 32130, "token_acc": 0.43174603174603177, "train_speed(iter/s)": 0.670462 }, { "epoch": 1.3767619210830726, "grad_norm": 3.719125747680664, "learning_rate": 8.244360796508448e-05, "loss": 2.434687042236328, "memory(GiB)": 72.85, "step": 32135, "token_acc": 0.49859154929577465, "train_speed(iter/s)": 0.67047 }, { "epoch": 1.3769761364123216, "grad_norm": 4.948770046234131, "learning_rate": 8.24384870041143e-05, "loss": 2.170930099487305, "memory(GiB)": 72.85, "step": 32140, "token_acc": 0.5125786163522013, "train_speed(iter/s)": 0.670446 }, { "epoch": 1.3771903517415707, 
"grad_norm": 4.8467183113098145, "learning_rate": 8.243336545549047e-05, "loss": 2.3663326263427735, "memory(GiB)": 72.85, "step": 32145, "token_acc": 0.47315436241610737, "train_speed(iter/s)": 0.670435 }, { "epoch": 1.3774045670708195, "grad_norm": 5.476717948913574, "learning_rate": 8.24282433193058e-05, "loss": 2.530979537963867, "memory(GiB)": 72.85, "step": 32150, "token_acc": 0.48398576512455516, "train_speed(iter/s)": 0.670444 }, { "epoch": 1.3776187824000685, "grad_norm": 7.916449069976807, "learning_rate": 8.242312059565311e-05, "loss": 2.2504438400268554, "memory(GiB)": 72.85, "step": 32155, "token_acc": 0.531496062992126, "train_speed(iter/s)": 0.670454 }, { "epoch": 1.3778329977293176, "grad_norm": 3.930546998977661, "learning_rate": 8.241799728462516e-05, "loss": 2.4154485702514648, "memory(GiB)": 72.85, "step": 32160, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.670465 }, { "epoch": 1.3780472130585664, "grad_norm": 3.9903061389923096, "learning_rate": 8.241287338631478e-05, "loss": 2.127430725097656, "memory(GiB)": 72.85, "step": 32165, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.670478 }, { "epoch": 1.3782614283878154, "grad_norm": 3.963423728942871, "learning_rate": 8.240774890081479e-05, "loss": 2.16961727142334, "memory(GiB)": 72.85, "step": 32170, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.670478 }, { "epoch": 1.3784756437170644, "grad_norm": 3.7348265647888184, "learning_rate": 8.240262382821802e-05, "loss": 2.1413373947143555, "memory(GiB)": 72.85, "step": 32175, "token_acc": 0.55, "train_speed(iter/s)": 0.670495 }, { "epoch": 1.3786898590463132, "grad_norm": 4.380642890930176, "learning_rate": 8.239749816861732e-05, "loss": 2.424448585510254, "memory(GiB)": 72.85, "step": 32180, "token_acc": 0.4788273615635179, "train_speed(iter/s)": 0.670502 }, { "epoch": 1.3789040743755623, "grad_norm": 3.604891061782837, "learning_rate": 8.239237192210557e-05, "loss": 2.2447784423828123, "memory(GiB)": 72.85, 
"step": 32185, "token_acc": 0.5149700598802395, "train_speed(iter/s)": 0.670521 }, { "epoch": 1.3791182897048113, "grad_norm": 3.8494303226470947, "learning_rate": 8.238724508877562e-05, "loss": 2.4252227783203124, "memory(GiB)": 72.85, "step": 32190, "token_acc": 0.4913494809688581, "train_speed(iter/s)": 0.670471 }, { "epoch": 1.3793325050340601, "grad_norm": 3.7589187622070312, "learning_rate": 8.238211766872033e-05, "loss": 2.5701828002929688, "memory(GiB)": 72.85, "step": 32195, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.670482 }, { "epoch": 1.3795467203633092, "grad_norm": 4.866722106933594, "learning_rate": 8.23769896620326e-05, "loss": 2.55840950012207, "memory(GiB)": 72.85, "step": 32200, "token_acc": 0.46853146853146854, "train_speed(iter/s)": 0.670485 }, { "epoch": 1.3797609356925582, "grad_norm": 4.773185729980469, "learning_rate": 8.237186106880532e-05, "loss": 2.296742630004883, "memory(GiB)": 72.85, "step": 32205, "token_acc": 0.4684014869888476, "train_speed(iter/s)": 0.670501 }, { "epoch": 1.379975151021807, "grad_norm": 4.261193752288818, "learning_rate": 8.236673188913142e-05, "loss": 2.541493797302246, "memory(GiB)": 72.85, "step": 32210, "token_acc": 0.49666666666666665, "train_speed(iter/s)": 0.670497 }, { "epoch": 1.380189366351056, "grad_norm": 3.6997387409210205, "learning_rate": 8.236160212310382e-05, "loss": 2.0588214874267576, "memory(GiB)": 72.85, "step": 32215, "token_acc": 0.5187713310580204, "train_speed(iter/s)": 0.670504 }, { "epoch": 1.380403581680305, "grad_norm": 4.933006763458252, "learning_rate": 8.235647177081543e-05, "loss": 2.3395532608032226, "memory(GiB)": 72.85, "step": 32220, "token_acc": 0.51985559566787, "train_speed(iter/s)": 0.670506 }, { "epoch": 1.380617797009554, "grad_norm": 6.061836242675781, "learning_rate": 8.23513408323592e-05, "loss": 2.442374420166016, "memory(GiB)": 72.85, "step": 32225, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.670531 }, { "epoch": 1.380832012338803, 
"grad_norm": 3.9388740062713623, "learning_rate": 8.234620930782808e-05, "loss": 2.497257614135742, "memory(GiB)": 72.85, "step": 32230, "token_acc": 0.4585987261146497, "train_speed(iter/s)": 0.670533 }, { "epoch": 1.381046227668052, "grad_norm": 6.799793720245361, "learning_rate": 8.234107719731506e-05, "loss": 2.6731048583984376, "memory(GiB)": 72.85, "step": 32235, "token_acc": 0.43037974683544306, "train_speed(iter/s)": 0.670522 }, { "epoch": 1.3812604429973008, "grad_norm": 4.212690353393555, "learning_rate": 8.233594450091306e-05, "loss": 2.7791374206542967, "memory(GiB)": 72.85, "step": 32240, "token_acc": 0.4370629370629371, "train_speed(iter/s)": 0.670541 }, { "epoch": 1.3814746583265498, "grad_norm": 3.699436902999878, "learning_rate": 8.233081121871509e-05, "loss": 2.443575859069824, "memory(GiB)": 72.85, "step": 32245, "token_acc": 0.49615384615384617, "train_speed(iter/s)": 0.670542 }, { "epoch": 1.3816888736557988, "grad_norm": 3.975080966949463, "learning_rate": 8.232567735081416e-05, "loss": 2.398280715942383, "memory(GiB)": 72.85, "step": 32250, "token_acc": 0.48986486486486486, "train_speed(iter/s)": 0.670531 }, { "epoch": 1.3819030889850477, "grad_norm": 5.534165859222412, "learning_rate": 8.232054289730326e-05, "loss": 2.447195053100586, "memory(GiB)": 72.85, "step": 32255, "token_acc": 0.47191011235955055, "train_speed(iter/s)": 0.67055 }, { "epoch": 1.3821173043142967, "grad_norm": 3.8416173458099365, "learning_rate": 8.23154078582754e-05, "loss": 2.5333621978759764, "memory(GiB)": 72.85, "step": 32260, "token_acc": 0.48161764705882354, "train_speed(iter/s)": 0.670581 }, { "epoch": 1.3823315196435457, "grad_norm": 4.773869037628174, "learning_rate": 8.231027223382362e-05, "loss": 2.1755290985107423, "memory(GiB)": 72.85, "step": 32265, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.670595 }, { "epoch": 1.3825457349727945, "grad_norm": 4.190746307373047, "learning_rate": 8.230513602404094e-05, "loss": 2.2613025665283204, 
"memory(GiB)": 72.85, "step": 32270, "token_acc": 0.4763779527559055, "train_speed(iter/s)": 0.670612 }, { "epoch": 1.3827599503020436, "grad_norm": 4.336389064788818, "learning_rate": 8.229999922902043e-05, "loss": 2.365435791015625, "memory(GiB)": 72.85, "step": 32275, "token_acc": 0.5166051660516605, "train_speed(iter/s)": 0.670611 }, { "epoch": 1.3829741656312926, "grad_norm": 5.282728672027588, "learning_rate": 8.229486184885511e-05, "loss": 2.6085933685302733, "memory(GiB)": 72.85, "step": 32280, "token_acc": 0.4646840148698885, "train_speed(iter/s)": 0.670612 }, { "epoch": 1.3831883809605414, "grad_norm": 4.288825035095215, "learning_rate": 8.228972388363809e-05, "loss": 2.3356494903564453, "memory(GiB)": 72.85, "step": 32285, "token_acc": 0.5302013422818792, "train_speed(iter/s)": 0.670623 }, { "epoch": 1.3834025962897905, "grad_norm": 4.522719860076904, "learning_rate": 8.228458533346243e-05, "loss": 2.1370027542114256, "memory(GiB)": 72.85, "step": 32290, "token_acc": 0.5206896551724138, "train_speed(iter/s)": 0.670619 }, { "epoch": 1.3836168116190395, "grad_norm": 5.1876068115234375, "learning_rate": 8.227944619842122e-05, "loss": 2.3602890014648437, "memory(GiB)": 72.85, "step": 32295, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.670621 }, { "epoch": 1.3838310269482883, "grad_norm": 4.164478778839111, "learning_rate": 8.227430647860757e-05, "loss": 2.4091501235961914, "memory(GiB)": 72.85, "step": 32300, "token_acc": 0.47474747474747475, "train_speed(iter/s)": 0.670636 }, { "epoch": 1.3840452422775373, "grad_norm": 3.5779290199279785, "learning_rate": 8.226916617411457e-05, "loss": 2.262989044189453, "memory(GiB)": 72.85, "step": 32305, "token_acc": 0.5241157556270096, "train_speed(iter/s)": 0.670659 }, { "epoch": 1.3842594576067864, "grad_norm": 3.2540223598480225, "learning_rate": 8.226402528503536e-05, "loss": 1.9567455291748046, "memory(GiB)": 72.85, "step": 32310, "token_acc": 0.5606694560669456, "train_speed(iter/s)": 0.670654 }, { 
"epoch": 1.3844736729360352, "grad_norm": 4.149267196655273, "learning_rate": 8.225888381146306e-05, "loss": 2.2417139053344726, "memory(GiB)": 72.85, "step": 32315, "token_acc": 0.5122699386503068, "train_speed(iter/s)": 0.670617 }, { "epoch": 1.3846878882652842, "grad_norm": 4.7242512702941895, "learning_rate": 8.225374175349082e-05, "loss": 2.2318212509155275, "memory(GiB)": 72.85, "step": 32320, "token_acc": 0.5433333333333333, "train_speed(iter/s)": 0.670618 }, { "epoch": 1.3849021035945333, "grad_norm": 5.65032958984375, "learning_rate": 8.224859911121179e-05, "loss": 2.5118473052978514, "memory(GiB)": 72.85, "step": 32325, "token_acc": 0.46757679180887374, "train_speed(iter/s)": 0.670633 }, { "epoch": 1.385116318923782, "grad_norm": 4.0879340171813965, "learning_rate": 8.224345588471914e-05, "loss": 2.605678176879883, "memory(GiB)": 72.85, "step": 32330, "token_acc": 0.4470198675496689, "train_speed(iter/s)": 0.670622 }, { "epoch": 1.385330534253031, "grad_norm": 5.342716217041016, "learning_rate": 8.223831207410604e-05, "loss": 2.2036411285400392, "memory(GiB)": 72.85, "step": 32335, "token_acc": 0.48945147679324896, "train_speed(iter/s)": 0.670613 }, { "epoch": 1.3855447495822801, "grad_norm": 4.939912796020508, "learning_rate": 8.223316767946567e-05, "loss": 2.4933956146240233, "memory(GiB)": 72.85, "step": 32340, "token_acc": 0.43214285714285716, "train_speed(iter/s)": 0.670642 }, { "epoch": 1.385758964911529, "grad_norm": 4.141592025756836, "learning_rate": 8.222802270089126e-05, "loss": 2.434149169921875, "memory(GiB)": 72.85, "step": 32345, "token_acc": 0.48355263157894735, "train_speed(iter/s)": 0.67063 }, { "epoch": 1.385973180240778, "grad_norm": 4.2349934577941895, "learning_rate": 8.222287713847593e-05, "loss": 2.176634979248047, "memory(GiB)": 72.85, "step": 32350, "token_acc": 0.4786885245901639, "train_speed(iter/s)": 0.670634 }, { "epoch": 1.386187395570027, "grad_norm": 4.808685302734375, "learning_rate": 8.221773099231299e-05, "loss": 
2.6001565933227537, "memory(GiB)": 72.85, "step": 32355, "token_acc": 0.49363057324840764, "train_speed(iter/s)": 0.670611 }, { "epoch": 1.3864016108992758, "grad_norm": 3.8443586826324463, "learning_rate": 8.221258426249563e-05, "loss": 2.4009790420532227, "memory(GiB)": 72.85, "step": 32360, "token_acc": 0.4854771784232365, "train_speed(iter/s)": 0.670603 }, { "epoch": 1.3866158262285249, "grad_norm": 3.532878875732422, "learning_rate": 8.220743694911707e-05, "loss": 2.2124061584472656, "memory(GiB)": 72.85, "step": 32365, "token_acc": 0.5039370078740157, "train_speed(iter/s)": 0.670597 }, { "epoch": 1.386830041557774, "grad_norm": 4.440154552459717, "learning_rate": 8.220228905227061e-05, "loss": 2.3917266845703127, "memory(GiB)": 72.85, "step": 32370, "token_acc": 0.49642857142857144, "train_speed(iter/s)": 0.670608 }, { "epoch": 1.3870442568870227, "grad_norm": 5.63718318939209, "learning_rate": 8.219714057204944e-05, "loss": 2.5435672760009767, "memory(GiB)": 72.85, "step": 32375, "token_acc": 0.5062111801242236, "train_speed(iter/s)": 0.670605 }, { "epoch": 1.3872584722162717, "grad_norm": 4.6255974769592285, "learning_rate": 8.219199150854688e-05, "loss": 2.0352046966552733, "memory(GiB)": 72.85, "step": 32380, "token_acc": 0.5697211155378487, "train_speed(iter/s)": 0.67061 }, { "epoch": 1.3874726875455208, "grad_norm": 4.102219104766846, "learning_rate": 8.218684186185622e-05, "loss": 2.2615570068359374, "memory(GiB)": 72.85, "step": 32385, "token_acc": 0.5033333333333333, "train_speed(iter/s)": 0.670631 }, { "epoch": 1.3876869028747696, "grad_norm": 5.239809513092041, "learning_rate": 8.218169163207068e-05, "loss": 2.409151077270508, "memory(GiB)": 72.85, "step": 32390, "token_acc": 0.51931330472103, "train_speed(iter/s)": 0.670655 }, { "epoch": 1.3879011182040186, "grad_norm": 3.3418407440185547, "learning_rate": 8.217654081928364e-05, "loss": 2.5625328063964843, "memory(GiB)": 72.85, "step": 32395, "token_acc": 0.44, "train_speed(iter/s)": 0.670661 }, { 
"epoch": 1.3881153335332677, "grad_norm": 12.004657745361328, "learning_rate": 8.217138942358836e-05, "loss": 2.243642234802246, "memory(GiB)": 72.85, "step": 32400, "token_acc": 0.5460526315789473, "train_speed(iter/s)": 0.670644 }, { "epoch": 1.3883295488625165, "grad_norm": 4.958562850952148, "learning_rate": 8.216623744507819e-05, "loss": 2.445138168334961, "memory(GiB)": 72.85, "step": 32405, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.670674 }, { "epoch": 1.3885437641917655, "grad_norm": 4.561666488647461, "learning_rate": 8.216108488384645e-05, "loss": 2.233106803894043, "memory(GiB)": 72.85, "step": 32410, "token_acc": 0.48534201954397393, "train_speed(iter/s)": 0.670686 }, { "epoch": 1.3887579795210145, "grad_norm": 4.471095561981201, "learning_rate": 8.21559317399865e-05, "loss": 2.5435237884521484, "memory(GiB)": 72.85, "step": 32415, "token_acc": 0.4723756906077348, "train_speed(iter/s)": 0.670711 }, { "epoch": 1.3889721948502634, "grad_norm": 5.3444390296936035, "learning_rate": 8.215077801359168e-05, "loss": 2.687479782104492, "memory(GiB)": 72.85, "step": 32420, "token_acc": 0.4573170731707317, "train_speed(iter/s)": 0.670723 }, { "epoch": 1.3891864101795124, "grad_norm": 4.750268936157227, "learning_rate": 8.214562370475533e-05, "loss": 2.4383691787719726, "memory(GiB)": 72.85, "step": 32425, "token_acc": 0.4381625441696113, "train_speed(iter/s)": 0.670747 }, { "epoch": 1.3894006255087614, "grad_norm": 4.54951810836792, "learning_rate": 8.214046881357087e-05, "loss": 1.9375518798828124, "memory(GiB)": 72.85, "step": 32430, "token_acc": 0.5642857142857143, "train_speed(iter/s)": 0.670755 }, { "epoch": 1.3896148408380102, "grad_norm": 4.657774448394775, "learning_rate": 8.213531334013167e-05, "loss": 2.128046417236328, "memory(GiB)": 72.85, "step": 32435, "token_acc": 0.5190476190476191, "train_speed(iter/s)": 0.670761 }, { "epoch": 1.3898290561672593, "grad_norm": 3.851917028427124, "learning_rate": 8.213015728453113e-05, "loss": 
2.337056541442871, "memory(GiB)": 72.85, "step": 32440, "token_acc": 0.48905109489051096, "train_speed(iter/s)": 0.670764 }, { "epoch": 1.3900432714965083, "grad_norm": 3.7004127502441406, "learning_rate": 8.212500064686264e-05, "loss": 2.046993637084961, "memory(GiB)": 72.85, "step": 32445, "token_acc": 0.5622489959839357, "train_speed(iter/s)": 0.670771 }, { "epoch": 1.3902574868257571, "grad_norm": 5.009748935699463, "learning_rate": 8.211984342721963e-05, "loss": 2.643693733215332, "memory(GiB)": 72.85, "step": 32450, "token_acc": 0.4610169491525424, "train_speed(iter/s)": 0.670772 }, { "epoch": 1.3904717021550062, "grad_norm": 3.2614376544952393, "learning_rate": 8.211468562569553e-05, "loss": 2.244522476196289, "memory(GiB)": 72.85, "step": 32455, "token_acc": 0.525, "train_speed(iter/s)": 0.670779 }, { "epoch": 1.3906859174842552, "grad_norm": 3.9816794395446777, "learning_rate": 8.210952724238377e-05, "loss": 2.637177276611328, "memory(GiB)": 72.85, "step": 32460, "token_acc": 0.4301369863013699, "train_speed(iter/s)": 0.67077 }, { "epoch": 1.390900132813504, "grad_norm": 3.8606433868408203, "learning_rate": 8.21043682773778e-05, "loss": 2.232489013671875, "memory(GiB)": 72.85, "step": 32465, "token_acc": 0.47719298245614034, "train_speed(iter/s)": 0.670775 }, { "epoch": 1.391114348142753, "grad_norm": 4.0110650062561035, "learning_rate": 8.209920873077109e-05, "loss": 2.030526351928711, "memory(GiB)": 72.85, "step": 32470, "token_acc": 0.5138339920948617, "train_speed(iter/s)": 0.670783 }, { "epoch": 1.391328563472002, "grad_norm": 4.880606651306152, "learning_rate": 8.209404860265709e-05, "loss": 2.2086017608642576, "memory(GiB)": 72.85, "step": 32475, "token_acc": 0.5311203319502075, "train_speed(iter/s)": 0.670796 }, { "epoch": 1.3915427788012509, "grad_norm": 4.979836463928223, "learning_rate": 8.208888789312929e-05, "loss": 2.2492372512817385, "memory(GiB)": 72.85, "step": 32480, "token_acc": 0.49377593360995853, "train_speed(iter/s)": 0.670818 }, { 
"epoch": 1.3917569941305, "grad_norm": 5.238133907318115, "learning_rate": 8.20837266022812e-05, "loss": 2.1436996459960938, "memory(GiB)": 72.85, "step": 32485, "token_acc": 0.5290102389078498, "train_speed(iter/s)": 0.670822 }, { "epoch": 1.391971209459749, "grad_norm": 4.4695963859558105, "learning_rate": 8.207856473020629e-05, "loss": 2.6238088607788086, "memory(GiB)": 72.85, "step": 32490, "token_acc": 0.4362934362934363, "train_speed(iter/s)": 0.67079 }, { "epoch": 1.392185424788998, "grad_norm": 4.026541233062744, "learning_rate": 8.20734022769981e-05, "loss": 2.267851638793945, "memory(GiB)": 72.85, "step": 32495, "token_acc": 0.5130718954248366, "train_speed(iter/s)": 0.670758 }, { "epoch": 1.3923996401182468, "grad_norm": 4.017603397369385, "learning_rate": 8.206823924275015e-05, "loss": 2.421858215332031, "memory(GiB)": 72.85, "step": 32500, "token_acc": 0.5153374233128835, "train_speed(iter/s)": 0.670742 }, { "epoch": 1.3923996401182468, "eval_loss": 2.3525805473327637, "eval_runtime": 15.091, "eval_samples_per_second": 6.626, "eval_steps_per_second": 6.626, "eval_token_acc": 0.49809885931558934, "step": 32500 }, { "epoch": 1.3926138554474958, "grad_norm": 3.7025949954986572, "learning_rate": 8.206307562755592e-05, "loss": 2.383790397644043, "memory(GiB)": 72.85, "step": 32505, "token_acc": 0.49402023919043236, "train_speed(iter/s)": 0.670499 }, { "epoch": 1.3928280707767449, "grad_norm": 6.160221099853516, "learning_rate": 8.205791143150905e-05, "loss": 2.3339752197265624, "memory(GiB)": 72.85, "step": 32510, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.670527 }, { "epoch": 1.3930422861059937, "grad_norm": 4.7086615562438965, "learning_rate": 8.205274665470301e-05, "loss": 2.380982208251953, "memory(GiB)": 72.85, "step": 32515, "token_acc": 0.4693140794223827, "train_speed(iter/s)": 0.670504 }, { "epoch": 1.3932565014352427, "grad_norm": 3.738332748413086, "learning_rate": 8.204758129723142e-05, "loss": 2.45247859954834, "memory(GiB)": 
72.85, "step": 32520, "token_acc": 0.4884393063583815, "train_speed(iter/s)": 0.6705 }, { "epoch": 1.3934707167644917, "grad_norm": 5.558591842651367, "learning_rate": 8.204241535918782e-05, "loss": 2.3738059997558594, "memory(GiB)": 72.85, "step": 32525, "token_acc": 0.5037878787878788, "train_speed(iter/s)": 0.670527 }, { "epoch": 1.3936849320937406, "grad_norm": 3.9193782806396484, "learning_rate": 8.20372488406658e-05, "loss": 2.3221588134765625, "memory(GiB)": 72.85, "step": 32530, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.670515 }, { "epoch": 1.3938991474229896, "grad_norm": 4.470978736877441, "learning_rate": 8.203208174175897e-05, "loss": 2.6357887268066404, "memory(GiB)": 72.85, "step": 32535, "token_acc": 0.4427710843373494, "train_speed(iter/s)": 0.670486 }, { "epoch": 1.3941133627522386, "grad_norm": 4.211836338043213, "learning_rate": 8.202691406256092e-05, "loss": 2.3013978958129884, "memory(GiB)": 72.85, "step": 32540, "token_acc": 0.4981132075471698, "train_speed(iter/s)": 0.670502 }, { "epoch": 1.3943275780814874, "grad_norm": 3.2450742721557617, "learning_rate": 8.20217458031653e-05, "loss": 2.4708126068115233, "memory(GiB)": 72.85, "step": 32545, "token_acc": 0.49538461538461537, "train_speed(iter/s)": 0.670455 }, { "epoch": 1.3945417934107365, "grad_norm": 4.29420804977417, "learning_rate": 8.20165769636657e-05, "loss": 2.2395416259765626, "memory(GiB)": 72.85, "step": 32550, "token_acc": 0.5212765957446809, "train_speed(iter/s)": 0.670457 }, { "epoch": 1.3947560087399855, "grad_norm": 4.186147689819336, "learning_rate": 8.201140754415577e-05, "loss": 2.0329328536987306, "memory(GiB)": 72.85, "step": 32555, "token_acc": 0.5675675675675675, "train_speed(iter/s)": 0.670454 }, { "epoch": 1.3949702240692343, "grad_norm": 3.910404682159424, "learning_rate": 8.200623754472918e-05, "loss": 2.1743310928344726, "memory(GiB)": 72.85, "step": 32560, "token_acc": 0.5095785440613027, "train_speed(iter/s)": 0.670441 }, { "epoch": 
1.3951844393984834, "grad_norm": 3.5205211639404297, "learning_rate": 8.200106696547956e-05, "loss": 2.345709228515625, "memory(GiB)": 72.85, "step": 32565, "token_acc": 0.4950166112956811, "train_speed(iter/s)": 0.670454 }, { "epoch": 1.3953986547277324, "grad_norm": 4.7175612449646, "learning_rate": 8.19958958065006e-05, "loss": 2.2110963821411134, "memory(GiB)": 72.85, "step": 32570, "token_acc": 0.5331010452961672, "train_speed(iter/s)": 0.670446 }, { "epoch": 1.3956128700569812, "grad_norm": 4.660590648651123, "learning_rate": 8.199072406788598e-05, "loss": 2.3682273864746093, "memory(GiB)": 72.85, "step": 32575, "token_acc": 0.4758364312267658, "train_speed(iter/s)": 0.670464 }, { "epoch": 1.3958270853862302, "grad_norm": 3.9689784049987793, "learning_rate": 8.198555174972936e-05, "loss": 2.2421680450439454, "memory(GiB)": 72.85, "step": 32580, "token_acc": 0.5110410094637224, "train_speed(iter/s)": 0.670455 }, { "epoch": 1.3960413007154793, "grad_norm": 4.5779218673706055, "learning_rate": 8.198037885212449e-05, "loss": 2.3798126220703124, "memory(GiB)": 72.85, "step": 32585, "token_acc": 0.5099337748344371, "train_speed(iter/s)": 0.670479 }, { "epoch": 1.396255516044728, "grad_norm": 4.317559242248535, "learning_rate": 8.197520537516504e-05, "loss": 2.608799934387207, "memory(GiB)": 72.85, "step": 32590, "token_acc": 0.46308724832214765, "train_speed(iter/s)": 0.670461 }, { "epoch": 1.3964697313739771, "grad_norm": 5.157623767852783, "learning_rate": 8.197003131894477e-05, "loss": 2.293400192260742, "memory(GiB)": 72.85, "step": 32595, "token_acc": 0.5386996904024768, "train_speed(iter/s)": 0.67046 }, { "epoch": 1.3966839467032262, "grad_norm": 3.5339741706848145, "learning_rate": 8.196485668355736e-05, "loss": 1.9377897262573243, "memory(GiB)": 72.85, "step": 32600, "token_acc": 0.5767790262172284, "train_speed(iter/s)": 0.670478 }, { "epoch": 1.396898162032475, "grad_norm": 4.209023952484131, "learning_rate": 8.19596814690966e-05, "loss": 
2.146601104736328, "memory(GiB)": 72.85, "step": 32605, "token_acc": 0.5424354243542435, "train_speed(iter/s)": 0.670481 }, { "epoch": 1.397112377361724, "grad_norm": 4.439004421234131, "learning_rate": 8.195450567565624e-05, "loss": 2.448414611816406, "memory(GiB)": 72.85, "step": 32610, "token_acc": 0.49070631970260226, "train_speed(iter/s)": 0.670498 }, { "epoch": 1.397326592690973, "grad_norm": 3.9346678256988525, "learning_rate": 8.194932930333003e-05, "loss": 2.3718072891235353, "memory(GiB)": 72.85, "step": 32615, "token_acc": 0.5014925373134328, "train_speed(iter/s)": 0.670501 }, { "epoch": 1.3975408080202218, "grad_norm": 4.409459590911865, "learning_rate": 8.194415235221174e-05, "loss": 2.704544258117676, "memory(GiB)": 72.85, "step": 32620, "token_acc": 0.4882943143812709, "train_speed(iter/s)": 0.670516 }, { "epoch": 1.3977550233494709, "grad_norm": 4.2168121337890625, "learning_rate": 8.193897482239517e-05, "loss": 2.377264404296875, "memory(GiB)": 72.85, "step": 32625, "token_acc": 0.4937888198757764, "train_speed(iter/s)": 0.670535 }, { "epoch": 1.39796923867872, "grad_norm": 4.753865718841553, "learning_rate": 8.193379671397411e-05, "loss": 2.1986560821533203, "memory(GiB)": 72.85, "step": 32630, "token_acc": 0.5, "train_speed(iter/s)": 0.670552 }, { "epoch": 1.3981834540079687, "grad_norm": 3.7775840759277344, "learning_rate": 8.192861802704236e-05, "loss": 2.309089469909668, "memory(GiB)": 72.85, "step": 32635, "token_acc": 0.4521452145214521, "train_speed(iter/s)": 0.670564 }, { "epoch": 1.3983976693372178, "grad_norm": 5.957776069641113, "learning_rate": 8.192343876169375e-05, "loss": 2.7392601013183593, "memory(GiB)": 72.85, "step": 32640, "token_acc": 0.41237113402061853, "train_speed(iter/s)": 0.670564 }, { "epoch": 1.3986118846664668, "grad_norm": 3.3313279151916504, "learning_rate": 8.191825891802211e-05, "loss": 2.263278007507324, "memory(GiB)": 72.85, "step": 32645, "token_acc": 0.5310077519379846, "train_speed(iter/s)": 0.670558 }, { 
"epoch": 1.3988260999957156, "grad_norm": 3.807706117630005, "learning_rate": 8.191307849612124e-05, "loss": 2.5184741973876954, "memory(GiB)": 72.85, "step": 32650, "token_acc": 0.47183098591549294, "train_speed(iter/s)": 0.670592 }, { "epoch": 1.3990403153249646, "grad_norm": 4.172436237335205, "learning_rate": 8.190789749608505e-05, "loss": 2.3592132568359374, "memory(GiB)": 72.85, "step": 32655, "token_acc": 0.5, "train_speed(iter/s)": 0.670613 }, { "epoch": 1.3992545306542137, "grad_norm": 4.637584686279297, "learning_rate": 8.190271591800735e-05, "loss": 2.667847442626953, "memory(GiB)": 72.85, "step": 32660, "token_acc": 0.4855072463768116, "train_speed(iter/s)": 0.670623 }, { "epoch": 1.3994687459834627, "grad_norm": 3.6788957118988037, "learning_rate": 8.189753376198202e-05, "loss": 2.4895647048950194, "memory(GiB)": 72.85, "step": 32665, "token_acc": 0.47038327526132406, "train_speed(iter/s)": 0.670621 }, { "epoch": 1.3996829613127115, "grad_norm": 4.604398727416992, "learning_rate": 8.189235102810293e-05, "loss": 2.426490592956543, "memory(GiB)": 72.85, "step": 32670, "token_acc": 0.4983277591973244, "train_speed(iter/s)": 0.670616 }, { "epoch": 1.3998971766419606, "grad_norm": 3.9829800128936768, "learning_rate": 8.1887167716464e-05, "loss": 2.142173004150391, "memory(GiB)": 72.85, "step": 32675, "token_acc": 0.4664179104477612, "train_speed(iter/s)": 0.670636 }, { "epoch": 1.4001113919712096, "grad_norm": 3.9092726707458496, "learning_rate": 8.188198382715913e-05, "loss": 2.1853099822998048, "memory(GiB)": 72.85, "step": 32680, "token_acc": 0.5236363636363637, "train_speed(iter/s)": 0.67065 }, { "epoch": 1.4003256073004584, "grad_norm": 5.0105133056640625, "learning_rate": 8.187679936028219e-05, "loss": 2.2370046615600585, "memory(GiB)": 72.85, "step": 32685, "token_acc": 0.528957528957529, "train_speed(iter/s)": 0.670634 }, { "epoch": 1.4005398226297074, "grad_norm": 3.862349271774292, "learning_rate": 8.187161431592714e-05, "loss": 2.113368606567383, 
"memory(GiB)": 72.85, "step": 32690, "token_acc": 0.5313531353135313, "train_speed(iter/s)": 0.670644 }, { "epoch": 1.4007540379589565, "grad_norm": 5.01990270614624, "learning_rate": 8.186642869418789e-05, "loss": 2.1065040588378907, "memory(GiB)": 72.85, "step": 32695, "token_acc": 0.5702479338842975, "train_speed(iter/s)": 0.670668 }, { "epoch": 1.4009682532882053, "grad_norm": 4.065638065338135, "learning_rate": 8.18612424951584e-05, "loss": 2.3982988357543946, "memory(GiB)": 72.85, "step": 32700, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.67068 }, { "epoch": 1.4011824686174543, "grad_norm": 4.058230876922607, "learning_rate": 8.18560557189326e-05, "loss": 2.2643173217773436, "memory(GiB)": 72.85, "step": 32705, "token_acc": 0.5, "train_speed(iter/s)": 0.670692 }, { "epoch": 1.4013966839467034, "grad_norm": 4.176575660705566, "learning_rate": 8.185086836560448e-05, "loss": 2.7103424072265625, "memory(GiB)": 72.85, "step": 32710, "token_acc": 0.465625, "train_speed(iter/s)": 0.67069 }, { "epoch": 1.4016108992759522, "grad_norm": 5.112689018249512, "learning_rate": 8.1845680435268e-05, "loss": 2.5723602294921877, "memory(GiB)": 72.85, "step": 32715, "token_acc": 0.4520123839009288, "train_speed(iter/s)": 0.670693 }, { "epoch": 1.4018251146052012, "grad_norm": 5.334451198577881, "learning_rate": 8.184049192801715e-05, "loss": 2.471982002258301, "memory(GiB)": 72.85, "step": 32720, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.67069 }, { "epoch": 1.4020393299344502, "grad_norm": 5.094999313354492, "learning_rate": 8.183530284394589e-05, "loss": 1.9800268173217774, "memory(GiB)": 72.85, "step": 32725, "token_acc": 0.5687022900763359, "train_speed(iter/s)": 0.670701 }, { "epoch": 1.402253545263699, "grad_norm": 3.8305020332336426, "learning_rate": 8.183011318314829e-05, "loss": 2.5310684204101563, "memory(GiB)": 72.85, "step": 32730, "token_acc": 0.5050167224080268, "train_speed(iter/s)": 0.670721 }, { "epoch": 1.402467760592948, 
"grad_norm": 5.178485870361328, "learning_rate": 8.182492294571831e-05, "loss": 2.28063850402832, "memory(GiB)": 72.85, "step": 32735, "token_acc": 0.5350553505535055, "train_speed(iter/s)": 0.670715 }, { "epoch": 1.4026819759221971, "grad_norm": 4.627052307128906, "learning_rate": 8.181973213175001e-05, "loss": 2.4377580642700196, "memory(GiB)": 72.85, "step": 32740, "token_acc": 0.4626334519572954, "train_speed(iter/s)": 0.670741 }, { "epoch": 1.402896191251446, "grad_norm": 4.07354736328125, "learning_rate": 8.181454074133741e-05, "loss": 2.314627456665039, "memory(GiB)": 72.85, "step": 32745, "token_acc": 0.5445205479452054, "train_speed(iter/s)": 0.670728 }, { "epoch": 1.403110406580695, "grad_norm": 4.239196300506592, "learning_rate": 8.180934877457456e-05, "loss": 2.391201972961426, "memory(GiB)": 72.85, "step": 32750, "token_acc": 0.476038338658147, "train_speed(iter/s)": 0.670722 }, { "epoch": 1.403324621909944, "grad_norm": 3.2947683334350586, "learning_rate": 8.180415623155552e-05, "loss": 2.34985294342041, "memory(GiB)": 72.85, "step": 32755, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.670711 }, { "epoch": 1.4035388372391928, "grad_norm": 4.3111701011657715, "learning_rate": 8.179896311237434e-05, "loss": 2.1369983673095705, "memory(GiB)": 72.85, "step": 32760, "token_acc": 0.5287009063444109, "train_speed(iter/s)": 0.67069 }, { "epoch": 1.4037530525684419, "grad_norm": 4.468491077423096, "learning_rate": 8.179376941712513e-05, "loss": 2.2960979461669924, "memory(GiB)": 72.85, "step": 32765, "token_acc": 0.4527687296416938, "train_speed(iter/s)": 0.67069 }, { "epoch": 1.4039672678976909, "grad_norm": 3.8849546909332275, "learning_rate": 8.178857514590196e-05, "loss": 2.4395639419555666, "memory(GiB)": 72.85, "step": 32770, "token_acc": 0.5078864353312302, "train_speed(iter/s)": 0.670707 }, { "epoch": 1.4041814832269397, "grad_norm": 3.7118358612060547, "learning_rate": 8.178338029879893e-05, "loss": 2.560617446899414, "memory(GiB)": 72.85, 
"step": 32775, "token_acc": 0.49814126394052044, "train_speed(iter/s)": 0.670675 }, { "epoch": 1.4043956985561887, "grad_norm": 4.1074371337890625, "learning_rate": 8.177818487591013e-05, "loss": 2.229989242553711, "memory(GiB)": 72.85, "step": 32780, "token_acc": 0.5127388535031847, "train_speed(iter/s)": 0.670656 }, { "epoch": 1.4046099138854378, "grad_norm": 3.661418914794922, "learning_rate": 8.177298887732974e-05, "loss": 2.3483549118041993, "memory(GiB)": 72.85, "step": 32785, "token_acc": 0.5220588235294118, "train_speed(iter/s)": 0.670666 }, { "epoch": 1.4048241292146866, "grad_norm": 3.8018884658813477, "learning_rate": 8.176779230315183e-05, "loss": 2.380503845214844, "memory(GiB)": 72.85, "step": 32790, "token_acc": 0.4636363636363636, "train_speed(iter/s)": 0.670691 }, { "epoch": 1.4050383445439356, "grad_norm": 4.984127998352051, "learning_rate": 8.176259515347055e-05, "loss": 2.1603473663330077, "memory(GiB)": 72.85, "step": 32795, "token_acc": 0.5296803652968036, "train_speed(iter/s)": 0.670698 }, { "epoch": 1.4052525598731846, "grad_norm": 4.393282890319824, "learning_rate": 8.175739742838007e-05, "loss": 2.2780237197875977, "memory(GiB)": 72.85, "step": 32800, "token_acc": 0.504885993485342, "train_speed(iter/s)": 0.670719 }, { "epoch": 1.4054667752024335, "grad_norm": 4.123854160308838, "learning_rate": 8.175219912797457e-05, "loss": 1.980984878540039, "memory(GiB)": 72.85, "step": 32805, "token_acc": 0.556420233463035, "train_speed(iter/s)": 0.670719 }, { "epoch": 1.4056809905316825, "grad_norm": 4.809103012084961, "learning_rate": 8.174700025234817e-05, "loss": 2.4066699981689452, "memory(GiB)": 72.85, "step": 32810, "token_acc": 0.506578947368421, "train_speed(iter/s)": 0.670709 }, { "epoch": 1.4058952058609315, "grad_norm": 4.411139011383057, "learning_rate": 8.174180080159508e-05, "loss": 2.0203052520751954, "memory(GiB)": 72.85, "step": 32815, "token_acc": 0.5541666666666667, "train_speed(iter/s)": 0.670713 }, { "epoch": 1.4061094211901803, 
"grad_norm": 4.5102691650390625, "learning_rate": 8.173660077580949e-05, "loss": 2.314217185974121, "memory(GiB)": 72.85, "step": 32820, "token_acc": 0.5051546391752577, "train_speed(iter/s)": 0.670704 }, { "epoch": 1.4063236365194294, "grad_norm": 4.427989959716797, "learning_rate": 8.173140017508562e-05, "loss": 2.2629114151000977, "memory(GiB)": 72.85, "step": 32825, "token_acc": 0.524904214559387, "train_speed(iter/s)": 0.670673 }, { "epoch": 1.4065378518486784, "grad_norm": 5.655019760131836, "learning_rate": 8.172619899951765e-05, "loss": 2.204834747314453, "memory(GiB)": 72.85, "step": 32830, "token_acc": 0.5204460966542751, "train_speed(iter/s)": 0.6707 }, { "epoch": 1.4067520671779272, "grad_norm": 4.9938740730285645, "learning_rate": 8.172099724919984e-05, "loss": 2.38099365234375, "memory(GiB)": 72.85, "step": 32835, "token_acc": 0.49242424242424243, "train_speed(iter/s)": 0.670677 }, { "epoch": 1.4069662825071763, "grad_norm": 4.064129829406738, "learning_rate": 8.17157949242264e-05, "loss": 2.4679847717285157, "memory(GiB)": 72.85, "step": 32840, "token_acc": 0.4896551724137931, "train_speed(iter/s)": 0.670706 }, { "epoch": 1.4071804978364253, "grad_norm": 3.9458365440368652, "learning_rate": 8.171059202469159e-05, "loss": 2.0271724700927733, "memory(GiB)": 72.85, "step": 32845, "token_acc": 0.5563380281690141, "train_speed(iter/s)": 0.670724 }, { "epoch": 1.407394713165674, "grad_norm": 3.5499427318573, "learning_rate": 8.170538855068966e-05, "loss": 2.393756866455078, "memory(GiB)": 72.85, "step": 32850, "token_acc": 0.5061349693251533, "train_speed(iter/s)": 0.670715 }, { "epoch": 1.4076089284949231, "grad_norm": 3.803884983062744, "learning_rate": 8.170018450231487e-05, "loss": 2.6257911682128907, "memory(GiB)": 72.85, "step": 32855, "token_acc": 0.45179063360881544, "train_speed(iter/s)": 0.670719 }, { "epoch": 1.4078231438241722, "grad_norm": 4.107553005218506, "learning_rate": 8.169497987966151e-05, "loss": 2.263134002685547, "memory(GiB)": 
72.85, "step": 32860, "token_acc": 0.521311475409836, "train_speed(iter/s)": 0.670714 }, { "epoch": 1.408037359153421, "grad_norm": 3.8536152839660645, "learning_rate": 8.168977468282384e-05, "loss": 2.4565845489501954, "memory(GiB)": 72.85, "step": 32865, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.670719 }, { "epoch": 1.40825157448267, "grad_norm": 4.795900821685791, "learning_rate": 8.168456891189618e-05, "loss": 2.2292095184326173, "memory(GiB)": 72.85, "step": 32870, "token_acc": 0.5274725274725275, "train_speed(iter/s)": 0.670728 }, { "epoch": 1.408465789811919, "grad_norm": 6.90460729598999, "learning_rate": 8.167936256697284e-05, "loss": 2.338755226135254, "memory(GiB)": 72.85, "step": 32875, "token_acc": 0.49624060150375937, "train_speed(iter/s)": 0.670741 }, { "epoch": 1.4086800051411679, "grad_norm": 3.5367653369903564, "learning_rate": 8.167415564814812e-05, "loss": 2.2411418914794923, "memory(GiB)": 72.85, "step": 32880, "token_acc": 0.543918918918919, "train_speed(iter/s)": 0.670745 }, { "epoch": 1.408894220470417, "grad_norm": 4.117340087890625, "learning_rate": 8.166894815551636e-05, "loss": 2.454463005065918, "memory(GiB)": 72.85, "step": 32885, "token_acc": 0.4553314121037464, "train_speed(iter/s)": 0.67074 }, { "epoch": 1.409108435799666, "grad_norm": 5.258001327514648, "learning_rate": 8.16637400891719e-05, "loss": 2.575408935546875, "memory(GiB)": 72.85, "step": 32890, "token_acc": 0.49382716049382713, "train_speed(iter/s)": 0.670742 }, { "epoch": 1.4093226511289147, "grad_norm": 3.3930318355560303, "learning_rate": 8.165853144920907e-05, "loss": 2.4558155059814455, "memory(GiB)": 72.85, "step": 32895, "token_acc": 0.4913294797687861, "train_speed(iter/s)": 0.670739 }, { "epoch": 1.4095368664581638, "grad_norm": 3.280968427658081, "learning_rate": 8.165332223572226e-05, "loss": 2.4282020568847655, "memory(GiB)": 72.85, "step": 32900, "token_acc": 0.5, "train_speed(iter/s)": 0.670744 }, { "epoch": 1.4097510817874128, "grad_norm": 
3.6034557819366455, "learning_rate": 8.164811244880583e-05, "loss": 2.3718042373657227, "memory(GiB)": 72.85, "step": 32905, "token_acc": 0.508, "train_speed(iter/s)": 0.670721 }, { "epoch": 1.4099652971166616, "grad_norm": 4.307796478271484, "learning_rate": 8.164394420646677e-05, "loss": 2.4065998077392576, "memory(GiB)": 72.85, "step": 32910, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.670744 }, { "epoch": 1.4101795124459107, "grad_norm": 3.430388927459717, "learning_rate": 8.163873338761486e-05, "loss": 1.993046188354492, "memory(GiB)": 72.85, "step": 32915, "token_acc": 0.5249169435215947, "train_speed(iter/s)": 0.670749 }, { "epoch": 1.4103937277751597, "grad_norm": 5.083575248718262, "learning_rate": 8.16335219955976e-05, "loss": 2.041106414794922, "memory(GiB)": 72.85, "step": 32920, "token_acc": 0.549407114624506, "train_speed(iter/s)": 0.670746 }, { "epoch": 1.4106079431044085, "grad_norm": 4.243595600128174, "learning_rate": 8.162831003050942e-05, "loss": 2.3354873657226562, "memory(GiB)": 72.85, "step": 32925, "token_acc": 0.5, "train_speed(iter/s)": 0.670753 }, { "epoch": 1.4108221584336575, "grad_norm": 4.2741594314575195, "learning_rate": 8.162309749244473e-05, "loss": 2.493063545227051, "memory(GiB)": 72.85, "step": 32930, "token_acc": 0.523972602739726, "train_speed(iter/s)": 0.670759 }, { "epoch": 1.4110363737629066, "grad_norm": 3.8527019023895264, "learning_rate": 8.161788438149797e-05, "loss": 2.0427505493164064, "memory(GiB)": 72.85, "step": 32935, "token_acc": 0.5783132530120482, "train_speed(iter/s)": 0.67077 }, { "epoch": 1.4112505890921554, "grad_norm": 4.3661017417907715, "learning_rate": 8.161267069776357e-05, "loss": 2.1442691802978517, "memory(GiB)": 72.85, "step": 32940, "token_acc": 0.5524193548387096, "train_speed(iter/s)": 0.670764 }, { "epoch": 1.4114648044214044, "grad_norm": 4.927290439605713, "learning_rate": 8.1607456441336e-05, "loss": 2.2398386001586914, "memory(GiB)": 72.85, "step": 32945, "token_acc": 
0.5373134328358209, "train_speed(iter/s)": 0.670786 }, { "epoch": 1.4116790197506535, "grad_norm": 5.25898551940918, "learning_rate": 8.160224161230969e-05, "loss": 2.212017822265625, "memory(GiB)": 72.85, "step": 32950, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.670776 }, { "epoch": 1.4118932350799023, "grad_norm": 5.039083480834961, "learning_rate": 8.159702621077911e-05, "loss": 2.5514379501342774, "memory(GiB)": 72.85, "step": 32955, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.670767 }, { "epoch": 1.4121074504091513, "grad_norm": 5.313144683837891, "learning_rate": 8.159181023683879e-05, "loss": 2.164800262451172, "memory(GiB)": 72.85, "step": 32960, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.670725 }, { "epoch": 1.4123216657384003, "grad_norm": 2.9702887535095215, "learning_rate": 8.158659369058319e-05, "loss": 2.020615005493164, "memory(GiB)": 72.85, "step": 32965, "token_acc": 0.5487804878048781, "train_speed(iter/s)": 0.670713 }, { "epoch": 1.4125358810676492, "grad_norm": 3.4147074222564697, "learning_rate": 8.158137657210681e-05, "loss": 2.221263885498047, "memory(GiB)": 72.85, "step": 32970, "token_acc": 0.5186440677966102, "train_speed(iter/s)": 0.670702 }, { "epoch": 1.4127500963968982, "grad_norm": 4.184993267059326, "learning_rate": 8.157615888150416e-05, "loss": 2.4191131591796875, "memory(GiB)": 72.85, "step": 32975, "token_acc": 0.4751552795031056, "train_speed(iter/s)": 0.670734 }, { "epoch": 1.4129643117261472, "grad_norm": 4.674031734466553, "learning_rate": 8.157094061886979e-05, "loss": 2.637513542175293, "memory(GiB)": 72.85, "step": 32980, "token_acc": 0.4878048780487805, "train_speed(iter/s)": 0.670726 }, { "epoch": 1.413178527055396, "grad_norm": 4.092775821685791, "learning_rate": 8.15657217842982e-05, "loss": 2.241680908203125, "memory(GiB)": 72.85, "step": 32985, "token_acc": 0.5275080906148867, "train_speed(iter/s)": 0.670734 }, { "epoch": 1.413392742384645, "grad_norm": 5.196542739868164, 
"learning_rate": 8.156050237788395e-05, "loss": 2.351512145996094, "memory(GiB)": 72.85, "step": 32990, "token_acc": 0.5265151515151515, "train_speed(iter/s)": 0.670757 }, { "epoch": 1.413606957713894, "grad_norm": 5.357199192047119, "learning_rate": 8.155528239972158e-05, "loss": 2.458220100402832, "memory(GiB)": 72.85, "step": 32995, "token_acc": 0.4792332268370607, "train_speed(iter/s)": 0.670775 }, { "epoch": 1.413821173043143, "grad_norm": 5.5831780433654785, "learning_rate": 8.15500618499057e-05, "loss": 2.2494075775146483, "memory(GiB)": 72.85, "step": 33000, "token_acc": 0.52, "train_speed(iter/s)": 0.670761 }, { "epoch": 1.413821173043143, "eval_loss": 2.101428508758545, "eval_runtime": 16.2881, "eval_samples_per_second": 6.139, "eval_steps_per_second": 6.139, "eval_token_acc": 0.5084033613445378, "step": 33000 }, { "epoch": 1.414035388372392, "grad_norm": 3.3266468048095703, "learning_rate": 8.154484072853084e-05, "loss": 2.477065658569336, "memory(GiB)": 72.85, "step": 33005, "token_acc": 0.5028735632183908, "train_speed(iter/s)": 0.670476 }, { "epoch": 1.414249603701641, "grad_norm": 3.5258071422576904, "learning_rate": 8.153961903569158e-05, "loss": 2.4470169067382814, "memory(GiB)": 72.85, "step": 33010, "token_acc": 0.459214501510574, "train_speed(iter/s)": 0.670492 }, { "epoch": 1.4144638190308898, "grad_norm": 3.9566593170166016, "learning_rate": 8.153439677148255e-05, "loss": 2.372933006286621, "memory(GiB)": 72.85, "step": 33015, "token_acc": 0.47017543859649125, "train_speed(iter/s)": 0.670494 }, { "epoch": 1.4146780343601388, "grad_norm": 4.316427230834961, "learning_rate": 8.152917393599835e-05, "loss": 2.2679374694824217, "memory(GiB)": 72.85, "step": 33020, "token_acc": 0.5099601593625498, "train_speed(iter/s)": 0.670498 }, { "epoch": 1.4148922496893879, "grad_norm": 3.8066463470458984, "learning_rate": 8.152395052933357e-05, "loss": 2.5198013305664064, "memory(GiB)": 72.85, "step": 33025, "token_acc": 0.46153846153846156, 
"train_speed(iter/s)": 0.670515 }, { "epoch": 1.4151064650186367, "grad_norm": 5.925121784210205, "learning_rate": 8.151872655158285e-05, "loss": 2.0996118545532227, "memory(GiB)": 72.85, "step": 33030, "token_acc": 0.5598455598455598, "train_speed(iter/s)": 0.670516 }, { "epoch": 1.4153206803478857, "grad_norm": 4.630978584289551, "learning_rate": 8.151350200284084e-05, "loss": 2.6107772827148437, "memory(GiB)": 72.85, "step": 33035, "token_acc": 0.4980237154150198, "train_speed(iter/s)": 0.670509 }, { "epoch": 1.4155348956771348, "grad_norm": 3.8601627349853516, "learning_rate": 8.150827688320219e-05, "loss": 2.4949718475341798, "memory(GiB)": 72.85, "step": 33040, "token_acc": 0.46645367412140576, "train_speed(iter/s)": 0.670508 }, { "epoch": 1.4157491110063836, "grad_norm": 4.034106731414795, "learning_rate": 8.150305119276155e-05, "loss": 2.396784210205078, "memory(GiB)": 72.85, "step": 33045, "token_acc": 0.44982698961937717, "train_speed(iter/s)": 0.670522 }, { "epoch": 1.4159633263356326, "grad_norm": 3.519824743270874, "learning_rate": 8.149782493161357e-05, "loss": 2.408828926086426, "memory(GiB)": 72.85, "step": 33050, "token_acc": 0.48562300319488816, "train_speed(iter/s)": 0.670504 }, { "epoch": 1.4161775416648816, "grad_norm": 4.821855068206787, "learning_rate": 8.149259809985294e-05, "loss": 2.6580526351928713, "memory(GiB)": 72.85, "step": 33055, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.670519 }, { "epoch": 1.4163917569941304, "grad_norm": 3.6175782680511475, "learning_rate": 8.148737069757438e-05, "loss": 2.4430000305175783, "memory(GiB)": 72.85, "step": 33060, "token_acc": 0.5049180327868853, "train_speed(iter/s)": 0.670536 }, { "epoch": 1.4166059723233795, "grad_norm": 4.404756546020508, "learning_rate": 8.148214272487253e-05, "loss": 2.439264678955078, "memory(GiB)": 72.85, "step": 33065, "token_acc": 0.459546925566343, "train_speed(iter/s)": 0.670564 }, { "epoch": 1.4168201876526285, "grad_norm": 3.780949115753174, 
"learning_rate": 8.147691418184216e-05, "loss": 2.509225845336914, "memory(GiB)": 72.85, "step": 33070, "token_acc": 0.4716981132075472, "train_speed(iter/s)": 0.67058 }, { "epoch": 1.4170344029818773, "grad_norm": 4.005032539367676, "learning_rate": 8.147168506857794e-05, "loss": 2.521044921875, "memory(GiB)": 72.85, "step": 33075, "token_acc": 0.4716312056737589, "train_speed(iter/s)": 0.670586 }, { "epoch": 1.4172486183111264, "grad_norm": 3.8436055183410645, "learning_rate": 8.146645538517463e-05, "loss": 2.4446426391601563, "memory(GiB)": 72.85, "step": 33080, "token_acc": 0.49242424242424243, "train_speed(iter/s)": 0.670591 }, { "epoch": 1.4174628336403754, "grad_norm": 4.01034688949585, "learning_rate": 8.146122513172695e-05, "loss": 2.576471519470215, "memory(GiB)": 72.85, "step": 33085, "token_acc": 0.4525316455696203, "train_speed(iter/s)": 0.670616 }, { "epoch": 1.4176770489696242, "grad_norm": 3.217456340789795, "learning_rate": 8.145599430832968e-05, "loss": 2.4242353439331055, "memory(GiB)": 72.85, "step": 33090, "token_acc": 0.5015015015015015, "train_speed(iter/s)": 0.670623 }, { "epoch": 1.4178912642988732, "grad_norm": 5.843472480773926, "learning_rate": 8.145076291507758e-05, "loss": 2.469099426269531, "memory(GiB)": 72.85, "step": 33095, "token_acc": 0.45396825396825397, "train_speed(iter/s)": 0.670606 }, { "epoch": 1.4181054796281223, "grad_norm": 4.280871868133545, "learning_rate": 8.144553095206537e-05, "loss": 2.4284189224243162, "memory(GiB)": 72.85, "step": 33100, "token_acc": 0.5100671140939598, "train_speed(iter/s)": 0.670616 }, { "epoch": 1.418319694957371, "grad_norm": 3.795900821685791, "learning_rate": 8.14402984193879e-05, "loss": 2.1575477600097654, "memory(GiB)": 72.85, "step": 33105, "token_acc": 0.545816733067729, "train_speed(iter/s)": 0.670625 }, { "epoch": 1.4185339102866201, "grad_norm": 5.008774280548096, "learning_rate": 8.143506531713992e-05, "loss": 2.1446704864501953, "memory(GiB)": 72.85, "step": 33110, "token_acc": 
0.5659574468085107, "train_speed(iter/s)": 0.670629 }, { "epoch": 1.4187481256158692, "grad_norm": 3.5588362216949463, "learning_rate": 8.142983164541624e-05, "loss": 2.565298080444336, "memory(GiB)": 72.85, "step": 33115, "token_acc": 0.5036231884057971, "train_speed(iter/s)": 0.670639 }, { "epoch": 1.418962340945118, "grad_norm": 6.3627543449401855, "learning_rate": 8.142459740431167e-05, "loss": 2.3458070755004883, "memory(GiB)": 72.85, "step": 33120, "token_acc": 0.47202797202797203, "train_speed(iter/s)": 0.670651 }, { "epoch": 1.419176556274367, "grad_norm": 4.041476726531982, "learning_rate": 8.141936259392105e-05, "loss": 2.296380043029785, "memory(GiB)": 72.85, "step": 33125, "token_acc": 0.5196078431372549, "train_speed(iter/s)": 0.670666 }, { "epoch": 1.419390771603616, "grad_norm": 3.852503776550293, "learning_rate": 8.141412721433919e-05, "loss": 2.3969970703125, "memory(GiB)": 72.85, "step": 33130, "token_acc": 0.49299719887955185, "train_speed(iter/s)": 0.670676 }, { "epoch": 1.4196049869328649, "grad_norm": 3.790877103805542, "learning_rate": 8.140889126566095e-05, "loss": 2.319397735595703, "memory(GiB)": 72.85, "step": 33135, "token_acc": 0.5082508250825083, "train_speed(iter/s)": 0.670687 }, { "epoch": 1.4198192022621139, "grad_norm": 4.899580955505371, "learning_rate": 8.14036547479812e-05, "loss": 2.7309944152832033, "memory(GiB)": 72.85, "step": 33140, "token_acc": 0.45609065155807366, "train_speed(iter/s)": 0.6707 }, { "epoch": 1.420033417591363, "grad_norm": 4.2783589363098145, "learning_rate": 8.139841766139476e-05, "loss": 2.328699493408203, "memory(GiB)": 72.85, "step": 33145, "token_acc": 0.5077399380804953, "train_speed(iter/s)": 0.670705 }, { "epoch": 1.4202476329206117, "grad_norm": 4.218436241149902, "learning_rate": 8.139318000599654e-05, "loss": 2.368866539001465, "memory(GiB)": 72.85, "step": 33150, "token_acc": 0.4803921568627451, "train_speed(iter/s)": 0.670719 }, { "epoch": 1.4204618482498608, "grad_norm": 4.385653495788574, 
"learning_rate": 8.138794178188143e-05, "loss": 2.2279851913452147, "memory(GiB)": 72.85, "step": 33155, "token_acc": 0.541501976284585, "train_speed(iter/s)": 0.670721 }, { "epoch": 1.4206760635791098, "grad_norm": 4.173451900482178, "learning_rate": 8.13827029891443e-05, "loss": 2.661623001098633, "memory(GiB)": 72.85, "step": 33160, "token_acc": 0.4489795918367347, "train_speed(iter/s)": 0.670693 }, { "epoch": 1.4208902789083586, "grad_norm": 4.450870037078857, "learning_rate": 8.137746362788006e-05, "loss": 2.1037052154541014, "memory(GiB)": 72.85, "step": 33165, "token_acc": 0.5328467153284672, "train_speed(iter/s)": 0.67069 }, { "epoch": 1.4211044942376077, "grad_norm": 3.3031177520751953, "learning_rate": 8.137222369818363e-05, "loss": 2.211751365661621, "memory(GiB)": 72.85, "step": 33170, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.670713 }, { "epoch": 1.4213187095668567, "grad_norm": 4.330551624298096, "learning_rate": 8.136698320014995e-05, "loss": 2.4971311569213865, "memory(GiB)": 72.85, "step": 33175, "token_acc": 0.4600760456273764, "train_speed(iter/s)": 0.670733 }, { "epoch": 1.4215329248961055, "grad_norm": 4.7054362297058105, "learning_rate": 8.136174213387394e-05, "loss": 2.4983774185180665, "memory(GiB)": 72.85, "step": 33180, "token_acc": 0.44666666666666666, "train_speed(iter/s)": 0.670746 }, { "epoch": 1.4217471402253545, "grad_norm": 5.277019023895264, "learning_rate": 8.135650049945054e-05, "loss": 2.679850769042969, "memory(GiB)": 72.85, "step": 33185, "token_acc": 0.45806451612903226, "train_speed(iter/s)": 0.670722 }, { "epoch": 1.4219613555546036, "grad_norm": 4.245491981506348, "learning_rate": 8.135125829697473e-05, "loss": 2.447841262817383, "memory(GiB)": 72.85, "step": 33190, "token_acc": 0.4732142857142857, "train_speed(iter/s)": 0.670712 }, { "epoch": 1.4221755708838524, "grad_norm": 4.021172046661377, "learning_rate": 8.134601552654147e-05, "loss": 2.2854991912841798, "memory(GiB)": 72.85, "step": 33195, 
"token_acc": 0.5223367697594502, "train_speed(iter/s)": 0.670725 }, { "epoch": 1.4223897862131014, "grad_norm": 3.8451099395751953, "learning_rate": 8.134077218824572e-05, "loss": 2.4099124908447265, "memory(GiB)": 72.85, "step": 33200, "token_acc": 0.4935064935064935, "train_speed(iter/s)": 0.670755 }, { "epoch": 1.4226040015423504, "grad_norm": 4.384872913360596, "learning_rate": 8.133552828218249e-05, "loss": 2.545650291442871, "memory(GiB)": 72.85, "step": 33205, "token_acc": 0.46551724137931033, "train_speed(iter/s)": 0.670782 }, { "epoch": 1.4228182168715993, "grad_norm": 3.9688825607299805, "learning_rate": 8.133028380844678e-05, "loss": 2.831709289550781, "memory(GiB)": 72.85, "step": 33210, "token_acc": 0.409375, "train_speed(iter/s)": 0.670748 }, { "epoch": 1.4230324322008483, "grad_norm": 3.4818661212921143, "learning_rate": 8.132503876713357e-05, "loss": 2.4103492736816405, "memory(GiB)": 72.85, "step": 33215, "token_acc": 0.4887459807073955, "train_speed(iter/s)": 0.670748 }, { "epoch": 1.4232466475300973, "grad_norm": 3.976449728012085, "learning_rate": 8.13197931583379e-05, "loss": 2.480992889404297, "memory(GiB)": 72.85, "step": 33220, "token_acc": 0.5, "train_speed(iter/s)": 0.670757 }, { "epoch": 1.4234608628593461, "grad_norm": 4.009617328643799, "learning_rate": 8.131454698215482e-05, "loss": 2.373792839050293, "memory(GiB)": 72.85, "step": 33225, "token_acc": 0.5039370078740157, "train_speed(iter/s)": 0.67078 }, { "epoch": 1.4236750781885952, "grad_norm": 4.429073333740234, "learning_rate": 8.130930023867931e-05, "loss": 2.4168636322021486, "memory(GiB)": 72.85, "step": 33230, "token_acc": 0.48328267477203646, "train_speed(iter/s)": 0.670774 }, { "epoch": 1.4238892935178442, "grad_norm": 4.043453693389893, "learning_rate": 8.130405292800648e-05, "loss": 2.405790328979492, "memory(GiB)": 72.85, "step": 33235, "token_acc": 0.4744525547445255, "train_speed(iter/s)": 0.670764 }, { "epoch": 1.424103508847093, "grad_norm": 3.9410500526428223, 
"learning_rate": 8.129880505023136e-05, "loss": 2.3088254928588867, "memory(GiB)": 72.85, "step": 33240, "token_acc": 0.5051546391752577, "train_speed(iter/s)": 0.67075 }, { "epoch": 1.424317724176342, "grad_norm": 3.7334299087524414, "learning_rate": 8.129355660544902e-05, "loss": 2.4075958251953127, "memory(GiB)": 72.85, "step": 33245, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.670758 }, { "epoch": 1.424531939505591, "grad_norm": 4.199587345123291, "learning_rate": 8.128830759375457e-05, "loss": 2.415963554382324, "memory(GiB)": 72.85, "step": 33250, "token_acc": 0.476878612716763, "train_speed(iter/s)": 0.670744 }, { "epoch": 1.42474615483484, "grad_norm": 4.0601487159729, "learning_rate": 8.128305801524306e-05, "loss": 2.3487876892089843, "memory(GiB)": 72.85, "step": 33255, "token_acc": 0.5, "train_speed(iter/s)": 0.670751 }, { "epoch": 1.424960370164089, "grad_norm": 4.85698127746582, "learning_rate": 8.127780787000959e-05, "loss": 2.2132076263427733, "memory(GiB)": 72.85, "step": 33260, "token_acc": 0.5209790209790209, "train_speed(iter/s)": 0.670742 }, { "epoch": 1.425174585493338, "grad_norm": 4.21191930770874, "learning_rate": 8.12725571581493e-05, "loss": 2.1767759323120117, "memory(GiB)": 72.85, "step": 33265, "token_acc": 0.5379310344827586, "train_speed(iter/s)": 0.67074 }, { "epoch": 1.4253888008225868, "grad_norm": 4.910336971282959, "learning_rate": 8.126730587975733e-05, "loss": 2.4200340270996095, "memory(GiB)": 72.85, "step": 33270, "token_acc": 0.4963235294117647, "train_speed(iter/s)": 0.670757 }, { "epoch": 1.4256030161518358, "grad_norm": 6.089210510253906, "learning_rate": 8.126205403492875e-05, "loss": 2.5677349090576174, "memory(GiB)": 72.85, "step": 33275, "token_acc": 0.4664179104477612, "train_speed(iter/s)": 0.670773 }, { "epoch": 1.4258172314810849, "grad_norm": 4.866679668426514, "learning_rate": 8.125680162375876e-05, "loss": 2.338601303100586, "memory(GiB)": 72.85, "step": 33280, "token_acc": 0.541501976284585, 
"train_speed(iter/s)": 0.670754 }, { "epoch": 1.4260314468103337, "grad_norm": 4.637077808380127, "learning_rate": 8.125154864634245e-05, "loss": 2.5346389770507813, "memory(GiB)": 72.85, "step": 33285, "token_acc": 0.4892086330935252, "train_speed(iter/s)": 0.670778 }, { "epoch": 1.4262456621395827, "grad_norm": 4.888805866241455, "learning_rate": 8.124629510277505e-05, "loss": 2.438170623779297, "memory(GiB)": 72.85, "step": 33290, "token_acc": 0.4740484429065744, "train_speed(iter/s)": 0.670785 }, { "epoch": 1.4264598774688317, "grad_norm": 4.752209186553955, "learning_rate": 8.12410409931517e-05, "loss": 2.3863056182861326, "memory(GiB)": 72.85, "step": 33295, "token_acc": 0.48727272727272725, "train_speed(iter/s)": 0.670784 }, { "epoch": 1.4266740927980806, "grad_norm": 4.354053497314453, "learning_rate": 8.123578631756758e-05, "loss": 2.156790924072266, "memory(GiB)": 72.85, "step": 33300, "token_acc": 0.4965753424657534, "train_speed(iter/s)": 0.67079 }, { "epoch": 1.4268883081273296, "grad_norm": 4.8643293380737305, "learning_rate": 8.123053107611789e-05, "loss": 2.4367950439453123, "memory(GiB)": 72.85, "step": 33305, "token_acc": 0.4983164983164983, "train_speed(iter/s)": 0.670807 }, { "epoch": 1.4271025234565786, "grad_norm": 4.474662780761719, "learning_rate": 8.122527526889784e-05, "loss": 2.240528869628906, "memory(GiB)": 72.85, "step": 33310, "token_acc": 0.5053763440860215, "train_speed(iter/s)": 0.670819 }, { "epoch": 1.4273167387858274, "grad_norm": 4.813877582550049, "learning_rate": 8.122001889600263e-05, "loss": 2.2859834671020507, "memory(GiB)": 72.85, "step": 33315, "token_acc": 0.5342960288808665, "train_speed(iter/s)": 0.670792 }, { "epoch": 1.4275309541150765, "grad_norm": 4.804427623748779, "learning_rate": 8.121476195752748e-05, "loss": 2.7952129364013674, "memory(GiB)": 72.85, "step": 33320, "token_acc": 0.44141689373297005, "train_speed(iter/s)": 0.670789 }, { "epoch": 1.4277451694443255, "grad_norm": 3.633592128753662, 
"learning_rate": 8.120950445356765e-05, "loss": 2.4676677703857424, "memory(GiB)": 72.85, "step": 33325, "token_acc": 0.5300751879699248, "train_speed(iter/s)": 0.670789 }, { "epoch": 1.4279593847735743, "grad_norm": 4.9236741065979, "learning_rate": 8.120424638421837e-05, "loss": 2.328067970275879, "memory(GiB)": 72.85, "step": 33330, "token_acc": 0.5092250922509225, "train_speed(iter/s)": 0.670797 }, { "epoch": 1.4281736001028233, "grad_norm": 4.568042278289795, "learning_rate": 8.11989877495749e-05, "loss": 2.5714305877685546, "memory(GiB)": 72.85, "step": 33335, "token_acc": 0.4507042253521127, "train_speed(iter/s)": 0.67077 }, { "epoch": 1.4283878154320724, "grad_norm": 4.098578929901123, "learning_rate": 8.119372854973249e-05, "loss": 2.0945138931274414, "memory(GiB)": 72.85, "step": 33340, "token_acc": 0.4918032786885246, "train_speed(iter/s)": 0.670799 }, { "epoch": 1.4286020307613212, "grad_norm": 4.742707252502441, "learning_rate": 8.118846878478642e-05, "loss": 2.1732410430908202, "memory(GiB)": 72.85, "step": 33345, "token_acc": 0.54296875, "train_speed(iter/s)": 0.670812 }, { "epoch": 1.4288162460905702, "grad_norm": 5.30301570892334, "learning_rate": 8.1183208454832e-05, "loss": 2.180557632446289, "memory(GiB)": 72.85, "step": 33350, "token_acc": 0.5275862068965518, "train_speed(iter/s)": 0.670795 }, { "epoch": 1.4290304614198193, "grad_norm": 3.911377429962158, "learning_rate": 8.117794755996448e-05, "loss": 2.3808074951171876, "memory(GiB)": 72.85, "step": 33355, "token_acc": 0.4963235294117647, "train_speed(iter/s)": 0.670793 }, { "epoch": 1.429244676749068, "grad_norm": 4.780503749847412, "learning_rate": 8.11726861002792e-05, "loss": 2.5830848693847654, "memory(GiB)": 72.85, "step": 33360, "token_acc": 0.4235294117647059, "train_speed(iter/s)": 0.670747 }, { "epoch": 1.4294588920783171, "grad_norm": 4.400708198547363, "learning_rate": 8.116742407587148e-05, "loss": 2.265022087097168, "memory(GiB)": 72.85, "step": 33365, "token_acc": 
0.4896551724137931, "train_speed(iter/s)": 0.670747 }, { "epoch": 1.4296731074075661, "grad_norm": 5.1828694343566895, "learning_rate": 8.116216148683665e-05, "loss": 2.5299524307250976, "memory(GiB)": 72.85, "step": 33370, "token_acc": 0.46846846846846846, "train_speed(iter/s)": 0.670768 }, { "epoch": 1.429887322736815, "grad_norm": 4.865315914154053, "learning_rate": 8.115689833327e-05, "loss": 2.3768104553222655, "memory(GiB)": 72.85, "step": 33375, "token_acc": 0.4668769716088328, "train_speed(iter/s)": 0.670749 }, { "epoch": 1.430101538066064, "grad_norm": 4.654971599578857, "learning_rate": 8.115163461526692e-05, "loss": 2.218401336669922, "memory(GiB)": 72.85, "step": 33380, "token_acc": 0.5202702702702703, "train_speed(iter/s)": 0.670742 }, { "epoch": 1.430315753395313, "grad_norm": 3.719597816467285, "learning_rate": 8.114637033292276e-05, "loss": 2.354515266418457, "memory(GiB)": 72.85, "step": 33385, "token_acc": 0.5209790209790209, "train_speed(iter/s)": 0.670746 }, { "epoch": 1.4305299687245618, "grad_norm": 4.032611846923828, "learning_rate": 8.114110548633289e-05, "loss": 2.521066665649414, "memory(GiB)": 72.85, "step": 33390, "token_acc": 0.49714285714285716, "train_speed(iter/s)": 0.670765 }, { "epoch": 1.4307441840538109, "grad_norm": 3.81976056098938, "learning_rate": 8.113584007559267e-05, "loss": 2.1375551223754883, "memory(GiB)": 72.85, "step": 33395, "token_acc": 0.5803921568627451, "train_speed(iter/s)": 0.670768 }, { "epoch": 1.43095839938306, "grad_norm": 4.220517158508301, "learning_rate": 8.113057410079749e-05, "loss": 2.5835920333862306, "memory(GiB)": 72.85, "step": 33400, "token_acc": 0.45396825396825397, "train_speed(iter/s)": 0.670782 }, { "epoch": 1.4311726147123087, "grad_norm": 4.161579608917236, "learning_rate": 8.112530756204279e-05, "loss": 2.3978225708007814, "memory(GiB)": 72.85, "step": 33405, "token_acc": 0.49044585987261147, "train_speed(iter/s)": 0.67078 }, { "epoch": 1.4313868300415578, "grad_norm": 3.8651602268218994, 
"learning_rate": 8.112004045942392e-05, "loss": 2.5423255920410157, "memory(GiB)": 72.85, "step": 33410, "token_acc": 0.45686900958466453, "train_speed(iter/s)": 0.670781 }, { "epoch": 1.4316010453708068, "grad_norm": 3.7037596702575684, "learning_rate": 8.111477279303635e-05, "loss": 2.430011749267578, "memory(GiB)": 72.85, "step": 33415, "token_acc": 0.4916387959866221, "train_speed(iter/s)": 0.670813 }, { "epoch": 1.4318152607000556, "grad_norm": 4.419185161590576, "learning_rate": 8.110950456297545e-05, "loss": 2.2849332809448244, "memory(GiB)": 72.85, "step": 33420, "token_acc": 0.49624060150375937, "train_speed(iter/s)": 0.670814 }, { "epoch": 1.4320294760293046, "grad_norm": 4.140038967132568, "learning_rate": 8.11042357693367e-05, "loss": 2.253358268737793, "memory(GiB)": 72.85, "step": 33425, "token_acc": 0.4939271255060729, "train_speed(iter/s)": 0.670792 }, { "epoch": 1.4322436913585537, "grad_norm": 3.857790946960449, "learning_rate": 8.109896641221555e-05, "loss": 2.4581981658935548, "memory(GiB)": 72.85, "step": 33430, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.670801 }, { "epoch": 1.4324579066878025, "grad_norm": 4.807675838470459, "learning_rate": 8.109369649170744e-05, "loss": 2.1587059020996096, "memory(GiB)": 72.85, "step": 33435, "token_acc": 0.5421686746987951, "train_speed(iter/s)": 0.670786 }, { "epoch": 1.4326721220170515, "grad_norm": 3.786339521408081, "learning_rate": 8.108842600790786e-05, "loss": 2.267757034301758, "memory(GiB)": 72.85, "step": 33440, "token_acc": 0.555956678700361, "train_speed(iter/s)": 0.670786 }, { "epoch": 1.4328863373463006, "grad_norm": 4.684709072113037, "learning_rate": 8.108315496091228e-05, "loss": 2.5629209518432616, "memory(GiB)": 72.85, "step": 33445, "token_acc": 0.48623853211009177, "train_speed(iter/s)": 0.670809 }, { "epoch": 1.4331005526755494, "grad_norm": 4.907763481140137, "learning_rate": 8.107788335081618e-05, "loss": 2.1683380126953127, "memory(GiB)": 72.85, "step": 33450, 
"token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.670822 }, { "epoch": 1.4333147680047984, "grad_norm": 3.8090646266937256, "learning_rate": 8.107261117771507e-05, "loss": 2.363060188293457, "memory(GiB)": 72.85, "step": 33455, "token_acc": 0.47474747474747475, "train_speed(iter/s)": 0.670837 }, { "epoch": 1.4335289833340474, "grad_norm": 4.376867771148682, "learning_rate": 8.106733844170446e-05, "loss": 2.4052295684814453, "memory(GiB)": 72.85, "step": 33460, "token_acc": 0.4774011299435028, "train_speed(iter/s)": 0.670855 }, { "epoch": 1.4337431986632962, "grad_norm": 3.6455183029174805, "learning_rate": 8.106206514287989e-05, "loss": 2.7398056030273437, "memory(GiB)": 72.85, "step": 33465, "token_acc": 0.45692883895131087, "train_speed(iter/s)": 0.670852 }, { "epoch": 1.4339574139925453, "grad_norm": 4.846372604370117, "learning_rate": 8.105679128133686e-05, "loss": 2.4448009490966798, "memory(GiB)": 72.85, "step": 33470, "token_acc": 0.4671280276816609, "train_speed(iter/s)": 0.670853 }, { "epoch": 1.4341716293217943, "grad_norm": 4.198386192321777, "learning_rate": 8.105151685717092e-05, "loss": 2.456887626647949, "memory(GiB)": 72.85, "step": 33475, "token_acc": 0.47191011235955055, "train_speed(iter/s)": 0.670873 }, { "epoch": 1.4343858446510431, "grad_norm": 4.477809429168701, "learning_rate": 8.104624187047762e-05, "loss": 2.072324752807617, "memory(GiB)": 72.85, "step": 33480, "token_acc": 0.5775862068965517, "train_speed(iter/s)": 0.670873 }, { "epoch": 1.4346000599802922, "grad_norm": 3.801684617996216, "learning_rate": 8.104096632135252e-05, "loss": 2.261194610595703, "memory(GiB)": 72.85, "step": 33485, "token_acc": 0.532520325203252, "train_speed(iter/s)": 0.670893 }, { "epoch": 1.4348142753095412, "grad_norm": 4.594130992889404, "learning_rate": 8.103569020989121e-05, "loss": 2.040902328491211, "memory(GiB)": 72.85, "step": 33490, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.670905 }, { "epoch": 1.43502849063879, "grad_norm": 
4.114211082458496, "learning_rate": 8.103041353618925e-05, "loss": 2.2415390014648438, "memory(GiB)": 72.85, "step": 33495, "token_acc": 0.5236220472440944, "train_speed(iter/s)": 0.670908 }, { "epoch": 1.435242705968039, "grad_norm": 3.8143770694732666, "learning_rate": 8.102513630034225e-05, "loss": 2.651300811767578, "memory(GiB)": 72.85, "step": 33500, "token_acc": 0.46598639455782315, "train_speed(iter/s)": 0.670926 }, { "epoch": 1.435242705968039, "eval_loss": 2.0137505531311035, "eval_runtime": 16.0098, "eval_samples_per_second": 6.246, "eval_steps_per_second": 6.246, "eval_token_acc": 0.5088757396449705, "step": 33500 }, { "epoch": 1.435456921297288, "grad_norm": 4.024796962738037, "learning_rate": 8.101985850244579e-05, "loss": 2.0352462768554687, "memory(GiB)": 72.85, "step": 33505, "token_acc": 0.5164473684210527, "train_speed(iter/s)": 0.670687 }, { "epoch": 1.435671136626537, "grad_norm": 4.1335930824279785, "learning_rate": 8.101458014259548e-05, "loss": 2.158766746520996, "memory(GiB)": 72.85, "step": 33510, "token_acc": 0.5368421052631579, "train_speed(iter/s)": 0.670685 }, { "epoch": 1.435885351955786, "grad_norm": 3.9795782566070557, "learning_rate": 8.100930122088699e-05, "loss": 2.3270195007324217, "memory(GiB)": 72.85, "step": 33515, "token_acc": 0.532608695652174, "train_speed(iter/s)": 0.670658 }, { "epoch": 1.436099567285035, "grad_norm": 3.945338487625122, "learning_rate": 8.10040217374159e-05, "loss": 2.158119010925293, "memory(GiB)": 72.85, "step": 33520, "token_acc": 0.5400696864111498, "train_speed(iter/s)": 0.670648 }, { "epoch": 1.4363137826142838, "grad_norm": 4.439880847930908, "learning_rate": 8.099874169227788e-05, "loss": 2.5497978210449217, "memory(GiB)": 72.85, "step": 33525, "token_acc": 0.4936708860759494, "train_speed(iter/s)": 0.670631 }, { "epoch": 1.4365279979435328, "grad_norm": 5.058881759643555, "learning_rate": 8.099346108556857e-05, "loss": 2.656361198425293, "memory(GiB)": 72.85, "step": 33530, "token_acc": 
0.45936395759717313, "train_speed(iter/s)": 0.670642 }, { "epoch": 1.4367422132727818, "grad_norm": 4.065288066864014, "learning_rate": 8.098817991738365e-05, "loss": 2.7824066162109373, "memory(GiB)": 72.85, "step": 33535, "token_acc": 0.44551282051282054, "train_speed(iter/s)": 0.670642 }, { "epoch": 1.4369564286020307, "grad_norm": 4.773068904876709, "learning_rate": 8.098289818781876e-05, "loss": 2.315897750854492, "memory(GiB)": 72.85, "step": 33540, "token_acc": 0.5472972972972973, "train_speed(iter/s)": 0.670654 }, { "epoch": 1.4371706439312797, "grad_norm": 4.339503288269043, "learning_rate": 8.097761589696962e-05, "loss": 2.3615545272827148, "memory(GiB)": 72.85, "step": 33545, "token_acc": 0.49691358024691357, "train_speed(iter/s)": 0.670657 }, { "epoch": 1.4373848592605287, "grad_norm": 3.075416326522827, "learning_rate": 8.097233304493191e-05, "loss": 2.3240083694458007, "memory(GiB)": 72.85, "step": 33550, "token_acc": 0.4815950920245399, "train_speed(iter/s)": 0.670646 }, { "epoch": 1.4375990745897775, "grad_norm": 4.834654331207275, "learning_rate": 8.096704963180131e-05, "loss": 2.272114562988281, "memory(GiB)": 72.85, "step": 33555, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.670655 }, { "epoch": 1.4378132899190266, "grad_norm": 3.9454853534698486, "learning_rate": 8.096176565767359e-05, "loss": 2.071837615966797, "memory(GiB)": 72.85, "step": 33560, "token_acc": 0.5096774193548387, "train_speed(iter/s)": 0.670649 }, { "epoch": 1.4380275052482756, "grad_norm": 4.3697638511657715, "learning_rate": 8.095648112264443e-05, "loss": 2.2921512603759764, "memory(GiB)": 72.85, "step": 33565, "token_acc": 0.5, "train_speed(iter/s)": 0.670641 }, { "epoch": 1.4382417205775244, "grad_norm": 4.2401323318481445, "learning_rate": 8.095119602680956e-05, "loss": 2.414510726928711, "memory(GiB)": 72.85, "step": 33570, "token_acc": 0.4828767123287671, "train_speed(iter/s)": 0.670627 }, { "epoch": 1.4384559359067735, "grad_norm": 4.122740268707275, 
"learning_rate": 8.094591037026475e-05, "loss": 2.5885297775268556, "memory(GiB)": 72.85, "step": 33575, "token_acc": 0.45263157894736844, "train_speed(iter/s)": 0.670641 }, { "epoch": 1.4386701512360225, "grad_norm": 3.8688018321990967, "learning_rate": 8.094062415310575e-05, "loss": 2.265196990966797, "memory(GiB)": 72.85, "step": 33580, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.670615 }, { "epoch": 1.4388843665652713, "grad_norm": 5.024126052856445, "learning_rate": 8.093533737542829e-05, "loss": 2.5186016082763674, "memory(GiB)": 72.85, "step": 33585, "token_acc": 0.45217391304347826, "train_speed(iter/s)": 0.670601 }, { "epoch": 1.4390985818945203, "grad_norm": 4.536497592926025, "learning_rate": 8.09300500373282e-05, "loss": 2.2131139755249025, "memory(GiB)": 72.85, "step": 33590, "token_acc": 0.475177304964539, "train_speed(iter/s)": 0.670618 }, { "epoch": 1.4393127972237694, "grad_norm": 4.8916850090026855, "learning_rate": 8.092476213890124e-05, "loss": 2.6574459075927734, "memory(GiB)": 72.85, "step": 33595, "token_acc": 0.4420289855072464, "train_speed(iter/s)": 0.670612 }, { "epoch": 1.4395270125530182, "grad_norm": 4.549213886260986, "learning_rate": 8.09194736802432e-05, "loss": 2.520174026489258, "memory(GiB)": 72.85, "step": 33600, "token_acc": 0.47808764940239046, "train_speed(iter/s)": 0.670609 }, { "epoch": 1.4397412278822672, "grad_norm": 4.568776607513428, "learning_rate": 8.091418466144989e-05, "loss": 2.411275100708008, "memory(GiB)": 72.85, "step": 33605, "token_acc": 0.4925373134328358, "train_speed(iter/s)": 0.670605 }, { "epoch": 1.4399554432115163, "grad_norm": 4.250730991363525, "learning_rate": 8.090889508261712e-05, "loss": 2.43029727935791, "memory(GiB)": 72.85, "step": 33610, "token_acc": 0.48742138364779874, "train_speed(iter/s)": 0.670575 }, { "epoch": 1.440169658540765, "grad_norm": 3.623695135116577, "learning_rate": 8.090360494384073e-05, "loss": 2.6587902069091798, "memory(GiB)": 72.85, "step": 33615, 
"token_acc": 0.45901639344262296, "train_speed(iter/s)": 0.670572 }, { "epoch": 1.440383873870014, "grad_norm": 4.0562639236450195, "learning_rate": 8.089831424521653e-05, "loss": 2.524144172668457, "memory(GiB)": 72.85, "step": 33620, "token_acc": 0.4582043343653251, "train_speed(iter/s)": 0.67059 }, { "epoch": 1.4405980891992631, "grad_norm": 3.515717029571533, "learning_rate": 8.089302298684039e-05, "loss": 2.1786081314086916, "memory(GiB)": 72.85, "step": 33625, "token_acc": 0.4891304347826087, "train_speed(iter/s)": 0.670595 }, { "epoch": 1.440812304528512, "grad_norm": 5.014739990234375, "learning_rate": 8.088773116880817e-05, "loss": 2.1988094329833983, "memory(GiB)": 72.85, "step": 33630, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.670612 }, { "epoch": 1.441026519857761, "grad_norm": 3.565001964569092, "learning_rate": 8.088243879121571e-05, "loss": 1.946315383911133, "memory(GiB)": 72.85, "step": 33635, "token_acc": 0.5506072874493927, "train_speed(iter/s)": 0.670608 }, { "epoch": 1.44124073518701, "grad_norm": 5.118478298187256, "learning_rate": 8.087714585415892e-05, "loss": 2.4683467864990236, "memory(GiB)": 72.85, "step": 33640, "token_acc": 0.490272373540856, "train_speed(iter/s)": 0.67061 }, { "epoch": 1.4414549505162588, "grad_norm": 5.101363658905029, "learning_rate": 8.087185235773365e-05, "loss": 2.2407102584838867, "memory(GiB)": 72.85, "step": 33645, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.670595 }, { "epoch": 1.4416691658455079, "grad_norm": 4.328855037689209, "learning_rate": 8.086655830203581e-05, "loss": 2.1488441467285155, "memory(GiB)": 72.85, "step": 33650, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.67057 }, { "epoch": 1.441883381174757, "grad_norm": 3.947768449783325, "learning_rate": 8.086126368716131e-05, "loss": 2.3850160598754884, "memory(GiB)": 72.85, "step": 33655, "token_acc": 0.5035460992907801, "train_speed(iter/s)": 0.670542 }, { "epoch": 1.4420975965040057, "grad_norm": 
5.210814952850342, "learning_rate": 8.085596851320609e-05, "loss": 2.3596960067749024, "memory(GiB)": 72.85, "step": 33660, "token_acc": 0.506578947368421, "train_speed(iter/s)": 0.670553 }, { "epoch": 1.4423118118332547, "grad_norm": 5.550347805023193, "learning_rate": 8.085067278026604e-05, "loss": 2.5660608291625975, "memory(GiB)": 72.85, "step": 33665, "token_acc": 0.46048109965635736, "train_speed(iter/s)": 0.670563 }, { "epoch": 1.4425260271625038, "grad_norm": 4.665592670440674, "learning_rate": 8.084537648843711e-05, "loss": 2.5293899536132813, "memory(GiB)": 72.85, "step": 33670, "token_acc": 0.5231316725978647, "train_speed(iter/s)": 0.670555 }, { "epoch": 1.4427402424917526, "grad_norm": 3.5231082439422607, "learning_rate": 8.084007963781525e-05, "loss": 2.220172882080078, "memory(GiB)": 72.85, "step": 33675, "token_acc": 0.48727272727272725, "train_speed(iter/s)": 0.67056 }, { "epoch": 1.4429544578210016, "grad_norm": 3.382953643798828, "learning_rate": 8.083478222849643e-05, "loss": 2.3490888595581056, "memory(GiB)": 72.85, "step": 33680, "token_acc": 0.48307692307692307, "train_speed(iter/s)": 0.670554 }, { "epoch": 1.4431686731502507, "grad_norm": 4.699378967285156, "learning_rate": 8.082948426057661e-05, "loss": 2.5723175048828124, "memory(GiB)": 72.85, "step": 33685, "token_acc": 0.46578947368421053, "train_speed(iter/s)": 0.670545 }, { "epoch": 1.4433828884794995, "grad_norm": 6.733895301818848, "learning_rate": 8.082418573415173e-05, "loss": 2.417597007751465, "memory(GiB)": 72.85, "step": 33690, "token_acc": 0.4946236559139785, "train_speed(iter/s)": 0.670552 }, { "epoch": 1.4435971038087485, "grad_norm": 4.326153755187988, "learning_rate": 8.081888664931783e-05, "loss": 2.4158506393432617, "memory(GiB)": 72.85, "step": 33695, "token_acc": 0.5038167938931297, "train_speed(iter/s)": 0.670565 }, { "epoch": 1.4438113191379975, "grad_norm": 5.318588733673096, "learning_rate": 8.08135870061709e-05, "loss": 2.1371492385864257, "memory(GiB)": 72.85, 
"step": 33700, "token_acc": 0.527972027972028, "train_speed(iter/s)": 0.670559 }, { "epoch": 1.4440255344672464, "grad_norm": 4.254302978515625, "learning_rate": 8.080828680480692e-05, "loss": 2.3127132415771485, "memory(GiB)": 72.85, "step": 33705, "token_acc": 0.5071633237822349, "train_speed(iter/s)": 0.670554 }, { "epoch": 1.4442397497964954, "grad_norm": 5.460823059082031, "learning_rate": 8.080298604532192e-05, "loss": 2.644627571105957, "memory(GiB)": 72.85, "step": 33710, "token_acc": 0.5071428571428571, "train_speed(iter/s)": 0.670567 }, { "epoch": 1.4444539651257444, "grad_norm": 4.974159240722656, "learning_rate": 8.079768472781195e-05, "loss": 2.3976133346557615, "memory(GiB)": 72.85, "step": 33715, "token_acc": 0.4874551971326165, "train_speed(iter/s)": 0.670573 }, { "epoch": 1.4446681804549932, "grad_norm": 3.8855044841766357, "learning_rate": 8.079238285237302e-05, "loss": 2.2650474548339843, "memory(GiB)": 72.85, "step": 33720, "token_acc": 0.511864406779661, "train_speed(iter/s)": 0.670591 }, { "epoch": 1.4448823957842423, "grad_norm": 3.576871395111084, "learning_rate": 8.078708041910119e-05, "loss": 2.2911470413208006, "memory(GiB)": 72.85, "step": 33725, "token_acc": 0.554858934169279, "train_speed(iter/s)": 0.670597 }, { "epoch": 1.4450966111134913, "grad_norm": 4.5949578285217285, "learning_rate": 8.078177742809251e-05, "loss": 2.506329154968262, "memory(GiB)": 72.85, "step": 33730, "token_acc": 0.5, "train_speed(iter/s)": 0.670565 }, { "epoch": 1.4453108264427401, "grad_norm": 4.640181064605713, "learning_rate": 8.077647387944306e-05, "loss": 2.173170280456543, "memory(GiB)": 72.85, "step": 33735, "token_acc": 0.5292096219931272, "train_speed(iter/s)": 0.670568 }, { "epoch": 1.4455250417719891, "grad_norm": 4.681975364685059, "learning_rate": 8.077116977324893e-05, "loss": 2.352303886413574, "memory(GiB)": 72.85, "step": 33740, "token_acc": 0.48242811501597443, "train_speed(iter/s)": 0.670581 }, { "epoch": 1.4457392571012382, "grad_norm": 
6.368250370025635, "learning_rate": 8.076586510960617e-05, "loss": 2.821900177001953, "memory(GiB)": 72.85, "step": 33745, "token_acc": 0.43673469387755104, "train_speed(iter/s)": 0.670595 }, { "epoch": 1.445953472430487, "grad_norm": 4.316351413726807, "learning_rate": 8.076055988861092e-05, "loss": 2.183955764770508, "memory(GiB)": 72.85, "step": 33750, "token_acc": 0.5016181229773463, "train_speed(iter/s)": 0.670598 }, { "epoch": 1.446167687759736, "grad_norm": 4.137393474578857, "learning_rate": 8.075525411035927e-05, "loss": 2.4120683670043945, "memory(GiB)": 72.85, "step": 33755, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.670627 }, { "epoch": 1.446381903088985, "grad_norm": 5.151253700256348, "learning_rate": 8.074994777494733e-05, "loss": 2.372209167480469, "memory(GiB)": 72.85, "step": 33760, "token_acc": 0.48188405797101447, "train_speed(iter/s)": 0.670651 }, { "epoch": 1.4465961184182339, "grad_norm": 4.088521957397461, "learning_rate": 8.074464088247126e-05, "loss": 2.1537296295166017, "memory(GiB)": 72.85, "step": 33765, "token_acc": 0.5424354243542435, "train_speed(iter/s)": 0.670605 }, { "epoch": 1.446810333747483, "grad_norm": 4.845283031463623, "learning_rate": 8.073933343302715e-05, "loss": 2.4402372360229494, "memory(GiB)": 72.85, "step": 33770, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.670574 }, { "epoch": 1.447024549076732, "grad_norm": 4.502617835998535, "learning_rate": 8.07340254267112e-05, "loss": 2.1816173553466798, "memory(GiB)": 72.85, "step": 33775, "token_acc": 0.5215827338129496, "train_speed(iter/s)": 0.670583 }, { "epoch": 1.4472387644059808, "grad_norm": 5.673195838928223, "learning_rate": 8.072871686361955e-05, "loss": 2.388005256652832, "memory(GiB)": 72.85, "step": 33780, "token_acc": 0.5064516129032258, "train_speed(iter/s)": 0.67057 }, { "epoch": 1.4474529797352298, "grad_norm": 4.986849308013916, "learning_rate": 8.072340774384836e-05, "loss": 2.3950017929077148, "memory(GiB)": 72.85, "step": 
33785, "token_acc": 0.5099601593625498, "train_speed(iter/s)": 0.670545 }, { "epoch": 1.4476671950644788, "grad_norm": 3.7307510375976562, "learning_rate": 8.071809806749382e-05, "loss": 2.5586483001708986, "memory(GiB)": 72.85, "step": 33790, "token_acc": 0.4876543209876543, "train_speed(iter/s)": 0.670551 }, { "epoch": 1.4478814103937276, "grad_norm": 3.8579795360565186, "learning_rate": 8.071278783465213e-05, "loss": 2.150254249572754, "memory(GiB)": 72.85, "step": 33795, "token_acc": 0.5241157556270096, "train_speed(iter/s)": 0.670553 }, { "epoch": 1.4480956257229767, "grad_norm": 4.533455848693848, "learning_rate": 8.070747704541945e-05, "loss": 2.447788619995117, "memory(GiB)": 72.85, "step": 33800, "token_acc": 0.4744525547445255, "train_speed(iter/s)": 0.670558 }, { "epoch": 1.4483098410522257, "grad_norm": 5.0796284675598145, "learning_rate": 8.070216569989204e-05, "loss": 2.2297874450683595, "memory(GiB)": 72.85, "step": 33805, "token_acc": 0.5369649805447471, "train_speed(iter/s)": 0.670544 }, { "epoch": 1.4485240563814745, "grad_norm": 3.4943759441375732, "learning_rate": 8.069685379816609e-05, "loss": 1.9846986770629882, "memory(GiB)": 72.85, "step": 33810, "token_acc": 0.5436507936507936, "train_speed(iter/s)": 0.670533 }, { "epoch": 1.4487382717107236, "grad_norm": 5.543814659118652, "learning_rate": 8.069154134033784e-05, "loss": 2.3876047134399414, "memory(GiB)": 72.85, "step": 33815, "token_acc": 0.49812734082397003, "train_speed(iter/s)": 0.670543 }, { "epoch": 1.4489524870399726, "grad_norm": 4.099289894104004, "learning_rate": 8.068622832650352e-05, "loss": 2.0004705429077148, "memory(GiB)": 72.85, "step": 33820, "token_acc": 0.5225806451612903, "train_speed(iter/s)": 0.670549 }, { "epoch": 1.4491667023692214, "grad_norm": 3.400474786758423, "learning_rate": 8.06809147567594e-05, "loss": 2.2770221710205076, "memory(GiB)": 72.85, "step": 33825, "token_acc": 0.528125, "train_speed(iter/s)": 0.670554 }, { "epoch": 1.4493809176984704, "grad_norm": 
4.0029401779174805, "learning_rate": 8.067560063120173e-05, "loss": 2.2621225357055663, "memory(GiB)": 72.85, "step": 33830, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.670545 }, { "epoch": 1.4495951330277195, "grad_norm": 3.6112027168273926, "learning_rate": 8.067028594992677e-05, "loss": 2.423746871948242, "memory(GiB)": 72.85, "step": 33835, "token_acc": 0.48466257668711654, "train_speed(iter/s)": 0.670535 }, { "epoch": 1.4498093483569683, "grad_norm": 3.022078037261963, "learning_rate": 8.06649707130308e-05, "loss": 2.105556869506836, "memory(GiB)": 72.85, "step": 33840, "token_acc": 0.5562700964630225, "train_speed(iter/s)": 0.670505 }, { "epoch": 1.4500235636862173, "grad_norm": 3.557783603668213, "learning_rate": 8.065965492061012e-05, "loss": 2.35571174621582, "memory(GiB)": 72.85, "step": 33845, "token_acc": 0.5, "train_speed(iter/s)": 0.670497 }, { "epoch": 1.4502377790154664, "grad_norm": 4.455594062805176, "learning_rate": 8.065433857276106e-05, "loss": 2.404143714904785, "memory(GiB)": 72.85, "step": 33850, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.670513 }, { "epoch": 1.4504519943447154, "grad_norm": 3.899898052215576, "learning_rate": 8.064902166957987e-05, "loss": 2.347235107421875, "memory(GiB)": 72.85, "step": 33855, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.670536 }, { "epoch": 1.4506662096739642, "grad_norm": 4.170205593109131, "learning_rate": 8.06437042111629e-05, "loss": 1.8888740539550781, "memory(GiB)": 72.85, "step": 33860, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.670533 }, { "epoch": 1.4508804250032132, "grad_norm": 3.544260025024414, "learning_rate": 8.063838619760651e-05, "loss": 2.5510986328125, "memory(GiB)": 72.85, "step": 33865, "token_acc": 0.486646884272997, "train_speed(iter/s)": 0.670553 }, { "epoch": 1.4510946403324623, "grad_norm": 4.6172027587890625, "learning_rate": 8.0633067629007e-05, "loss": 2.483293151855469, "memory(GiB)": 72.85, "step": 33870, "token_acc": 
0.5047923322683706, "train_speed(iter/s)": 0.670536 }, { "epoch": 1.451308855661711, "grad_norm": 3.882540702819824, "learning_rate": 8.062774850546072e-05, "loss": 2.2015945434570314, "memory(GiB)": 72.85, "step": 33875, "token_acc": 0.4937888198757764, "train_speed(iter/s)": 0.670545 }, { "epoch": 1.4515230709909601, "grad_norm": 4.192780494689941, "learning_rate": 8.062242882706406e-05, "loss": 2.3394338607788088, "memory(GiB)": 72.85, "step": 33880, "token_acc": 0.504950495049505, "train_speed(iter/s)": 0.670529 }, { "epoch": 1.4517372863202092, "grad_norm": 4.342288494110107, "learning_rate": 8.061710859391337e-05, "loss": 2.520826721191406, "memory(GiB)": 72.85, "step": 33885, "token_acc": 0.5045317220543807, "train_speed(iter/s)": 0.670534 }, { "epoch": 1.451951501649458, "grad_norm": 4.01625919342041, "learning_rate": 8.061178780610504e-05, "loss": 2.4267066955566405, "memory(GiB)": 72.85, "step": 33890, "token_acc": 0.4887640449438202, "train_speed(iter/s)": 0.670551 }, { "epoch": 1.452165716978707, "grad_norm": 4.474276065826416, "learning_rate": 8.060646646373544e-05, "loss": 2.3148120880126952, "memory(GiB)": 72.85, "step": 33895, "token_acc": 0.47520661157024796, "train_speed(iter/s)": 0.670528 }, { "epoch": 1.452379932307956, "grad_norm": 3.9095258712768555, "learning_rate": 8.060114456690101e-05, "loss": 2.290681266784668, "memory(GiB)": 72.85, "step": 33900, "token_acc": 0.48295454545454547, "train_speed(iter/s)": 0.670543 }, { "epoch": 1.4525941476372048, "grad_norm": 3.8382415771484375, "learning_rate": 8.059582211569813e-05, "loss": 2.4592864990234373, "memory(GiB)": 72.85, "step": 33905, "token_acc": 0.4790874524714829, "train_speed(iter/s)": 0.670544 }, { "epoch": 1.4528083629664539, "grad_norm": 5.158317565917969, "learning_rate": 8.059049911022323e-05, "loss": 2.7073974609375, "memory(GiB)": 72.85, "step": 33910, "token_acc": 0.450354609929078, "train_speed(iter/s)": 0.670518 }, { "epoch": 1.453022578295703, "grad_norm": 6.43672513961792, 
"learning_rate": 8.058517555057275e-05, "loss": 2.55360107421875, "memory(GiB)": 72.85, "step": 33915, "token_acc": 0.4551083591331269, "train_speed(iter/s)": 0.670518 }, { "epoch": 1.4532367936249517, "grad_norm": 3.858205556869507, "learning_rate": 8.057985143684312e-05, "loss": 2.4610622406005858, "memory(GiB)": 72.85, "step": 33920, "token_acc": 0.45272206303724927, "train_speed(iter/s)": 0.670533 }, { "epoch": 1.4534510089542008, "grad_norm": 4.6449875831604, "learning_rate": 8.05745267691308e-05, "loss": 2.392522430419922, "memory(GiB)": 72.85, "step": 33925, "token_acc": 0.5, "train_speed(iter/s)": 0.67055 }, { "epoch": 1.4536652242834498, "grad_norm": 4.089311122894287, "learning_rate": 8.056920154753224e-05, "loss": 2.3290258407592774, "memory(GiB)": 72.85, "step": 33930, "token_acc": 0.4828897338403042, "train_speed(iter/s)": 0.670544 }, { "epoch": 1.4538794396126986, "grad_norm": 4.182172775268555, "learning_rate": 8.056387577214391e-05, "loss": 2.2201984405517576, "memory(GiB)": 72.85, "step": 33935, "token_acc": 0.5224913494809689, "train_speed(iter/s)": 0.670535 }, { "epoch": 1.4540936549419476, "grad_norm": 4.046298503875732, "learning_rate": 8.055854944306232e-05, "loss": 2.386513328552246, "memory(GiB)": 72.85, "step": 33940, "token_acc": 0.5236220472440944, "train_speed(iter/s)": 0.670528 }, { "epoch": 1.4543078702711967, "grad_norm": 3.7009356021881104, "learning_rate": 8.055322256038393e-05, "loss": 2.56221866607666, "memory(GiB)": 72.85, "step": 33945, "token_acc": 0.45787545787545786, "train_speed(iter/s)": 0.670541 }, { "epoch": 1.4545220856004455, "grad_norm": 5.945263862609863, "learning_rate": 8.054789512420524e-05, "loss": 2.756915283203125, "memory(GiB)": 72.85, "step": 33950, "token_acc": 0.45738636363636365, "train_speed(iter/s)": 0.670569 }, { "epoch": 1.4547363009296945, "grad_norm": 3.993832588195801, "learning_rate": 8.054256713462278e-05, "loss": 2.387126350402832, "memory(GiB)": 72.85, "step": 33955, "token_acc": 
0.4865771812080537, "train_speed(iter/s)": 0.670579 }, { "epoch": 1.4549505162589436, "grad_norm": 3.728430986404419, "learning_rate": 8.053723859173307e-05, "loss": 2.216624069213867, "memory(GiB)": 72.85, "step": 33960, "token_acc": 0.5406360424028268, "train_speed(iter/s)": 0.670585 }, { "epoch": 1.4551647315881924, "grad_norm": 4.238762855529785, "learning_rate": 8.053190949563265e-05, "loss": 2.381273651123047, "memory(GiB)": 72.85, "step": 33965, "token_acc": 0.5181159420289855, "train_speed(iter/s)": 0.670586 }, { "epoch": 1.4553789469174414, "grad_norm": 4.805182456970215, "learning_rate": 8.052657984641803e-05, "loss": 2.6945478439331056, "memory(GiB)": 72.85, "step": 33970, "token_acc": 0.48024316109422494, "train_speed(iter/s)": 0.670567 }, { "epoch": 1.4555931622466904, "grad_norm": 3.565286874771118, "learning_rate": 8.052124964418579e-05, "loss": 2.5219594955444338, "memory(GiB)": 72.85, "step": 33975, "token_acc": 0.4588607594936709, "train_speed(iter/s)": 0.670546 }, { "epoch": 1.4558073775759393, "grad_norm": 3.8112916946411133, "learning_rate": 8.051591888903247e-05, "loss": 2.3962387084960937, "memory(GiB)": 72.85, "step": 33980, "token_acc": 0.5, "train_speed(iter/s)": 0.670526 }, { "epoch": 1.4560215929051883, "grad_norm": 3.8315742015838623, "learning_rate": 8.051058758105467e-05, "loss": 2.323067474365234, "memory(GiB)": 72.85, "step": 33985, "token_acc": 0.47878787878787876, "train_speed(iter/s)": 0.670538 }, { "epoch": 1.4562358082344373, "grad_norm": 4.262530326843262, "learning_rate": 8.050525572034892e-05, "loss": 2.7979644775390624, "memory(GiB)": 72.85, "step": 33990, "token_acc": 0.4, "train_speed(iter/s)": 0.670536 }, { "epoch": 1.4564500235636861, "grad_norm": 3.2615950107574463, "learning_rate": 8.049992330701188e-05, "loss": 2.42089900970459, "memory(GiB)": 72.85, "step": 33995, "token_acc": 0.47674418604651164, "train_speed(iter/s)": 0.670533 }, { "epoch": 1.4566642388929352, "grad_norm": 3.503493547439575, "learning_rate": 
8.049459034114011e-05, "loss": 2.2047140121459963, "memory(GiB)": 72.85, "step": 34000, "token_acc": 0.4966216216216216, "train_speed(iter/s)": 0.670489 }, { "epoch": 1.4566642388929352, "eval_loss": 2.0516066551208496, "eval_runtime": 16.0868, "eval_samples_per_second": 6.216, "eval_steps_per_second": 6.216, "eval_token_acc": 0.46802721088435373, "step": 34000 }, { "epoch": 1.4568784542221842, "grad_norm": 2.874521255493164, "learning_rate": 8.048925682283022e-05, "loss": 2.189488983154297, "memory(GiB)": 72.85, "step": 34005, "token_acc": 0.4913461538461538, "train_speed(iter/s)": 0.670261 }, { "epoch": 1.457092669551433, "grad_norm": 4.453402519226074, "learning_rate": 8.048392275217886e-05, "loss": 2.3175662994384765, "memory(GiB)": 72.85, "step": 34010, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.670269 }, { "epoch": 1.457306884880682, "grad_norm": 3.454986572265625, "learning_rate": 8.047858812928264e-05, "loss": 2.3100446701049804, "memory(GiB)": 72.85, "step": 34015, "token_acc": 0.5060975609756098, "train_speed(iter/s)": 0.670296 }, { "epoch": 1.457521100209931, "grad_norm": 3.5302233695983887, "learning_rate": 8.04732529542382e-05, "loss": 2.4999773025512697, "memory(GiB)": 72.85, "step": 34020, "token_acc": 0.4935897435897436, "train_speed(iter/s)": 0.670304 }, { "epoch": 1.4577353155391801, "grad_norm": 4.0040283203125, "learning_rate": 8.046791722714218e-05, "loss": 2.5200403213500975, "memory(GiB)": 72.85, "step": 34025, "token_acc": 0.5155038759689923, "train_speed(iter/s)": 0.67032 }, { "epoch": 1.457949530868429, "grad_norm": 4.459872245788574, "learning_rate": 8.046258094809127e-05, "loss": 2.577519416809082, "memory(GiB)": 72.85, "step": 34030, "token_acc": 0.4575342465753425, "train_speed(iter/s)": 0.670336 }, { "epoch": 1.458163746197678, "grad_norm": 6.176324844360352, "learning_rate": 8.045724411718214e-05, "loss": 2.1823474884033205, "memory(GiB)": 72.85, "step": 34035, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 
0.670319 }, { "epoch": 1.458377961526927, "grad_norm": 3.681763172149658, "learning_rate": 8.045190673451146e-05, "loss": 2.1676950454711914, "memory(GiB)": 72.85, "step": 34040, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.670314 }, { "epoch": 1.4585921768561758, "grad_norm": 4.385598659515381, "learning_rate": 8.044656880017591e-05, "loss": 2.476145553588867, "memory(GiB)": 72.85, "step": 34045, "token_acc": 0.4423791821561338, "train_speed(iter/s)": 0.670316 }, { "epoch": 1.4588063921854248, "grad_norm": 4.203603267669678, "learning_rate": 8.044123031427222e-05, "loss": 2.4232643127441404, "memory(GiB)": 72.85, "step": 34050, "token_acc": 0.4724137931034483, "train_speed(iter/s)": 0.67033 }, { "epoch": 1.4590206075146739, "grad_norm": 3.2107186317443848, "learning_rate": 8.043589127689706e-05, "loss": 2.1414922714233398, "memory(GiB)": 72.85, "step": 34055, "token_acc": 0.5346153846153846, "train_speed(iter/s)": 0.670335 }, { "epoch": 1.4592348228439227, "grad_norm": 5.618509769439697, "learning_rate": 8.043055168814722e-05, "loss": 2.142414093017578, "memory(GiB)": 72.85, "step": 34060, "token_acc": 0.532608695652174, "train_speed(iter/s)": 0.670342 }, { "epoch": 1.4594490381731717, "grad_norm": 4.256384372711182, "learning_rate": 8.042521154811934e-05, "loss": 2.431450080871582, "memory(GiB)": 72.85, "step": 34065, "token_acc": 0.4581818181818182, "train_speed(iter/s)": 0.670354 }, { "epoch": 1.4596632535024208, "grad_norm": 3.6916863918304443, "learning_rate": 8.041987085691026e-05, "loss": 2.2531917572021483, "memory(GiB)": 72.85, "step": 34070, "token_acc": 0.4894366197183099, "train_speed(iter/s)": 0.670377 }, { "epoch": 1.4598774688316696, "grad_norm": 3.432591676712036, "learning_rate": 8.041452961461665e-05, "loss": 2.4320438385009764, "memory(GiB)": 72.85, "step": 34075, "token_acc": 0.5, "train_speed(iter/s)": 0.67037 }, { "epoch": 1.4600916841609186, "grad_norm": 4.184640884399414, "learning_rate": 8.040918782133532e-05, "loss": 
2.583971405029297, "memory(GiB)": 72.85, "step": 34080, "token_acc": 0.5113636363636364, "train_speed(iter/s)": 0.670358 }, { "epoch": 1.4603058994901676, "grad_norm": 4.599289894104004, "learning_rate": 8.040384547716302e-05, "loss": 2.132400703430176, "memory(GiB)": 72.85, "step": 34085, "token_acc": 0.541795665634675, "train_speed(iter/s)": 0.67035 }, { "epoch": 1.4605201148194165, "grad_norm": 4.473696231842041, "learning_rate": 8.039850258219655e-05, "loss": 2.390949249267578, "memory(GiB)": 72.85, "step": 34090, "token_acc": 0.5171232876712328, "train_speed(iter/s)": 0.670344 }, { "epoch": 1.4607343301486655, "grad_norm": 5.313993453979492, "learning_rate": 8.039315913653267e-05, "loss": 2.165274429321289, "memory(GiB)": 72.85, "step": 34095, "token_acc": 0.48760330578512395, "train_speed(iter/s)": 0.670356 }, { "epoch": 1.4609485454779145, "grad_norm": 3.81154727935791, "learning_rate": 8.03878151402682e-05, "loss": 2.3118564605712892, "memory(GiB)": 72.85, "step": 34100, "token_acc": 0.47686832740213525, "train_speed(iter/s)": 0.670362 }, { "epoch": 1.4611627608071633, "grad_norm": 4.538435459136963, "learning_rate": 8.038247059349998e-05, "loss": 2.3770151138305664, "memory(GiB)": 72.85, "step": 34105, "token_acc": 0.5019455252918288, "train_speed(iter/s)": 0.670369 }, { "epoch": 1.4613769761364124, "grad_norm": 3.857401132583618, "learning_rate": 8.037712549632479e-05, "loss": 2.3073043823242188, "memory(GiB)": 72.85, "step": 34110, "token_acc": 0.5160256410256411, "train_speed(iter/s)": 0.670366 }, { "epoch": 1.4615911914656614, "grad_norm": 3.9323365688323975, "learning_rate": 8.037177984883948e-05, "loss": 2.189548873901367, "memory(GiB)": 72.85, "step": 34115, "token_acc": 0.49117647058823527, "train_speed(iter/s)": 0.670387 }, { "epoch": 1.4618054067949102, "grad_norm": 4.089108943939209, "learning_rate": 8.036643365114087e-05, "loss": 2.1356813430786135, "memory(GiB)": 72.85, "step": 34120, "token_acc": 0.583969465648855, "train_speed(iter/s)": 
0.670394 }, { "epoch": 1.4620196221241593, "grad_norm": 4.835240840911865, "learning_rate": 8.036108690332584e-05, "loss": 2.188472366333008, "memory(GiB)": 72.85, "step": 34125, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.670402 }, { "epoch": 1.4622338374534083, "grad_norm": 3.847472667694092, "learning_rate": 8.035573960549126e-05, "loss": 2.4694902420043947, "memory(GiB)": 72.85, "step": 34130, "token_acc": 0.4411764705882353, "train_speed(iter/s)": 0.670394 }, { "epoch": 1.462448052782657, "grad_norm": 4.1768574714660645, "learning_rate": 8.035039175773393e-05, "loss": 2.5365421295166017, "memory(GiB)": 72.85, "step": 34135, "token_acc": 0.4743935309973046, "train_speed(iter/s)": 0.670377 }, { "epoch": 1.4626622681119061, "grad_norm": 3.9942739009857178, "learning_rate": 8.034504336015083e-05, "loss": 2.499897575378418, "memory(GiB)": 72.85, "step": 34140, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.670384 }, { "epoch": 1.4628764834411552, "grad_norm": 9.458495140075684, "learning_rate": 8.033969441283879e-05, "loss": 2.2584962844848633, "memory(GiB)": 72.85, "step": 34145, "token_acc": 0.4895833333333333, "train_speed(iter/s)": 0.670393 }, { "epoch": 1.463090698770404, "grad_norm": 3.975297689437866, "learning_rate": 8.033434491589471e-05, "loss": 2.497118377685547, "memory(GiB)": 72.85, "step": 34150, "token_acc": 0.48013245033112584, "train_speed(iter/s)": 0.670411 }, { "epoch": 1.463304914099653, "grad_norm": 4.6571946144104, "learning_rate": 8.032899486941554e-05, "loss": 2.7803133010864256, "memory(GiB)": 72.85, "step": 34155, "token_acc": 0.4478114478114478, "train_speed(iter/s)": 0.670404 }, { "epoch": 1.463519129428902, "grad_norm": 4.610515117645264, "learning_rate": 8.032364427349817e-05, "loss": 2.5964351654052735, "memory(GiB)": 72.85, "step": 34160, "token_acc": 0.4563106796116505, "train_speed(iter/s)": 0.670403 }, { "epoch": 1.4637333447581509, "grad_norm": 4.0329179763793945, "learning_rate": 8.031829312823954e-05, 
"loss": 2.4525705337524415, "memory(GiB)": 72.85, "step": 34165, "token_acc": 0.49280575539568344, "train_speed(iter/s)": 0.670382 }, { "epoch": 1.4639475600874, "grad_norm": 3.5361311435699463, "learning_rate": 8.031294143373656e-05, "loss": 2.2601062774658205, "memory(GiB)": 72.85, "step": 34170, "token_acc": 0.5, "train_speed(iter/s)": 0.670356 }, { "epoch": 1.464161775416649, "grad_norm": 4.382315635681152, "learning_rate": 8.030758919008623e-05, "loss": 2.5032453536987305, "memory(GiB)": 72.85, "step": 34175, "token_acc": 0.4961832061068702, "train_speed(iter/s)": 0.670362 }, { "epoch": 1.4643759907458977, "grad_norm": 3.3424880504608154, "learning_rate": 8.03022363973855e-05, "loss": 2.344425582885742, "memory(GiB)": 72.85, "step": 34180, "token_acc": 0.5183946488294314, "train_speed(iter/s)": 0.670381 }, { "epoch": 1.4645902060751468, "grad_norm": 3.6335318088531494, "learning_rate": 8.029688305573133e-05, "loss": 2.5005929946899412, "memory(GiB)": 72.85, "step": 34185, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.670387 }, { "epoch": 1.4648044214043958, "grad_norm": 3.6398885250091553, "learning_rate": 8.02915291652207e-05, "loss": 2.291584587097168, "memory(GiB)": 72.85, "step": 34190, "token_acc": 0.4962686567164179, "train_speed(iter/s)": 0.670381 }, { "epoch": 1.4650186367336446, "grad_norm": 3.726095199584961, "learning_rate": 8.02861747259506e-05, "loss": 2.267990493774414, "memory(GiB)": 72.85, "step": 34195, "token_acc": 0.48028673835125446, "train_speed(iter/s)": 0.670375 }, { "epoch": 1.4652328520628937, "grad_norm": 3.760996103286743, "learning_rate": 8.028081973801802e-05, "loss": 2.1578594207763673, "memory(GiB)": 72.85, "step": 34200, "token_acc": 0.5285171102661597, "train_speed(iter/s)": 0.670368 }, { "epoch": 1.4654470673921427, "grad_norm": 4.093011379241943, "learning_rate": 8.027546420152001e-05, "loss": 2.243631362915039, "memory(GiB)": 72.85, "step": 34205, "token_acc": 0.5214521452145214, "train_speed(iter/s)": 0.670359 
}, { "epoch": 1.4656612827213915, "grad_norm": 4.537707328796387, "learning_rate": 8.027010811655354e-05, "loss": 2.3188945770263674, "memory(GiB)": 72.85, "step": 34210, "token_acc": 0.5146443514644351, "train_speed(iter/s)": 0.670352 }, { "epoch": 1.4658754980506405, "grad_norm": 4.688474655151367, "learning_rate": 8.026475148321568e-05, "loss": 2.1644571304321287, "memory(GiB)": 72.85, "step": 34215, "token_acc": 0.48184818481848185, "train_speed(iter/s)": 0.670347 }, { "epoch": 1.4660897133798896, "grad_norm": 4.493636131286621, "learning_rate": 8.025939430160346e-05, "loss": 2.1894412994384767, "memory(GiB)": 72.85, "step": 34220, "token_acc": 0.498220640569395, "train_speed(iter/s)": 0.67033 }, { "epoch": 1.4663039287091384, "grad_norm": 3.851217746734619, "learning_rate": 8.025403657181393e-05, "loss": 2.2428329467773436, "memory(GiB)": 72.85, "step": 34225, "token_acc": 0.5387453874538746, "train_speed(iter/s)": 0.67034 }, { "epoch": 1.4665181440383874, "grad_norm": 5.323233127593994, "learning_rate": 8.024867829394413e-05, "loss": 2.220548629760742, "memory(GiB)": 72.85, "step": 34230, "token_acc": 0.5175097276264592, "train_speed(iter/s)": 0.670372 }, { "epoch": 1.4667323593676365, "grad_norm": 6.2843852043151855, "learning_rate": 8.024331946809117e-05, "loss": 2.3531112670898438, "memory(GiB)": 72.85, "step": 34235, "token_acc": 0.5409252669039146, "train_speed(iter/s)": 0.67038 }, { "epoch": 1.4669465746968853, "grad_norm": 5.820172309875488, "learning_rate": 8.02379600943521e-05, "loss": 2.1198734283447265, "memory(GiB)": 72.85, "step": 34240, "token_acc": 0.5349794238683128, "train_speed(iter/s)": 0.670388 }, { "epoch": 1.4671607900261343, "grad_norm": 4.397724151611328, "learning_rate": 8.023260017282401e-05, "loss": 2.3439441680908204, "memory(GiB)": 72.85, "step": 34245, "token_acc": 0.449438202247191, "train_speed(iter/s)": 0.670396 }, { "epoch": 1.4673750053553833, "grad_norm": 5.04194450378418, "learning_rate": 8.0227239703604e-05, "loss": 
2.3244930267333985, "memory(GiB)": 72.85, "step": 34250, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.670404 }, { "epoch": 1.4675892206846322, "grad_norm": 4.868307113647461, "learning_rate": 8.022187868678921e-05, "loss": 2.357381057739258, "memory(GiB)": 72.85, "step": 34255, "token_acc": 0.48314606741573035, "train_speed(iter/s)": 0.670418 }, { "epoch": 1.4678034360138812, "grad_norm": 4.375193119049072, "learning_rate": 8.021651712247671e-05, "loss": 2.2084476470947267, "memory(GiB)": 72.85, "step": 34260, "token_acc": 0.525096525096525, "train_speed(iter/s)": 0.670421 }, { "epoch": 1.4680176513431302, "grad_norm": 3.3241875171661377, "learning_rate": 8.021115501076369e-05, "loss": 2.162697982788086, "memory(GiB)": 72.85, "step": 34265, "token_acc": 0.5079787234042553, "train_speed(iter/s)": 0.670432 }, { "epoch": 1.468231866672379, "grad_norm": 3.8296704292297363, "learning_rate": 8.020579235174722e-05, "loss": 2.2682821273803713, "memory(GiB)": 72.85, "step": 34270, "token_acc": 0.5015479876160991, "train_speed(iter/s)": 0.670455 }, { "epoch": 1.468446082001628, "grad_norm": 4.878271102905273, "learning_rate": 8.020042914552451e-05, "loss": 2.3239044189453124, "memory(GiB)": 72.85, "step": 34275, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.670457 }, { "epoch": 1.468660297330877, "grad_norm": 4.545825958251953, "learning_rate": 8.019506539219269e-05, "loss": 2.256613349914551, "memory(GiB)": 72.85, "step": 34280, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.670475 }, { "epoch": 1.468874512660126, "grad_norm": 3.837064027786255, "learning_rate": 8.018970109184894e-05, "loss": 2.409763526916504, "memory(GiB)": 72.85, "step": 34285, "token_acc": 0.4778481012658228, "train_speed(iter/s)": 0.670458 }, { "epoch": 1.469088727989375, "grad_norm": 3.549020767211914, "learning_rate": 8.018433624459042e-05, "loss": 2.1119712829589843, "memory(GiB)": 72.85, "step": 34290, "token_acc": 0.5572289156626506, "train_speed(iter/s)": 
0.670469 }, { "epoch": 1.469302943318624, "grad_norm": 4.962244510650635, "learning_rate": 8.017897085051435e-05, "loss": 2.7582324981689452, "memory(GiB)": 72.85, "step": 34295, "token_acc": 0.42857142857142855, "train_speed(iter/s)": 0.670483 }, { "epoch": 1.4695171586478728, "grad_norm": 6.32048225402832, "learning_rate": 8.01736049097179e-05, "loss": 2.307154083251953, "memory(GiB)": 72.85, "step": 34300, "token_acc": 0.5211267605633803, "train_speed(iter/s)": 0.670481 }, { "epoch": 1.4697313739771218, "grad_norm": 4.90799617767334, "learning_rate": 8.01682384222983e-05, "loss": 2.2045623779296877, "memory(GiB)": 72.85, "step": 34305, "token_acc": 0.5117056856187291, "train_speed(iter/s)": 0.670493 }, { "epoch": 1.4699455893063709, "grad_norm": 4.818366050720215, "learning_rate": 8.016287138835277e-05, "loss": 2.510153579711914, "memory(GiB)": 72.85, "step": 34310, "token_acc": 0.46808510638297873, "train_speed(iter/s)": 0.670516 }, { "epoch": 1.4701598046356197, "grad_norm": 3.7846174240112305, "learning_rate": 8.01575038079785e-05, "loss": 2.548066329956055, "memory(GiB)": 72.85, "step": 34315, "token_acc": 0.4786885245901639, "train_speed(iter/s)": 0.670521 }, { "epoch": 1.4703740199648687, "grad_norm": 5.704685211181641, "learning_rate": 8.015213568127278e-05, "loss": 2.5178293228149413, "memory(GiB)": 72.85, "step": 34320, "token_acc": 0.4819672131147541, "train_speed(iter/s)": 0.670545 }, { "epoch": 1.4705882352941178, "grad_norm": 3.391746759414673, "learning_rate": 8.014676700833284e-05, "loss": 2.272053527832031, "memory(GiB)": 72.85, "step": 34325, "token_acc": 0.484375, "train_speed(iter/s)": 0.670573 }, { "epoch": 1.4708024506233666, "grad_norm": 6.201812267303467, "learning_rate": 8.014139778925593e-05, "loss": 2.372295379638672, "memory(GiB)": 72.85, "step": 34330, "token_acc": 0.4517241379310345, "train_speed(iter/s)": 0.670592 }, { "epoch": 1.4710166659526156, "grad_norm": 4.0191450119018555, "learning_rate": 8.013602802413932e-05, "loss": 
2.4766956329345704, "memory(GiB)": 72.85, "step": 34335, "token_acc": 0.43573667711598746, "train_speed(iter/s)": 0.670577 }, { "epoch": 1.4712308812818646, "grad_norm": 3.8286538124084473, "learning_rate": 8.013065771308031e-05, "loss": 2.392102813720703, "memory(GiB)": 72.85, "step": 34340, "token_acc": 0.4681647940074906, "train_speed(iter/s)": 0.670571 }, { "epoch": 1.4714450966111134, "grad_norm": 3.986523389816284, "learning_rate": 8.012528685617615e-05, "loss": 2.4802082061767576, "memory(GiB)": 72.85, "step": 34345, "token_acc": 0.4709897610921502, "train_speed(iter/s)": 0.670577 }, { "epoch": 1.4716593119403625, "grad_norm": 5.915815830230713, "learning_rate": 8.011991545352415e-05, "loss": 2.302387237548828, "memory(GiB)": 72.85, "step": 34350, "token_acc": 0.47572815533980584, "train_speed(iter/s)": 0.67056 }, { "epoch": 1.4718735272696115, "grad_norm": 4.340681076049805, "learning_rate": 8.011454350522165e-05, "loss": 2.4389535903930666, "memory(GiB)": 72.85, "step": 34355, "token_acc": 0.4982456140350877, "train_speed(iter/s)": 0.670573 }, { "epoch": 1.4720877425988603, "grad_norm": 6.475215911865234, "learning_rate": 8.010917101136593e-05, "loss": 2.3902149200439453, "memory(GiB)": 72.85, "step": 34360, "token_acc": 0.4606741573033708, "train_speed(iter/s)": 0.670573 }, { "epoch": 1.4723019579281094, "grad_norm": 3.6003639698028564, "learning_rate": 8.010379797205433e-05, "loss": 2.3891998291015626, "memory(GiB)": 72.85, "step": 34365, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.67058 }, { "epoch": 1.4725161732573584, "grad_norm": 4.625465393066406, "learning_rate": 8.009842438738422e-05, "loss": 2.3448617935180662, "memory(GiB)": 72.85, "step": 34370, "token_acc": 0.555921052631579, "train_speed(iter/s)": 0.670573 }, { "epoch": 1.4727303885866072, "grad_norm": 5.430233001708984, "learning_rate": 8.00930502574529e-05, "loss": 2.0171247482299806, "memory(GiB)": 72.85, "step": 34375, "token_acc": 0.5778546712802768, "train_speed(iter/s)": 
0.670578 }, { "epoch": 1.4729446039158562, "grad_norm": 4.155650615692139, "learning_rate": 8.008767558235775e-05, "loss": 2.3294464111328126, "memory(GiB)": 72.85, "step": 34380, "token_acc": 0.5358490566037736, "train_speed(iter/s)": 0.670599 }, { "epoch": 1.4731588192451053, "grad_norm": 5.693789005279541, "learning_rate": 8.00823003621961e-05, "loss": 2.1823768615722656, "memory(GiB)": 72.85, "step": 34385, "token_acc": 0.545816733067729, "train_speed(iter/s)": 0.670574 }, { "epoch": 1.473373034574354, "grad_norm": 4.023550987243652, "learning_rate": 8.00769245970654e-05, "loss": 2.16021785736084, "memory(GiB)": 72.85, "step": 34390, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.67058 }, { "epoch": 1.4735872499036031, "grad_norm": 3.6162843704223633, "learning_rate": 8.007154828706298e-05, "loss": 2.629109573364258, "memory(GiB)": 72.85, "step": 34395, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.670556 }, { "epoch": 1.4738014652328522, "grad_norm": 5.66105318069458, "learning_rate": 8.006617143228626e-05, "loss": 2.274684715270996, "memory(GiB)": 72.85, "step": 34400, "token_acc": 0.4868913857677903, "train_speed(iter/s)": 0.670551 }, { "epoch": 1.474015680562101, "grad_norm": 4.293038368225098, "learning_rate": 8.006079403283262e-05, "loss": 2.4395269393920898, "memory(GiB)": 72.85, "step": 34405, "token_acc": 0.49477351916376305, "train_speed(iter/s)": 0.670539 }, { "epoch": 1.47422989589135, "grad_norm": 5.343498706817627, "learning_rate": 8.005541608879952e-05, "loss": 2.4431198120117186, "memory(GiB)": 72.85, "step": 34410, "token_acc": 0.4823008849557522, "train_speed(iter/s)": 0.670537 }, { "epoch": 1.474444111220599, "grad_norm": 4.367417335510254, "learning_rate": 8.005003760028436e-05, "loss": 2.3323211669921875, "memory(GiB)": 72.85, "step": 34415, "token_acc": 0.49508196721311476, "train_speed(iter/s)": 0.670529 }, { "epoch": 1.4746583265498479, "grad_norm": 4.941127777099609, "learning_rate": 8.004465856738457e-05, 
"loss": 2.7926864624023438, "memory(GiB)": 72.85, "step": 34420, "token_acc": 0.4570446735395189, "train_speed(iter/s)": 0.670532 }, { "epoch": 1.4748725418790969, "grad_norm": 3.7846224308013916, "learning_rate": 8.003927899019761e-05, "loss": 2.440199279785156, "memory(GiB)": 72.85, "step": 34425, "token_acc": 0.48299319727891155, "train_speed(iter/s)": 0.670548 }, { "epoch": 1.475086757208346, "grad_norm": 4.8972063064575195, "learning_rate": 8.003389886882094e-05, "loss": 2.3156070709228516, "memory(GiB)": 72.85, "step": 34430, "token_acc": 0.4843205574912892, "train_speed(iter/s)": 0.670532 }, { "epoch": 1.4753009725375947, "grad_norm": 4.374034881591797, "learning_rate": 8.0028518203352e-05, "loss": 2.673414611816406, "memory(GiB)": 72.85, "step": 34435, "token_acc": 0.47586206896551725, "train_speed(iter/s)": 0.670548 }, { "epoch": 1.4755151878668438, "grad_norm": 5.629209041595459, "learning_rate": 8.002313699388828e-05, "loss": 2.3815982818603514, "memory(GiB)": 72.85, "step": 34440, "token_acc": 0.4823943661971831, "train_speed(iter/s)": 0.670548 }, { "epoch": 1.4757294031960928, "grad_norm": 3.772618532180786, "learning_rate": 8.001775524052729e-05, "loss": 2.2022769927978514, "memory(GiB)": 72.85, "step": 34445, "token_acc": 0.5, "train_speed(iter/s)": 0.670549 }, { "epoch": 1.4759436185253416, "grad_norm": 3.620013475418091, "learning_rate": 8.00123729433665e-05, "loss": 2.402755928039551, "memory(GiB)": 72.85, "step": 34450, "token_acc": 0.5150602409638554, "train_speed(iter/s)": 0.670552 }, { "epoch": 1.4761578338545907, "grad_norm": 3.911278247833252, "learning_rate": 8.000699010250342e-05, "loss": 2.3888727188110352, "memory(GiB)": 72.85, "step": 34455, "token_acc": 0.48363636363636364, "train_speed(iter/s)": 0.67054 }, { "epoch": 1.4763720491838397, "grad_norm": 3.545527696609497, "learning_rate": 8.000160671803556e-05, "loss": 2.350167083740234, "memory(GiB)": 72.85, "step": 34460, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.670525 
}, { "epoch": 1.4765862645130885, "grad_norm": 4.044427871704102, "learning_rate": 7.999622279006046e-05, "loss": 2.4986316680908205, "memory(GiB)": 72.85, "step": 34465, "token_acc": 0.4713804713804714, "train_speed(iter/s)": 0.670513 }, { "epoch": 1.4768004798423375, "grad_norm": 4.448097229003906, "learning_rate": 7.999083831867563e-05, "loss": 2.500925827026367, "memory(GiB)": 72.85, "step": 34470, "token_acc": 0.46757679180887374, "train_speed(iter/s)": 0.670525 }, { "epoch": 1.4770146951715866, "grad_norm": 4.992995262145996, "learning_rate": 7.998545330397864e-05, "loss": 2.4152908325195312, "memory(GiB)": 72.85, "step": 34475, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.670515 }, { "epoch": 1.4772289105008354, "grad_norm": 3.5936636924743652, "learning_rate": 7.998006774606703e-05, "loss": 2.6100790023803713, "memory(GiB)": 72.85, "step": 34480, "token_acc": 0.4173441734417344, "train_speed(iter/s)": 0.670519 }, { "epoch": 1.4774431258300844, "grad_norm": 3.518786668777466, "learning_rate": 7.997468164503839e-05, "loss": 2.2574525833129884, "memory(GiB)": 72.85, "step": 34485, "token_acc": 0.5031055900621118, "train_speed(iter/s)": 0.670538 }, { "epoch": 1.4776573411593334, "grad_norm": 4.832996845245361, "learning_rate": 7.996929500099025e-05, "loss": 2.7567184448242186, "memory(GiB)": 72.85, "step": 34490, "token_acc": 0.4797047970479705, "train_speed(iter/s)": 0.670551 }, { "epoch": 1.4778715564885823, "grad_norm": 3.6369729042053223, "learning_rate": 7.996390781402024e-05, "loss": 2.3751190185546873, "memory(GiB)": 72.85, "step": 34495, "token_acc": 0.5229007633587787, "train_speed(iter/s)": 0.670564 }, { "epoch": 1.4780857718178313, "grad_norm": 3.87239146232605, "learning_rate": 7.995852008422591e-05, "loss": 2.216882514953613, "memory(GiB)": 72.85, "step": 34500, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.670539 }, { "epoch": 1.4780857718178313, "eval_loss": 2.1520586013793945, "eval_runtime": 16.73, 
"eval_samples_per_second": 5.977, "eval_steps_per_second": 5.977, "eval_token_acc": 0.47956403269754766, "step": 34500 }, { "epoch": 1.4782999871470803, "grad_norm": 4.611786365509033, "learning_rate": 7.995313181170487e-05, "loss": 2.1614334106445314, "memory(GiB)": 72.85, "step": 34505, "token_acc": 0.4856577645895153, "train_speed(iter/s)": 0.670307 }, { "epoch": 1.4785142024763291, "grad_norm": 5.907651424407959, "learning_rate": 7.994774299655478e-05, "loss": 2.2986476898193358, "memory(GiB)": 72.85, "step": 34510, "token_acc": 0.4981132075471698, "train_speed(iter/s)": 0.670312 }, { "epoch": 1.4787284178055782, "grad_norm": 4.536204814910889, "learning_rate": 7.994235363887322e-05, "loss": 2.3426910400390626, "memory(GiB)": 72.85, "step": 34515, "token_acc": 0.49050632911392406, "train_speed(iter/s)": 0.670323 }, { "epoch": 1.4789426331348272, "grad_norm": 3.9450230598449707, "learning_rate": 7.993696373875784e-05, "loss": 2.2397138595581056, "memory(GiB)": 72.85, "step": 34520, "token_acc": 0.51875, "train_speed(iter/s)": 0.670333 }, { "epoch": 1.479156848464076, "grad_norm": 4.793577671051025, "learning_rate": 7.993157329630628e-05, "loss": 2.204816436767578, "memory(GiB)": 72.85, "step": 34525, "token_acc": 0.5253623188405797, "train_speed(iter/s)": 0.670352 }, { "epoch": 1.479371063793325, "grad_norm": 4.756554126739502, "learning_rate": 7.99261823116162e-05, "loss": 2.3464895248413087, "memory(GiB)": 72.85, "step": 34530, "token_acc": 0.463768115942029, "train_speed(iter/s)": 0.670321 }, { "epoch": 1.479585279122574, "grad_norm": 4.720856666564941, "learning_rate": 7.992079078478524e-05, "loss": 2.443126106262207, "memory(GiB)": 72.85, "step": 34535, "token_acc": 0.5180327868852459, "train_speed(iter/s)": 0.670309 }, { "epoch": 1.479799494451823, "grad_norm": 4.040621757507324, "learning_rate": 7.99153987159111e-05, "loss": 2.388212966918945, "memory(GiB)": 72.85, "step": 34540, "token_acc": 0.484149855907781, "train_speed(iter/s)": 0.670312 }, { 
"epoch": 1.480013709781072, "grad_norm": 3.967686891555786, "learning_rate": 7.991000610509143e-05, "loss": 2.4181385040283203, "memory(GiB)": 72.85, "step": 34545, "token_acc": 0.45925925925925926, "train_speed(iter/s)": 0.670317 }, { "epoch": 1.480227925110321, "grad_norm": 4.3102288246154785, "learning_rate": 7.990461295242394e-05, "loss": 2.2404882431030275, "memory(GiB)": 72.85, "step": 34550, "token_acc": 0.5093167701863354, "train_speed(iter/s)": 0.670331 }, { "epoch": 1.4804421404395698, "grad_norm": 3.952209949493408, "learning_rate": 7.989921925800634e-05, "loss": 2.426106834411621, "memory(GiB)": 72.85, "step": 34555, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.670335 }, { "epoch": 1.4806563557688188, "grad_norm": 4.378711700439453, "learning_rate": 7.989382502193634e-05, "loss": 1.9822067260742187, "memory(GiB)": 72.85, "step": 34560, "token_acc": 0.5374592833876222, "train_speed(iter/s)": 0.670345 }, { "epoch": 1.4808705710980679, "grad_norm": 4.98108434677124, "learning_rate": 7.988843024431167e-05, "loss": 2.3837461471557617, "memory(GiB)": 72.85, "step": 34565, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.670352 }, { "epoch": 1.4810847864273167, "grad_norm": 3.5792746543884277, "learning_rate": 7.988303492523003e-05, "loss": 2.1828725814819334, "memory(GiB)": 72.85, "step": 34570, "token_acc": 0.5054945054945055, "train_speed(iter/s)": 0.670363 }, { "epoch": 1.4812990017565657, "grad_norm": 5.5607805252075195, "learning_rate": 7.987763906478919e-05, "loss": 2.485712242126465, "memory(GiB)": 72.85, "step": 34575, "token_acc": 0.4871060171919771, "train_speed(iter/s)": 0.670374 }, { "epoch": 1.4815132170858147, "grad_norm": 5.098114013671875, "learning_rate": 7.987224266308689e-05, "loss": 2.1079593658447267, "memory(GiB)": 72.85, "step": 34580, "token_acc": 0.51953125, "train_speed(iter/s)": 0.67038 }, { "epoch": 1.4817274324150636, "grad_norm": 6.180630207061768, "learning_rate": 7.986684572022088e-05, "loss": 
2.357026290893555, "memory(GiB)": 72.85, "step": 34585, "token_acc": 0.5270758122743683, "train_speed(iter/s)": 0.670373 }, { "epoch": 1.4819416477443126, "grad_norm": 3.5207414627075195, "learning_rate": 7.986144823628895e-05, "loss": 2.058099937438965, "memory(GiB)": 72.85, "step": 34590, "token_acc": 0.548951048951049, "train_speed(iter/s)": 0.670374 }, { "epoch": 1.4821558630735616, "grad_norm": 5.170985698699951, "learning_rate": 7.985605021138888e-05, "loss": 2.2314008712768554, "memory(GiB)": 72.85, "step": 34595, "token_acc": 0.5111940298507462, "train_speed(iter/s)": 0.670375 }, { "epoch": 1.4823700784028104, "grad_norm": 4.189124584197998, "learning_rate": 7.985065164561845e-05, "loss": 2.3609233856201173, "memory(GiB)": 72.85, "step": 34600, "token_acc": 0.4941860465116279, "train_speed(iter/s)": 0.670397 }, { "epoch": 1.4825842937320595, "grad_norm": 3.576601266860962, "learning_rate": 7.984525253907545e-05, "loss": 2.1471158981323244, "memory(GiB)": 72.85, "step": 34605, "token_acc": 0.5164835164835165, "train_speed(iter/s)": 0.670391 }, { "epoch": 1.4827985090613085, "grad_norm": 4.9100494384765625, "learning_rate": 7.983985289185772e-05, "loss": 2.1615278244018556, "memory(GiB)": 72.85, "step": 34610, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.670386 }, { "epoch": 1.4830127243905573, "grad_norm": 4.8589277267456055, "learning_rate": 7.983445270406305e-05, "loss": 2.565748596191406, "memory(GiB)": 72.85, "step": 34615, "token_acc": 0.4670846394984326, "train_speed(iter/s)": 0.670367 }, { "epoch": 1.4832269397198063, "grad_norm": 4.530729293823242, "learning_rate": 7.982905197578928e-05, "loss": 2.2890514373779296, "memory(GiB)": 72.85, "step": 34620, "token_acc": 0.49612403100775193, "train_speed(iter/s)": 0.670351 }, { "epoch": 1.4834411550490554, "grad_norm": 5.927967548370361, "learning_rate": 7.982365070713426e-05, "loss": 2.3673444747924806, "memory(GiB)": 72.85, "step": 34625, "token_acc": 0.4806451612903226, "train_speed(iter/s)": 
0.670341 }, { "epoch": 1.4836553703783042, "grad_norm": 4.646783351898193, "learning_rate": 7.981824889819584e-05, "loss": 2.245909881591797, "memory(GiB)": 72.85, "step": 34630, "token_acc": 0.5045592705167173, "train_speed(iter/s)": 0.670358 }, { "epoch": 1.4838695857075532, "grad_norm": 4.488165378570557, "learning_rate": 7.981284654907186e-05, "loss": 2.2655576705932616, "memory(GiB)": 72.85, "step": 34635, "token_acc": 0.4755700325732899, "train_speed(iter/s)": 0.670371 }, { "epoch": 1.4840838010368023, "grad_norm": 4.178486347198486, "learning_rate": 7.98074436598602e-05, "loss": 2.3775556564331053, "memory(GiB)": 72.85, "step": 34640, "token_acc": 0.5109489051094891, "train_speed(iter/s)": 0.670376 }, { "epoch": 1.484298016366051, "grad_norm": 3.142549991607666, "learning_rate": 7.980204023065872e-05, "loss": 2.347821044921875, "memory(GiB)": 72.85, "step": 34645, "token_acc": 0.5259259259259259, "train_speed(iter/s)": 0.670391 }, { "epoch": 1.4845122316953, "grad_norm": 3.62725830078125, "learning_rate": 7.979663626156535e-05, "loss": 2.6880970001220703, "memory(GiB)": 72.85, "step": 34650, "token_acc": 0.4, "train_speed(iter/s)": 0.670383 }, { "epoch": 1.4847264470245491, "grad_norm": 3.726815700531006, "learning_rate": 7.979123175267794e-05, "loss": 2.070137786865234, "memory(GiB)": 72.85, "step": 34655, "token_acc": 0.5619047619047619, "train_speed(iter/s)": 0.670398 }, { "epoch": 1.484940662353798, "grad_norm": 4.840754985809326, "learning_rate": 7.978582670409443e-05, "loss": 2.1845605850219725, "memory(GiB)": 72.85, "step": 34660, "token_acc": 0.5367647058823529, "train_speed(iter/s)": 0.670399 }, { "epoch": 1.485154877683047, "grad_norm": 4.272642135620117, "learning_rate": 7.978042111591273e-05, "loss": 2.8005603790283202, "memory(GiB)": 72.85, "step": 34665, "token_acc": 0.4246153846153846, "train_speed(iter/s)": 0.670397 }, { "epoch": 1.485369093012296, "grad_norm": 4.1212873458862305, "learning_rate": 7.977501498823076e-05, "loss": 
2.524112892150879, "memory(GiB)": 72.85, "step": 34670, "token_acc": 0.5032894736842105, "train_speed(iter/s)": 0.670377 }, { "epoch": 1.4855833083415448, "grad_norm": 3.7093000411987305, "learning_rate": 7.976960832114648e-05, "loss": 2.2242584228515625, "memory(GiB)": 72.85, "step": 34675, "token_acc": 0.5035971223021583, "train_speed(iter/s)": 0.670403 }, { "epoch": 1.4857975236707939, "grad_norm": 3.7174794673919678, "learning_rate": 7.976420111475781e-05, "loss": 2.2563735961914064, "memory(GiB)": 72.85, "step": 34680, "token_acc": 0.5099337748344371, "train_speed(iter/s)": 0.670367 }, { "epoch": 1.486011739000043, "grad_norm": 5.8151140213012695, "learning_rate": 7.975879336916272e-05, "loss": 2.3236446380615234, "memory(GiB)": 72.85, "step": 34685, "token_acc": 0.5104602510460251, "train_speed(iter/s)": 0.670344 }, { "epoch": 1.4862259543292917, "grad_norm": 4.834623336791992, "learning_rate": 7.975338508445916e-05, "loss": 2.4849695205688476, "memory(GiB)": 72.85, "step": 34690, "token_acc": 0.5122699386503068, "train_speed(iter/s)": 0.670331 }, { "epoch": 1.4864401696585408, "grad_norm": 4.757440090179443, "learning_rate": 7.97479762607451e-05, "loss": 2.1365684509277343, "memory(GiB)": 72.85, "step": 34695, "token_acc": 0.540084388185654, "train_speed(iter/s)": 0.670311 }, { "epoch": 1.4866543849877898, "grad_norm": 4.02193546295166, "learning_rate": 7.974256689811857e-05, "loss": 2.240390396118164, "memory(GiB)": 72.85, "step": 34700, "token_acc": 0.5148148148148148, "train_speed(iter/s)": 0.670306 }, { "epoch": 1.4868686003170386, "grad_norm": 5.912433624267578, "learning_rate": 7.973715699667754e-05, "loss": 2.071224403381348, "memory(GiB)": 72.85, "step": 34705, "token_acc": 0.5370370370370371, "train_speed(iter/s)": 0.670299 }, { "epoch": 1.4870828156462876, "grad_norm": 4.740879535675049, "learning_rate": 7.973174655652003e-05, "loss": 2.3413536071777346, "memory(GiB)": 72.85, "step": 34710, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 
0.670308 }, { "epoch": 1.4872970309755367, "grad_norm": 3.8884265422821045, "learning_rate": 7.972633557774401e-05, "loss": 2.5738739013671874, "memory(GiB)": 72.85, "step": 34715, "token_acc": 0.48757763975155277, "train_speed(iter/s)": 0.670321 }, { "epoch": 1.4875112463047855, "grad_norm": 5.008087158203125, "learning_rate": 7.972092406044756e-05, "loss": 2.5296249389648438, "memory(GiB)": 72.85, "step": 34720, "token_acc": 0.44680851063829785, "train_speed(iter/s)": 0.670372 }, { "epoch": 1.4877254616340345, "grad_norm": 4.90673828125, "learning_rate": 7.971551200472866e-05, "loss": 2.588125801086426, "memory(GiB)": 72.85, "step": 34725, "token_acc": 0.4375, "train_speed(iter/s)": 0.670379 }, { "epoch": 1.4879396769632836, "grad_norm": 4.966181755065918, "learning_rate": 7.97100994106854e-05, "loss": 2.893436050415039, "memory(GiB)": 72.85, "step": 34730, "token_acc": 0.4621212121212121, "train_speed(iter/s)": 0.670385 }, { "epoch": 1.4881538922925324, "grad_norm": 5.604063510894775, "learning_rate": 7.970468627841583e-05, "loss": 2.3102144241333007, "memory(GiB)": 72.85, "step": 34735, "token_acc": 0.5068027210884354, "train_speed(iter/s)": 0.670389 }, { "epoch": 1.4883681076217814, "grad_norm": 4.95324182510376, "learning_rate": 7.9699272608018e-05, "loss": 2.442444610595703, "memory(GiB)": 72.85, "step": 34740, "token_acc": 0.4504792332268371, "train_speed(iter/s)": 0.670409 }, { "epoch": 1.4885823229510304, "grad_norm": 6.113974094390869, "learning_rate": 7.969385839958997e-05, "loss": 2.4376235961914063, "memory(GiB)": 72.85, "step": 34745, "token_acc": 0.506896551724138, "train_speed(iter/s)": 0.670428 }, { "epoch": 1.4887965382802792, "grad_norm": 5.787266731262207, "learning_rate": 7.968844365322985e-05, "loss": 2.231883239746094, "memory(GiB)": 72.85, "step": 34750, "token_acc": 0.49356223175965663, "train_speed(iter/s)": 0.670451 }, { "epoch": 1.4890107536095283, "grad_norm": 3.2446553707122803, "learning_rate": 7.968302836903573e-05, "loss": 
2.276592826843262, "memory(GiB)": 72.85, "step": 34755, "token_acc": 0.4935897435897436, "train_speed(iter/s)": 0.670461 }, { "epoch": 1.4892249689387773, "grad_norm": 3.6001129150390625, "learning_rate": 7.96776125471057e-05, "loss": 2.2590358734130858, "memory(GiB)": 72.85, "step": 34760, "token_acc": 0.5409836065573771, "train_speed(iter/s)": 0.670474 }, { "epoch": 1.4894391842680261, "grad_norm": 3.543565034866333, "learning_rate": 7.967219618753787e-05, "loss": 1.8512096405029297, "memory(GiB)": 72.85, "step": 34765, "token_acc": 0.5983263598326359, "train_speed(iter/s)": 0.67047 }, { "epoch": 1.4896533995972752, "grad_norm": 3.7638649940490723, "learning_rate": 7.966677929043038e-05, "loss": 2.261477470397949, "memory(GiB)": 72.85, "step": 34770, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.670486 }, { "epoch": 1.4898676149265242, "grad_norm": 3.7940726280212402, "learning_rate": 7.966136185588135e-05, "loss": 2.2111377716064453, "memory(GiB)": 72.85, "step": 34775, "token_acc": 0.5295950155763239, "train_speed(iter/s)": 0.670481 }, { "epoch": 1.490081830255773, "grad_norm": 4.254322528839111, "learning_rate": 7.965594388398894e-05, "loss": 2.222972869873047, "memory(GiB)": 72.85, "step": 34780, "token_acc": 0.5256410256410257, "train_speed(iter/s)": 0.670495 }, { "epoch": 1.490296045585022, "grad_norm": 4.596476078033447, "learning_rate": 7.965052537485127e-05, "loss": 2.582352066040039, "memory(GiB)": 72.85, "step": 34785, "token_acc": 0.47547169811320755, "train_speed(iter/s)": 0.670479 }, { "epoch": 1.490510260914271, "grad_norm": 4.236804008483887, "learning_rate": 7.964510632856652e-05, "loss": 2.20364990234375, "memory(GiB)": 72.85, "step": 34790, "token_acc": 0.48736462093862815, "train_speed(iter/s)": 0.670484 }, { "epoch": 1.49072447624352, "grad_norm": 4.678492546081543, "learning_rate": 7.963968674523288e-05, "loss": 2.5842872619628907, "memory(GiB)": 72.85, "step": 34795, "token_acc": 0.43666666666666665, "train_speed(iter/s)": 
0.670475 }, { "epoch": 1.490938691572769, "grad_norm": 5.873129367828369, "learning_rate": 7.963426662494848e-05, "loss": 2.2352867126464844, "memory(GiB)": 72.85, "step": 34800, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.67047 }, { "epoch": 1.491152906902018, "grad_norm": 3.666409730911255, "learning_rate": 7.962884596781155e-05, "loss": 2.165882682800293, "memory(GiB)": 72.85, "step": 34805, "token_acc": 0.509493670886076, "train_speed(iter/s)": 0.670482 }, { "epoch": 1.4913671222312668, "grad_norm": 5.281476974487305, "learning_rate": 7.962342477392028e-05, "loss": 2.3835781097412108, "memory(GiB)": 72.85, "step": 34810, "token_acc": 0.5219123505976095, "train_speed(iter/s)": 0.670497 }, { "epoch": 1.4915813375605158, "grad_norm": 4.196275234222412, "learning_rate": 7.96180030433729e-05, "loss": 2.1691730499267576, "memory(GiB)": 72.85, "step": 34815, "token_acc": 0.5125448028673835, "train_speed(iter/s)": 0.6705 }, { "epoch": 1.4917955528897648, "grad_norm": 5.278790473937988, "learning_rate": 7.96125807762676e-05, "loss": 2.3652927398681642, "memory(GiB)": 72.85, "step": 34820, "token_acc": 0.5170278637770898, "train_speed(iter/s)": 0.670525 }, { "epoch": 1.4920097682190137, "grad_norm": 3.875662326812744, "learning_rate": 7.960715797270261e-05, "loss": 2.5875173568725587, "memory(GiB)": 72.85, "step": 34825, "token_acc": 0.4812286689419795, "train_speed(iter/s)": 0.670539 }, { "epoch": 1.4922239835482627, "grad_norm": 5.2026848793029785, "learning_rate": 7.96017346327762e-05, "loss": 2.3722856521606444, "memory(GiB)": 72.85, "step": 34830, "token_acc": 0.4944237918215613, "train_speed(iter/s)": 0.670551 }, { "epoch": 1.4924381988775117, "grad_norm": 3.625417470932007, "learning_rate": 7.959631075658658e-05, "loss": 2.0508132934570313, "memory(GiB)": 72.85, "step": 34835, "token_acc": 0.5337837837837838, "train_speed(iter/s)": 0.670541 }, { "epoch": 1.4926524142067605, "grad_norm": 4.713221549987793, "learning_rate": 7.959088634423205e-05, 
"loss": 2.655680274963379, "memory(GiB)": 72.85, "step": 34840, "token_acc": 0.44155844155844154, "train_speed(iter/s)": 0.670537 }, { "epoch": 1.4928666295360096, "grad_norm": 3.634777545928955, "learning_rate": 7.958546139581082e-05, "loss": 2.563966751098633, "memory(GiB)": 72.85, "step": 34845, "token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.670518 }, { "epoch": 1.4930808448652586, "grad_norm": 5.202334403991699, "learning_rate": 7.958003591142122e-05, "loss": 2.332132339477539, "memory(GiB)": 72.85, "step": 34850, "token_acc": 0.47183098591549294, "train_speed(iter/s)": 0.670526 }, { "epoch": 1.4932950601945074, "grad_norm": 6.0413289070129395, "learning_rate": 7.957460989116153e-05, "loss": 2.5779037475585938, "memory(GiB)": 72.85, "step": 34855, "token_acc": 0.48253968253968255, "train_speed(iter/s)": 0.670514 }, { "epoch": 1.4935092755237565, "grad_norm": 4.8063201904296875, "learning_rate": 7.956918333513003e-05, "loss": 2.3494998931884767, "memory(GiB)": 72.85, "step": 34860, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.670511 }, { "epoch": 1.4937234908530055, "grad_norm": 4.253718376159668, "learning_rate": 7.956375624342504e-05, "loss": 2.133171272277832, "memory(GiB)": 72.85, "step": 34865, "token_acc": 0.515748031496063, "train_speed(iter/s)": 0.670518 }, { "epoch": 1.4939377061822543, "grad_norm": 3.8896799087524414, "learning_rate": 7.955832861614487e-05, "loss": 2.414081001281738, "memory(GiB)": 72.85, "step": 34870, "token_acc": 0.49363057324840764, "train_speed(iter/s)": 0.670517 }, { "epoch": 1.4941519215115033, "grad_norm": 4.061620235443115, "learning_rate": 7.955290045338785e-05, "loss": 2.248069953918457, "memory(GiB)": 72.85, "step": 34875, "token_acc": 0.49523809523809526, "train_speed(iter/s)": 0.67052 }, { "epoch": 1.4943661368407524, "grad_norm": 4.5327253341674805, "learning_rate": 7.954747175525231e-05, "loss": 2.431040954589844, "memory(GiB)": 72.85, "step": 34880, "token_acc": 0.5384615384615384, 
"train_speed(iter/s)": 0.670534 }, { "epoch": 1.4945803521700012, "grad_norm": 4.0340423583984375, "learning_rate": 7.954204252183662e-05, "loss": 2.307282638549805, "memory(GiB)": 72.85, "step": 34885, "token_acc": 0.511864406779661, "train_speed(iter/s)": 0.670547 }, { "epoch": 1.4947945674992502, "grad_norm": 6.060317039489746, "learning_rate": 7.953661275323909e-05, "loss": 2.1704660415649415, "memory(GiB)": 72.85, "step": 34890, "token_acc": 0.5298245614035088, "train_speed(iter/s)": 0.670543 }, { "epoch": 1.4950087828284992, "grad_norm": 4.306400299072266, "learning_rate": 7.953118244955813e-05, "loss": 2.4269479751586913, "memory(GiB)": 72.85, "step": 34895, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.670559 }, { "epoch": 1.495222998157748, "grad_norm": 4.859921455383301, "learning_rate": 7.952575161089209e-05, "loss": 2.6824872970581053, "memory(GiB)": 72.85, "step": 34900, "token_acc": 0.46905537459283386, "train_speed(iter/s)": 0.670568 }, { "epoch": 1.495437213486997, "grad_norm": 4.4027934074401855, "learning_rate": 7.952032023733937e-05, "loss": 2.315907096862793, "memory(GiB)": 72.85, "step": 34905, "token_acc": 0.4980392156862745, "train_speed(iter/s)": 0.670549 }, { "epoch": 1.4956514288162461, "grad_norm": 5.292370319366455, "learning_rate": 7.951488832899836e-05, "loss": 2.5673734664916994, "memory(GiB)": 72.85, "step": 34910, "token_acc": 0.51985559566787, "train_speed(iter/s)": 0.670549 }, { "epoch": 1.495865644145495, "grad_norm": 3.873321771621704, "learning_rate": 7.950945588596745e-05, "loss": 2.4013669967651365, "memory(GiB)": 72.85, "step": 34915, "token_acc": 0.5, "train_speed(iter/s)": 0.670563 }, { "epoch": 1.496079859474744, "grad_norm": 3.9505982398986816, "learning_rate": 7.950402290834506e-05, "loss": 2.035645294189453, "memory(GiB)": 72.85, "step": 34920, "token_acc": 0.5108225108225108, "train_speed(iter/s)": 0.670588 }, { "epoch": 1.496294074803993, "grad_norm": 4.240943431854248, "learning_rate": 
7.949858939622963e-05, "loss": 2.2188526153564454, "memory(GiB)": 72.85, "step": 34925, "token_acc": 0.5509433962264151, "train_speed(iter/s)": 0.670603 }, { "epoch": 1.4965082901332418, "grad_norm": 4.701571464538574, "learning_rate": 7.949315534971957e-05, "loss": 2.369823455810547, "memory(GiB)": 72.85, "step": 34930, "token_acc": 0.47126436781609193, "train_speed(iter/s)": 0.670617 }, { "epoch": 1.4967225054624909, "grad_norm": 3.4905738830566406, "learning_rate": 7.948772076891335e-05, "loss": 2.527129364013672, "memory(GiB)": 72.85, "step": 34935, "token_acc": 0.5143769968051118, "train_speed(iter/s)": 0.670614 }, { "epoch": 1.49693672079174, "grad_norm": 3.898327112197876, "learning_rate": 7.94822856539094e-05, "loss": 2.1016693115234375, "memory(GiB)": 72.85, "step": 34940, "token_acc": 0.5324232081911263, "train_speed(iter/s)": 0.670623 }, { "epoch": 1.4971509361209887, "grad_norm": 4.057865619659424, "learning_rate": 7.947685000480617e-05, "loss": 2.558106231689453, "memory(GiB)": 72.85, "step": 34945, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.670633 }, { "epoch": 1.4973651514502377, "grad_norm": 4.272323131561279, "learning_rate": 7.947141382170218e-05, "loss": 2.392985153198242, "memory(GiB)": 72.85, "step": 34950, "token_acc": 0.5016949152542373, "train_speed(iter/s)": 0.670652 }, { "epoch": 1.4975793667794868, "grad_norm": 3.6469197273254395, "learning_rate": 7.946597710469586e-05, "loss": 2.558964157104492, "memory(GiB)": 72.85, "step": 34955, "token_acc": 0.47318611987381703, "train_speed(iter/s)": 0.670631 }, { "epoch": 1.4977935821087356, "grad_norm": 5.8697662353515625, "learning_rate": 7.946053985388573e-05, "loss": 2.553898239135742, "memory(GiB)": 72.85, "step": 34960, "token_acc": 0.4568345323741007, "train_speed(iter/s)": 0.670642 }, { "epoch": 1.4980077974379846, "grad_norm": 4.829788684844971, "learning_rate": 7.94551020693703e-05, "loss": 2.5524259567260743, "memory(GiB)": 72.85, "step": 34965, "token_acc": 
0.4669603524229075, "train_speed(iter/s)": 0.670646 }, { "epoch": 1.4982220127672337, "grad_norm": 4.776860237121582, "learning_rate": 7.944966375124805e-05, "loss": 2.349943733215332, "memory(GiB)": 72.85, "step": 34970, "token_acc": 0.46646341463414637, "train_speed(iter/s)": 0.670647 }, { "epoch": 1.4984362280964825, "grad_norm": 3.886059284210205, "learning_rate": 7.944422489961752e-05, "loss": 2.3954944610595703, "memory(GiB)": 72.85, "step": 34975, "token_acc": 0.48771929824561405, "train_speed(iter/s)": 0.670629 }, { "epoch": 1.4986504434257315, "grad_norm": 3.469698429107666, "learning_rate": 7.943878551457722e-05, "loss": 2.3846660614013673, "memory(GiB)": 72.85, "step": 34980, "token_acc": 0.5034013605442177, "train_speed(iter/s)": 0.670631 }, { "epoch": 1.4988646587549805, "grad_norm": 4.022664546966553, "learning_rate": 7.943334559622572e-05, "loss": 2.4871505737304687, "memory(GiB)": 72.85, "step": 34985, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.670649 }, { "epoch": 1.4990788740842294, "grad_norm": 4.2816901206970215, "learning_rate": 7.942790514466154e-05, "loss": 2.262243461608887, "memory(GiB)": 72.85, "step": 34990, "token_acc": 0.5075528700906344, "train_speed(iter/s)": 0.670656 }, { "epoch": 1.4992930894134784, "grad_norm": 5.113549709320068, "learning_rate": 7.942246415998326e-05, "loss": 2.307269287109375, "memory(GiB)": 72.85, "step": 34995, "token_acc": 0.5175097276264592, "train_speed(iter/s)": 0.670662 }, { "epoch": 1.4995073047427274, "grad_norm": 4.880138397216797, "learning_rate": 7.941702264228945e-05, "loss": 2.149275207519531, "memory(GiB)": 72.85, "step": 35000, "token_acc": 0.5439330543933054, "train_speed(iter/s)": 0.670654 }, { "epoch": 1.4995073047427274, "eval_loss": 2.009007215499878, "eval_runtime": 14.8556, "eval_samples_per_second": 6.731, "eval_steps_per_second": 6.731, "eval_token_acc": 0.5333333333333333, "step": 35000 }, { "epoch": 1.4997215200719762, "grad_norm": 3.7809765338897705, "learning_rate": 
7.941158059167869e-05, "loss": 2.251259994506836, "memory(GiB)": 72.85, "step": 35005, "token_acc": 0.5191768007483629, "train_speed(iter/s)": 0.670468 }, { "epoch": 1.4999357354012253, "grad_norm": 4.102450370788574, "learning_rate": 7.940613800824953e-05, "loss": 2.219586944580078, "memory(GiB)": 72.85, "step": 35010, "token_acc": 0.5046153846153846, "train_speed(iter/s)": 0.67048 }, { "epoch": 1.5001499507304743, "grad_norm": 5.439478874206543, "learning_rate": 7.94006948921006e-05, "loss": 2.2184911727905274, "memory(GiB)": 72.85, "step": 35015, "token_acc": 0.52, "train_speed(iter/s)": 0.670492 }, { "epoch": 1.5003641660597231, "grad_norm": 3.8823366165161133, "learning_rate": 7.939525124333051e-05, "loss": 2.1678327560424804, "memory(GiB)": 72.85, "step": 35020, "token_acc": 0.5425531914893617, "train_speed(iter/s)": 0.670502 }, { "epoch": 1.5005783813889721, "grad_norm": 3.7796759605407715, "learning_rate": 7.938980706203787e-05, "loss": 2.274483489990234, "memory(GiB)": 72.85, "step": 35025, "token_acc": 0.5165562913907285, "train_speed(iter/s)": 0.670507 }, { "epoch": 1.5007925967182212, "grad_norm": 4.094045639038086, "learning_rate": 7.93843623483213e-05, "loss": 2.598550033569336, "memory(GiB)": 72.85, "step": 35030, "token_acc": 0.4778481012658228, "train_speed(iter/s)": 0.670528 }, { "epoch": 1.50100681204747, "grad_norm": 4.541673183441162, "learning_rate": 7.937891710227945e-05, "loss": 2.446039581298828, "memory(GiB)": 72.85, "step": 35035, "token_acc": 0.4657039711191336, "train_speed(iter/s)": 0.670536 }, { "epoch": 1.501221027376719, "grad_norm": 4.696268081665039, "learning_rate": 7.937347132401093e-05, "loss": 2.0529010772705076, "memory(GiB)": 72.85, "step": 35040, "token_acc": 0.5223367697594502, "train_speed(iter/s)": 0.670538 }, { "epoch": 1.501435242705968, "grad_norm": 3.8300657272338867, "learning_rate": 7.936802501361446e-05, "loss": 2.1925643920898437, "memory(GiB)": 72.85, "step": 35045, "token_acc": 0.5064516129032258, 
"train_speed(iter/s)": 0.670528 }, { "epoch": 1.5016494580352169, "grad_norm": 3.7164840698242188, "learning_rate": 7.936257817118864e-05, "loss": 1.9347593307495117, "memory(GiB)": 72.85, "step": 35050, "token_acc": 0.5766129032258065, "train_speed(iter/s)": 0.670519 }, { "epoch": 1.501863673364466, "grad_norm": 5.091597080230713, "learning_rate": 7.935713079683219e-05, "loss": 2.6041942596435548, "memory(GiB)": 72.85, "step": 35055, "token_acc": 0.48, "train_speed(iter/s)": 0.670536 }, { "epoch": 1.502077888693715, "grad_norm": 3.9872617721557617, "learning_rate": 7.935168289064377e-05, "loss": 2.252240753173828, "memory(GiB)": 72.85, "step": 35060, "token_acc": 0.5268456375838926, "train_speed(iter/s)": 0.670539 }, { "epoch": 1.5022921040229638, "grad_norm": 3.837784767150879, "learning_rate": 7.934623445272207e-05, "loss": 1.9613920211791993, "memory(GiB)": 72.85, "step": 35065, "token_acc": 0.5641025641025641, "train_speed(iter/s)": 0.670522 }, { "epoch": 1.5025063193522128, "grad_norm": 4.924561977386475, "learning_rate": 7.934078548316581e-05, "loss": 2.3235538482666014, "memory(GiB)": 72.85, "step": 35070, "token_acc": 0.48854961832061067, "train_speed(iter/s)": 0.670529 }, { "epoch": 1.5027205346814618, "grad_norm": 4.7361860275268555, "learning_rate": 7.93353359820737e-05, "loss": 2.226045036315918, "memory(GiB)": 72.85, "step": 35075, "token_acc": 0.509493670886076, "train_speed(iter/s)": 0.670533 }, { "epoch": 1.5029347500107106, "grad_norm": 10.602499961853027, "learning_rate": 7.932988594954447e-05, "loss": 2.55028076171875, "memory(GiB)": 72.85, "step": 35080, "token_acc": 0.46938775510204084, "train_speed(iter/s)": 0.670536 }, { "epoch": 1.5031489653399597, "grad_norm": 4.506058216094971, "learning_rate": 7.932443538567683e-05, "loss": 2.1839967727661134, "memory(GiB)": 72.85, "step": 35085, "token_acc": 0.49375, "train_speed(iter/s)": 0.670532 }, { "epoch": 1.5033631806692087, "grad_norm": 4.705049514770508, "learning_rate": 7.931898429056953e-05, 
"loss": 2.618072509765625, "memory(GiB)": 72.85, "step": 35090, "token_acc": 0.4542372881355932, "train_speed(iter/s)": 0.67054 }, { "epoch": 1.5035773959984575, "grad_norm": 6.4542012214660645, "learning_rate": 7.931353266432133e-05, "loss": 2.4733938217163085, "memory(GiB)": 72.85, "step": 35095, "token_acc": 0.5080645161290323, "train_speed(iter/s)": 0.670552 }, { "epoch": 1.5037916113277066, "grad_norm": 4.480567932128906, "learning_rate": 7.9308080507031e-05, "loss": 2.293115425109863, "memory(GiB)": 72.85, "step": 35100, "token_acc": 0.5017301038062284, "train_speed(iter/s)": 0.670555 }, { "epoch": 1.5040058266569556, "grad_norm": 4.311004161834717, "learning_rate": 7.930262781879727e-05, "loss": 2.5518301010131834, "memory(GiB)": 72.85, "step": 35105, "token_acc": 0.4298780487804878, "train_speed(iter/s)": 0.670564 }, { "epoch": 1.5042200419862044, "grad_norm": 3.680640459060669, "learning_rate": 7.929717459971897e-05, "loss": 2.3833314895629885, "memory(GiB)": 72.85, "step": 35110, "token_acc": 0.49242424242424243, "train_speed(iter/s)": 0.670559 }, { "epoch": 1.5044342573154537, "grad_norm": 4.250953197479248, "learning_rate": 7.929172084989487e-05, "loss": 2.312030792236328, "memory(GiB)": 72.85, "step": 35115, "token_acc": 0.48135593220338985, "train_speed(iter/s)": 0.670572 }, { "epoch": 1.5046484726447025, "grad_norm": 4.869614601135254, "learning_rate": 7.928626656942377e-05, "loss": 2.2204429626464846, "memory(GiB)": 72.85, "step": 35120, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.670575 }, { "epoch": 1.5048626879739513, "grad_norm": 3.3529610633850098, "learning_rate": 7.928081175840447e-05, "loss": 2.3145336151123046, "memory(GiB)": 72.85, "step": 35125, "token_acc": 0.5150501672240803, "train_speed(iter/s)": 0.670595 }, { "epoch": 1.5050769033032005, "grad_norm": 4.956563472747803, "learning_rate": 7.92753564169358e-05, "loss": 2.411221504211426, "memory(GiB)": 72.85, "step": 35130, "token_acc": 0.49458483754512633, 
"train_speed(iter/s)": 0.670614 }, { "epoch": 1.5052911186324494, "grad_norm": 4.851398944854736, "learning_rate": 7.926990054511658e-05, "loss": 2.5361608505249023, "memory(GiB)": 72.85, "step": 35135, "token_acc": 0.46691176470588236, "train_speed(iter/s)": 0.670631 }, { "epoch": 1.5055053339616982, "grad_norm": 5.171714782714844, "learning_rate": 7.926444414304567e-05, "loss": 2.595026397705078, "memory(GiB)": 72.85, "step": 35140, "token_acc": 0.46726190476190477, "train_speed(iter/s)": 0.67065 }, { "epoch": 1.5057195492909474, "grad_norm": 4.281332969665527, "learning_rate": 7.92589872108219e-05, "loss": 2.4721935272216795, "memory(GiB)": 72.85, "step": 35145, "token_acc": 0.5311475409836065, "train_speed(iter/s)": 0.670671 }, { "epoch": 1.5059337646201962, "grad_norm": 5.14074182510376, "learning_rate": 7.925352974854413e-05, "loss": 2.473077392578125, "memory(GiB)": 72.85, "step": 35150, "token_acc": 0.46405228758169936, "train_speed(iter/s)": 0.670682 }, { "epoch": 1.506147979949445, "grad_norm": 4.314969539642334, "learning_rate": 7.924807175631122e-05, "loss": 2.0230560302734375, "memory(GiB)": 72.85, "step": 35155, "token_acc": 0.5568627450980392, "train_speed(iter/s)": 0.670664 }, { "epoch": 1.5063621952786943, "grad_norm": 3.7755255699157715, "learning_rate": 7.924261323422204e-05, "loss": 2.1647981643676757, "memory(GiB)": 72.85, "step": 35160, "token_acc": 0.54, "train_speed(iter/s)": 0.67067 }, { "epoch": 1.5065764106079431, "grad_norm": 5.720251083374023, "learning_rate": 7.923715418237551e-05, "loss": 2.400176239013672, "memory(GiB)": 72.85, "step": 35165, "token_acc": 0.48854961832061067, "train_speed(iter/s)": 0.670675 }, { "epoch": 1.506790625937192, "grad_norm": 3.533174514770508, "learning_rate": 7.923169460087047e-05, "loss": 2.1355430603027346, "memory(GiB)": 72.85, "step": 35170, "token_acc": 0.5532646048109966, "train_speed(iter/s)": 0.670699 }, { "epoch": 1.5070048412664412, "grad_norm": 4.045811653137207, "learning_rate": 
7.922623448980591e-05, "loss": 2.4016433715820313, "memory(GiB)": 72.85, "step": 35175, "token_acc": 0.4959785522788204, "train_speed(iter/s)": 0.67069 }, { "epoch": 1.50721905659569, "grad_norm": 4.695067405700684, "learning_rate": 7.922077384928067e-05, "loss": 2.5608264923095705, "memory(GiB)": 72.85, "step": 35180, "token_acc": 0.43521594684385384, "train_speed(iter/s)": 0.670679 }, { "epoch": 1.5074332719249388, "grad_norm": 6.771310806274414, "learning_rate": 7.921531267939371e-05, "loss": 2.516821098327637, "memory(GiB)": 72.85, "step": 35185, "token_acc": 0.509090909090909, "train_speed(iter/s)": 0.670667 }, { "epoch": 1.507647487254188, "grad_norm": 4.398081302642822, "learning_rate": 7.920985098024393e-05, "loss": 2.363093376159668, "memory(GiB)": 72.85, "step": 35190, "token_acc": 0.5, "train_speed(iter/s)": 0.670669 }, { "epoch": 1.5078617025834369, "grad_norm": 4.522797107696533, "learning_rate": 7.920438875193033e-05, "loss": 2.797158050537109, "memory(GiB)": 72.85, "step": 35195, "token_acc": 0.43564356435643564, "train_speed(iter/s)": 0.670662 }, { "epoch": 1.5080759179126857, "grad_norm": 4.246337890625, "learning_rate": 7.919892599455183e-05, "loss": 2.121649169921875, "memory(GiB)": 72.85, "step": 35200, "token_acc": 0.49458483754512633, "train_speed(iter/s)": 0.670663 }, { "epoch": 1.508290133241935, "grad_norm": 4.400655746459961, "learning_rate": 7.919346270820737e-05, "loss": 2.2528514862060547, "memory(GiB)": 72.85, "step": 35205, "token_acc": 0.49838187702265374, "train_speed(iter/s)": 0.670661 }, { "epoch": 1.5085043485711838, "grad_norm": 4.535823822021484, "learning_rate": 7.918799889299596e-05, "loss": 2.3173704147338867, "memory(GiB)": 72.85, "step": 35210, "token_acc": 0.5036231884057971, "train_speed(iter/s)": 0.670634 }, { "epoch": 1.5087185639004326, "grad_norm": 3.2806804180145264, "learning_rate": 7.918253454901657e-05, "loss": 2.2393064498901367, "memory(GiB)": 72.85, "step": 35215, "token_acc": 0.5044642857142857, 
"train_speed(iter/s)": 0.670613 }, { "epoch": 1.5089327792296818, "grad_norm": 4.410939693450928, "learning_rate": 7.91770696763682e-05, "loss": 2.390804100036621, "memory(GiB)": 72.85, "step": 35220, "token_acc": 0.4875444839857651, "train_speed(iter/s)": 0.670585 }, { "epoch": 1.5091469945589306, "grad_norm": 4.525294303894043, "learning_rate": 7.917160427514982e-05, "loss": 2.3857803344726562, "memory(GiB)": 72.85, "step": 35225, "token_acc": 0.46808510638297873, "train_speed(iter/s)": 0.670554 }, { "epoch": 1.5093612098881795, "grad_norm": 5.7461771965026855, "learning_rate": 7.916613834546049e-05, "loss": 2.5433799743652346, "memory(GiB)": 72.85, "step": 35230, "token_acc": 0.45977011494252873, "train_speed(iter/s)": 0.670563 }, { "epoch": 1.5095754252174287, "grad_norm": 4.828155517578125, "learning_rate": 7.91606718873992e-05, "loss": 2.4465614318847657, "memory(GiB)": 72.85, "step": 35235, "token_acc": 0.47035573122529645, "train_speed(iter/s)": 0.670584 }, { "epoch": 1.5097896405466775, "grad_norm": 3.8419806957244873, "learning_rate": 7.915520490106497e-05, "loss": 2.5706621170043946, "memory(GiB)": 72.85, "step": 35240, "token_acc": 0.45819397993311034, "train_speed(iter/s)": 0.670598 }, { "epoch": 1.5100038558759263, "grad_norm": 5.03416633605957, "learning_rate": 7.914973738655686e-05, "loss": 2.110641098022461, "memory(GiB)": 72.85, "step": 35245, "token_acc": 0.5423728813559322, "train_speed(iter/s)": 0.670599 }, { "epoch": 1.5102180712051756, "grad_norm": 4.266659736633301, "learning_rate": 7.91442693439739e-05, "loss": 2.416642761230469, "memory(GiB)": 72.85, "step": 35250, "token_acc": 0.46178343949044587, "train_speed(iter/s)": 0.67061 }, { "epoch": 1.5104322865344244, "grad_norm": 4.388421535491943, "learning_rate": 7.913880077341517e-05, "loss": 2.538405990600586, "memory(GiB)": 72.85, "step": 35255, "token_acc": 0.4837758112094395, "train_speed(iter/s)": 0.67062 }, { "epoch": 1.5106465018636732, "grad_norm": 5.4128031730651855, 
"learning_rate": 7.913333167497973e-05, "loss": 2.136491394042969, "memory(GiB)": 72.85, "step": 35260, "token_acc": 0.5630252100840336, "train_speed(iter/s)": 0.670606 }, { "epoch": 1.5108607171929225, "grad_norm": 4.678789138793945, "learning_rate": 7.912786204876668e-05, "loss": 2.472673034667969, "memory(GiB)": 72.85, "step": 35265, "token_acc": 0.5041322314049587, "train_speed(iter/s)": 0.670611 }, { "epoch": 1.5110749325221713, "grad_norm": 4.593124866485596, "learning_rate": 7.912239189487506e-05, "loss": 2.2273895263671877, "memory(GiB)": 72.85, "step": 35270, "token_acc": 0.5241935483870968, "train_speed(iter/s)": 0.670616 }, { "epoch": 1.51128914785142, "grad_norm": 3.575976610183716, "learning_rate": 7.9116921213404e-05, "loss": 2.4510547637939455, "memory(GiB)": 72.85, "step": 35275, "token_acc": 0.5130718954248366, "train_speed(iter/s)": 0.67063 }, { "epoch": 1.5115033631806694, "grad_norm": 2.8654074668884277, "learning_rate": 7.911145000445259e-05, "loss": 2.4910634994506835, "memory(GiB)": 72.85, "step": 35280, "token_acc": 0.46944444444444444, "train_speed(iter/s)": 0.670618 }, { "epoch": 1.5117175785099182, "grad_norm": 4.885190010070801, "learning_rate": 7.910597826811996e-05, "loss": 2.472845268249512, "memory(GiB)": 72.85, "step": 35285, "token_acc": 0.49140893470790376, "train_speed(iter/s)": 0.670599 }, { "epoch": 1.511931793839167, "grad_norm": 4.3521223068237305, "learning_rate": 7.910050600450522e-05, "loss": 2.5156198501586915, "memory(GiB)": 72.85, "step": 35290, "token_acc": 0.49477351916376305, "train_speed(iter/s)": 0.670609 }, { "epoch": 1.5121460091684162, "grad_norm": 4.959815979003906, "learning_rate": 7.909503321370754e-05, "loss": 2.3595708847045898, "memory(GiB)": 72.85, "step": 35295, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.670619 }, { "epoch": 1.512360224497665, "grad_norm": 4.364584445953369, "learning_rate": 7.908955989582603e-05, "loss": 2.229939270019531, "memory(GiB)": 72.85, "step": 35300, 
"token_acc": 0.5283018867924528, "train_speed(iter/s)": 0.6706 }, { "epoch": 1.5125744398269139, "grad_norm": 4.7755022048950195, "learning_rate": 7.908408605095983e-05, "loss": 2.3400217056274415, "memory(GiB)": 72.85, "step": 35305, "token_acc": 0.49469964664310956, "train_speed(iter/s)": 0.670599 }, { "epoch": 1.5127886551561631, "grad_norm": 4.699301719665527, "learning_rate": 7.907861167920816e-05, "loss": 2.6105110168457033, "memory(GiB)": 72.85, "step": 35310, "token_acc": 0.4803370786516854, "train_speed(iter/s)": 0.670604 }, { "epoch": 1.513002870485412, "grad_norm": 6.422908782958984, "learning_rate": 7.907313678067014e-05, "loss": 2.263429641723633, "memory(GiB)": 72.85, "step": 35315, "token_acc": 0.5152671755725191, "train_speed(iter/s)": 0.670611 }, { "epoch": 1.5132170858146607, "grad_norm": 4.344354629516602, "learning_rate": 7.906766135544498e-05, "loss": 2.438067626953125, "memory(GiB)": 72.85, "step": 35320, "token_acc": 0.5245283018867924, "train_speed(iter/s)": 0.670587 }, { "epoch": 1.51343130114391, "grad_norm": 3.520127534866333, "learning_rate": 7.906218540363187e-05, "loss": 2.03388671875, "memory(GiB)": 72.85, "step": 35325, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.670572 }, { "epoch": 1.5136455164731588, "grad_norm": 3.6629550457000732, "learning_rate": 7.905670892533002e-05, "loss": 2.414278030395508, "memory(GiB)": 72.85, "step": 35330, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.670584 }, { "epoch": 1.5138597318024076, "grad_norm": 3.7384469509124756, "learning_rate": 7.905123192063862e-05, "loss": 2.1784151077270506, "memory(GiB)": 72.85, "step": 35335, "token_acc": 0.49416342412451364, "train_speed(iter/s)": 0.670577 }, { "epoch": 1.5140739471316569, "grad_norm": 4.367391586303711, "learning_rate": 7.904575438965689e-05, "loss": 2.463724899291992, "memory(GiB)": 72.85, "step": 35340, "token_acc": 0.49044585987261147, "train_speed(iter/s)": 0.670577 }, { "epoch": 1.5142881624609057, "grad_norm": 
3.5538268089294434, "learning_rate": 7.904027633248409e-05, "loss": 2.2572235107421874, "memory(GiB)": 72.85, "step": 35345, "token_acc": 0.48026315789473684, "train_speed(iter/s)": 0.670592 }, { "epoch": 1.5145023777901545, "grad_norm": 5.297663688659668, "learning_rate": 7.903479774921944e-05, "loss": 2.398846435546875, "memory(GiB)": 72.85, "step": 35350, "token_acc": 0.5079872204472844, "train_speed(iter/s)": 0.670621 }, { "epoch": 1.5147165931194038, "grad_norm": 3.443664789199829, "learning_rate": 7.902931863996217e-05, "loss": 2.1646942138671874, "memory(GiB)": 72.85, "step": 35355, "token_acc": 0.5313807531380753, "train_speed(iter/s)": 0.670614 }, { "epoch": 1.5149308084486526, "grad_norm": 4.3573174476623535, "learning_rate": 7.902383900481158e-05, "loss": 2.6196632385253906, "memory(GiB)": 72.85, "step": 35360, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.67059 }, { "epoch": 1.5151450237779014, "grad_norm": 3.7614123821258545, "learning_rate": 7.901835884386693e-05, "loss": 2.2052352905273436, "memory(GiB)": 72.85, "step": 35365, "token_acc": 0.523972602739726, "train_speed(iter/s)": 0.670612 }, { "epoch": 1.5153592391071506, "grad_norm": 4.033133506774902, "learning_rate": 7.901287815722747e-05, "loss": 2.346920394897461, "memory(GiB)": 72.85, "step": 35370, "token_acc": 0.5122699386503068, "train_speed(iter/s)": 0.670612 }, { "epoch": 1.5155734544363995, "grad_norm": 3.9626214504241943, "learning_rate": 7.900739694499251e-05, "loss": 2.455207633972168, "memory(GiB)": 72.85, "step": 35375, "token_acc": 0.46984126984126984, "train_speed(iter/s)": 0.670614 }, { "epoch": 1.5157876697656483, "grad_norm": 4.013888359069824, "learning_rate": 7.900191520726135e-05, "loss": 2.3484745025634766, "memory(GiB)": 72.85, "step": 35380, "token_acc": 0.501779359430605, "train_speed(iter/s)": 0.670621 }, { "epoch": 1.5160018850948975, "grad_norm": 4.992003917694092, "learning_rate": 7.899643294413328e-05, "loss": 2.169850730895996, "memory(GiB)": 72.85, 
"step": 35385, "token_acc": 0.5054151624548736, "train_speed(iter/s)": 0.670636 }, { "epoch": 1.5162161004241463, "grad_norm": 4.721122741699219, "learning_rate": 7.899095015570763e-05, "loss": 1.9331541061401367, "memory(GiB)": 72.85, "step": 35390, "token_acc": 0.5778546712802768, "train_speed(iter/s)": 0.670648 }, { "epoch": 1.5164303157533952, "grad_norm": 3.764366865158081, "learning_rate": 7.898546684208373e-05, "loss": 2.117177963256836, "memory(GiB)": 72.85, "step": 35395, "token_acc": 0.5347222222222222, "train_speed(iter/s)": 0.670657 }, { "epoch": 1.5166445310826444, "grad_norm": 4.884263515472412, "learning_rate": 7.89799830033609e-05, "loss": 2.364990997314453, "memory(GiB)": 72.85, "step": 35400, "token_acc": 0.4696969696969697, "train_speed(iter/s)": 0.67067 }, { "epoch": 1.5168587464118932, "grad_norm": 4.275450229644775, "learning_rate": 7.89744986396385e-05, "loss": 2.469247817993164, "memory(GiB)": 72.85, "step": 35405, "token_acc": 0.4781144781144781, "train_speed(iter/s)": 0.670673 }, { "epoch": 1.517072961741142, "grad_norm": 4.4665446281433105, "learning_rate": 7.896901375101587e-05, "loss": 2.41949462890625, "memory(GiB)": 72.85, "step": 35410, "token_acc": 0.44314868804664725, "train_speed(iter/s)": 0.670668 }, { "epoch": 1.5172871770703913, "grad_norm": 3.7566630840301514, "learning_rate": 7.896352833759237e-05, "loss": 2.7915111541748048, "memory(GiB)": 72.85, "step": 35415, "token_acc": 0.44, "train_speed(iter/s)": 0.670663 }, { "epoch": 1.51750139239964, "grad_norm": 3.8461852073669434, "learning_rate": 7.89580423994674e-05, "loss": 2.0796003341674805, "memory(GiB)": 72.85, "step": 35420, "token_acc": 0.570957095709571, "train_speed(iter/s)": 0.670667 }, { "epoch": 1.517715607728889, "grad_norm": 4.6450886726379395, "learning_rate": 7.895255593674033e-05, "loss": 2.479803466796875, "memory(GiB)": 72.85, "step": 35425, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.670674 }, { "epoch": 1.5179298230581382, "grad_norm": 
4.578297138214111, "learning_rate": 7.894706894951053e-05, "loss": 2.587675666809082, "memory(GiB)": 72.85, "step": 35430, "token_acc": 0.4652014652014652, "train_speed(iter/s)": 0.670695 }, { "epoch": 1.518144038387387, "grad_norm": 4.060963153839111, "learning_rate": 7.894158143787745e-05, "loss": 2.0492639541625977, "memory(GiB)": 72.85, "step": 35435, "token_acc": 0.5471698113207547, "train_speed(iter/s)": 0.67069 }, { "epoch": 1.5183582537166358, "grad_norm": 3.8582763671875, "learning_rate": 7.893609340194044e-05, "loss": 2.4509843826293944, "memory(GiB)": 72.85, "step": 35440, "token_acc": 0.4930555555555556, "train_speed(iter/s)": 0.670676 }, { "epoch": 1.518572469045885, "grad_norm": 4.904073238372803, "learning_rate": 7.893060484179897e-05, "loss": 2.558502197265625, "memory(GiB)": 72.85, "step": 35445, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.67069 }, { "epoch": 1.5187866843751339, "grad_norm": 3.921412229537964, "learning_rate": 7.892511575755246e-05, "loss": 2.324622917175293, "memory(GiB)": 72.85, "step": 35450, "token_acc": 0.46963562753036436, "train_speed(iter/s)": 0.670695 }, { "epoch": 1.519000899704383, "grad_norm": 4.071432590484619, "learning_rate": 7.891962614930034e-05, "loss": 2.277683448791504, "memory(GiB)": 72.85, "step": 35455, "token_acc": 0.5149501661129569, "train_speed(iter/s)": 0.670717 }, { "epoch": 1.519215115033632, "grad_norm": 3.6334829330444336, "learning_rate": 7.891413601714206e-05, "loss": 2.214244079589844, "memory(GiB)": 72.85, "step": 35460, "token_acc": 0.48518518518518516, "train_speed(iter/s)": 0.670716 }, { "epoch": 1.5194293303628807, "grad_norm": 4.873870372772217, "learning_rate": 7.890864536117708e-05, "loss": 2.4218023300170897, "memory(GiB)": 72.85, "step": 35465, "token_acc": 0.4965277777777778, "train_speed(iter/s)": 0.67073 }, { "epoch": 1.5196435456921298, "grad_norm": 3.6476848125457764, "learning_rate": 7.890315418150488e-05, "loss": 2.213751220703125, "memory(GiB)": 72.85, "step": 
35470, "token_acc": 0.48344370860927155, "train_speed(iter/s)": 0.670733 }, { "epoch": 1.5198577610213788, "grad_norm": 4.462466716766357, "learning_rate": 7.889766247822492e-05, "loss": 2.4827220916748045, "memory(GiB)": 72.85, "step": 35475, "token_acc": 0.47941176470588237, "train_speed(iter/s)": 0.670763 }, { "epoch": 1.5200719763506276, "grad_norm": 3.501085042953491, "learning_rate": 7.88921702514367e-05, "loss": 2.3641361236572265, "memory(GiB)": 72.85, "step": 35480, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.670767 }, { "epoch": 1.5202861916798767, "grad_norm": 4.428238391876221, "learning_rate": 7.888667750123971e-05, "loss": 2.3367988586425783, "memory(GiB)": 72.85, "step": 35485, "token_acc": 0.5271565495207667, "train_speed(iter/s)": 0.670751 }, { "epoch": 1.5205004070091257, "grad_norm": 4.239969730377197, "learning_rate": 7.888118422773347e-05, "loss": 2.0959239959716798, "memory(GiB)": 72.85, "step": 35490, "token_acc": 0.5212765957446809, "train_speed(iter/s)": 0.670761 }, { "epoch": 1.5207146223383745, "grad_norm": 4.961824417114258, "learning_rate": 7.887569043101749e-05, "loss": 2.435881423950195, "memory(GiB)": 72.85, "step": 35495, "token_acc": 0.477124183006536, "train_speed(iter/s)": 0.670762 }, { "epoch": 1.5209288376676235, "grad_norm": 4.847708225250244, "learning_rate": 7.887019611119126e-05, "loss": 2.547166442871094, "memory(GiB)": 72.85, "step": 35500, "token_acc": 0.45125348189415043, "train_speed(iter/s)": 0.670751 }, { "epoch": 1.5209288376676235, "eval_loss": 2.040184736251831, "eval_runtime": 15.7445, "eval_samples_per_second": 6.351, "eval_steps_per_second": 6.351, "eval_token_acc": 0.4959785522788204, "step": 35500 }, { "epoch": 1.5211430529968726, "grad_norm": 4.149020671844482, "learning_rate": 7.886470126835436e-05, "loss": 2.296621894836426, "memory(GiB)": 72.85, "step": 35505, "token_acc": 0.48927875243664715, "train_speed(iter/s)": 0.670501 }, { "epoch": 1.5213572683261214, "grad_norm": 4.380781650543213, 
"learning_rate": 7.885920590260631e-05, "loss": 2.244603729248047, "memory(GiB)": 72.85, "step": 35510, "token_acc": 0.5212765957446809, "train_speed(iter/s)": 0.670483 }, { "epoch": 1.5215714836553704, "grad_norm": 4.3671956062316895, "learning_rate": 7.885371001404669e-05, "loss": 2.266364097595215, "memory(GiB)": 72.85, "step": 35515, "token_acc": 0.5035971223021583, "train_speed(iter/s)": 0.670478 }, { "epoch": 1.5217856989846195, "grad_norm": 5.076059341430664, "learning_rate": 7.884821360277505e-05, "loss": 2.4916446685791014, "memory(GiB)": 72.85, "step": 35520, "token_acc": 0.47333333333333333, "train_speed(iter/s)": 0.670482 }, { "epoch": 1.5219999143138683, "grad_norm": 4.891374588012695, "learning_rate": 7.884271666889095e-05, "loss": 2.5672510147094725, "memory(GiB)": 72.85, "step": 35525, "token_acc": 0.48398576512455516, "train_speed(iter/s)": 0.670486 }, { "epoch": 1.5222141296431173, "grad_norm": 4.038789749145508, "learning_rate": 7.883721921249399e-05, "loss": 2.477993392944336, "memory(GiB)": 72.85, "step": 35530, "token_acc": 0.5, "train_speed(iter/s)": 0.670503 }, { "epoch": 1.5224283449723663, "grad_norm": 6.114706516265869, "learning_rate": 7.883172123368373e-05, "loss": 2.5870433807373048, "memory(GiB)": 72.85, "step": 35535, "token_acc": 0.486013986013986, "train_speed(iter/s)": 0.670515 }, { "epoch": 1.5226425603016152, "grad_norm": 3.6573891639709473, "learning_rate": 7.882622273255979e-05, "loss": 2.578363800048828, "memory(GiB)": 72.85, "step": 35540, "token_acc": 0.4613003095975232, "train_speed(iter/s)": 0.670518 }, { "epoch": 1.5228567756308642, "grad_norm": 4.297632217407227, "learning_rate": 7.882072370922182e-05, "loss": 2.4472923278808594, "memory(GiB)": 72.85, "step": 35545, "token_acc": 0.4952978056426332, "train_speed(iter/s)": 0.670526 }, { "epoch": 1.5230709909601132, "grad_norm": 4.780965328216553, "learning_rate": 7.881522416376938e-05, "loss": 2.313125419616699, "memory(GiB)": 72.85, "step": 35550, "token_acc": 
0.46621621621621623, "train_speed(iter/s)": 0.67054 }, { "epoch": 1.523285206289362, "grad_norm": 3.2265896797180176, "learning_rate": 7.880972409630213e-05, "loss": 2.557581901550293, "memory(GiB)": 72.85, "step": 35555, "token_acc": 0.4812286689419795, "train_speed(iter/s)": 0.670537 }, { "epoch": 1.523499421618611, "grad_norm": 3.7452409267425537, "learning_rate": 7.880422350691969e-05, "loss": 2.5068033218383787, "memory(GiB)": 72.85, "step": 35560, "token_acc": 0.48534201954397393, "train_speed(iter/s)": 0.670528 }, { "epoch": 1.52371363694786, "grad_norm": 3.698296546936035, "learning_rate": 7.879872239572173e-05, "loss": 2.2934877395629885, "memory(GiB)": 72.85, "step": 35565, "token_acc": 0.5064102564102564, "train_speed(iter/s)": 0.670539 }, { "epoch": 1.523927852277109, "grad_norm": 5.201925754547119, "learning_rate": 7.879322076280791e-05, "loss": 2.2355672836303713, "memory(GiB)": 72.85, "step": 35570, "token_acc": 0.5, "train_speed(iter/s)": 0.670534 }, { "epoch": 1.524142067606358, "grad_norm": 5.378963947296143, "learning_rate": 7.878771860827787e-05, "loss": 2.1751148223876955, "memory(GiB)": 72.85, "step": 35575, "token_acc": 0.5245901639344263, "train_speed(iter/s)": 0.670539 }, { "epoch": 1.524356282935607, "grad_norm": 6.032558917999268, "learning_rate": 7.87822159322313e-05, "loss": 2.008100891113281, "memory(GiB)": 72.85, "step": 35580, "token_acc": 0.562753036437247, "train_speed(iter/s)": 0.67053 }, { "epoch": 1.5245704982648558, "grad_norm": 4.244627952575684, "learning_rate": 7.87767127347679e-05, "loss": 2.4079078674316405, "memory(GiB)": 72.85, "step": 35585, "token_acc": 0.48120300751879697, "train_speed(iter/s)": 0.670527 }, { "epoch": 1.5247847135941048, "grad_norm": 3.9179210662841797, "learning_rate": 7.877120901598736e-05, "loss": 2.476247024536133, "memory(GiB)": 72.85, "step": 35590, "token_acc": 0.4567901234567901, "train_speed(iter/s)": 0.670539 }, { "epoch": 1.5249989289233539, "grad_norm": 5.388053894042969, "learning_rate": 
7.876570477598935e-05, "loss": 2.704574775695801, "memory(GiB)": 72.85, "step": 35595, "token_acc": 0.4558303886925795, "train_speed(iter/s)": 0.670553 }, { "epoch": 1.5252131442526027, "grad_norm": 4.226272106170654, "learning_rate": 7.876020001487365e-05, "loss": 2.3839305877685546, "memory(GiB)": 72.85, "step": 35600, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.67053 }, { "epoch": 1.5254273595818517, "grad_norm": 4.4692816734313965, "learning_rate": 7.875469473273994e-05, "loss": 2.4174938201904297, "memory(GiB)": 72.85, "step": 35605, "token_acc": 0.4892966360856269, "train_speed(iter/s)": 0.670527 }, { "epoch": 1.5256415749111008, "grad_norm": 5.784163951873779, "learning_rate": 7.874918892968794e-05, "loss": 2.744822692871094, "memory(GiB)": 72.85, "step": 35610, "token_acc": 0.4523076923076923, "train_speed(iter/s)": 0.670529 }, { "epoch": 1.5258557902403496, "grad_norm": 4.093582630157471, "learning_rate": 7.874368260581741e-05, "loss": 2.2930755615234375, "memory(GiB)": 72.85, "step": 35615, "token_acc": 0.5241635687732342, "train_speed(iter/s)": 0.670537 }, { "epoch": 1.5260700055695986, "grad_norm": 3.502830743789673, "learning_rate": 7.873817576122813e-05, "loss": 2.2775833129882814, "memory(GiB)": 72.85, "step": 35620, "token_acc": 0.5030674846625767, "train_speed(iter/s)": 0.670551 }, { "epoch": 1.5262842208988476, "grad_norm": 3.8854126930236816, "learning_rate": 7.873266839601982e-05, "loss": 2.5122259140014647, "memory(GiB)": 72.85, "step": 35625, "token_acc": 0.5033783783783784, "train_speed(iter/s)": 0.670537 }, { "epoch": 1.5264984362280964, "grad_norm": 7.125522136688232, "learning_rate": 7.872716051029228e-05, "loss": 2.3884654998779298, "memory(GiB)": 72.85, "step": 35630, "token_acc": 0.512396694214876, "train_speed(iter/s)": 0.670527 }, { "epoch": 1.5267126515573455, "grad_norm": 4.2709784507751465, "learning_rate": 7.872165210414528e-05, "loss": 2.166753387451172, "memory(GiB)": 72.85, "step": 35635, "token_acc": 
0.4934210526315789, "train_speed(iter/s)": 0.670532 }, { "epoch": 1.5269268668865945, "grad_norm": 4.583229064941406, "learning_rate": 7.87161431776786e-05, "loss": 2.5369029998779298, "memory(GiB)": 72.85, "step": 35640, "token_acc": 0.48951048951048953, "train_speed(iter/s)": 0.670524 }, { "epoch": 1.5271410822158433, "grad_norm": 4.3785481452941895, "learning_rate": 7.871063373099205e-05, "loss": 2.6040096282958984, "memory(GiB)": 72.85, "step": 35645, "token_acc": 0.47896440129449835, "train_speed(iter/s)": 0.670523 }, { "epoch": 1.5273552975450924, "grad_norm": 4.308966159820557, "learning_rate": 7.870512376418544e-05, "loss": 2.3211002349853516, "memory(GiB)": 72.85, "step": 35650, "token_acc": 0.5050505050505051, "train_speed(iter/s)": 0.670522 }, { "epoch": 1.5275695128743414, "grad_norm": 4.574093818664551, "learning_rate": 7.869961327735857e-05, "loss": 2.0907360076904298, "memory(GiB)": 72.85, "step": 35655, "token_acc": 0.5465116279069767, "train_speed(iter/s)": 0.670515 }, { "epoch": 1.5277837282035902, "grad_norm": 5.10589075088501, "learning_rate": 7.869410227061132e-05, "loss": 2.4236309051513674, "memory(GiB)": 72.85, "step": 35660, "token_acc": 0.4845360824742268, "train_speed(iter/s)": 0.670498 }, { "epoch": 1.5279979435328392, "grad_norm": 3.934713840484619, "learning_rate": 7.868859074404346e-05, "loss": 2.7020734786987304, "memory(GiB)": 72.85, "step": 35665, "token_acc": 0.42138364779874216, "train_speed(iter/s)": 0.670493 }, { "epoch": 1.5282121588620883, "grad_norm": 3.5110535621643066, "learning_rate": 7.868307869775487e-05, "loss": 2.3816808700561523, "memory(GiB)": 72.85, "step": 35670, "token_acc": 0.49498327759197325, "train_speed(iter/s)": 0.670479 }, { "epoch": 1.528426374191337, "grad_norm": 4.089376926422119, "learning_rate": 7.867756613184542e-05, "loss": 2.2802221298217775, "memory(GiB)": 72.85, "step": 35675, "token_acc": 0.47410358565737054, "train_speed(iter/s)": 0.670495 }, { "epoch": 1.5286405895205861, "grad_norm": 
3.8846967220306396, "learning_rate": 7.867205304641494e-05, "loss": 2.5532073974609375, "memory(GiB)": 72.85, "step": 35680, "token_acc": 0.4866920152091255, "train_speed(iter/s)": 0.670509 }, { "epoch": 1.5288548048498352, "grad_norm": 4.20993709564209, "learning_rate": 7.866653944156333e-05, "loss": 2.228793907165527, "memory(GiB)": 72.85, "step": 35685, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.670537 }, { "epoch": 1.529069020179084, "grad_norm": 4.669922351837158, "learning_rate": 7.866102531739046e-05, "loss": 2.387538719177246, "memory(GiB)": 72.85, "step": 35690, "token_acc": 0.44360902255639095, "train_speed(iter/s)": 0.67054 }, { "epoch": 1.529283235508333, "grad_norm": 5.6592116355896, "learning_rate": 7.865551067399624e-05, "loss": 2.3689022064208984, "memory(GiB)": 72.85, "step": 35695, "token_acc": 0.46735395189003437, "train_speed(iter/s)": 0.670547 }, { "epoch": 1.529497450837582, "grad_norm": 4.854830741882324, "learning_rate": 7.864999551148055e-05, "loss": 2.427428436279297, "memory(GiB)": 72.85, "step": 35700, "token_acc": 0.48626373626373626, "train_speed(iter/s)": 0.670554 }, { "epoch": 1.5297116661668309, "grad_norm": 4.317539691925049, "learning_rate": 7.864447982994332e-05, "loss": 2.411777687072754, "memory(GiB)": 72.85, "step": 35705, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.670555 }, { "epoch": 1.5299258814960799, "grad_norm": 3.6128106117248535, "learning_rate": 7.863896362948448e-05, "loss": 2.4196666717529296, "memory(GiB)": 72.85, "step": 35710, "token_acc": 0.4807121661721068, "train_speed(iter/s)": 0.670567 }, { "epoch": 1.530140096825329, "grad_norm": 4.224031448364258, "learning_rate": 7.863344691020393e-05, "loss": 2.413655471801758, "memory(GiB)": 72.85, "step": 35715, "token_acc": 0.4842105263157895, "train_speed(iter/s)": 0.670562 }, { "epoch": 1.5303543121545777, "grad_norm": 4.99735689163208, "learning_rate": 7.862792967220165e-05, "loss": 2.4309547424316404, "memory(GiB)": 72.85, "step": 
35720, "token_acc": 0.49216300940438873, "train_speed(iter/s)": 0.670559 }, { "epoch": 1.5305685274838268, "grad_norm": 4.438407897949219, "learning_rate": 7.862241191557754e-05, "loss": 2.395486831665039, "memory(GiB)": 72.85, "step": 35725, "token_acc": 0.4897119341563786, "train_speed(iter/s)": 0.670571 }, { "epoch": 1.5307827428130758, "grad_norm": 4.2248148918151855, "learning_rate": 7.861689364043161e-05, "loss": 2.2198400497436523, "memory(GiB)": 72.85, "step": 35730, "token_acc": 0.4876325088339223, "train_speed(iter/s)": 0.670573 }, { "epoch": 1.5309969581423246, "grad_norm": 3.929489850997925, "learning_rate": 7.86113748468638e-05, "loss": 2.288986015319824, "memory(GiB)": 72.85, "step": 35735, "token_acc": 0.506993006993007, "train_speed(iter/s)": 0.670571 }, { "epoch": 1.5312111734715736, "grad_norm": 3.813300132751465, "learning_rate": 7.86058555349741e-05, "loss": 2.165925979614258, "memory(GiB)": 72.85, "step": 35740, "token_acc": 0.5098039215686274, "train_speed(iter/s)": 0.670582 }, { "epoch": 1.5314253888008227, "grad_norm": 4.108116626739502, "learning_rate": 7.86003357048625e-05, "loss": 2.7026905059814452, "memory(GiB)": 72.85, "step": 35745, "token_acc": 0.4192634560906516, "train_speed(iter/s)": 0.670558 }, { "epoch": 1.5316396041300715, "grad_norm": 3.798302173614502, "learning_rate": 7.859481535662897e-05, "loss": 2.426375389099121, "memory(GiB)": 72.85, "step": 35750, "token_acc": 0.511326860841424, "train_speed(iter/s)": 0.67055 }, { "epoch": 1.5318538194593205, "grad_norm": 3.564390182495117, "learning_rate": 7.858929449037355e-05, "loss": 2.2017663955688476, "memory(GiB)": 72.85, "step": 35755, "token_acc": 0.49266862170087977, "train_speed(iter/s)": 0.670535 }, { "epoch": 1.5320680347885696, "grad_norm": 5.161865711212158, "learning_rate": 7.858377310619622e-05, "loss": 2.054366111755371, "memory(GiB)": 72.85, "step": 35760, "token_acc": 0.5519713261648745, "train_speed(iter/s)": 0.670538 }, { "epoch": 1.5322822501178184, "grad_norm": 
4.465765476226807, "learning_rate": 7.857825120419704e-05, "loss": 2.32431697845459, "memory(GiB)": 72.85, "step": 35765, "token_acc": 0.45751633986928103, "train_speed(iter/s)": 0.670542 }, { "epoch": 1.5324964654470674, "grad_norm": 3.6488986015319824, "learning_rate": 7.857272878447604e-05, "loss": 2.5510801315307616, "memory(GiB)": 72.85, "step": 35770, "token_acc": 0.45977011494252873, "train_speed(iter/s)": 0.670558 }, { "epoch": 1.5327106807763164, "grad_norm": 4.545658588409424, "learning_rate": 7.856720584713324e-05, "loss": 2.3342199325561523, "memory(GiB)": 72.85, "step": 35775, "token_acc": 0.48656716417910445, "train_speed(iter/s)": 0.670583 }, { "epoch": 1.5329248961055653, "grad_norm": 3.6088902950286865, "learning_rate": 7.856168239226873e-05, "loss": 2.047241973876953, "memory(GiB)": 72.85, "step": 35780, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.670594 }, { "epoch": 1.5331391114348143, "grad_norm": 4.5756516456604, "learning_rate": 7.855615841998253e-05, "loss": 2.3579843521118162, "memory(GiB)": 72.85, "step": 35785, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.670604 }, { "epoch": 1.5333533267640633, "grad_norm": 3.3981289863586426, "learning_rate": 7.855063393037474e-05, "loss": 2.4819278717041016, "memory(GiB)": 72.85, "step": 35790, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.670604 }, { "epoch": 1.5335675420933121, "grad_norm": 4.108059883117676, "learning_rate": 7.854510892354542e-05, "loss": 2.416087341308594, "memory(GiB)": 72.85, "step": 35795, "token_acc": 0.4866920152091255, "train_speed(iter/s)": 0.6706 }, { "epoch": 1.5337817574225612, "grad_norm": 4.807345390319824, "learning_rate": 7.853958339959468e-05, "loss": 2.1106224060058594, "memory(GiB)": 72.85, "step": 35800, "token_acc": 0.5488721804511278, "train_speed(iter/s)": 0.670602 }, { "epoch": 1.5339959727518102, "grad_norm": 3.3402626514434814, "learning_rate": 7.853405735862263e-05, "loss": 2.421364402770996, "memory(GiB)": 72.85, 
"step": 35805, "token_acc": 0.48466257668711654, "train_speed(iter/s)": 0.670618 }, { "epoch": 1.534210188081059, "grad_norm": 3.7304389476776123, "learning_rate": 7.852853080072936e-05, "loss": 2.3778383255004885, "memory(GiB)": 72.85, "step": 35810, "token_acc": 0.4709897610921502, "train_speed(iter/s)": 0.670597 }, { "epoch": 1.534424403410308, "grad_norm": 4.786494731903076, "learning_rate": 7.852300372601499e-05, "loss": 2.5969482421875, "memory(GiB)": 72.85, "step": 35815, "token_acc": 0.4608150470219436, "train_speed(iter/s)": 0.670593 }, { "epoch": 1.534638618739557, "grad_norm": 5.180427074432373, "learning_rate": 7.851747613457965e-05, "loss": 2.3470325469970703, "memory(GiB)": 72.85, "step": 35820, "token_acc": 0.5285714285714286, "train_speed(iter/s)": 0.670581 }, { "epoch": 1.534852834068806, "grad_norm": 4.5797505378723145, "learning_rate": 7.851194802652346e-05, "loss": 2.382337188720703, "memory(GiB)": 72.85, "step": 35825, "token_acc": 0.48299319727891155, "train_speed(iter/s)": 0.670592 }, { "epoch": 1.535067049398055, "grad_norm": 4.546103477478027, "learning_rate": 7.850641940194661e-05, "loss": 2.3115406036376953, "memory(GiB)": 72.85, "step": 35830, "token_acc": 0.5220338983050847, "train_speed(iter/s)": 0.670596 }, { "epoch": 1.535281264727304, "grad_norm": 4.460447311401367, "learning_rate": 7.850089026094923e-05, "loss": 2.2580848693847657, "memory(GiB)": 72.85, "step": 35835, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.670593 }, { "epoch": 1.5354954800565528, "grad_norm": 4.029225826263428, "learning_rate": 7.849536060363147e-05, "loss": 2.3539926528930666, "memory(GiB)": 72.85, "step": 35840, "token_acc": 0.5052631578947369, "train_speed(iter/s)": 0.670609 }, { "epoch": 1.5357096953858018, "grad_norm": 5.196866989135742, "learning_rate": 7.848983043009352e-05, "loss": 2.6033403396606447, "memory(GiB)": 72.85, "step": 35845, "token_acc": 0.45741324921135645, "train_speed(iter/s)": 0.670625 }, { "epoch": 1.5359239107150509, 
"grad_norm": 4.435822486877441, "learning_rate": 7.848429974043559e-05, "loss": 2.342605972290039, "memory(GiB)": 72.85, "step": 35850, "token_acc": 0.4818181818181818, "train_speed(iter/s)": 0.670636 }, { "epoch": 1.5361381260442997, "grad_norm": 4.892307758331299, "learning_rate": 7.847876853475784e-05, "loss": 2.242877960205078, "memory(GiB)": 72.85, "step": 35855, "token_acc": 0.50187265917603, "train_speed(iter/s)": 0.67063 }, { "epoch": 1.5363523413735487, "grad_norm": 3.9953908920288086, "learning_rate": 7.847323681316048e-05, "loss": 2.349440574645996, "memory(GiB)": 72.85, "step": 35860, "token_acc": 0.4919093851132686, "train_speed(iter/s)": 0.670633 }, { "epoch": 1.5365665567027977, "grad_norm": 4.092937469482422, "learning_rate": 7.846770457574373e-05, "loss": 2.3388931274414064, "memory(GiB)": 72.85, "step": 35865, "token_acc": 0.5309090909090909, "train_speed(iter/s)": 0.670651 }, { "epoch": 1.5367807720320465, "grad_norm": 5.413322925567627, "learning_rate": 7.84621718226078e-05, "loss": 2.112582778930664, "memory(GiB)": 72.85, "step": 35870, "token_acc": 0.5071428571428571, "train_speed(iter/s)": 0.670674 }, { "epoch": 1.5369949873612956, "grad_norm": 3.5897762775421143, "learning_rate": 7.845663855385292e-05, "loss": 2.452855682373047, "memory(GiB)": 72.85, "step": 35875, "token_acc": 0.4923547400611621, "train_speed(iter/s)": 0.670682 }, { "epoch": 1.5372092026905446, "grad_norm": 4.4062180519104, "learning_rate": 7.845110476957935e-05, "loss": 2.3127042770385744, "memory(GiB)": 72.85, "step": 35880, "token_acc": 0.5230263157894737, "train_speed(iter/s)": 0.670704 }, { "epoch": 1.5374234180197934, "grad_norm": 4.539583683013916, "learning_rate": 7.844557046988732e-05, "loss": 2.3134218215942384, "memory(GiB)": 72.85, "step": 35885, "token_acc": 0.5017667844522968, "train_speed(iter/s)": 0.670702 }, { "epoch": 1.5376376333490425, "grad_norm": 3.482787609100342, "learning_rate": 7.84400356548771e-05, "loss": 2.4132640838623045, "memory(GiB)": 72.85, 
"step": 35890, "token_acc": 0.47854785478547857, "train_speed(iter/s)": 0.670722 }, { "epoch": 1.5378518486782915, "grad_norm": 5.291714668273926, "learning_rate": 7.843450032464897e-05, "loss": 2.235453796386719, "memory(GiB)": 72.85, "step": 35895, "token_acc": 0.47865853658536583, "train_speed(iter/s)": 0.670737 }, { "epoch": 1.5380660640075403, "grad_norm": 4.022680282592773, "learning_rate": 7.842896447930317e-05, "loss": 2.124194526672363, "memory(GiB)": 72.85, "step": 35900, "token_acc": 0.552901023890785, "train_speed(iter/s)": 0.670739 }, { "epoch": 1.5382802793367893, "grad_norm": 3.735485553741455, "learning_rate": 7.842342811894003e-05, "loss": 2.0981418609619142, "memory(GiB)": 72.85, "step": 35905, "token_acc": 0.5441176470588235, "train_speed(iter/s)": 0.67077 }, { "epoch": 1.5384944946660384, "grad_norm": 4.57112979888916, "learning_rate": 7.841789124365981e-05, "loss": 2.2902603149414062, "memory(GiB)": 72.85, "step": 35910, "token_acc": 0.5087108013937283, "train_speed(iter/s)": 0.670763 }, { "epoch": 1.5387087099952872, "grad_norm": 4.124881744384766, "learning_rate": 7.841235385356283e-05, "loss": 2.5712650299072264, "memory(GiB)": 72.85, "step": 35915, "token_acc": 0.4886731391585761, "train_speed(iter/s)": 0.670741 }, { "epoch": 1.5389229253245362, "grad_norm": 3.610812187194824, "learning_rate": 7.840681594874942e-05, "loss": 2.244525909423828, "memory(GiB)": 72.85, "step": 35920, "token_acc": 0.535031847133758, "train_speed(iter/s)": 0.670742 }, { "epoch": 1.5391371406537853, "grad_norm": 4.488850116729736, "learning_rate": 7.840127752931989e-05, "loss": 2.3754459381103517, "memory(GiB)": 72.85, "step": 35925, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.670735 }, { "epoch": 1.539351355983034, "grad_norm": 3.8243227005004883, "learning_rate": 7.839573859537457e-05, "loss": 2.3407630920410156, "memory(GiB)": 72.85, "step": 35930, "token_acc": 0.4779874213836478, "train_speed(iter/s)": 0.67074 }, { "epoch": 1.539565571312283, 
"grad_norm": 3.522181272506714, "learning_rate": 7.839019914701382e-05, "loss": 2.5179470062255858, "memory(GiB)": 72.85, "step": 35935, "token_acc": 0.5093833780160858, "train_speed(iter/s)": 0.670727 }, { "epoch": 1.5397797866415321, "grad_norm": 4.7811970710754395, "learning_rate": 7.838465918433796e-05, "loss": 2.427790641784668, "memory(GiB)": 72.85, "step": 35940, "token_acc": 0.4813664596273292, "train_speed(iter/s)": 0.670718 }, { "epoch": 1.539994001970781, "grad_norm": 4.709369659423828, "learning_rate": 7.837911870744738e-05, "loss": 2.3465505599975587, "memory(GiB)": 72.85, "step": 35945, "token_acc": 0.4788732394366197, "train_speed(iter/s)": 0.670724 }, { "epoch": 1.54020821730003, "grad_norm": 4.673506259918213, "learning_rate": 7.837357771644245e-05, "loss": 2.2288959503173826, "memory(GiB)": 72.85, "step": 35950, "token_acc": 0.5056603773584906, "train_speed(iter/s)": 0.670735 }, { "epoch": 1.540422432629279, "grad_norm": 4.85781717300415, "learning_rate": 7.836803621142354e-05, "loss": 2.2432025909423827, "memory(GiB)": 72.85, "step": 35955, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.670721 }, { "epoch": 1.5406366479585278, "grad_norm": 3.9436864852905273, "learning_rate": 7.836249419249104e-05, "loss": 2.2117401123046876, "memory(GiB)": 72.85, "step": 35960, "token_acc": 0.5376344086021505, "train_speed(iter/s)": 0.670734 }, { "epoch": 1.5408508632877769, "grad_norm": 4.384356498718262, "learning_rate": 7.835695165974534e-05, "loss": 2.329621505737305, "memory(GiB)": 72.85, "step": 35965, "token_acc": 0.513986013986014, "train_speed(iter/s)": 0.670721 }, { "epoch": 1.541065078617026, "grad_norm": 4.027388095855713, "learning_rate": 7.835140861328688e-05, "loss": 2.0032476425170898, "memory(GiB)": 72.85, "step": 35970, "token_acc": 0.5315985130111525, "train_speed(iter/s)": 0.670731 }, { "epoch": 1.5412792939462747, "grad_norm": 4.364253044128418, "learning_rate": 7.834586505321607e-05, "loss": 2.398886871337891, "memory(GiB)": 
72.85, "step": 35975, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.670709 }, { "epoch": 1.5414935092755238, "grad_norm": 4.321422576904297, "learning_rate": 7.834032097963328e-05, "loss": 2.2407283782958984, "memory(GiB)": 72.85, "step": 35980, "token_acc": 0.5035460992907801, "train_speed(iter/s)": 0.670699 }, { "epoch": 1.5417077246047728, "grad_norm": 4.107391834259033, "learning_rate": 7.833477639263902e-05, "loss": 2.1286216735839845, "memory(GiB)": 72.85, "step": 35985, "token_acc": 0.5461847389558233, "train_speed(iter/s)": 0.6707 }, { "epoch": 1.5419219399340216, "grad_norm": 4.826338291168213, "learning_rate": 7.832923129233369e-05, "loss": 2.5369247436523437, "memory(GiB)": 72.85, "step": 35990, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.670715 }, { "epoch": 1.5421361552632706, "grad_norm": 4.438459396362305, "learning_rate": 7.832368567881778e-05, "loss": 2.5768787384033205, "memory(GiB)": 72.85, "step": 35995, "token_acc": 0.48534201954397393, "train_speed(iter/s)": 0.67073 }, { "epoch": 1.5423503705925197, "grad_norm": 4.803354263305664, "learning_rate": 7.831813955219172e-05, "loss": 2.354417610168457, "memory(GiB)": 72.85, "step": 36000, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.670726 }, { "epoch": 1.5423503705925197, "eval_loss": 2.1085455417633057, "eval_runtime": 16.2898, "eval_samples_per_second": 6.139, "eval_steps_per_second": 6.139, "eval_token_acc": 0.49286640726329445, "step": 36000 }, { "epoch": 1.5425645859217685, "grad_norm": 5.093508243560791, "learning_rate": 7.8312592912556e-05, "loss": 2.0470243453979493, "memory(GiB)": 72.85, "step": 36005, "token_acc": 0.5073260073260073, "train_speed(iter/s)": 0.670481 }, { "epoch": 1.5427788012510175, "grad_norm": 4.084209442138672, "learning_rate": 7.83070457600111e-05, "loss": 2.4579488754272463, "memory(GiB)": 72.85, "step": 36010, "token_acc": 0.45733788395904434, "train_speed(iter/s)": 0.670456 }, { "epoch": 1.5429930165802666, "grad_norm": 
4.223424434661865, "learning_rate": 7.830149809465751e-05, "loss": 2.3120235443115233, "memory(GiB)": 72.85, "step": 36015, "token_acc": 0.45017182130584193, "train_speed(iter/s)": 0.670458 }, { "epoch": 1.5432072319095154, "grad_norm": 4.544179916381836, "learning_rate": 7.829594991659574e-05, "loss": 2.4035617828369142, "memory(GiB)": 72.85, "step": 36020, "token_acc": 0.4807692307692308, "train_speed(iter/s)": 0.670468 }, { "epoch": 1.5434214472387644, "grad_norm": 5.1666579246521, "learning_rate": 7.829040122592627e-05, "loss": 2.417115020751953, "memory(GiB)": 72.85, "step": 36025, "token_acc": 0.5157593123209169, "train_speed(iter/s)": 0.670481 }, { "epoch": 1.5436356625680134, "grad_norm": 3.897935390472412, "learning_rate": 7.828485202274966e-05, "loss": 2.2721973419189454, "memory(GiB)": 72.85, "step": 36030, "token_acc": 0.521594684385382, "train_speed(iter/s)": 0.670503 }, { "epoch": 1.5438498778972622, "grad_norm": 5.589102745056152, "learning_rate": 7.827930230716644e-05, "loss": 2.410022735595703, "memory(GiB)": 72.85, "step": 36035, "token_acc": 0.48, "train_speed(iter/s)": 0.670501 }, { "epoch": 1.5440640932265113, "grad_norm": 4.038293838500977, "learning_rate": 7.827375207927713e-05, "loss": 2.2465648651123047, "memory(GiB)": 72.85, "step": 36040, "token_acc": 0.4847457627118644, "train_speed(iter/s)": 0.670515 }, { "epoch": 1.5442783085557603, "grad_norm": 3.469539165496826, "learning_rate": 7.826820133918224e-05, "loss": 2.491197395324707, "memory(GiB)": 72.85, "step": 36045, "token_acc": 0.47703180212014135, "train_speed(iter/s)": 0.670505 }, { "epoch": 1.5444925238850091, "grad_norm": 4.7681074142456055, "learning_rate": 7.826265008698239e-05, "loss": 2.650579261779785, "memory(GiB)": 72.85, "step": 36050, "token_acc": 0.4652777777777778, "train_speed(iter/s)": 0.670514 }, { "epoch": 1.5447067392142582, "grad_norm": 4.988958835601807, "learning_rate": 7.825709832277812e-05, "loss": 2.258879852294922, "memory(GiB)": 72.85, "step": 36055, 
"token_acc": 0.4956896551724138, "train_speed(iter/s)": 0.67053 }, { "epoch": 1.5449209545435072, "grad_norm": 4.14395809173584, "learning_rate": 7.825154604667e-05, "loss": 2.257504463195801, "memory(GiB)": 72.85, "step": 36060, "token_acc": 0.4833948339483395, "train_speed(iter/s)": 0.670526 }, { "epoch": 1.545135169872756, "grad_norm": 4.134223937988281, "learning_rate": 7.824599325875863e-05, "loss": 2.173754119873047, "memory(GiB)": 72.85, "step": 36065, "token_acc": 0.5291666666666667, "train_speed(iter/s)": 0.670519 }, { "epoch": 1.545349385202005, "grad_norm": 4.928133964538574, "learning_rate": 7.82404399591446e-05, "loss": 2.2187263488769533, "memory(GiB)": 72.85, "step": 36070, "token_acc": 0.5266903914590747, "train_speed(iter/s)": 0.670512 }, { "epoch": 1.545563600531254, "grad_norm": 6.3087158203125, "learning_rate": 7.823488614792851e-05, "loss": 2.493986701965332, "memory(GiB)": 72.85, "step": 36075, "token_acc": 0.48580441640378547, "train_speed(iter/s)": 0.670512 }, { "epoch": 1.545777815860503, "grad_norm": 5.099682331085205, "learning_rate": 7.822933182521094e-05, "loss": 2.4398754119873045, "memory(GiB)": 72.85, "step": 36080, "token_acc": 0.4843205574912892, "train_speed(iter/s)": 0.67051 }, { "epoch": 1.545992031189752, "grad_norm": 4.964293003082275, "learning_rate": 7.822377699109257e-05, "loss": 2.467561149597168, "memory(GiB)": 72.85, "step": 36085, "token_acc": 0.45955882352941174, "train_speed(iter/s)": 0.670496 }, { "epoch": 1.546206246519001, "grad_norm": 3.4773645401000977, "learning_rate": 7.821822164567401e-05, "loss": 2.506546974182129, "memory(GiB)": 72.85, "step": 36090, "token_acc": 0.5081967213114754, "train_speed(iter/s)": 0.670508 }, { "epoch": 1.5464204618482498, "grad_norm": 4.453849792480469, "learning_rate": 7.821266578905587e-05, "loss": 2.0853292465209963, "memory(GiB)": 72.85, "step": 36095, "token_acc": 0.5667870036101083, "train_speed(iter/s)": 0.670524 }, { "epoch": 1.5466346771774988, "grad_norm": 
3.7831034660339355, "learning_rate": 7.820710942133884e-05, "loss": 2.5012868881225585, "memory(GiB)": 72.85, "step": 36100, "token_acc": 0.47194719471947194, "train_speed(iter/s)": 0.670528 }, { "epoch": 1.5468488925067478, "grad_norm": 5.512815952301025, "learning_rate": 7.820155254262356e-05, "loss": 2.1659679412841797, "memory(GiB)": 72.85, "step": 36105, "token_acc": 0.49829351535836175, "train_speed(iter/s)": 0.670552 }, { "epoch": 1.5470631078359967, "grad_norm": 3.903228998184204, "learning_rate": 7.81959951530107e-05, "loss": 2.4518890380859375, "memory(GiB)": 72.85, "step": 36110, "token_acc": 0.4775641025641026, "train_speed(iter/s)": 0.67056 }, { "epoch": 1.5472773231652457, "grad_norm": 3.757406234741211, "learning_rate": 7.819043725260093e-05, "loss": 2.249877166748047, "memory(GiB)": 72.85, "step": 36115, "token_acc": 0.5091463414634146, "train_speed(iter/s)": 0.670576 }, { "epoch": 1.5474915384944947, "grad_norm": 5.354044437408447, "learning_rate": 7.818487884149496e-05, "loss": 2.293016815185547, "memory(GiB)": 72.85, "step": 36120, "token_acc": 0.4894366197183099, "train_speed(iter/s)": 0.6706 }, { "epoch": 1.5477057538237435, "grad_norm": 4.280666351318359, "learning_rate": 7.817931991979345e-05, "loss": 2.3708099365234374, "memory(GiB)": 72.85, "step": 36125, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.670626 }, { "epoch": 1.5479199691529926, "grad_norm": 5.226457595825195, "learning_rate": 7.817376048759714e-05, "loss": 2.6102548599243165, "memory(GiB)": 72.85, "step": 36130, "token_acc": 0.44642857142857145, "train_speed(iter/s)": 0.67062 }, { "epoch": 1.5481341844822416, "grad_norm": 3.586820125579834, "learning_rate": 7.816820054500674e-05, "loss": 2.1595623016357424, "memory(GiB)": 72.85, "step": 36135, "token_acc": 0.5697674418604651, "train_speed(iter/s)": 0.67062 }, { "epoch": 1.5483483998114904, "grad_norm": 3.838047504425049, "learning_rate": 7.816264009212294e-05, "loss": 2.232390022277832, "memory(GiB)": 72.85, "step": 
36140, "token_acc": 0.5382165605095541, "train_speed(iter/s)": 0.670628 }, { "epoch": 1.5485626151407395, "grad_norm": 3.5212130546569824, "learning_rate": 7.815707912904651e-05, "loss": 2.252240753173828, "memory(GiB)": 72.85, "step": 36145, "token_acc": 0.4959016393442623, "train_speed(iter/s)": 0.670653 }, { "epoch": 1.5487768304699885, "grad_norm": 4.80721378326416, "learning_rate": 7.815151765587818e-05, "loss": 2.4642045974731444, "memory(GiB)": 72.85, "step": 36150, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.670639 }, { "epoch": 1.5489910457992373, "grad_norm": 4.070844650268555, "learning_rate": 7.814595567271869e-05, "loss": 2.6501712799072266, "memory(GiB)": 72.85, "step": 36155, "token_acc": 0.40202702702702703, "train_speed(iter/s)": 0.670598 }, { "epoch": 1.5492052611284863, "grad_norm": 6.942684173583984, "learning_rate": 7.814039317966882e-05, "loss": 2.376329803466797, "memory(GiB)": 72.85, "step": 36160, "token_acc": 0.5243055555555556, "train_speed(iter/s)": 0.670579 }, { "epoch": 1.5494194764577354, "grad_norm": 3.5804998874664307, "learning_rate": 7.81348301768293e-05, "loss": 2.435498046875, "memory(GiB)": 72.85, "step": 36165, "token_acc": 0.49310344827586206, "train_speed(iter/s)": 0.670564 }, { "epoch": 1.5496336917869842, "grad_norm": 3.2797091007232666, "learning_rate": 7.812926666430098e-05, "loss": 2.3645057678222656, "memory(GiB)": 72.85, "step": 36170, "token_acc": 0.4889589905362776, "train_speed(iter/s)": 0.670548 }, { "epoch": 1.5498479071162332, "grad_norm": 3.615959405899048, "learning_rate": 7.812370264218459e-05, "loss": 2.596788024902344, "memory(GiB)": 72.85, "step": 36175, "token_acc": 0.46715328467153283, "train_speed(iter/s)": 0.670544 }, { "epoch": 1.5500621224454822, "grad_norm": 3.3872556686401367, "learning_rate": 7.811813811058093e-05, "loss": 2.262954902648926, "memory(GiB)": 72.85, "step": 36180, "token_acc": 0.5404411764705882, "train_speed(iter/s)": 0.67057 }, { "epoch": 1.550276337774731, 
"grad_norm": 4.485887050628662, "learning_rate": 7.811257306959083e-05, "loss": 2.4004705429077147, "memory(GiB)": 72.85, "step": 36185, "token_acc": 0.47419354838709676, "train_speed(iter/s)": 0.670561 }, { "epoch": 1.55049055310398, "grad_norm": 3.3722519874572754, "learning_rate": 7.810700751931512e-05, "loss": 2.3461673736572264, "memory(GiB)": 72.85, "step": 36190, "token_acc": 0.4470198675496689, "train_speed(iter/s)": 0.670535 }, { "epoch": 1.5507047684332291, "grad_norm": 3.8140056133270264, "learning_rate": 7.810144145985457e-05, "loss": 2.406671142578125, "memory(GiB)": 72.85, "step": 36195, "token_acc": 0.5451263537906137, "train_speed(iter/s)": 0.670541 }, { "epoch": 1.550918983762478, "grad_norm": 3.418025493621826, "learning_rate": 7.809587489131006e-05, "loss": 2.3026653289794923, "memory(GiB)": 72.85, "step": 36200, "token_acc": 0.48328267477203646, "train_speed(iter/s)": 0.670572 }, { "epoch": 1.551133199091727, "grad_norm": 4.77647066116333, "learning_rate": 7.809030781378242e-05, "loss": 2.2256134033203123, "memory(GiB)": 72.85, "step": 36205, "token_acc": 0.5139442231075697, "train_speed(iter/s)": 0.670589 }, { "epoch": 1.551347414420976, "grad_norm": 3.6114537715911865, "learning_rate": 7.80847402273725e-05, "loss": 2.2558349609375, "memory(GiB)": 72.85, "step": 36210, "token_acc": 0.5075528700906344, "train_speed(iter/s)": 0.670595 }, { "epoch": 1.5515616297502248, "grad_norm": 4.651354789733887, "learning_rate": 7.807917213218117e-05, "loss": 2.2080867767333983, "memory(GiB)": 72.85, "step": 36215, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.670592 }, { "epoch": 1.5517758450794739, "grad_norm": 5.375672340393066, "learning_rate": 7.80736035283093e-05, "loss": 2.386161041259766, "memory(GiB)": 72.85, "step": 36220, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.670581 }, { "epoch": 1.551990060408723, "grad_norm": 4.282434463500977, "learning_rate": 7.806803441585776e-05, "loss": 2.2320697784423826, "memory(GiB)": 
72.85, "step": 36225, "token_acc": 0.5378486055776892, "train_speed(iter/s)": 0.670563 }, { "epoch": 1.5522042757379717, "grad_norm": 3.651348352432251, "learning_rate": 7.806246479492745e-05, "loss": 2.6669147491455076, "memory(GiB)": 72.85, "step": 36230, "token_acc": 0.4882154882154882, "train_speed(iter/s)": 0.670558 }, { "epoch": 1.5524184910672207, "grad_norm": 4.802724361419678, "learning_rate": 7.805689466561927e-05, "loss": 2.345436668395996, "memory(GiB)": 72.85, "step": 36235, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.670563 }, { "epoch": 1.5526327063964698, "grad_norm": 4.611881256103516, "learning_rate": 7.805132402803413e-05, "loss": 2.24674072265625, "memory(GiB)": 72.85, "step": 36240, "token_acc": 0.4811715481171548, "train_speed(iter/s)": 0.670563 }, { "epoch": 1.5528469217257186, "grad_norm": 4.031742572784424, "learning_rate": 7.804575288227291e-05, "loss": 2.3315670013427736, "memory(GiB)": 72.85, "step": 36245, "token_acc": 0.476027397260274, "train_speed(iter/s)": 0.670571 }, { "epoch": 1.5530611370549676, "grad_norm": 5.384084224700928, "learning_rate": 7.804018122843659e-05, "loss": 2.370879364013672, "memory(GiB)": 72.85, "step": 36250, "token_acc": 0.5169811320754717, "train_speed(iter/s)": 0.670579 }, { "epoch": 1.5532753523842167, "grad_norm": 4.612273693084717, "learning_rate": 7.803460906662608e-05, "loss": 2.602690887451172, "memory(GiB)": 72.85, "step": 36255, "token_acc": 0.5, "train_speed(iter/s)": 0.670568 }, { "epoch": 1.5534895677134655, "grad_norm": 3.782636880874634, "learning_rate": 7.802903639694233e-05, "loss": 2.382596969604492, "memory(GiB)": 72.85, "step": 36260, "token_acc": 0.5131578947368421, "train_speed(iter/s)": 0.670586 }, { "epoch": 1.5537037830427145, "grad_norm": 4.855930805206299, "learning_rate": 7.802346321948628e-05, "loss": 2.4481605529785155, "memory(GiB)": 72.85, "step": 36265, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 0.670594 }, { "epoch": 1.5539179983719635, "grad_norm": 
3.9561188220977783, "learning_rate": 7.801788953435893e-05, "loss": 2.357844352722168, "memory(GiB)": 72.85, "step": 36270, "token_acc": 0.4804804804804805, "train_speed(iter/s)": 0.670606 }, { "epoch": 1.5541322137012124, "grad_norm": 3.322143316268921, "learning_rate": 7.80123153416612e-05, "loss": 2.665111541748047, "memory(GiB)": 72.85, "step": 36275, "token_acc": 0.4723127035830619, "train_speed(iter/s)": 0.670627 }, { "epoch": 1.5543464290304614, "grad_norm": 3.703360080718994, "learning_rate": 7.800674064149411e-05, "loss": 2.39575309753418, "memory(GiB)": 72.85, "step": 36280, "token_acc": 0.4981549815498155, "train_speed(iter/s)": 0.670661 }, { "epoch": 1.5545606443597104, "grad_norm": 5.097012519836426, "learning_rate": 7.800116543395866e-05, "loss": 2.4484596252441406, "memory(GiB)": 72.85, "step": 36285, "token_acc": 0.5030674846625767, "train_speed(iter/s)": 0.670646 }, { "epoch": 1.5547748596889592, "grad_norm": 4.080333232879639, "learning_rate": 7.79955897191558e-05, "loss": 2.324231719970703, "memory(GiB)": 72.85, "step": 36290, "token_acc": 0.46846846846846846, "train_speed(iter/s)": 0.670643 }, { "epoch": 1.5549890750182083, "grad_norm": 4.4936065673828125, "learning_rate": 7.79900134971866e-05, "loss": 2.461617088317871, "memory(GiB)": 72.85, "step": 36295, "token_acc": 0.47039473684210525, "train_speed(iter/s)": 0.670639 }, { "epoch": 1.5552032903474573, "grad_norm": 4.9447455406188965, "learning_rate": 7.798443676815203e-05, "loss": 2.016456413269043, "memory(GiB)": 72.85, "step": 36300, "token_acc": 0.6066176470588235, "train_speed(iter/s)": 0.670646 }, { "epoch": 1.5554175056767061, "grad_norm": 3.5703327655792236, "learning_rate": 7.797885953215314e-05, "loss": 2.640696334838867, "memory(GiB)": 72.85, "step": 36305, "token_acc": 0.47109826589595377, "train_speed(iter/s)": 0.670654 }, { "epoch": 1.5556317210059551, "grad_norm": 6.100685119628906, "learning_rate": 7.797328178929096e-05, "loss": 2.311946678161621, "memory(GiB)": 72.85, "step": 
36310, "token_acc": 0.483271375464684, "train_speed(iter/s)": 0.670664 }, { "epoch": 1.5558459363352042, "grad_norm": 4.625831127166748, "learning_rate": 7.796770353966654e-05, "loss": 2.659516525268555, "memory(GiB)": 72.85, "step": 36315, "token_acc": 0.4142857142857143, "train_speed(iter/s)": 0.670673 }, { "epoch": 1.556060151664453, "grad_norm": 6.552182674407959, "learning_rate": 7.796212478338094e-05, "loss": 2.01317138671875, "memory(GiB)": 72.85, "step": 36320, "token_acc": 0.5165289256198347, "train_speed(iter/s)": 0.670678 }, { "epoch": 1.556274366993702, "grad_norm": 4.356464385986328, "learning_rate": 7.795654552053522e-05, "loss": 2.3875759124755858, "memory(GiB)": 72.85, "step": 36325, "token_acc": 0.47634069400630913, "train_speed(iter/s)": 0.670691 }, { "epoch": 1.556488582322951, "grad_norm": 3.6870644092559814, "learning_rate": 7.795096575123045e-05, "loss": 2.0779258728027346, "memory(GiB)": 72.85, "step": 36330, "token_acc": 0.5, "train_speed(iter/s)": 0.670697 }, { "epoch": 1.5567027976521999, "grad_norm": 3.1526808738708496, "learning_rate": 7.794538547556771e-05, "loss": 2.4387481689453123, "memory(GiB)": 72.85, "step": 36335, "token_acc": 0.4983277591973244, "train_speed(iter/s)": 0.670705 }, { "epoch": 1.556917012981449, "grad_norm": 3.5543925762176514, "learning_rate": 7.793980469364811e-05, "loss": 2.3354814529418944, "memory(GiB)": 72.85, "step": 36340, "token_acc": 0.5077399380804953, "train_speed(iter/s)": 0.670708 }, { "epoch": 1.557131228310698, "grad_norm": 4.71927547454834, "learning_rate": 7.793422340557273e-05, "loss": 2.5694791793823244, "memory(GiB)": 72.85, "step": 36345, "token_acc": 0.47678018575851394, "train_speed(iter/s)": 0.670724 }, { "epoch": 1.5573454436399468, "grad_norm": 4.073342800140381, "learning_rate": 7.792864161144269e-05, "loss": 2.229099655151367, "memory(GiB)": 72.85, "step": 36350, "token_acc": 0.47440273037542663, "train_speed(iter/s)": 0.670738 }, { "epoch": 1.5575596589691958, "grad_norm": 
5.506391525268555, "learning_rate": 7.792305931135911e-05, "loss": 2.4497444152832033, "memory(GiB)": 72.85, "step": 36355, "token_acc": 0.47950819672131145, "train_speed(iter/s)": 0.670741 }, { "epoch": 1.5577738742984448, "grad_norm": 4.2180657386779785, "learning_rate": 7.791747650542311e-05, "loss": 2.4024936676025392, "memory(GiB)": 72.85, "step": 36360, "token_acc": 0.45, "train_speed(iter/s)": 0.670736 }, { "epoch": 1.5579880896276936, "grad_norm": 5.129955768585205, "learning_rate": 7.791189319373585e-05, "loss": 2.182939338684082, "memory(GiB)": 72.85, "step": 36365, "token_acc": 0.5163636363636364, "train_speed(iter/s)": 0.670717 }, { "epoch": 1.5582023049569427, "grad_norm": 3.50677227973938, "learning_rate": 7.790630937639844e-05, "loss": 2.3045299530029295, "memory(GiB)": 72.85, "step": 36370, "token_acc": 0.5527156549520766, "train_speed(iter/s)": 0.670709 }, { "epoch": 1.5584165202861917, "grad_norm": 3.7456178665161133, "learning_rate": 7.790072505351207e-05, "loss": 2.367877388000488, "memory(GiB)": 72.85, "step": 36375, "token_acc": 0.4944649446494465, "train_speed(iter/s)": 0.670709 }, { "epoch": 1.5586307356154405, "grad_norm": 5.002495288848877, "learning_rate": 7.789514022517789e-05, "loss": 2.1911243438720702, "memory(GiB)": 72.85, "step": 36380, "token_acc": 0.5318352059925093, "train_speed(iter/s)": 0.670713 }, { "epoch": 1.5588449509446896, "grad_norm": 4.583559513092041, "learning_rate": 7.788955489149708e-05, "loss": 2.113875961303711, "memory(GiB)": 72.85, "step": 36385, "token_acc": 0.5372168284789643, "train_speed(iter/s)": 0.670705 }, { "epoch": 1.5590591662739386, "grad_norm": 6.086052417755127, "learning_rate": 7.78839690525708e-05, "loss": 2.598947525024414, "memory(GiB)": 72.85, "step": 36390, "token_acc": 0.4478114478114478, "train_speed(iter/s)": 0.670722 }, { "epoch": 1.5592733816031874, "grad_norm": 3.9887306690216064, "learning_rate": 7.787838270850028e-05, "loss": 2.069078826904297, "memory(GiB)": 72.85, "step": 36395, 
"token_acc": 0.5017921146953405, "train_speed(iter/s)": 0.670749 }, { "epoch": 1.5594875969324364, "grad_norm": 5.080212116241455, "learning_rate": 7.787279585938671e-05, "loss": 2.354537010192871, "memory(GiB)": 72.85, "step": 36400, "token_acc": 0.5, "train_speed(iter/s)": 0.670773 }, { "epoch": 1.5597018122616855, "grad_norm": 3.8769662380218506, "learning_rate": 7.786720850533129e-05, "loss": 2.4531444549560546, "memory(GiB)": 72.85, "step": 36405, "token_acc": 0.47896440129449835, "train_speed(iter/s)": 0.670789 }, { "epoch": 1.5599160275909343, "grad_norm": 3.963301658630371, "learning_rate": 7.786162064643525e-05, "loss": 2.5634742736816407, "memory(GiB)": 72.85, "step": 36410, "token_acc": 0.4807017543859649, "train_speed(iter/s)": 0.670802 }, { "epoch": 1.5601302429201833, "grad_norm": 4.5645904541015625, "learning_rate": 7.785603228279981e-05, "loss": 2.4996734619140626, "memory(GiB)": 72.85, "step": 36415, "token_acc": 0.48846153846153845, "train_speed(iter/s)": 0.67078 }, { "epoch": 1.5603444582494324, "grad_norm": 4.43815803527832, "learning_rate": 7.785044341452623e-05, "loss": 2.306351661682129, "memory(GiB)": 72.85, "step": 36420, "token_acc": 0.49834983498349833, "train_speed(iter/s)": 0.670781 }, { "epoch": 1.5605586735786812, "grad_norm": 4.64076042175293, "learning_rate": 7.784485404171573e-05, "loss": 2.2032779693603515, "memory(GiB)": 72.85, "step": 36425, "token_acc": 0.531986531986532, "train_speed(iter/s)": 0.670793 }, { "epoch": 1.5607728889079302, "grad_norm": 3.4431774616241455, "learning_rate": 7.783926416446957e-05, "loss": 2.2620594024658205, "memory(GiB)": 72.85, "step": 36430, "token_acc": 0.4873417721518987, "train_speed(iter/s)": 0.670809 }, { "epoch": 1.5609871042371792, "grad_norm": 3.4796156883239746, "learning_rate": 7.783367378288903e-05, "loss": 2.602540969848633, "memory(GiB)": 72.85, "step": 36435, "token_acc": 0.4940119760479042, "train_speed(iter/s)": 0.670809 }, { "epoch": 1.561201319566428, "grad_norm": 
5.609246253967285, "learning_rate": 7.782808289707538e-05, "loss": 2.5754182815551756, "memory(GiB)": 72.85, "step": 36440, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.670813 }, { "epoch": 1.561415534895677, "grad_norm": 5.993564128875732, "learning_rate": 7.782249150712992e-05, "loss": 2.4889949798583983, "memory(GiB)": 72.85, "step": 36445, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.670817 }, { "epoch": 1.5616297502249261, "grad_norm": 8.155011177062988, "learning_rate": 7.781689961315391e-05, "loss": 2.0957836151123046, "memory(GiB)": 72.85, "step": 36450, "token_acc": 0.5176991150442478, "train_speed(iter/s)": 0.670832 }, { "epoch": 1.561843965554175, "grad_norm": 3.6989388465881348, "learning_rate": 7.781130721524866e-05, "loss": 2.4007137298583983, "memory(GiB)": 72.85, "step": 36455, "token_acc": 0.484149855907781, "train_speed(iter/s)": 0.670838 }, { "epoch": 1.562058180883424, "grad_norm": 3.7878994941711426, "learning_rate": 7.780571431351551e-05, "loss": 2.5430599212646485, "memory(GiB)": 72.85, "step": 36460, "token_acc": 0.4738675958188153, "train_speed(iter/s)": 0.670838 }, { "epoch": 1.562272396212673, "grad_norm": 3.384744882583618, "learning_rate": 7.780012090805575e-05, "loss": 2.2070938110351563, "memory(GiB)": 72.85, "step": 36465, "token_acc": 0.4964788732394366, "train_speed(iter/s)": 0.670847 }, { "epoch": 1.5624866115419218, "grad_norm": 4.782304286956787, "learning_rate": 7.779452699897072e-05, "loss": 2.8185474395751955, "memory(GiB)": 72.85, "step": 36470, "token_acc": 0.44126984126984126, "train_speed(iter/s)": 0.670853 }, { "epoch": 1.562700826871171, "grad_norm": 3.7564051151275635, "learning_rate": 7.778893258636177e-05, "loss": 2.300713539123535, "memory(GiB)": 72.85, "step": 36475, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.670866 }, { "epoch": 1.5629150422004199, "grad_norm": 3.1100215911865234, "learning_rate": 7.778333767033024e-05, "loss": 2.4826257705688475, "memory(GiB)": 72.85, 
"step": 36480, "token_acc": 0.4843205574912892, "train_speed(iter/s)": 0.670879 }, { "epoch": 1.5631292575296687, "grad_norm": 3.2519538402557373, "learning_rate": 7.777774225097748e-05, "loss": 2.5870843887329102, "memory(GiB)": 72.85, "step": 36485, "token_acc": 0.4678362573099415, "train_speed(iter/s)": 0.670896 }, { "epoch": 1.563343472858918, "grad_norm": 4.009922981262207, "learning_rate": 7.777214632840485e-05, "loss": 2.045972442626953, "memory(GiB)": 72.85, "step": 36490, "token_acc": 0.5342465753424658, "train_speed(iter/s)": 0.67087 }, { "epoch": 1.5635576881881668, "grad_norm": 4.814267635345459, "learning_rate": 7.776654990271375e-05, "loss": 2.4376617431640626, "memory(GiB)": 72.85, "step": 36495, "token_acc": 0.4781144781144781, "train_speed(iter/s)": 0.67088 }, { "epoch": 1.5637719035174156, "grad_norm": 3.2633090019226074, "learning_rate": 7.776095297400554e-05, "loss": 2.2864700317382813, "memory(GiB)": 72.85, "step": 36500, "token_acc": 0.5031055900621118, "train_speed(iter/s)": 0.670888 }, { "epoch": 1.5637719035174156, "eval_loss": 1.9887882471084595, "eval_runtime": 15.5857, "eval_samples_per_second": 6.416, "eval_steps_per_second": 6.416, "eval_token_acc": 0.515748031496063, "step": 36500 }, { "epoch": 1.5639861188466648, "grad_norm": 5.398926258087158, "learning_rate": 7.775535554238163e-05, "loss": 2.4201826095581054, "memory(GiB)": 72.85, "step": 36505, "token_acc": 0.5138755980861244, "train_speed(iter/s)": 0.670668 }, { "epoch": 1.5642003341759136, "grad_norm": 4.347439289093018, "learning_rate": 7.774975760794343e-05, "loss": 2.412152099609375, "memory(GiB)": 72.85, "step": 36510, "token_acc": 0.5033783783783784, "train_speed(iter/s)": 0.670668 }, { "epoch": 1.5644145495051625, "grad_norm": 4.0913262367248535, "learning_rate": 7.774415917079232e-05, "loss": 2.5734426498413088, "memory(GiB)": 72.85, "step": 36515, "token_acc": 0.52, "train_speed(iter/s)": 0.670687 }, { "epoch": 1.5646287648344117, "grad_norm": 4.533064842224121, 
"learning_rate": 7.773856023102975e-05, "loss": 2.4649852752685546, "memory(GiB)": 72.85, "step": 36520, "token_acc": 0.5045317220543807, "train_speed(iter/s)": 0.670679 }, { "epoch": 1.5648429801636605, "grad_norm": 3.670192003250122, "learning_rate": 7.773296078875714e-05, "loss": 2.219274711608887, "memory(GiB)": 72.85, "step": 36525, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.670693 }, { "epoch": 1.5650571954929093, "grad_norm": 4.102992534637451, "learning_rate": 7.772736084407593e-05, "loss": 2.204111099243164, "memory(GiB)": 72.85, "step": 36530, "token_acc": 0.5018315018315018, "train_speed(iter/s)": 0.670695 }, { "epoch": 1.5652714108221586, "grad_norm": 4.076228141784668, "learning_rate": 7.772176039708758e-05, "loss": 2.474593925476074, "memory(GiB)": 72.85, "step": 36535, "token_acc": 0.4588235294117647, "train_speed(iter/s)": 0.670693 }, { "epoch": 1.5654856261514074, "grad_norm": 3.974236011505127, "learning_rate": 7.771615944789351e-05, "loss": 2.370111083984375, "memory(GiB)": 72.85, "step": 36540, "token_acc": 0.5133531157270029, "train_speed(iter/s)": 0.670685 }, { "epoch": 1.5656998414806562, "grad_norm": 3.568420648574829, "learning_rate": 7.771055799659523e-05, "loss": 2.419327735900879, "memory(GiB)": 72.85, "step": 36545, "token_acc": 0.488135593220339, "train_speed(iter/s)": 0.670678 }, { "epoch": 1.5659140568099055, "grad_norm": 5.592650890350342, "learning_rate": 7.77049560432942e-05, "loss": 2.4884946823120115, "memory(GiB)": 72.85, "step": 36550, "token_acc": 0.5048231511254019, "train_speed(iter/s)": 0.670689 }, { "epoch": 1.5661282721391543, "grad_norm": 4.404977798461914, "learning_rate": 7.769935358809189e-05, "loss": 2.3851242065429688, "memory(GiB)": 72.85, "step": 36555, "token_acc": 0.5074183976261127, "train_speed(iter/s)": 0.670699 }, { "epoch": 1.566342487468403, "grad_norm": 3.571415662765503, "learning_rate": 7.769375063108981e-05, "loss": 2.366683769226074, "memory(GiB)": 72.85, "step": 36560, "token_acc": 
0.473972602739726, "train_speed(iter/s)": 0.670704 }, { "epoch": 1.5665567027976524, "grad_norm": 4.413965225219727, "learning_rate": 7.768814717238946e-05, "loss": 2.547347068786621, "memory(GiB)": 72.85, "step": 36565, "token_acc": 0.46381578947368424, "train_speed(iter/s)": 0.670694 }, { "epoch": 1.5667709181269012, "grad_norm": 3.650796890258789, "learning_rate": 7.768254321209234e-05, "loss": 2.2651695251464843, "memory(GiB)": 72.85, "step": 36570, "token_acc": 0.532608695652174, "train_speed(iter/s)": 0.6707 }, { "epoch": 1.56698513345615, "grad_norm": 4.833939075469971, "learning_rate": 7.76769387503e-05, "loss": 2.2287235260009766, "memory(GiB)": 72.85, "step": 36575, "token_acc": 0.5184049079754601, "train_speed(iter/s)": 0.670697 }, { "epoch": 1.5671993487853992, "grad_norm": 4.626119136810303, "learning_rate": 7.767133378711394e-05, "loss": 2.337661361694336, "memory(GiB)": 72.85, "step": 36580, "token_acc": 0.5247524752475248, "train_speed(iter/s)": 0.670692 }, { "epoch": 1.567413564114648, "grad_norm": 3.9643938541412354, "learning_rate": 7.76657283226357e-05, "loss": 2.2881137847900392, "memory(GiB)": 72.85, "step": 36585, "token_acc": 0.5069444444444444, "train_speed(iter/s)": 0.670682 }, { "epoch": 1.5676277794438969, "grad_norm": 3.8889095783233643, "learning_rate": 7.766012235696687e-05, "loss": 2.612729072570801, "memory(GiB)": 72.85, "step": 36590, "token_acc": 0.4485981308411215, "train_speed(iter/s)": 0.670701 }, { "epoch": 1.5678419947731461, "grad_norm": 5.141719818115234, "learning_rate": 7.765451589020896e-05, "loss": 2.264612579345703, "memory(GiB)": 72.85, "step": 36595, "token_acc": 0.47635135135135137, "train_speed(iter/s)": 0.670725 }, { "epoch": 1.568056210102395, "grad_norm": 4.244338035583496, "learning_rate": 7.764890892246355e-05, "loss": 2.1079971313476564, "memory(GiB)": 72.85, "step": 36600, "token_acc": 0.5232974910394266, "train_speed(iter/s)": 0.670729 }, { "epoch": 1.5682704254316437, "grad_norm": 6.473599910736084, 
"learning_rate": 7.764330145383221e-05, "loss": 2.5390806198120117, "memory(GiB)": 72.85, "step": 36605, "token_acc": 0.4618181818181818, "train_speed(iter/s)": 0.670739 }, { "epoch": 1.568484640760893, "grad_norm": 4.154105186462402, "learning_rate": 7.763769348441654e-05, "loss": 2.32181453704834, "memory(GiB)": 72.85, "step": 36610, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.670741 }, { "epoch": 1.5686988560901418, "grad_norm": 3.7635622024536133, "learning_rate": 7.763208501431813e-05, "loss": 2.415091133117676, "memory(GiB)": 72.85, "step": 36615, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.670725 }, { "epoch": 1.5689130714193906, "grad_norm": 3.804987668991089, "learning_rate": 7.762647604363857e-05, "loss": 2.3982540130615235, "memory(GiB)": 72.85, "step": 36620, "token_acc": 0.4875, "train_speed(iter/s)": 0.670693 }, { "epoch": 1.5691272867486399, "grad_norm": 4.743718147277832, "learning_rate": 7.762086657247948e-05, "loss": 2.3382951736450197, "memory(GiB)": 72.85, "step": 36625, "token_acc": 0.4842105263157895, "train_speed(iter/s)": 0.670714 }, { "epoch": 1.5693415020778887, "grad_norm": 4.159206867218018, "learning_rate": 7.76152566009425e-05, "loss": 2.415788269042969, "memory(GiB)": 72.85, "step": 36630, "token_acc": 0.488135593220339, "train_speed(iter/s)": 0.670729 }, { "epoch": 1.5695557174071375, "grad_norm": 3.4685215950012207, "learning_rate": 7.760964612912923e-05, "loss": 2.3404495239257814, "memory(GiB)": 72.85, "step": 36635, "token_acc": 0.4746376811594203, "train_speed(iter/s)": 0.670705 }, { "epoch": 1.5697699327363868, "grad_norm": 4.599617958068848, "learning_rate": 7.760403515714132e-05, "loss": 2.345783996582031, "memory(GiB)": 72.85, "step": 36640, "token_acc": 0.4965034965034965, "train_speed(iter/s)": 0.670686 }, { "epoch": 1.5699841480656356, "grad_norm": 3.5261974334716797, "learning_rate": 7.759842368508043e-05, "loss": 2.2680212020874024, "memory(GiB)": 72.85, "step": 36645, "token_acc": 
0.5239616613418531, "train_speed(iter/s)": 0.670671 }, { "epoch": 1.5701983633948844, "grad_norm": 4.747467994689941, "learning_rate": 7.759281171304817e-05, "loss": 2.2716339111328123, "memory(GiB)": 72.85, "step": 36650, "token_acc": 0.5201465201465202, "train_speed(iter/s)": 0.670679 }, { "epoch": 1.5704125787241336, "grad_norm": 5.631164073944092, "learning_rate": 7.758719924114627e-05, "loss": 2.33218994140625, "memory(GiB)": 72.85, "step": 36655, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.670688 }, { "epoch": 1.5706267940533825, "grad_norm": 3.6728663444519043, "learning_rate": 7.758158626947638e-05, "loss": 2.2753168106079102, "memory(GiB)": 72.85, "step": 36660, "token_acc": 0.5145985401459854, "train_speed(iter/s)": 0.670697 }, { "epoch": 1.5708410093826313, "grad_norm": 3.522390604019165, "learning_rate": 7.757597279814017e-05, "loss": 2.31978702545166, "memory(GiB)": 72.85, "step": 36665, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.670688 }, { "epoch": 1.5710552247118805, "grad_norm": 3.1098172664642334, "learning_rate": 7.757035882723936e-05, "loss": 2.178498458862305, "memory(GiB)": 72.85, "step": 36670, "token_acc": 0.54, "train_speed(iter/s)": 0.670687 }, { "epoch": 1.5712694400411293, "grad_norm": 3.9216527938842773, "learning_rate": 7.756474435687562e-05, "loss": 2.520961570739746, "memory(GiB)": 72.85, "step": 36675, "token_acc": 0.4664310954063604, "train_speed(iter/s)": 0.670706 }, { "epoch": 1.5714836553703782, "grad_norm": 4.420570373535156, "learning_rate": 7.755912938715069e-05, "loss": 2.490346336364746, "memory(GiB)": 72.85, "step": 36680, "token_acc": 0.45794392523364486, "train_speed(iter/s)": 0.670733 }, { "epoch": 1.5716978706996274, "grad_norm": 4.572836399078369, "learning_rate": 7.755351391816626e-05, "loss": 2.5547088623046874, "memory(GiB)": 72.85, "step": 36685, "token_acc": 0.4940828402366864, "train_speed(iter/s)": 0.670737 }, { "epoch": 1.5719120860288762, "grad_norm": 3.2554330825805664, 
"learning_rate": 7.754789795002409e-05, "loss": 2.4527896881103515, "memory(GiB)": 72.85, "step": 36690, "token_acc": 0.4801223241590214, "train_speed(iter/s)": 0.670733 }, { "epoch": 1.572126301358125, "grad_norm": 3.718360185623169, "learning_rate": 7.754228148282591e-05, "loss": 2.4529722213745115, "memory(GiB)": 72.85, "step": 36695, "token_acc": 0.46130952380952384, "train_speed(iter/s)": 0.670736 }, { "epoch": 1.5723405166873743, "grad_norm": 4.021354675292969, "learning_rate": 7.753666451667347e-05, "loss": 2.500881385803223, "memory(GiB)": 72.85, "step": 36700, "token_acc": 0.45517241379310347, "train_speed(iter/s)": 0.670749 }, { "epoch": 1.572554732016623, "grad_norm": 3.5949790477752686, "learning_rate": 7.753104705166851e-05, "loss": 2.119292640686035, "memory(GiB)": 72.85, "step": 36705, "token_acc": 0.5733333333333334, "train_speed(iter/s)": 0.670763 }, { "epoch": 1.572768947345872, "grad_norm": 4.38385009765625, "learning_rate": 7.75254290879128e-05, "loss": 2.467277717590332, "memory(GiB)": 72.85, "step": 36710, "token_acc": 0.4707792207792208, "train_speed(iter/s)": 0.67077 }, { "epoch": 1.5729831626751212, "grad_norm": 3.7575223445892334, "learning_rate": 7.751981062550813e-05, "loss": 2.264936065673828, "memory(GiB)": 72.85, "step": 36715, "token_acc": 0.5045871559633027, "train_speed(iter/s)": 0.670751 }, { "epoch": 1.57319737800437, "grad_norm": 8.293753623962402, "learning_rate": 7.751419166455629e-05, "loss": 2.182003402709961, "memory(GiB)": 72.85, "step": 36720, "token_acc": 0.5181159420289855, "train_speed(iter/s)": 0.67076 }, { "epoch": 1.5734115933336188, "grad_norm": 4.956820487976074, "learning_rate": 7.750857220515902e-05, "loss": 2.46185188293457, "memory(GiB)": 72.85, "step": 36725, "token_acc": 0.4952978056426332, "train_speed(iter/s)": 0.670781 }, { "epoch": 1.573625808662868, "grad_norm": 4.632805824279785, "learning_rate": 7.75029522474182e-05, "loss": 2.3761964797973634, "memory(GiB)": 72.85, "step": 36730, "token_acc": 
0.4819672131147541, "train_speed(iter/s)": 0.670797 }, { "epoch": 1.5738400239921169, "grad_norm": 3.895047187805176, "learning_rate": 7.749733179143557e-05, "loss": 2.359082794189453, "memory(GiB)": 72.85, "step": 36735, "token_acc": 0.5133079847908745, "train_speed(iter/s)": 0.670816 }, { "epoch": 1.5740542393213657, "grad_norm": 3.6522216796875, "learning_rate": 7.7491710837313e-05, "loss": 2.2614667892456053, "memory(GiB)": 72.85, "step": 36740, "token_acc": 0.49044585987261147, "train_speed(iter/s)": 0.670814 }, { "epoch": 1.574268454650615, "grad_norm": 4.883214473724365, "learning_rate": 7.748608938515228e-05, "loss": 2.3607778549194336, "memory(GiB)": 72.85, "step": 36745, "token_acc": 0.4819672131147541, "train_speed(iter/s)": 0.670812 }, { "epoch": 1.5744826699798637, "grad_norm": 8.971433639526367, "learning_rate": 7.748046743505529e-05, "loss": 2.53350830078125, "memory(GiB)": 72.85, "step": 36750, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.670797 }, { "epoch": 1.5746968853091126, "grad_norm": 3.9642090797424316, "learning_rate": 7.747484498712383e-05, "loss": 2.4010383605957033, "memory(GiB)": 72.85, "step": 36755, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.670795 }, { "epoch": 1.5749111006383618, "grad_norm": 4.9939961433410645, "learning_rate": 7.746922204145979e-05, "loss": 2.219578742980957, "memory(GiB)": 72.85, "step": 36760, "token_acc": 0.5313653136531366, "train_speed(iter/s)": 0.670799 }, { "epoch": 1.5751253159676106, "grad_norm": 5.430907249450684, "learning_rate": 7.746359859816503e-05, "loss": 2.4846397399902345, "memory(GiB)": 72.85, "step": 36765, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.670806 }, { "epoch": 1.5753395312968594, "grad_norm": 3.9282212257385254, "learning_rate": 7.745797465734141e-05, "loss": 2.4109493255615235, "memory(GiB)": 72.85, "step": 36770, "token_acc": 0.4892086330935252, "train_speed(iter/s)": 0.670815 }, { "epoch": 1.5755537466261087, "grad_norm": 
3.0586905479431152, "learning_rate": 7.745235021909082e-05, "loss": 2.4245346069335936, "memory(GiB)": 72.85, "step": 36775, "token_acc": 0.4810126582278481, "train_speed(iter/s)": 0.670811 }, { "epoch": 1.5757679619553575, "grad_norm": 5.128255367279053, "learning_rate": 7.744672528351515e-05, "loss": 2.1243413925170898, "memory(GiB)": 72.85, "step": 36780, "token_acc": 0.505338078291815, "train_speed(iter/s)": 0.6708 }, { "epoch": 1.5759821772846063, "grad_norm": 3.241504669189453, "learning_rate": 7.744109985071632e-05, "loss": 2.286022186279297, "memory(GiB)": 72.85, "step": 36785, "token_acc": 0.5278688524590164, "train_speed(iter/s)": 0.670806 }, { "epoch": 1.5761963926138556, "grad_norm": 4.419157028198242, "learning_rate": 7.743547392079622e-05, "loss": 2.3364852905273437, "memory(GiB)": 72.85, "step": 36790, "token_acc": 0.5054545454545455, "train_speed(iter/s)": 0.670828 }, { "epoch": 1.5764106079431044, "grad_norm": 4.617485046386719, "learning_rate": 7.742984749385678e-05, "loss": 2.306952476501465, "memory(GiB)": 72.85, "step": 36795, "token_acc": 0.5296610169491526, "train_speed(iter/s)": 0.670825 }, { "epoch": 1.5766248232723532, "grad_norm": 3.6383161544799805, "learning_rate": 7.74242205699999e-05, "loss": 2.220305633544922, "memory(GiB)": 72.85, "step": 36800, "token_acc": 0.517799352750809, "train_speed(iter/s)": 0.670839 }, { "epoch": 1.5768390386016025, "grad_norm": 4.170210361480713, "learning_rate": 7.741859314932756e-05, "loss": 2.486735153198242, "memory(GiB)": 72.85, "step": 36805, "token_acc": 0.48788927335640137, "train_speed(iter/s)": 0.670837 }, { "epoch": 1.5770532539308513, "grad_norm": 3.940816879272461, "learning_rate": 7.741296523194165e-05, "loss": 2.319932556152344, "memory(GiB)": 72.85, "step": 36810, "token_acc": 0.5017301038062284, "train_speed(iter/s)": 0.670828 }, { "epoch": 1.5772674692601003, "grad_norm": 4.372797966003418, "learning_rate": 7.74073368179442e-05, "loss": 1.9615798950195313, "memory(GiB)": 72.85, "step": 
36815, "token_acc": 0.5769230769230769, "train_speed(iter/s)": 0.670832 }, { "epoch": 1.5774816845893493, "grad_norm": 4.944199085235596, "learning_rate": 7.740170790743711e-05, "loss": 2.543103790283203, "memory(GiB)": 72.85, "step": 36820, "token_acc": 0.43564356435643564, "train_speed(iter/s)": 0.670822 }, { "epoch": 1.5776958999185982, "grad_norm": 3.9093053340911865, "learning_rate": 7.73960785005224e-05, "loss": 2.0724916458129883, "memory(GiB)": 72.85, "step": 36825, "token_acc": 0.5615384615384615, "train_speed(iter/s)": 0.670828 }, { "epoch": 1.5779101152478472, "grad_norm": 4.026191711425781, "learning_rate": 7.7390448597302e-05, "loss": 2.724164581298828, "memory(GiB)": 72.85, "step": 36830, "token_acc": 0.4420731707317073, "train_speed(iter/s)": 0.670846 }, { "epoch": 1.5781243305770962, "grad_norm": 3.610010862350464, "learning_rate": 7.738481819787794e-05, "loss": 2.2586086273193358, "memory(GiB)": 72.85, "step": 36835, "token_acc": 0.5379061371841155, "train_speed(iter/s)": 0.670854 }, { "epoch": 1.578338545906345, "grad_norm": 3.6750388145446777, "learning_rate": 7.737918730235221e-05, "loss": 2.20544319152832, "memory(GiB)": 72.85, "step": 36840, "token_acc": 0.4976303317535545, "train_speed(iter/s)": 0.670861 }, { "epoch": 1.578552761235594, "grad_norm": 4.233132839202881, "learning_rate": 7.737355591082682e-05, "loss": 2.0830223083496096, "memory(GiB)": 72.85, "step": 36845, "token_acc": 0.5102739726027398, "train_speed(iter/s)": 0.670862 }, { "epoch": 1.578766976564843, "grad_norm": 7.5171732902526855, "learning_rate": 7.736792402340378e-05, "loss": 2.186372184753418, "memory(GiB)": 72.85, "step": 36850, "token_acc": 0.5359712230215827, "train_speed(iter/s)": 0.670843 }, { "epoch": 1.578981191894092, "grad_norm": 3.892314910888672, "learning_rate": 7.736229164018514e-05, "loss": 2.261129379272461, "memory(GiB)": 72.85, "step": 36855, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.670837 }, { "epoch": 1.579195407223341, "grad_norm": 
4.6178364753723145, "learning_rate": 7.735665876127289e-05, "loss": 2.195208740234375, "memory(GiB)": 72.85, "step": 36860, "token_acc": 0.5215686274509804, "train_speed(iter/s)": 0.670843 }, { "epoch": 1.57940962255259, "grad_norm": 6.810202598571777, "learning_rate": 7.735102538676914e-05, "loss": 2.256536293029785, "memory(GiB)": 72.85, "step": 36865, "token_acc": 0.528957528957529, "train_speed(iter/s)": 0.670846 }, { "epoch": 1.5796238378818388, "grad_norm": 5.082344055175781, "learning_rate": 7.734539151677587e-05, "loss": 2.3095911026000975, "memory(GiB)": 72.85, "step": 36870, "token_acc": 0.49477351916376305, "train_speed(iter/s)": 0.670856 }, { "epoch": 1.5798380532110878, "grad_norm": 7.076399326324463, "learning_rate": 7.733975715139521e-05, "loss": 2.6154518127441406, "memory(GiB)": 72.85, "step": 36875, "token_acc": 0.4766081871345029, "train_speed(iter/s)": 0.670852 }, { "epoch": 1.5800522685403369, "grad_norm": 5.124238014221191, "learning_rate": 7.733412229072918e-05, "loss": 2.2038570404052735, "memory(GiB)": 72.85, "step": 36880, "token_acc": 0.5375939849624061, "train_speed(iter/s)": 0.670846 }, { "epoch": 1.5802664838695857, "grad_norm": 3.348217010498047, "learning_rate": 7.732848693487991e-05, "loss": 2.397859573364258, "memory(GiB)": 72.85, "step": 36885, "token_acc": 0.47435897435897434, "train_speed(iter/s)": 0.670834 }, { "epoch": 1.5804806991988347, "grad_norm": 4.066976070404053, "learning_rate": 7.732285108394944e-05, "loss": 2.635219192504883, "memory(GiB)": 72.85, "step": 36890, "token_acc": 0.45425867507886436, "train_speed(iter/s)": 0.67085 }, { "epoch": 1.5806949145280837, "grad_norm": 3.25182843208313, "learning_rate": 7.73172147380399e-05, "loss": 2.391396713256836, "memory(GiB)": 72.85, "step": 36895, "token_acc": 0.5086505190311419, "train_speed(iter/s)": 0.670858 }, { "epoch": 1.5809091298573326, "grad_norm": 4.09330940246582, "learning_rate": 7.73115778972534e-05, "loss": 2.494657516479492, "memory(GiB)": 72.85, "step": 
36900, "token_acc": 0.45103857566765576, "train_speed(iter/s)": 0.670866 }, { "epoch": 1.5811233451865816, "grad_norm": 3.2221884727478027, "learning_rate": 7.730594056169203e-05, "loss": 2.316394805908203, "memory(GiB)": 72.85, "step": 36905, "token_acc": 0.4895833333333333, "train_speed(iter/s)": 0.670851 }, { "epoch": 1.5813375605158306, "grad_norm": 5.4705376625061035, "learning_rate": 7.730030273145793e-05, "loss": 2.4551525115966797, "memory(GiB)": 72.85, "step": 36910, "token_acc": 0.4981132075471698, "train_speed(iter/s)": 0.670859 }, { "epoch": 1.5815517758450794, "grad_norm": 5.128872871398926, "learning_rate": 7.729466440665325e-05, "loss": 2.395804595947266, "memory(GiB)": 72.85, "step": 36915, "token_acc": 0.48089171974522293, "train_speed(iter/s)": 0.670873 }, { "epoch": 1.5817659911743285, "grad_norm": 4.73221492767334, "learning_rate": 7.728902558738011e-05, "loss": 2.407915496826172, "memory(GiB)": 72.85, "step": 36920, "token_acc": 0.45878136200716846, "train_speed(iter/s)": 0.670884 }, { "epoch": 1.5819802065035775, "grad_norm": 3.2178122997283936, "learning_rate": 7.728338627374068e-05, "loss": 2.3146951675415037, "memory(GiB)": 72.85, "step": 36925, "token_acc": 0.5544217687074829, "train_speed(iter/s)": 0.670878 }, { "epoch": 1.5821944218328263, "grad_norm": 4.862575054168701, "learning_rate": 7.727774646583711e-05, "loss": 2.511076736450195, "memory(GiB)": 72.85, "step": 36930, "token_acc": 0.5068493150684932, "train_speed(iter/s)": 0.670903 }, { "epoch": 1.5824086371620754, "grad_norm": 3.6361567974090576, "learning_rate": 7.727210616377157e-05, "loss": 2.23775577545166, "memory(GiB)": 72.85, "step": 36935, "token_acc": 0.5088967971530249, "train_speed(iter/s)": 0.670894 }, { "epoch": 1.5826228524913244, "grad_norm": 4.020750045776367, "learning_rate": 7.726646536764625e-05, "loss": 2.282794189453125, "memory(GiB)": 72.85, "step": 36940, "token_acc": 0.5382059800664452, "train_speed(iter/s)": 0.670905 }, { "epoch": 1.5828370678205732, 
"grad_norm": 5.061667442321777, "learning_rate": 7.726082407756332e-05, "loss": 2.3971471786499023, "memory(GiB)": 72.85, "step": 36945, "token_acc": 0.506896551724138, "train_speed(iter/s)": 0.670922 }, { "epoch": 1.5830512831498222, "grad_norm": 3.374929666519165, "learning_rate": 7.7255182293625e-05, "loss": 2.3662944793701173, "memory(GiB)": 72.85, "step": 36950, "token_acc": 0.5072046109510087, "train_speed(iter/s)": 0.67092 }, { "epoch": 1.5832654984790713, "grad_norm": 4.766103267669678, "learning_rate": 7.724954001593348e-05, "loss": 2.1572492599487303, "memory(GiB)": 72.85, "step": 36955, "token_acc": 0.5165289256198347, "train_speed(iter/s)": 0.670934 }, { "epoch": 1.58347971380832, "grad_norm": 3.3561036586761475, "learning_rate": 7.724502583834665e-05, "loss": 2.325917434692383, "memory(GiB)": 72.85, "step": 36960, "token_acc": 0.5051194539249146, "train_speed(iter/s)": 0.670965 }, { "epoch": 1.5836939291375691, "grad_norm": 5.92710542678833, "learning_rate": 7.723938267215698e-05, "loss": 2.2824575424194338, "memory(GiB)": 72.85, "step": 36965, "token_acc": 0.484251968503937, "train_speed(iter/s)": 0.670974 }, { "epoch": 1.5839081444668182, "grad_norm": 4.557084560394287, "learning_rate": 7.723373901250032e-05, "loss": 2.39825553894043, "memory(GiB)": 72.85, "step": 36970, "token_acc": 0.47720364741641336, "train_speed(iter/s)": 0.670963 }, { "epoch": 1.584122359796067, "grad_norm": 4.36439323425293, "learning_rate": 7.722809485947895e-05, "loss": 2.4110897064208983, "memory(GiB)": 72.85, "step": 36975, "token_acc": 0.45263157894736844, "train_speed(iter/s)": 0.670967 }, { "epoch": 1.584336575125316, "grad_norm": 4.531524181365967, "learning_rate": 7.72224502131951e-05, "loss": 2.0580219268798827, "memory(GiB)": 72.85, "step": 36980, "token_acc": 0.49603174603174605, "train_speed(iter/s)": 0.67097 }, { "epoch": 1.584550790454565, "grad_norm": 5.716397285461426, "learning_rate": 7.721680507375102e-05, "loss": 2.5972564697265623, "memory(GiB)": 72.85, 
"step": 36985, "token_acc": 0.4482758620689655, "train_speed(iter/s)": 0.67097 }, { "epoch": 1.5847650057838139, "grad_norm": 3.5383522510528564, "learning_rate": 7.721115944124897e-05, "loss": 2.498358154296875, "memory(GiB)": 72.85, "step": 36990, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.670986 }, { "epoch": 1.5849792211130629, "grad_norm": 4.610274314880371, "learning_rate": 7.720551331579126e-05, "loss": 2.5820285797119142, "memory(GiB)": 72.85, "step": 36995, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.670982 }, { "epoch": 1.585193436442312, "grad_norm": 5.188425064086914, "learning_rate": 7.719986669748013e-05, "loss": 2.385128211975098, "memory(GiB)": 72.85, "step": 37000, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.670997 }, { "epoch": 1.585193436442312, "eval_loss": 2.075681686401367, "eval_runtime": 16.2289, "eval_samples_per_second": 6.162, "eval_steps_per_second": 6.162, "eval_token_acc": 0.49442896935933145, "step": 37000 }, { "epoch": 1.5854076517715607, "grad_norm": 4.831814289093018, "learning_rate": 7.719421958641794e-05, "loss": 2.461628723144531, "memory(GiB)": 72.85, "step": 37005, "token_acc": 0.4830590513068732, "train_speed(iter/s)": 0.670791 }, { "epoch": 1.5856218671008098, "grad_norm": 3.8766982555389404, "learning_rate": 7.718857198270692e-05, "loss": 2.2021583557128905, "memory(GiB)": 72.85, "step": 37010, "token_acc": 0.5090252707581228, "train_speed(iter/s)": 0.670795 }, { "epoch": 1.5858360824300588, "grad_norm": 4.577364444732666, "learning_rate": 7.718292388644943e-05, "loss": 2.5234378814697265, "memory(GiB)": 72.85, "step": 37015, "token_acc": 0.47157190635451507, "train_speed(iter/s)": 0.670807 }, { "epoch": 1.5860502977593076, "grad_norm": 5.89702844619751, "learning_rate": 7.717727529774777e-05, "loss": 2.3785289764404296, "memory(GiB)": 72.85, "step": 37020, "token_acc": 0.4699248120300752, "train_speed(iter/s)": 0.670827 }, { "epoch": 1.5862645130885566, "grad_norm": 
4.087315082550049, "learning_rate": 7.717162621670427e-05, "loss": 2.261242485046387, "memory(GiB)": 72.85, "step": 37025, "token_acc": 0.46785714285714286, "train_speed(iter/s)": 0.670845 }, { "epoch": 1.5864787284178057, "grad_norm": 5.07124662399292, "learning_rate": 7.716597664342127e-05, "loss": 2.2238603591918946, "memory(GiB)": 72.85, "step": 37030, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.670851 }, { "epoch": 1.5866929437470545, "grad_norm": 5.499255180358887, "learning_rate": 7.716032657800113e-05, "loss": 2.2743576049804686, "memory(GiB)": 72.85, "step": 37035, "token_acc": 0.5232558139534884, "train_speed(iter/s)": 0.670871 }, { "epoch": 1.5869071590763035, "grad_norm": 4.108824729919434, "learning_rate": 7.715467602054618e-05, "loss": 2.204466438293457, "memory(GiB)": 72.85, "step": 37040, "token_acc": 0.5274725274725275, "train_speed(iter/s)": 0.670884 }, { "epoch": 1.5871213744055526, "grad_norm": 3.254901170730591, "learning_rate": 7.714902497115881e-05, "loss": 2.450870895385742, "memory(GiB)": 72.85, "step": 37045, "token_acc": 0.5311572700296736, "train_speed(iter/s)": 0.67089 }, { "epoch": 1.5873355897348014, "grad_norm": 4.70145320892334, "learning_rate": 7.714337342994139e-05, "loss": 2.421160125732422, "memory(GiB)": 72.85, "step": 37050, "token_acc": 0.44649446494464945, "train_speed(iter/s)": 0.670908 }, { "epoch": 1.5875498050640504, "grad_norm": 3.8937180042266846, "learning_rate": 7.71377213969963e-05, "loss": 2.0504337310791017, "memory(GiB)": 72.85, "step": 37055, "token_acc": 0.5269709543568465, "train_speed(iter/s)": 0.670905 }, { "epoch": 1.5877640203932994, "grad_norm": 3.7969486713409424, "learning_rate": 7.713206887242592e-05, "loss": 2.143074798583984, "memory(GiB)": 72.85, "step": 37060, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.670912 }, { "epoch": 1.5879782357225483, "grad_norm": 3.549487590789795, "learning_rate": 7.712641585633265e-05, "loss": 2.2600269317626953, "memory(GiB)": 72.85, "step": 
37065, "token_acc": 0.5230263157894737, "train_speed(iter/s)": 0.67091 }, { "epoch": 1.5881924510517973, "grad_norm": 3.5451958179473877, "learning_rate": 7.712076234881893e-05, "loss": 2.2286144256591798, "memory(GiB)": 72.85, "step": 37070, "token_acc": 0.4966887417218543, "train_speed(iter/s)": 0.670927 }, { "epoch": 1.5884066663810463, "grad_norm": 4.383281230926514, "learning_rate": 7.711510834998714e-05, "loss": 2.4070917129516602, "memory(GiB)": 72.85, "step": 37075, "token_acc": 0.5, "train_speed(iter/s)": 0.670945 }, { "epoch": 1.5886208817102951, "grad_norm": 4.320414066314697, "learning_rate": 7.710945385993975e-05, "loss": 2.4485429763793944, "memory(GiB)": 72.85, "step": 37080, "token_acc": 0.4826254826254826, "train_speed(iter/s)": 0.670932 }, { "epoch": 1.5888350970395442, "grad_norm": 4.218962669372559, "learning_rate": 7.710379887877917e-05, "loss": 2.388189697265625, "memory(GiB)": 72.85, "step": 37085, "token_acc": 0.49137931034482757, "train_speed(iter/s)": 0.670939 }, { "epoch": 1.5890493123687932, "grad_norm": 4.753740310668945, "learning_rate": 7.709814340660784e-05, "loss": 2.4534011840820313, "memory(GiB)": 72.85, "step": 37090, "token_acc": 0.46774193548387094, "train_speed(iter/s)": 0.670942 }, { "epoch": 1.589263527698042, "grad_norm": 3.5410313606262207, "learning_rate": 7.709248744352822e-05, "loss": 2.3491064071655274, "memory(GiB)": 72.85, "step": 37095, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.670918 }, { "epoch": 1.589477743027291, "grad_norm": 4.059463024139404, "learning_rate": 7.708683098964275e-05, "loss": 2.2782398223876954, "memory(GiB)": 72.85, "step": 37100, "token_acc": 0.49466192170818507, "train_speed(iter/s)": 0.670939 }, { "epoch": 1.58969195835654, "grad_norm": 4.8099493980407715, "learning_rate": 7.708117404505397e-05, "loss": 2.542163848876953, "memory(GiB)": 72.85, "step": 37105, "token_acc": 0.4157706093189964, "train_speed(iter/s)": 0.670939 }, { "epoch": 1.589906173685789, "grad_norm": 
4.853213310241699, "learning_rate": 7.707551660986429e-05, "loss": 2.522703170776367, "memory(GiB)": 72.85, "step": 37110, "token_acc": 0.49504950495049505, "train_speed(iter/s)": 0.670933 }, { "epoch": 1.590120389015038, "grad_norm": 8.33082103729248, "learning_rate": 7.706985868417624e-05, "loss": 2.176326370239258, "memory(GiB)": 72.85, "step": 37115, "token_acc": 0.5440613026819924, "train_speed(iter/s)": 0.670905 }, { "epoch": 1.590334604344287, "grad_norm": 4.000674247741699, "learning_rate": 7.706420026809232e-05, "loss": 2.361958885192871, "memory(GiB)": 72.85, "step": 37120, "token_acc": 0.4982078853046595, "train_speed(iter/s)": 0.670912 }, { "epoch": 1.5905488196735358, "grad_norm": 5.547845840454102, "learning_rate": 7.7058541361715e-05, "loss": 2.1116970062255858, "memory(GiB)": 72.85, "step": 37125, "token_acc": 0.49407114624505927, "train_speed(iter/s)": 0.670939 }, { "epoch": 1.5907630350027848, "grad_norm": 4.767724514007568, "learning_rate": 7.705288196514682e-05, "loss": 2.342430305480957, "memory(GiB)": 72.85, "step": 37130, "token_acc": 0.49085365853658536, "train_speed(iter/s)": 0.670935 }, { "epoch": 1.5909772503320339, "grad_norm": 4.387194633483887, "learning_rate": 7.70472220784903e-05, "loss": 2.367826461791992, "memory(GiB)": 72.85, "step": 37135, "token_acc": 0.5186567164179104, "train_speed(iter/s)": 0.670945 }, { "epoch": 1.5911914656612827, "grad_norm": 4.149634838104248, "learning_rate": 7.704156170184801e-05, "loss": 2.6362478256225588, "memory(GiB)": 72.85, "step": 37140, "token_acc": 0.48773006134969327, "train_speed(iter/s)": 0.670956 }, { "epoch": 1.5914056809905317, "grad_norm": 4.08011531829834, "learning_rate": 7.703590083532244e-05, "loss": 2.727508544921875, "memory(GiB)": 72.85, "step": 37145, "token_acc": 0.47041420118343197, "train_speed(iter/s)": 0.670983 }, { "epoch": 1.5916198963197807, "grad_norm": 3.5121195316314697, "learning_rate": 7.703023947901618e-05, "loss": 2.4928979873657227, "memory(GiB)": 72.85, "step": 
37150, "token_acc": 0.5071633237822349, "train_speed(iter/s)": 0.670995 }, { "epoch": 1.5918341116490295, "grad_norm": 3.2892799377441406, "learning_rate": 7.702457763303177e-05, "loss": 2.2682167053222657, "memory(GiB)": 72.85, "step": 37155, "token_acc": 0.4892086330935252, "train_speed(iter/s)": 0.67099 }, { "epoch": 1.5920483269782786, "grad_norm": 3.660240411758423, "learning_rate": 7.701891529747178e-05, "loss": 2.517170715332031, "memory(GiB)": 72.85, "step": 37160, "token_acc": 0.48563218390804597, "train_speed(iter/s)": 0.670991 }, { "epoch": 1.5922625423075276, "grad_norm": 4.193500518798828, "learning_rate": 7.70132524724388e-05, "loss": 2.143436241149902, "memory(GiB)": 72.85, "step": 37165, "token_acc": 0.5618374558303887, "train_speed(iter/s)": 0.671002 }, { "epoch": 1.5924767576367764, "grad_norm": 4.5777106285095215, "learning_rate": 7.70075891580354e-05, "loss": 2.3299983978271483, "memory(GiB)": 72.85, "step": 37170, "token_acc": 0.5060728744939271, "train_speed(iter/s)": 0.670998 }, { "epoch": 1.5926909729660255, "grad_norm": 4.723599433898926, "learning_rate": 7.70019253543642e-05, "loss": 2.331789016723633, "memory(GiB)": 72.85, "step": 37175, "token_acc": 0.5648854961832062, "train_speed(iter/s)": 0.670971 }, { "epoch": 1.5929051882952745, "grad_norm": 3.5964951515197754, "learning_rate": 7.699626106152778e-05, "loss": 2.1362350463867186, "memory(GiB)": 72.85, "step": 37180, "token_acc": 0.5460526315789473, "train_speed(iter/s)": 0.670979 }, { "epoch": 1.5931194036245233, "grad_norm": 3.7075724601745605, "learning_rate": 7.699059627962877e-05, "loss": 2.246287536621094, "memory(GiB)": 72.85, "step": 37185, "token_acc": 0.5123674911660777, "train_speed(iter/s)": 0.670992 }, { "epoch": 1.5933336189537723, "grad_norm": 5.259133338928223, "learning_rate": 7.698493100876979e-05, "loss": 2.2179513931274415, "memory(GiB)": 72.85, "step": 37190, "token_acc": 0.540650406504065, "train_speed(iter/s)": 0.670982 }, { "epoch": 1.5935478342830214, 
"grad_norm": 3.9035584926605225, "learning_rate": 7.697926524905348e-05, "loss": 2.3117721557617186, "memory(GiB)": 72.85, "step": 37195, "token_acc": 0.5120274914089347, "train_speed(iter/s)": 0.670976 }, { "epoch": 1.5937620496122702, "grad_norm": 4.144166469573975, "learning_rate": 7.697359900058245e-05, "loss": 2.616476058959961, "memory(GiB)": 72.85, "step": 37200, "token_acc": 0.473015873015873, "train_speed(iter/s)": 0.670971 }, { "epoch": 1.5939762649415192, "grad_norm": 4.603262901306152, "learning_rate": 7.696793226345939e-05, "loss": 2.3249855041503906, "memory(GiB)": 72.85, "step": 37205, "token_acc": 0.4641509433962264, "train_speed(iter/s)": 0.670984 }, { "epoch": 1.5941904802707683, "grad_norm": 10.971495628356934, "learning_rate": 7.696226503778694e-05, "loss": 2.5547624588012696, "memory(GiB)": 72.85, "step": 37210, "token_acc": 0.4808259587020649, "train_speed(iter/s)": 0.67099 }, { "epoch": 1.594404695600017, "grad_norm": 3.1276862621307373, "learning_rate": 7.695659732366774e-05, "loss": 2.5552610397338866, "memory(GiB)": 72.85, "step": 37215, "token_acc": 0.4692556634304207, "train_speed(iter/s)": 0.670978 }, { "epoch": 1.594618910929266, "grad_norm": 6.368250370025635, "learning_rate": 7.695092912120452e-05, "loss": 2.3016916275024415, "memory(GiB)": 72.85, "step": 37220, "token_acc": 0.5266903914590747, "train_speed(iter/s)": 0.670957 }, { "epoch": 1.5948331262585151, "grad_norm": 4.390338897705078, "learning_rate": 7.694526043049995e-05, "loss": 2.340081977844238, "memory(GiB)": 72.85, "step": 37225, "token_acc": 0.4981949458483754, "train_speed(iter/s)": 0.670963 }, { "epoch": 1.595047341587764, "grad_norm": 4.200460910797119, "learning_rate": 7.693959125165666e-05, "loss": 2.6625654220581056, "memory(GiB)": 72.85, "step": 37230, "token_acc": 0.4808259587020649, "train_speed(iter/s)": 0.670955 }, { "epoch": 1.595261556917013, "grad_norm": 5.531347751617432, "learning_rate": 7.693392158477745e-05, "loss": 2.5030515670776365, "memory(GiB)": 
72.85, "step": 37235, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.67096 }, { "epoch": 1.595475772246262, "grad_norm": 4.7154622077941895, "learning_rate": 7.692825142996498e-05, "loss": 2.2832950592041015, "memory(GiB)": 72.85, "step": 37240, "token_acc": 0.5162241887905604, "train_speed(iter/s)": 0.670935 }, { "epoch": 1.5956899875755108, "grad_norm": 4.282580852508545, "learning_rate": 7.692258078732196e-05, "loss": 2.3151416778564453, "memory(GiB)": 72.85, "step": 37245, "token_acc": 0.48188405797101447, "train_speed(iter/s)": 0.670907 }, { "epoch": 1.5959042029047599, "grad_norm": 3.688023567199707, "learning_rate": 7.69169096569511e-05, "loss": 2.2847209930419923, "memory(GiB)": 72.85, "step": 37250, "token_acc": 0.5, "train_speed(iter/s)": 0.670925 }, { "epoch": 1.596118418234009, "grad_norm": 5.364178657531738, "learning_rate": 7.691123803895523e-05, "loss": 2.47569580078125, "memory(GiB)": 72.85, "step": 37255, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.670928 }, { "epoch": 1.5963326335632577, "grad_norm": 5.100982666015625, "learning_rate": 7.6905565933437e-05, "loss": 2.2087278366088867, "memory(GiB)": 72.85, "step": 37260, "token_acc": 0.5236363636363637, "train_speed(iter/s)": 0.670936 }, { "epoch": 1.5965468488925068, "grad_norm": 3.9777278900146484, "learning_rate": 7.689989334049923e-05, "loss": 2.5052253723144533, "memory(GiB)": 72.85, "step": 37265, "token_acc": 0.4766666666666667, "train_speed(iter/s)": 0.670927 }, { "epoch": 1.5967610642217558, "grad_norm": 4.028194904327393, "learning_rate": 7.689422026024464e-05, "loss": 2.361902046203613, "memory(GiB)": 72.85, "step": 37270, "token_acc": 0.49185667752442996, "train_speed(iter/s)": 0.670921 }, { "epoch": 1.5969752795510046, "grad_norm": 4.805271148681641, "learning_rate": 7.688854669277604e-05, "loss": 2.4907421112060546, "memory(GiB)": 72.85, "step": 37275, "token_acc": 0.44404332129963897, "train_speed(iter/s)": 0.6709 }, { "epoch": 1.5971894948802536, 
"grad_norm": 4.659358501434326, "learning_rate": 7.688287263819617e-05, "loss": 2.3040164947509765, "memory(GiB)": 72.85, "step": 37280, "token_acc": 0.4822485207100592, "train_speed(iter/s)": 0.670893 }, { "epoch": 1.5974037102095027, "grad_norm": 4.689207553863525, "learning_rate": 7.687719809660785e-05, "loss": 2.424998474121094, "memory(GiB)": 72.85, "step": 37285, "token_acc": 0.48, "train_speed(iter/s)": 0.67091 }, { "epoch": 1.5976179255387515, "grad_norm": 4.037234783172607, "learning_rate": 7.687152306811388e-05, "loss": 2.2813955307006837, "memory(GiB)": 72.85, "step": 37290, "token_acc": 0.5074074074074074, "train_speed(iter/s)": 0.6709 }, { "epoch": 1.5978321408680005, "grad_norm": 4.42262077331543, "learning_rate": 7.686584755281708e-05, "loss": 2.347281265258789, "memory(GiB)": 72.85, "step": 37295, "token_acc": 0.4623955431754875, "train_speed(iter/s)": 0.670905 }, { "epoch": 1.5980463561972496, "grad_norm": 3.9459424018859863, "learning_rate": 7.686017155082021e-05, "loss": 2.3652664184570313, "memory(GiB)": 72.85, "step": 37300, "token_acc": 0.476027397260274, "train_speed(iter/s)": 0.670901 }, { "epoch": 1.5982605715264984, "grad_norm": 4.608458995819092, "learning_rate": 7.685449506222616e-05, "loss": 2.1830062866210938, "memory(GiB)": 72.85, "step": 37305, "token_acc": 0.5, "train_speed(iter/s)": 0.670897 }, { "epoch": 1.5984747868557474, "grad_norm": 3.8287808895111084, "learning_rate": 7.684881808713774e-05, "loss": 2.5442363739013674, "memory(GiB)": 72.85, "step": 37310, "token_acc": 0.4856115107913669, "train_speed(iter/s)": 0.670917 }, { "epoch": 1.5986890021849964, "grad_norm": 4.222219467163086, "learning_rate": 7.684314062565779e-05, "loss": 2.5746109008789064, "memory(GiB)": 72.85, "step": 37315, "token_acc": 0.4965986394557823, "train_speed(iter/s)": 0.670946 }, { "epoch": 1.5989032175142452, "grad_norm": 4.628283977508545, "learning_rate": 7.683746267788916e-05, "loss": 2.390165328979492, "memory(GiB)": 72.85, "step": 37320, 
"token_acc": 0.45774647887323944, "train_speed(iter/s)": 0.670958 }, { "epoch": 1.5991174328434943, "grad_norm": 3.8565123081207275, "learning_rate": 7.683178424393472e-05, "loss": 2.6498350143432616, "memory(GiB)": 72.85, "step": 37325, "token_acc": 0.44565217391304346, "train_speed(iter/s)": 0.670964 }, { "epoch": 1.5993316481727433, "grad_norm": 3.9309656620025635, "learning_rate": 7.682610532389734e-05, "loss": 2.519921875, "memory(GiB)": 72.85, "step": 37330, "token_acc": 0.4523809523809524, "train_speed(iter/s)": 0.670961 }, { "epoch": 1.5995458635019921, "grad_norm": 4.384228229522705, "learning_rate": 7.68204259178799e-05, "loss": 2.4348770141601563, "memory(GiB)": 72.85, "step": 37335, "token_acc": 0.49097472924187724, "train_speed(iter/s)": 0.670975 }, { "epoch": 1.5997600788312412, "grad_norm": 4.034115314483643, "learning_rate": 7.681474602598529e-05, "loss": 2.3767263412475588, "memory(GiB)": 72.85, "step": 37340, "token_acc": 0.5035211267605634, "train_speed(iter/s)": 0.670996 }, { "epoch": 1.5999742941604902, "grad_norm": 5.1546125411987305, "learning_rate": 7.68090656483164e-05, "loss": 2.2801324844360353, "memory(GiB)": 72.85, "step": 37345, "token_acc": 0.5506607929515418, "train_speed(iter/s)": 0.670974 }, { "epoch": 1.600188509489739, "grad_norm": 5.143927097320557, "learning_rate": 7.680338478497613e-05, "loss": 2.280745506286621, "memory(GiB)": 72.85, "step": 37350, "token_acc": 0.5051546391752577, "train_speed(iter/s)": 0.67097 }, { "epoch": 1.600402724818988, "grad_norm": 3.5990841388702393, "learning_rate": 7.679770343606741e-05, "loss": 2.124689483642578, "memory(GiB)": 72.85, "step": 37355, "token_acc": 0.5051194539249146, "train_speed(iter/s)": 0.670973 }, { "epoch": 1.600616940148237, "grad_norm": 6.086662769317627, "learning_rate": 7.679202160169314e-05, "loss": 2.759434127807617, "memory(GiB)": 72.85, "step": 37360, "token_acc": 0.4557823129251701, "train_speed(iter/s)": 0.670981 }, { "epoch": 1.600831155477486, "grad_norm": 
4.08652400970459, "learning_rate": 7.678633928195628e-05, "loss": 2.1587554931640627, "memory(GiB)": 72.85, "step": 37365, "token_acc": 0.5620689655172414, "train_speed(iter/s)": 0.670983 }, { "epoch": 1.601045370806735, "grad_norm": 4.039398670196533, "learning_rate": 7.678065647695975e-05, "loss": 2.248031997680664, "memory(GiB)": 72.85, "step": 37370, "token_acc": 0.47988505747126436, "train_speed(iter/s)": 0.670989 }, { "epoch": 1.601259586135984, "grad_norm": 3.8371737003326416, "learning_rate": 7.67749731868065e-05, "loss": 2.3635976791381834, "memory(GiB)": 72.85, "step": 37375, "token_acc": 0.43661971830985913, "train_speed(iter/s)": 0.670956 }, { "epoch": 1.6014738014652328, "grad_norm": 4.265581130981445, "learning_rate": 7.676928941159951e-05, "loss": 2.1852725982666015, "memory(GiB)": 72.85, "step": 37380, "token_acc": 0.49173553719008267, "train_speed(iter/s)": 0.670977 }, { "epoch": 1.6016880167944818, "grad_norm": 4.004779815673828, "learning_rate": 7.676360515144172e-05, "loss": 2.407651901245117, "memory(GiB)": 72.85, "step": 37385, "token_acc": 0.4963235294117647, "train_speed(iter/s)": 0.670989 }, { "epoch": 1.6019022321237308, "grad_norm": 4.237004280090332, "learning_rate": 7.675792040643611e-05, "loss": 2.365019989013672, "memory(GiB)": 72.85, "step": 37390, "token_acc": 0.5066225165562914, "train_speed(iter/s)": 0.671009 }, { "epoch": 1.6021164474529797, "grad_norm": 4.8712921142578125, "learning_rate": 7.675223517668569e-05, "loss": 2.0593494415283202, "memory(GiB)": 72.85, "step": 37395, "token_acc": 0.5345622119815668, "train_speed(iter/s)": 0.671017 }, { "epoch": 1.6023306627822287, "grad_norm": 5.2704668045043945, "learning_rate": 7.67465494622934e-05, "loss": 2.308279037475586, "memory(GiB)": 72.85, "step": 37400, "token_acc": 0.5081967213114754, "train_speed(iter/s)": 0.671024 }, { "epoch": 1.6025448781114777, "grad_norm": 4.053473949432373, "learning_rate": 7.67408632633623e-05, "loss": 2.4438909530639648, "memory(GiB)": 72.85, 
"step": 37405, "token_acc": 0.5284810126582279, "train_speed(iter/s)": 0.671025 }, { "epoch": 1.6027590934407265, "grad_norm": 4.3932623863220215, "learning_rate": 7.673517657999538e-05, "loss": 2.457386016845703, "memory(GiB)": 72.85, "step": 37410, "token_acc": 0.49433962264150944, "train_speed(iter/s)": 0.671027 }, { "epoch": 1.6029733087699756, "grad_norm": 4.320974826812744, "learning_rate": 7.672948941229565e-05, "loss": 2.5237831115722655, "memory(GiB)": 72.85, "step": 37415, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.671031 }, { "epoch": 1.6031875240992246, "grad_norm": 4.397305488586426, "learning_rate": 7.672380176036615e-05, "loss": 2.266510009765625, "memory(GiB)": 72.85, "step": 37420, "token_acc": 0.5304659498207885, "train_speed(iter/s)": 0.671046 }, { "epoch": 1.6034017394284734, "grad_norm": 3.791951894760132, "learning_rate": 7.671811362430992e-05, "loss": 2.111348342895508, "memory(GiB)": 72.85, "step": 37425, "token_acc": 0.5285171102661597, "train_speed(iter/s)": 0.671031 }, { "epoch": 1.6036159547577225, "grad_norm": 3.935228109359741, "learning_rate": 7.671242500422998e-05, "loss": 2.5147159576416014, "memory(GiB)": 72.85, "step": 37430, "token_acc": 0.4786885245901639, "train_speed(iter/s)": 0.671045 }, { "epoch": 1.6038301700869715, "grad_norm": 3.747166633605957, "learning_rate": 7.670673590022939e-05, "loss": 2.2246679306030273, "memory(GiB)": 72.85, "step": 37435, "token_acc": 0.47686832740213525, "train_speed(iter/s)": 0.671056 }, { "epoch": 1.6040443854162203, "grad_norm": 3.386439323425293, "learning_rate": 7.670104631241126e-05, "loss": 2.5669151306152345, "memory(GiB)": 72.85, "step": 37440, "token_acc": 0.46808510638297873, "train_speed(iter/s)": 0.671049 }, { "epoch": 1.6042586007454693, "grad_norm": 5.023217678070068, "learning_rate": 7.66953562408786e-05, "loss": 2.10650691986084, "memory(GiB)": 72.85, "step": 37445, "token_acc": 0.5124555160142349, "train_speed(iter/s)": 0.671052 }, { "epoch": 
1.6044728160747184, "grad_norm": 4.015639305114746, "learning_rate": 7.668966568573455e-05, "loss": 2.1572298049926757, "memory(GiB)": 72.85, "step": 37450, "token_acc": 0.5508196721311476, "train_speed(iter/s)": 0.671058 }, { "epoch": 1.6046870314039672, "grad_norm": 4.7446064949035645, "learning_rate": 7.668397464708214e-05, "loss": 2.2705665588378907, "memory(GiB)": 72.85, "step": 37455, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.671066 }, { "epoch": 1.6049012467332162, "grad_norm": 5.33461332321167, "learning_rate": 7.667828312502452e-05, "loss": 2.2846954345703123, "memory(GiB)": 72.85, "step": 37460, "token_acc": 0.4980237154150198, "train_speed(iter/s)": 0.671087 }, { "epoch": 1.6051154620624652, "grad_norm": 4.966617584228516, "learning_rate": 7.667259111966476e-05, "loss": 2.3954578399658204, "memory(GiB)": 72.85, "step": 37465, "token_acc": 0.5112781954887218, "train_speed(iter/s)": 0.671096 }, { "epoch": 1.605329677391714, "grad_norm": 4.213747978210449, "learning_rate": 7.6666898631106e-05, "loss": 1.9830287933349608, "memory(GiB)": 72.85, "step": 37470, "token_acc": 0.5570934256055363, "train_speed(iter/s)": 0.671101 }, { "epoch": 1.605543892720963, "grad_norm": 5.7598090171813965, "learning_rate": 7.666120565945135e-05, "loss": 2.5781124114990233, "memory(GiB)": 72.85, "step": 37475, "token_acc": 0.45425867507886436, "train_speed(iter/s)": 0.671088 }, { "epoch": 1.6057581080502121, "grad_norm": 4.727099418640137, "learning_rate": 7.665551220480395e-05, "loss": 2.394405174255371, "memory(GiB)": 72.85, "step": 37480, "token_acc": 0.5098814229249012, "train_speed(iter/s)": 0.671075 }, { "epoch": 1.605972323379461, "grad_norm": 4.068863868713379, "learning_rate": 7.664981826726695e-05, "loss": 2.301284980773926, "memory(GiB)": 72.85, "step": 37485, "token_acc": 0.5268817204301075, "train_speed(iter/s)": 0.67106 }, { "epoch": 1.60618653870871, "grad_norm": 4.528168678283691, "learning_rate": 7.664412384694348e-05, "loss": 2.3546398162841795, 
"memory(GiB)": 72.85, "step": 37490, "token_acc": 0.5110294117647058, "train_speed(iter/s)": 0.671083 }, { "epoch": 1.606400754037959, "grad_norm": 3.9718806743621826, "learning_rate": 7.663842894393672e-05, "loss": 2.3316120147705077, "memory(GiB)": 72.85, "step": 37495, "token_acc": 0.488135593220339, "train_speed(iter/s)": 0.671097 }, { "epoch": 1.6066149693672078, "grad_norm": 3.629213333129883, "learning_rate": 7.663273355834984e-05, "loss": 2.4740680694580077, "memory(GiB)": 72.85, "step": 37500, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.671079 }, { "epoch": 1.6066149693672078, "eval_loss": 1.8512307405471802, "eval_runtime": 16.6482, "eval_samples_per_second": 6.007, "eval_steps_per_second": 6.007, "eval_token_acc": 0.5431918008784773, "step": 37500 }, { "epoch": 1.6068291846964569, "grad_norm": 4.706310749053955, "learning_rate": 7.662703769028599e-05, "loss": 2.6036510467529297, "memory(GiB)": 72.85, "step": 37505, "token_acc": 0.5194274028629857, "train_speed(iter/s)": 0.670866 }, { "epoch": 1.607043400025706, "grad_norm": 4.018729209899902, "learning_rate": 7.662134133984838e-05, "loss": 2.534840393066406, "memory(GiB)": 72.85, "step": 37510, "token_acc": 0.4728682170542636, "train_speed(iter/s)": 0.670872 }, { "epoch": 1.6072576153549547, "grad_norm": 4.712137222290039, "learning_rate": 7.66156445071402e-05, "loss": 2.568851089477539, "memory(GiB)": 72.85, "step": 37515, "token_acc": 0.45396825396825397, "train_speed(iter/s)": 0.670854 }, { "epoch": 1.6074718306842037, "grad_norm": 4.5378499031066895, "learning_rate": 7.660994719226464e-05, "loss": 2.723824691772461, "memory(GiB)": 72.85, "step": 37520, "token_acc": 0.4617737003058104, "train_speed(iter/s)": 0.670845 }, { "epoch": 1.6076860460134528, "grad_norm": 3.6479761600494385, "learning_rate": 7.660424939532494e-05, "loss": 2.144865798950195, "memory(GiB)": 72.85, "step": 37525, "token_acc": 0.5365853658536586, "train_speed(iter/s)": 0.670838 }, { "epoch": 1.6079002613427016, 
"grad_norm": 3.6248443126678467, "learning_rate": 7.65985511164243e-05, "loss": 2.3788022994995117, "memory(GiB)": 72.85, "step": 37530, "token_acc": 0.4923547400611621, "train_speed(iter/s)": 0.670846 }, { "epoch": 1.6081144766719506, "grad_norm": 3.940495729446411, "learning_rate": 7.659285235566596e-05, "loss": 2.5772836685180662, "memory(GiB)": 72.85, "step": 37535, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.670839 }, { "epoch": 1.6083286920011997, "grad_norm": 3.106919765472412, "learning_rate": 7.658715311315314e-05, "loss": 2.1543558120727537, "memory(GiB)": 72.85, "step": 37540, "token_acc": 0.5552238805970149, "train_speed(iter/s)": 0.670849 }, { "epoch": 1.6085429073304485, "grad_norm": 4.417055606842041, "learning_rate": 7.658145338898912e-05, "loss": 2.1478269577026365, "memory(GiB)": 72.85, "step": 37545, "token_acc": 0.53156146179402, "train_speed(iter/s)": 0.670857 }, { "epoch": 1.6087571226596975, "grad_norm": 3.92724609375, "learning_rate": 7.657575318327712e-05, "loss": 2.5654300689697265, "memory(GiB)": 72.85, "step": 37550, "token_acc": 0.47720364741641336, "train_speed(iter/s)": 0.67083 }, { "epoch": 1.6089713379889465, "grad_norm": 6.478294849395752, "learning_rate": 7.657005249612044e-05, "loss": 2.415126419067383, "memory(GiB)": 72.85, "step": 37555, "token_acc": 0.4749034749034749, "train_speed(iter/s)": 0.670844 }, { "epoch": 1.6091855533181953, "grad_norm": 3.8902087211608887, "learning_rate": 7.65643513276223e-05, "loss": 2.505155563354492, "memory(GiB)": 72.85, "step": 37560, "token_acc": 0.4637223974763407, "train_speed(iter/s)": 0.670859 }, { "epoch": 1.6093997686474444, "grad_norm": 4.639413356781006, "learning_rate": 7.655864967788605e-05, "loss": 2.521011161804199, "memory(GiB)": 72.85, "step": 37565, "token_acc": 0.5168539325842697, "train_speed(iter/s)": 0.670854 }, { "epoch": 1.6096139839766934, "grad_norm": 3.3188133239746094, "learning_rate": 7.655294754701494e-05, "loss": 2.3522294998168944, "memory(GiB)": 
72.85, "step": 37570, "token_acc": 0.5211726384364821, "train_speed(iter/s)": 0.670874 }, { "epoch": 1.6098281993059422, "grad_norm": 3.9645471572875977, "learning_rate": 7.654724493511227e-05, "loss": 2.352802276611328, "memory(GiB)": 72.85, "step": 37575, "token_acc": 0.4861111111111111, "train_speed(iter/s)": 0.67088 }, { "epoch": 1.6100424146351913, "grad_norm": 5.7341766357421875, "learning_rate": 7.654154184228137e-05, "loss": 2.634365463256836, "memory(GiB)": 72.85, "step": 37580, "token_acc": 0.4628099173553719, "train_speed(iter/s)": 0.670859 }, { "epoch": 1.6102566299644403, "grad_norm": 4.5185980796813965, "learning_rate": 7.653583826862552e-05, "loss": 2.281462860107422, "memory(GiB)": 72.85, "step": 37585, "token_acc": 0.51875, "train_speed(iter/s)": 0.670867 }, { "epoch": 1.6104708452936891, "grad_norm": 3.8012502193450928, "learning_rate": 7.653013421424806e-05, "loss": 2.0165008544921874, "memory(GiB)": 72.85, "step": 37590, "token_acc": 0.5198412698412699, "train_speed(iter/s)": 0.670834 }, { "epoch": 1.6106850606229381, "grad_norm": 3.660846471786499, "learning_rate": 7.652442967925236e-05, "loss": 2.190708351135254, "memory(GiB)": 72.85, "step": 37595, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.670821 }, { "epoch": 1.6108992759521872, "grad_norm": 4.116012096405029, "learning_rate": 7.651872466374172e-05, "loss": 2.337445831298828, "memory(GiB)": 72.85, "step": 37600, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.670847 }, { "epoch": 1.611113491281436, "grad_norm": 4.618675708770752, "learning_rate": 7.65130191678195e-05, "loss": 2.032042121887207, "memory(GiB)": 72.85, "step": 37605, "token_acc": 0.5482625482625483, "train_speed(iter/s)": 0.670827 }, { "epoch": 1.611327706610685, "grad_norm": 4.233274936676025, "learning_rate": 7.650731319158908e-05, "loss": 2.392864227294922, "memory(GiB)": 72.85, "step": 37610, "token_acc": 0.5, "train_speed(iter/s)": 0.670838 }, { "epoch": 1.611541921939934, "grad_norm": 
3.3966193199157715, "learning_rate": 7.650160673515381e-05, "loss": 2.411896896362305, "memory(GiB)": 72.85, "step": 37615, "token_acc": 0.4567901234567901, "train_speed(iter/s)": 0.670844 }, { "epoch": 1.6117561372691829, "grad_norm": 3.561009407043457, "learning_rate": 7.649589979861706e-05, "loss": 2.3695209503173826, "memory(GiB)": 72.85, "step": 37620, "token_acc": 0.5290322580645161, "train_speed(iter/s)": 0.670845 }, { "epoch": 1.611970352598432, "grad_norm": 4.737189292907715, "learning_rate": 7.649019238208226e-05, "loss": 2.2136592864990234, "memory(GiB)": 72.85, "step": 37625, "token_acc": 0.5236363636363637, "train_speed(iter/s)": 0.670841 }, { "epoch": 1.612184567927681, "grad_norm": 5.1894941329956055, "learning_rate": 7.648448448565275e-05, "loss": 2.4628067016601562, "memory(GiB)": 72.85, "step": 37630, "token_acc": 0.5056603773584906, "train_speed(iter/s)": 0.670858 }, { "epoch": 1.6123987832569298, "grad_norm": 4.560578346252441, "learning_rate": 7.647877610943197e-05, "loss": 1.9590869903564454, "memory(GiB)": 72.85, "step": 37635, "token_acc": 0.5725490196078431, "train_speed(iter/s)": 0.670869 }, { "epoch": 1.6126129985861788, "grad_norm": 4.669956684112549, "learning_rate": 7.647306725352332e-05, "loss": 2.5980640411376954, "memory(GiB)": 72.85, "step": 37640, "token_acc": 0.4668587896253602, "train_speed(iter/s)": 0.67088 }, { "epoch": 1.6128272139154278, "grad_norm": 3.808007001876831, "learning_rate": 7.646849982349062e-05, "loss": 2.3395902633666994, "memory(GiB)": 72.85, "step": 37645, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.670889 }, { "epoch": 1.6130414292446766, "grad_norm": 3.8795087337493896, "learning_rate": 7.646279010440444e-05, "loss": 2.1968765258789062, "memory(GiB)": 72.85, "step": 37650, "token_acc": 0.5398550724637681, "train_speed(iter/s)": 0.670882 }, { "epoch": 1.6132556445739257, "grad_norm": 3.769984006881714, "learning_rate": 7.645707990591999e-05, "loss": 2.4600887298583984, "memory(GiB)": 72.85, 
"step": 37655, "token_acc": 0.4565916398713826, "train_speed(iter/s)": 0.670899 }, { "epoch": 1.6134698599031747, "grad_norm": 4.673602104187012, "learning_rate": 7.645136922814071e-05, "loss": 2.442160797119141, "memory(GiB)": 72.85, "step": 37660, "token_acc": 0.5019305019305019, "train_speed(iter/s)": 0.670875 }, { "epoch": 1.6136840752324235, "grad_norm": 4.541689395904541, "learning_rate": 7.644565807117008e-05, "loss": 2.6729875564575196, "memory(GiB)": 72.85, "step": 37665, "token_acc": 0.44569288389513106, "train_speed(iter/s)": 0.670886 }, { "epoch": 1.6138982905616726, "grad_norm": 5.338139533996582, "learning_rate": 7.643994643511153e-05, "loss": 2.2070632934570313, "memory(GiB)": 72.85, "step": 37670, "token_acc": 0.5327868852459017, "train_speed(iter/s)": 0.670881 }, { "epoch": 1.6141125058909216, "grad_norm": 3.2730793952941895, "learning_rate": 7.643423432006855e-05, "loss": 2.3332134246826173, "memory(GiB)": 72.85, "step": 37675, "token_acc": 0.5418181818181819, "train_speed(iter/s)": 0.670871 }, { "epoch": 1.6143267212201704, "grad_norm": 6.056574821472168, "learning_rate": 7.642852172614463e-05, "loss": 2.4977882385253904, "memory(GiB)": 72.85, "step": 37680, "token_acc": 0.4318181818181818, "train_speed(iter/s)": 0.670884 }, { "epoch": 1.6145409365494194, "grad_norm": 4.104222297668457, "learning_rate": 7.642280865344322e-05, "loss": 2.1972021102905273, "memory(GiB)": 72.85, "step": 37685, "token_acc": 0.5154929577464789, "train_speed(iter/s)": 0.670908 }, { "epoch": 1.6147551518786685, "grad_norm": 3.2600600719451904, "learning_rate": 7.641709510206787e-05, "loss": 2.7895090103149416, "memory(GiB)": 72.85, "step": 37690, "token_acc": 0.45121951219512196, "train_speed(iter/s)": 0.670913 }, { "epoch": 1.6149693672079173, "grad_norm": 4.3483405113220215, "learning_rate": 7.641138107212207e-05, "loss": 2.424521827697754, "memory(GiB)": 72.85, "step": 37695, "token_acc": 0.4817073170731707, "train_speed(iter/s)": 0.670921 }, { "epoch": 
1.6151835825371663, "grad_norm": 5.319947719573975, "learning_rate": 7.640566656370928e-05, "loss": 2.4493202209472655, "memory(GiB)": 72.85, "step": 37700, "token_acc": 0.4387755102040816, "train_speed(iter/s)": 0.670922 }, { "epoch": 1.6153977978664154, "grad_norm": 4.50126314163208, "learning_rate": 7.639995157693312e-05, "loss": 2.118907928466797, "memory(GiB)": 72.85, "step": 37705, "token_acc": 0.5225806451612903, "train_speed(iter/s)": 0.670918 }, { "epoch": 1.6156120131956642, "grad_norm": 4.837767601013184, "learning_rate": 7.639423611189704e-05, "loss": 2.4409984588623046, "memory(GiB)": 72.85, "step": 37710, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.670864 }, { "epoch": 1.6158262285249132, "grad_norm": 3.7583515644073486, "learning_rate": 7.638852016870462e-05, "loss": 2.411575698852539, "memory(GiB)": 72.85, "step": 37715, "token_acc": 0.5057034220532319, "train_speed(iter/s)": 0.670862 }, { "epoch": 1.6160404438541622, "grad_norm": 3.4051339626312256, "learning_rate": 7.63828037474594e-05, "loss": 2.4771799087524413, "memory(GiB)": 72.85, "step": 37720, "token_acc": 0.4728682170542636, "train_speed(iter/s)": 0.670863 }, { "epoch": 1.616254659183411, "grad_norm": 4.418071746826172, "learning_rate": 7.637708684826497e-05, "loss": 2.1680530548095702, "memory(GiB)": 72.85, "step": 37725, "token_acc": 0.4921135646687697, "train_speed(iter/s)": 0.670855 }, { "epoch": 1.61646887451266, "grad_norm": 6.065805435180664, "learning_rate": 7.637136947122483e-05, "loss": 2.1873247146606447, "memory(GiB)": 72.85, "step": 37730, "token_acc": 0.5136986301369864, "train_speed(iter/s)": 0.670831 }, { "epoch": 1.6166830898419091, "grad_norm": 4.61495304107666, "learning_rate": 7.636565161644263e-05, "loss": 2.448294258117676, "memory(GiB)": 72.85, "step": 37735, "token_acc": 0.4470198675496689, "train_speed(iter/s)": 0.670821 }, { "epoch": 1.616897305171158, "grad_norm": 3.20807147026062, "learning_rate": 7.635993328402187e-05, "loss": 2.518869400024414, 
"memory(GiB)": 72.85, "step": 37740, "token_acc": 0.47474747474747475, "train_speed(iter/s)": 0.67084 }, { "epoch": 1.617111520500407, "grad_norm": 4.181016445159912, "learning_rate": 7.635421447406623e-05, "loss": 2.156326675415039, "memory(GiB)": 72.85, "step": 37745, "token_acc": 0.5056603773584906, "train_speed(iter/s)": 0.670862 }, { "epoch": 1.617325735829656, "grad_norm": 4.252533435821533, "learning_rate": 7.634849518667925e-05, "loss": 2.6299583435058596, "memory(GiB)": 72.85, "step": 37750, "token_acc": 0.48056537102473496, "train_speed(iter/s)": 0.670839 }, { "epoch": 1.6175399511589048, "grad_norm": 3.752793550491333, "learning_rate": 7.634277542196459e-05, "loss": 2.195395660400391, "memory(GiB)": 72.85, "step": 37755, "token_acc": 0.5477031802120141, "train_speed(iter/s)": 0.670864 }, { "epoch": 1.6177541664881538, "grad_norm": 3.6416587829589844, "learning_rate": 7.63370551800258e-05, "loss": 1.9207586288452148, "memory(GiB)": 72.85, "step": 37760, "token_acc": 0.5018315018315018, "train_speed(iter/s)": 0.670875 }, { "epoch": 1.6179683818174029, "grad_norm": 4.3966803550720215, "learning_rate": 7.633133446096658e-05, "loss": 2.3313823699951173, "memory(GiB)": 72.85, "step": 37765, "token_acc": 0.4790996784565916, "train_speed(iter/s)": 0.670874 }, { "epoch": 1.6181825971466517, "grad_norm": 3.2381675243377686, "learning_rate": 7.632561326489056e-05, "loss": 2.6133659362792967, "memory(GiB)": 72.85, "step": 37770, "token_acc": 0.46710526315789475, "train_speed(iter/s)": 0.670886 }, { "epoch": 1.6183968124759007, "grad_norm": 4.875457286834717, "learning_rate": 7.63198915919013e-05, "loss": 2.48598575592041, "memory(GiB)": 72.85, "step": 37775, "token_acc": 0.4852459016393443, "train_speed(iter/s)": 0.6709 }, { "epoch": 1.6186110278051498, "grad_norm": 4.218002796173096, "learning_rate": 7.631416944210255e-05, "loss": 2.262970733642578, "memory(GiB)": 72.85, "step": 37780, "token_acc": 0.5137931034482759, "train_speed(iter/s)": 0.670906 }, { "epoch": 
1.6188252431343986, "grad_norm": 4.613779067993164, "learning_rate": 7.630844681559794e-05, "loss": 2.2465789794921873, "memory(GiB)": 72.85, "step": 37785, "token_acc": 0.5150375939849624, "train_speed(iter/s)": 0.670903 }, { "epoch": 1.6190394584636476, "grad_norm": 8.410496711730957, "learning_rate": 7.630272371249111e-05, "loss": 2.1574487686157227, "memory(GiB)": 72.85, "step": 37790, "token_acc": 0.49606299212598426, "train_speed(iter/s)": 0.670896 }, { "epoch": 1.6192536737928966, "grad_norm": 4.575769901275635, "learning_rate": 7.629700013288579e-05, "loss": 2.488384819030762, "memory(GiB)": 72.85, "step": 37795, "token_acc": 0.4863636363636364, "train_speed(iter/s)": 0.670891 }, { "epoch": 1.6194678891221455, "grad_norm": 4.48237419128418, "learning_rate": 7.629127607688563e-05, "loss": 2.605516242980957, "memory(GiB)": 72.85, "step": 37800, "token_acc": 0.4625, "train_speed(iter/s)": 0.670901 }, { "epoch": 1.6196821044513945, "grad_norm": 5.379840850830078, "learning_rate": 7.628555154459436e-05, "loss": 2.324542236328125, "memory(GiB)": 72.85, "step": 37805, "token_acc": 0.5075757575757576, "train_speed(iter/s)": 0.670888 }, { "epoch": 1.6198963197806435, "grad_norm": 4.374698162078857, "learning_rate": 7.627982653611565e-05, "loss": 2.504121780395508, "memory(GiB)": 72.85, "step": 37810, "token_acc": 0.45, "train_speed(iter/s)": 0.670889 }, { "epoch": 1.6201105351098923, "grad_norm": 4.137178897857666, "learning_rate": 7.627410105155324e-05, "loss": 2.2990903854370117, "memory(GiB)": 72.85, "step": 37815, "token_acc": 0.5193548387096775, "train_speed(iter/s)": 0.670876 }, { "epoch": 1.6203247504391414, "grad_norm": 3.858349561691284, "learning_rate": 7.626837509101082e-05, "loss": 2.1376256942749023, "memory(GiB)": 72.85, "step": 37820, "token_acc": 0.5617977528089888, "train_speed(iter/s)": 0.670876 }, { "epoch": 1.6205389657683904, "grad_norm": 3.270420551300049, "learning_rate": 7.626264865459217e-05, "loss": 2.4993141174316404, "memory(GiB)": 72.85, 
"step": 37825, "token_acc": 0.44954128440366975, "train_speed(iter/s)": 0.670875 }, { "epoch": 1.6207531810976392, "grad_norm": 4.258398056030273, "learning_rate": 7.625692174240098e-05, "loss": 2.607600212097168, "memory(GiB)": 72.85, "step": 37830, "token_acc": 0.4612676056338028, "train_speed(iter/s)": 0.670884 }, { "epoch": 1.6209673964268885, "grad_norm": 5.06923770904541, "learning_rate": 7.625119435454105e-05, "loss": 2.3213857650756835, "memory(GiB)": 72.85, "step": 37835, "token_acc": 0.4866920152091255, "train_speed(iter/s)": 0.67087 }, { "epoch": 1.6211816117561373, "grad_norm": 4.541833400726318, "learning_rate": 7.624546649111608e-05, "loss": 2.3969005584716796, "memory(GiB)": 72.85, "step": 37840, "token_acc": 0.4542124542124542, "train_speed(iter/s)": 0.670855 }, { "epoch": 1.621395827085386, "grad_norm": 4.170678615570068, "learning_rate": 7.623973815222988e-05, "loss": 2.2202407836914064, "memory(GiB)": 72.85, "step": 37845, "token_acc": 0.5656934306569343, "train_speed(iter/s)": 0.670869 }, { "epoch": 1.6216100424146354, "grad_norm": 5.0650129318237305, "learning_rate": 7.623400933798618e-05, "loss": 2.297069549560547, "memory(GiB)": 72.85, "step": 37850, "token_acc": 0.5083333333333333, "train_speed(iter/s)": 0.670884 }, { "epoch": 1.6218242577438842, "grad_norm": 4.4993367195129395, "learning_rate": 7.622828004848884e-05, "loss": 2.908125877380371, "memory(GiB)": 72.85, "step": 37855, "token_acc": 0.4254658385093168, "train_speed(iter/s)": 0.6709 }, { "epoch": 1.622038473073133, "grad_norm": 3.4517593383789062, "learning_rate": 7.622255028384158e-05, "loss": 2.571042060852051, "memory(GiB)": 72.85, "step": 37860, "token_acc": 0.4468599033816425, "train_speed(iter/s)": 0.670908 }, { "epoch": 1.6222526884023822, "grad_norm": 3.4480018615722656, "learning_rate": 7.621682004414822e-05, "loss": 2.1746835708618164, "memory(GiB)": 72.85, "step": 37865, "token_acc": 0.5111940298507462, "train_speed(iter/s)": 0.670907 }, { "epoch": 1.622466903731631, 
"grad_norm": 4.329675197601318, "learning_rate": 7.621108932951257e-05, "loss": 2.373421859741211, "memory(GiB)": 72.85, "step": 37870, "token_acc": 0.5055762081784386, "train_speed(iter/s)": 0.670909 }, { "epoch": 1.6226811190608799, "grad_norm": 5.36822509765625, "learning_rate": 7.620535814003845e-05, "loss": 2.610038185119629, "memory(GiB)": 72.85, "step": 37875, "token_acc": 0.4478114478114478, "train_speed(iter/s)": 0.670914 }, { "epoch": 1.6228953343901291, "grad_norm": 3.295743227005005, "learning_rate": 7.61996264758297e-05, "loss": 2.421209144592285, "memory(GiB)": 72.85, "step": 37880, "token_acc": 0.5134228187919463, "train_speed(iter/s)": 0.670915 }, { "epoch": 1.623109549719378, "grad_norm": 4.026916980743408, "learning_rate": 7.61938943369901e-05, "loss": 2.0251541137695312, "memory(GiB)": 72.85, "step": 37885, "token_acc": 0.5471014492753623, "train_speed(iter/s)": 0.670917 }, { "epoch": 1.6233237650486267, "grad_norm": 4.265584945678711, "learning_rate": 7.618816172362357e-05, "loss": 2.524953269958496, "memory(GiB)": 72.85, "step": 37890, "token_acc": 0.4940239043824701, "train_speed(iter/s)": 0.670887 }, { "epoch": 1.623537980377876, "grad_norm": 6.551115036010742, "learning_rate": 7.618242863583391e-05, "loss": 2.7573354721069334, "memory(GiB)": 72.85, "step": 37895, "token_acc": 0.4495798319327731, "train_speed(iter/s)": 0.670877 }, { "epoch": 1.6237521957071248, "grad_norm": 4.088472366333008, "learning_rate": 7.6176695073725e-05, "loss": 2.347957420349121, "memory(GiB)": 72.85, "step": 37900, "token_acc": 0.5119453924914675, "train_speed(iter/s)": 0.670872 }, { "epoch": 1.6239664110363736, "grad_norm": 4.486438751220703, "learning_rate": 7.61709610374007e-05, "loss": 2.1261241912841795, "memory(GiB)": 72.85, "step": 37905, "token_acc": 0.5278810408921933, "train_speed(iter/s)": 0.670863 }, { "epoch": 1.6241806263656229, "grad_norm": 3.0807442665100098, "learning_rate": 7.61652265269649e-05, "loss": 2.5974607467651367, "memory(GiB)": 72.85, 
"step": 37910, "token_acc": 0.447887323943662, "train_speed(iter/s)": 0.670887 }, { "epoch": 1.6243948416948717, "grad_norm": 5.888533115386963, "learning_rate": 7.615949154252146e-05, "loss": 2.3711423873901367, "memory(GiB)": 72.85, "step": 37915, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.670896 }, { "epoch": 1.6246090570241205, "grad_norm": 5.6237101554870605, "learning_rate": 7.61537560841743e-05, "loss": 2.4399436950683593, "memory(GiB)": 72.85, "step": 37920, "token_acc": 0.4449152542372881, "train_speed(iter/s)": 0.670899 }, { "epoch": 1.6248232723533698, "grad_norm": 3.8066956996917725, "learning_rate": 7.614802015202731e-05, "loss": 2.468850517272949, "memory(GiB)": 72.85, "step": 37925, "token_acc": 0.45930232558139533, "train_speed(iter/s)": 0.670891 }, { "epoch": 1.6250374876826186, "grad_norm": 3.542591094970703, "learning_rate": 7.61422837461844e-05, "loss": 2.1593576431274415, "memory(GiB)": 72.85, "step": 37930, "token_acc": 0.5498154981549815, "train_speed(iter/s)": 0.670882 }, { "epoch": 1.6252517030118674, "grad_norm": 5.100202560424805, "learning_rate": 7.613654686674951e-05, "loss": 2.413287353515625, "memory(GiB)": 72.85, "step": 37935, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.670893 }, { "epoch": 1.6254659183411166, "grad_norm": 3.1491949558258057, "learning_rate": 7.613080951382655e-05, "loss": 2.4456409454345702, "memory(GiB)": 72.85, "step": 37940, "token_acc": 0.4713375796178344, "train_speed(iter/s)": 0.670914 }, { "epoch": 1.6256801336703655, "grad_norm": 8.26660442352295, "learning_rate": 7.612507168751945e-05, "loss": 2.035493850708008, "memory(GiB)": 72.85, "step": 37945, "token_acc": 0.5324074074074074, "train_speed(iter/s)": 0.67091 }, { "epoch": 1.6258943489996143, "grad_norm": 3.982961416244507, "learning_rate": 7.61193333879322e-05, "loss": 2.4093780517578125, "memory(GiB)": 72.85, "step": 37950, "token_acc": 0.4688427299703264, "train_speed(iter/s)": 0.67088 }, { "epoch": 1.6261085643288635, 
"grad_norm": 3.969637632369995, "learning_rate": 7.61135946151687e-05, "loss": 2.208241271972656, "memory(GiB)": 72.85, "step": 37955, "token_acc": 0.528169014084507, "train_speed(iter/s)": 0.67088 }, { "epoch": 1.6263227796581123, "grad_norm": 3.9947290420532227, "learning_rate": 7.610785536933294e-05, "loss": 2.3794576644897463, "memory(GiB)": 72.85, "step": 37960, "token_acc": 0.47575757575757577, "train_speed(iter/s)": 0.670866 }, { "epoch": 1.6265369949873612, "grad_norm": 4.032431602478027, "learning_rate": 7.610211565052889e-05, "loss": 2.391964340209961, "memory(GiB)": 72.85, "step": 37965, "token_acc": 0.4962686567164179, "train_speed(iter/s)": 0.670885 }, { "epoch": 1.6267512103166104, "grad_norm": 3.832343578338623, "learning_rate": 7.609637545886052e-05, "loss": 2.471251678466797, "memory(GiB)": 72.85, "step": 37970, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.670862 }, { "epoch": 1.6269654256458592, "grad_norm": 6.346863746643066, "learning_rate": 7.609063479443185e-05, "loss": 2.5721614837646483, "memory(GiB)": 72.85, "step": 37975, "token_acc": 0.4375, "train_speed(iter/s)": 0.670858 }, { "epoch": 1.627179640975108, "grad_norm": 4.413777828216553, "learning_rate": 7.608489365734684e-05, "loss": 2.420718955993652, "memory(GiB)": 72.85, "step": 37980, "token_acc": 0.5132075471698113, "train_speed(iter/s)": 0.670865 }, { "epoch": 1.6273938563043573, "grad_norm": 4.004787445068359, "learning_rate": 7.607915204770952e-05, "loss": 2.3762840270996093, "memory(GiB)": 72.85, "step": 37985, "token_acc": 0.4797297297297297, "train_speed(iter/s)": 0.670873 }, { "epoch": 1.627608071633606, "grad_norm": 4.819878578186035, "learning_rate": 7.607340996562388e-05, "loss": 2.3755167007446287, "memory(GiB)": 72.85, "step": 37990, "token_acc": 0.4934640522875817, "train_speed(iter/s)": 0.67088 }, { "epoch": 1.627822286962855, "grad_norm": 4.978928565979004, "learning_rate": 7.6067667411194e-05, "loss": 2.504231071472168, "memory(GiB)": 72.85, "step": 
37995, "token_acc": 0.4763636363636364, "train_speed(iter/s)": 0.670873 }, { "epoch": 1.6280365022921042, "grad_norm": 5.685202121734619, "learning_rate": 7.606192438452383e-05, "loss": 2.466079521179199, "memory(GiB)": 72.85, "step": 38000, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.670883 }, { "epoch": 1.6280365022921042, "eval_loss": 2.166025400161743, "eval_runtime": 15.5613, "eval_samples_per_second": 6.426, "eval_steps_per_second": 6.426, "eval_token_acc": 0.4961832061068702, "step": 38000 }, { "epoch": 1.628250717621353, "grad_norm": 4.982601165771484, "learning_rate": 7.605618088571746e-05, "loss": 2.436188888549805, "memory(GiB)": 72.85, "step": 38005, "token_acc": 0.49767441860465117, "train_speed(iter/s)": 0.670669 }, { "epoch": 1.6284649329506018, "grad_norm": 3.447723388671875, "learning_rate": 7.605043691487894e-05, "loss": 2.2469694137573244, "memory(GiB)": 72.85, "step": 38010, "token_acc": 0.5016501650165016, "train_speed(iter/s)": 0.670686 }, { "epoch": 1.628679148279851, "grad_norm": 3.6019067764282227, "learning_rate": 7.604469247211232e-05, "loss": 2.0864633560180663, "memory(GiB)": 72.85, "step": 38015, "token_acc": 0.518796992481203, "train_speed(iter/s)": 0.67071 }, { "epoch": 1.6288933636090999, "grad_norm": 3.4879062175750732, "learning_rate": 7.603894755752166e-05, "loss": 1.9445390701293945, "memory(GiB)": 72.85, "step": 38020, "token_acc": 0.5330739299610895, "train_speed(iter/s)": 0.670716 }, { "epoch": 1.6291075789383487, "grad_norm": 4.527988433837891, "learning_rate": 7.603320217121104e-05, "loss": 2.877256393432617, "memory(GiB)": 72.85, "step": 38025, "token_acc": 0.47023809523809523, "train_speed(iter/s)": 0.670723 }, { "epoch": 1.629321794267598, "grad_norm": 3.050835609436035, "learning_rate": 7.602745631328454e-05, "loss": 2.497127151489258, "memory(GiB)": 72.85, "step": 38030, "token_acc": 0.4427710843373494, "train_speed(iter/s)": 0.670727 }, { "epoch": 1.6295360095968467, "grad_norm": 3.8622050285339355, 
"learning_rate": 7.602170998384623e-05, "loss": 2.4056196212768555, "memory(GiB)": 72.85, "step": 38035, "token_acc": 0.4789272030651341, "train_speed(iter/s)": 0.670733 }, { "epoch": 1.6297502249260956, "grad_norm": 4.0141215324401855, "learning_rate": 7.601596318300026e-05, "loss": 2.1141658782958985, "memory(GiB)": 72.85, "step": 38040, "token_acc": 0.5572519083969466, "train_speed(iter/s)": 0.670746 }, { "epoch": 1.6299644402553448, "grad_norm": 5.912663459777832, "learning_rate": 7.601021591085071e-05, "loss": 2.501364326477051, "memory(GiB)": 72.85, "step": 38045, "token_acc": 0.44025157232704404, "train_speed(iter/s)": 0.670754 }, { "epoch": 1.6301786555845936, "grad_norm": 3.9581451416015625, "learning_rate": 7.60044681675017e-05, "loss": 2.5506872177124023, "memory(GiB)": 72.85, "step": 38050, "token_acc": 0.4343065693430657, "train_speed(iter/s)": 0.670778 }, { "epoch": 1.6303928709138424, "grad_norm": 5.294705867767334, "learning_rate": 7.599871995305735e-05, "loss": 2.3331714630126954, "memory(GiB)": 72.85, "step": 38055, "token_acc": 0.47648902821316613, "train_speed(iter/s)": 0.670796 }, { "epoch": 1.6306070862430917, "grad_norm": 5.200851917266846, "learning_rate": 7.59929712676218e-05, "loss": 2.45328311920166, "memory(GiB)": 72.85, "step": 38060, "token_acc": 0.49812734082397003, "train_speed(iter/s)": 0.670798 }, { "epoch": 1.6308213015723405, "grad_norm": 4.629194259643555, "learning_rate": 7.598722211129918e-05, "loss": 2.3861385345458985, "memory(GiB)": 72.85, "step": 38065, "token_acc": 0.48360655737704916, "train_speed(iter/s)": 0.67081 }, { "epoch": 1.6310355169015893, "grad_norm": 8.744131088256836, "learning_rate": 7.598147248419367e-05, "loss": 2.5945104598999023, "memory(GiB)": 72.85, "step": 38070, "token_acc": 0.4793388429752066, "train_speed(iter/s)": 0.670825 }, { "epoch": 1.6312497322308386, "grad_norm": 3.2663516998291016, "learning_rate": 7.597572238640942e-05, "loss": 2.3008060455322266, "memory(GiB)": 72.85, "step": 38075, 
"token_acc": 0.4919614147909968, "train_speed(iter/s)": 0.670832 }, { "epoch": 1.6314639475600874, "grad_norm": 3.6759467124938965, "learning_rate": 7.596997181805057e-05, "loss": 2.129317855834961, "memory(GiB)": 72.85, "step": 38080, "token_acc": 0.5242424242424243, "train_speed(iter/s)": 0.670818 }, { "epoch": 1.6316781628893362, "grad_norm": 4.381392478942871, "learning_rate": 7.596422077922134e-05, "loss": 2.4330013275146483, "memory(GiB)": 72.85, "step": 38085, "token_acc": 0.4789156626506024, "train_speed(iter/s)": 0.670829 }, { "epoch": 1.6318923782185855, "grad_norm": 4.015140533447266, "learning_rate": 7.595846927002588e-05, "loss": 2.411111831665039, "memory(GiB)": 72.85, "step": 38090, "token_acc": 0.4682080924855491, "train_speed(iter/s)": 0.670852 }, { "epoch": 1.6321065935478343, "grad_norm": 4.835360527038574, "learning_rate": 7.59527172905684e-05, "loss": 2.0046667098999023, "memory(GiB)": 72.85, "step": 38095, "token_acc": 0.5540540540540541, "train_speed(iter/s)": 0.670868 }, { "epoch": 1.632320808877083, "grad_norm": 3.343441963195801, "learning_rate": 7.594696484095311e-05, "loss": 2.521478271484375, "memory(GiB)": 72.85, "step": 38100, "token_acc": 0.5107692307692308, "train_speed(iter/s)": 0.670889 }, { "epoch": 1.6325350242063323, "grad_norm": 4.541643142700195, "learning_rate": 7.594121192128419e-05, "loss": 2.220744514465332, "memory(GiB)": 72.85, "step": 38105, "token_acc": 0.5373665480427047, "train_speed(iter/s)": 0.670893 }, { "epoch": 1.6327492395355812, "grad_norm": 3.3796324729919434, "learning_rate": 7.593545853166591e-05, "loss": 2.220140266418457, "memory(GiB)": 72.85, "step": 38110, "token_acc": 0.5419354838709678, "train_speed(iter/s)": 0.670894 }, { "epoch": 1.63296345486483, "grad_norm": 3.7351226806640625, "learning_rate": 7.592970467220244e-05, "loss": 2.214984321594238, "memory(GiB)": 72.85, "step": 38115, "token_acc": 0.5168918918918919, "train_speed(iter/s)": 0.670908 }, { "epoch": 1.6331776701940792, "grad_norm": 
5.192699432373047, "learning_rate": 7.592395034299806e-05, "loss": 2.2319158554077148, "memory(GiB)": 72.85, "step": 38120, "token_acc": 0.52734375, "train_speed(iter/s)": 0.670914 }, { "epoch": 1.633391885523328, "grad_norm": 4.066338539123535, "learning_rate": 7.5918195544157e-05, "loss": 2.4487648010253906, "memory(GiB)": 72.85, "step": 38125, "token_acc": 0.43416370106761565, "train_speed(iter/s)": 0.670921 }, { "epoch": 1.6336061008525768, "grad_norm": 4.509997844696045, "learning_rate": 7.591244027578352e-05, "loss": 2.1191856384277346, "memory(GiB)": 72.85, "step": 38130, "token_acc": 0.5421245421245421, "train_speed(iter/s)": 0.670936 }, { "epoch": 1.633820316181826, "grad_norm": 4.049501419067383, "learning_rate": 7.590668453798187e-05, "loss": 2.2743528366088865, "memory(GiB)": 72.85, "step": 38135, "token_acc": 0.5174825174825175, "train_speed(iter/s)": 0.670927 }, { "epoch": 1.634034531511075, "grad_norm": 4.320271968841553, "learning_rate": 7.590092833085633e-05, "loss": 2.3329662322998046, "memory(GiB)": 72.85, "step": 38140, "token_acc": 0.4807017543859649, "train_speed(iter/s)": 0.670925 }, { "epoch": 1.6342487468403237, "grad_norm": 3.7815520763397217, "learning_rate": 7.58951716545112e-05, "loss": 2.55428466796875, "memory(GiB)": 72.85, "step": 38145, "token_acc": 0.44554455445544555, "train_speed(iter/s)": 0.670925 }, { "epoch": 1.634462962169573, "grad_norm": 4.591255187988281, "learning_rate": 7.588941450905069e-05, "loss": 2.223445510864258, "memory(GiB)": 72.85, "step": 38150, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.670922 }, { "epoch": 1.6346771774988218, "grad_norm": 4.549450397491455, "learning_rate": 7.58836568945792e-05, "loss": 2.1649097442626952, "memory(GiB)": 72.85, "step": 38155, "token_acc": 0.5034965034965035, "train_speed(iter/s)": 0.670923 }, { "epoch": 1.6348913928280706, "grad_norm": 3.4700496196746826, "learning_rate": 7.587789881120096e-05, "loss": 2.118096923828125, "memory(GiB)": 72.85, "step": 38160, 
"token_acc": 0.5303030303030303, "train_speed(iter/s)": 0.670922 }, { "epoch": 1.6351056081573199, "grad_norm": 4.09562349319458, "learning_rate": 7.58721402590203e-05, "loss": 2.517706108093262, "memory(GiB)": 72.85, "step": 38165, "token_acc": 0.4831081081081081, "train_speed(iter/s)": 0.670939 }, { "epoch": 1.6353198234865687, "grad_norm": 3.4591050148010254, "learning_rate": 7.586638123814157e-05, "loss": 2.4051610946655275, "memory(GiB)": 72.85, "step": 38170, "token_acc": 0.4778156996587031, "train_speed(iter/s)": 0.670918 }, { "epoch": 1.6355340388158177, "grad_norm": 3.3265559673309326, "learning_rate": 7.586062174866908e-05, "loss": 2.170155906677246, "memory(GiB)": 72.85, "step": 38175, "token_acc": 0.5255681818181818, "train_speed(iter/s)": 0.67092 }, { "epoch": 1.6357482541450667, "grad_norm": 4.081451892852783, "learning_rate": 7.585486179070714e-05, "loss": 2.675136756896973, "memory(GiB)": 72.85, "step": 38180, "token_acc": 0.4552238805970149, "train_speed(iter/s)": 0.67091 }, { "epoch": 1.6359624694743156, "grad_norm": 4.72926139831543, "learning_rate": 7.584910136436014e-05, "loss": 2.3919830322265625, "memory(GiB)": 72.85, "step": 38185, "token_acc": 0.4702194357366771, "train_speed(iter/s)": 0.670919 }, { "epoch": 1.6361766848035646, "grad_norm": 4.933905124664307, "learning_rate": 7.584334046973243e-05, "loss": 2.397039031982422, "memory(GiB)": 72.85, "step": 38190, "token_acc": 0.4823943661971831, "train_speed(iter/s)": 0.670939 }, { "epoch": 1.6363909001328136, "grad_norm": 4.101505756378174, "learning_rate": 7.583757910692834e-05, "loss": 2.4994504928588865, "memory(GiB)": 72.85, "step": 38195, "token_acc": 0.49, "train_speed(iter/s)": 0.670946 }, { "epoch": 1.6366051154620624, "grad_norm": 3.746809244155884, "learning_rate": 7.583181727605231e-05, "loss": 2.4361459732055666, "memory(GiB)": 72.85, "step": 38200, "token_acc": 0.4740740740740741, "train_speed(iter/s)": 0.670965 }, { "epoch": 1.6368193307913115, "grad_norm": 3.9807815551757812, 
"learning_rate": 7.582605497720865e-05, "loss": 2.440085601806641, "memory(GiB)": 72.85, "step": 38205, "token_acc": 0.48366013071895425, "train_speed(iter/s)": 0.670989 }, { "epoch": 1.6370335461205605, "grad_norm": 4.000557899475098, "learning_rate": 7.582029221050177e-05, "loss": 2.3087907791137696, "memory(GiB)": 72.85, "step": 38210, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.671004 }, { "epoch": 1.6372477614498093, "grad_norm": 4.8644514083862305, "learning_rate": 7.581452897603609e-05, "loss": 2.340612030029297, "memory(GiB)": 72.85, "step": 38215, "token_acc": 0.48161764705882354, "train_speed(iter/s)": 0.671018 }, { "epoch": 1.6374619767790584, "grad_norm": 6.3540778160095215, "learning_rate": 7.580876527391599e-05, "loss": 2.3574623107910155, "memory(GiB)": 72.85, "step": 38220, "token_acc": 0.47202797202797203, "train_speed(iter/s)": 0.67103 }, { "epoch": 1.6376761921083074, "grad_norm": 4.16153621673584, "learning_rate": 7.580300110424589e-05, "loss": 2.164218521118164, "memory(GiB)": 72.85, "step": 38225, "token_acc": 0.5054945054945055, "train_speed(iter/s)": 0.671047 }, { "epoch": 1.6378904074375562, "grad_norm": 4.258874893188477, "learning_rate": 7.579723646713025e-05, "loss": 2.151776885986328, "memory(GiB)": 72.85, "step": 38230, "token_acc": 0.5054545454545455, "train_speed(iter/s)": 0.671052 }, { "epoch": 1.6381046227668052, "grad_norm": 4.639291286468506, "learning_rate": 7.579147136267345e-05, "loss": 2.580347442626953, "memory(GiB)": 72.85, "step": 38235, "token_acc": 0.4891304347826087, "train_speed(iter/s)": 0.671027 }, { "epoch": 1.6383188380960543, "grad_norm": 3.5025665760040283, "learning_rate": 7.578570579097996e-05, "loss": 2.328056526184082, "memory(GiB)": 72.85, "step": 38240, "token_acc": 0.5111111111111111, "train_speed(iter/s)": 0.671019 }, { "epoch": 1.638533053425303, "grad_norm": 4.267632961273193, "learning_rate": 7.577993975215422e-05, "loss": 2.210355758666992, "memory(GiB)": 72.85, "step": 38245, 
"token_acc": 0.4897360703812317, "train_speed(iter/s)": 0.67101 }, { "epoch": 1.6387472687545521, "grad_norm": 3.513214349746704, "learning_rate": 7.577417324630068e-05, "loss": 2.6368860244750976, "memory(GiB)": 72.85, "step": 38250, "token_acc": 0.48044692737430167, "train_speed(iter/s)": 0.671018 }, { "epoch": 1.6389614840838012, "grad_norm": 3.0339503288269043, "learning_rate": 7.576840627352381e-05, "loss": 2.404736328125, "memory(GiB)": 72.85, "step": 38255, "token_acc": 0.5225806451612903, "train_speed(iter/s)": 0.671026 }, { "epoch": 1.63917569941305, "grad_norm": 3.459223985671997, "learning_rate": 7.576263883392811e-05, "loss": 2.2667009353637697, "memory(GiB)": 72.85, "step": 38260, "token_acc": 0.5189003436426117, "train_speed(iter/s)": 0.671036 }, { "epoch": 1.639389914742299, "grad_norm": 4.491063117980957, "learning_rate": 7.575687092761801e-05, "loss": 2.567903518676758, "memory(GiB)": 72.85, "step": 38265, "token_acc": 0.4888888888888889, "train_speed(iter/s)": 0.671065 }, { "epoch": 1.639604130071548, "grad_norm": 2.916736602783203, "learning_rate": 7.575110255469806e-05, "loss": 2.4562211990356446, "memory(GiB)": 72.85, "step": 38270, "token_acc": 0.494949494949495, "train_speed(iter/s)": 0.671077 }, { "epoch": 1.6398183454007969, "grad_norm": 4.250716686248779, "learning_rate": 7.574533371527273e-05, "loss": 2.315940284729004, "memory(GiB)": 72.85, "step": 38275, "token_acc": 0.5, "train_speed(iter/s)": 0.671065 }, { "epoch": 1.6400325607300459, "grad_norm": 4.514209747314453, "learning_rate": 7.57395644094465e-05, "loss": 2.3281421661376953, "memory(GiB)": 72.85, "step": 38280, "token_acc": 0.5285171102661597, "train_speed(iter/s)": 0.671075 }, { "epoch": 1.640246776059295, "grad_norm": 3.4844746589660645, "learning_rate": 7.573379463732393e-05, "loss": 2.576501655578613, "memory(GiB)": 72.85, "step": 38285, "token_acc": 0.48344370860927155, "train_speed(iter/s)": 0.671093 }, { "epoch": 1.6404609913885437, "grad_norm": 4.867025375366211, 
"learning_rate": 7.572802439900955e-05, "loss": 2.338467025756836, "memory(GiB)": 72.85, "step": 38290, "token_acc": 0.4723127035830619, "train_speed(iter/s)": 0.67108 }, { "epoch": 1.6406752067177928, "grad_norm": 6.261167526245117, "learning_rate": 7.572225369460783e-05, "loss": 2.4951000213623047, "memory(GiB)": 72.85, "step": 38295, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.671099 }, { "epoch": 1.6408894220470418, "grad_norm": 4.292654514312744, "learning_rate": 7.571648252422338e-05, "loss": 2.091958236694336, "memory(GiB)": 72.85, "step": 38300, "token_acc": 0.5636363636363636, "train_speed(iter/s)": 0.671112 }, { "epoch": 1.6411036373762906, "grad_norm": 4.633296012878418, "learning_rate": 7.571071088796074e-05, "loss": 2.236636734008789, "memory(GiB)": 72.85, "step": 38305, "token_acc": 0.5198412698412699, "train_speed(iter/s)": 0.671117 }, { "epoch": 1.6413178527055396, "grad_norm": 3.5102338790893555, "learning_rate": 7.570493878592444e-05, "loss": 2.284188461303711, "memory(GiB)": 72.85, "step": 38310, "token_acc": 0.5219123505976095, "train_speed(iter/s)": 0.671138 }, { "epoch": 1.6415320680347887, "grad_norm": 4.068361282348633, "learning_rate": 7.569916621821906e-05, "loss": 2.1064599990844726, "memory(GiB)": 72.85, "step": 38315, "token_acc": 0.5478260869565217, "train_speed(iter/s)": 0.671133 }, { "epoch": 1.6417462833640375, "grad_norm": 4.216267108917236, "learning_rate": 7.569339318494917e-05, "loss": 2.1780817031860353, "memory(GiB)": 72.85, "step": 38320, "token_acc": 0.5372549019607843, "train_speed(iter/s)": 0.671129 }, { "epoch": 1.6419604986932865, "grad_norm": 4.165698051452637, "learning_rate": 7.568761968621937e-05, "loss": 2.364113998413086, "memory(GiB)": 72.85, "step": 38325, "token_acc": 0.46179401993355484, "train_speed(iter/s)": 0.671125 }, { "epoch": 1.6421747140225356, "grad_norm": 5.183764457702637, "learning_rate": 7.568184572213423e-05, "loss": 2.4864852905273436, "memory(GiB)": 72.85, "step": 38330, 
"token_acc": 0.49173553719008267, "train_speed(iter/s)": 0.671142 }, { "epoch": 1.6423889293517844, "grad_norm": 7.853949546813965, "learning_rate": 7.567607129279836e-05, "loss": 2.303092193603516, "memory(GiB)": 72.85, "step": 38335, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.671136 }, { "epoch": 1.6426031446810334, "grad_norm": 4.816299915313721, "learning_rate": 7.56702963983164e-05, "loss": 2.5255807876586913, "memory(GiB)": 72.85, "step": 38340, "token_acc": 0.5052264808362369, "train_speed(iter/s)": 0.671157 }, { "epoch": 1.6428173600102824, "grad_norm": 4.013819217681885, "learning_rate": 7.566452103879292e-05, "loss": 2.1816991806030273, "memory(GiB)": 72.85, "step": 38345, "token_acc": 0.5322033898305085, "train_speed(iter/s)": 0.671145 }, { "epoch": 1.6430315753395313, "grad_norm": 3.4501140117645264, "learning_rate": 7.565874521433256e-05, "loss": 2.1165678024291994, "memory(GiB)": 72.85, "step": 38350, "token_acc": 0.53515625, "train_speed(iter/s)": 0.671154 }, { "epoch": 1.6432457906687803, "grad_norm": 5.0354509353637695, "learning_rate": 7.565296892503997e-05, "loss": 2.3976316452026367, "memory(GiB)": 72.85, "step": 38355, "token_acc": 0.5303030303030303, "train_speed(iter/s)": 0.671158 }, { "epoch": 1.6434600059980293, "grad_norm": 4.56996488571167, "learning_rate": 7.56471921710198e-05, "loss": 2.010062026977539, "memory(GiB)": 72.85, "step": 38360, "token_acc": 0.5253623188405797, "train_speed(iter/s)": 0.671146 }, { "epoch": 1.6436742213272781, "grad_norm": 4.337915897369385, "learning_rate": 7.564141495237666e-05, "loss": 2.4574575424194336, "memory(GiB)": 72.85, "step": 38365, "token_acc": 0.4925373134328358, "train_speed(iter/s)": 0.67115 }, { "epoch": 1.6438884366565272, "grad_norm": 4.622575759887695, "learning_rate": 7.563563726921524e-05, "loss": 2.3432703018188477, "memory(GiB)": 72.85, "step": 38370, "token_acc": 0.5352112676056338, "train_speed(iter/s)": 0.671138 }, { "epoch": 1.6441026519857762, "grad_norm": 
5.024258136749268, "learning_rate": 7.56298591216402e-05, "loss": 2.403640365600586, "memory(GiB)": 72.85, "step": 38375, "token_acc": 0.4901315789473684, "train_speed(iter/s)": 0.671146 }, { "epoch": 1.644316867315025, "grad_norm": 3.8647189140319824, "learning_rate": 7.562408050975623e-05, "loss": 2.0983694076538084, "memory(GiB)": 72.85, "step": 38380, "token_acc": 0.527972027972028, "train_speed(iter/s)": 0.67115 }, { "epoch": 1.644531082644274, "grad_norm": 4.642683506011963, "learning_rate": 7.5618301433668e-05, "loss": 2.2161123275756838, "memory(GiB)": 72.85, "step": 38385, "token_acc": 0.4919614147909968, "train_speed(iter/s)": 0.671138 }, { "epoch": 1.644745297973523, "grad_norm": 3.8195295333862305, "learning_rate": 7.561252189348022e-05, "loss": 2.5082820892333983, "memory(GiB)": 72.85, "step": 38390, "token_acc": 0.4344569288389513, "train_speed(iter/s)": 0.671147 }, { "epoch": 1.644959513302772, "grad_norm": 3.774275302886963, "learning_rate": 7.560674188929756e-05, "loss": 2.5186559677124025, "memory(GiB)": 72.85, "step": 38395, "token_acc": 0.47023809523809523, "train_speed(iter/s)": 0.671157 }, { "epoch": 1.645173728632021, "grad_norm": 5.030632019042969, "learning_rate": 7.560096142122476e-05, "loss": 2.175410842895508, "memory(GiB)": 72.85, "step": 38400, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.671156 }, { "epoch": 1.64538794396127, "grad_norm": 4.041262149810791, "learning_rate": 7.559518048936652e-05, "loss": 2.21301383972168, "memory(GiB)": 72.85, "step": 38405, "token_acc": 0.5324675324675324, "train_speed(iter/s)": 0.671138 }, { "epoch": 1.6456021592905188, "grad_norm": 4.588741779327393, "learning_rate": 7.558939909382758e-05, "loss": 2.230803680419922, "memory(GiB)": 72.85, "step": 38410, "token_acc": 0.515527950310559, "train_speed(iter/s)": 0.671118 }, { "epoch": 1.6458163746197678, "grad_norm": 3.9795961380004883, "learning_rate": 7.558361723471267e-05, "loss": 2.3172740936279297, "memory(GiB)": 72.85, "step": 38415, 
"token_acc": 0.5420560747663551, "train_speed(iter/s)": 0.671115 }, { "epoch": 1.6460305899490169, "grad_norm": 3.476166009902954, "learning_rate": 7.557783491212653e-05, "loss": 2.546521759033203, "memory(GiB)": 72.85, "step": 38420, "token_acc": 0.46511627906976744, "train_speed(iter/s)": 0.67114 }, { "epoch": 1.6462448052782657, "grad_norm": 3.7483925819396973, "learning_rate": 7.557205212617392e-05, "loss": 2.435955810546875, "memory(GiB)": 72.85, "step": 38425, "token_acc": 0.4772727272727273, "train_speed(iter/s)": 0.671153 }, { "epoch": 1.6464590206075147, "grad_norm": 4.854668140411377, "learning_rate": 7.556626887695961e-05, "loss": 2.6251461029052736, "memory(GiB)": 72.85, "step": 38430, "token_acc": 0.47388059701492535, "train_speed(iter/s)": 0.671163 }, { "epoch": 1.6466732359367637, "grad_norm": 3.987727165222168, "learning_rate": 7.556048516458834e-05, "loss": 2.3108142852783202, "memory(GiB)": 72.85, "step": 38435, "token_acc": 0.5, "train_speed(iter/s)": 0.67116 }, { "epoch": 1.6468874512660125, "grad_norm": 3.7103464603424072, "learning_rate": 7.555470098916491e-05, "loss": 2.1186712265014647, "memory(GiB)": 72.85, "step": 38440, "token_acc": 0.526813880126183, "train_speed(iter/s)": 0.671154 }, { "epoch": 1.6471016665952616, "grad_norm": 4.082543849945068, "learning_rate": 7.554891635079409e-05, "loss": 2.41388053894043, "memory(GiB)": 72.85, "step": 38445, "token_acc": 0.5418326693227091, "train_speed(iter/s)": 0.671144 }, { "epoch": 1.6473158819245106, "grad_norm": 5.592261791229248, "learning_rate": 7.554313124958071e-05, "loss": 2.6932256698608397, "memory(GiB)": 72.85, "step": 38450, "token_acc": 0.44518272425249167, "train_speed(iter/s)": 0.671142 }, { "epoch": 1.6475300972537594, "grad_norm": 4.178456783294678, "learning_rate": 7.553734568562951e-05, "loss": 2.4441978454589846, "memory(GiB)": 72.85, "step": 38455, "token_acc": 0.45706371191135736, "train_speed(iter/s)": 0.671154 }, { "epoch": 1.6477443125830085, "grad_norm": 
3.709465503692627, "learning_rate": 7.553155965904535e-05, "loss": 2.4617496490478517, "memory(GiB)": 72.85, "step": 38460, "token_acc": 0.4780058651026393, "train_speed(iter/s)": 0.671131 }, { "epoch": 1.6479585279122575, "grad_norm": 5.426375865936279, "learning_rate": 7.552577316993306e-05, "loss": 2.1489025115966798, "memory(GiB)": 72.85, "step": 38465, "token_acc": 0.5753424657534246, "train_speed(iter/s)": 0.671147 }, { "epoch": 1.6481727432415063, "grad_norm": 4.459959030151367, "learning_rate": 7.551998621839741e-05, "loss": 2.4448215484619142, "memory(GiB)": 72.85, "step": 38470, "token_acc": 0.47876447876447875, "train_speed(iter/s)": 0.671129 }, { "epoch": 1.6483869585707553, "grad_norm": 4.050034999847412, "learning_rate": 7.551419880454329e-05, "loss": 2.482889175415039, "memory(GiB)": 72.85, "step": 38475, "token_acc": 0.5033783783783784, "train_speed(iter/s)": 0.671144 }, { "epoch": 1.6486011739000044, "grad_norm": 3.391995429992676, "learning_rate": 7.550841092847552e-05, "loss": 2.4247919082641602, "memory(GiB)": 72.85, "step": 38480, "token_acc": 0.5077399380804953, "train_speed(iter/s)": 0.671138 }, { "epoch": 1.6488153892292532, "grad_norm": 3.6030983924865723, "learning_rate": 7.550262259029895e-05, "loss": 2.51052188873291, "memory(GiB)": 72.85, "step": 38485, "token_acc": 0.508, "train_speed(iter/s)": 0.671133 }, { "epoch": 1.6490296045585022, "grad_norm": 4.712317943572998, "learning_rate": 7.549683379011845e-05, "loss": 2.5780590057373045, "memory(GiB)": 72.85, "step": 38490, "token_acc": 0.4978723404255319, "train_speed(iter/s)": 0.671128 }, { "epoch": 1.6492438198877513, "grad_norm": 4.538653373718262, "learning_rate": 7.549104452803888e-05, "loss": 2.5364496231079103, "memory(GiB)": 72.85, "step": 38495, "token_acc": 0.4780058651026393, "train_speed(iter/s)": 0.671136 }, { "epoch": 1.649458035217, "grad_norm": 4.297987461090088, "learning_rate": 7.548525480416515e-05, "loss": 2.096689987182617, "memory(GiB)": 72.85, "step": 38500, 
"token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.671141 }, { "epoch": 1.649458035217, "eval_loss": 2.160280704498291, "eval_runtime": 15.8996, "eval_samples_per_second": 6.289, "eval_steps_per_second": 6.289, "eval_token_acc": 0.45987261146496816, "step": 38500 }, { "epoch": 1.649672250546249, "grad_norm": 4.15482759475708, "learning_rate": 7.547946461860211e-05, "loss": 2.0004497528076173, "memory(GiB)": 72.85, "step": 38505, "token_acc": 0.48350612629594725, "train_speed(iter/s)": 0.670903 }, { "epoch": 1.6498864658754981, "grad_norm": 4.847787857055664, "learning_rate": 7.547367397145466e-05, "loss": 2.6468761444091795, "memory(GiB)": 72.85, "step": 38510, "token_acc": 0.46987951807228917, "train_speed(iter/s)": 0.670904 }, { "epoch": 1.650100681204747, "grad_norm": 4.3928985595703125, "learning_rate": 7.546788286282771e-05, "loss": 2.464599609375, "memory(GiB)": 72.85, "step": 38515, "token_acc": 0.4887459807073955, "train_speed(iter/s)": 0.670913 }, { "epoch": 1.650314896533996, "grad_norm": 3.969074249267578, "learning_rate": 7.546209129282618e-05, "loss": 2.464869499206543, "memory(GiB)": 72.85, "step": 38520, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.670889 }, { "epoch": 1.650529111863245, "grad_norm": 4.071657657623291, "learning_rate": 7.545629926155498e-05, "loss": 2.435393524169922, "memory(GiB)": 72.85, "step": 38525, "token_acc": 0.504885993485342, "train_speed(iter/s)": 0.670912 }, { "epoch": 1.6507433271924938, "grad_norm": 4.5073466300964355, "learning_rate": 7.545050676911905e-05, "loss": 2.4204132080078127, "memory(GiB)": 72.85, "step": 38530, "token_acc": 0.49702380952380953, "train_speed(iter/s)": 0.670942 }, { "epoch": 1.6509575425217429, "grad_norm": 3.4300217628479004, "learning_rate": 7.54447138156233e-05, "loss": 2.5052774429321287, "memory(GiB)": 72.85, "step": 38535, "token_acc": 0.46853146853146854, "train_speed(iter/s)": 0.670925 }, { "epoch": 1.651171757850992, "grad_norm": 3.7777140140533447, 
"learning_rate": 7.543892040117268e-05, "loss": 2.5622331619262697, "memory(GiB)": 72.85, "step": 38540, "token_acc": 0.4670846394984326, "train_speed(iter/s)": 0.670918 }, { "epoch": 1.6513859731802407, "grad_norm": 3.746521234512329, "learning_rate": 7.543312652587217e-05, "loss": 2.199374198913574, "memory(GiB)": 72.85, "step": 38545, "token_acc": 0.519434628975265, "train_speed(iter/s)": 0.670914 }, { "epoch": 1.6516001885094898, "grad_norm": 4.9151387214660645, "learning_rate": 7.542733218982672e-05, "loss": 2.6927425384521486, "memory(GiB)": 72.85, "step": 38550, "token_acc": 0.4401294498381877, "train_speed(iter/s)": 0.670917 }, { "epoch": 1.6518144038387388, "grad_norm": 4.000053405761719, "learning_rate": 7.542153739314127e-05, "loss": 2.415274810791016, "memory(GiB)": 72.85, "step": 38555, "token_acc": 0.44368600682593856, "train_speed(iter/s)": 0.67093 }, { "epoch": 1.6520286191679876, "grad_norm": 4.105499267578125, "learning_rate": 7.541574213592084e-05, "loss": 2.4823024749755858, "memory(GiB)": 72.85, "step": 38560, "token_acc": 0.4425287356321839, "train_speed(iter/s)": 0.670928 }, { "epoch": 1.6522428344972366, "grad_norm": 4.261410236358643, "learning_rate": 7.540994641827042e-05, "loss": 2.096278762817383, "memory(GiB)": 72.85, "step": 38565, "token_acc": 0.5669291338582677, "train_speed(iter/s)": 0.670906 }, { "epoch": 1.6524570498264857, "grad_norm": 4.601348876953125, "learning_rate": 7.540415024029495e-05, "loss": 1.8356948852539063, "memory(GiB)": 72.85, "step": 38570, "token_acc": 0.6198347107438017, "train_speed(iter/s)": 0.67091 }, { "epoch": 1.6526712651557345, "grad_norm": 4.410287380218506, "learning_rate": 7.539835360209948e-05, "loss": 2.2913684844970703, "memory(GiB)": 72.85, "step": 38575, "token_acc": 0.5076923076923077, "train_speed(iter/s)": 0.670916 }, { "epoch": 1.6528854804849835, "grad_norm": 6.201785564422607, "learning_rate": 7.539255650378901e-05, "loss": 2.1163776397705076, "memory(GiB)": 72.85, "step": 38580, 
"token_acc": 0.5390946502057613, "train_speed(iter/s)": 0.670903 }, { "epoch": 1.6530996958142326, "grad_norm": 3.852839469909668, "learning_rate": 7.538675894546856e-05, "loss": 2.619472122192383, "memory(GiB)": 72.85, "step": 38585, "token_acc": 0.46726190476190477, "train_speed(iter/s)": 0.670873 }, { "epoch": 1.6533139111434814, "grad_norm": 3.869274616241455, "learning_rate": 7.538096092724314e-05, "loss": 2.585811424255371, "memory(GiB)": 72.85, "step": 38590, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.670877 }, { "epoch": 1.6535281264727304, "grad_norm": 5.647308349609375, "learning_rate": 7.537516244921783e-05, "loss": 2.4867496490478516, "memory(GiB)": 72.85, "step": 38595, "token_acc": 0.4775510204081633, "train_speed(iter/s)": 0.670887 }, { "epoch": 1.6537423418019794, "grad_norm": 4.920405864715576, "learning_rate": 7.536936351149763e-05, "loss": 2.6992753982543944, "memory(GiB)": 72.85, "step": 38600, "token_acc": 0.4135593220338983, "train_speed(iter/s)": 0.670889 }, { "epoch": 1.6539565571312282, "grad_norm": 5.652836322784424, "learning_rate": 7.536356411418762e-05, "loss": 2.0918731689453125, "memory(GiB)": 72.85, "step": 38605, "token_acc": 0.5594059405940595, "train_speed(iter/s)": 0.670898 }, { "epoch": 1.6541707724604773, "grad_norm": 5.353868007659912, "learning_rate": 7.535776425739285e-05, "loss": 2.41931266784668, "memory(GiB)": 72.85, "step": 38610, "token_acc": 0.5093167701863354, "train_speed(iter/s)": 0.670907 }, { "epoch": 1.6543849877897263, "grad_norm": 4.179035186767578, "learning_rate": 7.53519639412184e-05, "loss": 2.2501033782958983, "memory(GiB)": 72.85, "step": 38615, "token_acc": 0.46710526315789475, "train_speed(iter/s)": 0.670911 }, { "epoch": 1.6545992031189751, "grad_norm": 3.393653392791748, "learning_rate": 7.534616316576933e-05, "loss": 2.296335220336914, "memory(GiB)": 72.85, "step": 38620, "token_acc": 0.48375451263537905, "train_speed(iter/s)": 0.670905 }, { "epoch": 1.6548134184482242, "grad_norm": 
6.940486431121826, "learning_rate": 7.534036193115073e-05, "loss": 2.3651191711425783, "memory(GiB)": 72.85, "step": 38625, "token_acc": 0.48917748917748916, "train_speed(iter/s)": 0.67092 }, { "epoch": 1.6550276337774732, "grad_norm": 5.142519474029541, "learning_rate": 7.533456023746773e-05, "loss": 2.2962583541870116, "memory(GiB)": 72.85, "step": 38630, "token_acc": 0.5450980392156862, "train_speed(iter/s)": 0.670928 }, { "epoch": 1.655241849106722, "grad_norm": 3.8970742225646973, "learning_rate": 7.532875808482538e-05, "loss": 2.358424758911133, "memory(GiB)": 72.85, "step": 38635, "token_acc": 0.48013245033112584, "train_speed(iter/s)": 0.670926 }, { "epoch": 1.655456064435971, "grad_norm": 4.855230331420898, "learning_rate": 7.532295547332883e-05, "loss": 2.413601303100586, "memory(GiB)": 72.85, "step": 38640, "token_acc": 0.5145985401459854, "train_speed(iter/s)": 0.670919 }, { "epoch": 1.65567027976522, "grad_norm": 4.068282604217529, "learning_rate": 7.531715240308319e-05, "loss": 2.388699531555176, "memory(GiB)": 72.85, "step": 38645, "token_acc": 0.4899328859060403, "train_speed(iter/s)": 0.670901 }, { "epoch": 1.6558844950944689, "grad_norm": 3.941824197769165, "learning_rate": 7.531134887419358e-05, "loss": 2.0887516021728514, "memory(GiB)": 72.85, "step": 38650, "token_acc": 0.5522388059701493, "train_speed(iter/s)": 0.670908 }, { "epoch": 1.656098710423718, "grad_norm": 3.865079402923584, "learning_rate": 7.530554488676513e-05, "loss": 2.213615608215332, "memory(GiB)": 72.85, "step": 38655, "token_acc": 0.5232974910394266, "train_speed(iter/s)": 0.670888 }, { "epoch": 1.656312925752967, "grad_norm": 3.529210090637207, "learning_rate": 7.529974044090301e-05, "loss": 2.2470733642578127, "memory(GiB)": 72.85, "step": 38660, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.670909 }, { "epoch": 1.6565271410822158, "grad_norm": 4.077538967132568, "learning_rate": 7.529393553671236e-05, "loss": 2.3523202896118165, "memory(GiB)": 72.85, "step": 
38665, "token_acc": 0.5017543859649123, "train_speed(iter/s)": 0.670921 }, { "epoch": 1.6567413564114648, "grad_norm": 3.952765703201294, "learning_rate": 7.528813017429832e-05, "loss": 2.3560447692871094, "memory(GiB)": 72.85, "step": 38670, "token_acc": 0.4836795252225519, "train_speed(iter/s)": 0.670927 }, { "epoch": 1.6569555717407138, "grad_norm": 4.867530345916748, "learning_rate": 7.52823243537661e-05, "loss": 2.1670894622802734, "memory(GiB)": 72.85, "step": 38675, "token_acc": 0.5313653136531366, "train_speed(iter/s)": 0.670919 }, { "epoch": 1.6571697870699627, "grad_norm": 4.772010326385498, "learning_rate": 7.527651807522085e-05, "loss": 2.738071060180664, "memory(GiB)": 72.85, "step": 38680, "token_acc": 0.4781144781144781, "train_speed(iter/s)": 0.670917 }, { "epoch": 1.6573840023992117, "grad_norm": 4.560057640075684, "learning_rate": 7.527071133876777e-05, "loss": 2.2475357055664062, "memory(GiB)": 72.85, "step": 38685, "token_acc": 0.5182186234817814, "train_speed(iter/s)": 0.670935 }, { "epoch": 1.6575982177284607, "grad_norm": 5.89581823348999, "learning_rate": 7.526490414451204e-05, "loss": 2.665142059326172, "memory(GiB)": 72.85, "step": 38690, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.670948 }, { "epoch": 1.6578124330577095, "grad_norm": 5.40963888168335, "learning_rate": 7.525909649255889e-05, "loss": 2.6269107818603517, "memory(GiB)": 72.85, "step": 38695, "token_acc": 0.46551724137931033, "train_speed(iter/s)": 0.670951 }, { "epoch": 1.6580266483869586, "grad_norm": 4.848232269287109, "learning_rate": 7.525328838301349e-05, "loss": 2.7984861373901366, "memory(GiB)": 72.85, "step": 38700, "token_acc": 0.4697508896797153, "train_speed(iter/s)": 0.670945 }, { "epoch": 1.6582408637162076, "grad_norm": 3.774062156677246, "learning_rate": 7.524747981598108e-05, "loss": 2.3386775970458986, "memory(GiB)": 72.85, "step": 38705, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.67096 }, { "epoch": 1.6584550790454564, 
"grad_norm": 4.175724983215332, "learning_rate": 7.52416707915669e-05, "loss": 2.4499839782714843, "memory(GiB)": 72.85, "step": 38710, "token_acc": 0.4750830564784053, "train_speed(iter/s)": 0.670964 }, { "epoch": 1.6586692943747054, "grad_norm": 4.596437454223633, "learning_rate": 7.523586130987617e-05, "loss": 2.0488183975219725, "memory(GiB)": 72.85, "step": 38715, "token_acc": 0.5676567656765676, "train_speed(iter/s)": 0.67096 }, { "epoch": 1.6588835097039545, "grad_norm": 3.8522939682006836, "learning_rate": 7.523005137101416e-05, "loss": 2.2052257537841795, "memory(GiB)": 72.85, "step": 38720, "token_acc": 0.5354609929078015, "train_speed(iter/s)": 0.670955 }, { "epoch": 1.6590977250332033, "grad_norm": 3.956073522567749, "learning_rate": 7.522424097508607e-05, "loss": 2.3110713958740234, "memory(GiB)": 72.85, "step": 38725, "token_acc": 0.45864661654135336, "train_speed(iter/s)": 0.670969 }, { "epoch": 1.6593119403624523, "grad_norm": 3.4367470741271973, "learning_rate": 7.52184301221972e-05, "loss": 2.4246828079223635, "memory(GiB)": 72.85, "step": 38730, "token_acc": 0.5, "train_speed(iter/s)": 0.67097 }, { "epoch": 1.6595261556917014, "grad_norm": 3.6972732543945312, "learning_rate": 7.521261881245283e-05, "loss": 2.4399938583374023, "memory(GiB)": 72.85, "step": 38735, "token_acc": 0.532871972318339, "train_speed(iter/s)": 0.670973 }, { "epoch": 1.6597403710209502, "grad_norm": 4.323653221130371, "learning_rate": 7.520680704595819e-05, "loss": 2.2851985931396483, "memory(GiB)": 72.85, "step": 38740, "token_acc": 0.46994535519125685, "train_speed(iter/s)": 0.670973 }, { "epoch": 1.6599545863501992, "grad_norm": 4.9263014793396, "learning_rate": 7.520099482281862e-05, "loss": 2.396411323547363, "memory(GiB)": 72.85, "step": 38745, "token_acc": 0.4519230769230769, "train_speed(iter/s)": 0.670981 }, { "epoch": 1.6601688016794482, "grad_norm": 5.375004768371582, "learning_rate": 7.519518214313938e-05, "loss": 2.4051342010498047, "memory(GiB)": 72.85, "step": 
38750, "token_acc": 0.5075528700906344, "train_speed(iter/s)": 0.670984 }, { "epoch": 1.660383017008697, "grad_norm": 5.180655479431152, "learning_rate": 7.518936900702578e-05, "loss": 2.385067939758301, "memory(GiB)": 72.85, "step": 38755, "token_acc": 0.47419354838709676, "train_speed(iter/s)": 0.670994 }, { "epoch": 1.660597232337946, "grad_norm": 3.4652960300445557, "learning_rate": 7.518355541458313e-05, "loss": 2.600347709655762, "memory(GiB)": 72.85, "step": 38760, "token_acc": 0.46534653465346537, "train_speed(iter/s)": 0.671001 }, { "epoch": 1.6608114476671951, "grad_norm": 4.861260414123535, "learning_rate": 7.517774136591675e-05, "loss": 2.3901384353637694, "memory(GiB)": 72.85, "step": 38765, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.671012 }, { "epoch": 1.661025662996444, "grad_norm": 4.763458251953125, "learning_rate": 7.517192686113197e-05, "loss": 2.2756547927856445, "memory(GiB)": 72.85, "step": 38770, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.670994 }, { "epoch": 1.661239878325693, "grad_norm": 3.637024164199829, "learning_rate": 7.516611190033411e-05, "loss": 2.4781490325927735, "memory(GiB)": 72.85, "step": 38775, "token_acc": 0.5078369905956113, "train_speed(iter/s)": 0.670986 }, { "epoch": 1.661454093654942, "grad_norm": 3.6512882709503174, "learning_rate": 7.516029648362855e-05, "loss": 2.048093795776367, "memory(GiB)": 72.85, "step": 38780, "token_acc": 0.5014925373134328, "train_speed(iter/s)": 0.671002 }, { "epoch": 1.6616683089841908, "grad_norm": 3.459677219390869, "learning_rate": 7.515448061112058e-05, "loss": 2.4652725219726563, "memory(GiB)": 72.85, "step": 38785, "token_acc": 0.49363057324840764, "train_speed(iter/s)": 0.671 }, { "epoch": 1.6618825243134399, "grad_norm": 4.5118818283081055, "learning_rate": 7.514866428291563e-05, "loss": 2.1614017486572266, "memory(GiB)": 72.85, "step": 38790, "token_acc": 0.5637583892617449, "train_speed(iter/s)": 0.67098 }, { "epoch": 1.662096739642689, "grad_norm": 
4.2095723152160645, "learning_rate": 7.514284749911903e-05, "loss": 2.3580713272094727, "memory(GiB)": 72.85, "step": 38795, "token_acc": 0.4658385093167702, "train_speed(iter/s)": 0.671002 }, { "epoch": 1.6623109549719377, "grad_norm": 4.631654739379883, "learning_rate": 7.513703025983614e-05, "loss": 2.2656478881835938, "memory(GiB)": 72.85, "step": 38800, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.671014 }, { "epoch": 1.6625251703011867, "grad_norm": 3.5558831691741943, "learning_rate": 7.51312125651724e-05, "loss": 2.3703163146972654, "memory(GiB)": 72.85, "step": 38805, "token_acc": 0.5186440677966102, "train_speed(iter/s)": 0.670999 }, { "epoch": 1.6627393856304358, "grad_norm": 4.687065601348877, "learning_rate": 7.512539441523314e-05, "loss": 2.6115612030029296, "memory(GiB)": 72.85, "step": 38810, "token_acc": 0.46226415094339623, "train_speed(iter/s)": 0.670983 }, { "epoch": 1.6629536009596846, "grad_norm": 4.705606460571289, "learning_rate": 7.511957581012378e-05, "loss": 2.033974838256836, "memory(GiB)": 72.85, "step": 38815, "token_acc": 0.5446009389671361, "train_speed(iter/s)": 0.671 }, { "epoch": 1.6631678162889336, "grad_norm": 3.4417402744293213, "learning_rate": 7.511375674994978e-05, "loss": 2.132645034790039, "memory(GiB)": 72.85, "step": 38820, "token_acc": 0.5434083601286174, "train_speed(iter/s)": 0.671014 }, { "epoch": 1.6633820316181827, "grad_norm": 4.192662715911865, "learning_rate": 7.51079372348165e-05, "loss": 2.548554039001465, "memory(GiB)": 72.85, "step": 38825, "token_acc": 0.4697986577181208, "train_speed(iter/s)": 0.671021 }, { "epoch": 1.6635962469474315, "grad_norm": 4.726995944976807, "learning_rate": 7.510211726482938e-05, "loss": 2.3401817321777343, "memory(GiB)": 72.85, "step": 38830, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.671009 }, { "epoch": 1.6638104622766805, "grad_norm": 4.985805034637451, "learning_rate": 7.509629684009387e-05, "loss": 2.318740463256836, "memory(GiB)": 72.85, "step": 
38835, "token_acc": 0.5100864553314121, "train_speed(iter/s)": 0.670997 }, { "epoch": 1.6640246776059295, "grad_norm": 5.142449378967285, "learning_rate": 7.50904759607154e-05, "loss": 2.0687671661376954, "memory(GiB)": 72.85, "step": 38840, "token_acc": 0.5538461538461539, "train_speed(iter/s)": 0.670989 }, { "epoch": 1.6642388929351783, "grad_norm": 4.430341720581055, "learning_rate": 7.508465462679938e-05, "loss": 2.5103479385375977, "memory(GiB)": 72.85, "step": 38845, "token_acc": 0.45394736842105265, "train_speed(iter/s)": 0.671001 }, { "epoch": 1.6644531082644274, "grad_norm": 3.333031415939331, "learning_rate": 7.507883283845136e-05, "loss": 2.2163515090942383, "memory(GiB)": 72.85, "step": 38850, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.67101 }, { "epoch": 1.6646673235936764, "grad_norm": 4.001158714294434, "learning_rate": 7.507301059577674e-05, "loss": 2.3176944732666014, "memory(GiB)": 72.85, "step": 38855, "token_acc": 0.5056179775280899, "train_speed(iter/s)": 0.671012 }, { "epoch": 1.6648815389229252, "grad_norm": 4.434139251708984, "learning_rate": 7.5067187898881e-05, "loss": 2.4419748306274416, "memory(GiB)": 72.85, "step": 38860, "token_acc": 0.44542772861356933, "train_speed(iter/s)": 0.671005 }, { "epoch": 1.6650957542521743, "grad_norm": 4.254383087158203, "learning_rate": 7.506136474786966e-05, "loss": 2.4046173095703125, "memory(GiB)": 72.85, "step": 38865, "token_acc": 0.5081967213114754, "train_speed(iter/s)": 0.671007 }, { "epoch": 1.6653099695814233, "grad_norm": 4.580646514892578, "learning_rate": 7.505554114284819e-05, "loss": 2.2365785598754884, "memory(GiB)": 72.85, "step": 38870, "token_acc": 0.5074074074074074, "train_speed(iter/s)": 0.671009 }, { "epoch": 1.6655241849106721, "grad_norm": 3.659688949584961, "learning_rate": 7.504971708392206e-05, "loss": 2.6726593017578124, "memory(GiB)": 72.85, "step": 38875, "token_acc": 0.4564459930313589, "train_speed(iter/s)": 0.671 }, { "epoch": 1.6657384002399211, 
"grad_norm": 4.773250102996826, "learning_rate": 7.504389257119682e-05, "loss": 2.4239423751831053, "memory(GiB)": 72.85, "step": 38880, "token_acc": 0.4908424908424908, "train_speed(iter/s)": 0.670986 }, { "epoch": 1.6659526155691702, "grad_norm": 4.848700523376465, "learning_rate": 7.503806760477799e-05, "loss": 2.613325500488281, "memory(GiB)": 72.85, "step": 38885, "token_acc": 0.44921875, "train_speed(iter/s)": 0.670982 }, { "epoch": 1.666166830898419, "grad_norm": 4.2086310386657715, "learning_rate": 7.503224218477105e-05, "loss": 2.157190704345703, "memory(GiB)": 72.85, "step": 38890, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.670994 }, { "epoch": 1.666381046227668, "grad_norm": 3.428834915161133, "learning_rate": 7.502641631128157e-05, "loss": 2.245643424987793, "memory(GiB)": 72.85, "step": 38895, "token_acc": 0.5, "train_speed(iter/s)": 0.670999 }, { "epoch": 1.666595261556917, "grad_norm": 6.4267473220825195, "learning_rate": 7.502058998441508e-05, "loss": 2.3577081680297853, "memory(GiB)": 72.85, "step": 38900, "token_acc": 0.5224489795918368, "train_speed(iter/s)": 0.671009 }, { "epoch": 1.6668094768861659, "grad_norm": 3.372438430786133, "learning_rate": 7.501476320427714e-05, "loss": 2.247026252746582, "memory(GiB)": 72.85, "step": 38905, "token_acc": 0.46303501945525294, "train_speed(iter/s)": 0.671018 }, { "epoch": 1.667023692215415, "grad_norm": 3.660841941833496, "learning_rate": 7.50089359709733e-05, "loss": 2.428351402282715, "memory(GiB)": 72.85, "step": 38910, "token_acc": 0.45, "train_speed(iter/s)": 0.67102 }, { "epoch": 1.667237907544664, "grad_norm": 4.972269058227539, "learning_rate": 7.500310828460913e-05, "loss": 2.3195470809936523, "memory(GiB)": 72.85, "step": 38915, "token_acc": 0.4623287671232877, "train_speed(iter/s)": 0.671015 }, { "epoch": 1.6674521228739128, "grad_norm": 4.783389091491699, "learning_rate": 7.499728014529017e-05, "loss": 2.5124881744384764, "memory(GiB)": 72.85, "step": 38920, "token_acc": 
0.4758842443729904, "train_speed(iter/s)": 0.671039 }, { "epoch": 1.6676663382031618, "grad_norm": 4.164443016052246, "learning_rate": 7.499145155312206e-05, "loss": 2.5878173828125, "memory(GiB)": 72.85, "step": 38925, "token_acc": 0.47572815533980584, "train_speed(iter/s)": 0.671043 }, { "epoch": 1.6678805535324108, "grad_norm": 4.194735527038574, "learning_rate": 7.498562250821035e-05, "loss": 2.3366672515869142, "memory(GiB)": 72.85, "step": 38930, "token_acc": 0.5341365461847389, "train_speed(iter/s)": 0.671034 }, { "epoch": 1.6680947688616596, "grad_norm": 3.3122482299804688, "learning_rate": 7.497979301066065e-05, "loss": 2.0916927337646483, "memory(GiB)": 72.85, "step": 38935, "token_acc": 0.5625, "train_speed(iter/s)": 0.671014 }, { "epoch": 1.6683089841909087, "grad_norm": 5.221594333648682, "learning_rate": 7.497396306057857e-05, "loss": 2.6641357421875, "memory(GiB)": 72.85, "step": 38940, "token_acc": 0.42671009771986973, "train_speed(iter/s)": 0.671002 }, { "epoch": 1.6685231995201577, "grad_norm": 3.979966402053833, "learning_rate": 7.496813265806971e-05, "loss": 2.4013040542602537, "memory(GiB)": 72.85, "step": 38945, "token_acc": 0.494949494949495, "train_speed(iter/s)": 0.671007 }, { "epoch": 1.6687374148494065, "grad_norm": 3.56699538230896, "learning_rate": 7.496230180323973e-05, "loss": 2.1822948455810547, "memory(GiB)": 72.85, "step": 38950, "token_acc": 0.46464646464646464, "train_speed(iter/s)": 0.670998 }, { "epoch": 1.6689516301786556, "grad_norm": 4.798302173614502, "learning_rate": 7.495647049619422e-05, "loss": 2.676258087158203, "memory(GiB)": 72.85, "step": 38955, "token_acc": 0.453416149068323, "train_speed(iter/s)": 0.67099 }, { "epoch": 1.6691658455079046, "grad_norm": 4.858779430389404, "learning_rate": 7.495063873703881e-05, "loss": 2.560393524169922, "memory(GiB)": 72.85, "step": 38960, "token_acc": 0.4714828897338403, "train_speed(iter/s)": 0.670981 }, { "epoch": 1.6693800608371534, "grad_norm": 6.350332260131836, 
"learning_rate": 7.494480652587921e-05, "loss": 2.7134849548339846, "memory(GiB)": 72.85, "step": 38965, "token_acc": 0.47330960854092524, "train_speed(iter/s)": 0.67098 }, { "epoch": 1.6695942761664024, "grad_norm": 4.263083457946777, "learning_rate": 7.493897386282103e-05, "loss": 2.482892608642578, "memory(GiB)": 72.85, "step": 38970, "token_acc": 0.43234323432343236, "train_speed(iter/s)": 0.670983 }, { "epoch": 1.6698084914956515, "grad_norm": 3.6832640171051025, "learning_rate": 7.493314074796993e-05, "loss": 2.373727798461914, "memory(GiB)": 72.85, "step": 38975, "token_acc": 0.5209125475285171, "train_speed(iter/s)": 0.670989 }, { "epoch": 1.6700227068249003, "grad_norm": 5.279082298278809, "learning_rate": 7.492730718143161e-05, "loss": 2.126346969604492, "memory(GiB)": 72.85, "step": 38980, "token_acc": 0.5378151260504201, "train_speed(iter/s)": 0.67099 }, { "epoch": 1.6702369221541493, "grad_norm": 4.087491512298584, "learning_rate": 7.492147316331175e-05, "loss": 2.380519485473633, "memory(GiB)": 72.85, "step": 38985, "token_acc": 0.5182481751824818, "train_speed(iter/s)": 0.670988 }, { "epoch": 1.6704511374833984, "grad_norm": 3.719926118850708, "learning_rate": 7.491563869371601e-05, "loss": 2.4418121337890626, "memory(GiB)": 72.85, "step": 38990, "token_acc": 0.4608150470219436, "train_speed(iter/s)": 0.670972 }, { "epoch": 1.6706653528126472, "grad_norm": 3.8981285095214844, "learning_rate": 7.49098037727501e-05, "loss": 2.5947086334228517, "memory(GiB)": 72.85, "step": 38995, "token_acc": 0.43023255813953487, "train_speed(iter/s)": 0.670984 }, { "epoch": 1.6708795681418962, "grad_norm": 3.5881166458129883, "learning_rate": 7.490396840051972e-05, "loss": 2.4479515075683596, "memory(GiB)": 72.85, "step": 39000, "token_acc": 0.4893048128342246, "train_speed(iter/s)": 0.670987 }, { "epoch": 1.6708795681418962, "eval_loss": 1.9542219638824463, "eval_runtime": 14.9703, "eval_samples_per_second": 6.68, "eval_steps_per_second": 6.68, "eval_token_acc": 
0.4945054945054945, "step": 39000 }, { "epoch": 1.6710937834711452, "grad_norm": 3.6017253398895264, "learning_rate": 7.48981325771306e-05, "loss": 2.3199954986572267, "memory(GiB)": 72.85, "step": 39005, "token_acc": 0.4900990099009901, "train_speed(iter/s)": 0.670783 }, { "epoch": 1.671307998800394, "grad_norm": 4.663700103759766, "learning_rate": 7.489229630268847e-05, "loss": 2.3548017501831056, "memory(GiB)": 72.85, "step": 39010, "token_acc": 0.4555984555984556, "train_speed(iter/s)": 0.670771 }, { "epoch": 1.671522214129643, "grad_norm": 4.309309959411621, "learning_rate": 7.488645957729903e-05, "loss": 2.3054874420166014, "memory(GiB)": 72.85, "step": 39015, "token_acc": 0.5063291139240507, "train_speed(iter/s)": 0.67078 }, { "epoch": 1.6717364294588921, "grad_norm": 3.511150598526001, "learning_rate": 7.488062240106801e-05, "loss": 2.2903911590576174, "memory(GiB)": 72.85, "step": 39020, "token_acc": 0.4607142857142857, "train_speed(iter/s)": 0.670775 }, { "epoch": 1.671950644788141, "grad_norm": 3.8556413650512695, "learning_rate": 7.48747847741012e-05, "loss": 2.422299385070801, "memory(GiB)": 72.85, "step": 39025, "token_acc": 0.49469964664310956, "train_speed(iter/s)": 0.670793 }, { "epoch": 1.67216486011739, "grad_norm": 5.051599502563477, "learning_rate": 7.486894669650431e-05, "loss": 2.707464408874512, "memory(GiB)": 72.85, "step": 39030, "token_acc": 0.4131147540983607, "train_speed(iter/s)": 0.670798 }, { "epoch": 1.672379075446639, "grad_norm": 3.2896547317504883, "learning_rate": 7.486310816838314e-05, "loss": 2.135547637939453, "memory(GiB)": 72.85, "step": 39035, "token_acc": 0.5089285714285714, "train_speed(iter/s)": 0.670796 }, { "epoch": 1.6725932907758878, "grad_norm": 3.6510586738586426, "learning_rate": 7.485726918984342e-05, "loss": 2.4964771270751953, "memory(GiB)": 72.85, "step": 39040, "token_acc": 0.4754601226993865, "train_speed(iter/s)": 0.670794 }, { "epoch": 1.6728075061051368, "grad_norm": 3.7882370948791504, "learning_rate": 
7.485142976099096e-05, "loss": 2.208551025390625, "memory(GiB)": 72.85, "step": 39045, "token_acc": 0.5632183908045977, "train_speed(iter/s)": 0.670805 }, { "epoch": 1.6730217214343859, "grad_norm": 4.5908613204956055, "learning_rate": 7.484558988193156e-05, "loss": 2.196141815185547, "memory(GiB)": 72.85, "step": 39050, "token_acc": 0.5092592592592593, "train_speed(iter/s)": 0.670799 }, { "epoch": 1.6732359367636347, "grad_norm": 4.35763692855835, "learning_rate": 7.483974955277097e-05, "loss": 2.4056772232055663, "memory(GiB)": 72.85, "step": 39055, "token_acc": 0.45396825396825397, "train_speed(iter/s)": 0.67081 }, { "epoch": 1.6734501520928837, "grad_norm": 4.182394504547119, "learning_rate": 7.483390877361502e-05, "loss": 2.252914237976074, "memory(GiB)": 72.85, "step": 39060, "token_acc": 0.4948805460750853, "train_speed(iter/s)": 0.670819 }, { "epoch": 1.6736643674221328, "grad_norm": 4.3442277908325195, "learning_rate": 7.482806754456952e-05, "loss": 2.0509586334228516, "memory(GiB)": 72.85, "step": 39065, "token_acc": 0.49140893470790376, "train_speed(iter/s)": 0.670824 }, { "epoch": 1.6738785827513816, "grad_norm": 3.82426118850708, "learning_rate": 7.482222586574026e-05, "loss": 2.2538190841674806, "memory(GiB)": 72.85, "step": 39070, "token_acc": 0.4696969696969697, "train_speed(iter/s)": 0.670813 }, { "epoch": 1.6740927980806306, "grad_norm": 3.4853954315185547, "learning_rate": 7.481638373723312e-05, "loss": 2.122608757019043, "memory(GiB)": 72.85, "step": 39075, "token_acc": 0.5372670807453416, "train_speed(iter/s)": 0.670815 }, { "epoch": 1.6743070134098796, "grad_norm": 4.774211406707764, "learning_rate": 7.48105411591539e-05, "loss": 2.4122119903564454, "memory(GiB)": 72.85, "step": 39080, "token_acc": 0.5179856115107914, "train_speed(iter/s)": 0.670789 }, { "epoch": 1.6745212287391285, "grad_norm": 4.986435413360596, "learning_rate": 7.480469813160847e-05, "loss": 2.1858852386474608, "memory(GiB)": 72.85, "step": 39085, "token_acc": 
0.5258964143426295, "train_speed(iter/s)": 0.670792 }, { "epoch": 1.6747354440683775, "grad_norm": 4.24039363861084, "learning_rate": 7.479885465470264e-05, "loss": 2.371613883972168, "memory(GiB)": 72.85, "step": 39090, "token_acc": 0.49140893470790376, "train_speed(iter/s)": 0.670797 }, { "epoch": 1.6749496593976265, "grad_norm": 4.284745216369629, "learning_rate": 7.47930107285423e-05, "loss": 2.370491600036621, "memory(GiB)": 72.85, "step": 39095, "token_acc": 0.5173611111111112, "train_speed(iter/s)": 0.670796 }, { "epoch": 1.6751638747268753, "grad_norm": 3.066532850265503, "learning_rate": 7.478716635323333e-05, "loss": 1.7165319442749023, "memory(GiB)": 72.85, "step": 39100, "token_acc": 0.5836298932384342, "train_speed(iter/s)": 0.67081 }, { "epoch": 1.6753780900561244, "grad_norm": 4.544808387756348, "learning_rate": 7.478132152888155e-05, "loss": 2.471925735473633, "memory(GiB)": 72.85, "step": 39105, "token_acc": 0.4743083003952569, "train_speed(iter/s)": 0.670804 }, { "epoch": 1.6755923053853734, "grad_norm": 4.390525817871094, "learning_rate": 7.477547625559292e-05, "loss": 2.55108528137207, "memory(GiB)": 72.85, "step": 39110, "token_acc": 0.5018867924528302, "train_speed(iter/s)": 0.670816 }, { "epoch": 1.6758065207146222, "grad_norm": 3.9918882846832275, "learning_rate": 7.47696305334733e-05, "loss": 2.1531049728393556, "memory(GiB)": 72.85, "step": 39115, "token_acc": 0.5272727272727272, "train_speed(iter/s)": 0.670825 }, { "epoch": 1.6760207360438713, "grad_norm": 6.05661153793335, "learning_rate": 7.476378436262856e-05, "loss": 2.4650299072265627, "memory(GiB)": 72.85, "step": 39120, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.670833 }, { "epoch": 1.6762349513731203, "grad_norm": 3.8611035346984863, "learning_rate": 7.475793774316466e-05, "loss": 2.123662567138672, "memory(GiB)": 72.85, "step": 39125, "token_acc": 0.5396825396825397, "train_speed(iter/s)": 0.670853 }, { "epoch": 1.676449166702369, "grad_norm": 5.068925380706787, 
"learning_rate": 7.47520906751875e-05, "loss": 2.3153457641601562, "memory(GiB)": 72.85, "step": 39130, "token_acc": 0.4652567975830816, "train_speed(iter/s)": 0.670846 }, { "epoch": 1.6766633820316181, "grad_norm": 4.257052898406982, "learning_rate": 7.474624315880297e-05, "loss": 2.079722595214844, "memory(GiB)": 72.85, "step": 39135, "token_acc": 0.5617977528089888, "train_speed(iter/s)": 0.670833 }, { "epoch": 1.6768775973608672, "grad_norm": 3.367541551589966, "learning_rate": 7.474039519411705e-05, "loss": 2.565016174316406, "memory(GiB)": 72.85, "step": 39140, "token_acc": 0.4901315789473684, "train_speed(iter/s)": 0.670809 }, { "epoch": 1.677091812690116, "grad_norm": 4.089968204498291, "learning_rate": 7.473454678123564e-05, "loss": 2.426533317565918, "memory(GiB)": 72.85, "step": 39145, "token_acc": 0.4745762711864407, "train_speed(iter/s)": 0.670841 }, { "epoch": 1.677306028019365, "grad_norm": 6.90756893157959, "learning_rate": 7.472869792026472e-05, "loss": 2.3528594970703125, "memory(GiB)": 72.85, "step": 39150, "token_acc": 0.5166051660516605, "train_speed(iter/s)": 0.670849 }, { "epoch": 1.677520243348614, "grad_norm": 4.005716323852539, "learning_rate": 7.472284861131026e-05, "loss": 2.312949562072754, "memory(GiB)": 72.85, "step": 39155, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.670858 }, { "epoch": 1.6777344586778629, "grad_norm": 4.211273193359375, "learning_rate": 7.47169988544782e-05, "loss": 2.4058799743652344, "memory(GiB)": 72.85, "step": 39160, "token_acc": 0.47278911564625853, "train_speed(iter/s)": 0.670862 }, { "epoch": 1.677948674007112, "grad_norm": 4.791128158569336, "learning_rate": 7.47111486498745e-05, "loss": 2.288576126098633, "memory(GiB)": 72.85, "step": 39165, "token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.670865 }, { "epoch": 1.678162889336361, "grad_norm": 3.183791399002075, "learning_rate": 7.470529799760517e-05, "loss": 2.2252647399902346, "memory(GiB)": 72.85, "step": 39170, "token_acc": 
0.494949494949495, "train_speed(iter/s)": 0.670857 }, { "epoch": 1.6783771046656097, "grad_norm": 5.085690975189209, "learning_rate": 7.469944689777619e-05, "loss": 2.4171619415283203, "memory(GiB)": 72.85, "step": 39175, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.670846 }, { "epoch": 1.6785913199948588, "grad_norm": 4.6907525062561035, "learning_rate": 7.469359535049357e-05, "loss": 2.234834671020508, "memory(GiB)": 72.85, "step": 39180, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.67086 }, { "epoch": 1.6788055353241078, "grad_norm": 4.77105188369751, "learning_rate": 7.46877433558633e-05, "loss": 2.441508483886719, "memory(GiB)": 72.85, "step": 39185, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.670842 }, { "epoch": 1.6790197506533566, "grad_norm": 3.8177101612091064, "learning_rate": 7.468189091399141e-05, "loss": 2.319419288635254, "memory(GiB)": 72.85, "step": 39190, "token_acc": 0.4472843450479233, "train_speed(iter/s)": 0.670851 }, { "epoch": 1.6792339659826059, "grad_norm": 3.6301677227020264, "learning_rate": 7.467603802498389e-05, "loss": 2.176627540588379, "memory(GiB)": 72.85, "step": 39195, "token_acc": 0.49683544303797467, "train_speed(iter/s)": 0.670853 }, { "epoch": 1.6794481813118547, "grad_norm": 4.293480396270752, "learning_rate": 7.467018468894682e-05, "loss": 2.5296503067016602, "memory(GiB)": 72.85, "step": 39200, "token_acc": 0.44727272727272727, "train_speed(iter/s)": 0.670849 }, { "epoch": 1.6796623966411035, "grad_norm": 3.6558420658111572, "learning_rate": 7.46643309059862e-05, "loss": 2.368108558654785, "memory(GiB)": 72.85, "step": 39205, "token_acc": 0.4603174603174603, "train_speed(iter/s)": 0.67086 }, { "epoch": 1.6798766119703528, "grad_norm": 3.905024528503418, "learning_rate": 7.465847667620808e-05, "loss": 2.3927656173706056, "memory(GiB)": 72.85, "step": 39210, "token_acc": 0.45588235294117646, "train_speed(iter/s)": 0.670843 }, { "epoch": 1.6800908272996016, "grad_norm": 
4.567498207092285, "learning_rate": 7.465262199971853e-05, "loss": 2.2426101684570314, "memory(GiB)": 72.85, "step": 39215, "token_acc": 0.4716312056737589, "train_speed(iter/s)": 0.670831 }, { "epoch": 1.6803050426288504, "grad_norm": 4.464001655578613, "learning_rate": 7.46467668766236e-05, "loss": 2.4783870697021486, "memory(GiB)": 72.85, "step": 39220, "token_acc": 0.46417445482866043, "train_speed(iter/s)": 0.670849 }, { "epoch": 1.6805192579580996, "grad_norm": 4.99913215637207, "learning_rate": 7.464091130702937e-05, "loss": 2.2542802810668947, "memory(GiB)": 72.85, "step": 39225, "token_acc": 0.524, "train_speed(iter/s)": 0.670832 }, { "epoch": 1.6807334732873485, "grad_norm": 3.850342273712158, "learning_rate": 7.463505529104193e-05, "loss": 2.1289737701416014, "memory(GiB)": 72.85, "step": 39230, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.670814 }, { "epoch": 1.6809476886165973, "grad_norm": 5.745799541473389, "learning_rate": 7.462919882876733e-05, "loss": 2.5248546600341797, "memory(GiB)": 72.85, "step": 39235, "token_acc": 0.4965986394557823, "train_speed(iter/s)": 0.670827 }, { "epoch": 1.6811619039458465, "grad_norm": 3.9247243404388428, "learning_rate": 7.462334192031172e-05, "loss": 2.360234260559082, "memory(GiB)": 72.85, "step": 39240, "token_acc": 0.4940828402366864, "train_speed(iter/s)": 0.670819 }, { "epoch": 1.6813761192750953, "grad_norm": 4.657504081726074, "learning_rate": 7.461748456578116e-05, "loss": 2.3315515518188477, "memory(GiB)": 72.85, "step": 39245, "token_acc": 0.4948805460750853, "train_speed(iter/s)": 0.670819 }, { "epoch": 1.6815903346043442, "grad_norm": 3.834829330444336, "learning_rate": 7.461162676528176e-05, "loss": 2.2237998962402346, "memory(GiB)": 72.85, "step": 39250, "token_acc": 0.5019762845849802, "train_speed(iter/s)": 0.670832 }, { "epoch": 1.6818045499335934, "grad_norm": 5.004354000091553, "learning_rate": 7.460576851891967e-05, "loss": 2.374156951904297, "memory(GiB)": 72.85, "step": 39255, 
"token_acc": 0.49691358024691357, "train_speed(iter/s)": 0.670849 }, { "epoch": 1.6820187652628422, "grad_norm": 3.84730863571167, "learning_rate": 7.459990982680098e-05, "loss": 2.144277572631836, "memory(GiB)": 72.85, "step": 39260, "token_acc": 0.53125, "train_speed(iter/s)": 0.670862 }, { "epoch": 1.682232980592091, "grad_norm": 4.361083507537842, "learning_rate": 7.459405068903187e-05, "loss": 2.4293941497802733, "memory(GiB)": 72.85, "step": 39265, "token_acc": 0.4340836012861736, "train_speed(iter/s)": 0.670881 }, { "epoch": 1.6824471959213403, "grad_norm": 5.903897762298584, "learning_rate": 7.458819110571845e-05, "loss": 2.0809459686279297, "memory(GiB)": 72.85, "step": 39270, "token_acc": 0.5579710144927537, "train_speed(iter/s)": 0.670896 }, { "epoch": 1.682661411250589, "grad_norm": 4.518985748291016, "learning_rate": 7.458233107696689e-05, "loss": 2.1957897186279296, "memory(GiB)": 72.85, "step": 39275, "token_acc": 0.5586206896551724, "train_speed(iter/s)": 0.670913 }, { "epoch": 1.682875626579838, "grad_norm": 4.763889789581299, "learning_rate": 7.457647060288333e-05, "loss": 2.544145202636719, "memory(GiB)": 72.85, "step": 39280, "token_acc": 0.46875, "train_speed(iter/s)": 0.670919 }, { "epoch": 1.6830898419090872, "grad_norm": 4.49472713470459, "learning_rate": 7.457060968357397e-05, "loss": 2.434919548034668, "memory(GiB)": 72.85, "step": 39285, "token_acc": 0.5125786163522013, "train_speed(iter/s)": 0.670914 }, { "epoch": 1.683304057238336, "grad_norm": 4.745805740356445, "learning_rate": 7.456474831914494e-05, "loss": 2.425592613220215, "memory(GiB)": 72.85, "step": 39290, "token_acc": 0.47194719471947194, "train_speed(iter/s)": 0.670932 }, { "epoch": 1.6835182725675848, "grad_norm": 5.481906890869141, "learning_rate": 7.455888650970247e-05, "loss": 2.5868011474609376, "memory(GiB)": 72.85, "step": 39295, "token_acc": 0.4724137931034483, "train_speed(iter/s)": 0.670938 }, { "epoch": 1.683732487896834, "grad_norm": 3.5927162170410156, 
"learning_rate": 7.455302425535273e-05, "loss": 2.2315263748168945, "memory(GiB)": 72.85, "step": 39300, "token_acc": 0.5021097046413502, "train_speed(iter/s)": 0.67096 }, { "epoch": 1.6839467032260829, "grad_norm": 4.7900519371032715, "learning_rate": 7.454716155620192e-05, "loss": 2.483461380004883, "memory(GiB)": 72.85, "step": 39305, "token_acc": 0.4681647940074906, "train_speed(iter/s)": 0.670991 }, { "epoch": 1.6841609185553317, "grad_norm": 4.860448360443115, "learning_rate": 7.454129841235624e-05, "loss": 2.191554069519043, "memory(GiB)": 72.85, "step": 39310, "token_acc": 0.5363636363636364, "train_speed(iter/s)": 0.671 }, { "epoch": 1.684375133884581, "grad_norm": 5.301401615142822, "learning_rate": 7.453543482392195e-05, "loss": 2.3289443969726564, "memory(GiB)": 72.85, "step": 39315, "token_acc": 0.4849624060150376, "train_speed(iter/s)": 0.670994 }, { "epoch": 1.6845893492138297, "grad_norm": 3.5991568565368652, "learning_rate": 7.45295707910052e-05, "loss": 2.3240041732788086, "memory(GiB)": 72.85, "step": 39320, "token_acc": 0.45794392523364486, "train_speed(iter/s)": 0.670989 }, { "epoch": 1.6848035645430786, "grad_norm": 3.271604061126709, "learning_rate": 7.45237063137123e-05, "loss": 2.623760223388672, "memory(GiB)": 72.85, "step": 39325, "token_acc": 0.44272445820433437, "train_speed(iter/s)": 0.670997 }, { "epoch": 1.6850177798723278, "grad_norm": 4.205989837646484, "learning_rate": 7.451784139214944e-05, "loss": 2.4114917755126952, "memory(GiB)": 72.85, "step": 39330, "token_acc": 0.5034246575342466, "train_speed(iter/s)": 0.67101 }, { "epoch": 1.6852319952015766, "grad_norm": 3.2311694622039795, "learning_rate": 7.451197602642288e-05, "loss": 2.2756399154663085, "memory(GiB)": 72.85, "step": 39335, "token_acc": 0.5460526315789473, "train_speed(iter/s)": 0.671025 }, { "epoch": 1.6854462105308254, "grad_norm": 4.019348621368408, "learning_rate": 7.45061102166389e-05, "loss": 2.1860658645629885, "memory(GiB)": 72.85, "step": 39340, "token_acc": 
0.5335276967930029, "train_speed(iter/s)": 0.671036 }, { "epoch": 1.6856604258600747, "grad_norm": 5.341994762420654, "learning_rate": 7.450024396290372e-05, "loss": 2.519389343261719, "memory(GiB)": 72.85, "step": 39345, "token_acc": 0.48120300751879697, "train_speed(iter/s)": 0.671041 }, { "epoch": 1.6858746411893235, "grad_norm": 7.186130523681641, "learning_rate": 7.449437726532364e-05, "loss": 2.7535606384277345, "memory(GiB)": 72.85, "step": 39350, "token_acc": 0.4425087108013937, "train_speed(iter/s)": 0.67103 }, { "epoch": 1.6860888565185723, "grad_norm": 5.207269668579102, "learning_rate": 7.448851012400495e-05, "loss": 2.4213558197021485, "memory(GiB)": 72.85, "step": 39355, "token_acc": 0.48616600790513836, "train_speed(iter/s)": 0.671049 }, { "epoch": 1.6863030718478216, "grad_norm": 4.556234359741211, "learning_rate": 7.448264253905393e-05, "loss": 2.1904544830322266, "memory(GiB)": 72.85, "step": 39360, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.671043 }, { "epoch": 1.6865172871770704, "grad_norm": 3.419678211212158, "learning_rate": 7.447677451057686e-05, "loss": 2.2490074157714846, "memory(GiB)": 72.85, "step": 39365, "token_acc": 0.5460750853242321, "train_speed(iter/s)": 0.671067 }, { "epoch": 1.6867315025063192, "grad_norm": 5.47413969039917, "learning_rate": 7.447090603868006e-05, "loss": 2.446017265319824, "memory(GiB)": 72.85, "step": 39370, "token_acc": 0.48863636363636365, "train_speed(iter/s)": 0.671083 }, { "epoch": 1.6869457178355685, "grad_norm": 4.212732315063477, "learning_rate": 7.446503712346985e-05, "loss": 2.437731170654297, "memory(GiB)": 72.85, "step": 39375, "token_acc": 0.5, "train_speed(iter/s)": 0.671075 }, { "epoch": 1.6871599331648173, "grad_norm": 3.8554842472076416, "learning_rate": 7.445916776505254e-05, "loss": 2.2430919647216796, "memory(GiB)": 72.85, "step": 39380, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.671075 }, { "epoch": 1.687374148494066, "grad_norm": 4.246274471282959, 
"learning_rate": 7.445329796353446e-05, "loss": 2.3167224884033204, "memory(GiB)": 72.85, "step": 39385, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.671073 }, { "epoch": 1.6875883638233153, "grad_norm": 4.704021453857422, "learning_rate": 7.444742771902196e-05, "loss": 2.7409664154052735, "memory(GiB)": 72.85, "step": 39390, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.671078 }, { "epoch": 1.6878025791525642, "grad_norm": 4.763752460479736, "learning_rate": 7.444155703162135e-05, "loss": 2.632854461669922, "memory(GiB)": 72.85, "step": 39395, "token_acc": 0.4717607973421927, "train_speed(iter/s)": 0.671072 }, { "epoch": 1.688016794481813, "grad_norm": 4.435133457183838, "learning_rate": 7.443568590143901e-05, "loss": 2.2479278564453127, "memory(GiB)": 72.85, "step": 39400, "token_acc": 0.5261194029850746, "train_speed(iter/s)": 0.671074 }, { "epoch": 1.6882310098110622, "grad_norm": 3.565748929977417, "learning_rate": 7.442981432858132e-05, "loss": 2.582170295715332, "memory(GiB)": 72.85, "step": 39405, "token_acc": 0.45103092783505155, "train_speed(iter/s)": 0.671093 }, { "epoch": 1.688445225140311, "grad_norm": 4.818395614624023, "learning_rate": 7.44239423131546e-05, "loss": 2.405940055847168, "memory(GiB)": 72.85, "step": 39410, "token_acc": 0.5168539325842697, "train_speed(iter/s)": 0.671092 }, { "epoch": 1.6886594404695598, "grad_norm": 3.9209213256835938, "learning_rate": 7.441806985526527e-05, "loss": 2.3756549835205076, "memory(GiB)": 72.85, "step": 39415, "token_acc": 0.49572649572649574, "train_speed(iter/s)": 0.6711 }, { "epoch": 1.688873655798809, "grad_norm": 4.6536102294921875, "learning_rate": 7.441219695501971e-05, "loss": 2.497939109802246, "memory(GiB)": 72.85, "step": 39420, "token_acc": 0.46757679180887374, "train_speed(iter/s)": 0.671112 }, { "epoch": 1.689087871128058, "grad_norm": 4.689438819885254, "learning_rate": 7.440632361252428e-05, "loss": 2.123983383178711, "memory(GiB)": 72.85, "step": 39425, "token_acc": 
0.5037037037037037, "train_speed(iter/s)": 0.67112 }, { "epoch": 1.6893020864573067, "grad_norm": 5.206161975860596, "learning_rate": 7.440044982788541e-05, "loss": 2.4062484741210937, "memory(GiB)": 72.85, "step": 39430, "token_acc": 0.5, "train_speed(iter/s)": 0.671141 }, { "epoch": 1.689516301786556, "grad_norm": 4.375946044921875, "learning_rate": 7.43945756012095e-05, "loss": 2.419207000732422, "memory(GiB)": 72.85, "step": 39435, "token_acc": 0.4421052631578947, "train_speed(iter/s)": 0.67114 }, { "epoch": 1.6897305171158048, "grad_norm": 5.683651924133301, "learning_rate": 7.438870093260295e-05, "loss": 2.5225048065185547, "memory(GiB)": 72.85, "step": 39440, "token_acc": 0.48161764705882354, "train_speed(iter/s)": 0.671126 }, { "epoch": 1.6899447324450536, "grad_norm": 4.203981399536133, "learning_rate": 7.438282582217224e-05, "loss": 2.3356044769287108, "memory(GiB)": 72.85, "step": 39445, "token_acc": 0.46357615894039733, "train_speed(iter/s)": 0.671144 }, { "epoch": 1.6901589477743029, "grad_norm": 4.334364414215088, "learning_rate": 7.437695027002374e-05, "loss": 2.3779983520507812, "memory(GiB)": 72.85, "step": 39450, "token_acc": 0.47297297297297297, "train_speed(iter/s)": 0.67116 }, { "epoch": 1.6903731631035517, "grad_norm": 3.4241273403167725, "learning_rate": 7.437107427626393e-05, "loss": 2.541329574584961, "memory(GiB)": 72.85, "step": 39455, "token_acc": 0.4485981308411215, "train_speed(iter/s)": 0.671171 }, { "epoch": 1.6905873784328005, "grad_norm": 4.160327434539795, "learning_rate": 7.436519784099926e-05, "loss": 2.3922088623046873, "memory(GiB)": 72.85, "step": 39460, "token_acc": 0.5047619047619047, "train_speed(iter/s)": 0.671169 }, { "epoch": 1.6908015937620497, "grad_norm": 3.881425380706787, "learning_rate": 7.435932096433616e-05, "loss": 2.5500396728515624, "memory(GiB)": 72.85, "step": 39465, "token_acc": 0.504424778761062, "train_speed(iter/s)": 0.671169 }, { "epoch": 1.6910158090912986, "grad_norm": 4.558581829071045, 
"learning_rate": 7.43534436463811e-05, "loss": 2.9545745849609375, "memory(GiB)": 72.85, "step": 39470, "token_acc": 0.4227129337539432, "train_speed(iter/s)": 0.671178 }, { "epoch": 1.6912300244205474, "grad_norm": 5.828633785247803, "learning_rate": 7.434756588724058e-05, "loss": 2.6071493148803713, "memory(GiB)": 72.85, "step": 39475, "token_acc": 0.43508771929824563, "train_speed(iter/s)": 0.671187 }, { "epoch": 1.6914442397497966, "grad_norm": 4.273815155029297, "learning_rate": 7.434168768702106e-05, "loss": 2.303572082519531, "memory(GiB)": 72.85, "step": 39480, "token_acc": 0.5045592705167173, "train_speed(iter/s)": 0.671185 }, { "epoch": 1.6916584550790454, "grad_norm": 3.9256224632263184, "learning_rate": 7.433580904582903e-05, "loss": 2.113911437988281, "memory(GiB)": 72.85, "step": 39485, "token_acc": 0.5, "train_speed(iter/s)": 0.671198 }, { "epoch": 1.6918726704082943, "grad_norm": 4.461656093597412, "learning_rate": 7.432992996377099e-05, "loss": 2.4076255798339843, "memory(GiB)": 72.85, "step": 39490, "token_acc": 0.49230769230769234, "train_speed(iter/s)": 0.67119 }, { "epoch": 1.6920868857375435, "grad_norm": 3.3476388454437256, "learning_rate": 7.432405044095346e-05, "loss": 2.5534643173217773, "memory(GiB)": 72.85, "step": 39495, "token_acc": 0.46745562130177515, "train_speed(iter/s)": 0.671168 }, { "epoch": 1.6923011010667923, "grad_norm": 5.184874057769775, "learning_rate": 7.431817047748292e-05, "loss": 2.3190011978149414, "memory(GiB)": 72.85, "step": 39500, "token_acc": 0.4634146341463415, "train_speed(iter/s)": 0.671183 }, { "epoch": 1.6923011010667923, "eval_loss": 2.1408355236053467, "eval_runtime": 14.7552, "eval_samples_per_second": 6.777, "eval_steps_per_second": 6.777, "eval_token_acc": 0.49338146811071, "step": 39500 }, { "epoch": 1.6925153163960411, "grad_norm": 5.481943607330322, "learning_rate": 7.431229007346591e-05, "loss": 2.553913116455078, "memory(GiB)": 72.85, "step": 39505, "token_acc": 0.4951197870452529, 
"train_speed(iter/s)": 0.670986 }, { "epoch": 1.6927295317252904, "grad_norm": 3.64129900932312, "learning_rate": 7.430640922900897e-05, "loss": 2.379390525817871, "memory(GiB)": 72.85, "step": 39510, "token_acc": 0.5, "train_speed(iter/s)": 0.671001 }, { "epoch": 1.6929437470545392, "grad_norm": 4.486857891082764, "learning_rate": 7.43005279442186e-05, "loss": 2.47705135345459, "memory(GiB)": 72.85, "step": 39515, "token_acc": 0.4936708860759494, "train_speed(iter/s)": 0.670994 }, { "epoch": 1.693157962383788, "grad_norm": 3.692335367202759, "learning_rate": 7.429464621920142e-05, "loss": 2.5905447006225586, "memory(GiB)": 72.85, "step": 39520, "token_acc": 0.4876543209876543, "train_speed(iter/s)": 0.671001 }, { "epoch": 1.6933721777130373, "grad_norm": 3.3402464389801025, "learning_rate": 7.428876405406389e-05, "loss": 2.251715087890625, "memory(GiB)": 72.85, "step": 39525, "token_acc": 0.4835164835164835, "train_speed(iter/s)": 0.671002 }, { "epoch": 1.693586393042286, "grad_norm": 3.1751391887664795, "learning_rate": 7.428288144891263e-05, "loss": 2.008901596069336, "memory(GiB)": 72.85, "step": 39530, "token_acc": 0.555921052631579, "train_speed(iter/s)": 0.670995 }, { "epoch": 1.6938006083715351, "grad_norm": 3.7975313663482666, "learning_rate": 7.42769984038542e-05, "loss": 2.251383399963379, "memory(GiB)": 72.85, "step": 39535, "token_acc": 0.5063291139240507, "train_speed(iter/s)": 0.671005 }, { "epoch": 1.6940148237007842, "grad_norm": 3.7988972663879395, "learning_rate": 7.427111491899516e-05, "loss": 2.355864334106445, "memory(GiB)": 72.85, "step": 39540, "token_acc": 0.5160256410256411, "train_speed(iter/s)": 0.671008 }, { "epoch": 1.694229039030033, "grad_norm": 4.51554536819458, "learning_rate": 7.42652309944421e-05, "loss": 2.160073471069336, "memory(GiB)": 72.85, "step": 39545, "token_acc": 0.5231788079470199, "train_speed(iter/s)": 0.670997 }, { "epoch": 1.694443254359282, "grad_norm": 4.90764856338501, "learning_rate": 7.425934663030163e-05, 
"loss": 2.5483425140380858, "memory(GiB)": 72.85, "step": 39550, "token_acc": 0.49624060150375937, "train_speed(iter/s)": 0.67101 }, { "epoch": 1.694657469688531, "grad_norm": 3.6820271015167236, "learning_rate": 7.425346182668033e-05, "loss": 2.103306198120117, "memory(GiB)": 72.85, "step": 39555, "token_acc": 0.5604395604395604, "train_speed(iter/s)": 0.671022 }, { "epoch": 1.6948716850177798, "grad_norm": 4.161357402801514, "learning_rate": 7.424757658368482e-05, "loss": 2.5254501342773437, "memory(GiB)": 72.85, "step": 39560, "token_acc": 0.47157190635451507, "train_speed(iter/s)": 0.67105 }, { "epoch": 1.6950859003470289, "grad_norm": 5.381215572357178, "learning_rate": 7.424169090142172e-05, "loss": 2.277798080444336, "memory(GiB)": 72.85, "step": 39565, "token_acc": 0.4913494809688581, "train_speed(iter/s)": 0.671068 }, { "epoch": 1.695300115676278, "grad_norm": 5.958134651184082, "learning_rate": 7.423580477999763e-05, "loss": 2.2771734237670898, "memory(GiB)": 72.85, "step": 39570, "token_acc": 0.5131086142322098, "train_speed(iter/s)": 0.671088 }, { "epoch": 1.6955143310055267, "grad_norm": 3.908302068710327, "learning_rate": 7.422991821951923e-05, "loss": 2.5147377014160157, "memory(GiB)": 72.85, "step": 39575, "token_acc": 0.4482758620689655, "train_speed(iter/s)": 0.671087 }, { "epoch": 1.6957285463347758, "grad_norm": 4.53363561630249, "learning_rate": 7.422403122009312e-05, "loss": 2.2783432006835938, "memory(GiB)": 72.85, "step": 39580, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.671118 }, { "epoch": 1.6959427616640248, "grad_norm": 5.905533790588379, "learning_rate": 7.421814378182595e-05, "loss": 2.111061859130859, "memory(GiB)": 72.85, "step": 39585, "token_acc": 0.4979757085020243, "train_speed(iter/s)": 0.671139 }, { "epoch": 1.6961569769932736, "grad_norm": 4.112025737762451, "learning_rate": 7.421225590482439e-05, "loss": 2.306970405578613, "memory(GiB)": 72.85, "step": 39590, "token_acc": 0.5122699386503068, 
"train_speed(iter/s)": 0.671135 }, { "epoch": 1.6963711923225226, "grad_norm": 3.752948045730591, "learning_rate": 7.42063675891951e-05, "loss": 2.4178050994873046, "memory(GiB)": 72.85, "step": 39595, "token_acc": 0.498567335243553, "train_speed(iter/s)": 0.671144 }, { "epoch": 1.6965854076517717, "grad_norm": 4.710076808929443, "learning_rate": 7.420047883504476e-05, "loss": 2.6853044509887694, "memory(GiB)": 72.85, "step": 39600, "token_acc": 0.43086816720257237, "train_speed(iter/s)": 0.671139 }, { "epoch": 1.6967996229810205, "grad_norm": 3.776121139526367, "learning_rate": 7.419458964248005e-05, "loss": 2.3522951126098635, "memory(GiB)": 72.85, "step": 39605, "token_acc": 0.5034965034965035, "train_speed(iter/s)": 0.671157 }, { "epoch": 1.6970138383102695, "grad_norm": 5.184537410736084, "learning_rate": 7.418870001160764e-05, "loss": 2.2556575775146483, "memory(GiB)": 72.85, "step": 39610, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.671153 }, { "epoch": 1.6972280536395186, "grad_norm": 5.091438293457031, "learning_rate": 7.418280994253425e-05, "loss": 2.4455562591552735, "memory(GiB)": 72.85, "step": 39615, "token_acc": 0.49712643678160917, "train_speed(iter/s)": 0.671132 }, { "epoch": 1.6974422689687674, "grad_norm": 4.446206569671631, "learning_rate": 7.417691943536658e-05, "loss": 2.2629262924194338, "memory(GiB)": 72.85, "step": 39620, "token_acc": 0.5166051660516605, "train_speed(iter/s)": 0.671146 }, { "epoch": 1.6976564842980164, "grad_norm": 4.486208915710449, "learning_rate": 7.417102849021131e-05, "loss": 2.356703758239746, "memory(GiB)": 72.85, "step": 39625, "token_acc": 0.4597315436241611, "train_speed(iter/s)": 0.671163 }, { "epoch": 1.6978706996272654, "grad_norm": 4.439669132232666, "learning_rate": 7.416513710717519e-05, "loss": 2.383335304260254, "memory(GiB)": 72.85, "step": 39630, "token_acc": 0.4912891986062718, "train_speed(iter/s)": 0.671148 }, { "epoch": 1.6980849149565143, "grad_norm": 4.389982223510742, 
"learning_rate": 7.415924528636496e-05, "loss": 2.3901369094848635, "memory(GiB)": 72.85, "step": 39635, "token_acc": 0.49480968858131485, "train_speed(iter/s)": 0.671155 }, { "epoch": 1.6982991302857633, "grad_norm": 5.232461452484131, "learning_rate": 7.415335302788731e-05, "loss": 2.34940185546875, "memory(GiB)": 72.85, "step": 39640, "token_acc": 0.4904214559386973, "train_speed(iter/s)": 0.671157 }, { "epoch": 1.6985133456150123, "grad_norm": 3.596656322479248, "learning_rate": 7.414746033184904e-05, "loss": 2.713917541503906, "memory(GiB)": 72.85, "step": 39645, "token_acc": 0.42452830188679247, "train_speed(iter/s)": 0.671159 }, { "epoch": 1.6987275609442611, "grad_norm": 5.606624126434326, "learning_rate": 7.414156719835688e-05, "loss": 2.465474510192871, "memory(GiB)": 72.85, "step": 39650, "token_acc": 0.48091603053435117, "train_speed(iter/s)": 0.671153 }, { "epoch": 1.6989417762735102, "grad_norm": 4.857123851776123, "learning_rate": 7.413567362751756e-05, "loss": 2.5228370666503905, "memory(GiB)": 72.85, "step": 39655, "token_acc": 0.4551282051282051, "train_speed(iter/s)": 0.671148 }, { "epoch": 1.6991559916027592, "grad_norm": 4.509739398956299, "learning_rate": 7.412977961943786e-05, "loss": 2.5600343704223634, "memory(GiB)": 72.85, "step": 39660, "token_acc": 0.4785714285714286, "train_speed(iter/s)": 0.671146 }, { "epoch": 1.699370206932008, "grad_norm": 4.0035223960876465, "learning_rate": 7.412388517422459e-05, "loss": 2.055192565917969, "memory(GiB)": 72.85, "step": 39665, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.671138 }, { "epoch": 1.699584422261257, "grad_norm": 5.023301601409912, "learning_rate": 7.41179902919845e-05, "loss": 2.4259311676025392, "memory(GiB)": 72.85, "step": 39670, "token_acc": 0.49085365853658536, "train_speed(iter/s)": 0.671151 }, { "epoch": 1.699798637590506, "grad_norm": 4.370429515838623, "learning_rate": 7.41120949728244e-05, "loss": 2.3097333908081055, "memory(GiB)": 72.85, "step": 39675, 
"token_acc": 0.5145228215767634, "train_speed(iter/s)": 0.671147 }, { "epoch": 1.700012852919755, "grad_norm": 4.199328899383545, "learning_rate": 7.410619921685109e-05, "loss": 2.3605361938476563, "memory(GiB)": 72.85, "step": 39680, "token_acc": 0.53125, "train_speed(iter/s)": 0.671132 }, { "epoch": 1.700227068249004, "grad_norm": 4.6270365715026855, "learning_rate": 7.410030302417135e-05, "loss": 2.557576560974121, "memory(GiB)": 72.85, "step": 39685, "token_acc": 0.47112462006079026, "train_speed(iter/s)": 0.671141 }, { "epoch": 1.700441283578253, "grad_norm": 4.741785526275635, "learning_rate": 7.409440639489202e-05, "loss": 2.3678197860717773, "memory(GiB)": 72.85, "step": 39690, "token_acc": 0.46464646464646464, "train_speed(iter/s)": 0.671138 }, { "epoch": 1.7006554989075018, "grad_norm": 4.463021278381348, "learning_rate": 7.408850932911991e-05, "loss": 2.3104215621948243, "memory(GiB)": 72.85, "step": 39695, "token_acc": 0.5361216730038023, "train_speed(iter/s)": 0.671155 }, { "epoch": 1.7008697142367508, "grad_norm": 4.331776142120361, "learning_rate": 7.408261182696186e-05, "loss": 2.2918685913085937, "memory(GiB)": 72.85, "step": 39700, "token_acc": 0.4931972789115646, "train_speed(iter/s)": 0.671145 }, { "epoch": 1.7010839295659999, "grad_norm": 5.384716987609863, "learning_rate": 7.407671388852471e-05, "loss": 2.4624134063720704, "memory(GiB)": 72.85, "step": 39705, "token_acc": 0.4785276073619632, "train_speed(iter/s)": 0.671145 }, { "epoch": 1.7012981448952487, "grad_norm": 3.190192222595215, "learning_rate": 7.40708155139153e-05, "loss": 2.679496955871582, "memory(GiB)": 72.85, "step": 39710, "token_acc": 0.46449704142011833, "train_speed(iter/s)": 0.671139 }, { "epoch": 1.7015123602244977, "grad_norm": 3.935662269592285, "learning_rate": 7.406491670324047e-05, "loss": 2.5086320877075194, "memory(GiB)": 72.85, "step": 39715, "token_acc": 0.4925373134328358, "train_speed(iter/s)": 0.671135 }, { "epoch": 1.7017265755537467, "grad_norm": 
3.690265655517578, "learning_rate": 7.405901745660711e-05, "loss": 2.407974052429199, "memory(GiB)": 72.85, "step": 39720, "token_acc": 0.4661016949152542, "train_speed(iter/s)": 0.671163 }, { "epoch": 1.7019407908829955, "grad_norm": 3.972486734390259, "learning_rate": 7.405311777412208e-05, "loss": 2.2643543243408204, "memory(GiB)": 72.85, "step": 39725, "token_acc": 0.5423076923076923, "train_speed(iter/s)": 0.671179 }, { "epoch": 1.7021550062122446, "grad_norm": 4.397244930267334, "learning_rate": 7.404721765589227e-05, "loss": 2.405278205871582, "memory(GiB)": 72.85, "step": 39730, "token_acc": 0.4763779527559055, "train_speed(iter/s)": 0.671187 }, { "epoch": 1.7023692215414936, "grad_norm": 4.257275104522705, "learning_rate": 7.404131710202455e-05, "loss": 2.4695663452148438, "memory(GiB)": 72.85, "step": 39735, "token_acc": 0.49074074074074076, "train_speed(iter/s)": 0.671198 }, { "epoch": 1.7025834368707424, "grad_norm": 7.889994144439697, "learning_rate": 7.403541611262579e-05, "loss": 1.8939218521118164, "memory(GiB)": 72.85, "step": 39740, "token_acc": 0.5666666666666667, "train_speed(iter/s)": 0.671181 }, { "epoch": 1.7027976521999915, "grad_norm": 3.3764171600341797, "learning_rate": 7.402951468780296e-05, "loss": 2.323431396484375, "memory(GiB)": 72.85, "step": 39745, "token_acc": 0.5177514792899408, "train_speed(iter/s)": 0.671178 }, { "epoch": 1.7030118675292405, "grad_norm": 4.327255725860596, "learning_rate": 7.402361282766292e-05, "loss": 2.5297836303710937, "memory(GiB)": 72.85, "step": 39750, "token_acc": 0.4677871148459384, "train_speed(iter/s)": 0.671154 }, { "epoch": 1.7032260828584893, "grad_norm": 4.972187519073486, "learning_rate": 7.401771053231257e-05, "loss": 2.35975341796875, "memory(GiB)": 72.85, "step": 39755, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.671142 }, { "epoch": 1.7034402981877383, "grad_norm": 4.8141303062438965, "learning_rate": 7.40118078018589e-05, "loss": 2.0761375427246094, "memory(GiB)": 72.85, 
"step": 39760, "token_acc": 0.5015576323987538, "train_speed(iter/s)": 0.671171 }, { "epoch": 1.7036545135169874, "grad_norm": 3.638817548751831, "learning_rate": 7.40059046364088e-05, "loss": 2.176527214050293, "memory(GiB)": 72.85, "step": 39765, "token_acc": 0.5015873015873016, "train_speed(iter/s)": 0.671159 }, { "epoch": 1.7038687288462362, "grad_norm": 3.9773671627044678, "learning_rate": 7.400000103606919e-05, "loss": 2.087384033203125, "memory(GiB)": 72.85, "step": 39770, "token_acc": 0.5382059800664452, "train_speed(iter/s)": 0.671162 }, { "epoch": 1.7040829441754852, "grad_norm": 3.7809338569641113, "learning_rate": 7.399409700094707e-05, "loss": 2.3876590728759766, "memory(GiB)": 72.85, "step": 39775, "token_acc": 0.4631578947368421, "train_speed(iter/s)": 0.671168 }, { "epoch": 1.7042971595047343, "grad_norm": 3.2155027389526367, "learning_rate": 7.398819253114937e-05, "loss": 2.1884864807128905, "memory(GiB)": 72.85, "step": 39780, "token_acc": 0.5, "train_speed(iter/s)": 0.671164 }, { "epoch": 1.704511374833983, "grad_norm": 5.901324272155762, "learning_rate": 7.398228762678305e-05, "loss": 2.300421714782715, "memory(GiB)": 72.85, "step": 39785, "token_acc": 0.5317460317460317, "train_speed(iter/s)": 0.671174 }, { "epoch": 1.704725590163232, "grad_norm": 4.009257793426514, "learning_rate": 7.397638228795511e-05, "loss": 2.2830425262451173, "memory(GiB)": 72.85, "step": 39790, "token_acc": 0.5273224043715847, "train_speed(iter/s)": 0.671168 }, { "epoch": 1.7049398054924811, "grad_norm": 5.072516441345215, "learning_rate": 7.397047651477252e-05, "loss": 2.5685853958129883, "memory(GiB)": 72.85, "step": 39795, "token_acc": 0.48338368580060426, "train_speed(iter/s)": 0.67116 }, { "epoch": 1.70515402082173, "grad_norm": 4.117753028869629, "learning_rate": 7.396457030734224e-05, "loss": 2.4062999725341796, "memory(GiB)": 72.85, "step": 39800, "token_acc": 0.5245901639344263, "train_speed(iter/s)": 0.671165 }, { "epoch": 1.705368236150979, "grad_norm": 
4.152995586395264, "learning_rate": 7.39586636657713e-05, "loss": 2.375234603881836, "memory(GiB)": 72.85, "step": 39805, "token_acc": 0.5323076923076923, "train_speed(iter/s)": 0.671178 }, { "epoch": 1.705582451480228, "grad_norm": 3.531130790710449, "learning_rate": 7.395275659016668e-05, "loss": 2.6626440048217774, "memory(GiB)": 72.85, "step": 39810, "token_acc": 0.45272206303724927, "train_speed(iter/s)": 0.671174 }, { "epoch": 1.7057966668094768, "grad_norm": 4.530642032623291, "learning_rate": 7.39468490806354e-05, "loss": 2.2318132400512694, "memory(GiB)": 72.85, "step": 39815, "token_acc": 0.5127272727272727, "train_speed(iter/s)": 0.671169 }, { "epoch": 1.7060108821387259, "grad_norm": 4.377468109130859, "learning_rate": 7.394094113728452e-05, "loss": 2.453637886047363, "memory(GiB)": 72.85, "step": 39820, "token_acc": 0.43010752688172044, "train_speed(iter/s)": 0.671177 }, { "epoch": 1.706225097467975, "grad_norm": 4.552980422973633, "learning_rate": 7.393503276022101e-05, "loss": 2.205768585205078, "memory(GiB)": 72.85, "step": 39825, "token_acc": 0.51953125, "train_speed(iter/s)": 0.671174 }, { "epoch": 1.7064393127972237, "grad_norm": 4.88911247253418, "learning_rate": 7.392912394955192e-05, "loss": 2.418840789794922, "memory(GiB)": 72.85, "step": 39830, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.671187 }, { "epoch": 1.7066535281264728, "grad_norm": 4.233193397521973, "learning_rate": 7.392321470538432e-05, "loss": 2.4715896606445313, "memory(GiB)": 72.85, "step": 39835, "token_acc": 0.4326241134751773, "train_speed(iter/s)": 0.671204 }, { "epoch": 1.7068677434557218, "grad_norm": 3.8355374336242676, "learning_rate": 7.391730502782524e-05, "loss": 2.2087568283081054, "memory(GiB)": 72.85, "step": 39840, "token_acc": 0.5052631578947369, "train_speed(iter/s)": 0.671184 }, { "epoch": 1.7070819587849706, "grad_norm": 6.3271803855896, "learning_rate": 7.391139491698173e-05, "loss": 2.4542118072509767, "memory(GiB)": 72.85, "step": 39845, 
"token_acc": 0.5069444444444444, "train_speed(iter/s)": 0.671186 }, { "epoch": 1.7072961741142196, "grad_norm": 4.509565353393555, "learning_rate": 7.390548437296088e-05, "loss": 2.2768630981445312, "memory(GiB)": 72.85, "step": 39850, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.671185 }, { "epoch": 1.7075103894434687, "grad_norm": 4.825742721557617, "learning_rate": 7.389957339586976e-05, "loss": 2.4971376419067384, "memory(GiB)": 72.85, "step": 39855, "token_acc": 0.49696969696969695, "train_speed(iter/s)": 0.671147 }, { "epoch": 1.7077246047727175, "grad_norm": 3.813976287841797, "learning_rate": 7.389366198581544e-05, "loss": 2.445143127441406, "memory(GiB)": 72.85, "step": 39860, "token_acc": 0.44542772861356933, "train_speed(iter/s)": 0.67116 }, { "epoch": 1.7079388201019665, "grad_norm": 3.6040515899658203, "learning_rate": 7.388775014290503e-05, "loss": 2.3441633224487304, "memory(GiB)": 72.85, "step": 39865, "token_acc": 0.4863013698630137, "train_speed(iter/s)": 0.671163 }, { "epoch": 1.7081530354312155, "grad_norm": 4.392492294311523, "learning_rate": 7.38818378672456e-05, "loss": 2.389675331115723, "memory(GiB)": 72.85, "step": 39870, "token_acc": 0.4954954954954955, "train_speed(iter/s)": 0.67118 }, { "epoch": 1.7083672507604644, "grad_norm": 4.8052978515625, "learning_rate": 7.387592515894429e-05, "loss": 2.7107051849365233, "memory(GiB)": 72.85, "step": 39875, "token_acc": 0.47330960854092524, "train_speed(iter/s)": 0.671184 }, { "epoch": 1.7085814660897134, "grad_norm": 3.682986259460449, "learning_rate": 7.38700120181082e-05, "loss": 2.3910926818847655, "memory(GiB)": 72.85, "step": 39880, "token_acc": 0.5156794425087108, "train_speed(iter/s)": 0.671193 }, { "epoch": 1.7087956814189624, "grad_norm": 4.563267707824707, "learning_rate": 7.386409844484442e-05, "loss": 2.3007469177246094, "memory(GiB)": 72.85, "step": 39885, "token_acc": 0.5075757575757576, "train_speed(iter/s)": 0.671207 }, { "epoch": 1.7090098967482112, "grad_norm": 
4.472109794616699, "learning_rate": 7.385818443926014e-05, "loss": 2.606006050109863, "memory(GiB)": 72.85, "step": 39890, "token_acc": 0.43508771929824563, "train_speed(iter/s)": 0.671209 }, { "epoch": 1.7092241120774603, "grad_norm": 4.2932820320129395, "learning_rate": 7.385227000146247e-05, "loss": 2.499224090576172, "memory(GiB)": 72.85, "step": 39895, "token_acc": 0.43730886850152906, "train_speed(iter/s)": 0.671215 }, { "epoch": 1.7094383274067093, "grad_norm": 4.43754243850708, "learning_rate": 7.384635513155854e-05, "loss": 2.4516468048095703, "memory(GiB)": 72.85, "step": 39900, "token_acc": 0.46545454545454545, "train_speed(iter/s)": 0.671226 }, { "epoch": 1.7096525427359581, "grad_norm": 4.913219928741455, "learning_rate": 7.384043982965552e-05, "loss": 2.4767765045166015, "memory(GiB)": 72.85, "step": 39905, "token_acc": 0.44376899696048633, "train_speed(iter/s)": 0.67124 }, { "epoch": 1.7098667580652072, "grad_norm": 4.876440525054932, "learning_rate": 7.383452409586058e-05, "loss": 2.5508230209350584, "memory(GiB)": 72.85, "step": 39910, "token_acc": 0.48788927335640137, "train_speed(iter/s)": 0.671257 }, { "epoch": 1.7100809733944562, "grad_norm": 4.854902267456055, "learning_rate": 7.382860793028086e-05, "loss": 2.327640151977539, "memory(GiB)": 72.85, "step": 39915, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.671243 }, { "epoch": 1.710295188723705, "grad_norm": 3.7956888675689697, "learning_rate": 7.382269133302357e-05, "loss": 2.4917369842529298, "memory(GiB)": 72.85, "step": 39920, "token_acc": 0.48830409356725146, "train_speed(iter/s)": 0.67125 }, { "epoch": 1.710509404052954, "grad_norm": 5.541321754455566, "learning_rate": 7.381677430419587e-05, "loss": 2.2445241928100588, "memory(GiB)": 72.85, "step": 39925, "token_acc": 0.4632352941176471, "train_speed(iter/s)": 0.671263 }, { "epoch": 1.710723619382203, "grad_norm": 4.543847560882568, "learning_rate": 7.381085684390495e-05, "loss": 2.1852981567382814, "memory(GiB)": 72.85, 
"step": 39930, "token_acc": 0.5308641975308642, "train_speed(iter/s)": 0.671281 }, { "epoch": 1.7109378347114519, "grad_norm": 4.8704657554626465, "learning_rate": 7.380493895225804e-05, "loss": 2.0556009292602537, "memory(GiB)": 72.85, "step": 39935, "token_acc": 0.5447470817120622, "train_speed(iter/s)": 0.671286 }, { "epoch": 1.711152050040701, "grad_norm": 4.807701110839844, "learning_rate": 7.379902062936233e-05, "loss": 2.6229412078857424, "memory(GiB)": 72.85, "step": 39940, "token_acc": 0.4459016393442623, "train_speed(iter/s)": 0.671276 }, { "epoch": 1.71136626536995, "grad_norm": 4.387634754180908, "learning_rate": 7.379310187532502e-05, "loss": 2.826684761047363, "memory(GiB)": 72.85, "step": 39945, "token_acc": 0.462406015037594, "train_speed(iter/s)": 0.671264 }, { "epoch": 1.7115804806991988, "grad_norm": 3.525982618331909, "learning_rate": 7.378718269025338e-05, "loss": 2.4742055892944337, "memory(GiB)": 72.85, "step": 39950, "token_acc": 0.5098684210526315, "train_speed(iter/s)": 0.671266 }, { "epoch": 1.7117946960284478, "grad_norm": 3.984863758087158, "learning_rate": 7.378126307425458e-05, "loss": 2.384701156616211, "memory(GiB)": 72.85, "step": 39955, "token_acc": 0.498371335504886, "train_speed(iter/s)": 0.67127 }, { "epoch": 1.7120089113576968, "grad_norm": 5.68740177154541, "learning_rate": 7.37753430274359e-05, "loss": 2.4772611618041993, "memory(GiB)": 72.85, "step": 39960, "token_acc": 0.4479495268138801, "train_speed(iter/s)": 0.671256 }, { "epoch": 1.7122231266869457, "grad_norm": 3.683420181274414, "learning_rate": 7.376942254990458e-05, "loss": 2.328022766113281, "memory(GiB)": 72.85, "step": 39965, "token_acc": 0.4895833333333333, "train_speed(iter/s)": 0.67125 }, { "epoch": 1.7124373420161947, "grad_norm": 3.7875263690948486, "learning_rate": 7.376350164176788e-05, "loss": 2.233386993408203, "memory(GiB)": 72.85, "step": 39970, "token_acc": 0.5088967971530249, "train_speed(iter/s)": 0.671245 }, { "epoch": 1.7126515573454437, 
"grad_norm": 3.924647808074951, "learning_rate": 7.375758030313304e-05, "loss": 2.3503952026367188, "memory(GiB)": 72.85, "step": 39975, "token_acc": 0.47648902821316613, "train_speed(iter/s)": 0.671248 }, { "epoch": 1.7128657726746925, "grad_norm": 4.216540813446045, "learning_rate": 7.375165853410736e-05, "loss": 1.9429943084716796, "memory(GiB)": 72.85, "step": 39980, "token_acc": 0.5508474576271186, "train_speed(iter/s)": 0.671251 }, { "epoch": 1.7130799880039416, "grad_norm": 4.121796607971191, "learning_rate": 7.37457363347981e-05, "loss": 2.2108089447021486, "memory(GiB)": 72.85, "step": 39985, "token_acc": 0.515625, "train_speed(iter/s)": 0.671258 }, { "epoch": 1.7132942033331906, "grad_norm": 5.544744491577148, "learning_rate": 7.373981370531256e-05, "loss": 2.469208335876465, "memory(GiB)": 72.85, "step": 39990, "token_acc": 0.4967532467532468, "train_speed(iter/s)": 0.671266 }, { "epoch": 1.7135084186624394, "grad_norm": 4.961206912994385, "learning_rate": 7.373389064575801e-05, "loss": 2.6427331924438477, "memory(GiB)": 72.85, "step": 39995, "token_acc": 0.4788273615635179, "train_speed(iter/s)": 0.671267 }, { "epoch": 1.7137226339916884, "grad_norm": 4.446823596954346, "learning_rate": 7.372796715624177e-05, "loss": 2.593844985961914, "memory(GiB)": 72.85, "step": 40000, "token_acc": 0.441340782122905, "train_speed(iter/s)": 0.67126 }, { "epoch": 1.7137226339916884, "eval_loss": 1.9524585008621216, "eval_runtime": 15.7711, "eval_samples_per_second": 6.341, "eval_steps_per_second": 6.341, "eval_token_acc": 0.5013440860215054, "step": 40000 }, { "epoch": 1.7139368493209375, "grad_norm": 4.283174514770508, "learning_rate": 7.372204323687115e-05, "loss": 2.3520267486572264, "memory(GiB)": 72.85, "step": 40005, "token_acc": 0.504225352112676, "train_speed(iter/s)": 0.671062 }, { "epoch": 1.7141510646501863, "grad_norm": 3.123032331466675, "learning_rate": 7.371611888775347e-05, "loss": 2.1019779205322267, "memory(GiB)": 72.85, "step": 40010, "token_acc": 
0.5785714285714286, "train_speed(iter/s)": 0.671081 }, { "epoch": 1.7143652799794353, "grad_norm": 5.592560768127441, "learning_rate": 7.371019410899603e-05, "loss": 2.3500240325927733, "memory(GiB)": 72.85, "step": 40015, "token_acc": 0.4786885245901639, "train_speed(iter/s)": 0.671083 }, { "epoch": 1.7145794953086844, "grad_norm": 4.221484661102295, "learning_rate": 7.370426890070621e-05, "loss": 2.5346240997314453, "memory(GiB)": 72.85, "step": 40020, "token_acc": 0.4563106796116505, "train_speed(iter/s)": 0.671091 }, { "epoch": 1.7147937106379332, "grad_norm": 6.597304344177246, "learning_rate": 7.36983432629913e-05, "loss": 2.5434221267700194, "memory(GiB)": 72.85, "step": 40025, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.671096 }, { "epoch": 1.7150079259671822, "grad_norm": 5.293783664703369, "learning_rate": 7.369241719595866e-05, "loss": 2.276202392578125, "memory(GiB)": 72.85, "step": 40030, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.671076 }, { "epoch": 1.7152221412964312, "grad_norm": 4.75269889831543, "learning_rate": 7.368649069971568e-05, "loss": 2.480347442626953, "memory(GiB)": 72.85, "step": 40035, "token_acc": 0.4713804713804714, "train_speed(iter/s)": 0.67108 }, { "epoch": 1.71543635662568, "grad_norm": 4.199528217315674, "learning_rate": 7.36805637743697e-05, "loss": 2.2507740020751954, "memory(GiB)": 72.85, "step": 40040, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.671105 }, { "epoch": 1.715650571954929, "grad_norm": 4.146126747131348, "learning_rate": 7.367463642002809e-05, "loss": 2.3901079177856444, "memory(GiB)": 72.85, "step": 40045, "token_acc": 0.5017667844522968, "train_speed(iter/s)": 0.671097 }, { "epoch": 1.7158647872841781, "grad_norm": 3.899862051010132, "learning_rate": 7.366870863679823e-05, "loss": 2.5740005493164064, "memory(GiB)": 72.85, "step": 40050, "token_acc": 0.4548872180451128, "train_speed(iter/s)": 0.671085 }, { "epoch": 1.716079002613427, "grad_norm": 3.651503562927246, 
"learning_rate": 7.366278042478753e-05, "loss": 2.3710777282714846, "memory(GiB)": 72.85, "step": 40055, "token_acc": 0.5190839694656488, "train_speed(iter/s)": 0.671096 }, { "epoch": 1.716293217942676, "grad_norm": 3.2534334659576416, "learning_rate": 7.365685178410333e-05, "loss": 2.220838737487793, "memory(GiB)": 72.85, "step": 40060, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.671097 }, { "epoch": 1.716507433271925, "grad_norm": 4.173246383666992, "learning_rate": 7.365092271485308e-05, "loss": 2.756297492980957, "memory(GiB)": 72.85, "step": 40065, "token_acc": 0.4457831325301205, "train_speed(iter/s)": 0.671117 }, { "epoch": 1.7167216486011738, "grad_norm": 4.298925876617432, "learning_rate": 7.364499321714419e-05, "loss": 2.3850381851196287, "memory(GiB)": 72.85, "step": 40070, "token_acc": 0.4728434504792332, "train_speed(iter/s)": 0.671142 }, { "epoch": 1.7169358639304229, "grad_norm": 7.171089172363281, "learning_rate": 7.363906329108405e-05, "loss": 2.296423149108887, "memory(GiB)": 72.85, "step": 40075, "token_acc": 0.5050505050505051, "train_speed(iter/s)": 0.671149 }, { "epoch": 1.717150079259672, "grad_norm": 4.439593315124512, "learning_rate": 7.363313293678012e-05, "loss": 2.130344772338867, "memory(GiB)": 72.85, "step": 40080, "token_acc": 0.5313653136531366, "train_speed(iter/s)": 0.671154 }, { "epoch": 1.7173642945889207, "grad_norm": 4.422212600708008, "learning_rate": 7.362720215433982e-05, "loss": 2.4471885681152346, "memory(GiB)": 72.85, "step": 40085, "token_acc": 0.46283783783783783, "train_speed(iter/s)": 0.671169 }, { "epoch": 1.7175785099181697, "grad_norm": 3.2543089389801025, "learning_rate": 7.362127094387056e-05, "loss": 2.1124467849731445, "memory(GiB)": 72.85, "step": 40090, "token_acc": 0.5340909090909091, "train_speed(iter/s)": 0.671157 }, { "epoch": 1.7177927252474188, "grad_norm": 3.955258369445801, "learning_rate": 7.361533930547986e-05, "loss": 2.2319107055664062, "memory(GiB)": 72.85, "step": 40095, 
"token_acc": 0.49572649572649574, "train_speed(iter/s)": 0.671165 }, { "epoch": 1.7180069405766676, "grad_norm": 5.146475791931152, "learning_rate": 7.36094072392751e-05, "loss": 2.408662796020508, "memory(GiB)": 72.85, "step": 40100, "token_acc": 0.4732142857142857, "train_speed(iter/s)": 0.671166 }, { "epoch": 1.7182211559059166, "grad_norm": 4.254671096801758, "learning_rate": 7.36034747453638e-05, "loss": 2.1676393508911134, "memory(GiB)": 72.85, "step": 40105, "token_acc": 0.5209790209790209, "train_speed(iter/s)": 0.671156 }, { "epoch": 1.7184353712351657, "grad_norm": 3.6770524978637695, "learning_rate": 7.359754182385341e-05, "loss": 2.164590263366699, "memory(GiB)": 72.85, "step": 40110, "token_acc": 0.511400651465798, "train_speed(iter/s)": 0.671143 }, { "epoch": 1.7186495865644145, "grad_norm": 3.9792532920837402, "learning_rate": 7.35916084748514e-05, "loss": 2.4929969787597654, "memory(GiB)": 72.85, "step": 40115, "token_acc": 0.49174917491749176, "train_speed(iter/s)": 0.671124 }, { "epoch": 1.7188638018936635, "grad_norm": 3.3639113903045654, "learning_rate": 7.358567469846529e-05, "loss": 2.1726825714111326, "memory(GiB)": 72.85, "step": 40120, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.671123 }, { "epoch": 1.7190780172229125, "grad_norm": 5.00362491607666, "learning_rate": 7.357974049480256e-05, "loss": 2.2211631774902343, "memory(GiB)": 72.85, "step": 40125, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.671121 }, { "epoch": 1.7192922325521613, "grad_norm": 5.02894926071167, "learning_rate": 7.35738058639707e-05, "loss": 2.3394399642944337, "memory(GiB)": 72.85, "step": 40130, "token_acc": 0.5145985401459854, "train_speed(iter/s)": 0.671134 }, { "epoch": 1.7195064478814104, "grad_norm": 4.193275451660156, "learning_rate": 7.356787080607723e-05, "loss": 2.468593788146973, "memory(GiB)": 72.85, "step": 40135, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.671157 }, { "epoch": 1.7197206632106594, "grad_norm": 
3.928093910217285, "learning_rate": 7.356193532122967e-05, "loss": 2.2726112365722657, "memory(GiB)": 72.85, "step": 40140, "token_acc": 0.5058139534883721, "train_speed(iter/s)": 0.671182 }, { "epoch": 1.7199348785399082, "grad_norm": 4.549805164337158, "learning_rate": 7.355599940953555e-05, "loss": 2.3609525680541994, "memory(GiB)": 72.85, "step": 40145, "token_acc": 0.4624505928853755, "train_speed(iter/s)": 0.671195 }, { "epoch": 1.7201490938691573, "grad_norm": 4.200537204742432, "learning_rate": 7.355006307110242e-05, "loss": 2.5005218505859377, "memory(GiB)": 72.85, "step": 40150, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.671196 }, { "epoch": 1.7203633091984063, "grad_norm": 5.473891258239746, "learning_rate": 7.354412630603779e-05, "loss": 2.303693389892578, "memory(GiB)": 72.85, "step": 40155, "token_acc": 0.5418326693227091, "train_speed(iter/s)": 0.671209 }, { "epoch": 1.7205775245276551, "grad_norm": 4.9198384284973145, "learning_rate": 7.353818911444922e-05, "loss": 2.232763481140137, "memory(GiB)": 72.85, "step": 40160, "token_acc": 0.5378787878787878, "train_speed(iter/s)": 0.671223 }, { "epoch": 1.7207917398569041, "grad_norm": 3.567657232284546, "learning_rate": 7.353225149644429e-05, "loss": 2.3003978729248047, "memory(GiB)": 72.85, "step": 40165, "token_acc": 0.5085227272727273, "train_speed(iter/s)": 0.67123 }, { "epoch": 1.7210059551861532, "grad_norm": 4.491098880767822, "learning_rate": 7.352631345213054e-05, "loss": 2.2529922485351563, "memory(GiB)": 72.85, "step": 40170, "token_acc": 0.5105633802816901, "train_speed(iter/s)": 0.671239 }, { "epoch": 1.721220170515402, "grad_norm": 4.041088581085205, "learning_rate": 7.352037498161556e-05, "loss": 2.3819408416748047, "memory(GiB)": 72.85, "step": 40175, "token_acc": 0.48109965635738833, "train_speed(iter/s)": 0.671246 }, { "epoch": 1.721434385844651, "grad_norm": 5.386983394622803, "learning_rate": 7.35144360850069e-05, "loss": 2.418751525878906, "memory(GiB)": 72.85, "step": 
40180, "token_acc": 0.45993031358885017, "train_speed(iter/s)": 0.671251 }, { "epoch": 1.7216486011739, "grad_norm": 4.344141006469727, "learning_rate": 7.35084967624122e-05, "loss": 2.7011587142944338, "memory(GiB)": 72.85, "step": 40185, "token_acc": 0.495114006514658, "train_speed(iter/s)": 0.671263 }, { "epoch": 1.7218628165031489, "grad_norm": 4.421750545501709, "learning_rate": 7.350255701393902e-05, "loss": 2.1563947677612303, "memory(GiB)": 72.85, "step": 40190, "token_acc": 0.5181818181818182, "train_speed(iter/s)": 0.671264 }, { "epoch": 1.722077031832398, "grad_norm": 4.933328151702881, "learning_rate": 7.349661683969498e-05, "loss": 2.444464111328125, "memory(GiB)": 72.85, "step": 40195, "token_acc": 0.4766355140186916, "train_speed(iter/s)": 0.671261 }, { "epoch": 1.722291247161647, "grad_norm": 4.356494426727295, "learning_rate": 7.349067623978767e-05, "loss": 2.5032562255859374, "memory(GiB)": 72.85, "step": 40200, "token_acc": 0.490272373540856, "train_speed(iter/s)": 0.671268 }, { "epoch": 1.7225054624908958, "grad_norm": 3.754817485809326, "learning_rate": 7.348473521432473e-05, "loss": 2.241928482055664, "memory(GiB)": 72.85, "step": 40205, "token_acc": 0.552, "train_speed(iter/s)": 0.671279 }, { "epoch": 1.7227196778201448, "grad_norm": 4.4695658683776855, "learning_rate": 7.347879376341378e-05, "loss": 2.354538345336914, "memory(GiB)": 72.85, "step": 40210, "token_acc": 0.4919093851132686, "train_speed(iter/s)": 0.671307 }, { "epoch": 1.7229338931493938, "grad_norm": 3.7148985862731934, "learning_rate": 7.347285188716245e-05, "loss": 2.28924617767334, "memory(GiB)": 72.85, "step": 40215, "token_acc": 0.5032258064516129, "train_speed(iter/s)": 0.671313 }, { "epoch": 1.7231481084786426, "grad_norm": 4.492621898651123, "learning_rate": 7.346690958567839e-05, "loss": 2.4169889450073243, "memory(GiB)": 72.85, "step": 40220, "token_acc": 0.5071428571428571, "train_speed(iter/s)": 0.671321 }, { "epoch": 1.7233623238078917, "grad_norm": 
3.5021755695343018, "learning_rate": 7.346096685906927e-05, "loss": 2.6661033630371094, "memory(GiB)": 72.85, "step": 40225, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.671333 }, { "epoch": 1.7235765391371407, "grad_norm": 4.566503524780273, "learning_rate": 7.34550237074427e-05, "loss": 2.4791135787963867, "memory(GiB)": 72.85, "step": 40230, "token_acc": 0.4744525547445255, "train_speed(iter/s)": 0.671332 }, { "epoch": 1.7237907544663895, "grad_norm": 4.112529277801514, "learning_rate": 7.34490801309064e-05, "loss": 2.1209754943847656, "memory(GiB)": 72.85, "step": 40235, "token_acc": 0.5186335403726708, "train_speed(iter/s)": 0.671329 }, { "epoch": 1.7240049697956386, "grad_norm": 3.8300223350524902, "learning_rate": 7.3443136129568e-05, "loss": 2.3328788757324217, "memory(GiB)": 72.85, "step": 40240, "token_acc": 0.501628664495114, "train_speed(iter/s)": 0.671336 }, { "epoch": 1.7242191851248876, "grad_norm": 3.838567018508911, "learning_rate": 7.343719170353519e-05, "loss": 2.1861749649047852, "memory(GiB)": 72.85, "step": 40245, "token_acc": 0.5396825396825397, "train_speed(iter/s)": 0.671336 }, { "epoch": 1.7244334004541364, "grad_norm": 3.760434865951538, "learning_rate": 7.343124685291567e-05, "loss": 2.3973552703857424, "memory(GiB)": 72.85, "step": 40250, "token_acc": 0.46179401993355484, "train_speed(iter/s)": 0.671359 }, { "epoch": 1.7246476157833854, "grad_norm": 4.357938289642334, "learning_rate": 7.342530157781714e-05, "loss": 2.2205482482910157, "memory(GiB)": 72.85, "step": 40255, "token_acc": 0.5143884892086331, "train_speed(iter/s)": 0.671367 }, { "epoch": 1.7248618311126345, "grad_norm": 5.581855297088623, "learning_rate": 7.34193558783473e-05, "loss": 2.535365676879883, "memory(GiB)": 72.85, "step": 40260, "token_acc": 0.4879518072289157, "train_speed(iter/s)": 0.671393 }, { "epoch": 1.7250760464418833, "grad_norm": 3.9325952529907227, "learning_rate": 7.341340975461385e-05, "loss": 2.634022903442383, "memory(GiB)": 72.85, 
"step": 40265, "token_acc": 0.48742138364779874, "train_speed(iter/s)": 0.671415 }, { "epoch": 1.7252902617711323, "grad_norm": 4.202033996582031, "learning_rate": 7.340746320672452e-05, "loss": 2.2619392395019533, "memory(GiB)": 72.85, "step": 40270, "token_acc": 0.5378787878787878, "train_speed(iter/s)": 0.671418 }, { "epoch": 1.7255044771003814, "grad_norm": 4.202047348022461, "learning_rate": 7.340151623478704e-05, "loss": 2.288425636291504, "memory(GiB)": 72.85, "step": 40275, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.671429 }, { "epoch": 1.7257186924296302, "grad_norm": 4.250439167022705, "learning_rate": 7.339556883890915e-05, "loss": 2.313853073120117, "memory(GiB)": 72.85, "step": 40280, "token_acc": 0.5218855218855218, "train_speed(iter/s)": 0.671437 }, { "epoch": 1.7259329077588792, "grad_norm": 4.50750732421875, "learning_rate": 7.338962101919857e-05, "loss": 2.8265316009521486, "memory(GiB)": 72.85, "step": 40285, "token_acc": 0.46264367816091956, "train_speed(iter/s)": 0.671428 }, { "epoch": 1.7261471230881282, "grad_norm": 6.846667289733887, "learning_rate": 7.338367277576306e-05, "loss": 2.182842254638672, "memory(GiB)": 72.85, "step": 40290, "token_acc": 0.5365853658536586, "train_speed(iter/s)": 0.671435 }, { "epoch": 1.726361338417377, "grad_norm": 3.584928512573242, "learning_rate": 7.337772410871038e-05, "loss": 2.4146203994750977, "memory(GiB)": 72.85, "step": 40295, "token_acc": 0.47017543859649125, "train_speed(iter/s)": 0.671447 }, { "epoch": 1.726575553746626, "grad_norm": 5.1751227378845215, "learning_rate": 7.337177501814832e-05, "loss": 2.334212875366211, "memory(GiB)": 72.85, "step": 40300, "token_acc": 0.5155709342560554, "train_speed(iter/s)": 0.671441 }, { "epoch": 1.7267897690758751, "grad_norm": 3.517087459564209, "learning_rate": 7.336582550418462e-05, "loss": 2.3967315673828127, "memory(GiB)": 72.85, "step": 40305, "token_acc": 0.48986486486486486, "train_speed(iter/s)": 0.67145 }, { "epoch": 1.727003984405124, 
"grad_norm": 4.956716537475586, "learning_rate": 7.335987556692707e-05, "loss": 2.455598068237305, "memory(GiB)": 72.85, "step": 40310, "token_acc": 0.49454545454545457, "train_speed(iter/s)": 0.671455 }, { "epoch": 1.727218199734373, "grad_norm": 3.879920482635498, "learning_rate": 7.335392520648346e-05, "loss": 2.44159049987793, "memory(GiB)": 72.85, "step": 40315, "token_acc": 0.49693251533742333, "train_speed(iter/s)": 0.67147 }, { "epoch": 1.727432415063622, "grad_norm": 5.066563129425049, "learning_rate": 7.334797442296159e-05, "loss": 2.4065757751464845, "memory(GiB)": 72.85, "step": 40320, "token_acc": 0.4828767123287671, "train_speed(iter/s)": 0.671443 }, { "epoch": 1.7276466303928708, "grad_norm": 3.4188761711120605, "learning_rate": 7.334202321646926e-05, "loss": 2.2944950103759765, "memory(GiB)": 72.85, "step": 40325, "token_acc": 0.4868035190615836, "train_speed(iter/s)": 0.671454 }, { "epoch": 1.7278608457221198, "grad_norm": 4.343893527984619, "learning_rate": 7.333607158711428e-05, "loss": 2.2590890884399415, "memory(GiB)": 72.85, "step": 40330, "token_acc": 0.5259515570934256, "train_speed(iter/s)": 0.671461 }, { "epoch": 1.7280750610513689, "grad_norm": 3.856576919555664, "learning_rate": 7.333011953500447e-05, "loss": 2.3439590454101564, "memory(GiB)": 72.85, "step": 40335, "token_acc": 0.4904214559386973, "train_speed(iter/s)": 0.671456 }, { "epoch": 1.7282892763806177, "grad_norm": 4.896367073059082, "learning_rate": 7.332416706024767e-05, "loss": 2.8102813720703126, "memory(GiB)": 72.85, "step": 40340, "token_acc": 0.43564356435643564, "train_speed(iter/s)": 0.671458 }, { "epoch": 1.7285034917098667, "grad_norm": 3.2067832946777344, "learning_rate": 7.33182141629517e-05, "loss": 2.1110530853271485, "memory(GiB)": 72.85, "step": 40345, "token_acc": 0.5542168674698795, "train_speed(iter/s)": 0.671474 }, { "epoch": 1.7287177070391158, "grad_norm": 3.9811558723449707, "learning_rate": 7.33122608432244e-05, "loss": 2.2969451904296876, 
"memory(GiB)": 72.85, "step": 40350, "token_acc": 0.5034722222222222, "train_speed(iter/s)": 0.671476 }, { "epoch": 1.7289319223683646, "grad_norm": 4.625373840332031, "learning_rate": 7.330630710117363e-05, "loss": 2.3667537689208986, "memory(GiB)": 72.85, "step": 40355, "token_acc": 0.47928994082840237, "train_speed(iter/s)": 0.671471 }, { "epoch": 1.7291461376976136, "grad_norm": 3.6555044651031494, "learning_rate": 7.330035293690725e-05, "loss": 2.3172698974609376, "memory(GiB)": 72.85, "step": 40360, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.671462 }, { "epoch": 1.7293603530268626, "grad_norm": 6.374300956726074, "learning_rate": 7.32943983505331e-05, "loss": 2.448114776611328, "memory(GiB)": 72.85, "step": 40365, "token_acc": 0.4689655172413793, "train_speed(iter/s)": 0.671461 }, { "epoch": 1.7295745683561115, "grad_norm": 5.246699333190918, "learning_rate": 7.328844334215908e-05, "loss": 2.47479133605957, "memory(GiB)": 72.85, "step": 40370, "token_acc": 0.4729241877256318, "train_speed(iter/s)": 0.671477 }, { "epoch": 1.7297887836853605, "grad_norm": 3.9965200424194336, "learning_rate": 7.328248791189308e-05, "loss": 2.194881820678711, "memory(GiB)": 72.85, "step": 40375, "token_acc": 0.5369649805447471, "train_speed(iter/s)": 0.671486 }, { "epoch": 1.7300029990146095, "grad_norm": 4.007454872131348, "learning_rate": 7.327653205984296e-05, "loss": 2.626777458190918, "memory(GiB)": 72.85, "step": 40380, "token_acc": 0.483271375464684, "train_speed(iter/s)": 0.671483 }, { "epoch": 1.7302172143438583, "grad_norm": 3.528625249862671, "learning_rate": 7.327057578611663e-05, "loss": 2.6466875076293945, "memory(GiB)": 72.85, "step": 40385, "token_acc": 0.4491017964071856, "train_speed(iter/s)": 0.671474 }, { "epoch": 1.7304314296731074, "grad_norm": 3.7446448802948, "learning_rate": 7.326461909082198e-05, "loss": 2.856271171569824, "memory(GiB)": 72.85, "step": 40390, "token_acc": 0.4176470588235294, "train_speed(iter/s)": 0.671472 }, { "epoch": 
1.7306456450023564, "grad_norm": 7.4208784103393555, "learning_rate": 7.325866197406693e-05, "loss": 2.4760982513427736, "memory(GiB)": 72.85, "step": 40395, "token_acc": 0.49415204678362573, "train_speed(iter/s)": 0.671502 }, { "epoch": 1.7308598603316052, "grad_norm": 4.7270827293396, "learning_rate": 7.325270443595939e-05, "loss": 2.4702030181884767, "memory(GiB)": 72.85, "step": 40400, "token_acc": 0.5037593984962406, "train_speed(iter/s)": 0.671511 }, { "epoch": 1.7310740756608543, "grad_norm": 4.3759589195251465, "learning_rate": 7.324674647660733e-05, "loss": 2.2319398880004884, "memory(GiB)": 72.85, "step": 40405, "token_acc": 0.5129151291512916, "train_speed(iter/s)": 0.671511 }, { "epoch": 1.7312882909901033, "grad_norm": 3.9312357902526855, "learning_rate": 7.324078809611863e-05, "loss": 2.5670528411865234, "memory(GiB)": 72.85, "step": 40410, "token_acc": 0.4428969359331476, "train_speed(iter/s)": 0.671509 }, { "epoch": 1.731502506319352, "grad_norm": 4.687577724456787, "learning_rate": 7.323482929460126e-05, "loss": 2.23250732421875, "memory(GiB)": 72.85, "step": 40415, "token_acc": 0.5, "train_speed(iter/s)": 0.671516 }, { "epoch": 1.7317167216486011, "grad_norm": 6.502435207366943, "learning_rate": 7.322887007216315e-05, "loss": 2.2786415100097654, "memory(GiB)": 72.85, "step": 40420, "token_acc": 0.47202797202797203, "train_speed(iter/s)": 0.671511 }, { "epoch": 1.7319309369778502, "grad_norm": 4.015013217926025, "learning_rate": 7.322291042891227e-05, "loss": 2.520839309692383, "memory(GiB)": 72.85, "step": 40425, "token_acc": 0.506993006993007, "train_speed(iter/s)": 0.671513 }, { "epoch": 1.732145152307099, "grad_norm": 4.376513957977295, "learning_rate": 7.32169503649566e-05, "loss": 2.17543888092041, "memory(GiB)": 72.85, "step": 40430, "token_acc": 0.5132075471698113, "train_speed(iter/s)": 0.671532 }, { "epoch": 1.732359367636348, "grad_norm": 4.649764537811279, "learning_rate": 7.321098988040406e-05, "loss": 2.188151741027832, "memory(GiB)": 
72.85, "step": 40435, "token_acc": 0.5206611570247934, "train_speed(iter/s)": 0.671548 }, { "epoch": 1.732573582965597, "grad_norm": 4.187249183654785, "learning_rate": 7.320502897536269e-05, "loss": 2.239417839050293, "memory(GiB)": 72.85, "step": 40440, "token_acc": 0.52, "train_speed(iter/s)": 0.671567 }, { "epoch": 1.7327877982948459, "grad_norm": 4.821316242218018, "learning_rate": 7.319906764994046e-05, "loss": 2.4765623092651365, "memory(GiB)": 72.85, "step": 40445, "token_acc": 0.45674740484429066, "train_speed(iter/s)": 0.671567 }, { "epoch": 1.733002013624095, "grad_norm": 4.9043965339660645, "learning_rate": 7.319310590424532e-05, "loss": 2.08687744140625, "memory(GiB)": 72.85, "step": 40450, "token_acc": 0.5518394648829431, "train_speed(iter/s)": 0.67155 }, { "epoch": 1.733216228953344, "grad_norm": 3.181251287460327, "learning_rate": 7.318714373838536e-05, "loss": 2.4485004425048826, "memory(GiB)": 72.85, "step": 40455, "token_acc": 0.4513888888888889, "train_speed(iter/s)": 0.671555 }, { "epoch": 1.7334304442825927, "grad_norm": 6.204898834228516, "learning_rate": 7.318118115246852e-05, "loss": 2.2413715362548827, "memory(GiB)": 72.85, "step": 40460, "token_acc": 0.5157593123209169, "train_speed(iter/s)": 0.671547 }, { "epoch": 1.7336446596118418, "grad_norm": 3.839531183242798, "learning_rate": 7.317521814660282e-05, "loss": 2.2592710494995116, "memory(GiB)": 72.85, "step": 40465, "token_acc": 0.4668769716088328, "train_speed(iter/s)": 0.671562 }, { "epoch": 1.7338588749410908, "grad_norm": 3.7415823936462402, "learning_rate": 7.316925472089632e-05, "loss": 2.438621711730957, "memory(GiB)": 72.85, "step": 40470, "token_acc": 0.5066225165562914, "train_speed(iter/s)": 0.671566 }, { "epoch": 1.7340730902703396, "grad_norm": 5.6914825439453125, "learning_rate": 7.316329087545703e-05, "loss": 2.3869564056396486, "memory(GiB)": 72.85, "step": 40475, "token_acc": 0.4935064935064935, "train_speed(iter/s)": 0.671564 }, { "epoch": 1.7342873055995887, 
"grad_norm": 3.817795753479004, "learning_rate": 7.315732661039298e-05, "loss": 2.644746017456055, "memory(GiB)": 72.85, "step": 40480, "token_acc": 0.4641509433962264, "train_speed(iter/s)": 0.671569 }, { "epoch": 1.7345015209288377, "grad_norm": 4.451274871826172, "learning_rate": 7.315136192581227e-05, "loss": 2.48736457824707, "memory(GiB)": 72.85, "step": 40485, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.671575 }, { "epoch": 1.7347157362580865, "grad_norm": 3.98066782951355, "learning_rate": 7.314539682182291e-05, "loss": 2.4586599349975584, "memory(GiB)": 72.85, "step": 40490, "token_acc": 0.48333333333333334, "train_speed(iter/s)": 0.671574 }, { "epoch": 1.7349299515873355, "grad_norm": 4.1878275871276855, "learning_rate": 7.313943129853296e-05, "loss": 2.2452091217041015, "memory(GiB)": 72.85, "step": 40495, "token_acc": 0.5300751879699248, "train_speed(iter/s)": 0.67159 }, { "epoch": 1.7351441669165846, "grad_norm": 4.272491455078125, "learning_rate": 7.313346535605052e-05, "loss": 2.242580795288086, "memory(GiB)": 72.85, "step": 40500, "token_acc": 0.49063670411985016, "train_speed(iter/s)": 0.671614 }, { "epoch": 1.7351441669165846, "eval_loss": 2.105419874191284, "eval_runtime": 15.2841, "eval_samples_per_second": 6.543, "eval_steps_per_second": 6.543, "eval_token_acc": 0.48525073746312686, "step": 40500 }, { "epoch": 1.7353583822458334, "grad_norm": 4.549574851989746, "learning_rate": 7.312749899448365e-05, "loss": 2.520038032531738, "memory(GiB)": 72.85, "step": 40505, "token_acc": 0.4753593429158111, "train_speed(iter/s)": 0.671423 }, { "epoch": 1.7355725975750824, "grad_norm": 3.974760055541992, "learning_rate": 7.312153221394042e-05, "loss": 2.4580013275146486, "memory(GiB)": 72.85, "step": 40510, "token_acc": 0.5046153846153846, "train_speed(iter/s)": 0.671444 }, { "epoch": 1.7357868129043315, "grad_norm": 3.370898723602295, "learning_rate": 7.311556501452897e-05, "loss": 2.587380218505859, "memory(GiB)": 72.85, "step": 40515, 
"token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.671457 }, { "epoch": 1.7360010282335803, "grad_norm": 5.420435428619385, "learning_rate": 7.310959739635739e-05, "loss": 2.5527326583862306, "memory(GiB)": 72.85, "step": 40520, "token_acc": 0.45692883895131087, "train_speed(iter/s)": 0.671454 }, { "epoch": 1.7362152435628293, "grad_norm": 4.645427703857422, "learning_rate": 7.310362935953375e-05, "loss": 2.3212305068969727, "memory(GiB)": 72.85, "step": 40525, "token_acc": 0.4748201438848921, "train_speed(iter/s)": 0.671448 }, { "epoch": 1.7364294588920783, "grad_norm": 3.3528892993927, "learning_rate": 7.309766090416619e-05, "loss": 2.269697380065918, "memory(GiB)": 72.85, "step": 40530, "token_acc": 0.5118343195266272, "train_speed(iter/s)": 0.671448 }, { "epoch": 1.7366436742213271, "grad_norm": 5.205406188964844, "learning_rate": 7.309169203036283e-05, "loss": 2.4005691528320314, "memory(GiB)": 72.85, "step": 40535, "token_acc": 0.5057915057915058, "train_speed(iter/s)": 0.67145 }, { "epoch": 1.7368578895505762, "grad_norm": 3.7794415950775146, "learning_rate": 7.308572273823182e-05, "loss": 2.358535385131836, "memory(GiB)": 72.85, "step": 40540, "token_acc": 0.5141955835962145, "train_speed(iter/s)": 0.671459 }, { "epoch": 1.7370721048798252, "grad_norm": 3.8065006732940674, "learning_rate": 7.307975302788128e-05, "loss": 2.449640464782715, "memory(GiB)": 72.85, "step": 40545, "token_acc": 0.4618181818181818, "train_speed(iter/s)": 0.671446 }, { "epoch": 1.737286320209074, "grad_norm": 4.2681145668029785, "learning_rate": 7.307378289941938e-05, "loss": 2.6020929336547853, "memory(GiB)": 72.85, "step": 40550, "token_acc": 0.44545454545454544, "train_speed(iter/s)": 0.671425 }, { "epoch": 1.7375005355383233, "grad_norm": 4.983099460601807, "learning_rate": 7.306781235295423e-05, "loss": 2.44521484375, "memory(GiB)": 72.85, "step": 40555, "token_acc": 0.47686832740213525, "train_speed(iter/s)": 0.671429 }, { "epoch": 1.737714750867572, "grad_norm": 
4.544744968414307, "learning_rate": 7.306184138859405e-05, "loss": 2.3460922241210938, "memory(GiB)": 72.85, "step": 40560, "token_acc": 0.5129151291512916, "train_speed(iter/s)": 0.671435 }, { "epoch": 1.737928966196821, "grad_norm": 4.24368953704834, "learning_rate": 7.305587000644696e-05, "loss": 2.3576261520385744, "memory(GiB)": 72.85, "step": 40565, "token_acc": 0.5056179775280899, "train_speed(iter/s)": 0.671437 }, { "epoch": 1.7381431815260702, "grad_norm": 4.7146196365356445, "learning_rate": 7.304989820662115e-05, "loss": 2.587753677368164, "memory(GiB)": 72.85, "step": 40570, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.671438 }, { "epoch": 1.738357396855319, "grad_norm": 4.163466453552246, "learning_rate": 7.304392598922483e-05, "loss": 2.4195009231567384, "memory(GiB)": 72.85, "step": 40575, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.671443 }, { "epoch": 1.7385716121845678, "grad_norm": 4.7824625968933105, "learning_rate": 7.303795335436617e-05, "loss": 2.362600326538086, "memory(GiB)": 72.85, "step": 40580, "token_acc": 0.5055762081784386, "train_speed(iter/s)": 0.671456 }, { "epoch": 1.738785827513817, "grad_norm": 4.933558940887451, "learning_rate": 7.303198030215336e-05, "loss": 2.3619525909423826, "memory(GiB)": 72.85, "step": 40585, "token_acc": 0.4810126582278481, "train_speed(iter/s)": 0.671456 }, { "epoch": 1.7390000428430659, "grad_norm": 4.495782852172852, "learning_rate": 7.302600683269464e-05, "loss": 2.2227249145507812, "memory(GiB)": 72.85, "step": 40590, "token_acc": 0.4981949458483754, "train_speed(iter/s)": 0.671469 }, { "epoch": 1.7392142581723147, "grad_norm": 4.667151927947998, "learning_rate": 7.302003294609819e-05, "loss": 2.7639337539672852, "memory(GiB)": 72.85, "step": 40595, "token_acc": 0.43333333333333335, "train_speed(iter/s)": 0.671472 }, { "epoch": 1.739428473501564, "grad_norm": 4.526971340179443, "learning_rate": 7.301405864247226e-05, "loss": 2.3258636474609373, "memory(GiB)": 72.85, 
"step": 40600, "token_acc": 0.46357615894039733, "train_speed(iter/s)": 0.671474 }, { "epoch": 1.7396426888308127, "grad_norm": 4.006394386291504, "learning_rate": 7.300808392192508e-05, "loss": 2.498992156982422, "memory(GiB)": 72.85, "step": 40605, "token_acc": 0.47157190635451507, "train_speed(iter/s)": 0.671475 }, { "epoch": 1.7398569041600616, "grad_norm": 3.487558603286743, "learning_rate": 7.300210878456484e-05, "loss": 2.1983333587646485, "memory(GiB)": 72.85, "step": 40610, "token_acc": 0.5285714285714286, "train_speed(iter/s)": 0.67148 }, { "epoch": 1.7400711194893108, "grad_norm": 4.971522331237793, "learning_rate": 7.299613323049985e-05, "loss": 2.283761978149414, "memory(GiB)": 72.85, "step": 40615, "token_acc": 0.5403726708074534, "train_speed(iter/s)": 0.671466 }, { "epoch": 1.7402853348185596, "grad_norm": 3.6810250282287598, "learning_rate": 7.299015725983833e-05, "loss": 2.193142318725586, "memory(GiB)": 72.85, "step": 40620, "token_acc": 0.5287356321839081, "train_speed(iter/s)": 0.671456 }, { "epoch": 1.7404995501478084, "grad_norm": 4.914768695831299, "learning_rate": 7.298418087268854e-05, "loss": 2.289683151245117, "memory(GiB)": 72.85, "step": 40625, "token_acc": 0.49469964664310956, "train_speed(iter/s)": 0.671431 }, { "epoch": 1.7407137654770577, "grad_norm": 6.027994632720947, "learning_rate": 7.297820406915875e-05, "loss": 2.6294652938842775, "memory(GiB)": 72.85, "step": 40630, "token_acc": 0.517799352750809, "train_speed(iter/s)": 0.671447 }, { "epoch": 1.7409279808063065, "grad_norm": 3.84521746635437, "learning_rate": 7.297222684935726e-05, "loss": 2.354930877685547, "memory(GiB)": 72.85, "step": 40635, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.671467 }, { "epoch": 1.7411421961355553, "grad_norm": 3.6608071327209473, "learning_rate": 7.29662492133923e-05, "loss": 2.2382091522216796, "memory(GiB)": 72.85, "step": 40640, "token_acc": 0.4690909090909091, "train_speed(iter/s)": 0.671468 }, { "epoch": 1.7413564114648046, 
"grad_norm": 5.419205188751221, "learning_rate": 7.296027116137221e-05, "loss": 2.457286071777344, "memory(GiB)": 72.85, "step": 40645, "token_acc": 0.4716312056737589, "train_speed(iter/s)": 0.671462 }, { "epoch": 1.7415706267940534, "grad_norm": 3.775301218032837, "learning_rate": 7.295429269340526e-05, "loss": 2.278335189819336, "memory(GiB)": 72.85, "step": 40650, "token_acc": 0.5028409090909091, "train_speed(iter/s)": 0.671456 }, { "epoch": 1.7417848421233022, "grad_norm": 6.0247721672058105, "learning_rate": 7.294831380959977e-05, "loss": 2.4800268173217774, "memory(GiB)": 72.85, "step": 40655, "token_acc": 0.4524714828897338, "train_speed(iter/s)": 0.671462 }, { "epoch": 1.7419990574525515, "grad_norm": 3.5607779026031494, "learning_rate": 7.294233451006404e-05, "loss": 2.427467155456543, "memory(GiB)": 72.85, "step": 40660, "token_acc": 0.462406015037594, "train_speed(iter/s)": 0.671455 }, { "epoch": 1.7422132727818003, "grad_norm": 4.489529132843018, "learning_rate": 7.293635479490642e-05, "loss": 1.9979068756103515, "memory(GiB)": 72.85, "step": 40665, "token_acc": 0.524, "train_speed(iter/s)": 0.671452 }, { "epoch": 1.742427488111049, "grad_norm": 5.1258158683776855, "learning_rate": 7.293037466423519e-05, "loss": 2.582272529602051, "memory(GiB)": 72.85, "step": 40670, "token_acc": 0.4554794520547945, "train_speed(iter/s)": 0.671446 }, { "epoch": 1.7426417034402983, "grad_norm": 5.074355125427246, "learning_rate": 7.292439411815872e-05, "loss": 2.4545759201049804, "memory(GiB)": 72.85, "step": 40675, "token_acc": 0.525691699604743, "train_speed(iter/s)": 0.671458 }, { "epoch": 1.7428559187695472, "grad_norm": 5.018807888031006, "learning_rate": 7.291841315678535e-05, "loss": 2.452415657043457, "memory(GiB)": 72.85, "step": 40680, "token_acc": 0.5034013605442177, "train_speed(iter/s)": 0.671465 }, { "epoch": 1.743070134098796, "grad_norm": 4.39376974105835, "learning_rate": 7.291243178022343e-05, "loss": 2.4129703521728514, "memory(GiB)": 72.85, "step": 
40685, "token_acc": 0.46757679180887374, "train_speed(iter/s)": 0.67147 }, { "epoch": 1.7432843494280452, "grad_norm": 4.402719497680664, "learning_rate": 7.290644998858132e-05, "loss": 2.5407657623291016, "memory(GiB)": 72.85, "step": 40690, "token_acc": 0.5, "train_speed(iter/s)": 0.671461 }, { "epoch": 1.743498564757294, "grad_norm": 3.963414192199707, "learning_rate": 7.290046778196735e-05, "loss": 2.1954296112060545, "memory(GiB)": 72.85, "step": 40695, "token_acc": 0.5498392282958199, "train_speed(iter/s)": 0.671462 }, { "epoch": 1.7437127800865428, "grad_norm": 3.374340534210205, "learning_rate": 7.289448516048996e-05, "loss": 2.4791872024536135, "memory(GiB)": 72.85, "step": 40700, "token_acc": 0.46355685131195334, "train_speed(iter/s)": 0.671449 }, { "epoch": 1.743926995415792, "grad_norm": 4.626251220703125, "learning_rate": 7.288850212425748e-05, "loss": 2.355815124511719, "memory(GiB)": 72.85, "step": 40705, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.671493 }, { "epoch": 1.744141210745041, "grad_norm": 3.8893446922302246, "learning_rate": 7.28825186733783e-05, "loss": 2.209175872802734, "memory(GiB)": 72.85, "step": 40710, "token_acc": 0.4896755162241888, "train_speed(iter/s)": 0.671502 }, { "epoch": 1.7443554260742897, "grad_norm": 4.448317050933838, "learning_rate": 7.287653480796085e-05, "loss": 2.3018123626708986, "memory(GiB)": 72.85, "step": 40715, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.671508 }, { "epoch": 1.744569641403539, "grad_norm": 3.5471351146698, "learning_rate": 7.28705505281135e-05, "loss": 2.533207893371582, "memory(GiB)": 72.85, "step": 40720, "token_acc": 0.4878048780487805, "train_speed(iter/s)": 0.671489 }, { "epoch": 1.7447838567327878, "grad_norm": 4.460064888000488, "learning_rate": 7.286456583394468e-05, "loss": 2.4490711212158205, "memory(GiB)": 72.85, "step": 40725, "token_acc": 0.497737556561086, "train_speed(iter/s)": 0.671494 }, { "epoch": 1.7449980720620366, "grad_norm": 
4.379994869232178, "learning_rate": 7.28585807255628e-05, "loss": 2.654378318786621, "memory(GiB)": 72.85, "step": 40730, "token_acc": 0.4749262536873156, "train_speed(iter/s)": 0.671498 }, { "epoch": 1.7452122873912859, "grad_norm": 3.6112875938415527, "learning_rate": 7.285259520307629e-05, "loss": 2.3186056137084963, "memory(GiB)": 72.85, "step": 40735, "token_acc": 0.49415204678362573, "train_speed(iter/s)": 0.671458 }, { "epoch": 1.7454265027205347, "grad_norm": 3.75571870803833, "learning_rate": 7.284660926659357e-05, "loss": 2.572907066345215, "memory(GiB)": 72.85, "step": 40740, "token_acc": 0.4925925925925926, "train_speed(iter/s)": 0.67148 }, { "epoch": 1.7456407180497835, "grad_norm": 3.9114420413970947, "learning_rate": 7.28406229162231e-05, "loss": 2.6514123916625976, "memory(GiB)": 72.85, "step": 40745, "token_acc": 0.4109947643979058, "train_speed(iter/s)": 0.671442 }, { "epoch": 1.7458549333790327, "grad_norm": 4.744034290313721, "learning_rate": 7.283463615207333e-05, "loss": 2.6632179260253905, "memory(GiB)": 72.85, "step": 40750, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.671435 }, { "epoch": 1.7460691487082816, "grad_norm": 3.397425413131714, "learning_rate": 7.282864897425268e-05, "loss": 1.9821353912353517, "memory(GiB)": 72.85, "step": 40755, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.671439 }, { "epoch": 1.7462833640375304, "grad_norm": 4.506324768066406, "learning_rate": 7.282266138286966e-05, "loss": 2.3771114349365234, "memory(GiB)": 72.85, "step": 40760, "token_acc": 0.5073529411764706, "train_speed(iter/s)": 0.671453 }, { "epoch": 1.7464975793667796, "grad_norm": 3.3773882389068604, "learning_rate": 7.281667337803271e-05, "loss": 2.132434844970703, "memory(GiB)": 72.85, "step": 40765, "token_acc": 0.4923547400611621, "train_speed(iter/s)": 0.671437 }, { "epoch": 1.7467117946960284, "grad_norm": 3.83510422706604, "learning_rate": 7.28106849598503e-05, "loss": 2.2696792602539064, "memory(GiB)": 72.85, "step": 
40770, "token_acc": 0.5301204819277109, "train_speed(iter/s)": 0.671441 }, { "epoch": 1.7469260100252773, "grad_norm": 3.996152400970459, "learning_rate": 7.280469612843097e-05, "loss": 2.2821329116821287, "memory(GiB)": 72.85, "step": 40775, "token_acc": 0.4968944099378882, "train_speed(iter/s)": 0.671429 }, { "epoch": 1.7471402253545265, "grad_norm": 5.084626197814941, "learning_rate": 7.279870688388316e-05, "loss": 2.53157901763916, "memory(GiB)": 72.85, "step": 40780, "token_acc": 0.45555555555555555, "train_speed(iter/s)": 0.671414 }, { "epoch": 1.7473544406837753, "grad_norm": 3.7948291301727295, "learning_rate": 7.279271722631538e-05, "loss": 2.4724958419799803, "memory(GiB)": 72.85, "step": 40785, "token_acc": 0.4368231046931408, "train_speed(iter/s)": 0.671418 }, { "epoch": 1.7475686560130241, "grad_norm": 3.9811389446258545, "learning_rate": 7.278672715583616e-05, "loss": 2.3460182189941405, "memory(GiB)": 72.85, "step": 40790, "token_acc": 0.474025974025974, "train_speed(iter/s)": 0.671418 }, { "epoch": 1.7477828713422734, "grad_norm": 4.1911821365356445, "learning_rate": 7.278073667255399e-05, "loss": 2.4456953048706054, "memory(GiB)": 72.85, "step": 40795, "token_acc": 0.48366013071895425, "train_speed(iter/s)": 0.671411 }, { "epoch": 1.7479970866715222, "grad_norm": 6.034060478210449, "learning_rate": 7.277474577657741e-05, "loss": 2.317539596557617, "memory(GiB)": 72.85, "step": 40800, "token_acc": 0.5019157088122606, "train_speed(iter/s)": 0.671433 }, { "epoch": 1.748211302000771, "grad_norm": 4.320111274719238, "learning_rate": 7.276875446801494e-05, "loss": 2.4476692199707033, "memory(GiB)": 72.85, "step": 40805, "token_acc": 0.4858757062146893, "train_speed(iter/s)": 0.671443 }, { "epoch": 1.7484255173300203, "grad_norm": 4.797301292419434, "learning_rate": 7.276276274697514e-05, "loss": 2.2859119415283202, "memory(GiB)": 72.85, "step": 40810, "token_acc": 0.5071428571428571, "train_speed(iter/s)": 0.671451 }, { "epoch": 1.748639732659269, 
"grad_norm": 5.468875408172607, "learning_rate": 7.275677061356652e-05, "loss": 2.1980941772460936, "memory(GiB)": 72.85, "step": 40815, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.671446 }, { "epoch": 1.748853947988518, "grad_norm": 3.7394802570343018, "learning_rate": 7.275077806789767e-05, "loss": 2.2404335021972654, "memory(GiB)": 72.85, "step": 40820, "token_acc": 0.5092250922509225, "train_speed(iter/s)": 0.671463 }, { "epoch": 1.7490681633177672, "grad_norm": 4.92283296585083, "learning_rate": 7.274478511007712e-05, "loss": 2.045248031616211, "memory(GiB)": 72.85, "step": 40825, "token_acc": 0.5774058577405857, "train_speed(iter/s)": 0.671456 }, { "epoch": 1.749282378647016, "grad_norm": 4.867828845977783, "learning_rate": 7.273879174021348e-05, "loss": 2.205825424194336, "memory(GiB)": 72.85, "step": 40830, "token_acc": 0.5120481927710844, "train_speed(iter/s)": 0.67146 }, { "epoch": 1.7494965939762648, "grad_norm": 4.893065452575684, "learning_rate": 7.273279795841527e-05, "loss": 2.553483581542969, "memory(GiB)": 72.85, "step": 40835, "token_acc": 0.525096525096525, "train_speed(iter/s)": 0.67145 }, { "epoch": 1.749710809305514, "grad_norm": 5.312457084655762, "learning_rate": 7.27268037647911e-05, "loss": 2.5357866287231445, "memory(GiB)": 72.85, "step": 40840, "token_acc": 0.4548736462093863, "train_speed(iter/s)": 0.67146 }, { "epoch": 1.7499250246347628, "grad_norm": 3.339338779449463, "learning_rate": 7.272080915944956e-05, "loss": 2.238106346130371, "memory(GiB)": 72.85, "step": 40845, "token_acc": 0.5287769784172662, "train_speed(iter/s)": 0.671454 }, { "epoch": 1.7501392399640117, "grad_norm": 4.901723861694336, "learning_rate": 7.271481414249925e-05, "loss": 2.513153839111328, "memory(GiB)": 72.85, "step": 40850, "token_acc": 0.49612403100775193, "train_speed(iter/s)": 0.671449 }, { "epoch": 1.750353455293261, "grad_norm": 5.587104797363281, "learning_rate": 7.270881871404878e-05, "loss": 2.175125503540039, "memory(GiB)": 72.85, 
"step": 40855, "token_acc": 0.5404411764705882, "train_speed(iter/s)": 0.671424 }, { "epoch": 1.7505676706225097, "grad_norm": 6.397116184234619, "learning_rate": 7.270282287420676e-05, "loss": 2.3290246963500976, "memory(GiB)": 72.85, "step": 40860, "token_acc": 0.5287356321839081, "train_speed(iter/s)": 0.671428 }, { "epoch": 1.7507818859517585, "grad_norm": 2.9906234741210938, "learning_rate": 7.26968266230818e-05, "loss": 2.447323226928711, "memory(GiB)": 72.85, "step": 40865, "token_acc": 0.496551724137931, "train_speed(iter/s)": 0.671425 }, { "epoch": 1.7509961012810078, "grad_norm": 3.5177597999572754, "learning_rate": 7.269082996078252e-05, "loss": 2.229076957702637, "memory(GiB)": 72.85, "step": 40870, "token_acc": 0.5244299674267101, "train_speed(iter/s)": 0.671427 }, { "epoch": 1.7512103166102566, "grad_norm": 5.065359115600586, "learning_rate": 7.26848328874176e-05, "loss": 2.5110549926757812, "memory(GiB)": 72.85, "step": 40875, "token_acc": 0.4837662337662338, "train_speed(iter/s)": 0.671434 }, { "epoch": 1.7514245319395054, "grad_norm": 3.360151529312134, "learning_rate": 7.267883540309563e-05, "loss": 2.178798866271973, "memory(GiB)": 72.85, "step": 40880, "token_acc": 0.4966887417218543, "train_speed(iter/s)": 0.671429 }, { "epoch": 1.7516387472687547, "grad_norm": 5.770689487457275, "learning_rate": 7.267283750792529e-05, "loss": 2.533579635620117, "memory(GiB)": 72.85, "step": 40885, "token_acc": 0.48, "train_speed(iter/s)": 0.671431 }, { "epoch": 1.7518529625980035, "grad_norm": 4.255074977874756, "learning_rate": 7.266683920201522e-05, "loss": 2.6046989440917967, "memory(GiB)": 72.85, "step": 40890, "token_acc": 0.45722713864306785, "train_speed(iter/s)": 0.671429 }, { "epoch": 1.7520671779272525, "grad_norm": 4.552187919616699, "learning_rate": 7.266084048547412e-05, "loss": 2.3211511611938476, "memory(GiB)": 72.85, "step": 40895, "token_acc": 0.4749034749034749, "train_speed(iter/s)": 0.671434 }, { "epoch": 1.7522813932565016, "grad_norm": 
4.225492477416992, "learning_rate": 7.265484135841061e-05, "loss": 2.1611469268798826, "memory(GiB)": 72.85, "step": 40900, "token_acc": 0.5035971223021583, "train_speed(iter/s)": 0.671427 }, { "epoch": 1.7524956085857504, "grad_norm": 3.7703070640563965, "learning_rate": 7.264884182093341e-05, "loss": 2.1606712341308594, "memory(GiB)": 72.85, "step": 40905, "token_acc": 0.5160256410256411, "train_speed(iter/s)": 0.671457 }, { "epoch": 1.7527098239149994, "grad_norm": 5.082446575164795, "learning_rate": 7.264284187315117e-05, "loss": 2.618006134033203, "memory(GiB)": 72.85, "step": 40910, "token_acc": 0.4492753623188406, "train_speed(iter/s)": 0.671463 }, { "epoch": 1.7529240392442484, "grad_norm": 4.480302810668945, "learning_rate": 7.263684151517264e-05, "loss": 2.2866058349609375, "memory(GiB)": 72.85, "step": 40915, "token_acc": 0.48344370860927155, "train_speed(iter/s)": 0.671475 }, { "epoch": 1.7531382545734973, "grad_norm": 5.688897132873535, "learning_rate": 7.263084074710647e-05, "loss": 2.370193290710449, "memory(GiB)": 72.85, "step": 40920, "token_acc": 0.48507462686567165, "train_speed(iter/s)": 0.67147 }, { "epoch": 1.7533524699027463, "grad_norm": 4.103311538696289, "learning_rate": 7.262483956906141e-05, "loss": 2.09518985748291, "memory(GiB)": 72.85, "step": 40925, "token_acc": 0.5473684210526316, "train_speed(iter/s)": 0.671501 }, { "epoch": 1.7535666852319953, "grad_norm": 3.570913314819336, "learning_rate": 7.261883798114615e-05, "loss": 2.2685258865356444, "memory(GiB)": 72.85, "step": 40930, "token_acc": 0.5285714285714286, "train_speed(iter/s)": 0.671505 }, { "epoch": 1.7537809005612441, "grad_norm": 3.758249521255493, "learning_rate": 7.261283598346942e-05, "loss": 2.6814247131347657, "memory(GiB)": 72.85, "step": 40935, "token_acc": 0.44376899696048633, "train_speed(iter/s)": 0.671513 }, { "epoch": 1.7539951158904932, "grad_norm": 3.7408621311187744, "learning_rate": 7.260683357613996e-05, "loss": 2.4735206604003905, "memory(GiB)": 72.85, 
"step": 40940, "token_acc": 0.49074074074074076, "train_speed(iter/s)": 0.671511 }, { "epoch": 1.7542093312197422, "grad_norm": 3.507253408432007, "learning_rate": 7.260083075926649e-05, "loss": 2.122234916687012, "memory(GiB)": 72.85, "step": 40945, "token_acc": 0.540453074433657, "train_speed(iter/s)": 0.671522 }, { "epoch": 1.754423546548991, "grad_norm": 4.058714389801025, "learning_rate": 7.259482753295777e-05, "loss": 2.402284622192383, "memory(GiB)": 72.85, "step": 40950, "token_acc": 0.4662379421221865, "train_speed(iter/s)": 0.671531 }, { "epoch": 1.75463776187824, "grad_norm": 4.599761009216309, "learning_rate": 7.258882389732256e-05, "loss": 2.2876422882080076, "memory(GiB)": 72.85, "step": 40955, "token_acc": 0.495114006514658, "train_speed(iter/s)": 0.671511 }, { "epoch": 1.754851977207489, "grad_norm": 3.579223871231079, "learning_rate": 7.258281985246961e-05, "loss": 2.4701061248779297, "memory(GiB)": 72.85, "step": 40960, "token_acc": 0.47878787878787876, "train_speed(iter/s)": 0.671507 }, { "epoch": 1.755066192536738, "grad_norm": 3.3834872245788574, "learning_rate": 7.25768153985077e-05, "loss": 2.2182132720947267, "memory(GiB)": 72.85, "step": 40965, "token_acc": 0.5296296296296297, "train_speed(iter/s)": 0.671475 }, { "epoch": 1.755280407865987, "grad_norm": 4.596184253692627, "learning_rate": 7.25708105355456e-05, "loss": 2.559766387939453, "memory(GiB)": 72.85, "step": 40970, "token_acc": 0.4602272727272727, "train_speed(iter/s)": 0.671474 }, { "epoch": 1.755494623195236, "grad_norm": 5.875462055206299, "learning_rate": 7.256480526369209e-05, "loss": 2.641679573059082, "memory(GiB)": 72.85, "step": 40975, "token_acc": 0.4358974358974359, "train_speed(iter/s)": 0.67149 }, { "epoch": 1.7557088385244848, "grad_norm": 4.43356990814209, "learning_rate": 7.255879958305598e-05, "loss": 2.6517677307128906, "memory(GiB)": 72.85, "step": 40980, "token_acc": 0.4858156028368794, "train_speed(iter/s)": 0.671495 }, { "epoch": 1.7559230538537338, 
"grad_norm": 4.579339504241943, "learning_rate": 7.255279349374603e-05, "loss": 2.175335693359375, "memory(GiB)": 72.85, "step": 40985, "token_acc": 0.48757763975155277, "train_speed(iter/s)": 0.67147 }, { "epoch": 1.7561372691829829, "grad_norm": 3.8154075145721436, "learning_rate": 7.254678699587108e-05, "loss": 2.0993721008300783, "memory(GiB)": 72.85, "step": 40990, "token_acc": 0.5528455284552846, "train_speed(iter/s)": 0.671485 }, { "epoch": 1.7563514845122317, "grad_norm": 4.959954738616943, "learning_rate": 7.254078008953994e-05, "loss": 2.1402441024780274, "memory(GiB)": 72.85, "step": 40995, "token_acc": 0.544, "train_speed(iter/s)": 0.671508 }, { "epoch": 1.7565656998414807, "grad_norm": 5.0523834228515625, "learning_rate": 7.25347727748614e-05, "loss": 2.0024017333984374, "memory(GiB)": 72.85, "step": 41000, "token_acc": 0.5143884892086331, "train_speed(iter/s)": 0.671509 }, { "epoch": 1.7565656998414807, "eval_loss": 2.210456132888794, "eval_runtime": 15.4231, "eval_samples_per_second": 6.484, "eval_steps_per_second": 6.484, "eval_token_acc": 0.4932795698924731, "step": 41000 }, { "epoch": 1.7567799151707297, "grad_norm": 5.773283004760742, "learning_rate": 7.252876505194434e-05, "loss": 2.1338151931762694, "memory(GiB)": 72.85, "step": 41005, "token_acc": 0.5019762845849802, "train_speed(iter/s)": 0.671271 }, { "epoch": 1.7569941304999785, "grad_norm": 4.844553470611572, "learning_rate": 7.252275692089756e-05, "loss": 2.3866615295410156, "memory(GiB)": 72.85, "step": 41010, "token_acc": 0.4657534246575342, "train_speed(iter/s)": 0.671274 }, { "epoch": 1.7572083458292276, "grad_norm": 6.407010555267334, "learning_rate": 7.251674838182992e-05, "loss": 2.3491884231567384, "memory(GiB)": 72.85, "step": 41015, "token_acc": 0.4859437751004016, "train_speed(iter/s)": 0.671268 }, { "epoch": 1.7574225611584766, "grad_norm": 4.797567844390869, "learning_rate": 7.251073943485025e-05, "loss": 2.3525997161865235, "memory(GiB)": 72.85, "step": 41020, "token_acc": 
0.4863013698630137, "train_speed(iter/s)": 0.671284 }, { "epoch": 1.7576367764877254, "grad_norm": 3.6222083568573, "learning_rate": 7.25047300800674e-05, "loss": 2.5898147583007813, "memory(GiB)": 72.85, "step": 41025, "token_acc": 0.5014005602240896, "train_speed(iter/s)": 0.671265 }, { "epoch": 1.7578509918169745, "grad_norm": 5.496143817901611, "learning_rate": 7.249872031759027e-05, "loss": 2.43270263671875, "memory(GiB)": 72.85, "step": 41030, "token_acc": 0.4953560371517028, "train_speed(iter/s)": 0.671257 }, { "epoch": 1.7580652071462235, "grad_norm": 4.546595096588135, "learning_rate": 7.249271014752772e-05, "loss": 2.334503173828125, "memory(GiB)": 72.85, "step": 41035, "token_acc": 0.48398576512455516, "train_speed(iter/s)": 0.671259 }, { "epoch": 1.7582794224754723, "grad_norm": 3.8770697116851807, "learning_rate": 7.248669956998862e-05, "loss": 2.4928218841552736, "memory(GiB)": 72.85, "step": 41040, "token_acc": 0.4859154929577465, "train_speed(iter/s)": 0.671255 }, { "epoch": 1.7584936378047213, "grad_norm": 3.546452522277832, "learning_rate": 7.248068858508185e-05, "loss": 2.4839954376220703, "memory(GiB)": 72.85, "step": 41045, "token_acc": 0.4836795252225519, "train_speed(iter/s)": 0.671261 }, { "epoch": 1.7587078531339704, "grad_norm": 5.8802008628845215, "learning_rate": 7.247467719291634e-05, "loss": 2.0547306060791017, "memory(GiB)": 72.85, "step": 41050, "token_acc": 0.5292207792207793, "train_speed(iter/s)": 0.671257 }, { "epoch": 1.7589220684632192, "grad_norm": 5.036243438720703, "learning_rate": 7.246866539360096e-05, "loss": 2.351179504394531, "memory(GiB)": 72.85, "step": 41055, "token_acc": 0.5017543859649123, "train_speed(iter/s)": 0.671255 }, { "epoch": 1.7591362837924682, "grad_norm": 5.186659812927246, "learning_rate": 7.246265318724463e-05, "loss": 2.507577896118164, "memory(GiB)": 72.85, "step": 41060, "token_acc": 0.48355263157894735, "train_speed(iter/s)": 0.67126 }, { "epoch": 1.7593504991217173, "grad_norm": 
5.3019795417785645, "learning_rate": 7.245664057395626e-05, "loss": 2.278446006774902, "memory(GiB)": 72.85, "step": 41065, "token_acc": 0.4902597402597403, "train_speed(iter/s)": 0.67127 }, { "epoch": 1.759564714450966, "grad_norm": 4.188981056213379, "learning_rate": 7.24506275538448e-05, "loss": 2.3942998886108398, "memory(GiB)": 72.85, "step": 41070, "token_acc": 0.5296167247386759, "train_speed(iter/s)": 0.671285 }, { "epoch": 1.759778929780215, "grad_norm": 4.115375995635986, "learning_rate": 7.244461412701915e-05, "loss": 2.358140563964844, "memory(GiB)": 72.85, "step": 41075, "token_acc": 0.5136054421768708, "train_speed(iter/s)": 0.67128 }, { "epoch": 1.7599931451094641, "grad_norm": 3.9485366344451904, "learning_rate": 7.243860029358825e-05, "loss": 2.555582809448242, "memory(GiB)": 72.85, "step": 41080, "token_acc": 0.4558303886925795, "train_speed(iter/s)": 0.671291 }, { "epoch": 1.760207360438713, "grad_norm": 4.716385841369629, "learning_rate": 7.243258605366107e-05, "loss": 2.40487003326416, "memory(GiB)": 72.85, "step": 41085, "token_acc": 0.48175182481751827, "train_speed(iter/s)": 0.671292 }, { "epoch": 1.760421575767962, "grad_norm": 3.9251108169555664, "learning_rate": 7.242657140734654e-05, "loss": 2.403152847290039, "memory(GiB)": 72.85, "step": 41090, "token_acc": 0.47875354107648727, "train_speed(iter/s)": 0.67128 }, { "epoch": 1.760635791097211, "grad_norm": 4.07175350189209, "learning_rate": 7.242055635475365e-05, "loss": 2.4931814193725588, "memory(GiB)": 72.85, "step": 41095, "token_acc": 0.5032894736842105, "train_speed(iter/s)": 0.671287 }, { "epoch": 1.7608500064264598, "grad_norm": 5.250166893005371, "learning_rate": 7.241454089599133e-05, "loss": 2.2958913803100587, "memory(GiB)": 72.85, "step": 41100, "token_acc": 0.5077519379844961, "train_speed(iter/s)": 0.671296 }, { "epoch": 1.7610642217557089, "grad_norm": 3.4597482681274414, "learning_rate": 7.240852503116857e-05, "loss": 2.606170082092285, "memory(GiB)": 72.85, "step": 
41105, "token_acc": 0.47678018575851394, "train_speed(iter/s)": 0.671301 }, { "epoch": 1.761278437084958, "grad_norm": 4.86911678314209, "learning_rate": 7.240250876039438e-05, "loss": 2.313191223144531, "memory(GiB)": 72.85, "step": 41110, "token_acc": 0.4652014652014652, "train_speed(iter/s)": 0.671295 }, { "epoch": 1.7614926524142067, "grad_norm": 5.163240909576416, "learning_rate": 7.239649208377771e-05, "loss": 2.5054420471191405, "memory(GiB)": 72.85, "step": 41115, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.671296 }, { "epoch": 1.7617068677434558, "grad_norm": 4.364309787750244, "learning_rate": 7.23904750014276e-05, "loss": 2.2831293106079102, "memory(GiB)": 72.85, "step": 41120, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.671298 }, { "epoch": 1.7619210830727048, "grad_norm": 3.6941287517547607, "learning_rate": 7.2384457513453e-05, "loss": 2.5083547592163087, "memory(GiB)": 72.85, "step": 41125, "token_acc": 0.45808383233532934, "train_speed(iter/s)": 0.671293 }, { "epoch": 1.7621352984019536, "grad_norm": 3.270747661590576, "learning_rate": 7.237843961996298e-05, "loss": 2.3792255401611326, "memory(GiB)": 72.85, "step": 41130, "token_acc": 0.4931506849315068, "train_speed(iter/s)": 0.671293 }, { "epoch": 1.7623495137312026, "grad_norm": 3.2550652027130127, "learning_rate": 7.237242132106651e-05, "loss": 2.2377645492553713, "memory(GiB)": 72.85, "step": 41135, "token_acc": 0.5563636363636364, "train_speed(iter/s)": 0.671276 }, { "epoch": 1.7625637290604517, "grad_norm": 6.736490249633789, "learning_rate": 7.236640261687267e-05, "loss": 2.2984540939331053, "memory(GiB)": 72.85, "step": 41140, "token_acc": 0.4823943661971831, "train_speed(iter/s)": 0.671295 }, { "epoch": 1.7627779443897005, "grad_norm": 3.8397748470306396, "learning_rate": 7.236038350749043e-05, "loss": 2.6445392608642577, "memory(GiB)": 72.85, "step": 41145, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.671292 }, { "epoch": 1.7629921597189495, 
"grad_norm": 4.320956707000732, "learning_rate": 7.235436399302889e-05, "loss": 2.152790641784668, "memory(GiB)": 72.85, "step": 41150, "token_acc": 0.5641891891891891, "train_speed(iter/s)": 0.671286 }, { "epoch": 1.7632063750481985, "grad_norm": 4.272256851196289, "learning_rate": 7.234834407359708e-05, "loss": 2.6362964630126955, "memory(GiB)": 72.85, "step": 41155, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.67129 }, { "epoch": 1.7634205903774474, "grad_norm": 4.321959972381592, "learning_rate": 7.234232374930404e-05, "loss": 2.200808525085449, "memory(GiB)": 72.85, "step": 41160, "token_acc": 0.523972602739726, "train_speed(iter/s)": 0.671288 }, { "epoch": 1.7636348057066964, "grad_norm": 4.261555194854736, "learning_rate": 7.233630302025886e-05, "loss": 2.257355308532715, "memory(GiB)": 72.85, "step": 41165, "token_acc": 0.4755244755244755, "train_speed(iter/s)": 0.671258 }, { "epoch": 1.7638490210359454, "grad_norm": 5.943386554718018, "learning_rate": 7.233028188657058e-05, "loss": 2.1460805892944337, "memory(GiB)": 72.85, "step": 41170, "token_acc": 0.5441176470588235, "train_speed(iter/s)": 0.671232 }, { "epoch": 1.7640632363651942, "grad_norm": 5.787703037261963, "learning_rate": 7.232426034834829e-05, "loss": 2.4480525970458986, "memory(GiB)": 72.85, "step": 41175, "token_acc": 0.4861111111111111, "train_speed(iter/s)": 0.671245 }, { "epoch": 1.7642774516944433, "grad_norm": 5.121469497680664, "learning_rate": 7.231823840570111e-05, "loss": 2.5339328765869142, "memory(GiB)": 72.85, "step": 41180, "token_acc": 0.5017543859649123, "train_speed(iter/s)": 0.671248 }, { "epoch": 1.7644916670236923, "grad_norm": 4.358181476593018, "learning_rate": 7.231221605873809e-05, "loss": 2.246508979797363, "memory(GiB)": 72.85, "step": 41185, "token_acc": 0.504950495049505, "train_speed(iter/s)": 0.671264 }, { "epoch": 1.7647058823529411, "grad_norm": 6.523571491241455, "learning_rate": 7.230619330756835e-05, "loss": 2.329562759399414, "memory(GiB)": 
72.85, "step": 41190, "token_acc": 0.4867549668874172, "train_speed(iter/s)": 0.671251 }, { "epoch": 1.7649200976821902, "grad_norm": 3.8111236095428467, "learning_rate": 7.230017015230099e-05, "loss": 2.2486316680908205, "memory(GiB)": 72.85, "step": 41195, "token_acc": 0.519298245614035, "train_speed(iter/s)": 0.671239 }, { "epoch": 1.7651343130114392, "grad_norm": 3.5853312015533447, "learning_rate": 7.229414659304513e-05, "loss": 2.2271520614624025, "memory(GiB)": 72.85, "step": 41200, "token_acc": 0.5308988764044944, "train_speed(iter/s)": 0.671234 }, { "epoch": 1.765348528340688, "grad_norm": 3.9383506774902344, "learning_rate": 7.22881226299099e-05, "loss": 2.4014854431152344, "memory(GiB)": 72.85, "step": 41205, "token_acc": 0.4850498338870432, "train_speed(iter/s)": 0.671236 }, { "epoch": 1.765562743669937, "grad_norm": 4.010715007781982, "learning_rate": 7.22820982630044e-05, "loss": 2.13343505859375, "memory(GiB)": 72.85, "step": 41210, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.671243 }, { "epoch": 1.765776958999186, "grad_norm": 4.745704650878906, "learning_rate": 7.22760734924378e-05, "loss": 2.2796680450439455, "memory(GiB)": 72.85, "step": 41215, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.671244 }, { "epoch": 1.7659911743284349, "grad_norm": 4.393392086029053, "learning_rate": 7.227004831831923e-05, "loss": 2.4464179992675783, "memory(GiB)": 72.85, "step": 41220, "token_acc": 0.4652014652014652, "train_speed(iter/s)": 0.671253 }, { "epoch": 1.766205389657684, "grad_norm": 4.170444488525391, "learning_rate": 7.226402274075784e-05, "loss": 2.4063957214355467, "memory(GiB)": 72.85, "step": 41225, "token_acc": 0.5033557046979866, "train_speed(iter/s)": 0.671275 }, { "epoch": 1.766419604986933, "grad_norm": 3.9299604892730713, "learning_rate": 7.22579967598628e-05, "loss": 2.3226865768432616, "memory(GiB)": 72.85, "step": 41230, "token_acc": 0.5316901408450704, "train_speed(iter/s)": 0.671271 }, { "epoch": 
1.7666338203161818, "grad_norm": 4.188253879547119, "learning_rate": 7.225197037574327e-05, "loss": 2.3720394134521485, "memory(GiB)": 72.85, "step": 41235, "token_acc": 0.4743202416918429, "train_speed(iter/s)": 0.671266 }, { "epoch": 1.7668480356454308, "grad_norm": 4.6446380615234375, "learning_rate": 7.224594358850842e-05, "loss": 2.4434488296508787, "memory(GiB)": 72.85, "step": 41240, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.671261 }, { "epoch": 1.7670622509746798, "grad_norm": 5.14304780960083, "learning_rate": 7.223991639826742e-05, "loss": 2.429954719543457, "memory(GiB)": 72.85, "step": 41245, "token_acc": 0.4823943661971831, "train_speed(iter/s)": 0.671249 }, { "epoch": 1.7672764663039287, "grad_norm": 4.056108474731445, "learning_rate": 7.223388880512948e-05, "loss": 2.2140434265136717, "memory(GiB)": 72.85, "step": 41250, "token_acc": 0.5304054054054054, "train_speed(iter/s)": 0.671238 }, { "epoch": 1.7674906816331777, "grad_norm": 3.968848943710327, "learning_rate": 7.22278608092038e-05, "loss": 2.528463935852051, "memory(GiB)": 72.85, "step": 41255, "token_acc": 0.498371335504886, "train_speed(iter/s)": 0.671249 }, { "epoch": 1.7677048969624267, "grad_norm": 5.054109573364258, "learning_rate": 7.222183241059955e-05, "loss": 2.599019241333008, "memory(GiB)": 72.85, "step": 41260, "token_acc": 0.4697986577181208, "train_speed(iter/s)": 0.671256 }, { "epoch": 1.7679191122916755, "grad_norm": 3.589902400970459, "learning_rate": 7.221580360942598e-05, "loss": 2.2075168609619142, "memory(GiB)": 72.85, "step": 41265, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.67127 }, { "epoch": 1.7681333276209246, "grad_norm": 5.149444103240967, "learning_rate": 7.220977440579229e-05, "loss": 2.4060977935791015, "memory(GiB)": 72.85, "step": 41270, "token_acc": 0.5, "train_speed(iter/s)": 0.671287 }, { "epoch": 1.7683475429501736, "grad_norm": 4.734385967254639, "learning_rate": 7.220374479980768e-05, "loss": 2.6094261169433595, 
"memory(GiB)": 72.85, "step": 41275, "token_acc": 0.45483870967741935, "train_speed(iter/s)": 0.671306 }, { "epoch": 1.7685617582794224, "grad_norm": 4.143499851226807, "learning_rate": 7.219771479158142e-05, "loss": 2.527671051025391, "memory(GiB)": 72.85, "step": 41280, "token_acc": 0.44755244755244755, "train_speed(iter/s)": 0.67131 }, { "epoch": 1.7687759736086714, "grad_norm": 3.5887069702148438, "learning_rate": 7.219168438122273e-05, "loss": 2.249787712097168, "memory(GiB)": 72.85, "step": 41285, "token_acc": 0.46484375, "train_speed(iter/s)": 0.671318 }, { "epoch": 1.7689901889379205, "grad_norm": 7.222774028778076, "learning_rate": 7.218565356884084e-05, "loss": 2.419214630126953, "memory(GiB)": 72.85, "step": 41290, "token_acc": 0.46715328467153283, "train_speed(iter/s)": 0.671328 }, { "epoch": 1.7692044042671693, "grad_norm": 4.748754978179932, "learning_rate": 7.217962235454506e-05, "loss": 2.6138290405273437, "memory(GiB)": 72.85, "step": 41295, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.671326 }, { "epoch": 1.7694186195964183, "grad_norm": 3.689988851547241, "learning_rate": 7.21735907384446e-05, "loss": 2.4726490020751952, "memory(GiB)": 72.85, "step": 41300, "token_acc": 0.45288753799392095, "train_speed(iter/s)": 0.671315 }, { "epoch": 1.7696328349256674, "grad_norm": 4.880086898803711, "learning_rate": 7.216755872064873e-05, "loss": 2.547237777709961, "memory(GiB)": 72.85, "step": 41305, "token_acc": 0.46440677966101696, "train_speed(iter/s)": 0.671302 }, { "epoch": 1.7698470502549162, "grad_norm": 3.741290807723999, "learning_rate": 7.216152630126676e-05, "loss": 2.3238971710205076, "memory(GiB)": 72.85, "step": 41310, "token_acc": 0.5120274914089347, "train_speed(iter/s)": 0.671275 }, { "epoch": 1.7700612655841652, "grad_norm": 4.206756114959717, "learning_rate": 7.215549348040793e-05, "loss": 2.1272605895996093, "memory(GiB)": 72.85, "step": 41315, "token_acc": 0.5415162454873647, "train_speed(iter/s)": 0.671282 }, { "epoch": 
1.7702754809134142, "grad_norm": 4.748021602630615, "learning_rate": 7.214946025818157e-05, "loss": 2.1700735092163086, "memory(GiB)": 72.85, "step": 41320, "token_acc": 0.4964788732394366, "train_speed(iter/s)": 0.671299 }, { "epoch": 1.770489696242663, "grad_norm": 4.509149074554443, "learning_rate": 7.214342663469695e-05, "loss": 2.4462635040283205, "memory(GiB)": 72.85, "step": 41325, "token_acc": 0.48739495798319327, "train_speed(iter/s)": 0.67132 }, { "epoch": 1.770703911571912, "grad_norm": 4.724447727203369, "learning_rate": 7.213739261006339e-05, "loss": 2.317825698852539, "memory(GiB)": 72.85, "step": 41330, "token_acc": 0.4972375690607735, "train_speed(iter/s)": 0.671332 }, { "epoch": 1.7709181269011611, "grad_norm": 4.502163887023926, "learning_rate": 7.213135818439018e-05, "loss": 2.459789276123047, "memory(GiB)": 72.85, "step": 41335, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.671345 }, { "epoch": 1.77113234223041, "grad_norm": 4.320649147033691, "learning_rate": 7.212532335778668e-05, "loss": 2.169284439086914, "memory(GiB)": 72.85, "step": 41340, "token_acc": 0.5186335403726708, "train_speed(iter/s)": 0.671317 }, { "epoch": 1.771346557559659, "grad_norm": 4.655956268310547, "learning_rate": 7.211928813036217e-05, "loss": 2.5579681396484375, "memory(GiB)": 72.85, "step": 41345, "token_acc": 0.49193548387096775, "train_speed(iter/s)": 0.671325 }, { "epoch": 1.771560772888908, "grad_norm": 5.743666172027588, "learning_rate": 7.211325250222603e-05, "loss": 2.494068145751953, "memory(GiB)": 72.85, "step": 41350, "token_acc": 0.4837758112094395, "train_speed(iter/s)": 0.671317 }, { "epoch": 1.7717749882181568, "grad_norm": 3.5191049575805664, "learning_rate": 7.210721647348757e-05, "loss": 2.1999584197998048, "memory(GiB)": 72.85, "step": 41355, "token_acc": 0.5528455284552846, "train_speed(iter/s)": 0.671319 }, { "epoch": 1.7719892035474059, "grad_norm": 6.504626274108887, "learning_rate": 7.210118004425612e-05, "loss": 2.5450824737548827, 
"memory(GiB)": 72.85, "step": 41360, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.671315 }, { "epoch": 1.772203418876655, "grad_norm": 3.5025315284729004, "learning_rate": 7.209514321464108e-05, "loss": 2.236470031738281, "memory(GiB)": 72.85, "step": 41365, "token_acc": 0.5622895622895623, "train_speed(iter/s)": 0.67129 }, { "epoch": 1.7724176342059037, "grad_norm": 4.2711710929870605, "learning_rate": 7.20891059847518e-05, "loss": 2.613056945800781, "memory(GiB)": 72.85, "step": 41370, "token_acc": 0.44881889763779526, "train_speed(iter/s)": 0.671303 }, { "epoch": 1.7726318495351527, "grad_norm": 3.9086153507232666, "learning_rate": 7.208306835469763e-05, "loss": 2.2179519653320314, "memory(GiB)": 72.85, "step": 41375, "token_acc": 0.4789915966386555, "train_speed(iter/s)": 0.671296 }, { "epoch": 1.7728460648644018, "grad_norm": 5.069774150848389, "learning_rate": 7.207703032458797e-05, "loss": 2.1723377227783205, "memory(GiB)": 72.85, "step": 41380, "token_acc": 0.5689655172413793, "train_speed(iter/s)": 0.671293 }, { "epoch": 1.7730602801936506, "grad_norm": 5.003979206085205, "learning_rate": 7.20709918945322e-05, "loss": 2.618922233581543, "memory(GiB)": 72.85, "step": 41385, "token_acc": 0.4840989399293286, "train_speed(iter/s)": 0.671297 }, { "epoch": 1.7732744955228996, "grad_norm": 5.4513044357299805, "learning_rate": 7.206495306463969e-05, "loss": 2.6588905334472654, "memory(GiB)": 72.85, "step": 41390, "token_acc": 0.48854961832061067, "train_speed(iter/s)": 0.671311 }, { "epoch": 1.7734887108521487, "grad_norm": 3.5024712085723877, "learning_rate": 7.205891383501987e-05, "loss": 2.2079301834106446, "memory(GiB)": 72.85, "step": 41395, "token_acc": 0.48507462686567165, "train_speed(iter/s)": 0.671319 }, { "epoch": 1.7737029261813975, "grad_norm": 5.603788375854492, "learning_rate": 7.205287420578214e-05, "loss": 2.942970848083496, "memory(GiB)": 72.85, "step": 41400, "token_acc": 0.4534412955465587, "train_speed(iter/s)": 0.671323 }, { 
"epoch": 1.7739171415106465, "grad_norm": 4.3320512771606445, "learning_rate": 7.20468341770359e-05, "loss": 2.2631946563720704, "memory(GiB)": 72.85, "step": 41405, "token_acc": 0.49508196721311476, "train_speed(iter/s)": 0.671296 }, { "epoch": 1.7741313568398955, "grad_norm": 4.5555949211120605, "learning_rate": 7.204079374889057e-05, "loss": 2.447544479370117, "memory(GiB)": 72.85, "step": 41410, "token_acc": 0.487012987012987, "train_speed(iter/s)": 0.671299 }, { "epoch": 1.7743455721691443, "grad_norm": 4.758311748504639, "learning_rate": 7.203475292145559e-05, "loss": 2.772336959838867, "memory(GiB)": 72.85, "step": 41415, "token_acc": 0.47307692307692306, "train_speed(iter/s)": 0.671315 }, { "epoch": 1.7745597874983934, "grad_norm": 3.6281588077545166, "learning_rate": 7.202871169484039e-05, "loss": 2.587263298034668, "memory(GiB)": 72.85, "step": 41420, "token_acc": 0.4405144694533762, "train_speed(iter/s)": 0.671312 }, { "epoch": 1.7747740028276424, "grad_norm": 5.130710124969482, "learning_rate": 7.202267006915441e-05, "loss": 2.4404294967651365, "memory(GiB)": 72.85, "step": 41425, "token_acc": 0.4709897610921502, "train_speed(iter/s)": 0.671323 }, { "epoch": 1.7749882181568912, "grad_norm": 4.983991622924805, "learning_rate": 7.201662804450712e-05, "loss": 2.084789276123047, "memory(GiB)": 72.85, "step": 41430, "token_acc": 0.5646551724137931, "train_speed(iter/s)": 0.671328 }, { "epoch": 1.7752024334861403, "grad_norm": 4.5293049812316895, "learning_rate": 7.201058562100793e-05, "loss": 2.4350385665893555, "memory(GiB)": 72.85, "step": 41435, "token_acc": 0.4612676056338028, "train_speed(iter/s)": 0.671331 }, { "epoch": 1.7754166488153893, "grad_norm": 6.200393199920654, "learning_rate": 7.200454279876637e-05, "loss": 2.264087104797363, "memory(GiB)": 72.85, "step": 41440, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.67134 }, { "epoch": 1.775630864144638, "grad_norm": 5.138232707977295, "learning_rate": 7.199849957789186e-05, "loss": 
2.4843204498291014, "memory(GiB)": 72.85, "step": 41445, "token_acc": 0.43167701863354035, "train_speed(iter/s)": 0.671343 }, { "epoch": 1.7758450794738871, "grad_norm": 5.715761661529541, "learning_rate": 7.199245595849387e-05, "loss": 2.1665748596191405, "memory(GiB)": 72.85, "step": 41450, "token_acc": 0.5376344086021505, "train_speed(iter/s)": 0.671318 }, { "epoch": 1.7760592948031362, "grad_norm": 4.599054336547852, "learning_rate": 7.198641194068194e-05, "loss": 2.3682254791259765, "memory(GiB)": 72.85, "step": 41455, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.671323 }, { "epoch": 1.776273510132385, "grad_norm": 4.192246437072754, "learning_rate": 7.198036752456552e-05, "loss": 2.326839065551758, "memory(GiB)": 72.85, "step": 41460, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.671315 }, { "epoch": 1.776487725461634, "grad_norm": 4.09819221496582, "learning_rate": 7.197432271025412e-05, "loss": 2.507324981689453, "memory(GiB)": 72.85, "step": 41465, "token_acc": 0.4734848484848485, "train_speed(iter/s)": 0.671314 }, { "epoch": 1.776701940790883, "grad_norm": 4.629897594451904, "learning_rate": 7.196827749785727e-05, "loss": 2.529690170288086, "memory(GiB)": 72.85, "step": 41470, "token_acc": 0.4108761329305136, "train_speed(iter/s)": 0.671321 }, { "epoch": 1.7769161561201319, "grad_norm": 3.433212995529175, "learning_rate": 7.196223188748445e-05, "loss": 2.1527503967285155, "memory(GiB)": 72.85, "step": 41475, "token_acc": 0.5422535211267606, "train_speed(iter/s)": 0.67134 }, { "epoch": 1.777130371449381, "grad_norm": 4.653863430023193, "learning_rate": 7.195618587924518e-05, "loss": 2.4928815841674803, "memory(GiB)": 72.85, "step": 41480, "token_acc": 0.5120274914089347, "train_speed(iter/s)": 0.671361 }, { "epoch": 1.77734458677863, "grad_norm": 4.364820957183838, "learning_rate": 7.195013947324903e-05, "loss": 2.495803451538086, "memory(GiB)": 72.85, "step": 41485, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.671382 
}, { "epoch": 1.7775588021078788, "grad_norm": 4.614840507507324, "learning_rate": 7.194409266960551e-05, "loss": 2.4975290298461914, "memory(GiB)": 72.85, "step": 41490, "token_acc": 0.5017543859649123, "train_speed(iter/s)": 0.671373 }, { "epoch": 1.7777730174371278, "grad_norm": 4.5711846351623535, "learning_rate": 7.193804546842417e-05, "loss": 2.333383560180664, "memory(GiB)": 72.85, "step": 41495, "token_acc": 0.4935064935064935, "train_speed(iter/s)": 0.67138 }, { "epoch": 1.7779872327663768, "grad_norm": 3.3480565547943115, "learning_rate": 7.193199786981456e-05, "loss": 2.2692699432373047, "memory(GiB)": 72.85, "step": 41500, "token_acc": 0.5013404825737265, "train_speed(iter/s)": 0.671378 }, { "epoch": 1.7779872327663768, "eval_loss": 2.0822770595550537, "eval_runtime": 15.7173, "eval_samples_per_second": 6.362, "eval_steps_per_second": 6.362, "eval_token_acc": 0.46194926568758343, "step": 41500 }, { "epoch": 1.7782014480956256, "grad_norm": 4.586766242980957, "learning_rate": 7.192594987388621e-05, "loss": 2.2095224380493166, "memory(GiB)": 72.85, "step": 41505, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.671179 }, { "epoch": 1.7784156634248747, "grad_norm": 3.9693496227264404, "learning_rate": 7.191990148074872e-05, "loss": 2.4839298248291017, "memory(GiB)": 72.85, "step": 41510, "token_acc": 0.48494983277591974, "train_speed(iter/s)": 0.671149 }, { "epoch": 1.7786298787541237, "grad_norm": 3.6657307147979736, "learning_rate": 7.191385269051166e-05, "loss": 2.3830766677856445, "memory(GiB)": 72.85, "step": 41515, "token_acc": 0.5075187969924813, "train_speed(iter/s)": 0.671161 }, { "epoch": 1.7788440940833725, "grad_norm": 4.0116658210754395, "learning_rate": 7.190780350328459e-05, "loss": 2.512873077392578, "memory(GiB)": 72.85, "step": 41520, "token_acc": 0.4767932489451477, "train_speed(iter/s)": 0.671159 }, { "epoch": 1.7790583094126216, "grad_norm": 5.0362420082092285, "learning_rate": 7.190175391917713e-05, "loss": 
2.4876956939697266, "memory(GiB)": 72.85, "step": 41525, "token_acc": 0.45045045045045046, "train_speed(iter/s)": 0.671147 }, { "epoch": 1.7792725247418706, "grad_norm": 3.9304378032684326, "learning_rate": 7.189570393829885e-05, "loss": 2.44302978515625, "memory(GiB)": 72.85, "step": 41530, "token_acc": 0.4795539033457249, "train_speed(iter/s)": 0.671167 }, { "epoch": 1.7794867400711194, "grad_norm": 5.408188343048096, "learning_rate": 7.188965356075932e-05, "loss": 2.4803321838378904, "memory(GiB)": 72.85, "step": 41535, "token_acc": 0.47147147147147145, "train_speed(iter/s)": 0.67118 }, { "epoch": 1.7797009554003684, "grad_norm": 4.802494525909424, "learning_rate": 7.188360278666821e-05, "loss": 2.3436384201049805, "memory(GiB)": 72.85, "step": 41540, "token_acc": 0.45484949832775917, "train_speed(iter/s)": 0.671178 }, { "epoch": 1.7799151707296175, "grad_norm": 4.62080192565918, "learning_rate": 7.18775516161351e-05, "loss": 2.3467809677124025, "memory(GiB)": 72.85, "step": 41545, "token_acc": 0.46946564885496184, "train_speed(iter/s)": 0.67118 }, { "epoch": 1.7801293860588663, "grad_norm": 3.0386877059936523, "learning_rate": 7.187150004926961e-05, "loss": 2.336966323852539, "memory(GiB)": 72.85, "step": 41550, "token_acc": 0.548951048951049, "train_speed(iter/s)": 0.671166 }, { "epoch": 1.7803436013881153, "grad_norm": 4.695474147796631, "learning_rate": 7.18654480861814e-05, "loss": 2.4337203979492186, "memory(GiB)": 72.85, "step": 41555, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.67118 }, { "epoch": 1.7805578167173644, "grad_norm": 3.750095844268799, "learning_rate": 7.185939572698008e-05, "loss": 2.5096378326416016, "memory(GiB)": 72.85, "step": 41560, "token_acc": 0.45396825396825397, "train_speed(iter/s)": 0.671165 }, { "epoch": 1.7807720320466132, "grad_norm": 3.4749677181243896, "learning_rate": 7.185334297177529e-05, "loss": 2.075247383117676, "memory(GiB)": 72.85, "step": 41565, "token_acc": 0.5570934256055363, "train_speed(iter/s)": 
0.671173 }, { "epoch": 1.7809862473758622, "grad_norm": 4.008700370788574, "learning_rate": 7.18472898206767e-05, "loss": 2.3006027221679686, "memory(GiB)": 72.85, "step": 41570, "token_acc": 0.5379310344827586, "train_speed(iter/s)": 0.671169 }, { "epoch": 1.7812004627051112, "grad_norm": 3.6057682037353516, "learning_rate": 7.184123627379395e-05, "loss": 2.3769901275634764, "memory(GiB)": 72.85, "step": 41575, "token_acc": 0.4925925925925926, "train_speed(iter/s)": 0.671192 }, { "epoch": 1.78141467803436, "grad_norm": 5.3531107902526855, "learning_rate": 7.183518233123673e-05, "loss": 2.3078916549682615, "memory(GiB)": 72.85, "step": 41580, "token_acc": 0.48134328358208955, "train_speed(iter/s)": 0.671183 }, { "epoch": 1.781628893363609, "grad_norm": 4.559267997741699, "learning_rate": 7.18291279931147e-05, "loss": 2.117456817626953, "memory(GiB)": 72.85, "step": 41585, "token_acc": 0.5420560747663551, "train_speed(iter/s)": 0.671193 }, { "epoch": 1.7818431086928581, "grad_norm": 3.9546725749969482, "learning_rate": 7.182307325953755e-05, "loss": 2.39303035736084, "memory(GiB)": 72.85, "step": 41590, "token_acc": 0.5049833887043189, "train_speed(iter/s)": 0.671194 }, { "epoch": 1.782057324022107, "grad_norm": 3.9854767322540283, "learning_rate": 7.181701813061492e-05, "loss": 2.2341455459594726, "memory(GiB)": 72.85, "step": 41595, "token_acc": 0.5186721991701245, "train_speed(iter/s)": 0.671199 }, { "epoch": 1.782271539351356, "grad_norm": 4.742231845855713, "learning_rate": 7.181096260645657e-05, "loss": 2.3791177749633787, "memory(GiB)": 72.85, "step": 41600, "token_acc": 0.4591194968553459, "train_speed(iter/s)": 0.671212 }, { "epoch": 1.782485754680605, "grad_norm": 4.123025894165039, "learning_rate": 7.180490668717215e-05, "loss": 2.400823211669922, "memory(GiB)": 72.85, "step": 41605, "token_acc": 0.4967948717948718, "train_speed(iter/s)": 0.671231 }, { "epoch": 1.7826999700098538, "grad_norm": 3.8360698223114014, "learning_rate": 7.179885037287142e-05, 
"loss": 2.411499786376953, "memory(GiB)": 72.85, "step": 41610, "token_acc": 0.48264984227129337, "train_speed(iter/s)": 0.671226 }, { "epoch": 1.7829141853391028, "grad_norm": 5.259121417999268, "learning_rate": 7.179279366366405e-05, "loss": 2.3412851333618163, "memory(GiB)": 72.85, "step": 41615, "token_acc": 0.47766323024054985, "train_speed(iter/s)": 0.671217 }, { "epoch": 1.7831284006683519, "grad_norm": 4.243776798248291, "learning_rate": 7.178673655965977e-05, "loss": 2.26294002532959, "memory(GiB)": 72.85, "step": 41620, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.671221 }, { "epoch": 1.7833426159976007, "grad_norm": 3.8612780570983887, "learning_rate": 7.178067906096832e-05, "loss": 2.2388689041137697, "memory(GiB)": 72.85, "step": 41625, "token_acc": 0.5648854961832062, "train_speed(iter/s)": 0.671218 }, { "epoch": 1.7835568313268497, "grad_norm": 4.029297351837158, "learning_rate": 7.177462116769945e-05, "loss": 1.968838882446289, "memory(GiB)": 72.85, "step": 41630, "token_acc": 0.5436507936507936, "train_speed(iter/s)": 0.671219 }, { "epoch": 1.7837710466560988, "grad_norm": 5.635096073150635, "learning_rate": 7.176856287996289e-05, "loss": 2.1505199432373048, "memory(GiB)": 72.85, "step": 41635, "token_acc": 0.5353535353535354, "train_speed(iter/s)": 0.671227 }, { "epoch": 1.7839852619853476, "grad_norm": 3.4820661544799805, "learning_rate": 7.176250419786839e-05, "loss": 1.987483024597168, "memory(GiB)": 72.85, "step": 41640, "token_acc": 0.5421245421245421, "train_speed(iter/s)": 0.671227 }, { "epoch": 1.7841994773145966, "grad_norm": 3.9008076190948486, "learning_rate": 7.175644512152572e-05, "loss": 2.2195388793945314, "memory(GiB)": 72.85, "step": 41645, "token_acc": 0.5176056338028169, "train_speed(iter/s)": 0.671229 }, { "epoch": 1.7844136926438456, "grad_norm": 3.671182870864868, "learning_rate": 7.175038565104461e-05, "loss": 2.349492073059082, "memory(GiB)": 72.85, "step": 41650, "token_acc": 0.535483870967742, 
"train_speed(iter/s)": 0.671235 }, { "epoch": 1.7846279079730945, "grad_norm": 4.073594570159912, "learning_rate": 7.174553779095387e-05, "loss": 2.4411617279052735, "memory(GiB)": 72.85, "step": 41655, "token_acc": 0.5043859649122807, "train_speed(iter/s)": 0.671236 }, { "epoch": 1.7848421233023435, "grad_norm": 4.508917331695557, "learning_rate": 7.173947761130025e-05, "loss": 2.2509634017944338, "memory(GiB)": 72.85, "step": 41660, "token_acc": 0.5269230769230769, "train_speed(iter/s)": 0.671255 }, { "epoch": 1.7850563386315925, "grad_norm": 4.6670308113098145, "learning_rate": 7.173341703781562e-05, "loss": 2.5368621826171873, "memory(GiB)": 72.85, "step": 41665, "token_acc": 0.449685534591195, "train_speed(iter/s)": 0.671272 }, { "epoch": 1.7852705539608413, "grad_norm": 4.336816310882568, "learning_rate": 7.172735607060975e-05, "loss": 2.2110198974609374, "memory(GiB)": 72.85, "step": 41670, "token_acc": 0.5094339622641509, "train_speed(iter/s)": 0.671275 }, { "epoch": 1.7854847692900904, "grad_norm": 4.579561233520508, "learning_rate": 7.172129470979246e-05, "loss": 2.485904312133789, "memory(GiB)": 72.85, "step": 41675, "token_acc": 0.4664429530201342, "train_speed(iter/s)": 0.671266 }, { "epoch": 1.7856989846193394, "grad_norm": 3.7064030170440674, "learning_rate": 7.171523295547352e-05, "loss": 2.127897834777832, "memory(GiB)": 72.85, "step": 41680, "token_acc": 0.5211267605633803, "train_speed(iter/s)": 0.671254 }, { "epoch": 1.7859131999485882, "grad_norm": 4.412271499633789, "learning_rate": 7.170917080776278e-05, "loss": 2.387185478210449, "memory(GiB)": 72.85, "step": 41685, "token_acc": 0.47985347985347987, "train_speed(iter/s)": 0.671262 }, { "epoch": 1.7861274152778372, "grad_norm": 4.057504653930664, "learning_rate": 7.170310826677002e-05, "loss": 2.20389347076416, "memory(GiB)": 72.85, "step": 41690, "token_acc": 0.5168918918918919, "train_speed(iter/s)": 0.671273 }, { "epoch": 1.7863416306070863, "grad_norm": 4.252926826477051, "learning_rate": 
7.16970453326051e-05, "loss": 2.1969011306762694, "memory(GiB)": 72.85, "step": 41695, "token_acc": 0.5159010600706714, "train_speed(iter/s)": 0.671279 }, { "epoch": 1.786555845936335, "grad_norm": 3.531733751296997, "learning_rate": 7.169098200537787e-05, "loss": 2.39351863861084, "memory(GiB)": 72.85, "step": 41700, "token_acc": 0.4724137931034483, "train_speed(iter/s)": 0.67127 }, { "epoch": 1.7867700612655841, "grad_norm": 4.478392124176025, "learning_rate": 7.168491828519814e-05, "loss": 2.438063621520996, "memory(GiB)": 72.85, "step": 41705, "token_acc": 0.4766355140186916, "train_speed(iter/s)": 0.671279 }, { "epoch": 1.7869842765948332, "grad_norm": 4.877347469329834, "learning_rate": 7.167885417217577e-05, "loss": 2.4818464279174806, "memory(GiB)": 72.85, "step": 41710, "token_acc": 0.47266881028938906, "train_speed(iter/s)": 0.671287 }, { "epoch": 1.787198491924082, "grad_norm": 4.899033546447754, "learning_rate": 7.167278966642063e-05, "loss": 2.402818298339844, "memory(GiB)": 72.85, "step": 41715, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.671292 }, { "epoch": 1.787412707253331, "grad_norm": 5.587950706481934, "learning_rate": 7.166672476804257e-05, "loss": 2.370590591430664, "memory(GiB)": 72.85, "step": 41720, "token_acc": 0.5, "train_speed(iter/s)": 0.671286 }, { "epoch": 1.78762692258258, "grad_norm": 4.7876482009887695, "learning_rate": 7.166065947715146e-05, "loss": 2.4033931732177733, "memory(GiB)": 72.85, "step": 41725, "token_acc": 0.5461254612546126, "train_speed(iter/s)": 0.671297 }, { "epoch": 1.7878411379118289, "grad_norm": 4.675436973571777, "learning_rate": 7.16545937938572e-05, "loss": 2.196686935424805, "memory(GiB)": 72.85, "step": 41730, "token_acc": 0.5035971223021583, "train_speed(iter/s)": 0.671294 }, { "epoch": 1.788055353241078, "grad_norm": 4.641538143157959, "learning_rate": 7.164852771826965e-05, "loss": 2.413774871826172, "memory(GiB)": 72.85, "step": 41735, "token_acc": 0.48214285714285715, 
"train_speed(iter/s)": 0.671296 }, { "epoch": 1.788269568570327, "grad_norm": 3.6433403491973877, "learning_rate": 7.164246125049872e-05, "loss": 2.472596549987793, "memory(GiB)": 72.85, "step": 41740, "token_acc": 0.5063694267515924, "train_speed(iter/s)": 0.67129 }, { "epoch": 1.7884837838995757, "grad_norm": 3.6328160762786865, "learning_rate": 7.16363943906543e-05, "loss": 2.29967041015625, "memory(GiB)": 72.85, "step": 41745, "token_acc": 0.4900662251655629, "train_speed(iter/s)": 0.671284 }, { "epoch": 1.7886979992288248, "grad_norm": 4.203352451324463, "learning_rate": 7.163032713884628e-05, "loss": 2.636279296875, "memory(GiB)": 72.85, "step": 41750, "token_acc": 0.484472049689441, "train_speed(iter/s)": 0.671288 }, { "epoch": 1.7889122145580738, "grad_norm": 5.776928901672363, "learning_rate": 7.162425949518461e-05, "loss": 2.2642681121826174, "memory(GiB)": 72.85, "step": 41755, "token_acc": 0.5094339622641509, "train_speed(iter/s)": 0.671298 }, { "epoch": 1.7891264298873226, "grad_norm": 4.922156810760498, "learning_rate": 7.161819145977919e-05, "loss": 2.2053462982177736, "memory(GiB)": 72.85, "step": 41760, "token_acc": 0.5092936802973977, "train_speed(iter/s)": 0.671291 }, { "epoch": 1.7893406452165717, "grad_norm": 5.636740207672119, "learning_rate": 7.161212303273997e-05, "loss": 1.967229461669922, "memory(GiB)": 72.85, "step": 41765, "token_acc": 0.5797665369649806, "train_speed(iter/s)": 0.671294 }, { "epoch": 1.7895548605458207, "grad_norm": 4.001282215118408, "learning_rate": 7.160605421417684e-05, "loss": 2.3348663330078123, "memory(GiB)": 72.85, "step": 41770, "token_acc": 0.4880636604774536, "train_speed(iter/s)": 0.671283 }, { "epoch": 1.7897690758750695, "grad_norm": 3.657428741455078, "learning_rate": 7.159998500419978e-05, "loss": 2.371731758117676, "memory(GiB)": 72.85, "step": 41775, "token_acc": 0.5068027210884354, "train_speed(iter/s)": 0.671274 }, { "epoch": 1.7899832912043185, "grad_norm": 3.724599838256836, "learning_rate": 
7.159391540291873e-05, "loss": 2.2369968414306642, "memory(GiB)": 72.85, "step": 41780, "token_acc": 0.5207547169811321, "train_speed(iter/s)": 0.671283 }, { "epoch": 1.7901975065335676, "grad_norm": 6.713475704193115, "learning_rate": 7.158784541044364e-05, "loss": 2.282373046875, "memory(GiB)": 72.85, "step": 41785, "token_acc": 0.5616438356164384, "train_speed(iter/s)": 0.671287 }, { "epoch": 1.7904117218628164, "grad_norm": 5.453012943267822, "learning_rate": 7.158177502688448e-05, "loss": 2.4337778091430664, "memory(GiB)": 72.85, "step": 41790, "token_acc": 0.48201438848920863, "train_speed(iter/s)": 0.671303 }, { "epoch": 1.7906259371920654, "grad_norm": 4.906391143798828, "learning_rate": 7.157570425235124e-05, "loss": 2.2966176986694338, "memory(GiB)": 72.85, "step": 41795, "token_acc": 0.5064102564102564, "train_speed(iter/s)": 0.671288 }, { "epoch": 1.7908401525213145, "grad_norm": 4.359907627105713, "learning_rate": 7.156963308695385e-05, "loss": 2.580198287963867, "memory(GiB)": 72.85, "step": 41800, "token_acc": 0.4539249146757679, "train_speed(iter/s)": 0.671292 }, { "epoch": 1.7910543678505633, "grad_norm": 4.323345184326172, "learning_rate": 7.156356153080234e-05, "loss": 2.4531497955322266, "memory(GiB)": 72.85, "step": 41805, "token_acc": 0.46835443037974683, "train_speed(iter/s)": 0.67128 }, { "epoch": 1.7912685831798123, "grad_norm": 4.464927673339844, "learning_rate": 7.155748958400667e-05, "loss": 2.1516515731811525, "memory(GiB)": 72.85, "step": 41810, "token_acc": 0.5503597122302158, "train_speed(iter/s)": 0.6713 }, { "epoch": 1.7914827985090613, "grad_norm": 3.7610082626342773, "learning_rate": 7.155141724667687e-05, "loss": 2.2760526657104494, "memory(GiB)": 72.85, "step": 41815, "token_acc": 0.5097493036211699, "train_speed(iter/s)": 0.671295 }, { "epoch": 1.7916970138383101, "grad_norm": 3.4594786167144775, "learning_rate": 7.154534451892292e-05, "loss": 2.158914566040039, "memory(GiB)": 72.85, "step": 41820, "token_acc": 
0.4840764331210191, "train_speed(iter/s)": 0.671309 }, { "epoch": 1.7919112291675592, "grad_norm": 4.105143070220947, "learning_rate": 7.153927140085485e-05, "loss": 2.5667648315429688, "memory(GiB)": 72.85, "step": 41825, "token_acc": 0.4813664596273292, "train_speed(iter/s)": 0.67131 }, { "epoch": 1.7921254444968082, "grad_norm": 4.682702541351318, "learning_rate": 7.153319789258269e-05, "loss": 2.612024116516113, "memory(GiB)": 72.85, "step": 41830, "token_acc": 0.4645390070921986, "train_speed(iter/s)": 0.671326 }, { "epoch": 1.792339659826057, "grad_norm": 4.562289237976074, "learning_rate": 7.152712399421642e-05, "loss": 2.3545536041259765, "memory(GiB)": 72.85, "step": 41835, "token_acc": 0.5177865612648221, "train_speed(iter/s)": 0.671345 }, { "epoch": 1.792553875155306, "grad_norm": 3.4821972846984863, "learning_rate": 7.152104970586611e-05, "loss": 2.523566246032715, "memory(GiB)": 72.85, "step": 41840, "token_acc": 0.4880239520958084, "train_speed(iter/s)": 0.671363 }, { "epoch": 1.792768090484555, "grad_norm": 4.15421199798584, "learning_rate": 7.151497502764179e-05, "loss": 2.2961143493652343, "memory(GiB)": 72.85, "step": 41845, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.671353 }, { "epoch": 1.792982305813804, "grad_norm": 5.061278343200684, "learning_rate": 7.150889995965353e-05, "loss": 2.423716354370117, "memory(GiB)": 72.85, "step": 41850, "token_acc": 0.4837758112094395, "train_speed(iter/s)": 0.671366 }, { "epoch": 1.793196521143053, "grad_norm": 5.522870063781738, "learning_rate": 7.150282450201138e-05, "loss": 2.2040876388549804, "memory(GiB)": 72.85, "step": 41855, "token_acc": 0.5325670498084292, "train_speed(iter/s)": 0.671397 }, { "epoch": 1.793410736472302, "grad_norm": 3.7609410285949707, "learning_rate": 7.149674865482537e-05, "loss": 2.4783430099487305, "memory(GiB)": 72.85, "step": 41860, "token_acc": 0.5, "train_speed(iter/s)": 0.671408 }, { "epoch": 1.7936249518015508, "grad_norm": 3.4404563903808594, "learning_rate": 
7.14906724182056e-05, "loss": 2.525887298583984, "memory(GiB)": 72.85, "step": 41865, "token_acc": 0.5119453924914675, "train_speed(iter/s)": 0.671419 }, { "epoch": 1.7938391671307998, "grad_norm": 3.396871566772461, "learning_rate": 7.148459579226215e-05, "loss": 2.629226303100586, "memory(GiB)": 72.85, "step": 41870, "token_acc": 0.4697508896797153, "train_speed(iter/s)": 0.671417 }, { "epoch": 1.7940533824600489, "grad_norm": 2.7170841693878174, "learning_rate": 7.147851877710508e-05, "loss": 2.2096220016479493, "memory(GiB)": 72.85, "step": 41875, "token_acc": 0.5074626865671642, "train_speed(iter/s)": 0.671422 }, { "epoch": 1.7942675977892977, "grad_norm": 3.808377742767334, "learning_rate": 7.14724413728445e-05, "loss": 2.436020088195801, "memory(GiB)": 72.85, "step": 41880, "token_acc": 0.4647887323943662, "train_speed(iter/s)": 0.671442 }, { "epoch": 1.7944818131185467, "grad_norm": 3.6432812213897705, "learning_rate": 7.146636357959049e-05, "loss": 2.3743431091308596, "memory(GiB)": 72.85, "step": 41885, "token_acc": 0.4868913857677903, "train_speed(iter/s)": 0.671428 }, { "epoch": 1.7946960284477957, "grad_norm": 3.669119358062744, "learning_rate": 7.146028539745318e-05, "loss": 2.1915266036987306, "memory(GiB)": 72.85, "step": 41890, "token_acc": 0.5031446540880503, "train_speed(iter/s)": 0.671432 }, { "epoch": 1.7949102437770446, "grad_norm": 3.958061456680298, "learning_rate": 7.145420682654265e-05, "loss": 2.2334218978881837, "memory(GiB)": 72.85, "step": 41895, "token_acc": 0.483974358974359, "train_speed(iter/s)": 0.671435 }, { "epoch": 1.7951244591062938, "grad_norm": 4.724113941192627, "learning_rate": 7.144812786696907e-05, "loss": 2.5138973236083983, "memory(GiB)": 72.85, "step": 41900, "token_acc": 0.47297297297297297, "train_speed(iter/s)": 0.671448 }, { "epoch": 1.7953386744355426, "grad_norm": 4.239015102386475, "learning_rate": 7.14420485188425e-05, "loss": 2.3662782669067384, "memory(GiB)": 72.85, "step": 41905, "token_acc": 
0.5091575091575091, "train_speed(iter/s)": 0.671444 }, { "epoch": 1.7955528897647914, "grad_norm": 4.23897647857666, "learning_rate": 7.143596878227315e-05, "loss": 2.1433012008666994, "memory(GiB)": 72.85, "step": 41910, "token_acc": 0.518796992481203, "train_speed(iter/s)": 0.67143 }, { "epoch": 1.7957671050940407, "grad_norm": 3.7172868251800537, "learning_rate": 7.142988865737108e-05, "loss": 2.2155046463012695, "memory(GiB)": 72.85, "step": 41915, "token_acc": 0.47794117647058826, "train_speed(iter/s)": 0.671419 }, { "epoch": 1.7959813204232895, "grad_norm": 4.773506164550781, "learning_rate": 7.142380814424648e-05, "loss": 2.2627439498901367, "memory(GiB)": 72.85, "step": 41920, "token_acc": 0.5074626865671642, "train_speed(iter/s)": 0.671425 }, { "epoch": 1.7961955357525383, "grad_norm": 3.878016948699951, "learning_rate": 7.14177272430095e-05, "loss": 2.0757431030273437, "memory(GiB)": 72.85, "step": 41925, "token_acc": 0.5595667870036101, "train_speed(iter/s)": 0.67144 }, { "epoch": 1.7964097510817876, "grad_norm": 3.976418972015381, "learning_rate": 7.14116459537703e-05, "loss": 2.418208694458008, "memory(GiB)": 72.85, "step": 41930, "token_acc": 0.4714285714285714, "train_speed(iter/s)": 0.671459 }, { "epoch": 1.7966239664110364, "grad_norm": 3.2403903007507324, "learning_rate": 7.140556427663904e-05, "loss": 2.8897754669189455, "memory(GiB)": 72.85, "step": 41935, "token_acc": 0.4634146341463415, "train_speed(iter/s)": 0.671475 }, { "epoch": 1.7968381817402852, "grad_norm": 4.4854960441589355, "learning_rate": 7.139948221172592e-05, "loss": 2.4097280502319336, "memory(GiB)": 72.85, "step": 41940, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.671473 }, { "epoch": 1.7970523970695345, "grad_norm": 3.9563348293304443, "learning_rate": 7.139339975914111e-05, "loss": 2.5389101028442385, "memory(GiB)": 72.85, "step": 41945, "token_acc": 0.48299319727891155, "train_speed(iter/s)": 0.671483 }, { "epoch": 1.7972666123987833, "grad_norm": 
4.2867841720581055, "learning_rate": 7.138731691899478e-05, "loss": 2.5209781646728517, "memory(GiB)": 72.85, "step": 41950, "token_acc": 0.5167785234899329, "train_speed(iter/s)": 0.671487 }, { "epoch": 1.797480827728032, "grad_norm": 5.095965385437012, "learning_rate": 7.138123369139714e-05, "loss": 2.430099296569824, "memory(GiB)": 72.85, "step": 41955, "token_acc": 0.5082508250825083, "train_speed(iter/s)": 0.671482 }, { "epoch": 1.7976950430572813, "grad_norm": 4.359273910522461, "learning_rate": 7.13751500764584e-05, "loss": 2.209629440307617, "memory(GiB)": 72.85, "step": 41960, "token_acc": 0.5331230283911672, "train_speed(iter/s)": 0.671478 }, { "epoch": 1.7979092583865302, "grad_norm": 4.123367786407471, "learning_rate": 7.136906607428875e-05, "loss": 2.60146484375, "memory(GiB)": 72.85, "step": 41965, "token_acc": 0.48059701492537316, "train_speed(iter/s)": 0.671472 }, { "epoch": 1.798123473715779, "grad_norm": 4.370743751525879, "learning_rate": 7.136298168499844e-05, "loss": 2.3397872924804686, "memory(GiB)": 72.85, "step": 41970, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.671466 }, { "epoch": 1.7983376890450282, "grad_norm": 4.91053581237793, "learning_rate": 7.135689690869767e-05, "loss": 2.088546371459961, "memory(GiB)": 72.85, "step": 41975, "token_acc": 0.4982698961937716, "train_speed(iter/s)": 0.671473 }, { "epoch": 1.798551904374277, "grad_norm": 4.20283842086792, "learning_rate": 7.13508117454967e-05, "loss": 2.2980127334594727, "memory(GiB)": 72.85, "step": 41980, "token_acc": 0.5136054421768708, "train_speed(iter/s)": 0.67148 }, { "epoch": 1.7987661197035258, "grad_norm": 4.468259334564209, "learning_rate": 7.134472619550572e-05, "loss": 2.3569368362426757, "memory(GiB)": 72.85, "step": 41985, "token_acc": 0.4577922077922078, "train_speed(iter/s)": 0.671487 }, { "epoch": 1.798980335032775, "grad_norm": 5.112709045410156, "learning_rate": 7.1338640258835e-05, "loss": 2.4238426208496096, "memory(GiB)": 72.85, "step": 41990, 
"token_acc": 0.4738562091503268, "train_speed(iter/s)": 0.67147 }, { "epoch": 1.799194550362024, "grad_norm": 5.3742499351501465, "learning_rate": 7.13325539355948e-05, "loss": 2.15689697265625, "memory(GiB)": 72.85, "step": 41995, "token_acc": 0.5207373271889401, "train_speed(iter/s)": 0.671465 }, { "epoch": 1.7994087656912727, "grad_norm": 3.3083629608154297, "learning_rate": 7.132646722589537e-05, "loss": 2.468746566772461, "memory(GiB)": 72.85, "step": 42000, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.671469 }, { "epoch": 1.7994087656912727, "eval_loss": 2.247581958770752, "eval_runtime": 15.9207, "eval_samples_per_second": 6.281, "eval_steps_per_second": 6.281, "eval_token_acc": 0.4902676399026764, "step": 42000 }, { "epoch": 1.799622981020522, "grad_norm": 4.492758750915527, "learning_rate": 7.132038012984701e-05, "loss": 2.2821014404296873, "memory(GiB)": 72.85, "step": 42005, "token_acc": 0.4893805309734513, "train_speed(iter/s)": 0.671286 }, { "epoch": 1.7998371963497708, "grad_norm": 5.2591729164123535, "learning_rate": 7.131429264755993e-05, "loss": 2.1821746826171875, "memory(GiB)": 72.85, "step": 42010, "token_acc": 0.5109489051094891, "train_speed(iter/s)": 0.671287 }, { "epoch": 1.8000514116790196, "grad_norm": 4.38872766494751, "learning_rate": 7.130820477914446e-05, "loss": 2.3767044067382814, "memory(GiB)": 72.85, "step": 42015, "token_acc": 0.49310344827586206, "train_speed(iter/s)": 0.671294 }, { "epoch": 1.8002656270082689, "grad_norm": 4.905683994293213, "learning_rate": 7.130211652471086e-05, "loss": 2.31176643371582, "memory(GiB)": 72.85, "step": 42020, "token_acc": 0.47796610169491527, "train_speed(iter/s)": 0.671286 }, { "epoch": 1.8004798423375177, "grad_norm": 5.124529838562012, "learning_rate": 7.129602788436944e-05, "loss": 2.209792709350586, "memory(GiB)": 72.85, "step": 42025, "token_acc": 0.5, "train_speed(iter/s)": 0.671291 }, { "epoch": 1.8006940576667665, "grad_norm": 4.055619716644287, "learning_rate": 
7.128993885823053e-05, "loss": 2.422111701965332, "memory(GiB)": 72.85, "step": 42030, "token_acc": 0.5313531353135313, "train_speed(iter/s)": 0.67129 }, { "epoch": 1.8009082729960157, "grad_norm": 5.0163960456848145, "learning_rate": 7.128384944640436e-05, "loss": 2.3176950454711913, "memory(GiB)": 72.85, "step": 42035, "token_acc": 0.5033112582781457, "train_speed(iter/s)": 0.671272 }, { "epoch": 1.8011224883252646, "grad_norm": 3.79270339012146, "learning_rate": 7.12777596490013e-05, "loss": 2.7120101928710936, "memory(GiB)": 72.85, "step": 42040, "token_acc": 0.40285714285714286, "train_speed(iter/s)": 0.671278 }, { "epoch": 1.8013367036545134, "grad_norm": 3.4015116691589355, "learning_rate": 7.127166946613168e-05, "loss": 2.4073740005493165, "memory(GiB)": 72.85, "step": 42045, "token_acc": 0.48534201954397393, "train_speed(iter/s)": 0.671284 }, { "epoch": 1.8015509189837626, "grad_norm": 4.3898749351501465, "learning_rate": 7.12655788979058e-05, "loss": 2.57639217376709, "memory(GiB)": 72.85, "step": 42050, "token_acc": 0.48338368580060426, "train_speed(iter/s)": 0.671289 }, { "epoch": 1.8017651343130114, "grad_norm": 4.43271541595459, "learning_rate": 7.125948794443402e-05, "loss": 2.3346738815307617, "memory(GiB)": 72.85, "step": 42055, "token_acc": 0.4845360824742268, "train_speed(iter/s)": 0.671298 }, { "epoch": 1.8019793496422603, "grad_norm": 4.966652870178223, "learning_rate": 7.125339660582668e-05, "loss": 2.4243032455444338, "memory(GiB)": 72.85, "step": 42060, "token_acc": 0.5017182130584192, "train_speed(iter/s)": 0.671297 }, { "epoch": 1.8021935649715095, "grad_norm": 4.217916488647461, "learning_rate": 7.12473048821941e-05, "loss": 2.4624671936035156, "memory(GiB)": 72.85, "step": 42065, "token_acc": 0.44886363636363635, "train_speed(iter/s)": 0.671287 }, { "epoch": 1.8024077803007583, "grad_norm": 3.910344123840332, "learning_rate": 7.124121277364666e-05, "loss": 2.295697021484375, "memory(GiB)": 72.85, "step": 42070, "token_acc": 
0.48951048951048953, "train_speed(iter/s)": 0.671292 }, { "epoch": 1.8026219956300071, "grad_norm": 5.1122660636901855, "learning_rate": 7.123512028029475e-05, "loss": 2.4549903869628906, "memory(GiB)": 72.85, "step": 42075, "token_acc": 0.49206349206349204, "train_speed(iter/s)": 0.67131 }, { "epoch": 1.8028362109592564, "grad_norm": 4.357280731201172, "learning_rate": 7.122902740224869e-05, "loss": 2.222216033935547, "memory(GiB)": 72.85, "step": 42080, "token_acc": 0.5108225108225108, "train_speed(iter/s)": 0.671324 }, { "epoch": 1.8030504262885052, "grad_norm": 4.273272514343262, "learning_rate": 7.12229341396189e-05, "loss": 2.472598648071289, "memory(GiB)": 72.85, "step": 42085, "token_acc": 0.4454828660436137, "train_speed(iter/s)": 0.671319 }, { "epoch": 1.803264641617754, "grad_norm": 4.58110237121582, "learning_rate": 7.121684049251575e-05, "loss": 2.487131118774414, "memory(GiB)": 72.85, "step": 42090, "token_acc": 0.46060606060606063, "train_speed(iter/s)": 0.671301 }, { "epoch": 1.8034788569470033, "grad_norm": 4.12831974029541, "learning_rate": 7.121074646104962e-05, "loss": 2.286640167236328, "memory(GiB)": 72.85, "step": 42095, "token_acc": 0.5245398773006135, "train_speed(iter/s)": 0.671316 }, { "epoch": 1.803693072276252, "grad_norm": 3.9924209117889404, "learning_rate": 7.120465204533093e-05, "loss": 2.4983238220214843, "memory(GiB)": 72.85, "step": 42100, "token_acc": 0.46601941747572817, "train_speed(iter/s)": 0.671324 }, { "epoch": 1.803907287605501, "grad_norm": 4.43526029586792, "learning_rate": 7.119855724547008e-05, "loss": 2.0797502517700197, "memory(GiB)": 72.85, "step": 42105, "token_acc": 0.48639455782312924, "train_speed(iter/s)": 0.671339 }, { "epoch": 1.8041215029347502, "grad_norm": 5.439842224121094, "learning_rate": 7.119246206157745e-05, "loss": 2.173626708984375, "memory(GiB)": 72.85, "step": 42110, "token_acc": 0.5021097046413502, "train_speed(iter/s)": 0.671337 }, { "epoch": 1.804335718263999, "grad_norm": 4.757728099822998, 
"learning_rate": 7.118636649376354e-05, "loss": 2.268987464904785, "memory(GiB)": 72.85, "step": 42115, "token_acc": 0.5166666666666667, "train_speed(iter/s)": 0.671349 }, { "epoch": 1.8045499335932478, "grad_norm": 4.347389221191406, "learning_rate": 7.118027054213869e-05, "loss": 2.1268402099609376, "memory(GiB)": 72.85, "step": 42120, "token_acc": 0.5535714285714286, "train_speed(iter/s)": 0.671364 }, { "epoch": 1.804764148922497, "grad_norm": 4.131709098815918, "learning_rate": 7.117417420681338e-05, "loss": 2.538951301574707, "memory(GiB)": 72.85, "step": 42125, "token_acc": 0.4625, "train_speed(iter/s)": 0.671355 }, { "epoch": 1.8049783642517458, "grad_norm": 4.385239124298096, "learning_rate": 7.116807748789805e-05, "loss": 2.1226476669311523, "memory(GiB)": 72.85, "step": 42130, "token_acc": 0.48828125, "train_speed(iter/s)": 0.671365 }, { "epoch": 1.8051925795809947, "grad_norm": 4.837535858154297, "learning_rate": 7.116198038550312e-05, "loss": 2.314035415649414, "memory(GiB)": 72.85, "step": 42135, "token_acc": 0.4968553459119497, "train_speed(iter/s)": 0.67137 }, { "epoch": 1.805406794910244, "grad_norm": 5.736173629760742, "learning_rate": 7.115588289973908e-05, "loss": 2.5143815994262697, "memory(GiB)": 72.85, "step": 42140, "token_acc": 0.45180722891566266, "train_speed(iter/s)": 0.671382 }, { "epoch": 1.8056210102394927, "grad_norm": 4.307316780090332, "learning_rate": 7.114978503071637e-05, "loss": 2.2993549346923827, "memory(GiB)": 72.85, "step": 42145, "token_acc": 0.5305343511450382, "train_speed(iter/s)": 0.67139 }, { "epoch": 1.8058352255687415, "grad_norm": 5.271712779998779, "learning_rate": 7.114368677854546e-05, "loss": 2.1083953857421873, "memory(GiB)": 72.85, "step": 42150, "token_acc": 0.5340136054421769, "train_speed(iter/s)": 0.671401 }, { "epoch": 1.8060494408979908, "grad_norm": 3.5017151832580566, "learning_rate": 7.113758814333683e-05, "loss": 2.2047962188720702, "memory(GiB)": 72.85, "step": 42155, "token_acc": 0.525974025974026, 
"train_speed(iter/s)": 0.671409 }, { "epoch": 1.8062636562272396, "grad_norm": 3.8641343116760254, "learning_rate": 7.113148912520097e-05, "loss": 2.2186042785644533, "memory(GiB)": 72.85, "step": 42160, "token_acc": 0.546875, "train_speed(iter/s)": 0.671402 }, { "epoch": 1.8064778715564884, "grad_norm": 5.245913982391357, "learning_rate": 7.112538972424836e-05, "loss": 2.245571327209473, "memory(GiB)": 72.85, "step": 42165, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.671422 }, { "epoch": 1.8066920868857377, "grad_norm": 4.352231979370117, "learning_rate": 7.111928994058949e-05, "loss": 2.2861665725708007, "memory(GiB)": 72.85, "step": 42170, "token_acc": 0.48026315789473684, "train_speed(iter/s)": 0.67143 }, { "epoch": 1.8069063022149865, "grad_norm": 5.4792070388793945, "learning_rate": 7.111318977433489e-05, "loss": 2.505384063720703, "memory(GiB)": 72.85, "step": 42175, "token_acc": 0.46258503401360546, "train_speed(iter/s)": 0.67142 }, { "epoch": 1.8071205175442353, "grad_norm": 5.373441696166992, "learning_rate": 7.110708922559503e-05, "loss": 2.2083223342895506, "memory(GiB)": 72.85, "step": 42180, "token_acc": 0.4859437751004016, "train_speed(iter/s)": 0.671432 }, { "epoch": 1.8073347328734846, "grad_norm": 4.014828205108643, "learning_rate": 7.110098829448045e-05, "loss": 2.451283073425293, "memory(GiB)": 72.85, "step": 42185, "token_acc": 0.44518272425249167, "train_speed(iter/s)": 0.671441 }, { "epoch": 1.8075489482027334, "grad_norm": 5.677745342254639, "learning_rate": 7.109488698110168e-05, "loss": 2.423504638671875, "memory(GiB)": 72.85, "step": 42190, "token_acc": 0.5259259259259259, "train_speed(iter/s)": 0.671464 }, { "epoch": 1.8077631635319822, "grad_norm": 5.028759956359863, "learning_rate": 7.108878528556923e-05, "loss": 2.525323677062988, "memory(GiB)": 72.85, "step": 42195, "token_acc": 0.49480968858131485, "train_speed(iter/s)": 0.671466 }, { "epoch": 1.8079773788612314, "grad_norm": 4.739214897155762, "learning_rate": 
7.108268320799369e-05, "loss": 2.478961181640625, "memory(GiB)": 72.85, "step": 42200, "token_acc": 0.4891304347826087, "train_speed(iter/s)": 0.671474 }, { "epoch": 1.8081915941904803, "grad_norm": 5.363802433013916, "learning_rate": 7.107658074848554e-05, "loss": 2.5452655792236327, "memory(GiB)": 72.85, "step": 42205, "token_acc": 0.4608150470219436, "train_speed(iter/s)": 0.671464 }, { "epoch": 1.808405809519729, "grad_norm": 6.52755880355835, "learning_rate": 7.107047790715535e-05, "loss": 2.495071220397949, "memory(GiB)": 72.85, "step": 42210, "token_acc": 0.4746376811594203, "train_speed(iter/s)": 0.671463 }, { "epoch": 1.8086200248489783, "grad_norm": 4.939635276794434, "learning_rate": 7.106437468411371e-05, "loss": 2.265710639953613, "memory(GiB)": 72.85, "step": 42215, "token_acc": 0.5068027210884354, "train_speed(iter/s)": 0.671468 }, { "epoch": 1.8088342401782271, "grad_norm": 4.1782097816467285, "learning_rate": 7.105827107947114e-05, "loss": 2.3280872344970702, "memory(GiB)": 72.85, "step": 42220, "token_acc": 0.5175097276264592, "train_speed(iter/s)": 0.671481 }, { "epoch": 1.809048455507476, "grad_norm": 6.7554802894592285, "learning_rate": 7.105216709333824e-05, "loss": 2.316933822631836, "memory(GiB)": 72.85, "step": 42225, "token_acc": 0.4577922077922078, "train_speed(iter/s)": 0.671459 }, { "epoch": 1.8092626708367252, "grad_norm": 4.379814147949219, "learning_rate": 7.104606272582559e-05, "loss": 2.533111572265625, "memory(GiB)": 72.85, "step": 42230, "token_acc": 0.49097472924187724, "train_speed(iter/s)": 0.671453 }, { "epoch": 1.809476886165974, "grad_norm": 4.354522705078125, "learning_rate": 7.103995797704378e-05, "loss": 2.3151535034179687, "memory(GiB)": 72.85, "step": 42235, "token_acc": 0.5145631067961165, "train_speed(iter/s)": 0.671459 }, { "epoch": 1.809691101495223, "grad_norm": 4.037356376647949, "learning_rate": 7.103385284710337e-05, "loss": 2.454107666015625, "memory(GiB)": 72.85, "step": 42240, "token_acc": 
0.4852941176470588, "train_speed(iter/s)": 0.67146 }, { "epoch": 1.809905316824472, "grad_norm": 6.826387405395508, "learning_rate": 7.102774733611501e-05, "loss": 2.3479679107666014, "memory(GiB)": 72.85, "step": 42245, "token_acc": 0.4965277777777778, "train_speed(iter/s)": 0.671466 }, { "epoch": 1.810119532153721, "grad_norm": 3.8461782932281494, "learning_rate": 7.102164144418925e-05, "loss": 2.501820373535156, "memory(GiB)": 72.85, "step": 42250, "token_acc": 0.49666666666666665, "train_speed(iter/s)": 0.671458 }, { "epoch": 1.81033374748297, "grad_norm": 6.240277290344238, "learning_rate": 7.101553517143676e-05, "loss": 2.150281524658203, "memory(GiB)": 72.85, "step": 42255, "token_acc": 0.5707964601769911, "train_speed(iter/s)": 0.671462 }, { "epoch": 1.810547962812219, "grad_norm": 4.435615062713623, "learning_rate": 7.100942851796814e-05, "loss": 2.453211212158203, "memory(GiB)": 72.85, "step": 42260, "token_acc": 0.47761194029850745, "train_speed(iter/s)": 0.671454 }, { "epoch": 1.8107621781414678, "grad_norm": 3.3017828464508057, "learning_rate": 7.100332148389401e-05, "loss": 2.2655834197998046, "memory(GiB)": 72.85, "step": 42265, "token_acc": 0.5166666666666667, "train_speed(iter/s)": 0.671456 }, { "epoch": 1.8109763934707168, "grad_norm": 3.9319424629211426, "learning_rate": 7.099721406932501e-05, "loss": 2.2246515274047853, "memory(GiB)": 72.85, "step": 42270, "token_acc": 0.5150375939849624, "train_speed(iter/s)": 0.671472 }, { "epoch": 1.8111906087999659, "grad_norm": 3.4353063106536865, "learning_rate": 7.099110627437176e-05, "loss": 2.3423799514770507, "memory(GiB)": 72.85, "step": 42275, "token_acc": 0.4788273615635179, "train_speed(iter/s)": 0.671483 }, { "epoch": 1.8114048241292147, "grad_norm": 3.9539530277252197, "learning_rate": 7.098499809914494e-05, "loss": 2.0908130645751952, "memory(GiB)": 72.85, "step": 42280, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.671488 }, { "epoch": 1.8116190394584637, "grad_norm": 
4.187204837799072, "learning_rate": 7.09788895437552e-05, "loss": 2.587727165222168, "memory(GiB)": 72.85, "step": 42285, "token_acc": 0.4778156996587031, "train_speed(iter/s)": 0.671486 }, { "epoch": 1.8118332547877127, "grad_norm": 4.294317245483398, "learning_rate": 7.09727806083132e-05, "loss": 2.362834930419922, "memory(GiB)": 72.85, "step": 42290, "token_acc": 0.49122807017543857, "train_speed(iter/s)": 0.671491 }, { "epoch": 1.8120474701169615, "grad_norm": 4.112758636474609, "learning_rate": 7.09666712929296e-05, "loss": 2.4897865295410155, "memory(GiB)": 72.85, "step": 42295, "token_acc": 0.4379310344827586, "train_speed(iter/s)": 0.671494 }, { "epoch": 1.8122616854462106, "grad_norm": 3.6066765785217285, "learning_rate": 7.096056159771507e-05, "loss": 2.4168418884277343, "memory(GiB)": 72.85, "step": 42300, "token_acc": 0.48909657320872274, "train_speed(iter/s)": 0.671487 }, { "epoch": 1.8124759007754596, "grad_norm": 3.8620567321777344, "learning_rate": 7.095445152278032e-05, "loss": 2.393751525878906, "memory(GiB)": 72.85, "step": 42305, "token_acc": 0.47191011235955055, "train_speed(iter/s)": 0.671476 }, { "epoch": 1.8126901161047084, "grad_norm": 4.12153959274292, "learning_rate": 7.0948341068236e-05, "loss": 2.2618946075439452, "memory(GiB)": 72.85, "step": 42310, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.671473 }, { "epoch": 1.8129043314339575, "grad_norm": 3.6556246280670166, "learning_rate": 7.094223023419286e-05, "loss": 2.446196365356445, "memory(GiB)": 72.85, "step": 42315, "token_acc": 0.48534201954397393, "train_speed(iter/s)": 0.671466 }, { "epoch": 1.8131185467632065, "grad_norm": 3.9119579792022705, "learning_rate": 7.093611902076156e-05, "loss": 2.148135757446289, "memory(GiB)": 72.85, "step": 42320, "token_acc": 0.5399361022364217, "train_speed(iter/s)": 0.671469 }, { "epoch": 1.8133327620924553, "grad_norm": 4.56791353225708, "learning_rate": 7.093000742805283e-05, "loss": 2.3970808029174804, "memory(GiB)": 72.85, 
"step": 42325, "token_acc": 0.486013986013986, "train_speed(iter/s)": 0.671457 }, { "epoch": 1.8135469774217043, "grad_norm": 4.602718830108643, "learning_rate": 7.092389545617737e-05, "loss": 2.5155101776123048, "memory(GiB)": 72.85, "step": 42330, "token_acc": 0.47953216374269003, "train_speed(iter/s)": 0.671474 }, { "epoch": 1.8137611927509534, "grad_norm": 3.9954216480255127, "learning_rate": 7.091778310524593e-05, "loss": 2.128004455566406, "memory(GiB)": 72.85, "step": 42335, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.671463 }, { "epoch": 1.8139754080802022, "grad_norm": 4.997162818908691, "learning_rate": 7.091167037536922e-05, "loss": 2.44490966796875, "memory(GiB)": 72.85, "step": 42340, "token_acc": 0.46254071661237783, "train_speed(iter/s)": 0.671448 }, { "epoch": 1.8141896234094512, "grad_norm": 3.9077165126800537, "learning_rate": 7.090555726665798e-05, "loss": 2.3234619140625, "memory(GiB)": 72.85, "step": 42345, "token_acc": 0.5152542372881356, "train_speed(iter/s)": 0.671457 }, { "epoch": 1.8144038387387003, "grad_norm": 3.890772819519043, "learning_rate": 7.089944377922296e-05, "loss": 2.7734846115112304, "memory(GiB)": 72.85, "step": 42350, "token_acc": 0.446064139941691, "train_speed(iter/s)": 0.671461 }, { "epoch": 1.814618054067949, "grad_norm": 4.601730823516846, "learning_rate": 7.08933299131749e-05, "loss": 2.164776611328125, "memory(GiB)": 72.85, "step": 42355, "token_acc": 0.5054945054945055, "train_speed(iter/s)": 0.671456 }, { "epoch": 1.814832269397198, "grad_norm": 4.693329811096191, "learning_rate": 7.08872156686246e-05, "loss": 2.2931755065917967, "memory(GiB)": 72.85, "step": 42360, "token_acc": 0.5152542372881356, "train_speed(iter/s)": 0.671465 }, { "epoch": 1.8150464847264471, "grad_norm": 3.9181337356567383, "learning_rate": 7.088110104568277e-05, "loss": 2.3823129653930666, "memory(GiB)": 72.85, "step": 42365, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.671465 }, { "epoch": 1.815260700055696, 
"grad_norm": 4.805749416351318, "learning_rate": 7.087498604446021e-05, "loss": 2.3354639053344726, "memory(GiB)": 72.85, "step": 42370, "token_acc": 0.4911660777385159, "train_speed(iter/s)": 0.671463 }, { "epoch": 1.815474915384945, "grad_norm": 3.9995741844177246, "learning_rate": 7.086887066506771e-05, "loss": 2.382802391052246, "memory(GiB)": 72.85, "step": 42375, "token_acc": 0.4968553459119497, "train_speed(iter/s)": 0.671449 }, { "epoch": 1.815689130714194, "grad_norm": 4.084371566772461, "learning_rate": 7.086275490761602e-05, "loss": 2.2035789489746094, "memory(GiB)": 72.85, "step": 42380, "token_acc": 0.4866920152091255, "train_speed(iter/s)": 0.671467 }, { "epoch": 1.8159033460434428, "grad_norm": 4.091052532196045, "learning_rate": 7.085663877221596e-05, "loss": 2.6090456008911134, "memory(GiB)": 72.85, "step": 42385, "token_acc": 0.4723756906077348, "train_speed(iter/s)": 0.67148 }, { "epoch": 1.8161175613726919, "grad_norm": 3.7964346408843994, "learning_rate": 7.085052225897835e-05, "loss": 2.457464408874512, "memory(GiB)": 72.85, "step": 42390, "token_acc": 0.4844290657439446, "train_speed(iter/s)": 0.671478 }, { "epoch": 1.816331776701941, "grad_norm": 6.169978141784668, "learning_rate": 7.084440536801394e-05, "loss": 2.5156349182128905, "memory(GiB)": 72.85, "step": 42395, "token_acc": 0.4657534246575342, "train_speed(iter/s)": 0.671466 }, { "epoch": 1.8165459920311897, "grad_norm": 5.165440082550049, "learning_rate": 7.083828809943358e-05, "loss": 2.3430755615234373, "memory(GiB)": 72.85, "step": 42400, "token_acc": 0.48464163822525597, "train_speed(iter/s)": 0.671458 }, { "epoch": 1.8167602073604388, "grad_norm": 5.506157875061035, "learning_rate": 7.083217045334808e-05, "loss": 2.3424158096313477, "memory(GiB)": 72.85, "step": 42405, "token_acc": 0.5056179775280899, "train_speed(iter/s)": 0.671465 }, { "epoch": 1.8169744226896878, "grad_norm": 4.110300540924072, "learning_rate": 7.082605242986827e-05, "loss": 2.035441017150879, "memory(GiB)": 
72.85, "step": 42410, "token_acc": 0.5587044534412956, "train_speed(iter/s)": 0.671461 }, { "epoch": 1.8171886380189366, "grad_norm": 4.283524036407471, "learning_rate": 7.081993402910498e-05, "loss": 2.0809911727905273, "memory(GiB)": 72.85, "step": 42415, "token_acc": 0.572463768115942, "train_speed(iter/s)": 0.671447 }, { "epoch": 1.8174028533481856, "grad_norm": 4.7416768074035645, "learning_rate": 7.081381525116908e-05, "loss": 2.1434152603149412, "memory(GiB)": 72.85, "step": 42420, "token_acc": 0.5393258426966292, "train_speed(iter/s)": 0.67145 }, { "epoch": 1.8176170686774347, "grad_norm": 4.317594528198242, "learning_rate": 7.080769609617136e-05, "loss": 2.529230499267578, "memory(GiB)": 72.85, "step": 42425, "token_acc": 0.5, "train_speed(iter/s)": 0.67148 }, { "epoch": 1.8178312840066835, "grad_norm": 4.686809062957764, "learning_rate": 7.080157656422273e-05, "loss": 2.7188655853271486, "memory(GiB)": 72.85, "step": 42430, "token_acc": 0.47648902821316613, "train_speed(iter/s)": 0.671487 }, { "epoch": 1.8180454993359325, "grad_norm": 3.567336320877075, "learning_rate": 7.079545665543402e-05, "loss": 2.263916778564453, "memory(GiB)": 72.85, "step": 42435, "token_acc": 0.5394736842105263, "train_speed(iter/s)": 0.671484 }, { "epoch": 1.8182597146651815, "grad_norm": 4.130575656890869, "learning_rate": 7.07893363699161e-05, "loss": 2.469972038269043, "memory(GiB)": 72.85, "step": 42440, "token_acc": 0.4724137931034483, "train_speed(iter/s)": 0.671496 }, { "epoch": 1.8184739299944304, "grad_norm": 3.8785572052001953, "learning_rate": 7.078321570777985e-05, "loss": 2.1549816131591797, "memory(GiB)": 72.85, "step": 42445, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.671492 }, { "epoch": 1.8186881453236794, "grad_norm": 3.8000872135162354, "learning_rate": 7.077709466913615e-05, "loss": 2.071017837524414, "memory(GiB)": 72.85, "step": 42450, "token_acc": 0.5783582089552238, "train_speed(iter/s)": 0.671508 }, { "epoch": 1.8189023606529284, 
"grad_norm": 5.115477561950684, "learning_rate": 7.07709732540959e-05, "loss": 2.261750030517578, "memory(GiB)": 72.85, "step": 42455, "token_acc": 0.47540983606557374, "train_speed(iter/s)": 0.671511 }, { "epoch": 1.8191165759821772, "grad_norm": 3.7216548919677734, "learning_rate": 7.076485146276996e-05, "loss": 2.7150617599487306, "memory(GiB)": 72.85, "step": 42460, "token_acc": 0.46710526315789475, "train_speed(iter/s)": 0.671517 }, { "epoch": 1.8193307913114263, "grad_norm": 5.651722431182861, "learning_rate": 7.075872929526929e-05, "loss": 2.656427574157715, "memory(GiB)": 72.85, "step": 42465, "token_acc": 0.4482758620689655, "train_speed(iter/s)": 0.671534 }, { "epoch": 1.8195450066406753, "grad_norm": 3.648184299468994, "learning_rate": 7.075260675170474e-05, "loss": 2.0624032974243165, "memory(GiB)": 72.85, "step": 42470, "token_acc": 0.5134099616858238, "train_speed(iter/s)": 0.671535 }, { "epoch": 1.8197592219699241, "grad_norm": 7.859819412231445, "learning_rate": 7.074648383218728e-05, "loss": 2.5726341247558593, "memory(GiB)": 72.85, "step": 42475, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.671541 }, { "epoch": 1.8199734372991732, "grad_norm": 4.121777534484863, "learning_rate": 7.074036053682777e-05, "loss": 2.3010435104370117, "memory(GiB)": 72.85, "step": 42480, "token_acc": 0.5278810408921933, "train_speed(iter/s)": 0.671545 }, { "epoch": 1.8201876526284222, "grad_norm": 3.986464738845825, "learning_rate": 7.073423686573718e-05, "loss": 2.1332513809204103, "memory(GiB)": 72.85, "step": 42485, "token_acc": 0.5338345864661654, "train_speed(iter/s)": 0.671547 }, { "epoch": 1.820401867957671, "grad_norm": 5.882228374481201, "learning_rate": 7.072811281902645e-05, "loss": 2.4747299194335937, "memory(GiB)": 72.85, "step": 42490, "token_acc": 0.5159235668789809, "train_speed(iter/s)": 0.671554 }, { "epoch": 1.82061608328692, "grad_norm": 3.5413851737976074, "learning_rate": 7.072198839680651e-05, "loss": 2.4597614288330076, 
"memory(GiB)": 72.85, "step": 42495, "token_acc": 0.45565749235474007, "train_speed(iter/s)": 0.671571 }, { "epoch": 1.820830298616169, "grad_norm": 4.626516819000244, "learning_rate": 7.071586359918831e-05, "loss": 2.2587162017822267, "memory(GiB)": 72.85, "step": 42500, "token_acc": 0.5130718954248366, "train_speed(iter/s)": 0.671584 }, { "epoch": 1.820830298616169, "eval_loss": 2.0682578086853027, "eval_runtime": 15.8519, "eval_samples_per_second": 6.308, "eval_steps_per_second": 6.308, "eval_token_acc": 0.4688601645123384, "step": 42500 }, { "epoch": 1.8210445139454179, "grad_norm": 4.452617645263672, "learning_rate": 7.070973842628281e-05, "loss": 2.2196884155273438, "memory(GiB)": 72.85, "step": 42505, "token_acc": 0.48857644991212656, "train_speed(iter/s)": 0.671378 }, { "epoch": 1.821258729274667, "grad_norm": 4.173298358917236, "learning_rate": 7.070361287820097e-05, "loss": 2.374437141418457, "memory(GiB)": 72.85, "step": 42510, "token_acc": 0.5015974440894568, "train_speed(iter/s)": 0.671362 }, { "epoch": 1.821472944603916, "grad_norm": 4.166634559631348, "learning_rate": 7.069748695505374e-05, "loss": 2.354888343811035, "memory(GiB)": 72.85, "step": 42515, "token_acc": 0.47, "train_speed(iter/s)": 0.671365 }, { "epoch": 1.8216871599331648, "grad_norm": 5.179194927215576, "learning_rate": 7.069136065695214e-05, "loss": 2.318570137023926, "memory(GiB)": 72.85, "step": 42520, "token_acc": 0.46096654275092935, "train_speed(iter/s)": 0.671361 }, { "epoch": 1.8219013752624138, "grad_norm": 4.7838640213012695, "learning_rate": 7.068523398400714e-05, "loss": 2.3756954193115236, "memory(GiB)": 72.85, "step": 42525, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.671368 }, { "epoch": 1.8221155905916628, "grad_norm": 3.817514181137085, "learning_rate": 7.067910693632971e-05, "loss": 2.370956230163574, "memory(GiB)": 72.85, "step": 42530, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.671389 }, { "epoch": 1.8223298059209116, "grad_norm": 
3.417269706726074, "learning_rate": 7.067297951403087e-05, "loss": 2.289569091796875, "memory(GiB)": 72.85, "step": 42535, "token_acc": 0.47635135135135137, "train_speed(iter/s)": 0.671388 }, { "epoch": 1.8225440212501607, "grad_norm": 5.031912326812744, "learning_rate": 7.066685171722159e-05, "loss": 2.236215591430664, "memory(GiB)": 72.85, "step": 42540, "token_acc": 0.5109034267912772, "train_speed(iter/s)": 0.671392 }, { "epoch": 1.8227582365794097, "grad_norm": 3.959864854812622, "learning_rate": 7.066072354601292e-05, "loss": 2.5123348236083984, "memory(GiB)": 72.85, "step": 42545, "token_acc": 0.4870967741935484, "train_speed(iter/s)": 0.671389 }, { "epoch": 1.8229724519086585, "grad_norm": 4.035908222198486, "learning_rate": 7.065459500051585e-05, "loss": 2.1115718841552735, "memory(GiB)": 72.85, "step": 42550, "token_acc": 0.5537974683544303, "train_speed(iter/s)": 0.671382 }, { "epoch": 1.8231866672379076, "grad_norm": 4.984616756439209, "learning_rate": 7.064846608084143e-05, "loss": 2.3060720443725584, "memory(GiB)": 72.85, "step": 42555, "token_acc": 0.5016393442622951, "train_speed(iter/s)": 0.671391 }, { "epoch": 1.8234008825671566, "grad_norm": 4.111175060272217, "learning_rate": 7.064233678710068e-05, "loss": 2.241588592529297, "memory(GiB)": 72.85, "step": 42560, "token_acc": 0.527972027972028, "train_speed(iter/s)": 0.671376 }, { "epoch": 1.8236150978964054, "grad_norm": 4.659877300262451, "learning_rate": 7.063620711940463e-05, "loss": 2.16806640625, "memory(GiB)": 72.85, "step": 42565, "token_acc": 0.5326460481099656, "train_speed(iter/s)": 0.671395 }, { "epoch": 1.8238293132256544, "grad_norm": 4.311757564544678, "learning_rate": 7.063007707786432e-05, "loss": 2.2660297393798827, "memory(GiB)": 72.85, "step": 42570, "token_acc": 0.5057471264367817, "train_speed(iter/s)": 0.671381 }, { "epoch": 1.8240435285549035, "grad_norm": 4.069806098937988, "learning_rate": 7.062394666259082e-05, "loss": 2.18204402923584, "memory(GiB)": 72.85, "step": 
42575, "token_acc": 0.5490909090909091, "train_speed(iter/s)": 0.671386 }, { "epoch": 1.8242577438841523, "grad_norm": 4.544327735900879, "learning_rate": 7.061781587369519e-05, "loss": 2.3279983520507814, "memory(GiB)": 72.85, "step": 42580, "token_acc": 0.47191011235955055, "train_speed(iter/s)": 0.671402 }, { "epoch": 1.8244719592134013, "grad_norm": 3.9262869358062744, "learning_rate": 7.061168471128846e-05, "loss": 2.337554168701172, "memory(GiB)": 72.85, "step": 42585, "token_acc": 0.4888888888888889, "train_speed(iter/s)": 0.671396 }, { "epoch": 1.8246861745426504, "grad_norm": 5.236508846282959, "learning_rate": 7.060555317548175e-05, "loss": 2.3052162170410155, "memory(GiB)": 72.85, "step": 42590, "token_acc": 0.5083612040133779, "train_speed(iter/s)": 0.671409 }, { "epoch": 1.8249003898718992, "grad_norm": 5.48347282409668, "learning_rate": 7.05994212663861e-05, "loss": 2.4524843215942385, "memory(GiB)": 72.85, "step": 42595, "token_acc": 0.506896551724138, "train_speed(iter/s)": 0.671386 }, { "epoch": 1.8251146052011482, "grad_norm": 4.72567892074585, "learning_rate": 7.05932889841126e-05, "loss": 2.4709157943725586, "memory(GiB)": 72.85, "step": 42600, "token_acc": 0.5176848874598071, "train_speed(iter/s)": 0.671391 }, { "epoch": 1.8253288205303972, "grad_norm": 6.948080539703369, "learning_rate": 7.058715632877238e-05, "loss": 2.1992347717285154, "memory(GiB)": 72.85, "step": 42605, "token_acc": 0.48333333333333334, "train_speed(iter/s)": 0.671397 }, { "epoch": 1.825543035859646, "grad_norm": 5.330658435821533, "learning_rate": 7.058102330047649e-05, "loss": 2.1989110946655273, "memory(GiB)": 72.85, "step": 42610, "token_acc": 0.5691056910569106, "train_speed(iter/s)": 0.671402 }, { "epoch": 1.825757251188895, "grad_norm": 4.515141010284424, "learning_rate": 7.057488989933606e-05, "loss": 2.441617965698242, "memory(GiB)": 72.85, "step": 42615, "token_acc": 0.5038759689922481, "train_speed(iter/s)": 0.671405 }, { "epoch": 1.8259714665181441, 
"grad_norm": 3.5263593196868896, "learning_rate": 7.056875612546222e-05, "loss": 2.16311092376709, "memory(GiB)": 72.85, "step": 42620, "token_acc": 0.5218978102189781, "train_speed(iter/s)": 0.671427 }, { "epoch": 1.826185681847393, "grad_norm": 3.9688892364501953, "learning_rate": 7.056262197896603e-05, "loss": 2.3501876831054687, "memory(GiB)": 72.85, "step": 42625, "token_acc": 0.4954128440366973, "train_speed(iter/s)": 0.671409 }, { "epoch": 1.826399897176642, "grad_norm": 5.3641438484191895, "learning_rate": 7.055648745995866e-05, "loss": 2.33448486328125, "memory(GiB)": 72.85, "step": 42630, "token_acc": 0.544, "train_speed(iter/s)": 0.671423 }, { "epoch": 1.826614112505891, "grad_norm": 4.98757266998291, "learning_rate": 7.055035256855127e-05, "loss": 2.2617353439331054, "memory(GiB)": 72.85, "step": 42635, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.671438 }, { "epoch": 1.8268283278351398, "grad_norm": 3.774540662765503, "learning_rate": 7.054421730485493e-05, "loss": 2.610203170776367, "memory(GiB)": 72.85, "step": 42640, "token_acc": 0.44135802469135804, "train_speed(iter/s)": 0.671433 }, { "epoch": 1.8270425431643889, "grad_norm": 7.7271223068237305, "learning_rate": 7.053808166898084e-05, "loss": 2.3282182693481444, "memory(GiB)": 72.85, "step": 42645, "token_acc": 0.4885245901639344, "train_speed(iter/s)": 0.671436 }, { "epoch": 1.827256758493638, "grad_norm": 4.6139607429504395, "learning_rate": 7.053194566104014e-05, "loss": 2.415671539306641, "memory(GiB)": 72.85, "step": 42650, "token_acc": 0.5, "train_speed(iter/s)": 0.67143 }, { "epoch": 1.8274709738228867, "grad_norm": 4.203500270843506, "learning_rate": 7.052580928114396e-05, "loss": 2.3636327743530274, "memory(GiB)": 72.85, "step": 42655, "token_acc": 0.49624060150375937, "train_speed(iter/s)": 0.671438 }, { "epoch": 1.8276851891521357, "grad_norm": 6.410083293914795, "learning_rate": 7.051967252940348e-05, "loss": 2.278317451477051, "memory(GiB)": 72.85, "step": 42660, 
"token_acc": 0.5110294117647058, "train_speed(iter/s)": 0.671442 }, { "epoch": 1.8278994044813848, "grad_norm": 5.495559215545654, "learning_rate": 7.051353540592993e-05, "loss": 2.1444820404052733, "memory(GiB)": 72.85, "step": 42665, "token_acc": 0.5394190871369294, "train_speed(iter/s)": 0.67143 }, { "epoch": 1.8281136198106336, "grad_norm": 3.9014201164245605, "learning_rate": 7.050739791083442e-05, "loss": 2.5330360412597654, "memory(GiB)": 72.85, "step": 42670, "token_acc": 0.4745098039215686, "train_speed(iter/s)": 0.671434 }, { "epoch": 1.8283278351398826, "grad_norm": 4.379390239715576, "learning_rate": 7.050126004422814e-05, "loss": 2.2449581146240236, "memory(GiB)": 72.85, "step": 42675, "token_acc": 0.4980544747081712, "train_speed(iter/s)": 0.671421 }, { "epoch": 1.8285420504691317, "grad_norm": 5.965646266937256, "learning_rate": 7.049512180622233e-05, "loss": 2.2933671951293944, "memory(GiB)": 72.85, "step": 42680, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.671428 }, { "epoch": 1.8287562657983805, "grad_norm": 3.515120506286621, "learning_rate": 7.048898319692816e-05, "loss": 2.2378625869750977, "memory(GiB)": 72.85, "step": 42685, "token_acc": 0.5036496350364964, "train_speed(iter/s)": 0.671431 }, { "epoch": 1.8289704811276295, "grad_norm": 3.853548765182495, "learning_rate": 7.048284421645684e-05, "loss": 2.346327209472656, "memory(GiB)": 72.85, "step": 42690, "token_acc": 0.5036231884057971, "train_speed(iter/s)": 0.671423 }, { "epoch": 1.8291846964568785, "grad_norm": 5.294758319854736, "learning_rate": 7.047670486491957e-05, "loss": 2.2413093566894533, "memory(GiB)": 72.85, "step": 42695, "token_acc": 0.5230125523012552, "train_speed(iter/s)": 0.671433 }, { "epoch": 1.8293989117861273, "grad_norm": 3.4626781940460205, "learning_rate": 7.047056514242757e-05, "loss": 2.2001861572265624, "memory(GiB)": 72.85, "step": 42700, "token_acc": 0.5017182130584192, "train_speed(iter/s)": 0.671437 }, { "epoch": 1.8296131271153764, "grad_norm": 
4.050875663757324, "learning_rate": 7.046442504909211e-05, "loss": 2.5429719924926757, "memory(GiB)": 72.85, "step": 42705, "token_acc": 0.4482758620689655, "train_speed(iter/s)": 0.671439 }, { "epoch": 1.8298273424446254, "grad_norm": 4.2242841720581055, "learning_rate": 7.045828458502438e-05, "loss": 2.478177070617676, "memory(GiB)": 72.85, "step": 42710, "token_acc": 0.4605263157894737, "train_speed(iter/s)": 0.671429 }, { "epoch": 1.8300415577738742, "grad_norm": 3.518420696258545, "learning_rate": 7.045214375033563e-05, "loss": 2.1960613250732424, "memory(GiB)": 72.85, "step": 42715, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.671447 }, { "epoch": 1.8302557731031233, "grad_norm": 4.829948902130127, "learning_rate": 7.044600254513712e-05, "loss": 2.522757339477539, "memory(GiB)": 72.85, "step": 42720, "token_acc": 0.4685714285714286, "train_speed(iter/s)": 0.671456 }, { "epoch": 1.8304699884323723, "grad_norm": 4.770733833312988, "learning_rate": 7.04398609695401e-05, "loss": 2.197898292541504, "memory(GiB)": 72.85, "step": 42725, "token_acc": 0.5234375, "train_speed(iter/s)": 0.671451 }, { "epoch": 1.830684203761621, "grad_norm": 4.1057209968566895, "learning_rate": 7.04337190236558e-05, "loss": 2.402741813659668, "memory(GiB)": 72.85, "step": 42730, "token_acc": 0.49, "train_speed(iter/s)": 0.671459 }, { "epoch": 1.8308984190908701, "grad_norm": 4.190586566925049, "learning_rate": 7.042757670759552e-05, "loss": 2.188705062866211, "memory(GiB)": 72.85, "step": 42735, "token_acc": 0.5214899713467048, "train_speed(iter/s)": 0.671469 }, { "epoch": 1.8311126344201192, "grad_norm": 3.906813859939575, "learning_rate": 7.042143402147053e-05, "loss": 2.2758459091186523, "memory(GiB)": 72.85, "step": 42740, "token_acc": 0.49070631970260226, "train_speed(iter/s)": 0.671461 }, { "epoch": 1.831326849749368, "grad_norm": 4.028787136077881, "learning_rate": 7.04152909653921e-05, "loss": 2.2657501220703127, "memory(GiB)": 72.85, "step": 42745, "token_acc": 
0.48787878787878786, "train_speed(iter/s)": 0.671462 }, { "epoch": 1.831541065078617, "grad_norm": 4.53533411026001, "learning_rate": 7.040914753947155e-05, "loss": 2.548129653930664, "memory(GiB)": 72.85, "step": 42750, "token_acc": 0.4664179104477612, "train_speed(iter/s)": 0.671476 }, { "epoch": 1.831755280407866, "grad_norm": 3.1108741760253906, "learning_rate": 7.040300374382011e-05, "loss": 2.2196849822998046, "memory(GiB)": 72.85, "step": 42755, "token_acc": 0.4906166219839142, "train_speed(iter/s)": 0.671467 }, { "epoch": 1.8319694957371149, "grad_norm": 4.65959358215332, "learning_rate": 7.039685957854914e-05, "loss": 2.4688045501708986, "memory(GiB)": 72.85, "step": 42760, "token_acc": 0.5037878787878788, "train_speed(iter/s)": 0.671466 }, { "epoch": 1.832183711066364, "grad_norm": 5.004300594329834, "learning_rate": 7.039071504376992e-05, "loss": 2.4455596923828127, "memory(GiB)": 72.85, "step": 42765, "token_acc": 0.4681647940074906, "train_speed(iter/s)": 0.67149 }, { "epoch": 1.832397926395613, "grad_norm": 5.683574199676514, "learning_rate": 7.038457013959376e-05, "loss": 2.3699913024902344, "memory(GiB)": 72.85, "step": 42770, "token_acc": 0.5017921146953405, "train_speed(iter/s)": 0.671482 }, { "epoch": 1.8326121417248618, "grad_norm": 4.0293989181518555, "learning_rate": 7.037842486613199e-05, "loss": 2.215940475463867, "memory(GiB)": 72.85, "step": 42775, "token_acc": 0.5066225165562914, "train_speed(iter/s)": 0.6715 }, { "epoch": 1.8328263570541108, "grad_norm": 5.859943866729736, "learning_rate": 7.037227922349594e-05, "loss": 2.4085243225097654, "memory(GiB)": 72.85, "step": 42780, "token_acc": 0.4892966360856269, "train_speed(iter/s)": 0.671515 }, { "epoch": 1.8330405723833598, "grad_norm": 3.6736350059509277, "learning_rate": 7.036613321179696e-05, "loss": 2.035796546936035, "memory(GiB)": 72.85, "step": 42785, "token_acc": 0.556420233463035, "train_speed(iter/s)": 0.671515 }, { "epoch": 1.8332547877126086, "grad_norm": 3.7391395568847656, 
"learning_rate": 7.035998683114635e-05, "loss": 1.8570318222045898, "memory(GiB)": 72.85, "step": 42790, "token_acc": 0.5858585858585859, "train_speed(iter/s)": 0.671522 }, { "epoch": 1.8334690030418577, "grad_norm": 6.526456356048584, "learning_rate": 7.035384008165548e-05, "loss": 2.1516674041748045, "memory(GiB)": 72.85, "step": 42795, "token_acc": 0.5458015267175572, "train_speed(iter/s)": 0.671535 }, { "epoch": 1.8336832183711067, "grad_norm": 5.271064281463623, "learning_rate": 7.034769296343571e-05, "loss": 2.4830039978027343, "memory(GiB)": 72.85, "step": 42800, "token_acc": 0.42318059299191374, "train_speed(iter/s)": 0.671547 }, { "epoch": 1.8338974337003555, "grad_norm": 4.57347297668457, "learning_rate": 7.03415454765984e-05, "loss": 2.6217151641845704, "memory(GiB)": 72.85, "step": 42805, "token_acc": 0.4582210242587601, "train_speed(iter/s)": 0.671551 }, { "epoch": 1.8341116490296046, "grad_norm": 5.162769794464111, "learning_rate": 7.03353976212549e-05, "loss": 2.462541389465332, "memory(GiB)": 72.85, "step": 42810, "token_acc": 0.44, "train_speed(iter/s)": 0.671534 }, { "epoch": 1.8343258643588536, "grad_norm": 4.255177974700928, "learning_rate": 7.03292493975166e-05, "loss": 2.176973342895508, "memory(GiB)": 72.85, "step": 42815, "token_acc": 0.5, "train_speed(iter/s)": 0.671534 }, { "epoch": 1.8345400796881024, "grad_norm": 3.3886446952819824, "learning_rate": 7.032310080549488e-05, "loss": 2.5742414474487303, "memory(GiB)": 72.85, "step": 42820, "token_acc": 0.42045454545454547, "train_speed(iter/s)": 0.671534 }, { "epoch": 1.8347542950173514, "grad_norm": 5.282469272613525, "learning_rate": 7.031695184530113e-05, "loss": 2.408885955810547, "memory(GiB)": 72.85, "step": 42825, "token_acc": 0.5017182130584192, "train_speed(iter/s)": 0.671535 }, { "epoch": 1.8349685103466005, "grad_norm": 3.9152138233184814, "learning_rate": 7.031080251704672e-05, "loss": 2.438826560974121, "memory(GiB)": 72.85, "step": 42830, "token_acc": 0.47527472527472525, 
"train_speed(iter/s)": 0.671545 }, { "epoch": 1.8351827256758493, "grad_norm": 5.133516311645508, "learning_rate": 7.030465282084309e-05, "loss": 2.2366132736206055, "memory(GiB)": 72.85, "step": 42835, "token_acc": 0.5, "train_speed(iter/s)": 0.671549 }, { "epoch": 1.8353969410050983, "grad_norm": 3.3044087886810303, "learning_rate": 7.029850275680161e-05, "loss": 2.231631851196289, "memory(GiB)": 72.85, "step": 42840, "token_acc": 0.5107692307692308, "train_speed(iter/s)": 0.67153 }, { "epoch": 1.8356111563343473, "grad_norm": 3.5765154361724854, "learning_rate": 7.029235232503371e-05, "loss": 2.330720329284668, "memory(GiB)": 72.85, "step": 42845, "token_acc": 0.5066666666666667, "train_speed(iter/s)": 0.671553 }, { "epoch": 1.8358253716635962, "grad_norm": 5.319865703582764, "learning_rate": 7.028620152565082e-05, "loss": 2.473418426513672, "memory(GiB)": 72.85, "step": 42850, "token_acc": 0.4697508896797153, "train_speed(iter/s)": 0.67156 }, { "epoch": 1.8360395869928452, "grad_norm": 4.064535140991211, "learning_rate": 7.028005035876437e-05, "loss": 2.1764995574951174, "memory(GiB)": 72.85, "step": 42855, "token_acc": 0.5148514851485149, "train_speed(iter/s)": 0.671555 }, { "epoch": 1.8362538023220942, "grad_norm": 7.94792366027832, "learning_rate": 7.027389882448576e-05, "loss": 2.5803812026977537, "memory(GiB)": 72.85, "step": 42860, "token_acc": 0.4646153846153846, "train_speed(iter/s)": 0.671537 }, { "epoch": 1.836468017651343, "grad_norm": 4.680457592010498, "learning_rate": 7.026774692292649e-05, "loss": 2.568368339538574, "memory(GiB)": 72.85, "step": 42865, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.671557 }, { "epoch": 1.836682232980592, "grad_norm": 4.124451637268066, "learning_rate": 7.026159465419795e-05, "loss": 2.3356563568115236, "memory(GiB)": 72.85, "step": 42870, "token_acc": 0.4712230215827338, "train_speed(iter/s)": 0.671561 }, { "epoch": 1.8368964483098411, "grad_norm": 3.954235553741455, "learning_rate": 
7.02554420184116e-05, "loss": 2.1561725616455076, "memory(GiB)": 72.85, "step": 42875, "token_acc": 0.5169491525423728, "train_speed(iter/s)": 0.67154 }, { "epoch": 1.83711066363909, "grad_norm": 4.063472747802734, "learning_rate": 7.024928901567893e-05, "loss": 2.2568737030029298, "memory(GiB)": 72.85, "step": 42880, "token_acc": 0.5241935483870968, "train_speed(iter/s)": 0.671513 }, { "epoch": 1.837324878968339, "grad_norm": 4.820133686065674, "learning_rate": 7.024313564611141e-05, "loss": 2.7217594146728517, "memory(GiB)": 72.85, "step": 42885, "token_acc": 0.4528985507246377, "train_speed(iter/s)": 0.671514 }, { "epoch": 1.837539094297588, "grad_norm": 4.035514831542969, "learning_rate": 7.023698190982047e-05, "loss": 2.4436271667480467, "memory(GiB)": 72.85, "step": 42890, "token_acc": 0.4788732394366197, "train_speed(iter/s)": 0.671488 }, { "epoch": 1.8377533096268368, "grad_norm": 4.590405464172363, "learning_rate": 7.023082780691763e-05, "loss": 2.524094009399414, "memory(GiB)": 72.85, "step": 42895, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.671491 }, { "epoch": 1.8379675249560858, "grad_norm": 3.802104949951172, "learning_rate": 7.022467333751438e-05, "loss": 2.0428436279296873, "memory(GiB)": 72.85, "step": 42900, "token_acc": 0.505338078291815, "train_speed(iter/s)": 0.671497 }, { "epoch": 1.8381817402853349, "grad_norm": 4.743404865264893, "learning_rate": 7.021851850172219e-05, "loss": 2.0249691009521484, "memory(GiB)": 72.85, "step": 42905, "token_acc": 0.5308641975308642, "train_speed(iter/s)": 0.671511 }, { "epoch": 1.8383959556145837, "grad_norm": 6.669246196746826, "learning_rate": 7.021236329965257e-05, "loss": 2.252491569519043, "memory(GiB)": 72.85, "step": 42910, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.671501 }, { "epoch": 1.8386101709438327, "grad_norm": 4.310415744781494, "learning_rate": 7.020620773141703e-05, "loss": 2.346464729309082, "memory(GiB)": 72.85, "step": 42915, "token_acc": 
0.49140893470790376, "train_speed(iter/s)": 0.671485 }, { "epoch": 1.8388243862730818, "grad_norm": 4.328274726867676, "learning_rate": 7.020005179712707e-05, "loss": 2.2551923751831056, "memory(GiB)": 72.85, "step": 42920, "token_acc": 0.5, "train_speed(iter/s)": 0.671469 }, { "epoch": 1.8390386016023306, "grad_norm": 4.944238662719727, "learning_rate": 7.019389549689423e-05, "loss": 2.3210237503051756, "memory(GiB)": 72.85, "step": 42925, "token_acc": 0.5287769784172662, "train_speed(iter/s)": 0.671461 }, { "epoch": 1.8392528169315796, "grad_norm": 4.405674934387207, "learning_rate": 7.018773883083004e-05, "loss": 2.298793411254883, "memory(GiB)": 72.85, "step": 42930, "token_acc": 0.46484375, "train_speed(iter/s)": 0.671468 }, { "epoch": 1.8394670322608286, "grad_norm": 5.430563449859619, "learning_rate": 7.018158179904602e-05, "loss": 2.3346879959106444, "memory(GiB)": 72.85, "step": 42935, "token_acc": 0.48221343873517786, "train_speed(iter/s)": 0.671468 }, { "epoch": 1.8396812475900775, "grad_norm": 8.045374870300293, "learning_rate": 7.01754244016537e-05, "loss": 1.9660049438476563, "memory(GiB)": 72.85, "step": 42940, "token_acc": 0.5239852398523985, "train_speed(iter/s)": 0.671437 }, { "epoch": 1.8398954629193265, "grad_norm": 3.6959638595581055, "learning_rate": 7.016926663876466e-05, "loss": 2.4424062728881837, "memory(GiB)": 72.85, "step": 42945, "token_acc": 0.49169435215946844, "train_speed(iter/s)": 0.671461 }, { "epoch": 1.8401096782485755, "grad_norm": 4.593182563781738, "learning_rate": 7.016310851049041e-05, "loss": 2.4397621154785156, "memory(GiB)": 72.85, "step": 42950, "token_acc": 0.4968152866242038, "train_speed(iter/s)": 0.671458 }, { "epoch": 1.8403238935778243, "grad_norm": 5.119756698608398, "learning_rate": 7.015695001694255e-05, "loss": 2.5062276840209963, "memory(GiB)": 72.85, "step": 42955, "token_acc": 0.46449704142011833, "train_speed(iter/s)": 0.671462 }, { "epoch": 1.8405381089070734, "grad_norm": 4.405935764312744, 
"learning_rate": 7.015079115823265e-05, "loss": 2.4332576751708985, "memory(GiB)": 72.85, "step": 42960, "token_acc": 0.4645161290322581, "train_speed(iter/s)": 0.671469 }, { "epoch": 1.8407523242363224, "grad_norm": 5.203641414642334, "learning_rate": 7.014463193447222e-05, "loss": 2.3763101577758787, "memory(GiB)": 72.85, "step": 42965, "token_acc": 0.48598130841121495, "train_speed(iter/s)": 0.671471 }, { "epoch": 1.8409665395655712, "grad_norm": 4.033645153045654, "learning_rate": 7.013847234577293e-05, "loss": 2.336761474609375, "memory(GiB)": 72.85, "step": 42970, "token_acc": 0.4983164983164983, "train_speed(iter/s)": 0.67147 }, { "epoch": 1.8411807548948202, "grad_norm": 3.916135311126709, "learning_rate": 7.013231239224629e-05, "loss": 2.347930335998535, "memory(GiB)": 72.85, "step": 42975, "token_acc": 0.5075757575757576, "train_speed(iter/s)": 0.671472 }, { "epoch": 1.8413949702240693, "grad_norm": 4.6116414070129395, "learning_rate": 7.012615207400394e-05, "loss": 2.399697494506836, "memory(GiB)": 72.85, "step": 42980, "token_acc": 0.4774193548387097, "train_speed(iter/s)": 0.671472 }, { "epoch": 1.841609185553318, "grad_norm": 4.64769172668457, "learning_rate": 7.011999139115748e-05, "loss": 2.3060611724853515, "memory(GiB)": 72.85, "step": 42985, "token_acc": 0.50199203187251, "train_speed(iter/s)": 0.671471 }, { "epoch": 1.8418234008825671, "grad_norm": 3.8926985263824463, "learning_rate": 7.011383034381848e-05, "loss": 2.333473014831543, "memory(GiB)": 72.85, "step": 42990, "token_acc": 0.5, "train_speed(iter/s)": 0.671482 }, { "epoch": 1.8420376162118162, "grad_norm": 4.171022415161133, "learning_rate": 7.010766893209858e-05, "loss": 2.086995315551758, "memory(GiB)": 72.85, "step": 42995, "token_acc": 0.5373665480427047, "train_speed(iter/s)": 0.671481 }, { "epoch": 1.842251831541065, "grad_norm": 3.3796324729919434, "learning_rate": 7.01015071561094e-05, "loss": 2.1995819091796873, "memory(GiB)": 72.85, "step": 43000, "token_acc": 
0.5202312138728323, "train_speed(iter/s)": 0.67149 }, { "epoch": 1.842251831541065, "eval_loss": 2.1110434532165527, "eval_runtime": 15.5194, "eval_samples_per_second": 6.444, "eval_steps_per_second": 6.444, "eval_token_acc": 0.5140562248995983, "step": 43000 }, { "epoch": 1.842466046870314, "grad_norm": 4.938746452331543, "learning_rate": 7.009534501596255e-05, "loss": 2.499956512451172, "memory(GiB)": 72.85, "step": 43005, "token_acc": 0.5080645161290323, "train_speed(iter/s)": 0.671303 }, { "epoch": 1.842680262199563, "grad_norm": 4.224830150604248, "learning_rate": 7.00891825117697e-05, "loss": 2.3013492584228517, "memory(GiB)": 72.85, "step": 43010, "token_acc": 0.5192307692307693, "train_speed(iter/s)": 0.671307 }, { "epoch": 1.8428944775288119, "grad_norm": 4.288341522216797, "learning_rate": 7.008301964364244e-05, "loss": 2.307320404052734, "memory(GiB)": 72.85, "step": 43015, "token_acc": 0.5129032258064516, "train_speed(iter/s)": 0.671297 }, { "epoch": 1.843108692858061, "grad_norm": 5.730436325073242, "learning_rate": 7.007685641169244e-05, "loss": 2.2584314346313477, "memory(GiB)": 72.85, "step": 43020, "token_acc": 0.4823529411764706, "train_speed(iter/s)": 0.671307 }, { "epoch": 1.84332290818731, "grad_norm": 5.518737316131592, "learning_rate": 7.007069281603136e-05, "loss": 2.6085136413574217, "memory(GiB)": 72.85, "step": 43025, "token_acc": 0.46308724832214765, "train_speed(iter/s)": 0.671309 }, { "epoch": 1.8435371235165587, "grad_norm": 5.231285572052002, "learning_rate": 7.006452885677083e-05, "loss": 2.1377180099487303, "memory(GiB)": 72.85, "step": 43030, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.671319 }, { "epoch": 1.8437513388458078, "grad_norm": 4.655817031860352, "learning_rate": 7.005836453402257e-05, "loss": 2.5285528182983397, "memory(GiB)": 72.85, "step": 43035, "token_acc": 0.4503105590062112, "train_speed(iter/s)": 0.671312 }, { "epoch": 1.8439655541750568, "grad_norm": 3.3455002307891846, "learning_rate": 
7.00521998478982e-05, "loss": 2.314322662353516, "memory(GiB)": 72.85, "step": 43040, "token_acc": 0.521865889212828, "train_speed(iter/s)": 0.671327 }, { "epoch": 1.8441797695043056, "grad_norm": 4.8705549240112305, "learning_rate": 7.004603479850944e-05, "loss": 2.267271041870117, "memory(GiB)": 72.85, "step": 43045, "token_acc": 0.4870848708487085, "train_speed(iter/s)": 0.671304 }, { "epoch": 1.8443939848335547, "grad_norm": 4.742059230804443, "learning_rate": 7.003986938596793e-05, "loss": 2.5129627227783202, "memory(GiB)": 72.85, "step": 43050, "token_acc": 0.4532871972318339, "train_speed(iter/s)": 0.67131 }, { "epoch": 1.8446082001628037, "grad_norm": 5.24919319152832, "learning_rate": 7.003370361038541e-05, "loss": 2.552025032043457, "memory(GiB)": 72.85, "step": 43055, "token_acc": 0.4379310344827586, "train_speed(iter/s)": 0.671308 }, { "epoch": 1.8448224154920525, "grad_norm": 3.866835832595825, "learning_rate": 7.002753747187354e-05, "loss": 2.6407670974731445, "memory(GiB)": 72.85, "step": 43060, "token_acc": 0.45396825396825397, "train_speed(iter/s)": 0.671321 }, { "epoch": 1.8450366308213015, "grad_norm": 4.199645519256592, "learning_rate": 7.002137097054404e-05, "loss": 2.4825611114501953, "memory(GiB)": 72.85, "step": 43065, "token_acc": 0.45555555555555555, "train_speed(iter/s)": 0.67132 }, { "epoch": 1.8452508461505506, "grad_norm": 4.170590400695801, "learning_rate": 7.001520410650863e-05, "loss": 2.416270065307617, "memory(GiB)": 72.85, "step": 43070, "token_acc": 0.47540983606557374, "train_speed(iter/s)": 0.671322 }, { "epoch": 1.8454650614797994, "grad_norm": 4.165356636047363, "learning_rate": 7.000903687987903e-05, "loss": 2.492926025390625, "memory(GiB)": 72.85, "step": 43075, "token_acc": 0.4496124031007752, "train_speed(iter/s)": 0.671309 }, { "epoch": 1.8456792768090484, "grad_norm": 4.458388805389404, "learning_rate": 7.000286929076693e-05, "loss": 2.413503646850586, "memory(GiB)": 72.85, "step": 43080, "token_acc": 
0.5124555160142349, "train_speed(iter/s)": 0.671322 }, { "epoch": 1.8458934921382975, "grad_norm": 7.156371116638184, "learning_rate": 6.999670133928412e-05, "loss": 2.4416709899902345, "memory(GiB)": 72.85, "step": 43085, "token_acc": 0.4897959183673469, "train_speed(iter/s)": 0.67134 }, { "epoch": 1.8461077074675463, "grad_norm": 3.905407190322876, "learning_rate": 6.999053302554228e-05, "loss": 2.324625015258789, "memory(GiB)": 72.85, "step": 43090, "token_acc": 0.5162337662337663, "train_speed(iter/s)": 0.671348 }, { "epoch": 1.8463219227967953, "grad_norm": 3.917184591293335, "learning_rate": 6.99843643496532e-05, "loss": 2.1510042190551757, "memory(GiB)": 72.85, "step": 43095, "token_acc": 0.5337423312883436, "train_speed(iter/s)": 0.671329 }, { "epoch": 1.8465361381260443, "grad_norm": 3.405735731124878, "learning_rate": 6.9979429148271e-05, "loss": 2.3462055206298826, "memory(GiB)": 72.85, "step": 43100, "token_acc": 0.5, "train_speed(iter/s)": 0.671335 }, { "epoch": 1.8467503534552931, "grad_norm": 3.5223894119262695, "learning_rate": 6.997325982079848e-05, "loss": 2.4769266128540037, "memory(GiB)": 72.85, "step": 43105, "token_acc": 0.49258160237388726, "train_speed(iter/s)": 0.671315 }, { "epoch": 1.8469645687845422, "grad_norm": 4.0296525955200195, "learning_rate": 6.996709013149161e-05, "loss": 2.262604522705078, "memory(GiB)": 72.85, "step": 43110, "token_acc": 0.5045871559633027, "train_speed(iter/s)": 0.671309 }, { "epoch": 1.8471787841137912, "grad_norm": 4.410674571990967, "learning_rate": 6.996092008046216e-05, "loss": 2.4343936920166014, "memory(GiB)": 72.85, "step": 43115, "token_acc": 0.5037593984962406, "train_speed(iter/s)": 0.671315 }, { "epoch": 1.84739299944304, "grad_norm": 3.868110179901123, "learning_rate": 6.995474966782193e-05, "loss": 2.4346393585205077, "memory(GiB)": 72.85, "step": 43120, "token_acc": 0.5143884892086331, "train_speed(iter/s)": 0.671299 }, { "epoch": 1.847607214772289, "grad_norm": 5.8885884284973145, 
"learning_rate": 6.99485788936827e-05, "loss": 2.185497283935547, "memory(GiB)": 72.85, "step": 43125, "token_acc": 0.5020576131687243, "train_speed(iter/s)": 0.6713 }, { "epoch": 1.847821430101538, "grad_norm": 5.386285781860352, "learning_rate": 6.994240775815624e-05, "loss": 2.333906555175781, "memory(GiB)": 72.85, "step": 43130, "token_acc": 0.5136186770428015, "train_speed(iter/s)": 0.671281 }, { "epoch": 1.848035645430787, "grad_norm": 4.667481899261475, "learning_rate": 6.993623626135436e-05, "loss": 2.1056190490722657, "memory(GiB)": 72.85, "step": 43135, "token_acc": 0.5284280936454849, "train_speed(iter/s)": 0.671285 }, { "epoch": 1.848249860760036, "grad_norm": 3.277848482131958, "learning_rate": 6.993006440338885e-05, "loss": 2.165676498413086, "memory(GiB)": 72.85, "step": 43140, "token_acc": 0.5297619047619048, "train_speed(iter/s)": 0.671269 }, { "epoch": 1.848464076089285, "grad_norm": 5.066921234130859, "learning_rate": 6.992389218437154e-05, "loss": 2.2009099960327148, "memory(GiB)": 72.85, "step": 43145, "token_acc": 0.49809885931558934, "train_speed(iter/s)": 0.671277 }, { "epoch": 1.8486782914185338, "grad_norm": 4.028867721557617, "learning_rate": 6.991771960441421e-05, "loss": 2.8525218963623047, "memory(GiB)": 72.85, "step": 43150, "token_acc": 0.4526627218934911, "train_speed(iter/s)": 0.671279 }, { "epoch": 1.8488925067477828, "grad_norm": 4.371655464172363, "learning_rate": 6.991154666362873e-05, "loss": 2.4179302215576173, "memory(GiB)": 72.85, "step": 43155, "token_acc": 0.4895833333333333, "train_speed(iter/s)": 0.671265 }, { "epoch": 1.8491067220770319, "grad_norm": 4.234040260314941, "learning_rate": 6.990537336212688e-05, "loss": 2.2798046112060546, "memory(GiB)": 72.85, "step": 43160, "token_acc": 0.4919093851132686, "train_speed(iter/s)": 0.67127 }, { "epoch": 1.8493209374062807, "grad_norm": 3.9956352710723877, "learning_rate": 6.989919970002053e-05, "loss": 2.4863021850585936, "memory(GiB)": 72.85, "step": 43165, "token_acc": 
0.4862068965517241, "train_speed(iter/s)": 0.67128 }, { "epoch": 1.8495351527355297, "grad_norm": 3.0839943885803223, "learning_rate": 6.989302567742152e-05, "loss": 2.274322509765625, "memory(GiB)": 72.85, "step": 43170, "token_acc": 0.524390243902439, "train_speed(iter/s)": 0.671284 }, { "epoch": 1.8497493680647787, "grad_norm": 4.525620460510254, "learning_rate": 6.988685129444168e-05, "loss": 2.0637981414794924, "memory(GiB)": 72.85, "step": 43175, "token_acc": 0.541501976284585, "train_speed(iter/s)": 0.67128 }, { "epoch": 1.8499635833940276, "grad_norm": 3.8515615463256836, "learning_rate": 6.988067655119288e-05, "loss": 2.173713493347168, "memory(GiB)": 72.85, "step": 43180, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.671286 }, { "epoch": 1.8501777987232766, "grad_norm": 4.090356349945068, "learning_rate": 6.987450144778697e-05, "loss": 2.380714988708496, "memory(GiB)": 72.85, "step": 43185, "token_acc": 0.5, "train_speed(iter/s)": 0.671244 }, { "epoch": 1.8503920140525256, "grad_norm": 4.8032636642456055, "learning_rate": 6.986832598433581e-05, "loss": 2.41700439453125, "memory(GiB)": 72.85, "step": 43190, "token_acc": 0.5102739726027398, "train_speed(iter/s)": 0.671226 }, { "epoch": 1.8506062293817744, "grad_norm": 6.376444339752197, "learning_rate": 6.98621501609513e-05, "loss": 2.353656196594238, "memory(GiB)": 72.85, "step": 43195, "token_acc": 0.5147058823529411, "train_speed(iter/s)": 0.671223 }, { "epoch": 1.8508204447110235, "grad_norm": 4.044776439666748, "learning_rate": 6.985597397774531e-05, "loss": 2.3222131729125977, "memory(GiB)": 72.85, "step": 43200, "token_acc": 0.4982698961937716, "train_speed(iter/s)": 0.671231 }, { "epoch": 1.8510346600402725, "grad_norm": 5.067968368530273, "learning_rate": 6.984979743482972e-05, "loss": 2.321532440185547, "memory(GiB)": 72.85, "step": 43205, "token_acc": 0.51953125, "train_speed(iter/s)": 0.67124 }, { "epoch": 1.8512488753695213, "grad_norm": 4.373540878295898, "learning_rate": 
6.984362053231644e-05, "loss": 2.362545394897461, "memory(GiB)": 72.85, "step": 43210, "token_acc": 0.5307443365695793, "train_speed(iter/s)": 0.671223 }, { "epoch": 1.8514630906987704, "grad_norm": 4.474796295166016, "learning_rate": 6.983744327031733e-05, "loss": 2.315311622619629, "memory(GiB)": 72.85, "step": 43215, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.67125 }, { "epoch": 1.8516773060280194, "grad_norm": 4.672846794128418, "learning_rate": 6.983126564894436e-05, "loss": 2.603815269470215, "memory(GiB)": 72.85, "step": 43220, "token_acc": 0.4825174825174825, "train_speed(iter/s)": 0.671249 }, { "epoch": 1.8518915213572682, "grad_norm": 4.08417272567749, "learning_rate": 6.982508766830938e-05, "loss": 2.2552265167236327, "memory(GiB)": 72.85, "step": 43225, "token_acc": 0.5298013245033113, "train_speed(iter/s)": 0.671245 }, { "epoch": 1.8521057366865172, "grad_norm": 3.50960373878479, "learning_rate": 6.981890932852437e-05, "loss": 2.503732109069824, "memory(GiB)": 72.85, "step": 43230, "token_acc": 0.49050632911392406, "train_speed(iter/s)": 0.67125 }, { "epoch": 1.8523199520157663, "grad_norm": 4.380397796630859, "learning_rate": 6.98127306297012e-05, "loss": 1.991995620727539, "memory(GiB)": 72.85, "step": 43235, "token_acc": 0.5588235294117647, "train_speed(iter/s)": 0.671271 }, { "epoch": 1.852534167345015, "grad_norm": 5.235072135925293, "learning_rate": 6.980655157195185e-05, "loss": 2.190754508972168, "memory(GiB)": 72.85, "step": 43240, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.671246 }, { "epoch": 1.8527483826742641, "grad_norm": 3.653918981552124, "learning_rate": 6.980037215538821e-05, "loss": 2.2901126861572267, "memory(GiB)": 72.85, "step": 43245, "token_acc": 0.4886731391585761, "train_speed(iter/s)": 0.67123 }, { "epoch": 1.8529625980035132, "grad_norm": 4.1027116775512695, "learning_rate": 6.979419238012228e-05, "loss": 2.3440345764160155, "memory(GiB)": 72.85, "step": 43250, "token_acc": 0.4981549815498155, 
"train_speed(iter/s)": 0.671238 }, { "epoch": 1.853176813332762, "grad_norm": 3.9136312007904053, "learning_rate": 6.978801224626599e-05, "loss": 2.167345428466797, "memory(GiB)": 72.85, "step": 43255, "token_acc": 0.5055762081784386, "train_speed(iter/s)": 0.671233 }, { "epoch": 1.8533910286620112, "grad_norm": 7.323945999145508, "learning_rate": 6.978183175393127e-05, "loss": 2.2818038940429686, "memory(GiB)": 72.85, "step": 43260, "token_acc": 0.5226480836236934, "train_speed(iter/s)": 0.671228 }, { "epoch": 1.85360524399126, "grad_norm": 3.3150999546051025, "learning_rate": 6.977565090323013e-05, "loss": 2.178341293334961, "memory(GiB)": 72.85, "step": 43265, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.671228 }, { "epoch": 1.8538194593205088, "grad_norm": 4.396907329559326, "learning_rate": 6.976946969427451e-05, "loss": 2.2891637802124025, "memory(GiB)": 72.85, "step": 43270, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.671233 }, { "epoch": 1.854033674649758, "grad_norm": 4.809250831604004, "learning_rate": 6.97632881271764e-05, "loss": 2.339922332763672, "memory(GiB)": 72.85, "step": 43275, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.671248 }, { "epoch": 1.854247889979007, "grad_norm": 4.383684158325195, "learning_rate": 6.975710620204781e-05, "loss": 2.490283966064453, "memory(GiB)": 72.85, "step": 43280, "token_acc": 0.48299319727891155, "train_speed(iter/s)": 0.671241 }, { "epoch": 1.8544621053082557, "grad_norm": 3.9508066177368164, "learning_rate": 6.97509239190007e-05, "loss": 2.4283449172973635, "memory(GiB)": 72.85, "step": 43285, "token_acc": 0.4749034749034749, "train_speed(iter/s)": 0.671262 }, { "epoch": 1.854676320637505, "grad_norm": 4.2710981369018555, "learning_rate": 6.974474127814705e-05, "loss": 2.2659006118774414, "memory(GiB)": 72.85, "step": 43290, "token_acc": 0.5063291139240507, "train_speed(iter/s)": 0.671265 }, { "epoch": 1.8548905359667538, "grad_norm": 3.497317314147949, "learning_rate": 
6.973855827959892e-05, "loss": 2.551946258544922, "memory(GiB)": 72.85, "step": 43295, "token_acc": 0.49842271293375395, "train_speed(iter/s)": 0.671264 }, { "epoch": 1.8551047512960026, "grad_norm": 4.273509502410889, "learning_rate": 6.973237492346826e-05, "loss": 2.4280723571777343, "memory(GiB)": 72.85, "step": 43300, "token_acc": 0.5054545454545455, "train_speed(iter/s)": 0.671272 }, { "epoch": 1.8553189666252519, "grad_norm": 4.86867618560791, "learning_rate": 6.972619120986714e-05, "loss": 2.456954574584961, "memory(GiB)": 72.85, "step": 43305, "token_acc": 0.45874587458745875, "train_speed(iter/s)": 0.671271 }, { "epoch": 1.8555331819545007, "grad_norm": 3.9365603923797607, "learning_rate": 6.972000713890756e-05, "loss": 1.9229660034179688, "memory(GiB)": 72.85, "step": 43310, "token_acc": 0.51440329218107, "train_speed(iter/s)": 0.671292 }, { "epoch": 1.8557473972837495, "grad_norm": 7.826115131378174, "learning_rate": 6.971382271070155e-05, "loss": 2.1438764572143554, "memory(GiB)": 72.85, "step": 43315, "token_acc": 0.5444015444015444, "train_speed(iter/s)": 0.671271 }, { "epoch": 1.8559616126129987, "grad_norm": 4.6851701736450195, "learning_rate": 6.970763792536115e-05, "loss": 2.3651779174804686, "memory(GiB)": 72.85, "step": 43320, "token_acc": 0.5017182130584192, "train_speed(iter/s)": 0.671279 }, { "epoch": 1.8561758279422476, "grad_norm": 3.6889593601226807, "learning_rate": 6.97014527829984e-05, "loss": 2.3537534713745116, "memory(GiB)": 72.85, "step": 43325, "token_acc": 0.4892086330935252, "train_speed(iter/s)": 0.671281 }, { "epoch": 1.8563900432714964, "grad_norm": 5.2395782470703125, "learning_rate": 6.969526728372535e-05, "loss": 2.329433250427246, "memory(GiB)": 72.85, "step": 43330, "token_acc": 0.5, "train_speed(iter/s)": 0.671278 }, { "epoch": 1.8566042586007456, "grad_norm": 4.0198655128479, "learning_rate": 6.968908142765405e-05, "loss": 2.2045223236083986, "memory(GiB)": 72.85, "step": 43335, "token_acc": 0.49666666666666665, 
"train_speed(iter/s)": 0.671293 }, { "epoch": 1.8568184739299944, "grad_norm": 5.179927349090576, "learning_rate": 6.968289521489658e-05, "loss": 2.4736713409423827, "memory(GiB)": 72.85, "step": 43340, "token_acc": 0.49337748344370863, "train_speed(iter/s)": 0.671301 }, { "epoch": 1.8570326892592433, "grad_norm": 4.531662940979004, "learning_rate": 6.967670864556501e-05, "loss": 2.5467453002929688, "memory(GiB)": 72.85, "step": 43345, "token_acc": 0.4645390070921986, "train_speed(iter/s)": 0.671288 }, { "epoch": 1.8572469045884925, "grad_norm": 3.563788414001465, "learning_rate": 6.967052171977139e-05, "loss": 2.378607177734375, "memory(GiB)": 72.85, "step": 43350, "token_acc": 0.503030303030303, "train_speed(iter/s)": 0.671263 }, { "epoch": 1.8574611199177413, "grad_norm": 4.408109188079834, "learning_rate": 6.966433443762782e-05, "loss": 2.647275924682617, "memory(GiB)": 72.85, "step": 43355, "token_acc": 0.48089171974522293, "train_speed(iter/s)": 0.67126 }, { "epoch": 1.8576753352469901, "grad_norm": 3.447859048843384, "learning_rate": 6.965814679924639e-05, "loss": 2.2322595596313475, "memory(GiB)": 72.85, "step": 43360, "token_acc": 0.53156146179402, "train_speed(iter/s)": 0.671234 }, { "epoch": 1.8578895505762394, "grad_norm": 6.509181499481201, "learning_rate": 6.965195880473916e-05, "loss": 2.244470977783203, "memory(GiB)": 72.85, "step": 43365, "token_acc": 0.4945054945054945, "train_speed(iter/s)": 0.671247 }, { "epoch": 1.8581037659054882, "grad_norm": 2.842501640319824, "learning_rate": 6.96457704542183e-05, "loss": 2.444585990905762, "memory(GiB)": 72.85, "step": 43370, "token_acc": 0.4903047091412742, "train_speed(iter/s)": 0.671253 }, { "epoch": 1.858317981234737, "grad_norm": 4.40366792678833, "learning_rate": 6.963958174779585e-05, "loss": 2.2948436737060547, "memory(GiB)": 72.85, "step": 43375, "token_acc": 0.5036764705882353, "train_speed(iter/s)": 0.671258 }, { "epoch": 1.8585321965639863, "grad_norm": 4.625124454498291, "learning_rate": 
6.963339268558398e-05, "loss": 2.2213985443115236, "memory(GiB)": 72.85, "step": 43380, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.671275 }, { "epoch": 1.858746411893235, "grad_norm": 7.1204729080200195, "learning_rate": 6.962720326769477e-05, "loss": 2.499855804443359, "memory(GiB)": 72.85, "step": 43385, "token_acc": 0.49477351916376305, "train_speed(iter/s)": 0.671273 }, { "epoch": 1.858960627222484, "grad_norm": 4.216725826263428, "learning_rate": 6.962101349424036e-05, "loss": 2.623886489868164, "memory(GiB)": 72.85, "step": 43390, "token_acc": 0.44366197183098594, "train_speed(iter/s)": 0.67128 }, { "epoch": 1.8591748425517332, "grad_norm": 4.812685966491699, "learning_rate": 6.961482336533288e-05, "loss": 2.311009407043457, "memory(GiB)": 72.85, "step": 43395, "token_acc": 0.48732394366197185, "train_speed(iter/s)": 0.671289 }, { "epoch": 1.859389057880982, "grad_norm": 3.5513839721679688, "learning_rate": 6.96086328810845e-05, "loss": 2.273570442199707, "memory(GiB)": 72.85, "step": 43400, "token_acc": 0.5196078431372549, "train_speed(iter/s)": 0.671291 }, { "epoch": 1.8596032732102308, "grad_norm": 4.9993109703063965, "learning_rate": 6.960244204160731e-05, "loss": 2.3621700286865233, "memory(GiB)": 72.85, "step": 43405, "token_acc": 0.48771929824561405, "train_speed(iter/s)": 0.671304 }, { "epoch": 1.85981748853948, "grad_norm": 4.085455417633057, "learning_rate": 6.95962508470135e-05, "loss": 2.3703485488891602, "memory(GiB)": 72.85, "step": 43410, "token_acc": 0.4597315436241611, "train_speed(iter/s)": 0.671318 }, { "epoch": 1.8600317038687288, "grad_norm": 3.539792776107788, "learning_rate": 6.959005929741523e-05, "loss": 2.3090642929077148, "memory(GiB)": 72.85, "step": 43415, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.671334 }, { "epoch": 1.8602459191979777, "grad_norm": 7.539988994598389, "learning_rate": 6.958386739292464e-05, "loss": 2.137151336669922, "memory(GiB)": 72.85, "step": 43420, "token_acc": 
0.5155038759689923, "train_speed(iter/s)": 0.671349 }, { "epoch": 1.860460134527227, "grad_norm": 5.154055118560791, "learning_rate": 6.957767513365397e-05, "loss": 2.4179157257080077, "memory(GiB)": 72.85, "step": 43425, "token_acc": 0.494949494949495, "train_speed(iter/s)": 0.671349 }, { "epoch": 1.8606743498564757, "grad_norm": 3.412029266357422, "learning_rate": 6.957148251971531e-05, "loss": 2.0820812225341796, "memory(GiB)": 72.85, "step": 43430, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.671354 }, { "epoch": 1.8608885651857245, "grad_norm": 3.924440860748291, "learning_rate": 6.956528955122089e-05, "loss": 2.726866912841797, "memory(GiB)": 72.85, "step": 43435, "token_acc": 0.446875, "train_speed(iter/s)": 0.671368 }, { "epoch": 1.8611027805149738, "grad_norm": 5.188439846038818, "learning_rate": 6.95590962282829e-05, "loss": 2.455443000793457, "memory(GiB)": 72.85, "step": 43440, "token_acc": 0.5, "train_speed(iter/s)": 0.671356 }, { "epoch": 1.8613169958442226, "grad_norm": 6.5474982261657715, "learning_rate": 6.955290255101353e-05, "loss": 2.1442693710327148, "memory(GiB)": 72.85, "step": 43445, "token_acc": 0.5365853658536586, "train_speed(iter/s)": 0.671367 }, { "epoch": 1.8615312111734714, "grad_norm": 5.020592212677002, "learning_rate": 6.954670851952498e-05, "loss": 2.5540321350097654, "memory(GiB)": 72.85, "step": 43450, "token_acc": 0.49140893470790376, "train_speed(iter/s)": 0.671373 }, { "epoch": 1.8617454265027207, "grad_norm": 6.681899547576904, "learning_rate": 6.95405141339295e-05, "loss": 2.4168798446655275, "memory(GiB)": 72.85, "step": 43455, "token_acc": 0.47278911564625853, "train_speed(iter/s)": 0.671382 }, { "epoch": 1.8619596418319695, "grad_norm": 7.715261459350586, "learning_rate": 6.953431939433925e-05, "loss": 2.2256490707397463, "memory(GiB)": 72.85, "step": 43460, "token_acc": 0.495114006514658, "train_speed(iter/s)": 0.6714 }, { "epoch": 1.8621738571612183, "grad_norm": 4.391885280609131, "learning_rate": 
6.952812430086648e-05, "loss": 2.432052803039551, "memory(GiB)": 72.85, "step": 43465, "token_acc": 0.49002849002849, "train_speed(iter/s)": 0.671402 }, { "epoch": 1.8623880724904676, "grad_norm": 3.504575490951538, "learning_rate": 6.952192885362343e-05, "loss": 2.1804269790649413, "memory(GiB)": 72.85, "step": 43470, "token_acc": 0.515748031496063, "train_speed(iter/s)": 0.671396 }, { "epoch": 1.8626022878197164, "grad_norm": 4.526137828826904, "learning_rate": 6.951573305272233e-05, "loss": 2.094582939147949, "memory(GiB)": 72.85, "step": 43475, "token_acc": 0.5107692307692308, "train_speed(iter/s)": 0.671404 }, { "epoch": 1.8628165031489652, "grad_norm": 4.681823253631592, "learning_rate": 6.950953689827539e-05, "loss": 2.383317756652832, "memory(GiB)": 72.85, "step": 43480, "token_acc": 0.475, "train_speed(iter/s)": 0.671406 }, { "epoch": 1.8630307184782144, "grad_norm": 3.2212610244750977, "learning_rate": 6.950334039039491e-05, "loss": 2.3054534912109377, "memory(GiB)": 72.85, "step": 43485, "token_acc": 0.4970414201183432, "train_speed(iter/s)": 0.6714 }, { "epoch": 1.8632449338074633, "grad_norm": 7.746864318847656, "learning_rate": 6.949714352919312e-05, "loss": 2.4017892837524415, "memory(GiB)": 72.85, "step": 43490, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.671403 }, { "epoch": 1.863459149136712, "grad_norm": 4.021536350250244, "learning_rate": 6.949094631478227e-05, "loss": 2.384538269042969, "memory(GiB)": 72.85, "step": 43495, "token_acc": 0.48184818481848185, "train_speed(iter/s)": 0.671382 }, { "epoch": 1.8636733644659613, "grad_norm": 3.9811806678771973, "learning_rate": 6.948474874727467e-05, "loss": 2.3283197402954103, "memory(GiB)": 72.85, "step": 43500, "token_acc": 0.4980694980694981, "train_speed(iter/s)": 0.671388 }, { "epoch": 1.8636733644659613, "eval_loss": 2.1149544715881348, "eval_runtime": 15.3437, "eval_samples_per_second": 6.517, "eval_steps_per_second": 6.517, "eval_token_acc": 0.514324693042292, "step": 43500 }, { 
"epoch": 1.8638875797952101, "grad_norm": 3.8380649089813232, "learning_rate": 6.947855082678256e-05, "loss": 2.332943153381348, "memory(GiB)": 72.85, "step": 43505, "token_acc": 0.49371980676328503, "train_speed(iter/s)": 0.67122 }, { "epoch": 1.864101795124459, "grad_norm": 4.5399651527404785, "learning_rate": 6.947235255341822e-05, "loss": 2.1578935623168944, "memory(GiB)": 72.85, "step": 43510, "token_acc": 0.5234375, "train_speed(iter/s)": 0.671224 }, { "epoch": 1.8643160104537082, "grad_norm": 4.963656425476074, "learning_rate": 6.946615392729397e-05, "loss": 2.550357627868652, "memory(GiB)": 72.85, "step": 43515, "token_acc": 0.5, "train_speed(iter/s)": 0.671238 }, { "epoch": 1.864530225782957, "grad_norm": 4.718644142150879, "learning_rate": 6.945995494852206e-05, "loss": 2.3630355834960937, "memory(GiB)": 72.85, "step": 43520, "token_acc": 0.4440894568690096, "train_speed(iter/s)": 0.67125 }, { "epoch": 1.8647444411122058, "grad_norm": 4.285966396331787, "learning_rate": 6.945375561721481e-05, "loss": 2.3129018783569335, "memory(GiB)": 72.85, "step": 43525, "token_acc": 0.524904214559387, "train_speed(iter/s)": 0.671243 }, { "epoch": 1.864958656441455, "grad_norm": 4.2683515548706055, "learning_rate": 6.944755593348454e-05, "loss": 2.2754867553710936, "memory(GiB)": 72.85, "step": 43530, "token_acc": 0.5353535353535354, "train_speed(iter/s)": 0.67124 }, { "epoch": 1.865172871770704, "grad_norm": 3.5504608154296875, "learning_rate": 6.944135589744354e-05, "loss": 2.428481101989746, "memory(GiB)": 72.85, "step": 43535, "token_acc": 0.46984126984126984, "train_speed(iter/s)": 0.671228 }, { "epoch": 1.8653870870999527, "grad_norm": 6.1602067947387695, "learning_rate": 6.943515550920413e-05, "loss": 2.2433536529541014, "memory(GiB)": 72.85, "step": 43540, "token_acc": 0.53125, "train_speed(iter/s)": 0.671226 }, { "epoch": 1.865601302429202, "grad_norm": 4.707345008850098, "learning_rate": 6.942895476887868e-05, "loss": 2.646615409851074, "memory(GiB)": 72.85, 
"step": 43545, "token_acc": 0.4867549668874172, "train_speed(iter/s)": 0.671232 }, { "epoch": 1.8658155177584508, "grad_norm": 3.9066078662872314, "learning_rate": 6.942275367657947e-05, "loss": 2.433843994140625, "memory(GiB)": 72.85, "step": 43550, "token_acc": 0.4876543209876543, "train_speed(iter/s)": 0.671232 }, { "epoch": 1.8660297330876996, "grad_norm": 4.601195812225342, "learning_rate": 6.941655223241885e-05, "loss": 2.4873218536376953, "memory(GiB)": 72.85, "step": 43555, "token_acc": 0.4807692307692308, "train_speed(iter/s)": 0.671242 }, { "epoch": 1.8662439484169489, "grad_norm": 3.6947245597839355, "learning_rate": 6.941035043650917e-05, "loss": 2.556581497192383, "memory(GiB)": 72.85, "step": 43560, "token_acc": 0.4908424908424908, "train_speed(iter/s)": 0.67125 }, { "epoch": 1.8664581637461977, "grad_norm": 3.489525079727173, "learning_rate": 6.94041482889628e-05, "loss": 2.551132011413574, "memory(GiB)": 72.85, "step": 43565, "token_acc": 0.4952978056426332, "train_speed(iter/s)": 0.671259 }, { "epoch": 1.8666723790754465, "grad_norm": 3.793041467666626, "learning_rate": 6.939794578989207e-05, "loss": 2.235553741455078, "memory(GiB)": 72.85, "step": 43570, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.671252 }, { "epoch": 1.8668865944046957, "grad_norm": 4.716154098510742, "learning_rate": 6.939174293940936e-05, "loss": 2.3785552978515625, "memory(GiB)": 72.85, "step": 43575, "token_acc": 0.4891304347826087, "train_speed(iter/s)": 0.671246 }, { "epoch": 1.8671008097339445, "grad_norm": 5.255684852600098, "learning_rate": 6.938553973762704e-05, "loss": 2.493077850341797, "memory(GiB)": 72.85, "step": 43580, "token_acc": 0.4529616724738676, "train_speed(iter/s)": 0.671243 }, { "epoch": 1.8673150250631934, "grad_norm": 3.748713970184326, "learning_rate": 6.937933618465748e-05, "loss": 2.5332883834838866, "memory(GiB)": 72.85, "step": 43585, "token_acc": 0.48742138364779874, "train_speed(iter/s)": 0.671241 }, { "epoch": 1.8675292403924426, 
"grad_norm": 3.6210029125213623, "learning_rate": 6.937313228061306e-05, "loss": 2.731489562988281, "memory(GiB)": 72.85, "step": 43590, "token_acc": 0.44476744186046513, "train_speed(iter/s)": 0.671242 }, { "epoch": 1.8677434557216914, "grad_norm": 4.129323482513428, "learning_rate": 6.936692802560618e-05, "loss": 2.6145620346069336, "memory(GiB)": 72.85, "step": 43595, "token_acc": 0.45703125, "train_speed(iter/s)": 0.671238 }, { "epoch": 1.8679576710509405, "grad_norm": 5.351141929626465, "learning_rate": 6.936072341974922e-05, "loss": 2.172073745727539, "memory(GiB)": 72.85, "step": 43600, "token_acc": 0.552, "train_speed(iter/s)": 0.671241 }, { "epoch": 1.8681718863801895, "grad_norm": 4.955965518951416, "learning_rate": 6.935451846315461e-05, "loss": 2.1909149169921873, "memory(GiB)": 72.85, "step": 43605, "token_acc": 0.5283687943262412, "train_speed(iter/s)": 0.671238 }, { "epoch": 1.8683861017094383, "grad_norm": 4.420402526855469, "learning_rate": 6.934831315593472e-05, "loss": 2.5369194030761717, "memory(GiB)": 72.85, "step": 43610, "token_acc": 0.4967948717948718, "train_speed(iter/s)": 0.671227 }, { "epoch": 1.8686003170386873, "grad_norm": 4.4576568603515625, "learning_rate": 6.934210749820202e-05, "loss": 2.49825496673584, "memory(GiB)": 72.85, "step": 43615, "token_acc": 0.46467391304347827, "train_speed(iter/s)": 0.671205 }, { "epoch": 1.8688145323679364, "grad_norm": 4.028408050537109, "learning_rate": 6.933590149006887e-05, "loss": 2.177752876281738, "memory(GiB)": 72.85, "step": 43620, "token_acc": 0.5337423312883436, "train_speed(iter/s)": 0.671206 }, { "epoch": 1.8690287476971852, "grad_norm": 5.074117660522461, "learning_rate": 6.932969513164775e-05, "loss": 2.5262453079223635, "memory(GiB)": 72.85, "step": 43625, "token_acc": 0.45901639344262296, "train_speed(iter/s)": 0.671195 }, { "epoch": 1.8692429630264342, "grad_norm": 4.802428722381592, "learning_rate": 6.932348842305104e-05, "loss": 2.543354797363281, "memory(GiB)": 72.85, "step": 
43630, "token_acc": 0.4421052631578947, "train_speed(iter/s)": 0.671196 }, { "epoch": 1.8694571783556833, "grad_norm": 5.4978156089782715, "learning_rate": 6.931728136439123e-05, "loss": 2.2102075576782227, "memory(GiB)": 72.85, "step": 43635, "token_acc": 0.48659003831417624, "train_speed(iter/s)": 0.671185 }, { "epoch": 1.869671393684932, "grad_norm": 3.6676597595214844, "learning_rate": 6.931107395578074e-05, "loss": 2.391304779052734, "memory(GiB)": 72.85, "step": 43640, "token_acc": 0.4592833876221498, "train_speed(iter/s)": 0.671205 }, { "epoch": 1.869885609014181, "grad_norm": 4.191681385040283, "learning_rate": 6.930486619733203e-05, "loss": 2.2193389892578126, "memory(GiB)": 72.85, "step": 43645, "token_acc": 0.5015673981191222, "train_speed(iter/s)": 0.671212 }, { "epoch": 1.8700998243434301, "grad_norm": 3.95707631111145, "learning_rate": 6.929865808915756e-05, "loss": 2.534071350097656, "memory(GiB)": 72.85, "step": 43650, "token_acc": 0.46855345911949686, "train_speed(iter/s)": 0.671223 }, { "epoch": 1.870314039672679, "grad_norm": 4.747668743133545, "learning_rate": 6.92924496313698e-05, "loss": 2.4521146774291993, "memory(GiB)": 72.85, "step": 43655, "token_acc": 0.484472049689441, "train_speed(iter/s)": 0.67122 }, { "epoch": 1.870528255001928, "grad_norm": 4.137221336364746, "learning_rate": 6.928624082408123e-05, "loss": 2.7290143966674805, "memory(GiB)": 72.85, "step": 43660, "token_acc": 0.4660493827160494, "train_speed(iter/s)": 0.671231 }, { "epoch": 1.870742470331177, "grad_norm": 4.924098491668701, "learning_rate": 6.928003166740428e-05, "loss": 2.2674060821533204, "memory(GiB)": 72.85, "step": 43665, "token_acc": 0.5239852398523985, "train_speed(iter/s)": 0.671232 }, { "epoch": 1.8709566856604258, "grad_norm": 4.999814033508301, "learning_rate": 6.92738221614515e-05, "loss": 2.551837158203125, "memory(GiB)": 72.85, "step": 43670, "token_acc": 0.5031645569620253, "train_speed(iter/s)": 0.67124 }, { "epoch": 1.8711709009896749, "grad_norm": 
7.415895462036133, "learning_rate": 6.926761230633534e-05, "loss": 2.408491516113281, "memory(GiB)": 72.85, "step": 43675, "token_acc": 0.5041666666666667, "train_speed(iter/s)": 0.671243 }, { "epoch": 1.871385116318924, "grad_norm": 4.526251792907715, "learning_rate": 6.926140210216831e-05, "loss": 2.2490779876708986, "memory(GiB)": 72.85, "step": 43680, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.671238 }, { "epoch": 1.8715993316481727, "grad_norm": 4.4354987144470215, "learning_rate": 6.925519154906292e-05, "loss": 2.6241764068603515, "memory(GiB)": 72.85, "step": 43685, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.671238 }, { "epoch": 1.8718135469774217, "grad_norm": 5.1200270652771, "learning_rate": 6.924898064713167e-05, "loss": 2.1638370513916017, "memory(GiB)": 72.85, "step": 43690, "token_acc": 0.5397923875432526, "train_speed(iter/s)": 0.671239 }, { "epoch": 1.8720277623066708, "grad_norm": 5.0273756980896, "learning_rate": 6.924276939648706e-05, "loss": 2.361603546142578, "memory(GiB)": 72.85, "step": 43695, "token_acc": 0.5189873417721519, "train_speed(iter/s)": 0.67125 }, { "epoch": 1.8722419776359196, "grad_norm": 3.923649787902832, "learning_rate": 6.923655779724165e-05, "loss": 2.567249870300293, "memory(GiB)": 72.85, "step": 43700, "token_acc": 0.4440789473684211, "train_speed(iter/s)": 0.671268 }, { "epoch": 1.8724561929651686, "grad_norm": 4.9077324867248535, "learning_rate": 6.923034584950794e-05, "loss": 2.5170215606689452, "memory(GiB)": 72.85, "step": 43705, "token_acc": 0.4426877470355731, "train_speed(iter/s)": 0.671278 }, { "epoch": 1.8726704082944177, "grad_norm": 3.2169811725616455, "learning_rate": 6.922413355339847e-05, "loss": 2.3398128509521485, "memory(GiB)": 72.85, "step": 43710, "token_acc": 0.49834983498349833, "train_speed(iter/s)": 0.671288 }, { "epoch": 1.8728846236236665, "grad_norm": 3.9713644981384277, "learning_rate": 6.92179209090258e-05, "loss": 2.2632055282592773, "memory(GiB)": 72.85, "step": 
43715, "token_acc": 0.48014440433212996, "train_speed(iter/s)": 0.671293 }, { "epoch": 1.8730988389529155, "grad_norm": 6.243172645568848, "learning_rate": 6.921170791650248e-05, "loss": 2.394988250732422, "memory(GiB)": 72.85, "step": 43720, "token_acc": 0.48942598187311176, "train_speed(iter/s)": 0.671287 }, { "epoch": 1.8733130542821645, "grad_norm": 4.22630500793457, "learning_rate": 6.920549457594102e-05, "loss": 2.578434944152832, "memory(GiB)": 72.85, "step": 43725, "token_acc": 0.47651006711409394, "train_speed(iter/s)": 0.671273 }, { "epoch": 1.8735272696114134, "grad_norm": 6.289456367492676, "learning_rate": 6.919928088745402e-05, "loss": 2.231405258178711, "memory(GiB)": 72.85, "step": 43730, "token_acc": 0.5346153846153846, "train_speed(iter/s)": 0.671276 }, { "epoch": 1.8737414849406624, "grad_norm": 3.8536572456359863, "learning_rate": 6.919306685115403e-05, "loss": 2.2844001770019533, "memory(GiB)": 72.85, "step": 43735, "token_acc": 0.5317725752508361, "train_speed(iter/s)": 0.671288 }, { "epoch": 1.8739557002699114, "grad_norm": 3.9706225395202637, "learning_rate": 6.918685246715364e-05, "loss": 2.5652788162231444, "memory(GiB)": 72.85, "step": 43740, "token_acc": 0.5036764705882353, "train_speed(iter/s)": 0.671278 }, { "epoch": 1.8741699155991602, "grad_norm": 4.489100456237793, "learning_rate": 6.918063773556542e-05, "loss": 2.3683887481689454, "memory(GiB)": 72.85, "step": 43745, "token_acc": 0.4701086956521739, "train_speed(iter/s)": 0.671286 }, { "epoch": 1.8743841309284093, "grad_norm": 4.110165596008301, "learning_rate": 6.917442265650196e-05, "loss": 2.529238700866699, "memory(GiB)": 72.85, "step": 43750, "token_acc": 0.4728434504792332, "train_speed(iter/s)": 0.671285 }, { "epoch": 1.8745983462576583, "grad_norm": 4.396657943725586, "learning_rate": 6.916820723007582e-05, "loss": 2.3435916900634766, "memory(GiB)": 72.85, "step": 43755, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.67128 }, { "epoch": 1.8748125615869071, 
"grad_norm": 4.766840934753418, "learning_rate": 6.916199145639966e-05, "loss": 2.55584831237793, "memory(GiB)": 72.85, "step": 43760, "token_acc": 0.4628099173553719, "train_speed(iter/s)": 0.671286 }, { "epoch": 1.8750267769161562, "grad_norm": 5.499342441558838, "learning_rate": 6.915577533558601e-05, "loss": 2.533284568786621, "memory(GiB)": 72.85, "step": 43765, "token_acc": 0.4671280276816609, "train_speed(iter/s)": 0.671307 }, { "epoch": 1.8752409922454052, "grad_norm": 4.423417091369629, "learning_rate": 6.914955886774756e-05, "loss": 2.1330528259277344, "memory(GiB)": 72.85, "step": 43770, "token_acc": 0.5642023346303502, "train_speed(iter/s)": 0.671304 }, { "epoch": 1.875455207574654, "grad_norm": 5.411364555358887, "learning_rate": 6.914334205299686e-05, "loss": 2.058219146728516, "memory(GiB)": 72.85, "step": 43775, "token_acc": 0.5570934256055363, "train_speed(iter/s)": 0.671301 }, { "epoch": 1.875669422903903, "grad_norm": 4.232036590576172, "learning_rate": 6.913712489144658e-05, "loss": 2.5900638580322264, "memory(GiB)": 72.85, "step": 43780, "token_acc": 0.44144144144144143, "train_speed(iter/s)": 0.671306 }, { "epoch": 1.875883638233152, "grad_norm": 3.7959349155426025, "learning_rate": 6.91309073832093e-05, "loss": 2.3546779632568358, "memory(GiB)": 72.85, "step": 43785, "token_acc": 0.5246913580246914, "train_speed(iter/s)": 0.671318 }, { "epoch": 1.8760978535624009, "grad_norm": 4.776268005371094, "learning_rate": 6.912468952839772e-05, "loss": 2.3453752517700197, "memory(GiB)": 72.85, "step": 43790, "token_acc": 0.5113636363636364, "train_speed(iter/s)": 0.671316 }, { "epoch": 1.87631206889165, "grad_norm": 4.612290859222412, "learning_rate": 6.911847132712442e-05, "loss": 2.350620460510254, "memory(GiB)": 72.85, "step": 43795, "token_acc": 0.5298245614035088, "train_speed(iter/s)": 0.671321 }, { "epoch": 1.876526284220899, "grad_norm": 4.789938449859619, "learning_rate": 6.91122527795021e-05, "loss": 2.2939210891723634, "memory(GiB)": 72.85, 
"step": 43800, "token_acc": 0.49624060150375937, "train_speed(iter/s)": 0.671329 }, { "epoch": 1.8767404995501478, "grad_norm": 4.327606678009033, "learning_rate": 6.910603388564338e-05, "loss": 2.2648237228393553, "memory(GiB)": 72.85, "step": 43805, "token_acc": 0.47840531561461797, "train_speed(iter/s)": 0.67133 }, { "epoch": 1.8769547148793968, "grad_norm": 5.70072603225708, "learning_rate": 6.909981464566094e-05, "loss": 2.1968223571777346, "memory(GiB)": 72.85, "step": 43810, "token_acc": 0.5228070175438596, "train_speed(iter/s)": 0.671334 }, { "epoch": 1.8771689302086458, "grad_norm": 3.204447031021118, "learning_rate": 6.909359505966743e-05, "loss": 2.384993553161621, "memory(GiB)": 72.85, "step": 43815, "token_acc": 0.4891640866873065, "train_speed(iter/s)": 0.671344 }, { "epoch": 1.8773831455378946, "grad_norm": 4.178986072540283, "learning_rate": 6.908737512777555e-05, "loss": 2.2218603134155273, "memory(GiB)": 72.85, "step": 43820, "token_acc": 0.48314606741573035, "train_speed(iter/s)": 0.671362 }, { "epoch": 1.8775973608671437, "grad_norm": 6.9575324058532715, "learning_rate": 6.908115485009795e-05, "loss": 2.2030189514160154, "memory(GiB)": 72.85, "step": 43825, "token_acc": 0.5092936802973977, "train_speed(iter/s)": 0.671369 }, { "epoch": 1.8778115761963927, "grad_norm": 7.585745811462402, "learning_rate": 6.907493422674735e-05, "loss": 2.0325082778930663, "memory(GiB)": 72.85, "step": 43830, "token_acc": 0.5545851528384279, "train_speed(iter/s)": 0.671347 }, { "epoch": 1.8780257915256415, "grad_norm": 4.946994304656982, "learning_rate": 6.90687132578364e-05, "loss": 2.5850624084472655, "memory(GiB)": 72.85, "step": 43835, "token_acc": 0.48514851485148514, "train_speed(iter/s)": 0.671346 }, { "epoch": 1.8782400068548906, "grad_norm": 3.801032781600952, "learning_rate": 6.906249194347783e-05, "loss": 2.3599981307983398, "memory(GiB)": 72.85, "step": 43840, "token_acc": 0.5058479532163743, "train_speed(iter/s)": 0.671327 }, { "epoch": 
1.8784542221841396, "grad_norm": 4.661753177642822, "learning_rate": 6.905627028378434e-05, "loss": 2.651255798339844, "memory(GiB)": 72.85, "step": 43845, "token_acc": 0.41533546325878595, "train_speed(iter/s)": 0.67133 }, { "epoch": 1.8786684375133884, "grad_norm": 4.2406744956970215, "learning_rate": 6.905004827886864e-05, "loss": 2.5877119064331056, "memory(GiB)": 72.85, "step": 43850, "token_acc": 0.4576271186440678, "train_speed(iter/s)": 0.671333 }, { "epoch": 1.8788826528426374, "grad_norm": 4.719285011291504, "learning_rate": 6.904382592884343e-05, "loss": 2.0340646743774413, "memory(GiB)": 72.85, "step": 43855, "token_acc": 0.5241935483870968, "train_speed(iter/s)": 0.671345 }, { "epoch": 1.8790968681718865, "grad_norm": 4.146718502044678, "learning_rate": 6.903760323382147e-05, "loss": 2.168177032470703, "memory(GiB)": 72.85, "step": 43860, "token_acc": 0.503731343283582, "train_speed(iter/s)": 0.671356 }, { "epoch": 1.8793110835011353, "grad_norm": 4.256763458251953, "learning_rate": 6.903138019391545e-05, "loss": 2.1777408599853514, "memory(GiB)": 72.85, "step": 43865, "token_acc": 0.5111111111111111, "train_speed(iter/s)": 0.671369 }, { "epoch": 1.8795252988303843, "grad_norm": 4.217166423797607, "learning_rate": 6.902515680923813e-05, "loss": 2.3161434173583983, "memory(GiB)": 72.85, "step": 43870, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.671369 }, { "epoch": 1.8797395141596334, "grad_norm": 3.508714199066162, "learning_rate": 6.901893307990227e-05, "loss": 2.4941375732421873, "memory(GiB)": 72.85, "step": 43875, "token_acc": 0.5234375, "train_speed(iter/s)": 0.671371 }, { "epoch": 1.8799537294888822, "grad_norm": 4.896133899688721, "learning_rate": 6.901270900602056e-05, "loss": 2.1830148696899414, "memory(GiB)": 72.85, "step": 43880, "token_acc": 0.5148148148148148, "train_speed(iter/s)": 0.671349 }, { "epoch": 1.8801679448181312, "grad_norm": 4.88762092590332, "learning_rate": 6.900648458770581e-05, "loss": 2.1440975189208986, 
"memory(GiB)": 72.85, "step": 43885, "token_acc": 0.5440613026819924, "train_speed(iter/s)": 0.671355 }, { "epoch": 1.8803821601473802, "grad_norm": 3.8881211280822754, "learning_rate": 6.900025982507074e-05, "loss": 2.2590232849121095, "memory(GiB)": 72.85, "step": 43890, "token_acc": 0.5123456790123457, "train_speed(iter/s)": 0.671366 }, { "epoch": 1.880596375476629, "grad_norm": 4.2790021896362305, "learning_rate": 6.899403471822817e-05, "loss": 2.4636503219604493, "memory(GiB)": 72.85, "step": 43895, "token_acc": 0.45962732919254656, "train_speed(iter/s)": 0.671385 }, { "epoch": 1.880810590805878, "grad_norm": 4.639139652252197, "learning_rate": 6.898780926729083e-05, "loss": 2.8669763565063477, "memory(GiB)": 72.85, "step": 43900, "token_acc": 0.43283582089552236, "train_speed(iter/s)": 0.671392 }, { "epoch": 1.8810248061351271, "grad_norm": 4.226595401763916, "learning_rate": 6.898158347237152e-05, "loss": 2.2998733520507812, "memory(GiB)": 72.85, "step": 43905, "token_acc": 0.5159010600706714, "train_speed(iter/s)": 0.671387 }, { "epoch": 1.881239021464376, "grad_norm": 3.531804084777832, "learning_rate": 6.897535733358301e-05, "loss": 2.0785249710083007, "memory(GiB)": 72.85, "step": 43910, "token_acc": 0.5374149659863946, "train_speed(iter/s)": 0.671372 }, { "epoch": 1.881453236793625, "grad_norm": 4.202241897583008, "learning_rate": 6.89691308510381e-05, "loss": 2.0069625854492186, "memory(GiB)": 72.85, "step": 43915, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.671383 }, { "epoch": 1.881667452122874, "grad_norm": 3.5595204830169678, "learning_rate": 6.89629040248496e-05, "loss": 2.227663993835449, "memory(GiB)": 72.85, "step": 43920, "token_acc": 0.48788927335640137, "train_speed(iter/s)": 0.671392 }, { "epoch": 1.8818816674521228, "grad_norm": 4.119143962860107, "learning_rate": 6.895667685513032e-05, "loss": 2.3069293975830076, "memory(GiB)": 72.85, "step": 43925, "token_acc": 0.503030303030303, "train_speed(iter/s)": 0.671401 }, { 
"epoch": 1.8820958827813719, "grad_norm": 3.219576835632324, "learning_rate": 6.895044934199301e-05, "loss": 2.6284839630126955, "memory(GiB)": 72.85, "step": 43930, "token_acc": 0.4775510204081633, "train_speed(iter/s)": 0.671405 }, { "epoch": 1.8823100981106209, "grad_norm": 3.913331985473633, "learning_rate": 6.894422148555057e-05, "loss": 2.421119499206543, "memory(GiB)": 72.85, "step": 43935, "token_acc": 0.48467966573816157, "train_speed(iter/s)": 0.671417 }, { "epoch": 1.8825243134398697, "grad_norm": 3.8526997566223145, "learning_rate": 6.893799328591577e-05, "loss": 2.407207489013672, "memory(GiB)": 72.85, "step": 43940, "token_acc": 0.49122807017543857, "train_speed(iter/s)": 0.671431 }, { "epoch": 1.8827385287691187, "grad_norm": 4.912351131439209, "learning_rate": 6.893176474320147e-05, "loss": 1.9631620407104493, "memory(GiB)": 72.85, "step": 43945, "token_acc": 0.5702479338842975, "train_speed(iter/s)": 0.67144 }, { "epoch": 1.8829527440983678, "grad_norm": 3.4968008995056152, "learning_rate": 6.892553585752049e-05, "loss": 2.748951530456543, "memory(GiB)": 72.85, "step": 43950, "token_acc": 0.44871794871794873, "train_speed(iter/s)": 0.671455 }, { "epoch": 1.8831669594276166, "grad_norm": 4.860035419464111, "learning_rate": 6.891930662898567e-05, "loss": 2.3307199478149414, "memory(GiB)": 72.85, "step": 43955, "token_acc": 0.4944649446494465, "train_speed(iter/s)": 0.671459 }, { "epoch": 1.8833811747568656, "grad_norm": 5.107782363891602, "learning_rate": 6.891307705770986e-05, "loss": 2.315134811401367, "memory(GiB)": 72.85, "step": 43960, "token_acc": 0.515625, "train_speed(iter/s)": 0.671471 }, { "epoch": 1.8835953900861147, "grad_norm": 4.229640960693359, "learning_rate": 6.890684714380592e-05, "loss": 2.2930599212646485, "memory(GiB)": 72.85, "step": 43965, "token_acc": 0.4726027397260274, "train_speed(iter/s)": 0.671449 }, { "epoch": 1.8838096054153635, "grad_norm": 5.044089317321777, "learning_rate": 6.89006168873867e-05, "loss": 
2.3721160888671875, "memory(GiB)": 72.85, "step": 43970, "token_acc": 0.5212765957446809, "train_speed(iter/s)": 0.671448 }, { "epoch": 1.8840238207446125, "grad_norm": 4.233063220977783, "learning_rate": 6.889438628856508e-05, "loss": 2.1802490234375, "memory(GiB)": 72.85, "step": 43975, "token_acc": 0.5511811023622047, "train_speed(iter/s)": 0.671449 }, { "epoch": 1.8842380360738615, "grad_norm": 5.288496494293213, "learning_rate": 6.888815534745394e-05, "loss": 2.426993179321289, "memory(GiB)": 72.85, "step": 43980, "token_acc": 0.48322147651006714, "train_speed(iter/s)": 0.671441 }, { "epoch": 1.8844522514031103, "grad_norm": 4.087758541107178, "learning_rate": 6.888192406416613e-05, "loss": 2.2760032653808593, "memory(GiB)": 72.85, "step": 43985, "token_acc": 0.4981949458483754, "train_speed(iter/s)": 0.671439 }, { "epoch": 1.8846664667323594, "grad_norm": 4.969193935394287, "learning_rate": 6.887569243881456e-05, "loss": 2.2345624923706056, "memory(GiB)": 72.85, "step": 43990, "token_acc": 0.5300751879699248, "train_speed(iter/s)": 0.671431 }, { "epoch": 1.8848806820616084, "grad_norm": 5.130980014801025, "learning_rate": 6.886946047151212e-05, "loss": 2.707749938964844, "memory(GiB)": 72.85, "step": 43995, "token_acc": 0.4634146341463415, "train_speed(iter/s)": 0.671436 }, { "epoch": 1.8850948973908572, "grad_norm": 4.8876752853393555, "learning_rate": 6.88632281623717e-05, "loss": 2.3235836029052734, "memory(GiB)": 72.85, "step": 44000, "token_acc": 0.52, "train_speed(iter/s)": 0.671431 }, { "epoch": 1.8850948973908572, "eval_loss": 2.068408966064453, "eval_runtime": 15.3246, "eval_samples_per_second": 6.525, "eval_steps_per_second": 6.525, "eval_token_acc": 0.5014367816091954, "step": 44000 }, { "epoch": 1.8853091127201063, "grad_norm": 4.146597385406494, "learning_rate": 6.885699551150619e-05, "loss": 2.2393028259277346, "memory(GiB)": 72.85, "step": 44005, "token_acc": 0.4837758112094395, "train_speed(iter/s)": 0.671243 }, { "epoch": 1.8855233280493553, 
"grad_norm": 4.233890056610107, "learning_rate": 6.885076251902854e-05, "loss": 2.5351247787475586, "memory(GiB)": 72.85, "step": 44010, "token_acc": 0.465625, "train_speed(iter/s)": 0.671239 }, { "epoch": 1.885737543378604, "grad_norm": 4.834653854370117, "learning_rate": 6.884452918505163e-05, "loss": 2.5641048431396483, "memory(GiB)": 72.85, "step": 44015, "token_acc": 0.45674740484429066, "train_speed(iter/s)": 0.671216 }, { "epoch": 1.8859517587078531, "grad_norm": 4.841982841491699, "learning_rate": 6.883829550968841e-05, "loss": 2.301930236816406, "memory(GiB)": 72.85, "step": 44020, "token_acc": 0.5112994350282486, "train_speed(iter/s)": 0.671213 }, { "epoch": 1.8861659740371022, "grad_norm": 3.7209668159484863, "learning_rate": 6.883206149305177e-05, "loss": 2.6656850814819335, "memory(GiB)": 72.85, "step": 44025, "token_acc": 0.47686832740213525, "train_speed(iter/s)": 0.671222 }, { "epoch": 1.886380189366351, "grad_norm": 4.436228275299072, "learning_rate": 6.88258271352547e-05, "loss": 2.405393600463867, "memory(GiB)": 72.85, "step": 44030, "token_acc": 0.5091575091575091, "train_speed(iter/s)": 0.67124 }, { "epoch": 1.8865944046956, "grad_norm": 4.308507919311523, "learning_rate": 6.88195924364101e-05, "loss": 2.3807662963867187, "memory(GiB)": 72.85, "step": 44035, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.671234 }, { "epoch": 1.886808620024849, "grad_norm": 3.19640851020813, "learning_rate": 6.881335739663093e-05, "loss": 2.326032257080078, "memory(GiB)": 72.85, "step": 44040, "token_acc": 0.5164473684210527, "train_speed(iter/s)": 0.671248 }, { "epoch": 1.8870228353540979, "grad_norm": 3.4368844032287598, "learning_rate": 6.880712201603013e-05, "loss": 2.3533632278442385, "memory(GiB)": 72.85, "step": 44045, "token_acc": 0.5125, "train_speed(iter/s)": 0.671254 }, { "epoch": 1.887237050683347, "grad_norm": 3.291818857192993, "learning_rate": 6.880088629472068e-05, "loss": 2.4393646240234377, "memory(GiB)": 72.85, "step": 44050, 
"token_acc": 0.46875, "train_speed(iter/s)": 0.671247 }, { "epoch": 1.887451266012596, "grad_norm": 3.6067445278167725, "learning_rate": 6.879465023281554e-05, "loss": 2.3555389404296876, "memory(GiB)": 72.85, "step": 44055, "token_acc": 0.5175097276264592, "train_speed(iter/s)": 0.671255 }, { "epoch": 1.8876654813418448, "grad_norm": 4.1337785720825195, "learning_rate": 6.87884138304277e-05, "loss": 2.7523117065429688, "memory(GiB)": 72.85, "step": 44060, "token_acc": 0.4968152866242038, "train_speed(iter/s)": 0.671267 }, { "epoch": 1.8878796966710938, "grad_norm": 3.5036139488220215, "learning_rate": 6.878217708767008e-05, "loss": 2.436872673034668, "memory(GiB)": 72.85, "step": 44065, "token_acc": 0.447098976109215, "train_speed(iter/s)": 0.671263 }, { "epoch": 1.8880939120003428, "grad_norm": 4.8001604080200195, "learning_rate": 6.877594000465573e-05, "loss": 2.3510482788085936, "memory(GiB)": 72.85, "step": 44070, "token_acc": 0.46785714285714286, "train_speed(iter/s)": 0.671279 }, { "epoch": 1.8883081273295916, "grad_norm": 4.734903812408447, "learning_rate": 6.87697025814976e-05, "loss": 2.4551809310913084, "memory(GiB)": 72.85, "step": 44075, "token_acc": 0.5183823529411765, "train_speed(iter/s)": 0.671296 }, { "epoch": 1.8885223426588407, "grad_norm": 4.444118499755859, "learning_rate": 6.87634648183087e-05, "loss": 2.084398651123047, "memory(GiB)": 72.85, "step": 44080, "token_acc": 0.5057915057915058, "train_speed(iter/s)": 0.671291 }, { "epoch": 1.8887365579880897, "grad_norm": 3.423865795135498, "learning_rate": 6.875722671520204e-05, "loss": 2.258658218383789, "memory(GiB)": 72.85, "step": 44085, "token_acc": 0.4816053511705686, "train_speed(iter/s)": 0.671294 }, { "epoch": 1.8889507733173385, "grad_norm": 4.457231044769287, "learning_rate": 6.875098827229061e-05, "loss": 2.3115005493164062, "memory(GiB)": 72.85, "step": 44090, "token_acc": 0.5119453924914675, "train_speed(iter/s)": 0.671301 }, { "epoch": 1.8891649886465876, "grad_norm": 
3.7432072162628174, "learning_rate": 6.874474948968747e-05, "loss": 2.364213562011719, "memory(GiB)": 72.85, "step": 44095, "token_acc": 0.517799352750809, "train_speed(iter/s)": 0.671316 }, { "epoch": 1.8893792039758366, "grad_norm": 3.8532376289367676, "learning_rate": 6.873851036750557e-05, "loss": 2.405622100830078, "memory(GiB)": 72.85, "step": 44100, "token_acc": 0.4952076677316294, "train_speed(iter/s)": 0.671308 }, { "epoch": 1.8895934193050854, "grad_norm": 5.061852931976318, "learning_rate": 6.8732270905858e-05, "loss": 2.0053972244262694, "memory(GiB)": 72.85, "step": 44105, "token_acc": 0.5296296296296297, "train_speed(iter/s)": 0.671311 }, { "epoch": 1.8898076346343344, "grad_norm": 4.984246730804443, "learning_rate": 6.872603110485776e-05, "loss": 2.377432632446289, "memory(GiB)": 72.85, "step": 44110, "token_acc": 0.5127272727272727, "train_speed(iter/s)": 0.671323 }, { "epoch": 1.8900218499635835, "grad_norm": 4.196009635925293, "learning_rate": 6.87197909646179e-05, "loss": 2.251942825317383, "memory(GiB)": 72.85, "step": 44115, "token_acc": 0.509493670886076, "train_speed(iter/s)": 0.671347 }, { "epoch": 1.8902360652928323, "grad_norm": 3.6802585124969482, "learning_rate": 6.871355048525148e-05, "loss": 2.75244026184082, "memory(GiB)": 72.85, "step": 44120, "token_acc": 0.4555160142348754, "train_speed(iter/s)": 0.671344 }, { "epoch": 1.8904502806220813, "grad_norm": 4.719115257263184, "learning_rate": 6.870730966687154e-05, "loss": 2.4924795150756838, "memory(GiB)": 72.85, "step": 44125, "token_acc": 0.4716981132075472, "train_speed(iter/s)": 0.671337 }, { "epoch": 1.8906644959513303, "grad_norm": 4.246121883392334, "learning_rate": 6.87010685095911e-05, "loss": 2.2674545288085937, "memory(GiB)": 72.85, "step": 44130, "token_acc": 0.47079037800687284, "train_speed(iter/s)": 0.671348 }, { "epoch": 1.8908787112805792, "grad_norm": 4.500247478485107, "learning_rate": 6.86948270135233e-05, "loss": 2.2984046936035156, "memory(GiB)": 72.85, "step": 
44135, "token_acc": 0.49206349206349204, "train_speed(iter/s)": 0.671333 }, { "epoch": 1.8910929266098282, "grad_norm": 3.6856164932250977, "learning_rate": 6.868858517878117e-05, "loss": 2.359461212158203, "memory(GiB)": 72.85, "step": 44140, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.671339 }, { "epoch": 1.8913071419390772, "grad_norm": 5.942952632904053, "learning_rate": 6.868234300547776e-05, "loss": 2.2055643081665037, "memory(GiB)": 72.85, "step": 44145, "token_acc": 0.5019011406844106, "train_speed(iter/s)": 0.671345 }, { "epoch": 1.891521357268326, "grad_norm": 4.166161060333252, "learning_rate": 6.867610049372619e-05, "loss": 2.217427444458008, "memory(GiB)": 72.85, "step": 44150, "token_acc": 0.5197368421052632, "train_speed(iter/s)": 0.671336 }, { "epoch": 1.891735572597575, "grad_norm": 4.982780933380127, "learning_rate": 6.866985764363955e-05, "loss": 2.600840759277344, "memory(GiB)": 72.85, "step": 44155, "token_acc": 0.4811320754716981, "train_speed(iter/s)": 0.671334 }, { "epoch": 1.8919497879268241, "grad_norm": 6.093146800994873, "learning_rate": 6.866361445533091e-05, "loss": 2.08123664855957, "memory(GiB)": 72.85, "step": 44160, "token_acc": 0.4945054945054945, "train_speed(iter/s)": 0.671332 }, { "epoch": 1.892164003256073, "grad_norm": 3.6225271224975586, "learning_rate": 6.865737092891342e-05, "loss": 2.4890798568725585, "memory(GiB)": 72.85, "step": 44165, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.671344 }, { "epoch": 1.892378218585322, "grad_norm": 4.735494136810303, "learning_rate": 6.865112706450012e-05, "loss": 2.4688156127929686, "memory(GiB)": 72.85, "step": 44170, "token_acc": 0.5080645161290323, "train_speed(iter/s)": 0.67136 }, { "epoch": 1.892592433914571, "grad_norm": 5.608760833740234, "learning_rate": 6.864488286220416e-05, "loss": 2.420815658569336, "memory(GiB)": 72.85, "step": 44175, "token_acc": 0.5, "train_speed(iter/s)": 0.671366 }, { "epoch": 1.8928066492438198, "grad_norm": 
4.319356441497803, "learning_rate": 6.863863832213868e-05, "loss": 2.4539695739746095, "memory(GiB)": 72.85, "step": 44180, "token_acc": 0.4788273615635179, "train_speed(iter/s)": 0.671373 }, { "epoch": 1.8930208645730688, "grad_norm": 4.002623558044434, "learning_rate": 6.863239344441677e-05, "loss": 2.7317867279052734, "memory(GiB)": 72.85, "step": 44185, "token_acc": 0.44483985765124556, "train_speed(iter/s)": 0.671385 }, { "epoch": 1.8932350799023179, "grad_norm": 5.692779541015625, "learning_rate": 6.862614822915156e-05, "loss": 2.55682487487793, "memory(GiB)": 72.85, "step": 44190, "token_acc": 0.44402985074626866, "train_speed(iter/s)": 0.671386 }, { "epoch": 1.8934492952315667, "grad_norm": 5.283348560333252, "learning_rate": 6.861990267645622e-05, "loss": 2.3262033462524414, "memory(GiB)": 72.85, "step": 44195, "token_acc": 0.50187265917603, "train_speed(iter/s)": 0.671396 }, { "epoch": 1.8936635105608157, "grad_norm": 4.728171348571777, "learning_rate": 6.861365678644386e-05, "loss": 2.7118352890014648, "memory(GiB)": 72.85, "step": 44200, "token_acc": 0.47761194029850745, "train_speed(iter/s)": 0.671395 }, { "epoch": 1.8938777258900648, "grad_norm": 5.297503471374512, "learning_rate": 6.860741055922766e-05, "loss": 2.6982839584350584, "memory(GiB)": 72.85, "step": 44205, "token_acc": 0.45819397993311034, "train_speed(iter/s)": 0.671402 }, { "epoch": 1.8940919412193136, "grad_norm": 3.9023914337158203, "learning_rate": 6.860116399492075e-05, "loss": 2.20723819732666, "memory(GiB)": 72.85, "step": 44210, "token_acc": 0.49201277955271566, "train_speed(iter/s)": 0.671389 }, { "epoch": 1.8943061565485626, "grad_norm": 3.7714552879333496, "learning_rate": 6.859491709363633e-05, "loss": 2.1178016662597656, "memory(GiB)": 72.85, "step": 44215, "token_acc": 0.5205479452054794, "train_speed(iter/s)": 0.671397 }, { "epoch": 1.8945203718778116, "grad_norm": 3.821101188659668, "learning_rate": 6.858866985548751e-05, "loss": 2.1695415496826174, "memory(GiB)": 72.85, 
"step": 44220, "token_acc": 0.5393700787401575, "train_speed(iter/s)": 0.671393 }, { "epoch": 1.8947345872070604, "grad_norm": 4.226133346557617, "learning_rate": 6.858242228058752e-05, "loss": 2.319847297668457, "memory(GiB)": 72.85, "step": 44225, "token_acc": 0.4844290657439446, "train_speed(iter/s)": 0.671397 }, { "epoch": 1.8949488025363095, "grad_norm": 4.149488925933838, "learning_rate": 6.85761743690495e-05, "loss": 1.9364696502685548, "memory(GiB)": 72.85, "step": 44230, "token_acc": 0.5568181818181818, "train_speed(iter/s)": 0.671416 }, { "epoch": 1.8951630178655585, "grad_norm": 4.337803840637207, "learning_rate": 6.856992612098668e-05, "loss": 2.545291709899902, "memory(GiB)": 72.85, "step": 44235, "token_acc": 0.48880597014925375, "train_speed(iter/s)": 0.671408 }, { "epoch": 1.8953772331948073, "grad_norm": 5.71504020690918, "learning_rate": 6.856367753651223e-05, "loss": 2.2868045806884765, "memory(GiB)": 72.85, "step": 44240, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.671421 }, { "epoch": 1.8955914485240564, "grad_norm": 4.281521320343018, "learning_rate": 6.855742861573932e-05, "loss": 2.352316474914551, "memory(GiB)": 72.85, "step": 44245, "token_acc": 0.4382716049382716, "train_speed(iter/s)": 0.671409 }, { "epoch": 1.8958056638533054, "grad_norm": 5.180147171020508, "learning_rate": 6.85511793587812e-05, "loss": 2.42667236328125, "memory(GiB)": 72.85, "step": 44250, "token_acc": 0.4713804713804714, "train_speed(iter/s)": 0.6714 }, { "epoch": 1.8960198791825542, "grad_norm": 4.383231163024902, "learning_rate": 6.854492976575107e-05, "loss": 2.4838985443115233, "memory(GiB)": 72.85, "step": 44255, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.671406 }, { "epoch": 1.8962340945118032, "grad_norm": 5.046006202697754, "learning_rate": 6.853867983676212e-05, "loss": 2.3545743942260744, "memory(GiB)": 72.85, "step": 44260, "token_acc": 0.5016611295681063, "train_speed(iter/s)": 0.671411 }, { "epoch": 1.8964483098410523, 
"grad_norm": 5.654983997344971, "learning_rate": 6.85324295719276e-05, "loss": 2.532639503479004, "memory(GiB)": 72.85, "step": 44265, "token_acc": 0.488135593220339, "train_speed(iter/s)": 0.67142 }, { "epoch": 1.896662525170301, "grad_norm": 7.114240646362305, "learning_rate": 6.852617897136075e-05, "loss": 2.3681901931762694, "memory(GiB)": 72.85, "step": 44270, "token_acc": 0.4726027397260274, "train_speed(iter/s)": 0.671431 }, { "epoch": 1.8968767404995501, "grad_norm": 5.2255754470825195, "learning_rate": 6.851992803517478e-05, "loss": 2.4975814819335938, "memory(GiB)": 72.85, "step": 44275, "token_acc": 0.4924924924924925, "train_speed(iter/s)": 0.67144 }, { "epoch": 1.8970909558287992, "grad_norm": 4.208090305328369, "learning_rate": 6.851367676348294e-05, "loss": 1.935384750366211, "memory(GiB)": 72.85, "step": 44280, "token_acc": 0.572463768115942, "train_speed(iter/s)": 0.671455 }, { "epoch": 1.897305171158048, "grad_norm": 3.6060242652893066, "learning_rate": 6.850742515639846e-05, "loss": 2.3712337493896483, "memory(GiB)": 72.85, "step": 44285, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.671463 }, { "epoch": 1.897519386487297, "grad_norm": 5.114687919616699, "learning_rate": 6.850117321403464e-05, "loss": 2.1308124542236326, "memory(GiB)": 72.85, "step": 44290, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.671451 }, { "epoch": 1.897733601816546, "grad_norm": 5.382737159729004, "learning_rate": 6.84949209365047e-05, "loss": 2.5596315383911135, "memory(GiB)": 72.85, "step": 44295, "token_acc": 0.471875, "train_speed(iter/s)": 0.671454 }, { "epoch": 1.8979478171457949, "grad_norm": 3.983297109603882, "learning_rate": 6.84886683239219e-05, "loss": 2.290082550048828, "memory(GiB)": 72.85, "step": 44300, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.671452 }, { "epoch": 1.898162032475044, "grad_norm": 3.785557985305786, "learning_rate": 6.848241537639956e-05, "loss": 2.4421009063720702, "memory(GiB)": 72.85, "step": 
44305, "token_acc": 0.5074074074074074, "train_speed(iter/s)": 0.67147 }, { "epoch": 1.898376247804293, "grad_norm": 3.8819010257720947, "learning_rate": 6.847616209405092e-05, "loss": 2.4572433471679687, "memory(GiB)": 72.85, "step": 44310, "token_acc": 0.4907749077490775, "train_speed(iter/s)": 0.67148 }, { "epoch": 1.8985904631335417, "grad_norm": 4.413961887359619, "learning_rate": 6.846990847698926e-05, "loss": 2.3306228637695314, "memory(GiB)": 72.85, "step": 44315, "token_acc": 0.5018181818181818, "train_speed(iter/s)": 0.671458 }, { "epoch": 1.8988046784627908, "grad_norm": 4.254293441772461, "learning_rate": 6.84636545253279e-05, "loss": 2.6727476119995117, "memory(GiB)": 72.85, "step": 44320, "token_acc": 0.44912280701754387, "train_speed(iter/s)": 0.671481 }, { "epoch": 1.8990188937920398, "grad_norm": 3.9572396278381348, "learning_rate": 6.845740023918011e-05, "loss": 2.4250640869140625, "memory(GiB)": 72.85, "step": 44325, "token_acc": 0.505338078291815, "train_speed(iter/s)": 0.671479 }, { "epoch": 1.8992331091212886, "grad_norm": 3.0267550945281982, "learning_rate": 6.845114561865918e-05, "loss": 2.1564943313598635, "memory(GiB)": 72.85, "step": 44330, "token_acc": 0.5240793201133145, "train_speed(iter/s)": 0.671485 }, { "epoch": 1.8994473244505377, "grad_norm": 4.018959999084473, "learning_rate": 6.844489066387845e-05, "loss": 2.28076057434082, "memory(GiB)": 72.85, "step": 44335, "token_acc": 0.509375, "train_speed(iter/s)": 0.671492 }, { "epoch": 1.8996615397797867, "grad_norm": 3.951655864715576, "learning_rate": 6.843863537495123e-05, "loss": 2.4975116729736326, "memory(GiB)": 72.85, "step": 44340, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.671481 }, { "epoch": 1.8998757551090355, "grad_norm": 3.3732516765594482, "learning_rate": 6.843237975199084e-05, "loss": 2.452006721496582, "memory(GiB)": 72.85, "step": 44345, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.671504 }, { "epoch": 1.9000899704382845, "grad_norm": 
3.44219708442688, "learning_rate": 6.842612379511058e-05, "loss": 2.4093673706054686, "memory(GiB)": 72.85, "step": 44350, "token_acc": 0.5108359133126935, "train_speed(iter/s)": 0.671496 }, { "epoch": 1.9003041857675336, "grad_norm": 5.190240383148193, "learning_rate": 6.841986750442383e-05, "loss": 2.28515739440918, "memory(GiB)": 72.85, "step": 44355, "token_acc": 0.516260162601626, "train_speed(iter/s)": 0.671509 }, { "epoch": 1.9005184010967824, "grad_norm": 3.1952030658721924, "learning_rate": 6.841361088004388e-05, "loss": 2.192731475830078, "memory(GiB)": 72.85, "step": 44360, "token_acc": 0.5163398692810458, "train_speed(iter/s)": 0.67153 }, { "epoch": 1.9007326164260314, "grad_norm": 4.868704795837402, "learning_rate": 6.84073539220841e-05, "loss": 2.311532402038574, "memory(GiB)": 72.85, "step": 44365, "token_acc": 0.515358361774744, "train_speed(iter/s)": 0.671518 }, { "epoch": 1.9009468317552805, "grad_norm": 4.177788734436035, "learning_rate": 6.840109663065783e-05, "loss": 2.0481184005737303, "memory(GiB)": 72.85, "step": 44370, "token_acc": 0.5137254901960784, "train_speed(iter/s)": 0.671519 }, { "epoch": 1.9011610470845293, "grad_norm": 3.7298879623413086, "learning_rate": 6.839483900587843e-05, "loss": 2.117453193664551, "memory(GiB)": 72.85, "step": 44375, "token_acc": 0.5074074074074074, "train_speed(iter/s)": 0.671518 }, { "epoch": 1.9013752624137783, "grad_norm": 4.450662612915039, "learning_rate": 6.838858104785928e-05, "loss": 2.2417272567749023, "memory(GiB)": 72.85, "step": 44380, "token_acc": 0.5017667844522968, "train_speed(iter/s)": 0.671525 }, { "epoch": 1.9015894777430273, "grad_norm": 4.564273834228516, "learning_rate": 6.838232275671374e-05, "loss": 2.3708362579345703, "memory(GiB)": 72.85, "step": 44385, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.671529 }, { "epoch": 1.9018036930722761, "grad_norm": 3.5410590171813965, "learning_rate": 6.837606413255517e-05, "loss": 2.009758949279785, "memory(GiB)": 72.85, "step": 
44390, "token_acc": 0.548936170212766, "train_speed(iter/s)": 0.671535 }, { "epoch": 1.9020179084015252, "grad_norm": 3.8745129108428955, "learning_rate": 6.836980517549695e-05, "loss": 2.4207983016967773, "memory(GiB)": 72.85, "step": 44395, "token_acc": 0.5100671140939598, "train_speed(iter/s)": 0.67154 }, { "epoch": 1.9022321237307742, "grad_norm": 4.604518413543701, "learning_rate": 6.836354588565249e-05, "loss": 2.412686347961426, "memory(GiB)": 72.85, "step": 44400, "token_acc": 0.4726027397260274, "train_speed(iter/s)": 0.67156 }, { "epoch": 1.902446339060023, "grad_norm": 4.1132707595825195, "learning_rate": 6.835728626313515e-05, "loss": 2.0779052734375, "memory(GiB)": 72.85, "step": 44405, "token_acc": 0.4961832061068702, "train_speed(iter/s)": 0.671565 }, { "epoch": 1.902660554389272, "grad_norm": 4.5650200843811035, "learning_rate": 6.835102630805836e-05, "loss": 2.115064239501953, "memory(GiB)": 72.85, "step": 44410, "token_acc": 0.5058365758754864, "train_speed(iter/s)": 0.671577 }, { "epoch": 1.902874769718521, "grad_norm": 4.466882228851318, "learning_rate": 6.834476602053553e-05, "loss": 2.2654075622558594, "memory(GiB)": 72.85, "step": 44415, "token_acc": 0.5071942446043165, "train_speed(iter/s)": 0.671579 }, { "epoch": 1.90308898504777, "grad_norm": 3.9809844493865967, "learning_rate": 6.833850540068004e-05, "loss": 2.546484184265137, "memory(GiB)": 72.85, "step": 44420, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.67157 }, { "epoch": 1.903303200377019, "grad_norm": 4.730869293212891, "learning_rate": 6.833224444860533e-05, "loss": 2.9451927185058593, "memory(GiB)": 72.85, "step": 44425, "token_acc": 0.45, "train_speed(iter/s)": 0.671577 }, { "epoch": 1.903517415706268, "grad_norm": 3.603302240371704, "learning_rate": 6.83259831644248e-05, "loss": 2.331124687194824, "memory(GiB)": 72.85, "step": 44430, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.671591 }, { "epoch": 1.9037316310355168, "grad_norm": 3.812991142272949, 
"learning_rate": 6.831972154825191e-05, "loss": 2.374921417236328, "memory(GiB)": 72.85, "step": 44435, "token_acc": 0.49097472924187724, "train_speed(iter/s)": 0.671609 }, { "epoch": 1.9039458463647658, "grad_norm": 3.9552409648895264, "learning_rate": 6.831345960020008e-05, "loss": 2.338020896911621, "memory(GiB)": 72.85, "step": 44440, "token_acc": 0.46616541353383456, "train_speed(iter/s)": 0.671629 }, { "epoch": 1.9041600616940149, "grad_norm": 5.00155782699585, "learning_rate": 6.830719732038274e-05, "loss": 2.1478883743286135, "memory(GiB)": 72.85, "step": 44445, "token_acc": 0.5542168674698795, "train_speed(iter/s)": 0.671636 }, { "epoch": 1.9043742770232637, "grad_norm": 7.411136150360107, "learning_rate": 6.830093470891333e-05, "loss": 2.6863435745239257, "memory(GiB)": 72.85, "step": 44450, "token_acc": 0.431438127090301, "train_speed(iter/s)": 0.671605 }, { "epoch": 1.9045884923525127, "grad_norm": 4.817777156829834, "learning_rate": 6.829467176590535e-05, "loss": 2.584981155395508, "memory(GiB)": 72.85, "step": 44455, "token_acc": 0.47096774193548385, "train_speed(iter/s)": 0.671582 }, { "epoch": 1.9048027076817617, "grad_norm": 4.889761447906494, "learning_rate": 6.828840849147221e-05, "loss": 2.432125473022461, "memory(GiB)": 72.85, "step": 44460, "token_acc": 0.4519230769230769, "train_speed(iter/s)": 0.671582 }, { "epoch": 1.9050169230110106, "grad_norm": 3.475965976715088, "learning_rate": 6.82821448857274e-05, "loss": 2.1334564208984377, "memory(GiB)": 72.85, "step": 44465, "token_acc": 0.5618374558303887, "train_speed(iter/s)": 0.671568 }, { "epoch": 1.9052311383402596, "grad_norm": 6.019885063171387, "learning_rate": 6.82758809487844e-05, "loss": 2.7168354034423827, "memory(GiB)": 72.85, "step": 44470, "token_acc": 0.4485981308411215, "train_speed(iter/s)": 0.671571 }, { "epoch": 1.9054453536695086, "grad_norm": 3.8107378482818604, "learning_rate": 6.826961668075665e-05, "loss": 2.517788314819336, "memory(GiB)": 72.85, "step": 44475, 
"token_acc": 0.47904191616766467, "train_speed(iter/s)": 0.671574 }, { "epoch": 1.9056595689987574, "grad_norm": 4.266739368438721, "learning_rate": 6.826335208175768e-05, "loss": 2.2187944412231446, "memory(GiB)": 72.85, "step": 44480, "token_acc": 0.5387755102040817, "train_speed(iter/s)": 0.671579 }, { "epoch": 1.9058737843280065, "grad_norm": 4.025498390197754, "learning_rate": 6.825708715190094e-05, "loss": 2.2159231185913084, "memory(GiB)": 72.85, "step": 44485, "token_acc": 0.54421768707483, "train_speed(iter/s)": 0.671596 }, { "epoch": 1.9060879996572555, "grad_norm": 4.500513553619385, "learning_rate": 6.825082189129992e-05, "loss": 2.392721939086914, "memory(GiB)": 72.85, "step": 44490, "token_acc": 0.5236593059936908, "train_speed(iter/s)": 0.67161 }, { "epoch": 1.9063022149865043, "grad_norm": 3.6746692657470703, "learning_rate": 6.824455630006818e-05, "loss": 2.2655689239501955, "memory(GiB)": 72.85, "step": 44495, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.671601 }, { "epoch": 1.9065164303157534, "grad_norm": 5.110026836395264, "learning_rate": 6.823829037831917e-05, "loss": 2.255843734741211, "memory(GiB)": 72.85, "step": 44500, "token_acc": 0.5015576323987538, "train_speed(iter/s)": 0.671611 }, { "epoch": 1.9065164303157534, "eval_loss": 2.1569716930389404, "eval_runtime": 15.6814, "eval_samples_per_second": 6.377, "eval_steps_per_second": 6.377, "eval_token_acc": 0.48429319371727747, "step": 44500 }, { "epoch": 1.9067306456450024, "grad_norm": 3.9481070041656494, "learning_rate": 6.82320241261664e-05, "loss": 2.6251224517822265, "memory(GiB)": 72.85, "step": 44505, "token_acc": 0.4746376811594203, "train_speed(iter/s)": 0.671424 }, { "epoch": 1.9069448609742512, "grad_norm": 3.6597533226013184, "learning_rate": 6.822575754372345e-05, "loss": 2.3338441848754883, "memory(GiB)": 72.85, "step": 44510, "token_acc": 0.5034013605442177, "train_speed(iter/s)": 0.671434 }, { "epoch": 1.9071590763035002, "grad_norm": 4.104883193969727, 
"learning_rate": 6.821949063110378e-05, "loss": 2.752589225769043, "memory(GiB)": 72.85, "step": 44515, "token_acc": 0.4381625441696113, "train_speed(iter/s)": 0.671444 }, { "epoch": 1.9073732916327493, "grad_norm": 4.861772060394287, "learning_rate": 6.821322338842093e-05, "loss": 2.369985008239746, "memory(GiB)": 72.85, "step": 44520, "token_acc": 0.4982456140350877, "train_speed(iter/s)": 0.671445 }, { "epoch": 1.907587506961998, "grad_norm": 4.45605993270874, "learning_rate": 6.820695581578849e-05, "loss": 2.5293304443359377, "memory(GiB)": 72.85, "step": 44525, "token_acc": 0.4559748427672956, "train_speed(iter/s)": 0.671449 }, { "epoch": 1.9078017222912471, "grad_norm": 4.193339824676514, "learning_rate": 6.820068791331995e-05, "loss": 2.3633544921875, "memory(GiB)": 72.85, "step": 44530, "token_acc": 0.5378787878787878, "train_speed(iter/s)": 0.671447 }, { "epoch": 1.9080159376204961, "grad_norm": 5.627883434295654, "learning_rate": 6.819441968112886e-05, "loss": 2.5133724212646484, "memory(GiB)": 72.85, "step": 44535, "token_acc": 0.48863636363636365, "train_speed(iter/s)": 0.67146 }, { "epoch": 1.908230152949745, "grad_norm": 2.758496046066284, "learning_rate": 6.818815111932881e-05, "loss": 2.186773490905762, "memory(GiB)": 72.85, "step": 44540, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.671452 }, { "epoch": 1.908444368278994, "grad_norm": 4.293938159942627, "learning_rate": 6.818188222803335e-05, "loss": 2.3928604125976562, "memory(GiB)": 72.85, "step": 44545, "token_acc": 0.4744318181818182, "train_speed(iter/s)": 0.671443 }, { "epoch": 1.908658583608243, "grad_norm": 4.336596488952637, "learning_rate": 6.817561300735601e-05, "loss": 2.26003360748291, "memory(GiB)": 72.85, "step": 44550, "token_acc": 0.5170068027210885, "train_speed(iter/s)": 0.671447 }, { "epoch": 1.9088727989374918, "grad_norm": 4.679239273071289, "learning_rate": 6.81693434574104e-05, "loss": 2.5044511795043944, "memory(GiB)": 72.85, "step": 44555, "token_acc": 
0.476027397260274, "train_speed(iter/s)": 0.671452 }, { "epoch": 1.9090870142667409, "grad_norm": 3.7128915786743164, "learning_rate": 6.816307357831009e-05, "loss": 2.2923343658447264, "memory(GiB)": 72.85, "step": 44560, "token_acc": 0.49375, "train_speed(iter/s)": 0.671452 }, { "epoch": 1.90930122959599, "grad_norm": 4.734518527984619, "learning_rate": 6.815680337016866e-05, "loss": 2.2937433242797853, "memory(GiB)": 72.85, "step": 44565, "token_acc": 0.46558704453441296, "train_speed(iter/s)": 0.671452 }, { "epoch": 1.9095154449252387, "grad_norm": 4.6113057136535645, "learning_rate": 6.815053283309971e-05, "loss": 2.4258163452148436, "memory(GiB)": 72.85, "step": 44570, "token_acc": 0.4765625, "train_speed(iter/s)": 0.671422 }, { "epoch": 1.9097296602544878, "grad_norm": 4.722816467285156, "learning_rate": 6.814426196721683e-05, "loss": 2.240510368347168, "memory(GiB)": 72.85, "step": 44575, "token_acc": 0.5300353356890459, "train_speed(iter/s)": 0.671432 }, { "epoch": 1.9099438755837368, "grad_norm": 2.981400966644287, "learning_rate": 6.813799077263363e-05, "loss": 2.404866409301758, "memory(GiB)": 72.85, "step": 44580, "token_acc": 0.5092250922509225, "train_speed(iter/s)": 0.671449 }, { "epoch": 1.9101580909129856, "grad_norm": 3.7282445430755615, "learning_rate": 6.813171924946371e-05, "loss": 2.4273855209350588, "memory(GiB)": 72.85, "step": 44585, "token_acc": 0.4573170731707317, "train_speed(iter/s)": 0.671458 }, { "epoch": 1.9103723062422346, "grad_norm": 5.052133560180664, "learning_rate": 6.812544739782067e-05, "loss": 2.5845027923583985, "memory(GiB)": 72.85, "step": 44590, "token_acc": 0.47266881028938906, "train_speed(iter/s)": 0.671443 }, { "epoch": 1.9105865215714837, "grad_norm": 4.780214309692383, "learning_rate": 6.811917521781816e-05, "loss": 2.300947570800781, "memory(GiB)": 72.85, "step": 44595, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.671455 }, { "epoch": 1.9108007369007325, "grad_norm": 5.192872047424316, 
"learning_rate": 6.81129027095698e-05, "loss": 2.55924072265625, "memory(GiB)": 72.85, "step": 44600, "token_acc": 0.4512987012987013, "train_speed(iter/s)": 0.671466 }, { "epoch": 1.9110149522299815, "grad_norm": 4.464160919189453, "learning_rate": 6.810662987318921e-05, "loss": 2.423923301696777, "memory(GiB)": 72.85, "step": 44605, "token_acc": 0.4982078853046595, "train_speed(iter/s)": 0.67148 }, { "epoch": 1.9112291675592306, "grad_norm": 5.251681327819824, "learning_rate": 6.810035670879003e-05, "loss": 2.494312858581543, "memory(GiB)": 72.85, "step": 44610, "token_acc": 0.44482758620689655, "train_speed(iter/s)": 0.67149 }, { "epoch": 1.9114433828884794, "grad_norm": 3.502258777618408, "learning_rate": 6.809408321648592e-05, "loss": 2.3894063949584963, "memory(GiB)": 72.85, "step": 44615, "token_acc": 0.47735191637630664, "train_speed(iter/s)": 0.671501 }, { "epoch": 1.9116575982177286, "grad_norm": 4.668592929840088, "learning_rate": 6.808780939639051e-05, "loss": 2.552961540222168, "memory(GiB)": 72.85, "step": 44620, "token_acc": 0.48028673835125446, "train_speed(iter/s)": 0.671526 }, { "epoch": 1.9118718135469774, "grad_norm": 4.789304256439209, "learning_rate": 6.808153524861748e-05, "loss": 2.0791839599609374, "memory(GiB)": 72.85, "step": 44625, "token_acc": 0.5146579804560261, "train_speed(iter/s)": 0.671526 }, { "epoch": 1.9120860288762263, "grad_norm": 4.941326141357422, "learning_rate": 6.807526077328045e-05, "loss": 2.420663070678711, "memory(GiB)": 72.85, "step": 44630, "token_acc": 0.4860335195530726, "train_speed(iter/s)": 0.671527 }, { "epoch": 1.9123002442054755, "grad_norm": 4.310459136962891, "learning_rate": 6.806898597049313e-05, "loss": 2.2730655670166016, "memory(GiB)": 72.85, "step": 44635, "token_acc": 0.5226586102719033, "train_speed(iter/s)": 0.671532 }, { "epoch": 1.9125144595347243, "grad_norm": 5.574220657348633, "learning_rate": 6.806271084036918e-05, "loss": 2.129151153564453, "memory(GiB)": 72.85, "step": 44640, "token_acc": 
0.5368852459016393, "train_speed(iter/s)": 0.671538 }, { "epoch": 1.9127286748639731, "grad_norm": 4.634283542633057, "learning_rate": 6.805643538302229e-05, "loss": 2.4381893157958983, "memory(GiB)": 72.85, "step": 44645, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.671518 }, { "epoch": 1.9129428901932224, "grad_norm": 4.3312225341796875, "learning_rate": 6.805015959856611e-05, "loss": 2.2034475326538088, "memory(GiB)": 72.85, "step": 44650, "token_acc": 0.49458483754512633, "train_speed(iter/s)": 0.67152 }, { "epoch": 1.9131571055224712, "grad_norm": 4.904921531677246, "learning_rate": 6.804388348711438e-05, "loss": 2.5467451095581053, "memory(GiB)": 72.85, "step": 44655, "token_acc": 0.5298013245033113, "train_speed(iter/s)": 0.671534 }, { "epoch": 1.91337132085172, "grad_norm": 5.106456279754639, "learning_rate": 6.803760704878078e-05, "loss": 2.225870132446289, "memory(GiB)": 72.85, "step": 44660, "token_acc": 0.5423728813559322, "train_speed(iter/s)": 0.671528 }, { "epoch": 1.9135855361809693, "grad_norm": 4.166433334350586, "learning_rate": 6.803133028367896e-05, "loss": 2.4210081100463867, "memory(GiB)": 72.85, "step": 44665, "token_acc": 0.5096153846153846, "train_speed(iter/s)": 0.671505 }, { "epoch": 1.913799751510218, "grad_norm": 4.854907989501953, "learning_rate": 6.802505319192272e-05, "loss": 2.306675338745117, "memory(GiB)": 72.85, "step": 44670, "token_acc": 0.49725274725274726, "train_speed(iter/s)": 0.6715 }, { "epoch": 1.914013966839467, "grad_norm": 5.838477611541748, "learning_rate": 6.801877577362572e-05, "loss": 2.398581123352051, "memory(GiB)": 72.85, "step": 44675, "token_acc": 0.4896551724137931, "train_speed(iter/s)": 0.671498 }, { "epoch": 1.9142281821687162, "grad_norm": 5.572704315185547, "learning_rate": 6.801249802890169e-05, "loss": 2.392056465148926, "memory(GiB)": 72.85, "step": 44680, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.671494 }, { "epoch": 1.914442397497965, "grad_norm": 4.237691879272461, 
"learning_rate": 6.800621995786437e-05, "loss": 2.5081809997558593, "memory(GiB)": 72.85, "step": 44685, "token_acc": 0.4972375690607735, "train_speed(iter/s)": 0.671509 }, { "epoch": 1.9146566128272138, "grad_norm": 4.221291542053223, "learning_rate": 6.799994156062748e-05, "loss": 2.242766189575195, "memory(GiB)": 72.85, "step": 44690, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.671514 }, { "epoch": 1.914870828156463, "grad_norm": 4.867758274078369, "learning_rate": 6.799366283730476e-05, "loss": 2.385848617553711, "memory(GiB)": 72.85, "step": 44695, "token_acc": 0.4727272727272727, "train_speed(iter/s)": 0.671529 }, { "epoch": 1.9150850434857118, "grad_norm": 4.581665992736816, "learning_rate": 6.798738378800997e-05, "loss": 2.6634748458862303, "memory(GiB)": 72.85, "step": 44700, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.671543 }, { "epoch": 1.9152992588149607, "grad_norm": 3.398179531097412, "learning_rate": 6.798110441285683e-05, "loss": 2.2362159729003905, "memory(GiB)": 72.85, "step": 44705, "token_acc": 0.4811320754716981, "train_speed(iter/s)": 0.671562 }, { "epoch": 1.91551347414421, "grad_norm": 3.454294443130493, "learning_rate": 6.797482471195913e-05, "loss": 2.2609649658203126, "memory(GiB)": 72.85, "step": 44710, "token_acc": 0.5328467153284672, "train_speed(iter/s)": 0.671572 }, { "epoch": 1.9157276894734587, "grad_norm": 4.2155351638793945, "learning_rate": 6.796854468543061e-05, "loss": 2.6209285736083983, "memory(GiB)": 72.85, "step": 44715, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.671587 }, { "epoch": 1.9159419048027075, "grad_norm": 4.159522533416748, "learning_rate": 6.796226433338505e-05, "loss": 2.1897144317626953, "memory(GiB)": 72.85, "step": 44720, "token_acc": 0.512, "train_speed(iter/s)": 0.671582 }, { "epoch": 1.9161561201319568, "grad_norm": 3.1805553436279297, "learning_rate": 6.795598365593623e-05, "loss": 2.4117603302001953, "memory(GiB)": 72.85, "step": 44725, "token_acc": 0.5, 
"train_speed(iter/s)": 0.671586 }, { "epoch": 1.9163703354612056, "grad_norm": 3.5225541591644287, "learning_rate": 6.794970265319792e-05, "loss": 2.4642284393310545, "memory(GiB)": 72.85, "step": 44730, "token_acc": 0.4629080118694362, "train_speed(iter/s)": 0.67158 }, { "epoch": 1.9165845507904544, "grad_norm": 4.531983852386475, "learning_rate": 6.79434213252839e-05, "loss": 2.2512126922607423, "memory(GiB)": 72.85, "step": 44735, "token_acc": 0.5236220472440944, "train_speed(iter/s)": 0.671597 }, { "epoch": 1.9167987661197037, "grad_norm": 4.375959873199463, "learning_rate": 6.793713967230796e-05, "loss": 2.2939611434936524, "memory(GiB)": 72.85, "step": 44740, "token_acc": 0.5125, "train_speed(iter/s)": 0.671608 }, { "epoch": 1.9170129814489525, "grad_norm": 3.8321950435638428, "learning_rate": 6.793085769438395e-05, "loss": 2.297329330444336, "memory(GiB)": 72.85, "step": 44745, "token_acc": 0.49097472924187724, "train_speed(iter/s)": 0.671612 }, { "epoch": 1.9172271967782013, "grad_norm": 4.377362251281738, "learning_rate": 6.792457539162559e-05, "loss": 2.5318288803100586, "memory(GiB)": 72.85, "step": 44750, "token_acc": 0.4797843665768194, "train_speed(iter/s)": 0.671611 }, { "epoch": 1.9174414121074506, "grad_norm": 4.344082832336426, "learning_rate": 6.791829276414676e-05, "loss": 2.0726139068603517, "memory(GiB)": 72.85, "step": 44755, "token_acc": 0.5445205479452054, "train_speed(iter/s)": 0.671587 }, { "epoch": 1.9176556274366994, "grad_norm": 3.914573907852173, "learning_rate": 6.791200981206123e-05, "loss": 2.3199668884277345, "memory(GiB)": 72.85, "step": 44760, "token_acc": 0.5059523809523809, "train_speed(iter/s)": 0.671592 }, { "epoch": 1.9178698427659482, "grad_norm": 3.479743480682373, "learning_rate": 6.790572653548284e-05, "loss": 2.3702072143554687, "memory(GiB)": 72.85, "step": 44765, "token_acc": 0.4751552795031056, "train_speed(iter/s)": 0.671597 }, { "epoch": 1.9180840580951974, "grad_norm": 6.9498186111450195, "learning_rate": 
6.789944293452542e-05, "loss": 2.3779844284057616, "memory(GiB)": 72.85, "step": 44770, "token_acc": 0.5015873015873016, "train_speed(iter/s)": 0.671606 }, { "epoch": 1.9182982734244463, "grad_norm": 3.6134533882141113, "learning_rate": 6.78931590093028e-05, "loss": 2.42100830078125, "memory(GiB)": 72.85, "step": 44775, "token_acc": 0.5026737967914439, "train_speed(iter/s)": 0.671615 }, { "epoch": 1.918512488753695, "grad_norm": 5.991666316986084, "learning_rate": 6.788687475992882e-05, "loss": 2.489702033996582, "memory(GiB)": 72.85, "step": 44780, "token_acc": 0.4714285714285714, "train_speed(iter/s)": 0.671623 }, { "epoch": 1.9187267040829443, "grad_norm": 5.74528169631958, "learning_rate": 6.788059018651734e-05, "loss": 2.427885055541992, "memory(GiB)": 72.85, "step": 44785, "token_acc": 0.4709897610921502, "train_speed(iter/s)": 0.671612 }, { "epoch": 1.9189409194121931, "grad_norm": 3.769371509552002, "learning_rate": 6.787430528918217e-05, "loss": 2.6035348892211916, "memory(GiB)": 72.85, "step": 44790, "token_acc": 0.46308724832214765, "train_speed(iter/s)": 0.671626 }, { "epoch": 1.919155134741442, "grad_norm": 3.598863363265991, "learning_rate": 6.78680200680372e-05, "loss": 2.1728530883789063, "memory(GiB)": 72.85, "step": 44795, "token_acc": 0.5756302521008403, "train_speed(iter/s)": 0.671635 }, { "epoch": 1.9193693500706912, "grad_norm": 3.2027533054351807, "learning_rate": 6.78617345231963e-05, "loss": 2.139661407470703, "memory(GiB)": 72.85, "step": 44800, "token_acc": 0.5058365758754864, "train_speed(iter/s)": 0.671631 }, { "epoch": 1.91958356539994, "grad_norm": 3.9742844104766846, "learning_rate": 6.78554486547733e-05, "loss": 2.2440494537353515, "memory(GiB)": 72.85, "step": 44805, "token_acc": 0.49508196721311476, "train_speed(iter/s)": 0.671635 }, { "epoch": 1.9197977807291888, "grad_norm": 4.204288959503174, "learning_rate": 6.784916246288212e-05, "loss": 2.2884544372558593, "memory(GiB)": 72.85, "step": 44810, "token_acc": 0.5427350427350427, 
"train_speed(iter/s)": 0.671642 }, { "epoch": 1.920011996058438, "grad_norm": 3.5427136421203613, "learning_rate": 6.78428759476366e-05, "loss": 2.368218994140625, "memory(GiB)": 72.85, "step": 44815, "token_acc": 0.495114006514658, "train_speed(iter/s)": 0.671643 }, { "epoch": 1.920226211387687, "grad_norm": 4.128699779510498, "learning_rate": 6.783658910915065e-05, "loss": 2.4121162414550783, "memory(GiB)": 72.85, "step": 44820, "token_acc": 0.4845360824742268, "train_speed(iter/s)": 0.671648 }, { "epoch": 1.9204404267169357, "grad_norm": 5.516144752502441, "learning_rate": 6.783030194753817e-05, "loss": 2.409741020202637, "memory(GiB)": 72.85, "step": 44825, "token_acc": 0.5055350553505535, "train_speed(iter/s)": 0.671654 }, { "epoch": 1.920654642046185, "grad_norm": 4.8758225440979, "learning_rate": 6.782401446291302e-05, "loss": 2.3699670791625977, "memory(GiB)": 72.85, "step": 44830, "token_acc": 0.45357142857142857, "train_speed(iter/s)": 0.671665 }, { "epoch": 1.9208688573754338, "grad_norm": 3.254117965698242, "learning_rate": 6.781772665538914e-05, "loss": 2.2574436187744142, "memory(GiB)": 72.85, "step": 44835, "token_acc": 0.503968253968254, "train_speed(iter/s)": 0.671674 }, { "epoch": 1.9210830727046826, "grad_norm": 4.907145023345947, "learning_rate": 6.781143852508043e-05, "loss": 2.556303024291992, "memory(GiB)": 72.85, "step": 44840, "token_acc": 0.43217665615141954, "train_speed(iter/s)": 0.671691 }, { "epoch": 1.9212972880339318, "grad_norm": 6.5815839767456055, "learning_rate": 6.780515007210082e-05, "loss": 2.1402368545532227, "memory(GiB)": 72.85, "step": 44845, "token_acc": 0.5409836065573771, "train_speed(iter/s)": 0.671706 }, { "epoch": 1.9215115033631807, "grad_norm": 3.572816848754883, "learning_rate": 6.779886129656417e-05, "loss": 2.3472286224365235, "memory(GiB)": 72.85, "step": 44850, "token_acc": 0.517799352750809, "train_speed(iter/s)": 0.671708 }, { "epoch": 1.9217257186924295, "grad_norm": 4.960790157318115, "learning_rate": 
6.779257219858448e-05, "loss": 2.4920358657836914, "memory(GiB)": 72.85, "step": 44855, "token_acc": 0.475, "train_speed(iter/s)": 0.671727 }, { "epoch": 1.9219399340216787, "grad_norm": 6.318089962005615, "learning_rate": 6.778628277827565e-05, "loss": 2.2714664459228517, "memory(GiB)": 72.85, "step": 44860, "token_acc": 0.48598130841121495, "train_speed(iter/s)": 0.67175 }, { "epoch": 1.9221541493509275, "grad_norm": 4.518908977508545, "learning_rate": 6.777999303575163e-05, "loss": 2.374191665649414, "memory(GiB)": 72.85, "step": 44865, "token_acc": 0.47735191637630664, "train_speed(iter/s)": 0.671758 }, { "epoch": 1.9223683646801764, "grad_norm": 4.636952877044678, "learning_rate": 6.777370297112634e-05, "loss": 2.1046073913574217, "memory(GiB)": 72.85, "step": 44870, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.671758 }, { "epoch": 1.9225825800094256, "grad_norm": 5.090972423553467, "learning_rate": 6.776741258451376e-05, "loss": 2.1567325592041016, "memory(GiB)": 72.85, "step": 44875, "token_acc": 0.5308641975308642, "train_speed(iter/s)": 0.671754 }, { "epoch": 1.9227967953386744, "grad_norm": 4.358738899230957, "learning_rate": 6.776112187602783e-05, "loss": 2.113418960571289, "memory(GiB)": 72.85, "step": 44880, "token_acc": 0.5593869731800766, "train_speed(iter/s)": 0.67174 }, { "epoch": 1.9230110106679232, "grad_norm": 4.6128106117248535, "learning_rate": 6.775483084578252e-05, "loss": 2.478196716308594, "memory(GiB)": 72.85, "step": 44885, "token_acc": 0.5123674911660777, "train_speed(iter/s)": 0.67173 }, { "epoch": 1.9232252259971725, "grad_norm": 3.9121954441070557, "learning_rate": 6.774853949389178e-05, "loss": 2.1687105178833006, "memory(GiB)": 72.85, "step": 44890, "token_acc": 0.5113636363636364, "train_speed(iter/s)": 0.671745 }, { "epoch": 1.9234394413264213, "grad_norm": 4.536375999450684, "learning_rate": 6.77422478204696e-05, "loss": 2.3694665908813475, "memory(GiB)": 72.85, "step": 44895, "token_acc": 0.4878048780487805, 
"train_speed(iter/s)": 0.671734 }, { "epoch": 1.9236536566556701, "grad_norm": 5.02125358581543, "learning_rate": 6.773595582562998e-05, "loss": 2.4699020385742188, "memory(GiB)": 72.85, "step": 44900, "token_acc": 0.4591439688715953, "train_speed(iter/s)": 0.671732 }, { "epoch": 1.9238678719849194, "grad_norm": 3.741658926010132, "learning_rate": 6.772966350948686e-05, "loss": 2.385219764709473, "memory(GiB)": 72.85, "step": 44905, "token_acc": 0.5182724252491694, "train_speed(iter/s)": 0.671722 }, { "epoch": 1.9240820873141682, "grad_norm": 3.2624776363372803, "learning_rate": 6.772337087215425e-05, "loss": 2.41491756439209, "memory(GiB)": 72.85, "step": 44910, "token_acc": 0.5068027210884354, "train_speed(iter/s)": 0.671721 }, { "epoch": 1.924296302643417, "grad_norm": 4.246402740478516, "learning_rate": 6.771707791374618e-05, "loss": 2.2931324005126954, "memory(GiB)": 72.85, "step": 44915, "token_acc": 0.48514851485148514, "train_speed(iter/s)": 0.671718 }, { "epoch": 1.9245105179726663, "grad_norm": 3.7628955841064453, "learning_rate": 6.77107846343766e-05, "loss": 1.7470844268798829, "memory(GiB)": 72.85, "step": 44920, "token_acc": 0.5877862595419847, "train_speed(iter/s)": 0.671716 }, { "epoch": 1.924724733301915, "grad_norm": 5.66076135635376, "learning_rate": 6.770449103415953e-05, "loss": 2.5273454666137694, "memory(GiB)": 72.85, "step": 44925, "token_acc": 0.4610951008645533, "train_speed(iter/s)": 0.671721 }, { "epoch": 1.9249389486311639, "grad_norm": 3.5085978507995605, "learning_rate": 6.769819711320902e-05, "loss": 2.619095802307129, "memory(GiB)": 72.85, "step": 44930, "token_acc": 0.4813664596273292, "train_speed(iter/s)": 0.671737 }, { "epoch": 1.9251531639604131, "grad_norm": 3.8012285232543945, "learning_rate": 6.769190287163906e-05, "loss": 2.461726188659668, "memory(GiB)": 72.85, "step": 44935, "token_acc": 0.5265017667844523, "train_speed(iter/s)": 0.67174 }, { "epoch": 1.925367379289662, "grad_norm": 4.251378059387207, "learning_rate": 
6.768560830956369e-05, "loss": 2.103042984008789, "memory(GiB)": 72.85, "step": 44940, "token_acc": 0.5358490566037736, "train_speed(iter/s)": 0.671743 }, { "epoch": 1.9255815946189108, "grad_norm": 4.520435333251953, "learning_rate": 6.767931342709694e-05, "loss": 2.6200887680053713, "memory(GiB)": 72.85, "step": 44945, "token_acc": 0.46875, "train_speed(iter/s)": 0.671757 }, { "epoch": 1.92579580994816, "grad_norm": 3.9657697677612305, "learning_rate": 6.767301822435284e-05, "loss": 2.2943340301513673, "memory(GiB)": 72.85, "step": 44950, "token_acc": 0.49504950495049505, "train_speed(iter/s)": 0.671769 }, { "epoch": 1.9260100252774088, "grad_norm": 4.156700134277344, "learning_rate": 6.766672270144541e-05, "loss": 2.0795724868774412, "memory(GiB)": 72.85, "step": 44955, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.671786 }, { "epoch": 1.9262242406066579, "grad_norm": 6.0380401611328125, "learning_rate": 6.766042685848874e-05, "loss": 1.908380889892578, "memory(GiB)": 72.85, "step": 44960, "token_acc": 0.5527272727272727, "train_speed(iter/s)": 0.671788 }, { "epoch": 1.926438455935907, "grad_norm": 4.756781578063965, "learning_rate": 6.765413069559687e-05, "loss": 2.4604347229003904, "memory(GiB)": 72.85, "step": 44965, "token_acc": 0.4648318042813456, "train_speed(iter/s)": 0.671808 }, { "epoch": 1.9266526712651557, "grad_norm": 3.974719285964966, "learning_rate": 6.764783421288388e-05, "loss": 2.305549430847168, "memory(GiB)": 72.85, "step": 44970, "token_acc": 0.5048231511254019, "train_speed(iter/s)": 0.671813 }, { "epoch": 1.9268668865944047, "grad_norm": 5.416631698608398, "learning_rate": 6.76415374104638e-05, "loss": 2.2573591232299806, "memory(GiB)": 72.85, "step": 44975, "token_acc": 0.50187265917603, "train_speed(iter/s)": 0.671826 }, { "epoch": 1.9270811019236538, "grad_norm": 4.165024280548096, "learning_rate": 6.763524028845072e-05, "loss": 2.2199071884155273, "memory(GiB)": 72.85, "step": 44980, "token_acc": 0.4880546075085324, 
"train_speed(iter/s)": 0.67181 }, { "epoch": 1.9272953172529026, "grad_norm": 4.872103691101074, "learning_rate": 6.762894284695873e-05, "loss": 2.542043113708496, "memory(GiB)": 72.85, "step": 44985, "token_acc": 0.447098976109215, "train_speed(iter/s)": 0.671811 }, { "epoch": 1.9275095325821516, "grad_norm": 4.801909446716309, "learning_rate": 6.76226450861019e-05, "loss": 2.7232309341430665, "memory(GiB)": 72.85, "step": 44990, "token_acc": 0.4699248120300752, "train_speed(iter/s)": 0.671815 }, { "epoch": 1.9277237479114007, "grad_norm": 4.194411277770996, "learning_rate": 6.76163470059943e-05, "loss": 2.3663211822509767, "memory(GiB)": 72.85, "step": 44995, "token_acc": 0.47572815533980584, "train_speed(iter/s)": 0.671808 }, { "epoch": 1.9279379632406495, "grad_norm": 4.025434970855713, "learning_rate": 6.761004860675008e-05, "loss": 2.3094932556152346, "memory(GiB)": 72.85, "step": 45000, "token_acc": 0.4950166112956811, "train_speed(iter/s)": 0.671812 }, { "epoch": 1.9279379632406495, "eval_loss": 2.105952501296997, "eval_runtime": 16.2572, "eval_samples_per_second": 6.151, "eval_steps_per_second": 6.151, "eval_token_acc": 0.47251461988304094, "step": 45000 }, { "epoch": 1.9281521785698985, "grad_norm": 5.051790237426758, "learning_rate": 6.760374988848331e-05, "loss": 2.3790409088134767, "memory(GiB)": 72.85, "step": 45005, "token_acc": 0.4856373429084381, "train_speed(iter/s)": 0.671623 }, { "epoch": 1.9283663938991475, "grad_norm": 4.124001502990723, "learning_rate": 6.759745085130807e-05, "loss": 2.3576305389404295, "memory(GiB)": 72.85, "step": 45010, "token_acc": 0.4843205574912892, "train_speed(iter/s)": 0.67164 }, { "epoch": 1.9285806092283964, "grad_norm": 4.170805931091309, "learning_rate": 6.759115149533853e-05, "loss": 2.139231491088867, "memory(GiB)": 72.85, "step": 45015, "token_acc": 0.5274725274725275, "train_speed(iter/s)": 0.671664 }, { "epoch": 1.9287948245576454, "grad_norm": 3.8425240516662598, "learning_rate": 6.758485182068876e-05, 
"loss": 2.200330924987793, "memory(GiB)": 72.85, "step": 45020, "token_acc": 0.5335276967930029, "train_speed(iter/s)": 0.671684 }, { "epoch": 1.9290090398868944, "grad_norm": 6.279949188232422, "learning_rate": 6.75785518274729e-05, "loss": 2.6777523040771483, "memory(GiB)": 72.85, "step": 45025, "token_acc": 0.47297297297297297, "train_speed(iter/s)": 0.671681 }, { "epoch": 1.9292232552161432, "grad_norm": 5.040210247039795, "learning_rate": 6.75722515158051e-05, "loss": 2.309161567687988, "memory(GiB)": 72.85, "step": 45030, "token_acc": 0.48627450980392156, "train_speed(iter/s)": 0.671693 }, { "epoch": 1.9294374705453923, "grad_norm": 4.567631244659424, "learning_rate": 6.756595088579947e-05, "loss": 2.272049331665039, "memory(GiB)": 72.85, "step": 45035, "token_acc": 0.5, "train_speed(iter/s)": 0.671686 }, { "epoch": 1.9296516858746413, "grad_norm": 3.9248127937316895, "learning_rate": 6.755964993757015e-05, "loss": 2.399981880187988, "memory(GiB)": 72.85, "step": 45040, "token_acc": 0.4787234042553192, "train_speed(iter/s)": 0.671701 }, { "epoch": 1.9298659012038901, "grad_norm": 4.211643218994141, "learning_rate": 6.755334867123131e-05, "loss": 2.657177543640137, "memory(GiB)": 72.85, "step": 45045, "token_acc": 0.5, "train_speed(iter/s)": 0.671712 }, { "epoch": 1.9300801165331392, "grad_norm": 4.290843486785889, "learning_rate": 6.754704708689711e-05, "loss": 2.3743522644042967, "memory(GiB)": 72.85, "step": 45050, "token_acc": 0.5127272727272727, "train_speed(iter/s)": 0.671704 }, { "epoch": 1.9302943318623882, "grad_norm": 3.96525239944458, "learning_rate": 6.754074518468167e-05, "loss": 2.3830947875976562, "memory(GiB)": 72.85, "step": 45055, "token_acc": 0.4873417721518987, "train_speed(iter/s)": 0.6717 }, { "epoch": 1.930508547191637, "grad_norm": 4.7223219871521, "learning_rate": 6.753444296469919e-05, "loss": 2.551647186279297, "memory(GiB)": 72.85, "step": 45060, "token_acc": 0.460431654676259, "train_speed(iter/s)": 0.671704 }, { "epoch": 
1.930722762520886, "grad_norm": 4.833303451538086, "learning_rate": 6.752814042706381e-05, "loss": 2.372934913635254, "memory(GiB)": 72.85, "step": 45065, "token_acc": 0.4916387959866221, "train_speed(iter/s)": 0.671698 }, { "epoch": 1.930936977850135, "grad_norm": 4.1608710289001465, "learning_rate": 6.752183757188974e-05, "loss": 2.473697853088379, "memory(GiB)": 72.85, "step": 45070, "token_acc": 0.5160256410256411, "train_speed(iter/s)": 0.67172 }, { "epoch": 1.9311511931793839, "grad_norm": 3.996220827102661, "learning_rate": 6.751553439929113e-05, "loss": 2.511185646057129, "memory(GiB)": 72.85, "step": 45075, "token_acc": 0.5032467532467533, "train_speed(iter/s)": 0.671734 }, { "epoch": 1.931365408508633, "grad_norm": 4.546627521514893, "learning_rate": 6.75092309093822e-05, "loss": 2.5102943420410155, "memory(GiB)": 72.85, "step": 45080, "token_acc": 0.46296296296296297, "train_speed(iter/s)": 0.671696 }, { "epoch": 1.931579623837882, "grad_norm": 4.48702335357666, "learning_rate": 6.75029271022771e-05, "loss": 2.4060001373291016, "memory(GiB)": 72.85, "step": 45085, "token_acc": 0.4962121212121212, "train_speed(iter/s)": 0.671703 }, { "epoch": 1.9317938391671308, "grad_norm": 4.212628364562988, "learning_rate": 6.749662297809009e-05, "loss": 2.4952604293823244, "memory(GiB)": 72.85, "step": 45090, "token_acc": 0.4641638225255973, "train_speed(iter/s)": 0.671719 }, { "epoch": 1.9320080544963798, "grad_norm": 4.3796515464782715, "learning_rate": 6.749031853693533e-05, "loss": 2.2715946197509767, "memory(GiB)": 72.85, "step": 45095, "token_acc": 0.5098039215686274, "train_speed(iter/s)": 0.671736 }, { "epoch": 1.9322222698256288, "grad_norm": 5.704232215881348, "learning_rate": 6.748401377892703e-05, "loss": 2.493794631958008, "memory(GiB)": 72.85, "step": 45100, "token_acc": 0.43109540636042404, "train_speed(iter/s)": 0.671732 }, { "epoch": 1.9324364851548776, "grad_norm": 5.0956010818481445, "learning_rate": 6.747770870417943e-05, "loss": 
2.4081747055053713, "memory(GiB)": 72.85, "step": 45105, "token_acc": 0.48985507246376814, "train_speed(iter/s)": 0.671709 }, { "epoch": 1.9326507004841267, "grad_norm": 5.058190822601318, "learning_rate": 6.747140331280674e-05, "loss": 2.0905071258544923, "memory(GiB)": 72.85, "step": 45110, "token_acc": 0.5507246376811594, "train_speed(iter/s)": 0.671714 }, { "epoch": 1.9328649158133757, "grad_norm": 3.673902988433838, "learning_rate": 6.746509760492317e-05, "loss": 2.2706073760986327, "memory(GiB)": 72.85, "step": 45115, "token_acc": 0.5486111111111112, "train_speed(iter/s)": 0.671722 }, { "epoch": 1.9330791311426245, "grad_norm": 3.7995805740356445, "learning_rate": 6.745879158064301e-05, "loss": 2.24249382019043, "memory(GiB)": 72.85, "step": 45120, "token_acc": 0.5231788079470199, "train_speed(iter/s)": 0.671733 }, { "epoch": 1.9332933464718736, "grad_norm": 5.43921422958374, "learning_rate": 6.745248524008043e-05, "loss": 2.42148380279541, "memory(GiB)": 72.85, "step": 45125, "token_acc": 0.4980544747081712, "train_speed(iter/s)": 0.671744 }, { "epoch": 1.9335075618011226, "grad_norm": 5.099311828613281, "learning_rate": 6.744617858334975e-05, "loss": 2.250390625, "memory(GiB)": 72.85, "step": 45130, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.671741 }, { "epoch": 1.9337217771303714, "grad_norm": 4.211860656738281, "learning_rate": 6.743987161056514e-05, "loss": 2.3568790435791014, "memory(GiB)": 72.85, "step": 45135, "token_acc": 0.5267489711934157, "train_speed(iter/s)": 0.671746 }, { "epoch": 1.9339359924596204, "grad_norm": 4.464521408081055, "learning_rate": 6.74335643218409e-05, "loss": 2.4378921508789064, "memory(GiB)": 72.85, "step": 45140, "token_acc": 0.44807121661721067, "train_speed(iter/s)": 0.671755 }, { "epoch": 1.9341502077888695, "grad_norm": 4.73348331451416, "learning_rate": 6.74272567172913e-05, "loss": 2.542256164550781, "memory(GiB)": 72.85, "step": 45145, "token_acc": 0.49812734082397003, "train_speed(iter/s)": 0.671769 
}, { "epoch": 1.9343644231181183, "grad_norm": 4.310379505157471, "learning_rate": 6.74209487970306e-05, "loss": 2.4073577880859376, "memory(GiB)": 72.85, "step": 45150, "token_acc": 0.49683544303797467, "train_speed(iter/s)": 0.671782 }, { "epoch": 1.9345786384473673, "grad_norm": 7.734879970550537, "learning_rate": 6.741464056117306e-05, "loss": 2.231918716430664, "memory(GiB)": 72.85, "step": 45155, "token_acc": 0.48466257668711654, "train_speed(iter/s)": 0.671779 }, { "epoch": 1.9347928537766164, "grad_norm": 4.662271022796631, "learning_rate": 6.740833200983297e-05, "loss": 2.301402282714844, "memory(GiB)": 72.85, "step": 45160, "token_acc": 0.4823943661971831, "train_speed(iter/s)": 0.671789 }, { "epoch": 1.9350070691058652, "grad_norm": 4.228717803955078, "learning_rate": 6.740202314312463e-05, "loss": 2.4104951858520507, "memory(GiB)": 72.85, "step": 45165, "token_acc": 0.46179401993355484, "train_speed(iter/s)": 0.671779 }, { "epoch": 1.9352212844351142, "grad_norm": 4.079423904418945, "learning_rate": 6.73957139611623e-05, "loss": 2.2671756744384766, "memory(GiB)": 72.85, "step": 45170, "token_acc": 0.4965753424657534, "train_speed(iter/s)": 0.671768 }, { "epoch": 1.9354354997643632, "grad_norm": 3.6541128158569336, "learning_rate": 6.73894044640603e-05, "loss": 2.1167064666748048, "memory(GiB)": 72.85, "step": 45175, "token_acc": 0.49390243902439024, "train_speed(iter/s)": 0.671775 }, { "epoch": 1.935649715093612, "grad_norm": 3.187197685241699, "learning_rate": 6.738309465193293e-05, "loss": 2.3205780029296874, "memory(GiB)": 72.85, "step": 45180, "token_acc": 0.4984126984126984, "train_speed(iter/s)": 0.671776 }, { "epoch": 1.935863930422861, "grad_norm": 4.391221523284912, "learning_rate": 6.737678452489447e-05, "loss": 2.5885860443115236, "memory(GiB)": 72.85, "step": 45185, "token_acc": 0.4784172661870504, "train_speed(iter/s)": 0.671779 }, { "epoch": 1.9360781457521101, "grad_norm": 4.210840702056885, "learning_rate": 6.737047408305928e-05, "loss": 
2.3792446136474608, "memory(GiB)": 72.85, "step": 45190, "token_acc": 0.46875, "train_speed(iter/s)": 0.671788 }, { "epoch": 1.936292361081359, "grad_norm": 5.937564849853516, "learning_rate": 6.736416332654165e-05, "loss": 2.297628402709961, "memory(GiB)": 72.85, "step": 45195, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.671785 }, { "epoch": 1.936506576410608, "grad_norm": 3.781055450439453, "learning_rate": 6.735785225545592e-05, "loss": 2.646519088745117, "memory(GiB)": 72.85, "step": 45200, "token_acc": 0.4489795918367347, "train_speed(iter/s)": 0.671787 }, { "epoch": 1.936720791739857, "grad_norm": 3.803187370300293, "learning_rate": 6.735154086991641e-05, "loss": 1.970201301574707, "memory(GiB)": 72.85, "step": 45205, "token_acc": 0.5520833333333334, "train_speed(iter/s)": 0.671797 }, { "epoch": 1.9369350070691058, "grad_norm": 5.224987983703613, "learning_rate": 6.734522917003747e-05, "loss": 2.072958755493164, "memory(GiB)": 72.85, "step": 45210, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.671789 }, { "epoch": 1.9371492223983549, "grad_norm": 3.4814400672912598, "learning_rate": 6.73389171559334e-05, "loss": 2.729105758666992, "memory(GiB)": 72.85, "step": 45215, "token_acc": 0.465625, "train_speed(iter/s)": 0.671792 }, { "epoch": 1.9373634377276039, "grad_norm": 5.249056816101074, "learning_rate": 6.73326048277186e-05, "loss": 2.2023637771606444, "memory(GiB)": 72.85, "step": 45220, "token_acc": 0.5517241379310345, "train_speed(iter/s)": 0.67179 }, { "epoch": 1.9375776530568527, "grad_norm": 4.519927978515625, "learning_rate": 6.732629218550741e-05, "loss": 2.359175682067871, "memory(GiB)": 72.85, "step": 45225, "token_acc": 0.49019607843137253, "train_speed(iter/s)": 0.671808 }, { "epoch": 1.9377918683861017, "grad_norm": 4.269407749176025, "learning_rate": 6.731997922941417e-05, "loss": 2.332104301452637, "memory(GiB)": 72.85, "step": 45230, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.671796 }, { "epoch": 
1.9380060837153508, "grad_norm": 5.665274143218994, "learning_rate": 6.731366595955327e-05, "loss": 2.6246692657470705, "memory(GiB)": 72.85, "step": 45235, "token_acc": 0.42565597667638483, "train_speed(iter/s)": 0.671786 }, { "epoch": 1.9382202990445996, "grad_norm": 4.2915568351745605, "learning_rate": 6.730735237603909e-05, "loss": 2.24883975982666, "memory(GiB)": 72.85, "step": 45240, "token_acc": 0.4863013698630137, "train_speed(iter/s)": 0.671785 }, { "epoch": 1.9384345143738486, "grad_norm": 3.9584760665893555, "learning_rate": 6.730103847898595e-05, "loss": 2.4948678970336915, "memory(GiB)": 72.85, "step": 45245, "token_acc": 0.5034013605442177, "train_speed(iter/s)": 0.671782 }, { "epoch": 1.9386487297030977, "grad_norm": 3.8287203311920166, "learning_rate": 6.72947242685083e-05, "loss": 2.3082290649414063, "memory(GiB)": 72.85, "step": 45250, "token_acc": 0.4965277777777778, "train_speed(iter/s)": 0.671811 }, { "epoch": 1.9388629450323465, "grad_norm": 4.430871963500977, "learning_rate": 6.728840974472046e-05, "loss": 2.2534326553344726, "memory(GiB)": 72.85, "step": 45255, "token_acc": 0.48255813953488375, "train_speed(iter/s)": 0.671804 }, { "epoch": 1.9390771603615955, "grad_norm": 5.5724263191223145, "learning_rate": 6.728209490773687e-05, "loss": 2.2224056243896486, "memory(GiB)": 72.85, "step": 45260, "token_acc": 0.5311203319502075, "train_speed(iter/s)": 0.671809 }, { "epoch": 1.9392913756908445, "grad_norm": 4.485770225524902, "learning_rate": 6.727577975767193e-05, "loss": 2.2124452590942383, "memory(GiB)": 72.85, "step": 45265, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.671802 }, { "epoch": 1.9395055910200933, "grad_norm": 4.625151634216309, "learning_rate": 6.726946429464003e-05, "loss": 2.3377904891967773, "memory(GiB)": 72.85, "step": 45270, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.671817 }, { "epoch": 1.9397198063493424, "grad_norm": 4.5717949867248535, "learning_rate": 6.726314851875559e-05, "loss": 
2.213501739501953, "memory(GiB)": 72.85, "step": 45275, "token_acc": 0.4901315789473684, "train_speed(iter/s)": 0.671827 }, { "epoch": 1.9399340216785914, "grad_norm": 3.5846893787384033, "learning_rate": 6.725683243013301e-05, "loss": 2.4758651733398436, "memory(GiB)": 72.85, "step": 45280, "token_acc": 0.5, "train_speed(iter/s)": 0.671838 }, { "epoch": 1.9401482370078402, "grad_norm": 3.843966007232666, "learning_rate": 6.725051602888672e-05, "loss": 2.23266544342041, "memory(GiB)": 72.85, "step": 45285, "token_acc": 0.5364963503649635, "train_speed(iter/s)": 0.671849 }, { "epoch": 1.9403624523370893, "grad_norm": 3.793273687362671, "learning_rate": 6.724419931513117e-05, "loss": 2.3007225036621093, "memory(GiB)": 72.85, "step": 45290, "token_acc": 0.5317220543806647, "train_speed(iter/s)": 0.671821 }, { "epoch": 1.9405766676663383, "grad_norm": 5.000503063201904, "learning_rate": 6.723788228898076e-05, "loss": 2.2294775009155274, "memory(GiB)": 72.85, "step": 45295, "token_acc": 0.5440613026819924, "train_speed(iter/s)": 0.671808 }, { "epoch": 1.940790882995587, "grad_norm": 3.6409363746643066, "learning_rate": 6.723156495054993e-05, "loss": 2.372293472290039, "memory(GiB)": 72.85, "step": 45300, "token_acc": 0.5, "train_speed(iter/s)": 0.671827 }, { "epoch": 1.9410050983248361, "grad_norm": 5.540421009063721, "learning_rate": 6.722524729995315e-05, "loss": 2.391459083557129, "memory(GiB)": 72.85, "step": 45305, "token_acc": 0.49800796812749004, "train_speed(iter/s)": 0.671828 }, { "epoch": 1.9412193136540852, "grad_norm": 3.8994007110595703, "learning_rate": 6.721892933730486e-05, "loss": 2.3489490509033204, "memory(GiB)": 72.85, "step": 45310, "token_acc": 0.5030674846625767, "train_speed(iter/s)": 0.671829 }, { "epoch": 1.941433528983334, "grad_norm": 4.522304058074951, "learning_rate": 6.72126110627195e-05, "loss": 2.4057804107666017, "memory(GiB)": 72.85, "step": 45315, "token_acc": 0.4785276073619632, "train_speed(iter/s)": 0.671814 }, { "epoch": 
1.941647744312583, "grad_norm": 3.516197443008423, "learning_rate": 6.720629247631155e-05, "loss": 2.398727035522461, "memory(GiB)": 72.85, "step": 45320, "token_acc": 0.498220640569395, "train_speed(iter/s)": 0.671819 }, { "epoch": 1.941861959641832, "grad_norm": 4.504734992980957, "learning_rate": 6.719997357819548e-05, "loss": 2.156310272216797, "memory(GiB)": 72.85, "step": 45325, "token_acc": 0.5130111524163569, "train_speed(iter/s)": 0.671827 }, { "epoch": 1.9420761749710809, "grad_norm": 3.375957727432251, "learning_rate": 6.719365436848574e-05, "loss": 2.3987920761108397, "memory(GiB)": 72.85, "step": 45330, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.671841 }, { "epoch": 1.94229039030033, "grad_norm": 4.389821529388428, "learning_rate": 6.718733484729683e-05, "loss": 2.2035871505737306, "memory(GiB)": 72.85, "step": 45335, "token_acc": 0.513595166163142, "train_speed(iter/s)": 0.671862 }, { "epoch": 1.942504605629579, "grad_norm": 3.554668426513672, "learning_rate": 6.718101501474323e-05, "loss": 2.3724842071533203, "memory(GiB)": 72.85, "step": 45340, "token_acc": 0.49264705882352944, "train_speed(iter/s)": 0.671873 }, { "epoch": 1.9427188209588278, "grad_norm": 3.7986695766448975, "learning_rate": 6.717469487093941e-05, "loss": 2.253508377075195, "memory(GiB)": 72.85, "step": 45345, "token_acc": 0.5127388535031847, "train_speed(iter/s)": 0.671854 }, { "epoch": 1.9429330362880768, "grad_norm": 4.092241287231445, "learning_rate": 6.71683744159999e-05, "loss": 2.329769515991211, "memory(GiB)": 72.85, "step": 45350, "token_acc": 0.47191011235955055, "train_speed(iter/s)": 0.671849 }, { "epoch": 1.9431472516173258, "grad_norm": 4.755707263946533, "learning_rate": 6.716205365003918e-05, "loss": 2.3366722106933593, "memory(GiB)": 72.85, "step": 45355, "token_acc": 0.4928571428571429, "train_speed(iter/s)": 0.671832 }, { "epoch": 1.9433614669465746, "grad_norm": 4.164011001586914, "learning_rate": 6.715573257317174e-05, "loss": 2.3451206207275392, 
"memory(GiB)": 72.85, "step": 45360, "token_acc": 0.5289855072463768, "train_speed(iter/s)": 0.671848 }, { "epoch": 1.9435756822758237, "grad_norm": 3.487154483795166, "learning_rate": 6.714941118551213e-05, "loss": 2.499978256225586, "memory(GiB)": 72.85, "step": 45365, "token_acc": 0.48563218390804597, "train_speed(iter/s)": 0.671837 }, { "epoch": 1.9437898976050727, "grad_norm": 2.7529120445251465, "learning_rate": 6.714308948717484e-05, "loss": 2.291146659851074, "memory(GiB)": 72.85, "step": 45370, "token_acc": 0.5084175084175084, "train_speed(iter/s)": 0.671823 }, { "epoch": 1.9440041129343215, "grad_norm": 3.8405354022979736, "learning_rate": 6.71367674782744e-05, "loss": 2.398030662536621, "memory(GiB)": 72.85, "step": 45375, "token_acc": 0.51, "train_speed(iter/s)": 0.671828 }, { "epoch": 1.9442183282635705, "grad_norm": 5.004117012023926, "learning_rate": 6.713044515892535e-05, "loss": 2.2242387771606444, "memory(GiB)": 72.85, "step": 45380, "token_acc": 0.48828125, "train_speed(iter/s)": 0.671813 }, { "epoch": 1.9444325435928196, "grad_norm": 4.454989910125732, "learning_rate": 6.712412252924222e-05, "loss": 2.380812072753906, "memory(GiB)": 72.85, "step": 45385, "token_acc": 0.504225352112676, "train_speed(iter/s)": 0.67183 }, { "epoch": 1.9446467589220684, "grad_norm": 4.519231796264648, "learning_rate": 6.711779958933952e-05, "loss": 2.3342269897460937, "memory(GiB)": 72.85, "step": 45390, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.671848 }, { "epoch": 1.9448609742513174, "grad_norm": 3.955084800720215, "learning_rate": 6.711147633933186e-05, "loss": 2.097136878967285, "memory(GiB)": 72.85, "step": 45395, "token_acc": 0.5596330275229358, "train_speed(iter/s)": 0.671851 }, { "epoch": 1.9450751895805665, "grad_norm": 4.279021263122559, "learning_rate": 6.710515277933373e-05, "loss": 2.663913536071777, "memory(GiB)": 72.85, "step": 45400, "token_acc": 0.4457831325301205, "train_speed(iter/s)": 0.671867 }, { "epoch": 1.9452894049098153, 
"grad_norm": 5.157720565795898, "learning_rate": 6.709882890945971e-05, "loss": 2.441811370849609, "memory(GiB)": 72.85, "step": 45405, "token_acc": 0.5, "train_speed(iter/s)": 0.671873 }, { "epoch": 1.9455036202390643, "grad_norm": 4.029540061950684, "learning_rate": 6.709250472982438e-05, "loss": 2.4863775253295897, "memory(GiB)": 72.85, "step": 45410, "token_acc": 0.4774436090225564, "train_speed(iter/s)": 0.671876 }, { "epoch": 1.9457178355683133, "grad_norm": 3.8298659324645996, "learning_rate": 6.708618024054227e-05, "loss": 2.389844512939453, "memory(GiB)": 72.85, "step": 45415, "token_acc": 0.5173745173745173, "train_speed(iter/s)": 0.671873 }, { "epoch": 1.9459320508975622, "grad_norm": 5.057129859924316, "learning_rate": 6.7079855441728e-05, "loss": 2.501119613647461, "memory(GiB)": 72.85, "step": 45420, "token_acc": 0.48771929824561405, "train_speed(iter/s)": 0.671888 }, { "epoch": 1.9461462662268112, "grad_norm": 5.635359287261963, "learning_rate": 6.707353033349613e-05, "loss": 2.4803863525390626, "memory(GiB)": 72.85, "step": 45425, "token_acc": 0.47692307692307695, "train_speed(iter/s)": 0.671863 }, { "epoch": 1.9463604815560602, "grad_norm": 4.646702766418457, "learning_rate": 6.706720491596122e-05, "loss": 2.2127586364746095, "memory(GiB)": 72.85, "step": 45430, "token_acc": 0.5057915057915058, "train_speed(iter/s)": 0.671863 }, { "epoch": 1.946574696885309, "grad_norm": 5.906876564025879, "learning_rate": 6.706087918923789e-05, "loss": 2.568551254272461, "memory(GiB)": 72.85, "step": 45435, "token_acc": 0.47005988023952094, "train_speed(iter/s)": 0.671866 }, { "epoch": 1.946788912214558, "grad_norm": 3.513234853744507, "learning_rate": 6.705455315344073e-05, "loss": 2.5405601501464843, "memory(GiB)": 72.85, "step": 45440, "token_acc": 0.46630727762803237, "train_speed(iter/s)": 0.671874 }, { "epoch": 1.947003127543807, "grad_norm": 5.584967613220215, "learning_rate": 6.704822680868434e-05, "loss": 2.440843391418457, "memory(GiB)": 72.85, "step": 
45445, "token_acc": 0.49070631970260226, "train_speed(iter/s)": 0.67188 }, { "epoch": 1.947217342873056, "grad_norm": 4.462546348571777, "learning_rate": 6.704190015508334e-05, "loss": 2.113016891479492, "memory(GiB)": 72.85, "step": 45450, "token_acc": 0.5395189003436426, "train_speed(iter/s)": 0.67188 }, { "epoch": 1.947431558202305, "grad_norm": 3.1439096927642822, "learning_rate": 6.703557319275232e-05, "loss": 2.479811668395996, "memory(GiB)": 72.85, "step": 45455, "token_acc": 0.47003154574132494, "train_speed(iter/s)": 0.67187 }, { "epoch": 1.947645773531554, "grad_norm": 4.803674697875977, "learning_rate": 6.702924592180592e-05, "loss": 2.680713653564453, "memory(GiB)": 72.85, "step": 45460, "token_acc": 0.4355400696864111, "train_speed(iter/s)": 0.671885 }, { "epoch": 1.9478599888608028, "grad_norm": 4.405099868774414, "learning_rate": 6.702291834235874e-05, "loss": 2.658055877685547, "memory(GiB)": 72.85, "step": 45465, "token_acc": 0.4606413994169096, "train_speed(iter/s)": 0.671886 }, { "epoch": 1.9480742041900518, "grad_norm": 4.454670429229736, "learning_rate": 6.701659045452545e-05, "loss": 2.5323657989501953, "memory(GiB)": 72.85, "step": 45470, "token_acc": 0.46864686468646866, "train_speed(iter/s)": 0.67188 }, { "epoch": 1.9482884195193009, "grad_norm": 4.002155303955078, "learning_rate": 6.701026225842064e-05, "loss": 2.4820667266845704, "memory(GiB)": 72.85, "step": 45475, "token_acc": 0.468944099378882, "train_speed(iter/s)": 0.671896 }, { "epoch": 1.9485026348485497, "grad_norm": 4.478905200958252, "learning_rate": 6.700393375415898e-05, "loss": 2.204711151123047, "memory(GiB)": 72.85, "step": 45480, "token_acc": 0.528957528957529, "train_speed(iter/s)": 0.671899 }, { "epoch": 1.9487168501777987, "grad_norm": 3.6941440105438232, "learning_rate": 6.699760494185511e-05, "loss": 2.5796106338500975, "memory(GiB)": 72.85, "step": 45485, "token_acc": 0.4826388888888889, "train_speed(iter/s)": 0.671902 }, { "epoch": 1.9489310655070478, "grad_norm": 
4.18190336227417, "learning_rate": 6.699127582162366e-05, "loss": 2.2179595947265627, "memory(GiB)": 72.85, "step": 45490, "token_acc": 0.48698884758364314, "train_speed(iter/s)": 0.671883 }, { "epoch": 1.9491452808362966, "grad_norm": 5.098371982574463, "learning_rate": 6.698494639357934e-05, "loss": 2.502609443664551, "memory(GiB)": 72.85, "step": 45495, "token_acc": 0.46621621621621623, "train_speed(iter/s)": 0.671883 }, { "epoch": 1.9493594961655456, "grad_norm": 4.699001312255859, "learning_rate": 6.697861665783678e-05, "loss": 2.5858667373657225, "memory(GiB)": 72.85, "step": 45500, "token_acc": 0.47863247863247865, "train_speed(iter/s)": 0.671844 }, { "epoch": 1.9493594961655456, "eval_loss": 2.078249216079712, "eval_runtime": 14.9092, "eval_samples_per_second": 6.707, "eval_steps_per_second": 6.707, "eval_token_acc": 0.4951048951048951, "step": 45500 }, { "epoch": 1.9495737114947946, "grad_norm": 5.268234729766846, "learning_rate": 6.697228661451064e-05, "loss": 2.4347597122192384, "memory(GiB)": 72.85, "step": 45505, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.671678 }, { "epoch": 1.9497879268240434, "grad_norm": 4.0838470458984375, "learning_rate": 6.696595626371561e-05, "loss": 2.2337785720825196, "memory(GiB)": 72.85, "step": 45510, "token_acc": 0.5, "train_speed(iter/s)": 0.671676 }, { "epoch": 1.9500021421532925, "grad_norm": 4.764203071594238, "learning_rate": 6.695962560556637e-05, "loss": 2.0820980072021484, "memory(GiB)": 72.85, "step": 45515, "token_acc": 0.52734375, "train_speed(iter/s)": 0.671677 }, { "epoch": 1.9502163574825415, "grad_norm": 4.989162445068359, "learning_rate": 6.695329464017759e-05, "loss": 2.2537574768066406, "memory(GiB)": 72.85, "step": 45520, "token_acc": 0.5397489539748954, "train_speed(iter/s)": 0.671682 }, { "epoch": 1.9504305728117903, "grad_norm": 5.218460559844971, "learning_rate": 6.694696336766398e-05, "loss": 2.3605506896972654, "memory(GiB)": 72.85, "step": 45525, "token_acc": 0.5313531353135313, 
"train_speed(iter/s)": 0.67169 }, { "epoch": 1.9506447881410394, "grad_norm": 4.11811637878418, "learning_rate": 6.694063178814024e-05, "loss": 2.3605907440185545, "memory(GiB)": 72.85, "step": 45530, "token_acc": 0.46296296296296297, "train_speed(iter/s)": 0.671691 }, { "epoch": 1.9508590034702884, "grad_norm": 4.785073757171631, "learning_rate": 6.693429990172107e-05, "loss": 2.4288494110107424, "memory(GiB)": 72.85, "step": 45535, "token_acc": 0.4707792207792208, "train_speed(iter/s)": 0.671681 }, { "epoch": 1.9510732187995372, "grad_norm": 4.409539699554443, "learning_rate": 6.692796770852115e-05, "loss": 2.3947187423706056, "memory(GiB)": 72.85, "step": 45540, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.671698 }, { "epoch": 1.9512874341287862, "grad_norm": 6.670198440551758, "learning_rate": 6.692163520865523e-05, "loss": 2.3328296661376955, "memory(GiB)": 72.85, "step": 45545, "token_acc": 0.5, "train_speed(iter/s)": 0.671676 }, { "epoch": 1.9515016494580353, "grad_norm": 3.6524598598480225, "learning_rate": 6.691530240223801e-05, "loss": 2.4471014022827147, "memory(GiB)": 72.85, "step": 45550, "token_acc": 0.490625, "train_speed(iter/s)": 0.671667 }, { "epoch": 1.951715864787284, "grad_norm": 3.277512788772583, "learning_rate": 6.690896928938422e-05, "loss": 2.5561830520629885, "memory(GiB)": 72.85, "step": 45555, "token_acc": 0.4625, "train_speed(iter/s)": 0.671654 }, { "epoch": 1.9519300801165331, "grad_norm": 3.9415674209594727, "learning_rate": 6.690263587020857e-05, "loss": 2.1975326538085938, "memory(GiB)": 72.85, "step": 45560, "token_acc": 0.5173611111111112, "train_speed(iter/s)": 0.67165 }, { "epoch": 1.9521442954457822, "grad_norm": 3.97548246383667, "learning_rate": 6.689630214482585e-05, "loss": 2.5857475280761717, "memory(GiB)": 72.85, "step": 45565, "token_acc": 0.47987616099071206, "train_speed(iter/s)": 0.671663 }, { "epoch": 1.952358510775031, "grad_norm": 3.7981128692626953, "learning_rate": 6.688996811335076e-05, "loss": 
2.3360832214355467, "memory(GiB)": 72.85, "step": 45570, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.671674 }, { "epoch": 1.95257272610428, "grad_norm": 4.212635517120361, "learning_rate": 6.688363377589803e-05, "loss": 2.4281795501708983, "memory(GiB)": 72.85, "step": 45575, "token_acc": 0.5, "train_speed(iter/s)": 0.671681 }, { "epoch": 1.952786941433529, "grad_norm": 4.761507034301758, "learning_rate": 6.687729913258245e-05, "loss": 2.5694147109985352, "memory(GiB)": 72.85, "step": 45580, "token_acc": 0.43567251461988304, "train_speed(iter/s)": 0.671685 }, { "epoch": 1.9530011567627779, "grad_norm": 4.8938727378845215, "learning_rate": 6.687096418351877e-05, "loss": 2.276418113708496, "memory(GiB)": 72.85, "step": 45585, "token_acc": 0.4646017699115044, "train_speed(iter/s)": 0.671661 }, { "epoch": 1.953215372092027, "grad_norm": 5.673743724822998, "learning_rate": 6.686462892882173e-05, "loss": 2.414280128479004, "memory(GiB)": 72.85, "step": 45590, "token_acc": 0.4627831715210356, "train_speed(iter/s)": 0.671647 }, { "epoch": 1.953429587421276, "grad_norm": 5.157188415527344, "learning_rate": 6.685829336860613e-05, "loss": 2.398349571228027, "memory(GiB)": 72.85, "step": 45595, "token_acc": 0.4696485623003195, "train_speed(iter/s)": 0.671653 }, { "epoch": 1.9536438027505247, "grad_norm": 4.718594551086426, "learning_rate": 6.685195750298674e-05, "loss": 2.4488336563110353, "memory(GiB)": 72.85, "step": 45600, "token_acc": 0.4632352941176471, "train_speed(iter/s)": 0.671667 }, { "epoch": 1.9538580180797738, "grad_norm": 4.54071569442749, "learning_rate": 6.684562133207832e-05, "loss": 2.434905433654785, "memory(GiB)": 72.85, "step": 45605, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.671657 }, { "epoch": 1.9540722334090228, "grad_norm": 4.656367301940918, "learning_rate": 6.683928485599566e-05, "loss": 2.641051483154297, "memory(GiB)": 72.85, "step": 45610, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.671668 }, { "epoch": 
1.9542864487382716, "grad_norm": 5.077322959899902, "learning_rate": 6.683294807485357e-05, "loss": 2.597019577026367, "memory(GiB)": 72.85, "step": 45615, "token_acc": 0.46179401993355484, "train_speed(iter/s)": 0.671672 }, { "epoch": 1.9545006640675207, "grad_norm": 3.540815830230713, "learning_rate": 6.682661098876684e-05, "loss": 2.180709457397461, "memory(GiB)": 72.85, "step": 45620, "token_acc": 0.5328185328185329, "train_speed(iter/s)": 0.671667 }, { "epoch": 1.9547148793967697, "grad_norm": 3.886194944381714, "learning_rate": 6.682027359785024e-05, "loss": 2.337677001953125, "memory(GiB)": 72.85, "step": 45625, "token_acc": 0.4775641025641026, "train_speed(iter/s)": 0.67166 }, { "epoch": 1.9549290947260185, "grad_norm": 5.171553134918213, "learning_rate": 6.681393590221862e-05, "loss": 2.251671028137207, "memory(GiB)": 72.85, "step": 45630, "token_acc": 0.4896551724137931, "train_speed(iter/s)": 0.671666 }, { "epoch": 1.9551433100552675, "grad_norm": 6.461792945861816, "learning_rate": 6.680759790198679e-05, "loss": 2.7258169174194338, "memory(GiB)": 72.85, "step": 45635, "token_acc": 0.46366782006920415, "train_speed(iter/s)": 0.671678 }, { "epoch": 1.9553575253845166, "grad_norm": 3.821254014968872, "learning_rate": 6.680125959726956e-05, "loss": 2.2412899017333983, "memory(GiB)": 72.85, "step": 45640, "token_acc": 0.5487804878048781, "train_speed(iter/s)": 0.671687 }, { "epoch": 1.9555717407137654, "grad_norm": 4.47216796875, "learning_rate": 6.679492098818174e-05, "loss": 2.454344940185547, "memory(GiB)": 72.85, "step": 45645, "token_acc": 0.44954128440366975, "train_speed(iter/s)": 0.671696 }, { "epoch": 1.9557859560430144, "grad_norm": 4.138546466827393, "learning_rate": 6.678858207483816e-05, "loss": 2.3253456115722657, "memory(GiB)": 72.85, "step": 45650, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.671698 }, { "epoch": 1.9560001713722635, "grad_norm": 4.51683235168457, "learning_rate": 6.678224285735368e-05, "loss": 2.211712646484375, 
"memory(GiB)": 72.85, "step": 45655, "token_acc": 0.531496062992126, "train_speed(iter/s)": 0.671695 }, { "epoch": 1.9562143867015123, "grad_norm": 3.9301834106445312, "learning_rate": 6.677590333584312e-05, "loss": 2.2903690338134766, "memory(GiB)": 72.85, "step": 45660, "token_acc": 0.47840531561461797, "train_speed(iter/s)": 0.671679 }, { "epoch": 1.9564286020307613, "grad_norm": 4.234980583190918, "learning_rate": 6.676956351042135e-05, "loss": 2.364483451843262, "memory(GiB)": 72.85, "step": 45665, "token_acc": 0.5274725274725275, "train_speed(iter/s)": 0.67168 }, { "epoch": 1.9566428173600103, "grad_norm": 4.193456649780273, "learning_rate": 6.67632233812032e-05, "loss": 2.734293556213379, "memory(GiB)": 72.85, "step": 45670, "token_acc": 0.4582278481012658, "train_speed(iter/s)": 0.671668 }, { "epoch": 1.9568570326892591, "grad_norm": 4.103916645050049, "learning_rate": 6.675688294830351e-05, "loss": 2.4274658203125, "memory(GiB)": 72.85, "step": 45675, "token_acc": 0.4956268221574344, "train_speed(iter/s)": 0.671672 }, { "epoch": 1.9570712480185082, "grad_norm": 3.772822618484497, "learning_rate": 6.675054221183719e-05, "loss": 2.159359169006348, "memory(GiB)": 72.85, "step": 45680, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.671678 }, { "epoch": 1.9572854633477572, "grad_norm": 3.5755774974823, "learning_rate": 6.67442011719191e-05, "loss": 2.2777528762817383, "memory(GiB)": 72.85, "step": 45685, "token_acc": 0.4797843665768194, "train_speed(iter/s)": 0.67168 }, { "epoch": 1.957499678677006, "grad_norm": 4.433387279510498, "learning_rate": 6.673785982866407e-05, "loss": 2.336104965209961, "memory(GiB)": 72.85, "step": 45690, "token_acc": 0.52, "train_speed(iter/s)": 0.671693 }, { "epoch": 1.957713894006255, "grad_norm": 5.643434047698975, "learning_rate": 6.673151818218701e-05, "loss": 2.2024965286254883, "memory(GiB)": 72.85, "step": 45695, "token_acc": 0.5, "train_speed(iter/s)": 0.671696 }, { "epoch": 1.957928109335504, "grad_norm": 
4.067555904388428, "learning_rate": 6.672517623260282e-05, "loss": 2.58143310546875, "memory(GiB)": 72.85, "step": 45700, "token_acc": 0.4797507788161994, "train_speed(iter/s)": 0.671696 }, { "epoch": 1.958142324664753, "grad_norm": 3.971015691757202, "learning_rate": 6.671883398002635e-05, "loss": 2.162571334838867, "memory(GiB)": 72.85, "step": 45705, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.671714 }, { "epoch": 1.958356539994002, "grad_norm": 6.591923236846924, "learning_rate": 6.671249142457254e-05, "loss": 2.197010612487793, "memory(GiB)": 72.85, "step": 45710, "token_acc": 0.4908424908424908, "train_speed(iter/s)": 0.671722 }, { "epoch": 1.958570755323251, "grad_norm": 4.21871280670166, "learning_rate": 6.670614856635626e-05, "loss": 2.2083612442016602, "memory(GiB)": 72.85, "step": 45715, "token_acc": 0.525096525096525, "train_speed(iter/s)": 0.671725 }, { "epoch": 1.9587849706524998, "grad_norm": 5.692894458770752, "learning_rate": 6.669980540549243e-05, "loss": 2.3792707443237306, "memory(GiB)": 72.85, "step": 45720, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.671726 }, { "epoch": 1.9589991859817488, "grad_norm": 4.536086082458496, "learning_rate": 6.669346194209596e-05, "loss": 2.194820594787598, "memory(GiB)": 72.85, "step": 45725, "token_acc": 0.5210843373493976, "train_speed(iter/s)": 0.67172 }, { "epoch": 1.9592134013109979, "grad_norm": 5.03751802444458, "learning_rate": 6.668711817628177e-05, "loss": 2.044844627380371, "memory(GiB)": 72.85, "step": 45730, "token_acc": 0.55, "train_speed(iter/s)": 0.671714 }, { "epoch": 1.9594276166402467, "grad_norm": 4.3045454025268555, "learning_rate": 6.668077410816477e-05, "loss": 2.4453020095825195, "memory(GiB)": 72.85, "step": 45735, "token_acc": 0.4833948339483395, "train_speed(iter/s)": 0.671718 }, { "epoch": 1.9596418319694957, "grad_norm": 4.0404133796691895, "learning_rate": 6.66744297378599e-05, "loss": 2.282180404663086, "memory(GiB)": 72.85, "step": 45740, "token_acc": 
0.4966442953020134, "train_speed(iter/s)": 0.671722 }, { "epoch": 1.9598560472987447, "grad_norm": 3.7639122009277344, "learning_rate": 6.66680850654821e-05, "loss": 2.6129722595214844, "memory(GiB)": 72.85, "step": 45745, "token_acc": 0.48464163822525597, "train_speed(iter/s)": 0.671722 }, { "epoch": 1.9600702626279936, "grad_norm": 3.8555407524108887, "learning_rate": 6.666174009114629e-05, "loss": 2.53759880065918, "memory(GiB)": 72.85, "step": 45750, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.671733 }, { "epoch": 1.9602844779572426, "grad_norm": 3.8360512256622314, "learning_rate": 6.665539481496745e-05, "loss": 2.4597213745117186, "memory(GiB)": 72.85, "step": 45755, "token_acc": 0.4612546125461255, "train_speed(iter/s)": 0.67175 }, { "epoch": 1.9604986932864916, "grad_norm": 4.206702709197998, "learning_rate": 6.66490492370605e-05, "loss": 2.6158779144287108, "memory(GiB)": 72.85, "step": 45760, "token_acc": 0.4712328767123288, "train_speed(iter/s)": 0.671761 }, { "epoch": 1.9607129086157404, "grad_norm": 4.528964996337891, "learning_rate": 6.66427033575404e-05, "loss": 2.428974151611328, "memory(GiB)": 72.85, "step": 45765, "token_acc": 0.48307692307692307, "train_speed(iter/s)": 0.671756 }, { "epoch": 1.9609271239449895, "grad_norm": 5.12030029296875, "learning_rate": 6.663635717652213e-05, "loss": 2.4763763427734373, "memory(GiB)": 72.85, "step": 45770, "token_acc": 0.44857142857142857, "train_speed(iter/s)": 0.671763 }, { "epoch": 1.9611413392742385, "grad_norm": 4.683897495269775, "learning_rate": 6.663001069412063e-05, "loss": 2.4330413818359373, "memory(GiB)": 72.85, "step": 45775, "token_acc": 0.5209790209790209, "train_speed(iter/s)": 0.671774 }, { "epoch": 1.9613555546034873, "grad_norm": 4.253027439117432, "learning_rate": 6.662366391045088e-05, "loss": 2.4948097229003907, "memory(GiB)": 72.85, "step": 45780, "token_acc": 0.48161764705882354, "train_speed(iter/s)": 0.67177 }, { "epoch": 1.9615697699327364, "grad_norm": 
4.2453837394714355, "learning_rate": 6.661731682562788e-05, "loss": 2.354289436340332, "memory(GiB)": 72.85, "step": 45785, "token_acc": 0.48639455782312924, "train_speed(iter/s)": 0.67177 }, { "epoch": 1.9617839852619854, "grad_norm": 4.039902687072754, "learning_rate": 6.66109694397666e-05, "loss": 2.4443943023681642, "memory(GiB)": 72.85, "step": 45790, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.671765 }, { "epoch": 1.9619982005912342, "grad_norm": 3.755032777786255, "learning_rate": 6.660462175298201e-05, "loss": 2.7141342163085938, "memory(GiB)": 72.85, "step": 45795, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.671774 }, { "epoch": 1.9622124159204832, "grad_norm": 3.912283182144165, "learning_rate": 6.659827376538913e-05, "loss": 2.3277986526489256, "memory(GiB)": 72.85, "step": 45800, "token_acc": 0.4794007490636704, "train_speed(iter/s)": 0.671779 }, { "epoch": 1.9624266312497323, "grad_norm": 4.129295349121094, "learning_rate": 6.659192547710294e-05, "loss": 2.145241928100586, "memory(GiB)": 72.85, "step": 45805, "token_acc": 0.5341365461847389, "train_speed(iter/s)": 0.671774 }, { "epoch": 1.962640846578981, "grad_norm": 4.208159923553467, "learning_rate": 6.658557688823846e-05, "loss": 2.1706964492797853, "memory(GiB)": 72.85, "step": 45810, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.671763 }, { "epoch": 1.9628550619082301, "grad_norm": 5.498264312744141, "learning_rate": 6.657922799891071e-05, "loss": 2.3256742477416994, "memory(GiB)": 72.85, "step": 45815, "token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.671776 }, { "epoch": 1.9630692772374791, "grad_norm": 5.032858848571777, "learning_rate": 6.657287880923468e-05, "loss": 2.6061588287353517, "memory(GiB)": 72.85, "step": 45820, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.671773 }, { "epoch": 1.963283492566728, "grad_norm": 4.103262424468994, "learning_rate": 6.65665293193254e-05, "loss": 2.1801334381103517, "memory(GiB)": 72.85, 
"step": 45825, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.671766 }, { "epoch": 1.963497707895977, "grad_norm": 4.013000011444092, "learning_rate": 6.656017952929792e-05, "loss": 2.465230369567871, "memory(GiB)": 72.85, "step": 45830, "token_acc": 0.45964912280701753, "train_speed(iter/s)": 0.671763 }, { "epoch": 1.963711923225226, "grad_norm": 3.7879021167755127, "learning_rate": 6.655382943926724e-05, "loss": 2.511137008666992, "memory(GiB)": 72.85, "step": 45835, "token_acc": 0.4613003095975232, "train_speed(iter/s)": 0.671769 }, { "epoch": 1.9639261385544748, "grad_norm": 6.843696594238281, "learning_rate": 6.65474790493484e-05, "loss": 2.550147819519043, "memory(GiB)": 72.85, "step": 45840, "token_acc": 0.4714828897338403, "train_speed(iter/s)": 0.67176 }, { "epoch": 1.9641403538837239, "grad_norm": 3.8864586353302, "learning_rate": 6.654112835965648e-05, "loss": 2.477253532409668, "memory(GiB)": 72.85, "step": 45845, "token_acc": 0.48502994011976047, "train_speed(iter/s)": 0.67175 }, { "epoch": 1.964354569212973, "grad_norm": 4.6952385902404785, "learning_rate": 6.653477737030647e-05, "loss": 2.363052749633789, "memory(GiB)": 72.85, "step": 45850, "token_acc": 0.4845679012345679, "train_speed(iter/s)": 0.671765 }, { "epoch": 1.9645687845422217, "grad_norm": 3.575216770172119, "learning_rate": 6.652842608141348e-05, "loss": 2.2858741760253904, "memory(GiB)": 72.85, "step": 45855, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.671773 }, { "epoch": 1.9647829998714708, "grad_norm": 5.837158203125, "learning_rate": 6.652207449309255e-05, "loss": 2.450822639465332, "memory(GiB)": 72.85, "step": 45860, "token_acc": 0.5220883534136547, "train_speed(iter/s)": 0.671787 }, { "epoch": 1.9649972152007198, "grad_norm": 4.7243266105651855, "learning_rate": 6.651572260545871e-05, "loss": 2.538032341003418, "memory(GiB)": 72.85, "step": 45865, "token_acc": 0.4755700325732899, "train_speed(iter/s)": 0.671808 }, { "epoch": 1.9652114305299686, 
"grad_norm": 4.6965718269348145, "learning_rate": 6.650937041862711e-05, "loss": 2.4341381072998045, "memory(GiB)": 72.85, "step": 45870, "token_acc": 0.4866666666666667, "train_speed(iter/s)": 0.671807 }, { "epoch": 1.9654256458592176, "grad_norm": 4.8802080154418945, "learning_rate": 6.650301793271275e-05, "loss": 2.425838088989258, "memory(GiB)": 72.85, "step": 45875, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.671818 }, { "epoch": 1.9656398611884667, "grad_norm": 4.069157123565674, "learning_rate": 6.649666514783074e-05, "loss": 2.208962821960449, "memory(GiB)": 72.85, "step": 45880, "token_acc": 0.5114754098360655, "train_speed(iter/s)": 0.671832 }, { "epoch": 1.9658540765177155, "grad_norm": 4.338556289672852, "learning_rate": 6.649031206409616e-05, "loss": 2.254701614379883, "memory(GiB)": 72.85, "step": 45885, "token_acc": 0.49508196721311476, "train_speed(iter/s)": 0.671823 }, { "epoch": 1.9660682918469645, "grad_norm": 4.8430609703063965, "learning_rate": 6.648395868162411e-05, "loss": 2.610421371459961, "memory(GiB)": 72.85, "step": 45890, "token_acc": 0.4353312302839117, "train_speed(iter/s)": 0.671834 }, { "epoch": 1.9662825071762136, "grad_norm": 3.9849066734313965, "learning_rate": 6.64776050005297e-05, "loss": 2.3843273162841796, "memory(GiB)": 72.85, "step": 45895, "token_acc": 0.5019762845849802, "train_speed(iter/s)": 0.671809 }, { "epoch": 1.9664967225054624, "grad_norm": 4.779799461364746, "learning_rate": 6.647125102092801e-05, "loss": 2.3925737380981444, "memory(GiB)": 72.85, "step": 45900, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.671812 }, { "epoch": 1.9667109378347114, "grad_norm": 4.495355606079102, "learning_rate": 6.646489674293416e-05, "loss": 2.5623119354248045, "memory(GiB)": 72.85, "step": 45905, "token_acc": 0.4551971326164875, "train_speed(iter/s)": 0.671804 }, { "epoch": 1.9669251531639604, "grad_norm": 4.796465873718262, "learning_rate": 6.645854216666326e-05, "loss": 2.2749460220336912, 
"memory(GiB)": 72.85, "step": 45910, "token_acc": 0.4811715481171548, "train_speed(iter/s)": 0.671816 }, { "epoch": 1.9671393684932093, "grad_norm": 4.932728290557861, "learning_rate": 6.645218729223041e-05, "loss": 2.808140182495117, "memory(GiB)": 72.85, "step": 45915, "token_acc": 0.4329268292682927, "train_speed(iter/s)": 0.671803 }, { "epoch": 1.9673535838224583, "grad_norm": 3.702521324157715, "learning_rate": 6.644583211975077e-05, "loss": 2.4753082275390623, "memory(GiB)": 72.85, "step": 45920, "token_acc": 0.4641509433962264, "train_speed(iter/s)": 0.671795 }, { "epoch": 1.9675677991517073, "grad_norm": 3.856517791748047, "learning_rate": 6.643947664933946e-05, "loss": 2.3043590545654298, "memory(GiB)": 72.85, "step": 45925, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.671796 }, { "epoch": 1.9677820144809561, "grad_norm": 4.175832748413086, "learning_rate": 6.64331208811116e-05, "loss": 2.563961219787598, "memory(GiB)": 72.85, "step": 45930, "token_acc": 0.45125348189415043, "train_speed(iter/s)": 0.671795 }, { "epoch": 1.9679962298102052, "grad_norm": 4.001059532165527, "learning_rate": 6.642676481518234e-05, "loss": 2.2925559997558596, "memory(GiB)": 72.85, "step": 45935, "token_acc": 0.48201438848920863, "train_speed(iter/s)": 0.671788 }, { "epoch": 1.9682104451394542, "grad_norm": 3.5831456184387207, "learning_rate": 6.642040845166682e-05, "loss": 2.1846939086914063, "memory(GiB)": 72.85, "step": 45940, "token_acc": 0.5420875420875421, "train_speed(iter/s)": 0.671803 }, { "epoch": 1.968424660468703, "grad_norm": 6.526817798614502, "learning_rate": 6.641405179068022e-05, "loss": 2.2421199798583986, "memory(GiB)": 72.85, "step": 45945, "token_acc": 0.490272373540856, "train_speed(iter/s)": 0.671821 }, { "epoch": 1.968638875797952, "grad_norm": 4.244130611419678, "learning_rate": 6.640769483233764e-05, "loss": 2.5423118591308596, "memory(GiB)": 72.85, "step": 45950, "token_acc": 0.48333333333333334, "train_speed(iter/s)": 0.671829 }, { 
"epoch": 1.968853091127201, "grad_norm": 4.256565570831299, "learning_rate": 6.640133757675431e-05, "loss": 2.3063304901123045, "memory(GiB)": 72.85, "step": 45955, "token_acc": 0.49019607843137253, "train_speed(iter/s)": 0.67184 }, { "epoch": 1.96906730645645, "grad_norm": 4.4723944664001465, "learning_rate": 6.639498002404534e-05, "loss": 2.3593521118164062, "memory(GiB)": 72.85, "step": 45960, "token_acc": 0.5441176470588235, "train_speed(iter/s)": 0.671825 }, { "epoch": 1.969281521785699, "grad_norm": 5.123311996459961, "learning_rate": 6.638862217432594e-05, "loss": 2.3939952850341797, "memory(GiB)": 72.85, "step": 45965, "token_acc": 0.5125448028673835, "train_speed(iter/s)": 0.671821 }, { "epoch": 1.969495737114948, "grad_norm": 5.466601371765137, "learning_rate": 6.638226402771126e-05, "loss": 2.2861486434936524, "memory(GiB)": 72.85, "step": 45970, "token_acc": 0.5033333333333333, "train_speed(iter/s)": 0.671826 }, { "epoch": 1.9697099524441968, "grad_norm": 4.294203281402588, "learning_rate": 6.637590558431652e-05, "loss": 2.2352985382080077, "memory(GiB)": 72.85, "step": 45975, "token_acc": 0.4674329501915709, "train_speed(iter/s)": 0.671828 }, { "epoch": 1.969924167773446, "grad_norm": 5.860401153564453, "learning_rate": 6.636954684425688e-05, "loss": 2.422469711303711, "memory(GiB)": 72.85, "step": 45980, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.671832 }, { "epoch": 1.9701383831026948, "grad_norm": 4.977965831756592, "learning_rate": 6.636318780764754e-05, "loss": 2.3127229690551756, "memory(GiB)": 72.85, "step": 45985, "token_acc": 0.5054945054945055, "train_speed(iter/s)": 0.67186 }, { "epoch": 1.9703525984319437, "grad_norm": 5.679347991943359, "learning_rate": 6.635682847460371e-05, "loss": 2.4951282501220704, "memory(GiB)": 72.85, "step": 45990, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.671867 }, { "epoch": 1.970566813761193, "grad_norm": 3.986407518386841, "learning_rate": 6.635046884524057e-05, "loss": 
2.369822311401367, "memory(GiB)": 72.85, "step": 45995, "token_acc": 0.5257452574525745, "train_speed(iter/s)": 0.671881 }, { "epoch": 1.9707810290904417, "grad_norm": 4.279757976531982, "learning_rate": 6.634410891967337e-05, "loss": 2.109252166748047, "memory(GiB)": 72.85, "step": 46000, "token_acc": 0.5359477124183006, "train_speed(iter/s)": 0.671872 }, { "epoch": 1.9707810290904417, "eval_loss": 2.0089545249938965, "eval_runtime": 14.7479, "eval_samples_per_second": 6.781, "eval_steps_per_second": 6.781, "eval_token_acc": 0.5126404494382022, "step": 46000 }, { "epoch": 1.9709952444196905, "grad_norm": 3.7517526149749756, "learning_rate": 6.63377486980173e-05, "loss": 2.2453855514526366, "memory(GiB)": 72.85, "step": 46005, "token_acc": 0.501004016064257, "train_speed(iter/s)": 0.671709 }, { "epoch": 1.9712094597489398, "grad_norm": 3.753103017807007, "learning_rate": 6.633138818038757e-05, "loss": 2.321697998046875, "memory(GiB)": 72.85, "step": 46010, "token_acc": 0.5032679738562091, "train_speed(iter/s)": 0.671713 }, { "epoch": 1.9714236750781886, "grad_norm": 3.9433717727661133, "learning_rate": 6.632502736689945e-05, "loss": 2.566877555847168, "memory(GiB)": 72.85, "step": 46015, "token_acc": 0.46325878594249204, "train_speed(iter/s)": 0.671701 }, { "epoch": 1.9716378904074374, "grad_norm": 4.895465850830078, "learning_rate": 6.631866625766813e-05, "loss": 2.2796890258789064, "memory(GiB)": 72.85, "step": 46020, "token_acc": 0.5132450331125827, "train_speed(iter/s)": 0.671703 }, { "epoch": 1.9718521057366867, "grad_norm": 4.170074939727783, "learning_rate": 6.631230485280887e-05, "loss": 2.217002105712891, "memory(GiB)": 72.85, "step": 46025, "token_acc": 0.5059523809523809, "train_speed(iter/s)": 0.67171 }, { "epoch": 1.9720663210659355, "grad_norm": 5.222000598907471, "learning_rate": 6.63059431524369e-05, "loss": 2.178888130187988, "memory(GiB)": 72.85, "step": 46030, "token_acc": 0.536, "train_speed(iter/s)": 0.671711 }, { "epoch": 1.9722805363951843, 
"grad_norm": 5.620966911315918, "learning_rate": 6.629958115666748e-05, "loss": 2.049421501159668, "memory(GiB)": 72.85, "step": 46035, "token_acc": 0.5502392344497608, "train_speed(iter/s)": 0.671717 }, { "epoch": 1.9724947517244336, "grad_norm": 4.103930950164795, "learning_rate": 6.629321886561584e-05, "loss": 2.4001562118530275, "memory(GiB)": 72.85, "step": 46040, "token_acc": 0.4722222222222222, "train_speed(iter/s)": 0.671725 }, { "epoch": 1.9727089670536824, "grad_norm": 4.310683250427246, "learning_rate": 6.628685627939726e-05, "loss": 2.0912010192871096, "memory(GiB)": 72.85, "step": 46045, "token_acc": 0.5269230769230769, "train_speed(iter/s)": 0.671732 }, { "epoch": 1.9729231823829312, "grad_norm": 4.279469966888428, "learning_rate": 6.628049339812702e-05, "loss": 2.4538219451904295, "memory(GiB)": 72.85, "step": 46050, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.671729 }, { "epoch": 1.9731373977121804, "grad_norm": 5.758904457092285, "learning_rate": 6.627413022192035e-05, "loss": 2.684976005554199, "memory(GiB)": 72.85, "step": 46055, "token_acc": 0.4234875444839858, "train_speed(iter/s)": 0.67172 }, { "epoch": 1.9733516130414293, "grad_norm": 3.5329031944274902, "learning_rate": 6.626776675089256e-05, "loss": 2.4386245727539064, "memory(GiB)": 72.85, "step": 46060, "token_acc": 0.5239852398523985, "train_speed(iter/s)": 0.671714 }, { "epoch": 1.973565828370678, "grad_norm": 4.802579402923584, "learning_rate": 6.626140298515891e-05, "loss": 2.2327749252319338, "memory(GiB)": 72.85, "step": 46065, "token_acc": 0.47540983606557374, "train_speed(iter/s)": 0.671714 }, { "epoch": 1.9737800436999273, "grad_norm": 4.035881042480469, "learning_rate": 6.625503892483466e-05, "loss": 2.324433135986328, "memory(GiB)": 72.85, "step": 46070, "token_acc": 0.49159663865546216, "train_speed(iter/s)": 0.67173 }, { "epoch": 1.9739942590291761, "grad_norm": 4.349686622619629, "learning_rate": 6.624867457003517e-05, "loss": 2.2229408264160155, 
"memory(GiB)": 72.85, "step": 46075, "token_acc": 0.5152542372881356, "train_speed(iter/s)": 0.671747 }, { "epoch": 1.974208474358425, "grad_norm": 4.264474868774414, "learning_rate": 6.624230992087567e-05, "loss": 2.3956062316894533, "memory(GiB)": 72.85, "step": 46080, "token_acc": 0.509375, "train_speed(iter/s)": 0.671765 }, { "epoch": 1.9744226896876742, "grad_norm": 4.401305675506592, "learning_rate": 6.623594497747152e-05, "loss": 2.2528032302856444, "memory(GiB)": 72.85, "step": 46085, "token_acc": 0.5105633802816901, "train_speed(iter/s)": 0.671771 }, { "epoch": 1.974636905016923, "grad_norm": 5.852652549743652, "learning_rate": 6.6229579739938e-05, "loss": 2.47070198059082, "memory(GiB)": 72.85, "step": 46090, "token_acc": 0.5072992700729927, "train_speed(iter/s)": 0.671759 }, { "epoch": 1.9748511203461718, "grad_norm": 4.084074974060059, "learning_rate": 6.622321420839038e-05, "loss": 2.295261573791504, "memory(GiB)": 72.85, "step": 46095, "token_acc": 0.5179856115107914, "train_speed(iter/s)": 0.671772 }, { "epoch": 1.975065335675421, "grad_norm": 4.027132987976074, "learning_rate": 6.621684838294405e-05, "loss": 2.622697639465332, "memory(GiB)": 72.85, "step": 46100, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.671778 }, { "epoch": 1.97527955100467, "grad_norm": 4.983493804931641, "learning_rate": 6.621048226371429e-05, "loss": 2.484848976135254, "memory(GiB)": 72.85, "step": 46105, "token_acc": 0.473015873015873, "train_speed(iter/s)": 0.67178 }, { "epoch": 1.9754937663339187, "grad_norm": 3.5926241874694824, "learning_rate": 6.620411585081641e-05, "loss": 2.196016311645508, "memory(GiB)": 72.85, "step": 46110, "token_acc": 0.4631578947368421, "train_speed(iter/s)": 0.67178 }, { "epoch": 1.975707981663168, "grad_norm": 3.8925182819366455, "learning_rate": 6.61977491443658e-05, "loss": 2.467741775512695, "memory(GiB)": 72.85, "step": 46115, "token_acc": 0.447098976109215, "train_speed(iter/s)": 0.671782 }, { "epoch": 1.9759221969924168, 
"grad_norm": 3.600316047668457, "learning_rate": 6.619138214447777e-05, "loss": 2.2236568450927736, "memory(GiB)": 72.85, "step": 46120, "token_acc": 0.5137254901960784, "train_speed(iter/s)": 0.671802 }, { "epoch": 1.9761364123216656, "grad_norm": 4.99717903137207, "learning_rate": 6.618501485126764e-05, "loss": 2.4056751251220705, "memory(GiB)": 72.85, "step": 46125, "token_acc": 0.4944237918215613, "train_speed(iter/s)": 0.67181 }, { "epoch": 1.9763506276509148, "grad_norm": 4.594592094421387, "learning_rate": 6.617864726485081e-05, "loss": 2.1260046005249023, "memory(GiB)": 72.85, "step": 46130, "token_acc": 0.5201465201465202, "train_speed(iter/s)": 0.671823 }, { "epoch": 1.9765648429801637, "grad_norm": 3.5359816551208496, "learning_rate": 6.617227938534261e-05, "loss": 2.566376876831055, "memory(GiB)": 72.85, "step": 46135, "token_acc": 0.47232472324723246, "train_speed(iter/s)": 0.671825 }, { "epoch": 1.9767790583094125, "grad_norm": 3.3459572792053223, "learning_rate": 6.616591121285837e-05, "loss": 2.010062026977539, "memory(GiB)": 72.85, "step": 46140, "token_acc": 0.5408560311284046, "train_speed(iter/s)": 0.671821 }, { "epoch": 1.9769932736386617, "grad_norm": 4.150940418243408, "learning_rate": 6.615954274751349e-05, "loss": 2.6956920623779297, "memory(GiB)": 72.85, "step": 46145, "token_acc": 0.4461538461538462, "train_speed(iter/s)": 0.671829 }, { "epoch": 1.9772074889679105, "grad_norm": 3.814865827560425, "learning_rate": 6.615317398942336e-05, "loss": 2.561699104309082, "memory(GiB)": 72.85, "step": 46150, "token_acc": 0.45576407506702415, "train_speed(iter/s)": 0.671847 }, { "epoch": 1.9774217042971594, "grad_norm": 4.666806221008301, "learning_rate": 6.61468049387033e-05, "loss": 2.3915073394775392, "memory(GiB)": 72.85, "step": 46155, "token_acc": 0.45918367346938777, "train_speed(iter/s)": 0.671857 }, { "epoch": 1.9776359196264086, "grad_norm": 4.344357967376709, "learning_rate": 6.614043559546874e-05, "loss": 2.1670385360717774, 
"memory(GiB)": 72.85, "step": 46160, "token_acc": 0.5444839857651246, "train_speed(iter/s)": 0.671858 }, { "epoch": 1.9778501349556574, "grad_norm": 5.4037909507751465, "learning_rate": 6.613406595983505e-05, "loss": 2.3738067626953123, "memory(GiB)": 72.85, "step": 46165, "token_acc": 0.5032679738562091, "train_speed(iter/s)": 0.671869 }, { "epoch": 1.9780643502849062, "grad_norm": 3.5935144424438477, "learning_rate": 6.612769603191761e-05, "loss": 2.3390811920166015, "memory(GiB)": 72.85, "step": 46170, "token_acc": 0.5163398692810458, "train_speed(iter/s)": 0.671891 }, { "epoch": 1.9782785656141555, "grad_norm": 5.376062870025635, "learning_rate": 6.612132581183183e-05, "loss": 2.116507720947266, "memory(GiB)": 72.85, "step": 46175, "token_acc": 0.5524193548387096, "train_speed(iter/s)": 0.671894 }, { "epoch": 1.9784927809434043, "grad_norm": 4.409389495849609, "learning_rate": 6.611495529969311e-05, "loss": 2.4498065948486327, "memory(GiB)": 72.85, "step": 46180, "token_acc": 0.48623853211009177, "train_speed(iter/s)": 0.671882 }, { "epoch": 1.9787069962726531, "grad_norm": 3.9257776737213135, "learning_rate": 6.610858449561685e-05, "loss": 2.2381420135498047, "memory(GiB)": 72.85, "step": 46185, "token_acc": 0.49480968858131485, "train_speed(iter/s)": 0.671887 }, { "epoch": 1.9789212116019024, "grad_norm": 3.5007569789886475, "learning_rate": 6.610221339971848e-05, "loss": 2.3450803756713867, "memory(GiB)": 72.85, "step": 46190, "token_acc": 0.49838187702265374, "train_speed(iter/s)": 0.671869 }, { "epoch": 1.9791354269311512, "grad_norm": 4.62169885635376, "learning_rate": 6.60958420121134e-05, "loss": 2.6953468322753906, "memory(GiB)": 72.85, "step": 46195, "token_acc": 0.5, "train_speed(iter/s)": 0.67188 }, { "epoch": 1.9793496422604, "grad_norm": 3.6999576091766357, "learning_rate": 6.608947033291706e-05, "loss": 2.830837059020996, "memory(GiB)": 72.85, "step": 46200, "token_acc": 0.4738675958188153, "train_speed(iter/s)": 0.671892 }, { "epoch": 
1.9795638575896493, "grad_norm": 4.520834445953369, "learning_rate": 6.608309836224486e-05, "loss": 2.0889698028564454, "memory(GiB)": 72.85, "step": 46205, "token_acc": 0.5272727272727272, "train_speed(iter/s)": 0.671882 }, { "epoch": 1.979778072918898, "grad_norm": 4.80844259262085, "learning_rate": 6.607672610021225e-05, "loss": 2.2593177795410155, "memory(GiB)": 72.85, "step": 46210, "token_acc": 0.5230769230769231, "train_speed(iter/s)": 0.671884 }, { "epoch": 1.9799922882481469, "grad_norm": 4.137812614440918, "learning_rate": 6.607035354693467e-05, "loss": 2.2817924499511717, "memory(GiB)": 72.85, "step": 46215, "token_acc": 0.5367647058823529, "train_speed(iter/s)": 0.671886 }, { "epoch": 1.9802065035773961, "grad_norm": 4.806305408477783, "learning_rate": 6.606398070252755e-05, "loss": 2.540335273742676, "memory(GiB)": 72.85, "step": 46220, "token_acc": 0.4852941176470588, "train_speed(iter/s)": 0.671872 }, { "epoch": 1.980420718906645, "grad_norm": 4.650110721588135, "learning_rate": 6.605760756710635e-05, "loss": 2.03678035736084, "memory(GiB)": 72.85, "step": 46225, "token_acc": 0.559322033898305, "train_speed(iter/s)": 0.671864 }, { "epoch": 1.9806349342358938, "grad_norm": 3.937122344970703, "learning_rate": 6.605123414078653e-05, "loss": 2.5044084548950196, "memory(GiB)": 72.85, "step": 46230, "token_acc": 0.4418604651162791, "train_speed(iter/s)": 0.671869 }, { "epoch": 1.980849149565143, "grad_norm": 5.077508449554443, "learning_rate": 6.604486042368355e-05, "loss": 2.41800537109375, "memory(GiB)": 72.85, "step": 46235, "token_acc": 0.45977011494252873, "train_speed(iter/s)": 0.671881 }, { "epoch": 1.9810633648943918, "grad_norm": 4.256247043609619, "learning_rate": 6.603848641591286e-05, "loss": 2.2574167251586914, "memory(GiB)": 72.85, "step": 46240, "token_acc": 0.5331010452961672, "train_speed(iter/s)": 0.671873 }, { "epoch": 1.9812775802236406, "grad_norm": 3.521064519882202, "learning_rate": 6.603211211758996e-05, "loss": 2.3214849472045898, 
"memory(GiB)": 72.85, "step": 46245, "token_acc": 0.4982698961937716, "train_speed(iter/s)": 0.671888 }, { "epoch": 1.98149179555289, "grad_norm": 5.111050605773926, "learning_rate": 6.602573752883032e-05, "loss": 2.4588184356689453, "memory(GiB)": 72.85, "step": 46250, "token_acc": 0.4753521126760563, "train_speed(iter/s)": 0.671873 }, { "epoch": 1.9817060108821387, "grad_norm": 4.165240287780762, "learning_rate": 6.601936264974937e-05, "loss": 2.236554718017578, "memory(GiB)": 72.85, "step": 46255, "token_acc": 0.5157232704402516, "train_speed(iter/s)": 0.671876 }, { "epoch": 1.9819202262113875, "grad_norm": 4.495955944061279, "learning_rate": 6.601298748046267e-05, "loss": 2.2446466445922852, "memory(GiB)": 72.85, "step": 46260, "token_acc": 0.5505226480836237, "train_speed(iter/s)": 0.671889 }, { "epoch": 1.9821344415406368, "grad_norm": 3.755387783050537, "learning_rate": 6.600661202108567e-05, "loss": 2.5181570053100586, "memory(GiB)": 72.85, "step": 46265, "token_acc": 0.45229681978798586, "train_speed(iter/s)": 0.671894 }, { "epoch": 1.9823486568698856, "grad_norm": 4.064822196960449, "learning_rate": 6.600023627173387e-05, "loss": 2.2250490188598633, "memory(GiB)": 72.85, "step": 46270, "token_acc": 0.5287769784172662, "train_speed(iter/s)": 0.671888 }, { "epoch": 1.9825628721991344, "grad_norm": 4.414491653442383, "learning_rate": 6.599386023252279e-05, "loss": 2.6822206497192385, "memory(GiB)": 72.85, "step": 46275, "token_acc": 0.44, "train_speed(iter/s)": 0.671888 }, { "epoch": 1.9827770875283837, "grad_norm": 4.638831615447998, "learning_rate": 6.598748390356794e-05, "loss": 2.2727228164672852, "memory(GiB)": 72.85, "step": 46280, "token_acc": 0.547945205479452, "train_speed(iter/s)": 0.671877 }, { "epoch": 1.9829913028576325, "grad_norm": 3.5812997817993164, "learning_rate": 6.598110728498481e-05, "loss": 2.0987361907958983, "memory(GiB)": 72.85, "step": 46285, "token_acc": 0.5375494071146245, "train_speed(iter/s)": 0.671865 }, { "epoch": 
1.9832055181868813, "grad_norm": 3.7733640670776367, "learning_rate": 6.597473037688892e-05, "loss": 2.6118806838989257, "memory(GiB)": 72.85, "step": 46290, "token_acc": 0.47988505747126436, "train_speed(iter/s)": 0.671855 }, { "epoch": 1.9834197335161305, "grad_norm": 3.6561193466186523, "learning_rate": 6.596835317939582e-05, "loss": 2.2502145767211914, "memory(GiB)": 72.85, "step": 46295, "token_acc": 0.48427672955974843, "train_speed(iter/s)": 0.67186 }, { "epoch": 1.9836339488453794, "grad_norm": 3.2444958686828613, "learning_rate": 6.596197569262102e-05, "loss": 2.4337005615234375, "memory(GiB)": 72.85, "step": 46300, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.671872 }, { "epoch": 1.9838481641746282, "grad_norm": 3.818897247314453, "learning_rate": 6.595559791668005e-05, "loss": 2.5356155395507813, "memory(GiB)": 72.85, "step": 46305, "token_acc": 0.45544554455445546, "train_speed(iter/s)": 0.671858 }, { "epoch": 1.9840623795038774, "grad_norm": 5.0805535316467285, "learning_rate": 6.594921985168846e-05, "loss": 2.5051061630249025, "memory(GiB)": 72.85, "step": 46310, "token_acc": 0.45918367346938777, "train_speed(iter/s)": 0.67186 }, { "epoch": 1.9842765948331262, "grad_norm": 5.458848476409912, "learning_rate": 6.594284149776179e-05, "loss": 2.245641326904297, "memory(GiB)": 72.85, "step": 46315, "token_acc": 0.5096774193548387, "train_speed(iter/s)": 0.671866 }, { "epoch": 1.9844908101623753, "grad_norm": 4.389017581939697, "learning_rate": 6.593646285501559e-05, "loss": 2.542979431152344, "memory(GiB)": 72.85, "step": 46320, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.671866 }, { "epoch": 1.9847050254916243, "grad_norm": 3.9949185848236084, "learning_rate": 6.593008392356542e-05, "loss": 2.4979543685913086, "memory(GiB)": 72.85, "step": 46325, "token_acc": 0.44787644787644787, "train_speed(iter/s)": 0.671867 }, { "epoch": 1.9849192408208731, "grad_norm": 3.5655648708343506, "learning_rate": 6.592370470352683e-05, "loss": 
2.4709041595458983, "memory(GiB)": 72.85, "step": 46330, "token_acc": 0.4824561403508772, "train_speed(iter/s)": 0.671876 }, { "epoch": 1.9851334561501222, "grad_norm": 5.54287052154541, "learning_rate": 6.591732519501539e-05, "loss": 2.294544219970703, "memory(GiB)": 72.85, "step": 46335, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.671896 }, { "epoch": 1.9853476714793712, "grad_norm": 3.6643853187561035, "learning_rate": 6.591094539814669e-05, "loss": 2.2771190643310546, "memory(GiB)": 72.85, "step": 46340, "token_acc": 0.5228070175438596, "train_speed(iter/s)": 0.671912 }, { "epoch": 1.98556188680862, "grad_norm": 5.457579135894775, "learning_rate": 6.590456531303626e-05, "loss": 2.6141077041625977, "memory(GiB)": 72.85, "step": 46345, "token_acc": 0.46439628482972134, "train_speed(iter/s)": 0.671922 }, { "epoch": 1.985776102137869, "grad_norm": 3.255094528198242, "learning_rate": 6.589818493979973e-05, "loss": 2.3942455291748046, "memory(GiB)": 72.85, "step": 46350, "token_acc": 0.4608433734939759, "train_speed(iter/s)": 0.671945 }, { "epoch": 1.985990317467118, "grad_norm": 5.454539775848389, "learning_rate": 6.589180427855266e-05, "loss": 2.4122825622558595, "memory(GiB)": 72.85, "step": 46355, "token_acc": 0.49473684210526314, "train_speed(iter/s)": 0.671936 }, { "epoch": 1.9862045327963669, "grad_norm": 4.568434715270996, "learning_rate": 6.588542332941065e-05, "loss": 2.4101367950439454, "memory(GiB)": 72.85, "step": 46360, "token_acc": 0.4820846905537459, "train_speed(iter/s)": 0.671944 }, { "epoch": 1.986418748125616, "grad_norm": 3.523280620574951, "learning_rate": 6.587904209248928e-05, "loss": 2.555611801147461, "memory(GiB)": 72.85, "step": 46365, "token_acc": 0.4740061162079511, "train_speed(iter/s)": 0.671949 }, { "epoch": 1.986632963454865, "grad_norm": 3.8985862731933594, "learning_rate": 6.587266056790418e-05, "loss": 2.4729881286621094, "memory(GiB)": 72.85, "step": 46370, "token_acc": 0.46875, "train_speed(iter/s)": 0.671947 }, { 
"epoch": 1.9868471787841138, "grad_norm": 5.733015537261963, "learning_rate": 6.586627875577094e-05, "loss": 2.423452377319336, "memory(GiB)": 72.85, "step": 46375, "token_acc": 0.5164473684210527, "train_speed(iter/s)": 0.671938 }, { "epoch": 1.9870613941133628, "grad_norm": 4.790804386138916, "learning_rate": 6.58598966562052e-05, "loss": 2.6595767974853515, "memory(GiB)": 72.85, "step": 46380, "token_acc": 0.46616541353383456, "train_speed(iter/s)": 0.671947 }, { "epoch": 1.9872756094426118, "grad_norm": 3.0832066535949707, "learning_rate": 6.585351426932253e-05, "loss": 2.3385873794555665, "memory(GiB)": 72.85, "step": 46385, "token_acc": 0.4941860465116279, "train_speed(iter/s)": 0.67194 }, { "epoch": 1.9874898247718606, "grad_norm": 6.611907005310059, "learning_rate": 6.58471315952386e-05, "loss": 2.3477619171142576, "memory(GiB)": 72.85, "step": 46390, "token_acc": 0.508130081300813, "train_speed(iter/s)": 0.671958 }, { "epoch": 1.9877040401011097, "grad_norm": 5.341731071472168, "learning_rate": 6.584074863406898e-05, "loss": 2.3504318237304687, "memory(GiB)": 72.85, "step": 46395, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.671947 }, { "epoch": 1.9879182554303587, "grad_norm": 5.049339771270752, "learning_rate": 6.583436538592935e-05, "loss": 2.0782773971557615, "memory(GiB)": 72.85, "step": 46400, "token_acc": 0.5451807228915663, "train_speed(iter/s)": 0.671943 }, { "epoch": 1.9881324707596075, "grad_norm": 3.6435739994049072, "learning_rate": 6.582798185093535e-05, "loss": 2.153999900817871, "memory(GiB)": 72.85, "step": 46405, "token_acc": 0.5246478873239436, "train_speed(iter/s)": 0.67195 }, { "epoch": 1.9883466860888566, "grad_norm": 5.035109043121338, "learning_rate": 6.582159802920261e-05, "loss": 2.5302797317504884, "memory(GiB)": 72.85, "step": 46410, "token_acc": 0.4930555555555556, "train_speed(iter/s)": 0.671945 }, { "epoch": 1.9885609014181056, "grad_norm": 4.447586536407471, "learning_rate": 6.581521392084677e-05, "loss": 
2.275531768798828, "memory(GiB)": 72.85, "step": 46415, "token_acc": 0.4934640522875817, "train_speed(iter/s)": 0.671949 }, { "epoch": 1.9887751167473544, "grad_norm": 4.225378036499023, "learning_rate": 6.58088295259835e-05, "loss": 2.6456972122192384, "memory(GiB)": 72.85, "step": 46420, "token_acc": 0.4147058823529412, "train_speed(iter/s)": 0.671966 }, { "epoch": 1.9889893320766034, "grad_norm": 4.807002067565918, "learning_rate": 6.580244484472845e-05, "loss": 2.3309810638427733, "memory(GiB)": 72.85, "step": 46425, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.671964 }, { "epoch": 1.9892035474058525, "grad_norm": 5.7258710861206055, "learning_rate": 6.579605987719728e-05, "loss": 2.259547805786133, "memory(GiB)": 72.85, "step": 46430, "token_acc": 0.5426356589147286, "train_speed(iter/s)": 0.671957 }, { "epoch": 1.9894177627351013, "grad_norm": 3.518317699432373, "learning_rate": 6.578967462350569e-05, "loss": 2.611531066894531, "memory(GiB)": 72.85, "step": 46435, "token_acc": 0.4276094276094276, "train_speed(iter/s)": 0.671959 }, { "epoch": 1.9896319780643503, "grad_norm": 5.302667140960693, "learning_rate": 6.578328908376932e-05, "loss": 2.408486557006836, "memory(GiB)": 72.85, "step": 46440, "token_acc": 0.4729241877256318, "train_speed(iter/s)": 0.67197 }, { "epoch": 1.9898461933935994, "grad_norm": 3.5558254718780518, "learning_rate": 6.577690325810385e-05, "loss": 2.6235679626464843, "memory(GiB)": 72.85, "step": 46445, "token_acc": 0.4471299093655589, "train_speed(iter/s)": 0.671957 }, { "epoch": 1.9900604087228482, "grad_norm": 4.578438758850098, "learning_rate": 6.577051714662499e-05, "loss": 2.3455297470092775, "memory(GiB)": 72.85, "step": 46450, "token_acc": 0.46037735849056605, "train_speed(iter/s)": 0.671957 }, { "epoch": 1.9902746240520972, "grad_norm": 4.898419380187988, "learning_rate": 6.576413074944842e-05, "loss": 2.375691604614258, "memory(GiB)": 72.85, "step": 46455, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 
0.671955 }, { "epoch": 1.9904888393813462, "grad_norm": 3.6300950050354004, "learning_rate": 6.575774406668983e-05, "loss": 2.4030925750732424, "memory(GiB)": 72.85, "step": 46460, "token_acc": 0.47041420118343197, "train_speed(iter/s)": 0.67196 }, { "epoch": 1.990703054710595, "grad_norm": 4.429596424102783, "learning_rate": 6.575135709846492e-05, "loss": 2.2878551483154297, "memory(GiB)": 72.85, "step": 46465, "token_acc": 0.5399239543726235, "train_speed(iter/s)": 0.671949 }, { "epoch": 1.990917270039844, "grad_norm": 5.637802600860596, "learning_rate": 6.57449698448894e-05, "loss": 2.2124420166015626, "memory(GiB)": 72.85, "step": 46470, "token_acc": 0.4980237154150198, "train_speed(iter/s)": 0.671947 }, { "epoch": 1.9911314853690931, "grad_norm": 4.07767391204834, "learning_rate": 6.573858230607899e-05, "loss": 2.0614831924438475, "memory(GiB)": 72.85, "step": 46475, "token_acc": 0.5225563909774437, "train_speed(iter/s)": 0.67195 }, { "epoch": 1.991345700698342, "grad_norm": 3.479156255722046, "learning_rate": 6.57321944821494e-05, "loss": 2.2643997192382814, "memory(GiB)": 72.85, "step": 46480, "token_acc": 0.5183673469387755, "train_speed(iter/s)": 0.671953 }, { "epoch": 1.991559916027591, "grad_norm": 3.9148871898651123, "learning_rate": 6.572580637321635e-05, "loss": 2.1112594604492188, "memory(GiB)": 72.85, "step": 46485, "token_acc": 0.5382059800664452, "train_speed(iter/s)": 0.671936 }, { "epoch": 1.99177413135684, "grad_norm": 4.849170207977295, "learning_rate": 6.571941797939555e-05, "loss": 2.574970817565918, "memory(GiB)": 72.85, "step": 46490, "token_acc": 0.45987654320987653, "train_speed(iter/s)": 0.67194 }, { "epoch": 1.9919883466860888, "grad_norm": 4.294142246246338, "learning_rate": 6.571302930080275e-05, "loss": 2.181289863586426, "memory(GiB)": 72.85, "step": 46495, "token_acc": 0.5811320754716981, "train_speed(iter/s)": 0.671956 }, { "epoch": 1.9922025620153379, "grad_norm": 4.428546905517578, "learning_rate": 6.570664033755369e-05, 
"loss": 2.4136564254760744, "memory(GiB)": 72.85, "step": 46500, "token_acc": 0.47096774193548385, "train_speed(iter/s)": 0.671957 }, { "epoch": 1.9922025620153379, "eval_loss": 2.112295150756836, "eval_runtime": 17.1184, "eval_samples_per_second": 5.842, "eval_steps_per_second": 5.842, "eval_token_acc": 0.4722564734895191, "step": 46500 }, { "epoch": 1.9924167773445869, "grad_norm": 4.472082614898682, "learning_rate": 6.57002510897641e-05, "loss": 2.3691396713256836, "memory(GiB)": 72.85, "step": 46505, "token_acc": 0.4829443447037702, "train_speed(iter/s)": 0.671777 }, { "epoch": 1.9926309926738357, "grad_norm": 5.294619560241699, "learning_rate": 6.569386155754975e-05, "loss": 2.2648784637451174, "memory(GiB)": 72.85, "step": 46510, "token_acc": 0.5179640718562875, "train_speed(iter/s)": 0.671767 }, { "epoch": 1.9928452080030847, "grad_norm": 3.893764019012451, "learning_rate": 6.568747174102635e-05, "loss": 2.4535701751708983, "memory(GiB)": 72.85, "step": 46515, "token_acc": 0.4728434504792332, "train_speed(iter/s)": 0.671763 }, { "epoch": 1.9930594233323338, "grad_norm": 4.3068413734436035, "learning_rate": 6.56810816403097e-05, "loss": 2.2222930908203127, "memory(GiB)": 72.85, "step": 46520, "token_acc": 0.5466666666666666, "train_speed(iter/s)": 0.671776 }, { "epoch": 1.9932736386615826, "grad_norm": 3.8245692253112793, "learning_rate": 6.567469125551555e-05, "loss": 2.5304216384887694, "memory(GiB)": 72.85, "step": 46525, "token_acc": 0.4564459930313589, "train_speed(iter/s)": 0.67177 }, { "epoch": 1.9934878539908316, "grad_norm": 3.7223544120788574, "learning_rate": 6.566830058675966e-05, "loss": 2.307107925415039, "memory(GiB)": 72.85, "step": 46530, "token_acc": 0.4797507788161994, "train_speed(iter/s)": 0.67177 }, { "epoch": 1.9937020693200806, "grad_norm": 4.145549774169922, "learning_rate": 6.566190963415781e-05, "loss": 2.5025398254394533, "memory(GiB)": 72.85, "step": 46535, "token_acc": 0.45364238410596025, "train_speed(iter/s)": 0.671774 }, { 
"epoch": 1.9939162846493295, "grad_norm": 4.3250861167907715, "learning_rate": 6.565551839782577e-05, "loss": 2.3467376708984373, "memory(GiB)": 72.85, "step": 46540, "token_acc": 0.4735099337748344, "train_speed(iter/s)": 0.671785 }, { "epoch": 1.9941304999785785, "grad_norm": 5.5118088722229, "learning_rate": 6.564912687787933e-05, "loss": 2.4280200958251954, "memory(GiB)": 72.85, "step": 46545, "token_acc": 0.4679245283018868, "train_speed(iter/s)": 0.671777 }, { "epoch": 1.9943447153078275, "grad_norm": 5.6004533767700195, "learning_rate": 6.564273507443427e-05, "loss": 2.412751388549805, "memory(GiB)": 72.85, "step": 46550, "token_acc": 0.4823943661971831, "train_speed(iter/s)": 0.671768 }, { "epoch": 1.9945589306370763, "grad_norm": 4.723461151123047, "learning_rate": 6.56363429876064e-05, "loss": 2.3279756546020507, "memory(GiB)": 72.85, "step": 46555, "token_acc": 0.4944237918215613, "train_speed(iter/s)": 0.67177 }, { "epoch": 1.9947731459663254, "grad_norm": 4.525100231170654, "learning_rate": 6.56299506175115e-05, "loss": 2.5622831344604493, "memory(GiB)": 72.85, "step": 46560, "token_acc": 0.4965753424657534, "train_speed(iter/s)": 0.671783 }, { "epoch": 1.9949873612955744, "grad_norm": 5.018245697021484, "learning_rate": 6.562355796426539e-05, "loss": 2.276577949523926, "memory(GiB)": 72.85, "step": 46565, "token_acc": 0.4716981132075472, "train_speed(iter/s)": 0.671773 }, { "epoch": 1.9952015766248232, "grad_norm": 3.560544967651367, "learning_rate": 6.561716502798388e-05, "loss": 2.5235923767089843, "memory(GiB)": 72.85, "step": 46570, "token_acc": 0.4501510574018127, "train_speed(iter/s)": 0.671762 }, { "epoch": 1.9954157919540723, "grad_norm": 4.725076198577881, "learning_rate": 6.561077180878274e-05, "loss": 2.2343795776367186, "memory(GiB)": 72.85, "step": 46575, "token_acc": 0.5175097276264592, "train_speed(iter/s)": 0.671765 }, { "epoch": 1.9956300072833213, "grad_norm": 5.230018615722656, "learning_rate": 6.560565702979757e-05, "loss": 
2.8533357620239257, "memory(GiB)": 72.85, "step": 46580, "token_acc": 0.48046875, "train_speed(iter/s)": 0.67178 }, { "epoch": 1.99584422261257, "grad_norm": 3.723695993423462, "learning_rate": 6.559926330163305e-05, "loss": 2.3293651580810546, "memory(GiB)": 72.85, "step": 46585, "token_acc": 0.4674922600619195, "train_speed(iter/s)": 0.671793 }, { "epoch": 1.9960584379418191, "grad_norm": 4.161057949066162, "learning_rate": 6.559286929087324e-05, "loss": 2.141961669921875, "memory(GiB)": 72.85, "step": 46590, "token_acc": 0.5169230769230769, "train_speed(iter/s)": 0.671795 }, { "epoch": 1.9962726532710682, "grad_norm": 4.065131664276123, "learning_rate": 6.558647499763399e-05, "loss": 2.518161392211914, "memory(GiB)": 72.85, "step": 46595, "token_acc": 0.4584837545126354, "train_speed(iter/s)": 0.671781 }, { "epoch": 1.996486868600317, "grad_norm": 4.4415411949157715, "learning_rate": 6.55800804220311e-05, "loss": 2.230995750427246, "memory(GiB)": 72.85, "step": 46600, "token_acc": 0.4967741935483871, "train_speed(iter/s)": 0.671801 }, { "epoch": 1.996701083929566, "grad_norm": 3.796996593475342, "learning_rate": 6.557368556418045e-05, "loss": 2.2597635269165037, "memory(GiB)": 72.85, "step": 46605, "token_acc": 0.5050847457627119, "train_speed(iter/s)": 0.671814 }, { "epoch": 1.996915299258815, "grad_norm": 5.307206153869629, "learning_rate": 6.556729042419789e-05, "loss": 2.1007280349731445, "memory(GiB)": 72.85, "step": 46610, "token_acc": 0.5393939393939394, "train_speed(iter/s)": 0.671825 }, { "epoch": 1.9971295145880639, "grad_norm": 4.411706447601318, "learning_rate": 6.556089500219924e-05, "loss": 2.319708061218262, "memory(GiB)": 72.85, "step": 46615, "token_acc": 0.4797297297297297, "train_speed(iter/s)": 0.671805 }, { "epoch": 1.997343729917313, "grad_norm": 4.631034851074219, "learning_rate": 6.555449929830038e-05, "loss": 2.105015182495117, "memory(GiB)": 72.85, "step": 46620, "token_acc": 0.5296167247386759, "train_speed(iter/s)": 0.671802 }, { 
"epoch": 1.997557945246562, "grad_norm": 5.471512317657471, "learning_rate": 6.554810331261719e-05, "loss": 2.3778217315673826, "memory(GiB)": 72.85, "step": 46625, "token_acc": 0.4862068965517241, "train_speed(iter/s)": 0.671815 }, { "epoch": 1.9977721605758108, "grad_norm": 3.66054105758667, "learning_rate": 6.554170704526552e-05, "loss": 2.2410144805908203, "memory(GiB)": 72.85, "step": 46630, "token_acc": 0.5341365461847389, "train_speed(iter/s)": 0.671829 }, { "epoch": 1.9979863759050598, "grad_norm": 4.836370468139648, "learning_rate": 6.553531049636124e-05, "loss": 2.3468738555908204, "memory(GiB)": 72.85, "step": 46635, "token_acc": 0.49469964664310956, "train_speed(iter/s)": 0.671831 }, { "epoch": 1.9982005912343088, "grad_norm": 4.30033016204834, "learning_rate": 6.552891366602023e-05, "loss": 2.2724842071533202, "memory(GiB)": 72.85, "step": 46640, "token_acc": 0.5198412698412699, "train_speed(iter/s)": 0.671829 }, { "epoch": 1.9984148065635576, "grad_norm": 4.687586784362793, "learning_rate": 6.55225165543584e-05, "loss": 2.5137155532836912, "memory(GiB)": 72.85, "step": 46645, "token_acc": 0.48355263157894735, "train_speed(iter/s)": 0.671827 }, { "epoch": 1.9986290218928067, "grad_norm": 3.897671937942505, "learning_rate": 6.55161191614916e-05, "loss": 2.2242937088012695, "memory(GiB)": 72.85, "step": 46650, "token_acc": 0.532258064516129, "train_speed(iter/s)": 0.671815 }, { "epoch": 1.9988432372220557, "grad_norm": 4.032737731933594, "learning_rate": 6.550972148753576e-05, "loss": 2.4258426666259765, "memory(GiB)": 72.85, "step": 46655, "token_acc": 0.4652567975830816, "train_speed(iter/s)": 0.671811 }, { "epoch": 1.9990574525513045, "grad_norm": 4.03397798538208, "learning_rate": 6.550332353260676e-05, "loss": 1.9899326324462892, "memory(GiB)": 72.85, "step": 46660, "token_acc": 0.54421768707483, "train_speed(iter/s)": 0.671809 }, { "epoch": 1.9992716678805535, "grad_norm": 4.62186336517334, "learning_rate": 6.549692529682053e-05, "loss": 
2.6773590087890624, "memory(GiB)": 72.85, "step": 46665, "token_acc": 0.46534653465346537, "train_speed(iter/s)": 0.671812 }, { "epoch": 1.9994858832098026, "grad_norm": 4.4983229637146, "learning_rate": 6.549052678029293e-05, "loss": 2.418838119506836, "memory(GiB)": 72.85, "step": 46670, "token_acc": 0.5406360424028268, "train_speed(iter/s)": 0.671825 }, { "epoch": 1.9997000985390514, "grad_norm": 2.9077866077423096, "learning_rate": 6.548412798313993e-05, "loss": 2.315627670288086, "memory(GiB)": 72.85, "step": 46675, "token_acc": 0.4668769716088328, "train_speed(iter/s)": 0.671838 }, { "epoch": 1.9999143138683004, "grad_norm": 9.655109405517578, "learning_rate": 6.547772890547742e-05, "loss": 2.7610141754150392, "memory(GiB)": 72.85, "step": 46680, "token_acc": 0.43416370106761565, "train_speed(iter/s)": 0.671846 }, { "epoch": 2.0001285291975495, "grad_norm": 3.9948344230651855, "learning_rate": 6.547132954742132e-05, "loss": 2.1156904220581056, "memory(GiB)": 72.85, "step": 46685, "token_acc": 0.5261044176706827, "train_speed(iter/s)": 0.671863 }, { "epoch": 2.0003427445267983, "grad_norm": 4.3075690269470215, "learning_rate": 6.54649299090876e-05, "loss": 2.5162818908691404, "memory(GiB)": 72.85, "step": 46690, "token_acc": 0.45980707395498394, "train_speed(iter/s)": 0.671851 }, { "epoch": 2.000556959856047, "grad_norm": 5.316599369049072, "learning_rate": 6.545852999059214e-05, "loss": 2.2576320648193358, "memory(GiB)": 72.85, "step": 46695, "token_acc": 0.4945054945054945, "train_speed(iter/s)": 0.671844 }, { "epoch": 2.0007711751852963, "grad_norm": 4.653919219970703, "learning_rate": 6.545212979205091e-05, "loss": 2.3238971710205076, "memory(GiB)": 72.85, "step": 46700, "token_acc": 0.4965986394557823, "train_speed(iter/s)": 0.671853 }, { "epoch": 2.000985390514545, "grad_norm": 3.769685745239258, "learning_rate": 6.544572931357986e-05, "loss": 2.2675561904907227, "memory(GiB)": 72.85, "step": 46705, "token_acc": 0.5246376811594203, "train_speed(iter/s)": 
0.671857 }, { "epoch": 2.001199605843794, "grad_norm": 3.6111104488372803, "learning_rate": 6.543932855529493e-05, "loss": 2.1648157119750975, "memory(GiB)": 72.85, "step": 46710, "token_acc": 0.5532646048109966, "train_speed(iter/s)": 0.671847 }, { "epoch": 2.0014138211730432, "grad_norm": 4.320132732391357, "learning_rate": 6.543292751731208e-05, "loss": 2.334084892272949, "memory(GiB)": 72.85, "step": 46715, "token_acc": 0.5094339622641509, "train_speed(iter/s)": 0.671848 }, { "epoch": 2.001628036502292, "grad_norm": 3.6402504444122314, "learning_rate": 6.542652619974729e-05, "loss": 2.530701446533203, "memory(GiB)": 72.85, "step": 46720, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.671859 }, { "epoch": 2.001842251831541, "grad_norm": 4.426703453063965, "learning_rate": 6.54201246027165e-05, "loss": 2.048204040527344, "memory(GiB)": 72.85, "step": 46725, "token_acc": 0.5575539568345323, "train_speed(iter/s)": 0.671869 }, { "epoch": 2.00205646716079, "grad_norm": 3.75313138961792, "learning_rate": 6.541372272633567e-05, "loss": 2.1353330612182617, "memory(GiB)": 72.85, "step": 46730, "token_acc": 0.5169230769230769, "train_speed(iter/s)": 0.671875 }, { "epoch": 2.002270682490039, "grad_norm": 3.7432987689971924, "learning_rate": 6.540732057072081e-05, "loss": 2.2639739990234373, "memory(GiB)": 72.85, "step": 46735, "token_acc": 0.48466257668711654, "train_speed(iter/s)": 0.671871 }, { "epoch": 2.0024848978192877, "grad_norm": 5.148110389709473, "learning_rate": 6.540091813598787e-05, "loss": 2.258126449584961, "memory(GiB)": 72.85, "step": 46740, "token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.671884 }, { "epoch": 2.002699113148537, "grad_norm": 4.326194763183594, "learning_rate": 6.539451542225286e-05, "loss": 2.248413848876953, "memory(GiB)": 72.85, "step": 46745, "token_acc": 0.5387596899224806, "train_speed(iter/s)": 0.671891 }, { "epoch": 2.002913328477786, "grad_norm": 5.702746391296387, "learning_rate": 6.538811242963176e-05, 
"loss": 2.0079025268554687, "memory(GiB)": 72.85, "step": 46750, "token_acc": 0.5189003436426117, "train_speed(iter/s)": 0.671882 }, { "epoch": 2.0031275438070346, "grad_norm": 4.351986885070801, "learning_rate": 6.538170915824056e-05, "loss": 2.1679033279418944, "memory(GiB)": 72.85, "step": 46755, "token_acc": 0.5320754716981132, "train_speed(iter/s)": 0.671889 }, { "epoch": 2.003341759136284, "grad_norm": 4.391238212585449, "learning_rate": 6.537530560819527e-05, "loss": 2.618434524536133, "memory(GiB)": 72.85, "step": 46760, "token_acc": 0.4673913043478261, "train_speed(iter/s)": 0.671886 }, { "epoch": 2.0035559744655327, "grad_norm": 3.591583728790283, "learning_rate": 6.536890177961192e-05, "loss": 2.3217138290405273, "memory(GiB)": 72.85, "step": 46765, "token_acc": 0.5146579804560261, "train_speed(iter/s)": 0.671888 }, { "epoch": 2.0037701897947815, "grad_norm": 4.924428939819336, "learning_rate": 6.536249767260647e-05, "loss": 2.0255306243896483, "memory(GiB)": 72.85, "step": 46770, "token_acc": 0.5179282868525896, "train_speed(iter/s)": 0.671876 }, { "epoch": 2.0039844051240308, "grad_norm": 3.2399179935455322, "learning_rate": 6.535609328729498e-05, "loss": 2.321396064758301, "memory(GiB)": 72.85, "step": 46775, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.671878 }, { "epoch": 2.0041986204532796, "grad_norm": 5.806821823120117, "learning_rate": 6.534968862379345e-05, "loss": 2.2440208435058593, "memory(GiB)": 72.85, "step": 46780, "token_acc": 0.5125448028673835, "train_speed(iter/s)": 0.671868 }, { "epoch": 2.0044128357825284, "grad_norm": 3.342529773712158, "learning_rate": 6.534328368221791e-05, "loss": 2.3989355087280275, "memory(GiB)": 72.85, "step": 46785, "token_acc": 0.49014084507042255, "train_speed(iter/s)": 0.671877 }, { "epoch": 2.0046270511117776, "grad_norm": 3.9345943927764893, "learning_rate": 6.533687846268439e-05, "loss": 2.122242736816406, "memory(GiB)": 72.85, "step": 46790, "token_acc": 0.4910394265232975, 
"train_speed(iter/s)": 0.671902 }, { "epoch": 2.0048412664410264, "grad_norm": 4.600401878356934, "learning_rate": 6.533047296530894e-05, "loss": 2.238173484802246, "memory(GiB)": 72.85, "step": 46795, "token_acc": 0.5167785234899329, "train_speed(iter/s)": 0.671913 }, { "epoch": 2.0050554817702753, "grad_norm": 4.761384963989258, "learning_rate": 6.532406719020758e-05, "loss": 2.1729440689086914, "memory(GiB)": 72.85, "step": 46800, "token_acc": 0.4937106918238994, "train_speed(iter/s)": 0.671921 }, { "epoch": 2.0052696970995245, "grad_norm": 5.2957353591918945, "learning_rate": 6.531766113749638e-05, "loss": 2.2822431564331054, "memory(GiB)": 72.85, "step": 46805, "token_acc": 0.49570200573065903, "train_speed(iter/s)": 0.671914 }, { "epoch": 2.0054839124287733, "grad_norm": 5.857024192810059, "learning_rate": 6.531125480729137e-05, "loss": 2.4242855072021485, "memory(GiB)": 72.85, "step": 46810, "token_acc": 0.5034246575342466, "train_speed(iter/s)": 0.671914 }, { "epoch": 2.0056981277580226, "grad_norm": 5.785645008087158, "learning_rate": 6.530484819970861e-05, "loss": 2.1595476150512694, "memory(GiB)": 72.85, "step": 46815, "token_acc": 0.5155038759689923, "train_speed(iter/s)": 0.671932 }, { "epoch": 2.0059123430872714, "grad_norm": 4.372957706451416, "learning_rate": 6.529844131486418e-05, "loss": 2.3321414947509767, "memory(GiB)": 72.85, "step": 46820, "token_acc": 0.5059880239520959, "train_speed(iter/s)": 0.671926 }, { "epoch": 2.00612655841652, "grad_norm": 4.6615891456604, "learning_rate": 6.529203415287413e-05, "loss": 2.2661033630371095, "memory(GiB)": 72.85, "step": 46825, "token_acc": 0.5171232876712328, "train_speed(iter/s)": 0.671913 }, { "epoch": 2.0063407737457695, "grad_norm": 3.8034188747406006, "learning_rate": 6.528562671385453e-05, "loss": 2.383292007446289, "memory(GiB)": 72.85, "step": 46830, "token_acc": 0.4696485623003195, "train_speed(iter/s)": 0.671914 }, { "epoch": 2.0065549890750183, "grad_norm": 4.267506122589111, "learning_rate": 
6.527921899792146e-05, "loss": 2.3848121643066404, "memory(GiB)": 72.85, "step": 46835, "token_acc": 0.5288461538461539, "train_speed(iter/s)": 0.671915 }, { "epoch": 2.006769204404267, "grad_norm": 3.4899744987487793, "learning_rate": 6.527281100519103e-05, "loss": 2.3523330688476562, "memory(GiB)": 72.85, "step": 46840, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.671913 }, { "epoch": 2.0069834197335163, "grad_norm": 4.341711044311523, "learning_rate": 6.526640273577928e-05, "loss": 2.084621810913086, "memory(GiB)": 72.85, "step": 46845, "token_acc": 0.5639097744360902, "train_speed(iter/s)": 0.671919 }, { "epoch": 2.007197635062765, "grad_norm": 4.104999542236328, "learning_rate": 6.525999418980234e-05, "loss": 1.7994630813598633, "memory(GiB)": 72.85, "step": 46850, "token_acc": 0.5734265734265734, "train_speed(iter/s)": 0.671905 }, { "epoch": 2.007411850392014, "grad_norm": 7.0870256423950195, "learning_rate": 6.525358536737627e-05, "loss": 2.3547359466552735, "memory(GiB)": 72.85, "step": 46855, "token_acc": 0.5189003436426117, "train_speed(iter/s)": 0.671898 }, { "epoch": 2.0076260657212632, "grad_norm": 3.6290879249572754, "learning_rate": 6.52471762686172e-05, "loss": 2.3082164764404296, "memory(GiB)": 72.85, "step": 46860, "token_acc": 0.5341614906832298, "train_speed(iter/s)": 0.671912 }, { "epoch": 2.007840281050512, "grad_norm": 4.372542858123779, "learning_rate": 6.524076689364122e-05, "loss": 2.065317153930664, "memory(GiB)": 72.85, "step": 46865, "token_acc": 0.5162337662337663, "train_speed(iter/s)": 0.671926 }, { "epoch": 2.008054496379761, "grad_norm": 4.796763896942139, "learning_rate": 6.523435724256448e-05, "loss": 2.0407697677612306, "memory(GiB)": 72.85, "step": 46870, "token_acc": 0.5382059800664452, "train_speed(iter/s)": 0.671934 }, { "epoch": 2.00826871170901, "grad_norm": 6.357532501220703, "learning_rate": 6.522794731550304e-05, "loss": 2.446548080444336, "memory(GiB)": 72.85, "step": 46875, "token_acc": 
0.44970414201183434, "train_speed(iter/s)": 0.671924 }, { "epoch": 2.008482927038259, "grad_norm": 6.650539398193359, "learning_rate": 6.522153711257306e-05, "loss": 2.525452423095703, "memory(GiB)": 72.85, "step": 46880, "token_acc": 0.48880597014925375, "train_speed(iter/s)": 0.67192 }, { "epoch": 2.0086971423675077, "grad_norm": 3.8267812728881836, "learning_rate": 6.521512663389065e-05, "loss": 2.190700149536133, "memory(GiB)": 72.85, "step": 46885, "token_acc": 0.5141955835962145, "train_speed(iter/s)": 0.671928 }, { "epoch": 2.008911357696757, "grad_norm": 4.5943708419799805, "learning_rate": 6.520871587957193e-05, "loss": 2.174306297302246, "memory(GiB)": 72.85, "step": 46890, "token_acc": 0.5033783783783784, "train_speed(iter/s)": 0.67194 }, { "epoch": 2.009125573026006, "grad_norm": 4.560358047485352, "learning_rate": 6.52023048497331e-05, "loss": 2.0586406707763674, "memory(GiB)": 72.85, "step": 46895, "token_acc": 0.5114503816793893, "train_speed(iter/s)": 0.67194 }, { "epoch": 2.0093397883552546, "grad_norm": 4.317680835723877, "learning_rate": 6.519589354449022e-05, "loss": 2.4801250457763673, "memory(GiB)": 72.85, "step": 46900, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.671937 }, { "epoch": 2.009554003684504, "grad_norm": 4.142745018005371, "learning_rate": 6.518948196395948e-05, "loss": 2.109526824951172, "memory(GiB)": 72.85, "step": 46905, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.671949 }, { "epoch": 2.0097682190137527, "grad_norm": 3.963341236114502, "learning_rate": 6.518307010825705e-05, "loss": 2.0306039810180665, "memory(GiB)": 72.85, "step": 46910, "token_acc": 0.5187713310580204, "train_speed(iter/s)": 0.671945 }, { "epoch": 2.0099824343430015, "grad_norm": 5.827889442443848, "learning_rate": 6.517665797749904e-05, "loss": 2.104066276550293, "memory(GiB)": 72.85, "step": 46915, "token_acc": 0.5525291828793775, "train_speed(iter/s)": 0.671934 }, { "epoch": 2.0101966496722508, "grad_norm": 5.69235372543335, 
"learning_rate": 6.517024557180164e-05, "loss": 2.0524917602539063, "memory(GiB)": 72.85, "step": 46920, "token_acc": 0.5545171339563862, "train_speed(iter/s)": 0.671938 }, { "epoch": 2.0104108650014996, "grad_norm": 9.17878532409668, "learning_rate": 6.516383289128101e-05, "loss": 2.182925224304199, "memory(GiB)": 72.85, "step": 46925, "token_acc": 0.534965034965035, "train_speed(iter/s)": 0.671948 }, { "epoch": 2.0106250803307484, "grad_norm": 4.177545070648193, "learning_rate": 6.515741993605331e-05, "loss": 2.3223093032836912, "memory(GiB)": 72.85, "step": 46930, "token_acc": 0.4707792207792208, "train_speed(iter/s)": 0.671949 }, { "epoch": 2.0108392956599976, "grad_norm": 6.86592960357666, "learning_rate": 6.515100670623474e-05, "loss": 2.2276193618774416, "memory(GiB)": 72.85, "step": 46935, "token_acc": 0.5225806451612903, "train_speed(iter/s)": 0.671958 }, { "epoch": 2.0110535109892465, "grad_norm": 5.0630364418029785, "learning_rate": 6.514459320194148e-05, "loss": 2.269978713989258, "memory(GiB)": 72.85, "step": 46940, "token_acc": 0.5, "train_speed(iter/s)": 0.671977 }, { "epoch": 2.0112677263184953, "grad_norm": 4.832208156585693, "learning_rate": 6.513817942328969e-05, "loss": 2.1919424057006838, "memory(GiB)": 72.85, "step": 46945, "token_acc": 0.4935064935064935, "train_speed(iter/s)": 0.671958 }, { "epoch": 2.0114819416477445, "grad_norm": 4.974915027618408, "learning_rate": 6.51317653703956e-05, "loss": 1.916377067565918, "memory(GiB)": 72.85, "step": 46950, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.671968 }, { "epoch": 2.0116961569769933, "grad_norm": 5.516735076904297, "learning_rate": 6.512535104337537e-05, "loss": 2.4951908111572267, "memory(GiB)": 72.85, "step": 46955, "token_acc": 0.4813664596273292, "train_speed(iter/s)": 0.671969 }, { "epoch": 2.011910372306242, "grad_norm": 4.9135332107543945, "learning_rate": 6.511893644234521e-05, "loss": 2.014236068725586, "memory(GiB)": 72.85, "step": 46960, "token_acc": 
0.5429553264604811, "train_speed(iter/s)": 0.671979 }, { "epoch": 2.0121245876354914, "grad_norm": 4.729252815246582, "learning_rate": 6.511252156742134e-05, "loss": 2.299057388305664, "memory(GiB)": 72.85, "step": 46965, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.671978 }, { "epoch": 2.01233880296474, "grad_norm": 6.0526442527771, "learning_rate": 6.510610641871998e-05, "loss": 2.39962158203125, "memory(GiB)": 72.85, "step": 46970, "token_acc": 0.4769736842105263, "train_speed(iter/s)": 0.671986 }, { "epoch": 2.012553018293989, "grad_norm": 5.328378677368164, "learning_rate": 6.509969099635731e-05, "loss": 2.371633529663086, "memory(GiB)": 72.85, "step": 46975, "token_acc": 0.46078431372549017, "train_speed(iter/s)": 0.671973 }, { "epoch": 2.0127672336232383, "grad_norm": 3.9219672679901123, "learning_rate": 6.50932753004496e-05, "loss": 2.1295278549194334, "memory(GiB)": 72.85, "step": 46980, "token_acc": 0.57421875, "train_speed(iter/s)": 0.671981 }, { "epoch": 2.012981448952487, "grad_norm": 4.674825191497803, "learning_rate": 6.508685933111303e-05, "loss": 2.412091064453125, "memory(GiB)": 72.85, "step": 46985, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.671976 }, { "epoch": 2.013195664281736, "grad_norm": 4.498875617980957, "learning_rate": 6.508044308846384e-05, "loss": 2.1945110321044923, "memory(GiB)": 72.85, "step": 46990, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.671979 }, { "epoch": 2.013409879610985, "grad_norm": 4.24662971496582, "learning_rate": 6.50740265726183e-05, "loss": 2.1761453628540037, "memory(GiB)": 72.85, "step": 46995, "token_acc": 0.5419847328244275, "train_speed(iter/s)": 0.671983 }, { "epoch": 2.013624094940234, "grad_norm": 3.243466854095459, "learning_rate": 6.506760978369262e-05, "loss": 2.355434608459473, "memory(GiB)": 72.85, "step": 47000, "token_acc": 0.5331325301204819, "train_speed(iter/s)": 0.671999 }, { "epoch": 2.013624094940234, "eval_loss": 2.0889010429382324, "eval_runtime": 
15.3916, "eval_samples_per_second": 6.497, "eval_steps_per_second": 6.497, "eval_token_acc": 0.5077922077922078, "step": 47000 }, { "epoch": 2.013838310269483, "grad_norm": 4.658740043640137, "learning_rate": 6.506119272180304e-05, "loss": 2.2596147537231444, "memory(GiB)": 72.85, "step": 47005, "token_acc": 0.5054151624548736, "train_speed(iter/s)": 0.671838 }, { "epoch": 2.014052525598732, "grad_norm": 5.558268070220947, "learning_rate": 6.505477538706584e-05, "loss": 2.149364471435547, "memory(GiB)": 72.85, "step": 47010, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.671835 }, { "epoch": 2.014266740927981, "grad_norm": 4.57516622543335, "learning_rate": 6.504835777959727e-05, "loss": 2.054066467285156, "memory(GiB)": 72.85, "step": 47015, "token_acc": 0.532051282051282, "train_speed(iter/s)": 0.671852 }, { "epoch": 2.0144809562572297, "grad_norm": 6.207513809204102, "learning_rate": 6.504193989951356e-05, "loss": 2.4388095855712892, "memory(GiB)": 72.85, "step": 47020, "token_acc": 0.4868804664723032, "train_speed(iter/s)": 0.671851 }, { "epoch": 2.014695171586479, "grad_norm": 6.575070858001709, "learning_rate": 6.503552174693102e-05, "loss": 2.448031997680664, "memory(GiB)": 72.85, "step": 47025, "token_acc": 0.5300751879699248, "train_speed(iter/s)": 0.671858 }, { "epoch": 2.0149093869157277, "grad_norm": 4.5035014152526855, "learning_rate": 6.502910332196588e-05, "loss": 2.1033998489379884, "memory(GiB)": 72.85, "step": 47030, "token_acc": 0.5372549019607843, "train_speed(iter/s)": 0.671855 }, { "epoch": 2.0151236022449766, "grad_norm": 4.3220086097717285, "learning_rate": 6.502268462473444e-05, "loss": 2.111830139160156, "memory(GiB)": 72.85, "step": 47035, "token_acc": 0.5155709342560554, "train_speed(iter/s)": 0.671843 }, { "epoch": 2.015337817574226, "grad_norm": 5.4975409507751465, "learning_rate": 6.501626565535298e-05, "loss": 2.46844539642334, "memory(GiB)": 72.85, "step": 47040, "token_acc": 0.4968944099378882, "train_speed(iter/s)": 
0.671846 }, { "epoch": 2.0155520329034746, "grad_norm": 4.346859455108643, "learning_rate": 6.500984641393777e-05, "loss": 2.1095211029052736, "memory(GiB)": 72.85, "step": 47045, "token_acc": 0.5466666666666666, "train_speed(iter/s)": 0.671841 }, { "epoch": 2.0157662482327234, "grad_norm": 4.283621311187744, "learning_rate": 6.500342690060513e-05, "loss": 2.5451709747314455, "memory(GiB)": 72.85, "step": 47050, "token_acc": 0.4740061162079511, "train_speed(iter/s)": 0.671842 }, { "epoch": 2.0159804635619727, "grad_norm": 3.8623499870300293, "learning_rate": 6.499700711547134e-05, "loss": 2.001965141296387, "memory(GiB)": 72.85, "step": 47055, "token_acc": 0.5245901639344263, "train_speed(iter/s)": 0.671845 }, { "epoch": 2.0161946788912215, "grad_norm": 4.899980545043945, "learning_rate": 6.499058705865268e-05, "loss": 2.145173263549805, "memory(GiB)": 72.85, "step": 47060, "token_acc": 0.5165562913907285, "train_speed(iter/s)": 0.67185 }, { "epoch": 2.0164088942204703, "grad_norm": 6.844315528869629, "learning_rate": 6.498416673026549e-05, "loss": 2.047066307067871, "memory(GiB)": 72.85, "step": 47065, "token_acc": 0.5540983606557377, "train_speed(iter/s)": 0.67185 }, { "epoch": 2.0166231095497196, "grad_norm": 4.262969017028809, "learning_rate": 6.497774613042605e-05, "loss": 2.4863344192504884, "memory(GiB)": 72.85, "step": 47070, "token_acc": 0.4886731391585761, "train_speed(iter/s)": 0.671857 }, { "epoch": 2.0168373248789684, "grad_norm": 6.426335334777832, "learning_rate": 6.497132525925067e-05, "loss": 1.8951833724975586, "memory(GiB)": 72.85, "step": 47075, "token_acc": 0.6286919831223629, "train_speed(iter/s)": 0.671841 }, { "epoch": 2.017051540208217, "grad_norm": 3.586606979370117, "learning_rate": 6.496490411685572e-05, "loss": 2.132790374755859, "memory(GiB)": 72.85, "step": 47080, "token_acc": 0.5324675324675324, "train_speed(iter/s)": 0.671847 }, { "epoch": 2.0172657555374665, "grad_norm": 5.612794876098633, "learning_rate": 6.495848270335749e-05, 
"loss": 2.493940544128418, "memory(GiB)": 72.85, "step": 47085, "token_acc": 0.49498327759197325, "train_speed(iter/s)": 0.671851 }, { "epoch": 2.0174799708667153, "grad_norm": 5.486477851867676, "learning_rate": 6.49520610188723e-05, "loss": 2.31146354675293, "memory(GiB)": 72.85, "step": 47090, "token_acc": 0.5099337748344371, "train_speed(iter/s)": 0.671874 }, { "epoch": 2.017694186195964, "grad_norm": 5.966147422790527, "learning_rate": 6.494563906351651e-05, "loss": 2.270356369018555, "memory(GiB)": 72.85, "step": 47095, "token_acc": 0.47719298245614034, "train_speed(iter/s)": 0.671877 }, { "epoch": 2.0179084015252133, "grad_norm": 5.003561496734619, "learning_rate": 6.493921683740645e-05, "loss": 2.0614063262939455, "memory(GiB)": 72.85, "step": 47100, "token_acc": 0.522633744855967, "train_speed(iter/s)": 0.671868 }, { "epoch": 2.018122616854462, "grad_norm": 4.714433193206787, "learning_rate": 6.493279434065847e-05, "loss": 2.4903045654296876, "memory(GiB)": 72.85, "step": 47105, "token_acc": 0.4463768115942029, "train_speed(iter/s)": 0.671856 }, { "epoch": 2.018336832183711, "grad_norm": 4.948630332946777, "learning_rate": 6.49263715733889e-05, "loss": 2.1896677017211914, "memory(GiB)": 72.85, "step": 47110, "token_acc": 0.5098814229249012, "train_speed(iter/s)": 0.671862 }, { "epoch": 2.01855104751296, "grad_norm": 4.506821155548096, "learning_rate": 6.49199485357141e-05, "loss": 2.3070323944091795, "memory(GiB)": 72.85, "step": 47115, "token_acc": 0.5634328358208955, "train_speed(iter/s)": 0.671872 }, { "epoch": 2.018765262842209, "grad_norm": 4.273702144622803, "learning_rate": 6.491352522775043e-05, "loss": 2.328134536743164, "memory(GiB)": 72.85, "step": 47120, "token_acc": 0.4563758389261745, "train_speed(iter/s)": 0.671867 }, { "epoch": 2.018979478171458, "grad_norm": 5.358789920806885, "learning_rate": 6.490710164961428e-05, "loss": 2.1445285797119142, "memory(GiB)": 72.85, "step": 47125, "token_acc": 0.5396825396825397, "train_speed(iter/s)": 
0.67187 }, { "epoch": 2.019193693500707, "grad_norm": 4.452960968017578, "learning_rate": 6.490067780142201e-05, "loss": 2.4819610595703123, "memory(GiB)": 72.85, "step": 47130, "token_acc": 0.4558303886925795, "train_speed(iter/s)": 0.67187 }, { "epoch": 2.019407908829956, "grad_norm": 4.926126003265381, "learning_rate": 6.489425368328996e-05, "loss": 2.286403846740723, "memory(GiB)": 72.85, "step": 47135, "token_acc": 0.534375, "train_speed(iter/s)": 0.671876 }, { "epoch": 2.0196221241592047, "grad_norm": 5.543732643127441, "learning_rate": 6.488782929533453e-05, "loss": 2.276019287109375, "memory(GiB)": 72.85, "step": 47140, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.671868 }, { "epoch": 2.019836339488454, "grad_norm": 4.314444541931152, "learning_rate": 6.488140463767211e-05, "loss": 1.9643508911132812, "memory(GiB)": 72.85, "step": 47145, "token_acc": 0.5798611111111112, "train_speed(iter/s)": 0.671875 }, { "epoch": 2.020050554817703, "grad_norm": 5.89453649520874, "learning_rate": 6.487497971041909e-05, "loss": 2.277938461303711, "memory(GiB)": 72.85, "step": 47150, "token_acc": 0.5, "train_speed(iter/s)": 0.671873 }, { "epoch": 2.0202647701469516, "grad_norm": 3.555081367492676, "learning_rate": 6.486855451369187e-05, "loss": 2.2826732635498046, "memory(GiB)": 72.85, "step": 47155, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.671892 }, { "epoch": 2.020478985476201, "grad_norm": 4.7096381187438965, "learning_rate": 6.486212904760683e-05, "loss": 2.4930551528930662, "memory(GiB)": 72.85, "step": 47160, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.67189 }, { "epoch": 2.0206932008054497, "grad_norm": 4.9902520179748535, "learning_rate": 6.485570331228036e-05, "loss": 2.467730712890625, "memory(GiB)": 72.85, "step": 47165, "token_acc": 0.4875, "train_speed(iter/s)": 0.671891 }, { "epoch": 2.0209074161346985, "grad_norm": 4.096108436584473, "learning_rate": 6.484927730782892e-05, "loss": 2.5581565856933595, "memory(GiB)": 
72.85, "step": 47170, "token_acc": 0.47491638795986624, "train_speed(iter/s)": 0.671887 }, { "epoch": 2.0211216314639477, "grad_norm": 6.071502208709717, "learning_rate": 6.484285103436889e-05, "loss": 2.3450237274169923, "memory(GiB)": 72.85, "step": 47175, "token_acc": 0.5, "train_speed(iter/s)": 0.671877 }, { "epoch": 2.0213358467931966, "grad_norm": 3.7485599517822266, "learning_rate": 6.483642449201667e-05, "loss": 2.131818389892578, "memory(GiB)": 72.85, "step": 47180, "token_acc": 0.5159235668789809, "train_speed(iter/s)": 0.671879 }, { "epoch": 2.0215500621224454, "grad_norm": 3.745190382003784, "learning_rate": 6.482999768088871e-05, "loss": 2.286814880371094, "memory(GiB)": 72.85, "step": 47185, "token_acc": 0.5259938837920489, "train_speed(iter/s)": 0.671891 }, { "epoch": 2.0217642774516946, "grad_norm": 6.789985656738281, "learning_rate": 6.482357060110143e-05, "loss": 2.5105018615722656, "memory(GiB)": 72.85, "step": 47190, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.671896 }, { "epoch": 2.0219784927809434, "grad_norm": 5.626152515411377, "learning_rate": 6.481714325277126e-05, "loss": 2.3028591156005858, "memory(GiB)": 72.85, "step": 47195, "token_acc": 0.4854014598540146, "train_speed(iter/s)": 0.671905 }, { "epoch": 2.0221927081101922, "grad_norm": 4.375533103942871, "learning_rate": 6.481071563601465e-05, "loss": 1.9031015396118165, "memory(GiB)": 72.85, "step": 47200, "token_acc": 0.5754385964912281, "train_speed(iter/s)": 0.671893 }, { "epoch": 2.0224069234394415, "grad_norm": 5.02755880355835, "learning_rate": 6.480428775094803e-05, "loss": 2.050235366821289, "memory(GiB)": 72.85, "step": 47205, "token_acc": 0.5736434108527132, "train_speed(iter/s)": 0.671888 }, { "epoch": 2.0226211387686903, "grad_norm": 5.788663864135742, "learning_rate": 6.479785959768785e-05, "loss": 2.1122158050537108, "memory(GiB)": 72.85, "step": 47210, "token_acc": 0.5328467153284672, "train_speed(iter/s)": 0.671883 }, { "epoch": 2.022835354097939, 
"grad_norm": 4.511973857879639, "learning_rate": 6.479143117635056e-05, "loss": 2.2629167556762697, "memory(GiB)": 72.85, "step": 47215, "token_acc": 0.5457875457875457, "train_speed(iter/s)": 0.671874 }, { "epoch": 2.0230495694271884, "grad_norm": 3.562861204147339, "learning_rate": 6.478500248705261e-05, "loss": 1.9972929000854491, "memory(GiB)": 72.85, "step": 47220, "token_acc": 0.5407166123778502, "train_speed(iter/s)": 0.671889 }, { "epoch": 2.023263784756437, "grad_norm": 5.043150424957275, "learning_rate": 6.477857352991046e-05, "loss": 2.2179336547851562, "memory(GiB)": 72.85, "step": 47225, "token_acc": 0.5145985401459854, "train_speed(iter/s)": 0.671902 }, { "epoch": 2.023478000085686, "grad_norm": 4.537486553192139, "learning_rate": 6.477214430504061e-05, "loss": 2.2063358306884764, "memory(GiB)": 72.85, "step": 47230, "token_acc": 0.486404833836858, "train_speed(iter/s)": 0.6719 }, { "epoch": 2.0236922154149353, "grad_norm": 4.928689956665039, "learning_rate": 6.47657148125595e-05, "loss": 2.4350582122802735, "memory(GiB)": 72.85, "step": 47235, "token_acc": 0.5075757575757576, "train_speed(iter/s)": 0.671918 }, { "epoch": 2.023906430744184, "grad_norm": 4.651821136474609, "learning_rate": 6.475928505258362e-05, "loss": 2.202604866027832, "memory(GiB)": 72.85, "step": 47240, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.671917 }, { "epoch": 2.024120646073433, "grad_norm": 4.564537525177002, "learning_rate": 6.475285502522947e-05, "loss": 2.4242345809936525, "memory(GiB)": 72.85, "step": 47245, "token_acc": 0.48333333333333334, "train_speed(iter/s)": 0.671924 }, { "epoch": 2.024334861402682, "grad_norm": 6.596981525421143, "learning_rate": 6.474642473061348e-05, "loss": 2.05889835357666, "memory(GiB)": 72.85, "step": 47250, "token_acc": 0.5252918287937743, "train_speed(iter/s)": 0.67195 }, { "epoch": 2.024549076731931, "grad_norm": 5.376502990722656, "learning_rate": 6.473999416885217e-05, "loss": 2.3533370971679686, "memory(GiB)": 72.85, 
"step": 47255, "token_acc": 0.496551724137931, "train_speed(iter/s)": 0.671953 }, { "epoch": 2.0247632920611798, "grad_norm": 3.9512686729431152, "learning_rate": 6.473356334006204e-05, "loss": 2.1019771575927733, "memory(GiB)": 72.85, "step": 47260, "token_acc": 0.5435540069686411, "train_speed(iter/s)": 0.67195 }, { "epoch": 2.024977507390429, "grad_norm": 3.761820077896118, "learning_rate": 6.47271322443596e-05, "loss": 2.2349658966064454, "memory(GiB)": 72.85, "step": 47265, "token_acc": 0.5376344086021505, "train_speed(iter/s)": 0.671951 }, { "epoch": 2.025191722719678, "grad_norm": 4.205257415771484, "learning_rate": 6.472070088186134e-05, "loss": 2.235013961791992, "memory(GiB)": 72.85, "step": 47270, "token_acc": 0.5474452554744526, "train_speed(iter/s)": 0.671959 }, { "epoch": 2.0254059380489267, "grad_norm": 5.347382068634033, "learning_rate": 6.471426925268378e-05, "loss": 2.5402225494384765, "memory(GiB)": 72.85, "step": 47275, "token_acc": 0.5016181229773463, "train_speed(iter/s)": 0.671971 }, { "epoch": 2.025620153378176, "grad_norm": 6.396944046020508, "learning_rate": 6.470783735694343e-05, "loss": 2.188646697998047, "memory(GiB)": 72.85, "step": 47280, "token_acc": 0.5125448028673835, "train_speed(iter/s)": 0.671953 }, { "epoch": 2.0258343687074247, "grad_norm": 4.313447952270508, "learning_rate": 6.47014051947568e-05, "loss": 2.2336320877075195, "memory(GiB)": 72.85, "step": 47285, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.671957 }, { "epoch": 2.0260485840366735, "grad_norm": 5.837279796600342, "learning_rate": 6.469497276624043e-05, "loss": 2.311463737487793, "memory(GiB)": 72.85, "step": 47290, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.671961 }, { "epoch": 2.026262799365923, "grad_norm": 4.876980781555176, "learning_rate": 6.468854007151084e-05, "loss": 2.0965076446533204, "memory(GiB)": 72.85, "step": 47295, "token_acc": 0.546875, "train_speed(iter/s)": 0.671958 }, { "epoch": 2.0264770146951716, "grad_norm": 
6.947698593139648, "learning_rate": 6.468210711068457e-05, "loss": 2.4571159362792967, "memory(GiB)": 72.85, "step": 47300, "token_acc": 0.45980707395498394, "train_speed(iter/s)": 0.671951 }, { "epoch": 2.0266912300244204, "grad_norm": 4.264169216156006, "learning_rate": 6.467567388387814e-05, "loss": 2.0338674545288087, "memory(GiB)": 72.85, "step": 47305, "token_acc": 0.51875, "train_speed(iter/s)": 0.67196 }, { "epoch": 2.0269054453536697, "grad_norm": 3.948971748352051, "learning_rate": 6.466924039120813e-05, "loss": 2.303752899169922, "memory(GiB)": 72.85, "step": 47310, "token_acc": 0.5086705202312138, "train_speed(iter/s)": 0.671959 }, { "epoch": 2.0271196606829185, "grad_norm": 4.355888843536377, "learning_rate": 6.466280663279108e-05, "loss": 2.537685775756836, "memory(GiB)": 72.85, "step": 47315, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.67196 }, { "epoch": 2.0273338760121673, "grad_norm": 5.074545860290527, "learning_rate": 6.46563726087435e-05, "loss": 2.2882545471191404, "memory(GiB)": 72.85, "step": 47320, "token_acc": 0.47876447876447875, "train_speed(iter/s)": 0.671968 }, { "epoch": 2.0275480913414166, "grad_norm": 5.25331449508667, "learning_rate": 6.464993831918201e-05, "loss": 2.3460515975952148, "memory(GiB)": 72.85, "step": 47325, "token_acc": 0.48494983277591974, "train_speed(iter/s)": 0.671978 }, { "epoch": 2.0277623066706654, "grad_norm": 5.042882442474365, "learning_rate": 6.464350376422314e-05, "loss": 2.0694911956787108, "memory(GiB)": 72.85, "step": 47330, "token_acc": 0.5298507462686567, "train_speed(iter/s)": 0.671994 }, { "epoch": 2.027976521999914, "grad_norm": 5.024554252624512, "learning_rate": 6.463706894398344e-05, "loss": 2.2578201293945312, "memory(GiB)": 72.85, "step": 47335, "token_acc": 0.5201465201465202, "train_speed(iter/s)": 0.672009 }, { "epoch": 2.0281907373291634, "grad_norm": 4.778515338897705, "learning_rate": 6.463063385857953e-05, "loss": 2.2396663665771483, "memory(GiB)": 72.85, "step": 47340, 
"token_acc": 0.5084175084175084, "train_speed(iter/s)": 0.67202 }, { "epoch": 2.0284049526584123, "grad_norm": 4.371269702911377, "learning_rate": 6.462419850812795e-05, "loss": 1.9749340057373046, "memory(GiB)": 72.85, "step": 47345, "token_acc": 0.5737704918032787, "train_speed(iter/s)": 0.672021 }, { "epoch": 2.028619167987661, "grad_norm": 3.673877000808716, "learning_rate": 6.461776289274531e-05, "loss": 2.2125808715820314, "memory(GiB)": 72.85, "step": 47350, "token_acc": 0.5413793103448276, "train_speed(iter/s)": 0.672025 }, { "epoch": 2.0288333833169103, "grad_norm": 4.370152473449707, "learning_rate": 6.461132701254818e-05, "loss": 2.258810043334961, "memory(GiB)": 72.85, "step": 47355, "token_acc": 0.48562300319488816, "train_speed(iter/s)": 0.672019 }, { "epoch": 2.029047598646159, "grad_norm": 5.787072658538818, "learning_rate": 6.460489086765313e-05, "loss": 2.3587690353393556, "memory(GiB)": 72.85, "step": 47360, "token_acc": 0.474025974025974, "train_speed(iter/s)": 0.672026 }, { "epoch": 2.029261813975408, "grad_norm": 4.2218451499938965, "learning_rate": 6.45984544581768e-05, "loss": 2.2368736267089844, "memory(GiB)": 72.85, "step": 47365, "token_acc": 0.52734375, "train_speed(iter/s)": 0.672021 }, { "epoch": 2.029476029304657, "grad_norm": 3.6599771976470947, "learning_rate": 6.459201778423578e-05, "loss": 2.027309608459473, "memory(GiB)": 72.85, "step": 47370, "token_acc": 0.5335365853658537, "train_speed(iter/s)": 0.672035 }, { "epoch": 2.029690244633906, "grad_norm": 4.085577487945557, "learning_rate": 6.458558084594666e-05, "loss": 2.2749256134033202, "memory(GiB)": 72.85, "step": 47375, "token_acc": 0.4984126984126984, "train_speed(iter/s)": 0.672044 }, { "epoch": 2.029904459963155, "grad_norm": 6.22536039352417, "learning_rate": 6.457914364342606e-05, "loss": 2.175663185119629, "memory(GiB)": 72.85, "step": 47380, "token_acc": 0.5419354838709678, "train_speed(iter/s)": 0.672053 }, { "epoch": 2.030118675292404, "grad_norm": 3.82031512260437, 
"learning_rate": 6.45727061767906e-05, "loss": 1.8571285247802733, "memory(GiB)": 72.85, "step": 47385, "token_acc": 0.5551020408163265, "train_speed(iter/s)": 0.672054 }, { "epoch": 2.030332890621653, "grad_norm": 4.87142276763916, "learning_rate": 6.45662684461569e-05, "loss": 2.4749141693115235, "memory(GiB)": 72.85, "step": 47390, "token_acc": 0.4727272727272727, "train_speed(iter/s)": 0.672037 }, { "epoch": 2.0305471059509017, "grad_norm": 4.995570659637451, "learning_rate": 6.455983045164158e-05, "loss": 2.0747468948364256, "memory(GiB)": 72.85, "step": 47395, "token_acc": 0.5485232067510548, "train_speed(iter/s)": 0.67203 }, { "epoch": 2.030761321280151, "grad_norm": 4.091058731079102, "learning_rate": 6.455339219336127e-05, "loss": 2.2730518341064454, "memory(GiB)": 72.85, "step": 47400, "token_acc": 0.5271317829457365, "train_speed(iter/s)": 0.672037 }, { "epoch": 2.0309755366094, "grad_norm": 3.930417537689209, "learning_rate": 6.454695367143261e-05, "loss": 2.4348308563232424, "memory(GiB)": 72.85, "step": 47405, "token_acc": 0.4699248120300752, "train_speed(iter/s)": 0.672048 }, { "epoch": 2.0311897519386486, "grad_norm": 4.248569011688232, "learning_rate": 6.454051488597223e-05, "loss": 1.9928766250610352, "memory(GiB)": 72.85, "step": 47410, "token_acc": 0.5458333333333333, "train_speed(iter/s)": 0.672057 }, { "epoch": 2.031403967267898, "grad_norm": 4.186906337738037, "learning_rate": 6.453407583709679e-05, "loss": 2.2679351806640624, "memory(GiB)": 72.85, "step": 47415, "token_acc": 0.49666666666666665, "train_speed(iter/s)": 0.672049 }, { "epoch": 2.0316181825971467, "grad_norm": 5.824984073638916, "learning_rate": 6.452763652492294e-05, "loss": 2.470287322998047, "memory(GiB)": 72.85, "step": 47420, "token_acc": 0.4850498338870432, "train_speed(iter/s)": 0.672029 }, { "epoch": 2.0318323979263955, "grad_norm": 3.8931870460510254, "learning_rate": 6.452119694956732e-05, "loss": 2.2327213287353516, "memory(GiB)": 72.85, "step": 47425, "token_acc": 
0.5034013605442177, "train_speed(iter/s)": 0.672031 }, { "epoch": 2.0320466132556447, "grad_norm": 5.883737087249756, "learning_rate": 6.451475711114659e-05, "loss": 2.3684755325317384, "memory(GiB)": 72.85, "step": 47430, "token_acc": 0.47586206896551725, "train_speed(iter/s)": 0.672035 }, { "epoch": 2.0322608285848935, "grad_norm": 4.810351371765137, "learning_rate": 6.450831700977742e-05, "loss": 2.3736007690429686, "memory(GiB)": 72.85, "step": 47435, "token_acc": 0.5095541401273885, "train_speed(iter/s)": 0.672029 }, { "epoch": 2.0324750439141424, "grad_norm": 5.234283924102783, "learning_rate": 6.45018766455765e-05, "loss": 2.1240386962890625, "memory(GiB)": 72.85, "step": 47440, "token_acc": 0.553030303030303, "train_speed(iter/s)": 0.672039 }, { "epoch": 2.0326892592433916, "grad_norm": 6.015618324279785, "learning_rate": 6.449543601866045e-05, "loss": 2.2905862808227537, "memory(GiB)": 72.85, "step": 47445, "token_acc": 0.5141065830721003, "train_speed(iter/s)": 0.672047 }, { "epoch": 2.0329034745726404, "grad_norm": 4.4644341468811035, "learning_rate": 6.448899512914598e-05, "loss": 2.234909439086914, "memory(GiB)": 72.85, "step": 47450, "token_acc": 0.518796992481203, "train_speed(iter/s)": 0.672051 }, { "epoch": 2.0331176899018892, "grad_norm": 5.092884063720703, "learning_rate": 6.44825539771498e-05, "loss": 2.226584625244141, "memory(GiB)": 72.85, "step": 47455, "token_acc": 0.5338645418326693, "train_speed(iter/s)": 0.672047 }, { "epoch": 2.0333319052311385, "grad_norm": 5.1150665283203125, "learning_rate": 6.447611256278856e-05, "loss": 2.2099641799926757, "memory(GiB)": 72.85, "step": 47460, "token_acc": 0.5317220543806647, "train_speed(iter/s)": 0.672061 }, { "epoch": 2.0335461205603873, "grad_norm": 4.252492427825928, "learning_rate": 6.446967088617894e-05, "loss": 2.016604995727539, "memory(GiB)": 72.85, "step": 47465, "token_acc": 0.5595667870036101, "train_speed(iter/s)": 0.67207 }, { "epoch": 2.033760335889636, "grad_norm": 5.15846586227417, 
"learning_rate": 6.446322894743766e-05, "loss": 1.9253442764282227, "memory(GiB)": 72.85, "step": 47470, "token_acc": 0.5734265734265734, "train_speed(iter/s)": 0.672079 }, { "epoch": 2.0339745512188854, "grad_norm": 5.115092754364014, "learning_rate": 6.445678674668144e-05, "loss": 2.06192684173584, "memory(GiB)": 72.85, "step": 47475, "token_acc": 0.5191082802547771, "train_speed(iter/s)": 0.672061 }, { "epoch": 2.034188766548134, "grad_norm": 3.516200065612793, "learning_rate": 6.445034428402695e-05, "loss": 2.3470149993896485, "memory(GiB)": 72.85, "step": 47480, "token_acc": 0.5, "train_speed(iter/s)": 0.672049 }, { "epoch": 2.034402981877383, "grad_norm": 5.5857672691345215, "learning_rate": 6.444390155959092e-05, "loss": 2.087248611450195, "memory(GiB)": 72.85, "step": 47485, "token_acc": 0.5441176470588235, "train_speed(iter/s)": 0.672063 }, { "epoch": 2.0346171972066323, "grad_norm": 5.530879020690918, "learning_rate": 6.443745857349009e-05, "loss": 2.1424415588378904, "memory(GiB)": 72.85, "step": 47490, "token_acc": 0.5018181818181818, "train_speed(iter/s)": 0.672067 }, { "epoch": 2.034831412535881, "grad_norm": 8.288151741027832, "learning_rate": 6.443101532584112e-05, "loss": 2.17187614440918, "memory(GiB)": 72.85, "step": 47495, "token_acc": 0.5208333333333334, "train_speed(iter/s)": 0.672062 }, { "epoch": 2.03504562786513, "grad_norm": 4.905542373657227, "learning_rate": 6.442457181676078e-05, "loss": 2.337541961669922, "memory(GiB)": 72.85, "step": 47500, "token_acc": 0.50625, "train_speed(iter/s)": 0.672081 }, { "epoch": 2.03504562786513, "eval_loss": 2.1349892616271973, "eval_runtime": 16.177, "eval_samples_per_second": 6.182, "eval_steps_per_second": 6.182, "eval_token_acc": 0.4986225895316804, "step": 47500 }, { "epoch": 2.035259843194379, "grad_norm": 4.942996025085449, "learning_rate": 6.44181280463658e-05, "loss": 2.195734977722168, "memory(GiB)": 72.85, "step": 47505, "token_acc": 0.5004793863854267, "train_speed(iter/s)": 0.671896 }, { 
"epoch": 2.035474058523628, "grad_norm": 5.382339954376221, "learning_rate": 6.44116840147729e-05, "loss": 2.019505500793457, "memory(GiB)": 72.85, "step": 47510, "token_acc": 0.5331125827814569, "train_speed(iter/s)": 0.671888 }, { "epoch": 2.0356882738528768, "grad_norm": 6.542463779449463, "learning_rate": 6.440523972209882e-05, "loss": 2.245443344116211, "memory(GiB)": 72.85, "step": 47515, "token_acc": 0.5338345864661654, "train_speed(iter/s)": 0.671885 }, { "epoch": 2.035902489182126, "grad_norm": 4.1907958984375, "learning_rate": 6.439879516846031e-05, "loss": 2.531122016906738, "memory(GiB)": 72.85, "step": 47520, "token_acc": 0.47530864197530864, "train_speed(iter/s)": 0.671883 }, { "epoch": 2.036116704511375, "grad_norm": 5.28610372543335, "learning_rate": 6.439235035397412e-05, "loss": 2.436323356628418, "memory(GiB)": 72.85, "step": 47525, "token_acc": 0.45390070921985815, "train_speed(iter/s)": 0.671898 }, { "epoch": 2.0363309198406236, "grad_norm": 5.047333240509033, "learning_rate": 6.438590527875701e-05, "loss": 2.1806110382080077, "memory(GiB)": 72.85, "step": 47530, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.67189 }, { "epoch": 2.036545135169873, "grad_norm": 5.883185386657715, "learning_rate": 6.437945994292572e-05, "loss": 2.108548164367676, "memory(GiB)": 72.85, "step": 47535, "token_acc": 0.5606060606060606, "train_speed(iter/s)": 0.671886 }, { "epoch": 2.0367593504991217, "grad_norm": 3.567499876022339, "learning_rate": 6.437301434659703e-05, "loss": 2.269184875488281, "memory(GiB)": 72.85, "step": 47540, "token_acc": 0.4732142857142857, "train_speed(iter/s)": 0.671876 }, { "epoch": 2.0369735658283705, "grad_norm": 5.577042579650879, "learning_rate": 6.43665684898877e-05, "loss": 2.131974983215332, "memory(GiB)": 72.85, "step": 47545, "token_acc": 0.5462184873949579, "train_speed(iter/s)": 0.67187 }, { "epoch": 2.03718778115762, "grad_norm": 4.5379638671875, "learning_rate": 6.43601223729145e-05, "loss": 2.253250503540039, 
"memory(GiB)": 72.85, "step": 47550, "token_acc": 0.5362776025236593, "train_speed(iter/s)": 0.671877 }, { "epoch": 2.0374019964868686, "grad_norm": 5.354333400726318, "learning_rate": 6.435367599579421e-05, "loss": 2.2962255477905273, "memory(GiB)": 72.85, "step": 47555, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.671888 }, { "epoch": 2.0376162118161174, "grad_norm": 4.397290229797363, "learning_rate": 6.434722935864364e-05, "loss": 2.083455467224121, "memory(GiB)": 72.85, "step": 47560, "token_acc": 0.531496062992126, "train_speed(iter/s)": 0.671879 }, { "epoch": 2.0378304271453667, "grad_norm": 4.839981555938721, "learning_rate": 6.434078246157954e-05, "loss": 2.1665155410766603, "memory(GiB)": 72.85, "step": 47565, "token_acc": 0.5486725663716814, "train_speed(iter/s)": 0.671865 }, { "epoch": 2.0380446424746155, "grad_norm": 3.448868989944458, "learning_rate": 6.43343353047187e-05, "loss": 2.165439414978027, "memory(GiB)": 72.85, "step": 47570, "token_acc": 0.5521235521235521, "train_speed(iter/s)": 0.671861 }, { "epoch": 2.0382588578038643, "grad_norm": 4.25673246383667, "learning_rate": 6.432788788817793e-05, "loss": 2.259776306152344, "memory(GiB)": 72.85, "step": 47575, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.671871 }, { "epoch": 2.0384730731331135, "grad_norm": 3.6200106143951416, "learning_rate": 6.432144021207404e-05, "loss": 2.307555389404297, "memory(GiB)": 72.85, "step": 47580, "token_acc": 0.484472049689441, "train_speed(iter/s)": 0.671871 }, { "epoch": 2.0386872884623624, "grad_norm": 6.62623405456543, "learning_rate": 6.431499227652383e-05, "loss": 2.224033546447754, "memory(GiB)": 72.85, "step": 47585, "token_acc": 0.5225806451612903, "train_speed(iter/s)": 0.671863 }, { "epoch": 2.038901503791611, "grad_norm": 4.864681720733643, "learning_rate": 6.43085440816441e-05, "loss": 2.2514930725097657, "memory(GiB)": 72.85, "step": 47590, "token_acc": 0.5032467532467533, "train_speed(iter/s)": 0.671858 }, { "epoch": 
2.0391157191208604, "grad_norm": 5.416516304016113, "learning_rate": 6.430209562755165e-05, "loss": 1.9407249450683595, "memory(GiB)": 72.85, "step": 47595, "token_acc": 0.583011583011583, "train_speed(iter/s)": 0.671859 }, { "epoch": 2.0393299344501092, "grad_norm": 4.0834150314331055, "learning_rate": 6.429564691436334e-05, "loss": 1.8672561645507812, "memory(GiB)": 72.85, "step": 47600, "token_acc": 0.5754385964912281, "train_speed(iter/s)": 0.671862 }, { "epoch": 2.039544149779358, "grad_norm": 6.481766223907471, "learning_rate": 6.4289197942196e-05, "loss": 2.034151077270508, "memory(GiB)": 72.85, "step": 47605, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.671876 }, { "epoch": 2.0397583651086073, "grad_norm": 4.126658916473389, "learning_rate": 6.42827487111664e-05, "loss": 2.016444778442383, "memory(GiB)": 72.85, "step": 47610, "token_acc": 0.5773195876288659, "train_speed(iter/s)": 0.671878 }, { "epoch": 2.039972580437856, "grad_norm": 4.251189708709717, "learning_rate": 6.427629922139142e-05, "loss": 2.1177139282226562, "memory(GiB)": 72.85, "step": 47615, "token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.67187 }, { "epoch": 2.040186795767105, "grad_norm": 5.245889186859131, "learning_rate": 6.426984947298789e-05, "loss": 2.120344543457031, "memory(GiB)": 72.85, "step": 47620, "token_acc": 0.48863636363636365, "train_speed(iter/s)": 0.67187 }, { "epoch": 2.040401011096354, "grad_norm": 4.346991539001465, "learning_rate": 6.426339946607264e-05, "loss": 1.8842430114746094, "memory(GiB)": 72.85, "step": 47625, "token_acc": 0.568904593639576, "train_speed(iter/s)": 0.671871 }, { "epoch": 2.040615226425603, "grad_norm": 3.987177610397339, "learning_rate": 6.425694920076253e-05, "loss": 2.3043975830078125, "memory(GiB)": 72.85, "step": 47630, "token_acc": 0.49324324324324326, "train_speed(iter/s)": 0.671862 }, { "epoch": 2.040829441754852, "grad_norm": 4.648918151855469, "learning_rate": 6.425049867717443e-05, "loss": 2.247554397583008, 
"memory(GiB)": 72.85, "step": 47635, "token_acc": 0.5171339563862928, "train_speed(iter/s)": 0.671866 }, { "epoch": 2.041043657084101, "grad_norm": 5.549439430236816, "learning_rate": 6.424404789542515e-05, "loss": 2.4948469161987306, "memory(GiB)": 72.85, "step": 47640, "token_acc": 0.4807692307692308, "train_speed(iter/s)": 0.671871 }, { "epoch": 2.04125787241335, "grad_norm": 6.154771327972412, "learning_rate": 6.423759685563161e-05, "loss": 2.394541549682617, "memory(GiB)": 72.85, "step": 47645, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.671876 }, { "epoch": 2.0414720877425987, "grad_norm": 5.314195156097412, "learning_rate": 6.423114555791063e-05, "loss": 2.3115604400634764, "memory(GiB)": 72.85, "step": 47650, "token_acc": 0.4956268221574344, "train_speed(iter/s)": 0.671871 }, { "epoch": 2.041686303071848, "grad_norm": 4.216509819030762, "learning_rate": 6.422469400237911e-05, "loss": 2.373733711242676, "memory(GiB)": 72.85, "step": 47655, "token_acc": 0.48951048951048953, "train_speed(iter/s)": 0.671864 }, { "epoch": 2.0419005184010968, "grad_norm": 5.1199631690979, "learning_rate": 6.42182421891539e-05, "loss": 2.3748615264892576, "memory(GiB)": 72.85, "step": 47660, "token_acc": 0.48221343873517786, "train_speed(iter/s)": 0.671863 }, { "epoch": 2.0421147337303456, "grad_norm": 3.8588943481445312, "learning_rate": 6.42117901183519e-05, "loss": 2.2637050628662108, "memory(GiB)": 72.85, "step": 47665, "token_acc": 0.5576923076923077, "train_speed(iter/s)": 0.671854 }, { "epoch": 2.042328949059595, "grad_norm": 4.9036078453063965, "learning_rate": 6.420533779008999e-05, "loss": 2.2401702880859373, "memory(GiB)": 72.85, "step": 47670, "token_acc": 0.5316901408450704, "train_speed(iter/s)": 0.671864 }, { "epoch": 2.0425431643888436, "grad_norm": 3.9304161071777344, "learning_rate": 6.419888520448507e-05, "loss": 2.380784606933594, "memory(GiB)": 72.85, "step": 47675, "token_acc": 0.5295950155763239, "train_speed(iter/s)": 0.671857 }, { "epoch": 
2.0427573797180925, "grad_norm": 4.708745002746582, "learning_rate": 6.419243236165402e-05, "loss": 2.423214912414551, "memory(GiB)": 72.85, "step": 47680, "token_acc": 0.4835820895522388, "train_speed(iter/s)": 0.671861 }, { "epoch": 2.0429715950473417, "grad_norm": 4.88259744644165, "learning_rate": 6.418597926171372e-05, "loss": 2.430790328979492, "memory(GiB)": 72.85, "step": 47685, "token_acc": 0.49473684210526314, "train_speed(iter/s)": 0.671878 }, { "epoch": 2.0431858103765905, "grad_norm": 5.079507827758789, "learning_rate": 6.417952590478113e-05, "loss": 2.1800918579101562, "memory(GiB)": 72.85, "step": 47690, "token_acc": 0.4653179190751445, "train_speed(iter/s)": 0.67187 }, { "epoch": 2.0434000257058393, "grad_norm": 4.8914055824279785, "learning_rate": 6.417307229097312e-05, "loss": 2.3969654083251952, "memory(GiB)": 72.85, "step": 47695, "token_acc": 0.5069444444444444, "train_speed(iter/s)": 0.671882 }, { "epoch": 2.0436142410350886, "grad_norm": 4.465951442718506, "learning_rate": 6.416661842040661e-05, "loss": 2.4038930892944337, "memory(GiB)": 72.85, "step": 47700, "token_acc": 0.4901315789473684, "train_speed(iter/s)": 0.671888 }, { "epoch": 2.0438284563643374, "grad_norm": 9.661120414733887, "learning_rate": 6.416016429319851e-05, "loss": 2.170431900024414, "memory(GiB)": 72.85, "step": 47705, "token_acc": 0.5322033898305085, "train_speed(iter/s)": 0.671877 }, { "epoch": 2.044042671693586, "grad_norm": 4.207025051116943, "learning_rate": 6.415370990946575e-05, "loss": 2.2559412002563475, "memory(GiB)": 72.85, "step": 47710, "token_acc": 0.5287009063444109, "train_speed(iter/s)": 0.671871 }, { "epoch": 2.0442568870228355, "grad_norm": 5.248459339141846, "learning_rate": 6.414725526932526e-05, "loss": 2.075777053833008, "memory(GiB)": 72.85, "step": 47715, "token_acc": 0.5137254901960784, "train_speed(iter/s)": 0.671872 }, { "epoch": 2.0444711023520843, "grad_norm": 4.317421913146973, "learning_rate": 6.414080037289398e-05, "loss": 
2.241580581665039, "memory(GiB)": 72.85, "step": 47720, "token_acc": 0.4740061162079511, "train_speed(iter/s)": 0.671877 }, { "epoch": 2.044685317681333, "grad_norm": 5.434630393981934, "learning_rate": 6.413434522028882e-05, "loss": 2.2987573623657225, "memory(GiB)": 72.85, "step": 47725, "token_acc": 0.5152542372881356, "train_speed(iter/s)": 0.67188 }, { "epoch": 2.0448995330105824, "grad_norm": 4.7438507080078125, "learning_rate": 6.412788981162676e-05, "loss": 2.5219085693359373, "memory(GiB)": 72.85, "step": 47730, "token_acc": 0.4658753709198813, "train_speed(iter/s)": 0.671888 }, { "epoch": 2.045113748339831, "grad_norm": 5.87055778503418, "learning_rate": 6.41214341470247e-05, "loss": 2.3481807708740234, "memory(GiB)": 72.85, "step": 47735, "token_acc": 0.5156794425087108, "train_speed(iter/s)": 0.671867 }, { "epoch": 2.04532796366908, "grad_norm": 5.008071422576904, "learning_rate": 6.411497822659961e-05, "loss": 2.313657760620117, "memory(GiB)": 72.85, "step": 47740, "token_acc": 0.49666666666666665, "train_speed(iter/s)": 0.671879 }, { "epoch": 2.0455421789983292, "grad_norm": 4.361314296722412, "learning_rate": 6.410852205046846e-05, "loss": 1.9794513702392578, "memory(GiB)": 72.85, "step": 47745, "token_acc": 0.53125, "train_speed(iter/s)": 0.671886 }, { "epoch": 2.045756394327578, "grad_norm": 4.637351036071777, "learning_rate": 6.41020656187482e-05, "loss": 2.19378719329834, "memory(GiB)": 72.85, "step": 47750, "token_acc": 0.5201342281879194, "train_speed(iter/s)": 0.671891 }, { "epoch": 2.045970609656827, "grad_norm": 4.33072566986084, "learning_rate": 6.409560893155579e-05, "loss": 2.4386978149414062, "memory(GiB)": 72.85, "step": 47755, "token_acc": 0.49038461538461536, "train_speed(iter/s)": 0.671901 }, { "epoch": 2.046184824986076, "grad_norm": 4.646083831787109, "learning_rate": 6.408915198900821e-05, "loss": 2.28857479095459, "memory(GiB)": 72.85, "step": 47760, "token_acc": 0.476038338658147, "train_speed(iter/s)": 0.671891 }, { "epoch": 
2.046399040315325, "grad_norm": 10.579062461853027, "learning_rate": 6.408269479122241e-05, "loss": 2.3137466430664064, "memory(GiB)": 72.85, "step": 47765, "token_acc": 0.5153846153846153, "train_speed(iter/s)": 0.671904 }, { "epoch": 2.0466132556445737, "grad_norm": 3.8772695064544678, "learning_rate": 6.40762373383154e-05, "loss": 2.148398780822754, "memory(GiB)": 72.85, "step": 47770, "token_acc": 0.5447761194029851, "train_speed(iter/s)": 0.671917 }, { "epoch": 2.046827470973823, "grad_norm": 4.2407379150390625, "learning_rate": 6.406977963040414e-05, "loss": 1.937534713745117, "memory(GiB)": 72.85, "step": 47775, "token_acc": 0.5644444444444444, "train_speed(iter/s)": 0.671902 }, { "epoch": 2.047041686303072, "grad_norm": 5.534945011138916, "learning_rate": 6.406332166760561e-05, "loss": 2.307234764099121, "memory(GiB)": 72.85, "step": 47780, "token_acc": 0.5129151291512916, "train_speed(iter/s)": 0.671904 }, { "epoch": 2.0472559016323206, "grad_norm": 4.060173988342285, "learning_rate": 6.405686345003682e-05, "loss": 2.179366111755371, "memory(GiB)": 72.85, "step": 47785, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.671906 }, { "epoch": 2.04747011696157, "grad_norm": 5.111035346984863, "learning_rate": 6.405040497781478e-05, "loss": 2.36851806640625, "memory(GiB)": 72.85, "step": 47790, "token_acc": 0.48615384615384616, "train_speed(iter/s)": 0.671902 }, { "epoch": 2.0476843322908187, "grad_norm": 3.997594118118286, "learning_rate": 6.404394625105647e-05, "loss": 1.9781234741210938, "memory(GiB)": 72.85, "step": 47795, "token_acc": 0.5186440677966102, "train_speed(iter/s)": 0.671898 }, { "epoch": 2.0478985476200675, "grad_norm": 5.003727912902832, "learning_rate": 6.403748726987888e-05, "loss": 2.277974319458008, "memory(GiB)": 72.85, "step": 47800, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.6719 }, { "epoch": 2.0481127629493168, "grad_norm": 4.234455108642578, "learning_rate": 6.403102803439906e-05, "loss": 2.1138635635375977, 
"memory(GiB)": 72.85, "step": 47805, "token_acc": 0.5377358490566038, "train_speed(iter/s)": 0.671898 }, { "epoch": 2.0483269782785656, "grad_norm": 4.312431335449219, "learning_rate": 6.402456854473398e-05, "loss": 2.0350908279418944, "memory(GiB)": 72.85, "step": 47810, "token_acc": 0.5351170568561873, "train_speed(iter/s)": 0.671885 }, { "epoch": 2.0485411936078144, "grad_norm": 6.721011161804199, "learning_rate": 6.40181088010007e-05, "loss": 2.234580230712891, "memory(GiB)": 72.85, "step": 47815, "token_acc": 0.4947916666666667, "train_speed(iter/s)": 0.671892 }, { "epoch": 2.0487554089370636, "grad_norm": 4.182737350463867, "learning_rate": 6.401164880331623e-05, "loss": 2.267201614379883, "memory(GiB)": 72.85, "step": 47820, "token_acc": 0.5437262357414449, "train_speed(iter/s)": 0.671906 }, { "epoch": 2.0489696242663125, "grad_norm": 6.861081123352051, "learning_rate": 6.40051885517976e-05, "loss": 2.275847625732422, "memory(GiB)": 72.85, "step": 47825, "token_acc": 0.5210084033613446, "train_speed(iter/s)": 0.671921 }, { "epoch": 2.0491838395955613, "grad_norm": 3.799835443496704, "learning_rate": 6.399872804656185e-05, "loss": 1.9589065551757812, "memory(GiB)": 72.85, "step": 47830, "token_acc": 0.5478547854785478, "train_speed(iter/s)": 0.671906 }, { "epoch": 2.0493980549248105, "grad_norm": 4.467652797698975, "learning_rate": 6.399226728772598e-05, "loss": 2.31063232421875, "memory(GiB)": 72.85, "step": 47835, "token_acc": 0.5083612040133779, "train_speed(iter/s)": 0.671906 }, { "epoch": 2.0496122702540593, "grad_norm": 4.410149574279785, "learning_rate": 6.39858062754071e-05, "loss": 2.27470703125, "memory(GiB)": 72.85, "step": 47840, "token_acc": 0.46691176470588236, "train_speed(iter/s)": 0.671915 }, { "epoch": 2.049826485583308, "grad_norm": 4.932074546813965, "learning_rate": 6.397934500972219e-05, "loss": 2.184342384338379, "memory(GiB)": 72.85, "step": 47845, "token_acc": 0.5014749262536873, "train_speed(iter/s)": 0.671923 }, { "epoch": 
2.0500407009125574, "grad_norm": 7.330170631408691, "learning_rate": 6.397288349078834e-05, "loss": 2.238937759399414, "memory(GiB)": 72.85, "step": 47850, "token_acc": 0.5097402597402597, "train_speed(iter/s)": 0.671919 }, { "epoch": 2.0502549162418062, "grad_norm": 4.993459701538086, "learning_rate": 6.39664217187226e-05, "loss": 2.242568588256836, "memory(GiB)": 72.85, "step": 47855, "token_acc": 0.5, "train_speed(iter/s)": 0.671927 }, { "epoch": 2.050469131571055, "grad_norm": 5.212102890014648, "learning_rate": 6.395995969364202e-05, "loss": 2.340570640563965, "memory(GiB)": 72.85, "step": 47860, "token_acc": 0.5031645569620253, "train_speed(iter/s)": 0.671923 }, { "epoch": 2.0506833469003043, "grad_norm": 5.652220249176025, "learning_rate": 6.395349741566369e-05, "loss": 2.3104333877563477, "memory(GiB)": 72.85, "step": 47865, "token_acc": 0.516728624535316, "train_speed(iter/s)": 0.671929 }, { "epoch": 2.050897562229553, "grad_norm": 4.885467529296875, "learning_rate": 6.394703488490466e-05, "loss": 2.1251874923706056, "memory(GiB)": 72.85, "step": 47870, "token_acc": 0.567398119122257, "train_speed(iter/s)": 0.671896 }, { "epoch": 2.051111777558802, "grad_norm": 4.812568664550781, "learning_rate": 6.394057210148201e-05, "loss": 2.467799758911133, "memory(GiB)": 72.85, "step": 47875, "token_acc": 0.46464646464646464, "train_speed(iter/s)": 0.671888 }, { "epoch": 2.051325992888051, "grad_norm": 5.266317844390869, "learning_rate": 6.393410906551282e-05, "loss": 2.369532585144043, "memory(GiB)": 72.85, "step": 47880, "token_acc": 0.4885057471264368, "train_speed(iter/s)": 0.67189 }, { "epoch": 2.0515402082173, "grad_norm": 4.74164342880249, "learning_rate": 6.392764577711417e-05, "loss": 2.2899681091308595, "memory(GiB)": 72.85, "step": 47885, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.671901 }, { "epoch": 2.051754423546549, "grad_norm": 4.280007362365723, "learning_rate": 6.392118223640313e-05, "loss": 1.9873344421386718, "memory(GiB)": 72.85, 
"step": 47890, "token_acc": 0.5311475409836065, "train_speed(iter/s)": 0.671905 }, { "epoch": 2.051968638875798, "grad_norm": 7.392014026641846, "learning_rate": 6.391471844349684e-05, "loss": 2.7500452041625976, "memory(GiB)": 72.85, "step": 47895, "token_acc": 0.4570446735395189, "train_speed(iter/s)": 0.671896 }, { "epoch": 2.052182854205047, "grad_norm": 4.576486587524414, "learning_rate": 6.390825439851237e-05, "loss": 2.199676513671875, "memory(GiB)": 72.85, "step": 47900, "token_acc": 0.4937106918238994, "train_speed(iter/s)": 0.671895 }, { "epoch": 2.0523970695342957, "grad_norm": 4.6883063316345215, "learning_rate": 6.390179010156682e-05, "loss": 2.206796073913574, "memory(GiB)": 72.85, "step": 47905, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.671891 }, { "epoch": 2.052611284863545, "grad_norm": 6.014286041259766, "learning_rate": 6.389532555277732e-05, "loss": 2.635938835144043, "memory(GiB)": 72.85, "step": 47910, "token_acc": 0.47244094488188976, "train_speed(iter/s)": 0.671892 }, { "epoch": 2.0528255001927938, "grad_norm": 5.268110752105713, "learning_rate": 6.388886075226094e-05, "loss": 2.2884654998779297, "memory(GiB)": 72.85, "step": 47915, "token_acc": 0.5265306122448979, "train_speed(iter/s)": 0.671898 }, { "epoch": 2.0530397155220426, "grad_norm": 5.817694664001465, "learning_rate": 6.388239570013483e-05, "loss": 1.9387414932250977, "memory(GiB)": 72.85, "step": 47920, "token_acc": 0.5398550724637681, "train_speed(iter/s)": 0.671909 }, { "epoch": 2.053253930851292, "grad_norm": 3.7527480125427246, "learning_rate": 6.387593039651609e-05, "loss": 2.3115190505981444, "memory(GiB)": 72.85, "step": 47925, "token_acc": 0.5117647058823529, "train_speed(iter/s)": 0.671908 }, { "epoch": 2.0534681461805406, "grad_norm": 5.330589294433594, "learning_rate": 6.386946484152185e-05, "loss": 2.262508010864258, "memory(GiB)": 72.85, "step": 47930, "token_acc": 0.5206896551724138, "train_speed(iter/s)": 0.671923 }, { "epoch": 2.0536823615097894, 
"grad_norm": 3.9816174507141113, "learning_rate": 6.386299903526926e-05, "loss": 2.3142412185668944, "memory(GiB)": 72.85, "step": 47935, "token_acc": 0.49624060150375937, "train_speed(iter/s)": 0.671935 }, { "epoch": 2.0538965768390387, "grad_norm": 4.580104827880859, "learning_rate": 6.385653297787544e-05, "loss": 2.1383228302001953, "memory(GiB)": 72.85, "step": 47940, "token_acc": 0.5389221556886228, "train_speed(iter/s)": 0.671926 }, { "epoch": 2.0541107921682875, "grad_norm": 4.394717216491699, "learning_rate": 6.38500666694575e-05, "loss": 1.897751235961914, "memory(GiB)": 72.85, "step": 47945, "token_acc": 0.6043478260869565, "train_speed(iter/s)": 0.671926 }, { "epoch": 2.0543250074975363, "grad_norm": 5.070891380310059, "learning_rate": 6.384360011013264e-05, "loss": 1.9902994155883789, "memory(GiB)": 72.85, "step": 47950, "token_acc": 0.5324675324675324, "train_speed(iter/s)": 0.671937 }, { "epoch": 2.0545392228267856, "grad_norm": 4.983882427215576, "learning_rate": 6.383713330001796e-05, "loss": 2.1142162322998046, "memory(GiB)": 72.85, "step": 47955, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.671937 }, { "epoch": 2.0547534381560344, "grad_norm": 4.558765411376953, "learning_rate": 6.383066623923064e-05, "loss": 1.9567007064819335, "memory(GiB)": 72.85, "step": 47960, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.671929 }, { "epoch": 2.054967653485283, "grad_norm": 3.6109249591827393, "learning_rate": 6.382419892788781e-05, "loss": 2.2906227111816406, "memory(GiB)": 72.85, "step": 47965, "token_acc": 0.5340050377833753, "train_speed(iter/s)": 0.671933 }, { "epoch": 2.0551818688145325, "grad_norm": 6.121634006500244, "learning_rate": 6.381773136610668e-05, "loss": 2.2672183990478514, "memory(GiB)": 72.85, "step": 47970, "token_acc": 0.5088967971530249, "train_speed(iter/s)": 0.671957 }, { "epoch": 2.0553960841437813, "grad_norm": 3.6001148223876953, "learning_rate": 6.381126355400435e-05, "loss": 2.402147674560547, 
"memory(GiB)": 72.85, "step": 47975, "token_acc": 0.4759036144578313, "train_speed(iter/s)": 0.671971 }, { "epoch": 2.05561029947303, "grad_norm": 8.01008415222168, "learning_rate": 6.380479549169804e-05, "loss": 2.2415924072265625, "memory(GiB)": 72.85, "step": 47980, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.671976 }, { "epoch": 2.0558245148022793, "grad_norm": 5.858731269836426, "learning_rate": 6.37983271793049e-05, "loss": 2.265080451965332, "memory(GiB)": 72.85, "step": 47985, "token_acc": 0.5547445255474452, "train_speed(iter/s)": 0.671992 }, { "epoch": 2.056038730131528, "grad_norm": 4.246639251708984, "learning_rate": 6.379185861694214e-05, "loss": 2.141673469543457, "memory(GiB)": 72.85, "step": 47990, "token_acc": 0.5509554140127388, "train_speed(iter/s)": 0.671993 }, { "epoch": 2.056252945460777, "grad_norm": 5.511474132537842, "learning_rate": 6.378538980472691e-05, "loss": 2.4094526290893556, "memory(GiB)": 72.85, "step": 47995, "token_acc": 0.4651898734177215, "train_speed(iter/s)": 0.671973 }, { "epoch": 2.0564671607900262, "grad_norm": 5.231044292449951, "learning_rate": 6.37789207427764e-05, "loss": 2.394388961791992, "memory(GiB)": 72.85, "step": 48000, "token_acc": 0.4943181818181818, "train_speed(iter/s)": 0.671979 }, { "epoch": 2.0564671607900262, "eval_loss": 2.324321746826172, "eval_runtime": 15.7556, "eval_samples_per_second": 6.347, "eval_steps_per_second": 6.347, "eval_token_acc": 0.46226415094339623, "step": 48000 }, { "epoch": 2.056681376119275, "grad_norm": 3.601832628250122, "learning_rate": 6.377245143120783e-05, "loss": 2.383550262451172, "memory(GiB)": 72.85, "step": 48005, "token_acc": 0.46815834767641995, "train_speed(iter/s)": 0.671824 }, { "epoch": 2.056895591448524, "grad_norm": 6.401268005371094, "learning_rate": 6.376598187013839e-05, "loss": 1.886191177368164, "memory(GiB)": 72.85, "step": 48010, "token_acc": 0.5318181818181819, "train_speed(iter/s)": 0.671809 }, { "epoch": 2.057109806777773, "grad_norm": 
4.486291885375977, "learning_rate": 6.375951205968526e-05, "loss": 1.8203128814697265, "memory(GiB)": 72.85, "step": 48015, "token_acc": 0.62109375, "train_speed(iter/s)": 0.671801 }, { "epoch": 2.057324022107022, "grad_norm": 4.323875427246094, "learning_rate": 6.375304199996566e-05, "loss": 2.361566162109375, "memory(GiB)": 72.85, "step": 48020, "token_acc": 0.45224719101123595, "train_speed(iter/s)": 0.671822 }, { "epoch": 2.0575382374362707, "grad_norm": 5.562254905700684, "learning_rate": 6.374657169109683e-05, "loss": 2.2573328018188477, "memory(GiB)": 72.85, "step": 48025, "token_acc": 0.5034722222222222, "train_speed(iter/s)": 0.671837 }, { "epoch": 2.05775245276552, "grad_norm": 5.495441436767578, "learning_rate": 6.374010113319593e-05, "loss": 2.1212207794189455, "memory(GiB)": 72.85, "step": 48030, "token_acc": 0.5467128027681661, "train_speed(iter/s)": 0.671849 }, { "epoch": 2.057966668094769, "grad_norm": 4.747060775756836, "learning_rate": 6.373363032638022e-05, "loss": 2.2105804443359376, "memory(GiB)": 72.85, "step": 48035, "token_acc": 0.538961038961039, "train_speed(iter/s)": 0.671862 }, { "epoch": 2.0581808834240176, "grad_norm": 6.853487014770508, "learning_rate": 6.372715927076691e-05, "loss": 2.2269994735717775, "memory(GiB)": 72.85, "step": 48040, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.671832 }, { "epoch": 2.058395098753267, "grad_norm": 5.044247150421143, "learning_rate": 6.372068796647322e-05, "loss": 2.1180212020874025, "memory(GiB)": 72.85, "step": 48045, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.671827 }, { "epoch": 2.0586093140825157, "grad_norm": 4.829572677612305, "learning_rate": 6.371421641361642e-05, "loss": 2.231287956237793, "memory(GiB)": 72.85, "step": 48050, "token_acc": 0.5189873417721519, "train_speed(iter/s)": 0.671815 }, { "epoch": 2.0588235294117645, "grad_norm": 4.3252854347229, "learning_rate": 6.370774461231372e-05, "loss": 2.2498443603515623, "memory(GiB)": 72.85, "step": 48055, 
"token_acc": 0.519298245614035, "train_speed(iter/s)": 0.671822 }, { "epoch": 2.0590377447410138, "grad_norm": 4.833038330078125, "learning_rate": 6.370127256268236e-05, "loss": 1.9938013076782226, "memory(GiB)": 72.85, "step": 48060, "token_acc": 0.5654008438818565, "train_speed(iter/s)": 0.671836 }, { "epoch": 2.0592519600702626, "grad_norm": 4.400327205657959, "learning_rate": 6.36948002648396e-05, "loss": 2.3148921966552733, "memory(GiB)": 72.85, "step": 48065, "token_acc": 0.5149501661129569, "train_speed(iter/s)": 0.671843 }, { "epoch": 2.0594661753995114, "grad_norm": 4.16092586517334, "learning_rate": 6.368832771890268e-05, "loss": 2.118830108642578, "memory(GiB)": 72.85, "step": 48070, "token_acc": 0.5132275132275133, "train_speed(iter/s)": 0.67185 }, { "epoch": 2.0596803907287606, "grad_norm": 4.795346736907959, "learning_rate": 6.368185492498886e-05, "loss": 2.5279766082763673, "memory(GiB)": 72.85, "step": 48075, "token_acc": 0.4882154882154882, "train_speed(iter/s)": 0.671848 }, { "epoch": 2.0598946060580094, "grad_norm": 6.609912395477295, "learning_rate": 6.367538188321541e-05, "loss": 2.3379398345947267, "memory(GiB)": 72.85, "step": 48080, "token_acc": 0.5077881619937694, "train_speed(iter/s)": 0.671862 }, { "epoch": 2.0601088213872583, "grad_norm": 6.451868057250977, "learning_rate": 6.36689085936996e-05, "loss": 2.330828094482422, "memory(GiB)": 72.85, "step": 48085, "token_acc": 0.4897119341563786, "train_speed(iter/s)": 0.671868 }, { "epoch": 2.0603230367165075, "grad_norm": 3.750608205795288, "learning_rate": 6.366243505655866e-05, "loss": 2.1081398010253904, "memory(GiB)": 72.85, "step": 48090, "token_acc": 0.5322033898305085, "train_speed(iter/s)": 0.671886 }, { "epoch": 2.0605372520457563, "grad_norm": 5.129945278167725, "learning_rate": 6.365596127190992e-05, "loss": 2.0704092025756835, "memory(GiB)": 72.85, "step": 48095, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.671885 }, { "epoch": 2.060751467375005, "grad_norm": 
5.251479148864746, "learning_rate": 6.364948723987062e-05, "loss": 2.093579864501953, "memory(GiB)": 72.85, "step": 48100, "token_acc": 0.5471698113207547, "train_speed(iter/s)": 0.671894 }, { "epoch": 2.0609656827042544, "grad_norm": 4.027524471282959, "learning_rate": 6.364301296055806e-05, "loss": 2.2729068756103517, "memory(GiB)": 72.85, "step": 48105, "token_acc": 0.5060240963855421, "train_speed(iter/s)": 0.671906 }, { "epoch": 2.061179898033503, "grad_norm": 3.8347666263580322, "learning_rate": 6.36365384340895e-05, "loss": 2.5513179779052733, "memory(GiB)": 72.85, "step": 48110, "token_acc": 0.47147147147147145, "train_speed(iter/s)": 0.671895 }, { "epoch": 2.061394113362752, "grad_norm": 5.351351737976074, "learning_rate": 6.363006366058228e-05, "loss": 2.3830162048339845, "memory(GiB)": 72.85, "step": 48115, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.67191 }, { "epoch": 2.0616083286920013, "grad_norm": 5.998321533203125, "learning_rate": 6.362358864015365e-05, "loss": 2.1459491729736326, "memory(GiB)": 72.85, "step": 48120, "token_acc": 0.524, "train_speed(iter/s)": 0.67192 }, { "epoch": 2.06182254402125, "grad_norm": 4.421061038970947, "learning_rate": 6.361711337292095e-05, "loss": 2.1401079177856444, "memory(GiB)": 72.85, "step": 48125, "token_acc": 0.5192307692307693, "train_speed(iter/s)": 0.671929 }, { "epoch": 2.062036759350499, "grad_norm": 5.2704315185546875, "learning_rate": 6.361063785900145e-05, "loss": 2.022308349609375, "memory(GiB)": 72.85, "step": 48130, "token_acc": 0.5597014925373134, "train_speed(iter/s)": 0.671941 }, { "epoch": 2.062250974679748, "grad_norm": 4.497775554656982, "learning_rate": 6.360416209851249e-05, "loss": 1.9424766540527343, "memory(GiB)": 72.85, "step": 48135, "token_acc": 0.5676691729323309, "train_speed(iter/s)": 0.671941 }, { "epoch": 2.062465190008997, "grad_norm": 4.360332489013672, "learning_rate": 6.359768609157138e-05, "loss": 1.9375570297241211, "memory(GiB)": 72.85, "step": 48140, 
"token_acc": 0.5618374558303887, "train_speed(iter/s)": 0.67196 }, { "epoch": 2.062679405338246, "grad_norm": 4.678948879241943, "learning_rate": 6.359120983829542e-05, "loss": 2.512226104736328, "memory(GiB)": 72.85, "step": 48145, "token_acc": 0.4721311475409836, "train_speed(iter/s)": 0.671959 }, { "epoch": 2.062893620667495, "grad_norm": 6.728996753692627, "learning_rate": 6.358473333880194e-05, "loss": 2.607589530944824, "memory(GiB)": 72.85, "step": 48150, "token_acc": 0.4434250764525994, "train_speed(iter/s)": 0.671949 }, { "epoch": 2.063107835996744, "grad_norm": 6.205591201782227, "learning_rate": 6.357825659320829e-05, "loss": 2.016309928894043, "memory(GiB)": 72.85, "step": 48155, "token_acc": 0.49122807017543857, "train_speed(iter/s)": 0.671953 }, { "epoch": 2.0633220513259927, "grad_norm": 3.6436421871185303, "learning_rate": 6.357177960163175e-05, "loss": 2.2369997024536135, "memory(GiB)": 72.85, "step": 48160, "token_acc": 0.5265151515151515, "train_speed(iter/s)": 0.671948 }, { "epoch": 2.063536266655242, "grad_norm": 4.532093048095703, "learning_rate": 6.356530236418972e-05, "loss": 2.184749221801758, "memory(GiB)": 72.85, "step": 48165, "token_acc": 0.5167785234899329, "train_speed(iter/s)": 0.671951 }, { "epoch": 2.0637504819844907, "grad_norm": 5.69591760635376, "learning_rate": 6.355882488099951e-05, "loss": 2.2682113647460938, "memory(GiB)": 72.85, "step": 48170, "token_acc": 0.4728434504792332, "train_speed(iter/s)": 0.671946 }, { "epoch": 2.0639646973137395, "grad_norm": 4.960935592651367, "learning_rate": 6.355234715217845e-05, "loss": 2.353993606567383, "memory(GiB)": 72.85, "step": 48175, "token_acc": 0.48089171974522293, "train_speed(iter/s)": 0.671932 }, { "epoch": 2.064178912642989, "grad_norm": 4.63568115234375, "learning_rate": 6.354586917784395e-05, "loss": 2.328599739074707, "memory(GiB)": 72.85, "step": 48180, "token_acc": 0.49303621169916434, "train_speed(iter/s)": 0.671929 }, { "epoch": 2.0643931279722376, "grad_norm": 
5.1564226150512695, "learning_rate": 6.353939095811327e-05, "loss": 2.1604175567626953, "memory(GiB)": 72.85, "step": 48185, "token_acc": 0.5323076923076923, "train_speed(iter/s)": 0.671927 }, { "epoch": 2.0646073433014864, "grad_norm": 4.4956135749816895, "learning_rate": 6.353291249310385e-05, "loss": 2.192675018310547, "memory(GiB)": 72.85, "step": 48190, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.671909 }, { "epoch": 2.0648215586307357, "grad_norm": 5.558483123779297, "learning_rate": 6.352643378293302e-05, "loss": 2.287187194824219, "memory(GiB)": 72.85, "step": 48195, "token_acc": 0.4793650793650794, "train_speed(iter/s)": 0.67189 }, { "epoch": 2.0650357739599845, "grad_norm": 4.1721720695495605, "learning_rate": 6.351995482771817e-05, "loss": 2.378596878051758, "memory(GiB)": 72.85, "step": 48200, "token_acc": 0.4858156028368794, "train_speed(iter/s)": 0.671893 }, { "epoch": 2.0652499892892333, "grad_norm": 4.561003684997559, "learning_rate": 6.351347562757663e-05, "loss": 2.454647254943848, "memory(GiB)": 72.85, "step": 48205, "token_acc": 0.5016949152542373, "train_speed(iter/s)": 0.671871 }, { "epoch": 2.0654642046184826, "grad_norm": 5.286055564880371, "learning_rate": 6.350699618262581e-05, "loss": 2.211211395263672, "memory(GiB)": 72.85, "step": 48210, "token_acc": 0.5201465201465202, "train_speed(iter/s)": 0.671887 }, { "epoch": 2.0656784199477314, "grad_norm": 5.144593238830566, "learning_rate": 6.350051649298309e-05, "loss": 2.2010734558105467, "memory(GiB)": 72.85, "step": 48215, "token_acc": 0.5, "train_speed(iter/s)": 0.67187 }, { "epoch": 2.06589263527698, "grad_norm": 4.563419342041016, "learning_rate": 6.349403655876583e-05, "loss": 2.401737594604492, "memory(GiB)": 72.85, "step": 48220, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.671877 }, { "epoch": 2.0661068506062295, "grad_norm": 5.300559997558594, "learning_rate": 6.348755638009146e-05, "loss": 1.7645536422729493, "memory(GiB)": 72.85, "step": 48225, 
"token_acc": 0.5810276679841897, "train_speed(iter/s)": 0.671886 }, { "epoch": 2.0663210659354783, "grad_norm": 5.738534450531006, "learning_rate": 6.348107595707735e-05, "loss": 2.452304458618164, "memory(GiB)": 72.85, "step": 48230, "token_acc": 0.4961832061068702, "train_speed(iter/s)": 0.671883 }, { "epoch": 2.066535281264727, "grad_norm": 5.691180229187012, "learning_rate": 6.34745952898409e-05, "loss": 1.994637680053711, "memory(GiB)": 72.85, "step": 48235, "token_acc": 0.5585284280936454, "train_speed(iter/s)": 0.671875 }, { "epoch": 2.0667494965939763, "grad_norm": 7.056878566741943, "learning_rate": 6.346811437849952e-05, "loss": 2.204849052429199, "memory(GiB)": 72.85, "step": 48240, "token_acc": 0.5042016806722689, "train_speed(iter/s)": 0.671872 }, { "epoch": 2.066963711923225, "grad_norm": 4.390390396118164, "learning_rate": 6.34616332231706e-05, "loss": 2.037007141113281, "memory(GiB)": 72.85, "step": 48245, "token_acc": 0.5364963503649635, "train_speed(iter/s)": 0.671876 }, { "epoch": 2.0671779272524744, "grad_norm": 3.6392767429351807, "learning_rate": 6.345515182397159e-05, "loss": 2.296806526184082, "memory(GiB)": 72.85, "step": 48250, "token_acc": 0.48398576512455516, "train_speed(iter/s)": 0.67188 }, { "epoch": 2.067392142581723, "grad_norm": 4.555827617645264, "learning_rate": 6.344867018101987e-05, "loss": 2.1686351776123045, "memory(GiB)": 72.85, "step": 48255, "token_acc": 0.5198675496688742, "train_speed(iter/s)": 0.671874 }, { "epoch": 2.067606357910972, "grad_norm": 4.5892014503479, "learning_rate": 6.344218829443287e-05, "loss": 2.3780359268188476, "memory(GiB)": 72.85, "step": 48260, "token_acc": 0.46808510638297873, "train_speed(iter/s)": 0.67187 }, { "epoch": 2.0678205732402213, "grad_norm": 6.3930792808532715, "learning_rate": 6.343570616432801e-05, "loss": 2.341208648681641, "memory(GiB)": 72.85, "step": 48265, "token_acc": 0.5362776025236593, "train_speed(iter/s)": 0.671882 }, { "epoch": 2.06803478856947, "grad_norm": 
4.1083903312683105, "learning_rate": 6.342922379082275e-05, "loss": 2.2399570465087892, "memory(GiB)": 72.85, "step": 48270, "token_acc": 0.5229357798165137, "train_speed(iter/s)": 0.671888 }, { "epoch": 2.068249003898719, "grad_norm": 5.461843013763428, "learning_rate": 6.342274117403449e-05, "loss": 2.5852399826049806, "memory(GiB)": 72.85, "step": 48275, "token_acc": 0.4564459930313589, "train_speed(iter/s)": 0.671891 }, { "epoch": 2.068463219227968, "grad_norm": 4.037466526031494, "learning_rate": 6.34162583140807e-05, "loss": 2.6056108474731445, "memory(GiB)": 72.85, "step": 48280, "token_acc": 0.4916387959866221, "train_speed(iter/s)": 0.671886 }, { "epoch": 2.068677434557217, "grad_norm": 4.59629487991333, "learning_rate": 6.340977521107879e-05, "loss": 2.1750024795532226, "memory(GiB)": 72.85, "step": 48285, "token_acc": 0.47041420118343197, "train_speed(iter/s)": 0.671892 }, { "epoch": 2.068891649886466, "grad_norm": 4.404232978820801, "learning_rate": 6.340329186514622e-05, "loss": 2.4784496307373045, "memory(GiB)": 72.85, "step": 48290, "token_acc": 0.47601476014760147, "train_speed(iter/s)": 0.671879 }, { "epoch": 2.069105865215715, "grad_norm": 3.876957416534424, "learning_rate": 6.339680827640044e-05, "loss": 2.5548728942871093, "memory(GiB)": 72.85, "step": 48295, "token_acc": 0.4678362573099415, "train_speed(iter/s)": 0.671885 }, { "epoch": 2.069320080544964, "grad_norm": 4.908746719360352, "learning_rate": 6.339032444495894e-05, "loss": 2.3935825347900392, "memory(GiB)": 72.85, "step": 48300, "token_acc": 0.5068493150684932, "train_speed(iter/s)": 0.671893 }, { "epoch": 2.0695342958742127, "grad_norm": 4.169879913330078, "learning_rate": 6.338384037093912e-05, "loss": 2.133279228210449, "memory(GiB)": 72.85, "step": 48305, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.671887 }, { "epoch": 2.069748511203462, "grad_norm": 4.2548627853393555, "learning_rate": 6.337735605445849e-05, "loss": 2.069640350341797, "memory(GiB)": 72.85, "step": 
48310, "token_acc": 0.5645756457564576, "train_speed(iter/s)": 0.671892 }, { "epoch": 2.0699627265327107, "grad_norm": 5.46332311630249, "learning_rate": 6.337087149563453e-05, "loss": 2.1797840118408205, "memory(GiB)": 72.85, "step": 48315, "token_acc": 0.5381165919282511, "train_speed(iter/s)": 0.671897 }, { "epoch": 2.0701769418619596, "grad_norm": 5.044042110443115, "learning_rate": 6.336438669458465e-05, "loss": 2.3321460723876952, "memory(GiB)": 72.85, "step": 48320, "token_acc": 0.46785714285714286, "train_speed(iter/s)": 0.671909 }, { "epoch": 2.070391157191209, "grad_norm": 4.251594543457031, "learning_rate": 6.335790165142638e-05, "loss": 2.400600242614746, "memory(GiB)": 72.85, "step": 48325, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.671918 }, { "epoch": 2.0706053725204576, "grad_norm": 5.403046607971191, "learning_rate": 6.33514163662772e-05, "loss": 2.236830139160156, "memory(GiB)": 72.85, "step": 48330, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.67191 }, { "epoch": 2.0708195878497064, "grad_norm": 5.829616069793701, "learning_rate": 6.334493083925456e-05, "loss": 2.243999481201172, "memory(GiB)": 72.85, "step": 48335, "token_acc": 0.4830188679245283, "train_speed(iter/s)": 0.671916 }, { "epoch": 2.0710338031789557, "grad_norm": 4.916101932525635, "learning_rate": 6.333844507047602e-05, "loss": 2.136665916442871, "memory(GiB)": 72.85, "step": 48340, "token_acc": 0.5364806866952789, "train_speed(iter/s)": 0.671931 }, { "epoch": 2.0712480185082045, "grad_norm": 4.960885047912598, "learning_rate": 6.333195906005902e-05, "loss": 2.2248517990112306, "memory(GiB)": 72.85, "step": 48345, "token_acc": 0.515625, "train_speed(iter/s)": 0.671923 }, { "epoch": 2.0714622338374533, "grad_norm": 7.286027431488037, "learning_rate": 6.332547280812106e-05, "loss": 2.114392852783203, "memory(GiB)": 72.85, "step": 48350, "token_acc": 0.5, "train_speed(iter/s)": 0.671931 }, { "epoch": 2.0716764491667026, "grad_norm": 4.5873188972473145, 
"learning_rate": 6.331898631477968e-05, "loss": 1.8849010467529297, "memory(GiB)": 72.85, "step": 48355, "token_acc": 0.5451127819548872, "train_speed(iter/s)": 0.671928 }, { "epoch": 2.0718906644959514, "grad_norm": 5.11079216003418, "learning_rate": 6.331249958015235e-05, "loss": 2.3272056579589844, "memory(GiB)": 72.85, "step": 48360, "token_acc": 0.4750830564784053, "train_speed(iter/s)": 0.67192 }, { "epoch": 2.0721048798252, "grad_norm": 4.203359603881836, "learning_rate": 6.330601260435663e-05, "loss": 2.308877944946289, "memory(GiB)": 72.85, "step": 48365, "token_acc": 0.49851632047477745, "train_speed(iter/s)": 0.671918 }, { "epoch": 2.0723190951544495, "grad_norm": 4.411031246185303, "learning_rate": 6.329952538750998e-05, "loss": 2.1491308212280273, "memory(GiB)": 72.85, "step": 48370, "token_acc": 0.5197568389057751, "train_speed(iter/s)": 0.671911 }, { "epoch": 2.0725333104836983, "grad_norm": 4.048519611358643, "learning_rate": 6.329303792972996e-05, "loss": 2.0844112396240235, "memory(GiB)": 72.85, "step": 48375, "token_acc": 0.5252808988764045, "train_speed(iter/s)": 0.671919 }, { "epoch": 2.072747525812947, "grad_norm": 4.197086811065674, "learning_rate": 6.328655023113408e-05, "loss": 2.110171890258789, "memory(GiB)": 72.85, "step": 48380, "token_acc": 0.5284090909090909, "train_speed(iter/s)": 0.671918 }, { "epoch": 2.0729617411421963, "grad_norm": 3.733670234680176, "learning_rate": 6.32800622918399e-05, "loss": 2.3078079223632812, "memory(GiB)": 72.85, "step": 48385, "token_acc": 0.5508196721311476, "train_speed(iter/s)": 0.67193 }, { "epoch": 2.073175956471445, "grad_norm": 4.732198715209961, "learning_rate": 6.327357411196492e-05, "loss": 2.2714609146118163, "memory(GiB)": 72.85, "step": 48390, "token_acc": 0.5460750853242321, "train_speed(iter/s)": 0.671927 }, { "epoch": 2.073390171800694, "grad_norm": 5.39647912979126, "learning_rate": 6.32670856916267e-05, "loss": 2.4148773193359374, "memory(GiB)": 72.85, "step": 48395, "token_acc": 
0.5116279069767442, "train_speed(iter/s)": 0.671925 }, { "epoch": 2.073604387129943, "grad_norm": 6.919320583343506, "learning_rate": 6.326059703094278e-05, "loss": 2.37890625, "memory(GiB)": 72.85, "step": 48400, "token_acc": 0.5037878787878788, "train_speed(iter/s)": 0.67194 }, { "epoch": 2.073818602459192, "grad_norm": 4.595363616943359, "learning_rate": 6.32541081300307e-05, "loss": 2.0882522583007814, "memory(GiB)": 72.85, "step": 48405, "token_acc": 0.5537974683544303, "train_speed(iter/s)": 0.67194 }, { "epoch": 2.074032817788441, "grad_norm": 6.856731414794922, "learning_rate": 6.324761898900801e-05, "loss": 2.07806282043457, "memory(GiB)": 72.85, "step": 48410, "token_acc": 0.5466666666666666, "train_speed(iter/s)": 0.671936 }, { "epoch": 2.07424703311769, "grad_norm": 4.031952857971191, "learning_rate": 6.32411296079923e-05, "loss": 2.044130325317383, "memory(GiB)": 72.85, "step": 48415, "token_acc": 0.5284810126582279, "train_speed(iter/s)": 0.671943 }, { "epoch": 2.074461248446939, "grad_norm": 6.996781349182129, "learning_rate": 6.323463998710106e-05, "loss": 2.2778961181640627, "memory(GiB)": 72.85, "step": 48420, "token_acc": 0.48546511627906974, "train_speed(iter/s)": 0.671945 }, { "epoch": 2.0746754637761877, "grad_norm": 4.778059959411621, "learning_rate": 6.322815012645193e-05, "loss": 2.4247373580932616, "memory(GiB)": 72.85, "step": 48425, "token_acc": 0.5146579804560261, "train_speed(iter/s)": 0.671946 }, { "epoch": 2.074889679105437, "grad_norm": 4.091307163238525, "learning_rate": 6.322166002616246e-05, "loss": 2.034140205383301, "memory(GiB)": 72.85, "step": 48430, "token_acc": 0.474025974025974, "train_speed(iter/s)": 0.67194 }, { "epoch": 2.075103894434686, "grad_norm": 4.7769856452941895, "learning_rate": 6.32151696863502e-05, "loss": 1.962179946899414, "memory(GiB)": 72.85, "step": 48435, "token_acc": 0.5401929260450161, "train_speed(iter/s)": 0.671966 }, { "epoch": 2.0753181097639346, "grad_norm": 4.206348896026611, "learning_rate": 
6.320867910713276e-05, "loss": 2.574367713928223, "memory(GiB)": 72.85, "step": 48440, "token_acc": 0.47770700636942676, "train_speed(iter/s)": 0.671975 }, { "epoch": 2.075532325093184, "grad_norm": 4.119123458862305, "learning_rate": 6.32021882886277e-05, "loss": 2.1780765533447264, "memory(GiB)": 72.85, "step": 48445, "token_acc": 0.5522875816993464, "train_speed(iter/s)": 0.67199 }, { "epoch": 2.0757465404224327, "grad_norm": 4.032463550567627, "learning_rate": 6.31956972309526e-05, "loss": 2.2872114181518555, "memory(GiB)": 72.85, "step": 48450, "token_acc": 0.49266862170087977, "train_speed(iter/s)": 0.672015 }, { "epoch": 2.0759607557516815, "grad_norm": 4.5968756675720215, "learning_rate": 6.31892059342251e-05, "loss": 1.9905189514160155, "memory(GiB)": 72.85, "step": 48455, "token_acc": 0.5268456375838926, "train_speed(iter/s)": 0.672026 }, { "epoch": 2.0761749710809307, "grad_norm": 4.203211307525635, "learning_rate": 6.318271439856273e-05, "loss": 2.1793033599853517, "memory(GiB)": 72.85, "step": 48460, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.672042 }, { "epoch": 2.0763891864101796, "grad_norm": 5.707036972045898, "learning_rate": 6.317622262408314e-05, "loss": 2.019341468811035, "memory(GiB)": 72.85, "step": 48465, "token_acc": 0.5461847389558233, "train_speed(iter/s)": 0.67205 }, { "epoch": 2.0766034017394284, "grad_norm": 5.359068393707275, "learning_rate": 6.316973061090391e-05, "loss": 2.2475685119628905, "memory(GiB)": 72.85, "step": 48470, "token_acc": 0.5231316725978647, "train_speed(iter/s)": 0.672048 }, { "epoch": 2.0768176170686776, "grad_norm": 4.884721279144287, "learning_rate": 6.316323835914265e-05, "loss": 2.352537155151367, "memory(GiB)": 72.85, "step": 48475, "token_acc": 0.479020979020979, "train_speed(iter/s)": 0.672039 }, { "epoch": 2.0770318323979264, "grad_norm": 4.603456020355225, "learning_rate": 6.315674586891698e-05, "loss": 2.168111038208008, "memory(GiB)": 72.85, "step": 48480, "token_acc": 
0.5067114093959731, "train_speed(iter/s)": 0.672052 }, { "epoch": 2.0772460477271752, "grad_norm": 4.065252780914307, "learning_rate": 6.315025314034453e-05, "loss": 2.042251968383789, "memory(GiB)": 72.85, "step": 48485, "token_acc": 0.5224913494809689, "train_speed(iter/s)": 0.672073 }, { "epoch": 2.0774602630564245, "grad_norm": 5.213207244873047, "learning_rate": 6.31437601735429e-05, "loss": 2.329994773864746, "memory(GiB)": 72.85, "step": 48490, "token_acc": 0.513595166163142, "train_speed(iter/s)": 0.672075 }, { "epoch": 2.0776744783856733, "grad_norm": 5.900386333465576, "learning_rate": 6.313726696862971e-05, "loss": 2.0984121322631837, "memory(GiB)": 72.85, "step": 48495, "token_acc": 0.5234657039711191, "train_speed(iter/s)": 0.672085 }, { "epoch": 2.077888693714922, "grad_norm": 4.114498615264893, "learning_rate": 6.313077352572263e-05, "loss": 2.147500228881836, "memory(GiB)": 72.85, "step": 48500, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.672103 }, { "epoch": 2.077888693714922, "eval_loss": 2.145054578781128, "eval_runtime": 15.181, "eval_samples_per_second": 6.587, "eval_steps_per_second": 6.587, "eval_token_acc": 0.49548387096774194, "step": 48500 }, { "epoch": 2.0781029090441714, "grad_norm": 5.952365398406982, "learning_rate": 6.312427984493926e-05, "loss": 2.261513900756836, "memory(GiB)": 72.85, "step": 48505, "token_acc": 0.4961759082217973, "train_speed(iter/s)": 0.671939 }, { "epoch": 2.07831712437342, "grad_norm": 3.4196066856384277, "learning_rate": 6.311778592639726e-05, "loss": 2.0084611892700197, "memory(GiB)": 72.85, "step": 48510, "token_acc": 0.5435540069686411, "train_speed(iter/s)": 0.67194 }, { "epoch": 2.078531339702669, "grad_norm": 3.5871939659118652, "learning_rate": 6.311129177021424e-05, "loss": 2.2275434494018556, "memory(GiB)": 72.85, "step": 48515, "token_acc": 0.5171339563862928, "train_speed(iter/s)": 0.671934 }, { "epoch": 2.0787455550319183, "grad_norm": 5.223836421966553, "learning_rate": 
6.310479737650789e-05, "loss": 2.032484245300293, "memory(GiB)": 72.85, "step": 48520, "token_acc": 0.514792899408284, "train_speed(iter/s)": 0.671921 }, { "epoch": 2.078959770361167, "grad_norm": 8.066630363464355, "learning_rate": 6.309830274539582e-05, "loss": 2.196309471130371, "memory(GiB)": 72.85, "step": 48525, "token_acc": 0.503731343283582, "train_speed(iter/s)": 0.671907 }, { "epoch": 2.079173985690416, "grad_norm": 4.947096347808838, "learning_rate": 6.309180787699574e-05, "loss": 2.199825096130371, "memory(GiB)": 72.85, "step": 48530, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.671907 }, { "epoch": 2.079388201019665, "grad_norm": 6.010552883148193, "learning_rate": 6.308531277142524e-05, "loss": 2.3868381500244142, "memory(GiB)": 72.85, "step": 48535, "token_acc": 0.5316901408450704, "train_speed(iter/s)": 0.671918 }, { "epoch": 2.079602416348914, "grad_norm": 3.877211093902588, "learning_rate": 6.307881742880205e-05, "loss": 2.235342025756836, "memory(GiB)": 72.85, "step": 48540, "token_acc": 0.5170068027210885, "train_speed(iter/s)": 0.671929 }, { "epoch": 2.0798166316781628, "grad_norm": 5.0740180015563965, "learning_rate": 6.307232184924383e-05, "loss": 2.103710746765137, "memory(GiB)": 72.85, "step": 48545, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.671927 }, { "epoch": 2.080030847007412, "grad_norm": 4.0105133056640625, "learning_rate": 6.306582603286818e-05, "loss": 2.384288024902344, "memory(GiB)": 72.85, "step": 48550, "token_acc": 0.458955223880597, "train_speed(iter/s)": 0.671931 }, { "epoch": 2.080245062336661, "grad_norm": 4.86367654800415, "learning_rate": 6.305932997979289e-05, "loss": 2.2503868103027345, "memory(GiB)": 72.85, "step": 48555, "token_acc": 0.48659003831417624, "train_speed(iter/s)": 0.671928 }, { "epoch": 2.0804592776659097, "grad_norm": 4.641293525695801, "learning_rate": 6.305283369013557e-05, "loss": 2.3174095153808594, "memory(GiB)": 72.85, "step": 48560, "token_acc": 0.50625, 
"train_speed(iter/s)": 0.671914 }, { "epoch": 2.080673492995159, "grad_norm": 3.340409994125366, "learning_rate": 6.304633716401392e-05, "loss": 2.1992376327514647, "memory(GiB)": 72.85, "step": 48565, "token_acc": 0.5016949152542373, "train_speed(iter/s)": 0.671916 }, { "epoch": 2.0808877083244077, "grad_norm": 8.079404830932617, "learning_rate": 6.303984040154563e-05, "loss": 2.408283805847168, "memory(GiB)": 72.85, "step": 48570, "token_acc": 0.5019157088122606, "train_speed(iter/s)": 0.671914 }, { "epoch": 2.0811019236536565, "grad_norm": 4.223382949829102, "learning_rate": 6.303334340284841e-05, "loss": 2.316575622558594, "memory(GiB)": 72.85, "step": 48575, "token_acc": 0.48520710059171596, "train_speed(iter/s)": 0.671907 }, { "epoch": 2.081316138982906, "grad_norm": 4.480376243591309, "learning_rate": 6.302684616803994e-05, "loss": 2.4983259201049806, "memory(GiB)": 72.85, "step": 48580, "token_acc": 0.4935897435897436, "train_speed(iter/s)": 0.671921 }, { "epoch": 2.0815303543121546, "grad_norm": 8.6572265625, "learning_rate": 6.302034869723793e-05, "loss": 2.164540481567383, "memory(GiB)": 72.85, "step": 48585, "token_acc": 0.5083612040133779, "train_speed(iter/s)": 0.671924 }, { "epoch": 2.0817445696414034, "grad_norm": 4.377542018890381, "learning_rate": 6.301385099056008e-05, "loss": 2.3184831619262694, "memory(GiB)": 72.85, "step": 48590, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.671921 }, { "epoch": 2.0819587849706527, "grad_norm": 6.287519931793213, "learning_rate": 6.300735304812412e-05, "loss": 2.223697280883789, "memory(GiB)": 72.85, "step": 48595, "token_acc": 0.5061728395061729, "train_speed(iter/s)": 0.671926 }, { "epoch": 2.0821730002999015, "grad_norm": 4.674376964569092, "learning_rate": 6.300085487004777e-05, "loss": 2.016396331787109, "memory(GiB)": 72.85, "step": 48600, "token_acc": 0.5361842105263158, "train_speed(iter/s)": 0.671932 }, { "epoch": 2.0823872156291503, "grad_norm": 3.768902063369751, "learning_rate": 
6.299435645644874e-05, "loss": 2.2148649215698244, "memory(GiB)": 72.85, "step": 48605, "token_acc": 0.5139318885448917, "train_speed(iter/s)": 0.671932 }, { "epoch": 2.0826014309583996, "grad_norm": 4.711845397949219, "learning_rate": 6.298785780744473e-05, "loss": 2.4426315307617186, "memory(GiB)": 72.85, "step": 48610, "token_acc": 0.5114754098360655, "train_speed(iter/s)": 0.671939 }, { "epoch": 2.0828156462876484, "grad_norm": 4.495090484619141, "learning_rate": 6.298135892315351e-05, "loss": 2.166418266296387, "memory(GiB)": 72.85, "step": 48615, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.671942 }, { "epoch": 2.083029861616897, "grad_norm": 4.623827934265137, "learning_rate": 6.297485980369277e-05, "loss": 2.085641288757324, "memory(GiB)": 72.85, "step": 48620, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.671937 }, { "epoch": 2.0832440769461464, "grad_norm": 4.224023818969727, "learning_rate": 6.296836044918031e-05, "loss": 2.4073278427124025, "memory(GiB)": 72.85, "step": 48625, "token_acc": 0.5124555160142349, "train_speed(iter/s)": 0.671925 }, { "epoch": 2.0834582922753953, "grad_norm": 4.457852363586426, "learning_rate": 6.296186085973381e-05, "loss": 2.366895294189453, "memory(GiB)": 72.85, "step": 48630, "token_acc": 0.49019607843137253, "train_speed(iter/s)": 0.671926 }, { "epoch": 2.083672507604644, "grad_norm": 6.056446552276611, "learning_rate": 6.295536103547106e-05, "loss": 2.4773895263671877, "memory(GiB)": 72.85, "step": 48635, "token_acc": 0.4766666666666667, "train_speed(iter/s)": 0.671929 }, { "epoch": 2.0838867229338933, "grad_norm": 3.9445230960845947, "learning_rate": 6.294886097650977e-05, "loss": 2.2218093872070312, "memory(GiB)": 72.85, "step": 48640, "token_acc": 0.5304659498207885, "train_speed(iter/s)": 0.671939 }, { "epoch": 2.084100938263142, "grad_norm": 4.636092185974121, "learning_rate": 6.294236068296774e-05, "loss": 1.9333768844604493, "memory(GiB)": 72.85, "step": 48645, "token_acc": 
0.5748299319727891, "train_speed(iter/s)": 0.67196 }, { "epoch": 2.084315153592391, "grad_norm": 5.921014785766602, "learning_rate": 6.293586015496268e-05, "loss": 2.4965429306030273, "memory(GiB)": 72.85, "step": 48650, "token_acc": 0.4392857142857143, "train_speed(iter/s)": 0.671956 }, { "epoch": 2.08452936892164, "grad_norm": 23.15443992614746, "learning_rate": 6.29293593926124e-05, "loss": 2.277524948120117, "memory(GiB)": 72.85, "step": 48655, "token_acc": 0.5330882352941176, "train_speed(iter/s)": 0.671954 }, { "epoch": 2.084743584250889, "grad_norm": 4.09714412689209, "learning_rate": 6.292285839603465e-05, "loss": 2.151351547241211, "memory(GiB)": 72.85, "step": 48660, "token_acc": 0.5461847389558233, "train_speed(iter/s)": 0.671918 }, { "epoch": 2.084957799580138, "grad_norm": 4.410927772521973, "learning_rate": 6.291635716534718e-05, "loss": 2.0342845916748047, "memory(GiB)": 72.85, "step": 48665, "token_acc": 0.5667870036101083, "train_speed(iter/s)": 0.671922 }, { "epoch": 2.085172014909387, "grad_norm": 5.401809215545654, "learning_rate": 6.291115601231736e-05, "loss": 2.339646339416504, "memory(GiB)": 72.85, "step": 48670, "token_acc": 0.5066225165562914, "train_speed(iter/s)": 0.671931 }, { "epoch": 2.085386230238636, "grad_norm": 4.650276184082031, "learning_rate": 6.290465436052921e-05, "loss": 2.185017776489258, "memory(GiB)": 72.85, "step": 48675, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.671952 }, { "epoch": 2.0856004455678847, "grad_norm": 6.261753082275391, "learning_rate": 6.289815247496117e-05, "loss": 2.2210819244384767, "memory(GiB)": 72.85, "step": 48680, "token_acc": 0.5201342281879194, "train_speed(iter/s)": 0.671961 }, { "epoch": 2.085814660897134, "grad_norm": 4.983606338500977, "learning_rate": 6.289165035573098e-05, "loss": 2.30933837890625, "memory(GiB)": 72.85, "step": 48685, "token_acc": 0.5451263537906137, "train_speed(iter/s)": 0.671965 }, { "epoch": 2.0860288762263828, "grad_norm": 4.418600082397461, 
"learning_rate": 6.288514800295647e-05, "loss": 2.0111183166503905, "memory(GiB)": 72.85, "step": 48690, "token_acc": 0.5846153846153846, "train_speed(iter/s)": 0.671959 }, { "epoch": 2.0862430915556316, "grad_norm": 4.594109058380127, "learning_rate": 6.287864541675542e-05, "loss": 2.3664113998413088, "memory(GiB)": 72.85, "step": 48695, "token_acc": 0.5044510385756676, "train_speed(iter/s)": 0.671982 }, { "epoch": 2.086457306884881, "grad_norm": 4.436500549316406, "learning_rate": 6.287214259724559e-05, "loss": 2.027358627319336, "memory(GiB)": 72.85, "step": 48700, "token_acc": 0.5531914893617021, "train_speed(iter/s)": 0.671973 }, { "epoch": 2.0866715222141297, "grad_norm": 6.261582374572754, "learning_rate": 6.286563954454485e-05, "loss": 2.2779603958129884, "memory(GiB)": 72.85, "step": 48705, "token_acc": 0.5063291139240507, "train_speed(iter/s)": 0.671983 }, { "epoch": 2.0868857375433785, "grad_norm": 4.132805347442627, "learning_rate": 6.285913625877097e-05, "loss": 2.452281188964844, "memory(GiB)": 72.85, "step": 48710, "token_acc": 0.4911242603550296, "train_speed(iter/s)": 0.671979 }, { "epoch": 2.0870999528726277, "grad_norm": 5.026377201080322, "learning_rate": 6.285263274004179e-05, "loss": 2.2326005935668944, "memory(GiB)": 72.85, "step": 48715, "token_acc": 0.4872611464968153, "train_speed(iter/s)": 0.671979 }, { "epoch": 2.0873141682018765, "grad_norm": 4.038769245147705, "learning_rate": 6.284612898847508e-05, "loss": 2.1591636657714846, "memory(GiB)": 72.85, "step": 48720, "token_acc": 0.5143769968051118, "train_speed(iter/s)": 0.671988 }, { "epoch": 2.0875283835311254, "grad_norm": 4.735772609710693, "learning_rate": 6.283962500418872e-05, "loss": 2.1767330169677734, "memory(GiB)": 72.85, "step": 48725, "token_acc": 0.5251572327044025, "train_speed(iter/s)": 0.671992 }, { "epoch": 2.0877425988603746, "grad_norm": 4.902061462402344, "learning_rate": 6.28331207873005e-05, "loss": 2.5132946014404296, "memory(GiB)": 72.85, "step": 48730, 
"token_acc": 0.4878048780487805, "train_speed(iter/s)": 0.671994 }, { "epoch": 2.0879568141896234, "grad_norm": 4.7770209312438965, "learning_rate": 6.282661633792826e-05, "loss": 2.2836540222167967, "memory(GiB)": 72.85, "step": 48735, "token_acc": 0.5060606060606061, "train_speed(iter/s)": 0.67199 }, { "epoch": 2.0881710295188722, "grad_norm": 4.378352165222168, "learning_rate": 6.282011165618984e-05, "loss": 2.537491798400879, "memory(GiB)": 72.85, "step": 48740, "token_acc": 0.5064516129032258, "train_speed(iter/s)": 0.671993 }, { "epoch": 2.0883852448481215, "grad_norm": 5.055965423583984, "learning_rate": 6.281360674220305e-05, "loss": 2.088459587097168, "memory(GiB)": 72.85, "step": 48745, "token_acc": 0.5494880546075085, "train_speed(iter/s)": 0.672001 }, { "epoch": 2.0885994601773703, "grad_norm": 4.320870399475098, "learning_rate": 6.280710159608578e-05, "loss": 2.105240249633789, "memory(GiB)": 72.85, "step": 48750, "token_acc": 0.5642633228840125, "train_speed(iter/s)": 0.671978 }, { "epoch": 2.088813675506619, "grad_norm": 4.432773590087891, "learning_rate": 6.280059621795583e-05, "loss": 2.1641063690185547, "memory(GiB)": 72.85, "step": 48755, "token_acc": 0.48854961832061067, "train_speed(iter/s)": 0.671969 }, { "epoch": 2.0890278908358684, "grad_norm": 4.090360164642334, "learning_rate": 6.279409060793109e-05, "loss": 2.093874931335449, "memory(GiB)": 72.85, "step": 48760, "token_acc": 0.5236220472440944, "train_speed(iter/s)": 0.671965 }, { "epoch": 2.089242106165117, "grad_norm": 4.33043098449707, "learning_rate": 6.278758476612937e-05, "loss": 1.8977312088012694, "memory(GiB)": 72.85, "step": 48765, "token_acc": 0.5947955390334573, "train_speed(iter/s)": 0.671958 }, { "epoch": 2.089456321494366, "grad_norm": 4.16834831237793, "learning_rate": 6.278107869266859e-05, "loss": 2.2657466888427735, "memory(GiB)": 72.85, "step": 48770, "token_acc": 0.5, "train_speed(iter/s)": 0.671968 }, { "epoch": 2.0896705368236153, "grad_norm": 5.156919956207275, 
"learning_rate": 6.277457238766655e-05, "loss": 2.2037612915039064, "memory(GiB)": 72.85, "step": 48775, "token_acc": 0.4928571428571429, "train_speed(iter/s)": 0.671976 }, { "epoch": 2.089884752152864, "grad_norm": 3.924032211303711, "learning_rate": 6.276806585124116e-05, "loss": 2.2714912414550783, "memory(GiB)": 72.85, "step": 48780, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.671964 }, { "epoch": 2.090098967482113, "grad_norm": 5.179303169250488, "learning_rate": 6.276155908351027e-05, "loss": 2.0158376693725586, "memory(GiB)": 72.85, "step": 48785, "token_acc": 0.5397923875432526, "train_speed(iter/s)": 0.671959 }, { "epoch": 2.090313182811362, "grad_norm": 5.2027668952941895, "learning_rate": 6.275505208459178e-05, "loss": 2.3233358383178713, "memory(GiB)": 72.85, "step": 48790, "token_acc": 0.5158227848101266, "train_speed(iter/s)": 0.671957 }, { "epoch": 2.090527398140611, "grad_norm": 4.605799198150635, "learning_rate": 6.274854485460355e-05, "loss": 2.0404470443725584, "memory(GiB)": 72.85, "step": 48795, "token_acc": 0.5290322580645161, "train_speed(iter/s)": 0.671971 }, { "epoch": 2.0907416134698598, "grad_norm": 5.6866888999938965, "learning_rate": 6.274203739366347e-05, "loss": 2.351571273803711, "memory(GiB)": 72.85, "step": 48800, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.671967 }, { "epoch": 2.090955828799109, "grad_norm": 3.6357321739196777, "learning_rate": 6.273552970188942e-05, "loss": 2.405035972595215, "memory(GiB)": 72.85, "step": 48805, "token_acc": 0.49700598802395207, "train_speed(iter/s)": 0.671976 }, { "epoch": 2.091170044128358, "grad_norm": 6.330029010772705, "learning_rate": 6.272902177939933e-05, "loss": 2.3988052368164063, "memory(GiB)": 72.85, "step": 48810, "token_acc": 0.5136054421768708, "train_speed(iter/s)": 0.671977 }, { "epoch": 2.0913842594576066, "grad_norm": 6.744255542755127, "learning_rate": 6.272251362631107e-05, "loss": 2.2039390563964845, "memory(GiB)": 72.85, "step": 48815, 
"token_acc": 0.5171102661596958, "train_speed(iter/s)": 0.671988 }, { "epoch": 2.091598474786856, "grad_norm": 5.5593581199646, "learning_rate": 6.27160052427425e-05, "loss": 2.209650421142578, "memory(GiB)": 72.85, "step": 48820, "token_acc": 0.5211726384364821, "train_speed(iter/s)": 0.671987 }, { "epoch": 2.0918126901161047, "grad_norm": 3.5498886108398438, "learning_rate": 6.27094966288116e-05, "loss": 2.235030746459961, "memory(GiB)": 72.85, "step": 48825, "token_acc": 0.5131195335276968, "train_speed(iter/s)": 0.671985 }, { "epoch": 2.0920269054453535, "grad_norm": 4.500118732452393, "learning_rate": 6.270298778463624e-05, "loss": 2.3207630157470702, "memory(GiB)": 72.85, "step": 48830, "token_acc": 0.5231316725978647, "train_speed(iter/s)": 0.671968 }, { "epoch": 2.092241120774603, "grad_norm": 5.516082286834717, "learning_rate": 6.269647871033432e-05, "loss": 2.383761978149414, "memory(GiB)": 72.85, "step": 48835, "token_acc": 0.5134099616858238, "train_speed(iter/s)": 0.671973 }, { "epoch": 2.0924553361038516, "grad_norm": 5.9855852127075195, "learning_rate": 6.26899694060238e-05, "loss": 2.1952917098999025, "memory(GiB)": 72.85, "step": 48840, "token_acc": 0.5709090909090909, "train_speed(iter/s)": 0.67198 }, { "epoch": 2.0926695514331004, "grad_norm": 3.730851173400879, "learning_rate": 6.26834598718226e-05, "loss": 2.2884164810180665, "memory(GiB)": 72.85, "step": 48845, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.67197 }, { "epoch": 2.0928837667623497, "grad_norm": 4.8740925788879395, "learning_rate": 6.267695010784859e-05, "loss": 2.2104351043701174, "memory(GiB)": 72.85, "step": 48850, "token_acc": 0.5340136054421769, "train_speed(iter/s)": 0.67198 }, { "epoch": 2.0930979820915985, "grad_norm": 5.043956756591797, "learning_rate": 6.267044011421975e-05, "loss": 2.142007827758789, "memory(GiB)": 72.85, "step": 48855, "token_acc": 0.552, "train_speed(iter/s)": 0.671982 }, { "epoch": 2.0933121974208473, "grad_norm": 4.591806888580322, 
"learning_rate": 6.2663929891054e-05, "loss": 2.4925922393798827, "memory(GiB)": 72.85, "step": 48860, "token_acc": 0.5114754098360655, "train_speed(iter/s)": 0.671992 }, { "epoch": 2.0935264127500965, "grad_norm": 5.081711292266846, "learning_rate": 6.265741943846926e-05, "loss": 2.125905990600586, "memory(GiB)": 72.85, "step": 48865, "token_acc": 0.6015625, "train_speed(iter/s)": 0.671981 }, { "epoch": 2.0937406280793454, "grad_norm": 5.074251651763916, "learning_rate": 6.265090875658353e-05, "loss": 2.2792327880859373, "memory(GiB)": 72.85, "step": 48870, "token_acc": 0.4969512195121951, "train_speed(iter/s)": 0.671992 }, { "epoch": 2.093954843408594, "grad_norm": 4.239144325256348, "learning_rate": 6.264439784551472e-05, "loss": 1.9167634963989257, "memory(GiB)": 72.85, "step": 48875, "token_acc": 0.5661157024793388, "train_speed(iter/s)": 0.672011 }, { "epoch": 2.0941690587378434, "grad_norm": 6.317681789398193, "learning_rate": 6.263788670538075e-05, "loss": 2.1613441467285157, "memory(GiB)": 72.85, "step": 48880, "token_acc": 0.5309734513274337, "train_speed(iter/s)": 0.672009 }, { "epoch": 2.0943832740670922, "grad_norm": 4.287763595581055, "learning_rate": 6.263137533629963e-05, "loss": 2.0353073120117187, "memory(GiB)": 72.85, "step": 48885, "token_acc": 0.5303030303030303, "train_speed(iter/s)": 0.671995 }, { "epoch": 2.094597489396341, "grad_norm": 4.031517505645752, "learning_rate": 6.262486373838929e-05, "loss": 1.916064453125, "memory(GiB)": 72.85, "step": 48890, "token_acc": 0.563265306122449, "train_speed(iter/s)": 0.671984 }, { "epoch": 2.0948117047255903, "grad_norm": 4.954631805419922, "learning_rate": 6.261835191176769e-05, "loss": 2.3215747833251954, "memory(GiB)": 72.85, "step": 48895, "token_acc": 0.5037593984962406, "train_speed(iter/s)": 0.671978 }, { "epoch": 2.095025920054839, "grad_norm": 4.082631587982178, "learning_rate": 6.261183985655281e-05, "loss": 2.0426847457885744, "memory(GiB)": 72.85, "step": 48900, "token_acc": 
0.5504885993485342, "train_speed(iter/s)": 0.671997 }, { "epoch": 2.095240135384088, "grad_norm": 5.881319999694824, "learning_rate": 6.260532757286264e-05, "loss": 2.1588626861572267, "memory(GiB)": 72.85, "step": 48905, "token_acc": 0.5032679738562091, "train_speed(iter/s)": 0.672 }, { "epoch": 2.095454350713337, "grad_norm": 4.800403594970703, "learning_rate": 6.259881506081512e-05, "loss": 2.3928220748901365, "memory(GiB)": 72.85, "step": 48910, "token_acc": 0.4876325088339223, "train_speed(iter/s)": 0.671993 }, { "epoch": 2.095668566042586, "grad_norm": 4.199578285217285, "learning_rate": 6.259230232052826e-05, "loss": 2.2931392669677733, "memory(GiB)": 72.85, "step": 48915, "token_acc": 0.4983922829581994, "train_speed(iter/s)": 0.671998 }, { "epoch": 2.095882781371835, "grad_norm": 3.8812508583068848, "learning_rate": 6.258578935212e-05, "loss": 2.743081474304199, "memory(GiB)": 72.85, "step": 48920, "token_acc": 0.4556213017751479, "train_speed(iter/s)": 0.672003 }, { "epoch": 2.096096996701084, "grad_norm": 5.379225254058838, "learning_rate": 6.257927615570839e-05, "loss": 2.189438056945801, "memory(GiB)": 72.85, "step": 48925, "token_acc": 0.49454545454545457, "train_speed(iter/s)": 0.672007 }, { "epoch": 2.096311212030333, "grad_norm": 5.251006126403809, "learning_rate": 6.257276273141139e-05, "loss": 2.232743263244629, "memory(GiB)": 72.85, "step": 48930, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.0965254273595817, "grad_norm": 4.500327110290527, "learning_rate": 6.256624907934699e-05, "loss": 2.2623863220214844, "memory(GiB)": 72.85, "step": 48935, "token_acc": 0.5360501567398119, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.096739642688831, "grad_norm": 4.491793632507324, "learning_rate": 6.25597351996332e-05, "loss": 2.2651340484619142, "memory(GiB)": 72.85, "step": 48940, "token_acc": 0.49696969696969695, "train_speed(iter/s)": 0.671995 }, { "epoch": 2.0969538580180798, "grad_norm": 4.7217607498168945, 
"learning_rate": 6.255322109238803e-05, "loss": 2.173226737976074, "memory(GiB)": 72.85, "step": 48945, "token_acc": 0.5412844036697247, "train_speed(iter/s)": 0.671996 }, { "epoch": 2.0971680733473286, "grad_norm": 4.145639896392822, "learning_rate": 6.254670675772947e-05, "loss": 2.126712417602539, "memory(GiB)": 72.85, "step": 48950, "token_acc": 0.5203761755485894, "train_speed(iter/s)": 0.67199 }, { "epoch": 2.097382288676578, "grad_norm": 4.044556617736816, "learning_rate": 6.254019219577556e-05, "loss": 2.2578857421875, "memory(GiB)": 72.85, "step": 48955, "token_acc": 0.4940119760479042, "train_speed(iter/s)": 0.671988 }, { "epoch": 2.0975965040058266, "grad_norm": 4.396174907684326, "learning_rate": 6.253367740664431e-05, "loss": 2.452286148071289, "memory(GiB)": 72.85, "step": 48960, "token_acc": 0.4691358024691358, "train_speed(iter/s)": 0.672002 }, { "epoch": 2.0978107193350755, "grad_norm": 4.363997936248779, "learning_rate": 6.252716239045372e-05, "loss": 2.124688720703125, "memory(GiB)": 72.85, "step": 48965, "token_acc": 0.5018050541516246, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.0980249346643247, "grad_norm": 5.452573776245117, "learning_rate": 6.252064714732185e-05, "loss": 2.446722984313965, "memory(GiB)": 72.85, "step": 48970, "token_acc": 0.48013245033112584, "train_speed(iter/s)": 0.672015 }, { "epoch": 2.0982391499935735, "grad_norm": 5.0575127601623535, "learning_rate": 6.25141316773667e-05, "loss": 2.5675220489501953, "memory(GiB)": 72.85, "step": 48975, "token_acc": 0.477088948787062, "train_speed(iter/s)": 0.67201 }, { "epoch": 2.0984533653228223, "grad_norm": 5.564598083496094, "learning_rate": 6.250761598070632e-05, "loss": 2.3057395935058596, "memory(GiB)": 72.85, "step": 48980, "token_acc": 0.4962686567164179, "train_speed(iter/s)": 0.672027 }, { "epoch": 2.0986675806520716, "grad_norm": 4.353306770324707, "learning_rate": 6.250110005745874e-05, "loss": 2.3219799041748046, "memory(GiB)": 72.85, "step": 48985, "token_acc": 
0.4858757062146893, "train_speed(iter/s)": 0.672033 }, { "epoch": 2.0988817959813204, "grad_norm": 4.160150527954102, "learning_rate": 6.249458390774201e-05, "loss": 2.0945611953735352, "memory(GiB)": 72.85, "step": 48990, "token_acc": 0.5481727574750831, "train_speed(iter/s)": 0.672037 }, { "epoch": 2.099096011310569, "grad_norm": 6.933920383453369, "learning_rate": 6.248806753167417e-05, "loss": 2.111593246459961, "memory(GiB)": 72.85, "step": 48995, "token_acc": 0.5361842105263158, "train_speed(iter/s)": 0.672052 }, { "epoch": 2.0993102266398185, "grad_norm": 3.91937518119812, "learning_rate": 6.248155092937326e-05, "loss": 2.1537101745605467, "memory(GiB)": 72.85, "step": 49000, "token_acc": 0.5273775216138329, "train_speed(iter/s)": 0.672059 }, { "epoch": 2.0993102266398185, "eval_loss": 2.1136066913604736, "eval_runtime": 15.863, "eval_samples_per_second": 6.304, "eval_steps_per_second": 6.304, "eval_token_acc": 0.4904109589041096, "step": 49000 }, { "epoch": 2.0995244419690673, "grad_norm": 4.9672770500183105, "learning_rate": 6.247503410095737e-05, "loss": 2.3717838287353517, "memory(GiB)": 72.85, "step": 49005, "token_acc": 0.5004840271055179, "train_speed(iter/s)": 0.671874 }, { "epoch": 2.099738657298316, "grad_norm": 4.9781365394592285, "learning_rate": 6.246851704654451e-05, "loss": 2.4309648513793944, "memory(GiB)": 72.85, "step": 49010, "token_acc": 0.5, "train_speed(iter/s)": 0.67189 }, { "epoch": 2.0999528726275654, "grad_norm": 5.488847732543945, "learning_rate": 6.246199976625277e-05, "loss": 2.0735517501831056, "memory(GiB)": 72.85, "step": 49015, "token_acc": 0.5890909090909091, "train_speed(iter/s)": 0.671892 }, { "epoch": 2.100167087956814, "grad_norm": 5.0537638664245605, "learning_rate": 6.245548226020024e-05, "loss": 2.3741691589355467, "memory(GiB)": 72.85, "step": 49020, "token_acc": 0.49363057324840764, "train_speed(iter/s)": 0.67187 }, { "epoch": 2.100381303286063, "grad_norm": 3.707411527633667, "learning_rate": 6.244896452850494e-05, 
"loss": 2.2771207809448244, "memory(GiB)": 72.85, "step": 49025, "token_acc": 0.4967948717948718, "train_speed(iter/s)": 0.671865 }, { "epoch": 2.1005955186153122, "grad_norm": 4.453234672546387, "learning_rate": 6.244244657128498e-05, "loss": 2.2944589614868165, "memory(GiB)": 72.85, "step": 49030, "token_acc": 0.5107033639143731, "train_speed(iter/s)": 0.671865 }, { "epoch": 2.100809733944561, "grad_norm": 6.213059425354004, "learning_rate": 6.243592838865842e-05, "loss": 2.1487310409545897, "memory(GiB)": 72.85, "step": 49035, "token_acc": 0.5046728971962616, "train_speed(iter/s)": 0.671861 }, { "epoch": 2.10102394927381, "grad_norm": 4.085338115692139, "learning_rate": 6.242940998074336e-05, "loss": 2.318798828125, "memory(GiB)": 72.85, "step": 49040, "token_acc": 0.5031446540880503, "train_speed(iter/s)": 0.67188 }, { "epoch": 2.101238164603059, "grad_norm": 7.209193706512451, "learning_rate": 6.242289134765788e-05, "loss": 2.4183082580566406, "memory(GiB)": 72.85, "step": 49045, "token_acc": 0.4868913857677903, "train_speed(iter/s)": 0.671887 }, { "epoch": 2.101452379932308, "grad_norm": 4.791311264038086, "learning_rate": 6.241637248952006e-05, "loss": 2.308378791809082, "memory(GiB)": 72.85, "step": 49050, "token_acc": 0.5370370370370371, "train_speed(iter/s)": 0.671883 }, { "epoch": 2.1016665952615567, "grad_norm": 5.536331653594971, "learning_rate": 6.2409853406448e-05, "loss": 2.272092819213867, "memory(GiB)": 72.85, "step": 49055, "token_acc": 0.4716312056737589, "train_speed(iter/s)": 0.671894 }, { "epoch": 2.101880810590806, "grad_norm": 4.798561096191406, "learning_rate": 6.240333409855983e-05, "loss": 2.1683902740478516, "memory(GiB)": 72.85, "step": 49060, "token_acc": 0.5268456375838926, "train_speed(iter/s)": 0.671907 }, { "epoch": 2.102095025920055, "grad_norm": 5.294879913330078, "learning_rate": 6.239681456597361e-05, "loss": 2.190114402770996, "memory(GiB)": 72.85, "step": 49065, "token_acc": 0.5525423728813559, "train_speed(iter/s)": 
0.671892 }, { "epoch": 2.1023092412493036, "grad_norm": 4.173735618591309, "learning_rate": 6.239029480880747e-05, "loss": 2.3527374267578125, "memory(GiB)": 72.85, "step": 49070, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.671885 }, { "epoch": 2.102523456578553, "grad_norm": 4.522976875305176, "learning_rate": 6.238377482717951e-05, "loss": 2.2007999420166016, "memory(GiB)": 72.85, "step": 49075, "token_acc": 0.5015479876160991, "train_speed(iter/s)": 0.671886 }, { "epoch": 2.1027376719078017, "grad_norm": 5.172046184539795, "learning_rate": 6.237725462120784e-05, "loss": 2.2807901382446287, "memory(GiB)": 72.85, "step": 49080, "token_acc": 0.4896551724137931, "train_speed(iter/s)": 0.671876 }, { "epoch": 2.1029518872370505, "grad_norm": 4.830670356750488, "learning_rate": 6.237073419101061e-05, "loss": 2.2151103973388673, "memory(GiB)": 72.85, "step": 49085, "token_acc": 0.5140845070422535, "train_speed(iter/s)": 0.671883 }, { "epoch": 2.1031661025662998, "grad_norm": 5.396189212799072, "learning_rate": 6.236421353670592e-05, "loss": 2.504326820373535, "memory(GiB)": 72.85, "step": 49090, "token_acc": 0.4774774774774775, "train_speed(iter/s)": 0.671891 }, { "epoch": 2.1033803178955486, "grad_norm": 4.4560227394104, "learning_rate": 6.235769265841191e-05, "loss": 2.2449356079101563, "memory(GiB)": 72.85, "step": 49095, "token_acc": 0.47491638795986624, "train_speed(iter/s)": 0.671885 }, { "epoch": 2.1035945332247974, "grad_norm": 6.183562278747559, "learning_rate": 6.235117155624671e-05, "loss": 2.4729215621948244, "memory(GiB)": 72.85, "step": 49100, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.671886 }, { "epoch": 2.1038087485540466, "grad_norm": 4.318286418914795, "learning_rate": 6.234465023032844e-05, "loss": 2.1227222442626954, "memory(GiB)": 72.85, "step": 49105, "token_acc": 0.4987146529562982, "train_speed(iter/s)": 0.671887 }, { "epoch": 2.1040229638832955, "grad_norm": 5.3551740646362305, "learning_rate": 
6.233812868077525e-05, "loss": 2.3428682327270507, "memory(GiB)": 72.85, "step": 49110, "token_acc": 0.5119047619047619, "train_speed(iter/s)": 0.671889 }, { "epoch": 2.1042371792125443, "grad_norm": 4.9223408699035645, "learning_rate": 6.233160690770528e-05, "loss": 2.0466939926147463, "memory(GiB)": 72.85, "step": 49115, "token_acc": 0.5183823529411765, "train_speed(iter/s)": 0.671873 }, { "epoch": 2.1044513945417935, "grad_norm": 11.332558631896973, "learning_rate": 6.23250849112367e-05, "loss": 2.3563671112060547, "memory(GiB)": 72.85, "step": 49120, "token_acc": 0.5188679245283019, "train_speed(iter/s)": 0.671875 }, { "epoch": 2.1046656098710423, "grad_norm": 4.092160224914551, "learning_rate": 6.231856269148762e-05, "loss": 2.324373245239258, "memory(GiB)": 72.85, "step": 49125, "token_acc": 0.5015873015873016, "train_speed(iter/s)": 0.67187 }, { "epoch": 2.104879825200291, "grad_norm": 5.901068687438965, "learning_rate": 6.231204024857624e-05, "loss": 2.139100456237793, "memory(GiB)": 72.85, "step": 49130, "token_acc": 0.532319391634981, "train_speed(iter/s)": 0.671869 }, { "epoch": 2.1050940405295404, "grad_norm": 6.36407470703125, "learning_rate": 6.23055175826207e-05, "loss": 2.192861557006836, "memory(GiB)": 72.85, "step": 49135, "token_acc": 0.5358490566037736, "train_speed(iter/s)": 0.671873 }, { "epoch": 2.1053082558587892, "grad_norm": 4.015073776245117, "learning_rate": 6.229899469373917e-05, "loss": 2.688673400878906, "memory(GiB)": 72.85, "step": 49140, "token_acc": 0.444, "train_speed(iter/s)": 0.671884 }, { "epoch": 2.105522471188038, "grad_norm": 3.639738082885742, "learning_rate": 6.229247158204981e-05, "loss": 2.129800224304199, "memory(GiB)": 72.85, "step": 49145, "token_acc": 0.5601503759398496, "train_speed(iter/s)": 0.671886 }, { "epoch": 2.1057366865172873, "grad_norm": 4.558908462524414, "learning_rate": 6.228594824767078e-05, "loss": 2.1264022827148437, "memory(GiB)": 72.85, "step": 49150, "token_acc": 0.5414012738853503, 
"train_speed(iter/s)": 0.671891 }, { "epoch": 2.105950901846536, "grad_norm": 4.805797576904297, "learning_rate": 6.227942469072027e-05, "loss": 2.2203857421875, "memory(GiB)": 72.85, "step": 49155, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.6719 }, { "epoch": 2.106165117175785, "grad_norm": 5.618766784667969, "learning_rate": 6.227290091131648e-05, "loss": 2.280317687988281, "memory(GiB)": 72.85, "step": 49160, "token_acc": 0.5, "train_speed(iter/s)": 0.671879 }, { "epoch": 2.106379332505034, "grad_norm": 7.843561172485352, "learning_rate": 6.22663769095776e-05, "loss": 2.18216552734375, "memory(GiB)": 72.85, "step": 49165, "token_acc": 0.5016949152542373, "train_speed(iter/s)": 0.671878 }, { "epoch": 2.106593547834283, "grad_norm": 4.372894287109375, "learning_rate": 6.225985268562175e-05, "loss": 2.1073923110961914, "memory(GiB)": 72.85, "step": 49170, "token_acc": 0.5532786885245902, "train_speed(iter/s)": 0.671897 }, { "epoch": 2.106807763163532, "grad_norm": 3.895946741104126, "learning_rate": 6.22533282395672e-05, "loss": 2.2205650329589846, "memory(GiB)": 72.85, "step": 49175, "token_acc": 0.5387323943661971, "train_speed(iter/s)": 0.671901 }, { "epoch": 2.107021978492781, "grad_norm": 4.083151340484619, "learning_rate": 6.22468035715321e-05, "loss": 2.4540889739990233, "memory(GiB)": 72.85, "step": 49180, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.67192 }, { "epoch": 2.10723619382203, "grad_norm": 5.776334285736084, "learning_rate": 6.224027868163467e-05, "loss": 2.151932716369629, "memory(GiB)": 72.85, "step": 49185, "token_acc": 0.50390625, "train_speed(iter/s)": 0.671936 }, { "epoch": 2.1074504091512787, "grad_norm": 5.185715675354004, "learning_rate": 6.223375356999311e-05, "loss": 2.360203170776367, "memory(GiB)": 72.85, "step": 49190, "token_acc": 0.48695652173913045, "train_speed(iter/s)": 0.671972 }, { "epoch": 2.107664624480528, "grad_norm": 4.93487548828125, "learning_rate": 6.222722823672562e-05, "loss": 
2.1102663040161134, "memory(GiB)": 72.85, "step": 49195, "token_acc": 0.49216300940438873, "train_speed(iter/s)": 0.67198 }, { "epoch": 2.1078788398097767, "grad_norm": 3.946611166000366, "learning_rate": 6.222070268195041e-05, "loss": 2.137722396850586, "memory(GiB)": 72.85, "step": 49200, "token_acc": 0.5241379310344828, "train_speed(iter/s)": 0.671974 }, { "epoch": 2.1080930551390256, "grad_norm": 4.32077169418335, "learning_rate": 6.221417690578574e-05, "loss": 2.5582454681396483, "memory(GiB)": 72.85, "step": 49205, "token_acc": 0.4647058823529412, "train_speed(iter/s)": 0.671995 }, { "epoch": 2.108307270468275, "grad_norm": 5.636714458465576, "learning_rate": 6.220765090834977e-05, "loss": 2.2510265350341796, "memory(GiB)": 72.85, "step": 49210, "token_acc": 0.5030864197530864, "train_speed(iter/s)": 0.671998 }, { "epoch": 2.1085214857975236, "grad_norm": 4.7507524490356445, "learning_rate": 6.220112468976076e-05, "loss": 2.2717796325683595, "memory(GiB)": 72.85, "step": 49215, "token_acc": 0.5218855218855218, "train_speed(iter/s)": 0.671988 }, { "epoch": 2.1087357011267724, "grad_norm": 4.0852227210998535, "learning_rate": 6.219459825013694e-05, "loss": 2.059092140197754, "memory(GiB)": 72.85, "step": 49220, "token_acc": 0.5656565656565656, "train_speed(iter/s)": 0.671983 }, { "epoch": 2.1089499164560217, "grad_norm": 4.376667022705078, "learning_rate": 6.218807158959652e-05, "loss": 2.0552600860595702, "memory(GiB)": 72.85, "step": 49225, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.671986 }, { "epoch": 2.1091641317852705, "grad_norm": 6.103546619415283, "learning_rate": 6.218154470825775e-05, "loss": 2.165603446960449, "memory(GiB)": 72.85, "step": 49230, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.671991 }, { "epoch": 2.1093783471145193, "grad_norm": 6.723374366760254, "learning_rate": 6.217501760623889e-05, "loss": 2.4541418075561525, "memory(GiB)": 72.85, "step": 49235, "token_acc": 0.5, "train_speed(iter/s)": 0.671998 }, { 
"epoch": 2.1095925624437686, "grad_norm": 4.472034931182861, "learning_rate": 6.216849028365815e-05, "loss": 2.342671775817871, "memory(GiB)": 72.85, "step": 49240, "token_acc": 0.5019011406844106, "train_speed(iter/s)": 0.672005 }, { "epoch": 2.1098067777730174, "grad_norm": 5.267621040344238, "learning_rate": 6.216196274063379e-05, "loss": 2.0340831756591795, "memory(GiB)": 72.85, "step": 49245, "token_acc": 0.5229357798165137, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.110020993102266, "grad_norm": 5.443288803100586, "learning_rate": 6.215543497728407e-05, "loss": 2.4310287475585937, "memory(GiB)": 72.85, "step": 49250, "token_acc": 0.47330960854092524, "train_speed(iter/s)": 0.671997 }, { "epoch": 2.1102352084315155, "grad_norm": 4.736913204193115, "learning_rate": 6.214890699372724e-05, "loss": 2.384319877624512, "memory(GiB)": 72.85, "step": 49255, "token_acc": 0.5073746312684366, "train_speed(iter/s)": 0.672006 }, { "epoch": 2.1104494237607643, "grad_norm": 5.284970283508301, "learning_rate": 6.214237879008157e-05, "loss": 2.4460481643676757, "memory(GiB)": 72.85, "step": 49260, "token_acc": 0.4672897196261682, "train_speed(iter/s)": 0.672 }, { "epoch": 2.110663639090013, "grad_norm": 4.830852031707764, "learning_rate": 6.213585036646531e-05, "loss": 2.222176742553711, "memory(GiB)": 72.85, "step": 49265, "token_acc": 0.506578947368421, "train_speed(iter/s)": 0.672001 }, { "epoch": 2.1108778544192623, "grad_norm": 6.526650428771973, "learning_rate": 6.212932172299674e-05, "loss": 2.2581226348876955, "memory(GiB)": 72.85, "step": 49270, "token_acc": 0.5, "train_speed(iter/s)": 0.672019 }, { "epoch": 2.111092069748511, "grad_norm": 5.149075508117676, "learning_rate": 6.212279285979412e-05, "loss": 2.379703903198242, "memory(GiB)": 72.85, "step": 49275, "token_acc": 0.48, "train_speed(iter/s)": 0.672009 }, { "epoch": 2.11130628507776, "grad_norm": 6.361412525177002, "learning_rate": 6.211626377697575e-05, "loss": 1.879354476928711, "memory(GiB)": 72.85, 
"step": 49280, "token_acc": 0.5749128919860628, "train_speed(iter/s)": 0.671995 }, { "epoch": 2.1115205004070092, "grad_norm": 4.0624518394470215, "learning_rate": 6.210973447465988e-05, "loss": 2.052577781677246, "memory(GiB)": 72.85, "step": 49285, "token_acc": 0.534375, "train_speed(iter/s)": 0.671988 }, { "epoch": 2.111734715736258, "grad_norm": 5.339844226837158, "learning_rate": 6.210320495296484e-05, "loss": 2.0728923797607424, "memory(GiB)": 72.85, "step": 49290, "token_acc": 0.552901023890785, "train_speed(iter/s)": 0.671981 }, { "epoch": 2.111948931065507, "grad_norm": 4.08728551864624, "learning_rate": 6.209667521200886e-05, "loss": 2.327083396911621, "memory(GiB)": 72.85, "step": 49295, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.671975 }, { "epoch": 2.112163146394756, "grad_norm": 3.9867329597473145, "learning_rate": 6.209014525191025e-05, "loss": 2.2584407806396483, "memory(GiB)": 72.85, "step": 49300, "token_acc": 0.511864406779661, "train_speed(iter/s)": 0.671967 }, { "epoch": 2.112377361724005, "grad_norm": 4.250401496887207, "learning_rate": 6.208361507278735e-05, "loss": 2.2187870025634764, "memory(GiB)": 72.85, "step": 49305, "token_acc": 0.5047318611987381, "train_speed(iter/s)": 0.671957 }, { "epoch": 2.1125915770532537, "grad_norm": 4.154455184936523, "learning_rate": 6.207708467475842e-05, "loss": 2.2824432373046877, "memory(GiB)": 72.85, "step": 49310, "token_acc": 0.5078125, "train_speed(iter/s)": 0.671962 }, { "epoch": 2.112805792382503, "grad_norm": 4.25262451171875, "learning_rate": 6.207055405794176e-05, "loss": 2.3190738677978517, "memory(GiB)": 72.85, "step": 49315, "token_acc": 0.5037878787878788, "train_speed(iter/s)": 0.671968 }, { "epoch": 2.113020007711752, "grad_norm": 5.555129528045654, "learning_rate": 6.20640232224557e-05, "loss": 2.469269943237305, "memory(GiB)": 72.85, "step": 49320, "token_acc": 0.4644808743169399, "train_speed(iter/s)": 0.671956 }, { "epoch": 2.1132342230410006, "grad_norm": 
5.300929546356201, "learning_rate": 6.205749216841855e-05, "loss": 2.09674129486084, "memory(GiB)": 72.85, "step": 49325, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.671967 }, { "epoch": 2.11344843837025, "grad_norm": 3.930260181427002, "learning_rate": 6.20509608959486e-05, "loss": 2.2811264038085937, "memory(GiB)": 72.85, "step": 49330, "token_acc": 0.509375, "train_speed(iter/s)": 0.671955 }, { "epoch": 2.1136626536994987, "grad_norm": 3.801546335220337, "learning_rate": 6.20444294051642e-05, "loss": 2.3319913864135744, "memory(GiB)": 72.85, "step": 49335, "token_acc": 0.4909090909090909, "train_speed(iter/s)": 0.671953 }, { "epoch": 2.1138768690287475, "grad_norm": 5.478598117828369, "learning_rate": 6.203789769618365e-05, "loss": 2.016348648071289, "memory(GiB)": 72.85, "step": 49340, "token_acc": 0.506993006993007, "train_speed(iter/s)": 0.671952 }, { "epoch": 2.1140910843579968, "grad_norm": 4.724478244781494, "learning_rate": 6.203136576912529e-05, "loss": 2.31097354888916, "memory(GiB)": 72.85, "step": 49345, "token_acc": 0.49171270718232046, "train_speed(iter/s)": 0.671947 }, { "epoch": 2.1143052996872456, "grad_norm": 5.831323623657227, "learning_rate": 6.202483362410748e-05, "loss": 2.1736637115478517, "memory(GiB)": 72.85, "step": 49350, "token_acc": 0.5392857142857143, "train_speed(iter/s)": 0.671938 }, { "epoch": 2.1145195150164944, "grad_norm": 4.782958507537842, "learning_rate": 6.20183012612485e-05, "loss": 2.105186653137207, "memory(GiB)": 72.85, "step": 49355, "token_acc": 0.5205047318611987, "train_speed(iter/s)": 0.671938 }, { "epoch": 2.1147337303457436, "grad_norm": 4.972532749176025, "learning_rate": 6.201176868066674e-05, "loss": 2.3938594818115235, "memory(GiB)": 72.85, "step": 49360, "token_acc": 0.47601476014760147, "train_speed(iter/s)": 0.671937 }, { "epoch": 2.1149479456749924, "grad_norm": 3.9962644577026367, "learning_rate": 6.20052358824805e-05, "loss": 2.0942028045654295, "memory(GiB)": 72.85, "step": 49365, 
"token_acc": 0.5510835913312694, "train_speed(iter/s)": 0.671934 }, { "epoch": 2.1151621610042413, "grad_norm": 8.186612129211426, "learning_rate": 6.199870286680817e-05, "loss": 2.3252655029296876, "memory(GiB)": 72.85, "step": 49370, "token_acc": 0.49635036496350365, "train_speed(iter/s)": 0.671912 }, { "epoch": 2.1153763763334905, "grad_norm": 5.548274517059326, "learning_rate": 6.199216963376806e-05, "loss": 2.12100887298584, "memory(GiB)": 72.85, "step": 49375, "token_acc": 0.5656934306569343, "train_speed(iter/s)": 0.671897 }, { "epoch": 2.1155905916627393, "grad_norm": 3.9281294345855713, "learning_rate": 6.198563618347857e-05, "loss": 2.4052343368530273, "memory(GiB)": 72.85, "step": 49380, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.671884 }, { "epoch": 2.115804806991988, "grad_norm": 5.046541213989258, "learning_rate": 6.197910251605803e-05, "loss": 2.3480979919433596, "memory(GiB)": 72.85, "step": 49385, "token_acc": 0.5330578512396694, "train_speed(iter/s)": 0.671884 }, { "epoch": 2.1160190223212374, "grad_norm": 5.28444242477417, "learning_rate": 6.19725686316248e-05, "loss": 2.478005218505859, "memory(GiB)": 72.85, "step": 49390, "token_acc": 0.45864661654135336, "train_speed(iter/s)": 0.671898 }, { "epoch": 2.116233237650486, "grad_norm": 4.353879451751709, "learning_rate": 6.196603453029728e-05, "loss": 2.3029430389404295, "memory(GiB)": 72.85, "step": 49395, "token_acc": 0.49429657794676807, "train_speed(iter/s)": 0.671903 }, { "epoch": 2.116447452979735, "grad_norm": 4.615295886993408, "learning_rate": 6.19595002121938e-05, "loss": 2.144808769226074, "memory(GiB)": 72.85, "step": 49400, "token_acc": 0.4828767123287671, "train_speed(iter/s)": 0.671894 }, { "epoch": 2.1166616683089843, "grad_norm": 5.6351318359375, "learning_rate": 6.195296567743277e-05, "loss": 2.23297061920166, "memory(GiB)": 72.85, "step": 49405, "token_acc": 0.47586206896551725, "train_speed(iter/s)": 0.671865 }, { "epoch": 2.116875883638233, "grad_norm": 
4.340866565704346, "learning_rate": 6.194643092613254e-05, "loss": 2.3159368515014647, "memory(GiB)": 72.85, "step": 49410, "token_acc": 0.5313653136531366, "train_speed(iter/s)": 0.671868 }, { "epoch": 2.117090098967482, "grad_norm": 4.557781219482422, "learning_rate": 6.193989595841151e-05, "loss": 2.121644973754883, "memory(GiB)": 72.85, "step": 49415, "token_acc": 0.5360501567398119, "train_speed(iter/s)": 0.671866 }, { "epoch": 2.117304314296731, "grad_norm": 6.348583698272705, "learning_rate": 6.193336077438807e-05, "loss": 2.1404140472412108, "memory(GiB)": 72.85, "step": 49420, "token_acc": 0.5638297872340425, "train_speed(iter/s)": 0.671883 }, { "epoch": 2.11751852962598, "grad_norm": 4.704872131347656, "learning_rate": 6.192682537418061e-05, "loss": 2.2655755996704103, "memory(GiB)": 72.85, "step": 49425, "token_acc": 0.5096952908587258, "train_speed(iter/s)": 0.671887 }, { "epoch": 2.117732744955229, "grad_norm": 8.252947807312012, "learning_rate": 6.19202897579075e-05, "loss": 2.1000770568847655, "memory(GiB)": 72.85, "step": 49430, "token_acc": 0.49683544303797467, "train_speed(iter/s)": 0.671884 }, { "epoch": 2.117946960284478, "grad_norm": 5.258710861206055, "learning_rate": 6.191375392568718e-05, "loss": 2.329058074951172, "memory(GiB)": 72.85, "step": 49435, "token_acc": 0.5047021943573667, "train_speed(iter/s)": 0.671894 }, { "epoch": 2.118161175613727, "grad_norm": 5.216538906097412, "learning_rate": 6.190721787763801e-05, "loss": 2.5580717086791993, "memory(GiB)": 72.85, "step": 49440, "token_acc": 0.4306049822064057, "train_speed(iter/s)": 0.6719 }, { "epoch": 2.1183753909429757, "grad_norm": 5.107013702392578, "learning_rate": 6.190068161387844e-05, "loss": 1.957541275024414, "memory(GiB)": 72.85, "step": 49445, "token_acc": 0.5986159169550173, "train_speed(iter/s)": 0.671915 }, { "epoch": 2.118589606272225, "grad_norm": 4.062565326690674, "learning_rate": 6.189414513452685e-05, "loss": 2.1172996520996095, "memory(GiB)": 72.85, "step": 49450, 
"token_acc": 0.5415384615384615, "train_speed(iter/s)": 0.671907 }, { "epoch": 2.1188038216014737, "grad_norm": 5.083890914916992, "learning_rate": 6.188760843970166e-05, "loss": 2.162037658691406, "memory(GiB)": 72.85, "step": 49455, "token_acc": 0.5532646048109966, "train_speed(iter/s)": 0.671902 }, { "epoch": 2.1190180369307225, "grad_norm": 6.081031322479248, "learning_rate": 6.188107152952129e-05, "loss": 2.434823989868164, "memory(GiB)": 72.85, "step": 49460, "token_acc": 0.4860557768924303, "train_speed(iter/s)": 0.671922 }, { "epoch": 2.119232252259972, "grad_norm": 6.857807159423828, "learning_rate": 6.187453440410418e-05, "loss": 2.1449331283569335, "memory(GiB)": 72.85, "step": 49465, "token_acc": 0.5370370370370371, "train_speed(iter/s)": 0.671922 }, { "epoch": 2.1194464675892206, "grad_norm": 5.403292179107666, "learning_rate": 6.186799706356872e-05, "loss": 2.2073726654052734, "memory(GiB)": 72.85, "step": 49470, "token_acc": 0.49242424242424243, "train_speed(iter/s)": 0.671918 }, { "epoch": 2.11966068291847, "grad_norm": 3.9857847690582275, "learning_rate": 6.186145950803337e-05, "loss": 2.4083465576171874, "memory(GiB)": 72.85, "step": 49475, "token_acc": 0.4901315789473684, "train_speed(iter/s)": 0.671911 }, { "epoch": 2.1198748982477187, "grad_norm": 4.7235188484191895, "learning_rate": 6.185492173761655e-05, "loss": 2.2370189666748046, "memory(GiB)": 72.85, "step": 49480, "token_acc": 0.47435897435897434, "train_speed(iter/s)": 0.671917 }, { "epoch": 2.1200891135769675, "grad_norm": 4.1213531494140625, "learning_rate": 6.184838375243671e-05, "loss": 2.3940252304077148, "memory(GiB)": 72.85, "step": 49485, "token_acc": 0.4734982332155477, "train_speed(iter/s)": 0.671909 }, { "epoch": 2.1203033289062168, "grad_norm": 5.771902561187744, "learning_rate": 6.184184555261227e-05, "loss": 2.157218170166016, "memory(GiB)": 72.85, "step": 49490, "token_acc": 0.5328185328185329, "train_speed(iter/s)": 0.671915 }, { "epoch": 2.1205175442354656, "grad_norm": 
4.0646257400512695, "learning_rate": 6.18353071382617e-05, "loss": 2.073634910583496, "memory(GiB)": 72.85, "step": 49495, "token_acc": 0.5368098159509203, "train_speed(iter/s)": 0.671904 }, { "epoch": 2.1207317595647144, "grad_norm": 4.308159351348877, "learning_rate": 6.182876850950344e-05, "loss": 2.340277099609375, "memory(GiB)": 72.85, "step": 49500, "token_acc": 0.4753521126760563, "train_speed(iter/s)": 0.671917 }, { "epoch": 2.1207317595647144, "eval_loss": 2.18259334564209, "eval_runtime": 15.5678, "eval_samples_per_second": 6.424, "eval_steps_per_second": 6.424, "eval_token_acc": 0.503968253968254, "step": 49500 }, { "epoch": 2.1209459748939636, "grad_norm": 6.766024589538574, "learning_rate": 6.182222966645593e-05, "loss": 2.4239980697631838, "memory(GiB)": 72.85, "step": 49505, "token_acc": 0.500473933649289, "train_speed(iter/s)": 0.671747 }, { "epoch": 2.1211601902232124, "grad_norm": 4.6705474853515625, "learning_rate": 6.181569060923765e-05, "loss": 2.292110824584961, "memory(GiB)": 72.85, "step": 49510, "token_acc": 0.5366666666666666, "train_speed(iter/s)": 0.671766 }, { "epoch": 2.1213744055524613, "grad_norm": 4.413917541503906, "learning_rate": 6.180915133796705e-05, "loss": 2.1992618560791017, "memory(GiB)": 72.85, "step": 49515, "token_acc": 0.5278688524590164, "train_speed(iter/s)": 0.671759 }, { "epoch": 2.1215886208817105, "grad_norm": 5.340565204620361, "learning_rate": 6.180261185276259e-05, "loss": 2.0512517929077148, "memory(GiB)": 72.85, "step": 49520, "token_acc": 0.5424836601307189, "train_speed(iter/s)": 0.671771 }, { "epoch": 2.1218028362109593, "grad_norm": 4.873335838317871, "learning_rate": 6.179607215374274e-05, "loss": 2.0190765380859377, "memory(GiB)": 72.85, "step": 49525, "token_acc": 0.5794701986754967, "train_speed(iter/s)": 0.671746 }, { "epoch": 2.122017051540208, "grad_norm": 5.082498550415039, "learning_rate": 6.178953224102599e-05, "loss": 2.216120147705078, "memory(GiB)": 72.85, "step": 49530, "token_acc": 
0.48299319727891155, "train_speed(iter/s)": 0.671763 }, { "epoch": 2.1222312668694574, "grad_norm": 4.466240882873535, "learning_rate": 6.178299211473081e-05, "loss": 2.088905906677246, "memory(GiB)": 72.85, "step": 49535, "token_acc": 0.5492063492063493, "train_speed(iter/s)": 0.671771 }, { "epoch": 2.122445482198706, "grad_norm": 4.877878189086914, "learning_rate": 6.177645177497566e-05, "loss": 2.298503112792969, "memory(GiB)": 72.85, "step": 49540, "token_acc": 0.54, "train_speed(iter/s)": 0.671774 }, { "epoch": 2.122659697527955, "grad_norm": 5.90775728225708, "learning_rate": 6.176991122187904e-05, "loss": 2.287364196777344, "memory(GiB)": 72.85, "step": 49545, "token_acc": 0.5278810408921933, "train_speed(iter/s)": 0.671781 }, { "epoch": 2.1228739128572043, "grad_norm": 3.8877553939819336, "learning_rate": 6.176337045555944e-05, "loss": 2.15591983795166, "memory(GiB)": 72.85, "step": 49550, "token_acc": 0.5411392405063291, "train_speed(iter/s)": 0.671773 }, { "epoch": 2.123088128186453, "grad_norm": 4.360231399536133, "learning_rate": 6.175682947613534e-05, "loss": 1.9584640502929687, "memory(GiB)": 72.85, "step": 49555, "token_acc": 0.5789473684210527, "train_speed(iter/s)": 0.671769 }, { "epoch": 2.123302343515702, "grad_norm": 5.501235008239746, "learning_rate": 6.175028828372527e-05, "loss": 2.0050392150878906, "memory(GiB)": 72.85, "step": 49560, "token_acc": 0.5146579804560261, "train_speed(iter/s)": 0.671775 }, { "epoch": 2.123516558844951, "grad_norm": 4.894646644592285, "learning_rate": 6.174374687844769e-05, "loss": 2.414589500427246, "memory(GiB)": 72.85, "step": 49565, "token_acc": 0.4696485623003195, "train_speed(iter/s)": 0.671772 }, { "epoch": 2.1237307741742, "grad_norm": 4.818248748779297, "learning_rate": 6.173720526042112e-05, "loss": 2.248607635498047, "memory(GiB)": 72.85, "step": 49570, "token_acc": 0.5176848874598071, "train_speed(iter/s)": 0.671774 }, { "epoch": 2.123944989503449, "grad_norm": 5.223663330078125, "learning_rate": 
6.173066342976405e-05, "loss": 2.1971277236938476, "memory(GiB)": 72.85, "step": 49575, "token_acc": 0.48514851485148514, "train_speed(iter/s)": 0.671772 }, { "epoch": 2.124159204832698, "grad_norm": 4.2220048904418945, "learning_rate": 6.172412138659504e-05, "loss": 2.5606388092041015, "memory(GiB)": 72.85, "step": 49580, "token_acc": 0.4501510574018127, "train_speed(iter/s)": 0.671752 }, { "epoch": 2.124373420161947, "grad_norm": 5.710137844085693, "learning_rate": 6.171757913103255e-05, "loss": 2.2714134216308595, "memory(GiB)": 72.85, "step": 49585, "token_acc": 0.4767025089605735, "train_speed(iter/s)": 0.671753 }, { "epoch": 2.1245876354911957, "grad_norm": 11.84182357788086, "learning_rate": 6.171103666319514e-05, "loss": 2.5626605987548827, "memory(GiB)": 72.85, "step": 49590, "token_acc": 0.44621513944223107, "train_speed(iter/s)": 0.671754 }, { "epoch": 2.124801850820445, "grad_norm": 3.973019599914551, "learning_rate": 6.17044939832013e-05, "loss": 2.0832305908203126, "memory(GiB)": 72.85, "step": 49595, "token_acc": 0.48172757475083056, "train_speed(iter/s)": 0.671767 }, { "epoch": 2.1250160661496937, "grad_norm": 5.370612144470215, "learning_rate": 6.169795109116957e-05, "loss": 2.2072994232177736, "memory(GiB)": 72.85, "step": 49600, "token_acc": 0.46048109965635736, "train_speed(iter/s)": 0.671778 }, { "epoch": 2.1252302814789426, "grad_norm": 4.958377361297607, "learning_rate": 6.169140798721847e-05, "loss": 2.200687599182129, "memory(GiB)": 72.85, "step": 49605, "token_acc": 0.5437956204379562, "train_speed(iter/s)": 0.671785 }, { "epoch": 2.125444496808192, "grad_norm": 4.380502700805664, "learning_rate": 6.168486467146658e-05, "loss": 1.9997026443481445, "memory(GiB)": 72.85, "step": 49610, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.671793 }, { "epoch": 2.1256587121374406, "grad_norm": 3.9530117511749268, "learning_rate": 6.167832114403238e-05, "loss": 2.246762275695801, "memory(GiB)": 72.85, "step": 49615, "token_acc": 
0.468013468013468, "train_speed(iter/s)": 0.671794 }, { "epoch": 2.1258729274666894, "grad_norm": 5.779570579528809, "learning_rate": 6.167177740503444e-05, "loss": 2.093797492980957, "memory(GiB)": 72.85, "step": 49620, "token_acc": 0.5921985815602837, "train_speed(iter/s)": 0.671796 }, { "epoch": 2.1260871427959387, "grad_norm": 4.541615962982178, "learning_rate": 6.166523345459132e-05, "loss": 2.277455139160156, "memory(GiB)": 72.85, "step": 49625, "token_acc": 0.5129032258064516, "train_speed(iter/s)": 0.671802 }, { "epoch": 2.1263013581251875, "grad_norm": 4.587970733642578, "learning_rate": 6.165868929282155e-05, "loss": 2.367789649963379, "memory(GiB)": 72.85, "step": 49630, "token_acc": 0.5134099616858238, "train_speed(iter/s)": 0.6718 }, { "epoch": 2.1265155734544363, "grad_norm": 4.368774890899658, "learning_rate": 6.165214491984367e-05, "loss": 2.315774154663086, "memory(GiB)": 72.85, "step": 49635, "token_acc": 0.4563106796116505, "train_speed(iter/s)": 0.671808 }, { "epoch": 2.1267297887836856, "grad_norm": 5.43410587310791, "learning_rate": 6.164560033577626e-05, "loss": 2.145237350463867, "memory(GiB)": 72.85, "step": 49640, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.671813 }, { "epoch": 2.1269440041129344, "grad_norm": 5.504659175872803, "learning_rate": 6.163905554073787e-05, "loss": 2.3979286193847655, "memory(GiB)": 72.85, "step": 49645, "token_acc": 0.5239520958083832, "train_speed(iter/s)": 0.671815 }, { "epoch": 2.127158219442183, "grad_norm": 3.682321071624756, "learning_rate": 6.16325105348471e-05, "loss": 2.009234619140625, "memory(GiB)": 72.85, "step": 49650, "token_acc": 0.5492957746478874, "train_speed(iter/s)": 0.671818 }, { "epoch": 2.1273724347714325, "grad_norm": 4.576602458953857, "learning_rate": 6.162596531822247e-05, "loss": 2.2860157012939455, "memory(GiB)": 72.85, "step": 49655, "token_acc": 0.5053003533568905, "train_speed(iter/s)": 0.671818 }, { "epoch": 2.1275866501006813, "grad_norm": 5.813301086425781, 
"learning_rate": 6.161941989098256e-05, "loss": 2.171933174133301, "memory(GiB)": 72.85, "step": 49660, "token_acc": 0.52734375, "train_speed(iter/s)": 0.671802 }, { "epoch": 2.12780086542993, "grad_norm": 5.5450263023376465, "learning_rate": 6.161287425324597e-05, "loss": 2.1063661575317383, "memory(GiB)": 72.85, "step": 49665, "token_acc": 0.5345454545454545, "train_speed(iter/s)": 0.671807 }, { "epoch": 2.1280150807591793, "grad_norm": 3.71498703956604, "learning_rate": 6.160632840513127e-05, "loss": 2.0218549728393556, "memory(GiB)": 72.85, "step": 49670, "token_acc": 0.5162337662337663, "train_speed(iter/s)": 0.671807 }, { "epoch": 2.128229296088428, "grad_norm": 4.985803127288818, "learning_rate": 6.159978234675704e-05, "loss": 2.2754037857055662, "memory(GiB)": 72.85, "step": 49675, "token_acc": 0.5145631067961165, "train_speed(iter/s)": 0.671803 }, { "epoch": 2.128443511417677, "grad_norm": 4.548055171966553, "learning_rate": 6.159323607824188e-05, "loss": 2.322280502319336, "memory(GiB)": 72.85, "step": 49680, "token_acc": 0.5165562913907285, "train_speed(iter/s)": 0.671814 }, { "epoch": 2.128657726746926, "grad_norm": 5.616968631744385, "learning_rate": 6.158668959970437e-05, "loss": 2.4281871795654295, "memory(GiB)": 72.85, "step": 49685, "token_acc": 0.4678362573099415, "train_speed(iter/s)": 0.671807 }, { "epoch": 2.128871942076175, "grad_norm": 3.677379608154297, "learning_rate": 6.158014291126311e-05, "loss": 2.3734880447387696, "memory(GiB)": 72.85, "step": 49690, "token_acc": 0.47335423197492166, "train_speed(iter/s)": 0.671794 }, { "epoch": 2.129086157405424, "grad_norm": 4.702078342437744, "learning_rate": 6.15735960130367e-05, "loss": 2.2318841934204103, "memory(GiB)": 72.85, "step": 49695, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.671764 }, { "epoch": 2.129300372734673, "grad_norm": 4.999802112579346, "learning_rate": 6.156704890514372e-05, "loss": 2.2066276550292967, "memory(GiB)": 72.85, "step": 49700, "token_acc": 
0.5182724252491694, "train_speed(iter/s)": 0.671751 }, { "epoch": 2.129514588063922, "grad_norm": 5.964393138885498, "learning_rate": 6.156050158770282e-05, "loss": 2.19814453125, "memory(GiB)": 72.85, "step": 49705, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.671768 }, { "epoch": 2.1297288033931707, "grad_norm": 4.267690658569336, "learning_rate": 6.155395406083257e-05, "loss": 2.3416828155517577, "memory(GiB)": 72.85, "step": 49710, "token_acc": 0.5077881619937694, "train_speed(iter/s)": 0.671777 }, { "epoch": 2.12994301872242, "grad_norm": 4.915685653686523, "learning_rate": 6.154740632465162e-05, "loss": 2.2778694152832033, "memory(GiB)": 72.85, "step": 49715, "token_acc": 0.5, "train_speed(iter/s)": 0.67179 }, { "epoch": 2.130157234051669, "grad_norm": 4.965020179748535, "learning_rate": 6.154085837927857e-05, "loss": 2.4053823471069338, "memory(GiB)": 72.85, "step": 49720, "token_acc": 0.4846153846153846, "train_speed(iter/s)": 0.671801 }, { "epoch": 2.1303714493809176, "grad_norm": 4.565946578979492, "learning_rate": 6.153431022483205e-05, "loss": 2.2068626403808596, "memory(GiB)": 72.85, "step": 49725, "token_acc": 0.5179640718562875, "train_speed(iter/s)": 0.671793 }, { "epoch": 2.130585664710167, "grad_norm": 5.681163787841797, "learning_rate": 6.152776186143067e-05, "loss": 2.1689579010009767, "memory(GiB)": 72.85, "step": 49730, "token_acc": 0.5114503816793893, "train_speed(iter/s)": 0.671799 }, { "epoch": 2.1307998800394157, "grad_norm": 4.049786567687988, "learning_rate": 6.152121328919307e-05, "loss": 2.024899482727051, "memory(GiB)": 72.85, "step": 49735, "token_acc": 0.5853658536585366, "train_speed(iter/s)": 0.671804 }, { "epoch": 2.1310140953686645, "grad_norm": 5.734432697296143, "learning_rate": 6.15146645082379e-05, "loss": 2.1874351501464844, "memory(GiB)": 72.85, "step": 49740, "token_acc": 0.49049429657794674, "train_speed(iter/s)": 0.67181 }, { "epoch": 2.1312283106979137, "grad_norm": 8.199812889099121, "learning_rate": 
6.150811551868377e-05, "loss": 2.2062362670898437, "memory(GiB)": 72.85, "step": 49745, "token_acc": 0.4968152866242038, "train_speed(iter/s)": 0.671814 }, { "epoch": 2.1314425260271626, "grad_norm": 5.547325611114502, "learning_rate": 6.15015663206493e-05, "loss": 2.0787330627441407, "memory(GiB)": 72.85, "step": 49750, "token_acc": 0.5672268907563025, "train_speed(iter/s)": 0.671813 }, { "epoch": 2.1316567413564114, "grad_norm": 4.021817207336426, "learning_rate": 6.149501691425321e-05, "loss": 2.2117639541625977, "memory(GiB)": 72.85, "step": 49755, "token_acc": 0.5154320987654321, "train_speed(iter/s)": 0.671818 }, { "epoch": 2.1318709566856606, "grad_norm": 3.908459424972534, "learning_rate": 6.148846729961409e-05, "loss": 2.1204328536987305, "memory(GiB)": 72.85, "step": 49760, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.671819 }, { "epoch": 2.1320851720149094, "grad_norm": 4.138870716094971, "learning_rate": 6.148191747685061e-05, "loss": 2.514938545227051, "memory(GiB)": 72.85, "step": 49765, "token_acc": 0.47720364741641336, "train_speed(iter/s)": 0.671821 }, { "epoch": 2.1322993873441582, "grad_norm": 6.25148344039917, "learning_rate": 6.147536744608143e-05, "loss": 2.186786079406738, "memory(GiB)": 72.85, "step": 49770, "token_acc": 0.5239852398523985, "train_speed(iter/s)": 0.67181 }, { "epoch": 2.1325136026734075, "grad_norm": 4.91251802444458, "learning_rate": 6.146881720742519e-05, "loss": 2.3549448013305665, "memory(GiB)": 72.85, "step": 49775, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.671821 }, { "epoch": 2.1327278180026563, "grad_norm": 4.601755142211914, "learning_rate": 6.146226676100058e-05, "loss": 2.40112419128418, "memory(GiB)": 72.85, "step": 49780, "token_acc": 0.498220640569395, "train_speed(iter/s)": 0.671831 }, { "epoch": 2.132942033331905, "grad_norm": 4.833427429199219, "learning_rate": 6.145571610692624e-05, "loss": 2.0656764984130858, "memory(GiB)": 72.85, "step": 49785, "token_acc": 0.568561872909699, 
"train_speed(iter/s)": 0.671838 }, { "epoch": 2.1331562486611544, "grad_norm": 4.698286533355713, "learning_rate": 6.144916524532086e-05, "loss": 2.048316764831543, "memory(GiB)": 72.85, "step": 49790, "token_acc": 0.5317919075144508, "train_speed(iter/s)": 0.671847 }, { "epoch": 2.133370463990403, "grad_norm": 6.952693939208984, "learning_rate": 6.144261417630313e-05, "loss": 1.9916641235351562, "memory(GiB)": 72.85, "step": 49795, "token_acc": 0.537117903930131, "train_speed(iter/s)": 0.671858 }, { "epoch": 2.133584679319652, "grad_norm": 5.74739933013916, "learning_rate": 6.143606289999169e-05, "loss": 2.230007362365723, "memory(GiB)": 72.85, "step": 49800, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.671868 }, { "epoch": 2.1337988946489013, "grad_norm": 7.576051235198975, "learning_rate": 6.142951141650527e-05, "loss": 2.1654958724975586, "memory(GiB)": 72.85, "step": 49805, "token_acc": 0.5120967741935484, "train_speed(iter/s)": 0.671867 }, { "epoch": 2.13401310997815, "grad_norm": 3.9096779823303223, "learning_rate": 6.14229597259625e-05, "loss": 2.1039508819580077, "memory(GiB)": 72.85, "step": 49810, "token_acc": 0.5264705882352941, "train_speed(iter/s)": 0.671861 }, { "epoch": 2.134227325307399, "grad_norm": 5.343882083892822, "learning_rate": 6.141640782848211e-05, "loss": 2.0487247467041017, "memory(GiB)": 72.85, "step": 49815, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.671872 }, { "epoch": 2.134441540636648, "grad_norm": 5.403346061706543, "learning_rate": 6.140985572418276e-05, "loss": 2.403571891784668, "memory(GiB)": 72.85, "step": 49820, "token_acc": 0.4847560975609756, "train_speed(iter/s)": 0.671879 }, { "epoch": 2.134655755965897, "grad_norm": 4.714571952819824, "learning_rate": 6.14033034131832e-05, "loss": 2.279382514953613, "memory(GiB)": 72.85, "step": 49825, "token_acc": 0.5327102803738317, "train_speed(iter/s)": 0.671891 }, { "epoch": 2.1348699712951458, "grad_norm": 5.1658453941345215, "learning_rate": 
6.13967508956021e-05, "loss": 2.1299150466918944, "memory(GiB)": 72.85, "step": 49830, "token_acc": 0.5425531914893617, "train_speed(iter/s)": 0.67189 }, { "epoch": 2.135084186624395, "grad_norm": 5.159926891326904, "learning_rate": 6.139019817155815e-05, "loss": 2.3703874588012694, "memory(GiB)": 72.85, "step": 49835, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.671878 }, { "epoch": 2.135298401953644, "grad_norm": 6.1839070320129395, "learning_rate": 6.13836452411701e-05, "loss": 2.453943061828613, "memory(GiB)": 72.85, "step": 49840, "token_acc": 0.4875444839857651, "train_speed(iter/s)": 0.671883 }, { "epoch": 2.1355126172828927, "grad_norm": 7.591617584228516, "learning_rate": 6.137709210455661e-05, "loss": 2.2349266052246093, "memory(GiB)": 72.85, "step": 49845, "token_acc": 0.4856115107913669, "train_speed(iter/s)": 0.671868 }, { "epoch": 2.135726832612142, "grad_norm": 5.284181594848633, "learning_rate": 6.137053876183644e-05, "loss": 2.062244415283203, "memory(GiB)": 72.85, "step": 49850, "token_acc": 0.5811965811965812, "train_speed(iter/s)": 0.671868 }, { "epoch": 2.1359410479413907, "grad_norm": 4.7696309089660645, "learning_rate": 6.136398521312829e-05, "loss": 2.4118284225463866, "memory(GiB)": 72.85, "step": 49855, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.671861 }, { "epoch": 2.1361552632706395, "grad_norm": 6.021076202392578, "learning_rate": 6.135743145855088e-05, "loss": 2.197897529602051, "memory(GiB)": 72.85, "step": 49860, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.671871 }, { "epoch": 2.136369478599889, "grad_norm": 3.44547176361084, "learning_rate": 6.135087749822296e-05, "loss": 2.2852333068847654, "memory(GiB)": 72.85, "step": 49865, "token_acc": 0.5212464589235127, "train_speed(iter/s)": 0.671873 }, { "epoch": 2.1365836939291376, "grad_norm": 5.46606969833374, "learning_rate": 6.134432333226324e-05, "loss": 2.3279983520507814, "memory(GiB)": 72.85, "step": 49870, "token_acc": 0.553030303030303, 
"train_speed(iter/s)": 0.671883 }, { "epoch": 2.1367979092583864, "grad_norm": 4.400029182434082, "learning_rate": 6.133776896079045e-05, "loss": 2.373858642578125, "memory(GiB)": 72.85, "step": 49875, "token_acc": 0.4750733137829912, "train_speed(iter/s)": 0.671888 }, { "epoch": 2.1370121245876357, "grad_norm": 4.388162136077881, "learning_rate": 6.133121438392336e-05, "loss": 1.892667007446289, "memory(GiB)": 72.85, "step": 49880, "token_acc": 0.5537848605577689, "train_speed(iter/s)": 0.671887 }, { "epoch": 2.1372263399168845, "grad_norm": 5.593564987182617, "learning_rate": 6.132465960178069e-05, "loss": 2.162587356567383, "memory(GiB)": 72.85, "step": 49885, "token_acc": 0.5039370078740157, "train_speed(iter/s)": 0.671902 }, { "epoch": 2.1374405552461333, "grad_norm": 5.713860511779785, "learning_rate": 6.131810461448118e-05, "loss": 2.1005746841430666, "memory(GiB)": 72.85, "step": 49890, "token_acc": 0.5036764705882353, "train_speed(iter/s)": 0.671891 }, { "epoch": 2.1376547705753826, "grad_norm": 4.073886394500732, "learning_rate": 6.131154942214356e-05, "loss": 2.0395036697387696, "memory(GiB)": 72.85, "step": 49895, "token_acc": 0.5528169014084507, "train_speed(iter/s)": 0.671873 }, { "epoch": 2.1378689859046314, "grad_norm": 4.734230995178223, "learning_rate": 6.130499402488665e-05, "loss": 2.295420455932617, "memory(GiB)": 72.85, "step": 49900, "token_acc": 0.4757834757834758, "train_speed(iter/s)": 0.67186 }, { "epoch": 2.13808320123388, "grad_norm": 5.480769634246826, "learning_rate": 6.129843842282915e-05, "loss": 2.3271865844726562, "memory(GiB)": 72.85, "step": 49905, "token_acc": 0.5096153846153846, "train_speed(iter/s)": 0.671852 }, { "epoch": 2.1382974165631294, "grad_norm": 4.942173957824707, "learning_rate": 6.129188261608985e-05, "loss": 2.2539663314819336, "memory(GiB)": 72.85, "step": 49910, "token_acc": 0.5105633802816901, "train_speed(iter/s)": 0.671855 }, { "epoch": 2.1385116318923783, "grad_norm": 5.8445234298706055, "learning_rate": 
6.12853266047875e-05, "loss": 2.4528583526611327, "memory(GiB)": 72.85, "step": 49915, "token_acc": 0.4786885245901639, "train_speed(iter/s)": 0.671857 }, { "epoch": 2.138725847221627, "grad_norm": 4.262293815612793, "learning_rate": 6.127877038904087e-05, "loss": 2.2949600219726562, "memory(GiB)": 72.85, "step": 49920, "token_acc": 0.495114006514658, "train_speed(iter/s)": 0.671864 }, { "epoch": 2.1389400625508763, "grad_norm": 4.875040054321289, "learning_rate": 6.127221396896876e-05, "loss": 2.1612516403198243, "memory(GiB)": 72.85, "step": 49925, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.671864 }, { "epoch": 2.139154277880125, "grad_norm": 4.207501411437988, "learning_rate": 6.126565734468987e-05, "loss": 2.323721694946289, "memory(GiB)": 72.85, "step": 49930, "token_acc": 0.5109289617486339, "train_speed(iter/s)": 0.671871 }, { "epoch": 2.139368493209374, "grad_norm": 5.518064975738525, "learning_rate": 6.125910051632305e-05, "loss": 2.0320472717285156, "memory(GiB)": 72.85, "step": 49935, "token_acc": 0.5219123505976095, "train_speed(iter/s)": 0.671876 }, { "epoch": 2.139582708538623, "grad_norm": 3.94040584564209, "learning_rate": 6.125254348398708e-05, "loss": 2.3911870956420898, "memory(GiB)": 72.85, "step": 49940, "token_acc": 0.4577259475218659, "train_speed(iter/s)": 0.671896 }, { "epoch": 2.139796923867872, "grad_norm": 6.316915512084961, "learning_rate": 6.124598624780071e-05, "loss": 2.2595088958740233, "memory(GiB)": 72.85, "step": 49945, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.671904 }, { "epoch": 2.140011139197121, "grad_norm": 4.364997863769531, "learning_rate": 6.123942880788276e-05, "loss": 2.371501922607422, "memory(GiB)": 72.85, "step": 49950, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.67191 }, { "epoch": 2.14022535452637, "grad_norm": 4.681000709533691, "learning_rate": 6.123287116435201e-05, "loss": 2.2759654998779295, "memory(GiB)": 72.85, "step": 49955, "token_acc": 0.5141700404858299, 
"train_speed(iter/s)": 0.671914 }, { "epoch": 2.140439569855619, "grad_norm": 6.403932094573975, "learning_rate": 6.122631331732726e-05, "loss": 2.0875396728515625, "memory(GiB)": 72.85, "step": 49960, "token_acc": 0.552901023890785, "train_speed(iter/s)": 0.671931 }, { "epoch": 2.1406537851848677, "grad_norm": 5.768587589263916, "learning_rate": 6.121975526692731e-05, "loss": 2.037042999267578, "memory(GiB)": 72.85, "step": 49965, "token_acc": 0.552, "train_speed(iter/s)": 0.671918 }, { "epoch": 2.140868000514117, "grad_norm": 3.590881824493408, "learning_rate": 6.121319701327097e-05, "loss": 1.971061897277832, "memory(GiB)": 72.85, "step": 49970, "token_acc": 0.528052805280528, "train_speed(iter/s)": 0.671899 }, { "epoch": 2.1410822158433658, "grad_norm": 4.348567008972168, "learning_rate": 6.120663855647706e-05, "loss": 2.2671966552734375, "memory(GiB)": 72.85, "step": 49975, "token_acc": 0.5271317829457365, "train_speed(iter/s)": 0.671916 }, { "epoch": 2.1412964311726146, "grad_norm": 5.4629950523376465, "learning_rate": 6.120007989666437e-05, "loss": 2.124734306335449, "memory(GiB)": 72.85, "step": 49980, "token_acc": 0.49466192170818507, "train_speed(iter/s)": 0.671912 }, { "epoch": 2.141510646501864, "grad_norm": 4.938169479370117, "learning_rate": 6.119352103395172e-05, "loss": 2.10467586517334, "memory(GiB)": 72.85, "step": 49985, "token_acc": 0.5427631578947368, "train_speed(iter/s)": 0.671924 }, { "epoch": 2.1417248618311127, "grad_norm": 5.078742504119873, "learning_rate": 6.118696196845793e-05, "loss": 2.1009653091430662, "memory(GiB)": 72.85, "step": 49990, "token_acc": 0.5229681978798587, "train_speed(iter/s)": 0.671918 }, { "epoch": 2.1419390771603615, "grad_norm": 5.544097423553467, "learning_rate": 6.118040270030185e-05, "loss": 2.2911104202270507, "memory(GiB)": 72.85, "step": 49995, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.671922 }, { "epoch": 2.1421532924896107, "grad_norm": 5.4788923263549805, "learning_rate": 
6.117384322960228e-05, "loss": 2.3025794982910157, "memory(GiB)": 72.85, "step": 50000, "token_acc": 0.5115384615384615, "train_speed(iter/s)": 0.671923 }, { "epoch": 2.1421532924896107, "eval_loss": 1.9741239547729492, "eval_runtime": 16.0854, "eval_samples_per_second": 6.217, "eval_steps_per_second": 6.217, "eval_token_acc": 0.5225352112676056, "step": 50000 }, { "epoch": 2.1423675078188595, "grad_norm": 3.6648972034454346, "learning_rate": 6.116728355647805e-05, "loss": 1.9448925018310548, "memory(GiB)": 72.85, "step": 50005, "token_acc": 0.5231984205330701, "train_speed(iter/s)": 0.671768 }, { "epoch": 2.1425817231481084, "grad_norm": 4.214945316314697, "learning_rate": 6.1160723681048e-05, "loss": 2.149923324584961, "memory(GiB)": 72.85, "step": 50010, "token_acc": 0.5335820895522388, "train_speed(iter/s)": 0.671771 }, { "epoch": 2.1427959384773576, "grad_norm": 4.961206912994385, "learning_rate": 6.115416360343099e-05, "loss": 2.2454206466674806, "memory(GiB)": 72.85, "step": 50015, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.671766 }, { "epoch": 2.1430101538066064, "grad_norm": 4.6143622398376465, "learning_rate": 6.114760332374582e-05, "loss": 2.1892261505126953, "memory(GiB)": 72.85, "step": 50020, "token_acc": 0.5340136054421769, "train_speed(iter/s)": 0.671742 }, { "epoch": 2.1432243691358552, "grad_norm": 4.1612372398376465, "learning_rate": 6.114104284211139e-05, "loss": 2.228171730041504, "memory(GiB)": 72.85, "step": 50025, "token_acc": 0.47924528301886793, "train_speed(iter/s)": 0.67173 }, { "epoch": 2.1434385844651045, "grad_norm": 5.390008449554443, "learning_rate": 6.11344821586465e-05, "loss": 2.3386251449584963, "memory(GiB)": 72.85, "step": 50030, "token_acc": 0.5709090909090909, "train_speed(iter/s)": 0.671713 }, { "epoch": 2.1436527997943533, "grad_norm": 5.19488000869751, "learning_rate": 6.112792127347001e-05, "loss": 2.193052864074707, "memory(GiB)": 72.85, "step": 50035, "token_acc": 0.4956772334293948, 
"train_speed(iter/s)": 0.671717 }, { "epoch": 2.143867015123602, "grad_norm": 5.5193400382995605, "learning_rate": 6.112136018670079e-05, "loss": 2.1076381683349608, "memory(GiB)": 72.85, "step": 50040, "token_acc": 0.5563380281690141, "train_speed(iter/s)": 0.671721 }, { "epoch": 2.1440812304528514, "grad_norm": 6.984809875488281, "learning_rate": 6.111479889845772e-05, "loss": 2.010565757751465, "memory(GiB)": 72.85, "step": 50045, "token_acc": 0.5707964601769911, "train_speed(iter/s)": 0.671698 }, { "epoch": 2.1442954457821, "grad_norm": 4.767887592315674, "learning_rate": 6.110823740885962e-05, "loss": 2.263780403137207, "memory(GiB)": 72.85, "step": 50050, "token_acc": 0.5271565495207667, "train_speed(iter/s)": 0.671705 }, { "epoch": 2.144509661111349, "grad_norm": 5.877846717834473, "learning_rate": 6.110167571802538e-05, "loss": 2.047568511962891, "memory(GiB)": 72.85, "step": 50055, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.671697 }, { "epoch": 2.1447238764405983, "grad_norm": 4.828906059265137, "learning_rate": 6.109511382607388e-05, "loss": 2.375263977050781, "memory(GiB)": 72.85, "step": 50060, "token_acc": 0.5087108013937283, "train_speed(iter/s)": 0.67168 }, { "epoch": 2.144938091769847, "grad_norm": 5.600560188293457, "learning_rate": 6.108855173312397e-05, "loss": 2.1749599456787108, "memory(GiB)": 72.85, "step": 50065, "token_acc": 0.5292096219931272, "train_speed(iter/s)": 0.6717 }, { "epoch": 2.145152307099096, "grad_norm": 7.609065532684326, "learning_rate": 6.108198943929457e-05, "loss": 2.4152055740356446, "memory(GiB)": 72.85, "step": 50070, "token_acc": 0.47282608695652173, "train_speed(iter/s)": 0.671697 }, { "epoch": 2.145366522428345, "grad_norm": 4.493794918060303, "learning_rate": 6.107542694470452e-05, "loss": 2.251653289794922, "memory(GiB)": 72.85, "step": 50075, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.671716 }, { "epoch": 2.145580737757594, "grad_norm": 6.450525283813477, "learning_rate": 
6.10688642494727e-05, "loss": 2.3179176330566404, "memory(GiB)": 72.85, "step": 50080, "token_acc": 0.48905109489051096, "train_speed(iter/s)": 0.671726 }, { "epoch": 2.1457949530868428, "grad_norm": 6.277184009552002, "learning_rate": 6.106230135371804e-05, "loss": 2.2963701248168946, "memory(GiB)": 72.85, "step": 50085, "token_acc": 0.5215686274509804, "train_speed(iter/s)": 0.671726 }, { "epoch": 2.146009168416092, "grad_norm": 4.213027477264404, "learning_rate": 6.105573825755942e-05, "loss": 2.319149208068848, "memory(GiB)": 72.85, "step": 50090, "token_acc": 0.5050167224080268, "train_speed(iter/s)": 0.67173 }, { "epoch": 2.146223383745341, "grad_norm": 4.232710361480713, "learning_rate": 6.104917496111574e-05, "loss": 2.332462501525879, "memory(GiB)": 72.85, "step": 50095, "token_acc": 0.5223367697594502, "train_speed(iter/s)": 0.671732 }, { "epoch": 2.1464375990745896, "grad_norm": 4.298731803894043, "learning_rate": 6.104261146450588e-05, "loss": 2.2443572998046877, "memory(GiB)": 72.85, "step": 50100, "token_acc": 0.5097493036211699, "train_speed(iter/s)": 0.67174 }, { "epoch": 2.146651814403839, "grad_norm": 4.973060131072998, "learning_rate": 6.103604776784872e-05, "loss": 2.2907201766967775, "memory(GiB)": 72.85, "step": 50105, "token_acc": 0.48641304347826086, "train_speed(iter/s)": 0.671757 }, { "epoch": 2.1468660297330877, "grad_norm": 4.769514560699463, "learning_rate": 6.102948387126325e-05, "loss": 2.099453926086426, "memory(GiB)": 72.85, "step": 50110, "token_acc": 0.51875, "train_speed(iter/s)": 0.671767 }, { "epoch": 2.1470802450623365, "grad_norm": 3.3769824504852295, "learning_rate": 6.10229197748683e-05, "loss": 2.287876510620117, "memory(GiB)": 72.85, "step": 50115, "token_acc": 0.4716981132075472, "train_speed(iter/s)": 0.67177 }, { "epoch": 2.147294460391586, "grad_norm": 5.440306663513184, "learning_rate": 6.101635547878285e-05, "loss": 2.0246356964111327, "memory(GiB)": 72.85, "step": 50120, "token_acc": 0.5037037037037037, 
"train_speed(iter/s)": 0.671772 }, { "epoch": 2.1475086757208346, "grad_norm": 4.330105781555176, "learning_rate": 6.100979098312576e-05, "loss": 2.408889389038086, "memory(GiB)": 72.85, "step": 50125, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.671766 }, { "epoch": 2.1477228910500834, "grad_norm": 4.899474143981934, "learning_rate": 6.100322628801599e-05, "loss": 2.134604835510254, "memory(GiB)": 72.85, "step": 50130, "token_acc": 0.5631768953068592, "train_speed(iter/s)": 0.671771 }, { "epoch": 2.1479371063793327, "grad_norm": 4.505870819091797, "learning_rate": 6.0996661393572454e-05, "loss": 1.939243698120117, "memory(GiB)": 72.85, "step": 50135, "token_acc": 0.5910652920962199, "train_speed(iter/s)": 0.67177 }, { "epoch": 2.1481513217085815, "grad_norm": 3.634305715560913, "learning_rate": 6.099009629991408e-05, "loss": 2.007126235961914, "memory(GiB)": 72.85, "step": 50140, "token_acc": 0.5594405594405595, "train_speed(iter/s)": 0.671784 }, { "epoch": 2.1483655370378303, "grad_norm": 4.714554309844971, "learning_rate": 6.098353100715981e-05, "loss": 2.218277168273926, "memory(GiB)": 72.85, "step": 50145, "token_acc": 0.4937106918238994, "train_speed(iter/s)": 0.671786 }, { "epoch": 2.1485797523670795, "grad_norm": 4.306176662445068, "learning_rate": 6.0976965515428554e-05, "loss": 2.25433292388916, "memory(GiB)": 72.85, "step": 50150, "token_acc": 0.45962732919254656, "train_speed(iter/s)": 0.6718 }, { "epoch": 2.1487939676963284, "grad_norm": 5.050239086151123, "learning_rate": 6.097039982483927e-05, "loss": 2.1316961288452148, "memory(GiB)": 72.85, "step": 50155, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 0.671806 }, { "epoch": 2.149008183025577, "grad_norm": 4.4858622550964355, "learning_rate": 6.0963833935510916e-05, "loss": 2.0563833236694338, "memory(GiB)": 72.85, "step": 50160, "token_acc": 0.5487012987012987, "train_speed(iter/s)": 0.671816 }, { "epoch": 2.1492223983548264, "grad_norm": 4.9058756828308105, "learning_rate": 
6.0957267847562414e-05, "loss": 1.8854389190673828, "memory(GiB)": 72.85, "step": 50165, "token_acc": 0.5464684014869888, "train_speed(iter/s)": 0.67182 }, { "epoch": 2.1494366136840752, "grad_norm": 3.8306050300598145, "learning_rate": 6.095070156111274e-05, "loss": 2.120853805541992, "memory(GiB)": 72.85, "step": 50170, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.671828 }, { "epoch": 2.149650829013324, "grad_norm": 4.059969425201416, "learning_rate": 6.094413507628084e-05, "loss": 2.0639827728271483, "memory(GiB)": 72.85, "step": 50175, "token_acc": 0.5324675324675324, "train_speed(iter/s)": 0.671828 }, { "epoch": 2.1498650443425733, "grad_norm": 6.922610282897949, "learning_rate": 6.093756839318565e-05, "loss": 1.8424880981445313, "memory(GiB)": 72.85, "step": 50180, "token_acc": 0.573943661971831, "train_speed(iter/s)": 0.671817 }, { "epoch": 2.150079259671822, "grad_norm": 4.741069793701172, "learning_rate": 6.093100151194615e-05, "loss": 2.23834171295166, "memory(GiB)": 72.85, "step": 50185, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.671819 }, { "epoch": 2.150293475001071, "grad_norm": 5.298539638519287, "learning_rate": 6.09244344326813e-05, "loss": 2.1185834884643553, "memory(GiB)": 72.85, "step": 50190, "token_acc": 0.5192307692307693, "train_speed(iter/s)": 0.671802 }, { "epoch": 2.15050769033032, "grad_norm": 5.782657146453857, "learning_rate": 6.091786715551008e-05, "loss": 2.1870222091674805, "memory(GiB)": 72.85, "step": 50195, "token_acc": 0.5129032258064516, "train_speed(iter/s)": 0.6718 }, { "epoch": 2.150721905659569, "grad_norm": 6.418704509735107, "learning_rate": 6.091129968055146e-05, "loss": 2.422318458557129, "memory(GiB)": 72.85, "step": 50200, "token_acc": 0.4723127035830619, "train_speed(iter/s)": 0.671792 }, { "epoch": 2.150936120988818, "grad_norm": 5.359246253967285, "learning_rate": 6.09047320079244e-05, "loss": 2.1108282089233397, "memory(GiB)": 72.85, "step": 50205, "token_acc": 0.5277777777777778, 
"train_speed(iter/s)": 0.671775 }, { "epoch": 2.151150336318067, "grad_norm": 4.78953742980957, "learning_rate": 6.0898164137747893e-05, "loss": 2.4551185607910155, "memory(GiB)": 72.85, "step": 50210, "token_acc": 0.48328267477203646, "train_speed(iter/s)": 0.671766 }, { "epoch": 2.151364551647316, "grad_norm": 6.138023853302002, "learning_rate": 6.089159607014092e-05, "loss": 2.1577219009399413, "memory(GiB)": 72.85, "step": 50215, "token_acc": 0.5205992509363296, "train_speed(iter/s)": 0.671766 }, { "epoch": 2.1515787669765647, "grad_norm": 4.419003963470459, "learning_rate": 6.0885027805222484e-05, "loss": 2.1461196899414063, "memory(GiB)": 72.85, "step": 50220, "token_acc": 0.5392156862745098, "train_speed(iter/s)": 0.671767 }, { "epoch": 2.151792982305814, "grad_norm": 6.526965141296387, "learning_rate": 6.0878459343111517e-05, "loss": 2.1835607528686523, "memory(GiB)": 72.85, "step": 50225, "token_acc": 0.5155709342560554, "train_speed(iter/s)": 0.671778 }, { "epoch": 2.1520071976350628, "grad_norm": 4.131188869476318, "learning_rate": 6.087189068392709e-05, "loss": 2.439041328430176, "memory(GiB)": 72.85, "step": 50230, "token_acc": 0.4952076677316294, "train_speed(iter/s)": 0.671767 }, { "epoch": 2.1522214129643116, "grad_norm": 4.548436641693115, "learning_rate": 6.0865321827788154e-05, "loss": 2.2509790420532227, "memory(GiB)": 72.85, "step": 50235, "token_acc": 0.49818181818181817, "train_speed(iter/s)": 0.671754 }, { "epoch": 2.152435628293561, "grad_norm": 4.056807518005371, "learning_rate": 6.085875277481372e-05, "loss": 2.045450210571289, "memory(GiB)": 72.85, "step": 50240, "token_acc": 0.5317220543806647, "train_speed(iter/s)": 0.671758 }, { "epoch": 2.1526498436228096, "grad_norm": 4.016176700592041, "learning_rate": 6.08521835251228e-05, "loss": 2.22043399810791, "memory(GiB)": 72.85, "step": 50245, "token_acc": 0.5457875457875457, "train_speed(iter/s)": 0.671751 }, { "epoch": 2.1528640589520585, "grad_norm": 6.174413681030273, "learning_rate": 
6.084561407883438e-05, "loss": 2.3917022705078126, "memory(GiB)": 72.85, "step": 50250, "token_acc": 0.5167597765363129, "train_speed(iter/s)": 0.67177 }, { "epoch": 2.1530782742813077, "grad_norm": 4.314099311828613, "learning_rate": 6.083904443606751e-05, "loss": 2.1569263458251955, "memory(GiB)": 72.85, "step": 50255, "token_acc": 0.5016393442622951, "train_speed(iter/s)": 0.671773 }, { "epoch": 2.1532924896105565, "grad_norm": 5.7706122398376465, "learning_rate": 6.083247459694117e-05, "loss": 2.3130746841430665, "memory(GiB)": 72.85, "step": 50260, "token_acc": 0.5141065830721003, "train_speed(iter/s)": 0.671776 }, { "epoch": 2.1535067049398053, "grad_norm": 5.329835414886475, "learning_rate": 6.0825904561574374e-05, "loss": 2.2575925827026366, "memory(GiB)": 72.85, "step": 50265, "token_acc": 0.48727272727272725, "train_speed(iter/s)": 0.671759 }, { "epoch": 2.1537209202690546, "grad_norm": 3.3964803218841553, "learning_rate": 6.081933433008617e-05, "loss": 2.087543487548828, "memory(GiB)": 72.85, "step": 50270, "token_acc": 0.4923547400611621, "train_speed(iter/s)": 0.671765 }, { "epoch": 2.1539351355983034, "grad_norm": 5.632302761077881, "learning_rate": 6.081276390259559e-05, "loss": 2.156910514831543, "memory(GiB)": 72.85, "step": 50275, "token_acc": 0.5043478260869565, "train_speed(iter/s)": 0.671766 }, { "epoch": 2.154149350927552, "grad_norm": 4.722671985626221, "learning_rate": 6.0806193279221634e-05, "loss": 2.1140779495239257, "memory(GiB)": 72.85, "step": 50280, "token_acc": 0.4852459016393443, "train_speed(iter/s)": 0.671768 }, { "epoch": 2.1543635662568015, "grad_norm": 4.049758434295654, "learning_rate": 6.079962246008336e-05, "loss": 2.3289546966552734, "memory(GiB)": 72.85, "step": 50285, "token_acc": 0.49049429657794674, "train_speed(iter/s)": 0.671763 }, { "epoch": 2.1545777815860503, "grad_norm": 6.263044357299805, "learning_rate": 6.07930514452998e-05, "loss": 2.408977508544922, "memory(GiB)": 72.85, "step": 50290, "token_acc": 
0.5112540192926045, "train_speed(iter/s)": 0.671793 }, { "epoch": 2.154791996915299, "grad_norm": 4.924918174743652, "learning_rate": 6.0786480234989976e-05, "loss": 2.1337757110595703, "memory(GiB)": 72.85, "step": 50295, "token_acc": 0.5259259259259259, "train_speed(iter/s)": 0.6718 }, { "epoch": 2.1550062122445484, "grad_norm": 6.10414457321167, "learning_rate": 6.0779908829272936e-05, "loss": 2.40203857421875, "memory(GiB)": 72.85, "step": 50300, "token_acc": 0.5073529411764706, "train_speed(iter/s)": 0.671813 }, { "epoch": 2.155220427573797, "grad_norm": 4.280865669250488, "learning_rate": 6.077333722826775e-05, "loss": 2.253132438659668, "memory(GiB)": 72.85, "step": 50305, "token_acc": 0.5115511551155115, "train_speed(iter/s)": 0.671818 }, { "epoch": 2.155434642903046, "grad_norm": 4.603756427764893, "learning_rate": 6.076676543209344e-05, "loss": 2.390407752990723, "memory(GiB)": 72.85, "step": 50310, "token_acc": 0.511326860841424, "train_speed(iter/s)": 0.671816 }, { "epoch": 2.1556488582322952, "grad_norm": 4.721193313598633, "learning_rate": 6.07601934408691e-05, "loss": 1.954931640625, "memory(GiB)": 72.85, "step": 50315, "token_acc": 0.5579710144927537, "train_speed(iter/s)": 0.671826 }, { "epoch": 2.155863073561544, "grad_norm": 5.548201084136963, "learning_rate": 6.075362125471374e-05, "loss": 2.1798940658569337, "memory(GiB)": 72.85, "step": 50320, "token_acc": 0.5075187969924813, "train_speed(iter/s)": 0.671827 }, { "epoch": 2.156077288890793, "grad_norm": 5.662086009979248, "learning_rate": 6.0747048873746446e-05, "loss": 2.600350570678711, "memory(GiB)": 72.85, "step": 50325, "token_acc": 0.4755700325732899, "train_speed(iter/s)": 0.671822 }, { "epoch": 2.156291504220042, "grad_norm": 5.087068557739258, "learning_rate": 6.074047629808629e-05, "loss": 2.327171516418457, "memory(GiB)": 72.85, "step": 50330, "token_acc": 0.5027777777777778, "train_speed(iter/s)": 0.671828 }, { "epoch": 2.156505719549291, "grad_norm": 5.33787202835083, 
"learning_rate": 6.073390352785232e-05, "loss": 2.361582565307617, "memory(GiB)": 72.85, "step": 50335, "token_acc": 0.49110320284697506, "train_speed(iter/s)": 0.671835 }, { "epoch": 2.1567199348785397, "grad_norm": 5.450136661529541, "learning_rate": 6.0727330563163624e-05, "loss": 2.1822797775268556, "memory(GiB)": 72.85, "step": 50340, "token_acc": 0.50187265917603, "train_speed(iter/s)": 0.671824 }, { "epoch": 2.156934150207789, "grad_norm": 3.765199899673462, "learning_rate": 6.072075740413926e-05, "loss": 1.9231489181518555, "memory(GiB)": 72.85, "step": 50345, "token_acc": 0.5693950177935944, "train_speed(iter/s)": 0.671831 }, { "epoch": 2.157148365537038, "grad_norm": 5.023677349090576, "learning_rate": 6.071418405089834e-05, "loss": 2.377846336364746, "memory(GiB)": 72.85, "step": 50350, "token_acc": 0.4879725085910653, "train_speed(iter/s)": 0.67185 }, { "epoch": 2.1573625808662866, "grad_norm": 4.4646077156066895, "learning_rate": 6.070761050355991e-05, "loss": 2.2300172805786134, "memory(GiB)": 72.85, "step": 50355, "token_acc": 0.4804270462633452, "train_speed(iter/s)": 0.671843 }, { "epoch": 2.157576796195536, "grad_norm": 5.739037036895752, "learning_rate": 6.070103676224308e-05, "loss": 2.3937328338623045, "memory(GiB)": 72.85, "step": 50360, "token_acc": 0.5016501650165016, "train_speed(iter/s)": 0.671848 }, { "epoch": 2.1577910115247847, "grad_norm": 5.121065139770508, "learning_rate": 6.069446282706692e-05, "loss": 2.2638063430786133, "memory(GiB)": 72.85, "step": 50365, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.67185 }, { "epoch": 2.1580052268540335, "grad_norm": 5.331583499908447, "learning_rate": 6.068788869815054e-05, "loss": 1.9705753326416016, "memory(GiB)": 72.85, "step": 50370, "token_acc": 0.5547445255474452, "train_speed(iter/s)": 0.671838 }, { "epoch": 2.1582194421832828, "grad_norm": 5.338424205780029, "learning_rate": 6.068131437561303e-05, "loss": 2.1145551681518553, "memory(GiB)": 72.85, "step": 50375, "token_acc": 
0.5255972696245734, "train_speed(iter/s)": 0.671844 }, { "epoch": 2.1584336575125316, "grad_norm": 5.802259922027588, "learning_rate": 6.067473985957349e-05, "loss": 2.6624099731445314, "memory(GiB)": 72.85, "step": 50380, "token_acc": 0.450354609929078, "train_speed(iter/s)": 0.671852 }, { "epoch": 2.1586478728417804, "grad_norm": 6.296213626861572, "learning_rate": 6.066816515015101e-05, "loss": 2.076486587524414, "memory(GiB)": 72.85, "step": 50385, "token_acc": 0.531496062992126, "train_speed(iter/s)": 0.671851 }, { "epoch": 2.1588620881710296, "grad_norm": 6.033364772796631, "learning_rate": 6.0661590247464736e-05, "loss": 2.408445358276367, "memory(GiB)": 72.85, "step": 50390, "token_acc": 0.46226415094339623, "train_speed(iter/s)": 0.671851 }, { "epoch": 2.1590763035002785, "grad_norm": 5.015231609344482, "learning_rate": 6.065501515163374e-05, "loss": 2.3041122436523436, "memory(GiB)": 72.85, "step": 50395, "token_acc": 0.49838187702265374, "train_speed(iter/s)": 0.671839 }, { "epoch": 2.1592905188295273, "grad_norm": 5.363202095031738, "learning_rate": 6.064843986277715e-05, "loss": 2.058120536804199, "memory(GiB)": 72.85, "step": 50400, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.671831 }, { "epoch": 2.1595047341587765, "grad_norm": 4.320107460021973, "learning_rate": 6.064186438101409e-05, "loss": 2.1422380447387694, "memory(GiB)": 72.85, "step": 50405, "token_acc": 0.5371621621621622, "train_speed(iter/s)": 0.671845 }, { "epoch": 2.1597189494880253, "grad_norm": 6.095240116119385, "learning_rate": 6.063528870646367e-05, "loss": 2.089213752746582, "memory(GiB)": 72.85, "step": 50410, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.671841 }, { "epoch": 2.159933164817274, "grad_norm": 4.457316875457764, "learning_rate": 6.0628712839245005e-05, "loss": 2.3063066482543944, "memory(GiB)": 72.85, "step": 50415, "token_acc": 0.5034013605442177, "train_speed(iter/s)": 0.671834 }, { "epoch": 2.1601473801465234, "grad_norm": 
4.501878261566162, "learning_rate": 6.0622136779477254e-05, "loss": 2.2368825912475585, "memory(GiB)": 72.85, "step": 50420, "token_acc": 0.5309090909090909, "train_speed(iter/s)": 0.671839 }, { "epoch": 2.1603615954757722, "grad_norm": 4.687439918518066, "learning_rate": 6.0615560527279514e-05, "loss": 2.388815689086914, "memory(GiB)": 72.85, "step": 50425, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.671837 }, { "epoch": 2.160575810805021, "grad_norm": 5.255372047424316, "learning_rate": 6.060898408277096e-05, "loss": 2.468709945678711, "memory(GiB)": 72.85, "step": 50430, "token_acc": 0.4773662551440329, "train_speed(iter/s)": 0.671835 }, { "epoch": 2.1607900261342703, "grad_norm": 4.256497859954834, "learning_rate": 6.06024074460707e-05, "loss": 2.283095932006836, "memory(GiB)": 72.85, "step": 50435, "token_acc": 0.48188405797101447, "train_speed(iter/s)": 0.671828 }, { "epoch": 2.161004241463519, "grad_norm": 5.259701251983643, "learning_rate": 6.059583061729787e-05, "loss": 2.0727563858032227, "memory(GiB)": 72.85, "step": 50440, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.671837 }, { "epoch": 2.161218456792768, "grad_norm": 9.330817222595215, "learning_rate": 6.058925359657164e-05, "loss": 1.7762153625488282, "memory(GiB)": 72.85, "step": 50445, "token_acc": 0.5903614457831325, "train_speed(iter/s)": 0.671843 }, { "epoch": 2.161432672122017, "grad_norm": 5.431083679199219, "learning_rate": 6.058267638401114e-05, "loss": 2.472739410400391, "memory(GiB)": 72.85, "step": 50450, "token_acc": 0.5046728971962616, "train_speed(iter/s)": 0.67185 }, { "epoch": 2.161646887451266, "grad_norm": 4.263227462768555, "learning_rate": 6.057609897973552e-05, "loss": 2.19896240234375, "memory(GiB)": 72.85, "step": 50455, "token_acc": 0.5488721804511278, "train_speed(iter/s)": 0.671849 }, { "epoch": 2.161861102780515, "grad_norm": 4.339183807373047, "learning_rate": 6.056952138386397e-05, "loss": 2.2854862213134766, "memory(GiB)": 72.85, "step": 
50460, "token_acc": 0.5073313782991202, "train_speed(iter/s)": 0.671858 }, { "epoch": 2.162075318109764, "grad_norm": 5.720958232879639, "learning_rate": 6.056294359651562e-05, "loss": 2.250848960876465, "memory(GiB)": 72.85, "step": 50465, "token_acc": 0.5218978102189781, "train_speed(iter/s)": 0.671863 }, { "epoch": 2.162289533439013, "grad_norm": 6.232659816741943, "learning_rate": 6.0556365617809615e-05, "loss": 2.243949127197266, "memory(GiB)": 72.85, "step": 50470, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.671875 }, { "epoch": 2.1625037487682617, "grad_norm": 8.16286849975586, "learning_rate": 6.0549787447865166e-05, "loss": 2.415819549560547, "memory(GiB)": 72.85, "step": 50475, "token_acc": 0.45652173913043476, "train_speed(iter/s)": 0.671889 }, { "epoch": 2.162717964097511, "grad_norm": 4.192944526672363, "learning_rate": 6.0543209086801434e-05, "loss": 2.441322135925293, "memory(GiB)": 72.85, "step": 50480, "token_acc": 0.5088967971530249, "train_speed(iter/s)": 0.671889 }, { "epoch": 2.1629321794267597, "grad_norm": 6.611559867858887, "learning_rate": 6.053663053473754e-05, "loss": 2.2294952392578127, "memory(GiB)": 72.85, "step": 50485, "token_acc": 0.542319749216301, "train_speed(iter/s)": 0.671887 }, { "epoch": 2.1631463947560086, "grad_norm": 4.378810882568359, "learning_rate": 6.053005179179273e-05, "loss": 2.2335567474365234, "memory(GiB)": 72.85, "step": 50490, "token_acc": 0.5322033898305085, "train_speed(iter/s)": 0.671897 }, { "epoch": 2.163360610085258, "grad_norm": 4.841519355773926, "learning_rate": 6.052347285808615e-05, "loss": 2.339498519897461, "memory(GiB)": 72.85, "step": 50495, "token_acc": 0.4900398406374502, "train_speed(iter/s)": 0.67189 }, { "epoch": 2.1635748254145066, "grad_norm": 5.641866683959961, "learning_rate": 6.051689373373698e-05, "loss": 2.2069280624389647, "memory(GiB)": 72.85, "step": 50500, "token_acc": 0.498567335243553, "train_speed(iter/s)": 0.671888 }, { "epoch": 2.1635748254145066, "eval_loss": 
2.201698064804077, "eval_runtime": 15.3854, "eval_samples_per_second": 6.5, "eval_steps_per_second": 6.5, "eval_token_acc": 0.49195710455764075, "step": 50500 }, { "epoch": 2.1637890407437554, "grad_norm": 4.334278106689453, "learning_rate": 6.0510314418864413e-05, "loss": 2.384017753601074, "memory(GiB)": 72.85, "step": 50505, "token_acc": 0.49809885931558934, "train_speed(iter/s)": 0.671734 }, { "epoch": 2.1640032560730047, "grad_norm": 4.693604946136475, "learning_rate": 6.050373491358764e-05, "loss": 2.186212921142578, "memory(GiB)": 72.85, "step": 50510, "token_acc": 0.5547169811320755, "train_speed(iter/s)": 0.671737 }, { "epoch": 2.1642174714022535, "grad_norm": 5.021908283233643, "learning_rate": 6.049715521802587e-05, "loss": 2.1502798080444334, "memory(GiB)": 72.85, "step": 50515, "token_acc": 0.5431654676258992, "train_speed(iter/s)": 0.671745 }, { "epoch": 2.1644316867315023, "grad_norm": 5.206111431121826, "learning_rate": 6.0490575332298274e-05, "loss": 2.4727622985839846, "memory(GiB)": 72.85, "step": 50520, "token_acc": 0.5071428571428571, "train_speed(iter/s)": 0.671761 }, { "epoch": 2.1646459020607516, "grad_norm": 4.883852481842041, "learning_rate": 6.048399525652406e-05, "loss": 2.2165653228759767, "memory(GiB)": 72.85, "step": 50525, "token_acc": 0.5066666666666667, "train_speed(iter/s)": 0.671762 }, { "epoch": 2.1648601173900004, "grad_norm": 5.058311939239502, "learning_rate": 6.0477414990822444e-05, "loss": 2.200361442565918, "memory(GiB)": 72.85, "step": 50530, "token_acc": 0.5012787723785166, "train_speed(iter/s)": 0.671753 }, { "epoch": 2.165074332719249, "grad_norm": 5.353621959686279, "learning_rate": 6.0470834535312636e-05, "loss": 2.250354766845703, "memory(GiB)": 72.85, "step": 50535, "token_acc": 0.5365853658536586, "train_speed(iter/s)": 0.671763 }, { "epoch": 2.1652885480484985, "grad_norm": 4.8961615562438965, "learning_rate": 6.046425389011382e-05, "loss": 2.111263084411621, "memory(GiB)": 72.85, "step": 50540, "token_acc": 
0.5421245421245421, "train_speed(iter/s)": 0.671755 }, { "epoch": 2.1655027633777473, "grad_norm": 4.672097206115723, "learning_rate": 6.045767305534524e-05, "loss": 2.142450141906738, "memory(GiB)": 72.85, "step": 50545, "token_acc": 0.5171232876712328, "train_speed(iter/s)": 0.671768 }, { "epoch": 2.165716978706996, "grad_norm": 4.61496114730835, "learning_rate": 6.045109203112611e-05, "loss": 2.102244186401367, "memory(GiB)": 72.85, "step": 50550, "token_acc": 0.522911051212938, "train_speed(iter/s)": 0.671764 }, { "epoch": 2.1659311940362453, "grad_norm": 4.928328990936279, "learning_rate": 6.044451081757563e-05, "loss": 2.392218589782715, "memory(GiB)": 72.85, "step": 50555, "token_acc": 0.4967532467532468, "train_speed(iter/s)": 0.671778 }, { "epoch": 2.166145409365494, "grad_norm": 4.815371990203857, "learning_rate": 6.043792941481303e-05, "loss": 2.2007959365844725, "memory(GiB)": 72.85, "step": 50560, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.671793 }, { "epoch": 2.166359624694743, "grad_norm": 5.28557825088501, "learning_rate": 6.0431347822957574e-05, "loss": 2.474639129638672, "memory(GiB)": 72.85, "step": 50565, "token_acc": 0.4701086956521739, "train_speed(iter/s)": 0.671804 }, { "epoch": 2.1665738400239922, "grad_norm": 7.325283050537109, "learning_rate": 6.042476604212844e-05, "loss": 2.3488616943359375, "memory(GiB)": 72.85, "step": 50570, "token_acc": 0.5018450184501845, "train_speed(iter/s)": 0.6718 }, { "epoch": 2.166788055353241, "grad_norm": 3.7244653701782227, "learning_rate": 6.041818407244492e-05, "loss": 2.24291877746582, "memory(GiB)": 72.85, "step": 50575, "token_acc": 0.5134228187919463, "train_speed(iter/s)": 0.671811 }, { "epoch": 2.16700227068249, "grad_norm": 5.640575408935547, "learning_rate": 6.0411601914026205e-05, "loss": 2.2094371795654295, "memory(GiB)": 72.85, "step": 50580, "token_acc": 0.5494505494505495, "train_speed(iter/s)": 0.671817 }, { "epoch": 2.167216486011739, "grad_norm": 5.353941440582275, 
"learning_rate": 6.040501956699155e-05, "loss": 2.0942201614379883, "memory(GiB)": 72.85, "step": 50585, "token_acc": 0.5474137931034483, "train_speed(iter/s)": 0.671827 }, { "epoch": 2.167430701340988, "grad_norm": 4.551668643951416, "learning_rate": 6.039843703146022e-05, "loss": 2.3892345428466797, "memory(GiB)": 72.85, "step": 50590, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.671824 }, { "epoch": 2.1676449166702367, "grad_norm": 4.2623209953308105, "learning_rate": 6.039185430755143e-05, "loss": 2.4725812911987304, "memory(GiB)": 72.85, "step": 50595, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.671808 }, { "epoch": 2.167859131999486, "grad_norm": 4.6760993003845215, "learning_rate": 6.038527139538445e-05, "loss": 2.128106117248535, "memory(GiB)": 72.85, "step": 50600, "token_acc": 0.5288461538461539, "train_speed(iter/s)": 0.671814 }, { "epoch": 2.168073347328735, "grad_norm": 5.546715259552002, "learning_rate": 6.0378688295078556e-05, "loss": 2.2333610534667967, "memory(GiB)": 72.85, "step": 50605, "token_acc": 0.5146579804560261, "train_speed(iter/s)": 0.671826 }, { "epoch": 2.1682875626579836, "grad_norm": 4.497097969055176, "learning_rate": 6.037210500675298e-05, "loss": 2.1167730331420898, "memory(GiB)": 72.85, "step": 50610, "token_acc": 0.508833922261484, "train_speed(iter/s)": 0.67182 }, { "epoch": 2.168501777987233, "grad_norm": 5.106619834899902, "learning_rate": 6.036552153052698e-05, "loss": 2.054185485839844, "memory(GiB)": 72.85, "step": 50615, "token_acc": 0.5392156862745098, "train_speed(iter/s)": 0.671827 }, { "epoch": 2.1687159933164817, "grad_norm": 6.069531440734863, "learning_rate": 6.035893786651985e-05, "loss": 2.3490528106689452, "memory(GiB)": 72.85, "step": 50620, "token_acc": 0.4983164983164983, "train_speed(iter/s)": 0.671818 }, { "epoch": 2.1689302086457305, "grad_norm": 4.900267601013184, "learning_rate": 6.035235401485084e-05, "loss": 2.3605278015136717, "memory(GiB)": 72.85, "step": 50625, 
"token_acc": 0.5195530726256983, "train_speed(iter/s)": 0.67181 }, { "epoch": 2.1691444239749798, "grad_norm": 5.442502975463867, "learning_rate": 6.034576997563921e-05, "loss": 2.444580841064453, "memory(GiB)": 72.85, "step": 50630, "token_acc": 0.49264705882352944, "train_speed(iter/s)": 0.671806 }, { "epoch": 2.1693586393042286, "grad_norm": 4.24289083480835, "learning_rate": 6.0339185749004265e-05, "loss": 2.1982669830322266, "memory(GiB)": 72.85, "step": 50635, "token_acc": 0.5104895104895105, "train_speed(iter/s)": 0.671793 }, { "epoch": 2.1695728546334774, "grad_norm": 5.342505931854248, "learning_rate": 6.033260133506528e-05, "loss": 2.4121557235717774, "memory(GiB)": 72.85, "step": 50640, "token_acc": 0.5201238390092879, "train_speed(iter/s)": 0.67179 }, { "epoch": 2.1697870699627266, "grad_norm": 4.939157485961914, "learning_rate": 6.03260167339415e-05, "loss": 2.178050231933594, "memory(GiB)": 72.85, "step": 50645, "token_acc": 0.5264797507788161, "train_speed(iter/s)": 0.671781 }, { "epoch": 2.1700012852919754, "grad_norm": 4.460855007171631, "learning_rate": 6.031943194575227e-05, "loss": 2.466562271118164, "memory(GiB)": 72.85, "step": 50650, "token_acc": 0.5047619047619047, "train_speed(iter/s)": 0.671781 }, { "epoch": 2.1702155006212243, "grad_norm": 7.6127777099609375, "learning_rate": 6.031284697061683e-05, "loss": 2.301529884338379, "memory(GiB)": 72.85, "step": 50655, "token_acc": 0.4928571428571429, "train_speed(iter/s)": 0.671784 }, { "epoch": 2.1704297159504735, "grad_norm": 4.852128982543945, "learning_rate": 6.030626180865451e-05, "loss": 2.358362007141113, "memory(GiB)": 72.85, "step": 50660, "token_acc": 0.5056179775280899, "train_speed(iter/s)": 0.67179 }, { "epoch": 2.1706439312797223, "grad_norm": 4.436346530914307, "learning_rate": 6.029967645998459e-05, "loss": 2.248819923400879, "memory(GiB)": 72.85, "step": 50665, "token_acc": 0.5358255451713395, "train_speed(iter/s)": 0.671794 }, { "epoch": 2.170858146608971, "grad_norm": 
6.052988052368164, "learning_rate": 6.0293090924726346e-05, "loss": 2.5160243988037108, "memory(GiB)": 72.85, "step": 50670, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.671794 }, { "epoch": 2.1710723619382204, "grad_norm": 3.713914632797241, "learning_rate": 6.028650520299912e-05, "loss": 2.2839866638183595, "memory(GiB)": 72.85, "step": 50675, "token_acc": 0.5362776025236593, "train_speed(iter/s)": 0.671792 }, { "epoch": 2.171286577267469, "grad_norm": 5.27067232131958, "learning_rate": 6.0279919294922206e-05, "loss": 2.2497526168823243, "memory(GiB)": 72.85, "step": 50680, "token_acc": 0.5, "train_speed(iter/s)": 0.671796 }, { "epoch": 2.171500792596718, "grad_norm": 4.352365493774414, "learning_rate": 6.02733332006149e-05, "loss": 2.4250574111938477, "memory(GiB)": 72.85, "step": 50685, "token_acc": 0.49477351916376305, "train_speed(iter/s)": 0.671795 }, { "epoch": 2.1717150079259673, "grad_norm": 3.8010313510894775, "learning_rate": 6.026674692019654e-05, "loss": 2.410795974731445, "memory(GiB)": 72.85, "step": 50690, "token_acc": 0.4868035190615836, "train_speed(iter/s)": 0.671801 }, { "epoch": 2.171929223255216, "grad_norm": 4.919607639312744, "learning_rate": 6.0260160453786416e-05, "loss": 2.2539138793945312, "memory(GiB)": 72.85, "step": 50695, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.671798 }, { "epoch": 2.172143438584465, "grad_norm": 6.769827842712402, "learning_rate": 6.025357380150387e-05, "loss": 2.112028121948242, "memory(GiB)": 72.85, "step": 50700, "token_acc": 0.5075187969924813, "train_speed(iter/s)": 0.671777 }, { "epoch": 2.172357653913714, "grad_norm": 4.30273962020874, "learning_rate": 6.02469869634682e-05, "loss": 2.196843719482422, "memory(GiB)": 72.85, "step": 50705, "token_acc": 0.5031055900621118, "train_speed(iter/s)": 0.671765 }, { "epoch": 2.172571869242963, "grad_norm": 4.567917823791504, "learning_rate": 6.0240399939798766e-05, "loss": 2.3427993774414064, "memory(GiB)": 72.85, "step": 50710, 
"token_acc": 0.5411392405063291, "train_speed(iter/s)": 0.671743 }, { "epoch": 2.172786084572212, "grad_norm": 6.096375942230225, "learning_rate": 6.023381273061487e-05, "loss": 2.0617624282836915, "memory(GiB)": 72.85, "step": 50715, "token_acc": 0.5230769230769231, "train_speed(iter/s)": 0.671749 }, { "epoch": 2.173000299901461, "grad_norm": 4.912344455718994, "learning_rate": 6.0227225336035866e-05, "loss": 2.212217330932617, "memory(GiB)": 72.85, "step": 50720, "token_acc": 0.5408163265306123, "train_speed(iter/s)": 0.671756 }, { "epoch": 2.17321451523071, "grad_norm": 5.008901596069336, "learning_rate": 6.022063775618107e-05, "loss": 2.0594240188598634, "memory(GiB)": 72.85, "step": 50725, "token_acc": 0.556390977443609, "train_speed(iter/s)": 0.671745 }, { "epoch": 2.1734287305599587, "grad_norm": 4.781250476837158, "learning_rate": 6.0214049991169844e-05, "loss": 2.486410713195801, "memory(GiB)": 72.85, "step": 50730, "token_acc": 0.4981549815498155, "train_speed(iter/s)": 0.671753 }, { "epoch": 2.173642945889208, "grad_norm": 5.051817893981934, "learning_rate": 6.0207462041121524e-05, "loss": 2.2789234161376952, "memory(GiB)": 72.85, "step": 50735, "token_acc": 0.5105105105105106, "train_speed(iter/s)": 0.67177 }, { "epoch": 2.1738571612184567, "grad_norm": 7.288935661315918, "learning_rate": 6.0200873906155455e-05, "loss": 2.4028085708618163, "memory(GiB)": 72.85, "step": 50740, "token_acc": 0.4671814671814672, "train_speed(iter/s)": 0.671787 }, { "epoch": 2.1740713765477055, "grad_norm": 5.810967922210693, "learning_rate": 6.0194285586390955e-05, "loss": 2.2211299896240235, "memory(GiB)": 72.85, "step": 50745, "token_acc": 0.48046875, "train_speed(iter/s)": 0.671788 }, { "epoch": 2.174285591876955, "grad_norm": 6.800780773162842, "learning_rate": 6.0187697081947434e-05, "loss": 2.3002944946289063, "memory(GiB)": 72.85, "step": 50750, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.67179 }, { "epoch": 2.1744998072062036, "grad_norm": 
8.56468677520752, "learning_rate": 6.0181108392944216e-05, "loss": 2.4415369033813477, "memory(GiB)": 72.85, "step": 50755, "token_acc": 0.45985401459854014, "train_speed(iter/s)": 0.671785 }, { "epoch": 2.1747140225354524, "grad_norm": 4.4540276527404785, "learning_rate": 6.017451951950067e-05, "loss": 2.4134794235229493, "memory(GiB)": 72.85, "step": 50760, "token_acc": 0.5, "train_speed(iter/s)": 0.671804 }, { "epoch": 2.1749282378647017, "grad_norm": 4.814472675323486, "learning_rate": 6.0167930461736165e-05, "loss": 2.308772087097168, "memory(GiB)": 72.85, "step": 50765, "token_acc": 0.5133333333333333, "train_speed(iter/s)": 0.671806 }, { "epoch": 2.1751424531939505, "grad_norm": 5.896186828613281, "learning_rate": 6.016134121977006e-05, "loss": 2.108444023132324, "memory(GiB)": 72.85, "step": 50770, "token_acc": 0.5168918918918919, "train_speed(iter/s)": 0.671813 }, { "epoch": 2.1753566685231993, "grad_norm": 5.234255790710449, "learning_rate": 6.015475179372173e-05, "loss": 2.1291942596435547, "memory(GiB)": 72.85, "step": 50775, "token_acc": 0.5058365758754864, "train_speed(iter/s)": 0.671814 }, { "epoch": 2.1755708838524486, "grad_norm": 4.653395652770996, "learning_rate": 6.0148162183710534e-05, "loss": 2.4576251983642576, "memory(GiB)": 72.85, "step": 50780, "token_acc": 0.5, "train_speed(iter/s)": 0.671811 }, { "epoch": 2.1757850991816974, "grad_norm": 5.6374030113220215, "learning_rate": 6.014157238985587e-05, "loss": 2.3935766220092773, "memory(GiB)": 72.85, "step": 50785, "token_acc": 0.468944099378882, "train_speed(iter/s)": 0.67181 }, { "epoch": 2.175999314510946, "grad_norm": 4.143167972564697, "learning_rate": 6.0134982412277095e-05, "loss": 2.2768659591674805, "memory(GiB)": 72.85, "step": 50790, "token_acc": 0.5210355987055016, "train_speed(iter/s)": 0.671822 }, { "epoch": 2.1762135298401954, "grad_norm": 7.128526210784912, "learning_rate": 6.0128392251093624e-05, "loss": 2.3476810455322266, "memory(GiB)": 72.85, "step": 50795, "token_acc": 
0.4565916398713826, "train_speed(iter/s)": 0.671825 }, { "epoch": 2.1764277451694443, "grad_norm": 4.357384204864502, "learning_rate": 6.012180190642481e-05, "loss": 1.7575838088989257, "memory(GiB)": 72.85, "step": 50800, "token_acc": 0.5856573705179283, "train_speed(iter/s)": 0.671833 }, { "epoch": 2.176641960498693, "grad_norm": 5.18011999130249, "learning_rate": 6.011521137839007e-05, "loss": 2.2882335662841795, "memory(GiB)": 72.85, "step": 50805, "token_acc": 0.515358361774744, "train_speed(iter/s)": 0.671831 }, { "epoch": 2.1768561758279423, "grad_norm": 5.352334499359131, "learning_rate": 6.0108620667108794e-05, "loss": 2.2872390747070312, "memory(GiB)": 72.85, "step": 50810, "token_acc": 0.5502008032128514, "train_speed(iter/s)": 0.671842 }, { "epoch": 2.177070391157191, "grad_norm": 4.867918014526367, "learning_rate": 6.010202977270035e-05, "loss": 2.097493362426758, "memory(GiB)": 72.85, "step": 50815, "token_acc": 0.5372549019607843, "train_speed(iter/s)": 0.671846 }, { "epoch": 2.17728460648644, "grad_norm": 5.9828715324401855, "learning_rate": 6.009543869528417e-05, "loss": 2.239939880371094, "memory(GiB)": 72.85, "step": 50820, "token_acc": 0.4717741935483871, "train_speed(iter/s)": 0.67184 }, { "epoch": 2.177498821815689, "grad_norm": 4.626224994659424, "learning_rate": 6.008884743497966e-05, "loss": 2.2277509689331056, "memory(GiB)": 72.85, "step": 50825, "token_acc": 0.5180327868852459, "train_speed(iter/s)": 0.671853 }, { "epoch": 2.177713037144938, "grad_norm": 4.496988773345947, "learning_rate": 6.00822559919062e-05, "loss": 2.3775482177734375, "memory(GiB)": 72.85, "step": 50830, "token_acc": 0.4715909090909091, "train_speed(iter/s)": 0.671861 }, { "epoch": 2.177927252474187, "grad_norm": 5.742677211761475, "learning_rate": 6.007566436618321e-05, "loss": 2.1527122497558593, "memory(GiB)": 72.85, "step": 50835, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.671848 }, { "epoch": 2.178141467803436, "grad_norm": 4.505527973175049, 
"learning_rate": 6.006907255793013e-05, "loss": 2.402819061279297, "memory(GiB)": 72.85, "step": 50840, "token_acc": 0.5, "train_speed(iter/s)": 0.671857 }, { "epoch": 2.178355683132685, "grad_norm": 6.493407249450684, "learning_rate": 6.006248056726634e-05, "loss": 2.2762256622314454, "memory(GiB)": 72.85, "step": 50845, "token_acc": 0.4557377049180328, "train_speed(iter/s)": 0.67186 }, { "epoch": 2.1785698984619337, "grad_norm": 4.619503974914551, "learning_rate": 6.005588839431129e-05, "loss": 2.198976516723633, "memory(GiB)": 72.85, "step": 50850, "token_acc": 0.5515873015873016, "train_speed(iter/s)": 0.671866 }, { "epoch": 2.178784113791183, "grad_norm": 4.683785915374756, "learning_rate": 6.0049296039184364e-05, "loss": 2.211659240722656, "memory(GiB)": 72.85, "step": 50855, "token_acc": 0.5088967971530249, "train_speed(iter/s)": 0.671861 }, { "epoch": 2.178998329120432, "grad_norm": 5.297793388366699, "learning_rate": 6.0042703502005015e-05, "loss": 2.325210952758789, "memory(GiB)": 72.85, "step": 50860, "token_acc": 0.49433962264150944, "train_speed(iter/s)": 0.671881 }, { "epoch": 2.1792125444496806, "grad_norm": 3.940286159515381, "learning_rate": 6.00361107828927e-05, "loss": 2.3652626037597657, "memory(GiB)": 72.85, "step": 50865, "token_acc": 0.535483870967742, "train_speed(iter/s)": 0.671894 }, { "epoch": 2.17942675977893, "grad_norm": 5.604731559753418, "learning_rate": 6.00295178819668e-05, "loss": 2.2248884201049806, "memory(GiB)": 72.85, "step": 50870, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.671901 }, { "epoch": 2.1796409751081787, "grad_norm": 4.971683979034424, "learning_rate": 6.002292479934678e-05, "loss": 2.2372505187988283, "memory(GiB)": 72.85, "step": 50875, "token_acc": 0.538961038961039, "train_speed(iter/s)": 0.671917 }, { "epoch": 2.1798551904374275, "grad_norm": 4.56751823425293, "learning_rate": 6.0016331535152084e-05, "loss": 2.5359527587890627, "memory(GiB)": 72.85, "step": 50880, "token_acc": 
0.46204620462046203, "train_speed(iter/s)": 0.671914 }, { "epoch": 2.1800694057666767, "grad_norm": 4.0744218826293945, "learning_rate": 6.000973808950214e-05, "loss": 2.1525571823120115, "memory(GiB)": 72.85, "step": 50885, "token_acc": 0.5647058823529412, "train_speed(iter/s)": 0.671927 }, { "epoch": 2.1802836210959256, "grad_norm": 4.494455814361572, "learning_rate": 6.000314446251638e-05, "loss": 2.207027626037598, "memory(GiB)": 72.85, "step": 50890, "token_acc": 0.5311475409836065, "train_speed(iter/s)": 0.671923 }, { "epoch": 2.1804978364251744, "grad_norm": 5.090467929840088, "learning_rate": 5.99965506543143e-05, "loss": 2.3766319274902346, "memory(GiB)": 72.85, "step": 50895, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.671931 }, { "epoch": 2.1807120517544236, "grad_norm": 4.6528496742248535, "learning_rate": 5.9989956665015324e-05, "loss": 2.4419887542724608, "memory(GiB)": 72.85, "step": 50900, "token_acc": 0.45544554455445546, "train_speed(iter/s)": 0.671905 }, { "epoch": 2.1809262670836724, "grad_norm": 3.6394872665405273, "learning_rate": 5.99833624947389e-05, "loss": 2.206572151184082, "memory(GiB)": 72.85, "step": 50905, "token_acc": 0.5060606060606061, "train_speed(iter/s)": 0.671898 }, { "epoch": 2.1811404824129212, "grad_norm": 5.947621822357178, "learning_rate": 5.997676814360451e-05, "loss": 2.061850738525391, "memory(GiB)": 72.85, "step": 50910, "token_acc": 0.5647058823529412, "train_speed(iter/s)": 0.671908 }, { "epoch": 2.1813546977421705, "grad_norm": 5.654614448547363, "learning_rate": 5.9970173611731616e-05, "loss": 2.1573020935058596, "memory(GiB)": 72.85, "step": 50915, "token_acc": 0.528, "train_speed(iter/s)": 0.671921 }, { "epoch": 2.1815689130714193, "grad_norm": 3.8154098987579346, "learning_rate": 5.996357889923965e-05, "loss": 2.2289552688598633, "memory(GiB)": 72.85, "step": 50920, "token_acc": 0.528169014084507, "train_speed(iter/s)": 0.671921 }, { "epoch": 2.181783128400668, "grad_norm": 5.585381984710693, 
"learning_rate": 5.995698400624813e-05, "loss": 2.126030921936035, "memory(GiB)": 72.85, "step": 50925, "token_acc": 0.5592105263157895, "train_speed(iter/s)": 0.671932 }, { "epoch": 2.1819973437299174, "grad_norm": 6.067368984222412, "learning_rate": 5.995038893287648e-05, "loss": 2.1277355194091796, "memory(GiB)": 72.85, "step": 50930, "token_acc": 0.578125, "train_speed(iter/s)": 0.671935 }, { "epoch": 2.182211559059166, "grad_norm": 5.250296115875244, "learning_rate": 5.994379367924421e-05, "loss": 2.1947397232055663, "memory(GiB)": 72.85, "step": 50935, "token_acc": 0.5205992509363296, "train_speed(iter/s)": 0.671939 }, { "epoch": 2.182425774388415, "grad_norm": 4.078058242797852, "learning_rate": 5.993719824547079e-05, "loss": 2.2303199768066406, "memory(GiB)": 72.85, "step": 50940, "token_acc": 0.51, "train_speed(iter/s)": 0.671941 }, { "epoch": 2.1826399897176643, "grad_norm": 6.396618843078613, "learning_rate": 5.9930602631675705e-05, "loss": 2.2906036376953125, "memory(GiB)": 72.85, "step": 50945, "token_acc": 0.5094339622641509, "train_speed(iter/s)": 0.671937 }, { "epoch": 2.182854205046913, "grad_norm": 3.810981035232544, "learning_rate": 5.992400683797843e-05, "loss": 2.220096969604492, "memory(GiB)": 72.85, "step": 50950, "token_acc": 0.5331491712707183, "train_speed(iter/s)": 0.671933 }, { "epoch": 2.183068420376162, "grad_norm": 6.193515300750732, "learning_rate": 5.991741086449848e-05, "loss": 2.3036508560180664, "memory(GiB)": 72.85, "step": 50955, "token_acc": 0.5321428571428571, "train_speed(iter/s)": 0.671942 }, { "epoch": 2.183282635705411, "grad_norm": 4.720981121063232, "learning_rate": 5.991081471135531e-05, "loss": 2.418042755126953, "memory(GiB)": 72.85, "step": 50960, "token_acc": 0.524, "train_speed(iter/s)": 0.671959 }, { "epoch": 2.18349685103466, "grad_norm": 3.976306915283203, "learning_rate": 5.990421837866843e-05, "loss": 2.140868377685547, "memory(GiB)": 72.85, "step": 50965, "token_acc": 0.49830508474576274, 
"train_speed(iter/s)": 0.671965 }, { "epoch": 2.1837110663639088, "grad_norm": 5.600485324859619, "learning_rate": 5.989762186655736e-05, "loss": 2.296133804321289, "memory(GiB)": 72.85, "step": 50970, "token_acc": 0.5540983606557377, "train_speed(iter/s)": 0.671973 }, { "epoch": 2.183925281693158, "grad_norm": 4.644856929779053, "learning_rate": 5.989102517514158e-05, "loss": 2.362009048461914, "memory(GiB)": 72.85, "step": 50975, "token_acc": 0.4862068965517241, "train_speed(iter/s)": 0.671963 }, { "epoch": 2.184139497022407, "grad_norm": 6.511289119720459, "learning_rate": 5.9884428304540595e-05, "loss": 2.450825500488281, "memory(GiB)": 72.85, "step": 50980, "token_acc": 0.46511627906976744, "train_speed(iter/s)": 0.671968 }, { "epoch": 2.1843537123516557, "grad_norm": 5.448892116546631, "learning_rate": 5.987783125487394e-05, "loss": 2.3527576446533205, "memory(GiB)": 72.85, "step": 50985, "token_acc": 0.501628664495114, "train_speed(iter/s)": 0.671976 }, { "epoch": 2.184567927680905, "grad_norm": 6.597598552703857, "learning_rate": 5.987123402626108e-05, "loss": 2.078653907775879, "memory(GiB)": 72.85, "step": 50990, "token_acc": 0.5521235521235521, "train_speed(iter/s)": 0.671967 }, { "epoch": 2.1847821430101537, "grad_norm": 6.366387844085693, "learning_rate": 5.986463661882157e-05, "loss": 2.137786865234375, "memory(GiB)": 72.85, "step": 50995, "token_acc": 0.5148936170212766, "train_speed(iter/s)": 0.671974 }, { "epoch": 2.1849963583394025, "grad_norm": 4.223592281341553, "learning_rate": 5.985803903267491e-05, "loss": 2.18023681640625, "memory(GiB)": 72.85, "step": 51000, "token_acc": 0.523972602739726, "train_speed(iter/s)": 0.671989 }, { "epoch": 2.1849963583394025, "eval_loss": 2.0717949867248535, "eval_runtime": 15.2544, "eval_samples_per_second": 6.555, "eval_steps_per_second": 6.555, "eval_token_acc": 0.49466666666666664, "step": 51000 }, { "epoch": 2.185210573668652, "grad_norm": 3.8832154273986816, "learning_rate": 5.985144126794061e-05, "loss": 
2.2010566711425783, "memory(GiB)": 72.85, "step": 51005, "token_acc": 0.5077519379844961, "train_speed(iter/s)": 0.671829 }, { "epoch": 2.1854247889979006, "grad_norm": 4.348099231719971, "learning_rate": 5.984484332473823e-05, "loss": 2.4296661376953126, "memory(GiB)": 72.85, "step": 51010, "token_acc": 0.4662756598240469, "train_speed(iter/s)": 0.671824 }, { "epoch": 2.1856390043271494, "grad_norm": 4.379305362701416, "learning_rate": 5.983824520318728e-05, "loss": 2.2180454254150392, "memory(GiB)": 72.85, "step": 51015, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.671831 }, { "epoch": 2.1858532196563987, "grad_norm": 5.685546875, "learning_rate": 5.983164690340727e-05, "loss": 2.2027332305908205, "memory(GiB)": 72.85, "step": 51020, "token_acc": 0.5174825174825175, "train_speed(iter/s)": 0.671838 }, { "epoch": 2.1860674349856475, "grad_norm": 4.6797871589660645, "learning_rate": 5.982504842551777e-05, "loss": 2.3034427642822264, "memory(GiB)": 72.85, "step": 51025, "token_acc": 0.48698884758364314, "train_speed(iter/s)": 0.671839 }, { "epoch": 2.1862816503148967, "grad_norm": 5.371293067932129, "learning_rate": 5.981844976963831e-05, "loss": 2.2290138244628905, "memory(GiB)": 72.85, "step": 51030, "token_acc": 0.4966216216216216, "train_speed(iter/s)": 0.671841 }, { "epoch": 2.1864958656441456, "grad_norm": 5.528803825378418, "learning_rate": 5.981185093588839e-05, "loss": 2.347537612915039, "memory(GiB)": 72.85, "step": 51035, "token_acc": 0.47157190635451507, "train_speed(iter/s)": 0.671828 }, { "epoch": 2.1867100809733944, "grad_norm": 4.505820274353027, "learning_rate": 5.980525192438761e-05, "loss": 2.290016937255859, "memory(GiB)": 72.85, "step": 51040, "token_acc": 0.4817073170731707, "train_speed(iter/s)": 0.671832 }, { "epoch": 2.1869242963026436, "grad_norm": 4.878718376159668, "learning_rate": 5.979865273525549e-05, "loss": 2.146902847290039, "memory(GiB)": 72.85, "step": 51045, "token_acc": 0.5258064516129032, "train_speed(iter/s)": 
0.671849 }, { "epoch": 2.1871385116318924, "grad_norm": 4.492627143859863, "learning_rate": 5.9792053368611565e-05, "loss": 2.1027854919433593, "memory(GiB)": 72.85, "step": 51050, "token_acc": 0.5306859205776173, "train_speed(iter/s)": 0.671839 }, { "epoch": 2.1873527269611412, "grad_norm": 4.13120174407959, "learning_rate": 5.978545382457543e-05, "loss": 1.8019001007080078, "memory(GiB)": 72.85, "step": 51055, "token_acc": 0.5648854961832062, "train_speed(iter/s)": 0.671838 }, { "epoch": 2.1875669422903905, "grad_norm": 5.337462902069092, "learning_rate": 5.977885410326661e-05, "loss": 2.233780288696289, "memory(GiB)": 72.85, "step": 51060, "token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.671847 }, { "epoch": 2.1877811576196393, "grad_norm": 4.731051921844482, "learning_rate": 5.977225420480468e-05, "loss": 2.2437692642211915, "memory(GiB)": 72.85, "step": 51065, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.671851 }, { "epoch": 2.187995372948888, "grad_norm": 3.9339709281921387, "learning_rate": 5.97656541293092e-05, "loss": 2.0458229064941404, "memory(GiB)": 72.85, "step": 51070, "token_acc": 0.5303030303030303, "train_speed(iter/s)": 0.671857 }, { "epoch": 2.1882095882781374, "grad_norm": 4.952023506164551, "learning_rate": 5.975905387689973e-05, "loss": 2.295279121398926, "memory(GiB)": 72.85, "step": 51075, "token_acc": 0.5175718849840255, "train_speed(iter/s)": 0.671849 }, { "epoch": 2.188423803607386, "grad_norm": 4.515488147735596, "learning_rate": 5.9752453447695834e-05, "loss": 2.261980438232422, "memory(GiB)": 72.85, "step": 51080, "token_acc": 0.5188679245283019, "train_speed(iter/s)": 0.671857 }, { "epoch": 2.188638018936635, "grad_norm": 5.16793155670166, "learning_rate": 5.974585284181712e-05, "loss": 2.4213592529296877, "memory(GiB)": 72.85, "step": 51085, "token_acc": 0.5229357798165137, "train_speed(iter/s)": 0.671871 }, { "epoch": 2.1888522342658843, "grad_norm": 4.846878528594971, "learning_rate": 5.973925205938311e-05, 
"loss": 2.246291160583496, "memory(GiB)": 72.85, "step": 51090, "token_acc": 0.49038461538461536, "train_speed(iter/s)": 0.671885 }, { "epoch": 2.189066449595133, "grad_norm": 4.442030429840088, "learning_rate": 5.973265110051344e-05, "loss": 2.2867446899414063, "memory(GiB)": 72.85, "step": 51095, "token_acc": 0.5290102389078498, "train_speed(iter/s)": 0.671894 }, { "epoch": 2.189280664924382, "grad_norm": 5.354625225067139, "learning_rate": 5.9726049965327656e-05, "loss": 2.205765151977539, "memory(GiB)": 72.85, "step": 51100, "token_acc": 0.5141700404858299, "train_speed(iter/s)": 0.671891 }, { "epoch": 2.189494880253631, "grad_norm": 3.85305118560791, "learning_rate": 5.9719448653945344e-05, "loss": 2.3978805541992188, "memory(GiB)": 72.85, "step": 51105, "token_acc": 0.4608433734939759, "train_speed(iter/s)": 0.671906 }, { "epoch": 2.18970909558288, "grad_norm": 5.516369819641113, "learning_rate": 5.9712847166486105e-05, "loss": 2.328001022338867, "memory(GiB)": 72.85, "step": 51110, "token_acc": 0.481981981981982, "train_speed(iter/s)": 0.671911 }, { "epoch": 2.1899233109121288, "grad_norm": 4.29923152923584, "learning_rate": 5.9706245503069534e-05, "loss": 2.143108367919922, "memory(GiB)": 72.85, "step": 51115, "token_acc": 0.5112994350282486, "train_speed(iter/s)": 0.671907 }, { "epoch": 2.190137526241378, "grad_norm": 4.392751693725586, "learning_rate": 5.9699643663815205e-05, "loss": 1.9888744354248047, "memory(GiB)": 72.85, "step": 51120, "token_acc": 0.5533596837944664, "train_speed(iter/s)": 0.67191 }, { "epoch": 2.190351741570627, "grad_norm": 4.0407819747924805, "learning_rate": 5.969304164884275e-05, "loss": 1.9568017959594726, "memory(GiB)": 72.85, "step": 51125, "token_acc": 0.5642023346303502, "train_speed(iter/s)": 0.671931 }, { "epoch": 2.1905659568998757, "grad_norm": 5.794447422027588, "learning_rate": 5.968643945827176e-05, "loss": 2.0700096130371093, "memory(GiB)": 72.85, "step": 51130, "token_acc": 0.519434628975265, "train_speed(iter/s)": 
0.671933 }, { "epoch": 2.190780172229125, "grad_norm": 7.897080421447754, "learning_rate": 5.9679837092221815e-05, "loss": 2.23150634765625, "memory(GiB)": 72.85, "step": 51135, "token_acc": 0.5423728813559322, "train_speed(iter/s)": 0.671945 }, { "epoch": 2.1909943875583737, "grad_norm": 4.066454887390137, "learning_rate": 5.967323455081255e-05, "loss": 2.4555166244506834, "memory(GiB)": 72.85, "step": 51140, "token_acc": 0.478125, "train_speed(iter/s)": 0.671927 }, { "epoch": 2.1912086028876225, "grad_norm": 3.724102258682251, "learning_rate": 5.966663183416357e-05, "loss": 2.2126365661621095, "memory(GiB)": 72.85, "step": 51145, "token_acc": 0.5302593659942363, "train_speed(iter/s)": 0.671931 }, { "epoch": 2.191422818216872, "grad_norm": 4.822882652282715, "learning_rate": 5.966002894239446e-05, "loss": 2.1472036361694338, "memory(GiB)": 72.85, "step": 51150, "token_acc": 0.51985559566787, "train_speed(iter/s)": 0.671922 }, { "epoch": 2.1916370335461206, "grad_norm": 4.947147369384766, "learning_rate": 5.965342587562489e-05, "loss": 2.281622886657715, "memory(GiB)": 72.85, "step": 51155, "token_acc": 0.5016722408026756, "train_speed(iter/s)": 0.671929 }, { "epoch": 2.1918512488753694, "grad_norm": 4.6638078689575195, "learning_rate": 5.9646822633974454e-05, "loss": 2.013075828552246, "memory(GiB)": 72.85, "step": 51160, "token_acc": 0.5307692307692308, "train_speed(iter/s)": 0.671924 }, { "epoch": 2.1920654642046187, "grad_norm": 4.419519901275635, "learning_rate": 5.964021921756277e-05, "loss": 2.1959888458251955, "memory(GiB)": 72.85, "step": 51165, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.671916 }, { "epoch": 2.1922796795338675, "grad_norm": 5.511220932006836, "learning_rate": 5.963361562650946e-05, "loss": 2.0573040008544923, "memory(GiB)": 72.85, "step": 51170, "token_acc": 0.5016501650165016, "train_speed(iter/s)": 0.671923 }, { "epoch": 2.1924938948631163, "grad_norm": 6.144486904144287, "learning_rate": 5.962701186093419e-05, "loss": 
2.2984933853149414, "memory(GiB)": 72.85, "step": 51175, "token_acc": 0.5099337748344371, "train_speed(iter/s)": 0.671926 }, { "epoch": 2.1927081101923656, "grad_norm": 5.1212873458862305, "learning_rate": 5.962040792095656e-05, "loss": 1.9967437744140626, "memory(GiB)": 72.85, "step": 51180, "token_acc": 0.5474137931034483, "train_speed(iter/s)": 0.671934 }, { "epoch": 2.1929223255216144, "grad_norm": 3.909846067428589, "learning_rate": 5.96138038066962e-05, "loss": 2.3837053298950197, "memory(GiB)": 72.85, "step": 51185, "token_acc": 0.5107142857142857, "train_speed(iter/s)": 0.671941 }, { "epoch": 2.193136540850863, "grad_norm": 4.533779621124268, "learning_rate": 5.960719951827278e-05, "loss": 2.484750747680664, "memory(GiB)": 72.85, "step": 51190, "token_acc": 0.4894179894179894, "train_speed(iter/s)": 0.671951 }, { "epoch": 2.1933507561801124, "grad_norm": 4.454965114593506, "learning_rate": 5.960059505580593e-05, "loss": 2.1492326736450194, "memory(GiB)": 72.85, "step": 51195, "token_acc": 0.5625, "train_speed(iter/s)": 0.671922 }, { "epoch": 2.1935649715093612, "grad_norm": 4.9911980628967285, "learning_rate": 5.9593990419415294e-05, "loss": 2.321911430358887, "memory(GiB)": 72.85, "step": 51200, "token_acc": 0.5137254901960784, "train_speed(iter/s)": 0.671933 }, { "epoch": 2.19377918683861, "grad_norm": 5.863637924194336, "learning_rate": 5.9587385609220516e-05, "loss": 2.2280187606811523, "memory(GiB)": 72.85, "step": 51205, "token_acc": 0.48534201954397393, "train_speed(iter/s)": 0.671937 }, { "epoch": 2.1939934021678593, "grad_norm": 4.107954978942871, "learning_rate": 5.958078062534126e-05, "loss": 2.3245311737060548, "memory(GiB)": 72.85, "step": 51210, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.671958 }, { "epoch": 2.194207617497108, "grad_norm": 3.6633079051971436, "learning_rate": 5.957417546789717e-05, "loss": 2.8011920928955076, "memory(GiB)": 72.85, "step": 51215, "token_acc": 0.4369747899159664, "train_speed(iter/s)": 0.671959 }, 
{ "epoch": 2.194421832826357, "grad_norm": 4.9957098960876465, "learning_rate": 5.956757013700791e-05, "loss": 2.1111278533935547, "memory(GiB)": 72.85, "step": 51220, "token_acc": 0.5566037735849056, "train_speed(iter/s)": 0.671957 }, { "epoch": 2.194636048155606, "grad_norm": 6.022825241088867, "learning_rate": 5.956096463279314e-05, "loss": 1.92017822265625, "memory(GiB)": 72.85, "step": 51225, "token_acc": 0.596078431372549, "train_speed(iter/s)": 0.671951 }, { "epoch": 2.194850263484855, "grad_norm": 5.980376243591309, "learning_rate": 5.955435895537253e-05, "loss": 2.2909969329833983, "memory(GiB)": 72.85, "step": 51230, "token_acc": 0.4982698961937716, "train_speed(iter/s)": 0.671944 }, { "epoch": 2.195064478814104, "grad_norm": 4.6007232666015625, "learning_rate": 5.9547753104865746e-05, "loss": 2.228348731994629, "memory(GiB)": 72.85, "step": 51235, "token_acc": 0.5158227848101266, "train_speed(iter/s)": 0.671932 }, { "epoch": 2.195278694143353, "grad_norm": 4.551478385925293, "learning_rate": 5.954114708139247e-05, "loss": 2.4650951385498048, "memory(GiB)": 72.85, "step": 51240, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.671906 }, { "epoch": 2.195492909472602, "grad_norm": 5.6911702156066895, "learning_rate": 5.953454088507236e-05, "loss": 2.2855718612670897, "memory(GiB)": 72.85, "step": 51245, "token_acc": 0.49372384937238495, "train_speed(iter/s)": 0.671887 }, { "epoch": 2.1957071248018507, "grad_norm": 4.412039279937744, "learning_rate": 5.952793451602507e-05, "loss": 2.1855058670043945, "memory(GiB)": 72.85, "step": 51250, "token_acc": 0.5317725752508361, "train_speed(iter/s)": 0.6719 }, { "epoch": 2.1959213401311, "grad_norm": 4.890856742858887, "learning_rate": 5.9521327974370334e-05, "loss": 2.1772846221923827, "memory(GiB)": 72.85, "step": 51255, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.671904 }, { "epoch": 2.1961355554603488, "grad_norm": 4.866864204406738, "learning_rate": 5.95147212602278e-05, "loss": 
2.1640392303466798, "memory(GiB)": 72.85, "step": 51260, "token_acc": 0.5050847457627119, "train_speed(iter/s)": 0.671901 }, { "epoch": 2.1963497707895976, "grad_norm": 5.107860088348389, "learning_rate": 5.950811437371716e-05, "loss": 2.335574722290039, "memory(GiB)": 72.85, "step": 51265, "token_acc": 0.5, "train_speed(iter/s)": 0.671897 }, { "epoch": 2.196563986118847, "grad_norm": 3.9033496379852295, "learning_rate": 5.950150731495813e-05, "loss": 2.3076908111572267, "memory(GiB)": 72.85, "step": 51270, "token_acc": 0.5292096219931272, "train_speed(iter/s)": 0.671903 }, { "epoch": 2.1967782014480957, "grad_norm": 4.8618268966674805, "learning_rate": 5.949490008407037e-05, "loss": 2.1440946578979494, "memory(GiB)": 72.85, "step": 51275, "token_acc": 0.5379939209726444, "train_speed(iter/s)": 0.671895 }, { "epoch": 2.1969924167773445, "grad_norm": 4.311604022979736, "learning_rate": 5.9488292681173585e-05, "loss": 2.1764217376708985, "memory(GiB)": 72.85, "step": 51280, "token_acc": 0.5376344086021505, "train_speed(iter/s)": 0.671885 }, { "epoch": 2.1972066321065937, "grad_norm": 4.599462509155273, "learning_rate": 5.948168510638748e-05, "loss": 2.1553815841674804, "memory(GiB)": 72.85, "step": 51285, "token_acc": 0.5339233038348082, "train_speed(iter/s)": 0.671876 }, { "epoch": 2.1974208474358425, "grad_norm": 4.172887802124023, "learning_rate": 5.9475077359831766e-05, "loss": 2.3073451995849608, "memory(GiB)": 72.85, "step": 51290, "token_acc": 0.5062893081761006, "train_speed(iter/s)": 0.671898 }, { "epoch": 2.1976350627650914, "grad_norm": 5.835326194763184, "learning_rate": 5.9468469441626116e-05, "loss": 2.1225513458251952, "memory(GiB)": 72.85, "step": 51295, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.671915 }, { "epoch": 2.1978492780943406, "grad_norm": 5.824143886566162, "learning_rate": 5.946186135189027e-05, "loss": 2.3173219680786135, "memory(GiB)": 72.85, "step": 51300, "token_acc": 0.5081967213114754, "train_speed(iter/s)": 0.671914 }, 
{ "epoch": 2.1980634934235894, "grad_norm": 4.376791000366211, "learning_rate": 5.945525309074393e-05, "loss": 2.4242311477661134, "memory(GiB)": 72.85, "step": 51305, "token_acc": 0.5, "train_speed(iter/s)": 0.671933 }, { "epoch": 2.1982777087528382, "grad_norm": 5.793178081512451, "learning_rate": 5.944864465830681e-05, "loss": 2.3668190002441407, "memory(GiB)": 72.85, "step": 51310, "token_acc": 0.48771929824561405, "train_speed(iter/s)": 0.671942 }, { "epoch": 2.1984919240820875, "grad_norm": 3.8912861347198486, "learning_rate": 5.944203605469863e-05, "loss": 2.1748992919921877, "memory(GiB)": 72.85, "step": 51315, "token_acc": 0.5153374233128835, "train_speed(iter/s)": 0.671949 }, { "epoch": 2.1987061394113363, "grad_norm": 3.6153292655944824, "learning_rate": 5.943542728003911e-05, "loss": 1.9523725509643555, "memory(GiB)": 72.85, "step": 51320, "token_acc": 0.5844594594594594, "train_speed(iter/s)": 0.671957 }, { "epoch": 2.198920354740585, "grad_norm": 9.596646308898926, "learning_rate": 5.9428818334447976e-05, "loss": 2.270954704284668, "memory(GiB)": 72.85, "step": 51325, "token_acc": 0.5271966527196653, "train_speed(iter/s)": 0.671962 }, { "epoch": 2.1991345700698344, "grad_norm": 6.250390529632568, "learning_rate": 5.9422209218044956e-05, "loss": 2.2461244583129885, "memory(GiB)": 72.85, "step": 51330, "token_acc": 0.5051194539249146, "train_speed(iter/s)": 0.671949 }, { "epoch": 2.199348785399083, "grad_norm": 4.958255767822266, "learning_rate": 5.941559993094976e-05, "loss": 2.2134300231933595, "memory(GiB)": 72.85, "step": 51335, "token_acc": 0.5370370370370371, "train_speed(iter/s)": 0.671935 }, { "epoch": 2.199563000728332, "grad_norm": 4.765944957733154, "learning_rate": 5.9408990473282145e-05, "loss": 2.033880043029785, "memory(GiB)": 72.85, "step": 51340, "token_acc": 0.5671641791044776, "train_speed(iter/s)": 0.671929 }, { "epoch": 2.1997772160575813, "grad_norm": 4.301028251647949, "learning_rate": 5.940238084516184e-05, "loss": 
2.3151039123535155, "memory(GiB)": 72.85, "step": 51345, "token_acc": 0.5176056338028169, "train_speed(iter/s)": 0.671922 }, { "epoch": 2.19999143138683, "grad_norm": 4.19886589050293, "learning_rate": 5.9395771046708594e-05, "loss": 2.344453048706055, "memory(GiB)": 72.85, "step": 51350, "token_acc": 0.512, "train_speed(iter/s)": 0.671908 }, { "epoch": 2.200205646716079, "grad_norm": 7.107089996337891, "learning_rate": 5.9389161078042143e-05, "loss": 2.285176086425781, "memory(GiB)": 72.85, "step": 51355, "token_acc": 0.5016722408026756, "train_speed(iter/s)": 0.671921 }, { "epoch": 2.200419862045328, "grad_norm": 7.2227582931518555, "learning_rate": 5.9382550939282234e-05, "loss": 2.402118682861328, "memory(GiB)": 72.85, "step": 51360, "token_acc": 0.5035971223021583, "train_speed(iter/s)": 0.671924 }, { "epoch": 2.200634077374577, "grad_norm": 5.511636734008789, "learning_rate": 5.9375940630548597e-05, "loss": 2.269740867614746, "memory(GiB)": 72.85, "step": 51365, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.67193 }, { "epoch": 2.2008482927038258, "grad_norm": 4.74049711227417, "learning_rate": 5.9369330151961e-05, "loss": 2.3154367446899413, "memory(GiB)": 72.85, "step": 51370, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.671938 }, { "epoch": 2.201062508033075, "grad_norm": 5.152303218841553, "learning_rate": 5.9362719503639216e-05, "loss": 2.436343193054199, "memory(GiB)": 72.85, "step": 51375, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.671953 }, { "epoch": 2.201276723362324, "grad_norm": 5.0138325691223145, "learning_rate": 5.9356108685702974e-05, "loss": 2.2226625442504884, "memory(GiB)": 72.85, "step": 51380, "token_acc": 0.5019157088122606, "train_speed(iter/s)": 0.671956 }, { "epoch": 2.2014909386915726, "grad_norm": 4.0896406173706055, "learning_rate": 5.934949769827205e-05, "loss": 2.1056137084960938, "memory(GiB)": 72.85, "step": 51385, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.671959 }, { 
"epoch": 2.201705154020822, "grad_norm": 4.106110572814941, "learning_rate": 5.9342886541466204e-05, "loss": 2.225594329833984, "memory(GiB)": 72.85, "step": 51390, "token_acc": 0.5053003533568905, "train_speed(iter/s)": 0.671966 }, { "epoch": 2.2019193693500707, "grad_norm": 4.405528545379639, "learning_rate": 5.93362752154052e-05, "loss": 2.1657047271728516, "memory(GiB)": 72.85, "step": 51395, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.671968 }, { "epoch": 2.2021335846793195, "grad_norm": 8.079679489135742, "learning_rate": 5.9329663720208826e-05, "loss": 2.2485107421875, "memory(GiB)": 72.85, "step": 51400, "token_acc": 0.555956678700361, "train_speed(iter/s)": 0.671964 }, { "epoch": 2.202347800008569, "grad_norm": 5.081980228424072, "learning_rate": 5.932305205599683e-05, "loss": 2.579359436035156, "memory(GiB)": 72.85, "step": 51405, "token_acc": 0.532871972318339, "train_speed(iter/s)": 0.671974 }, { "epoch": 2.2025620153378176, "grad_norm": 5.203372478485107, "learning_rate": 5.931644022288899e-05, "loss": 2.360072898864746, "memory(GiB)": 72.85, "step": 51410, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.671977 }, { "epoch": 2.2027762306670664, "grad_norm": 4.656744003295898, "learning_rate": 5.9309828221005115e-05, "loss": 2.1893569946289064, "memory(GiB)": 72.85, "step": 51415, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.671965 }, { "epoch": 2.2029904459963157, "grad_norm": 5.175010681152344, "learning_rate": 5.930321605046496e-05, "loss": 2.198062515258789, "memory(GiB)": 72.85, "step": 51420, "token_acc": 0.5308219178082192, "train_speed(iter/s)": 0.671971 }, { "epoch": 2.2032046613255645, "grad_norm": 5.003137111663818, "learning_rate": 5.9296603711388324e-05, "loss": 2.078355407714844, "memory(GiB)": 72.85, "step": 51425, "token_acc": 0.5296167247386759, "train_speed(iter/s)": 0.671988 }, { "epoch": 2.2034188766548133, "grad_norm": 5.995408058166504, "learning_rate": 5.928999120389499e-05, "loss": 
2.0559755325317384, "memory(GiB)": 72.85, "step": 51430, "token_acc": 0.5634920634920635, "train_speed(iter/s)": 0.671991 }, { "epoch": 2.2036330919840625, "grad_norm": 3.6992592811584473, "learning_rate": 5.928337852810475e-05, "loss": 2.3859615325927734, "memory(GiB)": 72.85, "step": 51435, "token_acc": 0.5078864353312302, "train_speed(iter/s)": 0.671999 }, { "epoch": 2.2038473073133114, "grad_norm": 6.288620471954346, "learning_rate": 5.927676568413739e-05, "loss": 2.478093147277832, "memory(GiB)": 72.85, "step": 51440, "token_acc": 0.46357615894039733, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.20406152264256, "grad_norm": 9.672228813171387, "learning_rate": 5.9270152672112725e-05, "loss": 2.092522621154785, "memory(GiB)": 72.85, "step": 51445, "token_acc": 0.532, "train_speed(iter/s)": 0.672003 }, { "epoch": 2.2042757379718094, "grad_norm": 4.737159729003906, "learning_rate": 5.9263539492150557e-05, "loss": 2.090616989135742, "memory(GiB)": 72.85, "step": 51450, "token_acc": 0.5806451612903226, "train_speed(iter/s)": 0.672002 }, { "epoch": 2.2044899533010582, "grad_norm": 5.212927341461182, "learning_rate": 5.9256926144370663e-05, "loss": 2.234952926635742, "memory(GiB)": 72.85, "step": 51455, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672011 }, { "epoch": 2.204704168630307, "grad_norm": 5.9331512451171875, "learning_rate": 5.9250312628892877e-05, "loss": 2.17281494140625, "memory(GiB)": 72.85, "step": 51460, "token_acc": 0.5340909090909091, "train_speed(iter/s)": 0.672022 }, { "epoch": 2.2049183839595563, "grad_norm": 3.7271230220794678, "learning_rate": 5.9243698945837014e-05, "loss": 2.1480266571044924, "memory(GiB)": 72.85, "step": 51465, "token_acc": 0.5165165165165165, "train_speed(iter/s)": 0.672018 }, { "epoch": 2.205132599288805, "grad_norm": 4.572857856750488, "learning_rate": 5.923708509532284e-05, "loss": 2.2763946533203123, "memory(GiB)": 72.85, "step": 51470, "token_acc": 0.5571428571428572, "train_speed(iter/s)": 0.672043 }, 
{ "epoch": 2.205346814618054, "grad_norm": 5.586081504821777, "learning_rate": 5.923047107747024e-05, "loss": 2.169725036621094, "memory(GiB)": 72.85, "step": 51475, "token_acc": 0.5213414634146342, "train_speed(iter/s)": 0.672038 }, { "epoch": 2.205561029947303, "grad_norm": 5.295158863067627, "learning_rate": 5.9223856892398975e-05, "loss": 1.938202476501465, "memory(GiB)": 72.85, "step": 51480, "token_acc": 0.5663716814159292, "train_speed(iter/s)": 0.672045 }, { "epoch": 2.205775245276552, "grad_norm": 5.802604675292969, "learning_rate": 5.921724254022889e-05, "loss": 2.061613845825195, "memory(GiB)": 72.85, "step": 51485, "token_acc": 0.5323741007194245, "train_speed(iter/s)": 0.67205 }, { "epoch": 2.205989460605801, "grad_norm": 6.482641220092773, "learning_rate": 5.921062802107982e-05, "loss": 2.337559127807617, "memory(GiB)": 72.85, "step": 51490, "token_acc": 0.4945652173913043, "train_speed(iter/s)": 0.672027 }, { "epoch": 2.20620367593505, "grad_norm": 5.1872944831848145, "learning_rate": 5.920401333507157e-05, "loss": 2.121352767944336, "memory(GiB)": 72.85, "step": 51495, "token_acc": 0.48788927335640137, "train_speed(iter/s)": 0.672035 }, { "epoch": 2.206417891264299, "grad_norm": 4.628395080566406, "learning_rate": 5.9197398482324e-05, "loss": 1.9938121795654298, "memory(GiB)": 72.85, "step": 51500, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672053 }, { "epoch": 2.206417891264299, "eval_loss": 1.9989129304885864, "eval_runtime": 15.9587, "eval_samples_per_second": 6.266, "eval_steps_per_second": 6.266, "eval_token_acc": 0.5211062590975255, "step": 51500 }, { "epoch": 2.2066321065935477, "grad_norm": 3.6805944442749023, "learning_rate": 5.919078346295693e-05, "loss": 2.1836065292358398, "memory(GiB)": 72.85, "step": 51505, "token_acc": 0.5163853028798411, "train_speed(iter/s)": 0.671875 }, { "epoch": 2.206846321922797, "grad_norm": 4.897614002227783, "learning_rate": 5.918416827709018e-05, "loss": 2.241841125488281, "memory(GiB)": 
72.85, "step": 51510, "token_acc": 0.528169014084507, "train_speed(iter/s)": 0.671862 }, { "epoch": 2.2070605372520458, "grad_norm": 3.6386826038360596, "learning_rate": 5.917755292484361e-05, "loss": 2.460377502441406, "memory(GiB)": 72.85, "step": 51515, "token_acc": 0.4699140401146132, "train_speed(iter/s)": 0.671858 }, { "epoch": 2.2072747525812946, "grad_norm": 5.3414530754089355, "learning_rate": 5.917093740633707e-05, "loss": 2.4265586853027346, "memory(GiB)": 72.85, "step": 51520, "token_acc": 0.5100401606425703, "train_speed(iter/s)": 0.671875 }, { "epoch": 2.207488967910544, "grad_norm": 5.499476909637451, "learning_rate": 5.916432172169038e-05, "loss": 1.9889530181884765, "memory(GiB)": 72.85, "step": 51525, "token_acc": 0.5636363636363636, "train_speed(iter/s)": 0.671876 }, { "epoch": 2.2077031832397926, "grad_norm": 5.040598392486572, "learning_rate": 5.9157705871023426e-05, "loss": 2.061569595336914, "memory(GiB)": 72.85, "step": 51530, "token_acc": 0.539622641509434, "train_speed(iter/s)": 0.671888 }, { "epoch": 2.2079173985690415, "grad_norm": 5.591496467590332, "learning_rate": 5.915108985445603e-05, "loss": 2.3000221252441406, "memory(GiB)": 72.85, "step": 51535, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.671889 }, { "epoch": 2.2081316138982907, "grad_norm": 5.149367809295654, "learning_rate": 5.914447367210805e-05, "loss": 2.174321746826172, "memory(GiB)": 72.85, "step": 51540, "token_acc": 0.4897260273972603, "train_speed(iter/s)": 0.671892 }, { "epoch": 2.2083458292275395, "grad_norm": 5.8323163986206055, "learning_rate": 5.913785732409937e-05, "loss": 2.163345146179199, "memory(GiB)": 72.85, "step": 51545, "token_acc": 0.5365853658536586, "train_speed(iter/s)": 0.671896 }, { "epoch": 2.2085600445567883, "grad_norm": 4.7637810707092285, "learning_rate": 5.913124081054981e-05, "loss": 2.291202735900879, "memory(GiB)": 72.85, "step": 51550, "token_acc": 0.48417721518987344, "train_speed(iter/s)": 0.671911 }, { "epoch": 
2.2087742598860376, "grad_norm": 5.0811052322387695, "learning_rate": 5.912462413157926e-05, "loss": 2.229355049133301, "memory(GiB)": 72.85, "step": 51555, "token_acc": 0.5036231884057971, "train_speed(iter/s)": 0.671918 }, { "epoch": 2.2089884752152864, "grad_norm": 5.84070348739624, "learning_rate": 5.91180072873076e-05, "loss": 2.144048500061035, "memory(GiB)": 72.85, "step": 51560, "token_acc": 0.5283018867924528, "train_speed(iter/s)": 0.67191 }, { "epoch": 2.209202690544535, "grad_norm": 4.43522310256958, "learning_rate": 5.9111390277854675e-05, "loss": 2.268620491027832, "memory(GiB)": 72.85, "step": 51565, "token_acc": 0.5057471264367817, "train_speed(iter/s)": 0.671901 }, { "epoch": 2.2094169058737845, "grad_norm": 3.980872631072998, "learning_rate": 5.910477310334036e-05, "loss": 2.24681396484375, "memory(GiB)": 72.85, "step": 51570, "token_acc": 0.5183823529411765, "train_speed(iter/s)": 0.671903 }, { "epoch": 2.2096311212030333, "grad_norm": 4.915707111358643, "learning_rate": 5.9098155763884554e-05, "loss": 2.3701684951782225, "memory(GiB)": 72.85, "step": 51575, "token_acc": 0.5481481481481482, "train_speed(iter/s)": 0.671917 }, { "epoch": 2.209845336532282, "grad_norm": 4.678481101989746, "learning_rate": 5.909153825960711e-05, "loss": 2.194051170349121, "memory(GiB)": 72.85, "step": 51580, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.671916 }, { "epoch": 2.2100595518615314, "grad_norm": 5.719401836395264, "learning_rate": 5.908492059062794e-05, "loss": 2.2064258575439455, "memory(GiB)": 72.85, "step": 51585, "token_acc": 0.5420875420875421, "train_speed(iter/s)": 0.67192 }, { "epoch": 2.21027376719078, "grad_norm": 5.10633659362793, "learning_rate": 5.907830275706689e-05, "loss": 2.2313671112060547, "memory(GiB)": 72.85, "step": 51590, "token_acc": 0.5195729537366548, "train_speed(iter/s)": 0.671906 }, { "epoch": 2.210487982520029, "grad_norm": 4.607204437255859, "learning_rate": 5.907168475904388e-05, "loss": 2.3725019454956056, 
"memory(GiB)": 72.85, "step": 51595, "token_acc": 0.49137931034482757, "train_speed(iter/s)": 0.671906 }, { "epoch": 2.2107021978492782, "grad_norm": 4.996037483215332, "learning_rate": 5.906506659667878e-05, "loss": 2.686875343322754, "memory(GiB)": 72.85, "step": 51600, "token_acc": 0.4696485623003195, "train_speed(iter/s)": 0.671899 }, { "epoch": 2.210916413178527, "grad_norm": 5.732416152954102, "learning_rate": 5.905844827009151e-05, "loss": 2.4476526260375975, "memory(GiB)": 72.85, "step": 51605, "token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.671893 }, { "epoch": 2.211130628507776, "grad_norm": 4.166609764099121, "learning_rate": 5.905182977940195e-05, "loss": 2.2266119003295897, "memory(GiB)": 72.85, "step": 51610, "token_acc": 0.5083333333333333, "train_speed(iter/s)": 0.67189 }, { "epoch": 2.211344843837025, "grad_norm": 3.3341243267059326, "learning_rate": 5.904521112472999e-05, "loss": 2.092080307006836, "memory(GiB)": 72.85, "step": 51615, "token_acc": 0.5618374558303887, "train_speed(iter/s)": 0.671873 }, { "epoch": 2.211559059166274, "grad_norm": 4.129626750946045, "learning_rate": 5.903859230619556e-05, "loss": 1.9021602630615235, "memory(GiB)": 72.85, "step": 51620, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.671882 }, { "epoch": 2.2117732744955227, "grad_norm": 4.22191858291626, "learning_rate": 5.903197332391853e-05, "loss": 2.4875865936279298, "memory(GiB)": 72.85, "step": 51625, "token_acc": 0.47635135135135137, "train_speed(iter/s)": 0.671885 }, { "epoch": 2.211987489824772, "grad_norm": 9.872907638549805, "learning_rate": 5.902535417801884e-05, "loss": 2.538918876647949, "memory(GiB)": 72.85, "step": 51630, "token_acc": 0.49201277955271566, "train_speed(iter/s)": 0.671889 }, { "epoch": 2.212201705154021, "grad_norm": 5.110158443450928, "learning_rate": 5.901873486861641e-05, "loss": 2.4267353057861327, "memory(GiB)": 72.85, "step": 51635, "token_acc": 0.5015974440894568, "train_speed(iter/s)": 0.671893 }, { "epoch": 
2.2124159204832696, "grad_norm": 8.141427040100098, "learning_rate": 5.901211539583111e-05, "loss": 2.0962779998779295, "memory(GiB)": 72.85, "step": 51640, "token_acc": 0.5626911314984709, "train_speed(iter/s)": 0.671904 }, { "epoch": 2.212630135812519, "grad_norm": 4.388298511505127, "learning_rate": 5.900549575978291e-05, "loss": 2.058740234375, "memory(GiB)": 72.85, "step": 51645, "token_acc": 0.5064102564102564, "train_speed(iter/s)": 0.671911 }, { "epoch": 2.2128443511417677, "grad_norm": 4.967255115509033, "learning_rate": 5.899887596059171e-05, "loss": 2.2857334136962892, "memory(GiB)": 72.85, "step": 51650, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.671915 }, { "epoch": 2.2130585664710165, "grad_norm": 5.731014728546143, "learning_rate": 5.899225599837741e-05, "loss": 1.903542709350586, "memory(GiB)": 72.85, "step": 51655, "token_acc": 0.5364963503649635, "train_speed(iter/s)": 0.671938 }, { "epoch": 2.2132727818002658, "grad_norm": 6.988419532775879, "learning_rate": 5.8985635873259956e-05, "loss": 2.3688737869262697, "memory(GiB)": 72.85, "step": 51660, "token_acc": 0.515527950310559, "train_speed(iter/s)": 0.671937 }, { "epoch": 2.2134869971295146, "grad_norm": 6.332569122314453, "learning_rate": 5.8979015585359296e-05, "loss": 2.353944206237793, "memory(GiB)": 72.85, "step": 51665, "token_acc": 0.5261194029850746, "train_speed(iter/s)": 0.671935 }, { "epoch": 2.2137012124587634, "grad_norm": 5.260552406311035, "learning_rate": 5.897239513479532e-05, "loss": 2.2437053680419923, "memory(GiB)": 72.85, "step": 51670, "token_acc": 0.5585585585585585, "train_speed(iter/s)": 0.671946 }, { "epoch": 2.2139154277880126, "grad_norm": 5.180067539215088, "learning_rate": 5.896577452168801e-05, "loss": 1.8941619873046875, "memory(GiB)": 72.85, "step": 51675, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.671957 }, { "epoch": 2.2141296431172615, "grad_norm": 4.346897125244141, "learning_rate": 5.8959153746157294e-05, "loss": 
2.076019287109375, "memory(GiB)": 72.85, "step": 51680, "token_acc": 0.5212355212355212, "train_speed(iter/s)": 0.671938 }, { "epoch": 2.2143438584465103, "grad_norm": 4.787189960479736, "learning_rate": 5.895253280832308e-05, "loss": 2.482692337036133, "memory(GiB)": 72.85, "step": 51685, "token_acc": 0.5015974440894568, "train_speed(iter/s)": 0.671927 }, { "epoch": 2.2145580737757595, "grad_norm": 6.3503875732421875, "learning_rate": 5.894591170830536e-05, "loss": 2.155180549621582, "memory(GiB)": 72.85, "step": 51690, "token_acc": 0.5421686746987951, "train_speed(iter/s)": 0.671945 }, { "epoch": 2.2147722891050083, "grad_norm": 4.608419418334961, "learning_rate": 5.893929044622404e-05, "loss": 2.4381824493408204, "memory(GiB)": 72.85, "step": 51695, "token_acc": 0.47126436781609193, "train_speed(iter/s)": 0.671946 }, { "epoch": 2.214986504434257, "grad_norm": 5.978700160980225, "learning_rate": 5.8932669022199095e-05, "loss": 2.274717330932617, "memory(GiB)": 72.85, "step": 51700, "token_acc": 0.5, "train_speed(iter/s)": 0.67196 }, { "epoch": 2.2152007197635064, "grad_norm": 4.173677444458008, "learning_rate": 5.892604743635045e-05, "loss": 2.042725944519043, "memory(GiB)": 72.85, "step": 51705, "token_acc": 0.524904214559387, "train_speed(iter/s)": 0.671962 }, { "epoch": 2.215414935092755, "grad_norm": 4.956857681274414, "learning_rate": 5.891942568879811e-05, "loss": 2.0051183700561523, "memory(GiB)": 72.85, "step": 51710, "token_acc": 0.5415162454873647, "train_speed(iter/s)": 0.671958 }, { "epoch": 2.215629150422004, "grad_norm": 5.521652698516846, "learning_rate": 5.8912803779662e-05, "loss": 2.4621278762817385, "memory(GiB)": 72.85, "step": 51715, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.671954 }, { "epoch": 2.2158433657512533, "grad_norm": 5.056777477264404, "learning_rate": 5.890618170906208e-05, "loss": 2.279649543762207, "memory(GiB)": 72.85, "step": 51720, "token_acc": 0.50187265917603, "train_speed(iter/s)": 0.671966 }, { "epoch": 
2.216057581080502, "grad_norm": 4.542469024658203, "learning_rate": 5.889955947711834e-05, "loss": 2.3333145141601563, "memory(GiB)": 72.85, "step": 51725, "token_acc": 0.5017182130584192, "train_speed(iter/s)": 0.671971 }, { "epoch": 2.216271796409751, "grad_norm": 7.691163063049316, "learning_rate": 5.8892937083950704e-05, "loss": 2.3866043090820312, "memory(GiB)": 72.85, "step": 51730, "token_acc": 0.5207667731629393, "train_speed(iter/s)": 0.671964 }, { "epoch": 2.216486011739, "grad_norm": 4.4488348960876465, "learning_rate": 5.8886314529679196e-05, "loss": 2.32946834564209, "memory(GiB)": 72.85, "step": 51735, "token_acc": 0.4778156996587031, "train_speed(iter/s)": 0.671946 }, { "epoch": 2.216700227068249, "grad_norm": 4.7175679206848145, "learning_rate": 5.8879691814423744e-05, "loss": 2.4682422637939454, "memory(GiB)": 72.85, "step": 51740, "token_acc": 0.4745762711864407, "train_speed(iter/s)": 0.671956 }, { "epoch": 2.216914442397498, "grad_norm": 4.142610549926758, "learning_rate": 5.8873068938304355e-05, "loss": 1.874091911315918, "memory(GiB)": 72.85, "step": 51745, "token_acc": 0.6024590163934426, "train_speed(iter/s)": 0.671971 }, { "epoch": 2.217128657726747, "grad_norm": 5.3113603591918945, "learning_rate": 5.8866445901441e-05, "loss": 2.159943389892578, "memory(GiB)": 72.85, "step": 51750, "token_acc": 0.534965034965035, "train_speed(iter/s)": 0.67199 }, { "epoch": 2.217342873055996, "grad_norm": 4.892415523529053, "learning_rate": 5.885982270395366e-05, "loss": 2.2731929779052735, "memory(GiB)": 72.85, "step": 51755, "token_acc": 0.5163636363636364, "train_speed(iter/s)": 0.671994 }, { "epoch": 2.2175570883852447, "grad_norm": 5.315062522888184, "learning_rate": 5.885319934596233e-05, "loss": 2.3915264129638674, "memory(GiB)": 72.85, "step": 51760, "token_acc": 0.487012987012987, "train_speed(iter/s)": 0.671995 }, { "epoch": 2.217771303714494, "grad_norm": 4.434576511383057, "learning_rate": 5.884657582758698e-05, "loss": 2.191695785522461, 
"memory(GiB)": 72.85, "step": 51765, "token_acc": 0.4783950617283951, "train_speed(iter/s)": 0.671999 }, { "epoch": 2.2179855190437427, "grad_norm": 5.996231555938721, "learning_rate": 5.8839952148947594e-05, "loss": 2.5994043350219727, "memory(GiB)": 72.85, "step": 51770, "token_acc": 0.46417445482866043, "train_speed(iter/s)": 0.672005 }, { "epoch": 2.2181997343729916, "grad_norm": 4.92519474029541, "learning_rate": 5.8833328310164215e-05, "loss": 2.2360815048217773, "memory(GiB)": 72.85, "step": 51775, "token_acc": 0.5190839694656488, "train_speed(iter/s)": 0.672008 }, { "epoch": 2.218413949702241, "grad_norm": 4.533902168273926, "learning_rate": 5.882670431135677e-05, "loss": 2.2238189697265627, "memory(GiB)": 72.85, "step": 51780, "token_acc": 0.5141843971631206, "train_speed(iter/s)": 0.67201 }, { "epoch": 2.2186281650314896, "grad_norm": 4.9951252937316895, "learning_rate": 5.882008015264532e-05, "loss": 2.1335693359375, "memory(GiB)": 72.85, "step": 51785, "token_acc": 0.5300353356890459, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.2188423803607384, "grad_norm": 4.980475425720215, "learning_rate": 5.8813455834149846e-05, "loss": 2.0653564453125, "memory(GiB)": 72.85, "step": 51790, "token_acc": 0.5318471337579618, "train_speed(iter/s)": 0.671996 }, { "epoch": 2.2190565956899877, "grad_norm": 4.643101215362549, "learning_rate": 5.880683135599034e-05, "loss": 2.1705751419067383, "memory(GiB)": 72.85, "step": 51795, "token_acc": 0.5278810408921933, "train_speed(iter/s)": 0.672008 }, { "epoch": 2.2192708110192365, "grad_norm": 3.819899320602417, "learning_rate": 5.880020671828683e-05, "loss": 2.3293663024902345, "memory(GiB)": 72.85, "step": 51800, "token_acc": 0.5063694267515924, "train_speed(iter/s)": 0.672017 }, { "epoch": 2.2194850263484853, "grad_norm": 4.663571834564209, "learning_rate": 5.879358192115932e-05, "loss": 2.1941930770874025, "memory(GiB)": 72.85, "step": 51805, "token_acc": 0.5258064516129032, "train_speed(iter/s)": 0.672013 }, { "epoch": 
2.2196992416777346, "grad_norm": 4.369613170623779, "learning_rate": 5.8786956964727834e-05, "loss": 2.2917051315307617, "memory(GiB)": 72.85, "step": 51810, "token_acc": 0.5290322580645161, "train_speed(iter/s)": 0.672021 }, { "epoch": 2.2199134570069834, "grad_norm": 5.151114463806152, "learning_rate": 5.878033184911236e-05, "loss": 2.189315414428711, "memory(GiB)": 72.85, "step": 51815, "token_acc": 0.5, "train_speed(iter/s)": 0.672012 }, { "epoch": 2.220127672336232, "grad_norm": 5.783360481262207, "learning_rate": 5.877370657443294e-05, "loss": 2.1224735260009764, "memory(GiB)": 72.85, "step": 51820, "token_acc": 0.5630630630630631, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.2203418876654815, "grad_norm": 3.829569101333618, "learning_rate": 5.876708114080961e-05, "loss": 2.3339618682861327, "memory(GiB)": 72.85, "step": 51825, "token_acc": 0.4985507246376812, "train_speed(iter/s)": 0.672006 }, { "epoch": 2.2205561029947303, "grad_norm": 4.754868030548096, "learning_rate": 5.876045554836237e-05, "loss": 2.4376827239990235, "memory(GiB)": 72.85, "step": 51830, "token_acc": 0.49110320284697506, "train_speed(iter/s)": 0.672008 }, { "epoch": 2.220770318323979, "grad_norm": 5.086766719818115, "learning_rate": 5.875382979721127e-05, "loss": 2.0809581756591795, "memory(GiB)": 72.85, "step": 51835, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.672011 }, { "epoch": 2.2209845336532283, "grad_norm": 5.883292198181152, "learning_rate": 5.874720388747632e-05, "loss": 2.4116954803466797, "memory(GiB)": 72.85, "step": 51840, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.672022 }, { "epoch": 2.221198748982477, "grad_norm": 5.764402866363525, "learning_rate": 5.874057781927756e-05, "loss": 2.2086860656738283, "memory(GiB)": 72.85, "step": 51845, "token_acc": 0.5019011406844106, "train_speed(iter/s)": 0.672031 }, { "epoch": 2.221412964311726, "grad_norm": 6.674629211425781, "learning_rate": 5.8733951592735045e-05, "loss": 2.166110801696777, 
"memory(GiB)": 72.85, "step": 51850, "token_acc": 0.5084175084175084, "train_speed(iter/s)": 0.672045 }, { "epoch": 2.2216271796409752, "grad_norm": 4.301547527313232, "learning_rate": 5.8727325207968806e-05, "loss": 2.039556694030762, "memory(GiB)": 72.85, "step": 51855, "token_acc": 0.5791505791505791, "train_speed(iter/s)": 0.672049 }, { "epoch": 2.221841394970224, "grad_norm": 4.6000542640686035, "learning_rate": 5.872069866509887e-05, "loss": 2.204393005371094, "memory(GiB)": 72.85, "step": 51860, "token_acc": 0.4981132075471698, "train_speed(iter/s)": 0.672056 }, { "epoch": 2.222055610299473, "grad_norm": 5.164978504180908, "learning_rate": 5.871407196424532e-05, "loss": 2.07825927734375, "memory(GiB)": 72.85, "step": 51865, "token_acc": 0.5516014234875445, "train_speed(iter/s)": 0.672065 }, { "epoch": 2.222269825628722, "grad_norm": 4.570065498352051, "learning_rate": 5.870744510552817e-05, "loss": 2.4092369079589844, "memory(GiB)": 72.85, "step": 51870, "token_acc": 0.49491525423728816, "train_speed(iter/s)": 0.672076 }, { "epoch": 2.222484040957971, "grad_norm": 6.9346842765808105, "learning_rate": 5.8700818089067474e-05, "loss": 2.7450124740600588, "memory(GiB)": 72.85, "step": 51875, "token_acc": 0.4626334519572954, "train_speed(iter/s)": 0.672068 }, { "epoch": 2.2226982562872197, "grad_norm": 5.731123924255371, "learning_rate": 5.8694190914983317e-05, "loss": 2.029824447631836, "memory(GiB)": 72.85, "step": 51880, "token_acc": 0.5578512396694215, "train_speed(iter/s)": 0.672077 }, { "epoch": 2.222912471616469, "grad_norm": 4.768226146697998, "learning_rate": 5.868756358339572e-05, "loss": 2.2525115966796876, "memory(GiB)": 72.85, "step": 51885, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.672087 }, { "epoch": 2.223126686945718, "grad_norm": 4.643097877502441, "learning_rate": 5.8680936094424754e-05, "loss": 2.443986701965332, "memory(GiB)": 72.85, "step": 51890, "token_acc": 0.4830188679245283, "train_speed(iter/s)": 0.672096 }, { "epoch": 
2.2233409022749666, "grad_norm": 3.7141685485839844, "learning_rate": 5.8674308448190506e-05, "loss": 2.1532114028930662, "memory(GiB)": 72.85, "step": 51895, "token_acc": 0.5516014234875445, "train_speed(iter/s)": 0.672099 }, { "epoch": 2.223555117604216, "grad_norm": 5.1505656242370605, "learning_rate": 5.8667680644813005e-05, "loss": 2.20181941986084, "memory(GiB)": 72.85, "step": 51900, "token_acc": 0.5443037974683544, "train_speed(iter/s)": 0.67208 }, { "epoch": 2.2237693329334647, "grad_norm": 4.386983394622803, "learning_rate": 5.8661052684412354e-05, "loss": 2.2177663803100587, "memory(GiB)": 72.85, "step": 51905, "token_acc": 0.5150375939849624, "train_speed(iter/s)": 0.672093 }, { "epoch": 2.2239835482627135, "grad_norm": 5.098214626312256, "learning_rate": 5.86544245671086e-05, "loss": 2.3268728256225586, "memory(GiB)": 72.85, "step": 51910, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.672101 }, { "epoch": 2.2241977635919628, "grad_norm": 5.291083335876465, "learning_rate": 5.8647796293021826e-05, "loss": 2.2996740341186523, "memory(GiB)": 72.85, "step": 51915, "token_acc": 0.5051903114186851, "train_speed(iter/s)": 0.672115 }, { "epoch": 2.2244119789212116, "grad_norm": 6.450314044952393, "learning_rate": 5.864116786227212e-05, "loss": 2.2176233291625977, "memory(GiB)": 72.85, "step": 51920, "token_acc": 0.5017921146953405, "train_speed(iter/s)": 0.672118 }, { "epoch": 2.2246261942504604, "grad_norm": 6.289032459259033, "learning_rate": 5.863453927497954e-05, "loss": 2.5200372695922852, "memory(GiB)": 72.85, "step": 51925, "token_acc": 0.4426229508196721, "train_speed(iter/s)": 0.672127 }, { "epoch": 2.2248404095797096, "grad_norm": 4.617354393005371, "learning_rate": 5.8627910531264176e-05, "loss": 2.2621749877929687, "memory(GiB)": 72.85, "step": 51930, "token_acc": 0.48787878787878786, "train_speed(iter/s)": 0.672122 }, { "epoch": 2.2250546249089584, "grad_norm": 4.749877452850342, "learning_rate": 5.862128163124613e-05, "loss": 
2.3273616790771485, "memory(GiB)": 72.85, "step": 51935, "token_acc": 0.5125348189415042, "train_speed(iter/s)": 0.672123 }, { "epoch": 2.2252688402382073, "grad_norm": 3.568504810333252, "learning_rate": 5.861465257504548e-05, "loss": 2.107493209838867, "memory(GiB)": 72.85, "step": 51940, "token_acc": 0.5752508361204013, "train_speed(iter/s)": 0.672111 }, { "epoch": 2.2254830555674565, "grad_norm": 5.068622589111328, "learning_rate": 5.8608023362782316e-05, "loss": 2.042934036254883, "memory(GiB)": 72.85, "step": 51945, "token_acc": 0.5181058495821727, "train_speed(iter/s)": 0.672106 }, { "epoch": 2.2256972708967053, "grad_norm": 4.45560359954834, "learning_rate": 5.8601393994576734e-05, "loss": 2.3189083099365235, "memory(GiB)": 72.85, "step": 51950, "token_acc": 0.5076452599388379, "train_speed(iter/s)": 0.67211 }, { "epoch": 2.225911486225954, "grad_norm": 5.658502101898193, "learning_rate": 5.859476447054884e-05, "loss": 2.1890045166015626, "memory(GiB)": 72.85, "step": 51955, "token_acc": 0.5482625482625483, "train_speed(iter/s)": 0.672114 }, { "epoch": 2.2261257015552034, "grad_norm": 5.207546710968018, "learning_rate": 5.8588134790818707e-05, "loss": 2.3282318115234375, "memory(GiB)": 72.85, "step": 51960, "token_acc": 0.532608695652174, "train_speed(iter/s)": 0.672116 }, { "epoch": 2.226339916884452, "grad_norm": 5.1516828536987305, "learning_rate": 5.858150495550646e-05, "loss": 2.1001941680908205, "memory(GiB)": 72.85, "step": 51965, "token_acc": 0.5182481751824818, "train_speed(iter/s)": 0.672122 }, { "epoch": 2.226554132213701, "grad_norm": 4.599262237548828, "learning_rate": 5.857487496473221e-05, "loss": 2.5261579513549806, "memory(GiB)": 72.85, "step": 51970, "token_acc": 0.47041420118343197, "train_speed(iter/s)": 0.672115 }, { "epoch": 2.2267683475429503, "grad_norm": 4.203741550445557, "learning_rate": 5.856824481861605e-05, "loss": 2.0989051818847657, "memory(GiB)": 72.85, "step": 51975, "token_acc": 0.5266666666666666, "train_speed(iter/s)": 
0.672125 }, { "epoch": 2.226982562872199, "grad_norm": 4.676695346832275, "learning_rate": 5.85616145172781e-05, "loss": 2.5759675979614256, "memory(GiB)": 72.85, "step": 51980, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.672111 }, { "epoch": 2.227196778201448, "grad_norm": 4.2661614418029785, "learning_rate": 5.855498406083847e-05, "loss": 2.3760793685913084, "memory(GiB)": 72.85, "step": 51985, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.67211 }, { "epoch": 2.227410993530697, "grad_norm": 5.823429107666016, "learning_rate": 5.854835344941727e-05, "loss": 2.5707473754882812, "memory(GiB)": 72.85, "step": 51990, "token_acc": 0.5, "train_speed(iter/s)": 0.672113 }, { "epoch": 2.227625208859946, "grad_norm": 4.204874515533447, "learning_rate": 5.854172268313465e-05, "loss": 2.0489721298217773, "memory(GiB)": 72.85, "step": 51995, "token_acc": 0.5173611111111112, "train_speed(iter/s)": 0.672107 }, { "epoch": 2.227839424189195, "grad_norm": 3.8019487857818604, "learning_rate": 5.8535091762110695e-05, "loss": 1.97827091217041, "memory(GiB)": 72.85, "step": 52000, "token_acc": 0.5524193548387096, "train_speed(iter/s)": 0.672096 }, { "epoch": 2.227839424189195, "eval_loss": 1.9734708070755005, "eval_runtime": 16.0798, "eval_samples_per_second": 6.219, "eval_steps_per_second": 6.219, "eval_token_acc": 0.5206611570247934, "step": 52000 }, { "epoch": 2.228053639518444, "grad_norm": 4.754964828491211, "learning_rate": 5.852846068646554e-05, "loss": 2.335738754272461, "memory(GiB)": 72.85, "step": 52005, "token_acc": 0.5097493036211699, "train_speed(iter/s)": 0.67194 }, { "epoch": 2.228267854847693, "grad_norm": 4.195163726806641, "learning_rate": 5.8521829456319334e-05, "loss": 2.280678939819336, "memory(GiB)": 72.85, "step": 52010, "token_acc": 0.5395189003436426, "train_speed(iter/s)": 0.671955 }, { "epoch": 2.2284820701769417, "grad_norm": 4.808274269104004, "learning_rate": 5.851519807179219e-05, "loss": 2.392986869812012, "memory(GiB)": 72.85, 
"step": 52015, "token_acc": 0.45614035087719296, "train_speed(iter/s)": 0.671968 }, { "epoch": 2.228696285506191, "grad_norm": 5.081627368927002, "learning_rate": 5.850856653300424e-05, "loss": 2.288662338256836, "memory(GiB)": 72.85, "step": 52020, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.671994 }, { "epoch": 2.2289105008354397, "grad_norm": 4.4746222496032715, "learning_rate": 5.850193484007563e-05, "loss": 2.255153846740723, "memory(GiB)": 72.85, "step": 52025, "token_acc": 0.49185667752442996, "train_speed(iter/s)": 0.671995 }, { "epoch": 2.2291247161646885, "grad_norm": 5.525484561920166, "learning_rate": 5.849530299312649e-05, "loss": 2.501753234863281, "memory(GiB)": 72.85, "step": 52030, "token_acc": 0.49216300940438873, "train_speed(iter/s)": 0.671989 }, { "epoch": 2.229338931493938, "grad_norm": 4.688736438751221, "learning_rate": 5.848867099227696e-05, "loss": 2.264076042175293, "memory(GiB)": 72.85, "step": 52035, "token_acc": 0.508130081300813, "train_speed(iter/s)": 0.67199 }, { "epoch": 2.2295531468231866, "grad_norm": 6.111398220062256, "learning_rate": 5.848203883764721e-05, "loss": 2.3266016006469727, "memory(GiB)": 72.85, "step": 52040, "token_acc": 0.5017182130584192, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.2297673621524354, "grad_norm": 3.9360551834106445, "learning_rate": 5.8475406529357356e-05, "loss": 2.4229053497314452, "memory(GiB)": 72.85, "step": 52045, "token_acc": 0.4485049833887043, "train_speed(iter/s)": 0.672003 }, { "epoch": 2.2299815774816847, "grad_norm": 3.381918430328369, "learning_rate": 5.8468774067527575e-05, "loss": 2.2739105224609375, "memory(GiB)": 72.85, "step": 52050, "token_acc": 0.5077399380804953, "train_speed(iter/s)": 0.671999 }, { "epoch": 2.2301957928109335, "grad_norm": 4.808043479919434, "learning_rate": 5.8462141452277995e-05, "loss": 2.3873003005981444, "memory(GiB)": 72.85, "step": 52055, "token_acc": 0.4895833333333333, "train_speed(iter/s)": 0.671994 }, { "epoch": 
2.2304100081401823, "grad_norm": 5.3751397132873535, "learning_rate": 5.845550868372879e-05, "loss": 2.183582878112793, "memory(GiB)": 72.85, "step": 52060, "token_acc": 0.5476190476190477, "train_speed(iter/s)": 0.672002 }, { "epoch": 2.2306242234694316, "grad_norm": 4.290040016174316, "learning_rate": 5.844887576200012e-05, "loss": 2.1245119094848635, "memory(GiB)": 72.85, "step": 52065, "token_acc": 0.5, "train_speed(iter/s)": 0.671983 }, { "epoch": 2.2308384387986804, "grad_norm": 5.151321887969971, "learning_rate": 5.844224268721214e-05, "loss": 2.559288215637207, "memory(GiB)": 72.85, "step": 52070, "token_acc": 0.47101449275362317, "train_speed(iter/s)": 0.671976 }, { "epoch": 2.231052654127929, "grad_norm": 5.3451738357543945, "learning_rate": 5.843560945948499e-05, "loss": 2.4108512878417967, "memory(GiB)": 72.85, "step": 52075, "token_acc": 0.47761194029850745, "train_speed(iter/s)": 0.671973 }, { "epoch": 2.2312668694571784, "grad_norm": 3.990870475769043, "learning_rate": 5.8428976078938877e-05, "loss": 2.6319808959960938, "memory(GiB)": 72.85, "step": 52080, "token_acc": 0.46296296296296297, "train_speed(iter/s)": 0.671978 }, { "epoch": 2.2314810847864273, "grad_norm": 4.142945289611816, "learning_rate": 5.842234254569396e-05, "loss": 2.181937026977539, "memory(GiB)": 72.85, "step": 52085, "token_acc": 0.5517241379310345, "train_speed(iter/s)": 0.671976 }, { "epoch": 2.231695300115676, "grad_norm": 6.232994556427002, "learning_rate": 5.84157088598704e-05, "loss": 2.4787384033203126, "memory(GiB)": 72.85, "step": 52090, "token_acc": 0.49508196721311476, "train_speed(iter/s)": 0.671976 }, { "epoch": 2.2319095154449253, "grad_norm": 4.743730068206787, "learning_rate": 5.840907502158839e-05, "loss": 2.1364309310913088, "memory(GiB)": 72.85, "step": 52095, "token_acc": 0.5230263157894737, "train_speed(iter/s)": 0.671992 }, { "epoch": 2.232123730774174, "grad_norm": 4.017286777496338, "learning_rate": 5.84024410309681e-05, "loss": 1.9898794174194336, 
"memory(GiB)": 72.85, "step": 52100, "token_acc": 0.5205992509363296, "train_speed(iter/s)": 0.672003 }, { "epoch": 2.232337946103423, "grad_norm": 6.979896068572998, "learning_rate": 5.839580688812969e-05, "loss": 2.3388740539550783, "memory(GiB)": 72.85, "step": 52105, "token_acc": 0.5283018867924528, "train_speed(iter/s)": 0.672 }, { "epoch": 2.232552161432672, "grad_norm": 5.297241687774658, "learning_rate": 5.8389172593193365e-05, "loss": 2.286202239990234, "memory(GiB)": 72.85, "step": 52110, "token_acc": 0.5097402597402597, "train_speed(iter/s)": 0.671998 }, { "epoch": 2.232766376761921, "grad_norm": 4.416945457458496, "learning_rate": 5.838253814627932e-05, "loss": 2.6209957122802736, "memory(GiB)": 72.85, "step": 52115, "token_acc": 0.4353312302839117, "train_speed(iter/s)": 0.672007 }, { "epoch": 2.2329805920911703, "grad_norm": 5.439729690551758, "learning_rate": 5.8375903547507724e-05, "loss": 2.501339149475098, "memory(GiB)": 72.85, "step": 52120, "token_acc": 0.46229508196721314, "train_speed(iter/s)": 0.672016 }, { "epoch": 2.233194807420419, "grad_norm": 4.268568515777588, "learning_rate": 5.836926879699879e-05, "loss": 2.294500732421875, "memory(GiB)": 72.85, "step": 52125, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.672017 }, { "epoch": 2.233409022749668, "grad_norm": 5.391081809997559, "learning_rate": 5.83626338948727e-05, "loss": 2.196004867553711, "memory(GiB)": 72.85, "step": 52130, "token_acc": 0.46875, "train_speed(iter/s)": 0.672038 }, { "epoch": 2.233623238078917, "grad_norm": 5.684406280517578, "learning_rate": 5.835599884124964e-05, "loss": 2.169092559814453, "memory(GiB)": 72.85, "step": 52135, "token_acc": 0.5173501577287066, "train_speed(iter/s)": 0.672037 }, { "epoch": 2.233837453408166, "grad_norm": 4.077826499938965, "learning_rate": 5.8349363636249835e-05, "loss": 2.46068115234375, "memory(GiB)": 72.85, "step": 52140, "token_acc": 0.5, "train_speed(iter/s)": 0.672045 }, { "epoch": 2.234051668737415, "grad_norm": 
3.809932231903076, "learning_rate": 5.834272827999345e-05, "loss": 2.119449234008789, "memory(GiB)": 72.85, "step": 52145, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.672051 }, { "epoch": 2.234265884066664, "grad_norm": 5.393467903137207, "learning_rate": 5.833609277260074e-05, "loss": 2.3373687744140623, "memory(GiB)": 72.85, "step": 52150, "token_acc": 0.5328185328185329, "train_speed(iter/s)": 0.672066 }, { "epoch": 2.234480099395913, "grad_norm": 4.982870101928711, "learning_rate": 5.8329457114191886e-05, "loss": 2.3074323654174806, "memory(GiB)": 72.85, "step": 52155, "token_acc": 0.5484949832775919, "train_speed(iter/s)": 0.672073 }, { "epoch": 2.2346943147251617, "grad_norm": 3.894249677658081, "learning_rate": 5.832282130488711e-05, "loss": 2.109530448913574, "memory(GiB)": 72.85, "step": 52160, "token_acc": 0.5296167247386759, "train_speed(iter/s)": 0.672076 }, { "epoch": 2.234908530054411, "grad_norm": 5.496546745300293, "learning_rate": 5.8316185344806596e-05, "loss": 2.115230178833008, "memory(GiB)": 72.85, "step": 52165, "token_acc": 0.5418060200668896, "train_speed(iter/s)": 0.672082 }, { "epoch": 2.2351227453836597, "grad_norm": 5.085431098937988, "learning_rate": 5.8309549234070605e-05, "loss": 2.3157445907592775, "memory(GiB)": 72.85, "step": 52170, "token_acc": 0.5015873015873016, "train_speed(iter/s)": 0.672085 }, { "epoch": 2.2353369607129085, "grad_norm": 4.780853271484375, "learning_rate": 5.8302912972799315e-05, "loss": 2.506322479248047, "memory(GiB)": 72.85, "step": 52175, "token_acc": 0.5079365079365079, "train_speed(iter/s)": 0.672093 }, { "epoch": 2.235551176042158, "grad_norm": 4.191981792449951, "learning_rate": 5.8296276561112985e-05, "loss": 2.321955680847168, "memory(GiB)": 72.85, "step": 52180, "token_acc": 0.479020979020979, "train_speed(iter/s)": 0.672105 }, { "epoch": 2.2357653913714066, "grad_norm": 6.011520862579346, "learning_rate": 5.828963999913182e-05, "loss": 2.2419437408447265, "memory(GiB)": 72.85, "step": 
52185, "token_acc": 0.5387596899224806, "train_speed(iter/s)": 0.672099 }, { "epoch": 2.2359796067006554, "grad_norm": 3.5696640014648438, "learning_rate": 5.8283003286976035e-05, "loss": 2.2819873809814455, "memory(GiB)": 72.85, "step": 52190, "token_acc": 0.47720364741641336, "train_speed(iter/s)": 0.672116 }, { "epoch": 2.2361938220299047, "grad_norm": 4.332692623138428, "learning_rate": 5.827636642476589e-05, "loss": 2.241394805908203, "memory(GiB)": 72.85, "step": 52195, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.672132 }, { "epoch": 2.2364080373591535, "grad_norm": 5.373138904571533, "learning_rate": 5.826972941262161e-05, "loss": 2.4024526596069338, "memory(GiB)": 72.85, "step": 52200, "token_acc": 0.4888888888888889, "train_speed(iter/s)": 0.67214 }, { "epoch": 2.2366222526884023, "grad_norm": 6.636557579040527, "learning_rate": 5.826309225066341e-05, "loss": 2.116702842712402, "memory(GiB)": 72.85, "step": 52205, "token_acc": 0.49812734082397003, "train_speed(iter/s)": 0.672144 }, { "epoch": 2.2368364680176516, "grad_norm": 5.615790367126465, "learning_rate": 5.825645493901155e-05, "loss": 2.639062690734863, "memory(GiB)": 72.85, "step": 52210, "token_acc": 0.4563953488372093, "train_speed(iter/s)": 0.672144 }, { "epoch": 2.2370506833469004, "grad_norm": 5.161109447479248, "learning_rate": 5.824981747778626e-05, "loss": 2.3235015869140625, "memory(GiB)": 72.85, "step": 52215, "token_acc": 0.5510204081632653, "train_speed(iter/s)": 0.672152 }, { "epoch": 2.237264898676149, "grad_norm": 4.766689300537109, "learning_rate": 5.824317986710778e-05, "loss": 2.363507843017578, "memory(GiB)": 72.85, "step": 52220, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.672153 }, { "epoch": 2.2374791140053985, "grad_norm": 3.6187262535095215, "learning_rate": 5.823654210709637e-05, "loss": 2.39105110168457, "memory(GiB)": 72.85, "step": 52225, "token_acc": 0.5418181818181819, "train_speed(iter/s)": 0.672156 }, { "epoch": 2.2376933293346473, 
"grad_norm": 3.6414411067962646, "learning_rate": 5.8229904197872284e-05, "loss": 2.0248931884765624, "memory(GiB)": 72.85, "step": 52230, "token_acc": 0.5304878048780488, "train_speed(iter/s)": 0.67215 }, { "epoch": 2.237907544663896, "grad_norm": 5.603583335876465, "learning_rate": 5.822326613955574e-05, "loss": 2.065362548828125, "memory(GiB)": 72.85, "step": 52235, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672131 }, { "epoch": 2.2381217599931453, "grad_norm": 4.45028018951416, "learning_rate": 5.821662793226704e-05, "loss": 2.0319660186767576, "memory(GiB)": 72.85, "step": 52240, "token_acc": 0.5735849056603773, "train_speed(iter/s)": 0.672132 }, { "epoch": 2.238335975322394, "grad_norm": 4.511946201324463, "learning_rate": 5.820998957612641e-05, "loss": 2.1429338455200195, "memory(GiB)": 72.85, "step": 52245, "token_acc": 0.5283687943262412, "train_speed(iter/s)": 0.672125 }, { "epoch": 2.238550190651643, "grad_norm": 3.8135950565338135, "learning_rate": 5.820335107125412e-05, "loss": 2.1070955276489256, "memory(GiB)": 72.85, "step": 52250, "token_acc": 0.5342019543973942, "train_speed(iter/s)": 0.672126 }, { "epoch": 2.238764405980892, "grad_norm": 5.225114822387695, "learning_rate": 5.819671241777043e-05, "loss": 2.049224090576172, "memory(GiB)": 72.85, "step": 52255, "token_acc": 0.5639810426540285, "train_speed(iter/s)": 0.672123 }, { "epoch": 2.238978621310141, "grad_norm": 4.848046779632568, "learning_rate": 5.819007361579558e-05, "loss": 1.8707406997680665, "memory(GiB)": 72.85, "step": 52260, "token_acc": 0.5892857142857143, "train_speed(iter/s)": 0.672143 }, { "epoch": 2.23919283663939, "grad_norm": 4.228163242340088, "learning_rate": 5.818343466544989e-05, "loss": 2.2796207427978517, "memory(GiB)": 72.85, "step": 52265, "token_acc": 0.5114942528735632, "train_speed(iter/s)": 0.672146 }, { "epoch": 2.239407051968639, "grad_norm": 4.1781158447265625, "learning_rate": 5.8176795566853606e-05, "loss": 2.2943889617919924, "memory(GiB)": 
72.85, "step": 52270, "token_acc": 0.5424354243542435, "train_speed(iter/s)": 0.672141 }, { "epoch": 2.239621267297888, "grad_norm": 4.983755588531494, "learning_rate": 5.817015632012699e-05, "loss": 1.866858673095703, "memory(GiB)": 72.85, "step": 52275, "token_acc": 0.5625, "train_speed(iter/s)": 0.672149 }, { "epoch": 2.2398354826271367, "grad_norm": 3.7409186363220215, "learning_rate": 5.816351692539033e-05, "loss": 2.054684066772461, "memory(GiB)": 72.85, "step": 52280, "token_acc": 0.5298245614035088, "train_speed(iter/s)": 0.672161 }, { "epoch": 2.240049697956386, "grad_norm": 4.921355724334717, "learning_rate": 5.81568773827639e-05, "loss": 2.274286651611328, "memory(GiB)": 72.85, "step": 52285, "token_acc": 0.49829351535836175, "train_speed(iter/s)": 0.672164 }, { "epoch": 2.240263913285635, "grad_norm": 6.686900615692139, "learning_rate": 5.815023769236798e-05, "loss": 2.4468616485595702, "memory(GiB)": 72.85, "step": 52290, "token_acc": 0.4559748427672956, "train_speed(iter/s)": 0.67217 }, { "epoch": 2.2404781286148836, "grad_norm": 6.92768669128418, "learning_rate": 5.814359785432286e-05, "loss": 1.9989965438842774, "memory(GiB)": 72.85, "step": 52295, "token_acc": 0.5467625899280576, "train_speed(iter/s)": 0.672175 }, { "epoch": 2.240692343944133, "grad_norm": 5.435842514038086, "learning_rate": 5.8136957868748844e-05, "loss": 2.162324333190918, "memory(GiB)": 72.85, "step": 52300, "token_acc": 0.547945205479452, "train_speed(iter/s)": 0.67216 }, { "epoch": 2.2409065592733817, "grad_norm": 4.9694013595581055, "learning_rate": 5.813031773576618e-05, "loss": 2.3458574295043944, "memory(GiB)": 72.85, "step": 52305, "token_acc": 0.5368098159509203, "train_speed(iter/s)": 0.672154 }, { "epoch": 2.2411207746026305, "grad_norm": 4.580580234527588, "learning_rate": 5.81236774554952e-05, "loss": 2.0663352966308595, "memory(GiB)": 72.85, "step": 52310, "token_acc": 0.5256410256410257, "train_speed(iter/s)": 0.672159 }, { "epoch": 2.2413349899318797, "grad_norm": 
5.305854320526123, "learning_rate": 5.811703702805618e-05, "loss": 2.1931440353393556, "memory(GiB)": 72.85, "step": 52315, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672153 }, { "epoch": 2.2415492052611286, "grad_norm": 4.854557514190674, "learning_rate": 5.811039645356941e-05, "loss": 2.281400680541992, "memory(GiB)": 72.85, "step": 52320, "token_acc": 0.5234657039711191, "train_speed(iter/s)": 0.672143 }, { "epoch": 2.2417634205903774, "grad_norm": 6.379295349121094, "learning_rate": 5.810375573215521e-05, "loss": 2.04660587310791, "memory(GiB)": 72.85, "step": 52325, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672158 }, { "epoch": 2.2419776359196266, "grad_norm": 5.643614768981934, "learning_rate": 5.809711486393388e-05, "loss": 2.1573406219482423, "memory(GiB)": 72.85, "step": 52330, "token_acc": 0.5257731958762887, "train_speed(iter/s)": 0.672171 }, { "epoch": 2.2421918512488754, "grad_norm": 4.66619348526001, "learning_rate": 5.8090473849025685e-05, "loss": 2.2885000228881838, "memory(GiB)": 72.85, "step": 52335, "token_acc": 0.5323741007194245, "train_speed(iter/s)": 0.672167 }, { "epoch": 2.2424060665781242, "grad_norm": 4.636312484741211, "learning_rate": 5.8083832687551e-05, "loss": 2.321270561218262, "memory(GiB)": 72.85, "step": 52340, "token_acc": 0.5380116959064327, "train_speed(iter/s)": 0.672183 }, { "epoch": 2.2426202819073735, "grad_norm": 4.992216110229492, "learning_rate": 5.807719137963009e-05, "loss": 2.2608036041259765, "memory(GiB)": 72.85, "step": 52345, "token_acc": 0.5131578947368421, "train_speed(iter/s)": 0.672178 }, { "epoch": 2.2428344972366223, "grad_norm": 4.662277698516846, "learning_rate": 5.807054992538328e-05, "loss": 2.4052515029907227, "memory(GiB)": 72.85, "step": 52350, "token_acc": 0.4852941176470588, "train_speed(iter/s)": 0.672172 }, { "epoch": 2.243048712565871, "grad_norm": 3.8734219074249268, "learning_rate": 5.806390832493089e-05, "loss": 2.229286956787109, "memory(GiB)": 72.85, "step": 
52355, "token_acc": 0.5227272727272727, "train_speed(iter/s)": 0.672172 }, { "epoch": 2.2432629278951204, "grad_norm": 7.427905082702637, "learning_rate": 5.805726657839324e-05, "loss": 2.158697509765625, "memory(GiB)": 72.85, "step": 52360, "token_acc": 0.5518518518518518, "train_speed(iter/s)": 0.672199 }, { "epoch": 2.243477143224369, "grad_norm": 5.990196704864502, "learning_rate": 5.805062468589064e-05, "loss": 2.148678016662598, "memory(GiB)": 72.85, "step": 52365, "token_acc": 0.524, "train_speed(iter/s)": 0.672196 }, { "epoch": 2.243691358553618, "grad_norm": 5.548598766326904, "learning_rate": 5.8043982647543426e-05, "loss": 2.3122842788696287, "memory(GiB)": 72.85, "step": 52370, "token_acc": 0.4983277591973244, "train_speed(iter/s)": 0.672182 }, { "epoch": 2.2439055738828673, "grad_norm": 4.25919771194458, "learning_rate": 5.803734046347192e-05, "loss": 2.4266538619995117, "memory(GiB)": 72.85, "step": 52375, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.672206 }, { "epoch": 2.244119789212116, "grad_norm": 5.229031562805176, "learning_rate": 5.8030698133796445e-05, "loss": 2.470500946044922, "memory(GiB)": 72.85, "step": 52380, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672197 }, { "epoch": 2.244334004541365, "grad_norm": 4.8494415283203125, "learning_rate": 5.802405565863735e-05, "loss": 1.953843879699707, "memory(GiB)": 72.85, "step": 52385, "token_acc": 0.5465116279069767, "train_speed(iter/s)": 0.672204 }, { "epoch": 2.244548219870614, "grad_norm": 4.473728179931641, "learning_rate": 5.8017413038114965e-05, "loss": 2.344216156005859, "memory(GiB)": 72.85, "step": 52390, "token_acc": 0.5096618357487923, "train_speed(iter/s)": 0.672209 }, { "epoch": 2.244762435199863, "grad_norm": 4.356535911560059, "learning_rate": 5.8010770272349615e-05, "loss": 2.1158065795898438, "memory(GiB)": 72.85, "step": 52395, "token_acc": 0.5451263537906137, "train_speed(iter/s)": 0.672213 }, { "epoch": 2.2449766505291118, "grad_norm": 
4.860653400421143, "learning_rate": 5.8004127361461644e-05, "loss": 2.3182088851928713, "memory(GiB)": 72.85, "step": 52400, "token_acc": 0.48518518518518516, "train_speed(iter/s)": 0.672212 }, { "epoch": 2.245190865858361, "grad_norm": 5.797356605529785, "learning_rate": 5.799748430557139e-05, "loss": 2.2766403198242187, "memory(GiB)": 72.85, "step": 52405, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.672212 }, { "epoch": 2.24540508118761, "grad_norm": 5.6197638511657715, "learning_rate": 5.799084110479921e-05, "loss": 2.625739097595215, "memory(GiB)": 72.85, "step": 52410, "token_acc": 0.4622356495468278, "train_speed(iter/s)": 0.67222 }, { "epoch": 2.2456192965168587, "grad_norm": 4.704622268676758, "learning_rate": 5.798419775926546e-05, "loss": 2.2617000579833983, "memory(GiB)": 72.85, "step": 52415, "token_acc": 0.51985559566787, "train_speed(iter/s)": 0.672219 }, { "epoch": 2.245833511846108, "grad_norm": 4.770252227783203, "learning_rate": 5.7977554269090475e-05, "loss": 2.102103424072266, "memory(GiB)": 72.85, "step": 52420, "token_acc": 0.5463917525773195, "train_speed(iter/s)": 0.672195 }, { "epoch": 2.2460477271753567, "grad_norm": 4.104209899902344, "learning_rate": 5.7970910634394594e-05, "loss": 2.0913928985595702, "memory(GiB)": 72.85, "step": 52425, "token_acc": 0.5593869731800766, "train_speed(iter/s)": 0.672208 }, { "epoch": 2.2462619425046055, "grad_norm": 4.784260272979736, "learning_rate": 5.796426685529821e-05, "loss": 2.390336799621582, "memory(GiB)": 72.85, "step": 52430, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.672205 }, { "epoch": 2.246476157833855, "grad_norm": 4.951958179473877, "learning_rate": 5.795762293192164e-05, "loss": 2.0010793685913084, "memory(GiB)": 72.85, "step": 52435, "token_acc": 0.5588235294117647, "train_speed(iter/s)": 0.672221 }, { "epoch": 2.2466903731631036, "grad_norm": 4.400158882141113, "learning_rate": 5.7950978864385286e-05, "loss": 2.1330934524536134, "memory(GiB)": 72.85, "step": 
52440, "token_acc": 0.5, "train_speed(iter/s)": 0.672224 }, { "epoch": 2.2469045884923524, "grad_norm": 6.942949295043945, "learning_rate": 5.7944334652809485e-05, "loss": 2.4921730041503904, "memory(GiB)": 72.85, "step": 52445, "token_acc": 0.49615384615384617, "train_speed(iter/s)": 0.672219 }, { "epoch": 2.2471188038216017, "grad_norm": 7.280636787414551, "learning_rate": 5.7937690297314594e-05, "loss": 2.389510917663574, "memory(GiB)": 72.85, "step": 52450, "token_acc": 0.47474747474747475, "train_speed(iter/s)": 0.672219 }, { "epoch": 2.2473330191508505, "grad_norm": 4.879859447479248, "learning_rate": 5.793104579802102e-05, "loss": 2.2794294357299805, "memory(GiB)": 72.85, "step": 52455, "token_acc": 0.5033557046979866, "train_speed(iter/s)": 0.67223 }, { "epoch": 2.2475472344800993, "grad_norm": 6.198484897613525, "learning_rate": 5.79244011550491e-05, "loss": 2.4480669021606447, "memory(GiB)": 72.85, "step": 52460, "token_acc": 0.4657534246575342, "train_speed(iter/s)": 0.672206 }, { "epoch": 2.2477614498093486, "grad_norm": 4.561391353607178, "learning_rate": 5.7917756368519217e-05, "loss": 2.4831993103027346, "memory(GiB)": 72.85, "step": 52465, "token_acc": 0.4629080118694362, "train_speed(iter/s)": 0.672213 }, { "epoch": 2.2479756651385974, "grad_norm": 5.272424697875977, "learning_rate": 5.7911111438551754e-05, "loss": 2.0369888305664063, "memory(GiB)": 72.85, "step": 52470, "token_acc": 0.5852713178294574, "train_speed(iter/s)": 0.672207 }, { "epoch": 2.248189880467846, "grad_norm": 5.004683971405029, "learning_rate": 5.7904466365267097e-05, "loss": 2.1782764434814452, "memory(GiB)": 72.85, "step": 52475, "token_acc": 0.49612403100775193, "train_speed(iter/s)": 0.672212 }, { "epoch": 2.2484040957970954, "grad_norm": 5.299646854400635, "learning_rate": 5.789782114878559e-05, "loss": 2.2443580627441406, "memory(GiB)": 72.85, "step": 52480, "token_acc": 0.5153846153846153, "train_speed(iter/s)": 0.6722 }, { "epoch": 2.2486183111263442, "grad_norm": 
5.3954877853393555, "learning_rate": 5.789117578922767e-05, "loss": 2.3505382537841797, "memory(GiB)": 72.85, "step": 52485, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.672204 }, { "epoch": 2.248832526455593, "grad_norm": 5.859393119812012, "learning_rate": 5.7884530286713687e-05, "loss": 2.355010223388672, "memory(GiB)": 72.85, "step": 52490, "token_acc": 0.5125348189415042, "train_speed(iter/s)": 0.672211 }, { "epoch": 2.2490467417848423, "grad_norm": 4.520985126495361, "learning_rate": 5.787788464136403e-05, "loss": 2.3690780639648437, "memory(GiB)": 72.85, "step": 52495, "token_acc": 0.46308724832214765, "train_speed(iter/s)": 0.6722 }, { "epoch": 2.249260957114091, "grad_norm": 5.127496242523193, "learning_rate": 5.787123885329913e-05, "loss": 1.9933769226074218, "memory(GiB)": 72.85, "step": 52500, "token_acc": 0.5550847457627118, "train_speed(iter/s)": 0.67221 }, { "epoch": 2.249260957114091, "eval_loss": 2.165433406829834, "eval_runtime": 15.4427, "eval_samples_per_second": 6.476, "eval_steps_per_second": 6.476, "eval_token_acc": 0.4963768115942029, "step": 52500 }, { "epoch": 2.24947517244334, "grad_norm": 5.453026294708252, "learning_rate": 5.786459292263934e-05, "loss": 2.2700267791748048, "memory(GiB)": 72.85, "step": 52505, "token_acc": 0.4968036529680365, "train_speed(iter/s)": 0.672051 }, { "epoch": 2.249689387772589, "grad_norm": 4.259263038635254, "learning_rate": 5.785794684950506e-05, "loss": 2.3453060150146485, "memory(GiB)": 72.85, "step": 52510, "token_acc": 0.5125786163522013, "train_speed(iter/s)": 0.672074 }, { "epoch": 2.249903603101838, "grad_norm": 4.777592658996582, "learning_rate": 5.7851300634016724e-05, "loss": 2.3935131072998046, "memory(GiB)": 72.85, "step": 52515, "token_acc": 0.4837758112094395, "train_speed(iter/s)": 0.672081 }, { "epoch": 2.250117818431087, "grad_norm": 6.82053804397583, "learning_rate": 5.784465427629469e-05, "loss": 2.3071556091308594, "memory(GiB)": 72.85, "step": 52520, "token_acc": 
0.49173553719008267, "train_speed(iter/s)": 0.672083 }, { "epoch": 2.250332033760336, "grad_norm": 3.9764978885650635, "learning_rate": 5.783800777645939e-05, "loss": 1.8915332794189452, "memory(GiB)": 72.85, "step": 52525, "token_acc": 0.5427631578947368, "train_speed(iter/s)": 0.672076 }, { "epoch": 2.250546249089585, "grad_norm": 4.849264621734619, "learning_rate": 5.783136113463125e-05, "loss": 2.223338317871094, "memory(GiB)": 72.85, "step": 52530, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.67209 }, { "epoch": 2.2507604644188337, "grad_norm": 4.4239373207092285, "learning_rate": 5.782471435093063e-05, "loss": 2.336584281921387, "memory(GiB)": 72.85, "step": 52535, "token_acc": 0.4901315789473684, "train_speed(iter/s)": 0.672087 }, { "epoch": 2.250974679748083, "grad_norm": 4.522926330566406, "learning_rate": 5.7818067425477976e-05, "loss": 2.1103500366210937, "memory(GiB)": 72.85, "step": 52540, "token_acc": 0.5427509293680297, "train_speed(iter/s)": 0.672073 }, { "epoch": 2.2511888950773318, "grad_norm": 6.123746395111084, "learning_rate": 5.781142035839371e-05, "loss": 2.1502553939819338, "memory(GiB)": 72.85, "step": 52545, "token_acc": 0.4878048780487805, "train_speed(iter/s)": 0.672068 }, { "epoch": 2.2514031104065806, "grad_norm": 5.8464035987854, "learning_rate": 5.7804773149798216e-05, "loss": 2.3494401931762696, "memory(GiB)": 72.85, "step": 52550, "token_acc": 0.5090252707581228, "train_speed(iter/s)": 0.672053 }, { "epoch": 2.25161732573583, "grad_norm": 4.608771324157715, "learning_rate": 5.7798125799811944e-05, "loss": 2.214288330078125, "memory(GiB)": 72.85, "step": 52555, "token_acc": 0.5083612040133779, "train_speed(iter/s)": 0.672063 }, { "epoch": 2.2518315410650787, "grad_norm": 5.676033973693848, "learning_rate": 5.77914783085553e-05, "loss": 2.3050918579101562, "memory(GiB)": 72.85, "step": 52560, "token_acc": 0.5300353356890459, "train_speed(iter/s)": 0.672074 }, { "epoch": 2.2520457563943275, "grad_norm": 5.464991569519043, 
"learning_rate": 5.778483067614874e-05, "loss": 1.8887517929077149, "memory(GiB)": 72.85, "step": 52565, "token_acc": 0.6123348017621145, "train_speed(iter/s)": 0.672061 }, { "epoch": 2.2522599717235767, "grad_norm": 5.119439125061035, "learning_rate": 5.7778182902712644e-05, "loss": 2.168185806274414, "memory(GiB)": 72.85, "step": 52570, "token_acc": 0.5143884892086331, "train_speed(iter/s)": 0.672058 }, { "epoch": 2.2524741870528255, "grad_norm": 4.442297458648682, "learning_rate": 5.777153498836748e-05, "loss": 2.459794044494629, "memory(GiB)": 72.85, "step": 52575, "token_acc": 0.5081967213114754, "train_speed(iter/s)": 0.672053 }, { "epoch": 2.2526884023820744, "grad_norm": 5.477285385131836, "learning_rate": 5.776488693323366e-05, "loss": 2.119708251953125, "memory(GiB)": 72.85, "step": 52580, "token_acc": 0.5427350427350427, "train_speed(iter/s)": 0.672058 }, { "epoch": 2.2529026177113236, "grad_norm": 5.661139011383057, "learning_rate": 5.775823873743165e-05, "loss": 2.3969490051269533, "memory(GiB)": 72.85, "step": 52585, "token_acc": 0.46226415094339623, "train_speed(iter/s)": 0.672047 }, { "epoch": 2.2531168330405724, "grad_norm": 4.867186546325684, "learning_rate": 5.775159040108185e-05, "loss": 2.378061294555664, "memory(GiB)": 72.85, "step": 52590, "token_acc": 0.4826388888888889, "train_speed(iter/s)": 0.672046 }, { "epoch": 2.2533310483698212, "grad_norm": 4.861160755157471, "learning_rate": 5.7744941924304716e-05, "loss": 2.4243019104003904, "memory(GiB)": 72.85, "step": 52595, "token_acc": 0.5159010600706714, "train_speed(iter/s)": 0.672041 }, { "epoch": 2.2535452636990705, "grad_norm": 4.8076043128967285, "learning_rate": 5.77382933072207e-05, "loss": 2.123159408569336, "memory(GiB)": 72.85, "step": 52600, "token_acc": 0.5317725752508361, "train_speed(iter/s)": 0.672049 }, { "epoch": 2.2537594790283193, "grad_norm": 4.91534948348999, "learning_rate": 5.773164454995026e-05, "loss": 2.308019256591797, "memory(GiB)": 72.85, "step": 52605, 
"token_acc": 0.5077519379844961, "train_speed(iter/s)": 0.672039 }, { "epoch": 2.253973694357568, "grad_norm": 4.473939418792725, "learning_rate": 5.77249956526138e-05, "loss": 2.2185686111450194, "memory(GiB)": 72.85, "step": 52610, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.672032 }, { "epoch": 2.2541879096868174, "grad_norm": 5.358762741088867, "learning_rate": 5.7718346615331806e-05, "loss": 2.389057922363281, "memory(GiB)": 72.85, "step": 52615, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.672027 }, { "epoch": 2.254402125016066, "grad_norm": 6.784146785736084, "learning_rate": 5.771169743822473e-05, "loss": 1.7654289245605468, "memory(GiB)": 72.85, "step": 52620, "token_acc": 0.6022727272727273, "train_speed(iter/s)": 0.672032 }, { "epoch": 2.254616340345315, "grad_norm": 5.059337615966797, "learning_rate": 5.770504812141301e-05, "loss": 2.2983604431152345, "memory(GiB)": 72.85, "step": 52625, "token_acc": 0.4726027397260274, "train_speed(iter/s)": 0.672038 }, { "epoch": 2.2548305556745643, "grad_norm": 4.997026443481445, "learning_rate": 5.7698398665017104e-05, "loss": 2.1980430603027346, "memory(GiB)": 72.85, "step": 52630, "token_acc": 0.5408805031446541, "train_speed(iter/s)": 0.672011 }, { "epoch": 2.255044771003813, "grad_norm": 3.7772207260131836, "learning_rate": 5.7691749069157505e-05, "loss": 1.9129203796386718, "memory(GiB)": 72.85, "step": 52635, "token_acc": 0.5465116279069767, "train_speed(iter/s)": 0.67201 }, { "epoch": 2.255258986333062, "grad_norm": 3.9179787635803223, "learning_rate": 5.768509933395465e-05, "loss": 2.286896514892578, "memory(GiB)": 72.85, "step": 52640, "token_acc": 0.5165745856353591, "train_speed(iter/s)": 0.672015 }, { "epoch": 2.255473201662311, "grad_norm": 4.160131454467773, "learning_rate": 5.7678449459529015e-05, "loss": 2.221518898010254, "memory(GiB)": 72.85, "step": 52645, "token_acc": 0.540268456375839, "train_speed(iter/s)": 0.67201 }, { "epoch": 2.25568741699156, "grad_norm": 
4.58536434173584, "learning_rate": 5.7671799446001075e-05, "loss": 2.179559326171875, "memory(GiB)": 72.85, "step": 52650, "token_acc": 0.5015105740181269, "train_speed(iter/s)": 0.672006 }, { "epoch": 2.2559016323208088, "grad_norm": 4.9735212326049805, "learning_rate": 5.766514929349126e-05, "loss": 2.4267349243164062, "memory(GiB)": 72.85, "step": 52655, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.67201 }, { "epoch": 2.256115847650058, "grad_norm": 5.895115375518799, "learning_rate": 5.7658499002120104e-05, "loss": 2.4006500244140625, "memory(GiB)": 72.85, "step": 52660, "token_acc": 0.5304659498207885, "train_speed(iter/s)": 0.672021 }, { "epoch": 2.256330062979307, "grad_norm": 5.747427463531494, "learning_rate": 5.765184857200804e-05, "loss": 2.37746524810791, "memory(GiB)": 72.85, "step": 52665, "token_acc": 0.5145631067961165, "train_speed(iter/s)": 0.672028 }, { "epoch": 2.2565442783085556, "grad_norm": 5.57724142074585, "learning_rate": 5.764519800327556e-05, "loss": 2.367789459228516, "memory(GiB)": 72.85, "step": 52670, "token_acc": 0.48322147651006714, "train_speed(iter/s)": 0.672014 }, { "epoch": 2.256758493637805, "grad_norm": 5.326210021972656, "learning_rate": 5.7638547296043154e-05, "loss": 1.9303874969482422, "memory(GiB)": 72.85, "step": 52675, "token_acc": 0.5578512396694215, "train_speed(iter/s)": 0.672015 }, { "epoch": 2.2569727089670537, "grad_norm": 4.7107462882995605, "learning_rate": 5.76318964504313e-05, "loss": 2.1494068145751952, "memory(GiB)": 72.85, "step": 52680, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.672019 }, { "epoch": 2.2571869242963025, "grad_norm": 4.825115203857422, "learning_rate": 5.7625245466560474e-05, "loss": 2.066145324707031, "memory(GiB)": 72.85, "step": 52685, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.672013 }, { "epoch": 2.257401139625552, "grad_norm": 4.220369338989258, "learning_rate": 5.761859434455118e-05, "loss": 1.935481071472168, "memory(GiB)": 72.85, "step": 
52690, "token_acc": 0.5709342560553633, "train_speed(iter/s)": 0.672008 }, { "epoch": 2.2576153549548006, "grad_norm": 4.436008453369141, "learning_rate": 5.761194308452389e-05, "loss": 2.124817657470703, "memory(GiB)": 72.85, "step": 52695, "token_acc": 0.5612903225806452, "train_speed(iter/s)": 0.672019 }, { "epoch": 2.2578295702840494, "grad_norm": 3.705713987350464, "learning_rate": 5.760529168659912e-05, "loss": 2.192523384094238, "memory(GiB)": 72.85, "step": 52700, "token_acc": 0.4924924924924925, "train_speed(iter/s)": 0.672042 }, { "epoch": 2.2580437856132987, "grad_norm": 5.983484268188477, "learning_rate": 5.759864015089735e-05, "loss": 2.0584636688232423, "memory(GiB)": 72.85, "step": 52705, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672064 }, { "epoch": 2.2582580009425475, "grad_norm": 8.215082168579102, "learning_rate": 5.7591988477539104e-05, "loss": 2.316047477722168, "memory(GiB)": 72.85, "step": 52710, "token_acc": 0.4978165938864629, "train_speed(iter/s)": 0.672071 }, { "epoch": 2.2584722162717963, "grad_norm": 4.2845869064331055, "learning_rate": 5.758533666664485e-05, "loss": 2.129566764831543, "memory(GiB)": 72.85, "step": 52715, "token_acc": 0.5241635687732342, "train_speed(iter/s)": 0.672075 }, { "epoch": 2.2586864316010455, "grad_norm": 3.9202959537506104, "learning_rate": 5.757868471833512e-05, "loss": 2.4482706069946287, "memory(GiB)": 72.85, "step": 52720, "token_acc": 0.4502923976608187, "train_speed(iter/s)": 0.672068 }, { "epoch": 2.2589006469302944, "grad_norm": 5.2853312492370605, "learning_rate": 5.757203263273039e-05, "loss": 2.251236343383789, "memory(GiB)": 72.85, "step": 52725, "token_acc": 0.5100671140939598, "train_speed(iter/s)": 0.672067 }, { "epoch": 2.259114862259543, "grad_norm": 4.168352127075195, "learning_rate": 5.756538040995119e-05, "loss": 1.9047185897827148, "memory(GiB)": 72.85, "step": 52730, "token_acc": 0.5614035087719298, "train_speed(iter/s)": 0.672068 }, { "epoch": 2.2593290775887924, 
"grad_norm": 4.31953239440918, "learning_rate": 5.7558728050118036e-05, "loss": 2.2316877365112306, "memory(GiB)": 72.85, "step": 52735, "token_acc": 0.5212765957446809, "train_speed(iter/s)": 0.672073 }, { "epoch": 2.2595432929180412, "grad_norm": 3.7785680294036865, "learning_rate": 5.755207555335142e-05, "loss": 2.393620491027832, "memory(GiB)": 72.85, "step": 52740, "token_acc": 0.4882154882154882, "train_speed(iter/s)": 0.672066 }, { "epoch": 2.25975750824729, "grad_norm": 5.760395526885986, "learning_rate": 5.7545422919771874e-05, "loss": 2.4152191162109373, "memory(GiB)": 72.85, "step": 52745, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.67207 }, { "epoch": 2.2599717235765393, "grad_norm": 6.204463005065918, "learning_rate": 5.753877014949992e-05, "loss": 2.273807144165039, "memory(GiB)": 72.85, "step": 52750, "token_acc": 0.5327868852459017, "train_speed(iter/s)": 0.672072 }, { "epoch": 2.260185938905788, "grad_norm": 6.3552327156066895, "learning_rate": 5.753211724265606e-05, "loss": 2.2631389617919924, "memory(GiB)": 72.85, "step": 52755, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.672079 }, { "epoch": 2.260400154235037, "grad_norm": 6.416165351867676, "learning_rate": 5.7525464199360844e-05, "loss": 2.320986747741699, "memory(GiB)": 72.85, "step": 52760, "token_acc": 0.524, "train_speed(iter/s)": 0.672086 }, { "epoch": 2.260614369564286, "grad_norm": 5.216601371765137, "learning_rate": 5.751881101973479e-05, "loss": 2.4157238006591797, "memory(GiB)": 72.85, "step": 52765, "token_acc": 0.4600760456273764, "train_speed(iter/s)": 0.672086 }, { "epoch": 2.260828584893535, "grad_norm": 3.842609167098999, "learning_rate": 5.75121577038984e-05, "loss": 2.0380575180053713, "memory(GiB)": 72.85, "step": 52770, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.672103 }, { "epoch": 2.261042800222784, "grad_norm": 4.342744827270508, "learning_rate": 5.750550425197224e-05, "loss": 2.4451026916503906, "memory(GiB)": 72.85, "step": 
52775, "token_acc": 0.4954954954954955, "train_speed(iter/s)": 0.672112 }, { "epoch": 2.261257015552033, "grad_norm": 5.880329132080078, "learning_rate": 5.749885066407683e-05, "loss": 2.5666530609130858, "memory(GiB)": 72.85, "step": 52780, "token_acc": 0.4386503067484663, "train_speed(iter/s)": 0.672114 }, { "epoch": 2.261471230881282, "grad_norm": 4.309525966644287, "learning_rate": 5.74921969403327e-05, "loss": 1.9764122009277343, "memory(GiB)": 72.85, "step": 52785, "token_acc": 0.5559701492537313, "train_speed(iter/s)": 0.672112 }, { "epoch": 2.2616854462105307, "grad_norm": 3.3641014099121094, "learning_rate": 5.74855430808604e-05, "loss": 2.1173513412475584, "memory(GiB)": 72.85, "step": 52790, "token_acc": 0.51953125, "train_speed(iter/s)": 0.672112 }, { "epoch": 2.26189966153978, "grad_norm": 5.2220139503479, "learning_rate": 5.7478889085780476e-05, "loss": 2.2504756927490233, "memory(GiB)": 72.85, "step": 52795, "token_acc": 0.5342019543973942, "train_speed(iter/s)": 0.672104 }, { "epoch": 2.2621138768690288, "grad_norm": 6.4711480140686035, "learning_rate": 5.7472234955213435e-05, "loss": 2.300008773803711, "memory(GiB)": 72.85, "step": 52800, "token_acc": 0.52, "train_speed(iter/s)": 0.672105 }, { "epoch": 2.2623280921982776, "grad_norm": 4.771026134490967, "learning_rate": 5.7465580689279864e-05, "loss": 2.2942710876464845, "memory(GiB)": 72.85, "step": 52805, "token_acc": 0.4942528735632184, "train_speed(iter/s)": 0.672117 }, { "epoch": 2.262542307527527, "grad_norm": 3.5826563835144043, "learning_rate": 5.745892628810029e-05, "loss": 2.322382354736328, "memory(GiB)": 72.85, "step": 52810, "token_acc": 0.48253968253968255, "train_speed(iter/s)": 0.67211 }, { "epoch": 2.2627565228567756, "grad_norm": 5.390227794647217, "learning_rate": 5.745227175179526e-05, "loss": 2.501831817626953, "memory(GiB)": 72.85, "step": 52815, "token_acc": 0.4618055555555556, "train_speed(iter/s)": 0.672126 }, { "epoch": 2.2629707381860245, "grad_norm": 6.5493268966674805, 
"learning_rate": 5.744561708048536e-05, "loss": 2.647978973388672, "memory(GiB)": 72.85, "step": 52820, "token_acc": 0.43666666666666665, "train_speed(iter/s)": 0.672131 }, { "epoch": 2.2631849535152737, "grad_norm": 5.31561279296875, "learning_rate": 5.743896227429111e-05, "loss": 2.325815963745117, "memory(GiB)": 72.85, "step": 52825, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.67214 }, { "epoch": 2.2633991688445225, "grad_norm": 4.903266429901123, "learning_rate": 5.743230733333307e-05, "loss": 2.4114364624023437, "memory(GiB)": 72.85, "step": 52830, "token_acc": 0.4876712328767123, "train_speed(iter/s)": 0.672146 }, { "epoch": 2.2636133841737713, "grad_norm": 4.384224891662598, "learning_rate": 5.7425652257731834e-05, "loss": 2.250619888305664, "memory(GiB)": 72.85, "step": 52835, "token_acc": 0.48579545454545453, "train_speed(iter/s)": 0.672137 }, { "epoch": 2.2638275995030206, "grad_norm": 5.1934614181518555, "learning_rate": 5.741899704760791e-05, "loss": 2.1252281188964846, "memory(GiB)": 72.85, "step": 52840, "token_acc": 0.5401459854014599, "train_speed(iter/s)": 0.67214 }, { "epoch": 2.2640418148322694, "grad_norm": 4.366196155548096, "learning_rate": 5.741234170308193e-05, "loss": 2.0872194290161135, "memory(GiB)": 72.85, "step": 52845, "token_acc": 0.56, "train_speed(iter/s)": 0.672137 }, { "epoch": 2.264256030161518, "grad_norm": 4.553890705108643, "learning_rate": 5.74056862242744e-05, "loss": 2.0232561111450194, "memory(GiB)": 72.85, "step": 52850, "token_acc": 0.551094890510949, "train_speed(iter/s)": 0.67214 }, { "epoch": 2.2644702454907675, "grad_norm": 4.629136562347412, "learning_rate": 5.7399030611305913e-05, "loss": 2.608780860900879, "memory(GiB)": 72.85, "step": 52855, "token_acc": 0.43934426229508194, "train_speed(iter/s)": 0.672139 }, { "epoch": 2.2646844608200163, "grad_norm": 4.375909328460693, "learning_rate": 5.739237486429707e-05, "loss": 2.6690700531005858, "memory(GiB)": 72.85, "step": 52860, "token_acc": 
0.46283783783783783, "train_speed(iter/s)": 0.672134 }, { "epoch": 2.264898676149265, "grad_norm": 4.567704677581787, "learning_rate": 5.738571898336842e-05, "loss": 2.3091266632080076, "memory(GiB)": 72.85, "step": 52865, "token_acc": 0.484472049689441, "train_speed(iter/s)": 0.672136 }, { "epoch": 2.2651128914785144, "grad_norm": 4.475961685180664, "learning_rate": 5.737906296864053e-05, "loss": 1.9938861846923828, "memory(GiB)": 72.85, "step": 52870, "token_acc": 0.5258964143426295, "train_speed(iter/s)": 0.672148 }, { "epoch": 2.265327106807763, "grad_norm": 4.986307621002197, "learning_rate": 5.737240682023399e-05, "loss": 2.34361572265625, "memory(GiB)": 72.85, "step": 52875, "token_acc": 0.5016501650165016, "train_speed(iter/s)": 0.672159 }, { "epoch": 2.265541322137012, "grad_norm": 4.845949649810791, "learning_rate": 5.73657505382694e-05, "loss": 2.184628486633301, "memory(GiB)": 72.85, "step": 52880, "token_acc": 0.5355648535564853, "train_speed(iter/s)": 0.672144 }, { "epoch": 2.2657555374662612, "grad_norm": 4.265414237976074, "learning_rate": 5.735909412286731e-05, "loss": 2.323151969909668, "memory(GiB)": 72.85, "step": 52885, "token_acc": 0.5212121212121212, "train_speed(iter/s)": 0.672148 }, { "epoch": 2.26596975279551, "grad_norm": 5.091641902923584, "learning_rate": 5.735243757414833e-05, "loss": 2.2437871932983398, "memory(GiB)": 72.85, "step": 52890, "token_acc": 0.4605678233438486, "train_speed(iter/s)": 0.67214 }, { "epoch": 2.266183968124759, "grad_norm": 4.358504295349121, "learning_rate": 5.734578089223306e-05, "loss": 2.454732322692871, "memory(GiB)": 72.85, "step": 52895, "token_acc": 0.4746268656716418, "train_speed(iter/s)": 0.672159 }, { "epoch": 2.266398183454008, "grad_norm": 4.316433429718018, "learning_rate": 5.7339124077242066e-05, "loss": 1.986138916015625, "memory(GiB)": 72.85, "step": 52900, "token_acc": 0.5330882352941176, "train_speed(iter/s)": 0.672159 }, { "epoch": 2.266612398783257, "grad_norm": 4.622542858123779, 
"learning_rate": 5.7332467129295964e-05, "loss": 2.1909582138061525, "memory(GiB)": 72.85, "step": 52905, "token_acc": 0.5369127516778524, "train_speed(iter/s)": 0.672166 }, { "epoch": 2.2668266141125057, "grad_norm": 5.852917194366455, "learning_rate": 5.732581004851534e-05, "loss": 2.043509292602539, "memory(GiB)": 72.85, "step": 52910, "token_acc": 0.5089285714285714, "train_speed(iter/s)": 0.672168 }, { "epoch": 2.267040829441755, "grad_norm": 4.314897060394287, "learning_rate": 5.731915283502079e-05, "loss": 2.184840774536133, "memory(GiB)": 72.85, "step": 52915, "token_acc": 0.5156794425087108, "train_speed(iter/s)": 0.672154 }, { "epoch": 2.267255044771004, "grad_norm": 5.148913383483887, "learning_rate": 5.731249548893291e-05, "loss": 2.404551887512207, "memory(GiB)": 72.85, "step": 52920, "token_acc": 0.46953405017921146, "train_speed(iter/s)": 0.67216 }, { "epoch": 2.2674692601002526, "grad_norm": 5.058510780334473, "learning_rate": 5.730583801037234e-05, "loss": 2.0544435501098635, "memory(GiB)": 72.85, "step": 52925, "token_acc": 0.5418181818181819, "train_speed(iter/s)": 0.672167 }, { "epoch": 2.267683475429502, "grad_norm": 5.9580159187316895, "learning_rate": 5.729918039945963e-05, "loss": 2.1215085983276367, "memory(GiB)": 72.85, "step": 52930, "token_acc": 0.5647840531561462, "train_speed(iter/s)": 0.67217 }, { "epoch": 2.2678976907587507, "grad_norm": 5.252384185791016, "learning_rate": 5.729252265631545e-05, "loss": 2.4932523727416993, "memory(GiB)": 72.85, "step": 52935, "token_acc": 0.4983164983164983, "train_speed(iter/s)": 0.672165 }, { "epoch": 2.2681119060879995, "grad_norm": 5.104426383972168, "learning_rate": 5.728586478106037e-05, "loss": 2.206962013244629, "memory(GiB)": 72.85, "step": 52940, "token_acc": 0.5055762081784386, "train_speed(iter/s)": 0.672167 }, { "epoch": 2.2683261214172488, "grad_norm": 4.40615701675415, "learning_rate": 5.727920677381501e-05, "loss": 2.4917768478393554, "memory(GiB)": 72.85, "step": 52945, "token_acc": 
0.46774193548387094, "train_speed(iter/s)": 0.672173 }, { "epoch": 2.2685403367464976, "grad_norm": 4.656018257141113, "learning_rate": 5.72725486347e-05, "loss": 2.062417411804199, "memory(GiB)": 72.85, "step": 52950, "token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.672172 }, { "epoch": 2.2687545520757464, "grad_norm": 4.436285018920898, "learning_rate": 5.726589036383594e-05, "loss": 2.571396064758301, "memory(GiB)": 72.85, "step": 52955, "token_acc": 0.48917748917748916, "train_speed(iter/s)": 0.672187 }, { "epoch": 2.2689687674049956, "grad_norm": 5.224911212921143, "learning_rate": 5.725923196134345e-05, "loss": 2.6043779373168947, "memory(GiB)": 72.85, "step": 52960, "token_acc": 0.46846846846846846, "train_speed(iter/s)": 0.672155 }, { "epoch": 2.2691829827342445, "grad_norm": 4.245809078216553, "learning_rate": 5.725257342734318e-05, "loss": 2.129447364807129, "memory(GiB)": 72.85, "step": 52965, "token_acc": 0.5046439628482973, "train_speed(iter/s)": 0.672169 }, { "epoch": 2.2693971980634933, "grad_norm": 5.597237586975098, "learning_rate": 5.7245914761955744e-05, "loss": 2.1203739166259767, "memory(GiB)": 72.85, "step": 52970, "token_acc": 0.5381944444444444, "train_speed(iter/s)": 0.672189 }, { "epoch": 2.2696114133927425, "grad_norm": 5.009825229644775, "learning_rate": 5.7239255965301755e-05, "loss": 2.3421144485473633, "memory(GiB)": 72.85, "step": 52975, "token_acc": 0.5055555555555555, "train_speed(iter/s)": 0.672188 }, { "epoch": 2.2698256287219913, "grad_norm": 5.265471458435059, "learning_rate": 5.723259703750186e-05, "loss": 2.2725467681884766, "memory(GiB)": 72.85, "step": 52980, "token_acc": 0.5046439628482973, "train_speed(iter/s)": 0.672198 }, { "epoch": 2.27003984405124, "grad_norm": 4.734911918640137, "learning_rate": 5.722593797867667e-05, "loss": 2.168401336669922, "memory(GiB)": 72.85, "step": 52985, "token_acc": 0.5563380281690141, "train_speed(iter/s)": 0.672208 }, { "epoch": 2.2702540593804894, "grad_norm": 
3.4674952030181885, "learning_rate": 5.721927878894685e-05, "loss": 2.2134618759155273, "memory(GiB)": 72.85, "step": 52990, "token_acc": 0.5170278637770898, "train_speed(iter/s)": 0.672226 }, { "epoch": 2.270468274709738, "grad_norm": 3.586209297180176, "learning_rate": 5.721261946843302e-05, "loss": 2.3015933990478517, "memory(GiB)": 72.85, "step": 52995, "token_acc": 0.505464480874317, "train_speed(iter/s)": 0.672229 }, { "epoch": 2.270682490038987, "grad_norm": 4.593877792358398, "learning_rate": 5.72059600172558e-05, "loss": 1.9980733871459961, "memory(GiB)": 72.85, "step": 53000, "token_acc": 0.5956521739130435, "train_speed(iter/s)": 0.672225 }, { "epoch": 2.270682490038987, "eval_loss": 2.0613927841186523, "eval_runtime": 15.1221, "eval_samples_per_second": 6.613, "eval_steps_per_second": 6.613, "eval_token_acc": 0.5013661202185792, "step": 53000 }, { "epoch": 2.2708967053682363, "grad_norm": 6.034297943115234, "learning_rate": 5.7199300435535884e-05, "loss": 2.274186134338379, "memory(GiB)": 72.85, "step": 53005, "token_acc": 0.5014836795252225, "train_speed(iter/s)": 0.672075 }, { "epoch": 2.271110920697485, "grad_norm": 3.7537152767181396, "learning_rate": 5.7192640723393874e-05, "loss": 2.607348823547363, "memory(GiB)": 72.85, "step": 53010, "token_acc": 0.4790996784565916, "train_speed(iter/s)": 0.672087 }, { "epoch": 2.271325136026734, "grad_norm": 5.656817436218262, "learning_rate": 5.7185980880950426e-05, "loss": 2.330510711669922, "memory(GiB)": 72.85, "step": 53015, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.672081 }, { "epoch": 2.271539351355983, "grad_norm": 3.7774667739868164, "learning_rate": 5.717932090832621e-05, "loss": 2.2381866455078123, "memory(GiB)": 72.85, "step": 53020, "token_acc": 0.4796747967479675, "train_speed(iter/s)": 0.672093 }, { "epoch": 2.271753566685232, "grad_norm": 5.860706329345703, "learning_rate": 5.7172660805641855e-05, "loss": 2.1506235122680666, "memory(GiB)": 72.85, "step": 53025, "token_acc": 
0.5046728971962616, "train_speed(iter/s)": 0.6721 }, { "epoch": 2.271967782014481, "grad_norm": 4.21579122543335, "learning_rate": 5.716600057301802e-05, "loss": 2.2412609100341796, "memory(GiB)": 72.85, "step": 53030, "token_acc": 0.5337837837837838, "train_speed(iter/s)": 0.672109 }, { "epoch": 2.27218199734373, "grad_norm": 6.2278056144714355, "learning_rate": 5.7159340210575355e-05, "loss": 2.426173782348633, "memory(GiB)": 72.85, "step": 53035, "token_acc": 0.5286195286195287, "train_speed(iter/s)": 0.672125 }, { "epoch": 2.272396212672979, "grad_norm": 4.25550651550293, "learning_rate": 5.715267971843453e-05, "loss": 2.3571964263916017, "memory(GiB)": 72.85, "step": 53040, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.672135 }, { "epoch": 2.2726104280022277, "grad_norm": 4.834414958953857, "learning_rate": 5.71460190967162e-05, "loss": 2.06502685546875, "memory(GiB)": 72.85, "step": 53045, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.672145 }, { "epoch": 2.272824643331477, "grad_norm": 5.848574161529541, "learning_rate": 5.713935834554104e-05, "loss": 1.9991889953613282, "memory(GiB)": 72.85, "step": 53050, "token_acc": 0.538135593220339, "train_speed(iter/s)": 0.672148 }, { "epoch": 2.2730388586607257, "grad_norm": 8.688735961914062, "learning_rate": 5.713269746502971e-05, "loss": 2.3672369003295897, "memory(GiB)": 72.85, "step": 53055, "token_acc": 0.479020979020979, "train_speed(iter/s)": 0.672159 }, { "epoch": 2.2732530739899746, "grad_norm": 6.900572776794434, "learning_rate": 5.712736866757967e-05, "loss": 2.381048011779785, "memory(GiB)": 72.85, "step": 53060, "token_acc": 0.48518518518518516, "train_speed(iter/s)": 0.672173 }, { "epoch": 2.273467289319224, "grad_norm": 6.025215148925781, "learning_rate": 5.7120707554567306e-05, "loss": 2.2639509201049806, "memory(GiB)": 72.85, "step": 53065, "token_acc": 0.5222672064777328, "train_speed(iter/s)": 0.672157 }, { "epoch": 2.2736815046484726, "grad_norm": 6.137883186340332, 
"learning_rate": 5.7114046312556636e-05, "loss": 2.637801170349121, "memory(GiB)": 72.85, "step": 53070, "token_acc": 0.45918367346938777, "train_speed(iter/s)": 0.672152 }, { "epoch": 2.2738957199777214, "grad_norm": 4.585981369018555, "learning_rate": 5.710738494166835e-05, "loss": 2.422987937927246, "memory(GiB)": 72.85, "step": 53075, "token_acc": 0.4983922829581994, "train_speed(iter/s)": 0.672141 }, { "epoch": 2.2741099353069707, "grad_norm": 4.423504829406738, "learning_rate": 5.710072344202312e-05, "loss": 2.2922487258911133, "memory(GiB)": 72.85, "step": 53080, "token_acc": 0.5165562913907285, "train_speed(iter/s)": 0.672138 }, { "epoch": 2.2743241506362195, "grad_norm": 5.117640018463135, "learning_rate": 5.7094061813741604e-05, "loss": 2.4919158935546877, "memory(GiB)": 72.85, "step": 53085, "token_acc": 0.5495867768595041, "train_speed(iter/s)": 0.672153 }, { "epoch": 2.2745383659654683, "grad_norm": 6.206614971160889, "learning_rate": 5.7087400056944526e-05, "loss": 1.6828018188476563, "memory(GiB)": 72.85, "step": 53090, "token_acc": 0.5964125560538116, "train_speed(iter/s)": 0.672159 }, { "epoch": 2.2747525812947176, "grad_norm": 5.4859724044799805, "learning_rate": 5.708073817175256e-05, "loss": 2.2029632568359374, "memory(GiB)": 72.85, "step": 53095, "token_acc": 0.5331412103746398, "train_speed(iter/s)": 0.672136 }, { "epoch": 2.2749667966239664, "grad_norm": 7.767579555511475, "learning_rate": 5.707407615828635e-05, "loss": 2.1461765289306642, "memory(GiB)": 72.85, "step": 53100, "token_acc": 0.5402298850574713, "train_speed(iter/s)": 0.672128 }, { "epoch": 2.275181011953215, "grad_norm": 4.648700714111328, "learning_rate": 5.7067414016666634e-05, "loss": 2.2586082458496093, "memory(GiB)": 72.85, "step": 53105, "token_acc": 0.5143769968051118, "train_speed(iter/s)": 0.672137 }, { "epoch": 2.2753952272824645, "grad_norm": 4.02410364151001, "learning_rate": 5.7060751747014064e-05, "loss": 2.1200551986694336, "memory(GiB)": 72.85, "step": 53110, 
"token_acc": 0.5531914893617021, "train_speed(iter/s)": 0.672148 }, { "epoch": 2.2756094426117133, "grad_norm": 4.403312683105469, "learning_rate": 5.705408934944936e-05, "loss": 2.203253173828125, "memory(GiB)": 72.85, "step": 53115, "token_acc": 0.52, "train_speed(iter/s)": 0.67216 }, { "epoch": 2.275823657940962, "grad_norm": 6.150509357452393, "learning_rate": 5.704742682409322e-05, "loss": 2.410432052612305, "memory(GiB)": 72.85, "step": 53120, "token_acc": 0.5058139534883721, "train_speed(iter/s)": 0.672169 }, { "epoch": 2.2760378732702113, "grad_norm": 6.722238063812256, "learning_rate": 5.704076417106632e-05, "loss": 2.009780502319336, "memory(GiB)": 72.85, "step": 53125, "token_acc": 0.5684210526315789, "train_speed(iter/s)": 0.672171 }, { "epoch": 2.27625208859946, "grad_norm": 4.607746601104736, "learning_rate": 5.703410139048938e-05, "loss": 2.0212896347045897, "memory(GiB)": 72.85, "step": 53130, "token_acc": 0.5505226480836237, "train_speed(iter/s)": 0.672165 }, { "epoch": 2.276466303928709, "grad_norm": 5.037295818328857, "learning_rate": 5.7027438482483105e-05, "loss": 2.140945053100586, "memory(GiB)": 72.85, "step": 53135, "token_acc": 0.5099337748344371, "train_speed(iter/s)": 0.672174 }, { "epoch": 2.2766805192579582, "grad_norm": 5.715693950653076, "learning_rate": 5.7020775447168174e-05, "loss": 2.2910022735595703, "memory(GiB)": 72.85, "step": 53140, "token_acc": 0.5290322580645161, "train_speed(iter/s)": 0.67218 }, { "epoch": 2.276894734587207, "grad_norm": 5.1263041496276855, "learning_rate": 5.701411228466531e-05, "loss": 2.1809162139892577, "memory(GiB)": 72.85, "step": 53145, "token_acc": 0.5436893203883495, "train_speed(iter/s)": 0.672175 }, { "epoch": 2.277108949916456, "grad_norm": 6.611518383026123, "learning_rate": 5.700744899509524e-05, "loss": 2.1061756134033205, "memory(GiB)": 72.85, "step": 53150, "token_acc": 0.4867924528301887, "train_speed(iter/s)": 0.672178 }, { "epoch": 2.277323165245705, "grad_norm": 4.177055358886719, 
"learning_rate": 5.700078557857862e-05, "loss": 2.3001371383666993, "memory(GiB)": 72.85, "step": 53155, "token_acc": 0.48125, "train_speed(iter/s)": 0.672182 }, { "epoch": 2.277537380574954, "grad_norm": 5.6205058097839355, "learning_rate": 5.6995454754044994e-05, "loss": 2.1164113998413088, "memory(GiB)": 72.85, "step": 53160, "token_acc": 0.5525423728813559, "train_speed(iter/s)": 0.67219 }, { "epoch": 2.2777515959042027, "grad_norm": 4.316024303436279, "learning_rate": 5.698879110932887e-05, "loss": 2.242298126220703, "memory(GiB)": 72.85, "step": 53165, "token_acc": 0.5179640718562875, "train_speed(iter/s)": 0.672196 }, { "epoch": 2.277965811233452, "grad_norm": 5.152570724487305, "learning_rate": 5.698212733800424e-05, "loss": 2.1572525024414064, "memory(GiB)": 72.85, "step": 53170, "token_acc": 0.575, "train_speed(iter/s)": 0.672205 }, { "epoch": 2.278180026562701, "grad_norm": 5.113399505615234, "learning_rate": 5.697546344019184e-05, "loss": 2.0585107803344727, "memory(GiB)": 72.85, "step": 53175, "token_acc": 0.50187265917603, "train_speed(iter/s)": 0.6722 }, { "epoch": 2.2783942418919496, "grad_norm": 7.239832401275635, "learning_rate": 5.6968799416012365e-05, "loss": 2.318782615661621, "memory(GiB)": 72.85, "step": 53180, "token_acc": 0.5018587360594795, "train_speed(iter/s)": 0.672178 }, { "epoch": 2.278608457221199, "grad_norm": 7.662545204162598, "learning_rate": 5.696213526558655e-05, "loss": 2.2912195205688475, "memory(GiB)": 72.85, "step": 53185, "token_acc": 0.5034965034965035, "train_speed(iter/s)": 0.672186 }, { "epoch": 2.2788226725504477, "grad_norm": 3.571098566055298, "learning_rate": 5.695547098903513e-05, "loss": 2.188890266418457, "memory(GiB)": 72.85, "step": 53190, "token_acc": 0.5146198830409356, "train_speed(iter/s)": 0.672202 }, { "epoch": 2.2790368878796965, "grad_norm": 4.72181510925293, "learning_rate": 5.694880658647882e-05, "loss": 2.2692169189453124, "memory(GiB)": 72.85, "step": 53195, "token_acc": 0.4790996784565916, 
"train_speed(iter/s)": 0.672202 }, { "epoch": 2.2792511032089458, "grad_norm": 4.506049156188965, "learning_rate": 5.694214205803836e-05, "loss": 2.002292251586914, "memory(GiB)": 72.85, "step": 53200, "token_acc": 0.5693950177935944, "train_speed(iter/s)": 0.672197 }, { "epoch": 2.2794653185381946, "grad_norm": 4.722652912139893, "learning_rate": 5.693547740383449e-05, "loss": 2.6151687622070314, "memory(GiB)": 72.85, "step": 53205, "token_acc": 0.47278911564625853, "train_speed(iter/s)": 0.672187 }, { "epoch": 2.2796795338674434, "grad_norm": 6.0190019607543945, "learning_rate": 5.692881262398794e-05, "loss": 2.2714439392089845, "memory(GiB)": 72.85, "step": 53210, "token_acc": 0.5096774193548387, "train_speed(iter/s)": 0.672199 }, { "epoch": 2.2798937491966926, "grad_norm": 5.290778636932373, "learning_rate": 5.6922147718619456e-05, "loss": 2.0431797027587892, "memory(GiB)": 72.85, "step": 53215, "token_acc": 0.5720524017467249, "train_speed(iter/s)": 0.6722 }, { "epoch": 2.2801079645259414, "grad_norm": 6.88332462310791, "learning_rate": 5.6915482687849766e-05, "loss": 2.237101364135742, "memory(GiB)": 72.85, "step": 53220, "token_acc": 0.5285714285714286, "train_speed(iter/s)": 0.672204 }, { "epoch": 2.2803221798551903, "grad_norm": 5.403898239135742, "learning_rate": 5.690881753179962e-05, "loss": 2.1118221282958984, "memory(GiB)": 72.85, "step": 53225, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672195 }, { "epoch": 2.2805363951844395, "grad_norm": 4.530760288238525, "learning_rate": 5.690215225058976e-05, "loss": 1.9129934310913086, "memory(GiB)": 72.85, "step": 53230, "token_acc": 0.5674418604651162, "train_speed(iter/s)": 0.672198 }, { "epoch": 2.2807506105136883, "grad_norm": 5.451463222503662, "learning_rate": 5.689548684434094e-05, "loss": 2.233775520324707, "memory(GiB)": 72.85, "step": 53235, "token_acc": 0.5095057034220533, "train_speed(iter/s)": 0.672198 }, { "epoch": 2.280964825842937, "grad_norm": 4.612137794494629, "learning_rate": 
5.68888213131739e-05, "loss": 1.81630859375, "memory(GiB)": 72.85, "step": 53240, "token_acc": 0.5917602996254682, "train_speed(iter/s)": 0.672197 }, { "epoch": 2.2811790411721864, "grad_norm": 5.5300469398498535, "learning_rate": 5.6882155657209414e-05, "loss": 1.9306232452392578, "memory(GiB)": 72.85, "step": 53245, "token_acc": 0.5517241379310345, "train_speed(iter/s)": 0.672204 }, { "epoch": 2.281393256501435, "grad_norm": 3.617269277572632, "learning_rate": 5.687548987656821e-05, "loss": 2.274143600463867, "memory(GiB)": 72.85, "step": 53250, "token_acc": 0.5442622950819672, "train_speed(iter/s)": 0.672205 }, { "epoch": 2.281607471830684, "grad_norm": 3.809483766555786, "learning_rate": 5.6868823971371065e-05, "loss": 2.161701202392578, "memory(GiB)": 72.85, "step": 53255, "token_acc": 0.4811320754716981, "train_speed(iter/s)": 0.672204 }, { "epoch": 2.2818216871599333, "grad_norm": 4.218539714813232, "learning_rate": 5.6862157941738734e-05, "loss": 2.292232131958008, "memory(GiB)": 72.85, "step": 53260, "token_acc": 0.5212765957446809, "train_speed(iter/s)": 0.672219 }, { "epoch": 2.282035902489182, "grad_norm": 3.820385217666626, "learning_rate": 5.685549178779196e-05, "loss": 2.076964569091797, "memory(GiB)": 72.85, "step": 53265, "token_acc": 0.5299684542586751, "train_speed(iter/s)": 0.67222 }, { "epoch": 2.282250117818431, "grad_norm": 5.629728317260742, "learning_rate": 5.684882550965154e-05, "loss": 2.52132568359375, "memory(GiB)": 72.85, "step": 53270, "token_acc": 0.49158249158249157, "train_speed(iter/s)": 0.672205 }, { "epoch": 2.28246433314768, "grad_norm": 4.474245548248291, "learning_rate": 5.68421591074382e-05, "loss": 2.0264936447143556, "memory(GiB)": 72.85, "step": 53275, "token_acc": 0.5322033898305085, "train_speed(iter/s)": 0.672201 }, { "epoch": 2.282678548476929, "grad_norm": 5.085840702056885, "learning_rate": 5.6835492581272734e-05, "loss": 2.148261833190918, "memory(GiB)": 72.85, "step": 53280, "token_acc": 0.5306122448979592, 
"train_speed(iter/s)": 0.672207 }, { "epoch": 2.282892763806178, "grad_norm": 5.5186052322387695, "learning_rate": 5.682882593127592e-05, "loss": 2.2078435897827147, "memory(GiB)": 72.85, "step": 53285, "token_acc": 0.5055350553505535, "train_speed(iter/s)": 0.672217 }, { "epoch": 2.283106979135427, "grad_norm": 4.193479537963867, "learning_rate": 5.682215915756852e-05, "loss": 2.1810461044311524, "memory(GiB)": 72.85, "step": 53290, "token_acc": 0.5381818181818182, "train_speed(iter/s)": 0.672224 }, { "epoch": 2.283321194464676, "grad_norm": 4.441211700439453, "learning_rate": 5.68154922602713e-05, "loss": 2.043631744384766, "memory(GiB)": 72.85, "step": 53295, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.672225 }, { "epoch": 2.2835354097939247, "grad_norm": 4.4164581298828125, "learning_rate": 5.6808825239505036e-05, "loss": 2.388238525390625, "memory(GiB)": 72.85, "step": 53300, "token_acc": 0.47692307692307695, "train_speed(iter/s)": 0.672229 }, { "epoch": 2.283749625123174, "grad_norm": 6.710814952850342, "learning_rate": 5.6802158095390535e-05, "loss": 2.013887405395508, "memory(GiB)": 72.85, "step": 53305, "token_acc": 0.5708955223880597, "train_speed(iter/s)": 0.672237 }, { "epoch": 2.2839638404524227, "grad_norm": 4.551794052124023, "learning_rate": 5.679549082804853e-05, "loss": 2.260176658630371, "memory(GiB)": 72.85, "step": 53310, "token_acc": 0.5, "train_speed(iter/s)": 0.672229 }, { "epoch": 2.2841780557816715, "grad_norm": 4.631664276123047, "learning_rate": 5.678882343759986e-05, "loss": 2.269866943359375, "memory(GiB)": 72.85, "step": 53315, "token_acc": 0.5296296296296297, "train_speed(iter/s)": 0.672231 }, { "epoch": 2.284392271110921, "grad_norm": 3.3730878829956055, "learning_rate": 5.678215592416528e-05, "loss": 2.120602035522461, "memory(GiB)": 72.85, "step": 53320, "token_acc": 0.5409836065573771, "train_speed(iter/s)": 0.672236 }, { "epoch": 2.2846064864401696, "grad_norm": 6.5860795974731445, "learning_rate": 
5.6775488287865576e-05, "loss": 2.153787612915039, "memory(GiB)": 72.85, "step": 53325, "token_acc": 0.5517241379310345, "train_speed(iter/s)": 0.67221 }, { "epoch": 2.2848207017694184, "grad_norm": 6.591796398162842, "learning_rate": 5.6768820528821553e-05, "loss": 2.3426092147827147, "memory(GiB)": 72.85, "step": 53330, "token_acc": 0.4935064935064935, "train_speed(iter/s)": 0.672214 }, { "epoch": 2.2850349170986677, "grad_norm": 5.603426456451416, "learning_rate": 5.6762152647154e-05, "loss": 2.3278478622436523, "memory(GiB)": 72.85, "step": 53335, "token_acc": 0.516728624535316, "train_speed(iter/s)": 0.672207 }, { "epoch": 2.2852491324279165, "grad_norm": 6.831884860992432, "learning_rate": 5.675548464298369e-05, "loss": 2.0972515106201173, "memory(GiB)": 72.85, "step": 53340, "token_acc": 0.4698275862068966, "train_speed(iter/s)": 0.672212 }, { "epoch": 2.2854633477571653, "grad_norm": 5.363086700439453, "learning_rate": 5.6748816516431446e-05, "loss": 2.1570648193359374, "memory(GiB)": 72.85, "step": 53345, "token_acc": 0.5136986301369864, "train_speed(iter/s)": 0.672201 }, { "epoch": 2.2856775630864146, "grad_norm": 6.43380880355835, "learning_rate": 5.6742148267618065e-05, "loss": 2.320700263977051, "memory(GiB)": 72.85, "step": 53350, "token_acc": 0.5427350427350427, "train_speed(iter/s)": 0.672204 }, { "epoch": 2.2858917784156634, "grad_norm": 4.717216491699219, "learning_rate": 5.673547989666434e-05, "loss": 2.3234859466552735, "memory(GiB)": 72.85, "step": 53355, "token_acc": 0.5049180327868853, "train_speed(iter/s)": 0.672187 }, { "epoch": 2.286105993744912, "grad_norm": 6.058289527893066, "learning_rate": 5.6728811403691086e-05, "loss": 2.2646177291870115, "memory(GiB)": 72.85, "step": 53360, "token_acc": 0.5, "train_speed(iter/s)": 0.672177 }, { "epoch": 2.2863202090741614, "grad_norm": 6.362125396728516, "learning_rate": 5.67221427888191e-05, "loss": 2.0886411666870117, "memory(GiB)": 72.85, "step": 53365, "token_acc": 0.5789473684210527, 
"train_speed(iter/s)": 0.672183 }, { "epoch": 2.2865344244034103, "grad_norm": 5.6123046875, "learning_rate": 5.6715474052169184e-05, "loss": 2.2673480987548826, "memory(GiB)": 72.85, "step": 53370, "token_acc": 0.4867924528301887, "train_speed(iter/s)": 0.672185 }, { "epoch": 2.286748639732659, "grad_norm": 4.387442588806152, "learning_rate": 5.670880519386216e-05, "loss": 2.320054626464844, "memory(GiB)": 72.85, "step": 53375, "token_acc": 0.4869281045751634, "train_speed(iter/s)": 0.672184 }, { "epoch": 2.2869628550619083, "grad_norm": 4.963874816894531, "learning_rate": 5.670213621401883e-05, "loss": 2.6144939422607423, "memory(GiB)": 72.85, "step": 53380, "token_acc": 0.47416413373860183, "train_speed(iter/s)": 0.672198 }, { "epoch": 2.287177070391157, "grad_norm": 4.146560192108154, "learning_rate": 5.669546711276002e-05, "loss": 2.3797515869140624, "memory(GiB)": 72.85, "step": 53385, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.672208 }, { "epoch": 2.287391285720406, "grad_norm": 4.698003768920898, "learning_rate": 5.6688797890206545e-05, "loss": 2.3902734756469726, "memory(GiB)": 72.85, "step": 53390, "token_acc": 0.5346534653465347, "train_speed(iter/s)": 0.67221 }, { "epoch": 2.287605501049655, "grad_norm": 5.205145835876465, "learning_rate": 5.668212854647922e-05, "loss": 2.3614110946655273, "memory(GiB)": 72.85, "step": 53395, "token_acc": 0.46788990825688076, "train_speed(iter/s)": 0.672213 }, { "epoch": 2.287819716378904, "grad_norm": 4.111866474151611, "learning_rate": 5.6675459081698866e-05, "loss": 2.214913558959961, "memory(GiB)": 72.85, "step": 53400, "token_acc": 0.506578947368421, "train_speed(iter/s)": 0.672215 }, { "epoch": 2.288033931708153, "grad_norm": 4.568741321563721, "learning_rate": 5.666878949598632e-05, "loss": 2.1338407516479494, "memory(GiB)": 72.85, "step": 53405, "token_acc": 0.5, "train_speed(iter/s)": 0.672214 }, { "epoch": 2.288248147037402, "grad_norm": 5.159274578094482, "learning_rate": 5.666211978946239e-05, 
"loss": 2.4395904541015625, "memory(GiB)": 72.85, "step": 53410, "token_acc": 0.5033333333333333, "train_speed(iter/s)": 0.672207 }, { "epoch": 2.288462362366651, "grad_norm": 4.777286052703857, "learning_rate": 5.66554499622479e-05, "loss": 2.3268482208251955, "memory(GiB)": 72.85, "step": 53415, "token_acc": 0.5057803468208093, "train_speed(iter/s)": 0.672196 }, { "epoch": 2.2886765776958997, "grad_norm": 4.273813724517822, "learning_rate": 5.6648780014463695e-05, "loss": 2.3550662994384766, "memory(GiB)": 72.85, "step": 53420, "token_acc": 0.5110294117647058, "train_speed(iter/s)": 0.672196 }, { "epoch": 2.288890793025149, "grad_norm": 4.157719612121582, "learning_rate": 5.6642109946230604e-05, "loss": 2.329680252075195, "memory(GiB)": 72.85, "step": 53425, "token_acc": 0.4602649006622517, "train_speed(iter/s)": 0.672213 }, { "epoch": 2.289105008354398, "grad_norm": 5.288405895233154, "learning_rate": 5.6635439757669464e-05, "loss": 2.3243955612182616, "memory(GiB)": 72.85, "step": 53430, "token_acc": 0.5363321799307958, "train_speed(iter/s)": 0.67222 }, { "epoch": 2.2893192236836466, "grad_norm": 5.020346641540527, "learning_rate": 5.662876944890112e-05, "loss": 2.3274681091308596, "memory(GiB)": 72.85, "step": 53435, "token_acc": 0.49169435215946844, "train_speed(iter/s)": 0.672228 }, { "epoch": 2.289533439012896, "grad_norm": 4.338509559631348, "learning_rate": 5.662209902004637e-05, "loss": 2.2585723876953123, "memory(GiB)": 72.85, "step": 53440, "token_acc": 0.5211267605633803, "train_speed(iter/s)": 0.672239 }, { "epoch": 2.2897476543421447, "grad_norm": 4.249255657196045, "learning_rate": 5.661542847122611e-05, "loss": 2.088334655761719, "memory(GiB)": 72.85, "step": 53445, "token_acc": 0.532258064516129, "train_speed(iter/s)": 0.672225 }, { "epoch": 2.2899618696713935, "grad_norm": 5.118987560272217, "learning_rate": 5.660875780256115e-05, "loss": 1.951318359375, "memory(GiB)": 72.85, "step": 53450, "token_acc": 0.5392491467576792, "train_speed(iter/s)": 
0.672234 }, { "epoch": 2.2901760850006427, "grad_norm": 7.800014019012451, "learning_rate": 5.6602087014172334e-05, "loss": 2.1970048904418946, "memory(GiB)": 72.85, "step": 53455, "token_acc": 0.5089605734767025, "train_speed(iter/s)": 0.672235 }, { "epoch": 2.2903903003298915, "grad_norm": 3.5075769424438477, "learning_rate": 5.659541610618052e-05, "loss": 2.167653274536133, "memory(GiB)": 72.85, "step": 53460, "token_acc": 0.5382059800664452, "train_speed(iter/s)": 0.672249 }, { "epoch": 2.2906045156591404, "grad_norm": 4.297872543334961, "learning_rate": 5.658874507870657e-05, "loss": 2.2808420181274416, "memory(GiB)": 72.85, "step": 53465, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.672246 }, { "epoch": 2.2908187309883896, "grad_norm": 4.696307182312012, "learning_rate": 5.658207393187131e-05, "loss": 2.207978630065918, "memory(GiB)": 72.85, "step": 53470, "token_acc": 0.5163398692810458, "train_speed(iter/s)": 0.672252 }, { "epoch": 2.2910329463176384, "grad_norm": 7.205292224884033, "learning_rate": 5.657540266579562e-05, "loss": 2.4747993469238283, "memory(GiB)": 72.85, "step": 53475, "token_acc": 0.462882096069869, "train_speed(iter/s)": 0.67224 }, { "epoch": 2.2912471616468872, "grad_norm": 4.596039772033691, "learning_rate": 5.656873128060033e-05, "loss": 2.2467416763305663, "memory(GiB)": 72.85, "step": 53480, "token_acc": 0.4964788732394366, "train_speed(iter/s)": 0.672245 }, { "epoch": 2.2914613769761365, "grad_norm": 4.220129489898682, "learning_rate": 5.656205977640631e-05, "loss": 2.7659107208251954, "memory(GiB)": 72.85, "step": 53485, "token_acc": 0.4620253164556962, "train_speed(iter/s)": 0.672256 }, { "epoch": 2.2916755923053853, "grad_norm": 3.7349929809570312, "learning_rate": 5.6555388153334435e-05, "loss": 2.2350866317749025, "memory(GiB)": 72.85, "step": 53490, "token_acc": 0.5203488372093024, "train_speed(iter/s)": 0.672245 }, { "epoch": 2.291889807634634, "grad_norm": 4.178821086883545, "learning_rate": 
5.654871641150553e-05, "loss": 1.955167007446289, "memory(GiB)": 72.85, "step": 53495, "token_acc": 0.5859375, "train_speed(iter/s)": 0.672238 }, { "epoch": 2.2921040229638834, "grad_norm": 4.109362602233887, "learning_rate": 5.65420445510405e-05, "loss": 2.3681737899780275, "memory(GiB)": 72.85, "step": 53500, "token_acc": 0.5138461538461538, "train_speed(iter/s)": 0.672245 }, { "epoch": 2.2921040229638834, "eval_loss": 1.8265398740768433, "eval_runtime": 14.9903, "eval_samples_per_second": 6.671, "eval_steps_per_second": 6.671, "eval_token_acc": 0.5451505016722408, "step": 53500 }, { "epoch": 2.292318238293132, "grad_norm": 4.3774824142456055, "learning_rate": 5.65353725720602e-05, "loss": 1.8428430557250977, "memory(GiB)": 72.85, "step": 53505, "token_acc": 0.5548022598870056, "train_speed(iter/s)": 0.672088 }, { "epoch": 2.292532453622381, "grad_norm": 5.829895973205566, "learning_rate": 5.652870047468548e-05, "loss": 2.344593811035156, "memory(GiB)": 72.85, "step": 53510, "token_acc": 0.5080906148867314, "train_speed(iter/s)": 0.672095 }, { "epoch": 2.2927466689516303, "grad_norm": 5.324220180511475, "learning_rate": 5.652202825903724e-05, "loss": 2.1447097778320314, "memory(GiB)": 72.85, "step": 53515, "token_acc": 0.5149253731343284, "train_speed(iter/s)": 0.672084 }, { "epoch": 2.292960884280879, "grad_norm": 4.702348232269287, "learning_rate": 5.6515355925236336e-05, "loss": 2.1589645385742187, "memory(GiB)": 72.85, "step": 53520, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.672092 }, { "epoch": 2.293175099610128, "grad_norm": 4.263436794281006, "learning_rate": 5.650868347340365e-05, "loss": 2.082602691650391, "memory(GiB)": 72.85, "step": 53525, "token_acc": 0.5444444444444444, "train_speed(iter/s)": 0.6721 }, { "epoch": 2.293389314939377, "grad_norm": 4.966761112213135, "learning_rate": 5.6502010903660044e-05, "loss": 2.440181541442871, "memory(GiB)": 72.85, "step": 53530, "token_acc": 0.4967948717948718, "train_speed(iter/s)": 0.672109 }, 
{ "epoch": 2.293603530268626, "grad_norm": 4.224318981170654, "learning_rate": 5.6495338216126425e-05, "loss": 2.269295883178711, "memory(GiB)": 72.85, "step": 53535, "token_acc": 0.5041551246537396, "train_speed(iter/s)": 0.672106 }, { "epoch": 2.2938177455978748, "grad_norm": 6.3682756423950195, "learning_rate": 5.6488665410923646e-05, "loss": 2.2429094314575195, "memory(GiB)": 72.85, "step": 53540, "token_acc": 0.5038461538461538, "train_speed(iter/s)": 0.672091 }, { "epoch": 2.294031960927124, "grad_norm": 4.824197292327881, "learning_rate": 5.648199248817263e-05, "loss": 1.9674287796020509, "memory(GiB)": 72.85, "step": 53545, "token_acc": 0.542319749216301, "train_speed(iter/s)": 0.672097 }, { "epoch": 2.294246176256373, "grad_norm": 5.357108116149902, "learning_rate": 5.6475319447994225e-05, "loss": 2.272408676147461, "memory(GiB)": 72.85, "step": 53550, "token_acc": 0.49825783972125437, "train_speed(iter/s)": 0.672111 }, { "epoch": 2.2944603915856217, "grad_norm": 6.398941993713379, "learning_rate": 5.646864629050933e-05, "loss": 1.8670431137084962, "memory(GiB)": 72.85, "step": 53555, "token_acc": 0.5775193798449613, "train_speed(iter/s)": 0.672123 }, { "epoch": 2.294674606914871, "grad_norm": 4.9958720207214355, "learning_rate": 5.6461973015838854e-05, "loss": 2.3050024032592775, "memory(GiB)": 72.85, "step": 53560, "token_acc": 0.5359712230215827, "train_speed(iter/s)": 0.672128 }, { "epoch": 2.2948888222441197, "grad_norm": 5.828979969024658, "learning_rate": 5.645529962410367e-05, "loss": 2.303104782104492, "memory(GiB)": 72.85, "step": 53565, "token_acc": 0.5278810408921933, "train_speed(iter/s)": 0.672138 }, { "epoch": 2.2951030375733685, "grad_norm": 4.684836387634277, "learning_rate": 5.644862611542466e-05, "loss": 2.168895149230957, "memory(GiB)": 72.85, "step": 53570, "token_acc": 0.5204918032786885, "train_speed(iter/s)": 0.672127 }, { "epoch": 2.295317252902618, "grad_norm": 7.506324291229248, "learning_rate": 5.6441952489922754e-05, "loss": 
2.2607162475585936, "memory(GiB)": 72.85, "step": 53575, "token_acc": 0.4889705882352941, "train_speed(iter/s)": 0.67209 }, { "epoch": 2.2955314682318666, "grad_norm": 4.587313175201416, "learning_rate": 5.643527874771885e-05, "loss": 2.2464136123657226, "memory(GiB)": 72.85, "step": 53580, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.672088 }, { "epoch": 2.2957456835611154, "grad_norm": 4.276444911956787, "learning_rate": 5.642860488893381e-05, "loss": 2.1665395736694335, "memory(GiB)": 72.85, "step": 53585, "token_acc": 0.534965034965035, "train_speed(iter/s)": 0.672093 }, { "epoch": 2.2959598988903647, "grad_norm": 4.339902877807617, "learning_rate": 5.642193091368857e-05, "loss": 2.550292205810547, "memory(GiB)": 72.85, "step": 53590, "token_acc": 0.5171339563862928, "train_speed(iter/s)": 0.672088 }, { "epoch": 2.2961741142196135, "grad_norm": 5.112697601318359, "learning_rate": 5.641525682210404e-05, "loss": 2.225457191467285, "memory(GiB)": 72.85, "step": 53595, "token_acc": 0.52, "train_speed(iter/s)": 0.672093 }, { "epoch": 2.2963883295488623, "grad_norm": 4.65933084487915, "learning_rate": 5.6408582614301095e-05, "loss": 2.2980175018310547, "memory(GiB)": 72.85, "step": 53600, "token_acc": 0.4984025559105431, "train_speed(iter/s)": 0.672108 }, { "epoch": 2.2966025448781116, "grad_norm": 4.450873374938965, "learning_rate": 5.640190829040066e-05, "loss": 1.9720623016357421, "memory(GiB)": 72.85, "step": 53605, "token_acc": 0.5296296296296297, "train_speed(iter/s)": 0.672123 }, { "epoch": 2.2968167602073604, "grad_norm": 3.1341042518615723, "learning_rate": 5.6395233850523674e-05, "loss": 2.109443473815918, "memory(GiB)": 72.85, "step": 53610, "token_acc": 0.5576208178438662, "train_speed(iter/s)": 0.672122 }, { "epoch": 2.297030975536609, "grad_norm": 4.970597743988037, "learning_rate": 5.638855929479101e-05, "loss": 2.2864633560180665, "memory(GiB)": 72.85, "step": 53615, "token_acc": 0.5646687697160884, "train_speed(iter/s)": 0.672114 }, { 
"epoch": 2.2972451908658584, "grad_norm": 5.279862880706787, "learning_rate": 5.638188462332361e-05, "loss": 2.319987487792969, "memory(GiB)": 72.85, "step": 53620, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.672103 }, { "epoch": 2.2974594061951072, "grad_norm": 4.796964168548584, "learning_rate": 5.637520983624239e-05, "loss": 2.2276166915893554, "memory(GiB)": 72.85, "step": 53625, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.672113 }, { "epoch": 2.297673621524356, "grad_norm": 5.105240821838379, "learning_rate": 5.636853493366825e-05, "loss": 2.3225980758666993, "memory(GiB)": 72.85, "step": 53630, "token_acc": 0.5164835164835165, "train_speed(iter/s)": 0.672096 }, { "epoch": 2.2978878368536053, "grad_norm": 4.209893226623535, "learning_rate": 5.6361859915722137e-05, "loss": 2.418232536315918, "memory(GiB)": 72.85, "step": 53635, "token_acc": 0.4685714285714286, "train_speed(iter/s)": 0.672094 }, { "epoch": 2.298102052182854, "grad_norm": 4.785491466522217, "learning_rate": 5.635518478252495e-05, "loss": 2.2114303588867186, "memory(GiB)": 72.85, "step": 53640, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.67209 }, { "epoch": 2.298316267512103, "grad_norm": 4.871374130249023, "learning_rate": 5.634850953419763e-05, "loss": 2.124833106994629, "memory(GiB)": 72.85, "step": 53645, "token_acc": 0.5268456375838926, "train_speed(iter/s)": 0.672094 }, { "epoch": 2.298530482841352, "grad_norm": 4.987782001495361, "learning_rate": 5.634183417086112e-05, "loss": 2.0953754425048827, "memory(GiB)": 72.85, "step": 53650, "token_acc": 0.5503355704697986, "train_speed(iter/s)": 0.672084 }, { "epoch": 2.298744698170601, "grad_norm": 3.918323278427124, "learning_rate": 5.633515869263631e-05, "loss": 2.2795597076416017, "memory(GiB)": 72.85, "step": 53655, "token_acc": 0.5373134328358209, "train_speed(iter/s)": 0.67208 }, { "epoch": 2.29895891349985, "grad_norm": 5.418656826019287, "learning_rate": 5.632848309964418e-05, "loss": 
2.2344118118286134, "memory(GiB)": 72.85, "step": 53660, "token_acc": 0.5, "train_speed(iter/s)": 0.672096 }, { "epoch": 2.299173128829099, "grad_norm": 5.006938457489014, "learning_rate": 5.632180739200564e-05, "loss": 2.1525802612304688, "memory(GiB)": 72.85, "step": 53665, "token_acc": 0.49834983498349833, "train_speed(iter/s)": 0.672104 }, { "epoch": 2.299387344158348, "grad_norm": 4.809108734130859, "learning_rate": 5.6315131569841616e-05, "loss": 1.9646120071411133, "memory(GiB)": 72.85, "step": 53670, "token_acc": 0.5528169014084507, "train_speed(iter/s)": 0.672111 }, { "epoch": 2.2996015594875967, "grad_norm": 4.892393112182617, "learning_rate": 5.630845563327307e-05, "loss": 2.2337732315063477, "memory(GiB)": 72.85, "step": 53675, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.672106 }, { "epoch": 2.299815774816846, "grad_norm": 6.303492546081543, "learning_rate": 5.630177958242093e-05, "loss": 2.3894418716430663, "memory(GiB)": 72.85, "step": 53680, "token_acc": 0.5184135977337111, "train_speed(iter/s)": 0.672116 }, { "epoch": 2.3000299901460948, "grad_norm": 4.519448757171631, "learning_rate": 5.629510341740614e-05, "loss": 2.246259307861328, "memory(GiB)": 72.85, "step": 53685, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.672111 }, { "epoch": 2.3002442054753436, "grad_norm": 5.835050582885742, "learning_rate": 5.628842713834964e-05, "loss": 2.4491058349609376, "memory(GiB)": 72.85, "step": 53690, "token_acc": 0.528169014084507, "train_speed(iter/s)": 0.672099 }, { "epoch": 2.300458420804593, "grad_norm": 5.371230602264404, "learning_rate": 5.6281750745372395e-05, "loss": 1.9781244277954102, "memory(GiB)": 72.85, "step": 53695, "token_acc": 0.552901023890785, "train_speed(iter/s)": 0.672127 }, { "epoch": 2.3006726361338417, "grad_norm": 5.392425537109375, "learning_rate": 5.627507423859534e-05, "loss": 2.4397661209106447, "memory(GiB)": 72.85, "step": 53700, "token_acc": 0.49504950495049505, "train_speed(iter/s)": 0.672125 }, { 
"epoch": 2.3008868514630905, "grad_norm": 5.581425666809082, "learning_rate": 5.626839761813942e-05, "loss": 2.1898639678955076, "memory(GiB)": 72.85, "step": 53705, "token_acc": 0.5409252669039146, "train_speed(iter/s)": 0.672119 }, { "epoch": 2.3011010667923397, "grad_norm": 4.7238054275512695, "learning_rate": 5.626172088412562e-05, "loss": 2.384472465515137, "memory(GiB)": 72.85, "step": 53710, "token_acc": 0.4963235294117647, "train_speed(iter/s)": 0.672126 }, { "epoch": 2.3013152821215885, "grad_norm": 6.676208019256592, "learning_rate": 5.625504403667483e-05, "loss": 2.3719615936279297, "memory(GiB)": 72.85, "step": 53715, "token_acc": 0.4918032786885246, "train_speed(iter/s)": 0.672111 }, { "epoch": 2.3015294974508373, "grad_norm": 5.878507614135742, "learning_rate": 5.624836707590808e-05, "loss": 2.3123870849609376, "memory(GiB)": 72.85, "step": 53720, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.672102 }, { "epoch": 2.3017437127800866, "grad_norm": 4.931527137756348, "learning_rate": 5.62416900019463e-05, "loss": 2.074532890319824, "memory(GiB)": 72.85, "step": 53725, "token_acc": 0.5247933884297521, "train_speed(iter/s)": 0.672104 }, { "epoch": 2.3019579281093354, "grad_norm": 4.436371803283691, "learning_rate": 5.623501281491045e-05, "loss": 2.428989791870117, "memory(GiB)": 72.85, "step": 53730, "token_acc": 0.5016181229773463, "train_speed(iter/s)": 0.672112 }, { "epoch": 2.3021721434385842, "grad_norm": 3.984332799911499, "learning_rate": 5.6228335514921494e-05, "loss": 2.2283281326293944, "memory(GiB)": 72.85, "step": 53735, "token_acc": 0.5079872204472844, "train_speed(iter/s)": 0.672108 }, { "epoch": 2.3023863587678335, "grad_norm": 4.362983703613281, "learning_rate": 5.622165810210039e-05, "loss": 2.151917266845703, "memory(GiB)": 72.85, "step": 53740, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.672118 }, { "epoch": 2.3026005740970823, "grad_norm": 4.964563369750977, "learning_rate": 5.621498057656811e-05, "loss": 
2.292166900634766, "memory(GiB)": 72.85, "step": 53745, "token_acc": 0.5031847133757962, "train_speed(iter/s)": 0.672117 }, { "epoch": 2.302814789426331, "grad_norm": 4.854985237121582, "learning_rate": 5.620830293844563e-05, "loss": 2.144892692565918, "memory(GiB)": 72.85, "step": 53750, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672124 }, { "epoch": 2.3030290047555804, "grad_norm": 5.459046840667725, "learning_rate": 5.6201625187853926e-05, "loss": 2.517603874206543, "memory(GiB)": 72.85, "step": 53755, "token_acc": 0.476027397260274, "train_speed(iter/s)": 0.672124 }, { "epoch": 2.303243220084829, "grad_norm": 4.716509819030762, "learning_rate": 5.6194947324913946e-05, "loss": 2.2902446746826173, "memory(GiB)": 72.85, "step": 53760, "token_acc": 0.5366666666666666, "train_speed(iter/s)": 0.672109 }, { "epoch": 2.303457435414078, "grad_norm": 5.466906547546387, "learning_rate": 5.61882693497467e-05, "loss": 2.7347177505493163, "memory(GiB)": 72.85, "step": 53765, "token_acc": 0.45806451612903226, "train_speed(iter/s)": 0.672113 }, { "epoch": 2.3036716507433272, "grad_norm": 4.631776332855225, "learning_rate": 5.6181591262473155e-05, "loss": 2.0898948669433595, "memory(GiB)": 72.85, "step": 53770, "token_acc": 0.5265151515151515, "train_speed(iter/s)": 0.672108 }, { "epoch": 2.303885866072576, "grad_norm": 5.5272345542907715, "learning_rate": 5.617491306321427e-05, "loss": 2.2210512161254883, "memory(GiB)": 72.85, "step": 53775, "token_acc": 0.5255474452554745, "train_speed(iter/s)": 0.672104 }, { "epoch": 2.304100081401825, "grad_norm": 4.330491065979004, "learning_rate": 5.616823475209105e-05, "loss": 2.1276779174804688, "memory(GiB)": 72.85, "step": 53780, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.672095 }, { "epoch": 2.304314296731074, "grad_norm": 3.5239758491516113, "learning_rate": 5.6161556329224475e-05, "loss": 2.146082878112793, "memory(GiB)": 72.85, "step": 53785, "token_acc": 0.5221518987341772, "train_speed(iter/s)": 
0.672106 }, { "epoch": 2.304528512060323, "grad_norm": 6.568770885467529, "learning_rate": 5.615487779473552e-05, "loss": 2.302983856201172, "memory(GiB)": 72.85, "step": 53790, "token_acc": 0.47530864197530864, "train_speed(iter/s)": 0.672123 }, { "epoch": 2.3047427273895718, "grad_norm": 3.95630145072937, "learning_rate": 5.614819914874518e-05, "loss": 1.9928602218627929, "memory(GiB)": 72.85, "step": 53795, "token_acc": 0.569811320754717, "train_speed(iter/s)": 0.672135 }, { "epoch": 2.304956942718821, "grad_norm": 5.570407390594482, "learning_rate": 5.614152039137444e-05, "loss": 2.4614572525024414, "memory(GiB)": 72.85, "step": 53800, "token_acc": 0.4866666666666667, "train_speed(iter/s)": 0.672138 }, { "epoch": 2.30517115804807, "grad_norm": 4.022427558898926, "learning_rate": 5.6134841522744305e-05, "loss": 2.286744499206543, "memory(GiB)": 72.85, "step": 53805, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.67215 }, { "epoch": 2.3053853733773186, "grad_norm": 5.704780101776123, "learning_rate": 5.612816254297577e-05, "loss": 2.161146354675293, "memory(GiB)": 72.85, "step": 53810, "token_acc": 0.5372549019607843, "train_speed(iter/s)": 0.672159 }, { "epoch": 2.305599588706568, "grad_norm": 3.869417667388916, "learning_rate": 5.612148345218981e-05, "loss": 1.8260812759399414, "memory(GiB)": 72.85, "step": 53815, "token_acc": 0.562753036437247, "train_speed(iter/s)": 0.672176 }, { "epoch": 2.3058138040358167, "grad_norm": 4.309270858764648, "learning_rate": 5.611480425050744e-05, "loss": 2.1906051635742188, "memory(GiB)": 72.85, "step": 53820, "token_acc": 0.4711864406779661, "train_speed(iter/s)": 0.672167 }, { "epoch": 2.3060280193650655, "grad_norm": 4.65528678894043, "learning_rate": 5.610812493804966e-05, "loss": 2.0510818481445314, "memory(GiB)": 72.85, "step": 53825, "token_acc": 0.5207547169811321, "train_speed(iter/s)": 0.672169 }, { "epoch": 2.3062422346943148, "grad_norm": 3.6535487174987793, "learning_rate": 5.610144551493743e-05, 
"loss": 2.225148391723633, "memory(GiB)": 72.85, "step": 53830, "token_acc": 0.5125786163522013, "train_speed(iter/s)": 0.672167 }, { "epoch": 2.3064564500235636, "grad_norm": 5.379218578338623, "learning_rate": 5.609476598129183e-05, "loss": 2.2502067565917967, "memory(GiB)": 72.85, "step": 53835, "token_acc": 0.53125, "train_speed(iter/s)": 0.67216 }, { "epoch": 2.3066706653528124, "grad_norm": 5.616039276123047, "learning_rate": 5.6088086337233826e-05, "loss": 2.285059356689453, "memory(GiB)": 72.85, "step": 53840, "token_acc": 0.5292096219931272, "train_speed(iter/s)": 0.672156 }, { "epoch": 2.3068848806820617, "grad_norm": 4.414529323577881, "learning_rate": 5.608140658288441e-05, "loss": 2.384232521057129, "memory(GiB)": 72.85, "step": 53845, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.67215 }, { "epoch": 2.3070990960113105, "grad_norm": 5.397253036499023, "learning_rate": 5.607472671836461e-05, "loss": 2.5117767333984373, "memory(GiB)": 72.85, "step": 53850, "token_acc": 0.47674418604651164, "train_speed(iter/s)": 0.672164 }, { "epoch": 2.3073133113405593, "grad_norm": 3.5069453716278076, "learning_rate": 5.606804674379545e-05, "loss": 2.0332046508789063, "memory(GiB)": 72.85, "step": 53855, "token_acc": 0.5273775216138329, "train_speed(iter/s)": 0.67218 }, { "epoch": 2.3075275266698085, "grad_norm": 4.910445213317871, "learning_rate": 5.606136665929792e-05, "loss": 2.347772979736328, "memory(GiB)": 72.85, "step": 53860, "token_acc": 0.48214285714285715, "train_speed(iter/s)": 0.672177 }, { "epoch": 2.3077417419990573, "grad_norm": 5.371755599975586, "learning_rate": 5.605468646499305e-05, "loss": 2.142076301574707, "memory(GiB)": 72.85, "step": 53865, "token_acc": 0.5166666666666667, "train_speed(iter/s)": 0.672171 }, { "epoch": 2.3079559573283066, "grad_norm": 5.199319362640381, "learning_rate": 5.604800616100185e-05, "loss": 2.5719987869262697, "memory(GiB)": 72.85, "step": 53870, "token_acc": 0.4280936454849498, "train_speed(iter/s)": 
0.672168 }, { "epoch": 2.3081701726575554, "grad_norm": 6.030617713928223, "learning_rate": 5.604132574744535e-05, "loss": 2.4561317443847654, "memory(GiB)": 72.85, "step": 53875, "token_acc": 0.5037878787878788, "train_speed(iter/s)": 0.672181 }, { "epoch": 2.3083843879868042, "grad_norm": 5.823563098907471, "learning_rate": 5.603464522444457e-05, "loss": 2.1477285385131837, "memory(GiB)": 72.85, "step": 53880, "token_acc": 0.48221343873517786, "train_speed(iter/s)": 0.672188 }, { "epoch": 2.3085986033160535, "grad_norm": 6.629082202911377, "learning_rate": 5.602796459212054e-05, "loss": 2.178754425048828, "memory(GiB)": 72.85, "step": 53885, "token_acc": 0.4934640522875817, "train_speed(iter/s)": 0.672197 }, { "epoch": 2.3088128186453023, "grad_norm": 5.768373966217041, "learning_rate": 5.602128385059425e-05, "loss": 2.233842468261719, "memory(GiB)": 72.85, "step": 53890, "token_acc": 0.5064516129032258, "train_speed(iter/s)": 0.672189 }, { "epoch": 2.309027033974551, "grad_norm": 5.770829677581787, "learning_rate": 5.6014602999986785e-05, "loss": 2.1670434951782225, "memory(GiB)": 72.85, "step": 53895, "token_acc": 0.4962686567164179, "train_speed(iter/s)": 0.672187 }, { "epoch": 2.3092412493038004, "grad_norm": 5.775804042816162, "learning_rate": 5.600792204041913e-05, "loss": 2.242188262939453, "memory(GiB)": 72.85, "step": 53900, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.672193 }, { "epoch": 2.309455464633049, "grad_norm": 4.814346790313721, "learning_rate": 5.600124097201232e-05, "loss": 2.129491424560547, "memory(GiB)": 72.85, "step": 53905, "token_acc": 0.5653495440729484, "train_speed(iter/s)": 0.672202 }, { "epoch": 2.309669679962298, "grad_norm": 4.466114521026611, "learning_rate": 5.599455979488741e-05, "loss": 2.1102699279785155, "memory(GiB)": 72.85, "step": 53910, "token_acc": 0.5144694533762058, "train_speed(iter/s)": 0.672208 }, { "epoch": 2.3098838952915473, "grad_norm": 4.109237194061279, "learning_rate": 5.5987878509165436e-05, 
"loss": 2.264803886413574, "memory(GiB)": 72.85, "step": 53915, "token_acc": 0.5307443365695793, "train_speed(iter/s)": 0.672189 }, { "epoch": 2.310098110620796, "grad_norm": 3.810060501098633, "learning_rate": 5.598119711496741e-05, "loss": 1.9191370010375977, "memory(GiB)": 72.85, "step": 53920, "token_acc": 0.5892116182572614, "train_speed(iter/s)": 0.672193 }, { "epoch": 2.310312325950045, "grad_norm": 4.57445764541626, "learning_rate": 5.5974515612414394e-05, "loss": 2.3405275344848633, "memory(GiB)": 72.85, "step": 53925, "token_acc": 0.49216300940438873, "train_speed(iter/s)": 0.672194 }, { "epoch": 2.310526541279294, "grad_norm": 6.217108249664307, "learning_rate": 5.596783400162742e-05, "loss": 2.0483211517333983, "memory(GiB)": 72.85, "step": 53930, "token_acc": 0.5361216730038023, "train_speed(iter/s)": 0.672196 }, { "epoch": 2.310740756608543, "grad_norm": 7.928668975830078, "learning_rate": 5.596115228272755e-05, "loss": 2.4362300872802733, "memory(GiB)": 72.85, "step": 53935, "token_acc": 0.4653846153846154, "train_speed(iter/s)": 0.672202 }, { "epoch": 2.3109549719377918, "grad_norm": 3.4463229179382324, "learning_rate": 5.595447045583578e-05, "loss": 2.459681510925293, "memory(GiB)": 72.85, "step": 53940, "token_acc": 0.4720670391061452, "train_speed(iter/s)": 0.672188 }, { "epoch": 2.311169187267041, "grad_norm": 5.387537002563477, "learning_rate": 5.5947788521073216e-05, "loss": 2.269053268432617, "memory(GiB)": 72.85, "step": 53945, "token_acc": 0.5378486055776892, "train_speed(iter/s)": 0.672174 }, { "epoch": 2.31138340259629, "grad_norm": 5.095790863037109, "learning_rate": 5.594110647856089e-05, "loss": 2.585567855834961, "memory(GiB)": 72.85, "step": 53950, "token_acc": 0.4819277108433735, "train_speed(iter/s)": 0.672174 }, { "epoch": 2.3115976179255386, "grad_norm": 4.2391839027404785, "learning_rate": 5.5934424328419844e-05, "loss": 2.4662960052490233, "memory(GiB)": 72.85, "step": 53955, "token_acc": 0.5092592592592593, 
"train_speed(iter/s)": 0.672167 }, { "epoch": 2.311811833254788, "grad_norm": 5.176455020904541, "learning_rate": 5.592774207077112e-05, "loss": 2.285870361328125, "memory(GiB)": 72.85, "step": 53960, "token_acc": 0.5309090909090909, "train_speed(iter/s)": 0.67217 }, { "epoch": 2.3120260485840367, "grad_norm": 4.20352840423584, "learning_rate": 5.59210597057358e-05, "loss": 2.1197332382202148, "memory(GiB)": 72.85, "step": 53965, "token_acc": 0.5227272727272727, "train_speed(iter/s)": 0.672168 }, { "epoch": 2.3122402639132855, "grad_norm": 5.040560722351074, "learning_rate": 5.591437723343493e-05, "loss": 1.9994129180908202, "memory(GiB)": 72.85, "step": 53970, "token_acc": 0.5404411764705882, "train_speed(iter/s)": 0.67216 }, { "epoch": 2.3124544792425348, "grad_norm": 5.207132816314697, "learning_rate": 5.590769465398955e-05, "loss": 2.463161277770996, "memory(GiB)": 72.85, "step": 53975, "token_acc": 0.4573002754820937, "train_speed(iter/s)": 0.672167 }, { "epoch": 2.3126686945717836, "grad_norm": 4.375949382781982, "learning_rate": 5.590101196752076e-05, "loss": 2.016557312011719, "memory(GiB)": 72.85, "step": 53980, "token_acc": 0.5614035087719298, "train_speed(iter/s)": 0.672176 }, { "epoch": 2.3128829099010324, "grad_norm": 5.828303337097168, "learning_rate": 5.58943291741496e-05, "loss": 2.0789157867431642, "memory(GiB)": 72.85, "step": 53985, "token_acc": 0.5133333333333333, "train_speed(iter/s)": 0.672175 }, { "epoch": 2.3130971252302817, "grad_norm": 3.9745934009552, "learning_rate": 5.588764627399713e-05, "loss": 2.558244514465332, "memory(GiB)": 72.85, "step": 53990, "token_acc": 0.5171339563862928, "train_speed(iter/s)": 0.672182 }, { "epoch": 2.3133113405595305, "grad_norm": 3.72213077545166, "learning_rate": 5.588096326718443e-05, "loss": 2.138475799560547, "memory(GiB)": 72.85, "step": 53995, "token_acc": 0.5155709342560554, "train_speed(iter/s)": 0.672174 }, { "epoch": 2.3135255558887793, "grad_norm": 3.804964303970337, "learning_rate": 
5.587428015383257e-05, "loss": 2.073501205444336, "memory(GiB)": 72.85, "step": 54000, "token_acc": 0.5505226480836237, "train_speed(iter/s)": 0.672172 }, { "epoch": 2.3135255558887793, "eval_loss": 2.0343713760375977, "eval_runtime": 15.6658, "eval_samples_per_second": 6.383, "eval_steps_per_second": 6.383, "eval_token_acc": 0.49440993788819876, "step": 54000 }, { "epoch": 2.3137397712180285, "grad_norm": 4.737060070037842, "learning_rate": 5.58675969340626e-05, "loss": 2.461990547180176, "memory(GiB)": 72.85, "step": 54005, "token_acc": 0.49738675958188155, "train_speed(iter/s)": 0.672027 }, { "epoch": 2.3139539865472774, "grad_norm": 4.0340776443481445, "learning_rate": 5.5860913607995616e-05, "loss": 2.2561079025268556, "memory(GiB)": 72.85, "step": 54010, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672027 }, { "epoch": 2.314168201876526, "grad_norm": 8.631917953491211, "learning_rate": 5.5854230175752676e-05, "loss": 2.138179588317871, "memory(GiB)": 72.85, "step": 54015, "token_acc": 0.5475285171102662, "train_speed(iter/s)": 0.672037 }, { "epoch": 2.3143824172057754, "grad_norm": 16.385026931762695, "learning_rate": 5.584754663745486e-05, "loss": 1.842770767211914, "memory(GiB)": 72.85, "step": 54020, "token_acc": 0.532319391634981, "train_speed(iter/s)": 0.67203 }, { "epoch": 2.3145966325350242, "grad_norm": 4.724950313568115, "learning_rate": 5.5840862993223264e-05, "loss": 2.371146011352539, "memory(GiB)": 72.85, "step": 54025, "token_acc": 0.49224806201550386, "train_speed(iter/s)": 0.672026 }, { "epoch": 2.314810847864273, "grad_norm": 3.8964710235595703, "learning_rate": 5.583417924317896e-05, "loss": 2.2670764923095703, "memory(GiB)": 72.85, "step": 54030, "token_acc": 0.5229357798165137, "train_speed(iter/s)": 0.672028 }, { "epoch": 2.3150250631935223, "grad_norm": 3.5723860263824463, "learning_rate": 5.582749538744302e-05, "loss": 1.8592302322387695, "memory(GiB)": 72.85, "step": 54035, "token_acc": 0.5646687697160884, 
"train_speed(iter/s)": 0.672038 }, { "epoch": 2.315239278522771, "grad_norm": 4.301198482513428, "learning_rate": 5.582081142613653e-05, "loss": 2.055292510986328, "memory(GiB)": 72.85, "step": 54040, "token_acc": 0.549645390070922, "train_speed(iter/s)": 0.672037 }, { "epoch": 2.31545349385202, "grad_norm": 4.290740013122559, "learning_rate": 5.581412735938059e-05, "loss": 2.130524826049805, "memory(GiB)": 72.85, "step": 54045, "token_acc": 0.5378486055776892, "train_speed(iter/s)": 0.672027 }, { "epoch": 2.315667709181269, "grad_norm": 4.178371906280518, "learning_rate": 5.580744318729626e-05, "loss": 2.5043428421020506, "memory(GiB)": 72.85, "step": 54050, "token_acc": 0.4820359281437126, "train_speed(iter/s)": 0.672028 }, { "epoch": 2.315881924510518, "grad_norm": 5.470052719116211, "learning_rate": 5.580075891000467e-05, "loss": 2.1886688232421876, "memory(GiB)": 72.85, "step": 54055, "token_acc": 0.5373134328358209, "train_speed(iter/s)": 0.672035 }, { "epoch": 2.316096139839767, "grad_norm": 4.660904884338379, "learning_rate": 5.579407452762688e-05, "loss": 2.117909812927246, "memory(GiB)": 72.85, "step": 54060, "token_acc": 0.5490196078431373, "train_speed(iter/s)": 0.672048 }, { "epoch": 2.316310355169016, "grad_norm": 5.154417991638184, "learning_rate": 5.578739004028399e-05, "loss": 2.2619400024414062, "memory(GiB)": 72.85, "step": 54065, "token_acc": 0.4984709480122324, "train_speed(iter/s)": 0.672058 }, { "epoch": 2.316524570498265, "grad_norm": 5.1314544677734375, "learning_rate": 5.5780705448097126e-05, "loss": 2.2835983276367187, "memory(GiB)": 72.85, "step": 54070, "token_acc": 0.4731182795698925, "train_speed(iter/s)": 0.672062 }, { "epoch": 2.3167387858275137, "grad_norm": 7.256547451019287, "learning_rate": 5.577402075118733e-05, "loss": 2.3772926330566406, "memory(GiB)": 72.85, "step": 54075, "token_acc": 0.5218855218855218, "train_speed(iter/s)": 0.672061 }, { "epoch": 2.316953001156763, "grad_norm": 4.793285369873047, "learning_rate": 
5.576733594967575e-05, "loss": 2.019148254394531, "memory(GiB)": 72.85, "step": 54080, "token_acc": 0.538961038961039, "train_speed(iter/s)": 0.672048 }, { "epoch": 2.3171672164860118, "grad_norm": 4.842687606811523, "learning_rate": 5.5760651043683456e-05, "loss": 2.2919666290283205, "memory(GiB)": 72.85, "step": 54085, "token_acc": 0.49480968858131485, "train_speed(iter/s)": 0.672048 }, { "epoch": 2.3173814318152606, "grad_norm": 4.909099578857422, "learning_rate": 5.575396603333156e-05, "loss": 2.1977357864379883, "memory(GiB)": 72.85, "step": 54090, "token_acc": 0.47440273037542663, "train_speed(iter/s)": 0.672051 }, { "epoch": 2.31759564714451, "grad_norm": 4.562229633331299, "learning_rate": 5.57472809187412e-05, "loss": 1.9563268661499023, "memory(GiB)": 72.85, "step": 54095, "token_acc": 0.5445205479452054, "train_speed(iter/s)": 0.672042 }, { "epoch": 2.3178098624737586, "grad_norm": 5.682244300842285, "learning_rate": 5.5740595700033435e-05, "loss": 1.9987415313720702, "memory(GiB)": 72.85, "step": 54100, "token_acc": 0.540650406504065, "train_speed(iter/s)": 0.672038 }, { "epoch": 2.3180240778030075, "grad_norm": 6.44950008392334, "learning_rate": 5.573391037732939e-05, "loss": 2.4396026611328123, "memory(GiB)": 72.85, "step": 54105, "token_acc": 0.48548812664907653, "train_speed(iter/s)": 0.672041 }, { "epoch": 2.3182382931322567, "grad_norm": 3.9836623668670654, "learning_rate": 5.572722495075019e-05, "loss": 2.2556739807128907, "memory(GiB)": 72.85, "step": 54110, "token_acc": 0.5352112676056338, "train_speed(iter/s)": 0.672049 }, { "epoch": 2.3184525084615055, "grad_norm": 4.320195198059082, "learning_rate": 5.572053942041694e-05, "loss": 2.2953359603881838, "memory(GiB)": 72.85, "step": 54115, "token_acc": 0.4865771812080537, "train_speed(iter/s)": 0.672058 }, { "epoch": 2.3186667237907543, "grad_norm": 5.59600305557251, "learning_rate": 5.5713853786450745e-05, "loss": 2.241052436828613, "memory(GiB)": 72.85, "step": 54120, "token_acc": 
0.4892086330935252, "train_speed(iter/s)": 0.672051 }, { "epoch": 2.3188809391200036, "grad_norm": 5.556299209594727, "learning_rate": 5.570716804897273e-05, "loss": 2.488772964477539, "memory(GiB)": 72.85, "step": 54125, "token_acc": 0.483974358974359, "train_speed(iter/s)": 0.672065 }, { "epoch": 2.3190951544492524, "grad_norm": 5.578996181488037, "learning_rate": 5.570048220810401e-05, "loss": 2.2022891998291017, "memory(GiB)": 72.85, "step": 54130, "token_acc": 0.523121387283237, "train_speed(iter/s)": 0.672056 }, { "epoch": 2.319309369778501, "grad_norm": 4.837291240692139, "learning_rate": 5.56937962639657e-05, "loss": 2.0829919815063476, "memory(GiB)": 72.85, "step": 54135, "token_acc": 0.5411764705882353, "train_speed(iter/s)": 0.672049 }, { "epoch": 2.3195235851077505, "grad_norm": 4.489792346954346, "learning_rate": 5.568711021667894e-05, "loss": 2.0403421401977537, "memory(GiB)": 72.85, "step": 54140, "token_acc": 0.5516014234875445, "train_speed(iter/s)": 0.672042 }, { "epoch": 2.3197378004369993, "grad_norm": 4.7998480796813965, "learning_rate": 5.5680424066364844e-05, "loss": 2.268019104003906, "memory(GiB)": 72.85, "step": 54145, "token_acc": 0.5092936802973977, "train_speed(iter/s)": 0.672047 }, { "epoch": 2.319952015766248, "grad_norm": 7.231449127197266, "learning_rate": 5.567373781314453e-05, "loss": 2.307688903808594, "memory(GiB)": 72.85, "step": 54150, "token_acc": 0.5054945054945055, "train_speed(iter/s)": 0.672046 }, { "epoch": 2.3201662310954974, "grad_norm": 4.607802391052246, "learning_rate": 5.566705145713914e-05, "loss": 2.4078861236572267, "memory(GiB)": 72.85, "step": 54155, "token_acc": 0.47752808988764045, "train_speed(iter/s)": 0.672049 }, { "epoch": 2.320380446424746, "grad_norm": 4.608252048492432, "learning_rate": 5.5660364998469795e-05, "loss": 2.288814735412598, "memory(GiB)": 72.85, "step": 54160, "token_acc": 0.5376712328767124, "train_speed(iter/s)": 0.672045 }, { "epoch": 2.320594661753995, "grad_norm": 4.337491989135742, 
"learning_rate": 5.565367843725762e-05, "loss": 2.2335489273071287, "memory(GiB)": 72.85, "step": 54165, "token_acc": 0.48284960422163586, "train_speed(iter/s)": 0.672036 }, { "epoch": 2.3208088770832442, "grad_norm": 6.539888381958008, "learning_rate": 5.5646991773623766e-05, "loss": 2.325499725341797, "memory(GiB)": 72.85, "step": 54170, "token_acc": 0.5145631067961165, "train_speed(iter/s)": 0.672026 }, { "epoch": 2.321023092412493, "grad_norm": 3.8016908168792725, "learning_rate": 5.564030500768936e-05, "loss": 2.1741348266601563, "memory(GiB)": 72.85, "step": 54175, "token_acc": 0.5206349206349207, "train_speed(iter/s)": 0.672023 }, { "epoch": 2.321237307741742, "grad_norm": 5.879700660705566, "learning_rate": 5.563361813957554e-05, "loss": 2.2668514251708984, "memory(GiB)": 72.85, "step": 54180, "token_acc": 0.5097276264591439, "train_speed(iter/s)": 0.672032 }, { "epoch": 2.321451523070991, "grad_norm": 5.717491149902344, "learning_rate": 5.562693116940344e-05, "loss": 2.2477550506591797, "memory(GiB)": 72.85, "step": 54185, "token_acc": 0.4874551971326165, "train_speed(iter/s)": 0.672027 }, { "epoch": 2.32166573840024, "grad_norm": 5.360116004943848, "learning_rate": 5.5620244097294196e-05, "loss": 2.45580997467041, "memory(GiB)": 72.85, "step": 54190, "token_acc": 0.4765957446808511, "train_speed(iter/s)": 0.672033 }, { "epoch": 2.3218799537294887, "grad_norm": 5.247392177581787, "learning_rate": 5.561355692336896e-05, "loss": 2.4300283432006835, "memory(GiB)": 72.85, "step": 54195, "token_acc": 0.4750830564784053, "train_speed(iter/s)": 0.672036 }, { "epoch": 2.322094169058738, "grad_norm": 5.042898178100586, "learning_rate": 5.560686964774888e-05, "loss": 2.4485500335693358, "memory(GiB)": 72.85, "step": 54200, "token_acc": 0.47249190938511326, "train_speed(iter/s)": 0.672043 }, { "epoch": 2.322308384387987, "grad_norm": 4.931486129760742, "learning_rate": 5.560018227055508e-05, "loss": 2.094819259643555, "memory(GiB)": 72.85, "step": 54205, "token_acc": 
0.5604395604395604, "train_speed(iter/s)": 0.672038 }, { "epoch": 2.3225225997172356, "grad_norm": 5.64401912689209, "learning_rate": 5.5593494791908754e-05, "loss": 2.044663429260254, "memory(GiB)": 72.85, "step": 54210, "token_acc": 0.5374149659863946, "train_speed(iter/s)": 0.672044 }, { "epoch": 2.322736815046485, "grad_norm": 5.393980979919434, "learning_rate": 5.5586807211931005e-05, "loss": 2.399805450439453, "memory(GiB)": 72.85, "step": 54215, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.672041 }, { "epoch": 2.3229510303757337, "grad_norm": 4.696681022644043, "learning_rate": 5.5580119530743e-05, "loss": 2.2553918838500975, "memory(GiB)": 72.85, "step": 54220, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672042 }, { "epoch": 2.3231652457049825, "grad_norm": 5.068559169769287, "learning_rate": 5.557343174846591e-05, "loss": 2.1403425216674803, "memory(GiB)": 72.85, "step": 54225, "token_acc": 0.5472972972972973, "train_speed(iter/s)": 0.672059 }, { "epoch": 2.3233794610342318, "grad_norm": 4.9189229011535645, "learning_rate": 5.5566743865220874e-05, "loss": 2.182292938232422, "memory(GiB)": 72.85, "step": 54230, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672067 }, { "epoch": 2.3235936763634806, "grad_norm": 5.434014320373535, "learning_rate": 5.556005588112901e-05, "loss": 2.1729822158813477, "memory(GiB)": 72.85, "step": 54235, "token_acc": 0.5335820895522388, "train_speed(iter/s)": 0.672064 }, { "epoch": 2.3238078916927294, "grad_norm": 5.58210563659668, "learning_rate": 5.555336779631156e-05, "loss": 2.2693149566650392, "memory(GiB)": 72.85, "step": 54240, "token_acc": 0.5204081632653061, "train_speed(iter/s)": 0.672075 }, { "epoch": 2.3240221070219786, "grad_norm": 5.573437213897705, "learning_rate": 5.554667961088964e-05, "loss": 2.2260673522949217, "memory(GiB)": 72.85, "step": 54245, "token_acc": 0.4840989399293286, "train_speed(iter/s)": 0.672069 }, { "epoch": 2.3242363223512275, "grad_norm": 
4.036922454833984, "learning_rate": 5.5539991324984406e-05, "loss": 2.1203453063964846, "memory(GiB)": 72.85, "step": 54250, "token_acc": 0.49829351535836175, "train_speed(iter/s)": 0.672075 }, { "epoch": 2.3244505376804763, "grad_norm": 3.8476505279541016, "learning_rate": 5.553330293871704e-05, "loss": 2.1303989410400392, "memory(GiB)": 72.85, "step": 54255, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.672078 }, { "epoch": 2.3246647530097255, "grad_norm": 5.317897796630859, "learning_rate": 5.55266144522087e-05, "loss": 2.114397430419922, "memory(GiB)": 72.85, "step": 54260, "token_acc": 0.5518867924528302, "train_speed(iter/s)": 0.672078 }, { "epoch": 2.3248789683389743, "grad_norm": 4.653224945068359, "learning_rate": 5.551992586558055e-05, "loss": 2.310411834716797, "memory(GiB)": 72.85, "step": 54265, "token_acc": 0.4916387959866221, "train_speed(iter/s)": 0.672089 }, { "epoch": 2.325093183668223, "grad_norm": 5.039278030395508, "learning_rate": 5.551323717895376e-05, "loss": 2.001775550842285, "memory(GiB)": 72.85, "step": 54270, "token_acc": 0.5451127819548872, "train_speed(iter/s)": 0.672103 }, { "epoch": 2.3253073989974724, "grad_norm": 4.858924865722656, "learning_rate": 5.5506548392449507e-05, "loss": 2.5789737701416016, "memory(GiB)": 72.85, "step": 54275, "token_acc": 0.50187265917603, "train_speed(iter/s)": 0.672106 }, { "epoch": 2.325521614326721, "grad_norm": 3.8771204948425293, "learning_rate": 5.5499859506188964e-05, "loss": 2.4427032470703125, "memory(GiB)": 72.85, "step": 54280, "token_acc": 0.4697406340057637, "train_speed(iter/s)": 0.672108 }, { "epoch": 2.32573582965597, "grad_norm": 5.550187587738037, "learning_rate": 5.5493170520293326e-05, "loss": 2.088889312744141, "memory(GiB)": 72.85, "step": 54285, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.67209 }, { "epoch": 2.3259500449852193, "grad_norm": 4.7094950675964355, "learning_rate": 5.548648143488374e-05, "loss": 2.392938804626465, "memory(GiB)": 72.85, "step": 
54290, "token_acc": 0.48742138364779874, "train_speed(iter/s)": 0.672075 }, { "epoch": 2.326164260314468, "grad_norm": 5.175674915313721, "learning_rate": 5.5479792250081384e-05, "loss": 2.2195409774780273, "memory(GiB)": 72.85, "step": 54295, "token_acc": 0.5, "train_speed(iter/s)": 0.672073 }, { "epoch": 2.326378475643717, "grad_norm": 4.671180725097656, "learning_rate": 5.547310296600746e-05, "loss": 1.9042509078979493, "memory(GiB)": 72.85, "step": 54300, "token_acc": 0.572463768115942, "train_speed(iter/s)": 0.672071 }, { "epoch": 2.326592690972966, "grad_norm": 4.966187953948975, "learning_rate": 5.546641358278314e-05, "loss": 2.2893577575683595, "memory(GiB)": 72.85, "step": 54305, "token_acc": 0.5271565495207667, "train_speed(iter/s)": 0.672064 }, { "epoch": 2.326806906302215, "grad_norm": 6.552312850952148, "learning_rate": 5.545972410052961e-05, "loss": 2.167201614379883, "memory(GiB)": 72.85, "step": 54310, "token_acc": 0.5412844036697247, "train_speed(iter/s)": 0.672072 }, { "epoch": 2.327021121631464, "grad_norm": 18.56194496154785, "learning_rate": 5.545303451936806e-05, "loss": 2.204891395568848, "memory(GiB)": 72.85, "step": 54315, "token_acc": 0.5031446540880503, "train_speed(iter/s)": 0.672081 }, { "epoch": 2.327235336960713, "grad_norm": 4.833926200866699, "learning_rate": 5.5446344839419685e-05, "loss": 2.3748104095458986, "memory(GiB)": 72.85, "step": 54320, "token_acc": 0.46875, "train_speed(iter/s)": 0.672092 }, { "epoch": 2.327449552289962, "grad_norm": 7.171981334686279, "learning_rate": 5.5439655060805636e-05, "loss": 2.202268600463867, "memory(GiB)": 72.85, "step": 54325, "token_acc": 0.5617529880478087, "train_speed(iter/s)": 0.672101 }, { "epoch": 2.3276637676192107, "grad_norm": 5.995997905731201, "learning_rate": 5.543296518364716e-05, "loss": 2.068966293334961, "memory(GiB)": 72.85, "step": 54330, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672094 }, { "epoch": 2.32787798294846, "grad_norm": 4.480483055114746, 
"learning_rate": 5.5426275208065403e-05, "loss": 2.360034942626953, "memory(GiB)": 72.85, "step": 54335, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 0.672095 }, { "epoch": 2.3280921982777087, "grad_norm": 5.257262706756592, "learning_rate": 5.541958513418159e-05, "loss": 2.2892644882202147, "memory(GiB)": 72.85, "step": 54340, "token_acc": 0.4879725085910653, "train_speed(iter/s)": 0.672089 }, { "epoch": 2.3283064136069576, "grad_norm": 4.94626522064209, "learning_rate": 5.5412894962116904e-05, "loss": 2.079583168029785, "memory(GiB)": 72.85, "step": 54345, "token_acc": 0.5207667731629393, "train_speed(iter/s)": 0.672083 }, { "epoch": 2.328520628936207, "grad_norm": 4.669886112213135, "learning_rate": 5.540620469199255e-05, "loss": 1.980619239807129, "memory(GiB)": 72.85, "step": 54350, "token_acc": 0.5783582089552238, "train_speed(iter/s)": 0.672066 }, { "epoch": 2.3287348442654556, "grad_norm": 4.793080806732178, "learning_rate": 5.539951432392972e-05, "loss": 2.2513084411621094, "memory(GiB)": 72.85, "step": 54355, "token_acc": 0.50920245398773, "train_speed(iter/s)": 0.672074 }, { "epoch": 2.3289490595947044, "grad_norm": 4.506133079528809, "learning_rate": 5.5392823858049646e-05, "loss": 1.9900016784667969, "memory(GiB)": 72.85, "step": 54360, "token_acc": 0.5234899328859061, "train_speed(iter/s)": 0.672076 }, { "epoch": 2.3291632749239537, "grad_norm": 5.349934101104736, "learning_rate": 5.538613329447348e-05, "loss": 2.107895088195801, "memory(GiB)": 72.85, "step": 54365, "token_acc": 0.5141700404858299, "train_speed(iter/s)": 0.672081 }, { "epoch": 2.3293774902532025, "grad_norm": 5.754432678222656, "learning_rate": 5.5379442633322484e-05, "loss": 2.1013864517211913, "memory(GiB)": 72.85, "step": 54370, "token_acc": 0.5365079365079365, "train_speed(iter/s)": 0.672077 }, { "epoch": 2.3295917055824513, "grad_norm": 4.766361236572266, "learning_rate": 5.537275187471783e-05, "loss": 2.4459938049316405, "memory(GiB)": 72.85, "step": 54375, 
"token_acc": 0.4716981132075472, "train_speed(iter/s)": 0.672068 }, { "epoch": 2.3298059209117006, "grad_norm": 5.249317169189453, "learning_rate": 5.536606101878071e-05, "loss": 2.2514392852783205, "memory(GiB)": 72.85, "step": 54380, "token_acc": 0.5147540983606558, "train_speed(iter/s)": 0.672071 }, { "epoch": 2.3300201362409494, "grad_norm": 3.837462902069092, "learning_rate": 5.535937006563239e-05, "loss": 2.2523374557495117, "memory(GiB)": 72.85, "step": 54385, "token_acc": 0.55, "train_speed(iter/s)": 0.672074 }, { "epoch": 2.330234351570198, "grad_norm": 4.107449054718018, "learning_rate": 5.535267901539405e-05, "loss": 2.0053672790527344, "memory(GiB)": 72.85, "step": 54390, "token_acc": 0.5376344086021505, "train_speed(iter/s)": 0.672068 }, { "epoch": 2.3304485668994475, "grad_norm": 6.458109378814697, "learning_rate": 5.5345987868186897e-05, "loss": 2.1474905014038086, "memory(GiB)": 72.85, "step": 54395, "token_acc": 0.5488215488215489, "train_speed(iter/s)": 0.672068 }, { "epoch": 2.3306627822286963, "grad_norm": 4.329232215881348, "learning_rate": 5.5339296624132164e-05, "loss": 2.0331459045410156, "memory(GiB)": 72.85, "step": 54400, "token_acc": 0.5471014492753623, "train_speed(iter/s)": 0.672068 }, { "epoch": 2.330876997557945, "grad_norm": 8.755556106567383, "learning_rate": 5.533260528335107e-05, "loss": 2.1910045623779295, "memory(GiB)": 72.85, "step": 54405, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.672052 }, { "epoch": 2.3310912128871943, "grad_norm": 5.112955093383789, "learning_rate": 5.5325913845964826e-05, "loss": 2.4357351303100585, "memory(GiB)": 72.85, "step": 54410, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.672055 }, { "epoch": 2.331305428216443, "grad_norm": 5.518373489379883, "learning_rate": 5.5319222312094654e-05, "loss": 2.3987567901611326, "memory(GiB)": 72.85, "step": 54415, "token_acc": 0.5032894736842105, "train_speed(iter/s)": 0.672063 }, { "epoch": 2.331519643545692, "grad_norm": 
3.745623826980591, "learning_rate": 5.531253068186177e-05, "loss": 2.265367126464844, "memory(GiB)": 72.85, "step": 54420, "token_acc": 0.4840764331210191, "train_speed(iter/s)": 0.672057 }, { "epoch": 2.3317338588749412, "grad_norm": 5.1187663078308105, "learning_rate": 5.530583895538742e-05, "loss": 2.215804862976074, "memory(GiB)": 72.85, "step": 54425, "token_acc": 0.5092592592592593, "train_speed(iter/s)": 0.672051 }, { "epoch": 2.33194807420419, "grad_norm": 4.77479887008667, "learning_rate": 5.529914713279283e-05, "loss": 2.34163875579834, "memory(GiB)": 72.85, "step": 54430, "token_acc": 0.5198675496688742, "train_speed(iter/s)": 0.672054 }, { "epoch": 2.332162289533439, "grad_norm": 4.580870628356934, "learning_rate": 5.529245521419921e-05, "loss": 2.5729497909545898, "memory(GiB)": 72.85, "step": 54435, "token_acc": 0.4720670391061452, "train_speed(iter/s)": 0.672051 }, { "epoch": 2.332376504862688, "grad_norm": 6.1901726722717285, "learning_rate": 5.528576319972779e-05, "loss": 2.279539680480957, "memory(GiB)": 72.85, "step": 54440, "token_acc": 0.5115384615384615, "train_speed(iter/s)": 0.672054 }, { "epoch": 2.332590720191937, "grad_norm": 4.550880432128906, "learning_rate": 5.5279071089499823e-05, "loss": 2.3295448303222654, "memory(GiB)": 72.85, "step": 54445, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.672036 }, { "epoch": 2.3328049355211857, "grad_norm": 4.263986587524414, "learning_rate": 5.527237888363652e-05, "loss": 2.3363365173339843, "memory(GiB)": 72.85, "step": 54450, "token_acc": 0.47719298245614034, "train_speed(iter/s)": 0.672042 }, { "epoch": 2.333019150850435, "grad_norm": 4.456645488739014, "learning_rate": 5.526568658225913e-05, "loss": 2.298494338989258, "memory(GiB)": 72.85, "step": 54455, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.672046 }, { "epoch": 2.333233366179684, "grad_norm": 5.213571071624756, "learning_rate": 5.52589941854889e-05, "loss": 2.4702117919921873, "memory(GiB)": 72.85, "step": 
54460, "token_acc": 0.4788732394366197, "train_speed(iter/s)": 0.67204 }, { "epoch": 2.3334475815089326, "grad_norm": 4.276248931884766, "learning_rate": 5.525230169344705e-05, "loss": 2.2078855514526365, "memory(GiB)": 72.85, "step": 54465, "token_acc": 0.5145631067961165, "train_speed(iter/s)": 0.672015 }, { "epoch": 2.333661796838182, "grad_norm": 5.382406234741211, "learning_rate": 5.5245609106254825e-05, "loss": 2.1160022735595705, "memory(GiB)": 72.85, "step": 54470, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672007 }, { "epoch": 2.3338760121674307, "grad_norm": 5.239155292510986, "learning_rate": 5.523891642403348e-05, "loss": 2.665493392944336, "memory(GiB)": 72.85, "step": 54475, "token_acc": 0.47289156626506024, "train_speed(iter/s)": 0.671994 }, { "epoch": 2.3340902274966795, "grad_norm": 4.088633060455322, "learning_rate": 5.5232223646904235e-05, "loss": 2.032697296142578, "memory(GiB)": 72.85, "step": 54480, "token_acc": 0.5568627450980392, "train_speed(iter/s)": 0.672006 }, { "epoch": 2.3343044428259287, "grad_norm": 4.455254077911377, "learning_rate": 5.522553077498837e-05, "loss": 2.2547847747802736, "memory(GiB)": 72.85, "step": 54485, "token_acc": 0.5084175084175084, "train_speed(iter/s)": 0.672003 }, { "epoch": 2.3345186581551776, "grad_norm": 4.497067451477051, "learning_rate": 5.5218837808407095e-05, "loss": 2.1438457489013674, "memory(GiB)": 72.85, "step": 54490, "token_acc": 0.5033783783783784, "train_speed(iter/s)": 0.672017 }, { "epoch": 2.3347328734844264, "grad_norm": 4.380869388580322, "learning_rate": 5.521214474728167e-05, "loss": 2.2708221435546876, "memory(GiB)": 72.85, "step": 54495, "token_acc": 0.5345454545454545, "train_speed(iter/s)": 0.672017 }, { "epoch": 2.3349470888136756, "grad_norm": 5.4137749671936035, "learning_rate": 5.520545159173337e-05, "loss": 2.283961868286133, "memory(GiB)": 72.85, "step": 54500, "token_acc": 0.5145985401459854, "train_speed(iter/s)": 0.67201 }, { "epoch": 2.3349470888136756, 
"eval_loss": 2.0469062328338623, "eval_runtime": 14.9849, "eval_samples_per_second": 6.673, "eval_steps_per_second": 6.673, "eval_token_acc": 0.4742857142857143, "step": 54500 }, { "epoch": 2.3351613041429244, "grad_norm": 5.13449764251709, "learning_rate": 5.519875834188344e-05, "loss": 2.3824424743652344, "memory(GiB)": 72.85, "step": 54505, "token_acc": 0.4777251184834123, "train_speed(iter/s)": 0.67187 }, { "epoch": 2.3353755194721733, "grad_norm": 3.92769455909729, "learning_rate": 5.51920649978531e-05, "loss": 2.268354797363281, "memory(GiB)": 72.85, "step": 54510, "token_acc": 0.5188679245283019, "train_speed(iter/s)": 0.671864 }, { "epoch": 2.3355897348014225, "grad_norm": 5.110614776611328, "learning_rate": 5.518537155976366e-05, "loss": 2.20519905090332, "memory(GiB)": 72.85, "step": 54515, "token_acc": 0.5081967213114754, "train_speed(iter/s)": 0.671854 }, { "epoch": 2.3358039501306713, "grad_norm": 4.1627960205078125, "learning_rate": 5.517867802773633e-05, "loss": 2.2167116165161134, "memory(GiB)": 72.85, "step": 54520, "token_acc": 0.5, "train_speed(iter/s)": 0.671846 }, { "epoch": 2.33601816545992, "grad_norm": 4.642875671386719, "learning_rate": 5.5171984401892396e-05, "loss": 2.323113441467285, "memory(GiB)": 72.85, "step": 54525, "token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.671856 }, { "epoch": 2.3362323807891694, "grad_norm": 7.607846736907959, "learning_rate": 5.51652906823531e-05, "loss": 2.3239479064941406, "memory(GiB)": 72.85, "step": 54530, "token_acc": 0.4831804281345566, "train_speed(iter/s)": 0.671868 }, { "epoch": 2.336446596118418, "grad_norm": 5.629887580871582, "learning_rate": 5.515859686923973e-05, "loss": 2.1590560913085937, "memory(GiB)": 72.85, "step": 54535, "token_acc": 0.5138461538461538, "train_speed(iter/s)": 0.671862 }, { "epoch": 2.336660811447667, "grad_norm": 5.269172668457031, "learning_rate": 5.515190296267354e-05, "loss": 2.3610801696777344, "memory(GiB)": 72.85, "step": 54540, "token_acc": 
0.49140893470790376, "train_speed(iter/s)": 0.671864 }, { "epoch": 2.3368750267769163, "grad_norm": 5.185547351837158, "learning_rate": 5.5145208962775795e-05, "loss": 2.4502985000610353, "memory(GiB)": 72.85, "step": 54545, "token_acc": 0.4778156996587031, "train_speed(iter/s)": 0.671866 }, { "epoch": 2.337089242106165, "grad_norm": 6.420220851898193, "learning_rate": 5.513851486966777e-05, "loss": 2.1249237060546875, "memory(GiB)": 72.85, "step": 54550, "token_acc": 0.49096385542168675, "train_speed(iter/s)": 0.67186 }, { "epoch": 2.337303457435414, "grad_norm": 5.818902492523193, "learning_rate": 5.513182068347072e-05, "loss": 2.4094770431518553, "memory(GiB)": 72.85, "step": 54555, "token_acc": 0.47540983606557374, "train_speed(iter/s)": 0.671847 }, { "epoch": 2.337517672764663, "grad_norm": 3.9097378253936768, "learning_rate": 5.512512640430592e-05, "loss": 2.332346725463867, "memory(GiB)": 72.85, "step": 54560, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.67186 }, { "epoch": 2.337731888093912, "grad_norm": 4.861849784851074, "learning_rate": 5.511843203229464e-05, "loss": 2.2707271575927734, "memory(GiB)": 72.85, "step": 54565, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.671851 }, { "epoch": 2.337946103423161, "grad_norm": 4.445791244506836, "learning_rate": 5.511173756755818e-05, "loss": 2.191531181335449, "memory(GiB)": 72.85, "step": 54570, "token_acc": 0.554140127388535, "train_speed(iter/s)": 0.671859 }, { "epoch": 2.33816031875241, "grad_norm": 4.371007442474365, "learning_rate": 5.510504301021779e-05, "loss": 2.089335632324219, "memory(GiB)": 72.85, "step": 54575, "token_acc": 0.5179856115107914, "train_speed(iter/s)": 0.671863 }, { "epoch": 2.338374534081659, "grad_norm": 5.487183094024658, "learning_rate": 5.509834836039476e-05, "loss": 2.144367218017578, "memory(GiB)": 72.85, "step": 54580, "token_acc": 0.532520325203252, "train_speed(iter/s)": 0.671875 }, { "epoch": 2.3385887494109077, "grad_norm": 6.68305778503418, 
"learning_rate": 5.509165361821036e-05, "loss": 2.1622695922851562, "memory(GiB)": 72.85, "step": 54585, "token_acc": 0.5102880658436214, "train_speed(iter/s)": 0.671888 }, { "epoch": 2.338802964740157, "grad_norm": 4.299976825714111, "learning_rate": 5.508495878378589e-05, "loss": 1.9914016723632812, "memory(GiB)": 72.85, "step": 54590, "token_acc": 0.5531914893617021, "train_speed(iter/s)": 0.671891 }, { "epoch": 2.3390171800694057, "grad_norm": 5.394361972808838, "learning_rate": 5.5078263857242604e-05, "loss": 2.121824264526367, "memory(GiB)": 72.85, "step": 54595, "token_acc": 0.5513307984790875, "train_speed(iter/s)": 0.671877 }, { "epoch": 2.3392313953986545, "grad_norm": 5.290164947509766, "learning_rate": 5.5071568838701825e-05, "loss": 2.149473190307617, "memory(GiB)": 72.85, "step": 54600, "token_acc": 0.4908424908424908, "train_speed(iter/s)": 0.671883 }, { "epoch": 2.339445610727904, "grad_norm": 4.351294040679932, "learning_rate": 5.5064873728284804e-05, "loss": 1.966853141784668, "memory(GiB)": 72.85, "step": 54605, "token_acc": 0.5165289256198347, "train_speed(iter/s)": 0.671884 }, { "epoch": 2.3396598260571526, "grad_norm": 4.19543981552124, "learning_rate": 5.505817852611286e-05, "loss": 2.0454376220703123, "memory(GiB)": 72.85, "step": 54610, "token_acc": 0.5457875457875457, "train_speed(iter/s)": 0.671878 }, { "epoch": 2.3398740413864014, "grad_norm": 4.067371368408203, "learning_rate": 5.505148323230724e-05, "loss": 2.1910648345947266, "memory(GiB)": 72.85, "step": 54615, "token_acc": 0.55625, "train_speed(iter/s)": 0.671886 }, { "epoch": 2.3400882567156507, "grad_norm": 4.809296607971191, "learning_rate": 5.50447878469893e-05, "loss": 1.9877447128295898, "memory(GiB)": 72.85, "step": 54620, "token_acc": 0.5740072202166066, "train_speed(iter/s)": 0.671887 }, { "epoch": 2.3403024720448995, "grad_norm": 5.035502910614014, "learning_rate": 5.503809237028026e-05, "loss": 2.0828680038452148, "memory(GiB)": 72.85, "step": 54625, "token_acc": 
0.5245901639344263, "train_speed(iter/s)": 0.671886 }, { "epoch": 2.3405166873741483, "grad_norm": 4.244236946105957, "learning_rate": 5.503139680230147e-05, "loss": 2.1785654067993163, "memory(GiB)": 72.85, "step": 54630, "token_acc": 0.5196078431372549, "train_speed(iter/s)": 0.671886 }, { "epoch": 2.3407309027033976, "grad_norm": 7.45162296295166, "learning_rate": 5.502470114317422e-05, "loss": 2.3953981399536133, "memory(GiB)": 72.85, "step": 54635, "token_acc": 0.49230769230769234, "train_speed(iter/s)": 0.671898 }, { "epoch": 2.3409451180326464, "grad_norm": 5.084190845489502, "learning_rate": 5.501800539301976e-05, "loss": 2.336091995239258, "memory(GiB)": 72.85, "step": 54640, "token_acc": 0.5153846153846153, "train_speed(iter/s)": 0.671905 }, { "epoch": 2.341159333361895, "grad_norm": 5.105315685272217, "learning_rate": 5.501130955195944e-05, "loss": 2.091069984436035, "memory(GiB)": 72.85, "step": 54645, "token_acc": 0.5563139931740614, "train_speed(iter/s)": 0.671903 }, { "epoch": 2.3413735486911444, "grad_norm": 6.231334209442139, "learning_rate": 5.5004613620114556e-05, "loss": 2.0442638397216797, "memory(GiB)": 72.85, "step": 54650, "token_acc": 0.5096153846153846, "train_speed(iter/s)": 0.671924 }, { "epoch": 2.3415877640203933, "grad_norm": 5.499350070953369, "learning_rate": 5.4997917597606394e-05, "loss": 2.085149574279785, "memory(GiB)": 72.85, "step": 54655, "token_acc": 0.5015479876160991, "train_speed(iter/s)": 0.671936 }, { "epoch": 2.341801979349642, "grad_norm": 6.0076985359191895, "learning_rate": 5.4991221484556264e-05, "loss": 2.4922918319702148, "memory(GiB)": 72.85, "step": 54660, "token_acc": 0.46179401993355484, "train_speed(iter/s)": 0.671948 }, { "epoch": 2.3420161946788913, "grad_norm": 4.680881500244141, "learning_rate": 5.498452528108549e-05, "loss": 2.4306583404541016, "memory(GiB)": 72.85, "step": 54665, "token_acc": 0.5234657039711191, "train_speed(iter/s)": 0.671968 }, { "epoch": 2.34223041000814, "grad_norm": 
5.025435924530029, "learning_rate": 5.497782898731535e-05, "loss": 2.3554555892944338, "memory(GiB)": 72.85, "step": 54670, "token_acc": 0.5109489051094891, "train_speed(iter/s)": 0.671978 }, { "epoch": 2.342444625337389, "grad_norm": 5.081656455993652, "learning_rate": 5.497113260336717e-05, "loss": 2.3665868759155275, "memory(GiB)": 72.85, "step": 54675, "token_acc": 0.48338368580060426, "train_speed(iter/s)": 0.671967 }, { "epoch": 2.342658840666638, "grad_norm": 4.745852947235107, "learning_rate": 5.4964436129362264e-05, "loss": 2.1985769271850586, "memory(GiB)": 72.85, "step": 54680, "token_acc": 0.5138339920948617, "train_speed(iter/s)": 0.671985 }, { "epoch": 2.342873055995887, "grad_norm": 6.445450305938721, "learning_rate": 5.495773956542193e-05, "loss": 2.5250665664672853, "memory(GiB)": 72.85, "step": 54685, "token_acc": 0.5097276264591439, "train_speed(iter/s)": 0.671996 }, { "epoch": 2.343087271325136, "grad_norm": 4.542158603668213, "learning_rate": 5.495104291166751e-05, "loss": 2.069156265258789, "memory(GiB)": 72.85, "step": 54690, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.671994 }, { "epoch": 2.343301486654385, "grad_norm": 5.792975902557373, "learning_rate": 5.49443461682203e-05, "loss": 2.290990447998047, "memory(GiB)": 72.85, "step": 54695, "token_acc": 0.5278688524590164, "train_speed(iter/s)": 0.671998 }, { "epoch": 2.343515701983634, "grad_norm": 5.276786804199219, "learning_rate": 5.49376493352016e-05, "loss": 2.5887758255004885, "memory(GiB)": 72.85, "step": 54700, "token_acc": 0.4563953488372093, "train_speed(iter/s)": 0.672 }, { "epoch": 2.3437299173128827, "grad_norm": 6.739496231079102, "learning_rate": 5.493095241273277e-05, "loss": 2.535645294189453, "memory(GiB)": 72.85, "step": 54705, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.67198 }, { "epoch": 2.343944132642132, "grad_norm": 6.114837169647217, "learning_rate": 5.49242554009351e-05, "loss": 2.405266761779785, "memory(GiB)": 72.85, "step": 54710, 
"token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.671976 }, { "epoch": 2.344158347971381, "grad_norm": 5.139735221862793, "learning_rate": 5.491755829992993e-05, "loss": 2.5532867431640627, "memory(GiB)": 72.85, "step": 54715, "token_acc": 0.4875444839857651, "train_speed(iter/s)": 0.671983 }, { "epoch": 2.34437256330063, "grad_norm": 4.910082817077637, "learning_rate": 5.491086110983859e-05, "loss": 2.433390426635742, "memory(GiB)": 72.85, "step": 54720, "token_acc": 0.4950166112956811, "train_speed(iter/s)": 0.671989 }, { "epoch": 2.344586778629879, "grad_norm": 5.266520977020264, "learning_rate": 5.490416383078238e-05, "loss": 2.0047527313232423, "memory(GiB)": 72.85, "step": 54725, "token_acc": 0.5527272727272727, "train_speed(iter/s)": 0.671996 }, { "epoch": 2.3448009939591277, "grad_norm": 4.938716888427734, "learning_rate": 5.489746646288264e-05, "loss": 2.050453758239746, "memory(GiB)": 72.85, "step": 54730, "token_acc": 0.5766129032258065, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.345015209288377, "grad_norm": 4.548603057861328, "learning_rate": 5.4890769006260713e-05, "loss": 2.0320858001708983, "memory(GiB)": 72.85, "step": 54735, "token_acc": 0.5562913907284768, "train_speed(iter/s)": 0.672005 }, { "epoch": 2.3452294246176257, "grad_norm": 4.457573413848877, "learning_rate": 5.48840714610379e-05, "loss": 2.145737075805664, "memory(GiB)": 72.85, "step": 54740, "token_acc": 0.5071942446043165, "train_speed(iter/s)": 0.672017 }, { "epoch": 2.3454436399468745, "grad_norm": 7.291011333465576, "learning_rate": 5.4877373827335566e-05, "loss": 2.509490966796875, "memory(GiB)": 72.85, "step": 54745, "token_acc": 0.5175097276264592, "train_speed(iter/s)": 0.672024 }, { "epoch": 2.345657855276124, "grad_norm": 4.466257095336914, "learning_rate": 5.487067610527502e-05, "loss": 2.335959053039551, "memory(GiB)": 72.85, "step": 54750, "token_acc": 0.5276752767527675, "train_speed(iter/s)": 0.672019 }, { "epoch": 2.3458720706053726, "grad_norm": 
6.507198810577393, "learning_rate": 5.48639782949776e-05, "loss": 2.171578216552734, "memory(GiB)": 72.85, "step": 54755, "token_acc": 0.5791139240506329, "train_speed(iter/s)": 0.67202 }, { "epoch": 2.3460862859346214, "grad_norm": 5.038837432861328, "learning_rate": 5.485728039656467e-05, "loss": 2.5915992736816404, "memory(GiB)": 72.85, "step": 54760, "token_acc": 0.48231511254019294, "train_speed(iter/s)": 0.672011 }, { "epoch": 2.3463005012638707, "grad_norm": 5.5085978507995605, "learning_rate": 5.485058241015755e-05, "loss": 2.3550642013549803, "memory(GiB)": 72.85, "step": 54765, "token_acc": 0.4788273615635179, "train_speed(iter/s)": 0.672003 }, { "epoch": 2.3465147165931195, "grad_norm": 5.081210613250732, "learning_rate": 5.484388433587756e-05, "loss": 2.249683380126953, "memory(GiB)": 72.85, "step": 54770, "token_acc": 0.5389830508474577, "train_speed(iter/s)": 0.672017 }, { "epoch": 2.3467289319223683, "grad_norm": 4.708942890167236, "learning_rate": 5.483718617384608e-05, "loss": 2.287493133544922, "memory(GiB)": 72.85, "step": 54775, "token_acc": 0.4696969696969697, "train_speed(iter/s)": 0.672018 }, { "epoch": 2.3469431472516176, "grad_norm": 4.644040584564209, "learning_rate": 5.4830487924184436e-05, "loss": 2.1341365814208983, "memory(GiB)": 72.85, "step": 54780, "token_acc": 0.5471014492753623, "train_speed(iter/s)": 0.672035 }, { "epoch": 2.3471573625808664, "grad_norm": 6.170173168182373, "learning_rate": 5.482378958701395e-05, "loss": 2.130838394165039, "memory(GiB)": 72.85, "step": 54785, "token_acc": 0.5971563981042654, "train_speed(iter/s)": 0.672023 }, { "epoch": 2.347371577910115, "grad_norm": 5.569472789764404, "learning_rate": 5.4817091162456e-05, "loss": 2.25378532409668, "memory(GiB)": 72.85, "step": 54790, "token_acc": 0.5406360424028268, "train_speed(iter/s)": 0.672026 }, { "epoch": 2.3475857932393644, "grad_norm": 5.183731555938721, "learning_rate": 5.481039265063194e-05, "loss": 2.2133344650268554, "memory(GiB)": 72.85, "step": 
54795, "token_acc": 0.5720720720720721, "train_speed(iter/s)": 0.672029 }, { "epoch": 2.3478000085686133, "grad_norm": 5.563267230987549, "learning_rate": 5.480369405166309e-05, "loss": 2.3163780212402343, "memory(GiB)": 72.85, "step": 54800, "token_acc": 0.5171232876712328, "train_speed(iter/s)": 0.672034 }, { "epoch": 2.348014223897862, "grad_norm": 4.145936489105225, "learning_rate": 5.479699536567082e-05, "loss": 2.2108739852905273, "memory(GiB)": 72.85, "step": 54805, "token_acc": 0.49586776859504134, "train_speed(iter/s)": 0.672041 }, { "epoch": 2.3482284392271113, "grad_norm": 7.458123207092285, "learning_rate": 5.479029659277648e-05, "loss": 2.119597625732422, "memory(GiB)": 72.85, "step": 54810, "token_acc": 0.5120274914089347, "train_speed(iter/s)": 0.672043 }, { "epoch": 2.34844265455636, "grad_norm": 4.702911853790283, "learning_rate": 5.478359773310142e-05, "loss": 2.304665374755859, "memory(GiB)": 72.85, "step": 54815, "token_acc": 0.518796992481203, "train_speed(iter/s)": 0.672046 }, { "epoch": 2.348656869885609, "grad_norm": 6.330049514770508, "learning_rate": 5.477689878676701e-05, "loss": 2.113249588012695, "memory(GiB)": 72.85, "step": 54820, "token_acc": 0.5242290748898678, "train_speed(iter/s)": 0.672054 }, { "epoch": 2.348871085214858, "grad_norm": 4.0495758056640625, "learning_rate": 5.477019975389458e-05, "loss": 2.3250303268432617, "memory(GiB)": 72.85, "step": 54825, "token_acc": 0.49393939393939396, "train_speed(iter/s)": 0.672045 }, { "epoch": 2.349085300544107, "grad_norm": 4.500280857086182, "learning_rate": 5.476350063460551e-05, "loss": 2.3928401947021483, "memory(GiB)": 72.85, "step": 54830, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.67205 }, { "epoch": 2.349299515873356, "grad_norm": 7.706442356109619, "learning_rate": 5.4756801429021165e-05, "loss": 2.1906402587890623, "memory(GiB)": 72.85, "step": 54835, "token_acc": 0.5141843971631206, "train_speed(iter/s)": 0.672045 }, { "epoch": 2.349513731202605, "grad_norm": 
4.620948314666748, "learning_rate": 5.4750102137262904e-05, "loss": 2.1869863510131835, "memory(GiB)": 72.85, "step": 54840, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.672035 }, { "epoch": 2.349727946531854, "grad_norm": 4.102917194366455, "learning_rate": 5.474340275945208e-05, "loss": 2.3341156005859376, "memory(GiB)": 72.85, "step": 54845, "token_acc": 0.48231511254019294, "train_speed(iter/s)": 0.672049 }, { "epoch": 2.3499421618611027, "grad_norm": 4.5956878662109375, "learning_rate": 5.4736703295710066e-05, "loss": 2.155588722229004, "memory(GiB)": 72.85, "step": 54850, "token_acc": 0.6053639846743295, "train_speed(iter/s)": 0.672062 }, { "epoch": 2.350156377190352, "grad_norm": 4.434629917144775, "learning_rate": 5.473000374615822e-05, "loss": 2.3529523849487304, "memory(GiB)": 72.85, "step": 54855, "token_acc": 0.5133333333333333, "train_speed(iter/s)": 0.672046 }, { "epoch": 2.350370592519601, "grad_norm": 4.908963680267334, "learning_rate": 5.472330411091794e-05, "loss": 2.312088394165039, "memory(GiB)": 72.85, "step": 54860, "token_acc": 0.4983164983164983, "train_speed(iter/s)": 0.672045 }, { "epoch": 2.3505848078488496, "grad_norm": 4.6078267097473145, "learning_rate": 5.4716604390110546e-05, "loss": 2.0699464797973635, "memory(GiB)": 72.85, "step": 54865, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.672035 }, { "epoch": 2.350799023178099, "grad_norm": 6.695245742797852, "learning_rate": 5.4709904583857466e-05, "loss": 2.2974082946777346, "memory(GiB)": 72.85, "step": 54870, "token_acc": 0.5207547169811321, "train_speed(iter/s)": 0.672027 }, { "epoch": 2.3510132385073477, "grad_norm": 5.203370571136475, "learning_rate": 5.470320469228003e-05, "loss": 2.131852722167969, "memory(GiB)": 72.85, "step": 54875, "token_acc": 0.5372549019607843, "train_speed(iter/s)": 0.672022 }, { "epoch": 2.3512274538365965, "grad_norm": 5.374200820922852, "learning_rate": 5.469650471549964e-05, "loss": 2.3804210662841796, "memory(GiB)": 72.85, 
"step": 54880, "token_acc": 0.4946236559139785, "train_speed(iter/s)": 0.672016 }, { "epoch": 2.3514416691658457, "grad_norm": 4.965192794799805, "learning_rate": 5.468980465363766e-05, "loss": 2.299755859375, "memory(GiB)": 72.85, "step": 54885, "token_acc": 0.5346938775510204, "train_speed(iter/s)": 0.67202 }, { "epoch": 2.3516558844950946, "grad_norm": 4.676595687866211, "learning_rate": 5.4683104506815465e-05, "loss": 2.1036188125610353, "memory(GiB)": 72.85, "step": 54890, "token_acc": 0.5015873015873016, "train_speed(iter/s)": 0.672003 }, { "epoch": 2.3518700998243434, "grad_norm": 5.108208179473877, "learning_rate": 5.467640427515443e-05, "loss": 2.4801794052124024, "memory(GiB)": 72.85, "step": 54895, "token_acc": 0.45692883895131087, "train_speed(iter/s)": 0.672001 }, { "epoch": 2.3520843151535926, "grad_norm": 3.905526876449585, "learning_rate": 5.466970395877595e-05, "loss": 2.0400753021240234, "memory(GiB)": 72.85, "step": 54900, "token_acc": 0.5302013422818792, "train_speed(iter/s)": 0.672013 }, { "epoch": 2.3522985304828414, "grad_norm": 4.179747581481934, "learning_rate": 5.466300355780141e-05, "loss": 2.1317617416381838, "memory(GiB)": 72.85, "step": 54905, "token_acc": 0.5171232876712328, "train_speed(iter/s)": 0.672001 }, { "epoch": 2.3525127458120902, "grad_norm": 4.306671619415283, "learning_rate": 5.465630307235219e-05, "loss": 2.185190963745117, "memory(GiB)": 72.85, "step": 54910, "token_acc": 0.5069444444444444, "train_speed(iter/s)": 0.671977 }, { "epoch": 2.3527269611413395, "grad_norm": 4.6681294441223145, "learning_rate": 5.464960250254966e-05, "loss": 2.3757028579711914, "memory(GiB)": 72.85, "step": 54915, "token_acc": 0.49854227405247814, "train_speed(iter/s)": 0.671967 }, { "epoch": 2.3529411764705883, "grad_norm": 4.350349426269531, "learning_rate": 5.464290184851523e-05, "loss": 1.9243417739868165, "memory(GiB)": 72.85, "step": 54920, "token_acc": 0.5877551020408164, "train_speed(iter/s)": 0.671971 }, { "epoch": 2.353155391799837, 
"grad_norm": 5.2511210441589355, "learning_rate": 5.463620111037026e-05, "loss": 2.3159225463867186, "memory(GiB)": 72.85, "step": 54925, "token_acc": 0.5309446254071661, "train_speed(iter/s)": 0.671983 }, { "epoch": 2.3533696071290864, "grad_norm": 4.14105224609375, "learning_rate": 5.462950028823617e-05, "loss": 2.526894187927246, "memory(GiB)": 72.85, "step": 54930, "token_acc": 0.4935897435897436, "train_speed(iter/s)": 0.671992 }, { "epoch": 2.353583822458335, "grad_norm": 4.563291072845459, "learning_rate": 5.4622799382234336e-05, "loss": 2.437611770629883, "memory(GiB)": 72.85, "step": 54935, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.671981 }, { "epoch": 2.353798037787584, "grad_norm": 6.728548526763916, "learning_rate": 5.461609839248616e-05, "loss": 2.013010787963867, "memory(GiB)": 72.85, "step": 54940, "token_acc": 0.5863309352517986, "train_speed(iter/s)": 0.671987 }, { "epoch": 2.3540122531168333, "grad_norm": 4.4177565574646, "learning_rate": 5.460939731911302e-05, "loss": 2.350900650024414, "memory(GiB)": 72.85, "step": 54945, "token_acc": 0.490272373540856, "train_speed(iter/s)": 0.671987 }, { "epoch": 2.354226468446082, "grad_norm": 5.0706329345703125, "learning_rate": 5.460269616223634e-05, "loss": 2.04672908782959, "memory(GiB)": 72.85, "step": 54950, "token_acc": 0.5368421052631579, "train_speed(iter/s)": 0.671988 }, { "epoch": 2.354440683775331, "grad_norm": 6.865579605102539, "learning_rate": 5.45959949219775e-05, "loss": 2.5244171142578127, "memory(GiB)": 72.85, "step": 54955, "token_acc": 0.48507462686567165, "train_speed(iter/s)": 0.671992 }, { "epoch": 2.35465489910458, "grad_norm": 4.646468162536621, "learning_rate": 5.458929359845789e-05, "loss": 2.021076202392578, "memory(GiB)": 72.85, "step": 54960, "token_acc": 0.5435540069686411, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.354869114433829, "grad_norm": 4.541244029998779, "learning_rate": 5.458259219179893e-05, "loss": 2.3321506500244142, "memory(GiB)": 72.85, 
"step": 54965, "token_acc": 0.49221183800623053, "train_speed(iter/s)": 0.672006 }, { "epoch": 2.3550833297630778, "grad_norm": 4.7588372230529785, "learning_rate": 5.4575890702122e-05, "loss": 2.4807083129882814, "memory(GiB)": 72.85, "step": 54970, "token_acc": 0.46273291925465837, "train_speed(iter/s)": 0.672011 }, { "epoch": 2.355297545092327, "grad_norm": 4.339850902557373, "learning_rate": 5.456918912954853e-05, "loss": 2.190542221069336, "memory(GiB)": 72.85, "step": 54975, "token_acc": 0.5223367697594502, "train_speed(iter/s)": 0.672012 }, { "epoch": 2.355511760421576, "grad_norm": 6.078873634338379, "learning_rate": 5.4562487474199906e-05, "loss": 2.496765899658203, "memory(GiB)": 72.85, "step": 54980, "token_acc": 0.4861111111111111, "train_speed(iter/s)": 0.671992 }, { "epoch": 2.3557259757508247, "grad_norm": 5.81121301651001, "learning_rate": 5.455578573619755e-05, "loss": 2.4163959503173826, "memory(GiB)": 72.85, "step": 54985, "token_acc": 0.49415204678362573, "train_speed(iter/s)": 0.671971 }, { "epoch": 2.355940191080074, "grad_norm": 4.29484748840332, "learning_rate": 5.4549083915662846e-05, "loss": 2.3522010803222657, "memory(GiB)": 72.85, "step": 54990, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.671964 }, { "epoch": 2.3561544064093227, "grad_norm": 5.209320545196533, "learning_rate": 5.454238201271724e-05, "loss": 2.316489601135254, "memory(GiB)": 72.85, "step": 54995, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.671972 }, { "epoch": 2.3563686217385715, "grad_norm": 4.646145820617676, "learning_rate": 5.4535680027482105e-05, "loss": 2.0507965087890625, "memory(GiB)": 72.85, "step": 55000, "token_acc": 0.5201465201465202, "train_speed(iter/s)": 0.671973 }, { "epoch": 2.3563686217385715, "eval_loss": 2.053711175918579, "eval_runtime": 15.464, "eval_samples_per_second": 6.467, "eval_steps_per_second": 6.467, "eval_token_acc": 0.503052503052503, "step": 55000 }, { "epoch": 2.356582837067821, "grad_norm": 6.07608699798584, 
"learning_rate": 5.452897796007889e-05, "loss": 2.552115821838379, "memory(GiB)": 72.85, "step": 55005, "token_acc": 0.49723247232472323, "train_speed(iter/s)": 0.67183 }, { "epoch": 2.3567970523970696, "grad_norm": 6.622191905975342, "learning_rate": 5.4522275810628965e-05, "loss": 2.0616632461547852, "memory(GiB)": 72.85, "step": 55010, "token_acc": 0.5338983050847458, "train_speed(iter/s)": 0.671816 }, { "epoch": 2.3570112677263184, "grad_norm": 5.003321170806885, "learning_rate": 5.45155735792538e-05, "loss": 2.365060806274414, "memory(GiB)": 72.85, "step": 55015, "token_acc": 0.4967741935483871, "train_speed(iter/s)": 0.671821 }, { "epoch": 2.3572254830555677, "grad_norm": 4.166959762573242, "learning_rate": 5.4508871266074756e-05, "loss": 2.555000114440918, "memory(GiB)": 72.85, "step": 55020, "token_acc": 0.48125, "train_speed(iter/s)": 0.671826 }, { "epoch": 2.3574396983848165, "grad_norm": 4.892187595367432, "learning_rate": 5.45021688712133e-05, "loss": 2.4110170364379884, "memory(GiB)": 72.85, "step": 55025, "token_acc": 0.48580441640378547, "train_speed(iter/s)": 0.67182 }, { "epoch": 2.3576539137140653, "grad_norm": 4.3011016845703125, "learning_rate": 5.4495466394790815e-05, "loss": 1.9415340423583984, "memory(GiB)": 72.85, "step": 55030, "token_acc": 0.5841269841269842, "train_speed(iter/s)": 0.67182 }, { "epoch": 2.3578681290433146, "grad_norm": 4.44606876373291, "learning_rate": 5.448876383692876e-05, "loss": 2.3064548492431642, "memory(GiB)": 72.85, "step": 55035, "token_acc": 0.4769736842105263, "train_speed(iter/s)": 0.671815 }, { "epoch": 2.3580823443725634, "grad_norm": 4.006730556488037, "learning_rate": 5.448206119774853e-05, "loss": 2.149471092224121, "memory(GiB)": 72.85, "step": 55040, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.671811 }, { "epoch": 2.358296559701812, "grad_norm": 4.251765251159668, "learning_rate": 5.447535847737153e-05, "loss": 2.2443737030029296, "memory(GiB)": 72.85, "step": 55045, "token_acc": 
0.5469798657718121, "train_speed(iter/s)": 0.6718 }, { "epoch": 2.3585107750310614, "grad_norm": 4.487946510314941, "learning_rate": 5.446865567591925e-05, "loss": 2.2100221633911135, "memory(GiB)": 72.85, "step": 55050, "token_acc": 0.5479876160990712, "train_speed(iter/s)": 0.671802 }, { "epoch": 2.3587249903603102, "grad_norm": 4.808959484100342, "learning_rate": 5.446195279351307e-05, "loss": 2.031050682067871, "memory(GiB)": 72.85, "step": 55055, "token_acc": 0.5666666666666667, "train_speed(iter/s)": 0.67181 }, { "epoch": 2.358939205689559, "grad_norm": 4.499112606048584, "learning_rate": 5.4455249830274415e-05, "loss": 2.3682205200195314, "memory(GiB)": 72.85, "step": 55060, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.671818 }, { "epoch": 2.3591534210188083, "grad_norm": 8.313821792602539, "learning_rate": 5.444854678632475e-05, "loss": 2.1829551696777343, "memory(GiB)": 72.85, "step": 55065, "token_acc": 0.5040983606557377, "train_speed(iter/s)": 0.671818 }, { "epoch": 2.359367636348057, "grad_norm": 4.364389896392822, "learning_rate": 5.444184366178549e-05, "loss": 2.1693397521972657, "memory(GiB)": 72.85, "step": 55070, "token_acc": 0.5061728395061729, "train_speed(iter/s)": 0.671819 }, { "epoch": 2.359581851677306, "grad_norm": 5.060277462005615, "learning_rate": 5.4435140456778034e-05, "loss": 2.2573831558227537, "memory(GiB)": 72.85, "step": 55075, "token_acc": 0.5054945054945055, "train_speed(iter/s)": 0.671824 }, { "epoch": 2.359796067006555, "grad_norm": 5.6034674644470215, "learning_rate": 5.442843717142387e-05, "loss": 2.262038803100586, "memory(GiB)": 72.85, "step": 55080, "token_acc": 0.5051546391752577, "train_speed(iter/s)": 0.671817 }, { "epoch": 2.360010282335804, "grad_norm": 6.2356133460998535, "learning_rate": 5.442173380584441e-05, "loss": 2.3540319442749023, "memory(GiB)": 72.85, "step": 55085, "token_acc": 0.4788732394366197, "train_speed(iter/s)": 0.671828 }, { "epoch": 2.360224497665053, "grad_norm": 5.722043991088867, 
"learning_rate": 5.441503036016109e-05, "loss": 2.093079948425293, "memory(GiB)": 72.85, "step": 55090, "token_acc": 0.4980544747081712, "train_speed(iter/s)": 0.671817 }, { "epoch": 2.360438712994302, "grad_norm": 4.397799968719482, "learning_rate": 5.440832683449536e-05, "loss": 2.094002532958984, "memory(GiB)": 72.85, "step": 55095, "token_acc": 0.526813880126183, "train_speed(iter/s)": 0.671828 }, { "epoch": 2.360652928323551, "grad_norm": 4.620034694671631, "learning_rate": 5.440162322896866e-05, "loss": 2.409638786315918, "memory(GiB)": 72.85, "step": 55100, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.671824 }, { "epoch": 2.3608671436527997, "grad_norm": 5.101677894592285, "learning_rate": 5.439491954370241e-05, "loss": 2.0264514923095702, "memory(GiB)": 72.85, "step": 55105, "token_acc": 0.5490909090909091, "train_speed(iter/s)": 0.671816 }, { "epoch": 2.361081358982049, "grad_norm": 5.402584552764893, "learning_rate": 5.4388215778818074e-05, "loss": 2.260154151916504, "memory(GiB)": 72.85, "step": 55110, "token_acc": 0.4837662337662338, "train_speed(iter/s)": 0.671824 }, { "epoch": 2.3612955743112978, "grad_norm": 8.44939136505127, "learning_rate": 5.438151193443709e-05, "loss": 2.243111419677734, "memory(GiB)": 72.85, "step": 55115, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.671816 }, { "epoch": 2.3615097896405466, "grad_norm": 4.680634021759033, "learning_rate": 5.437480801068091e-05, "loss": 2.3222402572631835, "memory(GiB)": 72.85, "step": 55120, "token_acc": 0.5094339622641509, "train_speed(iter/s)": 0.671833 }, { "epoch": 2.361724004969796, "grad_norm": 6.257829666137695, "learning_rate": 5.4368104007670984e-05, "loss": 2.3163299560546875, "memory(GiB)": 72.85, "step": 55125, "token_acc": 0.48985507246376814, "train_speed(iter/s)": 0.671845 }, { "epoch": 2.3619382202990447, "grad_norm": 5.074399948120117, "learning_rate": 5.4361399925528766e-05, "loss": 2.299711990356445, "memory(GiB)": 72.85, "step": 55130, "token_acc": 
0.5755102040816327, "train_speed(iter/s)": 0.671854 }, { "epoch": 2.3621524356282935, "grad_norm": 6.726627349853516, "learning_rate": 5.4354695764375674e-05, "loss": 2.165460395812988, "memory(GiB)": 72.85, "step": 55135, "token_acc": 0.5401459854014599, "train_speed(iter/s)": 0.671858 }, { "epoch": 2.3623666509575427, "grad_norm": 6.183446884155273, "learning_rate": 5.43479915243332e-05, "loss": 2.161080741882324, "memory(GiB)": 72.85, "step": 55140, "token_acc": 0.5280898876404494, "train_speed(iter/s)": 0.671849 }, { "epoch": 2.3625808662867915, "grad_norm": 4.676015377044678, "learning_rate": 5.434128720552277e-05, "loss": 2.342816162109375, "memory(GiB)": 72.85, "step": 55145, "token_acc": 0.5136054421768708, "train_speed(iter/s)": 0.671847 }, { "epoch": 2.3627950816160403, "grad_norm": 5.832537651062012, "learning_rate": 5.433458280806586e-05, "loss": 2.4287973403930665, "memory(GiB)": 72.85, "step": 55150, "token_acc": 0.48698884758364314, "train_speed(iter/s)": 0.671857 }, { "epoch": 2.3630092969452896, "grad_norm": 4.121406555175781, "learning_rate": 5.4327878332083914e-05, "loss": 2.3195707321166994, "memory(GiB)": 72.85, "step": 55155, "token_acc": 0.47985347985347987, "train_speed(iter/s)": 0.671848 }, { "epoch": 2.3632235122745384, "grad_norm": 5.660274028778076, "learning_rate": 5.4321173777698385e-05, "loss": 2.4920543670654296, "memory(GiB)": 72.85, "step": 55160, "token_acc": 0.46794871794871795, "train_speed(iter/s)": 0.67184 }, { "epoch": 2.3634377276037872, "grad_norm": 5.078347206115723, "learning_rate": 5.431446914503074e-05, "loss": 2.3588703155517576, "memory(GiB)": 72.85, "step": 55165, "token_acc": 0.4584837545126354, "train_speed(iter/s)": 0.671845 }, { "epoch": 2.3636519429330365, "grad_norm": 4.172830581665039, "learning_rate": 5.430776443420245e-05, "loss": 2.3090343475341797, "memory(GiB)": 72.85, "step": 55170, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.67184 }, { "epoch": 2.3638661582622853, "grad_norm": 
4.888303756713867, "learning_rate": 5.430105964533495e-05, "loss": 2.187595748901367, "memory(GiB)": 72.85, "step": 55175, "token_acc": 0.4981949458483754, "train_speed(iter/s)": 0.671854 }, { "epoch": 2.364080373591534, "grad_norm": 4.113884449005127, "learning_rate": 5.429435477854974e-05, "loss": 2.202343559265137, "memory(GiB)": 72.85, "step": 55180, "token_acc": 0.5173611111111112, "train_speed(iter/s)": 0.67185 }, { "epoch": 2.3642945889207834, "grad_norm": 3.831634044647217, "learning_rate": 5.4287649833968256e-05, "loss": 2.312516784667969, "memory(GiB)": 72.85, "step": 55185, "token_acc": 0.5119760479041916, "train_speed(iter/s)": 0.671834 }, { "epoch": 2.364508804250032, "grad_norm": 5.8072028160095215, "learning_rate": 5.428094481171196e-05, "loss": 2.319895935058594, "memory(GiB)": 72.85, "step": 55190, "token_acc": 0.5342960288808665, "train_speed(iter/s)": 0.671845 }, { "epoch": 2.364723019579281, "grad_norm": 4.561373710632324, "learning_rate": 5.427423971190234e-05, "loss": 2.208416748046875, "memory(GiB)": 72.85, "step": 55195, "token_acc": 0.5190839694656488, "train_speed(iter/s)": 0.671814 }, { "epoch": 2.3649372349085303, "grad_norm": 5.177542686462402, "learning_rate": 5.4267534534660865e-05, "loss": 2.4644342422485352, "memory(GiB)": 72.85, "step": 55200, "token_acc": 0.47840531561461797, "train_speed(iter/s)": 0.671814 }, { "epoch": 2.365151450237779, "grad_norm": 3.409567356109619, "learning_rate": 5.426082928010899e-05, "loss": 2.416461372375488, "memory(GiB)": 72.85, "step": 55205, "token_acc": 0.5054347826086957, "train_speed(iter/s)": 0.671818 }, { "epoch": 2.365365665567028, "grad_norm": 3.9074013233184814, "learning_rate": 5.425412394836821e-05, "loss": 2.23138427734375, "memory(GiB)": 72.85, "step": 55210, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.671827 }, { "epoch": 2.365579880896277, "grad_norm": 4.445348739624023, "learning_rate": 5.424741853955998e-05, "loss": 2.174122619628906, "memory(GiB)": 72.85, "step": 
55215, "token_acc": 0.5310344827586206, "train_speed(iter/s)": 0.67181 }, { "epoch": 2.365794096225526, "grad_norm": 4.074676036834717, "learning_rate": 5.424071305380577e-05, "loss": 2.2111623764038084, "memory(GiB)": 72.85, "step": 55220, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.671818 }, { "epoch": 2.3660083115547748, "grad_norm": 4.185485363006592, "learning_rate": 5.423400749122707e-05, "loss": 2.0105312347412108, "memory(GiB)": 72.85, "step": 55225, "token_acc": 0.5423728813559322, "train_speed(iter/s)": 0.671836 }, { "epoch": 2.366222526884024, "grad_norm": 4.6103739738464355, "learning_rate": 5.422730185194534e-05, "loss": 2.410144805908203, "memory(GiB)": 72.85, "step": 55230, "token_acc": 0.49834983498349833, "train_speed(iter/s)": 0.67183 }, { "epoch": 2.366436742213273, "grad_norm": 5.645618915557861, "learning_rate": 5.422059613608208e-05, "loss": 2.3065685272216796, "memory(GiB)": 72.85, "step": 55235, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.671826 }, { "epoch": 2.3666509575425216, "grad_norm": 5.402383804321289, "learning_rate": 5.4213890343758766e-05, "loss": 2.1211334228515626, "memory(GiB)": 72.85, "step": 55240, "token_acc": 0.5424354243542435, "train_speed(iter/s)": 0.671835 }, { "epoch": 2.366865172871771, "grad_norm": 5.135112762451172, "learning_rate": 5.4207184475096885e-05, "loss": 2.5699283599853517, "memory(GiB)": 72.85, "step": 55245, "token_acc": 0.5064516129032258, "train_speed(iter/s)": 0.671846 }, { "epoch": 2.3670793882010197, "grad_norm": 7.0963239669799805, "learning_rate": 5.420047853021789e-05, "loss": 2.4368337631225585, "memory(GiB)": 72.85, "step": 55250, "token_acc": 0.48134328358208955, "train_speed(iter/s)": 0.671863 }, { "epoch": 2.3672936035302685, "grad_norm": 5.391376495361328, "learning_rate": 5.41937725092433e-05, "loss": 2.5213834762573244, "memory(GiB)": 72.85, "step": 55255, "token_acc": 0.45353159851301117, "train_speed(iter/s)": 0.671863 }, { "epoch": 2.3675078188595178, 
"grad_norm": 3.8902530670166016, "learning_rate": 5.418706641229458e-05, "loss": 2.1102535247802736, "memory(GiB)": 72.85, "step": 55260, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.671862 }, { "epoch": 2.3677220341887666, "grad_norm": 3.521066904067993, "learning_rate": 5.418036023949323e-05, "loss": 2.2980978012084963, "memory(GiB)": 72.85, "step": 55265, "token_acc": 0.4716417910447761, "train_speed(iter/s)": 0.671863 }, { "epoch": 2.3679362495180154, "grad_norm": 6.238555431365967, "learning_rate": 5.417365399096073e-05, "loss": 2.3620626449584963, "memory(GiB)": 72.85, "step": 55270, "token_acc": 0.5068493150684932, "train_speed(iter/s)": 0.671865 }, { "epoch": 2.3681504648472647, "grad_norm": 5.160538196563721, "learning_rate": 5.416694766681857e-05, "loss": 2.1010583877563476, "memory(GiB)": 72.85, "step": 55275, "token_acc": 0.5, "train_speed(iter/s)": 0.67187 }, { "epoch": 2.3683646801765135, "grad_norm": 5.89892053604126, "learning_rate": 5.416024126718824e-05, "loss": 2.7438533782958983, "memory(GiB)": 72.85, "step": 55280, "token_acc": 0.45733788395904434, "train_speed(iter/s)": 0.671881 }, { "epoch": 2.3685788955057623, "grad_norm": 5.015108585357666, "learning_rate": 5.415353479219125e-05, "loss": 2.1915611267089843, "memory(GiB)": 72.85, "step": 55285, "token_acc": 0.5126050420168067, "train_speed(iter/s)": 0.671889 }, { "epoch": 2.3687931108350115, "grad_norm": 6.525193691253662, "learning_rate": 5.4146828241949064e-05, "loss": 2.22125186920166, "memory(GiB)": 72.85, "step": 55290, "token_acc": 0.5352112676056338, "train_speed(iter/s)": 0.671907 }, { "epoch": 2.3690073261642604, "grad_norm": 4.947452545166016, "learning_rate": 5.414012161658322e-05, "loss": 2.1477693557739257, "memory(GiB)": 72.85, "step": 55295, "token_acc": 0.5344827586206896, "train_speed(iter/s)": 0.671902 }, { "epoch": 2.369221541493509, "grad_norm": 6.354803562164307, "learning_rate": 5.4133414916215174e-05, "loss": 2.276192283630371, "memory(GiB)": 72.85, 
"step": 55300, "token_acc": 0.49852507374631266, "train_speed(iter/s)": 0.671916 }, { "epoch": 2.3694357568227584, "grad_norm": 4.566303253173828, "learning_rate": 5.412670814096642e-05, "loss": 2.520657539367676, "memory(GiB)": 72.85, "step": 55305, "token_acc": 0.49226006191950467, "train_speed(iter/s)": 0.67191 }, { "epoch": 2.3696499721520072, "grad_norm": 5.7347636222839355, "learning_rate": 5.41200012909585e-05, "loss": 1.9160358428955078, "memory(GiB)": 72.85, "step": 55310, "token_acc": 0.6030534351145038, "train_speed(iter/s)": 0.671933 }, { "epoch": 2.369864187481256, "grad_norm": 4.365768909454346, "learning_rate": 5.4113294366312896e-05, "loss": 2.0992691040039064, "memory(GiB)": 72.85, "step": 55315, "token_acc": 0.5018050541516246, "train_speed(iter/s)": 0.671936 }, { "epoch": 2.3700784028105053, "grad_norm": 4.600193023681641, "learning_rate": 5.4106587367151085e-05, "loss": 2.3190692901611327, "memory(GiB)": 72.85, "step": 55320, "token_acc": 0.4888268156424581, "train_speed(iter/s)": 0.671934 }, { "epoch": 2.370292618139754, "grad_norm": 3.7063589096069336, "learning_rate": 5.409988029359461e-05, "loss": 2.556894302368164, "memory(GiB)": 72.85, "step": 55325, "token_acc": 0.46, "train_speed(iter/s)": 0.671924 }, { "epoch": 2.370506833469003, "grad_norm": 4.533651351928711, "learning_rate": 5.409317314576496e-05, "loss": 2.3583946228027344, "memory(GiB)": 72.85, "step": 55330, "token_acc": 0.4884910485933504, "train_speed(iter/s)": 0.671935 }, { "epoch": 2.370721048798252, "grad_norm": 4.4923996925354, "learning_rate": 5.408646592378362e-05, "loss": 2.1560792922973633, "memory(GiB)": 72.85, "step": 55335, "token_acc": 0.55, "train_speed(iter/s)": 0.671941 }, { "epoch": 2.370935264127501, "grad_norm": 3.8127408027648926, "learning_rate": 5.4079758627772135e-05, "loss": 2.3896810531616213, "memory(GiB)": 72.85, "step": 55340, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.671945 }, { "epoch": 2.37114947945675, "grad_norm": 
4.286913871765137, "learning_rate": 5.407305125785198e-05, "loss": 2.0681276321411133, "memory(GiB)": 72.85, "step": 55345, "token_acc": 0.5259515570934256, "train_speed(iter/s)": 0.67195 }, { "epoch": 2.371363694785999, "grad_norm": 4.375885963439941, "learning_rate": 5.406634381414468e-05, "loss": 2.071180534362793, "memory(GiB)": 72.85, "step": 55350, "token_acc": 0.5317725752508361, "train_speed(iter/s)": 0.671958 }, { "epoch": 2.371577910115248, "grad_norm": 4.48140811920166, "learning_rate": 5.405963629677177e-05, "loss": 2.1100168228149414, "memory(GiB)": 72.85, "step": 55355, "token_acc": 0.5498281786941581, "train_speed(iter/s)": 0.671954 }, { "epoch": 2.3717921254444967, "grad_norm": 4.554023265838623, "learning_rate": 5.405292870585472e-05, "loss": 2.2547887802124023, "memory(GiB)": 72.85, "step": 55360, "token_acc": 0.5259259259259259, "train_speed(iter/s)": 0.671943 }, { "epoch": 2.372006340773746, "grad_norm": 6.0855302810668945, "learning_rate": 5.4046221041515065e-05, "loss": 2.2669933319091795, "memory(GiB)": 72.85, "step": 55365, "token_acc": 0.5208333333333334, "train_speed(iter/s)": 0.671948 }, { "epoch": 2.3722205561029948, "grad_norm": 4.556931495666504, "learning_rate": 5.4039513303874336e-05, "loss": 2.232981491088867, "memory(GiB)": 72.85, "step": 55370, "token_acc": 0.5139442231075697, "train_speed(iter/s)": 0.67195 }, { "epoch": 2.3724347714322436, "grad_norm": 4.645655155181885, "learning_rate": 5.4032805493054005e-05, "loss": 2.3036855697631835, "memory(GiB)": 72.85, "step": 55375, "token_acc": 0.4981949458483754, "train_speed(iter/s)": 0.671958 }, { "epoch": 2.372648986761493, "grad_norm": 6.367177963256836, "learning_rate": 5.402609760917564e-05, "loss": 1.800785446166992, "memory(GiB)": 72.85, "step": 55380, "token_acc": 0.5523012552301255, "train_speed(iter/s)": 0.671971 }, { "epoch": 2.3728632020907416, "grad_norm": 4.9205098152160645, "learning_rate": 5.4019389652360744e-05, "loss": 2.061476135253906, "memory(GiB)": 72.85, "step": 
55385, "token_acc": 0.5152542372881356, "train_speed(iter/s)": 0.671974 }, { "epoch": 2.3730774174199905, "grad_norm": 5.334798812866211, "learning_rate": 5.401268162273083e-05, "loss": 2.1444168090820312, "memory(GiB)": 72.85, "step": 55390, "token_acc": 0.5, "train_speed(iter/s)": 0.671975 }, { "epoch": 2.3732916327492397, "grad_norm": 4.612277984619141, "learning_rate": 5.400597352040742e-05, "loss": 2.3723518371582033, "memory(GiB)": 72.85, "step": 55395, "token_acc": 0.5337423312883436, "train_speed(iter/s)": 0.67197 }, { "epoch": 2.3735058480784885, "grad_norm": 5.354207515716553, "learning_rate": 5.399926534551204e-05, "loss": 2.29556884765625, "memory(GiB)": 72.85, "step": 55400, "token_acc": 0.5267489711934157, "train_speed(iter/s)": 0.671967 }, { "epoch": 2.3737200634077373, "grad_norm": 4.52912712097168, "learning_rate": 5.399255709816622e-05, "loss": 2.3008054733276366, "memory(GiB)": 72.85, "step": 55405, "token_acc": 0.5034965034965035, "train_speed(iter/s)": 0.67198 }, { "epoch": 2.3739342787369866, "grad_norm": 6.741326808929443, "learning_rate": 5.398584877849149e-05, "loss": 2.1369140625, "memory(GiB)": 72.85, "step": 55410, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.671987 }, { "epoch": 2.3741484940662354, "grad_norm": 4.954039573669434, "learning_rate": 5.397914038660935e-05, "loss": 2.1914716720581056, "memory(GiB)": 72.85, "step": 55415, "token_acc": 0.5400696864111498, "train_speed(iter/s)": 0.671992 }, { "epoch": 2.374362709395484, "grad_norm": 4.728740692138672, "learning_rate": 5.397243192264136e-05, "loss": 1.9713354110717773, "memory(GiB)": 72.85, "step": 55420, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.672006 }, { "epoch": 2.3745769247247335, "grad_norm": 5.206634044647217, "learning_rate": 5.3965723386709043e-05, "loss": 2.115239715576172, "memory(GiB)": 72.85, "step": 55425, "token_acc": 0.5392857142857143, "train_speed(iter/s)": 0.67201 }, { "epoch": 2.3747911400539823, "grad_norm": 3.7168281078338623, 
"learning_rate": 5.3959014778933925e-05, "loss": 2.1699851989746093, "memory(GiB)": 72.85, "step": 55430, "token_acc": 0.5157593123209169, "train_speed(iter/s)": 0.671995 }, { "epoch": 2.375005355383231, "grad_norm": 4.756343841552734, "learning_rate": 5.395230609943753e-05, "loss": 2.253687286376953, "memory(GiB)": 72.85, "step": 55435, "token_acc": 0.489247311827957, "train_speed(iter/s)": 0.672005 }, { "epoch": 2.3752195707124804, "grad_norm": 4.583657741546631, "learning_rate": 5.3945597348341425e-05, "loss": 2.267715835571289, "memory(GiB)": 72.85, "step": 55440, "token_acc": 0.5045871559633027, "train_speed(iter/s)": 0.672012 }, { "epoch": 2.375433786041729, "grad_norm": 4.1631951332092285, "learning_rate": 5.393888852576712e-05, "loss": 1.9993263244628907, "memory(GiB)": 72.85, "step": 55445, "token_acc": 0.5809859154929577, "train_speed(iter/s)": 0.672016 }, { "epoch": 2.375648001370978, "grad_norm": 4.700808525085449, "learning_rate": 5.393217963183613e-05, "loss": 2.3273162841796875, "memory(GiB)": 72.85, "step": 55450, "token_acc": 0.4952076677316294, "train_speed(iter/s)": 0.672021 }, { "epoch": 2.3758622167002272, "grad_norm": 4.5799174308776855, "learning_rate": 5.392547066667003e-05, "loss": 2.175240707397461, "memory(GiB)": 72.85, "step": 55455, "token_acc": 0.501628664495114, "train_speed(iter/s)": 0.672024 }, { "epoch": 2.376076432029476, "grad_norm": 6.707763671875, "learning_rate": 5.3918761630390355e-05, "loss": 2.3029842376708984, "memory(GiB)": 72.85, "step": 55460, "token_acc": 0.5171339563862928, "train_speed(iter/s)": 0.67203 }, { "epoch": 2.376290647358725, "grad_norm": 5.383233070373535, "learning_rate": 5.391205252311863e-05, "loss": 2.266208267211914, "memory(GiB)": 72.85, "step": 55465, "token_acc": 0.5412186379928315, "train_speed(iter/s)": 0.672032 }, { "epoch": 2.376504862687974, "grad_norm": 4.664865016937256, "learning_rate": 5.390534334497641e-05, "loss": 2.4916391372680664, "memory(GiB)": 72.85, "step": 55470, "token_acc": 
0.48242811501597443, "train_speed(iter/s)": 0.672036 }, { "epoch": 2.376719078017223, "grad_norm": 6.137387752532959, "learning_rate": 5.3898634096085235e-05, "loss": 2.1462657928466795, "memory(GiB)": 72.85, "step": 55475, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672022 }, { "epoch": 2.3769332933464717, "grad_norm": 4.395077228546143, "learning_rate": 5.389192477656663e-05, "loss": 2.1811466217041016, "memory(GiB)": 72.85, "step": 55480, "token_acc": 0.5050505050505051, "train_speed(iter/s)": 0.672029 }, { "epoch": 2.377147508675721, "grad_norm": 4.9633002281188965, "learning_rate": 5.388521538654216e-05, "loss": 2.0757162094116213, "memory(GiB)": 72.85, "step": 55485, "token_acc": 0.5776892430278885, "train_speed(iter/s)": 0.67204 }, { "epoch": 2.37736172400497, "grad_norm": 6.196085453033447, "learning_rate": 5.387850592613337e-05, "loss": 2.083730125427246, "memory(GiB)": 72.85, "step": 55490, "token_acc": 0.4944649446494465, "train_speed(iter/s)": 0.672047 }, { "epoch": 2.3775759393342186, "grad_norm": 4.078867435455322, "learning_rate": 5.3871796395461804e-05, "loss": 2.181497573852539, "memory(GiB)": 72.85, "step": 55495, "token_acc": 0.5035971223021583, "train_speed(iter/s)": 0.672053 }, { "epoch": 2.377790154663468, "grad_norm": 5.4568095207214355, "learning_rate": 5.3865086794649023e-05, "loss": 2.3564178466796877, "memory(GiB)": 72.85, "step": 55500, "token_acc": 0.48013245033112584, "train_speed(iter/s)": 0.672056 }, { "epoch": 2.377790154663468, "eval_loss": 2.0138802528381348, "eval_runtime": 15.3209, "eval_samples_per_second": 6.527, "eval_steps_per_second": 6.527, "eval_token_acc": 0.5145631067961165, "step": 55500 }, { "epoch": 2.3780043699927167, "grad_norm": 6.194697380065918, "learning_rate": 5.3858377123816576e-05, "loss": 2.3715890884399413, "memory(GiB)": 72.85, "step": 55505, "token_acc": 0.5074331020812686, "train_speed(iter/s)": 0.671908 }, { "epoch": 2.3782185853219655, "grad_norm": 4.315622329711914, "learning_rate": 
5.385166738308599e-05, "loss": 2.2876815795898438, "memory(GiB)": 72.85, "step": 55510, "token_acc": 0.4894894894894895, "train_speed(iter/s)": 0.671911 }, { "epoch": 2.3784328006512148, "grad_norm": 5.26877498626709, "learning_rate": 5.384495757257885e-05, "loss": 2.38195743560791, "memory(GiB)": 72.85, "step": 55515, "token_acc": 0.49491525423728816, "train_speed(iter/s)": 0.671925 }, { "epoch": 2.3786470159804636, "grad_norm": 5.564742088317871, "learning_rate": 5.383824769241669e-05, "loss": 2.662763977050781, "memory(GiB)": 72.85, "step": 55520, "token_acc": 0.445993031358885, "train_speed(iter/s)": 0.671923 }, { "epoch": 2.3788612313097124, "grad_norm": 4.797672271728516, "learning_rate": 5.383153774272107e-05, "loss": 2.2496252059936523, "memory(GiB)": 72.85, "step": 55525, "token_acc": 0.4863013698630137, "train_speed(iter/s)": 0.671935 }, { "epoch": 2.3790754466389616, "grad_norm": 4.524253845214844, "learning_rate": 5.3824827723613546e-05, "loss": 2.175482749938965, "memory(GiB)": 72.85, "step": 55530, "token_acc": 0.5052631578947369, "train_speed(iter/s)": 0.671926 }, { "epoch": 2.3792896619682105, "grad_norm": 5.659003734588623, "learning_rate": 5.3818117635215695e-05, "loss": 2.046895217895508, "memory(GiB)": 72.85, "step": 55535, "token_acc": 0.5353159851301115, "train_speed(iter/s)": 0.671922 }, { "epoch": 2.3795038772974593, "grad_norm": 4.117636680603027, "learning_rate": 5.3811407477649045e-05, "loss": 2.1602678298950195, "memory(GiB)": 72.85, "step": 55540, "token_acc": 0.5171232876712328, "train_speed(iter/s)": 0.671894 }, { "epoch": 2.3797180926267085, "grad_norm": 4.7428741455078125, "learning_rate": 5.3804697251035184e-05, "loss": 2.5268014907836913, "memory(GiB)": 72.85, "step": 55545, "token_acc": 0.4940119760479042, "train_speed(iter/s)": 0.671914 }, { "epoch": 2.3799323079559573, "grad_norm": 6.295299053192139, "learning_rate": 5.379798695549566e-05, "loss": 2.330875778198242, "memory(GiB)": 72.85, "step": 55550, "token_acc": 
0.5111111111111111, "train_speed(iter/s)": 0.671911 }, { "epoch": 2.380146523285206, "grad_norm": 8.54139232635498, "learning_rate": 5.379127659115204e-05, "loss": 2.283384323120117, "memory(GiB)": 72.85, "step": 55555, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.6719 }, { "epoch": 2.3803607386144554, "grad_norm": 5.992025375366211, "learning_rate": 5.378456615812589e-05, "loss": 2.3723304748535154, "memory(GiB)": 72.85, "step": 55560, "token_acc": 0.5144927536231884, "train_speed(iter/s)": 0.671915 }, { "epoch": 2.380574953943704, "grad_norm": 5.656060218811035, "learning_rate": 5.377785565653878e-05, "loss": 2.1152114868164062, "memory(GiB)": 72.85, "step": 55565, "token_acc": 0.5401785714285714, "train_speed(iter/s)": 0.671914 }, { "epoch": 2.380789169272953, "grad_norm": 6.104208946228027, "learning_rate": 5.377114508651225e-05, "loss": 2.2345041275024413, "memory(GiB)": 72.85, "step": 55570, "token_acc": 0.5017301038062284, "train_speed(iter/s)": 0.671918 }, { "epoch": 2.3810033846022023, "grad_norm": 4.63207483291626, "learning_rate": 5.3764434448167913e-05, "loss": 2.2477773666381835, "memory(GiB)": 72.85, "step": 55575, "token_acc": 0.5149700598802395, "train_speed(iter/s)": 0.671911 }, { "epoch": 2.381217599931451, "grad_norm": 6.806500434875488, "learning_rate": 5.3757723741627285e-05, "loss": 2.143586349487305, "memory(GiB)": 72.85, "step": 55580, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.67191 }, { "epoch": 2.3814318152607, "grad_norm": 5.608563423156738, "learning_rate": 5.375101296701199e-05, "loss": 2.1513952255249023, "memory(GiB)": 72.85, "step": 55585, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.671917 }, { "epoch": 2.381646030589949, "grad_norm": 4.573489189147949, "learning_rate": 5.374430212444358e-05, "loss": 2.3641277313232423, "memory(GiB)": 72.85, "step": 55590, "token_acc": 0.47318611987381703, "train_speed(iter/s)": 0.671909 }, { "epoch": 2.381860245919198, "grad_norm": 3.9699220657348633, 
"learning_rate": 5.3737591214043614e-05, "loss": 2.333317756652832, "memory(GiB)": 72.85, "step": 55595, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.671921 }, { "epoch": 2.382074461248447, "grad_norm": 4.197483062744141, "learning_rate": 5.3730880235933664e-05, "loss": 2.444276237487793, "memory(GiB)": 72.85, "step": 55600, "token_acc": 0.47703180212014135, "train_speed(iter/s)": 0.671924 }, { "epoch": 2.382288676577696, "grad_norm": 4.915787696838379, "learning_rate": 5.372416919023535e-05, "loss": 2.155303955078125, "memory(GiB)": 72.85, "step": 55605, "token_acc": 0.5362776025236593, "train_speed(iter/s)": 0.671922 }, { "epoch": 2.382502891906945, "grad_norm": 4.345069408416748, "learning_rate": 5.371745807707019e-05, "loss": 2.2617765426635743, "memory(GiB)": 72.85, "step": 55610, "token_acc": 0.5016181229773463, "train_speed(iter/s)": 0.671905 }, { "epoch": 2.3827171072361937, "grad_norm": 4.886369228363037, "learning_rate": 5.3710746896559804e-05, "loss": 2.533954048156738, "memory(GiB)": 72.85, "step": 55615, "token_acc": 0.4774193548387097, "train_speed(iter/s)": 0.671922 }, { "epoch": 2.382931322565443, "grad_norm": 5.418209552764893, "learning_rate": 5.3704035648825756e-05, "loss": 2.2396780014038087, "memory(GiB)": 72.85, "step": 55620, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.671932 }, { "epoch": 2.3831455378946917, "grad_norm": 5.8119611740112305, "learning_rate": 5.369732433398963e-05, "loss": 2.285455322265625, "memory(GiB)": 72.85, "step": 55625, "token_acc": 0.5019607843137255, "train_speed(iter/s)": 0.671937 }, { "epoch": 2.3833597532239406, "grad_norm": 5.5088043212890625, "learning_rate": 5.3690612952173e-05, "loss": 2.2983001708984374, "memory(GiB)": 72.85, "step": 55630, "token_acc": 0.4938650306748466, "train_speed(iter/s)": 0.671939 }, { "epoch": 2.38357396855319, "grad_norm": 4.155125141143799, "learning_rate": 5.368390150349745e-05, "loss": 2.0316226959228514, "memory(GiB)": 72.85, "step": 55635, "token_acc": 
0.5400696864111498, "train_speed(iter/s)": 0.671943 }, { "epoch": 2.3837881838824386, "grad_norm": 6.2484211921691895, "learning_rate": 5.3677189988084565e-05, "loss": 2.3625715255737303, "memory(GiB)": 72.85, "step": 55640, "token_acc": 0.5071428571428571, "train_speed(iter/s)": 0.671952 }, { "epoch": 2.3840023992116874, "grad_norm": 4.529323101043701, "learning_rate": 5.367047840605595e-05, "loss": 2.256649208068848, "memory(GiB)": 72.85, "step": 55645, "token_acc": 0.5422740524781341, "train_speed(iter/s)": 0.671964 }, { "epoch": 2.3842166145409367, "grad_norm": 4.497716426849365, "learning_rate": 5.3663766757533176e-05, "loss": 2.1765720367431642, "memory(GiB)": 72.85, "step": 55650, "token_acc": 0.49544072948328266, "train_speed(iter/s)": 0.671948 }, { "epoch": 2.3844308298701855, "grad_norm": 4.511884689331055, "learning_rate": 5.365705504263782e-05, "loss": 2.191769027709961, "memory(GiB)": 72.85, "step": 55655, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.671942 }, { "epoch": 2.3846450451994343, "grad_norm": 4.960546016693115, "learning_rate": 5.365034326149147e-05, "loss": 2.0158197402954103, "memory(GiB)": 72.85, "step": 55660, "token_acc": 0.5124555160142349, "train_speed(iter/s)": 0.671937 }, { "epoch": 2.3848592605286836, "grad_norm": 3.995941400527954, "learning_rate": 5.364363141421575e-05, "loss": 2.242350196838379, "memory(GiB)": 72.85, "step": 55665, "token_acc": 0.467680608365019, "train_speed(iter/s)": 0.671952 }, { "epoch": 2.3850734758579324, "grad_norm": 4.970623970031738, "learning_rate": 5.363691950093223e-05, "loss": 1.9960643768310546, "memory(GiB)": 72.85, "step": 55670, "token_acc": 0.55859375, "train_speed(iter/s)": 0.671958 }, { "epoch": 2.385287691187181, "grad_norm": 4.8512067794799805, "learning_rate": 5.3630207521762476e-05, "loss": 2.233750343322754, "memory(GiB)": 72.85, "step": 55675, "token_acc": 0.5, "train_speed(iter/s)": 0.671955 }, { "epoch": 2.3855019065164305, "grad_norm": 4.976500988006592, "learning_rate": 
5.362349547682812e-05, "loss": 2.375583839416504, "memory(GiB)": 72.85, "step": 55680, "token_acc": 0.48338368580060426, "train_speed(iter/s)": 0.671948 }, { "epoch": 2.3857161218456793, "grad_norm": 6.053844928741455, "learning_rate": 5.361678336625074e-05, "loss": 1.958207130432129, "memory(GiB)": 72.85, "step": 55685, "token_acc": 0.5590551181102362, "train_speed(iter/s)": 0.671945 }, { "epoch": 2.385930337174928, "grad_norm": 5.33388090133667, "learning_rate": 5.3610071190151955e-05, "loss": 1.8630100250244142, "memory(GiB)": 72.85, "step": 55690, "token_acc": 0.5637065637065637, "train_speed(iter/s)": 0.671956 }, { "epoch": 2.3861445525041773, "grad_norm": 4.610386371612549, "learning_rate": 5.360335894865333e-05, "loss": 2.193129539489746, "memory(GiB)": 72.85, "step": 55695, "token_acc": 0.5389830508474577, "train_speed(iter/s)": 0.671969 }, { "epoch": 2.386358767833426, "grad_norm": 3.5493268966674805, "learning_rate": 5.359664664187648e-05, "loss": 2.134661102294922, "memory(GiB)": 72.85, "step": 55700, "token_acc": 0.5335570469798657, "train_speed(iter/s)": 0.671977 }, { "epoch": 2.386572983162675, "grad_norm": 5.268110275268555, "learning_rate": 5.358993426994301e-05, "loss": 2.200443077087402, "memory(GiB)": 72.85, "step": 55705, "token_acc": 0.5115384615384615, "train_speed(iter/s)": 0.671985 }, { "epoch": 2.3867871984919242, "grad_norm": 4.222409248352051, "learning_rate": 5.3583221832974494e-05, "loss": 2.140772247314453, "memory(GiB)": 72.85, "step": 55710, "token_acc": 0.5297619047619048, "train_speed(iter/s)": 0.671993 }, { "epoch": 2.387001413821173, "grad_norm": 4.194869518280029, "learning_rate": 5.3576509331092564e-05, "loss": 2.4838356018066405, "memory(GiB)": 72.85, "step": 55715, "token_acc": 0.49595687331536387, "train_speed(iter/s)": 0.671986 }, { "epoch": 2.387215629150422, "grad_norm": 5.2567362785339355, "learning_rate": 5.356979676441882e-05, "loss": 2.2252885818481447, "memory(GiB)": 72.85, "step": 55720, "token_acc": 
0.49491525423728816, "train_speed(iter/s)": 0.671988 }, { "epoch": 2.387429844479671, "grad_norm": 5.778426170349121, "learning_rate": 5.356308413307485e-05, "loss": 2.0347179412841796, "memory(GiB)": 72.85, "step": 55725, "token_acc": 0.5378486055776892, "train_speed(iter/s)": 0.671994 }, { "epoch": 2.38764405980892, "grad_norm": 4.067694187164307, "learning_rate": 5.355637143718226e-05, "loss": 2.262858581542969, "memory(GiB)": 72.85, "step": 55730, "token_acc": 0.5077519379844961, "train_speed(iter/s)": 0.672002 }, { "epoch": 2.3878582751381687, "grad_norm": 4.575268745422363, "learning_rate": 5.354965867686269e-05, "loss": 2.3515005111694336, "memory(GiB)": 72.85, "step": 55735, "token_acc": 0.5143769968051118, "train_speed(iter/s)": 0.67201 }, { "epoch": 2.388072490467418, "grad_norm": 4.135577201843262, "learning_rate": 5.35429458522377e-05, "loss": 2.2986833572387697, "memory(GiB)": 72.85, "step": 55740, "token_acc": 0.4627450980392157, "train_speed(iter/s)": 0.672012 }, { "epoch": 2.388286705796667, "grad_norm": 7.310564041137695, "learning_rate": 5.3536232963428933e-05, "loss": 2.4847965240478516, "memory(GiB)": 72.85, "step": 55745, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.672005 }, { "epoch": 2.3885009211259156, "grad_norm": 3.9597842693328857, "learning_rate": 5.3529520010557986e-05, "loss": 2.2059850692749023, "memory(GiB)": 72.85, "step": 55750, "token_acc": 0.5086505190311419, "train_speed(iter/s)": 0.671997 }, { "epoch": 2.388715136455165, "grad_norm": 6.389488220214844, "learning_rate": 5.3522806993746465e-05, "loss": 2.309844398498535, "memory(GiB)": 72.85, "step": 55755, "token_acc": 0.5418326693227091, "train_speed(iter/s)": 0.672005 }, { "epoch": 2.3889293517844137, "grad_norm": 6.836005687713623, "learning_rate": 5.3516093913116006e-05, "loss": 2.347844886779785, "memory(GiB)": 72.85, "step": 55760, "token_acc": 0.49110320284697506, "train_speed(iter/s)": 0.671997 }, { "epoch": 2.3891435671136625, "grad_norm": 
6.578225135803223, "learning_rate": 5.350938076878821e-05, "loss": 2.1483301162719726, "memory(GiB)": 72.85, "step": 55765, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.67198 }, { "epoch": 2.3893577824429117, "grad_norm": 4.189550399780273, "learning_rate": 5.350266756088467e-05, "loss": 1.959469985961914, "memory(GiB)": 72.85, "step": 55770, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.671976 }, { "epoch": 2.3895719977721606, "grad_norm": 6.495180606842041, "learning_rate": 5.349595428952704e-05, "loss": 2.294397163391113, "memory(GiB)": 72.85, "step": 55775, "token_acc": 0.5032679738562091, "train_speed(iter/s)": 0.671963 }, { "epoch": 2.3897862131014094, "grad_norm": 3.8704090118408203, "learning_rate": 5.3489240954836896e-05, "loss": 2.312137413024902, "memory(GiB)": 72.85, "step": 55780, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.671966 }, { "epoch": 2.3900004284306586, "grad_norm": 5.001053333282471, "learning_rate": 5.348252755693589e-05, "loss": 2.307382011413574, "memory(GiB)": 72.85, "step": 55785, "token_acc": 0.5158227848101266, "train_speed(iter/s)": 0.671981 }, { "epoch": 2.3902146437599074, "grad_norm": 5.413167953491211, "learning_rate": 5.347581409594564e-05, "loss": 2.130385971069336, "memory(GiB)": 72.85, "step": 55790, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.671993 }, { "epoch": 2.3904288590891563, "grad_norm": 5.112560749053955, "learning_rate": 5.346910057198775e-05, "loss": 2.0543516159057615, "memory(GiB)": 72.85, "step": 55795, "token_acc": 0.556390977443609, "train_speed(iter/s)": 0.67199 }, { "epoch": 2.3906430744184055, "grad_norm": 4.517197132110596, "learning_rate": 5.3462386985183845e-05, "loss": 1.7922409057617188, "memory(GiB)": 72.85, "step": 55800, "token_acc": 0.5894308943089431, "train_speed(iter/s)": 0.67199 }, { "epoch": 2.3908572897476543, "grad_norm": 4.7064995765686035, "learning_rate": 5.3455673335655565e-05, "loss": 2.4999584197998046, "memory(GiB)": 72.85, 
"step": 55805, "token_acc": 0.44668587896253603, "train_speed(iter/s)": 0.671996 }, { "epoch": 2.391071505076903, "grad_norm": 4.614543437957764, "learning_rate": 5.34489596235245e-05, "loss": 2.1386037826538087, "memory(GiB)": 72.85, "step": 55810, "token_acc": 0.5046153846153846, "train_speed(iter/s)": 0.672003 }, { "epoch": 2.3912857204061524, "grad_norm": 4.119831085205078, "learning_rate": 5.344224584891231e-05, "loss": 1.9867380142211915, "memory(GiB)": 72.85, "step": 55815, "token_acc": 0.5436893203883495, "train_speed(iter/s)": 0.671996 }, { "epoch": 2.391499935735401, "grad_norm": 4.974974155426025, "learning_rate": 5.343553201194061e-05, "loss": 1.7159456253051757, "memory(GiB)": 72.85, "step": 55820, "token_acc": 0.5630252100840336, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.39171415106465, "grad_norm": 5.638577938079834, "learning_rate": 5.3428818112731014e-05, "loss": 2.0376220703125, "memory(GiB)": 72.85, "step": 55825, "token_acc": 0.546875, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.3919283663938993, "grad_norm": 7.357227325439453, "learning_rate": 5.342210415140516e-05, "loss": 1.9644304275512696, "memory(GiB)": 72.85, "step": 55830, "token_acc": 0.5527426160337553, "train_speed(iter/s)": 0.672011 }, { "epoch": 2.392142581723148, "grad_norm": 5.6303629875183105, "learning_rate": 5.34153901280847e-05, "loss": 2.3858058929443358, "memory(GiB)": 72.85, "step": 55835, "token_acc": 0.4628099173553719, "train_speed(iter/s)": 0.672008 }, { "epoch": 2.392356797052397, "grad_norm": 8.818201065063477, "learning_rate": 5.3408676042891224e-05, "loss": 2.4326738357543944, "memory(GiB)": 72.85, "step": 55840, "token_acc": 0.4714285714285714, "train_speed(iter/s)": 0.672022 }, { "epoch": 2.392571012381646, "grad_norm": 6.597329139709473, "learning_rate": 5.34019618959464e-05, "loss": 2.3665367126464845, "memory(GiB)": 72.85, "step": 55845, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.672022 }, { "epoch": 2.392785227710895, "grad_norm": 
5.990808010101318, "learning_rate": 5.339524768737183e-05, "loss": 2.032040596008301, "memory(GiB)": 72.85, "step": 55850, "token_acc": 0.5433070866141733, "train_speed(iter/s)": 0.672023 }, { "epoch": 2.392999443040144, "grad_norm": 5.2059245109558105, "learning_rate": 5.3388533417289164e-05, "loss": 2.140536880493164, "memory(GiB)": 72.85, "step": 55855, "token_acc": 0.5220588235294118, "train_speed(iter/s)": 0.672026 }, { "epoch": 2.393213658369393, "grad_norm": 4.852439880371094, "learning_rate": 5.338181908582004e-05, "loss": 2.0118751525878906, "memory(GiB)": 72.85, "step": 55860, "token_acc": 0.570957095709571, "train_speed(iter/s)": 0.672027 }, { "epoch": 2.393427873698642, "grad_norm": 6.402970314025879, "learning_rate": 5.33751046930861e-05, "loss": 2.4062108993530273, "memory(GiB)": 72.85, "step": 55865, "token_acc": 0.5, "train_speed(iter/s)": 0.672037 }, { "epoch": 2.3936420890278907, "grad_norm": 4.31373405456543, "learning_rate": 5.3368390239208957e-05, "loss": 2.2727298736572266, "memory(GiB)": 72.85, "step": 55870, "token_acc": 0.48579545454545453, "train_speed(iter/s)": 0.672048 }, { "epoch": 2.39385630435714, "grad_norm": 4.537845611572266, "learning_rate": 5.3361675724310266e-05, "loss": 2.527861785888672, "memory(GiB)": 72.85, "step": 55875, "token_acc": 0.48089171974522293, "train_speed(iter/s)": 0.672043 }, { "epoch": 2.3940705196863887, "grad_norm": 4.572492599487305, "learning_rate": 5.335496114851168e-05, "loss": 2.3643543243408205, "memory(GiB)": 72.85, "step": 55880, "token_acc": 0.47928994082840237, "train_speed(iter/s)": 0.672052 }, { "epoch": 2.3942847350156375, "grad_norm": 4.3336052894592285, "learning_rate": 5.334824651193482e-05, "loss": 2.2026039123535157, "memory(GiB)": 72.85, "step": 55885, "token_acc": 0.5658682634730539, "train_speed(iter/s)": 0.672044 }, { "epoch": 2.394498950344887, "grad_norm": 4.7326459884643555, "learning_rate": 5.334153181470133e-05, "loss": 1.925445556640625, "memory(GiB)": 72.85, "step": 55890, 
"token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.672028 }, { "epoch": 2.3947131656741356, "grad_norm": 4.6437296867370605, "learning_rate": 5.333481705693285e-05, "loss": 2.333606147766113, "memory(GiB)": 72.85, "step": 55895, "token_acc": 0.513986013986014, "train_speed(iter/s)": 0.672032 }, { "epoch": 2.3949273810033844, "grad_norm": 3.322272777557373, "learning_rate": 5.332810223875103e-05, "loss": 2.4045833587646483, "memory(GiB)": 72.85, "step": 55900, "token_acc": 0.47262247838616717, "train_speed(iter/s)": 0.672044 }, { "epoch": 2.3951415963326337, "grad_norm": 4.436462879180908, "learning_rate": 5.332138736027753e-05, "loss": 2.385201644897461, "memory(GiB)": 72.85, "step": 55905, "token_acc": 0.4812680115273775, "train_speed(iter/s)": 0.672053 }, { "epoch": 2.3953558116618825, "grad_norm": 5.9341254234313965, "learning_rate": 5.331467242163398e-05, "loss": 2.152496337890625, "memory(GiB)": 72.85, "step": 55910, "token_acc": 0.548951048951049, "train_speed(iter/s)": 0.672063 }, { "epoch": 2.3955700269911313, "grad_norm": 5.903653144836426, "learning_rate": 5.3307957422942024e-05, "loss": 2.391486930847168, "memory(GiB)": 72.85, "step": 55915, "token_acc": 0.4899328859060403, "train_speed(iter/s)": 0.67207 }, { "epoch": 2.3957842423203806, "grad_norm": 3.482851028442383, "learning_rate": 5.330124236432332e-05, "loss": 2.308917236328125, "memory(GiB)": 72.85, "step": 55920, "token_acc": 0.5049180327868853, "train_speed(iter/s)": 0.672066 }, { "epoch": 2.3959984576496294, "grad_norm": 4.7346696853637695, "learning_rate": 5.329452724589951e-05, "loss": 2.537221145629883, "memory(GiB)": 72.85, "step": 55925, "token_acc": 0.46440677966101696, "train_speed(iter/s)": 0.672076 }, { "epoch": 2.396212672978878, "grad_norm": 4.8049492835998535, "learning_rate": 5.3287812067792256e-05, "loss": 2.0705562591552735, "memory(GiB)": 72.85, "step": 55930, "token_acc": 0.5296296296296297, "train_speed(iter/s)": 0.672076 }, { "epoch": 2.3964268883081274, "grad_norm": 
3.9676146507263184, "learning_rate": 5.328109683012319e-05, "loss": 1.9225568771362305, "memory(GiB)": 72.85, "step": 55935, "token_acc": 0.5490909090909091, "train_speed(iter/s)": 0.672087 }, { "epoch": 2.3966411036373763, "grad_norm": 4.811767101287842, "learning_rate": 5.327438153301398e-05, "loss": 2.556716537475586, "memory(GiB)": 72.85, "step": 55940, "token_acc": 0.4967948717948718, "train_speed(iter/s)": 0.67208 }, { "epoch": 2.396855318966625, "grad_norm": 5.107141017913818, "learning_rate": 5.326766617658628e-05, "loss": 2.2671104431152345, "memory(GiB)": 72.85, "step": 55945, "token_acc": 0.5031645569620253, "train_speed(iter/s)": 0.672079 }, { "epoch": 2.3970695342958743, "grad_norm": 3.864389181137085, "learning_rate": 5.326095076096175e-05, "loss": 2.133306884765625, "memory(GiB)": 72.85, "step": 55950, "token_acc": 0.5604838709677419, "train_speed(iter/s)": 0.672067 }, { "epoch": 2.397283749625123, "grad_norm": 5.40172815322876, "learning_rate": 5.325423528626202e-05, "loss": 2.2646438598632814, "memory(GiB)": 72.85, "step": 55955, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.672073 }, { "epoch": 2.397497964954372, "grad_norm": 8.179618835449219, "learning_rate": 5.324751975260879e-05, "loss": 2.317808151245117, "memory(GiB)": 72.85, "step": 55960, "token_acc": 0.4918032786885246, "train_speed(iter/s)": 0.672077 }, { "epoch": 2.397712180283621, "grad_norm": 6.135014533996582, "learning_rate": 5.324080416012368e-05, "loss": 2.1375431060791015, "memory(GiB)": 72.85, "step": 55965, "token_acc": 0.5508196721311476, "train_speed(iter/s)": 0.672078 }, { "epoch": 2.39792639561287, "grad_norm": 4.877544403076172, "learning_rate": 5.323408850892835e-05, "loss": 2.363129425048828, "memory(GiB)": 72.85, "step": 55970, "token_acc": 0.509375, "train_speed(iter/s)": 0.672083 }, { "epoch": 2.398140610942119, "grad_norm": 6.104407787322998, "learning_rate": 5.3227372799144484e-05, "loss": 2.1367212295532227, "memory(GiB)": 72.85, "step": 55975, 
"token_acc": 0.5387596899224806, "train_speed(iter/s)": 0.672088 }, { "epoch": 2.398354826271368, "grad_norm": 3.852177858352661, "learning_rate": 5.322065703089374e-05, "loss": 2.3160589218139647, "memory(GiB)": 72.85, "step": 55980, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.672092 }, { "epoch": 2.398569041600617, "grad_norm": 4.331413269042969, "learning_rate": 5.3213941204297755e-05, "loss": 2.3950807571411135, "memory(GiB)": 72.85, "step": 55985, "token_acc": 0.48985507246376814, "train_speed(iter/s)": 0.672098 }, { "epoch": 2.3987832569298657, "grad_norm": 4.408031940460205, "learning_rate": 5.3207225319478225e-05, "loss": 2.326923942565918, "memory(GiB)": 72.85, "step": 55990, "token_acc": 0.48201438848920863, "train_speed(iter/s)": 0.672088 }, { "epoch": 2.398997472259115, "grad_norm": 3.659836769104004, "learning_rate": 5.3200509376556796e-05, "loss": 2.3125839233398438, "memory(GiB)": 72.85, "step": 55995, "token_acc": 0.5166051660516605, "train_speed(iter/s)": 0.672093 }, { "epoch": 2.399211687588364, "grad_norm": 4.50282621383667, "learning_rate": 5.3193793375655134e-05, "loss": 1.9906787872314453, "memory(GiB)": 72.85, "step": 56000, "token_acc": 0.568, "train_speed(iter/s)": 0.672086 }, { "epoch": 2.399211687588364, "eval_loss": 2.2581114768981934, "eval_runtime": 14.9395, "eval_samples_per_second": 6.694, "eval_steps_per_second": 6.694, "eval_token_acc": 0.4777327935222672, "step": 56000 }, { "epoch": 2.3994259029176126, "grad_norm": 5.004374027252197, "learning_rate": 5.3187077316894915e-05, "loss": 2.2224332809448244, "memory(GiB)": 72.85, "step": 56005, "token_acc": 0.48490749756572543, "train_speed(iter/s)": 0.671954 }, { "epoch": 2.399640118246862, "grad_norm": 6.4036102294921875, "learning_rate": 5.318036120039779e-05, "loss": 2.1204177856445314, "memory(GiB)": 72.85, "step": 56010, "token_acc": 0.540084388185654, "train_speed(iter/s)": 0.671946 }, { "epoch": 2.3998543335761107, "grad_norm": 6.032347679138184, "learning_rate": 
5.3173645026285436e-05, "loss": 2.281711196899414, "memory(GiB)": 72.85, "step": 56015, "token_acc": 0.5239852398523985, "train_speed(iter/s)": 0.671949 }, { "epoch": 2.4000685489053595, "grad_norm": 4.357480049133301, "learning_rate": 5.316692879467954e-05, "loss": 2.1910120010375977, "memory(GiB)": 72.85, "step": 56020, "token_acc": 0.5347985347985348, "train_speed(iter/s)": 0.671959 }, { "epoch": 2.4002827642346087, "grad_norm": 5.138361930847168, "learning_rate": 5.3160212505701755e-05, "loss": 2.1053049087524416, "memory(GiB)": 72.85, "step": 56025, "token_acc": 0.5328947368421053, "train_speed(iter/s)": 0.671944 }, { "epoch": 2.4004969795638575, "grad_norm": 5.734163284301758, "learning_rate": 5.315349615947375e-05, "loss": 2.167532539367676, "memory(GiB)": 72.85, "step": 56030, "token_acc": 0.5132075471698113, "train_speed(iter/s)": 0.67195 }, { "epoch": 2.4007111948931064, "grad_norm": 4.827051162719727, "learning_rate": 5.3146779756117215e-05, "loss": 2.546617126464844, "memory(GiB)": 72.85, "step": 56035, "token_acc": 0.476056338028169, "train_speed(iter/s)": 0.671964 }, { "epoch": 2.4009254102223556, "grad_norm": 5.2168498039245605, "learning_rate": 5.314006329575379e-05, "loss": 2.260858917236328, "memory(GiB)": 72.85, "step": 56040, "token_acc": 0.5034722222222222, "train_speed(iter/s)": 0.67197 }, { "epoch": 2.4011396255516044, "grad_norm": 4.883511066436768, "learning_rate": 5.313334677850519e-05, "loss": 2.3067062377929686, "memory(GiB)": 72.85, "step": 56045, "token_acc": 0.5112781954887218, "train_speed(iter/s)": 0.671974 }, { "epoch": 2.4013538408808532, "grad_norm": 5.768743515014648, "learning_rate": 5.3126630204493075e-05, "loss": 2.5166439056396483, "memory(GiB)": 72.85, "step": 56050, "token_acc": 0.4738562091503268, "train_speed(iter/s)": 0.671972 }, { "epoch": 2.4015680562101025, "grad_norm": 5.03029727935791, "learning_rate": 5.311991357383912e-05, "loss": 2.339612579345703, "memory(GiB)": 72.85, "step": 56055, "token_acc": 
0.4970414201183432, "train_speed(iter/s)": 0.671982 }, { "epoch": 2.4017822715393513, "grad_norm": 5.307229518890381, "learning_rate": 5.3113196886664996e-05, "loss": 2.304330825805664, "memory(GiB)": 72.85, "step": 56060, "token_acc": 0.5122950819672131, "train_speed(iter/s)": 0.671994 }, { "epoch": 2.4019964868686, "grad_norm": 4.678606986999512, "learning_rate": 5.310648014309241e-05, "loss": 2.3753339767456056, "memory(GiB)": 72.85, "step": 56065, "token_acc": 0.5239852398523985, "train_speed(iter/s)": 0.672011 }, { "epoch": 2.4022107021978494, "grad_norm": 6.414790630340576, "learning_rate": 5.309976334324299e-05, "loss": 2.300796127319336, "memory(GiB)": 72.85, "step": 56070, "token_acc": 0.47491638795986624, "train_speed(iter/s)": 0.672013 }, { "epoch": 2.402424917527098, "grad_norm": 6.964176177978516, "learning_rate": 5.309304648723847e-05, "loss": 2.4428432464599608, "memory(GiB)": 72.85, "step": 56075, "token_acc": 0.4924924924924925, "train_speed(iter/s)": 0.672017 }, { "epoch": 2.402639132856347, "grad_norm": 3.744905710220337, "learning_rate": 5.308632957520051e-05, "loss": 2.31579647064209, "memory(GiB)": 72.85, "step": 56080, "token_acc": 0.46827794561933533, "train_speed(iter/s)": 0.672026 }, { "epoch": 2.4028533481855963, "grad_norm": 3.885684013366699, "learning_rate": 5.3079612607250793e-05, "loss": 2.509761428833008, "memory(GiB)": 72.85, "step": 56085, "token_acc": 0.48589341692789967, "train_speed(iter/s)": 0.672025 }, { "epoch": 2.403067563514845, "grad_norm": 4.545266151428223, "learning_rate": 5.307289558351101e-05, "loss": 2.0656196594238283, "memory(GiB)": 72.85, "step": 56090, "token_acc": 0.5266666666666666, "train_speed(iter/s)": 0.672024 }, { "epoch": 2.403281778844094, "grad_norm": 5.8650994300842285, "learning_rate": 5.3066178504102826e-05, "loss": 2.1427244186401366, "memory(GiB)": 72.85, "step": 56095, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672031 }, { "epoch": 2.403495994173343, "grad_norm": 4.984524250030518, 
"learning_rate": 5.305946136914796e-05, "loss": 2.429698371887207, "memory(GiB)": 72.85, "step": 56100, "token_acc": 0.5, "train_speed(iter/s)": 0.672022 }, { "epoch": 2.403710209502592, "grad_norm": 4.4669904708862305, "learning_rate": 5.305274417876807e-05, "loss": 2.2608974456787108, "memory(GiB)": 72.85, "step": 56105, "token_acc": 0.47435897435897434, "train_speed(iter/s)": 0.672008 }, { "epoch": 2.4039244248318408, "grad_norm": 7.345481872558594, "learning_rate": 5.304602693308486e-05, "loss": 2.517329978942871, "memory(GiB)": 72.85, "step": 56110, "token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.672015 }, { "epoch": 2.40413864016109, "grad_norm": 4.631346225738525, "learning_rate": 5.3039309632220015e-05, "loss": 2.130625915527344, "memory(GiB)": 72.85, "step": 56115, "token_acc": 0.5387323943661971, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.404352855490339, "grad_norm": 5.168181419372559, "learning_rate": 5.303259227629521e-05, "loss": 2.1102375030517577, "memory(GiB)": 72.85, "step": 56120, "token_acc": 0.5404255319148936, "train_speed(iter/s)": 0.672013 }, { "epoch": 2.4045670708195876, "grad_norm": 7.554594993591309, "learning_rate": 5.302587486543217e-05, "loss": 2.128819465637207, "memory(GiB)": 72.85, "step": 56125, "token_acc": 0.5725190839694656, "train_speed(iter/s)": 0.672004 }, { "epoch": 2.404781286148837, "grad_norm": 4.47348690032959, "learning_rate": 5.301915739975255e-05, "loss": 2.311166000366211, "memory(GiB)": 72.85, "step": 56130, "token_acc": 0.4899328859060403, "train_speed(iter/s)": 0.672006 }, { "epoch": 2.4049955014780857, "grad_norm": 5.011291980743408, "learning_rate": 5.301243987937809e-05, "loss": 2.452890396118164, "memory(GiB)": 72.85, "step": 56135, "token_acc": 0.46176470588235297, "train_speed(iter/s)": 0.672015 }, { "epoch": 2.4052097168073345, "grad_norm": 4.7769551277160645, "learning_rate": 5.300572230443044e-05, "loss": 2.1498653411865236, "memory(GiB)": 72.85, "step": 56140, "token_acc": 
0.5296296296296297, "train_speed(iter/s)": 0.672024 }, { "epoch": 2.405423932136584, "grad_norm": 4.726151466369629, "learning_rate": 5.29990046750313e-05, "loss": 2.1635236740112305, "memory(GiB)": 72.85, "step": 56145, "token_acc": 0.524822695035461, "train_speed(iter/s)": 0.672032 }, { "epoch": 2.4056381474658326, "grad_norm": 6.364801406860352, "learning_rate": 5.2992286991302397e-05, "loss": 2.2654870986938476, "memory(GiB)": 72.85, "step": 56150, "token_acc": 0.47770700636942676, "train_speed(iter/s)": 0.672046 }, { "epoch": 2.4058523627950814, "grad_norm": 4.327681064605713, "learning_rate": 5.298556925336539e-05, "loss": 1.8987030029296874, "memory(GiB)": 72.85, "step": 56155, "token_acc": 0.5531914893617021, "train_speed(iter/s)": 0.672049 }, { "epoch": 2.4060665781243307, "grad_norm": 5.764791011810303, "learning_rate": 5.297885146134199e-05, "loss": 2.1277606964111326, "memory(GiB)": 72.85, "step": 56160, "token_acc": 0.5265151515151515, "train_speed(iter/s)": 0.672052 }, { "epoch": 2.4062807934535795, "grad_norm": 5.3418121337890625, "learning_rate": 5.297213361535392e-05, "loss": 2.244291305541992, "memory(GiB)": 72.85, "step": 56165, "token_acc": 0.48013245033112584, "train_speed(iter/s)": 0.672075 }, { "epoch": 2.4064950087828283, "grad_norm": 4.85310697555542, "learning_rate": 5.296541571552287e-05, "loss": 2.191878890991211, "memory(GiB)": 72.85, "step": 56170, "token_acc": 0.5181818181818182, "train_speed(iter/s)": 0.672063 }, { "epoch": 2.4067092241120775, "grad_norm": 5.4947509765625, "learning_rate": 5.295869776197051e-05, "loss": 2.0987857818603515, "memory(GiB)": 72.85, "step": 56175, "token_acc": 0.5, "train_speed(iter/s)": 0.672074 }, { "epoch": 2.4069234394413264, "grad_norm": 5.167087078094482, "learning_rate": 5.2951979754818584e-05, "loss": 2.3768230438232423, "memory(GiB)": 72.85, "step": 56180, "token_acc": 0.5176470588235295, "train_speed(iter/s)": 0.672067 }, { "epoch": 2.407137654770575, "grad_norm": 5.214908123016357, 
"learning_rate": 5.2945261694188755e-05, "loss": 2.3963592529296873, "memory(GiB)": 72.85, "step": 56185, "token_acc": 0.45918367346938777, "train_speed(iter/s)": 0.672073 }, { "epoch": 2.4073518700998244, "grad_norm": 6.440683841705322, "learning_rate": 5.293854358020276e-05, "loss": 2.4651350021362304, "memory(GiB)": 72.85, "step": 56190, "token_acc": 0.5036764705882353, "train_speed(iter/s)": 0.672088 }, { "epoch": 2.4075660854290732, "grad_norm": 5.818894863128662, "learning_rate": 5.2931825412982286e-05, "loss": 2.474699020385742, "memory(GiB)": 72.85, "step": 56195, "token_acc": 0.5387453874538746, "train_speed(iter/s)": 0.6721 }, { "epoch": 2.407780300758322, "grad_norm": 5.113055229187012, "learning_rate": 5.292510719264905e-05, "loss": 2.027474594116211, "memory(GiB)": 72.85, "step": 56200, "token_acc": 0.565068493150685, "train_speed(iter/s)": 0.672103 }, { "epoch": 2.4079945160875713, "grad_norm": 5.080167770385742, "learning_rate": 5.291838891932475e-05, "loss": 2.4233367919921873, "memory(GiB)": 72.85, "step": 56205, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.672107 }, { "epoch": 2.40820873141682, "grad_norm": 4.934305191040039, "learning_rate": 5.29116705931311e-05, "loss": 1.9785001754760743, "memory(GiB)": 72.85, "step": 56210, "token_acc": 0.5708502024291497, "train_speed(iter/s)": 0.672119 }, { "epoch": 2.408422946746069, "grad_norm": 4.022805690765381, "learning_rate": 5.2904952214189795e-05, "loss": 2.1693046569824217, "memory(GiB)": 72.85, "step": 56215, "token_acc": 0.5415162454873647, "train_speed(iter/s)": 0.672132 }, { "epoch": 2.408637162075318, "grad_norm": 4.199501037597656, "learning_rate": 5.2898233782622575e-05, "loss": 1.9307140350341796, "memory(GiB)": 72.85, "step": 56220, "token_acc": 0.5782312925170068, "train_speed(iter/s)": 0.672141 }, { "epoch": 2.408851377404567, "grad_norm": 4.847868919372559, "learning_rate": 5.289151529855112e-05, "loss": 2.209427833557129, "memory(GiB)": 72.85, "step": 56225, "token_acc": 
0.527027027027027, "train_speed(iter/s)": 0.672135 }, { "epoch": 2.409065592733816, "grad_norm": 4.1973676681518555, "learning_rate": 5.288479676209715e-05, "loss": 2.1943836212158203, "memory(GiB)": 72.85, "step": 56230, "token_acc": 0.48742138364779874, "train_speed(iter/s)": 0.672139 }, { "epoch": 2.409279808063065, "grad_norm": 5.320876121520996, "learning_rate": 5.287807817338237e-05, "loss": 2.3762731552124023, "memory(GiB)": 72.85, "step": 56235, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.67215 }, { "epoch": 2.409494023392314, "grad_norm": 4.743624210357666, "learning_rate": 5.287135953252853e-05, "loss": 1.879014778137207, "memory(GiB)": 72.85, "step": 56240, "token_acc": 0.5423076923076923, "train_speed(iter/s)": 0.672153 }, { "epoch": 2.4097082387215627, "grad_norm": 4.854781150817871, "learning_rate": 5.28646408396573e-05, "loss": 2.286845588684082, "memory(GiB)": 72.85, "step": 56245, "token_acc": 0.5296442687747036, "train_speed(iter/s)": 0.672156 }, { "epoch": 2.409922454050812, "grad_norm": 3.8984944820404053, "learning_rate": 5.285792209489042e-05, "loss": 2.0661121368408204, "memory(GiB)": 72.85, "step": 56250, "token_acc": 0.5605095541401274, "train_speed(iter/s)": 0.672179 }, { "epoch": 2.4101366693800608, "grad_norm": 5.822908878326416, "learning_rate": 5.285120329834962e-05, "loss": 2.2252105712890624, "memory(GiB)": 72.85, "step": 56255, "token_acc": 0.4980694980694981, "train_speed(iter/s)": 0.672197 }, { "epoch": 2.4103508847093096, "grad_norm": 4.544365882873535, "learning_rate": 5.284448445015656e-05, "loss": 2.1902084350585938, "memory(GiB)": 72.85, "step": 56260, "token_acc": 0.5267175572519084, "train_speed(iter/s)": 0.6722 }, { "epoch": 2.410565100038559, "grad_norm": 7.19244384765625, "learning_rate": 5.283776555043302e-05, "loss": 2.5088272094726562, "memory(GiB)": 72.85, "step": 56265, "token_acc": 0.45675675675675675, "train_speed(iter/s)": 0.672205 }, { "epoch": 2.4107793153678077, "grad_norm": 6.1480326652526855, 
"learning_rate": 5.283104659930068e-05, "loss": 2.4804712295532227, "memory(GiB)": 72.85, "step": 56270, "token_acc": 0.5180327868852459, "train_speed(iter/s)": 0.672211 }, { "epoch": 2.4109935306970565, "grad_norm": 3.802931785583496, "learning_rate": 5.2824327596881284e-05, "loss": 2.2953548431396484, "memory(GiB)": 72.85, "step": 56275, "token_acc": 0.5015015015015015, "train_speed(iter/s)": 0.672215 }, { "epoch": 2.4112077460263057, "grad_norm": 5.424661159515381, "learning_rate": 5.281760854329655e-05, "loss": 2.170071029663086, "memory(GiB)": 72.85, "step": 56280, "token_acc": 0.5181518151815182, "train_speed(iter/s)": 0.672214 }, { "epoch": 2.4114219613555545, "grad_norm": 3.978670835494995, "learning_rate": 5.281088943866819e-05, "loss": 2.182229232788086, "memory(GiB)": 72.85, "step": 56285, "token_acc": 0.5031645569620253, "train_speed(iter/s)": 0.672226 }, { "epoch": 2.4116361766848033, "grad_norm": 5.370726585388184, "learning_rate": 5.2804170283117926e-05, "loss": 2.456431579589844, "memory(GiB)": 72.85, "step": 56290, "token_acc": 0.46779661016949153, "train_speed(iter/s)": 0.672229 }, { "epoch": 2.4118503920140526, "grad_norm": 6.254583835601807, "learning_rate": 5.279745107676749e-05, "loss": 2.392455291748047, "memory(GiB)": 72.85, "step": 56295, "token_acc": 0.498220640569395, "train_speed(iter/s)": 0.672225 }, { "epoch": 2.4120646073433014, "grad_norm": 3.9399731159210205, "learning_rate": 5.279073181973861e-05, "loss": 2.159857749938965, "memory(GiB)": 72.85, "step": 56300, "token_acc": 0.5639097744360902, "train_speed(iter/s)": 0.672226 }, { "epoch": 2.4122788226725502, "grad_norm": 5.291172981262207, "learning_rate": 5.2784012512152984e-05, "loss": 2.446253776550293, "memory(GiB)": 72.85, "step": 56305, "token_acc": 0.48184818481848185, "train_speed(iter/s)": 0.672233 }, { "epoch": 2.4124930380017995, "grad_norm": 3.712893009185791, "learning_rate": 5.277729315413238e-05, "loss": 2.211051368713379, "memory(GiB)": 72.85, "step": 56310, 
"token_acc": 0.5335365853658537, "train_speed(iter/s)": 0.672222 }, { "epoch": 2.4127072533310483, "grad_norm": 4.428051948547363, "learning_rate": 5.27705737457985e-05, "loss": 1.9751152038574218, "memory(GiB)": 72.85, "step": 56315, "token_acc": 0.5487364620938628, "train_speed(iter/s)": 0.67222 }, { "epoch": 2.412921468660297, "grad_norm": 4.007524490356445, "learning_rate": 5.276385428727307e-05, "loss": 2.2602611541748048, "memory(GiB)": 72.85, "step": 56320, "token_acc": 0.49201277955271566, "train_speed(iter/s)": 0.672206 }, { "epoch": 2.4131356839895464, "grad_norm": 5.207219123840332, "learning_rate": 5.275713477867785e-05, "loss": 2.0259115219116213, "memory(GiB)": 72.85, "step": 56325, "token_acc": 0.5544871794871795, "train_speed(iter/s)": 0.672207 }, { "epoch": 2.413349899318795, "grad_norm": 4.527654647827148, "learning_rate": 5.275041522013452e-05, "loss": 2.255393218994141, "memory(GiB)": 72.85, "step": 56330, "token_acc": 0.5037037037037037, "train_speed(iter/s)": 0.672216 }, { "epoch": 2.413564114648044, "grad_norm": 5.174074172973633, "learning_rate": 5.274369561176485e-05, "loss": 2.4072948455810548, "memory(GiB)": 72.85, "step": 56335, "token_acc": 0.4923547400611621, "train_speed(iter/s)": 0.672209 }, { "epoch": 2.4137783299772932, "grad_norm": 4.234846115112305, "learning_rate": 5.273697595369056e-05, "loss": 1.7406579971313476, "memory(GiB)": 72.85, "step": 56340, "token_acc": 0.6115384615384616, "train_speed(iter/s)": 0.672209 }, { "epoch": 2.413992545306542, "grad_norm": 4.235485076904297, "learning_rate": 5.2730256246033396e-05, "loss": 2.0638614654541017, "memory(GiB)": 72.85, "step": 56345, "token_acc": 0.5514018691588785, "train_speed(iter/s)": 0.672206 }, { "epoch": 2.414206760635791, "grad_norm": 4.80652379989624, "learning_rate": 5.272353648891505e-05, "loss": 2.187723922729492, "memory(GiB)": 72.85, "step": 56350, "token_acc": 0.5183823529411765, "train_speed(iter/s)": 0.67221 }, { "epoch": 2.41442097596504, "grad_norm": 
4.894373416900635, "learning_rate": 5.2716816682457305e-05, "loss": 2.2463403701782227, "memory(GiB)": 72.85, "step": 56355, "token_acc": 0.49508196721311476, "train_speed(iter/s)": 0.672212 }, { "epoch": 2.414635191294289, "grad_norm": 3.9849767684936523, "learning_rate": 5.271009682678187e-05, "loss": 2.3733440399169923, "memory(GiB)": 72.85, "step": 56360, "token_acc": 0.47023809523809523, "train_speed(iter/s)": 0.672228 }, { "epoch": 2.4148494066235378, "grad_norm": 4.836683750152588, "learning_rate": 5.2703376922010496e-05, "loss": 2.2019750595092775, "memory(GiB)": 72.85, "step": 56365, "token_acc": 0.506896551724138, "train_speed(iter/s)": 0.672235 }, { "epoch": 2.415063621952787, "grad_norm": 4.680615425109863, "learning_rate": 5.269665696826491e-05, "loss": 2.377886199951172, "memory(GiB)": 72.85, "step": 56370, "token_acc": 0.5266272189349113, "train_speed(iter/s)": 0.672245 }, { "epoch": 2.415277837282036, "grad_norm": 3.8137755393981934, "learning_rate": 5.2689936965666854e-05, "loss": 2.0976497650146486, "memory(GiB)": 72.85, "step": 56375, "token_acc": 0.5148514851485149, "train_speed(iter/s)": 0.672265 }, { "epoch": 2.4154920526112846, "grad_norm": 3.603773832321167, "learning_rate": 5.2683216914338064e-05, "loss": 2.37313289642334, "memory(GiB)": 72.85, "step": 56380, "token_acc": 0.5192878338278932, "train_speed(iter/s)": 0.672263 }, { "epoch": 2.415706267940534, "grad_norm": 4.373320579528809, "learning_rate": 5.267649681440029e-05, "loss": 2.267717742919922, "memory(GiB)": 72.85, "step": 56385, "token_acc": 0.5241635687732342, "train_speed(iter/s)": 0.672266 }, { "epoch": 2.4159204832697827, "grad_norm": 4.660749435424805, "learning_rate": 5.2669776665975246e-05, "loss": 2.1735580444335936, "memory(GiB)": 72.85, "step": 56390, "token_acc": 0.5299401197604791, "train_speed(iter/s)": 0.672274 }, { "epoch": 2.4161346985990315, "grad_norm": 5.379993915557861, "learning_rate": 5.2663056469184716e-05, "loss": 2.3440656661987305, "memory(GiB)": 72.85, 
"step": 56395, "token_acc": 0.5074074074074074, "train_speed(iter/s)": 0.672267 }, { "epoch": 2.4163489139282808, "grad_norm": 5.608449459075928, "learning_rate": 5.2656336224150415e-05, "loss": 1.9384464263916015, "memory(GiB)": 72.85, "step": 56400, "token_acc": 0.5909090909090909, "train_speed(iter/s)": 0.672264 }, { "epoch": 2.4165631292575296, "grad_norm": 5.615568161010742, "learning_rate": 5.264961593099408e-05, "loss": 2.544936943054199, "memory(GiB)": 72.85, "step": 56405, "token_acc": 0.47701149425287354, "train_speed(iter/s)": 0.672272 }, { "epoch": 2.4167773445867784, "grad_norm": 3.917375326156616, "learning_rate": 5.264289558983748e-05, "loss": 1.8208351135253906, "memory(GiB)": 72.85, "step": 56410, "token_acc": 0.5925925925925926, "train_speed(iter/s)": 0.672259 }, { "epoch": 2.4169915599160277, "grad_norm": 5.276121616363525, "learning_rate": 5.263617520080233e-05, "loss": 2.2708595275878904, "memory(GiB)": 72.85, "step": 56415, "token_acc": 0.4625, "train_speed(iter/s)": 0.67226 }, { "epoch": 2.4172057752452765, "grad_norm": 6.367427349090576, "learning_rate": 5.26294547640104e-05, "loss": 2.840862846374512, "memory(GiB)": 72.85, "step": 56420, "token_acc": 0.43478260869565216, "train_speed(iter/s)": 0.672261 }, { "epoch": 2.4174199905745253, "grad_norm": 5.8059492111206055, "learning_rate": 5.262273427958344e-05, "loss": 2.151096153259277, "memory(GiB)": 72.85, "step": 56425, "token_acc": 0.5114503816793893, "train_speed(iter/s)": 0.67226 }, { "epoch": 2.4176342059037745, "grad_norm": 6.775182247161865, "learning_rate": 5.261601374764319e-05, "loss": 2.4195428848266602, "memory(GiB)": 72.85, "step": 56430, "token_acc": 0.46579804560260585, "train_speed(iter/s)": 0.672253 }, { "epoch": 2.4178484212330233, "grad_norm": 4.406580924987793, "learning_rate": 5.260929316831138e-05, "loss": 2.1994045257568358, "memory(GiB)": 72.85, "step": 56435, "token_acc": 0.5347985347985348, "train_speed(iter/s)": 0.672256 }, { "epoch": 2.418062636562272, 
"grad_norm": 4.2110114097595215, "learning_rate": 5.260257254170979e-05, "loss": 1.9212434768676758, "memory(GiB)": 72.85, "step": 56440, "token_acc": 0.5581395348837209, "train_speed(iter/s)": 0.672253 }, { "epoch": 2.4182768518915214, "grad_norm": 5.652708530426025, "learning_rate": 5.2595851867960165e-05, "loss": 2.2964061737060546, "memory(GiB)": 72.85, "step": 56445, "token_acc": 0.46686746987951805, "train_speed(iter/s)": 0.672257 }, { "epoch": 2.4184910672207702, "grad_norm": 5.631543159484863, "learning_rate": 5.258913114718421e-05, "loss": 2.350720977783203, "memory(GiB)": 72.85, "step": 56450, "token_acc": 0.4813753581661891, "train_speed(iter/s)": 0.672251 }, { "epoch": 2.418705282550019, "grad_norm": 4.303125381469727, "learning_rate": 5.2582410379503755e-05, "loss": 2.338216209411621, "memory(GiB)": 72.85, "step": 56455, "token_acc": 0.4645161290322581, "train_speed(iter/s)": 0.672246 }, { "epoch": 2.4189194978792683, "grad_norm": 5.45749568939209, "learning_rate": 5.257568956504051e-05, "loss": 1.872461700439453, "memory(GiB)": 72.85, "step": 56460, "token_acc": 0.5887850467289719, "train_speed(iter/s)": 0.672244 }, { "epoch": 2.419133713208517, "grad_norm": 5.082468509674072, "learning_rate": 5.256896870391621e-05, "loss": 2.184437370300293, "memory(GiB)": 72.85, "step": 56465, "token_acc": 0.5296610169491526, "train_speed(iter/s)": 0.672244 }, { "epoch": 2.419347928537766, "grad_norm": 4.458061695098877, "learning_rate": 5.256224779625265e-05, "loss": 2.2522125244140625, "memory(GiB)": 72.85, "step": 56470, "token_acc": 0.5765765765765766, "train_speed(iter/s)": 0.672243 }, { "epoch": 2.419562143867015, "grad_norm": 4.0775885581970215, "learning_rate": 5.2555526842171554e-05, "loss": 2.075832748413086, "memory(GiB)": 72.85, "step": 56475, "token_acc": 0.5608108108108109, "train_speed(iter/s)": 0.672261 }, { "epoch": 2.419776359196264, "grad_norm": 6.608166217803955, "learning_rate": 5.2548805841794714e-05, "loss": 2.147364044189453, "memory(GiB)": 
72.85, "step": 56480, "token_acc": 0.5280898876404494, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.419990574525513, "grad_norm": 5.711330890655518, "learning_rate": 5.254208479524385e-05, "loss": 2.6471206665039064, "memory(GiB)": 72.85, "step": 56485, "token_acc": 0.48242811501597443, "train_speed(iter/s)": 0.672279 }, { "epoch": 2.420204789854762, "grad_norm": 4.659440994262695, "learning_rate": 5.253536370264073e-05, "loss": 2.3113998413085937, "memory(GiB)": 72.85, "step": 56490, "token_acc": 0.5130111524163569, "train_speed(iter/s)": 0.672279 }, { "epoch": 2.420419005184011, "grad_norm": 4.554684638977051, "learning_rate": 5.252864256410712e-05, "loss": 2.3390501022338865, "memory(GiB)": 72.85, "step": 56495, "token_acc": 0.48639455782312924, "train_speed(iter/s)": 0.672273 }, { "epoch": 2.4206332205132597, "grad_norm": 5.4641804695129395, "learning_rate": 5.2521921379764796e-05, "loss": 2.7147178649902344, "memory(GiB)": 72.85, "step": 56500, "token_acc": 0.47719298245614034, "train_speed(iter/s)": 0.672276 }, { "epoch": 2.4206332205132597, "eval_loss": 2.066763162612915, "eval_runtime": 15.756, "eval_samples_per_second": 6.347, "eval_steps_per_second": 6.347, "eval_token_acc": 0.4980443285528031, "step": 56500 }, { "epoch": 2.420847435842509, "grad_norm": 4.590749740600586, "learning_rate": 5.251520014973548e-05, "loss": 2.3085222244262695, "memory(GiB)": 72.85, "step": 56505, "token_acc": 0.4968609865470852, "train_speed(iter/s)": 0.672132 }, { "epoch": 2.4210616511717578, "grad_norm": 4.945006847381592, "learning_rate": 5.250847887414096e-05, "loss": 2.072057342529297, "memory(GiB)": 72.85, "step": 56510, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.672135 }, { "epoch": 2.4212758665010066, "grad_norm": 7.234052658081055, "learning_rate": 5.2501757553103e-05, "loss": 2.116799736022949, "memory(GiB)": 72.85, "step": 56515, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.67213 }, { "epoch": 2.421490081830256, "grad_norm": 
7.383198261260986, "learning_rate": 5.249503618674334e-05, "loss": 2.5330202102661135, "memory(GiB)": 72.85, "step": 56520, "token_acc": 0.48363636363636364, "train_speed(iter/s)": 0.672104 }, { "epoch": 2.4217042971595046, "grad_norm": 5.440727233886719, "learning_rate": 5.2488314775183766e-05, "loss": 2.406786346435547, "memory(GiB)": 72.85, "step": 56525, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.672094 }, { "epoch": 2.4219185124887534, "grad_norm": 3.7181508541107178, "learning_rate": 5.248159331854604e-05, "loss": 2.1903192520141603, "memory(GiB)": 72.85, "step": 56530, "token_acc": 0.5159235668789809, "train_speed(iter/s)": 0.672093 }, { "epoch": 2.4221327278180027, "grad_norm": 4.204231262207031, "learning_rate": 5.247487181695191e-05, "loss": 2.0690914154052735, "memory(GiB)": 72.85, "step": 56535, "token_acc": 0.5656934306569343, "train_speed(iter/s)": 0.6721 }, { "epoch": 2.4223469431472515, "grad_norm": 5.745029449462891, "learning_rate": 5.246815027052318e-05, "loss": 2.369127655029297, "memory(GiB)": 72.85, "step": 56540, "token_acc": 0.5239520958083832, "train_speed(iter/s)": 0.672092 }, { "epoch": 2.4225611584765003, "grad_norm": 4.208278179168701, "learning_rate": 5.2461428679381575e-05, "loss": 2.2572423934936525, "memory(GiB)": 72.85, "step": 56545, "token_acc": 0.5016393442622951, "train_speed(iter/s)": 0.672095 }, { "epoch": 2.4227753738057496, "grad_norm": 6.561975002288818, "learning_rate": 5.245470704364888e-05, "loss": 2.2758771896362306, "memory(GiB)": 72.85, "step": 56550, "token_acc": 0.5378787878787878, "train_speed(iter/s)": 0.672099 }, { "epoch": 2.4229895891349984, "grad_norm": 4.7956342697143555, "learning_rate": 5.244798536344687e-05, "loss": 2.1964748382568358, "memory(GiB)": 72.85, "step": 56555, "token_acc": 0.5192878338278932, "train_speed(iter/s)": 0.672103 }, { "epoch": 2.423203804464247, "grad_norm": 6.003749847412109, "learning_rate": 5.2441263638897295e-05, "loss": 2.2781579971313475, "memory(GiB)": 72.85, 
"step": 56560, "token_acc": 0.49814126394052044, "train_speed(iter/s)": 0.672116 }, { "epoch": 2.4234180197934965, "grad_norm": 5.137377738952637, "learning_rate": 5.2434541870121954e-05, "loss": 2.0580764770507813, "memory(GiB)": 72.85, "step": 56565, "token_acc": 0.5186567164179104, "train_speed(iter/s)": 0.672117 }, { "epoch": 2.4236322351227453, "grad_norm": 4.919804573059082, "learning_rate": 5.24278200572426e-05, "loss": 2.267719268798828, "memory(GiB)": 72.85, "step": 56570, "token_acc": 0.5527272727272727, "train_speed(iter/s)": 0.67213 }, { "epoch": 2.4238464504519945, "grad_norm": 5.8107075691223145, "learning_rate": 5.242109820038101e-05, "loss": 2.3830886840820313, "memory(GiB)": 72.85, "step": 56575, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.67214 }, { "epoch": 2.4240606657812434, "grad_norm": 5.513967514038086, "learning_rate": 5.241437629965895e-05, "loss": 2.312916946411133, "memory(GiB)": 72.85, "step": 56580, "token_acc": 0.5174603174603175, "train_speed(iter/s)": 0.67213 }, { "epoch": 2.424274881110492, "grad_norm": 4.160298824310303, "learning_rate": 5.240765435519821e-05, "loss": 2.1875890731811523, "memory(GiB)": 72.85, "step": 56585, "token_acc": 0.5072463768115942, "train_speed(iter/s)": 0.672134 }, { "epoch": 2.4244890964397414, "grad_norm": 5.882996559143066, "learning_rate": 5.2400932367120535e-05, "loss": 2.0037227630615235, "memory(GiB)": 72.85, "step": 56590, "token_acc": 0.5418181818181819, "train_speed(iter/s)": 0.672144 }, { "epoch": 2.4247033117689902, "grad_norm": 4.894522190093994, "learning_rate": 5.239421033554773e-05, "loss": 2.3563161849975587, "memory(GiB)": 72.85, "step": 56595, "token_acc": 0.47388059701492535, "train_speed(iter/s)": 0.672145 }, { "epoch": 2.424917527098239, "grad_norm": 5.344540119171143, "learning_rate": 5.238748826060156e-05, "loss": 2.212078094482422, "memory(GiB)": 72.85, "step": 56600, "token_acc": 0.4847560975609756, "train_speed(iter/s)": 0.67215 }, { "epoch": 2.4251317424274883, 
"grad_norm": 5.796063423156738, "learning_rate": 5.2380766142403794e-05, "loss": 2.1989372253417967, "memory(GiB)": 72.85, "step": 56605, "token_acc": 0.5399239543726235, "train_speed(iter/s)": 0.672141 }, { "epoch": 2.425345957756737, "grad_norm": 4.620692729949951, "learning_rate": 5.237404398107622e-05, "loss": 2.1632368087768556, "memory(GiB)": 72.85, "step": 56610, "token_acc": 0.549645390070922, "train_speed(iter/s)": 0.672129 }, { "epoch": 2.425560173085986, "grad_norm": 5.079030513763428, "learning_rate": 5.236732177674061e-05, "loss": 2.065398025512695, "memory(GiB)": 72.85, "step": 56615, "token_acc": 0.5326797385620915, "train_speed(iter/s)": 0.672129 }, { "epoch": 2.425774388415235, "grad_norm": 5.1130523681640625, "learning_rate": 5.2360599529518736e-05, "loss": 2.2023103713989256, "memory(GiB)": 72.85, "step": 56620, "token_acc": 0.5370919881305638, "train_speed(iter/s)": 0.672142 }, { "epoch": 2.425988603744484, "grad_norm": 5.0172343254089355, "learning_rate": 5.2353877239532404e-05, "loss": 2.1828895568847657, "memory(GiB)": 72.85, "step": 56625, "token_acc": 0.5432525951557093, "train_speed(iter/s)": 0.672141 }, { "epoch": 2.426202819073733, "grad_norm": 4.936988353729248, "learning_rate": 5.234715490690337e-05, "loss": 2.345752143859863, "memory(GiB)": 72.85, "step": 56630, "token_acc": 0.487012987012987, "train_speed(iter/s)": 0.672154 }, { "epoch": 2.426417034402982, "grad_norm": 3.7752318382263184, "learning_rate": 5.2340432531753426e-05, "loss": 2.0928958892822265, "memory(GiB)": 72.85, "step": 56635, "token_acc": 0.55625, "train_speed(iter/s)": 0.672148 }, { "epoch": 2.426631249732231, "grad_norm": 4.3750128746032715, "learning_rate": 5.233371011420434e-05, "loss": 2.326816749572754, "memory(GiB)": 72.85, "step": 56640, "token_acc": 0.5111111111111111, "train_speed(iter/s)": 0.672153 }, { "epoch": 2.4268454650614797, "grad_norm": 3.978192090988159, "learning_rate": 5.2326987654377926e-05, "loss": 2.392993354797363, "memory(GiB)": 72.85, 
"step": 56645, "token_acc": 0.4831804281345566, "train_speed(iter/s)": 0.672154 }, { "epoch": 2.427059680390729, "grad_norm": 4.852105617523193, "learning_rate": 5.232026515239593e-05, "loss": 2.0914255142211915, "memory(GiB)": 72.85, "step": 56650, "token_acc": 0.5598455598455598, "train_speed(iter/s)": 0.672161 }, { "epoch": 2.4272738957199778, "grad_norm": 6.0108723640441895, "learning_rate": 5.231354260838016e-05, "loss": 1.9428676605224608, "memory(GiB)": 72.85, "step": 56655, "token_acc": 0.5784313725490197, "train_speed(iter/s)": 0.672156 }, { "epoch": 2.4274881110492266, "grad_norm": 4.428952217102051, "learning_rate": 5.2306820022452404e-05, "loss": 2.1489536285400392, "memory(GiB)": 72.85, "step": 56660, "token_acc": 0.5308219178082192, "train_speed(iter/s)": 0.672157 }, { "epoch": 2.427702326378476, "grad_norm": 4.63079833984375, "learning_rate": 5.2300097394734426e-05, "loss": 2.3167266845703125, "memory(GiB)": 72.85, "step": 56665, "token_acc": 0.5230769230769231, "train_speed(iter/s)": 0.67217 }, { "epoch": 2.4279165417077246, "grad_norm": 4.409576892852783, "learning_rate": 5.229337472534803e-05, "loss": 2.5069007873535156, "memory(GiB)": 72.85, "step": 56670, "token_acc": 0.473972602739726, "train_speed(iter/s)": 0.672173 }, { "epoch": 2.4281307570369735, "grad_norm": 4.756968021392822, "learning_rate": 5.228665201441501e-05, "loss": 2.3082977294921876, "memory(GiB)": 72.85, "step": 56675, "token_acc": 0.543859649122807, "train_speed(iter/s)": 0.67217 }, { "epoch": 2.4283449723662227, "grad_norm": 4.9944963455200195, "learning_rate": 5.227992926205711e-05, "loss": 2.3358104705810545, "memory(GiB)": 72.85, "step": 56680, "token_acc": 0.5297450424929179, "train_speed(iter/s)": 0.672183 }, { "epoch": 2.4285591876954715, "grad_norm": 4.2469706535339355, "learning_rate": 5.22732064683962e-05, "loss": 2.301583480834961, "memory(GiB)": 72.85, "step": 56685, "token_acc": 0.5053763440860215, "train_speed(iter/s)": 0.672189 }, { "epoch": 2.4287734030247203, 
"grad_norm": 6.016251087188721, "learning_rate": 5.2266483633554e-05, "loss": 2.364543151855469, "memory(GiB)": 72.85, "step": 56690, "token_acc": 0.5038167938931297, "train_speed(iter/s)": 0.672169 }, { "epoch": 2.4289876183539696, "grad_norm": 3.8711273670196533, "learning_rate": 5.225976075765231e-05, "loss": 2.2735347747802734, "memory(GiB)": 72.85, "step": 56695, "token_acc": 0.5034013605442177, "train_speed(iter/s)": 0.672158 }, { "epoch": 2.4292018336832184, "grad_norm": 5.276119232177734, "learning_rate": 5.225303784081296e-05, "loss": 2.3434940338134767, "memory(GiB)": 72.85, "step": 56700, "token_acc": 0.4927007299270073, "train_speed(iter/s)": 0.672166 }, { "epoch": 2.429416049012467, "grad_norm": 4.834980487823486, "learning_rate": 5.2246314883157696e-05, "loss": 2.3498502731323243, "memory(GiB)": 72.85, "step": 56705, "token_acc": 0.49429657794676807, "train_speed(iter/s)": 0.672163 }, { "epoch": 2.4296302643417165, "grad_norm": 4.8202080726623535, "learning_rate": 5.223959188480833e-05, "loss": 2.277886390686035, "memory(GiB)": 72.85, "step": 56710, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.672158 }, { "epoch": 2.4298444796709653, "grad_norm": 4.196802139282227, "learning_rate": 5.223286884588666e-05, "loss": 2.143942451477051, "memory(GiB)": 72.85, "step": 56715, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.67217 }, { "epoch": 2.430058695000214, "grad_norm": 4.746835708618164, "learning_rate": 5.222614576651449e-05, "loss": 2.2834415435791016, "memory(GiB)": 72.85, "step": 56720, "token_acc": 0.4865771812080537, "train_speed(iter/s)": 0.672158 }, { "epoch": 2.4302729103294634, "grad_norm": 4.885763645172119, "learning_rate": 5.2219422646813587e-05, "loss": 2.39174861907959, "memory(GiB)": 72.85, "step": 56725, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.672178 }, { "epoch": 2.430487125658712, "grad_norm": 5.02217435836792, "learning_rate": 5.221269948690577e-05, "loss": 2.001886177062988, "memory(GiB)": 
72.85, "step": 56730, "token_acc": 0.55893536121673, "train_speed(iter/s)": 0.672195 }, { "epoch": 2.430701340987961, "grad_norm": 4.546571731567383, "learning_rate": 5.2205976286912816e-05, "loss": 2.396697425842285, "memory(GiB)": 72.85, "step": 56735, "token_acc": 0.4699248120300752, "train_speed(iter/s)": 0.672185 }, { "epoch": 2.4309155563172102, "grad_norm": 7.2116007804870605, "learning_rate": 5.2199253046956544e-05, "loss": 2.2691928863525392, "memory(GiB)": 72.85, "step": 56740, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.672191 }, { "epoch": 2.431129771646459, "grad_norm": 5.978128433227539, "learning_rate": 5.219252976715873e-05, "loss": 2.4344160079956056, "memory(GiB)": 72.85, "step": 56745, "token_acc": 0.49642857142857144, "train_speed(iter/s)": 0.672195 }, { "epoch": 2.431343986975708, "grad_norm": 4.806379318237305, "learning_rate": 5.2185806447641186e-05, "loss": 1.9848878860473633, "memory(GiB)": 72.85, "step": 56750, "token_acc": 0.5390334572490706, "train_speed(iter/s)": 0.672192 }, { "epoch": 2.431558202304957, "grad_norm": 4.8323774337768555, "learning_rate": 5.2179083088525694e-05, "loss": 2.0163768768310546, "memory(GiB)": 72.85, "step": 56755, "token_acc": 0.5226480836236934, "train_speed(iter/s)": 0.672195 }, { "epoch": 2.431772417634206, "grad_norm": 4.3922200202941895, "learning_rate": 5.217235968993409e-05, "loss": 2.1234073638916016, "memory(GiB)": 72.85, "step": 56760, "token_acc": 0.5314465408805031, "train_speed(iter/s)": 0.6722 }, { "epoch": 2.4319866329634547, "grad_norm": 4.708302021026611, "learning_rate": 5.2165636251988134e-05, "loss": 2.560430335998535, "memory(GiB)": 72.85, "step": 56765, "token_acc": 0.48242811501597443, "train_speed(iter/s)": 0.672208 }, { "epoch": 2.432200848292704, "grad_norm": 4.700854301452637, "learning_rate": 5.2158912774809666e-05, "loss": 2.3575069427490236, "memory(GiB)": 72.85, "step": 56770, "token_acc": 0.4887459807073955, "train_speed(iter/s)": 0.672201 }, { "epoch": 
2.432415063621953, "grad_norm": 4.430868148803711, "learning_rate": 5.2152189258520454e-05, "loss": 2.1056468963623045, "memory(GiB)": 72.85, "step": 56775, "token_acc": 0.5241635687732342, "train_speed(iter/s)": 0.672202 }, { "epoch": 2.4326292789512016, "grad_norm": 5.64772367477417, "learning_rate": 5.2145465703242316e-05, "loss": 2.3508968353271484, "memory(GiB)": 72.85, "step": 56780, "token_acc": 0.4807121661721068, "train_speed(iter/s)": 0.672208 }, { "epoch": 2.432843494280451, "grad_norm": 3.4441568851470947, "learning_rate": 5.213874210909705e-05, "loss": 2.367068290710449, "memory(GiB)": 72.85, "step": 56785, "token_acc": 0.49122807017543857, "train_speed(iter/s)": 0.672222 }, { "epoch": 2.4330577096096997, "grad_norm": 5.477733135223389, "learning_rate": 5.213201847620648e-05, "loss": 2.2981510162353516, "memory(GiB)": 72.85, "step": 56790, "token_acc": 0.5213414634146342, "train_speed(iter/s)": 0.672218 }, { "epoch": 2.4332719249389485, "grad_norm": 4.191452503204346, "learning_rate": 5.212529480469237e-05, "loss": 2.3038614273071287, "memory(GiB)": 72.85, "step": 56795, "token_acc": 0.5211267605633803, "train_speed(iter/s)": 0.67222 }, { "epoch": 2.4334861402681978, "grad_norm": 3.7468838691711426, "learning_rate": 5.2118571094676586e-05, "loss": 2.0072649002075194, "memory(GiB)": 72.85, "step": 56800, "token_acc": 0.5255474452554745, "train_speed(iter/s)": 0.672217 }, { "epoch": 2.4337003555974466, "grad_norm": 4.948574542999268, "learning_rate": 5.211184734628087e-05, "loss": 2.119561767578125, "memory(GiB)": 72.85, "step": 56805, "token_acc": 0.5163636363636364, "train_speed(iter/s)": 0.672221 }, { "epoch": 2.4339145709266954, "grad_norm": 5.402403831481934, "learning_rate": 5.2105123559627065e-05, "loss": 2.3575408935546873, "memory(GiB)": 72.85, "step": 56810, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.672238 }, { "epoch": 2.4341287862559446, "grad_norm": 3.7289936542510986, "learning_rate": 5.209839973483699e-05, "loss": 
2.262715530395508, "memory(GiB)": 72.85, "step": 56815, "token_acc": 0.5261324041811847, "train_speed(iter/s)": 0.672243 }, { "epoch": 2.4343430015851935, "grad_norm": 6.151813983917236, "learning_rate": 5.2091675872032406e-05, "loss": 2.3198610305786134, "memory(GiB)": 72.85, "step": 56820, "token_acc": 0.531986531986532, "train_speed(iter/s)": 0.672251 }, { "epoch": 2.4345572169144423, "grad_norm": 4.4994072914123535, "learning_rate": 5.208495197133516e-05, "loss": 2.3956024169921877, "memory(GiB)": 72.85, "step": 56825, "token_acc": 0.47333333333333333, "train_speed(iter/s)": 0.672253 }, { "epoch": 2.4347714322436915, "grad_norm": 4.750564098358154, "learning_rate": 5.207822803286706e-05, "loss": 2.193999481201172, "memory(GiB)": 72.85, "step": 56830, "token_acc": 0.5390946502057613, "train_speed(iter/s)": 0.672255 }, { "epoch": 2.4349856475729403, "grad_norm": 4.767832279205322, "learning_rate": 5.207150405674992e-05, "loss": 2.362788200378418, "memory(GiB)": 72.85, "step": 56835, "token_acc": 0.49240121580547114, "train_speed(iter/s)": 0.672265 }, { "epoch": 2.435199862902189, "grad_norm": 6.444470405578613, "learning_rate": 5.206478004310552e-05, "loss": 2.37154483795166, "memory(GiB)": 72.85, "step": 56840, "token_acc": 0.4713804713804714, "train_speed(iter/s)": 0.672266 }, { "epoch": 2.4354140782314384, "grad_norm": 6.277686595916748, "learning_rate": 5.205805599205571e-05, "loss": 2.4679460525512695, "memory(GiB)": 72.85, "step": 56845, "token_acc": 0.4789156626506024, "train_speed(iter/s)": 0.672263 }, { "epoch": 2.435628293560687, "grad_norm": 4.252838134765625, "learning_rate": 5.205133190372228e-05, "loss": 2.411235809326172, "memory(GiB)": 72.85, "step": 56850, "token_acc": 0.48467966573816157, "train_speed(iter/s)": 0.672258 }, { "epoch": 2.435842508889936, "grad_norm": 4.938588619232178, "learning_rate": 5.204460777822704e-05, "loss": 2.4069896697998048, "memory(GiB)": 72.85, "step": 56855, "token_acc": 0.498220640569395, "train_speed(iter/s)": 
0.672263 }, { "epoch": 2.4360567242191853, "grad_norm": 4.931520938873291, "learning_rate": 5.203922845115621e-05, "loss": 2.223288154602051, "memory(GiB)": 72.85, "step": 56860, "token_acc": 0.5229681978798587, "train_speed(iter/s)": 0.672272 }, { "epoch": 2.436270939548434, "grad_norm": 4.513496398925781, "learning_rate": 5.20325042590767e-05, "loss": 2.102462387084961, "memory(GiB)": 72.85, "step": 56865, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672267 }, { "epoch": 2.436485154877683, "grad_norm": 7.2637786865234375, "learning_rate": 5.2025780030176475e-05, "loss": 2.360449028015137, "memory(GiB)": 72.85, "step": 56870, "token_acc": 0.5261538461538462, "train_speed(iter/s)": 0.672265 }, { "epoch": 2.436699370206932, "grad_norm": 4.404484748840332, "learning_rate": 5.2019055764577354e-05, "loss": 2.490711784362793, "memory(GiB)": 72.85, "step": 56875, "token_acc": 0.45989304812834225, "train_speed(iter/s)": 0.672269 }, { "epoch": 2.436913585536181, "grad_norm": 5.860114097595215, "learning_rate": 5.201233146240112e-05, "loss": 2.248210906982422, "memory(GiB)": 72.85, "step": 56880, "token_acc": 0.5164835164835165, "train_speed(iter/s)": 0.672278 }, { "epoch": 2.43712780086543, "grad_norm": 5.711979866027832, "learning_rate": 5.200560712376963e-05, "loss": 2.264247703552246, "memory(GiB)": 72.85, "step": 56885, "token_acc": 0.5112781954887218, "train_speed(iter/s)": 0.672276 }, { "epoch": 2.437342016194679, "grad_norm": 5.255344390869141, "learning_rate": 5.199888274880467e-05, "loss": 2.0705120086669924, "memory(GiB)": 72.85, "step": 56890, "token_acc": 0.54421768707483, "train_speed(iter/s)": 0.672281 }, { "epoch": 2.437556231523928, "grad_norm": 5.039056777954102, "learning_rate": 5.19921583376281e-05, "loss": 2.117949295043945, "memory(GiB)": 72.85, "step": 56895, "token_acc": 0.5304659498207885, "train_speed(iter/s)": 0.672286 }, { "epoch": 2.4377704468531767, "grad_norm": 5.896584510803223, "learning_rate": 5.198543389036169e-05, "loss": 
2.415192985534668, "memory(GiB)": 72.85, "step": 56900, "token_acc": 0.4807017543859649, "train_speed(iter/s)": 0.672273 }, { "epoch": 2.437984662182426, "grad_norm": 5.137190341949463, "learning_rate": 5.197870940712729e-05, "loss": 1.8545400619506835, "memory(GiB)": 72.85, "step": 56905, "token_acc": 0.5604395604395604, "train_speed(iter/s)": 0.672272 }, { "epoch": 2.4381988775116747, "grad_norm": 4.897061824798584, "learning_rate": 5.197198488804671e-05, "loss": 2.3586652755737303, "memory(GiB)": 72.85, "step": 56910, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.672272 }, { "epoch": 2.4384130928409236, "grad_norm": 4.512288570404053, "learning_rate": 5.196526033324178e-05, "loss": 2.0910091400146484, "memory(GiB)": 72.85, "step": 56915, "token_acc": 0.52, "train_speed(iter/s)": 0.67228 }, { "epoch": 2.438627308170173, "grad_norm": 8.383810043334961, "learning_rate": 5.1958535742834305e-05, "loss": 2.287078857421875, "memory(GiB)": 72.85, "step": 56920, "token_acc": 0.4826254826254826, "train_speed(iter/s)": 0.672287 }, { "epoch": 2.4388415234994216, "grad_norm": 4.650175094604492, "learning_rate": 5.195181111694612e-05, "loss": 2.451015090942383, "memory(GiB)": 72.85, "step": 56925, "token_acc": 0.4783861671469741, "train_speed(iter/s)": 0.672283 }, { "epoch": 2.4390557388286704, "grad_norm": 4.518404960632324, "learning_rate": 5.1945086455699064e-05, "loss": 2.210656929016113, "memory(GiB)": 72.85, "step": 56930, "token_acc": 0.5391849529780565, "train_speed(iter/s)": 0.672288 }, { "epoch": 2.4392699541579197, "grad_norm": 26.57437515258789, "learning_rate": 5.193836175921493e-05, "loss": 2.4154577255249023, "memory(GiB)": 72.85, "step": 56935, "token_acc": 0.46254071661237783, "train_speed(iter/s)": 0.672291 }, { "epoch": 2.4394841694871685, "grad_norm": 6.283881187438965, "learning_rate": 5.193163702761554e-05, "loss": 2.370545768737793, "memory(GiB)": 72.85, "step": 56940, "token_acc": 0.5202702702702703, "train_speed(iter/s)": 0.67228 }, { 
"epoch": 2.4396983848164173, "grad_norm": 5.485527038574219, "learning_rate": 5.192491226102275e-05, "loss": 2.4928096771240233, "memory(GiB)": 72.85, "step": 56945, "token_acc": 0.47635135135135137, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.4399126001456666, "grad_norm": 7.3119306564331055, "learning_rate": 5.191818745955837e-05, "loss": 2.2726490020751955, "memory(GiB)": 72.85, "step": 56950, "token_acc": 0.5154185022026432, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.4401268154749154, "grad_norm": 4.430331707000732, "learning_rate": 5.191146262334422e-05, "loss": 2.1605146408081053, "memory(GiB)": 72.85, "step": 56955, "token_acc": 0.5092936802973977, "train_speed(iter/s)": 0.672256 }, { "epoch": 2.440341030804164, "grad_norm": 4.448121070861816, "learning_rate": 5.190473775250213e-05, "loss": 2.1305126190185546, "memory(GiB)": 72.85, "step": 56960, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.672253 }, { "epoch": 2.4405552461334135, "grad_norm": 4.360045909881592, "learning_rate": 5.1898012847153934e-05, "loss": 2.2847162246704102, "memory(GiB)": 72.85, "step": 56965, "token_acc": 0.5470383275261324, "train_speed(iter/s)": 0.672259 }, { "epoch": 2.4407694614626623, "grad_norm": 4.62225341796875, "learning_rate": 5.189128790742145e-05, "loss": 2.4652997970581056, "memory(GiB)": 72.85, "step": 56970, "token_acc": 0.5109489051094891, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.440983676791911, "grad_norm": 5.399803161621094, "learning_rate": 5.1884562933426504e-05, "loss": 2.2752269744873046, "memory(GiB)": 72.85, "step": 56975, "token_acc": 0.5, "train_speed(iter/s)": 0.672269 }, { "epoch": 2.4411978921211603, "grad_norm": 5.487820625305176, "learning_rate": 5.187783792529094e-05, "loss": 2.327900505065918, "memory(GiB)": 72.85, "step": 56980, "token_acc": 0.48493975903614456, "train_speed(iter/s)": 0.672256 }, { "epoch": 2.441412107450409, "grad_norm": 3.8409066200256348, "learning_rate": 5.1871112883136586e-05, "loss": 
1.9211860656738282, "memory(GiB)": 72.85, "step": 56985, "token_acc": 0.5311355311355311, "train_speed(iter/s)": 0.672255 }, { "epoch": 2.441626322779658, "grad_norm": 4.691295623779297, "learning_rate": 5.186438780708527e-05, "loss": 2.267531394958496, "memory(GiB)": 72.85, "step": 56990, "token_acc": 0.5366568914956011, "train_speed(iter/s)": 0.672249 }, { "epoch": 2.441840538108907, "grad_norm": 3.6430673599243164, "learning_rate": 5.18576626972588e-05, "loss": 2.2201717376708983, "memory(GiB)": 72.85, "step": 56995, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.672254 }, { "epoch": 2.442054753438156, "grad_norm": 10.057222366333008, "learning_rate": 5.1850937553779045e-05, "loss": 2.2396183013916016, "memory(GiB)": 72.85, "step": 57000, "token_acc": 0.5399239543726235, "train_speed(iter/s)": 0.67225 }, { "epoch": 2.442054753438156, "eval_loss": 2.136540651321411, "eval_runtime": 15.8944, "eval_samples_per_second": 6.292, "eval_steps_per_second": 6.292, "eval_token_acc": 0.4869791666666667, "step": 57000 }, { "epoch": 2.442268968767405, "grad_norm": 4.409976482391357, "learning_rate": 5.184421237676781e-05, "loss": 2.2932987213134766, "memory(GiB)": 72.85, "step": 57005, "token_acc": 0.49347014925373134, "train_speed(iter/s)": 0.672102 }, { "epoch": 2.442483184096654, "grad_norm": 4.640114784240723, "learning_rate": 5.1837487166346955e-05, "loss": 2.126254844665527, "memory(GiB)": 72.85, "step": 57010, "token_acc": 0.5437262357414449, "train_speed(iter/s)": 0.672114 }, { "epoch": 2.442697399425903, "grad_norm": 4.38798189163208, "learning_rate": 5.183076192263827e-05, "loss": 2.0832122802734374, "memory(GiB)": 72.85, "step": 57015, "token_acc": 0.5559322033898305, "train_speed(iter/s)": 0.672113 }, { "epoch": 2.4429116147551517, "grad_norm": 4.551623821258545, "learning_rate": 5.1824036645763644e-05, "loss": 2.0901922225952148, "memory(GiB)": 72.85, "step": 57020, "token_acc": 0.5224358974358975, "train_speed(iter/s)": 0.672121 }, { "epoch": 
2.443125830084401, "grad_norm": 6.241823196411133, "learning_rate": 5.1817311335844856e-05, "loss": 2.4833446502685548, "memory(GiB)": 72.85, "step": 57025, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.672104 }, { "epoch": 2.44334004541365, "grad_norm": 12.14802360534668, "learning_rate": 5.181058599300379e-05, "loss": 1.831650733947754, "memory(GiB)": 72.85, "step": 57030, "token_acc": 0.564, "train_speed(iter/s)": 0.672113 }, { "epoch": 2.4435542607428986, "grad_norm": 5.175004005432129, "learning_rate": 5.1803860617362245e-05, "loss": 2.238657760620117, "memory(GiB)": 72.85, "step": 57035, "token_acc": 0.4907749077490775, "train_speed(iter/s)": 0.672121 }, { "epoch": 2.443768476072148, "grad_norm": 4.3431620597839355, "learning_rate": 5.179713520904208e-05, "loss": 2.181981086730957, "memory(GiB)": 72.85, "step": 57040, "token_acc": 0.5275080906148867, "train_speed(iter/s)": 0.672117 }, { "epoch": 2.4439826914013967, "grad_norm": 5.662912845611572, "learning_rate": 5.179040976816513e-05, "loss": 2.1239662170410156, "memory(GiB)": 72.85, "step": 57045, "token_acc": 0.549407114624506, "train_speed(iter/s)": 0.672134 }, { "epoch": 2.4441969067306455, "grad_norm": 5.927151203155518, "learning_rate": 5.178368429485322e-05, "loss": 1.9333354949951171, "memory(GiB)": 72.85, "step": 57050, "token_acc": 0.5868055555555556, "train_speed(iter/s)": 0.672136 }, { "epoch": 2.4444111220598947, "grad_norm": 5.631147384643555, "learning_rate": 5.1776958789228194e-05, "loss": 2.001643943786621, "memory(GiB)": 72.85, "step": 57055, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.672145 }, { "epoch": 2.4446253373891436, "grad_norm": 5.054074287414551, "learning_rate": 5.177023325141189e-05, "loss": 2.1922740936279297, "memory(GiB)": 72.85, "step": 57060, "token_acc": 0.4895833333333333, "train_speed(iter/s)": 0.672144 }, { "epoch": 2.4448395527183924, "grad_norm": 5.771401405334473, "learning_rate": 5.176350768152616e-05, "loss": 2.3564804077148436, 
"memory(GiB)": 72.85, "step": 57065, "token_acc": 0.4629080118694362, "train_speed(iter/s)": 0.672153 }, { "epoch": 2.4450537680476416, "grad_norm": 5.139662265777588, "learning_rate": 5.175678207969282e-05, "loss": 2.501704788208008, "memory(GiB)": 72.85, "step": 57070, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.672147 }, { "epoch": 2.4452679833768904, "grad_norm": 6.500164985656738, "learning_rate": 5.1750056446033744e-05, "loss": 2.488055610656738, "memory(GiB)": 72.85, "step": 57075, "token_acc": 0.5055762081784386, "train_speed(iter/s)": 0.672157 }, { "epoch": 2.4454821987061393, "grad_norm": 4.821329116821289, "learning_rate": 5.174333078067074e-05, "loss": 1.7852354049682617, "memory(GiB)": 72.85, "step": 57080, "token_acc": 0.5873015873015873, "train_speed(iter/s)": 0.672166 }, { "epoch": 2.4456964140353885, "grad_norm": 4.574825286865234, "learning_rate": 5.173660508372568e-05, "loss": 2.1762615203857423, "memory(GiB)": 72.85, "step": 57085, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.672155 }, { "epoch": 2.4459106293646373, "grad_norm": 6.040863037109375, "learning_rate": 5.1729879355320364e-05, "loss": 2.173963165283203, "memory(GiB)": 72.85, "step": 57090, "token_acc": 0.5127118644067796, "train_speed(iter/s)": 0.672163 }, { "epoch": 2.446124844693886, "grad_norm": 4.332554817199707, "learning_rate": 5.1723153595576666e-05, "loss": 2.4440431594848633, "memory(GiB)": 72.85, "step": 57095, "token_acc": 0.48632218844984804, "train_speed(iter/s)": 0.672154 }, { "epoch": 2.4463390600231354, "grad_norm": 6.1937947273254395, "learning_rate": 5.171642780461644e-05, "loss": 2.245513343811035, "memory(GiB)": 72.85, "step": 57100, "token_acc": 0.4647058823529412, "train_speed(iter/s)": 0.672145 }, { "epoch": 2.446553275352384, "grad_norm": 4.82710599899292, "learning_rate": 5.170970198256151e-05, "loss": 2.2244888305664063, "memory(GiB)": 72.85, "step": 57105, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.672142 }, { 
"epoch": 2.446767490681633, "grad_norm": 5.108072757720947, "learning_rate": 5.170297612953372e-05, "loss": 1.995736312866211, "memory(GiB)": 72.85, "step": 57110, "token_acc": 0.5290519877675841, "train_speed(iter/s)": 0.672126 }, { "epoch": 2.4469817060108823, "grad_norm": 6.349437236785889, "learning_rate": 5.169625024565492e-05, "loss": 2.335348129272461, "memory(GiB)": 72.85, "step": 57115, "token_acc": 0.5379310344827586, "train_speed(iter/s)": 0.672134 }, { "epoch": 2.447195921340131, "grad_norm": 6.0153727531433105, "learning_rate": 5.168952433104697e-05, "loss": 2.3398727416992187, "memory(GiB)": 72.85, "step": 57120, "token_acc": 0.4629080118694362, "train_speed(iter/s)": 0.672126 }, { "epoch": 2.44741013666938, "grad_norm": 5.102292537689209, "learning_rate": 5.1682798385831667e-05, "loss": 2.177182197570801, "memory(GiB)": 72.85, "step": 57125, "token_acc": 0.5093167701863354, "train_speed(iter/s)": 0.672123 }, { "epoch": 2.447624351998629, "grad_norm": 8.037801742553711, "learning_rate": 5.1676072410130915e-05, "loss": 2.2764408111572267, "memory(GiB)": 72.85, "step": 57130, "token_acc": 0.46619217081850534, "train_speed(iter/s)": 0.67213 }, { "epoch": 2.447838567327878, "grad_norm": 3.680744171142578, "learning_rate": 5.166934640406654e-05, "loss": 1.9283769607543946, "memory(GiB)": 72.85, "step": 57135, "token_acc": 0.5469255663430421, "train_speed(iter/s)": 0.67214 }, { "epoch": 2.448052782657127, "grad_norm": 6.755897045135498, "learning_rate": 5.16626203677604e-05, "loss": 2.286522102355957, "memory(GiB)": 72.85, "step": 57140, "token_acc": 0.4610169491525424, "train_speed(iter/s)": 0.67214 }, { "epoch": 2.448266997986376, "grad_norm": 5.0431365966796875, "learning_rate": 5.165589430133432e-05, "loss": 2.1819694519042967, "memory(GiB)": 72.85, "step": 57145, "token_acc": 0.5222929936305732, "train_speed(iter/s)": 0.672143 }, { "epoch": 2.448481213315625, "grad_norm": 5.558614253997803, "learning_rate": 5.164916820491015e-05, "loss": 
2.261271095275879, "memory(GiB)": 72.85, "step": 57150, "token_acc": 0.5015873015873016, "train_speed(iter/s)": 0.672151 }, { "epoch": 2.4486954286448737, "grad_norm": 4.368114471435547, "learning_rate": 5.164244207860977e-05, "loss": 2.4048519134521484, "memory(GiB)": 72.85, "step": 57155, "token_acc": 0.5138461538461538, "train_speed(iter/s)": 0.672159 }, { "epoch": 2.448909643974123, "grad_norm": 5.128584384918213, "learning_rate": 5.163571592255499e-05, "loss": 1.8380851745605469, "memory(GiB)": 72.85, "step": 57160, "token_acc": 0.5546558704453441, "train_speed(iter/s)": 0.672152 }, { "epoch": 2.4491238593033717, "grad_norm": 6.004604339599609, "learning_rate": 5.1628989736867706e-05, "loss": 2.4284809112548826, "memory(GiB)": 72.85, "step": 57165, "token_acc": 0.45544554455445546, "train_speed(iter/s)": 0.672151 }, { "epoch": 2.4493380746326205, "grad_norm": 4.658123016357422, "learning_rate": 5.1622263521669724e-05, "loss": 2.245022010803223, "memory(GiB)": 72.85, "step": 57170, "token_acc": 0.5566037735849056, "train_speed(iter/s)": 0.672152 }, { "epoch": 2.44955228996187, "grad_norm": 5.220896244049072, "learning_rate": 5.1615537277082924e-05, "loss": 2.054124641418457, "memory(GiB)": 72.85, "step": 57175, "token_acc": 0.5390334572490706, "train_speed(iter/s)": 0.672157 }, { "epoch": 2.4497665052911186, "grad_norm": 6.497490406036377, "learning_rate": 5.160881100322915e-05, "loss": 2.2335071563720703, "memory(GiB)": 72.85, "step": 57180, "token_acc": 0.535031847133758, "train_speed(iter/s)": 0.672143 }, { "epoch": 2.4499807206203674, "grad_norm": 3.8065826892852783, "learning_rate": 5.160208470023027e-05, "loss": 1.955973243713379, "memory(GiB)": 72.85, "step": 57185, "token_acc": 0.5189873417721519, "train_speed(iter/s)": 0.672144 }, { "epoch": 2.4501949359496167, "grad_norm": 5.134560585021973, "learning_rate": 5.159535836820811e-05, "loss": 2.0270950317382814, "memory(GiB)": 72.85, "step": 57190, "token_acc": 0.5474452554744526, "train_speed(iter/s)": 
0.672142 }, { "epoch": 2.4504091512788655, "grad_norm": 4.917340278625488, "learning_rate": 5.1588632007284534e-05, "loss": 2.124761390686035, "memory(GiB)": 72.85, "step": 57195, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.672139 }, { "epoch": 2.4506233666081143, "grad_norm": 4.697519302368164, "learning_rate": 5.1581905617581394e-05, "loss": 2.1064510345458984, "memory(GiB)": 72.85, "step": 57200, "token_acc": 0.5208333333333334, "train_speed(iter/s)": 0.672142 }, { "epoch": 2.4508375819373636, "grad_norm": 6.332646369934082, "learning_rate": 5.1575179199220567e-05, "loss": 2.324882698059082, "memory(GiB)": 72.85, "step": 57205, "token_acc": 0.46441947565543074, "train_speed(iter/s)": 0.67214 }, { "epoch": 2.4510517972666124, "grad_norm": 4.5886664390563965, "learning_rate": 5.156845275232387e-05, "loss": 2.4504390716552735, "memory(GiB)": 72.85, "step": 57210, "token_acc": 0.46504559270516715, "train_speed(iter/s)": 0.672152 }, { "epoch": 2.451266012595861, "grad_norm": 4.8910393714904785, "learning_rate": 5.1561726277013204e-05, "loss": 1.8555511474609374, "memory(GiB)": 72.85, "step": 57215, "token_acc": 0.5580524344569289, "train_speed(iter/s)": 0.672161 }, { "epoch": 2.4514802279251104, "grad_norm": 5.371642112731934, "learning_rate": 5.1554999773410396e-05, "loss": 2.3620607376098635, "memory(GiB)": 72.85, "step": 57220, "token_acc": 0.4968944099378882, "train_speed(iter/s)": 0.672146 }, { "epoch": 2.4516944432543593, "grad_norm": 4.273340225219727, "learning_rate": 5.154827324163729e-05, "loss": 2.264200210571289, "memory(GiB)": 72.85, "step": 57225, "token_acc": 0.5139318885448917, "train_speed(iter/s)": 0.67215 }, { "epoch": 2.451908658583608, "grad_norm": 5.0884575843811035, "learning_rate": 5.1541546681815776e-05, "loss": 1.886693572998047, "memory(GiB)": 72.85, "step": 57230, "token_acc": 0.5846153846153846, "train_speed(iter/s)": 0.672159 }, { "epoch": 2.4521228739128573, "grad_norm": 5.447569847106934, "learning_rate": 
5.153482009406769e-05, "loss": 2.111421585083008, "memory(GiB)": 72.85, "step": 57235, "token_acc": 0.5466101694915254, "train_speed(iter/s)": 0.672163 }, { "epoch": 2.452337089242106, "grad_norm": 5.186037540435791, "learning_rate": 5.1528093478514905e-05, "loss": 2.1256536483764648, "memory(GiB)": 72.85, "step": 57240, "token_acc": 0.5234657039711191, "train_speed(iter/s)": 0.672168 }, { "epoch": 2.452551304571355, "grad_norm": 4.774621963500977, "learning_rate": 5.152136683527927e-05, "loss": 1.9905651092529297, "memory(GiB)": 72.85, "step": 57245, "token_acc": 0.5136054421768708, "train_speed(iter/s)": 0.672179 }, { "epoch": 2.452765519900604, "grad_norm": 4.793619155883789, "learning_rate": 5.1514640164482664e-05, "loss": 2.272365760803223, "memory(GiB)": 72.85, "step": 57250, "token_acc": 0.5448275862068965, "train_speed(iter/s)": 0.672181 }, { "epoch": 2.452979735229853, "grad_norm": 4.637740612030029, "learning_rate": 5.1507913466246905e-05, "loss": 2.304836463928223, "memory(GiB)": 72.85, "step": 57255, "token_acc": 0.5361216730038023, "train_speed(iter/s)": 0.672177 }, { "epoch": 2.453193950559102, "grad_norm": 6.962691783905029, "learning_rate": 5.15011867406939e-05, "loss": 2.254380226135254, "memory(GiB)": 72.85, "step": 57260, "token_acc": 0.5310734463276836, "train_speed(iter/s)": 0.672185 }, { "epoch": 2.453408165888351, "grad_norm": 6.169130802154541, "learning_rate": 5.149445998794548e-05, "loss": 1.9921533584594726, "memory(GiB)": 72.85, "step": 57265, "token_acc": 0.5726872246696035, "train_speed(iter/s)": 0.672189 }, { "epoch": 2.4536223812176, "grad_norm": 4.614284515380859, "learning_rate": 5.148773320812351e-05, "loss": 2.1517311096191407, "memory(GiB)": 72.85, "step": 57270, "token_acc": 0.4874551971326165, "train_speed(iter/s)": 0.672191 }, { "epoch": 2.4538365965468487, "grad_norm": 4.73662805557251, "learning_rate": 5.1481006401349866e-05, "loss": 2.1289581298828124, "memory(GiB)": 72.85, "step": 57275, "token_acc": 0.5107142857142857, 
"train_speed(iter/s)": 0.672194 }, { "epoch": 2.454050811876098, "grad_norm": 5.307191848754883, "learning_rate": 5.1474279567746397e-05, "loss": 2.3450815200805666, "memory(GiB)": 72.85, "step": 57280, "token_acc": 0.5, "train_speed(iter/s)": 0.672193 }, { "epoch": 2.454265027205347, "grad_norm": 5.597243309020996, "learning_rate": 5.146755270743496e-05, "loss": 2.0655107498168945, "memory(GiB)": 72.85, "step": 57285, "token_acc": 0.5429553264604811, "train_speed(iter/s)": 0.6722 }, { "epoch": 2.4544792425345956, "grad_norm": 4.711793899536133, "learning_rate": 5.1460825820537454e-05, "loss": 2.235371208190918, "memory(GiB)": 72.85, "step": 57290, "token_acc": 0.5247933884297521, "train_speed(iter/s)": 0.6722 }, { "epoch": 2.454693457863845, "grad_norm": 6.75970983505249, "learning_rate": 5.1454098907175695e-05, "loss": 2.242354393005371, "memory(GiB)": 72.85, "step": 57295, "token_acc": 0.5195729537366548, "train_speed(iter/s)": 0.672201 }, { "epoch": 2.4549076731930937, "grad_norm": 4.557600498199463, "learning_rate": 5.144737196747158e-05, "loss": 2.3965953826904296, "memory(GiB)": 72.85, "step": 57300, "token_acc": 0.4662576687116564, "train_speed(iter/s)": 0.672209 }, { "epoch": 2.4551218885223425, "grad_norm": 4.712123870849609, "learning_rate": 5.1440645001546973e-05, "loss": 2.2639251708984376, "memory(GiB)": 72.85, "step": 57305, "token_acc": 0.5053003533568905, "train_speed(iter/s)": 0.672207 }, { "epoch": 2.4553361038515917, "grad_norm": 5.042758941650391, "learning_rate": 5.143391800952372e-05, "loss": 2.693052291870117, "memory(GiB)": 72.85, "step": 57310, "token_acc": 0.4629080118694362, "train_speed(iter/s)": 0.672223 }, { "epoch": 2.4555503191808405, "grad_norm": 5.00585412979126, "learning_rate": 5.142719099152369e-05, "loss": 2.1960187911987306, "memory(GiB)": 72.85, "step": 57315, "token_acc": 0.490272373540856, "train_speed(iter/s)": 0.672232 }, { "epoch": 2.4557645345100894, "grad_norm": 3.9079387187957764, "learning_rate": 
5.142046394766876e-05, "loss": 2.3163066864013673, "memory(GiB)": 72.85, "step": 57320, "token_acc": 0.5170278637770898, "train_speed(iter/s)": 0.672219 }, { "epoch": 2.4559787498393386, "grad_norm": 6.217852592468262, "learning_rate": 5.14137368780808e-05, "loss": 2.254618835449219, "memory(GiB)": 72.85, "step": 57325, "token_acc": 0.5882352941176471, "train_speed(iter/s)": 0.672227 }, { "epoch": 2.4561929651685874, "grad_norm": 5.731610298156738, "learning_rate": 5.1407009782881676e-05, "loss": 2.287982177734375, "memory(GiB)": 72.85, "step": 57330, "token_acc": 0.49433962264150944, "train_speed(iter/s)": 0.672238 }, { "epoch": 2.4564071804978362, "grad_norm": 4.640722751617432, "learning_rate": 5.140028266219324e-05, "loss": 2.4662506103515627, "memory(GiB)": 72.85, "step": 57335, "token_acc": 0.48322147651006714, "train_speed(iter/s)": 0.672234 }, { "epoch": 2.4566213958270855, "grad_norm": 5.305909156799316, "learning_rate": 5.139355551613737e-05, "loss": 2.595835494995117, "memory(GiB)": 72.85, "step": 57340, "token_acc": 0.4765625, "train_speed(iter/s)": 0.672241 }, { "epoch": 2.4568356111563343, "grad_norm": 4.676496982574463, "learning_rate": 5.1386828344835926e-05, "loss": 2.2362640380859373, "memory(GiB)": 72.85, "step": 57345, "token_acc": 0.5126582278481012, "train_speed(iter/s)": 0.672254 }, { "epoch": 2.457049826485583, "grad_norm": 5.574447154998779, "learning_rate": 5.138010114841081e-05, "loss": 2.2599842071533205, "memory(GiB)": 72.85, "step": 57350, "token_acc": 0.5060606060606061, "train_speed(iter/s)": 0.672267 }, { "epoch": 2.4572640418148324, "grad_norm": 5.519840240478516, "learning_rate": 5.1373373926983836e-05, "loss": 2.238178253173828, "memory(GiB)": 72.85, "step": 57355, "token_acc": 0.5330739299610895, "train_speed(iter/s)": 0.672266 }, { "epoch": 2.457478257144081, "grad_norm": 4.050412178039551, "learning_rate": 5.136664668067693e-05, "loss": 2.2752336502075194, "memory(GiB)": 72.85, "step": 57360, "token_acc": 0.4942528735632184, 
"train_speed(iter/s)": 0.672275 }, { "epoch": 2.45769247247333, "grad_norm": 5.195649147033691, "learning_rate": 5.1359919409611934e-05, "loss": 2.3116518020629884, "memory(GiB)": 72.85, "step": 57365, "token_acc": 0.49122807017543857, "train_speed(iter/s)": 0.672278 }, { "epoch": 2.4579066878025793, "grad_norm": 4.653845310211182, "learning_rate": 5.135319211391072e-05, "loss": 2.3983144760131836, "memory(GiB)": 72.85, "step": 57370, "token_acc": 0.4649859943977591, "train_speed(iter/s)": 0.672281 }, { "epoch": 2.458120903131828, "grad_norm": 4.902218341827393, "learning_rate": 5.134646479369516e-05, "loss": 2.2168319702148436, "memory(GiB)": 72.85, "step": 57375, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.672283 }, { "epoch": 2.458335118461077, "grad_norm": 5.726625919342041, "learning_rate": 5.133973744908713e-05, "loss": 2.2603578567504883, "memory(GiB)": 72.85, "step": 57380, "token_acc": 0.4943820224719101, "train_speed(iter/s)": 0.672285 }, { "epoch": 2.458549333790326, "grad_norm": 4.766808986663818, "learning_rate": 5.1333010080208476e-05, "loss": 2.399298095703125, "memory(GiB)": 72.85, "step": 57385, "token_acc": 0.5342019543973942, "train_speed(iter/s)": 0.672305 }, { "epoch": 2.458763549119575, "grad_norm": 4.767913341522217, "learning_rate": 5.1326282687181114e-05, "loss": 2.3431818008422853, "memory(GiB)": 72.85, "step": 57390, "token_acc": 0.47854785478547857, "train_speed(iter/s)": 0.67231 }, { "epoch": 2.4589777644488238, "grad_norm": 5.030263423919678, "learning_rate": 5.1319555270126904e-05, "loss": 2.176495170593262, "memory(GiB)": 72.85, "step": 57395, "token_acc": 0.5379061371841155, "train_speed(iter/s)": 0.672307 }, { "epoch": 2.459191979778073, "grad_norm": 4.653183937072754, "learning_rate": 5.131282782916771e-05, "loss": 2.358890724182129, "memory(GiB)": 72.85, "step": 57400, "token_acc": 0.47413793103448276, "train_speed(iter/s)": 0.672318 }, { "epoch": 2.459406195107322, "grad_norm": 3.9351963996887207, "learning_rate": 
5.13061003644254e-05, "loss": 2.2027544021606444, "memory(GiB)": 72.85, "step": 57405, "token_acc": 0.5498281786941581, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.4596204104365706, "grad_norm": 4.3640007972717285, "learning_rate": 5.129937287602186e-05, "loss": 2.239775466918945, "memory(GiB)": 72.85, "step": 57410, "token_acc": 0.5535055350553506, "train_speed(iter/s)": 0.672334 }, { "epoch": 2.45983462576582, "grad_norm": 5.042046070098877, "learning_rate": 5.129264536407896e-05, "loss": 2.1143592834472655, "memory(GiB)": 72.85, "step": 57415, "token_acc": 0.5060606060606061, "train_speed(iter/s)": 0.672326 }, { "epoch": 2.4600488410950687, "grad_norm": 4.903232097625732, "learning_rate": 5.128591782871858e-05, "loss": 2.359785270690918, "memory(GiB)": 72.85, "step": 57420, "token_acc": 0.4623287671232877, "train_speed(iter/s)": 0.672337 }, { "epoch": 2.460263056424318, "grad_norm": 3.1597511768341064, "learning_rate": 5.127919027006259e-05, "loss": 2.148960494995117, "memory(GiB)": 72.85, "step": 57425, "token_acc": 0.5359281437125748, "train_speed(iter/s)": 0.67233 }, { "epoch": 2.460477271753567, "grad_norm": 6.745537757873535, "learning_rate": 5.127246268823286e-05, "loss": 2.5334941864013674, "memory(GiB)": 72.85, "step": 57430, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.672333 }, { "epoch": 2.4606914870828156, "grad_norm": 5.5509867668151855, "learning_rate": 5.126573508335128e-05, "loss": 2.315071868896484, "memory(GiB)": 72.85, "step": 57435, "token_acc": 0.539568345323741, "train_speed(iter/s)": 0.672342 }, { "epoch": 2.460905702412065, "grad_norm": 3.94230318069458, "learning_rate": 5.125900745553973e-05, "loss": 2.237453651428223, "memory(GiB)": 72.85, "step": 57440, "token_acc": 0.5273311897106109, "train_speed(iter/s)": 0.672335 }, { "epoch": 2.4611199177413137, "grad_norm": 5.15529727935791, "learning_rate": 5.1252279804920075e-05, "loss": 2.280819320678711, "memory(GiB)": 72.85, "step": 57445, "token_acc": 0.5220588235294118, 
"train_speed(iter/s)": 0.672337 }, { "epoch": 2.4613341330705625, "grad_norm": 4.9304585456848145, "learning_rate": 5.124555213161421e-05, "loss": 2.1402347564697264, "memory(GiB)": 72.85, "step": 57450, "token_acc": 0.5538461538461539, "train_speed(iter/s)": 0.672343 }, { "epoch": 2.4615483483998117, "grad_norm": 3.6327641010284424, "learning_rate": 5.123882443574397e-05, "loss": 2.1970331192016603, "memory(GiB)": 72.85, "step": 57455, "token_acc": 0.4928774928774929, "train_speed(iter/s)": 0.672359 }, { "epoch": 2.4617625637290605, "grad_norm": 4.698606967926025, "learning_rate": 5.123209671743128e-05, "loss": 2.3212348937988283, "memory(GiB)": 72.85, "step": 57460, "token_acc": 0.4778481012658228, "train_speed(iter/s)": 0.672366 }, { "epoch": 2.4619767790583094, "grad_norm": 4.8950066566467285, "learning_rate": 5.122536897679801e-05, "loss": 2.2490720748901367, "memory(GiB)": 72.85, "step": 57465, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.672366 }, { "epoch": 2.4621909943875586, "grad_norm": 6.795088291168213, "learning_rate": 5.1218641213966015e-05, "loss": 2.3911632537841796, "memory(GiB)": 72.85, "step": 57470, "token_acc": 0.48104956268221577, "train_speed(iter/s)": 0.672362 }, { "epoch": 2.4624052097168074, "grad_norm": 4.785164833068848, "learning_rate": 5.121191342905719e-05, "loss": 2.536525917053223, "memory(GiB)": 72.85, "step": 57475, "token_acc": 0.4602649006622517, "train_speed(iter/s)": 0.672353 }, { "epoch": 2.4626194250460562, "grad_norm": 3.963472604751587, "learning_rate": 5.120518562219343e-05, "loss": 2.212361145019531, "memory(GiB)": 72.85, "step": 57480, "token_acc": 0.5174603174603175, "train_speed(iter/s)": 0.672357 }, { "epoch": 2.4628336403753055, "grad_norm": 4.672528266906738, "learning_rate": 5.119845779349659e-05, "loss": 2.342782402038574, "memory(GiB)": 72.85, "step": 57485, "token_acc": 0.5144694533762058, "train_speed(iter/s)": 0.672357 }, { "epoch": 2.4630478557045543, "grad_norm": 4.108226776123047, 
"learning_rate": 5.119172994308858e-05, "loss": 2.0919336318969726, "memory(GiB)": 72.85, "step": 57490, "token_acc": 0.5868725868725869, "train_speed(iter/s)": 0.672363 }, { "epoch": 2.463262071033803, "grad_norm": 4.350399494171143, "learning_rate": 5.118500207109125e-05, "loss": 2.1052518844604493, "memory(GiB)": 72.85, "step": 57495, "token_acc": 0.5173410404624278, "train_speed(iter/s)": 0.672369 }, { "epoch": 2.4634762863630524, "grad_norm": 4.520733833312988, "learning_rate": 5.1178274177626495e-05, "loss": 2.0232542037963865, "memory(GiB)": 72.85, "step": 57500, "token_acc": 0.5517241379310345, "train_speed(iter/s)": 0.672368 }, { "epoch": 2.4634762863630524, "eval_loss": 2.108510732650757, "eval_runtime": 15.258, "eval_samples_per_second": 6.554, "eval_steps_per_second": 6.554, "eval_token_acc": 0.49506172839506174, "step": 57500 }, { "epoch": 2.463690501692301, "grad_norm": 5.7495880126953125, "learning_rate": 5.1171546262816206e-05, "loss": 2.401556396484375, "memory(GiB)": 72.85, "step": 57505, "token_acc": 0.4995491433724076, "train_speed(iter/s)": 0.672231 }, { "epoch": 2.46390471702155, "grad_norm": 5.0813751220703125, "learning_rate": 5.116481832678226e-05, "loss": 1.9985370635986328, "memory(GiB)": 72.85, "step": 57510, "token_acc": 0.5570934256055363, "train_speed(iter/s)": 0.672239 }, { "epoch": 2.4641189323507993, "grad_norm": 4.70538854598999, "learning_rate": 5.115809036964651e-05, "loss": 2.285641670227051, "memory(GiB)": 72.85, "step": 57515, "token_acc": 0.5261194029850746, "train_speed(iter/s)": 0.672255 }, { "epoch": 2.464333147680048, "grad_norm": 3.9390757083892822, "learning_rate": 5.11513623915309e-05, "loss": 2.3757064819335936, "memory(GiB)": 72.85, "step": 57520, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672258 }, { "epoch": 2.464547363009297, "grad_norm": 5.19193696975708, "learning_rate": 5.1144634392557275e-05, "loss": 2.168630027770996, "memory(GiB)": 72.85, "step": 57525, "token_acc": 0.5347826086956522, 
"train_speed(iter/s)": 0.672269 }, { "epoch": 2.464761578338546, "grad_norm": 3.7875170707702637, "learning_rate": 5.11379063728475e-05, "loss": 1.7063356399536134, "memory(GiB)": 72.85, "step": 57530, "token_acc": 0.6387665198237885, "train_speed(iter/s)": 0.672267 }, { "epoch": 2.464975793667795, "grad_norm": 4.912257194519043, "learning_rate": 5.113117833252351e-05, "loss": 2.071471405029297, "memory(GiB)": 72.85, "step": 57535, "token_acc": 0.5518672199170125, "train_speed(iter/s)": 0.672266 }, { "epoch": 2.4651900089970438, "grad_norm": 4.77969217300415, "learning_rate": 5.112445027170716e-05, "loss": 2.3403472900390625, "memory(GiB)": 72.85, "step": 57540, "token_acc": 0.5227272727272727, "train_speed(iter/s)": 0.672271 }, { "epoch": 2.465404224326293, "grad_norm": 4.712098121643066, "learning_rate": 5.111772219052032e-05, "loss": 2.3610179901123045, "memory(GiB)": 72.85, "step": 57545, "token_acc": 0.5119047619047619, "train_speed(iter/s)": 0.672264 }, { "epoch": 2.465618439655542, "grad_norm": 4.739504337310791, "learning_rate": 5.111099408908492e-05, "loss": 2.4457645416259766, "memory(GiB)": 72.85, "step": 57550, "token_acc": 0.5038759689922481, "train_speed(iter/s)": 0.672264 }, { "epoch": 2.4658326549847907, "grad_norm": 4.385597229003906, "learning_rate": 5.110426596752279e-05, "loss": 2.134128189086914, "memory(GiB)": 72.85, "step": 57555, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672263 }, { "epoch": 2.46604687031404, "grad_norm": 9.224638938903809, "learning_rate": 5.109753782595588e-05, "loss": 2.214038276672363, "memory(GiB)": 72.85, "step": 57560, "token_acc": 0.5390334572490706, "train_speed(iter/s)": 0.672277 }, { "epoch": 2.4662610856432887, "grad_norm": 4.287622928619385, "learning_rate": 5.109080966450602e-05, "loss": 2.067566680908203, "memory(GiB)": 72.85, "step": 57565, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.672277 }, { "epoch": 2.4664753009725375, "grad_norm": 6.095854759216309, "learning_rate": 
5.108408148329512e-05, "loss": 2.30517463684082, "memory(GiB)": 72.85, "step": 57570, "token_acc": 0.5124555160142349, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.466689516301787, "grad_norm": 5.293188095092773, "learning_rate": 5.107735328244507e-05, "loss": 2.0966259002685548, "memory(GiB)": 72.85, "step": 57575, "token_acc": 0.4983277591973244, "train_speed(iter/s)": 0.672277 }, { "epoch": 2.4669037316310356, "grad_norm": 6.807066440582275, "learning_rate": 5.107062506207776e-05, "loss": 2.395233726501465, "memory(GiB)": 72.85, "step": 57580, "token_acc": 0.4639175257731959, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.4671179469602844, "grad_norm": 4.8188090324401855, "learning_rate": 5.106389682231506e-05, "loss": 2.3504444122314454, "memory(GiB)": 72.85, "step": 57585, "token_acc": 0.47038327526132406, "train_speed(iter/s)": 0.672266 }, { "epoch": 2.4673321622895337, "grad_norm": 3.7809133529663086, "learning_rate": 5.105716856327889e-05, "loss": 2.14005069732666, "memory(GiB)": 72.85, "step": 57590, "token_acc": 0.5413793103448276, "train_speed(iter/s)": 0.672256 }, { "epoch": 2.4675463776187825, "grad_norm": 4.813739776611328, "learning_rate": 5.1050440285091115e-05, "loss": 2.321916389465332, "memory(GiB)": 72.85, "step": 57595, "token_acc": 0.5292307692307693, "train_speed(iter/s)": 0.672247 }, { "epoch": 2.4677605929480313, "grad_norm": 4.953636169433594, "learning_rate": 5.104371198787362e-05, "loss": 2.3412818908691406, "memory(GiB)": 72.85, "step": 57600, "token_acc": 0.4815950920245399, "train_speed(iter/s)": 0.672253 }, { "epoch": 2.4679748082772806, "grad_norm": 4.106250762939453, "learning_rate": 5.1036983671748294e-05, "loss": 2.1574474334716798, "memory(GiB)": 72.85, "step": 57605, "token_acc": 0.5220588235294118, "train_speed(iter/s)": 0.672261 }, { "epoch": 2.4681890236065294, "grad_norm": 4.2289605140686035, "learning_rate": 5.103025533683706e-05, "loss": 2.3563594818115234, "memory(GiB)": 72.85, "step": 57610, "token_acc": 
0.5071942446043165, "train_speed(iter/s)": 0.672267 }, { "epoch": 2.468403238935778, "grad_norm": 6.180091381072998, "learning_rate": 5.102352698326176e-05, "loss": 2.5849420547485353, "memory(GiB)": 72.85, "step": 57615, "token_acc": 0.48201438848920863, "train_speed(iter/s)": 0.672261 }, { "epoch": 2.4686174542650274, "grad_norm": 4.029515743255615, "learning_rate": 5.101679861114433e-05, "loss": 2.4433032989501955, "memory(GiB)": 72.85, "step": 57620, "token_acc": 0.48986486486486486, "train_speed(iter/s)": 0.672268 }, { "epoch": 2.4688316695942762, "grad_norm": 5.867647647857666, "learning_rate": 5.101007022060664e-05, "loss": 2.5580446243286135, "memory(GiB)": 72.85, "step": 57625, "token_acc": 0.45426829268292684, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.469045884923525, "grad_norm": 7.694637775421143, "learning_rate": 5.1003341811770565e-05, "loss": 2.175312042236328, "memory(GiB)": 72.85, "step": 57630, "token_acc": 0.48880597014925375, "train_speed(iter/s)": 0.672262 }, { "epoch": 2.4692601002527743, "grad_norm": 4.693963050842285, "learning_rate": 5.0996613384758016e-05, "loss": 2.381536865234375, "memory(GiB)": 72.85, "step": 57635, "token_acc": 0.50187265917603, "train_speed(iter/s)": 0.672263 }, { "epoch": 2.469474315582023, "grad_norm": 5.500027179718018, "learning_rate": 5.098988493969089e-05, "loss": 2.2498600006103517, "memory(GiB)": 72.85, "step": 57640, "token_acc": 0.467680608365019, "train_speed(iter/s)": 0.672265 }, { "epoch": 2.469688530911272, "grad_norm": 4.764429569244385, "learning_rate": 5.098315647669105e-05, "loss": 2.3993335723876954, "memory(GiB)": 72.85, "step": 57645, "token_acc": 0.4837758112094395, "train_speed(iter/s)": 0.672278 }, { "epoch": 2.469902746240521, "grad_norm": 4.725830554962158, "learning_rate": 5.097642799588043e-05, "loss": 2.129010772705078, "memory(GiB)": 72.85, "step": 57650, "token_acc": 0.5400696864111498, "train_speed(iter/s)": 0.672286 }, { "epoch": 2.47011696156977, "grad_norm": 4.279745101928711, 
"learning_rate": 5.0969699497380896e-05, "loss": 2.5215978622436523, "memory(GiB)": 72.85, "step": 57655, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.672301 }, { "epoch": 2.470331176899019, "grad_norm": 6.400049686431885, "learning_rate": 5.096297098131433e-05, "loss": 2.314681625366211, "memory(GiB)": 72.85, "step": 57660, "token_acc": 0.4968944099378882, "train_speed(iter/s)": 0.672293 }, { "epoch": 2.470545392228268, "grad_norm": 4.221296787261963, "learning_rate": 5.095624244780266e-05, "loss": 2.112663459777832, "memory(GiB)": 72.85, "step": 57665, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.672291 }, { "epoch": 2.470759607557517, "grad_norm": 5.627758026123047, "learning_rate": 5.094951389696776e-05, "loss": 2.1882862091064452, "memory(GiB)": 72.85, "step": 57670, "token_acc": 0.5166666666666667, "train_speed(iter/s)": 0.672281 }, { "epoch": 2.4709738228867657, "grad_norm": 5.748357772827148, "learning_rate": 5.094278532893152e-05, "loss": 2.291874313354492, "memory(GiB)": 72.85, "step": 57675, "token_acc": 0.49642857142857144, "train_speed(iter/s)": 0.672286 }, { "epoch": 2.471188038216015, "grad_norm": 4.572343826293945, "learning_rate": 5.093605674381583e-05, "loss": 2.137897491455078, "memory(GiB)": 72.85, "step": 57680, "token_acc": 0.5607142857142857, "train_speed(iter/s)": 0.672293 }, { "epoch": 2.4714022535452638, "grad_norm": 4.9162116050720215, "learning_rate": 5.0929328141742606e-05, "loss": 2.168270301818848, "memory(GiB)": 72.85, "step": 57685, "token_acc": 0.513986013986014, "train_speed(iter/s)": 0.672308 }, { "epoch": 2.4716164688745126, "grad_norm": 4.795872211456299, "learning_rate": 5.0922599522833726e-05, "loss": 2.1234180450439455, "memory(GiB)": 72.85, "step": 57690, "token_acc": 0.5271084337349398, "train_speed(iter/s)": 0.672312 }, { "epoch": 2.471830684203762, "grad_norm": 4.712418556213379, "learning_rate": 5.091587088721109e-05, "loss": 2.5102413177490233, "memory(GiB)": 72.85, "step": 57695, "token_acc": 
0.5038461538461538, "train_speed(iter/s)": 0.672323 }, { "epoch": 2.4720448995330107, "grad_norm": 4.1508402824401855, "learning_rate": 5.0909142234996584e-05, "loss": 2.234752082824707, "memory(GiB)": 72.85, "step": 57700, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.672313 }, { "epoch": 2.4722591148622595, "grad_norm": 4.1274285316467285, "learning_rate": 5.090241356631213e-05, "loss": 2.1238275527954102, "memory(GiB)": 72.85, "step": 57705, "token_acc": 0.549618320610687, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.4724733301915087, "grad_norm": 8.08352279663086, "learning_rate": 5.089568488127961e-05, "loss": 2.420304870605469, "memory(GiB)": 72.85, "step": 57710, "token_acc": 0.504, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.4726875455207575, "grad_norm": 6.30693244934082, "learning_rate": 5.088895618002091e-05, "loss": 1.8530197143554688, "memory(GiB)": 72.85, "step": 57715, "token_acc": 0.5799256505576208, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.4729017608500063, "grad_norm": 4.309980869293213, "learning_rate": 5.0882227462657926e-05, "loss": 2.2706098556518555, "memory(GiB)": 72.85, "step": 57720, "token_acc": 0.5313432835820896, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.4731159761792556, "grad_norm": 5.287072658538818, "learning_rate": 5.0875498729312585e-05, "loss": 2.3040592193603517, "memory(GiB)": 72.85, "step": 57725, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.672329 }, { "epoch": 2.4733301915085044, "grad_norm": 5.655421733856201, "learning_rate": 5.086876998010675e-05, "loss": 1.9293085098266602, "memory(GiB)": 72.85, "step": 57730, "token_acc": 0.550185873605948, "train_speed(iter/s)": 0.672327 }, { "epoch": 2.4735444068377532, "grad_norm": 5.317551136016846, "learning_rate": 5.086204121516233e-05, "loss": 2.149483871459961, "memory(GiB)": 72.85, "step": 57735, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.672327 }, { "epoch": 2.4737586221670025, "grad_norm": 5.01090669631958, 
"learning_rate": 5.0855312434601235e-05, "loss": 2.3824493408203127, "memory(GiB)": 72.85, "step": 57740, "token_acc": 0.49038461538461536, "train_speed(iter/s)": 0.672326 }, { "epoch": 2.4739728374962513, "grad_norm": 3.936001777648926, "learning_rate": 5.084858363854534e-05, "loss": 2.214465522766113, "memory(GiB)": 72.85, "step": 57745, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.4741870528255, "grad_norm": 5.145231246948242, "learning_rate": 5.0841854827116566e-05, "loss": 2.4648439407348635, "memory(GiB)": 72.85, "step": 57750, "token_acc": 0.47147147147147145, "train_speed(iter/s)": 0.672335 }, { "epoch": 2.4744012681547494, "grad_norm": 4.8582987785339355, "learning_rate": 5.083512600043679e-05, "loss": 2.2073081970214843, "memory(GiB)": 72.85, "step": 57755, "token_acc": 0.4869281045751634, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.474615483483998, "grad_norm": 3.3311753273010254, "learning_rate": 5.082839715862793e-05, "loss": 2.2782419204711912, "memory(GiB)": 72.85, "step": 57760, "token_acc": 0.5206611570247934, "train_speed(iter/s)": 0.672318 }, { "epoch": 2.474829698813247, "grad_norm": 4.998225688934326, "learning_rate": 5.082166830181189e-05, "loss": 2.0834415435791014, "memory(GiB)": 72.85, "step": 57765, "token_acc": 0.5613382899628253, "train_speed(iter/s)": 0.672328 }, { "epoch": 2.4750439141424962, "grad_norm": 5.619354248046875, "learning_rate": 5.081493943011055e-05, "loss": 2.1799760818481446, "memory(GiB)": 72.85, "step": 57770, "token_acc": 0.5486111111111112, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.475258129471745, "grad_norm": 3.9630210399627686, "learning_rate": 5.080821054364581e-05, "loss": 2.1440557479858398, "memory(GiB)": 72.85, "step": 57775, "token_acc": 0.5345345345345346, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.475472344800994, "grad_norm": 4.453753471374512, "learning_rate": 5.080148164253958e-05, "loss": 2.2843278884887694, "memory(GiB)": 72.85, "step": 57780, 
"token_acc": 0.5049833887043189, "train_speed(iter/s)": 0.672328 }, { "epoch": 2.475686560130243, "grad_norm": 4.967215538024902, "learning_rate": 5.0794752726913774e-05, "loss": 2.156086540222168, "memory(GiB)": 72.85, "step": 57785, "token_acc": 0.5166051660516605, "train_speed(iter/s)": 0.672309 }, { "epoch": 2.475900775459492, "grad_norm": 4.908221244812012, "learning_rate": 5.078802379689025e-05, "loss": 2.4573972702026365, "memory(GiB)": 72.85, "step": 57790, "token_acc": 0.49050632911392406, "train_speed(iter/s)": 0.672303 }, { "epoch": 2.4761149907887408, "grad_norm": 4.698247909545898, "learning_rate": 5.078129485259095e-05, "loss": 2.1173984527587892, "memory(GiB)": 72.85, "step": 57795, "token_acc": 0.5692307692307692, "train_speed(iter/s)": 0.672306 }, { "epoch": 2.47632920611799, "grad_norm": 5.704189300537109, "learning_rate": 5.077456589413777e-05, "loss": 2.200347137451172, "memory(GiB)": 72.85, "step": 57800, "token_acc": 0.5326797385620915, "train_speed(iter/s)": 0.672307 }, { "epoch": 2.476543421447239, "grad_norm": 5.818138599395752, "learning_rate": 5.076783692165259e-05, "loss": 2.1708086013793944, "memory(GiB)": 72.85, "step": 57805, "token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.672322 }, { "epoch": 2.4767576367764876, "grad_norm": 5.875242710113525, "learning_rate": 5.0761107935257334e-05, "loss": 2.2022222518920898, "memory(GiB)": 72.85, "step": 57810, "token_acc": 0.5371024734982333, "train_speed(iter/s)": 0.672321 }, { "epoch": 2.476971852105737, "grad_norm": 3.824296474456787, "learning_rate": 5.075437893507387e-05, "loss": 2.3669095993041993, "memory(GiB)": 72.85, "step": 57815, "token_acc": 0.5376344086021505, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.4771860674349857, "grad_norm": 6.85640287399292, "learning_rate": 5.074764992122415e-05, "loss": 2.100594902038574, "memory(GiB)": 72.85, "step": 57820, "token_acc": 0.5099337748344371, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.4774002827642345, "grad_norm": 
6.124797821044922, "learning_rate": 5.0740920893830036e-05, "loss": 2.444619560241699, "memory(GiB)": 72.85, "step": 57825, "token_acc": 0.48659003831417624, "train_speed(iter/s)": 0.672318 }, { "epoch": 2.4776144980934838, "grad_norm": 5.116929054260254, "learning_rate": 5.0734191853013455e-05, "loss": 2.0884862899780274, "memory(GiB)": 72.85, "step": 57830, "token_acc": 0.5033557046979866, "train_speed(iter/s)": 0.672308 }, { "epoch": 2.4778287134227326, "grad_norm": 4.838474750518799, "learning_rate": 5.0727462798896296e-05, "loss": 1.9029006958007812, "memory(GiB)": 72.85, "step": 57835, "token_acc": 0.6138211382113821, "train_speed(iter/s)": 0.672315 }, { "epoch": 2.4780429287519814, "grad_norm": 5.051290512084961, "learning_rate": 5.072073373160047e-05, "loss": 2.3860956192016602, "memory(GiB)": 72.85, "step": 57840, "token_acc": 0.49508196721311476, "train_speed(iter/s)": 0.672317 }, { "epoch": 2.4782571440812307, "grad_norm": 5.571674823760986, "learning_rate": 5.071400465124786e-05, "loss": 2.1709508895874023, "memory(GiB)": 72.85, "step": 57845, "token_acc": 0.4908424908424908, "train_speed(iter/s)": 0.672324 }, { "epoch": 2.4784713594104795, "grad_norm": 6.227899551391602, "learning_rate": 5.070727555796041e-05, "loss": 2.526743507385254, "memory(GiB)": 72.85, "step": 57850, "token_acc": 0.4657039711191336, "train_speed(iter/s)": 0.672327 }, { "epoch": 2.4786855747397283, "grad_norm": 5.390369415283203, "learning_rate": 5.0700546451859984e-05, "loss": 2.189804458618164, "memory(GiB)": 72.85, "step": 57855, "token_acc": 0.5031446540880503, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.4788997900689775, "grad_norm": 4.575758934020996, "learning_rate": 5.0693817333068505e-05, "loss": 2.195719528198242, "memory(GiB)": 72.85, "step": 57860, "token_acc": 0.4894366197183099, "train_speed(iter/s)": 0.672316 }, { "epoch": 2.4791140053982264, "grad_norm": 3.4153242111206055, "learning_rate": 5.0687088201707864e-05, "loss": 2.390732002258301, "memory(GiB)": 
72.85, "step": 57865, "token_acc": 0.4861111111111111, "train_speed(iter/s)": 0.672323 }, { "epoch": 2.479328220727475, "grad_norm": 4.666726589202881, "learning_rate": 5.068035905789999e-05, "loss": 2.361240196228027, "memory(GiB)": 72.85, "step": 57870, "token_acc": 0.5031645569620253, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.4795424360567244, "grad_norm": 4.249752998352051, "learning_rate": 5.0673629901766774e-05, "loss": 1.9528207778930664, "memory(GiB)": 72.85, "step": 57875, "token_acc": 0.5507246376811594, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.4797566513859732, "grad_norm": 5.576457500457764, "learning_rate": 5.066690073343011e-05, "loss": 2.1037715911865233, "memory(GiB)": 72.85, "step": 57880, "token_acc": 0.5769230769230769, "train_speed(iter/s)": 0.67233 }, { "epoch": 2.479970866715222, "grad_norm": 7.06478214263916, "learning_rate": 5.066017155301193e-05, "loss": 2.014705467224121, "memory(GiB)": 72.85, "step": 57885, "token_acc": 0.5233333333333333, "train_speed(iter/s)": 0.672333 }, { "epoch": 2.4801850820444713, "grad_norm": 4.68418550491333, "learning_rate": 5.065344236063411e-05, "loss": 2.3649971008300783, "memory(GiB)": 72.85, "step": 57890, "token_acc": 0.5024630541871922, "train_speed(iter/s)": 0.672339 }, { "epoch": 2.48039929737372, "grad_norm": 3.7629361152648926, "learning_rate": 5.0646713156418565e-05, "loss": 2.2185415267944335, "memory(GiB)": 72.85, "step": 57895, "token_acc": 0.5490196078431373, "train_speed(iter/s)": 0.672348 }, { "epoch": 2.480613512702969, "grad_norm": 5.326994895935059, "learning_rate": 5.0639983940487214e-05, "loss": 2.2290390014648436, "memory(GiB)": 72.85, "step": 57900, "token_acc": 0.4984894259818731, "train_speed(iter/s)": 0.67234 }, { "epoch": 2.480827728032218, "grad_norm": 6.583948612213135, "learning_rate": 5.063325471296194e-05, "loss": 2.4305652618408202, "memory(GiB)": 72.85, "step": 57905, "token_acc": 0.46619217081850534, "train_speed(iter/s)": 0.672355 }, { "epoch": 
2.481041943361467, "grad_norm": 5.203731060028076, "learning_rate": 5.062652547396468e-05, "loss": 2.2492990493774414, "memory(GiB)": 72.85, "step": 57910, "token_acc": 0.4613003095975232, "train_speed(iter/s)": 0.67235 }, { "epoch": 2.481256158690716, "grad_norm": 3.8854010105133057, "learning_rate": 5.061979622361732e-05, "loss": 2.184286689758301, "memory(GiB)": 72.85, "step": 57915, "token_acc": 0.5551020408163265, "train_speed(iter/s)": 0.672369 }, { "epoch": 2.481470374019965, "grad_norm": 4.295200347900391, "learning_rate": 5.0613066962041765e-05, "loss": 2.5402721405029296, "memory(GiB)": 72.85, "step": 57920, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.672384 }, { "epoch": 2.481684589349214, "grad_norm": 5.368785858154297, "learning_rate": 5.060633768935993e-05, "loss": 2.4262481689453126, "memory(GiB)": 72.85, "step": 57925, "token_acc": 0.4689922480620155, "train_speed(iter/s)": 0.67238 }, { "epoch": 2.4818988046784627, "grad_norm": 4.8481340408325195, "learning_rate": 5.059960840569372e-05, "loss": 2.307527542114258, "memory(GiB)": 72.85, "step": 57930, "token_acc": 0.4952681388012618, "train_speed(iter/s)": 0.672384 }, { "epoch": 2.482113020007712, "grad_norm": 4.867666721343994, "learning_rate": 5.0592879111165035e-05, "loss": 2.5976577758789063, "memory(GiB)": 72.85, "step": 57935, "token_acc": 0.4889705882352941, "train_speed(iter/s)": 0.672393 }, { "epoch": 2.4823272353369608, "grad_norm": 3.7369155883789062, "learning_rate": 5.058614980589579e-05, "loss": 2.2487232208251955, "memory(GiB)": 72.85, "step": 57940, "token_acc": 0.5387453874538746, "train_speed(iter/s)": 0.672402 }, { "epoch": 2.4825414506662096, "grad_norm": 5.109816551208496, "learning_rate": 5.05794204900079e-05, "loss": 2.270569610595703, "memory(GiB)": 72.85, "step": 57945, "token_acc": 0.5230263157894737, "train_speed(iter/s)": 0.672402 }, { "epoch": 2.482755665995459, "grad_norm": 5.812681674957275, "learning_rate": 5.0572691163623245e-05, "loss": 
2.0889305114746093, "memory(GiB)": 72.85, "step": 57950, "token_acc": 0.5427509293680297, "train_speed(iter/s)": 0.672406 }, { "epoch": 2.4829698813247076, "grad_norm": 6.275829792022705, "learning_rate": 5.0565961826863775e-05, "loss": 2.2706668853759764, "memory(GiB)": 72.85, "step": 57955, "token_acc": 0.4855072463768116, "train_speed(iter/s)": 0.672407 }, { "epoch": 2.4831840966539565, "grad_norm": 4.6737847328186035, "learning_rate": 5.055923247985136e-05, "loss": 2.331997299194336, "memory(GiB)": 72.85, "step": 57960, "token_acc": 0.49691358024691357, "train_speed(iter/s)": 0.672415 }, { "epoch": 2.4833983119832057, "grad_norm": 6.304072856903076, "learning_rate": 5.055250312270793e-05, "loss": 2.4353897094726564, "memory(GiB)": 72.85, "step": 57965, "token_acc": 0.5, "train_speed(iter/s)": 0.672413 }, { "epoch": 2.4836125273124545, "grad_norm": 4.446763038635254, "learning_rate": 5.0545773755555394e-05, "loss": 2.1109041213989257, "memory(GiB)": 72.85, "step": 57970, "token_acc": 0.5484764542936288, "train_speed(iter/s)": 0.672412 }, { "epoch": 2.4838267426417033, "grad_norm": 5.300282955169678, "learning_rate": 5.0539044378515645e-05, "loss": 2.3512235641479493, "memory(GiB)": 72.85, "step": 57975, "token_acc": 0.5194029850746269, "train_speed(iter/s)": 0.67242 }, { "epoch": 2.4840409579709526, "grad_norm": 4.223952770233154, "learning_rate": 5.0532314991710595e-05, "loss": 2.279833221435547, "memory(GiB)": 72.85, "step": 57980, "token_acc": 0.5170278637770898, "train_speed(iter/s)": 0.672413 }, { "epoch": 2.4842551733002014, "grad_norm": 4.708879470825195, "learning_rate": 5.0525585595262184e-05, "loss": 2.3536956787109373, "memory(GiB)": 72.85, "step": 57985, "token_acc": 0.5125786163522013, "train_speed(iter/s)": 0.672414 }, { "epoch": 2.48446938862945, "grad_norm": 4.240755081176758, "learning_rate": 5.051885618929226e-05, "loss": 2.2990577697753904, "memory(GiB)": 72.85, "step": 57990, "token_acc": 0.5441176470588235, "train_speed(iter/s)": 0.672416 }, 
{ "epoch": 2.4846836039586995, "grad_norm": 5.408344745635986, "learning_rate": 5.051212677392281e-05, "loss": 2.3088531494140625, "memory(GiB)": 72.85, "step": 57995, "token_acc": 0.5145631067961165, "train_speed(iter/s)": 0.67241 }, { "epoch": 2.4848978192879483, "grad_norm": 4.875264644622803, "learning_rate": 5.050539734927569e-05, "loss": 2.1606266021728517, "memory(GiB)": 72.85, "step": 58000, "token_acc": 0.5488721804511278, "train_speed(iter/s)": 0.672416 }, { "epoch": 2.4848978192879483, "eval_loss": 2.127810478210449, "eval_runtime": 14.5145, "eval_samples_per_second": 6.89, "eval_steps_per_second": 6.89, "eval_token_acc": 0.5063613231552163, "step": 58000 }, { "epoch": 2.485112034617197, "grad_norm": 4.545732498168945, "learning_rate": 5.049866791547281e-05, "loss": 2.2280826568603516, "memory(GiB)": 72.85, "step": 58005, "token_acc": 0.4991304347826087, "train_speed(iter/s)": 0.672287 }, { "epoch": 2.4853262499464464, "grad_norm": 4.4461517333984375, "learning_rate": 5.049193847263609e-05, "loss": 2.131597900390625, "memory(GiB)": 72.85, "step": 58010, "token_acc": 0.5140845070422535, "train_speed(iter/s)": 0.672286 }, { "epoch": 2.485540465275695, "grad_norm": 8.166114807128906, "learning_rate": 5.048520902088746e-05, "loss": 2.0299808502197267, "memory(GiB)": 72.85, "step": 58015, "token_acc": 0.5627705627705628, "train_speed(iter/s)": 0.672292 }, { "epoch": 2.485754680604944, "grad_norm": 5.4601263999938965, "learning_rate": 5.04784795603488e-05, "loss": 2.1330766677856445, "memory(GiB)": 72.85, "step": 58020, "token_acc": 0.5551181102362205, "train_speed(iter/s)": 0.672295 }, { "epoch": 2.4859688959341932, "grad_norm": 7.700963973999023, "learning_rate": 5.047175009114204e-05, "loss": 2.0883398056030273, "memory(GiB)": 72.85, "step": 58025, "token_acc": 0.576, "train_speed(iter/s)": 0.6723 }, { "epoch": 2.486183111263442, "grad_norm": 4.4467878341674805, "learning_rate": 5.0465020613389094e-05, "loss": 2.193922996520996, "memory(GiB)": 72.85, 
"step": 58030, "token_acc": 0.51, "train_speed(iter/s)": 0.672299 }, { "epoch": 2.486397326592691, "grad_norm": 5.485840320587158, "learning_rate": 5.045829112721185e-05, "loss": 2.2496109008789062, "memory(GiB)": 72.85, "step": 58035, "token_acc": 0.5227272727272727, "train_speed(iter/s)": 0.6723 }, { "epoch": 2.48661154192194, "grad_norm": 4.536279201507568, "learning_rate": 5.045156163273224e-05, "loss": 2.1563846588134767, "memory(GiB)": 72.85, "step": 58040, "token_acc": 0.535031847133758, "train_speed(iter/s)": 0.672307 }, { "epoch": 2.486825757251189, "grad_norm": 4.981863021850586, "learning_rate": 5.044483213007217e-05, "loss": 2.092991256713867, "memory(GiB)": 72.85, "step": 58045, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.672306 }, { "epoch": 2.4870399725804377, "grad_norm": 4.048055648803711, "learning_rate": 5.043810261935352e-05, "loss": 2.1456804275512695, "memory(GiB)": 72.85, "step": 58050, "token_acc": 0.5321637426900585, "train_speed(iter/s)": 0.672306 }, { "epoch": 2.487254187909687, "grad_norm": 4.5414862632751465, "learning_rate": 5.0431373100698264e-05, "loss": 2.2639533996582033, "memory(GiB)": 72.85, "step": 58055, "token_acc": 0.5152671755725191, "train_speed(iter/s)": 0.67231 }, { "epoch": 2.487468403238936, "grad_norm": 5.684524059295654, "learning_rate": 5.042464357422827e-05, "loss": 1.9405601501464844, "memory(GiB)": 72.85, "step": 58060, "token_acc": 0.5270758122743683, "train_speed(iter/s)": 0.672322 }, { "epoch": 2.4876826185681846, "grad_norm": 4.896487712860107, "learning_rate": 5.0417914040065453e-05, "loss": 2.3090328216552733, "memory(GiB)": 72.85, "step": 58065, "token_acc": 0.5136186770428015, "train_speed(iter/s)": 0.672321 }, { "epoch": 2.487896833897434, "grad_norm": 4.560266494750977, "learning_rate": 5.041118449833174e-05, "loss": 2.353464889526367, "memory(GiB)": 72.85, "step": 58070, "token_acc": 0.5080906148867314, "train_speed(iter/s)": 0.672332 }, { "epoch": 2.4881110492266827, "grad_norm": 
7.105547904968262, "learning_rate": 5.040445494914904e-05, "loss": 2.240260696411133, "memory(GiB)": 72.85, "step": 58075, "token_acc": 0.5487804878048781, "train_speed(iter/s)": 0.67233 }, { "epoch": 2.4883252645559315, "grad_norm": 5.722555160522461, "learning_rate": 5.039772539263924e-05, "loss": 2.1027313232421876, "memory(GiB)": 72.85, "step": 58080, "token_acc": 0.49696969696969695, "train_speed(iter/s)": 0.672333 }, { "epoch": 2.4885394798851808, "grad_norm": 4.797251224517822, "learning_rate": 5.039099582892428e-05, "loss": 2.2601768493652346, "memory(GiB)": 72.85, "step": 58085, "token_acc": 0.5149253731343284, "train_speed(iter/s)": 0.672344 }, { "epoch": 2.4887536952144296, "grad_norm": 5.621713638305664, "learning_rate": 5.0384266258126066e-05, "loss": 2.4120922088623047, "memory(GiB)": 72.85, "step": 58090, "token_acc": 0.49328859060402686, "train_speed(iter/s)": 0.672356 }, { "epoch": 2.4889679105436784, "grad_norm": 5.096841812133789, "learning_rate": 5.03775366803665e-05, "loss": 2.0254634857177733, "memory(GiB)": 72.85, "step": 58095, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.672341 }, { "epoch": 2.4891821258729276, "grad_norm": 5.421125411987305, "learning_rate": 5.037080709576751e-05, "loss": 2.0434152603149416, "memory(GiB)": 72.85, "step": 58100, "token_acc": 0.551440329218107, "train_speed(iter/s)": 0.672336 }, { "epoch": 2.4893963412021765, "grad_norm": 3.6087958812713623, "learning_rate": 5.0364077504451e-05, "loss": 2.3541473388671874, "memory(GiB)": 72.85, "step": 58105, "token_acc": 0.48787878787878786, "train_speed(iter/s)": 0.672342 }, { "epoch": 2.4896105565314253, "grad_norm": 4.322079658508301, "learning_rate": 5.035734790653889e-05, "loss": 2.3450302124023437, "memory(GiB)": 72.85, "step": 58110, "token_acc": 0.5074183976261127, "train_speed(iter/s)": 0.672353 }, { "epoch": 2.4898247718606745, "grad_norm": 5.0246262550354, "learning_rate": 5.03506183021531e-05, "loss": 2.342724609375, "memory(GiB)": 72.85, "step": 
58115, "token_acc": 0.5062893081761006, "train_speed(iter/s)": 0.672363 }, { "epoch": 2.4900389871899233, "grad_norm": 5.571931838989258, "learning_rate": 5.0343888691415497e-05, "loss": 2.2161802291870116, "memory(GiB)": 72.85, "step": 58120, "token_acc": 0.5, "train_speed(iter/s)": 0.672355 }, { "epoch": 2.490253202519172, "grad_norm": 6.05535364151001, "learning_rate": 5.0337159074448034e-05, "loss": 2.1176149368286135, "memory(GiB)": 72.85, "step": 58125, "token_acc": 0.5609756097560976, "train_speed(iter/s)": 0.672367 }, { "epoch": 2.4904674178484214, "grad_norm": 3.520935297012329, "learning_rate": 5.033042945137264e-05, "loss": 1.8955541610717774, "memory(GiB)": 72.85, "step": 58130, "token_acc": 0.5699658703071673, "train_speed(iter/s)": 0.672384 }, { "epoch": 2.49068163317767, "grad_norm": 5.02352237701416, "learning_rate": 5.032369982231119e-05, "loss": 2.1527820587158204, "memory(GiB)": 72.85, "step": 58135, "token_acc": 0.501779359430605, "train_speed(iter/s)": 0.6724 }, { "epoch": 2.490895848506919, "grad_norm": 5.515174388885498, "learning_rate": 5.031697018738563e-05, "loss": 2.118912696838379, "memory(GiB)": 72.85, "step": 58140, "token_acc": 0.5490909090909091, "train_speed(iter/s)": 0.672404 }, { "epoch": 2.4911100638361683, "grad_norm": 5.846404075622559, "learning_rate": 5.0310240546717844e-05, "loss": 2.1512996673583986, "memory(GiB)": 72.85, "step": 58145, "token_acc": 0.5174825174825175, "train_speed(iter/s)": 0.672412 }, { "epoch": 2.491324279165417, "grad_norm": 4.430399417877197, "learning_rate": 5.030351090042976e-05, "loss": 2.2256940841674804, "memory(GiB)": 72.85, "step": 58150, "token_acc": 0.4967741935483871, "train_speed(iter/s)": 0.672416 }, { "epoch": 2.491538494494666, "grad_norm": 3.88523530960083, "learning_rate": 5.02967812486433e-05, "loss": 2.2626201629638674, "memory(GiB)": 72.85, "step": 58155, "token_acc": 0.5047318611987381, "train_speed(iter/s)": 0.672377 }, { "epoch": 2.491752709823915, "grad_norm": 4.874843120574951, 
"learning_rate": 5.029005159148036e-05, "loss": 2.2878307342529296, "memory(GiB)": 72.85, "step": 58160, "token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.672377 }, { "epoch": 2.491966925153164, "grad_norm": 6.1205034255981445, "learning_rate": 5.0283321929062854e-05, "loss": 2.349182891845703, "memory(GiB)": 72.85, "step": 58165, "token_acc": 0.5175097276264592, "train_speed(iter/s)": 0.672365 }, { "epoch": 2.492181140482413, "grad_norm": 4.781672477722168, "learning_rate": 5.0276592261512726e-05, "loss": 2.481645965576172, "memory(GiB)": 72.85, "step": 58170, "token_acc": 0.4610169491525424, "train_speed(iter/s)": 0.672368 }, { "epoch": 2.492395355811662, "grad_norm": 4.821316242218018, "learning_rate": 5.026986258895187e-05, "loss": 2.2313865661621093, "memory(GiB)": 72.85, "step": 58175, "token_acc": 0.5080906148867314, "train_speed(iter/s)": 0.672375 }, { "epoch": 2.492609571140911, "grad_norm": 5.904088497161865, "learning_rate": 5.0263132911502174e-05, "loss": 2.0822917938232424, "memory(GiB)": 72.85, "step": 58180, "token_acc": 0.5551330798479087, "train_speed(iter/s)": 0.672381 }, { "epoch": 2.4928237864701597, "grad_norm": 5.054769992828369, "learning_rate": 5.025640322928561e-05, "loss": 2.6363618850708006, "memory(GiB)": 72.85, "step": 58185, "token_acc": 0.45987654320987653, "train_speed(iter/s)": 0.672389 }, { "epoch": 2.493038001799409, "grad_norm": 5.495509624481201, "learning_rate": 5.024967354242405e-05, "loss": 2.100412940979004, "memory(GiB)": 72.85, "step": 58190, "token_acc": 0.5372549019607843, "train_speed(iter/s)": 0.672399 }, { "epoch": 2.4932522171286577, "grad_norm": 8.22148323059082, "learning_rate": 5.0242943851039404e-05, "loss": 2.2341346740722656, "memory(GiB)": 72.85, "step": 58195, "token_acc": 0.49642857142857144, "train_speed(iter/s)": 0.672396 }, { "epoch": 2.4934664324579066, "grad_norm": 5.336740493774414, "learning_rate": 5.0236214155253614e-05, "loss": 2.1689882278442383, "memory(GiB)": 72.85, "step": 58200, 
"token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.672387 }, { "epoch": 2.493680647787156, "grad_norm": 3.5767946243286133, "learning_rate": 5.0229484455188594e-05, "loss": 2.2340085983276365, "memory(GiB)": 72.85, "step": 58205, "token_acc": 0.5622775800711743, "train_speed(iter/s)": 0.672398 }, { "epoch": 2.4938948631164046, "grad_norm": 4.580663681030273, "learning_rate": 5.022275475096623e-05, "loss": 2.3867670059204102, "memory(GiB)": 72.85, "step": 58210, "token_acc": 0.4742647058823529, "train_speed(iter/s)": 0.672381 }, { "epoch": 2.4941090784456534, "grad_norm": 4.506206512451172, "learning_rate": 5.021602504270847e-05, "loss": 1.9376991271972657, "memory(GiB)": 72.85, "step": 58215, "token_acc": 0.5381818181818182, "train_speed(iter/s)": 0.672377 }, { "epoch": 2.4943232937749027, "grad_norm": 3.814923048019409, "learning_rate": 5.0209295330537206e-05, "loss": 2.344278335571289, "memory(GiB)": 72.85, "step": 58220, "token_acc": 0.5036231884057971, "train_speed(iter/s)": 0.672368 }, { "epoch": 2.4945375091041515, "grad_norm": 4.577261447906494, "learning_rate": 5.020256561457436e-05, "loss": 1.9721181869506836, "memory(GiB)": 72.85, "step": 58225, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.672371 }, { "epoch": 2.4947517244334003, "grad_norm": 5.582392692565918, "learning_rate": 5.019583589494184e-05, "loss": 2.6296049118041993, "memory(GiB)": 72.85, "step": 58230, "token_acc": 0.4579710144927536, "train_speed(iter/s)": 0.672376 }, { "epoch": 2.4949659397626496, "grad_norm": 5.780125617980957, "learning_rate": 5.018910617176158e-05, "loss": 2.5576683044433595, "memory(GiB)": 72.85, "step": 58235, "token_acc": 0.47592067988668557, "train_speed(iter/s)": 0.672376 }, { "epoch": 2.4951801550918984, "grad_norm": 4.945119857788086, "learning_rate": 5.018237644515548e-05, "loss": 2.1909618377685547, "memory(GiB)": 72.85, "step": 58240, "token_acc": 0.5278688524590164, "train_speed(iter/s)": 0.672382 }, { "epoch": 2.495394370421147, 
"grad_norm": 4.116199016571045, "learning_rate": 5.017564671524546e-05, "loss": 2.7428916931152343, "memory(GiB)": 72.85, "step": 58245, "token_acc": 0.4758842443729904, "train_speed(iter/s)": 0.67237 }, { "epoch": 2.4956085857503965, "grad_norm": 4.418224811553955, "learning_rate": 5.0168916982153436e-05, "loss": 2.2618715286254885, "memory(GiB)": 72.85, "step": 58250, "token_acc": 0.5640326975476839, "train_speed(iter/s)": 0.672367 }, { "epoch": 2.4958228010796453, "grad_norm": 5.694347858428955, "learning_rate": 5.016218724600132e-05, "loss": 2.2860137939453127, "memory(GiB)": 72.85, "step": 58255, "token_acc": 0.524390243902439, "train_speed(iter/s)": 0.672365 }, { "epoch": 2.496037016408894, "grad_norm": 5.231717586517334, "learning_rate": 5.015545750691104e-05, "loss": 2.0136295318603517, "memory(GiB)": 72.85, "step": 58260, "token_acc": 0.5444015444015444, "train_speed(iter/s)": 0.672383 }, { "epoch": 2.4962512317381433, "grad_norm": 4.490179061889648, "learning_rate": 5.014872776500449e-05, "loss": 2.1790794372558593, "memory(GiB)": 72.85, "step": 58265, "token_acc": 0.5308641975308642, "train_speed(iter/s)": 0.672386 }, { "epoch": 2.496465447067392, "grad_norm": 3.7768542766571045, "learning_rate": 5.0141998020403604e-05, "loss": 2.0229793548583985, "memory(GiB)": 72.85, "step": 58270, "token_acc": 0.5420289855072464, "train_speed(iter/s)": 0.672389 }, { "epoch": 2.496679662396641, "grad_norm": 5.016552925109863, "learning_rate": 5.0135268273230284e-05, "loss": 2.445242691040039, "memory(GiB)": 72.85, "step": 58275, "token_acc": 0.46200607902735563, "train_speed(iter/s)": 0.672368 }, { "epoch": 2.49689387772589, "grad_norm": 5.658161163330078, "learning_rate": 5.012853852360645e-05, "loss": 2.101393699645996, "memory(GiB)": 72.85, "step": 58280, "token_acc": 0.5182186234817814, "train_speed(iter/s)": 0.672353 }, { "epoch": 2.497108093055139, "grad_norm": 5.342682361602783, "learning_rate": 5.0121808771654035e-05, "loss": 2.4750411987304686, "memory(GiB)": 
72.85, "step": 58285, "token_acc": 0.48559670781893005, "train_speed(iter/s)": 0.672356 }, { "epoch": 2.497322308384388, "grad_norm": 3.7038018703460693, "learning_rate": 5.011507901749494e-05, "loss": 1.7040790557861327, "memory(GiB)": 72.85, "step": 58290, "token_acc": 0.5824175824175825, "train_speed(iter/s)": 0.672363 }, { "epoch": 2.497536523713637, "grad_norm": 5.685675144195557, "learning_rate": 5.0108349261251067e-05, "loss": 2.5326366424560547, "memory(GiB)": 72.85, "step": 58295, "token_acc": 0.46710526315789475, "train_speed(iter/s)": 0.672365 }, { "epoch": 2.497750739042886, "grad_norm": 5.703556060791016, "learning_rate": 5.010161950304435e-05, "loss": 2.201361083984375, "memory(GiB)": 72.85, "step": 58300, "token_acc": 0.5363321799307958, "train_speed(iter/s)": 0.672368 }, { "epoch": 2.4979649543721347, "grad_norm": 4.351314544677734, "learning_rate": 5.009488974299671e-05, "loss": 2.3663164138793946, "memory(GiB)": 72.85, "step": 58305, "token_acc": 0.5150501672240803, "train_speed(iter/s)": 0.672359 }, { "epoch": 2.498179169701384, "grad_norm": 6.183501720428467, "learning_rate": 5.008815998123002e-05, "loss": 2.3493608474731444, "memory(GiB)": 72.85, "step": 58310, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.672365 }, { "epoch": 2.498393385030633, "grad_norm": 4.649277687072754, "learning_rate": 5.0081430217866266e-05, "loss": 2.4558284759521483, "memory(GiB)": 72.85, "step": 58315, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.672364 }, { "epoch": 2.4986076003598816, "grad_norm": 4.506903648376465, "learning_rate": 5.0074700453027315e-05, "loss": 2.392802429199219, "memory(GiB)": 72.85, "step": 58320, "token_acc": 0.5018315018315018, "train_speed(iter/s)": 0.672377 }, { "epoch": 2.498821815689131, "grad_norm": 5.400259017944336, "learning_rate": 5.0067970686835083e-05, "loss": 2.4563602447509765, "memory(GiB)": 72.85, "step": 58325, "token_acc": 0.49038461538461536, "train_speed(iter/s)": 0.672381 }, { "epoch": 
2.4990360310183797, "grad_norm": 4.5759053230285645, "learning_rate": 5.0061240919411523e-05, "loss": 2.201059913635254, "memory(GiB)": 72.85, "step": 58330, "token_acc": 0.5405405405405406, "train_speed(iter/s)": 0.672386 }, { "epoch": 2.4992502463476285, "grad_norm": 4.603580474853516, "learning_rate": 5.005451115087851e-05, "loss": 2.3836080551147463, "memory(GiB)": 72.85, "step": 58335, "token_acc": 0.4895833333333333, "train_speed(iter/s)": 0.672373 }, { "epoch": 2.4994644616768777, "grad_norm": 4.602904796600342, "learning_rate": 5.0047781381357984e-05, "loss": 2.1154525756835936, "memory(GiB)": 72.85, "step": 58340, "token_acc": 0.5457875457875457, "train_speed(iter/s)": 0.672366 }, { "epoch": 2.4996786770061266, "grad_norm": 4.179277420043945, "learning_rate": 5.004105161097185e-05, "loss": 2.1475542068481444, "memory(GiB)": 72.85, "step": 58345, "token_acc": 0.5444839857651246, "train_speed(iter/s)": 0.672373 }, { "epoch": 2.4998928923353754, "grad_norm": 6.299825191497803, "learning_rate": 5.0034321839842024e-05, "loss": 1.9512674331665039, "memory(GiB)": 72.85, "step": 58350, "token_acc": 0.5545851528384279, "train_speed(iter/s)": 0.672361 }, { "epoch": 2.5001071076646246, "grad_norm": 5.328091144561768, "learning_rate": 5.0027592068090424e-05, "loss": 2.0623308181762696, "memory(GiB)": 72.85, "step": 58355, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.672354 }, { "epoch": 2.5003213229938734, "grad_norm": 3.6819024085998535, "learning_rate": 5.002086229583898e-05, "loss": 2.626128005981445, "memory(GiB)": 72.85, "step": 58360, "token_acc": 0.49201277955271566, "train_speed(iter/s)": 0.672354 }, { "epoch": 2.5005355383231223, "grad_norm": 4.576300621032715, "learning_rate": 5.001413252320959e-05, "loss": 2.2774379730224608, "memory(GiB)": 72.85, "step": 58365, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.672367 }, { "epoch": 2.5007497536523715, "grad_norm": 4.850045204162598, "learning_rate": 5.0007402750324164e-05, "loss": 
2.272943687438965, "memory(GiB)": 72.85, "step": 58370, "token_acc": 0.543859649122807, "train_speed(iter/s)": 0.672361 }, { "epoch": 2.5009639689816203, "grad_norm": 6.304008960723877, "learning_rate": 5.000067297730464e-05, "loss": 2.3082462310791017, "memory(GiB)": 72.85, "step": 58375, "token_acc": 0.47928994082840237, "train_speed(iter/s)": 0.672359 }, { "epoch": 2.501178184310869, "grad_norm": 5.098687648773193, "learning_rate": 4.9993943204272926e-05, "loss": 2.079889106750488, "memory(GiB)": 72.85, "step": 58380, "token_acc": 0.5054545454545455, "train_speed(iter/s)": 0.67236 }, { "epoch": 2.5013923996401184, "grad_norm": 5.4142045974731445, "learning_rate": 4.998721343135093e-05, "loss": 2.2214710235595705, "memory(GiB)": 72.85, "step": 58385, "token_acc": 0.5276872964169381, "train_speed(iter/s)": 0.672366 }, { "epoch": 2.501606614969367, "grad_norm": 5.090145587921143, "learning_rate": 4.998048365866058e-05, "loss": 2.127269744873047, "memory(GiB)": 72.85, "step": 58390, "token_acc": 0.5137931034482759, "train_speed(iter/s)": 0.672372 }, { "epoch": 2.501820830298616, "grad_norm": 6.4974846839904785, "learning_rate": 4.997375388632378e-05, "loss": 2.288983154296875, "memory(GiB)": 72.85, "step": 58395, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672347 }, { "epoch": 2.5020350456278653, "grad_norm": 4.413758277893066, "learning_rate": 4.9967024114462466e-05, "loss": 2.0680263519287108, "memory(GiB)": 72.85, "step": 58400, "token_acc": 0.539622641509434, "train_speed(iter/s)": 0.672352 }, { "epoch": 2.502249260957114, "grad_norm": 5.7486653327941895, "learning_rate": 4.9960294343198547e-05, "loss": 1.9523933410644532, "memory(GiB)": 72.85, "step": 58405, "token_acc": 0.5649122807017544, "train_speed(iter/s)": 0.672367 }, { "epoch": 2.502463476286363, "grad_norm": 4.418430805206299, "learning_rate": 4.995356457265391e-05, "loss": 2.476479911804199, "memory(GiB)": 72.85, "step": 58410, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 
0.672372 }, { "epoch": 2.502677691615612, "grad_norm": 4.847742557525635, "learning_rate": 4.994683480295051e-05, "loss": 2.5718732833862306, "memory(GiB)": 72.85, "step": 58415, "token_acc": 0.47115384615384615, "train_speed(iter/s)": 0.672366 }, { "epoch": 2.502891906944861, "grad_norm": 4.791486740112305, "learning_rate": 4.994010503421025e-05, "loss": 2.4662353515625, "memory(GiB)": 72.85, "step": 58420, "token_acc": 0.467680608365019, "train_speed(iter/s)": 0.672366 }, { "epoch": 2.50310612227411, "grad_norm": 5.543183326721191, "learning_rate": 4.993337526655503e-05, "loss": 2.4136043548583985, "memory(GiB)": 72.85, "step": 58425, "token_acc": 0.47692307692307695, "train_speed(iter/s)": 0.672372 }, { "epoch": 2.503320337603359, "grad_norm": 4.451886177062988, "learning_rate": 4.992664550010679e-05, "loss": 2.4118207931518554, "memory(GiB)": 72.85, "step": 58430, "token_acc": 0.4716981132075472, "train_speed(iter/s)": 0.672367 }, { "epoch": 2.503534552932608, "grad_norm": 5.1791181564331055, "learning_rate": 4.991991573498741e-05, "loss": 2.1430690765380858, "memory(GiB)": 72.85, "step": 58435, "token_acc": 0.594017094017094, "train_speed(iter/s)": 0.672359 }, { "epoch": 2.5037487682618567, "grad_norm": 6.864693641662598, "learning_rate": 4.9913185971318865e-05, "loss": 2.0967227935791017, "memory(GiB)": 72.85, "step": 58440, "token_acc": 0.5375, "train_speed(iter/s)": 0.672356 }, { "epoch": 2.503962983591106, "grad_norm": 5.143047332763672, "learning_rate": 4.990645620922302e-05, "loss": 1.5967294692993164, "memory(GiB)": 72.85, "step": 58445, "token_acc": 0.6220472440944882, "train_speed(iter/s)": 0.67234 }, { "epoch": 2.5041771989203547, "grad_norm": 4.997161865234375, "learning_rate": 4.9899726448821824e-05, "loss": 2.1738698959350584, "memory(GiB)": 72.85, "step": 58450, "token_acc": 0.5168195718654435, "train_speed(iter/s)": 0.672356 }, { "epoch": 2.5043914142496035, "grad_norm": 4.4313459396362305, "learning_rate": 4.9892996690237154e-05, "loss": 
2.117993927001953, "memory(GiB)": 72.85, "step": 58455, "token_acc": 0.5399361022364217, "train_speed(iter/s)": 0.672341 }, { "epoch": 2.504605629578853, "grad_norm": 5.619285583496094, "learning_rate": 4.988626693359097e-05, "loss": 2.335901069641113, "memory(GiB)": 72.85, "step": 58460, "token_acc": 0.4862068965517241, "train_speed(iter/s)": 0.672354 }, { "epoch": 2.5048198449081016, "grad_norm": 5.42985200881958, "learning_rate": 4.9879537179005164e-05, "loss": 2.222372055053711, "memory(GiB)": 72.85, "step": 58465, "token_acc": 0.5261324041811847, "train_speed(iter/s)": 0.672361 }, { "epoch": 2.5050340602373504, "grad_norm": 5.175992488861084, "learning_rate": 4.9872807426601646e-05, "loss": 2.2647001266479494, "memory(GiB)": 72.85, "step": 58470, "token_acc": 0.48299319727891155, "train_speed(iter/s)": 0.67237 }, { "epoch": 2.5052482755665997, "grad_norm": 5.733227252960205, "learning_rate": 4.9866077676502356e-05, "loss": 2.393342208862305, "memory(GiB)": 72.85, "step": 58475, "token_acc": 0.4967532467532468, "train_speed(iter/s)": 0.672362 }, { "epoch": 2.5054624908958485, "grad_norm": 5.0578484535217285, "learning_rate": 4.9859347928829185e-05, "loss": 2.2241180419921873, "memory(GiB)": 72.85, "step": 58480, "token_acc": 0.5381944444444444, "train_speed(iter/s)": 0.672356 }, { "epoch": 2.5056767062250973, "grad_norm": 4.61121940612793, "learning_rate": 4.985261818370405e-05, "loss": 2.267482948303223, "memory(GiB)": 72.85, "step": 58485, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.672365 }, { "epoch": 2.5058909215543466, "grad_norm": 4.055366516113281, "learning_rate": 4.984588844124889e-05, "loss": 2.101359748840332, "memory(GiB)": 72.85, "step": 58490, "token_acc": 0.5233333333333333, "train_speed(iter/s)": 0.672364 }, { "epoch": 2.5061051368835954, "grad_norm": 4.503282070159912, "learning_rate": 4.983915870158559e-05, "loss": 2.4185968399047852, "memory(GiB)": 72.85, "step": 58495, "token_acc": 0.4728434504792332, "train_speed(iter/s)": 
0.672372 }, { "epoch": 2.506319352212844, "grad_norm": 4.889791965484619, "learning_rate": 4.98324289648361e-05, "loss": 2.3585201263427735, "memory(GiB)": 72.85, "step": 58500, "token_acc": 0.46688741721854304, "train_speed(iter/s)": 0.67236 }, { "epoch": 2.506319352212844, "eval_loss": 2.1004297733306885, "eval_runtime": 15.4747, "eval_samples_per_second": 6.462, "eval_steps_per_second": 6.462, "eval_token_acc": 0.4796854521625164, "step": 58500 }, { "epoch": 2.5065335675420934, "grad_norm": 4.812817096710205, "learning_rate": 4.982569923112231e-05, "loss": 2.160181427001953, "memory(GiB)": 72.85, "step": 58505, "token_acc": 0.48633364750235625, "train_speed(iter/s)": 0.67223 }, { "epoch": 2.5067477828713423, "grad_norm": 5.085564136505127, "learning_rate": 4.9818969500566115e-05, "loss": 2.224671745300293, "memory(GiB)": 72.85, "step": 58510, "token_acc": 0.48698884758364314, "train_speed(iter/s)": 0.672242 }, { "epoch": 2.506961998200591, "grad_norm": 4.257676601409912, "learning_rate": 4.981223977328948e-05, "loss": 1.9801105499267577, "memory(GiB)": 72.85, "step": 58515, "token_acc": 0.526984126984127, "train_speed(iter/s)": 0.672249 }, { "epoch": 2.5071762135298403, "grad_norm": 5.481099605560303, "learning_rate": 4.98055100494143e-05, "loss": 2.4530218124389647, "memory(GiB)": 72.85, "step": 58520, "token_acc": 0.4819672131147541, "train_speed(iter/s)": 0.672233 }, { "epoch": 2.507390428859089, "grad_norm": 5.455050468444824, "learning_rate": 4.979878032906249e-05, "loss": 2.046868324279785, "memory(GiB)": 72.85, "step": 58525, "token_acc": 0.5521885521885522, "train_speed(iter/s)": 0.67226 }, { "epoch": 2.507604644188338, "grad_norm": 4.915592670440674, "learning_rate": 4.979205061235595e-05, "loss": 2.347561836242676, "memory(GiB)": 72.85, "step": 58530, "token_acc": 0.48333333333333334, "train_speed(iter/s)": 0.672266 }, { "epoch": 2.507818859517587, "grad_norm": 4.349565029144287, "learning_rate": 4.978532089941662e-05, "loss": 2.1286203384399416, 
"memory(GiB)": 72.85, "step": 58535, "token_acc": 0.5729166666666666, "train_speed(iter/s)": 0.672272 }, { "epoch": 2.508033074846836, "grad_norm": 5.782959938049316, "learning_rate": 4.97785911903664e-05, "loss": 2.1051538467407225, "memory(GiB)": 72.85, "step": 58540, "token_acc": 0.5568627450980392, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.508247290176085, "grad_norm": 6.338688850402832, "learning_rate": 4.977186148532721e-05, "loss": 2.493810272216797, "memory(GiB)": 72.85, "step": 58545, "token_acc": 0.45774647887323944, "train_speed(iter/s)": 0.672273 }, { "epoch": 2.508461505505334, "grad_norm": 5.465243816375732, "learning_rate": 4.9765131784420963e-05, "loss": 2.1734106063842775, "memory(GiB)": 72.85, "step": 58550, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.672267 }, { "epoch": 2.508675720834583, "grad_norm": 5.30525016784668, "learning_rate": 4.975840208776957e-05, "loss": 2.0986377716064455, "memory(GiB)": 72.85, "step": 58555, "token_acc": 0.532608695652174, "train_speed(iter/s)": 0.67228 }, { "epoch": 2.5088899361638317, "grad_norm": 3.7373569011688232, "learning_rate": 4.975167239549496e-05, "loss": 2.290299415588379, "memory(GiB)": 72.85, "step": 58560, "token_acc": 0.5047923322683706, "train_speed(iter/s)": 0.672283 }, { "epoch": 2.509104151493081, "grad_norm": 4.944522380828857, "learning_rate": 4.974494270771903e-05, "loss": 2.2532825469970703, "memory(GiB)": 72.85, "step": 58565, "token_acc": 0.5285171102661597, "train_speed(iter/s)": 0.672277 }, { "epoch": 2.50931836682233, "grad_norm": 5.237199783325195, "learning_rate": 4.97382130245637e-05, "loss": 2.4935813903808595, "memory(GiB)": 72.85, "step": 58570, "token_acc": 0.48928571428571427, "train_speed(iter/s)": 0.672263 }, { "epoch": 2.5095325821515786, "grad_norm": 4.297763347625732, "learning_rate": 4.973148334615089e-05, "loss": 2.2886890411376952, "memory(GiB)": 72.85, "step": 58575, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.672272 }, { "epoch": 
2.509746797480828, "grad_norm": 5.148290634155273, "learning_rate": 4.9724753672602504e-05, "loss": 2.187929153442383, "memory(GiB)": 72.85, "step": 58580, "token_acc": 0.48398576512455516, "train_speed(iter/s)": 0.672266 }, { "epoch": 2.5099610128100767, "grad_norm": 5.912526607513428, "learning_rate": 4.971802400404046e-05, "loss": 2.129001235961914, "memory(GiB)": 72.85, "step": 58585, "token_acc": 0.5467625899280576, "train_speed(iter/s)": 0.672279 }, { "epoch": 2.5101752281393255, "grad_norm": 4.5558648109436035, "learning_rate": 4.9711294340586686e-05, "loss": 2.3181472778320313, "memory(GiB)": 72.85, "step": 58590, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.5103894434685747, "grad_norm": 4.071789264678955, "learning_rate": 4.970456468236309e-05, "loss": 2.0295312881469725, "memory(GiB)": 72.85, "step": 58595, "token_acc": 0.5287356321839081, "train_speed(iter/s)": 0.672273 }, { "epoch": 2.5106036587978235, "grad_norm": 4.698893070220947, "learning_rate": 4.9697835029491566e-05, "loss": 2.5124584197998048, "memory(GiB)": 72.85, "step": 58600, "token_acc": 0.4559748427672956, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.5108178741270724, "grad_norm": 5.353111267089844, "learning_rate": 4.969110538209406e-05, "loss": 2.3825794219970704, "memory(GiB)": 72.85, "step": 58605, "token_acc": 0.4837758112094395, "train_speed(iter/s)": 0.672283 }, { "epoch": 2.5110320894563216, "grad_norm": 4.157218933105469, "learning_rate": 4.968437574029247e-05, "loss": 2.145614433288574, "memory(GiB)": 72.85, "step": 58610, "token_acc": 0.5268456375838926, "train_speed(iter/s)": 0.672296 }, { "epoch": 2.5112463047855704, "grad_norm": 5.2981648445129395, "learning_rate": 4.9677646104208694e-05, "loss": 2.322414207458496, "memory(GiB)": 72.85, "step": 58615, "token_acc": 0.521594684385382, "train_speed(iter/s)": 0.67229 }, { "epoch": 2.5114605201148192, "grad_norm": 5.047579288482666, "learning_rate": 4.967091647396468e-05, "loss": 
2.198504638671875, "memory(GiB)": 72.85, "step": 58620, "token_acc": 0.5071633237822349, "train_speed(iter/s)": 0.672281 }, { "epoch": 2.5116747354440685, "grad_norm": 4.745920658111572, "learning_rate": 4.966418684968232e-05, "loss": 1.9346214294433595, "memory(GiB)": 72.85, "step": 58625, "token_acc": 0.5236220472440944, "train_speed(iter/s)": 0.672284 }, { "epoch": 2.5118889507733173, "grad_norm": 6.241486072540283, "learning_rate": 4.965745723148352e-05, "loss": 2.196904182434082, "memory(GiB)": 72.85, "step": 58630, "token_acc": 0.5285714285714286, "train_speed(iter/s)": 0.672288 }, { "epoch": 2.512103166102566, "grad_norm": 4.650886535644531, "learning_rate": 4.965072761949021e-05, "loss": 1.957223892211914, "memory(GiB)": 72.85, "step": 58635, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672283 }, { "epoch": 2.5123173814318154, "grad_norm": 4.440714359283447, "learning_rate": 4.964399801382429e-05, "loss": 2.1612268447875977, "memory(GiB)": 72.85, "step": 58640, "token_acc": 0.5338078291814946, "train_speed(iter/s)": 0.672291 }, { "epoch": 2.512531596761064, "grad_norm": 5.412466526031494, "learning_rate": 4.9637268414607685e-05, "loss": 2.181583213806152, "memory(GiB)": 72.85, "step": 58645, "token_acc": 0.49137931034482757, "train_speed(iter/s)": 0.672288 }, { "epoch": 2.512745812090313, "grad_norm": 4.904282569885254, "learning_rate": 4.9630538821962306e-05, "loss": 2.468711090087891, "memory(GiB)": 72.85, "step": 58650, "token_acc": 0.5221843003412969, "train_speed(iter/s)": 0.672289 }, { "epoch": 2.5129600274195623, "grad_norm": 4.678189277648926, "learning_rate": 4.962380923601005e-05, "loss": 2.1244380950927733, "memory(GiB)": 72.85, "step": 58655, "token_acc": 0.5164835164835165, "train_speed(iter/s)": 0.672289 }, { "epoch": 2.513174242748811, "grad_norm": 5.6530842781066895, "learning_rate": 4.9617079656872837e-05, "loss": 2.32037467956543, "memory(GiB)": 72.85, "step": 58660, "token_acc": 0.4899713467048711, "train_speed(iter/s)": 
0.672284 }, { "epoch": 2.51338845807806, "grad_norm": 4.595837593078613, "learning_rate": 4.961035008467261e-05, "loss": 2.017861557006836, "memory(GiB)": 72.85, "step": 58665, "token_acc": 0.5613382899628253, "train_speed(iter/s)": 0.672286 }, { "epoch": 2.513602673407309, "grad_norm": 4.805229187011719, "learning_rate": 4.9603620519531245e-05, "loss": 2.3308149337768556, "memory(GiB)": 72.85, "step": 58670, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.672295 }, { "epoch": 2.513816888736558, "grad_norm": 4.800344467163086, "learning_rate": 4.9596890961570666e-05, "loss": 2.5126325607299806, "memory(GiB)": 72.85, "step": 58675, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.672305 }, { "epoch": 2.5140311040658068, "grad_norm": 5.328408718109131, "learning_rate": 4.959016141091279e-05, "loss": 2.210011672973633, "memory(GiB)": 72.85, "step": 58680, "token_acc": 0.48771929824561405, "train_speed(iter/s)": 0.672285 }, { "epoch": 2.514245319395056, "grad_norm": 4.605645656585693, "learning_rate": 4.958343186767952e-05, "loss": 1.9993444442749024, "memory(GiB)": 72.85, "step": 58685, "token_acc": 0.5296167247386759, "train_speed(iter/s)": 0.672278 }, { "epoch": 2.514459534724305, "grad_norm": 4.255268573760986, "learning_rate": 4.9576702331992776e-05, "loss": 2.162443923950195, "memory(GiB)": 72.85, "step": 58690, "token_acc": 0.5054054054054054, "train_speed(iter/s)": 0.672273 }, { "epoch": 2.5146737500535536, "grad_norm": 4.246697902679443, "learning_rate": 4.9569972803974466e-05, "loss": 1.8965530395507812, "memory(GiB)": 72.85, "step": 58695, "token_acc": 0.5726495726495726, "train_speed(iter/s)": 0.672264 }, { "epoch": 2.514887965382803, "grad_norm": 5.319045066833496, "learning_rate": 4.9563243283746494e-05, "loss": 2.463578987121582, "memory(GiB)": 72.85, "step": 58700, "token_acc": 0.4788273615635179, "train_speed(iter/s)": 0.672261 }, { "epoch": 2.5151021807120517, "grad_norm": 4.521093368530273, "learning_rate": 4.95565137714308e-05, 
"loss": 2.312754821777344, "memory(GiB)": 72.85, "step": 58705, "token_acc": 0.5215827338129496, "train_speed(iter/s)": 0.672271 }, { "epoch": 2.5153163960413005, "grad_norm": 5.74748420715332, "learning_rate": 4.9549784267149266e-05, "loss": 2.1767913818359377, "memory(GiB)": 72.85, "step": 58710, "token_acc": 0.5342960288808665, "train_speed(iter/s)": 0.672267 }, { "epoch": 2.51553061137055, "grad_norm": 5.19431209564209, "learning_rate": 4.954305477102379e-05, "loss": 2.4342777252197267, "memory(GiB)": 72.85, "step": 58715, "token_acc": 0.4620253164556962, "train_speed(iter/s)": 0.672278 }, { "epoch": 2.5157448266997986, "grad_norm": 7.4974446296691895, "learning_rate": 4.953632528317633e-05, "loss": 2.5454288482666017, "memory(GiB)": 72.85, "step": 58720, "token_acc": 0.4930875576036866, "train_speed(iter/s)": 0.672285 }, { "epoch": 2.5159590420290474, "grad_norm": 4.942102909088135, "learning_rate": 4.952959580372876e-05, "loss": 2.153953742980957, "memory(GiB)": 72.85, "step": 58725, "token_acc": 0.5101351351351351, "train_speed(iter/s)": 0.672273 }, { "epoch": 2.5161732573582967, "grad_norm": 4.480218410491943, "learning_rate": 4.952286633280302e-05, "loss": 2.1818849563598635, "memory(GiB)": 72.85, "step": 58730, "token_acc": 0.5163398692810458, "train_speed(iter/s)": 0.672282 }, { "epoch": 2.5163874726875455, "grad_norm": 5.204768657684326, "learning_rate": 4.951613687052097e-05, "loss": 2.0962635040283204, "memory(GiB)": 72.85, "step": 58735, "token_acc": 0.5261324041811847, "train_speed(iter/s)": 0.6723 }, { "epoch": 2.5166016880167943, "grad_norm": 5.701413154602051, "learning_rate": 4.950940741700459e-05, "loss": 2.0734268188476563, "memory(GiB)": 72.85, "step": 58740, "token_acc": 0.52, "train_speed(iter/s)": 0.672287 }, { "epoch": 2.5168159033460435, "grad_norm": 4.537508487701416, "learning_rate": 4.950267797237573e-05, "loss": 2.0367143630981444, "memory(GiB)": 72.85, "step": 58745, "token_acc": 0.5443037974683544, "train_speed(iter/s)": 0.672283 
}, { "epoch": 2.5170301186752924, "grad_norm": 6.196760654449463, "learning_rate": 4.949594853675634e-05, "loss": 2.3728815078735352, "memory(GiB)": 72.85, "step": 58750, "token_acc": 0.4962962962962963, "train_speed(iter/s)": 0.672293 }, { "epoch": 2.517244334004541, "grad_norm": 4.6542840003967285, "learning_rate": 4.948921911026832e-05, "loss": 2.112546920776367, "memory(GiB)": 72.85, "step": 58755, "token_acc": 0.5424354243542435, "train_speed(iter/s)": 0.672298 }, { "epoch": 2.5174585493337904, "grad_norm": 4.401843070983887, "learning_rate": 4.9482489693033564e-05, "loss": 2.274348831176758, "memory(GiB)": 72.85, "step": 58760, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.672295 }, { "epoch": 2.5176727646630392, "grad_norm": 4.911707878112793, "learning_rate": 4.9475760285174e-05, "loss": 2.2144664764404296, "memory(GiB)": 72.85, "step": 58765, "token_acc": 0.5288461538461539, "train_speed(iter/s)": 0.672301 }, { "epoch": 2.517886979992288, "grad_norm": 5.812314033508301, "learning_rate": 4.946903088681153e-05, "loss": 2.2439949035644533, "memory(GiB)": 72.85, "step": 58770, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.672289 }, { "epoch": 2.5181011953215373, "grad_norm": 5.7122578620910645, "learning_rate": 4.9462301498068057e-05, "loss": 2.2237216949462892, "memory(GiB)": 72.85, "step": 58775, "token_acc": 0.5167785234899329, "train_speed(iter/s)": 0.672293 }, { "epoch": 2.518315410650786, "grad_norm": 3.8782148361206055, "learning_rate": 4.94555721190655e-05, "loss": 2.0520244598388673, "memory(GiB)": 72.85, "step": 58780, "token_acc": 0.5467625899280576, "train_speed(iter/s)": 0.672301 }, { "epoch": 2.518529625980035, "grad_norm": 4.619234561920166, "learning_rate": 4.944884274992576e-05, "loss": 2.305801010131836, "memory(GiB)": 72.85, "step": 58785, "token_acc": 0.45348837209302323, "train_speed(iter/s)": 0.672309 }, { "epoch": 2.518743841309284, "grad_norm": 4.698974132537842, "learning_rate": 4.9442113390770764e-05, "loss": 
2.1385015487670898, "memory(GiB)": 72.85, "step": 58790, "token_acc": 0.5296167247386759, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.518958056638533, "grad_norm": 4.660628795623779, "learning_rate": 4.94353840417224e-05, "loss": 2.11842041015625, "memory(GiB)": 72.85, "step": 58795, "token_acc": 0.5680272108843537, "train_speed(iter/s)": 0.672329 }, { "epoch": 2.519172271967782, "grad_norm": 5.848747253417969, "learning_rate": 4.942865470290258e-05, "loss": 2.230601692199707, "memory(GiB)": 72.85, "step": 58800, "token_acc": 0.5413533834586466, "train_speed(iter/s)": 0.672327 }, { "epoch": 2.519386487297031, "grad_norm": 4.419066429138184, "learning_rate": 4.9421925374433195e-05, "loss": 2.3535640716552733, "memory(GiB)": 72.85, "step": 58805, "token_acc": 0.5089605734767025, "train_speed(iter/s)": 0.672324 }, { "epoch": 2.51960070262628, "grad_norm": 4.093975067138672, "learning_rate": 4.94151960564362e-05, "loss": 2.3761133193969726, "memory(GiB)": 72.85, "step": 58810, "token_acc": 0.46946564885496184, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.5198149179555287, "grad_norm": 5.196322917938232, "learning_rate": 4.9408466749033485e-05, "loss": 2.4656042098999023, "memory(GiB)": 72.85, "step": 58815, "token_acc": 0.501466275659824, "train_speed(iter/s)": 0.672335 }, { "epoch": 2.520029133284778, "grad_norm": 9.155738830566406, "learning_rate": 4.940173745234693e-05, "loss": 2.3437108993530273, "memory(GiB)": 72.85, "step": 58820, "token_acc": 0.496551724137931, "train_speed(iter/s)": 0.672327 }, { "epoch": 2.5202433486140268, "grad_norm": 4.004617691040039, "learning_rate": 4.9395008166498474e-05, "loss": 2.1087600708007814, "memory(GiB)": 72.85, "step": 58825, "token_acc": 0.5809859154929577, "train_speed(iter/s)": 0.672338 }, { "epoch": 2.5204575639432756, "grad_norm": 3.948331117630005, "learning_rate": 4.9388278891610004e-05, "loss": 2.26992244720459, "memory(GiB)": 72.85, "step": 58830, "token_acc": 0.5170278637770898, "train_speed(iter/s)": 0.672344 
}, { "epoch": 2.520671779272525, "grad_norm": 6.2415947914123535, "learning_rate": 4.938154962780345e-05, "loss": 2.392315673828125, "memory(GiB)": 72.85, "step": 58835, "token_acc": 0.52, "train_speed(iter/s)": 0.672347 }, { "epoch": 2.5208859946017736, "grad_norm": 5.119233131408691, "learning_rate": 4.93748203752007e-05, "loss": 2.3814104080200194, "memory(GiB)": 72.85, "step": 58840, "token_acc": 0.5095057034220533, "train_speed(iter/s)": 0.672342 }, { "epoch": 2.5211002099310225, "grad_norm": 5.6587018966674805, "learning_rate": 4.9368091133923655e-05, "loss": 2.332357406616211, "memory(GiB)": 72.85, "step": 58845, "token_acc": 0.5037037037037037, "train_speed(iter/s)": 0.672344 }, { "epoch": 2.5213144252602717, "grad_norm": 5.754927635192871, "learning_rate": 4.936136190409424e-05, "loss": 2.111319351196289, "memory(GiB)": 72.85, "step": 58850, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.67233 }, { "epoch": 2.5215286405895205, "grad_norm": 4.965094089508057, "learning_rate": 4.9354632685834355e-05, "loss": 2.166355514526367, "memory(GiB)": 72.85, "step": 58855, "token_acc": 0.5412541254125413, "train_speed(iter/s)": 0.672336 }, { "epoch": 2.5217428559187693, "grad_norm": 4.978896617889404, "learning_rate": 4.9347903479265896e-05, "loss": 2.202036666870117, "memory(GiB)": 72.85, "step": 58860, "token_acc": 0.5261324041811847, "train_speed(iter/s)": 0.672327 }, { "epoch": 2.5219570712480186, "grad_norm": 5.112339496612549, "learning_rate": 4.934117428451079e-05, "loss": 2.2650646209716796, "memory(GiB)": 72.85, "step": 58865, "token_acc": 0.5174825174825175, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.5221712865772674, "grad_norm": 4.711585521697998, "learning_rate": 4.933444510169091e-05, "loss": 2.2734622955322266, "memory(GiB)": 72.85, "step": 58870, "token_acc": 0.501466275659824, "train_speed(iter/s)": 0.672318 }, { "epoch": 2.5223855019065162, "grad_norm": 4.976612567901611, "learning_rate": 4.9327715930928206e-05, "loss": 
2.34671573638916, "memory(GiB)": 72.85, "step": 58875, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.67232 }, { "epoch": 2.5225997172357655, "grad_norm": 5.167202472686768, "learning_rate": 4.932098677234452e-05, "loss": 2.3282474517822265, "memory(GiB)": 72.85, "step": 58880, "token_acc": 0.49473684210526314, "train_speed(iter/s)": 0.672323 }, { "epoch": 2.5228139325650143, "grad_norm": 4.882300853729248, "learning_rate": 4.931425762606183e-05, "loss": 2.331489372253418, "memory(GiB)": 72.85, "step": 58885, "token_acc": 0.4881656804733728, "train_speed(iter/s)": 0.672327 }, { "epoch": 2.523028147894263, "grad_norm": 4.433920383453369, "learning_rate": 4.9307528492201994e-05, "loss": 2.1916337966918946, "memory(GiB)": 72.85, "step": 58890, "token_acc": 0.5189003436426117, "train_speed(iter/s)": 0.672326 }, { "epoch": 2.5232423632235124, "grad_norm": 4.314953327178955, "learning_rate": 4.930079937088693e-05, "loss": 2.2827234268188477, "memory(GiB)": 72.85, "step": 58895, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.672324 }, { "epoch": 2.523456578552761, "grad_norm": 3.996446371078491, "learning_rate": 4.929407026223856e-05, "loss": 2.1791606903076173, "memory(GiB)": 72.85, "step": 58900, "token_acc": 0.5247524752475248, "train_speed(iter/s)": 0.672323 }, { "epoch": 2.52367079388201, "grad_norm": 4.4465651512146, "learning_rate": 4.928734116637874e-05, "loss": 2.0477270126342773, "memory(GiB)": 72.85, "step": 58905, "token_acc": 0.5376344086021505, "train_speed(iter/s)": 0.672328 }, { "epoch": 2.5238850092112592, "grad_norm": 6.498673439025879, "learning_rate": 4.928061208342942e-05, "loss": 2.1162416458129885, "memory(GiB)": 72.85, "step": 58910, "token_acc": 0.5457875457875457, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.524099224540508, "grad_norm": 5.679749965667725, "learning_rate": 4.927388301351249e-05, "loss": 2.298786735534668, "memory(GiB)": 72.85, "step": 58915, "token_acc": 0.5049833887043189, "train_speed(iter/s)": 0.67233 
}, { "epoch": 2.524313439869757, "grad_norm": 6.023911476135254, "learning_rate": 4.926715395674984e-05, "loss": 2.2690587997436524, "memory(GiB)": 72.85, "step": 58920, "token_acc": 0.5551724137931034, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.524527655199006, "grad_norm": 4.338589191436768, "learning_rate": 4.9260424913263394e-05, "loss": 2.531778335571289, "memory(GiB)": 72.85, "step": 58925, "token_acc": 0.5052631578947369, "train_speed(iter/s)": 0.672327 }, { "epoch": 2.524741870528255, "grad_norm": 6.25024938583374, "learning_rate": 4.925369588317503e-05, "loss": 2.220503807067871, "memory(GiB)": 72.85, "step": 58930, "token_acc": 0.5387096774193548, "train_speed(iter/s)": 0.672323 }, { "epoch": 2.5249560858575038, "grad_norm": 5.295703887939453, "learning_rate": 4.924696686660668e-05, "loss": 2.3533592224121094, "memory(GiB)": 72.85, "step": 58935, "token_acc": 0.5091575091575091, "train_speed(iter/s)": 0.672317 }, { "epoch": 2.525170301186753, "grad_norm": 5.243861198425293, "learning_rate": 4.9240237863680226e-05, "loss": 2.112688446044922, "memory(GiB)": 72.85, "step": 58940, "token_acc": 0.5342960288808665, "train_speed(iter/s)": 0.672302 }, { "epoch": 2.525384516516002, "grad_norm": 4.681496620178223, "learning_rate": 4.923350887451757e-05, "loss": 2.3208425521850584, "memory(GiB)": 72.85, "step": 58945, "token_acc": 0.4965277777777778, "train_speed(iter/s)": 0.672307 }, { "epoch": 2.5255987318452506, "grad_norm": 4.319163799285889, "learning_rate": 4.9226779899240626e-05, "loss": 1.907087516784668, "memory(GiB)": 72.85, "step": 58950, "token_acc": 0.5441176470588235, "train_speed(iter/s)": 0.672291 }, { "epoch": 2.5258129471745, "grad_norm": 4.30533504486084, "learning_rate": 4.9220050937971275e-05, "loss": 2.030270004272461, "memory(GiB)": 72.85, "step": 58955, "token_acc": 0.5401929260450161, "train_speed(iter/s)": 0.672289 }, { "epoch": 2.5260271625037487, "grad_norm": 4.43306303024292, "learning_rate": 4.921332199083145e-05, "loss": 
1.9853321075439454, "memory(GiB)": 72.85, "step": 58960, "token_acc": 0.5271317829457365, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.5262413778329975, "grad_norm": 3.6493756771087646, "learning_rate": 4.920659305794302e-05, "loss": 2.3825761795043947, "memory(GiB)": 72.85, "step": 58965, "token_acc": 0.5210084033613446, "train_speed(iter/s)": 0.672271 }, { "epoch": 2.5264555931622468, "grad_norm": 5.637499809265137, "learning_rate": 4.919986413942792e-05, "loss": 2.279475975036621, "memory(GiB)": 72.85, "step": 58970, "token_acc": 0.5486381322957199, "train_speed(iter/s)": 0.672269 }, { "epoch": 2.5266698084914956, "grad_norm": 4.827823638916016, "learning_rate": 4.919313523540802e-05, "loss": 1.9316280364990235, "memory(GiB)": 72.85, "step": 58975, "token_acc": 0.580952380952381, "train_speed(iter/s)": 0.672275 }, { "epoch": 2.5268840238207444, "grad_norm": 6.127791404724121, "learning_rate": 4.918640634600524e-05, "loss": 2.423272705078125, "memory(GiB)": 72.85, "step": 58980, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.672281 }, { "epoch": 2.5270982391499937, "grad_norm": 5.656960487365723, "learning_rate": 4.9179677471341475e-05, "loss": 2.221286392211914, "memory(GiB)": 72.85, "step": 58985, "token_acc": 0.5300751879699248, "train_speed(iter/s)": 0.672279 }, { "epoch": 2.5273124544792425, "grad_norm": 6.569987773895264, "learning_rate": 4.917294861153861e-05, "loss": 1.9231178283691406, "memory(GiB)": 72.85, "step": 58990, "token_acc": 0.5719063545150501, "train_speed(iter/s)": 0.67228 }, { "epoch": 2.5275266698084913, "grad_norm": 5.041792869567871, "learning_rate": 4.9166219766718566e-05, "loss": 2.3406309127807616, "memory(GiB)": 72.85, "step": 58995, "token_acc": 0.5019157088122606, "train_speed(iter/s)": 0.672277 }, { "epoch": 2.5277408851377405, "grad_norm": 4.785804748535156, "learning_rate": 4.915949093700323e-05, "loss": 2.4577674865722656, "memory(GiB)": 72.85, "step": 59000, "token_acc": 0.5174825174825175, "train_speed(iter/s)": 
0.672278 }, { "epoch": 2.5277408851377405, "eval_loss": 1.8895689249038696, "eval_runtime": 16.0979, "eval_samples_per_second": 6.212, "eval_steps_per_second": 6.212, "eval_token_acc": 0.5291723202170964, "step": 59000 }, { "epoch": 2.5279551004669893, "grad_norm": 5.288638114929199, "learning_rate": 4.91527621225145e-05, "loss": 2.271147918701172, "memory(GiB)": 72.85, "step": 59005, "token_acc": 0.5197628458498024, "train_speed(iter/s)": 0.672121 }, { "epoch": 2.528169315796238, "grad_norm": 5.68010139465332, "learning_rate": 4.9146033323374286e-05, "loss": 2.4645130157470705, "memory(GiB)": 72.85, "step": 59010, "token_acc": 0.46905537459283386, "train_speed(iter/s)": 0.672125 }, { "epoch": 2.5283835311254874, "grad_norm": 3.660691261291504, "learning_rate": 4.913930453970447e-05, "loss": 2.0193716049194337, "memory(GiB)": 72.85, "step": 59015, "token_acc": 0.5400696864111498, "train_speed(iter/s)": 0.672135 }, { "epoch": 2.5285977464547362, "grad_norm": 5.506502628326416, "learning_rate": 4.913257577162697e-05, "loss": 2.219690132141113, "memory(GiB)": 72.85, "step": 59020, "token_acc": 0.5420875420875421, "train_speed(iter/s)": 0.672144 }, { "epoch": 2.528811961783985, "grad_norm": 4.454925060272217, "learning_rate": 4.912584701926367e-05, "loss": 2.4290721893310545, "memory(GiB)": 72.85, "step": 59025, "token_acc": 0.49110320284697506, "train_speed(iter/s)": 0.672153 }, { "epoch": 2.5290261771132343, "grad_norm": 5.352949619293213, "learning_rate": 4.911911828273645e-05, "loss": 2.5268789291381837, "memory(GiB)": 72.85, "step": 59030, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.672161 }, { "epoch": 2.529240392442483, "grad_norm": 4.949173927307129, "learning_rate": 4.911238956216725e-05, "loss": 2.319818115234375, "memory(GiB)": 72.85, "step": 59035, "token_acc": 0.531496062992126, "train_speed(iter/s)": 0.672168 }, { "epoch": 2.529454607771732, "grad_norm": 5.184187412261963, "learning_rate": 4.9105660857677946e-05, "loss": 2.208622360229492, 
"memory(GiB)": 72.85, "step": 59040, "token_acc": 0.5033783783783784, "train_speed(iter/s)": 0.672154 }, { "epoch": 2.529668823100981, "grad_norm": 4.8776774406433105, "learning_rate": 4.909893216939044e-05, "loss": 2.2986135482788086, "memory(GiB)": 72.85, "step": 59045, "token_acc": 0.5268456375838926, "train_speed(iter/s)": 0.672161 }, { "epoch": 2.52988303843023, "grad_norm": 4.012123107910156, "learning_rate": 4.90922034974266e-05, "loss": 2.3573768615722654, "memory(GiB)": 72.85, "step": 59050, "token_acc": 0.5, "train_speed(iter/s)": 0.672169 }, { "epoch": 2.530097253759479, "grad_norm": 4.763365268707275, "learning_rate": 4.9085474841908376e-05, "loss": 2.337665557861328, "memory(GiB)": 72.85, "step": 59055, "token_acc": 0.4968152866242038, "train_speed(iter/s)": 0.672177 }, { "epoch": 2.530311469088728, "grad_norm": 4.284016132354736, "learning_rate": 4.907874620295761e-05, "loss": 2.3091188430786134, "memory(GiB)": 72.85, "step": 59060, "token_acc": 0.4825174825174825, "train_speed(iter/s)": 0.672179 }, { "epoch": 2.530525684417977, "grad_norm": 6.1875457763671875, "learning_rate": 4.907201758069623e-05, "loss": 2.1687074661254884, "memory(GiB)": 72.85, "step": 59065, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.67218 }, { "epoch": 2.5307398997472257, "grad_norm": 4.981008529663086, "learning_rate": 4.906528897524613e-05, "loss": 2.488585662841797, "memory(GiB)": 72.85, "step": 59070, "token_acc": 0.4826388888888889, "train_speed(iter/s)": 0.672178 }, { "epoch": 2.530954115076475, "grad_norm": 3.4954521656036377, "learning_rate": 4.905856038672918e-05, "loss": 2.304584503173828, "memory(GiB)": 72.85, "step": 59075, "token_acc": 0.532520325203252, "train_speed(iter/s)": 0.672176 }, { "epoch": 2.5311683304057238, "grad_norm": 5.085925579071045, "learning_rate": 4.90518318152673e-05, "loss": 2.4923612594604494, "memory(GiB)": 72.85, "step": 59080, "token_acc": 0.490625, "train_speed(iter/s)": 0.672187 }, { "epoch": 2.5313825457349726, 
"grad_norm": 3.8521130084991455, "learning_rate": 4.904510326098238e-05, "loss": 2.0653383255004885, "memory(GiB)": 72.85, "step": 59085, "token_acc": 0.518840579710145, "train_speed(iter/s)": 0.672201 }, { "epoch": 2.531596761064222, "grad_norm": 6.4438700675964355, "learning_rate": 4.9038374723996306e-05, "loss": 2.012457847595215, "memory(GiB)": 72.85, "step": 59090, "token_acc": 0.5364963503649635, "train_speed(iter/s)": 0.672203 }, { "epoch": 2.5318109763934706, "grad_norm": 4.655745506286621, "learning_rate": 4.903164620443098e-05, "loss": 2.231861877441406, "memory(GiB)": 72.85, "step": 59095, "token_acc": 0.5076452599388379, "train_speed(iter/s)": 0.672201 }, { "epoch": 2.5320251917227194, "grad_norm": 5.362215518951416, "learning_rate": 4.902491770240827e-05, "loss": 2.3357975006103517, "memory(GiB)": 72.85, "step": 59100, "token_acc": 0.4970414201183432, "train_speed(iter/s)": 0.672206 }, { "epoch": 2.5322394070519687, "grad_norm": 4.677716255187988, "learning_rate": 4.9018189218050114e-05, "loss": 2.119703483581543, "memory(GiB)": 72.85, "step": 59105, "token_acc": 0.5634328358208955, "train_speed(iter/s)": 0.672197 }, { "epoch": 2.5324536223812175, "grad_norm": 5.124389171600342, "learning_rate": 4.901146075147837e-05, "loss": 2.2536251068115236, "memory(GiB)": 72.85, "step": 59110, "token_acc": 0.532871972318339, "train_speed(iter/s)": 0.672209 }, { "epoch": 2.5326678377104663, "grad_norm": 8.483489036560059, "learning_rate": 4.9004732302814946e-05, "loss": 2.4313207626342774, "memory(GiB)": 72.85, "step": 59115, "token_acc": 0.4820717131474104, "train_speed(iter/s)": 0.672194 }, { "epoch": 2.5328820530397156, "grad_norm": 4.82652473449707, "learning_rate": 4.899800387218173e-05, "loss": 2.203529167175293, "memory(GiB)": 72.85, "step": 59120, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.672195 }, { "epoch": 2.5330962683689644, "grad_norm": 4.932329177856445, "learning_rate": 4.899127545970062e-05, "loss": 2.640353775024414, "memory(GiB)": 
72.85, "step": 59125, "token_acc": 0.45514950166112955, "train_speed(iter/s)": 0.672188 }, { "epoch": 2.533310483698213, "grad_norm": 3.909890651702881, "learning_rate": 4.8984547065493496e-05, "loss": 2.3868289947509767, "memory(GiB)": 72.85, "step": 59130, "token_acc": 0.49169435215946844, "train_speed(iter/s)": 0.672197 }, { "epoch": 2.5335246990274625, "grad_norm": 4.168695449829102, "learning_rate": 4.897781868968225e-05, "loss": 2.325079917907715, "memory(GiB)": 72.85, "step": 59135, "token_acc": 0.5141955835962145, "train_speed(iter/s)": 0.672199 }, { "epoch": 2.5337389143567113, "grad_norm": 4.874745845794678, "learning_rate": 4.897109033238879e-05, "loss": 2.559918212890625, "memory(GiB)": 72.85, "step": 59140, "token_acc": 0.4452054794520548, "train_speed(iter/s)": 0.672203 }, { "epoch": 2.53395312968596, "grad_norm": 5.602721214294434, "learning_rate": 4.896436199373499e-05, "loss": 2.190282440185547, "memory(GiB)": 72.85, "step": 59145, "token_acc": 0.5136986301369864, "train_speed(iter/s)": 0.672186 }, { "epoch": 2.5341673450152093, "grad_norm": 5.202192306518555, "learning_rate": 4.895763367384273e-05, "loss": 2.0741695404052733, "memory(GiB)": 72.85, "step": 59150, "token_acc": 0.49838187702265374, "train_speed(iter/s)": 0.672196 }, { "epoch": 2.534381560344458, "grad_norm": 5.832893371582031, "learning_rate": 4.895090537283393e-05, "loss": 2.4069887161254884, "memory(GiB)": 72.85, "step": 59155, "token_acc": 0.48771929824561405, "train_speed(iter/s)": 0.672204 }, { "epoch": 2.534595775673707, "grad_norm": 5.535271167755127, "learning_rate": 4.894417709083045e-05, "loss": 2.1743398666381837, "memory(GiB)": 72.85, "step": 59160, "token_acc": 0.5748987854251012, "train_speed(iter/s)": 0.672211 }, { "epoch": 2.5348099910029562, "grad_norm": 4.8627142906188965, "learning_rate": 4.8938794478993415e-05, "loss": 2.1677696228027346, "memory(GiB)": 72.85, "step": 59165, "token_acc": 0.5055762081784386, "train_speed(iter/s)": 0.672214 }, { "epoch": 
2.535024206332205, "grad_norm": 3.6725714206695557, "learning_rate": 4.8932066231506714e-05, "loss": 1.9256912231445313, "memory(GiB)": 72.85, "step": 59170, "token_acc": 0.5355805243445693, "train_speed(iter/s)": 0.672216 }, { "epoch": 2.535238421661454, "grad_norm": 4.193413257598877, "learning_rate": 4.892533800336661e-05, "loss": 2.290680503845215, "memory(GiB)": 72.85, "step": 59175, "token_acc": 0.49303621169916434, "train_speed(iter/s)": 0.672211 }, { "epoch": 2.535452636990703, "grad_norm": 7.00107479095459, "learning_rate": 4.891860979469503e-05, "loss": 2.1471446990966796, "memory(GiB)": 72.85, "step": 59180, "token_acc": 0.5433333333333333, "train_speed(iter/s)": 0.672211 }, { "epoch": 2.535666852319952, "grad_norm": 6.8273844718933105, "learning_rate": 4.891188160561385e-05, "loss": 2.2138034820556642, "memory(GiB)": 72.85, "step": 59185, "token_acc": 0.5225563909774437, "train_speed(iter/s)": 0.67222 }, { "epoch": 2.5358810676492007, "grad_norm": 4.264349937438965, "learning_rate": 4.890515343624495e-05, "loss": 2.343325614929199, "memory(GiB)": 72.85, "step": 59190, "token_acc": 0.4921259842519685, "train_speed(iter/s)": 0.672235 }, { "epoch": 2.53609528297845, "grad_norm": 4.3727707862854, "learning_rate": 4.889842528671024e-05, "loss": 2.3643112182617188, "memory(GiB)": 72.85, "step": 59195, "token_acc": 0.4968553459119497, "train_speed(iter/s)": 0.672249 }, { "epoch": 2.536309498307699, "grad_norm": 4.086714744567871, "learning_rate": 4.889169715713157e-05, "loss": 2.1703680038452147, "memory(GiB)": 72.85, "step": 59200, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.672259 }, { "epoch": 2.5365237136369476, "grad_norm": 4.432520389556885, "learning_rate": 4.888496904763085e-05, "loss": 2.0408645629882813, "memory(GiB)": 72.85, "step": 59205, "token_acc": 0.5072992700729927, "train_speed(iter/s)": 0.672259 }, { "epoch": 2.536737928966197, "grad_norm": 5.725643634796143, "learning_rate": 4.887824095832997e-05, "loss": 2.2051095962524414, 
"memory(GiB)": 72.85, "step": 59210, "token_acc": 0.5089605734767025, "train_speed(iter/s)": 0.672249 }, { "epoch": 2.5369521442954457, "grad_norm": 5.831303119659424, "learning_rate": 4.887151288935079e-05, "loss": 2.194508361816406, "memory(GiB)": 72.85, "step": 59215, "token_acc": 0.5605536332179931, "train_speed(iter/s)": 0.672255 }, { "epoch": 2.5371663596246945, "grad_norm": 4.106167316436768, "learning_rate": 4.88647848408152e-05, "loss": 2.1690067291259765, "memory(GiB)": 72.85, "step": 59220, "token_acc": 0.5368421052631579, "train_speed(iter/s)": 0.67225 }, { "epoch": 2.5373805749539438, "grad_norm": 4.732247829437256, "learning_rate": 4.885805681284513e-05, "loss": 2.4722023010253906, "memory(GiB)": 72.85, "step": 59225, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.672258 }, { "epoch": 2.5375947902831926, "grad_norm": 3.5883684158325195, "learning_rate": 4.885132880556243e-05, "loss": 2.4124435424804687, "memory(GiB)": 72.85, "step": 59230, "token_acc": 0.49466192170818507, "train_speed(iter/s)": 0.672272 }, { "epoch": 2.5378090056124414, "grad_norm": 5.980321884155273, "learning_rate": 4.884460081908897e-05, "loss": 2.2283645629882813, "memory(GiB)": 72.85, "step": 59235, "token_acc": 0.5, "train_speed(iter/s)": 0.672282 }, { "epoch": 2.5380232209416906, "grad_norm": 5.207921981811523, "learning_rate": 4.883787285354666e-05, "loss": 2.1280645370483398, "memory(GiB)": 72.85, "step": 59240, "token_acc": 0.5407407407407407, "train_speed(iter/s)": 0.672291 }, { "epoch": 2.5382374362709395, "grad_norm": 5.168096542358398, "learning_rate": 4.883114490905737e-05, "loss": 2.2891605377197264, "memory(GiB)": 72.85, "step": 59245, "token_acc": 0.5527156549520766, "train_speed(iter/s)": 0.672305 }, { "epoch": 2.5384516516001883, "grad_norm": 5.982285022735596, "learning_rate": 4.8824416985742985e-05, "loss": 2.1933460235595703, "memory(GiB)": 72.85, "step": 59250, "token_acc": 0.5321100917431193, "train_speed(iter/s)": 0.672298 }, { "epoch": 
2.5386658669294375, "grad_norm": 4.556720733642578, "learning_rate": 4.88176890837254e-05, "loss": 2.637355613708496, "memory(GiB)": 72.85, "step": 59255, "token_acc": 0.4538653366583541, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.5388800822586863, "grad_norm": 5.8879523277282715, "learning_rate": 4.8810961203126474e-05, "loss": 2.2382415771484374, "memory(GiB)": 72.85, "step": 59260, "token_acc": 0.5355805243445693, "train_speed(iter/s)": 0.672313 }, { "epoch": 2.539094297587935, "grad_norm": 4.294835090637207, "learning_rate": 4.88042333440681e-05, "loss": 1.7693174362182618, "memory(GiB)": 72.85, "step": 59265, "token_acc": 0.5903614457831325, "train_speed(iter/s)": 0.672302 }, { "epoch": 2.5393085129171844, "grad_norm": 5.008923530578613, "learning_rate": 4.879750550667218e-05, "loss": 2.181125259399414, "memory(GiB)": 72.85, "step": 59270, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.6723 }, { "epoch": 2.539522728246433, "grad_norm": 5.003815650939941, "learning_rate": 4.8790777691060555e-05, "loss": 2.222960662841797, "memory(GiB)": 72.85, "step": 59275, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.672297 }, { "epoch": 2.539736943575682, "grad_norm": 6.294053554534912, "learning_rate": 4.878404989735514e-05, "loss": 1.9577232360839845, "memory(GiB)": 72.85, "step": 59280, "token_acc": 0.6016597510373444, "train_speed(iter/s)": 0.6723 }, { "epoch": 2.5399511589049313, "grad_norm": 5.578244209289551, "learning_rate": 4.877732212567779e-05, "loss": 2.1348339080810548, "memory(GiB)": 72.85, "step": 59285, "token_acc": 0.5301724137931034, "train_speed(iter/s)": 0.672321 }, { "epoch": 2.54016537423418, "grad_norm": 3.995251417160034, "learning_rate": 4.8770594376150405e-05, "loss": 2.1207813262939452, "memory(GiB)": 72.85, "step": 59290, "token_acc": 0.5379537953795379, "train_speed(iter/s)": 0.672326 }, { "epoch": 2.540379589563429, "grad_norm": 4.512415885925293, "learning_rate": 4.8763866648894834e-05, "loss": 2.3273746490478517, 
"memory(GiB)": 72.85, "step": 59295, "token_acc": 0.5460992907801419, "train_speed(iter/s)": 0.67233 }, { "epoch": 2.540593804892678, "grad_norm": 4.807293891906738, "learning_rate": 4.8757138944033004e-05, "loss": 2.3518245697021483, "memory(GiB)": 72.85, "step": 59300, "token_acc": 0.4642857142857143, "train_speed(iter/s)": 0.672334 }, { "epoch": 2.540808020221927, "grad_norm": 4.170900344848633, "learning_rate": 4.875041126168676e-05, "loss": 2.315458869934082, "memory(GiB)": 72.85, "step": 59305, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.672339 }, { "epoch": 2.541022235551176, "grad_norm": 4.570230007171631, "learning_rate": 4.874368360197799e-05, "loss": 2.12947940826416, "memory(GiB)": 72.85, "step": 59310, "token_acc": 0.5060240963855421, "train_speed(iter/s)": 0.67235 }, { "epoch": 2.541236450880425, "grad_norm": 4.194812297821045, "learning_rate": 4.8736955965028575e-05, "loss": 2.137498474121094, "memory(GiB)": 72.85, "step": 59315, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.672356 }, { "epoch": 2.541450666209674, "grad_norm": 5.879147529602051, "learning_rate": 4.873022835096037e-05, "loss": 2.2339996337890624, "memory(GiB)": 72.85, "step": 59320, "token_acc": 0.5090252707581228, "train_speed(iter/s)": 0.672343 }, { "epoch": 2.5416648815389227, "grad_norm": 5.821617603302002, "learning_rate": 4.872350075989529e-05, "loss": 2.3953929901123048, "memory(GiB)": 72.85, "step": 59325, "token_acc": 0.4808362369337979, "train_speed(iter/s)": 0.672338 }, { "epoch": 2.541879096868172, "grad_norm": 5.078064441680908, "learning_rate": 4.8716773191955176e-05, "loss": 2.095162773132324, "memory(GiB)": 72.85, "step": 59330, "token_acc": 0.5494505494505495, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.5420933121974207, "grad_norm": 4.852115154266357, "learning_rate": 4.871004564726193e-05, "loss": 2.2327152252197267, "memory(GiB)": 72.85, "step": 59335, "token_acc": 0.5445544554455446, "train_speed(iter/s)": 0.67232 }, { "epoch": 
2.5423075275266696, "grad_norm": 5.461295127868652, "learning_rate": 4.870331812593742e-05, "loss": 2.5368066787719727, "memory(GiB)": 72.85, "step": 59340, "token_acc": 0.4550898203592814, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.542521742855919, "grad_norm": 5.12766695022583, "learning_rate": 4.86965906281035e-05, "loss": 2.279856491088867, "memory(GiB)": 72.85, "step": 59345, "token_acc": 0.4911660777385159, "train_speed(iter/s)": 0.672328 }, { "epoch": 2.5427359581851676, "grad_norm": 4.862044334411621, "learning_rate": 4.868986315388209e-05, "loss": 2.272910308837891, "memory(GiB)": 72.85, "step": 59350, "token_acc": 0.5134328358208955, "train_speed(iter/s)": 0.672321 }, { "epoch": 2.5429501735144164, "grad_norm": 4.854152202606201, "learning_rate": 4.868313570339502e-05, "loss": 2.0444175720214846, "memory(GiB)": 72.85, "step": 59355, "token_acc": 0.5354609929078015, "train_speed(iter/s)": 0.67232 }, { "epoch": 2.5431643888436657, "grad_norm": 5.254620552062988, "learning_rate": 4.867640827676418e-05, "loss": 2.366378593444824, "memory(GiB)": 72.85, "step": 59360, "token_acc": 0.4812286689419795, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.5433786041729145, "grad_norm": 4.982703685760498, "learning_rate": 4.866968087411146e-05, "loss": 2.4366218566894533, "memory(GiB)": 72.85, "step": 59365, "token_acc": 0.48307692307692307, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.5435928195021633, "grad_norm": 5.08815336227417, "learning_rate": 4.866295349555869e-05, "loss": 2.169622039794922, "memory(GiB)": 72.85, "step": 59370, "token_acc": 0.5267857142857143, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.5438070348314126, "grad_norm": 7.514886379241943, "learning_rate": 4.86562261412278e-05, "loss": 2.4245609283447265, "memory(GiB)": 72.85, "step": 59375, "token_acc": 0.44654088050314467, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.5440212501606614, "grad_norm": 4.428889274597168, "learning_rate": 4.864949881124062e-05, "loss": 2.385201072692871, 
"memory(GiB)": 72.85, "step": 59380, "token_acc": 0.4772727272727273, "train_speed(iter/s)": 0.672326 }, { "epoch": 2.54423546548991, "grad_norm": 4.368238925933838, "learning_rate": 4.8642771505719044e-05, "loss": 2.311097526550293, "memory(GiB)": 72.85, "step": 59385, "token_acc": 0.5016393442622951, "train_speed(iter/s)": 0.67231 }, { "epoch": 2.5444496808191595, "grad_norm": 5.305829048156738, "learning_rate": 4.8636044224784926e-05, "loss": 2.006822395324707, "memory(GiB)": 72.85, "step": 59390, "token_acc": 0.5664556962025317, "train_speed(iter/s)": 0.67232 }, { "epoch": 2.5446638961484083, "grad_norm": 4.734959125518799, "learning_rate": 4.8629316968560155e-05, "loss": 2.116343879699707, "memory(GiB)": 72.85, "step": 59395, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.67233 }, { "epoch": 2.544878111477657, "grad_norm": 5.0271172523498535, "learning_rate": 4.8622589737166595e-05, "loss": 2.092516326904297, "memory(GiB)": 72.85, "step": 59400, "token_acc": 0.5212765957446809, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.5450923268069063, "grad_norm": 5.0667805671691895, "learning_rate": 4.861586253072611e-05, "loss": 2.3794763565063475, "memory(GiB)": 72.85, "step": 59405, "token_acc": 0.48514851485148514, "train_speed(iter/s)": 0.672341 }, { "epoch": 2.545306542136155, "grad_norm": 5.390570640563965, "learning_rate": 4.860913534936057e-05, "loss": 2.1566572189331055, "memory(GiB)": 72.85, "step": 59410, "token_acc": 0.5163934426229508, "train_speed(iter/s)": 0.672357 }, { "epoch": 2.545520757465404, "grad_norm": 4.008951663970947, "learning_rate": 4.860240819319186e-05, "loss": 2.0335020065307616, "memory(GiB)": 72.85, "step": 59415, "token_acc": 0.525, "train_speed(iter/s)": 0.672355 }, { "epoch": 2.545734972794653, "grad_norm": 5.648533344268799, "learning_rate": 4.859568106234182e-05, "loss": 2.5261112213134767, "memory(GiB)": 72.85, "step": 59420, "token_acc": 0.46534653465346537, "train_speed(iter/s)": 0.672353 }, { "epoch": 
2.545949188123902, "grad_norm": 4.731817722320557, "learning_rate": 4.858895395693235e-05, "loss": 2.271566390991211, "memory(GiB)": 72.85, "step": 59425, "token_acc": 0.5224913494809689, "train_speed(iter/s)": 0.672354 }, { "epoch": 2.546163403453151, "grad_norm": 4.641002655029297, "learning_rate": 4.858222687708529e-05, "loss": 2.3663591384887694, "memory(GiB)": 72.85, "step": 59430, "token_acc": 0.5247148288973384, "train_speed(iter/s)": 0.672361 }, { "epoch": 2.5463776187824, "grad_norm": 3.7456233501434326, "learning_rate": 4.8575499822922534e-05, "loss": 2.1919122695922852, "memory(GiB)": 72.85, "step": 59435, "token_acc": 0.5075528700906344, "train_speed(iter/s)": 0.672363 }, { "epoch": 2.546591834111649, "grad_norm": 5.027740001678467, "learning_rate": 4.856877279456593e-05, "loss": 2.492860221862793, "memory(GiB)": 72.85, "step": 59440, "token_acc": 0.4906832298136646, "train_speed(iter/s)": 0.67238 }, { "epoch": 2.5468060494408977, "grad_norm": 6.212244987487793, "learning_rate": 4.856204579213733e-05, "loss": 2.208186721801758, "memory(GiB)": 72.85, "step": 59445, "token_acc": 0.48109965635738833, "train_speed(iter/s)": 0.672382 }, { "epoch": 2.547020264770147, "grad_norm": 4.632501602172852, "learning_rate": 4.855531881575863e-05, "loss": 2.056914520263672, "memory(GiB)": 72.85, "step": 59450, "token_acc": 0.5468164794007491, "train_speed(iter/s)": 0.672396 }, { "epoch": 2.547234480099396, "grad_norm": 4.76164436340332, "learning_rate": 4.8548591865551696e-05, "loss": 2.083755302429199, "memory(GiB)": 72.85, "step": 59455, "token_acc": 0.5341365461847389, "train_speed(iter/s)": 0.672396 }, { "epoch": 2.5474486954286446, "grad_norm": 4.985049247741699, "learning_rate": 4.854186494163838e-05, "loss": 1.7957626342773438, "memory(GiB)": 72.85, "step": 59460, "token_acc": 0.5606694560669456, "train_speed(iter/s)": 0.672394 }, { "epoch": 2.547662910757894, "grad_norm": 5.480484485626221, "learning_rate": 4.853513804414054e-05, "loss": 2.0539846420288086, 
"memory(GiB)": 72.85, "step": 59465, "token_acc": 0.49454545454545457, "train_speed(iter/s)": 0.672398 }, { "epoch": 2.5478771260871427, "grad_norm": 5.259427547454834, "learning_rate": 4.852841117318006e-05, "loss": 2.2348186492919924, "memory(GiB)": 72.85, "step": 59470, "token_acc": 0.5078864353312302, "train_speed(iter/s)": 0.6724 }, { "epoch": 2.5480913414163915, "grad_norm": 4.260805130004883, "learning_rate": 4.852168432887877e-05, "loss": 2.166149711608887, "memory(GiB)": 72.85, "step": 59475, "token_acc": 0.5062761506276151, "train_speed(iter/s)": 0.672394 }, { "epoch": 2.5483055567456407, "grad_norm": 6.567364692687988, "learning_rate": 4.851495751135858e-05, "loss": 2.161172103881836, "memory(GiB)": 72.85, "step": 59480, "token_acc": 0.5321428571428571, "train_speed(iter/s)": 0.672393 }, { "epoch": 2.5485197720748896, "grad_norm": 4.447934627532959, "learning_rate": 4.8508230720741324e-05, "loss": 2.2529882431030273, "memory(GiB)": 72.85, "step": 59485, "token_acc": 0.4830508474576271, "train_speed(iter/s)": 0.672403 }, { "epoch": 2.5487339874041384, "grad_norm": 5.252032279968262, "learning_rate": 4.850150395714886e-05, "loss": 2.1282840728759767, "memory(GiB)": 72.85, "step": 59490, "token_acc": 0.5111821086261981, "train_speed(iter/s)": 0.672403 }, { "epoch": 2.5489482027333876, "grad_norm": 4.827738285064697, "learning_rate": 4.8494777220703066e-05, "loss": 2.07320613861084, "memory(GiB)": 72.85, "step": 59495, "token_acc": 0.5227272727272727, "train_speed(iter/s)": 0.672388 }, { "epoch": 2.5491624180626364, "grad_norm": 5.877150058746338, "learning_rate": 4.848805051152579e-05, "loss": 2.055078315734863, "memory(GiB)": 72.85, "step": 59500, "token_acc": 0.5272727272727272, "train_speed(iter/s)": 0.672396 }, { "epoch": 2.5491624180626364, "eval_loss": 2.0085060596466064, "eval_runtime": 15.3655, "eval_samples_per_second": 6.508, "eval_steps_per_second": 6.508, "eval_token_acc": 0.5012562814070352, "step": 59500 }, { "epoch": 2.5493766333918852, 
"grad_norm": 4.733720779418945, "learning_rate": 4.848132382973889e-05, "loss": 2.135940361022949, "memory(GiB)": 72.85, "step": 59505, "token_acc": 0.5050784856879039, "train_speed(iter/s)": 0.672258 }, { "epoch": 2.5495908487211345, "grad_norm": 5.067681789398193, "learning_rate": 4.847459717546424e-05, "loss": 2.3844367980957033, "memory(GiB)": 72.85, "step": 59510, "token_acc": 0.45723684210526316, "train_speed(iter/s)": 0.67225 }, { "epoch": 2.5498050640503833, "grad_norm": 4.915318012237549, "learning_rate": 4.846787054882368e-05, "loss": 2.1601640701293947, "memory(GiB)": 72.85, "step": 59515, "token_acc": 0.5159010600706714, "train_speed(iter/s)": 0.672255 }, { "epoch": 2.550019279379632, "grad_norm": 4.104302406311035, "learning_rate": 4.8461143949939095e-05, "loss": 2.0435930252075196, "memory(GiB)": 72.85, "step": 59520, "token_acc": 0.5461847389558233, "train_speed(iter/s)": 0.672271 }, { "epoch": 2.5502334947088814, "grad_norm": 8.650044441223145, "learning_rate": 4.8454417378932327e-05, "loss": 2.616085433959961, "memory(GiB)": 72.85, "step": 59525, "token_acc": 0.467680608365019, "train_speed(iter/s)": 0.672275 }, { "epoch": 2.55044771003813, "grad_norm": 4.754047870635986, "learning_rate": 4.844769083592525e-05, "loss": 2.1810848236083986, "memory(GiB)": 72.85, "step": 59530, "token_acc": 0.4738562091503268, "train_speed(iter/s)": 0.672281 }, { "epoch": 2.550661925367379, "grad_norm": 6.929136276245117, "learning_rate": 4.84409643210397e-05, "loss": 2.403011703491211, "memory(GiB)": 72.85, "step": 59535, "token_acc": 0.4880546075085324, "train_speed(iter/s)": 0.672295 }, { "epoch": 2.5508761406966283, "grad_norm": 6.102960109710693, "learning_rate": 4.8434237834397546e-05, "loss": 2.2699405670166017, "memory(GiB)": 72.85, "step": 59540, "token_acc": 0.4745762711864407, "train_speed(iter/s)": 0.672298 }, { "epoch": 2.551090356025877, "grad_norm": 5.410344123840332, "learning_rate": 4.8427511376120644e-05, "loss": 2.1793903350830077, "memory(GiB)": 
72.85, "step": 59545, "token_acc": 0.5393700787401575, "train_speed(iter/s)": 0.672297 }, { "epoch": 2.5513045713551263, "grad_norm": 4.433379650115967, "learning_rate": 4.8420784946330835e-05, "loss": 2.366374969482422, "memory(GiB)": 72.85, "step": 59550, "token_acc": 0.5032051282051282, "train_speed(iter/s)": 0.672291 }, { "epoch": 2.551518786684375, "grad_norm": 3.9969043731689453, "learning_rate": 4.841405854515001e-05, "loss": 2.256134796142578, "memory(GiB)": 72.85, "step": 59555, "token_acc": 0.5347222222222222, "train_speed(iter/s)": 0.672287 }, { "epoch": 2.551733002013624, "grad_norm": 4.609010219573975, "learning_rate": 4.840733217269999e-05, "loss": 2.1648998260498047, "memory(GiB)": 72.85, "step": 59560, "token_acc": 0.5436046511627907, "train_speed(iter/s)": 0.672293 }, { "epoch": 2.551947217342873, "grad_norm": 4.906635284423828, "learning_rate": 4.840060582910264e-05, "loss": 1.9696788787841797, "memory(GiB)": 72.85, "step": 59565, "token_acc": 0.5791505791505791, "train_speed(iter/s)": 0.672294 }, { "epoch": 2.552161432672122, "grad_norm": 5.465246200561523, "learning_rate": 4.839387951447983e-05, "loss": 2.2592700958251952, "memory(GiB)": 72.85, "step": 59570, "token_acc": 0.5064102564102564, "train_speed(iter/s)": 0.672309 }, { "epoch": 2.552375648001371, "grad_norm": 6.619577884674072, "learning_rate": 4.838715322895338e-05, "loss": 2.4645389556884765, "memory(GiB)": 72.85, "step": 59575, "token_acc": 0.4896265560165975, "train_speed(iter/s)": 0.67231 }, { "epoch": 2.55258986333062, "grad_norm": 4.39768648147583, "learning_rate": 4.838042697264517e-05, "loss": 1.7662666320800782, "memory(GiB)": 72.85, "step": 59580, "token_acc": 0.5824175824175825, "train_speed(iter/s)": 0.672304 }, { "epoch": 2.552804078659869, "grad_norm": 4.773266792297363, "learning_rate": 4.837370074567705e-05, "loss": 2.154070281982422, "memory(GiB)": 72.85, "step": 59585, "token_acc": 0.5360501567398119, "train_speed(iter/s)": 0.672304 }, { "epoch": 2.5530182939891177, 
"grad_norm": 5.1262288093566895, "learning_rate": 4.8366974548170845e-05, "loss": 2.3855419158935547, "memory(GiB)": 72.85, "step": 59590, "token_acc": 0.48736462093862815, "train_speed(iter/s)": 0.672311 }, { "epoch": 2.553232509318367, "grad_norm": 6.07110071182251, "learning_rate": 4.836024838024844e-05, "loss": 2.104397201538086, "memory(GiB)": 72.85, "step": 59595, "token_acc": 0.5510204081632653, "train_speed(iter/s)": 0.672299 }, { "epoch": 2.553446724647616, "grad_norm": 6.136374473571777, "learning_rate": 4.8353522242031686e-05, "loss": 2.306556510925293, "memory(GiB)": 72.85, "step": 59600, "token_acc": 0.4878048780487805, "train_speed(iter/s)": 0.672296 }, { "epoch": 2.5536609399768646, "grad_norm": 4.819609642028809, "learning_rate": 4.834679613364242e-05, "loss": 2.3648147583007812, "memory(GiB)": 72.85, "step": 59605, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.672298 }, { "epoch": 2.553875155306114, "grad_norm": 4.397101402282715, "learning_rate": 4.834007005520248e-05, "loss": 2.269011688232422, "memory(GiB)": 72.85, "step": 59610, "token_acc": 0.5327380952380952, "train_speed(iter/s)": 0.672288 }, { "epoch": 2.5540893706353627, "grad_norm": 5.6546196937561035, "learning_rate": 4.8333344006833734e-05, "loss": 2.2312259674072266, "memory(GiB)": 72.85, "step": 59615, "token_acc": 0.49377593360995853, "train_speed(iter/s)": 0.672299 }, { "epoch": 2.5543035859646115, "grad_norm": 5.211958885192871, "learning_rate": 4.832661798865803e-05, "loss": 2.3105323791503904, "memory(GiB)": 72.85, "step": 59620, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672301 }, { "epoch": 2.5545178012938607, "grad_norm": 6.072474956512451, "learning_rate": 4.83198920007972e-05, "loss": 2.1445751190185547, "memory(GiB)": 72.85, "step": 59625, "token_acc": 0.564, "train_speed(iter/s)": 0.672295 }, { "epoch": 2.5547320166231096, "grad_norm": 6.309737682342529, "learning_rate": 4.831316604337311e-05, "loss": 2.336210823059082, "memory(GiB)": 72.85, 
"step": 59630, "token_acc": 0.48, "train_speed(iter/s)": 0.672272 }, { "epoch": 2.5549462319523584, "grad_norm": 5.1081767082214355, "learning_rate": 4.83064401165076e-05, "loss": 2.339630889892578, "memory(GiB)": 72.85, "step": 59635, "token_acc": 0.47232472324723246, "train_speed(iter/s)": 0.672276 }, { "epoch": 2.5551604472816076, "grad_norm": 4.8036274909973145, "learning_rate": 4.829971422032252e-05, "loss": 2.3339771270751952, "memory(GiB)": 72.85, "step": 59640, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.672276 }, { "epoch": 2.5553746626108564, "grad_norm": 5.135784149169922, "learning_rate": 4.8292988354939704e-05, "loss": 2.21433048248291, "memory(GiB)": 72.85, "step": 59645, "token_acc": 0.5, "train_speed(iter/s)": 0.672279 }, { "epoch": 2.5555888779401053, "grad_norm": 4.465998649597168, "learning_rate": 4.8286262520481006e-05, "loss": 2.3857868194580076, "memory(GiB)": 72.85, "step": 59650, "token_acc": 0.48355263157894735, "train_speed(iter/s)": 0.672287 }, { "epoch": 2.5558030932693545, "grad_norm": 5.290661811828613, "learning_rate": 4.8279536717068273e-05, "loss": 2.2291894912719727, "memory(GiB)": 72.85, "step": 59655, "token_acc": 0.484593837535014, "train_speed(iter/s)": 0.672296 }, { "epoch": 2.5560173085986033, "grad_norm": 4.926220893859863, "learning_rate": 4.8272810944823345e-05, "loss": 2.082405853271484, "memory(GiB)": 72.85, "step": 59660, "token_acc": 0.525974025974026, "train_speed(iter/s)": 0.672303 }, { "epoch": 2.556231523927852, "grad_norm": 4.27161979675293, "learning_rate": 4.826608520386806e-05, "loss": 2.3696165084838867, "memory(GiB)": 72.85, "step": 59665, "token_acc": 0.4855305466237942, "train_speed(iter/s)": 0.672291 }, { "epoch": 2.5564457392571014, "grad_norm": 6.895256519317627, "learning_rate": 4.825935949432426e-05, "loss": 2.4472253799438475, "memory(GiB)": 72.85, "step": 59670, "token_acc": 0.5032679738562091, "train_speed(iter/s)": 0.672302 }, { "epoch": 2.55665995458635, "grad_norm": 
3.749721050262451, "learning_rate": 4.8252633816313817e-05, "loss": 2.1545520782470704, "memory(GiB)": 72.85, "step": 59675, "token_acc": 0.54, "train_speed(iter/s)": 0.672302 }, { "epoch": 2.556874169915599, "grad_norm": 4.76104736328125, "learning_rate": 4.8245908169958543e-05, "loss": 2.389098358154297, "memory(GiB)": 72.85, "step": 59680, "token_acc": 0.49454545454545457, "train_speed(iter/s)": 0.672297 }, { "epoch": 2.5570883852448483, "grad_norm": 4.521883487701416, "learning_rate": 4.8239182555380294e-05, "loss": 1.9593677520751953, "memory(GiB)": 72.85, "step": 59685, "token_acc": 0.5650793650793651, "train_speed(iter/s)": 0.672308 }, { "epoch": 2.557302600574097, "grad_norm": 4.163865089416504, "learning_rate": 4.8232456972700906e-05, "loss": 2.2998025894165037, "memory(GiB)": 72.85, "step": 59690, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.672313 }, { "epoch": 2.557516815903346, "grad_norm": 5.231642246246338, "learning_rate": 4.82257314220422e-05, "loss": 2.2270246505737306, "memory(GiB)": 72.85, "step": 59695, "token_acc": 0.509090909090909, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.557731031232595, "grad_norm": 4.637354850769043, "learning_rate": 4.8219005903526054e-05, "loss": 2.2336896896362304, "memory(GiB)": 72.85, "step": 59700, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.557945246561844, "grad_norm": 4.160804271697998, "learning_rate": 4.8212280417274286e-05, "loss": 1.994462013244629, "memory(GiB)": 72.85, "step": 59705, "token_acc": 0.5390070921985816, "train_speed(iter/s)": 0.672328 }, { "epoch": 2.558159461891093, "grad_norm": 4.786468982696533, "learning_rate": 4.820555496340872e-05, "loss": 2.4403629302978516, "memory(GiB)": 72.85, "step": 59710, "token_acc": 0.49226006191950467, "train_speed(iter/s)": 0.672333 }, { "epoch": 2.558373677220342, "grad_norm": 6.689545631408691, "learning_rate": 4.819882954205123e-05, "loss": 2.1326267242431642, "memory(GiB)": 72.85, "step": 59715, 
"token_acc": 0.5298804780876494, "train_speed(iter/s)": 0.67234 }, { "epoch": 2.558587892549591, "grad_norm": 4.549208641052246, "learning_rate": 4.819210415332361e-05, "loss": 2.3138076782226564, "memory(GiB)": 72.85, "step": 59720, "token_acc": 0.5197568389057751, "train_speed(iter/s)": 0.67234 }, { "epoch": 2.5588021078788397, "grad_norm": 4.929500579833984, "learning_rate": 4.818537879734774e-05, "loss": 2.2617889404296876, "memory(GiB)": 72.85, "step": 59725, "token_acc": 0.5159010600706714, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.559016323208089, "grad_norm": 4.532209396362305, "learning_rate": 4.8178653474245424e-05, "loss": 2.250065040588379, "memory(GiB)": 72.85, "step": 59730, "token_acc": 0.4861111111111111, "train_speed(iter/s)": 0.672334 }, { "epoch": 2.5592305385373377, "grad_norm": 5.338951587677002, "learning_rate": 4.817192818413851e-05, "loss": 2.3229900360107423, "memory(GiB)": 72.85, "step": 59735, "token_acc": 0.4967741935483871, "train_speed(iter/s)": 0.67234 }, { "epoch": 2.5594447538665865, "grad_norm": 5.2919535636901855, "learning_rate": 4.81652029271488e-05, "loss": 2.438116264343262, "memory(GiB)": 72.85, "step": 59740, "token_acc": 0.47601476014760147, "train_speed(iter/s)": 0.672342 }, { "epoch": 2.559658969195836, "grad_norm": 5.160571575164795, "learning_rate": 4.81584777033982e-05, "loss": 2.3927022933959963, "memory(GiB)": 72.85, "step": 59745, "token_acc": 0.49473684210526314, "train_speed(iter/s)": 0.67233 }, { "epoch": 2.5598731845250846, "grad_norm": 5.415706634521484, "learning_rate": 4.8151752513008504e-05, "loss": 2.329384994506836, "memory(GiB)": 72.85, "step": 59750, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.672335 }, { "epoch": 2.5600873998543334, "grad_norm": 4.804214954376221, "learning_rate": 4.814502735610153e-05, "loss": 2.4722084045410155, "memory(GiB)": 72.85, "step": 59755, "token_acc": 0.46938775510204084, "train_speed(iter/s)": 0.672338 }, { "epoch": 2.5603016151835827, "grad_norm": 
4.827079772949219, "learning_rate": 4.813830223279914e-05, "loss": 2.3913700103759767, "memory(GiB)": 72.85, "step": 59760, "token_acc": 0.47719298245614034, "train_speed(iter/s)": 0.672334 }, { "epoch": 2.5605158305128315, "grad_norm": 5.106709003448486, "learning_rate": 4.8131577143223136e-05, "loss": 1.8911777496337892, "memory(GiB)": 72.85, "step": 59765, "token_acc": 0.596244131455399, "train_speed(iter/s)": 0.67233 }, { "epoch": 2.5607300458420803, "grad_norm": 5.163140296936035, "learning_rate": 4.812485208749538e-05, "loss": 2.277009201049805, "memory(GiB)": 72.85, "step": 59770, "token_acc": 0.48623853211009177, "train_speed(iter/s)": 0.672327 }, { "epoch": 2.5609442611713296, "grad_norm": 4.326900482177734, "learning_rate": 4.811812706573768e-05, "loss": 2.1621450424194335, "memory(GiB)": 72.85, "step": 59775, "token_acc": 0.4959677419354839, "train_speed(iter/s)": 0.672335 }, { "epoch": 2.5611584765005784, "grad_norm": 4.6223530769348145, "learning_rate": 4.811140207807187e-05, "loss": 2.24145565032959, "memory(GiB)": 72.85, "step": 59780, "token_acc": 0.4884488448844885, "train_speed(iter/s)": 0.672334 }, { "epoch": 2.561372691829827, "grad_norm": 4.3503804206848145, "learning_rate": 4.810467712461979e-05, "loss": 2.0568288803100585, "memory(GiB)": 72.85, "step": 59785, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 0.672339 }, { "epoch": 2.5615869071590764, "grad_norm": 3.976311683654785, "learning_rate": 4.809795220550326e-05, "loss": 1.9860620498657227, "memory(GiB)": 72.85, "step": 59790, "token_acc": 0.5233644859813084, "train_speed(iter/s)": 0.672339 }, { "epoch": 2.5618011224883253, "grad_norm": 5.770867347717285, "learning_rate": 4.80912273208441e-05, "loss": 2.213346481323242, "memory(GiB)": 72.85, "step": 59795, "token_acc": 0.5314685314685315, "train_speed(iter/s)": 0.672339 }, { "epoch": 2.562015337817574, "grad_norm": 5.616217613220215, "learning_rate": 4.808450247076416e-05, "loss": 2.1123430252075197, "memory(GiB)": 72.85, 
"step": 59800, "token_acc": 0.5289855072463768, "train_speed(iter/s)": 0.672349 }, { "epoch": 2.5622295531468233, "grad_norm": 6.123165130615234, "learning_rate": 4.807777765538525e-05, "loss": 2.081222343444824, "memory(GiB)": 72.85, "step": 59805, "token_acc": 0.532608695652174, "train_speed(iter/s)": 0.672354 }, { "epoch": 2.562443768476072, "grad_norm": 4.765963554382324, "learning_rate": 4.8071052874829204e-05, "loss": 2.4953861236572266, "memory(GiB)": 72.85, "step": 59810, "token_acc": 0.48398576512455516, "train_speed(iter/s)": 0.672361 }, { "epoch": 2.562657983805321, "grad_norm": 5.134005546569824, "learning_rate": 4.806432812921782e-05, "loss": 2.185899543762207, "memory(GiB)": 72.85, "step": 59815, "token_acc": 0.49363057324840764, "train_speed(iter/s)": 0.672369 }, { "epoch": 2.56287219913457, "grad_norm": 4.998562335968018, "learning_rate": 4.805760341867298e-05, "loss": 2.094636154174805, "memory(GiB)": 72.85, "step": 59820, "token_acc": 0.534965034965035, "train_speed(iter/s)": 0.672368 }, { "epoch": 2.563086414463819, "grad_norm": 10.329505920410156, "learning_rate": 4.805087874331646e-05, "loss": 2.1841217041015626, "memory(GiB)": 72.85, "step": 59825, "token_acc": 0.5441176470588235, "train_speed(iter/s)": 0.672379 }, { "epoch": 2.563300629793068, "grad_norm": 4.091454029083252, "learning_rate": 4.8044154103270105e-05, "loss": 2.438186836242676, "memory(GiB)": 72.85, "step": 59830, "token_acc": 0.4812680115273775, "train_speed(iter/s)": 0.672385 }, { "epoch": 2.563514845122317, "grad_norm": 6.461130142211914, "learning_rate": 4.803742949865574e-05, "loss": 2.6483545303344727, "memory(GiB)": 72.85, "step": 59835, "token_acc": 0.44904458598726116, "train_speed(iter/s)": 0.672376 }, { "epoch": 2.563729060451566, "grad_norm": 5.941629886627197, "learning_rate": 4.803070492959517e-05, "loss": 2.294630432128906, "memory(GiB)": 72.85, "step": 59840, "token_acc": 0.4775641025641026, "train_speed(iter/s)": 0.672383 }, { "epoch": 2.5639432757808147, 
"grad_norm": 4.045503616333008, "learning_rate": 4.802398039621024e-05, "loss": 2.1179874420166014, "memory(GiB)": 72.85, "step": 59845, "token_acc": 0.5653710247349824, "train_speed(iter/s)": 0.672373 }, { "epoch": 2.564157491110064, "grad_norm": 5.735507011413574, "learning_rate": 4.801725589862275e-05, "loss": 2.221883201599121, "memory(GiB)": 72.85, "step": 59850, "token_acc": 0.514018691588785, "train_speed(iter/s)": 0.672378 }, { "epoch": 2.564371706439313, "grad_norm": 3.639693021774292, "learning_rate": 4.8010531436954525e-05, "loss": 2.4488210678100586, "memory(GiB)": 72.85, "step": 59855, "token_acc": 0.4772727272727273, "train_speed(iter/s)": 0.67238 }, { "epoch": 2.5645859217685616, "grad_norm": 5.355133533477783, "learning_rate": 4.80038070113274e-05, "loss": 2.097603988647461, "memory(GiB)": 72.85, "step": 59860, "token_acc": 0.5257731958762887, "train_speed(iter/s)": 0.672384 }, { "epoch": 2.564800137097811, "grad_norm": 5.848044395446777, "learning_rate": 4.799708262186317e-05, "loss": 1.7882928848266602, "memory(GiB)": 72.85, "step": 59865, "token_acc": 0.602510460251046, "train_speed(iter/s)": 0.672389 }, { "epoch": 2.5650143524270597, "grad_norm": 6.080626964569092, "learning_rate": 4.799035826868368e-05, "loss": 2.171059799194336, "memory(GiB)": 72.85, "step": 59870, "token_acc": 0.49473684210526314, "train_speed(iter/s)": 0.6724 }, { "epoch": 2.5652285677563085, "grad_norm": 5.101706504821777, "learning_rate": 4.798363395191074e-05, "loss": 2.3429698944091797, "memory(GiB)": 72.85, "step": 59875, "token_acc": 0.48951048951048953, "train_speed(iter/s)": 0.672401 }, { "epoch": 2.5654427830855577, "grad_norm": 4.337530612945557, "learning_rate": 4.797690967166615e-05, "loss": 2.0883296966552733, "memory(GiB)": 72.85, "step": 59880, "token_acc": 0.5054945054945055, "train_speed(iter/s)": 0.672396 }, { "epoch": 2.5656569984148065, "grad_norm": 7.261510848999023, "learning_rate": 4.7970185428071725e-05, "loss": 2.3705766677856444, "memory(GiB)": 
72.85, "step": 59885, "token_acc": 0.47674418604651164, "train_speed(iter/s)": 0.67239 }, { "epoch": 2.565871213744056, "grad_norm": 4.7746405601501465, "learning_rate": 4.796346122124931e-05, "loss": 2.144238090515137, "memory(GiB)": 72.85, "step": 59890, "token_acc": 0.5176056338028169, "train_speed(iter/s)": 0.672401 }, { "epoch": 2.5660854290733046, "grad_norm": 4.781418323516846, "learning_rate": 4.795673705132072e-05, "loss": 2.269976806640625, "memory(GiB)": 72.85, "step": 59895, "token_acc": 0.510989010989011, "train_speed(iter/s)": 0.672392 }, { "epoch": 2.5662996444025534, "grad_norm": 4.153679370880127, "learning_rate": 4.795001291840773e-05, "loss": 2.0836212158203127, "memory(GiB)": 72.85, "step": 59900, "token_acc": 0.5378787878787878, "train_speed(iter/s)": 0.67239 }, { "epoch": 2.5665138597318027, "grad_norm": 5.562638759613037, "learning_rate": 4.79432888226322e-05, "loss": 2.3536455154418947, "memory(GiB)": 72.85, "step": 59905, "token_acc": 0.5075987841945289, "train_speed(iter/s)": 0.672401 }, { "epoch": 2.5667280750610515, "grad_norm": 4.626091480255127, "learning_rate": 4.79365647641159e-05, "loss": 2.052047538757324, "memory(GiB)": 72.85, "step": 59910, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.67239 }, { "epoch": 2.5669422903903003, "grad_norm": 4.114895820617676, "learning_rate": 4.792984074298069e-05, "loss": 2.0940799713134766, "memory(GiB)": 72.85, "step": 59915, "token_acc": 0.5359712230215827, "train_speed(iter/s)": 0.672391 }, { "epoch": 2.5671565057195496, "grad_norm": 9.280842781066895, "learning_rate": 4.792311675934835e-05, "loss": 2.2262266159057615, "memory(GiB)": 72.85, "step": 59920, "token_acc": 0.4971751412429379, "train_speed(iter/s)": 0.67238 }, { "epoch": 2.5673707210487984, "grad_norm": 4.894021987915039, "learning_rate": 4.7916392813340686e-05, "loss": 2.157885932922363, "memory(GiB)": 72.85, "step": 59925, "token_acc": 0.5, "train_speed(iter/s)": 0.672385 }, { "epoch": 2.567584936378047, "grad_norm": 
5.021746635437012, "learning_rate": 4.790966890507954e-05, "loss": 2.0028003692626952, "memory(GiB)": 72.85, "step": 59930, "token_acc": 0.5287356321839081, "train_speed(iter/s)": 0.672378 }, { "epoch": 2.5677991517072964, "grad_norm": 6.152548789978027, "learning_rate": 4.790294503468669e-05, "loss": 2.5035802841186525, "memory(GiB)": 72.85, "step": 59935, "token_acc": 0.4896551724137931, "train_speed(iter/s)": 0.672393 }, { "epoch": 2.5680133670365453, "grad_norm": 4.906522750854492, "learning_rate": 4.789622120228396e-05, "loss": 2.184986877441406, "memory(GiB)": 72.85, "step": 59940, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672399 }, { "epoch": 2.568227582365794, "grad_norm": 7.795204162597656, "learning_rate": 4.788949740799315e-05, "loss": 2.4743812561035154, "memory(GiB)": 72.85, "step": 59945, "token_acc": 0.4849624060150376, "train_speed(iter/s)": 0.672403 }, { "epoch": 2.5684417976950433, "grad_norm": 7.4657368659973145, "learning_rate": 4.788277365193608e-05, "loss": 2.431285095214844, "memory(GiB)": 72.85, "step": 59950, "token_acc": 0.46839080459770116, "train_speed(iter/s)": 0.672414 }, { "epoch": 2.568656013024292, "grad_norm": 7.227960109710693, "learning_rate": 4.7876049934234557e-05, "loss": 1.8725069046020508, "memory(GiB)": 72.85, "step": 59955, "token_acc": 0.5633333333333334, "train_speed(iter/s)": 0.672407 }, { "epoch": 2.568870228353541, "grad_norm": 4.432013988494873, "learning_rate": 4.786932625501036e-05, "loss": 2.2804792404174803, "memory(GiB)": 72.85, "step": 59960, "token_acc": 0.4904214559386973, "train_speed(iter/s)": 0.672415 }, { "epoch": 2.56908444368279, "grad_norm": 5.721411228179932, "learning_rate": 4.786260261438534e-05, "loss": 2.308298873901367, "memory(GiB)": 72.85, "step": 59965, "token_acc": 0.5014749262536873, "train_speed(iter/s)": 0.672395 }, { "epoch": 2.569298659012039, "grad_norm": 4.4657511711120605, "learning_rate": 4.785587901248127e-05, "loss": 2.2235973358154295, "memory(GiB)": 72.85, "step": 
59970, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.672398 }, { "epoch": 2.569512874341288, "grad_norm": 5.831353664398193, "learning_rate": 4.784915544941996e-05, "loss": 2.1228965759277343, "memory(GiB)": 72.85, "step": 59975, "token_acc": 0.5461538461538461, "train_speed(iter/s)": 0.672409 }, { "epoch": 2.569727089670537, "grad_norm": 4.588790416717529, "learning_rate": 4.7842431925323226e-05, "loss": 2.1048004150390627, "memory(GiB)": 72.85, "step": 59980, "token_acc": 0.5341880341880342, "train_speed(iter/s)": 0.672411 }, { "epoch": 2.569941304999786, "grad_norm": 3.95284104347229, "learning_rate": 4.7835708440312854e-05, "loss": 2.046356773376465, "memory(GiB)": 72.85, "step": 59985, "token_acc": 0.5066666666666667, "train_speed(iter/s)": 0.672415 }, { "epoch": 2.5701555203290347, "grad_norm": 4.4606032371521, "learning_rate": 4.782898499451066e-05, "loss": 2.286298370361328, "memory(GiB)": 72.85, "step": 59990, "token_acc": 0.5522875816993464, "train_speed(iter/s)": 0.672416 }, { "epoch": 2.570369735658284, "grad_norm": 5.429129123687744, "learning_rate": 4.782226158803844e-05, "loss": 2.204113578796387, "memory(GiB)": 72.85, "step": 59995, "token_acc": 0.5129032258064516, "train_speed(iter/s)": 0.67241 }, { "epoch": 2.570583950987533, "grad_norm": 5.7363505363464355, "learning_rate": 4.781553822101799e-05, "loss": 2.2453262329101564, "memory(GiB)": 72.85, "step": 60000, "token_acc": 0.5310344827586206, "train_speed(iter/s)": 0.672401 }, { "epoch": 2.570583950987533, "eval_loss": 2.0500900745391846, "eval_runtime": 15.9358, "eval_samples_per_second": 6.275, "eval_steps_per_second": 6.275, "eval_token_acc": 0.5018315018315018, "step": 60000 }, { "epoch": 2.5707981663167816, "grad_norm": 4.553274154663086, "learning_rate": 4.780881489357112e-05, "loss": 2.491489791870117, "memory(GiB)": 72.85, "step": 60005, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.672271 }, { "epoch": 2.571012381646031, "grad_norm": 6.455875873565674, 
"learning_rate": 4.780209160581961e-05, "loss": 2.101535415649414, "memory(GiB)": 72.85, "step": 60010, "token_acc": 0.5150501672240803, "train_speed(iter/s)": 0.672277 }, { "epoch": 2.5712265969752797, "grad_norm": 4.610311508178711, "learning_rate": 4.779536835788528e-05, "loss": 2.5994279861450194, "memory(GiB)": 72.85, "step": 60015, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.672265 }, { "epoch": 2.5714408123045285, "grad_norm": 5.324702739715576, "learning_rate": 4.7788645149889936e-05, "loss": 2.4177997589111326, "memory(GiB)": 72.85, "step": 60020, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.672264 }, { "epoch": 2.5716550276337777, "grad_norm": 5.547961235046387, "learning_rate": 4.7781921981955336e-05, "loss": 2.2474319458007814, "memory(GiB)": 72.85, "step": 60025, "token_acc": 0.49110320284697506, "train_speed(iter/s)": 0.67225 }, { "epoch": 2.5718692429630265, "grad_norm": 4.456108093261719, "learning_rate": 4.7775198854203316e-05, "loss": 2.1974807739257813, "memory(GiB)": 72.85, "step": 60030, "token_acc": 0.5227272727272727, "train_speed(iter/s)": 0.672253 }, { "epoch": 2.5720834582922754, "grad_norm": 4.538473129272461, "learning_rate": 4.776847576675563e-05, "loss": 2.3557418823242187, "memory(GiB)": 72.85, "step": 60035, "token_acc": 0.4964788732394366, "train_speed(iter/s)": 0.672247 }, { "epoch": 2.5722976736215246, "grad_norm": 4.90955924987793, "learning_rate": 4.7761752719734123e-05, "loss": 2.236051559448242, "memory(GiB)": 72.85, "step": 60040, "token_acc": 0.4885245901639344, "train_speed(iter/s)": 0.672245 }, { "epoch": 2.5725118889507734, "grad_norm": 4.714458465576172, "learning_rate": 4.775502971326055e-05, "loss": 2.386068344116211, "memory(GiB)": 72.85, "step": 60045, "token_acc": 0.5107142857142857, "train_speed(iter/s)": 0.672254 }, { "epoch": 2.5727261042800222, "grad_norm": 4.779111862182617, "learning_rate": 4.774830674745673e-05, "loss": 2.5332077026367186, "memory(GiB)": 72.85, "step": 60050, 
"token_acc": 0.48, "train_speed(iter/s)": 0.672255 }, { "epoch": 2.5729403196092715, "grad_norm": 5.323661804199219, "learning_rate": 4.774158382244444e-05, "loss": 2.683393096923828, "memory(GiB)": 72.85, "step": 60055, "token_acc": 0.44107744107744107, "train_speed(iter/s)": 0.672256 }, { "epoch": 2.5731545349385203, "grad_norm": 4.989975929260254, "learning_rate": 4.773486093834549e-05, "loss": 2.385475921630859, "memory(GiB)": 72.85, "step": 60060, "token_acc": 0.484593837535014, "train_speed(iter/s)": 0.672247 }, { "epoch": 2.573368750267769, "grad_norm": 4.897096157073975, "learning_rate": 4.772813809528166e-05, "loss": 1.9164987564086915, "memory(GiB)": 72.85, "step": 60065, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672252 }, { "epoch": 2.5735829655970184, "grad_norm": 4.618252754211426, "learning_rate": 4.772141529337472e-05, "loss": 2.1645925521850584, "memory(GiB)": 72.85, "step": 60070, "token_acc": 0.518005540166205, "train_speed(iter/s)": 0.672257 }, { "epoch": 2.573797180926267, "grad_norm": 5.6848039627075195, "learning_rate": 4.77146925327465e-05, "loss": 2.4264732360839845, "memory(GiB)": 72.85, "step": 60075, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.672258 }, { "epoch": 2.574011396255516, "grad_norm": 4.102342128753662, "learning_rate": 4.770796981351876e-05, "loss": 2.1750484466552735, "memory(GiB)": 72.85, "step": 60080, "token_acc": 0.5354107648725213, "train_speed(iter/s)": 0.672263 }, { "epoch": 2.5742256115847653, "grad_norm": 5.014070987701416, "learning_rate": 4.770124713581329e-05, "loss": 2.476983642578125, "memory(GiB)": 72.85, "step": 60085, "token_acc": 0.4844290657439446, "train_speed(iter/s)": 0.672263 }, { "epoch": 2.574439826914014, "grad_norm": 4.9596357345581055, "learning_rate": 4.76945244997519e-05, "loss": 1.8826549530029297, "memory(GiB)": 72.85, "step": 60090, "token_acc": 0.5430711610486891, "train_speed(iter/s)": 0.67226 }, { "epoch": 2.574654042243263, "grad_norm": 6.468573570251465, 
"learning_rate": 4.7687801905456344e-05, "loss": 2.2924524307250977, "memory(GiB)": 72.85, "step": 60095, "token_acc": 0.46946564885496184, "train_speed(iter/s)": 0.672264 }, { "epoch": 2.574868257572512, "grad_norm": 5.934815406799316, "learning_rate": 4.768107935304844e-05, "loss": 2.3363105773925783, "memory(GiB)": 72.85, "step": 60100, "token_acc": 0.5, "train_speed(iter/s)": 0.672266 }, { "epoch": 2.575082472901761, "grad_norm": 7.602248668670654, "learning_rate": 4.767435684264995e-05, "loss": 2.164905548095703, "memory(GiB)": 72.85, "step": 60105, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.672281 }, { "epoch": 2.5752966882310098, "grad_norm": 4.261717796325684, "learning_rate": 4.766763437438265e-05, "loss": 2.1279186248779296, "memory(GiB)": 72.85, "step": 60110, "token_acc": 0.501577287066246, "train_speed(iter/s)": 0.672242 }, { "epoch": 2.575510903560259, "grad_norm": 5.68047571182251, "learning_rate": 4.766091194836836e-05, "loss": 2.517550468444824, "memory(GiB)": 72.85, "step": 60115, "token_acc": 0.4847328244274809, "train_speed(iter/s)": 0.672253 }, { "epoch": 2.575725118889508, "grad_norm": 4.331416130065918, "learning_rate": 4.765418956472885e-05, "loss": 2.15271053314209, "memory(GiB)": 72.85, "step": 60120, "token_acc": 0.5137931034482759, "train_speed(iter/s)": 0.672248 }, { "epoch": 2.5759393342187566, "grad_norm": 5.832624912261963, "learning_rate": 4.76474672235859e-05, "loss": 2.2579484939575196, "memory(GiB)": 72.85, "step": 60125, "token_acc": 0.4722222222222222, "train_speed(iter/s)": 0.672252 }, { "epoch": 2.576153549548006, "grad_norm": 4.9741716384887695, "learning_rate": 4.764074492506127e-05, "loss": 2.266254997253418, "memory(GiB)": 72.85, "step": 60130, "token_acc": 0.5140562248995983, "train_speed(iter/s)": 0.672247 }, { "epoch": 2.5763677648772547, "grad_norm": 8.12198543548584, "learning_rate": 4.763402266927677e-05, "loss": 2.394051742553711, "memory(GiB)": 72.85, "step": 60135, "token_acc": 0.50390625, 
"train_speed(iter/s)": 0.672262 }, { "epoch": 2.5765819802065035, "grad_norm": 4.725654602050781, "learning_rate": 4.7627300456354176e-05, "loss": 2.4352933883666994, "memory(GiB)": 72.85, "step": 60140, "token_acc": 0.4845360824742268, "train_speed(iter/s)": 0.672259 }, { "epoch": 2.576796195535753, "grad_norm": 4.306679725646973, "learning_rate": 4.762057828641525e-05, "loss": 2.6764930725097655, "memory(GiB)": 72.85, "step": 60145, "token_acc": 0.44936708860759494, "train_speed(iter/s)": 0.672257 }, { "epoch": 2.5770104108650016, "grad_norm": 4.90095853805542, "learning_rate": 4.761385615958178e-05, "loss": 2.424943542480469, "memory(GiB)": 72.85, "step": 60150, "token_acc": 0.49560117302052786, "train_speed(iter/s)": 0.672265 }, { "epoch": 2.5772246261942504, "grad_norm": 5.464286804199219, "learning_rate": 4.760713407597554e-05, "loss": 2.1710586547851562, "memory(GiB)": 72.85, "step": 60155, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672277 }, { "epoch": 2.5774388415234997, "grad_norm": 4.169270038604736, "learning_rate": 4.760041203571832e-05, "loss": 2.2296144485473635, "memory(GiB)": 72.85, "step": 60160, "token_acc": 0.5136986301369864, "train_speed(iter/s)": 0.672283 }, { "epoch": 2.5776530568527485, "grad_norm": 5.848227500915527, "learning_rate": 4.7593690038931895e-05, "loss": 2.319079780578613, "memory(GiB)": 72.85, "step": 60165, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.672277 }, { "epoch": 2.5778672721819973, "grad_norm": 5.136807918548584, "learning_rate": 4.758696808573801e-05, "loss": 2.033269500732422, "memory(GiB)": 72.85, "step": 60170, "token_acc": 0.5378486055776892, "train_speed(iter/s)": 0.672279 }, { "epoch": 2.5780814875112466, "grad_norm": 6.051952838897705, "learning_rate": 4.758024617625848e-05, "loss": 1.870936965942383, "memory(GiB)": 72.85, "step": 60175, "token_acc": 0.5485074626865671, "train_speed(iter/s)": 0.672279 }, { "epoch": 2.5782957028404954, "grad_norm": 4.0037431716918945, 
"learning_rate": 4.757352431061504e-05, "loss": 2.2085208892822266, "memory(GiB)": 72.85, "step": 60180, "token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.672284 }, { "epoch": 2.578509918169744, "grad_norm": 6.482223033905029, "learning_rate": 4.756680248892949e-05, "loss": 2.1446998596191404, "memory(GiB)": 72.85, "step": 60185, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.672273 }, { "epoch": 2.5787241334989934, "grad_norm": 3.7915191650390625, "learning_rate": 4.75600807113236e-05, "loss": 2.4219974517822265, "memory(GiB)": 72.85, "step": 60190, "token_acc": 0.45484949832775917, "train_speed(iter/s)": 0.672285 }, { "epoch": 2.5789383488282422, "grad_norm": 4.917784214019775, "learning_rate": 4.755335897791915e-05, "loss": 2.109188270568848, "memory(GiB)": 72.85, "step": 60195, "token_acc": 0.5064516129032258, "train_speed(iter/s)": 0.672291 }, { "epoch": 2.579152564157491, "grad_norm": 5.2570929527282715, "learning_rate": 4.7546637288837873e-05, "loss": 2.2121355056762697, "memory(GiB)": 72.85, "step": 60200, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.672287 }, { "epoch": 2.5793667794867403, "grad_norm": 4.251828193664551, "learning_rate": 4.753991564420158e-05, "loss": 2.2228414535522463, "memory(GiB)": 72.85, "step": 60205, "token_acc": 0.5274390243902439, "train_speed(iter/s)": 0.672288 }, { "epoch": 2.579580994815989, "grad_norm": 4.870431423187256, "learning_rate": 4.753319404413203e-05, "loss": 2.179545211791992, "memory(GiB)": 72.85, "step": 60210, "token_acc": 0.5189393939393939, "train_speed(iter/s)": 0.672298 }, { "epoch": 2.579795210145238, "grad_norm": 6.480828762054443, "learning_rate": 4.752647248875096e-05, "loss": 2.233333206176758, "memory(GiB)": 72.85, "step": 60215, "token_acc": 0.5391849529780565, "train_speed(iter/s)": 0.672299 }, { "epoch": 2.580009425474487, "grad_norm": 4.196115970611572, "learning_rate": 4.7519750978180185e-05, "loss": 2.3605133056640626, "memory(GiB)": 72.85, "step": 60220, 
"token_acc": 0.49337748344370863, "train_speed(iter/s)": 0.672317 }, { "epoch": 2.580223640803736, "grad_norm": 5.206559658050537, "learning_rate": 4.751302951254144e-05, "loss": 2.3300479888916015, "memory(GiB)": 72.85, "step": 60225, "token_acc": 0.5055350553505535, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.580437856132985, "grad_norm": 4.465354919433594, "learning_rate": 4.750630809195648e-05, "loss": 2.180410385131836, "memory(GiB)": 72.85, "step": 60230, "token_acc": 0.48013245033112584, "train_speed(iter/s)": 0.672324 }, { "epoch": 2.580652071462234, "grad_norm": 5.003514289855957, "learning_rate": 4.749958671654712e-05, "loss": 2.0260257720947266, "memory(GiB)": 72.85, "step": 60235, "token_acc": 0.5343511450381679, "train_speed(iter/s)": 0.672317 }, { "epoch": 2.580866286791483, "grad_norm": 4.206747531890869, "learning_rate": 4.7492865386435063e-05, "loss": 2.471544647216797, "memory(GiB)": 72.85, "step": 60240, "token_acc": 0.4738562091503268, "train_speed(iter/s)": 0.672312 }, { "epoch": 2.5810805021207317, "grad_norm": 5.26352596282959, "learning_rate": 4.748614410174212e-05, "loss": 2.734288787841797, "memory(GiB)": 72.85, "step": 60245, "token_acc": 0.4536082474226804, "train_speed(iter/s)": 0.672316 }, { "epoch": 2.581294717449981, "grad_norm": 4.730737209320068, "learning_rate": 4.7479422862590034e-05, "loss": 2.6060243606567384, "memory(GiB)": 72.85, "step": 60250, "token_acc": 0.5093167701863354, "train_speed(iter/s)": 0.672311 }, { "epoch": 2.5815089327792298, "grad_norm": 4.726035118103027, "learning_rate": 4.7472701669100544e-05, "loss": 2.1091209411621095, "memory(GiB)": 72.85, "step": 60255, "token_acc": 0.5522875816993464, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.5817231481084786, "grad_norm": 4.9623026847839355, "learning_rate": 4.746598052139545e-05, "loss": 2.519190216064453, "memory(GiB)": 72.85, "step": 60260, "token_acc": 0.46865671641791046, "train_speed(iter/s)": 0.67232 }, { "epoch": 2.581937363437728, "grad_norm": 
4.857523441314697, "learning_rate": 4.74592594195965e-05, "loss": 2.415472984313965, "memory(GiB)": 72.85, "step": 60265, "token_acc": 0.4637223974763407, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.5821515787669767, "grad_norm": 5.469851016998291, "learning_rate": 4.745253836382545e-05, "loss": 2.3783889770507813, "memory(GiB)": 72.85, "step": 60270, "token_acc": 0.5221843003412969, "train_speed(iter/s)": 0.672308 }, { "epoch": 2.5823657940962255, "grad_norm": 4.576112270355225, "learning_rate": 4.7445817354204044e-05, "loss": 2.270663833618164, "memory(GiB)": 72.85, "step": 60275, "token_acc": 0.5030864197530864, "train_speed(iter/s)": 0.672321 }, { "epoch": 2.5825800094254747, "grad_norm": 4.924864768981934, "learning_rate": 4.743909639085408e-05, "loss": 2.512287139892578, "memory(GiB)": 72.85, "step": 60280, "token_acc": 0.49008498583569404, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.5827942247547235, "grad_norm": 3.6509337425231934, "learning_rate": 4.7432375473897256e-05, "loss": 2.0766143798828125, "memory(GiB)": 72.85, "step": 60285, "token_acc": 0.5276752767527675, "train_speed(iter/s)": 0.672322 }, { "epoch": 2.5830084400839723, "grad_norm": 4.432168006896973, "learning_rate": 4.742565460345538e-05, "loss": 2.298278045654297, "memory(GiB)": 72.85, "step": 60290, "token_acc": 0.4772727272727273, "train_speed(iter/s)": 0.672322 }, { "epoch": 2.5832226554132216, "grad_norm": 4.480579853057861, "learning_rate": 4.741893377965018e-05, "loss": 2.021406555175781, "memory(GiB)": 72.85, "step": 60295, "token_acc": 0.5345622119815668, "train_speed(iter/s)": 0.672326 }, { "epoch": 2.5834368707424704, "grad_norm": 4.315228462219238, "learning_rate": 4.741221300260341e-05, "loss": 2.012085723876953, "memory(GiB)": 72.85, "step": 60300, "token_acc": 0.5592592592592592, "train_speed(iter/s)": 0.672332 }, { "epoch": 2.5836510860717192, "grad_norm": 5.642927169799805, "learning_rate": 4.740549227243684e-05, "loss": 2.415969467163086, "memory(GiB)": 72.85, 
"step": 60305, "token_acc": 0.5, "train_speed(iter/s)": 0.672339 }, { "epoch": 2.5838653014009685, "grad_norm": 4.206030368804932, "learning_rate": 4.7398771589272214e-05, "loss": 2.2588268280029298, "memory(GiB)": 72.85, "step": 60310, "token_acc": 0.49174917491749176, "train_speed(iter/s)": 0.672323 }, { "epoch": 2.5840795167302173, "grad_norm": 5.266557693481445, "learning_rate": 4.739205095323126e-05, "loss": 2.3635108947753904, "memory(GiB)": 72.85, "step": 60315, "token_acc": 0.5205479452054794, "train_speed(iter/s)": 0.672296 }, { "epoch": 2.584293732059466, "grad_norm": 4.79279088973999, "learning_rate": 4.738533036443577e-05, "loss": 2.307847595214844, "memory(GiB)": 72.85, "step": 60320, "token_acc": 0.5163398692810458, "train_speed(iter/s)": 0.672298 }, { "epoch": 2.5845079473887154, "grad_norm": 4.057628154754639, "learning_rate": 4.737860982300746e-05, "loss": 2.0539865493774414, "memory(GiB)": 72.85, "step": 60325, "token_acc": 0.5579937304075235, "train_speed(iter/s)": 0.672307 }, { "epoch": 2.584722162717964, "grad_norm": 5.892770290374756, "learning_rate": 4.737188932906809e-05, "loss": 2.031358528137207, "memory(GiB)": 72.85, "step": 60330, "token_acc": 0.5227272727272727, "train_speed(iter/s)": 0.672302 }, { "epoch": 2.584936378047213, "grad_norm": 4.497494220733643, "learning_rate": 4.736516888273941e-05, "loss": 2.0655611038208006, "memory(GiB)": 72.85, "step": 60335, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672315 }, { "epoch": 2.5851505933764622, "grad_norm": 4.947353363037109, "learning_rate": 4.735844848414318e-05, "loss": 2.139185905456543, "memory(GiB)": 72.85, "step": 60340, "token_acc": 0.5536332179930796, "train_speed(iter/s)": 0.67231 }, { "epoch": 2.585364808705711, "grad_norm": 5.448515892028809, "learning_rate": 4.7351728133401115e-05, "loss": 2.0107561111450196, "memory(GiB)": 72.85, "step": 60345, "token_acc": 0.5488215488215489, "train_speed(iter/s)": 0.672322 }, { "epoch": 2.58557902403496, "grad_norm": 
4.540935516357422, "learning_rate": 4.734500783063499e-05, "loss": 2.2129547119140627, "memory(GiB)": 72.85, "step": 60350, "token_acc": 0.5, "train_speed(iter/s)": 0.672328 }, { "epoch": 2.585793239364209, "grad_norm": 6.4246673583984375, "learning_rate": 4.733828757596655e-05, "loss": 2.231765937805176, "memory(GiB)": 72.85, "step": 60355, "token_acc": 0.5547445255474452, "train_speed(iter/s)": 0.672335 }, { "epoch": 2.586007454693458, "grad_norm": 8.407023429870605, "learning_rate": 4.7331567369517516e-05, "loss": 2.4079586029052735, "memory(GiB)": 72.85, "step": 60360, "token_acc": 0.48771929824561405, "train_speed(iter/s)": 0.672341 }, { "epoch": 2.5862216700227068, "grad_norm": 5.390028953552246, "learning_rate": 4.732484721140965e-05, "loss": 2.4038219451904297, "memory(GiB)": 72.85, "step": 60365, "token_acc": 0.47278911564625853, "train_speed(iter/s)": 0.672348 }, { "epoch": 2.586435885351956, "grad_norm": 5.223000526428223, "learning_rate": 4.7318127101764684e-05, "loss": 2.462506866455078, "memory(GiB)": 72.85, "step": 60370, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.672353 }, { "epoch": 2.586650100681205, "grad_norm": 3.9974215030670166, "learning_rate": 4.731140704070435e-05, "loss": 2.3225025177001952, "memory(GiB)": 72.85, "step": 60375, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.672354 }, { "epoch": 2.5868643160104536, "grad_norm": 6.4363508224487305, "learning_rate": 4.7304687028350415e-05, "loss": 2.3633399963378907, "memory(GiB)": 72.85, "step": 60380, "token_acc": 0.5, "train_speed(iter/s)": 0.672364 }, { "epoch": 2.587078531339703, "grad_norm": 5.869675159454346, "learning_rate": 4.729796706482459e-05, "loss": 2.2615692138671877, "memory(GiB)": 72.85, "step": 60385, "token_acc": 0.5, "train_speed(iter/s)": 0.672356 }, { "epoch": 2.5872927466689517, "grad_norm": 5.884766101837158, "learning_rate": 4.7291247150248636e-05, "loss": 2.2938024520874025, "memory(GiB)": 72.85, "step": 60390, "token_acc": 
0.5354609929078015, "train_speed(iter/s)": 0.672364 }, { "epoch": 2.5875069619982005, "grad_norm": 4.586406707763672, "learning_rate": 4.728452728474428e-05, "loss": 2.006281852722168, "memory(GiB)": 72.85, "step": 60395, "token_acc": 0.6044776119402985, "train_speed(iter/s)": 0.672364 }, { "epoch": 2.5877211773274498, "grad_norm": 4.34990930557251, "learning_rate": 4.727780746843325e-05, "loss": 2.4342113494873048, "memory(GiB)": 72.85, "step": 60400, "token_acc": 0.5066666666666667, "train_speed(iter/s)": 0.672364 }, { "epoch": 2.5879353926566986, "grad_norm": 4.228999614715576, "learning_rate": 4.727108770143728e-05, "loss": 2.375318717956543, "memory(GiB)": 72.85, "step": 60405, "token_acc": 0.5079872204472844, "train_speed(iter/s)": 0.672362 }, { "epoch": 2.5881496079859474, "grad_norm": 5.153909683227539, "learning_rate": 4.726436798387814e-05, "loss": 2.2933418273925783, "memory(GiB)": 72.85, "step": 60410, "token_acc": 0.5451263537906137, "train_speed(iter/s)": 0.672366 }, { "epoch": 2.5883638233151967, "grad_norm": 4.821467876434326, "learning_rate": 4.725764831587754e-05, "loss": 2.502271270751953, "memory(GiB)": 72.85, "step": 60415, "token_acc": 0.4787234042553192, "train_speed(iter/s)": 0.672366 }, { "epoch": 2.5885780386444455, "grad_norm": 4.556141376495361, "learning_rate": 4.72509286975572e-05, "loss": 2.270763969421387, "memory(GiB)": 72.85, "step": 60420, "token_acc": 0.5268817204301075, "train_speed(iter/s)": 0.672372 }, { "epoch": 2.5887922539736943, "grad_norm": 7.26174783706665, "learning_rate": 4.724420912903887e-05, "loss": 1.871413803100586, "memory(GiB)": 72.85, "step": 60425, "token_acc": 0.5648854961832062, "train_speed(iter/s)": 0.672375 }, { "epoch": 2.5890064693029435, "grad_norm": 5.59999942779541, "learning_rate": 4.723748961044428e-05, "loss": 2.1812311172485352, "memory(GiB)": 72.85, "step": 60430, "token_acc": 0.5168067226890757, "train_speed(iter/s)": 0.672372 }, { "epoch": 2.5892206846321923, "grad_norm": 5.664865493774414, 
"learning_rate": 4.723077014189516e-05, "loss": 2.295380401611328, "memory(GiB)": 72.85, "step": 60435, "token_acc": 0.5204460966542751, "train_speed(iter/s)": 0.672361 }, { "epoch": 2.589434899961441, "grad_norm": 4.528515338897705, "learning_rate": 4.722405072351324e-05, "loss": 2.4058296203613283, "memory(GiB)": 72.85, "step": 60440, "token_acc": 0.514367816091954, "train_speed(iter/s)": 0.672345 }, { "epoch": 2.5896491152906904, "grad_norm": 5.426424026489258, "learning_rate": 4.721733135542023e-05, "loss": 2.2353435516357423, "memory(GiB)": 72.85, "step": 60445, "token_acc": 0.47639484978540775, "train_speed(iter/s)": 0.672351 }, { "epoch": 2.5898633306199392, "grad_norm": 6.407140254974365, "learning_rate": 4.721061203773789e-05, "loss": 2.4104612350463865, "memory(GiB)": 72.85, "step": 60450, "token_acc": 0.5176470588235295, "train_speed(iter/s)": 0.672357 }, { "epoch": 2.590077545949188, "grad_norm": 3.6415061950683594, "learning_rate": 4.720389277058793e-05, "loss": 2.3974586486816407, "memory(GiB)": 72.85, "step": 60455, "token_acc": 0.48, "train_speed(iter/s)": 0.67236 }, { "epoch": 2.5902917612784373, "grad_norm": 4.495718002319336, "learning_rate": 4.719717355409205e-05, "loss": 2.4602262496948244, "memory(GiB)": 72.85, "step": 60460, "token_acc": 0.47249190938511326, "train_speed(iter/s)": 0.672375 }, { "epoch": 2.590505976607686, "grad_norm": 4.7746686935424805, "learning_rate": 4.719045438837202e-05, "loss": 2.3778911590576173, "memory(GiB)": 72.85, "step": 60465, "token_acc": 0.4983164983164983, "train_speed(iter/s)": 0.67239 }, { "epoch": 2.590720191936935, "grad_norm": 5.260173797607422, "learning_rate": 4.718373527354954e-05, "loss": 2.3865318298339844, "memory(GiB)": 72.85, "step": 60470, "token_acc": 0.4816053511705686, "train_speed(iter/s)": 0.672384 }, { "epoch": 2.590934407266184, "grad_norm": 3.89217209815979, "learning_rate": 4.717701620974635e-05, "loss": 2.218355178833008, "memory(GiB)": 72.85, "step": 60475, "token_acc": 
0.5092936802973977, "train_speed(iter/s)": 0.6724 }, { "epoch": 2.591148622595433, "grad_norm": 4.036561489105225, "learning_rate": 4.7170297197084126e-05, "loss": 2.1348388671875, "memory(GiB)": 72.85, "step": 60480, "token_acc": 0.5522875816993464, "train_speed(iter/s)": 0.672415 }, { "epoch": 2.591362837924682, "grad_norm": 4.377843379974365, "learning_rate": 4.7163578235684644e-05, "loss": 1.9927879333496095, "memory(GiB)": 72.85, "step": 60485, "token_acc": 0.5481481481481482, "train_speed(iter/s)": 0.67241 }, { "epoch": 2.591577053253931, "grad_norm": 5.0593061447143555, "learning_rate": 4.71568593256696e-05, "loss": 2.167588806152344, "memory(GiB)": 72.85, "step": 60490, "token_acc": 0.5296167247386759, "train_speed(iter/s)": 0.672416 }, { "epoch": 2.59179126858318, "grad_norm": 5.596474647521973, "learning_rate": 4.7150140467160724e-05, "loss": 2.2116363525390623, "memory(GiB)": 72.85, "step": 60495, "token_acc": 0.5124555160142349, "train_speed(iter/s)": 0.672423 }, { "epoch": 2.5920054839124287, "grad_norm": 4.022644996643066, "learning_rate": 4.714342166027973e-05, "loss": 2.240021896362305, "memory(GiB)": 72.85, "step": 60500, "token_acc": 0.4897959183673469, "train_speed(iter/s)": 0.672431 }, { "epoch": 2.5920054839124287, "eval_loss": 2.041339159011841, "eval_runtime": 15.7637, "eval_samples_per_second": 6.344, "eval_steps_per_second": 6.344, "eval_token_acc": 0.51, "step": 60500 }, { "epoch": 2.592219699241678, "grad_norm": 4.458410739898682, "learning_rate": 4.713670290514832e-05, "loss": 2.517765235900879, "memory(GiB)": 72.85, "step": 60505, "token_acc": 0.503921568627451, "train_speed(iter/s)": 0.672291 }, { "epoch": 2.5924339145709268, "grad_norm": 5.409196376800537, "learning_rate": 4.712998420188823e-05, "loss": 2.5002696990966795, "memory(GiB)": 72.85, "step": 60510, "token_acc": 0.49836065573770494, "train_speed(iter/s)": 0.672282 }, { "epoch": 2.5926481299001756, "grad_norm": 4.1825971603393555, "learning_rate": 4.7123265550621175e-05, 
"loss": 2.2163455963134764, "memory(GiB)": 72.85, "step": 60515, "token_acc": 0.5176056338028169, "train_speed(iter/s)": 0.672279 }, { "epoch": 2.592862345229425, "grad_norm": 4.849092960357666, "learning_rate": 4.711654695146885e-05, "loss": 2.610928535461426, "memory(GiB)": 72.85, "step": 60520, "token_acc": 0.47761194029850745, "train_speed(iter/s)": 0.672283 }, { "epoch": 2.5930765605586736, "grad_norm": 4.946150779724121, "learning_rate": 4.710982840455299e-05, "loss": 2.2100595474243163, "memory(GiB)": 72.85, "step": 60525, "token_acc": 0.5287356321839081, "train_speed(iter/s)": 0.672299 }, { "epoch": 2.5932907758879225, "grad_norm": 6.500856876373291, "learning_rate": 4.710310990999529e-05, "loss": 2.1496936798095705, "memory(GiB)": 72.85, "step": 60530, "token_acc": 0.5308219178082192, "train_speed(iter/s)": 0.672299 }, { "epoch": 2.5935049912171717, "grad_norm": 5.400988578796387, "learning_rate": 4.709639146791748e-05, "loss": 2.2201459884643553, "memory(GiB)": 72.85, "step": 60535, "token_acc": 0.5093167701863354, "train_speed(iter/s)": 0.672287 }, { "epoch": 2.5937192065464205, "grad_norm": 4.2337446212768555, "learning_rate": 4.7089673078441265e-05, "loss": 2.175130081176758, "memory(GiB)": 72.85, "step": 60540, "token_acc": 0.4954128440366973, "train_speed(iter/s)": 0.672285 }, { "epoch": 2.5939334218756693, "grad_norm": 5.253509998321533, "learning_rate": 4.708295474168834e-05, "loss": 2.198828125, "memory(GiB)": 72.85, "step": 60545, "token_acc": 0.5320754716981132, "train_speed(iter/s)": 0.67228 }, { "epoch": 2.5941476372049186, "grad_norm": 6.197182655334473, "learning_rate": 4.707623645778042e-05, "loss": 2.3013332366943358, "memory(GiB)": 72.85, "step": 60550, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.672293 }, { "epoch": 2.5943618525341674, "grad_norm": 5.313913822174072, "learning_rate": 4.7069518226839235e-05, "loss": 2.213029670715332, "memory(GiB)": 72.85, "step": 60555, "token_acc": 0.5263157894736842, 
"train_speed(iter/s)": 0.672293 }, { "epoch": 2.594576067863416, "grad_norm": 5.286360740661621, "learning_rate": 4.706280004898648e-05, "loss": 1.961258316040039, "memory(GiB)": 72.85, "step": 60560, "token_acc": 0.5535055350553506, "train_speed(iter/s)": 0.672303 }, { "epoch": 2.5947902831926655, "grad_norm": 5.166820526123047, "learning_rate": 4.7056081924343846e-05, "loss": 2.3321710586547852, "memory(GiB)": 72.85, "step": 60565, "token_acc": 0.5204460966542751, "train_speed(iter/s)": 0.672297 }, { "epoch": 2.5950044985219143, "grad_norm": 4.633284568786621, "learning_rate": 4.704936385303306e-05, "loss": 2.45421142578125, "memory(GiB)": 72.85, "step": 60570, "token_acc": 0.4965034965034965, "train_speed(iter/s)": 0.672282 }, { "epoch": 2.595218713851163, "grad_norm": 4.522875785827637, "learning_rate": 4.70426458351758e-05, "loss": 2.0140754699707033, "memory(GiB)": 72.85, "step": 60575, "token_acc": 0.5652173913043478, "train_speed(iter/s)": 0.672282 }, { "epoch": 2.5954329291804124, "grad_norm": 4.277731418609619, "learning_rate": 4.7035927870893806e-05, "loss": 2.2063846588134766, "memory(GiB)": 72.85, "step": 60580, "token_acc": 0.5, "train_speed(iter/s)": 0.672285 }, { "epoch": 2.595647144509661, "grad_norm": 6.434327602386475, "learning_rate": 4.702920996030875e-05, "loss": 2.2124923706054687, "memory(GiB)": 72.85, "step": 60585, "token_acc": 0.5035714285714286, "train_speed(iter/s)": 0.672292 }, { "epoch": 2.59586135983891, "grad_norm": 5.729632377624512, "learning_rate": 4.7022492103542335e-05, "loss": 2.2010980606079102, "memory(GiB)": 72.85, "step": 60590, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.672277 }, { "epoch": 2.5960755751681592, "grad_norm": 5.719350814819336, "learning_rate": 4.701577430071629e-05, "loss": 2.3175785064697267, "memory(GiB)": 72.85, "step": 60595, "token_acc": 0.4837758112094395, "train_speed(iter/s)": 0.672273 }, { "epoch": 2.596289790497408, "grad_norm": 5.578087329864502, "learning_rate": 
4.700905655195228e-05, "loss": 2.186376190185547, "memory(GiB)": 72.85, "step": 60600, "token_acc": 0.5461847389558233, "train_speed(iter/s)": 0.672261 }, { "epoch": 2.596504005826657, "grad_norm": 4.24132776260376, "learning_rate": 4.7002338857372e-05, "loss": 2.204166221618652, "memory(GiB)": 72.85, "step": 60605, "token_acc": 0.49328859060402686, "train_speed(iter/s)": 0.672268 }, { "epoch": 2.596718221155906, "grad_norm": 3.891177177429199, "learning_rate": 4.6995621217097186e-05, "loss": 2.375472831726074, "memory(GiB)": 72.85, "step": 60610, "token_acc": 0.48120300751879697, "train_speed(iter/s)": 0.672277 }, { "epoch": 2.596932436485155, "grad_norm": 4.430634498596191, "learning_rate": 4.6988903631249496e-05, "loss": 2.114141273498535, "memory(GiB)": 72.85, "step": 60615, "token_acc": 0.4897959183673469, "train_speed(iter/s)": 0.672275 }, { "epoch": 2.5971466518144037, "grad_norm": 5.377476215362549, "learning_rate": 4.698218609995066e-05, "loss": 2.269771194458008, "memory(GiB)": 72.85, "step": 60620, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.672273 }, { "epoch": 2.597360867143653, "grad_norm": 7.274384498596191, "learning_rate": 4.697546862332232e-05, "loss": 2.337876892089844, "memory(GiB)": 72.85, "step": 60625, "token_acc": 0.521613832853026, "train_speed(iter/s)": 0.672266 }, { "epoch": 2.597575082472902, "grad_norm": 5.2998151779174805, "learning_rate": 4.696875120148623e-05, "loss": 2.391061782836914, "memory(GiB)": 72.85, "step": 60630, "token_acc": 0.4735099337748344, "train_speed(iter/s)": 0.672266 }, { "epoch": 2.5977892978021506, "grad_norm": 4.299990653991699, "learning_rate": 4.696203383456405e-05, "loss": 1.9558557510375976, "memory(GiB)": 72.85, "step": 60635, "token_acc": 0.5220883534136547, "train_speed(iter/s)": 0.672275 }, { "epoch": 2.5980035131314, "grad_norm": 4.216680526733398, "learning_rate": 4.695531652267749e-05, "loss": 1.9745647430419921, "memory(GiB)": 72.85, "step": 60640, "token_acc": 0.5644599303135889, 
"train_speed(iter/s)": 0.672277 }, { "epoch": 2.5982177284606487, "grad_norm": 4.750105857849121, "learning_rate": 4.694859926594821e-05, "loss": 2.2208961486816405, "memory(GiB)": 72.85, "step": 60645, "token_acc": 0.5048231511254019, "train_speed(iter/s)": 0.672266 }, { "epoch": 2.5984319437898975, "grad_norm": 5.382213115692139, "learning_rate": 4.694188206449792e-05, "loss": 2.217156982421875, "memory(GiB)": 72.85, "step": 60650, "token_acc": 0.5084175084175084, "train_speed(iter/s)": 0.672271 }, { "epoch": 2.5986461591191468, "grad_norm": 5.62556791305542, "learning_rate": 4.693516491844831e-05, "loss": 2.1626041412353514, "memory(GiB)": 72.85, "step": 60655, "token_acc": 0.5647058823529412, "train_speed(iter/s)": 0.67228 }, { "epoch": 2.5988603744483956, "grad_norm": 5.042566299438477, "learning_rate": 4.692844782792107e-05, "loss": 2.340786361694336, "memory(GiB)": 72.85, "step": 60660, "token_acc": 0.4735099337748344, "train_speed(iter/s)": 0.672271 }, { "epoch": 2.5990745897776444, "grad_norm": 6.698353290557861, "learning_rate": 4.692173079303786e-05, "loss": 2.1716472625732424, "memory(GiB)": 72.85, "step": 60665, "token_acc": 0.5259515570934256, "train_speed(iter/s)": 0.672267 }, { "epoch": 2.5992888051068936, "grad_norm": 3.6738901138305664, "learning_rate": 4.69150138139204e-05, "loss": 2.2655139923095704, "memory(GiB)": 72.85, "step": 60670, "token_acc": 0.5338078291814946, "train_speed(iter/s)": 0.672284 }, { "epoch": 2.5995030204361425, "grad_norm": 4.924567699432373, "learning_rate": 4.690829689069035e-05, "loss": 2.3039342880249025, "memory(GiB)": 72.85, "step": 60675, "token_acc": 0.5323741007194245, "train_speed(iter/s)": 0.672296 }, { "epoch": 2.5997172357653913, "grad_norm": 4.74547815322876, "learning_rate": 4.69015800234694e-05, "loss": 2.1909255981445312, "memory(GiB)": 72.85, "step": 60680, "token_acc": 0.5171232876712328, "train_speed(iter/s)": 0.672297 }, { "epoch": 2.5999314510946405, "grad_norm": 4.9845757484436035, "learning_rate": 
4.689486321237925e-05, "loss": 2.182925796508789, "memory(GiB)": 72.85, "step": 60685, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.672306 }, { "epoch": 2.6001456664238893, "grad_norm": 5.600738525390625, "learning_rate": 4.688814645754155e-05, "loss": 2.3690210342407227, "memory(GiB)": 72.85, "step": 60690, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.672306 }, { "epoch": 2.600359881753138, "grad_norm": 4.731483459472656, "learning_rate": 4.6881429759078e-05, "loss": 2.434212875366211, "memory(GiB)": 72.85, "step": 60695, "token_acc": 0.5047619047619047, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.6005740970823874, "grad_norm": 4.324174880981445, "learning_rate": 4.687471311711026e-05, "loss": 2.124232864379883, "memory(GiB)": 72.85, "step": 60700, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.600788312411636, "grad_norm": 4.092148303985596, "learning_rate": 4.686799653176005e-05, "loss": 2.2136512756347657, "memory(GiB)": 72.85, "step": 60705, "token_acc": 0.48823529411764705, "train_speed(iter/s)": 0.67231 }, { "epoch": 2.601002527740885, "grad_norm": 6.506377220153809, "learning_rate": 4.6861280003149e-05, "loss": 2.233510208129883, "memory(GiB)": 72.85, "step": 60710, "token_acc": 0.5168918918918919, "train_speed(iter/s)": 0.672308 }, { "epoch": 2.6012167430701343, "grad_norm": 5.344817638397217, "learning_rate": 4.685456353139883e-05, "loss": 1.9652799606323241, "memory(GiB)": 72.85, "step": 60715, "token_acc": 0.5875486381322957, "train_speed(iter/s)": 0.672294 }, { "epoch": 2.601430958399383, "grad_norm": 8.165572166442871, "learning_rate": 4.684784711663117e-05, "loss": 2.42012882232666, "memory(GiB)": 72.85, "step": 60720, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.672283 }, { "epoch": 2.601645173728632, "grad_norm": 6.493742942810059, "learning_rate": 4.6841130758967735e-05, "loss": 2.2275217056274412, "memory(GiB)": 72.85, "step": 60725, "token_acc": 0.5207100591715976, 
"train_speed(iter/s)": 0.672299 }, { "epoch": 2.601859389057881, "grad_norm": 4.345998287200928, "learning_rate": 4.683441445853017e-05, "loss": 2.072841262817383, "memory(GiB)": 72.85, "step": 60730, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672306 }, { "epoch": 2.60207360438713, "grad_norm": 4.521036148071289, "learning_rate": 4.6827698215440154e-05, "loss": 2.3854475021362305, "memory(GiB)": 72.85, "step": 60735, "token_acc": 0.48188405797101447, "train_speed(iter/s)": 0.672302 }, { "epoch": 2.602287819716379, "grad_norm": 5.427064895629883, "learning_rate": 4.682098202981936e-05, "loss": 2.2050472259521485, "memory(GiB)": 72.85, "step": 60740, "token_acc": 0.5381818181818182, "train_speed(iter/s)": 0.672303 }, { "epoch": 2.602502035045628, "grad_norm": 4.433483123779297, "learning_rate": 4.681426590178948e-05, "loss": 2.2362716674804686, "memory(GiB)": 72.85, "step": 60745, "token_acc": 0.5, "train_speed(iter/s)": 0.672305 }, { "epoch": 2.602716250374877, "grad_norm": 5.8916916847229, "learning_rate": 4.6807549831472135e-05, "loss": 2.5966604232788084, "memory(GiB)": 72.85, "step": 60750, "token_acc": 0.4542372881355932, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.6029304657041257, "grad_norm": 4.748818874359131, "learning_rate": 4.680083381898903e-05, "loss": 2.077742004394531, "memory(GiB)": 72.85, "step": 60755, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.603144681033375, "grad_norm": 3.943986177444458, "learning_rate": 4.6794117864461815e-05, "loss": 2.128396987915039, "memory(GiB)": 72.85, "step": 60760, "token_acc": 0.5392857142857143, "train_speed(iter/s)": 0.672313 }, { "epoch": 2.6033588963626237, "grad_norm": 5.504417896270752, "learning_rate": 4.678740196801217e-05, "loss": 2.3211219787597654, "memory(GiB)": 72.85, "step": 60765, "token_acc": 0.49085365853658536, "train_speed(iter/s)": 0.672321 }, { "epoch": 2.6035731116918726, "grad_norm": 4.396368026733398, "learning_rate": 
4.678068612976176e-05, "loss": 2.6055952072143556, "memory(GiB)": 72.85, "step": 60770, "token_acc": 0.46831955922865015, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.603787327021122, "grad_norm": 5.4912943840026855, "learning_rate": 4.677397034983221e-05, "loss": 2.0823678970336914, "memory(GiB)": 72.85, "step": 60775, "token_acc": 0.5391304347826087, "train_speed(iter/s)": 0.672323 }, { "epoch": 2.6040015423503706, "grad_norm": 5.84462833404541, "learning_rate": 4.676725462834523e-05, "loss": 2.285075569152832, "memory(GiB)": 72.85, "step": 60780, "token_acc": 0.5206896551724138, "train_speed(iter/s)": 0.67232 }, { "epoch": 2.6042157576796194, "grad_norm": 6.0485639572143555, "learning_rate": 4.6760538965422476e-05, "loss": 2.5271127700805662, "memory(GiB)": 72.85, "step": 60785, "token_acc": 0.4930555555555556, "train_speed(iter/s)": 0.672321 }, { "epoch": 2.6044299730088687, "grad_norm": 5.982337474822998, "learning_rate": 4.675382336118559e-05, "loss": 2.3449121475219727, "memory(GiB)": 72.85, "step": 60790, "token_acc": 0.4892703862660944, "train_speed(iter/s)": 0.672318 }, { "epoch": 2.6046441883381175, "grad_norm": 5.788033962249756, "learning_rate": 4.674710781575623e-05, "loss": 1.9687110900878906, "memory(GiB)": 72.85, "step": 60795, "token_acc": 0.5563139931740614, "train_speed(iter/s)": 0.67231 }, { "epoch": 2.6048584036673663, "grad_norm": 6.263704299926758, "learning_rate": 4.674039232925609e-05, "loss": 2.4817434310913087, "memory(GiB)": 72.85, "step": 60800, "token_acc": 0.4280701754385965, "train_speed(iter/s)": 0.67231 }, { "epoch": 2.6050726189966156, "grad_norm": 4.802700996398926, "learning_rate": 4.6733676901806784e-05, "loss": 2.497044563293457, "memory(GiB)": 72.85, "step": 60805, "token_acc": 0.5053003533568905, "train_speed(iter/s)": 0.672313 }, { "epoch": 2.6052868343258644, "grad_norm": 5.589223384857178, "learning_rate": 4.6726961533529975e-05, "loss": 2.523727035522461, "memory(GiB)": 72.85, "step": 60810, "token_acc": 
0.5222222222222223, "train_speed(iter/s)": 0.672315 }, { "epoch": 2.605501049655113, "grad_norm": 5.331908226013184, "learning_rate": 4.672024622454735e-05, "loss": 2.274575424194336, "memory(GiB)": 72.85, "step": 60815, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.6057152649843625, "grad_norm": 7.181542873382568, "learning_rate": 4.671353097498052e-05, "loss": 2.1347623825073243, "memory(GiB)": 72.85, "step": 60820, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.6059294803136113, "grad_norm": 4.3653364181518555, "learning_rate": 4.670681578495118e-05, "loss": 2.0438356399536133, "memory(GiB)": 72.85, "step": 60825, "token_acc": 0.5147058823529411, "train_speed(iter/s)": 0.672298 }, { "epoch": 2.60614369564286, "grad_norm": 3.9577112197875977, "learning_rate": 4.6700100654580946e-05, "loss": 2.448482322692871, "memory(GiB)": 72.85, "step": 60830, "token_acc": 0.5124223602484472, "train_speed(iter/s)": 0.672293 }, { "epoch": 2.6063579109721093, "grad_norm": 5.474624156951904, "learning_rate": 4.669338558399148e-05, "loss": 2.0928510665893554, "memory(GiB)": 72.85, "step": 60835, "token_acc": 0.5201342281879194, "train_speed(iter/s)": 0.672284 }, { "epoch": 2.606572126301358, "grad_norm": 4.167236804962158, "learning_rate": 4.668667057330445e-05, "loss": 2.1416748046875, "memory(GiB)": 72.85, "step": 60840, "token_acc": 0.5276872964169381, "train_speed(iter/s)": 0.672279 }, { "epoch": 2.606786341630607, "grad_norm": 5.392500400543213, "learning_rate": 4.667995562264149e-05, "loss": 2.184613800048828, "memory(GiB)": 72.85, "step": 60845, "token_acc": 0.5314685314685315, "train_speed(iter/s)": 0.67228 }, { "epoch": 2.607000556959856, "grad_norm": 4.14462423324585, "learning_rate": 4.667324073212422e-05, "loss": 1.8063724517822266, "memory(GiB)": 72.85, "step": 60850, "token_acc": 0.5894039735099338, "train_speed(iter/s)": 0.67229 }, { "epoch": 2.607214772289105, "grad_norm": 6.01522970199585, 
"learning_rate": 4.666652590187434e-05, "loss": 2.290627288818359, "memory(GiB)": 72.85, "step": 60855, "token_acc": 0.4812286689419795, "train_speed(iter/s)": 0.672304 }, { "epoch": 2.607428987618354, "grad_norm": 4.329615592956543, "learning_rate": 4.665981113201347e-05, "loss": 2.3119684219360352, "memory(GiB)": 72.85, "step": 60860, "token_acc": 0.4853801169590643, "train_speed(iter/s)": 0.672302 }, { "epoch": 2.607643202947603, "grad_norm": 6.676603317260742, "learning_rate": 4.6653096422663245e-05, "loss": 2.3325883865356447, "memory(GiB)": 72.85, "step": 60865, "token_acc": 0.5359712230215827, "train_speed(iter/s)": 0.672304 }, { "epoch": 2.607857418276852, "grad_norm": 5.6893792152404785, "learning_rate": 4.6646381773945336e-05, "loss": 2.1723533630371095, "memory(GiB)": 72.85, "step": 60870, "token_acc": 0.5387096774193548, "train_speed(iter/s)": 0.672315 }, { "epoch": 2.6080716336061007, "grad_norm": 5.461493968963623, "learning_rate": 4.663966718598136e-05, "loss": 2.474455642700195, "memory(GiB)": 72.85, "step": 60875, "token_acc": 0.4965753424657534, "train_speed(iter/s)": 0.672311 }, { "epoch": 2.60828584893535, "grad_norm": 4.889886856079102, "learning_rate": 4.663295265889295e-05, "loss": 2.3184123992919923, "memory(GiB)": 72.85, "step": 60880, "token_acc": 0.5287769784172662, "train_speed(iter/s)": 0.672318 }, { "epoch": 2.608500064264599, "grad_norm": 4.5687761306762695, "learning_rate": 4.662623819280178e-05, "loss": 2.283460998535156, "memory(GiB)": 72.85, "step": 60885, "token_acc": 0.5092592592592593, "train_speed(iter/s)": 0.672303 }, { "epoch": 2.6087142795938476, "grad_norm": 5.4298834800720215, "learning_rate": 4.661952378782947e-05, "loss": 2.234957695007324, "memory(GiB)": 72.85, "step": 60890, "token_acc": 0.5092250922509225, "train_speed(iter/s)": 0.672307 }, { "epoch": 2.608928494923097, "grad_norm": 8.001158714294434, "learning_rate": 4.6612809444097646e-05, "loss": 2.6049049377441404, "memory(GiB)": 72.85, "step": 60895, 
"token_acc": 0.4594594594594595, "train_speed(iter/s)": 0.672311 }, { "epoch": 2.6091427102523457, "grad_norm": 3.5366196632385254, "learning_rate": 4.660609516172797e-05, "loss": 2.1799432754516603, "memory(GiB)": 72.85, "step": 60900, "token_acc": 0.50187265917603, "train_speed(iter/s)": 0.672313 }, { "epoch": 2.6093569255815945, "grad_norm": 4.221365928649902, "learning_rate": 4.659938094084205e-05, "loss": 2.3516513824462892, "memory(GiB)": 72.85, "step": 60905, "token_acc": 0.5258358662613982, "train_speed(iter/s)": 0.672312 }, { "epoch": 2.6095711409108437, "grad_norm": 4.9613213539123535, "learning_rate": 4.659266678156154e-05, "loss": 2.5165918350219725, "memory(GiB)": 72.85, "step": 60910, "token_acc": 0.4735202492211838, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.6097853562400926, "grad_norm": 4.368021488189697, "learning_rate": 4.658595268400807e-05, "loss": 2.3041446685791014, "memory(GiB)": 72.85, "step": 60915, "token_acc": 0.5038461538461538, "train_speed(iter/s)": 0.672318 }, { "epoch": 2.6099995715693414, "grad_norm": 4.564587116241455, "learning_rate": 4.657923864830325e-05, "loss": 2.2412647247314452, "memory(GiB)": 72.85, "step": 60920, "token_acc": 0.5152542372881356, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.6102137868985906, "grad_norm": 4.24306583404541, "learning_rate": 4.657252467456874e-05, "loss": 2.2534612655639648, "memory(GiB)": 72.85, "step": 60925, "token_acc": 0.4726027397260274, "train_speed(iter/s)": 0.672315 }, { "epoch": 2.6104280022278394, "grad_norm": 4.316865921020508, "learning_rate": 4.6565810762926175e-05, "loss": 2.063827896118164, "memory(GiB)": 72.85, "step": 60930, "token_acc": 0.5107913669064749, "train_speed(iter/s)": 0.672332 }, { "epoch": 2.6106422175570883, "grad_norm": 6.1367011070251465, "learning_rate": 4.655909691349717e-05, "loss": 2.3010076522827148, "memory(GiB)": 72.85, "step": 60935, "token_acc": 0.525691699604743, "train_speed(iter/s)": 0.672332 }, { "epoch": 2.6108564328863375, "grad_norm": 
5.636641025543213, "learning_rate": 4.6552383126403335e-05, "loss": 2.376104164123535, "memory(GiB)": 72.85, "step": 60940, "token_acc": 0.46511627906976744, "train_speed(iter/s)": 0.67234 }, { "epoch": 2.6110706482155863, "grad_norm": 4.206455707550049, "learning_rate": 4.6545669401766334e-05, "loss": 2.1005886077880858, "memory(GiB)": 72.85, "step": 60945, "token_acc": 0.5521885521885522, "train_speed(iter/s)": 0.672338 }, { "epoch": 2.611284863544835, "grad_norm": 4.474532604217529, "learning_rate": 4.653895573970777e-05, "loss": 2.305676078796387, "memory(GiB)": 72.85, "step": 60950, "token_acc": 0.5234657039711191, "train_speed(iter/s)": 0.672338 }, { "epoch": 2.6114990788740844, "grad_norm": 4.067724704742432, "learning_rate": 4.653224214034925e-05, "loss": 2.2436174392700194, "memory(GiB)": 72.85, "step": 60955, "token_acc": 0.511864406779661, "train_speed(iter/s)": 0.672342 }, { "epoch": 2.611713294203333, "grad_norm": 5.519034385681152, "learning_rate": 4.6525528603812434e-05, "loss": 2.2002668380737305, "memory(GiB)": 72.85, "step": 60960, "token_acc": 0.51, "train_speed(iter/s)": 0.672354 }, { "epoch": 2.611927509532582, "grad_norm": 5.863396644592285, "learning_rate": 4.6518815130218915e-05, "loss": 2.4144243240356444, "memory(GiB)": 72.85, "step": 60965, "token_acc": 0.4928571428571429, "train_speed(iter/s)": 0.672355 }, { "epoch": 2.6121417248618313, "grad_norm": 4.713258743286133, "learning_rate": 4.651210171969034e-05, "loss": 2.391593170166016, "memory(GiB)": 72.85, "step": 60970, "token_acc": 0.4688427299703264, "train_speed(iter/s)": 0.672352 }, { "epoch": 2.61235594019108, "grad_norm": 5.026242733001709, "learning_rate": 4.650538837234831e-05, "loss": 1.984634780883789, "memory(GiB)": 72.85, "step": 60975, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672346 }, { "epoch": 2.612570155520329, "grad_norm": 4.787406921386719, "learning_rate": 4.6498675088314434e-05, "loss": 2.144248390197754, "memory(GiB)": 72.85, "step": 60980, 
"token_acc": 0.5207547169811321, "train_speed(iter/s)": 0.672359 }, { "epoch": 2.612784370849578, "grad_norm": 4.696533203125, "learning_rate": 4.649196186771036e-05, "loss": 2.192066955566406, "memory(GiB)": 72.85, "step": 60985, "token_acc": 0.5245901639344263, "train_speed(iter/s)": 0.672361 }, { "epoch": 2.612998586178827, "grad_norm": 5.679023742675781, "learning_rate": 4.6485248710657686e-05, "loss": 2.070036697387695, "memory(GiB)": 72.85, "step": 60990, "token_acc": 0.5130718954248366, "train_speed(iter/s)": 0.672374 }, { "epoch": 2.6132128015080758, "grad_norm": 6.091338634490967, "learning_rate": 4.6478535617278e-05, "loss": 1.972098159790039, "memory(GiB)": 72.85, "step": 60995, "token_acc": 0.5367965367965368, "train_speed(iter/s)": 0.67238 }, { "epoch": 2.613427016837325, "grad_norm": 5.080183982849121, "learning_rate": 4.6471822587692965e-05, "loss": 2.063210296630859, "memory(GiB)": 72.85, "step": 61000, "token_acc": 0.5379061371841155, "train_speed(iter/s)": 0.672392 }, { "epoch": 2.613427016837325, "eval_loss": 2.1216864585876465, "eval_runtime": 15.5499, "eval_samples_per_second": 6.431, "eval_steps_per_second": 6.431, "eval_token_acc": 0.513677811550152, "step": 61000 }, { "epoch": 2.613641232166574, "grad_norm": 5.738321781158447, "learning_rate": 4.646510962202418e-05, "loss": 2.3285226821899414, "memory(GiB)": 72.85, "step": 61005, "token_acc": 0.5172774869109947, "train_speed(iter/s)": 0.672256 }, { "epoch": 2.6138554474958227, "grad_norm": 6.72853422164917, "learning_rate": 4.6458396720393246e-05, "loss": 2.2979469299316406, "memory(GiB)": 72.85, "step": 61010, "token_acc": 0.5171102661596958, "train_speed(iter/s)": 0.672261 }, { "epoch": 2.614069662825072, "grad_norm": 6.0330400466918945, "learning_rate": 4.645168388292178e-05, "loss": 2.436148262023926, "memory(GiB)": 72.85, "step": 61015, "token_acc": 0.5057034220532319, "train_speed(iter/s)": 0.672266 }, { "epoch": 2.6142838781543207, "grad_norm": 4.061287879943848, "learning_rate": 
4.644497110973139e-05, "loss": 2.184595489501953, "memory(GiB)": 72.85, "step": 61020, "token_acc": 0.5247813411078717, "train_speed(iter/s)": 0.672278 }, { "epoch": 2.6144980934835695, "grad_norm": 4.536505222320557, "learning_rate": 4.643825840094368e-05, "loss": 2.3850563049316404, "memory(GiB)": 72.85, "step": 61025, "token_acc": 0.5, "train_speed(iter/s)": 0.672271 }, { "epoch": 2.614712308812819, "grad_norm": 6.186124324798584, "learning_rate": 4.643154575668026e-05, "loss": 2.623118782043457, "memory(GiB)": 72.85, "step": 61030, "token_acc": 0.45112781954887216, "train_speed(iter/s)": 0.672263 }, { "epoch": 2.6149265241420676, "grad_norm": 5.1305131912231445, "learning_rate": 4.6424833177062744e-05, "loss": 2.3135467529296876, "memory(GiB)": 72.85, "step": 61035, "token_acc": 0.490272373540856, "train_speed(iter/s)": 0.672256 }, { "epoch": 2.6151407394713164, "grad_norm": 3.8094491958618164, "learning_rate": 4.641812066221271e-05, "loss": 2.3316371917724608, "memory(GiB)": 72.85, "step": 61040, "token_acc": 0.5019305019305019, "train_speed(iter/s)": 0.672251 }, { "epoch": 2.6153549548005657, "grad_norm": 3.8336856365203857, "learning_rate": 4.64114082122518e-05, "loss": 1.9514785766601563, "memory(GiB)": 72.85, "step": 61045, "token_acc": 0.5584415584415584, "train_speed(iter/s)": 0.672257 }, { "epoch": 2.6155691701298145, "grad_norm": 4.382042407989502, "learning_rate": 4.6404695827301585e-05, "loss": 2.1314550399780274, "memory(GiB)": 72.85, "step": 61050, "token_acc": 0.5514705882352942, "train_speed(iter/s)": 0.672265 }, { "epoch": 2.6157833854590633, "grad_norm": 4.2343058586120605, "learning_rate": 4.639798350748369e-05, "loss": 2.1655601501464843, "memory(GiB)": 72.85, "step": 61055, "token_acc": 0.5017182130584192, "train_speed(iter/s)": 0.672251 }, { "epoch": 2.6159976007883126, "grad_norm": 5.10247802734375, "learning_rate": 4.639127125291969e-05, "loss": 2.1676956176757813, "memory(GiB)": 72.85, "step": 61060, "token_acc": 0.5272727272727272, 
"train_speed(iter/s)": 0.672271 }, { "epoch": 2.6162118161175614, "grad_norm": 4.102449893951416, "learning_rate": 4.63845590637312e-05, "loss": 2.396062660217285, "memory(GiB)": 72.85, "step": 61065, "token_acc": 0.48615384615384616, "train_speed(iter/s)": 0.672259 }, { "epoch": 2.61642603144681, "grad_norm": 4.694916248321533, "learning_rate": 4.63778469400398e-05, "loss": 2.1628557205200196, "memory(GiB)": 72.85, "step": 61070, "token_acc": 0.5275862068965518, "train_speed(iter/s)": 0.672251 }, { "epoch": 2.6166402467760594, "grad_norm": 4.368826389312744, "learning_rate": 4.6371134881967114e-05, "loss": 2.3553632736206054, "memory(GiB)": 72.85, "step": 61075, "token_acc": 0.5121212121212121, "train_speed(iter/s)": 0.672254 }, { "epoch": 2.6168544621053083, "grad_norm": 5.785443305969238, "learning_rate": 4.636442288963473e-05, "loss": 2.083486557006836, "memory(GiB)": 72.85, "step": 61080, "token_acc": 0.5344827586206896, "train_speed(iter/s)": 0.672254 }, { "epoch": 2.617068677434557, "grad_norm": 5.189540863037109, "learning_rate": 4.635771096316423e-05, "loss": 2.3271146774291993, "memory(GiB)": 72.85, "step": 61085, "token_acc": 0.4968152866242038, "train_speed(iter/s)": 0.672273 }, { "epoch": 2.6172828927638063, "grad_norm": 5.62531852722168, "learning_rate": 4.635099910267722e-05, "loss": 2.3528764724731444, "memory(GiB)": 72.85, "step": 61090, "token_acc": 0.5034722222222222, "train_speed(iter/s)": 0.67228 }, { "epoch": 2.617497108093055, "grad_norm": 5.642643451690674, "learning_rate": 4.634428730829528e-05, "loss": 2.3344940185546874, "memory(GiB)": 72.85, "step": 61095, "token_acc": 0.49514563106796117, "train_speed(iter/s)": 0.672282 }, { "epoch": 2.617711323422304, "grad_norm": 4.422712802886963, "learning_rate": 4.6337575580139994e-05, "loss": 2.2899215698242186, "memory(GiB)": 72.85, "step": 61100, "token_acc": 0.5087108013937283, "train_speed(iter/s)": 0.672283 }, { "epoch": 2.617925538751553, "grad_norm": 5.458566188812256, "learning_rate": 
4.633086391833298e-05, "loss": 2.2482662200927734, "memory(GiB)": 72.85, "step": 61105, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.672281 }, { "epoch": 2.618139754080802, "grad_norm": 3.9724018573760986, "learning_rate": 4.632415232299579e-05, "loss": 2.4559574127197266, "memory(GiB)": 72.85, "step": 61110, "token_acc": 0.5068493150684932, "train_speed(iter/s)": 0.672275 }, { "epoch": 2.618353969410051, "grad_norm": 4.459893226623535, "learning_rate": 4.631744079425004e-05, "loss": 2.0254589080810548, "memory(GiB)": 72.85, "step": 61115, "token_acc": 0.611336032388664, "train_speed(iter/s)": 0.672269 }, { "epoch": 2.6185681847393, "grad_norm": 6.022346019744873, "learning_rate": 4.631072933221731e-05, "loss": 2.3523832321166993, "memory(GiB)": 72.85, "step": 61120, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672275 }, { "epoch": 2.618782400068549, "grad_norm": 3.597762107849121, "learning_rate": 4.630401793701916e-05, "loss": 1.9987375259399414, "memory(GiB)": 72.85, "step": 61125, "token_acc": 0.5415282392026578, "train_speed(iter/s)": 0.672272 }, { "epoch": 2.6189966153977977, "grad_norm": 4.369085311889648, "learning_rate": 4.6297306608777214e-05, "loss": 2.212189865112305, "memory(GiB)": 72.85, "step": 61130, "token_acc": 0.4948805460750853, "train_speed(iter/s)": 0.672271 }, { "epoch": 2.619210830727047, "grad_norm": 6.148866176605225, "learning_rate": 4.6290595347613024e-05, "loss": 1.969095230102539, "memory(GiB)": 72.85, "step": 61135, "token_acc": 0.5720524017467249, "train_speed(iter/s)": 0.672259 }, { "epoch": 2.619425046056296, "grad_norm": 5.172264575958252, "learning_rate": 4.6283884153648174e-05, "loss": 2.191398048400879, "memory(GiB)": 72.85, "step": 61140, "token_acc": 0.4935483870967742, "train_speed(iter/s)": 0.672251 }, { "epoch": 2.6196392613855446, "grad_norm": 3.95627760887146, "learning_rate": 4.627717302700423e-05, "loss": 2.435214614868164, "memory(GiB)": 72.85, "step": 61145, "token_acc": 0.4984709480122324, 
"train_speed(iter/s)": 0.672251 }, { "epoch": 2.619853476714794, "grad_norm": 5.096535682678223, "learning_rate": 4.6270461967802814e-05, "loss": 2.392613410949707, "memory(GiB)": 72.85, "step": 61150, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.672237 }, { "epoch": 2.6200676920440427, "grad_norm": 4.883724689483643, "learning_rate": 4.6263750976165464e-05, "loss": 2.344967269897461, "memory(GiB)": 72.85, "step": 61155, "token_acc": 0.5205479452054794, "train_speed(iter/s)": 0.672239 }, { "epoch": 2.6202819073732915, "grad_norm": 4.584829330444336, "learning_rate": 4.625704005221379e-05, "loss": 2.2121572494506836, "memory(GiB)": 72.85, "step": 61160, "token_acc": 0.515358361774744, "train_speed(iter/s)": 0.672241 }, { "epoch": 2.6204961227025407, "grad_norm": 6.7488813400268555, "learning_rate": 4.625032919606934e-05, "loss": 2.2852245330810548, "memory(GiB)": 72.85, "step": 61165, "token_acc": 0.5152439024390244, "train_speed(iter/s)": 0.672247 }, { "epoch": 2.6207103380317895, "grad_norm": 5.155365467071533, "learning_rate": 4.6243618407853684e-05, "loss": 1.9505228042602538, "memory(GiB)": 72.85, "step": 61170, "token_acc": 0.6024590163934426, "train_speed(iter/s)": 0.672254 }, { "epoch": 2.6209245533610384, "grad_norm": 5.753386974334717, "learning_rate": 4.6236907687688415e-05, "loss": 2.046046829223633, "memory(GiB)": 72.85, "step": 61175, "token_acc": 0.5461254612546126, "train_speed(iter/s)": 0.672258 }, { "epoch": 2.6211387686902876, "grad_norm": 5.663280487060547, "learning_rate": 4.6230197035695095e-05, "loss": 2.5902406692504885, "memory(GiB)": 72.85, "step": 61180, "token_acc": 0.44086021505376344, "train_speed(iter/s)": 0.67226 }, { "epoch": 2.6213529840195364, "grad_norm": 5.379512310028076, "learning_rate": 4.622348645199528e-05, "loss": 2.1675865173339846, "memory(GiB)": 72.85, "step": 61185, "token_acc": 0.5092936802973977, "train_speed(iter/s)": 0.672262 }, { "epoch": 2.6215671993487852, "grad_norm": 4.799404621124268, 
"learning_rate": 4.621677593671056e-05, "loss": 2.1895103454589844, "memory(GiB)": 72.85, "step": 61190, "token_acc": 0.5109034267912772, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.6217814146780345, "grad_norm": 4.507132053375244, "learning_rate": 4.6210065489962486e-05, "loss": 2.453957748413086, "memory(GiB)": 72.85, "step": 61195, "token_acc": 0.476038338658147, "train_speed(iter/s)": 0.672285 }, { "epoch": 2.6219956300072833, "grad_norm": 4.6098737716674805, "learning_rate": 4.620335511187264e-05, "loss": 1.9892715454101562, "memory(GiB)": 72.85, "step": 61200, "token_acc": 0.5370370370370371, "train_speed(iter/s)": 0.672277 }, { "epoch": 2.622209845336532, "grad_norm": 6.712203025817871, "learning_rate": 4.619664480256258e-05, "loss": 2.0411565780639647, "memory(GiB)": 72.85, "step": 61205, "token_acc": 0.5458015267175572, "train_speed(iter/s)": 0.672274 }, { "epoch": 2.6224240606657814, "grad_norm": 5.045285224914551, "learning_rate": 4.618993456215385e-05, "loss": 2.303826904296875, "memory(GiB)": 72.85, "step": 61210, "token_acc": 0.5, "train_speed(iter/s)": 0.672267 }, { "epoch": 2.62263827599503, "grad_norm": 7.35578727722168, "learning_rate": 4.618322439076802e-05, "loss": 2.220542144775391, "memory(GiB)": 72.85, "step": 61215, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.672271 }, { "epoch": 2.622852491324279, "grad_norm": 4.420370578765869, "learning_rate": 4.617651428852668e-05, "loss": 1.9041419982910157, "memory(GiB)": 72.85, "step": 61220, "token_acc": 0.5775862068965517, "train_speed(iter/s)": 0.672275 }, { "epoch": 2.6230667066535283, "grad_norm": 4.764657497406006, "learning_rate": 4.616980425555138e-05, "loss": 2.4043289184570313, "memory(GiB)": 72.85, "step": 61225, "token_acc": 0.5050847457627119, "train_speed(iter/s)": 0.672272 }, { "epoch": 2.623280921982777, "grad_norm": 5.096610069274902, "learning_rate": 4.616309429196365e-05, "loss": 2.6054176330566405, "memory(GiB)": 72.85, "step": 61230, "token_acc": 
0.4837662337662338, "train_speed(iter/s)": 0.67227 }, { "epoch": 2.623495137312026, "grad_norm": 3.9845235347747803, "learning_rate": 4.615638439788508e-05, "loss": 2.160262680053711, "memory(GiB)": 72.85, "step": 61235, "token_acc": 0.5047021943573667, "train_speed(iter/s)": 0.67228 }, { "epoch": 2.623709352641275, "grad_norm": 5.22115421295166, "learning_rate": 4.614967457343721e-05, "loss": 1.9878759384155273, "memory(GiB)": 72.85, "step": 61240, "token_acc": 0.5381818181818182, "train_speed(iter/s)": 0.672282 }, { "epoch": 2.623923567970524, "grad_norm": 4.376987934112549, "learning_rate": 4.614296481874159e-05, "loss": 2.3709009170532225, "memory(GiB)": 72.85, "step": 61245, "token_acc": 0.5030120481927711, "train_speed(iter/s)": 0.67229 }, { "epoch": 2.6241377832997728, "grad_norm": 4.019251346588135, "learning_rate": 4.613625513391979e-05, "loss": 2.418350601196289, "memory(GiB)": 72.85, "step": 61250, "token_acc": 0.5071428571428571, "train_speed(iter/s)": 0.672298 }, { "epoch": 2.624351998629022, "grad_norm": 4.268087863922119, "learning_rate": 4.612954551909334e-05, "loss": 2.290439224243164, "memory(GiB)": 72.85, "step": 61255, "token_acc": 0.5331325301204819, "train_speed(iter/s)": 0.672309 }, { "epoch": 2.624566213958271, "grad_norm": 5.567800521850586, "learning_rate": 4.612283597438382e-05, "loss": 2.023089599609375, "memory(GiB)": 72.85, "step": 61260, "token_acc": 0.5350877192982456, "train_speed(iter/s)": 0.672317 }, { "epoch": 2.6247804292875196, "grad_norm": 4.958776473999023, "learning_rate": 4.611612649991276e-05, "loss": 2.116927719116211, "memory(GiB)": 72.85, "step": 61265, "token_acc": 0.5372168284789643, "train_speed(iter/s)": 0.672313 }, { "epoch": 2.624994644616769, "grad_norm": 4.701255798339844, "learning_rate": 4.61094170958017e-05, "loss": 2.4791587829589843, "memory(GiB)": 72.85, "step": 61270, "token_acc": 0.45821325648414984, "train_speed(iter/s)": 0.672324 }, { "epoch": 2.6252088599460177, "grad_norm": 5.27418327331543, 
"learning_rate": 4.610270776217221e-05, "loss": 2.181836700439453, "memory(GiB)": 72.85, "step": 61275, "token_acc": 0.5300751879699248, "train_speed(iter/s)": 0.672326 }, { "epoch": 2.6254230752752665, "grad_norm": 5.219360828399658, "learning_rate": 4.6095998499145825e-05, "loss": 2.048427391052246, "memory(GiB)": 72.85, "step": 61280, "token_acc": 0.574750830564784, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.625637290604516, "grad_norm": 4.385364532470703, "learning_rate": 4.608928930684408e-05, "loss": 2.1528770446777346, "memory(GiB)": 72.85, "step": 61285, "token_acc": 0.5205992509363296, "train_speed(iter/s)": 0.672313 }, { "epoch": 2.6258515059337646, "grad_norm": 6.331132411956787, "learning_rate": 4.608258018538851e-05, "loss": 2.2575746536254884, "memory(GiB)": 72.85, "step": 61290, "token_acc": 0.5037037037037037, "train_speed(iter/s)": 0.672323 }, { "epoch": 2.6260657212630134, "grad_norm": 8.14461612701416, "learning_rate": 4.607587113490071e-05, "loss": 2.365000343322754, "memory(GiB)": 72.85, "step": 61295, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.6262799365922627, "grad_norm": 4.240405082702637, "learning_rate": 4.606916215550216e-05, "loss": 2.493427276611328, "memory(GiB)": 72.85, "step": 61300, "token_acc": 0.5, "train_speed(iter/s)": 0.672335 }, { "epoch": 2.6264941519215115, "grad_norm": 4.407451152801514, "learning_rate": 4.6062453247314443e-05, "loss": 2.3384140014648436, "memory(GiB)": 72.85, "step": 61305, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672352 }, { "epoch": 2.6267083672507603, "grad_norm": 4.85334587097168, "learning_rate": 4.605574441045908e-05, "loss": 2.2069465637207033, "memory(GiB)": 72.85, "step": 61310, "token_acc": 0.5214521452145214, "train_speed(iter/s)": 0.672358 }, { "epoch": 2.6269225825800095, "grad_norm": 5.899469375610352, "learning_rate": 4.60490356450576e-05, "loss": 2.568733978271484, "memory(GiB)": 72.85, "step": 61315, "token_acc": 
0.47941176470588237, "train_speed(iter/s)": 0.672378 }, { "epoch": 2.6271367979092584, "grad_norm": 7.6661505699157715, "learning_rate": 4.604232695123155e-05, "loss": 2.233448791503906, "memory(GiB)": 72.85, "step": 61320, "token_acc": 0.5846153846153846, "train_speed(iter/s)": 0.672372 }, { "epoch": 2.627351013238507, "grad_norm": 6.405954837799072, "learning_rate": 4.603561832910246e-05, "loss": 2.195160675048828, "memory(GiB)": 72.85, "step": 61325, "token_acc": 0.5068493150684932, "train_speed(iter/s)": 0.672377 }, { "epoch": 2.6275652285677564, "grad_norm": 12.071722984313965, "learning_rate": 4.6028909778791853e-05, "loss": 2.260754203796387, "memory(GiB)": 72.85, "step": 61330, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.672379 }, { "epoch": 2.6277794438970052, "grad_norm": 5.349226951599121, "learning_rate": 4.602220130042128e-05, "loss": 1.9230159759521483, "memory(GiB)": 72.85, "step": 61335, "token_acc": 0.5890909090909091, "train_speed(iter/s)": 0.672378 }, { "epoch": 2.627993659226254, "grad_norm": 4.2045183181762695, "learning_rate": 4.6015492894112256e-05, "loss": 2.0980754852294923, "memory(GiB)": 72.85, "step": 61340, "token_acc": 0.5436241610738255, "train_speed(iter/s)": 0.672376 }, { "epoch": 2.6282078745555033, "grad_norm": 4.170137405395508, "learning_rate": 4.600878455998632e-05, "loss": 2.3739933013916015, "memory(GiB)": 72.85, "step": 61345, "token_acc": 0.4918032786885246, "train_speed(iter/s)": 0.672381 }, { "epoch": 2.628422089884752, "grad_norm": 5.776815891265869, "learning_rate": 4.6002076298164995e-05, "loss": 2.3814815521240233, "memory(GiB)": 72.85, "step": 61350, "token_acc": 0.4774193548387097, "train_speed(iter/s)": 0.672377 }, { "epoch": 2.628636305214001, "grad_norm": 4.760187149047852, "learning_rate": 4.599536810876981e-05, "loss": 1.9172346115112304, "memory(GiB)": 72.85, "step": 61355, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672372 }, { "epoch": 2.62885052054325, "grad_norm": 
5.9121994972229, "learning_rate": 4.598865999192229e-05, "loss": 2.3836193084716797, "memory(GiB)": 72.85, "step": 61360, "token_acc": 0.4809384164222874, "train_speed(iter/s)": 0.672374 }, { "epoch": 2.629064735872499, "grad_norm": 8.003654479980469, "learning_rate": 4.598195194774393e-05, "loss": 2.288864326477051, "memory(GiB)": 72.85, "step": 61365, "token_acc": 0.5130111524163569, "train_speed(iter/s)": 0.672382 }, { "epoch": 2.629278951201748, "grad_norm": 4.539325714111328, "learning_rate": 4.5975243976356316e-05, "loss": 2.1857086181640626, "memory(GiB)": 72.85, "step": 61370, "token_acc": 0.5340909090909091, "train_speed(iter/s)": 0.672372 }, { "epoch": 2.629493166530997, "grad_norm": 5.174936771392822, "learning_rate": 4.596853607788091e-05, "loss": 2.3640552520751954, "memory(GiB)": 72.85, "step": 61375, "token_acc": 0.518840579710145, "train_speed(iter/s)": 0.672367 }, { "epoch": 2.629707381860246, "grad_norm": 4.703927993774414, "learning_rate": 4.596182825243927e-05, "loss": 2.1812366485595702, "memory(GiB)": 72.85, "step": 61380, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.672358 }, { "epoch": 2.6299215971894947, "grad_norm": 5.449645519256592, "learning_rate": 4.5955120500152895e-05, "loss": 2.1232696533203126, "memory(GiB)": 72.85, "step": 61385, "token_acc": 0.562015503875969, "train_speed(iter/s)": 0.672366 }, { "epoch": 2.630135812518744, "grad_norm": 6.278059482574463, "learning_rate": 4.59484128211433e-05, "loss": 2.5282470703125, "memory(GiB)": 72.85, "step": 61390, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.672362 }, { "epoch": 2.6303500278479928, "grad_norm": 5.043061256408691, "learning_rate": 4.594170521553202e-05, "loss": 2.409788703918457, "memory(GiB)": 72.85, "step": 61395, "token_acc": 0.5451505016722408, "train_speed(iter/s)": 0.672359 }, { "epoch": 2.6305642431772416, "grad_norm": 6.152508735656738, "learning_rate": 4.5934997683440546e-05, "loss": 2.4170867919921877, "memory(GiB)": 72.85, "step": 
61400, "token_acc": 0.48846153846153845, "train_speed(iter/s)": 0.672374 }, { "epoch": 2.630778458506491, "grad_norm": 4.6523237228393555, "learning_rate": 4.592829022499041e-05, "loss": 2.3605785369873047, "memory(GiB)": 72.85, "step": 61405, "token_acc": 0.46178343949044587, "train_speed(iter/s)": 0.672379 }, { "epoch": 2.6309926738357396, "grad_norm": 6.869020938873291, "learning_rate": 4.592158284030311e-05, "loss": 2.1287593841552734, "memory(GiB)": 72.85, "step": 61410, "token_acc": 0.496875, "train_speed(iter/s)": 0.672382 }, { "epoch": 2.6312068891649885, "grad_norm": 4.832738399505615, "learning_rate": 4.5914875529500165e-05, "loss": 2.4140621185302735, "memory(GiB)": 72.85, "step": 61415, "token_acc": 0.4809688581314879, "train_speed(iter/s)": 0.672389 }, { "epoch": 2.6314211044942377, "grad_norm": 5.934863090515137, "learning_rate": 4.5908168292703085e-05, "loss": 2.042354774475098, "memory(GiB)": 72.85, "step": 61420, "token_acc": 0.5147058823529411, "train_speed(iter/s)": 0.672398 }, { "epoch": 2.6316353198234865, "grad_norm": 4.100157737731934, "learning_rate": 4.590146113003338e-05, "loss": 2.322557067871094, "memory(GiB)": 72.85, "step": 61425, "token_acc": 0.5111940298507462, "train_speed(iter/s)": 0.672405 }, { "epoch": 2.6318495351527353, "grad_norm": 5.030875205993652, "learning_rate": 4.5894754041612534e-05, "loss": 2.242911148071289, "memory(GiB)": 72.85, "step": 61430, "token_acc": 0.4931972789115646, "train_speed(iter/s)": 0.672403 }, { "epoch": 2.6320637504819846, "grad_norm": 5.449295997619629, "learning_rate": 4.5888047027562087e-05, "loss": 2.404535484313965, "memory(GiB)": 72.85, "step": 61435, "token_acc": 0.492, "train_speed(iter/s)": 0.672414 }, { "epoch": 2.6322779658112334, "grad_norm": 4.398072719573975, "learning_rate": 4.58813400880035e-05, "loss": 2.0466129302978517, "memory(GiB)": 72.85, "step": 61440, "token_acc": 0.5361216730038023, "train_speed(iter/s)": 0.672416 }, { "epoch": 2.6324921811404822, "grad_norm": 
5.330578327178955, "learning_rate": 4.5874633223058313e-05, "loss": 2.219285583496094, "memory(GiB)": 72.85, "step": 61445, "token_acc": 0.565068493150685, "train_speed(iter/s)": 0.672413 }, { "epoch": 2.6327063964697315, "grad_norm": 4.75368070602417, "learning_rate": 4.586792643284802e-05, "loss": 2.223638916015625, "memory(GiB)": 72.85, "step": 61450, "token_acc": 0.5475409836065573, "train_speed(iter/s)": 0.672415 }, { "epoch": 2.6329206117989803, "grad_norm": 4.846363544464111, "learning_rate": 4.586121971749412e-05, "loss": 2.207935905456543, "memory(GiB)": 72.85, "step": 61455, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.672424 }, { "epoch": 2.633134827128229, "grad_norm": 4.817793369293213, "learning_rate": 4.58545130771181e-05, "loss": 2.282796859741211, "memory(GiB)": 72.85, "step": 61460, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.672413 }, { "epoch": 2.6333490424574784, "grad_norm": 6.487153053283691, "learning_rate": 4.584780651184147e-05, "loss": 2.0861799240112306, "memory(GiB)": 72.85, "step": 61465, "token_acc": 0.54421768707483, "train_speed(iter/s)": 0.672421 }, { "epoch": 2.633563257786727, "grad_norm": 5.527267932891846, "learning_rate": 4.584110002178572e-05, "loss": 2.257991409301758, "memory(GiB)": 72.85, "step": 61470, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.672423 }, { "epoch": 2.633777473115976, "grad_norm": 5.888284206390381, "learning_rate": 4.583439360707234e-05, "loss": 2.358669471740723, "memory(GiB)": 72.85, "step": 61475, "token_acc": 0.48046875, "train_speed(iter/s)": 0.67243 }, { "epoch": 2.6339916884452252, "grad_norm": 4.631492614746094, "learning_rate": 4.582768726782283e-05, "loss": 2.231521987915039, "memory(GiB)": 72.85, "step": 61480, "token_acc": 0.4980544747081712, "train_speed(iter/s)": 0.672449 }, { "epoch": 2.634205903774474, "grad_norm": 3.814803123474121, "learning_rate": 4.5820981004158676e-05, "loss": 1.993504524230957, "memory(GiB)": 72.85, "step": 61485, 
"token_acc": 0.5543071161048689, "train_speed(iter/s)": 0.67246 }, { "epoch": 2.634420119103723, "grad_norm": 5.650888919830322, "learning_rate": 4.581427481620138e-05, "loss": 2.2500988006591798, "memory(GiB)": 72.85, "step": 61490, "token_acc": 0.5182481751824818, "train_speed(iter/s)": 0.672462 }, { "epoch": 2.634634334432972, "grad_norm": 4.473260879516602, "learning_rate": 4.5807568704072425e-05, "loss": 2.159799575805664, "memory(GiB)": 72.85, "step": 61495, "token_acc": 0.49838187702265374, "train_speed(iter/s)": 0.672455 }, { "epoch": 2.634848549762221, "grad_norm": 4.398794174194336, "learning_rate": 4.580086266789328e-05, "loss": 2.083212471008301, "memory(GiB)": 72.85, "step": 61500, "token_acc": 0.5536332179930796, "train_speed(iter/s)": 0.672441 }, { "epoch": 2.634848549762221, "eval_loss": 1.9457756280899048, "eval_runtime": 15.8671, "eval_samples_per_second": 6.302, "eval_steps_per_second": 6.302, "eval_token_acc": 0.5258019525801952, "step": 61500 }, { "epoch": 2.6350627650914697, "grad_norm": 6.098683834075928, "learning_rate": 4.5794156707785456e-05, "loss": 2.2256748199462892, "memory(GiB)": 72.85, "step": 61505, "token_acc": 0.5230923694779116, "train_speed(iter/s)": 0.672309 }, { "epoch": 2.635276980420719, "grad_norm": 4.004787445068359, "learning_rate": 4.578745082387043e-05, "loss": 2.121513557434082, "memory(GiB)": 72.85, "step": 61510, "token_acc": 0.5267857142857143, "train_speed(iter/s)": 0.672306 }, { "epoch": 2.635491195749968, "grad_norm": 6.076596736907959, "learning_rate": 4.578074501626966e-05, "loss": 2.1817157745361326, "memory(GiB)": 72.85, "step": 61515, "token_acc": 0.5, "train_speed(iter/s)": 0.672318 }, { "epoch": 2.6357054110792166, "grad_norm": 4.331502914428711, "learning_rate": 4.577403928510467e-05, "loss": 1.9754947662353515, "memory(GiB)": 72.85, "step": 61520, "token_acc": 0.5249169435215947, "train_speed(iter/s)": 0.67231 }, { "epoch": 2.635919626408466, "grad_norm": 3.7812156677246094, "learning_rate": 
4.576733363049693e-05, "loss": 2.3730133056640623, "memory(GiB)": 72.85, "step": 61525, "token_acc": 0.4788732394366197, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.6361338417377147, "grad_norm": 4.2958984375, "learning_rate": 4.576062805256791e-05, "loss": 2.0142229080200194, "memory(GiB)": 72.85, "step": 61530, "token_acc": 0.5306859205776173, "train_speed(iter/s)": 0.672309 }, { "epoch": 2.6363480570669635, "grad_norm": 5.411367893218994, "learning_rate": 4.5753922551439085e-05, "loss": 2.0299022674560545, "memory(GiB)": 72.85, "step": 61535, "token_acc": 0.5676691729323309, "train_speed(iter/s)": 0.672308 }, { "epoch": 2.6365622723962128, "grad_norm": 7.050268173217773, "learning_rate": 4.5747217127231946e-05, "loss": 2.5234264373779296, "memory(GiB)": 72.85, "step": 61540, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.672313 }, { "epoch": 2.6367764877254616, "grad_norm": 4.158149242401123, "learning_rate": 4.574051178006794e-05, "loss": 2.160989761352539, "memory(GiB)": 72.85, "step": 61545, "token_acc": 0.5321100917431193, "train_speed(iter/s)": 0.672315 }, { "epoch": 2.6369907030547104, "grad_norm": 4.386519432067871, "learning_rate": 4.573380651006858e-05, "loss": 2.2534152984619142, "memory(GiB)": 72.85, "step": 61550, "token_acc": 0.5387323943661971, "train_speed(iter/s)": 0.672324 }, { "epoch": 2.6372049183839597, "grad_norm": 5.272961139678955, "learning_rate": 4.57271013173553e-05, "loss": 2.0308965682983398, "memory(GiB)": 72.85, "step": 61555, "token_acc": 0.5622775800711743, "train_speed(iter/s)": 0.672336 }, { "epoch": 2.6374191337132085, "grad_norm": 6.528459072113037, "learning_rate": 4.572039620204959e-05, "loss": 2.1821208953857423, "memory(GiB)": 72.85, "step": 61560, "token_acc": 0.5154929577464789, "train_speed(iter/s)": 0.672332 }, { "epoch": 2.6376333490424573, "grad_norm": 5.2091240882873535, "learning_rate": 4.571369116427293e-05, "loss": 2.196282386779785, "memory(GiB)": 72.85, "step": 61565, "token_acc": 
0.5342960288808665, "train_speed(iter/s)": 0.672341 }, { "epoch": 2.6378475643717065, "grad_norm": 4.726507663726807, "learning_rate": 4.570698620414676e-05, "loss": 2.2861965179443358, "memory(GiB)": 72.85, "step": 61570, "token_acc": 0.49201277955271566, "train_speed(iter/s)": 0.672353 }, { "epoch": 2.6380617797009553, "grad_norm": 5.585422992706299, "learning_rate": 4.5700281321792575e-05, "loss": 2.364957809448242, "memory(GiB)": 72.85, "step": 61575, "token_acc": 0.4954128440366973, "train_speed(iter/s)": 0.672338 }, { "epoch": 2.638275995030204, "grad_norm": 5.40962028503418, "learning_rate": 4.5693576517331824e-05, "loss": 2.1873710632324217, "memory(GiB)": 72.85, "step": 61580, "token_acc": 0.5209003215434084, "train_speed(iter/s)": 0.672342 }, { "epoch": 2.6384902103594534, "grad_norm": 3.6480231285095215, "learning_rate": 4.568687179088596e-05, "loss": 2.1023672103881834, "memory(GiB)": 72.85, "step": 61585, "token_acc": 0.5296610169491526, "train_speed(iter/s)": 0.672343 }, { "epoch": 2.6387044256887022, "grad_norm": 5.036264419555664, "learning_rate": 4.5680167142576455e-05, "loss": 2.2856096267700194, "memory(GiB)": 72.85, "step": 61590, "token_acc": 0.4491017964071856, "train_speed(iter/s)": 0.672335 }, { "epoch": 2.638918641017951, "grad_norm": 4.2040839195251465, "learning_rate": 4.56734625725248e-05, "loss": 2.224040412902832, "memory(GiB)": 72.85, "step": 61595, "token_acc": 0.5158730158730159, "train_speed(iter/s)": 0.672341 }, { "epoch": 2.6391328563472003, "grad_norm": 5.339751243591309, "learning_rate": 4.5666758080852425e-05, "loss": 2.433615303039551, "memory(GiB)": 72.85, "step": 61600, "token_acc": 0.5242718446601942, "train_speed(iter/s)": 0.672336 }, { "epoch": 2.639347071676449, "grad_norm": 6.713982105255127, "learning_rate": 4.566005366768078e-05, "loss": 2.4280643463134766, "memory(GiB)": 72.85, "step": 61605, "token_acc": 0.4870967741935484, "train_speed(iter/s)": 0.672346 }, { "epoch": 2.639561287005698, "grad_norm": 
4.860907077789307, "learning_rate": 4.565334933313134e-05, "loss": 2.3165920257568358, "memory(GiB)": 72.85, "step": 61610, "token_acc": 0.46946564885496184, "train_speed(iter/s)": 0.672348 }, { "epoch": 2.639775502334947, "grad_norm": 4.628166675567627, "learning_rate": 4.5646645077325564e-05, "loss": 2.2205825805664063, "memory(GiB)": 72.85, "step": 61615, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.672346 }, { "epoch": 2.639989717664196, "grad_norm": 4.484270095825195, "learning_rate": 4.563994090038488e-05, "loss": 2.152960205078125, "memory(GiB)": 72.85, "step": 61620, "token_acc": 0.49393939393939396, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.640203932993445, "grad_norm": 4.347268104553223, "learning_rate": 4.5633236802430775e-05, "loss": 2.1234140396118164, "memory(GiB)": 72.85, "step": 61625, "token_acc": 0.521311475409836, "train_speed(iter/s)": 0.672324 }, { "epoch": 2.640418148322694, "grad_norm": 4.309386253356934, "learning_rate": 4.5626532783584665e-05, "loss": 2.12322940826416, "memory(GiB)": 72.85, "step": 61630, "token_acc": 0.5242718446601942, "train_speed(iter/s)": 0.67232 }, { "epoch": 2.640632363651943, "grad_norm": 4.010909557342529, "learning_rate": 4.5619828843968026e-05, "loss": 2.0468202590942384, "memory(GiB)": 72.85, "step": 61635, "token_acc": 0.5324675324675324, "train_speed(iter/s)": 0.672325 }, { "epoch": 2.6408465789811917, "grad_norm": 4.85123872756958, "learning_rate": 4.5613124983702294e-05, "loss": 2.227408599853516, "memory(GiB)": 72.85, "step": 61640, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.672333 }, { "epoch": 2.641060794310441, "grad_norm": 6.68330717086792, "learning_rate": 4.560642120290891e-05, "loss": 2.199700355529785, "memory(GiB)": 72.85, "step": 61645, "token_acc": 0.5494880546075085, "train_speed(iter/s)": 0.67233 }, { "epoch": 2.6412750096396898, "grad_norm": 5.4645185470581055, "learning_rate": 4.559971750170934e-05, "loss": 2.185811233520508, "memory(GiB)": 72.85, "step": 
61650, "token_acc": 0.5598705501618123, "train_speed(iter/s)": 0.67234 }, { "epoch": 2.6414892249689386, "grad_norm": 4.387864112854004, "learning_rate": 4.5593013880225015e-05, "loss": 2.215728759765625, "memory(GiB)": 72.85, "step": 61655, "token_acc": 0.5475285171102662, "train_speed(iter/s)": 0.672338 }, { "epoch": 2.641703440298188, "grad_norm": 5.980151653289795, "learning_rate": 4.558631033857735e-05, "loss": 2.3640310287475588, "memory(GiB)": 72.85, "step": 61660, "token_acc": 0.4692556634304207, "train_speed(iter/s)": 0.672342 }, { "epoch": 2.6419176556274366, "grad_norm": 4.768816947937012, "learning_rate": 4.557960687688783e-05, "loss": 2.28957462310791, "memory(GiB)": 72.85, "step": 61665, "token_acc": 0.5253623188405797, "train_speed(iter/s)": 0.672328 }, { "epoch": 2.6421318709566854, "grad_norm": 5.559784412384033, "learning_rate": 4.557290349527789e-05, "loss": 2.052395820617676, "memory(GiB)": 72.85, "step": 61670, "token_acc": 0.49473684210526314, "train_speed(iter/s)": 0.672322 }, { "epoch": 2.6423460862859347, "grad_norm": 3.926823854446411, "learning_rate": 4.5566200193868946e-05, "loss": 2.1849796295166017, "memory(GiB)": 72.85, "step": 61675, "token_acc": 0.504297994269341, "train_speed(iter/s)": 0.672326 }, { "epoch": 2.6425603016151835, "grad_norm": 6.554440021514893, "learning_rate": 4.555949697278245e-05, "loss": 2.384541130065918, "memory(GiB)": 72.85, "step": 61680, "token_acc": 0.47157190635451507, "train_speed(iter/s)": 0.67232 }, { "epoch": 2.6427745169444323, "grad_norm": 4.9554219245910645, "learning_rate": 4.555279383213984e-05, "loss": 2.4338090896606444, "memory(GiB)": 72.85, "step": 61685, "token_acc": 0.4863636363636364, "train_speed(iter/s)": 0.672306 }, { "epoch": 2.6429887322736816, "grad_norm": 5.2241339683532715, "learning_rate": 4.554609077206252e-05, "loss": 2.3142318725585938, "memory(GiB)": 72.85, "step": 61690, "token_acc": 0.5241157556270096, "train_speed(iter/s)": 0.67231 }, { "epoch": 2.6432029476029304, 
"grad_norm": 4.621926784515381, "learning_rate": 4.553938779267196e-05, "loss": 2.2910799026489257, "memory(GiB)": 72.85, "step": 61695, "token_acc": 0.5174418604651163, "train_speed(iter/s)": 0.672321 }, { "epoch": 2.643417162932179, "grad_norm": 4.287178993225098, "learning_rate": 4.553268489408958e-05, "loss": 2.331670951843262, "memory(GiB)": 72.85, "step": 61700, "token_acc": 0.5361842105263158, "train_speed(iter/s)": 0.672333 }, { "epoch": 2.6436313782614285, "grad_norm": 5.887935161590576, "learning_rate": 4.552598207643678e-05, "loss": 2.3529695510864257, "memory(GiB)": 72.85, "step": 61705, "token_acc": 0.49843260188087773, "train_speed(iter/s)": 0.672322 }, { "epoch": 2.6438455935906773, "grad_norm": 4.7804083824157715, "learning_rate": 4.551927933983504e-05, "loss": 2.4267702102661133, "memory(GiB)": 72.85, "step": 61710, "token_acc": 0.50390625, "train_speed(iter/s)": 0.672332 }, { "epoch": 2.644059808919926, "grad_norm": 3.672720193862915, "learning_rate": 4.5512576684405735e-05, "loss": 2.256080436706543, "memory(GiB)": 72.85, "step": 61715, "token_acc": 0.5015479876160991, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.6442740242491753, "grad_norm": 4.238047122955322, "learning_rate": 4.550587411027032e-05, "loss": 2.1869832992553713, "memory(GiB)": 72.85, "step": 61720, "token_acc": 0.571875, "train_speed(iter/s)": 0.672321 }, { "epoch": 2.644488239578424, "grad_norm": 5.0133233070373535, "learning_rate": 4.549917161755022e-05, "loss": 2.337105941772461, "memory(GiB)": 72.85, "step": 61725, "token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.672315 }, { "epoch": 2.644702454907673, "grad_norm": 5.729757785797119, "learning_rate": 4.549246920636684e-05, "loss": 2.6673210144042967, "memory(GiB)": 72.85, "step": 61730, "token_acc": 0.4714285714285714, "train_speed(iter/s)": 0.672322 }, { "epoch": 2.6449166702369222, "grad_norm": 5.983201503753662, "learning_rate": 4.548576687684159e-05, "loss": 2.084936332702637, "memory(GiB)": 72.85, "step": 
61735, "token_acc": 0.5404411764705882, "train_speed(iter/s)": 0.67232 }, { "epoch": 2.645130885566171, "grad_norm": 4.797030925750732, "learning_rate": 4.5479064629095935e-05, "loss": 2.4390848159790037, "memory(GiB)": 72.85, "step": 61740, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.672326 }, { "epoch": 2.64534510089542, "grad_norm": 4.643904209136963, "learning_rate": 4.547236246325126e-05, "loss": 2.11468391418457, "memory(GiB)": 72.85, "step": 61745, "token_acc": 0.5264797507788161, "train_speed(iter/s)": 0.672329 }, { "epoch": 2.645559316224669, "grad_norm": 3.8036859035491943, "learning_rate": 4.5465660379428976e-05, "loss": 2.1023542404174806, "memory(GiB)": 72.85, "step": 61750, "token_acc": 0.5587392550143266, "train_speed(iter/s)": 0.672322 }, { "epoch": 2.645773531553918, "grad_norm": 3.8283944129943848, "learning_rate": 4.545895837775052e-05, "loss": 2.254449653625488, "memory(GiB)": 72.85, "step": 61755, "token_acc": 0.53125, "train_speed(iter/s)": 0.672323 }, { "epoch": 2.6459877468831667, "grad_norm": 6.187100410461426, "learning_rate": 4.545225645833729e-05, "loss": 2.1490108489990236, "memory(GiB)": 72.85, "step": 61760, "token_acc": 0.5153846153846153, "train_speed(iter/s)": 0.672317 }, { "epoch": 2.646201962212416, "grad_norm": 4.853517055511475, "learning_rate": 4.54455546213107e-05, "loss": 2.1606868743896483, "memory(GiB)": 72.85, "step": 61765, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.646416177541665, "grad_norm": 5.436280250549316, "learning_rate": 4.5438852866792164e-05, "loss": 2.3287542343139647, "memory(GiB)": 72.85, "step": 61770, "token_acc": 0.5, "train_speed(iter/s)": 0.672309 }, { "epoch": 2.6466303928709136, "grad_norm": 7.4376630783081055, "learning_rate": 4.5432151194903075e-05, "loss": 2.353993034362793, "memory(GiB)": 72.85, "step": 61775, "token_acc": 0.5252918287937743, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.646844608200163, "grad_norm": 4.157412052154541, 
"learning_rate": 4.542544960576486e-05, "loss": 2.372935104370117, "memory(GiB)": 72.85, "step": 61780, "token_acc": 0.527972027972028, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.6470588235294117, "grad_norm": 4.454836845397949, "learning_rate": 4.5418748099498925e-05, "loss": 2.464485740661621, "memory(GiB)": 72.85, "step": 61785, "token_acc": 0.4844290657439446, "train_speed(iter/s)": 0.672328 }, { "epoch": 2.6472730388586605, "grad_norm": 4.012227535247803, "learning_rate": 4.541204667622666e-05, "loss": 2.3061954498291017, "memory(GiB)": 72.85, "step": 61790, "token_acc": 0.547945205479452, "train_speed(iter/s)": 0.67233 }, { "epoch": 2.6474872541879098, "grad_norm": 10.232632637023926, "learning_rate": 4.540534533606947e-05, "loss": 2.573673057556152, "memory(GiB)": 72.85, "step": 61795, "token_acc": 0.4726027397260274, "train_speed(iter/s)": 0.672324 }, { "epoch": 2.6477014695171586, "grad_norm": 3.922903060913086, "learning_rate": 4.539864407914876e-05, "loss": 2.2761253356933593, "memory(GiB)": 72.85, "step": 61800, "token_acc": 0.5138461538461538, "train_speed(iter/s)": 0.672313 }, { "epoch": 2.6479156848464074, "grad_norm": 5.515809535980225, "learning_rate": 4.5391942905585926e-05, "loss": 2.2805686950683595, "memory(GiB)": 72.85, "step": 61805, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.672308 }, { "epoch": 2.6481299001756566, "grad_norm": 5.020026683807373, "learning_rate": 4.5385241815502355e-05, "loss": 2.357228088378906, "memory(GiB)": 72.85, "step": 61810, "token_acc": 0.5057034220532319, "train_speed(iter/s)": 0.67231 }, { "epoch": 2.6483441155049054, "grad_norm": 5.345957279205322, "learning_rate": 4.537854080901948e-05, "loss": 2.403546905517578, "memory(GiB)": 72.85, "step": 61815, "token_acc": 0.4874551971326165, "train_speed(iter/s)": 0.672298 }, { "epoch": 2.6485583308341543, "grad_norm": 3.64180064201355, "learning_rate": 4.537183988625866e-05, "loss": 1.8988868713378906, "memory(GiB)": 72.85, "step": 61820, "token_acc": 
0.5860655737704918, "train_speed(iter/s)": 0.672302 }, { "epoch": 2.6487725461634035, "grad_norm": 7.538309574127197, "learning_rate": 4.536513904734131e-05, "loss": 2.413636589050293, "memory(GiB)": 72.85, "step": 61825, "token_acc": 0.4684014869888476, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.6489867614926523, "grad_norm": 4.898261070251465, "learning_rate": 4.535843829238882e-05, "loss": 2.504242706298828, "memory(GiB)": 72.85, "step": 61830, "token_acc": 0.4807017543859649, "train_speed(iter/s)": 0.672323 }, { "epoch": 2.649200976821901, "grad_norm": 5.562963008880615, "learning_rate": 4.5351737621522564e-05, "loss": 2.4106910705566404, "memory(GiB)": 72.85, "step": 61835, "token_acc": 0.46321525885558584, "train_speed(iter/s)": 0.672319 }, { "epoch": 2.6494151921511504, "grad_norm": 4.849070072174072, "learning_rate": 4.534503703486395e-05, "loss": 2.3365930557250976, "memory(GiB)": 72.85, "step": 61840, "token_acc": 0.4577922077922078, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.649629407480399, "grad_norm": 5.326191425323486, "learning_rate": 4.533833653253436e-05, "loss": 2.372295379638672, "memory(GiB)": 72.85, "step": 61845, "token_acc": 0.4599406528189911, "train_speed(iter/s)": 0.672329 }, { "epoch": 2.649843622809648, "grad_norm": 7.472209453582764, "learning_rate": 4.533163611465516e-05, "loss": 2.3396411895751954, "memory(GiB)": 72.85, "step": 61850, "token_acc": 0.5, "train_speed(iter/s)": 0.672332 }, { "epoch": 2.6500578381388973, "grad_norm": 4.09473180770874, "learning_rate": 4.5324935781347764e-05, "loss": 2.4105968475341797, "memory(GiB)": 72.85, "step": 61855, "token_acc": 0.5, "train_speed(iter/s)": 0.672332 }, { "epoch": 2.650272053468146, "grad_norm": 4.791741847991943, "learning_rate": 4.531823553273353e-05, "loss": 2.4241409301757812, "memory(GiB)": 72.85, "step": 61860, "token_acc": 0.4630225080385852, "train_speed(iter/s)": 0.672342 }, { "epoch": 2.650486268797395, "grad_norm": 5.390972137451172, "learning_rate": 
4.5311535368933864e-05, "loss": 2.4837989807128906, "memory(GiB)": 72.85, "step": 61865, "token_acc": 0.4793650793650794, "train_speed(iter/s)": 0.672336 }, { "epoch": 2.650700484126644, "grad_norm": 3.968395709991455, "learning_rate": 4.5304835290070135e-05, "loss": 2.2586015701293944, "memory(GiB)": 72.85, "step": 61870, "token_acc": 0.558303886925795, "train_speed(iter/s)": 0.672331 }, { "epoch": 2.650914699455893, "grad_norm": 5.24868106842041, "learning_rate": 4.529813529626371e-05, "loss": 2.110281753540039, "memory(GiB)": 72.85, "step": 61875, "token_acc": 0.5377049180327869, "train_speed(iter/s)": 0.672338 }, { "epoch": 2.651128914785142, "grad_norm": 3.6698813438415527, "learning_rate": 4.5291435387635955e-05, "loss": 2.0646846771240233, "memory(GiB)": 72.85, "step": 61880, "token_acc": 0.5901060070671378, "train_speed(iter/s)": 0.672343 }, { "epoch": 2.651343130114391, "grad_norm": 4.208518981933594, "learning_rate": 4.52847355643083e-05, "loss": 2.049047088623047, "memory(GiB)": 72.85, "step": 61885, "token_acc": 0.5547445255474452, "train_speed(iter/s)": 0.672353 }, { "epoch": 2.65155734544364, "grad_norm": 4.919297218322754, "learning_rate": 4.527803582640207e-05, "loss": 2.2603876113891603, "memory(GiB)": 72.85, "step": 61890, "token_acc": 0.49221183800623053, "train_speed(iter/s)": 0.672346 }, { "epoch": 2.6517715607728887, "grad_norm": 5.017638206481934, "learning_rate": 4.527133617403864e-05, "loss": 2.0816394805908205, "memory(GiB)": 72.85, "step": 61895, "token_acc": 0.4984126984126984, "train_speed(iter/s)": 0.672356 }, { "epoch": 2.651985776102138, "grad_norm": 5.273651599884033, "learning_rate": 4.5264636607339413e-05, "loss": 1.995147705078125, "memory(GiB)": 72.85, "step": 61900, "token_acc": 0.5409836065573771, "train_speed(iter/s)": 0.672359 }, { "epoch": 2.6521999914313867, "grad_norm": 5.6469340324401855, "learning_rate": 4.525793712642573e-05, "loss": 2.1968923568725587, "memory(GiB)": 72.85, "step": 61905, "token_acc": 
0.5469798657718121, "train_speed(iter/s)": 0.672368 }, { "epoch": 2.6524142067606356, "grad_norm": 6.156246662139893, "learning_rate": 4.5251237731418946e-05, "loss": 2.3970813751220703, "memory(GiB)": 72.85, "step": 61910, "token_acc": 0.5149253731343284, "train_speed(iter/s)": 0.672359 }, { "epoch": 2.652628422089885, "grad_norm": 5.412295818328857, "learning_rate": 4.524453842244047e-05, "loss": 2.4071172714233398, "memory(GiB)": 72.85, "step": 61915, "token_acc": 0.5091463414634146, "train_speed(iter/s)": 0.672375 }, { "epoch": 2.6528426374191336, "grad_norm": 5.1627516746521, "learning_rate": 4.5237839199611624e-05, "loss": 2.097138595581055, "memory(GiB)": 72.85, "step": 61920, "token_acc": 0.5607843137254902, "train_speed(iter/s)": 0.672359 }, { "epoch": 2.6530568527483824, "grad_norm": 5.208143711090088, "learning_rate": 4.52311400630538e-05, "loss": 2.1082712173461915, "memory(GiB)": 72.85, "step": 61925, "token_acc": 0.5218978102189781, "train_speed(iter/s)": 0.672365 }, { "epoch": 2.6532710680776317, "grad_norm": 3.721020221710205, "learning_rate": 4.5224441012888344e-05, "loss": 1.9464902877807617, "memory(GiB)": 72.85, "step": 61930, "token_acc": 0.5719298245614035, "train_speed(iter/s)": 0.672371 }, { "epoch": 2.6534852834068805, "grad_norm": 5.37746524810791, "learning_rate": 4.521774204923661e-05, "loss": 2.2609477996826173, "memory(GiB)": 72.85, "step": 61935, "token_acc": 0.52, "train_speed(iter/s)": 0.672374 }, { "epoch": 2.6536994987361293, "grad_norm": 3.9399943351745605, "learning_rate": 4.521104317221998e-05, "loss": 2.2127910614013673, "memory(GiB)": 72.85, "step": 61940, "token_acc": 0.5047318611987381, "train_speed(iter/s)": 0.672369 }, { "epoch": 2.6539137140653786, "grad_norm": 4.503565311431885, "learning_rate": 4.520568413306547e-05, "loss": 2.1441537857055666, "memory(GiB)": 72.85, "step": 61945, "token_acc": 0.506993006993007, "train_speed(iter/s)": 0.672369 }, { "epoch": 2.6541279293946274, "grad_norm": 4.8614726066589355, 
"learning_rate": 4.519898541229781e-05, "loss": 2.200485420227051, "memory(GiB)": 72.85, "step": 61950, "token_acc": 0.5, "train_speed(iter/s)": 0.672375 }, { "epoch": 2.654342144723876, "grad_norm": 4.812672138214111, "learning_rate": 4.519228677850504e-05, "loss": 2.315463638305664, "memory(GiB)": 72.85, "step": 61955, "token_acc": 0.503448275862069, "train_speed(iter/s)": 0.672369 }, { "epoch": 2.6545563600531255, "grad_norm": 4.338935852050781, "learning_rate": 4.5185588231808486e-05, "loss": 2.0798242568969725, "memory(GiB)": 72.85, "step": 61960, "token_acc": 0.5155709342560554, "train_speed(iter/s)": 0.672368 }, { "epoch": 2.6547705753823743, "grad_norm": 4.284976005554199, "learning_rate": 4.517888977232953e-05, "loss": 1.951443862915039, "memory(GiB)": 72.85, "step": 61965, "token_acc": 0.5675675675675675, "train_speed(iter/s)": 0.672374 }, { "epoch": 2.654984790711623, "grad_norm": 4.292386054992676, "learning_rate": 4.5172191400189515e-05, "loss": 2.126949691772461, "memory(GiB)": 72.85, "step": 61970, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.672368 }, { "epoch": 2.6551990060408723, "grad_norm": 5.197686672210693, "learning_rate": 4.516549311550977e-05, "loss": 2.339333724975586, "memory(GiB)": 72.85, "step": 61975, "token_acc": 0.5163398692810458, "train_speed(iter/s)": 0.672376 }, { "epoch": 2.655413221370121, "grad_norm": 6.612441062927246, "learning_rate": 4.515879491841166e-05, "loss": 2.2863359451293945, "memory(GiB)": 72.85, "step": 61980, "token_acc": 0.5418326693227091, "train_speed(iter/s)": 0.672386 }, { "epoch": 2.65562743669937, "grad_norm": 5.274060249328613, "learning_rate": 4.515209680901651e-05, "loss": 1.927667236328125, "memory(GiB)": 72.85, "step": 61985, "token_acc": 0.5429864253393665, "train_speed(iter/s)": 0.6724 }, { "epoch": 2.655841652028619, "grad_norm": 6.2161431312561035, "learning_rate": 4.514539878744568e-05, "loss": 2.235311508178711, "memory(GiB)": 72.85, "step": 61990, "token_acc": 0.5189393939393939, 
"train_speed(iter/s)": 0.672404 }, { "epoch": 2.656055867357868, "grad_norm": 5.4117913246154785, "learning_rate": 4.5138700853820516e-05, "loss": 2.1504947662353517, "memory(GiB)": 72.85, "step": 61995, "token_acc": 0.5157232704402516, "train_speed(iter/s)": 0.672412 }, { "epoch": 2.656270082687117, "grad_norm": 4.1661810874938965, "learning_rate": 4.513200300826232e-05, "loss": 1.92384033203125, "memory(GiB)": 72.85, "step": 62000, "token_acc": 0.5215686274509804, "train_speed(iter/s)": 0.67242 }, { "epoch": 2.656270082687117, "eval_loss": 2.097170829772949, "eval_runtime": 14.0853, "eval_samples_per_second": 7.1, "eval_steps_per_second": 7.1, "eval_token_acc": 0.4828060522696011, "step": 62000 }, { "epoch": 2.656484298016366, "grad_norm": 4.917749881744385, "learning_rate": 4.512530525089246e-05, "loss": 2.0218461990356444, "memory(GiB)": 72.85, "step": 62005, "token_acc": 0.5038387715930902, "train_speed(iter/s)": 0.672306 }, { "epoch": 2.656698513345615, "grad_norm": 4.877129554748535, "learning_rate": 4.511860758183229e-05, "loss": 2.443048667907715, "memory(GiB)": 72.85, "step": 62010, "token_acc": 0.5048231511254019, "train_speed(iter/s)": 0.67232 }, { "epoch": 2.6569127286748637, "grad_norm": 4.166194915771484, "learning_rate": 4.511191000120312e-05, "loss": 2.0121429443359373, "memory(GiB)": 72.85, "step": 62015, "token_acc": 0.5252918287937743, "train_speed(iter/s)": 0.672323 }, { "epoch": 2.657126944004113, "grad_norm": 5.755941390991211, "learning_rate": 4.510521250912627e-05, "loss": 2.2425241470336914, "memory(GiB)": 72.85, "step": 62020, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.672336 }, { "epoch": 2.657341159333362, "grad_norm": 4.470204830169678, "learning_rate": 4.50985151057231e-05, "loss": 2.1528743743896483, "memory(GiB)": 72.85, "step": 62025, "token_acc": 0.5032258064516129, "train_speed(iter/s)": 0.672329 }, { "epoch": 2.6575553746626106, "grad_norm": 4.88016414642334, "learning_rate": 4.509181779111493e-05, "loss": 
2.036063385009766, "memory(GiB)": 72.85, "step": 62030, "token_acc": 0.5375375375375375, "train_speed(iter/s)": 0.67233 }, { "epoch": 2.65776958999186, "grad_norm": 3.6548573970794678, "learning_rate": 4.508512056542307e-05, "loss": 2.396799087524414, "memory(GiB)": 72.85, "step": 62035, "token_acc": 0.4738372093023256, "train_speed(iter/s)": 0.672337 }, { "epoch": 2.6579838053211087, "grad_norm": 4.320106029510498, "learning_rate": 4.507842342876887e-05, "loss": 2.217331886291504, "memory(GiB)": 72.85, "step": 62040, "token_acc": 0.5038167938931297, "train_speed(iter/s)": 0.672339 }, { "epoch": 2.6581980206503575, "grad_norm": 6.3247971534729, "learning_rate": 4.507172638127364e-05, "loss": 2.1315574645996094, "memory(GiB)": 72.85, "step": 62045, "token_acc": 0.5413223140495868, "train_speed(iter/s)": 0.672348 }, { "epoch": 2.6584122359796067, "grad_norm": 5.530213832855225, "learning_rate": 4.5065029423058726e-05, "loss": 2.3150405883789062, "memory(GiB)": 72.85, "step": 62050, "token_acc": 0.5051546391752577, "train_speed(iter/s)": 0.672352 }, { "epoch": 2.6586264513088556, "grad_norm": 4.699572563171387, "learning_rate": 4.505833255424543e-05, "loss": 2.086273956298828, "memory(GiB)": 72.85, "step": 62055, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.672366 }, { "epoch": 2.6588406666381044, "grad_norm": 4.815779209136963, "learning_rate": 4.505163577495506e-05, "loss": 2.1985919952392576, "memory(GiB)": 72.85, "step": 62060, "token_acc": 0.5415384615384615, "train_speed(iter/s)": 0.672362 }, { "epoch": 2.6590548819673536, "grad_norm": 4.884237289428711, "learning_rate": 4.504493908530896e-05, "loss": 2.566851806640625, "memory(GiB)": 72.85, "step": 62065, "token_acc": 0.47634069400630913, "train_speed(iter/s)": 0.672373 }, { "epoch": 2.6592690972966024, "grad_norm": 5.558164119720459, "learning_rate": 4.5038242485428436e-05, "loss": 2.005870246887207, "memory(GiB)": 72.85, "step": 62070, "token_acc": 0.5460526315789473, "train_speed(iter/s)": 
0.672372 }, { "epoch": 2.6594833126258512, "grad_norm": 4.7061448097229, "learning_rate": 4.503154597543479e-05, "loss": 2.218300628662109, "memory(GiB)": 72.85, "step": 62075, "token_acc": 0.5354609929078015, "train_speed(iter/s)": 0.67237 }, { "epoch": 2.6596975279551005, "grad_norm": 4.178009510040283, "learning_rate": 4.5024849555449353e-05, "loss": 1.9701587677001953, "memory(GiB)": 72.85, "step": 62080, "token_acc": 0.5692307692307692, "train_speed(iter/s)": 0.672384 }, { "epoch": 2.6599117432843493, "grad_norm": 5.696615695953369, "learning_rate": 4.501815322559345e-05, "loss": 2.225876235961914, "memory(GiB)": 72.85, "step": 62085, "token_acc": 0.4909090909090909, "train_speed(iter/s)": 0.672371 }, { "epoch": 2.660125958613598, "grad_norm": 5.247380256652832, "learning_rate": 4.501145698598836e-05, "loss": 2.188703727722168, "memory(GiB)": 72.85, "step": 62090, "token_acc": 0.5301587301587302, "train_speed(iter/s)": 0.672381 }, { "epoch": 2.6603401739428474, "grad_norm": 8.051255226135254, "learning_rate": 4.500476083675542e-05, "loss": 2.404367446899414, "memory(GiB)": 72.85, "step": 62095, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.672384 }, { "epoch": 2.660554389272096, "grad_norm": 5.013967037200928, "learning_rate": 4.499806477801592e-05, "loss": 2.4010900497436523, "memory(GiB)": 72.85, "step": 62100, "token_acc": 0.5266666666666666, "train_speed(iter/s)": 0.672388 }, { "epoch": 2.660768604601345, "grad_norm": 4.250585079193115, "learning_rate": 4.499136880989116e-05, "loss": 2.493918609619141, "memory(GiB)": 72.85, "step": 62105, "token_acc": 0.49429657794676807, "train_speed(iter/s)": 0.672378 }, { "epoch": 2.6609828199305943, "grad_norm": 5.610459804534912, "learning_rate": 4.498467293250246e-05, "loss": 2.2104204177856444, "memory(GiB)": 72.85, "step": 62110, "token_acc": 0.5181159420289855, "train_speed(iter/s)": 0.67237 }, { "epoch": 2.661197035259843, "grad_norm": 3.905815601348877, "learning_rate": 4.497797714597112e-05, "loss": 
2.276938247680664, "memory(GiB)": 72.85, "step": 62115, "token_acc": 0.4985507246376812, "train_speed(iter/s)": 0.67237 }, { "epoch": 2.661411250589092, "grad_norm": 4.371700763702393, "learning_rate": 4.4971281450418425e-05, "loss": 2.1976131439208983, "memory(GiB)": 72.85, "step": 62120, "token_acc": 0.5387096774193548, "train_speed(iter/s)": 0.672362 }, { "epoch": 2.661625465918341, "grad_norm": 5.707208633422852, "learning_rate": 4.496458584596569e-05, "loss": 2.5522281646728517, "memory(GiB)": 72.85, "step": 62125, "token_acc": 0.4797507788161994, "train_speed(iter/s)": 0.672369 }, { "epoch": 2.66183968124759, "grad_norm": 4.612946033477783, "learning_rate": 4.495789033273419e-05, "loss": 1.9191339492797852, "memory(GiB)": 72.85, "step": 62130, "token_acc": 0.5474452554744526, "train_speed(iter/s)": 0.672371 }, { "epoch": 2.6620538965768388, "grad_norm": 5.269773960113525, "learning_rate": 4.495119491084526e-05, "loss": 2.369931221008301, "memory(GiB)": 72.85, "step": 62135, "token_acc": 0.5116959064327485, "train_speed(iter/s)": 0.67237 }, { "epoch": 2.662268111906088, "grad_norm": 5.715127944946289, "learning_rate": 4.4944499580420166e-05, "loss": 2.03623046875, "memory(GiB)": 72.85, "step": 62140, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.672361 }, { "epoch": 2.662482327235337, "grad_norm": 4.348556041717529, "learning_rate": 4.4937804341580184e-05, "loss": 2.3643209457397463, "memory(GiB)": 72.85, "step": 62145, "token_acc": 0.4971751412429379, "train_speed(iter/s)": 0.672371 }, { "epoch": 2.6626965425645857, "grad_norm": 3.909572124481201, "learning_rate": 4.4931109194446624e-05, "loss": 2.2517364501953123, "memory(GiB)": 72.85, "step": 62150, "token_acc": 0.5327635327635327, "train_speed(iter/s)": 0.67238 }, { "epoch": 2.662910757893835, "grad_norm": 3.8942370414733887, "learning_rate": 4.49244141391408e-05, "loss": 2.4790437698364256, "memory(GiB)": 72.85, "step": 62155, "token_acc": 0.49829351535836175, "train_speed(iter/s)": 0.67239 }, 
{ "epoch": 2.6631249732230837, "grad_norm": 5.311724662780762, "learning_rate": 4.4917719175783965e-05, "loss": 2.1168212890625, "memory(GiB)": 72.85, "step": 62160, "token_acc": 0.5120274914089347, "train_speed(iter/s)": 0.672377 }, { "epoch": 2.6633391885523325, "grad_norm": 4.497335910797119, "learning_rate": 4.491102430449741e-05, "loss": 2.4062068939208983, "memory(GiB)": 72.85, "step": 62165, "token_acc": 0.47474747474747475, "train_speed(iter/s)": 0.672384 }, { "epoch": 2.663553403881582, "grad_norm": 4.001894474029541, "learning_rate": 4.490432952540243e-05, "loss": 2.1803844451904295, "memory(GiB)": 72.85, "step": 62170, "token_acc": 0.5487364620938628, "train_speed(iter/s)": 0.672396 }, { "epoch": 2.6637676192108306, "grad_norm": 4.048462867736816, "learning_rate": 4.489763483862031e-05, "loss": 2.277070999145508, "memory(GiB)": 72.85, "step": 62175, "token_acc": 0.4983922829581994, "train_speed(iter/s)": 0.6724 }, { "epoch": 2.6639818345400794, "grad_norm": 8.623326301574707, "learning_rate": 4.4890940244272305e-05, "loss": 2.514765739440918, "memory(GiB)": 72.85, "step": 62180, "token_acc": 0.4684014869888476, "train_speed(iter/s)": 0.672408 }, { "epoch": 2.6641960498693287, "grad_norm": 4.385197162628174, "learning_rate": 4.488424574247972e-05, "loss": 2.4211862564086912, "memory(GiB)": 72.85, "step": 62185, "token_acc": 0.49174917491749176, "train_speed(iter/s)": 0.672412 }, { "epoch": 2.6644102651985775, "grad_norm": 4.747147560119629, "learning_rate": 4.4877551333363814e-05, "loss": 2.36658935546875, "memory(GiB)": 72.85, "step": 62190, "token_acc": 0.5, "train_speed(iter/s)": 0.672416 }, { "epoch": 2.6646244805278263, "grad_norm": 4.806020736694336, "learning_rate": 4.487085701704588e-05, "loss": 2.152237319946289, "memory(GiB)": 72.85, "step": 62195, "token_acc": 0.5338078291814946, "train_speed(iter/s)": 0.672419 }, { "epoch": 2.6648386958570756, "grad_norm": 4.52631139755249, "learning_rate": 4.4864162793647184e-05, "loss": 1.8888248443603515, 
"memory(GiB)": 72.85, "step": 62200, "token_acc": 0.5767790262172284, "train_speed(iter/s)": 0.672425 }, { "epoch": 2.6650529111863244, "grad_norm": 4.7881083488464355, "learning_rate": 4.4857468663288985e-05, "loss": 2.122223663330078, "memory(GiB)": 72.85, "step": 62205, "token_acc": 0.5250836120401338, "train_speed(iter/s)": 0.67243 }, { "epoch": 2.665267126515573, "grad_norm": 5.299064636230469, "learning_rate": 4.485077462609258e-05, "loss": 2.4236566543579103, "memory(GiB)": 72.85, "step": 62210, "token_acc": 0.5168195718654435, "train_speed(iter/s)": 0.672435 }, { "epoch": 2.6654813418448224, "grad_norm": 4.355448246002197, "learning_rate": 4.484408068217922e-05, "loss": 2.5655378341674804, "memory(GiB)": 72.85, "step": 62215, "token_acc": 0.4525316455696203, "train_speed(iter/s)": 0.672446 }, { "epoch": 2.6656955571740713, "grad_norm": 6.05044412612915, "learning_rate": 4.4837386831670155e-05, "loss": 1.9806926727294922, "memory(GiB)": 72.85, "step": 62220, "token_acc": 0.5697674418604651, "train_speed(iter/s)": 0.672452 }, { "epoch": 2.66590977250332, "grad_norm": 5.2260966300964355, "learning_rate": 4.4830693074686675e-05, "loss": 2.0763456344604494, "memory(GiB)": 72.85, "step": 62225, "token_acc": 0.55, "train_speed(iter/s)": 0.672454 }, { "epoch": 2.6661239878325693, "grad_norm": 3.9305636882781982, "learning_rate": 4.482399941135005e-05, "loss": 2.0561214447021485, "memory(GiB)": 72.85, "step": 62230, "token_acc": 0.5477031802120141, "train_speed(iter/s)": 0.672431 }, { "epoch": 2.666338203161818, "grad_norm": 5.323598384857178, "learning_rate": 4.481730584178153e-05, "loss": 2.172329902648926, "memory(GiB)": 72.85, "step": 62235, "token_acc": 0.5408163265306123, "train_speed(iter/s)": 0.67243 }, { "epoch": 2.666552418491067, "grad_norm": 4.5088067054748535, "learning_rate": 4.481061236610238e-05, "loss": 2.4781425476074217, "memory(GiB)": 72.85, "step": 62240, "token_acc": 0.5014005602240896, "train_speed(iter/s)": 0.672436 }, { "epoch": 
2.666766633820316, "grad_norm": 6.348280429840088, "learning_rate": 4.480391898443386e-05, "loss": 2.3876562118530273, "memory(GiB)": 72.85, "step": 62245, "token_acc": 0.5209790209790209, "train_speed(iter/s)": 0.672444 }, { "epoch": 2.666980849149565, "grad_norm": 4.853623867034912, "learning_rate": 4.4797225696897205e-05, "loss": 2.401302719116211, "memory(GiB)": 72.85, "step": 62250, "token_acc": 0.45938375350140054, "train_speed(iter/s)": 0.672436 }, { "epoch": 2.6671950644788143, "grad_norm": 5.301024436950684, "learning_rate": 4.4790532503613696e-05, "loss": 2.212412452697754, "memory(GiB)": 72.85, "step": 62255, "token_acc": 0.5720164609053497, "train_speed(iter/s)": 0.672431 }, { "epoch": 2.667409279808063, "grad_norm": 6.398563385009766, "learning_rate": 4.4783839404704587e-05, "loss": 2.3275882720947267, "memory(GiB)": 72.85, "step": 62260, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.672432 }, { "epoch": 2.667623495137312, "grad_norm": 4.191487789154053, "learning_rate": 4.47771464002911e-05, "loss": 2.459239959716797, "memory(GiB)": 72.85, "step": 62265, "token_acc": 0.4984520123839009, "train_speed(iter/s)": 0.672435 }, { "epoch": 2.667837710466561, "grad_norm": 5.088545322418213, "learning_rate": 4.4770453490494526e-05, "loss": 1.8737276077270508, "memory(GiB)": 72.85, "step": 62270, "token_acc": 0.5657370517928287, "train_speed(iter/s)": 0.672433 }, { "epoch": 2.66805192579581, "grad_norm": 4.21783971786499, "learning_rate": 4.476376067543607e-05, "loss": 2.5705322265625, "memory(GiB)": 72.85, "step": 62275, "token_acc": 0.5111111111111111, "train_speed(iter/s)": 0.672448 }, { "epoch": 2.6682661411250588, "grad_norm": 3.725931406021118, "learning_rate": 4.475706795523702e-05, "loss": 2.3203109741210937, "memory(GiB)": 72.85, "step": 62280, "token_acc": 0.5329153605015674, "train_speed(iter/s)": 0.67245 }, { "epoch": 2.668480356454308, "grad_norm": 4.980550289154053, "learning_rate": 4.47503753300186e-05, "loss": 2.6232526779174803, 
"memory(GiB)": 72.85, "step": 62285, "token_acc": 0.47750865051903113, "train_speed(iter/s)": 0.672471 }, { "epoch": 2.668694571783557, "grad_norm": 3.620692253112793, "learning_rate": 4.474368279990205e-05, "loss": 2.2292823791503906, "memory(GiB)": 72.85, "step": 62290, "token_acc": 0.5014492753623189, "train_speed(iter/s)": 0.672471 }, { "epoch": 2.6689087871128057, "grad_norm": 4.701869010925293, "learning_rate": 4.47369903650086e-05, "loss": 2.040087127685547, "memory(GiB)": 72.85, "step": 62295, "token_acc": 0.5045045045045045, "train_speed(iter/s)": 0.672481 }, { "epoch": 2.669123002442055, "grad_norm": 4.943761348724365, "learning_rate": 4.4730298025459536e-05, "loss": 2.3952251434326173, "memory(GiB)": 72.85, "step": 62300, "token_acc": 0.5209003215434084, "train_speed(iter/s)": 0.672467 }, { "epoch": 2.6693372177713037, "grad_norm": 4.176028251647949, "learning_rate": 4.4723605781376056e-05, "loss": 2.176953887939453, "memory(GiB)": 72.85, "step": 62305, "token_acc": 0.5171339563862928, "train_speed(iter/s)": 0.672457 }, { "epoch": 2.6695514331005525, "grad_norm": 4.791266918182373, "learning_rate": 4.47169136328794e-05, "loss": 2.432154083251953, "memory(GiB)": 72.85, "step": 62310, "token_acc": 0.4674922600619195, "train_speed(iter/s)": 0.672459 }, { "epoch": 2.669765648429802, "grad_norm": 5.008666038513184, "learning_rate": 4.471022158009082e-05, "loss": 2.5178926467895506, "memory(GiB)": 72.85, "step": 62315, "token_acc": 0.46987951807228917, "train_speed(iter/s)": 0.672452 }, { "epoch": 2.6699798637590506, "grad_norm": 4.948404788970947, "learning_rate": 4.470352962313154e-05, "loss": 2.0073352813720704, "memory(GiB)": 72.85, "step": 62320, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.672455 }, { "epoch": 2.6701940790882994, "grad_norm": 4.907922744750977, "learning_rate": 4.4696837762122777e-05, "loss": 2.030989074707031, "memory(GiB)": 72.85, "step": 62325, "token_acc": 0.5179282868525896, "train_speed(iter/s)": 0.672454 }, { "epoch": 
2.6704082944175487, "grad_norm": 6.272758483886719, "learning_rate": 4.469014599718579e-05, "loss": 2.0301738739013673, "memory(GiB)": 72.85, "step": 62330, "token_acc": 0.565597667638484, "train_speed(iter/s)": 0.672454 }, { "epoch": 2.6706225097467975, "grad_norm": 4.842332363128662, "learning_rate": 4.468345432844177e-05, "loss": 2.3463417053222657, "memory(GiB)": 72.85, "step": 62335, "token_acc": 0.50625, "train_speed(iter/s)": 0.672443 }, { "epoch": 2.6708367250760463, "grad_norm": 3.694662570953369, "learning_rate": 4.4676762756011974e-05, "loss": 2.110480308532715, "memory(GiB)": 72.85, "step": 62340, "token_acc": 0.5306859205776173, "train_speed(iter/s)": 0.67245 }, { "epoch": 2.6710509404052956, "grad_norm": 4.715819835662842, "learning_rate": 4.467007128001762e-05, "loss": 2.319552993774414, "memory(GiB)": 72.85, "step": 62345, "token_acc": 0.5119453924914675, "train_speed(iter/s)": 0.672448 }, { "epoch": 2.6712651557345444, "grad_norm": 4.641141891479492, "learning_rate": 4.466337990057991e-05, "loss": 2.288248634338379, "memory(GiB)": 72.85, "step": 62350, "token_acc": 0.5225806451612903, "train_speed(iter/s)": 0.672456 }, { "epoch": 2.671479371063793, "grad_norm": 5.070010185241699, "learning_rate": 4.4656688617820095e-05, "loss": 2.27552490234375, "memory(GiB)": 72.85, "step": 62355, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.672442 }, { "epoch": 2.6716935863930424, "grad_norm": 4.548862457275391, "learning_rate": 4.464999743185937e-05, "loss": 1.9895103454589844, "memory(GiB)": 72.85, "step": 62360, "token_acc": 0.5378787878787878, "train_speed(iter/s)": 0.672444 }, { "epoch": 2.6719078017222913, "grad_norm": 4.577401161193848, "learning_rate": 4.464330634281895e-05, "loss": 2.079667091369629, "memory(GiB)": 72.85, "step": 62365, "token_acc": 0.5346153846153846, "train_speed(iter/s)": 0.672445 }, { "epoch": 2.67212201705154, "grad_norm": 5.379009246826172, "learning_rate": 4.4636615350820054e-05, "loss": 2.5333524703979493, 
"memory(GiB)": 72.85, "step": 62370, "token_acc": 0.4709141274238227, "train_speed(iter/s)": 0.672415 }, { "epoch": 2.6723362323807893, "grad_norm": 5.436595916748047, "learning_rate": 4.462992445598392e-05, "loss": 2.419651985168457, "memory(GiB)": 72.85, "step": 62375, "token_acc": 0.48201438848920863, "train_speed(iter/s)": 0.672418 }, { "epoch": 2.672550447710038, "grad_norm": 4.612763404846191, "learning_rate": 4.462323365843174e-05, "loss": 2.443699264526367, "memory(GiB)": 72.85, "step": 62380, "token_acc": 0.45478723404255317, "train_speed(iter/s)": 0.672413 }, { "epoch": 2.672764663039287, "grad_norm": 6.323281764984131, "learning_rate": 4.4616542958284725e-05, "loss": 2.173419189453125, "memory(GiB)": 72.85, "step": 62385, "token_acc": 0.5285171102661597, "train_speed(iter/s)": 0.672422 }, { "epoch": 2.672978878368536, "grad_norm": 3.7300968170166016, "learning_rate": 4.460985235566409e-05, "loss": 1.9340381622314453, "memory(GiB)": 72.85, "step": 62390, "token_acc": 0.551622418879056, "train_speed(iter/s)": 0.672422 }, { "epoch": 2.673193093697785, "grad_norm": 4.77959680557251, "learning_rate": 4.4603161850691025e-05, "loss": 2.2074695587158204, "memory(GiB)": 72.85, "step": 62395, "token_acc": 0.4850498338870432, "train_speed(iter/s)": 0.672416 }, { "epoch": 2.673407309027034, "grad_norm": 6.722838401794434, "learning_rate": 4.459647144348675e-05, "loss": 1.988372802734375, "memory(GiB)": 72.85, "step": 62400, "token_acc": 0.5375939849624061, "train_speed(iter/s)": 0.672408 }, { "epoch": 2.673621524356283, "grad_norm": 4.769232749938965, "learning_rate": 4.458978113417248e-05, "loss": 2.4682943344116213, "memory(GiB)": 72.85, "step": 62405, "token_acc": 0.5111940298507462, "train_speed(iter/s)": 0.67241 }, { "epoch": 2.673835739685532, "grad_norm": 4.42877197265625, "learning_rate": 4.4583090922869375e-05, "loss": 2.2672492980957033, "memory(GiB)": 72.85, "step": 62410, "token_acc": 0.4728434504792332, "train_speed(iter/s)": 0.672409 }, { "epoch": 
2.6740499550147807, "grad_norm": 5.0192742347717285, "learning_rate": 4.457640080969868e-05, "loss": 2.3306121826171875, "memory(GiB)": 72.85, "step": 62415, "token_acc": 0.49838187702265374, "train_speed(iter/s)": 0.672411 }, { "epoch": 2.67426417034403, "grad_norm": 4.319342613220215, "learning_rate": 4.456971079478155e-05, "loss": 2.279240608215332, "memory(GiB)": 72.85, "step": 62420, "token_acc": 0.5157232704402516, "train_speed(iter/s)": 0.672415 }, { "epoch": 2.674478385673279, "grad_norm": 4.568208694458008, "learning_rate": 4.456302087823922e-05, "loss": 2.141935348510742, "memory(GiB)": 72.85, "step": 62425, "token_acc": 0.5775193798449613, "train_speed(iter/s)": 0.672421 }, { "epoch": 2.6746926010025276, "grad_norm": 4.207009315490723, "learning_rate": 4.455633106019287e-05, "loss": 1.984822654724121, "memory(GiB)": 72.85, "step": 62430, "token_acc": 0.5915032679738562, "train_speed(iter/s)": 0.672412 }, { "epoch": 2.674906816331777, "grad_norm": 4.50473165512085, "learning_rate": 4.4549641340763676e-05, "loss": 2.3282148361206056, "memory(GiB)": 72.85, "step": 62435, "token_acc": 0.5127388535031847, "train_speed(iter/s)": 0.67241 }, { "epoch": 2.6751210316610257, "grad_norm": 3.9877665042877197, "learning_rate": 4.454295172007285e-05, "loss": 2.1774221420288087, "memory(GiB)": 72.85, "step": 62440, "token_acc": 0.5234899328859061, "train_speed(iter/s)": 0.672414 }, { "epoch": 2.6753352469902745, "grad_norm": 6.646678447723389, "learning_rate": 4.4536262198241555e-05, "loss": 2.1769153594970705, "memory(GiB)": 72.85, "step": 62445, "token_acc": 0.4983277591973244, "train_speed(iter/s)": 0.672401 }, { "epoch": 2.6755494623195237, "grad_norm": 5.043229579925537, "learning_rate": 4.4529572775391014e-05, "loss": 2.1523767471313477, "memory(GiB)": 72.85, "step": 62450, "token_acc": 0.5245283018867924, "train_speed(iter/s)": 0.672405 }, { "epoch": 2.6757636776487725, "grad_norm": 5.858974456787109, "learning_rate": 4.4522883451642386e-05, "loss": 
2.1357831954956055, "memory(GiB)": 72.85, "step": 62455, "token_acc": 0.5126050420168067, "train_speed(iter/s)": 0.672415 }, { "epoch": 2.6759778929780214, "grad_norm": 5.208359241485596, "learning_rate": 4.451619422711687e-05, "loss": 2.4999006271362303, "memory(GiB)": 72.85, "step": 62460, "token_acc": 0.4528301886792453, "train_speed(iter/s)": 0.672424 }, { "epoch": 2.6761921083072706, "grad_norm": 4.841597080230713, "learning_rate": 4.4509505101935636e-05, "loss": 2.4875783920288086, "memory(GiB)": 72.85, "step": 62465, "token_acc": 0.4900662251655629, "train_speed(iter/s)": 0.672419 }, { "epoch": 2.6764063236365194, "grad_norm": 5.843552112579346, "learning_rate": 4.450281607621987e-05, "loss": 2.1819807052612306, "memory(GiB)": 72.85, "step": 62470, "token_acc": 0.5278688524590164, "train_speed(iter/s)": 0.672427 }, { "epoch": 2.6766205389657682, "grad_norm": 3.344709634780884, "learning_rate": 4.449612715009075e-05, "loss": 2.405344009399414, "memory(GiB)": 72.85, "step": 62475, "token_acc": 0.46418338108882523, "train_speed(iter/s)": 0.672427 }, { "epoch": 2.6768347542950175, "grad_norm": 3.570117235183716, "learning_rate": 4.4489438323669435e-05, "loss": 2.6228654861450194, "memory(GiB)": 72.85, "step": 62480, "token_acc": 0.44868035190615835, "train_speed(iter/s)": 0.67242 }, { "epoch": 2.6770489696242663, "grad_norm": 5.851043701171875, "learning_rate": 4.448274959707713e-05, "loss": 2.0794551849365233, "memory(GiB)": 72.85, "step": 62485, "token_acc": 0.5145985401459854, "train_speed(iter/s)": 0.672425 }, { "epoch": 2.677263184953515, "grad_norm": 4.288473129272461, "learning_rate": 4.447606097043499e-05, "loss": 2.292787551879883, "memory(GiB)": 72.85, "step": 62490, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.672424 }, { "epoch": 2.6774774002827644, "grad_norm": 4.251198768615723, "learning_rate": 4.4469372443864185e-05, "loss": 2.14849796295166, "memory(GiB)": 72.85, "step": 62495, "token_acc": 0.5275080906148867, "train_speed(iter/s)": 
0.672418 }, { "epoch": 2.677691615612013, "grad_norm": 5.700974941253662, "learning_rate": 4.4462684017485884e-05, "loss": 2.2084218978881838, "memory(GiB)": 72.85, "step": 62500, "token_acc": 0.5173611111111112, "train_speed(iter/s)": 0.672429 }, { "epoch": 2.677691615612013, "eval_loss": 2.117621660232544, "eval_runtime": 16.7389, "eval_samples_per_second": 5.974, "eval_steps_per_second": 5.974, "eval_token_acc": 0.47493403693931396, "step": 62500 }, { "epoch": 2.677905830941262, "grad_norm": 4.84099817276001, "learning_rate": 4.445599569142127e-05, "loss": 2.184229850769043, "memory(GiB)": 72.85, "step": 62505, "token_acc": 0.49154228855721394, "train_speed(iter/s)": 0.672293 }, { "epoch": 2.6781200462705113, "grad_norm": 5.03608512878418, "learning_rate": 4.444930746579147e-05, "loss": 2.3353803634643553, "memory(GiB)": 72.85, "step": 62510, "token_acc": 0.5369774919614148, "train_speed(iter/s)": 0.672281 }, { "epoch": 2.67833426159976, "grad_norm": 4.616218090057373, "learning_rate": 4.444261934071769e-05, "loss": 1.9208656311035157, "memory(GiB)": 72.85, "step": 62515, "token_acc": 0.6047430830039525, "train_speed(iter/s)": 0.672283 }, { "epoch": 2.678548476929009, "grad_norm": 4.744720935821533, "learning_rate": 4.443593131632105e-05, "loss": 2.331224060058594, "memory(GiB)": 72.85, "step": 62520, "token_acc": 0.5055350553505535, "train_speed(iter/s)": 0.672289 }, { "epoch": 2.678762692258258, "grad_norm": 7.597353458404541, "learning_rate": 4.442924339272275e-05, "loss": 2.1672365188598635, "memory(GiB)": 72.85, "step": 62525, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.672296 }, { "epoch": 2.678976907587507, "grad_norm": 4.9738030433654785, "learning_rate": 4.442255557004393e-05, "loss": 2.482951354980469, "memory(GiB)": 72.85, "step": 62530, "token_acc": 0.46779661016949153, "train_speed(iter/s)": 0.672307 }, { "epoch": 2.6791911229167558, "grad_norm": 4.6694111824035645, "learning_rate": 4.441586784840576e-05, "loss": 2.209071731567383, 
"memory(GiB)": 72.85, "step": 62535, "token_acc": 0.5350553505535055, "train_speed(iter/s)": 0.672312 }, { "epoch": 2.679405338246005, "grad_norm": 3.989555597305298, "learning_rate": 4.440918022792937e-05, "loss": 2.377724266052246, "memory(GiB)": 72.85, "step": 62540, "token_acc": 0.5273972602739726, "train_speed(iter/s)": 0.672306 }, { "epoch": 2.679619553575254, "grad_norm": 5.860815525054932, "learning_rate": 4.440249270873593e-05, "loss": 2.3457077026367186, "memory(GiB)": 72.85, "step": 62545, "token_acc": 0.5229007633587787, "train_speed(iter/s)": 0.672314 }, { "epoch": 2.6798337689045026, "grad_norm": 4.424471378326416, "learning_rate": 4.439580529094659e-05, "loss": 2.4413896560668946, "memory(GiB)": 72.85, "step": 62550, "token_acc": 0.4818181818181818, "train_speed(iter/s)": 0.672316 }, { "epoch": 2.680047984233752, "grad_norm": 5.452761173248291, "learning_rate": 4.4389117974682484e-05, "loss": 2.3845502853393556, "memory(GiB)": 72.85, "step": 62555, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.672326 }, { "epoch": 2.6802621995630007, "grad_norm": 4.239333629608154, "learning_rate": 4.4382430760064774e-05, "loss": 2.249587059020996, "memory(GiB)": 72.85, "step": 62560, "token_acc": 0.5206896551724138, "train_speed(iter/s)": 0.672338 }, { "epoch": 2.6804764148922495, "grad_norm": 4.838751792907715, "learning_rate": 4.4375743647214596e-05, "loss": 2.3166465759277344, "memory(GiB)": 72.85, "step": 62565, "token_acc": 0.5031446540880503, "train_speed(iter/s)": 0.672342 }, { "epoch": 2.680690630221499, "grad_norm": 5.48712158203125, "learning_rate": 4.436905663625311e-05, "loss": 2.6916433334350587, "memory(GiB)": 72.85, "step": 62570, "token_acc": 0.42382271468144045, "train_speed(iter/s)": 0.67234 }, { "epoch": 2.6809048455507476, "grad_norm": 5.6400275230407715, "learning_rate": 4.436236972730144e-05, "loss": 2.5772930145263673, "memory(GiB)": 72.85, "step": 62575, "token_acc": 0.48089171974522293, "train_speed(iter/s)": 0.672353 }, { 
"epoch": 2.6811190608799964, "grad_norm": 5.19252347946167, "learning_rate": 4.435568292048072e-05, "loss": 2.09151611328125, "memory(GiB)": 72.85, "step": 62580, "token_acc": 0.548951048951049, "train_speed(iter/s)": 0.672359 }, { "epoch": 2.6813332762092457, "grad_norm": 4.602968692779541, "learning_rate": 4.4348996215912114e-05, "loss": 2.318306732177734, "memory(GiB)": 72.85, "step": 62585, "token_acc": 0.48467966573816157, "train_speed(iter/s)": 0.672357 }, { "epoch": 2.6815474915384945, "grad_norm": 3.615985155105591, "learning_rate": 4.434230961371674e-05, "loss": 2.2557958602905273, "memory(GiB)": 72.85, "step": 62590, "token_acc": 0.48863636363636365, "train_speed(iter/s)": 0.672361 }, { "epoch": 2.6817617068677437, "grad_norm": 5.2539381980896, "learning_rate": 4.433562311401571e-05, "loss": 2.0118547439575196, "memory(GiB)": 72.85, "step": 62595, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672371 }, { "epoch": 2.6819759221969925, "grad_norm": 4.040524005889893, "learning_rate": 4.43289367169302e-05, "loss": 2.19158935546875, "memory(GiB)": 72.85, "step": 62600, "token_acc": 0.515358361774744, "train_speed(iter/s)": 0.672359 }, { "epoch": 2.6821901375262414, "grad_norm": 5.326540946960449, "learning_rate": 4.4322250422581326e-05, "loss": 2.300285530090332, "memory(GiB)": 72.85, "step": 62605, "token_acc": 0.4820846905537459, "train_speed(iter/s)": 0.672352 }, { "epoch": 2.6824043528554906, "grad_norm": 5.991326808929443, "learning_rate": 4.431556423109021e-05, "loss": 1.821436309814453, "memory(GiB)": 72.85, "step": 62610, "token_acc": 0.6116504854368932, "train_speed(iter/s)": 0.672351 }, { "epoch": 2.6826185681847394, "grad_norm": 5.945887565612793, "learning_rate": 4.430887814257798e-05, "loss": 2.707598876953125, "memory(GiB)": 72.85, "step": 62615, "token_acc": 0.5, "train_speed(iter/s)": 0.672356 }, { "epoch": 2.6828327835139882, "grad_norm": 5.246383190155029, "learning_rate": 4.430219215716576e-05, "loss": 2.586067008972168, 
"memory(GiB)": 72.85, "step": 62620, "token_acc": 0.44477611940298506, "train_speed(iter/s)": 0.672363 }, { "epoch": 2.6830469988432375, "grad_norm": 4.040000915527344, "learning_rate": 4.429550627497467e-05, "loss": 2.112799072265625, "memory(GiB)": 72.85, "step": 62625, "token_acc": 0.5543859649122806, "train_speed(iter/s)": 0.672367 }, { "epoch": 2.6832612141724863, "grad_norm": 4.957391738891602, "learning_rate": 4.428882049612584e-05, "loss": 2.350368118286133, "memory(GiB)": 72.85, "step": 62630, "token_acc": 0.4967532467532468, "train_speed(iter/s)": 0.672375 }, { "epoch": 2.683475429501735, "grad_norm": 4.811291217803955, "learning_rate": 4.428213482074039e-05, "loss": 2.2951705932617186, "memory(GiB)": 72.85, "step": 62635, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672373 }, { "epoch": 2.6836896448309844, "grad_norm": 5.104437351226807, "learning_rate": 4.427544924893941e-05, "loss": 2.6406126022338867, "memory(GiB)": 72.85, "step": 62640, "token_acc": 0.45161290322580644, "train_speed(iter/s)": 0.672373 }, { "epoch": 2.683903860160233, "grad_norm": 5.444965362548828, "learning_rate": 4.426876378084406e-05, "loss": 2.5444854736328124, "memory(GiB)": 72.85, "step": 62645, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.672374 }, { "epoch": 2.684118075489482, "grad_norm": 4.907744884490967, "learning_rate": 4.426207841657543e-05, "loss": 2.3434391021728516, "memory(GiB)": 72.85, "step": 62650, "token_acc": 0.5114285714285715, "train_speed(iter/s)": 0.672391 }, { "epoch": 2.6843322908187313, "grad_norm": 4.156118392944336, "learning_rate": 4.425539315625462e-05, "loss": 2.028232383728027, "memory(GiB)": 72.85, "step": 62655, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.672398 }, { "epoch": 2.68454650614798, "grad_norm": 4.831325054168701, "learning_rate": 4.424870800000276e-05, "loss": 2.402055549621582, "memory(GiB)": 72.85, "step": 62660, "token_acc": 0.4831804281345566, "train_speed(iter/s)": 0.672395 }, { "epoch": 
2.684760721477229, "grad_norm": 4.347284317016602, "learning_rate": 4.424202294794093e-05, "loss": 2.1779758453369142, "memory(GiB)": 72.85, "step": 62665, "token_acc": 0.5462962962962963, "train_speed(iter/s)": 0.672408 }, { "epoch": 2.684974936806478, "grad_norm": 5.212475776672363, "learning_rate": 4.423533800019026e-05, "loss": 2.1560176849365233, "memory(GiB)": 72.85, "step": 62670, "token_acc": 0.5115511551155115, "train_speed(iter/s)": 0.672411 }, { "epoch": 2.685189152135727, "grad_norm": 5.09539270401001, "learning_rate": 4.422865315687187e-05, "loss": 2.248433303833008, "memory(GiB)": 72.85, "step": 62675, "token_acc": 0.5031847133757962, "train_speed(iter/s)": 0.672426 }, { "epoch": 2.6854033674649758, "grad_norm": 7.251928806304932, "learning_rate": 4.4221968418106844e-05, "loss": 2.2629840850830076, "memory(GiB)": 72.85, "step": 62680, "token_acc": 0.4867549668874172, "train_speed(iter/s)": 0.67243 }, { "epoch": 2.685617582794225, "grad_norm": 5.338127136230469, "learning_rate": 4.421528378401626e-05, "loss": 2.51184139251709, "memory(GiB)": 72.85, "step": 62685, "token_acc": 0.47794117647058826, "train_speed(iter/s)": 0.672439 }, { "epoch": 2.685831798123474, "grad_norm": 4.309332847595215, "learning_rate": 4.420859925472125e-05, "loss": 1.8209884643554688, "memory(GiB)": 72.85, "step": 62690, "token_acc": 0.5809128630705395, "train_speed(iter/s)": 0.672437 }, { "epoch": 2.6860460134527226, "grad_norm": 3.9817748069763184, "learning_rate": 4.42019148303429e-05, "loss": 2.2514678955078127, "memory(GiB)": 72.85, "step": 62695, "token_acc": 0.5017064846416383, "train_speed(iter/s)": 0.672444 }, { "epoch": 2.686260228781972, "grad_norm": 6.6620917320251465, "learning_rate": 4.419523051100229e-05, "loss": 2.437322425842285, "memory(GiB)": 72.85, "step": 62700, "token_acc": 0.49466192170818507, "train_speed(iter/s)": 0.672457 }, { "epoch": 2.6864744441112207, "grad_norm": 4.912454128265381, "learning_rate": 4.418854629682053e-05, "loss": 2.2639371871948244, 
"memory(GiB)": 72.85, "step": 62705, "token_acc": 0.49044585987261147, "train_speed(iter/s)": 0.672448 }, { "epoch": 2.6866886594404695, "grad_norm": 4.059321403503418, "learning_rate": 4.41818621879187e-05, "loss": 2.4215702056884765, "memory(GiB)": 72.85, "step": 62710, "token_acc": 0.5, "train_speed(iter/s)": 0.672452 }, { "epoch": 2.686902874769719, "grad_norm": 4.904304027557373, "learning_rate": 4.41751781844179e-05, "loss": 2.3165460586547852, "memory(GiB)": 72.85, "step": 62715, "token_acc": 0.49508196721311476, "train_speed(iter/s)": 0.672456 }, { "epoch": 2.6871170900989676, "grad_norm": 3.8481991291046143, "learning_rate": 4.416849428643922e-05, "loss": 2.090976333618164, "memory(GiB)": 72.85, "step": 62720, "token_acc": 0.5337620578778135, "train_speed(iter/s)": 0.672456 }, { "epoch": 2.6873313054282164, "grad_norm": 5.733062267303467, "learning_rate": 4.416181049410372e-05, "loss": 2.2459575653076174, "memory(GiB)": 72.85, "step": 62725, "token_acc": 0.5148148148148148, "train_speed(iter/s)": 0.672468 }, { "epoch": 2.6875455207574657, "grad_norm": 4.833285331726074, "learning_rate": 4.415512680753251e-05, "loss": 2.083334732055664, "memory(GiB)": 72.85, "step": 62730, "token_acc": 0.564625850340136, "train_speed(iter/s)": 0.672468 }, { "epoch": 2.6877597360867145, "grad_norm": 6.9581780433654785, "learning_rate": 4.414844322684667e-05, "loss": 2.2627113342285154, "memory(GiB)": 72.85, "step": 62735, "token_acc": 0.49809885931558934, "train_speed(iter/s)": 0.672477 }, { "epoch": 2.6879739514159633, "grad_norm": 4.7803144454956055, "learning_rate": 4.414175975216724e-05, "loss": 2.126438331604004, "memory(GiB)": 72.85, "step": 62740, "token_acc": 0.5166163141993958, "train_speed(iter/s)": 0.672471 }, { "epoch": 2.6881881667452125, "grad_norm": 6.060794353485107, "learning_rate": 4.413507638361534e-05, "loss": 2.253641700744629, "memory(GiB)": 72.85, "step": 62745, "token_acc": 0.5171102661596958, "train_speed(iter/s)": 0.672485 }, { "epoch": 
2.6884023820744614, "grad_norm": 3.781251907348633, "learning_rate": 4.412839312131204e-05, "loss": 2.082503318786621, "memory(GiB)": 72.85, "step": 62750, "token_acc": 0.49666666666666665, "train_speed(iter/s)": 0.672487 }, { "epoch": 2.68861659740371, "grad_norm": 4.016599655151367, "learning_rate": 4.41217099653784e-05, "loss": 2.3787864685058593, "memory(GiB)": 72.85, "step": 62755, "token_acc": 0.498371335504886, "train_speed(iter/s)": 0.67249 }, { "epoch": 2.6888308127329594, "grad_norm": 4.630758285522461, "learning_rate": 4.411502691593551e-05, "loss": 2.265216827392578, "memory(GiB)": 72.85, "step": 62760, "token_acc": 0.5095057034220533, "train_speed(iter/s)": 0.672494 }, { "epoch": 2.6890450280622082, "grad_norm": 5.734176158905029, "learning_rate": 4.410834397310443e-05, "loss": 2.402155113220215, "memory(GiB)": 72.85, "step": 62765, "token_acc": 0.4909090909090909, "train_speed(iter/s)": 0.672491 }, { "epoch": 2.689259243391457, "grad_norm": 3.5610029697418213, "learning_rate": 4.410166113700621e-05, "loss": 2.158241844177246, "memory(GiB)": 72.85, "step": 62770, "token_acc": 0.5375722543352601, "train_speed(iter/s)": 0.672496 }, { "epoch": 2.6894734587207063, "grad_norm": 4.979588031768799, "learning_rate": 4.4094978407761936e-05, "loss": 2.2116165161132812, "memory(GiB)": 72.85, "step": 62775, "token_acc": 0.5246478873239436, "train_speed(iter/s)": 0.67249 }, { "epoch": 2.689687674049955, "grad_norm": 4.77622652053833, "learning_rate": 4.408829578549268e-05, "loss": 2.136464500427246, "memory(GiB)": 72.85, "step": 62780, "token_acc": 0.5631768953068592, "train_speed(iter/s)": 0.672491 }, { "epoch": 2.689901889379204, "grad_norm": 5.504626274108887, "learning_rate": 4.4081613270319476e-05, "loss": 2.173276901245117, "memory(GiB)": 72.85, "step": 62785, "token_acc": 0.5202702702702703, "train_speed(iter/s)": 0.672496 }, { "epoch": 2.690116104708453, "grad_norm": 5.290399074554443, "learning_rate": 4.407493086236341e-05, "loss": 1.7053329467773437, 
"memory(GiB)": 72.85, "step": 62790, "token_acc": 0.6099585062240664, "train_speed(iter/s)": 0.672501 }, { "epoch": 2.690330320037702, "grad_norm": 5.205339431762695, "learning_rate": 4.406824856174552e-05, "loss": 2.1742008209228514, "memory(GiB)": 72.85, "step": 62795, "token_acc": 0.5392156862745098, "train_speed(iter/s)": 0.672507 }, { "epoch": 2.690544535366951, "grad_norm": 4.462608337402344, "learning_rate": 4.406156636858688e-05, "loss": 2.1504201889038086, "memory(GiB)": 72.85, "step": 62800, "token_acc": 0.5236593059936908, "train_speed(iter/s)": 0.672509 }, { "epoch": 2.6907587506962, "grad_norm": 5.9395222663879395, "learning_rate": 4.4054884283008534e-05, "loss": 2.368026924133301, "memory(GiB)": 72.85, "step": 62805, "token_acc": 0.4786885245901639, "train_speed(iter/s)": 0.672519 }, { "epoch": 2.690972966025449, "grad_norm": 4.4392619132995605, "learning_rate": 4.404820230513153e-05, "loss": 2.187916374206543, "memory(GiB)": 72.85, "step": 62810, "token_acc": 0.5340136054421769, "train_speed(iter/s)": 0.672535 }, { "epoch": 2.6911871813546977, "grad_norm": 4.586309909820557, "learning_rate": 4.404152043507692e-05, "loss": 2.418901062011719, "memory(GiB)": 72.85, "step": 62815, "token_acc": 0.48641304347826086, "train_speed(iter/s)": 0.672539 }, { "epoch": 2.691401396683947, "grad_norm": 4.205632209777832, "learning_rate": 4.4034838672965764e-05, "loss": 2.133594512939453, "memory(GiB)": 72.85, "step": 62820, "token_acc": 0.5817490494296578, "train_speed(iter/s)": 0.672539 }, { "epoch": 2.6916156120131958, "grad_norm": 5.628411769866943, "learning_rate": 4.4028157018919106e-05, "loss": 2.0592424392700197, "memory(GiB)": 72.85, "step": 62825, "token_acc": 0.4980237154150198, "train_speed(iter/s)": 0.67254 }, { "epoch": 2.6918298273424446, "grad_norm": 4.1341233253479, "learning_rate": 4.4021475473057984e-05, "loss": 2.374916648864746, "memory(GiB)": 72.85, "step": 62830, "token_acc": 0.5015873015873016, "train_speed(iter/s)": 0.672534 }, { "epoch": 
2.692044042671694, "grad_norm": 4.844470500946045, "learning_rate": 4.401479403550344e-05, "loss": 2.184408187866211, "memory(GiB)": 72.85, "step": 62835, "token_acc": 0.524822695035461, "train_speed(iter/s)": 0.672532 }, { "epoch": 2.6922582580009427, "grad_norm": 6.024712562561035, "learning_rate": 4.400811270637652e-05, "loss": 2.3460805892944334, "memory(GiB)": 72.85, "step": 62840, "token_acc": 0.4967105263157895, "train_speed(iter/s)": 0.672539 }, { "epoch": 2.6924724733301915, "grad_norm": 5.345314025878906, "learning_rate": 4.400143148579826e-05, "loss": 2.066579055786133, "memory(GiB)": 72.85, "step": 62845, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.672553 }, { "epoch": 2.6926866886594407, "grad_norm": 4.841644287109375, "learning_rate": 4.399475037388969e-05, "loss": 2.136288642883301, "memory(GiB)": 72.85, "step": 62850, "token_acc": 0.5501432664756447, "train_speed(iter/s)": 0.672557 }, { "epoch": 2.6929009039886895, "grad_norm": 4.863332271575928, "learning_rate": 4.398806937077185e-05, "loss": 2.309976577758789, "memory(GiB)": 72.85, "step": 62855, "token_acc": 0.49560117302052786, "train_speed(iter/s)": 0.672556 }, { "epoch": 2.6931151193179383, "grad_norm": 7.422399520874023, "learning_rate": 4.398138847656578e-05, "loss": 2.618275833129883, "memory(GiB)": 72.85, "step": 62860, "token_acc": 0.5080645161290323, "train_speed(iter/s)": 0.672569 }, { "epoch": 2.6933293346471876, "grad_norm": 4.783759593963623, "learning_rate": 4.3974707691392503e-05, "loss": 2.277477264404297, "memory(GiB)": 72.85, "step": 62865, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.672576 }, { "epoch": 2.6935435499764364, "grad_norm": 6.206051826477051, "learning_rate": 4.396802701537304e-05, "loss": 2.498918342590332, "memory(GiB)": 72.85, "step": 62870, "token_acc": 0.5149253731343284, "train_speed(iter/s)": 0.672591 }, { "epoch": 2.6937577653056852, "grad_norm": 4.432483673095703, "learning_rate": 4.396134644862844e-05, "loss": 
2.3598390579223634, "memory(GiB)": 72.85, "step": 62875, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.672589 }, { "epoch": 2.6939719806349345, "grad_norm": 3.648519515991211, "learning_rate": 4.395466599127971e-05, "loss": 2.3352460861206055, "memory(GiB)": 72.85, "step": 62880, "token_acc": 0.5335570469798657, "train_speed(iter/s)": 0.672582 }, { "epoch": 2.6941861959641833, "grad_norm": 4.912664890289307, "learning_rate": 4.3947985643447866e-05, "loss": 2.269953155517578, "memory(GiB)": 72.85, "step": 62885, "token_acc": 0.4889705882352941, "train_speed(iter/s)": 0.672579 }, { "epoch": 2.694400411293432, "grad_norm": 5.474615097045898, "learning_rate": 4.394130540525392e-05, "loss": 2.2415899276733398, "memory(GiB)": 72.85, "step": 62890, "token_acc": 0.5073313782991202, "train_speed(iter/s)": 0.672586 }, { "epoch": 2.6946146266226814, "grad_norm": 6.355604648590088, "learning_rate": 4.393462527681894e-05, "loss": 2.310717964172363, "memory(GiB)": 72.85, "step": 62895, "token_acc": 0.48398576512455516, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.69482884195193, "grad_norm": 5.61641263961792, "learning_rate": 4.39279452582639e-05, "loss": 2.3747280120849608, "memory(GiB)": 72.85, "step": 62900, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.672593 }, { "epoch": 2.695043057281179, "grad_norm": 5.938939571380615, "learning_rate": 4.3921265349709844e-05, "loss": 2.2004261016845703, "memory(GiB)": 72.85, "step": 62905, "token_acc": 0.5575221238938053, "train_speed(iter/s)": 0.672576 }, { "epoch": 2.6952572726104282, "grad_norm": 5.243402481079102, "learning_rate": 4.391458555127777e-05, "loss": 2.216262435913086, "memory(GiB)": 72.85, "step": 62910, "token_acc": 0.5, "train_speed(iter/s)": 0.672579 }, { "epoch": 2.695471487939677, "grad_norm": 4.304833889007568, "learning_rate": 4.390790586308867e-05, "loss": 2.1676029205322265, "memory(GiB)": 72.85, "step": 62915, "token_acc": 0.5096525096525096, "train_speed(iter/s)": 0.672575 }, { 
"epoch": 2.695685703268926, "grad_norm": 4.45601224899292, "learning_rate": 4.390122628526358e-05, "loss": 2.253031349182129, "memory(GiB)": 72.85, "step": 62920, "token_acc": 0.4946236559139785, "train_speed(iter/s)": 0.67258 }, { "epoch": 2.695899918598175, "grad_norm": 3.763455629348755, "learning_rate": 4.38945468179235e-05, "loss": 2.4039276123046873, "memory(GiB)": 72.85, "step": 62925, "token_acc": 0.501577287066246, "train_speed(iter/s)": 0.672577 }, { "epoch": 2.696114133927424, "grad_norm": 4.29157018661499, "learning_rate": 4.3887867461189416e-05, "loss": 2.2333423614501955, "memory(GiB)": 72.85, "step": 62930, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.672588 }, { "epoch": 2.6963283492566728, "grad_norm": 4.464785099029541, "learning_rate": 4.388118821518236e-05, "loss": 2.3304676055908202, "memory(GiB)": 72.85, "step": 62935, "token_acc": 0.49838187702265374, "train_speed(iter/s)": 0.672592 }, { "epoch": 2.696542564585922, "grad_norm": 6.4985127449035645, "learning_rate": 4.3874509080023315e-05, "loss": 2.2351375579833985, "memory(GiB)": 72.85, "step": 62940, "token_acc": 0.47876447876447875, "train_speed(iter/s)": 0.67259 }, { "epoch": 2.696756779915171, "grad_norm": 4.461650848388672, "learning_rate": 4.3867830055833284e-05, "loss": 2.1301828384399415, "memory(GiB)": 72.85, "step": 62945, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.672596 }, { "epoch": 2.6969709952444196, "grad_norm": 5.694485187530518, "learning_rate": 4.386115114273328e-05, "loss": 2.2509954452514647, "memory(GiB)": 72.85, "step": 62950, "token_acc": 0.4837758112094395, "train_speed(iter/s)": 0.672598 }, { "epoch": 2.697185210573669, "grad_norm": 5.923526287078857, "learning_rate": 4.385447234084426e-05, "loss": 2.35833854675293, "memory(GiB)": 72.85, "step": 62955, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.6973994259029177, "grad_norm": 6.1867241859436035, "learning_rate": 4.384779365028722e-05, "loss": 
2.394775390625, "memory(GiB)": 72.85, "step": 62960, "token_acc": 0.47770700636942676, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.6976136412321665, "grad_norm": 4.7034149169921875, "learning_rate": 4.38411150711832e-05, "loss": 2.113180160522461, "memory(GiB)": 72.85, "step": 62965, "token_acc": 0.5, "train_speed(iter/s)": 0.672594 }, { "epoch": 2.6978278565614158, "grad_norm": 4.378781318664551, "learning_rate": 4.383443660365316e-05, "loss": 2.0468976974487303, "memory(GiB)": 72.85, "step": 62970, "token_acc": 0.5102739726027398, "train_speed(iter/s)": 0.672601 }, { "epoch": 2.6980420718906646, "grad_norm": 4.3290324211120605, "learning_rate": 4.3827758247818075e-05, "loss": 2.290669250488281, "memory(GiB)": 72.85, "step": 62975, "token_acc": 0.5, "train_speed(iter/s)": 0.672606 }, { "epoch": 2.6982562872199134, "grad_norm": 4.160231590270996, "learning_rate": 4.382108000379894e-05, "loss": 2.211688232421875, "memory(GiB)": 72.85, "step": 62980, "token_acc": 0.5463576158940397, "train_speed(iter/s)": 0.672611 }, { "epoch": 2.6984705025491627, "grad_norm": 5.606118202209473, "learning_rate": 4.381440187171675e-05, "loss": 2.1128931045532227, "memory(GiB)": 72.85, "step": 62985, "token_acc": 0.5139318885448917, "train_speed(iter/s)": 0.67261 }, { "epoch": 2.6986847178784115, "grad_norm": 4.3438520431518555, "learning_rate": 4.380772385169245e-05, "loss": 2.1545663833618165, "memory(GiB)": 72.85, "step": 62990, "token_acc": 0.5459770114942529, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.6988989332076603, "grad_norm": 4.3665995597839355, "learning_rate": 4.3801045943847064e-05, "loss": 2.1190769195556642, "memory(GiB)": 72.85, "step": 62995, "token_acc": 0.5494505494505495, "train_speed(iter/s)": 0.672615 }, { "epoch": 2.6991131485369095, "grad_norm": 4.308112621307373, "learning_rate": 4.3794368148301525e-05, "loss": 2.341685676574707, "memory(GiB)": 72.85, "step": 63000, "token_acc": 0.4807692307692308, "train_speed(iter/s)": 0.672607 }, { "epoch": 
2.6991131485369095, "eval_loss": 2.0483100414276123, "eval_runtime": 15.1725, "eval_samples_per_second": 6.591, "eval_steps_per_second": 6.591, "eval_token_acc": 0.5067750677506775, "step": 63000 }, { "epoch": 2.6993273638661583, "grad_norm": 5.602994918823242, "learning_rate": 4.378769046517685e-05, "loss": 2.284107208251953, "memory(GiB)": 72.85, "step": 63005, "token_acc": 0.5072463768115942, "train_speed(iter/s)": 0.67248 }, { "epoch": 2.699541579195407, "grad_norm": 5.471659183502197, "learning_rate": 4.3781012894593975e-05, "loss": 2.240800476074219, "memory(GiB)": 72.85, "step": 63010, "token_acc": 0.4745222929936306, "train_speed(iter/s)": 0.672483 }, { "epoch": 2.6997557945246564, "grad_norm": 3.680070638656616, "learning_rate": 4.377433543667388e-05, "loss": 2.1021671295166016, "memory(GiB)": 72.85, "step": 63015, "token_acc": 0.5450980392156862, "train_speed(iter/s)": 0.672477 }, { "epoch": 2.6999700098539052, "grad_norm": 5.642914295196533, "learning_rate": 4.376765809153755e-05, "loss": 2.34852294921875, "memory(GiB)": 72.85, "step": 63020, "token_acc": 0.5408560311284046, "train_speed(iter/s)": 0.67247 }, { "epoch": 2.700184225183154, "grad_norm": 4.716421127319336, "learning_rate": 4.376098085930594e-05, "loss": 2.04449405670166, "memory(GiB)": 72.85, "step": 63025, "token_acc": 0.5, "train_speed(iter/s)": 0.672472 }, { "epoch": 2.7003984405124033, "grad_norm": 4.131943702697754, "learning_rate": 4.375430374010001e-05, "loss": 2.096735382080078, "memory(GiB)": 72.85, "step": 63030, "token_acc": 0.5571955719557196, "train_speed(iter/s)": 0.672467 }, { "epoch": 2.700612655841652, "grad_norm": 4.527009010314941, "learning_rate": 4.3747626734040716e-05, "loss": 2.1991161346435546, "memory(GiB)": 72.85, "step": 63035, "token_acc": 0.4888888888888889, "train_speed(iter/s)": 0.672468 }, { "epoch": 2.700826871170901, "grad_norm": 4.507688045501709, "learning_rate": 4.374094984124904e-05, "loss": 2.36057186126709, "memory(GiB)": 72.85, "step": 63040, 
"token_acc": 0.5295950155763239, "train_speed(iter/s)": 0.672476 }, { "epoch": 2.70104108650015, "grad_norm": 4.901730537414551, "learning_rate": 4.3734273061845926e-05, "loss": 2.257244873046875, "memory(GiB)": 72.85, "step": 63045, "token_acc": 0.5256410256410257, "train_speed(iter/s)": 0.672485 }, { "epoch": 2.701255301829399, "grad_norm": 4.4120259284973145, "learning_rate": 4.372759639595234e-05, "loss": 1.7633186340332032, "memory(GiB)": 72.85, "step": 63050, "token_acc": 0.5975609756097561, "train_speed(iter/s)": 0.672476 }, { "epoch": 2.701469517158648, "grad_norm": 4.193800449371338, "learning_rate": 4.3720919843689236e-05, "loss": 2.046467971801758, "memory(GiB)": 72.85, "step": 63055, "token_acc": 0.5325077399380805, "train_speed(iter/s)": 0.672468 }, { "epoch": 2.701683732487897, "grad_norm": 6.945068836212158, "learning_rate": 4.371424340517754e-05, "loss": 2.2454761505126952, "memory(GiB)": 72.85, "step": 63060, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.672462 }, { "epoch": 2.701897947817146, "grad_norm": 4.986578464508057, "learning_rate": 4.3707567080538235e-05, "loss": 1.9796175003051757, "memory(GiB)": 72.85, "step": 63065, "token_acc": 0.5795918367346938, "train_speed(iter/s)": 0.672449 }, { "epoch": 2.7021121631463947, "grad_norm": 4.624647617340088, "learning_rate": 4.370089086989225e-05, "loss": 2.401078796386719, "memory(GiB)": 72.85, "step": 63070, "token_acc": 0.5166666666666667, "train_speed(iter/s)": 0.672445 }, { "epoch": 2.702326378475644, "grad_norm": 4.38785982131958, "learning_rate": 4.369421477336054e-05, "loss": 2.3619293212890624, "memory(GiB)": 72.85, "step": 63075, "token_acc": 0.47987616099071206, "train_speed(iter/s)": 0.672439 }, { "epoch": 2.7025405938048928, "grad_norm": 4.349098205566406, "learning_rate": 4.368753879106404e-05, "loss": 2.2373220443725588, "memory(GiB)": 72.85, "step": 63080, "token_acc": 0.5034013605442177, "train_speed(iter/s)": 0.672444 }, { "epoch": 2.7027548091341416, "grad_norm": 
4.027554512023926, "learning_rate": 4.368086292312369e-05, "loss": 2.3096847534179688, "memory(GiB)": 72.85, "step": 63085, "token_acc": 0.4798657718120805, "train_speed(iter/s)": 0.672448 }, { "epoch": 2.702969024463391, "grad_norm": 4.551548004150391, "learning_rate": 4.367418716966045e-05, "loss": 2.2770254135131838, "memory(GiB)": 72.85, "step": 63090, "token_acc": 0.509090909090909, "train_speed(iter/s)": 0.67244 }, { "epoch": 2.7031832397926396, "grad_norm": 4.749971866607666, "learning_rate": 4.366751153079525e-05, "loss": 2.3303293228149413, "memory(GiB)": 72.85, "step": 63095, "token_acc": 0.45936395759717313, "train_speed(iter/s)": 0.672449 }, { "epoch": 2.7033974551218884, "grad_norm": 5.032801151275635, "learning_rate": 4.3660836006649e-05, "loss": 2.3699636459350586, "memory(GiB)": 72.85, "step": 63100, "token_acc": 0.5072992700729927, "train_speed(iter/s)": 0.672458 }, { "epoch": 2.7036116704511377, "grad_norm": 4.533610820770264, "learning_rate": 4.365416059734266e-05, "loss": 2.406138610839844, "memory(GiB)": 72.85, "step": 63105, "token_acc": 0.4379746835443038, "train_speed(iter/s)": 0.672473 }, { "epoch": 2.7038258857803865, "grad_norm": 4.141510486602783, "learning_rate": 4.364748530299714e-05, "loss": 2.12408332824707, "memory(GiB)": 72.85, "step": 63110, "token_acc": 0.5337620578778135, "train_speed(iter/s)": 0.67247 }, { "epoch": 2.7040401011096353, "grad_norm": 5.100597381591797, "learning_rate": 4.364081012373339e-05, "loss": 2.0109933853149413, "memory(GiB)": 72.85, "step": 63115, "token_acc": 0.5513307984790875, "train_speed(iter/s)": 0.672474 }, { "epoch": 2.7042543164388846, "grad_norm": 4.655598163604736, "learning_rate": 4.363413505967233e-05, "loss": 2.282882499694824, "memory(GiB)": 72.85, "step": 63120, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.672472 }, { "epoch": 2.7044685317681334, "grad_norm": 4.762842178344727, "learning_rate": 4.36274601109349e-05, "loss": 2.210867691040039, "memory(GiB)": 72.85, "step": 
63125, "token_acc": 0.52, "train_speed(iter/s)": 0.672462 }, { "epoch": 2.704682747097382, "grad_norm": 4.560262680053711, "learning_rate": 4.3620785277642004e-05, "loss": 2.4365081787109375, "memory(GiB)": 72.85, "step": 63130, "token_acc": 0.4679245283018868, "train_speed(iter/s)": 0.672463 }, { "epoch": 2.7048969624266315, "grad_norm": 4.879356384277344, "learning_rate": 4.3614110559914555e-05, "loss": 2.4575263977050783, "memory(GiB)": 72.85, "step": 63135, "token_acc": 0.494949494949495, "train_speed(iter/s)": 0.672471 }, { "epoch": 2.7051111777558803, "grad_norm": 4.620145797729492, "learning_rate": 4.360743595787349e-05, "loss": 2.3815792083740233, "memory(GiB)": 72.85, "step": 63140, "token_acc": 0.5, "train_speed(iter/s)": 0.672474 }, { "epoch": 2.705325393085129, "grad_norm": 5.473553657531738, "learning_rate": 4.360076147163972e-05, "loss": 2.802042770385742, "memory(GiB)": 72.85, "step": 63145, "token_acc": 0.4224137931034483, "train_speed(iter/s)": 0.672446 }, { "epoch": 2.7055396084143783, "grad_norm": 4.007572650909424, "learning_rate": 4.3594087101334164e-05, "loss": 2.1364002227783203, "memory(GiB)": 72.85, "step": 63150, "token_acc": 0.5419354838709678, "train_speed(iter/s)": 0.672451 }, { "epoch": 2.705753823743627, "grad_norm": 4.969000816345215, "learning_rate": 4.3587412847077726e-05, "loss": 2.327621650695801, "memory(GiB)": 72.85, "step": 63155, "token_acc": 0.5104895104895105, "train_speed(iter/s)": 0.672435 }, { "epoch": 2.705968039072876, "grad_norm": 5.068732261657715, "learning_rate": 4.358073870899131e-05, "loss": 2.213864326477051, "memory(GiB)": 72.85, "step": 63160, "token_acc": 0.5305343511450382, "train_speed(iter/s)": 0.672446 }, { "epoch": 2.7061822544021252, "grad_norm": 6.056618690490723, "learning_rate": 4.3574064687195846e-05, "loss": 2.2983774185180663, "memory(GiB)": 72.85, "step": 63165, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.672459 }, { "epoch": 2.706396469731374, "grad_norm": 5.3729095458984375, 
"learning_rate": 4.356739078181223e-05, "loss": 2.0116052627563477, "memory(GiB)": 72.85, "step": 63170, "token_acc": 0.5412186379928315, "train_speed(iter/s)": 0.672452 }, { "epoch": 2.706610685060623, "grad_norm": 6.212166786193848, "learning_rate": 4.356071699296135e-05, "loss": 2.1262042999267576, "memory(GiB)": 72.85, "step": 63175, "token_acc": 0.5241379310344828, "train_speed(iter/s)": 0.672447 }, { "epoch": 2.706824900389872, "grad_norm": 5.416957855224609, "learning_rate": 4.3554043320764134e-05, "loss": 2.529829216003418, "memory(GiB)": 72.85, "step": 63180, "token_acc": 0.5015974440894568, "train_speed(iter/s)": 0.672454 }, { "epoch": 2.707039115719121, "grad_norm": 5.366074562072754, "learning_rate": 4.354736976534145e-05, "loss": 2.166396713256836, "memory(GiB)": 72.85, "step": 63185, "token_acc": 0.5141843971631206, "train_speed(iter/s)": 0.672455 }, { "epoch": 2.7072533310483697, "grad_norm": 3.8750803470611572, "learning_rate": 4.354069632681423e-05, "loss": 1.9407855987548828, "memory(GiB)": 72.85, "step": 63190, "token_acc": 0.5493421052631579, "train_speed(iter/s)": 0.672435 }, { "epoch": 2.707467546377619, "grad_norm": 4.863131046295166, "learning_rate": 4.353402300530336e-05, "loss": 2.023484802246094, "memory(GiB)": 72.85, "step": 63195, "token_acc": 0.5757575757575758, "train_speed(iter/s)": 0.672431 }, { "epoch": 2.707681761706868, "grad_norm": 5.987329959869385, "learning_rate": 4.352734980092973e-05, "loss": 2.2771156311035154, "memory(GiB)": 72.85, "step": 63200, "token_acc": 0.5305466237942122, "train_speed(iter/s)": 0.672425 }, { "epoch": 2.7078959770361166, "grad_norm": 5.951197147369385, "learning_rate": 4.352067671381422e-05, "loss": 2.130145454406738, "memory(GiB)": 72.85, "step": 63205, "token_acc": 0.5119453924914675, "train_speed(iter/s)": 0.672423 }, { "epoch": 2.708110192365366, "grad_norm": 4.277491569519043, "learning_rate": 4.3514003744077745e-05, "loss": 2.197296142578125, "memory(GiB)": 72.85, "step": 63210, "token_acc": 
0.5087209302325582, "train_speed(iter/s)": 0.672428 }, { "epoch": 2.7083244076946147, "grad_norm": 4.000080585479736, "learning_rate": 4.350733089184117e-05, "loss": 2.112910842895508, "memory(GiB)": 72.85, "step": 63215, "token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.672418 }, { "epoch": 2.7085386230238635, "grad_norm": 4.878415584564209, "learning_rate": 4.3500658157225375e-05, "loss": 2.131855583190918, "memory(GiB)": 72.85, "step": 63220, "token_acc": 0.5247524752475248, "train_speed(iter/s)": 0.672409 }, { "epoch": 2.7087528383531128, "grad_norm": 4.379963397979736, "learning_rate": 4.3493985540351265e-05, "loss": 2.044842529296875, "memory(GiB)": 72.85, "step": 63225, "token_acc": 0.525691699604743, "train_speed(iter/s)": 0.672402 }, { "epoch": 2.7089670536823616, "grad_norm": 4.972153186798096, "learning_rate": 4.34873130413397e-05, "loss": 2.0801040649414064, "memory(GiB)": 72.85, "step": 63230, "token_acc": 0.5387323943661971, "train_speed(iter/s)": 0.672407 }, { "epoch": 2.7091812690116104, "grad_norm": 4.424018383026123, "learning_rate": 4.348064066031159e-05, "loss": 2.2346673965454102, "memory(GiB)": 72.85, "step": 63235, "token_acc": 0.5176151761517616, "train_speed(iter/s)": 0.672408 }, { "epoch": 2.7093954843408596, "grad_norm": 4.801203727722168, "learning_rate": 4.3473968397387774e-05, "loss": 2.0889781951904296, "memory(GiB)": 72.85, "step": 63240, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.672399 }, { "epoch": 2.7096096996701085, "grad_norm": 4.491710662841797, "learning_rate": 4.3467296252689144e-05, "loss": 2.3890558242797852, "memory(GiB)": 72.85, "step": 63245, "token_acc": 0.5308219178082192, "train_speed(iter/s)": 0.672411 }, { "epoch": 2.7098239149993573, "grad_norm": 4.775730609893799, "learning_rate": 4.3460624226336576e-05, "loss": 2.2342607498168947, "memory(GiB)": 72.85, "step": 63250, "token_acc": 0.47039473684210525, "train_speed(iter/s)": 0.672404 }, { "epoch": 2.7100381303286065, "grad_norm": 
4.879724979400635, "learning_rate": 4.345395231845094e-05, "loss": 2.1944644927978514, "memory(GiB)": 72.85, "step": 63255, "token_acc": 0.5618374558303887, "train_speed(iter/s)": 0.672403 }, { "epoch": 2.7102523456578553, "grad_norm": 4.176592826843262, "learning_rate": 4.344728052915307e-05, "loss": 2.3099781036376954, "memory(GiB)": 72.85, "step": 63260, "token_acc": 0.5244299674267101, "train_speed(iter/s)": 0.6724 }, { "epoch": 2.710466560987104, "grad_norm": 5.214505195617676, "learning_rate": 4.344060885856387e-05, "loss": 2.6222389221191404, "memory(GiB)": 72.85, "step": 63265, "token_acc": 0.5143884892086331, "train_speed(iter/s)": 0.672406 }, { "epoch": 2.7106807763163534, "grad_norm": 5.290359973907471, "learning_rate": 4.343393730680421e-05, "loss": 2.4996337890625, "memory(GiB)": 72.85, "step": 63270, "token_acc": 0.5, "train_speed(iter/s)": 0.672421 }, { "epoch": 2.710894991645602, "grad_norm": 3.3511929512023926, "learning_rate": 4.3427265873994935e-05, "loss": 2.0435585021972655, "memory(GiB)": 72.85, "step": 63275, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672422 }, { "epoch": 2.711109206974851, "grad_norm": 3.7920360565185547, "learning_rate": 4.342059456025689e-05, "loss": 2.247638130187988, "memory(GiB)": 72.85, "step": 63280, "token_acc": 0.503448275862069, "train_speed(iter/s)": 0.672425 }, { "epoch": 2.7113234223041003, "grad_norm": 6.208820819854736, "learning_rate": 4.341392336571096e-05, "loss": 2.0774742126464845, "memory(GiB)": 72.85, "step": 63285, "token_acc": 0.5807560137457045, "train_speed(iter/s)": 0.67243 }, { "epoch": 2.711537637633349, "grad_norm": 5.357738018035889, "learning_rate": 4.340725229047797e-05, "loss": 2.0883544921875, "memory(GiB)": 72.85, "step": 63290, "token_acc": 0.5387755102040817, "train_speed(iter/s)": 0.672431 }, { "epoch": 2.711751852962598, "grad_norm": 4.379182815551758, "learning_rate": 4.3400581334678805e-05, "loss": 2.2585081100463866, "memory(GiB)": 72.85, "step": 63295, "token_acc": 
0.5283018867924528, "train_speed(iter/s)": 0.67243 }, { "epoch": 2.711966068291847, "grad_norm": 4.570275783538818, "learning_rate": 4.33939104984343e-05, "loss": 2.0711368560791015, "memory(GiB)": 72.85, "step": 63300, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672431 }, { "epoch": 2.712180283621096, "grad_norm": 4.045596599578857, "learning_rate": 4.338723978186529e-05, "loss": 2.1705337524414063, "memory(GiB)": 72.85, "step": 63305, "token_acc": 0.5288753799392097, "train_speed(iter/s)": 0.672436 }, { "epoch": 2.712394498950345, "grad_norm": 5.519894599914551, "learning_rate": 4.338056918509265e-05, "loss": 2.0418716430664063, "memory(GiB)": 72.85, "step": 63310, "token_acc": 0.5420560747663551, "train_speed(iter/s)": 0.672445 }, { "epoch": 2.712608714279594, "grad_norm": 5.65863037109375, "learning_rate": 4.33738987082372e-05, "loss": 2.392266845703125, "memory(GiB)": 72.85, "step": 63315, "token_acc": 0.508, "train_speed(iter/s)": 0.672452 }, { "epoch": 2.712822929608843, "grad_norm": 4.0405473709106445, "learning_rate": 4.336722835141979e-05, "loss": 1.9978660583496093, "memory(GiB)": 72.85, "step": 63320, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672443 }, { "epoch": 2.7130371449380917, "grad_norm": 6.531030654907227, "learning_rate": 4.3360558114761266e-05, "loss": 2.1312801361083986, "memory(GiB)": 72.85, "step": 63325, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672449 }, { "epoch": 2.713251360267341, "grad_norm": 5.270047664642334, "learning_rate": 4.3353887998382444e-05, "loss": 2.0610050201416015, "memory(GiB)": 72.85, "step": 63330, "token_acc": 0.5537190082644629, "train_speed(iter/s)": 0.672455 }, { "epoch": 2.7134655755965897, "grad_norm": 5.215852737426758, "learning_rate": 4.334721800240418e-05, "loss": 2.1655784606933595, "memory(GiB)": 72.85, "step": 63335, "token_acc": 0.525096525096525, "train_speed(iter/s)": 0.672449 }, { "epoch": 2.7136797909258386, "grad_norm": 5.476064205169678, 
"learning_rate": 4.3340548126947316e-05, "loss": 2.2722877502441405, "memory(GiB)": 72.85, "step": 63340, "token_acc": 0.5077519379844961, "train_speed(iter/s)": 0.67244 }, { "epoch": 2.713894006255088, "grad_norm": 4.976879119873047, "learning_rate": 4.333387837213267e-05, "loss": 2.5044410705566404, "memory(GiB)": 72.85, "step": 63345, "token_acc": 0.49480968858131485, "train_speed(iter/s)": 0.672436 }, { "epoch": 2.7141082215843366, "grad_norm": 4.615973472595215, "learning_rate": 4.332720873808106e-05, "loss": 2.147678756713867, "memory(GiB)": 72.85, "step": 63350, "token_acc": 0.5397350993377483, "train_speed(iter/s)": 0.672442 }, { "epoch": 2.7143224369135854, "grad_norm": 4.568739414215088, "learning_rate": 4.332053922491333e-05, "loss": 2.496295928955078, "memory(GiB)": 72.85, "step": 63355, "token_acc": 0.4967105263157895, "train_speed(iter/s)": 0.672427 }, { "epoch": 2.7145366522428347, "grad_norm": 5.128261566162109, "learning_rate": 4.33138698327503e-05, "loss": 2.4418367385864257, "memory(GiB)": 72.85, "step": 63360, "token_acc": 0.5233918128654971, "train_speed(iter/s)": 0.672439 }, { "epoch": 2.7147508675720835, "grad_norm": 6.3836798667907715, "learning_rate": 4.3307200561712777e-05, "loss": 2.319264221191406, "memory(GiB)": 72.85, "step": 63365, "token_acc": 0.4904214559386973, "train_speed(iter/s)": 0.672444 }, { "epoch": 2.7149650829013323, "grad_norm": 4.49157190322876, "learning_rate": 4.330053141192161e-05, "loss": 2.1224889755249023, "memory(GiB)": 72.85, "step": 63370, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.672448 }, { "epoch": 2.7151792982305816, "grad_norm": 4.776149272918701, "learning_rate": 4.3293862383497594e-05, "loss": 2.3755847930908205, "memory(GiB)": 72.85, "step": 63375, "token_acc": 0.48985507246376814, "train_speed(iter/s)": 0.672455 }, { "epoch": 2.7153935135598304, "grad_norm": 4.3351006507873535, "learning_rate": 4.3287193476561555e-05, "loss": 2.1922771453857424, "memory(GiB)": 72.85, "step": 63380, 
"token_acc": 0.553030303030303, "train_speed(iter/s)": 0.672471 }, { "epoch": 2.715607728889079, "grad_norm": 7.111230373382568, "learning_rate": 4.32805246912343e-05, "loss": 2.3711406707763674, "memory(GiB)": 72.85, "step": 63385, "token_acc": 0.49498327759197325, "train_speed(iter/s)": 0.672477 }, { "epoch": 2.7158219442183285, "grad_norm": 6.05540657043457, "learning_rate": 4.327385602763664e-05, "loss": 2.364855194091797, "memory(GiB)": 72.85, "step": 63390, "token_acc": 0.4912891986062718, "train_speed(iter/s)": 0.672474 }, { "epoch": 2.7160361595475773, "grad_norm": 5.2178802490234375, "learning_rate": 4.32671874858894e-05, "loss": 2.4376319885253905, "memory(GiB)": 72.85, "step": 63395, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 0.672474 }, { "epoch": 2.716250374876826, "grad_norm": 5.630674362182617, "learning_rate": 4.326051906611337e-05, "loss": 2.0615327835083006, "memory(GiB)": 72.85, "step": 63400, "token_acc": 0.5419847328244275, "train_speed(iter/s)": 0.672481 }, { "epoch": 2.7164645902060753, "grad_norm": 4.749394416809082, "learning_rate": 4.325385076842934e-05, "loss": 2.3055398941040037, "memory(GiB)": 72.85, "step": 63405, "token_acc": 0.5, "train_speed(iter/s)": 0.672487 }, { "epoch": 2.716678805535324, "grad_norm": 5.202356815338135, "learning_rate": 4.3247182592958136e-05, "loss": 2.3262359619140627, "memory(GiB)": 72.85, "step": 63410, "token_acc": 0.5, "train_speed(iter/s)": 0.672478 }, { "epoch": 2.716893020864573, "grad_norm": 5.918593406677246, "learning_rate": 4.3240514539820574e-05, "loss": 2.3918888092041017, "memory(GiB)": 72.85, "step": 63415, "token_acc": 0.4716312056737589, "train_speed(iter/s)": 0.67249 }, { "epoch": 2.717107236193822, "grad_norm": 4.839836597442627, "learning_rate": 4.323384660913743e-05, "loss": 2.2734960556030273, "memory(GiB)": 72.85, "step": 63420, "token_acc": 0.5279503105590062, "train_speed(iter/s)": 0.672489 }, { "epoch": 2.717321451523071, "grad_norm": 6.313764572143555, "learning_rate": 
4.322717880102949e-05, "loss": 2.2417835235595702, "memory(GiB)": 72.85, "step": 63425, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672498 }, { "epoch": 2.71753566685232, "grad_norm": 4.111998558044434, "learning_rate": 4.3220511115617565e-05, "loss": 2.102567672729492, "memory(GiB)": 72.85, "step": 63430, "token_acc": 0.5533596837944664, "train_speed(iter/s)": 0.672513 }, { "epoch": 2.717749882181569, "grad_norm": 5.091036796569824, "learning_rate": 4.321384355302244e-05, "loss": 2.2327503204345702, "memory(GiB)": 72.85, "step": 63435, "token_acc": 0.5371621621621622, "train_speed(iter/s)": 0.672522 }, { "epoch": 2.717964097510818, "grad_norm": 7.852041244506836, "learning_rate": 4.320717611336491e-05, "loss": 2.389083480834961, "memory(GiB)": 72.85, "step": 63440, "token_acc": 0.4788273615635179, "train_speed(iter/s)": 0.672528 }, { "epoch": 2.7181783128400667, "grad_norm": 5.112532615661621, "learning_rate": 4.320050879676575e-05, "loss": 2.213887596130371, "memory(GiB)": 72.85, "step": 63445, "token_acc": 0.5413793103448276, "train_speed(iter/s)": 0.672529 }, { "epoch": 2.718392528169316, "grad_norm": 4.686591625213623, "learning_rate": 4.3193841603345755e-05, "loss": 2.459035110473633, "memory(GiB)": 72.85, "step": 63450, "token_acc": 0.49673202614379086, "train_speed(iter/s)": 0.672537 }, { "epoch": 2.718606743498565, "grad_norm": 4.171178340911865, "learning_rate": 4.318717453322571e-05, "loss": 2.3549312591552733, "memory(GiB)": 72.85, "step": 63455, "token_acc": 0.4714285714285714, "train_speed(iter/s)": 0.672541 }, { "epoch": 2.7188209588278136, "grad_norm": 4.0138983726501465, "learning_rate": 4.318050758652638e-05, "loss": 2.2985233306884765, "memory(GiB)": 72.85, "step": 63460, "token_acc": 0.49635036496350365, "train_speed(iter/s)": 0.672553 }, { "epoch": 2.719035174157063, "grad_norm": 3.8444151878356934, "learning_rate": 4.317384076336855e-05, "loss": 2.319212532043457, "memory(GiB)": 72.85, "step": 63465, "token_acc": 
0.5148809523809523, "train_speed(iter/s)": 0.672568 }, { "epoch": 2.7192493894863117, "grad_norm": 4.944118499755859, "learning_rate": 4.3167174063873004e-05, "loss": 2.3870214462280273, "memory(GiB)": 72.85, "step": 63470, "token_acc": 0.5017301038062284, "train_speed(iter/s)": 0.672559 }, { "epoch": 2.7194636048155605, "grad_norm": 5.311090469360352, "learning_rate": 4.3160507488160504e-05, "loss": 2.2333616256713866, "memory(GiB)": 72.85, "step": 63475, "token_acc": 0.5041322314049587, "train_speed(iter/s)": 0.672563 }, { "epoch": 2.7196778201448097, "grad_norm": 4.829941749572754, "learning_rate": 4.3153841036351814e-05, "loss": 2.2299291610717775, "memory(GiB)": 72.85, "step": 63480, "token_acc": 0.5218855218855218, "train_speed(iter/s)": 0.672562 }, { "epoch": 2.7198920354740586, "grad_norm": 5.234649658203125, "learning_rate": 4.314717470856774e-05, "loss": 2.106984329223633, "memory(GiB)": 72.85, "step": 63485, "token_acc": 0.5528455284552846, "train_speed(iter/s)": 0.672561 }, { "epoch": 2.7201062508033074, "grad_norm": 3.863412380218506, "learning_rate": 4.314050850492902e-05, "loss": 2.361906051635742, "memory(GiB)": 72.85, "step": 63490, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.67257 }, { "epoch": 2.7203204661325566, "grad_norm": 4.399169921875, "learning_rate": 4.313384242555641e-05, "loss": 2.252298355102539, "memory(GiB)": 72.85, "step": 63495, "token_acc": 0.5043988269794721, "train_speed(iter/s)": 0.672573 }, { "epoch": 2.7205346814618054, "grad_norm": 5.180922985076904, "learning_rate": 4.31271764705707e-05, "loss": 2.077787399291992, "memory(GiB)": 72.85, "step": 63500, "token_acc": 0.5321428571428571, "train_speed(iter/s)": 0.672578 }, { "epoch": 2.7205346814618054, "eval_loss": 2.024188280105591, "eval_runtime": 14.7501, "eval_samples_per_second": 6.78, "eval_steps_per_second": 6.78, "eval_token_acc": 0.5020408163265306, "step": 63500 }, { "epoch": 2.7207488967910542, "grad_norm": 4.510531425476074, "learning_rate": 
4.3120510640092636e-05, "loss": 2.4620634078979493, "memory(GiB)": 72.85, "step": 63505, "token_acc": 0.501984126984127, "train_speed(iter/s)": 0.672442 }, { "epoch": 2.7209631121203035, "grad_norm": 4.541799545288086, "learning_rate": 4.311384493424297e-05, "loss": 2.3054431915283202, "memory(GiB)": 72.85, "step": 63510, "token_acc": 0.46200607902735563, "train_speed(iter/s)": 0.672442 }, { "epoch": 2.7211773274495523, "grad_norm": 4.328494548797607, "learning_rate": 4.310717935314247e-05, "loss": 2.139535140991211, "memory(GiB)": 72.85, "step": 63515, "token_acc": 0.5394321766561514, "train_speed(iter/s)": 0.672429 }, { "epoch": 2.721391542778801, "grad_norm": 4.349802017211914, "learning_rate": 4.3100513896911865e-05, "loss": 2.515439987182617, "memory(GiB)": 72.85, "step": 63520, "token_acc": 0.47039473684210525, "train_speed(iter/s)": 0.672439 }, { "epoch": 2.7216057581080504, "grad_norm": 4.013448715209961, "learning_rate": 4.309384856567194e-05, "loss": 2.5543983459472654, "memory(GiB)": 72.85, "step": 63525, "token_acc": 0.45652173913043476, "train_speed(iter/s)": 0.672445 }, { "epoch": 2.721819973437299, "grad_norm": 6.144772529602051, "learning_rate": 4.3087183359543426e-05, "loss": 2.1890304565429686, "memory(GiB)": 72.85, "step": 63530, "token_acc": 0.5048543689320388, "train_speed(iter/s)": 0.672448 }, { "epoch": 2.722034188766548, "grad_norm": 4.802340984344482, "learning_rate": 4.308051827864705e-05, "loss": 2.5371469497680663, "memory(GiB)": 72.85, "step": 63535, "token_acc": 0.5050505050505051, "train_speed(iter/s)": 0.672426 }, { "epoch": 2.7222484040957973, "grad_norm": 4.742079734802246, "learning_rate": 4.3073853323103604e-05, "loss": 2.303641128540039, "memory(GiB)": 72.85, "step": 63540, "token_acc": 0.5234657039711191, "train_speed(iter/s)": 0.672417 }, { "epoch": 2.722462619425046, "grad_norm": 4.4833855628967285, "learning_rate": 4.3067188493033796e-05, "loss": 2.0923791885375977, "memory(GiB)": 72.85, "step": 63545, "token_acc": 
0.559322033898305, "train_speed(iter/s)": 0.672412 }, { "epoch": 2.722676834754295, "grad_norm": 4.769822597503662, "learning_rate": 4.3060523788558355e-05, "loss": 2.2762729644775392, "memory(GiB)": 72.85, "step": 63550, "token_acc": 0.5133333333333333, "train_speed(iter/s)": 0.6724 }, { "epoch": 2.722891050083544, "grad_norm": 4.4847612380981445, "learning_rate": 4.3053859209798025e-05, "loss": 2.4139873504638674, "memory(GiB)": 72.85, "step": 63555, "token_acc": 0.4725274725274725, "train_speed(iter/s)": 0.672401 }, { "epoch": 2.723105265412793, "grad_norm": 5.470586776733398, "learning_rate": 4.3047194756873575e-05, "loss": 2.096876525878906, "memory(GiB)": 72.85, "step": 63560, "token_acc": 0.5228215767634855, "train_speed(iter/s)": 0.672396 }, { "epoch": 2.7233194807420418, "grad_norm": 4.578096866607666, "learning_rate": 4.3040530429905715e-05, "loss": 2.5062456130981445, "memory(GiB)": 72.85, "step": 63565, "token_acc": 0.4583333333333333, "train_speed(iter/s)": 0.672408 }, { "epoch": 2.723533696071291, "grad_norm": 5.714454650878906, "learning_rate": 4.3033866229015166e-05, "loss": 2.324826240539551, "memory(GiB)": 72.85, "step": 63570, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.672417 }, { "epoch": 2.72374791140054, "grad_norm": 5.434156894683838, "learning_rate": 4.302720215432268e-05, "loss": 2.3639015197753905, "memory(GiB)": 72.85, "step": 63575, "token_acc": 0.43874643874643876, "train_speed(iter/s)": 0.672425 }, { "epoch": 2.7239621267297887, "grad_norm": 7.411475658416748, "learning_rate": 4.302053820594895e-05, "loss": 2.1264041900634765, "memory(GiB)": 72.85, "step": 63580, "token_acc": 0.5536332179930796, "train_speed(iter/s)": 0.672432 }, { "epoch": 2.724176342059038, "grad_norm": 5.125224590301514, "learning_rate": 4.301387438401473e-05, "loss": 2.338733100891113, "memory(GiB)": 72.85, "step": 63585, "token_acc": 0.48507462686567165, "train_speed(iter/s)": 0.672431 }, { "epoch": 2.7243905573882867, "grad_norm": 
5.049564361572266, "learning_rate": 4.300721068864073e-05, "loss": 2.2877931594848633, "memory(GiB)": 72.85, "step": 63590, "token_acc": 0.48, "train_speed(iter/s)": 0.672432 }, { "epoch": 2.7246047727175355, "grad_norm": 4.378561496734619, "learning_rate": 4.3000547119947656e-05, "loss": 2.140987586975098, "memory(GiB)": 72.85, "step": 63595, "token_acc": 0.5590551181102362, "train_speed(iter/s)": 0.672452 }, { "epoch": 2.724818988046785, "grad_norm": 5.069467067718506, "learning_rate": 4.2993883678056246e-05, "loss": 2.4671682357788085, "memory(GiB)": 72.85, "step": 63600, "token_acc": 0.47147147147147145, "train_speed(iter/s)": 0.672435 }, { "epoch": 2.7250332033760336, "grad_norm": 6.001398086547852, "learning_rate": 4.298722036308721e-05, "loss": 2.062456512451172, "memory(GiB)": 72.85, "step": 63605, "token_acc": 0.5424354243542435, "train_speed(iter/s)": 0.67243 }, { "epoch": 2.7252474187052824, "grad_norm": 4.449279308319092, "learning_rate": 4.298055717516124e-05, "loss": 2.145852279663086, "memory(GiB)": 72.85, "step": 63610, "token_acc": 0.5568181818181818, "train_speed(iter/s)": 0.672431 }, { "epoch": 2.7254616340345317, "grad_norm": 5.08103084564209, "learning_rate": 4.297389411439908e-05, "loss": 2.2885568618774412, "memory(GiB)": 72.85, "step": 63615, "token_acc": 0.5017543859649123, "train_speed(iter/s)": 0.67243 }, { "epoch": 2.7256758493637805, "grad_norm": 4.696314811706543, "learning_rate": 4.2967231180921395e-05, "loss": 2.304381561279297, "memory(GiB)": 72.85, "step": 63620, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.672434 }, { "epoch": 2.7258900646930293, "grad_norm": 4.086803436279297, "learning_rate": 4.296056837484894e-05, "loss": 2.1220741271972656, "memory(GiB)": 72.85, "step": 63625, "token_acc": 0.5325077399380805, "train_speed(iter/s)": 0.672453 }, { "epoch": 2.7261042800222786, "grad_norm": 4.630208492279053, "learning_rate": 4.295390569630236e-05, "loss": 2.235514831542969, "memory(GiB)": 72.85, "step": 63630, 
"token_acc": 0.5179856115107914, "train_speed(iter/s)": 0.672456 }, { "epoch": 2.7263184953515274, "grad_norm": 4.3391571044921875, "learning_rate": 4.294724314540241e-05, "loss": 2.1210838317871095, "memory(GiB)": 72.85, "step": 63635, "token_acc": 0.5477941176470589, "train_speed(iter/s)": 0.672453 }, { "epoch": 2.726532710680776, "grad_norm": 5.443648815155029, "learning_rate": 4.294058072226976e-05, "loss": 2.2604976654052735, "memory(GiB)": 72.85, "step": 63640, "token_acc": 0.49700598802395207, "train_speed(iter/s)": 0.672449 }, { "epoch": 2.7267469260100254, "grad_norm": 4.357637405395508, "learning_rate": 4.293391842702513e-05, "loss": 2.2055049896240235, "memory(GiB)": 72.85, "step": 63645, "token_acc": 0.5046728971962616, "train_speed(iter/s)": 0.672452 }, { "epoch": 2.7269611413392743, "grad_norm": 4.580090045928955, "learning_rate": 4.2927256259789184e-05, "loss": 2.623285484313965, "memory(GiB)": 72.85, "step": 63650, "token_acc": 0.46206896551724136, "train_speed(iter/s)": 0.672464 }, { "epoch": 2.727175356668523, "grad_norm": 4.521778583526611, "learning_rate": 4.292059422068262e-05, "loss": 2.2615732192993163, "memory(GiB)": 72.85, "step": 63655, "token_acc": 0.5340136054421769, "train_speed(iter/s)": 0.672471 }, { "epoch": 2.7273895719977723, "grad_norm": 4.610834121704102, "learning_rate": 4.291393230982614e-05, "loss": 2.1172677993774416, "memory(GiB)": 72.85, "step": 63660, "token_acc": 0.5303514376996805, "train_speed(iter/s)": 0.672474 }, { "epoch": 2.727603787327021, "grad_norm": 5.198297023773193, "learning_rate": 4.290727052734042e-05, "loss": 2.2503503799438476, "memory(GiB)": 72.85, "step": 63665, "token_acc": 0.5146579804560261, "train_speed(iter/s)": 0.672485 }, { "epoch": 2.72781800265627, "grad_norm": 5.640516757965088, "learning_rate": 4.290060887334616e-05, "loss": 2.4562488555908204, "memory(GiB)": 72.85, "step": 63670, "token_acc": 0.5, "train_speed(iter/s)": 0.672495 }, { "epoch": 2.728032217985519, "grad_norm": 
4.094804763793945, "learning_rate": 4.289394734796402e-05, "loss": 2.1406137466430666, "memory(GiB)": 72.85, "step": 63675, "token_acc": 0.5045871559633027, "train_speed(iter/s)": 0.672501 }, { "epoch": 2.728246433314768, "grad_norm": 4.64881706237793, "learning_rate": 4.288728595131469e-05, "loss": 2.258269500732422, "memory(GiB)": 72.85, "step": 63680, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.672508 }, { "epoch": 2.728460648644017, "grad_norm": 4.270210266113281, "learning_rate": 4.2880624683518844e-05, "loss": 2.039936065673828, "memory(GiB)": 72.85, "step": 63685, "token_acc": 0.5358255451713395, "train_speed(iter/s)": 0.672519 }, { "epoch": 2.728674863973266, "grad_norm": 6.458709239959717, "learning_rate": 4.287396354469717e-05, "loss": 2.308806037902832, "memory(GiB)": 72.85, "step": 63690, "token_acc": 0.4676258992805755, "train_speed(iter/s)": 0.672522 }, { "epoch": 2.728889079302515, "grad_norm": 6.300868511199951, "learning_rate": 4.286730253497032e-05, "loss": 2.3715211868286135, "memory(GiB)": 72.85, "step": 63695, "token_acc": 0.5098039215686274, "train_speed(iter/s)": 0.672533 }, { "epoch": 2.7291032946317637, "grad_norm": 5.273983478546143, "learning_rate": 4.286064165445896e-05, "loss": 2.30637264251709, "memory(GiB)": 72.85, "step": 63700, "token_acc": 0.5053003533568905, "train_speed(iter/s)": 0.672556 }, { "epoch": 2.729317509961013, "grad_norm": 4.091185092926025, "learning_rate": 4.28539809032838e-05, "loss": 2.3615509033203126, "memory(GiB)": 72.85, "step": 63705, "token_acc": 0.528169014084507, "train_speed(iter/s)": 0.672556 }, { "epoch": 2.729531725290262, "grad_norm": 4.175735950469971, "learning_rate": 4.284732028156548e-05, "loss": 2.204412841796875, "memory(GiB)": 72.85, "step": 63710, "token_acc": 0.5224489795918368, "train_speed(iter/s)": 0.67255 }, { "epoch": 2.7297459406195106, "grad_norm": 4.923203945159912, "learning_rate": 4.284065978942465e-05, "loss": 2.4459157943725587, "memory(GiB)": 72.85, "step": 63715, 
"token_acc": 0.48905109489051096, "train_speed(iter/s)": 0.672569 }, { "epoch": 2.72996015594876, "grad_norm": 4.983215808868408, "learning_rate": 4.2833999426982e-05, "loss": 2.496230697631836, "memory(GiB)": 72.85, "step": 63720, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.672584 }, { "epoch": 2.7301743712780087, "grad_norm": 4.232917785644531, "learning_rate": 4.282733919435815e-05, "loss": 2.186554718017578, "memory(GiB)": 72.85, "step": 63725, "token_acc": 0.5429447852760736, "train_speed(iter/s)": 0.672593 }, { "epoch": 2.7303885866072575, "grad_norm": 5.399025917053223, "learning_rate": 4.28206790916738e-05, "loss": 2.033347511291504, "memory(GiB)": 72.85, "step": 63730, "token_acc": 0.5627118644067797, "train_speed(iter/s)": 0.672584 }, { "epoch": 2.7306028019365067, "grad_norm": 4.915286064147949, "learning_rate": 4.281401911904958e-05, "loss": 2.446536636352539, "memory(GiB)": 72.85, "step": 63735, "token_acc": 0.44569288389513106, "train_speed(iter/s)": 0.6726 }, { "epoch": 2.7308170172657555, "grad_norm": 7.153083324432373, "learning_rate": 4.280735927660613e-05, "loss": 2.3270200729370116, "memory(GiB)": 72.85, "step": 63740, "token_acc": 0.5198412698412699, "train_speed(iter/s)": 0.672598 }, { "epoch": 2.7310312325950044, "grad_norm": 4.434157371520996, "learning_rate": 4.2800699564464134e-05, "loss": 2.0671911239624023, "memory(GiB)": 72.85, "step": 63745, "token_acc": 0.5538461538461539, "train_speed(iter/s)": 0.672607 }, { "epoch": 2.7312454479242536, "grad_norm": 4.201035499572754, "learning_rate": 4.279403998274421e-05, "loss": 2.295034408569336, "memory(GiB)": 72.85, "step": 63750, "token_acc": 0.49544072948328266, "train_speed(iter/s)": 0.672608 }, { "epoch": 2.7314596632535024, "grad_norm": 4.813232898712158, "learning_rate": 4.2787380531567e-05, "loss": 2.389372634887695, "memory(GiB)": 72.85, "step": 63755, "token_acc": 0.446875, "train_speed(iter/s)": 0.67261 }, { "epoch": 2.7316738785827512, "grad_norm": 4.785274028778076, 
"learning_rate": 4.278072121105318e-05, "loss": 2.3061235427856444, "memory(GiB)": 72.85, "step": 63760, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.672617 }, { "epoch": 2.7318880939120005, "grad_norm": 4.708336353302002, "learning_rate": 4.277406202132335e-05, "loss": 2.2523399353027345, "memory(GiB)": 72.85, "step": 63765, "token_acc": 0.5188284518828452, "train_speed(iter/s)": 0.672618 }, { "epoch": 2.7321023092412493, "grad_norm": 5.1204071044921875, "learning_rate": 4.276740296249817e-05, "loss": 2.439803886413574, "memory(GiB)": 72.85, "step": 63770, "token_acc": 0.5, "train_speed(iter/s)": 0.672625 }, { "epoch": 2.732316524570498, "grad_norm": 5.162618637084961, "learning_rate": 4.276074403469825e-05, "loss": 2.3289770126342773, "memory(GiB)": 72.85, "step": 63775, "token_acc": 0.5130718954248366, "train_speed(iter/s)": 0.672627 }, { "epoch": 2.7325307398997474, "grad_norm": 5.4475836753845215, "learning_rate": 4.275408523804427e-05, "loss": 2.147230529785156, "memory(GiB)": 72.85, "step": 63780, "token_acc": 0.5431034482758621, "train_speed(iter/s)": 0.672638 }, { "epoch": 2.732744955228996, "grad_norm": 4.146533489227295, "learning_rate": 4.274742657265682e-05, "loss": 2.3817853927612305, "memory(GiB)": 72.85, "step": 63785, "token_acc": 0.49480968858131485, "train_speed(iter/s)": 0.672646 }, { "epoch": 2.732959170558245, "grad_norm": 4.7320356369018555, "learning_rate": 4.2740768038656546e-05, "loss": 1.9720579147338868, "memory(GiB)": 72.85, "step": 63790, "token_acc": 0.6244897959183674, "train_speed(iter/s)": 0.67265 }, { "epoch": 2.7331733858874943, "grad_norm": 6.973124027252197, "learning_rate": 4.2734109636164074e-05, "loss": 2.146139717102051, "memory(GiB)": 72.85, "step": 63795, "token_acc": 0.5413793103448276, "train_speed(iter/s)": 0.672652 }, { "epoch": 2.733387601216743, "grad_norm": 4.2880964279174805, "learning_rate": 4.2727451365300014e-05, "loss": 2.1040958404541015, "memory(GiB)": 72.85, "step": 63800, "token_acc": 
0.515625, "train_speed(iter/s)": 0.672644 }, { "epoch": 2.733601816545992, "grad_norm": 4.3067402839660645, "learning_rate": 4.272079322618501e-05, "loss": 2.36071662902832, "memory(GiB)": 72.85, "step": 63805, "token_acc": 0.46645367412140576, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.733816031875241, "grad_norm": 4.36728048324585, "learning_rate": 4.271413521893965e-05, "loss": 2.334441375732422, "memory(GiB)": 72.85, "step": 63810, "token_acc": 0.4981132075471698, "train_speed(iter/s)": 0.672645 }, { "epoch": 2.73403024720449, "grad_norm": 4.554131984710693, "learning_rate": 4.270747734368457e-05, "loss": 2.4494810104370117, "memory(GiB)": 72.85, "step": 63815, "token_acc": 0.4967948717948718, "train_speed(iter/s)": 0.672656 }, { "epoch": 2.7342444625337388, "grad_norm": 4.402429103851318, "learning_rate": 4.270081960054038e-05, "loss": 1.8967863082885743, "memory(GiB)": 72.85, "step": 63820, "token_acc": 0.5660377358490566, "train_speed(iter/s)": 0.672654 }, { "epoch": 2.734458677862988, "grad_norm": 4.152065277099609, "learning_rate": 4.269416198962768e-05, "loss": 2.2721839904785157, "memory(GiB)": 72.85, "step": 63825, "token_acc": 0.4723926380368098, "train_speed(iter/s)": 0.672657 }, { "epoch": 2.734672893192237, "grad_norm": 5.331441879272461, "learning_rate": 4.26875045110671e-05, "loss": 2.173761177062988, "memory(GiB)": 72.85, "step": 63830, "token_acc": 0.5253164556962026, "train_speed(iter/s)": 0.672656 }, { "epoch": 2.7348871085214856, "grad_norm": 3.9851131439208984, "learning_rate": 4.268084716497924e-05, "loss": 2.2059221267700195, "memory(GiB)": 72.85, "step": 63835, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.67265 }, { "epoch": 2.735101323850735, "grad_norm": 5.961982250213623, "learning_rate": 4.267418995148468e-05, "loss": 2.2332555770874025, "memory(GiB)": 72.85, "step": 63840, "token_acc": 0.47876447876447875, "train_speed(iter/s)": 0.672653 }, { "epoch": 2.7353155391799837, "grad_norm": 3.9984867572784424, 
"learning_rate": 4.266753287070406e-05, "loss": 2.183994483947754, "memory(GiB)": 72.85, "step": 63845, "token_acc": 0.5284810126582279, "train_speed(iter/s)": 0.672661 }, { "epoch": 2.7355297545092325, "grad_norm": 4.5468268394470215, "learning_rate": 4.266087592275794e-05, "loss": 2.456840705871582, "memory(GiB)": 72.85, "step": 63850, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.672649 }, { "epoch": 2.735743969838482, "grad_norm": 5.2358245849609375, "learning_rate": 4.265421910776694e-05, "loss": 2.1524885177612303, "memory(GiB)": 72.85, "step": 63855, "token_acc": 0.5292207792207793, "train_speed(iter/s)": 0.672648 }, { "epoch": 2.7359581851677306, "grad_norm": 4.161100387573242, "learning_rate": 4.2647562425851666e-05, "loss": 2.31214599609375, "memory(GiB)": 72.85, "step": 63860, "token_acc": 0.48249027237354086, "train_speed(iter/s)": 0.672644 }, { "epoch": 2.7361724004969794, "grad_norm": 5.213168621063232, "learning_rate": 4.26409058771327e-05, "loss": 2.1066329956054686, "memory(GiB)": 72.85, "step": 63865, "token_acc": 0.51953125, "train_speed(iter/s)": 0.672648 }, { "epoch": 2.7363866158262287, "grad_norm": 5.3488969802856445, "learning_rate": 4.2634249461730616e-05, "loss": 2.1806034088134765, "memory(GiB)": 72.85, "step": 63870, "token_acc": 0.5131086142322098, "train_speed(iter/s)": 0.672656 }, { "epoch": 2.7366008311554775, "grad_norm": 4.214539051055908, "learning_rate": 4.262759317976602e-05, "loss": 2.0836856842041014, "memory(GiB)": 72.85, "step": 63875, "token_acc": 0.5216049382716049, "train_speed(iter/s)": 0.672658 }, { "epoch": 2.7368150464847263, "grad_norm": 4.796680927276611, "learning_rate": 4.262093703135949e-05, "loss": 2.1664911270141602, "memory(GiB)": 72.85, "step": 63880, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672669 }, { "epoch": 2.7370292618139755, "grad_norm": 4.435937881469727, "learning_rate": 4.26142810166316e-05, "loss": 2.168863868713379, "memory(GiB)": 72.85, "step": 63885, "token_acc": 
0.5503355704697986, "train_speed(iter/s)": 0.67268 }, { "epoch": 2.7372434771432244, "grad_norm": 4.53120231628418, "learning_rate": 4.260762513570294e-05, "loss": 1.923199462890625, "memory(GiB)": 72.85, "step": 63890, "token_acc": 0.6016949152542372, "train_speed(iter/s)": 0.672678 }, { "epoch": 2.737457692472473, "grad_norm": 4.520552635192871, "learning_rate": 4.260096938869409e-05, "loss": 2.0041242599487306, "memory(GiB)": 72.85, "step": 63895, "token_acc": 0.5259515570934256, "train_speed(iter/s)": 0.672684 }, { "epoch": 2.7376719078017224, "grad_norm": 5.519033908843994, "learning_rate": 4.2594313775725616e-05, "loss": 2.3329811096191406, "memory(GiB)": 72.85, "step": 63900, "token_acc": 0.5104166666666666, "train_speed(iter/s)": 0.672681 }, { "epoch": 2.7378861231309712, "grad_norm": 6.127101421356201, "learning_rate": 4.25876582969181e-05, "loss": 2.4534656524658205, "memory(GiB)": 72.85, "step": 63905, "token_acc": 0.4838709677419355, "train_speed(iter/s)": 0.672672 }, { "epoch": 2.73810033846022, "grad_norm": 6.2202630043029785, "learning_rate": 4.258100295239209e-05, "loss": 2.1206655502319336, "memory(GiB)": 72.85, "step": 63910, "token_acc": 0.5588235294117647, "train_speed(iter/s)": 0.672666 }, { "epoch": 2.7383145537894693, "grad_norm": 4.857931613922119, "learning_rate": 4.25743477422682e-05, "loss": 2.2800134658813476, "memory(GiB)": 72.85, "step": 63915, "token_acc": 0.5163934426229508, "train_speed(iter/s)": 0.672675 }, { "epoch": 2.738528769118718, "grad_norm": 5.464070796966553, "learning_rate": 4.2567692666666945e-05, "loss": 2.184264373779297, "memory(GiB)": 72.85, "step": 63920, "token_acc": 0.49554896142433236, "train_speed(iter/s)": 0.672681 }, { "epoch": 2.738742984447967, "grad_norm": 5.322701454162598, "learning_rate": 4.2561037725708904e-05, "loss": 2.2832267761230467, "memory(GiB)": 72.85, "step": 63925, "token_acc": 0.515527950310559, "train_speed(iter/s)": 0.672686 }, { "epoch": 2.738957199777216, "grad_norm": 4.683815002441406, 
"learning_rate": 4.2554382919514645e-05, "loss": 2.2302255630493164, "memory(GiB)": 72.85, "step": 63930, "token_acc": 0.5129151291512916, "train_speed(iter/s)": 0.672676 }, { "epoch": 2.739171415106465, "grad_norm": 5.345463752746582, "learning_rate": 4.254772824820474e-05, "loss": 1.9942207336425781, "memory(GiB)": 72.85, "step": 63935, "token_acc": 0.5463576158940397, "train_speed(iter/s)": 0.672686 }, { "epoch": 2.739385630435714, "grad_norm": 5.350232124328613, "learning_rate": 4.254107371189973e-05, "loss": 2.3627302169799806, "memory(GiB)": 72.85, "step": 63940, "token_acc": 0.49328859060402686, "train_speed(iter/s)": 0.672687 }, { "epoch": 2.739599845764963, "grad_norm": 4.536011695861816, "learning_rate": 4.253441931072015e-05, "loss": 2.125266265869141, "memory(GiB)": 72.85, "step": 63945, "token_acc": 0.5445205479452054, "train_speed(iter/s)": 0.672687 }, { "epoch": 2.739814061094212, "grad_norm": 6.061318397521973, "learning_rate": 4.2527765044786576e-05, "loss": 2.3764604568481444, "memory(GiB)": 72.85, "step": 63950, "token_acc": 0.5143884892086331, "train_speed(iter/s)": 0.672669 }, { "epoch": 2.7400282764234607, "grad_norm": 5.4001312255859375, "learning_rate": 4.252111091421954e-05, "loss": 2.435609245300293, "memory(GiB)": 72.85, "step": 63955, "token_acc": 0.492, "train_speed(iter/s)": 0.672673 }, { "epoch": 2.74024249175271, "grad_norm": 5.949526309967041, "learning_rate": 4.251445691913961e-05, "loss": 2.193027687072754, "memory(GiB)": 72.85, "step": 63960, "token_acc": 0.48221343873517786, "train_speed(iter/s)": 0.672672 }, { "epoch": 2.7404567070819588, "grad_norm": 4.274285316467285, "learning_rate": 4.250780305966731e-05, "loss": 1.9450775146484376, "memory(GiB)": 72.85, "step": 63965, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672669 }, { "epoch": 2.7406709224112076, "grad_norm": 7.036908149719238, "learning_rate": 4.2501149335923176e-05, "loss": 2.0237600326538088, "memory(GiB)": 72.85, "step": 63970, "token_acc": 
0.5485232067510548, "train_speed(iter/s)": 0.672662 }, { "epoch": 2.740885137740457, "grad_norm": 4.296207427978516, "learning_rate": 4.2494495748027776e-05, "loss": 2.477669334411621, "memory(GiB)": 72.85, "step": 63975, "token_acc": 0.49836065573770494, "train_speed(iter/s)": 0.672665 }, { "epoch": 2.7410993530697056, "grad_norm": 5.1469268798828125, "learning_rate": 4.2487842296101615e-05, "loss": 1.9817344665527343, "memory(GiB)": 72.85, "step": 63980, "token_acc": 0.5495867768595041, "train_speed(iter/s)": 0.672661 }, { "epoch": 2.7413135683989545, "grad_norm": 5.225853443145752, "learning_rate": 4.248118898026523e-05, "loss": 2.1986770629882812, "memory(GiB)": 72.85, "step": 63985, "token_acc": 0.5375494071146245, "train_speed(iter/s)": 0.672669 }, { "epoch": 2.7415277837282037, "grad_norm": 4.897895812988281, "learning_rate": 4.247453580063917e-05, "loss": 2.297852325439453, "memory(GiB)": 72.85, "step": 63990, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.672661 }, { "epoch": 2.7417419990574525, "grad_norm": 4.364527702331543, "learning_rate": 4.246788275734393e-05, "loss": 2.4016868591308596, "memory(GiB)": 72.85, "step": 63995, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.672673 }, { "epoch": 2.7419562143867013, "grad_norm": 4.529907703399658, "learning_rate": 4.2461229850500075e-05, "loss": 2.2607255935668946, "memory(GiB)": 72.85, "step": 64000, "token_acc": 0.5511811023622047, "train_speed(iter/s)": 0.672683 }, { "epoch": 2.7419562143867013, "eval_loss": 1.9593909978866577, "eval_runtime": 14.3776, "eval_samples_per_second": 6.955, "eval_steps_per_second": 6.955, "eval_token_acc": 0.5361366622864652, "step": 64000 }, { "epoch": 2.7421704297159506, "grad_norm": 5.632851600646973, "learning_rate": 4.245457708022813e-05, "loss": 2.611524963378906, "memory(GiB)": 72.85, "step": 64005, "token_acc": 0.5132662397072278, "train_speed(iter/s)": 0.672571 }, { "epoch": 2.7423846450451994, "grad_norm": 5.963942050933838, "learning_rate": 
4.244792444664859e-05, "loss": 2.2557418823242186, "memory(GiB)": 72.85, "step": 64010, "token_acc": 0.5209125475285171, "train_speed(iter/s)": 0.67258 }, { "epoch": 2.742598860374448, "grad_norm": 4.226773262023926, "learning_rate": 4.2441271949881975e-05, "loss": 2.3066455841064455, "memory(GiB)": 72.85, "step": 64015, "token_acc": 0.49814126394052044, "train_speed(iter/s)": 0.672568 }, { "epoch": 2.7428130757036975, "grad_norm": 4.4577484130859375, "learning_rate": 4.2434619590048817e-05, "loss": 1.980898666381836, "memory(GiB)": 72.85, "step": 64020, "token_acc": 0.5288135593220339, "train_speed(iter/s)": 0.672564 }, { "epoch": 2.7430272910329463, "grad_norm": 5.348402500152588, "learning_rate": 4.242796736726963e-05, "loss": 2.1493385314941404, "memory(GiB)": 72.85, "step": 64025, "token_acc": 0.532051282051282, "train_speed(iter/s)": 0.672569 }, { "epoch": 2.743241506362195, "grad_norm": 5.107352256774902, "learning_rate": 4.24213152816649e-05, "loss": 1.9488653182983398, "memory(GiB)": 72.85, "step": 64030, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.672569 }, { "epoch": 2.7434557216914444, "grad_norm": 4.518165588378906, "learning_rate": 4.241466333335517e-05, "loss": 2.29864444732666, "memory(GiB)": 72.85, "step": 64035, "token_acc": 0.4854368932038835, "train_speed(iter/s)": 0.672566 }, { "epoch": 2.743669937020693, "grad_norm": 6.156572341918945, "learning_rate": 4.240801152246091e-05, "loss": 2.253219985961914, "memory(GiB)": 72.85, "step": 64040, "token_acc": 0.458041958041958, "train_speed(iter/s)": 0.672556 }, { "epoch": 2.743884152349942, "grad_norm": 5.057223796844482, "learning_rate": 4.2401359849102653e-05, "loss": 2.232574462890625, "memory(GiB)": 72.85, "step": 64045, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672551 }, { "epoch": 2.7440983676791912, "grad_norm": 4.938795566558838, "learning_rate": 4.2394708313400894e-05, "loss": 2.2645130157470703, "memory(GiB)": 72.85, "step": 64050, "token_acc": 
0.4918032786885246, "train_speed(iter/s)": 0.672538 }, { "epoch": 2.74431258300844, "grad_norm": 5.990772247314453, "learning_rate": 4.238805691547612e-05, "loss": 2.4957977294921876, "memory(GiB)": 72.85, "step": 64055, "token_acc": 0.5331125827814569, "train_speed(iter/s)": 0.672537 }, { "epoch": 2.744526798337689, "grad_norm": 5.630621910095215, "learning_rate": 4.238140565544885e-05, "loss": 2.3038394927978514, "memory(GiB)": 72.85, "step": 64060, "token_acc": 0.48580441640378547, "train_speed(iter/s)": 0.672548 }, { "epoch": 2.744741013666938, "grad_norm": 4.420992374420166, "learning_rate": 4.237475453343955e-05, "loss": 2.2628040313720703, "memory(GiB)": 72.85, "step": 64065, "token_acc": 0.5049180327868853, "train_speed(iter/s)": 0.672552 }, { "epoch": 2.744955228996187, "grad_norm": 5.368121147155762, "learning_rate": 4.236810354956872e-05, "loss": 2.2514671325683593, "memory(GiB)": 72.85, "step": 64070, "token_acc": 0.5041551246537396, "train_speed(iter/s)": 0.672548 }, { "epoch": 2.7451694443254357, "grad_norm": 5.478834629058838, "learning_rate": 4.236145270395685e-05, "loss": 2.007820892333984, "memory(GiB)": 72.85, "step": 64075, "token_acc": 0.582089552238806, "train_speed(iter/s)": 0.672544 }, { "epoch": 2.745383659654685, "grad_norm": 4.475099086761475, "learning_rate": 4.2354801996724444e-05, "loss": 2.214669609069824, "memory(GiB)": 72.85, "step": 64080, "token_acc": 0.49538461538461537, "train_speed(iter/s)": 0.672549 }, { "epoch": 2.745597874983934, "grad_norm": 6.012089729309082, "learning_rate": 4.2348151427991974e-05, "loss": 2.4262027740478516, "memory(GiB)": 72.85, "step": 64085, "token_acc": 0.4720496894409938, "train_speed(iter/s)": 0.672553 }, { "epoch": 2.7458120903131826, "grad_norm": 6.153258800506592, "learning_rate": 4.234150099787991e-05, "loss": 2.223662567138672, "memory(GiB)": 72.85, "step": 64090, "token_acc": 0.4645390070921986, "train_speed(iter/s)": 0.672562 }, { "epoch": 2.746026305642432, "grad_norm": 5.025852680206299, 
"learning_rate": 4.233485070650874e-05, "loss": 2.5873794555664062, "memory(GiB)": 72.85, "step": 64095, "token_acc": 0.4723756906077348, "train_speed(iter/s)": 0.672566 }, { "epoch": 2.7462405209716807, "grad_norm": 5.496510982513428, "learning_rate": 4.2328200553998944e-05, "loss": 2.472727966308594, "memory(GiB)": 72.85, "step": 64100, "token_acc": 0.5036764705882353, "train_speed(iter/s)": 0.672557 }, { "epoch": 2.7464547363009295, "grad_norm": 6.272185325622559, "learning_rate": 4.2321550540470996e-05, "loss": 2.1110965728759767, "memory(GiB)": 72.85, "step": 64105, "token_acc": 0.5269709543568465, "train_speed(iter/s)": 0.672567 }, { "epoch": 2.7466689516301788, "grad_norm": 5.319426536560059, "learning_rate": 4.231490066604536e-05, "loss": 2.349245071411133, "memory(GiB)": 72.85, "step": 64110, "token_acc": 0.47115384615384615, "train_speed(iter/s)": 0.672566 }, { "epoch": 2.7468831669594276, "grad_norm": 4.482425689697266, "learning_rate": 4.23082509308425e-05, "loss": 2.364668083190918, "memory(GiB)": 72.85, "step": 64115, "token_acc": 0.4876325088339223, "train_speed(iter/s)": 0.672565 }, { "epoch": 2.7470973822886764, "grad_norm": 4.810718536376953, "learning_rate": 4.23016013349829e-05, "loss": 2.1680767059326174, "memory(GiB)": 72.85, "step": 64120, "token_acc": 0.5547169811320755, "train_speed(iter/s)": 0.672568 }, { "epoch": 2.7473115976179256, "grad_norm": 4.850122451782227, "learning_rate": 4.229495187858701e-05, "loss": 2.022505760192871, "memory(GiB)": 72.85, "step": 64125, "token_acc": 0.5254901960784314, "train_speed(iter/s)": 0.672576 }, { "epoch": 2.7475258129471745, "grad_norm": 4.77047872543335, "learning_rate": 4.2288302561775295e-05, "loss": 2.4018226623535157, "memory(GiB)": 72.85, "step": 64130, "token_acc": 0.4592833876221498, "train_speed(iter/s)": 0.672578 }, { "epoch": 2.7477400282764233, "grad_norm": 9.470220565795898, "learning_rate": 4.228165338466821e-05, "loss": 2.3638883590698243, "memory(GiB)": 72.85, "step": 64135, 
"token_acc": 0.5264797507788161, "train_speed(iter/s)": 0.672572 }, { "epoch": 2.7479542436056725, "grad_norm": 5.1299543380737305, "learning_rate": 4.227500434738622e-05, "loss": 2.227849006652832, "memory(GiB)": 72.85, "step": 64140, "token_acc": 0.4887640449438202, "train_speed(iter/s)": 0.672579 }, { "epoch": 2.7481684589349213, "grad_norm": 4.722250938415527, "learning_rate": 4.226835545004975e-05, "loss": 2.2042308807373048, "memory(GiB)": 72.85, "step": 64145, "token_acc": 0.5272108843537415, "train_speed(iter/s)": 0.672593 }, { "epoch": 2.74838267426417, "grad_norm": 5.043619155883789, "learning_rate": 4.226170669277929e-05, "loss": 2.0955814361572265, "memory(GiB)": 72.85, "step": 64150, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.67259 }, { "epoch": 2.7485968895934194, "grad_norm": 5.985729217529297, "learning_rate": 4.225505807569529e-05, "loss": 2.106698989868164, "memory(GiB)": 72.85, "step": 64155, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672601 }, { "epoch": 2.7488111049226682, "grad_norm": 4.034474849700928, "learning_rate": 4.224840959891815e-05, "loss": 2.148297119140625, "memory(GiB)": 72.85, "step": 64160, "token_acc": 0.5570469798657718, "train_speed(iter/s)": 0.672592 }, { "epoch": 2.749025320251917, "grad_norm": 5.707336902618408, "learning_rate": 4.224176126256836e-05, "loss": 2.3195571899414062, "memory(GiB)": 72.85, "step": 64165, "token_acc": 0.49063670411985016, "train_speed(iter/s)": 0.672577 }, { "epoch": 2.7492395355811663, "grad_norm": 5.483246803283691, "learning_rate": 4.223511306676634e-05, "loss": 1.9879571914672851, "memory(GiB)": 72.85, "step": 64170, "token_acc": 0.5511811023622047, "train_speed(iter/s)": 0.672552 }, { "epoch": 2.749453750910415, "grad_norm": 3.7596969604492188, "learning_rate": 4.222846501163253e-05, "loss": 2.270499610900879, "memory(GiB)": 72.85, "step": 64175, "token_acc": 0.5298013245033113, "train_speed(iter/s)": 0.672554 }, { "epoch": 2.749667966239664, "grad_norm": 
4.700667381286621, "learning_rate": 4.222181709728736e-05, "loss": 2.2202951431274416, "memory(GiB)": 72.85, "step": 64180, "token_acc": 0.5104895104895105, "train_speed(iter/s)": 0.672555 }, { "epoch": 2.749882181568913, "grad_norm": 5.296207427978516, "learning_rate": 4.221516932385128e-05, "loss": 2.2774328231811523, "memory(GiB)": 72.85, "step": 64185, "token_acc": 0.5210727969348659, "train_speed(iter/s)": 0.672561 }, { "epoch": 2.750096396898162, "grad_norm": 4.489518642425537, "learning_rate": 4.220852169144471e-05, "loss": 1.8688554763793945, "memory(GiB)": 72.85, "step": 64190, "token_acc": 0.5665529010238908, "train_speed(iter/s)": 0.672564 }, { "epoch": 2.750310612227411, "grad_norm": 5.4184770584106445, "learning_rate": 4.2201874200188074e-05, "loss": 2.2301963806152343, "memory(GiB)": 72.85, "step": 64195, "token_acc": 0.5087108013937283, "train_speed(iter/s)": 0.672574 }, { "epoch": 2.75052482755666, "grad_norm": 4.018089771270752, "learning_rate": 4.2195226850201796e-05, "loss": 2.351791000366211, "memory(GiB)": 72.85, "step": 64200, "token_acc": 0.48297213622291024, "train_speed(iter/s)": 0.672579 }, { "epoch": 2.750739042885909, "grad_norm": 5.512024402618408, "learning_rate": 4.2188579641606316e-05, "loss": 1.9237127304077148, "memory(GiB)": 72.85, "step": 64205, "token_acc": 0.5413793103448276, "train_speed(iter/s)": 0.672583 }, { "epoch": 2.7509532582151577, "grad_norm": 3.6686549186706543, "learning_rate": 4.218193257452204e-05, "loss": 2.199071502685547, "memory(GiB)": 72.85, "step": 64210, "token_acc": 0.5198675496688742, "train_speed(iter/s)": 0.672579 }, { "epoch": 2.751167473544407, "grad_norm": 5.2576375007629395, "learning_rate": 4.217528564906938e-05, "loss": 2.0871524810791016, "memory(GiB)": 72.85, "step": 64215, "token_acc": 0.5422535211267606, "train_speed(iter/s)": 0.672579 }, { "epoch": 2.7513816888736558, "grad_norm": 4.677558422088623, "learning_rate": 4.2168638865368756e-05, "loss": 2.3532413482666015, "memory(GiB)": 72.85, 
"step": 64220, "token_acc": 0.5035211267605634, "train_speed(iter/s)": 0.672583 }, { "epoch": 2.7515959042029046, "grad_norm": 6.5978593826293945, "learning_rate": 4.2161992223540606e-05, "loss": 2.223276901245117, "memory(GiB)": 72.85, "step": 64225, "token_acc": 0.5194805194805194, "train_speed(iter/s)": 0.672584 }, { "epoch": 2.751810119532154, "grad_norm": 4.070497035980225, "learning_rate": 4.215534572370531e-05, "loss": 2.2532657623291015, "memory(GiB)": 72.85, "step": 64230, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.672594 }, { "epoch": 2.7520243348614026, "grad_norm": 3.9237427711486816, "learning_rate": 4.214869936598329e-05, "loss": 2.226696014404297, "memory(GiB)": 72.85, "step": 64235, "token_acc": 0.5337837837837838, "train_speed(iter/s)": 0.672603 }, { "epoch": 2.7522385501906514, "grad_norm": 3.7098381519317627, "learning_rate": 4.214205315049494e-05, "loss": 2.251353073120117, "memory(GiB)": 72.85, "step": 64240, "token_acc": 0.5107913669064749, "train_speed(iter/s)": 0.672606 }, { "epoch": 2.7524527655199007, "grad_norm": 4.605055332183838, "learning_rate": 4.2135407077360667e-05, "loss": 2.1199506759643554, "memory(GiB)": 72.85, "step": 64245, "token_acc": 0.5158730158730159, "train_speed(iter/s)": 0.672615 }, { "epoch": 2.7526669808491495, "grad_norm": 5.41745138168335, "learning_rate": 4.2128761146700884e-05, "loss": 2.0988433837890623, "memory(GiB)": 72.85, "step": 64250, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.672621 }, { "epoch": 2.7528811961783983, "grad_norm": 4.632405757904053, "learning_rate": 4.2122115358635975e-05, "loss": 2.4494890213012694, "memory(GiB)": 72.85, "step": 64255, "token_acc": 0.4612546125461255, "train_speed(iter/s)": 0.672619 }, { "epoch": 2.7530954115076476, "grad_norm": 5.465114593505859, "learning_rate": 4.2115469713286325e-05, "loss": 2.5042140960693358, "memory(GiB)": 72.85, "step": 64260, "token_acc": 0.5017921146953405, "train_speed(iter/s)": 0.672628 }, { "epoch": 
2.7533096268368964, "grad_norm": 4.002376556396484, "learning_rate": 4.210882421077235e-05, "loss": 2.0823541641235352, "memory(GiB)": 72.85, "step": 64265, "token_acc": 0.5399239543726235, "train_speed(iter/s)": 0.672624 }, { "epoch": 2.753523842166145, "grad_norm": 4.798471450805664, "learning_rate": 4.210217885121442e-05, "loss": 2.0455127716064454, "memory(GiB)": 72.85, "step": 64270, "token_acc": 0.4868913857677903, "train_speed(iter/s)": 0.672635 }, { "epoch": 2.7537380574953945, "grad_norm": 5.58632755279541, "learning_rate": 4.209553363473293e-05, "loss": 2.114589309692383, "memory(GiB)": 72.85, "step": 64275, "token_acc": 0.5255474452554745, "train_speed(iter/s)": 0.672635 }, { "epoch": 2.7539522728246433, "grad_norm": 5.341146469116211, "learning_rate": 4.208888856144826e-05, "loss": 2.207541847229004, "memory(GiB)": 72.85, "step": 64280, "token_acc": 0.5196078431372549, "train_speed(iter/s)": 0.672636 }, { "epoch": 2.754166488153892, "grad_norm": 4.738819122314453, "learning_rate": 4.2082243631480795e-05, "loss": 2.1173648834228516, "memory(GiB)": 72.85, "step": 64285, "token_acc": 0.5188679245283019, "train_speed(iter/s)": 0.672632 }, { "epoch": 2.7543807034831413, "grad_norm": 4.800362586975098, "learning_rate": 4.207559884495092e-05, "loss": 2.373019790649414, "memory(GiB)": 72.85, "step": 64290, "token_acc": 0.4903047091412742, "train_speed(iter/s)": 0.672628 }, { "epoch": 2.75459491881239, "grad_norm": 6.3411078453063965, "learning_rate": 4.2068954201978985e-05, "loss": 2.5642234802246096, "memory(GiB)": 72.85, "step": 64295, "token_acc": 0.4484848484848485, "train_speed(iter/s)": 0.672622 }, { "epoch": 2.754809134141639, "grad_norm": 4.920842170715332, "learning_rate": 4.2062309702685404e-05, "loss": 2.4779985427856444, "memory(GiB)": 72.85, "step": 64300, "token_acc": 0.49310344827586206, "train_speed(iter/s)": 0.672618 }, { "epoch": 2.7550233494708882, "grad_norm": 4.801092624664307, "learning_rate": 4.205566534719052e-05, "loss": 
2.3737037658691404, "memory(GiB)": 72.85, "step": 64305, "token_acc": 0.5292207792207793, "train_speed(iter/s)": 0.672605 }, { "epoch": 2.755237564800137, "grad_norm": 4.567351818084717, "learning_rate": 4.204902113561472e-05, "loss": 2.3269763946533204, "memory(GiB)": 72.85, "step": 64310, "token_acc": 0.49666666666666665, "train_speed(iter/s)": 0.672608 }, { "epoch": 2.755451780129386, "grad_norm": 7.403995037078857, "learning_rate": 4.204237706807836e-05, "loss": 2.4961977005004883, "memory(GiB)": 72.85, "step": 64315, "token_acc": 0.4773413897280967, "train_speed(iter/s)": 0.672626 }, { "epoch": 2.755665995458635, "grad_norm": 5.685850620269775, "learning_rate": 4.20357331447018e-05, "loss": 2.389763832092285, "memory(GiB)": 72.85, "step": 64320, "token_acc": 0.4523809523809524, "train_speed(iter/s)": 0.67264 }, { "epoch": 2.755880210787884, "grad_norm": 5.000650405883789, "learning_rate": 4.202908936560541e-05, "loss": 2.129477691650391, "memory(GiB)": 72.85, "step": 64325, "token_acc": 0.49019607843137253, "train_speed(iter/s)": 0.672643 }, { "epoch": 2.7560944261171327, "grad_norm": 5.33780574798584, "learning_rate": 4.202244573090954e-05, "loss": 2.4548858642578124, "memory(GiB)": 72.85, "step": 64330, "token_acc": 0.4672897196261682, "train_speed(iter/s)": 0.672646 }, { "epoch": 2.756308641446382, "grad_norm": 4.033640384674072, "learning_rate": 4.2015802240734554e-05, "loss": 2.4727426528930665, "memory(GiB)": 72.85, "step": 64335, "token_acc": 0.4773413897280967, "train_speed(iter/s)": 0.672638 }, { "epoch": 2.756522856775631, "grad_norm": 4.585942268371582, "learning_rate": 4.200915889520079e-05, "loss": 2.1867807388305662, "memory(GiB)": 72.85, "step": 64340, "token_acc": 0.5490909090909091, "train_speed(iter/s)": 0.672641 }, { "epoch": 2.7567370721048796, "grad_norm": 4.801708698272705, "learning_rate": 4.2002515694428616e-05, "loss": 2.571134567260742, "memory(GiB)": 72.85, "step": 64345, "token_acc": 0.5, "train_speed(iter/s)": 0.672649 }, { 
"epoch": 2.756951287434129, "grad_norm": 5.607510089874268, "learning_rate": 4.1995872638538375e-05, "loss": 2.230427360534668, "memory(GiB)": 72.85, "step": 64350, "token_acc": 0.5132450331125827, "train_speed(iter/s)": 0.672655 }, { "epoch": 2.7571655027633777, "grad_norm": 3.671386957168579, "learning_rate": 4.1989229727650416e-05, "loss": 2.525594139099121, "memory(GiB)": 72.85, "step": 64355, "token_acc": 0.4803370786516854, "train_speed(iter/s)": 0.672659 }, { "epoch": 2.7573797180926265, "grad_norm": 7.0749192237854, "learning_rate": 4.1982586961885054e-05, "loss": 2.309559631347656, "memory(GiB)": 72.85, "step": 64360, "token_acc": 0.5147058823529411, "train_speed(iter/s)": 0.672669 }, { "epoch": 2.7575939334218758, "grad_norm": 5.6287055015563965, "learning_rate": 4.1975944341362646e-05, "loss": 2.015346336364746, "memory(GiB)": 72.85, "step": 64365, "token_acc": 0.549407114624506, "train_speed(iter/s)": 0.67268 }, { "epoch": 2.7578081487511246, "grad_norm": 4.273754596710205, "learning_rate": 4.196930186620355e-05, "loss": 2.389418029785156, "memory(GiB)": 72.85, "step": 64370, "token_acc": 0.5164835164835165, "train_speed(iter/s)": 0.67268 }, { "epoch": 2.7580223640803734, "grad_norm": 4.086581707000732, "learning_rate": 4.196265953652809e-05, "loss": 2.3451107025146483, "memory(GiB)": 72.85, "step": 64375, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.672688 }, { "epoch": 2.7582365794096226, "grad_norm": 4.708340644836426, "learning_rate": 4.195601735245658e-05, "loss": 1.9611848831176757, "memory(GiB)": 72.85, "step": 64380, "token_acc": 0.5408560311284046, "train_speed(iter/s)": 0.672681 }, { "epoch": 2.7584507947388714, "grad_norm": 4.649248123168945, "learning_rate": 4.194937531410937e-05, "loss": 2.296651077270508, "memory(GiB)": 72.85, "step": 64385, "token_acc": 0.5017301038062284, "train_speed(iter/s)": 0.672686 }, { "epoch": 2.7586650100681203, "grad_norm": 4.835324287414551, "learning_rate": 4.1942733421606764e-05, "loss": 
2.3458885192871093, "memory(GiB)": 72.85, "step": 64390, "token_acc": 0.46381578947368424, "train_speed(iter/s)": 0.672673 }, { "epoch": 2.7588792253973695, "grad_norm": 5.115762710571289, "learning_rate": 4.193609167506912e-05, "loss": 1.9949365615844727, "memory(GiB)": 72.85, "step": 64395, "token_acc": 0.5681818181818182, "train_speed(iter/s)": 0.672675 }, { "epoch": 2.7590934407266183, "grad_norm": 5.074087619781494, "learning_rate": 4.1929450074616734e-05, "loss": 2.1765869140625, "memory(GiB)": 72.85, "step": 64400, "token_acc": 0.5198675496688742, "train_speed(iter/s)": 0.672675 }, { "epoch": 2.759307656055867, "grad_norm": 4.171158790588379, "learning_rate": 4.192280862036992e-05, "loss": 2.160984230041504, "memory(GiB)": 72.85, "step": 64405, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.67268 }, { "epoch": 2.7595218713851164, "grad_norm": 5.4867401123046875, "learning_rate": 4.1916167312449014e-05, "loss": 2.061013031005859, "memory(GiB)": 72.85, "step": 64410, "token_acc": 0.5098039215686274, "train_speed(iter/s)": 0.672679 }, { "epoch": 2.759736086714365, "grad_norm": 5.081762790679932, "learning_rate": 4.1909526150974326e-05, "loss": 2.339268684387207, "memory(GiB)": 72.85, "step": 64415, "token_acc": 0.5220125786163522, "train_speed(iter/s)": 0.672688 }, { "epoch": 2.759950302043614, "grad_norm": 5.8728790283203125, "learning_rate": 4.190288513606615e-05, "loss": 2.325605010986328, "memory(GiB)": 72.85, "step": 64420, "token_acc": 0.4758364312267658, "train_speed(iter/s)": 0.672677 }, { "epoch": 2.7601645173728633, "grad_norm": 4.3542656898498535, "learning_rate": 4.189624426784481e-05, "loss": 2.0842565536499023, "memory(GiB)": 72.85, "step": 64425, "token_acc": 0.579136690647482, "train_speed(iter/s)": 0.672682 }, { "epoch": 2.760378732702112, "grad_norm": 4.771271228790283, "learning_rate": 4.18896035464306e-05, "loss": 2.338970184326172, "memory(GiB)": 72.85, "step": 64430, "token_acc": 0.47678018575851394, "train_speed(iter/s)": 
0.672683 }, { "epoch": 2.760592948031361, "grad_norm": 5.197103977203369, "learning_rate": 4.188296297194384e-05, "loss": 2.1937965393066405, "memory(GiB)": 72.85, "step": 64435, "token_acc": 0.5338645418326693, "train_speed(iter/s)": 0.672662 }, { "epoch": 2.76080716336061, "grad_norm": 4.599421501159668, "learning_rate": 4.18763225445048e-05, "loss": 2.3446048736572265, "memory(GiB)": 72.85, "step": 64440, "token_acc": 0.5139318885448917, "train_speed(iter/s)": 0.672658 }, { "epoch": 2.761021378689859, "grad_norm": 5.247986316680908, "learning_rate": 4.1869682264233826e-05, "loss": 2.1069620132446287, "memory(GiB)": 72.85, "step": 64445, "token_acc": 0.5373134328358209, "train_speed(iter/s)": 0.67267 }, { "epoch": 2.761235594019108, "grad_norm": 5.065687656402588, "learning_rate": 4.186304213125116e-05, "loss": 2.2410762786865233, "memory(GiB)": 72.85, "step": 64450, "token_acc": 0.504, "train_speed(iter/s)": 0.672683 }, { "epoch": 2.761449809348357, "grad_norm": 8.192699432373047, "learning_rate": 4.185640214567714e-05, "loss": 2.126272964477539, "memory(GiB)": 72.85, "step": 64455, "token_acc": 0.48253968253968255, "train_speed(iter/s)": 0.672675 }, { "epoch": 2.761664024677606, "grad_norm": 5.137020111083984, "learning_rate": 4.184976230763203e-05, "loss": 2.3845745086669923, "memory(GiB)": 72.85, "step": 64460, "token_acc": 0.5167785234899329, "train_speed(iter/s)": 0.67265 }, { "epoch": 2.7618782400068547, "grad_norm": 6.5209431648254395, "learning_rate": 4.1843122617236106e-05, "loss": 1.814352035522461, "memory(GiB)": 72.85, "step": 64465, "token_acc": 0.6322314049586777, "train_speed(iter/s)": 0.672649 }, { "epoch": 2.762092455336104, "grad_norm": 5.567412376403809, "learning_rate": 4.183648307460969e-05, "loss": 2.310029220581055, "memory(GiB)": 72.85, "step": 64470, "token_acc": 0.5098039215686274, "train_speed(iter/s)": 0.672647 }, { "epoch": 2.7623066706653527, "grad_norm": 4.256182670593262, "learning_rate": 4.182984367987302e-05, "loss": 
2.1147010803222654, "memory(GiB)": 72.85, "step": 64475, "token_acc": 0.5691699604743083, "train_speed(iter/s)": 0.67265 }, { "epoch": 2.7625208859946015, "grad_norm": 5.184787273406982, "learning_rate": 4.182320443314641e-05, "loss": 2.0551197052001955, "memory(GiB)": 72.85, "step": 64480, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.672631 }, { "epoch": 2.762735101323851, "grad_norm": 4.800354957580566, "learning_rate": 4.181656533455013e-05, "loss": 2.4050785064697267, "memory(GiB)": 72.85, "step": 64485, "token_acc": 0.45222929936305734, "train_speed(iter/s)": 0.672642 }, { "epoch": 2.7629493166530996, "grad_norm": 4.976218223571777, "learning_rate": 4.1809926384204426e-05, "loss": 2.101491928100586, "memory(GiB)": 72.85, "step": 64490, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.672648 }, { "epoch": 2.7631635319823484, "grad_norm": 5.119731903076172, "learning_rate": 4.18032875822296e-05, "loss": 2.1995113372802733, "memory(GiB)": 72.85, "step": 64495, "token_acc": 0.550185873605948, "train_speed(iter/s)": 0.672656 }, { "epoch": 2.7633777473115977, "grad_norm": 5.327057838439941, "learning_rate": 4.179664892874591e-05, "loss": 2.1156206130981445, "memory(GiB)": 72.85, "step": 64500, "token_acc": 0.5164179104477612, "train_speed(iter/s)": 0.672657 }, { "epoch": 2.7633777473115977, "eval_loss": 2.202927350997925, "eval_runtime": 15.927, "eval_samples_per_second": 6.279, "eval_steps_per_second": 6.279, "eval_token_acc": 0.48322147651006714, "step": 64500 }, { "epoch": 2.7635919626408465, "grad_norm": 5.502750873565674, "learning_rate": 4.17900104238736e-05, "loss": 2.630226707458496, "memory(GiB)": 72.85, "step": 64505, "token_acc": 0.48616600790513836, "train_speed(iter/s)": 0.67252 }, { "epoch": 2.7638061779700953, "grad_norm": 5.837386608123779, "learning_rate": 4.1783372067732977e-05, "loss": 2.041997718811035, "memory(GiB)": 72.85, "step": 64510, "token_acc": 0.5570934256055363, "train_speed(iter/s)": 0.672529 }, { "epoch": 
2.7640203932993446, "grad_norm": 4.282172203063965, "learning_rate": 4.177673386044425e-05, "loss": 1.9778305053710938, "memory(GiB)": 72.85, "step": 64515, "token_acc": 0.5770609318996416, "train_speed(iter/s)": 0.672515 }, { "epoch": 2.7642346086285934, "grad_norm": 4.061786651611328, "learning_rate": 4.177009580212773e-05, "loss": 2.0727005004882812, "memory(GiB)": 72.85, "step": 64520, "token_acc": 0.5622895622895623, "train_speed(iter/s)": 0.672524 }, { "epoch": 2.764448823957842, "grad_norm": 6.32614278793335, "learning_rate": 4.176345789290363e-05, "loss": 2.270911979675293, "memory(GiB)": 72.85, "step": 64525, "token_acc": 0.4729241877256318, "train_speed(iter/s)": 0.672519 }, { "epoch": 2.7646630392870915, "grad_norm": 4.763693809509277, "learning_rate": 4.175682013289223e-05, "loss": 2.3115264892578127, "memory(GiB)": 72.85, "step": 64530, "token_acc": 0.48466257668711654, "train_speed(iter/s)": 0.672505 }, { "epoch": 2.7648772546163403, "grad_norm": 6.101604461669922, "learning_rate": 4.1750182522213745e-05, "loss": 2.386656951904297, "memory(GiB)": 72.85, "step": 64535, "token_acc": 0.5086505190311419, "train_speed(iter/s)": 0.672511 }, { "epoch": 2.765091469945589, "grad_norm": 5.334583282470703, "learning_rate": 4.174354506098847e-05, "loss": 2.137425994873047, "memory(GiB)": 72.85, "step": 64540, "token_acc": 0.5609756097560976, "train_speed(iter/s)": 0.672528 }, { "epoch": 2.7653056852748383, "grad_norm": 5.435666084289551, "learning_rate": 4.1736907749336603e-05, "loss": 2.366059112548828, "memory(GiB)": 72.85, "step": 64545, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.672535 }, { "epoch": 2.765519900604087, "grad_norm": 5.527969837188721, "learning_rate": 4.17302705873784e-05, "loss": 1.9785011291503907, "memory(GiB)": 72.85, "step": 64550, "token_acc": 0.5341365461847389, "train_speed(iter/s)": 0.67254 }, { "epoch": 2.765734115933336, "grad_norm": 5.168056964874268, "learning_rate": 4.172363357523412e-05, "loss": 
2.1234832763671876, "memory(GiB)": 72.85, "step": 64555, "token_acc": 0.5032051282051282, "train_speed(iter/s)": 0.672544 }, { "epoch": 2.765948331262585, "grad_norm": 4.92463493347168, "learning_rate": 4.1716996713023976e-05, "loss": 2.171164894104004, "memory(GiB)": 72.85, "step": 64560, "token_acc": 0.5186335403726708, "train_speed(iter/s)": 0.672527 }, { "epoch": 2.766162546591834, "grad_norm": 5.076783180236816, "learning_rate": 4.17103600008682e-05, "loss": 2.1979043960571287, "memory(GiB)": 72.85, "step": 64565, "token_acc": 0.5266666666666666, "train_speed(iter/s)": 0.672515 }, { "epoch": 2.766376761921083, "grad_norm": 5.01760721206665, "learning_rate": 4.170372343888703e-05, "loss": 2.062921905517578, "memory(GiB)": 72.85, "step": 64570, "token_acc": 0.5241379310344828, "train_speed(iter/s)": 0.672519 }, { "epoch": 2.766590977250332, "grad_norm": 4.6217570304870605, "learning_rate": 4.169708702720069e-05, "loss": 2.506306266784668, "memory(GiB)": 72.85, "step": 64575, "token_acc": 0.4578313253012048, "train_speed(iter/s)": 0.672505 }, { "epoch": 2.766805192579581, "grad_norm": 4.696188449859619, "learning_rate": 4.169045076592942e-05, "loss": 2.157618522644043, "memory(GiB)": 72.85, "step": 64580, "token_acc": 0.5393258426966292, "train_speed(iter/s)": 0.672517 }, { "epoch": 2.7670194079088297, "grad_norm": 5.007235050201416, "learning_rate": 4.168381465519342e-05, "loss": 2.439315414428711, "memory(GiB)": 72.85, "step": 64585, "token_acc": 0.50997150997151, "train_speed(iter/s)": 0.672523 }, { "epoch": 2.767233623238079, "grad_norm": 5.416086196899414, "learning_rate": 4.167717869511291e-05, "loss": 2.302790069580078, "memory(GiB)": 72.85, "step": 64590, "token_acc": 0.49829351535836175, "train_speed(iter/s)": 0.672519 }, { "epoch": 2.767447838567328, "grad_norm": 5.041661739349365, "learning_rate": 4.167054288580812e-05, "loss": 2.0812583923339845, "memory(GiB)": 72.85, "step": 64595, "token_acc": 0.55, "train_speed(iter/s)": 0.672521 }, { "epoch": 
2.7676620538965766, "grad_norm": 5.696412086486816, "learning_rate": 4.166390722739926e-05, "loss": 2.3956056594848634, "memory(GiB)": 72.85, "step": 64600, "token_acc": 0.49158249158249157, "train_speed(iter/s)": 0.67251 }, { "epoch": 2.767876269225826, "grad_norm": 5.113123416900635, "learning_rate": 4.165727172000655e-05, "loss": 2.273046112060547, "memory(GiB)": 72.85, "step": 64605, "token_acc": 0.5270758122743683, "train_speed(iter/s)": 0.672494 }, { "epoch": 2.7680904845550747, "grad_norm": 5.102407932281494, "learning_rate": 4.165063636375018e-05, "loss": 2.243133544921875, "memory(GiB)": 72.85, "step": 64610, "token_acc": 0.5278688524590164, "train_speed(iter/s)": 0.672497 }, { "epoch": 2.7683046998843235, "grad_norm": 5.088896751403809, "learning_rate": 4.164400115875037e-05, "loss": 2.492044448852539, "memory(GiB)": 72.85, "step": 64615, "token_acc": 0.48606811145510836, "train_speed(iter/s)": 0.672493 }, { "epoch": 2.7685189152135727, "grad_norm": 4.127800464630127, "learning_rate": 4.1637366105127315e-05, "loss": 2.039118766784668, "memory(GiB)": 72.85, "step": 64620, "token_acc": 0.5431654676258992, "train_speed(iter/s)": 0.67249 }, { "epoch": 2.7687331305428216, "grad_norm": 4.949804306030273, "learning_rate": 4.163073120300122e-05, "loss": 2.200132369995117, "memory(GiB)": 72.85, "step": 64625, "token_acc": 0.5021459227467812, "train_speed(iter/s)": 0.672495 }, { "epoch": 2.7689473458720704, "grad_norm": 6.156340599060059, "learning_rate": 4.162409645249228e-05, "loss": 2.175435256958008, "memory(GiB)": 72.85, "step": 64630, "token_acc": 0.5547445255474452, "train_speed(iter/s)": 0.672491 }, { "epoch": 2.7691615612013196, "grad_norm": 6.146665096282959, "learning_rate": 4.1617461853720685e-05, "loss": 2.2910207748413085, "memory(GiB)": 72.85, "step": 64635, "token_acc": 0.49809885931558934, "train_speed(iter/s)": 0.672495 }, { "epoch": 2.7693757765305684, "grad_norm": 4.687530517578125, "learning_rate": 4.161082740680664e-05, "loss": 
2.1094106674194335, "memory(GiB)": 72.85, "step": 64640, "token_acc": 0.5136054421768708, "train_speed(iter/s)": 0.672508 }, { "epoch": 2.7695899918598172, "grad_norm": 7.175690174102783, "learning_rate": 4.160419311187033e-05, "loss": 2.120401382446289, "memory(GiB)": 72.85, "step": 64645, "token_acc": 0.5179856115107914, "train_speed(iter/s)": 0.672502 }, { "epoch": 2.7698042071890665, "grad_norm": 4.00183629989624, "learning_rate": 4.1597558969031924e-05, "loss": 2.007619857788086, "memory(GiB)": 72.85, "step": 64650, "token_acc": 0.5636363636363636, "train_speed(iter/s)": 0.672496 }, { "epoch": 2.7700184225183153, "grad_norm": 4.414296627044678, "learning_rate": 4.159092497841163e-05, "loss": 2.3569026947021485, "memory(GiB)": 72.85, "step": 64655, "token_acc": 0.4784172661870504, "train_speed(iter/s)": 0.672489 }, { "epoch": 2.770232637847564, "grad_norm": 3.9355647563934326, "learning_rate": 4.15842911401296e-05, "loss": 2.204671096801758, "memory(GiB)": 72.85, "step": 64660, "token_acc": 0.5235109717868338, "train_speed(iter/s)": 0.672496 }, { "epoch": 2.7704468531768134, "grad_norm": 5.218111038208008, "learning_rate": 4.157765745430605e-05, "loss": 2.069255256652832, "memory(GiB)": 72.85, "step": 64665, "token_acc": 0.5016077170418006, "train_speed(iter/s)": 0.672489 }, { "epoch": 2.770661068506062, "grad_norm": 4.721912384033203, "learning_rate": 4.157102392106112e-05, "loss": 2.3981624603271485, "memory(GiB)": 72.85, "step": 64670, "token_acc": 0.5184135977337111, "train_speed(iter/s)": 0.672491 }, { "epoch": 2.770875283835311, "grad_norm": 4.167314052581787, "learning_rate": 4.156439054051501e-05, "loss": 2.369456100463867, "memory(GiB)": 72.85, "step": 64675, "token_acc": 0.4984709480122324, "train_speed(iter/s)": 0.672492 }, { "epoch": 2.7710894991645603, "grad_norm": 5.520843029022217, "learning_rate": 4.1557757312787874e-05, "loss": 2.414176368713379, "memory(GiB)": 72.85, "step": 64680, "token_acc": 0.4880952380952381, "train_speed(iter/s)": 
0.672495 }, { "epoch": 2.771303714493809, "grad_norm": 4.8198018074035645, "learning_rate": 4.1551124237999895e-05, "loss": 2.2821107864379884, "memory(GiB)": 72.85, "step": 64685, "token_acc": 0.48534201954397393, "train_speed(iter/s)": 0.672499 }, { "epoch": 2.771517929823058, "grad_norm": 6.067161560058594, "learning_rate": 4.1544491316271224e-05, "loss": 2.538239669799805, "memory(GiB)": 72.85, "step": 64690, "token_acc": 0.4612794612794613, "train_speed(iter/s)": 0.672488 }, { "epoch": 2.771732145152307, "grad_norm": 4.34912633895874, "learning_rate": 4.153785854772201e-05, "loss": 2.422513008117676, "memory(GiB)": 72.85, "step": 64695, "token_acc": 0.5072463768115942, "train_speed(iter/s)": 0.672486 }, { "epoch": 2.771946360481556, "grad_norm": 4.5014166831970215, "learning_rate": 4.153122593247244e-05, "loss": 2.041591262817383, "memory(GiB)": 72.85, "step": 64700, "token_acc": 0.5409252669039146, "train_speed(iter/s)": 0.67249 }, { "epoch": 2.7721605758108048, "grad_norm": 4.93430757522583, "learning_rate": 4.1524593470642656e-05, "loss": 2.2108631134033203, "memory(GiB)": 72.85, "step": 64705, "token_acc": 0.5516014234875445, "train_speed(iter/s)": 0.672499 }, { "epoch": 2.772374791140054, "grad_norm": 4.902222633361816, "learning_rate": 4.15179611623528e-05, "loss": 2.3873382568359376, "memory(GiB)": 72.85, "step": 64710, "token_acc": 0.4539249146757679, "train_speed(iter/s)": 0.672487 }, { "epoch": 2.772589006469303, "grad_norm": 7.636556625366211, "learning_rate": 4.1511329007723046e-05, "loss": 2.198961067199707, "memory(GiB)": 72.85, "step": 64715, "token_acc": 0.5461254612546126, "train_speed(iter/s)": 0.672491 }, { "epoch": 2.7728032217985517, "grad_norm": 3.7107880115509033, "learning_rate": 4.1504697006873524e-05, "loss": 2.2051025390625, "memory(GiB)": 72.85, "step": 64720, "token_acc": 0.5049180327868853, "train_speed(iter/s)": 0.67249 }, { "epoch": 2.773017437127801, "grad_norm": 5.251133918762207, "learning_rate": 4.1498065159924394e-05, 
"loss": 1.962199592590332, "memory(GiB)": 72.85, "step": 64725, "token_acc": 0.5869565217391305, "train_speed(iter/s)": 0.672499 }, { "epoch": 2.7732316524570497, "grad_norm": 6.145563125610352, "learning_rate": 4.149143346699579e-05, "loss": 2.369420623779297, "memory(GiB)": 72.85, "step": 64730, "token_acc": 0.4876543209876543, "train_speed(iter/s)": 0.672501 }, { "epoch": 2.7734458677862985, "grad_norm": 5.296045780181885, "learning_rate": 4.1484801928207824e-05, "loss": 2.4939291000366213, "memory(GiB)": 72.85, "step": 64735, "token_acc": 0.4785714285714286, "train_speed(iter/s)": 0.672503 }, { "epoch": 2.773660083115548, "grad_norm": 4.1703338623046875, "learning_rate": 4.1478170543680664e-05, "loss": 2.3967065811157227, "memory(GiB)": 72.85, "step": 64740, "token_acc": 0.46601941747572817, "train_speed(iter/s)": 0.672498 }, { "epoch": 2.7738742984447966, "grad_norm": 3.765976905822754, "learning_rate": 4.147153931353446e-05, "loss": 2.120627784729004, "memory(GiB)": 72.85, "step": 64745, "token_acc": 0.5508196721311476, "train_speed(iter/s)": 0.672495 }, { "epoch": 2.7740885137740454, "grad_norm": 4.682988166809082, "learning_rate": 4.1464908237889324e-05, "loss": 2.1501510620117186, "memory(GiB)": 72.85, "step": 64750, "token_acc": 0.528052805280528, "train_speed(iter/s)": 0.672484 }, { "epoch": 2.7743027291032947, "grad_norm": 4.132692337036133, "learning_rate": 4.145827731686536e-05, "loss": 2.2092573165893556, "memory(GiB)": 72.85, "step": 64755, "token_acc": 0.5433070866141733, "train_speed(iter/s)": 0.672471 }, { "epoch": 2.7745169444325435, "grad_norm": 4.555698394775391, "learning_rate": 4.145164655058273e-05, "loss": 2.294808197021484, "memory(GiB)": 72.85, "step": 64760, "token_acc": 0.4879032258064516, "train_speed(iter/s)": 0.672473 }, { "epoch": 2.7747311597617923, "grad_norm": 5.22376823425293, "learning_rate": 4.144501593916154e-05, "loss": 2.444527816772461, "memory(GiB)": 72.85, "step": 64765, "token_acc": 0.46405228758169936, 
"train_speed(iter/s)": 0.672482 }, { "epoch": 2.7749453750910416, "grad_norm": 4.727163314819336, "learning_rate": 4.1438385482721913e-05, "loss": 2.135395622253418, "memory(GiB)": 72.85, "step": 64770, "token_acc": 0.5520504731861199, "train_speed(iter/s)": 0.672486 }, { "epoch": 2.7751595904202904, "grad_norm": 4.915817737579346, "learning_rate": 4.143175518138397e-05, "loss": 2.5744441986083983, "memory(GiB)": 72.85, "step": 64775, "token_acc": 0.4444444444444444, "train_speed(iter/s)": 0.672492 }, { "epoch": 2.775373805749539, "grad_norm": 5.499283790588379, "learning_rate": 4.14251250352678e-05, "loss": 2.3279855728149412, "memory(GiB)": 72.85, "step": 64780, "token_acc": 0.4721311475409836, "train_speed(iter/s)": 0.672502 }, { "epoch": 2.7755880210787884, "grad_norm": 5.242013931274414, "learning_rate": 4.141849504449355e-05, "loss": 2.092725372314453, "memory(GiB)": 72.85, "step": 64785, "token_acc": 0.5457875457875457, "train_speed(iter/s)": 0.672498 }, { "epoch": 2.7758022364080372, "grad_norm": 4.997512340545654, "learning_rate": 4.141186520918132e-05, "loss": 2.1801694869995116, "memory(GiB)": 72.85, "step": 64790, "token_acc": 0.53125, "train_speed(iter/s)": 0.672505 }, { "epoch": 2.776016451737286, "grad_norm": 5.078779220581055, "learning_rate": 4.140523552945118e-05, "loss": 2.3846574783325196, "memory(GiB)": 72.85, "step": 64795, "token_acc": 0.5209003215434084, "train_speed(iter/s)": 0.672508 }, { "epoch": 2.7762306670665353, "grad_norm": 5.550184726715088, "learning_rate": 4.1398606005423284e-05, "loss": 2.3864316940307617, "memory(GiB)": 72.85, "step": 64800, "token_acc": 0.4867924528301887, "train_speed(iter/s)": 0.672511 }, { "epoch": 2.776444882395784, "grad_norm": 5.245193958282471, "learning_rate": 4.13919766372177e-05, "loss": 2.49612979888916, "memory(GiB)": 72.85, "step": 64805, "token_acc": 0.47575757575757577, "train_speed(iter/s)": 0.6725 }, { "epoch": 2.776659097725033, "grad_norm": 4.744694232940674, "learning_rate": 
4.1385347424954526e-05, "loss": 2.440154266357422, "memory(GiB)": 72.85, "step": 64810, "token_acc": 0.501577287066246, "train_speed(iter/s)": 0.672504 }, { "epoch": 2.776873313054282, "grad_norm": 3.9063491821289062, "learning_rate": 4.137871836875387e-05, "loss": 2.0403547286987305, "memory(GiB)": 72.85, "step": 64815, "token_acc": 0.5502958579881657, "train_speed(iter/s)": 0.672497 }, { "epoch": 2.777087528383531, "grad_norm": 5.412117958068848, "learning_rate": 4.137208946873582e-05, "loss": 2.292619323730469, "memory(GiB)": 72.85, "step": 64820, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.672502 }, { "epoch": 2.77730174371278, "grad_norm": 4.2491559982299805, "learning_rate": 4.1365460725020466e-05, "loss": 2.836401176452637, "memory(GiB)": 72.85, "step": 64825, "token_acc": 0.4004914004914005, "train_speed(iter/s)": 0.672497 }, { "epoch": 2.777515959042029, "grad_norm": 4.067605495452881, "learning_rate": 4.135883213772789e-05, "loss": 2.118876266479492, "memory(GiB)": 72.85, "step": 64830, "token_acc": 0.5345345345345346, "train_speed(iter/s)": 0.672491 }, { "epoch": 2.777730174371278, "grad_norm": 5.047835826873779, "learning_rate": 4.1352203706978186e-05, "loss": 2.5136999130249023, "memory(GiB)": 72.85, "step": 64835, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.672483 }, { "epoch": 2.7779443897005267, "grad_norm": 6.485341548919678, "learning_rate": 4.134557543289141e-05, "loss": 1.9999679565429687, "memory(GiB)": 72.85, "step": 64840, "token_acc": 0.5278810408921933, "train_speed(iter/s)": 0.672481 }, { "epoch": 2.778158605029776, "grad_norm": 7.558277606964111, "learning_rate": 4.1338947315587664e-05, "loss": 2.2252294540405275, "memory(GiB)": 72.85, "step": 64845, "token_acc": 0.5367647058823529, "train_speed(iter/s)": 0.672484 }, { "epoch": 2.7783728203590248, "grad_norm": 6.361995220184326, "learning_rate": 4.133231935518701e-05, "loss": 2.166535568237305, "memory(GiB)": 72.85, "step": 64850, "token_acc": 
0.5144927536231884, "train_speed(iter/s)": 0.672495 }, { "epoch": 2.7785870356882736, "grad_norm": 5.344714164733887, "learning_rate": 4.132569155180951e-05, "loss": 2.2334718704223633, "memory(GiB)": 72.85, "step": 64855, "token_acc": 0.5268456375838926, "train_speed(iter/s)": 0.672495 }, { "epoch": 2.778801251017523, "grad_norm": 5.6060943603515625, "learning_rate": 4.131906390557526e-05, "loss": 1.9067726135253906, "memory(GiB)": 72.85, "step": 64860, "token_acc": 0.5214007782101168, "train_speed(iter/s)": 0.672502 }, { "epoch": 2.7790154663467717, "grad_norm": 5.075109004974365, "learning_rate": 4.131243641660429e-05, "loss": 2.1499761581420898, "memory(GiB)": 72.85, "step": 64865, "token_acc": 0.521865889212828, "train_speed(iter/s)": 0.672513 }, { "epoch": 2.7792296816760205, "grad_norm": 5.015100002288818, "learning_rate": 4.130580908501671e-05, "loss": 2.7387950897216795, "memory(GiB)": 72.85, "step": 64870, "token_acc": 0.4664310954063604, "train_speed(iter/s)": 0.672506 }, { "epoch": 2.7794438970052697, "grad_norm": 5.385369300842285, "learning_rate": 4.129918191093254e-05, "loss": 2.1017871856689454, "memory(GiB)": 72.85, "step": 64875, "token_acc": 0.5982532751091703, "train_speed(iter/s)": 0.672509 }, { "epoch": 2.7796581123345185, "grad_norm": 4.409383773803711, "learning_rate": 4.1292554894471847e-05, "loss": 2.371006202697754, "memory(GiB)": 72.85, "step": 64880, "token_acc": 0.5079872204472844, "train_speed(iter/s)": 0.672503 }, { "epoch": 2.7798723276637674, "grad_norm": 4.08161735534668, "learning_rate": 4.1285928035754684e-05, "loss": 2.34627685546875, "memory(GiB)": 72.85, "step": 64885, "token_acc": 0.5, "train_speed(iter/s)": 0.6725 }, { "epoch": 2.7800865429930166, "grad_norm": 4.125476360321045, "learning_rate": 4.127930133490112e-05, "loss": 2.315644645690918, "memory(GiB)": 72.85, "step": 64890, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.672498 }, { "epoch": 2.7803007583222654, "grad_norm": 4.514078617095947, 
"learning_rate": 4.12726747920312e-05, "loss": 2.3927371978759764, "memory(GiB)": 72.85, "step": 64895, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.672499 }, { "epoch": 2.7805149736515142, "grad_norm": 4.734058856964111, "learning_rate": 4.126604840726496e-05, "loss": 2.446074104309082, "memory(GiB)": 72.85, "step": 64900, "token_acc": 0.46686746987951805, "train_speed(iter/s)": 0.672501 }, { "epoch": 2.7807291889807635, "grad_norm": 4.412248134613037, "learning_rate": 4.125942218072244e-05, "loss": 1.870516014099121, "memory(GiB)": 72.85, "step": 64905, "token_acc": 0.5794701986754967, "train_speed(iter/s)": 0.672501 }, { "epoch": 2.7809434043100123, "grad_norm": 4.483216285705566, "learning_rate": 4.125279611252369e-05, "loss": 2.1977890014648436, "memory(GiB)": 72.85, "step": 64910, "token_acc": 0.5224358974358975, "train_speed(iter/s)": 0.672502 }, { "epoch": 2.781157619639261, "grad_norm": 4.1587748527526855, "learning_rate": 4.124617020278875e-05, "loss": 2.1448511123657226, "memory(GiB)": 72.85, "step": 64915, "token_acc": 0.5220125786163522, "train_speed(iter/s)": 0.672498 }, { "epoch": 2.7813718349685104, "grad_norm": 5.1451802253723145, "learning_rate": 4.1239544451637646e-05, "loss": 2.207819366455078, "memory(GiB)": 72.85, "step": 64920, "token_acc": 0.5288135593220339, "train_speed(iter/s)": 0.672494 }, { "epoch": 2.781586050297759, "grad_norm": 6.515679836273193, "learning_rate": 4.12329188591904e-05, "loss": 2.5428728103637694, "memory(GiB)": 72.85, "step": 64925, "token_acc": 0.5034246575342466, "train_speed(iter/s)": 0.672487 }, { "epoch": 2.781800265627008, "grad_norm": 6.762939929962158, "learning_rate": 4.122629342556706e-05, "loss": 2.346024513244629, "memory(GiB)": 72.85, "step": 64930, "token_acc": 0.47648902821316613, "train_speed(iter/s)": 0.672489 }, { "epoch": 2.7820144809562573, "grad_norm": 5.415521621704102, "learning_rate": 4.121966815088766e-05, "loss": 2.139268684387207, "memory(GiB)": 72.85, "step": 64935, 
"token_acc": 0.5179856115107914, "train_speed(iter/s)": 0.67249 }, { "epoch": 2.782228696285506, "grad_norm": 3.721713066101074, "learning_rate": 4.1213043035272184e-05, "loss": 2.0815635681152345, "memory(GiB)": 72.85, "step": 64940, "token_acc": 0.5547445255474452, "train_speed(iter/s)": 0.672488 }, { "epoch": 2.782442911614755, "grad_norm": 3.7756712436676025, "learning_rate": 4.12064180788407e-05, "loss": 2.1569990158081054, "memory(GiB)": 72.85, "step": 64945, "token_acc": 0.5184135977337111, "train_speed(iter/s)": 0.67249 }, { "epoch": 2.782657126944004, "grad_norm": 4.014257907867432, "learning_rate": 4.1199793281713176e-05, "loss": 2.185042953491211, "memory(GiB)": 72.85, "step": 64950, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.672489 }, { "epoch": 2.782871342273253, "grad_norm": 6.319504737854004, "learning_rate": 4.119316864400967e-05, "loss": 2.1709569931030273, "memory(GiB)": 72.85, "step": 64955, "token_acc": 0.519650655021834, "train_speed(iter/s)": 0.672499 }, { "epoch": 2.7830855576025018, "grad_norm": 5.165319442749023, "learning_rate": 4.118654416585015e-05, "loss": 2.154463195800781, "memory(GiB)": 72.85, "step": 64960, "token_acc": 0.542016806722689, "train_speed(iter/s)": 0.672505 }, { "epoch": 2.783299772931751, "grad_norm": 4.866090774536133, "learning_rate": 4.117991984735468e-05, "loss": 2.313599395751953, "memory(GiB)": 72.85, "step": 64965, "token_acc": 0.43934426229508194, "train_speed(iter/s)": 0.672516 }, { "epoch": 2.783513988261, "grad_norm": 4.071235656738281, "learning_rate": 4.117329568864322e-05, "loss": 2.2323396682739256, "memory(GiB)": 72.85, "step": 64970, "token_acc": 0.5313653136531366, "train_speed(iter/s)": 0.672513 }, { "epoch": 2.783728203590249, "grad_norm": 7.7289838790893555, "learning_rate": 4.11666716898358e-05, "loss": 2.277568054199219, "memory(GiB)": 72.85, "step": 64975, "token_acc": 0.541095890410959, "train_speed(iter/s)": 0.672517 }, { "epoch": 2.783942418919498, "grad_norm": 
4.639584541320801, "learning_rate": 4.116004785105241e-05, "loss": 2.242416191101074, "memory(GiB)": 72.85, "step": 64980, "token_acc": 0.56, "train_speed(iter/s)": 0.672523 }, { "epoch": 2.7841566342487467, "grad_norm": 4.9735002517700195, "learning_rate": 4.115342417241304e-05, "loss": 2.4282121658325195, "memory(GiB)": 72.85, "step": 64985, "token_acc": 0.4671814671814672, "train_speed(iter/s)": 0.672529 }, { "epoch": 2.784370849577996, "grad_norm": 5.091830730438232, "learning_rate": 4.114680065403769e-05, "loss": 2.6774642944335936, "memory(GiB)": 72.85, "step": 64990, "token_acc": 0.47297297297297297, "train_speed(iter/s)": 0.672526 }, { "epoch": 2.784585064907245, "grad_norm": 4.242180824279785, "learning_rate": 4.114017729604635e-05, "loss": 2.267429542541504, "memory(GiB)": 72.85, "step": 64995, "token_acc": 0.48598130841121495, "train_speed(iter/s)": 0.672535 }, { "epoch": 2.7847992802364936, "grad_norm": 6.923364639282227, "learning_rate": 4.113355409855901e-05, "loss": 2.3895442962646483, "memory(GiB)": 72.85, "step": 65000, "token_acc": 0.4915254237288136, "train_speed(iter/s)": 0.672535 }, { "epoch": 2.7847992802364936, "eval_loss": 2.0827443599700928, "eval_runtime": 16.0718, "eval_samples_per_second": 6.222, "eval_steps_per_second": 6.222, "eval_token_acc": 0.46547314578005117, "step": 65000 }, { "epoch": 2.785013495565743, "grad_norm": 5.361903667449951, "learning_rate": 4.1126931061695656e-05, "loss": 2.1563961029052736, "memory(GiB)": 72.85, "step": 65005, "token_acc": 0.4849624060150376, "train_speed(iter/s)": 0.672411 }, { "epoch": 2.7852277108949917, "grad_norm": 5.4710774421691895, "learning_rate": 4.112030818557626e-05, "loss": 2.431626892089844, "memory(GiB)": 72.85, "step": 65010, "token_acc": 0.496551724137931, "train_speed(iter/s)": 0.672419 }, { "epoch": 2.7854419262242405, "grad_norm": 4.511233806610107, "learning_rate": 4.111368547032083e-05, "loss": 2.403432273864746, "memory(GiB)": 72.85, "step": 65015, "token_acc": 
0.47017543859649125, "train_speed(iter/s)": 0.672429 }, { "epoch": 2.7856561415534897, "grad_norm": 5.752511024475098, "learning_rate": 4.110706291604931e-05, "loss": 2.0839588165283205, "memory(GiB)": 72.85, "step": 65020, "token_acc": 0.5186567164179104, "train_speed(iter/s)": 0.672433 }, { "epoch": 2.7858703568827385, "grad_norm": 5.17103910446167, "learning_rate": 4.110044052288169e-05, "loss": 2.2059005737304687, "memory(GiB)": 72.85, "step": 65025, "token_acc": 0.5035971223021583, "train_speed(iter/s)": 0.672445 }, { "epoch": 2.7860845722119874, "grad_norm": 4.921043395996094, "learning_rate": 4.109381829093792e-05, "loss": 1.9742671966552734, "memory(GiB)": 72.85, "step": 65030, "token_acc": 0.5576208178438662, "train_speed(iter/s)": 0.672448 }, { "epoch": 2.7862987875412366, "grad_norm": 4.1618876457214355, "learning_rate": 4.108719622033801e-05, "loss": 1.986979103088379, "memory(GiB)": 72.85, "step": 65035, "token_acc": 0.53515625, "train_speed(iter/s)": 0.672451 }, { "epoch": 2.7865130028704854, "grad_norm": 4.154176235198975, "learning_rate": 4.10805743112019e-05, "loss": 2.2547168731689453, "memory(GiB)": 72.85, "step": 65040, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672453 }, { "epoch": 2.7867272181997342, "grad_norm": 5.361545085906982, "learning_rate": 4.1073952563649546e-05, "loss": 2.205560302734375, "memory(GiB)": 72.85, "step": 65045, "token_acc": 0.5109489051094891, "train_speed(iter/s)": 0.672446 }, { "epoch": 2.7869414335289835, "grad_norm": 5.317359447479248, "learning_rate": 4.1067330977800924e-05, "loss": 2.2226749420166017, "memory(GiB)": 72.85, "step": 65050, "token_acc": 0.5218855218855218, "train_speed(iter/s)": 0.672449 }, { "epoch": 2.7871556488582323, "grad_norm": 4.651944637298584, "learning_rate": 4.106070955377597e-05, "loss": 2.4095314025878904, "memory(GiB)": 72.85, "step": 65055, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.67246 }, { "epoch": 2.787369864187481, "grad_norm": 4.014432907104492, 
"learning_rate": 4.105408829169466e-05, "loss": 2.1551788330078123, "memory(GiB)": 72.85, "step": 65060, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.672462 }, { "epoch": 2.7875840795167304, "grad_norm": 5.201488494873047, "learning_rate": 4.104746719167693e-05, "loss": 2.151163864135742, "memory(GiB)": 72.85, "step": 65065, "token_acc": 0.5109717868338558, "train_speed(iter/s)": 0.67245 }, { "epoch": 2.787798294845979, "grad_norm": 4.506302356719971, "learning_rate": 4.104084625384272e-05, "loss": 2.2662479400634767, "memory(GiB)": 72.85, "step": 65070, "token_acc": 0.48226950354609927, "train_speed(iter/s)": 0.672453 }, { "epoch": 2.788012510175228, "grad_norm": 8.261003494262695, "learning_rate": 4.103422547831199e-05, "loss": 1.729411506652832, "memory(GiB)": 72.85, "step": 65075, "token_acc": 0.6600790513833992, "train_speed(iter/s)": 0.67246 }, { "epoch": 2.7882267255044773, "grad_norm": 6.0929789543151855, "learning_rate": 4.102760486520468e-05, "loss": 2.1345054626464846, "memory(GiB)": 72.85, "step": 65080, "token_acc": 0.5512367491166078, "train_speed(iter/s)": 0.672467 }, { "epoch": 2.788440940833726, "grad_norm": 5.580582141876221, "learning_rate": 4.1020984414640716e-05, "loss": 2.2497154235839845, "memory(GiB)": 72.85, "step": 65085, "token_acc": 0.5482954545454546, "train_speed(iter/s)": 0.672477 }, { "epoch": 2.788655156162975, "grad_norm": 6.855732440948486, "learning_rate": 4.1014364126740056e-05, "loss": 2.2418731689453124, "memory(GiB)": 72.85, "step": 65090, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672477 }, { "epoch": 2.788869371492224, "grad_norm": 5.5256757736206055, "learning_rate": 4.100774400162261e-05, "loss": 2.241672325134277, "memory(GiB)": 72.85, "step": 65095, "token_acc": 0.5470383275261324, "train_speed(iter/s)": 0.672487 }, { "epoch": 2.789083586821473, "grad_norm": 6.1145429611206055, "learning_rate": 4.100112403940832e-05, "loss": 2.0928157806396483, "memory(GiB)": 72.85, "step": 65100, "token_acc": 
0.525, "train_speed(iter/s)": 0.672494 }, { "epoch": 2.7892978021507218, "grad_norm": 6.574073314666748, "learning_rate": 4.099450424021709e-05, "loss": 2.174964714050293, "memory(GiB)": 72.85, "step": 65105, "token_acc": 0.5198675496688742, "train_speed(iter/s)": 0.672491 }, { "epoch": 2.789512017479971, "grad_norm": 3.7883198261260986, "learning_rate": 4.0987884604168886e-05, "loss": 2.397092819213867, "memory(GiB)": 72.85, "step": 65110, "token_acc": 0.4892966360856269, "train_speed(iter/s)": 0.672502 }, { "epoch": 2.78972623280922, "grad_norm": 4.038250923156738, "learning_rate": 4.09812651313836e-05, "loss": 2.1621673583984373, "memory(GiB)": 72.85, "step": 65115, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.672504 }, { "epoch": 2.7899404481384686, "grad_norm": 4.886916637420654, "learning_rate": 4.097464582198116e-05, "loss": 2.3824363708496095, "memory(GiB)": 72.85, "step": 65120, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.67251 }, { "epoch": 2.790154663467718, "grad_norm": 4.518286228179932, "learning_rate": 4.0968026676081474e-05, "loss": 2.1965917587280273, "memory(GiB)": 72.85, "step": 65125, "token_acc": 0.5225806451612903, "train_speed(iter/s)": 0.672513 }, { "epoch": 2.7903688787969667, "grad_norm": 5.856472492218018, "learning_rate": 4.096140769380445e-05, "loss": 2.2020721435546875, "memory(GiB)": 72.85, "step": 65130, "token_acc": 0.4834710743801653, "train_speed(iter/s)": 0.672504 }, { "epoch": 2.7905830941262155, "grad_norm": 4.462333679199219, "learning_rate": 4.095478887527002e-05, "loss": 2.1183427810668944, "memory(GiB)": 72.85, "step": 65135, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.672511 }, { "epoch": 2.790797309455465, "grad_norm": 4.1328511238098145, "learning_rate": 4.094817022059806e-05, "loss": 2.3068414688110352, "memory(GiB)": 72.85, "step": 65140, "token_acc": 0.46496815286624205, "train_speed(iter/s)": 0.672516 }, { "epoch": 2.7910115247847136, "grad_norm": 4.958164691925049, 
"learning_rate": 4.09415517299085e-05, "loss": 2.3132822036743166, "memory(GiB)": 72.85, "step": 65145, "token_acc": 0.5028571428571429, "train_speed(iter/s)": 0.672517 }, { "epoch": 2.7912257401139624, "grad_norm": 4.337655544281006, "learning_rate": 4.0934933403321226e-05, "loss": 2.5986709594726562, "memory(GiB)": 72.85, "step": 65150, "token_acc": 0.4941860465116279, "train_speed(iter/s)": 0.672514 }, { "epoch": 2.7914399554432117, "grad_norm": 5.829590797424316, "learning_rate": 4.0928315240956134e-05, "loss": 2.6231405258178713, "memory(GiB)": 72.85, "step": 65155, "token_acc": 0.4697406340057637, "train_speed(iter/s)": 0.672507 }, { "epoch": 2.7916541707724605, "grad_norm": 6.754383563995361, "learning_rate": 4.0921697242933125e-05, "loss": 2.036441421508789, "memory(GiB)": 72.85, "step": 65160, "token_acc": 0.5177865612648221, "train_speed(iter/s)": 0.672503 }, { "epoch": 2.7918683861017093, "grad_norm": 4.854525566101074, "learning_rate": 4.0915079409372094e-05, "loss": 1.9874433517456054, "memory(GiB)": 72.85, "step": 65165, "token_acc": 0.5533980582524272, "train_speed(iter/s)": 0.672508 }, { "epoch": 2.7920826014309585, "grad_norm": 4.750720024108887, "learning_rate": 4.09084617403929e-05, "loss": 2.2913103103637695, "memory(GiB)": 72.85, "step": 65170, "token_acc": 0.4855072463768116, "train_speed(iter/s)": 0.67251 }, { "epoch": 2.7922968167602074, "grad_norm": 3.420290231704712, "learning_rate": 4.0901844236115464e-05, "loss": 2.2307371139526366, "memory(GiB)": 72.85, "step": 65175, "token_acc": 0.49855907780979825, "train_speed(iter/s)": 0.672525 }, { "epoch": 2.792511032089456, "grad_norm": 6.145827293395996, "learning_rate": 4.089522689665964e-05, "loss": 2.092945671081543, "memory(GiB)": 72.85, "step": 65180, "token_acc": 0.5742574257425742, "train_speed(iter/s)": 0.672525 }, { "epoch": 2.7927252474187054, "grad_norm": 5.142385482788086, "learning_rate": 4.088860972214534e-05, "loss": 2.2509178161621093, "memory(GiB)": 72.85, "step": 65185, 
"token_acc": 0.5201465201465202, "train_speed(iter/s)": 0.672536 }, { "epoch": 2.7929394627479542, "grad_norm": 4.814436435699463, "learning_rate": 4.088199271269241e-05, "loss": 2.6035842895507812, "memory(GiB)": 72.85, "step": 65190, "token_acc": 0.43389830508474575, "train_speed(iter/s)": 0.672529 }, { "epoch": 2.793153678077203, "grad_norm": 7.999196529388428, "learning_rate": 4.087537586842074e-05, "loss": 2.16781063079834, "memory(GiB)": 72.85, "step": 65195, "token_acc": 0.549618320610687, "train_speed(iter/s)": 0.672535 }, { "epoch": 2.7933678934064523, "grad_norm": 5.994810581207275, "learning_rate": 4.086875918945019e-05, "loss": 2.1696775436401365, "memory(GiB)": 72.85, "step": 65200, "token_acc": 0.5253731343283582, "train_speed(iter/s)": 0.672545 }, { "epoch": 2.793582108735701, "grad_norm": 6.342574119567871, "learning_rate": 4.0862142675900645e-05, "loss": 2.5352855682373048, "memory(GiB)": 72.85, "step": 65205, "token_acc": 0.4681528662420382, "train_speed(iter/s)": 0.672539 }, { "epoch": 2.79379632406495, "grad_norm": 5.592190742492676, "learning_rate": 4.0855526327891956e-05, "loss": 2.4086158752441404, "memory(GiB)": 72.85, "step": 65210, "token_acc": 0.49291784702549574, "train_speed(iter/s)": 0.672546 }, { "epoch": 2.794010539394199, "grad_norm": 4.521246433258057, "learning_rate": 4.084891014554398e-05, "loss": 2.2029661178588866, "memory(GiB)": 72.85, "step": 65215, "token_acc": 0.5186440677966102, "train_speed(iter/s)": 0.672551 }, { "epoch": 2.794224754723448, "grad_norm": 5.132366180419922, "learning_rate": 4.0842294128976586e-05, "loss": 2.3260242462158205, "memory(GiB)": 72.85, "step": 65220, "token_acc": 0.47038327526132406, "train_speed(iter/s)": 0.672558 }, { "epoch": 2.794438970052697, "grad_norm": 6.510217666625977, "learning_rate": 4.083567827830962e-05, "loss": 2.11712760925293, "memory(GiB)": 72.85, "step": 65225, "token_acc": 0.5321100917431193, "train_speed(iter/s)": 0.672551 }, { "epoch": 2.794653185381946, "grad_norm": 
4.074977874755859, "learning_rate": 4.0829062593662944e-05, "loss": 2.3694419860839844, "memory(GiB)": 72.85, "step": 65230, "token_acc": 0.525974025974026, "train_speed(iter/s)": 0.672561 }, { "epoch": 2.794867400711195, "grad_norm": 5.399648189544678, "learning_rate": 4.08224470751564e-05, "loss": 2.1246257781982423, "memory(GiB)": 72.85, "step": 65235, "token_acc": 0.5411764705882353, "train_speed(iter/s)": 0.672532 }, { "epoch": 2.7950816160404437, "grad_norm": 5.12270450592041, "learning_rate": 4.081583172290983e-05, "loss": 2.2918020248413087, "memory(GiB)": 72.85, "step": 65240, "token_acc": 0.4744525547445255, "train_speed(iter/s)": 0.672522 }, { "epoch": 2.795295831369693, "grad_norm": 4.819918632507324, "learning_rate": 4.080921653704309e-05, "loss": 2.2314266204833983, "memory(GiB)": 72.85, "step": 65245, "token_acc": 0.4956521739130435, "train_speed(iter/s)": 0.672532 }, { "epoch": 2.7955100466989418, "grad_norm": 4.889039993286133, "learning_rate": 4.080260151767602e-05, "loss": 2.1398109436035155, "memory(GiB)": 72.85, "step": 65250, "token_acc": 0.5308641975308642, "train_speed(iter/s)": 0.672536 }, { "epoch": 2.7957242620281906, "grad_norm": 4.313788890838623, "learning_rate": 4.079598666492843e-05, "loss": 2.089670753479004, "memory(GiB)": 72.85, "step": 65255, "token_acc": 0.5240963855421686, "train_speed(iter/s)": 0.672531 }, { "epoch": 2.79593847735744, "grad_norm": 4.386739730834961, "learning_rate": 4.0789371978920185e-05, "loss": 2.10913028717041, "memory(GiB)": 72.85, "step": 65260, "token_acc": 0.5145228215767634, "train_speed(iter/s)": 0.672537 }, { "epoch": 2.7961526926866886, "grad_norm": 3.136702060699463, "learning_rate": 4.078275745977112e-05, "loss": 2.054847526550293, "memory(GiB)": 72.85, "step": 65265, "token_acc": 0.5887096774193549, "train_speed(iter/s)": 0.67254 }, { "epoch": 2.7963669080159375, "grad_norm": 5.46202278137207, "learning_rate": 4.0776143107601037e-05, "loss": 2.298357391357422, "memory(GiB)": 72.85, "step": 
65270, "token_acc": 0.4957983193277311, "train_speed(iter/s)": 0.672543 }, { "epoch": 2.7965811233451867, "grad_norm": 6.053948402404785, "learning_rate": 4.076952892252977e-05, "loss": 2.1172470092773437, "memory(GiB)": 72.85, "step": 65275, "token_acc": 0.5652173913043478, "train_speed(iter/s)": 0.672547 }, { "epoch": 2.7967953386744355, "grad_norm": 5.219965934753418, "learning_rate": 4.0762914904677165e-05, "loss": 2.28784065246582, "memory(GiB)": 72.85, "step": 65280, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.672546 }, { "epoch": 2.7970095540036843, "grad_norm": 4.647161960601807, "learning_rate": 4.0756301054163004e-05, "loss": 2.458461952209473, "memory(GiB)": 72.85, "step": 65285, "token_acc": 0.4811320754716981, "train_speed(iter/s)": 0.672536 }, { "epoch": 2.7972237693329336, "grad_norm": 5.029975891113281, "learning_rate": 4.074968737110713e-05, "loss": 2.522831916809082, "memory(GiB)": 72.85, "step": 65290, "token_acc": 0.4657534246575342, "train_speed(iter/s)": 0.672541 }, { "epoch": 2.7974379846621824, "grad_norm": 4.8669610023498535, "learning_rate": 4.0743073855629355e-05, "loss": 2.2734958648681642, "memory(GiB)": 72.85, "step": 65295, "token_acc": 0.5015384615384615, "train_speed(iter/s)": 0.672553 }, { "epoch": 2.797652199991431, "grad_norm": 3.9310622215270996, "learning_rate": 4.073646050784946e-05, "loss": 2.242107391357422, "memory(GiB)": 72.85, "step": 65300, "token_acc": 0.5186440677966102, "train_speed(iter/s)": 0.672559 }, { "epoch": 2.7978664153206805, "grad_norm": 5.167774200439453, "learning_rate": 4.072984732788729e-05, "loss": 2.2635866165161134, "memory(GiB)": 72.85, "step": 65305, "token_acc": 0.46830985915492956, "train_speed(iter/s)": 0.672564 }, { "epoch": 2.7980806306499293, "grad_norm": 5.349511623382568, "learning_rate": 4.072323431586263e-05, "loss": 2.3128887176513673, "memory(GiB)": 72.85, "step": 65310, "token_acc": 0.5032051282051282, "train_speed(iter/s)": 0.67256 }, { "epoch": 2.7982948459791785, 
"grad_norm": 5.9077677726745605, "learning_rate": 4.0716621471895275e-05, "loss": 2.279297637939453, "memory(GiB)": 72.85, "step": 65315, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.672564 }, { "epoch": 2.7985090613084274, "grad_norm": 4.0788397789001465, "learning_rate": 4.0710008796105034e-05, "loss": 2.2421131134033203, "memory(GiB)": 72.85, "step": 65320, "token_acc": 0.5271565495207667, "train_speed(iter/s)": 0.672579 }, { "epoch": 2.798723276637676, "grad_norm": 5.2325358390808105, "learning_rate": 4.0703396288611694e-05, "loss": 2.1593626022338865, "memory(GiB)": 72.85, "step": 65325, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.67258 }, { "epoch": 2.7989374919669254, "grad_norm": 6.90647029876709, "learning_rate": 4.069678394953505e-05, "loss": 2.283690643310547, "memory(GiB)": 72.85, "step": 65330, "token_acc": 0.5091575091575091, "train_speed(iter/s)": 0.672572 }, { "epoch": 2.7991517072961742, "grad_norm": 5.086902618408203, "learning_rate": 4.069017177899489e-05, "loss": 2.136433410644531, "memory(GiB)": 72.85, "step": 65335, "token_acc": 0.5670103092783505, "train_speed(iter/s)": 0.672567 }, { "epoch": 2.799365922625423, "grad_norm": 4.710102558135986, "learning_rate": 4.0683559777111014e-05, "loss": 2.213084411621094, "memory(GiB)": 72.85, "step": 65340, "token_acc": 0.5, "train_speed(iter/s)": 0.672554 }, { "epoch": 2.7995801379546723, "grad_norm": 4.83547306060791, "learning_rate": 4.0676947944003175e-05, "loss": 1.893792724609375, "memory(GiB)": 72.85, "step": 65345, "token_acc": 0.5638297872340425, "train_speed(iter/s)": 0.672551 }, { "epoch": 2.799794353283921, "grad_norm": 4.160833835601807, "learning_rate": 4.0670336279791186e-05, "loss": 2.5049476623535156, "memory(GiB)": 72.85, "step": 65350, "token_acc": 0.4935064935064935, "train_speed(iter/s)": 0.672559 }, { "epoch": 2.80000856861317, "grad_norm": 4.468177795410156, "learning_rate": 4.066372478459481e-05, "loss": 2.3304365158081053, "memory(GiB)": 72.85, "step": 
65355, "token_acc": 0.49206349206349204, "train_speed(iter/s)": 0.672548 }, { "epoch": 2.800222783942419, "grad_norm": 4.991196632385254, "learning_rate": 4.06571134585338e-05, "loss": 2.175948715209961, "memory(GiB)": 72.85, "step": 65360, "token_acc": 0.5285171102661597, "train_speed(iter/s)": 0.672547 }, { "epoch": 2.800436999271668, "grad_norm": 4.56896448135376, "learning_rate": 4.065050230172796e-05, "loss": 2.1373363494873048, "memory(GiB)": 72.85, "step": 65365, "token_acc": 0.521594684385382, "train_speed(iter/s)": 0.672542 }, { "epoch": 2.800651214600917, "grad_norm": 4.897663593292236, "learning_rate": 4.064389131429704e-05, "loss": 2.170965576171875, "memory(GiB)": 72.85, "step": 65370, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.672552 }, { "epoch": 2.800865429930166, "grad_norm": 5.538837432861328, "learning_rate": 4.0637280496360795e-05, "loss": 2.0972917556762694, "memory(GiB)": 72.85, "step": 65375, "token_acc": 0.5278810408921933, "train_speed(iter/s)": 0.672543 }, { "epoch": 2.801079645259415, "grad_norm": 4.142284393310547, "learning_rate": 4.0630669848039005e-05, "loss": 2.1419466018676756, "memory(GiB)": 72.85, "step": 65380, "token_acc": 0.5282392026578073, "train_speed(iter/s)": 0.67254 }, { "epoch": 2.8012938605886637, "grad_norm": 3.5946147441864014, "learning_rate": 4.0624059369451415e-05, "loss": 2.187310791015625, "memory(GiB)": 72.85, "step": 65385, "token_acc": 0.519434628975265, "train_speed(iter/s)": 0.672548 }, { "epoch": 2.801508075917913, "grad_norm": 6.365548610687256, "learning_rate": 4.061744906071779e-05, "loss": 2.106085205078125, "memory(GiB)": 72.85, "step": 65390, "token_acc": 0.544, "train_speed(iter/s)": 0.672557 }, { "epoch": 2.8017222912471618, "grad_norm": 4.7533698081970215, "learning_rate": 4.061083892195788e-05, "loss": 2.2309770584106445, "memory(GiB)": 72.85, "step": 65395, "token_acc": 0.5030120481927711, "train_speed(iter/s)": 0.672565 }, { "epoch": 2.8019365065764106, "grad_norm": 
4.551390647888184, "learning_rate": 4.0604228953291404e-05, "loss": 2.022005081176758, "memory(GiB)": 72.85, "step": 65400, "token_acc": 0.5341880341880342, "train_speed(iter/s)": 0.672567 }, { "epoch": 2.80215072190566, "grad_norm": 4.044778823852539, "learning_rate": 4.059761915483815e-05, "loss": 2.5082090377807615, "memory(GiB)": 72.85, "step": 65405, "token_acc": 0.47315436241610737, "train_speed(iter/s)": 0.672574 }, { "epoch": 2.8023649372349086, "grad_norm": 6.938198089599609, "learning_rate": 4.059100952671786e-05, "loss": 2.2664468765258787, "memory(GiB)": 72.85, "step": 65410, "token_acc": 0.5267175572519084, "train_speed(iter/s)": 0.672577 }, { "epoch": 2.8025791525641575, "grad_norm": 5.396237373352051, "learning_rate": 4.058440006905025e-05, "loss": 2.2011943817138673, "memory(GiB)": 72.85, "step": 65415, "token_acc": 0.5397923875432526, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.8027933678934067, "grad_norm": 6.513652324676514, "learning_rate": 4.057779078195506e-05, "loss": 2.275948715209961, "memory(GiB)": 72.85, "step": 65420, "token_acc": 0.5112540192926045, "train_speed(iter/s)": 0.672602 }, { "epoch": 2.8030075832226555, "grad_norm": 3.4464221000671387, "learning_rate": 4.0571181665552035e-05, "loss": 2.0158411026000977, "memory(GiB)": 72.85, "step": 65425, "token_acc": 0.5152439024390244, "train_speed(iter/s)": 0.672605 }, { "epoch": 2.8032217985519043, "grad_norm": 5.474059581756592, "learning_rate": 4.05645727199609e-05, "loss": 2.0565008163452148, "memory(GiB)": 72.85, "step": 65430, "token_acc": 0.5236593059936908, "train_speed(iter/s)": 0.672607 }, { "epoch": 2.8034360138811536, "grad_norm": 4.9902238845825195, "learning_rate": 4.055796394530138e-05, "loss": 2.546085166931152, "memory(GiB)": 72.85, "step": 65435, "token_acc": 0.4847457627118644, "train_speed(iter/s)": 0.672599 }, { "epoch": 2.8036502292104024, "grad_norm": 4.723545074462891, "learning_rate": 4.05513553416932e-05, "loss": 2.285068130493164, "memory(GiB)": 72.85, 
"step": 65440, "token_acc": 0.5109034267912772, "train_speed(iter/s)": 0.6726 }, { "epoch": 2.8038644445396512, "grad_norm": 4.850212097167969, "learning_rate": 4.054474690925607e-05, "loss": 2.051015853881836, "memory(GiB)": 72.85, "step": 65445, "token_acc": 0.5709090909090909, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.8040786598689005, "grad_norm": 7.063178539276123, "learning_rate": 4.053813864810974e-05, "loss": 2.330245018005371, "memory(GiB)": 72.85, "step": 65450, "token_acc": 0.5259259259259259, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.8042928751981493, "grad_norm": 5.732304096221924, "learning_rate": 4.05315305583739e-05, "loss": 2.1948829650878907, "memory(GiB)": 72.85, "step": 65455, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.672613 }, { "epoch": 2.804507090527398, "grad_norm": 6.067006587982178, "learning_rate": 4.052492264016825e-05, "loss": 2.5080287933349608, "memory(GiB)": 72.85, "step": 65460, "token_acc": 0.4697986577181208, "train_speed(iter/s)": 0.672618 }, { "epoch": 2.8047213058566474, "grad_norm": 4.948612213134766, "learning_rate": 4.0518314893612535e-05, "loss": 2.327939033508301, "memory(GiB)": 72.85, "step": 65465, "token_acc": 0.49719101123595505, "train_speed(iter/s)": 0.672624 }, { "epoch": 2.804935521185896, "grad_norm": 5.808066368103027, "learning_rate": 4.0511707318826426e-05, "loss": 2.331618881225586, "memory(GiB)": 72.85, "step": 65470, "token_acc": 0.5141700404858299, "train_speed(iter/s)": 0.672631 }, { "epoch": 2.805149736515145, "grad_norm": 4.727378845214844, "learning_rate": 4.050509991592964e-05, "loss": 2.210666275024414, "memory(GiB)": 72.85, "step": 65475, "token_acc": 0.48231511254019294, "train_speed(iter/s)": 0.672639 }, { "epoch": 2.8053639518443942, "grad_norm": 4.31856107711792, "learning_rate": 4.049849268504187e-05, "loss": 2.0021568298339845, "memory(GiB)": 72.85, "step": 65480, "token_acc": 0.5726744186046512, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.805578167173643, 
"grad_norm": 8.090779304504395, "learning_rate": 4.0491885626282836e-05, "loss": 1.9578474044799805, "memory(GiB)": 72.85, "step": 65485, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.672599 }, { "epoch": 2.805792382502892, "grad_norm": 4.062626838684082, "learning_rate": 4.04852787397722e-05, "loss": 2.36175651550293, "memory(GiB)": 72.85, "step": 65490, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.672573 }, { "epoch": 2.806006597832141, "grad_norm": 4.5160746574401855, "learning_rate": 4.047867202562967e-05, "loss": 2.1334983825683596, "memory(GiB)": 72.85, "step": 65495, "token_acc": 0.5272108843537415, "train_speed(iter/s)": 0.672577 }, { "epoch": 2.80622081316139, "grad_norm": 4.33526611328125, "learning_rate": 4.0472065483974933e-05, "loss": 2.2630538940429688, "memory(GiB)": 72.85, "step": 65500, "token_acc": 0.5074626865671642, "train_speed(iter/s)": 0.672587 }, { "epoch": 2.80622081316139, "eval_loss": 1.9542895555496216, "eval_runtime": 16.0335, "eval_samples_per_second": 6.237, "eval_steps_per_second": 6.237, "eval_token_acc": 0.487698986975398, "step": 65500 }, { "epoch": 2.8064350284906388, "grad_norm": 4.175490856170654, "learning_rate": 4.046545911492766e-05, "loss": 2.1832149505615233, "memory(GiB)": 72.85, "step": 65505, "token_acc": 0.49176954732510286, "train_speed(iter/s)": 0.67246 }, { "epoch": 2.806649243819888, "grad_norm": 4.890081882476807, "learning_rate": 4.0458852918607545e-05, "loss": 2.3355377197265623, "memory(GiB)": 72.85, "step": 65510, "token_acc": 0.5327868852459017, "train_speed(iter/s)": 0.672462 }, { "epoch": 2.806863459149137, "grad_norm": 5.204342842102051, "learning_rate": 4.0452246895134266e-05, "loss": 2.200318145751953, "memory(GiB)": 72.85, "step": 65515, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672468 }, { "epoch": 2.8070776744783856, "grad_norm": 6.25446081161499, "learning_rate": 4.044564104462747e-05, "loss": 2.2966196060180666, "memory(GiB)": 72.85, "step": 65520, 
"token_acc": 0.49145299145299143, "train_speed(iter/s)": 0.67246 }, { "epoch": 2.807291889807635, "grad_norm": 5.507570266723633, "learning_rate": 4.0439035367206875e-05, "loss": 2.1491378784179687, "memory(GiB)": 72.85, "step": 65525, "token_acc": 0.5543071161048689, "train_speed(iter/s)": 0.67247 }, { "epoch": 2.8075061051368837, "grad_norm": 6.856100082397461, "learning_rate": 4.04324298629921e-05, "loss": 2.237597846984863, "memory(GiB)": 72.85, "step": 65530, "token_acc": 0.5362318840579711, "train_speed(iter/s)": 0.67246 }, { "epoch": 2.8077203204661325, "grad_norm": 5.955726623535156, "learning_rate": 4.042582453210285e-05, "loss": 1.9937652587890624, "memory(GiB)": 72.85, "step": 65535, "token_acc": 0.564748201438849, "train_speed(iter/s)": 0.67246 }, { "epoch": 2.8079345357953818, "grad_norm": 4.804549694061279, "learning_rate": 4.0419219374658766e-05, "loss": 2.3207382202148437, "memory(GiB)": 72.85, "step": 65540, "token_acc": 0.5316455696202531, "train_speed(iter/s)": 0.672459 }, { "epoch": 2.8081487511246306, "grad_norm": 4.651952266693115, "learning_rate": 4.04126143907795e-05, "loss": 2.0984878540039062, "memory(GiB)": 72.85, "step": 65545, "token_acc": 0.5169811320754717, "train_speed(iter/s)": 0.672476 }, { "epoch": 2.8083629664538794, "grad_norm": 5.281871318817139, "learning_rate": 4.040600958058471e-05, "loss": 2.3001489639282227, "memory(GiB)": 72.85, "step": 65550, "token_acc": 0.505338078291815, "train_speed(iter/s)": 0.672483 }, { "epoch": 2.8085771817831287, "grad_norm": 4.262298107147217, "learning_rate": 4.039940494419407e-05, "loss": 2.179946708679199, "memory(GiB)": 72.85, "step": 65555, "token_acc": 0.5539033457249071, "train_speed(iter/s)": 0.672475 }, { "epoch": 2.8087913971123775, "grad_norm": 3.7407708168029785, "learning_rate": 4.0392800481727224e-05, "loss": 2.3190919876098635, "memory(GiB)": 72.85, "step": 65560, "token_acc": 0.4702194357366771, "train_speed(iter/s)": 0.672481 }, { "epoch": 2.8090056124416263, "grad_norm": 
5.395492076873779, "learning_rate": 4.03861961933038e-05, "loss": 2.370532989501953, "memory(GiB)": 72.85, "step": 65565, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.672488 }, { "epoch": 2.8092198277708755, "grad_norm": 4.278215408325195, "learning_rate": 4.037959207904346e-05, "loss": 2.100918197631836, "memory(GiB)": 72.85, "step": 65570, "token_acc": 0.527972027972028, "train_speed(iter/s)": 0.672493 }, { "epoch": 2.8094340431001243, "grad_norm": 4.031492233276367, "learning_rate": 4.0372988139065824e-05, "loss": 2.422530746459961, "memory(GiB)": 72.85, "step": 65575, "token_acc": 0.5328185328185329, "train_speed(iter/s)": 0.672499 }, { "epoch": 2.809648258429373, "grad_norm": 4.509002685546875, "learning_rate": 4.036638437349054e-05, "loss": 2.1889423370361327, "memory(GiB)": 72.85, "step": 65580, "token_acc": 0.5400696864111498, "train_speed(iter/s)": 0.672494 }, { "epoch": 2.8098624737586224, "grad_norm": 4.601434707641602, "learning_rate": 4.035978078243725e-05, "loss": 2.0673051834106446, "memory(GiB)": 72.85, "step": 65585, "token_acc": 0.5324675324675324, "train_speed(iter/s)": 0.672507 }, { "epoch": 2.8100766890878712, "grad_norm": 4.863897800445557, "learning_rate": 4.0353177366025565e-05, "loss": 2.3414688110351562, "memory(GiB)": 72.85, "step": 65590, "token_acc": 0.5104895104895105, "train_speed(iter/s)": 0.672518 }, { "epoch": 2.81029090441712, "grad_norm": 5.086577892303467, "learning_rate": 4.0346574124375126e-05, "loss": 2.1634315490722655, "memory(GiB)": 72.85, "step": 65595, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.672538 }, { "epoch": 2.8105051197463693, "grad_norm": 5.055959701538086, "learning_rate": 4.033997105760555e-05, "loss": 2.2577499389648437, "memory(GiB)": 72.85, "step": 65600, "token_acc": 0.4847328244274809, "train_speed(iter/s)": 0.672553 }, { "epoch": 2.810719335075618, "grad_norm": 3.702622413635254, "learning_rate": 4.0333368165836456e-05, "loss": 2.2010353088378904, "memory(GiB)": 72.85, "step": 
65605, "token_acc": 0.5274390243902439, "train_speed(iter/s)": 0.672533 }, { "epoch": 2.810933550404867, "grad_norm": 4.589471340179443, "learning_rate": 4.032676544918747e-05, "loss": 2.1351219177246095, "memory(GiB)": 72.85, "step": 65610, "token_acc": 0.5482758620689655, "train_speed(iter/s)": 0.672534 }, { "epoch": 2.811147765734116, "grad_norm": 5.762794494628906, "learning_rate": 4.0320162907778196e-05, "loss": 2.0683366775512697, "memory(GiB)": 72.85, "step": 65615, "token_acc": 0.5077519379844961, "train_speed(iter/s)": 0.672531 }, { "epoch": 2.811361981063365, "grad_norm": 4.4678874015808105, "learning_rate": 4.031356054172826e-05, "loss": 2.2898818969726564, "memory(GiB)": 72.85, "step": 65620, "token_acc": 0.5353159851301115, "train_speed(iter/s)": 0.672536 }, { "epoch": 2.811576196392614, "grad_norm": 5.447594165802002, "learning_rate": 4.0306958351157245e-05, "loss": 2.1994400024414062, "memory(GiB)": 72.85, "step": 65625, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.672539 }, { "epoch": 2.811790411721863, "grad_norm": 5.3372063636779785, "learning_rate": 4.0300356336184786e-05, "loss": 2.263796806335449, "memory(GiB)": 72.85, "step": 65630, "token_acc": 0.5057471264367817, "train_speed(iter/s)": 0.672523 }, { "epoch": 2.812004627051112, "grad_norm": 4.371908664703369, "learning_rate": 4.029375449693047e-05, "loss": 1.9797521591186524, "memory(GiB)": 72.85, "step": 65635, "token_acc": 0.5567765567765568, "train_speed(iter/s)": 0.672523 }, { "epoch": 2.8122188423803607, "grad_norm": 4.330429553985596, "learning_rate": 4.02871528335139e-05, "loss": 2.573020362854004, "memory(GiB)": 72.85, "step": 65640, "token_acc": 0.4887640449438202, "train_speed(iter/s)": 0.672524 }, { "epoch": 2.81243305770961, "grad_norm": 5.92976188659668, "learning_rate": 4.028055134605467e-05, "loss": 2.13587646484375, "memory(GiB)": 72.85, "step": 65645, "token_acc": 0.5368852459016393, "train_speed(iter/s)": 0.672528 }, { "epoch": 2.8126472730388588, "grad_norm": 
5.580557823181152, "learning_rate": 4.0273950034672356e-05, "loss": 2.021915245056152, "memory(GiB)": 72.85, "step": 65650, "token_acc": 0.524, "train_speed(iter/s)": 0.672534 }, { "epoch": 2.8128614883681076, "grad_norm": 5.26324462890625, "learning_rate": 4.026734889948657e-05, "loss": 2.198202133178711, "memory(GiB)": 72.85, "step": 65655, "token_acc": 0.5089974293059126, "train_speed(iter/s)": 0.672535 }, { "epoch": 2.813075703697357, "grad_norm": 5.701022148132324, "learning_rate": 4.02607479406169e-05, "loss": 2.3783638000488283, "memory(GiB)": 72.85, "step": 65660, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.672526 }, { "epoch": 2.8132899190266056, "grad_norm": 6.256682395935059, "learning_rate": 4.0254147158182895e-05, "loss": 2.586161422729492, "memory(GiB)": 72.85, "step": 65665, "token_acc": 0.48024316109422494, "train_speed(iter/s)": 0.672544 }, { "epoch": 2.8135041343558544, "grad_norm": 8.709990501403809, "learning_rate": 4.024754655230417e-05, "loss": 2.3992610931396485, "memory(GiB)": 72.85, "step": 65670, "token_acc": 0.5352112676056338, "train_speed(iter/s)": 0.672561 }, { "epoch": 2.8137183496851037, "grad_norm": 3.9043891429901123, "learning_rate": 4.024094612310028e-05, "loss": 1.9704381942749023, "memory(GiB)": 72.85, "step": 65675, "token_acc": 0.5429553264604811, "train_speed(iter/s)": 0.672562 }, { "epoch": 2.8139325650143525, "grad_norm": 4.711461544036865, "learning_rate": 4.023434587069081e-05, "loss": 2.157850456237793, "memory(GiB)": 72.85, "step": 65680, "token_acc": 0.47703180212014135, "train_speed(iter/s)": 0.67257 }, { "epoch": 2.8141467803436013, "grad_norm": 5.821319580078125, "learning_rate": 4.0227745795195335e-05, "loss": 1.971333885192871, "memory(GiB)": 72.85, "step": 65685, "token_acc": 0.6147186147186147, "train_speed(iter/s)": 0.672574 }, { "epoch": 2.8143609956728506, "grad_norm": 7.366785049438477, "learning_rate": 4.02211458967334e-05, "loss": 2.3563329696655275, "memory(GiB)": 72.85, "step": 65690, 
"token_acc": 0.5, "train_speed(iter/s)": 0.672578 }, { "epoch": 2.8145752110020994, "grad_norm": 5.4940385818481445, "learning_rate": 4.021454617542457e-05, "loss": 2.4777875900268556, "memory(GiB)": 72.85, "step": 65695, "token_acc": 0.4965986394557823, "train_speed(iter/s)": 0.672586 }, { "epoch": 2.814789426331348, "grad_norm": 5.378252983093262, "learning_rate": 4.0207946631388426e-05, "loss": 2.3110067367553713, "memory(GiB)": 72.85, "step": 65700, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.67259 }, { "epoch": 2.8150036416605975, "grad_norm": 4.75296688079834, "learning_rate": 4.0201347264744524e-05, "loss": 2.158014678955078, "memory(GiB)": 72.85, "step": 65705, "token_acc": 0.5418181818181819, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.8152178569898463, "grad_norm": 6.101327896118164, "learning_rate": 4.0194748075612396e-05, "loss": 2.4675708770751954, "memory(GiB)": 72.85, "step": 65710, "token_acc": 0.49291784702549574, "train_speed(iter/s)": 0.672607 }, { "epoch": 2.815432072319095, "grad_norm": 4.201706886291504, "learning_rate": 4.0188149064111615e-05, "loss": 2.277578353881836, "memory(GiB)": 72.85, "step": 65715, "token_acc": 0.5016949152542373, "train_speed(iter/s)": 0.672593 }, { "epoch": 2.8156462876483443, "grad_norm": 4.857163429260254, "learning_rate": 4.018155023036171e-05, "loss": 2.2516380310058595, "memory(GiB)": 72.85, "step": 65720, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.67259 }, { "epoch": 2.815860502977593, "grad_norm": 5.188303470611572, "learning_rate": 4.017495157448224e-05, "loss": 2.0632226943969725, "memory(GiB)": 72.85, "step": 65725, "token_acc": 0.5406360424028268, "train_speed(iter/s)": 0.67259 }, { "epoch": 2.816074718306842, "grad_norm": 4.717227458953857, "learning_rate": 4.0168353096592735e-05, "loss": 2.423458290100098, "memory(GiB)": 72.85, "step": 65730, "token_acc": 0.4891640866873065, "train_speed(iter/s)": 0.672589 }, { "epoch": 2.8162889336360912, "grad_norm": 4.770730972290039, 
"learning_rate": 4.0161754796812736e-05, "loss": 2.410430908203125, "memory(GiB)": 72.85, "step": 65735, "token_acc": 0.4782608695652174, "train_speed(iter/s)": 0.672591 }, { "epoch": 2.81650314896534, "grad_norm": 4.417270660400391, "learning_rate": 4.0155156675261785e-05, "loss": 2.0743553161621096, "memory(GiB)": 72.85, "step": 65740, "token_acc": 0.5221518987341772, "train_speed(iter/s)": 0.672588 }, { "epoch": 2.816717364294589, "grad_norm": 5.408052921295166, "learning_rate": 4.01485587320594e-05, "loss": 2.351627731323242, "memory(GiB)": 72.85, "step": 65745, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.672592 }, { "epoch": 2.816931579623838, "grad_norm": 5.504631996154785, "learning_rate": 4.014196096732511e-05, "loss": 2.3192766189575194, "memory(GiB)": 72.85, "step": 65750, "token_acc": 0.4701492537313433, "train_speed(iter/s)": 0.672583 }, { "epoch": 2.817145794953087, "grad_norm": 5.348200798034668, "learning_rate": 4.0135363381178454e-05, "loss": 2.2748769760131835, "memory(GiB)": 72.85, "step": 65755, "token_acc": 0.5485074626865671, "train_speed(iter/s)": 0.672592 }, { "epoch": 2.8173600102823357, "grad_norm": 5.783451557159424, "learning_rate": 4.012876597373893e-05, "loss": 2.0158180236816405, "memory(GiB)": 72.85, "step": 65760, "token_acc": 0.5220588235294118, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.817574225611585, "grad_norm": 5.46758508682251, "learning_rate": 4.012216874512609e-05, "loss": 2.20076904296875, "memory(GiB)": 72.85, "step": 65765, "token_acc": 0.5129151291512916, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.817788440940834, "grad_norm": 5.091311931610107, "learning_rate": 4.0115571695459396e-05, "loss": 2.115486907958984, "memory(GiB)": 72.85, "step": 65770, "token_acc": 0.4924812030075188, "train_speed(iter/s)": 0.67259 }, { "epoch": 2.8180026562700826, "grad_norm": 6.9401750564575195, "learning_rate": 4.0108974824858425e-05, "loss": 2.2644214630126953, "memory(GiB)": 72.85, "step": 65775, "token_acc": 
0.5111940298507462, "train_speed(iter/s)": 0.672601 }, { "epoch": 2.818216871599332, "grad_norm": 5.4966230392456055, "learning_rate": 4.010237813344264e-05, "loss": 1.9830524444580078, "memory(GiB)": 72.85, "step": 65780, "token_acc": 0.5764705882352941, "train_speed(iter/s)": 0.672604 }, { "epoch": 2.8184310869285807, "grad_norm": 5.484341621398926, "learning_rate": 4.0095781621331563e-05, "loss": 2.311948776245117, "memory(GiB)": 72.85, "step": 65785, "token_acc": 0.48013245033112584, "train_speed(iter/s)": 0.672604 }, { "epoch": 2.8186453022578295, "grad_norm": 4.404721736907959, "learning_rate": 4.0089185288644706e-05, "loss": 2.2552116394042967, "memory(GiB)": 72.85, "step": 65790, "token_acc": 0.4772727272727273, "train_speed(iter/s)": 0.672602 }, { "epoch": 2.8188595175870788, "grad_norm": 3.98384952545166, "learning_rate": 4.008258913550153e-05, "loss": 2.310150718688965, "memory(GiB)": 72.85, "step": 65795, "token_acc": 0.5127272727272727, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.8190737329163276, "grad_norm": 4.387348175048828, "learning_rate": 4.0075993162021575e-05, "loss": 2.306140327453613, "memory(GiB)": 72.85, "step": 65800, "token_acc": 0.4608433734939759, "train_speed(iter/s)": 0.672608 }, { "epoch": 2.8192879482455764, "grad_norm": 5.06274938583374, "learning_rate": 4.006939736832431e-05, "loss": 2.0545513153076174, "memory(GiB)": 72.85, "step": 65805, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672616 }, { "epoch": 2.8195021635748256, "grad_norm": 5.300815105438232, "learning_rate": 4.006280175452922e-05, "loss": 2.250321388244629, "memory(GiB)": 72.85, "step": 65810, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.8197163789040744, "grad_norm": 5.196166038513184, "learning_rate": 4.0056206320755806e-05, "loss": 2.1499229431152345, "memory(GiB)": 72.85, "step": 65815, "token_acc": 0.513677811550152, "train_speed(iter/s)": 0.672608 }, { "epoch": 2.8199305942333233, "grad_norm": 
3.9543979167938232, "learning_rate": 4.0049611067123526e-05, "loss": 2.1656753540039064, "memory(GiB)": 72.85, "step": 65820, "token_acc": 0.5092936802973977, "train_speed(iter/s)": 0.672612 }, { "epoch": 2.8201448095625725, "grad_norm": 6.881379127502441, "learning_rate": 4.00430159937519e-05, "loss": 1.855982208251953, "memory(GiB)": 72.85, "step": 65825, "token_acc": 0.5450643776824035, "train_speed(iter/s)": 0.672607 }, { "epoch": 2.8203590248918213, "grad_norm": 4.37612247467041, "learning_rate": 4.003642110076037e-05, "loss": 1.9781814575195313, "memory(GiB)": 72.85, "step": 65830, "token_acc": 0.5287769784172662, "train_speed(iter/s)": 0.672619 }, { "epoch": 2.82057324022107, "grad_norm": 4.562140464782715, "learning_rate": 4.002982638826841e-05, "loss": 2.3625873565673827, "memory(GiB)": 72.85, "step": 65835, "token_acc": 0.47796610169491527, "train_speed(iter/s)": 0.67263 }, { "epoch": 2.8207874555503194, "grad_norm": 4.7399444580078125, "learning_rate": 4.0023231856395505e-05, "loss": 2.5415557861328124, "memory(GiB)": 72.85, "step": 65840, "token_acc": 0.5033112582781457, "train_speed(iter/s)": 0.672636 }, { "epoch": 2.821001670879568, "grad_norm": 4.561245441436768, "learning_rate": 4.00166375052611e-05, "loss": 2.1894176483154295, "memory(GiB)": 72.85, "step": 65845, "token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.672646 }, { "epoch": 2.821215886208817, "grad_norm": 4.218949317932129, "learning_rate": 4.001004333498469e-05, "loss": 2.1868019104003906, "memory(GiB)": 72.85, "step": 65850, "token_acc": 0.49454545454545457, "train_speed(iter/s)": 0.672652 }, { "epoch": 2.8214301015380663, "grad_norm": 5.59018087387085, "learning_rate": 4.0003449345685704e-05, "loss": 2.09205322265625, "memory(GiB)": 72.85, "step": 65855, "token_acc": 0.5019305019305019, "train_speed(iter/s)": 0.672656 }, { "epoch": 2.821644316867315, "grad_norm": 4.945861339569092, "learning_rate": 3.999685553748362e-05, "loss": 2.389540672302246, "memory(GiB)": 72.85, "step": 
65860, "token_acc": 0.46131805157593125, "train_speed(iter/s)": 0.67265 }, { "epoch": 2.821858532196564, "grad_norm": 4.8863959312438965, "learning_rate": 3.9990261910497876e-05, "loss": 2.3628862380981444, "memory(GiB)": 72.85, "step": 65865, "token_acc": 0.5055350553505535, "train_speed(iter/s)": 0.672649 }, { "epoch": 2.822072747525813, "grad_norm": 4.546560764312744, "learning_rate": 3.9983668464847935e-05, "loss": 2.0617544174194338, "memory(GiB)": 72.85, "step": 65870, "token_acc": 0.5338345864661654, "train_speed(iter/s)": 0.672657 }, { "epoch": 2.822286962855062, "grad_norm": 5.083465099334717, "learning_rate": 3.9977075200653234e-05, "loss": 2.0430694580078126, "memory(GiB)": 72.85, "step": 65875, "token_acc": 0.5405405405405406, "train_speed(iter/s)": 0.67266 }, { "epoch": 2.822501178184311, "grad_norm": 4.244882106781006, "learning_rate": 3.997048211803321e-05, "loss": 2.151654815673828, "memory(GiB)": 72.85, "step": 65880, "token_acc": 0.5189873417721519, "train_speed(iter/s)": 0.672655 }, { "epoch": 2.82271539351356, "grad_norm": 4.751278400421143, "learning_rate": 3.996388921710732e-05, "loss": 2.263233947753906, "memory(GiB)": 72.85, "step": 65885, "token_acc": 0.5326797385620915, "train_speed(iter/s)": 0.672659 }, { "epoch": 2.822929608842809, "grad_norm": 6.352930068969727, "learning_rate": 3.995729649799499e-05, "loss": 2.5912757873535157, "memory(GiB)": 72.85, "step": 65890, "token_acc": 0.4745762711864407, "train_speed(iter/s)": 0.672656 }, { "epoch": 2.8231438241720577, "grad_norm": 4.183155536651611, "learning_rate": 3.995070396081565e-05, "loss": 2.0845083236694335, "memory(GiB)": 72.85, "step": 65895, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.672655 }, { "epoch": 2.823358039501307, "grad_norm": 4.837650299072266, "learning_rate": 3.994411160568874e-05, "loss": 1.9571950912475586, "memory(GiB)": 72.85, "step": 65900, "token_acc": 0.5515873015873016, "train_speed(iter/s)": 0.672659 }, { "epoch": 2.8235722548305557, "grad_norm": 
4.513175964355469, "learning_rate": 3.993751943273367e-05, "loss": 2.085988235473633, "memory(GiB)": 72.85, "step": 65905, "token_acc": 0.5310077519379846, "train_speed(iter/s)": 0.672663 }, { "epoch": 2.8237864701598046, "grad_norm": 5.6505255699157715, "learning_rate": 3.9930927442069885e-05, "loss": 2.3925952911376953, "memory(GiB)": 72.85, "step": 65910, "token_acc": 0.4879518072289157, "train_speed(iter/s)": 0.672667 }, { "epoch": 2.824000685489054, "grad_norm": 5.011288166046143, "learning_rate": 3.99243356338168e-05, "loss": 2.1435783386230467, "memory(GiB)": 72.85, "step": 65915, "token_acc": 0.49216300940438873, "train_speed(iter/s)": 0.672673 }, { "epoch": 2.8242149008183026, "grad_norm": 5.745389938354492, "learning_rate": 3.9917744008093806e-05, "loss": 2.3551197052001953, "memory(GiB)": 72.85, "step": 65920, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.672665 }, { "epoch": 2.8244291161475514, "grad_norm": 5.299041271209717, "learning_rate": 3.991115256502034e-05, "loss": 2.301432800292969, "memory(GiB)": 72.85, "step": 65925, "token_acc": 0.5261437908496732, "train_speed(iter/s)": 0.672663 }, { "epoch": 2.8246433314768007, "grad_norm": 4.3935651779174805, "learning_rate": 3.9904561304715824e-05, "loss": 2.6275774002075196, "memory(GiB)": 72.85, "step": 65930, "token_acc": 0.4595300261096606, "train_speed(iter/s)": 0.672661 }, { "epoch": 2.8248575468060495, "grad_norm": 5.932034969329834, "learning_rate": 3.989797022729966e-05, "loss": 2.345145416259766, "memory(GiB)": 72.85, "step": 65935, "token_acc": 0.5, "train_speed(iter/s)": 0.672669 }, { "epoch": 2.8250717621352983, "grad_norm": 4.138672351837158, "learning_rate": 3.9891379332891224e-05, "loss": 2.3949068069458006, "memory(GiB)": 72.85, "step": 65940, "token_acc": 0.5, "train_speed(iter/s)": 0.672648 }, { "epoch": 2.8252859774645476, "grad_norm": 6.171112060546875, "learning_rate": 3.9884788621609936e-05, "loss": 2.0271137237548826, "memory(GiB)": 72.85, "step": 65945, "token_acc": 
0.5802469135802469, "train_speed(iter/s)": 0.672657 }, { "epoch": 2.8255001927937964, "grad_norm": 6.940995216369629, "learning_rate": 3.98781980935752e-05, "loss": 1.9088836669921876, "memory(GiB)": 72.85, "step": 65950, "token_acc": 0.5450980392156862, "train_speed(iter/s)": 0.67266 }, { "epoch": 2.825714408123045, "grad_norm": 3.913127899169922, "learning_rate": 3.9871607748906395e-05, "loss": 1.9711349487304688, "memory(GiB)": 72.85, "step": 65955, "token_acc": 0.5432525951557093, "train_speed(iter/s)": 0.672676 }, { "epoch": 2.8259286234522945, "grad_norm": 4.792845726013184, "learning_rate": 3.9865017587722916e-05, "loss": 2.0178859710693358, "memory(GiB)": 72.85, "step": 65960, "token_acc": 0.5119453924914675, "train_speed(iter/s)": 0.672676 }, { "epoch": 2.8261428387815433, "grad_norm": 7.282494068145752, "learning_rate": 3.985842761014414e-05, "loss": 2.125808334350586, "memory(GiB)": 72.85, "step": 65965, "token_acc": 0.556910569105691, "train_speed(iter/s)": 0.672683 }, { "epoch": 2.826357054110792, "grad_norm": 5.590877532958984, "learning_rate": 3.9851837816289485e-05, "loss": 2.1443729400634766, "memory(GiB)": 72.85, "step": 65970, "token_acc": 0.5214007782101168, "train_speed(iter/s)": 0.672689 }, { "epoch": 2.8265712694400413, "grad_norm": 5.362163066864014, "learning_rate": 3.984524820627829e-05, "loss": 2.3780765533447266, "memory(GiB)": 72.85, "step": 65975, "token_acc": 0.48214285714285715, "train_speed(iter/s)": 0.672695 }, { "epoch": 2.82678548476929, "grad_norm": 6.4689860343933105, "learning_rate": 3.983865878022995e-05, "loss": 2.270751953125, "memory(GiB)": 72.85, "step": 65980, "token_acc": 0.5268817204301075, "train_speed(iter/s)": 0.672694 }, { "epoch": 2.826999700098539, "grad_norm": 6.755190849304199, "learning_rate": 3.983206953826385e-05, "loss": 2.2356958389282227, "memory(GiB)": 72.85, "step": 65985, "token_acc": 0.5086505190311419, "train_speed(iter/s)": 0.672683 }, { "epoch": 2.827213915427788, "grad_norm": 4.1941399574279785, 
"learning_rate": 3.982548048049935e-05, "loss": 2.045979881286621, "memory(GiB)": 72.85, "step": 65990, "token_acc": 0.5491525423728814, "train_speed(iter/s)": 0.672686 }, { "epoch": 2.827428130757037, "grad_norm": 5.41318416595459, "learning_rate": 3.981889160705579e-05, "loss": 2.4109230041503906, "memory(GiB)": 72.85, "step": 65995, "token_acc": 0.4919093851132686, "train_speed(iter/s)": 0.672688 }, { "epoch": 2.827642346086286, "grad_norm": 5.478531360626221, "learning_rate": 3.981230291805257e-05, "loss": 1.9959173202514648, "memory(GiB)": 72.85, "step": 66000, "token_acc": 0.5060240963855421, "train_speed(iter/s)": 0.672691 }, { "epoch": 2.827642346086286, "eval_loss": 2.0825839042663574, "eval_runtime": 14.8345, "eval_samples_per_second": 6.741, "eval_steps_per_second": 6.741, "eval_token_acc": 0.5019973368841545, "step": 66000 }, { "epoch": 2.827856561415535, "grad_norm": 7.242488384246826, "learning_rate": 3.980571441360904e-05, "loss": 2.5772527694702148, "memory(GiB)": 72.85, "step": 66005, "token_acc": 0.49653808110781406, "train_speed(iter/s)": 0.672576 }, { "epoch": 2.828070776744784, "grad_norm": 6.400946617126465, "learning_rate": 3.979912609384456e-05, "loss": 2.12886962890625, "memory(GiB)": 72.85, "step": 66010, "token_acc": 0.5365853658536586, "train_speed(iter/s)": 0.672583 }, { "epoch": 2.8282849920740327, "grad_norm": 4.950554847717285, "learning_rate": 3.979253795887849e-05, "loss": 2.506175231933594, "memory(GiB)": 72.85, "step": 66015, "token_acc": 0.5034246575342466, "train_speed(iter/s)": 0.672594 }, { "epoch": 2.828499207403282, "grad_norm": 5.89299201965332, "learning_rate": 3.978595000883017e-05, "loss": 2.294045829772949, "memory(GiB)": 72.85, "step": 66020, "token_acc": 0.5211267605633803, "train_speed(iter/s)": 0.672578 }, { "epoch": 2.828713422732531, "grad_norm": 5.055482387542725, "learning_rate": 3.977936224381893e-05, "loss": 2.14105224609375, "memory(GiB)": 72.85, "step": 66025, "token_acc": 0.5110294117647058, 
"train_speed(iter/s)": 0.672575 }, { "epoch": 2.8289276380617796, "grad_norm": 4.556074619293213, "learning_rate": 3.9772774663964145e-05, "loss": 2.5084609985351562, "memory(GiB)": 72.85, "step": 66030, "token_acc": 0.45014245014245013, "train_speed(iter/s)": 0.672589 }, { "epoch": 2.829141853391029, "grad_norm": 4.362577438354492, "learning_rate": 3.9766187269385144e-05, "loss": 2.2145000457763673, "memory(GiB)": 72.85, "step": 66035, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.672584 }, { "epoch": 2.8293560687202777, "grad_norm": 4.832986831665039, "learning_rate": 3.9759600060201245e-05, "loss": 2.1811956405639648, "memory(GiB)": 72.85, "step": 66040, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672582 }, { "epoch": 2.8295702840495265, "grad_norm": 5.62236213684082, "learning_rate": 3.975301303653181e-05, "loss": 2.1893653869628906, "memory(GiB)": 72.85, "step": 66045, "token_acc": 0.5, "train_speed(iter/s)": 0.672583 }, { "epoch": 2.8297844993787757, "grad_norm": 4.152304172515869, "learning_rate": 3.974642619849615e-05, "loss": 2.2319381713867186, "memory(GiB)": 72.85, "step": 66050, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.672596 }, { "epoch": 2.8299987147080246, "grad_norm": 4.868943691253662, "learning_rate": 3.9739839546213596e-05, "loss": 2.5687488555908202, "memory(GiB)": 72.85, "step": 66055, "token_acc": 0.4823943661971831, "train_speed(iter/s)": 0.6726 }, { "epoch": 2.8302129300372734, "grad_norm": 4.7510762214660645, "learning_rate": 3.9733253079803486e-05, "loss": 2.147490882873535, "memory(GiB)": 72.85, "step": 66060, "token_acc": 0.5387453874538746, "train_speed(iter/s)": 0.672586 }, { "epoch": 2.8304271453665226, "grad_norm": 4.515618324279785, "learning_rate": 3.9726666799385095e-05, "loss": 2.132745552062988, "memory(GiB)": 72.85, "step": 66065, "token_acc": 0.5412186379928315, "train_speed(iter/s)": 0.67258 }, { "epoch": 2.8306413606957714, "grad_norm": 5.262728691101074, "learning_rate": 
3.972008070507779e-05, "loss": 2.1851139068603516, "memory(GiB)": 72.85, "step": 66070, "token_acc": 0.5191740412979351, "train_speed(iter/s)": 0.672587 }, { "epoch": 2.8308555760250202, "grad_norm": 6.071770668029785, "learning_rate": 3.971349479700088e-05, "loss": 2.2346168518066407, "memory(GiB)": 72.85, "step": 66075, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.672582 }, { "epoch": 2.8310697913542695, "grad_norm": 5.918970584869385, "learning_rate": 3.970690907527366e-05, "loss": 2.363721466064453, "memory(GiB)": 72.85, "step": 66080, "token_acc": 0.4634920634920635, "train_speed(iter/s)": 0.672586 }, { "epoch": 2.8312840066835183, "grad_norm": 4.0400896072387695, "learning_rate": 3.970032354001542e-05, "loss": 2.0758939743041993, "memory(GiB)": 72.85, "step": 66085, "token_acc": 0.569078947368421, "train_speed(iter/s)": 0.672586 }, { "epoch": 2.831498222012767, "grad_norm": 4.975486755371094, "learning_rate": 3.9693738191345495e-05, "loss": 1.8952108383178712, "memory(GiB)": 72.85, "step": 66090, "token_acc": 0.5776173285198556, "train_speed(iter/s)": 0.672587 }, { "epoch": 2.8317124373420164, "grad_norm": 4.690342426300049, "learning_rate": 3.968715302938317e-05, "loss": 2.0807773590087892, "memory(GiB)": 72.85, "step": 66095, "token_acc": 0.5072463768115942, "train_speed(iter/s)": 0.672582 }, { "epoch": 2.831926652671265, "grad_norm": 6.639970779418945, "learning_rate": 3.9680568054247744e-05, "loss": 2.2190324783325197, "memory(GiB)": 72.85, "step": 66100, "token_acc": 0.48201438848920863, "train_speed(iter/s)": 0.672584 }, { "epoch": 2.832140868000514, "grad_norm": 4.98060417175293, "learning_rate": 3.9673983266058504e-05, "loss": 2.3674036026000977, "memory(GiB)": 72.85, "step": 66105, "token_acc": 0.5076452599388379, "train_speed(iter/s)": 0.672584 }, { "epoch": 2.8323550833297633, "grad_norm": 4.60392951965332, "learning_rate": 3.9667398664934735e-05, "loss": 2.2542697906494142, "memory(GiB)": 72.85, "step": 66110, "token_acc": 
0.5054545454545455, "train_speed(iter/s)": 0.672587 }, { "epoch": 2.832569298659012, "grad_norm": 5.118516445159912, "learning_rate": 3.966081425099575e-05, "loss": 2.310118293762207, "memory(GiB)": 72.85, "step": 66115, "token_acc": 0.47318611987381703, "train_speed(iter/s)": 0.672583 }, { "epoch": 2.832783513988261, "grad_norm": 4.890450477600098, "learning_rate": 3.96542300243608e-05, "loss": 2.1484682083129885, "memory(GiB)": 72.85, "step": 66120, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.672602 }, { "epoch": 2.83299772931751, "grad_norm": 4.58934211730957, "learning_rate": 3.9647645985149184e-05, "loss": 2.4652915954589845, "memory(GiB)": 72.85, "step": 66125, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.672615 }, { "epoch": 2.833211944646759, "grad_norm": 5.995037078857422, "learning_rate": 3.964106213348017e-05, "loss": 2.23870735168457, "memory(GiB)": 72.85, "step": 66130, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.672601 }, { "epoch": 2.8334261599760078, "grad_norm": 4.7728590965271, "learning_rate": 3.963447846947304e-05, "loss": 2.2046581268310548, "memory(GiB)": 72.85, "step": 66135, "token_acc": 0.53156146179402, "train_speed(iter/s)": 0.672604 }, { "epoch": 2.833640375305257, "grad_norm": 4.973441123962402, "learning_rate": 3.962789499324703e-05, "loss": 2.099509429931641, "memory(GiB)": 72.85, "step": 66140, "token_acc": 0.5830508474576271, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.833854590634506, "grad_norm": 4.456355571746826, "learning_rate": 3.962131170492145e-05, "loss": 2.429019737243652, "memory(GiB)": 72.85, "step": 66145, "token_acc": 0.48344370860927155, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.8340688059637547, "grad_norm": 4.488949775695801, "learning_rate": 3.961472860461555e-05, "loss": 2.2174400329589843, "memory(GiB)": 72.85, "step": 66150, "token_acc": 0.5034013605442177, "train_speed(iter/s)": 0.6726 }, { "epoch": 2.834283021293004, "grad_norm": 4.811708450317383, 
"learning_rate": 3.9608145692448575e-05, "loss": 2.5370046615600588, "memory(GiB)": 72.85, "step": 66155, "token_acc": 0.47794117647058826, "train_speed(iter/s)": 0.672604 }, { "epoch": 2.8344972366222527, "grad_norm": 4.848087310791016, "learning_rate": 3.9601562968539796e-05, "loss": 2.3250442504882813, "memory(GiB)": 72.85, "step": 66160, "token_acc": 0.5265306122448979, "train_speed(iter/s)": 0.672608 }, { "epoch": 2.8347114519515015, "grad_norm": 4.248692989349365, "learning_rate": 3.959498043300846e-05, "loss": 2.2615682601928713, "memory(GiB)": 72.85, "step": 66165, "token_acc": 0.5077519379844961, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.834925667280751, "grad_norm": 7.626750946044922, "learning_rate": 3.958839808597381e-05, "loss": 2.6225311279296877, "memory(GiB)": 72.85, "step": 66170, "token_acc": 0.4097222222222222, "train_speed(iter/s)": 0.672608 }, { "epoch": 2.8351398826099996, "grad_norm": 4.847922325134277, "learning_rate": 3.95818159275551e-05, "loss": 2.175125312805176, "memory(GiB)": 72.85, "step": 66175, "token_acc": 0.5297619047619048, "train_speed(iter/s)": 0.67261 }, { "epoch": 2.8353540979392484, "grad_norm": 4.403483867645264, "learning_rate": 3.957523395787156e-05, "loss": 2.5665054321289062, "memory(GiB)": 72.85, "step": 66180, "token_acc": 0.4652014652014652, "train_speed(iter/s)": 0.672616 }, { "epoch": 2.8355683132684977, "grad_norm": 4.620969772338867, "learning_rate": 3.956865217704244e-05, "loss": 2.0515899658203125, "memory(GiB)": 72.85, "step": 66185, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.8357825285977465, "grad_norm": 4.883731365203857, "learning_rate": 3.956207058518697e-05, "loss": 2.314599609375, "memory(GiB)": 72.85, "step": 66190, "token_acc": 0.5419847328244275, "train_speed(iter/s)": 0.672612 }, { "epoch": 2.8359967439269953, "grad_norm": 3.8180365562438965, "learning_rate": 3.955548918242438e-05, "loss": 2.29913387298584, "memory(GiB)": 72.85, "step": 66195, "token_acc": 
0.5241635687732342, "train_speed(iter/s)": 0.672612 }, { "epoch": 2.8362109592562446, "grad_norm": 4.316484451293945, "learning_rate": 3.954890796887391e-05, "loss": 2.087152099609375, "memory(GiB)": 72.85, "step": 66200, "token_acc": 0.532258064516129, "train_speed(iter/s)": 0.672617 }, { "epoch": 2.8364251745854934, "grad_norm": 4.725858688354492, "learning_rate": 3.9542326944654775e-05, "loss": 2.413216972351074, "memory(GiB)": 72.85, "step": 66205, "token_acc": 0.43986254295532645, "train_speed(iter/s)": 0.672627 }, { "epoch": 2.836639389914742, "grad_norm": 4.340005874633789, "learning_rate": 3.953574610988619e-05, "loss": 2.573006439208984, "memory(GiB)": 72.85, "step": 66210, "token_acc": 0.4406779661016949, "train_speed(iter/s)": 0.67261 }, { "epoch": 2.8368536052439914, "grad_norm": 4.766674518585205, "learning_rate": 3.952916546468737e-05, "loss": 2.183163070678711, "memory(GiB)": 72.85, "step": 66215, "token_acc": 0.5269709543568465, "train_speed(iter/s)": 0.672607 }, { "epoch": 2.8370678205732403, "grad_norm": 5.168055534362793, "learning_rate": 3.9522585009177554e-05, "loss": 2.1612531661987306, "memory(GiB)": 72.85, "step": 66220, "token_acc": 0.5464684014869888, "train_speed(iter/s)": 0.672613 }, { "epoch": 2.837282035902489, "grad_norm": 5.253329753875732, "learning_rate": 3.951600474347594e-05, "loss": 2.1479846954345705, "memory(GiB)": 72.85, "step": 66225, "token_acc": 0.5072992700729927, "train_speed(iter/s)": 0.672607 }, { "epoch": 2.8374962512317383, "grad_norm": 4.256030082702637, "learning_rate": 3.950942466770173e-05, "loss": 2.093428611755371, "memory(GiB)": 72.85, "step": 66230, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.672584 }, { "epoch": 2.837710466560987, "grad_norm": 4.107301712036133, "learning_rate": 3.950284478197414e-05, "loss": 2.2590831756591796, "memory(GiB)": 72.85, "step": 66235, "token_acc": 0.5082508250825083, "train_speed(iter/s)": 0.672591 }, { "epoch": 2.837924681890236, "grad_norm": 7.785951137542725, 
"learning_rate": 3.9496265086412364e-05, "loss": 2.005863571166992, "memory(GiB)": 72.85, "step": 66240, "token_acc": 0.5597014925373134, "train_speed(iter/s)": 0.672595 }, { "epoch": 2.838138897219485, "grad_norm": 5.253699779510498, "learning_rate": 3.948968558113559e-05, "loss": 2.343136215209961, "memory(GiB)": 72.85, "step": 66245, "token_acc": 0.5099601593625498, "train_speed(iter/s)": 0.672584 }, { "epoch": 2.838353112548734, "grad_norm": 4.720396041870117, "learning_rate": 3.9483106266263036e-05, "loss": 2.1972373962402343, "memory(GiB)": 72.85, "step": 66250, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.672575 }, { "epoch": 2.838567327877983, "grad_norm": 4.469578742980957, "learning_rate": 3.9476527141913866e-05, "loss": 2.2329626083374023, "memory(GiB)": 72.85, "step": 66255, "token_acc": 0.5083056478405316, "train_speed(iter/s)": 0.672589 }, { "epoch": 2.838781543207232, "grad_norm": 5.101342678070068, "learning_rate": 3.946994820820728e-05, "loss": 2.0663833618164062, "memory(GiB)": 72.85, "step": 66260, "token_acc": 0.532520325203252, "train_speed(iter/s)": 0.672605 }, { "epoch": 2.838995758536481, "grad_norm": 4.018296241760254, "learning_rate": 3.9463369465262466e-05, "loss": 2.129779052734375, "memory(GiB)": 72.85, "step": 66265, "token_acc": 0.5254901960784314, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.8392099738657297, "grad_norm": 4.897321701049805, "learning_rate": 3.945679091319859e-05, "loss": 2.1338788986206056, "memory(GiB)": 72.85, "step": 66270, "token_acc": 0.48787878787878786, "train_speed(iter/s)": 0.6726 }, { "epoch": 2.839424189194979, "grad_norm": 4.979508399963379, "learning_rate": 3.9450212552134845e-05, "loss": 2.323394203186035, "memory(GiB)": 72.85, "step": 66275, "token_acc": 0.47019867549668876, "train_speed(iter/s)": 0.672601 }, { "epoch": 2.8396384045242278, "grad_norm": 4.160813808441162, "learning_rate": 3.9443634382190396e-05, "loss": 2.2712697982788086, "memory(GiB)": 72.85, "step": 66280, 
"token_acc": 0.4856115107913669, "train_speed(iter/s)": 0.672611 }, { "epoch": 2.8398526198534766, "grad_norm": 4.72718620300293, "learning_rate": 3.9437056403484404e-05, "loss": 2.2943363189697266, "memory(GiB)": 72.85, "step": 66285, "token_acc": 0.49, "train_speed(iter/s)": 0.672624 }, { "epoch": 2.840066835182726, "grad_norm": 4.167622089385986, "learning_rate": 3.9430478616136036e-05, "loss": 2.1552141189575194, "memory(GiB)": 72.85, "step": 66290, "token_acc": 0.5119453924914675, "train_speed(iter/s)": 0.672622 }, { "epoch": 2.8402810505119747, "grad_norm": 4.000309944152832, "learning_rate": 3.9423901020264474e-05, "loss": 2.3734258651733398, "memory(GiB)": 72.85, "step": 66295, "token_acc": 0.4900398406374502, "train_speed(iter/s)": 0.672628 }, { "epoch": 2.8404952658412235, "grad_norm": 5.494855880737305, "learning_rate": 3.9417323615988864e-05, "loss": 2.2286943435668944, "memory(GiB)": 72.85, "step": 66300, "token_acc": 0.5338078291814946, "train_speed(iter/s)": 0.672629 }, { "epoch": 2.8407094811704727, "grad_norm": 5.772464752197266, "learning_rate": 3.941074640342838e-05, "loss": 2.343250274658203, "memory(GiB)": 72.85, "step": 66305, "token_acc": 0.4927007299270073, "train_speed(iter/s)": 0.672635 }, { "epoch": 2.8409236964997215, "grad_norm": 4.593503475189209, "learning_rate": 3.940416938270215e-05, "loss": 2.346294975280762, "memory(GiB)": 72.85, "step": 66310, "token_acc": 0.513595166163142, "train_speed(iter/s)": 0.672648 }, { "epoch": 2.8411379118289704, "grad_norm": 6.3446455001831055, "learning_rate": 3.939759255392932e-05, "loss": 2.212725830078125, "memory(GiB)": 72.85, "step": 66315, "token_acc": 0.4738562091503268, "train_speed(iter/s)": 0.67264 }, { "epoch": 2.8413521271582196, "grad_norm": 5.528204917907715, "learning_rate": 3.939101591722906e-05, "loss": 2.1821109771728517, "memory(GiB)": 72.85, "step": 66320, "token_acc": 0.535593220338983, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.8415663424874684, "grad_norm": 
4.776557445526123, "learning_rate": 3.93844394727205e-05, "loss": 2.1421907424926756, "memory(GiB)": 72.85, "step": 66325, "token_acc": 0.52734375, "train_speed(iter/s)": 0.672649 }, { "epoch": 2.8417805578167172, "grad_norm": 6.248463153839111, "learning_rate": 3.937786322052276e-05, "loss": 2.336239051818848, "memory(GiB)": 72.85, "step": 66330, "token_acc": 0.528, "train_speed(iter/s)": 0.672653 }, { "epoch": 2.8419947731459665, "grad_norm": 7.0469069480896, "learning_rate": 3.937128716075501e-05, "loss": 2.2729141235351564, "memory(GiB)": 72.85, "step": 66335, "token_acc": 0.49140893470790376, "train_speed(iter/s)": 0.672654 }, { "epoch": 2.8422089884752153, "grad_norm": 4.8879570960998535, "learning_rate": 3.936471129353635e-05, "loss": 2.245477485656738, "memory(GiB)": 72.85, "step": 66340, "token_acc": 0.5100671140939598, "train_speed(iter/s)": 0.672665 }, { "epoch": 2.842423203804464, "grad_norm": 5.427562713623047, "learning_rate": 3.935813561898593e-05, "loss": 2.373739242553711, "memory(GiB)": 72.85, "step": 66345, "token_acc": 0.5014409221902018, "train_speed(iter/s)": 0.672664 }, { "epoch": 2.8426374191337134, "grad_norm": 6.1647772789001465, "learning_rate": 3.935156013722287e-05, "loss": 2.434188652038574, "memory(GiB)": 72.85, "step": 66350, "token_acc": 0.4608433734939759, "train_speed(iter/s)": 0.67267 }, { "epoch": 2.842851634462962, "grad_norm": 5.1650071144104, "learning_rate": 3.934498484836627e-05, "loss": 2.005764198303223, "memory(GiB)": 72.85, "step": 66355, "token_acc": 0.5445205479452054, "train_speed(iter/s)": 0.672671 }, { "epoch": 2.843065849792211, "grad_norm": 4.7828474044799805, "learning_rate": 3.933840975253527e-05, "loss": 2.3112083435058595, "memory(GiB)": 72.85, "step": 66360, "token_acc": 0.5287009063444109, "train_speed(iter/s)": 0.672676 }, { "epoch": 2.8432800651214603, "grad_norm": 5.427246570587158, "learning_rate": 3.933183484984898e-05, "loss": 2.348859405517578, "memory(GiB)": 72.85, "step": 66365, "token_acc": 0.528, 
"train_speed(iter/s)": 0.672679 }, { "epoch": 2.843494280450709, "grad_norm": 5.009654998779297, "learning_rate": 3.932526014042652e-05, "loss": 2.0003084182739257, "memory(GiB)": 72.85, "step": 66370, "token_acc": 0.5779467680608364, "train_speed(iter/s)": 0.672685 }, { "epoch": 2.843708495779958, "grad_norm": 4.555218696594238, "learning_rate": 3.9318685624386975e-05, "loss": 2.110331153869629, "memory(GiB)": 72.85, "step": 66375, "token_acc": 0.5460526315789473, "train_speed(iter/s)": 0.672696 }, { "epoch": 2.843922711109207, "grad_norm": 5.621971607208252, "learning_rate": 3.931211130184947e-05, "loss": 2.157002258300781, "memory(GiB)": 72.85, "step": 66380, "token_acc": 0.501779359430605, "train_speed(iter/s)": 0.67269 }, { "epoch": 2.844136926438456, "grad_norm": 4.918340682983398, "learning_rate": 3.9305537172933085e-05, "loss": 2.4077835083007812, "memory(GiB)": 72.85, "step": 66385, "token_acc": 0.5117647058823529, "train_speed(iter/s)": 0.672692 }, { "epoch": 2.8443511417677048, "grad_norm": 6.396496772766113, "learning_rate": 3.9298963237756934e-05, "loss": 2.011255073547363, "memory(GiB)": 72.85, "step": 66390, "token_acc": 0.5661016949152542, "train_speed(iter/s)": 0.672698 }, { "epoch": 2.844565357096954, "grad_norm": 5.120174407958984, "learning_rate": 3.92923894964401e-05, "loss": 2.114836502075195, "memory(GiB)": 72.85, "step": 66395, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.672703 }, { "epoch": 2.844779572426203, "grad_norm": 5.193880081176758, "learning_rate": 3.9285815949101675e-05, "loss": 2.094365119934082, "memory(GiB)": 72.85, "step": 66400, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.672704 }, { "epoch": 2.8449937877554516, "grad_norm": 3.871375560760498, "learning_rate": 3.9279242595860746e-05, "loss": 2.3460971832275392, "memory(GiB)": 72.85, "step": 66405, "token_acc": 0.5091463414634146, "train_speed(iter/s)": 0.672711 }, { "epoch": 2.845208003084701, "grad_norm": 5.10439395904541, "learning_rate": 
3.9272669436836395e-05, "loss": 2.3582246780395506, "memory(GiB)": 72.85, "step": 66410, "token_acc": 0.501577287066246, "train_speed(iter/s)": 0.672702 }, { "epoch": 2.8454222184139497, "grad_norm": 5.7998433113098145, "learning_rate": 3.9266096472147694e-05, "loss": 1.9221382141113281, "memory(GiB)": 72.85, "step": 66415, "token_acc": 0.5770609318996416, "train_speed(iter/s)": 0.672707 }, { "epoch": 2.8456364337431985, "grad_norm": 5.035932540893555, "learning_rate": 3.925952370191373e-05, "loss": 2.442918395996094, "memory(GiB)": 72.85, "step": 66420, "token_acc": 0.48344370860927155, "train_speed(iter/s)": 0.672685 }, { "epoch": 2.845850649072448, "grad_norm": 4.575541019439697, "learning_rate": 3.9252951126253565e-05, "loss": 2.546527290344238, "memory(GiB)": 72.85, "step": 66425, "token_acc": 0.44776119402985076, "train_speed(iter/s)": 0.672686 }, { "epoch": 2.8460648644016966, "grad_norm": 5.023799896240234, "learning_rate": 3.9246378745286276e-05, "loss": 2.2951997756958007, "memory(GiB)": 72.85, "step": 66430, "token_acc": 0.5211726384364821, "train_speed(iter/s)": 0.672695 }, { "epoch": 2.8462790797309454, "grad_norm": 5.120741367340088, "learning_rate": 3.923980655913091e-05, "loss": 2.3783445358276367, "memory(GiB)": 72.85, "step": 66435, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.672691 }, { "epoch": 2.8464932950601947, "grad_norm": 6.335713863372803, "learning_rate": 3.923323456790656e-05, "loss": 2.425120162963867, "memory(GiB)": 72.85, "step": 66440, "token_acc": 0.4797047970479705, "train_speed(iter/s)": 0.672693 }, { "epoch": 2.8467075103894435, "grad_norm": 5.189638137817383, "learning_rate": 3.9226662771732244e-05, "loss": 2.2358694076538086, "memory(GiB)": 72.85, "step": 66445, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.672693 }, { "epoch": 2.8469217257186923, "grad_norm": 4.366446018218994, "learning_rate": 3.922009117072706e-05, "loss": 2.2698331832885743, "memory(GiB)": 72.85, "step": 66450, "token_acc": 
0.5110294117647058, "train_speed(iter/s)": 0.672693 }, { "epoch": 2.8471359410479415, "grad_norm": 4.4711713790893555, "learning_rate": 3.921351976501004e-05, "loss": 2.360668182373047, "memory(GiB)": 72.85, "step": 66455, "token_acc": 0.49696969696969695, "train_speed(iter/s)": 0.672702 }, { "epoch": 2.8473501563771904, "grad_norm": 4.203141212463379, "learning_rate": 3.920694855470021e-05, "loss": 2.1346616744995117, "memory(GiB)": 72.85, "step": 66460, "token_acc": 0.4885245901639344, "train_speed(iter/s)": 0.672711 }, { "epoch": 2.847564371706439, "grad_norm": 6.254764556884766, "learning_rate": 3.920169172722555e-05, "loss": 2.041529655456543, "memory(GiB)": 72.85, "step": 66465, "token_acc": 0.5259259259259259, "train_speed(iter/s)": 0.672722 }, { "epoch": 2.8477785870356884, "grad_norm": 3.846799850463867, "learning_rate": 3.91951208689487e-05, "loss": 1.9186929702758788, "memory(GiB)": 72.85, "step": 66470, "token_acc": 0.5442622950819672, "train_speed(iter/s)": 0.672717 }, { "epoch": 2.8479928023649372, "grad_norm": 5.014898777008057, "learning_rate": 3.9188550206412364e-05, "loss": 2.0669620513916014, "memory(GiB)": 72.85, "step": 66475, "token_acc": 0.5354609929078015, "train_speed(iter/s)": 0.672721 }, { "epoch": 2.848207017694186, "grad_norm": 4.34937858581543, "learning_rate": 3.9181979739735565e-05, "loss": 2.336806869506836, "memory(GiB)": 72.85, "step": 66480, "token_acc": 0.4936708860759494, "train_speed(iter/s)": 0.672725 }, { "epoch": 2.8484212330234353, "grad_norm": 9.038169860839844, "learning_rate": 3.917540946903736e-05, "loss": 2.0764089584350587, "memory(GiB)": 72.85, "step": 66485, "token_acc": 0.5340501792114696, "train_speed(iter/s)": 0.672735 }, { "epoch": 2.848635448352684, "grad_norm": 5.611948013305664, "learning_rate": 3.916883939443678e-05, "loss": 2.474081039428711, "memory(GiB)": 72.85, "step": 66490, "token_acc": 0.4748201438848921, "train_speed(iter/s)": 0.672742 }, { "epoch": 2.848849663681933, "grad_norm": 4.590900421142578, 
"learning_rate": 3.916226951605283e-05, "loss": 2.490616226196289, "memory(GiB)": 72.85, "step": 66495, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.672752 }, { "epoch": 2.849063879011182, "grad_norm": 4.14667272567749, "learning_rate": 3.915569983400453e-05, "loss": 2.351367378234863, "memory(GiB)": 72.85, "step": 66500, "token_acc": 0.5213414634146342, "train_speed(iter/s)": 0.672749 }, { "epoch": 2.849063879011182, "eval_loss": 1.888687252998352, "eval_runtime": 16.1065, "eval_samples_per_second": 6.209, "eval_steps_per_second": 6.209, "eval_token_acc": 0.5160390516039052, "step": 66500 }, { "epoch": 2.849278094340431, "grad_norm": 4.734620094299316, "learning_rate": 3.9149130348410906e-05, "loss": 2.2578901290893554, "memory(GiB)": 72.85, "step": 66505, "token_acc": 0.5230923694779116, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.84949230966968, "grad_norm": 4.915157794952393, "learning_rate": 3.9142561059390955e-05, "loss": 2.3831777572631836, "memory(GiB)": 72.85, "step": 66510, "token_acc": 0.5016181229773463, "train_speed(iter/s)": 0.672613 }, { "epoch": 2.849706524998929, "grad_norm": 5.613536357879639, "learning_rate": 3.913599196706371e-05, "loss": 2.3486665725708007, "memory(GiB)": 72.85, "step": 66515, "token_acc": 0.4732824427480916, "train_speed(iter/s)": 0.672624 }, { "epoch": 2.849920740328178, "grad_norm": 4.903514862060547, "learning_rate": 3.912942307154816e-05, "loss": 2.257468414306641, "memory(GiB)": 72.85, "step": 66520, "token_acc": 0.5795053003533569, "train_speed(iter/s)": 0.672635 }, { "epoch": 2.8501349556574267, "grad_norm": 6.1584038734436035, "learning_rate": 3.9122854372963296e-05, "loss": 2.369297218322754, "memory(GiB)": 72.85, "step": 66525, "token_acc": 0.47280334728033474, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.850349170986676, "grad_norm": 4.582597255706787, "learning_rate": 3.9116285871428136e-05, "loss": 2.3433300018310548, "memory(GiB)": 72.85, "step": 66530, "token_acc": 0.49707602339181284, 
"train_speed(iter/s)": 0.672621 }, { "epoch": 2.8505633863159248, "grad_norm": 6.0591607093811035, "learning_rate": 3.910971756706168e-05, "loss": 2.1610889434814453, "memory(GiB)": 72.85, "step": 66535, "token_acc": 0.4826254826254826, "train_speed(iter/s)": 0.672626 }, { "epoch": 2.8507776016451736, "grad_norm": 4.96173620223999, "learning_rate": 3.910314945998288e-05, "loss": 2.4255769729614256, "memory(GiB)": 72.85, "step": 66540, "token_acc": 0.5130718954248366, "train_speed(iter/s)": 0.672622 }, { "epoch": 2.850991816974423, "grad_norm": 5.307187557220459, "learning_rate": 3.9096581550310776e-05, "loss": 2.214950180053711, "memory(GiB)": 72.85, "step": 66545, "token_acc": 0.5084175084175084, "train_speed(iter/s)": 0.672612 }, { "epoch": 2.8512060323036716, "grad_norm": 8.339349746704102, "learning_rate": 3.90900138381643e-05, "loss": 2.5137147903442383, "memory(GiB)": 72.85, "step": 66550, "token_acc": 0.46905537459283386, "train_speed(iter/s)": 0.672618 }, { "epoch": 2.8514202476329205, "grad_norm": 8.310546875, "learning_rate": 3.908344632366246e-05, "loss": 2.3637231826782226, "memory(GiB)": 72.85, "step": 66555, "token_acc": 0.48494983277591974, "train_speed(iter/s)": 0.672628 }, { "epoch": 2.8516344629621697, "grad_norm": 5.31398868560791, "learning_rate": 3.907687900692424e-05, "loss": 2.1763954162597656, "memory(GiB)": 72.85, "step": 66560, "token_acc": 0.47491638795986624, "train_speed(iter/s)": 0.672624 }, { "epoch": 2.8518486782914185, "grad_norm": 5.399849891662598, "learning_rate": 3.9070311888068605e-05, "loss": 2.2926362991333007, "memory(GiB)": 72.85, "step": 66565, "token_acc": 0.4766081871345029, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.8520628936206673, "grad_norm": 6.187798500061035, "learning_rate": 3.906374496721452e-05, "loss": 2.2177654266357423, "memory(GiB)": 72.85, "step": 66570, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.8522771089499166, "grad_norm": 6.1417765617370605, "learning_rate": 
3.905717824448096e-05, "loss": 2.3484012603759767, "memory(GiB)": 72.85, "step": 66575, "token_acc": 0.5016611295681063, "train_speed(iter/s)": 0.672631 }, { "epoch": 2.8524913242791654, "grad_norm": 4.690108299255371, "learning_rate": 3.905061171998688e-05, "loss": 2.6219778060913086, "memory(GiB)": 72.85, "step": 66580, "token_acc": 0.46216216216216216, "train_speed(iter/s)": 0.67263 }, { "epoch": 2.852705539608414, "grad_norm": 4.096339225769043, "learning_rate": 3.904404539385123e-05, "loss": 2.257903289794922, "memory(GiB)": 72.85, "step": 66585, "token_acc": 0.49696969696969695, "train_speed(iter/s)": 0.672636 }, { "epoch": 2.8529197549376635, "grad_norm": 5.7180657386779785, "learning_rate": 3.903747926619299e-05, "loss": 2.164842987060547, "memory(GiB)": 72.85, "step": 66590, "token_acc": 0.5144508670520231, "train_speed(iter/s)": 0.672638 }, { "epoch": 2.8531339702669123, "grad_norm": 4.0981597900390625, "learning_rate": 3.9030913337131095e-05, "loss": 2.3853277206420898, "memory(GiB)": 72.85, "step": 66595, "token_acc": 0.4886731391585761, "train_speed(iter/s)": 0.672636 }, { "epoch": 2.853348185596161, "grad_norm": 3.9005067348480225, "learning_rate": 3.902434760678448e-05, "loss": 2.0850101470947267, "memory(GiB)": 72.85, "step": 66600, "token_acc": 0.5480769230769231, "train_speed(iter/s)": 0.672633 }, { "epoch": 2.8535624009254104, "grad_norm": 5.633953094482422, "learning_rate": 3.901778207527211e-05, "loss": 2.143407440185547, "memory(GiB)": 72.85, "step": 66605, "token_acc": 0.5267857142857143, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.853776616254659, "grad_norm": 7.397823333740234, "learning_rate": 3.901121674271292e-05, "loss": 2.173874855041504, "memory(GiB)": 72.85, "step": 66610, "token_acc": 0.5590909090909091, "train_speed(iter/s)": 0.672639 }, { "epoch": 2.853990831583908, "grad_norm": 4.38796329498291, "learning_rate": 3.9004651609225854e-05, "loss": 2.265304183959961, "memory(GiB)": 72.85, "step": 66615, "token_acc": 
0.5083056478405316, "train_speed(iter/s)": 0.672645 }, { "epoch": 2.8542050469131572, "grad_norm": 4.702394008636475, "learning_rate": 3.899808667492984e-05, "loss": 2.5751102447509764, "memory(GiB)": 72.85, "step": 66620, "token_acc": 0.46785714285714286, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.854419262242406, "grad_norm": 7.416023254394531, "learning_rate": 3.89915219399438e-05, "loss": 2.174312210083008, "memory(GiB)": 72.85, "step": 66625, "token_acc": 0.5176470588235295, "train_speed(iter/s)": 0.672646 }, { "epoch": 2.854633477571655, "grad_norm": 3.973092794418335, "learning_rate": 3.8984957404386644e-05, "loss": 1.8609359741210938, "memory(GiB)": 72.85, "step": 66630, "token_acc": 0.5225563909774437, "train_speed(iter/s)": 0.672638 }, { "epoch": 2.854847692900904, "grad_norm": 5.31488037109375, "learning_rate": 3.897839306837735e-05, "loss": 1.9549089431762696, "memory(GiB)": 72.85, "step": 66635, "token_acc": 0.551094890510949, "train_speed(iter/s)": 0.672643 }, { "epoch": 2.855061908230153, "grad_norm": 4.488143444061279, "learning_rate": 3.89718289320348e-05, "loss": 2.0934284210205076, "memory(GiB)": 72.85, "step": 66640, "token_acc": 0.5283018867924528, "train_speed(iter/s)": 0.672633 }, { "epoch": 2.8552761235594017, "grad_norm": 5.674739360809326, "learning_rate": 3.89652649954779e-05, "loss": 2.434255599975586, "memory(GiB)": 72.85, "step": 66645, "token_acc": 0.5047021943573667, "train_speed(iter/s)": 0.672641 }, { "epoch": 2.855490338888651, "grad_norm": 6.877378940582275, "learning_rate": 3.8958701258825594e-05, "loss": 2.019522285461426, "memory(GiB)": 72.85, "step": 66650, "token_acc": 0.5465116279069767, "train_speed(iter/s)": 0.672639 }, { "epoch": 2.8557045542179, "grad_norm": 4.3157057762146, "learning_rate": 3.8952137722196754e-05, "loss": 2.03841552734375, "memory(GiB)": 72.85, "step": 66655, "token_acc": 0.5588235294117647, "train_speed(iter/s)": 0.672638 }, { "epoch": 2.8559187695471486, "grad_norm": 4.268231391906738, 
"learning_rate": 3.894557438571032e-05, "loss": 2.2417964935302734, "memory(GiB)": 72.85, "step": 66660, "token_acc": 0.5314465408805031, "train_speed(iter/s)": 0.672635 }, { "epoch": 2.856132984876398, "grad_norm": 4.908862590789795, "learning_rate": 3.893901124948518e-05, "loss": 2.3170413970947266, "memory(GiB)": 72.85, "step": 66665, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.672641 }, { "epoch": 2.8563472002056467, "grad_norm": 4.316481113433838, "learning_rate": 3.8932448313640205e-05, "loss": 2.27984504699707, "memory(GiB)": 72.85, "step": 66670, "token_acc": 0.4879518072289157, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.8565614155348955, "grad_norm": 4.717637538909912, "learning_rate": 3.892588557829433e-05, "loss": 2.3102596282958983, "memory(GiB)": 72.85, "step": 66675, "token_acc": 0.5367647058823529, "train_speed(iter/s)": 0.67265 }, { "epoch": 2.8567756308641448, "grad_norm": 5.984638690948486, "learning_rate": 3.891932304356642e-05, "loss": 2.3807872772216796, "memory(GiB)": 72.85, "step": 66680, "token_acc": 0.5017182130584192, "train_speed(iter/s)": 0.672664 }, { "epoch": 2.8569898461933936, "grad_norm": 6.027592658996582, "learning_rate": 3.8912760709575365e-05, "loss": 2.5629377365112305, "memory(GiB)": 72.85, "step": 66685, "token_acc": 0.458955223880597, "train_speed(iter/s)": 0.672672 }, { "epoch": 2.8572040615226424, "grad_norm": 4.502224922180176, "learning_rate": 3.890619857644006e-05, "loss": 2.3300048828125, "memory(GiB)": 72.85, "step": 66690, "token_acc": 0.5321428571428571, "train_speed(iter/s)": 0.672668 }, { "epoch": 2.8574182768518916, "grad_norm": 7.629763126373291, "learning_rate": 3.889963664427935e-05, "loss": 2.2583751678466797, "memory(GiB)": 72.85, "step": 66695, "token_acc": 0.47719298245614034, "train_speed(iter/s)": 0.672659 }, { "epoch": 2.8576324921811405, "grad_norm": 5.026388645172119, "learning_rate": 3.889307491321216e-05, "loss": 2.6123210906982424, "memory(GiB)": 72.85, "step": 66700, 
"token_acc": 0.484149855907781, "train_speed(iter/s)": 0.672665 }, { "epoch": 2.8578467075103893, "grad_norm": 7.651427268981934, "learning_rate": 3.888651338335731e-05, "loss": 2.250991439819336, "memory(GiB)": 72.85, "step": 66705, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.672671 }, { "epoch": 2.8580609228396385, "grad_norm": 5.016650676727295, "learning_rate": 3.887995205483372e-05, "loss": 2.1320077896118166, "memory(GiB)": 72.85, "step": 66710, "token_acc": 0.5794701986754967, "train_speed(iter/s)": 0.672676 }, { "epoch": 2.8582751381688873, "grad_norm": 6.262345790863037, "learning_rate": 3.8873390927760214e-05, "loss": 2.3354255676269533, "memory(GiB)": 72.85, "step": 66715, "token_acc": 0.49836065573770494, "train_speed(iter/s)": 0.672674 }, { "epoch": 2.858489353498136, "grad_norm": 4.635672569274902, "learning_rate": 3.886683000225568e-05, "loss": 2.2461479187011717, "memory(GiB)": 72.85, "step": 66720, "token_acc": 0.5251572327044025, "train_speed(iter/s)": 0.67267 }, { "epoch": 2.8587035688273854, "grad_norm": 4.594204425811768, "learning_rate": 3.8860269278438974e-05, "loss": 2.301404571533203, "memory(GiB)": 72.85, "step": 66725, "token_acc": 0.5152542372881356, "train_speed(iter/s)": 0.672683 }, { "epoch": 2.8589177841566342, "grad_norm": 5.985118389129639, "learning_rate": 3.885370875642892e-05, "loss": 2.006219673156738, "memory(GiB)": 72.85, "step": 66730, "token_acc": 0.5615141955835962, "train_speed(iter/s)": 0.672687 }, { "epoch": 2.859131999485883, "grad_norm": 3.9388885498046875, "learning_rate": 3.88471484363444e-05, "loss": 2.483930206298828, "memory(GiB)": 72.85, "step": 66735, "token_acc": 0.5111111111111111, "train_speed(iter/s)": 0.672677 }, { "epoch": 2.8593462148151323, "grad_norm": 4.872296333312988, "learning_rate": 3.884058831830426e-05, "loss": 2.1144346237182616, "memory(GiB)": 72.85, "step": 66740, "token_acc": 0.5163398692810458, "train_speed(iter/s)": 0.672674 }, { "epoch": 2.859560430144381, "grad_norm": 
3.587587833404541, "learning_rate": 3.88340284024273e-05, "loss": 2.0757431030273437, "memory(GiB)": 72.85, "step": 66745, "token_acc": 0.5221518987341772, "train_speed(iter/s)": 0.672667 }, { "epoch": 2.85977464547363, "grad_norm": 5.499965667724609, "learning_rate": 3.8827468688832414e-05, "loss": 2.2385929107666014, "memory(GiB)": 72.85, "step": 66750, "token_acc": 0.4945054945054945, "train_speed(iter/s)": 0.672664 }, { "epoch": 2.859988860802879, "grad_norm": 4.599653244018555, "learning_rate": 3.88209091776384e-05, "loss": 2.1869300842285155, "memory(GiB)": 72.85, "step": 66755, "token_acc": 0.5480427046263345, "train_speed(iter/s)": 0.672661 }, { "epoch": 2.860203076132128, "grad_norm": 4.870532035827637, "learning_rate": 3.8814349868964114e-05, "loss": 2.2221694946289063, "memory(GiB)": 72.85, "step": 66760, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.672667 }, { "epoch": 2.860417291461377, "grad_norm": 4.233839511871338, "learning_rate": 3.8807790762928367e-05, "loss": 2.667164421081543, "memory(GiB)": 72.85, "step": 66765, "token_acc": 0.4403183023872679, "train_speed(iter/s)": 0.672672 }, { "epoch": 2.860631506790626, "grad_norm": 7.400300025939941, "learning_rate": 3.8801231859649986e-05, "loss": 2.1841814041137697, "memory(GiB)": 72.85, "step": 66770, "token_acc": 0.52, "train_speed(iter/s)": 0.672684 }, { "epoch": 2.860845722119875, "grad_norm": 6.451882362365723, "learning_rate": 3.879467315924779e-05, "loss": 2.407547378540039, "memory(GiB)": 72.85, "step": 66775, "token_acc": 0.4936708860759494, "train_speed(iter/s)": 0.672683 }, { "epoch": 2.8610599374491237, "grad_norm": 4.656249523162842, "learning_rate": 3.878811466184061e-05, "loss": 2.265060806274414, "memory(GiB)": 72.85, "step": 66780, "token_acc": 0.5047021943573667, "train_speed(iter/s)": 0.672673 }, { "epoch": 2.861274152778373, "grad_norm": 4.673425197601318, "learning_rate": 3.8781556367547255e-05, "loss": 1.887440299987793, "memory(GiB)": 72.85, "step": 66785, 
"token_acc": 0.5666666666666667, "train_speed(iter/s)": 0.672677 }, { "epoch": 2.8614883681076217, "grad_norm": 4.650686264038086, "learning_rate": 3.877499827648652e-05, "loss": 2.125926208496094, "memory(GiB)": 72.85, "step": 66790, "token_acc": 0.4910394265232975, "train_speed(iter/s)": 0.672673 }, { "epoch": 2.8617025834368706, "grad_norm": 3.4322338104248047, "learning_rate": 3.8768440388777236e-05, "loss": 2.3662431716918944, "memory(GiB)": 72.85, "step": 66795, "token_acc": 0.5, "train_speed(iter/s)": 0.672676 }, { "epoch": 2.86191679876612, "grad_norm": 7.386733055114746, "learning_rate": 3.876188270453817e-05, "loss": 2.159648323059082, "memory(GiB)": 72.85, "step": 66800, "token_acc": 0.5307692307692308, "train_speed(iter/s)": 0.672694 }, { "epoch": 2.8621310140953686, "grad_norm": 6.34850549697876, "learning_rate": 3.875532522388816e-05, "loss": 2.3222707748413085, "memory(GiB)": 72.85, "step": 66805, "token_acc": 0.4746268656716418, "train_speed(iter/s)": 0.6727 }, { "epoch": 2.8623452294246174, "grad_norm": 4.711183071136475, "learning_rate": 3.874876794694598e-05, "loss": 2.484441566467285, "memory(GiB)": 72.85, "step": 66810, "token_acc": 0.496875, "train_speed(iter/s)": 0.672704 }, { "epoch": 2.8625594447538667, "grad_norm": 4.130077362060547, "learning_rate": 3.874221087383041e-05, "loss": 2.3740493774414064, "memory(GiB)": 72.85, "step": 66815, "token_acc": 0.4937106918238994, "train_speed(iter/s)": 0.67272 }, { "epoch": 2.8627736600831155, "grad_norm": 4.726319789886475, "learning_rate": 3.8735654004660263e-05, "loss": 2.304883575439453, "memory(GiB)": 72.85, "step": 66820, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.672735 }, { "epoch": 2.8629878754123643, "grad_norm": 3.7739837169647217, "learning_rate": 3.872909733955431e-05, "loss": 2.290903854370117, "memory(GiB)": 72.85, "step": 66825, "token_acc": 0.48417721518987344, "train_speed(iter/s)": 0.672737 }, { "epoch": 2.8632020907416136, "grad_norm": 4.09919548034668, 
"learning_rate": 3.8722540878631325e-05, "loss": 2.3871063232421874, "memory(GiB)": 72.85, "step": 66830, "token_acc": 0.4697986577181208, "train_speed(iter/s)": 0.672729 }, { "epoch": 2.8634163060708624, "grad_norm": 5.692593097686768, "learning_rate": 3.8715984622010096e-05, "loss": 2.1189756393432617, "memory(GiB)": 72.85, "step": 66835, "token_acc": 0.5143769968051118, "train_speed(iter/s)": 0.672723 }, { "epoch": 2.863630521400111, "grad_norm": 4.5176591873168945, "learning_rate": 3.870942856980938e-05, "loss": 2.394292640686035, "memory(GiB)": 72.85, "step": 66840, "token_acc": 0.498567335243553, "train_speed(iter/s)": 0.672722 }, { "epoch": 2.8638447367293605, "grad_norm": 5.980645656585693, "learning_rate": 3.870287272214798e-05, "loss": 2.3292266845703127, "memory(GiB)": 72.85, "step": 66845, "token_acc": 0.4937106918238994, "train_speed(iter/s)": 0.672722 }, { "epoch": 2.8640589520586093, "grad_norm": 6.1081671714782715, "learning_rate": 3.86963170791446e-05, "loss": 2.0185028076171876, "memory(GiB)": 72.85, "step": 66850, "token_acc": 0.5347222222222222, "train_speed(iter/s)": 0.672718 }, { "epoch": 2.864273167387858, "grad_norm": 5.018372058868408, "learning_rate": 3.868976164091807e-05, "loss": 2.2931995391845703, "memory(GiB)": 72.85, "step": 66855, "token_acc": 0.5597014925373134, "train_speed(iter/s)": 0.672729 }, { "epoch": 2.8644873827171073, "grad_norm": 4.5688910484313965, "learning_rate": 3.86832064075871e-05, "loss": 2.282161521911621, "memory(GiB)": 72.85, "step": 66860, "token_acc": 0.5310344827586206, "train_speed(iter/s)": 0.672739 }, { "epoch": 2.864701598046356, "grad_norm": 4.440577507019043, "learning_rate": 3.8676651379270476e-05, "loss": 2.4734981536865233, "memory(GiB)": 72.85, "step": 66865, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.672749 }, { "epoch": 2.864915813375605, "grad_norm": 5.287296295166016, "learning_rate": 3.8670096556086945e-05, "loss": 2.171610641479492, "memory(GiB)": 72.85, "step": 66870, 
"token_acc": 0.5304878048780488, "train_speed(iter/s)": 0.672743 }, { "epoch": 2.8651300287048542, "grad_norm": 5.529905796051025, "learning_rate": 3.866354193815522e-05, "loss": 1.9849437713623046, "memory(GiB)": 72.85, "step": 66875, "token_acc": 0.5906040268456376, "train_speed(iter/s)": 0.672738 }, { "epoch": 2.865344244034103, "grad_norm": null, "learning_rate": 3.8658298391670966e-05, "loss": 2.189678764343262, "memory(GiB)": 72.85, "step": 66880, "token_acc": 0.5320754716981132, "train_speed(iter/s)": 0.672737 }, { "epoch": 2.865558459363352, "grad_norm": 4.166343688964844, "learning_rate": 3.8651744143491774e-05, "loss": 2.1233631134033204, "memory(GiB)": 72.85, "step": 66885, "token_acc": 0.5097276264591439, "train_speed(iter/s)": 0.672734 }, { "epoch": 2.865772674692601, "grad_norm": 5.269502639770508, "learning_rate": 3.864519010089689e-05, "loss": 2.4075748443603517, "memory(GiB)": 72.85, "step": 66890, "token_acc": 0.4652014652014652, "train_speed(iter/s)": 0.67273 }, { "epoch": 2.86598689002185, "grad_norm": 4.927108287811279, "learning_rate": 3.863863626400501e-05, "loss": 1.7777494430541991, "memory(GiB)": 72.85, "step": 66895, "token_acc": 0.5677966101694916, "train_speed(iter/s)": 0.672733 }, { "epoch": 2.8662011053510987, "grad_norm": 4.015029430389404, "learning_rate": 3.863208263293491e-05, "loss": 2.1317726135253907, "memory(GiB)": 72.85, "step": 66900, "token_acc": 0.5051194539249146, "train_speed(iter/s)": 0.672742 }, { "epoch": 2.866415320680348, "grad_norm": 4.496556282043457, "learning_rate": 3.862552920780531e-05, "loss": 2.375013542175293, "memory(GiB)": 72.85, "step": 66905, "token_acc": 0.47761194029850745, "train_speed(iter/s)": 0.672734 }, { "epoch": 2.866629536009597, "grad_norm": 5.251720905303955, "learning_rate": 3.861897598873491e-05, "loss": 2.5606224060058596, "memory(GiB)": 72.85, "step": 66910, "token_acc": 0.4657534246575342, "train_speed(iter/s)": 0.672743 }, { "epoch": 2.8668437513388456, "grad_norm": 
5.669228553771973, "learning_rate": 3.861242297584243e-05, "loss": 2.070673370361328, "memory(GiB)": 72.85, "step": 66915, "token_acc": 0.5535714285714286, "train_speed(iter/s)": 0.672758 }, { "epoch": 2.867057966668095, "grad_norm": 5.465575218200684, "learning_rate": 3.8605870169246596e-05, "loss": 2.482360076904297, "memory(GiB)": 72.85, "step": 66920, "token_acc": 0.47019867549668876, "train_speed(iter/s)": 0.672755 }, { "epoch": 2.8672721819973437, "grad_norm": 4.738051414489746, "learning_rate": 3.8599317569066104e-05, "loss": 2.4423667907714846, "memory(GiB)": 72.85, "step": 66925, "token_acc": 0.476027397260274, "train_speed(iter/s)": 0.672753 }, { "epoch": 2.8674863973265925, "grad_norm": 5.093533039093018, "learning_rate": 3.859276517541968e-05, "loss": 2.4095035552978517, "memory(GiB)": 72.85, "step": 66930, "token_acc": 0.49240121580547114, "train_speed(iter/s)": 0.672753 }, { "epoch": 2.8677006126558418, "grad_norm": 5.2932329177856445, "learning_rate": 3.8586212988426015e-05, "loss": 2.369799041748047, "memory(GiB)": 72.85, "step": 66935, "token_acc": 0.47278911564625853, "train_speed(iter/s)": 0.672751 }, { "epoch": 2.8679148279850906, "grad_norm": 4.726813316345215, "learning_rate": 3.85796610082038e-05, "loss": 2.231303024291992, "memory(GiB)": 72.85, "step": 66940, "token_acc": 0.5182724252491694, "train_speed(iter/s)": 0.672759 }, { "epoch": 2.8681290433143394, "grad_norm": 4.492504119873047, "learning_rate": 3.857310923487175e-05, "loss": 1.780431365966797, "memory(GiB)": 72.85, "step": 66945, "token_acc": 0.6068376068376068, "train_speed(iter/s)": 0.672752 }, { "epoch": 2.8683432586435886, "grad_norm": 4.906249046325684, "learning_rate": 3.856655766854854e-05, "loss": 2.3792350769042967, "memory(GiB)": 72.85, "step": 66950, "token_acc": 0.5064516129032258, "train_speed(iter/s)": 0.672761 }, { "epoch": 2.8685574739728374, "grad_norm": 4.5201640129089355, "learning_rate": 3.856000630935286e-05, "loss": 2.2542497634887697, "memory(GiB)": 72.85, 
"step": 66955, "token_acc": 0.5718954248366013, "train_speed(iter/s)": 0.67277 }, { "epoch": 2.8687716893020863, "grad_norm": 6.0332231521606445, "learning_rate": 3.855345515740341e-05, "loss": 2.0993009567260743, "memory(GiB)": 72.85, "step": 66960, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672781 }, { "epoch": 2.8689859046313355, "grad_norm": 5.923471450805664, "learning_rate": 3.854690421281884e-05, "loss": 2.1348594665527343, "memory(GiB)": 72.85, "step": 66965, "token_acc": 0.5208333333333334, "train_speed(iter/s)": 0.672787 }, { "epoch": 2.8692001199605843, "grad_norm": 4.114765644073486, "learning_rate": 3.8540353475717846e-05, "loss": 2.270997428894043, "memory(GiB)": 72.85, "step": 66970, "token_acc": 0.5665236051502146, "train_speed(iter/s)": 0.672792 }, { "epoch": 2.869414335289833, "grad_norm": 4.255698204040527, "learning_rate": 3.85338029462191e-05, "loss": 2.4356033325195314, "memory(GiB)": 72.85, "step": 66975, "token_acc": 0.4652567975830816, "train_speed(iter/s)": 0.672792 }, { "epoch": 2.8696285506190824, "grad_norm": 4.794826030731201, "learning_rate": 3.8527252624441276e-05, "loss": 2.0558773040771485, "memory(GiB)": 72.85, "step": 66980, "token_acc": 0.5241635687732342, "train_speed(iter/s)": 0.67277 }, { "epoch": 2.869842765948331, "grad_norm": 5.942518711090088, "learning_rate": 3.852070251050303e-05, "loss": 2.192554473876953, "memory(GiB)": 72.85, "step": 66985, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672779 }, { "epoch": 2.87005698127758, "grad_norm": 4.235851287841797, "learning_rate": 3.851415260452303e-05, "loss": 2.371307373046875, "memory(GiB)": 72.85, "step": 66990, "token_acc": 0.43874643874643876, "train_speed(iter/s)": 0.672793 }, { "epoch": 2.8702711966068293, "grad_norm": 4.598145484924316, "learning_rate": 3.8507602906619936e-05, "loss": 2.235751724243164, "memory(GiB)": 72.85, "step": 66995, "token_acc": 0.4938650306748466, "train_speed(iter/s)": 0.672788 }, { "epoch": 2.870485411936078, 
"grad_norm": 5.3450026512146, "learning_rate": 3.850105341691238e-05, "loss": 1.9829795837402344, "memory(GiB)": 72.85, "step": 67000, "token_acc": 0.5115511551155115, "train_speed(iter/s)": 0.672786 }, { "epoch": 2.870485411936078, "eval_loss": 2.0371973514556885, "eval_runtime": 15.5417, "eval_samples_per_second": 6.434, "eval_steps_per_second": 6.434, "eval_token_acc": 0.5034867503486751, "step": 67000 }, { "epoch": 2.870699627265327, "grad_norm": 4.591775417327881, "learning_rate": 3.849450413551904e-05, "loss": 1.9919336318969727, "memory(GiB)": 72.85, "step": 67005, "token_acc": 0.5216049382716049, "train_speed(iter/s)": 0.672659 }, { "epoch": 2.870913842594576, "grad_norm": 5.593405723571777, "learning_rate": 3.848795506255855e-05, "loss": 2.219834899902344, "memory(GiB)": 72.85, "step": 67010, "token_acc": 0.4981684981684982, "train_speed(iter/s)": 0.672661 }, { "epoch": 2.871128057923825, "grad_norm": 3.721395254135132, "learning_rate": 3.8481406198149545e-05, "loss": 2.0461309432983397, "memory(GiB)": 72.85, "step": 67015, "token_acc": 0.5537459283387622, "train_speed(iter/s)": 0.672658 }, { "epoch": 2.871342273253074, "grad_norm": 4.81046724319458, "learning_rate": 3.847485754241068e-05, "loss": 1.997739028930664, "memory(GiB)": 72.85, "step": 67020, "token_acc": 0.5060240963855421, "train_speed(iter/s)": 0.672662 }, { "epoch": 2.871556488582323, "grad_norm": 4.775210857391357, "learning_rate": 3.8468309095460576e-05, "loss": 2.452315902709961, "memory(GiB)": 72.85, "step": 67025, "token_acc": 0.4608150470219436, "train_speed(iter/s)": 0.672652 }, { "epoch": 2.871770703911572, "grad_norm": 4.438849925994873, "learning_rate": 3.846176085741788e-05, "loss": 2.3512195587158202, "memory(GiB)": 72.85, "step": 67030, "token_acc": 0.47761194029850745, "train_speed(iter/s)": 0.672655 }, { "epoch": 2.8719849192408207, "grad_norm": 4.697140693664551, "learning_rate": 3.845521282840121e-05, "loss": 2.3968698501586916, "memory(GiB)": 72.85, "step": 67035, 
"token_acc": 0.48253968253968255, "train_speed(iter/s)": 0.672664 }, { "epoch": 2.87219913457007, "grad_norm": 5.8131818771362305, "learning_rate": 3.8448665008529176e-05, "loss": 2.016520309448242, "memory(GiB)": 72.85, "step": 67040, "token_acc": 0.5181518151815182, "train_speed(iter/s)": 0.672669 }, { "epoch": 2.8724133498993187, "grad_norm": 5.692855358123779, "learning_rate": 3.8442117397920394e-05, "loss": 2.4361745834350588, "memory(GiB)": 72.85, "step": 67045, "token_acc": 0.43870967741935485, "train_speed(iter/s)": 0.672662 }, { "epoch": 2.8726275652285675, "grad_norm": 5.4893412590026855, "learning_rate": 3.8435569996693535e-05, "loss": 2.238597106933594, "memory(GiB)": 72.85, "step": 67050, "token_acc": 0.525, "train_speed(iter/s)": 0.672663 }, { "epoch": 2.872841780557817, "grad_norm": 5.8610920906066895, "learning_rate": 3.8429022804967164e-05, "loss": 2.354923629760742, "memory(GiB)": 72.85, "step": 67055, "token_acc": 0.5196078431372549, "train_speed(iter/s)": 0.672666 }, { "epoch": 2.8730559958870656, "grad_norm": 4.691226482391357, "learning_rate": 3.8422475822859886e-05, "loss": 2.208898735046387, "memory(GiB)": 72.85, "step": 67060, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.672673 }, { "epoch": 2.8732702112163144, "grad_norm": 5.056803226470947, "learning_rate": 3.841592905049034e-05, "loss": 1.9950693130493165, "memory(GiB)": 72.85, "step": 67065, "token_acc": 0.5335689045936396, "train_speed(iter/s)": 0.672681 }, { "epoch": 2.8734844265455637, "grad_norm": 4.194988250732422, "learning_rate": 3.8409382487977086e-05, "loss": 2.132921600341797, "memory(GiB)": 72.85, "step": 67070, "token_acc": 0.5371621621621622, "train_speed(iter/s)": 0.672691 }, { "epoch": 2.8736986418748125, "grad_norm": 5.246105670928955, "learning_rate": 3.8402836135438755e-05, "loss": 2.1576469421386717, "memory(GiB)": 72.85, "step": 67075, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.672697 }, { "epoch": 2.8739128572040613, "grad_norm": 
3.3897433280944824, "learning_rate": 3.839628999299393e-05, "loss": 2.271307373046875, "memory(GiB)": 72.85, "step": 67080, "token_acc": 0.5028571428571429, "train_speed(iter/s)": 0.672694 }, { "epoch": 2.8741270725333106, "grad_norm": 4.639877796173096, "learning_rate": 3.838974406076118e-05, "loss": 2.260869598388672, "memory(GiB)": 72.85, "step": 67085, "token_acc": 0.4965753424657534, "train_speed(iter/s)": 0.672679 }, { "epoch": 2.8743412878625594, "grad_norm": 5.033101558685303, "learning_rate": 3.8383198338859125e-05, "loss": 2.36654052734375, "memory(GiB)": 72.85, "step": 67090, "token_acc": 0.49825783972125437, "train_speed(iter/s)": 0.672681 }, { "epoch": 2.874555503191808, "grad_norm": 4.187601566314697, "learning_rate": 3.837665282740632e-05, "loss": 2.2645662307739256, "memory(GiB)": 72.85, "step": 67095, "token_acc": 0.48355263157894735, "train_speed(iter/s)": 0.672683 }, { "epoch": 2.8747697185210574, "grad_norm": 5.523599624633789, "learning_rate": 3.837010752652135e-05, "loss": 2.3759742736816407, "memory(GiB)": 72.85, "step": 67100, "token_acc": 0.48322147651006714, "train_speed(iter/s)": 0.672692 }, { "epoch": 2.8749839338503063, "grad_norm": 5.238307952880859, "learning_rate": 3.8363562436322794e-05, "loss": 2.1781749725341797, "memory(GiB)": 72.85, "step": 67105, "token_acc": 0.5405405405405406, "train_speed(iter/s)": 0.672686 }, { "epoch": 2.875198149179555, "grad_norm": 6.765161514282227, "learning_rate": 3.835701755692922e-05, "loss": 2.2129066467285154, "memory(GiB)": 72.85, "step": 67110, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672675 }, { "epoch": 2.8754123645088043, "grad_norm": 5.574646949768066, "learning_rate": 3.8350472888459196e-05, "loss": 1.9546340942382812, "memory(GiB)": 72.85, "step": 67115, "token_acc": 0.5793357933579336, "train_speed(iter/s)": 0.672658 }, { "epoch": 2.875626579838053, "grad_norm": 4.5640435218811035, "learning_rate": 3.8343928431031276e-05, "loss": 2.0846038818359376, "memory(GiB)": 72.85, 
"step": 67120, "token_acc": 0.5511551155115512, "train_speed(iter/s)": 0.672665 }, { "epoch": 2.875840795167302, "grad_norm": 4.055357456207275, "learning_rate": 3.8337384184764035e-05, "loss": 2.2787166595458985, "memory(GiB)": 72.85, "step": 67125, "token_acc": 0.49310344827586206, "train_speed(iter/s)": 0.672674 }, { "epoch": 2.876055010496551, "grad_norm": 5.524316787719727, "learning_rate": 3.8330840149776017e-05, "loss": 2.1599262237548826, "memory(GiB)": 72.85, "step": 67130, "token_acc": 0.47333333333333333, "train_speed(iter/s)": 0.672686 }, { "epoch": 2.8762692258258, "grad_norm": 4.8821892738342285, "learning_rate": 3.8324296326185785e-05, "loss": 2.2006580352783205, "memory(GiB)": 72.85, "step": 67135, "token_acc": 0.5326460481099656, "train_speed(iter/s)": 0.672676 }, { "epoch": 2.876483441155049, "grad_norm": 4.22332239151001, "learning_rate": 3.831775271411188e-05, "loss": 2.2176021575927733, "memory(GiB)": 72.85, "step": 67140, "token_acc": 0.5407166123778502, "train_speed(iter/s)": 0.672667 }, { "epoch": 2.876697656484298, "grad_norm": 4.082303524017334, "learning_rate": 3.8311209313672826e-05, "loss": 1.8966829299926757, "memory(GiB)": 72.85, "step": 67145, "token_acc": 0.5636363636363636, "train_speed(iter/s)": 0.672667 }, { "epoch": 2.876911871813547, "grad_norm": 4.756270408630371, "learning_rate": 3.830466612498719e-05, "loss": 2.173515510559082, "memory(GiB)": 72.85, "step": 67150, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.672662 }, { "epoch": 2.8771260871427957, "grad_norm": 6.207439422607422, "learning_rate": 3.82981231481735e-05, "loss": 2.622671890258789, "memory(GiB)": 72.85, "step": 67155, "token_acc": 0.493006993006993, "train_speed(iter/s)": 0.672656 }, { "epoch": 2.877340302472045, "grad_norm": 4.115400314331055, "learning_rate": 3.829158038335028e-05, "loss": 2.2239288330078124, "memory(GiB)": 72.85, "step": 67160, "token_acc": 0.525691699604743, "train_speed(iter/s)": 0.672669 }, { "epoch": 2.877554517801294, 
"grad_norm": 4.080145359039307, "learning_rate": 3.8285037830636075e-05, "loss": 2.4481014251708983, "memory(GiB)": 72.85, "step": 67165, "token_acc": 0.49666666666666665, "train_speed(iter/s)": 0.67266 }, { "epoch": 2.8777687331305426, "grad_norm": 5.574477195739746, "learning_rate": 3.8278495490149394e-05, "loss": 2.2815528869628907, "memory(GiB)": 72.85, "step": 67170, "token_acc": 0.4936708860759494, "train_speed(iter/s)": 0.672656 }, { "epoch": 2.877982948459792, "grad_norm": 7.476324558258057, "learning_rate": 3.827195336200876e-05, "loss": 2.259065628051758, "memory(GiB)": 72.85, "step": 67175, "token_acc": 0.5039370078740157, "train_speed(iter/s)": 0.672648 }, { "epoch": 2.8781971637890407, "grad_norm": 5.347029685974121, "learning_rate": 3.826541144633271e-05, "loss": 2.244312286376953, "memory(GiB)": 72.85, "step": 67180, "token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.8784113791182895, "grad_norm": 4.859315395355225, "learning_rate": 3.8258869743239714e-05, "loss": 2.498709869384766, "memory(GiB)": 72.85, "step": 67185, "token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.672642 }, { "epoch": 2.8786255944475387, "grad_norm": 5.336812973022461, "learning_rate": 3.8252328252848296e-05, "loss": 2.1675752639770507, "memory(GiB)": 72.85, "step": 67190, "token_acc": 0.49127906976744184, "train_speed(iter/s)": 0.672636 }, { "epoch": 2.8788398097767876, "grad_norm": 4.777942180633545, "learning_rate": 3.8245786975276996e-05, "loss": 2.165422058105469, "memory(GiB)": 72.85, "step": 67195, "token_acc": 0.5392857142857143, "train_speed(iter/s)": 0.672632 }, { "epoch": 2.8790540251060364, "grad_norm": 5.338001728057861, "learning_rate": 3.8239245910644296e-05, "loss": 2.2240535736083986, "memory(GiB)": 72.85, "step": 67200, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.672626 }, { "epoch": 2.8792682404352856, "grad_norm": 4.970613479614258, "learning_rate": 3.8232705059068674e-05, "loss": 2.327829360961914, 
"memory(GiB)": 72.85, "step": 67205, "token_acc": 0.4866920152091255, "train_speed(iter/s)": 0.672629 }, { "epoch": 2.8794824557645344, "grad_norm": 5.083037853240967, "learning_rate": 3.822616442066865e-05, "loss": 2.109747123718262, "memory(GiB)": 72.85, "step": 67210, "token_acc": 0.49050632911392406, "train_speed(iter/s)": 0.672626 }, { "epoch": 2.8796966710937832, "grad_norm": 10.919990539550781, "learning_rate": 3.8219623995562694e-05, "loss": 2.3034507751464846, "memory(GiB)": 72.85, "step": 67215, "token_acc": 0.4979919678714859, "train_speed(iter/s)": 0.672635 }, { "epoch": 2.8799108864230325, "grad_norm": 5.697817802429199, "learning_rate": 3.821308378386931e-05, "loss": 2.002923583984375, "memory(GiB)": 72.85, "step": 67220, "token_acc": 0.5485232067510548, "train_speed(iter/s)": 0.672633 }, { "epoch": 2.8801251017522813, "grad_norm": 4.448006629943848, "learning_rate": 3.820654378570697e-05, "loss": 2.3354482650756836, "memory(GiB)": 72.85, "step": 67225, "token_acc": 0.4837758112094395, "train_speed(iter/s)": 0.672639 }, { "epoch": 2.88033931708153, "grad_norm": 5.294830322265625, "learning_rate": 3.820000400119414e-05, "loss": 2.149026107788086, "memory(GiB)": 72.85, "step": 67230, "token_acc": 0.5300353356890459, "train_speed(iter/s)": 0.672646 }, { "epoch": 2.8805535324107794, "grad_norm": 5.8129191398620605, "learning_rate": 3.819346443044932e-05, "loss": 2.2126323699951174, "memory(GiB)": 72.85, "step": 67235, "token_acc": 0.5018315018315018, "train_speed(iter/s)": 0.67264 }, { "epoch": 2.880767747740028, "grad_norm": 4.071712970733643, "learning_rate": 3.818692507359096e-05, "loss": 2.2038503646850587, "memory(GiB)": 72.85, "step": 67240, "token_acc": 0.5381679389312977, "train_speed(iter/s)": 0.672632 }, { "epoch": 2.880981963069277, "grad_norm": 3.9051332473754883, "learning_rate": 3.818038593073753e-05, "loss": 2.2482208251953124, "memory(GiB)": 72.85, "step": 67245, "token_acc": 0.49411764705882355, "train_speed(iter/s)": 0.67263 }, { 
"epoch": 2.8811961783985263, "grad_norm": 5.650581359863281, "learning_rate": 3.817384700200751e-05, "loss": 1.9682844161987305, "memory(GiB)": 72.85, "step": 67250, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672607 }, { "epoch": 2.881410393727775, "grad_norm": 4.9359049797058105, "learning_rate": 3.816730828751933e-05, "loss": 2.4801395416259764, "memory(GiB)": 72.85, "step": 67255, "token_acc": 0.47157190635451507, "train_speed(iter/s)": 0.672604 }, { "epoch": 2.881624609057024, "grad_norm": 4.57049560546875, "learning_rate": 3.816076978739147e-05, "loss": 2.3034439086914062, "memory(GiB)": 72.85, "step": 67260, "token_acc": 0.5096153846153846, "train_speed(iter/s)": 0.672596 }, { "epoch": 2.881838824386273, "grad_norm": 7.000427722930908, "learning_rate": 3.815423150174236e-05, "loss": 2.309239959716797, "memory(GiB)": 72.85, "step": 67265, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.882053039715522, "grad_norm": 4.919317245483398, "learning_rate": 3.8147693430690465e-05, "loss": 2.008388328552246, "memory(GiB)": 72.85, "step": 67270, "token_acc": 0.5559701492537313, "train_speed(iter/s)": 0.6726 }, { "epoch": 2.8822672550447708, "grad_norm": 6.720444679260254, "learning_rate": 3.8141155574354214e-05, "loss": 2.4543025970458983, "memory(GiB)": 72.85, "step": 67275, "token_acc": 0.48299319727891155, "train_speed(iter/s)": 0.672604 }, { "epoch": 2.88248147037402, "grad_norm": 4.933783531188965, "learning_rate": 3.813461793285207e-05, "loss": 2.493115234375, "memory(GiB)": 72.85, "step": 67280, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.67261 }, { "epoch": 2.882695685703269, "grad_norm": 7.094781875610352, "learning_rate": 3.8128080506302455e-05, "loss": 2.1331939697265625, "memory(GiB)": 72.85, "step": 67285, "token_acc": 0.541958041958042, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.8829099010325177, "grad_norm": 4.379166603088379, "learning_rate": 3.812154329482378e-05, "loss": 
2.098763084411621, "memory(GiB)": 72.85, "step": 67290, "token_acc": 0.5316901408450704, "train_speed(iter/s)": 0.672602 }, { "epoch": 2.883124116361767, "grad_norm": 4.448056697845459, "learning_rate": 3.8115006298534506e-05, "loss": 2.4141819000244142, "memory(GiB)": 72.85, "step": 67295, "token_acc": 0.546875, "train_speed(iter/s)": 0.672612 }, { "epoch": 2.8833383316910157, "grad_norm": 3.7361748218536377, "learning_rate": 3.8108469517553046e-05, "loss": 2.2607952117919923, "memory(GiB)": 72.85, "step": 67300, "token_acc": 0.5120274914089347, "train_speed(iter/s)": 0.67261 }, { "epoch": 2.8835525470202645, "grad_norm": 5.536273956298828, "learning_rate": 3.81019329519978e-05, "loss": 2.3510244369506834, "memory(GiB)": 72.85, "step": 67305, "token_acc": 0.5204460966542751, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.883766762349514, "grad_norm": 4.297797203063965, "learning_rate": 3.809539660198721e-05, "loss": 2.1626121520996096, "memory(GiB)": 72.85, "step": 67310, "token_acc": 0.5261437908496732, "train_speed(iter/s)": 0.672613 }, { "epoch": 2.8839809776787626, "grad_norm": 4.121703147888184, "learning_rate": 3.808886046763967e-05, "loss": 2.1933872222900392, "memory(GiB)": 72.85, "step": 67315, "token_acc": 0.5050505050505051, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.8841951930080114, "grad_norm": 5.870772838592529, "learning_rate": 3.80823245490736e-05, "loss": 2.1140625, "memory(GiB)": 72.85, "step": 67320, "token_acc": 0.5478927203065134, "train_speed(iter/s)": 0.672631 }, { "epoch": 2.8844094083372607, "grad_norm": 5.653231620788574, "learning_rate": 3.807578884640741e-05, "loss": 2.143195152282715, "memory(GiB)": 72.85, "step": 67325, "token_acc": 0.5055350553505535, "train_speed(iter/s)": 0.672627 }, { "epoch": 2.8846236236665095, "grad_norm": 5.2142109870910645, "learning_rate": 3.806925335975948e-05, "loss": 2.0850454330444337, "memory(GiB)": 72.85, "step": 67330, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.672627 }, { 
"epoch": 2.8848378389957583, "grad_norm": 5.046900272369385, "learning_rate": 3.806271808924822e-05, "loss": 2.2516925811767576, "memory(GiB)": 72.85, "step": 67335, "token_acc": 0.49480968858131485, "train_speed(iter/s)": 0.672635 }, { "epoch": 2.8850520543250076, "grad_norm": 4.315749168395996, "learning_rate": 3.805618303499201e-05, "loss": 2.4605857849121096, "memory(GiB)": 72.85, "step": 67340, "token_acc": 0.49107142857142855, "train_speed(iter/s)": 0.672631 }, { "epoch": 2.8852662696542564, "grad_norm": 4.8968729972839355, "learning_rate": 3.804964819710926e-05, "loss": 2.267852020263672, "memory(GiB)": 72.85, "step": 67345, "token_acc": 0.5, "train_speed(iter/s)": 0.67263 }, { "epoch": 2.885480484983505, "grad_norm": 5.261877536773682, "learning_rate": 3.804311357571834e-05, "loss": 2.0929798126220702, "memory(GiB)": 72.85, "step": 67350, "token_acc": 0.5261044176706827, "train_speed(iter/s)": 0.672621 }, { "epoch": 2.8856947003127544, "grad_norm": 6.413607120513916, "learning_rate": 3.8036579170937636e-05, "loss": 2.4439334869384766, "memory(GiB)": 72.85, "step": 67355, "token_acc": 0.5298245614035088, "train_speed(iter/s)": 0.672624 }, { "epoch": 2.8859089156420032, "grad_norm": 6.6018877029418945, "learning_rate": 3.803004498288551e-05, "loss": 2.4033929824829103, "memory(GiB)": 72.85, "step": 67360, "token_acc": 0.4612903225806452, "train_speed(iter/s)": 0.672635 }, { "epoch": 2.886123130971252, "grad_norm": 5.383670330047607, "learning_rate": 3.802351101168038e-05, "loss": 2.2579929351806642, "memory(GiB)": 72.85, "step": 67365, "token_acc": 0.5222929936305732, "train_speed(iter/s)": 0.672642 }, { "epoch": 2.8863373463005013, "grad_norm": 5.166397571563721, "learning_rate": 3.801697725744056e-05, "loss": 2.512729454040527, "memory(GiB)": 72.85, "step": 67370, "token_acc": 0.49825783972125437, "train_speed(iter/s)": 0.672641 }, { "epoch": 2.88655156162975, "grad_norm": 5.634330749511719, "learning_rate": 3.801044372028443e-05, "loss": 
2.4630014419555666, "memory(GiB)": 72.85, "step": 67375, "token_acc": 0.49666666666666665, "train_speed(iter/s)": 0.67265 }, { "epoch": 2.886765776958999, "grad_norm": 5.8359880447387695, "learning_rate": 3.800391040033038e-05, "loss": 2.170125961303711, "memory(GiB)": 72.85, "step": 67380, "token_acc": 0.5267175572519084, "train_speed(iter/s)": 0.672648 }, { "epoch": 2.886979992288248, "grad_norm": 5.700636386871338, "learning_rate": 3.799737729769674e-05, "loss": 2.322741889953613, "memory(GiB)": 72.85, "step": 67385, "token_acc": 0.5126582278481012, "train_speed(iter/s)": 0.672635 }, { "epoch": 2.887194207617497, "grad_norm": 6.058968544006348, "learning_rate": 3.799084441250186e-05, "loss": 2.2775152206420897, "memory(GiB)": 72.85, "step": 67390, "token_acc": 0.5046728971962616, "train_speed(iter/s)": 0.67264 }, { "epoch": 2.887408422946746, "grad_norm": 4.9965901374816895, "learning_rate": 3.798431174486411e-05, "loss": 2.0590381622314453, "memory(GiB)": 72.85, "step": 67395, "token_acc": 0.5510204081632653, "train_speed(iter/s)": 0.672642 }, { "epoch": 2.887622638275995, "grad_norm": 5.135261535644531, "learning_rate": 3.7977779294901815e-05, "loss": 2.172952079772949, "memory(GiB)": 72.85, "step": 67400, "token_acc": 0.48046875, "train_speed(iter/s)": 0.672653 }, { "epoch": 2.887836853605244, "grad_norm": 5.321569919586182, "learning_rate": 3.7971247062733335e-05, "loss": 1.9936342239379883, "memory(GiB)": 72.85, "step": 67405, "token_acc": 0.541958041958042, "train_speed(iter/s)": 0.672651 }, { "epoch": 2.8880510689344927, "grad_norm": 6.070120334625244, "learning_rate": 3.7964715048477004e-05, "loss": 2.6316173553466795, "memory(GiB)": 72.85, "step": 67410, "token_acc": 0.48659003831417624, "train_speed(iter/s)": 0.672638 }, { "epoch": 2.888265284263742, "grad_norm": 4.572012424468994, "learning_rate": 3.795818325225112e-05, "loss": 2.719820022583008, "memory(GiB)": 72.85, "step": 67415, "token_acc": 0.47416413373860183, "train_speed(iter/s)": 0.672652 }, 
{ "epoch": 2.8884794995929908, "grad_norm": 6.040840148925781, "learning_rate": 3.795165167417405e-05, "loss": 2.4516544342041016, "memory(GiB)": 72.85, "step": 67420, "token_acc": 0.4769736842105263, "train_speed(iter/s)": 0.67265 }, { "epoch": 2.8886937149222396, "grad_norm": 4.253721714019775, "learning_rate": 3.7945120314364134e-05, "loss": 2.3059223175048826, "memory(GiB)": 72.85, "step": 67425, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.672652 }, { "epoch": 2.888907930251489, "grad_norm": 4.836444854736328, "learning_rate": 3.7938589172939665e-05, "loss": 2.2202884674072267, "memory(GiB)": 72.85, "step": 67430, "token_acc": 0.5055350553505535, "train_speed(iter/s)": 0.672662 }, { "epoch": 2.8891221455807377, "grad_norm": 4.619389057159424, "learning_rate": 3.793205825001896e-05, "loss": 2.105497360229492, "memory(GiB)": 72.85, "step": 67435, "token_acc": 0.5613382899628253, "train_speed(iter/s)": 0.672665 }, { "epoch": 2.8893363609099865, "grad_norm": 4.9450812339782715, "learning_rate": 3.792552754572035e-05, "loss": 2.4874383926391603, "memory(GiB)": 72.85, "step": 67440, "token_acc": 0.4900662251655629, "train_speed(iter/s)": 0.672678 }, { "epoch": 2.8895505762392357, "grad_norm": 3.722170352935791, "learning_rate": 3.7918997060162134e-05, "loss": 1.7564483642578126, "memory(GiB)": 72.85, "step": 67445, "token_acc": 0.6016260162601627, "train_speed(iter/s)": 0.672692 }, { "epoch": 2.8897647915684845, "grad_norm": 4.563932418823242, "learning_rate": 3.7912466793462606e-05, "loss": 2.1432727813720702, "memory(GiB)": 72.85, "step": 67450, "token_acc": 0.5507246376811594, "train_speed(iter/s)": 0.672678 }, { "epoch": 2.8899790068977333, "grad_norm": 5.213963031768799, "learning_rate": 3.79059367457401e-05, "loss": 2.460200309753418, "memory(GiB)": 72.85, "step": 67455, "token_acc": 0.47017543859649125, "train_speed(iter/s)": 0.672678 }, { "epoch": 2.8901932222269826, "grad_norm": 4.203242778778076, "learning_rate": 3.789940691711288e-05, 
"loss": 2.3080814361572264, "memory(GiB)": 72.85, "step": 67460, "token_acc": 0.5, "train_speed(iter/s)": 0.672675 }, { "epoch": 2.8904074375562314, "grad_norm": 6.500298023223877, "learning_rate": 3.789287730769927e-05, "loss": 2.098206901550293, "memory(GiB)": 72.85, "step": 67465, "token_acc": 0.5665529010238908, "train_speed(iter/s)": 0.672679 }, { "epoch": 2.8906216528854802, "grad_norm": 4.021356582641602, "learning_rate": 3.7886347917617536e-05, "loss": 2.1247737884521483, "memory(GiB)": 72.85, "step": 67470, "token_acc": 0.5511551155115512, "train_speed(iter/s)": 0.672695 }, { "epoch": 2.8908358682147295, "grad_norm": 5.419811248779297, "learning_rate": 3.787981874698597e-05, "loss": 2.233357238769531, "memory(GiB)": 72.85, "step": 67475, "token_acc": 0.5149253731343284, "train_speed(iter/s)": 0.672694 }, { "epoch": 2.8910500835439783, "grad_norm": 5.073150157928467, "learning_rate": 3.787328979592286e-05, "loss": 2.3746042251586914, "memory(GiB)": 72.85, "step": 67480, "token_acc": 0.5076335877862596, "train_speed(iter/s)": 0.672703 }, { "epoch": 2.891264298873227, "grad_norm": 5.341644287109375, "learning_rate": 3.7866761064546494e-05, "loss": 2.4041297912597654, "memory(GiB)": 72.85, "step": 67485, "token_acc": 0.4863013698630137, "train_speed(iter/s)": 0.672705 }, { "epoch": 2.8914785142024764, "grad_norm": 3.954209089279175, "learning_rate": 3.78602325529751e-05, "loss": 2.123691940307617, "memory(GiB)": 72.85, "step": 67490, "token_acc": 0.5531914893617021, "train_speed(iter/s)": 0.672713 }, { "epoch": 2.891692729531725, "grad_norm": 4.594150543212891, "learning_rate": 3.7853704261326995e-05, "loss": 2.3642704010009767, "memory(GiB)": 72.85, "step": 67495, "token_acc": 0.43573667711598746, "train_speed(iter/s)": 0.672713 }, { "epoch": 2.891906944860974, "grad_norm": 7.219204425811768, "learning_rate": 3.784717618972044e-05, "loss": 2.3123590469360353, "memory(GiB)": 72.85, "step": 67500, "token_acc": 0.4660493827160494, "train_speed(iter/s)": 0.672717 
}, { "epoch": 2.891906944860974, "eval_loss": 2.1370861530303955, "eval_runtime": 16.06, "eval_samples_per_second": 6.227, "eval_steps_per_second": 6.227, "eval_token_acc": 0.4899425287356322, "step": 67500 }, { "epoch": 2.8921211601902233, "grad_norm": 4.741679668426514, "learning_rate": 3.784064833827368e-05, "loss": 2.1725467681884765, "memory(GiB)": 72.85, "step": 67505, "token_acc": 0.5, "train_speed(iter/s)": 0.672589 }, { "epoch": 2.892335375519472, "grad_norm": 4.569614887237549, "learning_rate": 3.7834120707105e-05, "loss": 2.1404487609863283, "memory(GiB)": 72.85, "step": 67510, "token_acc": 0.5531914893617021, "train_speed(iter/s)": 0.672593 }, { "epoch": 2.892549590848721, "grad_norm": 5.427907466888428, "learning_rate": 3.7827593296332624e-05, "loss": 2.510986328125, "memory(GiB)": 72.85, "step": 67515, "token_acc": 0.45714285714285713, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.89276380617797, "grad_norm": 4.660200119018555, "learning_rate": 3.7821066106074807e-05, "loss": 2.3903316497802733, "memory(GiB)": 72.85, "step": 67520, "token_acc": 0.49110320284697506, "train_speed(iter/s)": 0.672603 }, { "epoch": 2.892978021507219, "grad_norm": 4.511118412017822, "learning_rate": 3.781453913644982e-05, "loss": 2.325713348388672, "memory(GiB)": 72.85, "step": 67525, "token_acc": 0.4930555555555556, "train_speed(iter/s)": 0.67259 }, { "epoch": 2.8931922368364678, "grad_norm": 3.798933506011963, "learning_rate": 3.780801238757588e-05, "loss": 2.188010406494141, "memory(GiB)": 72.85, "step": 67530, "token_acc": 0.49291784702549574, "train_speed(iter/s)": 0.672587 }, { "epoch": 2.893406452165717, "grad_norm": 3.93003249168396, "learning_rate": 3.780148585957122e-05, "loss": 2.223775863647461, "memory(GiB)": 72.85, "step": 67535, "token_acc": 0.5320754716981132, "train_speed(iter/s)": 0.672602 }, { "epoch": 2.893620667494966, "grad_norm": 5.1619791984558105, "learning_rate": 3.7794959552554104e-05, "loss": 1.8930835723876953, "memory(GiB)": 72.85, "step": 
67540, "token_acc": 0.5902439024390244, "train_speed(iter/s)": 0.672613 }, { "epoch": 2.8938348828242146, "grad_norm": 3.5159072875976562, "learning_rate": 3.778843346664273e-05, "loss": 2.427535820007324, "memory(GiB)": 72.85, "step": 67545, "token_acc": 0.5053191489361702, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.894049098153464, "grad_norm": 5.764401912689209, "learning_rate": 3.778190760195535e-05, "loss": 2.057868003845215, "memory(GiB)": 72.85, "step": 67550, "token_acc": 0.5395189003436426, "train_speed(iter/s)": 0.672601 }, { "epoch": 2.8942633134827127, "grad_norm": 4.311509132385254, "learning_rate": 3.777538195861018e-05, "loss": 1.9101211547851562, "memory(GiB)": 72.85, "step": 67555, "token_acc": 0.5787545787545788, "train_speed(iter/s)": 0.672598 }, { "epoch": 2.8944775288119615, "grad_norm": 7.326721668243408, "learning_rate": 3.7768856536725405e-05, "loss": 2.064284324645996, "memory(GiB)": 72.85, "step": 67560, "token_acc": 0.5195729537366548, "train_speed(iter/s)": 0.672593 }, { "epoch": 2.8946917441412108, "grad_norm": 6.422009468078613, "learning_rate": 3.776233133641928e-05, "loss": 2.168759346008301, "memory(GiB)": 72.85, "step": 67565, "token_acc": 0.5308641975308642, "train_speed(iter/s)": 0.67258 }, { "epoch": 2.8949059594704596, "grad_norm": 4.962710857391357, "learning_rate": 3.775580635781002e-05, "loss": 2.3046590805053713, "memory(GiB)": 72.85, "step": 67570, "token_acc": 0.5202702702702703, "train_speed(iter/s)": 0.672593 }, { "epoch": 2.8951201747997084, "grad_norm": 6.058051586151123, "learning_rate": 3.774928160101581e-05, "loss": 2.114402198791504, "memory(GiB)": 72.85, "step": 67575, "token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.672586 }, { "epoch": 2.8953343901289577, "grad_norm": 5.704201698303223, "learning_rate": 3.774275706615484e-05, "loss": 2.105500411987305, "memory(GiB)": 72.85, "step": 67580, "token_acc": 0.528957528957529, "train_speed(iter/s)": 0.6726 }, { "epoch": 2.8955486054582065, "grad_norm": 
4.338428020477295, "learning_rate": 3.7736232753345344e-05, "loss": 2.1678050994873046, "memory(GiB)": 72.85, "step": 67585, "token_acc": 0.525974025974026, "train_speed(iter/s)": 0.672601 }, { "epoch": 2.8957628207874553, "grad_norm": 4.122265338897705, "learning_rate": 3.7729708662705473e-05, "loss": 2.337223243713379, "memory(GiB)": 72.85, "step": 67590, "token_acc": 0.5361216730038023, "train_speed(iter/s)": 0.672605 }, { "epoch": 2.8959770361167045, "grad_norm": 3.7307891845703125, "learning_rate": 3.772318479435346e-05, "loss": 1.987023162841797, "memory(GiB)": 72.85, "step": 67595, "token_acc": 0.5474683544303798, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.8961912514459534, "grad_norm": 4.6032538414001465, "learning_rate": 3.7716661148407475e-05, "loss": 2.4418867111206053, "memory(GiB)": 72.85, "step": 67600, "token_acc": 0.4666666666666667, "train_speed(iter/s)": 0.672607 }, { "epoch": 2.896405466775202, "grad_norm": 6.635652542114258, "learning_rate": 3.7710137724985675e-05, "loss": 2.4919155120849608, "memory(GiB)": 72.85, "step": 67605, "token_acc": 0.5017543859649123, "train_speed(iter/s)": 0.672621 }, { "epoch": 2.8966196821044514, "grad_norm": 6.226921081542969, "learning_rate": 3.7703614524206276e-05, "loss": 2.3932661056518554, "memory(GiB)": 72.85, "step": 67610, "token_acc": 0.4812286689419795, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.8968338974337002, "grad_norm": 4.460200786590576, "learning_rate": 3.769709154618743e-05, "loss": 2.3721029281616213, "memory(GiB)": 72.85, "step": 67615, "token_acc": 0.47147147147147145, "train_speed(iter/s)": 0.672626 }, { "epoch": 2.897048112762949, "grad_norm": 5.19549036026001, "learning_rate": 3.76905687910473e-05, "loss": 2.2948402404785155, "memory(GiB)": 72.85, "step": 67620, "token_acc": 0.4752475247524752, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.8972623280921983, "grad_norm": 5.333555698394775, "learning_rate": 3.7684046258904083e-05, "loss": 2.42726993560791, "memory(GiB)": 72.85, 
"step": 67625, "token_acc": 0.4847457627118644, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.897476543421447, "grad_norm": 4.774133682250977, "learning_rate": 3.7677523949875916e-05, "loss": 2.3657432556152345, "memory(GiB)": 72.85, "step": 67630, "token_acc": 0.5080645161290323, "train_speed(iter/s)": 0.672641 }, { "epoch": 2.897690758750696, "grad_norm": 5.044535160064697, "learning_rate": 3.767100186408094e-05, "loss": 2.614129829406738, "memory(GiB)": 72.85, "step": 67635, "token_acc": 0.44108761329305135, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.897904974079945, "grad_norm": 5.067955493927002, "learning_rate": 3.7664480001637345e-05, "loss": 2.1111770629882813, "memory(GiB)": 72.85, "step": 67640, "token_acc": 0.5083056478405316, "train_speed(iter/s)": 0.672639 }, { "epoch": 2.898119189409194, "grad_norm": 4.776112079620361, "learning_rate": 3.765795836266327e-05, "loss": 2.2857112884521484, "memory(GiB)": 72.85, "step": 67645, "token_acc": 0.5154639175257731, "train_speed(iter/s)": 0.672635 }, { "epoch": 2.898333404738443, "grad_norm": 4.471993923187256, "learning_rate": 3.765143694727685e-05, "loss": 2.3836984634399414, "memory(GiB)": 72.85, "step": 67650, "token_acc": 0.5081967213114754, "train_speed(iter/s)": 0.672634 }, { "epoch": 2.898547620067692, "grad_norm": 5.017747402191162, "learning_rate": 3.764491575559625e-05, "loss": 2.326129913330078, "memory(GiB)": 72.85, "step": 67655, "token_acc": 0.4964788732394366, "train_speed(iter/s)": 0.672631 }, { "epoch": 2.898761835396941, "grad_norm": 4.650403022766113, "learning_rate": 3.7639698963399336e-05, "loss": 2.171596908569336, "memory(GiB)": 72.85, "step": 67660, "token_acc": 0.46296296296296297, "train_speed(iter/s)": 0.672636 }, { "epoch": 2.8989760507261897, "grad_norm": 3.857436418533325, "learning_rate": 3.763317817468688e-05, "loss": 2.038702392578125, "memory(GiB)": 72.85, "step": 67665, "token_acc": 0.5140845070422535, "train_speed(iter/s)": 0.672636 }, { "epoch": 2.899190266055439, 
"grad_norm": 3.9320757389068604, "learning_rate": 3.7626657610010996e-05, "loss": 2.1762771606445312, "memory(GiB)": 72.85, "step": 67670, "token_acc": 0.5400696864111498, "train_speed(iter/s)": 0.672627 }, { "epoch": 2.8994044813846878, "grad_norm": 3.6492598056793213, "learning_rate": 3.762013726948982e-05, "loss": 2.0171857833862306, "memory(GiB)": 72.85, "step": 67675, "token_acc": 0.5522875816993464, "train_speed(iter/s)": 0.67262 }, { "epoch": 2.8996186967139366, "grad_norm": 3.7531888484954834, "learning_rate": 3.7613617153241455e-05, "loss": 2.3570871353149414, "memory(GiB)": 72.85, "step": 67680, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.672608 }, { "epoch": 2.899832912043186, "grad_norm": 4.567779064178467, "learning_rate": 3.7607097261384054e-05, "loss": 2.2721321105957033, "memory(GiB)": 72.85, "step": 67685, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672617 }, { "epoch": 2.9000471273724346, "grad_norm": 4.3945417404174805, "learning_rate": 3.76005775940357e-05, "loss": 1.887613296508789, "memory(GiB)": 72.85, "step": 67690, "token_acc": 0.5521739130434783, "train_speed(iter/s)": 0.672625 }, { "epoch": 2.900261342701684, "grad_norm": 5.335206985473633, "learning_rate": 3.7594058151314526e-05, "loss": 2.0379732131958006, "memory(GiB)": 72.85, "step": 67695, "token_acc": 0.5444839857651246, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.9004755580309327, "grad_norm": 5.607467174530029, "learning_rate": 3.758753893333863e-05, "loss": 1.959050750732422, "memory(GiB)": 72.85, "step": 67700, "token_acc": 0.5551020408163265, "train_speed(iter/s)": 0.672619 }, { "epoch": 2.9006897733601815, "grad_norm": 6.700648784637451, "learning_rate": 3.75810199402261e-05, "loss": 2.4442909240722654, "memory(GiB)": 72.85, "step": 67705, "token_acc": 0.46303501945525294, "train_speed(iter/s)": 0.672605 }, { "epoch": 2.900903988689431, "grad_norm": 8.795781135559082, "learning_rate": 3.7574501172095045e-05, "loss": 2.1327890396118163, 
"memory(GiB)": 72.85, "step": 67710, "token_acc": 0.5376344086021505, "train_speed(iter/s)": 0.672621 }, { "epoch": 2.9011182040186796, "grad_norm": 4.023645877838135, "learning_rate": 3.7567982629063554e-05, "loss": 2.0125118255615235, "memory(GiB)": 72.85, "step": 67715, "token_acc": 0.5503597122302158, "train_speed(iter/s)": 0.672619 }, { "epoch": 2.9013324193479284, "grad_norm": 4.459312915802002, "learning_rate": 3.756146431124973e-05, "loss": 1.9282081604003907, "memory(GiB)": 72.85, "step": 67720, "token_acc": 0.5900383141762452, "train_speed(iter/s)": 0.672628 }, { "epoch": 2.9015466346771777, "grad_norm": 5.3154120445251465, "learning_rate": 3.755494621877165e-05, "loss": 2.0189666748046875, "memory(GiB)": 72.85, "step": 67725, "token_acc": 0.5166666666666667, "train_speed(iter/s)": 0.672634 }, { "epoch": 2.9017608500064265, "grad_norm": 4.562631130218506, "learning_rate": 3.7548428351747377e-05, "loss": 2.3070957183837892, "memory(GiB)": 72.85, "step": 67730, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.672644 }, { "epoch": 2.9019750653356753, "grad_norm": 4.149303436279297, "learning_rate": 3.754191071029502e-05, "loss": 2.385285568237305, "memory(GiB)": 72.85, "step": 67735, "token_acc": 0.4869281045751634, "train_speed(iter/s)": 0.672645 }, { "epoch": 2.9021892806649245, "grad_norm": 9.57109260559082, "learning_rate": 3.7535393294532635e-05, "loss": 2.3116313934326174, "memory(GiB)": 72.85, "step": 67740, "token_acc": 0.4955357142857143, "train_speed(iter/s)": 0.672653 }, { "epoch": 2.9024034959941734, "grad_norm": 5.862455368041992, "learning_rate": 3.752887610457828e-05, "loss": 2.220896339416504, "memory(GiB)": 72.85, "step": 67745, "token_acc": 0.532608695652174, "train_speed(iter/s)": 0.672655 }, { "epoch": 2.902617711323422, "grad_norm": 6.596020698547363, "learning_rate": 3.7522359140550054e-05, "loss": 2.4165245056152345, "memory(GiB)": 72.85, "step": 67750, "token_acc": 0.49673202614379086, "train_speed(iter/s)": 0.672665 }, { 
"epoch": 2.9028319266526714, "grad_norm": 4.283695220947266, "learning_rate": 3.7515842402565967e-05, "loss": 2.1797380447387695, "memory(GiB)": 72.85, "step": 67755, "token_acc": 0.545751633986928, "train_speed(iter/s)": 0.672667 }, { "epoch": 2.9030461419819202, "grad_norm": 4.458949089050293, "learning_rate": 3.750932589074413e-05, "loss": 2.3022422790527344, "memory(GiB)": 72.85, "step": 67760, "token_acc": 0.4866666666666667, "train_speed(iter/s)": 0.672671 }, { "epoch": 2.903260357311169, "grad_norm": 4.681413650512695, "learning_rate": 3.750280960520256e-05, "loss": 2.345174217224121, "memory(GiB)": 72.85, "step": 67765, "token_acc": 0.4786885245901639, "train_speed(iter/s)": 0.672681 }, { "epoch": 2.9034745726404183, "grad_norm": 4.913506984710693, "learning_rate": 3.749629354605932e-05, "loss": 2.0852424621582033, "memory(GiB)": 72.85, "step": 67770, "token_acc": 0.5055350553505535, "train_speed(iter/s)": 0.672685 }, { "epoch": 2.903688787969667, "grad_norm": 6.360473155975342, "learning_rate": 3.748977771343246e-05, "loss": 1.9799306869506836, "memory(GiB)": 72.85, "step": 67775, "token_acc": 0.5404255319148936, "train_speed(iter/s)": 0.67269 }, { "epoch": 2.903903003298916, "grad_norm": 3.7695372104644775, "learning_rate": 3.748326210744001e-05, "loss": 2.172661781311035, "memory(GiB)": 72.85, "step": 67780, "token_acc": 0.5218978102189781, "train_speed(iter/s)": 0.672678 }, { "epoch": 2.904117218628165, "grad_norm": 6.414247035980225, "learning_rate": 3.747674672820001e-05, "loss": 2.395595169067383, "memory(GiB)": 72.85, "step": 67785, "token_acc": 0.5147058823529411, "train_speed(iter/s)": 0.672676 }, { "epoch": 2.904331433957414, "grad_norm": 4.029166221618652, "learning_rate": 3.7470231575830484e-05, "loss": 2.225167655944824, "memory(GiB)": 72.85, "step": 67790, "token_acc": 0.5212121212121212, "train_speed(iter/s)": 0.672681 }, { "epoch": 2.904545649286663, "grad_norm": 4.049565315246582, "learning_rate": 3.746371665044948e-05, "loss": 
2.5352619171142576, "memory(GiB)": 72.85, "step": 67795, "token_acc": 0.4878048780487805, "train_speed(iter/s)": 0.672686 }, { "epoch": 2.904759864615912, "grad_norm": 4.216047286987305, "learning_rate": 3.7457201952175e-05, "loss": 2.0588064193725586, "memory(GiB)": 72.85, "step": 67800, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.672691 }, { "epoch": 2.904974079945161, "grad_norm": 5.47633695602417, "learning_rate": 3.745068748112506e-05, "loss": 2.5044013977050783, "memory(GiB)": 72.85, "step": 67805, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.672694 }, { "epoch": 2.9051882952744097, "grad_norm": 5.386184215545654, "learning_rate": 3.744417323741771e-05, "loss": 2.1697240829467774, "memory(GiB)": 72.85, "step": 67810, "token_acc": 0.497737556561086, "train_speed(iter/s)": 0.672692 }, { "epoch": 2.905402510603659, "grad_norm": 3.967008352279663, "learning_rate": 3.743765922117092e-05, "loss": 2.3930282592773438, "memory(GiB)": 72.85, "step": 67815, "token_acc": 0.48013245033112584, "train_speed(iter/s)": 0.6727 }, { "epoch": 2.9056167259329078, "grad_norm": 6.13978385925293, "learning_rate": 3.743114543250273e-05, "loss": 2.1618270874023438, "memory(GiB)": 72.85, "step": 67820, "token_acc": 0.5358490566037736, "train_speed(iter/s)": 0.672705 }, { "epoch": 2.9058309412621566, "grad_norm": 5.842589855194092, "learning_rate": 3.742463187153114e-05, "loss": 2.7059341430664063, "memory(GiB)": 72.85, "step": 67825, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.672705 }, { "epoch": 2.906045156591406, "grad_norm": 5.716278076171875, "learning_rate": 3.741811853837412e-05, "loss": 2.255761909484863, "memory(GiB)": 72.85, "step": 67830, "token_acc": 0.5502008032128514, "train_speed(iter/s)": 0.672705 }, { "epoch": 2.9062593719206546, "grad_norm": 7.204436779022217, "learning_rate": 3.741160543314968e-05, "loss": 2.1924274444580076, "memory(GiB)": 72.85, "step": 67835, "token_acc": 0.5273311897106109, "train_speed(iter/s)": 0.672697 
}, { "epoch": 2.9064735872499035, "grad_norm": 4.877670764923096, "learning_rate": 3.7405092555975835e-05, "loss": 2.1213539123535154, "memory(GiB)": 72.85, "step": 67840, "token_acc": 0.5528846153846154, "train_speed(iter/s)": 0.6727 }, { "epoch": 2.9066878025791527, "grad_norm": 3.999317169189453, "learning_rate": 3.739857990697056e-05, "loss": 2.3987428665161135, "memory(GiB)": 72.85, "step": 67845, "token_acc": 0.5031055900621118, "train_speed(iter/s)": 0.672695 }, { "epoch": 2.9069020179084015, "grad_norm": 4.292856216430664, "learning_rate": 3.739206748625181e-05, "loss": 2.4164424896240235, "memory(GiB)": 72.85, "step": 67850, "token_acc": 0.4495114006514658, "train_speed(iter/s)": 0.672693 }, { "epoch": 2.9071162332376503, "grad_norm": 6.282840251922607, "learning_rate": 3.7385555293937604e-05, "loss": 2.4097408294677733, "memory(GiB)": 72.85, "step": 67855, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672707 }, { "epoch": 2.9073304485668996, "grad_norm": 5.8182549476623535, "learning_rate": 3.737904333014588e-05, "loss": 2.2392702102661133, "memory(GiB)": 72.85, "step": 67860, "token_acc": 0.5087108013937283, "train_speed(iter/s)": 0.672716 }, { "epoch": 2.9075446638961484, "grad_norm": 5.3558831214904785, "learning_rate": 3.7372531594994634e-05, "loss": 2.2397274017333983, "memory(GiB)": 72.85, "step": 67865, "token_acc": 0.5470383275261324, "train_speed(iter/s)": 0.672721 }, { "epoch": 2.907758879225397, "grad_norm": 4.810769081115723, "learning_rate": 3.736602008860183e-05, "loss": 2.4665876388549806, "memory(GiB)": 72.85, "step": 67870, "token_acc": 0.49169435215946844, "train_speed(iter/s)": 0.672728 }, { "epoch": 2.9079730945546465, "grad_norm": 3.8449208736419678, "learning_rate": 3.7359508811085407e-05, "loss": 2.1348197937011717, "memory(GiB)": 72.85, "step": 67875, "token_acc": 0.5534591194968553, "train_speed(iter/s)": 0.672724 }, { "epoch": 2.9081873098838953, "grad_norm": 5.874060153961182, "learning_rate": 3.735299776256336e-05, 
"loss": 2.3182699203491213, "memory(GiB)": 72.85, "step": 67880, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.672721 }, { "epoch": 2.908401525213144, "grad_norm": 7.6752448081970215, "learning_rate": 3.7346486943153617e-05, "loss": 1.9385013580322266, "memory(GiB)": 72.85, "step": 67885, "token_acc": 0.5306859205776173, "train_speed(iter/s)": 0.672722 }, { "epoch": 2.9086157405423934, "grad_norm": 6.573065280914307, "learning_rate": 3.733997635297412e-05, "loss": 2.4651294708251954, "memory(GiB)": 72.85, "step": 67890, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.672726 }, { "epoch": 2.908829955871642, "grad_norm": 7.051931858062744, "learning_rate": 3.733346599214284e-05, "loss": 2.4945537567138674, "memory(GiB)": 72.85, "step": 67895, "token_acc": 0.4752475247524752, "train_speed(iter/s)": 0.672718 }, { "epoch": 2.909044171200891, "grad_norm": 4.377479553222656, "learning_rate": 3.73269558607777e-05, "loss": 2.3700750350952147, "memory(GiB)": 72.85, "step": 67900, "token_acc": 0.4906832298136646, "train_speed(iter/s)": 0.672713 }, { "epoch": 2.9092583865301402, "grad_norm": 6.164149761199951, "learning_rate": 3.7320445958996634e-05, "loss": 2.159802055358887, "memory(GiB)": 72.85, "step": 67905, "token_acc": 0.5232974910394266, "train_speed(iter/s)": 0.672721 }, { "epoch": 2.909472601859389, "grad_norm": 5.054595947265625, "learning_rate": 3.731393628691759e-05, "loss": 2.3596403121948244, "memory(GiB)": 72.85, "step": 67910, "token_acc": 0.4980694980694981, "train_speed(iter/s)": 0.672715 }, { "epoch": 2.909686817188638, "grad_norm": 4.281498908996582, "learning_rate": 3.73074268446585e-05, "loss": 2.1488908767700194, "memory(GiB)": 72.85, "step": 67915, "token_acc": 0.5708502024291497, "train_speed(iter/s)": 0.672713 }, { "epoch": 2.909901032517887, "grad_norm": 4.375678062438965, "learning_rate": 3.730091763233727e-05, "loss": 2.099007415771484, "memory(GiB)": 72.85, "step": 67920, "token_acc": 0.5016835016835017, 
"train_speed(iter/s)": 0.672708 }, { "epoch": 2.910115247847136, "grad_norm": 4.5657243728637695, "learning_rate": 3.7294408650071836e-05, "loss": 2.244467926025391, "memory(GiB)": 72.85, "step": 67925, "token_acc": 0.5098684210526315, "train_speed(iter/s)": 0.672715 }, { "epoch": 2.9103294631763847, "grad_norm": 3.5205187797546387, "learning_rate": 3.728789989798011e-05, "loss": 2.2266143798828124, "memory(GiB)": 72.85, "step": 67930, "token_acc": 0.54, "train_speed(iter/s)": 0.672716 }, { "epoch": 2.910543678505634, "grad_norm": 4.699172019958496, "learning_rate": 3.728139137617999e-05, "loss": 2.0774234771728515, "memory(GiB)": 72.85, "step": 67935, "token_acc": 0.5518518518518518, "train_speed(iter/s)": 0.672711 }, { "epoch": 2.910757893834883, "grad_norm": 4.476968765258789, "learning_rate": 3.727488308478941e-05, "loss": 2.4335844039916994, "memory(GiB)": 72.85, "step": 67940, "token_acc": 0.4623955431754875, "train_speed(iter/s)": 0.672711 }, { "epoch": 2.9109721091641316, "grad_norm": 6.480370044708252, "learning_rate": 3.7268375023926264e-05, "loss": 2.223103904724121, "memory(GiB)": 72.85, "step": 67945, "token_acc": 0.49809885931558934, "train_speed(iter/s)": 0.672706 }, { "epoch": 2.911186324493381, "grad_norm": 5.060086250305176, "learning_rate": 3.726186719370843e-05, "loss": 2.259468650817871, "memory(GiB)": 72.85, "step": 67950, "token_acc": 0.5016077170418006, "train_speed(iter/s)": 0.672713 }, { "epoch": 2.9114005398226297, "grad_norm": 4.129026412963867, "learning_rate": 3.725535959425384e-05, "loss": 2.074534606933594, "memory(GiB)": 72.85, "step": 67955, "token_acc": 0.5296296296296297, "train_speed(iter/s)": 0.672722 }, { "epoch": 2.9116147551518785, "grad_norm": 5.682302474975586, "learning_rate": 3.724885222568034e-05, "loss": 1.9657596588134765, "memory(GiB)": 72.85, "step": 67960, "token_acc": 0.5473251028806584, "train_speed(iter/s)": 0.672727 }, { "epoch": 2.9118289704811278, "grad_norm": 6.625432014465332, "learning_rate": 
3.724234508810587e-05, "loss": 2.1593250274658202, "memory(GiB)": 72.85, "step": 67965, "token_acc": 0.49382716049382713, "train_speed(iter/s)": 0.672718 }, { "epoch": 2.9120431858103766, "grad_norm": 6.17551851272583, "learning_rate": 3.723583818164827e-05, "loss": 2.018306541442871, "memory(GiB)": 72.85, "step": 67970, "token_acc": 0.5772357723577236, "train_speed(iter/s)": 0.672716 }, { "epoch": 2.9122574011396254, "grad_norm": 4.658281326293945, "learning_rate": 3.722933150642544e-05, "loss": 2.200631523132324, "memory(GiB)": 72.85, "step": 67975, "token_acc": 0.49137931034482757, "train_speed(iter/s)": 0.672731 }, { "epoch": 2.9124716164688746, "grad_norm": 5.339991569519043, "learning_rate": 3.7222825062555234e-05, "loss": 2.1935882568359375, "memory(GiB)": 72.85, "step": 67980, "token_acc": 0.5429553264604811, "train_speed(iter/s)": 0.67274 }, { "epoch": 2.9126858317981235, "grad_norm": 6.551102161407471, "learning_rate": 3.7216318850155565e-05, "loss": 1.9586727142333984, "memory(GiB)": 72.85, "step": 67985, "token_acc": 0.5617529880478087, "train_speed(iter/s)": 0.672746 }, { "epoch": 2.9129000471273723, "grad_norm": 5.390726089477539, "learning_rate": 3.720981286934426e-05, "loss": 2.111723709106445, "memory(GiB)": 72.85, "step": 67990, "token_acc": 0.5163934426229508, "train_speed(iter/s)": 0.672742 }, { "epoch": 2.9131142624566215, "grad_norm": 5.174891948699951, "learning_rate": 3.720330712023919e-05, "loss": 2.3583974838256836, "memory(GiB)": 72.85, "step": 67995, "token_acc": 0.5279503105590062, "train_speed(iter/s)": 0.672742 }, { "epoch": 2.9133284777858703, "grad_norm": 4.583150863647461, "learning_rate": 3.719680160295823e-05, "loss": 2.4645347595214844, "memory(GiB)": 72.85, "step": 68000, "token_acc": 0.4845360824742268, "train_speed(iter/s)": 0.672745 }, { "epoch": 2.9133284777858703, "eval_loss": 2.0709450244903564, "eval_runtime": 15.7121, "eval_samples_per_second": 6.365, "eval_steps_per_second": 6.365, "eval_token_acc": 0.5095298602287166, 
"step": 68000 }, { "epoch": 2.913542693115119, "grad_norm": 5.155501842498779, "learning_rate": 3.71902963176192e-05, "loss": 2.131342887878418, "memory(GiB)": 72.85, "step": 68005, "token_acc": 0.5186602870813397, "train_speed(iter/s)": 0.672595 }, { "epoch": 2.9137569084443684, "grad_norm": 4.351185321807861, "learning_rate": 3.7183791264339986e-05, "loss": 2.069799041748047, "memory(GiB)": 72.85, "step": 68010, "token_acc": 0.535483870967742, "train_speed(iter/s)": 0.672599 }, { "epoch": 2.9139711237736172, "grad_norm": 4.733451843261719, "learning_rate": 3.7177286443238405e-05, "loss": 2.3425012588500977, "memory(GiB)": 72.85, "step": 68015, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.672602 }, { "epoch": 2.914185339102866, "grad_norm": 5.439971923828125, "learning_rate": 3.717078185443231e-05, "loss": 2.2011831283569334, "memory(GiB)": 72.85, "step": 68020, "token_acc": 0.5309090909090909, "train_speed(iter/s)": 0.672601 }, { "epoch": 2.9143995544321153, "grad_norm": 4.849987983703613, "learning_rate": 3.716427749803955e-05, "loss": 2.0553653717041014, "memory(GiB)": 72.85, "step": 68025, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.672607 }, { "epoch": 2.914613769761364, "grad_norm": 4.292133808135986, "learning_rate": 3.715777337417793e-05, "loss": 2.14510555267334, "memory(GiB)": 72.85, "step": 68030, "token_acc": 0.5266666666666666, "train_speed(iter/s)": 0.672595 }, { "epoch": 2.9148279850906134, "grad_norm": 5.291376113891602, "learning_rate": 3.715126948296529e-05, "loss": 2.4529291152954102, "memory(GiB)": 72.85, "step": 68035, "token_acc": 0.5266903914590747, "train_speed(iter/s)": 0.672592 }, { "epoch": 2.915042200419862, "grad_norm": 4.615544319152832, "learning_rate": 3.7144765824519463e-05, "loss": 2.1189949035644533, "memory(GiB)": 72.85, "step": 68040, "token_acc": 0.5545171339563862, "train_speed(iter/s)": 0.672598 }, { "epoch": 2.915256415749111, "grad_norm": 5.091541290283203, "learning_rate": 
3.713826239895826e-05, "loss": 2.3396728515625, "memory(GiB)": 72.85, "step": 68045, "token_acc": 0.490272373540856, "train_speed(iter/s)": 0.672598 }, { "epoch": 2.9154706310783602, "grad_norm": 4.636651515960693, "learning_rate": 3.713175920639948e-05, "loss": 2.237504005432129, "memory(GiB)": 72.85, "step": 68050, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672587 }, { "epoch": 2.915684846407609, "grad_norm": 3.70780348777771, "learning_rate": 3.712525624696097e-05, "loss": 2.3679840087890627, "memory(GiB)": 72.85, "step": 68055, "token_acc": 0.4794520547945205, "train_speed(iter/s)": 0.672591 }, { "epoch": 2.915899061736858, "grad_norm": 4.679551124572754, "learning_rate": 3.711875352076053e-05, "loss": 2.3428253173828124, "memory(GiB)": 72.85, "step": 68060, "token_acc": 0.47019867549668876, "train_speed(iter/s)": 0.672606 }, { "epoch": 2.916113277066107, "grad_norm": 5.178483009338379, "learning_rate": 3.711225102791594e-05, "loss": 2.4857767105102537, "memory(GiB)": 72.85, "step": 68065, "token_acc": 0.4956268221574344, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.916327492395356, "grad_norm": 3.8206002712249756, "learning_rate": 3.710574876854502e-05, "loss": 2.2287235260009766, "memory(GiB)": 72.85, "step": 68070, "token_acc": 0.49169435215946844, "train_speed(iter/s)": 0.672606 }, { "epoch": 2.9165417077246047, "grad_norm": 4.389073848724365, "learning_rate": 3.709924674276557e-05, "loss": 2.2186328887939455, "memory(GiB)": 72.85, "step": 68075, "token_acc": 0.521875, "train_speed(iter/s)": 0.672607 }, { "epoch": 2.916755923053854, "grad_norm": 4.18182897567749, "learning_rate": 3.7092744950695345e-05, "loss": 2.5239490509033202, "memory(GiB)": 72.85, "step": 68080, "token_acc": 0.4420731707317073, "train_speed(iter/s)": 0.672608 }, { "epoch": 2.916970138383103, "grad_norm": 3.5205941200256348, "learning_rate": 3.7086243392452165e-05, "loss": 2.0503087997436524, "memory(GiB)": 72.85, "step": 68085, "token_acc": 0.5348837209302325, 
"train_speed(iter/s)": 0.672602 }, { "epoch": 2.9171843537123516, "grad_norm": 5.432882308959961, "learning_rate": 3.70797420681538e-05, "loss": 2.351008987426758, "memory(GiB)": 72.85, "step": 68090, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.672604 }, { "epoch": 2.917398569041601, "grad_norm": 3.96771240234375, "learning_rate": 3.707324097791802e-05, "loss": 2.333481216430664, "memory(GiB)": 72.85, "step": 68095, "token_acc": 0.4896142433234421, "train_speed(iter/s)": 0.67261 }, { "epoch": 2.9176127843708497, "grad_norm": 6.626757621765137, "learning_rate": 3.706674012186261e-05, "loss": 2.0873870849609375, "memory(GiB)": 72.85, "step": 68100, "token_acc": 0.5643939393939394, "train_speed(iter/s)": 0.672617 }, { "epoch": 2.9178269997000985, "grad_norm": 5.3062639236450195, "learning_rate": 3.706023950010533e-05, "loss": 2.0601707458496095, "memory(GiB)": 72.85, "step": 68105, "token_acc": 0.5174603174603175, "train_speed(iter/s)": 0.672618 }, { "epoch": 2.9180412150293478, "grad_norm": 5.310214042663574, "learning_rate": 3.7053739112763966e-05, "loss": 2.499184799194336, "memory(GiB)": 72.85, "step": 68110, "token_acc": 0.49063670411985016, "train_speed(iter/s)": 0.672624 }, { "epoch": 2.9182554303585966, "grad_norm": 5.222655296325684, "learning_rate": 3.704723895995625e-05, "loss": 2.4286830902099608, "memory(GiB)": 72.85, "step": 68115, "token_acc": 0.45674740484429066, "train_speed(iter/s)": 0.67264 }, { "epoch": 2.9184696456878454, "grad_norm": 4.556639194488525, "learning_rate": 3.7040739041799944e-05, "loss": 2.190491485595703, "memory(GiB)": 72.85, "step": 68120, "token_acc": 0.4891640866873065, "train_speed(iter/s)": 0.672636 }, { "epoch": 2.9186838610170946, "grad_norm": 5.904332160949707, "learning_rate": 3.70342393584128e-05, "loss": 1.9714794158935547, "memory(GiB)": 72.85, "step": 68125, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672643 }, { "epoch": 2.9188980763463435, "grad_norm": 3.6994543075561523, "learning_rate": 
3.70277399099126e-05, "loss": 2.327367973327637, "memory(GiB)": 72.85, "step": 68130, "token_acc": 0.5274390243902439, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.9191122916755923, "grad_norm": 4.08326530456543, "learning_rate": 3.7021240696417045e-05, "loss": 2.313716506958008, "memory(GiB)": 72.85, "step": 68135, "token_acc": 0.5243055555555556, "train_speed(iter/s)": 0.672639 }, { "epoch": 2.9193265070048415, "grad_norm": 5.491308689117432, "learning_rate": 3.7014741718043885e-05, "loss": 2.339634323120117, "memory(GiB)": 72.85, "step": 68140, "token_acc": 0.48823529411764705, "train_speed(iter/s)": 0.672642 }, { "epoch": 2.9195407223340903, "grad_norm": 4.516792297363281, "learning_rate": 3.700824297491088e-05, "loss": 2.37908992767334, "memory(GiB)": 72.85, "step": 68145, "token_acc": 0.44668587896253603, "train_speed(iter/s)": 0.672646 }, { "epoch": 2.919754937663339, "grad_norm": 5.945704936981201, "learning_rate": 3.7001744467135725e-05, "loss": 2.1904314041137694, "memory(GiB)": 72.85, "step": 68150, "token_acc": 0.5323741007194245, "train_speed(iter/s)": 0.672642 }, { "epoch": 2.9199691529925884, "grad_norm": 4.6440887451171875, "learning_rate": 3.699524619483617e-05, "loss": 2.473878288269043, "memory(GiB)": 72.85, "step": 68155, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.672649 }, { "epoch": 2.9201833683218372, "grad_norm": 4.4044365882873535, "learning_rate": 3.698874815812993e-05, "loss": 2.3374380111694335, "memory(GiB)": 72.85, "step": 68160, "token_acc": 0.45195729537366547, "train_speed(iter/s)": 0.672652 }, { "epoch": 2.920397583651086, "grad_norm": 4.600472450256348, "learning_rate": 3.6982250357134715e-05, "loss": 2.298040199279785, "memory(GiB)": 72.85, "step": 68165, "token_acc": 0.5239616613418531, "train_speed(iter/s)": 0.672651 }, { "epoch": 2.9206117989803353, "grad_norm": 4.547414302825928, "learning_rate": 3.697575279196825e-05, "loss": 2.2473386764526366, "memory(GiB)": 72.85, "step": 68170, "token_acc": 
0.5052264808362369, "train_speed(iter/s)": 0.672649 }, { "epoch": 2.920826014309584, "grad_norm": 4.687139987945557, "learning_rate": 3.696925546274825e-05, "loss": 2.361260986328125, "memory(GiB)": 72.85, "step": 68175, "token_acc": 0.4886731391585761, "train_speed(iter/s)": 0.672648 }, { "epoch": 2.921040229638833, "grad_norm": 5.141767501831055, "learning_rate": 3.6962758369592394e-05, "loss": 2.230039596557617, "memory(GiB)": 72.85, "step": 68180, "token_acc": 0.5060975609756098, "train_speed(iter/s)": 0.672644 }, { "epoch": 2.921254444968082, "grad_norm": 6.69974422454834, "learning_rate": 3.6956261512618414e-05, "loss": 2.1899320602416994, "memory(GiB)": 72.85, "step": 68185, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.672622 }, { "epoch": 2.921468660297331, "grad_norm": 4.461357116699219, "learning_rate": 3.694976489194398e-05, "loss": 2.05297966003418, "memory(GiB)": 72.85, "step": 68190, "token_acc": 0.5176056338028169, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.92168287562658, "grad_norm": 5.019514083862305, "learning_rate": 3.694326850768681e-05, "loss": 2.456589698791504, "memory(GiB)": 72.85, "step": 68195, "token_acc": 0.46875, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.921897090955829, "grad_norm": 5.324350833892822, "learning_rate": 3.693677235996455e-05, "loss": 1.931370162963867, "memory(GiB)": 72.85, "step": 68200, "token_acc": 0.5634920634920635, "train_speed(iter/s)": 0.672627 }, { "epoch": 2.922111306285078, "grad_norm": 4.085070610046387, "learning_rate": 3.6930276448894945e-05, "loss": 2.276102828979492, "memory(GiB)": 72.85, "step": 68205, "token_acc": 0.5136054421768708, "train_speed(iter/s)": 0.672621 }, { "epoch": 2.9223255216143267, "grad_norm": 5.143280029296875, "learning_rate": 3.692378077459563e-05, "loss": 2.3165056228637697, "memory(GiB)": 72.85, "step": 68210, "token_acc": 0.48355263157894735, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.922539736943576, "grad_norm": 3.963212251663208, "learning_rate": 
3.6917285337184306e-05, "loss": 2.187389945983887, "memory(GiB)": 72.85, "step": 68215, "token_acc": 0.512987012987013, "train_speed(iter/s)": 0.672612 }, { "epoch": 2.9227539522728248, "grad_norm": 5.101999282836914, "learning_rate": 3.6910790136778614e-05, "loss": 2.4587787628173827, "memory(GiB)": 72.85, "step": 68220, "token_acc": 0.49230769230769234, "train_speed(iter/s)": 0.672618 }, { "epoch": 2.9229681676020736, "grad_norm": 5.774129867553711, "learning_rate": 3.6904295173496245e-05, "loss": 2.301527404785156, "memory(GiB)": 72.85, "step": 68225, "token_acc": 0.49855907780979825, "train_speed(iter/s)": 0.672621 }, { "epoch": 2.923182382931323, "grad_norm": 5.001949310302734, "learning_rate": 3.6897800447454856e-05, "loss": 2.0663047790527345, "memory(GiB)": 72.85, "step": 68230, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.9233965982605716, "grad_norm": 4.269180774688721, "learning_rate": 3.68913059587721e-05, "loss": 2.3910051345825196, "memory(GiB)": 72.85, "step": 68235, "token_acc": 0.4805194805194805, "train_speed(iter/s)": 0.672624 }, { "epoch": 2.9236108135898204, "grad_norm": 4.819035530090332, "learning_rate": 3.688481170756564e-05, "loss": 2.170750617980957, "memory(GiB)": 72.85, "step": 68240, "token_acc": 0.5386996904024768, "train_speed(iter/s)": 0.672625 }, { "epoch": 2.9238250289190697, "grad_norm": 7.217181205749512, "learning_rate": 3.687831769395311e-05, "loss": 2.1399473190307616, "memory(GiB)": 72.85, "step": 68245, "token_acc": 0.5652173913043478, "train_speed(iter/s)": 0.672622 }, { "epoch": 2.9240392442483185, "grad_norm": 5.150470733642578, "learning_rate": 3.6871823918052165e-05, "loss": 2.361770439147949, "memory(GiB)": 72.85, "step": 68250, "token_acc": 0.5225806451612903, "train_speed(iter/s)": 0.672629 }, { "epoch": 2.9242534595775673, "grad_norm": 5.354013442993164, "learning_rate": 3.686533037998045e-05, "loss": 2.4733943939208984, "memory(GiB)": 72.85, "step": 68255, "token_acc": 
0.4805194805194805, "train_speed(iter/s)": 0.672621 }, { "epoch": 2.9244676749068166, "grad_norm": 7.648631572723389, "learning_rate": 3.68588370798556e-05, "loss": 2.383658599853516, "memory(GiB)": 72.85, "step": 68260, "token_acc": 0.48736462093862815, "train_speed(iter/s)": 0.672608 }, { "epoch": 2.9246818902360654, "grad_norm": 5.812209129333496, "learning_rate": 3.685234401779523e-05, "loss": 2.124114227294922, "memory(GiB)": 72.85, "step": 68265, "token_acc": 0.51985559566787, "train_speed(iter/s)": 0.672611 }, { "epoch": 2.924896105565314, "grad_norm": 5.247125148773193, "learning_rate": 3.684585119391697e-05, "loss": 2.258071708679199, "memory(GiB)": 72.85, "step": 68270, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672611 }, { "epoch": 2.9251103208945635, "grad_norm": 4.336661338806152, "learning_rate": 3.683935860833848e-05, "loss": 2.047326850891113, "memory(GiB)": 72.85, "step": 68275, "token_acc": 0.544973544973545, "train_speed(iter/s)": 0.67261 }, { "epoch": 2.9253245362238123, "grad_norm": 5.712910175323486, "learning_rate": 3.683286626117735e-05, "loss": 2.1956945419311524, "memory(GiB)": 72.85, "step": 68280, "token_acc": 0.5096774193548387, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.925538751553061, "grad_norm": 4.788073539733887, "learning_rate": 3.682637415255119e-05, "loss": 2.11734676361084, "memory(GiB)": 72.85, "step": 68285, "token_acc": 0.5472972972972973, "train_speed(iter/s)": 0.672616 }, { "epoch": 2.9257529668823103, "grad_norm": 4.940449237823486, "learning_rate": 3.681988228257763e-05, "loss": 2.22869987487793, "memory(GiB)": 72.85, "step": 68290, "token_acc": 0.48120300751879697, "train_speed(iter/s)": 0.672622 }, { "epoch": 2.925967182211559, "grad_norm": 4.3015851974487305, "learning_rate": 3.681339065137426e-05, "loss": 2.1543294906616213, "memory(GiB)": 72.85, "step": 68295, "token_acc": 0.532258064516129, "train_speed(iter/s)": 0.67263 }, { "epoch": 2.926181397540808, "grad_norm": 5.441272258758545, 
"learning_rate": 3.6806899259058703e-05, "loss": 2.5232444763183595, "memory(GiB)": 72.85, "step": 68300, "token_acc": 0.4742647058823529, "train_speed(iter/s)": 0.672619 }, { "epoch": 2.9263956128700572, "grad_norm": 5.389498710632324, "learning_rate": 3.680040810574853e-05, "loss": 2.230641174316406, "memory(GiB)": 72.85, "step": 68305, "token_acc": 0.5015974440894568, "train_speed(iter/s)": 0.672613 }, { "epoch": 2.926609828199306, "grad_norm": 6.186598777770996, "learning_rate": 3.6793917191561346e-05, "loss": 2.2918472290039062, "memory(GiB)": 72.85, "step": 68310, "token_acc": 0.4689655172413793, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.926824043528555, "grad_norm": 5.858827114105225, "learning_rate": 3.678742651661474e-05, "loss": 2.5893798828125, "memory(GiB)": 72.85, "step": 68315, "token_acc": 0.4576271186440678, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.927038258857804, "grad_norm": 4.781980037689209, "learning_rate": 3.678093608102631e-05, "loss": 2.4720026016235352, "memory(GiB)": 72.85, "step": 68320, "token_acc": 0.5, "train_speed(iter/s)": 0.672627 }, { "epoch": 2.927252474187053, "grad_norm": 5.647188663482666, "learning_rate": 3.6774445884913604e-05, "loss": 2.348761558532715, "memory(GiB)": 72.85, "step": 68325, "token_acc": 0.5068493150684932, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.9274666895163017, "grad_norm": 3.3661582469940186, "learning_rate": 3.676795592839423e-05, "loss": 2.4499149322509766, "memory(GiB)": 72.85, "step": 68330, "token_acc": 0.49843260188087773, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.927680904845551, "grad_norm": 4.109957695007324, "learning_rate": 3.676146621158574e-05, "loss": 2.0677446365356444, "memory(GiB)": 72.85, "step": 68335, "token_acc": 0.5193798449612403, "train_speed(iter/s)": 0.67262 }, { "epoch": 2.9278951201748, "grad_norm": 4.556361198425293, "learning_rate": 3.675497673460572e-05, "loss": 2.1642589569091797, "memory(GiB)": 72.85, "step": 68340, "token_acc": 
0.5018050541516246, "train_speed(iter/s)": 0.672618 }, { "epoch": 2.9281093355040486, "grad_norm": 5.602635860443115, "learning_rate": 3.674848749757169e-05, "loss": 2.3143871307373045, "memory(GiB)": 72.85, "step": 68345, "token_acc": 0.45482866043613707, "train_speed(iter/s)": 0.67263 }, { "epoch": 2.928323550833298, "grad_norm": 7.234043121337891, "learning_rate": 3.674199850060127e-05, "loss": 2.2226633071899413, "memory(GiB)": 72.85, "step": 68350, "token_acc": 0.5058823529411764, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.9285377661625467, "grad_norm": 4.941644191741943, "learning_rate": 3.6735509743811966e-05, "loss": 2.1358314514160157, "memory(GiB)": 72.85, "step": 68355, "token_acc": 0.5433070866141733, "train_speed(iter/s)": 0.672624 }, { "epoch": 2.9287519814917955, "grad_norm": 5.6829094886779785, "learning_rate": 3.672902122732137e-05, "loss": 2.2707141876220702, "memory(GiB)": 72.85, "step": 68360, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.672624 }, { "epoch": 2.9289661968210448, "grad_norm": 4.5570292472839355, "learning_rate": 3.672253295124699e-05, "loss": 2.2653087615966796, "memory(GiB)": 72.85, "step": 68365, "token_acc": 0.5152671755725191, "train_speed(iter/s)": 0.672629 }, { "epoch": 2.9291804121502936, "grad_norm": 4.392472267150879, "learning_rate": 3.671604491570637e-05, "loss": 2.1142208099365236, "memory(GiB)": 72.85, "step": 68370, "token_acc": 0.5351170568561873, "train_speed(iter/s)": 0.672628 }, { "epoch": 2.9293946274795424, "grad_norm": 5.053548812866211, "learning_rate": 3.670955712081708e-05, "loss": 2.096821975708008, "memory(GiB)": 72.85, "step": 68375, "token_acc": 0.5328947368421053, "train_speed(iter/s)": 0.672615 }, { "epoch": 2.9296088428087916, "grad_norm": 5.8289642333984375, "learning_rate": 3.6703069566696625e-05, "loss": 2.30059700012207, "memory(GiB)": 72.85, "step": 68380, "token_acc": 0.48214285714285715, "train_speed(iter/s)": 0.672617 }, { "epoch": 2.9298230581380404, "grad_norm": 
5.046953201293945, "learning_rate": 3.669658225346252e-05, "loss": 2.2245487213134765, "memory(GiB)": 72.85, "step": 68385, "token_acc": 0.5072992700729927, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.9300372734672893, "grad_norm": 4.060428142547607, "learning_rate": 3.669009518123233e-05, "loss": 2.339279556274414, "memory(GiB)": 72.85, "step": 68390, "token_acc": 0.4969512195121951, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.9302514887965385, "grad_norm": 5.515277862548828, "learning_rate": 3.668360835012353e-05, "loss": 2.4524322509765626, "memory(GiB)": 72.85, "step": 68395, "token_acc": 0.46785714285714286, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.9304657041257873, "grad_norm": 4.810489654541016, "learning_rate": 3.6677121760253675e-05, "loss": 2.3209062576293946, "memory(GiB)": 72.85, "step": 68400, "token_acc": 0.5211267605633803, "train_speed(iter/s)": 0.672613 }, { "epoch": 2.930679919455036, "grad_norm": 5.24346399307251, "learning_rate": 3.667063541174026e-05, "loss": 2.4100725173950197, "memory(GiB)": 72.85, "step": 68405, "token_acc": 0.5125, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.9308941347842854, "grad_norm": 5.310918807983398, "learning_rate": 3.666414930470077e-05, "loss": 2.1672557830810546, "memory(GiB)": 72.85, "step": 68410, "token_acc": 0.5324675324675324, "train_speed(iter/s)": 0.672624 }, { "epoch": 2.931108350113534, "grad_norm": 4.503053188323975, "learning_rate": 3.665766343925274e-05, "loss": 2.4118206024169924, "memory(GiB)": 72.85, "step": 68415, "token_acc": 0.44884488448844884, "train_speed(iter/s)": 0.672619 }, { "epoch": 2.931322565442783, "grad_norm": 5.854469299316406, "learning_rate": 3.665117781551363e-05, "loss": 2.2130512237548827, "memory(GiB)": 72.85, "step": 68420, "token_acc": 0.4865771812080537, "train_speed(iter/s)": 0.672627 }, { "epoch": 2.9315367807720323, "grad_norm": 4.957823276519775, "learning_rate": 3.664469243360097e-05, "loss": 2.0378086090087892, "memory(GiB)": 72.85, "step": 68425, 
"token_acc": 0.5539568345323741, "train_speed(iter/s)": 0.672627 }, { "epoch": 2.931750996101281, "grad_norm": 5.433363914489746, "learning_rate": 3.6638207293632235e-05, "loss": 2.203841209411621, "memory(GiB)": 72.85, "step": 68430, "token_acc": 0.5494505494505495, "train_speed(iter/s)": 0.672636 }, { "epoch": 2.93196521143053, "grad_norm": 5.541749954223633, "learning_rate": 3.663172239572492e-05, "loss": 2.2732378005981446, "memory(GiB)": 72.85, "step": 68435, "token_acc": 0.504885993485342, "train_speed(iter/s)": 0.672626 }, { "epoch": 2.932179426759779, "grad_norm": 4.1792144775390625, "learning_rate": 3.6625237739996474e-05, "loss": 2.2263809204101563, "memory(GiB)": 72.85, "step": 68440, "token_acc": 0.5034246575342466, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.932393642089028, "grad_norm": 5.594233989715576, "learning_rate": 3.661875332656441e-05, "loss": 2.090842056274414, "memory(GiB)": 72.85, "step": 68445, "token_acc": 0.5015873015873016, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.932607857418277, "grad_norm": 4.123451232910156, "learning_rate": 3.6612269155546186e-05, "loss": 2.140629196166992, "memory(GiB)": 72.85, "step": 68450, "token_acc": 0.5766666666666667, "train_speed(iter/s)": 0.672638 }, { "epoch": 2.932822072747526, "grad_norm": 4.796048164367676, "learning_rate": 3.660578522705925e-05, "loss": 2.230202484130859, "memory(GiB)": 72.85, "step": 68455, "token_acc": 0.49800796812749004, "train_speed(iter/s)": 0.672641 }, { "epoch": 2.933036288076775, "grad_norm": 3.9446346759796143, "learning_rate": 3.659930154122109e-05, "loss": 2.2348869323730467, "memory(GiB)": 72.85, "step": 68460, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.672643 }, { "epoch": 2.9332505034060237, "grad_norm": 4.867632865905762, "learning_rate": 3.659281809814915e-05, "loss": 2.2043636322021483, "memory(GiB)": 72.85, "step": 68465, "token_acc": 0.48859934853420195, "train_speed(iter/s)": 0.672643 }, { "epoch": 2.933464718735273, "grad_norm": 
5.224976539611816, "learning_rate": 3.658633489796088e-05, "loss": 2.3998619079589845, "memory(GiB)": 72.85, "step": 68470, "token_acc": 0.48872180451127817, "train_speed(iter/s)": 0.672647 }, { "epoch": 2.9336789340645217, "grad_norm": 4.523379325866699, "learning_rate": 3.657985194077375e-05, "loss": 2.2966449737548826, "memory(GiB)": 72.85, "step": 68475, "token_acc": 0.5156794425087108, "train_speed(iter/s)": 0.672644 }, { "epoch": 2.9338931493937705, "grad_norm": 4.072225570678711, "learning_rate": 3.6573369226705176e-05, "loss": 1.9698680877685546, "memory(GiB)": 72.85, "step": 68480, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.672646 }, { "epoch": 2.93410736472302, "grad_norm": 4.625225067138672, "learning_rate": 3.656688675587263e-05, "loss": 2.4436439514160155, "memory(GiB)": 72.85, "step": 68485, "token_acc": 0.5, "train_speed(iter/s)": 0.672657 }, { "epoch": 2.9343215800522686, "grad_norm": 5.405015468597412, "learning_rate": 3.656040452839352e-05, "loss": 2.2250417709350585, "memory(GiB)": 72.85, "step": 68490, "token_acc": 0.5130111524163569, "train_speed(iter/s)": 0.672656 }, { "epoch": 2.9345357953815174, "grad_norm": 4.928916931152344, "learning_rate": 3.655392254438528e-05, "loss": 2.3239059448242188, "memory(GiB)": 72.85, "step": 68495, "token_acc": 0.49122807017543857, "train_speed(iter/s)": 0.672659 }, { "epoch": 2.9347500107107667, "grad_norm": 4.609511375427246, "learning_rate": 3.654744080396535e-05, "loss": 2.0713443756103516, "memory(GiB)": 72.85, "step": 68500, "token_acc": 0.5016501650165016, "train_speed(iter/s)": 0.672668 }, { "epoch": 2.9347500107107667, "eval_loss": 2.382702589035034, "eval_runtime": 15.8232, "eval_samples_per_second": 6.32, "eval_steps_per_second": 6.32, "eval_token_acc": 0.46248294679399726, "step": 68500 }, { "epoch": 2.9349642260400155, "grad_norm": 3.6980128288269043, "learning_rate": 3.654095930725117e-05, "loss": 2.04669075012207, "memory(GiB)": 72.85, "step": 68505, "token_acc": 
0.4777887462981244, "train_speed(iter/s)": 0.672551 }, { "epoch": 2.9351784413692643, "grad_norm": 5.686766624450684, "learning_rate": 3.653447805436013e-05, "loss": 1.863894271850586, "memory(GiB)": 72.85, "step": 68510, "token_acc": 0.5767634854771784, "train_speed(iter/s)": 0.672549 }, { "epoch": 2.9353926566985136, "grad_norm": 6.152573585510254, "learning_rate": 3.652799704540964e-05, "loss": 2.2682485580444336, "memory(GiB)": 72.85, "step": 68515, "token_acc": 0.5341880341880342, "train_speed(iter/s)": 0.672553 }, { "epoch": 2.9356068720277624, "grad_norm": 4.045928478240967, "learning_rate": 3.652151628051713e-05, "loss": 2.2853458404541014, "memory(GiB)": 72.85, "step": 68520, "token_acc": 0.49216300940438873, "train_speed(iter/s)": 0.672551 }, { "epoch": 2.935821087357011, "grad_norm": 4.677862644195557, "learning_rate": 3.6515035759800007e-05, "loss": 2.330842208862305, "memory(GiB)": 72.85, "step": 68525, "token_acc": 0.47194719471947194, "train_speed(iter/s)": 0.672544 }, { "epoch": 2.9360353026862605, "grad_norm": 5.200367450714111, "learning_rate": 3.6508555483375636e-05, "loss": 2.187290573120117, "memory(GiB)": 72.85, "step": 68530, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.672544 }, { "epoch": 2.9362495180155093, "grad_norm": 5.08951473236084, "learning_rate": 3.6502075451361464e-05, "loss": 2.2096107482910154, "memory(GiB)": 72.85, "step": 68535, "token_acc": 0.5309090909090909, "train_speed(iter/s)": 0.672555 }, { "epoch": 2.936463733344758, "grad_norm": 4.721416473388672, "learning_rate": 3.6495595663874835e-05, "loss": 2.1504589080810548, "memory(GiB)": 72.85, "step": 68540, "token_acc": 0.5148247978436657, "train_speed(iter/s)": 0.672547 }, { "epoch": 2.9366779486740073, "grad_norm": 4.229361534118652, "learning_rate": 3.6489116121033175e-05, "loss": 2.3404029846191405, "memory(GiB)": 72.85, "step": 68545, "token_acc": 0.49523809523809526, "train_speed(iter/s)": 0.672558 }, { "epoch": 2.936892164003256, "grad_norm": 
4.881148815155029, "learning_rate": 3.648263682295385e-05, "loss": 2.045333480834961, "memory(GiB)": 72.85, "step": 68550, "token_acc": 0.5133079847908745, "train_speed(iter/s)": 0.672565 }, { "epoch": 2.937106379332505, "grad_norm": 5.742684364318848, "learning_rate": 3.6476157769754226e-05, "loss": 2.166852569580078, "memory(GiB)": 72.85, "step": 68555, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.672572 }, { "epoch": 2.937320594661754, "grad_norm": 4.235395431518555, "learning_rate": 3.64696789615517e-05, "loss": 2.3336105346679688, "memory(GiB)": 72.85, "step": 68560, "token_acc": 0.4807121661721068, "train_speed(iter/s)": 0.672574 }, { "epoch": 2.937534809991003, "grad_norm": 5.012623310089111, "learning_rate": 3.646320039846364e-05, "loss": 2.2909872055053713, "memory(GiB)": 72.85, "step": 68565, "token_acc": 0.476027397260274, "train_speed(iter/s)": 0.672574 }, { "epoch": 2.937749025320252, "grad_norm": 5.339132308959961, "learning_rate": 3.6456722080607375e-05, "loss": 1.9639795303344727, "memory(GiB)": 72.85, "step": 68570, "token_acc": 0.5477031802120141, "train_speed(iter/s)": 0.672566 }, { "epoch": 2.937963240649501, "grad_norm": 6.596158027648926, "learning_rate": 3.645024400810031e-05, "loss": 2.352180290222168, "memory(GiB)": 72.85, "step": 68575, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.672572 }, { "epoch": 2.93817745597875, "grad_norm": 4.95272970199585, "learning_rate": 3.644376618105979e-05, "loss": 2.3586566925048826, "memory(GiB)": 72.85, "step": 68580, "token_acc": 0.47796610169491527, "train_speed(iter/s)": 0.672579 }, { "epoch": 2.9383916713079987, "grad_norm": 5.5417046546936035, "learning_rate": 3.6437288599603156e-05, "loss": 2.177062225341797, "memory(GiB)": 72.85, "step": 68585, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.672572 }, { "epoch": 2.938605886637248, "grad_norm": 5.483148097991943, "learning_rate": 3.643081126384777e-05, "loss": 2.030050277709961, "memory(GiB)": 72.85, "step": 
68590, "token_acc": 0.5441696113074205, "train_speed(iter/s)": 0.672571 }, { "epoch": 2.938820101966497, "grad_norm": 3.7622475624084473, "learning_rate": 3.642433417391097e-05, "loss": 2.1155422210693358, "memory(GiB)": 72.85, "step": 68595, "token_acc": 0.5335365853658537, "train_speed(iter/s)": 0.67258 }, { "epoch": 2.9390343172957456, "grad_norm": 4.235804557800293, "learning_rate": 3.6417857329910075e-05, "loss": 2.3287063598632813, "memory(GiB)": 72.85, "step": 68600, "token_acc": 0.5061728395061729, "train_speed(iter/s)": 0.672572 }, { "epoch": 2.939248532624995, "grad_norm": 7.672514915466309, "learning_rate": 3.641138073196245e-05, "loss": 2.283600616455078, "memory(GiB)": 72.85, "step": 68605, "token_acc": 0.5148148148148148, "train_speed(iter/s)": 0.672575 }, { "epoch": 2.9394627479542437, "grad_norm": 5.706338882446289, "learning_rate": 3.640490438018541e-05, "loss": 2.6383073806762694, "memory(GiB)": 72.85, "step": 68610, "token_acc": 0.45481049562682213, "train_speed(iter/s)": 0.672583 }, { "epoch": 2.9396769632834925, "grad_norm": 3.7418432235717773, "learning_rate": 3.639842827469627e-05, "loss": 2.082599067687988, "memory(GiB)": 72.85, "step": 68615, "token_acc": 0.5363984674329502, "train_speed(iter/s)": 0.672588 }, { "epoch": 2.9398911786127417, "grad_norm": 6.16000509262085, "learning_rate": 3.639195241561237e-05, "loss": 2.1741073608398436, "memory(GiB)": 72.85, "step": 68620, "token_acc": 0.5050167224080268, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.9401053939419906, "grad_norm": 5.213306427001953, "learning_rate": 3.638547680305101e-05, "loss": 2.0453617095947267, "memory(GiB)": 72.85, "step": 68625, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.672599 }, { "epoch": 2.9403196092712394, "grad_norm": 5.297087669372559, "learning_rate": 3.637900143712952e-05, "loss": 2.2855911254882812, "memory(GiB)": 72.85, "step": 68630, "token_acc": 0.5064516129032258, "train_speed(iter/s)": 0.672611 }, { "epoch": 2.9405338246004886, 
"grad_norm": 4.937045574188232, "learning_rate": 3.637252631796519e-05, "loss": 2.2397180557250977, "memory(GiB)": 72.85, "step": 68635, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.67262 }, { "epoch": 2.9407480399297374, "grad_norm": 6.794082164764404, "learning_rate": 3.636605144567532e-05, "loss": 2.5297691345214846, "memory(GiB)": 72.85, "step": 68640, "token_acc": 0.47491638795986624, "train_speed(iter/s)": 0.672622 }, { "epoch": 2.9409622552589862, "grad_norm": 3.8877272605895996, "learning_rate": 3.635957682037722e-05, "loss": 2.3194244384765623, "memory(GiB)": 72.85, "step": 68645, "token_acc": 0.5508474576271186, "train_speed(iter/s)": 0.67263 }, { "epoch": 2.9411764705882355, "grad_norm": 4.628995418548584, "learning_rate": 3.635310244218819e-05, "loss": 2.3849155426025392, "memory(GiB)": 72.85, "step": 68650, "token_acc": 0.5104602510460251, "train_speed(iter/s)": 0.672635 }, { "epoch": 2.9413906859174843, "grad_norm": 7.488161563873291, "learning_rate": 3.634662831122552e-05, "loss": 2.512312126159668, "memory(GiB)": 72.85, "step": 68655, "token_acc": 0.49514563106796117, "train_speed(iter/s)": 0.672629 }, { "epoch": 2.941604901246733, "grad_norm": 4.286231517791748, "learning_rate": 3.6340154427606466e-05, "loss": 2.2491159439086914, "memory(GiB)": 72.85, "step": 68660, "token_acc": 0.5109489051094891, "train_speed(iter/s)": 0.672633 }, { "epoch": 2.9418191165759824, "grad_norm": 4.717033863067627, "learning_rate": 3.633368079144834e-05, "loss": 2.5233043670654296, "memory(GiB)": 72.85, "step": 68665, "token_acc": 0.48909657320872274, "train_speed(iter/s)": 0.672626 }, { "epoch": 2.942033331905231, "grad_norm": 4.4263224601745605, "learning_rate": 3.632720740286841e-05, "loss": 1.8104585647583007, "memory(GiB)": 72.85, "step": 68670, "token_acc": 0.5734767025089605, "train_speed(iter/s)": 0.672619 }, { "epoch": 2.94224754723448, "grad_norm": 5.63248872756958, "learning_rate": 3.6320734261983935e-05, "loss": 2.301066589355469, 
"memory(GiB)": 72.85, "step": 68675, "token_acc": 0.4527027027027027, "train_speed(iter/s)": 0.672618 }, { "epoch": 2.9424617625637293, "grad_norm": 5.237942695617676, "learning_rate": 3.631426136891221e-05, "loss": 2.5398326873779298, "memory(GiB)": 72.85, "step": 68680, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.672601 }, { "epoch": 2.942675977892978, "grad_norm": 5.199198246002197, "learning_rate": 3.6307788723770455e-05, "loss": 2.5345211029052734, "memory(GiB)": 72.85, "step": 68685, "token_acc": 0.48286604361370716, "train_speed(iter/s)": 0.672597 }, { "epoch": 2.942890193222227, "grad_norm": 5.570507049560547, "learning_rate": 3.630131632667596e-05, "loss": 2.2304447174072264, "memory(GiB)": 72.85, "step": 68690, "token_acc": 0.50920245398773, "train_speed(iter/s)": 0.672599 }, { "epoch": 2.943104408551476, "grad_norm": 4.628712177276611, "learning_rate": 3.629484417774597e-05, "loss": 2.202543258666992, "memory(GiB)": 72.85, "step": 68695, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.672605 }, { "epoch": 2.943318623880725, "grad_norm": 4.556662559509277, "learning_rate": 3.6288372277097735e-05, "loss": 2.3675182342529295, "memory(GiB)": 72.85, "step": 68700, "token_acc": 0.503731343283582, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.9435328392099738, "grad_norm": 4.756103038787842, "learning_rate": 3.6281900624848496e-05, "loss": 2.245442199707031, "memory(GiB)": 72.85, "step": 68705, "token_acc": 0.5036496350364964, "train_speed(iter/s)": 0.672616 }, { "epoch": 2.943747054539223, "grad_norm": 6.008317947387695, "learning_rate": 3.6275429221115506e-05, "loss": 2.1856058120727537, "memory(GiB)": 72.85, "step": 68710, "token_acc": 0.49814126394052044, "train_speed(iter/s)": 0.67261 }, { "epoch": 2.943961269868472, "grad_norm": 6.4552226066589355, "learning_rate": 3.6268958066015965e-05, "loss": 2.414866065979004, "memory(GiB)": 72.85, "step": 68715, "token_acc": 0.49477351916376305, "train_speed(iter/s)": 0.672608 }, { 
"epoch": 2.9441754851977207, "grad_norm": 4.693901062011719, "learning_rate": 3.6262487159667145e-05, "loss": 2.423534965515137, "memory(GiB)": 72.85, "step": 68720, "token_acc": 0.5022026431718062, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.94438970052697, "grad_norm": 6.866062641143799, "learning_rate": 3.625601650218626e-05, "loss": 2.242639923095703, "memory(GiB)": 72.85, "step": 68725, "token_acc": 0.5114754098360655, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.9446039158562187, "grad_norm": 5.257636547088623, "learning_rate": 3.624954609369052e-05, "loss": 2.005910110473633, "memory(GiB)": 72.85, "step": 68730, "token_acc": 0.5051194539249146, "train_speed(iter/s)": 0.672599 }, { "epoch": 2.9448181311854675, "grad_norm": 3.501716136932373, "learning_rate": 3.6243075934297165e-05, "loss": 2.1088531494140623, "memory(GiB)": 72.85, "step": 68735, "token_acc": 0.5, "train_speed(iter/s)": 0.672602 }, { "epoch": 2.945032346514717, "grad_norm": 4.639688014984131, "learning_rate": 3.62366060241234e-05, "loss": 2.37249870300293, "memory(GiB)": 72.85, "step": 68740, "token_acc": 0.5292207792207793, "train_speed(iter/s)": 0.672604 }, { "epoch": 2.9452465618439656, "grad_norm": 5.939981460571289, "learning_rate": 3.62301363632864e-05, "loss": 2.339573097229004, "memory(GiB)": 72.85, "step": 68745, "token_acc": 0.5436507936507936, "train_speed(iter/s)": 0.672604 }, { "epoch": 2.9454607771732144, "grad_norm": 3.9458186626434326, "learning_rate": 3.622366695190342e-05, "loss": 2.350263977050781, "memory(GiB)": 72.85, "step": 68750, "token_acc": 0.5225806451612903, "train_speed(iter/s)": 0.672615 }, { "epoch": 2.9456749925024637, "grad_norm": 4.483972072601318, "learning_rate": 3.621719779009164e-05, "loss": 2.225633430480957, "memory(GiB)": 72.85, "step": 68755, "token_acc": 0.49583333333333335, "train_speed(iter/s)": 0.672616 }, { "epoch": 2.9458892078317125, "grad_norm": 6.005478858947754, "learning_rate": 3.6210728877968236e-05, "loss": 2.1349853515625, 
"memory(GiB)": 72.85, "step": 68760, "token_acc": 0.5081967213114754, "train_speed(iter/s)": 0.67262 }, { "epoch": 2.9461034231609613, "grad_norm": 4.102398872375488, "learning_rate": 3.620426021565042e-05, "loss": 2.615175247192383, "memory(GiB)": 72.85, "step": 68765, "token_acc": 0.476038338658147, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.9463176384902106, "grad_norm": 5.409984588623047, "learning_rate": 3.619779180325537e-05, "loss": 2.359732437133789, "memory(GiB)": 72.85, "step": 68770, "token_acc": 0.4980544747081712, "train_speed(iter/s)": 0.6726 }, { "epoch": 2.9465318538194594, "grad_norm": 6.073079586029053, "learning_rate": 3.619132364090028e-05, "loss": 2.18933219909668, "memory(GiB)": 72.85, "step": 68775, "token_acc": 0.5171102661596958, "train_speed(iter/s)": 0.6726 }, { "epoch": 2.946746069148708, "grad_norm": 3.703961133956909, "learning_rate": 3.6184855728702306e-05, "loss": 1.8630565643310546, "memory(GiB)": 72.85, "step": 68780, "token_acc": 0.5825242718446602, "train_speed(iter/s)": 0.6726 }, { "epoch": 2.9469602844779574, "grad_norm": 4.178813457489014, "learning_rate": 3.617838806677862e-05, "loss": 2.1667850494384764, "memory(GiB)": 72.85, "step": 68785, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.672609 }, { "epoch": 2.9471744998072062, "grad_norm": 4.654308319091797, "learning_rate": 3.617192065524639e-05, "loss": 2.295998764038086, "memory(GiB)": 72.85, "step": 68790, "token_acc": 0.4889589905362776, "train_speed(iter/s)": 0.672611 }, { "epoch": 2.947388715136455, "grad_norm": 4.034571170806885, "learning_rate": 3.6165453494222816e-05, "loss": 2.1081398010253904, "memory(GiB)": 72.85, "step": 68795, "token_acc": 0.5431309904153354, "train_speed(iter/s)": 0.672617 }, { "epoch": 2.9476029304657043, "grad_norm": 5.269551753997803, "learning_rate": 3.615898658382503e-05, "loss": 2.4723377227783203, "memory(GiB)": 72.85, "step": 68800, "token_acc": 0.45910290237467016, "train_speed(iter/s)": 0.67261 }, { "epoch": 
2.947817145794953, "grad_norm": 5.247290134429932, "learning_rate": 3.615251992417017e-05, "loss": 2.294195556640625, "memory(GiB)": 72.85, "step": 68805, "token_acc": 0.5194029850746269, "train_speed(iter/s)": 0.672614 }, { "epoch": 2.948031361124202, "grad_norm": 4.462802410125732, "learning_rate": 3.6146053515375415e-05, "loss": 2.292998504638672, "memory(GiB)": 72.85, "step": 68810, "token_acc": 0.5047923322683706, "train_speed(iter/s)": 0.672621 }, { "epoch": 2.948245576453451, "grad_norm": 4.6555256843566895, "learning_rate": 3.613958735755789e-05, "loss": 2.2736652374267576, "memory(GiB)": 72.85, "step": 68815, "token_acc": 0.5275590551181102, "train_speed(iter/s)": 0.672629 }, { "epoch": 2.9484597917827, "grad_norm": 5.71811580657959, "learning_rate": 3.613312145083474e-05, "loss": 1.808181381225586, "memory(GiB)": 72.85, "step": 68820, "token_acc": 0.5874125874125874, "train_speed(iter/s)": 0.672631 }, { "epoch": 2.948674007111949, "grad_norm": 5.602775573730469, "learning_rate": 3.6126655795323115e-05, "loss": 2.2268999099731444, "memory(GiB)": 72.85, "step": 68825, "token_acc": 0.5015197568389058, "train_speed(iter/s)": 0.67263 }, { "epoch": 2.948888222441198, "grad_norm": 4.379074573516846, "learning_rate": 3.612019039114012e-05, "loss": 2.3293806076049806, "memory(GiB)": 72.85, "step": 68830, "token_acc": 0.5256410256410257, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.949102437770447, "grad_norm": 4.478560924530029, "learning_rate": 3.6113725238402906e-05, "loss": 2.310012626647949, "memory(GiB)": 72.85, "step": 68835, "token_acc": 0.49836065573770494, "train_speed(iter/s)": 0.672639 }, { "epoch": 2.9493166530996957, "grad_norm": 4.281253814697266, "learning_rate": 3.610726033722859e-05, "loss": 2.191958999633789, "memory(GiB)": 72.85, "step": 68840, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.672641 }, { "epoch": 2.949530868428945, "grad_norm": 5.580425262451172, "learning_rate": 3.610079568773427e-05, "loss": 2.406338119506836, 
"memory(GiB)": 72.85, "step": 68845, "token_acc": 0.4777327935222672, "train_speed(iter/s)": 0.672646 }, { "epoch": 2.9497450837581938, "grad_norm": 4.003261566162109, "learning_rate": 3.6094331290037094e-05, "loss": 2.3582199096679686, "memory(GiB)": 72.85, "step": 68850, "token_acc": 0.5115606936416185, "train_speed(iter/s)": 0.672651 }, { "epoch": 2.9499592990874426, "grad_norm": 4.539664268493652, "learning_rate": 3.6087867144254136e-05, "loss": 2.339570236206055, "memory(GiB)": 72.85, "step": 68855, "token_acc": 0.515625, "train_speed(iter/s)": 0.672661 }, { "epoch": 2.950173514416692, "grad_norm": 5.58783483505249, "learning_rate": 3.608140325050252e-05, "loss": 2.0926326751708983, "memory(GiB)": 72.85, "step": 68860, "token_acc": 0.5462184873949579, "train_speed(iter/s)": 0.672654 }, { "epoch": 2.9503877297459407, "grad_norm": 5.1022868156433105, "learning_rate": 3.607493960889932e-05, "loss": 2.3213748931884766, "memory(GiB)": 72.85, "step": 68865, "token_acc": 0.5020576131687243, "train_speed(iter/s)": 0.67265 }, { "epoch": 2.9506019450751895, "grad_norm": 5.960246562957764, "learning_rate": 3.6068476219561677e-05, "loss": 2.215555191040039, "memory(GiB)": 72.85, "step": 68870, "token_acc": 0.5492424242424242, "train_speed(iter/s)": 0.672664 }, { "epoch": 2.9508161604044387, "grad_norm": 5.097075462341309, "learning_rate": 3.606201308260665e-05, "loss": 2.3629350662231445, "memory(GiB)": 72.85, "step": 68875, "token_acc": 0.48639455782312924, "train_speed(iter/s)": 0.67265 }, { "epoch": 2.9510303757336875, "grad_norm": 4.8207292556762695, "learning_rate": 3.6055550198151334e-05, "loss": 2.086075019836426, "memory(GiB)": 72.85, "step": 68880, "token_acc": 0.5583941605839416, "train_speed(iter/s)": 0.672657 }, { "epoch": 2.9512445910629364, "grad_norm": 5.265907287597656, "learning_rate": 3.604908756631281e-05, "loss": 2.400586700439453, "memory(GiB)": 72.85, "step": 68885, "token_acc": 0.5709459459459459, "train_speed(iter/s)": 0.672662 }, { "epoch": 
2.9514588063921856, "grad_norm": 5.127951622009277, "learning_rate": 3.604262518720814e-05, "loss": 2.0401472091674804, "memory(GiB)": 72.85, "step": 68890, "token_acc": 0.5708955223880597, "train_speed(iter/s)": 0.672662 }, { "epoch": 2.9516730217214344, "grad_norm": 5.207024097442627, "learning_rate": 3.603616306095442e-05, "loss": 2.1338832855224608, "memory(GiB)": 72.85, "step": 68895, "token_acc": 0.5589225589225589, "train_speed(iter/s)": 0.672669 }, { "epoch": 2.9518872370506832, "grad_norm": 5.195775508880615, "learning_rate": 3.602970118766871e-05, "loss": 2.3081939697265623, "memory(GiB)": 72.85, "step": 68900, "token_acc": 0.4774436090225564, "train_speed(iter/s)": 0.672673 }, { "epoch": 2.9521014523799325, "grad_norm": 6.339512348175049, "learning_rate": 3.602323956746805e-05, "loss": 2.4620183944702148, "memory(GiB)": 72.85, "step": 68905, "token_acc": 0.5143884892086331, "train_speed(iter/s)": 0.672674 }, { "epoch": 2.9523156677091813, "grad_norm": 4.591829776763916, "learning_rate": 3.601677820046953e-05, "loss": 2.169636535644531, "memory(GiB)": 72.85, "step": 68910, "token_acc": 0.5374149659863946, "train_speed(iter/s)": 0.672667 }, { "epoch": 2.95252988303843, "grad_norm": 4.565418720245361, "learning_rate": 3.601031708679018e-05, "loss": 2.1226871490478514, "memory(GiB)": 72.85, "step": 68915, "token_acc": 0.5441176470588235, "train_speed(iter/s)": 0.672667 }, { "epoch": 2.9527440983676794, "grad_norm": 5.643307209014893, "learning_rate": 3.600385622654706e-05, "loss": 2.2510086059570313, "memory(GiB)": 72.85, "step": 68920, "token_acc": 0.5393700787401575, "train_speed(iter/s)": 0.67267 }, { "epoch": 2.952958313696928, "grad_norm": 4.55682373046875, "learning_rate": 3.599739561985722e-05, "loss": 2.185012626647949, "memory(GiB)": 72.85, "step": 68925, "token_acc": 0.5518518518518518, "train_speed(iter/s)": 0.672676 }, { "epoch": 2.953172529026177, "grad_norm": 4.342138767242432, "learning_rate": 3.599093526683769e-05, "loss": 2.669296073913574, 
"memory(GiB)": 72.85, "step": 68930, "token_acc": 0.4427710843373494, "train_speed(iter/s)": 0.672676 }, { "epoch": 2.9533867443554263, "grad_norm": 5.85283088684082, "learning_rate": 3.598447516760549e-05, "loss": 2.680526351928711, "memory(GiB)": 72.85, "step": 68935, "token_acc": 0.43209876543209874, "train_speed(iter/s)": 0.672685 }, { "epoch": 2.953600959684675, "grad_norm": 5.356490135192871, "learning_rate": 3.59780153222777e-05, "loss": 2.263264846801758, "memory(GiB)": 72.85, "step": 68940, "token_acc": 0.4795539033457249, "train_speed(iter/s)": 0.672697 }, { "epoch": 2.953815175013924, "grad_norm": 4.317440986633301, "learning_rate": 3.597155573097131e-05, "loss": 2.0885746002197267, "memory(GiB)": 72.85, "step": 68945, "token_acc": 0.5852713178294574, "train_speed(iter/s)": 0.672704 }, { "epoch": 2.954029390343173, "grad_norm": 5.552002429962158, "learning_rate": 3.5965096393803345e-05, "loss": 2.370738220214844, "memory(GiB)": 72.85, "step": 68950, "token_acc": 0.5182926829268293, "train_speed(iter/s)": 0.672715 }, { "epoch": 2.954243605672422, "grad_norm": 4.638965606689453, "learning_rate": 3.5958637310890825e-05, "loss": 2.3110378265380858, "memory(GiB)": 72.85, "step": 68955, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.672713 }, { "epoch": 2.9544578210016708, "grad_norm": 4.649094581604004, "learning_rate": 3.595217848235075e-05, "loss": 2.3709205627441405, "memory(GiB)": 72.85, "step": 68960, "token_acc": 0.47633136094674555, "train_speed(iter/s)": 0.672725 }, { "epoch": 2.95467203633092, "grad_norm": 7.239723205566406, "learning_rate": 3.594571990830016e-05, "loss": 2.3294395446777343, "memory(GiB)": 72.85, "step": 68965, "token_acc": 0.5, "train_speed(iter/s)": 0.672728 }, { "epoch": 2.954886251660169, "grad_norm": 6.851308822631836, "learning_rate": 3.5939261588856034e-05, "loss": 2.3005207061767576, "memory(GiB)": 72.85, "step": 68970, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672724 }, { "epoch": 
2.9551004669894176, "grad_norm": 5.609177589416504, "learning_rate": 3.593280352413536e-05, "loss": 2.012752151489258, "memory(GiB)": 72.85, "step": 68975, "token_acc": 0.543859649122807, "train_speed(iter/s)": 0.672733 }, { "epoch": 2.955314682318667, "grad_norm": 5.212034225463867, "learning_rate": 3.592634571425516e-05, "loss": 2.096968078613281, "memory(GiB)": 72.85, "step": 68980, "token_acc": 0.5373134328358209, "train_speed(iter/s)": 0.672741 }, { "epoch": 2.9555288976479157, "grad_norm": 4.665994167327881, "learning_rate": 3.5919888159332415e-05, "loss": 2.2039587020874025, "memory(GiB)": 72.85, "step": 68985, "token_acc": 0.48484848484848486, "train_speed(iter/s)": 0.672744 }, { "epoch": 2.9557431129771645, "grad_norm": 5.744750022888184, "learning_rate": 3.591343085948409e-05, "loss": 2.3362310409545897, "memory(GiB)": 72.85, "step": 68990, "token_acc": 0.4858156028368794, "train_speed(iter/s)": 0.672742 }, { "epoch": 2.955957328306414, "grad_norm": 3.258176803588867, "learning_rate": 3.590697381482718e-05, "loss": 2.1293848037719725, "memory(GiB)": 72.85, "step": 68995, "token_acc": 0.5403508771929825, "train_speed(iter/s)": 0.672754 }, { "epoch": 2.9561715436356626, "grad_norm": 5.10861873626709, "learning_rate": 3.590051702547866e-05, "loss": 2.1542823791503904, "memory(GiB)": 72.85, "step": 69000, "token_acc": 0.4982698961937716, "train_speed(iter/s)": 0.67276 }, { "epoch": 2.9561715436356626, "eval_loss": 2.206179141998291, "eval_runtime": 15.2817, "eval_samples_per_second": 6.544, "eval_steps_per_second": 6.544, "eval_token_acc": 0.4946524064171123, "step": 69000 }, { "epoch": 2.9563857589649114, "grad_norm": 5.362854957580566, "learning_rate": 3.5894060491555516e-05, "loss": 2.3545488357543944, "memory(GiB)": 72.85, "step": 69005, "token_acc": 0.5, "train_speed(iter/s)": 0.672637 }, { "epoch": 2.9565999742941607, "grad_norm": 5.18113374710083, "learning_rate": 3.588760421317466e-05, "loss": 2.0796972274780274, "memory(GiB)": 72.85, "step": 69010, 
"token_acc": 0.5283018867924528, "train_speed(iter/s)": 0.672642 }, { "epoch": 2.9568141896234095, "grad_norm": 4.537745952606201, "learning_rate": 3.588114819045313e-05, "loss": 2.498903274536133, "memory(GiB)": 72.85, "step": 69015, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.672642 }, { "epoch": 2.9570284049526583, "grad_norm": 5.317471981048584, "learning_rate": 3.587469242350783e-05, "loss": 2.225180435180664, "memory(GiB)": 72.85, "step": 69020, "token_acc": 0.4938650306748466, "train_speed(iter/s)": 0.672646 }, { "epoch": 2.9572426202819075, "grad_norm": 6.899603366851807, "learning_rate": 3.5868236912455734e-05, "loss": 2.4985265731811523, "memory(GiB)": 72.85, "step": 69025, "token_acc": 0.47876447876447875, "train_speed(iter/s)": 0.672662 }, { "epoch": 2.9574568356111564, "grad_norm": 4.046102523803711, "learning_rate": 3.5861781657413784e-05, "loss": 2.2382232666015627, "memory(GiB)": 72.85, "step": 69030, "token_acc": 0.5182481751824818, "train_speed(iter/s)": 0.672665 }, { "epoch": 2.957671050940405, "grad_norm": 4.543712615966797, "learning_rate": 3.585532665849891e-05, "loss": 2.3926477432250977, "memory(GiB)": 72.85, "step": 69035, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.672674 }, { "epoch": 2.9578852662696544, "grad_norm": 4.367122173309326, "learning_rate": 3.584887191582808e-05, "loss": 2.1996347427368166, "memory(GiB)": 72.85, "step": 69040, "token_acc": 0.5111940298507462, "train_speed(iter/s)": 0.672676 }, { "epoch": 2.9580994815989032, "grad_norm": 4.9905595779418945, "learning_rate": 3.58424174295182e-05, "loss": 2.089046096801758, "memory(GiB)": 72.85, "step": 69045, "token_acc": 0.5173611111111112, "train_speed(iter/s)": 0.672666 }, { "epoch": 2.958313696928152, "grad_norm": 4.868507385253906, "learning_rate": 3.58359631996862e-05, "loss": 2.4547386169433594, "memory(GiB)": 72.85, "step": 69050, "token_acc": 0.5219123505976095, "train_speed(iter/s)": 0.672674 }, { "epoch": 2.9585279122574013, "grad_norm": 
4.388759613037109, "learning_rate": 3.582950922644903e-05, "loss": 2.1227289199829102, "memory(GiB)": 72.85, "step": 69055, "token_acc": 0.5309446254071661, "train_speed(iter/s)": 0.672679 }, { "epoch": 2.95874212758665, "grad_norm": 4.368592739105225, "learning_rate": 3.582305550992358e-05, "loss": 2.249290657043457, "memory(GiB)": 72.85, "step": 69060, "token_acc": 0.4808362369337979, "train_speed(iter/s)": 0.672682 }, { "epoch": 2.958956342915899, "grad_norm": 4.129845142364502, "learning_rate": 3.581660205022679e-05, "loss": 2.4378192901611326, "memory(GiB)": 72.85, "step": 69065, "token_acc": 0.46449704142011833, "train_speed(iter/s)": 0.672679 }, { "epoch": 2.959170558245148, "grad_norm": 4.210975170135498, "learning_rate": 3.5810148847475555e-05, "loss": 2.0542343139648436, "memory(GiB)": 72.85, "step": 69070, "token_acc": 0.5296167247386759, "train_speed(iter/s)": 0.67267 }, { "epoch": 2.959384773574397, "grad_norm": 4.183958053588867, "learning_rate": 3.580369590178678e-05, "loss": 2.241665267944336, "memory(GiB)": 72.85, "step": 69075, "token_acc": 0.5190615835777126, "train_speed(iter/s)": 0.672677 }, { "epoch": 2.959598988903646, "grad_norm": 4.893428325653076, "learning_rate": 3.579724321327738e-05, "loss": 2.2204408645629883, "memory(GiB)": 72.85, "step": 69080, "token_acc": 0.5461538461538461, "train_speed(iter/s)": 0.672675 }, { "epoch": 2.959813204232895, "grad_norm": 4.675237655639648, "learning_rate": 3.579079078206422e-05, "loss": 2.1254390716552733, "memory(GiB)": 72.85, "step": 69085, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.672674 }, { "epoch": 2.960027419562144, "grad_norm": 6.0818071365356445, "learning_rate": 3.578433860826423e-05, "loss": 2.4812749862670898, "memory(GiB)": 72.85, "step": 69090, "token_acc": 0.4787234042553192, "train_speed(iter/s)": 0.672667 }, { "epoch": 2.9602416348913927, "grad_norm": 4.537519454956055, "learning_rate": 3.577788669199428e-05, "loss": 2.2617237091064455, "memory(GiB)": 72.85, "step": 
69095, "token_acc": 0.5105740181268882, "train_speed(iter/s)": 0.67266 }, { "epoch": 2.960455850220642, "grad_norm": 4.312253952026367, "learning_rate": 3.5771435033371265e-05, "loss": 2.467987060546875, "memory(GiB)": 72.85, "step": 69100, "token_acc": 0.5157232704402516, "train_speed(iter/s)": 0.672669 }, { "epoch": 2.9606700655498908, "grad_norm": 3.470115900039673, "learning_rate": 3.576498363251204e-05, "loss": 2.2391120910644533, "memory(GiB)": 72.85, "step": 69105, "token_acc": 0.5160256410256411, "train_speed(iter/s)": 0.672668 }, { "epoch": 2.9608842808791396, "grad_norm": 4.032697677612305, "learning_rate": 3.57585324895335e-05, "loss": 2.2588525772094727, "memory(GiB)": 72.85, "step": 69110, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.672667 }, { "epoch": 2.961098496208389, "grad_norm": 4.831437587738037, "learning_rate": 3.57520816045525e-05, "loss": 1.9861045837402345, "memory(GiB)": 72.85, "step": 69115, "token_acc": 0.5650793650793651, "train_speed(iter/s)": 0.672666 }, { "epoch": 2.9613127115376376, "grad_norm": 4.306404113769531, "learning_rate": 3.5745630977685896e-05, "loss": 2.152815055847168, "memory(GiB)": 72.85, "step": 69120, "token_acc": 0.5387596899224806, "train_speed(iter/s)": 0.672671 }, { "epoch": 2.9615269268668865, "grad_norm": 5.496334552764893, "learning_rate": 3.5739180609050574e-05, "loss": 2.0208290100097654, "memory(GiB)": 72.85, "step": 69125, "token_acc": 0.5239616613418531, "train_speed(iter/s)": 0.672682 }, { "epoch": 2.9617411421961357, "grad_norm": 4.1585283279418945, "learning_rate": 3.573273049876337e-05, "loss": 2.1362941741943358, "memory(GiB)": 72.85, "step": 69130, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.672688 }, { "epoch": 2.9619553575253845, "grad_norm": 5.33834171295166, "learning_rate": 3.572628064694112e-05, "loss": 2.2670665740966798, "memory(GiB)": 72.85, "step": 69135, "token_acc": 0.5218978102189781, "train_speed(iter/s)": 0.6727 }, { "epoch": 2.9621695728546333, 
"grad_norm": 5.442109107971191, "learning_rate": 3.571983105370071e-05, "loss": 2.0690582275390623, "memory(GiB)": 72.85, "step": 69140, "token_acc": 0.5627376425855514, "train_speed(iter/s)": 0.672707 }, { "epoch": 2.9623837881838826, "grad_norm": 4.901822090148926, "learning_rate": 3.571338171915894e-05, "loss": 2.2532926559448243, "memory(GiB)": 72.85, "step": 69145, "token_acc": 0.475177304964539, "train_speed(iter/s)": 0.672708 }, { "epoch": 2.9625980035131314, "grad_norm": 4.6360907554626465, "learning_rate": 3.570693264343267e-05, "loss": 2.29219970703125, "memory(GiB)": 72.85, "step": 69150, "token_acc": 0.4794007490636704, "train_speed(iter/s)": 0.672712 }, { "epoch": 2.96281221884238, "grad_norm": 5.496952533721924, "learning_rate": 3.5700483826638727e-05, "loss": 2.330682945251465, "memory(GiB)": 72.85, "step": 69155, "token_acc": 0.5061728395061729, "train_speed(iter/s)": 0.672711 }, { "epoch": 2.9630264341716295, "grad_norm": 4.476264953613281, "learning_rate": 3.569403526889391e-05, "loss": 2.3246105194091795, "memory(GiB)": 72.85, "step": 69160, "token_acc": 0.5309446254071661, "train_speed(iter/s)": 0.672706 }, { "epoch": 2.9632406495008783, "grad_norm": 3.7957069873809814, "learning_rate": 3.568758697031508e-05, "loss": 2.150639533996582, "memory(GiB)": 72.85, "step": 69165, "token_acc": 0.532, "train_speed(iter/s)": 0.672705 }, { "epoch": 2.963454864830127, "grad_norm": 5.357796669006348, "learning_rate": 3.5681138931019046e-05, "loss": 2.3168697357177734, "memory(GiB)": 72.85, "step": 69170, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.672691 }, { "epoch": 2.9636690801593764, "grad_norm": 5.069322109222412, "learning_rate": 3.5674691151122605e-05, "loss": 2.3019845962524412, "memory(GiB)": 72.85, "step": 69175, "token_acc": 0.5014749262536873, "train_speed(iter/s)": 0.672701 }, { "epoch": 2.963883295488625, "grad_norm": 4.308399677276611, "learning_rate": 3.5668243630742565e-05, "loss": 2.2815374374389648, "memory(GiB)": 72.85, 
"step": 69180, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.6727 }, { "epoch": 2.964097510817874, "grad_norm": 4.298670768737793, "learning_rate": 3.566179636999575e-05, "loss": 2.1749752044677733, "memory(GiB)": 72.85, "step": 69185, "token_acc": 0.517799352750809, "train_speed(iter/s)": 0.672695 }, { "epoch": 2.9643117261471232, "grad_norm": 5.092456340789795, "learning_rate": 3.5655349368998944e-05, "loss": 2.2477956771850587, "memory(GiB)": 72.85, "step": 69190, "token_acc": 0.5124555160142349, "train_speed(iter/s)": 0.672685 }, { "epoch": 2.964525941476372, "grad_norm": 5.94182014465332, "learning_rate": 3.564890262786893e-05, "loss": 2.226042366027832, "memory(GiB)": 72.85, "step": 69195, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.672692 }, { "epoch": 2.964740156805621, "grad_norm": 4.905686378479004, "learning_rate": 3.5642456146722514e-05, "loss": 2.4151750564575196, "memory(GiB)": 72.85, "step": 69200, "token_acc": 0.4479495268138801, "train_speed(iter/s)": 0.672698 }, { "epoch": 2.96495437213487, "grad_norm": 4.273608207702637, "learning_rate": 3.563600992567646e-05, "loss": 2.113592338562012, "memory(GiB)": 72.85, "step": 69205, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.672704 }, { "epoch": 2.965168587464119, "grad_norm": 4.009890556335449, "learning_rate": 3.562956396484758e-05, "loss": 2.2199535369873047, "memory(GiB)": 72.85, "step": 69210, "token_acc": 0.5285285285285285, "train_speed(iter/s)": 0.672707 }, { "epoch": 2.9653828027933677, "grad_norm": 5.168210983276367, "learning_rate": 3.5623118264352624e-05, "loss": 2.2217218399047853, "memory(GiB)": 72.85, "step": 69215, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672715 }, { "epoch": 2.965597018122617, "grad_norm": 4.346342086791992, "learning_rate": 3.561667282430836e-05, "loss": 2.1399837493896485, "memory(GiB)": 72.85, "step": 69220, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672724 }, { "epoch": 2.965811233451866, 
"grad_norm": 4.287571430206299, "learning_rate": 3.561022764483157e-05, "loss": 2.318773078918457, "memory(GiB)": 72.85, "step": 69225, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.672714 }, { "epoch": 2.9660254487811146, "grad_norm": 5.548887729644775, "learning_rate": 3.5603782726039004e-05, "loss": 2.409805107116699, "memory(GiB)": 72.85, "step": 69230, "token_acc": 0.5220125786163522, "train_speed(iter/s)": 0.672714 }, { "epoch": 2.966239664110364, "grad_norm": 5.975102424621582, "learning_rate": 3.559733806804739e-05, "loss": 2.439902496337891, "memory(GiB)": 72.85, "step": 69235, "token_acc": 0.47843137254901963, "train_speed(iter/s)": 0.672719 }, { "epoch": 2.9664538794396127, "grad_norm": 5.349637985229492, "learning_rate": 3.559089367097354e-05, "loss": 2.0353660583496094, "memory(GiB)": 72.85, "step": 69240, "token_acc": 0.5659574468085107, "train_speed(iter/s)": 0.672723 }, { "epoch": 2.9666680947688615, "grad_norm": 5.703014850616455, "learning_rate": 3.5584449534934156e-05, "loss": 2.4022495269775392, "memory(GiB)": 72.85, "step": 69245, "token_acc": 0.45733788395904434, "train_speed(iter/s)": 0.67274 }, { "epoch": 2.9668823100981108, "grad_norm": 4.7298359870910645, "learning_rate": 3.557800566004599e-05, "loss": 2.2992639541625977, "memory(GiB)": 72.85, "step": 69250, "token_acc": 0.5017421602787456, "train_speed(iter/s)": 0.672739 }, { "epoch": 2.9670965254273596, "grad_norm": 4.3261189460754395, "learning_rate": 3.557156204642579e-05, "loss": 2.1197784423828123, "memory(GiB)": 72.85, "step": 69255, "token_acc": 0.5448275862068965, "train_speed(iter/s)": 0.672741 }, { "epoch": 2.9673107407566084, "grad_norm": 6.186577320098877, "learning_rate": 3.556511869419028e-05, "loss": 2.3220073699951174, "memory(GiB)": 72.85, "step": 69260, "token_acc": 0.5684647302904564, "train_speed(iter/s)": 0.672743 }, { "epoch": 2.9675249560858576, "grad_norm": 4.375716686248779, "learning_rate": 3.555867560345617e-05, "loss": 2.4518333435058595, 
"memory(GiB)": 72.85, "step": 69265, "token_acc": 0.5304054054054054, "train_speed(iter/s)": 0.672738 }, { "epoch": 2.9677391714151065, "grad_norm": 5.200732707977295, "learning_rate": 3.5552232774340216e-05, "loss": 2.4088933944702147, "memory(GiB)": 72.85, "step": 69270, "token_acc": 0.5131578947368421, "train_speed(iter/s)": 0.672746 }, { "epoch": 2.9679533867443553, "grad_norm": 6.3148064613342285, "learning_rate": 3.554579020695912e-05, "loss": 2.20330867767334, "memory(GiB)": 72.85, "step": 69275, "token_acc": 0.49635036496350365, "train_speed(iter/s)": 0.672758 }, { "epoch": 2.9681676020736045, "grad_norm": 4.456871509552002, "learning_rate": 3.5539347901429575e-05, "loss": 2.4565677642822266, "memory(GiB)": 72.85, "step": 69280, "token_acc": 0.5, "train_speed(iter/s)": 0.67275 }, { "epoch": 2.9683818174028533, "grad_norm": 5.636627674102783, "learning_rate": 3.553290585786833e-05, "loss": 2.206848907470703, "memory(GiB)": 72.85, "step": 69285, "token_acc": 0.4935064935064935, "train_speed(iter/s)": 0.672739 }, { "epoch": 2.968596032732102, "grad_norm": 5.270667552947998, "learning_rate": 3.552646407639204e-05, "loss": 2.130675506591797, "memory(GiB)": 72.85, "step": 69290, "token_acc": 0.5050847457627119, "train_speed(iter/s)": 0.672742 }, { "epoch": 2.9688102480613514, "grad_norm": 6.9366021156311035, "learning_rate": 3.552002255711745e-05, "loss": 2.485761260986328, "memory(GiB)": 72.85, "step": 69295, "token_acc": 0.45555555555555555, "train_speed(iter/s)": 0.672758 }, { "epoch": 2.9690244633906, "grad_norm": 5.613895416259766, "learning_rate": 3.5513581300161236e-05, "loss": 2.687274360656738, "memory(GiB)": 72.85, "step": 69300, "token_acc": 0.47733333333333333, "train_speed(iter/s)": 0.672757 }, { "epoch": 2.969238678719849, "grad_norm": 5.424663066864014, "learning_rate": 3.550714030564006e-05, "loss": 1.7718955993652343, "memory(GiB)": 72.85, "step": 69305, "token_acc": 0.5867158671586716, "train_speed(iter/s)": 0.672763 }, { "epoch": 
2.9694528940490983, "grad_norm": 3.8361785411834717, "learning_rate": 3.550069957367065e-05, "loss": 1.9559232711791992, "memory(GiB)": 72.85, "step": 69310, "token_acc": 0.5285714285714286, "train_speed(iter/s)": 0.672759 }, { "epoch": 2.969667109378347, "grad_norm": 5.277312278747559, "learning_rate": 3.5494259104369676e-05, "loss": 2.5138214111328123, "memory(GiB)": 72.85, "step": 69315, "token_acc": 0.4414715719063545, "train_speed(iter/s)": 0.672752 }, { "epoch": 2.969881324707596, "grad_norm": 5.3415632247924805, "learning_rate": 3.5487818897853805e-05, "loss": 2.5204656600952147, "memory(GiB)": 72.85, "step": 69320, "token_acc": 0.4697508896797153, "train_speed(iter/s)": 0.672749 }, { "epoch": 2.970095540036845, "grad_norm": 5.139737606048584, "learning_rate": 3.5481378954239694e-05, "loss": 2.3441715240478516, "memory(GiB)": 72.85, "step": 69325, "token_acc": 0.4823529411764706, "train_speed(iter/s)": 0.672758 }, { "epoch": 2.970309755366094, "grad_norm": 6.052884578704834, "learning_rate": 3.547493927364404e-05, "loss": 2.1456972122192384, "memory(GiB)": 72.85, "step": 69330, "token_acc": 0.5517241379310345, "train_speed(iter/s)": 0.672755 }, { "epoch": 2.970523970695343, "grad_norm": 4.938274383544922, "learning_rate": 3.546849985618348e-05, "loss": 2.0785308837890626, "memory(GiB)": 72.85, "step": 69335, "token_acc": 0.5369774919614148, "train_speed(iter/s)": 0.672744 }, { "epoch": 2.970738186024592, "grad_norm": 5.626301288604736, "learning_rate": 3.546206070197468e-05, "loss": 2.0875722885131838, "memory(GiB)": 72.85, "step": 69340, "token_acc": 0.5517241379310345, "train_speed(iter/s)": 0.672751 }, { "epoch": 2.970952401353841, "grad_norm": 7.157534599304199, "learning_rate": 3.5455621811134285e-05, "loss": 2.2851917266845705, "memory(GiB)": 72.85, "step": 69345, "token_acc": 0.4795539033457249, "train_speed(iter/s)": 0.67274 }, { "epoch": 2.9711666166830897, "grad_norm": 4.828558921813965, "learning_rate": 3.5449183183778936e-05, "loss": 
2.516948127746582, "memory(GiB)": 72.85, "step": 69350, "token_acc": 0.5, "train_speed(iter/s)": 0.672726 }, { "epoch": 2.971380832012339, "grad_norm": 4.677064895629883, "learning_rate": 3.54427448200253e-05, "loss": 1.9753395080566407, "memory(GiB)": 72.85, "step": 69355, "token_acc": 0.5338983050847458, "train_speed(iter/s)": 0.672735 }, { "epoch": 2.9715950473415877, "grad_norm": 7.006875514984131, "learning_rate": 3.543630671998999e-05, "loss": 2.420791244506836, "memory(GiB)": 72.85, "step": 69360, "token_acc": 0.5088967971530249, "train_speed(iter/s)": 0.672736 }, { "epoch": 2.9718092626708366, "grad_norm": 4.164816379547119, "learning_rate": 3.542986888378964e-05, "loss": 2.31973876953125, "memory(GiB)": 72.85, "step": 69365, "token_acc": 0.5309446254071661, "train_speed(iter/s)": 0.672742 }, { "epoch": 2.972023478000086, "grad_norm": 4.18321418762207, "learning_rate": 3.542343131154089e-05, "loss": 2.414691162109375, "memory(GiB)": 72.85, "step": 69370, "token_acc": 0.4713375796178344, "train_speed(iter/s)": 0.672748 }, { "epoch": 2.9722376933293346, "grad_norm": 4.77921724319458, "learning_rate": 3.541699400336036e-05, "loss": 2.386468505859375, "memory(GiB)": 72.85, "step": 69375, "token_acc": 0.47075208913649025, "train_speed(iter/s)": 0.672746 }, { "epoch": 2.9724519086585834, "grad_norm": 4.905314922332764, "learning_rate": 3.5410556959364636e-05, "loss": 2.2223337173461912, "memory(GiB)": 72.85, "step": 69380, "token_acc": 0.504, "train_speed(iter/s)": 0.672748 }, { "epoch": 2.9726661239878327, "grad_norm": 5.663028717041016, "learning_rate": 3.5404120179670376e-05, "loss": 2.3810630798339845, "memory(GiB)": 72.85, "step": 69385, "token_acc": 0.4924924924924925, "train_speed(iter/s)": 0.672737 }, { "epoch": 2.9728803393170815, "grad_norm": 4.653260231018066, "learning_rate": 3.5397683664394174e-05, "loss": 2.1507102966308596, "memory(GiB)": 72.85, "step": 69390, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.672741 }, { "epoch": 
2.9730945546463303, "grad_norm": 3.7907910346984863, "learning_rate": 3.539124741365263e-05, "loss": 1.9681955337524415, "memory(GiB)": 72.85, "step": 69395, "token_acc": 0.5347222222222222, "train_speed(iter/s)": 0.67274 }, { "epoch": 2.9733087699755796, "grad_norm": 5.137685775756836, "learning_rate": 3.538481142756235e-05, "loss": 2.1252601623535154, "memory(GiB)": 72.85, "step": 69400, "token_acc": 0.532871972318339, "train_speed(iter/s)": 0.672748 }, { "epoch": 2.9735229853048284, "grad_norm": 4.185038089752197, "learning_rate": 3.537837570623992e-05, "loss": 2.298489570617676, "memory(GiB)": 72.85, "step": 69405, "token_acc": 0.5296052631578947, "train_speed(iter/s)": 0.672755 }, { "epoch": 2.973737200634077, "grad_norm": 4.430057525634766, "learning_rate": 3.537194024980192e-05, "loss": 2.480138397216797, "memory(GiB)": 72.85, "step": 69410, "token_acc": 0.4702194357366771, "train_speed(iter/s)": 0.672762 }, { "epoch": 2.9739514159633265, "grad_norm": 6.921762943267822, "learning_rate": 3.5365505058364953e-05, "loss": 2.1573602676391603, "memory(GiB)": 72.85, "step": 69415, "token_acc": 0.5450980392156862, "train_speed(iter/s)": 0.672766 }, { "epoch": 2.9741656312925753, "grad_norm": 5.3830037117004395, "learning_rate": 3.535907013204559e-05, "loss": 2.507537841796875, "memory(GiB)": 72.85, "step": 69420, "token_acc": 0.4720496894409938, "train_speed(iter/s)": 0.672761 }, { "epoch": 2.974379846621824, "grad_norm": 4.9110541343688965, "learning_rate": 3.53526354709604e-05, "loss": 2.046089935302734, "memory(GiB)": 72.85, "step": 69425, "token_acc": 0.547945205479452, "train_speed(iter/s)": 0.672763 }, { "epoch": 2.9745940619510733, "grad_norm": 4.405577182769775, "learning_rate": 3.5346201075225965e-05, "loss": 2.1638824462890627, "memory(GiB)": 72.85, "step": 69430, "token_acc": 0.5348101265822784, "train_speed(iter/s)": 0.672753 }, { "epoch": 2.974808277280322, "grad_norm": 5.586616516113281, "learning_rate": 3.5339766944958837e-05, "loss": 
2.4894182205200197, "memory(GiB)": 72.85, "step": 69435, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.672756 }, { "epoch": 2.975022492609571, "grad_norm": 6.246866226196289, "learning_rate": 3.533333308027559e-05, "loss": 2.476802635192871, "memory(GiB)": 72.85, "step": 69440, "token_acc": 0.44, "train_speed(iter/s)": 0.672759 }, { "epoch": 2.9752367079388202, "grad_norm": 4.369438171386719, "learning_rate": 3.532689948129277e-05, "loss": 2.0605358123779296, "memory(GiB)": 72.85, "step": 69445, "token_acc": 0.5313531353135313, "train_speed(iter/s)": 0.672758 }, { "epoch": 2.975450923268069, "grad_norm": 3.8880419731140137, "learning_rate": 3.532046614812693e-05, "loss": 2.2652610778808593, "memory(GiB)": 72.85, "step": 69450, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.672758 }, { "epoch": 2.975665138597318, "grad_norm": 5.646951675415039, "learning_rate": 3.53140330808946e-05, "loss": 2.3247461318969727, "memory(GiB)": 72.85, "step": 69455, "token_acc": 0.5, "train_speed(iter/s)": 0.672754 }, { "epoch": 2.975879353926567, "grad_norm": 5.796933174133301, "learning_rate": 3.5307600279712364e-05, "loss": 2.0771690368652345, "memory(GiB)": 72.85, "step": 69460, "token_acc": 0.5508474576271186, "train_speed(iter/s)": 0.67276 }, { "epoch": 2.976093569255816, "grad_norm": 4.503629207611084, "learning_rate": 3.530116774469672e-05, "loss": 1.9615293502807618, "memory(GiB)": 72.85, "step": 69465, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.672764 }, { "epoch": 2.9763077845850647, "grad_norm": 4.6487884521484375, "learning_rate": 3.52947354759642e-05, "loss": 2.52639217376709, "memory(GiB)": 72.85, "step": 69470, "token_acc": 0.5048231511254019, "train_speed(iter/s)": 0.672772 }, { "epoch": 2.976521999914314, "grad_norm": 5.415521621704102, "learning_rate": 3.528830347363136e-05, "loss": 2.1311008453369142, "memory(GiB)": 72.85, "step": 69475, "token_acc": 0.5326797385620915, "train_speed(iter/s)": 0.672773 }, { "epoch": 
2.976736215243563, "grad_norm": 5.111537933349609, "learning_rate": 3.52818717378147e-05, "loss": 2.0915225982666015, "memory(GiB)": 72.85, "step": 69480, "token_acc": 0.5461847389558233, "train_speed(iter/s)": 0.672771 }, { "epoch": 2.9769504305728116, "grad_norm": 5.075310707092285, "learning_rate": 3.527544026863073e-05, "loss": 2.63409423828125, "memory(GiB)": 72.85, "step": 69485, "token_acc": 0.5118110236220472, "train_speed(iter/s)": 0.67278 }, { "epoch": 2.977164645902061, "grad_norm": 4.193329811096191, "learning_rate": 3.526900906619599e-05, "loss": 2.3440196990966795, "memory(GiB)": 72.85, "step": 69490, "token_acc": 0.49363057324840764, "train_speed(iter/s)": 0.672776 }, { "epoch": 2.9773788612313097, "grad_norm": 4.499414920806885, "learning_rate": 3.526257813062696e-05, "loss": 2.226639747619629, "memory(GiB)": 72.85, "step": 69495, "token_acc": 0.5017667844522968, "train_speed(iter/s)": 0.672779 }, { "epoch": 2.9775930765605585, "grad_norm": 3.7146987915039062, "learning_rate": 3.525614746204015e-05, "loss": 2.151082229614258, "memory(GiB)": 72.85, "step": 69500, "token_acc": 0.5326797385620915, "train_speed(iter/s)": 0.672777 }, { "epoch": 2.9775930765605585, "eval_loss": 2.109278440475464, "eval_runtime": 14.9352, "eval_samples_per_second": 6.696, "eval_steps_per_second": 6.696, "eval_token_acc": 0.4956268221574344, "step": 69500 }, { "epoch": 2.9778072918898078, "grad_norm": 7.192903995513916, "learning_rate": 3.524971706055206e-05, "loss": 2.364838981628418, "memory(GiB)": 72.85, "step": 69505, "token_acc": 0.49461206896551724, "train_speed(iter/s)": 0.67266 }, { "epoch": 2.9780215072190566, "grad_norm": 5.055431842803955, "learning_rate": 3.524328692627918e-05, "loss": 2.131207084655762, "memory(GiB)": 72.85, "step": 69510, "token_acc": 0.5035211267605634, "train_speed(iter/s)": 0.672647 }, { "epoch": 2.9782357225483054, "grad_norm": 3.6041057109832764, "learning_rate": 3.523685705933801e-05, "loss": 2.242165756225586, "memory(GiB)": 72.85, 
"step": 69515, "token_acc": 0.5360501567398119, "train_speed(iter/s)": 0.672648 }, { "epoch": 2.9784499378775546, "grad_norm": 4.319948673248291, "learning_rate": 3.523042745984502e-05, "loss": 2.3366336822509766, "memory(GiB)": 72.85, "step": 69520, "token_acc": 0.5060975609756098, "train_speed(iter/s)": 0.672661 }, { "epoch": 2.9786641532068034, "grad_norm": 8.968033790588379, "learning_rate": 3.522399812791668e-05, "loss": 2.253777503967285, "memory(GiB)": 72.85, "step": 69525, "token_acc": 0.5061224489795918, "train_speed(iter/s)": 0.672671 }, { "epoch": 2.9788783685360523, "grad_norm": 6.826650142669678, "learning_rate": 3.521756906366946e-05, "loss": 2.2199092864990235, "memory(GiB)": 72.85, "step": 69530, "token_acc": 0.5418181818181819, "train_speed(iter/s)": 0.67267 }, { "epoch": 2.9790925838653015, "grad_norm": 3.5300722122192383, "learning_rate": 3.5211140267219876e-05, "loss": 2.1365192413330076, "memory(GiB)": 72.85, "step": 69535, "token_acc": 0.48986486486486486, "train_speed(iter/s)": 0.672668 }, { "epoch": 2.9793067991945503, "grad_norm": 4.886340141296387, "learning_rate": 3.520471173868435e-05, "loss": 2.2132408142089846, "memory(GiB)": 72.85, "step": 69540, "token_acc": 0.5327868852459017, "train_speed(iter/s)": 0.672666 }, { "epoch": 2.979521014523799, "grad_norm": 5.2348313331604, "learning_rate": 3.519828347817935e-05, "loss": 2.3674230575561523, "memory(GiB)": 72.85, "step": 69545, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.672671 }, { "epoch": 2.9797352298530484, "grad_norm": 5.382399082183838, "learning_rate": 3.519185548582133e-05, "loss": 2.1959667205810547, "memory(GiB)": 72.85, "step": 69550, "token_acc": 0.5331010452961672, "train_speed(iter/s)": 0.672675 }, { "epoch": 2.979949445182297, "grad_norm": 5.687472343444824, "learning_rate": 3.518542776172674e-05, "loss": 2.1743053436279296, "memory(GiB)": 72.85, "step": 69555, "token_acc": 0.5314465408805031, "train_speed(iter/s)": 0.672677 }, { "epoch": 2.980163660511546, 
"grad_norm": 4.737326622009277, "learning_rate": 3.517900030601202e-05, "loss": 2.3793869018554688, "memory(GiB)": 72.85, "step": 69560, "token_acc": 0.5170278637770898, "train_speed(iter/s)": 0.672685 }, { "epoch": 2.9803778758407953, "grad_norm": 3.665203094482422, "learning_rate": 3.5172573118793615e-05, "loss": 2.147702217102051, "memory(GiB)": 72.85, "step": 69565, "token_acc": 0.5551724137931034, "train_speed(iter/s)": 0.672681 }, { "epoch": 2.980592091170044, "grad_norm": 4.745710372924805, "learning_rate": 3.516614620018796e-05, "loss": 2.2604238510131838, "memory(GiB)": 72.85, "step": 69570, "token_acc": 0.5015105740181269, "train_speed(iter/s)": 0.672685 }, { "epoch": 2.980806306499293, "grad_norm": 4.470069408416748, "learning_rate": 3.515971955031148e-05, "loss": 2.2196935653686523, "memory(GiB)": 72.85, "step": 69575, "token_acc": 0.52, "train_speed(iter/s)": 0.672688 }, { "epoch": 2.981020521828542, "grad_norm": 5.617458820343018, "learning_rate": 3.5153293169280594e-05, "loss": 2.270661163330078, "memory(GiB)": 72.85, "step": 69580, "token_acc": 0.48881789137380194, "train_speed(iter/s)": 0.6727 }, { "epoch": 2.981234737157791, "grad_norm": 5.596992492675781, "learning_rate": 3.514686705721174e-05, "loss": 2.287739372253418, "memory(GiB)": 72.85, "step": 69585, "token_acc": 0.5106382978723404, "train_speed(iter/s)": 0.672697 }, { "epoch": 2.98144895248704, "grad_norm": 3.860527515411377, "learning_rate": 3.514044121422132e-05, "loss": 2.1435209274291993, "memory(GiB)": 72.85, "step": 69590, "token_acc": 0.5342019543973942, "train_speed(iter/s)": 0.672693 }, { "epoch": 2.981663167816289, "grad_norm": 6.121658802032471, "learning_rate": 3.513401564042574e-05, "loss": 2.1497100830078124, "memory(GiB)": 72.85, "step": 69595, "token_acc": 0.5069637883008357, "train_speed(iter/s)": 0.672692 }, { "epoch": 2.981877383145538, "grad_norm": 5.244606018066406, "learning_rate": 3.5127590335941424e-05, "loss": 2.4782939910888673, "memory(GiB)": 72.85, "step": 
69600, "token_acc": 0.45874587458745875, "train_speed(iter/s)": 0.672684 }, { "epoch": 2.9820915984747867, "grad_norm": 5.091803073883057, "learning_rate": 3.512116530088474e-05, "loss": 2.0182518005371093, "memory(GiB)": 72.85, "step": 69605, "token_acc": 0.5423076923076923, "train_speed(iter/s)": 0.672681 }, { "epoch": 2.982305813804036, "grad_norm": 5.453753471374512, "learning_rate": 3.5114740535372115e-05, "loss": 1.975509262084961, "memory(GiB)": 72.85, "step": 69610, "token_acc": 0.5642633228840125, "train_speed(iter/s)": 0.672688 }, { "epoch": 2.9825200291332847, "grad_norm": 4.4225754737854, "learning_rate": 3.510831603951993e-05, "loss": 2.1806398391723634, "memory(GiB)": 72.85, "step": 69615, "token_acc": 0.5141955835962145, "train_speed(iter/s)": 0.672686 }, { "epoch": 2.9827342444625335, "grad_norm": 4.560598373413086, "learning_rate": 3.510189181344457e-05, "loss": 2.5012933731079103, "memory(GiB)": 72.85, "step": 69620, "token_acc": 0.4765840220385675, "train_speed(iter/s)": 0.672694 }, { "epoch": 2.982948459791783, "grad_norm": 4.350207328796387, "learning_rate": 3.509546785726243e-05, "loss": 2.0036006927490235, "memory(GiB)": 72.85, "step": 69625, "token_acc": 0.5845588235294118, "train_speed(iter/s)": 0.6727 }, { "epoch": 2.9831626751210316, "grad_norm": 4.282087326049805, "learning_rate": 3.508904417108985e-05, "loss": 1.940073013305664, "memory(GiB)": 72.85, "step": 69630, "token_acc": 0.5434083601286174, "train_speed(iter/s)": 0.672698 }, { "epoch": 2.9833768904502804, "grad_norm": 4.259242057800293, "learning_rate": 3.508262075504323e-05, "loss": 2.234671974182129, "memory(GiB)": 72.85, "step": 69635, "token_acc": 0.51, "train_speed(iter/s)": 0.672704 }, { "epoch": 2.9835911057795297, "grad_norm": 3.8365848064422607, "learning_rate": 3.507619760923892e-05, "loss": 2.3447498321533202, "memory(GiB)": 72.85, "step": 69640, "token_acc": 0.4777777777777778, "train_speed(iter/s)": 0.672713 }, { "epoch": 2.9838053211087785, "grad_norm": 
4.720909595489502, "learning_rate": 3.5069774733793305e-05, "loss": 2.3156364440917967, "memory(GiB)": 72.85, "step": 69645, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.672724 }, { "epoch": 2.9840195364380273, "grad_norm": 5.883677005767822, "learning_rate": 3.506463662817325e-05, "loss": 2.4494497299194338, "memory(GiB)": 72.85, "step": 69650, "token_acc": 0.48846153846153845, "train_speed(iter/s)": 0.672725 }, { "epoch": 2.9842337517672766, "grad_norm": 4.387364387512207, "learning_rate": 3.505821423966646e-05, "loss": 2.0641136169433594, "memory(GiB)": 72.85, "step": 69655, "token_acc": 0.5635179153094463, "train_speed(iter/s)": 0.672718 }, { "epoch": 2.9844479670965254, "grad_norm": 6.118068218231201, "learning_rate": 3.5051792121844145e-05, "loss": 2.3498519897460937, "memory(GiB)": 72.85, "step": 69660, "token_acc": 0.47101449275362317, "train_speed(iter/s)": 0.672728 }, { "epoch": 2.984662182425774, "grad_norm": 5.375743389129639, "learning_rate": 3.504537027482263e-05, "loss": 2.200168991088867, "memory(GiB)": 72.85, "step": 69665, "token_acc": 0.5015673981191222, "train_speed(iter/s)": 0.672733 }, { "epoch": 2.9848763977550234, "grad_norm": 4.902660846710205, "learning_rate": 3.503894869871828e-05, "loss": 2.231443977355957, "memory(GiB)": 72.85, "step": 69670, "token_acc": 0.5377049180327869, "train_speed(iter/s)": 0.672739 }, { "epoch": 2.9850906130842723, "grad_norm": 5.328152179718018, "learning_rate": 3.5032527393647396e-05, "loss": 2.2889049530029295, "memory(GiB)": 72.85, "step": 69675, "token_acc": 0.4894366197183099, "train_speed(iter/s)": 0.672737 }, { "epoch": 2.985304828413521, "grad_norm": 5.0093207359313965, "learning_rate": 3.502610635972631e-05, "loss": 2.434408950805664, "memory(GiB)": 72.85, "step": 69680, "token_acc": 0.4858757062146893, "train_speed(iter/s)": 0.672744 }, { "epoch": 2.9855190437427703, "grad_norm": 5.781189441680908, "learning_rate": 3.5019685597071375e-05, "loss": 2.4425811767578125, "memory(GiB)": 72.85, 
"step": 69685, "token_acc": 0.4738675958188153, "train_speed(iter/s)": 0.672746 }, { "epoch": 2.985733259072019, "grad_norm": 5.771678447723389, "learning_rate": 3.501326510579888e-05, "loss": 2.0919952392578125, "memory(GiB)": 72.85, "step": 69690, "token_acc": 0.5239852398523985, "train_speed(iter/s)": 0.672756 }, { "epoch": 2.985947474401268, "grad_norm": 4.4679789543151855, "learning_rate": 3.500684488602513e-05, "loss": 2.5616382598876952, "memory(GiB)": 72.85, "step": 69695, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.672755 }, { "epoch": 2.986161689730517, "grad_norm": 5.5052409172058105, "learning_rate": 3.5000424937866474e-05, "loss": 2.3106834411621096, "memory(GiB)": 72.85, "step": 69700, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.672747 }, { "epoch": 2.986375905059766, "grad_norm": 4.8639912605285645, "learning_rate": 3.499400526143917e-05, "loss": 2.2911609649658202, "memory(GiB)": 72.85, "step": 69705, "token_acc": 0.5314465408805031, "train_speed(iter/s)": 0.672753 }, { "epoch": 2.986590120389015, "grad_norm": 5.057878017425537, "learning_rate": 3.498758585685955e-05, "loss": 2.2610774993896485, "memory(GiB)": 72.85, "step": 69710, "token_acc": 0.5211267605633803, "train_speed(iter/s)": 0.672759 }, { "epoch": 2.986804335718264, "grad_norm": 4.322267055511475, "learning_rate": 3.49811667242439e-05, "loss": 2.3891908645629885, "memory(GiB)": 72.85, "step": 69715, "token_acc": 0.5196078431372549, "train_speed(iter/s)": 0.672767 }, { "epoch": 2.987018551047513, "grad_norm": 5.162110328674316, "learning_rate": 3.4974747863708476e-05, "loss": 2.180319595336914, "memory(GiB)": 72.85, "step": 69720, "token_acc": 0.5053003533568905, "train_speed(iter/s)": 0.672771 }, { "epoch": 2.9872327663767617, "grad_norm": 5.26661491394043, "learning_rate": 3.49683292753696e-05, "loss": 2.060857582092285, "memory(GiB)": 72.85, "step": 69725, "token_acc": 0.5537459283387622, "train_speed(iter/s)": 0.672758 }, { "epoch": 2.987446981706011, 
"grad_norm": 5.147796630859375, "learning_rate": 3.4961910959343555e-05, "loss": 2.065703201293945, "memory(GiB)": 72.85, "step": 69730, "token_acc": 0.5209125475285171, "train_speed(iter/s)": 0.672768 }, { "epoch": 2.98766119703526, "grad_norm": 4.942749500274658, "learning_rate": 3.49554929157466e-05, "loss": 1.8551029205322265, "memory(GiB)": 72.85, "step": 69735, "token_acc": 0.5358361774744027, "train_speed(iter/s)": 0.672773 }, { "epoch": 2.9878754123645086, "grad_norm": 5.044088840484619, "learning_rate": 3.4949075144694985e-05, "loss": 2.4862958908081056, "memory(GiB)": 72.85, "step": 69740, "token_acc": 0.46956521739130436, "train_speed(iter/s)": 0.672768 }, { "epoch": 2.988089627693758, "grad_norm": 5.964745998382568, "learning_rate": 3.494265764630501e-05, "loss": 2.189412307739258, "memory(GiB)": 72.85, "step": 69745, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.672765 }, { "epoch": 2.9883038430230067, "grad_norm": 4.76537561416626, "learning_rate": 3.4936240420692915e-05, "loss": 2.170039176940918, "memory(GiB)": 72.85, "step": 69750, "token_acc": 0.5232974910394266, "train_speed(iter/s)": 0.67277 }, { "epoch": 2.9885180583522555, "grad_norm": 4.4625983238220215, "learning_rate": 3.492982346797495e-05, "loss": 2.387835884094238, "memory(GiB)": 72.85, "step": 69755, "token_acc": 0.48672566371681414, "train_speed(iter/s)": 0.672767 }, { "epoch": 2.9887322736815047, "grad_norm": 4.78431510925293, "learning_rate": 3.4923406788267375e-05, "loss": 2.4170312881469727, "memory(GiB)": 72.85, "step": 69760, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.672769 }, { "epoch": 2.9889464890107535, "grad_norm": 5.752712726593018, "learning_rate": 3.491699038168642e-05, "loss": 2.1237905502319334, "memory(GiB)": 72.85, "step": 69765, "token_acc": 0.5052631578947369, "train_speed(iter/s)": 0.672759 }, { "epoch": 2.9891607043400024, "grad_norm": 4.7122931480407715, "learning_rate": 3.4910574248348345e-05, "loss": 2.202231216430664, 
"memory(GiB)": 72.85, "step": 69770, "token_acc": 0.5068027210884354, "train_speed(iter/s)": 0.672768 }, { "epoch": 2.9893749196692516, "grad_norm": 5.700575351715088, "learning_rate": 3.4904158388369365e-05, "loss": 2.259379577636719, "memory(GiB)": 72.85, "step": 69775, "token_acc": 0.5017921146953405, "train_speed(iter/s)": 0.672766 }, { "epoch": 2.9895891349985004, "grad_norm": 4.8908514976501465, "learning_rate": 3.489774280186572e-05, "loss": 2.4322076797485352, "memory(GiB)": 72.85, "step": 69780, "token_acc": 0.47468354430379744, "train_speed(iter/s)": 0.672768 }, { "epoch": 2.9898033503277492, "grad_norm": 5.748246669769287, "learning_rate": 3.489132748895363e-05, "loss": 2.479381561279297, "memory(GiB)": 72.85, "step": 69785, "token_acc": 0.46332046332046334, "train_speed(iter/s)": 0.672775 }, { "epoch": 2.9900175656569985, "grad_norm": 5.500277519226074, "learning_rate": 3.4884912449749316e-05, "loss": 2.2559093475341796, "memory(GiB)": 72.85, "step": 69790, "token_acc": 0.46494464944649444, "train_speed(iter/s)": 0.672788 }, { "epoch": 2.9902317809862473, "grad_norm": 4.577823162078857, "learning_rate": 3.4878497684368984e-05, "loss": 2.247138023376465, "memory(GiB)": 72.85, "step": 69795, "token_acc": 0.5390334572490706, "train_speed(iter/s)": 0.672787 }, { "epoch": 2.990445996315496, "grad_norm": 5.872641086578369, "learning_rate": 3.487208319292885e-05, "loss": 2.2606485366821287, "memory(GiB)": 72.85, "step": 69800, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.67278 }, { "epoch": 2.9906602116447454, "grad_norm": 3.700470447540283, "learning_rate": 3.486566897554513e-05, "loss": 2.280674362182617, "memory(GiB)": 72.85, "step": 69805, "token_acc": 0.48024316109422494, "train_speed(iter/s)": 0.67279 }, { "epoch": 2.990874426973994, "grad_norm": 4.352335453033447, "learning_rate": 3.485925503233401e-05, "loss": 2.360974884033203, "memory(GiB)": 72.85, "step": 69810, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.672791 }, { 
"epoch": 2.991088642303243, "grad_norm": 4.300286769866943, "learning_rate": 3.485284136341169e-05, "loss": 2.078145980834961, "memory(GiB)": 72.85, "step": 69815, "token_acc": 0.5431654676258992, "train_speed(iter/s)": 0.672795 }, { "epoch": 2.9913028576324923, "grad_norm": 5.407598972320557, "learning_rate": 3.484642796889437e-05, "loss": 2.3371986389160155, "memory(GiB)": 72.85, "step": 69820, "token_acc": 0.5105740181268882, "train_speed(iter/s)": 0.672786 }, { "epoch": 2.991517072961741, "grad_norm": 3.997485399246216, "learning_rate": 3.4840014848898205e-05, "loss": 2.2605642318725585, "memory(GiB)": 72.85, "step": 69825, "token_acc": 0.5127388535031847, "train_speed(iter/s)": 0.672786 }, { "epoch": 2.99173128829099, "grad_norm": 3.3542046546936035, "learning_rate": 3.48336020035394e-05, "loss": 2.2214258193969725, "memory(GiB)": 72.85, "step": 69830, "token_acc": 0.4816053511705686, "train_speed(iter/s)": 0.672787 }, { "epoch": 2.991945503620239, "grad_norm": 5.718159198760986, "learning_rate": 3.4827189432934126e-05, "loss": 2.2193475723266602, "memory(GiB)": 72.85, "step": 69835, "token_acc": 0.4972375690607735, "train_speed(iter/s)": 0.672779 }, { "epoch": 2.992159718949488, "grad_norm": 4.598884582519531, "learning_rate": 3.4820777137198535e-05, "loss": 1.9169551849365234, "memory(GiB)": 72.85, "step": 69840, "token_acc": 0.5782312925170068, "train_speed(iter/s)": 0.672779 }, { "epoch": 2.9923739342787368, "grad_norm": 4.655243396759033, "learning_rate": 3.481436511644882e-05, "loss": 2.1407903671264648, "memory(GiB)": 72.85, "step": 69845, "token_acc": 0.5298245614035088, "train_speed(iter/s)": 0.672787 }, { "epoch": 2.992588149607986, "grad_norm": 5.3355183601379395, "learning_rate": 3.4807953370801115e-05, "loss": 2.385106658935547, "memory(GiB)": 72.85, "step": 69850, "token_acc": 0.4682274247491639, "train_speed(iter/s)": 0.672785 }, { "epoch": 2.992802364937235, "grad_norm": 4.3539252281188965, "learning_rate": 3.4801541900371595e-05, "loss": 
1.8319963455200194, "memory(GiB)": 72.85, "step": 69855, "token_acc": 0.5807560137457045, "train_speed(iter/s)": 0.672787 }, { "epoch": 2.9930165802664837, "grad_norm": 4.5532307624816895, "learning_rate": 3.4795130705276404e-05, "loss": 2.2624040603637696, "memory(GiB)": 72.85, "step": 69860, "token_acc": 0.48704663212435234, "train_speed(iter/s)": 0.672787 }, { "epoch": 2.993230795595733, "grad_norm": 5.038718223571777, "learning_rate": 3.478871978563167e-05, "loss": 2.207240867614746, "memory(GiB)": 72.85, "step": 69865, "token_acc": 0.47, "train_speed(iter/s)": 0.672774 }, { "epoch": 2.9934450109249817, "grad_norm": 5.130584239959717, "learning_rate": 3.4782309141553536e-05, "loss": 1.9559370040893556, "memory(GiB)": 72.85, "step": 69870, "token_acc": 0.5461847389558233, "train_speed(iter/s)": 0.67276 }, { "epoch": 2.9936592262542305, "grad_norm": 5.516624927520752, "learning_rate": 3.4775898773158164e-05, "loss": 2.366144561767578, "memory(GiB)": 72.85, "step": 69875, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.672765 }, { "epoch": 2.99387344158348, "grad_norm": 6.124396324157715, "learning_rate": 3.476948868056167e-05, "loss": 2.378561592102051, "memory(GiB)": 72.85, "step": 69880, "token_acc": 0.4935897435897436, "train_speed(iter/s)": 0.672751 }, { "epoch": 2.9940876569127286, "grad_norm": 4.534250736236572, "learning_rate": 3.4763078863880174e-05, "loss": 2.1635648727416994, "memory(GiB)": 72.85, "step": 69885, "token_acc": 0.5245283018867924, "train_speed(iter/s)": 0.672748 }, { "epoch": 2.9943018722419774, "grad_norm": 5.2361860275268555, "learning_rate": 3.475666932322979e-05, "loss": 2.348451042175293, "memory(GiB)": 72.85, "step": 69890, "token_acc": 0.5233644859813084, "train_speed(iter/s)": 0.67275 }, { "epoch": 2.9945160875712267, "grad_norm": 6.458314418792725, "learning_rate": 3.475026005872666e-05, "loss": 2.210146903991699, "memory(GiB)": 72.85, "step": 69895, "token_acc": 0.5112781954887218, "train_speed(iter/s)": 0.672751 }, { 
"epoch": 2.9947303029004755, "grad_norm": 5.716375350952148, "learning_rate": 3.4743851070486853e-05, "loss": 2.3964508056640623, "memory(GiB)": 72.85, "step": 69900, "token_acc": 0.49382716049382713, "train_speed(iter/s)": 0.672753 }, { "epoch": 2.9949445182297243, "grad_norm": 5.623406887054443, "learning_rate": 3.473744235862652e-05, "loss": 2.167104148864746, "memory(GiB)": 72.85, "step": 69905, "token_acc": 0.5233644859813084, "train_speed(iter/s)": 0.672756 }, { "epoch": 2.9951587335589736, "grad_norm": 4.378422737121582, "learning_rate": 3.4731033923261705e-05, "loss": 2.582498550415039, "memory(GiB)": 72.85, "step": 69910, "token_acc": 0.5016077170418006, "train_speed(iter/s)": 0.672751 }, { "epoch": 2.9953729488882224, "grad_norm": 3.854370594024658, "learning_rate": 3.472462576450856e-05, "loss": 2.0525970458984375, "memory(GiB)": 72.85, "step": 69915, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672753 }, { "epoch": 2.995587164217471, "grad_norm": 3.9715778827667236, "learning_rate": 3.471821788248314e-05, "loss": 2.204121398925781, "memory(GiB)": 72.85, "step": 69920, "token_acc": 0.4913494809688581, "train_speed(iter/s)": 0.672759 }, { "epoch": 2.9958013795467204, "grad_norm": 7.104905128479004, "learning_rate": 3.471181027730153e-05, "loss": 2.2607791900634764, "memory(GiB)": 72.85, "step": 69925, "token_acc": 0.521875, "train_speed(iter/s)": 0.672753 }, { "epoch": 2.9960155948759692, "grad_norm": 4.459795951843262, "learning_rate": 3.4705402949079825e-05, "loss": 2.3063899993896486, "memory(GiB)": 72.85, "step": 69930, "token_acc": 0.5149253731343284, "train_speed(iter/s)": 0.672757 }, { "epoch": 2.996229810205218, "grad_norm": 4.760015964508057, "learning_rate": 3.46989958979341e-05, "loss": 2.295215606689453, "memory(GiB)": 72.85, "step": 69935, "token_acc": 0.5083798882681564, "train_speed(iter/s)": 0.672749 }, { "epoch": 2.9964440255344673, "grad_norm": 3.8972601890563965, "learning_rate": 3.46925891239804e-05, "loss": 
2.1774948120117186, "memory(GiB)": 72.85, "step": 69940, "token_acc": 0.49050632911392406, "train_speed(iter/s)": 0.672733 }, { "epoch": 2.996658240863716, "grad_norm": 5.605466842651367, "learning_rate": 3.4686182627334804e-05, "loss": 2.457560348510742, "memory(GiB)": 72.85, "step": 69945, "token_acc": 0.4645669291338583, "train_speed(iter/s)": 0.672732 }, { "epoch": 2.996872456192965, "grad_norm": 4.735770225524902, "learning_rate": 3.46797764081134e-05, "loss": 2.2614376068115236, "memory(GiB)": 72.85, "step": 69950, "token_acc": 0.5163636363636364, "train_speed(iter/s)": 0.672731 }, { "epoch": 2.997086671522214, "grad_norm": 5.473379135131836, "learning_rate": 3.46733704664322e-05, "loss": 2.3058197021484377, "memory(GiB)": 72.85, "step": 69955, "token_acc": 0.4895833333333333, "train_speed(iter/s)": 0.672739 }, { "epoch": 2.997300886851463, "grad_norm": 5.243511199951172, "learning_rate": 3.4666964802407274e-05, "loss": 2.0825759887695314, "memory(GiB)": 72.85, "step": 69960, "token_acc": 0.5382059800664452, "train_speed(iter/s)": 0.672739 }, { "epoch": 2.997515102180712, "grad_norm": 4.6015520095825195, "learning_rate": 3.466055941615468e-05, "loss": 2.0881738662719727, "memory(GiB)": 72.85, "step": 69965, "token_acc": 0.5469798657718121, "train_speed(iter/s)": 0.672735 }, { "epoch": 2.997729317509961, "grad_norm": 4.894510269165039, "learning_rate": 3.465415430779042e-05, "loss": 2.383139991760254, "memory(GiB)": 72.85, "step": 69970, "token_acc": 0.44664031620553357, "train_speed(iter/s)": 0.672728 }, { "epoch": 2.99794353283921, "grad_norm": 6.009816646575928, "learning_rate": 3.4647749477430566e-05, "loss": 2.3473526000976563, "memory(GiB)": 72.85, "step": 69975, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.672732 }, { "epoch": 2.9981577481684587, "grad_norm": 4.48258638381958, "learning_rate": 3.4641344925191134e-05, "loss": 2.285669708251953, "memory(GiB)": 72.85, "step": 69980, "token_acc": 0.5018181818181818, "train_speed(iter/s)": 
0.672737 }, { "epoch": 2.998371963497708, "grad_norm": 4.645113945007324, "learning_rate": 3.463494065118813e-05, "loss": 2.1252588272094726, "memory(GiB)": 72.85, "step": 69985, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.67273 }, { "epoch": 2.9985861788269568, "grad_norm": 6.3872785568237305, "learning_rate": 3.46285366555376e-05, "loss": 2.0771703720092773, "memory(GiB)": 72.85, "step": 69990, "token_acc": 0.5470383275261324, "train_speed(iter/s)": 0.672726 }, { "epoch": 2.9988003941562056, "grad_norm": 4.474422454833984, "learning_rate": 3.462213293835554e-05, "loss": 2.2965030670166016, "memory(GiB)": 72.85, "step": 69995, "token_acc": 0.5098814229249012, "train_speed(iter/s)": 0.672724 }, { "epoch": 2.999014609485455, "grad_norm": 4.633749485015869, "learning_rate": 3.4615729499757984e-05, "loss": 2.128719520568848, "memory(GiB)": 72.85, "step": 70000, "token_acc": 0.5427631578947368, "train_speed(iter/s)": 0.672732 }, { "epoch": 2.999014609485455, "eval_loss": 1.9727303981781006, "eval_runtime": 15.9026, "eval_samples_per_second": 6.288, "eval_steps_per_second": 6.288, "eval_token_acc": 0.49933774834437084, "step": 70000 }, { "epoch": 2.9992288248147037, "grad_norm": 4.5030083656311035, "learning_rate": 3.4609326339860917e-05, "loss": 2.3415145874023438, "memory(GiB)": 72.85, "step": 70005, "token_acc": 0.5004721435316336, "train_speed(iter/s)": 0.672621 }, { "epoch": 2.9994430401439525, "grad_norm": 5.572040557861328, "learning_rate": 3.460292345878032e-05, "loss": 2.1949846267700197, "memory(GiB)": 72.85, "step": 70010, "token_acc": 0.5498154981549815, "train_speed(iter/s)": 0.672623 }, { "epoch": 2.9996572554732017, "grad_norm": 4.247422218322754, "learning_rate": 3.459652085663223e-05, "loss": 2.248403549194336, "memory(GiB)": 72.85, "step": 70015, "token_acc": 0.4863013698630137, "train_speed(iter/s)": 0.672629 }, { "epoch": 2.9998714708024505, "grad_norm": 3.4202041625976562, "learning_rate": 3.459011853353259e-05, "loss": 
2.1879289627075194, "memory(GiB)": 72.85, "step": 70020, "token_acc": 0.5401234567901234, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.0000856861317, "grad_norm": 4.20212984085083, "learning_rate": 3.458371648959743e-05, "loss": 2.1143186569213865, "memory(GiB)": 72.85, "step": 70025, "token_acc": 0.5773195876288659, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.0002999014609486, "grad_norm": 5.238027572631836, "learning_rate": 3.45773147249427e-05, "loss": 2.0768823623657227, "memory(GiB)": 72.85, "step": 70030, "token_acc": 0.5625, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.0005141167901974, "grad_norm": 3.877317190170288, "learning_rate": 3.457091323968439e-05, "loss": 2.09281005859375, "memory(GiB)": 72.85, "step": 70035, "token_acc": 0.5401459854014599, "train_speed(iter/s)": 0.672655 }, { "epoch": 3.0007283321194467, "grad_norm": 5.599862098693848, "learning_rate": 3.4564512033938446e-05, "loss": 2.068215560913086, "memory(GiB)": 72.85, "step": 70040, "token_acc": 0.5484949832775919, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.0009425474486955, "grad_norm": 4.061886310577393, "learning_rate": 3.455811110782086e-05, "loss": 1.6782283782958984, "memory(GiB)": 72.85, "step": 70045, "token_acc": 0.5811320754716981, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.0011567627779443, "grad_norm": 4.346339702606201, "learning_rate": 3.4551710461447575e-05, "loss": 2.128091049194336, "memory(GiB)": 72.85, "step": 70050, "token_acc": 0.5057803468208093, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.0013709781071936, "grad_norm": 3.4996869564056396, "learning_rate": 3.4545310094934535e-05, "loss": 2.1911460876464846, "memory(GiB)": 72.85, "step": 70055, "token_acc": 0.516209476309227, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.0015851934364424, "grad_norm": 6.125337600708008, "learning_rate": 3.453891000839771e-05, "loss": 2.0336654663085936, "memory(GiB)": 72.85, "step": 70060, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.672648 }, { 
"epoch": 3.001799408765691, "grad_norm": 4.296307563781738, "learning_rate": 3.453251020195304e-05, "loss": 1.888907241821289, "memory(GiB)": 72.85, "step": 70065, "token_acc": 0.5718849840255591, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.0020136240949404, "grad_norm": 4.673353672027588, "learning_rate": 3.4526110675716436e-05, "loss": 1.9689149856567383, "memory(GiB)": 72.85, "step": 70070, "token_acc": 0.5493421052631579, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.0022278394241892, "grad_norm": 4.462831497192383, "learning_rate": 3.451971142980388e-05, "loss": 2.1320823669433593, "memory(GiB)": 72.85, "step": 70075, "token_acc": 0.511326860841424, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.002442054753438, "grad_norm": 5.78044319152832, "learning_rate": 3.451331246433126e-05, "loss": 1.8985706329345704, "memory(GiB)": 72.85, "step": 70080, "token_acc": 0.547945205479452, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.0026562700826873, "grad_norm": 4.578415870666504, "learning_rate": 3.4506913779414506e-05, "loss": 1.8695034027099608, "memory(GiB)": 72.85, "step": 70085, "token_acc": 0.5964285714285714, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.002870485411936, "grad_norm": 4.067234039306641, "learning_rate": 3.450051537516953e-05, "loss": 2.1835622787475586, "memory(GiB)": 72.85, "step": 70090, "token_acc": 0.5371621621621622, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.003084700741185, "grad_norm": 4.127302169799805, "learning_rate": 3.4494117251712285e-05, "loss": 2.1893810272216796, "memory(GiB)": 72.85, "step": 70095, "token_acc": 0.4894894894894895, "train_speed(iter/s)": 0.672627 }, { "epoch": 3.003298916070434, "grad_norm": 4.413608551025391, "learning_rate": 3.448771940915864e-05, "loss": 2.0301528930664063, "memory(GiB)": 72.85, "step": 70100, "token_acc": 0.5639344262295082, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.003513131399683, "grad_norm": 4.786779403686523, "learning_rate": 3.448132184762451e-05, "loss": 
1.904124641418457, "memory(GiB)": 72.85, "step": 70105, "token_acc": 0.5353535353535354, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.003727346728932, "grad_norm": 5.7275261878967285, "learning_rate": 3.4474924567225794e-05, "loss": 2.4026599884033204, "memory(GiB)": 72.85, "step": 70110, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.003941562058181, "grad_norm": 5.563304424285889, "learning_rate": 3.446852756807838e-05, "loss": 2.012409782409668, "memory(GiB)": 72.85, "step": 70115, "token_acc": 0.5669781931464174, "train_speed(iter/s)": 0.672619 }, { "epoch": 3.00415577738743, "grad_norm": 3.7245702743530273, "learning_rate": 3.446213085029817e-05, "loss": 2.138851547241211, "memory(GiB)": 72.85, "step": 70120, "token_acc": 0.5035714285714286, "train_speed(iter/s)": 0.672618 }, { "epoch": 3.0043699927166787, "grad_norm": 5.788553237915039, "learning_rate": 3.445573441400103e-05, "loss": 1.886031150817871, "memory(GiB)": 72.85, "step": 70125, "token_acc": 0.5787545787545788, "train_speed(iter/s)": 0.672624 }, { "epoch": 3.004584208045928, "grad_norm": 6.339607238769531, "learning_rate": 3.444933825930284e-05, "loss": 1.8413700103759765, "memory(GiB)": 72.85, "step": 70130, "token_acc": 0.56875, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.0047984233751768, "grad_norm": 5.444699287414551, "learning_rate": 3.444294238631948e-05, "loss": 2.0527423858642577, "memory(GiB)": 72.85, "step": 70135, "token_acc": 0.5432525951557093, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.0050126387044256, "grad_norm": 4.453230857849121, "learning_rate": 3.44365467951668e-05, "loss": 2.154783821105957, "memory(GiB)": 72.85, "step": 70140, "token_acc": 0.543918918918919, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.005226854033675, "grad_norm": 5.489436626434326, "learning_rate": 3.4430151485960685e-05, "loss": 2.2743148803710938, "memory(GiB)": 72.85, "step": 70145, "token_acc": 0.5189504373177842, "train_speed(iter/s)": 0.672631 }, { 
"epoch": 3.0054410693629237, "grad_norm": 9.17690658569336, "learning_rate": 3.4423756458816985e-05, "loss": 2.5169279098510744, "memory(GiB)": 72.85, "step": 70150, "token_acc": 0.5064516129032258, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.0056552846921725, "grad_norm": 7.205576419830322, "learning_rate": 3.441736171385154e-05, "loss": 2.1029075622558593, "memory(GiB)": 72.85, "step": 70155, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.0058695000214217, "grad_norm": 6.257798671722412, "learning_rate": 3.441096725118021e-05, "loss": 2.0954872131347657, "memory(GiB)": 72.85, "step": 70160, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.0060837153506705, "grad_norm": 4.868657112121582, "learning_rate": 3.440457307091881e-05, "loss": 1.7928630828857421, "memory(GiB)": 72.85, "step": 70165, "token_acc": 0.6042402826855123, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.0062979306799194, "grad_norm": 4.445572853088379, "learning_rate": 3.4398179173183226e-05, "loss": 1.8856338500976562, "memory(GiB)": 72.85, "step": 70170, "token_acc": 0.5676567656765676, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.0065121460091686, "grad_norm": 5.174871444702148, "learning_rate": 3.439178555808925e-05, "loss": 2.0025588989257814, "memory(GiB)": 72.85, "step": 70175, "token_acc": 0.5746031746031746, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.0067263613384174, "grad_norm": 6.606664180755615, "learning_rate": 3.438539222575273e-05, "loss": 2.1981880187988283, "memory(GiB)": 72.85, "step": 70180, "token_acc": 0.5361842105263158, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.0069405766676662, "grad_norm": 6.075148105621338, "learning_rate": 3.437899917628946e-05, "loss": 2.211027717590332, "memory(GiB)": 72.85, "step": 70185, "token_acc": 0.5284280936454849, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.0071547919969155, "grad_norm": 8.570656776428223, "learning_rate": 3.4372606409815296e-05, "loss": 
2.0497737884521485, "memory(GiB)": 72.85, "step": 70190, "token_acc": 0.5477031802120141, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.0073690073261643, "grad_norm": 5.781487464904785, "learning_rate": 3.436621392644602e-05, "loss": 2.257952117919922, "memory(GiB)": 72.85, "step": 70195, "token_acc": 0.5327380952380952, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.007583222655413, "grad_norm": 5.730810165405273, "learning_rate": 3.4359821726297433e-05, "loss": 2.2590030670166015, "memory(GiB)": 72.85, "step": 70200, "token_acc": 0.4868804664723032, "train_speed(iter/s)": 0.672668 }, { "epoch": 3.0077974379846624, "grad_norm": 6.761592388153076, "learning_rate": 3.435342980948536e-05, "loss": 1.981135940551758, "memory(GiB)": 72.85, "step": 70205, "token_acc": 0.5607142857142857, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.008011653313911, "grad_norm": 5.675887584686279, "learning_rate": 3.434703817612558e-05, "loss": 2.182461166381836, "memory(GiB)": 72.85, "step": 70210, "token_acc": 0.4984984984984985, "train_speed(iter/s)": 0.672668 }, { "epoch": 3.00822586864316, "grad_norm": 5.365435600280762, "learning_rate": 3.4340646826333875e-05, "loss": 2.2928308486938476, "memory(GiB)": 72.85, "step": 70215, "token_acc": 0.5517241379310345, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.0084400839724093, "grad_norm": 4.5745463371276855, "learning_rate": 3.433425576022606e-05, "loss": 2.380488395690918, "memory(GiB)": 72.85, "step": 70220, "token_acc": 0.4909090909090909, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.008654299301658, "grad_norm": 5.525644302368164, "learning_rate": 3.432786497791789e-05, "loss": 1.8797176361083985, "memory(GiB)": 72.85, "step": 70225, "token_acc": 0.5335463258785943, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.008868514630907, "grad_norm": 5.197679042816162, "learning_rate": 3.4321474479525154e-05, "loss": 1.932767677307129, "memory(GiB)": 72.85, "step": 70230, "token_acc": 0.5536912751677853, "train_speed(iter/s)": 
0.672659 }, { "epoch": 3.009082729960156, "grad_norm": 5.036160945892334, "learning_rate": 3.431508426516361e-05, "loss": 2.4263322830200194, "memory(GiB)": 72.85, "step": 70235, "token_acc": 0.5139318885448917, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.009296945289405, "grad_norm": 4.675755023956299, "learning_rate": 3.430869433494901e-05, "loss": 2.0756610870361327, "memory(GiB)": 72.85, "step": 70240, "token_acc": 0.542319749216301, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.0095111606186538, "grad_norm": 5.680154800415039, "learning_rate": 3.430230468899715e-05, "loss": 2.1830215454101562, "memory(GiB)": 72.85, "step": 70245, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.009725375947903, "grad_norm": 6.202547550201416, "learning_rate": 3.429591532742377e-05, "loss": 2.004569244384766, "memory(GiB)": 72.85, "step": 70250, "token_acc": 0.56656346749226, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.009939591277152, "grad_norm": 5.327023029327393, "learning_rate": 3.428952625034461e-05, "loss": 2.40557804107666, "memory(GiB)": 72.85, "step": 70255, "token_acc": 0.48493975903614456, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.0101538066064006, "grad_norm": 5.047275543212891, "learning_rate": 3.4283137457875415e-05, "loss": 2.149113082885742, "memory(GiB)": 72.85, "step": 70260, "token_acc": 0.5447761194029851, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.01036802193565, "grad_norm": 4.7111735343933105, "learning_rate": 3.427674895013194e-05, "loss": 2.232690620422363, "memory(GiB)": 72.85, "step": 70265, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.0105822372648987, "grad_norm": 4.533078193664551, "learning_rate": 3.427036072722991e-05, "loss": 2.078515815734863, "memory(GiB)": 72.85, "step": 70270, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.0107964525941475, "grad_norm": 4.438307285308838, "learning_rate": 3.426397278928504e-05, "loss": 
2.1514123916625976, "memory(GiB)": 72.85, "step": 70275, "token_acc": 0.4984984984984985, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.0110106679233968, "grad_norm": 6.334342956542969, "learning_rate": 3.4257585136413065e-05, "loss": 2.166422653198242, "memory(GiB)": 72.85, "step": 70280, "token_acc": 0.5588235294117647, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.0112248832526456, "grad_norm": 8.940037727355957, "learning_rate": 3.42511977687297e-05, "loss": 2.1608076095581055, "memory(GiB)": 72.85, "step": 70285, "token_acc": 0.5054545454545455, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.0114390985818944, "grad_norm": 9.778741836547852, "learning_rate": 3.424481068635067e-05, "loss": 2.0295150756835936, "memory(GiB)": 72.85, "step": 70290, "token_acc": 0.5676691729323309, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.0116533139111437, "grad_norm": 5.106609344482422, "learning_rate": 3.423842388939167e-05, "loss": 2.026382637023926, "memory(GiB)": 72.85, "step": 70295, "token_acc": 0.5612244897959183, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.0118675292403925, "grad_norm": 8.25857925415039, "learning_rate": 3.423203737796841e-05, "loss": 2.167383575439453, "memory(GiB)": 72.85, "step": 70300, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.0120817445696413, "grad_norm": 5.042719841003418, "learning_rate": 3.422565115219658e-05, "loss": 2.3242975234985352, "memory(GiB)": 72.85, "step": 70305, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.0122959598988905, "grad_norm": 6.111715316772461, "learning_rate": 3.421926521219189e-05, "loss": 2.0340953826904298, "memory(GiB)": 72.85, "step": 70310, "token_acc": 0.5774647887323944, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.0125101752281394, "grad_norm": 5.629879951477051, "learning_rate": 3.4212879558069976e-05, "loss": 2.0812862396240233, "memory(GiB)": 72.85, "step": 70315, "token_acc": 0.5243445692883895, "train_speed(iter/s)": 
0.672697 }, { "epoch": 3.012724390557388, "grad_norm": 6.099002838134766, "learning_rate": 3.420649418994658e-05, "loss": 1.8409105300903321, "memory(GiB)": 72.85, "step": 70320, "token_acc": 0.5466237942122186, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.0129386058866374, "grad_norm": 7.6256585121154785, "learning_rate": 3.420010910793737e-05, "loss": 1.756161880493164, "memory(GiB)": 72.85, "step": 70325, "token_acc": 0.6115384615384616, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.0131528212158862, "grad_norm": 4.679280757904053, "learning_rate": 3.419372431215799e-05, "loss": 1.9372833251953125, "memory(GiB)": 72.85, "step": 70330, "token_acc": 0.5586206896551724, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.013367036545135, "grad_norm": 5.774650573730469, "learning_rate": 3.4187339802724136e-05, "loss": 2.107070541381836, "memory(GiB)": 72.85, "step": 70335, "token_acc": 0.5, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.0135812518743843, "grad_norm": 5.764052867889404, "learning_rate": 3.418095557975145e-05, "loss": 2.0604419708251953, "memory(GiB)": 72.85, "step": 70340, "token_acc": 0.5396825396825397, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.013795467203633, "grad_norm": 5.8570876121521, "learning_rate": 3.417457164335558e-05, "loss": 2.099635696411133, "memory(GiB)": 72.85, "step": 70345, "token_acc": 0.5451388888888888, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.014009682532882, "grad_norm": 5.654277801513672, "learning_rate": 3.416818799365221e-05, "loss": 2.5152666091918947, "memory(GiB)": 72.85, "step": 70350, "token_acc": 0.46855345911949686, "train_speed(iter/s)": 0.672677 }, { "epoch": 3.014223897862131, "grad_norm": 5.175768852233887, "learning_rate": 3.416180463075696e-05, "loss": 2.113654136657715, "memory(GiB)": 72.85, "step": 70355, "token_acc": 0.51, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.01443811319138, "grad_norm": 6.437307834625244, "learning_rate": 3.4155421554785465e-05, "loss": 1.7479021072387695, 
"memory(GiB)": 72.85, "step": 70360, "token_acc": 0.5684647302904564, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.014652328520629, "grad_norm": 5.036139011383057, "learning_rate": 3.414903876585339e-05, "loss": 2.001984977722168, "memory(GiB)": 72.85, "step": 70365, "token_acc": 0.5335463258785943, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.014866543849878, "grad_norm": 5.943432331085205, "learning_rate": 3.4142656264076325e-05, "loss": 2.1274280548095703, "memory(GiB)": 72.85, "step": 70370, "token_acc": 0.5092936802973977, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.015080759179127, "grad_norm": 8.945252418518066, "learning_rate": 3.4136274049569936e-05, "loss": 2.100013542175293, "memory(GiB)": 72.85, "step": 70375, "token_acc": 0.5643564356435643, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.0152949745083757, "grad_norm": 8.387358665466309, "learning_rate": 3.4129892122449825e-05, "loss": 2.2506500244140626, "memory(GiB)": 72.85, "step": 70380, "token_acc": 0.5136363636363637, "train_speed(iter/s)": 0.672685 }, { "epoch": 3.015509189837625, "grad_norm": 6.683136463165283, "learning_rate": 3.4123510482831575e-05, "loss": 2.379273223876953, "memory(GiB)": 72.85, "step": 70385, "token_acc": 0.519298245614035, "train_speed(iter/s)": 0.672687 }, { "epoch": 3.0157234051668738, "grad_norm": 5.405837059020996, "learning_rate": 3.411712913083085e-05, "loss": 1.898929977416992, "memory(GiB)": 72.85, "step": 70390, "token_acc": 0.5447470817120622, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.0159376204961226, "grad_norm": 5.0799455642700195, "learning_rate": 3.4110748066563234e-05, "loss": 2.0157238006591798, "memory(GiB)": 72.85, "step": 70395, "token_acc": 0.5637583892617449, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.016151835825372, "grad_norm": 5.622128963470459, "learning_rate": 3.410436729014432e-05, "loss": 2.3122764587402345, "memory(GiB)": 72.85, "step": 70400, "token_acc": 0.48771929824561405, "train_speed(iter/s)": 0.672664 }, { 
"epoch": 3.0163660511546206, "grad_norm": 8.126216888427734, "learning_rate": 3.4097986801689694e-05, "loss": 2.2115745544433594, "memory(GiB)": 72.85, "step": 70405, "token_acc": 0.5236363636363637, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.0165802664838695, "grad_norm": 5.159111976623535, "learning_rate": 3.4091606601314965e-05, "loss": 2.145005226135254, "memory(GiB)": 72.85, "step": 70410, "token_acc": 0.49812734082397003, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.0167944818131187, "grad_norm": 6.203346252441406, "learning_rate": 3.408522668913571e-05, "loss": 1.97058162689209, "memory(GiB)": 72.85, "step": 70415, "token_acc": 0.5876288659793815, "train_speed(iter/s)": 0.672692 }, { "epoch": 3.0170086971423675, "grad_norm": 4.956916809082031, "learning_rate": 3.4078847065267485e-05, "loss": 1.7763128280639648, "memory(GiB)": 72.85, "step": 70420, "token_acc": 0.5814814814814815, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.0172229124716163, "grad_norm": 6.902529239654541, "learning_rate": 3.4072467729825894e-05, "loss": 2.1702884674072265, "memory(GiB)": 72.85, "step": 70425, "token_acc": 0.5368421052631579, "train_speed(iter/s)": 0.672699 }, { "epoch": 3.0174371278008656, "grad_norm": 6.799626350402832, "learning_rate": 3.406608868292648e-05, "loss": 1.9382394790649413, "memory(GiB)": 72.85, "step": 70430, "token_acc": 0.5640138408304498, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.0176513431301144, "grad_norm": 4.9807963371276855, "learning_rate": 3.4059709924684835e-05, "loss": 2.3534645080566405, "memory(GiB)": 72.85, "step": 70435, "token_acc": 0.5386996904024768, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.017865558459363, "grad_norm": 5.644111156463623, "learning_rate": 3.405333145521649e-05, "loss": 1.9384321212768554, "memory(GiB)": 72.85, "step": 70440, "token_acc": 0.5569620253164557, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.0180797737886125, "grad_norm": 5.150214195251465, "learning_rate": 3.4046953274636995e-05, "loss": 
2.050385665893555, "memory(GiB)": 72.85, "step": 70445, "token_acc": 0.5266457680250783, "train_speed(iter/s)": 0.67271 }, { "epoch": 3.0182939891178613, "grad_norm": 5.336150169372559, "learning_rate": 3.404057538306192e-05, "loss": 2.2081275939941407, "memory(GiB)": 72.85, "step": 70450, "token_acc": 0.5017667844522968, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.01850820444711, "grad_norm": 7.351570129394531, "learning_rate": 3.403419778060679e-05, "loss": 2.24639892578125, "memory(GiB)": 72.85, "step": 70455, "token_acc": 0.51875, "train_speed(iter/s)": 0.672711 }, { "epoch": 3.0187224197763594, "grad_norm": 5.381967544555664, "learning_rate": 3.402782046738713e-05, "loss": 1.781862449645996, "memory(GiB)": 72.85, "step": 70460, "token_acc": 0.6083916083916084, "train_speed(iter/s)": 0.672712 }, { "epoch": 3.018936635105608, "grad_norm": 5.284968376159668, "learning_rate": 3.402144344351848e-05, "loss": 1.9867698669433593, "memory(GiB)": 72.85, "step": 70465, "token_acc": 0.5635838150289018, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.019150850434857, "grad_norm": 6.976531982421875, "learning_rate": 3.40150667091164e-05, "loss": 2.1215770721435545, "memory(GiB)": 72.85, "step": 70470, "token_acc": 0.5335820895522388, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.0193650657641062, "grad_norm": 5.378650665283203, "learning_rate": 3.400869026429636e-05, "loss": 2.1375701904296873, "memory(GiB)": 72.85, "step": 70475, "token_acc": 0.5346534653465347, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.019579281093355, "grad_norm": 6.221175193786621, "learning_rate": 3.400231410917392e-05, "loss": 2.012154197692871, "memory(GiB)": 72.85, "step": 70480, "token_acc": 0.5488215488215489, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.019793496422604, "grad_norm": 4.864549160003662, "learning_rate": 3.3995938243864555e-05, "loss": 1.8924118041992188, "memory(GiB)": 72.85, "step": 70485, "token_acc": 0.531496062992126, "train_speed(iter/s)": 0.672698 }, { "epoch": 
3.020007711751853, "grad_norm": 5.128222465515137, "learning_rate": 3.398956266848377e-05, "loss": 2.1378955841064453, "memory(GiB)": 72.85, "step": 70490, "token_acc": 0.5627240143369175, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.020221927081102, "grad_norm": 5.64188289642334, "learning_rate": 3.3983187383147095e-05, "loss": 2.2878528594970704, "memory(GiB)": 72.85, "step": 70495, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.0204361424103507, "grad_norm": 5.861303329467773, "learning_rate": 3.397681238797e-05, "loss": 2.4605241775512696, "memory(GiB)": 72.85, "step": 70500, "token_acc": 0.5210355987055016, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.0204361424103507, "eval_loss": 2.102964162826538, "eval_runtime": 15.1328, "eval_samples_per_second": 6.608, "eval_steps_per_second": 6.608, "eval_token_acc": 0.4801641586867305, "step": 70500 }, { "epoch": 3.0206503577396, "grad_norm": 6.31498908996582, "learning_rate": 3.3970437683067966e-05, "loss": 2.3233604431152344, "memory(GiB)": 72.85, "step": 70505, "token_acc": 0.4980237154150198, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.020864573068849, "grad_norm": 4.87204647064209, "learning_rate": 3.396406326855651e-05, "loss": 2.0564971923828126, "memory(GiB)": 72.85, "step": 70510, "token_acc": 0.5186440677966102, "train_speed(iter/s)": 0.672589 }, { "epoch": 3.0210787883980976, "grad_norm": 4.5406365394592285, "learning_rate": 3.395768914455108e-05, "loss": 2.3463964462280273, "memory(GiB)": 72.85, "step": 70515, "token_acc": 0.5071428571428571, "train_speed(iter/s)": 0.672576 }, { "epoch": 3.021293003727347, "grad_norm": 6.319192409515381, "learning_rate": 3.3951315311167164e-05, "loss": 1.9603736877441407, "memory(GiB)": 72.85, "step": 70520, "token_acc": 0.5709090909090909, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.0215072190565957, "grad_norm": 5.113741874694824, "learning_rate": 3.394494176852022e-05, "loss": 2.2798944473266602, "memory(GiB)": 72.85, 
"step": 70525, "token_acc": 0.5089285714285714, "train_speed(iter/s)": 0.672582 }, { "epoch": 3.0217214343858445, "grad_norm": 6.85211706161499, "learning_rate": 3.393856851672572e-05, "loss": 2.06689338684082, "memory(GiB)": 72.85, "step": 70530, "token_acc": 0.5129151291512916, "train_speed(iter/s)": 0.672587 }, { "epoch": 3.0219356497150938, "grad_norm": 5.538865089416504, "learning_rate": 3.3932195555899104e-05, "loss": 1.972181510925293, "memory(GiB)": 72.85, "step": 70535, "token_acc": 0.5617529880478087, "train_speed(iter/s)": 0.67259 }, { "epoch": 3.0221498650443426, "grad_norm": 6.215267181396484, "learning_rate": 3.3925822886155854e-05, "loss": 2.1690053939819336, "memory(GiB)": 72.85, "step": 70540, "token_acc": 0.4965986394557823, "train_speed(iter/s)": 0.672591 }, { "epoch": 3.0223640803735914, "grad_norm": 4.4146223068237305, "learning_rate": 3.3919450507611405e-05, "loss": 2.378038787841797, "memory(GiB)": 72.85, "step": 70545, "token_acc": 0.47368421052631576, "train_speed(iter/s)": 0.672605 }, { "epoch": 3.0225782957028406, "grad_norm": 5.963570594787598, "learning_rate": 3.391307842038118e-05, "loss": 2.0061317443847657, "memory(GiB)": 72.85, "step": 70550, "token_acc": 0.5649122807017544, "train_speed(iter/s)": 0.672604 }, { "epoch": 3.0227925110320895, "grad_norm": 5.161948204040527, "learning_rate": 3.390670662458065e-05, "loss": 2.1806745529174805, "memory(GiB)": 72.85, "step": 70555, "token_acc": 0.5182724252491694, "train_speed(iter/s)": 0.672611 }, { "epoch": 3.0230067263613383, "grad_norm": 7.903336524963379, "learning_rate": 3.3900335120325225e-05, "loss": 1.9378915786743165, "memory(GiB)": 72.85, "step": 70560, "token_acc": 0.5950413223140496, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.0232209416905875, "grad_norm": 5.64048957824707, "learning_rate": 3.3893963907730315e-05, "loss": 2.258229637145996, "memory(GiB)": 72.85, "step": 70565, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.672594 }, { "epoch": 
3.0234351570198363, "grad_norm": 6.904128551483154, "learning_rate": 3.388759298691137e-05, "loss": 2.2580389022827148, "memory(GiB)": 72.85, "step": 70570, "token_acc": 0.5046728971962616, "train_speed(iter/s)": 0.6726 }, { "epoch": 3.023649372349085, "grad_norm": 10.530730247497559, "learning_rate": 3.388122235798378e-05, "loss": 2.1559261322021483, "memory(GiB)": 72.85, "step": 70575, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.672611 }, { "epoch": 3.0238635876783344, "grad_norm": 5.307474613189697, "learning_rate": 3.3874852021062984e-05, "loss": 1.9176916122436523, "memory(GiB)": 72.85, "step": 70580, "token_acc": 0.5472779369627507, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.024077803007583, "grad_norm": 4.203141689300537, "learning_rate": 3.386848197626437e-05, "loss": 1.7243558883666992, "memory(GiB)": 72.85, "step": 70585, "token_acc": 0.5754385964912281, "train_speed(iter/s)": 0.672619 }, { "epoch": 3.024292018336832, "grad_norm": 4.756429195404053, "learning_rate": 3.386211222370332e-05, "loss": 1.9758724212646483, "memory(GiB)": 72.85, "step": 70590, "token_acc": 0.5972696245733788, "train_speed(iter/s)": 0.67262 }, { "epoch": 3.0245062336660813, "grad_norm": 5.773680686950684, "learning_rate": 3.3855742763495255e-05, "loss": 1.940176010131836, "memory(GiB)": 72.85, "step": 70595, "token_acc": 0.5849056603773585, "train_speed(iter/s)": 0.672619 }, { "epoch": 3.02472044899533, "grad_norm": 4.896238803863525, "learning_rate": 3.384937359575556e-05, "loss": 2.0460710525512695, "memory(GiB)": 72.85, "step": 70600, "token_acc": 0.5539358600583091, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.024934664324579, "grad_norm": 5.382472038269043, "learning_rate": 3.3843004720599604e-05, "loss": 2.1864656448364257, "memory(GiB)": 72.85, "step": 70605, "token_acc": 0.525679758308157, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.025148879653828, "grad_norm": 4.927250385284424, "learning_rate": 3.3836636138142765e-05, "loss": 
1.9034591674804688, "memory(GiB)": 72.85, "step": 70610, "token_acc": 0.5909090909090909, "train_speed(iter/s)": 0.672627 }, { "epoch": 3.025363094983077, "grad_norm": 6.094142913818359, "learning_rate": 3.383026784850044e-05, "loss": 1.855167770385742, "memory(GiB)": 72.85, "step": 70615, "token_acc": 0.5662650602409639, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.025577310312326, "grad_norm": 6.919877529144287, "learning_rate": 3.382389985178798e-05, "loss": 2.515128517150879, "memory(GiB)": 72.85, "step": 70620, "token_acc": 0.4805194805194805, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.025791525641575, "grad_norm": 4.230441093444824, "learning_rate": 3.3817532148120747e-05, "loss": 2.1239788055419924, "memory(GiB)": 72.85, "step": 70625, "token_acc": 0.5154639175257731, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.026005740970824, "grad_norm": 4.859835624694824, "learning_rate": 3.3811164737614106e-05, "loss": 1.9268548965454102, "memory(GiB)": 72.85, "step": 70630, "token_acc": 0.5735294117647058, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.0262199563000727, "grad_norm": 5.451693058013916, "learning_rate": 3.380479762038339e-05, "loss": 1.9997556686401368, "memory(GiB)": 72.85, "step": 70635, "token_acc": 0.5451263537906137, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.026434171629322, "grad_norm": 5.047646522521973, "learning_rate": 3.379843079654397e-05, "loss": 1.9138904571533204, "memory(GiB)": 72.85, "step": 70640, "token_acc": 0.5698005698005698, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.0266483869585707, "grad_norm": 6.924579620361328, "learning_rate": 3.3792064266211165e-05, "loss": 2.0186094284057616, "memory(GiB)": 72.85, "step": 70645, "token_acc": 0.5543859649122806, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.0268626022878196, "grad_norm": 4.726693630218506, "learning_rate": 3.3785698029500327e-05, "loss": 2.022342872619629, "memory(GiB)": 72.85, "step": 70650, "token_acc": 0.5448028673835126, "train_speed(iter/s)": 
0.672651 }, { "epoch": 3.027076817617069, "grad_norm": 6.768402576446533, "learning_rate": 3.377933208652677e-05, "loss": 1.8636363983154296, "memory(GiB)": 72.85, "step": 70655, "token_acc": 0.5481727574750831, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.0272910329463176, "grad_norm": 6.875367164611816, "learning_rate": 3.3772966437405826e-05, "loss": 2.4939363479614256, "memory(GiB)": 72.85, "step": 70660, "token_acc": 0.4964788732394366, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.0275052482755664, "grad_norm": 5.274730682373047, "learning_rate": 3.3766601082252834e-05, "loss": 2.0062591552734377, "memory(GiB)": 72.85, "step": 70665, "token_acc": 0.5544554455445545, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.0277194636048157, "grad_norm": 4.858833312988281, "learning_rate": 3.376023602118309e-05, "loss": 2.141180992126465, "memory(GiB)": 72.85, "step": 70670, "token_acc": 0.5476923076923077, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.0279336789340645, "grad_norm": 5.128391742706299, "learning_rate": 3.3753871254311894e-05, "loss": 2.3550188064575197, "memory(GiB)": 72.85, "step": 70675, "token_acc": 0.48322147651006714, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.0281478942633133, "grad_norm": 4.978062152862549, "learning_rate": 3.374750678175457e-05, "loss": 2.0585540771484374, "memory(GiB)": 72.85, "step": 70680, "token_acc": 0.5462555066079295, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.0283621095925626, "grad_norm": 5.843716621398926, "learning_rate": 3.374114260362638e-05, "loss": 2.239094543457031, "memory(GiB)": 72.85, "step": 70685, "token_acc": 0.46229508196721314, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.0285763249218114, "grad_norm": 7.458775520324707, "learning_rate": 3.373477872004267e-05, "loss": 2.0885581970214844, "memory(GiB)": 72.85, "step": 70690, "token_acc": 0.5609756097560976, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.02879054025106, "grad_norm": 6.5735321044921875, "learning_rate": 
3.372841513111868e-05, "loss": 2.1512712478637694, "memory(GiB)": 72.85, "step": 70695, "token_acc": 0.5469387755102041, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.0290047555803095, "grad_norm": 4.909300327301025, "learning_rate": 3.372205183696974e-05, "loss": 2.2125314712524413, "memory(GiB)": 72.85, "step": 70700, "token_acc": 0.5043478260869565, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.0292189709095583, "grad_norm": 8.542016983032227, "learning_rate": 3.37156888377111e-05, "loss": 1.968193244934082, "memory(GiB)": 72.85, "step": 70705, "token_acc": 0.5440613026819924, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.029433186238807, "grad_norm": 6.89066743850708, "learning_rate": 3.3709326133458014e-05, "loss": 1.9892751693725585, "memory(GiB)": 72.85, "step": 70710, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.0296474015680563, "grad_norm": 6.401310920715332, "learning_rate": 3.370296372432578e-05, "loss": 2.0170974731445312, "memory(GiB)": 72.85, "step": 70715, "token_acc": 0.5559322033898305, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.029861616897305, "grad_norm": 5.735138893127441, "learning_rate": 3.3696601610429636e-05, "loss": 1.955355453491211, "memory(GiB)": 72.85, "step": 70720, "token_acc": 0.5985401459854015, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.030075832226554, "grad_norm": 5.347618103027344, "learning_rate": 3.369023979188486e-05, "loss": 2.059514617919922, "memory(GiB)": 72.85, "step": 70725, "token_acc": 0.5394190871369294, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.0302900475558032, "grad_norm": 4.3795552253723145, "learning_rate": 3.368387826880669e-05, "loss": 2.0518808364868164, "memory(GiB)": 72.85, "step": 70730, "token_acc": 0.5444839857651246, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.030504262885052, "grad_norm": 7.948148727416992, "learning_rate": 3.3677517041310356e-05, "loss": 2.120557403564453, "memory(GiB)": 72.85, "step": 70735, "token_acc": 
0.5825825825825826, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.030718478214301, "grad_norm": 8.58534049987793, "learning_rate": 3.3671156109511125e-05, "loss": 2.2373336791992187, "memory(GiB)": 72.85, "step": 70740, "token_acc": 0.5224489795918368, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.03093269354355, "grad_norm": 5.3441901206970215, "learning_rate": 3.3664795473524224e-05, "loss": 2.183215522766113, "memory(GiB)": 72.85, "step": 70745, "token_acc": 0.5329815303430079, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.031146908872799, "grad_norm": 5.947108268737793, "learning_rate": 3.365843513346486e-05, "loss": 2.1999916076660155, "memory(GiB)": 72.85, "step": 70750, "token_acc": 0.5459610027855153, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.0313611242020477, "grad_norm": 8.021434783935547, "learning_rate": 3.3652075089448266e-05, "loss": 1.892344093322754, "memory(GiB)": 72.85, "step": 70755, "token_acc": 0.5663716814159292, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.031575339531297, "grad_norm": 4.823124885559082, "learning_rate": 3.364571534158968e-05, "loss": 1.9969465255737304, "memory(GiB)": 72.85, "step": 70760, "token_acc": 0.5842293906810035, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.031789554860546, "grad_norm": 6.540212154388428, "learning_rate": 3.36393558900043e-05, "loss": 2.1671689987182616, "memory(GiB)": 72.85, "step": 70765, "token_acc": 0.5547445255474452, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.0320037701897946, "grad_norm": 6.944116592407227, "learning_rate": 3.363299673480735e-05, "loss": 2.3512535095214844, "memory(GiB)": 72.85, "step": 70770, "token_acc": 0.4852459016393443, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.032217985519044, "grad_norm": 5.892014980316162, "learning_rate": 3.362663787611401e-05, "loss": 2.1235931396484373, "memory(GiB)": 72.85, "step": 70775, "token_acc": 0.5197368421052632, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.0324322008482927, "grad_norm": 5.591046333312988, 
"learning_rate": 3.3620279314039474e-05, "loss": 2.1729230880737305, "memory(GiB)": 72.85, "step": 70780, "token_acc": 0.553030303030303, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.0326464161775415, "grad_norm": 5.198909282684326, "learning_rate": 3.361392104869896e-05, "loss": 2.045599365234375, "memory(GiB)": 72.85, "step": 70785, "token_acc": 0.5331010452961672, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.0328606315067907, "grad_norm": 5.241848945617676, "learning_rate": 3.3607563080207636e-05, "loss": 2.3152097702026366, "memory(GiB)": 72.85, "step": 70790, "token_acc": 0.49575070821529743, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.0330748468360396, "grad_norm": 6.537286281585693, "learning_rate": 3.360120540868067e-05, "loss": 2.0431503295898437, "memory(GiB)": 72.85, "step": 70795, "token_acc": 0.5789473684210527, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.0332890621652884, "grad_norm": 4.978337287902832, "learning_rate": 3.359484803423326e-05, "loss": 2.15216064453125, "memory(GiB)": 72.85, "step": 70800, "token_acc": 0.5508771929824562, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.0335032774945376, "grad_norm": 4.568006992340088, "learning_rate": 3.358849095698056e-05, "loss": 2.0780893325805665, "memory(GiB)": 72.85, "step": 70805, "token_acc": 0.5204081632653061, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.0337174928237864, "grad_norm": 4.2108941078186035, "learning_rate": 3.358213417703775e-05, "loss": 1.8473026275634765, "memory(GiB)": 72.85, "step": 70810, "token_acc": 0.5496688741721855, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.0339317081530353, "grad_norm": 8.07111644744873, "learning_rate": 3.3575777694519984e-05, "loss": 2.0159351348876955, "memory(GiB)": 72.85, "step": 70815, "token_acc": 0.5617529880478087, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.0341459234822845, "grad_norm": 7.7173638343811035, "learning_rate": 3.3569421509542395e-05, "loss": 2.077898406982422, "memory(GiB)": 72.85, "step": 70820, 
"token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.672678 }, { "epoch": 3.0343601388115333, "grad_norm": 4.225019454956055, "learning_rate": 3.3563065622220166e-05, "loss": 2.0534008026123045, "memory(GiB)": 72.85, "step": 70825, "token_acc": 0.5495495495495496, "train_speed(iter/s)": 0.672685 }, { "epoch": 3.034574354140782, "grad_norm": 6.043596267700195, "learning_rate": 3.3556710032668405e-05, "loss": 2.3542556762695312, "memory(GiB)": 72.85, "step": 70830, "token_acc": 0.5179153094462541, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.0347885694700314, "grad_norm": 4.95862340927124, "learning_rate": 3.3550354741002275e-05, "loss": 2.02585506439209, "memory(GiB)": 72.85, "step": 70835, "token_acc": 0.5289855072463768, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.03500278479928, "grad_norm": 9.142202377319336, "learning_rate": 3.35439997473369e-05, "loss": 2.1729507446289062, "memory(GiB)": 72.85, "step": 70840, "token_acc": 0.5743243243243243, "train_speed(iter/s)": 0.67269 }, { "epoch": 3.035217000128529, "grad_norm": 3.906277894973755, "learning_rate": 3.353764505178742e-05, "loss": 2.4851011276245116, "memory(GiB)": 72.85, "step": 70845, "token_acc": 0.5056603773584906, "train_speed(iter/s)": 0.672684 }, { "epoch": 3.0354312154577783, "grad_norm": 5.710079669952393, "learning_rate": 3.353129065446893e-05, "loss": 2.0824983596801756, "memory(GiB)": 72.85, "step": 70850, "token_acc": 0.5992647058823529, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.035645430787027, "grad_norm": 3.7360734939575195, "learning_rate": 3.352493655549655e-05, "loss": 1.9791603088378906, "memory(GiB)": 72.85, "step": 70855, "token_acc": 0.5664335664335665, "train_speed(iter/s)": 0.67269 }, { "epoch": 3.035859646116276, "grad_norm": 4.775140762329102, "learning_rate": 3.351858275498542e-05, "loss": 2.1580272674560548, "memory(GiB)": 72.85, "step": 70860, "token_acc": 0.5250737463126843, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.036073861445525, "grad_norm": 
6.15798807144165, "learning_rate": 3.351222925305061e-05, "loss": 2.021535110473633, "memory(GiB)": 72.85, "step": 70865, "token_acc": 0.5246478873239436, "train_speed(iter/s)": 0.672676 }, { "epoch": 3.036288076774774, "grad_norm": 4.102132797241211, "learning_rate": 3.350587604980724e-05, "loss": 1.9594818115234376, "memory(GiB)": 72.85, "step": 70870, "token_acc": 0.5643939393939394, "train_speed(iter/s)": 0.672677 }, { "epoch": 3.036502292104023, "grad_norm": 5.989055156707764, "learning_rate": 3.3499523145370406e-05, "loss": 2.018794822692871, "memory(GiB)": 72.85, "step": 70875, "token_acc": 0.5313531353135313, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.036716507433272, "grad_norm": 6.2991790771484375, "learning_rate": 3.3493170539855166e-05, "loss": 2.1751340866088866, "memory(GiB)": 72.85, "step": 70880, "token_acc": 0.5302013422818792, "train_speed(iter/s)": 0.672676 }, { "epoch": 3.036930722762521, "grad_norm": 6.440340518951416, "learning_rate": 3.348681823337665e-05, "loss": 2.439365196228027, "memory(GiB)": 72.85, "step": 70885, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.0371449380917697, "grad_norm": 5.79849100112915, "learning_rate": 3.34804662260499e-05, "loss": 2.1358478546142576, "memory(GiB)": 72.85, "step": 70890, "token_acc": 0.5326797385620915, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.037359153421019, "grad_norm": 6.358684539794922, "learning_rate": 3.347411451799e-05, "loss": 1.856319046020508, "memory(GiB)": 72.85, "step": 70895, "token_acc": 0.5524691358024691, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.0375733687502677, "grad_norm": 4.68524694442749, "learning_rate": 3.3467763109312025e-05, "loss": 1.938755416870117, "memory(GiB)": 72.85, "step": 70900, "token_acc": 0.5444839857651246, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.0377875840795165, "grad_norm": 5.826516151428223, "learning_rate": 3.346141200013101e-05, "loss": 2.0831008911132813, "memory(GiB)": 72.85, "step": 70905, 
"token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.038001799408766, "grad_norm": 5.754778861999512, "learning_rate": 3.345506119056204e-05, "loss": 1.898394775390625, "memory(GiB)": 72.85, "step": 70910, "token_acc": 0.55625, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.0382160147380146, "grad_norm": 5.741051197052002, "learning_rate": 3.344871068072016e-05, "loss": 2.192819595336914, "memory(GiB)": 72.85, "step": 70915, "token_acc": 0.5367647058823529, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.0384302300672634, "grad_norm": 5.934877395629883, "learning_rate": 3.344236047072042e-05, "loss": 2.3231082916259767, "memory(GiB)": 72.85, "step": 70920, "token_acc": 0.5181518151815182, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.0386444453965127, "grad_norm": 6.09702730178833, "learning_rate": 3.343601056067785e-05, "loss": 2.0889360427856447, "memory(GiB)": 72.85, "step": 70925, "token_acc": 0.5229681978798587, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.0388586607257615, "grad_norm": 5.122757434844971, "learning_rate": 3.342966095070749e-05, "loss": 2.0919633865356446, "memory(GiB)": 72.85, "step": 70930, "token_acc": 0.5451263537906137, "train_speed(iter/s)": 0.672716 }, { "epoch": 3.0390728760550103, "grad_norm": 5.288000106811523, "learning_rate": 3.3423311640924365e-05, "loss": 2.1593740463256834, "memory(GiB)": 72.85, "step": 70935, "token_acc": 0.5594855305466238, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.0392870913842596, "grad_norm": 5.378473281860352, "learning_rate": 3.341696263144349e-05, "loss": 2.0522865295410155, "memory(GiB)": 72.85, "step": 70940, "token_acc": 0.501779359430605, "train_speed(iter/s)": 0.672712 }, { "epoch": 3.0395013067135084, "grad_norm": 6.420713901519775, "learning_rate": 3.3410613922379906e-05, "loss": 2.0275978088378905, "memory(GiB)": 72.85, "step": 70945, "token_acc": 0.5824175824175825, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.039715522042757, "grad_norm": 
4.727179050445557, "learning_rate": 3.34042655138486e-05, "loss": 2.2617603302001954, "memory(GiB)": 72.85, "step": 70950, "token_acc": 0.5317725752508361, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.0399297373720064, "grad_norm": 3.416919231414795, "learning_rate": 3.33979174059646e-05, "loss": 2.180573272705078, "memory(GiB)": 72.85, "step": 70955, "token_acc": 0.5154929577464789, "train_speed(iter/s)": 0.672692 }, { "epoch": 3.0401439527012553, "grad_norm": 4.503795146942139, "learning_rate": 3.3391569598842904e-05, "loss": 2.0328252792358397, "memory(GiB)": 72.85, "step": 70960, "token_acc": 0.5398230088495575, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.040358168030504, "grad_norm": 4.705836772918701, "learning_rate": 3.338522209259849e-05, "loss": 2.0078685760498045, "memory(GiB)": 72.85, "step": 70965, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.0405723833597533, "grad_norm": 7.725492000579834, "learning_rate": 3.337887488734638e-05, "loss": 1.9349918365478516, "memory(GiB)": 72.85, "step": 70970, "token_acc": 0.5656934306569343, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.040786598689002, "grad_norm": 5.712118148803711, "learning_rate": 3.3372527983201544e-05, "loss": 2.0775453567504885, "memory(GiB)": 72.85, "step": 70975, "token_acc": 0.5471698113207547, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.041000814018251, "grad_norm": 4.816429615020752, "learning_rate": 3.336618138027894e-05, "loss": 1.9132335662841797, "memory(GiB)": 72.85, "step": 70980, "token_acc": 0.5851851851851851, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.0412150293475, "grad_norm": 7.291342258453369, "learning_rate": 3.335983507869357e-05, "loss": 1.662057113647461, "memory(GiB)": 72.85, "step": 70985, "token_acc": 0.635036496350365, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.041429244676749, "grad_norm": 6.602746963500977, "learning_rate": 3.335348907856041e-05, "loss": 2.044333648681641, "memory(GiB)": 72.85, "step": 
70990, "token_acc": 0.5325670498084292, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.041643460005998, "grad_norm": 4.012298583984375, "learning_rate": 3.3347143379994405e-05, "loss": 2.236867904663086, "memory(GiB)": 72.85, "step": 70995, "token_acc": 0.5450643776824035, "train_speed(iter/s)": 0.672712 }, { "epoch": 3.041857675335247, "grad_norm": 10.169082641601562, "learning_rate": 3.3340797983110514e-05, "loss": 1.8590383529663086, "memory(GiB)": 72.85, "step": 71000, "token_acc": 0.6059322033898306, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.041857675335247, "eval_loss": 1.9905176162719727, "eval_runtime": 15.2587, "eval_samples_per_second": 6.554, "eval_steps_per_second": 6.554, "eval_token_acc": 0.5, "step": 71000 }, { "epoch": 3.042071890664496, "grad_norm": 5.4004974365234375, "learning_rate": 3.3334452888023703e-05, "loss": 2.1487659454345702, "memory(GiB)": 72.85, "step": 71005, "token_acc": 0.5080264400377715, "train_speed(iter/s)": 0.672603 }, { "epoch": 3.0422861059937447, "grad_norm": 6.111708641052246, "learning_rate": 3.332810809484891e-05, "loss": 2.032012367248535, "memory(GiB)": 72.85, "step": 71010, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.672598 }, { "epoch": 3.042500321322994, "grad_norm": 4.865117073059082, "learning_rate": 3.3321763603701085e-05, "loss": 2.0351930618286134, "memory(GiB)": 72.85, "step": 71015, "token_acc": 0.5316901408450704, "train_speed(iter/s)": 0.6726 }, { "epoch": 3.042714536652243, "grad_norm": 6.272166728973389, "learning_rate": 3.331541941469515e-05, "loss": 2.1993759155273436, "memory(GiB)": 72.85, "step": 71020, "token_acc": 0.5215827338129496, "train_speed(iter/s)": 0.672583 }, { "epoch": 3.0429287519814916, "grad_norm": 12.741697311401367, "learning_rate": 3.330907552794604e-05, "loss": 2.1951116561889648, "memory(GiB)": 72.85, "step": 71025, "token_acc": 0.5036496350364964, "train_speed(iter/s)": 0.672595 }, { "epoch": 3.043142967310741, "grad_norm": 5.7764458656311035, "learning_rate": 
3.3302731943568696e-05, "loss": 2.516126823425293, "memory(GiB)": 72.85, "step": 71030, "token_acc": 0.5031055900621118, "train_speed(iter/s)": 0.672605 }, { "epoch": 3.0433571826399897, "grad_norm": 4.488224029541016, "learning_rate": 3.3296388661678024e-05, "loss": 2.3313526153564452, "memory(GiB)": 72.85, "step": 71035, "token_acc": 0.48502994011976047, "train_speed(iter/s)": 0.672602 }, { "epoch": 3.0435713979692385, "grad_norm": 6.370916366577148, "learning_rate": 3.3290045682388924e-05, "loss": 2.119398498535156, "memory(GiB)": 72.85, "step": 71040, "token_acc": 0.5374149659863946, "train_speed(iter/s)": 0.672587 }, { "epoch": 3.0437856132984877, "grad_norm": 5.48543643951416, "learning_rate": 3.328370300581634e-05, "loss": 2.2267135620117187, "memory(GiB)": 72.85, "step": 71045, "token_acc": 0.546031746031746, "train_speed(iter/s)": 0.672581 }, { "epoch": 3.0439998286277365, "grad_norm": 5.605425834655762, "learning_rate": 3.327736063207514e-05, "loss": 1.9987836837768556, "memory(GiB)": 72.85, "step": 71050, "token_acc": 0.5381526104417671, "train_speed(iter/s)": 0.67258 }, { "epoch": 3.0442140439569854, "grad_norm": 7.007625102996826, "learning_rate": 3.327101856128024e-05, "loss": 2.1525852203369142, "memory(GiB)": 72.85, "step": 71055, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.672582 }, { "epoch": 3.0444282592862346, "grad_norm": 5.794934272766113, "learning_rate": 3.3264676793546545e-05, "loss": 1.888084602355957, "memory(GiB)": 72.85, "step": 71060, "token_acc": 0.616, "train_speed(iter/s)": 0.672587 }, { "epoch": 3.0446424746154834, "grad_norm": 7.522222518920898, "learning_rate": 3.3258335328988934e-05, "loss": 2.328790473937988, "memory(GiB)": 72.85, "step": 71065, "token_acc": 0.532319391634981, "train_speed(iter/s)": 0.672588 }, { "epoch": 3.0448566899447322, "grad_norm": 4.490306377410889, "learning_rate": 3.3251994167722264e-05, "loss": 2.205922508239746, "memory(GiB)": 72.85, "step": 71070, "token_acc": 0.5100671140939598, 
"train_speed(iter/s)": 0.672594 }, { "epoch": 3.0450709052739815, "grad_norm": 4.469733715057373, "learning_rate": 3.324565330986145e-05, "loss": 2.260677719116211, "memory(GiB)": 72.85, "step": 71075, "token_acc": 0.543918918918919, "train_speed(iter/s)": 0.672595 }, { "epoch": 3.0452851206032303, "grad_norm": 5.531559467315674, "learning_rate": 3.3239312755521335e-05, "loss": 1.9503841400146484, "memory(GiB)": 72.85, "step": 71080, "token_acc": 0.5323076923076923, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.045499335932479, "grad_norm": 5.249732494354248, "learning_rate": 3.3232972504816785e-05, "loss": 2.138858413696289, "memory(GiB)": 72.85, "step": 71085, "token_acc": 0.535031847133758, "train_speed(iter/s)": 0.672593 }, { "epoch": 3.0457135512617284, "grad_norm": 5.4938764572143555, "learning_rate": 3.322663255786268e-05, "loss": 2.0968324661254885, "memory(GiB)": 72.85, "step": 71090, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672599 }, { "epoch": 3.045927766590977, "grad_norm": 5.778555870056152, "learning_rate": 3.322029291477385e-05, "loss": 2.1949363708496095, "memory(GiB)": 72.85, "step": 71095, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672588 }, { "epoch": 3.046141981920226, "grad_norm": 6.77565336227417, "learning_rate": 3.321395357566516e-05, "loss": 1.8430446624755858, "memory(GiB)": 72.85, "step": 71100, "token_acc": 0.5863309352517986, "train_speed(iter/s)": 0.672595 }, { "epoch": 3.0463561972494753, "grad_norm": 5.986884593963623, "learning_rate": 3.3207614540651456e-05, "loss": 1.8894590377807616, "memory(GiB)": 72.85, "step": 71105, "token_acc": 0.5925925925925926, "train_speed(iter/s)": 0.672598 }, { "epoch": 3.046570412578724, "grad_norm": 5.967811584472656, "learning_rate": 3.320127580984755e-05, "loss": 2.0360145568847656, "memory(GiB)": 72.85, "step": 71110, "token_acc": 0.5407407407407407, "train_speed(iter/s)": 0.672592 }, { "epoch": 3.046784627907973, "grad_norm": 6.207586765289307, "learning_rate": 
3.319493738336831e-05, "loss": 1.9377407073974608, "memory(GiB)": 72.85, "step": 71115, "token_acc": 0.5784313725490197, "train_speed(iter/s)": 0.672596 }, { "epoch": 3.046998843237222, "grad_norm": 4.819958686828613, "learning_rate": 3.3188599261328536e-05, "loss": 2.119691276550293, "memory(GiB)": 72.85, "step": 71120, "token_acc": 0.5582191780821918, "train_speed(iter/s)": 0.672603 }, { "epoch": 3.047213058566471, "grad_norm": 4.661118984222412, "learning_rate": 3.318226144384303e-05, "loss": 1.81036376953125, "memory(GiB)": 72.85, "step": 71125, "token_acc": 0.5728813559322034, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.0474272738957198, "grad_norm": 7.19794225692749, "learning_rate": 3.317592393102665e-05, "loss": 2.1547861099243164, "memory(GiB)": 72.85, "step": 71130, "token_acc": 0.5167173252279635, "train_speed(iter/s)": 0.672609 }, { "epoch": 3.047641489224969, "grad_norm": 5.827264785766602, "learning_rate": 3.3169586722994205e-05, "loss": 2.2864404678344727, "memory(GiB)": 72.85, "step": 71135, "token_acc": 0.512987012987013, "train_speed(iter/s)": 0.672624 }, { "epoch": 3.047855704554218, "grad_norm": 6.293904781341553, "learning_rate": 3.3163249819860485e-05, "loss": 2.0461090087890623, "memory(GiB)": 72.85, "step": 71140, "token_acc": 0.5469798657718121, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.0480699198834666, "grad_norm": 4.510101795196533, "learning_rate": 3.315691322174027e-05, "loss": 2.132834243774414, "memory(GiB)": 72.85, "step": 71145, "token_acc": 0.5347222222222222, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.048284135212716, "grad_norm": 7.028562545776367, "learning_rate": 3.3150576928748386e-05, "loss": 2.21678466796875, "memory(GiB)": 72.85, "step": 71150, "token_acc": 0.5266666666666666, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.0484983505419647, "grad_norm": 6.785381317138672, "learning_rate": 3.314424094099961e-05, "loss": 1.9458791732788085, "memory(GiB)": 72.85, "step": 71155, "token_acc": 0.5632911392405063, 
"train_speed(iter/s)": 0.672635 }, { "epoch": 3.0487125658712135, "grad_norm": 4.778997421264648, "learning_rate": 3.3137905258608714e-05, "loss": 2.0104089736938477, "memory(GiB)": 72.85, "step": 71160, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.048926781200463, "grad_norm": 4.959018707275391, "learning_rate": 3.313156988169049e-05, "loss": 2.252728271484375, "memory(GiB)": 72.85, "step": 71165, "token_acc": 0.5273311897106109, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.0491409965297116, "grad_norm": 5.1118059158325195, "learning_rate": 3.312523481035969e-05, "loss": 2.0224653244018556, "memory(GiB)": 72.85, "step": 71170, "token_acc": 0.5621301775147929, "train_speed(iter/s)": 0.672624 }, { "epoch": 3.0493552118589604, "grad_norm": 5.446597099304199, "learning_rate": 3.31189000447311e-05, "loss": 2.042780876159668, "memory(GiB)": 72.85, "step": 71175, "token_acc": 0.5756457564575646, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.0495694271882097, "grad_norm": 6.859033107757568, "learning_rate": 3.311256558491947e-05, "loss": 2.313633155822754, "memory(GiB)": 72.85, "step": 71180, "token_acc": 0.5029585798816568, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.0497836425174585, "grad_norm": 6.6238508224487305, "learning_rate": 3.3106231431039546e-05, "loss": 1.87475528717041, "memory(GiB)": 72.85, "step": 71185, "token_acc": 0.5601503759398496, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.0499978578467073, "grad_norm": 6.550374984741211, "learning_rate": 3.309989758320611e-05, "loss": 1.842228889465332, "memory(GiB)": 72.85, "step": 71190, "token_acc": 0.5673469387755102, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.0502120731759566, "grad_norm": 5.153262615203857, "learning_rate": 3.309356404153385e-05, "loss": 2.2286901473999023, "memory(GiB)": 72.85, "step": 71195, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.0504262885052054, "grad_norm": 6.02055549621582, "learning_rate": 
3.3087230806137545e-05, "loss": 1.9798974990844727, "memory(GiB)": 72.85, "step": 71200, "token_acc": 0.5609756097560976, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.050640503834454, "grad_norm": 4.934430122375488, "learning_rate": 3.308089787713193e-05, "loss": 2.3256881713867186, "memory(GiB)": 72.85, "step": 71205, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.0508547191637034, "grad_norm": 4.335466384887695, "learning_rate": 3.3074565254631725e-05, "loss": 2.277918243408203, "memory(GiB)": 72.85, "step": 71210, "token_acc": 0.5287769784172662, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.0510689344929522, "grad_norm": 5.189278602600098, "learning_rate": 3.3068232938751644e-05, "loss": 2.2233739852905274, "memory(GiB)": 72.85, "step": 71215, "token_acc": 0.5507246376811594, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.0512831498222015, "grad_norm": 6.744357109069824, "learning_rate": 3.306190092960641e-05, "loss": 1.9082977294921875, "memory(GiB)": 72.85, "step": 71220, "token_acc": 0.5980066445182725, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.0514973651514503, "grad_norm": 5.882833480834961, "learning_rate": 3.3055569227310735e-05, "loss": 2.216592788696289, "memory(GiB)": 72.85, "step": 71225, "token_acc": 0.5252225519287834, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.051711580480699, "grad_norm": 7.090776443481445, "learning_rate": 3.30492378319793e-05, "loss": 2.2622211456298826, "memory(GiB)": 72.85, "step": 71230, "token_acc": 0.539568345323741, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.0519257958099484, "grad_norm": 7.217623710632324, "learning_rate": 3.3042906743726835e-05, "loss": 2.1802764892578126, "memory(GiB)": 72.85, "step": 71235, "token_acc": 0.5307692307692308, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.052140011139197, "grad_norm": 5.4988908767700195, "learning_rate": 3.3036575962668015e-05, "loss": 1.8902650833129884, "memory(GiB)": 72.85, "step": 71240, "token_acc": 
0.49645390070921985, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.052354226468446, "grad_norm": 4.5944929122924805, "learning_rate": 3.3030245488917545e-05, "loss": 2.101614570617676, "memory(GiB)": 72.85, "step": 71245, "token_acc": 0.5401459854014599, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.0525684417976953, "grad_norm": 5.74950647354126, "learning_rate": 3.3023915322590094e-05, "loss": 1.8364418029785157, "memory(GiB)": 72.85, "step": 71250, "token_acc": 0.5852713178294574, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.052782657126944, "grad_norm": 7.612363338470459, "learning_rate": 3.301758546380033e-05, "loss": 1.9060623168945312, "memory(GiB)": 72.85, "step": 71255, "token_acc": 0.5243553008595988, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.052996872456193, "grad_norm": 5.281733512878418, "learning_rate": 3.3011255912662954e-05, "loss": 2.3182106018066406, "memory(GiB)": 72.85, "step": 71260, "token_acc": 0.4879725085910653, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.053211087785442, "grad_norm": 4.918889999389648, "learning_rate": 3.30049266692926e-05, "loss": 2.036804962158203, "memory(GiB)": 72.85, "step": 71265, "token_acc": 0.5463258785942492, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.053425303114691, "grad_norm": 5.29713773727417, "learning_rate": 3.2998597733803946e-05, "loss": 2.0460206985473635, "memory(GiB)": 72.85, "step": 71270, "token_acc": 0.5633333333333334, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.0536395184439398, "grad_norm": 6.253316402435303, "learning_rate": 3.299226910631162e-05, "loss": 2.5252761840820312, "memory(GiB)": 72.85, "step": 71275, "token_acc": 0.44545454545454544, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.053853733773189, "grad_norm": 6.592952251434326, "learning_rate": 3.298594078693032e-05, "loss": 2.332956314086914, "memory(GiB)": 72.85, "step": 71280, "token_acc": 0.5189003436426117, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.054067949102438, "grad_norm": 4.887953281402588, 
"learning_rate": 3.297961277577466e-05, "loss": 2.166434478759766, "memory(GiB)": 72.85, "step": 71285, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.0542821644316867, "grad_norm": 4.720284938812256, "learning_rate": 3.297328507295928e-05, "loss": 2.016846466064453, "memory(GiB)": 72.85, "step": 71290, "token_acc": 0.5496688741721855, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.054496379760936, "grad_norm": 3.87575626373291, "learning_rate": 3.296695767859882e-05, "loss": 1.945530891418457, "memory(GiB)": 72.85, "step": 71295, "token_acc": 0.5594405594405595, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.0547105950901847, "grad_norm": 7.414439678192139, "learning_rate": 3.2960630592807886e-05, "loss": 2.031905937194824, "memory(GiB)": 72.85, "step": 71300, "token_acc": 0.5104895104895105, "train_speed(iter/s)": 0.672677 }, { "epoch": 3.0549248104194335, "grad_norm": 6.309853553771973, "learning_rate": 3.295430381570113e-05, "loss": 2.010569763183594, "memory(GiB)": 72.85, "step": 71305, "token_acc": 0.5748987854251012, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.055139025748683, "grad_norm": 6.62957239151001, "learning_rate": 3.294797734739314e-05, "loss": 2.491547966003418, "memory(GiB)": 72.85, "step": 71310, "token_acc": 0.4205298013245033, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.0553532410779316, "grad_norm": 5.7418212890625, "learning_rate": 3.294165118799853e-05, "loss": 1.877630615234375, "memory(GiB)": 72.85, "step": 71315, "token_acc": 0.5619834710743802, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.0555674564071804, "grad_norm": 6.306034088134766, "learning_rate": 3.2935325337631925e-05, "loss": 2.059433174133301, "memory(GiB)": 72.85, "step": 71320, "token_acc": 0.5854430379746836, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.0557816717364297, "grad_norm": 6.236740589141846, "learning_rate": 3.29289997964079e-05, "loss": 2.210055351257324, "memory(GiB)": 72.85, "step": 71325, "token_acc": 
0.5331125827814569, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.0559958870656785, "grad_norm": 7.404148578643799, "learning_rate": 3.292267456444107e-05, "loss": 2.2306941986083983, "memory(GiB)": 72.85, "step": 71330, "token_acc": 0.5169230769230769, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.0562101023949273, "grad_norm": 5.30940055847168, "learning_rate": 3.2916349641846e-05, "loss": 2.0887287139892576, "memory(GiB)": 72.85, "step": 71335, "token_acc": 0.5125448028673835, "train_speed(iter/s)": 0.672692 }, { "epoch": 3.0564243177241766, "grad_norm": 5.275916576385498, "learning_rate": 3.291002502873728e-05, "loss": 2.1744394302368164, "memory(GiB)": 72.85, "step": 71340, "token_acc": 0.5506756756756757, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.0566385330534254, "grad_norm": 5.867997646331787, "learning_rate": 3.290370072522949e-05, "loss": 2.1273630142211912, "memory(GiB)": 72.85, "step": 71345, "token_acc": 0.5230769230769231, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.056852748382674, "grad_norm": 4.875144958496094, "learning_rate": 3.289737673143719e-05, "loss": 2.3751808166503907, "memory(GiB)": 72.85, "step": 71350, "token_acc": 0.49206349206349204, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.0570669637119234, "grad_norm": 4.674074649810791, "learning_rate": 3.289105304747496e-05, "loss": 2.3089269638061523, "memory(GiB)": 72.85, "step": 71355, "token_acc": 0.5346820809248555, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.0572811790411722, "grad_norm": 5.899993419647217, "learning_rate": 3.2884729673457356e-05, "loss": 2.2930137634277346, "memory(GiB)": 72.85, "step": 71360, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.057495394370421, "grad_norm": 6.489803314208984, "learning_rate": 3.287840660949893e-05, "loss": 2.112686538696289, "memory(GiB)": 72.85, "step": 71365, "token_acc": 0.5540540540540541, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.0577096096996703, "grad_norm": 
4.891587734222412, "learning_rate": 3.287208385571424e-05, "loss": 2.0668107986450197, "memory(GiB)": 72.85, "step": 71370, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.057923825028919, "grad_norm": 6.109734058380127, "learning_rate": 3.2865761412217806e-05, "loss": 2.5384057998657226, "memory(GiB)": 72.85, "step": 71375, "token_acc": 0.49390243902439024, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.058138040358168, "grad_norm": 4.76166296005249, "learning_rate": 3.285943927912418e-05, "loss": 2.0795465469360352, "memory(GiB)": 72.85, "step": 71380, "token_acc": 0.5472222222222223, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.058352255687417, "grad_norm": 4.841107368469238, "learning_rate": 3.285311745654789e-05, "loss": 1.9005813598632812, "memory(GiB)": 72.85, "step": 71385, "token_acc": 0.577922077922078, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.058566471016666, "grad_norm": 4.834062576293945, "learning_rate": 3.2846795944603464e-05, "loss": 2.211902046203613, "memory(GiB)": 72.85, "step": 71390, "token_acc": 0.5317220543806647, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.058780686345915, "grad_norm": 5.4360222816467285, "learning_rate": 3.2840474743405426e-05, "loss": 2.0738107681274416, "memory(GiB)": 72.85, "step": 71395, "token_acc": 0.5551839464882943, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.058994901675164, "grad_norm": 4.384190082550049, "learning_rate": 3.283415385306827e-05, "loss": 1.9422525405883788, "memory(GiB)": 72.85, "step": 71400, "token_acc": 0.5452196382428941, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.059209117004413, "grad_norm": 5.966806411743164, "learning_rate": 3.2827833273706535e-05, "loss": 2.193132019042969, "memory(GiB)": 72.85, "step": 71405, "token_acc": 0.5487012987012987, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.0594233323336617, "grad_norm": 5.933908939361572, "learning_rate": 3.2821513005434705e-05, "loss": 2.08868350982666, "memory(GiB)": 72.85, "step": 
71410, "token_acc": 0.5638297872340425, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.059637547662911, "grad_norm": 5.403615951538086, "learning_rate": 3.281519304836727e-05, "loss": 2.088371467590332, "memory(GiB)": 72.85, "step": 71415, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672687 }, { "epoch": 3.0598517629921598, "grad_norm": 5.399892807006836, "learning_rate": 3.280887340261873e-05, "loss": 1.874897575378418, "memory(GiB)": 72.85, "step": 71420, "token_acc": 0.5884476534296029, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.0600659783214086, "grad_norm": 4.65841817855835, "learning_rate": 3.2802554068303596e-05, "loss": 2.3043001174926756, "memory(GiB)": 72.85, "step": 71425, "token_acc": 0.48066298342541436, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.060280193650658, "grad_norm": 6.199366569519043, "learning_rate": 3.2796235045536314e-05, "loss": 2.4056365966796873, "memory(GiB)": 72.85, "step": 71430, "token_acc": 0.5201238390092879, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.0604944089799067, "grad_norm": 7.527563095092773, "learning_rate": 3.278991633443138e-05, "loss": 2.0879209518432615, "memory(GiB)": 72.85, "step": 71435, "token_acc": 0.5573770491803278, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.0607086243091555, "grad_norm": 6.938315391540527, "learning_rate": 3.278359793510325e-05, "loss": 2.3182533264160154, "memory(GiB)": 72.85, "step": 71440, "token_acc": 0.5017667844522968, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.0609228396384047, "grad_norm": 6.371435165405273, "learning_rate": 3.27772798476664e-05, "loss": 2.266301727294922, "memory(GiB)": 72.85, "step": 71445, "token_acc": 0.5040431266846361, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.0611370549676535, "grad_norm": 6.398867130279541, "learning_rate": 3.277096207223528e-05, "loss": 1.8599504470825194, "memory(GiB)": 72.85, "step": 71450, "token_acc": 0.5655430711610487, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.0613512702969023, 
"grad_norm": 5.174177646636963, "learning_rate": 3.2764644608924344e-05, "loss": 1.9904819488525392, "memory(GiB)": 72.85, "step": 71455, "token_acc": 0.5589225589225589, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.0615654856261516, "grad_norm": 6.738514423370361, "learning_rate": 3.2758327457848035e-05, "loss": 2.0941390991210938, "memory(GiB)": 72.85, "step": 71460, "token_acc": 0.5548172757475083, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.0617797009554004, "grad_norm": 5.186707019805908, "learning_rate": 3.275201061912081e-05, "loss": 2.2266921997070312, "memory(GiB)": 72.85, "step": 71465, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.0619939162846492, "grad_norm": 10.103256225585938, "learning_rate": 3.274569409285707e-05, "loss": 2.3215158462524412, "memory(GiB)": 72.85, "step": 71470, "token_acc": 0.5145985401459854, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.0622081316138985, "grad_norm": 4.957287311553955, "learning_rate": 3.273937787917129e-05, "loss": 2.099073791503906, "memory(GiB)": 72.85, "step": 71475, "token_acc": 0.5019455252918288, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.0624223469431473, "grad_norm": 6.678194999694824, "learning_rate": 3.273306197817787e-05, "loss": 2.3611270904541017, "memory(GiB)": 72.85, "step": 71480, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.672747 }, { "epoch": 3.062636562272396, "grad_norm": 5.308071613311768, "learning_rate": 3.272674638999121e-05, "loss": 1.8120052337646484, "memory(GiB)": 72.85, "step": 71485, "token_acc": 0.55, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.0628507776016454, "grad_norm": 4.577859401702881, "learning_rate": 3.2720431114725765e-05, "loss": 2.481916046142578, "memory(GiB)": 72.85, "step": 71490, "token_acc": 0.5182926829268293, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.063064992930894, "grad_norm": 4.34591007232666, "learning_rate": 3.271411615249589e-05, "loss": 2.119384002685547, "memory(GiB)": 72.85, "step": 
71495, "token_acc": 0.5063694267515924, "train_speed(iter/s)": 0.672748 }, { "epoch": 3.063279208260143, "grad_norm": 5.37779426574707, "learning_rate": 3.270780150341605e-05, "loss": 2.0792282104492186, "memory(GiB)": 72.85, "step": 71500, "token_acc": 0.5335276967930029, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.063279208260143, "eval_loss": 2.04952335357666, "eval_runtime": 16.5028, "eval_samples_per_second": 6.06, "eval_steps_per_second": 6.06, "eval_token_acc": 0.4893350062735257, "step": 71500 }, { "epoch": 3.0634934235893923, "grad_norm": 4.590423107147217, "learning_rate": 3.270148716760058e-05, "loss": 1.9844511032104493, "memory(GiB)": 72.85, "step": 71505, "token_acc": 0.5106976744186047, "train_speed(iter/s)": 0.67262 }, { "epoch": 3.063707638918641, "grad_norm": 4.234012603759766, "learning_rate": 3.2695173145163915e-05, "loss": 1.9667938232421875, "memory(GiB)": 72.85, "step": 71510, "token_acc": 0.5590277777777778, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.06392185424789, "grad_norm": 4.164618015289307, "learning_rate": 3.268885943622043e-05, "loss": 1.8949974060058594, "memory(GiB)": 72.85, "step": 71515, "token_acc": 0.543918918918919, "train_speed(iter/s)": 0.672622 }, { "epoch": 3.064136069577139, "grad_norm": 4.368712902069092, "learning_rate": 3.2682546040884475e-05, "loss": 1.9330183029174806, "memory(GiB)": 72.85, "step": 71520, "token_acc": 0.5475409836065573, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.064350284906388, "grad_norm": 5.090556621551514, "learning_rate": 3.2676232959270456e-05, "loss": 2.033736228942871, "memory(GiB)": 72.85, "step": 71525, "token_acc": 0.5736434108527132, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.0645645002356368, "grad_norm": 6.663718223571777, "learning_rate": 3.2669920191492714e-05, "loss": 2.3849233627319335, "memory(GiB)": 72.85, "step": 71530, "token_acc": 0.4919093851132686, "train_speed(iter/s)": 0.67261 }, { "epoch": 3.064778715564886, "grad_norm": 4.980041027069092, 
"learning_rate": 3.266360773766564e-05, "loss": 1.8759912490844726, "memory(GiB)": 72.85, "step": 71535, "token_acc": 0.5641891891891891, "train_speed(iter/s)": 0.67261 }, { "epoch": 3.064992930894135, "grad_norm": 5.505342960357666, "learning_rate": 3.265729559790357e-05, "loss": 2.3777536392211913, "memory(GiB)": 72.85, "step": 71540, "token_acc": 0.5120967741935484, "train_speed(iter/s)": 0.672614 }, { "epoch": 3.0652071462233836, "grad_norm": 5.956794261932373, "learning_rate": 3.2650983772320857e-05, "loss": 2.094776725769043, "memory(GiB)": 72.85, "step": 71545, "token_acc": 0.5473684210526316, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.065421361552633, "grad_norm": 5.951139450073242, "learning_rate": 3.264467226103185e-05, "loss": 2.01546688079834, "memory(GiB)": 72.85, "step": 71550, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.0656355768818817, "grad_norm": 6.072529315948486, "learning_rate": 3.263836106415089e-05, "loss": 2.116840362548828, "memory(GiB)": 72.85, "step": 71555, "token_acc": 0.5289855072463768, "train_speed(iter/s)": 0.672612 }, { "epoch": 3.0658497922111305, "grad_norm": 6.794958591461182, "learning_rate": 3.26320501817923e-05, "loss": 1.9990398406982421, "memory(GiB)": 72.85, "step": 71560, "token_acc": 0.5551724137931034, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.0660640075403798, "grad_norm": 5.339178085327148, "learning_rate": 3.2625739614070415e-05, "loss": 1.92900390625, "memory(GiB)": 72.85, "step": 71565, "token_acc": 0.5520833333333334, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.0662782228696286, "grad_norm": 4.319184303283691, "learning_rate": 3.261942936109954e-05, "loss": 1.9709278106689454, "memory(GiB)": 72.85, "step": 71570, "token_acc": 0.5805084745762712, "train_speed(iter/s)": 0.672622 }, { "epoch": 3.0664924381988774, "grad_norm": 6.372118949890137, "learning_rate": 3.261311942299401e-05, "loss": 2.1189388275146483, "memory(GiB)": 72.85, "step": 71575, "token_acc": 
0.5263157894736842, "train_speed(iter/s)": 0.672624 }, { "epoch": 3.0667066535281267, "grad_norm": 4.785248279571533, "learning_rate": 3.2606809799868146e-05, "loss": 1.9105785369873047, "memory(GiB)": 72.85, "step": 71580, "token_acc": 0.563573883161512, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.0669208688573755, "grad_norm": 7.411341667175293, "learning_rate": 3.260050049183624e-05, "loss": 2.116637420654297, "memory(GiB)": 72.85, "step": 71585, "token_acc": 0.5445859872611465, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.0671350841866243, "grad_norm": 6.493471622467041, "learning_rate": 3.2594191499012584e-05, "loss": 2.0315155029296874, "memory(GiB)": 72.85, "step": 71590, "token_acc": 0.5322033898305085, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.0673492995158735, "grad_norm": 7.713765621185303, "learning_rate": 3.2587882821511476e-05, "loss": 2.260803985595703, "memory(GiB)": 72.85, "step": 71595, "token_acc": 0.48, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.0675635148451224, "grad_norm": 5.648256301879883, "learning_rate": 3.2581574459447214e-05, "loss": 2.109400177001953, "memory(GiB)": 72.85, "step": 71600, "token_acc": 0.5311355311355311, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.067777730174371, "grad_norm": 3.8161957263946533, "learning_rate": 3.257526641293406e-05, "loss": 2.102824020385742, "memory(GiB)": 72.85, "step": 71605, "token_acc": 0.5639344262295082, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.0679919455036204, "grad_norm": 6.428198337554932, "learning_rate": 3.25689586820863e-05, "loss": 1.8675304412841798, "memory(GiB)": 72.85, "step": 71610, "token_acc": 0.5902255639097744, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.0682061608328692, "grad_norm": 5.877601623535156, "learning_rate": 3.2562651267018204e-05, "loss": 2.212933349609375, "memory(GiB)": 72.85, "step": 71615, "token_acc": 0.4967741935483871, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.068420376162118, "grad_norm": 5.354395389556885, 
"learning_rate": 3.255634416784404e-05, "loss": 2.232948112487793, "memory(GiB)": 72.85, "step": 71620, "token_acc": 0.5197368421052632, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.0686345914913673, "grad_norm": 6.105681896209717, "learning_rate": 3.255003738467808e-05, "loss": 2.0721607208251953, "memory(GiB)": 72.85, "step": 71625, "token_acc": 0.5581395348837209, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.068848806820616, "grad_norm": 6.38720178604126, "learning_rate": 3.254373091763454e-05, "loss": 2.213312339782715, "memory(GiB)": 72.85, "step": 71630, "token_acc": 0.548, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.069063022149865, "grad_norm": 8.416572570800781, "learning_rate": 3.2537424766827706e-05, "loss": 2.100356101989746, "memory(GiB)": 72.85, "step": 71635, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.069277237479114, "grad_norm": 5.280162811279297, "learning_rate": 3.25311189323718e-05, "loss": 2.1616209030151365, "memory(GiB)": 72.85, "step": 71640, "token_acc": 0.5283582089552239, "train_speed(iter/s)": 0.672614 }, { "epoch": 3.069491452808363, "grad_norm": 5.750206470489502, "learning_rate": 3.252481341438104e-05, "loss": 2.0275714874267576, "memory(GiB)": 72.85, "step": 71645, "token_acc": 0.56, "train_speed(iter/s)": 0.672606 }, { "epoch": 3.069705668137612, "grad_norm": 4.988376617431641, "learning_rate": 3.251850821296969e-05, "loss": 1.8885757446289062, "memory(GiB)": 72.85, "step": 71650, "token_acc": 0.5653710247349824, "train_speed(iter/s)": 0.672614 }, { "epoch": 3.069919883466861, "grad_norm": 4.750631332397461, "learning_rate": 3.251220332825198e-05, "loss": 2.2017166137695314, "memory(GiB)": 72.85, "step": 71655, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672618 }, { "epoch": 3.07013409879611, "grad_norm": 6.598528861999512, "learning_rate": 3.2505898760342104e-05, "loss": 2.0945976257324217, "memory(GiB)": 72.85, "step": 71660, "token_acc": 0.5170278637770898, 
"train_speed(iter/s)": 0.672618 }, { "epoch": 3.0703483141253587, "grad_norm": 8.144505500793457, "learning_rate": 3.249959450935427e-05, "loss": 2.357272720336914, "memory(GiB)": 72.85, "step": 71665, "token_acc": 0.48632218844984804, "train_speed(iter/s)": 0.67261 }, { "epoch": 3.070562529454608, "grad_norm": 6.521749496459961, "learning_rate": 3.249329057540271e-05, "loss": 1.931460189819336, "memory(GiB)": 72.85, "step": 71670, "token_acc": 0.5597269624573379, "train_speed(iter/s)": 0.6726 }, { "epoch": 3.0707767447838568, "grad_norm": 4.537057876586914, "learning_rate": 3.2486986958601604e-05, "loss": 2.2291114807128904, "memory(GiB)": 72.85, "step": 71675, "token_acc": 0.5222929936305732, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.0709909601131056, "grad_norm": 8.204541206359863, "learning_rate": 3.248068365906517e-05, "loss": 2.2281436920166016, "memory(GiB)": 72.85, "step": 71680, "token_acc": 0.5231316725978647, "train_speed(iter/s)": 0.672593 }, { "epoch": 3.071205175442355, "grad_norm": 5.382759094238281, "learning_rate": 3.2474380676907587e-05, "loss": 2.058256912231445, "memory(GiB)": 72.85, "step": 71685, "token_acc": 0.5863309352517986, "train_speed(iter/s)": 0.67258 }, { "epoch": 3.0714193907716036, "grad_norm": 4.61682653427124, "learning_rate": 3.246807801224303e-05, "loss": 2.160148048400879, "memory(GiB)": 72.85, "step": 71690, "token_acc": 0.5018050541516246, "train_speed(iter/s)": 0.67256 }, { "epoch": 3.0716336061008525, "grad_norm": 5.513885021209717, "learning_rate": 3.2461775665185686e-05, "loss": 2.1692596435546876, "memory(GiB)": 72.85, "step": 71695, "token_acc": 0.5470383275261324, "train_speed(iter/s)": 0.672563 }, { "epoch": 3.0718478214301017, "grad_norm": 4.3940749168396, "learning_rate": 3.2455473635849745e-05, "loss": 1.7980781555175782, "memory(GiB)": 72.85, "step": 71700, "token_acc": 0.5913621262458472, "train_speed(iter/s)": 0.672567 }, { "epoch": 3.0720620367593505, "grad_norm": 6.435458183288574, "learning_rate": 
3.244917192434933e-05, "loss": 1.7770784378051758, "memory(GiB)": 72.85, "step": 71705, "token_acc": 0.5905172413793104, "train_speed(iter/s)": 0.672571 }, { "epoch": 3.0722762520885993, "grad_norm": 6.131955623626709, "learning_rate": 3.244287053079865e-05, "loss": 2.2153148651123047, "memory(GiB)": 72.85, "step": 71710, "token_acc": 0.509090909090909, "train_speed(iter/s)": 0.672569 }, { "epoch": 3.0724904674178486, "grad_norm": 5.50639533996582, "learning_rate": 3.2436569455311816e-05, "loss": 1.9184730529785157, "memory(GiB)": 72.85, "step": 71715, "token_acc": 0.5490196078431373, "train_speed(iter/s)": 0.672565 }, { "epoch": 3.0727046827470974, "grad_norm": 5.944974899291992, "learning_rate": 3.243026869800301e-05, "loss": 2.0084964752197267, "memory(GiB)": 72.85, "step": 71720, "token_acc": 0.5682656826568265, "train_speed(iter/s)": 0.672558 }, { "epoch": 3.072918898076346, "grad_norm": 7.6234025955200195, "learning_rate": 3.242396825898638e-05, "loss": 2.022394561767578, "memory(GiB)": 72.85, "step": 71725, "token_acc": 0.5580524344569289, "train_speed(iter/s)": 0.672565 }, { "epoch": 3.0731331134055955, "grad_norm": 4.591200828552246, "learning_rate": 3.241766813837606e-05, "loss": 1.8513700485229492, "memory(GiB)": 72.85, "step": 71730, "token_acc": 0.5698924731182796, "train_speed(iter/s)": 0.672569 }, { "epoch": 3.0733473287348443, "grad_norm": 5.122316837310791, "learning_rate": 3.241136833628615e-05, "loss": 1.9098737716674805, "memory(GiB)": 72.85, "step": 71735, "token_acc": 0.5633802816901409, "train_speed(iter/s)": 0.672562 }, { "epoch": 3.073561544064093, "grad_norm": 6.426998138427734, "learning_rate": 3.2405068852830825e-05, "loss": 2.345180702209473, "memory(GiB)": 72.85, "step": 71740, "token_acc": 0.5076452599388379, "train_speed(iter/s)": 0.672567 }, { "epoch": 3.0737757593933424, "grad_norm": 6.591789245605469, "learning_rate": 3.239876968812418e-05, "loss": 2.1933361053466798, "memory(GiB)": 72.85, "step": 71745, "token_acc": 
0.5032679738562091, "train_speed(iter/s)": 0.672555 }, { "epoch": 3.073989974722591, "grad_norm": 4.750649452209473, "learning_rate": 3.239247084228032e-05, "loss": 2.1062397003173827, "memory(GiB)": 72.85, "step": 71750, "token_acc": 0.5787545787545788, "train_speed(iter/s)": 0.672541 }, { "epoch": 3.07420419005184, "grad_norm": 5.96690034866333, "learning_rate": 3.238617231541338e-05, "loss": 2.1937862396240235, "memory(GiB)": 72.85, "step": 71755, "token_acc": 0.5268456375838926, "train_speed(iter/s)": 0.672545 }, { "epoch": 3.0744184053810892, "grad_norm": 6.6887383460998535, "learning_rate": 3.2379874107637444e-05, "loss": 2.183109664916992, "memory(GiB)": 72.85, "step": 71760, "token_acc": 0.5508474576271186, "train_speed(iter/s)": 0.672551 }, { "epoch": 3.074632620710338, "grad_norm": 6.87215518951416, "learning_rate": 3.2373576219066626e-05, "loss": 1.9889154434204102, "memory(GiB)": 72.85, "step": 71765, "token_acc": 0.5498154981549815, "train_speed(iter/s)": 0.672557 }, { "epoch": 3.074846836039587, "grad_norm": 4.953912258148193, "learning_rate": 3.236727864981501e-05, "loss": 1.7186737060546875, "memory(GiB)": 72.85, "step": 71770, "token_acc": 0.5836431226765799, "train_speed(iter/s)": 0.672573 }, { "epoch": 3.075061051368836, "grad_norm": 5.709259510040283, "learning_rate": 3.236098139999667e-05, "loss": 2.0874128341674805, "memory(GiB)": 72.85, "step": 71775, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.672583 }, { "epoch": 3.075275266698085, "grad_norm": 4.669174671173096, "learning_rate": 3.235468446972571e-05, "loss": 2.097904396057129, "memory(GiB)": 72.85, "step": 71780, "token_acc": 0.4983277591973244, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.0754894820273337, "grad_norm": 6.358663082122803, "learning_rate": 3.23483878591162e-05, "loss": 2.1557870864868165, "memory(GiB)": 72.85, "step": 71785, "token_acc": 0.5230263157894737, "train_speed(iter/s)": 0.672591 }, { "epoch": 3.075703697356583, "grad_norm": 5.36470365524292, 
"learning_rate": 3.2342091568282175e-05, "loss": 2.287291717529297, "memory(GiB)": 72.85, "step": 71790, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.672592 }, { "epoch": 3.075917912685832, "grad_norm": 5.646018028259277, "learning_rate": 3.2335795597337734e-05, "loss": 2.2892290115356446, "memory(GiB)": 72.85, "step": 71795, "token_acc": 0.5, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.0761321280150806, "grad_norm": 7.230668544769287, "learning_rate": 3.232949994639693e-05, "loss": 2.272678756713867, "memory(GiB)": 72.85, "step": 71800, "token_acc": 0.5266903914590747, "train_speed(iter/s)": 0.672587 }, { "epoch": 3.07634634334433, "grad_norm": 7.174509048461914, "learning_rate": 3.2323204615573835e-05, "loss": 2.1123416900634764, "memory(GiB)": 72.85, "step": 71805, "token_acc": 0.5210727969348659, "train_speed(iter/s)": 0.672582 }, { "epoch": 3.0765605586735787, "grad_norm": 5.932636260986328, "learning_rate": 3.2316909604982434e-05, "loss": 2.1482976913452148, "memory(GiB)": 72.85, "step": 71810, "token_acc": 0.5125, "train_speed(iter/s)": 0.672584 }, { "epoch": 3.0767747740028275, "grad_norm": 5.523989677429199, "learning_rate": 3.2310614914736826e-05, "loss": 2.08912353515625, "memory(GiB)": 72.85, "step": 71815, "token_acc": 0.5751633986928104, "train_speed(iter/s)": 0.672589 }, { "epoch": 3.0769889893320768, "grad_norm": 5.623080253601074, "learning_rate": 3.2304320544951016e-05, "loss": 1.8891016006469727, "memory(GiB)": 72.85, "step": 71820, "token_acc": 0.5665529010238908, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.0772032046613256, "grad_norm": 6.708661079406738, "learning_rate": 3.229802649573904e-05, "loss": 2.1256071090698243, "memory(GiB)": 72.85, "step": 71825, "token_acc": 0.5390946502057613, "train_speed(iter/s)": 0.672586 }, { "epoch": 3.0774174199905744, "grad_norm": 5.237918853759766, "learning_rate": 3.2291732767214934e-05, "loss": 2.217314910888672, "memory(GiB)": 72.85, "step": 71830, "token_acc": 0.5551839464882943, 
"train_speed(iter/s)": 0.672589 }, { "epoch": 3.0776316353198236, "grad_norm": 9.35777759552002, "learning_rate": 3.2285439359492676e-05, "loss": 2.108099365234375, "memory(GiB)": 72.85, "step": 71835, "token_acc": 0.5390070921985816, "train_speed(iter/s)": 0.672586 }, { "epoch": 3.0778458506490725, "grad_norm": 5.340311527252197, "learning_rate": 3.227914627268632e-05, "loss": 1.5630910873413086, "memory(GiB)": 72.85, "step": 71840, "token_acc": 0.6367924528301887, "train_speed(iter/s)": 0.672581 }, { "epoch": 3.0780600659783213, "grad_norm": 5.808384418487549, "learning_rate": 3.227285350690985e-05, "loss": 1.8890029907226562, "memory(GiB)": 72.85, "step": 71845, "token_acc": 0.547945205479452, "train_speed(iter/s)": 0.672575 }, { "epoch": 3.0782742813075705, "grad_norm": 5.6635942459106445, "learning_rate": 3.226656106227726e-05, "loss": 2.1318016052246094, "memory(GiB)": 72.85, "step": 71850, "token_acc": 0.5407407407407407, "train_speed(iter/s)": 0.672582 }, { "epoch": 3.0784884966368193, "grad_norm": 5.347670555114746, "learning_rate": 3.226026893890255e-05, "loss": 2.143535614013672, "memory(GiB)": 72.85, "step": 71855, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.672582 }, { "epoch": 3.078702711966068, "grad_norm": 6.331587791442871, "learning_rate": 3.225397713689972e-05, "loss": 1.888389778137207, "memory(GiB)": 72.85, "step": 71860, "token_acc": 0.5641025641025641, "train_speed(iter/s)": 0.672587 }, { "epoch": 3.0789169272953174, "grad_norm": 6.713162899017334, "learning_rate": 3.224768565638271e-05, "loss": 2.0846954345703126, "memory(GiB)": 72.85, "step": 71865, "token_acc": 0.5627009646302251, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.079131142624566, "grad_norm": 5.25736665725708, "learning_rate": 3.224139449746556e-05, "loss": 2.055590057373047, "memory(GiB)": 72.85, "step": 71870, "token_acc": 0.5566343042071198, "train_speed(iter/s)": 0.6726 }, { "epoch": 3.079345357953815, "grad_norm": 6.508932113647461, "learning_rate": 
3.22351036602622e-05, "loss": 2.3512187957763673, "memory(GiB)": 72.85, "step": 71875, "token_acc": 0.484375, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.0795595732830643, "grad_norm": 6.177726745605469, "learning_rate": 3.2228813144886606e-05, "loss": 2.0717586517333983, "memory(GiB)": 72.85, "step": 71880, "token_acc": 0.5321888412017167, "train_speed(iter/s)": 0.672595 }, { "epoch": 3.079773788612313, "grad_norm": 5.91089391708374, "learning_rate": 3.222252295145273e-05, "loss": 2.1406835556030273, "memory(GiB)": 72.85, "step": 71885, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.079988003941562, "grad_norm": 5.934329986572266, "learning_rate": 3.221623308007453e-05, "loss": 2.3150081634521484, "memory(GiB)": 72.85, "step": 71890, "token_acc": 0.5234657039711191, "train_speed(iter/s)": 0.672612 }, { "epoch": 3.080202219270811, "grad_norm": 5.740843296051025, "learning_rate": 3.220994353086594e-05, "loss": 1.8717700958251953, "memory(GiB)": 72.85, "step": 71895, "token_acc": 0.5308219178082192, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.08041643460006, "grad_norm": 5.766706466674805, "learning_rate": 3.220365430394092e-05, "loss": 1.7637836456298828, "memory(GiB)": 72.85, "step": 71900, "token_acc": 0.5977443609022557, "train_speed(iter/s)": 0.672627 }, { "epoch": 3.080630649929309, "grad_norm": 4.784089088439941, "learning_rate": 3.2197365399413394e-05, "loss": 2.050078201293945, "memory(GiB)": 72.85, "step": 71905, "token_acc": 0.549520766773163, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.080844865258558, "grad_norm": 4.997866153717041, "learning_rate": 3.21910768173973e-05, "loss": 1.8652614593505858, "memory(GiB)": 72.85, "step": 71910, "token_acc": 0.5382059800664452, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.081059080587807, "grad_norm": 6.390736103057861, "learning_rate": 3.2184788558006553e-05, "loss": 1.8453266143798828, "memory(GiB)": 72.85, "step": 71915, "token_acc": 0.5754385964912281, 
"train_speed(iter/s)": 0.672625 }, { "epoch": 3.0812732959170557, "grad_norm": 4.327361583709717, "learning_rate": 3.217850062135507e-05, "loss": 1.9651453018188476, "memory(GiB)": 72.85, "step": 71920, "token_acc": 0.5802919708029197, "train_speed(iter/s)": 0.672618 }, { "epoch": 3.081487511246305, "grad_norm": 6.9127726554870605, "learning_rate": 3.2172213007556766e-05, "loss": 2.077926445007324, "memory(GiB)": 72.85, "step": 71925, "token_acc": 0.5402298850574713, "train_speed(iter/s)": 0.672614 }, { "epoch": 3.0817017265755537, "grad_norm": 5.9813079833984375, "learning_rate": 3.216592571672555e-05, "loss": 2.2660196304321287, "memory(GiB)": 72.85, "step": 71930, "token_acc": 0.4891640866873065, "train_speed(iter/s)": 0.672617 }, { "epoch": 3.0819159419048026, "grad_norm": 10.315654754638672, "learning_rate": 3.215963874897532e-05, "loss": 2.3180801391601564, "memory(GiB)": 72.85, "step": 71935, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.672618 }, { "epoch": 3.082130157234052, "grad_norm": 5.725717544555664, "learning_rate": 3.215335210441995e-05, "loss": 1.9733705520629883, "memory(GiB)": 72.85, "step": 71940, "token_acc": 0.554140127388535, "train_speed(iter/s)": 0.67262 }, { "epoch": 3.0823443725633006, "grad_norm": 4.689335346221924, "learning_rate": 3.214706578317337e-05, "loss": 2.246148109436035, "memory(GiB)": 72.85, "step": 71945, "token_acc": 0.5226586102719033, "train_speed(iter/s)": 0.672619 }, { "epoch": 3.0825585878925494, "grad_norm": 6.8687028884887695, "learning_rate": 3.214077978534944e-05, "loss": 2.3218788146972655, "memory(GiB)": 72.85, "step": 71950, "token_acc": 0.5291828793774319, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.0827728032217987, "grad_norm": 5.1942901611328125, "learning_rate": 3.213449411106203e-05, "loss": 2.377366638183594, "memory(GiB)": 72.85, "step": 71955, "token_acc": 0.5295950155763239, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.0829870185510475, "grad_norm": 5.7196431159973145, 
"learning_rate": 3.2128208760425025e-05, "loss": 2.152647018432617, "memory(GiB)": 72.85, "step": 71960, "token_acc": 0.5524475524475524, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.0832012338802963, "grad_norm": 5.508145809173584, "learning_rate": 3.2121923733552275e-05, "loss": 2.106268310546875, "memory(GiB)": 72.85, "step": 71965, "token_acc": 0.5156794425087108, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.0834154492095456, "grad_norm": 5.500322341918945, "learning_rate": 3.211563903055766e-05, "loss": 2.138785552978516, "memory(GiB)": 72.85, "step": 71970, "token_acc": 0.4866666666666667, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.0836296645387944, "grad_norm": 5.451967239379883, "learning_rate": 3.210935465155501e-05, "loss": 2.0733669281005858, "memory(GiB)": 72.85, "step": 71975, "token_acc": 0.5399361022364217, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.083843879868043, "grad_norm": 6.343043804168701, "learning_rate": 3.2103070596658186e-05, "loss": 2.1090789794921876, "memory(GiB)": 72.85, "step": 71980, "token_acc": 0.5376712328767124, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.0840580951972925, "grad_norm": 4.887917995452881, "learning_rate": 3.209678686598103e-05, "loss": 1.8866291046142578, "memory(GiB)": 72.85, "step": 71985, "token_acc": 0.5993975903614458, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.0842723105265413, "grad_norm": 5.748469352722168, "learning_rate": 3.2090503459637376e-05, "loss": 2.409993362426758, "memory(GiB)": 72.85, "step": 71990, "token_acc": 0.47540983606557374, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.08448652585579, "grad_norm": 4.8899688720703125, "learning_rate": 3.2084220377741046e-05, "loss": 1.7543958663940429, "memory(GiB)": 72.85, "step": 71995, "token_acc": 0.5798816568047337, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.0847007411850393, "grad_norm": 8.355697631835938, "learning_rate": 3.2077937620405875e-05, "loss": 2.3206886291503905, "memory(GiB)": 72.85, "step": 72000, 
"token_acc": 0.5366666666666666, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.0847007411850393, "eval_loss": 2.0424611568450928, "eval_runtime": 14.9483, "eval_samples_per_second": 6.69, "eval_steps_per_second": 6.69, "eval_token_acc": 0.5012345679012346, "step": 72000 }, { "epoch": 3.084914956514288, "grad_norm": 5.178858757019043, "learning_rate": 3.207165518774567e-05, "loss": 1.9261693954467773, "memory(GiB)": 72.85, "step": 72005, "token_acc": 0.5169811320754717, "train_speed(iter/s)": 0.672539 }, { "epoch": 3.085129171843537, "grad_norm": 6.272393226623535, "learning_rate": 3.2065373079874264e-05, "loss": 1.8793979644775392, "memory(GiB)": 72.85, "step": 72010, "token_acc": 0.5657370517928287, "train_speed(iter/s)": 0.672534 }, { "epoch": 3.0853433871727862, "grad_norm": 5.631988525390625, "learning_rate": 3.205909129690542e-05, "loss": 2.41812686920166, "memory(GiB)": 72.85, "step": 72015, "token_acc": 0.49310344827586206, "train_speed(iter/s)": 0.67253 }, { "epoch": 3.085557602502035, "grad_norm": 5.926543235778809, "learning_rate": 3.205280983895298e-05, "loss": 1.9466934204101562, "memory(GiB)": 72.85, "step": 72020, "token_acc": 0.5523465703971119, "train_speed(iter/s)": 0.672523 }, { "epoch": 3.085771817831284, "grad_norm": 4.453667163848877, "learning_rate": 3.2046528706130726e-05, "loss": 2.0881675720214843, "memory(GiB)": 72.85, "step": 72025, "token_acc": 0.5079365079365079, "train_speed(iter/s)": 0.672508 }, { "epoch": 3.085986033160533, "grad_norm": 4.859024524688721, "learning_rate": 3.204024789855246e-05, "loss": 2.171660804748535, "memory(GiB)": 72.85, "step": 72030, "token_acc": 0.5436507936507936, "train_speed(iter/s)": 0.672519 }, { "epoch": 3.086200248489782, "grad_norm": 5.567615509033203, "learning_rate": 3.203396741633194e-05, "loss": 1.7290157318115233, "memory(GiB)": 72.85, "step": 72035, "token_acc": 0.6015625, "train_speed(iter/s)": 0.672527 }, { "epoch": 3.0864144638190307, "grad_norm": 7.23477840423584, "learning_rate": 
3.202768725958295e-05, "loss": 1.9581933975219727, "memory(GiB)": 72.85, "step": 72040, "token_acc": 0.5704225352112676, "train_speed(iter/s)": 0.672541 }, { "epoch": 3.08662867914828, "grad_norm": 7.371650218963623, "learning_rate": 3.202140742841928e-05, "loss": 2.1022050857543944, "memory(GiB)": 72.85, "step": 72045, "token_acc": 0.5425867507886435, "train_speed(iter/s)": 0.672548 }, { "epoch": 3.086842894477529, "grad_norm": 5.210415363311768, "learning_rate": 3.201512792295467e-05, "loss": 1.8310945510864258, "memory(GiB)": 72.85, "step": 72050, "token_acc": 0.5909090909090909, "train_speed(iter/s)": 0.672545 }, { "epoch": 3.0870571098067776, "grad_norm": 6.347781658172607, "learning_rate": 3.200884874330289e-05, "loss": 2.4328540802001952, "memory(GiB)": 72.85, "step": 72055, "token_acc": 0.4732142857142857, "train_speed(iter/s)": 0.672546 }, { "epoch": 3.087271325136027, "grad_norm": 5.421815872192383, "learning_rate": 3.200256988957769e-05, "loss": 1.8676952362060546, "memory(GiB)": 72.85, "step": 72060, "token_acc": 0.525096525096525, "train_speed(iter/s)": 0.67255 }, { "epoch": 3.0874855404652757, "grad_norm": 5.867778778076172, "learning_rate": 3.1996291361892813e-05, "loss": 2.212592887878418, "memory(GiB)": 72.85, "step": 72065, "token_acc": 0.5472312703583062, "train_speed(iter/s)": 0.67255 }, { "epoch": 3.0876997557945245, "grad_norm": 6.167985916137695, "learning_rate": 3.1990013160362025e-05, "loss": 2.1987945556640627, "memory(GiB)": 72.85, "step": 72070, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.672558 }, { "epoch": 3.0879139711237737, "grad_norm": 6.884618282318115, "learning_rate": 3.198373528509904e-05, "loss": 1.9560985565185547, "memory(GiB)": 72.85, "step": 72075, "token_acc": 0.5958188153310104, "train_speed(iter/s)": 0.67257 }, { "epoch": 3.0881281864530226, "grad_norm": 5.78845739364624, "learning_rate": 3.197745773621758e-05, "loss": 1.9287689208984375, "memory(GiB)": 72.85, "step": 72080, "token_acc": 
0.5373665480427047, "train_speed(iter/s)": 0.672568 }, { "epoch": 3.0883424017822714, "grad_norm": 6.483495235443115, "learning_rate": 3.197118051383137e-05, "loss": 1.8962417602539063, "memory(GiB)": 72.85, "step": 72085, "token_acc": 0.5486381322957199, "train_speed(iter/s)": 0.672567 }, { "epoch": 3.0885566171115206, "grad_norm": 5.933588027954102, "learning_rate": 3.196490361805415e-05, "loss": 2.047251892089844, "memory(GiB)": 72.85, "step": 72090, "token_acc": 0.5620915032679739, "train_speed(iter/s)": 0.672569 }, { "epoch": 3.0887708324407694, "grad_norm": 3.515167236328125, "learning_rate": 3.1958627048999626e-05, "loss": 2.136644744873047, "memory(GiB)": 72.85, "step": 72095, "token_acc": 0.5693215339233039, "train_speed(iter/s)": 0.672563 }, { "epoch": 3.0889850477700183, "grad_norm": 8.136004447937012, "learning_rate": 3.195235080678149e-05, "loss": 2.1497392654418945, "memory(GiB)": 72.85, "step": 72100, "token_acc": 0.5637065637065637, "train_speed(iter/s)": 0.672561 }, { "epoch": 3.0891992630992675, "grad_norm": 6.2038116455078125, "learning_rate": 3.1946074891513444e-05, "loss": 2.3218273162841796, "memory(GiB)": 72.85, "step": 72105, "token_acc": 0.55, "train_speed(iter/s)": 0.672569 }, { "epoch": 3.0894134784285163, "grad_norm": 5.90116548538208, "learning_rate": 3.193979930330919e-05, "loss": 2.024312973022461, "memory(GiB)": 72.85, "step": 72110, "token_acc": 0.5364963503649635, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.089627693757765, "grad_norm": 5.645786285400391, "learning_rate": 3.193352404228243e-05, "loss": 2.049761962890625, "memory(GiB)": 72.85, "step": 72115, "token_acc": 0.5567765567765568, "train_speed(iter/s)": 0.672587 }, { "epoch": 3.0898419090870144, "grad_norm": 5.910443305969238, "learning_rate": 3.1927249108546814e-05, "loss": 2.130992889404297, "memory(GiB)": 72.85, "step": 72120, "token_acc": 0.5166051660516605, "train_speed(iter/s)": 0.672586 }, { "epoch": 3.090056124416263, "grad_norm": 5.463050365447998, 
"learning_rate": 3.192097450221603e-05, "loss": 2.1723892211914064, "memory(GiB)": 72.85, "step": 72125, "token_acc": 0.5258358662613982, "train_speed(iter/s)": 0.672584 }, { "epoch": 3.090270339745512, "grad_norm": 7.051460266113281, "learning_rate": 3.191470022340376e-05, "loss": 2.157037544250488, "memory(GiB)": 72.85, "step": 72130, "token_acc": 0.5415384615384615, "train_speed(iter/s)": 0.672582 }, { "epoch": 3.0904845550747613, "grad_norm": 6.297416687011719, "learning_rate": 3.190842627222367e-05, "loss": 1.977764892578125, "memory(GiB)": 72.85, "step": 72135, "token_acc": 0.5588235294117647, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.09069877040401, "grad_norm": 5.424359321594238, "learning_rate": 3.1902152648789395e-05, "loss": 1.84200439453125, "memory(GiB)": 72.85, "step": 72140, "token_acc": 0.5766129032258065, "train_speed(iter/s)": 0.672581 }, { "epoch": 3.090912985733259, "grad_norm": 6.590149402618408, "learning_rate": 3.1895879353214606e-05, "loss": 2.292190742492676, "memory(GiB)": 72.85, "step": 72145, "token_acc": 0.5395189003436426, "train_speed(iter/s)": 0.67258 }, { "epoch": 3.091127201062508, "grad_norm": 5.294656276702881, "learning_rate": 3.188960638561294e-05, "loss": 2.2677207946777345, "memory(GiB)": 72.85, "step": 72150, "token_acc": 0.5361842105263158, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.091341416391757, "grad_norm": 5.041738510131836, "learning_rate": 3.188333374609805e-05, "loss": 1.8267086029052735, "memory(GiB)": 72.85, "step": 72155, "token_acc": 0.6209386281588448, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.091555631721006, "grad_norm": 8.261690139770508, "learning_rate": 3.187706143478355e-05, "loss": 1.9207700729370116, "memory(GiB)": 72.85, "step": 72160, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672592 }, { "epoch": 3.091769847050255, "grad_norm": 4.257164478302002, "learning_rate": 3.18707894517831e-05, "loss": 2.022941780090332, "memory(GiB)": 72.85, "step": 72165, "token_acc": 
0.5261538461538462, "train_speed(iter/s)": 0.672597 }, { "epoch": 3.091984062379504, "grad_norm": 4.487820148468018, "learning_rate": 3.18645177972103e-05, "loss": 2.042845344543457, "memory(GiB)": 72.85, "step": 72170, "token_acc": 0.5510204081632653, "train_speed(iter/s)": 0.672599 }, { "epoch": 3.0921982777087527, "grad_norm": 7.730922698974609, "learning_rate": 3.185824647117878e-05, "loss": 1.9494998931884766, "memory(GiB)": 72.85, "step": 72175, "token_acc": 0.5670995670995671, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.092412493038002, "grad_norm": 5.736198902130127, "learning_rate": 3.185197547380214e-05, "loss": 2.2200189590454102, "memory(GiB)": 72.85, "step": 72180, "token_acc": 0.5310344827586206, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.0926267083672507, "grad_norm": 5.012415885925293, "learning_rate": 3.1845704805193975e-05, "loss": 2.057941436767578, "memory(GiB)": 72.85, "step": 72185, "token_acc": 0.5643835616438356, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.0928409236964995, "grad_norm": 6.325059413909912, "learning_rate": 3.183943446546792e-05, "loss": 2.268003463745117, "memory(GiB)": 72.85, "step": 72190, "token_acc": 0.5050847457627119, "train_speed(iter/s)": 0.672627 }, { "epoch": 3.093055139025749, "grad_norm": 4.816209316253662, "learning_rate": 3.183316445473753e-05, "loss": 2.1048328399658205, "memory(GiB)": 72.85, "step": 72195, "token_acc": 0.554858934169279, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.0932693543549976, "grad_norm": 5.587780475616455, "learning_rate": 3.182689477311641e-05, "loss": 2.1658708572387697, "memory(GiB)": 72.85, "step": 72200, "token_acc": 0.520891364902507, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.0934835696842464, "grad_norm": 10.900074005126953, "learning_rate": 3.182062542071815e-05, "loss": 2.08709716796875, "memory(GiB)": 72.85, "step": 72205, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.0936977850134957, "grad_norm": 5.668189525604248, 
"learning_rate": 3.18143563976563e-05, "loss": 2.1048635482788085, "memory(GiB)": 72.85, "step": 72210, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.0939120003427445, "grad_norm": 5.128875732421875, "learning_rate": 3.180808770404445e-05, "loss": 2.180878448486328, "memory(GiB)": 72.85, "step": 72215, "token_acc": 0.5242718446601942, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.0941262156719933, "grad_norm": 6.1534576416015625, "learning_rate": 3.180181933999616e-05, "loss": 2.063099670410156, "memory(GiB)": 72.85, "step": 72220, "token_acc": 0.5755102040816327, "train_speed(iter/s)": 0.672617 }, { "epoch": 3.0943404310012426, "grad_norm": 6.182106018066406, "learning_rate": 3.179555130562497e-05, "loss": 2.460700035095215, "memory(GiB)": 72.85, "step": 72225, "token_acc": 0.5181818181818182, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.0945546463304914, "grad_norm": 5.300474643707275, "learning_rate": 3.178928360104446e-05, "loss": 2.338633346557617, "memory(GiB)": 72.85, "step": 72230, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.09476886165974, "grad_norm": 5.722198486328125, "learning_rate": 3.1783016226368147e-05, "loss": 2.1386993408203123, "memory(GiB)": 72.85, "step": 72235, "token_acc": 0.5513698630136986, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.0949830769889894, "grad_norm": 5.2359514236450195, "learning_rate": 3.177674918170959e-05, "loss": 2.0620079040527344, "memory(GiB)": 72.85, "step": 72240, "token_acc": 0.541095890410959, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.0951972923182383, "grad_norm": 4.849368572235107, "learning_rate": 3.177048246718232e-05, "loss": 2.059187889099121, "memory(GiB)": 72.85, "step": 72245, "token_acc": 0.5276752767527675, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.095411507647487, "grad_norm": 5.74123477935791, "learning_rate": 3.1764216082899853e-05, "loss": 2.044206237792969, "memory(GiB)": 72.85, "step": 72250, "token_acc": 
0.5359712230215827, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.0956257229767363, "grad_norm": 4.759278297424316, "learning_rate": 3.175795002897572e-05, "loss": 2.219099426269531, "memory(GiB)": 72.85, "step": 72255, "token_acc": 0.5468277945619335, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.095839938305985, "grad_norm": 4.259162902832031, "learning_rate": 3.175168430552344e-05, "loss": 2.2155412673950194, "memory(GiB)": 72.85, "step": 72260, "token_acc": 0.5083612040133779, "train_speed(iter/s)": 0.672624 }, { "epoch": 3.096054153635234, "grad_norm": 5.187714576721191, "learning_rate": 3.1745418912656525e-05, "loss": 2.187528610229492, "memory(GiB)": 72.85, "step": 72265, "token_acc": 0.5212355212355212, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.096268368964483, "grad_norm": 5.422654628753662, "learning_rate": 3.1739153850488456e-05, "loss": 1.9745912551879883, "memory(GiB)": 72.85, "step": 72270, "token_acc": 0.5457227138643068, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.096482584293732, "grad_norm": 5.872976779937744, "learning_rate": 3.173288911913276e-05, "loss": 2.2630283355712892, "memory(GiB)": 72.85, "step": 72275, "token_acc": 0.4807692307692308, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.096696799622981, "grad_norm": 5.2138519287109375, "learning_rate": 3.17266247187029e-05, "loss": 2.3443201065063475, "memory(GiB)": 72.85, "step": 72280, "token_acc": 0.5177865612648221, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.09691101495223, "grad_norm": 7.723632335662842, "learning_rate": 3.172036064931238e-05, "loss": 2.040313148498535, "memory(GiB)": 72.85, "step": 72285, "token_acc": 0.5382262996941896, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.097125230281479, "grad_norm": 5.20357084274292, "learning_rate": 3.171409691107467e-05, "loss": 1.9711475372314453, "memory(GiB)": 72.85, "step": 72290, "token_acc": 0.5487364620938628, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.0973394456107277, "grad_norm": 5.725861549377441, 
"learning_rate": 3.1707833504103254e-05, "loss": 2.0925725936889648, "memory(GiB)": 72.85, "step": 72295, "token_acc": 0.5240963855421686, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.097553660939977, "grad_norm": 5.142855644226074, "learning_rate": 3.1701570428511596e-05, "loss": 2.2397727966308594, "memory(GiB)": 72.85, "step": 72300, "token_acc": 0.5236363636363637, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.097767876269226, "grad_norm": 5.171331882476807, "learning_rate": 3.169530768441316e-05, "loss": 2.1010257720947267, "memory(GiB)": 72.85, "step": 72305, "token_acc": 0.5539033457249071, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.0979820915984746, "grad_norm": 5.6189799308776855, "learning_rate": 3.1689045271921365e-05, "loss": 1.8544097900390626, "memory(GiB)": 72.85, "step": 72310, "token_acc": 0.5765124555160143, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.098196306927724, "grad_norm": 6.839034557342529, "learning_rate": 3.168278319114971e-05, "loss": 2.1706335067749025, "memory(GiB)": 72.85, "step": 72315, "token_acc": 0.5364963503649635, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.0984105222569727, "grad_norm": 5.867397308349609, "learning_rate": 3.167652144221163e-05, "loss": 2.302486801147461, "memory(GiB)": 72.85, "step": 72320, "token_acc": 0.5086505190311419, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.0986247375862215, "grad_norm": 4.846171855926514, "learning_rate": 3.1670260025220546e-05, "loss": 2.0800241470336913, "memory(GiB)": 72.85, "step": 72325, "token_acc": 0.5351681957186545, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.0988389529154707, "grad_norm": 5.755127429962158, "learning_rate": 3.1663998940289894e-05, "loss": 2.2014108657836915, "memory(GiB)": 72.85, "step": 72330, "token_acc": 0.5092592592592593, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.0990531682447195, "grad_norm": 5.289050579071045, "learning_rate": 3.165773818753311e-05, "loss": 1.8305778503417969, "memory(GiB)": 72.85, "step": 72335, 
"token_acc": 0.5884615384615385, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.0992673835739684, "grad_norm": 4.60721492767334, "learning_rate": 3.1651477767063595e-05, "loss": 2.1553012847900392, "memory(GiB)": 72.85, "step": 72340, "token_acc": 0.5098039215686274, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.0994815989032176, "grad_norm": 5.3651957511901855, "learning_rate": 3.164521767899478e-05, "loss": 2.262497329711914, "memory(GiB)": 72.85, "step": 72345, "token_acc": 0.526813880126183, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.0996958142324664, "grad_norm": 6.17258882522583, "learning_rate": 3.163895792344007e-05, "loss": 2.1199840545654296, "memory(GiB)": 72.85, "step": 72350, "token_acc": 0.5477941176470589, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.0999100295617152, "grad_norm": 4.2732625007629395, "learning_rate": 3.163269850051285e-05, "loss": 2.047053909301758, "memory(GiB)": 72.85, "step": 72355, "token_acc": 0.5606060606060606, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.1001242448909645, "grad_norm": 5.301600456237793, "learning_rate": 3.162643941032653e-05, "loss": 1.973537063598633, "memory(GiB)": 72.85, "step": 72360, "token_acc": 0.5875486381322957, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.1003384602202133, "grad_norm": 8.731474876403809, "learning_rate": 3.16201806529945e-05, "loss": 2.1676380157470705, "memory(GiB)": 72.85, "step": 72365, "token_acc": 0.5691699604743083, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.100552675549462, "grad_norm": 5.691610336303711, "learning_rate": 3.1613922228630136e-05, "loss": 2.0873403549194336, "memory(GiB)": 72.85, "step": 72370, "token_acc": 0.5300353356890459, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.1007668908787114, "grad_norm": 4.916072845458984, "learning_rate": 3.160766413734681e-05, "loss": 2.1931020736694338, "memory(GiB)": 72.85, "step": 72375, "token_acc": 0.5019157088122606, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.10098110620796, "grad_norm": 
4.621870517730713, "learning_rate": 3.160140637925791e-05, "loss": 1.8684810638427733, "memory(GiB)": 72.85, "step": 72380, "token_acc": 0.5830618892508144, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.101195321537209, "grad_norm": 5.946414470672607, "learning_rate": 3.159514895447679e-05, "loss": 1.6071538925170898, "memory(GiB)": 72.85, "step": 72385, "token_acc": 0.6, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.1014095368664583, "grad_norm": 4.276555061340332, "learning_rate": 3.158889186311681e-05, "loss": 2.0585586547851564, "memory(GiB)": 72.85, "step": 72390, "token_acc": 0.5198776758409785, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.101623752195707, "grad_norm": 6.158783435821533, "learning_rate": 3.158263510529133e-05, "loss": 2.252091979980469, "memory(GiB)": 72.85, "step": 72395, "token_acc": 0.47039473684210525, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.101837967524956, "grad_norm": 6.817680358886719, "learning_rate": 3.1576378681113685e-05, "loss": 2.0123777389526367, "memory(GiB)": 72.85, "step": 72400, "token_acc": 0.5652173913043478, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.102052182854205, "grad_norm": 6.176132678985596, "learning_rate": 3.1570122590697235e-05, "loss": 2.0507766723632814, "memory(GiB)": 72.85, "step": 72405, "token_acc": 0.4983277591973244, "train_speed(iter/s)": 0.672678 }, { "epoch": 3.102266398183454, "grad_norm": 3.9987564086914062, "learning_rate": 3.15638668341553e-05, "loss": 1.8312774658203126, "memory(GiB)": 72.85, "step": 72410, "token_acc": 0.6123188405797102, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.1024806135127028, "grad_norm": 7.407456398010254, "learning_rate": 3.155761141160121e-05, "loss": 1.9910194396972656, "memory(GiB)": 72.85, "step": 72415, "token_acc": 0.5518394648829431, "train_speed(iter/s)": 0.672692 }, { "epoch": 3.102694828841952, "grad_norm": 5.707339286804199, "learning_rate": 3.1551356323148284e-05, "loss": 2.586253547668457, "memory(GiB)": 72.85, "step": 72420, 
"token_acc": 0.5126582278481012, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.102909044171201, "grad_norm": 6.154913425445557, "learning_rate": 3.1545101568909855e-05, "loss": 1.981062126159668, "memory(GiB)": 72.85, "step": 72425, "token_acc": 0.571875, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.1031232595004496, "grad_norm": 6.1628737449646, "learning_rate": 3.153884714899921e-05, "loss": 2.331072235107422, "memory(GiB)": 72.85, "step": 72430, "token_acc": 0.5297619047619048, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.103337474829699, "grad_norm": 5.376282691955566, "learning_rate": 3.153259306352968e-05, "loss": 2.1098659515380858, "memory(GiB)": 72.85, "step": 72435, "token_acc": 0.5703971119133574, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.1035516901589477, "grad_norm": 6.486807823181152, "learning_rate": 3.152633931261454e-05, "loss": 2.168033981323242, "memory(GiB)": 72.85, "step": 72440, "token_acc": 0.556390977443609, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.1037659054881965, "grad_norm": 5.639733791351318, "learning_rate": 3.15200858963671e-05, "loss": 2.022360420227051, "memory(GiB)": 72.85, "step": 72445, "token_acc": 0.5407407407407407, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.103980120817446, "grad_norm": 7.247982501983643, "learning_rate": 3.151383281490065e-05, "loss": 2.07559757232666, "memory(GiB)": 72.85, "step": 72450, "token_acc": 0.5225806451612903, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.1041943361466946, "grad_norm": 6.488443374633789, "learning_rate": 3.150758006832842e-05, "loss": 1.974137306213379, "memory(GiB)": 72.85, "step": 72455, "token_acc": 0.546031746031746, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.1044085514759434, "grad_norm": 6.901057720184326, "learning_rate": 3.150132765676374e-05, "loss": 2.1007268905639647, "memory(GiB)": 72.85, "step": 72460, "token_acc": 0.5708661417322834, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.1046227668051927, "grad_norm": 4.981420993804932, 
"learning_rate": 3.149507558031989e-05, "loss": 2.1983531951904296, "memory(GiB)": 72.85, "step": 72465, "token_acc": 0.5188172043010753, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.1048369821344415, "grad_norm": 6.632315158843994, "learning_rate": 3.148882383911009e-05, "loss": 2.3516918182373048, "memory(GiB)": 72.85, "step": 72470, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.1050511974636903, "grad_norm": 7.124999523162842, "learning_rate": 3.14825724332476e-05, "loss": 2.090990447998047, "memory(GiB)": 72.85, "step": 72475, "token_acc": 0.5241157556270096, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.1052654127929396, "grad_norm": 4.506641387939453, "learning_rate": 3.14763213628457e-05, "loss": 1.7763963699340821, "memory(GiB)": 72.85, "step": 72480, "token_acc": 0.5882352941176471, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.1054796281221884, "grad_norm": 6.02532434463501, "learning_rate": 3.14700706280176e-05, "loss": 2.108809471130371, "memory(GiB)": 72.85, "step": 72485, "token_acc": 0.5226480836236934, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.105693843451437, "grad_norm": 4.888488292694092, "learning_rate": 3.146382022887656e-05, "loss": 2.0611820220947266, "memory(GiB)": 72.85, "step": 72490, "token_acc": 0.541095890410959, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.1059080587806864, "grad_norm": 6.003374099731445, "learning_rate": 3.145757016553581e-05, "loss": 2.3255558013916016, "memory(GiB)": 72.85, "step": 72495, "token_acc": 0.5229681978798587, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.1061222741099352, "grad_norm": 6.1648664474487305, "learning_rate": 3.1451320438108564e-05, "loss": 2.3384239196777346, "memory(GiB)": 72.85, "step": 72500, "token_acc": 0.5284810126582279, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.1061222741099352, "eval_loss": 2.118938684463501, "eval_runtime": 15.7541, "eval_samples_per_second": 6.348, "eval_steps_per_second": 6.348, "eval_token_acc": 
0.5052631578947369, "step": 72500 }, { "epoch": 3.106336489439184, "grad_norm": 6.294800281524658, "learning_rate": 3.1445071046708055e-05, "loss": 2.2798267364501954, "memory(GiB)": 72.85, "step": 72505, "token_acc": 0.5158562367864693, "train_speed(iter/s)": 0.672547 }, { "epoch": 3.1065507047684333, "grad_norm": 6.084676265716553, "learning_rate": 3.143882199144749e-05, "loss": 2.0845977783203127, "memory(GiB)": 72.85, "step": 72510, "token_acc": 0.5944700460829493, "train_speed(iter/s)": 0.672546 }, { "epoch": 3.106764920097682, "grad_norm": 5.137457370758057, "learning_rate": 3.143257327244006e-05, "loss": 2.348529815673828, "memory(GiB)": 72.85, "step": 72515, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.672548 }, { "epoch": 3.106979135426931, "grad_norm": 6.236578941345215, "learning_rate": 3.1426324889799e-05, "loss": 2.160234069824219, "memory(GiB)": 72.85, "step": 72520, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.672551 }, { "epoch": 3.10719335075618, "grad_norm": 5.1212334632873535, "learning_rate": 3.142007684363747e-05, "loss": 1.945082664489746, "memory(GiB)": 72.85, "step": 72525, "token_acc": 0.5251572327044025, "train_speed(iter/s)": 0.672543 }, { "epoch": 3.107407566085429, "grad_norm": 28.36506462097168, "learning_rate": 3.141382913406867e-05, "loss": 2.0283809661865235, "memory(GiB)": 72.85, "step": 72530, "token_acc": 0.5444015444015444, "train_speed(iter/s)": 0.672547 }, { "epoch": 3.107621781414678, "grad_norm": 4.51902961730957, "learning_rate": 3.140758176120582e-05, "loss": 1.9385190963745118, "memory(GiB)": 72.85, "step": 72535, "token_acc": 0.5594855305466238, "train_speed(iter/s)": 0.672549 }, { "epoch": 3.107835996743927, "grad_norm": 5.033231735229492, "learning_rate": 3.140133472516206e-05, "loss": 1.9295459747314454, "memory(GiB)": 72.85, "step": 72540, "token_acc": 0.5673758865248227, "train_speed(iter/s)": 0.67256 }, { "epoch": 3.108050212073176, "grad_norm": 9.86880874633789, "learning_rate": 
3.139508802605055e-05, "loss": 2.190109634399414, "memory(GiB)": 72.85, "step": 72545, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.672567 }, { "epoch": 3.1082644274024247, "grad_norm": 6.449174880981445, "learning_rate": 3.1388841663984505e-05, "loss": 2.2097087860107423, "memory(GiB)": 72.85, "step": 72550, "token_acc": 0.4738372093023256, "train_speed(iter/s)": 0.672568 }, { "epoch": 3.108478642731674, "grad_norm": 4.794569969177246, "learning_rate": 3.1382595639077034e-05, "loss": 1.9036819458007812, "memory(GiB)": 72.85, "step": 72555, "token_acc": 0.5508196721311476, "train_speed(iter/s)": 0.672565 }, { "epoch": 3.1086928580609228, "grad_norm": 5.14576530456543, "learning_rate": 3.1376349951441305e-05, "loss": 2.1806514739990233, "memory(GiB)": 72.85, "step": 72560, "token_acc": 0.4778156996587031, "train_speed(iter/s)": 0.672566 }, { "epoch": 3.1089070733901716, "grad_norm": 5.976357936859131, "learning_rate": 3.1370104601190475e-05, "loss": 1.8541521072387694, "memory(GiB)": 72.85, "step": 72565, "token_acc": 0.5642633228840125, "train_speed(iter/s)": 0.67257 }, { "epoch": 3.109121288719421, "grad_norm": 5.485257625579834, "learning_rate": 3.136385958843766e-05, "loss": 2.1033256530761717, "memory(GiB)": 72.85, "step": 72570, "token_acc": 0.5314465408805031, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.1093355040486697, "grad_norm": 4.560611248016357, "learning_rate": 3.135761491329603e-05, "loss": 1.7939451217651368, "memory(GiB)": 72.85, "step": 72575, "token_acc": 0.5524475524475524, "train_speed(iter/s)": 0.672584 }, { "epoch": 3.1095497193779185, "grad_norm": 5.115523338317871, "learning_rate": 3.135137057587869e-05, "loss": 2.381816101074219, "memory(GiB)": 72.85, "step": 72580, "token_acc": 0.4817708333333333, "train_speed(iter/s)": 0.672597 }, { "epoch": 3.1097639347071677, "grad_norm": 6.333441257476807, "learning_rate": 3.134512657629876e-05, "loss": 1.993144416809082, "memory(GiB)": 72.85, "step": 72585, "token_acc": 
0.5464684014869888, "train_speed(iter/s)": 0.672593 }, { "epoch": 3.1099781500364165, "grad_norm": 7.543163776397705, "learning_rate": 3.1338882914669365e-05, "loss": 1.9686391830444336, "memory(GiB)": 72.85, "step": 72590, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.6726 }, { "epoch": 3.1101923653656653, "grad_norm": 4.8459625244140625, "learning_rate": 3.133263959110362e-05, "loss": 2.033535385131836, "memory(GiB)": 72.85, "step": 72595, "token_acc": 0.5666666666666667, "train_speed(iter/s)": 0.672593 }, { "epoch": 3.1104065806949146, "grad_norm": 5.055450439453125, "learning_rate": 3.1326396605714603e-05, "loss": 1.9290437698364258, "memory(GiB)": 72.85, "step": 72600, "token_acc": 0.5815384615384616, "train_speed(iter/s)": 0.672599 }, { "epoch": 3.1106207960241634, "grad_norm": 6.102560043334961, "learning_rate": 3.132015395861542e-05, "loss": 2.1303451538085936, "memory(GiB)": 72.85, "step": 72605, "token_acc": 0.5494505494505495, "train_speed(iter/s)": 0.672609 }, { "epoch": 3.1108350113534122, "grad_norm": 4.755853652954102, "learning_rate": 3.1313911649919194e-05, "loss": 2.1014734268188477, "memory(GiB)": 72.85, "step": 72610, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.67261 }, { "epoch": 3.1110492266826615, "grad_norm": 7.855395317077637, "learning_rate": 3.130766967973898e-05, "loss": 2.094308090209961, "memory(GiB)": 72.85, "step": 72615, "token_acc": 0.544, "train_speed(iter/s)": 0.672606 }, { "epoch": 3.1112634420119103, "grad_norm": 5.939291477203369, "learning_rate": 3.130142804818786e-05, "loss": 1.9456743240356444, "memory(GiB)": 72.85, "step": 72620, "token_acc": 0.5482866043613707, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.111477657341159, "grad_norm": 4.7109174728393555, "learning_rate": 3.129518675537893e-05, "loss": 2.3356117248535155, "memory(GiB)": 72.85, "step": 72625, "token_acc": 0.4896551724137931, "train_speed(iter/s)": 0.672611 }, { "epoch": 3.1116918726704084, "grad_norm": 4.778289318084717, 
"learning_rate": 3.128894580142522e-05, "loss": 2.3513240814208984, "memory(GiB)": 72.85, "step": 72630, "token_acc": 0.5255681818181818, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.111906087999657, "grad_norm": 3.919572591781616, "learning_rate": 3.128270518643982e-05, "loss": 2.0389987945556642, "memory(GiB)": 72.85, "step": 72635, "token_acc": 0.5743944636678201, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.112120303328906, "grad_norm": 5.2717204093933105, "learning_rate": 3.127646491053577e-05, "loss": 1.9622735977172852, "memory(GiB)": 72.85, "step": 72640, "token_acc": 0.5392857142857143, "train_speed(iter/s)": 0.672615 }, { "epoch": 3.1123345186581552, "grad_norm": 6.280423164367676, "learning_rate": 3.127022497382611e-05, "loss": 2.1005504608154295, "memory(GiB)": 72.85, "step": 72645, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.112548733987404, "grad_norm": 4.540974140167236, "learning_rate": 3.126398537642392e-05, "loss": 1.915550994873047, "memory(GiB)": 72.85, "step": 72650, "token_acc": 0.5836734693877551, "train_speed(iter/s)": 0.67262 }, { "epoch": 3.112762949316653, "grad_norm": 5.706165790557861, "learning_rate": 3.12577461184422e-05, "loss": 2.3225358963012694, "memory(GiB)": 72.85, "step": 72655, "token_acc": 0.49240121580547114, "train_speed(iter/s)": 0.672619 }, { "epoch": 3.112977164645902, "grad_norm": 6.814767837524414, "learning_rate": 3.125150719999398e-05, "loss": 2.1146278381347656, "memory(GiB)": 72.85, "step": 72660, "token_acc": 0.5212355212355212, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.113191379975151, "grad_norm": 6.0860066413879395, "learning_rate": 3.12452686211923e-05, "loss": 1.9915475845336914, "memory(GiB)": 72.85, "step": 72665, "token_acc": 0.5560344827586207, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.1134055953044, "grad_norm": 6.55952262878418, "learning_rate": 3.123903038215017e-05, "loss": 1.9091377258300781, "memory(GiB)": 72.85, "step": 72670, "token_acc": 
0.543859649122807, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.113619810633649, "grad_norm": 5.793970108032227, "learning_rate": 3.1232792482980614e-05, "loss": 2.0166893005371094, "memory(GiB)": 72.85, "step": 72675, "token_acc": 0.5412186379928315, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.113834025962898, "grad_norm": 6.6294636726379395, "learning_rate": 3.12265549237966e-05, "loss": 2.003614044189453, "memory(GiB)": 72.85, "step": 72680, "token_acc": 0.5295950155763239, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.114048241292147, "grad_norm": 5.0465898513793945, "learning_rate": 3.122031770471119e-05, "loss": 2.183485412597656, "memory(GiB)": 72.85, "step": 72685, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.114262456621396, "grad_norm": 6.93400764465332, "learning_rate": 3.121408082583732e-05, "loss": 1.99993953704834, "memory(GiB)": 72.85, "step": 72690, "token_acc": 0.55, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.1144766719506447, "grad_norm": 4.967258930206299, "learning_rate": 3.1207844287288005e-05, "loss": 2.027887535095215, "memory(GiB)": 72.85, "step": 72695, "token_acc": 0.5596026490066225, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.114690887279894, "grad_norm": 5.486751556396484, "learning_rate": 3.120160808917622e-05, "loss": 2.2901092529296876, "memory(GiB)": 72.85, "step": 72700, "token_acc": 0.5133079847908745, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.1149051026091428, "grad_norm": 6.314843654632568, "learning_rate": 3.119537223161494e-05, "loss": 2.098375129699707, "memory(GiB)": 72.85, "step": 72705, "token_acc": 0.5427631578947368, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.1151193179383916, "grad_norm": 4.830260753631592, "learning_rate": 3.118913671471714e-05, "loss": 2.230961227416992, "memory(GiB)": 72.85, "step": 72710, "token_acc": 0.5166163141993958, "train_speed(iter/s)": 0.672655 }, { "epoch": 3.115333533267641, "grad_norm": 4.576676845550537, "learning_rate": 
3.118290153859576e-05, "loss": 1.9274173736572267, "memory(GiB)": 72.85, "step": 72715, "token_acc": 0.5688405797101449, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.1155477485968897, "grad_norm": 10.42148208618164, "learning_rate": 3.1176666703363775e-05, "loss": 2.2600942611694337, "memory(GiB)": 72.85, "step": 72720, "token_acc": 0.5053763440860215, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.1157619639261385, "grad_norm": 5.546805381774902, "learning_rate": 3.117043220913414e-05, "loss": 2.116547393798828, "memory(GiB)": 72.85, "step": 72725, "token_acc": 0.5296167247386759, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.1159761792553877, "grad_norm": 4.953848361968994, "learning_rate": 3.116419805601977e-05, "loss": 2.007238006591797, "memory(GiB)": 72.85, "step": 72730, "token_acc": 0.5643939393939394, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.1161903945846365, "grad_norm": 4.645264625549316, "learning_rate": 3.1157964244133636e-05, "loss": 1.863272476196289, "memory(GiB)": 72.85, "step": 72735, "token_acc": 0.5741444866920152, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.1164046099138853, "grad_norm": 7.966286659240723, "learning_rate": 3.1151730773588656e-05, "loss": 2.148842620849609, "memory(GiB)": 72.85, "step": 72740, "token_acc": 0.5204918032786885, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.1166188252431346, "grad_norm": 6.877475261688232, "learning_rate": 3.114549764449775e-05, "loss": 1.9576303482055664, "memory(GiB)": 72.85, "step": 72745, "token_acc": 0.5, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.1168330405723834, "grad_norm": 7.135300636291504, "learning_rate": 3.113926485697382e-05, "loss": 2.305563735961914, "memory(GiB)": 72.85, "step": 72750, "token_acc": 0.4967741935483871, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.1170472559016322, "grad_norm": 5.032740592956543, "learning_rate": 3.113303241112982e-05, "loss": 2.219706726074219, "memory(GiB)": 72.85, "step": 72755, "token_acc": 0.555205047318612, 
"train_speed(iter/s)": 0.672664 }, { "epoch": 3.1172614712308815, "grad_norm": 6.022158622741699, "learning_rate": 3.1126800307078644e-05, "loss": 2.096162223815918, "memory(GiB)": 72.85, "step": 72760, "token_acc": 0.5124555160142349, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.1174756865601303, "grad_norm": 5.462790489196777, "learning_rate": 3.112056854493317e-05, "loss": 2.3039602279663085, "memory(GiB)": 72.85, "step": 72765, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.117689901889379, "grad_norm": 5.379281044006348, "learning_rate": 3.1114337124806316e-05, "loss": 2.4715423583984375, "memory(GiB)": 72.85, "step": 72770, "token_acc": 0.5566343042071198, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.1179041172186284, "grad_norm": 6.127087593078613, "learning_rate": 3.110810604681096e-05, "loss": 2.196796417236328, "memory(GiB)": 72.85, "step": 72775, "token_acc": 0.5354609929078015, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.118118332547877, "grad_norm": 4.71285343170166, "learning_rate": 3.110187531105998e-05, "loss": 2.099643325805664, "memory(GiB)": 72.85, "step": 72780, "token_acc": 0.5084269662921348, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.118332547877126, "grad_norm": 4.583128929138184, "learning_rate": 3.109564491766627e-05, "loss": 1.9411832809448242, "memory(GiB)": 72.85, "step": 72785, "token_acc": 0.5724907063197026, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.1185467632063752, "grad_norm": 5.114350318908691, "learning_rate": 3.108941486674267e-05, "loss": 2.1854034423828126, "memory(GiB)": 72.85, "step": 72790, "token_acc": 0.5228758169934641, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.118760978535624, "grad_norm": 4.366440773010254, "learning_rate": 3.108318515840207e-05, "loss": 2.170437812805176, "memory(GiB)": 72.85, "step": 72795, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.118975193864873, "grad_norm": 5.7064619064331055, "learning_rate": 
3.107695579275732e-05, "loss": 1.8287940979003907, "memory(GiB)": 72.85, "step": 72800, "token_acc": 0.5672727272727273, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.119189409194122, "grad_norm": 4.156899452209473, "learning_rate": 3.1070726769921255e-05, "loss": 1.987942886352539, "memory(GiB)": 72.85, "step": 72805, "token_acc": 0.554140127388535, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.119403624523371, "grad_norm": 5.884254455566406, "learning_rate": 3.1064498090006746e-05, "loss": 1.9676383972167968, "memory(GiB)": 72.85, "step": 72810, "token_acc": 0.5273972602739726, "train_speed(iter/s)": 0.672668 }, { "epoch": 3.1196178398526198, "grad_norm": 5.56210994720459, "learning_rate": 3.1058269753126594e-05, "loss": 1.9266197204589843, "memory(GiB)": 72.85, "step": 72815, "token_acc": 0.5752895752895753, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.119832055181869, "grad_norm": 5.460916519165039, "learning_rate": 3.105204175939367e-05, "loss": 2.3400455474853517, "memory(GiB)": 72.85, "step": 72820, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.120046270511118, "grad_norm": 7.452915191650391, "learning_rate": 3.1045814108920765e-05, "loss": 2.237050247192383, "memory(GiB)": 72.85, "step": 72825, "token_acc": 0.5183946488294314, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.1202604858403666, "grad_norm": 4.543479919433594, "learning_rate": 3.103958680182074e-05, "loss": 2.2531484603881835, "memory(GiB)": 72.85, "step": 72830, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.672668 }, { "epoch": 3.120474701169616, "grad_norm": 4.873347282409668, "learning_rate": 3.103335983820638e-05, "loss": 2.099612808227539, "memory(GiB)": 72.85, "step": 72835, "token_acc": 0.5622895622895623, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.1206889164988647, "grad_norm": 4.969986915588379, "learning_rate": 3.102713321819051e-05, "loss": 2.2000244140625, "memory(GiB)": 72.85, "step": 72840, "token_acc": 
0.5307692307692308, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.1209031318281135, "grad_norm": 7.27443265914917, "learning_rate": 3.102090694188591e-05, "loss": 2.3228790283203127, "memory(GiB)": 72.85, "step": 72845, "token_acc": 0.5, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.1211173471573628, "grad_norm": 6.815298557281494, "learning_rate": 3.101468100940538e-05, "loss": 2.011387825012207, "memory(GiB)": 72.85, "step": 72850, "token_acc": 0.587360594795539, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.1213315624866116, "grad_norm": 5.520057201385498, "learning_rate": 3.100845542086171e-05, "loss": 1.9208124160766602, "memory(GiB)": 72.85, "step": 72855, "token_acc": 0.5986394557823129, "train_speed(iter/s)": 0.672678 }, { "epoch": 3.1215457778158604, "grad_norm": 6.307549476623535, "learning_rate": 3.10022301763677e-05, "loss": 1.9287309646606445, "memory(GiB)": 72.85, "step": 72860, "token_acc": 0.5683229813664596, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.1217599931451097, "grad_norm": 6.73323392868042, "learning_rate": 3.0996005276036104e-05, "loss": 2.0758167266845704, "memory(GiB)": 72.85, "step": 72865, "token_acc": 0.5761589403973509, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.1219742084743585, "grad_norm": 5.420924186706543, "learning_rate": 3.0989780719979704e-05, "loss": 2.1239835739135744, "memory(GiB)": 72.85, "step": 72870, "token_acc": 0.5420875420875421, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.1221884238036073, "grad_norm": 5.454546928405762, "learning_rate": 3.0983556508311254e-05, "loss": 2.2479557037353515, "memory(GiB)": 72.85, "step": 72875, "token_acc": 0.5420875420875421, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.1224026391328565, "grad_norm": 7.1132121086120605, "learning_rate": 3.097733264114352e-05, "loss": 2.322256088256836, "memory(GiB)": 72.85, "step": 72880, "token_acc": 0.5071942446043165, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.1226168544621054, "grad_norm": 5.298500061035156, 
"learning_rate": 3.097110911858925e-05, "loss": 1.8469894409179688, "memory(GiB)": 72.85, "step": 72885, "token_acc": 0.5900383141762452, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.122831069791354, "grad_norm": 4.8996171951293945, "learning_rate": 3.0964885940761186e-05, "loss": 2.127199172973633, "memory(GiB)": 72.85, "step": 72890, "token_acc": 0.5474137931034483, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.1230452851206034, "grad_norm": 9.05701732635498, "learning_rate": 3.095866310777207e-05, "loss": 2.450165557861328, "memory(GiB)": 72.85, "step": 72895, "token_acc": 0.4850498338870432, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.1232595004498522, "grad_norm": 6.668960094451904, "learning_rate": 3.0952440619734625e-05, "loss": 2.073818016052246, "memory(GiB)": 72.85, "step": 72900, "token_acc": 0.5198675496688742, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.123473715779101, "grad_norm": 5.15566873550415, "learning_rate": 3.0946218476761596e-05, "loss": 2.2321834564208984, "memory(GiB)": 72.85, "step": 72905, "token_acc": 0.4899328859060403, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.1236879311083503, "grad_norm": 5.312379837036133, "learning_rate": 3.093999667896569e-05, "loss": 2.0381492614746093, "memory(GiB)": 72.85, "step": 72910, "token_acc": 0.5833333333333334, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.123902146437599, "grad_norm": 5.433825492858887, "learning_rate": 3.093377522645963e-05, "loss": 2.2598819732666016, "memory(GiB)": 72.85, "step": 72915, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.124116361766848, "grad_norm": 5.1299920082092285, "learning_rate": 3.09275541193561e-05, "loss": 1.9258195877075195, "memory(GiB)": 72.85, "step": 72920, "token_acc": 0.5928853754940712, "train_speed(iter/s)": 0.672692 }, { "epoch": 3.124330577096097, "grad_norm": 5.674587249755859, "learning_rate": 3.092133335776785e-05, "loss": 2.041385269165039, "memory(GiB)": 72.85, "step": 72925, "token_acc": 
0.5207547169811321, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.124544792425346, "grad_norm": 6.14146614074707, "learning_rate": 3.091511294180752e-05, "loss": 2.3783679962158204, "memory(GiB)": 72.85, "step": 72930, "token_acc": 0.48951048951048953, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.124759007754595, "grad_norm": 7.617697715759277, "learning_rate": 3.090889287158783e-05, "loss": 2.0695701599121095, "memory(GiB)": 72.85, "step": 72935, "token_acc": 0.5547169811320755, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.124973223083844, "grad_norm": 6.151548862457275, "learning_rate": 3.090267314722146e-05, "loss": 2.1420536041259766, "memory(GiB)": 72.85, "step": 72940, "token_acc": 0.5267857142857143, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.125187438413093, "grad_norm": 6.0813517570495605, "learning_rate": 3.089645376882108e-05, "loss": 2.0885408401489256, "memory(GiB)": 72.85, "step": 72945, "token_acc": 0.5354609929078015, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.1254016537423417, "grad_norm": 4.583804607391357, "learning_rate": 3.0890234736499346e-05, "loss": 2.324421501159668, "memory(GiB)": 72.85, "step": 72950, "token_acc": 0.4923547400611621, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.125615869071591, "grad_norm": 8.068319320678711, "learning_rate": 3.0884016050368945e-05, "loss": 1.9698568344116212, "memory(GiB)": 72.85, "step": 72955, "token_acc": 0.5689655172413793, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.1258300844008398, "grad_norm": 9.348608016967773, "learning_rate": 3.087779771054252e-05, "loss": 2.1603042602539064, "memory(GiB)": 72.85, "step": 72960, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.1260442997300886, "grad_norm": 5.458560943603516, "learning_rate": 3.0871579717132734e-05, "loss": 2.1996383666992188, "memory(GiB)": 72.85, "step": 72965, "token_acc": 0.5425867507886435, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.126258515059338, "grad_norm": 5.15324068069458, 
"learning_rate": 3.086536207025223e-05, "loss": 2.2312444686889648, "memory(GiB)": 72.85, "step": 72970, "token_acc": 0.5403225806451613, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.1264727303885866, "grad_norm": 4.68441915512085, "learning_rate": 3.085914477001362e-05, "loss": 1.840565299987793, "memory(GiB)": 72.85, "step": 72975, "token_acc": 0.5604026845637584, "train_speed(iter/s)": 0.672742 }, { "epoch": 3.1266869457178355, "grad_norm": 6.252969264984131, "learning_rate": 3.085292781652957e-05, "loss": 2.2097841262817384, "memory(GiB)": 72.85, "step": 72980, "token_acc": 0.5014409221902018, "train_speed(iter/s)": 0.67275 }, { "epoch": 3.1269011610470847, "grad_norm": 4.733941078186035, "learning_rate": 3.08467112099127e-05, "loss": 2.1199615478515623, "memory(GiB)": 72.85, "step": 72985, "token_acc": 0.5422535211267606, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.1271153763763335, "grad_norm": 5.480513095855713, "learning_rate": 3.0840494950275626e-05, "loss": 2.0595783233642577, "memory(GiB)": 72.85, "step": 72990, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.1273295917055823, "grad_norm": 5.526041507720947, "learning_rate": 3.0834279037730954e-05, "loss": 2.230031394958496, "memory(GiB)": 72.85, "step": 72995, "token_acc": 0.48175182481751827, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.1275438070348316, "grad_norm": 5.641049385070801, "learning_rate": 3.0828063472391294e-05, "loss": 1.7698549270629882, "memory(GiB)": 72.85, "step": 73000, "token_acc": 0.5819672131147541, "train_speed(iter/s)": 0.672776 }, { "epoch": 3.1275438070348316, "eval_loss": 2.1700079441070557, "eval_runtime": 15.6776, "eval_samples_per_second": 6.379, "eval_steps_per_second": 6.379, "eval_token_acc": 0.521044992743106, "step": 73000 }, { "epoch": 3.1277580223640804, "grad_norm": 4.97382116317749, "learning_rate": 3.082184825436925e-05, "loss": 2.1274946212768553, "memory(GiB)": 72.85, "step": 73005, "token_acc": 0.5295336787564767, 
"train_speed(iter/s)": 0.67266 }, { "epoch": 3.127972237693329, "grad_norm": 10.860952377319336, "learning_rate": 3.081563338377742e-05, "loss": 2.3939374923706054, "memory(GiB)": 72.85, "step": 73010, "token_acc": 0.4659090909090909, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.1281864530225785, "grad_norm": 6.712177753448486, "learning_rate": 3.080941886072839e-05, "loss": 2.1804130554199217, "memory(GiB)": 72.85, "step": 73015, "token_acc": 0.5395189003436426, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.1284006683518273, "grad_norm": 5.557953834533691, "learning_rate": 3.0803204685334725e-05, "loss": 1.8831331253051757, "memory(GiB)": 72.85, "step": 73020, "token_acc": 0.5448717948717948, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.128614883681076, "grad_norm": 5.529966831207275, "learning_rate": 3.079699085770903e-05, "loss": 2.127239227294922, "memory(GiB)": 72.85, "step": 73025, "token_acc": 0.5521235521235521, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.1288290990103254, "grad_norm": 5.116786479949951, "learning_rate": 3.0790777377963855e-05, "loss": 1.8757755279541015, "memory(GiB)": 72.85, "step": 73030, "token_acc": 0.5519287833827893, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.129043314339574, "grad_norm": 5.8844523429870605, "learning_rate": 3.078456424621177e-05, "loss": 2.1222328186035155, "memory(GiB)": 72.85, "step": 73035, "token_acc": 0.5353846153846153, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.129257529668823, "grad_norm": 5.75800895690918, "learning_rate": 3.0778351462565325e-05, "loss": 2.1705642700195313, "memory(GiB)": 72.85, "step": 73040, "token_acc": 0.53, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.1294717449980722, "grad_norm": 4.271900177001953, "learning_rate": 3.077213902713707e-05, "loss": 1.965753936767578, "memory(GiB)": 72.85, "step": 73045, "token_acc": 0.5501730103806228, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.129685960327321, "grad_norm": 6.643305778503418, "learning_rate": 
3.0765926940039544e-05, "loss": 1.9969490051269532, "memory(GiB)": 72.85, "step": 73050, "token_acc": 0.5398550724637681, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.12990017565657, "grad_norm": 5.569093227386475, "learning_rate": 3.0759715201385306e-05, "loss": 1.9539314270019532, "memory(GiB)": 72.85, "step": 73055, "token_acc": 0.5544871794871795, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.130114390985819, "grad_norm": 6.951426982879639, "learning_rate": 3.075350381128689e-05, "loss": 1.736435317993164, "memory(GiB)": 72.85, "step": 73060, "token_acc": 0.5857740585774058, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.130328606315068, "grad_norm": 6.046818733215332, "learning_rate": 3.074729276985678e-05, "loss": 2.207174301147461, "memory(GiB)": 72.85, "step": 73065, "token_acc": 0.5196629213483146, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.1305428216443167, "grad_norm": 5.428980827331543, "learning_rate": 3.074108207720755e-05, "loss": 2.220001983642578, "memory(GiB)": 72.85, "step": 73070, "token_acc": 0.5206611570247934, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.130757036973566, "grad_norm": 5.474658489227295, "learning_rate": 3.0734871733451674e-05, "loss": 1.8301425933837892, "memory(GiB)": 72.85, "step": 73075, "token_acc": 0.5687732342007435, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.130971252302815, "grad_norm": 4.961811065673828, "learning_rate": 3.072866173870167e-05, "loss": 2.0384008407592775, "memory(GiB)": 72.85, "step": 73080, "token_acc": 0.5197568389057751, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.1311854676320636, "grad_norm": 5.5060930252075195, "learning_rate": 3.0722452093070033e-05, "loss": 1.916109848022461, "memory(GiB)": 72.85, "step": 73085, "token_acc": 0.5528169014084507, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.131399682961313, "grad_norm": 5.500598430633545, "learning_rate": 3.071624279666927e-05, "loss": 1.9315332412719726, "memory(GiB)": 72.85, "step": 73090, "token_acc": 
0.545751633986928, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.1316138982905617, "grad_norm": 6.319000720977783, "learning_rate": 3.0710033849611843e-05, "loss": 2.156136703491211, "memory(GiB)": 72.85, "step": 73095, "token_acc": 0.5728155339805825, "train_speed(iter/s)": 0.672655 }, { "epoch": 3.1318281136198105, "grad_norm": 4.922591686248779, "learning_rate": 3.0703825252010256e-05, "loss": 2.0646699905395507, "memory(GiB)": 72.85, "step": 73100, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.1320423289490598, "grad_norm": 7.69327449798584, "learning_rate": 3.069761700397697e-05, "loss": 1.946316909790039, "memory(GiB)": 72.85, "step": 73105, "token_acc": 0.5352697095435685, "train_speed(iter/s)": 0.672676 }, { "epoch": 3.1322565442783086, "grad_norm": 5.170004844665527, "learning_rate": 3.069140910562447e-05, "loss": 2.4827381134033204, "memory(GiB)": 72.85, "step": 73110, "token_acc": 0.4690909090909091, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.1324707596075574, "grad_norm": 5.771084308624268, "learning_rate": 3.06852015570652e-05, "loss": 1.8597129821777343, "memory(GiB)": 72.85, "step": 73115, "token_acc": 0.5765124555160143, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.1326849749368066, "grad_norm": 5.80764102935791, "learning_rate": 3.067899435841161e-05, "loss": 1.9797950744628907, "memory(GiB)": 72.85, "step": 73120, "token_acc": 0.5805369127516778, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.1328991902660555, "grad_norm": 7.630290985107422, "learning_rate": 3.067278750977617e-05, "loss": 2.0693166732788084, "memory(GiB)": 72.85, "step": 73125, "token_acc": 0.525679758308157, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.1331134055953043, "grad_norm": 5.717693328857422, "learning_rate": 3.066658101127132e-05, "loss": 2.045047378540039, "memory(GiB)": 72.85, "step": 73130, "token_acc": 0.5731225296442688, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.1333276209245535, "grad_norm": 7.462960243225098, 
"learning_rate": 3.06603748630095e-05, "loss": 1.9838226318359375, "memory(GiB)": 72.85, "step": 73135, "token_acc": 0.5613496932515337, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.1335418362538023, "grad_norm": 5.094288349151611, "learning_rate": 3.065416906510312e-05, "loss": 2.223367691040039, "memory(GiB)": 72.85, "step": 73140, "token_acc": 0.5080906148867314, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.133756051583051, "grad_norm": 5.903021335601807, "learning_rate": 3.064796361766462e-05, "loss": 2.074072265625, "memory(GiB)": 72.85, "step": 73145, "token_acc": 0.5700325732899023, "train_speed(iter/s)": 0.672711 }, { "epoch": 3.1339702669123004, "grad_norm": 4.712680339813232, "learning_rate": 3.06417585208064e-05, "loss": 1.89847412109375, "memory(GiB)": 72.85, "step": 73150, "token_acc": 0.5314685314685315, "train_speed(iter/s)": 0.672711 }, { "epoch": 3.134184482241549, "grad_norm": 5.118426322937012, "learning_rate": 3.0635553774640904e-05, "loss": 1.7748380661010743, "memory(GiB)": 72.85, "step": 73155, "token_acc": 0.5838926174496645, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.134398697570798, "grad_norm": 5.2429609298706055, "learning_rate": 3.0629349379280506e-05, "loss": 2.2133195877075194, "memory(GiB)": 72.85, "step": 73160, "token_acc": 0.5131578947368421, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.1346129129000473, "grad_norm": 6.820574760437012, "learning_rate": 3.062314533483761e-05, "loss": 2.140009117126465, "memory(GiB)": 72.85, "step": 73165, "token_acc": 0.5285714285714286, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.134827128229296, "grad_norm": 6.089321136474609, "learning_rate": 3.0616941641424624e-05, "loss": 2.293470764160156, "memory(GiB)": 72.85, "step": 73170, "token_acc": 0.5018050541516246, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.135041343558545, "grad_norm": 4.949143409729004, "learning_rate": 3.061073829915393e-05, "loss": 1.7397464752197265, "memory(GiB)": 72.85, "step": 73175, "token_acc": 
0.6092307692307692, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.135255558887794, "grad_norm": 5.9484782218933105, "learning_rate": 3.060453530813788e-05, "loss": 1.9630317687988281, "memory(GiB)": 72.85, "step": 73180, "token_acc": 0.5612244897959183, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.135469774217043, "grad_norm": 6.191342353820801, "learning_rate": 3.059833266848888e-05, "loss": 2.1362918853759765, "memory(GiB)": 72.85, "step": 73185, "token_acc": 0.52, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.135683989546292, "grad_norm": 4.893400192260742, "learning_rate": 3.0592130380319274e-05, "loss": 1.6244422912597656, "memory(GiB)": 72.85, "step": 73190, "token_acc": 0.6425339366515838, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.135898204875541, "grad_norm": 5.661508560180664, "learning_rate": 3.058592844374143e-05, "loss": 2.1035177230834963, "memory(GiB)": 72.85, "step": 73195, "token_acc": 0.5379939209726444, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.13611242020479, "grad_norm": 5.788506984710693, "learning_rate": 3.057972685886771e-05, "loss": 1.9191299438476563, "memory(GiB)": 72.85, "step": 73200, "token_acc": 0.5819935691318328, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.1363266355340387, "grad_norm": 7.222607612609863, "learning_rate": 3.057352562581046e-05, "loss": 1.9695516586303712, "memory(GiB)": 72.85, "step": 73205, "token_acc": 0.5396341463414634, "train_speed(iter/s)": 0.672743 }, { "epoch": 3.136540850863288, "grad_norm": 6.383829116821289, "learning_rate": 3.0567324744682006e-05, "loss": 2.187346649169922, "memory(GiB)": 72.85, "step": 73210, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.672754 }, { "epoch": 3.1367550661925367, "grad_norm": 5.384261608123779, "learning_rate": 3.056112421559471e-05, "loss": 2.0177047729492186, "memory(GiB)": 72.85, "step": 73215, "token_acc": 0.5369774919614148, "train_speed(iter/s)": 0.67276 }, { "epoch": 3.1369692815217856, "grad_norm": 4.584500312805176, 
"learning_rate": 3.055492403866088e-05, "loss": 2.268617057800293, "memory(GiB)": 72.85, "step": 73220, "token_acc": 0.5407407407407407, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.137183496851035, "grad_norm": 5.7979278564453125, "learning_rate": 3.054872421399283e-05, "loss": 2.3311393737792967, "memory(GiB)": 72.85, "step": 73225, "token_acc": 0.5124555160142349, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.1373977121802836, "grad_norm": 6.744204998016357, "learning_rate": 3.05425247417029e-05, "loss": 2.046032524108887, "memory(GiB)": 72.85, "step": 73230, "token_acc": 0.568561872909699, "train_speed(iter/s)": 0.672763 }, { "epoch": 3.1376119275095324, "grad_norm": 6.208034038543701, "learning_rate": 3.053632562190339e-05, "loss": 2.138333320617676, "memory(GiB)": 72.85, "step": 73235, "token_acc": 0.5212765957446809, "train_speed(iter/s)": 0.672772 }, { "epoch": 3.1378261428387817, "grad_norm": 8.797403335571289, "learning_rate": 3.053012685470658e-05, "loss": 1.9870399475097655, "memory(GiB)": 72.85, "step": 73240, "token_acc": 0.5795918367346938, "train_speed(iter/s)": 0.672786 }, { "epoch": 3.1380403581680305, "grad_norm": 4.81754207611084, "learning_rate": 3.0523928440224806e-05, "loss": 2.083886909484863, "memory(GiB)": 72.85, "step": 73245, "token_acc": 0.5080906148867314, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.1382545734972793, "grad_norm": 5.736824989318848, "learning_rate": 3.0517730378570333e-05, "loss": 1.9128404617309571, "memory(GiB)": 72.85, "step": 73250, "token_acc": 0.54, "train_speed(iter/s)": 0.672776 }, { "epoch": 3.1384687888265286, "grad_norm": 5.154837131500244, "learning_rate": 3.0511532669855446e-05, "loss": 1.671320343017578, "memory(GiB)": 72.85, "step": 73255, "token_acc": 0.6052631578947368, "train_speed(iter/s)": 0.672778 }, { "epoch": 3.1386830041557774, "grad_norm": 5.272000789642334, "learning_rate": 3.0505335314192435e-05, "loss": 2.1938827514648436, "memory(GiB)": 72.85, "step": 73260, "token_acc": 
0.4941860465116279, "train_speed(iter/s)": 0.672774 }, { "epoch": 3.138897219485026, "grad_norm": 4.926502227783203, "learning_rate": 3.0499138311693547e-05, "loss": 2.0425106048583985, "memory(GiB)": 72.85, "step": 73265, "token_acc": 0.5851063829787234, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.1391114348142755, "grad_norm": 4.754178047180176, "learning_rate": 3.0492941662471054e-05, "loss": 2.044895553588867, "memory(GiB)": 72.85, "step": 73270, "token_acc": 0.5529801324503312, "train_speed(iter/s)": 0.67279 }, { "epoch": 3.1393256501435243, "grad_norm": 5.348150253295898, "learning_rate": 3.048674536663725e-05, "loss": 2.2224800109863283, "memory(GiB)": 72.85, "step": 73275, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.139539865472773, "grad_norm": 6.650071620941162, "learning_rate": 3.048054942430435e-05, "loss": 1.707840919494629, "memory(GiB)": 72.85, "step": 73280, "token_acc": 0.5930232558139535, "train_speed(iter/s)": 0.672811 }, { "epoch": 3.1397540808020223, "grad_norm": 6.560832500457764, "learning_rate": 3.0474353835584595e-05, "loss": 2.0319541931152343, "memory(GiB)": 72.85, "step": 73285, "token_acc": 0.5735735735735735, "train_speed(iter/s)": 0.672817 }, { "epoch": 3.139968296131271, "grad_norm": 7.0663299560546875, "learning_rate": 3.0468158600590252e-05, "loss": 2.0749370574951174, "memory(GiB)": 72.85, "step": 73290, "token_acc": 0.5762081784386617, "train_speed(iter/s)": 0.672822 }, { "epoch": 3.14018251146052, "grad_norm": 5.159889221191406, "learning_rate": 3.046196371943353e-05, "loss": 2.1138946533203127, "memory(GiB)": 72.85, "step": 73295, "token_acc": 0.5704697986577181, "train_speed(iter/s)": 0.672829 }, { "epoch": 3.140396726789769, "grad_norm": 4.936517715454102, "learning_rate": 3.045576919222667e-05, "loss": 2.1270599365234375, "memory(GiB)": 72.85, "step": 73300, "token_acc": 0.5265017667844523, "train_speed(iter/s)": 0.672846 }, { "epoch": 3.140610942119018, "grad_norm": 
7.110511779785156, "learning_rate": 3.0449575019081883e-05, "loss": 1.897750473022461, "memory(GiB)": 72.85, "step": 73305, "token_acc": 0.5563380281690141, "train_speed(iter/s)": 0.672853 }, { "epoch": 3.140825157448267, "grad_norm": 5.571561336517334, "learning_rate": 3.0443381200111377e-05, "loss": 1.8561491012573241, "memory(GiB)": 72.85, "step": 73310, "token_acc": 0.5891472868217055, "train_speed(iter/s)": 0.672846 }, { "epoch": 3.141039372777516, "grad_norm": 7.6930766105651855, "learning_rate": 3.0437187735427376e-05, "loss": 2.300044631958008, "memory(GiB)": 72.85, "step": 73315, "token_acc": 0.4910394265232975, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.141253588106765, "grad_norm": 5.276439189910889, "learning_rate": 3.0430994625142073e-05, "loss": 2.0346139907836913, "memory(GiB)": 72.85, "step": 73320, "token_acc": 0.47470817120622566, "train_speed(iter/s)": 0.672833 }, { "epoch": 3.1414678034360137, "grad_norm": 5.031986713409424, "learning_rate": 3.0424801869367647e-05, "loss": 2.254788398742676, "memory(GiB)": 72.85, "step": 73325, "token_acc": 0.5154929577464789, "train_speed(iter/s)": 0.672837 }, { "epoch": 3.141682018765263, "grad_norm": 4.223744869232178, "learning_rate": 3.0418609468216308e-05, "loss": 2.0536975860595703, "memory(GiB)": 72.85, "step": 73330, "token_acc": 0.545751633986928, "train_speed(iter/s)": 0.672829 }, { "epoch": 3.141896234094512, "grad_norm": 6.152678966522217, "learning_rate": 3.0412417421800216e-05, "loss": 2.1517433166503905, "memory(GiB)": 72.85, "step": 73335, "token_acc": 0.5221518987341772, "train_speed(iter/s)": 0.672831 }, { "epoch": 3.1421104494237606, "grad_norm": 5.211582660675049, "learning_rate": 3.0406225730231568e-05, "loss": 1.8695228576660157, "memory(GiB)": 72.85, "step": 73340, "token_acc": 0.5993150684931506, "train_speed(iter/s)": 0.672832 }, { "epoch": 3.14232466475301, "grad_norm": 5.496091842651367, "learning_rate": 3.0400034393622502e-05, "loss": 2.10245304107666, "memory(GiB)": 72.85, 
"step": 73345, "token_acc": 0.544, "train_speed(iter/s)": 0.672848 }, { "epoch": 3.1425388800822587, "grad_norm": 5.8474225997924805, "learning_rate": 3.039384341208522e-05, "loss": 2.1172725677490236, "memory(GiB)": 72.85, "step": 73350, "token_acc": 0.5464684014869888, "train_speed(iter/s)": 0.672842 }, { "epoch": 3.1427530954115075, "grad_norm": 6.8409318923950195, "learning_rate": 3.0387652785731847e-05, "loss": 1.9375875473022461, "memory(GiB)": 72.85, "step": 73355, "token_acc": 0.5658362989323843, "train_speed(iter/s)": 0.672833 }, { "epoch": 3.1429673107407567, "grad_norm": 6.468844413757324, "learning_rate": 3.0381462514674552e-05, "loss": 1.8820520401000977, "memory(GiB)": 72.85, "step": 73360, "token_acc": 0.572992700729927, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.1431815260700056, "grad_norm": 5.881229400634766, "learning_rate": 3.037527259902546e-05, "loss": 2.0371067047119142, "memory(GiB)": 72.85, "step": 73365, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.672831 }, { "epoch": 3.1433957413992544, "grad_norm": 6.08922815322876, "learning_rate": 3.0369083038896716e-05, "loss": 2.077349090576172, "memory(GiB)": 72.85, "step": 73370, "token_acc": 0.5245901639344263, "train_speed(iter/s)": 0.672832 }, { "epoch": 3.1436099567285036, "grad_norm": 6.041421413421631, "learning_rate": 3.036289383440045e-05, "loss": 2.0402671813964846, "memory(GiB)": 72.85, "step": 73375, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.672821 }, { "epoch": 3.1438241720577524, "grad_norm": 5.842034339904785, "learning_rate": 3.0356704985648786e-05, "loss": 2.1647878646850587, "memory(GiB)": 72.85, "step": 73380, "token_acc": 0.5331010452961672, "train_speed(iter/s)": 0.672812 }, { "epoch": 3.1440383873870013, "grad_norm": 5.342073917388916, "learning_rate": 3.0350516492753828e-05, "loss": 1.9352561950683593, "memory(GiB)": 72.85, "step": 73385, "token_acc": 0.5769230769230769, "train_speed(iter/s)": 0.672807 }, { "epoch": 3.1442526027162505, 
"grad_norm": 6.483957767486572, "learning_rate": 3.0344328355827706e-05, "loss": 2.04683837890625, "memory(GiB)": 72.85, "step": 73390, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.672815 }, { "epoch": 3.1444668180454993, "grad_norm": 7.7205729484558105, "learning_rate": 3.0338140574982505e-05, "loss": 1.8431541442871093, "memory(GiB)": 72.85, "step": 73395, "token_acc": 0.5536480686695279, "train_speed(iter/s)": 0.672819 }, { "epoch": 3.144681033374748, "grad_norm": 8.512955665588379, "learning_rate": 3.033195315033034e-05, "loss": 2.1723888397216795, "memory(GiB)": 72.85, "step": 73400, "token_acc": 0.5477178423236515, "train_speed(iter/s)": 0.672818 }, { "epoch": 3.1448952487039974, "grad_norm": 5.87830114364624, "learning_rate": 3.0325766081983296e-05, "loss": 2.1180580139160154, "memory(GiB)": 72.85, "step": 73405, "token_acc": 0.5634920634920635, "train_speed(iter/s)": 0.672828 }, { "epoch": 3.145109464033246, "grad_norm": 4.9498114585876465, "learning_rate": 3.031957937005344e-05, "loss": 2.0647953033447264, "memory(GiB)": 72.85, "step": 73410, "token_acc": 0.5424354243542435, "train_speed(iter/s)": 0.672826 }, { "epoch": 3.145323679362495, "grad_norm": 6.784549713134766, "learning_rate": 3.0313393014652865e-05, "loss": 2.120198440551758, "memory(GiB)": 72.85, "step": 73415, "token_acc": 0.4881889763779528, "train_speed(iter/s)": 0.672827 }, { "epoch": 3.1455378946917443, "grad_norm": 4.871748447418213, "learning_rate": 3.0307207015893658e-05, "loss": 2.097799873352051, "memory(GiB)": 72.85, "step": 73420, "token_acc": 0.5059523809523809, "train_speed(iter/s)": 0.672836 }, { "epoch": 3.145752110020993, "grad_norm": 7.983211040496826, "learning_rate": 3.0301021373887865e-05, "loss": 2.180606460571289, "memory(GiB)": 72.85, "step": 73425, "token_acc": 0.5080906148867314, "train_speed(iter/s)": 0.672839 }, { "epoch": 3.145966325350242, "grad_norm": 4.60140323638916, "learning_rate": 3.0294836088747547e-05, "loss": 1.950139045715332, "memory(GiB)": 
72.85, "step": 73430, "token_acc": 0.5381818181818182, "train_speed(iter/s)": 0.672837 }, { "epoch": 3.146180540679491, "grad_norm": 4.612889766693115, "learning_rate": 3.0288651160584765e-05, "loss": 2.2596439361572265, "memory(GiB)": 72.85, "step": 73435, "token_acc": 0.5257731958762887, "train_speed(iter/s)": 0.67284 }, { "epoch": 3.14639475600874, "grad_norm": 4.354960918426514, "learning_rate": 3.0282466589511544e-05, "loss": 2.0309869766235353, "memory(GiB)": 72.85, "step": 73440, "token_acc": 0.5602836879432624, "train_speed(iter/s)": 0.672816 }, { "epoch": 3.146608971337989, "grad_norm": 6.12471342086792, "learning_rate": 3.0276282375639953e-05, "loss": 2.3127815246582033, "memory(GiB)": 72.85, "step": 73445, "token_acc": 0.5301204819277109, "train_speed(iter/s)": 0.672811 }, { "epoch": 3.146823186667238, "grad_norm": 4.955881118774414, "learning_rate": 3.0270098519082e-05, "loss": 1.863111686706543, "memory(GiB)": 72.85, "step": 73450, "token_acc": 0.5813008130081301, "train_speed(iter/s)": 0.672835 }, { "epoch": 3.147037401996487, "grad_norm": 8.067530632019043, "learning_rate": 3.0263915019949717e-05, "loss": 1.9767623901367188, "memory(GiB)": 72.85, "step": 73455, "token_acc": 0.5290322580645161, "train_speed(iter/s)": 0.672822 }, { "epoch": 3.1472516173257357, "grad_norm": 5.260770320892334, "learning_rate": 3.0257731878355143e-05, "loss": 2.173722267150879, "memory(GiB)": 72.85, "step": 73460, "token_acc": 0.560126582278481, "train_speed(iter/s)": 0.672823 }, { "epoch": 3.147465832654985, "grad_norm": 4.72221040725708, "learning_rate": 3.025154909441027e-05, "loss": 2.2106502532958983, "memory(GiB)": 72.85, "step": 73465, "token_acc": 0.5592105263157895, "train_speed(iter/s)": 0.672824 }, { "epoch": 3.1476800479842337, "grad_norm": 5.456965923309326, "learning_rate": 3.0245366668227098e-05, "loss": 2.212990951538086, "memory(GiB)": 72.85, "step": 73470, "token_acc": 0.5299684542586751, "train_speed(iter/s)": 0.67283 }, { "epoch": 3.1478942633134825, 
"grad_norm": 5.5245819091796875, "learning_rate": 3.0239184599917658e-05, "loss": 2.1372406005859377, "memory(GiB)": 72.85, "step": 73475, "token_acc": 0.5311355311355311, "train_speed(iter/s)": 0.67283 }, { "epoch": 3.148108478642732, "grad_norm": 6.9857001304626465, "learning_rate": 3.023300288959391e-05, "loss": 2.3598270416259766, "memory(GiB)": 72.85, "step": 73480, "token_acc": 0.5295950155763239, "train_speed(iter/s)": 0.672826 }, { "epoch": 3.1483226939719806, "grad_norm": 6.153048992156982, "learning_rate": 3.0226821537367867e-05, "loss": 1.9575523376464843, "memory(GiB)": 72.85, "step": 73485, "token_acc": 0.54296875, "train_speed(iter/s)": 0.67283 }, { "epoch": 3.1485369093012294, "grad_norm": 5.513986110687256, "learning_rate": 3.0220640543351486e-05, "loss": 2.189956283569336, "memory(GiB)": 72.85, "step": 73490, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672827 }, { "epoch": 3.1487511246304787, "grad_norm": 7.453521251678467, "learning_rate": 3.0214459907656767e-05, "loss": 2.01633358001709, "memory(GiB)": 72.85, "step": 73495, "token_acc": 0.5254777070063694, "train_speed(iter/s)": 0.672833 }, { "epoch": 3.1489653399597275, "grad_norm": 5.0757951736450195, "learning_rate": 3.0208279630395664e-05, "loss": 1.9371580123901366, "memory(GiB)": 72.85, "step": 73500, "token_acc": 0.5523465703971119, "train_speed(iter/s)": 0.672836 }, { "epoch": 3.1489653399597275, "eval_loss": 2.0983972549438477, "eval_runtime": 15.4296, "eval_samples_per_second": 6.481, "eval_steps_per_second": 6.481, "eval_token_acc": 0.5153203342618384, "step": 73500 }, { "epoch": 3.1491795552889763, "grad_norm": 6.462095260620117, "learning_rate": 3.0202099711680153e-05, "loss": 2.1178516387939452, "memory(GiB)": 72.85, "step": 73505, "token_acc": 0.5103857566765578, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.1493937706182256, "grad_norm": 6.033425331115723, "learning_rate": 3.019592015162217e-05, "loss": 1.985053253173828, "memory(GiB)": 72.85, "step": 73510, 
"token_acc": 0.5586592178770949, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.1496079859474744, "grad_norm": 5.290426731109619, "learning_rate": 3.0189740950333666e-05, "loss": 1.9826160430908204, "memory(GiB)": 72.85, "step": 73515, "token_acc": 0.5742574257425742, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.149822201276723, "grad_norm": 8.271775245666504, "learning_rate": 3.01835621079266e-05, "loss": 1.8292848587036132, "memory(GiB)": 72.85, "step": 73520, "token_acc": 0.574468085106383, "train_speed(iter/s)": 0.672715 }, { "epoch": 3.1500364166059724, "grad_norm": 6.0647196769714355, "learning_rate": 3.0177383624512896e-05, "loss": 2.4874900817871093, "memory(GiB)": 72.85, "step": 73525, "token_acc": 0.47863247863247865, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.1502506319352213, "grad_norm": 7.34559965133667, "learning_rate": 3.017120550020447e-05, "loss": 2.189391326904297, "memory(GiB)": 72.85, "step": 73530, "token_acc": 0.515748031496063, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.15046484726447, "grad_norm": 9.761812210083008, "learning_rate": 3.016502773511327e-05, "loss": 2.2604095458984377, "memory(GiB)": 72.85, "step": 73535, "token_acc": 0.5197368421052632, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.1506790625937193, "grad_norm": 5.870776176452637, "learning_rate": 3.0158850329351184e-05, "loss": 1.9567113876342774, "memory(GiB)": 72.85, "step": 73540, "token_acc": 0.5461847389558233, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.150893277922968, "grad_norm": 5.515131950378418, "learning_rate": 3.0152673283030153e-05, "loss": 2.034585380554199, "memory(GiB)": 72.85, "step": 73545, "token_acc": 0.5228070175438596, "train_speed(iter/s)": 0.672728 }, { "epoch": 3.151107493252217, "grad_norm": 4.653886318206787, "learning_rate": 3.014649659626206e-05, "loss": 2.3053604125976563, "memory(GiB)": 72.85, "step": 73550, "token_acc": 0.5243055555555556, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.151321708581466, "grad_norm": 
5.002079963684082, "learning_rate": 3.0140320269158795e-05, "loss": 2.083139419555664, "memory(GiB)": 72.85, "step": 73555, "token_acc": 0.5133079847908745, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.151535923910715, "grad_norm": 6.49993896484375, "learning_rate": 3.0134144301832266e-05, "loss": 2.1261098861694334, "memory(GiB)": 72.85, "step": 73560, "token_acc": 0.5692883895131086, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.151750139239964, "grad_norm": 5.075185298919678, "learning_rate": 3.0127968694394337e-05, "loss": 2.1034690856933596, "memory(GiB)": 72.85, "step": 73565, "token_acc": 0.5146579804560261, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.151964354569213, "grad_norm": 9.025712013244629, "learning_rate": 3.012179344695691e-05, "loss": 1.9979972839355469, "memory(GiB)": 72.85, "step": 73570, "token_acc": 0.6040816326530613, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.152178569898462, "grad_norm": 6.013253688812256, "learning_rate": 3.0115618559631832e-05, "loss": 2.0706304550170898, "memory(GiB)": 72.85, "step": 73575, "token_acc": 0.53156146179402, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.1523927852277107, "grad_norm": 5.339637279510498, "learning_rate": 3.0109444032530985e-05, "loss": 2.1629974365234377, "memory(GiB)": 72.85, "step": 73580, "token_acc": 0.5033333333333333, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.15260700055696, "grad_norm": 6.940325736999512, "learning_rate": 3.0103269865766215e-05, "loss": 2.124881553649902, "memory(GiB)": 72.85, "step": 73585, "token_acc": 0.5425101214574899, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.152821215886209, "grad_norm": 6.039753437042236, "learning_rate": 3.0097096059449383e-05, "loss": 2.1641258239746093, "memory(GiB)": 72.85, "step": 73590, "token_acc": 0.5101351351351351, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.1530354312154576, "grad_norm": 4.626230716705322, "learning_rate": 3.0090922613692328e-05, "loss": 2.0421382904052736, "memory(GiB)": 72.85, "step": 
73595, "token_acc": 0.5351170568561873, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.153249646544707, "grad_norm": 7.435403823852539, "learning_rate": 3.008474952860687e-05, "loss": 2.3970787048339846, "memory(GiB)": 72.85, "step": 73600, "token_acc": 0.5206896551724138, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.1534638618739557, "grad_norm": 7.070004940032959, "learning_rate": 3.007857680430487e-05, "loss": 2.11226806640625, "memory(GiB)": 72.85, "step": 73605, "token_acc": 0.5678571428571428, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.1536780772032045, "grad_norm": 6.0626749992370605, "learning_rate": 3.007240444089814e-05, "loss": 1.90914306640625, "memory(GiB)": 72.85, "step": 73610, "token_acc": 0.5583596214511041, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.1538922925324537, "grad_norm": 6.928576469421387, "learning_rate": 3.006623243849849e-05, "loss": 2.569672203063965, "memory(GiB)": 72.85, "step": 73615, "token_acc": 0.48951048951048953, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.1541065078617025, "grad_norm": 7.031991004943848, "learning_rate": 3.006006079721774e-05, "loss": 2.1038127899169923, "memory(GiB)": 72.85, "step": 73620, "token_acc": 0.5548387096774193, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.1543207231909514, "grad_norm": 7.202446460723877, "learning_rate": 3.0053889517167693e-05, "loss": 2.318035888671875, "memory(GiB)": 72.85, "step": 73625, "token_acc": 0.5166666666666667, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.1545349385202006, "grad_norm": 5.6695332527160645, "learning_rate": 3.004771859846015e-05, "loss": 2.022517776489258, "memory(GiB)": 72.85, "step": 73630, "token_acc": 0.5444839857651246, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.1547491538494494, "grad_norm": 5.586943626403809, "learning_rate": 3.0041548041206914e-05, "loss": 2.0536304473876954, "memory(GiB)": 72.85, "step": 73635, "token_acc": 0.5429553264604811, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.1549633691786982, 
"grad_norm": 7.018746376037598, "learning_rate": 3.0035377845519724e-05, "loss": 2.2290470123291017, "memory(GiB)": 72.85, "step": 73640, "token_acc": 0.5256410256410257, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.1551775845079475, "grad_norm": 3.8358969688415527, "learning_rate": 3.002920801151041e-05, "loss": 2.0065082550048827, "memory(GiB)": 72.85, "step": 73645, "token_acc": 0.5516014234875445, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.1553917998371963, "grad_norm": 4.867340564727783, "learning_rate": 3.0023038539290737e-05, "loss": 2.429473876953125, "memory(GiB)": 72.85, "step": 73650, "token_acc": 0.48348348348348347, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.155606015166445, "grad_norm": 5.388586521148682, "learning_rate": 3.001686942897246e-05, "loss": 2.0900848388671873, "memory(GiB)": 72.85, "step": 73655, "token_acc": 0.5720720720720721, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.1558202304956944, "grad_norm": 5.687099456787109, "learning_rate": 3.0010700680667325e-05, "loss": 2.091691589355469, "memory(GiB)": 72.85, "step": 73660, "token_acc": 0.5849673202614379, "train_speed(iter/s)": 0.672752 }, { "epoch": 3.156034445824943, "grad_norm": 5.08513879776001, "learning_rate": 3.000453229448711e-05, "loss": 2.033371162414551, "memory(GiB)": 72.85, "step": 73665, "token_acc": 0.532608695652174, "train_speed(iter/s)": 0.672749 }, { "epoch": 3.156248661154192, "grad_norm": 5.708557605743408, "learning_rate": 2.9998364270543555e-05, "loss": 2.0581647872924806, "memory(GiB)": 72.85, "step": 73670, "token_acc": 0.5170068027210885, "train_speed(iter/s)": 0.672747 }, { "epoch": 3.1564628764834413, "grad_norm": 4.584696292877197, "learning_rate": 2.9992196608948374e-05, "loss": 2.2501152038574217, "memory(GiB)": 72.85, "step": 73675, "token_acc": 0.509493670886076, "train_speed(iter/s)": 0.672764 }, { "epoch": 3.15667709181269, "grad_norm": 4.979548931121826, "learning_rate": 2.998602930981334e-05, "loss": 2.435693359375, "memory(GiB)": 
72.85, "step": 73680, "token_acc": 0.49056603773584906, "train_speed(iter/s)": 0.672762 }, { "epoch": 3.156891307141939, "grad_norm": 8.407668113708496, "learning_rate": 2.9979862373250144e-05, "loss": 2.217470169067383, "memory(GiB)": 72.85, "step": 73685, "token_acc": 0.5099601593625498, "train_speed(iter/s)": 0.672756 }, { "epoch": 3.157105522471188, "grad_norm": 6.7125139236450195, "learning_rate": 2.997369579937054e-05, "loss": 2.2972576141357424, "memory(GiB)": 72.85, "step": 73690, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.157319737800437, "grad_norm": 3.887965202331543, "learning_rate": 2.9967529588286215e-05, "loss": 2.2650163650512694, "memory(GiB)": 72.85, "step": 73695, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.1575339531296858, "grad_norm": 6.260205268859863, "learning_rate": 2.996136374010887e-05, "loss": 2.3732189178466796, "memory(GiB)": 72.85, "step": 73700, "token_acc": 0.5, "train_speed(iter/s)": 0.672764 }, { "epoch": 3.157748168458935, "grad_norm": 4.478489875793457, "learning_rate": 2.995519825495024e-05, "loss": 2.3763404846191407, "memory(GiB)": 72.85, "step": 73705, "token_acc": 0.5013192612137203, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.157962383788184, "grad_norm": 6.106410503387451, "learning_rate": 2.9949033132921988e-05, "loss": 1.9964736938476562, "memory(GiB)": 72.85, "step": 73710, "token_acc": 0.5810276679841897, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.1581765991174326, "grad_norm": 4.97804594039917, "learning_rate": 2.9942868374135798e-05, "loss": 2.103156089782715, "memory(GiB)": 72.85, "step": 73715, "token_acc": 0.5441176470588235, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.158390814446682, "grad_norm": 5.400423049926758, "learning_rate": 2.993670397870336e-05, "loss": 2.033164215087891, "memory(GiB)": 72.85, "step": 73720, "token_acc": 0.549645390070922, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.1586050297759307, 
"grad_norm": 5.449758052825928, "learning_rate": 2.9930539946736373e-05, "loss": 2.300907325744629, "memory(GiB)": 72.85, "step": 73725, "token_acc": 0.5059288537549407, "train_speed(iter/s)": 0.672774 }, { "epoch": 3.1588192451051795, "grad_norm": 4.963281631469727, "learning_rate": 2.9924376278346467e-05, "loss": 2.3728216171264647, "memory(GiB)": 72.85, "step": 73730, "token_acc": 0.5077399380804953, "train_speed(iter/s)": 0.672782 }, { "epoch": 3.159033460434429, "grad_norm": 5.8416361808776855, "learning_rate": 2.9918212973645325e-05, "loss": 1.889532470703125, "memory(GiB)": 72.85, "step": 73735, "token_acc": 0.5703971119133574, "train_speed(iter/s)": 0.672782 }, { "epoch": 3.1592476757636776, "grad_norm": 7.103753089904785, "learning_rate": 2.9912050032744597e-05, "loss": 2.225335884094238, "memory(GiB)": 72.85, "step": 73740, "token_acc": 0.556390977443609, "train_speed(iter/s)": 0.672788 }, { "epoch": 3.1594618910929264, "grad_norm": 5.311192512512207, "learning_rate": 2.9905887455755915e-05, "loss": 2.0241931915283202, "memory(GiB)": 72.85, "step": 73745, "token_acc": 0.5493827160493827, "train_speed(iter/s)": 0.672793 }, { "epoch": 3.1596761064221757, "grad_norm": 5.046893119812012, "learning_rate": 2.9899725242790945e-05, "loss": 2.1955501556396486, "memory(GiB)": 72.85, "step": 73750, "token_acc": 0.5192307692307693, "train_speed(iter/s)": 0.672791 }, { "epoch": 3.1598903217514245, "grad_norm": 7.173089981079102, "learning_rate": 2.98935633939613e-05, "loss": 1.9947307586669922, "memory(GiB)": 72.85, "step": 73755, "token_acc": 0.5475285171102662, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.1601045370806733, "grad_norm": 6.330143928527832, "learning_rate": 2.9887401909378608e-05, "loss": 2.3206939697265625, "memory(GiB)": 72.85, "step": 73760, "token_acc": 0.5331010452961672, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.1603187524099225, "grad_norm": 4.533491134643555, "learning_rate": 2.988124078915451e-05, "loss": 2.035956382751465, 
"memory(GiB)": 72.85, "step": 73765, "token_acc": 0.6055045871559633, "train_speed(iter/s)": 0.672803 }, { "epoch": 3.1605329677391714, "grad_norm": 5.693591594696045, "learning_rate": 2.98750800334006e-05, "loss": 1.7868118286132812, "memory(GiB)": 72.85, "step": 73770, "token_acc": 0.5877551020408164, "train_speed(iter/s)": 0.672813 }, { "epoch": 3.16074718306842, "grad_norm": 6.2629618644714355, "learning_rate": 2.98689196422285e-05, "loss": 2.0961593627929687, "memory(GiB)": 72.85, "step": 73775, "token_acc": 0.5427509293680297, "train_speed(iter/s)": 0.672816 }, { "epoch": 3.1609613983976694, "grad_norm": 6.627548694610596, "learning_rate": 2.9862759615749804e-05, "loss": 2.0662343978881834, "memory(GiB)": 72.85, "step": 73780, "token_acc": 0.5381818181818182, "train_speed(iter/s)": 0.672813 }, { "epoch": 3.1611756137269182, "grad_norm": 5.313335418701172, "learning_rate": 2.985659995407609e-05, "loss": 2.3537870407104493, "memory(GiB)": 72.85, "step": 73785, "token_acc": 0.5284810126582279, "train_speed(iter/s)": 0.672823 }, { "epoch": 3.161389829056167, "grad_norm": 5.590052604675293, "learning_rate": 2.9850440657318974e-05, "loss": 1.936471939086914, "memory(GiB)": 72.85, "step": 73790, "token_acc": 0.524822695035461, "train_speed(iter/s)": 0.67282 }, { "epoch": 3.1616040443854163, "grad_norm": 6.108640670776367, "learning_rate": 2.984428172559004e-05, "loss": 2.4143350601196287, "memory(GiB)": 72.85, "step": 73795, "token_acc": 0.5186721991701245, "train_speed(iter/s)": 0.672827 }, { "epoch": 3.161818259714665, "grad_norm": 5.659984588623047, "learning_rate": 2.983812315900084e-05, "loss": 1.9622055053710938, "memory(GiB)": 72.85, "step": 73800, "token_acc": 0.5507246376811594, "train_speed(iter/s)": 0.672827 }, { "epoch": 3.162032475043914, "grad_norm": 11.763230323791504, "learning_rate": 2.9831964957662955e-05, "loss": 1.7302101135253907, "memory(GiB)": 72.85, "step": 73805, "token_acc": 0.6229508196721312, "train_speed(iter/s)": 0.672808 }, { "epoch": 
3.162246690373163, "grad_norm": 5.987096309661865, "learning_rate": 2.982580712168795e-05, "loss": 1.9535049438476562, "memory(GiB)": 72.85, "step": 73810, "token_acc": 0.5594405594405595, "train_speed(iter/s)": 0.672809 }, { "epoch": 3.162460905702412, "grad_norm": 5.375250339508057, "learning_rate": 2.9819649651187365e-05, "loss": 2.3276901245117188, "memory(GiB)": 72.85, "step": 73815, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.672818 }, { "epoch": 3.162675121031661, "grad_norm": 5.141759872436523, "learning_rate": 2.9813492546272757e-05, "loss": 2.030137825012207, "memory(GiB)": 72.85, "step": 73820, "token_acc": 0.5094339622641509, "train_speed(iter/s)": 0.672828 }, { "epoch": 3.16288933636091, "grad_norm": 4.430132865905762, "learning_rate": 2.980733580705568e-05, "loss": 1.990823745727539, "memory(GiB)": 72.85, "step": 73825, "token_acc": 0.5618374558303887, "train_speed(iter/s)": 0.672825 }, { "epoch": 3.163103551690159, "grad_norm": 5.695847511291504, "learning_rate": 2.9801179433647643e-05, "loss": 1.8850988388061523, "memory(GiB)": 72.85, "step": 73830, "token_acc": 0.5529801324503312, "train_speed(iter/s)": 0.672822 }, { "epoch": 3.163317767019408, "grad_norm": 5.099205493927002, "learning_rate": 2.9795023426160197e-05, "loss": 2.2398508071899412, "memory(GiB)": 72.85, "step": 73835, "token_acc": 0.5498154981549815, "train_speed(iter/s)": 0.672819 }, { "epoch": 3.163531982348657, "grad_norm": 5.998830795288086, "learning_rate": 2.9788867784704856e-05, "loss": 1.930701446533203, "memory(GiB)": 72.85, "step": 73840, "token_acc": 0.5596707818930041, "train_speed(iter/s)": 0.672819 }, { "epoch": 3.1637461976779058, "grad_norm": 4.677015781402588, "learning_rate": 2.978271250939313e-05, "loss": 2.2779361724853517, "memory(GiB)": 72.85, "step": 73845, "token_acc": 0.48104956268221577, "train_speed(iter/s)": 0.672824 }, { "epoch": 3.163960413007155, "grad_norm": 10.705110549926758, "learning_rate": 2.9776557600336534e-05, "loss": 
2.2966402053833006, "memory(GiB)": 72.85, "step": 73850, "token_acc": 0.5435540069686411, "train_speed(iter/s)": 0.672824 }, { "epoch": 3.164174628336404, "grad_norm": 7.211675643920898, "learning_rate": 2.9770403057646562e-05, "loss": 2.242745018005371, "memory(GiB)": 72.85, "step": 73855, "token_acc": 0.5323076923076923, "train_speed(iter/s)": 0.672827 }, { "epoch": 3.1643888436656527, "grad_norm": 4.924079895019531, "learning_rate": 2.976424888143472e-05, "loss": 1.9841459274291993, "memory(GiB)": 72.85, "step": 73860, "token_acc": 0.5611510791366906, "train_speed(iter/s)": 0.67283 }, { "epoch": 3.164603058994902, "grad_norm": 5.598952770233154, "learning_rate": 2.9758095071812485e-05, "loss": 2.30692138671875, "memory(GiB)": 72.85, "step": 73865, "token_acc": 0.45403899721448465, "train_speed(iter/s)": 0.67283 }, { "epoch": 3.1648172743241507, "grad_norm": 5.165461540222168, "learning_rate": 2.9751941628891356e-05, "loss": 2.266586494445801, "memory(GiB)": 72.85, "step": 73870, "token_acc": 0.5225563909774437, "train_speed(iter/s)": 0.672834 }, { "epoch": 3.1650314896533995, "grad_norm": 5.182082176208496, "learning_rate": 2.9745788552782795e-05, "loss": 2.267259407043457, "memory(GiB)": 72.85, "step": 73875, "token_acc": 0.5078369905956113, "train_speed(iter/s)": 0.672837 }, { "epoch": 3.165245704982649, "grad_norm": 5.185582160949707, "learning_rate": 2.9739635843598278e-05, "loss": 2.2581813812255858, "memory(GiB)": 72.85, "step": 73880, "token_acc": 0.5409252669039146, "train_speed(iter/s)": 0.672841 }, { "epoch": 3.1654599203118976, "grad_norm": 8.460654258728027, "learning_rate": 2.9733483501449267e-05, "loss": 2.0202610015869142, "memory(GiB)": 72.85, "step": 73885, "token_acc": 0.5795454545454546, "train_speed(iter/s)": 0.672841 }, { "epoch": 3.1656741356411464, "grad_norm": 4.501950740814209, "learning_rate": 2.97273315264472e-05, "loss": 2.2463199615478517, "memory(GiB)": 72.85, "step": 73890, "token_acc": 0.5351681957186545, "train_speed(iter/s)": 
0.672846 }, { "epoch": 3.1658883509703957, "grad_norm": 5.311535835266113, "learning_rate": 2.9721179918703558e-05, "loss": 2.2084638595581056, "memory(GiB)": 72.85, "step": 73895, "token_acc": 0.5321100917431193, "train_speed(iter/s)": 0.672854 }, { "epoch": 3.1661025662996445, "grad_norm": 6.589900016784668, "learning_rate": 2.971502867832976e-05, "loss": 2.2851449966430666, "memory(GiB)": 72.85, "step": 73900, "token_acc": 0.508695652173913, "train_speed(iter/s)": 0.672859 }, { "epoch": 3.1663167816288933, "grad_norm": 5.674716472625732, "learning_rate": 2.9708877805437236e-05, "loss": 2.008159637451172, "memory(GiB)": 72.85, "step": 73905, "token_acc": 0.5875486381322957, "train_speed(iter/s)": 0.672867 }, { "epoch": 3.1665309969581426, "grad_norm": 6.7158098220825195, "learning_rate": 2.9702727300137434e-05, "loss": 2.251353454589844, "memory(GiB)": 72.85, "step": 73910, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.672875 }, { "epoch": 3.1667452122873914, "grad_norm": 6.0825676918029785, "learning_rate": 2.969657716254176e-05, "loss": 2.0327630996704102, "memory(GiB)": 72.85, "step": 73915, "token_acc": 0.5305343511450382, "train_speed(iter/s)": 0.672877 }, { "epoch": 3.16695942761664, "grad_norm": 5.1967363357543945, "learning_rate": 2.9690427392761642e-05, "loss": 1.7255653381347655, "memory(GiB)": 72.85, "step": 73920, "token_acc": 0.6091549295774648, "train_speed(iter/s)": 0.672887 }, { "epoch": 3.1671736429458894, "grad_norm": 4.712534427642822, "learning_rate": 2.9684277990908487e-05, "loss": 2.0193126678466795, "memory(GiB)": 72.85, "step": 73925, "token_acc": 0.5174603174603175, "train_speed(iter/s)": 0.672897 }, { "epoch": 3.1673878582751382, "grad_norm": 4.449965953826904, "learning_rate": 2.9678128957093687e-05, "loss": 2.036853790283203, "memory(GiB)": 72.85, "step": 73930, "token_acc": 0.5597014925373134, "train_speed(iter/s)": 0.672906 }, { "epoch": 3.167602073604387, "grad_norm": 5.789939880371094, "learning_rate": 
2.967198029142863e-05, "loss": 1.8308633804321288, "memory(GiB)": 72.85, "step": 73935, "token_acc": 0.5905797101449275, "train_speed(iter/s)": 0.672898 }, { "epoch": 3.1678162889336363, "grad_norm": 7.495068073272705, "learning_rate": 2.966583199402474e-05, "loss": 2.2555479049682616, "memory(GiB)": 72.85, "step": 73940, "token_acc": 0.5273972602739726, "train_speed(iter/s)": 0.672896 }, { "epoch": 3.168030504262885, "grad_norm": 5.667675971984863, "learning_rate": 2.9659684064993376e-05, "loss": 2.152272033691406, "memory(GiB)": 72.85, "step": 73945, "token_acc": 0.531496062992126, "train_speed(iter/s)": 0.672898 }, { "epoch": 3.168244719592134, "grad_norm": 6.9271345138549805, "learning_rate": 2.965353650444591e-05, "loss": 2.0195777893066404, "memory(GiB)": 72.85, "step": 73950, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.672891 }, { "epoch": 3.168458934921383, "grad_norm": 5.677482604980469, "learning_rate": 2.9647389312493727e-05, "loss": 1.9197446823120117, "memory(GiB)": 72.85, "step": 73955, "token_acc": 0.5895196506550219, "train_speed(iter/s)": 0.672875 }, { "epoch": 3.168673150250632, "grad_norm": 4.702225208282471, "learning_rate": 2.9641242489248167e-05, "loss": 2.0630123138427736, "memory(GiB)": 72.85, "step": 73960, "token_acc": 0.5325077399380805, "train_speed(iter/s)": 0.672876 }, { "epoch": 3.168887365579881, "grad_norm": 4.815978050231934, "learning_rate": 2.963509603482061e-05, "loss": 2.1534528732299805, "memory(GiB)": 72.85, "step": 73965, "token_acc": 0.5360824742268041, "train_speed(iter/s)": 0.672873 }, { "epoch": 3.16910158090913, "grad_norm": 5.223862171173096, "learning_rate": 2.9628949949322392e-05, "loss": 2.130208969116211, "memory(GiB)": 72.85, "step": 73970, "token_acc": 0.5284552845528455, "train_speed(iter/s)": 0.672874 }, { "epoch": 3.169315796238379, "grad_norm": 6.946464538574219, "learning_rate": 2.9622804232864842e-05, "loss": 2.170469856262207, "memory(GiB)": 72.85, "step": 73975, "token_acc": 
0.5218855218855218, "train_speed(iter/s)": 0.672872 }, { "epoch": 3.1695300115676277, "grad_norm": 7.067306995391846, "learning_rate": 2.961665888555932e-05, "loss": 2.1713977813720704, "memory(GiB)": 72.85, "step": 73980, "token_acc": 0.5464285714285714, "train_speed(iter/s)": 0.672885 }, { "epoch": 3.169744226896877, "grad_norm": 6.196094036102295, "learning_rate": 2.9610513907517147e-05, "loss": 1.8963914871215821, "memory(GiB)": 72.85, "step": 73985, "token_acc": 0.5970695970695971, "train_speed(iter/s)": 0.672887 }, { "epoch": 3.1699584422261258, "grad_norm": 5.231651306152344, "learning_rate": 2.960436929884962e-05, "loss": 2.155277442932129, "memory(GiB)": 72.85, "step": 73990, "token_acc": 0.525, "train_speed(iter/s)": 0.672889 }, { "epoch": 3.1701726575553746, "grad_norm": 5.710628509521484, "learning_rate": 2.95982250596681e-05, "loss": 2.371139335632324, "memory(GiB)": 72.85, "step": 73995, "token_acc": 0.4866920152091255, "train_speed(iter/s)": 0.672894 }, { "epoch": 3.170386872884624, "grad_norm": 5.486025810241699, "learning_rate": 2.9592081190083855e-05, "loss": 2.221547508239746, "memory(GiB)": 72.85, "step": 74000, "token_acc": 0.5034013605442177, "train_speed(iter/s)": 0.672889 }, { "epoch": 3.170386872884624, "eval_loss": 2.134387254714966, "eval_runtime": 16.225, "eval_samples_per_second": 6.163, "eval_steps_per_second": 6.163, "eval_token_acc": 0.5195195195195195, "step": 74000 }, { "epoch": 3.1706010882138727, "grad_norm": 5.711306095123291, "learning_rate": 2.958593769020821e-05, "loss": 2.135982704162598, "memory(GiB)": 72.85, "step": 74005, "token_acc": 0.5326424870466321, "train_speed(iter/s)": 0.672772 }, { "epoch": 3.1708153035431215, "grad_norm": 20.98784828186035, "learning_rate": 2.957979456015244e-05, "loss": 2.0359519958496093, "memory(GiB)": 72.85, "step": 74010, "token_acc": 0.5212765957446809, "train_speed(iter/s)": 0.672776 }, { "epoch": 3.1710295188723707, "grad_norm": 5.795983791351318, "learning_rate": 2.9573651800027864e-05, 
"loss": 2.153965377807617, "memory(GiB)": 72.85, "step": 74015, "token_acc": 0.495114006514658, "train_speed(iter/s)": 0.672777 }, { "epoch": 3.1712437342016195, "grad_norm": 7.457180023193359, "learning_rate": 2.9567509409945738e-05, "loss": 1.8219619750976563, "memory(GiB)": 72.85, "step": 74020, "token_acc": 0.594488188976378, "train_speed(iter/s)": 0.672777 }, { "epoch": 3.1714579495308683, "grad_norm": 5.16526985168457, "learning_rate": 2.956136739001736e-05, "loss": 2.272437858581543, "memory(GiB)": 72.85, "step": 74025, "token_acc": 0.5400696864111498, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.1716721648601176, "grad_norm": 6.368913173675537, "learning_rate": 2.9555225740353975e-05, "loss": 2.0857707977294924, "memory(GiB)": 72.85, "step": 74030, "token_acc": 0.535593220338983, "train_speed(iter/s)": 0.672769 }, { "epoch": 3.1718863801893664, "grad_norm": 5.103208065032959, "learning_rate": 2.954908446106685e-05, "loss": 2.100234031677246, "memory(GiB)": 72.85, "step": 74035, "token_acc": 0.5653710247349824, "train_speed(iter/s)": 0.672776 }, { "epoch": 3.1721005955186152, "grad_norm": 5.089391708374023, "learning_rate": 2.9542943552267256e-05, "loss": 1.951410675048828, "memory(GiB)": 72.85, "step": 74040, "token_acc": 0.5551839464882943, "train_speed(iter/s)": 0.672778 }, { "epoch": 3.1723148108478645, "grad_norm": 5.448544979095459, "learning_rate": 2.953680301406643e-05, "loss": 1.9385421752929688, "memory(GiB)": 72.85, "step": 74045, "token_acc": 0.5606557377049181, "train_speed(iter/s)": 0.672766 }, { "epoch": 3.1725290261771133, "grad_norm": 5.382907867431641, "learning_rate": 2.95306628465756e-05, "loss": 1.8383075714111328, "memory(GiB)": 72.85, "step": 74050, "token_acc": 0.5851063829787234, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.172743241506362, "grad_norm": 4.9691362380981445, "learning_rate": 2.9524523049906027e-05, "loss": 2.0914661407470705, "memory(GiB)": 72.85, "step": 74055, "token_acc": 0.5423076923076923, 
"train_speed(iter/s)": 0.672778 }, { "epoch": 3.1729574568356114, "grad_norm": 6.397500038146973, "learning_rate": 2.9518383624168915e-05, "loss": 2.5034297943115233, "memory(GiB)": 72.85, "step": 74060, "token_acc": 0.4594594594594595, "train_speed(iter/s)": 0.672775 }, { "epoch": 3.17317167216486, "grad_norm": 5.437431335449219, "learning_rate": 2.9512244569475513e-05, "loss": 2.293872833251953, "memory(GiB)": 72.85, "step": 74065, "token_acc": 0.5068027210884354, "train_speed(iter/s)": 0.672782 }, { "epoch": 3.173385887494109, "grad_norm": 6.5856733322143555, "learning_rate": 2.9506105885937007e-05, "loss": 1.9545812606811523, "memory(GiB)": 72.85, "step": 74070, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.1736001028233582, "grad_norm": 7.913446426391602, "learning_rate": 2.9499967573664616e-05, "loss": 1.8749300003051759, "memory(GiB)": 72.85, "step": 74075, "token_acc": 0.5795454545454546, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.173814318152607, "grad_norm": 4.669710636138916, "learning_rate": 2.9493829632769554e-05, "loss": 2.22274112701416, "memory(GiB)": 72.85, "step": 74080, "token_acc": 0.49707602339181284, "train_speed(iter/s)": 0.672795 }, { "epoch": 3.174028533481856, "grad_norm": 5.26831579208374, "learning_rate": 2.948769206336298e-05, "loss": 2.042047691345215, "memory(GiB)": 72.85, "step": 74085, "token_acc": 0.5355805243445693, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.174242748811105, "grad_norm": 5.628278732299805, "learning_rate": 2.948155486555613e-05, "loss": 2.1436130523681642, "memory(GiB)": 72.85, "step": 74090, "token_acc": 0.5207547169811321, "train_speed(iter/s)": 0.67279 }, { "epoch": 3.174456964140354, "grad_norm": 6.0239081382751465, "learning_rate": 2.947541803946015e-05, "loss": 1.8770309448242188, "memory(GiB)": 72.85, "step": 74095, "token_acc": 0.5703971119133574, "train_speed(iter/s)": 0.672795 }, { "epoch": 3.1746711794696028, "grad_norm": 5.914608955383301, "learning_rate": 
2.9469281585186238e-05, "loss": 2.0586517333984373, "memory(GiB)": 72.85, "step": 74100, "token_acc": 0.5666666666666667, "train_speed(iter/s)": 0.672801 }, { "epoch": 3.174885394798852, "grad_norm": 6.596573352813721, "learning_rate": 2.9463145502845536e-05, "loss": 2.302309036254883, "memory(GiB)": 72.85, "step": 74105, "token_acc": 0.5040322580645161, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.175099610128101, "grad_norm": 5.5433549880981445, "learning_rate": 2.9457009792549227e-05, "loss": 1.842527198791504, "memory(GiB)": 72.85, "step": 74110, "token_acc": 0.6118012422360248, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.1753138254573496, "grad_norm": 5.609597206115723, "learning_rate": 2.945087445440846e-05, "loss": 2.1410106658935546, "memory(GiB)": 72.85, "step": 74115, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.175528040786599, "grad_norm": 6.02396297454834, "learning_rate": 2.9444739488534373e-05, "loss": 2.1051578521728516, "memory(GiB)": 72.85, "step": 74120, "token_acc": 0.5094339622641509, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.1757422561158477, "grad_norm": 6.4574995040893555, "learning_rate": 2.943860489503812e-05, "loss": 1.9092323303222656, "memory(GiB)": 72.85, "step": 74125, "token_acc": 0.6065573770491803, "train_speed(iter/s)": 0.67279 }, { "epoch": 3.1759564714450965, "grad_norm": 5.6539435386657715, "learning_rate": 2.943247067403082e-05, "loss": 2.020583724975586, "memory(GiB)": 72.85, "step": 74130, "token_acc": 0.5800711743772242, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.1761706867743458, "grad_norm": 8.042851448059082, "learning_rate": 2.942633682562361e-05, "loss": 1.9646841049194337, "memory(GiB)": 72.85, "step": 74135, "token_acc": 0.5622641509433962, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.1763849021035946, "grad_norm": 6.794559001922607, "learning_rate": 2.942020334992761e-05, "loss": 2.0016799926757813, "memory(GiB)": 72.85, "step": 74140, "token_acc": 
0.569811320754717, "train_speed(iter/s)": 0.672786 }, { "epoch": 3.1765991174328434, "grad_norm": 5.765057563781738, "learning_rate": 2.941407024705392e-05, "loss": 2.5803205490112306, "memory(GiB)": 72.85, "step": 74145, "token_acc": 0.4835164835164835, "train_speed(iter/s)": 0.6728 }, { "epoch": 3.1768133327620927, "grad_norm": 6.886010646820068, "learning_rate": 2.940793751711367e-05, "loss": 2.2987577438354494, "memory(GiB)": 72.85, "step": 74150, "token_acc": 0.5397923875432526, "train_speed(iter/s)": 0.672801 }, { "epoch": 3.1770275480913415, "grad_norm": 4.892776012420654, "learning_rate": 2.9401805160217933e-05, "loss": 1.982966995239258, "memory(GiB)": 72.85, "step": 74155, "token_acc": 0.5299684542586751, "train_speed(iter/s)": 0.672799 }, { "epoch": 3.1772417634205903, "grad_norm": 5.611025333404541, "learning_rate": 2.9395673176477834e-05, "loss": 2.1714637756347654, "memory(GiB)": 72.85, "step": 74160, "token_acc": 0.5382165605095541, "train_speed(iter/s)": 0.672808 }, { "epoch": 3.1774559787498395, "grad_norm": 6.066831111907959, "learning_rate": 2.9389541566004437e-05, "loss": 2.0967123031616213, "memory(GiB)": 72.85, "step": 74165, "token_acc": 0.5727554179566563, "train_speed(iter/s)": 0.672809 }, { "epoch": 3.1776701940790884, "grad_norm": 5.803770065307617, "learning_rate": 2.9383410328908834e-05, "loss": 1.9113277435302733, "memory(GiB)": 72.85, "step": 74170, "token_acc": 0.5306859205776173, "train_speed(iter/s)": 0.672817 }, { "epoch": 3.177884409408337, "grad_norm": 5.220874309539795, "learning_rate": 2.93772794653021e-05, "loss": 1.941503143310547, "memory(GiB)": 72.85, "step": 74175, "token_acc": 0.559748427672956, "train_speed(iter/s)": 0.672826 }, { "epoch": 3.1780986247375864, "grad_norm": 5.608453273773193, "learning_rate": 2.9371148975295283e-05, "loss": 1.8963478088378907, "memory(GiB)": 72.85, "step": 74180, "token_acc": 0.5613382899628253, "train_speed(iter/s)": 0.672824 }, { "epoch": 3.1783128400668352, "grad_norm": 
4.586099147796631, "learning_rate": 2.936501885899946e-05, "loss": 2.0646644592285157, "memory(GiB)": 72.85, "step": 74185, "token_acc": 0.5112994350282486, "train_speed(iter/s)": 0.672829 }, { "epoch": 3.178527055396084, "grad_norm": 7.830397129058838, "learning_rate": 2.935888911652568e-05, "loss": 2.1038593292236327, "memory(GiB)": 72.85, "step": 74190, "token_acc": 0.5587301587301587, "train_speed(iter/s)": 0.672832 }, { "epoch": 3.1787412707253333, "grad_norm": 5.362516403198242, "learning_rate": 2.935275974798497e-05, "loss": 2.119728660583496, "memory(GiB)": 72.85, "step": 74195, "token_acc": 0.5050505050505051, "train_speed(iter/s)": 0.672837 }, { "epoch": 3.178955486054582, "grad_norm": 7.244234085083008, "learning_rate": 2.9346630753488404e-05, "loss": 2.076127815246582, "memory(GiB)": 72.85, "step": 74200, "token_acc": 0.5092250922509225, "train_speed(iter/s)": 0.672831 }, { "epoch": 3.179169701383831, "grad_norm": 8.281059265136719, "learning_rate": 2.934050213314698e-05, "loss": 2.3842668533325195, "memory(GiB)": 72.85, "step": 74205, "token_acc": 0.47096774193548385, "train_speed(iter/s)": 0.672825 }, { "epoch": 3.17938391671308, "grad_norm": 4.9805827140808105, "learning_rate": 2.9334373887071752e-05, "loss": 2.2655588150024415, "memory(GiB)": 72.85, "step": 74210, "token_acc": 0.5270758122743683, "train_speed(iter/s)": 0.672829 }, { "epoch": 3.179598132042329, "grad_norm": 6.475459098815918, "learning_rate": 2.9328246015373727e-05, "loss": 2.1669849395751952, "memory(GiB)": 72.85, "step": 74215, "token_acc": 0.4942528735632184, "train_speed(iter/s)": 0.672832 }, { "epoch": 3.179812347371578, "grad_norm": 5.831925392150879, "learning_rate": 2.932211851816391e-05, "loss": 2.2512451171875, "memory(GiB)": 72.85, "step": 74220, "token_acc": 0.5481927710843374, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.180026562700827, "grad_norm": 5.827132701873779, "learning_rate": 2.931599139555332e-05, "loss": 2.0924619674682616, "memory(GiB)": 72.85, "step": 
74225, "token_acc": 0.5607843137254902, "train_speed(iter/s)": 0.672845 }, { "epoch": 3.180240778030076, "grad_norm": 5.215449810028076, "learning_rate": 2.9309864647652928e-05, "loss": 2.310578536987305, "memory(GiB)": 72.85, "step": 74230, "token_acc": 0.5149501661129569, "train_speed(iter/s)": 0.672843 }, { "epoch": 3.1804549933593247, "grad_norm": 5.773408889770508, "learning_rate": 2.930373827457378e-05, "loss": 2.231406021118164, "memory(GiB)": 72.85, "step": 74235, "token_acc": 0.4967105263157895, "train_speed(iter/s)": 0.672849 }, { "epoch": 3.180669208688574, "grad_norm": 5.610549449920654, "learning_rate": 2.9297612276426802e-05, "loss": 2.420410919189453, "memory(GiB)": 72.85, "step": 74240, "token_acc": 0.5795053003533569, "train_speed(iter/s)": 0.672844 }, { "epoch": 3.1808834240178228, "grad_norm": 5.100445747375488, "learning_rate": 2.9291486653323015e-05, "loss": 1.999310302734375, "memory(GiB)": 72.85, "step": 74245, "token_acc": 0.5358649789029536, "train_speed(iter/s)": 0.672844 }, { "epoch": 3.1810976393470716, "grad_norm": 6.273720741271973, "learning_rate": 2.9285361405373356e-05, "loss": 2.0327014923095703, "memory(GiB)": 72.85, "step": 74250, "token_acc": 0.5387596899224806, "train_speed(iter/s)": 0.67284 }, { "epoch": 3.181311854676321, "grad_norm": 6.3630781173706055, "learning_rate": 2.9279236532688827e-05, "loss": 1.924017333984375, "memory(GiB)": 72.85, "step": 74255, "token_acc": 0.549520766773163, "train_speed(iter/s)": 0.672847 }, { "epoch": 3.1815260700055696, "grad_norm": 6.703378200531006, "learning_rate": 2.927311203538036e-05, "loss": 2.0533140182495115, "memory(GiB)": 72.85, "step": 74260, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.672855 }, { "epoch": 3.1817402853348185, "grad_norm": 5.264691352844238, "learning_rate": 2.926698791355891e-05, "loss": 1.9920387268066406, "memory(GiB)": 72.85, "step": 74265, "token_acc": 0.5154929577464789, "train_speed(iter/s)": 0.672863 }, { "epoch": 3.1819545006640677, 
"grad_norm": 6.641918659210205, "learning_rate": 2.9260864167335422e-05, "loss": 1.9091482162475586, "memory(GiB)": 72.85, "step": 74270, "token_acc": 0.5583941605839416, "train_speed(iter/s)": 0.672868 }, { "epoch": 3.1821687159933165, "grad_norm": 4.654962539672852, "learning_rate": 2.9254740796820833e-05, "loss": 2.3112993240356445, "memory(GiB)": 72.85, "step": 74275, "token_acc": 0.5, "train_speed(iter/s)": 0.672868 }, { "epoch": 3.1823829313225653, "grad_norm": 5.317541122436523, "learning_rate": 2.9248617802126076e-05, "loss": 2.218244171142578, "memory(GiB)": 72.85, "step": 74280, "token_acc": 0.5337423312883436, "train_speed(iter/s)": 0.672862 }, { "epoch": 3.1825971466518146, "grad_norm": 5.076883316040039, "learning_rate": 2.924249518336208e-05, "loss": 1.8080183029174806, "memory(GiB)": 72.85, "step": 74285, "token_acc": 0.587248322147651, "train_speed(iter/s)": 0.672856 }, { "epoch": 3.1828113619810634, "grad_norm": 7.074796199798584, "learning_rate": 2.9236372940639744e-05, "loss": 2.1466800689697267, "memory(GiB)": 72.85, "step": 74290, "token_acc": 0.5387096774193548, "train_speed(iter/s)": 0.672862 }, { "epoch": 3.183025577310312, "grad_norm": 4.99068021774292, "learning_rate": 2.9230251074070002e-05, "loss": 1.9722869873046875, "memory(GiB)": 72.85, "step": 74295, "token_acc": 0.5789473684210527, "train_speed(iter/s)": 0.672862 }, { "epoch": 3.1832397926395615, "grad_norm": 5.773390769958496, "learning_rate": 2.9224129583763743e-05, "loss": 2.0065147399902346, "memory(GiB)": 72.85, "step": 74300, "token_acc": 0.5286624203821656, "train_speed(iter/s)": 0.672859 }, { "epoch": 3.1834540079688103, "grad_norm": 6.54079008102417, "learning_rate": 2.9218008469831848e-05, "loss": 2.0459358215332033, "memory(GiB)": 72.85, "step": 74305, "token_acc": 0.5518518518518518, "train_speed(iter/s)": 0.672869 }, { "epoch": 3.183668223298059, "grad_norm": 8.17691707611084, "learning_rate": 2.9211887732385234e-05, "loss": 2.192416763305664, "memory(GiB)": 72.85, 
"step": 74310, "token_acc": 0.5577557755775577, "train_speed(iter/s)": 0.672864 }, { "epoch": 3.1838824386273084, "grad_norm": 4.902279853820801, "learning_rate": 2.9205767371534787e-05, "loss": 2.1474891662597657, "memory(GiB)": 72.85, "step": 74315, "token_acc": 0.5064516129032258, "train_speed(iter/s)": 0.672847 }, { "epoch": 3.184096653956557, "grad_norm": 4.362852096557617, "learning_rate": 2.9199647387391365e-05, "loss": 2.16717529296875, "memory(GiB)": 72.85, "step": 74320, "token_acc": 0.5267605633802817, "train_speed(iter/s)": 0.672848 }, { "epoch": 3.184310869285806, "grad_norm": 5.8477678298950195, "learning_rate": 2.919352778006584e-05, "loss": 2.477178955078125, "memory(GiB)": 72.85, "step": 74325, "token_acc": 0.4867924528301887, "train_speed(iter/s)": 0.672854 }, { "epoch": 3.1845250846150552, "grad_norm": 5.438309669494629, "learning_rate": 2.918740854966908e-05, "loss": 2.237233352661133, "memory(GiB)": 72.85, "step": 74330, "token_acc": 0.5146579804560261, "train_speed(iter/s)": 0.672854 }, { "epoch": 3.184739299944304, "grad_norm": 5.650671005249023, "learning_rate": 2.9181289696311943e-05, "loss": 2.2959291458129885, "memory(GiB)": 72.85, "step": 74335, "token_acc": 0.5019011406844106, "train_speed(iter/s)": 0.672849 }, { "epoch": 3.184953515273553, "grad_norm": 6.672481536865234, "learning_rate": 2.9175171220105268e-05, "loss": 2.4878313064575197, "memory(GiB)": 72.85, "step": 74340, "token_acc": 0.4623287671232877, "train_speed(iter/s)": 0.672855 }, { "epoch": 3.185167730602802, "grad_norm": 6.028575420379639, "learning_rate": 2.9169053121159884e-05, "loss": 2.2205568313598634, "memory(GiB)": 72.85, "step": 74345, "token_acc": 0.5382436260623229, "train_speed(iter/s)": 0.67286 }, { "epoch": 3.185381945932051, "grad_norm": 5.1698126792907715, "learning_rate": 2.9162935399586665e-05, "loss": 2.1898172378540037, "memory(GiB)": 72.85, "step": 74350, "token_acc": 0.5124223602484472, "train_speed(iter/s)": 0.672865 }, { "epoch": 3.1855961612612997, 
"grad_norm": 5.599262237548828, "learning_rate": 2.9156818055496414e-05, "loss": 2.1636314392089844, "memory(GiB)": 72.85, "step": 74355, "token_acc": 0.569620253164557, "train_speed(iter/s)": 0.672872 }, { "epoch": 3.185810376590549, "grad_norm": 5.473653793334961, "learning_rate": 2.9150701088999966e-05, "loss": 2.092263031005859, "memory(GiB)": 72.85, "step": 74360, "token_acc": 0.508833922261484, "train_speed(iter/s)": 0.67287 }, { "epoch": 3.186024591919798, "grad_norm": 4.296537399291992, "learning_rate": 2.9144584500208116e-05, "loss": 2.0550989151000976, "memory(GiB)": 72.85, "step": 74365, "token_acc": 0.5427631578947368, "train_speed(iter/s)": 0.672873 }, { "epoch": 3.1862388072490466, "grad_norm": 7.286295413970947, "learning_rate": 2.913846828923167e-05, "loss": 2.064228820800781, "memory(GiB)": 72.85, "step": 74370, "token_acc": 0.5896656534954408, "train_speed(iter/s)": 0.672886 }, { "epoch": 3.186453022578296, "grad_norm": 6.344146728515625, "learning_rate": 2.9132352456181457e-05, "loss": 2.3241527557373045, "memory(GiB)": 72.85, "step": 74375, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.672888 }, { "epoch": 3.1866672379075447, "grad_norm": 5.186686038970947, "learning_rate": 2.912623700116823e-05, "loss": 2.503700828552246, "memory(GiB)": 72.85, "step": 74380, "token_acc": 0.48534201954397393, "train_speed(iter/s)": 0.672886 }, { "epoch": 3.1868814532367935, "grad_norm": 4.1240553855896, "learning_rate": 2.9120121924302823e-05, "loss": 2.161570167541504, "memory(GiB)": 72.85, "step": 74385, "token_acc": 0.5156794425087108, "train_speed(iter/s)": 0.672882 }, { "epoch": 3.1870956685660428, "grad_norm": 5.754187107086182, "learning_rate": 2.9114007225696e-05, "loss": 2.0198169708251954, "memory(GiB)": 72.85, "step": 74390, "token_acc": 0.5766423357664233, "train_speed(iter/s)": 0.672895 }, { "epoch": 3.1873098838952916, "grad_norm": 6.249663352966309, "learning_rate": 2.9107892905458523e-05, "loss": 2.127261734008789, "memory(GiB)": 
72.85, "step": 74395, "token_acc": 0.5308219178082192, "train_speed(iter/s)": 0.6729 }, { "epoch": 3.1875240992245404, "grad_norm": 6.622662544250488, "learning_rate": 2.9101778963701164e-05, "loss": 1.954937744140625, "memory(GiB)": 72.85, "step": 74400, "token_acc": 0.5589225589225589, "train_speed(iter/s)": 0.672891 }, { "epoch": 3.1877383145537896, "grad_norm": 6.8440632820129395, "learning_rate": 2.9095665400534665e-05, "loss": 2.350681686401367, "memory(GiB)": 72.85, "step": 74405, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.672895 }, { "epoch": 3.1879525298830385, "grad_norm": 4.512284755706787, "learning_rate": 2.908955221606982e-05, "loss": 2.2364553451538085, "memory(GiB)": 72.85, "step": 74410, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.672889 }, { "epoch": 3.1881667452122873, "grad_norm": 6.610019683837891, "learning_rate": 2.908343941041735e-05, "loss": 2.1145145416259767, "memory(GiB)": 72.85, "step": 74415, "token_acc": 0.5048231511254019, "train_speed(iter/s)": 0.672885 }, { "epoch": 3.1883809605415365, "grad_norm": 6.236812114715576, "learning_rate": 2.9077326983687996e-05, "loss": 2.242798614501953, "memory(GiB)": 72.85, "step": 74420, "token_acc": 0.5584045584045584, "train_speed(iter/s)": 0.672888 }, { "epoch": 3.1885951758707853, "grad_norm": 4.943971157073975, "learning_rate": 2.907121493599249e-05, "loss": 2.167533111572266, "memory(GiB)": 72.85, "step": 74425, "token_acc": 0.5071942446043165, "train_speed(iter/s)": 0.672882 }, { "epoch": 3.188809391200034, "grad_norm": 5.009179592132568, "learning_rate": 2.906510326744154e-05, "loss": 1.9416221618652343, "memory(GiB)": 72.85, "step": 74430, "token_acc": 0.571969696969697, "train_speed(iter/s)": 0.672885 }, { "epoch": 3.1890236065292834, "grad_norm": 6.157591342926025, "learning_rate": 2.90589919781459e-05, "loss": 2.208621788024902, "memory(GiB)": 72.85, "step": 74435, "token_acc": 0.5107142857142857, "train_speed(iter/s)": 0.672876 }, { "epoch": 
3.189237821858532, "grad_norm": 7.030471324920654, "learning_rate": 2.9052881068216265e-05, "loss": 2.2052072525024413, "memory(GiB)": 72.85, "step": 74440, "token_acc": 0.512396694214876, "train_speed(iter/s)": 0.672875 }, { "epoch": 3.189452037187781, "grad_norm": 8.18649959564209, "learning_rate": 2.9046770537763345e-05, "loss": 1.9582286834716798, "memory(GiB)": 72.85, "step": 74445, "token_acc": 0.5096774193548387, "train_speed(iter/s)": 0.672876 }, { "epoch": 3.1896662525170303, "grad_norm": 4.3016557693481445, "learning_rate": 2.904066038689781e-05, "loss": 2.028493118286133, "memory(GiB)": 72.85, "step": 74450, "token_acc": 0.5045871559633027, "train_speed(iter/s)": 0.672881 }, { "epoch": 3.189880467846279, "grad_norm": 5.514505386352539, "learning_rate": 2.903455061573039e-05, "loss": 2.1073509216308595, "memory(GiB)": 72.85, "step": 74455, "token_acc": 0.538235294117647, "train_speed(iter/s)": 0.672879 }, { "epoch": 3.190094683175528, "grad_norm": 5.694091320037842, "learning_rate": 2.902844122437175e-05, "loss": 1.9386621475219727, "memory(GiB)": 72.85, "step": 74460, "token_acc": 0.548951048951049, "train_speed(iter/s)": 0.67288 }, { "epoch": 3.190308898504777, "grad_norm": 6.66751766204834, "learning_rate": 2.9022332212932556e-05, "loss": 2.2529525756835938, "memory(GiB)": 72.85, "step": 74465, "token_acc": 0.5224489795918368, "train_speed(iter/s)": 0.672887 }, { "epoch": 3.190523113834026, "grad_norm": 7.547196865081787, "learning_rate": 2.9016223581523515e-05, "loss": 2.1970331192016603, "memory(GiB)": 72.85, "step": 74470, "token_acc": 0.5159235668789809, "train_speed(iter/s)": 0.672875 }, { "epoch": 3.190737329163275, "grad_norm": 5.7015557289123535, "learning_rate": 2.9010115330255262e-05, "loss": 1.7567821502685548, "memory(GiB)": 72.85, "step": 74475, "token_acc": 0.5968379446640316, "train_speed(iter/s)": 0.672876 }, { "epoch": 3.190951544492524, "grad_norm": 5.054845809936523, "learning_rate": 2.9004007459238463e-05, "loss": 
2.2356889724731444, "memory(GiB)": 72.85, "step": 74480, "token_acc": 0.47988505747126436, "train_speed(iter/s)": 0.672878 }, { "epoch": 3.191165759821773, "grad_norm": 5.710044860839844, "learning_rate": 2.8997899968583764e-05, "loss": 2.1903133392333984, "memory(GiB)": 72.85, "step": 74485, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.672884 }, { "epoch": 3.1913799751510217, "grad_norm": 7.8465800285339355, "learning_rate": 2.899179285840179e-05, "loss": 2.2688066482543947, "memory(GiB)": 72.85, "step": 74490, "token_acc": 0.5512820512820513, "train_speed(iter/s)": 0.672873 }, { "epoch": 3.191594190480271, "grad_norm": 6.0529680252075195, "learning_rate": 2.8985686128803212e-05, "loss": 2.363455581665039, "memory(GiB)": 72.85, "step": 74495, "token_acc": 0.4722222222222222, "train_speed(iter/s)": 0.672872 }, { "epoch": 3.1918084058095197, "grad_norm": 6.372243404388428, "learning_rate": 2.8979579779898646e-05, "loss": 2.086376190185547, "memory(GiB)": 72.85, "step": 74500, "token_acc": 0.5363321799307958, "train_speed(iter/s)": 0.672856 }, { "epoch": 3.1918084058095197, "eval_loss": 2.229820489883423, "eval_runtime": 15.0089, "eval_samples_per_second": 6.663, "eval_steps_per_second": 6.663, "eval_token_acc": 0.4941329856584094, "step": 74500 }, { "epoch": 3.1920226211387686, "grad_norm": 8.794805526733398, "learning_rate": 2.8973473811798714e-05, "loss": 1.992274284362793, "memory(GiB)": 72.85, "step": 74505, "token_acc": 0.5086872586872587, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.192236836468018, "grad_norm": 6.875923156738281, "learning_rate": 2.896736822461402e-05, "loss": 1.990230178833008, "memory(GiB)": 72.85, "step": 74510, "token_acc": 0.5158227848101266, "train_speed(iter/s)": 0.672742 }, { "epoch": 3.1924510517972666, "grad_norm": 6.026576519012451, "learning_rate": 2.8961263018455166e-05, "loss": 2.1437051773071287, "memory(GiB)": 72.85, "step": 74515, "token_acc": 0.5753846153846154, "train_speed(iter/s)": 0.672735 }, { "epoch": 
3.1926652671265154, "grad_norm": 5.093513488769531, "learning_rate": 2.8955158193432786e-05, "loss": 1.9223709106445312, "memory(GiB)": 72.85, "step": 74520, "token_acc": 0.5755395683453237, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.1928794824557647, "grad_norm": 8.979086875915527, "learning_rate": 2.894905374965744e-05, "loss": 2.1224334716796873, "memory(GiB)": 72.85, "step": 74525, "token_acc": 0.5751633986928104, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.1930936977850135, "grad_norm": 5.638503551483154, "learning_rate": 2.8942949687239753e-05, "loss": 2.0361812591552733, "memory(GiB)": 72.85, "step": 74530, "token_acc": 0.5636363636363636, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.1933079131142623, "grad_norm": 5.921193599700928, "learning_rate": 2.8936846006290287e-05, "loss": 1.8033878326416015, "memory(GiB)": 72.85, "step": 74535, "token_acc": 0.5764705882352941, "train_speed(iter/s)": 0.672741 }, { "epoch": 3.1935221284435116, "grad_norm": 4.739704132080078, "learning_rate": 2.8930742706919612e-05, "loss": 2.0117759704589844, "memory(GiB)": 72.85, "step": 74540, "token_acc": 0.5737704918032787, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.1937363437727604, "grad_norm": 6.384622097015381, "learning_rate": 2.8924639789238294e-05, "loss": 1.8954021453857421, "memory(GiB)": 72.85, "step": 74545, "token_acc": 0.5836575875486382, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.193950559102009, "grad_norm": 5.1944732666015625, "learning_rate": 2.891853725335688e-05, "loss": 1.937265396118164, "memory(GiB)": 72.85, "step": 74550, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672747 }, { "epoch": 3.1941647744312585, "grad_norm": 5.133950710296631, "learning_rate": 2.891243509938596e-05, "loss": 2.357512664794922, "memory(GiB)": 72.85, "step": 74555, "token_acc": 0.5147540983606558, "train_speed(iter/s)": 0.672747 }, { "epoch": 3.1943789897605073, "grad_norm": 5.098080635070801, "learning_rate": 2.8906333327436054e-05, "loss": 
2.3213478088378907, "memory(GiB)": 72.85, "step": 74560, "token_acc": 0.5117647058823529, "train_speed(iter/s)": 0.672747 }, { "epoch": 3.194593205089756, "grad_norm": 6.427610397338867, "learning_rate": 2.8900231937617717e-05, "loss": 2.0208078384399415, "memory(GiB)": 72.85, "step": 74565, "token_acc": 0.5559322033898305, "train_speed(iter/s)": 0.672749 }, { "epoch": 3.1948074204190053, "grad_norm": 6.359622001647949, "learning_rate": 2.889413093004146e-05, "loss": 1.9807416915893554, "memory(GiB)": 72.85, "step": 74570, "token_acc": 0.5798319327731093, "train_speed(iter/s)": 0.672749 }, { "epoch": 3.195021635748254, "grad_norm": 5.703744888305664, "learning_rate": 2.8888030304817814e-05, "loss": 2.002880668640137, "memory(GiB)": 72.85, "step": 74575, "token_acc": 0.5463917525773195, "train_speed(iter/s)": 0.672742 }, { "epoch": 3.195235851077503, "grad_norm": 6.125221252441406, "learning_rate": 2.8881930062057317e-05, "loss": 1.961419677734375, "memory(GiB)": 72.85, "step": 74580, "token_acc": 0.574750830564784, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.195450066406752, "grad_norm": 5.699084281921387, "learning_rate": 2.8875830201870463e-05, "loss": 2.1063865661621093, "memory(GiB)": 72.85, "step": 74585, "token_acc": 0.5077399380804953, "train_speed(iter/s)": 0.672743 }, { "epoch": 3.195664281736001, "grad_norm": 6.480649471282959, "learning_rate": 2.8869730724367765e-05, "loss": 2.2017770767211915, "memory(GiB)": 72.85, "step": 74590, "token_acc": 0.5396341463414634, "train_speed(iter/s)": 0.672742 }, { "epoch": 3.19587849706525, "grad_norm": 6.124151229858398, "learning_rate": 2.8863631629659716e-05, "loss": 2.3552379608154297, "memory(GiB)": 72.85, "step": 74595, "token_acc": 0.5198675496688742, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.196092712394499, "grad_norm": 7.395498752593994, "learning_rate": 2.8857532917856788e-05, "loss": 2.285479736328125, "memory(GiB)": 72.85, "step": 74600, "token_acc": 0.5220588235294118, "train_speed(iter/s)": 
0.672732 }, { "epoch": 3.196306927723748, "grad_norm": 5.4432291984558105, "learning_rate": 2.8851434589069503e-05, "loss": 2.0603559494018553, "memory(GiB)": 72.85, "step": 74605, "token_acc": 0.540785498489426, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.1965211430529967, "grad_norm": 5.18524694442749, "learning_rate": 2.8845336643408305e-05, "loss": 2.071584701538086, "memory(GiB)": 72.85, "step": 74610, "token_acc": 0.4928571428571429, "train_speed(iter/s)": 0.672715 }, { "epoch": 3.196735358382246, "grad_norm": 6.002305030822754, "learning_rate": 2.8839239080983692e-05, "loss": 2.158660888671875, "memory(GiB)": 72.85, "step": 74615, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.196949573711495, "grad_norm": 5.05136775970459, "learning_rate": 2.8833141901906112e-05, "loss": 1.903086280822754, "memory(GiB)": 72.85, "step": 74620, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.1971637890407436, "grad_norm": 5.191627025604248, "learning_rate": 2.8827045106286033e-05, "loss": 2.03790397644043, "memory(GiB)": 72.85, "step": 74625, "token_acc": 0.54, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.197378004369993, "grad_norm": 6.4492950439453125, "learning_rate": 2.8820948694233884e-05, "loss": 2.098979949951172, "memory(GiB)": 72.85, "step": 74630, "token_acc": 0.5395189003436426, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.1975922196992417, "grad_norm": 4.229830265045166, "learning_rate": 2.8814852665860104e-05, "loss": 2.2737829208374025, "memory(GiB)": 72.85, "step": 74635, "token_acc": 0.5081967213114754, "train_speed(iter/s)": 0.67269 }, { "epoch": 3.1978064350284905, "grad_norm": 4.309218883514404, "learning_rate": 2.8808757021275157e-05, "loss": 1.970942497253418, "memory(GiB)": 72.85, "step": 74640, "token_acc": 0.56, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.1980206503577397, "grad_norm": 7.121971607208252, "learning_rate": 2.8802661760589455e-05, "loss": 2.1969942092895507, 
"memory(GiB)": 72.85, "step": 74645, "token_acc": 0.5144694533762058, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.1982348656869886, "grad_norm": 6.159330368041992, "learning_rate": 2.8796566883913424e-05, "loss": 2.000472640991211, "memory(GiB)": 72.85, "step": 74650, "token_acc": 0.5437956204379562, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.1984490810162374, "grad_norm": 6.2607903480529785, "learning_rate": 2.8790472391357463e-05, "loss": 1.8800365447998046, "memory(GiB)": 72.85, "step": 74655, "token_acc": 0.5806451612903226, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.1986632963454866, "grad_norm": 5.261260509490967, "learning_rate": 2.878437828303198e-05, "loss": 2.2285783767700194, "memory(GiB)": 72.85, "step": 74660, "token_acc": 0.49838187702265374, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.1988775116747354, "grad_norm": 5.059776306152344, "learning_rate": 2.8778284559047408e-05, "loss": 1.962799644470215, "memory(GiB)": 72.85, "step": 74665, "token_acc": 0.5476190476190477, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.1990917270039843, "grad_norm": 6.1347150802612305, "learning_rate": 2.8772191219514112e-05, "loss": 1.9791141510009767, "memory(GiB)": 72.85, "step": 74670, "token_acc": 0.5440613026819924, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.1993059423332335, "grad_norm": 5.506320953369141, "learning_rate": 2.8766098264542474e-05, "loss": 2.3373506546020506, "memory(GiB)": 72.85, "step": 74675, "token_acc": 0.5134328358208955, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.1995201576624823, "grad_norm": 4.870406150817871, "learning_rate": 2.8760005694242897e-05, "loss": 1.859560203552246, "memory(GiB)": 72.85, "step": 74680, "token_acc": 0.56, "train_speed(iter/s)": 0.67271 }, { "epoch": 3.199734372991731, "grad_norm": 6.657196521759033, "learning_rate": 2.875391350872575e-05, "loss": 1.9145162582397461, "memory(GiB)": 72.85, "step": 74685, "token_acc": 0.58984375, "train_speed(iter/s)": 0.672718 }, { "epoch": 
3.1999485883209804, "grad_norm": 5.04622745513916, "learning_rate": 2.874782170810138e-05, "loss": 2.0306488037109376, "memory(GiB)": 72.85, "step": 74690, "token_acc": 0.5615141955835962, "train_speed(iter/s)": 0.672715 }, { "epoch": 3.200162803650229, "grad_norm": 6.657513618469238, "learning_rate": 2.874173029248014e-05, "loss": 1.9510763168334961, "memory(GiB)": 72.85, "step": 74695, "token_acc": 0.5709459459459459, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.200377018979478, "grad_norm": 6.206558704376221, "learning_rate": 2.873563926197242e-05, "loss": 2.08792724609375, "memory(GiB)": 72.85, "step": 74700, "token_acc": 0.5413793103448276, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.2005912343087273, "grad_norm": 5.269947528839111, "learning_rate": 2.872954861668854e-05, "loss": 2.3917898178100585, "memory(GiB)": 72.85, "step": 74705, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.672728 }, { "epoch": 3.200805449637976, "grad_norm": 5.67980432510376, "learning_rate": 2.872345835673884e-05, "loss": 2.103990173339844, "memory(GiB)": 72.85, "step": 74710, "token_acc": 0.5346534653465347, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.201019664967225, "grad_norm": 5.84926176071167, "learning_rate": 2.8717368482233654e-05, "loss": 2.16070556640625, "memory(GiB)": 72.85, "step": 74715, "token_acc": 0.5328947368421053, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.201233880296474, "grad_norm": 5.914472579956055, "learning_rate": 2.871127899328328e-05, "loss": 1.9182777404785156, "memory(GiB)": 72.85, "step": 74720, "token_acc": 0.6014492753623188, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.201448095625723, "grad_norm": 5.086022853851318, "learning_rate": 2.8705189889998074e-05, "loss": 2.096579742431641, "memory(GiB)": 72.85, "step": 74725, "token_acc": 0.5344129554655871, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.201662310954972, "grad_norm": 6.4005022048950195, "learning_rate": 2.869910117248834e-05, "loss": 1.8624559402465821, 
"memory(GiB)": 72.85, "step": 74730, "token_acc": 0.5909090909090909, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.201876526284221, "grad_norm": 6.4398322105407715, "learning_rate": 2.8693012840864363e-05, "loss": 2.23687686920166, "memory(GiB)": 72.85, "step": 74735, "token_acc": 0.5171232876712328, "train_speed(iter/s)": 0.672746 }, { "epoch": 3.20209074161347, "grad_norm": 4.05196475982666, "learning_rate": 2.8686924895236444e-05, "loss": 2.0672185897827147, "memory(GiB)": 72.85, "step": 74740, "token_acc": 0.5253623188405797, "train_speed(iter/s)": 0.672746 }, { "epoch": 3.2023049569427187, "grad_norm": 5.822434902191162, "learning_rate": 2.8680837335714854e-05, "loss": 2.3385921478271485, "memory(GiB)": 72.85, "step": 74745, "token_acc": 0.515625, "train_speed(iter/s)": 0.672746 }, { "epoch": 3.202519172271968, "grad_norm": 5.210939884185791, "learning_rate": 2.867475016240992e-05, "loss": 2.0638683319091795, "memory(GiB)": 72.85, "step": 74750, "token_acc": 0.5300353356890459, "train_speed(iter/s)": 0.672747 }, { "epoch": 3.2027333876012167, "grad_norm": 10.458639144897461, "learning_rate": 2.866866337543187e-05, "loss": 2.0722604751586915, "memory(GiB)": 72.85, "step": 74755, "token_acc": 0.5573122529644269, "train_speed(iter/s)": 0.672761 }, { "epoch": 3.2029476029304655, "grad_norm": 5.518771171569824, "learning_rate": 2.866257697489101e-05, "loss": 2.021980857849121, "memory(GiB)": 72.85, "step": 74760, "token_acc": 0.5400696864111498, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.203161818259715, "grad_norm": 4.904399871826172, "learning_rate": 2.8656490960897586e-05, "loss": 2.2762968063354494, "memory(GiB)": 72.85, "step": 74765, "token_acc": 0.5154320987654321, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.2033760335889636, "grad_norm": 4.048534870147705, "learning_rate": 2.865040533356185e-05, "loss": 1.9517339706420898, "memory(GiB)": 72.85, "step": 74770, "token_acc": 0.5507246376811594, "train_speed(iter/s)": 0.672771 }, { "epoch": 
3.2035902489182124, "grad_norm": 6.567921161651611, "learning_rate": 2.8644320092994047e-05, "loss": 1.6470565795898438, "memory(GiB)": 72.85, "step": 74775, "token_acc": 0.5912698412698413, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.2038044642474617, "grad_norm": 6.761322021484375, "learning_rate": 2.86382352393044e-05, "loss": 2.101654624938965, "memory(GiB)": 72.85, "step": 74780, "token_acc": 0.5362776025236593, "train_speed(iter/s)": 0.672778 }, { "epoch": 3.2040186795767105, "grad_norm": 8.045844078063965, "learning_rate": 2.8632150772603183e-05, "loss": 1.637839126586914, "memory(GiB)": 72.85, "step": 74785, "token_acc": 0.6166666666666667, "train_speed(iter/s)": 0.672762 }, { "epoch": 3.2042328949059593, "grad_norm": 6.058800220489502, "learning_rate": 2.8626066693000596e-05, "loss": 2.3126739501953124, "memory(GiB)": 72.85, "step": 74790, "token_acc": 0.5205882352941177, "train_speed(iter/s)": 0.672768 }, { "epoch": 3.2044471102352086, "grad_norm": 5.788595676422119, "learning_rate": 2.8619983000606866e-05, "loss": 2.2933883666992188, "memory(GiB)": 72.85, "step": 74795, "token_acc": 0.4935897435897436, "train_speed(iter/s)": 0.672769 }, { "epoch": 3.2046613255644574, "grad_norm": 6.162851333618164, "learning_rate": 2.86138996955322e-05, "loss": 2.1492162704467774, "memory(GiB)": 72.85, "step": 74800, "token_acc": 0.5441696113074205, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.204875540893706, "grad_norm": 5.907881259918213, "learning_rate": 2.8607816777886776e-05, "loss": 2.149750518798828, "memory(GiB)": 72.85, "step": 74805, "token_acc": 0.549645390070922, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.2050897562229554, "grad_norm": 4.718456745147705, "learning_rate": 2.860173424778084e-05, "loss": 2.1566396713256837, "memory(GiB)": 72.85, "step": 74810, "token_acc": 0.5061349693251533, "train_speed(iter/s)": 0.672763 }, { "epoch": 3.2053039715522043, "grad_norm": 6.583543300628662, "learning_rate": 2.8595652105324566e-05, "loss": 
2.553867530822754, "memory(GiB)": 72.85, "step": 74815, "token_acc": 0.4568345323741007, "train_speed(iter/s)": 0.672759 }, { "epoch": 3.205518186881453, "grad_norm": 7.400876998901367, "learning_rate": 2.858957035062811e-05, "loss": 2.1495393753051757, "memory(GiB)": 72.85, "step": 74820, "token_acc": 0.5447470817120622, "train_speed(iter/s)": 0.672757 }, { "epoch": 3.2057324022107023, "grad_norm": 5.608740329742432, "learning_rate": 2.8583488983801693e-05, "loss": 2.0798528671264647, "memory(GiB)": 72.85, "step": 74825, "token_acc": 0.5757575757575758, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.205946617539951, "grad_norm": 5.295559406280518, "learning_rate": 2.8577408004955465e-05, "loss": 2.2385543823242187, "memory(GiB)": 72.85, "step": 74830, "token_acc": 0.5513307984790875, "train_speed(iter/s)": 0.672748 }, { "epoch": 3.2061608328692, "grad_norm": 6.37840461730957, "learning_rate": 2.8571327414199578e-05, "loss": 2.163452911376953, "memory(GiB)": 72.85, "step": 74835, "token_acc": 0.57, "train_speed(iter/s)": 0.672741 }, { "epoch": 3.206375048198449, "grad_norm": 7.162784099578857, "learning_rate": 2.8565247211644186e-05, "loss": 2.0130910873413086, "memory(GiB)": 72.85, "step": 74840, "token_acc": 0.5414012738853503, "train_speed(iter/s)": 0.672741 }, { "epoch": 3.206589263527698, "grad_norm": 5.840203762054443, "learning_rate": 2.8559167397399465e-05, "loss": 1.924873161315918, "memory(GiB)": 72.85, "step": 74845, "token_acc": 0.5431034482758621, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.206803478856947, "grad_norm": 5.954565525054932, "learning_rate": 2.8553087971575543e-05, "loss": 2.0673547744750977, "memory(GiB)": 72.85, "step": 74850, "token_acc": 0.5508771929824562, "train_speed(iter/s)": 0.672736 }, { "epoch": 3.207017694186196, "grad_norm": 13.425700187683105, "learning_rate": 2.854700893428255e-05, "loss": 1.9893112182617188, "memory(GiB)": 72.85, "step": 74855, "token_acc": 0.5245901639344263, "train_speed(iter/s)": 0.672747 }, { 
"epoch": 3.207231909515445, "grad_norm": 6.839000225067139, "learning_rate": 2.8540930285630608e-05, "loss": 2.1569774627685545, "memory(GiB)": 72.85, "step": 74860, "token_acc": 0.5171102661596958, "train_speed(iter/s)": 0.672747 }, { "epoch": 3.2074461248446937, "grad_norm": 5.928360939025879, "learning_rate": 2.853485202572983e-05, "loss": 2.0259531021118162, "memory(GiB)": 72.85, "step": 74865, "token_acc": 0.556390977443609, "train_speed(iter/s)": 0.672749 }, { "epoch": 3.207660340173943, "grad_norm": 6.07545280456543, "learning_rate": 2.8528774154690364e-05, "loss": 1.7894851684570312, "memory(GiB)": 72.85, "step": 74870, "token_acc": 0.6230769230769231, "train_speed(iter/s)": 0.672754 }, { "epoch": 3.207874555503192, "grad_norm": 7.247676372528076, "learning_rate": 2.8522696672622297e-05, "loss": 2.2809354782104494, "memory(GiB)": 72.85, "step": 74875, "token_acc": 0.5182724252491694, "train_speed(iter/s)": 0.672754 }, { "epoch": 3.2080887708324406, "grad_norm": 4.489987373352051, "learning_rate": 2.8516619579635724e-05, "loss": 2.110483741760254, "memory(GiB)": 72.85, "step": 74880, "token_acc": 0.548951048951049, "train_speed(iter/s)": 0.672754 }, { "epoch": 3.20830298616169, "grad_norm": 6.203021049499512, "learning_rate": 2.851054287584074e-05, "loss": 2.0287546157836913, "memory(GiB)": 72.85, "step": 74885, "token_acc": 0.5494880546075085, "train_speed(iter/s)": 0.67276 }, { "epoch": 3.2085172014909387, "grad_norm": 6.290343761444092, "learning_rate": 2.8504466561347408e-05, "loss": 2.106280517578125, "memory(GiB)": 72.85, "step": 74890, "token_acc": 0.5147058823529411, "train_speed(iter/s)": 0.672753 }, { "epoch": 3.2087314168201875, "grad_norm": 5.73297119140625, "learning_rate": 2.8498390636265838e-05, "loss": 1.8804590225219726, "memory(GiB)": 72.85, "step": 74895, "token_acc": 0.5579937304075235, "train_speed(iter/s)": 0.672751 }, { "epoch": 3.2089456321494367, "grad_norm": 5.31510591506958, "learning_rate": 2.84923151007061e-05, "loss": 
2.131842041015625, "memory(GiB)": 72.85, "step": 74900, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672747 }, { "epoch": 3.2091598474786855, "grad_norm": 6.119028568267822, "learning_rate": 2.848623995477826e-05, "loss": 2.0986934661865235, "memory(GiB)": 72.85, "step": 74905, "token_acc": 0.5399239543726235, "train_speed(iter/s)": 0.672743 }, { "epoch": 3.2093740628079344, "grad_norm": 5.764351844787598, "learning_rate": 2.8480165198592356e-05, "loss": 2.014116096496582, "memory(GiB)": 72.85, "step": 74910, "token_acc": 0.5468164794007491, "train_speed(iter/s)": 0.672742 }, { "epoch": 3.2095882781371836, "grad_norm": 6.72391414642334, "learning_rate": 2.8474090832258455e-05, "loss": 2.006283187866211, "memory(GiB)": 72.85, "step": 74915, "token_acc": 0.5986394557823129, "train_speed(iter/s)": 0.672746 }, { "epoch": 3.2098024934664324, "grad_norm": 5.871701717376709, "learning_rate": 2.8468016855886586e-05, "loss": 1.9801319122314454, "memory(GiB)": 72.85, "step": 74920, "token_acc": 0.5836298932384342, "train_speed(iter/s)": 0.672753 }, { "epoch": 3.2100167087956812, "grad_norm": 5.6641740798950195, "learning_rate": 2.8461943269586776e-05, "loss": 2.17335205078125, "memory(GiB)": 72.85, "step": 74925, "token_acc": 0.5211726384364821, "train_speed(iter/s)": 0.67275 }, { "epoch": 3.2102309241249305, "grad_norm": 8.47928524017334, "learning_rate": 2.845587007346908e-05, "loss": 2.006888198852539, "memory(GiB)": 72.85, "step": 74930, "token_acc": 0.563573883161512, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.2104451394541793, "grad_norm": 5.907646656036377, "learning_rate": 2.8449797267643514e-05, "loss": 1.6522771835327148, "memory(GiB)": 72.85, "step": 74935, "token_acc": 0.6408163265306123, "train_speed(iter/s)": 0.672742 }, { "epoch": 3.210659354783428, "grad_norm": 5.485136032104492, "learning_rate": 2.844372485222008e-05, "loss": 2.0466060638427734, "memory(GiB)": 72.85, "step": 74940, "token_acc": 0.49836065573770494, "train_speed(iter/s)": 
0.672727 }, { "epoch": 3.2108735701126774, "grad_norm": 5.371152400970459, "learning_rate": 2.8437652827308798e-05, "loss": 2.455161476135254, "memory(GiB)": 72.85, "step": 74945, "token_acc": 0.506896551724138, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.211087785441926, "grad_norm": 5.047054767608643, "learning_rate": 2.843158119301964e-05, "loss": 1.9711748123168946, "memory(GiB)": 72.85, "step": 74950, "token_acc": 0.6071428571428571, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.211302000771175, "grad_norm": 5.5357441902160645, "learning_rate": 2.842550994946264e-05, "loss": 2.2708621978759767, "memory(GiB)": 72.85, "step": 74955, "token_acc": 0.48028673835125446, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.2115162161004243, "grad_norm": 5.553022384643555, "learning_rate": 2.841943909674777e-05, "loss": 2.038047218322754, "memory(GiB)": 72.85, "step": 74960, "token_acc": 0.5521235521235521, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.211730431429673, "grad_norm": 5.853500843048096, "learning_rate": 2.8413368634985e-05, "loss": 1.9837623596191407, "memory(GiB)": 72.85, "step": 74965, "token_acc": 0.5678233438485805, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.211944646758922, "grad_norm": 5.344702243804932, "learning_rate": 2.840729856428429e-05, "loss": 2.019905662536621, "memory(GiB)": 72.85, "step": 74970, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.212158862088171, "grad_norm": 6.234258651733398, "learning_rate": 2.8401228884755636e-05, "loss": 2.433358001708984, "memory(GiB)": 72.85, "step": 74975, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.21237307741742, "grad_norm": 4.830449104309082, "learning_rate": 2.839515959650899e-05, "loss": 2.075764465332031, "memory(GiB)": 72.85, "step": 74980, "token_acc": 0.5387755102040817, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.2125872927466688, "grad_norm": 5.774769306182861, "learning_rate": 2.8389090699654275e-05, 
"loss": 2.0146913528442383, "memory(GiB)": 72.85, "step": 74985, "token_acc": 0.5570032573289903, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.212801508075918, "grad_norm": 6.7504496574401855, "learning_rate": 2.8383022194301466e-05, "loss": 1.9709705352783202, "memory(GiB)": 72.85, "step": 74990, "token_acc": 0.5134228187919463, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.213015723405167, "grad_norm": 4.581146240234375, "learning_rate": 2.8376954080560504e-05, "loss": 2.082554817199707, "memory(GiB)": 72.85, "step": 74995, "token_acc": 0.5382165605095541, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.2132299387344156, "grad_norm": 6.798693656921387, "learning_rate": 2.837088635854129e-05, "loss": 2.1579151153564453, "memory(GiB)": 72.85, "step": 75000, "token_acc": 0.5047923322683706, "train_speed(iter/s)": 0.672712 }, { "epoch": 3.2132299387344156, "eval_loss": 2.1797142028808594, "eval_runtime": 16.6124, "eval_samples_per_second": 6.02, "eval_steps_per_second": 6.02, "eval_token_acc": 0.47674418604651164, "step": 75000 }, { "epoch": 3.213444154063665, "grad_norm": 7.362617492675781, "learning_rate": 2.8364819028353762e-05, "loss": 1.851324462890625, "memory(GiB)": 72.85, "step": 75005, "token_acc": 0.4956855225311601, "train_speed(iter/s)": 0.672598 }, { "epoch": 3.2136583693929137, "grad_norm": 8.629291534423828, "learning_rate": 2.835875209010781e-05, "loss": 2.3903221130371093, "memory(GiB)": 72.85, "step": 75010, "token_acc": 0.511049723756906, "train_speed(iter/s)": 0.672598 }, { "epoch": 3.2138725847221625, "grad_norm": 5.835781574249268, "learning_rate": 2.8352685543913394e-05, "loss": 2.0062124252319338, "memory(GiB)": 72.85, "step": 75015, "token_acc": 0.5463576158940397, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.214086800051412, "grad_norm": 7.153323650360107, "learning_rate": 2.8346619389880374e-05, "loss": 2.2359201431274416, "memory(GiB)": 72.85, "step": 75020, "token_acc": 0.5131964809384164, "train_speed(iter/s)": 0.672596 }, { 
"epoch": 3.2143010153806606, "grad_norm": 7.010035037994385, "learning_rate": 2.834055362811866e-05, "loss": 1.9232454299926758, "memory(GiB)": 72.85, "step": 75025, "token_acc": 0.5364806866952789, "train_speed(iter/s)": 0.672586 }, { "epoch": 3.2145152307099094, "grad_norm": 6.726810455322266, "learning_rate": 2.8334488258738145e-05, "loss": 2.142752838134766, "memory(GiB)": 72.85, "step": 75030, "token_acc": 0.5298507462686567, "train_speed(iter/s)": 0.672593 }, { "epoch": 3.2147294460391587, "grad_norm": 6.4078288078308105, "learning_rate": 2.832842328184867e-05, "loss": 2.136793518066406, "memory(GiB)": 72.85, "step": 75035, "token_acc": 0.5168918918918919, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.2149436613684075, "grad_norm": 6.825883865356445, "learning_rate": 2.8322358697560157e-05, "loss": 2.184219741821289, "memory(GiB)": 72.85, "step": 75040, "token_acc": 0.5613382899628253, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.2151578766976563, "grad_norm": 7.036430835723877, "learning_rate": 2.831629450598243e-05, "loss": 2.036709213256836, "memory(GiB)": 72.85, "step": 75045, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.2153720920269055, "grad_norm": 7.385213851928711, "learning_rate": 2.8310230707225393e-05, "loss": 1.877681350708008, "memory(GiB)": 72.85, "step": 75050, "token_acc": 0.6, "train_speed(iter/s)": 0.672615 }, { "epoch": 3.2155863073561544, "grad_norm": 5.926187992095947, "learning_rate": 2.8304167301398875e-05, "loss": 2.169521713256836, "memory(GiB)": 72.85, "step": 75055, "token_acc": 0.5527272727272727, "train_speed(iter/s)": 0.672604 }, { "epoch": 3.215800522685403, "grad_norm": 6.766677379608154, "learning_rate": 2.8298104288612714e-05, "loss": 2.0566646575927736, "memory(GiB)": 72.85, "step": 75060, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.2160147380146524, "grad_norm": 5.067251205444336, "learning_rate": 2.8292041668976754e-05, "loss": 
2.162864875793457, "memory(GiB)": 72.85, "step": 75065, "token_acc": 0.5407166123778502, "train_speed(iter/s)": 0.672603 }, { "epoch": 3.2162289533439012, "grad_norm": 4.140871524810791, "learning_rate": 2.8285979442600806e-05, "loss": 2.0128271102905275, "memory(GiB)": 72.85, "step": 75070, "token_acc": 0.5391566265060241, "train_speed(iter/s)": 0.672603 }, { "epoch": 3.21644316867315, "grad_norm": 6.058709621429443, "learning_rate": 2.827991760959473e-05, "loss": 1.9880752563476562, "memory(GiB)": 72.85, "step": 75075, "token_acc": 0.5411764705882353, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.2166573840023993, "grad_norm": 6.391778945922852, "learning_rate": 2.8273856170068312e-05, "loss": 2.1273052215576174, "memory(GiB)": 72.85, "step": 75080, "token_acc": 0.524390243902439, "train_speed(iter/s)": 0.672589 }, { "epoch": 3.216871599331648, "grad_norm": 4.984568119049072, "learning_rate": 2.826779512413138e-05, "loss": 2.1429521560668947, "memory(GiB)": 72.85, "step": 75085, "token_acc": 0.5445544554455446, "train_speed(iter/s)": 0.672575 }, { "epoch": 3.217085814660897, "grad_norm": 5.8095173835754395, "learning_rate": 2.826173447189372e-05, "loss": 2.1548587799072267, "memory(GiB)": 72.85, "step": 75090, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.67258 }, { "epoch": 3.217300029990146, "grad_norm": 7.379960536956787, "learning_rate": 2.8255674213465117e-05, "loss": 2.0562490463256835, "memory(GiB)": 72.85, "step": 75095, "token_acc": 0.5604395604395604, "train_speed(iter/s)": 0.672582 }, { "epoch": 3.217514245319395, "grad_norm": 5.716922283172607, "learning_rate": 2.824961434895539e-05, "loss": 2.2913530349731444, "memory(GiB)": 72.85, "step": 75100, "token_acc": 0.48727272727272725, "train_speed(iter/s)": 0.67259 }, { "epoch": 3.217728460648644, "grad_norm": 5.890923500061035, "learning_rate": 2.8243554878474305e-05, "loss": 1.9371475219726562, "memory(GiB)": 72.85, "step": 75105, "token_acc": 0.5261437908496732, "train_speed(iter/s)": 
0.672601 }, { "epoch": 3.217942675977893, "grad_norm": 5.093511581420898, "learning_rate": 2.823749580213163e-05, "loss": 2.0040950775146484, "memory(GiB)": 72.85, "step": 75110, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.672592 }, { "epoch": 3.218156891307142, "grad_norm": 7.150288105010986, "learning_rate": 2.823143712003711e-05, "loss": 2.2019931793212892, "memory(GiB)": 72.85, "step": 75115, "token_acc": 0.53125, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.2183711066363907, "grad_norm": 6.030051231384277, "learning_rate": 2.8225378832300553e-05, "loss": 2.009062576293945, "memory(GiB)": 72.85, "step": 75120, "token_acc": 0.5421686746987951, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.21858532196564, "grad_norm": 5.848684787750244, "learning_rate": 2.821932093903169e-05, "loss": 1.9104446411132812, "memory(GiB)": 72.85, "step": 75125, "token_acc": 0.5615942028985508, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.2187995372948888, "grad_norm": 5.425237655639648, "learning_rate": 2.821326344034023e-05, "loss": 1.6811298370361327, "memory(GiB)": 72.85, "step": 75130, "token_acc": 0.5873015873015873, "train_speed(iter/s)": 0.67262 }, { "epoch": 3.2190137526241376, "grad_norm": 6.80735969543457, "learning_rate": 2.8207206336335966e-05, "loss": 1.9680908203125, "memory(GiB)": 72.85, "step": 75135, "token_acc": 0.6036363636363636, "train_speed(iter/s)": 0.672627 }, { "epoch": 3.219227967953387, "grad_norm": 6.8892035484313965, "learning_rate": 2.82011496271286e-05, "loss": 2.0473743438720704, "memory(GiB)": 72.85, "step": 75140, "token_acc": 0.5034246575342466, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.2194421832826356, "grad_norm": 7.298036098480225, "learning_rate": 2.819509331282786e-05, "loss": 1.7543649673461914, "memory(GiB)": 72.85, "step": 75145, "token_acc": 0.5729537366548043, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.2196563986118845, "grad_norm": 5.459255218505859, "learning_rate": 2.818903739354345e-05, "loss": 
1.9191829681396484, "memory(GiB)": 72.85, "step": 75150, "token_acc": 0.5676567656765676, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.2198706139411337, "grad_norm": 6.916747093200684, "learning_rate": 2.818298186938508e-05, "loss": 1.9324594497680665, "memory(GiB)": 72.85, "step": 75155, "token_acc": 0.5631067961165048, "train_speed(iter/s)": 0.672615 }, { "epoch": 3.2200848292703825, "grad_norm": 5.524806022644043, "learning_rate": 2.8176926740462473e-05, "loss": 2.071000099182129, "memory(GiB)": 72.85, "step": 75160, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.2202990445996313, "grad_norm": 4.809706687927246, "learning_rate": 2.8170872006885312e-05, "loss": 1.9095516204833984, "memory(GiB)": 72.85, "step": 75165, "token_acc": 0.5787965616045845, "train_speed(iter/s)": 0.672596 }, { "epoch": 3.2205132599288806, "grad_norm": 5.911842346191406, "learning_rate": 2.8164817668763278e-05, "loss": 2.050128173828125, "memory(GiB)": 72.85, "step": 75170, "token_acc": 0.547945205479452, "train_speed(iter/s)": 0.672588 }, { "epoch": 3.2207274752581294, "grad_norm": 4.380453109741211, "learning_rate": 2.815876372620606e-05, "loss": 2.2300689697265623, "memory(GiB)": 72.85, "step": 75175, "token_acc": 0.5373563218390804, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.2209416905873782, "grad_norm": 5.751181602478027, "learning_rate": 2.8152710179323305e-05, "loss": 2.354899787902832, "memory(GiB)": 72.85, "step": 75180, "token_acc": 0.5027322404371585, "train_speed(iter/s)": 0.672596 }, { "epoch": 3.2211559059166275, "grad_norm": 4.736215114593506, "learning_rate": 2.8146657028224722e-05, "loss": 1.935063362121582, "memory(GiB)": 72.85, "step": 75185, "token_acc": 0.5797101449275363, "train_speed(iter/s)": 0.672602 }, { "epoch": 3.2213701212458763, "grad_norm": 6.585012912750244, "learning_rate": 2.8140604273019922e-05, "loss": 2.0816402435302734, "memory(GiB)": 72.85, "step": 75190, "token_acc": 0.5239616613418531, 
"train_speed(iter/s)": 0.672599 }, { "epoch": 3.221584336575125, "grad_norm": 8.840418815612793, "learning_rate": 2.81345519138186e-05, "loss": 2.0624715805053713, "memory(GiB)": 72.85, "step": 75195, "token_acc": 0.5472972972972973, "train_speed(iter/s)": 0.6726 }, { "epoch": 3.2217985519043744, "grad_norm": 7.963643550872803, "learning_rate": 2.8128499950730387e-05, "loss": 2.0149959564208983, "memory(GiB)": 72.85, "step": 75200, "token_acc": 0.556923076923077, "train_speed(iter/s)": 0.672603 }, { "epoch": 3.222012767233623, "grad_norm": 6.869063377380371, "learning_rate": 2.8122448383864908e-05, "loss": 1.9483259201049805, "memory(GiB)": 72.85, "step": 75205, "token_acc": 0.5061728395061729, "train_speed(iter/s)": 0.672612 }, { "epoch": 3.222226982562872, "grad_norm": 5.056821823120117, "learning_rate": 2.8116397213331806e-05, "loss": 1.9231613159179688, "memory(GiB)": 72.85, "step": 75210, "token_acc": 0.5738255033557047, "train_speed(iter/s)": 0.672612 }, { "epoch": 3.2224411978921212, "grad_norm": 7.16187858581543, "learning_rate": 2.8110346439240675e-05, "loss": 2.0113061904907226, "memory(GiB)": 72.85, "step": 75215, "token_acc": 0.5427509293680297, "train_speed(iter/s)": 0.67262 }, { "epoch": 3.22265541322137, "grad_norm": 7.549469947814941, "learning_rate": 2.8104296061701173e-05, "loss": 1.734454345703125, "memory(GiB)": 72.85, "step": 75220, "token_acc": 0.5777777777777777, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.222869628550619, "grad_norm": 5.279513835906982, "learning_rate": 2.809824608082289e-05, "loss": 2.081013870239258, "memory(GiB)": 72.85, "step": 75225, "token_acc": 0.5427509293680297, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.223083843879868, "grad_norm": 5.957446575164795, "learning_rate": 2.809219649671542e-05, "loss": 2.0546747207641602, "memory(GiB)": 72.85, "step": 75230, "token_acc": 0.5608856088560885, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.223298059209117, "grad_norm": 7.955948829650879, "learning_rate": 
2.8086147309488364e-05, "loss": 2.143322563171387, "memory(GiB)": 72.85, "step": 75235, "token_acc": 0.5196850393700787, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.2235122745383658, "grad_norm": 5.632909297943115, "learning_rate": 2.8080098519251285e-05, "loss": 2.287617492675781, "memory(GiB)": 72.85, "step": 75240, "token_acc": 0.5051903114186851, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.223726489867615, "grad_norm": 14.113860130310059, "learning_rate": 2.8074050126113803e-05, "loss": 1.9645151138305663, "memory(GiB)": 72.85, "step": 75245, "token_acc": 0.5751633986928104, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.223940705196864, "grad_norm": 4.567779064178467, "learning_rate": 2.8068002130185466e-05, "loss": 2.0000099182128905, "memory(GiB)": 72.85, "step": 75250, "token_acc": 0.5598591549295775, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.2241549205261126, "grad_norm": 5.129379749298096, "learning_rate": 2.8061954531575852e-05, "loss": 2.427674102783203, "memory(GiB)": 72.85, "step": 75255, "token_acc": 0.5096774193548387, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.224369135855362, "grad_norm": 5.262463569641113, "learning_rate": 2.8055907330394514e-05, "loss": 2.2453615188598635, "memory(GiB)": 72.85, "step": 75260, "token_acc": 0.5375, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.2245833511846107, "grad_norm": 6.309487819671631, "learning_rate": 2.804986052675097e-05, "loss": 1.9603219985961915, "memory(GiB)": 72.85, "step": 75265, "token_acc": 0.5688073394495413, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.2247975665138595, "grad_norm": 6.958639621734619, "learning_rate": 2.8043814120754825e-05, "loss": 2.0555152893066406, "memory(GiB)": 72.85, "step": 75270, "token_acc": 0.5338345864661654, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.2250117818431088, "grad_norm": 6.1221418380737305, "learning_rate": 2.8037768112515554e-05, "loss": 2.014880561828613, "memory(GiB)": 72.85, "step": 75275, "token_acc": 0.5428571428571428, 
"train_speed(iter/s)": 0.672649 }, { "epoch": 3.2252259971723576, "grad_norm": 5.67822790145874, "learning_rate": 2.8031722502142742e-05, "loss": 2.1977495193481444, "memory(GiB)": 72.85, "step": 75280, "token_acc": 0.4984423676012461, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.2254402125016064, "grad_norm": 5.60606575012207, "learning_rate": 2.8025677289745884e-05, "loss": 2.138569641113281, "memory(GiB)": 72.85, "step": 75285, "token_acc": 0.5205479452054794, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.2256544278308557, "grad_norm": 5.798521518707275, "learning_rate": 2.8019632475434494e-05, "loss": 2.1530784606933593, "memory(GiB)": 72.85, "step": 75290, "token_acc": 0.5051194539249146, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.2258686431601045, "grad_norm": 6.9919233322143555, "learning_rate": 2.801358805931808e-05, "loss": 2.0676204681396486, "memory(GiB)": 72.85, "step": 75295, "token_acc": 0.5408560311284046, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.2260828584893533, "grad_norm": 7.4893879890441895, "learning_rate": 2.800754404150612e-05, "loss": 2.1790136337280273, "memory(GiB)": 72.85, "step": 75300, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.2262970738186025, "grad_norm": 5.754085063934326, "learning_rate": 2.8001500422108162e-05, "loss": 2.123111915588379, "memory(GiB)": 72.85, "step": 75305, "token_acc": 0.5464684014869888, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.2265112891478513, "grad_norm": 5.806827545166016, "learning_rate": 2.7995457201233648e-05, "loss": 2.1467426300048826, "memory(GiB)": 72.85, "step": 75310, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.2267255044771, "grad_norm": 5.610858917236328, "learning_rate": 2.7989414378992073e-05, "loss": 2.400162124633789, "memory(GiB)": 72.85, "step": 75315, "token_acc": 0.5194805194805194, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.2269397198063494, "grad_norm": 5.3852009773254395, 
"learning_rate": 2.798337195549291e-05, "loss": 1.7595495223999023, "memory(GiB)": 72.85, "step": 75320, "token_acc": 0.5774058577405857, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.2271539351355982, "grad_norm": 7.026219367980957, "learning_rate": 2.7977329930845587e-05, "loss": 1.8448020935058593, "memory(GiB)": 72.85, "step": 75325, "token_acc": 0.5855513307984791, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.227368150464847, "grad_norm": 5.363926410675049, "learning_rate": 2.7971288305159616e-05, "loss": 2.3306964874267577, "memory(GiB)": 72.85, "step": 75330, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.2275823657940963, "grad_norm": 7.475751876831055, "learning_rate": 2.796524707854442e-05, "loss": 2.2811744689941404, "memory(GiB)": 72.85, "step": 75335, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.227796581123345, "grad_norm": 7.382194995880127, "learning_rate": 2.7959206251109427e-05, "loss": 2.1076330184936523, "memory(GiB)": 72.85, "step": 75340, "token_acc": 0.5141843971631206, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.228010796452594, "grad_norm": 6.895776748657227, "learning_rate": 2.7953165822964113e-05, "loss": 2.2341293334960937, "memory(GiB)": 72.85, "step": 75345, "token_acc": 0.49544072948328266, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.228225011781843, "grad_norm": 6.025691509246826, "learning_rate": 2.7947125794217872e-05, "loss": 2.3218284606933595, "memory(GiB)": 72.85, "step": 75350, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.228439227111092, "grad_norm": 5.421307563781738, "learning_rate": 2.7941086164980135e-05, "loss": 1.9120073318481445, "memory(GiB)": 72.85, "step": 75355, "token_acc": 0.5612648221343873, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.228653442440341, "grad_norm": 6.097388744354248, "learning_rate": 2.7935046935360298e-05, "loss": 1.8132999420166016, "memory(GiB)": 72.85, "step": 75360, 
"token_acc": 0.5703703703703704, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.22886765776959, "grad_norm": 5.545856952667236, "learning_rate": 2.7929008105467808e-05, "loss": 2.0067676544189452, "memory(GiB)": 72.85, "step": 75365, "token_acc": 0.575107296137339, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.229081873098839, "grad_norm": 5.0166916847229, "learning_rate": 2.7922969675412036e-05, "loss": 2.217352294921875, "memory(GiB)": 72.85, "step": 75370, "token_acc": 0.49830508474576274, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.2292960884280877, "grad_norm": 7.257055282592773, "learning_rate": 2.791693164530238e-05, "loss": 1.8862829208374023, "memory(GiB)": 72.85, "step": 75375, "token_acc": 0.554006968641115, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.229510303757337, "grad_norm": 6.341777801513672, "learning_rate": 2.7910894015248224e-05, "loss": 2.377917671203613, "memory(GiB)": 72.85, "step": 75380, "token_acc": 0.4882943143812709, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.2297245190865858, "grad_norm": 5.180856704711914, "learning_rate": 2.7904856785358924e-05, "loss": 2.4161457061767577, "memory(GiB)": 72.85, "step": 75385, "token_acc": 0.4962121212121212, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.2299387344158346, "grad_norm": 6.379793643951416, "learning_rate": 2.7898819955743886e-05, "loss": 2.100288963317871, "memory(GiB)": 72.85, "step": 75390, "token_acc": 0.5767790262172284, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.230152949745084, "grad_norm": 5.711638927459717, "learning_rate": 2.7892783526512463e-05, "loss": 2.0217071533203126, "memory(GiB)": 72.85, "step": 75395, "token_acc": 0.5421245421245421, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.2303671650743326, "grad_norm": 6.231622695922852, "learning_rate": 2.7886747497774003e-05, "loss": 1.67655029296875, "memory(GiB)": 72.85, "step": 75400, "token_acc": 0.5723905723905723, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.2305813804035814, "grad_norm": 
9.02505111694336, "learning_rate": 2.7880711869637853e-05, "loss": 2.1540454864501952, "memory(GiB)": 72.85, "step": 75405, "token_acc": 0.5311355311355311, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.2307955957328307, "grad_norm": 6.614160060882568, "learning_rate": 2.7874676642213337e-05, "loss": 1.8395841598510743, "memory(GiB)": 72.85, "step": 75410, "token_acc": 0.5634920634920635, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.2310098110620795, "grad_norm": 5.73193883895874, "learning_rate": 2.786864181560983e-05, "loss": 2.4415273666381836, "memory(GiB)": 72.85, "step": 75415, "token_acc": 0.45454545454545453, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.2312240263913283, "grad_norm": 5.467795372009277, "learning_rate": 2.7862607389936617e-05, "loss": 2.1495243072509767, "memory(GiB)": 72.85, "step": 75420, "token_acc": 0.5261538461538462, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.2314382417205776, "grad_norm": 4.101316452026367, "learning_rate": 2.7856573365303063e-05, "loss": 2.2763645172119142, "memory(GiB)": 72.85, "step": 75425, "token_acc": 0.5070028011204482, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.2316524570498264, "grad_norm": 4.941257476806641, "learning_rate": 2.785053974181845e-05, "loss": 2.503748321533203, "memory(GiB)": 72.85, "step": 75430, "token_acc": 0.48184818481848185, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.231866672379075, "grad_norm": 4.452658653259277, "learning_rate": 2.7844506519592084e-05, "loss": 1.9677114486694336, "memory(GiB)": 72.85, "step": 75435, "token_acc": 0.5836177474402731, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.2320808877083245, "grad_norm": 4.939054489135742, "learning_rate": 2.783847369873327e-05, "loss": 2.069997024536133, "memory(GiB)": 72.85, "step": 75440, "token_acc": 0.5222929936305732, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.2322951030375733, "grad_norm": 4.303225517272949, "learning_rate": 2.7832441279351273e-05, "loss": 2.3254554748535154, "memory(GiB)": 72.85, 
"step": 75445, "token_acc": 0.5097402597402597, "train_speed(iter/s)": 0.672668 }, { "epoch": 3.2325093183668225, "grad_norm": 4.6100077629089355, "learning_rate": 2.782640926155542e-05, "loss": 2.0469627380371094, "memory(GiB)": 72.85, "step": 75450, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.2327235336960713, "grad_norm": 6.31783390045166, "learning_rate": 2.7820377645454958e-05, "loss": 2.2915699005126955, "memory(GiB)": 72.85, "step": 75455, "token_acc": 0.5215686274509804, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.23293774902532, "grad_norm": 6.311063766479492, "learning_rate": 2.7814346431159165e-05, "loss": 2.2442401885986327, "memory(GiB)": 72.85, "step": 75460, "token_acc": 0.5171232876712328, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.2331519643545694, "grad_norm": 5.44314432144165, "learning_rate": 2.7808315618777304e-05, "loss": 1.8481605529785157, "memory(GiB)": 72.85, "step": 75465, "token_acc": 0.5502958579881657, "train_speed(iter/s)": 0.672676 }, { "epoch": 3.2333661796838182, "grad_norm": 6.682504653930664, "learning_rate": 2.7802285208418588e-05, "loss": 2.0280513763427734, "memory(GiB)": 72.85, "step": 75470, "token_acc": 0.5513307984790875, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.233580395013067, "grad_norm": 5.467617511749268, "learning_rate": 2.7796255200192334e-05, "loss": 1.5926006317138672, "memory(GiB)": 72.85, "step": 75475, "token_acc": 0.5930735930735931, "train_speed(iter/s)": 0.672677 }, { "epoch": 3.2337946103423163, "grad_norm": 6.779176712036133, "learning_rate": 2.7790225594207736e-05, "loss": 2.075870323181152, "memory(GiB)": 72.85, "step": 75480, "token_acc": 0.5525291828793775, "train_speed(iter/s)": 0.672684 }, { "epoch": 3.234008825671565, "grad_norm": 5.7864670753479, "learning_rate": 2.778419639057402e-05, "loss": 1.8473941802978515, "memory(GiB)": 72.85, "step": 75485, "token_acc": 0.5652173913043478, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.234223041000814, 
"grad_norm": 6.3326802253723145, "learning_rate": 2.7778167589400444e-05, "loss": 2.0264961242675783, "memory(GiB)": 72.85, "step": 75490, "token_acc": 0.5048543689320388, "train_speed(iter/s)": 0.672699 }, { "epoch": 3.234437256330063, "grad_norm": 8.189825057983398, "learning_rate": 2.777213919079621e-05, "loss": 2.239430046081543, "memory(GiB)": 72.85, "step": 75495, "token_acc": 0.5441696113074205, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.234651471659312, "grad_norm": 4.681686878204346, "learning_rate": 2.7766111194870526e-05, "loss": 2.0247488021850586, "memory(GiB)": 72.85, "step": 75500, "token_acc": 0.50920245398773, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.234651471659312, "eval_loss": 2.137127637863159, "eval_runtime": 15.7784, "eval_samples_per_second": 6.338, "eval_steps_per_second": 6.338, "eval_token_acc": 0.5174825174825175, "step": 75500 }, { "epoch": 3.234865686988561, "grad_norm": 7.113305568695068, "learning_rate": 2.7760083601732577e-05, "loss": 2.136520195007324, "memory(GiB)": 72.85, "step": 75505, "token_acc": 0.5198412698412699, "train_speed(iter/s)": 0.672584 }, { "epoch": 3.23507990231781, "grad_norm": 4.901050090789795, "learning_rate": 2.7754056411491593e-05, "loss": 1.8756023406982423, "memory(GiB)": 72.85, "step": 75510, "token_acc": 0.555205047318612, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.235294117647059, "grad_norm": 6.2941999435424805, "learning_rate": 2.7748029624256745e-05, "loss": 2.239217758178711, "memory(GiB)": 72.85, "step": 75515, "token_acc": 0.476027397260274, "train_speed(iter/s)": 0.672566 }, { "epoch": 3.2355083329763077, "grad_norm": 5.780476093292236, "learning_rate": 2.7742003240137216e-05, "loss": 2.1879940032958984, "memory(GiB)": 72.85, "step": 75520, "token_acc": 0.5163636363636364, "train_speed(iter/s)": 0.672569 }, { "epoch": 3.235722548305557, "grad_norm": 6.071455478668213, "learning_rate": 2.7735977259242172e-05, "loss": 2.1204807281494142, "memory(GiB)": 72.85, "step": 75525, 
"token_acc": 0.5435435435435435, "train_speed(iter/s)": 0.67256 }, { "epoch": 3.2359367636348058, "grad_norm": 6.6635026931762695, "learning_rate": 2.7729951681680776e-05, "loss": 1.9074874877929688, "memory(GiB)": 72.85, "step": 75530, "token_acc": 0.5407407407407407, "train_speed(iter/s)": 0.672545 }, { "epoch": 3.2361509789640546, "grad_norm": 7.987886428833008, "learning_rate": 2.7723926507562215e-05, "loss": 2.2369548797607424, "memory(GiB)": 72.85, "step": 75535, "token_acc": 0.48717948717948717, "train_speed(iter/s)": 0.67255 }, { "epoch": 3.236365194293304, "grad_norm": 4.876842021942139, "learning_rate": 2.771790173699561e-05, "loss": 2.062304878234863, "memory(GiB)": 72.85, "step": 75540, "token_acc": 0.5522875816993464, "train_speed(iter/s)": 0.672546 }, { "epoch": 3.2365794096225526, "grad_norm": 5.83180570602417, "learning_rate": 2.7711877370090126e-05, "loss": 2.0478023529052733, "memory(GiB)": 72.85, "step": 75545, "token_acc": 0.5392857142857143, "train_speed(iter/s)": 0.672551 }, { "epoch": 3.2367936249518015, "grad_norm": 4.896720886230469, "learning_rate": 2.7705853406954896e-05, "loss": 1.8550174713134766, "memory(GiB)": 72.85, "step": 75550, "token_acc": 0.5766423357664233, "train_speed(iter/s)": 0.672554 }, { "epoch": 3.2370078402810507, "grad_norm": 5.574060440063477, "learning_rate": 2.769982984769902e-05, "loss": 2.15368709564209, "memory(GiB)": 72.85, "step": 75555, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.672569 }, { "epoch": 3.2372220556102995, "grad_norm": 6.2610859870910645, "learning_rate": 2.7693806692431662e-05, "loss": 2.1520938873291016, "memory(GiB)": 72.85, "step": 75560, "token_acc": 0.4873417721518987, "train_speed(iter/s)": 0.672569 }, { "epoch": 3.2374362709395483, "grad_norm": 4.521472454071045, "learning_rate": 2.7687783941261903e-05, "loss": 1.920712661743164, "memory(GiB)": 72.85, "step": 75565, "token_acc": 0.6123778501628665, "train_speed(iter/s)": 0.672563 }, { "epoch": 3.2376504862687976, 
"grad_norm": 4.873570919036865, "learning_rate": 2.7681761594298895e-05, "loss": 2.217479133605957, "memory(GiB)": 72.85, "step": 75570, "token_acc": 0.5052631578947369, "train_speed(iter/s)": 0.67257 }, { "epoch": 3.2378647015980464, "grad_norm": 6.213076591491699, "learning_rate": 2.76757396516517e-05, "loss": 2.000621795654297, "memory(GiB)": 72.85, "step": 75575, "token_acc": 0.5179856115107914, "train_speed(iter/s)": 0.672582 }, { "epoch": 3.238078916927295, "grad_norm": 9.310952186584473, "learning_rate": 2.7669718113429434e-05, "loss": 2.4058664321899412, "memory(GiB)": 72.85, "step": 75580, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.672582 }, { "epoch": 3.2382931322565445, "grad_norm": 6.306857109069824, "learning_rate": 2.7663696979741167e-05, "loss": 1.830731201171875, "memory(GiB)": 72.85, "step": 75585, "token_acc": 0.6017191977077364, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.2385073475857933, "grad_norm": 4.628521919250488, "learning_rate": 2.7657676250695964e-05, "loss": 1.7563873291015626, "memory(GiB)": 72.85, "step": 75590, "token_acc": 0.5665236051502146, "train_speed(iter/s)": 0.672571 }, { "epoch": 3.238721562915042, "grad_norm": 7.669726371765137, "learning_rate": 2.765165592640293e-05, "loss": 2.2410160064697267, "memory(GiB)": 72.85, "step": 75595, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.67257 }, { "epoch": 3.2389357782442914, "grad_norm": 6.950193405151367, "learning_rate": 2.7645636006971122e-05, "loss": 1.7637744903564454, "memory(GiB)": 72.85, "step": 75600, "token_acc": 0.5805243445692884, "train_speed(iter/s)": 0.67257 }, { "epoch": 3.23914999357354, "grad_norm": 4.9039530754089355, "learning_rate": 2.763961649250958e-05, "loss": 2.016330909729004, "memory(GiB)": 72.85, "step": 75605, "token_acc": 0.5792880258899676, "train_speed(iter/s)": 0.672578 }, { "epoch": 3.239364208902789, "grad_norm": 6.2689127922058105, "learning_rate": 2.7633597383127362e-05, "loss": 2.0550331115722655, "memory(GiB)": 
72.85, "step": 75610, "token_acc": 0.5, "train_speed(iter/s)": 0.67258 }, { "epoch": 3.2395784242320382, "grad_norm": 7.397874355316162, "learning_rate": 2.762757867893349e-05, "loss": 1.9598096847534179, "memory(GiB)": 72.85, "step": 75615, "token_acc": 0.5761194029850746, "train_speed(iter/s)": 0.672588 }, { "epoch": 3.239792639561287, "grad_norm": 5.939125061035156, "learning_rate": 2.7621560380037036e-05, "loss": 2.2005025863647463, "memory(GiB)": 72.85, "step": 75620, "token_acc": 0.4766666666666667, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.240006854890536, "grad_norm": 4.810330867767334, "learning_rate": 2.761554248654701e-05, "loss": 2.220943641662598, "memory(GiB)": 72.85, "step": 75625, "token_acc": 0.5160256410256411, "train_speed(iter/s)": 0.672567 }, { "epoch": 3.240221070219785, "grad_norm": 7.005712985992432, "learning_rate": 2.7609524998572433e-05, "loss": 1.7996755599975587, "memory(GiB)": 72.85, "step": 75630, "token_acc": 0.5604395604395604, "train_speed(iter/s)": 0.672565 }, { "epoch": 3.240435285549034, "grad_norm": 6.421493053436279, "learning_rate": 2.760350791622229e-05, "loss": 2.1061553955078125, "memory(GiB)": 72.85, "step": 75635, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672566 }, { "epoch": 3.2406495008782827, "grad_norm": 6.912929534912109, "learning_rate": 2.7597491239605634e-05, "loss": 2.3138561248779297, "memory(GiB)": 72.85, "step": 75640, "token_acc": 0.5246478873239436, "train_speed(iter/s)": 0.672566 }, { "epoch": 3.240863716207532, "grad_norm": 6.299856662750244, "learning_rate": 2.7591474968831434e-05, "loss": 2.2989511489868164, "memory(GiB)": 72.85, "step": 75645, "token_acc": 0.5578231292517006, "train_speed(iter/s)": 0.672576 }, { "epoch": 3.241077931536781, "grad_norm": 3.8197813034057617, "learning_rate": 2.758545910400867e-05, "loss": 2.242518997192383, "memory(GiB)": 72.85, "step": 75650, "token_acc": 0.511864406779661, "train_speed(iter/s)": 0.672575 }, { "epoch": 3.2412921468660296, 
"grad_norm": 5.353216648101807, "learning_rate": 2.757944364524636e-05, "loss": 2.200457763671875, "memory(GiB)": 72.85, "step": 75655, "token_acc": 0.5686274509803921, "train_speed(iter/s)": 0.672582 }, { "epoch": 3.241506362195279, "grad_norm": 5.613381385803223, "learning_rate": 2.7573428592653462e-05, "loss": 2.1147972106933595, "memory(GiB)": 72.85, "step": 75660, "token_acc": 0.523943661971831, "train_speed(iter/s)": 0.672576 }, { "epoch": 3.2417205775245277, "grad_norm": 5.04160737991333, "learning_rate": 2.7567413946338943e-05, "loss": 2.1540817260742187, "memory(GiB)": 72.85, "step": 75665, "token_acc": 0.5578947368421052, "train_speed(iter/s)": 0.672575 }, { "epoch": 3.2419347928537765, "grad_norm": 5.7286458015441895, "learning_rate": 2.756139970641176e-05, "loss": 2.1696529388427734, "memory(GiB)": 72.85, "step": 75670, "token_acc": 0.5108695652173914, "train_speed(iter/s)": 0.672575 }, { "epoch": 3.2421490081830258, "grad_norm": 6.775607109069824, "learning_rate": 2.7555385872980856e-05, "loss": 2.508247184753418, "memory(GiB)": 72.85, "step": 75675, "token_acc": 0.51, "train_speed(iter/s)": 0.672575 }, { "epoch": 3.2423632235122746, "grad_norm": 4.730298042297363, "learning_rate": 2.754937244615521e-05, "loss": 1.8560325622558593, "memory(GiB)": 72.85, "step": 75680, "token_acc": 0.5891238670694864, "train_speed(iter/s)": 0.672565 }, { "epoch": 3.2425774388415234, "grad_norm": 5.021498680114746, "learning_rate": 2.754335942604374e-05, "loss": 2.1657859802246096, "memory(GiB)": 72.85, "step": 75685, "token_acc": 0.5444444444444444, "train_speed(iter/s)": 0.672567 }, { "epoch": 3.2427916541707726, "grad_norm": 5.2237091064453125, "learning_rate": 2.753734681275538e-05, "loss": 2.1478734970092774, "memory(GiB)": 72.85, "step": 75690, "token_acc": 0.5525291828793775, "train_speed(iter/s)": 0.672563 }, { "epoch": 3.2430058695000215, "grad_norm": 4.396772861480713, "learning_rate": 2.7531334606399057e-05, "loss": 2.189606475830078, "memory(GiB)": 72.85, 
"step": 75695, "token_acc": 0.513677811550152, "train_speed(iter/s)": 0.672553 }, { "epoch": 3.2432200848292703, "grad_norm": 7.067743301391602, "learning_rate": 2.7525322807083664e-05, "loss": 1.7339727401733398, "memory(GiB)": 72.85, "step": 75700, "token_acc": 0.5779816513761468, "train_speed(iter/s)": 0.67255 }, { "epoch": 3.2434343001585195, "grad_norm": 5.288275718688965, "learning_rate": 2.7519311414918147e-05, "loss": 2.0356979370117188, "memory(GiB)": 72.85, "step": 75705, "token_acc": 0.5498392282958199, "train_speed(iter/s)": 0.672556 }, { "epoch": 3.2436485154877683, "grad_norm": 5.4627532958984375, "learning_rate": 2.751330043001138e-05, "loss": 2.217751884460449, "memory(GiB)": 72.85, "step": 75710, "token_acc": 0.524822695035461, "train_speed(iter/s)": 0.672565 }, { "epoch": 3.243862730817017, "grad_norm": 5.458895206451416, "learning_rate": 2.7507289852472284e-05, "loss": 2.102911949157715, "memory(GiB)": 72.85, "step": 75715, "token_acc": 0.555921052631579, "train_speed(iter/s)": 0.672573 }, { "epoch": 3.2440769461462664, "grad_norm": 5.945850849151611, "learning_rate": 2.7501279682409735e-05, "loss": 1.9373285293579101, "memory(GiB)": 72.85, "step": 75720, "token_acc": 0.5728476821192053, "train_speed(iter/s)": 0.672572 }, { "epoch": 3.244291161475515, "grad_norm": 5.493580341339111, "learning_rate": 2.749526991993261e-05, "loss": 2.136832618713379, "memory(GiB)": 72.85, "step": 75725, "token_acc": 0.5094339622641509, "train_speed(iter/s)": 0.67257 }, { "epoch": 3.244505376804764, "grad_norm": 6.014963150024414, "learning_rate": 2.7489260565149783e-05, "loss": 2.047388458251953, "memory(GiB)": 72.85, "step": 75730, "token_acc": 0.5317919075144508, "train_speed(iter/s)": 0.672567 }, { "epoch": 3.2447195921340133, "grad_norm": 4.768183708190918, "learning_rate": 2.748325161817009e-05, "loss": 2.2638666152954103, "memory(GiB)": 72.85, "step": 75735, "token_acc": 0.5203761755485894, "train_speed(iter/s)": 0.672568 }, { "epoch": 3.244933807463262, 
"grad_norm": 8.762370109558105, "learning_rate": 2.7477243079102445e-05, "loss": 1.9649774551391601, "memory(GiB)": 72.85, "step": 75740, "token_acc": 0.5654952076677316, "train_speed(iter/s)": 0.672573 }, { "epoch": 3.245148022792511, "grad_norm": 5.230297088623047, "learning_rate": 2.747123494805567e-05, "loss": 1.955467987060547, "memory(GiB)": 72.85, "step": 75745, "token_acc": 0.5608108108108109, "train_speed(iter/s)": 0.672589 }, { "epoch": 3.24536223812176, "grad_norm": 6.277017593383789, "learning_rate": 2.7465227225138603e-05, "loss": 1.8421638488769532, "memory(GiB)": 72.85, "step": 75750, "token_acc": 0.5945121951219512, "train_speed(iter/s)": 0.672587 }, { "epoch": 3.245576453451009, "grad_norm": 6.220486164093018, "learning_rate": 2.7459219910460084e-05, "loss": 2.0260847091674803, "memory(GiB)": 72.85, "step": 75755, "token_acc": 0.5425531914893617, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.245790668780258, "grad_norm": 7.375682830810547, "learning_rate": 2.745321300412892e-05, "loss": 2.058018684387207, "memory(GiB)": 72.85, "step": 75760, "token_acc": 0.5774647887323944, "train_speed(iter/s)": 0.672598 }, { "epoch": 3.246004884109507, "grad_norm": 6.5482282638549805, "learning_rate": 2.7447206506253974e-05, "loss": 2.3479902267456056, "memory(GiB)": 72.85, "step": 75765, "token_acc": 0.5280898876404494, "train_speed(iter/s)": 0.672611 }, { "epoch": 3.246219099438756, "grad_norm": 5.649763584136963, "learning_rate": 2.7441200416944048e-05, "loss": 2.0422428131103514, "memory(GiB)": 72.85, "step": 75770, "token_acc": 0.5741444866920152, "train_speed(iter/s)": 0.672617 }, { "epoch": 3.2464333147680047, "grad_norm": 5.958280563354492, "learning_rate": 2.7435194736307924e-05, "loss": 1.940190887451172, "memory(GiB)": 72.85, "step": 75775, "token_acc": 0.5203761755485894, "train_speed(iter/s)": 0.672598 }, { "epoch": 3.246647530097254, "grad_norm": 6.891191482543945, "learning_rate": 2.7429189464454418e-05, "loss": 2.0861677169799804, "memory(GiB)": 
72.85, "step": 75780, "token_acc": 0.5535168195718655, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.2468617454265027, "grad_norm": 8.311568260192871, "learning_rate": 2.7423184601492304e-05, "loss": 2.056314468383789, "memory(GiB)": 72.85, "step": 75785, "token_acc": 0.5527156549520766, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.2470759607557516, "grad_norm": 5.797309875488281, "learning_rate": 2.7417180147530396e-05, "loss": 2.628848648071289, "memory(GiB)": 72.85, "step": 75790, "token_acc": 0.504885993485342, "train_speed(iter/s)": 0.672611 }, { "epoch": 3.247290176085001, "grad_norm": 6.130356311798096, "learning_rate": 2.741117610267744e-05, "loss": 2.339076614379883, "memory(GiB)": 72.85, "step": 75795, "token_acc": 0.5123456790123457, "train_speed(iter/s)": 0.672617 }, { "epoch": 3.2475043914142496, "grad_norm": 5.37648344039917, "learning_rate": 2.7405172467042227e-05, "loss": 1.8994003295898438, "memory(GiB)": 72.85, "step": 75800, "token_acc": 0.5604026845637584, "train_speed(iter/s)": 0.672618 }, { "epoch": 3.2477186067434984, "grad_norm": 4.987094402313232, "learning_rate": 2.7399169240733525e-05, "loss": 2.0371883392333983, "memory(GiB)": 72.85, "step": 75805, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672622 }, { "epoch": 3.2479328220727477, "grad_norm": 7.562342166900635, "learning_rate": 2.739316642386006e-05, "loss": 1.9646621704101563, "memory(GiB)": 72.85, "step": 75810, "token_acc": 0.5676567656765676, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.2481470374019965, "grad_norm": 5.428618431091309, "learning_rate": 2.73871640165306e-05, "loss": 2.4086536407470702, "memory(GiB)": 72.85, "step": 75815, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.2483612527312453, "grad_norm": 5.409314155578613, "learning_rate": 2.7381162018853856e-05, "loss": 1.976824951171875, "memory(GiB)": 72.85, "step": 75820, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672643 }, { "epoch": 
3.2485754680604946, "grad_norm": 5.144061088562012, "learning_rate": 2.7375160430938595e-05, "loss": 1.9955350875854492, "memory(GiB)": 72.85, "step": 75825, "token_acc": 0.5125448028673835, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.2487896833897434, "grad_norm": 6.333273887634277, "learning_rate": 2.7369159252893528e-05, "loss": 2.0715564727783202, "memory(GiB)": 72.85, "step": 75830, "token_acc": 0.5662650602409639, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.249003898718992, "grad_norm": 5.527255535125732, "learning_rate": 2.736315848482738e-05, "loss": 2.2722309112548826, "memory(GiB)": 72.85, "step": 75835, "token_acc": 0.54, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.2492181140482415, "grad_norm": 10.588712692260742, "learning_rate": 2.7357158126848837e-05, "loss": 2.125989532470703, "memory(GiB)": 72.85, "step": 75840, "token_acc": 0.5446428571428571, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.2494323293774903, "grad_norm": 6.7360639572143555, "learning_rate": 2.7351158179066606e-05, "loss": 2.171347427368164, "memory(GiB)": 72.85, "step": 75845, "token_acc": 0.5196078431372549, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.249646544706739, "grad_norm": 6.226277828216553, "learning_rate": 2.7345158641589408e-05, "loss": 2.082914924621582, "memory(GiB)": 72.85, "step": 75850, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.67262 }, { "epoch": 3.2498607600359883, "grad_norm": 5.957159996032715, "learning_rate": 2.7339159514525893e-05, "loss": 2.00655517578125, "memory(GiB)": 72.85, "step": 75855, "token_acc": 0.5525423728813559, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.250074975365237, "grad_norm": 6.55410099029541, "learning_rate": 2.733316079798478e-05, "loss": 2.1815900802612305, "memory(GiB)": 72.85, "step": 75860, "token_acc": 0.5212765957446809, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.250289190694486, "grad_norm": 4.569135665893555, "learning_rate": 2.732716249207472e-05, "loss": 2.2360591888427734, 
"memory(GiB)": 72.85, "step": 75865, "token_acc": 0.5193798449612403, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.250503406023735, "grad_norm": 5.168656349182129, "learning_rate": 2.732116459690438e-05, "loss": 2.1184839248657226, "memory(GiB)": 72.85, "step": 75870, "token_acc": 0.5182926829268293, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.250717621352984, "grad_norm": 6.162555694580078, "learning_rate": 2.7315167112582424e-05, "loss": 2.133525848388672, "memory(GiB)": 72.85, "step": 75875, "token_acc": 0.516260162601626, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.250931836682233, "grad_norm": 5.752391815185547, "learning_rate": 2.730917003921747e-05, "loss": 2.0435297012329103, "memory(GiB)": 72.85, "step": 75880, "token_acc": 0.5341246290801187, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.251146052011482, "grad_norm": 5.984940528869629, "learning_rate": 2.7303173376918212e-05, "loss": 2.1270790100097656, "memory(GiB)": 72.85, "step": 75885, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.251360267340731, "grad_norm": 6.222224235534668, "learning_rate": 2.7297177125793257e-05, "loss": 2.134827995300293, "memory(GiB)": 72.85, "step": 75890, "token_acc": 0.4963235294117647, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.2515744826699797, "grad_norm": 4.922737121582031, "learning_rate": 2.7291181285951227e-05, "loss": 1.8130626678466797, "memory(GiB)": 72.85, "step": 75895, "token_acc": 0.6007604562737643, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.251788697999229, "grad_norm": 4.428775787353516, "learning_rate": 2.728518585750076e-05, "loss": 2.025950050354004, "memory(GiB)": 72.85, "step": 75900, "token_acc": 0.5575539568345323, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.252002913328478, "grad_norm": 7.172867774963379, "learning_rate": 2.727919084055044e-05, "loss": 2.0232261657714843, "memory(GiB)": 72.85, "step": 75905, "token_acc": 0.5132450331125827, "train_speed(iter/s)": 0.672643 }, { "epoch": 
3.2522171286577266, "grad_norm": 7.870923042297363, "learning_rate": 2.7273196235208907e-05, "loss": 2.066594123840332, "memory(GiB)": 72.85, "step": 75910, "token_acc": 0.5593869731800766, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.252431343986976, "grad_norm": 6.666689395904541, "learning_rate": 2.7267202041584748e-05, "loss": 2.170262908935547, "memory(GiB)": 72.85, "step": 75915, "token_acc": 0.546875, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.2526455593162247, "grad_norm": 6.7725348472595215, "learning_rate": 2.726120825978655e-05, "loss": 2.256632614135742, "memory(GiB)": 72.85, "step": 75920, "token_acc": 0.509090909090909, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.2528597746454735, "grad_norm": 5.0864667892456055, "learning_rate": 2.7255214889922892e-05, "loss": 1.9716756820678711, "memory(GiB)": 72.85, "step": 75925, "token_acc": 0.568904593639576, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.2530739899747227, "grad_norm": 6.834656238555908, "learning_rate": 2.724922193210233e-05, "loss": 1.8095151901245117, "memory(GiB)": 72.85, "step": 75930, "token_acc": 0.590443686006826, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.2532882053039716, "grad_norm": 6.233514785766602, "learning_rate": 2.7243229386433476e-05, "loss": 1.9598634719848633, "memory(GiB)": 72.85, "step": 75935, "token_acc": 0.5377358490566038, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.2535024206332204, "grad_norm": 4.408251762390137, "learning_rate": 2.7237237253024854e-05, "loss": 2.0701730728149412, "memory(GiB)": 72.85, "step": 75940, "token_acc": 0.5322033898305085, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.2537166359624696, "grad_norm": 5.4877119064331055, "learning_rate": 2.723124553198505e-05, "loss": 2.0863203048706054, "memory(GiB)": 72.85, "step": 75945, "token_acc": 0.528052805280528, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.2539308512917184, "grad_norm": 7.018387317657471, "learning_rate": 2.722525422342259e-05, "loss": 2.1528072357177734, 
"memory(GiB)": 72.85, "step": 75950, "token_acc": 0.534965034965035, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.2541450666209673, "grad_norm": 6.910305976867676, "learning_rate": 2.721926332744602e-05, "loss": 2.009551239013672, "memory(GiB)": 72.85, "step": 75955, "token_acc": 0.540650406504065, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.2543592819502165, "grad_norm": 6.328810214996338, "learning_rate": 2.7213272844163863e-05, "loss": 2.0034637451171875, "memory(GiB)": 72.85, "step": 75960, "token_acc": 0.534965034965035, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.2545734972794653, "grad_norm": 6.118366718292236, "learning_rate": 2.7207282773684617e-05, "loss": 2.3020309448242187, "memory(GiB)": 72.85, "step": 75965, "token_acc": 0.5078125, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.254787712608714, "grad_norm": 4.6966633796691895, "learning_rate": 2.720129311611685e-05, "loss": 2.157938766479492, "memory(GiB)": 72.85, "step": 75970, "token_acc": 0.528957528957529, "train_speed(iter/s)": 0.672678 }, { "epoch": 3.2550019279379634, "grad_norm": 5.242419242858887, "learning_rate": 2.7195303871569044e-05, "loss": 2.3550769805908205, "memory(GiB)": 72.85, "step": 75975, "token_acc": 0.5179153094462541, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.255216143267212, "grad_norm": 5.486394882202148, "learning_rate": 2.71893150401497e-05, "loss": 2.3055259704589846, "memory(GiB)": 72.85, "step": 75980, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.672692 }, { "epoch": 3.255430358596461, "grad_norm": 5.924442291259766, "learning_rate": 2.7183326621967313e-05, "loss": 2.019839286804199, "memory(GiB)": 72.85, "step": 75985, "token_acc": 0.5506756756756757, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.2556445739257103, "grad_norm": 5.525440216064453, "learning_rate": 2.717733861713035e-05, "loss": 1.912013053894043, "memory(GiB)": 72.85, "step": 75990, "token_acc": 0.5904059040590406, "train_speed(iter/s)": 0.672694 }, { "epoch": 
3.255858789254959, "grad_norm": 10.512439727783203, "learning_rate": 2.7171351025747326e-05, "loss": 2.3668758392333986, "memory(GiB)": 72.85, "step": 75995, "token_acc": 0.5397489539748954, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.256073004584208, "grad_norm": 5.554982662200928, "learning_rate": 2.7165363847926694e-05, "loss": 2.3293582916259767, "memory(GiB)": 72.85, "step": 76000, "token_acc": 0.49554896142433236, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.256073004584208, "eval_loss": 1.9251514673233032, "eval_runtime": 14.5604, "eval_samples_per_second": 6.868, "eval_steps_per_second": 6.868, "eval_token_acc": 0.5129310344827587, "step": 76000 }, { "epoch": 3.256287219913457, "grad_norm": 5.003957748413086, "learning_rate": 2.7159377083776895e-05, "loss": 2.2425474166870116, "memory(GiB)": 72.85, "step": 76005, "token_acc": 0.5113300492610837, "train_speed(iter/s)": 0.672586 }, { "epoch": 3.256501435242706, "grad_norm": 6.827089786529541, "learning_rate": 2.7153390733406424e-05, "loss": 2.2306650161743162, "memory(GiB)": 72.85, "step": 76010, "token_acc": 0.49310344827586206, "train_speed(iter/s)": 0.672586 }, { "epoch": 3.256715650571955, "grad_norm": 4.313132286071777, "learning_rate": 2.7147404796923716e-05, "loss": 2.392689514160156, "memory(GiB)": 72.85, "step": 76015, "token_acc": 0.4928571428571429, "train_speed(iter/s)": 0.672588 }, { "epoch": 3.256929865901204, "grad_norm": 7.168797969818115, "learning_rate": 2.7141419274437207e-05, "loss": 2.091935920715332, "memory(GiB)": 72.85, "step": 76020, "token_acc": 0.5415282392026578, "train_speed(iter/s)": 0.672595 }, { "epoch": 3.257144081230453, "grad_norm": 7.295015811920166, "learning_rate": 2.7135434166055308e-05, "loss": 1.960594940185547, "memory(GiB)": 72.85, "step": 76025, "token_acc": 0.5482758620689655, "train_speed(iter/s)": 0.672589 }, { "epoch": 3.2573582965597017, "grad_norm": 5.822306156158447, "learning_rate": 2.7129449471886492e-05, "loss": 2.2376922607421874, "memory(GiB)": 
72.85, "step": 76030, "token_acc": 0.5323741007194245, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.257572511888951, "grad_norm": 4.714992046356201, "learning_rate": 2.7123465192039154e-05, "loss": 1.9071903228759766, "memory(GiB)": 72.85, "step": 76035, "token_acc": 0.597457627118644, "train_speed(iter/s)": 0.672548 }, { "epoch": 3.2577867272181997, "grad_norm": 5.271762847900391, "learning_rate": 2.7117481326621695e-05, "loss": 1.7567468643188477, "memory(GiB)": 72.85, "step": 76040, "token_acc": 0.6104868913857678, "train_speed(iter/s)": 0.672547 }, { "epoch": 3.2580009425474485, "grad_norm": 6.635909557342529, "learning_rate": 2.7111497875742537e-05, "loss": 2.400892639160156, "memory(GiB)": 72.85, "step": 76045, "token_acc": 0.5, "train_speed(iter/s)": 0.672553 }, { "epoch": 3.258215157876698, "grad_norm": 6.530038356781006, "learning_rate": 2.7105514839510043e-05, "loss": 2.416648101806641, "memory(GiB)": 72.85, "step": 76050, "token_acc": 0.5153846153846153, "train_speed(iter/s)": 0.672556 }, { "epoch": 3.2584293732059466, "grad_norm": 4.825953006744385, "learning_rate": 2.709953221803264e-05, "loss": 2.0612295150756834, "memory(GiB)": 72.85, "step": 76055, "token_acc": 0.5412186379928315, "train_speed(iter/s)": 0.672564 }, { "epoch": 3.2586435885351954, "grad_norm": 5.262447834014893, "learning_rate": 2.70935500114187e-05, "loss": 1.9883888244628907, "memory(GiB)": 72.85, "step": 76060, "token_acc": 0.4938650306748466, "train_speed(iter/s)": 0.67257 }, { "epoch": 3.2588578038644447, "grad_norm": 5.807671070098877, "learning_rate": 2.7087568219776587e-05, "loss": 2.1091129302978517, "memory(GiB)": 72.85, "step": 76065, "token_acc": 0.5734767025089605, "train_speed(iter/s)": 0.672571 }, { "epoch": 3.2590720191936935, "grad_norm": 4.401711940765381, "learning_rate": 2.708158684321467e-05, "loss": 2.1632322311401366, "memory(GiB)": 72.85, "step": 76070, "token_acc": 0.5209790209790209, "train_speed(iter/s)": 0.672566 }, { "epoch": 3.2592862345229423, 
"grad_norm": 5.101068019866943, "learning_rate": 2.7075605881841305e-05, "loss": 2.5227676391601563, "memory(GiB)": 72.85, "step": 76075, "token_acc": 0.4645161290322581, "train_speed(iter/s)": 0.672566 }, { "epoch": 3.2595004498521916, "grad_norm": 4.504432201385498, "learning_rate": 2.7069625335764827e-05, "loss": 2.487583351135254, "memory(GiB)": 72.85, "step": 76080, "token_acc": 0.495114006514658, "train_speed(iter/s)": 0.672574 }, { "epoch": 3.2597146651814404, "grad_norm": 5.470425605773926, "learning_rate": 2.706364520509359e-05, "loss": 2.277562713623047, "memory(GiB)": 72.85, "step": 76085, "token_acc": 0.5248447204968945, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.259928880510689, "grad_norm": 5.963515281677246, "learning_rate": 2.7057665489935964e-05, "loss": 2.396292495727539, "memory(GiB)": 72.85, "step": 76090, "token_acc": 0.47183098591549294, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.2601430958399384, "grad_norm": 5.108076095581055, "learning_rate": 2.7051686190400242e-05, "loss": 2.1858882904052734, "memory(GiB)": 72.85, "step": 76095, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.672599 }, { "epoch": 3.2603573111691873, "grad_norm": 4.61378288269043, "learning_rate": 2.704570730659476e-05, "loss": 1.8934892654418944, "memory(GiB)": 72.85, "step": 76100, "token_acc": 0.5429447852760736, "train_speed(iter/s)": 0.672602 }, { "epoch": 3.260571526498436, "grad_norm": 5.8581953048706055, "learning_rate": 2.7039728838627813e-05, "loss": 2.550726318359375, "memory(GiB)": 72.85, "step": 76105, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.672604 }, { "epoch": 3.2607857418276853, "grad_norm": 6.043788433074951, "learning_rate": 2.7033750786607702e-05, "loss": 2.284978485107422, "memory(GiB)": 72.85, "step": 76110, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672604 }, { "epoch": 3.260999957156934, "grad_norm": 4.462137699127197, "learning_rate": 2.7028968644546154e-05, "loss": 2.2762884140014648, 
"memory(GiB)": 72.85, "step": 76115, "token_acc": 0.5361216730038023, "train_speed(iter/s)": 0.672592 }, { "epoch": 3.261214172486183, "grad_norm": 4.897885322570801, "learning_rate": 2.7022991341503302e-05, "loss": 2.058791732788086, "memory(GiB)": 72.85, "step": 76120, "token_acc": 0.5675675675675675, "train_speed(iter/s)": 0.672597 }, { "epoch": 3.261428387815432, "grad_norm": 6.921424865722656, "learning_rate": 2.7017014454710492e-05, "loss": 2.3402734756469727, "memory(GiB)": 72.85, "step": 76125, "token_acc": 0.4820846905537459, "train_speed(iter/s)": 0.672598 }, { "epoch": 3.261642603144681, "grad_norm": 4.7635016441345215, "learning_rate": 2.7011037984276043e-05, "loss": 1.9336875915527343, "memory(GiB)": 72.85, "step": 76130, "token_acc": 0.5824915824915825, "train_speed(iter/s)": 0.6726 }, { "epoch": 3.26185681847393, "grad_norm": 4.5239410400390625, "learning_rate": 2.7005061930308195e-05, "loss": 2.156184959411621, "memory(GiB)": 72.85, "step": 76135, "token_acc": 0.541501976284585, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.262071033803179, "grad_norm": 5.679836750030518, "learning_rate": 2.6999086292915234e-05, "loss": 2.140398406982422, "memory(GiB)": 72.85, "step": 76140, "token_acc": 0.5444743935309974, "train_speed(iter/s)": 0.672603 }, { "epoch": 3.262285249132428, "grad_norm": 4.911476135253906, "learning_rate": 2.6993111072205406e-05, "loss": 2.0214313507080077, "memory(GiB)": 72.85, "step": 76145, "token_acc": 0.5893536121673004, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.2624994644616767, "grad_norm": 7.19293737411499, "learning_rate": 2.698713626828695e-05, "loss": 1.903325080871582, "memory(GiB)": 72.85, "step": 76150, "token_acc": 0.5448717948717948, "train_speed(iter/s)": 0.672602 }, { "epoch": 3.262713679790926, "grad_norm": 7.287982940673828, "learning_rate": 2.6981161881268113e-05, "loss": 1.7817859649658203, "memory(GiB)": 72.85, "step": 76155, "token_acc": 0.572, "train_speed(iter/s)": 0.672597 }, { "epoch": 
3.262927895120175, "grad_norm": 4.875372886657715, "learning_rate": 2.6975187911257116e-05, "loss": 2.312177848815918, "memory(GiB)": 72.85, "step": 76160, "token_acc": 0.4913294797687861, "train_speed(iter/s)": 0.672597 }, { "epoch": 3.2631421104494236, "grad_norm": 7.242611885070801, "learning_rate": 2.696921435836217e-05, "loss": 2.068851280212402, "memory(GiB)": 72.85, "step": 76165, "token_acc": 0.5342465753424658, "train_speed(iter/s)": 0.672584 }, { "epoch": 3.263356325778673, "grad_norm": 4.4569525718688965, "learning_rate": 2.6963241222691533e-05, "loss": 2.27257080078125, "memory(GiB)": 72.85, "step": 76170, "token_acc": 0.5033783783783784, "train_speed(iter/s)": 0.67259 }, { "epoch": 3.2635705411079217, "grad_norm": 5.03694486618042, "learning_rate": 2.6957268504353394e-05, "loss": 2.252908706665039, "memory(GiB)": 72.85, "step": 76175, "token_acc": 0.48909657320872274, "train_speed(iter/s)": 0.67258 }, { "epoch": 3.2637847564371705, "grad_norm": 4.615447998046875, "learning_rate": 2.6951296203455945e-05, "loss": 2.355886459350586, "memory(GiB)": 72.85, "step": 76180, "token_acc": 0.4983164983164983, "train_speed(iter/s)": 0.672584 }, { "epoch": 3.2639989717664197, "grad_norm": 6.107265472412109, "learning_rate": 2.694532432010739e-05, "loss": 2.053746223449707, "memory(GiB)": 72.85, "step": 76185, "token_acc": 0.5477178423236515, "train_speed(iter/s)": 0.672572 }, { "epoch": 3.2642131870956685, "grad_norm": 6.193697452545166, "learning_rate": 2.693935285441589e-05, "loss": 1.7692739486694335, "memory(GiB)": 72.85, "step": 76190, "token_acc": 0.608, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.2644274024249174, "grad_norm": 6.8792290687561035, "learning_rate": 2.693338180648965e-05, "loss": 1.829428482055664, "memory(GiB)": 72.85, "step": 76195, "token_acc": 0.555921052631579, "train_speed(iter/s)": 0.672586 }, { "epoch": 3.2646416177541666, "grad_norm": 5.89915132522583, "learning_rate": 2.6927411176436857e-05, "loss": 2.187017250061035, 
"memory(GiB)": 72.85, "step": 76200, "token_acc": 0.5363321799307958, "train_speed(iter/s)": 0.672588 }, { "epoch": 3.2648558330834154, "grad_norm": 6.506184101104736, "learning_rate": 2.692144096436565e-05, "loss": 2.4181140899658202, "memory(GiB)": 72.85, "step": 76205, "token_acc": 0.5148514851485149, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.2650700484126642, "grad_norm": 5.310163497924805, "learning_rate": 2.6915471170384188e-05, "loss": 2.239537811279297, "memory(GiB)": 72.85, "step": 76210, "token_acc": 0.5308641975308642, "train_speed(iter/s)": 0.672593 }, { "epoch": 3.2652842637419135, "grad_norm": 5.151704788208008, "learning_rate": 2.6909501794600622e-05, "loss": 1.9181644439697265, "memory(GiB)": 72.85, "step": 76215, "token_acc": 0.5544217687074829, "train_speed(iter/s)": 0.672596 }, { "epoch": 3.2654984790711623, "grad_norm": 4.781271457672119, "learning_rate": 2.690353283712308e-05, "loss": 2.09615535736084, "memory(GiB)": 72.85, "step": 76220, "token_acc": 0.5778688524590164, "train_speed(iter/s)": 0.672598 }, { "epoch": 3.265712694400411, "grad_norm": 4.574112415313721, "learning_rate": 2.689756429805972e-05, "loss": 2.3035545349121094, "memory(GiB)": 72.85, "step": 76225, "token_acc": 0.5048231511254019, "train_speed(iter/s)": 0.672586 }, { "epoch": 3.2659269097296604, "grad_norm": 5.522767543792725, "learning_rate": 2.6891596177518652e-05, "loss": 2.1939571380615233, "memory(GiB)": 72.85, "step": 76230, "token_acc": 0.5126582278481012, "train_speed(iter/s)": 0.672582 }, { "epoch": 3.266141125058909, "grad_norm": 5.328197479248047, "learning_rate": 2.6885628475608006e-05, "loss": 2.080862808227539, "memory(GiB)": 72.85, "step": 76235, "token_acc": 0.528052805280528, "train_speed(iter/s)": 0.672586 }, { "epoch": 3.266355340388158, "grad_norm": 6.203468322753906, "learning_rate": 2.6879661192435866e-05, "loss": 1.7082530975341796, "memory(GiB)": 72.85, "step": 76240, "token_acc": 0.6068702290076335, "train_speed(iter/s)": 0.672597 }, { 
"epoch": 3.2665695557174073, "grad_norm": 7.696681499481201, "learning_rate": 2.6873694328110365e-05, "loss": 2.1281082153320314, "memory(GiB)": 72.85, "step": 76245, "token_acc": 0.5473251028806584, "train_speed(iter/s)": 0.672605 }, { "epoch": 3.266783771046656, "grad_norm": 7.818352699279785, "learning_rate": 2.6867727882739568e-05, "loss": 1.8980297088623046, "memory(GiB)": 72.85, "step": 76250, "token_acc": 0.5608856088560885, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.266997986375905, "grad_norm": 5.316392421722412, "learning_rate": 2.6861761856431595e-05, "loss": 2.2636537551879883, "memory(GiB)": 72.85, "step": 76255, "token_acc": 0.53156146179402, "train_speed(iter/s)": 0.672611 }, { "epoch": 3.267212201705154, "grad_norm": 6.458505630493164, "learning_rate": 2.6855796249294512e-05, "loss": 2.1039873123168946, "memory(GiB)": 72.85, "step": 76260, "token_acc": 0.5228215767634855, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.267426417034403, "grad_norm": 4.799169540405273, "learning_rate": 2.6849831061436394e-05, "loss": 1.7896831512451172, "memory(GiB)": 72.85, "step": 76265, "token_acc": 0.5950704225352113, "train_speed(iter/s)": 0.67261 }, { "epoch": 3.2676406323636518, "grad_norm": 5.519354820251465, "learning_rate": 2.684386629296528e-05, "loss": 2.229682731628418, "memory(GiB)": 72.85, "step": 76270, "token_acc": 0.5531135531135531, "train_speed(iter/s)": 0.672604 }, { "epoch": 3.267854847692901, "grad_norm": 6.925886631011963, "learning_rate": 2.683790194398927e-05, "loss": 2.307966613769531, "memory(GiB)": 72.85, "step": 76275, "token_acc": 0.5220588235294118, "train_speed(iter/s)": 0.672598 }, { "epoch": 3.26806906302215, "grad_norm": 4.524786472320557, "learning_rate": 2.6831938014616377e-05, "loss": 2.0903575897216795, "memory(GiB)": 72.85, "step": 76280, "token_acc": 0.5473684210526316, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.2682832783513986, "grad_norm": 6.135393142700195, "learning_rate": 2.6825974504954676e-05, "loss": 
2.0766563415527344, "memory(GiB)": 72.85, "step": 76285, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.67259 }, { "epoch": 3.268497493680648, "grad_norm": 5.610516548156738, "learning_rate": 2.6820011415112183e-05, "loss": 2.3106712341308593, "memory(GiB)": 72.85, "step": 76290, "token_acc": 0.5160256410256411, "train_speed(iter/s)": 0.67259 }, { "epoch": 3.2687117090098967, "grad_norm": 6.726705551147461, "learning_rate": 2.6814048745196933e-05, "loss": 1.9427877426147462, "memory(GiB)": 72.85, "step": 76295, "token_acc": 0.5680272108843537, "train_speed(iter/s)": 0.672597 }, { "epoch": 3.2689259243391455, "grad_norm": 5.403908729553223, "learning_rate": 2.6808086495316943e-05, "loss": 2.001696014404297, "memory(GiB)": 72.85, "step": 76300, "token_acc": 0.5752212389380531, "train_speed(iter/s)": 0.672605 }, { "epoch": 3.269140139668395, "grad_norm": 5.08151912689209, "learning_rate": 2.68021246655802e-05, "loss": 2.185853385925293, "memory(GiB)": 72.85, "step": 76305, "token_acc": 0.5255474452554745, "train_speed(iter/s)": 0.672612 }, { "epoch": 3.2693543549976436, "grad_norm": 6.304742336273193, "learning_rate": 2.679616325609474e-05, "loss": 2.0443849563598633, "memory(GiB)": 72.85, "step": 76310, "token_acc": 0.5531914893617021, "train_speed(iter/s)": 0.672599 }, { "epoch": 3.2695685703268924, "grad_norm": 5.478185653686523, "learning_rate": 2.679020226696856e-05, "loss": 2.034127044677734, "memory(GiB)": 72.85, "step": 76315, "token_acc": 0.5545171339563862, "train_speed(iter/s)": 0.672597 }, { "epoch": 3.2697827856561417, "grad_norm": 4.89270544052124, "learning_rate": 2.678424169830964e-05, "loss": 2.1660385131835938, "memory(GiB)": 72.85, "step": 76320, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.672599 }, { "epoch": 3.2699970009853905, "grad_norm": 5.71514892578125, "learning_rate": 2.6778281550225957e-05, "loss": 2.2369571685791017, "memory(GiB)": 72.85, "step": 76325, "token_acc": 0.5437956204379562, "train_speed(iter/s)": 
0.672599 }, { "epoch": 3.2702112163146393, "grad_norm": 6.125514984130859, "learning_rate": 2.6772321822825486e-05, "loss": 2.1068992614746094, "memory(GiB)": 72.85, "step": 76330, "token_acc": 0.5289855072463768, "train_speed(iter/s)": 0.672606 }, { "epoch": 3.2704254316438885, "grad_norm": 5.467737197875977, "learning_rate": 2.6766362516216177e-05, "loss": 2.111096954345703, "memory(GiB)": 72.85, "step": 76335, "token_acc": 0.5393258426966292, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.2706396469731374, "grad_norm": 5.668374538421631, "learning_rate": 2.6760403630506027e-05, "loss": 2.000623893737793, "memory(GiB)": 72.85, "step": 76340, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672611 }, { "epoch": 3.270853862302386, "grad_norm": 6.638614654541016, "learning_rate": 2.6754445165802938e-05, "loss": 1.9945924758911133, "memory(GiB)": 72.85, "step": 76345, "token_acc": 0.5306859205776173, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.2710680776316354, "grad_norm": 5.611868858337402, "learning_rate": 2.6748487122214905e-05, "loss": 2.2012786865234375, "memory(GiB)": 72.85, "step": 76350, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.2712822929608842, "grad_norm": 5.5391035079956055, "learning_rate": 2.6742529499849846e-05, "loss": 1.876839828491211, "memory(GiB)": 72.85, "step": 76355, "token_acc": 0.594059405940594, "train_speed(iter/s)": 0.672609 }, { "epoch": 3.271496508290133, "grad_norm": 5.402767658233643, "learning_rate": 2.673657229881567e-05, "loss": 2.111729049682617, "memory(GiB)": 72.85, "step": 76360, "token_acc": 0.5656934306569343, "train_speed(iter/s)": 0.672598 }, { "epoch": 3.2717107236193823, "grad_norm": 4.758188724517822, "learning_rate": 2.67306155192203e-05, "loss": 1.7103893280029296, "memory(GiB)": 72.85, "step": 76365, "token_acc": 0.5984251968503937, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.271924938948631, "grad_norm": 4.581432819366455, "learning_rate": 
2.6724659161171674e-05, "loss": 2.14211368560791, "memory(GiB)": 72.85, "step": 76370, "token_acc": 0.5034965034965035, "train_speed(iter/s)": 0.6726 }, { "epoch": 3.27213915427788, "grad_norm": 4.903287887573242, "learning_rate": 2.671870322477768e-05, "loss": 1.974822998046875, "memory(GiB)": 72.85, "step": 76375, "token_acc": 0.5703703703703704, "train_speed(iter/s)": 0.672612 }, { "epoch": 3.272353369607129, "grad_norm": 5.443872451782227, "learning_rate": 2.6712747710146224e-05, "loss": 2.2203834533691404, "memory(GiB)": 72.85, "step": 76380, "token_acc": 0.5241379310344828, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.272567584936378, "grad_norm": 7.185408115386963, "learning_rate": 2.670679261738518e-05, "loss": 2.0130714416503905, "memory(GiB)": 72.85, "step": 76385, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.272781800265627, "grad_norm": 5.7883620262146, "learning_rate": 2.670083794660244e-05, "loss": 2.1140254974365233, "memory(GiB)": 72.85, "step": 76390, "token_acc": 0.5445205479452054, "train_speed(iter/s)": 0.672614 }, { "epoch": 3.272996015594876, "grad_norm": 6.140955924987793, "learning_rate": 2.669488369790586e-05, "loss": 2.0627473831176757, "memory(GiB)": 72.85, "step": 76395, "token_acc": 0.5644444444444444, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.273210230924125, "grad_norm": 7.6330366134643555, "learning_rate": 2.6688929871403346e-05, "loss": 2.0482181549072265, "memory(GiB)": 72.85, "step": 76400, "token_acc": 0.5072463768115942, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.2734244462533737, "grad_norm": 7.108986854553223, "learning_rate": 2.6682976467202726e-05, "loss": 2.307892990112305, "memory(GiB)": 72.85, "step": 76405, "token_acc": 0.5518518518518518, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.273638661582623, "grad_norm": 5.601809501647949, "learning_rate": 2.6677023485411866e-05, "loss": 1.9184131622314453, "memory(GiB)": 72.85, "step": 76410, "token_acc": 0.5793103448275863, 
"train_speed(iter/s)": 0.672631 }, { "epoch": 3.2738528769118718, "grad_norm": 5.774411201477051, "learning_rate": 2.667107092613861e-05, "loss": 1.7153535842895509, "memory(GiB)": 72.85, "step": 76415, "token_acc": 0.5985130111524164, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.274067092241121, "grad_norm": 6.182285308837891, "learning_rate": 2.6665118789490763e-05, "loss": 2.4001396179199217, "memory(GiB)": 72.85, "step": 76420, "token_acc": 0.4916387959866221, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.27428130757037, "grad_norm": 5.3838372230529785, "learning_rate": 2.6659167075576176e-05, "loss": 2.0020774841308593, "memory(GiB)": 72.85, "step": 76425, "token_acc": 0.5514705882352942, "train_speed(iter/s)": 0.672618 }, { "epoch": 3.2744955228996186, "grad_norm": 5.594499111175537, "learning_rate": 2.6653215784502693e-05, "loss": 2.1310199737548827, "memory(GiB)": 72.85, "step": 76430, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672619 }, { "epoch": 3.274709738228868, "grad_norm": 5.933680057525635, "learning_rate": 2.664726491637811e-05, "loss": 2.1778789520263673, "memory(GiB)": 72.85, "step": 76435, "token_acc": 0.5425531914893617, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.2749239535581167, "grad_norm": 5.1438984870910645, "learning_rate": 2.664131447131023e-05, "loss": 2.304561424255371, "memory(GiB)": 72.85, "step": 76440, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.2751381688873655, "grad_norm": 5.7798686027526855, "learning_rate": 2.6635364449406853e-05, "loss": 2.0885772705078125, "memory(GiB)": 72.85, "step": 76445, "token_acc": 0.5338078291814946, "train_speed(iter/s)": 0.672614 }, { "epoch": 3.275352384216615, "grad_norm": 5.485997676849365, "learning_rate": 2.662941485077574e-05, "loss": 1.837336540222168, "memory(GiB)": 72.85, "step": 76450, "token_acc": 0.572463768115942, "train_speed(iter/s)": 0.672591 }, { "epoch": 3.2755665995458636, "grad_norm": 5.615496635437012, 
"learning_rate": 2.6623465675524728e-05, "loss": 1.7353126525878906, "memory(GiB)": 72.85, "step": 76455, "token_acc": 0.6271929824561403, "train_speed(iter/s)": 0.672598 }, { "epoch": 3.2757808148751124, "grad_norm": 17.110103607177734, "learning_rate": 2.6617516923761553e-05, "loss": 1.9483209609985352, "memory(GiB)": 72.85, "step": 76460, "token_acc": 0.528169014084507, "train_speed(iter/s)": 0.672593 }, { "epoch": 3.2759950302043617, "grad_norm": 5.53051233291626, "learning_rate": 2.6611568595594006e-05, "loss": 1.9904407501220702, "memory(GiB)": 72.85, "step": 76465, "token_acc": 0.5984848484848485, "train_speed(iter/s)": 0.672597 }, { "epoch": 3.2762092455336105, "grad_norm": 8.004217147827148, "learning_rate": 2.6605620691129828e-05, "loss": 1.983018684387207, "memory(GiB)": 72.85, "step": 76470, "token_acc": 0.5595238095238095, "train_speed(iter/s)": 0.672592 }, { "epoch": 3.2764234608628593, "grad_norm": 5.53909158706665, "learning_rate": 2.659967321047678e-05, "loss": 2.2677064895629884, "memory(GiB)": 72.85, "step": 76475, "token_acc": 0.5033333333333333, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.2766376761921086, "grad_norm": 5.457602024078369, "learning_rate": 2.6593726153742575e-05, "loss": 1.8670665740966796, "memory(GiB)": 72.85, "step": 76480, "token_acc": 0.5335820895522388, "train_speed(iter/s)": 0.672605 }, { "epoch": 3.2768518915213574, "grad_norm": 8.444818496704102, "learning_rate": 2.6587779521035007e-05, "loss": 1.998844528198242, "memory(GiB)": 72.85, "step": 76485, "token_acc": 0.5787671232876712, "train_speed(iter/s)": 0.672606 }, { "epoch": 3.277066106850606, "grad_norm": 4.61716365814209, "learning_rate": 2.6581833312461768e-05, "loss": 2.1553218841552733, "memory(GiB)": 72.85, "step": 76490, "token_acc": 0.5154639175257731, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.2772803221798554, "grad_norm": 5.975488185882568, "learning_rate": 2.657588752813057e-05, "loss": 2.1522518157958985, "memory(GiB)": 72.85, "step": 76495, 
"token_acc": 0.5218978102189781, "train_speed(iter/s)": 0.672606 }, { "epoch": 3.2774945375091042, "grad_norm": 7.2329583168029785, "learning_rate": 2.6569942168149165e-05, "loss": 2.4060218811035154, "memory(GiB)": 72.85, "step": 76500, "token_acc": 0.5146198830409356, "train_speed(iter/s)": 0.672599 }, { "epoch": 3.2774945375091042, "eval_loss": 2.2760422229766846, "eval_runtime": 13.8649, "eval_samples_per_second": 7.212, "eval_steps_per_second": 7.212, "eval_token_acc": 0.49343065693430654, "step": 76500 }, { "epoch": 3.277708752838353, "grad_norm": 5.602674961090088, "learning_rate": 2.6563997232625236e-05, "loss": 2.1478931427001955, "memory(GiB)": 72.85, "step": 76505, "token_acc": 0.5061728395061729, "train_speed(iter/s)": 0.672503 }, { "epoch": 3.2779229681676023, "grad_norm": 7.2474799156188965, "learning_rate": 2.655805272166646e-05, "loss": 2.040680503845215, "memory(GiB)": 72.85, "step": 76510, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.672505 }, { "epoch": 3.278137183496851, "grad_norm": 4.79673433303833, "learning_rate": 2.6552108635380567e-05, "loss": 2.362211990356445, "memory(GiB)": 72.85, "step": 76515, "token_acc": 0.5017301038062284, "train_speed(iter/s)": 0.672504 }, { "epoch": 3.2783513988261, "grad_norm": 5.348042011260986, "learning_rate": 2.654616497387523e-05, "loss": 1.810652732849121, "memory(GiB)": 72.85, "step": 76520, "token_acc": 0.5919732441471572, "train_speed(iter/s)": 0.672515 }, { "epoch": 3.278565614155349, "grad_norm": 5.782190799713135, "learning_rate": 2.654022173725811e-05, "loss": 1.8996814727783202, "memory(GiB)": 72.85, "step": 76525, "token_acc": 0.5527272727272727, "train_speed(iter/s)": 0.672498 }, { "epoch": 3.278779829484598, "grad_norm": 7.321112155914307, "learning_rate": 2.653427892563688e-05, "loss": 2.1289243698120117, "memory(GiB)": 72.85, "step": 76530, "token_acc": 0.5949367088607594, "train_speed(iter/s)": 0.672511 }, { "epoch": 3.278994044813847, "grad_norm": 5.43751859664917, 
"learning_rate": 2.65283365391192e-05, "loss": 2.021536445617676, "memory(GiB)": 72.85, "step": 76535, "token_acc": 0.5611510791366906, "train_speed(iter/s)": 0.672515 }, { "epoch": 3.279208260143096, "grad_norm": 7.735400199890137, "learning_rate": 2.6522394577812702e-05, "loss": 2.137558174133301, "memory(GiB)": 72.85, "step": 76540, "token_acc": 0.5339805825242718, "train_speed(iter/s)": 0.672525 }, { "epoch": 3.279422475472345, "grad_norm": 8.02541446685791, "learning_rate": 2.6516453041825075e-05, "loss": 2.092431831359863, "memory(GiB)": 72.85, "step": 76545, "token_acc": 0.5665399239543726, "train_speed(iter/s)": 0.672527 }, { "epoch": 3.2796366908015937, "grad_norm": 6.0388617515563965, "learning_rate": 2.6510511931263926e-05, "loss": 2.2918739318847656, "memory(GiB)": 72.85, "step": 76550, "token_acc": 0.49050632911392406, "train_speed(iter/s)": 0.672532 }, { "epoch": 3.279850906130843, "grad_norm": 6.0952467918396, "learning_rate": 2.6504571246236893e-05, "loss": 2.367624282836914, "memory(GiB)": 72.85, "step": 76555, "token_acc": 0.5191082802547771, "train_speed(iter/s)": 0.672535 }, { "epoch": 3.2800651214600918, "grad_norm": 7.483573913574219, "learning_rate": 2.6498630986851584e-05, "loss": 1.9654132843017578, "memory(GiB)": 72.85, "step": 76560, "token_acc": 0.5617021276595745, "train_speed(iter/s)": 0.672537 }, { "epoch": 3.2802793367893406, "grad_norm": 5.296267986297607, "learning_rate": 2.649269115321561e-05, "loss": 2.2780855178833006, "memory(GiB)": 72.85, "step": 76565, "token_acc": 0.5097402597402597, "train_speed(iter/s)": 0.672532 }, { "epoch": 3.28049355211859, "grad_norm": 6.904795169830322, "learning_rate": 2.6486751745436578e-05, "loss": 2.409863090515137, "memory(GiB)": 72.85, "step": 76570, "token_acc": 0.532871972318339, "train_speed(iter/s)": 0.672528 }, { "epoch": 3.2807077674478387, "grad_norm": 4.150479316711426, "learning_rate": 2.648081276362212e-05, "loss": 2.365672302246094, "memory(GiB)": 72.85, "step": 76575, "token_acc": 
0.5252525252525253, "train_speed(iter/s)": 0.672523 }, { "epoch": 3.2809219827770875, "grad_norm": 4.022576808929443, "learning_rate": 2.6474874207879807e-05, "loss": 1.808953094482422, "memory(GiB)": 72.85, "step": 76580, "token_acc": 0.572, "train_speed(iter/s)": 0.672521 }, { "epoch": 3.2811361981063367, "grad_norm": 7.264626979827881, "learning_rate": 2.6468936078317207e-05, "loss": 2.1802534103393554, "memory(GiB)": 72.85, "step": 76585, "token_acc": 0.517799352750809, "train_speed(iter/s)": 0.672524 }, { "epoch": 3.2813504134355855, "grad_norm": 7.600806713104248, "learning_rate": 2.6462998375041908e-05, "loss": 2.0736446380615234, "memory(GiB)": 72.85, "step": 76590, "token_acc": 0.5134228187919463, "train_speed(iter/s)": 0.672537 }, { "epoch": 3.2815646287648343, "grad_norm": 6.714496612548828, "learning_rate": 2.645706109816145e-05, "loss": 2.1755014419555665, "memory(GiB)": 72.85, "step": 76595, "token_acc": 0.521311475409836, "train_speed(iter/s)": 0.672543 }, { "epoch": 3.2817788440940836, "grad_norm": 4.544937610626221, "learning_rate": 2.6451124247783443e-05, "loss": 2.1369070053100585, "memory(GiB)": 72.85, "step": 76600, "token_acc": 0.5207100591715976, "train_speed(iter/s)": 0.67255 }, { "epoch": 3.2819930594233324, "grad_norm": 5.315232276916504, "learning_rate": 2.6445187824015406e-05, "loss": 2.2601320266723635, "memory(GiB)": 72.85, "step": 76605, "token_acc": 0.5441176470588235, "train_speed(iter/s)": 0.672553 }, { "epoch": 3.2822072747525812, "grad_norm": 6.979111194610596, "learning_rate": 2.6439251826964882e-05, "loss": 2.243264389038086, "memory(GiB)": 72.85, "step": 76610, "token_acc": 0.4778156996587031, "train_speed(iter/s)": 0.672552 }, { "epoch": 3.2824214900818305, "grad_norm": 4.407118320465088, "learning_rate": 2.6433316256739417e-05, "loss": 1.9648574829101562, "memory(GiB)": 72.85, "step": 76615, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672552 }, { "epoch": 3.2826357054110793, "grad_norm": 6.033966541290283, 
"learning_rate": 2.6427381113446536e-05, "loss": 1.9361974716186523, "memory(GiB)": 72.85, "step": 76620, "token_acc": 0.5567375886524822, "train_speed(iter/s)": 0.672557 }, { "epoch": 3.282849920740328, "grad_norm": 7.116406440734863, "learning_rate": 2.642144639719374e-05, "loss": 2.171998977661133, "memory(GiB)": 72.85, "step": 76625, "token_acc": 0.4954954954954955, "train_speed(iter/s)": 0.672565 }, { "epoch": 3.2830641360695774, "grad_norm": 6.538126468658447, "learning_rate": 2.641551210808858e-05, "loss": 2.179641914367676, "memory(GiB)": 72.85, "step": 76630, "token_acc": 0.5650557620817844, "train_speed(iter/s)": 0.672544 }, { "epoch": 3.283278351398826, "grad_norm": 5.976966381072998, "learning_rate": 2.640957824623854e-05, "loss": 2.075191116333008, "memory(GiB)": 72.85, "step": 76635, "token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.672546 }, { "epoch": 3.283492566728075, "grad_norm": 6.468922138214111, "learning_rate": 2.6403644811751104e-05, "loss": 2.0380001068115234, "memory(GiB)": 72.85, "step": 76640, "token_acc": 0.5656934306569343, "train_speed(iter/s)": 0.672545 }, { "epoch": 3.2837067820573242, "grad_norm": 4.9629411697387695, "learning_rate": 2.63977118047338e-05, "loss": 2.097643280029297, "memory(GiB)": 72.85, "step": 76645, "token_acc": 0.5442622950819672, "train_speed(iter/s)": 0.672536 }, { "epoch": 3.283920997386573, "grad_norm": 5.475043296813965, "learning_rate": 2.6391779225294084e-05, "loss": 2.081826591491699, "memory(GiB)": 72.85, "step": 76650, "token_acc": 0.5558912386706949, "train_speed(iter/s)": 0.672526 }, { "epoch": 3.284135212715822, "grad_norm": 8.401924133300781, "learning_rate": 2.638584707353941e-05, "loss": 2.474167251586914, "memory(GiB)": 72.85, "step": 76655, "token_acc": 0.48881789137380194, "train_speed(iter/s)": 0.672525 }, { "epoch": 3.284349428045071, "grad_norm": 5.324656963348389, "learning_rate": 2.637991534957729e-05, "loss": 2.1110305786132812, "memory(GiB)": 72.85, "step": 76660, "token_acc": 
0.5041322314049587, "train_speed(iter/s)": 0.672527 }, { "epoch": 3.28456364337432, "grad_norm": 4.718584060668945, "learning_rate": 2.6373984053515167e-05, "loss": 2.049153137207031, "memory(GiB)": 72.85, "step": 76665, "token_acc": 0.5339805825242718, "train_speed(iter/s)": 0.672525 }, { "epoch": 3.2847778587035688, "grad_norm": 6.169468879699707, "learning_rate": 2.636805318546048e-05, "loss": 2.5546022415161134, "memory(GiB)": 72.85, "step": 76670, "token_acc": 0.5209790209790209, "train_speed(iter/s)": 0.67253 }, { "epoch": 3.284992074032818, "grad_norm": 5.1118927001953125, "learning_rate": 2.636212274552068e-05, "loss": 2.0884492874145506, "memory(GiB)": 72.85, "step": 76675, "token_acc": 0.5741444866920152, "train_speed(iter/s)": 0.672535 }, { "epoch": 3.285206289362067, "grad_norm": 4.912277698516846, "learning_rate": 2.6356192733803197e-05, "loss": 2.2955738067626954, "memory(GiB)": 72.85, "step": 76680, "token_acc": 0.5084745762711864, "train_speed(iter/s)": 0.67254 }, { "epoch": 3.2854205046913156, "grad_norm": 4.635552883148193, "learning_rate": 2.6350263150415443e-05, "loss": 2.2283226013183595, "memory(GiB)": 72.85, "step": 76685, "token_acc": 0.5491525423728814, "train_speed(iter/s)": 0.672534 }, { "epoch": 3.285634720020565, "grad_norm": 4.567420482635498, "learning_rate": 2.6344333995464875e-05, "loss": 2.2728715896606446, "memory(GiB)": 72.85, "step": 76690, "token_acc": 0.5221843003412969, "train_speed(iter/s)": 0.672535 }, { "epoch": 3.2858489353498137, "grad_norm": 5.758421897888184, "learning_rate": 2.6338405269058896e-05, "loss": 2.119523620605469, "memory(GiB)": 72.85, "step": 76695, "token_acc": 0.5226480836236934, "train_speed(iter/s)": 0.672542 }, { "epoch": 3.2860631506790625, "grad_norm": 4.692851543426514, "learning_rate": 2.6332476971304898e-05, "loss": 1.9115066528320312, "memory(GiB)": 72.85, "step": 76700, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.672539 }, { "epoch": 3.2862773660083118, "grad_norm": 
6.609753608703613, "learning_rate": 2.6326549102310284e-05, "loss": 2.0852718353271484, "memory(GiB)": 72.85, "step": 76705, "token_acc": 0.5308641975308642, "train_speed(iter/s)": 0.672546 }, { "epoch": 3.2864915813375606, "grad_norm": 5.307541847229004, "learning_rate": 2.632062166218241e-05, "loss": 2.1883411407470703, "memory(GiB)": 72.85, "step": 76710, "token_acc": 0.5356125356125356, "train_speed(iter/s)": 0.672556 }, { "epoch": 3.2867057966668094, "grad_norm": 8.066058158874512, "learning_rate": 2.6314694651028697e-05, "loss": 2.146877670288086, "memory(GiB)": 72.85, "step": 76715, "token_acc": 0.55, "train_speed(iter/s)": 0.672565 }, { "epoch": 3.2869200119960587, "grad_norm": 5.315252304077148, "learning_rate": 2.630876806895653e-05, "loss": 2.055011749267578, "memory(GiB)": 72.85, "step": 76720, "token_acc": 0.5666666666666667, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.2871342273253075, "grad_norm": 5.036868572235107, "learning_rate": 2.630284191607325e-05, "loss": 2.1621170043945312, "memory(GiB)": 72.85, "step": 76725, "token_acc": 0.5221518987341772, "train_speed(iter/s)": 0.672578 }, { "epoch": 3.2873484426545563, "grad_norm": 7.603403568267822, "learning_rate": 2.629691619248622e-05, "loss": 2.1584182739257813, "memory(GiB)": 72.85, "step": 76730, "token_acc": 0.5050167224080268, "train_speed(iter/s)": 0.672572 }, { "epoch": 3.2875626579838055, "grad_norm": 5.015799045562744, "learning_rate": 2.6290990898302786e-05, "loss": 2.1056695938110352, "memory(GiB)": 72.85, "step": 76735, "token_acc": 0.5421245421245421, "train_speed(iter/s)": 0.672557 }, { "epoch": 3.2877768733130543, "grad_norm": 5.428468227386475, "learning_rate": 2.6285066033630278e-05, "loss": 2.0749162673950194, "memory(GiB)": 72.85, "step": 76740, "token_acc": 0.5850622406639004, "train_speed(iter/s)": 0.672559 }, { "epoch": 3.287991088642303, "grad_norm": 6.301313877105713, "learning_rate": 2.6279141598576062e-05, "loss": 2.4820308685302734, "memory(GiB)": 72.85, "step": 76745, 
"token_acc": 0.5029761904761905, "train_speed(iter/s)": 0.672564 }, { "epoch": 3.2882053039715524, "grad_norm": 7.239397048950195, "learning_rate": 2.6273217593247447e-05, "loss": 2.004541015625, "memory(GiB)": 72.85, "step": 76750, "token_acc": 0.5679442508710801, "train_speed(iter/s)": 0.672563 }, { "epoch": 3.2884195193008012, "grad_norm": 6.749152183532715, "learning_rate": 2.6267294017751753e-05, "loss": 2.0633266448974608, "memory(GiB)": 72.85, "step": 76755, "token_acc": 0.5057471264367817, "train_speed(iter/s)": 0.67256 }, { "epoch": 3.28863373463005, "grad_norm": 6.4860382080078125, "learning_rate": 2.626137087219629e-05, "loss": 2.217686080932617, "memory(GiB)": 72.85, "step": 76760, "token_acc": 0.5394736842105263, "train_speed(iter/s)": 0.672557 }, { "epoch": 3.2888479499592993, "grad_norm": 5.210367679595947, "learning_rate": 2.625544815668836e-05, "loss": 1.9769250869750976, "memory(GiB)": 72.85, "step": 76765, "token_acc": 0.5785953177257525, "train_speed(iter/s)": 0.67256 }, { "epoch": 3.289062165288548, "grad_norm": 6.248388290405273, "learning_rate": 2.6249525871335246e-05, "loss": 2.1648818969726564, "memory(GiB)": 72.85, "step": 76770, "token_acc": 0.5615615615615616, "train_speed(iter/s)": 0.672566 }, { "epoch": 3.289276380617797, "grad_norm": 5.872239112854004, "learning_rate": 2.624360401624427e-05, "loss": 2.196495246887207, "memory(GiB)": 72.85, "step": 76775, "token_acc": 0.5186335403726708, "train_speed(iter/s)": 0.672572 }, { "epoch": 3.289490595947046, "grad_norm": 8.193886756896973, "learning_rate": 2.6237682591522693e-05, "loss": 1.9869243621826171, "memory(GiB)": 72.85, "step": 76780, "token_acc": 0.5243055555555556, "train_speed(iter/s)": 0.67258 }, { "epoch": 3.289704811276295, "grad_norm": 5.806116580963135, "learning_rate": 2.6231761597277783e-05, "loss": 2.078665542602539, "memory(GiB)": 72.85, "step": 76785, "token_acc": 0.5611285266457681, "train_speed(iter/s)": 0.672579 }, { "epoch": 3.289919026605544, "grad_norm": 
4.425219535827637, "learning_rate": 2.6225841033616794e-05, "loss": 1.9813636779785155, "memory(GiB)": 72.85, "step": 76790, "token_acc": 0.541095890410959, "train_speed(iter/s)": 0.672583 }, { "epoch": 3.290133241934793, "grad_norm": 6.137466907501221, "learning_rate": 2.621992090064701e-05, "loss": 2.0945852279663084, "memory(GiB)": 72.85, "step": 76795, "token_acc": 0.5666666666666667, "train_speed(iter/s)": 0.672572 }, { "epoch": 3.290347457264042, "grad_norm": 5.29721212387085, "learning_rate": 2.6214001198475645e-05, "loss": 2.0134775161743166, "memory(GiB)": 72.85, "step": 76800, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.672584 }, { "epoch": 3.2905616725932907, "grad_norm": 5.816701889038086, "learning_rate": 2.6208081927209988e-05, "loss": 2.35772590637207, "memory(GiB)": 72.85, "step": 76805, "token_acc": 0.4629080118694362, "train_speed(iter/s)": 0.672588 }, { "epoch": 3.29077588792254, "grad_norm": 6.363066673278809, "learning_rate": 2.6202163086957243e-05, "loss": 2.0243343353271483, "memory(GiB)": 72.85, "step": 76810, "token_acc": 0.5584415584415584, "train_speed(iter/s)": 0.672595 }, { "epoch": 3.2909901032517888, "grad_norm": 5.890258312225342, "learning_rate": 2.6196244677824637e-05, "loss": 2.3538591384887697, "memory(GiB)": 72.85, "step": 76815, "token_acc": 0.5088235294117647, "train_speed(iter/s)": 0.67261 }, { "epoch": 3.2912043185810376, "grad_norm": 5.230836391448975, "learning_rate": 2.6190326699919388e-05, "loss": 1.8980880737304688, "memory(GiB)": 72.85, "step": 76820, "token_acc": 0.5632530120481928, "train_speed(iter/s)": 0.672597 }, { "epoch": 3.291418533910287, "grad_norm": 4.78712272644043, "learning_rate": 2.618440915334871e-05, "loss": 2.098249816894531, "memory(GiB)": 72.85, "step": 76825, "token_acc": 0.5559322033898305, "train_speed(iter/s)": 0.672599 }, { "epoch": 3.2916327492395356, "grad_norm": 3.9743595123291016, "learning_rate": 2.617849203821978e-05, "loss": 1.9290401458740234, "memory(GiB)": 72.85, "step": 
76830, "token_acc": 0.56875, "train_speed(iter/s)": 0.672602 }, { "epoch": 3.2918469645687845, "grad_norm": 5.073469161987305, "learning_rate": 2.617257535463983e-05, "loss": 2.1681060791015625, "memory(GiB)": 72.85, "step": 76835, "token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.672609 }, { "epoch": 3.2920611798980337, "grad_norm": 4.818371295928955, "learning_rate": 2.616665910271603e-05, "loss": 2.1929197311401367, "memory(GiB)": 72.85, "step": 76840, "token_acc": 0.5348101265822784, "train_speed(iter/s)": 0.672614 }, { "epoch": 3.2922753952272825, "grad_norm": 5.46450662612915, "learning_rate": 2.616074328255556e-05, "loss": 2.337404251098633, "memory(GiB)": 72.85, "step": 76845, "token_acc": 0.5450980392156862, "train_speed(iter/s)": 0.672615 }, { "epoch": 3.2924896105565313, "grad_norm": 5.446191787719727, "learning_rate": 2.6154827894265588e-05, "loss": 2.432181739807129, "memory(GiB)": 72.85, "step": 76850, "token_acc": 0.4584837545126354, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.2927038258857806, "grad_norm": 5.366235256195068, "learning_rate": 2.6148912937953256e-05, "loss": 2.216632080078125, "memory(GiB)": 72.85, "step": 76855, "token_acc": 0.5108359133126935, "train_speed(iter/s)": 0.67261 }, { "epoch": 3.2929180412150294, "grad_norm": 8.065648078918457, "learning_rate": 2.614299841372576e-05, "loss": 2.1607479095458983, "memory(GiB)": 72.85, "step": 76860, "token_acc": 0.5164835164835165, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.293132256544278, "grad_norm": 6.062355041503906, "learning_rate": 2.613708432169021e-05, "loss": 2.0497793197631835, "memory(GiB)": 72.85, "step": 76865, "token_acc": 0.5749128919860628, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.2933464718735275, "grad_norm": 6.191584587097168, "learning_rate": 2.613117066195378e-05, "loss": 2.244009780883789, "memory(GiB)": 72.85, "step": 76870, "token_acc": 0.5537459283387622, "train_speed(iter/s)": 0.672604 }, { "epoch": 3.2935606872027763, "grad_norm": 
7.769454002380371, "learning_rate": 2.6125257434623584e-05, "loss": 1.9713926315307617, "memory(GiB)": 72.85, "step": 76875, "token_acc": 0.5652173913043478, "train_speed(iter/s)": 0.672609 }, { "epoch": 3.293774902532025, "grad_norm": 6.535286903381348, "learning_rate": 2.6119344639806753e-05, "loss": 1.9595525741577149, "memory(GiB)": 72.85, "step": 76880, "token_acc": 0.5667752442996743, "train_speed(iter/s)": 0.672622 }, { "epoch": 3.2939891178612744, "grad_norm": 6.493725776672363, "learning_rate": 2.6113432277610367e-05, "loss": 1.8848047256469727, "memory(GiB)": 72.85, "step": 76885, "token_acc": 0.59375, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.294203333190523, "grad_norm": 5.99257755279541, "learning_rate": 2.6107520348141585e-05, "loss": 1.9857673645019531, "memory(GiB)": 72.85, "step": 76890, "token_acc": 0.5077519379844961, "train_speed(iter/s)": 0.672619 }, { "epoch": 3.294417548519772, "grad_norm": 4.694571018218994, "learning_rate": 2.6101608851507486e-05, "loss": 2.293990135192871, "memory(GiB)": 72.85, "step": 76895, "token_acc": 0.508833922261484, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.2946317638490212, "grad_norm": 7.590577125549316, "learning_rate": 2.609569778781516e-05, "loss": 2.4792095184326173, "memory(GiB)": 72.85, "step": 76900, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.672612 }, { "epoch": 3.29484597917827, "grad_norm": 5.637724876403809, "learning_rate": 2.6089787157171697e-05, "loss": 1.9407943725585937, "memory(GiB)": 72.85, "step": 76905, "token_acc": 0.5559105431309904, "train_speed(iter/s)": 0.672615 }, { "epoch": 3.295060194507519, "grad_norm": 5.0302414894104, "learning_rate": 2.6083876959684162e-05, "loss": 2.0924989700317385, "memory(GiB)": 72.85, "step": 76910, "token_acc": 0.49333333333333335, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.295274409836768, "grad_norm": 4.263304233551025, "learning_rate": 2.607796719545962e-05, "loss": 1.9582225799560546, "memory(GiB)": 72.85, "step": 76915, 
"token_acc": 0.5857142857142857, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.295488625166017, "grad_norm": 5.945681095123291, "learning_rate": 2.6072057864605147e-05, "loss": 2.2272037506103515, "memory(GiB)": 72.85, "step": 76920, "token_acc": 0.5439560439560439, "train_speed(iter/s)": 0.672624 }, { "epoch": 3.2957028404952657, "grad_norm": 6.726355075836182, "learning_rate": 2.606614896722781e-05, "loss": 2.5295652389526366, "memory(GiB)": 72.85, "step": 76925, "token_acc": 0.47692307692307695, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.295917055824515, "grad_norm": 5.166580677032471, "learning_rate": 2.6060240503434623e-05, "loss": 2.401250457763672, "memory(GiB)": 72.85, "step": 76930, "token_acc": 0.4573643410852713, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.296131271153764, "grad_norm": 5.061311721801758, "learning_rate": 2.6054332473332622e-05, "loss": 2.0908761978149415, "memory(GiB)": 72.85, "step": 76935, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.672627 }, { "epoch": 3.2963454864830126, "grad_norm": 5.271635055541992, "learning_rate": 2.6048424877028876e-05, "loss": 2.120685577392578, "memory(GiB)": 72.85, "step": 76940, "token_acc": 0.51875, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.296559701812262, "grad_norm": 6.546647548675537, "learning_rate": 2.6042517714630354e-05, "loss": 2.001051330566406, "memory(GiB)": 72.85, "step": 76945, "token_acc": 0.5338078291814946, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.2967739171415107, "grad_norm": 4.821775436401367, "learning_rate": 2.6036610986244125e-05, "loss": 2.0065574645996094, "memory(GiB)": 72.85, "step": 76950, "token_acc": 0.5802919708029197, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.2969881324707595, "grad_norm": 5.486419677734375, "learning_rate": 2.6030704691977158e-05, "loss": 2.232894515991211, "memory(GiB)": 72.85, "step": 76955, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.2972023478000088, "grad_norm": 
7.03634786605835, "learning_rate": 2.602479883193647e-05, "loss": 2.153727912902832, "memory(GiB)": 72.85, "step": 76960, "token_acc": 0.5379061371841155, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.2974165631292576, "grad_norm": 5.868263244628906, "learning_rate": 2.6018893406229033e-05, "loss": 2.1040264129638673, "memory(GiB)": 72.85, "step": 76965, "token_acc": 0.5476190476190477, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.2976307784585064, "grad_norm": 6.051685810089111, "learning_rate": 2.6012988414961848e-05, "loss": 2.233794021606445, "memory(GiB)": 72.85, "step": 76970, "token_acc": 0.503125, "train_speed(iter/s)": 0.672624 }, { "epoch": 3.2978449937877556, "grad_norm": 5.832145690917969, "learning_rate": 2.6007083858241853e-05, "loss": 2.038901519775391, "memory(GiB)": 72.85, "step": 76975, "token_acc": 0.5387453874538746, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.2980592091170045, "grad_norm": 6.128128528594971, "learning_rate": 2.6001179736176064e-05, "loss": 2.0323177337646485, "memory(GiB)": 72.85, "step": 76980, "token_acc": 0.5769230769230769, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.2982734244462533, "grad_norm": 6.861522197723389, "learning_rate": 2.599527604887141e-05, "loss": 2.354608154296875, "memory(GiB)": 72.85, "step": 76985, "token_acc": 0.5285714285714286, "train_speed(iter/s)": 0.672622 }, { "epoch": 3.2984876397755025, "grad_norm": 5.8936567306518555, "learning_rate": 2.5989372796434854e-05, "loss": 2.637731170654297, "memory(GiB)": 72.85, "step": 76990, "token_acc": 0.4671280276816609, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.2987018551047513, "grad_norm": 6.505456924438477, "learning_rate": 2.598346997897333e-05, "loss": 2.3736190795898438, "memory(GiB)": 72.85, "step": 76995, "token_acc": 0.48520710059171596, "train_speed(iter/s)": 0.672612 }, { "epoch": 3.298916070434, "grad_norm": 5.616074085235596, "learning_rate": 2.597756759659376e-05, "loss": 2.1479673385620117, "memory(GiB)": 72.85, "step": 77000, 
"token_acc": 0.5219123505976095, "train_speed(iter/s)": 0.672606 }, { "epoch": 3.298916070434, "eval_loss": 2.0456104278564453, "eval_runtime": 15.8526, "eval_samples_per_second": 6.308, "eval_steps_per_second": 6.308, "eval_token_acc": 0.5186721991701245, "step": 77000 }, { "epoch": 3.2991302857632494, "grad_norm": 6.004067897796631, "learning_rate": 2.597166564940311e-05, "loss": 1.956641387939453, "memory(GiB)": 72.85, "step": 77005, "token_acc": 0.5411167512690356, "train_speed(iter/s)": 0.672492 }, { "epoch": 3.299344501092498, "grad_norm": 7.009248733520508, "learning_rate": 2.5965764137508253e-05, "loss": 2.4834394454956055, "memory(GiB)": 72.85, "step": 77010, "token_acc": 0.4423676012461059, "train_speed(iter/s)": 0.672494 }, { "epoch": 3.299558716421747, "grad_norm": 8.078749656677246, "learning_rate": 2.5959863061016144e-05, "loss": 1.8449512481689454, "memory(GiB)": 72.85, "step": 77015, "token_acc": 0.5780590717299579, "train_speed(iter/s)": 0.672497 }, { "epoch": 3.2997729317509963, "grad_norm": 5.8724446296691895, "learning_rate": 2.5953962420033673e-05, "loss": 1.858021354675293, "memory(GiB)": 72.85, "step": 77020, "token_acc": 0.5921985815602837, "train_speed(iter/s)": 0.672479 }, { "epoch": 3.299987147080245, "grad_norm": 6.7587714195251465, "learning_rate": 2.5948062214667723e-05, "loss": 2.3368036270141603, "memory(GiB)": 72.85, "step": 77025, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.672456 }, { "epoch": 3.300201362409494, "grad_norm": 5.137601852416992, "learning_rate": 2.5942162445025174e-05, "loss": 2.168041801452637, "memory(GiB)": 72.85, "step": 77030, "token_acc": 0.5275862068965518, "train_speed(iter/s)": 0.672451 }, { "epoch": 3.300415577738743, "grad_norm": 5.351202487945557, "learning_rate": 2.593626311121294e-05, "loss": 2.4431819915771484, "memory(GiB)": 72.85, "step": 77035, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.672453 }, { "epoch": 3.300629793067992, "grad_norm": 6.160048484802246, 
"learning_rate": 2.5930364213337865e-05, "loss": 1.7425630569458008, "memory(GiB)": 72.85, "step": 77040, "token_acc": 0.583969465648855, "train_speed(iter/s)": 0.672448 }, { "epoch": 3.300844008397241, "grad_norm": 5.504642963409424, "learning_rate": 2.592446575150683e-05, "loss": 2.1784490585327148, "memory(GiB)": 72.85, "step": 77045, "token_acc": 0.5248447204968945, "train_speed(iter/s)": 0.672451 }, { "epoch": 3.30105822372649, "grad_norm": 6.162940502166748, "learning_rate": 2.5918567725826682e-05, "loss": 2.0668630599975586, "memory(GiB)": 72.85, "step": 77050, "token_acc": 0.5289855072463768, "train_speed(iter/s)": 0.67245 }, { "epoch": 3.301272439055739, "grad_norm": 7.0827717781066895, "learning_rate": 2.591267013640427e-05, "loss": 1.89123592376709, "memory(GiB)": 72.85, "step": 77055, "token_acc": 0.594306049822064, "train_speed(iter/s)": 0.672446 }, { "epoch": 3.3014866543849877, "grad_norm": 5.859311103820801, "learning_rate": 2.590677298334641e-05, "loss": 2.103828239440918, "memory(GiB)": 72.85, "step": 77060, "token_acc": 0.4981684981684982, "train_speed(iter/s)": 0.672449 }, { "epoch": 3.301700869714237, "grad_norm": 4.791397571563721, "learning_rate": 2.590087626675998e-05, "loss": 2.1676919937133787, "memory(GiB)": 72.85, "step": 77065, "token_acc": 0.4936708860759494, "train_speed(iter/s)": 0.67246 }, { "epoch": 3.3019150850434857, "grad_norm": 4.472589492797852, "learning_rate": 2.589497998675179e-05, "loss": 1.9947805404663086, "memory(GiB)": 72.85, "step": 77070, "token_acc": 0.5344262295081967, "train_speed(iter/s)": 0.672467 }, { "epoch": 3.3021293003727346, "grad_norm": 5.188172340393066, "learning_rate": 2.5889084143428643e-05, "loss": 2.223073387145996, "memory(GiB)": 72.85, "step": 77075, "token_acc": 0.5664335664335665, "train_speed(iter/s)": 0.672455 }, { "epoch": 3.302343515701984, "grad_norm": 5.496951103210449, "learning_rate": 2.5883188736897356e-05, "loss": 2.238782501220703, "memory(GiB)": 72.85, "step": 77080, "token_acc": 
0.49324324324324326, "train_speed(iter/s)": 0.672445 }, { "epoch": 3.3025577310312326, "grad_norm": 5.531020164489746, "learning_rate": 2.587729376726471e-05, "loss": 1.791120719909668, "memory(GiB)": 72.85, "step": 77085, "token_acc": 0.5482758620689655, "train_speed(iter/s)": 0.672443 }, { "epoch": 3.3027719463604814, "grad_norm": 4.6711225509643555, "learning_rate": 2.587139923463751e-05, "loss": 2.385934257507324, "memory(GiB)": 72.85, "step": 77090, "token_acc": 0.5096952908587258, "train_speed(iter/s)": 0.672436 }, { "epoch": 3.3029861616897307, "grad_norm": 5.647747993469238, "learning_rate": 2.586550513912257e-05, "loss": 2.1516260147094726, "memory(GiB)": 72.85, "step": 77095, "token_acc": 0.5719844357976653, "train_speed(iter/s)": 0.672438 }, { "epoch": 3.3032003770189795, "grad_norm": 6.025554180145264, "learning_rate": 2.585961148082665e-05, "loss": 2.3178728103637694, "memory(GiB)": 72.85, "step": 77100, "token_acc": 0.5148148148148148, "train_speed(iter/s)": 0.672448 }, { "epoch": 3.3034145923482283, "grad_norm": 6.3362812995910645, "learning_rate": 2.5853718259856507e-05, "loss": 1.9635704040527344, "memory(GiB)": 72.85, "step": 77105, "token_acc": 0.5575221238938053, "train_speed(iter/s)": 0.672438 }, { "epoch": 3.3036288076774776, "grad_norm": 4.285511493682861, "learning_rate": 2.584782547631891e-05, "loss": 2.208220863342285, "memory(GiB)": 72.85, "step": 77110, "token_acc": 0.5474683544303798, "train_speed(iter/s)": 0.672444 }, { "epoch": 3.3038430230067264, "grad_norm": 5.226926326751709, "learning_rate": 2.5841933130320618e-05, "loss": 2.1364017486572267, "memory(GiB)": 72.85, "step": 77115, "token_acc": 0.5594405594405595, "train_speed(iter/s)": 0.672441 }, { "epoch": 3.304057238335975, "grad_norm": 7.247593879699707, "learning_rate": 2.5836041221968345e-05, "loss": 2.063355827331543, "memory(GiB)": 72.85, "step": 77120, "token_acc": 0.5407166123778502, "train_speed(iter/s)": 0.672443 }, { "epoch": 3.3042714536652245, "grad_norm": 
6.611433029174805, "learning_rate": 2.583014975136887e-05, "loss": 2.0441394805908204, "memory(GiB)": 72.85, "step": 77125, "token_acc": 0.5709219858156028, "train_speed(iter/s)": 0.672456 }, { "epoch": 3.3044856689944733, "grad_norm": 7.044872283935547, "learning_rate": 2.5824258718628906e-05, "loss": 1.9326078414916992, "memory(GiB)": 72.85, "step": 77130, "token_acc": 0.5740072202166066, "train_speed(iter/s)": 0.672455 }, { "epoch": 3.304699884323722, "grad_norm": 5.097524642944336, "learning_rate": 2.5818368123855176e-05, "loss": 1.8786788940429688, "memory(GiB)": 72.85, "step": 77135, "token_acc": 0.6055363321799307, "train_speed(iter/s)": 0.67246 }, { "epoch": 3.3049140996529713, "grad_norm": 7.513049125671387, "learning_rate": 2.581247796715439e-05, "loss": 2.1278289794921874, "memory(GiB)": 72.85, "step": 77140, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672469 }, { "epoch": 3.30512831498222, "grad_norm": 7.095204830169678, "learning_rate": 2.5806588248633235e-05, "loss": 2.1136524200439455, "memory(GiB)": 72.85, "step": 77145, "token_acc": 0.5109289617486339, "train_speed(iter/s)": 0.672469 }, { "epoch": 3.305342530311469, "grad_norm": 6.593286514282227, "learning_rate": 2.580069896839845e-05, "loss": 1.9955530166625977, "memory(GiB)": 72.85, "step": 77150, "token_acc": 0.5481481481481482, "train_speed(iter/s)": 0.67246 }, { "epoch": 3.305556745640718, "grad_norm": 5.035472393035889, "learning_rate": 2.5794810126556707e-05, "loss": 2.193125534057617, "memory(GiB)": 72.85, "step": 77155, "token_acc": 0.5149700598802395, "train_speed(iter/s)": 0.672451 }, { "epoch": 3.305770960969967, "grad_norm": 4.206913948059082, "learning_rate": 2.5788921723214664e-05, "loss": 2.1249547958374024, "memory(GiB)": 72.85, "step": 77160, "token_acc": 0.5264900662251656, "train_speed(iter/s)": 0.672445 }, { "epoch": 3.305985176299216, "grad_norm": 5.5066704750061035, "learning_rate": 2.5783033758479035e-05, "loss": 2.2169315338134767, "memory(GiB)": 72.85, 
"step": 77165, "token_acc": 0.5762195121951219, "train_speed(iter/s)": 0.672437 }, { "epoch": 3.306199391628465, "grad_norm": 5.808767795562744, "learning_rate": 2.5777146232456463e-05, "loss": 2.1019689559936525, "memory(GiB)": 72.85, "step": 77170, "token_acc": 0.4962686567164179, "train_speed(iter/s)": 0.672437 }, { "epoch": 3.306413606957714, "grad_norm": 5.459039688110352, "learning_rate": 2.577125914525359e-05, "loss": 2.64178409576416, "memory(GiB)": 72.85, "step": 77175, "token_acc": 0.4948805460750853, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.3066278222869627, "grad_norm": 6.512938022613525, "learning_rate": 2.5765372496977113e-05, "loss": 1.8401802062988282, "memory(GiB)": 72.85, "step": 77180, "token_acc": 0.5724907063197026, "train_speed(iter/s)": 0.672433 }, { "epoch": 3.306842037616212, "grad_norm": 6.372891902923584, "learning_rate": 2.575948628773364e-05, "loss": 2.239729309082031, "memory(GiB)": 72.85, "step": 77185, "token_acc": 0.49343832020997375, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.307056252945461, "grad_norm": 5.340980052947998, "learning_rate": 2.5753600517629817e-05, "loss": 1.947108840942383, "memory(GiB)": 72.85, "step": 77190, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.3072704682747096, "grad_norm": 4.860237121582031, "learning_rate": 2.5747715186772264e-05, "loss": 2.214786911010742, "memory(GiB)": 72.85, "step": 77195, "token_acc": 0.5537190082644629, "train_speed(iter/s)": 0.672439 }, { "epoch": 3.307484683603959, "grad_norm": 5.820427417755127, "learning_rate": 2.5741830295267598e-05, "loss": 1.8758060455322265, "memory(GiB)": 72.85, "step": 77200, "token_acc": 0.5981308411214953, "train_speed(iter/s)": 0.672442 }, { "epoch": 3.3076988989332077, "grad_norm": 5.2575249671936035, "learning_rate": 2.573594584322242e-05, "loss": 2.1166614532470702, "memory(GiB)": 72.85, "step": 77205, "token_acc": 0.523972602739726, "train_speed(iter/s)": 0.672447 }, { "epoch": 3.3079131142624565, 
"grad_norm": 4.464720726013184, "learning_rate": 2.5730061830743358e-05, "loss": 2.252632904052734, "memory(GiB)": 72.85, "step": 77210, "token_acc": 0.5075987841945289, "train_speed(iter/s)": 0.672447 }, { "epoch": 3.3081273295917057, "grad_norm": 5.774447917938232, "learning_rate": 2.5724178257936992e-05, "loss": 2.042660713195801, "memory(GiB)": 72.85, "step": 77215, "token_acc": 0.5551839464882943, "train_speed(iter/s)": 0.672444 }, { "epoch": 3.3083415449209546, "grad_norm": 5.851554870605469, "learning_rate": 2.5718295124909913e-05, "loss": 2.217194747924805, "memory(GiB)": 72.85, "step": 77220, "token_acc": 0.5253731343283582, "train_speed(iter/s)": 0.672442 }, { "epoch": 3.3085557602502034, "grad_norm": 6.782536029815674, "learning_rate": 2.5712412431768696e-05, "loss": 2.305406379699707, "memory(GiB)": 72.85, "step": 77225, "token_acc": 0.5289473684210526, "train_speed(iter/s)": 0.672447 }, { "epoch": 3.3087699755794526, "grad_norm": 5.260626792907715, "learning_rate": 2.5706530178619893e-05, "loss": 2.1648685455322267, "memory(GiB)": 72.85, "step": 77230, "token_acc": 0.5061728395061729, "train_speed(iter/s)": 0.672448 }, { "epoch": 3.3089841909087014, "grad_norm": 5.534717082977295, "learning_rate": 2.570064836557008e-05, "loss": 2.138034629821777, "memory(GiB)": 72.85, "step": 77235, "token_acc": 0.5859872611464968, "train_speed(iter/s)": 0.672457 }, { "epoch": 3.3091984062379503, "grad_norm": 7.056853294372559, "learning_rate": 2.5694766992725837e-05, "loss": 2.1627780914306642, "memory(GiB)": 72.85, "step": 77240, "token_acc": 0.518796992481203, "train_speed(iter/s)": 0.672444 }, { "epoch": 3.3094126215671995, "grad_norm": 6.6395673751831055, "learning_rate": 2.5688886060193694e-05, "loss": 2.140370559692383, "memory(GiB)": 72.85, "step": 77245, "token_acc": 0.5792880258899676, "train_speed(iter/s)": 0.67245 }, { "epoch": 3.3096268368964483, "grad_norm": 6.216238975524902, "learning_rate": 2.5683005568080188e-05, "loss": 2.257914924621582, 
"memory(GiB)": 72.85, "step": 77250, "token_acc": 0.5268817204301075, "train_speed(iter/s)": 0.672433 }, { "epoch": 3.309841052225697, "grad_norm": 6.444779396057129, "learning_rate": 2.567712551649184e-05, "loss": 2.162679672241211, "memory(GiB)": 72.85, "step": 77255, "token_acc": 0.5234899328859061, "train_speed(iter/s)": 0.672438 }, { "epoch": 3.3100552675549464, "grad_norm": 5.633199214935303, "learning_rate": 2.567124590553518e-05, "loss": 2.22369499206543, "memory(GiB)": 72.85, "step": 77260, "token_acc": 0.5306122448979592, "train_speed(iter/s)": 0.672449 }, { "epoch": 3.310269482884195, "grad_norm": 5.259326457977295, "learning_rate": 2.5665366735316708e-05, "loss": 2.114023208618164, "memory(GiB)": 72.85, "step": 77265, "token_acc": 0.5340136054421769, "train_speed(iter/s)": 0.672454 }, { "epoch": 3.310483698213444, "grad_norm": 5.613901138305664, "learning_rate": 2.565948800594296e-05, "loss": 1.8846796035766602, "memory(GiB)": 72.85, "step": 77270, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672465 }, { "epoch": 3.3106979135426933, "grad_norm": 5.844206809997559, "learning_rate": 2.5653609717520423e-05, "loss": 2.367009925842285, "memory(GiB)": 72.85, "step": 77275, "token_acc": 0.514018691588785, "train_speed(iter/s)": 0.672471 }, { "epoch": 3.310912128871942, "grad_norm": 6.690782070159912, "learning_rate": 2.5647731870155585e-05, "loss": 2.077523040771484, "memory(GiB)": 72.85, "step": 77280, "token_acc": 0.5465116279069767, "train_speed(iter/s)": 0.672467 }, { "epoch": 3.311126344201191, "grad_norm": 5.463449001312256, "learning_rate": 2.5641854463954928e-05, "loss": 2.134918975830078, "memory(GiB)": 72.85, "step": 77285, "token_acc": 0.5265017667844523, "train_speed(iter/s)": 0.672465 }, { "epoch": 3.31134055953044, "grad_norm": 5.630061149597168, "learning_rate": 2.563597749902491e-05, "loss": 2.2910799026489257, "memory(GiB)": 72.85, "step": 77290, "token_acc": 0.49853372434017595, "train_speed(iter/s)": 0.672475 }, { "epoch": 
3.311554774859689, "grad_norm": 7.999413967132568, "learning_rate": 2.5630100975472026e-05, "loss": 2.069919204711914, "memory(GiB)": 72.85, "step": 77295, "token_acc": 0.5574324324324325, "train_speed(iter/s)": 0.672459 }, { "epoch": 3.3117689901889378, "grad_norm": 9.572088241577148, "learning_rate": 2.5624224893402733e-05, "loss": 2.075733947753906, "memory(GiB)": 72.85, "step": 77300, "token_acc": 0.5551839464882943, "train_speed(iter/s)": 0.67245 }, { "epoch": 3.311983205518187, "grad_norm": 4.752735614776611, "learning_rate": 2.5618349252923448e-05, "loss": 1.9606115341186523, "memory(GiB)": 72.85, "step": 77305, "token_acc": 0.532, "train_speed(iter/s)": 0.672448 }, { "epoch": 3.312197420847436, "grad_norm": 5.417895317077637, "learning_rate": 2.5612474054140657e-05, "loss": 2.0880926132202147, "memory(GiB)": 72.85, "step": 77310, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.672454 }, { "epoch": 3.3124116361766847, "grad_norm": 6.886181354522705, "learning_rate": 2.560659929716078e-05, "loss": 2.1771358489990233, "memory(GiB)": 72.85, "step": 77315, "token_acc": 0.5550847457627118, "train_speed(iter/s)": 0.672456 }, { "epoch": 3.312625851505934, "grad_norm": 7.2399797439575195, "learning_rate": 2.560072498209022e-05, "loss": 2.416202163696289, "memory(GiB)": 72.85, "step": 77320, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.672467 }, { "epoch": 3.3128400668351827, "grad_norm": 6.6075639724731445, "learning_rate": 2.5594851109035435e-05, "loss": 2.1289241790771483, "memory(GiB)": 72.85, "step": 77325, "token_acc": 0.5395189003436426, "train_speed(iter/s)": 0.672471 }, { "epoch": 3.3130542821644315, "grad_norm": 5.785404682159424, "learning_rate": 2.558897767810281e-05, "loss": 2.181854248046875, "memory(GiB)": 72.85, "step": 77330, "token_acc": 0.519434628975265, "train_speed(iter/s)": 0.672482 }, { "epoch": 3.313268497493681, "grad_norm": 5.552237510681152, "learning_rate": 2.5583104689398757e-05, "loss": 2.1551090240478517, 
"memory(GiB)": 72.85, "step": 77335, "token_acc": 0.5331010452961672, "train_speed(iter/s)": 0.672477 }, { "epoch": 3.3134827128229296, "grad_norm": 5.452175617218018, "learning_rate": 2.5577232143029672e-05, "loss": 2.122584915161133, "memory(GiB)": 72.85, "step": 77340, "token_acc": 0.5181159420289855, "train_speed(iter/s)": 0.672485 }, { "epoch": 3.3136969281521784, "grad_norm": 6.8113932609558105, "learning_rate": 2.5571360039101932e-05, "loss": 1.9166507720947266, "memory(GiB)": 72.85, "step": 77345, "token_acc": 0.55078125, "train_speed(iter/s)": 0.672479 }, { "epoch": 3.3139111434814277, "grad_norm": 4.990286827087402, "learning_rate": 2.5565488377721903e-05, "loss": 2.048094367980957, "memory(GiB)": 72.85, "step": 77350, "token_acc": 0.5375375375375375, "train_speed(iter/s)": 0.672483 }, { "epoch": 3.3141253588106765, "grad_norm": 5.037135601043701, "learning_rate": 2.555961715899599e-05, "loss": 2.088225746154785, "memory(GiB)": 72.85, "step": 77355, "token_acc": 0.5276073619631901, "train_speed(iter/s)": 0.672471 }, { "epoch": 3.3143395741399253, "grad_norm": 4.643705368041992, "learning_rate": 2.555374638303054e-05, "loss": 1.9503036499023438, "memory(GiB)": 72.85, "step": 77360, "token_acc": 0.5490909090909091, "train_speed(iter/s)": 0.67248 }, { "epoch": 3.3145537894691746, "grad_norm": 4.49810266494751, "learning_rate": 2.554787604993191e-05, "loss": 1.7308107376098634, "memory(GiB)": 72.85, "step": 77365, "token_acc": 0.6017316017316018, "train_speed(iter/s)": 0.672486 }, { "epoch": 3.3147680047984234, "grad_norm": 5.651411533355713, "learning_rate": 2.5542006159806444e-05, "loss": 2.318937873840332, "memory(GiB)": 72.85, "step": 77370, "token_acc": 0.47875354107648727, "train_speed(iter/s)": 0.672496 }, { "epoch": 3.314982220127672, "grad_norm": 7.423572063446045, "learning_rate": 2.5536136712760457e-05, "loss": 2.2549644470214845, "memory(GiB)": 72.85, "step": 77375, "token_acc": 0.4716981132075472, "train_speed(iter/s)": 0.6725 }, { "epoch": 
3.3151964354569214, "grad_norm": 5.683423042297363, "learning_rate": 2.5530267708900303e-05, "loss": 2.122151756286621, "memory(GiB)": 72.85, "step": 77380, "token_acc": 0.540453074433657, "train_speed(iter/s)": 0.672495 }, { "epoch": 3.3154106507861703, "grad_norm": 5.689837455749512, "learning_rate": 2.5524399148332325e-05, "loss": 2.1089643478393554, "memory(GiB)": 72.85, "step": 77385, "token_acc": 0.5236363636363637, "train_speed(iter/s)": 0.672493 }, { "epoch": 3.315624866115419, "grad_norm": 7.104650497436523, "learning_rate": 2.551853103116281e-05, "loss": 2.0993709564208984, "memory(GiB)": 72.85, "step": 77390, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.67249 }, { "epoch": 3.3158390814446683, "grad_norm": 4.576380729675293, "learning_rate": 2.5512663357498078e-05, "loss": 1.9330265045166015, "memory(GiB)": 72.85, "step": 77395, "token_acc": 0.5647840531561462, "train_speed(iter/s)": 0.672478 }, { "epoch": 3.316053296773917, "grad_norm": 5.024566650390625, "learning_rate": 2.550679612744442e-05, "loss": 2.0309627532958983, "memory(GiB)": 72.85, "step": 77400, "token_acc": 0.5084175084175084, "train_speed(iter/s)": 0.672481 }, { "epoch": 3.316267512103166, "grad_norm": 5.401362895965576, "learning_rate": 2.5500929341108104e-05, "loss": 2.3703096389770506, "memory(GiB)": 72.85, "step": 77405, "token_acc": 0.5301587301587302, "train_speed(iter/s)": 0.672489 }, { "epoch": 3.316481727432415, "grad_norm": 6.063008785247803, "learning_rate": 2.5495062998595455e-05, "loss": 2.07076358795166, "memory(GiB)": 72.85, "step": 77410, "token_acc": 0.5382165605095541, "train_speed(iter/s)": 0.672495 }, { "epoch": 3.316695942761664, "grad_norm": 5.348352432250977, "learning_rate": 2.548919710001273e-05, "loss": 2.2234153747558594, "memory(GiB)": 72.85, "step": 77415, "token_acc": 0.5518672199170125, "train_speed(iter/s)": 0.672513 }, { "epoch": 3.316910158090913, "grad_norm": 5.78090238571167, "learning_rate": 2.5483331645466192e-05, "loss": 
2.3324535369873045, "memory(GiB)": 72.85, "step": 77420, "token_acc": 0.5170278637770898, "train_speed(iter/s)": 0.672502 }, { "epoch": 3.317124373420162, "grad_norm": 6.216237545013428, "learning_rate": 2.5477466635062097e-05, "loss": 2.066126251220703, "memory(GiB)": 72.85, "step": 77425, "token_acc": 0.5374149659863946, "train_speed(iter/s)": 0.672506 }, { "epoch": 3.317338588749411, "grad_norm": 5.984373569488525, "learning_rate": 2.5471602068906697e-05, "loss": 1.9689205169677735, "memory(GiB)": 72.85, "step": 77430, "token_acc": 0.5373665480427047, "train_speed(iter/s)": 0.672511 }, { "epoch": 3.3175528040786597, "grad_norm": 6.998040676116943, "learning_rate": 2.5465737947106218e-05, "loss": 1.8993921279907227, "memory(GiB)": 72.85, "step": 77435, "token_acc": 0.5708955223880597, "train_speed(iter/s)": 0.672505 }, { "epoch": 3.317767019407909, "grad_norm": 6.199459552764893, "learning_rate": 2.545987426976693e-05, "loss": 2.1036121368408205, "memory(GiB)": 72.85, "step": 77440, "token_acc": 0.554006968641115, "train_speed(iter/s)": 0.672497 }, { "epoch": 3.317981234737158, "grad_norm": 5.561986923217773, "learning_rate": 2.545401103699504e-05, "loss": 2.1167369842529298, "memory(GiB)": 72.85, "step": 77445, "token_acc": 0.5460526315789473, "train_speed(iter/s)": 0.672502 }, { "epoch": 3.3181954500664066, "grad_norm": 5.880345821380615, "learning_rate": 2.5448148248896768e-05, "loss": 2.374642181396484, "memory(GiB)": 72.85, "step": 77450, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.672496 }, { "epoch": 3.318409665395656, "grad_norm": 5.58167839050293, "learning_rate": 2.54422859055783e-05, "loss": 2.135710906982422, "memory(GiB)": 72.85, "step": 77455, "token_acc": 0.5570934256055363, "train_speed(iter/s)": 0.6725 }, { "epoch": 3.3186238807249047, "grad_norm": 4.931665897369385, "learning_rate": 2.543642400714588e-05, "loss": 2.0938533782958983, "memory(GiB)": 72.85, "step": 77460, "token_acc": 0.5544554455445545, "train_speed(iter/s)": 
0.672501 }, { "epoch": 3.3188380960541535, "grad_norm": 7.444046974182129, "learning_rate": 2.543056255370566e-05, "loss": 2.2574106216430665, "memory(GiB)": 72.85, "step": 77465, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672508 }, { "epoch": 3.3190523113834027, "grad_norm": 6.3282670974731445, "learning_rate": 2.542470154536387e-05, "loss": 2.1548538208007812, "memory(GiB)": 72.85, "step": 77470, "token_acc": 0.5018315018315018, "train_speed(iter/s)": 0.672496 }, { "epoch": 3.3192665267126515, "grad_norm": 5.512868404388428, "learning_rate": 2.5418840982226667e-05, "loss": 2.193760299682617, "memory(GiB)": 72.85, "step": 77475, "token_acc": 0.4967105263157895, "train_speed(iter/s)": 0.672497 }, { "epoch": 3.3194807420419004, "grad_norm": 6.632433891296387, "learning_rate": 2.5412980864400217e-05, "loss": 2.371254730224609, "memory(GiB)": 72.85, "step": 77480, "token_acc": 0.4787234042553192, "train_speed(iter/s)": 0.672504 }, { "epoch": 3.3196949573711496, "grad_norm": 4.484029769897461, "learning_rate": 2.540712119199068e-05, "loss": 1.978085708618164, "memory(GiB)": 72.85, "step": 77485, "token_acc": 0.5516014234875445, "train_speed(iter/s)": 0.672503 }, { "epoch": 3.3199091727003984, "grad_norm": 4.911560535430908, "learning_rate": 2.5401261965104217e-05, "loss": 2.0938974380493165, "memory(GiB)": 72.85, "step": 77490, "token_acc": 0.543046357615894, "train_speed(iter/s)": 0.672501 }, { "epoch": 3.3201233880296472, "grad_norm": 6.315254211425781, "learning_rate": 2.5395403183846945e-05, "loss": 2.1377586364746093, "memory(GiB)": 72.85, "step": 77495, "token_acc": 0.5016835016835017, "train_speed(iter/s)": 0.672497 }, { "epoch": 3.3203376033588965, "grad_norm": 5.685832977294922, "learning_rate": 2.5389544848325054e-05, "loss": 2.3072776794433594, "memory(GiB)": 72.85, "step": 77500, "token_acc": 0.5226480836236934, "train_speed(iter/s)": 0.672497 }, { "epoch": 3.3203376033588965, "eval_loss": 2.0590689182281494, "eval_runtime": 14.7707, 
"eval_samples_per_second": 6.77, "eval_steps_per_second": 6.77, "eval_token_acc": 0.5227629513343799, "step": 77500 }, { "epoch": 3.3205518186881453, "grad_norm": 5.560390949249268, "learning_rate": 2.5383686958644636e-05, "loss": 1.8812137603759767, "memory(GiB)": 72.85, "step": 77505, "token_acc": 0.5319383259911894, "train_speed(iter/s)": 0.672405 }, { "epoch": 3.320766034017394, "grad_norm": 6.754106521606445, "learning_rate": 2.5377829514911822e-05, "loss": 2.2234264373779298, "memory(GiB)": 72.85, "step": 77510, "token_acc": 0.5015576323987538, "train_speed(iter/s)": 0.672413 }, { "epoch": 3.3209802493466434, "grad_norm": 5.200516700744629, "learning_rate": 2.537197251723272e-05, "loss": 2.1710128784179688, "memory(GiB)": 72.85, "step": 77515, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.672422 }, { "epoch": 3.321194464675892, "grad_norm": 4.567024230957031, "learning_rate": 2.5366115965713422e-05, "loss": 2.2721717834472654, "memory(GiB)": 72.85, "step": 77520, "token_acc": 0.523972602739726, "train_speed(iter/s)": 0.672418 }, { "epoch": 3.321408680005141, "grad_norm": 4.902412414550781, "learning_rate": 2.5360259860460066e-05, "loss": 2.0012184143066407, "memory(GiB)": 72.85, "step": 77525, "token_acc": 0.5180327868852459, "train_speed(iter/s)": 0.672422 }, { "epoch": 3.3216228953343903, "grad_norm": 7.3245439529418945, "learning_rate": 2.5354404201578696e-05, "loss": 2.1008686065673827, "memory(GiB)": 72.85, "step": 77530, "token_acc": 0.5667752442996743, "train_speed(iter/s)": 0.672438 }, { "epoch": 3.321837110663639, "grad_norm": 5.266022682189941, "learning_rate": 2.5348548989175424e-05, "loss": 2.3802108764648438, "memory(GiB)": 72.85, "step": 77535, "token_acc": 0.4935400516795866, "train_speed(iter/s)": 0.67244 }, { "epoch": 3.322051325992888, "grad_norm": 5.7024736404418945, "learning_rate": 2.534269422335632e-05, "loss": 2.0016096115112303, "memory(GiB)": 72.85, "step": 77540, "token_acc": 0.5418181818181819, "train_speed(iter/s)": 
0.672434 }, { "epoch": 3.322265541322137, "grad_norm": 5.80607271194458, "learning_rate": 2.533683990422744e-05, "loss": 2.3288997650146483, "memory(GiB)": 72.85, "step": 77545, "token_acc": 0.5353535353535354, "train_speed(iter/s)": 0.672433 }, { "epoch": 3.322479756651386, "grad_norm": 6.513274669647217, "learning_rate": 2.5330986031894823e-05, "loss": 2.5545467376708983, "memory(GiB)": 72.85, "step": 77550, "token_acc": 0.4634146341463415, "train_speed(iter/s)": 0.672442 }, { "epoch": 3.3226939719806348, "grad_norm": 5.8115997314453125, "learning_rate": 2.5325132606464552e-05, "loss": 1.9577383041381835, "memory(GiB)": 72.85, "step": 77555, "token_acc": 0.5340599455040872, "train_speed(iter/s)": 0.672442 }, { "epoch": 3.322908187309884, "grad_norm": 6.285534858703613, "learning_rate": 2.5319279628042647e-05, "loss": 2.2691015243530273, "memory(GiB)": 72.85, "step": 77560, "token_acc": 0.5341365461847389, "train_speed(iter/s)": 0.672441 }, { "epoch": 3.323122402639133, "grad_norm": 4.9844255447387695, "learning_rate": 2.5313427096735155e-05, "loss": 1.9256256103515625, "memory(GiB)": 72.85, "step": 77565, "token_acc": 0.5448717948717948, "train_speed(iter/s)": 0.672438 }, { "epoch": 3.3233366179683816, "grad_norm": 4.4250640869140625, "learning_rate": 2.530757501264808e-05, "loss": 2.0095163345336915, "memory(GiB)": 72.85, "step": 77570, "token_acc": 0.5152439024390244, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.323550833297631, "grad_norm": 4.817890167236328, "learning_rate": 2.5301723375887447e-05, "loss": 2.284314727783203, "memory(GiB)": 72.85, "step": 77575, "token_acc": 0.5181159420289855, "train_speed(iter/s)": 0.67244 }, { "epoch": 3.3237650486268797, "grad_norm": 8.283872604370117, "learning_rate": 2.5295872186559243e-05, "loss": 2.398703384399414, "memory(GiB)": 72.85, "step": 77580, "token_acc": 0.5209580838323353, "train_speed(iter/s)": 0.672435 }, { "epoch": 3.3239792639561285, "grad_norm": 5.5196685791015625, "learning_rate": 
2.5290021444769517e-05, "loss": 1.9274993896484376, "memory(GiB)": 72.85, "step": 77585, "token_acc": 0.5509554140127388, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.324193479285378, "grad_norm": 5.978655815124512, "learning_rate": 2.5284171150624225e-05, "loss": 2.1597240447998045, "memory(GiB)": 72.85, "step": 77590, "token_acc": 0.5228070175438596, "train_speed(iter/s)": 0.67243 }, { "epoch": 3.3244076946146266, "grad_norm": 6.916089057922363, "learning_rate": 2.5278321304229358e-05, "loss": 2.18876953125, "memory(GiB)": 72.85, "step": 77595, "token_acc": 0.4940828402366864, "train_speed(iter/s)": 0.672426 }, { "epoch": 3.3246219099438754, "grad_norm": 6.857678413391113, "learning_rate": 2.5272471905690876e-05, "loss": 2.2088064193725585, "memory(GiB)": 72.85, "step": 77600, "token_acc": 0.48253968253968255, "train_speed(iter/s)": 0.672437 }, { "epoch": 3.3248361252731247, "grad_norm": 6.2289137840271, "learning_rate": 2.526662295511478e-05, "loss": 2.158833694458008, "memory(GiB)": 72.85, "step": 77605, "token_acc": 0.5167785234899329, "train_speed(iter/s)": 0.672439 }, { "epoch": 3.3250503406023735, "grad_norm": 6.533128261566162, "learning_rate": 2.5260774452606993e-05, "loss": 1.9591856002807617, "memory(GiB)": 72.85, "step": 77610, "token_acc": 0.5296052631578947, "train_speed(iter/s)": 0.672438 }, { "epoch": 3.3252645559316223, "grad_norm": 7.689643859863281, "learning_rate": 2.5254926398273498e-05, "loss": 2.359796333312988, "memory(GiB)": 72.85, "step": 77615, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.672442 }, { "epoch": 3.3254787712608715, "grad_norm": 5.321580410003662, "learning_rate": 2.5249078792220227e-05, "loss": 2.030157279968262, "memory(GiB)": 72.85, "step": 77620, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672437 }, { "epoch": 3.3256929865901204, "grad_norm": 4.852499961853027, "learning_rate": 2.524323163455311e-05, "loss": 1.9927200317382812, "memory(GiB)": 72.85, "step": 77625, "token_acc": 
0.5793650793650794, "train_speed(iter/s)": 0.672431 }, { "epoch": 3.325907201919369, "grad_norm": 5.884352207183838, "learning_rate": 2.5237384925378084e-05, "loss": 1.8276771545410155, "memory(GiB)": 72.85, "step": 77630, "token_acc": 0.5894308943089431, "train_speed(iter/s)": 0.672429 }, { "epoch": 3.3261214172486184, "grad_norm": 4.043868541717529, "learning_rate": 2.5231538664801047e-05, "loss": 2.0049936294555666, "memory(GiB)": 72.85, "step": 77635, "token_acc": 0.5643564356435643, "train_speed(iter/s)": 0.672427 }, { "epoch": 3.3263356325778672, "grad_norm": 5.915707111358643, "learning_rate": 2.5225692852927913e-05, "loss": 1.8744390487670899, "memory(GiB)": 72.85, "step": 77640, "token_acc": 0.5196850393700787, "train_speed(iter/s)": 0.672424 }, { "epoch": 3.326549847907116, "grad_norm": 5.888396739959717, "learning_rate": 2.5219847489864606e-05, "loss": 2.058864784240723, "memory(GiB)": 72.85, "step": 77645, "token_acc": 0.5783475783475783, "train_speed(iter/s)": 0.672432 }, { "epoch": 3.3267640632363653, "grad_norm": 4.988405227661133, "learning_rate": 2.521400257571701e-05, "loss": 1.927703285217285, "memory(GiB)": 72.85, "step": 77650, "token_acc": 0.5767634854771784, "train_speed(iter/s)": 0.672433 }, { "epoch": 3.326978278565614, "grad_norm": 5.665439128875732, "learning_rate": 2.5208158110591006e-05, "loss": 2.0088016510009767, "memory(GiB)": 72.85, "step": 77655, "token_acc": 0.5590277777777778, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.327192493894863, "grad_norm": 6.244566440582275, "learning_rate": 2.5202314094592478e-05, "loss": 2.254979705810547, "memory(GiB)": 72.85, "step": 77660, "token_acc": 0.5225806451612903, "train_speed(iter/s)": 0.672449 }, { "epoch": 3.327406709224112, "grad_norm": 6.360942840576172, "learning_rate": 2.519647052782727e-05, "loss": 2.308005142211914, "memory(GiB)": 72.85, "step": 77665, "token_acc": 0.5302491103202847, "train_speed(iter/s)": 0.672452 }, { "epoch": 3.327620924553361, "grad_norm": 
6.238119125366211, "learning_rate": 2.519062741040129e-05, "loss": 2.514456939697266, "memory(GiB)": 72.85, "step": 77670, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.672453 }, { "epoch": 3.32783513988261, "grad_norm": 4.444169521331787, "learning_rate": 2.5184784742420342e-05, "loss": 2.2185258865356445, "memory(GiB)": 72.85, "step": 77675, "token_acc": 0.5399449035812672, "train_speed(iter/s)": 0.672453 }, { "epoch": 3.328049355211859, "grad_norm": 4.780026912689209, "learning_rate": 2.5178942523990324e-05, "loss": 2.1786237716674806, "memory(GiB)": 72.85, "step": 77680, "token_acc": 0.5335570469798657, "train_speed(iter/s)": 0.672456 }, { "epoch": 3.328263570541108, "grad_norm": 5.891591548919678, "learning_rate": 2.5173100755217037e-05, "loss": 2.0561519622802735, "memory(GiB)": 72.85, "step": 77685, "token_acc": 0.5245901639344263, "train_speed(iter/s)": 0.672464 }, { "epoch": 3.3284777858703567, "grad_norm": 9.894193649291992, "learning_rate": 2.5167259436206325e-05, "loss": 2.2847084045410155, "memory(GiB)": 72.85, "step": 77690, "token_acc": 0.5129151291512916, "train_speed(iter/s)": 0.672465 }, { "epoch": 3.328692001199606, "grad_norm": 5.071257591247559, "learning_rate": 2.5161418567063983e-05, "loss": 2.1288629531860352, "memory(GiB)": 72.85, "step": 77695, "token_acc": 0.5392491467576792, "train_speed(iter/s)": 0.672452 }, { "epoch": 3.3289062165288548, "grad_norm": 5.949367523193359, "learning_rate": 2.5155578147895862e-05, "loss": 2.2951633453369142, "memory(GiB)": 72.85, "step": 77700, "token_acc": 0.4952681388012618, "train_speed(iter/s)": 0.672446 }, { "epoch": 3.3291204318581036, "grad_norm": 6.691959381103516, "learning_rate": 2.5149738178807745e-05, "loss": 2.3892940521240233, "memory(GiB)": 72.85, "step": 77705, "token_acc": 0.46474358974358976, "train_speed(iter/s)": 0.672444 }, { "epoch": 3.329334647187353, "grad_norm": 7.946000099182129, "learning_rate": 2.5143898659905442e-05, "loss": 2.0721450805664063, "memory(GiB)": 72.85, 
"step": 77710, "token_acc": 0.5372168284789643, "train_speed(iter/s)": 0.672435 }, { "epoch": 3.3295488625166016, "grad_norm": 11.319766998291016, "learning_rate": 2.5138059591294727e-05, "loss": 2.2878223419189454, "memory(GiB)": 72.85, "step": 77715, "token_acc": 0.49382716049382713, "train_speed(iter/s)": 0.672435 }, { "epoch": 3.3297630778458505, "grad_norm": 4.537421226501465, "learning_rate": 2.513222097308138e-05, "loss": 2.024471473693848, "memory(GiB)": 72.85, "step": 77720, "token_acc": 0.5206896551724138, "train_speed(iter/s)": 0.67244 }, { "epoch": 3.3299772931750997, "grad_norm": 6.1394782066345215, "learning_rate": 2.512638280537117e-05, "loss": 2.376914215087891, "memory(GiB)": 72.85, "step": 77725, "token_acc": 0.5201465201465202, "train_speed(iter/s)": 0.672423 }, { "epoch": 3.3301915085043485, "grad_norm": 4.744594097137451, "learning_rate": 2.5120545088269877e-05, "loss": 2.1968690872192385, "memory(GiB)": 72.85, "step": 77730, "token_acc": 0.5133333333333333, "train_speed(iter/s)": 0.672425 }, { "epoch": 3.3304057238335973, "grad_norm": 6.35701847076416, "learning_rate": 2.5114707821883253e-05, "loss": 2.1517671585083007, "memory(GiB)": 72.85, "step": 77735, "token_acc": 0.5292207792207793, "train_speed(iter/s)": 0.672422 }, { "epoch": 3.3306199391628466, "grad_norm": 6.311168193817139, "learning_rate": 2.5108871006317046e-05, "loss": 2.1487070083618165, "memory(GiB)": 72.85, "step": 77740, "token_acc": 0.5228758169934641, "train_speed(iter/s)": 0.672428 }, { "epoch": 3.3308341544920954, "grad_norm": 5.010785102844238, "learning_rate": 2.510303464167698e-05, "loss": 2.3639652252197267, "memory(GiB)": 72.85, "step": 77745, "token_acc": 0.521594684385382, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.3310483698213442, "grad_norm": 4.743597507476807, "learning_rate": 2.509719872806878e-05, "loss": 2.3198688507080076, "memory(GiB)": 72.85, "step": 77750, "token_acc": 0.5181818181818182, "train_speed(iter/s)": 0.672432 }, { "epoch": 
3.3312625851505935, "grad_norm": 7.148629665374756, "learning_rate": 2.5091363265598196e-05, "loss": 2.1407371520996095, "memory(GiB)": 72.85, "step": 77755, "token_acc": 0.5170278637770898, "train_speed(iter/s)": 0.672424 }, { "epoch": 3.3314768004798423, "grad_norm": 5.554344177246094, "learning_rate": 2.5085528254370942e-05, "loss": 1.8708986282348632, "memory(GiB)": 72.85, "step": 77760, "token_acc": 0.5622641509433962, "train_speed(iter/s)": 0.672427 }, { "epoch": 3.331691015809091, "grad_norm": 6.564517021179199, "learning_rate": 2.5079693694492722e-05, "loss": 1.8143310546875, "memory(GiB)": 72.85, "step": 77765, "token_acc": 0.5765124555160143, "train_speed(iter/s)": 0.672432 }, { "epoch": 3.3319052311383404, "grad_norm": 5.266533851623535, "learning_rate": 2.507385958606922e-05, "loss": 1.8150938034057618, "memory(GiB)": 72.85, "step": 77770, "token_acc": 0.5907692307692308, "train_speed(iter/s)": 0.672433 }, { "epoch": 3.332119446467589, "grad_norm": 4.560990810394287, "learning_rate": 2.506802592920614e-05, "loss": 1.8716348648071288, "memory(GiB)": 72.85, "step": 77775, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.332333661796838, "grad_norm": 4.289434432983398, "learning_rate": 2.506219272400916e-05, "loss": 2.059602737426758, "memory(GiB)": 72.85, "step": 77780, "token_acc": 0.5808823529411765, "train_speed(iter/s)": 0.672436 }, { "epoch": 3.3325478771260872, "grad_norm": 5.377066135406494, "learning_rate": 2.5056359970583925e-05, "loss": 1.7915035247802735, "memory(GiB)": 72.85, "step": 77785, "token_acc": 0.5735849056603773, "train_speed(iter/s)": 0.672443 }, { "epoch": 3.332762092455336, "grad_norm": 5.159180641174316, "learning_rate": 2.505052766903615e-05, "loss": 1.982142448425293, "memory(GiB)": 72.85, "step": 77790, "token_acc": 0.552901023890785, "train_speed(iter/s)": 0.672441 }, { "epoch": 3.332976307784585, "grad_norm": 6.927950859069824, "learning_rate": 2.5044695819471475e-05, "loss": 
2.270427131652832, "memory(GiB)": 72.85, "step": 77795, "token_acc": 0.4981132075471698, "train_speed(iter/s)": 0.672446 }, { "epoch": 3.333190523113834, "grad_norm": 5.524957180023193, "learning_rate": 2.5038864421995545e-05, "loss": 2.072818946838379, "memory(GiB)": 72.85, "step": 77800, "token_acc": 0.5528169014084507, "train_speed(iter/s)": 0.67245 }, { "epoch": 3.333404738443083, "grad_norm": 5.152999401092529, "learning_rate": 2.5033033476713998e-05, "loss": 2.5688133239746094, "memory(GiB)": 72.85, "step": 77805, "token_acc": 0.47719298245614034, "train_speed(iter/s)": 0.672449 }, { "epoch": 3.3336189537723317, "grad_norm": 8.131933212280273, "learning_rate": 2.5027202983732456e-05, "loss": 2.5150787353515627, "memory(GiB)": 72.85, "step": 77810, "token_acc": 0.5121107266435986, "train_speed(iter/s)": 0.672445 }, { "epoch": 3.333833169101581, "grad_norm": 6.424735069274902, "learning_rate": 2.5021372943156575e-05, "loss": 2.118122100830078, "memory(GiB)": 72.85, "step": 77815, "token_acc": 0.5160349854227405, "train_speed(iter/s)": 0.672446 }, { "epoch": 3.33404738443083, "grad_norm": 4.819057941436768, "learning_rate": 2.501554335509195e-05, "loss": 1.8663997650146484, "memory(GiB)": 72.85, "step": 77820, "token_acc": 0.5181159420289855, "train_speed(iter/s)": 0.672451 }, { "epoch": 3.3342615997600786, "grad_norm": 6.089187145233154, "learning_rate": 2.5009714219644175e-05, "loss": 2.302365303039551, "memory(GiB)": 72.85, "step": 77825, "token_acc": 0.5405405405405406, "train_speed(iter/s)": 0.672449 }, { "epoch": 3.334475815089328, "grad_norm": 5.38602876663208, "learning_rate": 2.5003885536918897e-05, "loss": 2.0724502563476563, "memory(GiB)": 72.85, "step": 77830, "token_acc": 0.5127388535031847, "train_speed(iter/s)": 0.672453 }, { "epoch": 3.3346900304185767, "grad_norm": 6.0762481689453125, "learning_rate": 2.4998057307021677e-05, "loss": 2.2517433166503906, "memory(GiB)": 72.85, "step": 77835, "token_acc": 0.5034013605442177, "train_speed(iter/s)": 
0.672459 }, { "epoch": 3.3349042457478255, "grad_norm": 4.764061450958252, "learning_rate": 2.4992229530058087e-05, "loss": 1.8886686325073243, "memory(GiB)": 72.85, "step": 77840, "token_acc": 0.6046511627906976, "train_speed(iter/s)": 0.672465 }, { "epoch": 3.3351184610770748, "grad_norm": 5.9394917488098145, "learning_rate": 2.498640220613373e-05, "loss": 2.4434226989746093, "memory(GiB)": 72.85, "step": 77845, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.672469 }, { "epoch": 3.3353326764063236, "grad_norm": 4.740837097167969, "learning_rate": 2.498057533535417e-05, "loss": 2.070034980773926, "memory(GiB)": 72.85, "step": 77850, "token_acc": 0.525, "train_speed(iter/s)": 0.672475 }, { "epoch": 3.3355468917355724, "grad_norm": 5.908228397369385, "learning_rate": 2.4974748917824954e-05, "loss": 2.3838119506835938, "memory(GiB)": 72.85, "step": 77855, "token_acc": 0.4807692307692308, "train_speed(iter/s)": 0.672476 }, { "epoch": 3.3357611070648217, "grad_norm": 5.851901054382324, "learning_rate": 2.4968922953651635e-05, "loss": 2.17147274017334, "memory(GiB)": 72.85, "step": 77860, "token_acc": 0.49038461538461536, "train_speed(iter/s)": 0.672478 }, { "epoch": 3.3359753223940705, "grad_norm": 5.506640911102295, "learning_rate": 2.496309744293976e-05, "loss": 2.0831260681152344, "memory(GiB)": 72.85, "step": 77865, "token_acc": 0.5246478873239436, "train_speed(iter/s)": 0.672474 }, { "epoch": 3.3361895377233193, "grad_norm": 5.690036296844482, "learning_rate": 2.495727238579484e-05, "loss": 2.2846214294433596, "memory(GiB)": 72.85, "step": 77870, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.67247 }, { "epoch": 3.3364037530525685, "grad_norm": 5.46895694732666, "learning_rate": 2.495144778232244e-05, "loss": 1.791891098022461, "memory(GiB)": 72.85, "step": 77875, "token_acc": 0.6303501945525292, "train_speed(iter/s)": 0.672475 }, { "epoch": 3.3366179683818173, "grad_norm": 7.34682035446167, "learning_rate": 2.4945623632628068e-05, "loss": 
2.297465515136719, "memory(GiB)": 72.85, "step": 77880, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.672473 }, { "epoch": 3.336832183711066, "grad_norm": 7.600522518157959, "learning_rate": 2.4939799936817222e-05, "loss": 2.007778549194336, "memory(GiB)": 72.85, "step": 77885, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.672475 }, { "epoch": 3.3370463990403154, "grad_norm": 5.2380595207214355, "learning_rate": 2.493397669499541e-05, "loss": 2.1932748794555663, "memory(GiB)": 72.85, "step": 77890, "token_acc": 0.5239616613418531, "train_speed(iter/s)": 0.672478 }, { "epoch": 3.3372606143695642, "grad_norm": 5.420348167419434, "learning_rate": 2.4928153907268104e-05, "loss": 2.097338104248047, "memory(GiB)": 72.85, "step": 77895, "token_acc": 0.5192878338278932, "train_speed(iter/s)": 0.672478 }, { "epoch": 3.337474829698813, "grad_norm": 5.646077632904053, "learning_rate": 2.4922331573740808e-05, "loss": 1.9524646759033204, "memory(GiB)": 72.85, "step": 77900, "token_acc": 0.5300353356890459, "train_speed(iter/s)": 0.672478 }, { "epoch": 3.3376890450280623, "grad_norm": 5.869632720947266, "learning_rate": 2.491650969451902e-05, "loss": 2.3450847625732423, "memory(GiB)": 72.85, "step": 77905, "token_acc": 0.5052631578947369, "train_speed(iter/s)": 0.672476 }, { "epoch": 3.337903260357311, "grad_norm": 3.6756718158721924, "learning_rate": 2.4910688269708195e-05, "loss": 1.9346513748168945, "memory(GiB)": 72.85, "step": 77910, "token_acc": 0.5680933852140078, "train_speed(iter/s)": 0.672469 }, { "epoch": 3.33811747568656, "grad_norm": 5.453929901123047, "learning_rate": 2.4904867299413783e-05, "loss": 2.069680023193359, "memory(GiB)": 72.85, "step": 77915, "token_acc": 0.5464684014869888, "train_speed(iter/s)": 0.672464 }, { "epoch": 3.338331691015809, "grad_norm": 6.256455421447754, "learning_rate": 2.4899046783741235e-05, "loss": 1.90466251373291, "memory(GiB)": 72.85, "step": 77920, "token_acc": 0.5657894736842105, "train_speed(iter/s)": 
0.67247 }, { "epoch": 3.338545906345058, "grad_norm": 5.66930627822876, "learning_rate": 2.4893226722796008e-05, "loss": 2.1865379333496096, "memory(GiB)": 72.85, "step": 77925, "token_acc": 0.521311475409836, "train_speed(iter/s)": 0.672468 }, { "epoch": 3.338760121674307, "grad_norm": 5.960740566253662, "learning_rate": 2.4887407116683516e-05, "loss": 2.4598834991455076, "memory(GiB)": 72.85, "step": 77930, "token_acc": 0.48056537102473496, "train_speed(iter/s)": 0.672486 }, { "epoch": 3.338974337003556, "grad_norm": 9.47280502319336, "learning_rate": 2.488158796550921e-05, "loss": 2.375080108642578, "memory(GiB)": 72.85, "step": 77935, "token_acc": 0.5029940119760479, "train_speed(iter/s)": 0.672485 }, { "epoch": 3.339188552332805, "grad_norm": 4.71888542175293, "learning_rate": 2.487576926937851e-05, "loss": 2.0298488616943358, "memory(GiB)": 72.85, "step": 77940, "token_acc": 0.5283582089552239, "train_speed(iter/s)": 0.672487 }, { "epoch": 3.3394027676620537, "grad_norm": 5.951352119445801, "learning_rate": 2.4869951028396813e-05, "loss": 2.2283206939697267, "memory(GiB)": 72.85, "step": 77945, "token_acc": 0.5112781954887218, "train_speed(iter/s)": 0.672479 }, { "epoch": 3.339616982991303, "grad_norm": 4.941180229187012, "learning_rate": 2.486413324266953e-05, "loss": 2.358077621459961, "memory(GiB)": 72.85, "step": 77950, "token_acc": 0.4889589905362776, "train_speed(iter/s)": 0.672469 }, { "epoch": 3.3398311983205518, "grad_norm": 5.137448787689209, "learning_rate": 2.4858315912302033e-05, "loss": 2.2746252059936523, "memory(GiB)": 72.85, "step": 77955, "token_acc": 0.5538461538461539, "train_speed(iter/s)": 0.672477 }, { "epoch": 3.3400454136498006, "grad_norm": 5.7231926918029785, "learning_rate": 2.4852499037399747e-05, "loss": 2.2242902755737304, "memory(GiB)": 72.85, "step": 77960, "token_acc": 0.5291828793774319, "train_speed(iter/s)": 0.672481 }, { "epoch": 3.34025962897905, "grad_norm": 6.9696245193481445, "learning_rate": 2.484668261806803e-05, 
"loss": 2.223067283630371, "memory(GiB)": 72.85, "step": 77965, "token_acc": 0.5323076923076923, "train_speed(iter/s)": 0.672479 }, { "epoch": 3.3404738443082986, "grad_norm": 6.9839277267456055, "learning_rate": 2.4840866654412232e-05, "loss": 2.0402204513549806, "memory(GiB)": 72.85, "step": 77970, "token_acc": 0.5673469387755102, "train_speed(iter/s)": 0.672476 }, { "epoch": 3.3406880596375474, "grad_norm": 5.364129543304443, "learning_rate": 2.483505114653776e-05, "loss": 2.2201601028442384, "memory(GiB)": 72.85, "step": 77975, "token_acc": 0.5, "train_speed(iter/s)": 0.672461 }, { "epoch": 3.3409022749667967, "grad_norm": 5.9016923904418945, "learning_rate": 2.482923609454994e-05, "loss": 2.0141382217407227, "memory(GiB)": 72.85, "step": 77980, "token_acc": 0.56875, "train_speed(iter/s)": 0.672464 }, { "epoch": 3.3411164902960455, "grad_norm": 7.2292351722717285, "learning_rate": 2.48234214985541e-05, "loss": 1.9303117752075196, "memory(GiB)": 72.85, "step": 77985, "token_acc": 0.547244094488189, "train_speed(iter/s)": 0.672473 }, { "epoch": 3.3413307056252943, "grad_norm": 5.175879955291748, "learning_rate": 2.4817607358655614e-05, "loss": 1.85015869140625, "memory(GiB)": 72.85, "step": 77990, "token_acc": 0.6007462686567164, "train_speed(iter/s)": 0.672469 }, { "epoch": 3.3415449209545436, "grad_norm": 5.077057361602783, "learning_rate": 2.48117936749598e-05, "loss": 2.142904853820801, "memory(GiB)": 72.85, "step": 77995, "token_acc": 0.5698924731182796, "train_speed(iter/s)": 0.672465 }, { "epoch": 3.3417591362837924, "grad_norm": 6.855337619781494, "learning_rate": 2.480598044757197e-05, "loss": 2.000777244567871, "memory(GiB)": 72.85, "step": 78000, "token_acc": 0.5602605863192183, "train_speed(iter/s)": 0.672475 }, { "epoch": 3.3417591362837924, "eval_loss": 1.9903159141540527, "eval_runtime": 15.8744, "eval_samples_per_second": 6.299, "eval_steps_per_second": 6.299, "eval_token_acc": 0.5027855153203342, "step": 78000 }, { "epoch": 3.341973351613041, 
"grad_norm": 7.379778861999512, "learning_rate": 2.4800167676597436e-05, "loss": 1.7948945999145507, "memory(GiB)": 72.85, "step": 78005, "token_acc": 0.5237623762376238, "train_speed(iter/s)": 0.672362 }, { "epoch": 3.3421875669422905, "grad_norm": 5.420772075653076, "learning_rate": 2.4794355362141508e-05, "loss": 2.252170181274414, "memory(GiB)": 72.85, "step": 78010, "token_acc": 0.4935064935064935, "train_speed(iter/s)": 0.672365 }, { "epoch": 3.3424017822715393, "grad_norm": 5.7884674072265625, "learning_rate": 2.4788543504309454e-05, "loss": 2.0889373779296876, "memory(GiB)": 72.85, "step": 78015, "token_acc": 0.5642857142857143, "train_speed(iter/s)": 0.672361 }, { "epoch": 3.342615997600788, "grad_norm": 5.865636348724365, "learning_rate": 2.4782732103206607e-05, "loss": 2.1787071228027344, "memory(GiB)": 72.85, "step": 78020, "token_acc": 0.5289855072463768, "train_speed(iter/s)": 0.67236 }, { "epoch": 3.3428302129300373, "grad_norm": 14.306157112121582, "learning_rate": 2.4776921158938222e-05, "loss": 2.1048072814941405, "memory(GiB)": 72.85, "step": 78025, "token_acc": 0.560126582278481, "train_speed(iter/s)": 0.672362 }, { "epoch": 3.343044428259286, "grad_norm": 5.237390995025635, "learning_rate": 2.4771110671609573e-05, "loss": 2.040420150756836, "memory(GiB)": 72.85, "step": 78030, "token_acc": 0.5571955719557196, "train_speed(iter/s)": 0.672358 }, { "epoch": 3.343258643588535, "grad_norm": 6.730449676513672, "learning_rate": 2.4765300641325915e-05, "loss": 2.261764717102051, "memory(GiB)": 72.85, "step": 78035, "token_acc": 0.4789156626506024, "train_speed(iter/s)": 0.672361 }, { "epoch": 3.3434728589177842, "grad_norm": 5.074272632598877, "learning_rate": 2.4759491068192496e-05, "loss": 1.896172332763672, "memory(GiB)": 72.85, "step": 78040, "token_acc": 0.5679012345679012, "train_speed(iter/s)": 0.672368 }, { "epoch": 3.343687074247033, "grad_norm": 7.249850749969482, "learning_rate": 2.4753681952314573e-05, "loss": 2.1705528259277345, 
"memory(GiB)": 72.85, "step": 78045, "token_acc": 0.5097402597402597, "train_speed(iter/s)": 0.672369 }, { "epoch": 3.343901289576282, "grad_norm": 5.192586421966553, "learning_rate": 2.4747873293797396e-05, "loss": 2.014358711242676, "memory(GiB)": 72.85, "step": 78050, "token_acc": 0.5296167247386759, "train_speed(iter/s)": 0.672366 }, { "epoch": 3.344115504905531, "grad_norm": 6.490363597869873, "learning_rate": 2.474206509274619e-05, "loss": 2.3771238327026367, "memory(GiB)": 72.85, "step": 78055, "token_acc": 0.5169811320754717, "train_speed(iter/s)": 0.672376 }, { "epoch": 3.34432972023478, "grad_norm": 5.733760833740234, "learning_rate": 2.4736257349266167e-05, "loss": 1.6618124008178712, "memory(GiB)": 72.85, "step": 78060, "token_acc": 0.6310344827586207, "train_speed(iter/s)": 0.672382 }, { "epoch": 3.3445439355640287, "grad_norm": 4.48844575881958, "learning_rate": 2.473045006346254e-05, "loss": 2.312076187133789, "memory(GiB)": 72.85, "step": 78065, "token_acc": 0.4913294797687861, "train_speed(iter/s)": 0.67239 }, { "epoch": 3.344758150893278, "grad_norm": 5.845757961273193, "learning_rate": 2.472464323544052e-05, "loss": 2.414504623413086, "memory(GiB)": 72.85, "step": 78070, "token_acc": 0.4794520547945205, "train_speed(iter/s)": 0.672397 }, { "epoch": 3.344972366222527, "grad_norm": 4.865434646606445, "learning_rate": 2.4718836865305274e-05, "loss": 2.2823184967041015, "memory(GiB)": 72.85, "step": 78075, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.672407 }, { "epoch": 3.3451865815517756, "grad_norm": 7.142309665679932, "learning_rate": 2.4713030953162036e-05, "loss": 2.1846866607666016, "memory(GiB)": 72.85, "step": 78080, "token_acc": 0.5661538461538461, "train_speed(iter/s)": 0.672416 }, { "epoch": 3.345400796881025, "grad_norm": 5.430541038513184, "learning_rate": 2.470722549911596e-05, "loss": 1.731038475036621, "memory(GiB)": 72.85, "step": 78085, "token_acc": 0.597457627118644, "train_speed(iter/s)": 0.672421 }, { "epoch": 
3.3456150122102737, "grad_norm": 3.8567943572998047, "learning_rate": 2.470142050327222e-05, "loss": 2.190736770629883, "memory(GiB)": 72.85, "step": 78090, "token_acc": 0.5033557046979866, "train_speed(iter/s)": 0.672421 }, { "epoch": 3.3458292275395225, "grad_norm": 4.56056022644043, "learning_rate": 2.4695615965735984e-05, "loss": 2.0058530807495116, "memory(GiB)": 72.85, "step": 78095, "token_acc": 0.5578231292517006, "train_speed(iter/s)": 0.672424 }, { "epoch": 3.3460434428687718, "grad_norm": 5.001718997955322, "learning_rate": 2.468981188661238e-05, "loss": 2.1679780960083006, "memory(GiB)": 72.85, "step": 78100, "token_acc": 0.5275862068965518, "train_speed(iter/s)": 0.672424 }, { "epoch": 3.3462576581980206, "grad_norm": 5.219482898712158, "learning_rate": 2.46840082660066e-05, "loss": 1.9156764984130858, "memory(GiB)": 72.85, "step": 78105, "token_acc": 0.573076923076923, "train_speed(iter/s)": 0.672423 }, { "epoch": 3.3464718735272694, "grad_norm": 6.3270158767700195, "learning_rate": 2.467820510402375e-05, "loss": 2.526500701904297, "memory(GiB)": 72.85, "step": 78110, "token_acc": 0.4657039711191336, "train_speed(iter/s)": 0.672426 }, { "epoch": 3.3466860888565186, "grad_norm": 4.86251163482666, "learning_rate": 2.4672402400768973e-05, "loss": 2.369332122802734, "memory(GiB)": 72.85, "step": 78115, "token_acc": 0.5129032258064516, "train_speed(iter/s)": 0.672424 }, { "epoch": 3.3469003041857674, "grad_norm": 4.976129055023193, "learning_rate": 2.466660015634737e-05, "loss": 2.2470516204833983, "memory(GiB)": 72.85, "step": 78120, "token_acc": 0.506896551724138, "train_speed(iter/s)": 0.672425 }, { "epoch": 3.3471145195150163, "grad_norm": 4.052567005157471, "learning_rate": 2.4660798370864086e-05, "loss": 2.313091278076172, "memory(GiB)": 72.85, "step": 78125, "token_acc": 0.5269121813031161, "train_speed(iter/s)": 0.672423 }, { "epoch": 3.3473287348442655, "grad_norm": 4.707873821258545, "learning_rate": 2.465499704442419e-05, "loss": 
2.2105297088623046, "memory(GiB)": 72.85, "step": 78130, "token_acc": 0.5473372781065089, "train_speed(iter/s)": 0.67242 }, { "epoch": 3.3475429501735143, "grad_norm": 5.4046311378479, "learning_rate": 2.4649196177132818e-05, "loss": 1.932316207885742, "memory(GiB)": 72.85, "step": 78135, "token_acc": 0.5793650793650794, "train_speed(iter/s)": 0.672433 }, { "epoch": 3.347757165502763, "grad_norm": 6.785266876220703, "learning_rate": 2.4643395769095035e-05, "loss": 2.4140932083129885, "memory(GiB)": 72.85, "step": 78140, "token_acc": 0.5298013245033113, "train_speed(iter/s)": 0.672428 }, { "epoch": 3.3479713808320124, "grad_norm": 5.292311668395996, "learning_rate": 2.4637595820415925e-05, "loss": 1.8226140975952148, "memory(GiB)": 72.85, "step": 78145, "token_acc": 0.562962962962963, "train_speed(iter/s)": 0.672425 }, { "epoch": 3.348185596161261, "grad_norm": 5.844507694244385, "learning_rate": 2.4631796331200564e-05, "loss": 2.163597297668457, "memory(GiB)": 72.85, "step": 78150, "token_acc": 0.5324232081911263, "train_speed(iter/s)": 0.672429 }, { "epoch": 3.34839981149051, "grad_norm": 6.2908711433410645, "learning_rate": 2.4625997301554005e-05, "loss": 2.0513355255126955, "memory(GiB)": 72.85, "step": 78155, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.672432 }, { "epoch": 3.3486140268197593, "grad_norm": 5.323896408081055, "learning_rate": 2.462019873158129e-05, "loss": 2.198360061645508, "memory(GiB)": 72.85, "step": 78160, "token_acc": 0.4794952681388013, "train_speed(iter/s)": 0.672428 }, { "epoch": 3.348828242149008, "grad_norm": 6.218833923339844, "learning_rate": 2.46144006213875e-05, "loss": 2.0375110626220705, "memory(GiB)": 72.85, "step": 78165, "token_acc": 0.5516014234875445, "train_speed(iter/s)": 0.672419 }, { "epoch": 3.349042457478257, "grad_norm": 5.698317527770996, "learning_rate": 2.460860297107766e-05, "loss": 1.8386774063110352, "memory(GiB)": 72.85, "step": 78170, "token_acc": 0.5477941176470589, "train_speed(iter/s)": 
0.672409 }, { "epoch": 3.349256672807506, "grad_norm": 5.325355529785156, "learning_rate": 2.4602805780756795e-05, "loss": 2.0230751037597656, "memory(GiB)": 72.85, "step": 78175, "token_acc": 0.5345454545454545, "train_speed(iter/s)": 0.672402 }, { "epoch": 3.349470888136755, "grad_norm": 6.1211137771606445, "learning_rate": 2.459700905052993e-05, "loss": 2.160652923583984, "memory(GiB)": 72.85, "step": 78180, "token_acc": 0.5296167247386759, "train_speed(iter/s)": 0.672404 }, { "epoch": 3.349685103466004, "grad_norm": 5.326605319976807, "learning_rate": 2.459121278050205e-05, "loss": 2.2006868362426757, "memory(GiB)": 72.85, "step": 78185, "token_acc": 0.5292307692307693, "train_speed(iter/s)": 0.672414 }, { "epoch": 3.349899318795253, "grad_norm": 8.201047897338867, "learning_rate": 2.4585416970778207e-05, "loss": 1.9618993759155274, "memory(GiB)": 72.85, "step": 78190, "token_acc": 0.5524193548387096, "train_speed(iter/s)": 0.672412 }, { "epoch": 3.350113534124502, "grad_norm": 11.592696189880371, "learning_rate": 2.4579621621463362e-05, "loss": 2.195836067199707, "memory(GiB)": 72.85, "step": 78195, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.672409 }, { "epoch": 3.3503277494537507, "grad_norm": 6.351308345794678, "learning_rate": 2.4573826732662537e-05, "loss": 2.344388008117676, "memory(GiB)": 72.85, "step": 78200, "token_acc": 0.5230769230769231, "train_speed(iter/s)": 0.672411 }, { "epoch": 3.350541964783, "grad_norm": 5.666059494018555, "learning_rate": 2.4568032304480687e-05, "loss": 2.535609817504883, "memory(GiB)": 72.85, "step": 78205, "token_acc": 0.47761194029850745, "train_speed(iter/s)": 0.672403 }, { "epoch": 3.3507561801122487, "grad_norm": 5.040704727172852, "learning_rate": 2.4562238337022793e-05, "loss": 2.1427608489990235, "memory(GiB)": 72.85, "step": 78210, "token_acc": 0.573076923076923, "train_speed(iter/s)": 0.672399 }, { "epoch": 3.3509703954414976, "grad_norm": 5.590729713439941, "learning_rate": 2.455644483039381e-05, 
"loss": 2.249186706542969, "memory(GiB)": 72.85, "step": 78215, "token_acc": 0.5353535353535354, "train_speed(iter/s)": 0.672398 }, { "epoch": 3.351184610770747, "grad_norm": 7.371922969818115, "learning_rate": 2.455065178469868e-05, "loss": 1.9632017135620117, "memory(GiB)": 72.85, "step": 78220, "token_acc": 0.5856164383561644, "train_speed(iter/s)": 0.672404 }, { "epoch": 3.3513988260999956, "grad_norm": 6.2124128341674805, "learning_rate": 2.4544859200042386e-05, "loss": 2.21644287109375, "memory(GiB)": 72.85, "step": 78225, "token_acc": 0.4788732394366197, "train_speed(iter/s)": 0.672409 }, { "epoch": 3.3516130414292444, "grad_norm": 6.262801647186279, "learning_rate": 2.4539067076529847e-05, "loss": 2.261025238037109, "memory(GiB)": 72.85, "step": 78230, "token_acc": 0.49002849002849, "train_speed(iter/s)": 0.672412 }, { "epoch": 3.3518272567584937, "grad_norm": 7.40443754196167, "learning_rate": 2.4533275414265992e-05, "loss": 2.0159442901611326, "memory(GiB)": 72.85, "step": 78235, "token_acc": 0.5806451612903226, "train_speed(iter/s)": 0.672411 }, { "epoch": 3.3520414720877425, "grad_norm": 7.602849006652832, "learning_rate": 2.452748421335574e-05, "loss": 2.146440124511719, "memory(GiB)": 72.85, "step": 78240, "token_acc": 0.531055900621118, "train_speed(iter/s)": 0.672411 }, { "epoch": 3.3522556874169913, "grad_norm": 4.923918724060059, "learning_rate": 2.452169347390399e-05, "loss": 2.1371782302856444, "memory(GiB)": 72.85, "step": 78245, "token_acc": 0.5274261603375527, "train_speed(iter/s)": 0.672418 }, { "epoch": 3.3524699027462406, "grad_norm": 6.32198429107666, "learning_rate": 2.4515903196015684e-05, "loss": 1.9869380950927735, "memory(GiB)": 72.85, "step": 78250, "token_acc": 0.5843137254901961, "train_speed(iter/s)": 0.67242 }, { "epoch": 3.3526841180754894, "grad_norm": 5.285317897796631, "learning_rate": 2.4510113379795696e-05, "loss": 2.2164608001708985, "memory(GiB)": 72.85, "step": 78255, "token_acc": 0.546031746031746, 
"train_speed(iter/s)": 0.672419 }, { "epoch": 3.352898333404738, "grad_norm": 4.612018585205078, "learning_rate": 2.4504324025348912e-05, "loss": 2.1726078033447265, "memory(GiB)": 72.85, "step": 78260, "token_acc": 0.5095785440613027, "train_speed(iter/s)": 0.672419 }, { "epoch": 3.3531125487339875, "grad_norm": 4.267486095428467, "learning_rate": 2.44985351327802e-05, "loss": 2.3053049087524413, "memory(GiB)": 72.85, "step": 78265, "token_acc": 0.5534591194968553, "train_speed(iter/s)": 0.672416 }, { "epoch": 3.3533267640632363, "grad_norm": 5.944040298461914, "learning_rate": 2.4492746702194463e-05, "loss": 2.0227848052978517, "memory(GiB)": 72.85, "step": 78270, "token_acc": 0.5371900826446281, "train_speed(iter/s)": 0.672422 }, { "epoch": 3.353540979392485, "grad_norm": 5.345816135406494, "learning_rate": 2.448695873369653e-05, "loss": 1.9251968383789062, "memory(GiB)": 72.85, "step": 78275, "token_acc": 0.5548172757475083, "train_speed(iter/s)": 0.67243 }, { "epoch": 3.3537551947217343, "grad_norm": 6.716392993927002, "learning_rate": 2.4481171227391293e-05, "loss": 1.8280256271362305, "memory(GiB)": 72.85, "step": 78280, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672422 }, { "epoch": 3.353969410050983, "grad_norm": 6.545982837677002, "learning_rate": 2.4475384183383577e-05, "loss": 1.888587188720703, "memory(GiB)": 72.85, "step": 78285, "token_acc": 0.5990990990990991, "train_speed(iter/s)": 0.672427 }, { "epoch": 3.3541836253802324, "grad_norm": 5.651149749755859, "learning_rate": 2.4469597601778222e-05, "loss": 1.8090578079223634, "memory(GiB)": 72.85, "step": 78290, "token_acc": 0.5641025641025641, "train_speed(iter/s)": 0.672437 }, { "epoch": 3.354397840709481, "grad_norm": 5.665034294128418, "learning_rate": 2.446381148268005e-05, "loss": 2.0406005859375, "memory(GiB)": 72.85, "step": 78295, "token_acc": 0.56657223796034, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.35461205603873, "grad_norm": 5.493152141571045, "learning_rate": 
2.445802582619389e-05, "loss": 2.319660949707031, "memory(GiB)": 72.85, "step": 78300, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.672422 }, { "epoch": 3.3548262713679793, "grad_norm": 8.255622863769531, "learning_rate": 2.4452240632424538e-05, "loss": 2.068764877319336, "memory(GiB)": 72.85, "step": 78305, "token_acc": 0.5521472392638037, "train_speed(iter/s)": 0.672425 }, { "epoch": 3.355040486697228, "grad_norm": 7.146310329437256, "learning_rate": 2.4446455901476828e-05, "loss": 2.0753925323486326, "memory(GiB)": 72.85, "step": 78310, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672414 }, { "epoch": 3.355254702026477, "grad_norm": 5.411673069000244, "learning_rate": 2.4440671633455543e-05, "loss": 2.372344398498535, "memory(GiB)": 72.85, "step": 78315, "token_acc": 0.4735099337748344, "train_speed(iter/s)": 0.672424 }, { "epoch": 3.355468917355726, "grad_norm": 7.353855609893799, "learning_rate": 2.4434887828465463e-05, "loss": 2.2843511581420897, "memory(GiB)": 72.85, "step": 78320, "token_acc": 0.4796747967479675, "train_speed(iter/s)": 0.672424 }, { "epoch": 3.355683132684975, "grad_norm": 4.7366132736206055, "learning_rate": 2.4429104486611376e-05, "loss": 2.1738943099975585, "memory(GiB)": 72.85, "step": 78325, "token_acc": 0.5705128205128205, "train_speed(iter/s)": 0.67242 }, { "epoch": 3.355897348014224, "grad_norm": 7.534574508666992, "learning_rate": 2.4423321607998028e-05, "loss": 2.080642318725586, "memory(GiB)": 72.85, "step": 78330, "token_acc": 0.5120967741935484, "train_speed(iter/s)": 0.672414 }, { "epoch": 3.356111563343473, "grad_norm": 7.047098159790039, "learning_rate": 2.4417539192730226e-05, "loss": 2.0130743026733398, "memory(GiB)": 72.85, "step": 78335, "token_acc": 0.5769230769230769, "train_speed(iter/s)": 0.672422 }, { "epoch": 3.356325778672722, "grad_norm": 4.92242431640625, "learning_rate": 2.4411757240912675e-05, "loss": 2.296955680847168, "memory(GiB)": 72.85, "step": 78340, "token_acc": 
0.512987012987013, "train_speed(iter/s)": 0.672425 }, { "epoch": 3.3565399940019707, "grad_norm": 5.814269542694092, "learning_rate": 2.4405975752650168e-05, "loss": 2.067344856262207, "memory(GiB)": 72.85, "step": 78345, "token_acc": 0.5457413249211357, "train_speed(iter/s)": 0.672421 }, { "epoch": 3.35675420933122, "grad_norm": 5.40214204788208, "learning_rate": 2.4400194728047414e-05, "loss": 2.2188648223876952, "memory(GiB)": 72.85, "step": 78350, "token_acc": 0.5017667844522968, "train_speed(iter/s)": 0.672421 }, { "epoch": 3.3569684246604687, "grad_norm": 5.396111488342285, "learning_rate": 2.4394414167209152e-05, "loss": 2.523277473449707, "memory(GiB)": 72.85, "step": 78355, "token_acc": 0.4342105263157895, "train_speed(iter/s)": 0.672421 }, { "epoch": 3.3571826399897176, "grad_norm": 4.239984035491943, "learning_rate": 2.4388634070240097e-05, "loss": 2.229335975646973, "memory(GiB)": 72.85, "step": 78360, "token_acc": 0.518918918918919, "train_speed(iter/s)": 0.672421 }, { "epoch": 3.357396855318967, "grad_norm": 6.044880390167236, "learning_rate": 2.438285443724494e-05, "loss": 2.1185514450073244, "memory(GiB)": 72.85, "step": 78365, "token_acc": 0.5342960288808665, "train_speed(iter/s)": 0.672408 }, { "epoch": 3.3576110706482156, "grad_norm": 5.238282680511475, "learning_rate": 2.4377075268328426e-05, "loss": 1.9580888748168945, "memory(GiB)": 72.85, "step": 78370, "token_acc": 0.5932203389830508, "train_speed(iter/s)": 0.672412 }, { "epoch": 3.3578252859774644, "grad_norm": 6.716465473175049, "learning_rate": 2.437129656359523e-05, "loss": 2.1196006774902343, "memory(GiB)": 72.85, "step": 78375, "token_acc": 0.4940828402366864, "train_speed(iter/s)": 0.672406 }, { "epoch": 3.3580395013067137, "grad_norm": 7.9874396324157715, "learning_rate": 2.4365518323150037e-05, "loss": 2.069219779968262, "memory(GiB)": 72.85, "step": 78380, "token_acc": 0.5363984674329502, "train_speed(iter/s)": 0.672407 }, { "epoch": 3.3582537166359625, "grad_norm": 
6.299202919006348, "learning_rate": 2.4359740547097526e-05, "loss": 2.224335289001465, "memory(GiB)": 72.85, "step": 78385, "token_acc": 0.5288461538461539, "train_speed(iter/s)": 0.67241 }, { "epoch": 3.3584679319652113, "grad_norm": 5.210597515106201, "learning_rate": 2.435396323554235e-05, "loss": 2.0564081192016603, "memory(GiB)": 72.85, "step": 78390, "token_acc": 0.5131964809384164, "train_speed(iter/s)": 0.672413 }, { "epoch": 3.3586821472944606, "grad_norm": 8.070504188537598, "learning_rate": 2.4348186388589206e-05, "loss": 2.068504524230957, "memory(GiB)": 72.85, "step": 78395, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.672409 }, { "epoch": 3.3588963626237094, "grad_norm": 7.396309852600098, "learning_rate": 2.4342410006342732e-05, "loss": 2.1671051025390624, "memory(GiB)": 72.85, "step": 78400, "token_acc": 0.5103244837758112, "train_speed(iter/s)": 0.67241 }, { "epoch": 3.359110577952958, "grad_norm": 5.517651557922363, "learning_rate": 2.4336634088907566e-05, "loss": 2.120876693725586, "memory(GiB)": 72.85, "step": 78405, "token_acc": 0.5349544072948328, "train_speed(iter/s)": 0.672412 }, { "epoch": 3.3593247932822075, "grad_norm": 7.233891487121582, "learning_rate": 2.4330858636388348e-05, "loss": 1.9071144104003905, "memory(GiB)": 72.85, "step": 78410, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.672417 }, { "epoch": 3.3595390086114563, "grad_norm": 5.780675411224365, "learning_rate": 2.432508364888969e-05, "loss": 2.0784509658813475, "memory(GiB)": 72.85, "step": 78415, "token_acc": 0.568561872909699, "train_speed(iter/s)": 0.67243 }, { "epoch": 3.359753223940705, "grad_norm": 5.4921135902404785, "learning_rate": 2.431930912651622e-05, "loss": 1.8736156463623046, "memory(GiB)": 72.85, "step": 78420, "token_acc": 0.5267857142857143, "train_speed(iter/s)": 0.672436 }, { "epoch": 3.3599674392699543, "grad_norm": 5.743514060974121, "learning_rate": 2.4313535069372584e-05, "loss": 1.9359098434448243, "memory(GiB)": 72.85, 
"step": 78425, "token_acc": 0.5403508771929825, "train_speed(iter/s)": 0.672439 }, { "epoch": 3.360181654599203, "grad_norm": 4.234891414642334, "learning_rate": 2.430776147756335e-05, "loss": 1.9313114166259766, "memory(GiB)": 72.85, "step": 78430, "token_acc": 0.6079295154185022, "train_speed(iter/s)": 0.672432 }, { "epoch": 3.360395869928452, "grad_norm": 5.423024654388428, "learning_rate": 2.4301988351193117e-05, "loss": 2.1717191696166993, "memory(GiB)": 72.85, "step": 78435, "token_acc": 0.5094339622641509, "train_speed(iter/s)": 0.672437 }, { "epoch": 3.360610085257701, "grad_norm": 6.15116024017334, "learning_rate": 2.4296215690366476e-05, "loss": 2.3687244415283204, "memory(GiB)": 72.85, "step": 78440, "token_acc": 0.5047923322683706, "train_speed(iter/s)": 0.672437 }, { "epoch": 3.36082430058695, "grad_norm": 5.883719444274902, "learning_rate": 2.4290443495188e-05, "loss": 2.1735862731933593, "memory(GiB)": 72.85, "step": 78445, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672433 }, { "epoch": 3.361038515916199, "grad_norm": 6.550692558288574, "learning_rate": 2.4284671765762235e-05, "loss": 1.9158096313476562, "memory(GiB)": 72.85, "step": 78450, "token_acc": 0.5580645161290323, "train_speed(iter/s)": 0.672427 }, { "epoch": 3.361252731245448, "grad_norm": 5.841611862182617, "learning_rate": 2.427890050219378e-05, "loss": 1.9635515213012695, "memory(GiB)": 72.85, "step": 78455, "token_acc": 0.5608974358974359, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.361466946574697, "grad_norm": 5.382997989654541, "learning_rate": 2.427312970458718e-05, "loss": 2.0902378082275392, "memory(GiB)": 72.85, "step": 78460, "token_acc": 0.5426621160409556, "train_speed(iter/s)": 0.672436 }, { "epoch": 3.3616811619039457, "grad_norm": 6.385404109954834, "learning_rate": 2.426735937304696e-05, "loss": 2.136923599243164, "memory(GiB)": 72.85, "step": 78465, "token_acc": 0.5049180327868853, "train_speed(iter/s)": 0.672436 }, { "epoch": 3.361895377233195, 
"grad_norm": 5.954941272735596, "learning_rate": 2.426158950767767e-05, "loss": 2.284830856323242, "memory(GiB)": 72.85, "step": 78470, "token_acc": 0.49411764705882355, "train_speed(iter/s)": 0.67244 }, { "epoch": 3.362109592562444, "grad_norm": 5.316071510314941, "learning_rate": 2.425582010858381e-05, "loss": 1.9704671859741212, "memory(GiB)": 72.85, "step": 78475, "token_acc": 0.5868055555555556, "train_speed(iter/s)": 0.672448 }, { "epoch": 3.3623238078916926, "grad_norm": 6.568477153778076, "learning_rate": 2.4250051175869938e-05, "loss": 2.110428810119629, "memory(GiB)": 72.85, "step": 78480, "token_acc": 0.5418181818181819, "train_speed(iter/s)": 0.67244 }, { "epoch": 3.362538023220942, "grad_norm": 5.059680938720703, "learning_rate": 2.4244282709640542e-05, "loss": 2.024587631225586, "memory(GiB)": 72.85, "step": 78485, "token_acc": 0.5049833887043189, "train_speed(iter/s)": 0.672444 }, { "epoch": 3.3627522385501907, "grad_norm": 5.757900238037109, "learning_rate": 2.4238514710000103e-05, "loss": 1.885024070739746, "memory(GiB)": 72.85, "step": 78490, "token_acc": 0.5789473684210527, "train_speed(iter/s)": 0.672437 }, { "epoch": 3.3629664538794395, "grad_norm": 7.374701976776123, "learning_rate": 2.4232747177053167e-05, "loss": 1.930201530456543, "memory(GiB)": 72.85, "step": 78495, "token_acc": 0.5373134328358209, "train_speed(iter/s)": 0.672447 }, { "epoch": 3.3631806692086887, "grad_norm": 6.848402500152588, "learning_rate": 2.422698011090418e-05, "loss": 1.9395771026611328, "memory(GiB)": 72.85, "step": 78500, "token_acc": 0.5569620253164557, "train_speed(iter/s)": 0.67245 }, { "epoch": 3.3631806692086887, "eval_loss": 2.0120155811309814, "eval_runtime": 15.9195, "eval_samples_per_second": 6.282, "eval_steps_per_second": 6.282, "eval_token_acc": 0.5108538350217077, "step": 78500 }, { "epoch": 3.3633948845379376, "grad_norm": 6.513145923614502, "learning_rate": 2.4221213511657624e-05, "loss": 1.960738754272461, "memory(GiB)": 72.85, "step": 78505, 
"token_acc": 0.5240532241555783, "train_speed(iter/s)": 0.672338 }, { "epoch": 3.3636090998671864, "grad_norm": 7.686927318572998, "learning_rate": 2.421544737941795e-05, "loss": 2.1819183349609377, "memory(GiB)": 72.85, "step": 78510, "token_acc": 0.5799256505576208, "train_speed(iter/s)": 0.672341 }, { "epoch": 3.3638233151964356, "grad_norm": 8.032703399658203, "learning_rate": 2.4209681714289655e-05, "loss": 2.0267663955688477, "memory(GiB)": 72.85, "step": 78515, "token_acc": 0.5594713656387665, "train_speed(iter/s)": 0.672339 }, { "epoch": 3.3640375305256844, "grad_norm": 5.711641311645508, "learning_rate": 2.4203916516377167e-05, "loss": 2.1062999725341798, "memory(GiB)": 72.85, "step": 78520, "token_acc": 0.5296442687747036, "train_speed(iter/s)": 0.672344 }, { "epoch": 3.3642517458549333, "grad_norm": 7.944598197937012, "learning_rate": 2.4198151785784934e-05, "loss": 2.4065765380859374, "memory(GiB)": 72.85, "step": 78525, "token_acc": 0.4862068965517241, "train_speed(iter/s)": 0.672348 }, { "epoch": 3.3644659611841825, "grad_norm": 8.400487899780273, "learning_rate": 2.4192387522617384e-05, "loss": 2.293796348571777, "memory(GiB)": 72.85, "step": 78530, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.672349 }, { "epoch": 3.3646801765134313, "grad_norm": 4.836816310882568, "learning_rate": 2.4186623726978925e-05, "loss": 2.1694103240966798, "memory(GiB)": 72.85, "step": 78535, "token_acc": 0.5301587301587302, "train_speed(iter/s)": 0.672343 }, { "epoch": 3.36489439184268, "grad_norm": 5.747317790985107, "learning_rate": 2.418086039897401e-05, "loss": 1.898245620727539, "memory(GiB)": 72.85, "step": 78540, "token_acc": 0.5587188612099644, "train_speed(iter/s)": 0.672345 }, { "epoch": 3.3651086071719294, "grad_norm": 6.486248016357422, "learning_rate": 2.4175097538707025e-05, "loss": 2.4907739639282225, "memory(GiB)": 72.85, "step": 78545, "token_acc": 0.4560260586319218, "train_speed(iter/s)": 0.672349 }, { "epoch": 3.365322822501178, 
"grad_norm": 5.77797269821167, "learning_rate": 2.4169335146282378e-05, "loss": 2.198719024658203, "memory(GiB)": 72.85, "step": 78550, "token_acc": 0.5290102389078498, "train_speed(iter/s)": 0.672344 }, { "epoch": 3.365537037830427, "grad_norm": 4.3558502197265625, "learning_rate": 2.4163573221804457e-05, "loss": 1.9814998626708984, "memory(GiB)": 72.85, "step": 78555, "token_acc": 0.5728813559322034, "train_speed(iter/s)": 0.672345 }, { "epoch": 3.3657512531596763, "grad_norm": 6.3903374671936035, "learning_rate": 2.4157811765377624e-05, "loss": 2.2655193328857424, "memory(GiB)": 72.85, "step": 78560, "token_acc": 0.5512048192771084, "train_speed(iter/s)": 0.67235 }, { "epoch": 3.365965468488925, "grad_norm": 7.280360698699951, "learning_rate": 2.4152050777106273e-05, "loss": 1.931075668334961, "memory(GiB)": 72.85, "step": 78565, "token_acc": 0.5272108843537415, "train_speed(iter/s)": 0.672346 }, { "epoch": 3.366179683818174, "grad_norm": 5.69180154800415, "learning_rate": 2.414629025709479e-05, "loss": 1.9899517059326173, "memory(GiB)": 72.85, "step": 78570, "token_acc": 0.5523465703971119, "train_speed(iter/s)": 0.672341 }, { "epoch": 3.366393899147423, "grad_norm": 5.005707740783691, "learning_rate": 2.414053020544751e-05, "loss": 2.011918640136719, "memory(GiB)": 72.85, "step": 78575, "token_acc": 0.5767918088737202, "train_speed(iter/s)": 0.672332 }, { "epoch": 3.366608114476672, "grad_norm": 5.28460168838501, "learning_rate": 2.4134770622268783e-05, "loss": 2.1018938064575194, "memory(GiB)": 72.85, "step": 78580, "token_acc": 0.5287769784172662, "train_speed(iter/s)": 0.672338 }, { "epoch": 3.3668223298059208, "grad_norm": 4.5640034675598145, "learning_rate": 2.4129011507662945e-05, "loss": 2.3091463088989257, "memory(GiB)": 72.85, "step": 78585, "token_acc": 0.5447761194029851, "train_speed(iter/s)": 0.672333 }, { "epoch": 3.36703654513517, "grad_norm": 7.50919246673584, "learning_rate": 2.4123252861734334e-05, "loss": 2.292405891418457, "memory(GiB)": 
72.85, "step": 78590, "token_acc": 0.46099290780141844, "train_speed(iter/s)": 0.672328 }, { "epoch": 3.367250760464419, "grad_norm": 5.809482097625732, "learning_rate": 2.4117494684587262e-05, "loss": 2.2551248550415037, "memory(GiB)": 72.85, "step": 78595, "token_acc": 0.5159420289855072, "train_speed(iter/s)": 0.672325 }, { "epoch": 3.3674649757936677, "grad_norm": 5.095403671264648, "learning_rate": 2.4111736976326066e-05, "loss": 1.9830015182495118, "memory(GiB)": 72.85, "step": 78600, "token_acc": 0.5605536332179931, "train_speed(iter/s)": 0.672327 }, { "epoch": 3.367679191122917, "grad_norm": 6.172769069671631, "learning_rate": 2.410597973705504e-05, "loss": 2.1869571685791014, "memory(GiB)": 72.85, "step": 78605, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.672331 }, { "epoch": 3.3678934064521657, "grad_norm": 6.018836498260498, "learning_rate": 2.4100222966878484e-05, "loss": 2.1179195404052735, "memory(GiB)": 72.85, "step": 78610, "token_acc": 0.5404411764705882, "train_speed(iter/s)": 0.672342 }, { "epoch": 3.3681076217814145, "grad_norm": 10.127049446105957, "learning_rate": 2.409446666590068e-05, "loss": 2.0272537231445313, "memory(GiB)": 72.85, "step": 78615, "token_acc": 0.5544217687074829, "train_speed(iter/s)": 0.672344 }, { "epoch": 3.368321837110664, "grad_norm": 6.634837627410889, "learning_rate": 2.4088710834225896e-05, "loss": 2.098687934875488, "memory(GiB)": 72.85, "step": 78620, "token_acc": 0.5415162454873647, "train_speed(iter/s)": 0.672346 }, { "epoch": 3.3685360524399126, "grad_norm": 7.469727039337158, "learning_rate": 2.408295547195844e-05, "loss": 1.6584428787231444, "memory(GiB)": 72.85, "step": 78625, "token_acc": 0.608, "train_speed(iter/s)": 0.672337 }, { "epoch": 3.3687502677691614, "grad_norm": 5.276391506195068, "learning_rate": 2.4077200579202563e-05, "loss": 2.178669738769531, "memory(GiB)": 72.85, "step": 78630, "token_acc": 0.532608695652174, "train_speed(iter/s)": 0.672337 }, { "epoch": 3.3689644830984107, 
"grad_norm": 5.831383228302002, "learning_rate": 2.4071446156062494e-05, "loss": 1.9940128326416016, "memory(GiB)": 72.85, "step": 78635, "token_acc": 0.5648854961832062, "train_speed(iter/s)": 0.67234 }, { "epoch": 3.3691786984276595, "grad_norm": 5.799034118652344, "learning_rate": 2.406569220264252e-05, "loss": 2.1580106735229494, "memory(GiB)": 72.85, "step": 78640, "token_acc": 0.5261324041811847, "train_speed(iter/s)": 0.672342 }, { "epoch": 3.3693929137569083, "grad_norm": 5.153778553009033, "learning_rate": 2.405993871904686e-05, "loss": 2.1664922714233397, "memory(GiB)": 72.85, "step": 78645, "token_acc": 0.5054151624548736, "train_speed(iter/s)": 0.672334 }, { "epoch": 3.3696071290861576, "grad_norm": 5.492439270019531, "learning_rate": 2.4054185705379724e-05, "loss": 1.9680631637573243, "memory(GiB)": 72.85, "step": 78650, "token_acc": 0.578397212543554, "train_speed(iter/s)": 0.672336 }, { "epoch": 3.3698213444154064, "grad_norm": 4.780496597290039, "learning_rate": 2.404843316174537e-05, "loss": 2.0307266235351564, "memory(GiB)": 72.85, "step": 78655, "token_acc": 0.5302491103202847, "train_speed(iter/s)": 0.672332 }, { "epoch": 3.370035559744655, "grad_norm": 7.178018569946289, "learning_rate": 2.4042681088248e-05, "loss": 2.361353302001953, "memory(GiB)": 72.85, "step": 78660, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.672334 }, { "epoch": 3.3702497750739044, "grad_norm": 5.584402561187744, "learning_rate": 2.4036929484991804e-05, "loss": 2.221431922912598, "memory(GiB)": 72.85, "step": 78665, "token_acc": 0.5578635014836796, "train_speed(iter/s)": 0.672339 }, { "epoch": 3.3704639904031533, "grad_norm": 5.3778395652771, "learning_rate": 2.4031178352080992e-05, "loss": 2.117235565185547, "memory(GiB)": 72.85, "step": 78670, "token_acc": 0.5563139931740614, "train_speed(iter/s)": 0.672349 }, { "epoch": 3.370678205732402, "grad_norm": 5.101222038269043, "learning_rate": 2.402542768961974e-05, "loss": 2.1108638763427736, "memory(GiB)": 
72.85, "step": 78675, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672337 }, { "epoch": 3.3708924210616513, "grad_norm": 5.933165550231934, "learning_rate": 2.4019677497712216e-05, "loss": 1.9335945129394532, "memory(GiB)": 72.85, "step": 78680, "token_acc": 0.5475409836065573, "train_speed(iter/s)": 0.672342 }, { "epoch": 3.3711066363909, "grad_norm": 6.715673446655273, "learning_rate": 2.4013927776462625e-05, "loss": 1.884439468383789, "memory(GiB)": 72.85, "step": 78685, "token_acc": 0.5779467680608364, "train_speed(iter/s)": 0.672354 }, { "epoch": 3.371320851720149, "grad_norm": 4.639705181121826, "learning_rate": 2.4008178525975105e-05, "loss": 1.9899559020996094, "memory(GiB)": 72.85, "step": 78690, "token_acc": 0.5365079365079365, "train_speed(iter/s)": 0.672366 }, { "epoch": 3.371535067049398, "grad_norm": 5.394955635070801, "learning_rate": 2.4002429746353817e-05, "loss": 1.9797361373901368, "memory(GiB)": 72.85, "step": 78695, "token_acc": 0.535593220338983, "train_speed(iter/s)": 0.672366 }, { "epoch": 3.371749282378647, "grad_norm": 4.973645210266113, "learning_rate": 2.39966814377029e-05, "loss": 1.9710107803344727, "memory(GiB)": 72.85, "step": 78700, "token_acc": 0.5677655677655677, "train_speed(iter/s)": 0.672368 }, { "epoch": 3.371963497707896, "grad_norm": 4.999355316162109, "learning_rate": 2.3990933600126476e-05, "loss": 2.036507415771484, "memory(GiB)": 72.85, "step": 78705, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.672372 }, { "epoch": 3.372177713037145, "grad_norm": 4.373348236083984, "learning_rate": 2.3985186233728686e-05, "loss": 2.0414249420166017, "memory(GiB)": 72.85, "step": 78710, "token_acc": 0.5230769230769231, "train_speed(iter/s)": 0.672373 }, { "epoch": 3.372391928366394, "grad_norm": 5.846643924713135, "learning_rate": 2.3979439338613668e-05, "loss": 1.873883056640625, "memory(GiB)": 72.85, "step": 78715, "token_acc": 0.5746268656716418, "train_speed(iter/s)": 0.672385 }, { "epoch": 
3.3726061436956427, "grad_norm": 4.795635223388672, "learning_rate": 2.397369291488552e-05, "loss": 2.3139299392700194, "memory(GiB)": 72.85, "step": 78720, "token_acc": 0.479020979020979, "train_speed(iter/s)": 0.672383 }, { "epoch": 3.372820359024892, "grad_norm": 5.269816875457764, "learning_rate": 2.3967946962648334e-05, "loss": 2.3550220489501954, "memory(GiB)": 72.85, "step": 78725, "token_acc": 0.5033557046979866, "train_speed(iter/s)": 0.672392 }, { "epoch": 3.373034574354141, "grad_norm": 6.312513828277588, "learning_rate": 2.3962201482006215e-05, "loss": 2.2150457382202147, "memory(GiB)": 72.85, "step": 78730, "token_acc": 0.5328185328185329, "train_speed(iter/s)": 0.672387 }, { "epoch": 3.3732487896833896, "grad_norm": 5.645391941070557, "learning_rate": 2.395645647306324e-05, "loss": 2.320779800415039, "memory(GiB)": 72.85, "step": 78735, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.672383 }, { "epoch": 3.373463005012639, "grad_norm": 9.988771438598633, "learning_rate": 2.3950711935923466e-05, "loss": 2.256020736694336, "memory(GiB)": 72.85, "step": 78740, "token_acc": 0.5191082802547771, "train_speed(iter/s)": 0.672384 }, { "epoch": 3.3736772203418877, "grad_norm": 6.484866142272949, "learning_rate": 2.3944967870691003e-05, "loss": 2.29382381439209, "memory(GiB)": 72.85, "step": 78745, "token_acc": 0.5183946488294314, "train_speed(iter/s)": 0.672383 }, { "epoch": 3.3738914356711365, "grad_norm": 8.445618629455566, "learning_rate": 2.3939224277469886e-05, "loss": 2.0661745071411133, "memory(GiB)": 72.85, "step": 78750, "token_acc": 0.5246478873239436, "train_speed(iter/s)": 0.672385 }, { "epoch": 3.3741056510003857, "grad_norm": 5.0723981857299805, "learning_rate": 2.3933481156364168e-05, "loss": 2.2216070175170897, "memory(GiB)": 72.85, "step": 78755, "token_acc": 0.5363321799307958, "train_speed(iter/s)": 0.672386 }, { "epoch": 3.3743198663296345, "grad_norm": 4.953127861022949, "learning_rate": 2.392773850747789e-05, "loss": 
2.3419281005859376, "memory(GiB)": 72.85, "step": 78760, "token_acc": 0.49722222222222223, "train_speed(iter/s)": 0.672388 }, { "epoch": 3.3745340816588834, "grad_norm": 6.197782516479492, "learning_rate": 2.3921996330915076e-05, "loss": 2.0685569763183596, "memory(GiB)": 72.85, "step": 78765, "token_acc": 0.5563139931740614, "train_speed(iter/s)": 0.672395 }, { "epoch": 3.3747482969881326, "grad_norm": 5.539393424987793, "learning_rate": 2.391625462677977e-05, "loss": 2.003805732727051, "memory(GiB)": 72.85, "step": 78770, "token_acc": 0.5498154981549815, "train_speed(iter/s)": 0.672405 }, { "epoch": 3.3749625123173814, "grad_norm": 4.335538864135742, "learning_rate": 2.3910513395175988e-05, "loss": 2.131926155090332, "memory(GiB)": 72.85, "step": 78775, "token_acc": 0.5406976744186046, "train_speed(iter/s)": 0.672403 }, { "epoch": 3.3751767276466302, "grad_norm": 5.836961269378662, "learning_rate": 2.3904772636207723e-05, "loss": 2.0265872955322264, "memory(GiB)": 72.85, "step": 78780, "token_acc": 0.5571955719557196, "train_speed(iter/s)": 0.672413 }, { "epoch": 3.3753909429758795, "grad_norm": 6.376724720001221, "learning_rate": 2.3899032349978967e-05, "loss": 2.238981246948242, "memory(GiB)": 72.85, "step": 78785, "token_acc": 0.5266457680250783, "train_speed(iter/s)": 0.672423 }, { "epoch": 3.3756051583051283, "grad_norm": 6.085662841796875, "learning_rate": 2.389329253659374e-05, "loss": 2.2929901123046874, "memory(GiB)": 72.85, "step": 78790, "token_acc": 0.5426136363636364, "train_speed(iter/s)": 0.672412 }, { "epoch": 3.375819373634377, "grad_norm": 4.604628562927246, "learning_rate": 2.3887553196155995e-05, "loss": 2.3353748321533203, "memory(GiB)": 72.85, "step": 78795, "token_acc": 0.5335365853658537, "train_speed(iter/s)": 0.672417 }, { "epoch": 3.3760335889636264, "grad_norm": 4.373770713806152, "learning_rate": 2.3881814328769737e-05, "loss": 2.2801288604736327, "memory(GiB)": 72.85, "step": 78800, "token_acc": 0.5420289855072464, 
"train_speed(iter/s)": 0.672411 }, { "epoch": 3.376247804292875, "grad_norm": 6.457517147064209, "learning_rate": 2.387607593453891e-05, "loss": 2.202501678466797, "memory(GiB)": 72.85, "step": 78805, "token_acc": 0.534965034965035, "train_speed(iter/s)": 0.672415 }, { "epoch": 3.376462019622124, "grad_norm": 5.207837104797363, "learning_rate": 2.3870338013567474e-05, "loss": 1.9546354293823243, "memory(GiB)": 72.85, "step": 78810, "token_acc": 0.582089552238806, "train_speed(iter/s)": 0.672414 }, { "epoch": 3.3766762349513733, "grad_norm": 4.613273620605469, "learning_rate": 2.3864600565959377e-05, "loss": 1.952089500427246, "memory(GiB)": 72.85, "step": 78815, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672408 }, { "epoch": 3.376890450280622, "grad_norm": 6.705935955047607, "learning_rate": 2.3858863591818558e-05, "loss": 2.2833011627197264, "memory(GiB)": 72.85, "step": 78820, "token_acc": 0.5283687943262412, "train_speed(iter/s)": 0.672397 }, { "epoch": 3.377104665609871, "grad_norm": 6.864068031311035, "learning_rate": 2.385312709124893e-05, "loss": 2.260870170593262, "memory(GiB)": 72.85, "step": 78825, "token_acc": 0.5251572327044025, "train_speed(iter/s)": 0.672403 }, { "epoch": 3.37731888093912, "grad_norm": 6.301802635192871, "learning_rate": 2.3847391064354453e-05, "loss": 2.1088720321655274, "memory(GiB)": 72.85, "step": 78830, "token_acc": 0.5131086142322098, "train_speed(iter/s)": 0.672401 }, { "epoch": 3.377533096268369, "grad_norm": 6.314172267913818, "learning_rate": 2.3841655511239013e-05, "loss": 2.133808898925781, "memory(GiB)": 72.85, "step": 78835, "token_acc": 0.5253623188405797, "train_speed(iter/s)": 0.672403 }, { "epoch": 3.3777473115976178, "grad_norm": 6.004793643951416, "learning_rate": 2.3835920432006527e-05, "loss": 2.052215576171875, "memory(GiB)": 72.85, "step": 78840, "token_acc": 0.5551839464882943, "train_speed(iter/s)": 0.672397 }, { "epoch": 3.377961526926867, "grad_norm": 5.793295860290527, "learning_rate": 
2.3830185826760887e-05, "loss": 2.346698188781738, "memory(GiB)": 72.85, "step": 78845, "token_acc": 0.4880952380952381, "train_speed(iter/s)": 0.672406 }, { "epoch": 3.378175742256116, "grad_norm": 8.945484161376953, "learning_rate": 2.3824451695605958e-05, "loss": 2.420610237121582, "memory(GiB)": 72.85, "step": 78850, "token_acc": 0.5017301038062284, "train_speed(iter/s)": 0.672403 }, { "epoch": 3.3783899575853646, "grad_norm": 6.548686504364014, "learning_rate": 2.381871803864566e-05, "loss": 2.2891729354858397, "memory(GiB)": 72.85, "step": 78855, "token_acc": 0.532051282051282, "train_speed(iter/s)": 0.672408 }, { "epoch": 3.378604172914614, "grad_norm": 6.9229607582092285, "learning_rate": 2.381298485598383e-05, "loss": 2.2395044326782227, "memory(GiB)": 72.85, "step": 78860, "token_acc": 0.5057471264367817, "train_speed(iter/s)": 0.6724 }, { "epoch": 3.3788183882438627, "grad_norm": 5.833590984344482, "learning_rate": 2.3807252147724362e-05, "loss": 1.9092735290527343, "memory(GiB)": 72.85, "step": 78865, "token_acc": 0.5833333333333334, "train_speed(iter/s)": 0.672396 }, { "epoch": 3.3790326035731115, "grad_norm": 6.345298767089844, "learning_rate": 2.380151991397109e-05, "loss": 2.546767807006836, "memory(GiB)": 72.85, "step": 78870, "token_acc": 0.5279503105590062, "train_speed(iter/s)": 0.672397 }, { "epoch": 3.379246818902361, "grad_norm": 7.616945266723633, "learning_rate": 2.3795788154827862e-05, "loss": 2.2432350158691405, "memory(GiB)": 72.85, "step": 78875, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.672385 }, { "epoch": 3.3794610342316096, "grad_norm": 4.880653381347656, "learning_rate": 2.3790056870398515e-05, "loss": 2.040735626220703, "memory(GiB)": 72.85, "step": 78880, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672377 }, { "epoch": 3.3796752495608584, "grad_norm": 5.657181739807129, "learning_rate": 2.3784326060786855e-05, "loss": 2.1429187774658205, "memory(GiB)": 72.85, "step": 78885, "token_acc": 
0.511400651465798, "train_speed(iter/s)": 0.672385 }, { "epoch": 3.3798894648901077, "grad_norm": 4.677504539489746, "learning_rate": 2.3778595726096737e-05, "loss": 2.2108486175537108, "memory(GiB)": 72.85, "step": 78890, "token_acc": 0.5486725663716814, "train_speed(iter/s)": 0.672381 }, { "epoch": 3.3801036802193565, "grad_norm": 4.718134880065918, "learning_rate": 2.3772865866431955e-05, "loss": 2.5037275314331056, "memory(GiB)": 72.85, "step": 78895, "token_acc": 0.4790996784565916, "train_speed(iter/s)": 0.672374 }, { "epoch": 3.3803178955486053, "grad_norm": 7.431224346160889, "learning_rate": 2.3767136481896312e-05, "loss": 2.2114715576171875, "memory(GiB)": 72.85, "step": 78900, "token_acc": 0.4964788732394366, "train_speed(iter/s)": 0.672379 }, { "epoch": 3.3805321108778545, "grad_norm": 5.335478782653809, "learning_rate": 2.3761407572593603e-05, "loss": 1.7980995178222656, "memory(GiB)": 72.85, "step": 78905, "token_acc": 0.5720720720720721, "train_speed(iter/s)": 0.672378 }, { "epoch": 3.3807463262071034, "grad_norm": 7.490497589111328, "learning_rate": 2.375567913862759e-05, "loss": 2.3398075103759766, "memory(GiB)": 72.85, "step": 78910, "token_acc": 0.5124555160142349, "train_speed(iter/s)": 0.672382 }, { "epoch": 3.380960541536352, "grad_norm": 6.007510662078857, "learning_rate": 2.3749951180102082e-05, "loss": 2.2315719604492186, "memory(GiB)": 72.85, "step": 78915, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.67238 }, { "epoch": 3.3811747568656014, "grad_norm": 4.917358875274658, "learning_rate": 2.3744223697120836e-05, "loss": 2.1395523071289064, "memory(GiB)": 72.85, "step": 78920, "token_acc": 0.5371024734982333, "train_speed(iter/s)": 0.672358 }, { "epoch": 3.3813889721948502, "grad_norm": 5.888526916503906, "learning_rate": 2.373849668978761e-05, "loss": 2.215950775146484, "memory(GiB)": 72.85, "step": 78925, "token_acc": 0.5076923076923077, "train_speed(iter/s)": 0.672363 }, { "epoch": 3.381603187524099, "grad_norm": 
5.914276123046875, "learning_rate": 2.373277015820613e-05, "loss": 1.9846900939941405, "memory(GiB)": 72.85, "step": 78930, "token_acc": 0.575, "train_speed(iter/s)": 0.672361 }, { "epoch": 3.3818174028533483, "grad_norm": 6.261571884155273, "learning_rate": 2.3727044102480184e-05, "loss": 2.0689550399780274, "memory(GiB)": 72.85, "step": 78935, "token_acc": 0.5354330708661418, "train_speed(iter/s)": 0.672349 }, { "epoch": 3.382031618182597, "grad_norm": 4.6778974533081055, "learning_rate": 2.3721318522713453e-05, "loss": 1.8890342712402344, "memory(GiB)": 72.85, "step": 78940, "token_acc": 0.534375, "train_speed(iter/s)": 0.672345 }, { "epoch": 3.382245833511846, "grad_norm": 5.238224983215332, "learning_rate": 2.3715593419009714e-05, "loss": 1.7280630111694335, "memory(GiB)": 72.85, "step": 78945, "token_acc": 0.5641025641025641, "train_speed(iter/s)": 0.672343 }, { "epoch": 3.382460048841095, "grad_norm": 4.900081157684326, "learning_rate": 2.3709868791472652e-05, "loss": 2.3906726837158203, "memory(GiB)": 72.85, "step": 78950, "token_acc": 0.5159235668789809, "train_speed(iter/s)": 0.672345 }, { "epoch": 3.382674264170344, "grad_norm": 6.847629547119141, "learning_rate": 2.3704144640205983e-05, "loss": 2.3465721130371096, "memory(GiB)": 72.85, "step": 78955, "token_acc": 0.5239852398523985, "train_speed(iter/s)": 0.672357 }, { "epoch": 3.382888479499593, "grad_norm": 4.754970073699951, "learning_rate": 2.3698420965313395e-05, "loss": 2.1836217880249023, "memory(GiB)": 72.85, "step": 78960, "token_acc": 0.51985559566787, "train_speed(iter/s)": 0.672365 }, { "epoch": 3.383102694828842, "grad_norm": 5.436221599578857, "learning_rate": 2.3692697766898592e-05, "loss": 2.1931081771850587, "memory(GiB)": 72.85, "step": 78965, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672362 }, { "epoch": 3.383316910158091, "grad_norm": 7.275204181671143, "learning_rate": 2.3686975045065223e-05, "loss": 2.109739875793457, "memory(GiB)": 72.85, "step": 78970, 
"token_acc": 0.5433070866141733, "train_speed(iter/s)": 0.67237 }, { "epoch": 3.3835311254873397, "grad_norm": 6.409578800201416, "learning_rate": 2.3681252799917002e-05, "loss": 2.1019840240478516, "memory(GiB)": 72.85, "step": 78975, "token_acc": 0.5572519083969466, "train_speed(iter/s)": 0.672375 }, { "epoch": 3.383745340816589, "grad_norm": 7.203580379486084, "learning_rate": 2.367553103155758e-05, "loss": 2.462245559692383, "memory(GiB)": 72.85, "step": 78980, "token_acc": 0.4786885245901639, "train_speed(iter/s)": 0.672366 }, { "epoch": 3.3839595561458378, "grad_norm": 5.28834867477417, "learning_rate": 2.366980974009061e-05, "loss": 2.0739484786987306, "memory(GiB)": 72.85, "step": 78985, "token_acc": 0.5580645161290323, "train_speed(iter/s)": 0.672364 }, { "epoch": 3.3841737714750866, "grad_norm": 5.118025779724121, "learning_rate": 2.3664088925619732e-05, "loss": 2.1085344314575196, "memory(GiB)": 72.85, "step": 78990, "token_acc": 0.5767918088737202, "train_speed(iter/s)": 0.672371 }, { "epoch": 3.384387986804336, "grad_norm": 6.194166660308838, "learning_rate": 2.365836858824857e-05, "loss": 2.0494152069091798, "memory(GiB)": 72.85, "step": 78995, "token_acc": 0.543010752688172, "train_speed(iter/s)": 0.672377 }, { "epoch": 3.3846022021335846, "grad_norm": 7.416169166564941, "learning_rate": 2.365264872808079e-05, "loss": 1.7979007720947267, "memory(GiB)": 72.85, "step": 79000, "token_acc": 0.585820895522388, "train_speed(iter/s)": 0.672379 }, { "epoch": 3.3846022021335846, "eval_loss": 2.1969149112701416, "eval_runtime": 15.5953, "eval_samples_per_second": 6.412, "eval_steps_per_second": 6.412, "eval_token_acc": 0.5088131609870741, "step": 79000 }, { "epoch": 3.3848164174628335, "grad_norm": 5.890375137329102, "learning_rate": 2.3646929345219975e-05, "loss": 2.10141658782959, "memory(GiB)": 72.85, "step": 79005, "token_acc": 0.5206232813932172, "train_speed(iter/s)": 0.672279 }, { "epoch": 3.3850306327920827, "grad_norm": 6.218717098236084, 
"learning_rate": 2.3641210439769773e-05, "loss": 2.3294681549072265, "memory(GiB)": 72.85, "step": 79010, "token_acc": 0.5, "train_speed(iter/s)": 0.672283 }, { "epoch": 3.3852448481213315, "grad_norm": 4.016737937927246, "learning_rate": 2.3635492011833778e-05, "loss": 2.1744789123535155, "memory(GiB)": 72.85, "step": 79015, "token_acc": 0.5393586005830904, "train_speed(iter/s)": 0.67229 }, { "epoch": 3.3854590634505803, "grad_norm": 4.731693744659424, "learning_rate": 2.362977406151557e-05, "loss": 1.821640396118164, "memory(GiB)": 72.85, "step": 79020, "token_acc": 0.602112676056338, "train_speed(iter/s)": 0.672295 }, { "epoch": 3.3856732787798296, "grad_norm": 5.822431564331055, "learning_rate": 2.362405658891874e-05, "loss": 1.9648311614990235, "memory(GiB)": 72.85, "step": 79025, "token_acc": 0.5759493670886076, "train_speed(iter/s)": 0.672287 }, { "epoch": 3.3858874941090784, "grad_norm": 6.346989154815674, "learning_rate": 2.3618339594146853e-05, "loss": 2.055082321166992, "memory(GiB)": 72.85, "step": 79030, "token_acc": 0.5528169014084507, "train_speed(iter/s)": 0.672293 }, { "epoch": 3.3861017094383272, "grad_norm": 6.183883190155029, "learning_rate": 2.3612623077303514e-05, "loss": 2.1269863128662108, "memory(GiB)": 72.85, "step": 79035, "token_acc": 0.5503355704697986, "train_speed(iter/s)": 0.672306 }, { "epoch": 3.3863159247675765, "grad_norm": 6.707696437835693, "learning_rate": 2.360690703849226e-05, "loss": 2.192656707763672, "memory(GiB)": 72.85, "step": 79040, "token_acc": 0.565068493150685, "train_speed(iter/s)": 0.672302 }, { "epoch": 3.3865301400968253, "grad_norm": 6.016796588897705, "learning_rate": 2.360119147781664e-05, "loss": 2.180220603942871, "memory(GiB)": 72.85, "step": 79045, "token_acc": 0.5364238410596026, "train_speed(iter/s)": 0.672304 }, { "epoch": 3.386744355426074, "grad_norm": 5.3957319259643555, "learning_rate": 2.35954763953802e-05, "loss": 1.9064632415771485, "memory(GiB)": 72.85, "step": 79050, "token_acc": 
0.5533596837944664, "train_speed(iter/s)": 0.6723 }, { "epoch": 3.3869585707553234, "grad_norm": 6.239831924438477, "learning_rate": 2.3589761791286462e-05, "loss": 1.9130584716796875, "memory(GiB)": 72.85, "step": 79055, "token_acc": 0.5809859154929577, "train_speed(iter/s)": 0.672303 }, { "epoch": 3.387172786084572, "grad_norm": 6.16274881362915, "learning_rate": 2.3584047665638977e-05, "loss": 1.9005397796630858, "memory(GiB)": 72.85, "step": 79060, "token_acc": 0.5860805860805861, "train_speed(iter/s)": 0.672305 }, { "epoch": 3.387387001413821, "grad_norm": 7.763656139373779, "learning_rate": 2.3578334018541254e-05, "loss": 2.2791316986083983, "memory(GiB)": 72.85, "step": 79065, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.672299 }, { "epoch": 3.3876012167430702, "grad_norm": 5.900801658630371, "learning_rate": 2.3572620850096787e-05, "loss": 1.7641496658325195, "memory(GiB)": 72.85, "step": 79070, "token_acc": 0.5746031746031746, "train_speed(iter/s)": 0.672303 }, { "epoch": 3.387815432072319, "grad_norm": 5.988309860229492, "learning_rate": 2.356690816040909e-05, "loss": 1.9036504745483398, "memory(GiB)": 72.85, "step": 79075, "token_acc": 0.5768025078369906, "train_speed(iter/s)": 0.672293 }, { "epoch": 3.388029647401568, "grad_norm": 7.224788665771484, "learning_rate": 2.3561195949581634e-05, "loss": 2.1541679382324217, "memory(GiB)": 72.85, "step": 79080, "token_acc": 0.519434628975265, "train_speed(iter/s)": 0.672288 }, { "epoch": 3.388243862730817, "grad_norm": 7.0268168449401855, "learning_rate": 2.3555484217717904e-05, "loss": 2.1736541748046876, "memory(GiB)": 72.85, "step": 79085, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.672296 }, { "epoch": 3.388458078060066, "grad_norm": 5.7234954833984375, "learning_rate": 2.3549772964921413e-05, "loss": 2.1196533203125, "memory(GiB)": 72.85, "step": 79090, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672289 }, { "epoch": 3.3886722933893147, "grad_norm": 
6.464007377624512, "learning_rate": 2.354406219129559e-05, "loss": 2.279631423950195, "memory(GiB)": 72.85, "step": 79095, "token_acc": 0.5853658536585366, "train_speed(iter/s)": 0.672303 }, { "epoch": 3.388886508718564, "grad_norm": 4.4109392166137695, "learning_rate": 2.35383518969439e-05, "loss": 2.322914886474609, "memory(GiB)": 72.85, "step": 79100, "token_acc": 0.5211267605633803, "train_speed(iter/s)": 0.672303 }, { "epoch": 3.389100724047813, "grad_norm": 6.5456037521362305, "learning_rate": 2.353264208196979e-05, "loss": 1.8691713333129882, "memory(GiB)": 72.85, "step": 79105, "token_acc": 0.5344827586206896, "train_speed(iter/s)": 0.672302 }, { "epoch": 3.3893149393770616, "grad_norm": 4.747344017028809, "learning_rate": 2.35269327464767e-05, "loss": 2.051406669616699, "memory(GiB)": 72.85, "step": 79110, "token_acc": 0.5341246290801187, "train_speed(iter/s)": 0.672309 }, { "epoch": 3.389529154706311, "grad_norm": 7.130992412567139, "learning_rate": 2.3521223890568032e-05, "loss": 2.1934768676757814, "memory(GiB)": 72.85, "step": 79115, "token_acc": 0.5401929260450161, "train_speed(iter/s)": 0.672315 }, { "epoch": 3.3897433700355597, "grad_norm": 5.3059563636779785, "learning_rate": 2.3515515514347252e-05, "loss": 2.122756004333496, "memory(GiB)": 72.85, "step": 79120, "token_acc": 0.5547945205479452, "train_speed(iter/s)": 0.672312 }, { "epoch": 3.3899575853648085, "grad_norm": 7.056699275970459, "learning_rate": 2.3509807617917757e-05, "loss": 2.0076068878173827, "memory(GiB)": 72.85, "step": 79125, "token_acc": 0.5492063492063493, "train_speed(iter/s)": 0.672308 }, { "epoch": 3.3901718006940578, "grad_norm": 5.224781513214111, "learning_rate": 2.3504100201382945e-05, "loss": 2.0411842346191404, "memory(GiB)": 72.85, "step": 79130, "token_acc": 0.5572519083969466, "train_speed(iter/s)": 0.67231 }, { "epoch": 3.3903860160233066, "grad_norm": 5.008984088897705, "learning_rate": 2.3498393264846212e-05, "loss": 2.306869125366211, "memory(GiB)": 72.85, 
"step": 79135, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672316 }, { "epoch": 3.390600231352556, "grad_norm": 4.979738712310791, "learning_rate": 2.349268680841093e-05, "loss": 1.8412302017211915, "memory(GiB)": 72.85, "step": 79140, "token_acc": 0.5424354243542435, "train_speed(iter/s)": 0.672324 }, { "epoch": 3.3908144466818047, "grad_norm": 8.623056411743164, "learning_rate": 2.3486980832180505e-05, "loss": 2.21811580657959, "memory(GiB)": 72.85, "step": 79145, "token_acc": 0.5240793201133145, "train_speed(iter/s)": 0.672325 }, { "epoch": 3.3910286620110535, "grad_norm": 6.614287853240967, "learning_rate": 2.34812753362583e-05, "loss": 1.9162696838378905, "memory(GiB)": 72.85, "step": 79150, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672328 }, { "epoch": 3.3912428773403027, "grad_norm": 5.669014930725098, "learning_rate": 2.3475570320747647e-05, "loss": 2.213693618774414, "memory(GiB)": 72.85, "step": 79155, "token_acc": 0.5129151291512916, "train_speed(iter/s)": 0.672325 }, { "epoch": 3.3914570926695515, "grad_norm": 6.195414066314697, "learning_rate": 2.3469865785751938e-05, "loss": 2.0105049133300783, "memory(GiB)": 72.85, "step": 79160, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.67233 }, { "epoch": 3.3916713079988003, "grad_norm": 4.939703941345215, "learning_rate": 2.3464161731374496e-05, "loss": 1.98304443359375, "memory(GiB)": 72.85, "step": 79165, "token_acc": 0.5422077922077922, "train_speed(iter/s)": 0.672329 }, { "epoch": 3.3918855233280496, "grad_norm": 7.38211727142334, "learning_rate": 2.345845815771866e-05, "loss": 2.189778137207031, "memory(GiB)": 72.85, "step": 79170, "token_acc": 0.5307443365695793, "train_speed(iter/s)": 0.672309 }, { "epoch": 3.3920997386572984, "grad_norm": 5.983181953430176, "learning_rate": 2.3452755064887732e-05, "loss": 2.0260150909423826, "memory(GiB)": 72.85, "step": 79175, "token_acc": 0.5480427046263345, "train_speed(iter/s)": 0.672314 }, { "epoch": 3.3923139539865472, 
"grad_norm": 5.382136344909668, "learning_rate": 2.3447052452985068e-05, "loss": 1.8625995635986328, "memory(GiB)": 72.85, "step": 79180, "token_acc": 0.5593869731800766, "train_speed(iter/s)": 0.672315 }, { "epoch": 3.3925281693157965, "grad_norm": 5.533299446105957, "learning_rate": 2.3441350322113957e-05, "loss": 2.1339664459228516, "memory(GiB)": 72.85, "step": 79185, "token_acc": 0.4925373134328358, "train_speed(iter/s)": 0.672317 }, { "epoch": 3.3927423846450453, "grad_norm": 4.600045204162598, "learning_rate": 2.3435648672377702e-05, "loss": 1.874324417114258, "memory(GiB)": 72.85, "step": 79190, "token_acc": 0.5912162162162162, "train_speed(iter/s)": 0.672307 }, { "epoch": 3.392956599974294, "grad_norm": 5.926587104797363, "learning_rate": 2.342994750387959e-05, "loss": 2.1443149566650392, "memory(GiB)": 72.85, "step": 79195, "token_acc": 0.5620915032679739, "train_speed(iter/s)": 0.672298 }, { "epoch": 3.3931708153035434, "grad_norm": 4.801341533660889, "learning_rate": 2.3424246816722884e-05, "loss": 1.9392574310302735, "memory(GiB)": 72.85, "step": 79200, "token_acc": 0.5886287625418061, "train_speed(iter/s)": 0.672279 }, { "epoch": 3.393385030632792, "grad_norm": 5.715890407562256, "learning_rate": 2.3418546611010895e-05, "loss": 2.268429183959961, "memory(GiB)": 72.85, "step": 79205, "token_acc": 0.4986301369863014, "train_speed(iter/s)": 0.672263 }, { "epoch": 3.393599245962041, "grad_norm": 4.83927059173584, "learning_rate": 2.3412846886846867e-05, "loss": 2.108463668823242, "memory(GiB)": 72.85, "step": 79210, "token_acc": 0.5282392026578073, "train_speed(iter/s)": 0.672279 }, { "epoch": 3.3938134612912902, "grad_norm": 5.401548862457275, "learning_rate": 2.3407147644334067e-05, "loss": 2.3180301666259764, "memory(GiB)": 72.85, "step": 79215, "token_acc": 0.46715328467153283, "train_speed(iter/s)": 0.672281 }, { "epoch": 3.394027676620539, "grad_norm": 4.862604141235352, "learning_rate": 2.340144888357572e-05, "loss": 2.17315673828125, 
"memory(GiB)": 72.85, "step": 79220, "token_acc": 0.5141065830721003, "train_speed(iter/s)": 0.672283 }, { "epoch": 3.394241891949788, "grad_norm": 6.813900947570801, "learning_rate": 2.339575060467507e-05, "loss": 1.9637968063354492, "memory(GiB)": 72.85, "step": 79225, "token_acc": 0.5488372093023256, "train_speed(iter/s)": 0.672288 }, { "epoch": 3.394456107279037, "grad_norm": 7.535879135131836, "learning_rate": 2.3390052807735352e-05, "loss": 2.228262710571289, "memory(GiB)": 72.85, "step": 79230, "token_acc": 0.5059288537549407, "train_speed(iter/s)": 0.672285 }, { "epoch": 3.394670322608286, "grad_norm": 5.493967533111572, "learning_rate": 2.338435549285981e-05, "loss": 1.6331396102905273, "memory(GiB)": 72.85, "step": 79235, "token_acc": 0.5709342560553633, "train_speed(iter/s)": 0.67228 }, { "epoch": 3.3948845379375348, "grad_norm": 6.3893938064575195, "learning_rate": 2.337865866015163e-05, "loss": 2.090438461303711, "memory(GiB)": 72.85, "step": 79240, "token_acc": 0.547945205479452, "train_speed(iter/s)": 0.672273 }, { "epoch": 3.395098753266784, "grad_norm": 8.469143867492676, "learning_rate": 2.3372962309714023e-05, "loss": 2.175657844543457, "memory(GiB)": 72.85, "step": 79245, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.672291 }, { "epoch": 3.395312968596033, "grad_norm": 6.882409572601318, "learning_rate": 2.3367266441650188e-05, "loss": 2.0125255584716797, "memory(GiB)": 72.85, "step": 79250, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.672292 }, { "epoch": 3.3955271839252816, "grad_norm": 6.920419216156006, "learning_rate": 2.3361571056063302e-05, "loss": 2.1086040496826173, "memory(GiB)": 72.85, "step": 79255, "token_acc": 0.5445544554455446, "train_speed(iter/s)": 0.6723 }, { "epoch": 3.395741399254531, "grad_norm": 5.759465217590332, "learning_rate": 2.335587615305652e-05, "loss": 2.049224853515625, "memory(GiB)": 72.85, "step": 79260, "token_acc": 0.5469255663430421, "train_speed(iter/s)": 0.672296 }, { "epoch": 
3.3959556145837797, "grad_norm": 5.567898750305176, "learning_rate": 2.335018173273306e-05, "loss": 2.1854352951049805, "memory(GiB)": 72.85, "step": 79265, "token_acc": 0.5353159851301115, "train_speed(iter/s)": 0.672291 }, { "epoch": 3.3961698299130285, "grad_norm": 5.259408950805664, "learning_rate": 2.3344487795196063e-05, "loss": 2.0479888916015625, "memory(GiB)": 72.85, "step": 79270, "token_acc": 0.5076452599388379, "train_speed(iter/s)": 0.672301 }, { "epoch": 3.3963840452422778, "grad_norm": 5.819187164306641, "learning_rate": 2.3338794340548666e-05, "loss": 2.4261358261108397, "memory(GiB)": 72.85, "step": 79275, "token_acc": 0.4716417910447761, "train_speed(iter/s)": 0.672305 }, { "epoch": 3.3965982605715266, "grad_norm": 5.5626420974731445, "learning_rate": 2.3333101368894024e-05, "loss": 2.5824970245361327, "memory(GiB)": 72.85, "step": 79280, "token_acc": 0.4515235457063712, "train_speed(iter/s)": 0.6723 }, { "epoch": 3.3968124759007754, "grad_norm": 5.440097808837891, "learning_rate": 2.3327408880335245e-05, "loss": 2.066193199157715, "memory(GiB)": 72.85, "step": 79285, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672302 }, { "epoch": 3.3970266912300247, "grad_norm": 7.5093092918396, "learning_rate": 2.3321716874975498e-05, "loss": 1.8720849990844726, "memory(GiB)": 72.85, "step": 79290, "token_acc": 0.5272108843537415, "train_speed(iter/s)": 0.672298 }, { "epoch": 3.3972409065592735, "grad_norm": 6.3282694816589355, "learning_rate": 2.331602535291787e-05, "loss": 2.156378746032715, "memory(GiB)": 72.85, "step": 79295, "token_acc": 0.5340501792114696, "train_speed(iter/s)": 0.672302 }, { "epoch": 3.3974551218885223, "grad_norm": 5.401558876037598, "learning_rate": 2.331033431426546e-05, "loss": 2.126612091064453, "memory(GiB)": 72.85, "step": 79300, "token_acc": 0.541958041958042, "train_speed(iter/s)": 0.672302 }, { "epoch": 3.3976693372177715, "grad_norm": 4.950624942779541, "learning_rate": 2.3304643759121398e-05, "loss": 
1.8788894653320312, "memory(GiB)": 72.85, "step": 79305, "token_acc": 0.5728155339805825, "train_speed(iter/s)": 0.672308 }, { "epoch": 3.3978835525470203, "grad_norm": 4.821784496307373, "learning_rate": 2.3298953687588753e-05, "loss": 2.085074234008789, "memory(GiB)": 72.85, "step": 79310, "token_acc": 0.5275080906148867, "train_speed(iter/s)": 0.672312 }, { "epoch": 3.398097767876269, "grad_norm": 6.408305644989014, "learning_rate": 2.3293264099770613e-05, "loss": 1.892618751525879, "memory(GiB)": 72.85, "step": 79315, "token_acc": 0.5524193548387096, "train_speed(iter/s)": 0.67232 }, { "epoch": 3.3983119832055184, "grad_norm": 8.380623817443848, "learning_rate": 2.3287574995770028e-05, "loss": 2.1763343811035156, "memory(GiB)": 72.85, "step": 79320, "token_acc": 0.5607843137254902, "train_speed(iter/s)": 0.672324 }, { "epoch": 3.3985261985347672, "grad_norm": 4.753582954406738, "learning_rate": 2.32818863756901e-05, "loss": 2.117477035522461, "memory(GiB)": 72.85, "step": 79325, "token_acc": 0.5144508670520231, "train_speed(iter/s)": 0.672339 }, { "epoch": 3.398740413864016, "grad_norm": 5.293547630310059, "learning_rate": 2.327619823963386e-05, "loss": 1.7223344802856446, "memory(GiB)": 72.85, "step": 79330, "token_acc": 0.5726141078838174, "train_speed(iter/s)": 0.672344 }, { "epoch": 3.3989546291932653, "grad_norm": 9.325218200683594, "learning_rate": 2.3270510587704365e-05, "loss": 2.293425369262695, "memory(GiB)": 72.85, "step": 79335, "token_acc": 0.49836065573770494, "train_speed(iter/s)": 0.672345 }, { "epoch": 3.399168844522514, "grad_norm": 6.122609615325928, "learning_rate": 2.326482342000464e-05, "loss": 2.2215816497802736, "memory(GiB)": 72.85, "step": 79340, "token_acc": 0.5239616613418531, "train_speed(iter/s)": 0.672342 }, { "epoch": 3.399383059851763, "grad_norm": 5.069175720214844, "learning_rate": 2.3259136736637697e-05, "loss": 1.7359203338623046, "memory(GiB)": 72.85, "step": 79345, "token_acc": 0.6164383561643836, "train_speed(iter/s)": 
0.672347 }, { "epoch": 3.399597275181012, "grad_norm": 8.131698608398438, "learning_rate": 2.32534505377066e-05, "loss": 2.4151866912841795, "memory(GiB)": 72.85, "step": 79350, "token_acc": 0.4790874524714829, "train_speed(iter/s)": 0.672356 }, { "epoch": 3.399811490510261, "grad_norm": 6.372827053070068, "learning_rate": 2.324776482331434e-05, "loss": 2.373604965209961, "memory(GiB)": 72.85, "step": 79355, "token_acc": 0.5283582089552239, "train_speed(iter/s)": 0.672363 }, { "epoch": 3.40002570583951, "grad_norm": 4.9957804679870605, "learning_rate": 2.324207959356391e-05, "loss": 2.058245086669922, "memory(GiB)": 72.85, "step": 79360, "token_acc": 0.5559701492537313, "train_speed(iter/s)": 0.672367 }, { "epoch": 3.400239921168759, "grad_norm": 7.124074935913086, "learning_rate": 2.323639484855831e-05, "loss": 2.1115528106689454, "memory(GiB)": 72.85, "step": 79365, "token_acc": 0.5373134328358209, "train_speed(iter/s)": 0.672363 }, { "epoch": 3.400454136498008, "grad_norm": 6.653261661529541, "learning_rate": 2.3230710588400505e-05, "loss": 2.0646347045898437, "memory(GiB)": 72.85, "step": 79370, "token_acc": 0.5318352059925093, "train_speed(iter/s)": 0.672359 }, { "epoch": 3.4006683518272567, "grad_norm": 6.9153289794921875, "learning_rate": 2.322502681319349e-05, "loss": 2.101570892333984, "memory(GiB)": 72.85, "step": 79375, "token_acc": 0.54421768707483, "train_speed(iter/s)": 0.672368 }, { "epoch": 3.400882567156506, "grad_norm": 4.730045318603516, "learning_rate": 2.321934352304025e-05, "loss": 1.939908218383789, "memory(GiB)": 72.85, "step": 79380, "token_acc": 0.5222929936305732, "train_speed(iter/s)": 0.672374 }, { "epoch": 3.4010967824857548, "grad_norm": 6.10462760925293, "learning_rate": 2.321366071804373e-05, "loss": 1.9654024124145508, "memory(GiB)": 72.85, "step": 79385, "token_acc": 0.5411392405063291, "train_speed(iter/s)": 0.672371 }, { "epoch": 3.4013109978150036, "grad_norm": 5.078990936279297, "learning_rate": 2.320797839830686e-05, "loss": 
1.9739288330078124, "memory(GiB)": 72.85, "step": 79390, "token_acc": 0.5309090909090909, "train_speed(iter/s)": 0.672375 }, { "epoch": 3.401525213144253, "grad_norm": 6.189517974853516, "learning_rate": 2.3202296563932607e-05, "loss": 2.326152801513672, "memory(GiB)": 72.85, "step": 79395, "token_acc": 0.5071225071225072, "train_speed(iter/s)": 0.67238 }, { "epoch": 3.4017394284735016, "grad_norm": 5.979090213775635, "learning_rate": 2.3196615215023886e-05, "loss": 2.059357452392578, "memory(GiB)": 72.85, "step": 79400, "token_acc": 0.5078369905956113, "train_speed(iter/s)": 0.672383 }, { "epoch": 3.4019536438027504, "grad_norm": 5.071167945861816, "learning_rate": 2.3190934351683602e-05, "loss": 2.089107894897461, "memory(GiB)": 72.85, "step": 79405, "token_acc": 0.5347222222222222, "train_speed(iter/s)": 0.672387 }, { "epoch": 3.4021678591319997, "grad_norm": 7.323049068450928, "learning_rate": 2.3185253974014714e-05, "loss": 1.966751480102539, "memory(GiB)": 72.85, "step": 79410, "token_acc": 0.570957095709571, "train_speed(iter/s)": 0.67239 }, { "epoch": 3.4023820744612485, "grad_norm": 6.722159385681152, "learning_rate": 2.3179574082120105e-05, "loss": 2.042974662780762, "memory(GiB)": 72.85, "step": 79415, "token_acc": 0.5518518518518518, "train_speed(iter/s)": 0.67239 }, { "epoch": 3.4025962897904973, "grad_norm": 4.522604942321777, "learning_rate": 2.317389467610267e-05, "loss": 1.818944549560547, "memory(GiB)": 72.85, "step": 79420, "token_acc": 0.5732217573221757, "train_speed(iter/s)": 0.672389 }, { "epoch": 3.4028105051197466, "grad_norm": 4.640139579772949, "learning_rate": 2.3168215756065292e-05, "loss": 2.1232063293457033, "memory(GiB)": 72.85, "step": 79425, "token_acc": 0.5476190476190477, "train_speed(iter/s)": 0.672395 }, { "epoch": 3.4030247204489954, "grad_norm": 6.662623405456543, "learning_rate": 2.3162537322110843e-05, "loss": 1.8413450241088867, "memory(GiB)": 72.85, "step": 79430, "token_acc": 0.5919117647058824, "train_speed(iter/s)": 
0.672398 }, { "epoch": 3.403238935778244, "grad_norm": 4.694510459899902, "learning_rate": 2.3156859374342226e-05, "loss": 2.0084915161132812, "memory(GiB)": 72.85, "step": 79435, "token_acc": 0.5348101265822784, "train_speed(iter/s)": 0.672399 }, { "epoch": 3.4034531511074935, "grad_norm": 7.5482330322265625, "learning_rate": 2.315118191286228e-05, "loss": 1.9466712951660157, "memory(GiB)": 72.85, "step": 79440, "token_acc": 0.5168067226890757, "train_speed(iter/s)": 0.672403 }, { "epoch": 3.4036673664367423, "grad_norm": 7.602789878845215, "learning_rate": 2.314550493777386e-05, "loss": 2.385260009765625, "memory(GiB)": 72.85, "step": 79445, "token_acc": 0.4868421052631579, "train_speed(iter/s)": 0.672399 }, { "epoch": 3.403881581765991, "grad_norm": 7.3132195472717285, "learning_rate": 2.313982844917979e-05, "loss": 2.125393104553223, "memory(GiB)": 72.85, "step": 79450, "token_acc": 0.5430711610486891, "train_speed(iter/s)": 0.672397 }, { "epoch": 3.4040957970952403, "grad_norm": 5.379884719848633, "learning_rate": 2.3134152447182945e-05, "loss": 1.8384857177734375, "memory(GiB)": 72.85, "step": 79455, "token_acc": 0.5719298245614035, "train_speed(iter/s)": 0.672399 }, { "epoch": 3.404310012424489, "grad_norm": 5.236968994140625, "learning_rate": 2.3128476931886128e-05, "loss": 2.4559085845947264, "memory(GiB)": 72.85, "step": 79460, "token_acc": 0.4581005586592179, "train_speed(iter/s)": 0.672411 }, { "epoch": 3.404524227753738, "grad_norm": 6.112103462219238, "learning_rate": 2.3122801903392145e-05, "loss": 1.9434804916381836, "memory(GiB)": 72.85, "step": 79465, "token_acc": 0.541095890410959, "train_speed(iter/s)": 0.672408 }, { "epoch": 3.4047384430829872, "grad_norm": 6.268404483795166, "learning_rate": 2.311712736180383e-05, "loss": 2.3191518783569336, "memory(GiB)": 72.85, "step": 79470, "token_acc": 0.5451263537906137, "train_speed(iter/s)": 0.672415 }, { "epoch": 3.404952658412236, "grad_norm": 6.07773494720459, "learning_rate": 
2.3111453307223978e-05, "loss": 2.064434814453125, "memory(GiB)": 72.85, "step": 79475, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.672424 }, { "epoch": 3.405166873741485, "grad_norm": 5.293090343475342, "learning_rate": 2.3105779739755368e-05, "loss": 2.177202606201172, "memory(GiB)": 72.85, "step": 79480, "token_acc": 0.5304878048780488, "train_speed(iter/s)": 0.672422 }, { "epoch": 3.405381089070734, "grad_norm": 6.7336883544921875, "learning_rate": 2.3100106659500794e-05, "loss": 2.105005645751953, "memory(GiB)": 72.85, "step": 79485, "token_acc": 0.4793103448275862, "train_speed(iter/s)": 0.672423 }, { "epoch": 3.405595304399983, "grad_norm": 6.3248372077941895, "learning_rate": 2.3094434066562993e-05, "loss": 2.019771957397461, "memory(GiB)": 72.85, "step": 79490, "token_acc": 0.5582191780821918, "train_speed(iter/s)": 0.672424 }, { "epoch": 3.4058095197292317, "grad_norm": 5.026352882385254, "learning_rate": 2.3088761961044786e-05, "loss": 1.9722593307495118, "memory(GiB)": 72.85, "step": 79495, "token_acc": 0.5694915254237288, "train_speed(iter/s)": 0.672429 }, { "epoch": 3.406023735058481, "grad_norm": 4.537477493286133, "learning_rate": 2.3083090343048898e-05, "loss": 1.7671035766601562, "memory(GiB)": 72.85, "step": 79500, "token_acc": 0.5773584905660377, "train_speed(iter/s)": 0.672436 }, { "epoch": 3.406023735058481, "eval_loss": 2.0670557022094727, "eval_runtime": 15.9257, "eval_samples_per_second": 6.279, "eval_steps_per_second": 6.279, "eval_token_acc": 0.5083655083655083, "step": 79500 }, { "epoch": 3.40623795038773, "grad_norm": 5.767116546630859, "learning_rate": 2.3077419212678076e-05, "loss": 2.0160285949707033, "memory(GiB)": 72.85, "step": 79505, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672335 }, { "epoch": 3.4064521657169786, "grad_norm": 4.976794719696045, "learning_rate": 2.3071748570035063e-05, "loss": 2.1888145446777343, "memory(GiB)": 72.85, "step": 79510, "token_acc": 0.5231316725978647, 
"train_speed(iter/s)": 0.672341 }, { "epoch": 3.406666381046228, "grad_norm": 6.087699890136719, "learning_rate": 2.3066078415222563e-05, "loss": 2.1060165405273437, "memory(GiB)": 72.85, "step": 79515, "token_acc": 0.530791788856305, "train_speed(iter/s)": 0.672334 }, { "epoch": 3.4068805963754767, "grad_norm": 5.7303290367126465, "learning_rate": 2.306040874834334e-05, "loss": 1.9805124282836915, "memory(GiB)": 72.85, "step": 79520, "token_acc": 0.5462555066079295, "train_speed(iter/s)": 0.672336 }, { "epoch": 3.4070948117047255, "grad_norm": 5.660371780395508, "learning_rate": 2.3054739569500066e-05, "loss": 2.153875732421875, "memory(GiB)": 72.85, "step": 79525, "token_acc": 0.5242718446601942, "train_speed(iter/s)": 0.672338 }, { "epoch": 3.4073090270339748, "grad_norm": 6.777160167694092, "learning_rate": 2.3049070878795477e-05, "loss": 2.3794797897338866, "memory(GiB)": 72.85, "step": 79530, "token_acc": 0.5369127516778524, "train_speed(iter/s)": 0.672347 }, { "epoch": 3.4075232423632236, "grad_norm": 5.7932562828063965, "learning_rate": 2.3043402676332253e-05, "loss": 2.081649971008301, "memory(GiB)": 72.85, "step": 79535, "token_acc": 0.5059288537549407, "train_speed(iter/s)": 0.672353 }, { "epoch": 3.4077374576924724, "grad_norm": 5.022346019744873, "learning_rate": 2.303773496221308e-05, "loss": 2.176106262207031, "memory(GiB)": 72.85, "step": 79540, "token_acc": 0.565359477124183, "train_speed(iter/s)": 0.672352 }, { "epoch": 3.4079516730217216, "grad_norm": 7.336010932922363, "learning_rate": 2.3032067736540626e-05, "loss": 2.026538276672363, "memory(GiB)": 72.85, "step": 79545, "token_acc": 0.5292207792207793, "train_speed(iter/s)": 0.672348 }, { "epoch": 3.4081658883509705, "grad_norm": 4.746048450469971, "learning_rate": 2.3026400999417546e-05, "loss": 1.9300138473510742, "memory(GiB)": 72.85, "step": 79550, "token_acc": 0.5286195286195287, "train_speed(iter/s)": 0.672347 }, { "epoch": 3.4083801036802193, "grad_norm": 5.970099449157715, 
"learning_rate": 2.3020734750946534e-05, "loss": 1.8803611755371095, "memory(GiB)": 72.85, "step": 79555, "token_acc": 0.5974025974025974, "train_speed(iter/s)": 0.672351 }, { "epoch": 3.4085943190094685, "grad_norm": 6.959489822387695, "learning_rate": 2.3015068991230222e-05, "loss": 1.9724632263183595, "memory(GiB)": 72.85, "step": 79560, "token_acc": 0.5736434108527132, "train_speed(iter/s)": 0.672365 }, { "epoch": 3.4088085343387173, "grad_norm": 4.952245235443115, "learning_rate": 2.3009403720371247e-05, "loss": 1.8317047119140626, "memory(GiB)": 72.85, "step": 79565, "token_acc": 0.588, "train_speed(iter/s)": 0.672367 }, { "epoch": 3.409022749667966, "grad_norm": 5.08772087097168, "learning_rate": 2.300373893847224e-05, "loss": 2.158616065979004, "memory(GiB)": 72.85, "step": 79570, "token_acc": 0.49122807017543857, "train_speed(iter/s)": 0.672377 }, { "epoch": 3.4092369649972154, "grad_norm": 6.149524211883545, "learning_rate": 2.2998074645635815e-05, "loss": 2.1778308868408205, "memory(GiB)": 72.85, "step": 79575, "token_acc": 0.4984520123839009, "train_speed(iter/s)": 0.672377 }, { "epoch": 3.409451180326464, "grad_norm": 7.260812759399414, "learning_rate": 2.299241084196461e-05, "loss": 2.1176326751708983, "memory(GiB)": 72.85, "step": 79580, "token_acc": 0.5272108843537415, "train_speed(iter/s)": 0.672377 }, { "epoch": 3.409665395655713, "grad_norm": 6.177829265594482, "learning_rate": 2.2986747527561227e-05, "loss": 2.2599388122558595, "memory(GiB)": 72.85, "step": 79585, "token_acc": 0.509090909090909, "train_speed(iter/s)": 0.67238 }, { "epoch": 3.4098796109849623, "grad_norm": 5.177559852600098, "learning_rate": 2.2981084702528244e-05, "loss": 1.7470170974731445, "memory(GiB)": 72.85, "step": 79590, "token_acc": 0.5746268656716418, "train_speed(iter/s)": 0.672387 }, { "epoch": 3.410093826314211, "grad_norm": 5.729853630065918, "learning_rate": 2.297542236696824e-05, "loss": 2.1250572204589844, "memory(GiB)": 72.85, "step": 79595, "token_acc": 
0.5708154506437768, "train_speed(iter/s)": 0.672396 }, { "epoch": 3.41030804164346, "grad_norm": 5.669373989105225, "learning_rate": 2.296976052098383e-05, "loss": 2.0900930404663085, "memory(GiB)": 72.85, "step": 79600, "token_acc": 0.528169014084507, "train_speed(iter/s)": 0.672402 }, { "epoch": 3.410522256972709, "grad_norm": 5.558821201324463, "learning_rate": 2.2964099164677567e-05, "loss": 2.2780860900878905, "memory(GiB)": 72.85, "step": 79605, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.672404 }, { "epoch": 3.410736472301958, "grad_norm": 6.540903091430664, "learning_rate": 2.295843829815199e-05, "loss": 2.3515209197998046, "memory(GiB)": 72.85, "step": 79610, "token_acc": 0.5068493150684932, "train_speed(iter/s)": 0.672407 }, { "epoch": 3.410950687631207, "grad_norm": 5.658348560333252, "learning_rate": 2.295277792150969e-05, "loss": 2.2093650817871096, "memory(GiB)": 72.85, "step": 79615, "token_acc": 0.5236686390532544, "train_speed(iter/s)": 0.672398 }, { "epoch": 3.411164902960456, "grad_norm": 4.335690975189209, "learning_rate": 2.294711803485319e-05, "loss": 1.6882131576538086, "memory(GiB)": 72.85, "step": 79620, "token_acc": 0.6147859922178989, "train_speed(iter/s)": 0.672396 }, { "epoch": 3.411379118289705, "grad_norm": 6.64645528793335, "learning_rate": 2.2941458638285017e-05, "loss": 2.4279291152954103, "memory(GiB)": 72.85, "step": 79625, "token_acc": 0.4809384164222874, "train_speed(iter/s)": 0.672388 }, { "epoch": 3.4115933336189537, "grad_norm": 4.868781566619873, "learning_rate": 2.2935799731907707e-05, "loss": 2.122706985473633, "memory(GiB)": 72.85, "step": 79630, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.672389 }, { "epoch": 3.411807548948203, "grad_norm": 5.954343318939209, "learning_rate": 2.293014131582376e-05, "loss": 1.8198917388916016, "memory(GiB)": 72.85, "step": 79635, "token_acc": 0.5422535211267606, "train_speed(iter/s)": 0.672394 }, { "epoch": 3.4120217642774517, "grad_norm": 6.575009822845459, 
"learning_rate": 2.2924483390135716e-05, "loss": 1.8765260696411132, "memory(GiB)": 72.85, "step": 79640, "token_acc": 0.5669014084507042, "train_speed(iter/s)": 0.672404 }, { "epoch": 3.4122359796067006, "grad_norm": 4.803484916687012, "learning_rate": 2.291882595494605e-05, "loss": 2.035047721862793, "memory(GiB)": 72.85, "step": 79645, "token_acc": 0.5511551155115512, "train_speed(iter/s)": 0.672412 }, { "epoch": 3.41245019493595, "grad_norm": 5.452559471130371, "learning_rate": 2.2913169010357256e-05, "loss": 2.1587318420410155, "memory(GiB)": 72.85, "step": 79650, "token_acc": 0.5342465753424658, "train_speed(iter/s)": 0.672426 }, { "epoch": 3.4126644102651986, "grad_norm": 4.906037330627441, "learning_rate": 2.2907512556471817e-05, "loss": 1.8236398696899414, "memory(GiB)": 72.85, "step": 79655, "token_acc": 0.5465116279069767, "train_speed(iter/s)": 0.672429 }, { "epoch": 3.4128786255944474, "grad_norm": 5.323065757751465, "learning_rate": 2.290185659339218e-05, "loss": 2.126911926269531, "memory(GiB)": 72.85, "step": 79660, "token_acc": 0.5305343511450382, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.4130928409236967, "grad_norm": 5.109606742858887, "learning_rate": 2.2896201121220856e-05, "loss": 1.9403186798095704, "memory(GiB)": 72.85, "step": 79665, "token_acc": 0.5578231292517006, "train_speed(iter/s)": 0.672443 }, { "epoch": 3.4133070562529455, "grad_norm": 5.507060527801514, "learning_rate": 2.289054614006025e-05, "loss": 2.0522300720214846, "memory(GiB)": 72.85, "step": 79670, "token_acc": 0.559375, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.4135212715821943, "grad_norm": 5.197301387786865, "learning_rate": 2.288489165001285e-05, "loss": 2.178095245361328, "memory(GiB)": 72.85, "step": 79675, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672438 }, { "epoch": 3.4137354869114436, "grad_norm": 5.100247859954834, "learning_rate": 2.287923765118108e-05, "loss": 1.958098793029785, "memory(GiB)": 72.85, "step": 79680, "token_acc": 
0.5527156549520766, "train_speed(iter/s)": 0.672439 }, { "epoch": 3.4139497022406924, "grad_norm": 4.783187389373779, "learning_rate": 2.2873584143667352e-05, "loss": 1.9788402557373046, "memory(GiB)": 72.85, "step": 79685, "token_acc": 0.543918918918919, "train_speed(iter/s)": 0.672424 }, { "epoch": 3.414163917569941, "grad_norm": 4.832862377166748, "learning_rate": 2.28679311275741e-05, "loss": 2.342190170288086, "memory(GiB)": 72.85, "step": 79690, "token_acc": 0.4931972789115646, "train_speed(iter/s)": 0.672427 }, { "epoch": 3.4143781328991905, "grad_norm": 4.676830291748047, "learning_rate": 2.2862278603003707e-05, "loss": 2.014082145690918, "memory(GiB)": 72.85, "step": 79695, "token_acc": 0.5459940652818991, "train_speed(iter/s)": 0.672424 }, { "epoch": 3.4145923482284393, "grad_norm": 6.809175968170166, "learning_rate": 2.2856626570058613e-05, "loss": 2.0794441223144533, "memory(GiB)": 72.85, "step": 79700, "token_acc": 0.564625850340136, "train_speed(iter/s)": 0.672429 }, { "epoch": 3.414806563557688, "grad_norm": 5.4314188957214355, "learning_rate": 2.2850975028841194e-05, "loss": 2.0305482864379885, "memory(GiB)": 72.85, "step": 79705, "token_acc": 0.5406360424028268, "train_speed(iter/s)": 0.672421 }, { "epoch": 3.4150207788869373, "grad_norm": 5.819560527801514, "learning_rate": 2.2845323979453832e-05, "loss": 2.2130640029907225, "memory(GiB)": 72.85, "step": 79710, "token_acc": 0.5, "train_speed(iter/s)": 0.672427 }, { "epoch": 3.415234994216186, "grad_norm": 5.758284091949463, "learning_rate": 2.2839673421998892e-05, "loss": 1.9842210769653321, "memory(GiB)": 72.85, "step": 79715, "token_acc": 0.543026706231454, "train_speed(iter/s)": 0.672432 }, { "epoch": 3.415449209545435, "grad_norm": 9.044259071350098, "learning_rate": 2.283402335657873e-05, "loss": 2.0735279083251954, "memory(GiB)": 72.85, "step": 79720, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.67244 }, { "epoch": 3.415663424874684, "grad_norm": 5.179149627685547, 
"learning_rate": 2.2828373783295743e-05, "loss": 1.8536857604980468, "memory(GiB)": 72.85, "step": 79725, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672442 }, { "epoch": 3.415877640203933, "grad_norm": 4.337193012237549, "learning_rate": 2.2822724702252245e-05, "loss": 2.3296539306640627, "memory(GiB)": 72.85, "step": 79730, "token_acc": 0.4983922829581994, "train_speed(iter/s)": 0.672444 }, { "epoch": 3.416091855533182, "grad_norm": 5.108864784240723, "learning_rate": 2.281707611355059e-05, "loss": 2.2564796447753905, "memory(GiB)": 72.85, "step": 79735, "token_acc": 0.5295950155763239, "train_speed(iter/s)": 0.672445 }, { "epoch": 3.416306070862431, "grad_norm": 5.141253471374512, "learning_rate": 2.2811428017293096e-05, "loss": 2.318924331665039, "memory(GiB)": 72.85, "step": 79740, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.672443 }, { "epoch": 3.41652028619168, "grad_norm": 5.630430698394775, "learning_rate": 2.2805780413582075e-05, "loss": 2.2789302825927735, "memory(GiB)": 72.85, "step": 79745, "token_acc": 0.5210355987055016, "train_speed(iter/s)": 0.672428 }, { "epoch": 3.4167345015209287, "grad_norm": 5.031443119049072, "learning_rate": 2.2800133302519866e-05, "loss": 2.2211761474609375, "memory(GiB)": 72.85, "step": 79750, "token_acc": 0.5361842105263158, "train_speed(iter/s)": 0.672433 }, { "epoch": 3.416948716850178, "grad_norm": 5.505776405334473, "learning_rate": 2.2794486684208744e-05, "loss": 2.118651580810547, "memory(GiB)": 72.85, "step": 79755, "token_acc": 0.5578635014836796, "train_speed(iter/s)": 0.672438 }, { "epoch": 3.417162932179427, "grad_norm": 4.334819793701172, "learning_rate": 2.278884055875103e-05, "loss": 1.9013675689697265, "memory(GiB)": 72.85, "step": 79760, "token_acc": 0.5783132530120482, "train_speed(iter/s)": 0.67244 }, { "epoch": 3.4173771475086756, "grad_norm": 6.497598648071289, "learning_rate": 2.2783194926248996e-05, "loss": 1.902008056640625, "memory(GiB)": 72.85, "step": 79765, 
"token_acc": 0.5559440559440559, "train_speed(iter/s)": 0.672441 }, { "epoch": 3.417591362837925, "grad_norm": 6.591381549835205, "learning_rate": 2.2777549786804918e-05, "loss": 2.0457603454589846, "memory(GiB)": 72.85, "step": 79770, "token_acc": 0.5547703180212014, "train_speed(iter/s)": 0.672435 }, { "epoch": 3.4178055781671737, "grad_norm": 6.83133602142334, "learning_rate": 2.2771905140521067e-05, "loss": 2.085959243774414, "memory(GiB)": 72.85, "step": 79775, "token_acc": 0.511864406779661, "train_speed(iter/s)": 0.67243 }, { "epoch": 3.4180197934964225, "grad_norm": 6.790292739868164, "learning_rate": 2.2766260987499677e-05, "loss": 1.863018798828125, "memory(GiB)": 72.85, "step": 79780, "token_acc": 0.5513307984790875, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.4182340088256717, "grad_norm": 6.370758533477783, "learning_rate": 2.276061732784303e-05, "loss": 2.0142017364501954, "memory(GiB)": 72.85, "step": 79785, "token_acc": 0.5338078291814946, "train_speed(iter/s)": 0.672439 }, { "epoch": 3.4184482241549206, "grad_norm": 4.608632564544678, "learning_rate": 2.275497416165335e-05, "loss": 1.836810302734375, "memory(GiB)": 72.85, "step": 79790, "token_acc": 0.5984555984555985, "train_speed(iter/s)": 0.672439 }, { "epoch": 3.4186624394841694, "grad_norm": 6.042679309844971, "learning_rate": 2.2749331489032884e-05, "loss": 2.384006881713867, "memory(GiB)": 72.85, "step": 79795, "token_acc": 0.5, "train_speed(iter/s)": 0.672434 }, { "epoch": 3.4188766548134186, "grad_norm": 5.191427707672119, "learning_rate": 2.274368931008383e-05, "loss": 2.28237247467041, "memory(GiB)": 72.85, "step": 79800, "token_acc": 0.5167785234899329, "train_speed(iter/s)": 0.672439 }, { "epoch": 3.4190908701426674, "grad_norm": 5.69620943069458, "learning_rate": 2.27380476249084e-05, "loss": 2.2053043365478517, "memory(GiB)": 72.85, "step": 79805, "token_acc": 0.5594405594405595, "train_speed(iter/s)": 0.672449 }, { "epoch": 3.4193050854719163, "grad_norm": 6.07904052734375, 
"learning_rate": 2.2732406433608826e-05, "loss": 2.308112144470215, "memory(GiB)": 72.85, "step": 79810, "token_acc": 0.4952978056426332, "train_speed(iter/s)": 0.672455 }, { "epoch": 3.4195193008011655, "grad_norm": 5.453437805175781, "learning_rate": 2.2726765736287287e-05, "loss": 1.9576194763183594, "memory(GiB)": 72.85, "step": 79815, "token_acc": 0.5650557620817844, "train_speed(iter/s)": 0.672463 }, { "epoch": 3.4197335161304143, "grad_norm": 8.424448013305664, "learning_rate": 2.2721125533045955e-05, "loss": 2.2191694259643553, "memory(GiB)": 72.85, "step": 79820, "token_acc": 0.5035714285714286, "train_speed(iter/s)": 0.672469 }, { "epoch": 3.419947731459663, "grad_norm": 5.314571857452393, "learning_rate": 2.2715485823987043e-05, "loss": 1.9919879913330079, "memory(GiB)": 72.85, "step": 79825, "token_acc": 0.567741935483871, "train_speed(iter/s)": 0.67247 }, { "epoch": 3.4201619467889124, "grad_norm": 5.864910125732422, "learning_rate": 2.27098466092127e-05, "loss": 2.0879940032958983, "memory(GiB)": 72.85, "step": 79830, "token_acc": 0.5506329113924051, "train_speed(iter/s)": 0.672476 }, { "epoch": 3.420376162118161, "grad_norm": 4.75620698928833, "learning_rate": 2.2704207888825086e-05, "loss": 1.9933622360229493, "memory(GiB)": 72.85, "step": 79835, "token_acc": 0.5510835913312694, "train_speed(iter/s)": 0.672486 }, { "epoch": 3.42059037744741, "grad_norm": 8.985468864440918, "learning_rate": 2.2698569662926333e-05, "loss": 2.190827178955078, "memory(GiB)": 72.85, "step": 79840, "token_acc": 0.5298245614035088, "train_speed(iter/s)": 0.672493 }, { "epoch": 3.4208045927766593, "grad_norm": 5.854579448699951, "learning_rate": 2.2692931931618622e-05, "loss": 2.208965873718262, "memory(GiB)": 72.85, "step": 79845, "token_acc": 0.5, "train_speed(iter/s)": 0.672494 }, { "epoch": 3.421018808105908, "grad_norm": 5.591583251953125, "learning_rate": 2.2687294695004058e-05, "loss": 2.140612030029297, "memory(GiB)": 72.85, "step": 79850, "token_acc": 
0.5322033898305085, "train_speed(iter/s)": 0.6725 }, { "epoch": 3.421233023435157, "grad_norm": 7.47766637802124, "learning_rate": 2.2681657953184775e-05, "loss": 2.225539207458496, "memory(GiB)": 72.85, "step": 79855, "token_acc": 0.5137931034482759, "train_speed(iter/s)": 0.672502 }, { "epoch": 3.421447238764406, "grad_norm": 8.732163429260254, "learning_rate": 2.267602170626289e-05, "loss": 2.2642229080200194, "memory(GiB)": 72.85, "step": 79860, "token_acc": 0.5266903914590747, "train_speed(iter/s)": 0.672507 }, { "epoch": 3.421661454093655, "grad_norm": 6.2285237312316895, "learning_rate": 2.267038595434048e-05, "loss": 2.013208198547363, "memory(GiB)": 72.85, "step": 79865, "token_acc": 0.5390946502057613, "train_speed(iter/s)": 0.672506 }, { "epoch": 3.4218756694229038, "grad_norm": 6.2722368240356445, "learning_rate": 2.266475069751969e-05, "loss": 2.0661964416503906, "memory(GiB)": 72.85, "step": 79870, "token_acc": 0.5655430711610487, "train_speed(iter/s)": 0.672516 }, { "epoch": 3.422089884752153, "grad_norm": 5.977258205413818, "learning_rate": 2.2659115935902576e-05, "loss": 2.2976402282714843, "memory(GiB)": 72.85, "step": 79875, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672521 }, { "epoch": 3.422304100081402, "grad_norm": 5.29747200012207, "learning_rate": 2.265348166959123e-05, "loss": 2.0669384002685547, "memory(GiB)": 72.85, "step": 79880, "token_acc": 0.5487012987012987, "train_speed(iter/s)": 0.672537 }, { "epoch": 3.4225183154106507, "grad_norm": 6.340567588806152, "learning_rate": 2.2647847898687712e-05, "loss": 2.1455104827880858, "memory(GiB)": 72.85, "step": 79885, "token_acc": 0.5507246376811594, "train_speed(iter/s)": 0.672549 }, { "epoch": 3.4227325307399, "grad_norm": 7.0300612449646, "learning_rate": 2.2642214623294073e-05, "loss": 2.178609085083008, "memory(GiB)": 72.85, "step": 79890, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.672551 }, { "epoch": 3.4229467460691487, "grad_norm": 5.174409866333008, 
"learning_rate": 2.2636581843512378e-05, "loss": 2.311198616027832, "memory(GiB)": 72.85, "step": 79895, "token_acc": 0.5157593123209169, "train_speed(iter/s)": 0.672552 }, { "epoch": 3.4231609613983975, "grad_norm": 5.733304500579834, "learning_rate": 2.2630949559444693e-05, "loss": 2.005809020996094, "memory(GiB)": 72.85, "step": 79900, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672556 }, { "epoch": 3.423375176727647, "grad_norm": 10.47945785522461, "learning_rate": 2.2625317771193027e-05, "loss": 2.2973960876464843, "memory(GiB)": 72.85, "step": 79905, "token_acc": 0.45422535211267606, "train_speed(iter/s)": 0.672555 }, { "epoch": 3.4235893920568956, "grad_norm": 6.294013023376465, "learning_rate": 2.2619686478859416e-05, "loss": 1.867426300048828, "memory(GiB)": 72.85, "step": 79910, "token_acc": 0.5935483870967742, "train_speed(iter/s)": 0.67256 }, { "epoch": 3.4238036073861444, "grad_norm": 5.785637378692627, "learning_rate": 2.261405568254586e-05, "loss": 1.9249488830566406, "memory(GiB)": 72.85, "step": 79915, "token_acc": 0.5909090909090909, "train_speed(iter/s)": 0.672556 }, { "epoch": 3.4240178227153937, "grad_norm": 8.85044002532959, "learning_rate": 2.2608425382354383e-05, "loss": 1.872482681274414, "memory(GiB)": 72.85, "step": 79920, "token_acc": 0.5559105431309904, "train_speed(iter/s)": 0.672556 }, { "epoch": 3.4242320380446425, "grad_norm": 4.616954803466797, "learning_rate": 2.2602795578386954e-05, "loss": 2.253749656677246, "memory(GiB)": 72.85, "step": 79925, "token_acc": 0.5249343832020997, "train_speed(iter/s)": 0.672559 }, { "epoch": 3.4244462533738913, "grad_norm": 6.939560413360596, "learning_rate": 2.2597166270745603e-05, "loss": 2.3298789978027346, "memory(GiB)": 72.85, "step": 79930, "token_acc": 0.5034246575342466, "train_speed(iter/s)": 0.672562 }, { "epoch": 3.4246604687031406, "grad_norm": 5.338041305541992, "learning_rate": 2.2591537459532287e-05, "loss": 1.905430793762207, "memory(GiB)": 72.85, "step": 79935, 
"token_acc": 0.5113636363636364, "train_speed(iter/s)": 0.67256 }, { "epoch": 3.4248746840323894, "grad_norm": 6.22358512878418, "learning_rate": 2.258590914484898e-05, "loss": 2.167399597167969, "memory(GiB)": 72.85, "step": 79940, "token_acc": 0.5475409836065573, "train_speed(iter/s)": 0.672555 }, { "epoch": 3.425088899361638, "grad_norm": 6.43202543258667, "learning_rate": 2.2580281326797653e-05, "loss": 2.13156795501709, "memory(GiB)": 72.85, "step": 79945, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.672559 }, { "epoch": 3.4253031146908874, "grad_norm": 4.5130391120910645, "learning_rate": 2.2574654005480232e-05, "loss": 2.0807024002075196, "memory(GiB)": 72.85, "step": 79950, "token_acc": 0.5645161290322581, "train_speed(iter/s)": 0.672566 }, { "epoch": 3.4255173300201363, "grad_norm": 5.773980617523193, "learning_rate": 2.2569027180998697e-05, "loss": 2.168231964111328, "memory(GiB)": 72.85, "step": 79955, "token_acc": 0.5281899109792285, "train_speed(iter/s)": 0.672559 }, { "epoch": 3.425731545349385, "grad_norm": 6.216250896453857, "learning_rate": 2.2563400853454963e-05, "loss": 2.0343116760253905, "memory(GiB)": 72.85, "step": 79960, "token_acc": 0.5382059800664452, "train_speed(iter/s)": 0.672559 }, { "epoch": 3.4259457606786343, "grad_norm": 5.460191249847412, "learning_rate": 2.2557775022950948e-05, "loss": 2.030857276916504, "memory(GiB)": 72.85, "step": 79965, "token_acc": 0.5578231292517006, "train_speed(iter/s)": 0.672559 }, { "epoch": 3.426159976007883, "grad_norm": 5.585275650024414, "learning_rate": 2.25521496895886e-05, "loss": 1.95623779296875, "memory(GiB)": 72.85, "step": 79970, "token_acc": 0.5674846625766872, "train_speed(iter/s)": 0.672568 }, { "epoch": 3.426374191337132, "grad_norm": 5.552577972412109, "learning_rate": 2.2546524853469802e-05, "loss": 2.250509834289551, "memory(GiB)": 72.85, "step": 79975, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.672571 }, { "epoch": 3.426588406666381, "grad_norm": 
6.000086307525635, "learning_rate": 2.2540900514696466e-05, "loss": 2.0293251037597657, "memory(GiB)": 72.85, "step": 79980, "token_acc": 0.5310344827586206, "train_speed(iter/s)": 0.672578 }, { "epoch": 3.42680262199563, "grad_norm": 6.485193729400635, "learning_rate": 2.2535276673370453e-05, "loss": 2.065234375, "memory(GiB)": 72.85, "step": 79985, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672584 }, { "epoch": 3.427016837324879, "grad_norm": 6.067239284515381, "learning_rate": 2.2529653329593686e-05, "loss": 2.157871627807617, "memory(GiB)": 72.85, "step": 79990, "token_acc": 0.5661764705882353, "train_speed(iter/s)": 0.672587 }, { "epoch": 3.427231052654128, "grad_norm": 5.009445667266846, "learning_rate": 2.2524030483468024e-05, "loss": 1.852120590209961, "memory(GiB)": 72.85, "step": 79995, "token_acc": 0.5692307692307692, "train_speed(iter/s)": 0.672583 }, { "epoch": 3.427445267983377, "grad_norm": 3.865710735321045, "learning_rate": 2.251840813509532e-05, "loss": 1.9519939422607422, "memory(GiB)": 72.85, "step": 80000, "token_acc": 0.5373134328358209, "train_speed(iter/s)": 0.672575 }, { "epoch": 3.427445267983377, "eval_loss": 2.128011465072632, "eval_runtime": 16.5652, "eval_samples_per_second": 6.037, "eval_steps_per_second": 6.037, "eval_token_acc": 0.4854368932038835, "step": 80000 }, { "epoch": 3.4276594833126257, "grad_norm": 5.58018684387207, "learning_rate": 2.251278628457743e-05, "loss": 2.1407407760620116, "memory(GiB)": 72.85, "step": 80005, "token_acc": 0.4914772727272727, "train_speed(iter/s)": 0.672464 }, { "epoch": 3.427873698641875, "grad_norm": 5.657835483551025, "learning_rate": 2.2507164932016188e-05, "loss": 2.277152442932129, "memory(GiB)": 72.85, "step": 80010, "token_acc": 0.5173611111111112, "train_speed(iter/s)": 0.67246 }, { "epoch": 3.428087913971124, "grad_norm": 7.4161376953125, "learning_rate": 2.250154407751346e-05, "loss": 1.9966556549072265, "memory(GiB)": 72.85, "step": 80015, "token_acc": 
0.5584905660377358, "train_speed(iter/s)": 0.672465 }, { "epoch": 3.4283021293003726, "grad_norm": 6.934149742126465, "learning_rate": 2.2495923721171053e-05, "loss": 2.259766387939453, "memory(GiB)": 72.85, "step": 80020, "token_acc": 0.5256410256410257, "train_speed(iter/s)": 0.672468 }, { "epoch": 3.428516344629622, "grad_norm": 4.52301025390625, "learning_rate": 2.2490303863090793e-05, "loss": 2.168138885498047, "memory(GiB)": 72.85, "step": 80025, "token_acc": 0.4866666666666667, "train_speed(iter/s)": 0.672468 }, { "epoch": 3.4287305599588707, "grad_norm": 5.4144134521484375, "learning_rate": 2.2484684503374487e-05, "loss": 2.2524139404296877, "memory(GiB)": 72.85, "step": 80030, "token_acc": 0.5, "train_speed(iter/s)": 0.672468 }, { "epoch": 3.4289447752881195, "grad_norm": 5.0681843757629395, "learning_rate": 2.2479065642123908e-05, "loss": 1.9354799270629883, "memory(GiB)": 72.85, "step": 80035, "token_acc": 0.5779467680608364, "train_speed(iter/s)": 0.672466 }, { "epoch": 3.4291589906173687, "grad_norm": 6.210683345794678, "learning_rate": 2.2473447279440875e-05, "loss": 2.0096778869628906, "memory(GiB)": 72.85, "step": 80040, "token_acc": 0.5421686746987951, "train_speed(iter/s)": 0.672461 }, { "epoch": 3.4293732059466175, "grad_norm": 4.8299760818481445, "learning_rate": 2.246782941542718e-05, "loss": 1.9590009689331054, "memory(GiB)": 72.85, "step": 80045, "token_acc": 0.5458515283842795, "train_speed(iter/s)": 0.672467 }, { "epoch": 3.4295874212758664, "grad_norm": 5.839393138885498, "learning_rate": 2.2462212050184594e-05, "loss": 2.189015769958496, "memory(GiB)": 72.85, "step": 80050, "token_acc": 0.541095890410959, "train_speed(iter/s)": 0.672465 }, { "epoch": 3.4298016366051156, "grad_norm": 5.643980503082275, "learning_rate": 2.245659518381486e-05, "loss": 1.9136693954467774, "memory(GiB)": 72.85, "step": 80055, "token_acc": 0.5461538461538461, "train_speed(iter/s)": 0.672467 }, { "epoch": 3.4300158519343644, "grad_norm": 6.0850300788879395, 
"learning_rate": 2.2450978816419742e-05, "loss": 2.0195262908935545, "memory(GiB)": 72.85, "step": 80060, "token_acc": 0.5173611111111112, "train_speed(iter/s)": 0.672457 }, { "epoch": 3.4302300672636132, "grad_norm": 4.8551130294799805, "learning_rate": 2.244536294810099e-05, "loss": 2.2683683395385743, "memory(GiB)": 72.85, "step": 80065, "token_acc": 0.5243055555555556, "train_speed(iter/s)": 0.672465 }, { "epoch": 3.4304442825928625, "grad_norm": 6.975549221038818, "learning_rate": 2.2439747578960318e-05, "loss": 2.0429351806640623, "memory(GiB)": 72.85, "step": 80070, "token_acc": 0.5403508771929825, "train_speed(iter/s)": 0.672471 }, { "epoch": 3.4306584979221113, "grad_norm": 4.185390472412109, "learning_rate": 2.2434132709099487e-05, "loss": 2.59481143951416, "memory(GiB)": 72.85, "step": 80075, "token_acc": 0.47246376811594204, "train_speed(iter/s)": 0.672464 }, { "epoch": 3.43087271325136, "grad_norm": 6.251466274261475, "learning_rate": 2.24285183386202e-05, "loss": 2.4202293395996093, "memory(GiB)": 72.85, "step": 80080, "token_acc": 0.5182724252491694, "train_speed(iter/s)": 0.672463 }, { "epoch": 3.4310869285806094, "grad_norm": 6.013256549835205, "learning_rate": 2.2422904467624172e-05, "loss": 2.116780662536621, "memory(GiB)": 72.85, "step": 80085, "token_acc": 0.5410447761194029, "train_speed(iter/s)": 0.67246 }, { "epoch": 3.431301143909858, "grad_norm": null, "learning_rate": 2.241841373052363e-05, "loss": 2.264840316772461, "memory(GiB)": 72.85, "step": 80090, "token_acc": 0.5299684542586751, "train_speed(iter/s)": 0.672458 }, { "epoch": 3.431515359239107, "grad_norm": 5.82450008392334, "learning_rate": 2.241280075885372e-05, "loss": 2.4341196060180663, "memory(GiB)": 72.85, "step": 80095, "token_acc": 0.483974358974359, "train_speed(iter/s)": 0.672447 }, { "epoch": 3.4317295745683563, "grad_norm": 7.386930465698242, "learning_rate": 2.240718828695182e-05, "loss": 2.1588932037353517, "memory(GiB)": 72.85, "step": 80100, "token_acc": 
0.5894039735099338, "train_speed(iter/s)": 0.672455 }, { "epoch": 3.431943789897605, "grad_norm": 5.300327301025391, "learning_rate": 2.2401576314919586e-05, "loss": 2.1080305099487306, "memory(GiB)": 72.85, "step": 80105, "token_acc": 0.5028248587570622, "train_speed(iter/s)": 0.67245 }, { "epoch": 3.432158005226854, "grad_norm": 5.344230651855469, "learning_rate": 2.239596484285869e-05, "loss": 2.210310173034668, "memory(GiB)": 72.85, "step": 80110, "token_acc": 0.5616883116883117, "train_speed(iter/s)": 0.672444 }, { "epoch": 3.432372220556103, "grad_norm": 5.756407260894775, "learning_rate": 2.2390353870870785e-05, "loss": 2.1386062622070314, "memory(GiB)": 72.85, "step": 80115, "token_acc": 0.5160256410256411, "train_speed(iter/s)": 0.67245 }, { "epoch": 3.432586435885352, "grad_norm": 6.825236797332764, "learning_rate": 2.23847433990575e-05, "loss": 1.8960184097290038, "memory(GiB)": 72.85, "step": 80120, "token_acc": 0.5634920634920635, "train_speed(iter/s)": 0.672456 }, { "epoch": 3.4328006512146008, "grad_norm": 5.53325891494751, "learning_rate": 2.2379133427520514e-05, "loss": 2.3154130935668946, "memory(GiB)": 72.85, "step": 80125, "token_acc": 0.47230320699708456, "train_speed(iter/s)": 0.672467 }, { "epoch": 3.43301486654385, "grad_norm": 5.154168128967285, "learning_rate": 2.2373523956361436e-05, "loss": 1.9627262115478517, "memory(GiB)": 72.85, "step": 80130, "token_acc": 0.550185873605948, "train_speed(iter/s)": 0.672469 }, { "epoch": 3.433229081873099, "grad_norm": 4.636606216430664, "learning_rate": 2.2367914985681888e-05, "loss": 2.028879165649414, "memory(GiB)": 72.85, "step": 80135, "token_acc": 0.5633802816901409, "train_speed(iter/s)": 0.672466 }, { "epoch": 3.4334432972023476, "grad_norm": 5.175487518310547, "learning_rate": 2.236230651558348e-05, "loss": 2.285401725769043, "memory(GiB)": 72.85, "step": 80140, "token_acc": 0.541501976284585, "train_speed(iter/s)": 0.672468 }, { "epoch": 3.433657512531597, "grad_norm": 7.7666916847229, 
"learning_rate": 2.2356698546167814e-05, "loss": 2.207076644897461, "memory(GiB)": 72.85, "step": 80145, "token_acc": 0.5289256198347108, "train_speed(iter/s)": 0.672469 }, { "epoch": 3.4338717278608457, "grad_norm": 4.423090934753418, "learning_rate": 2.2351091077536467e-05, "loss": 2.2068460464477537, "memory(GiB)": 72.85, "step": 80150, "token_acc": 0.5228758169934641, "train_speed(iter/s)": 0.67248 }, { "epoch": 3.4340859431900945, "grad_norm": 6.398015975952148, "learning_rate": 2.2345484109791067e-05, "loss": 1.8514154434204102, "memory(GiB)": 72.85, "step": 80155, "token_acc": 0.57847533632287, "train_speed(iter/s)": 0.672484 }, { "epoch": 3.434300158519344, "grad_norm": 6.529756546020508, "learning_rate": 2.2339877643033136e-05, "loss": 2.256519889831543, "memory(GiB)": 72.85, "step": 80160, "token_acc": 0.5179282868525896, "train_speed(iter/s)": 0.672475 }, { "epoch": 3.4345143738485926, "grad_norm": 5.9537482261657715, "learning_rate": 2.2334271677364294e-05, "loss": 2.3536046981811523, "memory(GiB)": 72.85, "step": 80165, "token_acc": 0.5034246575342466, "train_speed(iter/s)": 0.672478 }, { "epoch": 3.4347285891778414, "grad_norm": 6.628918170928955, "learning_rate": 2.2328666212886067e-05, "loss": 2.3116836547851562, "memory(GiB)": 72.85, "step": 80170, "token_acc": 0.521311475409836, "train_speed(iter/s)": 0.67248 }, { "epoch": 3.4349428045070907, "grad_norm": 5.667978286743164, "learning_rate": 2.2323061249700017e-05, "loss": 2.1283924102783205, "memory(GiB)": 72.85, "step": 80175, "token_acc": 0.5324675324675324, "train_speed(iter/s)": 0.672475 }, { "epoch": 3.4351570198363395, "grad_norm": 5.531149387359619, "learning_rate": 2.2317456787907653e-05, "loss": 2.0830841064453125, "memory(GiB)": 72.85, "step": 80180, "token_acc": 0.5067567567567568, "train_speed(iter/s)": 0.672483 }, { "epoch": 3.4353712351655883, "grad_norm": 6.299276351928711, "learning_rate": 2.2311852827610547e-05, "loss": 2.353516960144043, "memory(GiB)": 72.85, "step": 80185, 
"token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.672491 }, { "epoch": 3.4355854504948375, "grad_norm": 5.841764450073242, "learning_rate": 2.2306249368910198e-05, "loss": 1.8503280639648438, "memory(GiB)": 72.85, "step": 80190, "token_acc": 0.545045045045045, "train_speed(iter/s)": 0.672478 }, { "epoch": 3.4357996658240864, "grad_norm": 5.141180038452148, "learning_rate": 2.2300646411908128e-05, "loss": 2.2713911056518556, "memory(GiB)": 72.85, "step": 80195, "token_acc": 0.534375, "train_speed(iter/s)": 0.672479 }, { "epoch": 3.436013881153335, "grad_norm": 6.051193714141846, "learning_rate": 2.2295043956705825e-05, "loss": 2.0163480758666994, "memory(GiB)": 72.85, "step": 80200, "token_acc": 0.543918918918919, "train_speed(iter/s)": 0.672488 }, { "epoch": 3.4362280964825844, "grad_norm": 8.214212417602539, "learning_rate": 2.228944200340477e-05, "loss": 1.9793811798095704, "memory(GiB)": 72.85, "step": 80205, "token_acc": 0.5090252707581228, "train_speed(iter/s)": 0.672492 }, { "epoch": 3.4364423118118332, "grad_norm": 6.557714462280273, "learning_rate": 2.2283840552106495e-05, "loss": 2.0764738082885743, "memory(GiB)": 72.85, "step": 80210, "token_acc": 0.5228215767634855, "train_speed(iter/s)": 0.67249 }, { "epoch": 3.436656527141082, "grad_norm": 4.946774482727051, "learning_rate": 2.2278239602912437e-05, "loss": 2.0061397552490234, "memory(GiB)": 72.85, "step": 80215, "token_acc": 0.5623003194888179, "train_speed(iter/s)": 0.672485 }, { "epoch": 3.4368707424703313, "grad_norm": 7.4695000648498535, "learning_rate": 2.2272639155924086e-05, "loss": 1.9578495025634766, "memory(GiB)": 72.85, "step": 80220, "token_acc": 0.5909090909090909, "train_speed(iter/s)": 0.67249 }, { "epoch": 3.43708495779958, "grad_norm": 4.868207931518555, "learning_rate": 2.226703921124288e-05, "loss": 2.1621543884277346, "memory(GiB)": 72.85, "step": 80225, "token_acc": 0.535483870967742, "train_speed(iter/s)": 0.672496 }, { "epoch": 3.437299173128829, "grad_norm": 
6.762542247772217, "learning_rate": 2.2261439768970272e-05, "loss": 2.356558990478516, "memory(GiB)": 72.85, "step": 80230, "token_acc": 0.5134099616858238, "train_speed(iter/s)": 0.672505 }, { "epoch": 3.437513388458078, "grad_norm": 6.359561920166016, "learning_rate": 2.2255840829207692e-05, "loss": 1.9878780364990234, "memory(GiB)": 72.85, "step": 80235, "token_acc": 0.5362776025236593, "train_speed(iter/s)": 0.672508 }, { "epoch": 3.437727603787327, "grad_norm": 5.3974504470825195, "learning_rate": 2.2250242392056576e-05, "loss": 2.206667900085449, "memory(GiB)": 72.85, "step": 80240, "token_acc": 0.5174825174825175, "train_speed(iter/s)": 0.67252 }, { "epoch": 3.437941819116576, "grad_norm": 7.449629306793213, "learning_rate": 2.224464445761837e-05, "loss": 2.2129573822021484, "memory(GiB)": 72.85, "step": 80245, "token_acc": 0.5125448028673835, "train_speed(iter/s)": 0.672527 }, { "epoch": 3.438156034445825, "grad_norm": 5.750715732574463, "learning_rate": 2.2239047025994462e-05, "loss": 2.386913299560547, "memory(GiB)": 72.85, "step": 80250, "token_acc": 0.5335689045936396, "train_speed(iter/s)": 0.672519 }, { "epoch": 3.438370249775074, "grad_norm": 5.071930408477783, "learning_rate": 2.2233450097286267e-05, "loss": 2.1261810302734374, "memory(GiB)": 72.85, "step": 80255, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.67253 }, { "epoch": 3.4385844651043227, "grad_norm": 4.720595836639404, "learning_rate": 2.2227853671595167e-05, "loss": 2.1008445739746096, "memory(GiB)": 72.85, "step": 80260, "token_acc": 0.5241730279898219, "train_speed(iter/s)": 0.672538 }, { "epoch": 3.438798680433572, "grad_norm": 5.948912143707275, "learning_rate": 2.2222257749022536e-05, "loss": 2.070079803466797, "memory(GiB)": 72.85, "step": 80265, "token_acc": 0.559322033898305, "train_speed(iter/s)": 0.672541 }, { "epoch": 3.4390128957628208, "grad_norm": 5.07833194732666, "learning_rate": 2.2216662329669773e-05, "loss": 2.0181062698364256, "memory(GiB)": 72.85, "step": 
80270, "token_acc": 0.546583850931677, "train_speed(iter/s)": 0.672536 }, { "epoch": 3.4392271110920696, "grad_norm": 6.623505592346191, "learning_rate": 2.221106741363824e-05, "loss": 2.035460090637207, "memory(GiB)": 72.85, "step": 80275, "token_acc": 0.5284280936454849, "train_speed(iter/s)": 0.672538 }, { "epoch": 3.439441326421319, "grad_norm": 5.225928783416748, "learning_rate": 2.2205473001029293e-05, "loss": 2.286569595336914, "memory(GiB)": 72.85, "step": 80280, "token_acc": 0.5230263157894737, "train_speed(iter/s)": 0.672542 }, { "epoch": 3.4396555417505676, "grad_norm": 5.54355001449585, "learning_rate": 2.219987909194427e-05, "loss": 2.158432960510254, "memory(GiB)": 72.85, "step": 80285, "token_acc": 0.5346534653465347, "train_speed(iter/s)": 0.67255 }, { "epoch": 3.4398697570798165, "grad_norm": 6.418878078460693, "learning_rate": 2.2194285686484517e-05, "loss": 1.9721805572509765, "memory(GiB)": 72.85, "step": 80290, "token_acc": 0.5547703180212014, "train_speed(iter/s)": 0.672563 }, { "epoch": 3.4400839724090657, "grad_norm": 6.198085784912109, "learning_rate": 2.2188692784751348e-05, "loss": 2.2765840530395507, "memory(GiB)": 72.85, "step": 80295, "token_acc": 0.4883720930232558, "train_speed(iter/s)": 0.672563 }, { "epoch": 3.4402981877383145, "grad_norm": 4.818479537963867, "learning_rate": 2.218310038684611e-05, "loss": 2.399139404296875, "memory(GiB)": 72.85, "step": 80300, "token_acc": 0.4882943143812709, "train_speed(iter/s)": 0.672553 }, { "epoch": 3.4405124030675633, "grad_norm": 6.0950026512146, "learning_rate": 2.217750849287011e-05, "loss": 2.0724483489990235, "memory(GiB)": 72.85, "step": 80305, "token_acc": 0.5427350427350427, "train_speed(iter/s)": 0.672557 }, { "epoch": 3.4407266183968126, "grad_norm": 5.838136196136475, "learning_rate": 2.2171917102924617e-05, "loss": 2.206013870239258, "memory(GiB)": 72.85, "step": 80310, "token_acc": 0.5496894409937888, "train_speed(iter/s)": 0.672559 }, { "epoch": 3.4409408337260614, "grad_norm": 
6.307716369628906, "learning_rate": 2.2166326217110973e-05, "loss": 2.284312438964844, "memory(GiB)": 72.85, "step": 80315, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.672557 }, { "epoch": 3.4411550490553102, "grad_norm": 3.978238344192505, "learning_rate": 2.2160735835530445e-05, "loss": 2.155629348754883, "memory(GiB)": 72.85, "step": 80320, "token_acc": 0.5522875816993464, "train_speed(iter/s)": 0.672564 }, { "epoch": 3.4413692643845595, "grad_norm": 6.060308456420898, "learning_rate": 2.2155145958284274e-05, "loss": 2.1618953704833985, "memory(GiB)": 72.85, "step": 80325, "token_acc": 0.509641873278237, "train_speed(iter/s)": 0.672566 }, { "epoch": 3.4415834797138083, "grad_norm": 5.029531478881836, "learning_rate": 2.214955658547378e-05, "loss": 2.1040544509887695, "memory(GiB)": 72.85, "step": 80330, "token_acc": 0.5210355987055016, "train_speed(iter/s)": 0.672572 }, { "epoch": 3.441797695043057, "grad_norm": 5.240782260894775, "learning_rate": 2.214396771720019e-05, "loss": 1.7598331451416016, "memory(GiB)": 72.85, "step": 80335, "token_acc": 0.6, "train_speed(iter/s)": 0.67257 }, { "epoch": 3.4420119103723064, "grad_norm": 5.040289402008057, "learning_rate": 2.213837935356476e-05, "loss": 2.137989616394043, "memory(GiB)": 72.85, "step": 80340, "token_acc": 0.6047297297297297, "train_speed(iter/s)": 0.672565 }, { "epoch": 3.442226125701555, "grad_norm": 6.452085018157959, "learning_rate": 2.2132791494668727e-05, "loss": 2.2619955062866213, "memory(GiB)": 72.85, "step": 80345, "token_acc": 0.5166666666666667, "train_speed(iter/s)": 0.672559 }, { "epoch": 3.442440341030804, "grad_norm": 4.542790412902832, "learning_rate": 2.2127204140613288e-05, "loss": 2.067668342590332, "memory(GiB)": 72.85, "step": 80350, "token_acc": 0.5874125874125874, "train_speed(iter/s)": 0.672549 }, { "epoch": 3.4426545563600532, "grad_norm": 6.390249252319336, "learning_rate": 2.212161729149972e-05, "loss": 2.313976287841797, "memory(GiB)": 72.85, "step": 80355, 
"token_acc": 0.5100671140939598, "train_speed(iter/s)": 0.672564 }, { "epoch": 3.442868771689302, "grad_norm": 8.552563667297363, "learning_rate": 2.2116030947429206e-05, "loss": 2.080534744262695, "memory(GiB)": 72.85, "step": 80360, "token_acc": 0.528957528957529, "train_speed(iter/s)": 0.672576 }, { "epoch": 3.443082987018551, "grad_norm": 6.2521653175354, "learning_rate": 2.2110445108502946e-05, "loss": 1.9052209854125977, "memory(GiB)": 72.85, "step": 80365, "token_acc": 0.590443686006826, "train_speed(iter/s)": 0.67258 }, { "epoch": 3.4432972023478, "grad_norm": 5.171764373779297, "learning_rate": 2.210485977482214e-05, "loss": 2.1837158203125, "memory(GiB)": 72.85, "step": 80370, "token_acc": 0.5064516129032258, "train_speed(iter/s)": 0.672581 }, { "epoch": 3.443511417677049, "grad_norm": 8.002726554870605, "learning_rate": 2.2099274946487964e-05, "loss": 2.160869598388672, "memory(GiB)": 72.85, "step": 80375, "token_acc": 0.5224913494809689, "train_speed(iter/s)": 0.672578 }, { "epoch": 3.4437256330062977, "grad_norm": 5.730252265930176, "learning_rate": 2.2093690623601577e-05, "loss": 2.2217990875244142, "memory(GiB)": 72.85, "step": 80380, "token_acc": 0.48466257668711654, "train_speed(iter/s)": 0.672579 }, { "epoch": 3.443939848335547, "grad_norm": 5.3523712158203125, "learning_rate": 2.208810680626416e-05, "loss": 2.152362251281738, "memory(GiB)": 72.85, "step": 80385, "token_acc": 0.5215827338129496, "train_speed(iter/s)": 0.672584 }, { "epoch": 3.444154063664796, "grad_norm": 5.932448863983154, "learning_rate": 2.208252349457689e-05, "loss": 1.7570867538452148, "memory(GiB)": 72.85, "step": 80390, "token_acc": 0.594306049822064, "train_speed(iter/s)": 0.672587 }, { "epoch": 3.4443682789940446, "grad_norm": 7.468530178070068, "learning_rate": 2.20769406886409e-05, "loss": 2.3264511108398436, "memory(GiB)": 72.85, "step": 80395, "token_acc": 0.5350553505535055, "train_speed(iter/s)": 0.672587 }, { "epoch": 3.444582494323294, "grad_norm": 
6.050468444824219, "learning_rate": 2.2071358388557322e-05, "loss": 2.400196838378906, "memory(GiB)": 72.85, "step": 80400, "token_acc": 0.5149700598802395, "train_speed(iter/s)": 0.672595 }, { "epoch": 3.4447967096525427, "grad_norm": 5.0278472900390625, "learning_rate": 2.2065776594427284e-05, "loss": 2.0666990280151367, "memory(GiB)": 72.85, "step": 80405, "token_acc": 0.5588235294117647, "train_speed(iter/s)": 0.672602 }, { "epoch": 3.4450109249817915, "grad_norm": 6.269662857055664, "learning_rate": 2.2060195306351894e-05, "loss": 1.8571401596069337, "memory(GiB)": 72.85, "step": 80410, "token_acc": 0.576271186440678, "train_speed(iter/s)": 0.672605 }, { "epoch": 3.4452251403110408, "grad_norm": 5.921087741851807, "learning_rate": 2.2054614524432286e-05, "loss": 2.085318756103516, "memory(GiB)": 72.85, "step": 80415, "token_acc": 0.51171875, "train_speed(iter/s)": 0.67261 }, { "epoch": 3.4454393556402896, "grad_norm": 5.889559745788574, "learning_rate": 2.2049034248769556e-05, "loss": 2.151680755615234, "memory(GiB)": 72.85, "step": 80420, "token_acc": 0.5274725274725275, "train_speed(iter/s)": 0.672618 }, { "epoch": 3.4456535709695384, "grad_norm": 5.181361675262451, "learning_rate": 2.2043454479464793e-05, "loss": 2.1987350463867186, "memory(GiB)": 72.85, "step": 80425, "token_acc": 0.5076335877862596, "train_speed(iter/s)": 0.672624 }, { "epoch": 3.4458677862987876, "grad_norm": 5.600039482116699, "learning_rate": 2.2037875216619074e-05, "loss": 2.0640283584594727, "memory(GiB)": 72.85, "step": 80430, "token_acc": 0.5732484076433121, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.4460820016280365, "grad_norm": 6.904242515563965, "learning_rate": 2.2032296460333474e-05, "loss": 2.0928970336914063, "memory(GiB)": 72.85, "step": 80435, "token_acc": 0.532871972318339, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.4462962169572853, "grad_norm": 5.358855247497559, "learning_rate": 2.2026718210709047e-05, "loss": 2.2271629333496095, "memory(GiB)": 72.85, 
"step": 80440, "token_acc": 0.5166051660516605, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.4465104322865345, "grad_norm": 5.941921234130859, "learning_rate": 2.2021140467846872e-05, "loss": 1.8892717361450195, "memory(GiB)": 72.85, "step": 80445, "token_acc": 0.5579399141630901, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.4467246476157833, "grad_norm": 4.916507244110107, "learning_rate": 2.201556323184799e-05, "loss": 2.1700294494628904, "memory(GiB)": 72.85, "step": 80450, "token_acc": 0.5584415584415584, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.446938862945032, "grad_norm": 6.610027313232422, "learning_rate": 2.2009986502813407e-05, "loss": 2.2316747665405274, "memory(GiB)": 72.85, "step": 80455, "token_acc": 0.501779359430605, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.4471530782742814, "grad_norm": 5.926755428314209, "learning_rate": 2.2004410280844202e-05, "loss": 1.982895851135254, "memory(GiB)": 72.85, "step": 80460, "token_acc": 0.5743944636678201, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.4473672936035302, "grad_norm": 5.903873443603516, "learning_rate": 2.199883456604136e-05, "loss": 1.9925411224365235, "memory(GiB)": 72.85, "step": 80465, "token_acc": 0.5559440559440559, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.447581508932779, "grad_norm": 5.647480487823486, "learning_rate": 2.199325935850588e-05, "loss": 2.0029979705810548, "memory(GiB)": 72.85, "step": 80470, "token_acc": 0.5269230769230769, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.4477957242620283, "grad_norm": 5.007395267486572, "learning_rate": 2.1987684658338797e-05, "loss": 2.190943717956543, "memory(GiB)": 72.85, "step": 80475, "token_acc": 0.5258620689655172, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.448009939591277, "grad_norm": 9.3836030960083, "learning_rate": 2.1982110465641087e-05, "loss": 2.12692813873291, "memory(GiB)": 72.85, "step": 80480, "token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.448224154920526, 
"grad_norm": 4.344038486480713, "learning_rate": 2.197653678051373e-05, "loss": 2.189763069152832, "memory(GiB)": 72.85, "step": 80485, "token_acc": 0.5082417582417582, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.448438370249775, "grad_norm": 5.5037055015563965, "learning_rate": 2.1970963603057693e-05, "loss": 2.0609663009643553, "memory(GiB)": 72.85, "step": 80490, "token_acc": 0.5369127516778524, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.448652585579024, "grad_norm": 6.156872749328613, "learning_rate": 2.1965390933373923e-05, "loss": 1.7958145141601562, "memory(GiB)": 72.85, "step": 80495, "token_acc": 0.569620253164557, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.448866800908273, "grad_norm": 6.189816951751709, "learning_rate": 2.1959818771563418e-05, "loss": 2.1328859329223633, "memory(GiB)": 72.85, "step": 80500, "token_acc": 0.5013927576601671, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.448866800908273, "eval_loss": 1.8604480028152466, "eval_runtime": 15.5725, "eval_samples_per_second": 6.422, "eval_steps_per_second": 6.422, "eval_token_acc": 0.5402635431918009, "step": 80500 }, { "epoch": 3.449081016237522, "grad_norm": 6.0969109535217285, "learning_rate": 2.19542471177271e-05, "loss": 1.9833997726440429, "memory(GiB)": 72.85, "step": 80505, "token_acc": 0.5450733752620545, "train_speed(iter/s)": 0.672551 }, { "epoch": 3.449295231566771, "grad_norm": 4.738961696624756, "learning_rate": 2.1948675971965903e-05, "loss": 1.9233165740966798, "memory(GiB)": 72.85, "step": 80510, "token_acc": 0.573170731707317, "train_speed(iter/s)": 0.672546 }, { "epoch": 3.4495094468960197, "grad_norm": 5.322431564331055, "learning_rate": 2.1943105334380754e-05, "loss": 2.0413068771362304, "memory(GiB)": 72.85, "step": 80515, "token_acc": 0.5656934306569343, "train_speed(iter/s)": 0.672555 }, { "epoch": 3.449723662225269, "grad_norm": 5.079336166381836, "learning_rate": 2.1937535205072572e-05, "loss": 1.946738052368164, "memory(GiB)": 72.85, "step": 80520, 
"token_acc": 0.5302491103202847, "train_speed(iter/s)": 0.672557 }, { "epoch": 3.4499378775545178, "grad_norm": 7.167060852050781, "learning_rate": 2.193196558414225e-05, "loss": 2.333154296875, "memory(GiB)": 72.85, "step": 80525, "token_acc": 0.5069444444444444, "train_speed(iter/s)": 0.672563 }, { "epoch": 3.4501520928837666, "grad_norm": 5.503314971923828, "learning_rate": 2.1926396471690696e-05, "loss": 2.31933650970459, "memory(GiB)": 72.85, "step": 80530, "token_acc": 0.5118110236220472, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.450366308213016, "grad_norm": 6.5586628913879395, "learning_rate": 2.1920827867818827e-05, "loss": 2.175187683105469, "memory(GiB)": 72.85, "step": 80535, "token_acc": 0.547945205479452, "train_speed(iter/s)": 0.672574 }, { "epoch": 3.4505805235422646, "grad_norm": 6.149832248687744, "learning_rate": 2.19152597726275e-05, "loss": 2.050531005859375, "memory(GiB)": 72.85, "step": 80540, "token_acc": 0.5368098159509203, "train_speed(iter/s)": 0.672584 }, { "epoch": 3.4507947388715134, "grad_norm": 6.4046430587768555, "learning_rate": 2.1909692186217584e-05, "loss": 2.234170913696289, "memory(GiB)": 72.85, "step": 80545, "token_acc": 0.5058365758754864, "train_speed(iter/s)": 0.672591 }, { "epoch": 3.4510089542007627, "grad_norm": 6.049537658691406, "learning_rate": 2.1904125108689947e-05, "loss": 2.1434209823608397, "memory(GiB)": 72.85, "step": 80550, "token_acc": 0.5162337662337663, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.4512231695300115, "grad_norm": 5.423120975494385, "learning_rate": 2.1898558540145425e-05, "loss": 2.1798337936401366, "memory(GiB)": 72.85, "step": 80555, "token_acc": 0.5138339920948617, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.4514373848592603, "grad_norm": 7.395324230194092, "learning_rate": 2.1892992480684892e-05, "loss": 2.119317054748535, "memory(GiB)": 72.85, "step": 80560, "token_acc": 0.5173501577287066, "train_speed(iter/s)": 0.672609 }, { "epoch": 3.4516516001885096, "grad_norm": 
5.120800495147705, "learning_rate": 2.1887426930409173e-05, "loss": 1.8608184814453126, "memory(GiB)": 72.85, "step": 80565, "token_acc": 0.5574468085106383, "train_speed(iter/s)": 0.672612 }, { "epoch": 3.4518658155177584, "grad_norm": 4.762556076049805, "learning_rate": 2.188186188941908e-05, "loss": 2.041020965576172, "memory(GiB)": 72.85, "step": 80570, "token_acc": 0.564625850340136, "train_speed(iter/s)": 0.672605 }, { "epoch": 3.452080030847007, "grad_norm": 4.590350151062012, "learning_rate": 2.187629735781544e-05, "loss": 2.260855293273926, "memory(GiB)": 72.85, "step": 80575, "token_acc": 0.46, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.4522942461762565, "grad_norm": 5.562370300292969, "learning_rate": 2.187073333569905e-05, "loss": 2.0597124099731445, "memory(GiB)": 72.85, "step": 80580, "token_acc": 0.5536912751677853, "train_speed(iter/s)": 0.672609 }, { "epoch": 3.4525084615055053, "grad_norm": 6.324398994445801, "learning_rate": 2.1865169823170695e-05, "loss": 2.0860157012939453, "memory(GiB)": 72.85, "step": 80585, "token_acc": 0.5699300699300699, "train_speed(iter/s)": 0.672611 }, { "epoch": 3.452722676834754, "grad_norm": 6.851224422454834, "learning_rate": 2.1859606820331202e-05, "loss": 2.155268669128418, "memory(GiB)": 72.85, "step": 80590, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.672618 }, { "epoch": 3.4529368921640033, "grad_norm": 6.192825794219971, "learning_rate": 2.185404432728133e-05, "loss": 2.1155847549438476, "memory(GiB)": 72.85, "step": 80595, "token_acc": 0.5572289156626506, "train_speed(iter/s)": 0.672619 }, { "epoch": 3.453151107493252, "grad_norm": 4.855914115905762, "learning_rate": 2.184848234412185e-05, "loss": 1.9051294326782227, "memory(GiB)": 72.85, "step": 80600, "token_acc": 0.5845588235294118, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.453365322822501, "grad_norm": 7.056889533996582, "learning_rate": 2.18429208709535e-05, "loss": 1.8910207748413086, "memory(GiB)": 72.85, "step": 80605, 
"token_acc": 0.5730337078651685, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.4535795381517502, "grad_norm": 8.61642074584961, "learning_rate": 2.183735990787707e-05, "loss": 2.102496528625488, "memory(GiB)": 72.85, "step": 80610, "token_acc": 0.5212355212355212, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.453793753480999, "grad_norm": 7.261716842651367, "learning_rate": 2.1831799454993262e-05, "loss": 2.3071884155273437, "memory(GiB)": 72.85, "step": 80615, "token_acc": 0.5035460992907801, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.454007968810248, "grad_norm": 5.703823089599609, "learning_rate": 2.182623951240286e-05, "loss": 1.747460174560547, "memory(GiB)": 72.85, "step": 80620, "token_acc": 0.5953307392996109, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.454222184139497, "grad_norm": 8.10443115234375, "learning_rate": 2.1820680080206547e-05, "loss": 2.2817493438720704, "memory(GiB)": 72.85, "step": 80625, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.454436399468746, "grad_norm": 7.058004856109619, "learning_rate": 2.1815121158505053e-05, "loss": 2.096732330322266, "memory(GiB)": 72.85, "step": 80630, "token_acc": 0.5132075471698113, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.4546506147979947, "grad_norm": 3.9435222148895264, "learning_rate": 2.1809562747399075e-05, "loss": 1.9264760971069337, "memory(GiB)": 72.85, "step": 80635, "token_acc": 0.569023569023569, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.454864830127244, "grad_norm": 5.557364463806152, "learning_rate": 2.1804004846989302e-05, "loss": 2.0199935913085936, "memory(GiB)": 72.85, "step": 80640, "token_acc": 0.5521885521885522, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.455079045456493, "grad_norm": 6.508354187011719, "learning_rate": 2.1798447457376443e-05, "loss": 2.284646224975586, "memory(GiB)": 72.85, "step": 80645, "token_acc": 0.4954682779456193, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.4552932607857416, "grad_norm": 
5.4070539474487305, "learning_rate": 2.1792890578661168e-05, "loss": 2.250265884399414, "memory(GiB)": 72.85, "step": 80650, "token_acc": 0.5372168284789643, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.455507476114991, "grad_norm": 9.66545581817627, "learning_rate": 2.1787334210944137e-05, "loss": 2.2993896484375, "memory(GiB)": 72.85, "step": 80655, "token_acc": 0.5031055900621118, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.4557216914442397, "grad_norm": 6.089763641357422, "learning_rate": 2.1781778354326017e-05, "loss": 1.891777801513672, "memory(GiB)": 72.85, "step": 80660, "token_acc": 0.5530546623794212, "train_speed(iter/s)": 0.672655 }, { "epoch": 3.4559359067734885, "grad_norm": 4.99012565612793, "learning_rate": 2.177622300890745e-05, "loss": 1.9879093170166016, "memory(GiB)": 72.85, "step": 80665, "token_acc": 0.5534351145038168, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.4561501221027378, "grad_norm": 6.40512752532959, "learning_rate": 2.1770668174789054e-05, "loss": 2.101141357421875, "memory(GiB)": 72.85, "step": 80670, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.4563643374319866, "grad_norm": 5.878130912780762, "learning_rate": 2.1765113852071494e-05, "loss": 1.883772087097168, "memory(GiB)": 72.85, "step": 80675, "token_acc": 0.6056338028169014, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.4565785527612354, "grad_norm": 5.2069244384765625, "learning_rate": 2.17595600408554e-05, "loss": 1.7423484802246094, "memory(GiB)": 72.85, "step": 80680, "token_acc": 0.6138996138996139, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.4567927680904846, "grad_norm": 4.723913669586182, "learning_rate": 2.1754006741241366e-05, "loss": 1.8836647033691407, "memory(GiB)": 72.85, "step": 80685, "token_acc": 0.5444444444444444, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.4570069834197334, "grad_norm": 5.2788848876953125, "learning_rate": 2.174845395333e-05, "loss": 2.3429946899414062, "memory(GiB)": 72.85, "step": 
80690, "token_acc": 0.4980544747081712, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.4572211987489823, "grad_norm": 4.924185752868652, "learning_rate": 2.1742901677221887e-05, "loss": 1.9641851425170898, "memory(GiB)": 72.85, "step": 80695, "token_acc": 0.6016597510373444, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.4574354140782315, "grad_norm": 6.358138561248779, "learning_rate": 2.1737349913017603e-05, "loss": 1.9139801025390626, "memory(GiB)": 72.85, "step": 80700, "token_acc": 0.5547445255474452, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.4576496294074803, "grad_norm": 6.113473892211914, "learning_rate": 2.1731798660817753e-05, "loss": 2.1885055541992187, "memory(GiB)": 72.85, "step": 80705, "token_acc": 0.5189393939393939, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.457863844736729, "grad_norm": 5.725899696350098, "learning_rate": 2.1726247920722893e-05, "loss": 1.9140296936035157, "memory(GiB)": 72.85, "step": 80710, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672684 }, { "epoch": 3.4580780600659784, "grad_norm": 5.154545307159424, "learning_rate": 2.1720697692833576e-05, "loss": 1.8065235137939453, "memory(GiB)": 72.85, "step": 80715, "token_acc": 0.5967078189300411, "train_speed(iter/s)": 0.672699 }, { "epoch": 3.458292275395227, "grad_norm": 5.099823474884033, "learning_rate": 2.1715147977250345e-05, "loss": 2.2906330108642576, "memory(GiB)": 72.85, "step": 80720, "token_acc": 0.47959183673469385, "train_speed(iter/s)": 0.672699 }, { "epoch": 3.458506490724476, "grad_norm": 4.973609924316406, "learning_rate": 2.1709598774073735e-05, "loss": 2.0272199630737306, "memory(GiB)": 72.85, "step": 80725, "token_acc": 0.5616883116883117, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.4587207060537253, "grad_norm": 4.461769104003906, "learning_rate": 2.170405008340427e-05, "loss": 2.0585443496704103, "memory(GiB)": 72.85, "step": 80730, "token_acc": 0.5300751879699248, "train_speed(iter/s)": 0.672712 }, { "epoch": 3.458934921382974, 
"grad_norm": 7.001522541046143, "learning_rate": 2.16985019053425e-05, "loss": 2.1855226516723634, "memory(GiB)": 72.85, "step": 80735, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.459149136712223, "grad_norm": 5.991788387298584, "learning_rate": 2.169295423998892e-05, "loss": 1.9173238754272461, "memory(GiB)": 72.85, "step": 80740, "token_acc": 0.5551470588235294, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.459363352041472, "grad_norm": 7.210577011108398, "learning_rate": 2.1687407087444023e-05, "loss": 2.0587621688842774, "memory(GiB)": 72.85, "step": 80745, "token_acc": 0.5532544378698225, "train_speed(iter/s)": 0.672711 }, { "epoch": 3.459577567370721, "grad_norm": 6.435888290405273, "learning_rate": 2.1681860447808288e-05, "loss": 2.3561370849609373, "memory(GiB)": 72.85, "step": 80750, "token_acc": 0.5034965034965035, "train_speed(iter/s)": 0.672712 }, { "epoch": 3.45979178269997, "grad_norm": 6.66936731338501, "learning_rate": 2.167631432118224e-05, "loss": 2.4940216064453127, "memory(GiB)": 72.85, "step": 80755, "token_acc": 0.5150602409638554, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.460005998029219, "grad_norm": 7.52030611038208, "learning_rate": 2.1670768707666302e-05, "loss": 2.3033401489257814, "memory(GiB)": 72.85, "step": 80760, "token_acc": 0.5100286532951289, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.460220213358468, "grad_norm": 5.306900501251221, "learning_rate": 2.1665223607360984e-05, "loss": 2.223972129821777, "memory(GiB)": 72.85, "step": 80765, "token_acc": 0.5073529411764706, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.4604344286877167, "grad_norm": 7.023824691772461, "learning_rate": 2.1659679020366725e-05, "loss": 1.7670709609985351, "memory(GiB)": 72.85, "step": 80770, "token_acc": 0.5789473684210527, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.460648644016966, "grad_norm": 5.723729610443115, "learning_rate": 2.165413494678396e-05, "loss": 2.2218679428100585, "memory(GiB)": 
72.85, "step": 80775, "token_acc": 0.5559701492537313, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.4608628593462147, "grad_norm": 5.799663066864014, "learning_rate": 2.1648591386713136e-05, "loss": 1.9462106704711915, "memory(GiB)": 72.85, "step": 80780, "token_acc": 0.5827338129496403, "train_speed(iter/s)": 0.672716 }, { "epoch": 3.4610770746754635, "grad_norm": 5.799746036529541, "learning_rate": 2.164304834025465e-05, "loss": 2.2081085205078126, "memory(GiB)": 72.85, "step": 80785, "token_acc": 0.4948805460750853, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.461291290004713, "grad_norm": 7.483019828796387, "learning_rate": 2.1637505807508968e-05, "loss": 2.2519481658935545, "memory(GiB)": 72.85, "step": 80790, "token_acc": 0.5364431486880467, "train_speed(iter/s)": 0.672711 }, { "epoch": 3.4615055053339616, "grad_norm": 7.587349891662598, "learning_rate": 2.163196378857647e-05, "loss": 2.1796186447143553, "memory(GiB)": 72.85, "step": 80795, "token_acc": 0.5366568914956011, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.4617197206632104, "grad_norm": 6.424136638641357, "learning_rate": 2.162642228355756e-05, "loss": 2.403270721435547, "memory(GiB)": 72.85, "step": 80800, "token_acc": 0.5316455696202531, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.4619339359924597, "grad_norm": 4.091686248779297, "learning_rate": 2.1620881292552637e-05, "loss": 2.0335193634033204, "memory(GiB)": 72.85, "step": 80805, "token_acc": 0.5521885521885522, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.4621481513217085, "grad_norm": 4.362661838531494, "learning_rate": 2.161534081566206e-05, "loss": 2.0631778717041014, "memory(GiB)": 72.85, "step": 80810, "token_acc": 0.5566343042071198, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.4623623666509573, "grad_norm": 5.496510982513428, "learning_rate": 2.1609800852986194e-05, "loss": 2.3313188552856445, "memory(GiB)": 72.85, "step": 80815, "token_acc": 0.49244712990936557, "train_speed(iter/s)": 0.672729 }, { "epoch": 
3.4625765819802066, "grad_norm": 6.393731594085693, "learning_rate": 2.1604261404625443e-05, "loss": 2.2978031158447267, "memory(GiB)": 72.85, "step": 80820, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.4627907973094554, "grad_norm": 4.784221172332764, "learning_rate": 2.159872247068011e-05, "loss": 2.219330596923828, "memory(GiB)": 72.85, "step": 80825, "token_acc": 0.5202702702702703, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.463005012638704, "grad_norm": 4.944983959197998, "learning_rate": 2.159318405125058e-05, "loss": 2.1654756546020506, "memory(GiB)": 72.85, "step": 80830, "token_acc": 0.5157232704402516, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.4632192279679535, "grad_norm": 4.797879695892334, "learning_rate": 2.158764614643717e-05, "loss": 2.3802059173583983, "memory(GiB)": 72.85, "step": 80835, "token_acc": 0.5145985401459854, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.4634334432972023, "grad_norm": 5.684353351593018, "learning_rate": 2.15821087563402e-05, "loss": 1.9952684402465821, "memory(GiB)": 72.85, "step": 80840, "token_acc": 0.5846153846153846, "train_speed(iter/s)": 0.672741 }, { "epoch": 3.463647658626451, "grad_norm": 4.7325215339660645, "learning_rate": 2.1576571881059975e-05, "loss": 2.3671602249145507, "memory(GiB)": 72.85, "step": 80845, "token_acc": 0.4983388704318937, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.4638618739557003, "grad_norm": 6.247686386108398, "learning_rate": 2.157103552069683e-05, "loss": 2.028345489501953, "memory(GiB)": 72.85, "step": 80850, "token_acc": 0.5349544072948328, "train_speed(iter/s)": 0.672743 }, { "epoch": 3.464076089284949, "grad_norm": 6.8847198486328125, "learning_rate": 2.156549967535104e-05, "loss": 2.2517444610595705, "memory(GiB)": 72.85, "step": 80855, "token_acc": 0.5559105431309904, "train_speed(iter/s)": 0.672751 }, { "epoch": 3.464290304614198, "grad_norm": 3.958815097808838, "learning_rate": 2.155996434512291e-05, "loss": 
1.9564201354980468, "memory(GiB)": 72.85, "step": 80860, "token_acc": 0.5952380952380952, "train_speed(iter/s)": 0.672755 }, { "epoch": 3.464504519943447, "grad_norm": 5.4731125831604, "learning_rate": 2.155442953011269e-05, "loss": 2.20200252532959, "memory(GiB)": 72.85, "step": 80865, "token_acc": 0.5546558704453441, "train_speed(iter/s)": 0.672751 }, { "epoch": 3.464718735272696, "grad_norm": 7.12040901184082, "learning_rate": 2.1548895230420672e-05, "loss": 2.0684146881103516, "memory(GiB)": 72.85, "step": 80870, "token_acc": 0.5487012987012987, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.464932950601945, "grad_norm": 5.669070720672607, "learning_rate": 2.1543361446147088e-05, "loss": 2.1265520095825194, "memory(GiB)": 72.85, "step": 80875, "token_acc": 0.54421768707483, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.465147165931194, "grad_norm": 5.555521011352539, "learning_rate": 2.1537828177392222e-05, "loss": 2.0704227447509767, "memory(GiB)": 72.85, "step": 80880, "token_acc": 0.5612903225806452, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.465361381260443, "grad_norm": 5.24281120300293, "learning_rate": 2.1532295424256293e-05, "loss": 1.9516792297363281, "memory(GiB)": 72.85, "step": 80885, "token_acc": 0.5436241610738255, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.4655755965896917, "grad_norm": 6.7348785400390625, "learning_rate": 2.1526763186839545e-05, "loss": 2.119632339477539, "memory(GiB)": 72.85, "step": 80890, "token_acc": 0.5373134328358209, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.465789811918941, "grad_norm": 5.709564685821533, "learning_rate": 2.1521231465242185e-05, "loss": 2.0737781524658203, "memory(GiB)": 72.85, "step": 80895, "token_acc": 0.5935483870967742, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.46600402724819, "grad_norm": 6.636321067810059, "learning_rate": 2.151570025956442e-05, "loss": 2.2406131744384767, "memory(GiB)": 72.85, "step": 80900, "token_acc": 0.5, "train_speed(iter/s)": 0.672729 }, { "epoch": 
3.4662182425774386, "grad_norm": 6.760307312011719, "learning_rate": 2.1510169569906462e-05, "loss": 2.4599180221557617, "memory(GiB)": 72.85, "step": 80905, "token_acc": 0.49466192170818507, "train_speed(iter/s)": 0.672743 }, { "epoch": 3.466432457906688, "grad_norm": 8.302261352539062, "learning_rate": 2.150463939636853e-05, "loss": 1.865506935119629, "memory(GiB)": 72.85, "step": 80910, "token_acc": 0.5774058577405857, "train_speed(iter/s)": 0.672751 }, { "epoch": 3.4666466732359367, "grad_norm": 7.296651363372803, "learning_rate": 2.1499109739050782e-05, "loss": 2.211490249633789, "memory(GiB)": 72.85, "step": 80915, "token_acc": 0.5160349854227405, "train_speed(iter/s)": 0.672736 }, { "epoch": 3.4668608885651855, "grad_norm": 5.4893646240234375, "learning_rate": 2.14935805980534e-05, "loss": 2.2189910888671873, "memory(GiB)": 72.85, "step": 80920, "token_acc": 0.49433962264150944, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.4670751038944347, "grad_norm": 5.546732425689697, "learning_rate": 2.1488051973476546e-05, "loss": 2.3687070846557616, "memory(GiB)": 72.85, "step": 80925, "token_acc": 0.5143769968051118, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.4672893192236836, "grad_norm": 7.331081867218018, "learning_rate": 2.1482523865420358e-05, "loss": 2.2224998474121094, "memory(GiB)": 72.85, "step": 80930, "token_acc": 0.5331325301204819, "train_speed(iter/s)": 0.672744 }, { "epoch": 3.4675035345529324, "grad_norm": 5.123290538787842, "learning_rate": 2.147699627398502e-05, "loss": 1.992372703552246, "memory(GiB)": 72.85, "step": 80935, "token_acc": 0.5523012552301255, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.4677177498821816, "grad_norm": 5.160436630249023, "learning_rate": 2.1471469199270648e-05, "loss": 1.9537776947021483, "memory(GiB)": 72.85, "step": 80940, "token_acc": 0.5173501577287066, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.4679319652114304, "grad_norm": 6.79822301864624, "learning_rate": 2.146594264137738e-05, "loss": 
2.3434982299804688, "memory(GiB)": 72.85, "step": 80945, "token_acc": 0.5275080906148867, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.4681461805406792, "grad_norm": 7.7275214195251465, "learning_rate": 2.1460416600405324e-05, "loss": 2.368941879272461, "memory(GiB)": 72.85, "step": 80950, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.4683603958699285, "grad_norm": 6.097754001617432, "learning_rate": 2.14548910764546e-05, "loss": 2.148271942138672, "memory(GiB)": 72.85, "step": 80955, "token_acc": 0.5370370370370371, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.4685746111991773, "grad_norm": 6.594888687133789, "learning_rate": 2.144936606962527e-05, "loss": 2.0763523101806642, "memory(GiB)": 72.85, "step": 80960, "token_acc": 0.5407608695652174, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.468788826528426, "grad_norm": 9.245387077331543, "learning_rate": 2.1443841580017488e-05, "loss": 2.3054178237915037, "memory(GiB)": 72.85, "step": 80965, "token_acc": 0.511326860841424, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.4690030418576754, "grad_norm": 4.670864582061768, "learning_rate": 2.1438317607731294e-05, "loss": 1.9065586090087892, "memory(GiB)": 72.85, "step": 80970, "token_acc": 0.599290780141844, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.469217257186924, "grad_norm": 7.3023858070373535, "learning_rate": 2.143279415286676e-05, "loss": 2.082071304321289, "memory(GiB)": 72.85, "step": 80975, "token_acc": 0.5601503759398496, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.469431472516173, "grad_norm": 6.894445896148682, "learning_rate": 2.142727121552397e-05, "loss": 2.2764925003051757, "memory(GiB)": 72.85, "step": 80980, "token_acc": 0.5076335877862596, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.4696456878454223, "grad_norm": 5.2772603034973145, "learning_rate": 2.1421748795802965e-05, "loss": 2.0543832778930664, "memory(GiB)": 72.85, "step": 80985, "token_acc": 0.4912891986062718, "train_speed(iter/s)": 
0.672725 }, { "epoch": 3.469859903174671, "grad_norm": 8.900675773620605, "learning_rate": 2.141622689380377e-05, "loss": 2.284353828430176, "memory(GiB)": 72.85, "step": 80990, "token_acc": 0.48375451263537905, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.47007411850392, "grad_norm": 6.7931036949157715, "learning_rate": 2.141070550962646e-05, "loss": 1.9523849487304688, "memory(GiB)": 72.85, "step": 80995, "token_acc": 0.5382165605095541, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.470288333833169, "grad_norm": 7.281875133514404, "learning_rate": 2.140518464337104e-05, "loss": 2.3539703369140623, "memory(GiB)": 72.85, "step": 81000, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.470288333833169, "eval_loss": 2.0624191761016846, "eval_runtime": 15.0988, "eval_samples_per_second": 6.623, "eval_steps_per_second": 6.623, "eval_token_acc": 0.49867021276595747, "step": 81000 }, { "epoch": 3.470502549162418, "grad_norm": 4.989017486572266, "learning_rate": 2.1399664295137523e-05, "loss": 2.179148292541504, "memory(GiB)": 72.85, "step": 81005, "token_acc": 0.49479659413434246, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.470716764491667, "grad_norm": 6.116218566894531, "learning_rate": 2.1394144465025916e-05, "loss": 1.8373210906982422, "memory(GiB)": 72.85, "step": 81010, "token_acc": 0.5574912891986062, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.470930979820916, "grad_norm": 5.474175453186035, "learning_rate": 2.1388625153136217e-05, "loss": 2.028041458129883, "memory(GiB)": 72.85, "step": 81015, "token_acc": 0.5033112582781457, "train_speed(iter/s)": 0.672615 }, { "epoch": 3.471145195150165, "grad_norm": 5.786999225616455, "learning_rate": 2.1383106359568395e-05, "loss": 1.9947801589965821, "memory(GiB)": 72.85, "step": 81020, "token_acc": 0.5579399141630901, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.471359410479414, "grad_norm": 7.200125217437744, "learning_rate": 2.1377588084422463e-05, "loss": 1.9253578186035156, 
"memory(GiB)": 72.85, "step": 81025, "token_acc": 0.5466237942122186, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.471573625808663, "grad_norm": 4.946737766265869, "learning_rate": 2.1372070327798372e-05, "loss": 2.2213258743286133, "memory(GiB)": 72.85, "step": 81030, "token_acc": 0.5426829268292683, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.4717878411379117, "grad_norm": 8.03543472290039, "learning_rate": 2.1366553089796088e-05, "loss": 2.373291778564453, "memory(GiB)": 72.85, "step": 81035, "token_acc": 0.44744744744744747, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.472002056467161, "grad_norm": 4.988245010375977, "learning_rate": 2.1361036370515548e-05, "loss": 2.056166648864746, "memory(GiB)": 72.85, "step": 81040, "token_acc": 0.5270758122743683, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.47221627179641, "grad_norm": 6.62160587310791, "learning_rate": 2.1355520170056688e-05, "loss": 1.8510627746582031, "memory(GiB)": 72.85, "step": 81045, "token_acc": 0.5932203389830508, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.4724304871256586, "grad_norm": 4.611385345458984, "learning_rate": 2.1350004488519444e-05, "loss": 2.2191389083862303, "memory(GiB)": 72.85, "step": 81050, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.472644702454908, "grad_norm": 5.037582874298096, "learning_rate": 2.1344489326003762e-05, "loss": 2.2408302307128904, "memory(GiB)": 72.85, "step": 81055, "token_acc": 0.47719298245614034, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.4728589177841567, "grad_norm": 5.884340286254883, "learning_rate": 2.1338974682609543e-05, "loss": 2.0527603149414064, "memory(GiB)": 72.85, "step": 81060, "token_acc": 0.5451713395638629, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.4730731331134055, "grad_norm": 6.543931007385254, "learning_rate": 2.133346055843668e-05, "loss": 1.9525644302368164, "memory(GiB)": 72.85, "step": 81065, "token_acc": 0.5317919075144508, "train_speed(iter/s)": 0.672638 }, { 
"epoch": 3.4732873484426547, "grad_norm": 5.674046516418457, "learning_rate": 2.1327946953585076e-05, "loss": 2.0488666534423827, "memory(GiB)": 72.85, "step": 81070, "token_acc": 0.5163934426229508, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.4735015637719036, "grad_norm": 8.25030517578125, "learning_rate": 2.1322433868154586e-05, "loss": 1.941767120361328, "memory(GiB)": 72.85, "step": 81075, "token_acc": 0.5631067961165048, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.4737157791011524, "grad_norm": 4.651237487792969, "learning_rate": 2.131692130224513e-05, "loss": 1.9432565689086914, "memory(GiB)": 72.85, "step": 81080, "token_acc": 0.5547945205479452, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.4739299944304016, "grad_norm": 6.054096221923828, "learning_rate": 2.131140925595655e-05, "loss": 1.9958866119384766, "memory(GiB)": 72.85, "step": 81085, "token_acc": 0.5849673202614379, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.4741442097596504, "grad_norm": 5.735112190246582, "learning_rate": 2.13058977293887e-05, "loss": 2.261343765258789, "memory(GiB)": 72.85, "step": 81090, "token_acc": 0.5176470588235295, "train_speed(iter/s)": 0.672655 }, { "epoch": 3.4743584250888992, "grad_norm": 5.780578136444092, "learning_rate": 2.130038672264143e-05, "loss": 2.1394327163696287, "memory(GiB)": 72.85, "step": 81095, "token_acc": 0.5408560311284046, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.4745726404181485, "grad_norm": 5.608763217926025, "learning_rate": 2.1294876235814582e-05, "loss": 2.3632253646850585, "memory(GiB)": 72.85, "step": 81100, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.672668 }, { "epoch": 3.4747868557473973, "grad_norm": 6.322705268859863, "learning_rate": 2.1289366269007954e-05, "loss": 2.0247575759887697, "memory(GiB)": 72.85, "step": 81105, "token_acc": 0.5590277777777778, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.475001071076646, "grad_norm": 7.411378860473633, "learning_rate": 2.1283856822321412e-05, "loss": 
1.9719114303588867, "memory(GiB)": 72.85, "step": 81110, "token_acc": 0.6139705882352942, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.4752152864058954, "grad_norm": 5.625621318817139, "learning_rate": 2.1278347895854745e-05, "loss": 2.151469039916992, "memory(GiB)": 72.85, "step": 81115, "token_acc": 0.5119047619047619, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.475429501735144, "grad_norm": 6.862193584442139, "learning_rate": 2.1272839489707725e-05, "loss": 2.2586742401123048, "memory(GiB)": 72.85, "step": 81120, "token_acc": 0.5087108013937283, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.475643717064393, "grad_norm": 4.7809834480285645, "learning_rate": 2.1267331603980184e-05, "loss": 2.7131248474121095, "memory(GiB)": 72.85, "step": 81125, "token_acc": 0.4720670391061452, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.4758579323936423, "grad_norm": 6.598269939422607, "learning_rate": 2.1261824238771882e-05, "loss": 1.9794170379638671, "memory(GiB)": 72.85, "step": 81130, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672668 }, { "epoch": 3.476072147722891, "grad_norm": 4.89886999130249, "learning_rate": 2.125631739418258e-05, "loss": 2.2210720062255858, "memory(GiB)": 72.85, "step": 81135, "token_acc": 0.5518518518518518, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.47628636305214, "grad_norm": 7.275463581085205, "learning_rate": 2.125081107031207e-05, "loss": 2.06641845703125, "memory(GiB)": 72.85, "step": 81140, "token_acc": 0.5254777070063694, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.476500578381389, "grad_norm": 6.652456760406494, "learning_rate": 2.1245305267260086e-05, "loss": 2.210175895690918, "memory(GiB)": 72.85, "step": 81145, "token_acc": 0.5018181818181818, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.476714793710638, "grad_norm": 4.425463676452637, "learning_rate": 2.123979998512637e-05, "loss": 2.0133329391479493, "memory(GiB)": 72.85, "step": 81150, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 
0.672659 }, { "epoch": 3.4769290090398868, "grad_norm": 5.734181880950928, "learning_rate": 2.1234295224010654e-05, "loss": 2.1081768035888673, "memory(GiB)": 72.85, "step": 81155, "token_acc": 0.5474452554744526, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.477143224369136, "grad_norm": 5.890716075897217, "learning_rate": 2.1228790984012647e-05, "loss": 2.084009552001953, "memory(GiB)": 72.85, "step": 81160, "token_acc": 0.5535055350553506, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.477357439698385, "grad_norm": 6.246518611907959, "learning_rate": 2.12232872652321e-05, "loss": 2.0662982940673826, "memory(GiB)": 72.85, "step": 81165, "token_acc": 0.5149501661129569, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.4775716550276337, "grad_norm": 5.869900226593018, "learning_rate": 2.1217784067768702e-05, "loss": 2.3949682235717775, "memory(GiB)": 72.85, "step": 81170, "token_acc": 0.5108359133126935, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.477785870356883, "grad_norm": 5.63656759262085, "learning_rate": 2.1212281391722145e-05, "loss": 2.190030097961426, "memory(GiB)": 72.85, "step": 81175, "token_acc": 0.5224358974358975, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.4780000856861317, "grad_norm": 6.196033954620361, "learning_rate": 2.1206779237192116e-05, "loss": 1.9724227905273437, "memory(GiB)": 72.85, "step": 81180, "token_acc": 0.5160256410256411, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.4782143010153805, "grad_norm": 6.713281154632568, "learning_rate": 2.1201277604278285e-05, "loss": 1.9469112396240233, "memory(GiB)": 72.85, "step": 81185, "token_acc": 0.5422535211267606, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.47842851634463, "grad_norm": 5.265523910522461, "learning_rate": 2.119577649308031e-05, "loss": 2.001058006286621, "memory(GiB)": 72.85, "step": 81190, "token_acc": 0.5498392282958199, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.4786427316738786, "grad_norm": 5.020660877227783, "learning_rate": 2.119027590369787e-05, 
"loss": 2.0799915313720705, "memory(GiB)": 72.85, "step": 81195, "token_acc": 0.5044642857142857, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.4788569470031274, "grad_norm": 7.252254962921143, "learning_rate": 2.118477583623062e-05, "loss": 2.228732109069824, "memory(GiB)": 72.85, "step": 81200, "token_acc": 0.5229681978798587, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.4790711623323767, "grad_norm": 5.2487311363220215, "learning_rate": 2.1179276290778188e-05, "loss": 2.2633813858032226, "memory(GiB)": 72.85, "step": 81205, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.4792853776616255, "grad_norm": 4.315386772155762, "learning_rate": 2.1173777267440205e-05, "loss": 1.7049221038818358, "memory(GiB)": 72.85, "step": 81210, "token_acc": 0.609375, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.4794995929908743, "grad_norm": 4.998721122741699, "learning_rate": 2.1168278766316286e-05, "loss": 2.1279693603515626, "memory(GiB)": 72.85, "step": 81215, "token_acc": 0.5559322033898305, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.4797138083201236, "grad_norm": 4.235116958618164, "learning_rate": 2.116278078750602e-05, "loss": 2.202541732788086, "memory(GiB)": 72.85, "step": 81220, "token_acc": 0.569023569023569, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.4799280236493724, "grad_norm": 5.8857502937316895, "learning_rate": 2.1157283331109057e-05, "loss": 2.2523786544799806, "memory(GiB)": 72.85, "step": 81225, "token_acc": 0.5347985347985348, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.480142238978621, "grad_norm": 5.526360034942627, "learning_rate": 2.115178639722496e-05, "loss": 2.006764602661133, "memory(GiB)": 72.85, "step": 81230, "token_acc": 0.5328185328185329, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.4803564543078704, "grad_norm": 6.150822639465332, "learning_rate": 2.1146289985953314e-05, "loss": 2.503253173828125, "memory(GiB)": 72.85, "step": 81235, "token_acc": 0.49480968858131485, 
"train_speed(iter/s)": 0.67265 }, { "epoch": 3.4805706696371193, "grad_norm": 8.40116024017334, "learning_rate": 2.114079409739369e-05, "loss": 2.2005426406860353, "memory(GiB)": 72.85, "step": 81240, "token_acc": 0.5473684210526316, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.480784884966368, "grad_norm": 7.775257110595703, "learning_rate": 2.113529873164566e-05, "loss": 2.301036834716797, "memory(GiB)": 72.85, "step": 81245, "token_acc": 0.48214285714285715, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.4809991002956173, "grad_norm": 5.170611381530762, "learning_rate": 2.1129803888808748e-05, "loss": 2.187972640991211, "memory(GiB)": 72.85, "step": 81250, "token_acc": 0.5543071161048689, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.481213315624866, "grad_norm": 4.716609477996826, "learning_rate": 2.1124309568982536e-05, "loss": 2.0225181579589844, "memory(GiB)": 72.85, "step": 81255, "token_acc": 0.494949494949495, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.481427530954115, "grad_norm": 4.457845687866211, "learning_rate": 2.1118815772266548e-05, "loss": 2.402507781982422, "memory(GiB)": 72.85, "step": 81260, "token_acc": 0.503030303030303, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.481641746283364, "grad_norm": 4.611264228820801, "learning_rate": 2.1113322498760308e-05, "loss": 2.000016975402832, "memory(GiB)": 72.85, "step": 81265, "token_acc": 0.5618374558303887, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.481855961612613, "grad_norm": 5.952761173248291, "learning_rate": 2.1107829748563308e-05, "loss": 1.9290023803710938, "memory(GiB)": 72.85, "step": 81270, "token_acc": 0.5206896551724138, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.482070176941862, "grad_norm": 5.430524826049805, "learning_rate": 2.1102337521775088e-05, "loss": 2.230666732788086, "memory(GiB)": 72.85, "step": 81275, "token_acc": 0.47635135135135137, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.482284392271111, "grad_norm": 5.850447177886963, "learning_rate": 
2.109684581849512e-05, "loss": 2.2420722961425783, "memory(GiB)": 72.85, "step": 81280, "token_acc": 0.4984709480122324, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.48249860760036, "grad_norm": 6.5925750732421875, "learning_rate": 2.1091354638822918e-05, "loss": 1.9019847869873048, "memory(GiB)": 72.85, "step": 81285, "token_acc": 0.5539033457249071, "train_speed(iter/s)": 0.672655 }, { "epoch": 3.4827128229296087, "grad_norm": 6.149680137634277, "learning_rate": 2.1085863982857946e-05, "loss": 2.1135543823242187, "memory(GiB)": 72.85, "step": 81290, "token_acc": 0.5077519379844961, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.482927038258858, "grad_norm": 6.94876766204834, "learning_rate": 2.1080373850699677e-05, "loss": 2.2631603240966798, "memory(GiB)": 72.85, "step": 81295, "token_acc": 0.5340501792114696, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.483141253588107, "grad_norm": 7.707319259643555, "learning_rate": 2.1074884242447555e-05, "loss": 1.7977943420410156, "memory(GiB)": 72.85, "step": 81300, "token_acc": 0.56640625, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.4833554689173556, "grad_norm": 5.867281913757324, "learning_rate": 2.1069395158201026e-05, "loss": 2.205335998535156, "memory(GiB)": 72.85, "step": 81305, "token_acc": 0.5620915032679739, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.483569684246605, "grad_norm": 5.570512771606445, "learning_rate": 2.1063906598059562e-05, "loss": 2.4320400238037108, "memory(GiB)": 72.85, "step": 81310, "token_acc": 0.49019607843137253, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.4837838995758537, "grad_norm": 6.520667552947998, "learning_rate": 2.1058418562122574e-05, "loss": 2.1296056747436523, "memory(GiB)": 72.85, "step": 81315, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.4839981149051025, "grad_norm": 5.09883451461792, "learning_rate": 2.1052931050489484e-05, "loss": 2.0631284713745117, "memory(GiB)": 72.85, "step": 81320, "token_acc": 
0.5236486486486487, "train_speed(iter/s)": 0.672677 }, { "epoch": 3.4842123302343517, "grad_norm": 4.143102169036865, "learning_rate": 2.10474440632597e-05, "loss": 1.857541275024414, "memory(GiB)": 72.85, "step": 81325, "token_acc": 0.5807453416149069, "train_speed(iter/s)": 0.672678 }, { "epoch": 3.4844265455636005, "grad_norm": 6.783512592315674, "learning_rate": 2.1041957600532626e-05, "loss": 2.4391733169555665, "memory(GiB)": 72.85, "step": 81330, "token_acc": 0.5031055900621118, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.4846407608928494, "grad_norm": 5.9035868644714355, "learning_rate": 2.1036471662407637e-05, "loss": 2.01224308013916, "memory(GiB)": 72.85, "step": 81335, "token_acc": 0.5985915492957746, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.4848549762220986, "grad_norm": 7.3474249839782715, "learning_rate": 2.1030986248984153e-05, "loss": 1.9971427917480469, "memory(GiB)": 72.85, "step": 81340, "token_acc": 0.5579710144927537, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.4850691915513474, "grad_norm": 8.074767112731934, "learning_rate": 2.102550136036151e-05, "loss": 2.0407238006591797, "memory(GiB)": 72.85, "step": 81345, "token_acc": 0.5357142857142857, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.4852834068805962, "grad_norm": 5.185755729675293, "learning_rate": 2.1020016996639107e-05, "loss": 2.098969841003418, "memory(GiB)": 72.85, "step": 81350, "token_acc": 0.5612648221343873, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.4854976222098455, "grad_norm": 7.236758708953857, "learning_rate": 2.1014533157916283e-05, "loss": 2.223899078369141, "memory(GiB)": 72.85, "step": 81355, "token_acc": 0.5666666666666667, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.4857118375390943, "grad_norm": 6.832993030548096, "learning_rate": 2.1009049844292378e-05, "loss": 2.217770004272461, "memory(GiB)": 72.85, "step": 81360, "token_acc": 0.5116279069767442, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.485926052868343, "grad_norm": 
8.698293685913086, "learning_rate": 2.100356705586672e-05, "loss": 2.2784616470336916, "memory(GiB)": 72.85, "step": 81365, "token_acc": 0.5190311418685121, "train_speed(iter/s)": 0.672678 }, { "epoch": 3.4861402681975924, "grad_norm": 7.175543785095215, "learning_rate": 2.0998084792738658e-05, "loss": 2.1259178161621093, "memory(GiB)": 72.85, "step": 81370, "token_acc": 0.5718849840255591, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.486354483526841, "grad_norm": 6.597747325897217, "learning_rate": 2.09926030550075e-05, "loss": 1.9513172149658202, "memory(GiB)": 72.85, "step": 81375, "token_acc": 0.6014492753623188, "train_speed(iter/s)": 0.672684 }, { "epoch": 3.48656869885609, "grad_norm": 4.800704479217529, "learning_rate": 2.0987121842772546e-05, "loss": 2.0542718887329103, "memory(GiB)": 72.85, "step": 81380, "token_acc": 0.5543859649122806, "train_speed(iter/s)": 0.672692 }, { "epoch": 3.4867829141853393, "grad_norm": 4.75586462020874, "learning_rate": 2.0981641156133093e-05, "loss": 2.1638145446777344, "memory(GiB)": 72.85, "step": 81385, "token_acc": 0.532051282051282, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.486997129514588, "grad_norm": 5.377697944641113, "learning_rate": 2.0976160995188437e-05, "loss": 2.6081378936767576, "memory(GiB)": 72.85, "step": 81390, "token_acc": 0.45910290237467016, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.487211344843837, "grad_norm": 7.150356769561768, "learning_rate": 2.097068136003783e-05, "loss": 2.0290504455566407, "memory(GiB)": 72.85, "step": 81395, "token_acc": 0.5734265734265734, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.487425560173086, "grad_norm": 5.564051628112793, "learning_rate": 2.096520225078058e-05, "loss": 1.9899173736572267, "memory(GiB)": 72.85, "step": 81400, "token_acc": 0.5697674418604651, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.487639775502335, "grad_norm": 6.488508701324463, "learning_rate": 2.095972366751593e-05, "loss": 2.440695953369141, "memory(GiB)": 72.85, "step": 
81405, "token_acc": 0.4880952380952381, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.4878539908315838, "grad_norm": 6.357293605804443, "learning_rate": 2.0954245610343126e-05, "loss": 1.8023811340332032, "memory(GiB)": 72.85, "step": 81410, "token_acc": 0.6066176470588235, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.488068206160833, "grad_norm": 5.403370380401611, "learning_rate": 2.094876807936139e-05, "loss": 2.2205286026000977, "memory(GiB)": 72.85, "step": 81415, "token_acc": 0.5451505016722408, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.488282421490082, "grad_norm": 7.5449934005737305, "learning_rate": 2.0943291074669995e-05, "loss": 2.2263616561889648, "memory(GiB)": 72.85, "step": 81420, "token_acc": 0.49825783972125437, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.4884966368193306, "grad_norm": 4.735187530517578, "learning_rate": 2.0937814596368122e-05, "loss": 1.9297052383422852, "memory(GiB)": 72.85, "step": 81425, "token_acc": 0.5905511811023622, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.48871085214858, "grad_norm": 5.715268611907959, "learning_rate": 2.0932338644555017e-05, "loss": 1.8999898910522461, "memory(GiB)": 72.85, "step": 81430, "token_acc": 0.5413533834586466, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.4889250674778287, "grad_norm": 5.1336212158203125, "learning_rate": 2.092686321932986e-05, "loss": 1.8870893478393556, "memory(GiB)": 72.85, "step": 81435, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.4891392828070775, "grad_norm": 7.230395317077637, "learning_rate": 2.0921388320791858e-05, "loss": 2.444720649719238, "memory(GiB)": 72.85, "step": 81440, "token_acc": 0.4657534246575342, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.489353498136327, "grad_norm": 5.524167537689209, "learning_rate": 2.091591394904018e-05, "loss": 2.096299171447754, "memory(GiB)": 72.85, "step": 81445, "token_acc": 0.5396341463414634, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.4895677134655756, 
"grad_norm": 6.449456691741943, "learning_rate": 2.0910440104173985e-05, "loss": 2.0776443481445312, "memory(GiB)": 72.85, "step": 81450, "token_acc": 0.5037037037037037, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.4897819287948244, "grad_norm": 4.8556036949157715, "learning_rate": 2.0904966786292473e-05, "loss": 2.08079833984375, "memory(GiB)": 72.85, "step": 81455, "token_acc": 0.525679758308157, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.4899961441240737, "grad_norm": 7.572358131408691, "learning_rate": 2.089949399549478e-05, "loss": 2.0744392395019533, "memory(GiB)": 72.85, "step": 81460, "token_acc": 0.549618320610687, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.4902103594533225, "grad_norm": 6.574545860290527, "learning_rate": 2.0894021731880052e-05, "loss": 2.194110107421875, "memory(GiB)": 72.85, "step": 81465, "token_acc": 0.5080906148867314, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.4904245747825713, "grad_norm": 4.744706630706787, "learning_rate": 2.088854999554743e-05, "loss": 2.087970733642578, "memory(GiB)": 72.85, "step": 81470, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.4906387901118205, "grad_norm": 6.369481563568115, "learning_rate": 2.0883078786596028e-05, "loss": 2.0107490539550783, "memory(GiB)": 72.85, "step": 81475, "token_acc": 0.5559440559440559, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.4908530054410694, "grad_norm": 6.051743030548096, "learning_rate": 2.087760810512495e-05, "loss": 1.9692184448242187, "memory(GiB)": 72.85, "step": 81480, "token_acc": 0.5547703180212014, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.491067220770318, "grad_norm": 5.6023478507995605, "learning_rate": 2.087213795123334e-05, "loss": 2.0977027893066404, "memory(GiB)": 72.85, "step": 81485, "token_acc": 0.5542857142857143, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.4912814360995674, "grad_norm": 5.2239508628845215, "learning_rate": 2.0866668325020254e-05, "loss": 2.298929977416992, 
"memory(GiB)": 72.85, "step": 81490, "token_acc": 0.5359712230215827, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.4914956514288162, "grad_norm": 5.657415390014648, "learning_rate": 2.086119922658482e-05, "loss": 1.9408672332763672, "memory(GiB)": 72.85, "step": 81495, "token_acc": 0.5514018691588785, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.491709866758065, "grad_norm": 5.159480571746826, "learning_rate": 2.0855730656026102e-05, "loss": 1.8962047576904297, "memory(GiB)": 72.85, "step": 81500, "token_acc": 0.5740072202166066, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.491709866758065, "eval_loss": 2.0684690475463867, "eval_runtime": 15.8472, "eval_samples_per_second": 6.31, "eval_steps_per_second": 6.31, "eval_token_acc": 0.48956884561891517, "step": 81500 }, { "epoch": 3.4919240820873143, "grad_norm": 6.313748359680176, "learning_rate": 2.0850262613443157e-05, "loss": 2.0812509536743162, "memory(GiB)": 72.85, "step": 81505, "token_acc": 0.49800399201596807, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.492138297416563, "grad_norm": 8.532894134521484, "learning_rate": 2.0844795098935034e-05, "loss": 2.3516231536865235, "memory(GiB)": 72.85, "step": 81510, "token_acc": 0.5343283582089552, "train_speed(iter/s)": 0.672611 }, { "epoch": 3.492352512745812, "grad_norm": 4.782888412475586, "learning_rate": 2.0839328112600808e-05, "loss": 2.1832918167114257, "memory(GiB)": 72.85, "step": 81515, "token_acc": 0.584717607973422, "train_speed(iter/s)": 0.672614 }, { "epoch": 3.492566728075061, "grad_norm": 6.886974334716797, "learning_rate": 2.0833861654539517e-05, "loss": 2.2391149520874025, "memory(GiB)": 72.85, "step": 81520, "token_acc": 0.5152542372881356, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.49278094340431, "grad_norm": 5.784388542175293, "learning_rate": 2.082839572485018e-05, "loss": 2.0105037689208984, "memory(GiB)": 72.85, "step": 81525, "token_acc": 0.5474006116207951, "train_speed(iter/s)": 0.672617 }, { "epoch": 3.492995158733559, 
"grad_norm": 5.029408931732178, "learning_rate": 2.0822930323631816e-05, "loss": 2.0730031967163085, "memory(GiB)": 72.85, "step": 81530, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.67261 }, { "epoch": 3.493209374062808, "grad_norm": 8.237629890441895, "learning_rate": 2.081746545098344e-05, "loss": 2.0651500701904295, "memory(GiB)": 72.85, "step": 81535, "token_acc": 0.5688405797101449, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.493423589392057, "grad_norm": 5.097856521606445, "learning_rate": 2.0812001107004042e-05, "loss": 2.1602827072143556, "memory(GiB)": 72.85, "step": 81540, "token_acc": 0.5235457063711911, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.4936378047213057, "grad_norm": 6.295601844787598, "learning_rate": 2.0806537291792638e-05, "loss": 1.8219249725341797, "memory(GiB)": 72.85, "step": 81545, "token_acc": 0.6018957345971564, "train_speed(iter/s)": 0.672624 }, { "epoch": 3.493852020050555, "grad_norm": 5.544720649719238, "learning_rate": 2.0801074005448196e-05, "loss": 2.4264984130859375, "memory(GiB)": 72.85, "step": 81550, "token_acc": 0.547945205479452, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.4940662353798038, "grad_norm": 6.805716037750244, "learning_rate": 2.079561124806969e-05, "loss": 2.169170379638672, "memory(GiB)": 72.85, "step": 81555, "token_acc": 0.5783132530120482, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.4942804507090526, "grad_norm": 5.38956356048584, "learning_rate": 2.0790149019756078e-05, "loss": 2.1128717422485352, "memory(GiB)": 72.85, "step": 81560, "token_acc": 0.5509641873278237, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.494494666038302, "grad_norm": 3.5310146808624268, "learning_rate": 2.0784687320606306e-05, "loss": 1.9245433807373047, "memory(GiB)": 72.85, "step": 81565, "token_acc": 0.5740072202166066, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.4947088813675506, "grad_norm": 7.562002658843994, "learning_rate": 2.077922615071933e-05, "loss": 2.169594383239746, 
"memory(GiB)": 72.85, "step": 81570, "token_acc": 0.5429553264604811, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.4949230966967995, "grad_norm": 5.902254104614258, "learning_rate": 2.0773765510194092e-05, "loss": 2.176633834838867, "memory(GiB)": 72.85, "step": 81575, "token_acc": 0.5482758620689655, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.4951373120260487, "grad_norm": 5.487933158874512, "learning_rate": 2.0768305399129517e-05, "loss": 2.018417739868164, "memory(GiB)": 72.85, "step": 81580, "token_acc": 0.5708812260536399, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.4953515273552975, "grad_norm": 7.021001815795898, "learning_rate": 2.076284581762451e-05, "loss": 2.044536590576172, "memory(GiB)": 72.85, "step": 81585, "token_acc": 0.5570469798657718, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.4955657426845463, "grad_norm": 4.186278820037842, "learning_rate": 2.0757386765777975e-05, "loss": 1.9790271759033202, "memory(GiB)": 72.85, "step": 81590, "token_acc": 0.5096774193548387, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.4957799580137956, "grad_norm": 4.1688714027404785, "learning_rate": 2.0751928243688794e-05, "loss": 1.8342378616333008, "memory(GiB)": 72.85, "step": 81595, "token_acc": 0.5523465703971119, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.4959941733430444, "grad_norm": 6.951740741729736, "learning_rate": 2.0746470251455886e-05, "loss": 2.245059776306152, "memory(GiB)": 72.85, "step": 81600, "token_acc": 0.5148148148148148, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.496208388672293, "grad_norm": 7.172634601593018, "learning_rate": 2.0741012789178116e-05, "loss": 2.266283416748047, "memory(GiB)": 72.85, "step": 81605, "token_acc": 0.5097402597402597, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.4964226040015425, "grad_norm": 5.309043884277344, "learning_rate": 2.073555585695434e-05, "loss": 1.9599311828613282, "memory(GiB)": 72.85, "step": 81610, "token_acc": 0.5980392156862745, "train_speed(iter/s)": 0.672629 }, { 
"epoch": 3.4966368193307913, "grad_norm": 5.917293071746826, "learning_rate": 2.073009945488343e-05, "loss": 2.1595443725585937, "memory(GiB)": 72.85, "step": 81615, "token_acc": 0.5618729096989966, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.49685103466004, "grad_norm": 7.095696449279785, "learning_rate": 2.072464358306422e-05, "loss": 2.2446746826171875, "memory(GiB)": 72.85, "step": 81620, "token_acc": 0.5433333333333333, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.4970652499892894, "grad_norm": 5.2990617752075195, "learning_rate": 2.0719188241595532e-05, "loss": 2.212162399291992, "memory(GiB)": 72.85, "step": 81625, "token_acc": 0.5171339563862928, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.497279465318538, "grad_norm": 6.631138801574707, "learning_rate": 2.0713733430576244e-05, "loss": 2.3893402099609373, "memory(GiB)": 72.85, "step": 81630, "token_acc": 0.5099337748344371, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.497493680647787, "grad_norm": 6.425409317016602, "learning_rate": 2.0708279150105143e-05, "loss": 1.9204565048217774, "memory(GiB)": 72.85, "step": 81635, "token_acc": 0.5843621399176955, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.4977078959770362, "grad_norm": 5.972413063049316, "learning_rate": 2.0702825400281024e-05, "loss": 2.250198745727539, "memory(GiB)": 72.85, "step": 81640, "token_acc": 0.5444839857651246, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.497922111306285, "grad_norm": 6.198363304138184, "learning_rate": 2.069737218120273e-05, "loss": 1.8663137435913086, "memory(GiB)": 72.85, "step": 81645, "token_acc": 0.5801526717557252, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.498136326635534, "grad_norm": 4.669450759887695, "learning_rate": 2.069191949296902e-05, "loss": 1.8485973358154297, "memory(GiB)": 72.85, "step": 81650, "token_acc": 0.5602836879432624, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.498350541964783, "grad_norm": 6.426802635192871, "learning_rate": 2.0686467335678668e-05, "loss": 
2.079289436340332, "memory(GiB)": 72.85, "step": 81655, "token_acc": 0.55, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.498564757294032, "grad_norm": 5.31290864944458, "learning_rate": 2.0681015709430475e-05, "loss": 1.91973819732666, "memory(GiB)": 72.85, "step": 81660, "token_acc": 0.5608108108108109, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.4987789726232807, "grad_norm": 6.2758684158325195, "learning_rate": 2.0675564614323185e-05, "loss": 1.9120012283325196, "memory(GiB)": 72.85, "step": 81665, "token_acc": 0.5956678700361011, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.49899318795253, "grad_norm": 5.836574554443359, "learning_rate": 2.0670114050455546e-05, "loss": 2.1444034576416016, "memory(GiB)": 72.85, "step": 81670, "token_acc": 0.5173501577287066, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.499207403281779, "grad_norm": 4.534523010253906, "learning_rate": 2.066466401792631e-05, "loss": 2.0310686111450194, "memory(GiB)": 72.85, "step": 81675, "token_acc": 0.5448717948717948, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.4994216186110276, "grad_norm": 6.340795993804932, "learning_rate": 2.0659214516834207e-05, "loss": 2.2450180053710938, "memory(GiB)": 72.85, "step": 81680, "token_acc": 0.5300353356890459, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.499635833940277, "grad_norm": 6.864187717437744, "learning_rate": 2.065376554727793e-05, "loss": 2.0955949783325196, "memory(GiB)": 72.85, "step": 81685, "token_acc": 0.5259515570934256, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.4998500492695257, "grad_norm": 5.857882499694824, "learning_rate": 2.0648317109356246e-05, "loss": 2.242972755432129, "memory(GiB)": 72.85, "step": 81690, "token_acc": 0.515625, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.500064264598775, "grad_norm": 6.882266044616699, "learning_rate": 2.064286920316783e-05, "loss": 2.4993824005126952, "memory(GiB)": 72.85, "step": 81695, "token_acc": 0.4505928853754941, "train_speed(iter/s)": 0.672642 }, { "epoch": 
3.5002784799280238, "grad_norm": 6.4846367835998535, "learning_rate": 2.0637421828811375e-05, "loss": 2.082983207702637, "memory(GiB)": 72.85, "step": 81700, "token_acc": 0.5182186234817814, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.5004926952572726, "grad_norm": 5.93856143951416, "learning_rate": 2.0631974986385565e-05, "loss": 2.1400190353393556, "memory(GiB)": 72.85, "step": 81705, "token_acc": 0.51875, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.500706910586522, "grad_norm": 6.206775188446045, "learning_rate": 2.0626528675989067e-05, "loss": 2.0147748947143556, "memory(GiB)": 72.85, "step": 81710, "token_acc": 0.541958041958042, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.5009211259157706, "grad_norm": 7.903863906860352, "learning_rate": 2.062108289772055e-05, "loss": 2.3486564636230467, "memory(GiB)": 72.85, "step": 81715, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.5011353412450195, "grad_norm": 5.3787007331848145, "learning_rate": 2.06156376516787e-05, "loss": 2.568960762023926, "memory(GiB)": 72.85, "step": 81720, "token_acc": 0.49843260188087773, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.5013495565742687, "grad_norm": 5.126669883728027, "learning_rate": 2.0610192937962137e-05, "loss": 2.0523242950439453, "memory(GiB)": 72.85, "step": 81725, "token_acc": 0.549800796812749, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.5015637719035175, "grad_norm": 6.4628586769104, "learning_rate": 2.0604748756669495e-05, "loss": 2.1453155517578124, "memory(GiB)": 72.85, "step": 81730, "token_acc": 0.5155709342560554, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.5017779872327663, "grad_norm": 6.319869041442871, "learning_rate": 2.059930510789941e-05, "loss": 2.1196405410766603, "memory(GiB)": 72.85, "step": 81735, "token_acc": 0.4983164983164983, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.5019922025620156, "grad_norm": 6.749495506286621, "learning_rate": 2.0593861991750467e-05, "loss": 2.1129032135009767, 
"memory(GiB)": 72.85, "step": 81740, "token_acc": 0.5570175438596491, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.5022064178912644, "grad_norm": 5.312088489532471, "learning_rate": 2.0588419408321326e-05, "loss": 1.918635368347168, "memory(GiB)": 72.85, "step": 81745, "token_acc": 0.556420233463035, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.5024206332205132, "grad_norm": 4.793365001678467, "learning_rate": 2.0582977357710555e-05, "loss": 2.382061576843262, "memory(GiB)": 72.85, "step": 81750, "token_acc": 0.49702380952380953, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.5026348485497625, "grad_norm": 6.432275295257568, "learning_rate": 2.0577535840016736e-05, "loss": 2.010537528991699, "memory(GiB)": 72.85, "step": 81755, "token_acc": 0.5859375, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.5028490638790113, "grad_norm": 6.213893890380859, "learning_rate": 2.057209485533847e-05, "loss": 1.9978240966796874, "memory(GiB)": 72.85, "step": 81760, "token_acc": 0.5610561056105611, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.50306327920826, "grad_norm": 6.9376420974731445, "learning_rate": 2.05666544037743e-05, "loss": 2.0685068130493165, "memory(GiB)": 72.85, "step": 81765, "token_acc": 0.5254901960784314, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.5032774945375094, "grad_norm": 6.021786689758301, "learning_rate": 2.0561214485422785e-05, "loss": 2.255149078369141, "memory(GiB)": 72.85, "step": 81770, "token_acc": 0.5304054054054054, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.503491709866758, "grad_norm": 6.663199424743652, "learning_rate": 2.05557751003825e-05, "loss": 2.207356643676758, "memory(GiB)": 72.85, "step": 81775, "token_acc": 0.5394190871369294, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.503705925196007, "grad_norm": 4.546817779541016, "learning_rate": 2.055033624875197e-05, "loss": 2.1898113250732423, "memory(GiB)": 72.85, "step": 81780, "token_acc": 0.5360230547550432, "train_speed(iter/s)": 0.672644 }, { "epoch": 
3.5039201405252562, "grad_norm": 6.082249164581299, "learning_rate": 2.0544897930629707e-05, "loss": 2.1859193801879884, "memory(GiB)": 72.85, "step": 81785, "token_acc": 0.4984126984126984, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.504134355854505, "grad_norm": 5.256475448608398, "learning_rate": 2.053946014611427e-05, "loss": 2.304594802856445, "memory(GiB)": 72.85, "step": 81790, "token_acc": 0.5174603174603175, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.504348571183754, "grad_norm": 5.640197277069092, "learning_rate": 2.0534022895304146e-05, "loss": 2.2750080108642576, "memory(GiB)": 72.85, "step": 81795, "token_acc": 0.5131578947368421, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.504562786513003, "grad_norm": 5.885473728179932, "learning_rate": 2.052858617829782e-05, "loss": 2.1469064712524415, "memory(GiB)": 72.85, "step": 81800, "token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.504777001842252, "grad_norm": 5.799552917480469, "learning_rate": 2.052314999519382e-05, "loss": 2.182366943359375, "memory(GiB)": 72.85, "step": 81805, "token_acc": 0.5340909090909091, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.5049912171715008, "grad_norm": 3.8973965644836426, "learning_rate": 2.051771434609061e-05, "loss": 2.4071741104125977, "memory(GiB)": 72.85, "step": 81810, "token_acc": 0.48514851485148514, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.50520543250075, "grad_norm": 5.646766185760498, "learning_rate": 2.051227923108666e-05, "loss": 2.1306753158569336, "memory(GiB)": 72.85, "step": 81815, "token_acc": 0.5163398692810458, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.505419647829999, "grad_norm": 5.997304916381836, "learning_rate": 2.0506844650280436e-05, "loss": 2.2410017013549806, "memory(GiB)": 72.85, "step": 81820, "token_acc": 0.5170068027210885, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.5056338631592476, "grad_norm": 7.898597717285156, "learning_rate": 2.0501410603770388e-05, "loss": 
2.293292236328125, "memory(GiB)": 72.85, "step": 81825, "token_acc": 0.5211726384364821, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.505848078488497, "grad_norm": 8.192365646362305, "learning_rate": 2.049597709165494e-05, "loss": 2.184718894958496, "memory(GiB)": 72.85, "step": 81830, "token_acc": 0.52, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.5060622938177457, "grad_norm": 6.082423210144043, "learning_rate": 2.0490544114032563e-05, "loss": 2.2085723876953125, "memory(GiB)": 72.85, "step": 81835, "token_acc": 0.5137254901960784, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.5062765091469945, "grad_norm": 4.438302516937256, "learning_rate": 2.048511167100166e-05, "loss": 2.071910095214844, "memory(GiB)": 72.85, "step": 81840, "token_acc": 0.5912698412698413, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.5064907244762438, "grad_norm": 4.927206039428711, "learning_rate": 2.0479679762660648e-05, "loss": 1.9673664093017578, "memory(GiB)": 72.85, "step": 81845, "token_acc": 0.5702247191011236, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.5067049398054926, "grad_norm": 5.262843608856201, "learning_rate": 2.0474248389107924e-05, "loss": 2.019702911376953, "memory(GiB)": 72.85, "step": 81850, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.5069191551347414, "grad_norm": 4.790360450744629, "learning_rate": 2.0468817550441877e-05, "loss": 2.2975580215454103, "memory(GiB)": 72.85, "step": 81855, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.5071333704639907, "grad_norm": 5.754069805145264, "learning_rate": 2.04633872467609e-05, "loss": 2.0961864471435545, "memory(GiB)": 72.85, "step": 81860, "token_acc": 0.49818181818181817, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.5073475857932395, "grad_norm": 6.547964572906494, "learning_rate": 2.0457957478163386e-05, "loss": 1.939455795288086, "memory(GiB)": 72.85, "step": 81865, "token_acc": 0.6008064516129032, "train_speed(iter/s)": 0.67267 }, { 
"epoch": 3.5075618011224883, "grad_norm": 5.601988315582275, "learning_rate": 2.0452528244747688e-05, "loss": 2.259473991394043, "memory(GiB)": 72.85, "step": 81870, "token_acc": 0.5260115606936416, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.5077760164517375, "grad_norm": 5.547113418579102, "learning_rate": 2.0447099546612157e-05, "loss": 2.088049125671387, "memory(GiB)": 72.85, "step": 81875, "token_acc": 0.5247933884297521, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.5079902317809863, "grad_norm": 8.314697265625, "learning_rate": 2.0441671383855144e-05, "loss": 2.236643600463867, "memory(GiB)": 72.85, "step": 81880, "token_acc": 0.5156794425087108, "train_speed(iter/s)": 0.672676 }, { "epoch": 3.508204447110235, "grad_norm": 6.461391925811768, "learning_rate": 2.043624375657496e-05, "loss": 2.1997299194335938, "memory(GiB)": 72.85, "step": 81885, "token_acc": 0.5469255663430421, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.5084186624394844, "grad_norm": 5.548227787017822, "learning_rate": 2.043081666486997e-05, "loss": 1.9759065628051757, "memory(GiB)": 72.85, "step": 81890, "token_acc": 0.5756457564575646, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.5086328777687332, "grad_norm": 6.19331169128418, "learning_rate": 2.0425390108838477e-05, "loss": 2.297678565979004, "memory(GiB)": 72.85, "step": 81895, "token_acc": 0.5104477611940299, "train_speed(iter/s)": 0.672685 }, { "epoch": 3.508847093097982, "grad_norm": 5.440852165222168, "learning_rate": 2.0419964088578785e-05, "loss": 2.2151123046875, "memory(GiB)": 72.85, "step": 81900, "token_acc": 0.543026706231454, "train_speed(iter/s)": 0.672692 }, { "epoch": 3.5090613084272313, "grad_norm": 5.754644393920898, "learning_rate": 2.0414538604189194e-05, "loss": 2.181832122802734, "memory(GiB)": 72.85, "step": 81905, "token_acc": 0.5197368421052632, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.50927552375648, "grad_norm": 5.6201701164245605, "learning_rate": 2.0409113655767988e-05, "loss": 
2.1100061416625975, "memory(GiB)": 72.85, "step": 81910, "token_acc": 0.535483870967742, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.509489739085729, "grad_norm": 4.969036102294922, "learning_rate": 2.0403689243413426e-05, "loss": 2.113016891479492, "memory(GiB)": 72.85, "step": 81915, "token_acc": 0.5591397849462365, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.509703954414978, "grad_norm": 5.703455448150635, "learning_rate": 2.0398265367223818e-05, "loss": 2.2031665802001954, "memory(GiB)": 72.85, "step": 81920, "token_acc": 0.5236486486486487, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.509918169744227, "grad_norm": 5.896630764007568, "learning_rate": 2.03928420272974e-05, "loss": 2.195306396484375, "memory(GiB)": 72.85, "step": 81925, "token_acc": 0.5033112582781457, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.510132385073476, "grad_norm": 5.836372375488281, "learning_rate": 2.0387419223732422e-05, "loss": 2.258682060241699, "memory(GiB)": 72.85, "step": 81930, "token_acc": 0.5608108108108109, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.510346600402725, "grad_norm": 7.441843032836914, "learning_rate": 2.0381996956627103e-05, "loss": 1.9593338012695312, "memory(GiB)": 72.85, "step": 81935, "token_acc": 0.5606060606060606, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.510560815731974, "grad_norm": 5.618685722351074, "learning_rate": 2.037657522607972e-05, "loss": 2.1054948806762694, "memory(GiB)": 72.85, "step": 81940, "token_acc": 0.5029239766081871, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.5107750310612227, "grad_norm": 4.832515716552734, "learning_rate": 2.0371154032188438e-05, "loss": 1.87152156829834, "memory(GiB)": 72.85, "step": 81945, "token_acc": 0.588, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.510989246390472, "grad_norm": 5.50546932220459, "learning_rate": 2.0365733375051523e-05, "loss": 2.1462615966796874, "memory(GiB)": 72.85, "step": 81950, "token_acc": 0.4912891986062718, "train_speed(iter/s)": 0.67271 }, { 
"epoch": 3.5112034617197208, "grad_norm": 6.351517677307129, "learning_rate": 2.036031325476714e-05, "loss": 2.0720582962036134, "memory(GiB)": 72.85, "step": 81955, "token_acc": 0.5461254612546126, "train_speed(iter/s)": 0.672711 }, { "epoch": 3.5114176770489696, "grad_norm": 5.028898239135742, "learning_rate": 2.0354893671433485e-05, "loss": 2.0165887832641602, "memory(GiB)": 72.85, "step": 81960, "token_acc": 0.5694915254237288, "train_speed(iter/s)": 0.672715 }, { "epoch": 3.511631892378219, "grad_norm": 5.514338970184326, "learning_rate": 2.0349474625148744e-05, "loss": 2.0209114074707033, "memory(GiB)": 72.85, "step": 81965, "token_acc": 0.5298507462686567, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.5118461077074676, "grad_norm": 4.744051933288574, "learning_rate": 2.0344056116011083e-05, "loss": 2.117976188659668, "memory(GiB)": 72.85, "step": 81970, "token_acc": 0.565359477124183, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.5120603230367164, "grad_norm": 7.640831470489502, "learning_rate": 2.033863814411865e-05, "loss": 1.6530101776123047, "memory(GiB)": 72.85, "step": 81975, "token_acc": 0.6036866359447005, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.5122745383659657, "grad_norm": 5.790241241455078, "learning_rate": 2.033322070956963e-05, "loss": 2.0752323150634764, "memory(GiB)": 72.85, "step": 81980, "token_acc": 0.5598455598455598, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.5124887536952145, "grad_norm": 6.832118988037109, "learning_rate": 2.032780381246214e-05, "loss": 2.1536163330078124, "memory(GiB)": 72.85, "step": 81985, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.5127029690244633, "grad_norm": 6.831270694732666, "learning_rate": 2.0322387452894326e-05, "loss": 1.9866905212402344, "memory(GiB)": 72.85, "step": 81990, "token_acc": 0.5381818181818182, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.5129171843537126, "grad_norm": 7.855515480041504, "learning_rate": 2.0316971630964298e-05, "loss": 
2.065321350097656, "memory(GiB)": 72.85, "step": 81995, "token_acc": 0.5353846153846153, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.5131313996829614, "grad_norm": 4.8824782371521, "learning_rate": 2.0311556346770156e-05, "loss": 2.0287425994873045, "memory(GiB)": 72.85, "step": 82000, "token_acc": 0.5318471337579618, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.5131313996829614, "eval_loss": 2.107389450073242, "eval_runtime": 15.3153, "eval_samples_per_second": 6.529, "eval_steps_per_second": 6.529, "eval_token_acc": 0.5135542168674698, "step": 82000 }, { "epoch": 3.51334561501221, "grad_norm": 4.355884075164795, "learning_rate": 2.0306141600410046e-05, "loss": 2.215627670288086, "memory(GiB)": 72.85, "step": 82005, "token_acc": 0.5114583333333333, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.5135598303414595, "grad_norm": 6.198500633239746, "learning_rate": 2.030072739198201e-05, "loss": 2.169094276428223, "memory(GiB)": 72.85, "step": 82010, "token_acc": 0.5148247978436657, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.5137740456707083, "grad_norm": 8.69444465637207, "learning_rate": 2.0295313721584174e-05, "loss": 1.9657289505004882, "memory(GiB)": 72.85, "step": 82015, "token_acc": 0.5429447852760736, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.513988260999957, "grad_norm": 5.398221969604492, "learning_rate": 2.0289900589314605e-05, "loss": 2.4373950958251953, "memory(GiB)": 72.85, "step": 82020, "token_acc": 0.49454545454545457, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.5142024763292063, "grad_norm": 4.761831760406494, "learning_rate": 2.028448799527135e-05, "loss": 2.1524065017700194, "memory(GiB)": 72.85, "step": 82025, "token_acc": 0.5173501577287066, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.514416691658455, "grad_norm": 8.615418434143066, "learning_rate": 2.0279075939552454e-05, "loss": 2.1836484909057616, "memory(GiB)": 72.85, "step": 82030, "token_acc": 0.5527272727272727, "train_speed(iter/s)": 0.672633 }, { "epoch": 
3.514630906987704, "grad_norm": 5.169909954071045, "learning_rate": 2.0273664422255994e-05, "loss": 2.2866268157958984, "memory(GiB)": 72.85, "step": 82035, "token_acc": 0.5182724252491694, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.5148451223169532, "grad_norm": 7.785467147827148, "learning_rate": 2.026825344347999e-05, "loss": 2.2937923431396485, "memory(GiB)": 72.85, "step": 82040, "token_acc": 0.49814126394052044, "train_speed(iter/s)": 0.672624 }, { "epoch": 3.515059337646202, "grad_norm": 6.411333084106445, "learning_rate": 2.026284300332247e-05, "loss": 2.152650833129883, "memory(GiB)": 72.85, "step": 82045, "token_acc": 0.532051282051282, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.515273552975451, "grad_norm": 6.024623394012451, "learning_rate": 2.025743310188144e-05, "loss": 2.4345130920410156, "memory(GiB)": 72.85, "step": 82050, "token_acc": 0.48220064724919093, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.5154877683047, "grad_norm": 4.705246925354004, "learning_rate": 2.025202373925491e-05, "loss": 2.07906494140625, "memory(GiB)": 72.85, "step": 82055, "token_acc": 0.5787878787878787, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.515701983633949, "grad_norm": 6.277451992034912, "learning_rate": 2.0246614915540855e-05, "loss": 1.8905580520629883, "memory(GiB)": 72.85, "step": 82060, "token_acc": 0.5897435897435898, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.5159161989631977, "grad_norm": 5.356415271759033, "learning_rate": 2.0241206630837306e-05, "loss": 2.1931072235107423, "memory(GiB)": 72.85, "step": 82065, "token_acc": 0.5310734463276836, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.516130414292447, "grad_norm": 5.5830183029174805, "learning_rate": 2.0235798885242212e-05, "loss": 2.047726058959961, "memory(GiB)": 72.85, "step": 82070, "token_acc": 0.5363636363636364, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.516344629621696, "grad_norm": 5.297658443450928, "learning_rate": 2.023039167885354e-05, "loss": 2.2347843170166017, 
"memory(GiB)": 72.85, "step": 82075, "token_acc": 0.4784172661870504, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.5165588449509446, "grad_norm": 4.425514221191406, "learning_rate": 2.022498501176923e-05, "loss": 1.978497314453125, "memory(GiB)": 72.85, "step": 82080, "token_acc": 0.5423728813559322, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.516773060280194, "grad_norm": 5.967526435852051, "learning_rate": 2.0219578884087276e-05, "loss": 1.840753936767578, "memory(GiB)": 72.85, "step": 82085, "token_acc": 0.5625, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.5169872756094427, "grad_norm": 6.2314839363098145, "learning_rate": 2.021417329590556e-05, "loss": 2.0780271530151366, "memory(GiB)": 72.85, "step": 82090, "token_acc": 0.5253623188405797, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.5172014909386915, "grad_norm": 4.218230724334717, "learning_rate": 2.0208768247322058e-05, "loss": 1.9952201843261719, "memory(GiB)": 72.85, "step": 82095, "token_acc": 0.5391849529780565, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.5174157062679408, "grad_norm": 6.803869724273682, "learning_rate": 2.020336373843466e-05, "loss": 1.856285285949707, "memory(GiB)": 72.85, "step": 82100, "token_acc": 0.5544554455445545, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.5176299215971896, "grad_norm": 4.587182998657227, "learning_rate": 2.0197959769341286e-05, "loss": 1.9114030838012694, "memory(GiB)": 72.85, "step": 82105, "token_acc": 0.5625, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.5178441369264384, "grad_norm": 6.890520095825195, "learning_rate": 2.0192556340139822e-05, "loss": 2.4667152404785155, "memory(GiB)": 72.85, "step": 82110, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.5180583522556876, "grad_norm": 5.437001705169678, "learning_rate": 2.0187153450928164e-05, "loss": 2.2343368530273438, "memory(GiB)": 72.85, "step": 82115, "token_acc": 0.5113636363636364, "train_speed(iter/s)": 0.672638 }, { "epoch": 
3.5182725675849364, "grad_norm": 4.995700836181641, "learning_rate": 2.0181751101804168e-05, "loss": 1.9036191940307616, "memory(GiB)": 72.85, "step": 82120, "token_acc": 0.607773851590106, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.5184867829141853, "grad_norm": 6.842763900756836, "learning_rate": 2.0176349292865744e-05, "loss": 2.2757965087890626, "memory(GiB)": 72.85, "step": 82125, "token_acc": 0.5095541401273885, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.5187009982434345, "grad_norm": 4.141037940979004, "learning_rate": 2.0170948024210727e-05, "loss": 2.218646430969238, "memory(GiB)": 72.85, "step": 82130, "token_acc": 0.5347985347985348, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.5189152135726833, "grad_norm": 5.319639682769775, "learning_rate": 2.016554729593697e-05, "loss": 2.1914899826049803, "memory(GiB)": 72.85, "step": 82135, "token_acc": 0.5149501661129569, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.519129428901932, "grad_norm": 4.821976661682129, "learning_rate": 2.0160147108142308e-05, "loss": 2.125233268737793, "memory(GiB)": 72.85, "step": 82140, "token_acc": 0.5109717868338558, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.5193436442311814, "grad_norm": 6.320165634155273, "learning_rate": 2.0154747460924563e-05, "loss": 2.244694709777832, "memory(GiB)": 72.85, "step": 82145, "token_acc": 0.5098039215686274, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.51955785956043, "grad_norm": 7.02391242980957, "learning_rate": 2.0149348354381575e-05, "loss": 2.077692413330078, "memory(GiB)": 72.85, "step": 82150, "token_acc": 0.5594405594405595, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.519772074889679, "grad_norm": 6.9497761726379395, "learning_rate": 2.0143949788611126e-05, "loss": 1.977520751953125, "memory(GiB)": 72.85, "step": 82155, "token_acc": 0.5666666666666667, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.5199862902189283, "grad_norm": 4.789803981781006, "learning_rate": 2.0138551763711055e-05, "loss": 
1.956216049194336, "memory(GiB)": 72.85, "step": 82160, "token_acc": 0.5521885521885522, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.520200505548177, "grad_norm": 5.622014999389648, "learning_rate": 2.0133154279779126e-05, "loss": 2.3346088409423826, "memory(GiB)": 72.85, "step": 82165, "token_acc": 0.4930747922437673, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.520414720877426, "grad_norm": 6.0902862548828125, "learning_rate": 2.0127757336913128e-05, "loss": 1.9892271041870118, "memory(GiB)": 72.85, "step": 82170, "token_acc": 0.5492957746478874, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.520628936206675, "grad_norm": 5.241879463195801, "learning_rate": 2.012236093521081e-05, "loss": 1.8451160430908202, "memory(GiB)": 72.85, "step": 82175, "token_acc": 0.5748987854251012, "train_speed(iter/s)": 0.672624 }, { "epoch": 3.520843151535924, "grad_norm": 5.212069034576416, "learning_rate": 2.011696507476997e-05, "loss": 1.961738395690918, "memory(GiB)": 72.85, "step": 82180, "token_acc": 0.5862068965517241, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.521057366865173, "grad_norm": 4.806005954742432, "learning_rate": 2.0111569755688343e-05, "loss": 1.9657695770263672, "memory(GiB)": 72.85, "step": 82185, "token_acc": 0.5255255255255256, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.521271582194422, "grad_norm": 7.0687150955200195, "learning_rate": 2.010617497806366e-05, "loss": 2.179010009765625, "memory(GiB)": 72.85, "step": 82190, "token_acc": 0.5379061371841155, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.521485797523671, "grad_norm": 5.070688247680664, "learning_rate": 2.010078074199367e-05, "loss": 1.939264678955078, "memory(GiB)": 72.85, "step": 82195, "token_acc": 0.5182481751824818, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.5217000128529197, "grad_norm": 6.579041957855225, "learning_rate": 2.0095387047576074e-05, "loss": 2.1439563751220705, "memory(GiB)": 72.85, "step": 82200, "token_acc": 0.5315985130111525, "train_speed(iter/s)": 
0.672639 }, { "epoch": 3.521914228182169, "grad_norm": 7.2386908531188965, "learning_rate": 2.008999389490858e-05, "loss": 1.8654695510864259, "memory(GiB)": 72.85, "step": 82205, "token_acc": 0.5572519083969466, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.5221284435114177, "grad_norm": 5.75070858001709, "learning_rate": 2.0084601284088928e-05, "loss": 2.1558372497558596, "memory(GiB)": 72.85, "step": 82210, "token_acc": 0.4863013698630137, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.5223426588406666, "grad_norm": 6.066382884979248, "learning_rate": 2.0079209215214782e-05, "loss": 1.9786907196044923, "memory(GiB)": 72.85, "step": 82215, "token_acc": 0.5427631578947368, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.522556874169916, "grad_norm": 5.410205364227295, "learning_rate": 2.007381768838383e-05, "loss": 2.5693891525268553, "memory(GiB)": 72.85, "step": 82220, "token_acc": 0.4854368932038835, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.5227710894991646, "grad_norm": 6.595974922180176, "learning_rate": 2.0068426703693743e-05, "loss": 2.235523986816406, "memory(GiB)": 72.85, "step": 82225, "token_acc": 0.5, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.5229853048284134, "grad_norm": 6.391623497009277, "learning_rate": 2.0063036261242162e-05, "loss": 2.1568798065185546, "memory(GiB)": 72.85, "step": 82230, "token_acc": 0.5134328358208955, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.5231995201576627, "grad_norm": 4.76979398727417, "learning_rate": 2.005764636112677e-05, "loss": 2.082590103149414, "memory(GiB)": 72.85, "step": 82235, "token_acc": 0.5197132616487455, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.5234137354869115, "grad_norm": 6.404263019561768, "learning_rate": 2.005225700344522e-05, "loss": 2.2497934341430663, "memory(GiB)": 72.85, "step": 82240, "token_acc": 0.5370919881305638, "train_speed(iter/s)": 0.672605 }, { "epoch": 3.5236279508161603, "grad_norm": 4.981007099151611, "learning_rate": 2.004686818829512e-05, "loss": 
1.9961277008056642, "memory(GiB)": 72.85, "step": 82245, "token_acc": 0.5888157894736842, "train_speed(iter/s)": 0.6726 }, { "epoch": 3.5238421661454096, "grad_norm": 5.845139026641846, "learning_rate": 2.004147991577411e-05, "loss": 2.229410743713379, "memory(GiB)": 72.85, "step": 82250, "token_acc": 0.5437262357414449, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.5240563814746584, "grad_norm": 5.08478307723999, "learning_rate": 2.003609218597979e-05, "loss": 2.2893301010131837, "memory(GiB)": 72.85, "step": 82255, "token_acc": 0.515527950310559, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.524270596803907, "grad_norm": 5.958395004272461, "learning_rate": 2.003070499900977e-05, "loss": 2.251167106628418, "memory(GiB)": 72.85, "step": 82260, "token_acc": 0.5259259259259259, "train_speed(iter/s)": 0.672615 }, { "epoch": 3.5244848121331565, "grad_norm": 5.154244899749756, "learning_rate": 2.002531835496162e-05, "loss": 1.8547468185424805, "memory(GiB)": 72.85, "step": 82265, "token_acc": 0.6099071207430341, "train_speed(iter/s)": 0.672622 }, { "epoch": 3.5246990274624053, "grad_norm": 4.5095343589782715, "learning_rate": 2.0019932253932965e-05, "loss": 1.9132608413696288, "memory(GiB)": 72.85, "step": 82270, "token_acc": 0.5766423357664233, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.524913242791654, "grad_norm": 5.498847484588623, "learning_rate": 2.0014546696021368e-05, "loss": 2.430647850036621, "memory(GiB)": 72.85, "step": 82275, "token_acc": 0.5211267605633803, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.5251274581209033, "grad_norm": 8.725198745727539, "learning_rate": 2.000916168132438e-05, "loss": 2.171820068359375, "memory(GiB)": 72.85, "step": 82280, "token_acc": 0.5202492211838006, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.525341673450152, "grad_norm": 5.628159046173096, "learning_rate": 2.0003777209939566e-05, "loss": 2.1477329254150392, "memory(GiB)": 72.85, "step": 82285, "token_acc": 0.5264900662251656, "train_speed(iter/s)": 0.67264 
}, { "epoch": 3.525555888779401, "grad_norm": 6.027541160583496, "learning_rate": 1.9998393281964446e-05, "loss": 2.377035713195801, "memory(GiB)": 72.85, "step": 82290, "token_acc": 0.5061728395061729, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.52577010410865, "grad_norm": 5.772958278656006, "learning_rate": 1.9993009897496596e-05, "loss": 2.119527053833008, "memory(GiB)": 72.85, "step": 82295, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.525984319437899, "grad_norm": 7.225375175476074, "learning_rate": 1.9987627056633513e-05, "loss": 2.131337356567383, "memory(GiB)": 72.85, "step": 82300, "token_acc": 0.528052805280528, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.526198534767148, "grad_norm": 4.935617446899414, "learning_rate": 1.9982244759472706e-05, "loss": 2.3639888763427734, "memory(GiB)": 72.85, "step": 82305, "token_acc": 0.5253623188405797, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.526412750096397, "grad_norm": 4.474663257598877, "learning_rate": 1.997686300611172e-05, "loss": 2.18420295715332, "memory(GiB)": 72.85, "step": 82310, "token_acc": 0.5842293906810035, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.526626965425646, "grad_norm": 5.766672134399414, "learning_rate": 1.997148179664801e-05, "loss": 2.210311698913574, "memory(GiB)": 72.85, "step": 82315, "token_acc": 0.5392857142857143, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.5268411807548947, "grad_norm": 6.783390522003174, "learning_rate": 1.9966101131179072e-05, "loss": 2.4232025146484375, "memory(GiB)": 72.85, "step": 82320, "token_acc": 0.516, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.527055396084144, "grad_norm": 4.496657848358154, "learning_rate": 1.9960721009802398e-05, "loss": 2.0114669799804688, "memory(GiB)": 72.85, "step": 82325, "token_acc": 0.5393258426966292, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.527269611413393, "grad_norm": 6.280988693237305, "learning_rate": 1.9955341432615445e-05, "loss": 2.2448266983032226, 
"memory(GiB)": 72.85, "step": 82330, "token_acc": 0.5282392026578073, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.5274838267426416, "grad_norm": 5.809720039367676, "learning_rate": 1.994996239971566e-05, "loss": 2.1071327209472654, "memory(GiB)": 72.85, "step": 82335, "token_acc": 0.531986531986532, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.527698042071891, "grad_norm": 5.31339693069458, "learning_rate": 1.99445839112005e-05, "loss": 1.9433616638183593, "memory(GiB)": 72.85, "step": 82340, "token_acc": 0.5423076923076923, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.5279122574011397, "grad_norm": 6.620911121368408, "learning_rate": 1.993920596716739e-05, "loss": 2.2349292755126955, "memory(GiB)": 72.85, "step": 82345, "token_acc": 0.5229357798165137, "train_speed(iter/s)": 0.672655 }, { "epoch": 3.5281264727303885, "grad_norm": 5.721080303192139, "learning_rate": 1.9933828567713752e-05, "loss": 2.367903137207031, "memory(GiB)": 72.85, "step": 82350, "token_acc": 0.49586776859504134, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.5283406880596377, "grad_norm": 6.545754432678223, "learning_rate": 1.992845171293703e-05, "loss": 2.0820411682128905, "memory(GiB)": 72.85, "step": 82355, "token_acc": 0.5403508771929825, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.5285549033888866, "grad_norm": 5.340708255767822, "learning_rate": 1.9923075402934617e-05, "loss": 1.9703271865844727, "memory(GiB)": 72.85, "step": 82360, "token_acc": 0.5571955719557196, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.5287691187181354, "grad_norm": 5.340760707855225, "learning_rate": 1.9917699637803905e-05, "loss": 2.0709535598754885, "memory(GiB)": 72.85, "step": 82365, "token_acc": 0.568561872909699, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.5289833340473846, "grad_norm": 5.220935344696045, "learning_rate": 1.9912324417642285e-05, "loss": 2.1553718566894533, "memory(GiB)": 72.85, "step": 82370, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.67265 }, { 
"epoch": 3.5291975493766334, "grad_norm": 6.876023769378662, "learning_rate": 1.9906949742547115e-05, "loss": 2.370851516723633, "memory(GiB)": 72.85, "step": 82375, "token_acc": 0.5, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.5294117647058822, "grad_norm": 5.119292259216309, "learning_rate": 1.9901575612615774e-05, "loss": 1.9272083282470702, "memory(GiB)": 72.85, "step": 82380, "token_acc": 0.5673469387755102, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.5296259800351315, "grad_norm": 7.05645751953125, "learning_rate": 1.9896202027945653e-05, "loss": 1.9036113739013671, "memory(GiB)": 72.85, "step": 82385, "token_acc": 0.5486381322957199, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.5298401953643803, "grad_norm": 6.213525772094727, "learning_rate": 1.9890828988634063e-05, "loss": 2.011688232421875, "memory(GiB)": 72.85, "step": 82390, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.530054410693629, "grad_norm": 6.187417507171631, "learning_rate": 1.988545649477836e-05, "loss": 2.1830095291137694, "memory(GiB)": 72.85, "step": 82395, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.5302686260228784, "grad_norm": 4.68194055557251, "learning_rate": 1.988008454647585e-05, "loss": 2.019051361083984, "memory(GiB)": 72.85, "step": 82400, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.530482841352127, "grad_norm": 5.43694543838501, "learning_rate": 1.987471314382386e-05, "loss": 2.070332717895508, "memory(GiB)": 72.85, "step": 82405, "token_acc": 0.4850498338870432, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.530697056681376, "grad_norm": 5.486753940582275, "learning_rate": 1.9869342286919707e-05, "loss": 2.2881696701049803, "memory(GiB)": 72.85, "step": 82410, "token_acc": 0.5125786163522013, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.5309112720106253, "grad_norm": 5.1692376136779785, "learning_rate": 1.986397197586069e-05, "loss": 2.4384897232055662, 
"memory(GiB)": 72.85, "step": 82415, "token_acc": 0.5, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.531125487339874, "grad_norm": 5.7393574714660645, "learning_rate": 1.9858602210744086e-05, "loss": 2.0915494918823243, "memory(GiB)": 72.85, "step": 82420, "token_acc": 0.5632183908045977, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.531339702669123, "grad_norm": 5.189434051513672, "learning_rate": 1.985323299166718e-05, "loss": 2.3014278411865234, "memory(GiB)": 72.85, "step": 82425, "token_acc": 0.5033333333333333, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.531553917998372, "grad_norm": 6.240303039550781, "learning_rate": 1.9847864318727238e-05, "loss": 2.0920101165771485, "memory(GiB)": 72.85, "step": 82430, "token_acc": 0.5414012738853503, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.531768133327621, "grad_norm": 7.165520668029785, "learning_rate": 1.9842496192021503e-05, "loss": 1.9254405975341797, "memory(GiB)": 72.85, "step": 82435, "token_acc": 0.5709090909090909, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.5319823486568698, "grad_norm": 6.133455753326416, "learning_rate": 1.9837128611647255e-05, "loss": 2.0174383163452148, "memory(GiB)": 72.85, "step": 82440, "token_acc": 0.5656934306569343, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.532196563986119, "grad_norm": 5.436672687530518, "learning_rate": 1.9831761577701717e-05, "loss": 2.380985450744629, "memory(GiB)": 72.85, "step": 82445, "token_acc": 0.477124183006536, "train_speed(iter/s)": 0.672676 }, { "epoch": 3.532410779315368, "grad_norm": 5.550756454467773, "learning_rate": 1.98263950902821e-05, "loss": 2.0178253173828127, "memory(GiB)": 72.85, "step": 82450, "token_acc": 0.5633333333333334, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.5326249946446167, "grad_norm": 5.671982765197754, "learning_rate": 1.9821029149485664e-05, "loss": 1.883245849609375, "memory(GiB)": 72.85, "step": 82455, "token_acc": 0.6101083032490975, "train_speed(iter/s)": 0.672661 }, { "epoch": 
3.532839209973866, "grad_norm": 5.750094890594482, "learning_rate": 1.9815663755409593e-05, "loss": 2.2504045486450197, "memory(GiB)": 72.85, "step": 82460, "token_acc": 0.48104956268221577, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.5330534253031147, "grad_norm": 7.723504066467285, "learning_rate": 1.9810298908151064e-05, "loss": 2.0758079528808593, "memory(GiB)": 72.85, "step": 82465, "token_acc": 0.5568627450980392, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.5332676406323635, "grad_norm": 5.1762895584106445, "learning_rate": 1.9804934607807313e-05, "loss": 2.0708244323730467, "memory(GiB)": 72.85, "step": 82470, "token_acc": 0.5241635687732342, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.533481855961613, "grad_norm": 5.983510971069336, "learning_rate": 1.9799570854475498e-05, "loss": 2.2202253341674805, "memory(GiB)": 72.85, "step": 82475, "token_acc": 0.5339233038348082, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.5336960712908616, "grad_norm": 5.500265121459961, "learning_rate": 1.9794207648252787e-05, "loss": 2.2413875579833986, "memory(GiB)": 72.85, "step": 82480, "token_acc": 0.5551181102362205, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.5339102866201104, "grad_norm": 6.072788715362549, "learning_rate": 1.9788844989236338e-05, "loss": 2.077691078186035, "memory(GiB)": 72.85, "step": 82485, "token_acc": 0.5303030303030303, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.5341245019493597, "grad_norm": 7.098670482635498, "learning_rate": 1.9783482877523303e-05, "loss": 2.140513038635254, "memory(GiB)": 72.85, "step": 82490, "token_acc": 0.4876543209876543, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.5343387172786085, "grad_norm": 4.356334209442139, "learning_rate": 1.9778121313210805e-05, "loss": 2.1060115814208986, "memory(GiB)": 72.85, "step": 82495, "token_acc": 0.5258855585831063, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.5345529326078573, "grad_norm": 4.454436302185059, "learning_rate": 1.9772760296396004e-05, "loss": 
1.9622329711914062, "memory(GiB)": 72.85, "step": 82500, "token_acc": 0.5094339622641509, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.5345529326078573, "eval_loss": 1.9497802257537842, "eval_runtime": 15.3118, "eval_samples_per_second": 6.531, "eval_steps_per_second": 6.531, "eval_token_acc": 0.5218002812939522, "step": 82500 }, { "epoch": 3.5347671479371066, "grad_norm": 4.684083938598633, "learning_rate": 1.976739982717601e-05, "loss": 1.931590461730957, "memory(GiB)": 72.85, "step": 82505, "token_acc": 0.5398587285570131, "train_speed(iter/s)": 0.672559 }, { "epoch": 3.5349813632663554, "grad_norm": 5.530428409576416, "learning_rate": 1.9762039905647927e-05, "loss": 2.195175552368164, "memory(GiB)": 72.85, "step": 82510, "token_acc": 0.5331230283911672, "train_speed(iter/s)": 0.672565 }, { "epoch": 3.535195578595604, "grad_norm": 6.729899883270264, "learning_rate": 1.9756680531908856e-05, "loss": 2.1766664505004885, "memory(GiB)": 72.85, "step": 82515, "token_acc": 0.5516129032258065, "train_speed(iter/s)": 0.672557 }, { "epoch": 3.5354097939248534, "grad_norm": 5.22761344909668, "learning_rate": 1.975132170605587e-05, "loss": 2.2094640731811523, "memory(GiB)": 72.85, "step": 82520, "token_acc": 0.5415162454873647, "train_speed(iter/s)": 0.672564 }, { "epoch": 3.5356240092541023, "grad_norm": 5.60462760925293, "learning_rate": 1.9745963428186064e-05, "loss": 1.9610361099243163, "memory(GiB)": 72.85, "step": 82525, "token_acc": 0.5615942028985508, "train_speed(iter/s)": 0.672575 }, { "epoch": 3.535838224583351, "grad_norm": 5.569339275360107, "learning_rate": 1.9740605698396536e-05, "loss": 2.394458770751953, "memory(GiB)": 72.85, "step": 82530, "token_acc": 0.48863636363636365, "train_speed(iter/s)": 0.672578 }, { "epoch": 3.5360524399126003, "grad_norm": 4.784703254699707, "learning_rate": 1.9735248516784317e-05, "loss": 1.8418548583984375, "memory(GiB)": 72.85, "step": 82535, "token_acc": 0.568561872909699, "train_speed(iter/s)": 0.672577 }, { "epoch": 
3.536266655241849, "grad_norm": 6.723227024078369, "learning_rate": 1.972989188344646e-05, "loss": 2.18701286315918, "memory(GiB)": 72.85, "step": 82540, "token_acc": 0.524904214559387, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.536480870571098, "grad_norm": 5.24317741394043, "learning_rate": 1.972453579848001e-05, "loss": 2.071304512023926, "memory(GiB)": 72.85, "step": 82545, "token_acc": 0.55859375, "train_speed(iter/s)": 0.672574 }, { "epoch": 3.536695085900347, "grad_norm": 6.043819904327393, "learning_rate": 1.9719180261981972e-05, "loss": 2.3370737075805663, "memory(GiB)": 72.85, "step": 82550, "token_acc": 0.4984709480122324, "train_speed(iter/s)": 0.672569 }, { "epoch": 3.536909301229596, "grad_norm": 7.965291976928711, "learning_rate": 1.9713825274049412e-05, "loss": 1.965459442138672, "memory(GiB)": 72.85, "step": 82555, "token_acc": 0.5440613026819924, "train_speed(iter/s)": 0.672568 }, { "epoch": 3.537123516558845, "grad_norm": 4.666102409362793, "learning_rate": 1.970847083477932e-05, "loss": 2.343686103820801, "memory(GiB)": 72.85, "step": 82560, "token_acc": 0.48562300319488816, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.537337731888094, "grad_norm": 5.210982322692871, "learning_rate": 1.970311694426869e-05, "loss": 2.195804214477539, "memory(GiB)": 72.85, "step": 82565, "token_acc": 0.5687732342007435, "train_speed(iter/s)": 0.672573 }, { "epoch": 3.537551947217343, "grad_norm": 5.762908935546875, "learning_rate": 1.9697763602614518e-05, "loss": 2.3891664505004884, "memory(GiB)": 72.85, "step": 82570, "token_acc": 0.4955223880597015, "train_speed(iter/s)": 0.672578 }, { "epoch": 3.5377661625465917, "grad_norm": 6.581815719604492, "learning_rate": 1.9692410809913787e-05, "loss": 2.1599021911621095, "memory(GiB)": 72.85, "step": 82575, "token_acc": 0.5541666666666667, "train_speed(iter/s)": 0.672588 }, { "epoch": 3.537980377875841, "grad_norm": 6.171903610229492, "learning_rate": 1.9687058566263445e-05, "loss": 2.0561477661132814, 
"memory(GiB)": 72.85, "step": 82580, "token_acc": 0.5371024734982333, "train_speed(iter/s)": 0.672595 }, { "epoch": 3.5381945932050898, "grad_norm": 5.3170576095581055, "learning_rate": 1.968170687176049e-05, "loss": 2.3283437728881835, "memory(GiB)": 72.85, "step": 82585, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.6726 }, { "epoch": 3.5384088085343386, "grad_norm": 5.876070499420166, "learning_rate": 1.9676355726501853e-05, "loss": 2.2838552474975584, "memory(GiB)": 72.85, "step": 82590, "token_acc": 0.5072463768115942, "train_speed(iter/s)": 0.672602 }, { "epoch": 3.538623023863588, "grad_norm": 4.622082710266113, "learning_rate": 1.9671005130584487e-05, "loss": 2.309168243408203, "memory(GiB)": 72.85, "step": 82595, "token_acc": 0.48548812664907653, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.5388372391928367, "grad_norm": 5.98997163772583, "learning_rate": 1.966565508410529e-05, "loss": 2.3234806060791016, "memory(GiB)": 72.85, "step": 82600, "token_acc": 0.49852507374631266, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.5390514545220855, "grad_norm": 4.907209873199463, "learning_rate": 1.9660305587161226e-05, "loss": 1.8607223510742188, "memory(GiB)": 72.85, "step": 82605, "token_acc": 0.5723076923076923, "train_speed(iter/s)": 0.672609 }, { "epoch": 3.5392656698513347, "grad_norm": 4.373894691467285, "learning_rate": 1.9654956639849163e-05, "loss": 2.3259761810302733, "memory(GiB)": 72.85, "step": 82610, "token_acc": 0.5088235294117647, "train_speed(iter/s)": 0.672609 }, { "epoch": 3.5394798851805835, "grad_norm": 5.401246547698975, "learning_rate": 1.9649608242266054e-05, "loss": 1.7989028930664062, "memory(GiB)": 72.85, "step": 82615, "token_acc": 0.5738255033557047, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.5396941005098324, "grad_norm": 6.134547233581543, "learning_rate": 1.9644260394508758e-05, "loss": 2.256892204284668, "memory(GiB)": 72.85, "step": 82620, "token_acc": 0.5181518151815182, "train_speed(iter/s)": 0.672624 }, { 
"epoch": 3.5399083158390816, "grad_norm": 6.543603420257568, "learning_rate": 1.9638913096674162e-05, "loss": 2.0872268676757812, "memory(GiB)": 72.85, "step": 82625, "token_acc": 0.5398773006134969, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.5401225311683304, "grad_norm": 5.134305953979492, "learning_rate": 1.963356634885914e-05, "loss": 1.798759651184082, "memory(GiB)": 72.85, "step": 82630, "token_acc": 0.6016597510373444, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.5403367464975792, "grad_norm": 5.27810001373291, "learning_rate": 1.9628220151160542e-05, "loss": 1.9266447067260741, "memory(GiB)": 72.85, "step": 82635, "token_acc": 0.55, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.5405509618268285, "grad_norm": 5.74284029006958, "learning_rate": 1.9622874503675213e-05, "loss": 2.055607223510742, "memory(GiB)": 72.85, "step": 82640, "token_acc": 0.5514950166112956, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.5407651771560773, "grad_norm": 6.575479030609131, "learning_rate": 1.9617529406500028e-05, "loss": 1.870152473449707, "memory(GiB)": 72.85, "step": 82645, "token_acc": 0.5519713261648745, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.540979392485326, "grad_norm": 6.127137660980225, "learning_rate": 1.96121848597318e-05, "loss": 2.0717660903930666, "memory(GiB)": 72.85, "step": 82650, "token_acc": 0.5548387096774193, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.5411936078145754, "grad_norm": 5.7811102867126465, "learning_rate": 1.960684086346734e-05, "loss": 2.104741859436035, "memory(GiB)": 72.85, "step": 82655, "token_acc": 0.5050847457627119, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.541407823143824, "grad_norm": 5.46229887008667, "learning_rate": 1.9601497417803477e-05, "loss": 2.358173942565918, "memory(GiB)": 72.85, "step": 82660, "token_acc": 0.48695652173913045, "train_speed(iter/s)": 0.672619 }, { "epoch": 3.541622038473073, "grad_norm": 5.090808868408203, "learning_rate": 1.9596154522836983e-05, "loss": 2.153591346740723, 
"memory(GiB)": 72.85, "step": 82665, "token_acc": 0.543046357615894, "train_speed(iter/s)": 0.672627 }, { "epoch": 3.5418362538023223, "grad_norm": 5.710697174072266, "learning_rate": 1.9590812178664692e-05, "loss": 1.9757928848266602, "memory(GiB)": 72.85, "step": 82670, "token_acc": 0.5643153526970954, "train_speed(iter/s)": 0.672622 }, { "epoch": 3.542050469131571, "grad_norm": 6.73332405090332, "learning_rate": 1.9585470385383347e-05, "loss": 1.815891647338867, "memory(GiB)": 72.85, "step": 82675, "token_acc": 0.5976095617529881, "train_speed(iter/s)": 0.67261 }, { "epoch": 3.54226468446082, "grad_norm": 6.280341625213623, "learning_rate": 1.9580129143089755e-05, "loss": 2.140645980834961, "memory(GiB)": 72.85, "step": 82680, "token_acc": 0.5283018867924528, "train_speed(iter/s)": 0.672611 }, { "epoch": 3.542478899790069, "grad_norm": 5.695857524871826, "learning_rate": 1.9574788451880654e-05, "loss": 1.9104549407958984, "memory(GiB)": 72.85, "step": 82685, "token_acc": 0.5919117647058824, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.542693115119318, "grad_norm": 6.279382705688477, "learning_rate": 1.9569448311852805e-05, "loss": 2.2423328399658202, "memory(GiB)": 72.85, "step": 82690, "token_acc": 0.5337620578778135, "train_speed(iter/s)": 0.6726 }, { "epoch": 3.5429073304485668, "grad_norm": 5.470129013061523, "learning_rate": 1.956410872310293e-05, "loss": 1.8852594375610352, "memory(GiB)": 72.85, "step": 82695, "token_acc": 0.6083650190114068, "train_speed(iter/s)": 0.672603 }, { "epoch": 3.543121545777816, "grad_norm": 8.455183982849121, "learning_rate": 1.955876968572779e-05, "loss": 2.5409406661987304, "memory(GiB)": 72.85, "step": 82700, "token_acc": 0.4965986394557823, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.543335761107065, "grad_norm": 7.204983234405518, "learning_rate": 1.95534311998241e-05, "loss": 1.9848445892333983, "memory(GiB)": 72.85, "step": 82705, "token_acc": 0.524822695035461, "train_speed(iter/s)": 0.672601 }, { "epoch": 
3.5435499764363136, "grad_norm": 6.5687127113342285, "learning_rate": 1.954809326548856e-05, "loss": 2.2295085906982424, "memory(GiB)": 72.85, "step": 82710, "token_acc": 0.4984984984984985, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.543764191765563, "grad_norm": 5.936383247375488, "learning_rate": 1.9542755882817876e-05, "loss": 2.300801467895508, "memory(GiB)": 72.85, "step": 82715, "token_acc": 0.4984025559105431, "train_speed(iter/s)": 0.672617 }, { "epoch": 3.5439784070948117, "grad_norm": 7.081585884094238, "learning_rate": 1.9537419051908746e-05, "loss": 2.083893394470215, "memory(GiB)": 72.85, "step": 82720, "token_acc": 0.546875, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.5441926224240605, "grad_norm": 6.695867538452148, "learning_rate": 1.9532082772857824e-05, "loss": 2.0895263671875, "memory(GiB)": 72.85, "step": 82725, "token_acc": 0.5289256198347108, "train_speed(iter/s)": 0.672611 }, { "epoch": 3.54440683775331, "grad_norm": 5.933010101318359, "learning_rate": 1.952674704576182e-05, "loss": 1.9583879470825196, "memory(GiB)": 72.85, "step": 82730, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.672617 }, { "epoch": 3.5446210530825586, "grad_norm": 5.552717685699463, "learning_rate": 1.9521411870717384e-05, "loss": 1.9777261734008789, "memory(GiB)": 72.85, "step": 82735, "token_acc": 0.5488721804511278, "train_speed(iter/s)": 0.672618 }, { "epoch": 3.5448352684118074, "grad_norm": 5.251500129699707, "learning_rate": 1.9516077247821157e-05, "loss": 2.0203685760498047, "memory(GiB)": 72.85, "step": 82740, "token_acc": 0.5410447761194029, "train_speed(iter/s)": 0.672617 }, { "epoch": 3.5450494837410567, "grad_norm": 6.929096221923828, "learning_rate": 1.9510743177169776e-05, "loss": 2.1168394088745117, "memory(GiB)": 72.85, "step": 82745, "token_acc": 0.524, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.5452636990703055, "grad_norm": 6.07268762588501, "learning_rate": 1.9505409658859898e-05, "loss": 1.9310096740722655, "memory(GiB)": 
72.85, "step": 82750, "token_acc": 0.5909090909090909, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.5454779143995543, "grad_norm": 7.423854827880859, "learning_rate": 1.950007669298811e-05, "loss": 2.031309127807617, "memory(GiB)": 72.85, "step": 82755, "token_acc": 0.5465116279069767, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.5456921297288035, "grad_norm": 6.9565324783325195, "learning_rate": 1.9494744279651067e-05, "loss": 2.215414810180664, "memory(GiB)": 72.85, "step": 82760, "token_acc": 0.5243055555555556, "train_speed(iter/s)": 0.672599 }, { "epoch": 3.5459063450580524, "grad_norm": 7.086856365203857, "learning_rate": 1.9489412418945345e-05, "loss": 2.1680265426635743, "memory(GiB)": 72.85, "step": 82765, "token_acc": 0.46153846153846156, "train_speed(iter/s)": 0.672599 }, { "epoch": 3.546120560387301, "grad_norm": 4.969593524932861, "learning_rate": 1.948408111096754e-05, "loss": 2.311443328857422, "memory(GiB)": 72.85, "step": 82770, "token_acc": 0.49387755102040815, "train_speed(iter/s)": 0.672591 }, { "epoch": 3.5463347757165504, "grad_norm": 6.454896926879883, "learning_rate": 1.947875035581423e-05, "loss": 2.16159725189209, "memory(GiB)": 72.85, "step": 82775, "token_acc": 0.5368421052631579, "train_speed(iter/s)": 0.672575 }, { "epoch": 3.5465489910457992, "grad_norm": 5.122424125671387, "learning_rate": 1.9473420153581988e-05, "loss": 2.077722930908203, "memory(GiB)": 72.85, "step": 82780, "token_acc": 0.5478260869565217, "train_speed(iter/s)": 0.672579 }, { "epoch": 3.546763206375048, "grad_norm": 5.034019947052002, "learning_rate": 1.9468090504367358e-05, "loss": 2.1231407165527343, "memory(GiB)": 72.85, "step": 82785, "token_acc": 0.536741214057508, "train_speed(iter/s)": 0.672575 }, { "epoch": 3.5469774217042973, "grad_norm": 5.946100234985352, "learning_rate": 1.946276140826693e-05, "loss": 2.2123397827148437, "memory(GiB)": 72.85, "step": 82790, "token_acc": 0.5, "train_speed(iter/s)": 0.672561 }, { "epoch": 3.547191637033546, 
"grad_norm": 5.790131568908691, "learning_rate": 1.9457432865377223e-05, "loss": 2.172713279724121, "memory(GiB)": 72.85, "step": 82795, "token_acc": 0.5131086142322098, "train_speed(iter/s)": 0.672563 }, { "epoch": 3.547405852362795, "grad_norm": 5.8472208976745605, "learning_rate": 1.9452104875794775e-05, "loss": 2.1270706176757814, "memory(GiB)": 72.85, "step": 82800, "token_acc": 0.51953125, "train_speed(iter/s)": 0.67257 }, { "epoch": 3.547620067692044, "grad_norm": 7.603801250457764, "learning_rate": 1.94467774396161e-05, "loss": 1.9085311889648438, "memory(GiB)": 72.85, "step": 82805, "token_acc": 0.5568181818181818, "train_speed(iter/s)": 0.672575 }, { "epoch": 3.547834283021293, "grad_norm": 5.503652572631836, "learning_rate": 1.9441450556937695e-05, "loss": 2.1754751205444336, "memory(GiB)": 72.85, "step": 82810, "token_acc": 0.5534351145038168, "train_speed(iter/s)": 0.672579 }, { "epoch": 3.548048498350542, "grad_norm": 6.255324840545654, "learning_rate": 1.9436124227856095e-05, "loss": 2.021531105041504, "memory(GiB)": 72.85, "step": 82815, "token_acc": 0.525, "train_speed(iter/s)": 0.672584 }, { "epoch": 3.548262713679791, "grad_norm": 4.941567897796631, "learning_rate": 1.9430798452467763e-05, "loss": 2.019920539855957, "memory(GiB)": 72.85, "step": 82820, "token_acc": 0.5652173913043478, "train_speed(iter/s)": 0.672589 }, { "epoch": 3.54847692900904, "grad_norm": 5.0869269371032715, "learning_rate": 1.9425473230869206e-05, "loss": 2.069829750061035, "memory(GiB)": 72.85, "step": 82825, "token_acc": 0.528052805280528, "train_speed(iter/s)": 0.672592 }, { "epoch": 3.5486911443382887, "grad_norm": 7.612703323364258, "learning_rate": 1.9420148563156887e-05, "loss": 2.1196334838867186, "memory(GiB)": 72.85, "step": 82830, "token_acc": 0.5236220472440944, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.548905359667538, "grad_norm": 7.092423915863037, "learning_rate": 1.941482444942726e-05, "loss": 1.980735969543457, "memory(GiB)": 72.85, "step": 82835, 
"token_acc": 0.5260115606936416, "train_speed(iter/s)": 0.672603 }, { "epoch": 3.5491195749967868, "grad_norm": 7.523045063018799, "learning_rate": 1.9409500889776765e-05, "loss": 1.8029207229614257, "memory(GiB)": 72.85, "step": 82840, "token_acc": 0.6, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.5493337903260356, "grad_norm": 5.606503009796143, "learning_rate": 1.9404177884301872e-05, "loss": 2.2639999389648438, "memory(GiB)": 72.85, "step": 82845, "token_acc": 0.5113636363636364, "train_speed(iter/s)": 0.672615 }, { "epoch": 3.549548005655285, "grad_norm": 7.239699363708496, "learning_rate": 1.9398855433099e-05, "loss": 2.0504526138305663, "memory(GiB)": 72.85, "step": 82850, "token_acc": 0.5396825396825397, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.5497622209845336, "grad_norm": 9.134148597717285, "learning_rate": 1.939353353626457e-05, "loss": 2.2578155517578127, "memory(GiB)": 72.85, "step": 82855, "token_acc": 0.4933920704845815, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.5499764363137825, "grad_norm": 6.548464775085449, "learning_rate": 1.9388212193894988e-05, "loss": 1.8894046783447265, "memory(GiB)": 72.85, "step": 82860, "token_acc": 0.5827814569536424, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.5501906516430317, "grad_norm": 5.245365142822266, "learning_rate": 1.9382891406086656e-05, "loss": 2.2052494049072267, "memory(GiB)": 72.85, "step": 82865, "token_acc": 0.5030120481927711, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.5504048669722805, "grad_norm": 5.016049385070801, "learning_rate": 1.9377571172935955e-05, "loss": 2.198729133605957, "memory(GiB)": 72.85, "step": 82870, "token_acc": 0.47840531561461797, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.5506190823015293, "grad_norm": 5.897339820861816, "learning_rate": 1.9372251494539295e-05, "loss": 2.102342224121094, "memory(GiB)": 72.85, "step": 82875, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.5508332976307786, "grad_norm": 
5.503421306610107, "learning_rate": 1.9366932370993025e-05, "loss": 2.2249773025512694, "memory(GiB)": 72.85, "step": 82880, "token_acc": 0.46226415094339623, "train_speed(iter/s)": 0.672609 }, { "epoch": 3.5510475129600274, "grad_norm": 7.375865936279297, "learning_rate": 1.9361613802393518e-05, "loss": 2.1400123596191407, "memory(GiB)": 72.85, "step": 82885, "token_acc": 0.5048231511254019, "train_speed(iter/s)": 0.672605 }, { "epoch": 3.551261728289276, "grad_norm": 5.098079681396484, "learning_rate": 1.9356295788837114e-05, "loss": 2.2828969955444336, "memory(GiB)": 72.85, "step": 82890, "token_acc": 0.4945054945054945, "train_speed(iter/s)": 0.672595 }, { "epoch": 3.5514759436185255, "grad_norm": 5.192509651184082, "learning_rate": 1.9350978330420138e-05, "loss": 2.09592227935791, "memory(GiB)": 72.85, "step": 82895, "token_acc": 0.539568345323741, "train_speed(iter/s)": 0.672596 }, { "epoch": 3.5516901589477743, "grad_norm": 5.188864707946777, "learning_rate": 1.9345661427238946e-05, "loss": 1.9042745590209962, "memory(GiB)": 72.85, "step": 82900, "token_acc": 0.5938566552901023, "train_speed(iter/s)": 0.672601 }, { "epoch": 3.551904374277023, "grad_norm": 6.8861870765686035, "learning_rate": 1.9340345079389872e-05, "loss": 2.001243019104004, "memory(GiB)": 72.85, "step": 82905, "token_acc": 0.5437262357414449, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.5521185896062724, "grad_norm": 5.348304271697998, "learning_rate": 1.9335029286969202e-05, "loss": 1.997643280029297, "memory(GiB)": 72.85, "step": 82910, "token_acc": 0.577922077922078, "train_speed(iter/s)": 0.672607 }, { "epoch": 3.552332804935521, "grad_norm": 6.812142848968506, "learning_rate": 1.9329714050073244e-05, "loss": 1.9655582427978515, "memory(GiB)": 72.85, "step": 82915, "token_acc": 0.5533980582524272, "train_speed(iter/s)": 0.672609 }, { "epoch": 3.55254702026477, "grad_norm": 6.731032848358154, "learning_rate": 1.932439936879829e-05, "loss": 2.1368431091308593, "memory(GiB)": 72.85, 
"step": 82920, "token_acc": 0.5152542372881356, "train_speed(iter/s)": 0.672615 }, { "epoch": 3.5527612355940192, "grad_norm": 4.842106819152832, "learning_rate": 1.9319085243240613e-05, "loss": 2.2231399536132814, "memory(GiB)": 72.85, "step": 82925, "token_acc": 0.478125, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.552975450923268, "grad_norm": 5.566215991973877, "learning_rate": 1.9313771673496478e-05, "loss": 2.024089241027832, "memory(GiB)": 72.85, "step": 82930, "token_acc": 0.5763888888888888, "train_speed(iter/s)": 0.672617 }, { "epoch": 3.553189666252517, "grad_norm": 5.772278308868408, "learning_rate": 1.9308458659662166e-05, "loss": 2.2627197265625, "memory(GiB)": 72.85, "step": 82935, "token_acc": 0.5123456790123457, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.553403881581766, "grad_norm": 6.234476089477539, "learning_rate": 1.9303146201833922e-05, "loss": 2.172689437866211, "memory(GiB)": 72.85, "step": 82940, "token_acc": 0.5355805243445693, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.553618096911015, "grad_norm": 6.078773498535156, "learning_rate": 1.9297834300107974e-05, "loss": 1.6303213119506836, "memory(GiB)": 72.85, "step": 82945, "token_acc": 0.592156862745098, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.5538323122402637, "grad_norm": 4.859559535980225, "learning_rate": 1.929252295458056e-05, "loss": 1.8559417724609375, "memory(GiB)": 72.85, "step": 82950, "token_acc": 0.5484848484848485, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.554046527569513, "grad_norm": 10.079492568969727, "learning_rate": 1.9287212165347885e-05, "loss": 2.2021404266357423, "memory(GiB)": 72.85, "step": 82955, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.554260742898762, "grad_norm": 5.514726161956787, "learning_rate": 1.9281901932506187e-05, "loss": 1.9925338745117187, "memory(GiB)": 72.85, "step": 82960, "token_acc": 0.5563636363636364, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.5544749582280106, 
"grad_norm": 5.984887599945068, "learning_rate": 1.9276592256151653e-05, "loss": 2.114542770385742, "memory(GiB)": 72.85, "step": 82965, "token_acc": 0.5451127819548872, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.55468917355726, "grad_norm": 6.072865962982178, "learning_rate": 1.9271283136380453e-05, "loss": 2.1652687072753904, "memory(GiB)": 72.85, "step": 82970, "token_acc": 0.5429447852760736, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.5549033888865087, "grad_norm": 6.3625102043151855, "learning_rate": 1.9265974573288802e-05, "loss": 2.0547412872314452, "memory(GiB)": 72.85, "step": 82975, "token_acc": 0.5625, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.5551176042157575, "grad_norm": 5.875123977661133, "learning_rate": 1.926066656697285e-05, "loss": 1.834113883972168, "memory(GiB)": 72.85, "step": 82980, "token_acc": 0.5878378378378378, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.5553318195450068, "grad_norm": 5.956394672393799, "learning_rate": 1.9255359117528742e-05, "loss": 2.298390769958496, "memory(GiB)": 72.85, "step": 82985, "token_acc": 0.50199203187251, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.5555460348742556, "grad_norm": 6.885656833648682, "learning_rate": 1.9250052225052665e-05, "loss": 2.1802791595458983, "memory(GiB)": 72.85, "step": 82990, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672676 }, { "epoch": 3.5557602502035044, "grad_norm": 6.344377040863037, "learning_rate": 1.9244745889640736e-05, "loss": 2.212208938598633, "memory(GiB)": 72.85, "step": 82995, "token_acc": 0.5154061624649859, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.5559744655327536, "grad_norm": 5.769633769989014, "learning_rate": 1.9239440111389088e-05, "loss": 1.9656021118164062, "memory(GiB)": 72.85, "step": 83000, "token_acc": 0.5882352941176471, "train_speed(iter/s)": 0.672668 }, { "epoch": 3.5559744655327536, "eval_loss": 2.001408576965332, "eval_runtime": 16.187, "eval_samples_per_second": 6.178, "eval_steps_per_second": 6.178, 
"eval_token_acc": 0.49744897959183676, "step": 83000 }, { "epoch": 3.5561886808620025, "grad_norm": 6.273305416107178, "learning_rate": 1.923413489039384e-05, "loss": 2.160663604736328, "memory(GiB)": 72.85, "step": 83005, "token_acc": 0.5052539404553416, "train_speed(iter/s)": 0.672571 }, { "epoch": 3.5564028961912513, "grad_norm": 6.881087779998779, "learning_rate": 1.9228830226751093e-05, "loss": 2.0028900146484374, "memory(GiB)": 72.85, "step": 83010, "token_acc": 0.5234899328859061, "train_speed(iter/s)": 0.672581 }, { "epoch": 3.5566171115205005, "grad_norm": 5.554699897766113, "learning_rate": 1.922352612055694e-05, "loss": 2.1140199661254884, "memory(GiB)": 72.85, "step": 83015, "token_acc": 0.5202492211838006, "train_speed(iter/s)": 0.672587 }, { "epoch": 3.5568313268497493, "grad_norm": 6.8287529945373535, "learning_rate": 1.9218222571907497e-05, "loss": 1.9440284729003907, "memory(GiB)": 72.85, "step": 83020, "token_acc": 0.6016260162601627, "train_speed(iter/s)": 0.672599 }, { "epoch": 3.557045542178998, "grad_norm": 6.963924407958984, "learning_rate": 1.9212919580898826e-05, "loss": 1.9951044082641602, "memory(GiB)": 72.85, "step": 83025, "token_acc": 0.5403726708074534, "train_speed(iter/s)": 0.672597 }, { "epoch": 3.5572597575082474, "grad_norm": 5.084224700927734, "learning_rate": 1.9207617147627e-05, "loss": 2.019510269165039, "memory(GiB)": 72.85, "step": 83030, "token_acc": 0.5183946488294314, "train_speed(iter/s)": 0.672587 }, { "epoch": 3.5574739728374962, "grad_norm": 5.535876750946045, "learning_rate": 1.9202315272188076e-05, "loss": 2.2280609130859377, "memory(GiB)": 72.85, "step": 83035, "token_acc": 0.4967741935483871, "train_speed(iter/s)": 0.672573 }, { "epoch": 3.557688188166745, "grad_norm": 7.329272747039795, "learning_rate": 1.9197013954678077e-05, "loss": 2.1400951385498046, "memory(GiB)": 72.85, "step": 83040, "token_acc": 0.5719844357976653, "train_speed(iter/s)": 0.672571 }, { "epoch": 3.5579024034959943, "grad_norm": 
9.868585586547852, "learning_rate": 1.9191713195193074e-05, "loss": 1.8984031677246094, "memory(GiB)": 72.85, "step": 83045, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672569 }, { "epoch": 3.558116618825243, "grad_norm": 5.599636554718018, "learning_rate": 1.91864129938291e-05, "loss": 2.1765819549560548, "memory(GiB)": 72.85, "step": 83050, "token_acc": 0.5518394648829431, "train_speed(iter/s)": 0.672583 }, { "epoch": 3.558330834154492, "grad_norm": 7.298469066619873, "learning_rate": 1.918111335068216e-05, "loss": 1.934345817565918, "memory(GiB)": 72.85, "step": 83055, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.558545049483741, "grad_norm": 5.597590446472168, "learning_rate": 1.9175814265848262e-05, "loss": 2.136075019836426, "memory(GiB)": 72.85, "step": 83060, "token_acc": 0.5093167701863354, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.55875926481299, "grad_norm": 5.597587585449219, "learning_rate": 1.917051573942341e-05, "loss": 1.935494613647461, "memory(GiB)": 72.85, "step": 83065, "token_acc": 0.5579710144927537, "train_speed(iter/s)": 0.672578 }, { "epoch": 3.558973480142239, "grad_norm": 6.342118263244629, "learning_rate": 1.916521777150358e-05, "loss": 2.122675895690918, "memory(GiB)": 72.85, "step": 83070, "token_acc": 0.5221518987341772, "train_speed(iter/s)": 0.672578 }, { "epoch": 3.559187695471488, "grad_norm": 4.614778518676758, "learning_rate": 1.915992036218474e-05, "loss": 1.954826545715332, "memory(GiB)": 72.85, "step": 83075, "token_acc": 0.6159420289855072, "train_speed(iter/s)": 0.672583 }, { "epoch": 3.559401910800737, "grad_norm": 8.837064743041992, "learning_rate": 1.9154623511562893e-05, "loss": 2.0786260604858398, "memory(GiB)": 72.85, "step": 83080, "token_acc": 0.5246478873239436, "train_speed(iter/s)": 0.672585 }, { "epoch": 3.5596161261299857, "grad_norm": 7.835968971252441, "learning_rate": 1.914932721973397e-05, "loss": 1.9827188491821288, "memory(GiB)": 72.85, "step": 
83085, "token_acc": 0.5495495495495496, "train_speed(iter/s)": 0.672592 }, { "epoch": 3.559830341459235, "grad_norm": 7.055253505706787, "learning_rate": 1.9144031486793927e-05, "loss": 2.1176275253295898, "memory(GiB)": 72.85, "step": 83090, "token_acc": 0.5542635658914729, "train_speed(iter/s)": 0.672599 }, { "epoch": 3.5600445567884837, "grad_norm": 8.373709678649902, "learning_rate": 1.9138736312838702e-05, "loss": 2.1115488052368163, "memory(GiB)": 72.85, "step": 83095, "token_acc": 0.5487012987012987, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.5602587721177326, "grad_norm": 6.089518070220947, "learning_rate": 1.91334416979642e-05, "loss": 2.1605796813964844, "memory(GiB)": 72.85, "step": 83100, "token_acc": 0.5347985347985348, "train_speed(iter/s)": 0.672619 }, { "epoch": 3.560472987446982, "grad_norm": 5.9852681159973145, "learning_rate": 1.912814764226637e-05, "loss": 2.1560701370239257, "memory(GiB)": 72.85, "step": 83105, "token_acc": 0.5412186379928315, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.5606872027762306, "grad_norm": 6.455447673797607, "learning_rate": 1.9122854145841107e-05, "loss": 2.354363441467285, "memory(GiB)": 72.85, "step": 83110, "token_acc": 0.504950495049505, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.5609014181054794, "grad_norm": 5.698825359344482, "learning_rate": 1.9117561208784292e-05, "loss": 2.219620704650879, "memory(GiB)": 72.85, "step": 83115, "token_acc": 0.5358490566037736, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.5611156334347287, "grad_norm": 5.6947126388549805, "learning_rate": 1.9112268831191845e-05, "loss": 2.3822317123413086, "memory(GiB)": 72.85, "step": 83120, "token_acc": 0.493006993006993, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.5613298487639775, "grad_norm": 5.9505934715271, "learning_rate": 1.9106977013159615e-05, "loss": 2.2773990631103516, "memory(GiB)": 72.85, "step": 83125, "token_acc": 0.5346534653465347, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.5615440640932263, 
"grad_norm": 6.384159088134766, "learning_rate": 1.9101685754783466e-05, "loss": 2.1282188415527346, "memory(GiB)": 72.85, "step": 83130, "token_acc": 0.5247524752475248, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.5617582794224756, "grad_norm": 4.726558685302734, "learning_rate": 1.9096395056159283e-05, "loss": 1.9658088684082031, "memory(GiB)": 72.85, "step": 83135, "token_acc": 0.5645756457564576, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.5619724947517244, "grad_norm": 6.675390243530273, "learning_rate": 1.909110491738289e-05, "loss": 2.13183708190918, "memory(GiB)": 72.85, "step": 83140, "token_acc": 0.5331230283911672, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.562186710080973, "grad_norm": 7.19795560836792, "learning_rate": 1.908581533855012e-05, "loss": 2.1522598266601562, "memory(GiB)": 72.85, "step": 83145, "token_acc": 0.548, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.5624009254102225, "grad_norm": 5.543943405151367, "learning_rate": 1.908052631975682e-05, "loss": 1.87596435546875, "memory(GiB)": 72.85, "step": 83150, "token_acc": 0.5580524344569289, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.5626151407394713, "grad_norm": 5.39097261428833, "learning_rate": 1.9075237861098778e-05, "loss": 2.1436431884765623, "memory(GiB)": 72.85, "step": 83155, "token_acc": 0.5229681978798587, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.56282935606872, "grad_norm": 5.552811145782471, "learning_rate": 1.9069949962671802e-05, "loss": 2.296702575683594, "memory(GiB)": 72.85, "step": 83160, "token_acc": 0.5045592705167173, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.5630435713979693, "grad_norm": 7.771615982055664, "learning_rate": 1.906466262457171e-05, "loss": 2.1019821166992188, "memory(GiB)": 72.85, "step": 83165, "token_acc": 0.4984126984126984, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.563257786727218, "grad_norm": 5.940589904785156, "learning_rate": 1.9059375846894272e-05, "loss": 2.19127197265625, "memory(GiB)": 72.85, "step": 
83170, "token_acc": 0.5358361774744027, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.563472002056467, "grad_norm": 5.378917217254639, "learning_rate": 1.9054089629735272e-05, "loss": 1.9118677139282227, "memory(GiB)": 72.85, "step": 83175, "token_acc": 0.5718954248366013, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.5636862173857162, "grad_norm": 5.738307952880859, "learning_rate": 1.9048803973190466e-05, "loss": 2.288035011291504, "memory(GiB)": 72.85, "step": 83180, "token_acc": 0.5228758169934641, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.563900432714965, "grad_norm": 6.3628106117248535, "learning_rate": 1.9043518877355593e-05, "loss": 2.1104270935058596, "memory(GiB)": 72.85, "step": 83185, "token_acc": 0.4812286689419795, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.564114648044214, "grad_norm": 5.837589263916016, "learning_rate": 1.903823434232641e-05, "loss": 2.1746124267578124, "memory(GiB)": 72.85, "step": 83190, "token_acc": 0.527972027972028, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.564328863373463, "grad_norm": 6.2125701904296875, "learning_rate": 1.9032950368198682e-05, "loss": 1.7467008590698243, "memory(GiB)": 72.85, "step": 83195, "token_acc": 0.58, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.564543078702712, "grad_norm": 5.122907638549805, "learning_rate": 1.90276669550681e-05, "loss": 2.1796602249145507, "memory(GiB)": 72.85, "step": 83200, "token_acc": 0.5359477124183006, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.5647572940319607, "grad_norm": 4.893338203430176, "learning_rate": 1.902238410303039e-05, "loss": 2.1053810119628906, "memory(GiB)": 72.85, "step": 83205, "token_acc": 0.535031847133758, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.56497150936121, "grad_norm": 8.453242301940918, "learning_rate": 1.901710181218125e-05, "loss": 1.9524019241333008, "memory(GiB)": 72.85, "step": 83210, "token_acc": 0.5984251968503937, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.565185724690459, "grad_norm": 
5.730264186859131, "learning_rate": 1.9011820082616376e-05, "loss": 2.0375545501708983, "memory(GiB)": 72.85, "step": 83215, "token_acc": 0.5544217687074829, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.5653999400197076, "grad_norm": 4.950985431671143, "learning_rate": 1.9006538914431432e-05, "loss": 2.5087684631347655, "memory(GiB)": 72.85, "step": 83220, "token_acc": 0.46, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.565614155348957, "grad_norm": 7.155237197875977, "learning_rate": 1.9001258307722125e-05, "loss": 2.008761405944824, "memory(GiB)": 72.85, "step": 83225, "token_acc": 0.5419161676646707, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.5658283706782057, "grad_norm": 4.945308685302734, "learning_rate": 1.899597826258411e-05, "loss": 1.8760126113891602, "memory(GiB)": 72.85, "step": 83230, "token_acc": 0.5779467680608364, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.5660425860074545, "grad_norm": 4.748457431793213, "learning_rate": 1.8990698779113026e-05, "loss": 2.2966001510620115, "memory(GiB)": 72.85, "step": 83235, "token_acc": 0.49375, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.5662568013367038, "grad_norm": 7.11780309677124, "learning_rate": 1.8985419857404525e-05, "loss": 2.379721832275391, "memory(GiB)": 72.85, "step": 83240, "token_acc": 0.5089605734767025, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.5664710166659526, "grad_norm": 4.884275436401367, "learning_rate": 1.898014149755422e-05, "loss": 1.945596694946289, "memory(GiB)": 72.85, "step": 83245, "token_acc": 0.5580645161290323, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.5666852319952014, "grad_norm": 6.497670650482178, "learning_rate": 1.897486369965777e-05, "loss": 2.0906049728393556, "memory(GiB)": 72.85, "step": 83250, "token_acc": 0.5361842105263158, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.5668994473244506, "grad_norm": 5.714499473571777, "learning_rate": 1.897064186601161e-05, "loss": 2.1486682891845703, "memory(GiB)": 72.85, "step": 83255, "token_acc": 
0.5359712230215827, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.5671136626536994, "grad_norm": 5.852686882019043, "learning_rate": 1.8965365079873e-05, "loss": 1.958428192138672, "memory(GiB)": 72.85, "step": 83260, "token_acc": 0.5270758122743683, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.5673278779829483, "grad_norm": 6.316087245941162, "learning_rate": 1.8960088855955916e-05, "loss": 2.189332962036133, "memory(GiB)": 72.85, "step": 83265, "token_acc": 0.5782312925170068, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.5675420933121975, "grad_norm": 5.668951511383057, "learning_rate": 1.895481319435592e-05, "loss": 2.1662275314331056, "memory(GiB)": 72.85, "step": 83270, "token_acc": 0.4612903225806452, "train_speed(iter/s)": 0.672699 }, { "epoch": 3.5677563086414463, "grad_norm": 4.486448764801025, "learning_rate": 1.8949538095168617e-05, "loss": 2.293834686279297, "memory(GiB)": 72.85, "step": 83275, "token_acc": 0.5153374233128835, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.567970523970695, "grad_norm": 6.1306986808776855, "learning_rate": 1.8944263558489567e-05, "loss": 2.1034942626953126, "memory(GiB)": 72.85, "step": 83280, "token_acc": 0.5490196078431373, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.5681847392999444, "grad_norm": 6.107184886932373, "learning_rate": 1.893898958441431e-05, "loss": 1.9026630401611329, "memory(GiB)": 72.85, "step": 83285, "token_acc": 0.5616883116883117, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.568398954629193, "grad_norm": 6.157905578613281, "learning_rate": 1.89337161730384e-05, "loss": 2.360404205322266, "memory(GiB)": 72.85, "step": 83290, "token_acc": 0.5149501661129569, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.568613169958442, "grad_norm": 5.584350109100342, "learning_rate": 1.8928443324457347e-05, "loss": 2.134543228149414, "memory(GiB)": 72.85, "step": 83295, "token_acc": 0.5824915824915825, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.5688273852876913, "grad_norm": 7.6993279457092285, 
"learning_rate": 1.8923171038766702e-05, "loss": 2.21347713470459, "memory(GiB)": 72.85, "step": 83300, "token_acc": 0.48, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.56904160061694, "grad_norm": 4.7866291999816895, "learning_rate": 1.891789931606197e-05, "loss": 2.201914405822754, "memory(GiB)": 72.85, "step": 83305, "token_acc": 0.5236486486486487, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.569255815946189, "grad_norm": 5.089260578155518, "learning_rate": 1.8912628156438633e-05, "loss": 2.1102170944213867, "memory(GiB)": 72.85, "step": 83310, "token_acc": 0.5450980392156862, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.569470031275438, "grad_norm": 6.978419780731201, "learning_rate": 1.890735755999221e-05, "loss": 2.062832260131836, "memory(GiB)": 72.85, "step": 83315, "token_acc": 0.5255474452554745, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.569684246604687, "grad_norm": 5.0524983406066895, "learning_rate": 1.890208752681818e-05, "loss": 1.9741361618041993, "memory(GiB)": 72.85, "step": 83320, "token_acc": 0.525679758308157, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.569898461933936, "grad_norm": 6.405671119689941, "learning_rate": 1.8896818057012007e-05, "loss": 1.6765867233276368, "memory(GiB)": 72.85, "step": 83325, "token_acc": 0.6063348416289592, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.570112677263185, "grad_norm": 4.71431303024292, "learning_rate": 1.8891549150669124e-05, "loss": 2.2197620391845705, "memory(GiB)": 72.85, "step": 83330, "token_acc": 0.5418060200668896, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.570326892592434, "grad_norm": 7.9988694190979, "learning_rate": 1.8886280807885037e-05, "loss": 2.307484245300293, "memory(GiB)": 72.85, "step": 83335, "token_acc": 0.5084175084175084, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.5705411079216827, "grad_norm": 5.271642684936523, "learning_rate": 1.8881013028755156e-05, "loss": 2.1712934494018556, "memory(GiB)": 72.85, "step": 83340, "token_acc": 0.5, 
"train_speed(iter/s)": 0.672718 }, { "epoch": 3.570755323250932, "grad_norm": 5.766791343688965, "learning_rate": 1.8875745813374917e-05, "loss": 2.218487548828125, "memory(GiB)": 72.85, "step": 83345, "token_acc": 0.49038461538461536, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.5709695385801807, "grad_norm": 6.585563659667969, "learning_rate": 1.8870479161839743e-05, "loss": 2.2106319427490235, "memory(GiB)": 72.85, "step": 83350, "token_acc": 0.5136986301369864, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.5711837539094295, "grad_norm": 8.92768383026123, "learning_rate": 1.886521307424502e-05, "loss": 2.357309913635254, "memory(GiB)": 72.85, "step": 83355, "token_acc": 0.5337620578778135, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.571397969238679, "grad_norm": 5.679088592529297, "learning_rate": 1.8859947550686186e-05, "loss": 2.0883224487304686, "memory(GiB)": 72.85, "step": 83360, "token_acc": 0.5321888412017167, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.5716121845679276, "grad_norm": 5.787170886993408, "learning_rate": 1.8854682591258615e-05, "loss": 2.06829776763916, "memory(GiB)": 72.85, "step": 83365, "token_acc": 0.5859154929577465, "train_speed(iter/s)": 0.672736 }, { "epoch": 3.5718263998971764, "grad_norm": 5.715604305267334, "learning_rate": 1.884941819605769e-05, "loss": 2.110569381713867, "memory(GiB)": 72.85, "step": 83370, "token_acc": 0.5358361774744027, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.5720406152264257, "grad_norm": 6.211131572723389, "learning_rate": 1.884415436517877e-05, "loss": 2.2429950714111326, "memory(GiB)": 72.85, "step": 83375, "token_acc": 0.5281899109792285, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.5722548305556745, "grad_norm": 5.9556403160095215, "learning_rate": 1.883889109871722e-05, "loss": 2.420719337463379, "memory(GiB)": 72.85, "step": 83380, "token_acc": 0.5, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.5724690458849233, "grad_norm": 6.140465259552002, "learning_rate": 
1.8833628396768378e-05, "loss": 1.9912548065185547, "memory(GiB)": 72.85, "step": 83385, "token_acc": 0.5649122807017544, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.5726832612141726, "grad_norm": 4.920034885406494, "learning_rate": 1.882836625942759e-05, "loss": 2.1518373489379883, "memory(GiB)": 72.85, "step": 83390, "token_acc": 0.5487804878048781, "train_speed(iter/s)": 0.672746 }, { "epoch": 3.5728974765434214, "grad_norm": 5.026478290557861, "learning_rate": 1.88231046867902e-05, "loss": 1.8731456756591798, "memory(GiB)": 72.85, "step": 83395, "token_acc": 0.5759717314487632, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.57311169187267, "grad_norm": 4.730144023895264, "learning_rate": 1.881784367895152e-05, "loss": 2.200334358215332, "memory(GiB)": 72.85, "step": 83400, "token_acc": 0.5240793201133145, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.5733259072019194, "grad_norm": 7.64863395690918, "learning_rate": 1.8812583236006852e-05, "loss": 2.043418312072754, "memory(GiB)": 72.85, "step": 83405, "token_acc": 0.5852713178294574, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.5735401225311683, "grad_norm": 5.074132919311523, "learning_rate": 1.8807323358051498e-05, "loss": 1.7403785705566406, "memory(GiB)": 72.85, "step": 83410, "token_acc": 0.5755395683453237, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.573754337860417, "grad_norm": 6.1307477951049805, "learning_rate": 1.8802064045180723e-05, "loss": 2.1357803344726562, "memory(GiB)": 72.85, "step": 83415, "token_acc": 0.5359712230215827, "train_speed(iter/s)": 0.672749 }, { "epoch": 3.5739685531896663, "grad_norm": 6.405646800994873, "learning_rate": 1.8796805297489846e-05, "loss": 2.1927452087402344, "memory(GiB)": 72.85, "step": 83420, "token_acc": 0.4907749077490775, "train_speed(iter/s)": 0.672754 }, { "epoch": 3.574182768518915, "grad_norm": 4.625175476074219, "learning_rate": 1.8791547115074105e-05, "loss": 1.9200294494628907, "memory(GiB)": 72.85, "step": 83425, "token_acc": 
0.5371024734982333, "train_speed(iter/s)": 0.672743 }, { "epoch": 3.574396983848164, "grad_norm": 6.223049640655518, "learning_rate": 1.8786289498028763e-05, "loss": 1.7504850387573243, "memory(GiB)": 72.85, "step": 83430, "token_acc": 0.610236220472441, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.574611199177413, "grad_norm": 6.076009750366211, "learning_rate": 1.8781032446449077e-05, "loss": 2.2099365234375, "memory(GiB)": 72.85, "step": 83435, "token_acc": 0.5107692307692308, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.574825414506662, "grad_norm": 6.902676582336426, "learning_rate": 1.8775775960430248e-05, "loss": 1.676734733581543, "memory(GiB)": 72.85, "step": 83440, "token_acc": 0.5797665369649806, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.575039629835911, "grad_norm": 7.3944411277771, "learning_rate": 1.8770520040067546e-05, "loss": 2.211873245239258, "memory(GiB)": 72.85, "step": 83445, "token_acc": 0.49709302325581395, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.57525384516516, "grad_norm": 6.683770179748535, "learning_rate": 1.8765264685456174e-05, "loss": 2.2762277603149412, "memory(GiB)": 72.85, "step": 83450, "token_acc": 0.5145631067961165, "train_speed(iter/s)": 0.672736 }, { "epoch": 3.575468060494409, "grad_norm": 9.249117851257324, "learning_rate": 1.8761060809171997e-05, "loss": 2.016172409057617, "memory(GiB)": 72.85, "step": 83455, "token_acc": 0.5544217687074829, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.5756822758236577, "grad_norm": 4.383972644805908, "learning_rate": 1.875580647315292e-05, "loss": 1.9546005249023437, "memory(GiB)": 72.85, "step": 83460, "token_acc": 0.5610687022900763, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.575896491152907, "grad_norm": 7.257941722869873, "learning_rate": 1.8750552703151718e-05, "loss": 1.911958885192871, "memory(GiB)": 72.85, "step": 83465, "token_acc": 0.5641025641025641, "train_speed(iter/s)": 0.672748 }, { "epoch": 3.576110706482156, "grad_norm": 6.156497955322266, 
"learning_rate": 1.8745299499263546e-05, "loss": 1.8969430923461914, "memory(GiB)": 72.85, "step": 83470, "token_acc": 0.5725490196078431, "train_speed(iter/s)": 0.672754 }, { "epoch": 3.5763249218114046, "grad_norm": 6.696033000946045, "learning_rate": 1.8740046861583614e-05, "loss": 2.081755447387695, "memory(GiB)": 72.85, "step": 83475, "token_acc": 0.5296167247386759, "train_speed(iter/s)": 0.672759 }, { "epoch": 3.576539137140654, "grad_norm": 6.247641563415527, "learning_rate": 1.873479479020706e-05, "loss": 2.1369598388671873, "memory(GiB)": 72.85, "step": 83480, "token_acc": 0.532319391634981, "train_speed(iter/s)": 0.672756 }, { "epoch": 3.5767533524699027, "grad_norm": 8.182168006896973, "learning_rate": 1.8729543285229023e-05, "loss": 2.1743579864501954, "memory(GiB)": 72.85, "step": 83485, "token_acc": 0.5231316725978647, "train_speed(iter/s)": 0.672748 }, { "epoch": 3.5769675677991515, "grad_norm": 6.885693550109863, "learning_rate": 1.8724292346744647e-05, "loss": 2.319829559326172, "memory(GiB)": 72.85, "step": 83490, "token_acc": 0.4807121661721068, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.5771817831284007, "grad_norm": 7.227003574371338, "learning_rate": 1.8719041974849033e-05, "loss": 2.004137420654297, "memory(GiB)": 72.85, "step": 83495, "token_acc": 0.554006968641115, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.5773959984576496, "grad_norm": 4.750421524047852, "learning_rate": 1.871379216963734e-05, "loss": 2.0774442672729494, "memory(GiB)": 72.85, "step": 83500, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.5773959984576496, "eval_loss": 2.0686888694763184, "eval_runtime": 16.374, "eval_samples_per_second": 6.107, "eval_steps_per_second": 6.107, "eval_token_acc": 0.5069060773480663, "step": 83500 }, { "epoch": 3.5776102137868984, "grad_norm": 4.427478790283203, "learning_rate": 1.870854293120463e-05, "loss": 2.3295806884765624, "memory(GiB)": 72.85, "step": 83505, "token_acc": 0.49901960784313726, 
"train_speed(iter/s)": 0.672639 }, { "epoch": 3.5778244291161476, "grad_norm": 5.297327041625977, "learning_rate": 1.870329425964604e-05, "loss": 1.9247047424316406, "memory(GiB)": 72.85, "step": 83510, "token_acc": 0.5642633228840125, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.5780386444453964, "grad_norm": 4.7291789054870605, "learning_rate": 1.8698046155056627e-05, "loss": 2.1987869262695314, "memory(GiB)": 72.85, "step": 83515, "token_acc": 0.5016722408026756, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.5782528597746452, "grad_norm": 6.825068950653076, "learning_rate": 1.869279861753147e-05, "loss": 2.028423881530762, "memory(GiB)": 72.85, "step": 83520, "token_acc": 0.5543071161048689, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.5784670751038945, "grad_norm": 5.965996742248535, "learning_rate": 1.8687551647165623e-05, "loss": 2.1576488494873045, "memory(GiB)": 72.85, "step": 83525, "token_acc": 0.5198412698412699, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.5786812904331433, "grad_norm": 5.260438442230225, "learning_rate": 1.8682305244054166e-05, "loss": 1.968223762512207, "memory(GiB)": 72.85, "step": 83530, "token_acc": 0.49828178694158076, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.578895505762392, "grad_norm": 6.222975730895996, "learning_rate": 1.867705940829213e-05, "loss": 2.368686866760254, "memory(GiB)": 72.85, "step": 83535, "token_acc": 0.47035573122529645, "train_speed(iter/s)": 0.672655 }, { "epoch": 3.5791097210916414, "grad_norm": 5.754146099090576, "learning_rate": 1.867181413997454e-05, "loss": 1.931357192993164, "memory(GiB)": 72.85, "step": 83540, "token_acc": 0.5827067669172933, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.57932393642089, "grad_norm": 5.985625267028809, "learning_rate": 1.8666569439196436e-05, "loss": 2.2326000213623045, "memory(GiB)": 72.85, "step": 83545, "token_acc": 0.5149700598802395, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.579538151750139, "grad_norm": 5.390135765075684, 
"learning_rate": 1.8661325306052813e-05, "loss": 2.0924964904785157, "memory(GiB)": 72.85, "step": 83550, "token_acc": 0.5569620253164557, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.5797523670793883, "grad_norm": 5.410092353820801, "learning_rate": 1.8656081740638664e-05, "loss": 2.066447448730469, "memory(GiB)": 72.85, "step": 83555, "token_acc": 0.583969465648855, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.579966582408637, "grad_norm": 5.771743297576904, "learning_rate": 1.865083874304901e-05, "loss": 2.000400924682617, "memory(GiB)": 72.85, "step": 83560, "token_acc": 0.5601503759398496, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.580180797737886, "grad_norm": 7.093830108642578, "learning_rate": 1.8645596313378828e-05, "loss": 2.36151123046875, "memory(GiB)": 72.85, "step": 83565, "token_acc": 0.4929078014184397, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.580395013067135, "grad_norm": 5.243053436279297, "learning_rate": 1.8640354451723073e-05, "loss": 2.011961555480957, "memory(GiB)": 72.85, "step": 83570, "token_acc": 0.5159420289855072, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.580609228396384, "grad_norm": 6.282355308532715, "learning_rate": 1.8635113158176704e-05, "loss": 2.0110200881958007, "memory(GiB)": 72.85, "step": 83575, "token_acc": 0.5471698113207547, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.5808234437256328, "grad_norm": 5.028402328491211, "learning_rate": 1.86298724328347e-05, "loss": 2.186082649230957, "memory(GiB)": 72.85, "step": 83580, "token_acc": 0.544891640866873, "train_speed(iter/s)": 0.672645 }, { "epoch": 3.581037659054882, "grad_norm": 5.811917781829834, "learning_rate": 1.8624632275791965e-05, "loss": 2.157979965209961, "memory(GiB)": 72.85, "step": 83585, "token_acc": 0.5442622950819672, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.581251874384131, "grad_norm": 7.339115619659424, "learning_rate": 1.8619392687143466e-05, "loss": 2.1030029296875, "memory(GiB)": 72.85, "step": 83590, "token_acc": 
0.5543859649122806, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.5814660897133797, "grad_norm": 4.525493144989014, "learning_rate": 1.8614153666984107e-05, "loss": 2.0734907150268556, "memory(GiB)": 72.85, "step": 83595, "token_acc": 0.5705521472392638, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.581680305042629, "grad_norm": 7.065184593200684, "learning_rate": 1.8608915215408795e-05, "loss": 2.1248910903930662, "memory(GiB)": 72.85, "step": 83600, "token_acc": 0.562111801242236, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.5818945203718777, "grad_norm": 6.4457244873046875, "learning_rate": 1.8603677332512433e-05, "loss": 1.892728614807129, "memory(GiB)": 72.85, "step": 83605, "token_acc": 0.563573883161512, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.5821087357011265, "grad_norm": 5.621789932250977, "learning_rate": 1.859844001838989e-05, "loss": 2.346040725708008, "memory(GiB)": 72.85, "step": 83610, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.582322951030376, "grad_norm": 6.189189434051514, "learning_rate": 1.859320327313608e-05, "loss": 2.007081413269043, "memory(GiB)": 72.85, "step": 83615, "token_acc": 0.5562700964630225, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.5825371663596246, "grad_norm": 5.832401275634766, "learning_rate": 1.8587967096845853e-05, "loss": 1.785028076171875, "memory(GiB)": 72.85, "step": 83620, "token_acc": 0.6157024793388429, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.5827513816888734, "grad_norm": 6.170268535614014, "learning_rate": 1.8582731489614075e-05, "loss": 1.9844202041625976, "memory(GiB)": 72.85, "step": 83625, "token_acc": 0.5639344262295082, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.5829655970181227, "grad_norm": 5.565478324890137, "learning_rate": 1.857749645153558e-05, "loss": 2.064278411865234, "memory(GiB)": 72.85, "step": 83630, "token_acc": 0.5581395348837209, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.5831798123473715, "grad_norm": 
6.645668029785156, "learning_rate": 1.8572261982705218e-05, "loss": 2.187485694885254, "memory(GiB)": 72.85, "step": 83635, "token_acc": 0.5291828793774319, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.5833940276766203, "grad_norm": 5.165337562561035, "learning_rate": 1.8567028083217796e-05, "loss": 2.144661521911621, "memory(GiB)": 72.85, "step": 83640, "token_acc": 0.5504087193460491, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.5836082430058696, "grad_norm": 5.564573287963867, "learning_rate": 1.856179475316816e-05, "loss": 1.93389949798584, "memory(GiB)": 72.85, "step": 83645, "token_acc": 0.5540983606557377, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.5838224583351184, "grad_norm": 6.700953483581543, "learning_rate": 1.855656199265109e-05, "loss": 2.262552833557129, "memory(GiB)": 72.85, "step": 83650, "token_acc": 0.5614035087719298, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.584036673664367, "grad_norm": 6.507002830505371, "learning_rate": 1.8551329801761407e-05, "loss": 2.1920055389404296, "memory(GiB)": 72.85, "step": 83655, "token_acc": 0.4943820224719101, "train_speed(iter/s)": 0.672666 }, { "epoch": 3.5842508889936164, "grad_norm": 6.840691089630127, "learning_rate": 1.8546098180593892e-05, "loss": 1.9029590606689453, "memory(GiB)": 72.85, "step": 83660, "token_acc": 0.5916955017301038, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.5844651043228652, "grad_norm": 5.978414058685303, "learning_rate": 1.854086712924331e-05, "loss": 2.0107376098632814, "memory(GiB)": 72.85, "step": 83665, "token_acc": 0.5463258785942492, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.584679319652114, "grad_norm": 6.740415573120117, "learning_rate": 1.8535636647804423e-05, "loss": 2.026999282836914, "memory(GiB)": 72.85, "step": 83670, "token_acc": 0.5330882352941176, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.5848935349813633, "grad_norm": 4.795135021209717, "learning_rate": 1.8530406736372004e-05, "loss": 1.9623695373535157, "memory(GiB)": 72.85, 
"step": 83675, "token_acc": 0.550531914893617, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.585107750310612, "grad_norm": 7.117599010467529, "learning_rate": 1.852517739504079e-05, "loss": 2.121477508544922, "memory(GiB)": 72.85, "step": 83680, "token_acc": 0.5882352941176471, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.585321965639861, "grad_norm": 5.819565773010254, "learning_rate": 1.851994862390551e-05, "loss": 2.02797908782959, "memory(GiB)": 72.85, "step": 83685, "token_acc": 0.6006711409395973, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.58553618096911, "grad_norm": 4.678924560546875, "learning_rate": 1.8514720423060887e-05, "loss": 2.381135177612305, "memory(GiB)": 72.85, "step": 83690, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.585750396298359, "grad_norm": 5.455377578735352, "learning_rate": 1.8509492792601652e-05, "loss": 1.6572793960571288, "memory(GiB)": 72.85, "step": 83695, "token_acc": 0.608540925266904, "train_speed(iter/s)": 0.672684 }, { "epoch": 3.585964611627608, "grad_norm": 5.073955535888672, "learning_rate": 1.8504265732622468e-05, "loss": 1.9869371414184571, "memory(GiB)": 72.85, "step": 83700, "token_acc": 0.564625850340136, "train_speed(iter/s)": 0.672676 }, { "epoch": 3.586178826956857, "grad_norm": 5.018492698669434, "learning_rate": 1.8499039243218074e-05, "loss": 2.023013687133789, "memory(GiB)": 72.85, "step": 83705, "token_acc": 0.5433962264150943, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.586393042286106, "grad_norm": 6.992395401000977, "learning_rate": 1.8493813324483138e-05, "loss": 2.1786556243896484, "memory(GiB)": 72.85, "step": 83710, "token_acc": 0.5271565495207667, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.5866072576153547, "grad_norm": 5.091546058654785, "learning_rate": 1.8488587976512327e-05, "loss": 2.1111961364746095, "memory(GiB)": 72.85, "step": 83715, "token_acc": 0.5068119891008175, "train_speed(iter/s)": 0.672699 }, { "epoch": 3.586821472944604, 
"grad_norm": 5.557155609130859, "learning_rate": 1.8483363199400306e-05, "loss": 2.158483695983887, "memory(GiB)": 72.85, "step": 83720, "token_acc": 0.5347985347985348, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.5870356882738528, "grad_norm": 5.43412971496582, "learning_rate": 1.8478138993241712e-05, "loss": 2.1223493576049806, "memory(GiB)": 72.85, "step": 83725, "token_acc": 0.5288135593220339, "train_speed(iter/s)": 0.672699 }, { "epoch": 3.5872499036031016, "grad_norm": 4.463566303253174, "learning_rate": 1.8472915358131193e-05, "loss": 1.8546638488769531, "memory(GiB)": 72.85, "step": 83730, "token_acc": 0.5953947368421053, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.587464118932351, "grad_norm": 6.27388334274292, "learning_rate": 1.8467692294163407e-05, "loss": 2.0382663726806642, "memory(GiB)": 72.85, "step": 83735, "token_acc": 0.5363984674329502, "train_speed(iter/s)": 0.672699 }, { "epoch": 3.5876783342615997, "grad_norm": 5.5938401222229, "learning_rate": 1.846246980143296e-05, "loss": 1.775583267211914, "memory(GiB)": 72.85, "step": 83740, "token_acc": 0.5646551724137931, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.5878925495908485, "grad_norm": 5.672805309295654, "learning_rate": 1.845724788003445e-05, "loss": 1.9286548614501953, "memory(GiB)": 72.85, "step": 83745, "token_acc": 0.5448504983388704, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.5881067649200977, "grad_norm": 5.62899923324585, "learning_rate": 1.845202653006248e-05, "loss": 1.944415283203125, "memory(GiB)": 72.85, "step": 83750, "token_acc": 0.554858934169279, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.5883209802493465, "grad_norm": 6.90693473815918, "learning_rate": 1.844680575161164e-05, "loss": 1.8603912353515626, "memory(GiB)": 72.85, "step": 83755, "token_acc": 0.5539358600583091, "train_speed(iter/s)": 0.67269 }, { "epoch": 3.5885351955785953, "grad_norm": 7.176619052886963, "learning_rate": 1.8441585544776523e-05, "loss": 1.876072311401367, "memory(GiB)": 
72.85, "step": 83760, "token_acc": 0.556, "train_speed(iter/s)": 0.672687 }, { "epoch": 3.5887494109078446, "grad_norm": 5.910101413726807, "learning_rate": 1.8436365909651687e-05, "loss": 2.370067024230957, "memory(GiB)": 72.85, "step": 83765, "token_acc": 0.5, "train_speed(iter/s)": 0.672685 }, { "epoch": 3.5889636262370934, "grad_norm": 5.207287311553955, "learning_rate": 1.843114684633169e-05, "loss": 2.13425235748291, "memory(GiB)": 72.85, "step": 83770, "token_acc": 0.5682819383259912, "train_speed(iter/s)": 0.67269 }, { "epoch": 3.5891778415663422, "grad_norm": 5.947693347930908, "learning_rate": 1.8425928354911087e-05, "loss": 2.084563446044922, "memory(GiB)": 72.85, "step": 83775, "token_acc": 0.5405405405405406, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.5893920568955915, "grad_norm": 4.930802822113037, "learning_rate": 1.842071043548441e-05, "loss": 2.2197099685668946, "memory(GiB)": 72.85, "step": 83780, "token_acc": 0.5206349206349207, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.5896062722248403, "grad_norm": 5.300042629241943, "learning_rate": 1.841549308814617e-05, "loss": 2.3996334075927734, "memory(GiB)": 72.85, "step": 83785, "token_acc": 0.5135135135135135, "train_speed(iter/s)": 0.672692 }, { "epoch": 3.589820487554089, "grad_norm": 5.809393882751465, "learning_rate": 1.8410276312990915e-05, "loss": 2.3782548904418945, "memory(GiB)": 72.85, "step": 83790, "token_acc": 0.5228070175438596, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.5900347028833384, "grad_norm": 6.298556327819824, "learning_rate": 1.8405060110113143e-05, "loss": 2.265576171875, "memory(GiB)": 72.85, "step": 83795, "token_acc": 0.5382059800664452, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.590248918212587, "grad_norm": 7.3680033683776855, "learning_rate": 1.8399844479607324e-05, "loss": 1.9968643188476562, "memory(GiB)": 72.85, "step": 83800, "token_acc": 0.5340909090909091, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.590463133541836, "grad_norm": 
5.011641979217529, "learning_rate": 1.8394629421567984e-05, "loss": 1.9047718048095703, "memory(GiB)": 72.85, "step": 83805, "token_acc": 0.5469255663430421, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.5906773488710853, "grad_norm": 5.944525718688965, "learning_rate": 1.8389414936089584e-05, "loss": 2.3990880966186525, "memory(GiB)": 72.85, "step": 83810, "token_acc": 0.45714285714285713, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.590891564200334, "grad_norm": 5.085084438323975, "learning_rate": 1.838420102326656e-05, "loss": 2.155971908569336, "memory(GiB)": 72.85, "step": 83815, "token_acc": 0.4939759036144578, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.591105779529583, "grad_norm": 5.891717433929443, "learning_rate": 1.8378987683193417e-05, "loss": 2.1102243423461915, "memory(GiB)": 72.85, "step": 83820, "token_acc": 0.505338078291815, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.591319994858832, "grad_norm": 8.198873519897461, "learning_rate": 1.837377491596457e-05, "loss": 2.2190629959106447, "memory(GiB)": 72.85, "step": 83825, "token_acc": 0.5431654676258992, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.591534210188081, "grad_norm": 4.436293125152588, "learning_rate": 1.8368562721674465e-05, "loss": 2.066850471496582, "memory(GiB)": 72.85, "step": 83830, "token_acc": 0.5448717948717948, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.5917484255173298, "grad_norm": 5.887960433959961, "learning_rate": 1.8363351100417515e-05, "loss": 1.9122787475585938, "memory(GiB)": 72.85, "step": 83835, "token_acc": 0.5340909090909091, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.591962640846579, "grad_norm": 9.618741035461426, "learning_rate": 1.8358140052288136e-05, "loss": 1.9595563888549805, "memory(GiB)": 72.85, "step": 83840, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.592176856175828, "grad_norm": 5.9966535568237305, "learning_rate": 1.8352929577380718e-05, "loss": 2.165599060058594, "memory(GiB)": 72.85, 
"step": 83845, "token_acc": 0.553030303030303, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.5923910715050766, "grad_norm": 6.865417957305908, "learning_rate": 1.834771967578968e-05, "loss": 2.4208324432373045, "memory(GiB)": 72.85, "step": 83850, "token_acc": 0.5046439628482973, "train_speed(iter/s)": 0.672712 }, { "epoch": 3.592605286834326, "grad_norm": 5.451267242431641, "learning_rate": 1.83425103476094e-05, "loss": 1.7685705184936524, "memory(GiB)": 72.85, "step": 83855, "token_acc": 0.5702127659574469, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.5928195021635747, "grad_norm": 7.6960015296936035, "learning_rate": 1.8337301592934235e-05, "loss": 1.9741268157958984, "memory(GiB)": 72.85, "step": 83860, "token_acc": 0.5793991416309013, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.5930337174928235, "grad_norm": 10.72528076171875, "learning_rate": 1.8332093411858557e-05, "loss": 2.0218801498413086, "memory(GiB)": 72.85, "step": 83865, "token_acc": 0.5229357798165137, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.5932479328220728, "grad_norm": 6.096974849700928, "learning_rate": 1.8326885804476702e-05, "loss": 2.032725715637207, "memory(GiB)": 72.85, "step": 83870, "token_acc": 0.5444126074498568, "train_speed(iter/s)": 0.672741 }, { "epoch": 3.5934621481513216, "grad_norm": 4.607923984527588, "learning_rate": 1.8321678770883016e-05, "loss": 1.9732400894165039, "memory(GiB)": 72.85, "step": 83875, "token_acc": 0.5650887573964497, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.5936763634805704, "grad_norm": 4.8408050537109375, "learning_rate": 1.831647231117185e-05, "loss": 2.372750663757324, "memory(GiB)": 72.85, "step": 83880, "token_acc": 0.5433526011560693, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.5938905788098197, "grad_norm": 6.482077121734619, "learning_rate": 1.8311266425437516e-05, "loss": 1.9850587844848633, "memory(GiB)": 72.85, "step": 83885, "token_acc": 0.5686274509803921, "train_speed(iter/s)": 0.672742 }, { "epoch": 
3.5941047941390685, "grad_norm": 8.281739234924316, "learning_rate": 1.8306061113774313e-05, "loss": 2.1039377212524415, "memory(GiB)": 72.85, "step": 83890, "token_acc": 0.545774647887324, "train_speed(iter/s)": 0.672751 }, { "epoch": 3.5943190094683173, "grad_norm": 5.32125186920166, "learning_rate": 1.8300856376276547e-05, "loss": 2.185344123840332, "memory(GiB)": 72.85, "step": 83895, "token_acc": 0.5407407407407407, "train_speed(iter/s)": 0.672755 }, { "epoch": 3.5945332247975665, "grad_norm": 5.356077671051025, "learning_rate": 1.8295652213038483e-05, "loss": 1.8926498413085937, "memory(GiB)": 72.85, "step": 83900, "token_acc": 0.6071428571428571, "train_speed(iter/s)": 0.67275 }, { "epoch": 3.5947474401268154, "grad_norm": 5.835246562957764, "learning_rate": 1.8290448624154443e-05, "loss": 2.1954030990600586, "memory(GiB)": 72.85, "step": 83905, "token_acc": 0.541033434650456, "train_speed(iter/s)": 0.672744 }, { "epoch": 3.594961655456064, "grad_norm": 6.190213680267334, "learning_rate": 1.828524560971867e-05, "loss": 2.16500301361084, "memory(GiB)": 72.85, "step": 83910, "token_acc": 0.4854014598540146, "train_speed(iter/s)": 0.672743 }, { "epoch": 3.5951758707853134, "grad_norm": 9.812746047973633, "learning_rate": 1.8280043169825422e-05, "loss": 2.2727819442749024, "memory(GiB)": 72.85, "step": 83915, "token_acc": 0.5463258785942492, "train_speed(iter/s)": 0.672753 }, { "epoch": 3.5953900861145622, "grad_norm": 7.225423336029053, "learning_rate": 1.8274841304568947e-05, "loss": 2.1206398010253906, "memory(GiB)": 72.85, "step": 83920, "token_acc": 0.5096774193548387, "train_speed(iter/s)": 0.672758 }, { "epoch": 3.595604301443811, "grad_norm": 5.333999156951904, "learning_rate": 1.826964001404349e-05, "loss": 2.2244327545166014, "memory(GiB)": 72.85, "step": 83925, "token_acc": 0.5176056338028169, "train_speed(iter/s)": 0.672757 }, { "epoch": 3.5958185167730603, "grad_norm": 5.352041244506836, "learning_rate": 1.8264439298343243e-05, "loss": 
2.021162223815918, "memory(GiB)": 72.85, "step": 83930, "token_acc": 0.5578231292517006, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.596032732102309, "grad_norm": 5.211922645568848, "learning_rate": 1.825923915756247e-05, "loss": 2.045307922363281, "memory(GiB)": 72.85, "step": 83935, "token_acc": 0.5701219512195121, "train_speed(iter/s)": 0.672764 }, { "epoch": 3.596246947431558, "grad_norm": 5.423749923706055, "learning_rate": 1.8254039591795352e-05, "loss": 1.9919286727905274, "memory(GiB)": 72.85, "step": 83940, "token_acc": 0.564935064935065, "train_speed(iter/s)": 0.672773 }, { "epoch": 3.596461162760807, "grad_norm": 6.843000888824463, "learning_rate": 1.824884060113607e-05, "loss": 1.9305192947387695, "memory(GiB)": 72.85, "step": 83945, "token_acc": 0.5781818181818181, "train_speed(iter/s)": 0.672779 }, { "epoch": 3.596675378090056, "grad_norm": 5.88124942779541, "learning_rate": 1.8243642185678845e-05, "loss": 2.2204898834228515, "memory(GiB)": 72.85, "step": 83950, "token_acc": 0.5278810408921933, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.596889593419305, "grad_norm": 4.482949256896973, "learning_rate": 1.823844434551783e-05, "loss": 2.1756263732910157, "memory(GiB)": 72.85, "step": 83955, "token_acc": 0.5411764705882353, "train_speed(iter/s)": 0.672774 }, { "epoch": 3.597103808748554, "grad_norm": 5.6633429527282715, "learning_rate": 1.823324708074718e-05, "loss": 2.4167812347412108, "memory(GiB)": 72.85, "step": 83960, "token_acc": 0.511864406779661, "train_speed(iter/s)": 0.672779 }, { "epoch": 3.597318024077803, "grad_norm": 4.648917198181152, "learning_rate": 1.8228050391461066e-05, "loss": 2.127926063537598, "memory(GiB)": 72.85, "step": 83965, "token_acc": 0.5817610062893082, "train_speed(iter/s)": 0.672786 }, { "epoch": 3.597532239407052, "grad_norm": 5.776437759399414, "learning_rate": 1.8222854277753632e-05, "loss": 2.1466365814208985, "memory(GiB)": 72.85, "step": 83970, "token_acc": 0.5191082802547771, "train_speed(iter/s)": 
0.672781 }, { "epoch": 3.597746454736301, "grad_norm": 5.173720836639404, "learning_rate": 1.8217658739718994e-05, "loss": 2.090825843811035, "memory(GiB)": 72.85, "step": 83975, "token_acc": 0.5492063492063493, "train_speed(iter/s)": 0.672782 }, { "epoch": 3.5979606700655498, "grad_norm": 5.401615142822266, "learning_rate": 1.821246377745129e-05, "loss": 2.199298286437988, "memory(GiB)": 72.85, "step": 83980, "token_acc": 0.5337423312883436, "train_speed(iter/s)": 0.672782 }, { "epoch": 3.598174885394799, "grad_norm": 7.402063846588135, "learning_rate": 1.8207269391044618e-05, "loss": 2.1279937744140627, "memory(GiB)": 72.85, "step": 83985, "token_acc": 0.5387323943661971, "train_speed(iter/s)": 0.672781 }, { "epoch": 3.598389100724048, "grad_norm": 5.936182975769043, "learning_rate": 1.820207558059307e-05, "loss": 2.0774005889892577, "memory(GiB)": 72.85, "step": 83990, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.672782 }, { "epoch": 3.5986033160532966, "grad_norm": 5.100615978240967, "learning_rate": 1.8196882346190767e-05, "loss": 2.2904293060302736, "memory(GiB)": 72.85, "step": 83995, "token_acc": 0.501577287066246, "train_speed(iter/s)": 0.672783 }, { "epoch": 3.598817531382546, "grad_norm": 9.214699745178223, "learning_rate": 1.8191689687931778e-05, "loss": 2.0721031188964845, "memory(GiB)": 72.85, "step": 84000, "token_acc": 0.5266457680250783, "train_speed(iter/s)": 0.672774 }, { "epoch": 3.598817531382546, "eval_loss": 2.191737413406372, "eval_runtime": 15.9565, "eval_samples_per_second": 6.267, "eval_steps_per_second": 6.267, "eval_token_acc": 0.46002460024600245, "step": 84000 }, { "epoch": 3.5990317467117947, "grad_norm": 5.231319427490234, "learning_rate": 1.8186497605910163e-05, "loss": 2.1910423278808593, "memory(GiB)": 72.85, "step": 84005, "token_acc": 0.4791288566243194, "train_speed(iter/s)": 0.672678 }, { "epoch": 3.5992459620410435, "grad_norm": 4.9541850090026855, "learning_rate": 1.818130610021999e-05, "loss": 
2.224306106567383, "memory(GiB)": 72.85, "step": 84010, "token_acc": 0.5, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.599460177370293, "grad_norm": 4.807674407958984, "learning_rate": 1.8176115170955292e-05, "loss": 2.0208738327026365, "memory(GiB)": 72.85, "step": 84015, "token_acc": 0.5177304964539007, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.5996743926995416, "grad_norm": 5.211023330688477, "learning_rate": 1.8170924818210115e-05, "loss": 2.007013130187988, "memory(GiB)": 72.85, "step": 84020, "token_acc": 0.5443037974683544, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.5998886080287904, "grad_norm": 6.643101692199707, "learning_rate": 1.816573504207852e-05, "loss": 1.972304344177246, "memory(GiB)": 72.85, "step": 84025, "token_acc": 0.5299145299145299, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.6001028233580397, "grad_norm": 5.969718933105469, "learning_rate": 1.8160545842654485e-05, "loss": 1.9763336181640625, "memory(GiB)": 72.85, "step": 84030, "token_acc": 0.5663082437275986, "train_speed(iter/s)": 0.672677 }, { "epoch": 3.6003170386872885, "grad_norm": 6.444377422332764, "learning_rate": 1.815535722003204e-05, "loss": 2.2290878295898438, "memory(GiB)": 72.85, "step": 84035, "token_acc": 0.5409252669039146, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.6005312540165373, "grad_norm": 5.777790069580078, "learning_rate": 1.8150169174305164e-05, "loss": 1.8355195999145508, "memory(GiB)": 72.85, "step": 84040, "token_acc": 0.573943661971831, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.6007454693457865, "grad_norm": 6.98452615737915, "learning_rate": 1.8144981705567838e-05, "loss": 1.6986221313476562, "memory(GiB)": 72.85, "step": 84045, "token_acc": 0.5956521739130435, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.6009596846750354, "grad_norm": 6.004899024963379, "learning_rate": 1.813979481391407e-05, "loss": 1.8630226135253907, "memory(GiB)": 72.85, "step": 84050, "token_acc": 0.5796460176991151, "train_speed(iter/s)": 0.672664 }, { 
"epoch": 3.601173900004284, "grad_norm": 4.501643657684326, "learning_rate": 1.81346084994378e-05, "loss": 1.9453851699829101, "memory(GiB)": 72.85, "step": 84055, "token_acc": 0.5780730897009967, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.6013881153335334, "grad_norm": 5.884762763977051, "learning_rate": 1.8129422762232996e-05, "loss": 2.2046857833862306, "memory(GiB)": 72.85, "step": 84060, "token_acc": 0.4806201550387597, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.6016023306627822, "grad_norm": 4.646182537078857, "learning_rate": 1.812423760239359e-05, "loss": 2.2196285247802736, "memory(GiB)": 72.85, "step": 84065, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.601816545992031, "grad_norm": 5.612159252166748, "learning_rate": 1.8119053020013527e-05, "loss": 2.203695869445801, "memory(GiB)": 72.85, "step": 84070, "token_acc": 0.5303030303030303, "train_speed(iter/s)": 0.672668 }, { "epoch": 3.6020307613212803, "grad_norm": 6.2804083824157715, "learning_rate": 1.8113869015186702e-05, "loss": 1.9741334915161133, "memory(GiB)": 72.85, "step": 84075, "token_acc": 0.5924528301886792, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.602244976650529, "grad_norm": 7.026366710662842, "learning_rate": 1.8108685588007074e-05, "loss": 1.8907848358154298, "memory(GiB)": 72.85, "step": 84080, "token_acc": 0.5697674418604651, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.602459191979778, "grad_norm": 5.395017147064209, "learning_rate": 1.8103502738568524e-05, "loss": 2.370002555847168, "memory(GiB)": 72.85, "step": 84085, "token_acc": 0.523972602739726, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.602673407309027, "grad_norm": 5.050708293914795, "learning_rate": 1.8098320466964934e-05, "loss": 2.202016830444336, "memory(GiB)": 72.85, "step": 84090, "token_acc": 0.5482625482625483, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.602887622638276, "grad_norm": 7.107876777648926, "learning_rate": 1.8093138773290186e-05, "loss": 
2.136635589599609, "memory(GiB)": 72.85, "step": 84095, "token_acc": 0.5101351351351351, "train_speed(iter/s)": 0.672699 }, { "epoch": 3.603101837967525, "grad_norm": 8.374351501464844, "learning_rate": 1.808795765763818e-05, "loss": 2.184931182861328, "memory(GiB)": 72.85, "step": 84100, "token_acc": 0.5166666666666667, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.603316053296774, "grad_norm": 6.7656569480896, "learning_rate": 1.808277712010273e-05, "loss": 2.1438133239746096, "memory(GiB)": 72.85, "step": 84105, "token_acc": 0.5052631578947369, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.603530268626023, "grad_norm": 9.381121635437012, "learning_rate": 1.8077597160777736e-05, "loss": 2.006340217590332, "memory(GiB)": 72.85, "step": 84110, "token_acc": 0.5655172413793104, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.6037444839552717, "grad_norm": 6.051048755645752, "learning_rate": 1.8072417779757013e-05, "loss": 2.249217414855957, "memory(GiB)": 72.85, "step": 84115, "token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.603958699284521, "grad_norm": 5.672954559326172, "learning_rate": 1.8067238977134392e-05, "loss": 2.2702470779418946, "memory(GiB)": 72.85, "step": 84120, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.6041729146137698, "grad_norm": 4.59365701675415, "learning_rate": 1.8062060753003696e-05, "loss": 2.1824302673339844, "memory(GiB)": 72.85, "step": 84125, "token_acc": 0.5, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.6043871299430186, "grad_norm": 5.378287315368652, "learning_rate": 1.8056883107458726e-05, "loss": 2.293869400024414, "memory(GiB)": 72.85, "step": 84130, "token_acc": 0.5201149425287356, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.604601345272268, "grad_norm": 5.240545272827148, "learning_rate": 1.805170604059327e-05, "loss": 2.1200286865234377, "memory(GiB)": 72.85, "step": 84135, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672699 }, { 
"epoch": 3.6048155606015166, "grad_norm": 6.2536163330078125, "learning_rate": 1.8046529552501147e-05, "loss": 2.2401096343994142, "memory(GiB)": 72.85, "step": 84140, "token_acc": 0.5201238390092879, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.6050297759307655, "grad_norm": 5.977377891540527, "learning_rate": 1.804135364327611e-05, "loss": 1.9473196029663087, "memory(GiB)": 72.85, "step": 84145, "token_acc": 0.5726141078838174, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.6052439912600147, "grad_norm": 4.01804780960083, "learning_rate": 1.8036178313011937e-05, "loss": 2.1220603942871095, "memory(GiB)": 72.85, "step": 84150, "token_acc": 0.48698884758364314, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.6054582065892635, "grad_norm": 4.806979656219482, "learning_rate": 1.8031003561802373e-05, "loss": 2.1810813903808595, "memory(GiB)": 72.85, "step": 84155, "token_acc": 0.47540983606557374, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.6056724219185123, "grad_norm": 4.24669075012207, "learning_rate": 1.8025829389741156e-05, "loss": 2.0090969085693358, "memory(GiB)": 72.85, "step": 84160, "token_acc": 0.5559440559440559, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.6058866372477616, "grad_norm": 6.560534477233887, "learning_rate": 1.8020655796922053e-05, "loss": 2.230063247680664, "memory(GiB)": 72.85, "step": 84165, "token_acc": 0.5492063492063493, "train_speed(iter/s)": 0.67269 }, { "epoch": 3.6061008525770104, "grad_norm": 4.757352828979492, "learning_rate": 1.8015482783438748e-05, "loss": 2.17923526763916, "memory(GiB)": 72.85, "step": 84170, "token_acc": 0.5313432835820896, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.606315067906259, "grad_norm": 5.302896499633789, "learning_rate": 1.8010310349384997e-05, "loss": 2.39996395111084, "memory(GiB)": 72.85, "step": 84175, "token_acc": 0.4835164835164835, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.6065292832355085, "grad_norm": 5.385234355926514, "learning_rate": 1.8005138494854478e-05, "loss": 
2.052212142944336, "memory(GiB)": 72.85, "step": 84180, "token_acc": 0.5180327868852459, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.6067434985647573, "grad_norm": 4.5360636711120605, "learning_rate": 1.7999967219940887e-05, "loss": 1.9877574920654297, "memory(GiB)": 72.85, "step": 84185, "token_acc": 0.5669014084507042, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.606957713894006, "grad_norm": 6.15467643737793, "learning_rate": 1.7994796524737895e-05, "loss": 2.251808929443359, "memory(GiB)": 72.85, "step": 84190, "token_acc": 0.5054545454545455, "train_speed(iter/s)": 0.672699 }, { "epoch": 3.6071719292232554, "grad_norm": 4.524338245391846, "learning_rate": 1.7989626409339205e-05, "loss": 1.866582489013672, "memory(GiB)": 72.85, "step": 84195, "token_acc": 0.5617283950617284, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.607386144552504, "grad_norm": 6.08840274810791, "learning_rate": 1.7984456873838464e-05, "loss": 2.0908639907836912, "memory(GiB)": 72.85, "step": 84200, "token_acc": 0.5224913494809689, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.607600359881753, "grad_norm": 4.443761825561523, "learning_rate": 1.7979287918329313e-05, "loss": 2.140834999084473, "memory(GiB)": 72.85, "step": 84205, "token_acc": 0.5393586005830904, "train_speed(iter/s)": 0.672711 }, { "epoch": 3.6078145752110022, "grad_norm": 6.3730788230896, "learning_rate": 1.7974119542905405e-05, "loss": 2.3015039443969725, "memory(GiB)": 72.85, "step": 84210, "token_acc": 0.5192878338278932, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.608028790540251, "grad_norm": 5.094814300537109, "learning_rate": 1.7968951747660356e-05, "loss": 2.291636657714844, "memory(GiB)": 72.85, "step": 84215, "token_acc": 0.525, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.6082430058695, "grad_norm": 7.9930644035339355, "learning_rate": 1.7963784532687784e-05, "loss": 2.3449674606323243, "memory(GiB)": 72.85, "step": 84220, "token_acc": 0.521875, "train_speed(iter/s)": 0.672717 }, { "epoch": 
3.608457221198749, "grad_norm": 5.553504467010498, "learning_rate": 1.795861789808132e-05, "loss": 2.102061462402344, "memory(GiB)": 72.85, "step": 84225, "token_acc": 0.5283018867924528, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.608671436527998, "grad_norm": 6.2068915367126465, "learning_rate": 1.7953451843934554e-05, "loss": 2.0794532775878904, "memory(GiB)": 72.85, "step": 84230, "token_acc": 0.5655430711610487, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.6088856518572467, "grad_norm": 6.276862621307373, "learning_rate": 1.794828637034107e-05, "loss": 2.214021682739258, "memory(GiB)": 72.85, "step": 84235, "token_acc": 0.5382262996941896, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.609099867186496, "grad_norm": 5.205320835113525, "learning_rate": 1.7943121477394425e-05, "loss": 1.9232879638671876, "memory(GiB)": 72.85, "step": 84240, "token_acc": 0.5719557195571956, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.609314082515745, "grad_norm": 6.095168113708496, "learning_rate": 1.7937957165188226e-05, "loss": 2.235121154785156, "memory(GiB)": 72.85, "step": 84245, "token_acc": 0.4758364312267658, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.6095282978449936, "grad_norm": 4.834169387817383, "learning_rate": 1.793279343381599e-05, "loss": 2.13934211730957, "memory(GiB)": 72.85, "step": 84250, "token_acc": 0.5369774919614148, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.609742513174243, "grad_norm": 4.970468044281006, "learning_rate": 1.7927630283371304e-05, "loss": 2.274745559692383, "memory(GiB)": 72.85, "step": 84255, "token_acc": 0.5117647058823529, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.6099567285034917, "grad_norm": 4.809723377227783, "learning_rate": 1.7922467713947676e-05, "loss": 2.1237503051757813, "memory(GiB)": 72.85, "step": 84260, "token_acc": 0.49645390070921985, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.6101709438327405, "grad_norm": 5.06528902053833, "learning_rate": 1.7917305725638637e-05, "loss": 
2.396710968017578, "memory(GiB)": 72.85, "step": 84265, "token_acc": 0.5018450184501845, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.6103851591619898, "grad_norm": 5.921145915985107, "learning_rate": 1.7912144318537705e-05, "loss": 1.9969785690307618, "memory(GiB)": 72.85, "step": 84270, "token_acc": 0.5342960288808665, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.6105993744912386, "grad_norm": 5.660430431365967, "learning_rate": 1.7906983492738378e-05, "loss": 2.1267337799072266, "memory(GiB)": 72.85, "step": 84275, "token_acc": 0.5537974683544303, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.6108135898204874, "grad_norm": 4.502972602844238, "learning_rate": 1.790182324833413e-05, "loss": 1.946125602722168, "memory(GiB)": 72.85, "step": 84280, "token_acc": 0.5347432024169184, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.6110278051497366, "grad_norm": 5.689549922943115, "learning_rate": 1.789666358541849e-05, "loss": 1.9427949905395507, "memory(GiB)": 72.85, "step": 84285, "token_acc": 0.5774193548387097, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.6112420204789855, "grad_norm": 4.520853519439697, "learning_rate": 1.78915045040849e-05, "loss": 2.0711591720581053, "memory(GiB)": 72.85, "step": 84290, "token_acc": 0.5412541254125413, "train_speed(iter/s)": 0.67271 }, { "epoch": 3.6114562358082343, "grad_norm": 5.874118328094482, "learning_rate": 1.7886346004426822e-05, "loss": 2.314177703857422, "memory(GiB)": 72.85, "step": 84295, "token_acc": 0.49523809523809526, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.6116704511374835, "grad_norm": 6.289173126220703, "learning_rate": 1.7881188086537715e-05, "loss": 2.3304113388061523, "memory(GiB)": 72.85, "step": 84300, "token_acc": 0.5016722408026756, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.6118846664667323, "grad_norm": 6.199737548828125, "learning_rate": 1.7876030750510997e-05, "loss": 2.097001647949219, "memory(GiB)": 72.85, "step": 84305, "token_acc": 0.5571955719557196, 
"train_speed(iter/s)": 0.672701 }, { "epoch": 3.6120988817959816, "grad_norm": 4.340647220611572, "learning_rate": 1.7870873996440135e-05, "loss": 2.047115135192871, "memory(GiB)": 72.85, "step": 84310, "token_acc": 0.5414847161572053, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.6123130971252304, "grad_norm": 5.806946277618408, "learning_rate": 1.7865717824418517e-05, "loss": 2.133926582336426, "memory(GiB)": 72.85, "step": 84315, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672712 }, { "epoch": 3.6125273124544792, "grad_norm": 7.261683464050293, "learning_rate": 1.7860562234539573e-05, "loss": 1.7875415802001953, "memory(GiB)": 72.85, "step": 84320, "token_acc": 0.5752895752895753, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.6127415277837285, "grad_norm": 5.285200119018555, "learning_rate": 1.78554072268967e-05, "loss": 2.0330835342407227, "memory(GiB)": 72.85, "step": 84325, "token_acc": 0.5520833333333334, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.6129557431129773, "grad_norm": 6.244776725769043, "learning_rate": 1.785025280158328e-05, "loss": 2.187835121154785, "memory(GiB)": 72.85, "step": 84330, "token_acc": 0.5220125786163522, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.613169958442226, "grad_norm": 6.436563968658447, "learning_rate": 1.784509895869267e-05, "loss": 2.4338829040527346, "memory(GiB)": 72.85, "step": 84335, "token_acc": 0.4810126582278481, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.6133841737714754, "grad_norm": 7.422440052032471, "learning_rate": 1.7839945698318273e-05, "loss": 2.2467689514160156, "memory(GiB)": 72.85, "step": 84340, "token_acc": 0.5132075471698113, "train_speed(iter/s)": 0.672728 }, { "epoch": 3.613598389100724, "grad_norm": 5.653660297393799, "learning_rate": 1.7834793020553425e-05, "loss": 2.2973264694213866, "memory(GiB)": 72.85, "step": 84345, "token_acc": 0.4897435897435897, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.613812604429973, "grad_norm": 7.4673333168029785, 
"learning_rate": 1.782964092549148e-05, "loss": 2.3177490234375, "memory(GiB)": 72.85, "step": 84350, "token_acc": 0.46645367412140576, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.6140268197592222, "grad_norm": 5.762950897216797, "learning_rate": 1.7824489413225764e-05, "loss": 1.8496604919433595, "memory(GiB)": 72.85, "step": 84355, "token_acc": 0.5582191780821918, "train_speed(iter/s)": 0.672752 }, { "epoch": 3.614241035088471, "grad_norm": 4.913909435272217, "learning_rate": 1.781933848384961e-05, "loss": 2.4934772491455077, "memory(GiB)": 72.85, "step": 84360, "token_acc": 0.44673539518900346, "train_speed(iter/s)": 0.672744 }, { "epoch": 3.61445525041772, "grad_norm": 4.724314212799072, "learning_rate": 1.7814188137456307e-05, "loss": 1.993891143798828, "memory(GiB)": 72.85, "step": 84365, "token_acc": 0.5479041916167665, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.614669465746969, "grad_norm": 5.748486518859863, "learning_rate": 1.7809038374139192e-05, "loss": 2.3146581649780273, "memory(GiB)": 72.85, "step": 84370, "token_acc": 0.5060975609756098, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.614883681076218, "grad_norm": 5.615958213806152, "learning_rate": 1.7803889193991552e-05, "loss": 2.262790298461914, "memory(GiB)": 72.85, "step": 84375, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.672744 }, { "epoch": 3.6150978964054667, "grad_norm": 4.703610897064209, "learning_rate": 1.7798740597106656e-05, "loss": 2.048991012573242, "memory(GiB)": 72.85, "step": 84380, "token_acc": 0.535483870967742, "train_speed(iter/s)": 0.672756 }, { "epoch": 3.615312111734716, "grad_norm": 6.0177459716796875, "learning_rate": 1.7793592583577782e-05, "loss": 1.9861846923828126, "memory(GiB)": 72.85, "step": 84385, "token_acc": 0.5423728813559322, "train_speed(iter/s)": 0.672759 }, { "epoch": 3.615526327063965, "grad_norm": 6.974653720855713, "learning_rate": 1.778844515349818e-05, "loss": 1.994333839416504, "memory(GiB)": 72.85, "step": 84390, "token_acc": 
0.5719557195571956, "train_speed(iter/s)": 0.672772 }, { "epoch": 3.6157405423932136, "grad_norm": 5.528071403503418, "learning_rate": 1.7783298306961105e-05, "loss": 2.3694000244140625, "memory(GiB)": 72.85, "step": 84395, "token_acc": 0.5358166189111748, "train_speed(iter/s)": 0.672777 }, { "epoch": 3.615954757722463, "grad_norm": 6.272535800933838, "learning_rate": 1.777815204405982e-05, "loss": 2.313372802734375, "memory(GiB)": 72.85, "step": 84400, "token_acc": 0.5050847457627119, "train_speed(iter/s)": 0.672769 }, { "epoch": 3.6161689730517117, "grad_norm": 6.441710472106934, "learning_rate": 1.7773006364887534e-05, "loss": 2.284138870239258, "memory(GiB)": 72.85, "step": 84405, "token_acc": 0.5394736842105263, "train_speed(iter/s)": 0.672769 }, { "epoch": 3.6163831883809605, "grad_norm": 5.392933368682861, "learning_rate": 1.776786126953747e-05, "loss": 2.0096227645874025, "memory(GiB)": 72.85, "step": 84410, "token_acc": 0.5827586206896552, "train_speed(iter/s)": 0.67278 }, { "epoch": 3.6165974037102098, "grad_norm": 6.086761951446533, "learning_rate": 1.7762716758102842e-05, "loss": 2.268293571472168, "memory(GiB)": 72.85, "step": 84415, "token_acc": 0.5431309904153354, "train_speed(iter/s)": 0.672782 }, { "epoch": 3.6168116190394586, "grad_norm": 7.793637275695801, "learning_rate": 1.7757572830676837e-05, "loss": 1.8724014282226562, "memory(GiB)": 72.85, "step": 84420, "token_acc": 0.5855513307984791, "train_speed(iter/s)": 0.67278 }, { "epoch": 3.6170258343687074, "grad_norm": 5.595699310302734, "learning_rate": 1.775242948735263e-05, "loss": 2.251070022583008, "memory(GiB)": 72.85, "step": 84425, "token_acc": 0.49491525423728816, "train_speed(iter/s)": 0.672788 }, { "epoch": 3.6172400496979566, "grad_norm": 4.533681392669678, "learning_rate": 1.7747286728223434e-05, "loss": 1.8405261993408204, "memory(GiB)": 72.85, "step": 84430, "token_acc": 0.5669014084507042, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.6174542650272055, "grad_norm": 
6.398031711578369, "learning_rate": 1.774214455338239e-05, "loss": 2.005709648132324, "memory(GiB)": 72.85, "step": 84435, "token_acc": 0.5291970802919708, "train_speed(iter/s)": 0.672802 }, { "epoch": 3.6176684803564543, "grad_norm": 5.398040771484375, "learning_rate": 1.7737002962922655e-05, "loss": 2.0821563720703127, "memory(GiB)": 72.85, "step": 84440, "token_acc": 0.5519713261648745, "train_speed(iter/s)": 0.672804 }, { "epoch": 3.6178826956857035, "grad_norm": 6.113097667694092, "learning_rate": 1.773186195693738e-05, "loss": 2.148105239868164, "memory(GiB)": 72.85, "step": 84445, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.672805 }, { "epoch": 3.6180969110149523, "grad_norm": 6.035192966461182, "learning_rate": 1.7726721535519676e-05, "loss": 2.132969093322754, "memory(GiB)": 72.85, "step": 84450, "token_acc": 0.5302593659942363, "train_speed(iter/s)": 0.6728 }, { "epoch": 3.618311126344201, "grad_norm": 4.862796306610107, "learning_rate": 1.7721581698762708e-05, "loss": 2.0577632904052736, "memory(GiB)": 72.85, "step": 84455, "token_acc": 0.5429362880886427, "train_speed(iter/s)": 0.672809 }, { "epoch": 3.6185253416734504, "grad_norm": 5.698683738708496, "learning_rate": 1.7716442446759563e-05, "loss": 2.146503448486328, "memory(GiB)": 72.85, "step": 84460, "token_acc": 0.5464285714285714, "train_speed(iter/s)": 0.672807 }, { "epoch": 3.6187395570026992, "grad_norm": 5.711428642272949, "learning_rate": 1.7711303779603334e-05, "loss": 2.3553512573242186, "memory(GiB)": 72.85, "step": 84465, "token_acc": 0.4671280276816609, "train_speed(iter/s)": 0.672807 }, { "epoch": 3.618953772331948, "grad_norm": 6.235141754150391, "learning_rate": 1.770616569738714e-05, "loss": 1.9486991882324218, "memory(GiB)": 72.85, "step": 84470, "token_acc": 0.5525423728813559, "train_speed(iter/s)": 0.672795 }, { "epoch": 3.6191679876611973, "grad_norm": 6.301248550415039, "learning_rate": 1.7701028200204045e-05, "loss": 2.1230485916137694, "memory(GiB)": 72.85, 
"step": 84475, "token_acc": 0.5708812260536399, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.619382202990446, "grad_norm": 6.368499279022217, "learning_rate": 1.7695891288147114e-05, "loss": 2.2132007598876955, "memory(GiB)": 72.85, "step": 84480, "token_acc": 0.5362318840579711, "train_speed(iter/s)": 0.672783 }, { "epoch": 3.619596418319695, "grad_norm": 7.446050643920898, "learning_rate": 1.769075496130943e-05, "loss": 2.233881950378418, "memory(GiB)": 72.85, "step": 84485, "token_acc": 0.49146757679180886, "train_speed(iter/s)": 0.672775 }, { "epoch": 3.619810633648944, "grad_norm": 6.282371520996094, "learning_rate": 1.7685619219784027e-05, "loss": 2.077942657470703, "memory(GiB)": 72.85, "step": 84490, "token_acc": 0.504225352112676, "train_speed(iter/s)": 0.67278 }, { "epoch": 3.620024848978193, "grad_norm": 6.734011173248291, "learning_rate": 1.7680484063663942e-05, "loss": 2.170770263671875, "memory(GiB)": 72.85, "step": 84495, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.672781 }, { "epoch": 3.620239064307442, "grad_norm": 9.040629386901855, "learning_rate": 1.7675349493042216e-05, "loss": 2.4575599670410155, "memory(GiB)": 72.85, "step": 84500, "token_acc": 0.4934210526315789, "train_speed(iter/s)": 0.672786 }, { "epoch": 3.620239064307442, "eval_loss": 1.9988205432891846, "eval_runtime": 16.3569, "eval_samples_per_second": 6.114, "eval_steps_per_second": 6.114, "eval_token_acc": 0.5258278145695364, "step": 84500 }, { "epoch": 3.620453279636691, "grad_norm": 5.89414644241333, "learning_rate": 1.767021550801185e-05, "loss": 2.0683710098266603, "memory(GiB)": 72.85, "step": 84505, "token_acc": 0.5334608030592735, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.62066749496594, "grad_norm": 4.78681755065918, "learning_rate": 1.7665082108665842e-05, "loss": 2.0123939514160156, "memory(GiB)": 72.85, "step": 84510, "token_acc": 0.570957095709571, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.6208817102951887, "grad_norm": 5.060636043548584, 
"learning_rate": 1.7659949295097223e-05, "loss": 2.051582908630371, "memory(GiB)": 72.85, "step": 84515, "token_acc": 0.527972027972028, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.621095925624438, "grad_norm": 6.490627765655518, "learning_rate": 1.7654817067398956e-05, "loss": 2.237506103515625, "memory(GiB)": 72.85, "step": 84520, "token_acc": 0.5238095238095238, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.6213101409536868, "grad_norm": 4.724766731262207, "learning_rate": 1.7649685425664024e-05, "loss": 2.2553737640380858, "memory(GiB)": 72.85, "step": 84525, "token_acc": 0.49283667621776506, "train_speed(iter/s)": 0.672685 }, { "epoch": 3.6215243562829356, "grad_norm": 6.438952922821045, "learning_rate": 1.7644554369985382e-05, "loss": 2.2101160049438477, "memory(GiB)": 72.85, "step": 84530, "token_acc": 0.49707602339181284, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.621738571612185, "grad_norm": 5.785902500152588, "learning_rate": 1.763942390045598e-05, "loss": 2.1853036880493164, "memory(GiB)": 72.85, "step": 84535, "token_acc": 0.4966216216216216, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.6219527869414336, "grad_norm": 5.105207443237305, "learning_rate": 1.7634294017168767e-05, "loss": 2.357578659057617, "memory(GiB)": 72.85, "step": 84540, "token_acc": 0.4813664596273292, "train_speed(iter/s)": 0.672678 }, { "epoch": 3.6221670022706824, "grad_norm": 6.249039173126221, "learning_rate": 1.7629164720216694e-05, "loss": 2.106566619873047, "memory(GiB)": 72.85, "step": 84545, "token_acc": 0.5257731958762887, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.6223812175999317, "grad_norm": 4.888716697692871, "learning_rate": 1.7624036009692674e-05, "loss": 2.1778358459472655, "memory(GiB)": 72.85, "step": 84550, "token_acc": 0.5066225165562914, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.6225954329291805, "grad_norm": 4.321506023406982, "learning_rate": 1.761890788568961e-05, "loss": 2.0489887237548827, "memory(GiB)": 72.85, "step": 84555, 
"token_acc": 0.5479041916167665, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.6228096482584293, "grad_norm": 6.400816917419434, "learning_rate": 1.7613780348300408e-05, "loss": 2.148162269592285, "memory(GiB)": 72.85, "step": 84560, "token_acc": 0.5326460481099656, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.6230238635876786, "grad_norm": 6.689667224884033, "learning_rate": 1.760865339761795e-05, "loss": 1.9374799728393555, "memory(GiB)": 72.85, "step": 84565, "token_acc": 0.5601659751037344, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.6232380789169274, "grad_norm": 4.717325210571289, "learning_rate": 1.7603527033735105e-05, "loss": 2.109479522705078, "memory(GiB)": 72.85, "step": 84570, "token_acc": 0.5241379310344828, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.623452294246176, "grad_norm": 8.42790412902832, "learning_rate": 1.759840125674478e-05, "loss": 1.7990911483764649, "memory(GiB)": 72.85, "step": 84575, "token_acc": 0.5399239543726235, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.6236665095754255, "grad_norm": 6.783222675323486, "learning_rate": 1.7593276066739807e-05, "loss": 2.144712448120117, "memory(GiB)": 72.85, "step": 84580, "token_acc": 0.528125, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.6238807249046743, "grad_norm": 9.373225212097168, "learning_rate": 1.758815146381304e-05, "loss": 2.1792396545410155, "memory(GiB)": 72.85, "step": 84585, "token_acc": 0.5392491467576792, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.624094940233923, "grad_norm": 5.627621650695801, "learning_rate": 1.7583027448057317e-05, "loss": 2.1085681915283203, "memory(GiB)": 72.85, "step": 84590, "token_acc": 0.5183823529411765, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.6243091555631723, "grad_norm": 5.712549209594727, "learning_rate": 1.757790401956544e-05, "loss": 2.1052106857299804, "memory(GiB)": 72.85, "step": 84595, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.624523370892421, "grad_norm": 
6.060661792755127, "learning_rate": 1.7572781178430265e-05, "loss": 2.242770195007324, "memory(GiB)": 72.85, "step": 84600, "token_acc": 0.5119363395225465, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.62473758622167, "grad_norm": 6.327573776245117, "learning_rate": 1.7567658924744574e-05, "loss": 2.0802309036254885, "memory(GiB)": 72.85, "step": 84605, "token_acc": 0.56875, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.6249518015509192, "grad_norm": 5.819168567657471, "learning_rate": 1.7562537258601152e-05, "loss": 2.2941566467285157, "memory(GiB)": 72.85, "step": 84610, "token_acc": 0.4892086330935252, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.625166016880168, "grad_norm": 6.669523239135742, "learning_rate": 1.7557416180092818e-05, "loss": 1.8671133041381835, "memory(GiB)": 72.85, "step": 84615, "token_acc": 0.5594405594405595, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.625380232209417, "grad_norm": 5.6629767417907715, "learning_rate": 1.7552295689312313e-05, "loss": 2.270877456665039, "memory(GiB)": 72.85, "step": 84620, "token_acc": 0.5204460966542751, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.625594447538666, "grad_norm": 5.869470119476318, "learning_rate": 1.7547175786352403e-05, "loss": 1.6973211288452148, "memory(GiB)": 72.85, "step": 84625, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.625808662867915, "grad_norm": 7.544078826904297, "learning_rate": 1.7542056471305858e-05, "loss": 2.468820571899414, "memory(GiB)": 72.85, "step": 84630, "token_acc": 0.5243055555555556, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.6260228781971637, "grad_norm": 4.86293888092041, "learning_rate": 1.7536937744265407e-05, "loss": 2.383591651916504, "memory(GiB)": 72.85, "step": 84635, "token_acc": 0.5258620689655172, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.626237093526413, "grad_norm": 5.058652400970459, "learning_rate": 1.7531819605323784e-05, "loss": 2.201264190673828, "memory(GiB)": 72.85, "step": 84640, 
"token_acc": 0.5302013422818792, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.626451308855662, "grad_norm": 7.208418846130371, "learning_rate": 1.752670205457371e-05, "loss": 2.0269609451293946, "memory(GiB)": 72.85, "step": 84645, "token_acc": 0.540084388185654, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.6266655241849106, "grad_norm": 4.809017181396484, "learning_rate": 1.7521585092107888e-05, "loss": 2.057725524902344, "memory(GiB)": 72.85, "step": 84650, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.672687 }, { "epoch": 3.62687973951416, "grad_norm": 7.689036846160889, "learning_rate": 1.7516468718019e-05, "loss": 2.0880929946899416, "memory(GiB)": 72.85, "step": 84655, "token_acc": 0.5444444444444444, "train_speed(iter/s)": 0.67269 }, { "epoch": 3.6270939548434087, "grad_norm": 6.212381362915039, "learning_rate": 1.7511352932399772e-05, "loss": 2.005510139465332, "memory(GiB)": 72.85, "step": 84660, "token_acc": 0.5676691729323309, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.6273081701726575, "grad_norm": 5.925207138061523, "learning_rate": 1.750623773534286e-05, "loss": 2.2851877212524414, "memory(GiB)": 72.85, "step": 84665, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.672676 }, { "epoch": 3.6275223855019068, "grad_norm": 4.9826178550720215, "learning_rate": 1.750112312694094e-05, "loss": 2.2767425537109376, "memory(GiB)": 72.85, "step": 84670, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.6277366008311556, "grad_norm": 5.125367641448975, "learning_rate": 1.7496009107286655e-05, "loss": 2.099270057678223, "memory(GiB)": 72.85, "step": 84675, "token_acc": 0.5498489425981873, "train_speed(iter/s)": 0.672684 }, { "epoch": 3.6279508161604044, "grad_norm": 4.769040107727051, "learning_rate": 1.749089567647264e-05, "loss": 2.024730682373047, "memory(GiB)": 72.85, "step": 84680, "token_acc": 0.5316901408450704, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.6281650314896536, "grad_norm": 
5.533544540405273, "learning_rate": 1.7485782834591546e-05, "loss": 2.1025293350219725, "memory(GiB)": 72.85, "step": 84685, "token_acc": 0.53125, "train_speed(iter/s)": 0.672684 }, { "epoch": 3.6283792468189024, "grad_norm": 7.585317611694336, "learning_rate": 1.7480670581736012e-05, "loss": 1.9212045669555664, "memory(GiB)": 72.85, "step": 84690, "token_acc": 0.586046511627907, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.6285934621481513, "grad_norm": 4.898203372955322, "learning_rate": 1.7475558917998642e-05, "loss": 2.2803203582763674, "memory(GiB)": 72.85, "step": 84695, "token_acc": 0.5162337662337663, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.6288076774774005, "grad_norm": 5.5434184074401855, "learning_rate": 1.7470447843472027e-05, "loss": 1.9457368850708008, "memory(GiB)": 72.85, "step": 84700, "token_acc": 0.5755102040816327, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.6290218928066493, "grad_norm": 5.985960483551025, "learning_rate": 1.7465337358248768e-05, "loss": 2.0477006912231444, "memory(GiB)": 72.85, "step": 84705, "token_acc": 0.5753424657534246, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.629236108135898, "grad_norm": 6.913252353668213, "learning_rate": 1.7460227462421442e-05, "loss": 2.073689842224121, "memory(GiB)": 72.85, "step": 84710, "token_acc": 0.5604395604395604, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.6294503234651474, "grad_norm": 4.71458101272583, "learning_rate": 1.74551181560826e-05, "loss": 2.0040666580200197, "memory(GiB)": 72.85, "step": 84715, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.672684 }, { "epoch": 3.629664538794396, "grad_norm": 6.029065132141113, "learning_rate": 1.7450009439324845e-05, "loss": 2.128590202331543, "memory(GiB)": 72.85, "step": 84720, "token_acc": 0.5418181818181819, "train_speed(iter/s)": 0.672687 }, { "epoch": 3.629878754123645, "grad_norm": 7.513161659240723, "learning_rate": 1.7444901312240697e-05, "loss": 2.3841197967529295, "memory(GiB)": 72.85, "step": 
84725, "token_acc": 0.5080385852090032, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.6300929694528943, "grad_norm": 7.97020959854126, "learning_rate": 1.74397937749227e-05, "loss": 2.0312034606933596, "memory(GiB)": 72.85, "step": 84730, "token_acc": 0.5390070921985816, "train_speed(iter/s)": 0.672687 }, { "epoch": 3.630307184782143, "grad_norm": 5.829135417938232, "learning_rate": 1.7434686827463392e-05, "loss": 2.5340402603149412, "memory(GiB)": 72.85, "step": 84735, "token_acc": 0.45565749235474007, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.630521400111392, "grad_norm": 6.104176998138428, "learning_rate": 1.742958046995526e-05, "loss": 1.9082902908325194, "memory(GiB)": 72.85, "step": 84740, "token_acc": 0.5321888412017167, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.630735615440641, "grad_norm": 6.8145976066589355, "learning_rate": 1.7424474702490845e-05, "loss": 2.1736949920654296, "memory(GiB)": 72.85, "step": 84745, "token_acc": 0.5212121212121212, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.63094983076989, "grad_norm": 4.833029270172119, "learning_rate": 1.7419369525162633e-05, "loss": 1.9313934326171875, "memory(GiB)": 72.85, "step": 84750, "token_acc": 0.5992779783393501, "train_speed(iter/s)": 0.67271 }, { "epoch": 3.631164046099139, "grad_norm": 5.415891170501709, "learning_rate": 1.7414264938063102e-05, "loss": 1.7950664520263673, "memory(GiB)": 72.85, "step": 84755, "token_acc": 0.5866141732283464, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.631378261428388, "grad_norm": 5.2798309326171875, "learning_rate": 1.7409160941284712e-05, "loss": 2.1691509246826173, "memory(GiB)": 72.85, "step": 84760, "token_acc": 0.5389830508474577, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.631592476757637, "grad_norm": 8.641988754272461, "learning_rate": 1.740405753491996e-05, "loss": 2.349589729309082, "memory(GiB)": 72.85, "step": 84765, "token_acc": 0.5290322580645161, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.6318066920868857, 
"grad_norm": 4.571622371673584, "learning_rate": 1.7398954719061273e-05, "loss": 1.9534122467041015, "memory(GiB)": 72.85, "step": 84770, "token_acc": 0.55, "train_speed(iter/s)": 0.672742 }, { "epoch": 3.632020907416135, "grad_norm": 5.809561252593994, "learning_rate": 1.7393852493801114e-05, "loss": 2.1898208618164063, "memory(GiB)": 72.85, "step": 84775, "token_acc": 0.49696969696969695, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.6322351227453837, "grad_norm": 7.86251163482666, "learning_rate": 1.7388750859231907e-05, "loss": 2.0341907501220704, "memory(GiB)": 72.85, "step": 84780, "token_acc": 0.555956678700361, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.6324493380746325, "grad_norm": 8.484630584716797, "learning_rate": 1.7383649815446067e-05, "loss": 1.9686798095703124, "memory(GiB)": 72.85, "step": 84785, "token_acc": 0.5571428571428572, "train_speed(iter/s)": 0.672728 }, { "epoch": 3.632663553403882, "grad_norm": 7.440064430236816, "learning_rate": 1.737854936253601e-05, "loss": 2.2608110427856447, "memory(GiB)": 72.85, "step": 84790, "token_acc": 0.5080645161290323, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.6328777687331306, "grad_norm": 10.355185508728027, "learning_rate": 1.7373449500594137e-05, "loss": 1.9604061126708985, "memory(GiB)": 72.85, "step": 84795, "token_acc": 0.5359281437125748, "train_speed(iter/s)": 0.672715 }, { "epoch": 3.6330919840623794, "grad_norm": 4.100813865661621, "learning_rate": 1.736835022971281e-05, "loss": 2.0796857833862306, "memory(GiB)": 72.85, "step": 84800, "token_acc": 0.5348101265822784, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.6333061993916287, "grad_norm": 6.497685432434082, "learning_rate": 1.7363251549984445e-05, "loss": 2.111224555969238, "memory(GiB)": 72.85, "step": 84805, "token_acc": 0.555956678700361, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.6335204147208775, "grad_norm": 4.707677364349365, "learning_rate": 1.7358153461501396e-05, "loss": 2.5880002975463867, "memory(GiB)": 72.85, 
"step": 84810, "token_acc": 0.4899328859060403, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.6337346300501263, "grad_norm": 5.914585113525391, "learning_rate": 1.7353055964356018e-05, "loss": 2.068866157531738, "memory(GiB)": 72.85, "step": 84815, "token_acc": 0.5358255451713395, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.6339488453793756, "grad_norm": 4.2580647468566895, "learning_rate": 1.7347959058640655e-05, "loss": 1.9141191482543944, "memory(GiB)": 72.85, "step": 84820, "token_acc": 0.5870445344129555, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.6341630607086244, "grad_norm": 5.49806547164917, "learning_rate": 1.734286274444763e-05, "loss": 2.236845588684082, "memory(GiB)": 72.85, "step": 84825, "token_acc": 0.5150501672240803, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.634377276037873, "grad_norm": 7.191650390625, "learning_rate": 1.7337767021869295e-05, "loss": 2.2141139984130858, "memory(GiB)": 72.85, "step": 84830, "token_acc": 0.5125448028673835, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.6345914913671225, "grad_norm": 5.47359561920166, "learning_rate": 1.7332671890997936e-05, "loss": 1.9989664077758789, "memory(GiB)": 72.85, "step": 84835, "token_acc": 0.5793103448275863, "train_speed(iter/s)": 0.672736 }, { "epoch": 3.6348057066963713, "grad_norm": 5.9287261962890625, "learning_rate": 1.7327577351925883e-05, "loss": 2.079513931274414, "memory(GiB)": 72.85, "step": 84840, "token_acc": 0.5292096219931272, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.63501992202562, "grad_norm": 5.297179698944092, "learning_rate": 1.732248340474542e-05, "loss": 2.1377193450927736, "memory(GiB)": 72.85, "step": 84845, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.6352341373548693, "grad_norm": 5.153276443481445, "learning_rate": 1.731739004954882e-05, "loss": 1.8845218658447265, "memory(GiB)": 72.85, "step": 84850, "token_acc": 0.5522388059701493, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.635448352684118, 
"grad_norm": 4.34396505355835, "learning_rate": 1.7312297286428347e-05, "loss": 2.3261348724365236, "memory(GiB)": 72.85, "step": 84855, "token_acc": 0.5424657534246575, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.635662568013367, "grad_norm": 4.95023250579834, "learning_rate": 1.7307205115476282e-05, "loss": 2.287228584289551, "memory(GiB)": 72.85, "step": 84860, "token_acc": 0.5069444444444444, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.635876783342616, "grad_norm": 7.463078498840332, "learning_rate": 1.730211353678487e-05, "loss": 2.340835952758789, "memory(GiB)": 72.85, "step": 84865, "token_acc": 0.4931506849315068, "train_speed(iter/s)": 0.672742 }, { "epoch": 3.636090998671865, "grad_norm": 4.864620685577393, "learning_rate": 1.7297022550446347e-05, "loss": 2.361930274963379, "memory(GiB)": 72.85, "step": 84870, "token_acc": 0.5153374233128835, "train_speed(iter/s)": 0.672744 }, { "epoch": 3.636305214001114, "grad_norm": 6.896915435791016, "learning_rate": 1.7291932156552933e-05, "loss": 2.1485225677490236, "memory(GiB)": 72.85, "step": 84875, "token_acc": 0.5648854961832062, "train_speed(iter/s)": 0.67275 }, { "epoch": 3.636519429330363, "grad_norm": 5.895974159240723, "learning_rate": 1.7286842355196846e-05, "loss": 2.2366973876953127, "memory(GiB)": 72.85, "step": 84880, "token_acc": 0.5369774919614148, "train_speed(iter/s)": 0.67276 }, { "epoch": 3.636733644659612, "grad_norm": 6.154411792755127, "learning_rate": 1.7281753146470285e-05, "loss": 2.1743852615356447, "memory(GiB)": 72.85, "step": 84885, "token_acc": 0.49236641221374045, "train_speed(iter/s)": 0.672758 }, { "epoch": 3.6369478599888607, "grad_norm": 7.053438186645508, "learning_rate": 1.727666453046548e-05, "loss": 2.0728872299194334, "memory(GiB)": 72.85, "step": 84890, "token_acc": 0.5412541254125413, "train_speed(iter/s)": 0.672766 }, { "epoch": 3.63716207531811, "grad_norm": 4.641865253448486, "learning_rate": 1.7271576507274588e-05, "loss": 2.111861801147461, "memory(GiB)": 
72.85, "step": 84895, "token_acc": 0.5578947368421052, "train_speed(iter/s)": 0.672763 }, { "epoch": 3.637376290647359, "grad_norm": 4.369584083557129, "learning_rate": 1.726648907698979e-05, "loss": 1.8957429885864259, "memory(GiB)": 72.85, "step": 84900, "token_acc": 0.5736196319018405, "train_speed(iter/s)": 0.672764 }, { "epoch": 3.6375905059766076, "grad_norm": 5.66689395904541, "learning_rate": 1.7261402239703235e-05, "loss": 1.7809181213378906, "memory(GiB)": 72.85, "step": 84905, "token_acc": 0.5993031358885017, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.637804721305857, "grad_norm": 4.6783037185668945, "learning_rate": 1.7256315995507107e-05, "loss": 1.9471401214599608, "memory(GiB)": 72.85, "step": 84910, "token_acc": 0.5620915032679739, "train_speed(iter/s)": 0.672768 }, { "epoch": 3.6380189366351057, "grad_norm": 7.444029808044434, "learning_rate": 1.7251230344493508e-05, "loss": 2.1278480529785155, "memory(GiB)": 72.85, "step": 84915, "token_acc": 0.572463768115942, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.6382331519643545, "grad_norm": 6.272411346435547, "learning_rate": 1.7246145286754612e-05, "loss": 1.984079360961914, "memory(GiB)": 72.85, "step": 84920, "token_acc": 0.5472312703583062, "train_speed(iter/s)": 0.672761 }, { "epoch": 3.6384473672936037, "grad_norm": 5.182465076446533, "learning_rate": 1.724106082238252e-05, "loss": 2.2608192443847654, "memory(GiB)": 72.85, "step": 84925, "token_acc": 0.5088235294117647, "train_speed(iter/s)": 0.672768 }, { "epoch": 3.6386615826228526, "grad_norm": 7.1925506591796875, "learning_rate": 1.723597695146934e-05, "loss": 2.1130762100219727, "memory(GiB)": 72.85, "step": 84930, "token_acc": 0.5439093484419264, "train_speed(iter/s)": 0.672766 }, { "epoch": 3.6388757979521014, "grad_norm": 7.825403690338135, "learning_rate": 1.723089367410718e-05, "loss": 2.0210407257080076, "memory(GiB)": 72.85, "step": 84935, "token_acc": 0.5520231213872833, "train_speed(iter/s)": 0.672771 }, { "epoch": 
3.6390900132813506, "grad_norm": 6.334887981414795, "learning_rate": 1.7225810990388114e-05, "loss": 2.204931640625, "memory(GiB)": 72.85, "step": 84940, "token_acc": 0.4982456140350877, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.6393042286105994, "grad_norm": 6.684940814971924, "learning_rate": 1.7220728900404208e-05, "loss": 2.2938631057739256, "memory(GiB)": 72.85, "step": 84945, "token_acc": 0.4461538461538462, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.6395184439398482, "grad_norm": 6.514469146728516, "learning_rate": 1.721564740424757e-05, "loss": 2.1328813552856447, "memory(GiB)": 72.85, "step": 84950, "token_acc": 0.5697211155378487, "train_speed(iter/s)": 0.672776 }, { "epoch": 3.6397326592690975, "grad_norm": 6.63431453704834, "learning_rate": 1.721056650201023e-05, "loss": 2.0640350341796876, "memory(GiB)": 72.85, "step": 84955, "token_acc": 0.5512820512820513, "train_speed(iter/s)": 0.672774 }, { "epoch": 3.6399468745983463, "grad_norm": 6.33659553527832, "learning_rate": 1.7205486193784237e-05, "loss": 2.1261455535888674, "memory(GiB)": 72.85, "step": 84960, "token_acc": 0.5261324041811847, "train_speed(iter/s)": 0.672768 }, { "epoch": 3.640161089927595, "grad_norm": 6.0818634033203125, "learning_rate": 1.7200406479661624e-05, "loss": 2.0976930618286134, "memory(GiB)": 72.85, "step": 84965, "token_acc": 0.5276872964169381, "train_speed(iter/s)": 0.672774 }, { "epoch": 3.6403753052568444, "grad_norm": 5.642204761505127, "learning_rate": 1.7195327359734403e-05, "loss": 1.9559902191162108, "memory(GiB)": 72.85, "step": 84970, "token_acc": 0.5748299319727891, "train_speed(iter/s)": 0.672775 }, { "epoch": 3.640589520586093, "grad_norm": 4.802664756774902, "learning_rate": 1.7190248834094612e-05, "loss": 1.8669340133666992, "memory(GiB)": 72.85, "step": 84975, "token_acc": 0.6147859922178989, "train_speed(iter/s)": 0.672778 }, { "epoch": 3.640803735915342, "grad_norm": 6.342477798461914, "learning_rate": 1.718517090283423e-05, "loss": 
2.121771812438965, "memory(GiB)": 72.85, "step": 84980, "token_acc": 0.592, "train_speed(iter/s)": 0.672777 }, { "epoch": 3.6410179512445913, "grad_norm": 9.353209495544434, "learning_rate": 1.7180093566045274e-05, "loss": 2.04444580078125, "memory(GiB)": 72.85, "step": 84985, "token_acc": 0.5735294117647058, "train_speed(iter/s)": 0.672779 }, { "epoch": 3.64123216657384, "grad_norm": 5.442655086517334, "learning_rate": 1.7175016823819712e-05, "loss": 1.865057373046875, "memory(GiB)": 72.85, "step": 84990, "token_acc": 0.5703703703703704, "train_speed(iter/s)": 0.672787 }, { "epoch": 3.641446381903089, "grad_norm": 5.718019962310791, "learning_rate": 1.716994067624951e-05, "loss": 2.15108699798584, "memory(GiB)": 72.85, "step": 84995, "token_acc": 0.542319749216301, "train_speed(iter/s)": 0.672793 }, { "epoch": 3.641660597232338, "grad_norm": 6.470370769500732, "learning_rate": 1.7164865123426615e-05, "loss": 2.0764677047729494, "memory(GiB)": 72.85, "step": 85000, "token_acc": 0.5451807228915663, "train_speed(iter/s)": 0.672801 }, { "epoch": 3.641660597232338, "eval_loss": 2.144317388534546, "eval_runtime": 16.1, "eval_samples_per_second": 6.211, "eval_steps_per_second": 6.211, "eval_token_acc": 0.5012562814070352, "step": 85000 }, { "epoch": 3.641874812561587, "grad_norm": 4.603091716766357, "learning_rate": 1.7159790165443008e-05, "loss": 2.1818815231323243, "memory(GiB)": 72.85, "step": 85005, "token_acc": 0.5, "train_speed(iter/s)": 0.672712 }, { "epoch": 3.6420890278908358, "grad_norm": 5.407954692840576, "learning_rate": 1.715471580239061e-05, "loss": 1.8619421005249024, "memory(GiB)": 72.85, "step": 85010, "token_acc": 0.5523465703971119, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.642303243220085, "grad_norm": 7.650449275970459, "learning_rate": 1.714964203436134e-05, "loss": 2.0322710037231446, "memory(GiB)": 72.85, "step": 85015, "token_acc": 0.5133333333333333, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.642517458549334, "grad_norm": 
6.965291976928711, "learning_rate": 1.7144568861447125e-05, "loss": 2.3463167190551757, "memory(GiB)": 72.85, "step": 85020, "token_acc": 0.5291828793774319, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.6427316738785827, "grad_norm": 5.241476058959961, "learning_rate": 1.713949628373987e-05, "loss": 2.251919174194336, "memory(GiB)": 72.85, "step": 85025, "token_acc": 0.5141065830721003, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.642945889207832, "grad_norm": 4.476888179779053, "learning_rate": 1.713442430133144e-05, "loss": 2.2172929763793947, "memory(GiB)": 72.85, "step": 85030, "token_acc": 0.5313531353135313, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.6431601045370807, "grad_norm": 5.803266525268555, "learning_rate": 1.7129352914313768e-05, "loss": 2.2910400390625, "memory(GiB)": 72.85, "step": 85035, "token_acc": 0.4772727272727273, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.6433743198663295, "grad_norm": 7.590466022491455, "learning_rate": 1.7124282122778702e-05, "loss": 1.9206207275390625, "memory(GiB)": 72.85, "step": 85040, "token_acc": 0.5229681978798587, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.643588535195579, "grad_norm": 6.169946193695068, "learning_rate": 1.71192119268181e-05, "loss": 2.2240264892578123, "memory(GiB)": 72.85, "step": 85045, "token_acc": 0.5218855218855218, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.6438027505248276, "grad_norm": 5.483342170715332, "learning_rate": 1.7114142326523823e-05, "loss": 1.7955514907836914, "memory(GiB)": 72.85, "step": 85050, "token_acc": 0.5981308411214953, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.6440169658540764, "grad_norm": 8.380117416381836, "learning_rate": 1.710907332198769e-05, "loss": 2.2257429122924806, "memory(GiB)": 72.85, "step": 85055, "token_acc": 0.5276752767527675, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.6442311811833257, "grad_norm": 5.6009039878845215, "learning_rate": 1.7104004913301548e-05, "loss": 2.035983657836914, "memory(GiB)": 72.85, 
"step": 85060, "token_acc": 0.5405405405405406, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.6444453965125745, "grad_norm": 5.265610694885254, "learning_rate": 1.7098937100557232e-05, "loss": 2.366085433959961, "memory(GiB)": 72.85, "step": 85065, "token_acc": 0.5054347826086957, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.6446596118418233, "grad_norm": 5.411501407623291, "learning_rate": 1.7093869883846535e-05, "loss": 2.1876659393310547, "memory(GiB)": 72.85, "step": 85070, "token_acc": 0.5547703180212014, "train_speed(iter/s)": 0.67275 }, { "epoch": 3.6448738271710726, "grad_norm": 5.275345802307129, "learning_rate": 1.7088803263261255e-05, "loss": 1.9654539108276368, "memory(GiB)": 72.85, "step": 85075, "token_acc": 0.5451505016722408, "train_speed(iter/s)": 0.672751 }, { "epoch": 3.6450880425003214, "grad_norm": 7.94677209854126, "learning_rate": 1.7083737238893173e-05, "loss": 2.2937511444091796, "memory(GiB)": 72.85, "step": 85080, "token_acc": 0.5018587360594795, "train_speed(iter/s)": 0.67276 }, { "epoch": 3.64530225782957, "grad_norm": 7.068404674530029, "learning_rate": 1.707867181083407e-05, "loss": 2.4926599502563476, "memory(GiB)": 72.85, "step": 85085, "token_acc": 0.46546546546546547, "train_speed(iter/s)": 0.672755 }, { "epoch": 3.6455164731588194, "grad_norm": 7.128543853759766, "learning_rate": 1.7073606979175693e-05, "loss": 2.092765998840332, "memory(GiB)": 72.85, "step": 85090, "token_acc": 0.5571428571428572, "train_speed(iter/s)": 0.672758 }, { "epoch": 3.6457306884880682, "grad_norm": 5.3195271492004395, "learning_rate": 1.706854274400983e-05, "loss": 2.2730236053466797, "memory(GiB)": 72.85, "step": 85095, "token_acc": 0.4937106918238994, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.645944903817317, "grad_norm": 4.326260566711426, "learning_rate": 1.7063479105428208e-05, "loss": 2.0364675521850586, "memory(GiB)": 72.85, "step": 85100, "token_acc": 0.5664556962025317, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.6461591191465663, 
"grad_norm": 6.501736640930176, "learning_rate": 1.7058416063522558e-05, "loss": 2.2152145385742186, "memory(GiB)": 72.85, "step": 85105, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672761 }, { "epoch": 3.646373334475815, "grad_norm": 5.710597038269043, "learning_rate": 1.705335361838461e-05, "loss": 1.921211051940918, "memory(GiB)": 72.85, "step": 85110, "token_acc": 0.5532646048109966, "train_speed(iter/s)": 0.672761 }, { "epoch": 3.646587549805064, "grad_norm": 4.237960338592529, "learning_rate": 1.704829177010604e-05, "loss": 2.1984046936035155, "memory(GiB)": 72.85, "step": 85115, "token_acc": 0.575187969924812, "train_speed(iter/s)": 0.672773 }, { "epoch": 3.646801765134313, "grad_norm": 6.042526721954346, "learning_rate": 1.7043230518778598e-05, "loss": 2.0796180725097657, "memory(GiB)": 72.85, "step": 85120, "token_acc": 0.5422535211267606, "train_speed(iter/s)": 0.672781 }, { "epoch": 3.647015980463562, "grad_norm": 5.785332202911377, "learning_rate": 1.7038169864493953e-05, "loss": 2.1241758346557615, "memory(GiB)": 72.85, "step": 85125, "token_acc": 0.5224719101123596, "train_speed(iter/s)": 0.672781 }, { "epoch": 3.647230195792811, "grad_norm": 4.781090259552002, "learning_rate": 1.703310980734376e-05, "loss": 2.0714300155639647, "memory(GiB)": 72.85, "step": 85130, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672779 }, { "epoch": 3.64744441112206, "grad_norm": 5.936593532562256, "learning_rate": 1.702805034741973e-05, "loss": 2.2429225921630858, "memory(GiB)": 72.85, "step": 85135, "token_acc": 0.49615384615384617, "train_speed(iter/s)": 0.672788 }, { "epoch": 3.647658626451309, "grad_norm": 6.204736232757568, "learning_rate": 1.7022991484813495e-05, "loss": 2.3217239379882812, "memory(GiB)": 72.85, "step": 85140, "token_acc": 0.4827586206896552, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.6478728417805577, "grad_norm": 5.818300724029541, "learning_rate": 1.701793321961669e-05, "loss": 2.164969062805176, "memory(GiB)": 
72.85, "step": 85145, "token_acc": 0.5130111524163569, "train_speed(iter/s)": 0.672789 }, { "epoch": 3.648087057109807, "grad_norm": 5.9749016761779785, "learning_rate": 1.7012875551920976e-05, "loss": 2.0725419998168944, "memory(GiB)": 72.85, "step": 85150, "token_acc": 0.525, "train_speed(iter/s)": 0.672795 }, { "epoch": 3.6483012724390558, "grad_norm": 5.662809371948242, "learning_rate": 1.7007818481817967e-05, "loss": 2.475834274291992, "memory(GiB)": 72.85, "step": 85155, "token_acc": 0.5016611295681063, "train_speed(iter/s)": 0.672802 }, { "epoch": 3.6485154877683046, "grad_norm": 5.4963555335998535, "learning_rate": 1.7002762009399283e-05, "loss": 2.1045766830444337, "memory(GiB)": 72.85, "step": 85160, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.6728 }, { "epoch": 3.648729703097554, "grad_norm": 6.66295862197876, "learning_rate": 1.699770613475651e-05, "loss": 2.0663625717163088, "memory(GiB)": 72.85, "step": 85165, "token_acc": 0.5407166123778502, "train_speed(iter/s)": 0.672804 }, { "epoch": 3.6489439184268027, "grad_norm": 7.274508953094482, "learning_rate": 1.6992650857981252e-05, "loss": 2.2148292541503904, "memory(GiB)": 72.85, "step": 85170, "token_acc": 0.5168539325842697, "train_speed(iter/s)": 0.672808 }, { "epoch": 3.6491581337560515, "grad_norm": 6.828995704650879, "learning_rate": 1.6987596179165072e-05, "loss": 2.110117530822754, "memory(GiB)": 72.85, "step": 85175, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672807 }, { "epoch": 3.6493723490853007, "grad_norm": 6.798620223999023, "learning_rate": 1.6982542098399567e-05, "loss": 2.337051582336426, "memory(GiB)": 72.85, "step": 85180, "token_acc": 0.4916387959866221, "train_speed(iter/s)": 0.672813 }, { "epoch": 3.6495865644145495, "grad_norm": 5.197806358337402, "learning_rate": 1.6977488615776288e-05, "loss": 2.3070987701416015, "memory(GiB)": 72.85, "step": 85185, "token_acc": 0.5144508670520231, "train_speed(iter/s)": 0.672817 }, { "epoch": 3.6498007797437984, 
"grad_norm": 6.935415267944336, "learning_rate": 1.6972435731386783e-05, "loss": 1.9176692962646484, "memory(GiB)": 72.85, "step": 85190, "token_acc": 0.6021505376344086, "train_speed(iter/s)": 0.672824 }, { "epoch": 3.6500149950730476, "grad_norm": 7.060267925262451, "learning_rate": 1.6967383445322588e-05, "loss": 2.2519203186035157, "memory(GiB)": 72.85, "step": 85195, "token_acc": 0.4946236559139785, "train_speed(iter/s)": 0.672812 }, { "epoch": 3.6502292104022964, "grad_norm": 6.345497131347656, "learning_rate": 1.6962331757675203e-05, "loss": 2.111918640136719, "memory(GiB)": 72.85, "step": 85200, "token_acc": 0.5323741007194245, "train_speed(iter/s)": 0.6728 }, { "epoch": 3.6504434257315452, "grad_norm": 5.495755195617676, "learning_rate": 1.695728066853618e-05, "loss": 2.4712345123291017, "memory(GiB)": 72.85, "step": 85205, "token_acc": 0.5133333333333333, "train_speed(iter/s)": 0.6728 }, { "epoch": 3.6506576410607945, "grad_norm": 5.239195346832275, "learning_rate": 1.6952230177997024e-05, "loss": 2.119909477233887, "memory(GiB)": 72.85, "step": 85210, "token_acc": 0.5222551928783383, "train_speed(iter/s)": 0.672793 }, { "epoch": 3.6508718563900433, "grad_norm": 5.526941299438477, "learning_rate": 1.694718028614923e-05, "loss": 1.9610214233398438, "memory(GiB)": 72.85, "step": 85215, "token_acc": 0.5815602836879432, "train_speed(iter/s)": 0.6728 }, { "epoch": 3.651086071719292, "grad_norm": 5.866941452026367, "learning_rate": 1.694213099308426e-05, "loss": 2.445271873474121, "memory(GiB)": 72.85, "step": 85220, "token_acc": 0.4601063829787234, "train_speed(iter/s)": 0.67278 }, { "epoch": 3.6513002870485414, "grad_norm": 7.33018159866333, "learning_rate": 1.6937082298893602e-05, "loss": 2.365734672546387, "memory(GiB)": 72.85, "step": 85225, "token_acc": 0.4923469387755102, "train_speed(iter/s)": 0.672773 }, { "epoch": 3.65151450237779, "grad_norm": 4.891249179840088, "learning_rate": 1.6932034203668713e-05, "loss": 1.9533138275146484, "memory(GiB)": 
72.85, "step": 85230, "token_acc": 0.5398550724637681, "train_speed(iter/s)": 0.672776 }, { "epoch": 3.651728717707039, "grad_norm": 6.659483432769775, "learning_rate": 1.692698670750103e-05, "loss": 2.354311943054199, "memory(GiB)": 72.85, "step": 85235, "token_acc": 0.5147058823529411, "train_speed(iter/s)": 0.672777 }, { "epoch": 3.6519429330362883, "grad_norm": 9.112658500671387, "learning_rate": 1.6921939810482023e-05, "loss": 2.206976890563965, "memory(GiB)": 72.85, "step": 85240, "token_acc": 0.5401459854014599, "train_speed(iter/s)": 0.672763 }, { "epoch": 3.652157148365537, "grad_norm": 4.409562587738037, "learning_rate": 1.691689351270311e-05, "loss": 2.1059799194335938, "memory(GiB)": 72.85, "step": 85245, "token_acc": 0.5147540983606558, "train_speed(iter/s)": 0.672766 }, { "epoch": 3.652371363694786, "grad_norm": 6.116556167602539, "learning_rate": 1.6911847814255698e-05, "loss": 2.1212596893310547, "memory(GiB)": 72.85, "step": 85250, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.652585579024035, "grad_norm": 4.842987537384033, "learning_rate": 1.6906802715231206e-05, "loss": 2.076837921142578, "memory(GiB)": 72.85, "step": 85255, "token_acc": 0.531055900621118, "train_speed(iter/s)": 0.672763 }, { "epoch": 3.652799794353284, "grad_norm": 6.469358921051025, "learning_rate": 1.690175821572101e-05, "loss": 2.1362314224243164, "memory(GiB)": 72.85, "step": 85260, "token_acc": 0.5369774919614148, "train_speed(iter/s)": 0.672764 }, { "epoch": 3.6530140096825328, "grad_norm": 4.682540416717529, "learning_rate": 1.6896714315816524e-05, "loss": 1.909344482421875, "memory(GiB)": 72.85, "step": 85265, "token_acc": 0.5620689655172414, "train_speed(iter/s)": 0.67276 }, { "epoch": 3.653228225011782, "grad_norm": 5.660292625427246, "learning_rate": 1.6891671015609117e-05, "loss": 2.1074562072753906, "memory(GiB)": 72.85, "step": 85270, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.672754 }, { "epoch": 3.653442440341031, 
"grad_norm": 5.852853298187256, "learning_rate": 1.6886628315190133e-05, "loss": 2.123262405395508, "memory(GiB)": 72.85, "step": 85275, "token_acc": 0.5518394648829431, "train_speed(iter/s)": 0.672756 }, { "epoch": 3.6536566556702796, "grad_norm": 6.564169406890869, "learning_rate": 1.688158621465096e-05, "loss": 2.3326236724853517, "memory(GiB)": 72.85, "step": 85280, "token_acc": 0.5, "train_speed(iter/s)": 0.672762 }, { "epoch": 3.653870870999529, "grad_norm": 5.5044941902160645, "learning_rate": 1.687654471408291e-05, "loss": 2.2004411697387694, "memory(GiB)": 72.85, "step": 85285, "token_acc": 0.5392156862745098, "train_speed(iter/s)": 0.672752 }, { "epoch": 3.6540850863287777, "grad_norm": 5.86756706237793, "learning_rate": 1.6871503813577312e-05, "loss": 1.9628330230712892, "memory(GiB)": 72.85, "step": 85290, "token_acc": 0.528957528957529, "train_speed(iter/s)": 0.672755 }, { "epoch": 3.6542993016580265, "grad_norm": 5.756049156188965, "learning_rate": 1.6866463513225517e-05, "loss": 2.129655456542969, "memory(GiB)": 72.85, "step": 85295, "token_acc": 0.4833948339483395, "train_speed(iter/s)": 0.672757 }, { "epoch": 3.654513516987276, "grad_norm": 5.279608249664307, "learning_rate": 1.6861423813118816e-05, "loss": 2.21417179107666, "memory(GiB)": 72.85, "step": 85300, "token_acc": 0.5719844357976653, "train_speed(iter/s)": 0.672759 }, { "epoch": 3.6547277323165246, "grad_norm": 4.273431777954102, "learning_rate": 1.6856384713348512e-05, "loss": 2.0447505950927733, "memory(GiB)": 72.85, "step": 85305, "token_acc": 0.5448028673835126, "train_speed(iter/s)": 0.672764 }, { "epoch": 3.6549419476457734, "grad_norm": 6.209177017211914, "learning_rate": 1.6851346214005888e-05, "loss": 2.074197006225586, "memory(GiB)": 72.85, "step": 85310, "token_acc": 0.5301204819277109, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.6551561629750227, "grad_norm": 5.733301162719727, "learning_rate": 1.6846308315182224e-05, "loss": 1.9761377334594727, "memory(GiB)": 72.85, 
"step": 85315, "token_acc": 0.5344262295081967, "train_speed(iter/s)": 0.672776 }, { "epoch": 3.6553703783042715, "grad_norm": 6.086387634277344, "learning_rate": 1.684127101696877e-05, "loss": 2.30689697265625, "memory(GiB)": 72.85, "step": 85320, "token_acc": 0.5252918287937743, "train_speed(iter/s)": 0.672778 }, { "epoch": 3.6555845936335203, "grad_norm": 6.213635444641113, "learning_rate": 1.6836234319456805e-05, "loss": 2.009079933166504, "memory(GiB)": 72.85, "step": 85325, "token_acc": 0.545774647887324, "train_speed(iter/s)": 0.672779 }, { "epoch": 3.6557988089627695, "grad_norm": 4.775221824645996, "learning_rate": 1.6831198222737576e-05, "loss": 2.228825569152832, "memory(GiB)": 72.85, "step": 85330, "token_acc": 0.5488721804511278, "train_speed(iter/s)": 0.672791 }, { "epoch": 3.6560130242920184, "grad_norm": 5.613771438598633, "learning_rate": 1.68261627269023e-05, "loss": 2.2423839569091797, "memory(GiB)": 72.85, "step": 85335, "token_acc": 0.5507246376811594, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.656227239621267, "grad_norm": 4.899063587188721, "learning_rate": 1.682112783204221e-05, "loss": 2.0876901626586912, "memory(GiB)": 72.85, "step": 85340, "token_acc": 0.5890909090909091, "train_speed(iter/s)": 0.672793 }, { "epoch": 3.6564414549505164, "grad_norm": 5.153916358947754, "learning_rate": 1.6816093538248496e-05, "loss": 2.1657503128051756, "memory(GiB)": 72.85, "step": 85345, "token_acc": 0.5658914728682171, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.6566556702797652, "grad_norm": 7.659602642059326, "learning_rate": 1.6811059845612375e-05, "loss": 2.0293148040771483, "memory(GiB)": 72.85, "step": 85350, "token_acc": 0.5429553264604811, "train_speed(iter/s)": 0.672787 }, { "epoch": 3.656869885609014, "grad_norm": 5.703488826751709, "learning_rate": 1.6806026754225057e-05, "loss": 2.113362693786621, "memory(GiB)": 72.85, "step": 85355, "token_acc": 0.5422535211267606, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.6570841009382633, 
"grad_norm": 3.87153697013855, "learning_rate": 1.6800994264177698e-05, "loss": 2.156546211242676, "memory(GiB)": 72.85, "step": 85360, "token_acc": 0.5491525423728814, "train_speed(iter/s)": 0.672788 }, { "epoch": 3.657298316267512, "grad_norm": 4.924710273742676, "learning_rate": 1.679596237556148e-05, "loss": 2.2379018783569338, "memory(GiB)": 72.85, "step": 85365, "token_acc": 0.5057803468208093, "train_speed(iter/s)": 0.672784 }, { "epoch": 3.657512531596761, "grad_norm": 4.957890510559082, "learning_rate": 1.6790931088467543e-05, "loss": 2.073123550415039, "memory(GiB)": 72.85, "step": 85370, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.65772674692601, "grad_norm": 4.429101467132568, "learning_rate": 1.678590040298705e-05, "loss": 1.8749307632446288, "memory(GiB)": 72.85, "step": 85375, "token_acc": 0.5674603174603174, "train_speed(iter/s)": 0.672784 }, { "epoch": 3.657940962255259, "grad_norm": 5.6090989112854, "learning_rate": 1.6780870319211107e-05, "loss": 2.0564430236816404, "memory(GiB)": 72.85, "step": 85380, "token_acc": 0.5338645418326693, "train_speed(iter/s)": 0.672787 }, { "epoch": 3.658155177584508, "grad_norm": 3.972923755645752, "learning_rate": 1.6775840837230878e-05, "loss": 2.0582576751708985, "memory(GiB)": 72.85, "step": 85385, "token_acc": 0.5029585798816568, "train_speed(iter/s)": 0.672789 }, { "epoch": 3.658369392913757, "grad_norm": 4.170594692230225, "learning_rate": 1.6770811957137454e-05, "loss": 2.1022571563720702, "memory(GiB)": 72.85, "step": 85390, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.658583608243006, "grad_norm": 5.675563335418701, "learning_rate": 1.6765783679021944e-05, "loss": 2.226155471801758, "memory(GiB)": 72.85, "step": 85395, "token_acc": 0.5138888888888888, "train_speed(iter/s)": 0.672787 }, { "epoch": 3.6587978235722547, "grad_norm": 5.429281234741211, "learning_rate": 1.6760756002975447e-05, "loss": 2.2605783462524416, "memory(GiB)": 
72.85, "step": 85400, "token_acc": 0.5141700404858299, "train_speed(iter/s)": 0.67279 }, { "epoch": 3.659012038901504, "grad_norm": 6.729478359222412, "learning_rate": 1.6755728929089016e-05, "loss": 2.2802255630493162, "memory(GiB)": 72.85, "step": 85405, "token_acc": 0.5170940170940171, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.6592262542307528, "grad_norm": 6.380817413330078, "learning_rate": 1.6750702457453754e-05, "loss": 2.300453948974609, "memory(GiB)": 72.85, "step": 85410, "token_acc": 0.5202952029520295, "train_speed(iter/s)": 0.672806 }, { "epoch": 3.6594404695600016, "grad_norm": 5.237045764923096, "learning_rate": 1.6745676588160707e-05, "loss": 1.9603979110717773, "memory(GiB)": 72.85, "step": 85415, "token_acc": 0.5233333333333333, "train_speed(iter/s)": 0.67281 }, { "epoch": 3.659654684889251, "grad_norm": 6.3138747215271, "learning_rate": 1.6740651321300925e-05, "loss": 2.134640121459961, "memory(GiB)": 72.85, "step": 85420, "token_acc": 0.5328467153284672, "train_speed(iter/s)": 0.672821 }, { "epoch": 3.6598689002184996, "grad_norm": 6.008269786834717, "learning_rate": 1.673562665696543e-05, "loss": 1.9739261627197267, "memory(GiB)": 72.85, "step": 85425, "token_acc": 0.5909090909090909, "train_speed(iter/s)": 0.672829 }, { "epoch": 3.6600831155477485, "grad_norm": 4.324216842651367, "learning_rate": 1.6730602595245283e-05, "loss": 2.0587215423583984, "memory(GiB)": 72.85, "step": 85430, "token_acc": 0.5479041916167665, "train_speed(iter/s)": 0.672842 }, { "epoch": 3.6602973308769977, "grad_norm": 5.474115371704102, "learning_rate": 1.6725579136231457e-05, "loss": 1.8191997528076171, "memory(GiB)": 72.85, "step": 85435, "token_acc": 0.5798611111111112, "train_speed(iter/s)": 0.672839 }, { "epoch": 3.6605115462062465, "grad_norm": 7.253164768218994, "learning_rate": 1.6720556280014994e-05, "loss": 2.3232688903808594, "memory(GiB)": 72.85, "step": 85440, "token_acc": 0.5234375, "train_speed(iter/s)": 0.672839 }, { "epoch": 3.6607257615354953, 
"grad_norm": 5.659224033355713, "learning_rate": 1.6715534026686873e-05, "loss": 2.1208522796630858, "memory(GiB)": 72.85, "step": 85445, "token_acc": 0.5393939393939394, "train_speed(iter/s)": 0.672839 }, { "epoch": 3.6609399768647446, "grad_norm": 7.239534854888916, "learning_rate": 1.671051237633808e-05, "loss": 2.0193065643310546, "memory(GiB)": 72.85, "step": 85450, "token_acc": 0.5426829268292683, "train_speed(iter/s)": 0.67284 }, { "epoch": 3.6611541921939934, "grad_norm": 5.6417012214660645, "learning_rate": 1.670549132905958e-05, "loss": 1.614966583251953, "memory(GiB)": 72.85, "step": 85455, "token_acc": 0.6045627376425855, "train_speed(iter/s)": 0.672841 }, { "epoch": 3.661368407523242, "grad_norm": 6.941958904266357, "learning_rate": 1.670047088494234e-05, "loss": 2.0117183685302735, "memory(GiB)": 72.85, "step": 85460, "token_acc": 0.5869565217391305, "train_speed(iter/s)": 0.672848 }, { "epoch": 3.6615826228524915, "grad_norm": 6.839953422546387, "learning_rate": 1.6695451044077286e-05, "loss": 2.013160514831543, "memory(GiB)": 72.85, "step": 85465, "token_acc": 0.5247813411078717, "train_speed(iter/s)": 0.672846 }, { "epoch": 3.6617968381817403, "grad_norm": 6.238954544067383, "learning_rate": 1.66904318065554e-05, "loss": 2.147678756713867, "memory(GiB)": 72.85, "step": 85470, "token_acc": 0.5353260869565217, "train_speed(iter/s)": 0.672851 }, { "epoch": 3.662011053510989, "grad_norm": 5.691981792449951, "learning_rate": 1.668541317246759e-05, "loss": 2.220220375061035, "memory(GiB)": 72.85, "step": 85475, "token_acc": 0.5311475409836065, "train_speed(iter/s)": 0.672848 }, { "epoch": 3.6622252688402384, "grad_norm": 7.184820652008057, "learning_rate": 1.6680395141904765e-05, "loss": 1.9992406845092774, "memory(GiB)": 72.85, "step": 85480, "token_acc": 0.5335689045936396, "train_speed(iter/s)": 0.672848 }, { "epoch": 3.662439484169487, "grad_norm": 6.883421421051025, "learning_rate": 1.6675377714957848e-05, "loss": 2.135089874267578, "memory(GiB)": 
72.85, "step": 85485, "token_acc": 0.5373134328358209, "train_speed(iter/s)": 0.672847 }, { "epoch": 3.662653699498736, "grad_norm": 6.0695295333862305, "learning_rate": 1.6670360891717702e-05, "loss": 2.009623718261719, "memory(GiB)": 72.85, "step": 85490, "token_acc": 0.5512820512820513, "train_speed(iter/s)": 0.672855 }, { "epoch": 3.6628679148279852, "grad_norm": 6.151148319244385, "learning_rate": 1.6665344672275246e-05, "loss": 2.1039670944213866, "memory(GiB)": 72.85, "step": 85495, "token_acc": 0.5488958990536278, "train_speed(iter/s)": 0.672854 }, { "epoch": 3.663082130157234, "grad_norm": 5.879136085510254, "learning_rate": 1.6660329056721334e-05, "loss": 2.211299514770508, "memory(GiB)": 72.85, "step": 85500, "token_acc": 0.5154320987654321, "train_speed(iter/s)": 0.672852 }, { "epoch": 3.663082130157234, "eval_loss": 2.207956075668335, "eval_runtime": 16.6451, "eval_samples_per_second": 6.008, "eval_steps_per_second": 6.008, "eval_token_acc": 0.5007052186177715, "step": 85500 }, { "epoch": 3.663296345486483, "grad_norm": 4.239976406097412, "learning_rate": 1.6655314045146847e-05, "loss": 2.338185501098633, "memory(GiB)": 72.85, "step": 85505, "token_acc": 0.49718045112781956, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.663510560815732, "grad_norm": 6.035307884216309, "learning_rate": 1.665029963764263e-05, "loss": 2.1969913482666015, "memory(GiB)": 72.85, "step": 85510, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672756 }, { "epoch": 3.663724776144981, "grad_norm": 5.047427177429199, "learning_rate": 1.6645285834299518e-05, "loss": 2.0961795806884767, "memory(GiB)": 72.85, "step": 85515, "token_acc": 0.5339233038348082, "train_speed(iter/s)": 0.672753 }, { "epoch": 3.6639389914742297, "grad_norm": 5.526318550109863, "learning_rate": 1.664027263520834e-05, "loss": 2.0430728912353517, "memory(GiB)": 72.85, "step": 85520, "token_acc": 0.526984126984127, "train_speed(iter/s)": 0.672752 }, { "epoch": 3.664153206803479, "grad_norm": 
6.122418403625488, "learning_rate": 1.6635260040459905e-05, "loss": 2.1839290618896485, "memory(GiB)": 72.85, "step": 85525, "token_acc": 0.5243445692883895, "train_speed(iter/s)": 0.672749 }, { "epoch": 3.664367422132728, "grad_norm": 5.798214912414551, "learning_rate": 1.663024805014505e-05, "loss": 2.280153274536133, "memory(GiB)": 72.85, "step": 85530, "token_acc": 0.5147540983606558, "train_speed(iter/s)": 0.672753 }, { "epoch": 3.6645816374619766, "grad_norm": 5.51644229888916, "learning_rate": 1.6625236664354547e-05, "loss": 2.28024959564209, "memory(GiB)": 72.85, "step": 85535, "token_acc": 0.48148148148148145, "train_speed(iter/s)": 0.672754 }, { "epoch": 3.664795852791226, "grad_norm": 5.576676368713379, "learning_rate": 1.6620225883179196e-05, "loss": 2.215347480773926, "memory(GiB)": 72.85, "step": 85540, "token_acc": 0.5306748466257669, "train_speed(iter/s)": 0.672749 }, { "epoch": 3.6650100681204747, "grad_norm": 5.203799724578857, "learning_rate": 1.661521570670977e-05, "loss": 2.031296730041504, "memory(GiB)": 72.85, "step": 85545, "token_acc": 0.5461538461538461, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.6652242834497235, "grad_norm": 7.359340190887451, "learning_rate": 1.6610206135037003e-05, "loss": 1.7568273544311523, "memory(GiB)": 72.85, "step": 85550, "token_acc": 0.5912162162162162, "train_speed(iter/s)": 0.672746 }, { "epoch": 3.6654384987789728, "grad_norm": 6.526478290557861, "learning_rate": 1.6605197168251697e-05, "loss": 1.8880992889404298, "memory(GiB)": 72.85, "step": 85555, "token_acc": 0.5338645418326693, "train_speed(iter/s)": 0.672747 }, { "epoch": 3.6656527141082216, "grad_norm": 5.594759941101074, "learning_rate": 1.6600188806444573e-05, "loss": 2.1383642196655273, "memory(GiB)": 72.85, "step": 85560, "token_acc": 0.5271317829457365, "train_speed(iter/s)": 0.67275 }, { "epoch": 3.6658669294374704, "grad_norm": 6.0009026527404785, "learning_rate": 1.6595181049706354e-05, "loss": 1.9427234649658203, "memory(GiB)": 72.85, 
"step": 85565, "token_acc": 0.555956678700361, "train_speed(iter/s)": 0.672743 }, { "epoch": 3.6660811447667196, "grad_norm": 7.1729865074157715, "learning_rate": 1.6590173898127752e-05, "loss": 2.17984619140625, "memory(GiB)": 72.85, "step": 85570, "token_acc": 0.5409836065573771, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.6662953600959685, "grad_norm": 6.384511947631836, "learning_rate": 1.6585167351799507e-05, "loss": 2.2497280120849608, "memory(GiB)": 72.85, "step": 85575, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.6665095754252173, "grad_norm": 4.850757598876953, "learning_rate": 1.6580161410812288e-05, "loss": 2.0544958114624023, "memory(GiB)": 72.85, "step": 85580, "token_acc": 0.5701492537313433, "train_speed(iter/s)": 0.67275 }, { "epoch": 3.6667237907544665, "grad_norm": 6.653410911560059, "learning_rate": 1.657515607525681e-05, "loss": 2.2218673706054686, "memory(GiB)": 72.85, "step": 85585, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.672748 }, { "epoch": 3.6669380060837153, "grad_norm": 6.106175899505615, "learning_rate": 1.6570151345223733e-05, "loss": 1.8784954071044921, "memory(GiB)": 72.85, "step": 85590, "token_acc": 0.5976095617529881, "train_speed(iter/s)": 0.672744 }, { "epoch": 3.667152221412964, "grad_norm": 6.3383355140686035, "learning_rate": 1.656514722080373e-05, "loss": 2.0225452423095702, "memory(GiB)": 72.85, "step": 85595, "token_acc": 0.5406360424028268, "train_speed(iter/s)": 0.672743 }, { "epoch": 3.6673664367422134, "grad_norm": 5.6840715408325195, "learning_rate": 1.6560143702087438e-05, "loss": 2.1281854629516603, "memory(GiB)": 72.85, "step": 85600, "token_acc": 0.5265017667844523, "train_speed(iter/s)": 0.672751 }, { "epoch": 3.667580652071462, "grad_norm": 7.373327255249023, "learning_rate": 1.6555140789165523e-05, "loss": 2.266828155517578, "memory(GiB)": 72.85, "step": 85605, "token_acc": 0.5330739299610895, "train_speed(iter/s)": 0.672753 }, { "epoch": 
3.667794867400711, "grad_norm": 6.693173885345459, "learning_rate": 1.6550138482128585e-05, "loss": 1.9782402038574218, "memory(GiB)": 72.85, "step": 85610, "token_acc": 0.527972027972028, "train_speed(iter/s)": 0.672748 }, { "epoch": 3.6680090827299603, "grad_norm": 8.442191123962402, "learning_rate": 1.6545136781067282e-05, "loss": 2.173015594482422, "memory(GiB)": 72.85, "step": 85615, "token_acc": 0.5413533834586466, "train_speed(iter/s)": 0.672758 }, { "epoch": 3.668223298059209, "grad_norm": 7.400197505950928, "learning_rate": 1.654013568607221e-05, "loss": 1.9028350830078125, "memory(GiB)": 72.85, "step": 85620, "token_acc": 0.5583941605839416, "train_speed(iter/s)": 0.672761 }, { "epoch": 3.668437513388458, "grad_norm": 6.172098159790039, "learning_rate": 1.6535135197233976e-05, "loss": 2.115799140930176, "memory(GiB)": 72.85, "step": 85625, "token_acc": 0.5535714285714286, "train_speed(iter/s)": 0.672764 }, { "epoch": 3.668651728717707, "grad_norm": 6.180514812469482, "learning_rate": 1.6530135314643153e-05, "loss": 1.9680551528930663, "memory(GiB)": 72.85, "step": 85630, "token_acc": 0.5627376425855514, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.668865944046956, "grad_norm": 6.709898471832275, "learning_rate": 1.652513603839031e-05, "loss": 2.4140304565429687, "memory(GiB)": 72.85, "step": 85635, "token_acc": 0.5071225071225072, "train_speed(iter/s)": 0.672769 }, { "epoch": 3.669080159376205, "grad_norm": 8.85554313659668, "learning_rate": 1.6520137368566047e-05, "loss": 2.352324295043945, "memory(GiB)": 72.85, "step": 85640, "token_acc": 0.5074074074074074, "train_speed(iter/s)": 0.672777 }, { "epoch": 3.669294374705454, "grad_norm": 5.899624824523926, "learning_rate": 1.6515139305260886e-05, "loss": 2.0944850921630858, "memory(GiB)": 72.85, "step": 85645, "token_acc": 0.5614617940199336, "train_speed(iter/s)": 0.672784 }, { "epoch": 3.669508590034703, "grad_norm": 4.9984049797058105, "learning_rate": 1.65101418485654e-05, "loss": 2.0498493194580076, 
"memory(GiB)": 72.85, "step": 85650, "token_acc": 0.5674740484429066, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.6697228053639517, "grad_norm": 5.994021415710449, "learning_rate": 1.6505144998570116e-05, "loss": 2.102909469604492, "memory(GiB)": 72.85, "step": 85655, "token_acc": 0.546242774566474, "train_speed(iter/s)": 0.672783 }, { "epoch": 3.669937020693201, "grad_norm": 6.179702281951904, "learning_rate": 1.6500148755365547e-05, "loss": 2.3397052764892576, "memory(GiB)": 72.85, "step": 85660, "token_acc": 0.48493975903614456, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.6701512360224497, "grad_norm": 5.3797502517700195, "learning_rate": 1.6495153119042213e-05, "loss": 2.043392372131348, "memory(GiB)": 72.85, "step": 85665, "token_acc": 0.6322314049586777, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.6703654513516986, "grad_norm": 5.486962795257568, "learning_rate": 1.649015808969059e-05, "loss": 1.7997156143188477, "memory(GiB)": 72.85, "step": 85670, "token_acc": 0.5854545454545454, "train_speed(iter/s)": 0.672787 }, { "epoch": 3.670579666680948, "grad_norm": 5.7569804191589355, "learning_rate": 1.6485163667401205e-05, "loss": 2.1647571563720702, "memory(GiB)": 72.85, "step": 85675, "token_acc": 0.5104895104895105, "train_speed(iter/s)": 0.67278 }, { "epoch": 3.6707938820101966, "grad_norm": 5.245151996612549, "learning_rate": 1.648016985226452e-05, "loss": 1.8445209503173827, "memory(GiB)": 72.85, "step": 85680, "token_acc": 0.5616883116883117, "train_speed(iter/s)": 0.672779 }, { "epoch": 3.6710080973394454, "grad_norm": 4.712581157684326, "learning_rate": 1.6475176644371003e-05, "loss": 2.038011932373047, "memory(GiB)": 72.85, "step": 85685, "token_acc": 0.5649546827794562, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.6712223126686947, "grad_norm": 5.747527122497559, "learning_rate": 1.6470184043811115e-05, "loss": 2.4188631057739256, "memory(GiB)": 72.85, "step": 85690, "token_acc": 0.49032258064516127, "train_speed(iter/s)": 0.672764 }, { 
"epoch": 3.6714365279979435, "grad_norm": 6.011465072631836, "learning_rate": 1.6465192050675272e-05, "loss": 1.7640663146972657, "memory(GiB)": 72.85, "step": 85695, "token_acc": 0.5793650793650794, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.6716507433271923, "grad_norm": 5.609074592590332, "learning_rate": 1.6460200665053955e-05, "loss": 2.3715492248535157, "memory(GiB)": 72.85, "step": 85700, "token_acc": 0.49514563106796117, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.6718649586564416, "grad_norm": 5.157476902008057, "learning_rate": 1.6455209887037564e-05, "loss": 2.193582534790039, "memory(GiB)": 72.85, "step": 85705, "token_acc": 0.5709677419354838, "train_speed(iter/s)": 0.672775 }, { "epoch": 3.6720791739856904, "grad_norm": 4.446305751800537, "learning_rate": 1.6450219716716514e-05, "loss": 1.9858526229858398, "memory(GiB)": 72.85, "step": 85710, "token_acc": 0.5648854961832062, "train_speed(iter/s)": 0.67278 }, { "epoch": 3.672293389314939, "grad_norm": 5.3484578132629395, "learning_rate": 1.64452301541812e-05, "loss": 2.040791702270508, "memory(GiB)": 72.85, "step": 85715, "token_acc": 0.5382059800664452, "train_speed(iter/s)": 0.672782 }, { "epoch": 3.6725076046441885, "grad_norm": 4.830794811248779, "learning_rate": 1.644024119952201e-05, "loss": 2.0800670623779296, "memory(GiB)": 72.85, "step": 85720, "token_acc": 0.5514285714285714, "train_speed(iter/s)": 0.672779 }, { "epoch": 3.6727218199734373, "grad_norm": 5.057399272918701, "learning_rate": 1.643525285282933e-05, "loss": 1.9460943222045899, "memory(GiB)": 72.85, "step": 85725, "token_acc": 0.5457413249211357, "train_speed(iter/s)": 0.672786 }, { "epoch": 3.672936035302686, "grad_norm": 5.7343292236328125, "learning_rate": 1.6430265114193548e-05, "loss": 2.2033321380615236, "memory(GiB)": 72.85, "step": 85730, "token_acc": 0.5016611295681063, "train_speed(iter/s)": 0.67278 }, { "epoch": 3.6731502506319353, "grad_norm": 5.141839027404785, "learning_rate": 1.6425277983705e-05, "loss": 
1.9514629364013671, "memory(GiB)": 72.85, "step": 85735, "token_acc": 0.5673076923076923, "train_speed(iter/s)": 0.672781 }, { "epoch": 3.673364465961184, "grad_norm": 5.790070533752441, "learning_rate": 1.642029146145404e-05, "loss": 1.953864288330078, "memory(GiB)": 72.85, "step": 85740, "token_acc": 0.5394736842105263, "train_speed(iter/s)": 0.672781 }, { "epoch": 3.673578681290433, "grad_norm": 5.360121726989746, "learning_rate": 1.6415305547531e-05, "loss": 1.6929861068725587, "memory(GiB)": 72.85, "step": 85745, "token_acc": 0.5809859154929577, "train_speed(iter/s)": 0.672788 }, { "epoch": 3.6737928966196822, "grad_norm": 5.496664524078369, "learning_rate": 1.6410320242026213e-05, "loss": 2.0097158432006834, "memory(GiB)": 72.85, "step": 85750, "token_acc": 0.5613382899628253, "train_speed(iter/s)": 0.672786 }, { "epoch": 3.674007111948931, "grad_norm": 4.648687839508057, "learning_rate": 1.640533554502996e-05, "loss": 1.7707429885864259, "memory(GiB)": 72.85, "step": 85755, "token_acc": 0.592156862745098, "train_speed(iter/s)": 0.672789 }, { "epoch": 3.67422132727818, "grad_norm": 6.177188396453857, "learning_rate": 1.6400351456632584e-05, "loss": 2.3051645278930666, "memory(GiB)": 72.85, "step": 85760, "token_acc": 0.5490196078431373, "train_speed(iter/s)": 0.672791 }, { "epoch": 3.674435542607429, "grad_norm": 6.694350719451904, "learning_rate": 1.6395367976924357e-05, "loss": 1.918069839477539, "memory(GiB)": 72.85, "step": 85765, "token_acc": 0.5615384615384615, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.674649757936678, "grad_norm": 5.9954376220703125, "learning_rate": 1.639038510599557e-05, "loss": 1.9621055603027344, "memory(GiB)": 72.85, "step": 85770, "token_acc": 0.5622775800711743, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.6748639732659267, "grad_norm": 5.319386959075928, "learning_rate": 1.6385402843936482e-05, "loss": 2.3930099487304686, "memory(GiB)": 72.85, "step": 85775, "token_acc": 0.4887459807073955, "train_speed(iter/s)": 
0.672798 }, { "epoch": 3.675078188595176, "grad_norm": 6.092208385467529, "learning_rate": 1.638042119083734e-05, "loss": 2.13226375579834, "memory(GiB)": 72.85, "step": 85780, "token_acc": 0.55, "train_speed(iter/s)": 0.672799 }, { "epoch": 3.675292403924425, "grad_norm": 5.790502548217773, "learning_rate": 1.6375440146788416e-05, "loss": 2.222382736206055, "memory(GiB)": 72.85, "step": 85785, "token_acc": 0.5107142857142857, "train_speed(iter/s)": 0.672795 }, { "epoch": 3.6755066192536736, "grad_norm": 9.305243492126465, "learning_rate": 1.6370459711879943e-05, "loss": 2.026140594482422, "memory(GiB)": 72.85, "step": 85790, "token_acc": 0.5580524344569289, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.675720834582923, "grad_norm": 5.611993312835693, "learning_rate": 1.6365479886202123e-05, "loss": 2.324216842651367, "memory(GiB)": 72.85, "step": 85795, "token_acc": 0.5398773006134969, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.6759350499121717, "grad_norm": 6.016709804534912, "learning_rate": 1.6360500669845202e-05, "loss": 1.923794937133789, "memory(GiB)": 72.85, "step": 85800, "token_acc": 0.5598705501618123, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.6761492652414205, "grad_norm": 7.3570756912231445, "learning_rate": 1.6355522062899365e-05, "loss": 1.8945600509643554, "memory(GiB)": 72.85, "step": 85805, "token_acc": 0.5598290598290598, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.6763634805706698, "grad_norm": 5.0799336433410645, "learning_rate": 1.635054406545481e-05, "loss": 2.0112585067749023, "memory(GiB)": 72.85, "step": 85810, "token_acc": 0.5364238410596026, "train_speed(iter/s)": 0.672795 }, { "epoch": 3.6765776958999186, "grad_norm": 5.391516208648682, "learning_rate": 1.6345566677601697e-05, "loss": 1.7893085479736328, "memory(GiB)": 72.85, "step": 85815, "token_acc": 0.5533596837944664, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.6767919112291674, "grad_norm": 5.769270896911621, "learning_rate": 1.634058989943023e-05, "loss": 
2.010538101196289, "memory(GiB)": 72.85, "step": 85820, "token_acc": 0.5430711610486891, "train_speed(iter/s)": 0.672804 }, { "epoch": 3.6770061265584166, "grad_norm": 6.637495040893555, "learning_rate": 1.6335613731030554e-05, "loss": 2.0951864242553713, "memory(GiB)": 72.85, "step": 85825, "token_acc": 0.5888888888888889, "train_speed(iter/s)": 0.67281 }, { "epoch": 3.6772203418876654, "grad_norm": 5.4406328201293945, "learning_rate": 1.633063817249281e-05, "loss": 2.029079055786133, "memory(GiB)": 72.85, "step": 85830, "token_acc": 0.56, "train_speed(iter/s)": 0.672813 }, { "epoch": 3.6774345572169143, "grad_norm": 6.506168842315674, "learning_rate": 1.6325663223907146e-05, "loss": 2.044691467285156, "memory(GiB)": 72.85, "step": 85835, "token_acc": 0.5676567656765676, "train_speed(iter/s)": 0.672799 }, { "epoch": 3.6776487725461635, "grad_norm": 8.380014419555664, "learning_rate": 1.6320688885363666e-05, "loss": 2.344854164123535, "memory(GiB)": 72.85, "step": 85840, "token_acc": 0.5531914893617021, "train_speed(iter/s)": 0.672805 }, { "epoch": 3.6778629878754123, "grad_norm": 4.655486583709717, "learning_rate": 1.631571515695251e-05, "loss": 2.133159637451172, "memory(GiB)": 72.85, "step": 85845, "token_acc": 0.5159235668789809, "train_speed(iter/s)": 0.672818 }, { "epoch": 3.678077203204661, "grad_norm": 7.565803527832031, "learning_rate": 1.6310742038763776e-05, "loss": 2.1062643051147463, "memory(GiB)": 72.85, "step": 85850, "token_acc": 0.516260162601626, "train_speed(iter/s)": 0.672817 }, { "epoch": 3.6782914185339104, "grad_norm": 6.8250017166137695, "learning_rate": 1.6305769530887553e-05, "loss": 2.135756492614746, "memory(GiB)": 72.85, "step": 85855, "token_acc": 0.5277777777777778, "train_speed(iter/s)": 0.672814 }, { "epoch": 3.678505633863159, "grad_norm": 5.10996150970459, "learning_rate": 1.6300797633413928e-05, "loss": 2.1336929321289064, "memory(GiB)": 72.85, "step": 85860, "token_acc": 0.5631067961165048, "train_speed(iter/s)": 0.672809 }, { 
"epoch": 3.678719849192408, "grad_norm": 5.841415882110596, "learning_rate": 1.6295826346432947e-05, "loss": 2.2832366943359377, "memory(GiB)": 72.85, "step": 85865, "token_acc": 0.5331230283911672, "train_speed(iter/s)": 0.672811 }, { "epoch": 3.6789340645216573, "grad_norm": 7.312309265136719, "learning_rate": 1.6290855670034683e-05, "loss": 2.207180404663086, "memory(GiB)": 72.85, "step": 85870, "token_acc": 0.5098039215686274, "train_speed(iter/s)": 0.672809 }, { "epoch": 3.679148279850906, "grad_norm": 7.2595977783203125, "learning_rate": 1.628588560430921e-05, "loss": 2.348058319091797, "memory(GiB)": 72.85, "step": 85875, "token_acc": 0.5181058495821727, "train_speed(iter/s)": 0.672802 }, { "epoch": 3.679362495180155, "grad_norm": 4.6654767990112305, "learning_rate": 1.6280916149346548e-05, "loss": 1.9509820938110352, "memory(GiB)": 72.85, "step": 85880, "token_acc": 0.5576208178438662, "train_speed(iter/s)": 0.672791 }, { "epoch": 3.679576710509404, "grad_norm": 4.862341403961182, "learning_rate": 1.6275947305236715e-05, "loss": 2.241779899597168, "memory(GiB)": 72.85, "step": 85885, "token_acc": 0.5627009646302251, "train_speed(iter/s)": 0.672783 }, { "epoch": 3.679790925838653, "grad_norm": 5.258766174316406, "learning_rate": 1.627097907206974e-05, "loss": 1.8887916564941407, "memory(GiB)": 72.85, "step": 85890, "token_acc": 0.5694444444444444, "train_speed(iter/s)": 0.672787 }, { "epoch": 3.680005141167902, "grad_norm": 5.309013843536377, "learning_rate": 1.6266011449935614e-05, "loss": 1.9200448989868164, "memory(GiB)": 72.85, "step": 85895, "token_acc": 0.5498392282958199, "train_speed(iter/s)": 0.672787 }, { "epoch": 3.680219356497151, "grad_norm": 6.388468265533447, "learning_rate": 1.626104443892432e-05, "loss": 2.0804662704467773, "memory(GiB)": 72.85, "step": 85900, "token_acc": 0.5641025641025641, "train_speed(iter/s)": 0.672773 }, { "epoch": 3.6804335718264, "grad_norm": 5.158586025238037, "learning_rate": 1.6256078039125876e-05, "loss": 
2.090959167480469, "memory(GiB)": 72.85, "step": 85905, "token_acc": 0.5186170212765957, "train_speed(iter/s)": 0.672779 }, { "epoch": 3.6806477871556487, "grad_norm": 5.794443130493164, "learning_rate": 1.625111225063023e-05, "loss": 2.1228641510009765, "memory(GiB)": 72.85, "step": 85910, "token_acc": 0.5246376811594203, "train_speed(iter/s)": 0.67278 }, { "epoch": 3.680862002484898, "grad_norm": 10.512895584106445, "learning_rate": 1.6246147073527344e-05, "loss": 2.113333511352539, "memory(GiB)": 72.85, "step": 85915, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.672775 }, { "epoch": 3.6810762178141467, "grad_norm": 5.261044979095459, "learning_rate": 1.624118250790717e-05, "loss": 1.9481128692626952, "memory(GiB)": 72.85, "step": 85920, "token_acc": 0.5563380281690141, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.6812904331433955, "grad_norm": 5.737076759338379, "learning_rate": 1.6236218553859623e-05, "loss": 2.277654266357422, "memory(GiB)": 72.85, "step": 85925, "token_acc": 0.5290102389078498, "train_speed(iter/s)": 0.672766 }, { "epoch": 3.681504648472645, "grad_norm": 4.981070041656494, "learning_rate": 1.6231255211474672e-05, "loss": 2.0910282135009766, "memory(GiB)": 72.85, "step": 85930, "token_acc": 0.5110294117647058, "train_speed(iter/s)": 0.672762 }, { "epoch": 3.6817188638018936, "grad_norm": 7.445515155792236, "learning_rate": 1.6226292480842202e-05, "loss": 2.292569160461426, "memory(GiB)": 72.85, "step": 85935, "token_acc": 0.53125, "train_speed(iter/s)": 0.672757 }, { "epoch": 3.6819330791311424, "grad_norm": 4.658512115478516, "learning_rate": 1.6221330362052116e-05, "loss": 1.955650520324707, "memory(GiB)": 72.85, "step": 85940, "token_acc": 0.5571428571428572, "train_speed(iter/s)": 0.672763 }, { "epoch": 3.6821472944603917, "grad_norm": 4.907525539398193, "learning_rate": 1.6216368855194332e-05, "loss": 2.171141815185547, "memory(GiB)": 72.85, "step": 85945, "token_acc": 0.5619047619047619, "train_speed(iter/s)": 0.67277 }, { 
"epoch": 3.6823615097896405, "grad_norm": 7.533132553100586, "learning_rate": 1.6211407960358716e-05, "loss": 2.031412124633789, "memory(GiB)": 72.85, "step": 85950, "token_acc": 0.5182481751824818, "train_speed(iter/s)": 0.672766 }, { "epoch": 3.6825757251188893, "grad_norm": 6.768759250640869, "learning_rate": 1.6206447677635146e-05, "loss": 2.0422126770019533, "memory(GiB)": 72.85, "step": 85955, "token_acc": 0.5708661417322834, "train_speed(iter/s)": 0.672757 }, { "epoch": 3.6827899404481386, "grad_norm": 5.865666389465332, "learning_rate": 1.620148800711346e-05, "loss": 2.1999940872192383, "memory(GiB)": 72.85, "step": 85960, "token_acc": 0.5125448028673835, "train_speed(iter/s)": 0.672761 }, { "epoch": 3.6830041557773874, "grad_norm": 5.3544602394104, "learning_rate": 1.6196528948883533e-05, "loss": 2.0219186782836913, "memory(GiB)": 72.85, "step": 85965, "token_acc": 0.5632911392405063, "train_speed(iter/s)": 0.672758 }, { "epoch": 3.683218371106636, "grad_norm": 5.173421859741211, "learning_rate": 1.6191570503035202e-05, "loss": 1.817570114135742, "memory(GiB)": 72.85, "step": 85970, "token_acc": 0.5571428571428572, "train_speed(iter/s)": 0.672761 }, { "epoch": 3.6834325864358854, "grad_norm": 6.0149312019348145, "learning_rate": 1.6186612669658286e-05, "loss": 1.973800277709961, "memory(GiB)": 72.85, "step": 85975, "token_acc": 0.5331325301204819, "train_speed(iter/s)": 0.672759 }, { "epoch": 3.6836468017651343, "grad_norm": 5.883084774017334, "learning_rate": 1.6181655448842598e-05, "loss": 2.3564121246337892, "memory(GiB)": 72.85, "step": 85980, "token_acc": 0.5064516129032258, "train_speed(iter/s)": 0.672758 }, { "epoch": 3.683861017094383, "grad_norm": 6.338587760925293, "learning_rate": 1.617669884067793e-05, "loss": 2.2441816329956055, "memory(GiB)": 72.85, "step": 85985, "token_acc": 0.5268456375838926, "train_speed(iter/s)": 0.672761 }, { "epoch": 3.6840752324236323, "grad_norm": 7.198296546936035, "learning_rate": 1.6171742845254105e-05, "loss": 
2.226926803588867, "memory(GiB)": 72.85, "step": 85990, "token_acc": 0.5155038759689923, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.684289447752881, "grad_norm": 5.48461389541626, "learning_rate": 1.61667874626609e-05, "loss": 2.3197174072265625, "memory(GiB)": 72.85, "step": 85995, "token_acc": 0.5045592705167173, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.68450366308213, "grad_norm": 6.054134368896484, "learning_rate": 1.6161832692988072e-05, "loss": 2.2852048873901367, "memory(GiB)": 72.85, "step": 86000, "token_acc": 0.5, "train_speed(iter/s)": 0.672757 }, { "epoch": 3.68450366308213, "eval_loss": 1.9438101053237915, "eval_runtime": 16.1175, "eval_samples_per_second": 6.204, "eval_steps_per_second": 6.204, "eval_token_acc": 0.5053619302949062, "step": 86000 }, { "epoch": 3.684717878411379, "grad_norm": 5.829672813415527, "learning_rate": 1.6156878536325386e-05, "loss": 2.0745943069458006, "memory(GiB)": 72.85, "step": 86005, "token_acc": 0.5152380952380953, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.684932093740628, "grad_norm": 6.442437171936035, "learning_rate": 1.615192499276258e-05, "loss": 2.049121284484863, "memory(GiB)": 72.85, "step": 86010, "token_acc": 0.540453074433657, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.685146309069877, "grad_norm": 5.7329182624816895, "learning_rate": 1.6146972062389404e-05, "loss": 2.027039909362793, "memory(GiB)": 72.85, "step": 86015, "token_acc": 0.556, "train_speed(iter/s)": 0.672655 }, { "epoch": 3.685360524399126, "grad_norm": 5.360079765319824, "learning_rate": 1.61420197452956e-05, "loss": 2.209859085083008, "memory(GiB)": 72.85, "step": 86020, "token_acc": 0.5201465201465202, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.685574739728375, "grad_norm": 7.939674377441406, "learning_rate": 1.6137068041570874e-05, "loss": 1.7682819366455078, "memory(GiB)": 72.85, "step": 86025, "token_acc": 0.5632183908045977, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.6857889550576237, "grad_norm": 
7.219977855682373, "learning_rate": 1.6132116951304927e-05, "loss": 1.825979232788086, "memory(GiB)": 72.85, "step": 86030, "token_acc": 0.5893536121673004, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.686003170386873, "grad_norm": 4.368396282196045, "learning_rate": 1.6127166474587445e-05, "loss": 1.922246551513672, "memory(GiB)": 72.85, "step": 86035, "token_acc": 0.5752895752895753, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.686217385716122, "grad_norm": 5.436540603637695, "learning_rate": 1.6122216611508125e-05, "loss": 2.1227754592895507, "memory(GiB)": 72.85, "step": 86040, "token_acc": 0.5580645161290323, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.6864316010453706, "grad_norm": 4.441815376281738, "learning_rate": 1.6117267362156606e-05, "loss": 2.0224864959716795, "memory(GiB)": 72.85, "step": 86045, "token_acc": 0.5563139931740614, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.68664581637462, "grad_norm": 6.951808929443359, "learning_rate": 1.61123187266226e-05, "loss": 2.187735366821289, "memory(GiB)": 72.85, "step": 86050, "token_acc": 0.5362903225806451, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.6868600317038687, "grad_norm": 6.208585739135742, "learning_rate": 1.6107370704995723e-05, "loss": 2.3667985916137697, "memory(GiB)": 72.85, "step": 86055, "token_acc": 0.5045317220543807, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.6870742470331175, "grad_norm": 6.771169185638428, "learning_rate": 1.610242329736562e-05, "loss": 2.236663818359375, "memory(GiB)": 72.85, "step": 86060, "token_acc": 0.5302013422818792, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.6872884623623667, "grad_norm": 6.135320663452148, "learning_rate": 1.6097476503821916e-05, "loss": 2.293560600280762, "memory(GiB)": 72.85, "step": 86065, "token_acc": 0.5337423312883436, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.6875026776916155, "grad_norm": 6.322453022003174, "learning_rate": 1.609253032445422e-05, "loss": 2.2325250625610353, "memory(GiB)": 72.85, 
"step": 86070, "token_acc": 0.5052910052910053, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.6877168930208644, "grad_norm": 9.783828735351562, "learning_rate": 1.6087584759352153e-05, "loss": 2.346420669555664, "memory(GiB)": 72.85, "step": 86075, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.6879311083501136, "grad_norm": 4.632090091705322, "learning_rate": 1.6082639808605303e-05, "loss": 2.4637805938720705, "memory(GiB)": 72.85, "step": 86080, "token_acc": 0.48184818481848185, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.6881453236793624, "grad_norm": 5.222105026245117, "learning_rate": 1.607769547230325e-05, "loss": 2.054579162597656, "memory(GiB)": 72.85, "step": 86085, "token_acc": 0.5369774919614148, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.6883595390086112, "grad_norm": 6.535361289978027, "learning_rate": 1.6072751750535555e-05, "loss": 2.0013500213623048, "memory(GiB)": 72.85, "step": 86090, "token_acc": 0.5947955390334573, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.6885737543378605, "grad_norm": 5.152525901794434, "learning_rate": 1.6067808643391797e-05, "loss": 1.7915006637573243, "memory(GiB)": 72.85, "step": 86095, "token_acc": 0.5923076923076923, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.6887879696671093, "grad_norm": 5.2534613609313965, "learning_rate": 1.6062866150961497e-05, "loss": 1.87268123626709, "memory(GiB)": 72.85, "step": 86100, "token_acc": 0.5770609318996416, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.689002184996358, "grad_norm": 5.14207124710083, "learning_rate": 1.6057924273334235e-05, "loss": 2.3133630752563477, "memory(GiB)": 72.85, "step": 86105, "token_acc": 0.47619047619047616, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.6892164003256074, "grad_norm": 4.732337951660156, "learning_rate": 1.6052983010599513e-05, "loss": 2.0277360916137694, "memory(GiB)": 72.85, "step": 86110, "token_acc": 0.5361842105263158, "train_speed(iter/s)": 0.672641 }, { "epoch": 
3.689430615654856, "grad_norm": 6.896875858306885, "learning_rate": 1.604804236284685e-05, "loss": 2.1876983642578125, "memory(GiB)": 72.85, "step": 86115, "token_acc": 0.535593220338983, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.689644830984105, "grad_norm": 4.558460235595703, "learning_rate": 1.604310233016575e-05, "loss": 1.849451446533203, "memory(GiB)": 72.85, "step": 86120, "token_acc": 0.5809859154929577, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.6898590463133543, "grad_norm": 5.15700626373291, "learning_rate": 1.6038162912645706e-05, "loss": 2.093755531311035, "memory(GiB)": 72.85, "step": 86125, "token_acc": 0.5272727272727272, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.690073261642603, "grad_norm": 9.419149398803711, "learning_rate": 1.6033224110376175e-05, "loss": 2.1096529006958007, "memory(GiB)": 72.85, "step": 86130, "token_acc": 0.5215686274509804, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.690287476971852, "grad_norm": 6.738673686981201, "learning_rate": 1.602828592344668e-05, "loss": 1.8489553451538085, "memory(GiB)": 72.85, "step": 86135, "token_acc": 0.5955882352941176, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.690501692301101, "grad_norm": 5.036343097686768, "learning_rate": 1.6023348351946642e-05, "loss": 2.2446830749511717, "memory(GiB)": 72.85, "step": 86140, "token_acc": 0.5284280936454849, "train_speed(iter/s)": 0.67262 }, { "epoch": 3.69071590763035, "grad_norm": 5.416397571563721, "learning_rate": 1.6018411395965533e-05, "loss": 2.1639778137207033, "memory(GiB)": 72.85, "step": 86145, "token_acc": 0.5059171597633136, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.6909301229595988, "grad_norm": 6.678920269012451, "learning_rate": 1.6013475055592775e-05, "loss": 1.8893741607666015, "memory(GiB)": 72.85, "step": 86150, "token_acc": 0.5584905660377358, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.691144338288848, "grad_norm": 5.155628204345703, "learning_rate": 1.6008539330917776e-05, "loss": 
2.5470170974731445, "memory(GiB)": 72.85, "step": 86155, "token_acc": 0.47840531561461797, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.691358553618097, "grad_norm": 6.047445774078369, "learning_rate": 1.6003604222029993e-05, "loss": 2.066859245300293, "memory(GiB)": 72.85, "step": 86160, "token_acc": 0.572463768115942, "train_speed(iter/s)": 0.67261 }, { "epoch": 3.6915727689473457, "grad_norm": 7.970089435577393, "learning_rate": 1.59986697290188e-05, "loss": 2.4472419738769533, "memory(GiB)": 72.85, "step": 86165, "token_acc": 0.5107692307692308, "train_speed(iter/s)": 0.672617 }, { "epoch": 3.691786984276595, "grad_norm": 5.932147979736328, "learning_rate": 1.5993735851973614e-05, "loss": 2.1873058319091796, "memory(GiB)": 72.85, "step": 86170, "token_acc": 0.5639097744360902, "train_speed(iter/s)": 0.672622 }, { "epoch": 3.6920011996058437, "grad_norm": 5.148868560791016, "learning_rate": 1.5988802590983802e-05, "loss": 1.877604866027832, "memory(GiB)": 72.85, "step": 86175, "token_acc": 0.5524193548387096, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.6922154149350925, "grad_norm": 5.940605163574219, "learning_rate": 1.5983869946138736e-05, "loss": 2.1289785385131834, "memory(GiB)": 72.85, "step": 86180, "token_acc": 0.5482866043613707, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.692429630264342, "grad_norm": 5.963436603546143, "learning_rate": 1.5978937917527774e-05, "loss": 2.2381464004516602, "memory(GiB)": 72.85, "step": 86185, "token_acc": 0.5181159420289855, "train_speed(iter/s)": 0.672635 }, { "epoch": 3.6926438455935906, "grad_norm": 5.187590599060059, "learning_rate": 1.597400650524025e-05, "loss": 1.917806053161621, "memory(GiB)": 72.85, "step": 86190, "token_acc": 0.5501432664756447, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.6928580609228394, "grad_norm": 5.709259986877441, "learning_rate": 1.5969075709365537e-05, "loss": 2.4065093994140625, "memory(GiB)": 72.85, "step": 86195, "token_acc": 0.5208333333333334, "train_speed(iter/s)": 
0.672632 }, { "epoch": 3.6930722762520887, "grad_norm": 6.433982849121094, "learning_rate": 1.596414552999294e-05, "loss": 1.8577579498291015, "memory(GiB)": 72.85, "step": 86200, "token_acc": 0.5742574257425742, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.6932864915813375, "grad_norm": 5.122760772705078, "learning_rate": 1.5959215967211776e-05, "loss": 2.2297082901000977, "memory(GiB)": 72.85, "step": 86205, "token_acc": 0.5320754716981132, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.6935007069105863, "grad_norm": 5.2140021324157715, "learning_rate": 1.5954287021111346e-05, "loss": 2.315578079223633, "memory(GiB)": 72.85, "step": 86210, "token_acc": 0.5143769968051118, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.6937149222398356, "grad_norm": 7.352776527404785, "learning_rate": 1.594935869178093e-05, "loss": 2.050711822509766, "memory(GiB)": 72.85, "step": 86215, "token_acc": 0.5833333333333334, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.6939291375690844, "grad_norm": 6.387104034423828, "learning_rate": 1.5944430979309832e-05, "loss": 2.2562910079956056, "memory(GiB)": 72.85, "step": 86220, "token_acc": 0.4935483870967742, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.694143352898333, "grad_norm": 7.517836570739746, "learning_rate": 1.593950388378732e-05, "loss": 2.259812355041504, "memory(GiB)": 72.85, "step": 86225, "token_acc": 0.5368421052631579, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.6943575682275824, "grad_norm": 5.002760410308838, "learning_rate": 1.5934577405302648e-05, "loss": 2.3487472534179688, "memory(GiB)": 72.85, "step": 86230, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.6945717835568312, "grad_norm": 5.4825053215026855, "learning_rate": 1.5929651543945057e-05, "loss": 2.189303970336914, "memory(GiB)": 72.85, "step": 86235, "token_acc": 0.495114006514658, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.69478599888608, "grad_norm": 4.986804485321045, "learning_rate": 1.5924726299803776e-05, 
"loss": 2.1679414749145507, "memory(GiB)": 72.85, "step": 86240, "token_acc": 0.5447368421052632, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.6950002142153293, "grad_norm": 7.766815185546875, "learning_rate": 1.5919801672968045e-05, "loss": 2.0530141830444335, "memory(GiB)": 72.85, "step": 86245, "token_acc": 0.5559701492537313, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.695214429544578, "grad_norm": 5.009598731994629, "learning_rate": 1.591487766352709e-05, "loss": 2.1698539733886717, "memory(GiB)": 72.85, "step": 86250, "token_acc": 0.5100864553314121, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.695428644873827, "grad_norm": 6.62708044052124, "learning_rate": 1.59099542715701e-05, "loss": 2.268486213684082, "memory(GiB)": 72.85, "step": 86255, "token_acc": 0.5080645161290323, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.695642860203076, "grad_norm": 5.087367057800293, "learning_rate": 1.590503149718627e-05, "loss": 2.271999740600586, "memory(GiB)": 72.85, "step": 86260, "token_acc": 0.5128205128205128, "train_speed(iter/s)": 0.672677 }, { "epoch": 3.695857075532325, "grad_norm": 5.423969745635986, "learning_rate": 1.5900109340464776e-05, "loss": 2.167512130737305, "memory(GiB)": 72.85, "step": 86265, "token_acc": 0.5335570469798657, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.696071290861574, "grad_norm": 4.85966157913208, "learning_rate": 1.5895187801494787e-05, "loss": 2.2004039764404295, "memory(GiB)": 72.85, "step": 86270, "token_acc": 0.4944237918215613, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.696285506190823, "grad_norm": 7.784854888916016, "learning_rate": 1.5890266880365452e-05, "loss": 2.0252384185791015, "memory(GiB)": 72.85, "step": 86275, "token_acc": 0.575, "train_speed(iter/s)": 0.672687 }, { "epoch": 3.696499721520072, "grad_norm": 4.99608039855957, "learning_rate": 1.5885346577165944e-05, "loss": 1.9200685501098633, "memory(GiB)": 72.85, "step": 86280, "token_acc": 0.5686274509803921, "train_speed(iter/s)": 0.672691 }, { 
"epoch": 3.6967139368493207, "grad_norm": 6.1069865226745605, "learning_rate": 1.5880426891985385e-05, "loss": 2.0472665786743165, "memory(GiB)": 72.85, "step": 86285, "token_acc": 0.5169491525423728, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.69692815217857, "grad_norm": 5.8030195236206055, "learning_rate": 1.58755078249129e-05, "loss": 2.1582000732421873, "memory(GiB)": 72.85, "step": 86290, "token_acc": 0.5245901639344263, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.6971423675078188, "grad_norm": 7.368011474609375, "learning_rate": 1.5870589376037604e-05, "loss": 2.2630733489990233, "memory(GiB)": 72.85, "step": 86295, "token_acc": 0.5268817204301075, "train_speed(iter/s)": 0.672699 }, { "epoch": 3.6973565828370676, "grad_norm": 5.415844440460205, "learning_rate": 1.586567154544858e-05, "loss": 2.4160266876220704, "memory(GiB)": 72.85, "step": 86300, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.697570798166317, "grad_norm": 6.040314674377441, "learning_rate": 1.5860754333234952e-05, "loss": 1.8301361083984375, "memory(GiB)": 72.85, "step": 86305, "token_acc": 0.5686274509803921, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.6977850134955657, "grad_norm": 5.186590194702148, "learning_rate": 1.585583773948577e-05, "loss": 2.122397613525391, "memory(GiB)": 72.85, "step": 86310, "token_acc": 0.5088967971530249, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.6979992288248145, "grad_norm": 5.768973350524902, "learning_rate": 1.5850921764290132e-05, "loss": 2.2193328857421877, "memory(GiB)": 72.85, "step": 86315, "token_acc": 0.5251798561151079, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.6982134441540637, "grad_norm": 5.444477081298828, "learning_rate": 1.584600640773708e-05, "loss": 1.9159090042114257, "memory(GiB)": 72.85, "step": 86320, "token_acc": 0.5735735735735735, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.6984276594833125, "grad_norm": 4.64835262298584, "learning_rate": 1.584109166991566e-05, "loss": 
2.2247459411621096, "memory(GiB)": 72.85, "step": 86325, "token_acc": 0.5632183908045977, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.6986418748125613, "grad_norm": 5.7548747062683105, "learning_rate": 1.583617755091491e-05, "loss": 2.060212326049805, "memory(GiB)": 72.85, "step": 86330, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.6988560901418106, "grad_norm": 5.162807941436768, "learning_rate": 1.5831264050823834e-05, "loss": 2.0795310974121093, "memory(GiB)": 72.85, "step": 86335, "token_acc": 0.4811320754716981, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.6990703054710594, "grad_norm": 11.337166786193848, "learning_rate": 1.5826351169731485e-05, "loss": 1.8907066345214845, "memory(GiB)": 72.85, "step": 86340, "token_acc": 0.5447154471544715, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.6992845208003082, "grad_norm": 6.096715927124023, "learning_rate": 1.5821438907726834e-05, "loss": 2.079834747314453, "memory(GiB)": 72.85, "step": 86345, "token_acc": 0.5518394648829431, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.6994987361295575, "grad_norm": 4.757782936096191, "learning_rate": 1.5816527264898882e-05, "loss": 1.9947040557861329, "memory(GiB)": 72.85, "step": 86350, "token_acc": 0.528023598820059, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.6997129514588063, "grad_norm": 6.585903167724609, "learning_rate": 1.5811616241336607e-05, "loss": 2.1525497436523438, "memory(GiB)": 72.85, "step": 86355, "token_acc": 0.5625, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.699927166788055, "grad_norm": 6.116725921630859, "learning_rate": 1.5806705837128953e-05, "loss": 1.9368654251098634, "memory(GiB)": 72.85, "step": 86360, "token_acc": 0.5412186379928315, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.7001413821173044, "grad_norm": 5.569450378417969, "learning_rate": 1.5801796052364926e-05, "loss": 2.4940797805786135, "memory(GiB)": 72.85, "step": 86365, "token_acc": 0.45104895104895104, "train_speed(iter/s)": 0.672698 
}, { "epoch": 3.700355597446553, "grad_norm": 5.000767230987549, "learning_rate": 1.5796886887133435e-05, "loss": 2.0874267578125, "memory(GiB)": 72.85, "step": 86370, "token_acc": 0.5670103092783505, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.700569812775802, "grad_norm": 7.088662624359131, "learning_rate": 1.5791978341523427e-05, "loss": 2.1329532623291017, "memory(GiB)": 72.85, "step": 86375, "token_acc": 0.5185185185185185, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.7007840281050512, "grad_norm": 4.917025566101074, "learning_rate": 1.578707041562382e-05, "loss": 1.9821516036987306, "memory(GiB)": 72.85, "step": 86380, "token_acc": 0.5339805825242718, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.7009982434343, "grad_norm": 7.012786865234375, "learning_rate": 1.5782163109523506e-05, "loss": 2.1813724517822264, "memory(GiB)": 72.85, "step": 86385, "token_acc": 0.5601374570446735, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.701212458763549, "grad_norm": 5.7141313552856445, "learning_rate": 1.5777256423311416e-05, "loss": 1.9300073623657226, "memory(GiB)": 72.85, "step": 86390, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.701426674092798, "grad_norm": 5.0177717208862305, "learning_rate": 1.5772350357076437e-05, "loss": 2.212759590148926, "memory(GiB)": 72.85, "step": 86395, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.701640889422047, "grad_norm": 4.223283767700195, "learning_rate": 1.5767444910907442e-05, "loss": 2.272006607055664, "memory(GiB)": 72.85, "step": 86400, "token_acc": 0.48338368580060426, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.7018551047512958, "grad_norm": 8.160526275634766, "learning_rate": 1.5762540084893297e-05, "loss": 1.9646821975708009, "memory(GiB)": 72.85, "step": 86405, "token_acc": 0.5420560747663551, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.702069320080545, "grad_norm": 4.6359686851501465, "learning_rate": 1.5757635879122857e-05, "loss": 
2.0793060302734374, "memory(GiB)": 72.85, "step": 86410, "token_acc": 0.5, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.702283535409794, "grad_norm": 5.866309642791748, "learning_rate": 1.5752732293684958e-05, "loss": 2.104458236694336, "memory(GiB)": 72.85, "step": 86415, "token_acc": 0.5598705501618123, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.7024977507390426, "grad_norm": 6.741335391998291, "learning_rate": 1.574782932866843e-05, "loss": 2.2369621276855467, "memory(GiB)": 72.85, "step": 86420, "token_acc": 0.49498327759197325, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.702711966068292, "grad_norm": 6.395698070526123, "learning_rate": 1.5742926984162122e-05, "loss": 2.146851921081543, "memory(GiB)": 72.85, "step": 86425, "token_acc": 0.5247524752475248, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.7029261813975407, "grad_norm": 5.83855676651001, "learning_rate": 1.5738025260254823e-05, "loss": 1.9795814514160157, "memory(GiB)": 72.85, "step": 86430, "token_acc": 0.5516014234875445, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.7031403967267895, "grad_norm": 4.827495574951172, "learning_rate": 1.573312415703534e-05, "loss": 2.066577339172363, "memory(GiB)": 72.85, "step": 86435, "token_acc": 0.5480769230769231, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.7033546120560388, "grad_norm": 5.6825432777404785, "learning_rate": 1.572822367459245e-05, "loss": 2.0198801040649412, "memory(GiB)": 72.85, "step": 86440, "token_acc": 0.5789473684210527, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.7035688273852876, "grad_norm": 6.502521514892578, "learning_rate": 1.5723323813014927e-05, "loss": 2.0838050842285156, "memory(GiB)": 72.85, "step": 86445, "token_acc": 0.5509433962264151, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.7037830427145364, "grad_norm": 5.141392707824707, "learning_rate": 1.5718424572391565e-05, "loss": 1.9804733276367188, "memory(GiB)": 72.85, "step": 86450, "token_acc": 0.5596026490066225, "train_speed(iter/s)": 0.672721 }, { 
"epoch": 3.7039972580437857, "grad_norm": 5.997866153717041, "learning_rate": 1.5713525952811102e-05, "loss": 1.9805992126464844, "memory(GiB)": 72.85, "step": 86455, "token_acc": 0.5056179775280899, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.7042114733730345, "grad_norm": 10.603343963623047, "learning_rate": 1.5708627954362265e-05, "loss": 2.206472396850586, "memory(GiB)": 72.85, "step": 86460, "token_acc": 0.5323741007194245, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.7044256887022833, "grad_norm": 5.115133762359619, "learning_rate": 1.5703730577133812e-05, "loss": 2.072423553466797, "memory(GiB)": 72.85, "step": 86465, "token_acc": 0.5521235521235521, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.7046399040315325, "grad_norm": 4.07414436340332, "learning_rate": 1.5698833821214454e-05, "loss": 1.8232982635498047, "memory(GiB)": 72.85, "step": 86470, "token_acc": 0.5615942028985508, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.7048541193607814, "grad_norm": 5.12891960144043, "learning_rate": 1.5693937686692894e-05, "loss": 1.97112979888916, "memory(GiB)": 72.85, "step": 86475, "token_acc": 0.563573883161512, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.70506833469003, "grad_norm": 5.84666633605957, "learning_rate": 1.5689042173657824e-05, "loss": 2.053155708312988, "memory(GiB)": 72.85, "step": 86480, "token_acc": 0.5381526104417671, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.7052825500192794, "grad_norm": 6.616062641143799, "learning_rate": 1.5684147282197954e-05, "loss": 2.1237998962402345, "memory(GiB)": 72.85, "step": 86485, "token_acc": 0.5180722891566265, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.7054967653485282, "grad_norm": 7.497840404510498, "learning_rate": 1.5679253012401947e-05, "loss": 2.3475921630859373, "memory(GiB)": 72.85, "step": 86490, "token_acc": 0.48201438848920863, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.705710980677777, "grad_norm": 8.25999927520752, "learning_rate": 1.567435936435847e-05, "loss": 
2.2953142166137694, "memory(GiB)": 72.85, "step": 86495, "token_acc": 0.5188679245283019, "train_speed(iter/s)": 0.672752 }, { "epoch": 3.7059251960070263, "grad_norm": 4.8559136390686035, "learning_rate": 1.566946633815617e-05, "loss": 2.1178409576416017, "memory(GiB)": 72.85, "step": 86500, "token_acc": 0.532394366197183, "train_speed(iter/s)": 0.672763 }, { "epoch": 3.7059251960070263, "eval_loss": 2.0074985027313232, "eval_runtime": 15.0485, "eval_samples_per_second": 6.645, "eval_steps_per_second": 6.645, "eval_token_acc": 0.5149456521739131, "step": 86500 }, { "epoch": 3.706139411336275, "grad_norm": 6.966310024261475, "learning_rate": 1.5664573933883676e-05, "loss": 2.262262535095215, "memory(GiB)": 72.85, "step": 86505, "token_acc": 0.5081967213114754, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.706353626665524, "grad_norm": 5.39832067489624, "learning_rate": 1.565968215162965e-05, "loss": 2.22857780456543, "memory(GiB)": 72.85, "step": 86510, "token_acc": 0.5288461538461539, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.706567841994773, "grad_norm": 5.052289962768555, "learning_rate": 1.5654790991482697e-05, "loss": 1.959274673461914, "memory(GiB)": 72.85, "step": 86515, "token_acc": 0.5666666666666667, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.706782057324022, "grad_norm": 8.729686737060547, "learning_rate": 1.564990045353143e-05, "loss": 2.4582157135009766, "memory(GiB)": 72.85, "step": 86520, "token_acc": 0.5324232081911263, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.706996272653271, "grad_norm": 6.243490695953369, "learning_rate": 1.5645010537864428e-05, "loss": 2.097795867919922, "memory(GiB)": 72.85, "step": 86525, "token_acc": 0.5474006116207951, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.70721048798252, "grad_norm": 5.853946208953857, "learning_rate": 1.5640121244570284e-05, "loss": 2.143208122253418, "memory(GiB)": 72.85, "step": 86530, "token_acc": 0.5369649805447471, "train_speed(iter/s)": 0.672674 }, { "epoch": 
3.707424703311769, "grad_norm": 7.837860107421875, "learning_rate": 1.5635232573737563e-05, "loss": 1.9770143508911133, "memory(GiB)": 72.85, "step": 86535, "token_acc": 0.5492424242424242, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.7076389186410177, "grad_norm": 6.723422527313232, "learning_rate": 1.5630344525454865e-05, "loss": 2.268754768371582, "memory(GiB)": 72.85, "step": 86540, "token_acc": 0.486013986013986, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.707853133970267, "grad_norm": 6.971726894378662, "learning_rate": 1.5625457099810714e-05, "loss": 2.068669319152832, "memory(GiB)": 72.85, "step": 86545, "token_acc": 0.5049833887043189, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.7080673492995158, "grad_norm": 6.699459552764893, "learning_rate": 1.5620570296893656e-05, "loss": 2.2824024200439452, "memory(GiB)": 72.85, "step": 86550, "token_acc": 0.5230769230769231, "train_speed(iter/s)": 0.672678 }, { "epoch": 3.7082815646287646, "grad_norm": 6.368699550628662, "learning_rate": 1.5615684116792213e-05, "loss": 2.010707473754883, "memory(GiB)": 72.85, "step": 86555, "token_acc": 0.5471014492753623, "train_speed(iter/s)": 0.672687 }, { "epoch": 3.708495779958014, "grad_norm": 5.64551305770874, "learning_rate": 1.5610798559594913e-05, "loss": 2.2510753631591798, "memory(GiB)": 72.85, "step": 86560, "token_acc": 0.5358255451713395, "train_speed(iter/s)": 0.672684 }, { "epoch": 3.7087099952872626, "grad_norm": 7.302831172943115, "learning_rate": 1.5605913625390234e-05, "loss": 1.7187902450561523, "memory(GiB)": 72.85, "step": 86565, "token_acc": 0.5857605177993528, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.7089242106165115, "grad_norm": 5.1705827713012695, "learning_rate": 1.5601029314266718e-05, "loss": 2.148669624328613, "memory(GiB)": 72.85, "step": 86570, "token_acc": 0.5295857988165681, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.7091384259457607, "grad_norm": 6.548347473144531, "learning_rate": 1.559614562631282e-05, "loss": 
2.014787483215332, "memory(GiB)": 72.85, "step": 86575, "token_acc": 0.5565476190476191, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.7093526412750095, "grad_norm": 4.8191914558410645, "learning_rate": 1.5591262561617014e-05, "loss": 2.1504716873168945, "memory(GiB)": 72.85, "step": 86580, "token_acc": 0.4843205574912892, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.7095668566042583, "grad_norm": 6.280990123748779, "learning_rate": 1.5586380120267774e-05, "loss": 2.2795555114746096, "memory(GiB)": 72.85, "step": 86585, "token_acc": 0.52046783625731, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.7097810719335076, "grad_norm": 5.726999282836914, "learning_rate": 1.558149830235352e-05, "loss": 2.1543954849243163, "memory(GiB)": 72.85, "step": 86590, "token_acc": 0.5398550724637681, "train_speed(iter/s)": 0.672677 }, { "epoch": 3.7099952872627564, "grad_norm": 5.9785308837890625, "learning_rate": 1.5576617107962727e-05, "loss": 1.777949333190918, "memory(GiB)": 72.85, "step": 86595, "token_acc": 0.6113207547169811, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.710209502592005, "grad_norm": 8.103233337402344, "learning_rate": 1.5571736537183806e-05, "loss": 2.296101379394531, "memory(GiB)": 72.85, "step": 86600, "token_acc": 0.5133333333333333, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.7104237179212545, "grad_norm": 6.097261428833008, "learning_rate": 1.5566856590105155e-05, "loss": 1.859389305114746, "memory(GiB)": 72.85, "step": 86605, "token_acc": 0.5765124555160143, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.7106379332505033, "grad_norm": 4.857625961303711, "learning_rate": 1.556197726681522e-05, "loss": 2.0128726959228516, "memory(GiB)": 72.85, "step": 86610, "token_acc": 0.5677233429394812, "train_speed(iter/s)": 0.672677 }, { "epoch": 3.710852148579752, "grad_norm": 5.668946266174316, "learning_rate": 1.555709856740237e-05, "loss": 2.195042610168457, "memory(GiB)": 72.85, "step": 86615, "token_acc": 0.4966442953020134, "train_speed(iter/s)": 
0.672672 }, { "epoch": 3.7110663639090014, "grad_norm": 6.951694965362549, "learning_rate": 1.555222049195499e-05, "loss": 1.818883514404297, "memory(GiB)": 72.85, "step": 86620, "token_acc": 0.6023166023166023, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.71128057923825, "grad_norm": 4.313561916351318, "learning_rate": 1.5547343040561433e-05, "loss": 2.116372299194336, "memory(GiB)": 72.85, "step": 86625, "token_acc": 0.5283018867924528, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.711494794567499, "grad_norm": 5.211331844329834, "learning_rate": 1.5542466213310092e-05, "loss": 1.843195343017578, "memory(GiB)": 72.85, "step": 86630, "token_acc": 0.5578231292517006, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.7117090098967482, "grad_norm": 5.055698871612549, "learning_rate": 1.55375900102893e-05, "loss": 2.1626638412475585, "memory(GiB)": 72.85, "step": 86635, "token_acc": 0.5298507462686567, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.711923225225997, "grad_norm": 5.296370506286621, "learning_rate": 1.5532714431587393e-05, "loss": 2.056702423095703, "memory(GiB)": 72.85, "step": 86640, "token_acc": 0.5617021276595745, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.712137440555246, "grad_norm": 5.394046783447266, "learning_rate": 1.55278394772927e-05, "loss": 1.9819150924682618, "memory(GiB)": 72.85, "step": 86645, "token_acc": 0.5551724137931034, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.712351655884495, "grad_norm": 6.130615711212158, "learning_rate": 1.5522965147493513e-05, "loss": 1.7819778442382812, "memory(GiB)": 72.85, "step": 86650, "token_acc": 0.6008403361344538, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.712565871213744, "grad_norm": 6.229518890380859, "learning_rate": 1.551809144227817e-05, "loss": 1.8739151000976562, "memory(GiB)": 72.85, "step": 86655, "token_acc": 0.5674603174603174, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.7127800865429927, "grad_norm": 5.716585636138916, "learning_rate": 1.551321836173495e-05, "loss": 
2.3209253311157227, "memory(GiB)": 72.85, "step": 86660, "token_acc": 0.5016722408026756, "train_speed(iter/s)": 0.67271 }, { "epoch": 3.712994301872242, "grad_norm": 6.750881671905518, "learning_rate": 1.5508345905952127e-05, "loss": 2.286024475097656, "memory(GiB)": 72.85, "step": 86665, "token_acc": 0.5050847457627119, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.713208517201491, "grad_norm": 5.160858631134033, "learning_rate": 1.5503474075017975e-05, "loss": 2.222468376159668, "memory(GiB)": 72.85, "step": 86670, "token_acc": 0.47318611987381703, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.7134227325307396, "grad_norm": 6.121030330657959, "learning_rate": 1.549860286902074e-05, "loss": 2.2943058013916016, "memory(GiB)": 72.85, "step": 86675, "token_acc": 0.5125448028673835, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.713636947859989, "grad_norm": 6.34588623046875, "learning_rate": 1.5493732288048673e-05, "loss": 1.860558319091797, "memory(GiB)": 72.85, "step": 86680, "token_acc": 0.5667870036101083, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.7138511631892377, "grad_norm": 5.322757720947266, "learning_rate": 1.5488862332190028e-05, "loss": 2.1094146728515626, "memory(GiB)": 72.85, "step": 86685, "token_acc": 0.5305343511450382, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.714065378518487, "grad_norm": 7.631682395935059, "learning_rate": 1.548399300153302e-05, "loss": 2.4104196548461916, "memory(GiB)": 72.85, "step": 86690, "token_acc": 0.4952681388012618, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.7142795938477358, "grad_norm": 7.455071449279785, "learning_rate": 1.5479124296165857e-05, "loss": 2.2747756958007814, "memory(GiB)": 72.85, "step": 86695, "token_acc": 0.5192307692307693, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.7144938091769846, "grad_norm": 6.698180675506592, "learning_rate": 1.547425621617675e-05, "loss": 2.0762136459350584, "memory(GiB)": 72.85, "step": 86700, "token_acc": 0.540268456375839, "train_speed(iter/s)": 
0.672733 }, { "epoch": 3.714708024506234, "grad_norm": 6.081416130065918, "learning_rate": 1.5469388761653864e-05, "loss": 2.297250175476074, "memory(GiB)": 72.85, "step": 86705, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.7149222398354826, "grad_norm": 5.914706707000732, "learning_rate": 1.5464521932685385e-05, "loss": 2.2461055755615233, "memory(GiB)": 72.85, "step": 86710, "token_acc": 0.5148514851485149, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.7151364551647315, "grad_norm": 5.466361999511719, "learning_rate": 1.5459655729359502e-05, "loss": 2.273588752746582, "memory(GiB)": 72.85, "step": 86715, "token_acc": 0.55, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.7153506704939807, "grad_norm": 5.598830223083496, "learning_rate": 1.545479015176436e-05, "loss": 2.4684749603271485, "memory(GiB)": 72.85, "step": 86720, "token_acc": 0.5050167224080268, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.7155648858232295, "grad_norm": 5.502035140991211, "learning_rate": 1.54499251999881e-05, "loss": 1.9127233505249024, "memory(GiB)": 72.85, "step": 86725, "token_acc": 0.5639097744360902, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.7157791011524783, "grad_norm": 5.08300256729126, "learning_rate": 1.5445060874118848e-05, "loss": 2.2385114669799804, "memory(GiB)": 72.85, "step": 86730, "token_acc": 0.5226586102719033, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.7159933164817276, "grad_norm": 4.974391937255859, "learning_rate": 1.5440197174244727e-05, "loss": 2.0670610427856446, "memory(GiB)": 72.85, "step": 86735, "token_acc": 0.5310344827586206, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.7162075318109764, "grad_norm": 6.701722621917725, "learning_rate": 1.5435334100453863e-05, "loss": 1.9567073822021483, "memory(GiB)": 72.85, "step": 86740, "token_acc": 0.5622775800711743, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.716421747140225, "grad_norm": 6.045121669769287, "learning_rate": 1.543047165283435e-05, "loss": 
2.207575035095215, "memory(GiB)": 72.85, "step": 86745, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.672728 }, { "epoch": 3.7166359624694745, "grad_norm": 5.903063774108887, "learning_rate": 1.5425609831474268e-05, "loss": 2.219739532470703, "memory(GiB)": 72.85, "step": 86750, "token_acc": 0.5275080906148867, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.7168501777987233, "grad_norm": 7.363205432891846, "learning_rate": 1.5420748636461686e-05, "loss": 1.92956600189209, "memory(GiB)": 72.85, "step": 86755, "token_acc": 0.5681818181818182, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.717064393127972, "grad_norm": 5.8108720779418945, "learning_rate": 1.5415888067884693e-05, "loss": 2.143790435791016, "memory(GiB)": 72.85, "step": 86760, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.7172786084572214, "grad_norm": 4.511143207550049, "learning_rate": 1.5411028125831334e-05, "loss": 2.0997547149658202, "memory(GiB)": 72.85, "step": 86765, "token_acc": 0.5103244837758112, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.71749282378647, "grad_norm": 4.697188377380371, "learning_rate": 1.5406168810389625e-05, "loss": 2.023537826538086, "memory(GiB)": 72.85, "step": 86770, "token_acc": 0.5368421052631579, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.717707039115719, "grad_norm": 4.5248122215271, "learning_rate": 1.5401310121647645e-05, "loss": 2.04146728515625, "memory(GiB)": 72.85, "step": 86775, "token_acc": 0.5520833333333334, "train_speed(iter/s)": 0.672741 }, { "epoch": 3.7179212544449682, "grad_norm": 5.249929904937744, "learning_rate": 1.539645205969338e-05, "loss": 2.385578155517578, "memory(GiB)": 72.85, "step": 86780, "token_acc": 0.4817073170731707, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.718135469774217, "grad_norm": 5.560955047607422, "learning_rate": 1.5391594624614852e-05, "loss": 2.095467185974121, "memory(GiB)": 72.85, "step": 86785, "token_acc": 0.5406162464985994, "train_speed(iter/s)": 
0.672707 }, { "epoch": 3.718349685103466, "grad_norm": 5.501263618469238, "learning_rate": 1.5386737816500046e-05, "loss": 1.8163326263427735, "memory(GiB)": 72.85, "step": 86790, "token_acc": 0.5674740484429066, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.718563900432715, "grad_norm": 5.70343017578125, "learning_rate": 1.5381881635436945e-05, "loss": 2.1242769241333006, "memory(GiB)": 72.85, "step": 86795, "token_acc": 0.5198863636363636, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.718778115761964, "grad_norm": 3.82246994972229, "learning_rate": 1.5377026081513545e-05, "loss": 1.894679069519043, "memory(GiB)": 72.85, "step": 86800, "token_acc": 0.5596026490066225, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.7189923310912127, "grad_norm": 7.192368984222412, "learning_rate": 1.5372171154817798e-05, "loss": 2.0531978607177734, "memory(GiB)": 72.85, "step": 86805, "token_acc": 0.5596707818930041, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.719206546420462, "grad_norm": 6.634958744049072, "learning_rate": 1.5367316855437657e-05, "loss": 2.216804313659668, "memory(GiB)": 72.85, "step": 86810, "token_acc": 0.5225225225225225, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.719420761749711, "grad_norm": 4.6478729248046875, "learning_rate": 1.5362463183461058e-05, "loss": 2.042850685119629, "memory(GiB)": 72.85, "step": 86815, "token_acc": 0.5425867507886435, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.7196349770789596, "grad_norm": 4.058072566986084, "learning_rate": 1.535761013897591e-05, "loss": 2.1317626953125, "memory(GiB)": 72.85, "step": 86820, "token_acc": 0.5615384615384615, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.719849192408209, "grad_norm": 7.923584461212158, "learning_rate": 1.5352757722070172e-05, "loss": 1.8691228866577148, "memory(GiB)": 72.85, "step": 86825, "token_acc": 0.5734265734265734, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.7200634077374577, "grad_norm": 5.838871479034424, "learning_rate": 1.5347905932831713e-05, 
"loss": 2.067088317871094, "memory(GiB)": 72.85, "step": 86830, "token_acc": 0.5610687022900763, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.7202776230667065, "grad_norm": 6.6249494552612305, "learning_rate": 1.534305477134846e-05, "loss": 2.03299617767334, "memory(GiB)": 72.85, "step": 86835, "token_acc": 0.5427509293680297, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.7204918383959558, "grad_norm": 4.982351303100586, "learning_rate": 1.5338204237708283e-05, "loss": 2.1358089447021484, "memory(GiB)": 72.85, "step": 86840, "token_acc": 0.5141065830721003, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.7207060537252046, "grad_norm": 8.50231647491455, "learning_rate": 1.5333354331999045e-05, "loss": 1.8314529418945313, "memory(GiB)": 72.85, "step": 86845, "token_acc": 0.5587188612099644, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.7209202690544534, "grad_norm": 5.830309867858887, "learning_rate": 1.5328505054308616e-05, "loss": 2.1795711517333984, "memory(GiB)": 72.85, "step": 86850, "token_acc": 0.5777126099706745, "train_speed(iter/s)": 0.672715 }, { "epoch": 3.7211344843837026, "grad_norm": 5.1304121017456055, "learning_rate": 1.5323656404724827e-05, "loss": 1.7836666107177734, "memory(GiB)": 72.85, "step": 86855, "token_acc": 0.6387832699619772, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.7213486997129515, "grad_norm": 6.390644073486328, "learning_rate": 1.5318808383335553e-05, "loss": 1.9382518768310546, "memory(GiB)": 72.85, "step": 86860, "token_acc": 0.5348837209302325, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.7215629150422003, "grad_norm": 4.580824375152588, "learning_rate": 1.5313960990228593e-05, "loss": 1.853339385986328, "memory(GiB)": 72.85, "step": 86865, "token_acc": 0.5754716981132075, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.7217771303714495, "grad_norm": 4.582234859466553, "learning_rate": 1.5309114225491765e-05, "loss": 1.9544664382934571, "memory(GiB)": 72.85, "step": 86870, "token_acc": 0.5614617940199336, 
"train_speed(iter/s)": 0.672716 }, { "epoch": 3.7219913457006983, "grad_norm": 6.547107696533203, "learning_rate": 1.5304268089212882e-05, "loss": 1.978966522216797, "memory(GiB)": 72.85, "step": 86875, "token_acc": 0.5625, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.722205561029947, "grad_norm": 4.404630661010742, "learning_rate": 1.529942258147971e-05, "loss": 2.2837392807006838, "memory(GiB)": 72.85, "step": 86880, "token_acc": 0.5126353790613718, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.7224197763591964, "grad_norm": 6.794259071350098, "learning_rate": 1.5294577702380064e-05, "loss": 1.6351064682006835, "memory(GiB)": 72.85, "step": 86885, "token_acc": 0.5905172413793104, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.722633991688445, "grad_norm": 5.034305095672607, "learning_rate": 1.5289733452001704e-05, "loss": 2.2839927673339844, "memory(GiB)": 72.85, "step": 86890, "token_acc": 0.4785276073619632, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.722848207017694, "grad_norm": 6.161484241485596, "learning_rate": 1.5284889830432382e-05, "loss": 2.125052070617676, "memory(GiB)": 72.85, "step": 86895, "token_acc": 0.5508196721311476, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.7230624223469433, "grad_norm": 7.931238651275635, "learning_rate": 1.5280046837759843e-05, "loss": 2.003940391540527, "memory(GiB)": 72.85, "step": 86900, "token_acc": 0.5472312703583062, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.723276637676192, "grad_norm": 5.535980701446533, "learning_rate": 1.5275204474071815e-05, "loss": 1.9399944305419923, "memory(GiB)": 72.85, "step": 86905, "token_acc": 0.594017094017094, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.723490853005441, "grad_norm": 8.965337753295898, "learning_rate": 1.5270362739456044e-05, "loss": 1.9517599105834962, "memory(GiB)": 72.85, "step": 86910, "token_acc": 0.5587044534412956, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.72370506833469, "grad_norm": 5.359368324279785, "learning_rate": 
1.526552163400022e-05, "loss": 2.0733341217041015, "memory(GiB)": 72.85, "step": 86915, "token_acc": 0.5699300699300699, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.723919283663939, "grad_norm": 4.546002388000488, "learning_rate": 1.5260681157792063e-05, "loss": 1.8702579498291017, "memory(GiB)": 72.85, "step": 86920, "token_acc": 0.5540540540540541, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.724133498993188, "grad_norm": 5.83521842956543, "learning_rate": 1.5255841310919262e-05, "loss": 2.119101715087891, "memory(GiB)": 72.85, "step": 86925, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.724347714322437, "grad_norm": 7.2166032791137695, "learning_rate": 1.5251002093469485e-05, "loss": 1.9969650268554688, "memory(GiB)": 72.85, "step": 86930, "token_acc": 0.5830258302583026, "train_speed(iter/s)": 0.67269 }, { "epoch": 3.724561929651686, "grad_norm": 6.547573089599609, "learning_rate": 1.5246163505530403e-05, "loss": 1.9166763305664063, "memory(GiB)": 72.85, "step": 86935, "token_acc": 0.5590551181102362, "train_speed(iter/s)": 0.672684 }, { "epoch": 3.7247761449809347, "grad_norm": 5.5582499504089355, "learning_rate": 1.5241325547189655e-05, "loss": 2.3454065322875977, "memory(GiB)": 72.85, "step": 86940, "token_acc": 0.5036496350364964, "train_speed(iter/s)": 0.672685 }, { "epoch": 3.724990360310184, "grad_norm": 5.456287384033203, "learning_rate": 1.5236488218534917e-05, "loss": 1.952935791015625, "memory(GiB)": 72.85, "step": 86945, "token_acc": 0.5272727272727272, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.7252045756394327, "grad_norm": 8.123308181762695, "learning_rate": 1.5231651519653806e-05, "loss": 2.227956199645996, "memory(GiB)": 72.85, "step": 86950, "token_acc": 0.5202312138728323, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.7254187909686816, "grad_norm": 6.60164737701416, "learning_rate": 1.5226815450633941e-05, "loss": 2.2693334579467774, "memory(GiB)": 72.85, "step": 86955, "token_acc": 
0.5207100591715976, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.725633006297931, "grad_norm": 5.305706024169922, "learning_rate": 1.5221980011562936e-05, "loss": 2.03399772644043, "memory(GiB)": 72.85, "step": 86960, "token_acc": 0.5418060200668896, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.7258472216271796, "grad_norm": 6.304851531982422, "learning_rate": 1.5217145202528371e-05, "loss": 1.8868118286132813, "memory(GiB)": 72.85, "step": 86965, "token_acc": 0.571969696969697, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.7260614369564284, "grad_norm": 9.921913146972656, "learning_rate": 1.5212311023617864e-05, "loss": 2.1898178100585937, "memory(GiB)": 72.85, "step": 86970, "token_acc": 0.5296296296296297, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.7262756522856777, "grad_norm": 5.278691291809082, "learning_rate": 1.5207477474918963e-05, "loss": 2.0884864807128904, "memory(GiB)": 72.85, "step": 86975, "token_acc": 0.5629139072847682, "train_speed(iter/s)": 0.67269 }, { "epoch": 3.7264898676149265, "grad_norm": 5.163815021514893, "learning_rate": 1.5202644556519258e-05, "loss": 2.2922155380249025, "memory(GiB)": 72.85, "step": 86980, "token_acc": 0.5272108843537415, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.7267040829441753, "grad_norm": 5.489591598510742, "learning_rate": 1.5197812268506295e-05, "loss": 2.1000158309936525, "memory(GiB)": 72.85, "step": 86985, "token_acc": 0.5787401574803149, "train_speed(iter/s)": 0.672677 }, { "epoch": 3.7269182982734246, "grad_norm": 6.451738357543945, "learning_rate": 1.5192980610967606e-05, "loss": 1.9825336456298828, "memory(GiB)": 72.85, "step": 86990, "token_acc": 0.5709459459459459, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.7271325136026734, "grad_norm": 7.543302059173584, "learning_rate": 1.5188149583990725e-05, "loss": 1.8466911315917969, "memory(GiB)": 72.85, "step": 86995, "token_acc": 0.5809859154929577, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.727346728931922, "grad_norm": 
5.246976375579834, "learning_rate": 1.5183319187663152e-05, "loss": 2.3582115173339844, "memory(GiB)": 72.85, "step": 87000, "token_acc": 0.5034965034965035, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.727346728931922, "eval_loss": 2.1200571060180664, "eval_runtime": 15.0502, "eval_samples_per_second": 6.644, "eval_steps_per_second": 6.644, "eval_token_acc": 0.49303621169916434, "step": 87000 }, { "epoch": 3.7275609442611715, "grad_norm": 7.792636394500732, "learning_rate": 1.5178489422072434e-05, "loss": 2.2387603759765624, "memory(GiB)": 72.85, "step": 87005, "token_acc": 0.49691358024691357, "train_speed(iter/s)": 0.672569 }, { "epoch": 3.7277751595904203, "grad_norm": 6.062155723571777, "learning_rate": 1.5173660287306046e-05, "loss": 2.004137420654297, "memory(GiB)": 72.85, "step": 87010, "token_acc": 0.5309090909090909, "train_speed(iter/s)": 0.672568 }, { "epoch": 3.727989374919669, "grad_norm": 5.420860290527344, "learning_rate": 1.5168831783451465e-05, "loss": 1.9797210693359375, "memory(GiB)": 72.85, "step": 87015, "token_acc": 0.5567010309278351, "train_speed(iter/s)": 0.672575 }, { "epoch": 3.7282035902489183, "grad_norm": 5.3093085289001465, "learning_rate": 1.5164003910596176e-05, "loss": 2.149758148193359, "memory(GiB)": 72.85, "step": 87020, "token_acc": 0.5537459283387622, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.728417805578167, "grad_norm": 6.590810775756836, "learning_rate": 1.5159176668827618e-05, "loss": 2.153742790222168, "memory(GiB)": 72.85, "step": 87025, "token_acc": 0.49337748344370863, "train_speed(iter/s)": 0.672568 }, { "epoch": 3.7286320209074164, "grad_norm": 5.494441032409668, "learning_rate": 1.5154350058233275e-05, "loss": 2.104127311706543, "memory(GiB)": 72.85, "step": 87030, "token_acc": 0.5428571428571428, "train_speed(iter/s)": 0.67257 }, { "epoch": 3.7288462362366652, "grad_norm": 5.323612213134766, "learning_rate": 1.5149524078900562e-05, "loss": 1.9647632598876954, "memory(GiB)": 72.85, "step": 87035, 
"token_acc": 0.54421768707483, "train_speed(iter/s)": 0.672571 }, { "epoch": 3.729060451565914, "grad_norm": 7.209998607635498, "learning_rate": 1.5144698730916918e-05, "loss": 2.1788888931274415, "memory(GiB)": 72.85, "step": 87040, "token_acc": 0.4882943143812709, "train_speed(iter/s)": 0.672581 }, { "epoch": 3.7292746668951633, "grad_norm": 5.46712064743042, "learning_rate": 1.5139874014369754e-05, "loss": 2.1907896041870116, "memory(GiB)": 72.85, "step": 87045, "token_acc": 0.5040322580645161, "train_speed(iter/s)": 0.672579 }, { "epoch": 3.729488882224412, "grad_norm": 6.529107093811035, "learning_rate": 1.5135049929346456e-05, "loss": 2.2381965637207033, "memory(GiB)": 72.85, "step": 87050, "token_acc": 0.5032679738562091, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.729703097553661, "grad_norm": 6.927433490753174, "learning_rate": 1.5130226475934451e-05, "loss": 2.1337804794311523, "memory(GiB)": 72.85, "step": 87055, "token_acc": 0.553030303030303, "train_speed(iter/s)": 0.672577 }, { "epoch": 3.72991731288291, "grad_norm": 9.511605262756348, "learning_rate": 1.5125403654221088e-05, "loss": 2.3624786376953124, "memory(GiB)": 72.85, "step": 87060, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.672578 }, { "epoch": 3.730131528212159, "grad_norm": 5.073268413543701, "learning_rate": 1.512058146429377e-05, "loss": 2.039594268798828, "memory(GiB)": 72.85, "step": 87065, "token_acc": 0.5062893081761006, "train_speed(iter/s)": 0.672583 }, { "epoch": 3.730345743541408, "grad_norm": 5.602399826049805, "learning_rate": 1.5115759906239835e-05, "loss": 1.7648170471191407, "memory(GiB)": 72.85, "step": 87070, "token_acc": 0.6090534979423868, "train_speed(iter/s)": 0.672594 }, { "epoch": 3.730559958870657, "grad_norm": 4.673177242279053, "learning_rate": 1.5110938980146638e-05, "loss": 2.131577491760254, "memory(GiB)": 72.85, "step": 87075, "token_acc": 0.5538461538461539, "train_speed(iter/s)": 0.672595 }, { "epoch": 3.730774174199906, "grad_norm": 
8.135040283203125, "learning_rate": 1.5106118686101505e-05, "loss": 1.7963117599487304, "memory(GiB)": 72.85, "step": 87080, "token_acc": 0.535593220338983, "train_speed(iter/s)": 0.672597 }, { "epoch": 3.7309883895291547, "grad_norm": 6.700455188751221, "learning_rate": 1.510129902419175e-05, "loss": 2.1577518463134764, "memory(GiB)": 72.85, "step": 87085, "token_acc": 0.49838187702265374, "train_speed(iter/s)": 0.672598 }, { "epoch": 3.731202604858404, "grad_norm": 4.717530727386475, "learning_rate": 1.509647999450472e-05, "loss": 2.020428276062012, "memory(GiB)": 72.85, "step": 87090, "token_acc": 0.5282392026578073, "train_speed(iter/s)": 0.672602 }, { "epoch": 3.7314168201876527, "grad_norm": 6.315904140472412, "learning_rate": 1.5091661597127694e-05, "loss": 2.485057830810547, "memory(GiB)": 72.85, "step": 87095, "token_acc": 0.4885245901639344, "train_speed(iter/s)": 0.672606 }, { "epoch": 3.7316310355169016, "grad_norm": 6.346518039703369, "learning_rate": 1.5086843832147968e-05, "loss": 2.2222316741943358, "memory(GiB)": 72.85, "step": 87100, "token_acc": 0.5241379310344828, "train_speed(iter/s)": 0.67261 }, { "epoch": 3.731845250846151, "grad_norm": 6.705410480499268, "learning_rate": 1.5082026699652813e-05, "loss": 2.378096008300781, "memory(GiB)": 72.85, "step": 87105, "token_acc": 0.4723127035830619, "train_speed(iter/s)": 0.672613 }, { "epoch": 3.7320594661753996, "grad_norm": 4.61197566986084, "learning_rate": 1.507721019972949e-05, "loss": 2.0266653060913087, "memory(GiB)": 72.85, "step": 87110, "token_acc": 0.5769230769230769, "train_speed(iter/s)": 0.672615 }, { "epoch": 3.7322736815046484, "grad_norm": 5.83392333984375, "learning_rate": 1.5072394332465274e-05, "loss": 2.302196502685547, "memory(GiB)": 72.85, "step": 87115, "token_acc": 0.5124223602484472, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.7324878968338977, "grad_norm": 4.4879150390625, "learning_rate": 1.5067579097947404e-05, "loss": 2.2322635650634766, "memory(GiB)": 72.85, "step": 
87120, "token_acc": 0.5389408099688473, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.7327021121631465, "grad_norm": 5.774082183837891, "learning_rate": 1.5062764496263088e-05, "loss": 1.92166748046875, "memory(GiB)": 72.85, "step": 87125, "token_acc": 0.6079136690647482, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.7329163274923953, "grad_norm": 6.772961139678955, "learning_rate": 1.5057950527499587e-05, "loss": 1.9934032440185547, "memory(GiB)": 72.85, "step": 87130, "token_acc": 0.5884353741496599, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.7331305428216446, "grad_norm": 5.810514450073242, "learning_rate": 1.5053137191744088e-05, "loss": 1.9054754257202149, "memory(GiB)": 72.85, "step": 87135, "token_acc": 0.5833333333333334, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.7333447581508934, "grad_norm": 5.678802013397217, "learning_rate": 1.5048324489083793e-05, "loss": 2.0990962982177734, "memory(GiB)": 72.85, "step": 87140, "token_acc": 0.5836575875486382, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.733558973480142, "grad_norm": 4.824166774749756, "learning_rate": 1.504351241960587e-05, "loss": 2.097804641723633, "memory(GiB)": 72.85, "step": 87145, "token_acc": 0.5675675675675675, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.7337731888093915, "grad_norm": 5.712284564971924, "learning_rate": 1.503870098339753e-05, "loss": 2.1247220993041993, "memory(GiB)": 72.85, "step": 87150, "token_acc": 0.5401459854014599, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.7339874041386403, "grad_norm": 5.33682107925415, "learning_rate": 1.5033890180545912e-05, "loss": 2.2419364929199217, "memory(GiB)": 72.85, "step": 87155, "token_acc": 0.49407114624505927, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.734201619467889, "grad_norm": 6.715882301330566, "learning_rate": 1.502908001113818e-05, "loss": 2.007301139831543, "memory(GiB)": 72.85, "step": 87160, "token_acc": 0.5434083601286174, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.7344158347971383, 
"grad_norm": 5.266164779663086, "learning_rate": 1.5024270475261465e-05, "loss": 1.9921138763427735, "memory(GiB)": 72.85, "step": 87165, "token_acc": 0.5748299319727891, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.734630050126387, "grad_norm": 6.0336127281188965, "learning_rate": 1.5019461573002885e-05, "loss": 2.123818588256836, "memory(GiB)": 72.85, "step": 87170, "token_acc": 0.5014245014245015, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.734844265455636, "grad_norm": 6.411330699920654, "learning_rate": 1.5014653304449595e-05, "loss": 2.09227294921875, "memory(GiB)": 72.85, "step": 87175, "token_acc": 0.5160142348754448, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.7350584807848852, "grad_norm": 5.497200012207031, "learning_rate": 1.5009845669688671e-05, "loss": 2.021647644042969, "memory(GiB)": 72.85, "step": 87180, "token_acc": 0.5687830687830688, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.735272696114134, "grad_norm": 5.983733177185059, "learning_rate": 1.500503866880722e-05, "loss": 2.122880744934082, "memory(GiB)": 72.85, "step": 87185, "token_acc": 0.550314465408805, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.735486911443383, "grad_norm": 5.472656726837158, "learning_rate": 1.5000232301892319e-05, "loss": 1.9903770446777345, "memory(GiB)": 72.85, "step": 87190, "token_acc": 0.5551724137931034, "train_speed(iter/s)": 0.67267 }, { "epoch": 3.735701126772632, "grad_norm": 6.537342548370361, "learning_rate": 1.499542656903103e-05, "loss": 1.9043832778930665, "memory(GiB)": 72.85, "step": 87195, "token_acc": 0.5791139240506329, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.735915342101881, "grad_norm": 5.2992777824401855, "learning_rate": 1.4990621470310423e-05, "loss": 2.101601409912109, "memory(GiB)": 72.85, "step": 87200, "token_acc": 0.5018587360594795, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.7361295574311297, "grad_norm": 5.512876510620117, "learning_rate": 1.4985817005817571e-05, "loss": 2.0726613998413086, "memory(GiB)": 
72.85, "step": 87205, "token_acc": 0.5397350993377483, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.736343772760379, "grad_norm": 5.996299743652344, "learning_rate": 1.498101317563948e-05, "loss": 2.054399299621582, "memory(GiB)": 72.85, "step": 87210, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.736557988089628, "grad_norm": 5.476770877838135, "learning_rate": 1.4976209979863192e-05, "loss": 1.9285303115844727, "memory(GiB)": 72.85, "step": 87215, "token_acc": 0.5656934306569343, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.7367722034188766, "grad_norm": 4.665248394012451, "learning_rate": 1.4971407418575716e-05, "loss": 1.9268083572387695, "memory(GiB)": 72.85, "step": 87220, "token_acc": 0.5372549019607843, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.736986418748126, "grad_norm": 5.523993015289307, "learning_rate": 1.4966605491864055e-05, "loss": 2.298764228820801, "memory(GiB)": 72.85, "step": 87225, "token_acc": 0.5032467532467533, "train_speed(iter/s)": 0.672685 }, { "epoch": 3.7372006340773747, "grad_norm": 5.884316921234131, "learning_rate": 1.4961804199815176e-05, "loss": 2.035089111328125, "memory(GiB)": 72.85, "step": 87230, "token_acc": 0.5772058823529411, "train_speed(iter/s)": 0.672692 }, { "epoch": 3.7374148494066235, "grad_norm": 4.926573753356934, "learning_rate": 1.4957003542516108e-05, "loss": 2.2551542282104493, "memory(GiB)": 72.85, "step": 87235, "token_acc": 0.5210843373493976, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.7376290647358728, "grad_norm": 7.597228527069092, "learning_rate": 1.4952203520053786e-05, "loss": 1.8868762969970703, "memory(GiB)": 72.85, "step": 87240, "token_acc": 0.5709219858156028, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.7378432800651216, "grad_norm": 6.244272232055664, "learning_rate": 1.494740413251518e-05, "loss": 2.175887107849121, "memory(GiB)": 72.85, "step": 87245, "token_acc": 0.5061728395061729, "train_speed(iter/s)": 0.672689 }, { "epoch": 
3.7380574953943704, "grad_norm": 5.204878330230713, "learning_rate": 1.4942605379987229e-05, "loss": 2.011366081237793, "memory(GiB)": 72.85, "step": 87250, "token_acc": 0.5734265734265734, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.7382717107236196, "grad_norm": 6.605665683746338, "learning_rate": 1.4937807262556846e-05, "loss": 2.138579177856445, "memory(GiB)": 72.85, "step": 87255, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.7384859260528684, "grad_norm": 4.702922344207764, "learning_rate": 1.4933009780310997e-05, "loss": 2.2825763702392576, "memory(GiB)": 72.85, "step": 87260, "token_acc": 0.5231788079470199, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.7387001413821173, "grad_norm": 6.976950645446777, "learning_rate": 1.4928212933336572e-05, "loss": 2.166971778869629, "memory(GiB)": 72.85, "step": 87265, "token_acc": 0.5457875457875457, "train_speed(iter/s)": 0.672678 }, { "epoch": 3.7389143567113665, "grad_norm": 6.01324462890625, "learning_rate": 1.4923416721720458e-05, "loss": 2.099323272705078, "memory(GiB)": 72.85, "step": 87270, "token_acc": 0.5379310344827586, "train_speed(iter/s)": 0.672678 }, { "epoch": 3.7391285720406153, "grad_norm": 5.304335594177246, "learning_rate": 1.491862114554956e-05, "loss": 2.1005882263183593, "memory(GiB)": 72.85, "step": 87275, "token_acc": 0.5431034482758621, "train_speed(iter/s)": 0.672683 }, { "epoch": 3.739342787369864, "grad_norm": 7.182087421417236, "learning_rate": 1.4913826204910759e-05, "loss": 2.169691467285156, "memory(GiB)": 72.85, "step": 87280, "token_acc": 0.5426356589147286, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.7395570026991134, "grad_norm": 6.041727066040039, "learning_rate": 1.4909031899890902e-05, "loss": 1.938551139831543, "memory(GiB)": 72.85, "step": 87285, "token_acc": 0.5571955719557196, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.739771218028362, "grad_norm": 6.1935014724731445, "learning_rate": 1.4904238230576833e-05, "loss": 
2.350965881347656, "memory(GiB)": 72.85, "step": 87290, "token_acc": 0.4952978056426332, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.739985433357611, "grad_norm": 6.240757942199707, "learning_rate": 1.4899445197055434e-05, "loss": 1.9453912734985352, "memory(GiB)": 72.85, "step": 87295, "token_acc": 0.572463768115942, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.7401996486868603, "grad_norm": 9.283761978149414, "learning_rate": 1.489465279941351e-05, "loss": 2.0702884674072264, "memory(GiB)": 72.85, "step": 87300, "token_acc": 0.5857740585774058, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.740413864016109, "grad_norm": 5.970803737640381, "learning_rate": 1.4889861037737879e-05, "loss": 2.0998867034912108, "memory(GiB)": 72.85, "step": 87305, "token_acc": 0.525, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.740628079345358, "grad_norm": 4.73370361328125, "learning_rate": 1.4885069912115357e-05, "loss": 2.2575246810913088, "memory(GiB)": 72.85, "step": 87310, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.67268 }, { "epoch": 3.740842294674607, "grad_norm": 5.483979225158691, "learning_rate": 1.4880279422632715e-05, "loss": 2.4223772048950196, "memory(GiB)": 72.85, "step": 87315, "token_acc": 0.4942857142857143, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.741056510003856, "grad_norm": 4.5155348777771, "learning_rate": 1.4875489569376772e-05, "loss": 1.929145050048828, "memory(GiB)": 72.85, "step": 87320, "token_acc": 0.5490909090909091, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.741270725333105, "grad_norm": 5.731226444244385, "learning_rate": 1.487070035243428e-05, "loss": 2.368291473388672, "memory(GiB)": 72.85, "step": 87325, "token_acc": 0.4886731391585761, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.741484940662354, "grad_norm": 6.495553016662598, "learning_rate": 1.486591177189201e-05, "loss": 1.8663434982299805, "memory(GiB)": 72.85, "step": 87330, "token_acc": 0.56957928802589, "train_speed(iter/s)": 0.672703 }, { "epoch": 
3.741699155991603, "grad_norm": 4.988786697387695, "learning_rate": 1.486112382783671e-05, "loss": 2.1089527130126955, "memory(GiB)": 72.85, "step": 87335, "token_acc": 0.5156794425087108, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.7419133713208517, "grad_norm": 6.046002388000488, "learning_rate": 1.4856336520355091e-05, "loss": 2.074066162109375, "memory(GiB)": 72.85, "step": 87340, "token_acc": 0.5, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.742127586650101, "grad_norm": 5.783788681030273, "learning_rate": 1.4851549849533908e-05, "loss": 2.2643238067626954, "memory(GiB)": 72.85, "step": 87345, "token_acc": 0.5426621160409556, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.7423418019793497, "grad_norm": 6.530419826507568, "learning_rate": 1.4846763815459891e-05, "loss": 2.504994583129883, "memory(GiB)": 72.85, "step": 87350, "token_acc": 0.5079365079365079, "train_speed(iter/s)": 0.672688 }, { "epoch": 3.7425560173085985, "grad_norm": 6.068699359893799, "learning_rate": 1.4841978418219715e-05, "loss": 1.9285697937011719, "memory(GiB)": 72.85, "step": 87355, "token_acc": 0.543859649122807, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.742770232637848, "grad_norm": 5.499575138092041, "learning_rate": 1.4837193657900089e-05, "loss": 2.276198959350586, "memory(GiB)": 72.85, "step": 87360, "token_acc": 0.4944237918215613, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.7429844479670966, "grad_norm": 6.1500468254089355, "learning_rate": 1.4832409534587682e-05, "loss": 2.1788686752319335, "memory(GiB)": 72.85, "step": 87365, "token_acc": 0.5301587301587302, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.7431986632963454, "grad_norm": 5.333981513977051, "learning_rate": 1.4827626048369164e-05, "loss": 2.0604286193847656, "memory(GiB)": 72.85, "step": 87370, "token_acc": 0.5666666666666667, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.7434128786255947, "grad_norm": 5.692989826202393, "learning_rate": 1.4822843199331182e-05, "loss": 1.9727176666259765, 
"memory(GiB)": 72.85, "step": 87375, "token_acc": 0.5667870036101083, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.7436270939548435, "grad_norm": 5.809737205505371, "learning_rate": 1.4818060987560406e-05, "loss": 2.117966079711914, "memory(GiB)": 72.85, "step": 87380, "token_acc": 0.5708502024291497, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.7438413092840923, "grad_norm": 4.757686614990234, "learning_rate": 1.4813279413143461e-05, "loss": 2.1158275604248047, "memory(GiB)": 72.85, "step": 87385, "token_acc": 0.5209790209790209, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.7440555246133416, "grad_norm": 6.079452991485596, "learning_rate": 1.4808498476166971e-05, "loss": 2.007116508483887, "memory(GiB)": 72.85, "step": 87390, "token_acc": 0.5574324324324325, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.7442697399425904, "grad_norm": 8.058281898498535, "learning_rate": 1.480371817671754e-05, "loss": 2.3046249389648437, "memory(GiB)": 72.85, "step": 87395, "token_acc": 0.523121387283237, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.744483955271839, "grad_norm": 5.41019868850708, "learning_rate": 1.4798938514881754e-05, "loss": 1.8544073104858398, "memory(GiB)": 72.85, "step": 87400, "token_acc": 0.5783475783475783, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.7446981706010884, "grad_norm": 6.30548620223999, "learning_rate": 1.4794159490746235e-05, "loss": 2.168489456176758, "memory(GiB)": 72.85, "step": 87405, "token_acc": 0.5248447204968945, "train_speed(iter/s)": 0.672715 }, { "epoch": 3.7449123859303373, "grad_norm": 7.541922092437744, "learning_rate": 1.4789381104397543e-05, "loss": 1.8598787307739257, "memory(GiB)": 72.85, "step": 87410, "token_acc": 0.5729166666666666, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.745126601259586, "grad_norm": 10.1524658203125, "learning_rate": 1.4784603355922244e-05, "loss": 2.1403650283813476, "memory(GiB)": 72.85, "step": 87415, "token_acc": 0.5519713261648745, "train_speed(iter/s)": 0.672722 }, { 
"epoch": 3.7453408165888353, "grad_norm": 6.612679481506348, "learning_rate": 1.477982624540687e-05, "loss": 2.4564979553222654, "memory(GiB)": 72.85, "step": 87420, "token_acc": 0.5016722408026756, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.745555031918084, "grad_norm": 5.777815818786621, "learning_rate": 1.4775049772937999e-05, "loss": 2.0765893936157225, "memory(GiB)": 72.85, "step": 87425, "token_acc": 0.5708333333333333, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.745769247247333, "grad_norm": 5.443716049194336, "learning_rate": 1.4770273938602146e-05, "loss": 2.3338626861572265, "memory(GiB)": 72.85, "step": 87430, "token_acc": 0.5370370370370371, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.745983462576582, "grad_norm": 6.827422618865967, "learning_rate": 1.4765498742485818e-05, "loss": 2.2439285278320313, "memory(GiB)": 72.85, "step": 87435, "token_acc": 0.46808510638297873, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.746197677905831, "grad_norm": 4.933470249176025, "learning_rate": 1.4760724184675546e-05, "loss": 2.020599937438965, "memory(GiB)": 72.85, "step": 87440, "token_acc": 0.5382513661202186, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.74641189323508, "grad_norm": 5.943704128265381, "learning_rate": 1.4755950265257806e-05, "loss": 2.0164211273193358, "memory(GiB)": 72.85, "step": 87445, "token_acc": 0.5343511450381679, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.746626108564329, "grad_norm": 5.95210599899292, "learning_rate": 1.4751176984319093e-05, "loss": 2.153461456298828, "memory(GiB)": 72.85, "step": 87450, "token_acc": 0.5070422535211268, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.746840323893578, "grad_norm": 6.546492576599121, "learning_rate": 1.474640434194588e-05, "loss": 1.955265998840332, "memory(GiB)": 72.85, "step": 87455, "token_acc": 0.534375, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.7470545392228267, "grad_norm": 7.638839244842529, "learning_rate": 1.4741632338224598e-05, "loss": 
2.171259117126465, "memory(GiB)": 72.85, "step": 87460, "token_acc": 0.5431034482758621, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.747268754552076, "grad_norm": 5.534433841705322, "learning_rate": 1.4736860973241746e-05, "loss": 2.1301408767700196, "memory(GiB)": 72.85, "step": 87465, "token_acc": 0.5477941176470589, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.747482969881325, "grad_norm": 5.643470287322998, "learning_rate": 1.4732090247083729e-05, "loss": 2.091851234436035, "memory(GiB)": 72.85, "step": 87470, "token_acc": 0.5348101265822784, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.7476971852105736, "grad_norm": 5.0989179611206055, "learning_rate": 1.4727320159836988e-05, "loss": 2.0659393310546874, "memory(GiB)": 72.85, "step": 87475, "token_acc": 0.5705329153605015, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.747911400539823, "grad_norm": 5.18169641494751, "learning_rate": 1.4722550711587924e-05, "loss": 2.0901262283325197, "memory(GiB)": 72.85, "step": 87480, "token_acc": 0.5, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.7481256158690717, "grad_norm": 6.21730899810791, "learning_rate": 1.4717781902422939e-05, "loss": 2.4574167251586916, "memory(GiB)": 72.85, "step": 87485, "token_acc": 0.48727272727272725, "train_speed(iter/s)": 0.672716 }, { "epoch": 3.7483398311983205, "grad_norm": 5.062270641326904, "learning_rate": 1.4713013732428443e-05, "loss": 2.0262985229492188, "memory(GiB)": 72.85, "step": 87490, "token_acc": 0.5535714285714286, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.7485540465275697, "grad_norm": 6.7048420906066895, "learning_rate": 1.470824620169079e-05, "loss": 1.8531967163085938, "memory(GiB)": 72.85, "step": 87495, "token_acc": 0.5964125560538116, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.7487682618568186, "grad_norm": 7.494169235229492, "learning_rate": 1.4703479310296375e-05, "loss": 1.9484521865844726, "memory(GiB)": 72.85, "step": 87500, "token_acc": 0.5354330708661418, "train_speed(iter/s)": 0.672733 
}, { "epoch": 3.7487682618568186, "eval_loss": 2.0004560947418213, "eval_runtime": 15.5627, "eval_samples_per_second": 6.426, "eval_steps_per_second": 6.426, "eval_token_acc": 0.5007194244604316, "step": 87500 }, { "epoch": 3.7489824771860674, "grad_norm": 6.496382713317871, "learning_rate": 1.4698713058331548e-05, "loss": 2.223992347717285, "memory(GiB)": 72.85, "step": 87505, "token_acc": 0.4938650306748466, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.7491966925153166, "grad_norm": 4.389644622802734, "learning_rate": 1.469394744588265e-05, "loss": 2.0453914642333983, "memory(GiB)": 72.85, "step": 87510, "token_acc": 0.532258064516129, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.7494109078445654, "grad_norm": 6.0022735595703125, "learning_rate": 1.4689182473036011e-05, "loss": 1.986384391784668, "memory(GiB)": 72.85, "step": 87515, "token_acc": 0.535483870967742, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.7496251231738142, "grad_norm": 5.0477142333984375, "learning_rate": 1.4684418139877942e-05, "loss": 1.8620752334594726, "memory(GiB)": 72.85, "step": 87520, "token_acc": 0.5625, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.7498393385030635, "grad_norm": 9.154969215393066, "learning_rate": 1.4679654446494784e-05, "loss": 2.2199548721313476, "memory(GiB)": 72.85, "step": 87525, "token_acc": 0.5129151291512916, "train_speed(iter/s)": 0.67265 }, { "epoch": 3.7500535538323123, "grad_norm": 6.904919147491455, "learning_rate": 1.4674891392972817e-05, "loss": 2.283941650390625, "memory(GiB)": 72.85, "step": 87530, "token_acc": 0.5069444444444444, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.750267769161561, "grad_norm": 6.285868167877197, "learning_rate": 1.4670128979398329e-05, "loss": 2.1147422790527344, "memory(GiB)": 72.85, "step": 87535, "token_acc": 0.5849056603773585, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.7504819844908104, "grad_norm": 4.785617828369141, "learning_rate": 1.4665367205857593e-05, "loss": 2.0986186981201174, "memory(GiB)": 
72.85, "step": 87540, "token_acc": 0.5228070175438596, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.750696199820059, "grad_norm": 6.377664566040039, "learning_rate": 1.4660606072436866e-05, "loss": 1.8787572860717774, "memory(GiB)": 72.85, "step": 87545, "token_acc": 0.5800711743772242, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.750910415149308, "grad_norm": 4.06596040725708, "learning_rate": 1.4655845579222426e-05, "loss": 2.3411813735961915, "memory(GiB)": 72.85, "step": 87550, "token_acc": 0.5459940652818991, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.7511246304785573, "grad_norm": 4.721414566040039, "learning_rate": 1.4651085726300501e-05, "loss": 2.047986602783203, "memory(GiB)": 72.85, "step": 87555, "token_acc": 0.5218978102189781, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.751338845807806, "grad_norm": 7.82835054397583, "learning_rate": 1.464632651375732e-05, "loss": 2.3109764099121093, "memory(GiB)": 72.85, "step": 87560, "token_acc": 0.5302013422818792, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.751553061137055, "grad_norm": 6.923000335693359, "learning_rate": 1.4641567941679091e-05, "loss": 2.0882286071777343, "memory(GiB)": 72.85, "step": 87565, "token_acc": 0.5517241379310345, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.751767276466304, "grad_norm": 5.68706750869751, "learning_rate": 1.463681001015202e-05, "loss": 2.1474029541015627, "memory(GiB)": 72.85, "step": 87570, "token_acc": 0.5462184873949579, "train_speed(iter/s)": 0.672676 }, { "epoch": 3.751981491795553, "grad_norm": 6.012629985809326, "learning_rate": 1.4632052719262319e-05, "loss": 2.2021459579467773, "memory(GiB)": 72.85, "step": 87575, "token_acc": 0.5451127819548872, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.7521957071248018, "grad_norm": 8.23105525970459, "learning_rate": 1.4627296069096147e-05, "loss": 1.8127967834472656, "memory(GiB)": 72.85, "step": 87580, "token_acc": 0.5411764705882353, "train_speed(iter/s)": 0.672671 }, { "epoch": 
3.752409922454051, "grad_norm": 7.307677268981934, "learning_rate": 1.4622540059739703e-05, "loss": 2.497928237915039, "memory(GiB)": 72.85, "step": 87585, "token_acc": 0.4874551971326165, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.7526241377833, "grad_norm": 5.1731953620910645, "learning_rate": 1.4618735713695454e-05, "loss": 2.2319108963012697, "memory(GiB)": 72.85, "step": 87590, "token_acc": 0.5245283018867924, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.7528383531125487, "grad_norm": 5.140832901000977, "learning_rate": 1.461398085801361e-05, "loss": 1.907326889038086, "memory(GiB)": 72.85, "step": 87595, "token_acc": 0.5870307167235495, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.753052568441798, "grad_norm": 7.405152797698975, "learning_rate": 1.460922664338268e-05, "loss": 2.0143102645874023, "memory(GiB)": 72.85, "step": 87600, "token_acc": 0.5783582089552238, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.7532667837710467, "grad_norm": 6.403111934661865, "learning_rate": 1.4604473069888824e-05, "loss": 2.1631826400756835, "memory(GiB)": 72.85, "step": 87605, "token_acc": 0.5567010309278351, "train_speed(iter/s)": 0.672674 }, { "epoch": 3.7534809991002955, "grad_norm": 6.377440929412842, "learning_rate": 1.4599720137618144e-05, "loss": 2.0308792114257814, "memory(GiB)": 72.85, "step": 87610, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.753695214429545, "grad_norm": 4.8025641441345215, "learning_rate": 1.4594967846656732e-05, "loss": 1.995189094543457, "memory(GiB)": 72.85, "step": 87615, "token_acc": 0.5477031802120141, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.7539094297587936, "grad_norm": 6.671694755554199, "learning_rate": 1.4590216197090701e-05, "loss": 2.437120056152344, "memory(GiB)": 72.85, "step": 87620, "token_acc": 0.46496815286624205, "train_speed(iter/s)": 0.672686 }, { "epoch": 3.7541236450880424, "grad_norm": 6.913819313049316, "learning_rate": 1.4585465189006121e-05, "loss": 
2.297161102294922, "memory(GiB)": 72.85, "step": 87625, "token_acc": 0.5321428571428571, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.7543378604172917, "grad_norm": 4.988941669464111, "learning_rate": 1.4580714822489038e-05, "loss": 2.1831314086914064, "memory(GiB)": 72.85, "step": 87630, "token_acc": 0.504424778761062, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.7545520757465405, "grad_norm": 6.558313846588135, "learning_rate": 1.4575965097625555e-05, "loss": 1.627437210083008, "memory(GiB)": 72.85, "step": 87635, "token_acc": 0.6019417475728155, "train_speed(iter/s)": 0.67271 }, { "epoch": 3.7547662910757893, "grad_norm": 5.577802658081055, "learning_rate": 1.4571216014501698e-05, "loss": 2.122639846801758, "memory(GiB)": 72.85, "step": 87640, "token_acc": 0.5317460317460317, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.7549805064050386, "grad_norm": 5.62770938873291, "learning_rate": 1.456646757320349e-05, "loss": 2.0633769989013673, "memory(GiB)": 72.85, "step": 87645, "token_acc": 0.5234899328859061, "train_speed(iter/s)": 0.672712 }, { "epoch": 3.7551947217342874, "grad_norm": 6.303351879119873, "learning_rate": 1.4561719773816968e-05, "loss": 1.9760627746582031, "memory(GiB)": 72.85, "step": 87650, "token_acc": 0.5415282392026578, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.755408937063536, "grad_norm": 5.682805061340332, "learning_rate": 1.4556972616428115e-05, "loss": 2.0815881729125976, "memory(GiB)": 72.85, "step": 87655, "token_acc": 0.504885993485342, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.7556231523927854, "grad_norm": 6.041965484619141, "learning_rate": 1.4552226101122968e-05, "loss": 2.1687314987182615, "memory(GiB)": 72.85, "step": 87660, "token_acc": 0.5280898876404494, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.7558373677220342, "grad_norm": 5.813294887542725, "learning_rate": 1.4547480227987498e-05, "loss": 2.15643253326416, "memory(GiB)": 72.85, "step": 87665, "token_acc": 0.5168195718654435, "train_speed(iter/s)": 
0.672728 }, { "epoch": 3.756051583051283, "grad_norm": 6.663804531097412, "learning_rate": 1.4542734997107682e-05, "loss": 1.9393976211547852, "memory(GiB)": 72.85, "step": 87670, "token_acc": 0.6019108280254777, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.7562657983805323, "grad_norm": 7.279977321624756, "learning_rate": 1.4537990408569484e-05, "loss": 2.295669746398926, "memory(GiB)": 72.85, "step": 87675, "token_acc": 0.5096153846153846, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.756480013709781, "grad_norm": 5.192126750946045, "learning_rate": 1.4533246462458855e-05, "loss": 1.8341482162475586, "memory(GiB)": 72.85, "step": 87680, "token_acc": 0.5951417004048583, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.75669422903903, "grad_norm": 7.173250675201416, "learning_rate": 1.4528503158861722e-05, "loss": 1.9309490203857422, "memory(GiB)": 72.85, "step": 87685, "token_acc": 0.5632911392405063, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.756908444368279, "grad_norm": 5.346771240234375, "learning_rate": 1.4523760497864025e-05, "loss": 2.0142555236816406, "memory(GiB)": 72.85, "step": 87690, "token_acc": 0.5283018867924528, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.757122659697528, "grad_norm": 5.0697855949401855, "learning_rate": 1.4519018479551705e-05, "loss": 1.951494026184082, "memory(GiB)": 72.85, "step": 87695, "token_acc": 0.5601374570446735, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.757336875026777, "grad_norm": 5.269640922546387, "learning_rate": 1.4514277104010648e-05, "loss": 2.1771213531494142, "memory(GiB)": 72.85, "step": 87700, "token_acc": 0.5862068965517241, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.757551090356026, "grad_norm": 8.668744087219238, "learning_rate": 1.4509536371326754e-05, "loss": 2.123533248901367, "memory(GiB)": 72.85, "step": 87705, "token_acc": 0.5275862068965518, "train_speed(iter/s)": 0.672749 }, { "epoch": 3.757765305685275, "grad_norm": 6.854020118713379, "learning_rate": 
1.4504796281585898e-05, "loss": 2.365604782104492, "memory(GiB)": 72.85, "step": 87710, "token_acc": 0.5302491103202847, "train_speed(iter/s)": 0.672747 }, { "epoch": 3.7579795210145237, "grad_norm": 4.566987991333008, "learning_rate": 1.4500056834873937e-05, "loss": 2.093144989013672, "memory(GiB)": 72.85, "step": 87715, "token_acc": 0.5501618122977346, "train_speed(iter/s)": 0.672749 }, { "epoch": 3.758193736343773, "grad_norm": 4.809406280517578, "learning_rate": 1.4495318031276767e-05, "loss": 2.01114444732666, "memory(GiB)": 72.85, "step": 87720, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672755 }, { "epoch": 3.7584079516730218, "grad_norm": 7.208446979522705, "learning_rate": 1.4490579870880217e-05, "loss": 2.386224365234375, "memory(GiB)": 72.85, "step": 87725, "token_acc": 0.4813664596273292, "train_speed(iter/s)": 0.672754 }, { "epoch": 3.7586221670022706, "grad_norm": 7.011614799499512, "learning_rate": 1.4485842353770118e-05, "loss": 1.8396196365356445, "memory(GiB)": 72.85, "step": 87730, "token_acc": 0.6008771929824561, "train_speed(iter/s)": 0.67276 }, { "epoch": 3.75883638233152, "grad_norm": 5.954864501953125, "learning_rate": 1.4481105480032304e-05, "loss": 2.195003128051758, "memory(GiB)": 72.85, "step": 87735, "token_acc": 0.5494880546075085, "train_speed(iter/s)": 0.672757 }, { "epoch": 3.7590505976607687, "grad_norm": 5.276615619659424, "learning_rate": 1.4476369249752587e-05, "loss": 2.1163768768310547, "memory(GiB)": 72.85, "step": 87740, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672753 }, { "epoch": 3.7592648129900175, "grad_norm": 5.927802085876465, "learning_rate": 1.4471633663016743e-05, "loss": 2.0130935668945313, "memory(GiB)": 72.85, "step": 87745, "token_acc": 0.5627240143369175, "train_speed(iter/s)": 0.672761 }, { "epoch": 3.7594790283192667, "grad_norm": 5.7224602699279785, "learning_rate": 1.4466898719910598e-05, "loss": 2.2687559127807617, "memory(GiB)": 72.85, "step": 87750, "token_acc": 
0.4982456140350877, "train_speed(iter/s)": 0.672768 }, { "epoch": 3.7596932436485155, "grad_norm": 4.192924976348877, "learning_rate": 1.4462164420519918e-05, "loss": 1.9255636215209961, "memory(GiB)": 72.85, "step": 87755, "token_acc": 0.578125, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.7599074589777643, "grad_norm": 5.299386501312256, "learning_rate": 1.4457430764930452e-05, "loss": 2.1829082489013674, "memory(GiB)": 72.85, "step": 87760, "token_acc": 0.5108359133126935, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.7601216743070136, "grad_norm": 5.3845534324646, "learning_rate": 1.445269775322799e-05, "loss": 2.332686996459961, "memory(GiB)": 72.85, "step": 87765, "token_acc": 0.532967032967033, "train_speed(iter/s)": 0.672763 }, { "epoch": 3.7603358896362624, "grad_norm": 5.102795600891113, "learning_rate": 1.4447965385498247e-05, "loss": 1.8263210296630858, "memory(GiB)": 72.85, "step": 87770, "token_acc": 0.5480427046263345, "train_speed(iter/s)": 0.672764 }, { "epoch": 3.7605501049655112, "grad_norm": 5.872624397277832, "learning_rate": 1.444323366182695e-05, "loss": 2.06967716217041, "memory(GiB)": 72.85, "step": 87775, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.7607643202947605, "grad_norm": 5.367947101593018, "learning_rate": 1.443850258229984e-05, "loss": 1.811208152770996, "memory(GiB)": 72.85, "step": 87780, "token_acc": 0.5738831615120275, "train_speed(iter/s)": 0.672783 }, { "epoch": 3.7609785356240093, "grad_norm": 6.276507377624512, "learning_rate": 1.4433772147002623e-05, "loss": 1.9610036849975585, "memory(GiB)": 72.85, "step": 87785, "token_acc": 0.5314685314685315, "train_speed(iter/s)": 0.672777 }, { "epoch": 3.761192750953258, "grad_norm": 5.3644280433654785, "learning_rate": 1.4429042356020977e-05, "loss": 2.205185317993164, "memory(GiB)": 72.85, "step": 87790, "token_acc": 0.5049180327868853, "train_speed(iter/s)": 0.672773 }, { "epoch": 3.7614069662825074, "grad_norm": 7.21012020111084, 
"learning_rate": 1.4424313209440605e-05, "loss": 2.1051050186157227, "memory(GiB)": 72.85, "step": 87795, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672775 }, { "epoch": 3.761621181611756, "grad_norm": 5.118549823760986, "learning_rate": 1.4419584707347156e-05, "loss": 2.102894401550293, "memory(GiB)": 72.85, "step": 87800, "token_acc": 0.5117845117845118, "train_speed(iter/s)": 0.672759 }, { "epoch": 3.761835396941005, "grad_norm": 6.7156877517700195, "learning_rate": 1.4414856849826319e-05, "loss": 2.373745346069336, "memory(GiB)": 72.85, "step": 87805, "token_acc": 0.49295774647887325, "train_speed(iter/s)": 0.672751 }, { "epoch": 3.7620496122702543, "grad_norm": 4.5625200271606445, "learning_rate": 1.4410129636963737e-05, "loss": 1.9101554870605468, "memory(GiB)": 72.85, "step": 87810, "token_acc": 0.5581395348837209, "train_speed(iter/s)": 0.672749 }, { "epoch": 3.762263827599503, "grad_norm": 5.001140117645264, "learning_rate": 1.4405403068845036e-05, "loss": 2.181953239440918, "memory(GiB)": 72.85, "step": 87815, "token_acc": 0.5150602409638554, "train_speed(iter/s)": 0.672753 }, { "epoch": 3.762478042928752, "grad_norm": 5.0919671058654785, "learning_rate": 1.4400677145555857e-05, "loss": 2.5478986740112304, "memory(GiB)": 72.85, "step": 87820, "token_acc": 0.4899135446685879, "train_speed(iter/s)": 0.67276 }, { "epoch": 3.762692258258001, "grad_norm": 5.745369911193848, "learning_rate": 1.4395951867181801e-05, "loss": 2.1687854766845702, "memory(GiB)": 72.85, "step": 87825, "token_acc": 0.5191256830601093, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.76290647358725, "grad_norm": 6.8374924659729, "learning_rate": 1.4391227233808457e-05, "loss": 2.0852237701416017, "memory(GiB)": 72.85, "step": 87830, "token_acc": 0.5679442508710801, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.7631206889164988, "grad_norm": 4.037143230438232, "learning_rate": 1.4386503245521443e-05, "loss": 2.1808473587036135, "memory(GiB)": 72.85, "step": 87835, 
"token_acc": 0.5105740181268882, "train_speed(iter/s)": 0.672769 }, { "epoch": 3.763334904245748, "grad_norm": 4.852779388427734, "learning_rate": 1.4381779902406345e-05, "loss": 2.2255544662475586, "memory(GiB)": 72.85, "step": 87840, "token_acc": 0.5308641975308642, "train_speed(iter/s)": 0.672766 }, { "epoch": 3.763549119574997, "grad_norm": 5.813396453857422, "learning_rate": 1.4377057204548717e-05, "loss": 2.178109359741211, "memory(GiB)": 72.85, "step": 87845, "token_acc": 0.5604026845637584, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.7637633349042456, "grad_norm": 5.283553123474121, "learning_rate": 1.4372335152034112e-05, "loss": 2.1151992797851564, "memory(GiB)": 72.85, "step": 87850, "token_acc": 0.565359477124183, "train_speed(iter/s)": 0.672766 }, { "epoch": 3.763977550233495, "grad_norm": 4.434414863586426, "learning_rate": 1.4367613744948077e-05, "loss": 2.2267921447753904, "memory(GiB)": 72.85, "step": 87855, "token_acc": 0.542319749216301, "train_speed(iter/s)": 0.672775 }, { "epoch": 3.7641917655627437, "grad_norm": 6.2267961502075195, "learning_rate": 1.436289298337613e-05, "loss": 2.0281614303588866, "memory(GiB)": 72.85, "step": 87860, "token_acc": 0.5592105263157895, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.7644059808919925, "grad_norm": 7.941826343536377, "learning_rate": 1.4358172867403818e-05, "loss": 2.3305389404296877, "memory(GiB)": 72.85, "step": 87865, "token_acc": 0.5179153094462541, "train_speed(iter/s)": 0.672774 }, { "epoch": 3.7646201962212418, "grad_norm": 5.632295608520508, "learning_rate": 1.4353453397116646e-05, "loss": 2.053472900390625, "memory(GiB)": 72.85, "step": 87870, "token_acc": 0.5547169811320755, "train_speed(iter/s)": 0.672779 }, { "epoch": 3.7648344115504906, "grad_norm": 5.278420925140381, "learning_rate": 1.4348734572600103e-05, "loss": 1.9858049392700194, "memory(GiB)": 72.85, "step": 87875, "token_acc": 0.5918367346938775, "train_speed(iter/s)": 0.67278 }, { "epoch": 3.7650486268797394, "grad_norm": 
6.601995468139648, "learning_rate": 1.434401639393968e-05, "loss": 2.296028900146484, "memory(GiB)": 72.85, "step": 87880, "token_acc": 0.5559322033898305, "train_speed(iter/s)": 0.672793 }, { "epoch": 3.7652628422089887, "grad_norm": 4.532019138336182, "learning_rate": 1.433929886122084e-05, "loss": 2.014337158203125, "memory(GiB)": 72.85, "step": 87885, "token_acc": 0.555984555984556, "train_speed(iter/s)": 0.672791 }, { "epoch": 3.7654770575382375, "grad_norm": 5.7712082862854, "learning_rate": 1.4334581974529044e-05, "loss": 1.8977926254272461, "memory(GiB)": 72.85, "step": 87890, "token_acc": 0.5865724381625441, "train_speed(iter/s)": 0.672788 }, { "epoch": 3.7656912728674863, "grad_norm": 5.79408597946167, "learning_rate": 1.4329865733949765e-05, "loss": 2.326773452758789, "memory(GiB)": 72.85, "step": 87895, "token_acc": 0.4880546075085324, "train_speed(iter/s)": 0.672795 }, { "epoch": 3.7659054881967355, "grad_norm": 7.415910243988037, "learning_rate": 1.4325150139568432e-05, "loss": 2.283965492248535, "memory(GiB)": 72.85, "step": 87900, "token_acc": 0.5380116959064327, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.7661197035259844, "grad_norm": 5.798778533935547, "learning_rate": 1.4320435191470471e-05, "loss": 2.338883972167969, "memory(GiB)": 72.85, "step": 87905, "token_acc": 0.5329153605015674, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.766333918855233, "grad_norm": 7.0617804527282715, "learning_rate": 1.4315720889741279e-05, "loss": 2.118139457702637, "memory(GiB)": 72.85, "step": 87910, "token_acc": 0.5377358490566038, "train_speed(iter/s)": 0.672789 }, { "epoch": 3.7665481341844824, "grad_norm": 6.105551719665527, "learning_rate": 1.4311007234466296e-05, "loss": 2.037609100341797, "memory(GiB)": 72.85, "step": 87915, "token_acc": 0.5440613026819924, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.7667623495137312, "grad_norm": 7.747371196746826, "learning_rate": 1.4306294225730876e-05, "loss": 1.9608402252197266, "memory(GiB)": 72.85, 
"step": 87920, "token_acc": 0.5639097744360902, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.76697656484298, "grad_norm": 4.368068218231201, "learning_rate": 1.4301581863620434e-05, "loss": 2.280166244506836, "memory(GiB)": 72.85, "step": 87925, "token_acc": 0.501628664495114, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.7671907801722293, "grad_norm": 6.851829528808594, "learning_rate": 1.4296870148220332e-05, "loss": 2.2179437637329102, "memory(GiB)": 72.85, "step": 87930, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.767404995501478, "grad_norm": 6.134130001068115, "learning_rate": 1.4292159079615913e-05, "loss": 2.0479331970214845, "memory(GiB)": 72.85, "step": 87935, "token_acc": 0.5670103092783505, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.767619210830727, "grad_norm": 4.504811763763428, "learning_rate": 1.4287448657892528e-05, "loss": 2.089069366455078, "memory(GiB)": 72.85, "step": 87940, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.6728 }, { "epoch": 3.767833426159976, "grad_norm": 5.173920154571533, "learning_rate": 1.42827388831355e-05, "loss": 2.209042549133301, "memory(GiB)": 72.85, "step": 87945, "token_acc": 0.5, "train_speed(iter/s)": 0.672805 }, { "epoch": 3.768047641489225, "grad_norm": 9.10322380065918, "learning_rate": 1.4278029755430178e-05, "loss": 1.9821325302124024, "memory(GiB)": 72.85, "step": 87950, "token_acc": 0.5405405405405406, "train_speed(iter/s)": 0.672811 }, { "epoch": 3.768261856818474, "grad_norm": 4.285072326660156, "learning_rate": 1.4273321274861856e-05, "loss": 2.064036178588867, "memory(GiB)": 72.85, "step": 87955, "token_acc": 0.5382165605095541, "train_speed(iter/s)": 0.67281 }, { "epoch": 3.768476072147723, "grad_norm": 6.6969828605651855, "learning_rate": 1.4268613441515832e-05, "loss": 2.0615249633789063, "memory(GiB)": 72.85, "step": 87960, "token_acc": 0.5551839464882943, "train_speed(iter/s)": 0.672814 }, { "epoch": 3.768690287476972, "grad_norm": 
4.6540069580078125, "learning_rate": 1.4263906255477394e-05, "loss": 1.8904626846313477, "memory(GiB)": 72.85, "step": 87965, "token_acc": 0.5964912280701754, "train_speed(iter/s)": 0.672814 }, { "epoch": 3.7689045028062207, "grad_norm": 5.344182014465332, "learning_rate": 1.4259199716831823e-05, "loss": 2.3037023544311523, "memory(GiB)": 72.85, "step": 87970, "token_acc": 0.528395061728395, "train_speed(iter/s)": 0.672806 }, { "epoch": 3.76911871813547, "grad_norm": 5.7752838134765625, "learning_rate": 1.4254493825664355e-05, "loss": 1.8153038024902344, "memory(GiB)": 72.85, "step": 87975, "token_acc": 0.5809128630705395, "train_speed(iter/s)": 0.672805 }, { "epoch": 3.7693329334647188, "grad_norm": 6.613818168640137, "learning_rate": 1.424978858206028e-05, "loss": 2.019005012512207, "memory(GiB)": 72.85, "step": 87980, "token_acc": 0.5693950177935944, "train_speed(iter/s)": 0.672809 }, { "epoch": 3.7695471487939676, "grad_norm": 4.41210412979126, "learning_rate": 1.4245083986104807e-05, "loss": 2.2156888961791994, "memory(GiB)": 72.85, "step": 87985, "token_acc": 0.5260115606936416, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.769761364123217, "grad_norm": 7.1595611572265625, "learning_rate": 1.4240380037883195e-05, "loss": 2.0144643783569336, "memory(GiB)": 72.85, "step": 87990, "token_acc": 0.5259515570934256, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.7699755794524656, "grad_norm": 6.344911098480225, "learning_rate": 1.4235676737480641e-05, "loss": 2.266628646850586, "memory(GiB)": 72.85, "step": 87995, "token_acc": 0.5055762081784386, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.7701897947817145, "grad_norm": 7.386858940124512, "learning_rate": 1.4230974084982357e-05, "loss": 1.9599117279052733, "memory(GiB)": 72.85, "step": 88000, "token_acc": 0.5593869731800766, "train_speed(iter/s)": 0.672804 }, { "epoch": 3.7701897947817145, "eval_loss": 2.1948728561401367, "eval_runtime": 14.764, "eval_samples_per_second": 6.773, "eval_steps_per_second": 
6.773, "eval_token_acc": 0.4767879548306148, "step": 88000 }, { "epoch": 3.7704040101109637, "grad_norm": 5.486996650695801, "learning_rate": 1.4226272080473513e-05, "loss": 2.009702682495117, "memory(GiB)": 72.85, "step": 88005, "token_acc": 0.4939309056956116, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.7706182254402125, "grad_norm": 5.39966344833374, "learning_rate": 1.4221570724039324e-05, "loss": 2.2513191223144533, "memory(GiB)": 72.85, "step": 88010, "token_acc": 0.5303030303030303, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.7708324407694613, "grad_norm": 5.244326114654541, "learning_rate": 1.4216870015764944e-05, "loss": 2.1638248443603514, "memory(GiB)": 72.85, "step": 88015, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.7710466560987106, "grad_norm": 5.935708522796631, "learning_rate": 1.4212169955735527e-05, "loss": 1.9837633132934571, "memory(GiB)": 72.85, "step": 88020, "token_acc": 0.5544217687074829, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.7712608714279594, "grad_norm": 6.424587249755859, "learning_rate": 1.4207470544036228e-05, "loss": 2.1491958618164064, "memory(GiB)": 72.85, "step": 88025, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.771475086757208, "grad_norm": 5.713136196136475, "learning_rate": 1.4202771780752178e-05, "loss": 2.2254432678222655, "memory(GiB)": 72.85, "step": 88030, "token_acc": 0.48589341692789967, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.7716893020864575, "grad_norm": 4.982446670532227, "learning_rate": 1.419807366596848e-05, "loss": 2.0318416595458983, "memory(GiB)": 72.85, "step": 88035, "token_acc": 0.5225563909774437, "train_speed(iter/s)": 0.672741 }, { "epoch": 3.7719035174157063, "grad_norm": 6.2254252433776855, "learning_rate": 1.4193376199770276e-05, "loss": 1.9654909133911134, "memory(GiB)": 72.85, "step": 88040, "token_acc": 0.5749235474006116, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.772117732744955, "grad_norm": 
6.480680465698242, "learning_rate": 1.4188679382242653e-05, "loss": 2.2901357650756835, "memory(GiB)": 72.85, "step": 88045, "token_acc": 0.5389408099688473, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.7723319480742044, "grad_norm": 4.90813684463501, "learning_rate": 1.4183983213470697e-05, "loss": 2.0926652908325196, "memory(GiB)": 72.85, "step": 88050, "token_acc": 0.5309090909090909, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.772546163403453, "grad_norm": 10.51826000213623, "learning_rate": 1.4179287693539467e-05, "loss": 2.1972820281982424, "memory(GiB)": 72.85, "step": 88055, "token_acc": 0.563573883161512, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.772760378732702, "grad_norm": 4.899564743041992, "learning_rate": 1.417459282253406e-05, "loss": 2.3059154510498048, "memory(GiB)": 72.85, "step": 88060, "token_acc": 0.46689895470383275, "train_speed(iter/s)": 0.672728 }, { "epoch": 3.7729745940619512, "grad_norm": 6.806473255157471, "learning_rate": 1.4169898600539494e-05, "loss": 2.450748825073242, "memory(GiB)": 72.85, "step": 88065, "token_acc": 0.47950819672131145, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.7731888093912, "grad_norm": 8.122628211975098, "learning_rate": 1.4165205027640843e-05, "loss": 2.078950881958008, "memory(GiB)": 72.85, "step": 88070, "token_acc": 0.5451127819548872, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.773403024720449, "grad_norm": 5.081286907196045, "learning_rate": 1.4160512103923118e-05, "loss": 2.284930419921875, "memory(GiB)": 72.85, "step": 88075, "token_acc": 0.4875, "train_speed(iter/s)": 0.672741 }, { "epoch": 3.773617240049698, "grad_norm": 8.874213218688965, "learning_rate": 1.415581982947134e-05, "loss": 2.3818031311035157, "memory(GiB)": 72.85, "step": 88080, "token_acc": 0.47854785478547857, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.773831455378947, "grad_norm": 5.318605899810791, "learning_rate": 1.4151128204370506e-05, "loss": 2.017459678649902, "memory(GiB)": 72.85, "step": 88085, 
"token_acc": 0.5481481481481482, "train_speed(iter/s)": 0.672728 }, { "epoch": 3.7740456707081957, "grad_norm": 6.854396343231201, "learning_rate": 1.4146437228705594e-05, "loss": 2.3330047607421873, "memory(GiB)": 72.85, "step": 88090, "token_acc": 0.4964788732394366, "train_speed(iter/s)": 0.672736 }, { "epoch": 3.774259886037445, "grad_norm": 6.106525897979736, "learning_rate": 1.4141746902561625e-05, "loss": 2.0191320419311523, "memory(GiB)": 72.85, "step": 88095, "token_acc": 0.5271084337349398, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.774474101366694, "grad_norm": 4.640056610107422, "learning_rate": 1.4137057226023554e-05, "loss": 2.1374914169311525, "memory(GiB)": 72.85, "step": 88100, "token_acc": 0.5436893203883495, "train_speed(iter/s)": 0.672742 }, { "epoch": 3.7746883166959426, "grad_norm": 8.126107215881348, "learning_rate": 1.4132368199176328e-05, "loss": 2.1445871353149415, "memory(GiB)": 72.85, "step": 88105, "token_acc": 0.5335570469798657, "train_speed(iter/s)": 0.672736 }, { "epoch": 3.774902532025192, "grad_norm": 4.950556755065918, "learning_rate": 1.4127679822104894e-05, "loss": 2.19653205871582, "memory(GiB)": 72.85, "step": 88110, "token_acc": 0.5470383275261324, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.7751167473544407, "grad_norm": 8.3357572555542, "learning_rate": 1.41229920948942e-05, "loss": 2.350978660583496, "memory(GiB)": 72.85, "step": 88115, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.7753309626836895, "grad_norm": 8.899473190307617, "learning_rate": 1.4118305017629141e-05, "loss": 2.2613386154174804, "memory(GiB)": 72.85, "step": 88120, "token_acc": 0.5, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.7755451780129388, "grad_norm": 4.71587610244751, "learning_rate": 1.4113618590394655e-05, "loss": 2.0491146087646483, "memory(GiB)": 72.85, "step": 88125, "token_acc": 0.5414364640883977, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.7757593933421876, "grad_norm": 8.29793643951416, 
"learning_rate": 1.4108932813275627e-05, "loss": 2.3988826751708983, "memory(GiB)": 72.85, "step": 88130, "token_acc": 0.492, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.7759736086714364, "grad_norm": 5.9319071769714355, "learning_rate": 1.4104247686356958e-05, "loss": 2.1414928436279297, "memory(GiB)": 72.85, "step": 88135, "token_acc": 0.4867549668874172, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.7761878240006856, "grad_norm": 6.020059108734131, "learning_rate": 1.4099563209723521e-05, "loss": 2.142596435546875, "memory(GiB)": 72.85, "step": 88140, "token_acc": 0.54421768707483, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.7764020393299345, "grad_norm": 5.262325286865234, "learning_rate": 1.4094879383460169e-05, "loss": 2.3795217514038085, "memory(GiB)": 72.85, "step": 88145, "token_acc": 0.5125348189415042, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.7766162546591833, "grad_norm": 4.62454891204834, "learning_rate": 1.4090196207651745e-05, "loss": 2.168730354309082, "memory(GiB)": 72.85, "step": 88150, "token_acc": 0.5051546391752577, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.7768304699884325, "grad_norm": 11.241645812988281, "learning_rate": 1.4085513682383117e-05, "loss": 1.9785297393798829, "memory(GiB)": 72.85, "step": 88155, "token_acc": 0.554006968641115, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.7770446853176813, "grad_norm": 7.26413631439209, "learning_rate": 1.4080831807739103e-05, "loss": 2.3162521362304687, "memory(GiB)": 72.85, "step": 88160, "token_acc": 0.4785714285714286, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.77725890064693, "grad_norm": 7.610813140869141, "learning_rate": 1.4076150583804515e-05, "loss": 1.951904296875, "memory(GiB)": 72.85, "step": 88165, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.7774731159761794, "grad_norm": 4.744349956512451, "learning_rate": 1.4071470010664157e-05, "loss": 2.5049741744995115, "memory(GiB)": 72.85, "step": 88170, "token_acc": 
0.47058823529411764, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.777687331305428, "grad_norm": 4.777798652648926, "learning_rate": 1.4066790088402821e-05, "loss": 2.1526552200317384, "memory(GiB)": 72.85, "step": 88175, "token_acc": 0.5362318840579711, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.777901546634677, "grad_norm": 5.227078914642334, "learning_rate": 1.4062110817105279e-05, "loss": 2.0835903167724608, "memory(GiB)": 72.85, "step": 88180, "token_acc": 0.532258064516129, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.7781157619639263, "grad_norm": 4.7095947265625, "learning_rate": 1.4057432196856325e-05, "loss": 1.994652557373047, "memory(GiB)": 72.85, "step": 88185, "token_acc": 0.555921052631579, "train_speed(iter/s)": 0.672746 }, { "epoch": 3.778329977293175, "grad_norm": 5.268409729003906, "learning_rate": 1.4052754227740706e-05, "loss": 2.217696189880371, "memory(GiB)": 72.85, "step": 88190, "token_acc": 0.48936170212765956, "train_speed(iter/s)": 0.672746 }, { "epoch": 3.778544192622424, "grad_norm": 7.104170799255371, "learning_rate": 1.404807690984316e-05, "loss": 2.1085948944091797, "memory(GiB)": 72.85, "step": 88195, "token_acc": 0.5163398692810458, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.778758407951673, "grad_norm": 7.698678493499756, "learning_rate": 1.4043400243248433e-05, "loss": 2.0460765838623045, "memory(GiB)": 72.85, "step": 88200, "token_acc": 0.5267175572519084, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.778972623280922, "grad_norm": 6.251504421234131, "learning_rate": 1.4038724228041223e-05, "loss": 1.9279962539672852, "memory(GiB)": 72.85, "step": 88205, "token_acc": 0.5671641791044776, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.779186838610171, "grad_norm": 6.572375774383545, "learning_rate": 1.4034048864306254e-05, "loss": 2.1873334884643554, "memory(GiB)": 72.85, "step": 88210, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.77940105393942, "grad_norm": 7.48480749130249, 
"learning_rate": 1.402937415212825e-05, "loss": 2.0888248443603517, "memory(GiB)": 72.85, "step": 88215, "token_acc": 0.5390070921985816, "train_speed(iter/s)": 0.672749 }, { "epoch": 3.779615269268669, "grad_norm": 4.681337356567383, "learning_rate": 1.402470009159187e-05, "loss": 1.9683393478393554, "memory(GiB)": 72.85, "step": 88220, "token_acc": 0.5377049180327869, "train_speed(iter/s)": 0.672748 }, { "epoch": 3.7798294845979177, "grad_norm": 5.292003631591797, "learning_rate": 1.4020026682781801e-05, "loss": 1.81143798828125, "memory(GiB)": 72.85, "step": 88225, "token_acc": 0.5773584905660377, "train_speed(iter/s)": 0.672744 }, { "epoch": 3.780043699927167, "grad_norm": 7.629693031311035, "learning_rate": 1.4015353925782698e-05, "loss": 2.1220726013183593, "memory(GiB)": 72.85, "step": 88230, "token_acc": 0.5513307984790875, "train_speed(iter/s)": 0.672754 }, { "epoch": 3.7802579152564157, "grad_norm": 6.733371734619141, "learning_rate": 1.40106818206792e-05, "loss": 2.2027034759521484, "memory(GiB)": 72.85, "step": 88235, "token_acc": 0.5183946488294314, "train_speed(iter/s)": 0.672758 }, { "epoch": 3.7804721305856646, "grad_norm": 6.048663139343262, "learning_rate": 1.4006010367555977e-05, "loss": 2.270703887939453, "memory(GiB)": 72.85, "step": 88240, "token_acc": 0.5361216730038023, "train_speed(iter/s)": 0.672768 }, { "epoch": 3.780686345914914, "grad_norm": 9.191351890563965, "learning_rate": 1.4001339566497645e-05, "loss": 2.1681402206420897, "memory(GiB)": 72.85, "step": 88245, "token_acc": 0.5505226480836237, "train_speed(iter/s)": 0.672773 }, { "epoch": 3.7809005612441626, "grad_norm": 5.6402153968811035, "learning_rate": 1.399666941758881e-05, "loss": 2.0876379013061523, "memory(GiB)": 72.85, "step": 88250, "token_acc": 0.4884488448844885, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.7811147765734114, "grad_norm": 5.7973246574401855, "learning_rate": 1.3991999920914085e-05, "loss": 2.226209259033203, "memory(GiB)": 72.85, "step": 88255, 
"token_acc": 0.5298013245033113, "train_speed(iter/s)": 0.672774 }, { "epoch": 3.7813289919026607, "grad_norm": 5.893048286437988, "learning_rate": 1.3987331076558057e-05, "loss": 2.008193016052246, "memory(GiB)": 72.85, "step": 88260, "token_acc": 0.5408163265306123, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.7815432072319095, "grad_norm": 7.807822227478027, "learning_rate": 1.3982662884605303e-05, "loss": 2.2362274169921874, "memory(GiB)": 72.85, "step": 88265, "token_acc": 0.48773006134969327, "train_speed(iter/s)": 0.672772 }, { "epoch": 3.7817574225611583, "grad_norm": 6.69459867477417, "learning_rate": 1.3977995345140405e-05, "loss": 2.23254508972168, "memory(GiB)": 72.85, "step": 88270, "token_acc": 0.5078125, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.7819716378904076, "grad_norm": 4.737234115600586, "learning_rate": 1.3973328458247913e-05, "loss": 2.1008155822753904, "memory(GiB)": 72.85, "step": 88275, "token_acc": 0.5642633228840125, "train_speed(iter/s)": 0.672772 }, { "epoch": 3.7821858532196564, "grad_norm": 8.265521049499512, "learning_rate": 1.3968662224012357e-05, "loss": 2.4545488357543945, "memory(GiB)": 72.85, "step": 88280, "token_acc": 0.5302593659942363, "train_speed(iter/s)": 0.672776 }, { "epoch": 3.782400068548905, "grad_norm": 5.562351226806641, "learning_rate": 1.39639966425183e-05, "loss": 2.028907585144043, "memory(GiB)": 72.85, "step": 88285, "token_acc": 0.5669291338582677, "train_speed(iter/s)": 0.672786 }, { "epoch": 3.7826142838781545, "grad_norm": 6.166532516479492, "learning_rate": 1.3959331713850254e-05, "loss": 2.030270004272461, "memory(GiB)": 72.85, "step": 88290, "token_acc": 0.573943661971831, "train_speed(iter/s)": 0.67279 }, { "epoch": 3.7828284992074033, "grad_norm": 4.514156818389893, "learning_rate": 1.39546674380927e-05, "loss": 2.14709587097168, "memory(GiB)": 72.85, "step": 88295, "token_acc": 0.5159010600706714, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.783042714536652, "grad_norm": 
10.540074348449707, "learning_rate": 1.3950003815330176e-05, "loss": 1.990517807006836, "memory(GiB)": 72.85, "step": 88300, "token_acc": 0.5859375, "train_speed(iter/s)": 0.672788 }, { "epoch": 3.7832569298659013, "grad_norm": 4.8163886070251465, "learning_rate": 1.3945340845647153e-05, "loss": 2.1179122924804688, "memory(GiB)": 72.85, "step": 88305, "token_acc": 0.5266457680250783, "train_speed(iter/s)": 0.672782 }, { "epoch": 3.78347114519515, "grad_norm": 5.6246819496154785, "learning_rate": 1.3940678529128104e-05, "loss": 2.2132476806640624, "memory(GiB)": 72.85, "step": 88310, "token_acc": 0.5224913494809689, "train_speed(iter/s)": 0.67278 }, { "epoch": 3.783685360524399, "grad_norm": 4.261295795440674, "learning_rate": 1.3936016865857483e-05, "loss": 1.9379409790039062, "memory(GiB)": 72.85, "step": 88315, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.7838995758536482, "grad_norm": 4.882917881011963, "learning_rate": 1.3931355855919753e-05, "loss": 2.1850002288818358, "memory(GiB)": 72.85, "step": 88320, "token_acc": 0.5309090909090909, "train_speed(iter/s)": 0.672784 }, { "epoch": 3.784113791182897, "grad_norm": 5.8479156494140625, "learning_rate": 1.3926695499399328e-05, "loss": 2.1554286956787108, "memory(GiB)": 72.85, "step": 88325, "token_acc": 0.521865889212828, "train_speed(iter/s)": 0.672784 }, { "epoch": 3.784328006512146, "grad_norm": 6.255013942718506, "learning_rate": 1.3922035796380672e-05, "loss": 2.190518379211426, "memory(GiB)": 72.85, "step": 88330, "token_acc": 0.5068493150684932, "train_speed(iter/s)": 0.672793 }, { "epoch": 3.784542221841395, "grad_norm": 5.7757158279418945, "learning_rate": 1.3917376746948179e-05, "loss": 2.2261770248413084, "memory(GiB)": 72.85, "step": 88335, "token_acc": 0.5065359477124183, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.784756437170644, "grad_norm": 8.292828559875488, "learning_rate": 1.3912718351186254e-05, "loss": 2.2785797119140625, "memory(GiB)": 72.85, "step": 
88340, "token_acc": 0.5229357798165137, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.7849706524998927, "grad_norm": 4.842984676361084, "learning_rate": 1.3908060609179291e-05, "loss": 2.236455535888672, "memory(GiB)": 72.85, "step": 88345, "token_acc": 0.5481481481481482, "train_speed(iter/s)": 0.67279 }, { "epoch": 3.785184867829142, "grad_norm": 5.165616512298584, "learning_rate": 1.3903403521011654e-05, "loss": 2.0493289947509767, "memory(GiB)": 72.85, "step": 88350, "token_acc": 0.5571955719557196, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.785399083158391, "grad_norm": 7.017091274261475, "learning_rate": 1.3898747086767715e-05, "loss": 2.188358688354492, "memory(GiB)": 72.85, "step": 88355, "token_acc": 0.49174917491749176, "train_speed(iter/s)": 0.672793 }, { "epoch": 3.7856132984876396, "grad_norm": 7.658354759216309, "learning_rate": 1.3894091306531864e-05, "loss": 2.1304962158203127, "memory(GiB)": 72.85, "step": 88360, "token_acc": 0.4984025559105431, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.785827513816889, "grad_norm": 6.53671407699585, "learning_rate": 1.388943618038841e-05, "loss": 2.2008838653564453, "memory(GiB)": 72.85, "step": 88365, "token_acc": 0.525, "train_speed(iter/s)": 0.672791 }, { "epoch": 3.7860417291461377, "grad_norm": 4.921710968017578, "learning_rate": 1.3884781708421702e-05, "loss": 2.1810024261474608, "memory(GiB)": 72.85, "step": 88370, "token_acc": 0.5592105263157895, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.7862559444753865, "grad_norm": 6.742751598358154, "learning_rate": 1.3880127890716044e-05, "loss": 2.3451366424560547, "memory(GiB)": 72.85, "step": 88375, "token_acc": 0.4749262536873156, "train_speed(iter/s)": 0.672803 }, { "epoch": 3.7864701598046357, "grad_norm": 5.841028690338135, "learning_rate": 1.3875474727355742e-05, "loss": 2.1226356506347654, "memory(GiB)": 72.85, "step": 88380, "token_acc": 0.5335463258785943, "train_speed(iter/s)": 0.672803 }, { "epoch": 3.7866843751338846, "grad_norm": 
6.021456241607666, "learning_rate": 1.3870822218425118e-05, "loss": 1.9122602462768554, "memory(GiB)": 72.85, "step": 88385, "token_acc": 0.5882352941176471, "train_speed(iter/s)": 0.672805 }, { "epoch": 3.7868985904631334, "grad_norm": 5.444272518157959, "learning_rate": 1.386617036400844e-05, "loss": 2.2768342971801756, "memory(GiB)": 72.85, "step": 88390, "token_acc": 0.528125, "train_speed(iter/s)": 0.672813 }, { "epoch": 3.7871128057923826, "grad_norm": 4.9107441902160645, "learning_rate": 1.3861519164189984e-05, "loss": 2.005140686035156, "memory(GiB)": 72.85, "step": 88395, "token_acc": 0.5762195121951219, "train_speed(iter/s)": 0.672813 }, { "epoch": 3.7873270211216314, "grad_norm": 7.344789505004883, "learning_rate": 1.385686861905401e-05, "loss": 2.117320251464844, "memory(GiB)": 72.85, "step": 88400, "token_acc": 0.5795454545454546, "train_speed(iter/s)": 0.672818 }, { "epoch": 3.7875412364508803, "grad_norm": 6.303198337554932, "learning_rate": 1.3852218728684762e-05, "loss": 1.9644405364990234, "memory(GiB)": 72.85, "step": 88405, "token_acc": 0.5709219858156028, "train_speed(iter/s)": 0.672811 }, { "epoch": 3.7877554517801295, "grad_norm": 7.231514930725098, "learning_rate": 1.3847569493166468e-05, "loss": 2.326609802246094, "memory(GiB)": 72.85, "step": 88410, "token_acc": 0.48905109489051096, "train_speed(iter/s)": 0.672804 }, { "epoch": 3.7879696671093783, "grad_norm": 8.25492000579834, "learning_rate": 1.384292091258338e-05, "loss": 2.5352067947387695, "memory(GiB)": 72.85, "step": 88415, "token_acc": 0.49458483754512633, "train_speed(iter/s)": 0.672812 }, { "epoch": 3.788183882438627, "grad_norm": 5.905238628387451, "learning_rate": 1.3838272987019702e-05, "loss": 2.2001926422119142, "memory(GiB)": 72.85, "step": 88420, "token_acc": 0.509493670886076, "train_speed(iter/s)": 0.672811 }, { "epoch": 3.7883980977678764, "grad_norm": 6.331000328063965, "learning_rate": 1.3833625716559611e-05, "loss": 2.2601585388183594, "memory(GiB)": 72.85, "step": 
88425, "token_acc": 0.5280898876404494, "train_speed(iter/s)": 0.672811 }, { "epoch": 3.788612313097125, "grad_norm": 6.548717021942139, "learning_rate": 1.3828979101287338e-05, "loss": 1.8846475601196289, "memory(GiB)": 72.85, "step": 88430, "token_acc": 0.5276752767527675, "train_speed(iter/s)": 0.672814 }, { "epoch": 3.788826528426374, "grad_norm": 5.403645038604736, "learning_rate": 1.3824333141287033e-05, "loss": 2.092950439453125, "memory(GiB)": 72.85, "step": 88435, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.672819 }, { "epoch": 3.7890407437556233, "grad_norm": 5.297698497772217, "learning_rate": 1.3819687836642859e-05, "loss": 2.07788028717041, "memory(GiB)": 72.85, "step": 88440, "token_acc": 0.5373665480427047, "train_speed(iter/s)": 0.67282 }, { "epoch": 3.789254959084872, "grad_norm": 4.5448689460754395, "learning_rate": 1.3815043187438993e-05, "loss": 2.175966262817383, "memory(GiB)": 72.85, "step": 88445, "token_acc": 0.515358361774744, "train_speed(iter/s)": 0.672827 }, { "epoch": 3.789469174414121, "grad_norm": 5.658778667449951, "learning_rate": 1.381039919375956e-05, "loss": 2.3012035369873045, "memory(GiB)": 72.85, "step": 88450, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672831 }, { "epoch": 3.78968338974337, "grad_norm": 4.631224155426025, "learning_rate": 1.3805755855688695e-05, "loss": 1.940648078918457, "memory(GiB)": 72.85, "step": 88455, "token_acc": 0.5551839464882943, "train_speed(iter/s)": 0.672831 }, { "epoch": 3.789897605072619, "grad_norm": 5.190648078918457, "learning_rate": 1.380111317331052e-05, "loss": 2.1908517837524415, "memory(GiB)": 72.85, "step": 88460, "token_acc": 0.5220338983050847, "train_speed(iter/s)": 0.672825 }, { "epoch": 3.790111820401868, "grad_norm": 5.329380035400391, "learning_rate": 1.3796471146709139e-05, "loss": 2.10189208984375, "memory(GiB)": 72.85, "step": 88465, "token_acc": 0.5434083601286174, "train_speed(iter/s)": 0.672824 }, { "epoch": 3.790326035731117, "grad_norm": 
8.268646240234375, "learning_rate": 1.3791829775968623e-05, "loss": 1.9921627044677734, "memory(GiB)": 72.85, "step": 88470, "token_acc": 0.5662650602409639, "train_speed(iter/s)": 0.672823 }, { "epoch": 3.790540251060366, "grad_norm": 5.472398281097412, "learning_rate": 1.3787189061173094e-05, "loss": 2.109590530395508, "memory(GiB)": 72.85, "step": 88475, "token_acc": 0.5375, "train_speed(iter/s)": 0.672827 }, { "epoch": 3.7907544663896147, "grad_norm": 5.556617259979248, "learning_rate": 1.3782549002406608e-05, "loss": 2.188581848144531, "memory(GiB)": 72.85, "step": 88480, "token_acc": 0.5316455696202531, "train_speed(iter/s)": 0.672826 }, { "epoch": 3.790968681718864, "grad_norm": 5.472699165344238, "learning_rate": 1.3777909599753219e-05, "loss": 2.040999984741211, "memory(GiB)": 72.85, "step": 88485, "token_acc": 0.5380116959064327, "train_speed(iter/s)": 0.67283 }, { "epoch": 3.7911828970481127, "grad_norm": 5.389126300811768, "learning_rate": 1.3773270853296976e-05, "loss": 1.5843382835388184, "memory(GiB)": 72.85, "step": 88490, "token_acc": 0.6349206349206349, "train_speed(iter/s)": 0.67283 }, { "epoch": 3.7913971123773615, "grad_norm": 5.351783752441406, "learning_rate": 1.3768632763121903e-05, "loss": 1.873215103149414, "memory(GiB)": 72.85, "step": 88495, "token_acc": 0.5703125, "train_speed(iter/s)": 0.672833 }, { "epoch": 3.791611327706611, "grad_norm": 5.9320807456970215, "learning_rate": 1.3763995329312034e-05, "loss": 2.1028881072998047, "memory(GiB)": 72.85, "step": 88500, "token_acc": 0.5597269624573379, "train_speed(iter/s)": 0.672836 }, { "epoch": 3.791611327706611, "eval_loss": 2.0849063396453857, "eval_runtime": 15.8924, "eval_samples_per_second": 6.292, "eval_steps_per_second": 6.292, "eval_token_acc": 0.49560117302052786, "step": 88500 }, { "epoch": 3.7918255430358596, "grad_norm": 4.662487983703613, "learning_rate": 1.3759358551951396e-05, "loss": 2.2375444412231444, "memory(GiB)": 72.85, "step": 88505, "token_acc": 0.5020325203252033, 
"train_speed(iter/s)": 0.672737 }, { "epoch": 3.7920397583651084, "grad_norm": 6.042744159698486, "learning_rate": 1.3754722431123974e-05, "loss": 2.080859375, "memory(GiB)": 72.85, "step": 88510, "token_acc": 0.5157894736842106, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.7922539736943577, "grad_norm": 4.252504348754883, "learning_rate": 1.3750086966913755e-05, "loss": 1.7223669052124024, "memory(GiB)": 72.85, "step": 88515, "token_acc": 0.5673076923076923, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.7924681890236065, "grad_norm": 6.331970691680908, "learning_rate": 1.374545215940472e-05, "loss": 2.113836097717285, "memory(GiB)": 72.85, "step": 88520, "token_acc": 0.5632911392405063, "train_speed(iter/s)": 0.672741 }, { "epoch": 3.7926824043528553, "grad_norm": 4.414780616760254, "learning_rate": 1.3740818008680812e-05, "loss": 2.3531618118286133, "memory(GiB)": 72.85, "step": 88525, "token_acc": 0.5264797507788161, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.7928966196821046, "grad_norm": 5.1024627685546875, "learning_rate": 1.3736184514826012e-05, "loss": 2.16317195892334, "memory(GiB)": 72.85, "step": 88530, "token_acc": 0.4930555555555556, "train_speed(iter/s)": 0.672741 }, { "epoch": 3.7931108350113534, "grad_norm": 5.624659061431885, "learning_rate": 1.3731551677924248e-05, "loss": 1.9502532958984375, "memory(GiB)": 72.85, "step": 88535, "token_acc": 0.5570469798657718, "train_speed(iter/s)": 0.67275 }, { "epoch": 3.793325050340602, "grad_norm": 5.709303855895996, "learning_rate": 1.3726919498059453e-05, "loss": 1.9452760696411133, "memory(GiB)": 72.85, "step": 88540, "token_acc": 0.5655430711610487, "train_speed(iter/s)": 0.672749 }, { "epoch": 3.7935392656698514, "grad_norm": 5.327815055847168, "learning_rate": 1.3722287975315535e-05, "loss": 2.3583999633789063, "memory(GiB)": 72.85, "step": 88545, "token_acc": 0.5107692307692308, "train_speed(iter/s)": 0.672755 }, { "epoch": 3.7937534809991003, "grad_norm": 6.804324626922607, "learning_rate": 
1.37176571097764e-05, "loss": 2.260811614990234, "memory(GiB)": 72.85, "step": 88550, "token_acc": 0.5215827338129496, "train_speed(iter/s)": 0.672762 }, { "epoch": 3.793967696328349, "grad_norm": 6.8277812004089355, "learning_rate": 1.3713026901525928e-05, "loss": 2.2389602661132812, "memory(GiB)": 72.85, "step": 88555, "token_acc": 0.5493421052631579, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.7941819116575983, "grad_norm": 5.72192907333374, "learning_rate": 1.3708397350648027e-05, "loss": 2.327047348022461, "memory(GiB)": 72.85, "step": 88560, "token_acc": 0.4854368932038835, "train_speed(iter/s)": 0.672761 }, { "epoch": 3.794396126986847, "grad_norm": 7.319942474365234, "learning_rate": 1.3703768457226557e-05, "loss": 2.0290449142456053, "memory(GiB)": 72.85, "step": 88565, "token_acc": 0.592, "train_speed(iter/s)": 0.672768 }, { "epoch": 3.794610342316096, "grad_norm": 5.247231960296631, "learning_rate": 1.3699140221345364e-05, "loss": 1.9920429229736327, "memory(GiB)": 72.85, "step": 88570, "token_acc": 0.572347266881029, "train_speed(iter/s)": 0.672759 }, { "epoch": 3.794824557645345, "grad_norm": 5.449360370635986, "learning_rate": 1.3694512643088286e-05, "loss": 2.2271278381347654, "memory(GiB)": 72.85, "step": 88575, "token_acc": 0.5144927536231884, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.795038772974594, "grad_norm": 6.418020248413086, "learning_rate": 1.3689885722539187e-05, "loss": 2.182906150817871, "memory(GiB)": 72.85, "step": 88580, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.672767 }, { "epoch": 3.795252988303843, "grad_norm": 5.4881696701049805, "learning_rate": 1.3685259459781853e-05, "loss": 2.006900596618652, "memory(GiB)": 72.85, "step": 88585, "token_acc": 0.5575757575757576, "train_speed(iter/s)": 0.672764 }, { "epoch": 3.795467203633092, "grad_norm": 7.496159553527832, "learning_rate": 1.3680633854900121e-05, "loss": 1.7286426544189453, "memory(GiB)": 72.85, "step": 88590, "token_acc": 0.5657894736842105, 
"train_speed(iter/s)": 0.672768 }, { "epoch": 3.795681418962341, "grad_norm": 7.428773403167725, "learning_rate": 1.3676008907977784e-05, "loss": 2.325154495239258, "memory(GiB)": 72.85, "step": 88595, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.672774 }, { "epoch": 3.7958956342915897, "grad_norm": 5.493962287902832, "learning_rate": 1.3671384619098614e-05, "loss": 2.0966888427734376, "memory(GiB)": 72.85, "step": 88600, "token_acc": 0.5236363636363637, "train_speed(iter/s)": 0.672778 }, { "epoch": 3.796109849620839, "grad_norm": 8.639824867248535, "learning_rate": 1.3666760988346395e-05, "loss": 2.2174510955810547, "memory(GiB)": 72.85, "step": 88605, "token_acc": 0.514018691588785, "train_speed(iter/s)": 0.672786 }, { "epoch": 3.796324064950088, "grad_norm": 4.102678298950195, "learning_rate": 1.3662138015804865e-05, "loss": 2.2546096801757813, "memory(GiB)": 72.85, "step": 88610, "token_acc": 0.5308641975308642, "train_speed(iter/s)": 0.672783 }, { "epoch": 3.7965382802793366, "grad_norm": 4.960516452789307, "learning_rate": 1.365751570155781e-05, "loss": 2.0230619430541994, "memory(GiB)": 72.85, "step": 88615, "token_acc": 0.5979020979020979, "train_speed(iter/s)": 0.672784 }, { "epoch": 3.796752495608586, "grad_norm": 6.365875720977783, "learning_rate": 1.3652894045688952e-05, "loss": 1.992293930053711, "memory(GiB)": 72.85, "step": 88620, "token_acc": 0.5551181102362205, "train_speed(iter/s)": 0.672791 }, { "epoch": 3.7969667109378347, "grad_norm": 3.9995057582855225, "learning_rate": 1.3648273048282012e-05, "loss": 2.4319515228271484, "memory(GiB)": 72.85, "step": 88625, "token_acc": 0.4948453608247423, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.7971809262670835, "grad_norm": 6.275674343109131, "learning_rate": 1.3643652709420707e-05, "loss": 2.0471864700317384, "memory(GiB)": 72.85, "step": 88630, "token_acc": 0.5794701986754967, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.7973951415963327, "grad_norm": 4.541415691375732, 
"learning_rate": 1.3639033029188741e-05, "loss": 2.2375858306884764, "memory(GiB)": 72.85, "step": 88635, "token_acc": 0.5404411764705882, "train_speed(iter/s)": 0.67279 }, { "epoch": 3.7976093569255815, "grad_norm": 6.489036560058594, "learning_rate": 1.3634414007669783e-05, "loss": 2.005298614501953, "memory(GiB)": 72.85, "step": 88640, "token_acc": 0.5222222222222223, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.7978235722548304, "grad_norm": 5.540270805358887, "learning_rate": 1.3629795644947541e-05, "loss": 1.8710474014282226, "memory(GiB)": 72.85, "step": 88645, "token_acc": 0.5977859778597786, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.7980377875840796, "grad_norm": 7.534907341003418, "learning_rate": 1.362517794110566e-05, "loss": 2.4337879180908204, "memory(GiB)": 72.85, "step": 88650, "token_acc": 0.4894366197183099, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.7982520029133284, "grad_norm": 5.722867488861084, "learning_rate": 1.3620560896227813e-05, "loss": 2.165181541442871, "memory(GiB)": 72.85, "step": 88655, "token_acc": 0.53156146179402, "train_speed(iter/s)": 0.672782 }, { "epoch": 3.7984662182425772, "grad_norm": 4.458883762359619, "learning_rate": 1.3615944510397632e-05, "loss": 1.972874069213867, "memory(GiB)": 72.85, "step": 88660, "token_acc": 0.5475409836065573, "train_speed(iter/s)": 0.672788 }, { "epoch": 3.7986804335718265, "grad_norm": 4.982778072357178, "learning_rate": 1.3611328783698752e-05, "loss": 1.943499755859375, "memory(GiB)": 72.85, "step": 88665, "token_acc": 0.5209003215434084, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.7988946489010753, "grad_norm": 4.353864669799805, "learning_rate": 1.360671371621477e-05, "loss": 2.1101591110229494, "memory(GiB)": 72.85, "step": 88670, "token_acc": 0.5311475409836065, "train_speed(iter/s)": 0.672791 }, { "epoch": 3.799108864230324, "grad_norm": 5.131698131561279, "learning_rate": 1.360209930802932e-05, "loss": 1.8491771697998047, "memory(GiB)": 72.85, "step": 88675, 
"token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.7993230795595734, "grad_norm": 5.6436638832092285, "learning_rate": 1.3597485559225987e-05, "loss": 2.209421730041504, "memory(GiB)": 72.85, "step": 88680, "token_acc": 0.5064935064935064, "train_speed(iter/s)": 0.672789 }, { "epoch": 3.799537294888822, "grad_norm": 4.608534812927246, "learning_rate": 1.3592872469888351e-05, "loss": 2.002381134033203, "memory(GiB)": 72.85, "step": 88685, "token_acc": 0.5795454545454546, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.799751510218071, "grad_norm": 8.422449111938477, "learning_rate": 1.3588260040099987e-05, "loss": 2.015131378173828, "memory(GiB)": 72.85, "step": 88690, "token_acc": 0.5584905660377358, "train_speed(iter/s)": 0.672784 }, { "epoch": 3.7999657255473203, "grad_norm": 5.293859481811523, "learning_rate": 1.3583648269944443e-05, "loss": 2.0592451095581055, "memory(GiB)": 72.85, "step": 88695, "token_acc": 0.5622775800711743, "train_speed(iter/s)": 0.67279 }, { "epoch": 3.800179940876569, "grad_norm": 5.565335273742676, "learning_rate": 1.3579037159505258e-05, "loss": 1.9671337127685546, "memory(GiB)": 72.85, "step": 88700, "token_acc": 0.5893416927899686, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.800394156205818, "grad_norm": 5.438821315765381, "learning_rate": 1.3574426708866e-05, "loss": 2.170502853393555, "memory(GiB)": 72.85, "step": 88705, "token_acc": 0.5570032573289903, "train_speed(iter/s)": 0.672801 }, { "epoch": 3.800608371535067, "grad_norm": 7.929220199584961, "learning_rate": 1.3569816918110167e-05, "loss": 2.0139163970947265, "memory(GiB)": 72.85, "step": 88710, "token_acc": 0.5655172413793104, "train_speed(iter/s)": 0.672805 }, { "epoch": 3.800822586864316, "grad_norm": 7.3694586753845215, "learning_rate": 1.3565207787321283e-05, "loss": 2.0851715087890623, "memory(GiB)": 72.85, "step": 88715, "token_acc": 0.5111111111111111, "train_speed(iter/s)": 0.672812 }, { "epoch": 3.8010368021935648, "grad_norm": 
6.56181526184082, "learning_rate": 1.3560599316582817e-05, "loss": 1.9430845260620118, "memory(GiB)": 72.85, "step": 88720, "token_acc": 0.5403508771929825, "train_speed(iter/s)": 0.67281 }, { "epoch": 3.801251017522814, "grad_norm": 6.826817512512207, "learning_rate": 1.3555991505978288e-05, "loss": 2.1044261932373045, "memory(GiB)": 72.85, "step": 88725, "token_acc": 0.5218855218855218, "train_speed(iter/s)": 0.672811 }, { "epoch": 3.801465232852063, "grad_norm": 5.758919715881348, "learning_rate": 1.3551384355591152e-05, "loss": 1.7869033813476562, "memory(GiB)": 72.85, "step": 88730, "token_acc": 0.6042402826855123, "train_speed(iter/s)": 0.6728 }, { "epoch": 3.8016794481813116, "grad_norm": 5.867374897003174, "learning_rate": 1.3546777865504895e-05, "loss": 1.964742088317871, "memory(GiB)": 72.85, "step": 88735, "token_acc": 0.5560344827586207, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.801893663510561, "grad_norm": 5.780582427978516, "learning_rate": 1.3542172035802952e-05, "loss": 2.0076217651367188, "memory(GiB)": 72.85, "step": 88740, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.6728 }, { "epoch": 3.8021078788398097, "grad_norm": 7.222357749938965, "learning_rate": 1.3537566866568762e-05, "loss": 2.3059715270996093, "memory(GiB)": 72.85, "step": 88745, "token_acc": 0.49640287769784175, "train_speed(iter/s)": 0.672802 }, { "epoch": 3.8023220941690585, "grad_norm": 5.506109714508057, "learning_rate": 1.3532962357885754e-05, "loss": 2.1208463668823243, "memory(GiB)": 72.85, "step": 88750, "token_acc": 0.5654952076677316, "train_speed(iter/s)": 0.672793 }, { "epoch": 3.802536309498308, "grad_norm": 5.888340950012207, "learning_rate": 1.3528358509837324e-05, "loss": 2.2724853515625, "memory(GiB)": 72.85, "step": 88755, "token_acc": 0.5245901639344263, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.8027505248275566, "grad_norm": 6.012366771697998, "learning_rate": 1.352375532250691e-05, "loss": 1.7645660400390626, "memory(GiB)": 72.85, "step": 
88760, "token_acc": 0.602112676056338, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.8029647401568054, "grad_norm": 4.990540027618408, "learning_rate": 1.351915279597788e-05, "loss": 2.0505176544189454, "memory(GiB)": 72.85, "step": 88765, "token_acc": 0.5427631578947368, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.8031789554860547, "grad_norm": 6.167916297912598, "learning_rate": 1.3514550930333624e-05, "loss": 1.9697135925292968, "memory(GiB)": 72.85, "step": 88770, "token_acc": 0.5443037974683544, "train_speed(iter/s)": 0.672787 }, { "epoch": 3.8033931708153035, "grad_norm": 5.137491226196289, "learning_rate": 1.3509949725657507e-05, "loss": 1.964113998413086, "memory(GiB)": 72.85, "step": 88775, "token_acc": 0.5450980392156862, "train_speed(iter/s)": 0.672785 }, { "epoch": 3.8036073861445523, "grad_norm": 5.31507682800293, "learning_rate": 1.3505349182032878e-05, "loss": 2.136492156982422, "memory(GiB)": 72.85, "step": 88780, "token_acc": 0.5150602409638554, "train_speed(iter/s)": 0.672795 }, { "epoch": 3.8038216014738016, "grad_norm": 5.484696388244629, "learning_rate": 1.350074929954307e-05, "loss": 2.317393493652344, "memory(GiB)": 72.85, "step": 88785, "token_acc": 0.4626865671641791, "train_speed(iter/s)": 0.672799 }, { "epoch": 3.8040358168030504, "grad_norm": 6.279439926147461, "learning_rate": 1.3496150078271436e-05, "loss": 2.143609619140625, "memory(GiB)": 72.85, "step": 88790, "token_acc": 0.5147540983606558, "train_speed(iter/s)": 0.672804 }, { "epoch": 3.804250032132299, "grad_norm": 5.584055423736572, "learning_rate": 1.3491551518301277e-05, "loss": 2.1724838256835937, "memory(GiB)": 72.85, "step": 88795, "token_acc": 0.5028089887640449, "train_speed(iter/s)": 0.672811 }, { "epoch": 3.8044642474615484, "grad_norm": 5.347318649291992, "learning_rate": 1.3486953619715926e-05, "loss": 2.1600303649902344, "memory(GiB)": 72.85, "step": 88800, "token_acc": 0.5239616613418531, "train_speed(iter/s)": 0.672812 }, { "epoch": 3.8046784627907972, 
"grad_norm": 4.845505714416504, "learning_rate": 1.348235638259866e-05, "loss": 2.2761674880981446, "memory(GiB)": 72.85, "step": 88805, "token_acc": 0.5244755244755245, "train_speed(iter/s)": 0.672816 }, { "epoch": 3.804892678120046, "grad_norm": 5.3245062828063965, "learning_rate": 1.3477759807032764e-05, "loss": 1.9091951370239257, "memory(GiB)": 72.85, "step": 88810, "token_acc": 0.5753012048192772, "train_speed(iter/s)": 0.672816 }, { "epoch": 3.8051068934492953, "grad_norm": 4.9630560874938965, "learning_rate": 1.3473163893101498e-05, "loss": 2.113864517211914, "memory(GiB)": 72.85, "step": 88815, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.672819 }, { "epoch": 3.805321108778544, "grad_norm": 6.286643028259277, "learning_rate": 1.3468568640888146e-05, "loss": 1.8922969818115234, "memory(GiB)": 72.85, "step": 88820, "token_acc": 0.5779467680608364, "train_speed(iter/s)": 0.672823 }, { "epoch": 3.805535324107793, "grad_norm": 6.35530424118042, "learning_rate": 1.3463974050475941e-05, "loss": 2.2327592849731444, "memory(GiB)": 72.85, "step": 88825, "token_acc": 0.5439739413680782, "train_speed(iter/s)": 0.672825 }, { "epoch": 3.805749539437042, "grad_norm": 5.068140029907227, "learning_rate": 1.3459380121948123e-05, "loss": 2.3326398849487306, "memory(GiB)": 72.85, "step": 88830, "token_acc": 0.4957983193277311, "train_speed(iter/s)": 0.672832 }, { "epoch": 3.805963754766291, "grad_norm": 5.908372402191162, "learning_rate": 1.3454786855387908e-05, "loss": 2.215622138977051, "memory(GiB)": 72.85, "step": 88835, "token_acc": 0.5283687943262412, "train_speed(iter/s)": 0.672835 }, { "epoch": 3.80617797009554, "grad_norm": 4.537492275238037, "learning_rate": 1.3450194250878512e-05, "loss": 2.246430778503418, "memory(GiB)": 72.85, "step": 88840, "token_acc": 0.5051194539249146, "train_speed(iter/s)": 0.672828 }, { "epoch": 3.806392185424789, "grad_norm": 6.542224884033203, "learning_rate": 1.3445602308503119e-05, "loss": 2.214178466796875, "memory(GiB)": 
72.85, "step": 88845, "token_acc": 0.512280701754386, "train_speed(iter/s)": 0.672833 }, { "epoch": 3.806606400754038, "grad_norm": 7.233428001403809, "learning_rate": 1.3441011028344947e-05, "loss": 2.199507141113281, "memory(GiB)": 72.85, "step": 88850, "token_acc": 0.49850746268656715, "train_speed(iter/s)": 0.672832 }, { "epoch": 3.8068206160832867, "grad_norm": 5.738512992858887, "learning_rate": 1.3436420410487156e-05, "loss": 2.1535926818847657, "memory(GiB)": 72.85, "step": 88855, "token_acc": 0.5089285714285714, "train_speed(iter/s)": 0.672831 }, { "epoch": 3.807034831412536, "grad_norm": 6.871333599090576, "learning_rate": 1.3431830455012905e-05, "loss": 2.2006526947021485, "memory(GiB)": 72.85, "step": 88860, "token_acc": 0.5276872964169381, "train_speed(iter/s)": 0.672831 }, { "epoch": 3.8072490467417848, "grad_norm": 5.370438098907471, "learning_rate": 1.3427241162005356e-05, "loss": 2.3929637908935546, "memory(GiB)": 72.85, "step": 88865, "token_acc": 0.5233333333333333, "train_speed(iter/s)": 0.672821 }, { "epoch": 3.8074632620710336, "grad_norm": 4.746522903442383, "learning_rate": 1.342265253154762e-05, "loss": 1.9327472686767577, "memory(GiB)": 72.85, "step": 88870, "token_acc": 0.5253164556962026, "train_speed(iter/s)": 0.672828 }, { "epoch": 3.807677477400283, "grad_norm": 4.4036383628845215, "learning_rate": 1.3418064563722848e-05, "loss": 2.0195077896118163, "memory(GiB)": 72.85, "step": 88875, "token_acc": 0.5325077399380805, "train_speed(iter/s)": 0.672816 }, { "epoch": 3.8078916927295317, "grad_norm": 4.809118747711182, "learning_rate": 1.3413477258614165e-05, "loss": 2.0488359451293947, "memory(GiB)": 72.85, "step": 88880, "token_acc": 0.5595567867036011, "train_speed(iter/s)": 0.672814 }, { "epoch": 3.8081059080587805, "grad_norm": 4.541502475738525, "learning_rate": 1.340889061630467e-05, "loss": 2.127695083618164, "memory(GiB)": 72.85, "step": 88885, "token_acc": 0.5302197802197802, "train_speed(iter/s)": 0.672811 }, { "epoch": 
3.8083201233880297, "grad_norm": 5.6208720207214355, "learning_rate": 1.340430463687744e-05, "loss": 2.2994285583496095, "memory(GiB)": 72.85, "step": 88890, "token_acc": 0.5127388535031847, "train_speed(iter/s)": 0.672821 }, { "epoch": 3.8085343387172785, "grad_norm": 6.696994304656982, "learning_rate": 1.3399719320415566e-05, "loss": 2.4738481521606444, "memory(GiB)": 72.85, "step": 88895, "token_acc": 0.46688741721854304, "train_speed(iter/s)": 0.672823 }, { "epoch": 3.8087485540465273, "grad_norm": 5.517415523529053, "learning_rate": 1.3395134667002091e-05, "loss": 2.1182525634765623, "memory(GiB)": 72.85, "step": 88900, "token_acc": 0.5107692307692308, "train_speed(iter/s)": 0.672826 }, { "epoch": 3.8089627693757766, "grad_norm": 3.6233041286468506, "learning_rate": 1.3390550676720104e-05, "loss": 1.7693799972534179, "memory(GiB)": 72.85, "step": 88905, "token_acc": 0.5514705882352942, "train_speed(iter/s)": 0.672824 }, { "epoch": 3.8091769847050254, "grad_norm": 5.437952995300293, "learning_rate": 1.3385967349652634e-05, "loss": 2.209499740600586, "memory(GiB)": 72.85, "step": 88910, "token_acc": 0.532608695652174, "train_speed(iter/s)": 0.672814 }, { "epoch": 3.8093912000342742, "grad_norm": 5.518321514129639, "learning_rate": 1.3381384685882715e-05, "loss": 2.00006103515625, "memory(GiB)": 72.85, "step": 88915, "token_acc": 0.5674740484429066, "train_speed(iter/s)": 0.672808 }, { "epoch": 3.8096054153635235, "grad_norm": 5.923853397369385, "learning_rate": 1.3376802685493367e-05, "loss": 2.4056215286254883, "memory(GiB)": 72.85, "step": 88920, "token_acc": 0.4619718309859155, "train_speed(iter/s)": 0.672812 }, { "epoch": 3.8098196306927723, "grad_norm": 4.65238618850708, "learning_rate": 1.3372221348567588e-05, "loss": 2.177163505554199, "memory(GiB)": 72.85, "step": 88925, "token_acc": 0.5234657039711191, "train_speed(iter/s)": 0.67282 }, { "epoch": 3.810033846022021, "grad_norm": 6.560069561004639, "learning_rate": 1.3367640675188365e-05, "loss": 
1.9125150680541991, "memory(GiB)": 72.85, "step": 88930, "token_acc": 0.5583941605839416, "train_speed(iter/s)": 0.672822 }, { "epoch": 3.8102480613512704, "grad_norm": 5.642876148223877, "learning_rate": 1.3363060665438709e-05, "loss": 1.995936965942383, "memory(GiB)": 72.85, "step": 88935, "token_acc": 0.541033434650456, "train_speed(iter/s)": 0.672823 }, { "epoch": 3.810462276680519, "grad_norm": 3.9212396144866943, "learning_rate": 1.3358481319401578e-05, "loss": 2.0474658966064454, "memory(GiB)": 72.85, "step": 88940, "token_acc": 0.552, "train_speed(iter/s)": 0.672825 }, { "epoch": 3.810676492009768, "grad_norm": 5.97024393081665, "learning_rate": 1.3353902637159916e-05, "loss": 2.3406698226928713, "memory(GiB)": 72.85, "step": 88945, "token_acc": 0.5111940298507462, "train_speed(iter/s)": 0.672827 }, { "epoch": 3.8108907073390172, "grad_norm": 7.328152179718018, "learning_rate": 1.33493246187967e-05, "loss": 2.0016332626342774, "memory(GiB)": 72.85, "step": 88950, "token_acc": 0.5793650793650794, "train_speed(iter/s)": 0.672826 }, { "epoch": 3.811104922668266, "grad_norm": 5.3405938148498535, "learning_rate": 1.3344747264394848e-05, "loss": 2.2709564208984374, "memory(GiB)": 72.85, "step": 88955, "token_acc": 0.525679758308157, "train_speed(iter/s)": 0.672829 }, { "epoch": 3.811319137997515, "grad_norm": 6.853544235229492, "learning_rate": 1.334017057403727e-05, "loss": 2.2111579895019533, "memory(GiB)": 72.85, "step": 88960, "token_acc": 0.5268817204301075, "train_speed(iter/s)": 0.672837 }, { "epoch": 3.811533353326764, "grad_norm": 5.634567737579346, "learning_rate": 1.3335594547806912e-05, "loss": 2.224256896972656, "memory(GiB)": 72.85, "step": 88965, "token_acc": 0.5261627906976745, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.811747568656013, "grad_norm": 4.678146839141846, "learning_rate": 1.3331019185786647e-05, "loss": 2.035572624206543, "memory(GiB)": 72.85, "step": 88970, "token_acc": 0.5614035087719298, "train_speed(iter/s)": 0.672844 }, { 
"epoch": 3.8119617839852618, "grad_norm": 6.9850544929504395, "learning_rate": 1.332644448805937e-05, "loss": 2.046652984619141, "memory(GiB)": 72.85, "step": 88975, "token_acc": 0.5604395604395604, "train_speed(iter/s)": 0.672843 }, { "epoch": 3.812175999314511, "grad_norm": 5.929824352264404, "learning_rate": 1.3321870454707952e-05, "loss": 2.1024051666259767, "memory(GiB)": 72.85, "step": 88980, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.672854 }, { "epoch": 3.81239021464376, "grad_norm": 7.370256423950195, "learning_rate": 1.3317297085815256e-05, "loss": 2.2391641616821287, "memory(GiB)": 72.85, "step": 88985, "token_acc": 0.5397350993377483, "train_speed(iter/s)": 0.672861 }, { "epoch": 3.8126044299730086, "grad_norm": 8.565942764282227, "learning_rate": 1.3312724381464125e-05, "loss": 2.0869638442993166, "memory(GiB)": 72.85, "step": 88990, "token_acc": 0.5317460317460317, "train_speed(iter/s)": 0.67285 }, { "epoch": 3.812818645302258, "grad_norm": 6.679172515869141, "learning_rate": 1.3308152341737417e-05, "loss": 2.2656097412109375, "memory(GiB)": 72.85, "step": 88995, "token_acc": 0.4864864864864865, "train_speed(iter/s)": 0.672865 }, { "epoch": 3.8130328606315067, "grad_norm": 6.414939880371094, "learning_rate": 1.3303580966717955e-05, "loss": 2.2616598129272463, "memory(GiB)": 72.85, "step": 89000, "token_acc": 0.5162337662337663, "train_speed(iter/s)": 0.672869 }, { "epoch": 3.8130328606315067, "eval_loss": 2.1278250217437744, "eval_runtime": 14.7288, "eval_samples_per_second": 6.789, "eval_steps_per_second": 6.789, "eval_token_acc": 0.45962732919254656, "step": 89000 }, { "epoch": 3.8132470759607555, "grad_norm": 5.097575664520264, "learning_rate": 1.3299010256488542e-05, "loss": 2.054350471496582, "memory(GiB)": 72.85, "step": 89005, "token_acc": 0.479557069846678, "train_speed(iter/s)": 0.672789 }, { "epoch": 3.8134612912900048, "grad_norm": 6.25760555267334, "learning_rate": 1.3294440211131987e-05, "loss": 2.2904342651367187, 
"memory(GiB)": 72.85, "step": 89010, "token_acc": 0.563573883161512, "train_speed(iter/s)": 0.672781 }, { "epoch": 3.8136755066192536, "grad_norm": 5.738301753997803, "learning_rate": 1.3289870830731066e-05, "loss": 1.9659074783325194, "memory(GiB)": 72.85, "step": 89015, "token_acc": 0.5738831615120275, "train_speed(iter/s)": 0.672789 }, { "epoch": 3.8138897219485024, "grad_norm": 5.510807991027832, "learning_rate": 1.3285302115368576e-05, "loss": 2.1848106384277344, "memory(GiB)": 72.85, "step": 89020, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.67279 }, { "epoch": 3.8141039372777517, "grad_norm": 7.657186031341553, "learning_rate": 1.3280734065127287e-05, "loss": 2.1358346939086914, "memory(GiB)": 72.85, "step": 89025, "token_acc": 0.5413533834586466, "train_speed(iter/s)": 0.672791 }, { "epoch": 3.8143181526070005, "grad_norm": 4.626282215118408, "learning_rate": 1.3276166680089952e-05, "loss": 2.1012170791625975, "memory(GiB)": 72.85, "step": 89030, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.8145323679362493, "grad_norm": 7.296841621398926, "learning_rate": 1.3271599960339304e-05, "loss": 2.2596954345703124, "memory(GiB)": 72.85, "step": 89035, "token_acc": 0.5457413249211357, "train_speed(iter/s)": 0.672803 }, { "epoch": 3.8147465832654985, "grad_norm": 4.7753190994262695, "learning_rate": 1.3267033905958075e-05, "loss": 1.9522594451904296, "memory(GiB)": 72.85, "step": 89040, "token_acc": 0.5490196078431373, "train_speed(iter/s)": 0.672801 }, { "epoch": 3.8149607985947473, "grad_norm": 5.44185209274292, "learning_rate": 1.3262468517028975e-05, "loss": 2.166004943847656, "memory(GiB)": 72.85, "step": 89045, "token_acc": 0.5780590717299579, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.815175013923996, "grad_norm": 5.934185981750488, "learning_rate": 1.3257903793634735e-05, "loss": 2.217533493041992, "memory(GiB)": 72.85, "step": 89050, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.672806 }, { 
"epoch": 3.8153892292532454, "grad_norm": 6.3167829513549805, "learning_rate": 1.3253339735858034e-05, "loss": 2.0910871505737303, "memory(GiB)": 72.85, "step": 89055, "token_acc": 0.5518518518518518, "train_speed(iter/s)": 0.672808 }, { "epoch": 3.8156034445824942, "grad_norm": 7.575211048126221, "learning_rate": 1.324877634378155e-05, "loss": 1.8675146102905273, "memory(GiB)": 72.85, "step": 89060, "token_acc": 0.573943661971831, "train_speed(iter/s)": 0.672815 }, { "epoch": 3.815817659911743, "grad_norm": 5.9530110359191895, "learning_rate": 1.3244213617487965e-05, "loss": 2.011940574645996, "memory(GiB)": 72.85, "step": 89065, "token_acc": 0.5821428571428572, "train_speed(iter/s)": 0.672819 }, { "epoch": 3.8160318752409923, "grad_norm": 5.299249172210693, "learning_rate": 1.3239651557059922e-05, "loss": 1.950462532043457, "memory(GiB)": 72.85, "step": 89070, "token_acc": 0.5754385964912281, "train_speed(iter/s)": 0.672824 }, { "epoch": 3.816246090570241, "grad_norm": 5.618359565734863, "learning_rate": 1.3235090162580065e-05, "loss": 2.2214622497558594, "memory(GiB)": 72.85, "step": 89075, "token_acc": 0.532051282051282, "train_speed(iter/s)": 0.672822 }, { "epoch": 3.81646030589949, "grad_norm": 6.79823637008667, "learning_rate": 1.3230529434131056e-05, "loss": 2.120899772644043, "memory(GiB)": 72.85, "step": 89080, "token_acc": 0.5176056338028169, "train_speed(iter/s)": 0.672833 }, { "epoch": 3.816674521228739, "grad_norm": 4.936615943908691, "learning_rate": 1.3225969371795494e-05, "loss": 1.9149066925048828, "memory(GiB)": 72.85, "step": 89085, "token_acc": 0.5547445255474452, "train_speed(iter/s)": 0.672841 }, { "epoch": 3.816888736557988, "grad_norm": 5.609488010406494, "learning_rate": 1.322140997565598e-05, "loss": 1.8524948120117188, "memory(GiB)": 72.85, "step": 89090, "token_acc": 0.5793103448275863, "train_speed(iter/s)": 0.672836 }, { "epoch": 3.817102951887237, "grad_norm": 6.314071178436279, "learning_rate": 1.3216851245795143e-05, "loss": 
2.198783111572266, "memory(GiB)": 72.85, "step": 89095, "token_acc": 0.5228758169934641, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.817317167216486, "grad_norm": 5.140715599060059, "learning_rate": 1.3212293182295549e-05, "loss": 2.314952278137207, "memory(GiB)": 72.85, "step": 89100, "token_acc": 0.49171270718232046, "train_speed(iter/s)": 0.672836 }, { "epoch": 3.817531382545735, "grad_norm": 4.592928409576416, "learning_rate": 1.3207735785239756e-05, "loss": 1.8668577194213867, "memory(GiB)": 72.85, "step": 89105, "token_acc": 0.5819935691318328, "train_speed(iter/s)": 0.672843 }, { "epoch": 3.8177455978749837, "grad_norm": 5.464303016662598, "learning_rate": 1.3203179054710363e-05, "loss": 2.2853485107421876, "memory(GiB)": 72.85, "step": 89110, "token_acc": 0.5190839694656488, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.817959813204233, "grad_norm": 5.304604530334473, "learning_rate": 1.31986229907899e-05, "loss": 2.229732322692871, "memory(GiB)": 72.85, "step": 89115, "token_acc": 0.4927536231884058, "train_speed(iter/s)": 0.672833 }, { "epoch": 3.8181740285334818, "grad_norm": 6.149523735046387, "learning_rate": 1.3194067593560899e-05, "loss": 2.056544303894043, "memory(GiB)": 72.85, "step": 89120, "token_acc": 0.5425867507886435, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.8183882438627306, "grad_norm": 6.266686916351318, "learning_rate": 1.3189512863105891e-05, "loss": 2.1677825927734373, "memory(GiB)": 72.85, "step": 89125, "token_acc": 0.5283687943262412, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.81860245919198, "grad_norm": 5.435312271118164, "learning_rate": 1.318495879950739e-05, "loss": 1.9426166534423828, "memory(GiB)": 72.85, "step": 89130, "token_acc": 0.5524691358024691, "train_speed(iter/s)": 0.672821 }, { "epoch": 3.8188166745212286, "grad_norm": 6.411437511444092, "learning_rate": 1.3180405402847884e-05, "loss": 2.033472442626953, "memory(GiB)": 72.85, "step": 89135, "token_acc": 0.5805243445692884, "train_speed(iter/s)": 
0.67282 }, { "epoch": 3.8190308898504775, "grad_norm": 6.044285297393799, "learning_rate": 1.3175852673209881e-05, "loss": 2.253184509277344, "memory(GiB)": 72.85, "step": 89140, "token_acc": 0.5548780487804879, "train_speed(iter/s)": 0.672822 }, { "epoch": 3.8192451051797267, "grad_norm": 5.878677845001221, "learning_rate": 1.3171300610675858e-05, "loss": 2.460663414001465, "memory(GiB)": 72.85, "step": 89145, "token_acc": 0.5070921985815603, "train_speed(iter/s)": 0.672826 }, { "epoch": 3.8194593205089755, "grad_norm": 4.934878349304199, "learning_rate": 1.3166749215328267e-05, "loss": 2.3208087921142577, "memory(GiB)": 72.85, "step": 89150, "token_acc": 0.5313653136531366, "train_speed(iter/s)": 0.672832 }, { "epoch": 3.8196735358382243, "grad_norm": 6.008206367492676, "learning_rate": 1.3162198487249572e-05, "loss": 2.160223960876465, "memory(GiB)": 72.85, "step": 89155, "token_acc": 0.5329512893982808, "train_speed(iter/s)": 0.672835 }, { "epoch": 3.8198877511674736, "grad_norm": 4.484124183654785, "learning_rate": 1.315764842652219e-05, "loss": 2.1721384048461916, "memory(GiB)": 72.85, "step": 89160, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.8201019664967224, "grad_norm": 4.293078422546387, "learning_rate": 1.3153099033228566e-05, "loss": 2.2231191635131835, "memory(GiB)": 72.85, "step": 89165, "token_acc": 0.5439739413680782, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.820316181825971, "grad_norm": 5.463493347167969, "learning_rate": 1.3148550307451135e-05, "loss": 2.235869216918945, "memory(GiB)": 72.85, "step": 89170, "token_acc": 0.5344827586206896, "train_speed(iter/s)": 0.672846 }, { "epoch": 3.8205303971552205, "grad_norm": 5.135865211486816, "learning_rate": 1.3144002249272286e-05, "loss": 2.0020191192626955, "memory(GiB)": 72.85, "step": 89175, "token_acc": 0.5896551724137931, "train_speed(iter/s)": 0.672853 }, { "epoch": 3.8207446124844693, "grad_norm": 6.504959583282471, "learning_rate": 
1.3139454858774409e-05, "loss": 2.0406957626342774, "memory(GiB)": 72.85, "step": 89180, "token_acc": 0.5223367697594502, "train_speed(iter/s)": 0.672858 }, { "epoch": 3.820958827813718, "grad_norm": 6.860550880432129, "learning_rate": 1.313490813603988e-05, "loss": 2.066924285888672, "memory(GiB)": 72.85, "step": 89185, "token_acc": 0.5598705501618123, "train_speed(iter/s)": 0.672854 }, { "epoch": 3.8211730431429674, "grad_norm": 5.629070281982422, "learning_rate": 1.3130362081151065e-05, "loss": 1.9140825271606445, "memory(GiB)": 72.85, "step": 89190, "token_acc": 0.5992366412213741, "train_speed(iter/s)": 0.672851 }, { "epoch": 3.821387258472216, "grad_norm": 5.797428607940674, "learning_rate": 1.312581669419034e-05, "loss": 2.1501132965087892, "memory(GiB)": 72.85, "step": 89195, "token_acc": 0.5586206896551724, "train_speed(iter/s)": 0.672849 }, { "epoch": 3.821601473801465, "grad_norm": 5.28212308883667, "learning_rate": 1.3121271975240035e-05, "loss": 2.2349933624267577, "memory(GiB)": 72.85, "step": 89200, "token_acc": 0.518796992481203, "train_speed(iter/s)": 0.672841 }, { "epoch": 3.8218156891307142, "grad_norm": 4.99946403503418, "learning_rate": 1.3116727924382483e-05, "loss": 2.1448654174804687, "memory(GiB)": 72.85, "step": 89205, "token_acc": 0.5585284280936454, "train_speed(iter/s)": 0.67284 }, { "epoch": 3.822029904459963, "grad_norm": 6.054779052734375, "learning_rate": 1.3112184541700007e-05, "loss": 2.2554275512695314, "memory(GiB)": 72.85, "step": 89210, "token_acc": 0.490625, "train_speed(iter/s)": 0.672843 }, { "epoch": 3.822244119789212, "grad_norm": 6.465923309326172, "learning_rate": 1.3107641827274908e-05, "loss": 2.0916852951049805, "memory(GiB)": 72.85, "step": 89215, "token_acc": 0.526813880126183, "train_speed(iter/s)": 0.67284 }, { "epoch": 3.822458335118461, "grad_norm": 6.720875263214111, "learning_rate": 1.3103099781189477e-05, "loss": 2.3927907943725586, "memory(GiB)": 72.85, "step": 89220, "token_acc": 0.5069444444444444, 
"train_speed(iter/s)": 0.672837 }, { "epoch": 3.82267255044771, "grad_norm": 6.517592906951904, "learning_rate": 1.3098558403526012e-05, "loss": 2.0735755920410157, "memory(GiB)": 72.85, "step": 89225, "token_acc": 0.5429553264604811, "train_speed(iter/s)": 0.672834 }, { "epoch": 3.8228867657769587, "grad_norm": 6.204859733581543, "learning_rate": 1.3094017694366783e-05, "loss": 2.1125465393066407, "memory(GiB)": 72.85, "step": 89230, "token_acc": 0.5273972602739726, "train_speed(iter/s)": 0.672835 }, { "epoch": 3.823100981106208, "grad_norm": 5.100192070007324, "learning_rate": 1.3089477653794042e-05, "loss": 1.940798568725586, "memory(GiB)": 72.85, "step": 89235, "token_acc": 0.5573770491803278, "train_speed(iter/s)": 0.672845 }, { "epoch": 3.823315196435457, "grad_norm": 6.180934429168701, "learning_rate": 1.3084938281890025e-05, "loss": 1.939805793762207, "memory(GiB)": 72.85, "step": 89240, "token_acc": 0.5674157303370787, "train_speed(iter/s)": 0.67284 }, { "epoch": 3.8235294117647056, "grad_norm": 6.353850364685059, "learning_rate": 1.308039957873699e-05, "loss": 1.9073625564575196, "memory(GiB)": 72.85, "step": 89245, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672837 }, { "epoch": 3.823743627093955, "grad_norm": 5.015727519989014, "learning_rate": 1.307586154441714e-05, "loss": 2.1049476623535157, "memory(GiB)": 72.85, "step": 89250, "token_acc": 0.5822368421052632, "train_speed(iter/s)": 0.672846 }, { "epoch": 3.8239578424232037, "grad_norm": 5.993034362792969, "learning_rate": 1.307132417901271e-05, "loss": 2.094737243652344, "memory(GiB)": 72.85, "step": 89255, "token_acc": 0.5604026845637584, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.8241720577524525, "grad_norm": 6.411613464355469, "learning_rate": 1.306678748260588e-05, "loss": 2.0335037231445314, "memory(GiB)": 72.85, "step": 89260, "token_acc": 0.5415282392026578, "train_speed(iter/s)": 0.672845 }, { "epoch": 3.8243862730817018, "grad_norm": 6.043384075164795, "learning_rate": 
1.3062251455278845e-05, "loss": 2.2348377227783205, "memory(GiB)": 72.85, "step": 89265, "token_acc": 0.5467128027681661, "train_speed(iter/s)": 0.67284 }, { "epoch": 3.8246004884109506, "grad_norm": 6.062367916107178, "learning_rate": 1.3057716097113775e-05, "loss": 2.1717903137207033, "memory(GiB)": 72.85, "step": 89270, "token_acc": 0.5833333333333334, "train_speed(iter/s)": 0.672847 }, { "epoch": 3.8248147037401994, "grad_norm": 5.71336030960083, "learning_rate": 1.3053181408192833e-05, "loss": 1.9734460830688476, "memory(GiB)": 72.85, "step": 89275, "token_acc": 0.5342465753424658, "train_speed(iter/s)": 0.672857 }, { "epoch": 3.8250289190694486, "grad_norm": 4.381412506103516, "learning_rate": 1.3048647388598156e-05, "loss": 1.92978515625, "memory(GiB)": 72.85, "step": 89280, "token_acc": 0.576, "train_speed(iter/s)": 0.67286 }, { "epoch": 3.8252431343986975, "grad_norm": 5.458390235900879, "learning_rate": 1.3044114038411908e-05, "loss": 2.396005630493164, "memory(GiB)": 72.85, "step": 89285, "token_acc": 0.5233644859813084, "train_speed(iter/s)": 0.67286 }, { "epoch": 3.8254573497279463, "grad_norm": 5.494016170501709, "learning_rate": 1.3039581357716202e-05, "loss": 1.9209419250488282, "memory(GiB)": 72.85, "step": 89290, "token_acc": 0.5572755417956656, "train_speed(iter/s)": 0.672854 }, { "epoch": 3.8256715650571955, "grad_norm": 6.583059310913086, "learning_rate": 1.3035049346593148e-05, "loss": 2.2999101638793946, "memory(GiB)": 72.85, "step": 89295, "token_acc": 0.5059880239520959, "train_speed(iter/s)": 0.672854 }, { "epoch": 3.8258857803864443, "grad_norm": 5.622025489807129, "learning_rate": 1.3030518005124853e-05, "loss": 1.9744558334350586, "memory(GiB)": 72.85, "step": 89300, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.672847 }, { "epoch": 3.826099995715693, "grad_norm": 6.712788105010986, "learning_rate": 1.3025987333393391e-05, "loss": 2.2402944564819336, "memory(GiB)": 72.85, "step": 89305, "token_acc": 0.524822695035461, 
"train_speed(iter/s)": 0.672837 }, { "epoch": 3.8263142110449424, "grad_norm": 5.029712677001953, "learning_rate": 1.3021457331480863e-05, "loss": 2.2588054656982424, "memory(GiB)": 72.85, "step": 89310, "token_acc": 0.5141955835962145, "train_speed(iter/s)": 0.672844 }, { "epoch": 3.826528426374191, "grad_norm": 4.684381008148193, "learning_rate": 1.3016927999469314e-05, "loss": 1.885670280456543, "memory(GiB)": 72.85, "step": 89315, "token_acc": 0.5568181818181818, "train_speed(iter/s)": 0.672846 }, { "epoch": 3.82674264170344, "grad_norm": 7.240773677825928, "learning_rate": 1.3012399337440823e-05, "loss": 2.0537933349609374, "memory(GiB)": 72.85, "step": 89320, "token_acc": 0.5490196078431373, "train_speed(iter/s)": 0.672849 }, { "epoch": 3.8269568570326893, "grad_norm": 5.840982913970947, "learning_rate": 1.3007871345477412e-05, "loss": 2.404925537109375, "memory(GiB)": 72.85, "step": 89325, "token_acc": 0.4860050890585242, "train_speed(iter/s)": 0.672852 }, { "epoch": 3.827171072361938, "grad_norm": 7.552815914154053, "learning_rate": 1.3003344023661113e-05, "loss": 2.0579418182373046, "memory(GiB)": 72.85, "step": 89330, "token_acc": 0.5214285714285715, "train_speed(iter/s)": 0.672857 }, { "epoch": 3.827385287691187, "grad_norm": 6.467860221862793, "learning_rate": 1.299881737207393e-05, "loss": 2.3370128631591798, "memory(GiB)": 72.85, "step": 89335, "token_acc": 0.4756554307116105, "train_speed(iter/s)": 0.672855 }, { "epoch": 3.827599503020436, "grad_norm": 5.503911972045898, "learning_rate": 1.2994291390797892e-05, "loss": 2.2797521591186523, "memory(GiB)": 72.85, "step": 89340, "token_acc": 0.5353535353535354, "train_speed(iter/s)": 0.672853 }, { "epoch": 3.827813718349685, "grad_norm": 5.716067790985107, "learning_rate": 1.2989766079914983e-05, "loss": 1.6500917434692384, "memory(GiB)": 72.85, "step": 89345, "token_acc": 0.5649122807017544, "train_speed(iter/s)": 0.672854 }, { "epoch": 3.828027933678934, "grad_norm": 6.2819318771362305, 
"learning_rate": 1.298524143950718e-05, "loss": 2.1775625228881834, "memory(GiB)": 72.85, "step": 89350, "token_acc": 0.48672566371681414, "train_speed(iter/s)": 0.672846 }, { "epoch": 3.828242149008183, "grad_norm": 5.444692611694336, "learning_rate": 1.2980717469656444e-05, "loss": 2.3153993606567385, "memory(GiB)": 72.85, "step": 89355, "token_acc": 0.5, "train_speed(iter/s)": 0.67284 }, { "epoch": 3.828456364337432, "grad_norm": 9.332343101501465, "learning_rate": 1.2976194170444745e-05, "loss": 1.9955862045288086, "memory(GiB)": 72.85, "step": 89360, "token_acc": 0.5620437956204379, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.8286705796666807, "grad_norm": 6.625326633453369, "learning_rate": 1.2971671541954006e-05, "loss": 2.2332136154174806, "memory(GiB)": 72.85, "step": 89365, "token_acc": 0.5016393442622951, "train_speed(iter/s)": 0.672837 }, { "epoch": 3.82888479499593, "grad_norm": 5.912018775939941, "learning_rate": 1.296714958426618e-05, "loss": 2.2415027618408203, "memory(GiB)": 72.85, "step": 89370, "token_acc": 0.49825783972125437, "train_speed(iter/s)": 0.672837 }, { "epoch": 3.8290990103251787, "grad_norm": 7.232619762420654, "learning_rate": 1.2962628297463187e-05, "loss": 2.224888229370117, "memory(GiB)": 72.85, "step": 89375, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672847 }, { "epoch": 3.8293132256544276, "grad_norm": 5.266637802124023, "learning_rate": 1.295810768162692e-05, "loss": 2.090745544433594, "memory(GiB)": 72.85, "step": 89380, "token_acc": 0.5311475409836065, "train_speed(iter/s)": 0.672841 }, { "epoch": 3.829527440983677, "grad_norm": 5.707479476928711, "learning_rate": 1.2953587736839269e-05, "loss": 2.066150665283203, "memory(GiB)": 72.85, "step": 89385, "token_acc": 0.5758620689655173, "train_speed(iter/s)": 0.672841 }, { "epoch": 3.8297416563129256, "grad_norm": 5.286025524139404, "learning_rate": 1.294906846318214e-05, "loss": 2.2089365005493162, "memory(GiB)": 72.85, "step": 89390, "token_acc": 
0.5093632958801498, "train_speed(iter/s)": 0.672838 }, { "epoch": 3.829955871642175, "grad_norm": 4.632121562957764, "learning_rate": 1.2944549860737381e-05, "loss": 1.961073112487793, "memory(GiB)": 72.85, "step": 89395, "token_acc": 0.5474006116207951, "train_speed(iter/s)": 0.67283 }, { "epoch": 3.8301700869714237, "grad_norm": 6.186366558074951, "learning_rate": 1.2940031929586877e-05, "loss": 2.05653076171875, "memory(GiB)": 72.85, "step": 89400, "token_acc": 0.5243445692883895, "train_speed(iter/s)": 0.672827 }, { "epoch": 3.8303843023006725, "grad_norm": 6.134958744049072, "learning_rate": 1.293551466981246e-05, "loss": 2.004745101928711, "memory(GiB)": 72.85, "step": 89405, "token_acc": 0.5234899328859061, "train_speed(iter/s)": 0.672818 }, { "epoch": 3.8305985176299218, "grad_norm": 4.93506383895874, "learning_rate": 1.293099808149596e-05, "loss": 2.3495485305786135, "memory(GiB)": 72.85, "step": 89410, "token_acc": 0.4910394265232975, "train_speed(iter/s)": 0.672818 }, { "epoch": 3.8308127329591706, "grad_norm": 6.145834445953369, "learning_rate": 1.2926482164719205e-05, "loss": 2.2782745361328125, "memory(GiB)": 72.85, "step": 89415, "token_acc": 0.5138461538461538, "train_speed(iter/s)": 0.672816 }, { "epoch": 3.8310269482884194, "grad_norm": 7.623732089996338, "learning_rate": 1.2921966919564005e-05, "loss": 1.9013607025146484, "memory(GiB)": 72.85, "step": 89420, "token_acc": 0.5531914893617021, "train_speed(iter/s)": 0.672802 }, { "epoch": 3.8312411636176686, "grad_norm": 5.25183629989624, "learning_rate": 1.2917452346112141e-05, "loss": 2.347105598449707, "memory(GiB)": 72.85, "step": 89425, "token_acc": 0.5278810408921933, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.8314553789469175, "grad_norm": 5.099460124969482, "learning_rate": 1.2912938444445433e-05, "loss": 2.110134315490723, "memory(GiB)": 72.85, "step": 89430, "token_acc": 0.5086705202312138, "train_speed(iter/s)": 0.672793 }, { "epoch": 3.8316695942761663, "grad_norm": 
9.247085571289062, "learning_rate": 1.2908425214645632e-05, "loss": 2.0684188842773437, "memory(GiB)": 72.85, "step": 89435, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.8318838096054155, "grad_norm": 7.820704460144043, "learning_rate": 1.2903912656794499e-05, "loss": 1.8706552505493164, "memory(GiB)": 72.85, "step": 89440, "token_acc": 0.5952380952380952, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.8320980249346643, "grad_norm": 4.95768928527832, "learning_rate": 1.289940077097379e-05, "loss": 2.3526479721069338, "memory(GiB)": 72.85, "step": 89445, "token_acc": 0.5266666666666666, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.832312240263913, "grad_norm": 6.597757816314697, "learning_rate": 1.2894889557265221e-05, "loss": 2.032852363586426, "memory(GiB)": 72.85, "step": 89450, "token_acc": 0.5142857142857142, "train_speed(iter/s)": 0.672807 }, { "epoch": 3.8325264555931624, "grad_norm": 4.969250202178955, "learning_rate": 1.289037901575056e-05, "loss": 2.1379266738891602, "memory(GiB)": 72.85, "step": 89455, "token_acc": 0.553125, "train_speed(iter/s)": 0.672811 }, { "epoch": 3.832740670922411, "grad_norm": 5.502128601074219, "learning_rate": 1.288586914651147e-05, "loss": 2.0327987670898438, "memory(GiB)": 72.85, "step": 89460, "token_acc": 0.5296610169491526, "train_speed(iter/s)": 0.672814 }, { "epoch": 3.83295488625166, "grad_norm": 9.547462463378906, "learning_rate": 1.2881359949629696e-05, "loss": 1.9786441802978516, "memory(GiB)": 72.85, "step": 89465, "token_acc": 0.5615615615615616, "train_speed(iter/s)": 0.672821 }, { "epoch": 3.8331691015809093, "grad_norm": 6.264106273651123, "learning_rate": 1.2876851425186904e-05, "loss": 2.119847297668457, "memory(GiB)": 72.85, "step": 89470, "token_acc": 0.5190839694656488, "train_speed(iter/s)": 0.672813 }, { "epoch": 3.833383316910158, "grad_norm": 6.028746128082275, "learning_rate": 1.2872343573264773e-05, "loss": 2.039164733886719, "memory(GiB)": 72.85, "step": 89475, 
"token_acc": 0.5656934306569343, "train_speed(iter/s)": 0.672814 }, { "epoch": 3.833597532239407, "grad_norm": 6.545751571655273, "learning_rate": 1.2867836393944953e-05, "loss": 1.942794418334961, "memory(GiB)": 72.85, "step": 89480, "token_acc": 0.5875486381322957, "train_speed(iter/s)": 0.672809 }, { "epoch": 3.833811747568656, "grad_norm": 4.664309501647949, "learning_rate": 1.2863329887309129e-05, "loss": 2.0888721466064455, "memory(GiB)": 72.85, "step": 89485, "token_acc": 0.5563380281690141, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.834025962897905, "grad_norm": 6.229625701904297, "learning_rate": 1.285882405343891e-05, "loss": 2.2956092834472654, "memory(GiB)": 72.85, "step": 89490, "token_acc": 0.5460526315789473, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.834240178227154, "grad_norm": 5.341671466827393, "learning_rate": 1.2854318892415945e-05, "loss": 2.289663887023926, "memory(GiB)": 72.85, "step": 89495, "token_acc": 0.5207667731629393, "train_speed(iter/s)": 0.672795 }, { "epoch": 3.834454393556403, "grad_norm": 7.547558784484863, "learning_rate": 1.284981440432183e-05, "loss": 1.9559928894042968, "memory(GiB)": 72.85, "step": 89500, "token_acc": 0.555984555984556, "train_speed(iter/s)": 0.672787 }, { "epoch": 3.834454393556403, "eval_loss": 2.05484676361084, "eval_runtime": 15.1511, "eval_samples_per_second": 6.6, "eval_steps_per_second": 6.6, "eval_token_acc": 0.48201438848920863, "step": 89500 }, { "epoch": 3.834668608885652, "grad_norm": 7.456129550933838, "learning_rate": 1.2845310589238185e-05, "loss": 2.118013381958008, "memory(GiB)": 72.85, "step": 89505, "token_acc": 0.4949596774193548, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.8348828242149007, "grad_norm": 6.198980331420898, "learning_rate": 1.2840807447246573e-05, "loss": 1.8935604095458984, "memory(GiB)": 72.85, "step": 89510, "token_acc": 0.5268817204301075, "train_speed(iter/s)": 0.672684 }, { "epoch": 3.83509703954415, "grad_norm": 4.786324977874756, "learning_rate": 
1.283630497842861e-05, "loss": 2.3120346069335938, "memory(GiB)": 72.85, "step": 89515, "token_acc": 0.5230769230769231, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.8353112548733987, "grad_norm": 5.053291320800781, "learning_rate": 1.2831803182865843e-05, "loss": 2.0111536026000976, "memory(GiB)": 72.85, "step": 89520, "token_acc": 0.4963768115942029, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.8355254702026476, "grad_norm": 6.024468898773193, "learning_rate": 1.282730206063983e-05, "loss": 2.077593231201172, "memory(GiB)": 72.85, "step": 89525, "token_acc": 0.5174825174825175, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.835739685531897, "grad_norm": 5.099366188049316, "learning_rate": 1.2822801611832103e-05, "loss": 2.2695837020874023, "memory(GiB)": 72.85, "step": 89530, "token_acc": 0.48338368580060426, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.8359539008611456, "grad_norm": 5.075733184814453, "learning_rate": 1.2818301836524194e-05, "loss": 1.9599884033203125, "memory(GiB)": 72.85, "step": 89535, "token_acc": 0.549645390070922, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.8361681161903944, "grad_norm": 6.832911491394043, "learning_rate": 1.281380273479762e-05, "loss": 2.1329410552978514, "memory(GiB)": 72.85, "step": 89540, "token_acc": 0.49823321554770317, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.8363823315196437, "grad_norm": 6.774927616119385, "learning_rate": 1.280930430673391e-05, "loss": 2.170965003967285, "memory(GiB)": 72.85, "step": 89545, "token_acc": 0.5374149659863946, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.8365965468488925, "grad_norm": 6.089997291564941, "learning_rate": 1.2804806552414544e-05, "loss": 2.3850479125976562, "memory(GiB)": 72.85, "step": 89550, "token_acc": 0.45180722891566266, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.8368107621781413, "grad_norm": 6.617410182952881, "learning_rate": 1.2800309471920996e-05, "loss": 1.8560831069946289, "memory(GiB)": 72.85, "step": 89555, "token_acc": 
0.5714285714285714, "train_speed(iter/s)": 0.672676 }, { "epoch": 3.8370249775073906, "grad_norm": 5.0978193283081055, "learning_rate": 1.2795813065334739e-05, "loss": 2.148248863220215, "memory(GiB)": 72.85, "step": 89560, "token_acc": 0.5395894428152492, "train_speed(iter/s)": 0.672679 }, { "epoch": 3.8372391928366394, "grad_norm": 7.514171123504639, "learning_rate": 1.2791317332737229e-05, "loss": 2.2798860549926756, "memory(GiB)": 72.85, "step": 89565, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.837453408165888, "grad_norm": 5.393597602844238, "learning_rate": 1.2786822274209892e-05, "loss": 2.0358348846435548, "memory(GiB)": 72.85, "step": 89570, "token_acc": 0.5399239543726235, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.8376676234951375, "grad_norm": 5.4940032958984375, "learning_rate": 1.2782327889834195e-05, "loss": 2.1387243270874023, "memory(GiB)": 72.85, "step": 89575, "token_acc": 0.5197368421052632, "train_speed(iter/s)": 0.672691 }, { "epoch": 3.8378818388243863, "grad_norm": 5.839317321777344, "learning_rate": 1.2777834179691544e-05, "loss": 2.4635236740112303, "memory(GiB)": 72.85, "step": 89580, "token_acc": 0.4811320754716981, "train_speed(iter/s)": 0.672692 }, { "epoch": 3.838096054153635, "grad_norm": 5.915650844573975, "learning_rate": 1.2773341143863343e-05, "loss": 1.9554885864257812, "memory(GiB)": 72.85, "step": 89585, "token_acc": 0.5394321766561514, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.8383102694828843, "grad_norm": 6.985197067260742, "learning_rate": 1.2768848782430986e-05, "loss": 2.094300079345703, "memory(GiB)": 72.85, "step": 89590, "token_acc": 0.5527950310559007, "train_speed(iter/s)": 0.672689 }, { "epoch": 3.838524484812133, "grad_norm": 7.440850734710693, "learning_rate": 1.2764357095475843e-05, "loss": 1.9498083114624023, "memory(GiB)": 72.85, "step": 89595, "token_acc": 0.5640138408304498, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.838738700141382, "grad_norm": 
6.072738170623779, "learning_rate": 1.2759866083079319e-05, "loss": 1.97921142578125, "memory(GiB)": 72.85, "step": 89600, "token_acc": 0.5296442687747036, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.838952915470631, "grad_norm": 5.609594345092773, "learning_rate": 1.2755375745322755e-05, "loss": 2.186635398864746, "memory(GiB)": 72.85, "step": 89605, "token_acc": 0.4983164983164983, "train_speed(iter/s)": 0.672693 }, { "epoch": 3.83916713079988, "grad_norm": 6.118424415588379, "learning_rate": 1.2750886082287482e-05, "loss": 2.0944845199584963, "memory(GiB)": 72.85, "step": 89610, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672698 }, { "epoch": 3.839381346129129, "grad_norm": 5.17432975769043, "learning_rate": 1.2746397094054868e-05, "loss": 2.4349496841430662, "memory(GiB)": 72.85, "step": 89615, "token_acc": 0.486404833836858, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.839595561458378, "grad_norm": 4.788812637329102, "learning_rate": 1.2741908780706213e-05, "loss": 2.0968833923339845, "memory(GiB)": 72.85, "step": 89620, "token_acc": 0.5256410256410257, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.839809776787627, "grad_norm": 6.897314071655273, "learning_rate": 1.273742114232282e-05, "loss": 2.1416040420532227, "memory(GiB)": 72.85, "step": 89625, "token_acc": 0.5340136054421769, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.8400239921168757, "grad_norm": 6.226212501525879, "learning_rate": 1.2732934178986011e-05, "loss": 2.0507013320922853, "memory(GiB)": 72.85, "step": 89630, "token_acc": 0.564935064935065, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.840238207446125, "grad_norm": 5.118808269500732, "learning_rate": 1.2728447890777056e-05, "loss": 2.1584039688110352, "memory(GiB)": 72.85, "step": 89635, "token_acc": 0.5342960288808665, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.840452422775374, "grad_norm": 4.440250396728516, "learning_rate": 1.2723962277777235e-05, "loss": 2.2626825332641602, "memory(GiB)": 72.85, "step": 
89640, "token_acc": 0.49557522123893805, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.8406666381046226, "grad_norm": 5.057619094848633, "learning_rate": 1.2719477340067804e-05, "loss": 1.9515228271484375, "memory(GiB)": 72.85, "step": 89645, "token_acc": 0.5597014925373134, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.840880853433872, "grad_norm": 8.384790420532227, "learning_rate": 1.2714993077730015e-05, "loss": 2.2771955490112306, "memory(GiB)": 72.85, "step": 89650, "token_acc": 0.4790996784565916, "train_speed(iter/s)": 0.672711 }, { "epoch": 3.8410950687631207, "grad_norm": 5.952513694763184, "learning_rate": 1.2710509490845079e-05, "loss": 2.2391403198242186, "memory(GiB)": 72.85, "step": 89655, "token_acc": 0.5072463768115942, "train_speed(iter/s)": 0.672716 }, { "epoch": 3.8413092840923695, "grad_norm": 6.593381881713867, "learning_rate": 1.2706026579494268e-05, "loss": 2.0025955200195313, "memory(GiB)": 72.85, "step": 89660, "token_acc": 0.5617021276595745, "train_speed(iter/s)": 0.672711 }, { "epoch": 3.8415234994216187, "grad_norm": 4.713565826416016, "learning_rate": 1.270154434375877e-05, "loss": 2.1986209869384767, "memory(GiB)": 72.85, "step": 89665, "token_acc": 0.5074626865671642, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.8417377147508676, "grad_norm": 8.089766502380371, "learning_rate": 1.2697062783719782e-05, "loss": 1.9758571624755858, "memory(GiB)": 72.85, "step": 89670, "token_acc": 0.568561872909699, "train_speed(iter/s)": 0.672716 }, { "epoch": 3.8419519300801164, "grad_norm": 6.157087802886963, "learning_rate": 1.2692581899458494e-05, "loss": 2.1822208404541015, "memory(GiB)": 72.85, "step": 89675, "token_acc": 0.5335463258785943, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.8421661454093656, "grad_norm": 7.102477550506592, "learning_rate": 1.2688101691056064e-05, "loss": 2.0897802352905273, "memory(GiB)": 72.85, "step": 89680, "token_acc": 0.543046357615894, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.8423803607386144, 
"grad_norm": 4.3775811195373535, "learning_rate": 1.2683622158593684e-05, "loss": 2.0417314529418946, "memory(GiB)": 72.85, "step": 89685, "token_acc": 0.5886524822695035, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.8425945760678633, "grad_norm": 5.050898551940918, "learning_rate": 1.2679143302152502e-05, "loss": 2.0111501693725584, "memory(GiB)": 72.85, "step": 89690, "token_acc": 0.5534351145038168, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.8428087913971125, "grad_norm": 4.5942230224609375, "learning_rate": 1.2674665121813645e-05, "loss": 2.029072380065918, "memory(GiB)": 72.85, "step": 89695, "token_acc": 0.5313531353135313, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.8430230067263613, "grad_norm": 6.282269477844238, "learning_rate": 1.2670187617658247e-05, "loss": 2.0149505615234373, "memory(GiB)": 72.85, "step": 89700, "token_acc": 0.5563636363636364, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.84323722205561, "grad_norm": 6.2923407554626465, "learning_rate": 1.2665710789767422e-05, "loss": 2.056003952026367, "memory(GiB)": 72.85, "step": 89705, "token_acc": 0.5684210526315789, "train_speed(iter/s)": 0.67271 }, { "epoch": 3.8434514373848594, "grad_norm": 5.879652976989746, "learning_rate": 1.2661234638222253e-05, "loss": 2.4759292602539062, "memory(GiB)": 72.85, "step": 89710, "token_acc": 0.47101449275362317, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.843665652714108, "grad_norm": 5.856530666351318, "learning_rate": 1.2656759163103855e-05, "loss": 2.1397756576538085, "memory(GiB)": 72.85, "step": 89715, "token_acc": 0.5324675324675324, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.843879868043357, "grad_norm": 6.172656059265137, "learning_rate": 1.26522843644933e-05, "loss": 1.9318834304809571, "memory(GiB)": 72.85, "step": 89720, "token_acc": 0.5477941176470589, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.8440940833726063, "grad_norm": 5.551095008850098, "learning_rate": 1.2647810242471647e-05, "loss": 2.323555755615234, 
"memory(GiB)": 72.85, "step": 89725, "token_acc": 0.516320474777448, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.844308298701855, "grad_norm": 5.429028511047363, "learning_rate": 1.2643336797119948e-05, "loss": 1.8842267990112305, "memory(GiB)": 72.85, "step": 89730, "token_acc": 0.6013745704467354, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.8445225140311043, "grad_norm": 4.785919666290283, "learning_rate": 1.2638864028519249e-05, "loss": 2.1650678634643556, "memory(GiB)": 72.85, "step": 89735, "token_acc": 0.5661538461538461, "train_speed(iter/s)": 0.672712 }, { "epoch": 3.844736729360353, "grad_norm": 6.879296779632568, "learning_rate": 1.2634391936750561e-05, "loss": 2.2228633880615236, "memory(GiB)": 72.85, "step": 89740, "token_acc": 0.5379537953795379, "train_speed(iter/s)": 0.672712 }, { "epoch": 3.844950944689602, "grad_norm": 6.769728660583496, "learning_rate": 1.2629920521894928e-05, "loss": 2.075226974487305, "memory(GiB)": 72.85, "step": 89745, "token_acc": 0.4937888198757764, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.8451651600188512, "grad_norm": 5.56279993057251, "learning_rate": 1.2625449784033344e-05, "loss": 1.8111194610595702, "memory(GiB)": 72.85, "step": 89750, "token_acc": 0.6007067137809188, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.8453793753481, "grad_norm": 5.87615442276001, "learning_rate": 1.2620979723246784e-05, "loss": 1.9939256668090821, "memory(GiB)": 72.85, "step": 89755, "token_acc": 0.6, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.845593590677349, "grad_norm": 5.602572917938232, "learning_rate": 1.261651033961625e-05, "loss": 1.8073955535888673, "memory(GiB)": 72.85, "step": 89760, "token_acc": 0.576271186440678, "train_speed(iter/s)": 0.672692 }, { "epoch": 3.845807806006598, "grad_norm": 5.8192901611328125, "learning_rate": 1.2612041633222705e-05, "loss": 2.220045471191406, "memory(GiB)": 72.85, "step": 89765, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.672697 }, { "epoch": 
3.846022021335847, "grad_norm": 5.50053596496582, "learning_rate": 1.2607573604147077e-05, "loss": 2.2293817520141603, "memory(GiB)": 72.85, "step": 89770, "token_acc": 0.5187713310580204, "train_speed(iter/s)": 0.672694 }, { "epoch": 3.8462362366650957, "grad_norm": 5.562912464141846, "learning_rate": 1.2603106252470348e-05, "loss": 2.0934471130371093, "memory(GiB)": 72.85, "step": 89775, "token_acc": 0.575091575091575, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.846450451994345, "grad_norm": 5.261343002319336, "learning_rate": 1.259863957827343e-05, "loss": 2.0637237548828127, "memory(GiB)": 72.85, "step": 89780, "token_acc": 0.531496062992126, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.846664667323594, "grad_norm": 6.3972859382629395, "learning_rate": 1.2594173581637242e-05, "loss": 2.4889781951904295, "memory(GiB)": 72.85, "step": 89785, "token_acc": 0.4909090909090909, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.8468788826528426, "grad_norm": 4.886534690856934, "learning_rate": 1.2589708262642692e-05, "loss": 1.990995216369629, "memory(GiB)": 72.85, "step": 89790, "token_acc": 0.5501618122977346, "train_speed(iter/s)": 0.67271 }, { "epoch": 3.847093097982092, "grad_norm": 5.793076992034912, "learning_rate": 1.2585243621370668e-05, "loss": 1.9178043365478517, "memory(GiB)": 72.85, "step": 89795, "token_acc": 0.5674740484429066, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.8473073133113407, "grad_norm": 9.26305866241455, "learning_rate": 1.2580779657902036e-05, "loss": 2.0458961486816407, "memory(GiB)": 72.85, "step": 89800, "token_acc": 0.5103734439834025, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.8475215286405895, "grad_norm": 4.712730884552002, "learning_rate": 1.2576316372317703e-05, "loss": 2.3290178298950197, "memory(GiB)": 72.85, "step": 89805, "token_acc": 0.4681528662420382, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.8477357439698388, "grad_norm": 5.464509963989258, "learning_rate": 1.2571853764698504e-05, "loss": 
2.0981348037719725, "memory(GiB)": 72.85, "step": 89810, "token_acc": 0.5109034267912772, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.8479499592990876, "grad_norm": 8.103699684143066, "learning_rate": 1.256739183512528e-05, "loss": 1.9758338928222656, "memory(GiB)": 72.85, "step": 89815, "token_acc": 0.5451388888888888, "train_speed(iter/s)": 0.672715 }, { "epoch": 3.8481641746283364, "grad_norm": 5.800901889801025, "learning_rate": 1.2562930583678872e-05, "loss": 2.196421432495117, "memory(GiB)": 72.85, "step": 89820, "token_acc": 0.5103448275862069, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.8483783899575856, "grad_norm": 6.1401262283325195, "learning_rate": 1.2558470010440077e-05, "loss": 2.081739807128906, "memory(GiB)": 72.85, "step": 89825, "token_acc": 0.5399361022364217, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.8485926052868344, "grad_norm": 6.514916896820068, "learning_rate": 1.2554010115489723e-05, "loss": 2.0526561737060547, "memory(GiB)": 72.85, "step": 89830, "token_acc": 0.5625, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.8488068206160833, "grad_norm": 3.9625744819641113, "learning_rate": 1.2549550898908619e-05, "loss": 2.1480009078979494, "memory(GiB)": 72.85, "step": 89835, "token_acc": 0.5668016194331984, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.8490210359453325, "grad_norm": 7.6608099937438965, "learning_rate": 1.2545092360777527e-05, "loss": 2.4277647018432615, "memory(GiB)": 72.85, "step": 89840, "token_acc": 0.5102040816326531, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.8492352512745813, "grad_norm": 6.739991188049316, "learning_rate": 1.254063450117723e-05, "loss": 1.8051467895507813, "memory(GiB)": 72.85, "step": 89845, "token_acc": 0.5850622406639004, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.84944946660383, "grad_norm": 7.441308975219727, "learning_rate": 1.2536177320188475e-05, "loss": 2.4748041152954103, "memory(GiB)": 72.85, "step": 89850, "token_acc": 0.5098684210526315, "train_speed(iter/s)": 0.67272 
}, { "epoch": 3.8496636819330794, "grad_norm": 5.2652130126953125, "learning_rate": 1.2531720817891995e-05, "loss": 1.9139509201049805, "memory(GiB)": 72.85, "step": 89855, "token_acc": 0.5682656826568265, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.849877897262328, "grad_norm": 6.235360145568848, "learning_rate": 1.2527264994368554e-05, "loss": 1.9397539138793944, "memory(GiB)": 72.85, "step": 89860, "token_acc": 0.5530546623794212, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.850092112591577, "grad_norm": 4.656686782836914, "learning_rate": 1.2522809849698863e-05, "loss": 1.775437355041504, "memory(GiB)": 72.85, "step": 89865, "token_acc": 0.5783582089552238, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.8503063279208263, "grad_norm": 5.848061561584473, "learning_rate": 1.2518355383963625e-05, "loss": 1.981974220275879, "memory(GiB)": 72.85, "step": 89870, "token_acc": 0.5463576158940397, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.850520543250075, "grad_norm": 6.158498764038086, "learning_rate": 1.2513901597243544e-05, "loss": 1.9696975708007813, "memory(GiB)": 72.85, "step": 89875, "token_acc": 0.575107296137339, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.850734758579324, "grad_norm": 5.70729398727417, "learning_rate": 1.2509448489619301e-05, "loss": 2.0353174209594727, "memory(GiB)": 72.85, "step": 89880, "token_acc": 0.5487012987012987, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.850948973908573, "grad_norm": 6.8465752601623535, "learning_rate": 1.2504996061171548e-05, "loss": 1.9780044555664062, "memory(GiB)": 72.85, "step": 89885, "token_acc": 0.572463768115942, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.851163189237822, "grad_norm": 5.28422212600708, "learning_rate": 1.2500544311980983e-05, "loss": 2.1047893524169923, "memory(GiB)": 72.85, "step": 89890, "token_acc": 0.5248447204968945, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.851377404567071, "grad_norm": 4.2230916023254395, "learning_rate": 1.2496093242128232e-05, 
"loss": 2.2117815017700195, "memory(GiB)": 72.85, "step": 89895, "token_acc": 0.53, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.85159161989632, "grad_norm": 5.097679615020752, "learning_rate": 1.2491642851693935e-05, "loss": 2.247611427307129, "memory(GiB)": 72.85, "step": 89900, "token_acc": 0.5992366412213741, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.851805835225569, "grad_norm": 5.18873929977417, "learning_rate": 1.2487193140758702e-05, "loss": 1.9322406768798828, "memory(GiB)": 72.85, "step": 89905, "token_acc": 0.5723684210526315, "train_speed(iter/s)": 0.67271 }, { "epoch": 3.8520200505548177, "grad_norm": 6.291836738586426, "learning_rate": 1.2482744109403165e-05, "loss": 2.149239730834961, "memory(GiB)": 72.85, "step": 89910, "token_acc": 0.5315068493150685, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.852234265884067, "grad_norm": 6.589292526245117, "learning_rate": 1.2478295757707904e-05, "loss": 1.821843719482422, "memory(GiB)": 72.85, "step": 89915, "token_acc": 0.6182572614107884, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.8524484812133157, "grad_norm": 6.898810386657715, "learning_rate": 1.247384808575352e-05, "loss": 2.597695159912109, "memory(GiB)": 72.85, "step": 89920, "token_acc": 0.4812286689419795, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.8526626965425645, "grad_norm": 4.795462131500244, "learning_rate": 1.2469401093620586e-05, "loss": 2.047543525695801, "memory(GiB)": 72.85, "step": 89925, "token_acc": 0.4927007299270073, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.852876911871814, "grad_norm": 11.562239646911621, "learning_rate": 1.2464954781389653e-05, "loss": 2.186212158203125, "memory(GiB)": 72.85, "step": 89930, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.8530911272010626, "grad_norm": 9.171093940734863, "learning_rate": 1.246050914914128e-05, "loss": 2.165226364135742, "memory(GiB)": 72.85, "step": 89935, "token_acc": 0.5655172413793104, "train_speed(iter/s)": 0.672702 }, 
{ "epoch": 3.8533053425303114, "grad_norm": 5.540653228759766, "learning_rate": 1.2456064196955996e-05, "loss": 1.9928186416625977, "memory(GiB)": 72.85, "step": 89940, "token_acc": 0.545774647887324, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.8535195578595607, "grad_norm": 5.921386241912842, "learning_rate": 1.2451619924914315e-05, "loss": 1.8882150650024414, "memory(GiB)": 72.85, "step": 89945, "token_acc": 0.6156716417910447, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.8537337731888095, "grad_norm": 5.7281084060668945, "learning_rate": 1.2447176333096777e-05, "loss": 1.9633119583129883, "memory(GiB)": 72.85, "step": 89950, "token_acc": 0.556, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.8539479885180583, "grad_norm": 5.280021667480469, "learning_rate": 1.2442733421583864e-05, "loss": 2.228934097290039, "memory(GiB)": 72.85, "step": 89955, "token_acc": 0.4857142857142857, "train_speed(iter/s)": 0.672715 }, { "epoch": 3.8541622038473076, "grad_norm": 6.596508026123047, "learning_rate": 1.243829119045607e-05, "loss": 2.1452896118164064, "memory(GiB)": 72.85, "step": 89960, "token_acc": 0.5296052631578947, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.8543764191765564, "grad_norm": 7.081056118011475, "learning_rate": 1.2433849639793865e-05, "loss": 2.0242536544799803, "memory(GiB)": 72.85, "step": 89965, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672716 }, { "epoch": 3.854590634505805, "grad_norm": 5.819374084472656, "learning_rate": 1.2429408769677697e-05, "loss": 2.1949623107910154, "memory(GiB)": 72.85, "step": 89970, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.672715 }, { "epoch": 3.8548048498350544, "grad_norm": 6.855014801025391, "learning_rate": 1.2424968580188051e-05, "loss": 2.1372552871704102, "memory(GiB)": 72.85, "step": 89975, "token_acc": 0.5227963525835866, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.8550190651643033, "grad_norm": 6.843245506286621, "learning_rate": 1.2420529071405335e-05, "loss": 
2.046504592895508, "memory(GiB)": 72.85, "step": 89980, "token_acc": 0.5522875816993464, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.855233280493552, "grad_norm": 6.066211223602295, "learning_rate": 1.241609024341e-05, "loss": 2.004379081726074, "memory(GiB)": 72.85, "step": 89985, "token_acc": 0.5210355987055016, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.8554474958228013, "grad_norm": 5.206275463104248, "learning_rate": 1.2411652096282444e-05, "loss": 2.098140335083008, "memory(GiB)": 72.85, "step": 89990, "token_acc": 0.5659163987138264, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.85566171115205, "grad_norm": 8.278709411621094, "learning_rate": 1.240721463010307e-05, "loss": 2.1863611221313475, "memory(GiB)": 72.85, "step": 89995, "token_acc": 0.5395189003436426, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.855875926481299, "grad_norm": 5.99104642868042, "learning_rate": 1.2402777844952256e-05, "loss": 1.9005910873413085, "memory(GiB)": 72.85, "step": 90000, "token_acc": 0.5748987854251012, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.855875926481299, "eval_loss": 1.9691139459609985, "eval_runtime": 14.8665, "eval_samples_per_second": 6.727, "eval_steps_per_second": 6.727, "eval_token_acc": 0.5124183006535947, "step": 90000 }, { "epoch": 3.856090141810548, "grad_norm": 6.8634867668151855, "learning_rate": 1.2398341740910407e-05, "loss": 1.9868221282958984, "memory(GiB)": 72.85, "step": 90005, "token_acc": 0.5182341650671785, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.856304357139797, "grad_norm": 6.051738262176514, "learning_rate": 1.2393906318057868e-05, "loss": 2.1429941177368166, "memory(GiB)": 72.85, "step": 90010, "token_acc": 0.5568181818181818, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.856518572469046, "grad_norm": 5.5678486824035645, "learning_rate": 1.2389471576474992e-05, "loss": 1.9703353881835937, "memory(GiB)": 72.85, "step": 90015, "token_acc": 0.571875, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.856732787798295, 
"grad_norm": 6.302164077758789, "learning_rate": 1.2385037516242126e-05, "loss": 1.8635730743408203, "memory(GiB)": 72.85, "step": 90020, "token_acc": 0.5543071161048689, "train_speed(iter/s)": 0.672649 }, { "epoch": 3.856947003127544, "grad_norm": 7.020616054534912, "learning_rate": 1.2380604137439589e-05, "loss": 1.986008644104004, "memory(GiB)": 72.85, "step": 90025, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.8571612184567927, "grad_norm": 5.466887474060059, "learning_rate": 1.2376171440147677e-05, "loss": 1.9189722061157226, "memory(GiB)": 72.85, "step": 90030, "token_acc": 0.5326460481099656, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.857375433786042, "grad_norm": 8.083952903747559, "learning_rate": 1.2371739424446731e-05, "loss": 2.1188344955444336, "memory(GiB)": 72.85, "step": 90035, "token_acc": 0.5427509293680297, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.857589649115291, "grad_norm": 6.868081569671631, "learning_rate": 1.236730809041703e-05, "loss": 2.4156448364257814, "memory(GiB)": 72.85, "step": 90040, "token_acc": 0.4668769716088328, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.8578038644445396, "grad_norm": 7.144318580627441, "learning_rate": 1.2362877438138838e-05, "loss": 2.147630310058594, "memory(GiB)": 72.85, "step": 90045, "token_acc": 0.5652173913043478, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.858018079773789, "grad_norm": 5.234299659729004, "learning_rate": 1.2358447467692413e-05, "loss": 2.0763761520385744, "memory(GiB)": 72.85, "step": 90050, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.8582322951030377, "grad_norm": 5.971590042114258, "learning_rate": 1.235401817915804e-05, "loss": 2.1029384613037108, "memory(GiB)": 72.85, "step": 90055, "token_acc": 0.5257731958762887, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.8584465104322865, "grad_norm": 5.2332305908203125, "learning_rate": 1.2349589572615927e-05, "loss": 2.569235992431641, 
"memory(GiB)": 72.85, "step": 90060, "token_acc": 0.4854368932038835, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.8586607257615357, "grad_norm": 6.707629680633545, "learning_rate": 1.2345161648146337e-05, "loss": 2.2080806732177733, "memory(GiB)": 72.85, "step": 90065, "token_acc": 0.52, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.8588749410907845, "grad_norm": 6.038315296173096, "learning_rate": 1.2340734405829463e-05, "loss": 2.0761474609375, "memory(GiB)": 72.85, "step": 90070, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.8590891564200334, "grad_norm": 5.765809535980225, "learning_rate": 1.233630784574551e-05, "loss": 2.135977935791016, "memory(GiB)": 72.85, "step": 90075, "token_acc": 0.5335689045936396, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.8593033717492826, "grad_norm": 4.9621076583862305, "learning_rate": 1.2331881967974674e-05, "loss": 2.1196256637573243, "memory(GiB)": 72.85, "step": 90080, "token_acc": 0.5364238410596026, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.8595175870785314, "grad_norm": 6.959714889526367, "learning_rate": 1.232745677259713e-05, "loss": 1.941095733642578, "memory(GiB)": 72.85, "step": 90085, "token_acc": 0.5632183908045977, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.8597318024077802, "grad_norm": 6.182060718536377, "learning_rate": 1.2323032259693035e-05, "loss": 2.108260726928711, "memory(GiB)": 72.85, "step": 90090, "token_acc": 0.5301587301587302, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.8599460177370295, "grad_norm": 8.02723217010498, "learning_rate": 1.2318608429342566e-05, "loss": 1.8538833618164063, "memory(GiB)": 72.85, "step": 90095, "token_acc": 0.5658362989323843, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.8601602330662783, "grad_norm": 5.100944519042969, "learning_rate": 1.2314185281625857e-05, "loss": 2.1509000778198244, "memory(GiB)": 72.85, "step": 90100, "token_acc": 0.5265151515151515, "train_speed(iter/s)": 0.67266 }, { "epoch": 
3.860374448395527, "grad_norm": 4.368150234222412, "learning_rate": 1.2309762816623028e-05, "loss": 2.114332389831543, "memory(GiB)": 72.85, "step": 90105, "token_acc": 0.5350877192982456, "train_speed(iter/s)": 0.672655 }, { "epoch": 3.8605886637247764, "grad_norm": 5.945515155792236, "learning_rate": 1.2305341034414208e-05, "loss": 2.147329330444336, "memory(GiB)": 72.85, "step": 90110, "token_acc": 0.5401929260450161, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.860802879054025, "grad_norm": 6.762919902801514, "learning_rate": 1.2300919935079474e-05, "loss": 1.9611686706542968, "memory(GiB)": 72.85, "step": 90115, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.861017094383274, "grad_norm": 6.058197975158691, "learning_rate": 1.2296499518698962e-05, "loss": 1.9025789260864259, "memory(GiB)": 72.85, "step": 90120, "token_acc": 0.5618374558303887, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.8612313097125233, "grad_norm": 5.9174580574035645, "learning_rate": 1.229207978535271e-05, "loss": 2.0148571014404295, "memory(GiB)": 72.85, "step": 90125, "token_acc": 0.5331010452961672, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.861445525041772, "grad_norm": 4.827971935272217, "learning_rate": 1.2287660735120826e-05, "loss": 2.2333053588867187, "memory(GiB)": 72.85, "step": 90130, "token_acc": 0.5284280936454849, "train_speed(iter/s)": 0.672663 }, { "epoch": 3.861659740371021, "grad_norm": 5.409884929656982, "learning_rate": 1.2283242368083342e-05, "loss": 1.9886474609375, "memory(GiB)": 72.85, "step": 90135, "token_acc": 0.560126582278481, "train_speed(iter/s)": 0.672672 }, { "epoch": 3.86187395570027, "grad_norm": 6.755555152893066, "learning_rate": 1.22788246843203e-05, "loss": 2.0619754791259766, "memory(GiB)": 72.85, "step": 90140, "token_acc": 0.575091575091575, "train_speed(iter/s)": 0.672676 }, { "epoch": 3.862088171029519, "grad_norm": 7.161450386047363, "learning_rate": 1.227440768391172e-05, "loss": 2.4008485794067385, 
"memory(GiB)": 72.85, "step": 90145, "token_acc": 0.5152439024390244, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.8623023863587678, "grad_norm": 5.1806840896606445, "learning_rate": 1.2269991366937656e-05, "loss": 2.163471984863281, "memory(GiB)": 72.85, "step": 90150, "token_acc": 0.5431654676258992, "train_speed(iter/s)": 0.672677 }, { "epoch": 3.862516601688017, "grad_norm": 6.633518218994141, "learning_rate": 1.2265575733478086e-05, "loss": 2.246958923339844, "memory(GiB)": 72.85, "step": 90155, "token_acc": 0.5155709342560554, "train_speed(iter/s)": 0.672675 }, { "epoch": 3.862730817017266, "grad_norm": 6.179605484008789, "learning_rate": 1.2261160783613013e-05, "loss": 2.08200626373291, "memory(GiB)": 72.85, "step": 90160, "token_acc": 0.5310077519379846, "train_speed(iter/s)": 0.672687 }, { "epoch": 3.8629450323465147, "grad_norm": 6.459651947021484, "learning_rate": 1.2256746517422407e-05, "loss": 2.0615255355834963, "memory(GiB)": 72.85, "step": 90165, "token_acc": 0.539568345323741, "train_speed(iter/s)": 0.67269 }, { "epoch": 3.863159247675764, "grad_norm": 6.972684860229492, "learning_rate": 1.2252332934986249e-05, "loss": 2.284014892578125, "memory(GiB)": 72.85, "step": 90170, "token_acc": 0.5096153846153846, "train_speed(iter/s)": 0.672697 }, { "epoch": 3.8633734630050127, "grad_norm": 4.553611755371094, "learning_rate": 1.224792003638448e-05, "loss": 2.103720474243164, "memory(GiB)": 72.85, "step": 90175, "token_acc": 0.54421768707483, "train_speed(iter/s)": 0.672702 }, { "epoch": 3.8635876783342615, "grad_norm": 4.969642639160156, "learning_rate": 1.2243507821697065e-05, "loss": 1.8579782485961913, "memory(GiB)": 72.85, "step": 90180, "token_acc": 0.5518518518518518, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.863801893663511, "grad_norm": 4.739274024963379, "learning_rate": 1.223909629100392e-05, "loss": 2.250068473815918, "memory(GiB)": 72.85, "step": 90185, "token_acc": 0.5288135593220339, "train_speed(iter/s)": 0.672703 }, { "epoch": 
3.8640161089927596, "grad_norm": 4.884121894836426, "learning_rate": 1.2234685444384969e-05, "loss": 2.068745422363281, "memory(GiB)": 72.85, "step": 90190, "token_acc": 0.5401459854014599, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.8642303243220084, "grad_norm": 5.131908416748047, "learning_rate": 1.2230275281920123e-05, "loss": 2.335016441345215, "memory(GiB)": 72.85, "step": 90195, "token_acc": 0.5254237288135594, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.8644445396512577, "grad_norm": 4.91698694229126, "learning_rate": 1.2225865803689252e-05, "loss": 2.113412857055664, "memory(GiB)": 72.85, "step": 90200, "token_acc": 0.5257352941176471, "train_speed(iter/s)": 0.672716 }, { "epoch": 3.8646587549805065, "grad_norm": 7.227598190307617, "learning_rate": 1.2221457009772259e-05, "loss": 2.2312788009643554, "memory(GiB)": 72.85, "step": 90205, "token_acc": 0.541958041958042, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.8648729703097553, "grad_norm": 6.72685432434082, "learning_rate": 1.2217048900249029e-05, "loss": 2.3606597900390627, "memory(GiB)": 72.85, "step": 90210, "token_acc": 0.4984709480122324, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.8650871856390046, "grad_norm": 7.2723002433776855, "learning_rate": 1.2212641475199393e-05, "loss": 1.9190418243408203, "memory(GiB)": 72.85, "step": 90215, "token_acc": 0.5748987854251012, "train_speed(iter/s)": 0.67271 }, { "epoch": 3.8653014009682534, "grad_norm": 4.791959285736084, "learning_rate": 1.2208234734703211e-05, "loss": 1.960504150390625, "memory(GiB)": 72.85, "step": 90220, "token_acc": 0.576, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.865515616297502, "grad_norm": 5.910756587982178, "learning_rate": 1.2203828678840307e-05, "loss": 2.227939796447754, "memory(GiB)": 72.85, "step": 90225, "token_acc": 0.5335463258785943, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.8657298316267514, "grad_norm": 7.150509357452393, "learning_rate": 1.2199423307690505e-05, "loss": 2.126742935180664, 
"memory(GiB)": 72.85, "step": 90230, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.8659440469560002, "grad_norm": 5.463474750518799, "learning_rate": 1.2195018621333587e-05, "loss": 2.175222396850586, "memory(GiB)": 72.85, "step": 90235, "token_acc": 0.5352941176470588, "train_speed(iter/s)": 0.672711 }, { "epoch": 3.866158262285249, "grad_norm": 6.388562202453613, "learning_rate": 1.2190614619849395e-05, "loss": 2.4977752685546877, "memory(GiB)": 72.85, "step": 90240, "token_acc": 0.4380952380952381, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.8663724776144983, "grad_norm": 5.063618183135986, "learning_rate": 1.218621130331768e-05, "loss": 2.1602994918823244, "memory(GiB)": 72.85, "step": 90245, "token_acc": 0.47352941176470587, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.866586692943747, "grad_norm": 7.085966110229492, "learning_rate": 1.2181808671818223e-05, "loss": 2.1862035751342774, "memory(GiB)": 72.85, "step": 90250, "token_acc": 0.5141065830721003, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.866800908272996, "grad_norm": 5.817555904388428, "learning_rate": 1.2177406725430774e-05, "loss": 2.1960775375366213, "memory(GiB)": 72.85, "step": 90255, "token_acc": 0.5498154981549815, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.867015123602245, "grad_norm": 9.722408294677734, "learning_rate": 1.217300546423507e-05, "loss": 2.098712158203125, "memory(GiB)": 72.85, "step": 90260, "token_acc": 0.515358361774744, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.867229338931494, "grad_norm": 6.727409839630127, "learning_rate": 1.2168604888310875e-05, "loss": 1.945218849182129, "memory(GiB)": 72.85, "step": 90265, "token_acc": 0.5555555555555556, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.867443554260743, "grad_norm": 7.482864856719971, "learning_rate": 1.2164204997737888e-05, "loss": 2.059019660949707, "memory(GiB)": 72.85, "step": 90270, "token_acc": 0.552, "train_speed(iter/s)": 0.672726 }, { "epoch": 
3.867657769589992, "grad_norm": 6.181248664855957, "learning_rate": 1.2159805792595807e-05, "loss": 2.089539909362793, "memory(GiB)": 72.85, "step": 90275, "token_acc": 0.5907335907335908, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.867871984919241, "grad_norm": 7.035867691040039, "learning_rate": 1.2155407272964353e-05, "loss": 1.9868707656860352, "memory(GiB)": 72.85, "step": 90280, "token_acc": 0.5645161290322581, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.8680862002484897, "grad_norm": 5.764963626861572, "learning_rate": 1.21510094389232e-05, "loss": 1.9805124282836915, "memory(GiB)": 72.85, "step": 90285, "token_acc": 0.5587301587301587, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.868300415577739, "grad_norm": 5.159361362457275, "learning_rate": 1.2146612290552e-05, "loss": 1.9435012817382813, "memory(GiB)": 72.85, "step": 90290, "token_acc": 0.575, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.8685146309069878, "grad_norm": 5.873623371124268, "learning_rate": 1.2142215827930441e-05, "loss": 2.1975210189819334, "memory(GiB)": 72.85, "step": 90295, "token_acc": 0.553030303030303, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.8687288462362366, "grad_norm": 3.899968385696411, "learning_rate": 1.213782005113816e-05, "loss": 1.9635906219482422, "memory(GiB)": 72.85, "step": 90300, "token_acc": 0.5959595959595959, "train_speed(iter/s)": 0.67271 }, { "epoch": 3.868943061565486, "grad_norm": 6.066151142120361, "learning_rate": 1.2133424960254786e-05, "loss": 2.3591026306152343, "memory(GiB)": 72.85, "step": 90305, "token_acc": 0.5077399380804953, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.8691572768947347, "grad_norm": 5.479531764984131, "learning_rate": 1.2129030555359938e-05, "loss": 2.156905937194824, "memory(GiB)": 72.85, "step": 90310, "token_acc": 0.5088967971530249, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.8693714922239835, "grad_norm": 6.9849677085876465, "learning_rate": 1.2124636836533226e-05, "loss": 2.4634517669677733, 
"memory(GiB)": 72.85, "step": 90315, "token_acc": 0.5276872964169381, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.8695857075532327, "grad_norm": 4.993785858154297, "learning_rate": 1.2120243803854237e-05, "loss": 2.153999137878418, "memory(GiB)": 72.85, "step": 90320, "token_acc": 0.5424657534246575, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.8697999228824815, "grad_norm": 7.962214469909668, "learning_rate": 1.2115851457402577e-05, "loss": 2.0053266525268554, "memory(GiB)": 72.85, "step": 90325, "token_acc": 0.5222929936305732, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.8700141382117303, "grad_norm": 5.765658378601074, "learning_rate": 1.2111459797257807e-05, "loss": 2.015119171142578, "memory(GiB)": 72.85, "step": 90330, "token_acc": 0.5355805243445693, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.8702283535409796, "grad_norm": 6.281500339508057, "learning_rate": 1.2107068823499484e-05, "loss": 2.064564514160156, "memory(GiB)": 72.85, "step": 90335, "token_acc": 0.5315315315315315, "train_speed(iter/s)": 0.672724 }, { "epoch": 3.8704425688702284, "grad_norm": 6.055914878845215, "learning_rate": 1.2102678536207157e-05, "loss": 2.1281963348388673, "memory(GiB)": 72.85, "step": 90340, "token_acc": 0.5075757575757576, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.8706567841994772, "grad_norm": 5.323705673217773, "learning_rate": 1.2098288935460344e-05, "loss": 1.793524169921875, "memory(GiB)": 72.85, "step": 90345, "token_acc": 0.5743243243243243, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.8708709995287265, "grad_norm": 4.363182067871094, "learning_rate": 1.2093900021338584e-05, "loss": 2.1047832489013674, "memory(GiB)": 72.85, "step": 90350, "token_acc": 0.5124223602484472, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.8710852148579753, "grad_norm": 4.677682399749756, "learning_rate": 1.2090389384464662e-05, "loss": 2.1158884048461912, "memory(GiB)": 72.85, "step": 90355, "token_acc": 0.5492063492063493, "train_speed(iter/s)": 0.672731 }, { 
"epoch": 3.871299430187224, "grad_norm": 4.866480350494385, "learning_rate": 1.2086001706468358e-05, "loss": 2.0004035949707033, "memory(GiB)": 72.85, "step": 90360, "token_acc": 0.5609756097560976, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.8715136455164734, "grad_norm": 6.874039649963379, "learning_rate": 1.2081614715319706e-05, "loss": 2.1585391998291015, "memory(GiB)": 72.85, "step": 90365, "token_acc": 0.540453074433657, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.871727860845722, "grad_norm": 4.786227226257324, "learning_rate": 1.207722841109815e-05, "loss": 2.0116127014160154, "memory(GiB)": 72.85, "step": 90370, "token_acc": 0.5379746835443038, "train_speed(iter/s)": 0.672728 }, { "epoch": 3.871942076174971, "grad_norm": 7.351208686828613, "learning_rate": 1.2072842793883199e-05, "loss": 2.3245500564575194, "memory(GiB)": 72.85, "step": 90375, "token_acc": 0.5090252707581228, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.8721562915042202, "grad_norm": 5.7451395988464355, "learning_rate": 1.2068457863754273e-05, "loss": 2.361438179016113, "memory(GiB)": 72.85, "step": 90380, "token_acc": 0.4891304347826087, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.872370506833469, "grad_norm": 6.490621566772461, "learning_rate": 1.2064073620790823e-05, "loss": 2.3014097213745117, "memory(GiB)": 72.85, "step": 90385, "token_acc": 0.5087719298245614, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.872584722162718, "grad_norm": 5.955977916717529, "learning_rate": 1.205969006507226e-05, "loss": 1.9497573852539063, "memory(GiB)": 72.85, "step": 90390, "token_acc": 0.5228215767634855, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.872798937491967, "grad_norm": 4.701608180999756, "learning_rate": 1.205530719667799e-05, "loss": 2.3140478134155273, "memory(GiB)": 72.85, "step": 90395, "token_acc": 0.5230769230769231, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.873013152821216, "grad_norm": 5.905970096588135, "learning_rate": 1.2050925015687437e-05, "loss": 
2.0520912170410157, "memory(GiB)": 72.85, "step": 90400, "token_acc": 0.5578635014836796, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.8732273681504648, "grad_norm": 5.584826946258545, "learning_rate": 1.2046543522179966e-05, "loss": 2.0407405853271485, "memory(GiB)": 72.85, "step": 90405, "token_acc": 0.5373665480427047, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.873441583479714, "grad_norm": 4.628293991088867, "learning_rate": 1.2042162716234972e-05, "loss": 1.9333562850952148, "memory(GiB)": 72.85, "step": 90410, "token_acc": 0.5679442508710801, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.873655798808963, "grad_norm": 6.046133041381836, "learning_rate": 1.2037782597931812e-05, "loss": 2.257678413391113, "memory(GiB)": 72.85, "step": 90415, "token_acc": 0.48986486486486486, "train_speed(iter/s)": 0.672716 }, { "epoch": 3.8738700141382116, "grad_norm": 5.273566722869873, "learning_rate": 1.2033403167349833e-05, "loss": 2.2569351196289062, "memory(GiB)": 72.85, "step": 90420, "token_acc": 0.5303514376996805, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.874084229467461, "grad_norm": 5.163562297821045, "learning_rate": 1.2029024424568363e-05, "loss": 1.9967166900634765, "memory(GiB)": 72.85, "step": 90425, "token_acc": 0.5643939393939394, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.8742984447967097, "grad_norm": 7.857806205749512, "learning_rate": 1.2024646369666731e-05, "loss": 1.8002819061279296, "memory(GiB)": 72.85, "step": 90430, "token_acc": 0.5846645367412141, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.8745126601259585, "grad_norm": 6.84838342666626, "learning_rate": 1.202026900272426e-05, "loss": 2.1075817108154298, "memory(GiB)": 72.85, "step": 90435, "token_acc": 0.5284280936454849, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.8747268754552078, "grad_norm": 5.071460247039795, "learning_rate": 1.201589232382025e-05, "loss": 1.937063980102539, "memory(GiB)": 72.85, "step": 90440, "token_acc": 0.582089552238806, "train_speed(iter/s)": 
0.672723 }, { "epoch": 3.8749410907844566, "grad_norm": 6.874341011047363, "learning_rate": 1.2011516333033979e-05, "loss": 2.351165008544922, "memory(GiB)": 72.85, "step": 90445, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.67273 }, { "epoch": 3.8751553061137054, "grad_norm": 5.613440990447998, "learning_rate": 1.2007141030444723e-05, "loss": 1.852944564819336, "memory(GiB)": 72.85, "step": 90450, "token_acc": 0.597444089456869, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.8753695214429547, "grad_norm": 5.7144060134887695, "learning_rate": 1.2002766416131739e-05, "loss": 2.18720703125, "memory(GiB)": 72.85, "step": 90455, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.8755837367722035, "grad_norm": 7.780680179595947, "learning_rate": 1.1998392490174299e-05, "loss": 2.1040718078613283, "memory(GiB)": 72.85, "step": 90460, "token_acc": 0.5420560747663551, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.8757979521014523, "grad_norm": 5.876774787902832, "learning_rate": 1.1994019252651611e-05, "loss": 2.033440589904785, "memory(GiB)": 72.85, "step": 90465, "token_acc": 0.5468164794007491, "train_speed(iter/s)": 0.67272 }, { "epoch": 3.8760121674307015, "grad_norm": 5.269876480102539, "learning_rate": 1.1989646703642931e-05, "loss": 2.171195220947266, "memory(GiB)": 72.85, "step": 90470, "token_acc": 0.5620437956204379, "train_speed(iter/s)": 0.672716 }, { "epoch": 3.8762263827599504, "grad_norm": 9.072769165039062, "learning_rate": 1.1985274843227456e-05, "loss": 2.369317626953125, "memory(GiB)": 72.85, "step": 90475, "token_acc": 0.47058823529411764, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.876440598089199, "grad_norm": 5.927426338195801, "learning_rate": 1.1980903671484389e-05, "loss": 2.0652822494506835, "memory(GiB)": 72.85, "step": 90480, "token_acc": 0.5592105263157895, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.8766548134184484, "grad_norm": 4.851194858551025, "learning_rate": 1.1976533188492922e-05, 
"loss": 1.9900720596313477, "memory(GiB)": 72.85, "step": 90485, "token_acc": 0.5570469798657718, "train_speed(iter/s)": 0.672715 }, { "epoch": 3.8768690287476972, "grad_norm": 6.884859085083008, "learning_rate": 1.1972163394332203e-05, "loss": 2.333320426940918, "memory(GiB)": 72.85, "step": 90490, "token_acc": 0.49421965317919075, "train_speed(iter/s)": 0.672718 }, { "epoch": 3.877083244076946, "grad_norm": 4.660165309906006, "learning_rate": 1.1967794289081436e-05, "loss": 1.986440086364746, "memory(GiB)": 72.85, "step": 90495, "token_acc": 0.5676567656765676, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.8772974594061953, "grad_norm": 6.817154884338379, "learning_rate": 1.1963425872819755e-05, "loss": 1.9016641616821288, "memory(GiB)": 72.85, "step": 90500, "token_acc": 0.5683453237410072, "train_speed(iter/s)": 0.672717 }, { "epoch": 3.8772974594061953, "eval_loss": 2.1843535900115967, "eval_runtime": 15.4196, "eval_samples_per_second": 6.485, "eval_steps_per_second": 6.485, "eval_token_acc": 0.5, "step": 90500 }, { "epoch": 3.877511674735444, "grad_norm": 6.9419264793396, "learning_rate": 1.1959058145626289e-05, "loss": 2.0896785736083983, "memory(GiB)": 72.85, "step": 90505, "token_acc": 0.516532618409294, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.877725890064693, "grad_norm": 5.876617431640625, "learning_rate": 1.1954691107580174e-05, "loss": 2.0598129272460937, "memory(GiB)": 72.85, "step": 90510, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.877940105393942, "grad_norm": 5.704283237457275, "learning_rate": 1.1950324758760507e-05, "loss": 2.0329769134521483, "memory(GiB)": 72.85, "step": 90515, "token_acc": 0.5446808510638298, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.878154320723191, "grad_norm": 8.66891098022461, "learning_rate": 1.1945959099246407e-05, "loss": 2.0651243209838865, "memory(GiB)": 72.85, "step": 90520, "token_acc": 0.516245487364621, "train_speed(iter/s)": 0.672635 }, { "epoch": 
3.87836853605244, "grad_norm": 5.847703456878662, "learning_rate": 1.1941594129116962e-05, "loss": 2.170228385925293, "memory(GiB)": 72.85, "step": 90525, "token_acc": 0.5589225589225589, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.878582751381689, "grad_norm": 4.881619453430176, "learning_rate": 1.1937229848451237e-05, "loss": 2.011090850830078, "memory(GiB)": 72.85, "step": 90530, "token_acc": 0.5805471124620061, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.878796966710938, "grad_norm": 8.024557113647461, "learning_rate": 1.1932866257328302e-05, "loss": 1.9208141326904298, "memory(GiB)": 72.85, "step": 90535, "token_acc": 0.5512820512820513, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.8790111820401867, "grad_norm": 4.781789302825928, "learning_rate": 1.1928503355827192e-05, "loss": 1.6431684494018555, "memory(GiB)": 72.85, "step": 90540, "token_acc": 0.6201550387596899, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.879225397369436, "grad_norm": 5.24136209487915, "learning_rate": 1.1924141144026969e-05, "loss": 2.224709892272949, "memory(GiB)": 72.85, "step": 90545, "token_acc": 0.5241157556270096, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.8794396126986848, "grad_norm": 6.367155075073242, "learning_rate": 1.1919779622006632e-05, "loss": 2.285678672790527, "memory(GiB)": 72.85, "step": 90550, "token_acc": 0.5159235668789809, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.8796538280279336, "grad_norm": 4.9747490882873535, "learning_rate": 1.1915418789845229e-05, "loss": 1.967245864868164, "memory(GiB)": 72.85, "step": 90555, "token_acc": 0.5575539568345323, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.879868043357183, "grad_norm": 5.281302452087402, "learning_rate": 1.1911058647621737e-05, "loss": 1.977257537841797, "memory(GiB)": 72.85, "step": 90560, "token_acc": 0.581081081081081, "train_speed(iter/s)": 0.672652 }, { "epoch": 3.8800822586864316, "grad_norm": 5.294955730438232, "learning_rate": 1.1906699195415144e-05, "loss": 
2.225482177734375, "memory(GiB)": 72.85, "step": 90565, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.8802964740156805, "grad_norm": 7.3091840744018555, "learning_rate": 1.1902340433304431e-05, "loss": 2.2000179290771484, "memory(GiB)": 72.85, "step": 90570, "token_acc": 0.4921135646687697, "train_speed(iter/s)": 0.672653 }, { "epoch": 3.8805106893449297, "grad_norm": 6.945642948150635, "learning_rate": 1.1897982361368548e-05, "loss": 2.0439731597900392, "memory(GiB)": 72.85, "step": 90575, "token_acc": 0.5622895622895623, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.8807249046741785, "grad_norm": 6.096896171569824, "learning_rate": 1.1893624979686474e-05, "loss": 2.191529655456543, "memory(GiB)": 72.85, "step": 90580, "token_acc": 0.5201238390092879, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.8809391200034273, "grad_norm": 6.326863765716553, "learning_rate": 1.1889268288337124e-05, "loss": 2.182405471801758, "memory(GiB)": 72.85, "step": 90585, "token_acc": 0.5222551928783383, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.8811533353326766, "grad_norm": 5.31085729598999, "learning_rate": 1.1884912287399436e-05, "loss": 2.2585039138793945, "memory(GiB)": 72.85, "step": 90590, "token_acc": 0.5317725752508361, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.8813675506619254, "grad_norm": 7.768106460571289, "learning_rate": 1.1880556976952312e-05, "loss": 2.318172645568848, "memory(GiB)": 72.85, "step": 90595, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.881581765991174, "grad_norm": 5.176763534545898, "learning_rate": 1.1876202357074645e-05, "loss": 2.1406251907348635, "memory(GiB)": 72.85, "step": 90600, "token_acc": 0.5393258426966292, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.8817959813204235, "grad_norm": 4.995957851409912, "learning_rate": 1.187184842784535e-05, "loss": 2.212545394897461, "memory(GiB)": 72.85, "step": 90605, "token_acc": 0.5234899328859061, 
"train_speed(iter/s)": 0.67264 }, { "epoch": 3.8820101966496723, "grad_norm": 6.122725009918213, "learning_rate": 1.1867495189343286e-05, "loss": 2.065278244018555, "memory(GiB)": 72.85, "step": 90610, "token_acc": 0.5568181818181818, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.882224411978921, "grad_norm": 4.852311611175537, "learning_rate": 1.1863142641647307e-05, "loss": 2.309286689758301, "memory(GiB)": 72.85, "step": 90615, "token_acc": 0.4849315068493151, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.8824386273081704, "grad_norm": 6.1831512451171875, "learning_rate": 1.1858790784836282e-05, "loss": 2.2997314453125, "memory(GiB)": 72.85, "step": 90620, "token_acc": 0.5176470588235295, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.882652842637419, "grad_norm": 7.333326816558838, "learning_rate": 1.185443961898905e-05, "loss": 2.1187782287597656, "memory(GiB)": 72.85, "step": 90625, "token_acc": 0.5372549019607843, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.882867057966668, "grad_norm": 5.476439952850342, "learning_rate": 1.1850089144184423e-05, "loss": 2.003125762939453, "memory(GiB)": 72.85, "step": 90630, "token_acc": 0.5249169435215947, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.8830812732959172, "grad_norm": 9.169368743896484, "learning_rate": 1.18457393605012e-05, "loss": 2.056256866455078, "memory(GiB)": 72.85, "step": 90635, "token_acc": 0.5319148936170213, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.883295488625166, "grad_norm": 6.02662467956543, "learning_rate": 1.184139026801822e-05, "loss": 2.191624641418457, "memory(GiB)": 72.85, "step": 90640, "token_acc": 0.4768211920529801, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.883509703954415, "grad_norm": 7.956553936004639, "learning_rate": 1.1837041866814252e-05, "loss": 2.2357355117797852, "memory(GiB)": 72.85, "step": 90645, "token_acc": 0.5351170568561873, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.883723919283664, "grad_norm": 7.840685844421387, "learning_rate": 
1.1832694156968066e-05, "loss": 2.2758405685424803, "memory(GiB)": 72.85, "step": 90650, "token_acc": 0.49326145552560646, "train_speed(iter/s)": 0.672633 }, { "epoch": 3.883938134612913, "grad_norm": 6.413180828094482, "learning_rate": 1.1828347138558432e-05, "loss": 2.015023040771484, "memory(GiB)": 72.85, "step": 90655, "token_acc": 0.5347985347985348, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.8841523499421617, "grad_norm": 6.079823017120361, "learning_rate": 1.1824000811664083e-05, "loss": 1.8410594940185547, "memory(GiB)": 72.85, "step": 90660, "token_acc": 0.5868852459016394, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.884366565271411, "grad_norm": 5.542038440704346, "learning_rate": 1.1819655176363786e-05, "loss": 1.9986419677734375, "memory(GiB)": 72.85, "step": 90665, "token_acc": 0.5434782608695652, "train_speed(iter/s)": 0.672627 }, { "epoch": 3.88458078060066, "grad_norm": 5.141200542449951, "learning_rate": 1.1815310232736249e-05, "loss": 2.2447595596313477, "memory(GiB)": 72.85, "step": 90670, "token_acc": 0.4965753424657534, "train_speed(iter/s)": 0.672629 }, { "epoch": 3.8847949959299086, "grad_norm": 6.528443336486816, "learning_rate": 1.1810965980860189e-05, "loss": 2.3228368759155273, "memory(GiB)": 72.85, "step": 90675, "token_acc": 0.5223880597014925, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.885009211259158, "grad_norm": 4.720282554626465, "learning_rate": 1.1806622420814306e-05, "loss": 2.3763944625854494, "memory(GiB)": 72.85, "step": 90680, "token_acc": 0.5014084507042254, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.8852234265884067, "grad_norm": 6.502199172973633, "learning_rate": 1.1802279552677282e-05, "loss": 2.0749568939208984, "memory(GiB)": 72.85, "step": 90685, "token_acc": 0.5660377358490566, "train_speed(iter/s)": 0.672634 }, { "epoch": 3.8854376419176555, "grad_norm": 5.556310176849365, "learning_rate": 1.1797937376527784e-05, "loss": 1.9230009078979493, "memory(GiB)": 72.85, "step": 90690, "token_acc": 
0.5535714285714286, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.8856518572469048, "grad_norm": 6.29854679107666, "learning_rate": 1.1793595892444492e-05, "loss": 2.1040950775146485, "memory(GiB)": 72.85, "step": 90695, "token_acc": 0.5342960288808665, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.8858660725761536, "grad_norm": 5.003039360046387, "learning_rate": 1.1789255100506057e-05, "loss": 2.279698944091797, "memory(GiB)": 72.85, "step": 90700, "token_acc": 0.5403225806451613, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.8860802879054024, "grad_norm": 7.252145290374756, "learning_rate": 1.1784915000791114e-05, "loss": 2.16741943359375, "memory(GiB)": 72.85, "step": 90705, "token_acc": 0.5399239543726235, "train_speed(iter/s)": 0.672655 }, { "epoch": 3.8862945032346516, "grad_norm": 7.266942977905273, "learning_rate": 1.1780575593378284e-05, "loss": 2.307459259033203, "memory(GiB)": 72.85, "step": 90710, "token_acc": 0.4878048780487805, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.8865087185639005, "grad_norm": 7.484971523284912, "learning_rate": 1.177623687834618e-05, "loss": 1.802992057800293, "memory(GiB)": 72.85, "step": 90715, "token_acc": 0.6042553191489362, "train_speed(iter/s)": 0.672647 }, { "epoch": 3.8867229338931493, "grad_norm": 5.019059181213379, "learning_rate": 1.1771898855773388e-05, "loss": 2.289641571044922, "memory(GiB)": 72.85, "step": 90720, "token_acc": 0.5034246575342466, "train_speed(iter/s)": 0.672638 }, { "epoch": 3.8869371492223985, "grad_norm": 5.691256046295166, "learning_rate": 1.1767561525738525e-05, "loss": 2.086812210083008, "memory(GiB)": 72.85, "step": 90725, "token_acc": 0.535483870967742, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.8871513645516473, "grad_norm": 4.710538864135742, "learning_rate": 1.1763224888320145e-05, "loss": 2.2113716125488283, "memory(GiB)": 72.85, "step": 90730, "token_acc": 0.515527950310559, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.887365579880896, "grad_norm": 
6.694054126739502, "learning_rate": 1.1758888943596818e-05, "loss": 1.9259098052978516, "memory(GiB)": 72.85, "step": 90735, "token_acc": 0.5916666666666667, "train_speed(iter/s)": 0.672641 }, { "epoch": 3.8875797952101454, "grad_norm": 6.166999816894531, "learning_rate": 1.1754553691647092e-05, "loss": 2.008477210998535, "memory(GiB)": 72.85, "step": 90740, "token_acc": 0.5266903914590747, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.887794010539394, "grad_norm": 5.5898661613464355, "learning_rate": 1.1750219132549489e-05, "loss": 1.8680032730102538, "memory(GiB)": 72.85, "step": 90745, "token_acc": 0.5724137931034483, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.888008225868643, "grad_norm": 6.623014450073242, "learning_rate": 1.1745885266382561e-05, "loss": 2.221359443664551, "memory(GiB)": 72.85, "step": 90750, "token_acc": 0.5067114093959731, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.8882224411978923, "grad_norm": 6.991778373718262, "learning_rate": 1.1741552093224805e-05, "loss": 2.352559471130371, "memory(GiB)": 72.85, "step": 90755, "token_acc": 0.5201238390092879, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.888436656527141, "grad_norm": 5.927052974700928, "learning_rate": 1.1737219613154727e-05, "loss": 2.6402687072753905, "memory(GiB)": 72.85, "step": 90760, "token_acc": 0.46726190476190477, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.88865087185639, "grad_norm": 6.919082164764404, "learning_rate": 1.1732887826250787e-05, "loss": 2.0637582778930663, "memory(GiB)": 72.85, "step": 90765, "token_acc": 0.5425867507886435, "train_speed(iter/s)": 0.672655 }, { "epoch": 3.888865087185639, "grad_norm": 5.847538948059082, "learning_rate": 1.1728556732591501e-05, "loss": 2.125064468383789, "memory(GiB)": 72.85, "step": 90770, "token_acc": 0.5406360424028268, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.889079302514888, "grad_norm": 5.433976650238037, "learning_rate": 1.172422633225531e-05, "loss": 2.175282859802246, "memory(GiB)": 72.85, 
"step": 90775, "token_acc": 0.55, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.889293517844137, "grad_norm": 8.26944351196289, "learning_rate": 1.1719896625320654e-05, "loss": 2.729191207885742, "memory(GiB)": 72.85, "step": 90780, "token_acc": 0.43902439024390244, "train_speed(iter/s)": 0.67266 }, { "epoch": 3.889507733173386, "grad_norm": 5.556572914123535, "learning_rate": 1.1715567611865991e-05, "loss": 1.949570083618164, "memory(GiB)": 72.85, "step": 90785, "token_acc": 0.54858934169279, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.889721948502635, "grad_norm": 5.102692127227783, "learning_rate": 1.1711239291969733e-05, "loss": 2.3194711685180662, "memory(GiB)": 72.85, "step": 90790, "token_acc": 0.5146579804560261, "train_speed(iter/s)": 0.672664 }, { "epoch": 3.8899361638318837, "grad_norm": 7.033505916595459, "learning_rate": 1.1706911665710296e-05, "loss": 2.3173179626464844, "memory(GiB)": 72.85, "step": 90795, "token_acc": 0.5278592375366569, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.890150379161133, "grad_norm": 6.142239093780518, "learning_rate": 1.1702584733166073e-05, "loss": 2.2591787338256837, "memory(GiB)": 72.85, "step": 90800, "token_acc": 0.5, "train_speed(iter/s)": 0.672644 }, { "epoch": 3.8903645944903817, "grad_norm": 4.86271858215332, "learning_rate": 1.1698258494415443e-05, "loss": 1.9894248962402343, "memory(GiB)": 72.85, "step": 90805, "token_acc": 0.5633333333333334, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.8905788098196306, "grad_norm": 4.538608551025391, "learning_rate": 1.1693932949536801e-05, "loss": 2.251759910583496, "memory(GiB)": 72.85, "step": 90810, "token_acc": 0.5121951219512195, "train_speed(iter/s)": 0.672636 }, { "epoch": 3.89079302514888, "grad_norm": 5.190683841705322, "learning_rate": 1.1689608098608495e-05, "loss": 2.2893470764160155, "memory(GiB)": 72.85, "step": 90815, "token_acc": 0.5426829268292683, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.8910072404781286, "grad_norm": 5.660338401794434, 
"learning_rate": 1.168528394170888e-05, "loss": 2.23541145324707, "memory(GiB)": 72.85, "step": 90820, "token_acc": 0.5182926829268293, "train_speed(iter/s)": 0.672622 }, { "epoch": 3.8912214558073774, "grad_norm": 6.4499688148498535, "learning_rate": 1.1680960478916292e-05, "loss": 2.109136199951172, "memory(GiB)": 72.85, "step": 90825, "token_acc": 0.5400696864111498, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.8914356711366267, "grad_norm": 4.312106132507324, "learning_rate": 1.1676637710309047e-05, "loss": 2.1623327255249025, "memory(GiB)": 72.85, "step": 90830, "token_acc": 0.5035971223021583, "train_speed(iter/s)": 0.672619 }, { "epoch": 3.8916498864658755, "grad_norm": 4.976128101348877, "learning_rate": 1.1672315635965447e-05, "loss": 1.805028533935547, "memory(GiB)": 72.85, "step": 90835, "token_acc": 0.5875912408759124, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.8918641017951243, "grad_norm": 6.191423416137695, "learning_rate": 1.1667994255963805e-05, "loss": 2.025765228271484, "memory(GiB)": 72.85, "step": 90840, "token_acc": 0.5331010452961672, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.8920783171243736, "grad_norm": 5.262320518493652, "learning_rate": 1.1663673570382416e-05, "loss": 1.8192068099975587, "memory(GiB)": 72.85, "step": 90845, "token_acc": 0.5645161290322581, "train_speed(iter/s)": 0.672622 }, { "epoch": 3.8922925324536224, "grad_norm": 4.70515251159668, "learning_rate": 1.1659353579299543e-05, "loss": 2.210047721862793, "memory(GiB)": 72.85, "step": 90850, "token_acc": 0.5340599455040872, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.892506747782871, "grad_norm": 6.336127281188965, "learning_rate": 1.1655034282793448e-05, "loss": 2.2272293090820314, "memory(GiB)": 72.85, "step": 90855, "token_acc": 0.4984126984126984, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.8927209631121205, "grad_norm": 4.771566390991211, "learning_rate": 1.1650715680942381e-05, "loss": 1.9059455871582032, "memory(GiB)": 72.85, "step": 90860, 
"token_acc": 0.5594855305466238, "train_speed(iter/s)": 0.672623 }, { "epoch": 3.8929351784413693, "grad_norm": 5.826114177703857, "learning_rate": 1.1646397773824553e-05, "loss": 2.234684181213379, "memory(GiB)": 72.85, "step": 90865, "token_acc": 0.5178571428571429, "train_speed(iter/s)": 0.672625 }, { "epoch": 3.893149393770618, "grad_norm": 6.744257926940918, "learning_rate": 1.1642080561518226e-05, "loss": 1.8643007278442383, "memory(GiB)": 72.85, "step": 90870, "token_acc": 0.5863453815261044, "train_speed(iter/s)": 0.672627 }, { "epoch": 3.8933636090998673, "grad_norm": 6.851278305053711, "learning_rate": 1.163776404410159e-05, "loss": 2.1121999740600588, "memory(GiB)": 72.85, "step": 90875, "token_acc": 0.5284552845528455, "train_speed(iter/s)": 0.672632 }, { "epoch": 3.893577824429116, "grad_norm": 5.714219093322754, "learning_rate": 1.1633448221652848e-05, "loss": 2.071417045593262, "memory(GiB)": 72.85, "step": 90880, "token_acc": 0.5460122699386503, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.893792039758365, "grad_norm": 6.961499214172363, "learning_rate": 1.1629133094250183e-05, "loss": 1.8310028076171876, "memory(GiB)": 72.85, "step": 90885, "token_acc": 0.5836575875486382, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.894006255087614, "grad_norm": 5.835861682891846, "learning_rate": 1.1624818661971747e-05, "loss": 2.015574073791504, "memory(GiB)": 72.85, "step": 90890, "token_acc": 0.5483870967741935, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.894220470416863, "grad_norm": 5.236940383911133, "learning_rate": 1.1620504924895737e-05, "loss": 1.9506031036376954, "memory(GiB)": 72.85, "step": 90895, "token_acc": 0.574750830564784, "train_speed(iter/s)": 0.672627 }, { "epoch": 3.894434685746112, "grad_norm": 5.445614337921143, "learning_rate": 1.1616191883100285e-05, "loss": 2.046734428405762, "memory(GiB)": 72.85, "step": 90900, "token_acc": 0.6070287539936102, "train_speed(iter/s)": 0.672628 }, { "epoch": 3.894648901075361, "grad_norm": 
4.826608180999756, "learning_rate": 1.1611879536663523e-05, "loss": 1.685174560546875, "memory(GiB)": 72.85, "step": 90905, "token_acc": 0.6224066390041494, "train_speed(iter/s)": 0.67264 }, { "epoch": 3.89486311640461, "grad_norm": 5.817762851715088, "learning_rate": 1.1607567885663555e-05, "loss": 1.9429798126220703, "memory(GiB)": 72.85, "step": 90910, "token_acc": 0.6115702479338843, "train_speed(iter/s)": 0.672639 }, { "epoch": 3.8950773317338587, "grad_norm": 6.129075527191162, "learning_rate": 1.160325693017853e-05, "loss": 2.2225225448608397, "memory(GiB)": 72.85, "step": 90915, "token_acc": 0.5582191780821918, "train_speed(iter/s)": 0.672631 }, { "epoch": 3.895291547063108, "grad_norm": 5.010186672210693, "learning_rate": 1.1598946670286525e-05, "loss": 2.1643348693847657, "memory(GiB)": 72.85, "step": 90920, "token_acc": 0.5335689045936396, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.895505762392357, "grad_norm": 5.696573734283447, "learning_rate": 1.1594637106065608e-05, "loss": 2.3285762786865236, "memory(GiB)": 72.85, "step": 90925, "token_acc": 0.5109034267912772, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.8957199777216056, "grad_norm": 6.998345375061035, "learning_rate": 1.1590328237593878e-05, "loss": 1.9144725799560547, "memory(GiB)": 72.85, "step": 90930, "token_acc": 0.5300353356890459, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.895934193050855, "grad_norm": 4.375741958618164, "learning_rate": 1.1586020064949387e-05, "loss": 1.9081424713134765, "memory(GiB)": 72.85, "step": 90935, "token_acc": 0.5802919708029197, "train_speed(iter/s)": 0.672643 }, { "epoch": 3.8961484083801037, "grad_norm": 5.656577110290527, "learning_rate": 1.1581712588210181e-05, "loss": 2.111195755004883, "memory(GiB)": 72.85, "step": 90940, "token_acc": 0.5466666666666666, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.8963626237093525, "grad_norm": 5.802524566650391, "learning_rate": 1.1577405807454283e-05, "loss": 2.212245750427246, "memory(GiB)": 72.85, 
"step": 90945, "token_acc": 0.56, "train_speed(iter/s)": 0.672657 }, { "epoch": 3.8965768390386017, "grad_norm": 7.370728969573975, "learning_rate": 1.1573099722759712e-05, "loss": 2.059977340698242, "memory(GiB)": 72.85, "step": 90950, "token_acc": 0.5269709543568465, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.8967910543678506, "grad_norm": 5.872063159942627, "learning_rate": 1.1568794334204502e-05, "loss": 1.9602359771728515, "memory(GiB)": 72.85, "step": 90955, "token_acc": 0.5755813953488372, "train_speed(iter/s)": 0.672661 }, { "epoch": 3.8970052696970994, "grad_norm": 5.338561534881592, "learning_rate": 1.1564489641866633e-05, "loss": 2.0599876403808595, "memory(GiB)": 72.85, "step": 90960, "token_acc": 0.5289855072463768, "train_speed(iter/s)": 0.672654 }, { "epoch": 3.8972194850263486, "grad_norm": 5.104250431060791, "learning_rate": 1.156018564582409e-05, "loss": 1.9373727798461915, "memory(GiB)": 72.85, "step": 90965, "token_acc": 0.5970149253731343, "train_speed(iter/s)": 0.672656 }, { "epoch": 3.8974337003555974, "grad_norm": 6.469420433044434, "learning_rate": 1.1555882346154845e-05, "loss": 2.099407768249512, "memory(GiB)": 72.85, "step": 90970, "token_acc": 0.5254777070063694, "train_speed(iter/s)": 0.672658 }, { "epoch": 3.8976479156848463, "grad_norm": 5.164635181427002, "learning_rate": 1.1551579742936852e-05, "loss": 2.000875473022461, "memory(GiB)": 72.85, "step": 90975, "token_acc": 0.5370370370370371, "train_speed(iter/s)": 0.672662 }, { "epoch": 3.8978621310140955, "grad_norm": 4.828713417053223, "learning_rate": 1.1547277836248043e-05, "loss": 2.444967269897461, "memory(GiB)": 72.85, "step": 90980, "token_acc": 0.487012987012987, "train_speed(iter/s)": 0.672668 }, { "epoch": 3.8980763463433443, "grad_norm": 5.0379157066345215, "learning_rate": 1.1542976626166374e-05, "loss": 2.0043968200683593, "memory(GiB)": 72.85, "step": 90985, "token_acc": 0.5481171548117155, "train_speed(iter/s)": 0.672673 }, { "epoch": 3.898290561672593, 
"grad_norm": 5.022841930389404, "learning_rate": 1.153867611276977e-05, "loss": 2.035922050476074, "memory(GiB)": 72.85, "step": 90990, "token_acc": 0.5284810126582279, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.8985047770018424, "grad_norm": 5.459929466247559, "learning_rate": 1.1534376296136124e-05, "loss": 2.2190425872802733, "memory(GiB)": 72.85, "step": 90995, "token_acc": 0.5669014084507042, "train_speed(iter/s)": 0.672671 }, { "epoch": 3.898718992331091, "grad_norm": 6.645204067230225, "learning_rate": 1.153007717634334e-05, "loss": 2.095576286315918, "memory(GiB)": 72.85, "step": 91000, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.672669 }, { "epoch": 3.898718992331091, "eval_loss": 1.9861547946929932, "eval_runtime": 14.234, "eval_samples_per_second": 7.025, "eval_steps_per_second": 7.025, "eval_token_acc": 0.532345013477089, "step": 91000 }, { "epoch": 3.89893320766034, "grad_norm": 8.23481273651123, "learning_rate": 1.1525778753469297e-05, "loss": 2.270941734313965, "memory(GiB)": 72.85, "step": 91005, "token_acc": 0.527027027027027, "train_speed(iter/s)": 0.67259 }, { "epoch": 3.8991474229895893, "grad_norm": 5.509709358215332, "learning_rate": 1.1521481027591847e-05, "loss": 2.035295104980469, "memory(GiB)": 72.85, "step": 91010, "token_acc": 0.5134228187919463, "train_speed(iter/s)": 0.672596 }, { "epoch": 3.899361638318838, "grad_norm": 6.55212926864624, "learning_rate": 1.1517183998788877e-05, "loss": 1.8949224472045898, "memory(GiB)": 72.85, "step": 91015, "token_acc": 0.5899581589958159, "train_speed(iter/s)": 0.672597 }, { "epoch": 3.899575853648087, "grad_norm": 4.984838962554932, "learning_rate": 1.1512887667138217e-05, "loss": 2.177961730957031, "memory(GiB)": 72.85, "step": 91020, "token_acc": 0.5153203342618384, "train_speed(iter/s)": 0.6726 }, { "epoch": 3.899790068977336, "grad_norm": 6.22851037979126, "learning_rate": 1.1508592032717701e-05, "loss": 2.0101943969726563, "memory(GiB)": 72.85, "step": 91025, "token_acc": 
0.5212765957446809, "train_speed(iter/s)": 0.672606 }, { "epoch": 3.900004284306585, "grad_norm": 6.7548017501831055, "learning_rate": 1.1504297095605154e-05, "loss": 1.919784164428711, "memory(GiB)": 72.85, "step": 91030, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672608 }, { "epoch": 3.900218499635834, "grad_norm": 5.314976215362549, "learning_rate": 1.1500002855878362e-05, "loss": 1.8987823486328126, "memory(GiB)": 72.85, "step": 91035, "token_acc": 0.6052631578947368, "train_speed(iter/s)": 0.672609 }, { "epoch": 3.900432714965083, "grad_norm": 6.977879524230957, "learning_rate": 1.1495709313615143e-05, "loss": 2.1792791366577147, "memory(GiB)": 72.85, "step": 91040, "token_acc": 0.5338078291814946, "train_speed(iter/s)": 0.672621 }, { "epoch": 3.900646930294332, "grad_norm": 5.404019832611084, "learning_rate": 1.1491416468893274e-05, "loss": 1.9646839141845702, "memory(GiB)": 72.85, "step": 91045, "token_acc": 0.5533333333333333, "train_speed(iter/s)": 0.672616 }, { "epoch": 3.9008611456235807, "grad_norm": 5.140287399291992, "learning_rate": 1.1487124321790515e-05, "loss": 1.844863510131836, "memory(GiB)": 72.85, "step": 91050, "token_acc": 0.5884244372990354, "train_speed(iter/s)": 0.672624 }, { "epoch": 3.90107536095283, "grad_norm": 5.77195930480957, "learning_rate": 1.148283287238463e-05, "loss": 1.7228363037109375, "memory(GiB)": 72.85, "step": 91055, "token_acc": 0.572992700729927, "train_speed(iter/s)": 0.67262 }, { "epoch": 3.9012895762820787, "grad_norm": 6.334585189819336, "learning_rate": 1.1478542120753344e-05, "loss": 2.117169952392578, "memory(GiB)": 72.85, "step": 91060, "token_acc": 0.5754385964912281, "train_speed(iter/s)": 0.672619 }, { "epoch": 3.9015037916113275, "grad_norm": 5.872769355773926, "learning_rate": 1.1474252066974422e-05, "loss": 2.02819938659668, "memory(GiB)": 72.85, "step": 91065, "token_acc": 0.5364963503649635, "train_speed(iter/s)": 0.672626 }, { "epoch": 3.901718006940577, "grad_norm": 6.45491361618042, 
"learning_rate": 1.1469962711125548e-05, "loss": 1.8110795974731446, "memory(GiB)": 72.85, "step": 91070, "token_acc": 0.5990783410138248, "train_speed(iter/s)": 0.67263 }, { "epoch": 3.9019322222698256, "grad_norm": 6.757261753082275, "learning_rate": 1.1465674053284452e-05, "loss": 2.1177391052246093, "memory(GiB)": 72.85, "step": 91075, "token_acc": 0.525, "train_speed(iter/s)": 0.672637 }, { "epoch": 3.9021464375990744, "grad_norm": 4.720127105712891, "learning_rate": 1.1461386093528826e-05, "loss": 1.8566291809082032, "memory(GiB)": 72.85, "step": 91080, "token_acc": 0.5369127516778524, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.9023606529283237, "grad_norm": 5.451552391052246, "learning_rate": 1.1457098831936342e-05, "loss": 2.245372009277344, "memory(GiB)": 72.85, "step": 91085, "token_acc": 0.5085910652920962, "train_speed(iter/s)": 0.672646 }, { "epoch": 3.9025748682575725, "grad_norm": 5.7409796714782715, "learning_rate": 1.1452812268584667e-05, "loss": 2.051711654663086, "memory(GiB)": 72.85, "step": 91090, "token_acc": 0.5257142857142857, "train_speed(iter/s)": 0.672642 }, { "epoch": 3.9027890835868213, "grad_norm": 5.066042423248291, "learning_rate": 1.1448526403551441e-05, "loss": 2.1337158203125, "memory(GiB)": 72.85, "step": 91095, "token_acc": 0.5234159779614325, "train_speed(iter/s)": 0.672648 }, { "epoch": 3.9030032989160706, "grad_norm": 7.713239669799805, "learning_rate": 1.1444241236914343e-05, "loss": 2.088230514526367, "memory(GiB)": 72.85, "step": 91100, "token_acc": 0.5192307692307693, "train_speed(iter/s)": 0.672651 }, { "epoch": 3.9032175142453194, "grad_norm": 5.273262977600098, "learning_rate": 1.143995676875098e-05, "loss": 2.101024627685547, "memory(GiB)": 72.85, "step": 91105, "token_acc": 0.5444015444015444, "train_speed(iter/s)": 0.672659 }, { "epoch": 3.903431729574568, "grad_norm": 7.721088886260986, "learning_rate": 1.1435672999138975e-05, "loss": 1.9757640838623047, "memory(GiB)": 72.85, "step": 91110, "token_acc": 
0.572347266881029, "train_speed(iter/s)": 0.672665 }, { "epoch": 3.9036459449038174, "grad_norm": 5.625461578369141, "learning_rate": 1.1431389928155922e-05, "loss": 2.17294921875, "memory(GiB)": 72.85, "step": 91115, "token_acc": 0.5392857142857143, "train_speed(iter/s)": 0.672667 }, { "epoch": 3.9038601602330663, "grad_norm": 6.009393215179443, "learning_rate": 1.1427107555879412e-05, "loss": 2.208087921142578, "memory(GiB)": 72.85, "step": 91120, "token_acc": 0.5230769230769231, "train_speed(iter/s)": 0.672677 }, { "epoch": 3.904074375562315, "grad_norm": 7.707584381103516, "learning_rate": 1.142282588238705e-05, "loss": 2.0719398498535155, "memory(GiB)": 72.85, "step": 91125, "token_acc": 0.5, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.9042885908915643, "grad_norm": 5.320141792297363, "learning_rate": 1.1418544907756368e-05, "loss": 1.9827754974365235, "memory(GiB)": 72.85, "step": 91130, "token_acc": 0.5682539682539682, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.904502806220813, "grad_norm": 5.566509246826172, "learning_rate": 1.1414264632064952e-05, "loss": 2.135569763183594, "memory(GiB)": 72.85, "step": 91135, "token_acc": 0.5309446254071661, "train_speed(iter/s)": 0.672681 }, { "epoch": 3.904717021550062, "grad_norm": 6.381899356842041, "learning_rate": 1.1409985055390332e-05, "loss": 2.159296417236328, "memory(GiB)": 72.85, "step": 91140, "token_acc": 0.5173611111111112, "train_speed(iter/s)": 0.672678 }, { "epoch": 3.904931236879311, "grad_norm": 7.009152412414551, "learning_rate": 1.1405706177810027e-05, "loss": 1.9953596115112304, "memory(GiB)": 72.85, "step": 91145, "token_acc": 0.5673469387755102, "train_speed(iter/s)": 0.672682 }, { "epoch": 3.90514545220856, "grad_norm": 5.487311840057373, "learning_rate": 1.1401427999401565e-05, "loss": 1.861094856262207, "memory(GiB)": 72.85, "step": 91150, "token_acc": 0.57421875, "train_speed(iter/s)": 0.67269 }, { "epoch": 3.905359667537809, "grad_norm": 4.602847099304199, "learning_rate": 
1.1397150520242422e-05, "loss": 1.765864944458008, "memory(GiB)": 72.85, "step": 91155, "token_acc": 0.5962732919254659, "train_speed(iter/s)": 0.672695 }, { "epoch": 3.905573882867058, "grad_norm": 5.785676002502441, "learning_rate": 1.1392873740410132e-05, "loss": 2.3085094451904298, "memory(GiB)": 72.85, "step": 91160, "token_acc": 0.53954802259887, "train_speed(iter/s)": 0.672696 }, { "epoch": 3.905788098196307, "grad_norm": 6.218249320983887, "learning_rate": 1.1388597659982148e-05, "loss": 2.202443504333496, "memory(GiB)": 72.85, "step": 91165, "token_acc": 0.5304878048780488, "train_speed(iter/s)": 0.672701 }, { "epoch": 3.9060023135255557, "grad_norm": 6.093884468078613, "learning_rate": 1.138432227903593e-05, "loss": 2.0897106170654296, "memory(GiB)": 72.85, "step": 91170, "token_acc": 0.5375, "train_speed(iter/s)": 0.6727 }, { "epoch": 3.906216528854805, "grad_norm": 6.271176338195801, "learning_rate": 1.1380047597648946e-05, "loss": 2.292032814025879, "memory(GiB)": 72.85, "step": 91175, "token_acc": 0.5172413793103449, "train_speed(iter/s)": 0.672705 }, { "epoch": 3.906430744184054, "grad_norm": 4.938135623931885, "learning_rate": 1.1375773615898617e-05, "loss": 2.3134456634521485, "memory(GiB)": 72.85, "step": 91180, "token_acc": 0.51985559566787, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.9066449595133026, "grad_norm": 6.9151225090026855, "learning_rate": 1.137150033386239e-05, "loss": 2.1531402587890627, "memory(GiB)": 72.85, "step": 91185, "token_acc": 0.5206349206349207, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.906859174842552, "grad_norm": 6.788259506225586, "learning_rate": 1.1367227751617671e-05, "loss": 2.030744934082031, "memory(GiB)": 72.85, "step": 91190, "token_acc": 0.5445205479452054, "train_speed(iter/s)": 0.672715 }, { "epoch": 3.9070733901718007, "grad_norm": 5.7427544593811035, "learning_rate": 1.1362955869241865e-05, "loss": 2.2638439178466796, "memory(GiB)": 72.85, "step": 91195, "token_acc": 0.5128205128205128, 
"train_speed(iter/s)": 0.672709 }, { "epoch": 3.9072876055010495, "grad_norm": 5.7812581062316895, "learning_rate": 1.1358684686812354e-05, "loss": 2.323625183105469, "memory(GiB)": 72.85, "step": 91200, "token_acc": 0.5266272189349113, "train_speed(iter/s)": 0.67271 }, { "epoch": 3.9075018208302987, "grad_norm": 4.825207233428955, "learning_rate": 1.1354414204406505e-05, "loss": 2.122614860534668, "memory(GiB)": 72.85, "step": 91205, "token_acc": 0.5, "train_speed(iter/s)": 0.672713 }, { "epoch": 3.9077160361595475, "grad_norm": 5.127635955810547, "learning_rate": 1.135014442210171e-05, "loss": 1.8692897796630858, "memory(GiB)": 72.85, "step": 91210, "token_acc": 0.5802047781569966, "train_speed(iter/s)": 0.672714 }, { "epoch": 3.9079302514887964, "grad_norm": 4.8946919441223145, "learning_rate": 1.1345875339975292e-05, "loss": 2.113958740234375, "memory(GiB)": 72.85, "step": 91215, "token_acc": 0.5230769230769231, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.9081444668180456, "grad_norm": 6.652769565582275, "learning_rate": 1.1341606958104616e-05, "loss": 2.181040573120117, "memory(GiB)": 72.85, "step": 91220, "token_acc": 0.5252225519287834, "train_speed(iter/s)": 0.672709 }, { "epoch": 3.9083586821472944, "grad_norm": 6.389649391174316, "learning_rate": 1.1337339276566993e-05, "loss": 2.1789459228515624, "memory(GiB)": 72.85, "step": 91225, "token_acc": 0.5117056856187291, "train_speed(iter/s)": 0.672708 }, { "epoch": 3.9085728974765432, "grad_norm": 7.153774261474609, "learning_rate": 1.1333072295439739e-05, "loss": 2.1694267272949217, "memory(GiB)": 72.85, "step": 91230, "token_acc": 0.5161290322580645, "train_speed(iter/s)": 0.672716 }, { "epoch": 3.9087871128057925, "grad_norm": 6.011205196380615, "learning_rate": 1.1328806014800158e-05, "loss": 2.3240192413330076, "memory(GiB)": 72.85, "step": 91235, "token_acc": 0.5018050541516246, "train_speed(iter/s)": 0.672716 }, { "epoch": 3.9090013281350413, "grad_norm": 6.005913734436035, "learning_rate": 
1.132454043472551e-05, "loss": 1.9091312408447265, "memory(GiB)": 72.85, "step": 91240, "token_acc": 0.5607476635514018, "train_speed(iter/s)": 0.672716 }, { "epoch": 3.90921554346429, "grad_norm": 8.451909065246582, "learning_rate": 1.1320275555293113e-05, "loss": 2.052129936218262, "memory(GiB)": 72.85, "step": 91245, "token_acc": 0.5181518151815182, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.9094297587935394, "grad_norm": 5.924468040466309, "learning_rate": 1.1316011376580204e-05, "loss": 2.007377815246582, "memory(GiB)": 72.85, "step": 91250, "token_acc": 0.5648535564853556, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.909643974122788, "grad_norm": 5.410959720611572, "learning_rate": 1.1311747898664038e-05, "loss": 2.054223823547363, "memory(GiB)": 72.85, "step": 91255, "token_acc": 0.5073313782991202, "train_speed(iter/s)": 0.672728 }, { "epoch": 3.909858189452037, "grad_norm": 7.0167131423950195, "learning_rate": 1.1307485121621858e-05, "loss": 2.4076654434204103, "memory(GiB)": 72.85, "step": 91260, "token_acc": 0.4754601226993865, "train_speed(iter/s)": 0.672725 }, { "epoch": 3.9100724047812863, "grad_norm": 4.937214374542236, "learning_rate": 1.1303223045530859e-05, "loss": 2.0067026138305666, "memory(GiB)": 72.85, "step": 91265, "token_acc": 0.5618729096989966, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.910286620110535, "grad_norm": 6.805635452270508, "learning_rate": 1.1298961670468294e-05, "loss": 2.0975130081176756, "memory(GiB)": 72.85, "step": 91270, "token_acc": 0.5218855218855218, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.910500835439784, "grad_norm": 6.774161338806152, "learning_rate": 1.1294700996511342e-05, "loss": 1.8926746368408203, "memory(GiB)": 72.85, "step": 91275, "token_acc": 0.5749128919860628, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.910715050769033, "grad_norm": 4.820539474487305, "learning_rate": 1.1290441023737175e-05, "loss": 2.1170644760131836, "memory(GiB)": 72.85, "step": 91280, "token_acc": 
0.5286195286195287, "train_speed(iter/s)": 0.672736 }, { "epoch": 3.910929266098282, "grad_norm": 5.91414213180542, "learning_rate": 1.1286181752222996e-05, "loss": 2.356110763549805, "memory(GiB)": 72.85, "step": 91285, "token_acc": 0.5331010452961672, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.9111434814275308, "grad_norm": 5.542418479919434, "learning_rate": 1.128192318204595e-05, "loss": 1.8862691879272462, "memory(GiB)": 72.85, "step": 91290, "token_acc": 0.541958041958042, "train_speed(iter/s)": 0.672733 }, { "epoch": 3.91135769675678, "grad_norm": 5.276829719543457, "learning_rate": 1.1277665313283187e-05, "loss": 1.979568099975586, "memory(GiB)": 72.85, "step": 91295, "token_acc": 0.5503355704697986, "train_speed(iter/s)": 0.672735 }, { "epoch": 3.911571912086029, "grad_norm": 7.644931793212891, "learning_rate": 1.1273408146011827e-05, "loss": 2.297859954833984, "memory(GiB)": 72.85, "step": 91300, "token_acc": 0.53, "train_speed(iter/s)": 0.672738 }, { "epoch": 3.9117861274152776, "grad_norm": 6.535579204559326, "learning_rate": 1.1269151680309021e-05, "loss": 1.9509544372558594, "memory(GiB)": 72.85, "step": 91305, "token_acc": 0.5306859205776173, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.912000342744527, "grad_norm": 5.639070987701416, "learning_rate": 1.126489591625186e-05, "loss": 2.1971473693847656, "memory(GiB)": 72.85, "step": 91310, "token_acc": 0.523972602739726, "train_speed(iter/s)": 0.672732 }, { "epoch": 3.9122145580737757, "grad_norm": 8.076926231384277, "learning_rate": 1.1260640853917453e-05, "loss": 2.2034610748291015, "memory(GiB)": 72.85, "step": 91315, "token_acc": 0.5304347826086957, "train_speed(iter/s)": 0.672734 }, { "epoch": 3.9124287734030245, "grad_norm": 8.68780517578125, "learning_rate": 1.1256386493382876e-05, "loss": 2.1785104751586912, "memory(GiB)": 72.85, "step": 91320, "token_acc": 0.46075085324232085, "train_speed(iter/s)": 0.672742 }, { "epoch": 3.912642988732274, "grad_norm": 6.361404895782471, 
"learning_rate": 1.1252132834725187e-05, "loss": 2.1570611953735352, "memory(GiB)": 72.85, "step": 91325, "token_acc": 0.49421965317919075, "train_speed(iter/s)": 0.672746 }, { "epoch": 3.9128572040615226, "grad_norm": 5.536370277404785, "learning_rate": 1.1247879878021472e-05, "loss": 2.0389286041259767, "memory(GiB)": 72.85, "step": 91330, "token_acc": 0.5638297872340425, "train_speed(iter/s)": 0.672739 }, { "epoch": 3.9130714193907714, "grad_norm": 6.139580726623535, "learning_rate": 1.1243627623348769e-05, "loss": 2.1655858993530273, "memory(GiB)": 72.85, "step": 91335, "token_acc": 0.5097276264591439, "train_speed(iter/s)": 0.672737 }, { "epoch": 3.9132856347200207, "grad_norm": 4.800059795379639, "learning_rate": 1.1239376070784108e-05, "loss": 1.9901264190673829, "memory(GiB)": 72.85, "step": 91340, "token_acc": 0.5697674418604651, "train_speed(iter/s)": 0.672746 }, { "epoch": 3.9134998500492695, "grad_norm": 6.221296310424805, "learning_rate": 1.1235125220404507e-05, "loss": 2.032947540283203, "memory(GiB)": 72.85, "step": 91345, "token_acc": 0.5364238410596026, "train_speed(iter/s)": 0.672744 }, { "epoch": 3.9137140653785183, "grad_norm": 6.27868127822876, "learning_rate": 1.1230875072286979e-05, "loss": 2.0476005554199217, "memory(GiB)": 72.85, "step": 91350, "token_acc": 0.5701492537313433, "train_speed(iter/s)": 0.672747 }, { "epoch": 3.9139282807077675, "grad_norm": 8.213465690612793, "learning_rate": 1.1226625626508502e-05, "loss": 1.9822433471679688, "memory(GiB)": 72.85, "step": 91355, "token_acc": 0.5427509293680297, "train_speed(iter/s)": 0.672751 }, { "epoch": 3.9141424960370164, "grad_norm": 6.185059547424316, "learning_rate": 1.1222376883146079e-05, "loss": 2.358315658569336, "memory(GiB)": 72.85, "step": 91360, "token_acc": 0.5082508250825083, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.914356711366265, "grad_norm": 5.319657802581787, "learning_rate": 1.1218128842276688e-05, "loss": 2.0715604782104493, "memory(GiB)": 72.85, "step": 91365, 
"token_acc": 0.5687022900763359, "train_speed(iter/s)": 0.672745 }, { "epoch": 3.9145709266955144, "grad_norm": 5.644569396972656, "learning_rate": 1.121388150397727e-05, "loss": 1.9431514739990234, "memory(GiB)": 72.85, "step": 91370, "token_acc": 0.6043956043956044, "train_speed(iter/s)": 0.672748 }, { "epoch": 3.9147851420247632, "grad_norm": 5.388494968414307, "learning_rate": 1.120963486832477e-05, "loss": 2.155451202392578, "memory(GiB)": 72.85, "step": 91375, "token_acc": 0.5243553008595988, "train_speed(iter/s)": 0.672753 }, { "epoch": 3.914999357354012, "grad_norm": 6.956850528717041, "learning_rate": 1.1205388935396127e-05, "loss": 2.1724843978881836, "memory(GiB)": 72.85, "step": 91380, "token_acc": 0.5416666666666666, "train_speed(iter/s)": 0.672752 }, { "epoch": 3.9152135726832613, "grad_norm": 5.994847774505615, "learning_rate": 1.120114370526824e-05, "loss": 2.149527931213379, "memory(GiB)": 72.85, "step": 91385, "token_acc": 0.5085324232081911, "train_speed(iter/s)": 0.672756 }, { "epoch": 3.91542778801251, "grad_norm": 6.655364990234375, "learning_rate": 1.119689917801805e-05, "loss": 2.1533859252929686, "memory(GiB)": 72.85, "step": 91390, "token_acc": 0.5160256410256411, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.915642003341759, "grad_norm": 4.739348888397217, "learning_rate": 1.1192655353722425e-05, "loss": 2.0952959060668945, "memory(GiB)": 72.85, "step": 91395, "token_acc": 0.4791666666666667, "train_speed(iter/s)": 0.67277 }, { "epoch": 3.915856218671008, "grad_norm": 6.423987865447998, "learning_rate": 1.1189260800464879e-05, "loss": 2.464193916320801, "memory(GiB)": 72.85, "step": 91400, "token_acc": 0.53, "train_speed(iter/s)": 0.672774 }, { "epoch": 3.916070434000257, "grad_norm": 4.8860368728637695, "learning_rate": 1.1185018241681217e-05, "loss": 2.2253320693969725, "memory(GiB)": 72.85, "step": 91405, "token_acc": 0.5208333333333334, "train_speed(iter/s)": 0.672779 }, { "epoch": 3.916284649329506, "grad_norm": 6.168807506561279, 
"learning_rate": 1.1180776386067344e-05, "loss": 2.163253974914551, "memory(GiB)": 72.85, "step": 91410, "token_acc": 0.5687022900763359, "train_speed(iter/s)": 0.672784 }, { "epoch": 3.916498864658755, "grad_norm": 5.332060813903809, "learning_rate": 1.1176535233700142e-05, "loss": 2.02872257232666, "memory(GiB)": 72.85, "step": 91415, "token_acc": 0.5454545454545454, "train_speed(iter/s)": 0.67278 }, { "epoch": 3.916713079988004, "grad_norm": 4.997055530548096, "learning_rate": 1.1172294784656418e-05, "loss": 1.8337356567382812, "memory(GiB)": 72.85, "step": 91420, "token_acc": 0.5807692307692308, "train_speed(iter/s)": 0.672783 }, { "epoch": 3.9169272953172527, "grad_norm": 5.038870811462402, "learning_rate": 1.1168055039013004e-05, "loss": 2.2123973846435545, "memory(GiB)": 72.85, "step": 91425, "token_acc": 0.5105105105105106, "train_speed(iter/s)": 0.672791 }, { "epoch": 3.917141510646502, "grad_norm": 5.560269832611084, "learning_rate": 1.1163815996846693e-05, "loss": 2.196582794189453, "memory(GiB)": 72.85, "step": 91430, "token_acc": 0.5460992907801419, "train_speed(iter/s)": 0.672784 }, { "epoch": 3.9173557259757508, "grad_norm": 7.723437309265137, "learning_rate": 1.1159577658234288e-05, "loss": 2.1313909530639648, "memory(GiB)": 72.85, "step": 91435, "token_acc": 0.525096525096525, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.9175699413049996, "grad_norm": 5.559811592102051, "learning_rate": 1.1155340023252558e-05, "loss": 2.049545669555664, "memory(GiB)": 72.85, "step": 91440, "token_acc": 0.5609756097560976, "train_speed(iter/s)": 0.672788 }, { "epoch": 3.917784156634249, "grad_norm": 5.641452789306641, "learning_rate": 1.1151103091978294e-05, "loss": 2.1370260238647463, "memory(GiB)": 72.85, "step": 91445, "token_acc": 0.5359712230215827, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.9179983719634977, "grad_norm": 5.252267360687256, "learning_rate": 1.1146866864488242e-05, "loss": 2.2076623916625975, "memory(GiB)": 72.85, "step": 91450, 
"token_acc": 0.527027027027027, "train_speed(iter/s)": 0.672799 }, { "epoch": 3.9182125872927465, "grad_norm": 7.161230087280273, "learning_rate": 1.1142631340859145e-05, "loss": 1.9038759231567384, "memory(GiB)": 72.85, "step": 91455, "token_acc": 0.5470383275261324, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.9184268026219957, "grad_norm": 5.865582466125488, "learning_rate": 1.1138396521167732e-05, "loss": 2.0892467498779297, "memory(GiB)": 72.85, "step": 91460, "token_acc": 0.5168067226890757, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.9186410179512445, "grad_norm": 5.34935998916626, "learning_rate": 1.1134162405490706e-05, "loss": 2.1104124069213865, "memory(GiB)": 72.85, "step": 91465, "token_acc": 0.5353535353535354, "train_speed(iter/s)": 0.672798 }, { "epoch": 3.9188552332804933, "grad_norm": 6.548468112945557, "learning_rate": 1.1129928993904803e-05, "loss": 2.126793098449707, "memory(GiB)": 72.85, "step": 91470, "token_acc": 0.5535055350553506, "train_speed(iter/s)": 0.672796 }, { "epoch": 3.9190694486097426, "grad_norm": 6.375921249389648, "learning_rate": 1.1125696286486687e-05, "loss": 1.9855985641479492, "memory(GiB)": 72.85, "step": 91475, "token_acc": 0.5590062111801242, "train_speed(iter/s)": 0.6728 }, { "epoch": 3.9192836639389914, "grad_norm": 5.277941703796387, "learning_rate": 1.112146428331306e-05, "loss": 2.0661691665649413, "memory(GiB)": 72.85, "step": 91480, "token_acc": 0.5362318840579711, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.9194978792682402, "grad_norm": 6.379815101623535, "learning_rate": 1.1117232984460579e-05, "loss": 2.158404731750488, "memory(GiB)": 72.85, "step": 91485, "token_acc": 0.511400651465798, "train_speed(iter/s)": 0.672786 }, { "epoch": 3.9197120945974895, "grad_norm": 8.653315544128418, "learning_rate": 1.1113002390005894e-05, "loss": 2.1198501586914062, "memory(GiB)": 72.85, "step": 91490, "token_acc": 0.5145228215767634, "train_speed(iter/s)": 0.672788 }, { "epoch": 3.9199263099267383, "grad_norm": 
7.362796783447266, "learning_rate": 1.1108772500025644e-05, "loss": 2.243865394592285, "memory(GiB)": 72.85, "step": 91495, "token_acc": 0.5151515151515151, "train_speed(iter/s)": 0.672784 }, { "epoch": 3.920140525255987, "grad_norm": 5.262857913970947, "learning_rate": 1.1104543314596472e-05, "loss": 2.0991424560546874, "memory(GiB)": 72.85, "step": 91500, "token_acc": 0.5426621160409556, "train_speed(iter/s)": 0.672791 }, { "epoch": 3.920140525255987, "eval_loss": 2.1456048488616943, "eval_runtime": 14.7538, "eval_samples_per_second": 6.778, "eval_steps_per_second": 6.778, "eval_token_acc": 0.503957783641161, "step": 91500 }, { "epoch": 3.9203547405852364, "grad_norm": 5.48861026763916, "learning_rate": 1.1100314833794984e-05, "loss": 2.2935531616210936, "memory(GiB)": 72.85, "step": 91505, "token_acc": 0.5154349859681946, "train_speed(iter/s)": 0.672706 }, { "epoch": 3.920568955914485, "grad_norm": 6.858559608459473, "learning_rate": 1.1096087057697785e-05, "loss": 2.0408807754516602, "memory(GiB)": 72.85, "step": 91510, "token_acc": 0.5547703180212014, "train_speed(iter/s)": 0.672704 }, { "epoch": 3.920783171243734, "grad_norm": 5.710294723510742, "learning_rate": 1.1091859986381464e-05, "loss": 2.2061256408691405, "memory(GiB)": 72.85, "step": 91515, "token_acc": 0.5639097744360902, "train_speed(iter/s)": 0.672707 }, { "epoch": 3.9209973865729832, "grad_norm": 5.338324069976807, "learning_rate": 1.10876336199226e-05, "loss": 2.0870155334472655, "memory(GiB)": 72.85, "step": 91520, "token_acc": 0.5462555066079295, "train_speed(iter/s)": 0.672703 }, { "epoch": 3.921211601902232, "grad_norm": 6.28648042678833, "learning_rate": 1.1083407958397734e-05, "loss": 1.978614044189453, "memory(GiB)": 72.85, "step": 91525, "token_acc": 0.543859649122807, "train_speed(iter/s)": 0.672711 }, { "epoch": 3.921425817231481, "grad_norm": 4.383269309997559, "learning_rate": 1.1079183001883465e-05, "loss": 2.034891891479492, "memory(GiB)": 72.85, "step": 91530, "token_acc": 
0.5354609929078015, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.92164003256073, "grad_norm": 6.0929460525512695, "learning_rate": 1.1074958750456299e-05, "loss": 2.4234209060668945, "memory(GiB)": 72.85, "step": 91535, "token_acc": 0.4820846905537459, "train_speed(iter/s)": 0.672728 }, { "epoch": 3.921854247889979, "grad_norm": 5.419583320617676, "learning_rate": 1.1070735204192772e-05, "loss": 2.2311634063720702, "memory(GiB)": 72.85, "step": 91540, "token_acc": 0.5346534653465347, "train_speed(iter/s)": 0.672723 }, { "epoch": 3.9220684632192278, "grad_norm": 5.731825351715088, "learning_rate": 1.1066512363169379e-05, "loss": 2.1073394775390626, "memory(GiB)": 72.85, "step": 91545, "token_acc": 0.5664335664335665, "train_speed(iter/s)": 0.672728 }, { "epoch": 3.922282678548477, "grad_norm": 4.092108726501465, "learning_rate": 1.1062290227462651e-05, "loss": 1.9414569854736328, "memory(GiB)": 72.85, "step": 91550, "token_acc": 0.5285714285714286, "train_speed(iter/s)": 0.672729 }, { "epoch": 3.922496893877726, "grad_norm": 5.8707804679870605, "learning_rate": 1.1058068797149052e-05, "loss": 1.966973114013672, "memory(GiB)": 72.85, "step": 91555, "token_acc": 0.5767918088737202, "train_speed(iter/s)": 0.672731 }, { "epoch": 3.9227111092069746, "grad_norm": 4.554040431976318, "learning_rate": 1.1053848072305079e-05, "loss": 2.1579708099365233, "memory(GiB)": 72.85, "step": 91560, "token_acc": 0.5735294117647058, "train_speed(iter/s)": 0.672726 }, { "epoch": 3.922925324536224, "grad_norm": 6.9513702392578125, "learning_rate": 1.1049628053007183e-05, "loss": 2.0561296463012697, "memory(GiB)": 72.85, "step": 91565, "token_acc": 0.5467128027681661, "train_speed(iter/s)": 0.672722 }, { "epoch": 3.9231395398654727, "grad_norm": 5.732734203338623, "learning_rate": 1.1045408739331815e-05, "loss": 2.469195556640625, "memory(GiB)": 72.85, "step": 91570, "token_acc": 0.47674418604651164, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.9233537551947215, "grad_norm": 
6.725522041320801, "learning_rate": 1.1041190131355406e-05, "loss": 2.01185302734375, "memory(GiB)": 72.85, "step": 91575, "token_acc": 0.5381679389312977, "train_speed(iter/s)": 0.672719 }, { "epoch": 3.9235679705239708, "grad_norm": 6.082762718200684, "learning_rate": 1.103697222915439e-05, "loss": 1.9994258880615234, "memory(GiB)": 72.85, "step": 91580, "token_acc": 0.5627118644067797, "train_speed(iter/s)": 0.672721 }, { "epoch": 3.9237821858532196, "grad_norm": 5.746944427490234, "learning_rate": 1.1032755032805153e-05, "loss": 1.8003875732421875, "memory(GiB)": 72.85, "step": 91585, "token_acc": 0.5809128630705395, "train_speed(iter/s)": 0.672727 }, { "epoch": 3.9239964011824684, "grad_norm": 5.556541919708252, "learning_rate": 1.1028538542384131e-05, "loss": 2.133541297912598, "memory(GiB)": 72.85, "step": 91590, "token_acc": 0.5217391304347826, "train_speed(iter/s)": 0.672728 }, { "epoch": 3.9242106165117177, "grad_norm": 5.111086845397949, "learning_rate": 1.1024322757967686e-05, "loss": 2.0869585037231446, "memory(GiB)": 72.85, "step": 91595, "token_acc": 0.5683760683760684, "train_speed(iter/s)": 0.672736 }, { "epoch": 3.9244248318409665, "grad_norm": 6.747942924499512, "learning_rate": 1.1020107679632202e-05, "loss": 2.1071714401245116, "memory(GiB)": 72.85, "step": 91600, "token_acc": 0.5236486486486487, "train_speed(iter/s)": 0.67274 }, { "epoch": 3.9246390471702153, "grad_norm": 5.084037780761719, "learning_rate": 1.1015893307454029e-05, "loss": 1.8110862731933595, "memory(GiB)": 72.85, "step": 91605, "token_acc": 0.5985663082437276, "train_speed(iter/s)": 0.672753 }, { "epoch": 3.9248532624994645, "grad_norm": 5.959425926208496, "learning_rate": 1.1011679641509509e-05, "loss": 2.1862735748291016, "memory(GiB)": 72.85, "step": 91610, "token_acc": 0.5263157894736842, "train_speed(iter/s)": 0.67276 }, { "epoch": 3.9250674778287133, "grad_norm": 7.387907981872559, "learning_rate": 1.1007466681874995e-05, "loss": 1.8326160430908203, "memory(GiB)": 72.85, 
"step": 91615, "token_acc": 0.5867768595041323, "train_speed(iter/s)": 0.672766 }, { "epoch": 3.925281693157962, "grad_norm": 4.6370978355407715, "learning_rate": 1.1003254428626792e-05, "loss": 2.0901586532592775, "memory(GiB)": 72.85, "step": 91620, "token_acc": 0.5133333333333333, "train_speed(iter/s)": 0.672765 }, { "epoch": 3.9254959084872114, "grad_norm": 7.1601972579956055, "learning_rate": 1.0999042881841227e-05, "loss": 1.9701658248901368, "memory(GiB)": 72.85, "step": 91625, "token_acc": 0.5384615384615384, "train_speed(iter/s)": 0.672771 }, { "epoch": 3.9257101238164602, "grad_norm": 6.125442981719971, "learning_rate": 1.0994832041594589e-05, "loss": 2.078367233276367, "memory(GiB)": 72.85, "step": 91630, "token_acc": 0.5328185328185329, "train_speed(iter/s)": 0.672778 }, { "epoch": 3.925924339145709, "grad_norm": 5.788210391998291, "learning_rate": 1.0990621907963156e-05, "loss": 1.849700927734375, "memory(GiB)": 72.85, "step": 91635, "token_acc": 0.5819397993311036, "train_speed(iter/s)": 0.672786 }, { "epoch": 3.9261385544749583, "grad_norm": 5.173761367797852, "learning_rate": 1.0986412481023184e-05, "loss": 1.9924442291259765, "memory(GiB)": 72.85, "step": 91640, "token_acc": 0.5355805243445693, "train_speed(iter/s)": 0.672789 }, { "epoch": 3.926352769804207, "grad_norm": 5.183128833770752, "learning_rate": 1.0982203760850968e-05, "loss": 2.1417163848876952, "memory(GiB)": 72.85, "step": 91645, "token_acc": 0.5116959064327485, "train_speed(iter/s)": 0.672787 }, { "epoch": 3.926566985133456, "grad_norm": 5.937257289886475, "learning_rate": 1.0977995747522729e-05, "loss": 1.7634803771972656, "memory(GiB)": 72.85, "step": 91650, "token_acc": 0.6070038910505836, "train_speed(iter/s)": 0.672792 }, { "epoch": 3.926781200462705, "grad_norm": 7.383814334869385, "learning_rate": 1.0973788441114702e-05, "loss": 2.1747756958007813, "memory(GiB)": 72.85, "step": 91655, "token_acc": 0.5323741007194245, "train_speed(iter/s)": 0.67279 }, { "epoch": 
3.926995415791954, "grad_norm": 5.540606498718262, "learning_rate": 1.0969581841703108e-05, "loss": 2.0398887634277343, "memory(GiB)": 72.85, "step": 91660, "token_acc": 0.5470383275261324, "train_speed(iter/s)": 0.672786 }, { "epoch": 3.927209631121203, "grad_norm": 8.64651107788086, "learning_rate": 1.0965375949364155e-05, "loss": 2.1298496246337892, "memory(GiB)": 72.85, "step": 91665, "token_acc": 0.5156794425087108, "train_speed(iter/s)": 0.672782 }, { "epoch": 3.927423846450452, "grad_norm": 4.850919246673584, "learning_rate": 1.0961170764174017e-05, "loss": 2.018253135681152, "memory(GiB)": 72.85, "step": 91670, "token_acc": 0.5460750853242321, "train_speed(iter/s)": 0.672789 }, { "epoch": 3.927638061779701, "grad_norm": 5.855517387390137, "learning_rate": 1.0956966286208903e-05, "loss": 2.3972894668579103, "memory(GiB)": 72.85, "step": 91675, "token_acc": 0.5231788079470199, "train_speed(iter/s)": 0.672791 }, { "epoch": 3.9278522771089497, "grad_norm": 5.431683540344238, "learning_rate": 1.0952762515544973e-05, "loss": 2.145119857788086, "memory(GiB)": 72.85, "step": 91680, "token_acc": 0.5166051660516605, "train_speed(iter/s)": 0.672794 }, { "epoch": 3.928066492438199, "grad_norm": 11.620285987854004, "learning_rate": 1.0948559452258384e-05, "loss": 2.3396493911743166, "memory(GiB)": 72.85, "step": 91685, "token_acc": 0.5127272727272727, "train_speed(iter/s)": 0.672797 }, { "epoch": 3.9282807077674478, "grad_norm": 5.711371421813965, "learning_rate": 1.0944357096425267e-05, "loss": 2.387921142578125, "memory(GiB)": 72.85, "step": 91690, "token_acc": 0.47005988023952094, "train_speed(iter/s)": 0.672802 }, { "epoch": 3.9284949230966966, "grad_norm": 5.881725788116455, "learning_rate": 1.0940155448121747e-05, "loss": 2.3692041397094727, "memory(GiB)": 72.85, "step": 91695, "token_acc": 0.5043988269794721, "train_speed(iter/s)": 0.672802 }, { "epoch": 3.928709138425946, "grad_norm": 4.76515007019043, "learning_rate": 1.0935954507423951e-05, "loss": 
1.9473150253295899, "memory(GiB)": 72.85, "step": 91700, "token_acc": 0.5337620578778135, "train_speed(iter/s)": 0.672801 }, { "epoch": 3.9289233537551946, "grad_norm": 5.702199935913086, "learning_rate": 1.0931754274408002e-05, "loss": 2.26979923248291, "memory(GiB)": 72.85, "step": 91705, "token_acc": 0.49473684210526314, "train_speed(iter/s)": 0.672802 }, { "epoch": 3.9291375690844434, "grad_norm": 5.916152477264404, "learning_rate": 1.092755474914997e-05, "loss": 2.080843925476074, "memory(GiB)": 72.85, "step": 91710, "token_acc": 0.5501730103806228, "train_speed(iter/s)": 0.672808 }, { "epoch": 3.9293517844136927, "grad_norm": 5.095661163330078, "learning_rate": 1.0923355931725937e-05, "loss": 1.9509490966796874, "memory(GiB)": 72.85, "step": 91715, "token_acc": 0.5568181818181818, "train_speed(iter/s)": 0.672808 }, { "epoch": 3.9295659997429415, "grad_norm": 6.287876605987549, "learning_rate": 1.0919157822211968e-05, "loss": 2.011602210998535, "memory(GiB)": 72.85, "step": 91720, "token_acc": 0.5846774193548387, "train_speed(iter/s)": 0.672815 }, { "epoch": 3.9297802150721903, "grad_norm": 7.3698039054870605, "learning_rate": 1.0914960420684118e-05, "loss": 2.188066864013672, "memory(GiB)": 72.85, "step": 91725, "token_acc": 0.5347222222222222, "train_speed(iter/s)": 0.672809 }, { "epoch": 3.9299944304014396, "grad_norm": 5.2459235191345215, "learning_rate": 1.0910763727218403e-05, "loss": 2.388787841796875, "memory(GiB)": 72.85, "step": 91730, "token_acc": 0.5175438596491229, "train_speed(iter/s)": 0.672811 }, { "epoch": 3.9302086457306884, "grad_norm": 12.247245788574219, "learning_rate": 1.0906567741890895e-05, "loss": 2.2909027099609376, "memory(GiB)": 72.85, "step": 91735, "token_acc": 0.5294117647058824, "train_speed(iter/s)": 0.67282 }, { "epoch": 3.930422861059937, "grad_norm": 5.739806175231934, "learning_rate": 1.0902372464777582e-05, "loss": 1.9892606735229492, "memory(GiB)": 72.85, "step": 91740, "token_acc": 0.5766129032258065, 
"train_speed(iter/s)": 0.67282 }, { "epoch": 3.9306370763891865, "grad_norm": 6.38999605178833, "learning_rate": 1.089817789595447e-05, "loss": 2.1110675811767576, "memory(GiB)": 72.85, "step": 91745, "token_acc": 0.5494505494505495, "train_speed(iter/s)": 0.672823 }, { "epoch": 3.9308512917184353, "grad_norm": 6.70878267288208, "learning_rate": 1.089398403549754e-05, "loss": 1.9849401473999024, "memory(GiB)": 72.85, "step": 91750, "token_acc": 0.5540983606557377, "train_speed(iter/s)": 0.672829 }, { "epoch": 3.931065507047684, "grad_norm": 5.3475661277771, "learning_rate": 1.0889790883482765e-05, "loss": 2.1667245864868163, "memory(GiB)": 72.85, "step": 91755, "token_acc": 0.5089605734767025, "train_speed(iter/s)": 0.672826 }, { "epoch": 3.9312797223769333, "grad_norm": 4.976240158081055, "learning_rate": 1.0885598439986127e-05, "loss": 2.110682487487793, "memory(GiB)": 72.85, "step": 91760, "token_acc": 0.5088757396449705, "train_speed(iter/s)": 0.672821 }, { "epoch": 3.931493937706182, "grad_norm": 5.271213531494141, "learning_rate": 1.0881406705083568e-05, "loss": 2.1819694519042967, "memory(GiB)": 72.85, "step": 91765, "token_acc": 0.5080385852090032, "train_speed(iter/s)": 0.672816 }, { "epoch": 3.931708153035431, "grad_norm": 7.982507705688477, "learning_rate": 1.087721567885101e-05, "loss": 2.3376802444458007, "memory(GiB)": 72.85, "step": 91770, "token_acc": 0.465625, "train_speed(iter/s)": 0.672814 }, { "epoch": 3.9319223683646802, "grad_norm": 5.302019119262695, "learning_rate": 1.0873025361364404e-05, "loss": 2.362711715698242, "memory(GiB)": 72.85, "step": 91775, "token_acc": 0.48175182481751827, "train_speed(iter/s)": 0.672817 }, { "epoch": 3.932136583693929, "grad_norm": 4.557053565979004, "learning_rate": 1.0868835752699646e-05, "loss": 2.3438957214355467, "memory(GiB)": 72.85, "step": 91780, "token_acc": 0.4635036496350365, "train_speed(iter/s)": 0.672814 }, { "epoch": 3.932350799023178, "grad_norm": 5.885368824005127, "learning_rate": 
1.086464685293262e-05, "loss": 1.8973230361938476, "memory(GiB)": 72.85, "step": 91785, "token_acc": 0.5627376425855514, "train_speed(iter/s)": 0.672817 }, { "epoch": 3.932565014352427, "grad_norm": 6.200954914093018, "learning_rate": 1.0860458662139245e-05, "loss": 2.0114204406738283, "memory(GiB)": 72.85, "step": 91790, "token_acc": 0.5333333333333333, "train_speed(iter/s)": 0.672815 }, { "epoch": 3.932779229681676, "grad_norm": 6.347955703735352, "learning_rate": 1.0856271180395377e-05, "loss": 2.3204957962036135, "memory(GiB)": 72.85, "step": 91795, "token_acc": 0.49390243902439024, "train_speed(iter/s)": 0.672813 }, { "epoch": 3.9329934450109247, "grad_norm": 7.589892864227295, "learning_rate": 1.0852084407776875e-05, "loss": 2.3668121337890624, "memory(GiB)": 72.85, "step": 91800, "token_acc": 0.451505016722408, "train_speed(iter/s)": 0.672813 }, { "epoch": 3.933207660340174, "grad_norm": 5.780033588409424, "learning_rate": 1.0847898344359592e-05, "loss": 2.4637920379638674, "memory(GiB)": 72.85, "step": 91805, "token_acc": 0.4844290657439446, "train_speed(iter/s)": 0.672815 }, { "epoch": 3.933421875669423, "grad_norm": 6.405942916870117, "learning_rate": 1.0843712990219351e-05, "loss": 2.110991859436035, "memory(GiB)": 72.85, "step": 91810, "token_acc": 0.496, "train_speed(iter/s)": 0.672813 }, { "epoch": 3.9336360909986716, "grad_norm": 5.021074295043945, "learning_rate": 1.083952834543197e-05, "loss": 2.0821355819702148, "memory(GiB)": 72.85, "step": 91815, "token_acc": 0.5526315789473685, "train_speed(iter/s)": 0.672814 }, { "epoch": 3.933850306327921, "grad_norm": 4.6056036949157715, "learning_rate": 1.0835344410073283e-05, "loss": 1.8869894027709961, "memory(GiB)": 72.85, "step": 91820, "token_acc": 0.577922077922078, "train_speed(iter/s)": 0.672809 }, { "epoch": 3.9340645216571697, "grad_norm": 6.768329620361328, "learning_rate": 1.0831161184219074e-05, "loss": 2.003145217895508, "memory(GiB)": 72.85, "step": 91825, "token_acc": 0.5275862068965518, 
"train_speed(iter/s)": 0.672806 }, { "epoch": 3.9342787369864185, "grad_norm": 5.167002201080322, "learning_rate": 1.082697866794512e-05, "loss": 1.958841323852539, "memory(GiB)": 72.85, "step": 91830, "token_acc": 0.5778546712802768, "train_speed(iter/s)": 0.672808 }, { "epoch": 3.9344929523156678, "grad_norm": 6.959473609924316, "learning_rate": 1.0822796861327194e-05, "loss": 2.0511680603027345, "memory(GiB)": 72.85, "step": 91835, "token_acc": 0.5514950166112956, "train_speed(iter/s)": 0.672805 }, { "epoch": 3.9347071676449166, "grad_norm": 6.574665546417236, "learning_rate": 1.0818615764441043e-05, "loss": 1.9966461181640625, "memory(GiB)": 72.85, "step": 91840, "token_acc": 0.542319749216301, "train_speed(iter/s)": 0.672808 }, { "epoch": 3.9349213829741654, "grad_norm": 6.434609413146973, "learning_rate": 1.0814435377362426e-05, "loss": 2.09576473236084, "memory(GiB)": 72.85, "step": 91845, "token_acc": 0.5389221556886228, "train_speed(iter/s)": 0.672817 }, { "epoch": 3.9351355983034146, "grad_norm": 4.996205806732178, "learning_rate": 1.0810255700167078e-05, "loss": 2.185542106628418, "memory(GiB)": 72.85, "step": 91850, "token_acc": 0.5503597122302158, "train_speed(iter/s)": 0.672817 }, { "epoch": 3.9353498136326635, "grad_norm": 4.630441188812256, "learning_rate": 1.0806076732930715e-05, "loss": 2.092268943786621, "memory(GiB)": 72.85, "step": 91855, "token_acc": 0.5396825396825397, "train_speed(iter/s)": 0.672817 }, { "epoch": 3.9355640289619123, "grad_norm": 5.877446174621582, "learning_rate": 1.0801898475729033e-05, "loss": 2.240169143676758, "memory(GiB)": 72.85, "step": 91860, "token_acc": 0.5551839464882943, "train_speed(iter/s)": 0.672825 }, { "epoch": 3.9357782442911615, "grad_norm": 5.277438163757324, "learning_rate": 1.079772092863774e-05, "loss": 2.397245407104492, "memory(GiB)": 72.85, "step": 91865, "token_acc": 0.5255972696245734, "train_speed(iter/s)": 0.67283 }, { "epoch": 3.9359924596204103, "grad_norm": 6.412086009979248, "learning_rate": 
1.0793544091732499e-05, "loss": 1.9525634765625, "memory(GiB)": 72.85, "step": 91870, "token_acc": 0.5647058823529412, "train_speed(iter/s)": 0.67283 }, { "epoch": 3.936206674949659, "grad_norm": 5.8555989265441895, "learning_rate": 1.0789367965088976e-05, "loss": 2.1434700012207033, "memory(GiB)": 72.85, "step": 91875, "token_acc": 0.5247524752475248, "train_speed(iter/s)": 0.672835 }, { "epoch": 3.9364208902789084, "grad_norm": 6.286477088928223, "learning_rate": 1.0785192548782846e-05, "loss": 2.128113555908203, "memory(GiB)": 72.85, "step": 91880, "token_acc": 0.56, "train_speed(iter/s)": 0.67284 }, { "epoch": 3.936635105608157, "grad_norm": 4.640005588531494, "learning_rate": 1.078101784288974e-05, "loss": 1.9811250686645507, "memory(GiB)": 72.85, "step": 91885, "token_acc": 0.5403225806451613, "train_speed(iter/s)": 0.672839 }, { "epoch": 3.936849320937406, "grad_norm": 4.7125630378723145, "learning_rate": 1.077684384748529e-05, "loss": 1.9662389755249023, "memory(GiB)": 72.85, "step": 91890, "token_acc": 0.524904214559387, "train_speed(iter/s)": 0.672843 }, { "epoch": 3.9370635362666553, "grad_norm": 7.738040924072266, "learning_rate": 1.0772670562645104e-05, "loss": 1.9707015991210937, "memory(GiB)": 72.85, "step": 91895, "token_acc": 0.5617529880478087, "train_speed(iter/s)": 0.672842 }, { "epoch": 3.937277751595904, "grad_norm": 6.67453145980835, "learning_rate": 1.0768497988444776e-05, "loss": 2.2367341995239256, "memory(GiB)": 72.85, "step": 91900, "token_acc": 0.5272727272727272, "train_speed(iter/s)": 0.672848 }, { "epoch": 3.937491966925153, "grad_norm": 5.44437837600708, "learning_rate": 1.0764326124959922e-05, "loss": 2.3850215911865233, "memory(GiB)": 72.85, "step": 91905, "token_acc": 0.4804270462633452, "train_speed(iter/s)": 0.672853 }, { "epoch": 3.937706182254402, "grad_norm": 8.058561325073242, "learning_rate": 1.0760154972266106e-05, "loss": 2.124127960205078, "memory(GiB)": 72.85, "step": 91910, "token_acc": 0.5017921146953405, 
"train_speed(iter/s)": 0.672858 }, { "epoch": 3.937920397583651, "grad_norm": 5.118844985961914, "learning_rate": 1.0755984530438884e-05, "loss": 2.4803770065307615, "memory(GiB)": 72.85, "step": 91915, "token_acc": 0.47653429602888087, "train_speed(iter/s)": 0.672857 }, { "epoch": 3.9381346129129, "grad_norm": 5.972157001495361, "learning_rate": 1.0751814799553828e-05, "loss": 1.9955171585083007, "memory(GiB)": 72.85, "step": 91920, "token_acc": 0.521875, "train_speed(iter/s)": 0.67285 }, { "epoch": 3.938348828242149, "grad_norm": 4.559250831604004, "learning_rate": 1.074764577968646e-05, "loss": 2.2103059768676756, "memory(GiB)": 72.85, "step": 91925, "token_acc": 0.5130111524163569, "train_speed(iter/s)": 0.67285 }, { "epoch": 3.938563043571398, "grad_norm": 4.374612808227539, "learning_rate": 1.07434774709123e-05, "loss": 2.106253242492676, "memory(GiB)": 72.85, "step": 91930, "token_acc": 0.5476190476190477, "train_speed(iter/s)": 0.672845 }, { "epoch": 3.9387772589006467, "grad_norm": 6.9150238037109375, "learning_rate": 1.0739309873306885e-05, "loss": 2.2679864883422853, "memory(GiB)": 72.85, "step": 91935, "token_acc": 0.49206349206349204, "train_speed(iter/s)": 0.672846 }, { "epoch": 3.938991474229896, "grad_norm": 5.713655948638916, "learning_rate": 1.07351429869457e-05, "loss": 1.9297588348388672, "memory(GiB)": 72.85, "step": 91940, "token_acc": 0.5691699604743083, "train_speed(iter/s)": 0.672845 }, { "epoch": 3.9392056895591447, "grad_norm": 6.327988147735596, "learning_rate": 1.0730976811904237e-05, "loss": 1.9461435317993163, "memory(GiB)": 72.85, "step": 91945, "token_acc": 0.5714285714285714, "train_speed(iter/s)": 0.672852 }, { "epoch": 3.9394199048883936, "grad_norm": 9.245251655578613, "learning_rate": 1.072681134825796e-05, "loss": 2.2293684005737306, "memory(GiB)": 72.85, "step": 91950, "token_acc": 0.5104602510460251, "train_speed(iter/s)": 0.672847 }, { "epoch": 3.939634120217643, "grad_norm": 5.0099897384643555, "learning_rate": 
1.0722646596082342e-05, "loss": 1.9018272399902343, "memory(GiB)": 72.85, "step": 91955, "token_acc": 0.5683453237410072, "train_speed(iter/s)": 0.67285 }, { "epoch": 3.9398483355468916, "grad_norm": 6.9654951095581055, "learning_rate": 1.0718482555452807e-05, "loss": 2.1595170974731444, "memory(GiB)": 72.85, "step": 91960, "token_acc": 0.5419847328244275, "train_speed(iter/s)": 0.672852 }, { "epoch": 3.9400625508761404, "grad_norm": 4.900876522064209, "learning_rate": 1.071431922644483e-05, "loss": 2.024050712585449, "memory(GiB)": 72.85, "step": 91965, "token_acc": 0.5504885993485342, "train_speed(iter/s)": 0.67285 }, { "epoch": 3.9402767662053897, "grad_norm": 6.272977352142334, "learning_rate": 1.071015660913381e-05, "loss": 2.127863311767578, "memory(GiB)": 72.85, "step": 91970, "token_acc": 0.5342857142857143, "train_speed(iter/s)": 0.672851 }, { "epoch": 3.9404909815346385, "grad_norm": 4.923856735229492, "learning_rate": 1.0705994703595157e-05, "loss": 2.0125682830810545, "memory(GiB)": 72.85, "step": 91975, "token_acc": 0.5574324324324325, "train_speed(iter/s)": 0.672848 }, { "epoch": 3.9407051968638873, "grad_norm": 6.613358974456787, "learning_rate": 1.0701833509904275e-05, "loss": 2.2175100326538084, "memory(GiB)": 72.85, "step": 91980, "token_acc": 0.54421768707483, "train_speed(iter/s)": 0.672854 }, { "epoch": 3.9409194121931366, "grad_norm": 5.2079620361328125, "learning_rate": 1.0697673028136524e-05, "loss": 2.104279708862305, "memory(GiB)": 72.85, "step": 91985, "token_acc": 0.5484848484848485, "train_speed(iter/s)": 0.672855 }, { "epoch": 3.9411336275223854, "grad_norm": 5.947059631347656, "learning_rate": 1.06935132583673e-05, "loss": 1.811256217956543, "memory(GiB)": 72.85, "step": 91990, "token_acc": 0.5811320754716981, "train_speed(iter/s)": 0.672856 }, { "epoch": 3.941347842851634, "grad_norm": 5.680670738220215, "learning_rate": 1.0689354200671969e-05, "loss": 2.0072288513183594, "memory(GiB)": 72.85, "step": 91995, "token_acc": 
0.5541795665634675, "train_speed(iter/s)": 0.672851 }, { "epoch": 3.9415620581808835, "grad_norm": 5.479496479034424, "learning_rate": 1.0685195855125863e-05, "loss": 2.19075984954834, "memory(GiB)": 72.85, "step": 92000, "token_acc": 0.564625850340136, "train_speed(iter/s)": 0.672857 }, { "epoch": 3.9415620581808835, "eval_loss": 1.8249893188476562, "eval_runtime": 14.5244, "eval_samples_per_second": 6.885, "eval_steps_per_second": 6.885, "eval_token_acc": 0.5206489675516224, "step": 92000 } ], "logging_steps": 5, "max_steps": 116705, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 4.4649466223799665e+18, "train_batch_size": 8, "trial_name": null, "trial_params": null }