{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.8501270110076207,
"global_step": 2008,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.166666666666667e-06,
"loss": 3.0643,
"theoretical_loss": 3.321567680436603,
"tokens_seen": 2990538752
},
{
"epoch": 0.0,
"learning_rate": 8.333333333333334e-06,
"loss": 3.0798,
"theoretical_loss": 3.3215564803546,
"tokens_seen": 2990669824
},
{
"epoch": 0.0,
"learning_rate": 1.25e-05,
"loss": 2.9318,
"theoretical_loss": 3.321545280900887,
"tokens_seen": 2990800896
},
{
"epoch": 0.0,
"learning_rate": 1.6666666666666667e-05,
"loss": 2.8098,
"theoretical_loss": 3.3215340820754022,
"tokens_seen": 2990931968
},
{
"epoch": 0.0,
"learning_rate": 2.0833333333333336e-05,
"loss": 2.7055,
"theoretical_loss": 3.3215228838780817,
"tokens_seen": 2991063040
},
{
"epoch": 0.0,
"learning_rate": 2.5e-05,
"loss": 2.9762,
"theoretical_loss": 3.3215116863088636,
"tokens_seen": 2991194112
},
{
"epoch": 0.0,
"learning_rate": 2.916666666666667e-05,
"loss": 2.8724,
"theoretical_loss": 3.3215004893676854,
"tokens_seen": 2991325184
},
{
"epoch": 0.0,
"learning_rate": 3.3333333333333335e-05,
"loss": 3.0452,
"theoretical_loss": 3.321489293054483,
"tokens_seen": 2991456256
},
{
"epoch": 0.0,
"learning_rate": 3.7500000000000003e-05,
"loss": 2.8676,
"theoretical_loss": 3.321478097369195,
"tokens_seen": 2991587328
},
{
"epoch": 0.0,
"learning_rate": 4.166666666666667e-05,
"loss": 2.8343,
"theoretical_loss": 3.321466902311758,
"tokens_seen": 2991718400
},
{
"epoch": 0.0,
"learning_rate": 4.5833333333333334e-05,
"loss": 2.743,
"theoretical_loss": 3.3214557078821096,
"tokens_seen": 2991849472
},
{
"epoch": 0.01,
"learning_rate": 5e-05,
"loss": 2.5867,
"theoretical_loss": 3.321444514080187,
"tokens_seen": 2991980544
},
{
"epoch": 0.01,
"objective/train/docs_used": 1640856,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7297048568725586,
"objective/train/theoretical_loss": 3.321438917414603,
"objective/train/tokens_used": 22097376,
"theoretical_loss": 3.321438917414603,
"tokens_seen": 2992046080
},
{
"epoch": 0.01,
"learning_rate": 5.4166666666666664e-05,
"loss": 2.7262,
"theoretical_loss": 3.321433320905927,
"tokens_seen": 2992111616
},
{
"epoch": 0.01,
"learning_rate": 5.833333333333334e-05,
"loss": 2.6517,
"theoretical_loss": 3.3214221283592678,
"tokens_seen": 2992242688
},
{
"epoch": 0.01,
"learning_rate": 6.25e-05,
"loss": 2.9399,
"theoretical_loss": 3.321410936440146,
"tokens_seen": 2992373760
},
{
"epoch": 0.01,
"learning_rate": 6.666666666666667e-05,
"loss": 2.7939,
"theoretical_loss": 3.3213997451485,
"tokens_seen": 2992504832
},
{
"epoch": 0.01,
"learning_rate": 7.083333333333334e-05,
"loss": 2.5715,
"theoretical_loss": 3.3213885544842654,
"tokens_seen": 2992635904
},
{
"epoch": 0.01,
"learning_rate": 7.500000000000001e-05,
"loss": 2.6047,
"theoretical_loss": 3.321377364447381,
"tokens_seen": 2992766976
},
{
"epoch": 0.01,
"learning_rate": 7.916666666666666e-05,
"loss": 2.6736,
"theoretical_loss": 3.3213661750377836,
"tokens_seen": 2992898048
},
{
"epoch": 0.01,
"learning_rate": 8.333333333333334e-05,
"loss": 2.6853,
"theoretical_loss": 3.3213549862554106,
"tokens_seen": 2993029120
},
{
"epoch": 0.01,
"learning_rate": 8.75e-05,
"loss": 2.3665,
"theoretical_loss": 3.3213437981001994,
"tokens_seen": 2993160192
},
{
"epoch": 0.01,
"learning_rate": 9.166666666666667e-05,
"loss": 2.618,
"theoretical_loss": 3.3213326105720875,
"tokens_seen": 2993291264
},
{
"epoch": 0.01,
"learning_rate": 9.583333333333334e-05,
"loss": 2.6614,
"theoretical_loss": 3.3213214236710122,
"tokens_seen": 2993422336
},
{
"epoch": 0.01,
"learning_rate": 0.0001,
"loss": 2.6163,
"theoretical_loss": 3.321310237396911,
"tokens_seen": 2993553408
},
{
"epoch": 0.01,
"objective/train/docs_used": 1641461,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5642035007476807,
"objective/train/theoretical_loss": 3.3212990517497207,
"objective/train/tokens_used": 23735776,
"theoretical_loss": 3.3212990517497207,
"tokens_seen": 2993684480
},
{
"epoch": 0.01,
"learning_rate": 9.995722840034217e-05,
"loss": 2.7218,
"theoretical_loss": 3.3212990517497207,
"tokens_seen": 2993684480
},
{
"epoch": 0.01,
"learning_rate": 9.991445680068435e-05,
"loss": 2.5137,
"theoretical_loss": 3.3212878667293797,
"tokens_seen": 2993815552
},
{
"epoch": 0.01,
"learning_rate": 9.987168520102653e-05,
"loss": 2.5283,
"theoretical_loss": 3.321276682335825,
"tokens_seen": 2993946624
},
{
"epoch": 0.01,
"learning_rate": 9.98289136013687e-05,
"loss": 2.6367,
"theoretical_loss": 3.3212654985689936,
"tokens_seen": 2994077696
},
{
"epoch": 0.01,
"learning_rate": 9.978614200171087e-05,
"loss": 2.5822,
"theoretical_loss": 3.3212543154288237,
"tokens_seen": 2994208768
},
{
"epoch": 0.01,
"learning_rate": 9.974337040205303e-05,
"loss": 2.6296,
"theoretical_loss": 3.3212431329152525,
"tokens_seen": 2994339840
},
{
"epoch": 0.01,
"learning_rate": 9.970059880239521e-05,
"loss": 2.5596,
"theoretical_loss": 3.321231951028217,
"tokens_seen": 2994470912
},
{
"epoch": 0.01,
"learning_rate": 9.965782720273739e-05,
"loss": 2.5663,
"theoretical_loss": 3.3212207697676552,
"tokens_seen": 2994601984
},
{
"epoch": 0.01,
"learning_rate": 9.961505560307956e-05,
"loss": 2.5138,
"theoretical_loss": 3.3212095891335043,
"tokens_seen": 2994733056
},
{
"epoch": 0.01,
"learning_rate": 9.957228400342173e-05,
"loss": 2.5938,
"theoretical_loss": 3.321198409125702,
"tokens_seen": 2994864128
},
{
"epoch": 0.01,
"learning_rate": 9.95295124037639e-05,
"loss": 2.4583,
"theoretical_loss": 3.321187229744186,
"tokens_seen": 2994995200
},
{
"epoch": 0.02,
"learning_rate": 9.948674080410608e-05,
"loss": 2.5265,
"theoretical_loss": 3.321176050988893,
"tokens_seen": 2995126272
},
{
"epoch": 0.02,
"learning_rate": 9.944396920444825e-05,
"loss": 2.7376,
"theoretical_loss": 3.3211648728597614,
"tokens_seen": 2995257344
},
{
"epoch": 0.02,
"objective/train/docs_used": 1642666,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7178521156311035,
"objective/train/theoretical_loss": 3.3211592840299864,
"objective/train/tokens_used": 25374176,
"theoretical_loss": 3.3211592840299864,
"tokens_seen": 2995322880
},
{
"epoch": 0.02,
"learning_rate": 9.940119760479042e-05,
"loss": 2.4464,
"theoretical_loss": 3.3211536953567284,
"tokens_seen": 2995388416
},
{
"epoch": 0.02,
"learning_rate": 9.93584260051326e-05,
"loss": 2.657,
"theoretical_loss": 3.321142518479731,
"tokens_seen": 2995519488
},
{
"epoch": 0.02,
"learning_rate": 9.931565440547476e-05,
"loss": 2.5525,
"theoretical_loss": 3.321131342228708,
"tokens_seen": 2995650560
},
{
"epoch": 0.02,
"learning_rate": 9.927288280581694e-05,
"loss": 2.6099,
"theoretical_loss": 3.321120166603596,
"tokens_seen": 2995781632
},
{
"epoch": 0.02,
"learning_rate": 9.923011120615912e-05,
"loss": 2.4712,
"theoretical_loss": 3.3211089916043326,
"tokens_seen": 2995912704
},
{
"epoch": 0.02,
"learning_rate": 9.918733960650128e-05,
"loss": 2.514,
"theoretical_loss": 3.3210978172308554,
"tokens_seen": 2996043776
},
{
"epoch": 0.02,
"learning_rate": 9.914456800684346e-05,
"loss": 2.4037,
"theoretical_loss": 3.3210866434831026,
"tokens_seen": 2996174848
},
{
"epoch": 0.02,
"learning_rate": 9.910179640718563e-05,
"loss": 2.4715,
"theoretical_loss": 3.3210754703610106,
"tokens_seen": 2996305920
},
{
"epoch": 0.02,
"learning_rate": 9.90590248075278e-05,
"loss": 2.4805,
"theoretical_loss": 3.321064297864518,
"tokens_seen": 2996436992
},
{
"epoch": 0.02,
"learning_rate": 9.901625320786998e-05,
"loss": 2.4755,
"theoretical_loss": 3.3210531259935627,
"tokens_seen": 2996568064
},
{
"epoch": 0.02,
"learning_rate": 9.897348160821215e-05,
"loss": 2.5174,
"theoretical_loss": 3.321041954748081,
"tokens_seen": 2996699136
},
{
"epoch": 0.02,
"learning_rate": 9.893071000855433e-05,
"loss": 2.5996,
"theoretical_loss": 3.321030784128012,
"tokens_seen": 2996830208
},
{
"epoch": 0.02,
"objective/train/docs_used": 1643300,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.879695415496826,
"objective/train/theoretical_loss": 3.321019614133292,
"objective/train/tokens_used": 27012576,
"theoretical_loss": 3.321019614133292,
"tokens_seen": 2996961280
},
{
"epoch": 0.02,
"learning_rate": 9.888793840889649e-05,
"loss": 2.5696,
"theoretical_loss": 3.321019614133292,
"tokens_seen": 2996961280
},
{
"epoch": 0.02,
"learning_rate": 9.884516680923867e-05,
"loss": 2.607,
"theoretical_loss": 3.3210084447638595,
"tokens_seen": 2997092352
},
{
"epoch": 0.02,
"learning_rate": 9.880239520958085e-05,
"loss": 2.4604,
"theoretical_loss": 3.320997276019652,
"tokens_seen": 2997223424
},
{
"epoch": 0.02,
"learning_rate": 9.875962360992301e-05,
"loss": 2.4603,
"theoretical_loss": 3.3209861079006067,
"tokens_seen": 2997354496
},
{
"epoch": 0.02,
"learning_rate": 9.871685201026519e-05,
"loss": 2.4374,
"theoretical_loss": 3.320974940406662,
"tokens_seen": 2997485568
},
{
"epoch": 0.02,
"learning_rate": 9.867408041060736e-05,
"loss": 2.5285,
"theoretical_loss": 3.320963773537755,
"tokens_seen": 2997616640
},
{
"epoch": 0.02,
"learning_rate": 9.863130881094953e-05,
"loss": 2.5895,
"theoretical_loss": 3.320952607293824,
"tokens_seen": 2997747712
},
{
"epoch": 0.02,
"learning_rate": 9.858853721129171e-05,
"loss": 2.5835,
"theoretical_loss": 3.320941441674806,
"tokens_seen": 2997878784
},
{
"epoch": 0.02,
"learning_rate": 9.854576561163388e-05,
"loss": 2.7362,
"theoretical_loss": 3.320930276680639,
"tokens_seen": 2998009856
},
{
"epoch": 0.02,
"learning_rate": 9.850299401197606e-05,
"loss": 2.5553,
"theoretical_loss": 3.3209191123112607,
"tokens_seen": 2998140928
},
{
"epoch": 0.03,
"learning_rate": 9.846022241231822e-05,
"loss": 2.6403,
"theoretical_loss": 3.320907948566609,
"tokens_seen": 2998272000
},
{
"epoch": 0.03,
"learning_rate": 9.84174508126604e-05,
"loss": 2.5129,
"theoretical_loss": 3.3208967854466214,
"tokens_seen": 2998403072
},
{
"epoch": 0.03,
"learning_rate": 9.837467921300258e-05,
"loss": 2.4238,
"theoretical_loss": 3.3208856229512356,
"tokens_seen": 2998534144
},
{
"epoch": 0.03,
"objective/train/docs_used": 1644380,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4041128158569336,
"objective/train/theoretical_loss": 3.320880041937749,
"objective/train/tokens_used": 28650976,
"theoretical_loss": 3.320880041937749,
"tokens_seen": 2998599680
},
{
"epoch": 0.03,
"learning_rate": 9.833190761334474e-05,
"loss": 2.3705,
"theoretical_loss": 3.3208744610803898,
"tokens_seen": 2998665216
},
{
"epoch": 0.03,
"learning_rate": 9.828913601368692e-05,
"loss": 2.4602,
"theoretical_loss": 3.320863299834021,
"tokens_seen": 2998796288
},
{
"epoch": 0.03,
"learning_rate": 9.824636441402908e-05,
"loss": 2.5492,
"theoretical_loss": 3.320852139212068,
"tokens_seen": 2998927360
},
{
"epoch": 0.03,
"learning_rate": 9.820359281437126e-05,
"loss": 2.426,
"theoretical_loss": 3.3208409792144677,
"tokens_seen": 2999058432
},
{
"epoch": 0.03,
"learning_rate": 9.816082121471344e-05,
"loss": 2.4079,
"theoretical_loss": 3.320829819841158,
"tokens_seen": 2999189504
},
{
"epoch": 0.03,
"learning_rate": 9.81180496150556e-05,
"loss": 2.5189,
"theoretical_loss": 3.320818661092077,
"tokens_seen": 2999320576
},
{
"epoch": 0.03,
"learning_rate": 9.807527801539777e-05,
"loss": 2.4085,
"theoretical_loss": 3.3208075029671624,
"tokens_seen": 2999451648
},
{
"epoch": 0.03,
"learning_rate": 9.803250641573995e-05,
"loss": 2.4031,
"theoretical_loss": 3.320796345466352,
"tokens_seen": 2999582720
},
{
"epoch": 0.03,
"learning_rate": 9.798973481608213e-05,
"loss": 2.5067,
"theoretical_loss": 3.320785188589584,
"tokens_seen": 2999713792
},
{
"epoch": 0.03,
"learning_rate": 9.79469632164243e-05,
"loss": 2.4357,
"theoretical_loss": 3.3207740323367956,
"tokens_seen": 2999844864
},
{
"epoch": 0.03,
"learning_rate": 9.790419161676647e-05,
"loss": 2.4929,
"theoretical_loss": 3.3207628767079242,
"tokens_seen": 2999975936
},
{
"epoch": 0.03,
"learning_rate": 9.786142001710863e-05,
"loss": 2.4052,
"theoretical_loss": 3.3207517217029094,
"tokens_seen": 3000107008
},
{
"epoch": 0.03,
"objective/train/docs_used": 1645056,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3271520137786865,
"objective/train/theoretical_loss": 3.3207405673216877,
"objective/train/tokens_used": 30289376,
"theoretical_loss": 3.3207405673216877,
"tokens_seen": 3000238080
},
{
"epoch": 0.03,
"learning_rate": 9.781864841745081e-05,
"loss": 2.5201,
"theoretical_loss": 3.3207405673216877,
"tokens_seen": 3000238080
},
{
"epoch": 0.03,
"learning_rate": 9.777587681779299e-05,
"loss": 2.5431,
"theoretical_loss": 3.320729413564197,
"tokens_seen": 3000369152
},
{
"epoch": 0.03,
"learning_rate": 9.773310521813517e-05,
"loss": 2.5359,
"theoretical_loss": 3.3207182604303753,
"tokens_seen": 3000500224
},
{
"epoch": 0.03,
"learning_rate": 9.769033361847733e-05,
"loss": 2.1929,
"theoretical_loss": 3.320707107920161,
"tokens_seen": 3000631296
},
{
"epoch": 0.03,
"learning_rate": 9.76475620188195e-05,
"loss": 2.5196,
"theoretical_loss": 3.3206959560334917,
"tokens_seen": 3000762368
},
{
"epoch": 0.03,
"learning_rate": 9.760479041916169e-05,
"loss": 2.3645,
"theoretical_loss": 3.320684804770305,
"tokens_seen": 3000893440
},
{
"epoch": 0.03,
"learning_rate": 9.756201881950386e-05,
"loss": 2.5329,
"theoretical_loss": 3.3206736541305393,
"tokens_seen": 3001024512
},
{
"epoch": 0.03,
"learning_rate": 9.751924721984602e-05,
"loss": 2.533,
"theoretical_loss": 3.3206625041141318,
"tokens_seen": 3001155584
},
{
"epoch": 0.04,
"learning_rate": 9.74764756201882e-05,
"loss": 2.4702,
"theoretical_loss": 3.3206513547210212,
"tokens_seen": 3001286656
},
{
"epoch": 0.04,
"learning_rate": 9.743370402053036e-05,
"loss": 2.6255,
"theoretical_loss": 3.320640205951145,
"tokens_seen": 3001417728
},
{
"epoch": 0.04,
"learning_rate": 9.739093242087256e-05,
"loss": 2.5139,
"theoretical_loss": 3.3206290578044415,
"tokens_seen": 3001548800
},
{
"epoch": 0.04,
"learning_rate": 9.734816082121472e-05,
"loss": 2.3796,
"theoretical_loss": 3.3206179102808484,
"tokens_seen": 3001679872
},
{
"epoch": 0.04,
"learning_rate": 9.730538922155689e-05,
"loss": 2.4997,
"theoretical_loss": 3.3206067633803036,
"tokens_seen": 3001810944
},
{
"epoch": 0.04,
"objective/train/docs_used": 1646327,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.398160219192505,
"objective/train/theoretical_loss": 3.320601190163655,
"objective/train/tokens_used": 31927776,
"theoretical_loss": 3.320601190163655,
"tokens_seen": 3001876480
},
{
"epoch": 0.04,
"learning_rate": 9.726261762189906e-05,
"loss": 2.3607,
"theoretical_loss": 3.320595617102745,
"tokens_seen": 3001942016
},
{
"epoch": 0.04,
"learning_rate": 9.721984602224123e-05,
"loss": 2.4803,
"theoretical_loss": 3.320584471448111,
"tokens_seen": 3002073088
},
{
"epoch": 0.04,
"learning_rate": 9.717707442258342e-05,
"loss": 2.348,
"theoretical_loss": 3.3205733264163393,
"tokens_seen": 3002204160
},
{
"epoch": 0.04,
"learning_rate": 9.713430282292558e-05,
"loss": 2.2828,
"theoretical_loss": 3.320562182007368,
"tokens_seen": 3002335232
},
{
"epoch": 0.04,
"learning_rate": 9.709153122326775e-05,
"loss": 2.4615,
"theoretical_loss": 3.320551038221135,
"tokens_seen": 3002466304
},
{
"epoch": 0.04,
"learning_rate": 9.704875962360993e-05,
"loss": 2.3988,
"theoretical_loss": 3.3205398950575784,
"tokens_seen": 3002597376
},
{
"epoch": 0.04,
"learning_rate": 9.700598802395209e-05,
"loss": 2.5582,
"theoretical_loss": 3.320528752516636,
"tokens_seen": 3002728448
},
{
"epoch": 0.04,
"learning_rate": 9.696321642429428e-05,
"loss": 2.3266,
"theoretical_loss": 3.3205176105982463,
"tokens_seen": 3002859520
},
{
"epoch": 0.04,
"learning_rate": 9.692044482463645e-05,
"loss": 2.5922,
"theoretical_loss": 3.320506469302347,
"tokens_seen": 3002990592
},
{
"epoch": 0.04,
"learning_rate": 9.687767322497861e-05,
"loss": 2.4959,
"theoretical_loss": 3.3204953286288763,
"tokens_seen": 3003121664
},
{
"epoch": 0.04,
"learning_rate": 9.683490162532079e-05,
"loss": 2.6068,
"theoretical_loss": 3.3204841885777725,
"tokens_seen": 3003252736
},
{
"epoch": 0.04,
"learning_rate": 9.679213002566297e-05,
"loss": 2.3996,
"theoretical_loss": 3.3204730491489727,
"tokens_seen": 3003383808
},
{
"epoch": 0.04,
"objective/train/docs_used": 1647543,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3885486125946045,
"objective/train/theoretical_loss": 3.3204619103424164,
"objective/train/tokens_used": 33566176,
"theoretical_loss": 3.3204619103424164,
"tokens_seen": 3003514880
},
{
"epoch": 0.04,
"learning_rate": 9.674935842600514e-05,
"loss": 2.4692,
"theoretical_loss": 3.3204619103424164,
"tokens_seen": 3003514880
},
{
"epoch": 0.04,
"learning_rate": 9.670658682634731e-05,
"loss": 2.35,
"theoretical_loss": 3.3204507721580403,
"tokens_seen": 3003645952
},
{
"epoch": 0.04,
"learning_rate": 9.666381522668948e-05,
"loss": 2.3168,
"theoretical_loss": 3.3204396345957834,
"tokens_seen": 3003777024
},
{
"epoch": 0.04,
"learning_rate": 9.662104362703166e-05,
"loss": 2.4671,
"theoretical_loss": 3.320428497655584,
"tokens_seen": 3003908096
},
{
"epoch": 0.04,
"learning_rate": 9.657827202737383e-05,
"loss": 2.552,
"theoretical_loss": 3.320417361337379,
"tokens_seen": 3004039168
},
{
"epoch": 0.04,
"learning_rate": 9.6535500427716e-05,
"loss": 2.3902,
"theoretical_loss": 3.3204062256411078,
"tokens_seen": 3004170240
},
{
"epoch": 0.04,
"learning_rate": 9.649272882805818e-05,
"loss": 2.491,
"theoretical_loss": 3.320395090566708,
"tokens_seen": 3004301312
},
{
"epoch": 0.05,
"learning_rate": 9.644995722840034e-05,
"loss": 2.4628,
"theoretical_loss": 3.3203839561141173,
"tokens_seen": 3004432384
},
{
"epoch": 0.05,
"learning_rate": 9.640718562874252e-05,
"loss": 2.4858,
"theoretical_loss": 3.320372822283275,
"tokens_seen": 3004563456
},
{
"epoch": 0.05,
"learning_rate": 9.63644140290847e-05,
"loss": 2.5038,
"theoretical_loss": 3.3203616890741183,
"tokens_seen": 3004694528
},
{
"epoch": 0.05,
"learning_rate": 9.632164242942686e-05,
"loss": 2.2651,
"theoretical_loss": 3.3203505564865856,
"tokens_seen": 3004825600
},
{
"epoch": 0.05,
"learning_rate": 9.627887082976904e-05,
"loss": 2.4591,
"theoretical_loss": 3.3203394245206153,
"tokens_seen": 3004956672
},
{
"epoch": 0.05,
"learning_rate": 9.623609923011121e-05,
"loss": 2.4003,
"theoretical_loss": 3.320328293176145,
"tokens_seen": 3005087744
},
{
"epoch": 0.05,
"objective/train/docs_used": 1648109,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3964030742645264,
"objective/train/theoretical_loss": 3.3203227277369534,
"objective/train/tokens_used": 35204576,
"theoretical_loss": 3.3203227277369534,
"tokens_seen": 3005153280
},
{
"epoch": 0.05,
"learning_rate": 9.619332763045337e-05,
"loss": 2.3952,
"theoretical_loss": 3.320317162453114,
"tokens_seen": 3005218816
},
{
"epoch": 0.05,
"learning_rate": 9.615055603079556e-05,
"loss": 2.4367,
"theoretical_loss": 3.3203060323514593,
"tokens_seen": 3005349888
},
{
"epoch": 0.05,
"learning_rate": 9.610778443113773e-05,
"loss": 2.4468,
"theoretical_loss": 3.3202949028711197,
"tokens_seen": 3005480960
},
{
"epoch": 0.05,
"learning_rate": 9.60650128314799e-05,
"loss": 2.3551,
"theoretical_loss": 3.3202837740120335,
"tokens_seen": 3005612032
},
{
"epoch": 0.05,
"learning_rate": 9.602224123182207e-05,
"loss": 2.3886,
"theoretical_loss": 3.3202726457741387,
"tokens_seen": 3005743104
},
{
"epoch": 0.05,
"learning_rate": 9.597946963216424e-05,
"loss": 2.4953,
"theoretical_loss": 3.320261518157374,
"tokens_seen": 3005874176
},
{
"epoch": 0.05,
"learning_rate": 9.593669803250643e-05,
"loss": 2.3074,
"theoretical_loss": 3.3202503911616765,
"tokens_seen": 3006005248
},
{
"epoch": 0.05,
"learning_rate": 9.589392643284859e-05,
"loss": 2.4135,
"theoretical_loss": 3.320239264786986,
"tokens_seen": 3006136320
},
{
"epoch": 0.05,
"learning_rate": 9.585115483319077e-05,
"loss": 2.431,
"theoretical_loss": 3.3202281390332393,
"tokens_seen": 3006267392
},
{
"epoch": 0.05,
"learning_rate": 9.580838323353294e-05,
"loss": 2.3277,
"theoretical_loss": 3.320217013900376,
"tokens_seen": 3006398464
},
{
"epoch": 0.05,
"learning_rate": 9.576561163387511e-05,
"loss": 2.5083,
"theoretical_loss": 3.3202058893883333,
"tokens_seen": 3006529536
},
{
"epoch": 0.05,
"learning_rate": 9.572284003421729e-05,
"loss": 2.4582,
"theoretical_loss": 3.3201947654970505,
"tokens_seen": 3006660608
},
{
"epoch": 0.05,
"objective/train/docs_used": 1649212,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5212416648864746,
"objective/train/theoretical_loss": 3.320183642226465,
"objective/train/tokens_used": 36842976,
"theoretical_loss": 3.320183642226465,
"tokens_seen": 3006791680
},
{
"epoch": 0.05,
"learning_rate": 9.568006843455946e-05,
"loss": 2.3596,
"theoretical_loss": 3.320183642226465,
"tokens_seen": 3006791680
},
{
"epoch": 0.05,
"learning_rate": 9.563729683490164e-05,
"loss": 2.3923,
"theoretical_loss": 3.3201725195765155,
"tokens_seen": 3006922752
},
{
"epoch": 0.05,
"learning_rate": 9.55945252352438e-05,
"loss": 2.3563,
"theoretical_loss": 3.3201613975471402,
"tokens_seen": 3007053824
},
{
"epoch": 0.05,
"learning_rate": 9.555175363558598e-05,
"loss": 2.4285,
"theoretical_loss": 3.3201502761382775,
"tokens_seen": 3007184896
},
{
"epoch": 0.05,
"learning_rate": 9.550898203592816e-05,
"loss": 2.2928,
"theoretical_loss": 3.320139155349866,
"tokens_seen": 3007315968
},
{
"epoch": 0.06,
"learning_rate": 9.546621043627032e-05,
"loss": 2.4617,
"theoretical_loss": 3.3201280351818436,
"tokens_seen": 3007447040
},
{
"epoch": 0.06,
"learning_rate": 9.542343883661249e-05,
"loss": 2.4107,
"theoretical_loss": 3.320116915634149,
"tokens_seen": 3007578112
},
{
"epoch": 0.06,
"learning_rate": 9.538066723695466e-05,
"loss": 2.5715,
"theoretical_loss": 3.3201057967067205,
"tokens_seen": 3007709184
},
{
"epoch": 0.06,
"learning_rate": 9.533789563729684e-05,
"loss": 2.4217,
"theoretical_loss": 3.3200946783994962,
"tokens_seen": 3007840256
},
{
"epoch": 0.06,
"learning_rate": 9.529512403763902e-05,
"loss": 2.4315,
"theoretical_loss": 3.3200835607124146,
"tokens_seen": 3007971328
},
{
"epoch": 0.06,
"learning_rate": 9.525235243798119e-05,
"loss": 2.4307,
"theoretical_loss": 3.3200724436454143,
"tokens_seen": 3008102400
},
{
"epoch": 0.06,
"learning_rate": 9.520958083832335e-05,
"loss": 2.5032,
"theoretical_loss": 3.3200613271984336,
"tokens_seen": 3008233472
},
{
"epoch": 0.06,
"learning_rate": 9.516680923866553e-05,
"loss": 2.4379,
"theoretical_loss": 3.3200502113714108,
"tokens_seen": 3008364544
},
{
"epoch": 0.06,
"objective/train/docs_used": 1649940,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.399945020675659,
"objective/train/theoretical_loss": 3.3200446536903643,
"objective/train/tokens_used": 38481376,
"theoretical_loss": 3.3200446536903643,
"tokens_seen": 3008430080
},
{
"epoch": 0.06,
"learning_rate": 9.512403763900771e-05,
"loss": 2.5107,
"theoretical_loss": 3.3200390961642845,
"tokens_seen": 3008495616
},
{
"epoch": 0.06,
"learning_rate": 9.508126603934989e-05,
"loss": 2.4659,
"theoretical_loss": 3.3200279815769926,
"tokens_seen": 3008626688
},
{
"epoch": 0.06,
"learning_rate": 9.503849443969205e-05,
"loss": 2.4327,
"theoretical_loss": 3.3200168676094743,
"tokens_seen": 3008757760
},
{
"epoch": 0.06,
"learning_rate": 9.499572284003421e-05,
"loss": 2.2681,
"theoretical_loss": 3.320005754261668,
"tokens_seen": 3008888832
},
{
"epoch": 0.06,
"learning_rate": 9.49529512403764e-05,
"loss": 2.3802,
"theoretical_loss": 3.319994641533511,
"tokens_seen": 3009019904
},
{
"epoch": 0.06,
"learning_rate": 9.491017964071857e-05,
"loss": 2.5461,
"theoretical_loss": 3.319983529424943,
"tokens_seen": 3009150976
},
{
"epoch": 0.06,
"learning_rate": 9.486740804106075e-05,
"loss": 2.4021,
"theoretical_loss": 3.3199724179359027,
"tokens_seen": 3009282048
},
{
"epoch": 0.06,
"learning_rate": 9.482463644140291e-05,
"loss": 2.4038,
"theoretical_loss": 3.319961307066327,
"tokens_seen": 3009413120
},
{
"epoch": 0.06,
"learning_rate": 9.478186484174508e-05,
"loss": 2.4687,
"theoretical_loss": 3.3199501968161558,
"tokens_seen": 3009544192
},
{
"epoch": 0.06,
"learning_rate": 9.473909324208726e-05,
"loss": 2.4401,
"theoretical_loss": 3.319939087185327,
"tokens_seen": 3009675264
},
{
"epoch": 0.06,
"learning_rate": 9.469632164242944e-05,
"loss": 2.4778,
"theoretical_loss": 3.3199279781737796,
"tokens_seen": 3009806336
},
{
"epoch": 0.06,
"learning_rate": 9.46535500427716e-05,
"loss": 2.4166,
"theoretical_loss": 3.3199168697814514,
"tokens_seen": 3009937408
},
{
"epoch": 0.06,
"objective/train/docs_used": 1651249,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.412529230117798,
"objective/train/theoretical_loss": 3.3199057620082812,
"objective/train/tokens_used": 40119776,
"theoretical_loss": 3.3199057620082812,
"tokens_seen": 3010068480
},
{
"epoch": 0.06,
"learning_rate": 9.461077844311378e-05,
"loss": 2.3839,
"theoretical_loss": 3.3199057620082812,
"tokens_seen": 3010068480
},
{
"epoch": 0.06,
"learning_rate": 9.456800684345594e-05,
"loss": 2.5879,
"theoretical_loss": 3.319894654854208,
"tokens_seen": 3010199552
},
{
"epoch": 0.06,
"learning_rate": 9.452523524379812e-05,
"loss": 2.444,
"theoretical_loss": 3.3198835483191695,
"tokens_seen": 3010330624
},
{
"epoch": 0.06,
"learning_rate": 9.44824636441403e-05,
"loss": 2.2849,
"theoretical_loss": 3.319872442403105,
"tokens_seen": 3010461696
},
{
"epoch": 0.07,
"learning_rate": 9.443969204448247e-05,
"loss": 2.568,
"theoretical_loss": 3.3198613371059524,
"tokens_seen": 3010592768
},
{
"epoch": 0.07,
"learning_rate": 9.439692044482464e-05,
"loss": 2.4131,
"theoretical_loss": 3.319850232427651,
"tokens_seen": 3010723840
},
{
"epoch": 0.07,
"learning_rate": 9.435414884516681e-05,
"loss": 2.4222,
"theoretical_loss": 3.3198391283681383,
"tokens_seen": 3010854912
},
{
"epoch": 0.07,
"learning_rate": 9.431137724550899e-05,
"loss": 2.3993,
"theoretical_loss": 3.3198280249273546,
"tokens_seen": 3010985984
},
{
"epoch": 0.07,
"learning_rate": 9.426860564585116e-05,
"loss": 2.5294,
"theoretical_loss": 3.319816922105237,
"tokens_seen": 3011117056
},
{
"epoch": 0.07,
"learning_rate": 9.422583404619333e-05,
"loss": 2.4529,
"theoretical_loss": 3.319805819901724,
"tokens_seen": 3011248128
},
{
"epoch": 0.07,
"learning_rate": 9.418306244653551e-05,
"loss": 2.4924,
"theoretical_loss": 3.3197947183167553,
"tokens_seen": 3011379200
},
{
"epoch": 0.07,
"learning_rate": 9.414029084687767e-05,
"loss": 2.6054,
"theoretical_loss": 3.319783617350269,
"tokens_seen": 3011510272
},
{
"epoch": 0.07,
"learning_rate": 9.409751924721985e-05,
"loss": 2.388,
"theoretical_loss": 3.319772517002204,
"tokens_seen": 3011641344
},
{
"epoch": 0.07,
"objective/train/docs_used": 1651905,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8164331912994385,
"objective/train/theoretical_loss": 3.31976696706006,
"objective/train/tokens_used": 41758176,
"theoretical_loss": 3.31976696706006,
"tokens_seen": 3011706880
},
{
"epoch": 0.07,
"learning_rate": 9.405474764756203e-05,
"loss": 2.4623,
"theoretical_loss": 3.319761417272498,
"tokens_seen": 3011772416
},
{
"epoch": 0.07,
"learning_rate": 9.40119760479042e-05,
"loss": 2.3955,
"theoretical_loss": 3.319750318161091,
"tokens_seen": 3011903488
},
{
"epoch": 0.07,
"learning_rate": 9.396920444824637e-05,
"loss": 2.2424,
"theoretical_loss": 3.3197392196679205,
"tokens_seen": 3012034560
},
{
"epoch": 0.07,
"learning_rate": 9.392643284858854e-05,
"loss": 2.3568,
"theoretical_loss": 3.3197281217929255,
"tokens_seen": 3012165632
},
{
"epoch": 0.07,
"learning_rate": 9.388366124893072e-05,
"loss": 2.3788,
"theoretical_loss": 3.319717024536045,
"tokens_seen": 3012296704
},
{
"epoch": 0.07,
"learning_rate": 9.38408896492729e-05,
"loss": 2.3081,
"theoretical_loss": 3.3197059278972176,
"tokens_seen": 3012427776
},
{
"epoch": 0.07,
"learning_rate": 9.379811804961506e-05,
"loss": 2.4277,
"theoretical_loss": 3.3196948318763817,
"tokens_seen": 3012558848
},
{
"epoch": 0.07,
"learning_rate": 9.375534644995724e-05,
"loss": 2.4135,
"theoretical_loss": 3.319683736473476,
"tokens_seen": 3012689920
},
{
"epoch": 0.07,
"learning_rate": 9.37125748502994e-05,
"loss": 2.3474,
"theoretical_loss": 3.3196726416884395,
"tokens_seen": 3012820992
},
{
"epoch": 0.07,
"learning_rate": 9.366980325064158e-05,
"loss": 2.5489,
"theoretical_loss": 3.3196615475212106,
"tokens_seen": 3012952064
},
{
"epoch": 0.07,
"learning_rate": 9.362703165098376e-05,
"loss": 2.4998,
"theoretical_loss": 3.3196504539717284,
"tokens_seen": 3013083136
},
{
"epoch": 0.07,
"learning_rate": 9.358426005132592e-05,
"loss": 2.5438,
"theoretical_loss": 3.3196393610399317,
"tokens_seen": 3013214208
},
{
"epoch": 0.07,
"objective/train/docs_used": 1652881,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3386740684509277,
"objective/train/theoretical_loss": 3.3196282687257583,
"objective/train/tokens_used": 43396576,
"theoretical_loss": 3.3196282687257583,
"tokens_seen": 3013345280
},
{
"epoch": 0.07,
"learning_rate": 9.35414884516681e-05,
"loss": 2.5351,
"theoretical_loss": 3.3196282687257583,
"tokens_seen": 3013345280
},
{
"epoch": 0.07,
"learning_rate": 9.349871685201027e-05,
"loss": 2.3127,
"theoretical_loss": 3.3196171770291483,
"tokens_seen": 3013476352
},
{
"epoch": 0.07,
"learning_rate": 9.345594525235244e-05,
"loss": 2.435,
"theoretical_loss": 3.3196060859500394,
"tokens_seen": 3013607424
},
{
"epoch": 0.08,
"learning_rate": 9.341317365269462e-05,
"loss": 2.3466,
"theoretical_loss": 3.319594995488371,
"tokens_seen": 3013738496
},
{
"epoch": 0.08,
"learning_rate": 9.337040205303679e-05,
"loss": 2.5683,
"theoretical_loss": 3.3195839056440812,
"tokens_seen": 3013869568
},
{
"epoch": 0.08,
"learning_rate": 9.332763045337895e-05,
"loss": 2.4838,
"theoretical_loss": 3.3195728164171094,
"tokens_seen": 3014000640
},
{
"epoch": 0.08,
"learning_rate": 9.328485885372113e-05,
"loss": 2.5372,
"theoretical_loss": 3.319561727807394,
"tokens_seen": 3014131712
},
{
"epoch": 0.08,
"learning_rate": 9.324208725406331e-05,
"loss": 2.4055,
"theoretical_loss": 3.3195506398148744,
"tokens_seen": 3014262784
},
{
"epoch": 0.08,
"learning_rate": 9.319931565440549e-05,
"loss": 2.5566,
"theoretical_loss": 3.319539552439489,
"tokens_seen": 3014393856
},
{
"epoch": 0.08,
"learning_rate": 9.315654405474765e-05,
"loss": 2.4051,
"theoretical_loss": 3.3195284656811763,
"tokens_seen": 3014524928
},
{
"epoch": 0.08,
"learning_rate": 9.311377245508982e-05,
"loss": 2.5032,
"theoretical_loss": 3.319517379539876,
"tokens_seen": 3014656000
},
{
"epoch": 0.08,
"learning_rate": 9.3071000855432e-05,
"loss": 2.427,
"theoretical_loss": 3.3195062940155258,
"tokens_seen": 3014787072
},
{
"epoch": 0.08,
"learning_rate": 9.302822925577417e-05,
"loss": 2.4468,
"theoretical_loss": 3.3194952091080654,
"tokens_seen": 3014918144
},
{
"epoch": 0.08,
"objective/train/docs_used": 1653310,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2789435386657715,
"objective/train/theoretical_loss": 3.3194896668856497,
"objective/train/tokens_used": 45034976,
"theoretical_loss": 3.3194896668856497,
"tokens_seen": 3014983680
},
{
"epoch": 0.08,
"learning_rate": 9.298545765611635e-05,
"loss": 2.3447,
"theoretical_loss": 3.3194841248174334,
"tokens_seen": 3015049216
},
{
"epoch": 0.08,
"learning_rate": 9.294268605645852e-05,
"loss": 2.3587,
"theoretical_loss": 3.3194730411435684,
"tokens_seen": 3015180288
},
{
"epoch": 0.08,
"learning_rate": 9.289991445680068e-05,
"loss": 2.6264,
"theoretical_loss": 3.3194619580864098,
"tokens_seen": 3015311360
},
{
"epoch": 0.08,
"learning_rate": 9.285714285714286e-05,
"loss": 2.57,
"theoretical_loss": 3.3194508756458965,
"tokens_seen": 3015442432
},
{
"epoch": 0.08,
"learning_rate": 9.281437125748504e-05,
"loss": 2.3972,
"theoretical_loss": 3.319439793821967,
"tokens_seen": 3015573504
},
{
"epoch": 0.08,
"learning_rate": 9.27715996578272e-05,
"loss": 2.4522,
"theoretical_loss": 3.3194287126145596,
"tokens_seen": 3015704576
},
{
"epoch": 0.08,
"learning_rate": 9.272882805816938e-05,
"loss": 2.4546,
"theoretical_loss": 3.3194176320236144,
"tokens_seen": 3015835648
},
{
"epoch": 0.08,
"learning_rate": 9.268605645851154e-05,
"loss": 2.6088,
"theoretical_loss": 3.31940655204907,
"tokens_seen": 3015966720
},
{
"epoch": 0.08,
"learning_rate": 9.264328485885372e-05,
"loss": 2.4454,
"theoretical_loss": 3.319395472690865,
"tokens_seen": 3016097792
},
{
"epoch": 0.08,
"learning_rate": 9.26005132591959e-05,
"loss": 2.3876,
"theoretical_loss": 3.3193843939489382,
"tokens_seen": 3016228864
},
{
"epoch": 0.08,
"learning_rate": 9.255774165953807e-05,
"loss": 2.4971,
"theoretical_loss": 3.319373315823229,
"tokens_seen": 3016359936
},
{
"epoch": 0.08,
"learning_rate": 9.251497005988024e-05,
"loss": 2.5668,
"theoretical_loss": 3.3193622383136763,
"tokens_seen": 3016491008
},
{
"epoch": 0.08,
"objective/train/docs_used": 1654644,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.148563861846924,
"objective/train/theoretical_loss": 3.3193511614202187,
"objective/train/tokens_used": 46673376,
"theoretical_loss": 3.3193511614202187,
"tokens_seen": 3016622080
},
{
"epoch": 0.08,
"learning_rate": 9.247219846022241e-05,
"loss": 2.3435,
"theoretical_loss": 3.3193511614202187,
"tokens_seen": 3016622080
},
{
"epoch": 0.09,
"learning_rate": 9.242942686056459e-05,
"loss": 2.5671,
"theoretical_loss": 3.319340085142796,
"tokens_seen": 3016753152
},
{
"epoch": 0.09,
"learning_rate": 9.238665526090677e-05,
"loss": 2.455,
"theoretical_loss": 3.319329009481346,
"tokens_seen": 3016884224
},
{
"epoch": 0.09,
"learning_rate": 9.234388366124893e-05,
"loss": 2.5706,
"theoretical_loss": 3.3193179344358086,
"tokens_seen": 3017015296
},
{
"epoch": 0.09,
"learning_rate": 9.230111206159111e-05,
"loss": 2.3164,
"theoretical_loss": 3.319306860006122,
"tokens_seen": 3017146368
},
{
"epoch": 0.09,
"learning_rate": 9.225834046193327e-05,
"loss": 2.317,
"theoretical_loss": 3.319295786192226,
"tokens_seen": 3017277440
},
{
"epoch": 0.09,
"learning_rate": 9.221556886227547e-05,
"loss": 2.3955,
"theoretical_loss": 3.319284712994059,
"tokens_seen": 3017408512
},
{
"epoch": 0.09,
"learning_rate": 9.217279726261763e-05,
"loss": 2.4648,
"theoretical_loss": 3.3192736404115606,
"tokens_seen": 3017539584
},
{
"epoch": 0.09,
"learning_rate": 9.21300256629598e-05,
"loss": 2.3474,
"theoretical_loss": 3.3192625684446693,
"tokens_seen": 3017670656
},
{
"epoch": 0.09,
"learning_rate": 9.208725406330197e-05,
"loss": 2.2225,
"theoretical_loss": 3.3192514970933242,
"tokens_seen": 3017801728
},
{
"epoch": 0.09,
"learning_rate": 9.204448246364414e-05,
"loss": 2.4497,
"theoretical_loss": 3.319240426357465,
"tokens_seen": 3017932800
},
{
"epoch": 0.09,
"learning_rate": 9.200171086398632e-05,
"loss": 2.4471,
"theoretical_loss": 3.31922935623703,
"tokens_seen": 3018063872
},
{
"epoch": 0.09,
"learning_rate": 9.19589392643285e-05,
"loss": 2.3816,
"theoretical_loss": 3.3192182867319584,
"tokens_seen": 3018194944
},
{
"epoch": 0.09,
"objective/train/docs_used": 1655335,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.1034493446350098,
"objective/train/theoretical_loss": 3.319212752210165,
"objective/train/tokens_used": 48311776,
"theoretical_loss": 3.319212752210165,
"tokens_seen": 3018260480
},
{
"epoch": 0.09,
"learning_rate": 9.191616766467066e-05,
"loss": 2.3561,
"theoretical_loss": 3.3192072178421896,
"tokens_seen": 3018326016
},
{
"epoch": 0.09,
"learning_rate": 9.187339606501284e-05,
"loss": 2.4573,
"theoretical_loss": 3.319196149567662,
"tokens_seen": 3018457088
},
{
"epoch": 0.09,
"learning_rate": 9.1830624465355e-05,
"loss": 2.3837,
"theoretical_loss": 3.3191850819083157,
"tokens_seen": 3018588160
},
{
"epoch": 0.09,
"learning_rate": 9.178785286569718e-05,
"loss": 2.4923,
"theoretical_loss": 3.319174014864089,
"tokens_seen": 3018719232
},
{
"epoch": 0.09,
"learning_rate": 9.174508126603936e-05,
"loss": 2.6121,
"theoretical_loss": 3.319162948434921,
"tokens_seen": 3018850304
},
{
"epoch": 0.09,
"learning_rate": 9.170230966638152e-05,
"loss": 2.5103,
"theoretical_loss": 3.319151882620752,
"tokens_seen": 3018981376
},
{
"epoch": 0.09,
"learning_rate": 9.16595380667237e-05,
"loss": 2.3644,
"theoretical_loss": 3.3191408174215193,
"tokens_seen": 3019112448
},
{
"epoch": 0.09,
"learning_rate": 9.161676646706587e-05,
"loss": 2.5582,
"theoretical_loss": 3.3191297528371635,
"tokens_seen": 3019243520
},
{
"epoch": 0.09,
"learning_rate": 9.157399486740804e-05,
"loss": 2.3977,
"theoretical_loss": 3.319118688867623,
"tokens_seen": 3019374592
},
{
"epoch": 0.09,
"learning_rate": 9.153122326775022e-05,
"loss": 2.3253,
"theoretical_loss": 3.319107625512837,
"tokens_seen": 3019505664
},
{
"epoch": 0.09,
"learning_rate": 9.148845166809239e-05,
"loss": 2.5582,
"theoretical_loss": 3.3190965627727445,
"tokens_seen": 3019636736
},
{
"epoch": 0.09,
"learning_rate": 9.144568006843457e-05,
"loss": 2.402,
"theoretical_loss": 3.3190855006472857,
"tokens_seen": 3019767808
},
{
"epoch": 0.09,
"objective/train/docs_used": 1656670,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.027528762817383,
"objective/train/theoretical_loss": 3.3190744391363984,
"objective/train/tokens_used": 49950176,
"theoretical_loss": 3.3190744391363984,
"tokens_seen": 3019898880
},
{
"epoch": 0.1,
"learning_rate": 9.140290846877674e-05,
"loss": 2.4387,
"theoretical_loss": 3.3190744391363984,
"tokens_seen": 3019898880
},
{
"epoch": 0.1,
"learning_rate": 9.136013686911891e-05,
"loss": 2.5059,
"theoretical_loss": 3.3190633782400223,
"tokens_seen": 3020029952
},
{
"epoch": 0.1,
"learning_rate": 9.131736526946109e-05,
"loss": 2.3856,
"theoretical_loss": 3.3190523179580973,
"tokens_seen": 3020161024
},
{
"epoch": 0.1,
"learning_rate": 9.127459366980325e-05,
"loss": 2.5568,
"theoretical_loss": 3.3190412582905617,
"tokens_seen": 3020292096
},
{
"epoch": 0.1,
"learning_rate": 9.123182207014542e-05,
"loss": 2.5093,
"theoretical_loss": 3.319030199237355,
"tokens_seen": 3020423168
},
{
"epoch": 0.1,
"learning_rate": 9.118905047048761e-05,
"loss": 2.3476,
"theoretical_loss": 3.3190191407984164,
"tokens_seen": 3020554240
},
{
"epoch": 0.1,
"learning_rate": 9.114627887082977e-05,
"loss": 2.4233,
"theoretical_loss": 3.3190080829736854,
"tokens_seen": 3020685312
},
{
"epoch": 0.1,
"learning_rate": 9.110350727117195e-05,
"loss": 2.4302,
"theoretical_loss": 3.318997025763101,
"tokens_seen": 3020816384
},
{
"epoch": 0.1,
"learning_rate": 9.106073567151412e-05,
"loss": 2.5779,
"theoretical_loss": 3.318985969166602,
"tokens_seen": 3020947456
},
{
"epoch": 0.1,
"learning_rate": 9.101796407185628e-05,
"loss": 2.4611,
"theoretical_loss": 3.3189749131841286,
"tokens_seen": 3021078528
},
{
"epoch": 0.1,
"learning_rate": 9.097519247219847e-05,
"loss": 2.3387,
"theoretical_loss": 3.3189638578156195,
"tokens_seen": 3021209600
},
{
"epoch": 0.1,
"learning_rate": 9.093242087254064e-05,
"loss": 2.6853,
"theoretical_loss": 3.3189528030610136,
"tokens_seen": 3021340672
},
{
"epoch": 0.1,
"learning_rate": 9.088964927288282e-05,
"loss": 2.4887,
"theoretical_loss": 3.318941748920251,
"tokens_seen": 3021471744
},
{
"epoch": 0.1,
"objective/train/docs_used": 1657192,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7123944759368896,
"objective/train/theoretical_loss": 3.318936222080042,
"objective/train/tokens_used": 51588576,
"theoretical_loss": 3.318936222080042,
"tokens_seen": 3021537280
},
{
"epoch": 0.1,
"learning_rate": 9.084687767322498e-05,
"loss": 2.4619,
"theoretical_loss": 3.318930695393271,
"tokens_seen": 3021602816
},
{
"epoch": 0.1,
"learning_rate": 9.080410607356715e-05,
"loss": 2.4821,
"theoretical_loss": 3.3189196424800116,
"tokens_seen": 3021733888
},
{
"epoch": 0.1,
"learning_rate": 9.076133447390934e-05,
"loss": 2.4471,
"theoretical_loss": 3.3189085901804134,
"tokens_seen": 3021864960
},
{
"epoch": 0.1,
"learning_rate": 9.07185628742515e-05,
"loss": 2.3988,
"theoretical_loss": 3.3188975384944155,
"tokens_seen": 3021996032
},
{
"epoch": 0.1,
"learning_rate": 9.067579127459367e-05,
"loss": 2.469,
"theoretical_loss": 3.318886487421957,
"tokens_seen": 3022127104
},
{
"epoch": 0.1,
"learning_rate": 9.063301967493585e-05,
"loss": 2.5398,
"theoretical_loss": 3.318875436962977,
"tokens_seen": 3022258176
},
{
"epoch": 0.1,
"learning_rate": 9.059024807527801e-05,
"loss": 2.3125,
"theoretical_loss": 3.3188643871174155,
"tokens_seen": 3022389248
},
{
"epoch": 0.1,
"learning_rate": 9.05474764756202e-05,
"loss": 2.3502,
"theoretical_loss": 3.318853337885211,
"tokens_seen": 3022520320
},
{
"epoch": 0.1,
"learning_rate": 9.050470487596237e-05,
"loss": 2.4073,
"theoretical_loss": 3.318842289266304,
"tokens_seen": 3022651392
},
{
"epoch": 0.1,
"learning_rate": 9.046193327630453e-05,
"loss": 2.3899,
"theoretical_loss": 3.3188312412606327,
"tokens_seen": 3022782464
},
{
"epoch": 0.1,
"learning_rate": 9.041916167664671e-05,
"loss": 2.3516,
"theoretical_loss": 3.3188201938681368,
"tokens_seen": 3022913536
},
{
"epoch": 0.11,
"learning_rate": 9.037639007698889e-05,
"loss": 2.3968,
"theoretical_loss": 3.318809147088756,
"tokens_seen": 3023044608
},
{
"epoch": 0.11,
"objective/train/docs_used": 1658380,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6505117416381836,
"objective/train/theoretical_loss": 3.3187981009224297,
"objective/train/tokens_used": 53226976,
"theoretical_loss": 3.3187981009224297,
"tokens_seen": 3023175680
},
{
"epoch": 0.11,
"learning_rate": 9.033361847733107e-05,
"loss": 2.4709,
"theoretical_loss": 3.3187981009224297,
"tokens_seen": 3023175680
},
{
"epoch": 0.11,
"learning_rate": 9.029084687767323e-05,
"loss": 2.3951,
"theoretical_loss": 3.3187870553690972,
"tokens_seen": 3023306752
},
{
"epoch": 0.11,
"learning_rate": 9.02480752780154e-05,
"loss": 2.5401,
"theoretical_loss": 3.3187760104286976,
"tokens_seen": 3023437824
},
{
"epoch": 0.11,
"learning_rate": 9.020530367835757e-05,
"loss": 2.4571,
"theoretical_loss": 3.3187649661011704,
"tokens_seen": 3023568896
},
{
"epoch": 0.11,
"learning_rate": 9.016253207869975e-05,
"loss": 2.362,
"theoretical_loss": 3.3187539223864557,
"tokens_seen": 3023699968
},
{
"epoch": 0.11,
"learning_rate": 9.011976047904193e-05,
"loss": 2.4072,
"theoretical_loss": 3.318742879284492,
"tokens_seen": 3023831040
},
{
"epoch": 0.11,
"learning_rate": 9.00769888793841e-05,
"loss": 2.3097,
"theoretical_loss": 3.3187318367952194,
"tokens_seen": 3023962112
},
{
"epoch": 0.11,
"learning_rate": 9.003421727972626e-05,
"loss": 2.3313,
"theoretical_loss": 3.318720794918577,
"tokens_seen": 3024093184
},
{
"epoch": 0.11,
"learning_rate": 8.999144568006844e-05,
"loss": 2.3635,
"theoretical_loss": 3.3187097536545047,
"tokens_seen": 3024224256
},
{
"epoch": 0.11,
"learning_rate": 8.994867408041062e-05,
"loss": 2.4897,
"theoretical_loss": 3.3186987130029415,
"tokens_seen": 3024355328
},
{
"epoch": 0.11,
"learning_rate": 8.990590248075278e-05,
"loss": 2.5518,
"theoretical_loss": 3.3186876729638266,
"tokens_seen": 3024486400
},
{
"epoch": 0.11,
"learning_rate": 8.986313088109496e-05,
"loss": 2.4191,
"theoretical_loss": 3.3186766335371005,
"tokens_seen": 3024617472
},
{
"epoch": 0.11,
"learning_rate": 8.982035928143712e-05,
"loss": 2.4865,
"theoretical_loss": 3.318665594722702,
"tokens_seen": 3024748544
},
{
"epoch": 0.11,
"objective/train/docs_used": 1658975,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6697089672088623,
"objective/train/theoretical_loss": 3.3186600755451066,
"objective/train/tokens_used": 54865376,
"theoretical_loss": 3.3186600755451066,
"tokens_seen": 3024814080
},
{
"epoch": 0.11,
"learning_rate": 8.97775876817793e-05,
"loss": 2.4613,
"theoretical_loss": 3.3186545565205705,
"tokens_seen": 3024879616
},
{
"epoch": 0.11,
"learning_rate": 8.973481608212148e-05,
"loss": 2.4463,
"theoretical_loss": 3.318643518930646,
"tokens_seen": 3025010688
},
{
"epoch": 0.11,
"learning_rate": 8.969204448246365e-05,
"loss": 2.3875,
"theoretical_loss": 3.3186324819528674,
"tokens_seen": 3025141760
},
{
"epoch": 0.11,
"learning_rate": 8.964927288280582e-05,
"loss": 2.4101,
"theoretical_loss": 3.318621445587175,
"tokens_seen": 3025272832
},
{
"epoch": 0.11,
"learning_rate": 8.960650128314799e-05,
"loss": 2.3602,
"theoretical_loss": 3.3186104098335076,
"tokens_seen": 3025403904
},
{
"epoch": 0.11,
"learning_rate": 8.956372968349017e-05,
"loss": 2.41,
"theoretical_loss": 3.318599374691805,
"tokens_seen": 3025534976
},
{
"epoch": 0.11,
"learning_rate": 8.952095808383235e-05,
"loss": 2.4345,
"theoretical_loss": 3.318588340162007,
"tokens_seen": 3025666048
},
{
"epoch": 0.11,
"learning_rate": 8.947818648417451e-05,
"loss": 2.5437,
"theoretical_loss": 3.3185773062440527,
"tokens_seen": 3025797120
},
{
"epoch": 0.11,
"learning_rate": 8.943541488451669e-05,
"loss": 2.3191,
"theoretical_loss": 3.3185662729378826,
"tokens_seen": 3025928192
},
{
"epoch": 0.12,
"learning_rate": 8.939264328485885e-05,
"loss": 2.3322,
"theoretical_loss": 3.318555240243435,
"tokens_seen": 3026059264
},
{
"epoch": 0.12,
"learning_rate": 8.934987168520103e-05,
"loss": 2.3374,
"theoretical_loss": 3.3185442081606507,
"tokens_seen": 3026190336
},
{
"epoch": 0.12,
"learning_rate": 8.930710008554321e-05,
"loss": 2.4206,
"theoretical_loss": 3.3185331766894683,
"tokens_seen": 3026321408
},
{
"epoch": 0.12,
"objective/train/docs_used": 1660130,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2490313053131104,
"objective/train/theoretical_loss": 3.318522145829828,
"objective/train/tokens_used": 56503776,
"theoretical_loss": 3.318522145829828,
"tokens_seen": 3026452480
},
{
"epoch": 0.12,
"learning_rate": 8.926432848588537e-05,
"loss": 2.2883,
"theoretical_loss": 3.318522145829828,
"tokens_seen": 3026452480
},
{
"epoch": 0.12,
"learning_rate": 8.922155688622755e-05,
"loss": 2.4436,
"theoretical_loss": 3.318511115581669,
"tokens_seen": 3026583552
},
{
"epoch": 0.12,
"learning_rate": 8.917878528656972e-05,
"loss": 2.4165,
"theoretical_loss": 3.3185000859449314,
"tokens_seen": 3026714624
},
{
"epoch": 0.12,
"learning_rate": 8.91360136869119e-05,
"loss": 2.3836,
"theoretical_loss": 3.3184890569195544,
"tokens_seen": 3026845696
},
{
"epoch": 0.12,
"learning_rate": 8.909324208725407e-05,
"loss": 2.3455,
"theoretical_loss": 3.3184780285054782,
"tokens_seen": 3026976768
},
{
"epoch": 0.12,
"learning_rate": 8.905047048759624e-05,
"loss": 2.4005,
"theoretical_loss": 3.318467000702642,
"tokens_seen": 3027107840
},
{
"epoch": 0.12,
"learning_rate": 8.900769888793842e-05,
"loss": 2.425,
"theoretical_loss": 3.3184559735109853,
"tokens_seen": 3027238912
},
{
"epoch": 0.12,
"learning_rate": 8.896492728828058e-05,
"loss": 2.4123,
"theoretical_loss": 3.3184449469304482,
"tokens_seen": 3027369984
},
{
"epoch": 0.12,
"learning_rate": 8.892215568862276e-05,
"loss": 2.4926,
"theoretical_loss": 3.3184339209609703,
"tokens_seen": 3027501056
},
{
"epoch": 0.12,
"learning_rate": 8.887938408896494e-05,
"loss": 2.2946,
"theoretical_loss": 3.318422895602491,
"tokens_seen": 3027632128
},
{
"epoch": 0.12,
"learning_rate": 8.88366124893071e-05,
"loss": 2.2669,
"theoretical_loss": 3.31841187085495,
"tokens_seen": 3027763200
},
{
"epoch": 0.12,
"learning_rate": 8.879384088964928e-05,
"loss": 2.2708,
"theoretical_loss": 3.318400846718288,
"tokens_seen": 3027894272
},
{
"epoch": 0.12,
"learning_rate": 8.875106928999145e-05,
"loss": 2.3823,
"theoretical_loss": 3.318389823192443,
"tokens_seen": 3028025344
},
{
"epoch": 0.12,
"objective/train/docs_used": 1661412,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.202181100845337,
"objective/train/theoretical_loss": 3.3183843116585585,
"objective/train/tokens_used": 58142176,
"theoretical_loss": 3.3183843116585585,
"tokens_seen": 3028090880
},
{
"epoch": 0.12,
"learning_rate": 8.870829769033362e-05,
"loss": 2.4362,
"theoretical_loss": 3.318378800277356,
"tokens_seen": 3028156416
},
{
"epoch": 0.12,
"learning_rate": 8.86655260906758e-05,
"loss": 2.3161,
"theoretical_loss": 3.3183677779729663,
"tokens_seen": 3028287488
},
{
"epoch": 0.12,
"learning_rate": 8.862275449101797e-05,
"loss": 2.4133,
"theoretical_loss": 3.3183567562792136,
"tokens_seen": 3028418560
},
{
"epoch": 0.12,
"learning_rate": 8.857998289136013e-05,
"loss": 2.464,
"theoretical_loss": 3.3183457351960377,
"tokens_seen": 3028549632
},
{
"epoch": 0.12,
"learning_rate": 8.853721129170231e-05,
"loss": 2.4854,
"theoretical_loss": 3.3183347147233784,
"tokens_seen": 3028680704
},
{
"epoch": 0.12,
"learning_rate": 8.849443969204449e-05,
"loss": 2.2685,
"theoretical_loss": 3.3183236948611756,
"tokens_seen": 3028811776
},
{
"epoch": 0.12,
"learning_rate": 8.845166809238667e-05,
"loss": 2.1281,
"theoretical_loss": 3.3183126756093686,
"tokens_seen": 3028942848
},
{
"epoch": 0.12,
"learning_rate": 8.840889649272883e-05,
"loss": 2.3911,
"theoretical_loss": 3.318301656967898,
"tokens_seen": 3029073920
},
{
"epoch": 0.13,
"learning_rate": 8.8366124893071e-05,
"loss": 2.2723,
"theoretical_loss": 3.3182906389367024,
"tokens_seen": 3029204992
},
{
"epoch": 0.13,
"learning_rate": 8.832335329341318e-05,
"loss": 2.2601,
"theoretical_loss": 3.3182796215157224,
"tokens_seen": 3029336064
},
{
"epoch": 0.13,
"learning_rate": 8.828058169375535e-05,
"loss": 2.3874,
"theoretical_loss": 3.318268604704898,
"tokens_seen": 3029467136
},
{
"epoch": 0.13,
"learning_rate": 8.823781009409753e-05,
"loss": 2.3443,
"theoretical_loss": 3.318257588504168,
"tokens_seen": 3029598208
},
{
"epoch": 0.13,
"objective/train/docs_used": 1662079,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.219090700149536,
"objective/train/theoretical_loss": 3.318246572913474,
"objective/train/tokens_used": 59780576,
"theoretical_loss": 3.318246572913474,
"tokens_seen": 3029729280
},
{
"epoch": 0.13,
"learning_rate": 8.81950384944397e-05,
"loss": 2.4424,
"theoretical_loss": 3.318246572913474,
"tokens_seen": 3029729280
},
{
"epoch": 0.13,
"learning_rate": 8.815226689478186e-05,
"loss": 2.4172,
"theoretical_loss": 3.318235557932754,
"tokens_seen": 3029860352
},
{
"epoch": 0.13,
"learning_rate": 8.810949529512404e-05,
"loss": 2.2985,
"theoretical_loss": 3.318224543561948,
"tokens_seen": 3029991424
},
{
"epoch": 0.13,
"learning_rate": 8.806672369546622e-05,
"loss": 2.5111,
"theoretical_loss": 3.3182135298009974,
"tokens_seen": 3030122496
},
{
"epoch": 0.13,
"learning_rate": 8.80239520958084e-05,
"loss": 2.481,
"theoretical_loss": 3.3182025166498406,
"tokens_seen": 3030253568
},
{
"epoch": 0.13,
"learning_rate": 8.798118049615056e-05,
"loss": 2.2891,
"theoretical_loss": 3.3181915041084182,
"tokens_seen": 3030384640
},
{
"epoch": 0.13,
"learning_rate": 8.793840889649273e-05,
"loss": 2.2625,
"theoretical_loss": 3.3181804921766695,
"tokens_seen": 3030515712
},
{
"epoch": 0.13,
"learning_rate": 8.78956372968349e-05,
"loss": 2.6118,
"theoretical_loss": 3.318169480854535,
"tokens_seen": 3030646784
},
{
"epoch": 0.13,
"learning_rate": 8.785286569717708e-05,
"loss": 2.3715,
"theoretical_loss": 3.318158470141954,
"tokens_seen": 3030777856
},
{
"epoch": 0.13,
"learning_rate": 8.781009409751925e-05,
"loss": 2.4212,
"theoretical_loss": 3.3181474600388667,
"tokens_seen": 3030908928
},
{
"epoch": 0.13,
"learning_rate": 8.776732249786143e-05,
"loss": 2.1904,
"theoretical_loss": 3.318136450545213,
"tokens_seen": 3031040000
},
{
"epoch": 0.13,
"learning_rate": 8.772455089820359e-05,
"loss": 2.3964,
"theoretical_loss": 3.318125441660933,
"tokens_seen": 3031171072
},
{
"epoch": 0.13,
"learning_rate": 8.768177929854577e-05,
"loss": 2.2051,
"theoretical_loss": 3.318114433385966,
"tokens_seen": 3031302144
},
{
"epoch": 0.13,
"objective/train/docs_used": 1662642,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.347285270690918,
"objective/train/theoretical_loss": 3.3181089294769563,
"objective/train/tokens_used": 61418976,
"theoretical_loss": 3.3181089294769563,
"tokens_seen": 3031367680
},
{
"epoch": 0.13,
"learning_rate": 8.763900769888795e-05,
"loss": 2.4334,
"theoretical_loss": 3.3181034257202526,
"tokens_seen": 3031433216
},
{
"epoch": 0.13,
"learning_rate": 8.759623609923011e-05,
"loss": 2.3486,
"theoretical_loss": 3.318092418663732,
"tokens_seen": 3031564288
},
{
"epoch": 0.13,
"learning_rate": 8.755346449957229e-05,
"loss": 2.4626,
"theoretical_loss": 3.3180814122163453,
"tokens_seen": 3031695360
},
{
"epoch": 0.13,
"learning_rate": 8.751069289991445e-05,
"loss": 2.4253,
"theoretical_loss": 3.3180704063780313,
"tokens_seen": 3031826432
},
{
"epoch": 0.13,
"learning_rate": 8.746792130025663e-05,
"loss": 2.4214,
"theoretical_loss": 3.318059401148731,
"tokens_seen": 3031957504
},
{
"epoch": 0.13,
"learning_rate": 8.742514970059881e-05,
"loss": 2.536,
"theoretical_loss": 3.3180483965283836,
"tokens_seen": 3032088576
},
{
"epoch": 0.14,
"learning_rate": 8.738237810094098e-05,
"loss": 2.3038,
"theoretical_loss": 3.318037392516929,
"tokens_seen": 3032219648
},
{
"epoch": 0.14,
"learning_rate": 8.733960650128315e-05,
"loss": 2.3862,
"theoretical_loss": 3.318026389114308,
"tokens_seen": 3032350720
},
{
"epoch": 0.14,
"learning_rate": 8.729683490162532e-05,
"loss": 2.2624,
"theoretical_loss": 3.3180153863204596,
"tokens_seen": 3032481792
},
{
"epoch": 0.14,
"learning_rate": 8.72540633019675e-05,
"loss": 2.4936,
"theoretical_loss": 3.3180043841353246,
"tokens_seen": 3032612864
},
{
"epoch": 0.14,
"learning_rate": 8.721129170230968e-05,
"loss": 2.384,
"theoretical_loss": 3.317993382558843,
"tokens_seen": 3032743936
},
{
"epoch": 0.14,
"learning_rate": 8.716852010265184e-05,
"loss": 2.4542,
"theoretical_loss": 3.317982381590954,
"tokens_seen": 3032875008
},
{
"epoch": 0.14,
"objective/train/docs_used": 1663221,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7515668869018555,
"objective/train/theoretical_loss": 3.3179713812315983,
"objective/train/tokens_used": 63057376,
"theoretical_loss": 3.3179713812315983,
"tokens_seen": 3033006080
},
{
"epoch": 0.14,
"learning_rate": 8.712574850299402e-05,
"loss": 2.4188,
"theoretical_loss": 3.3179713812315983,
"tokens_seen": 3033006080
},
{
"epoch": 0.14,
"learning_rate": 8.708297690333618e-05,
"loss": 2.5102,
"theoretical_loss": 3.317960381480716,
"tokens_seen": 3033137152
},
{
"epoch": 0.14,
"learning_rate": 8.704020530367836e-05,
"loss": 2.39,
"theoretical_loss": 3.317949382338247,
"tokens_seen": 3033268224
},
{
"epoch": 0.14,
"learning_rate": 8.699743370402054e-05,
"loss": 2.3996,
"theoretical_loss": 3.3179383838041314,
"tokens_seen": 3033399296
},
{
"epoch": 0.14,
"learning_rate": 8.69546621043627e-05,
"loss": 2.6354,
"theoretical_loss": 3.317927385878309,
"tokens_seen": 3033530368
},
{
"epoch": 0.14,
"learning_rate": 8.691189050470488e-05,
"loss": 2.4611,
"theoretical_loss": 3.31791638856072,
"tokens_seen": 3033661440
},
{
"epoch": 0.14,
"learning_rate": 8.686911890504705e-05,
"loss": 2.4918,
"theoretical_loss": 3.317905391851305,
"tokens_seen": 3033792512
},
{
"epoch": 0.14,
"learning_rate": 8.682634730538923e-05,
"loss": 2.4662,
"theoretical_loss": 3.3178943957500033,
"tokens_seen": 3033923584
},
{
"epoch": 0.14,
"learning_rate": 8.67835757057314e-05,
"loss": 2.5224,
"theoretical_loss": 3.317883400256756,
"tokens_seen": 3034054656
},
{
"epoch": 0.14,
"learning_rate": 8.674080410607357e-05,
"loss": 2.5573,
"theoretical_loss": 3.3178724053715016,
"tokens_seen": 3034185728
},
{
"epoch": 0.14,
"learning_rate": 8.669803250641575e-05,
"loss": 2.4771,
"theoretical_loss": 3.3178614110941815,
"tokens_seen": 3034316800
},
{
"epoch": 0.14,
"learning_rate": 8.665526090675791e-05,
"loss": 2.6235,
"theoretical_loss": 3.3178504174247356,
"tokens_seen": 3034447872
},
{
"epoch": 0.14,
"learning_rate": 8.661248930710009e-05,
"loss": 2.5638,
"theoretical_loss": 3.317839424363104,
"tokens_seen": 3034578944
},
{
"epoch": 0.14,
"objective/train/docs_used": 1664363,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.340308666229248,
"objective/train/theoretical_loss": 3.3178339280602,
"objective/train/tokens_used": 64695776,
"theoretical_loss": 3.3178339280602,
"tokens_seen": 3034644480
},
{
"epoch": 0.14,
"learning_rate": 8.656971770744227e-05,
"loss": 2.4057,
"theoretical_loss": 3.317828431909227,
"tokens_seen": 3034710016
},
{
"epoch": 0.14,
"learning_rate": 8.652694610778443e-05,
"loss": 2.3668,
"theoretical_loss": 3.3178174400630445,
"tokens_seen": 3034841088
},
{
"epoch": 0.14,
"learning_rate": 8.64841745081266e-05,
"loss": 2.616,
"theoretical_loss": 3.3178064488244967,
"tokens_seen": 3034972160
},
{
"epoch": 0.14,
"learning_rate": 8.644140290846878e-05,
"loss": 2.402,
"theoretical_loss": 3.3177954581935234,
"tokens_seen": 3035103232
},
{
"epoch": 0.14,
"learning_rate": 8.639863130881095e-05,
"loss": 2.3662,
"theoretical_loss": 3.317784468170066,
"tokens_seen": 3035234304
},
{
"epoch": 0.15,
"learning_rate": 8.635585970915313e-05,
"loss": 2.3539,
"theoretical_loss": 3.317773478754063,
"tokens_seen": 3035365376
},
{
"epoch": 0.15,
"learning_rate": 8.63130881094953e-05,
"loss": 2.4253,
"theoretical_loss": 3.317762489945456,
"tokens_seen": 3035496448
},
{
"epoch": 0.15,
"learning_rate": 8.627031650983746e-05,
"loss": 2.3829,
"theoretical_loss": 3.3177515017441843,
"tokens_seen": 3035627520
},
{
"epoch": 0.15,
"learning_rate": 8.622754491017964e-05,
"loss": 2.5298,
"theoretical_loss": 3.3177405141501883,
"tokens_seen": 3035758592
},
{
"epoch": 0.15,
"learning_rate": 8.618477331052182e-05,
"loss": 2.4475,
"theoretical_loss": 3.317729527163409,
"tokens_seen": 3035889664
},
{
"epoch": 0.15,
"learning_rate": 8.6142001710864e-05,
"loss": 2.7431,
"theoretical_loss": 3.3177185407837855,
"tokens_seen": 3036020736
},
{
"epoch": 0.15,
"learning_rate": 8.609923011120616e-05,
"loss": 2.4028,
"theoretical_loss": 3.3177075550112587,
"tokens_seen": 3036151808
},
{
"epoch": 0.15,
"objective/train/docs_used": 1664891,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.417335033416748,
"objective/train/theoretical_loss": 3.3176965698457686,
"objective/train/tokens_used": 66334176,
"theoretical_loss": 3.3176965698457686,
"tokens_seen": 3036282880
},
{
"epoch": 0.15,
"learning_rate": 8.605645851154833e-05,
"loss": 2.4356,
"theoretical_loss": 3.3176965698457686,
"tokens_seen": 3036282880
},
{
"epoch": 0.15,
"learning_rate": 8.601368691189052e-05,
"loss": 2.4589,
"theoretical_loss": 3.3176855852872555,
"tokens_seen": 3036413952
},
{
"epoch": 0.15,
"learning_rate": 8.597091531223268e-05,
"loss": 2.4168,
"theoretical_loss": 3.3176746013356597,
"tokens_seen": 3036545024
},
{
"epoch": 0.15,
"learning_rate": 8.592814371257485e-05,
"loss": 2.3732,
"theoretical_loss": 3.317663617990922,
"tokens_seen": 3036676096
},
{
"epoch": 0.15,
"learning_rate": 8.588537211291703e-05,
"loss": 2.3843,
"theoretical_loss": 3.3176526352529816,
"tokens_seen": 3036807168
},
{
"epoch": 0.15,
"learning_rate": 8.584260051325919e-05,
"loss": 2.3937,
"theoretical_loss": 3.31764165312178,
"tokens_seen": 3036938240
},
{
"epoch": 0.15,
"learning_rate": 8.579982891360138e-05,
"loss": 2.4782,
"theoretical_loss": 3.3176306715972563,
"tokens_seen": 3037069312
},
{
"epoch": 0.15,
"learning_rate": 8.575705731394355e-05,
"loss": 2.5745,
"theoretical_loss": 3.3176196906793516,
"tokens_seen": 3037200384
},
{
"epoch": 0.15,
"learning_rate": 8.571428571428571e-05,
"loss": 2.4332,
"theoretical_loss": 3.3176087103680056,
"tokens_seen": 3037331456
},
{
"epoch": 0.15,
"learning_rate": 8.567151411462789e-05,
"loss": 2.4773,
"theoretical_loss": 3.3175977306631594,
"tokens_seen": 3037462528
},
{
"epoch": 0.15,
"learning_rate": 8.562874251497006e-05,
"loss": 2.4218,
"theoretical_loss": 3.3175867515647526,
"tokens_seen": 3037593600
},
{
"epoch": 0.15,
"learning_rate": 8.558597091531225e-05,
"loss": 2.3285,
"theoretical_loss": 3.317575773072726,
"tokens_seen": 3037724672
},
{
"epoch": 0.15,
"learning_rate": 8.554319931565441e-05,
"loss": 2.2487,
"theoretical_loss": 3.3175647951870197,
"tokens_seen": 3037855744
},
{
"epoch": 0.15,
"objective/train/docs_used": 1665791,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.037510395050049,
"objective/train/theoretical_loss": 3.317559306471518,
"objective/train/tokens_used": 67972576,
"theoretical_loss": 3.317559306471518,
"tokens_seen": 3037921280
},
{
"epoch": 0.15,
"learning_rate": 8.550042771599658e-05,
"loss": 2.4374,
"theoretical_loss": 3.3175538179075743,
"tokens_seen": 3037986816
},
{
"epoch": 0.15,
"learning_rate": 8.545765611633876e-05,
"loss": 2.3843,
"theoretical_loss": 3.31754284123433,
"tokens_seen": 3038117888
},
{
"epoch": 0.15,
"learning_rate": 8.541488451668092e-05,
"loss": 2.3408,
"theoretical_loss": 3.3175318651672274,
"tokens_seen": 3038248960
},
{
"epoch": 0.15,
"learning_rate": 8.537211291702311e-05,
"loss": 2.45,
"theoretical_loss": 3.3175208897062065,
"tokens_seen": 3038380032
},
{
"epoch": 0.16,
"learning_rate": 8.532934131736528e-05,
"loss": 2.5611,
"theoretical_loss": 3.317509914851208,
"tokens_seen": 3038511104
},
{
"epoch": 0.16,
"learning_rate": 8.528656971770744e-05,
"loss": 2.2891,
"theoretical_loss": 3.3174989406021718,
"tokens_seen": 3038642176
},
{
"epoch": 0.16,
"learning_rate": 8.524379811804962e-05,
"loss": 2.5899,
"theoretical_loss": 3.317487966959039,
"tokens_seen": 3038773248
},
{
"epoch": 0.16,
"learning_rate": 8.520102651839178e-05,
"loss": 2.4568,
"theoretical_loss": 3.3174769939217494,
"tokens_seen": 3038904320
},
{
"epoch": 0.16,
"learning_rate": 8.515825491873396e-05,
"loss": 2.4892,
"theoretical_loss": 3.317466021490244,
"tokens_seen": 3039035392
},
{
"epoch": 0.16,
"learning_rate": 8.511548331907614e-05,
"loss": 2.6673,
"theoretical_loss": 3.3174550496644626,
"tokens_seen": 3039166464
},
{
"epoch": 0.16,
"learning_rate": 8.50727117194183e-05,
"loss": 2.4839,
"theoretical_loss": 3.317444078444346,
"tokens_seen": 3039297536
},
{
"epoch": 0.16,
"learning_rate": 8.502994011976048e-05,
"loss": 2.4649,
"theoretical_loss": 3.317433107829835,
"tokens_seen": 3039428608
},
{
"epoch": 0.16,
"objective/train/docs_used": 1666368,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4885432720184326,
"objective/train/theoretical_loss": 3.31742213782087,
"objective/train/tokens_used": 69610976,
"theoretical_loss": 3.31742213782087,
"tokens_seen": 3039559680
},
{
"epoch": 0.16,
"learning_rate": 8.498716852010266e-05,
"loss": 2.5349,
"theoretical_loss": 3.31742213782087,
"tokens_seen": 3039559680
},
{
"epoch": 0.16,
"learning_rate": 8.494439692044483e-05,
"loss": 2.403,
"theoretical_loss": 3.3174111684173906,
"tokens_seen": 3039690752
},
{
"epoch": 0.16,
"learning_rate": 8.4901625320787e-05,
"loss": 2.5023,
"theoretical_loss": 3.317400199619338,
"tokens_seen": 3039821824
},
{
"epoch": 0.16,
"learning_rate": 8.485885372112917e-05,
"loss": 2.4205,
"theoretical_loss": 3.3173892314266524,
"tokens_seen": 3039952896
},
{
"epoch": 0.16,
"learning_rate": 8.481608212147135e-05,
"loss": 2.5295,
"theoretical_loss": 3.3173782638392746,
"tokens_seen": 3040083968
},
{
"epoch": 0.16,
"learning_rate": 8.477331052181353e-05,
"loss": 2.4369,
"theoretical_loss": 3.3173672968571446,
"tokens_seen": 3040215040
},
{
"epoch": 0.16,
"learning_rate": 8.473053892215569e-05,
"loss": 2.5302,
"theoretical_loss": 3.3173563304802034,
"tokens_seen": 3040346112
},
{
"epoch": 0.16,
"learning_rate": 8.468776732249787e-05,
"loss": 2.486,
"theoretical_loss": 3.3173453647083915,
"tokens_seen": 3040477184
},
{
"epoch": 0.16,
"learning_rate": 8.464499572284003e-05,
"loss": 2.511,
"theoretical_loss": 3.317334399541649,
"tokens_seen": 3040608256
},
{
"epoch": 0.16,
"learning_rate": 8.460222412318221e-05,
"loss": 2.6424,
"theoretical_loss": 3.3173234349799166,
"tokens_seen": 3040739328
},
{
"epoch": 0.16,
"learning_rate": 8.455945252352439e-05,
"loss": 2.4228,
"theoretical_loss": 3.317312471023135,
"tokens_seen": 3040870400
},
{
"epoch": 0.16,
"learning_rate": 8.451668092386656e-05,
"loss": 2.5719,
"theoretical_loss": 3.3173015076712447,
"tokens_seen": 3041001472
},
{
"epoch": 0.16,
"learning_rate": 8.447390932420873e-05,
"loss": 2.5819,
"theoretical_loss": 3.3172905449241865,
"tokens_seen": 3041132544
},
{
"epoch": 0.16,
"objective/train/docs_used": 1667402,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5028133392333984,
"objective/train/theoretical_loss": 3.317285063777451,
"objective/train/tokens_used": 71249376,
"theoretical_loss": 3.317285063777451,
"tokens_seen": 3041198080
},
{
"epoch": 0.16,
"learning_rate": 8.44311377245509e-05,
"loss": 2.5006,
"theoretical_loss": 3.3172795827819,
"tokens_seen": 3041263616
},
{
"epoch": 0.16,
"learning_rate": 8.438836612489306e-05,
"loss": 2.4687,
"theoretical_loss": 3.3172686212443274,
"tokens_seen": 3041394688
},
{
"epoch": 0.17,
"learning_rate": 8.434559452523526e-05,
"loss": 2.5483,
"theoretical_loss": 3.317257660311408,
"tokens_seen": 3041525760
},
{
"epoch": 0.17,
"learning_rate": 8.430282292557742e-05,
"loss": 2.6292,
"theoretical_loss": 3.3172466999830825,
"tokens_seen": 3041656832
},
{
"epoch": 0.17,
"learning_rate": 8.42600513259196e-05,
"loss": 2.4604,
"theoretical_loss": 3.317235740259292,
"tokens_seen": 3041787904
},
{
"epoch": 0.17,
"learning_rate": 8.421727972626176e-05,
"loss": 2.4137,
"theoretical_loss": 3.3172247811399767,
"tokens_seen": 3041918976
},
{
"epoch": 0.17,
"learning_rate": 8.417450812660394e-05,
"loss": 2.5,
"theoretical_loss": 3.317213822625077,
"tokens_seen": 3042050048
},
{
"epoch": 0.17,
"learning_rate": 8.413173652694612e-05,
"loss": 2.5009,
"theoretical_loss": 3.3172028647145346,
"tokens_seen": 3042181120
},
{
"epoch": 0.17,
"learning_rate": 8.408896492728828e-05,
"loss": 2.4612,
"theoretical_loss": 3.317191907408289,
"tokens_seen": 3042312192
},
{
"epoch": 0.17,
"learning_rate": 8.404619332763046e-05,
"loss": 2.4446,
"theoretical_loss": 3.3171809507062817,
"tokens_seen": 3042443264
},
{
"epoch": 0.17,
"learning_rate": 8.400342172797263e-05,
"loss": 2.5055,
"theoretical_loss": 3.3171699946084523,
"tokens_seen": 3042574336
},
{
"epoch": 0.17,
"learning_rate": 8.39606501283148e-05,
"loss": 2.5237,
"theoretical_loss": 3.3171590391147427,
"tokens_seen": 3042705408
},
{
"epoch": 0.17,
"objective/train/docs_used": 1668521,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3456153869628906,
"objective/train/theoretical_loss": 3.3171480842250927,
"objective/train/tokens_used": 72887776,
"theoretical_loss": 3.3171480842250927,
"tokens_seen": 3042836480
},
{
"epoch": 0.17,
"learning_rate": 8.391787852865698e-05,
"loss": 2.4685,
"theoretical_loss": 3.3171480842250927,
"tokens_seen": 3042836480
},
{
"epoch": 0.17,
"learning_rate": 8.387510692899915e-05,
"loss": 2.4987,
"theoretical_loss": 3.317137129939443,
"tokens_seen": 3042967552
},
{
"epoch": 0.17,
"learning_rate": 8.383233532934131e-05,
"loss": 2.4009,
"theoretical_loss": 3.3171261762577346,
"tokens_seen": 3043098624
},
{
"epoch": 0.17,
"learning_rate": 8.378956372968349e-05,
"loss": 2.4797,
"theoretical_loss": 3.3171152231799086,
"tokens_seen": 3043229696
},
{
"epoch": 0.17,
"learning_rate": 8.374679213002567e-05,
"loss": 2.4712,
"theoretical_loss": 3.317104270705905,
"tokens_seen": 3043360768
},
{
"epoch": 0.17,
"learning_rate": 8.370402053036785e-05,
"loss": 2.4712,
"theoretical_loss": 3.3170933188356644,
"tokens_seen": 3043491840
},
{
"epoch": 0.17,
"learning_rate": 8.366124893071001e-05,
"loss": 2.5555,
"theoretical_loss": 3.3170823675691277,
"tokens_seen": 3043622912
},
{
"epoch": 0.17,
"learning_rate": 8.361847733105218e-05,
"loss": 2.5155,
"theoretical_loss": 3.317071416906236,
"tokens_seen": 3043753984
},
{
"epoch": 0.17,
"learning_rate": 8.357570573139436e-05,
"loss": 2.4821,
"theoretical_loss": 3.3170604668469297,
"tokens_seen": 3043885056
},
{
"epoch": 0.17,
"learning_rate": 8.353293413173653e-05,
"loss": 2.5343,
"theoretical_loss": 3.31704951739115,
"tokens_seen": 3044016128
},
{
"epoch": 0.17,
"learning_rate": 8.349016253207871e-05,
"loss": 2.4523,
"theoretical_loss": 3.317038568538837,
"tokens_seen": 3044147200
},
{
"epoch": 0.17,
"learning_rate": 8.344739093242088e-05,
"loss": 2.3806,
"theoretical_loss": 3.317027620289932,
"tokens_seen": 3044278272
},
{
"epoch": 0.17,
"learning_rate": 8.340461933276304e-05,
"loss": 2.5725,
"theoretical_loss": 3.317016672644375,
"tokens_seen": 3044409344
},
{
"epoch": 0.17,
"objective/train/docs_used": 1668980,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.030937671661377,
"objective/train/theoretical_loss": 3.3170111990478337,
"objective/train/tokens_used": 74526176,
"theoretical_loss": 3.3170111990478337,
"tokens_seen": 3044474880
},
{
"epoch": 0.17,
"learning_rate": 8.336184773310522e-05,
"loss": 2.3872,
"theoretical_loss": 3.3170057256021077,
"tokens_seen": 3044540416
},
{
"epoch": 0.18,
"learning_rate": 8.33190761334474e-05,
"loss": 2.497,
"theoretical_loss": 3.3169947791630703,
"tokens_seen": 3044671488
},
{
"epoch": 0.18,
"learning_rate": 8.327630453378958e-05,
"loss": 2.5771,
"theoretical_loss": 3.3169838333272037,
"tokens_seen": 3044802560
},
{
"epoch": 0.18,
"learning_rate": 8.323353293413174e-05,
"loss": 2.4761,
"theoretical_loss": 3.316972888094449,
"tokens_seen": 3044933632
},
{
"epoch": 0.18,
"learning_rate": 8.319076133447391e-05,
"loss": 2.4855,
"theoretical_loss": 3.3169619434647464,
"tokens_seen": 3045064704
},
{
"epoch": 0.18,
"learning_rate": 8.314798973481609e-05,
"loss": 2.3585,
"theoretical_loss": 3.3169509994380375,
"tokens_seen": 3045195776
},
{
"epoch": 0.18,
"learning_rate": 8.310521813515826e-05,
"loss": 2.5985,
"theoretical_loss": 3.3169400560142623,
"tokens_seen": 3045326848
},
{
"epoch": 0.18,
"learning_rate": 8.306244653550043e-05,
"loss": 2.4823,
"theoretical_loss": 3.3169291131933623,
"tokens_seen": 3045457920
},
{
"epoch": 0.18,
"learning_rate": 8.30196749358426e-05,
"loss": 2.4651,
"theoretical_loss": 3.316918170975278,
"tokens_seen": 3045588992
},
{
"epoch": 0.18,
"learning_rate": 8.297690333618477e-05,
"loss": 2.4029,
"theoretical_loss": 3.31690722935995,
"tokens_seen": 3045720064
},
{
"epoch": 0.18,
"learning_rate": 8.293413173652695e-05,
"loss": 2.5912,
"theoretical_loss": 3.3168962883473205,
"tokens_seen": 3045851136
},
{
"epoch": 0.18,
"learning_rate": 8.289136013686913e-05,
"loss": 2.5166,
"theoretical_loss": 3.316885347937329,
"tokens_seen": 3045982208
},
{
"epoch": 0.18,
"objective/train/docs_used": 1670028,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2350833415985107,
"objective/train/theoretical_loss": 3.316874408129916,
"objective/train/tokens_used": 76164576,
"theoretical_loss": 3.316874408129916,
"tokens_seen": 3046113280
},
{
"epoch": 0.18,
"learning_rate": 8.284858853721129e-05,
"loss": 2.5141,
"theoretical_loss": 3.316874408129916,
"tokens_seen": 3046113280
},
{
"epoch": 0.18,
"learning_rate": 8.280581693755347e-05,
"loss": 2.441,
"theoretical_loss": 3.316863468925024,
"tokens_seen": 3046244352
},
{
"epoch": 0.18,
"learning_rate": 8.276304533789564e-05,
"loss": 2.5365,
"theoretical_loss": 3.3168525303225924,
"tokens_seen": 3046375424
},
{
"epoch": 0.18,
"learning_rate": 8.272027373823781e-05,
"loss": 2.6514,
"theoretical_loss": 3.316841592322563,
"tokens_seen": 3046506496
},
{
"epoch": 0.18,
"learning_rate": 8.267750213857999e-05,
"loss": 2.6148,
"theoretical_loss": 3.3168306549248765,
"tokens_seen": 3046637568
},
{
"epoch": 0.18,
"learning_rate": 8.263473053892216e-05,
"loss": 2.4865,
"theoretical_loss": 3.316819718129474,
"tokens_seen": 3046768640
},
{
"epoch": 0.18,
"learning_rate": 8.259195893926434e-05,
"loss": 2.5057,
"theoretical_loss": 3.3168087819362957,
"tokens_seen": 3046899712
},
{
"epoch": 0.18,
"learning_rate": 8.25491873396065e-05,
"loss": 2.4055,
"theoretical_loss": 3.316797846345283,
"tokens_seen": 3047030784
},
{
"epoch": 0.18,
"learning_rate": 8.250641573994868e-05,
"loss": 2.5902,
"theoretical_loss": 3.316786911356377,
"tokens_seen": 3047161856
},
{
"epoch": 0.18,
"learning_rate": 8.246364414029086e-05,
"loss": 2.3611,
"theoretical_loss": 3.316775976969519,
"tokens_seen": 3047292928
},
{
"epoch": 0.18,
"learning_rate": 8.242087254063302e-05,
"loss": 2.5959,
"theoretical_loss": 3.316765043184649,
"tokens_seen": 3047424000
},
{
"epoch": 0.18,
"learning_rate": 8.23781009409752e-05,
"loss": 2.5646,
"theoretical_loss": 3.316754110001708,
"tokens_seen": 3047555072
},
{
"epoch": 0.19,
"learning_rate": 8.233532934131736e-05,
"loss": 2.585,
"theoretical_loss": 3.3167431774206384,
"tokens_seen": 3047686144
},
{
"epoch": 0.19,
"objective/train/docs_used": 1670628,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 1.981849193572998,
"objective/train/theoretical_loss": 3.316737711355786,
"objective/train/tokens_used": 77802976,
"theoretical_loss": 3.316737711355786,
"tokens_seen": 3047751680
},
{
"epoch": 0.19,
"learning_rate": 8.229255774165954e-05,
"loss": 2.3982,
"theoretical_loss": 3.3167322454413792,
"tokens_seen": 3047817216
},
{
"epoch": 0.19,
"learning_rate": 8.224978614200172e-05,
"loss": 2.5217,
"theoretical_loss": 3.316721314063873,
"tokens_seen": 3047948288
},
{
"epoch": 0.19,
"learning_rate": 8.220701454234389e-05,
"loss": 2.6089,
"theoretical_loss": 3.3167103832880604,
"tokens_seen": 3048079360
},
{
"epoch": 0.19,
"learning_rate": 8.216424294268606e-05,
"loss": 2.529,
"theoretical_loss": 3.316699453113882,
"tokens_seen": 3048210432
},
{
"epoch": 0.19,
"learning_rate": 8.212147134302823e-05,
"loss": 2.4732,
"theoretical_loss": 3.3166885235412784,
"tokens_seen": 3048341504
},
{
"epoch": 0.19,
"learning_rate": 8.207869974337041e-05,
"loss": 2.4514,
"theoretical_loss": 3.316677594570192,
"tokens_seen": 3048472576
},
{
"epoch": 0.19,
"learning_rate": 8.203592814371259e-05,
"loss": 2.4329,
"theoretical_loss": 3.316666666200563,
"tokens_seen": 3048603648
},
{
"epoch": 0.19,
"learning_rate": 8.199315654405475e-05,
"loss": 2.5147,
"theoretical_loss": 3.316655738432332,
"tokens_seen": 3048734720
},
{
"epoch": 0.19,
"learning_rate": 8.195038494439693e-05,
"loss": 2.4813,
"theoretical_loss": 3.3166448112654408,
"tokens_seen": 3048865792
},
{
"epoch": 0.19,
"learning_rate": 8.190761334473909e-05,
"loss": 2.4503,
"theoretical_loss": 3.3166338846998302,
"tokens_seen": 3048996864
},
{
"epoch": 0.19,
"learning_rate": 8.186484174508127e-05,
"loss": 2.5814,
"theoretical_loss": 3.316622958735442,
"tokens_seen": 3049127936
},
{
"epoch": 0.19,
"learning_rate": 8.182207014542345e-05,
"loss": 2.5462,
"theoretical_loss": 3.3166120333722158,
"tokens_seen": 3049259008
},
{
"epoch": 0.19,
"objective/train/docs_used": 1671752,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.1045682430267334,
"objective/train/theoretical_loss": 3.3166011086100937,
"objective/train/tokens_used": 79441376,
"theoretical_loss": 3.3166011086100937,
"tokens_seen": 3049390080
},
{
"epoch": 0.19,
"learning_rate": 8.177929854576561e-05,
"loss": 2.5946,
"theoretical_loss": 3.3166011086100937,
"tokens_seen": 3049390080
},
{
"epoch": 0.19,
"learning_rate": 8.173652694610778e-05,
"loss": 2.4925,
"theoretical_loss": 3.3165901844490167,
"tokens_seen": 3049521152
},
{
"epoch": 0.19,
"learning_rate": 8.169375534644996e-05,
"loss": 2.4937,
"theoretical_loss": 3.3165792608889255,
"tokens_seen": 3049652224
},
{
"epoch": 0.19,
"learning_rate": 8.165098374679214e-05,
"loss": 2.5232,
"theoretical_loss": 3.3165683379297612,
"tokens_seen": 3049783296
},
{
"epoch": 0.19,
"learning_rate": 8.160821214713431e-05,
"loss": 2.4577,
"theoretical_loss": 3.3165574155714657,
"tokens_seen": 3049914368
},
{
"epoch": 0.19,
"learning_rate": 8.156544054747648e-05,
"loss": 2.5193,
"theoretical_loss": 3.3165464938139797,
"tokens_seen": 3050045440
},
{
"epoch": 0.19,
"learning_rate": 8.152266894781864e-05,
"loss": 2.4776,
"theoretical_loss": 3.3165355726572434,
"tokens_seen": 3050176512
},
{
"epoch": 0.19,
"learning_rate": 8.147989734816082e-05,
"loss": 2.4092,
"theoretical_loss": 3.3165246521011995,
"tokens_seen": 3050307584
},
{
"epoch": 0.19,
"learning_rate": 8.1437125748503e-05,
"loss": 2.3898,
"theoretical_loss": 3.3165137321457885,
"tokens_seen": 3050438656
},
{
"epoch": 0.19,
"learning_rate": 8.139435414884518e-05,
"loss": 2.4147,
"theoretical_loss": 3.3165028127909513,
"tokens_seen": 3050569728
},
{
"epoch": 0.19,
"learning_rate": 8.135158254918734e-05,
"loss": 2.559,
"theoretical_loss": 3.3164918940366293,
"tokens_seen": 3050700800
},
{
"epoch": 0.2,
"learning_rate": 8.130881094952951e-05,
"loss": 2.4779,
"theoretical_loss": 3.3164809758827634,
"tokens_seen": 3050831872
},
{
"epoch": 0.2,
"learning_rate": 8.126603934987169e-05,
"loss": 2.5393,
"theoretical_loss": 3.3164700583292954,
"tokens_seen": 3050962944
},
{
"epoch": 0.2,
"objective/train/docs_used": 1672176,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2255687713623047,
"objective/train/theoretical_loss": 3.316464599777692,
"objective/train/tokens_used": 81079776,
"theoretical_loss": 3.316464599777692,
"tokens_seen": 3051028480
},
{
"epoch": 0.2,
"learning_rate": 8.122326775021386e-05,
"loss": 2.4892,
"theoretical_loss": 3.316459141376166,
"tokens_seen": 3051094016
},
{
"epoch": 0.2,
"learning_rate": 8.118049615055604e-05,
"loss": 2.4738,
"theoretical_loss": 3.3164482250233163,
"tokens_seen": 3051225088
},
{
"epoch": 0.2,
"learning_rate": 8.113772455089821e-05,
"loss": 2.5011,
"theoretical_loss": 3.316437309270688,
"tokens_seen": 3051356160
},
{
"epoch": 0.2,
"learning_rate": 8.109495295124037e-05,
"loss": 2.4879,
"theoretical_loss": 3.316426394118222,
"tokens_seen": 3051487232
},
{
"epoch": 0.2,
"learning_rate": 8.105218135158255e-05,
"loss": 2.441,
"theoretical_loss": 3.316415479565859,
"tokens_seen": 3051618304
},
{
"epoch": 0.2,
"learning_rate": 8.100940975192473e-05,
"loss": 2.5491,
"theoretical_loss": 3.3164045656135417,
"tokens_seen": 3051749376
},
{
"epoch": 0.2,
"learning_rate": 8.09666381522669e-05,
"loss": 2.502,
"theoretical_loss": 3.3163936522612096,
"tokens_seen": 3051880448
},
{
"epoch": 0.2,
"learning_rate": 8.092386655260907e-05,
"loss": 2.3763,
"theoretical_loss": 3.3163827395088052,
"tokens_seen": 3052011520
},
{
"epoch": 0.2,
"learning_rate": 8.088109495295124e-05,
"loss": 2.4419,
"theoretical_loss": 3.3163718273562695,
"tokens_seen": 3052142592
},
{
"epoch": 0.2,
"learning_rate": 8.083832335329341e-05,
"loss": 2.4011,
"theoretical_loss": 3.3163609158035436,
"tokens_seen": 3052273664
},
{
"epoch": 0.2,
"learning_rate": 8.07955517536356e-05,
"loss": 2.4094,
"theoretical_loss": 3.3163500048505687,
"tokens_seen": 3052404736
},
{
"epoch": 0.2,
"learning_rate": 8.075278015397776e-05,
"loss": 2.4447,
"theoretical_loss": 3.3163390944972857,
"tokens_seen": 3052535808
},
{
"epoch": 0.2,
"objective/train/docs_used": 1673386,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.077202320098877,
"objective/train/theoretical_loss": 3.316328184743637,
"objective/train/tokens_used": 82718176,
"theoretical_loss": 3.316328184743637,
"tokens_seen": 3052666880
},
{
"epoch": 0.2,
"learning_rate": 8.071000855431994e-05,
"loss": 2.3368,
"theoretical_loss": 3.316328184743637,
"tokens_seen": 3052666880
},
{
"epoch": 0.2,
"learning_rate": 8.06672369546621e-05,
"loss": 2.578,
"theoretical_loss": 3.3163172755895634,
"tokens_seen": 3052797952
},
{
"epoch": 0.2,
"learning_rate": 8.062446535500429e-05,
"loss": 2.5108,
"theoretical_loss": 3.3163063670350055,
"tokens_seen": 3052929024
},
{
"epoch": 0.2,
"learning_rate": 8.058169375534646e-05,
"loss": 2.4558,
"theoretical_loss": 3.3162954590799054,
"tokens_seen": 3053060096
},
{
"epoch": 0.2,
"learning_rate": 8.053892215568862e-05,
"loss": 2.5339,
"theoretical_loss": 3.316284551724204,
"tokens_seen": 3053191168
},
{
"epoch": 0.2,
"learning_rate": 8.04961505560308e-05,
"loss": 2.3677,
"theoretical_loss": 3.3162736449678434,
"tokens_seen": 3053322240
},
{
"epoch": 0.2,
"learning_rate": 8.045337895637297e-05,
"loss": 2.5638,
"theoretical_loss": 3.3162627388107637,
"tokens_seen": 3053453312
},
{
"epoch": 0.2,
"learning_rate": 8.041060735671514e-05,
"loss": 2.4784,
"theoretical_loss": 3.316251833252908,
"tokens_seen": 3053584384
},
{
"epoch": 0.2,
"learning_rate": 8.036783575705732e-05,
"loss": 2.486,
"theoretical_loss": 3.3162409282942154,
"tokens_seen": 3053715456
},
{
"epoch": 0.2,
"learning_rate": 8.032506415739949e-05,
"loss": 2.4993,
"theoretical_loss": 3.316230023934629,
"tokens_seen": 3053846528
},
{
"epoch": 0.21,
"learning_rate": 8.028229255774167e-05,
"loss": 2.5384,
"theoretical_loss": 3.3162191201740896,
"tokens_seen": 3053977600
},
{
"epoch": 0.21,
"learning_rate": 8.023952095808383e-05,
"loss": 2.5113,
"theoretical_loss": 3.3162082170125387,
"tokens_seen": 3054108672
},
{
"epoch": 0.21,
"learning_rate": 8.019674935842601e-05,
"loss": 2.2871,
"theoretical_loss": 3.3161973144499175,
"tokens_seen": 3054239744
},
{
"epoch": 0.21,
"objective/train/docs_used": 1673816,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4686410427093506,
"objective/train/theoretical_loss": 3.316191863393187,
"objective/train/tokens_used": 84356576,
"theoretical_loss": 3.316191863393187,
"tokens_seen": 3054305280
},
{
"epoch": 0.21,
"learning_rate": 8.015397775876819e-05,
"loss": 2.4141,
"theoretical_loss": 3.3161864124861675,
"tokens_seen": 3054370816
},
{
"epoch": 0.21,
"learning_rate": 8.011120615911035e-05,
"loss": 2.5008,
"theoretical_loss": 3.31617551112123,
"tokens_seen": 3054501888
},
{
"epoch": 0.21,
"learning_rate": 8.006843455945253e-05,
"loss": 2.5764,
"theoretical_loss": 3.316164610355047,
"tokens_seen": 3054632960
},
{
"epoch": 0.21,
"learning_rate": 8.00256629597947e-05,
"loss": 2.4664,
"theoretical_loss": 3.316153710187559,
"tokens_seen": 3054764032
},
{
"epoch": 0.21,
"learning_rate": 7.998289136013687e-05,
"loss": 2.5636,
"theoretical_loss": 3.316142810618708,
"tokens_seen": 3054895104
},
{
"epoch": 0.21,
"learning_rate": 7.994011976047905e-05,
"loss": 2.54,
"theoretical_loss": 3.3161319116484353,
"tokens_seen": 3055026176
},
{
"epoch": 0.21,
"learning_rate": 7.989734816082122e-05,
"loss": 2.6878,
"theoretical_loss": 3.3161210132766823,
"tokens_seen": 3055157248
},
{
"epoch": 0.21,
"learning_rate": 7.98545765611634e-05,
"loss": 2.6299,
"theoretical_loss": 3.316110115503391,
"tokens_seen": 3055288320
},
{
"epoch": 0.21,
"learning_rate": 7.981180496150556e-05,
"loss": 2.476,
"theoretical_loss": 3.316099218328502,
"tokens_seen": 3055419392
},
{
"epoch": 0.21,
"learning_rate": 7.976903336184774e-05,
"loss": 2.5998,
"theoretical_loss": 3.3160883217519572,
"tokens_seen": 3055550464
},
{
"epoch": 0.21,
"learning_rate": 7.972626176218992e-05,
"loss": 2.4002,
"theoretical_loss": 3.316077425773698,
"tokens_seen": 3055681536
},
{
"epoch": 0.21,
"learning_rate": 7.968349016253208e-05,
"loss": 2.5102,
"theoretical_loss": 3.316066530393666,
"tokens_seen": 3055812608
},
{
"epoch": 0.21,
"objective/train/docs_used": 1674780,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8518364429473877,
"objective/train/theoretical_loss": 3.3160556356118027,
"objective/train/tokens_used": 85994976,
"theoretical_loss": 3.3160556356118027,
"tokens_seen": 3055943680
},
{
"epoch": 0.21,
"learning_rate": 7.964071856287424e-05,
"loss": 2.5794,
"theoretical_loss": 3.3160556356118027,
"tokens_seen": 3055943680
},
{
"epoch": 0.21,
"learning_rate": 7.959794696321644e-05,
"loss": 2.6511,
"theoretical_loss": 3.3160447414280494,
"tokens_seen": 3056074752
},
{
"epoch": 0.21,
"learning_rate": 7.95551753635586e-05,
"loss": 2.4842,
"theoretical_loss": 3.316033847842348,
"tokens_seen": 3056205824
},
{
"epoch": 0.21,
"learning_rate": 7.951240376390078e-05,
"loss": 2.4747,
"theoretical_loss": 3.3160229548546396,
"tokens_seen": 3056336896
},
{
"epoch": 0.21,
"learning_rate": 7.946963216424294e-05,
"loss": 2.4766,
"theoretical_loss": 3.316012062464866,
"tokens_seen": 3056467968
},
{
"epoch": 0.21,
"learning_rate": 7.942686056458511e-05,
"loss": 2.611,
"theoretical_loss": 3.316001170672968,
"tokens_seen": 3056599040
},
{
"epoch": 0.21,
"learning_rate": 7.93840889649273e-05,
"loss": 2.4415,
"theoretical_loss": 3.3159902794788887,
"tokens_seen": 3056730112
},
{
"epoch": 0.21,
"learning_rate": 7.934131736526947e-05,
"loss": 2.4817,
"theoretical_loss": 3.3159793888825684,
"tokens_seen": 3056861184
},
{
"epoch": 0.22,
"learning_rate": 7.929854576561164e-05,
"loss": 2.5289,
"theoretical_loss": 3.315968498883949,
"tokens_seen": 3056992256
},
{
"epoch": 0.22,
"learning_rate": 7.925577416595381e-05,
"loss": 2.4786,
"theoretical_loss": 3.3159576094829726,
"tokens_seen": 3057123328
},
{
"epoch": 0.22,
"learning_rate": 7.921300256629597e-05,
"loss": 2.6726,
"theoretical_loss": 3.3159467206795794,
"tokens_seen": 3057254400
},
{
"epoch": 0.22,
"learning_rate": 7.917023096663817e-05,
"loss": 2.4774,
"theoretical_loss": 3.3159358324737123,
"tokens_seen": 3057385472
},
{
"epoch": 0.22,
"learning_rate": 7.912745936698033e-05,
"loss": 2.3915,
"theoretical_loss": 3.3159249448653125,
"tokens_seen": 3057516544
},
{
"epoch": 0.22,
"objective/train/docs_used": 1675938,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4315812587738037,
"objective/train/theoretical_loss": 3.3159195012851446,
"objective/train/tokens_used": 87633376,
"theoretical_loss": 3.3159195012851446,
"tokens_seen": 3057582080
},
{
"epoch": 0.22,
"learning_rate": 7.90846877673225e-05,
"loss": 2.4585,
"theoretical_loss": 3.3159140578543216,
"tokens_seen": 3057647616
},
{
"epoch": 0.22,
"learning_rate": 7.904191616766467e-05,
"loss": 2.474,
"theoretical_loss": 3.315903171440681,
"tokens_seen": 3057778688
},
{
"epoch": 0.22,
"learning_rate": 7.899914456800684e-05,
"loss": 2.3614,
"theoretical_loss": 3.3158922856243325,
"tokens_seen": 3057909760
},
{
"epoch": 0.22,
"learning_rate": 7.895637296834903e-05,
"loss": 2.4224,
"theoretical_loss": 3.3158814004052175,
"tokens_seen": 3058040832
},
{
"epoch": 0.22,
"learning_rate": 7.89136013686912e-05,
"loss": 2.5027,
"theoretical_loss": 3.3158705157832786,
"tokens_seen": 3058171904
},
{
"epoch": 0.22,
"learning_rate": 7.887082976903336e-05,
"loss": 2.4609,
"theoretical_loss": 3.315859631758456,
"tokens_seen": 3058302976
},
{
"epoch": 0.22,
"learning_rate": 7.882805816937554e-05,
"loss": 2.5555,
"theoretical_loss": 3.3158487483306924,
"tokens_seen": 3058434048
},
{
"epoch": 0.22,
"learning_rate": 7.878528656971772e-05,
"loss": 2.3801,
"theoretical_loss": 3.3158378654999288,
"tokens_seen": 3058565120
},
{
"epoch": 0.22,
"learning_rate": 7.87425149700599e-05,
"loss": 2.491,
"theoretical_loss": 3.315826983266107,
"tokens_seen": 3058696192
},
{
"epoch": 0.22,
"learning_rate": 7.869974337040206e-05,
"loss": 2.5554,
"theoretical_loss": 3.31581610162917,
"tokens_seen": 3058827264
},
{
"epoch": 0.22,
"learning_rate": 7.865697177074422e-05,
"loss": 2.4215,
"theoretical_loss": 3.3158052205890574,
"tokens_seen": 3058958336
},
{
"epoch": 0.22,
"learning_rate": 7.86142001710864e-05,
"loss": 2.4309,
"theoretical_loss": 3.315794340145712,
"tokens_seen": 3059089408
},
{
"epoch": 0.22,
"objective/train/docs_used": 1676633,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8794491291046143,
"objective/train/theoretical_loss": 3.3157834602990754,
"objective/train/tokens_used": 89271776,
"theoretical_loss": 3.3157834602990754,
"tokens_seen": 3059220480
},
{
"epoch": 0.22,
"learning_rate": 7.857142857142858e-05,
"loss": 2.507,
"theoretical_loss": 3.3157834602990754,
"tokens_seen": 3059220480
},
{
"epoch": 0.22,
"learning_rate": 7.852865697177076e-05,
"loss": 2.4838,
"theoretical_loss": 3.3157725810490892,
"tokens_seen": 3059351552
},
{
"epoch": 0.22,
"learning_rate": 7.848588537211292e-05,
"loss": 2.3672,
"theoretical_loss": 3.3157617023956956,
"tokens_seen": 3059482624
},
{
"epoch": 0.22,
"learning_rate": 7.844311377245509e-05,
"loss": 2.5403,
"theoretical_loss": 3.3157508243388354,
"tokens_seen": 3059613696
},
{
"epoch": 0.22,
"learning_rate": 7.840034217279727e-05,
"loss": 2.3567,
"theoretical_loss": 3.315739946878451,
"tokens_seen": 3059744768
},
{
"epoch": 0.22,
"learning_rate": 7.835757057313944e-05,
"loss": 2.5668,
"theoretical_loss": 3.3157290700144837,
"tokens_seen": 3059875840
},
{
"epoch": 0.22,
"learning_rate": 7.831479897348161e-05,
"loss": 2.6316,
"theoretical_loss": 3.315718193746876,
"tokens_seen": 3060006912
},
{
"epoch": 0.23,
"learning_rate": 7.827202737382379e-05,
"loss": 2.6666,
"theoretical_loss": 3.315707318075569,
"tokens_seen": 3060137984
},
{
"epoch": 0.23,
"learning_rate": 7.822925577416595e-05,
"loss": 2.4661,
"theoretical_loss": 3.3156964430005047,
"tokens_seen": 3060269056
},
{
"epoch": 0.23,
"learning_rate": 7.818648417450813e-05,
"loss": 2.5179,
"theoretical_loss": 3.315685568521625,
"tokens_seen": 3060400128
},
{
"epoch": 0.23,
"learning_rate": 7.814371257485031e-05,
"loss": 2.5853,
"theoretical_loss": 3.3156746946388713,
"tokens_seen": 3060531200
},
{
"epoch": 0.23,
"learning_rate": 7.810094097519247e-05,
"loss": 2.5132,
"theoretical_loss": 3.315663821352186,
"tokens_seen": 3060662272
},
{
"epoch": 0.23,
"learning_rate": 7.805816937553465e-05,
"loss": 2.6414,
"theoretical_loss": 3.3156529486615103,
"tokens_seen": 3060793344
},
{
"epoch": 0.23,
"objective/train/docs_used": 1677094,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3754663467407227,
"objective/train/theoretical_loss": 3.315647512539658,
"objective/train/tokens_used": 90910176,
"theoretical_loss": 3.315647512539658,
"tokens_seen": 3060858880
},
{
"epoch": 0.23,
"learning_rate": 7.801539777587682e-05,
"loss": 2.6056,
"theoretical_loss": 3.3156420765667862,
"tokens_seen": 3060924416
},
{
"epoch": 0.23,
"learning_rate": 7.7972626176219e-05,
"loss": 2.5379,
"theoretical_loss": 3.3156312050679553,
"tokens_seen": 3061055488
},
{
"epoch": 0.23,
"learning_rate": 7.792985457656117e-05,
"loss": 2.547,
"theoretical_loss": 3.31562033416496,
"tokens_seen": 3061186560
},
{
"epoch": 0.23,
"learning_rate": 7.788708297690334e-05,
"loss": 2.533,
"theoretical_loss": 3.315609463857742,
"tokens_seen": 3061317632
},
{
"epoch": 0.23,
"learning_rate": 7.784431137724552e-05,
"loss": 2.5029,
"theoretical_loss": 3.3155985941462425,
"tokens_seen": 3061448704
},
{
"epoch": 0.23,
"learning_rate": 7.780153977758768e-05,
"loss": 2.478,
"theoretical_loss": 3.315587725030404,
"tokens_seen": 3061579776
},
{
"epoch": 0.23,
"learning_rate": 7.775876817792986e-05,
"loss": 2.5078,
"theoretical_loss": 3.315576856510168,
"tokens_seen": 3061710848
},
{
"epoch": 0.23,
"learning_rate": 7.771599657827204e-05,
"loss": 2.508,
"theoretical_loss": 3.315565988585477,
"tokens_seen": 3061841920
},
{
"epoch": 0.23,
"learning_rate": 7.76732249786142e-05,
"loss": 2.5306,
"theoretical_loss": 3.3155551212562724,
"tokens_seen": 3061972992
},
{
"epoch": 0.23,
"learning_rate": 7.763045337895638e-05,
"loss": 2.6,
"theoretical_loss": 3.3155442545224956,
"tokens_seen": 3062104064
},
{
"epoch": 0.23,
"learning_rate": 7.758768177929855e-05,
"loss": 2.4638,
"theoretical_loss": 3.315533388384089,
"tokens_seen": 3062235136
},
{
"epoch": 0.23,
"learning_rate": 7.754491017964072e-05,
"loss": 2.6104,
"theoretical_loss": 3.315522522840995,
"tokens_seen": 3062366208
},
{
"epoch": 0.23,
"objective/train/docs_used": 1678209,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.218628406524658,
"objective/train/theoretical_loss": 3.3155116578931545,
"objective/train/tokens_used": 92548576,
"theoretical_loss": 3.3155116578931545,
"tokens_seen": 3062497280
},
{
"epoch": 0.23,
"learning_rate": 7.75021385799829e-05,
"loss": 2.2883,
"theoretical_loss": 3.3155116578931545,
"tokens_seen": 3062497280
},
{
"epoch": 0.23,
"learning_rate": 7.745936698032507e-05,
"loss": 2.5558,
"theoretical_loss": 3.31550079354051,
"tokens_seen": 3062628352
},
{
"epoch": 0.23,
"learning_rate": 7.741659538066724e-05,
"loss": 2.5281,
"theoretical_loss": 3.315489929783004,
"tokens_seen": 3062759424
},
{
"epoch": 0.23,
"learning_rate": 7.737382378100941e-05,
"loss": 2.5825,
"theoretical_loss": 3.315479066620577,
"tokens_seen": 3062890496
},
{
"epoch": 0.23,
"learning_rate": 7.733105218135159e-05,
"loss": 2.5783,
"theoretical_loss": 3.3154682040531718,
"tokens_seen": 3063021568
},
{
"epoch": 0.23,
"learning_rate": 7.728828058169377e-05,
"loss": 2.4715,
"theoretical_loss": 3.3154573420807303,
"tokens_seen": 3063152640
},
{
"epoch": 0.24,
"learning_rate": 7.724550898203593e-05,
"loss": 2.562,
"theoretical_loss": 3.3154464807031943,
"tokens_seen": 3063283712
},
{
"epoch": 0.24,
"learning_rate": 7.720273738237811e-05,
"loss": 2.4961,
"theoretical_loss": 3.315435619920506,
"tokens_seen": 3063414784
},
{
"epoch": 0.24,
"learning_rate": 7.715996578272027e-05,
"loss": 2.4626,
"theoretical_loss": 3.315424759732607,
"tokens_seen": 3063545856
},
{
"epoch": 0.24,
"learning_rate": 7.711719418306245e-05,
"loss": 2.4789,
"theoretical_loss": 3.31541390013944,
"tokens_seen": 3063676928
},
{
"epoch": 0.24,
"learning_rate": 7.707442258340463e-05,
"loss": 2.4289,
"theoretical_loss": 3.3154030411409465,
"tokens_seen": 3063808000
},
{
"epoch": 0.24,
"learning_rate": 7.70316509837468e-05,
"loss": 2.5123,
"theoretical_loss": 3.3153921827370683,
"tokens_seen": 3063939072
},
{
"epoch": 0.24,
"learning_rate": 7.698887938408896e-05,
"loss": 2.5002,
"theoretical_loss": 3.3153813249277473,
"tokens_seen": 3064070144
},
{
"epoch": 0.24,
"objective/train/docs_used": 1678783,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.781327724456787,
"objective/train/theoretical_loss": 3.315375896246028,
"objective/train/tokens_used": 94186976,
"theoretical_loss": 3.315375896246028,
"tokens_seen": 3064135680
},
{
"epoch": 0.24,
"learning_rate": 7.694610778443114e-05,
"loss": 2.5217,
"theoretical_loss": 3.3153704677129263,
"tokens_seen": 3064201216
},
{
"epoch": 0.24,
"learning_rate": 7.690333618477332e-05,
"loss": 2.4925,
"theoretical_loss": 3.3153596110925467,
"tokens_seen": 3064332288
},
{
"epoch": 0.24,
"learning_rate": 7.68605645851155e-05,
"loss": 2.459,
"theoretical_loss": 3.315348755066551,
"tokens_seen": 3064463360
},
{
"epoch": 0.24,
"learning_rate": 7.681779298545766e-05,
"loss": 2.4395,
"theoretical_loss": 3.31533789963488,
"tokens_seen": 3064594432
},
{
"epoch": 0.24,
"learning_rate": 7.677502138579982e-05,
"loss": 2.4192,
"theoretical_loss": 3.3153270447974776,
"tokens_seen": 3064725504
},
{
"epoch": 0.24,
"learning_rate": 7.6732249786142e-05,
"loss": 2.5295,
"theoretical_loss": 3.3153161905542845,
"tokens_seen": 3064856576
},
{
"epoch": 0.24,
"learning_rate": 7.668947818648418e-05,
"loss": 2.4586,
"theoretical_loss": 3.315305336905243,
"tokens_seen": 3064987648
},
{
"epoch": 0.24,
"learning_rate": 7.664670658682636e-05,
"loss": 2.4483,
"theoretical_loss": 3.3152944838502956,
"tokens_seen": 3065118720
},
{
"epoch": 0.24,
"learning_rate": 7.660393498716852e-05,
"loss": 2.5967,
"theoretical_loss": 3.3152836313893843,
"tokens_seen": 3065249792
},
{
"epoch": 0.24,
"learning_rate": 7.656116338751069e-05,
"loss": 2.6626,
"theoretical_loss": 3.3152727795224504,
"tokens_seen": 3065380864
},
{
"epoch": 0.24,
"learning_rate": 7.651839178785287e-05,
"loss": 2.5101,
"theoretical_loss": 3.3152619282494373,
"tokens_seen": 3065511936
},
{
"epoch": 0.24,
"learning_rate": 7.647562018819505e-05,
"loss": 2.3196,
"theoretical_loss": 3.315251077570286,
"tokens_seen": 3065643008
},
{
"epoch": 0.24,
"objective/train/docs_used": 1679875,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6284544467926025,
"objective/train/theoretical_loss": 3.3152402274849395,
"objective/train/tokens_used": 95825376,
"theoretical_loss": 3.3152402274849395,
"tokens_seen": 3065774080
},
{
"epoch": 0.24,
"learning_rate": 7.643284858853722e-05,
"loss": 2.5859,
"theoretical_loss": 3.3152402274849395,
"tokens_seen": 3065774080
},
{
"epoch": 0.24,
"learning_rate": 7.639007698887939e-05,
"loss": 2.3573,
"theoretical_loss": 3.315229377993339,
"tokens_seen": 3065905152
},
{
"epoch": 0.24,
"learning_rate": 7.634730538922155e-05,
"loss": 2.435,
"theoretical_loss": 3.315218529095427,
"tokens_seen": 3066036224
},
{
"epoch": 0.24,
"learning_rate": 7.630453378956373e-05,
"loss": 2.4501,
"theoretical_loss": 3.315207680791146,
"tokens_seen": 3066167296
},
{
"epoch": 0.25,
"learning_rate": 7.626176218990591e-05,
"loss": 2.484,
"theoretical_loss": 3.3151968330804378,
"tokens_seen": 3066298368
},
{
"epoch": 0.25,
"learning_rate": 7.621899059024807e-05,
"loss": 2.4815,
"theoretical_loss": 3.3151859859632444,
"tokens_seen": 3066429440
},
{
"epoch": 0.25,
"learning_rate": 7.617621899059025e-05,
"loss": 2.3643,
"theoretical_loss": 3.3151751394395084,
"tokens_seen": 3066560512
},
{
"epoch": 0.25,
"learning_rate": 7.613344739093242e-05,
"loss": 2.6645,
"theoretical_loss": 3.3151642935091714,
"tokens_seen": 3066691584
},
{
"epoch": 0.25,
"learning_rate": 7.60906757912746e-05,
"loss": 2.4228,
"theoretical_loss": 3.3151534481721763,
"tokens_seen": 3066822656
},
{
"epoch": 0.25,
"learning_rate": 7.604790419161677e-05,
"loss": 2.5867,
"theoretical_loss": 3.3151426034284643,
"tokens_seen": 3066953728
},
{
"epoch": 0.25,
"learning_rate": 7.600513259195894e-05,
"loss": 2.6365,
"theoretical_loss": 3.3151317592779788,
"tokens_seen": 3067084800
},
{
"epoch": 0.25,
"learning_rate": 7.596236099230112e-05,
"loss": 2.5273,
"theoretical_loss": 3.315120915720661,
"tokens_seen": 3067215872
},
{
"epoch": 0.25,
"learning_rate": 7.591958939264328e-05,
"loss": 2.5048,
"theoretical_loss": 3.315110072756454,
"tokens_seen": 3067346944
},
{
"epoch": 0.25,
"objective/train/docs_used": 1680552,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5477449893951416,
"objective/train/theoretical_loss": 3.3151046514967484,
"objective/train/tokens_used": 97463776,
"theoretical_loss": 3.3151046514967484,
"tokens_seen": 3067412480
},
{
"epoch": 0.25,
"learning_rate": 7.587681779298546e-05,
"loss": 2.3805,
"theoretical_loss": 3.315099230385299,
"tokens_seen": 3067478016
},
{
"epoch": 0.25,
"learning_rate": 7.583404619332764e-05,
"loss": 2.4863,
"theoretical_loss": 3.3150883886071387,
"tokens_seen": 3067609088
},
{
"epoch": 0.25,
"learning_rate": 7.57912745936698e-05,
"loss": 2.3692,
"theoretical_loss": 3.3150775474219154,
"tokens_seen": 3067740160
},
{
"epoch": 0.25,
"learning_rate": 7.574850299401198e-05,
"loss": 2.5147,
"theoretical_loss": 3.3150667068295716,
"tokens_seen": 3067871232
},
{
"epoch": 0.25,
"learning_rate": 7.570573139435415e-05,
"loss": 2.4548,
"theoretical_loss": 3.3150558668300487,
"tokens_seen": 3068002304
},
{
"epoch": 0.25,
"learning_rate": 7.566295979469632e-05,
"loss": 2.515,
"theoretical_loss": 3.31504502742329,
"tokens_seen": 3068133376
},
{
"epoch": 0.25,
"learning_rate": 7.56201881950385e-05,
"loss": 2.4336,
"theoretical_loss": 3.3150341886092374,
"tokens_seen": 3068264448
},
{
"epoch": 0.25,
"learning_rate": 7.557741659538067e-05,
"loss": 2.4561,
"theoretical_loss": 3.3150233503878326,
"tokens_seen": 3068395520
},
{
"epoch": 0.25,
"learning_rate": 7.553464499572285e-05,
"loss": 2.5634,
"theoretical_loss": 3.3150125127590186,
"tokens_seen": 3068526592
},
{
"epoch": 0.25,
"learning_rate": 7.549187339606501e-05,
"loss": 2.5016,
"theoretical_loss": 3.3150016757227374,
"tokens_seen": 3068657664
},
{
"epoch": 0.25,
"learning_rate": 7.544910179640719e-05,
"loss": 2.3541,
"theoretical_loss": 3.314990839278931,
"tokens_seen": 3068788736
},
{
"epoch": 0.25,
"learning_rate": 7.540633019674937e-05,
"loss": 2.3958,
"theoretical_loss": 3.314980003427542,
"tokens_seen": 3068919808
},
{
"epoch": 0.25,
"objective/train/docs_used": 1681818,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7954180240631104,
"objective/train/theoretical_loss": 3.3149691681685134,
"objective/train/tokens_used": 99102176,
"theoretical_loss": 3.3149691681685134,
"tokens_seen": 3069050880
},
{
"epoch": 0.25,
"learning_rate": 7.536355859709153e-05,
"loss": 2.5015,
"theoretical_loss": 3.3149691681685134,
"tokens_seen": 3069050880
},
{
"epoch": 0.25,
"learning_rate": 7.532078699743371e-05,
"loss": 2.5696,
"theoretical_loss": 3.314958333501786,
"tokens_seen": 3069181952
},
{
"epoch": 0.25,
"learning_rate": 7.527801539777588e-05,
"loss": 2.3828,
"theoretical_loss": 3.3149474994273036,
"tokens_seen": 3069313024
},
{
"epoch": 0.26,
"learning_rate": 7.523524379811805e-05,
"loss": 2.5782,
"theoretical_loss": 3.314936665945008,
"tokens_seen": 3069444096
},
{
"epoch": 0.26,
"learning_rate": 7.519247219846023e-05,
"loss": 2.5156,
"theoretical_loss": 3.314925833054841,
"tokens_seen": 3069575168
},
{
"epoch": 0.26,
"learning_rate": 7.51497005988024e-05,
"loss": 2.4908,
"theoretical_loss": 3.3149150007567454,
"tokens_seen": 3069706240
},
{
"epoch": 0.26,
"learning_rate": 7.510692899914457e-05,
"loss": 2.5009,
"theoretical_loss": 3.314904169050664,
"tokens_seen": 3069837312
},
{
"epoch": 0.26,
"learning_rate": 7.506415739948674e-05,
"loss": 2.4503,
"theoretical_loss": 3.3148933379365384,
"tokens_seen": 3069968384
},
{
"epoch": 0.26,
"learning_rate": 7.502138579982892e-05,
"loss": 2.4305,
"theoretical_loss": 3.3148825074143113,
"tokens_seen": 3070099456
},
{
"epoch": 0.26,
"learning_rate": 7.49786142001711e-05,
"loss": 2.3984,
"theoretical_loss": 3.314871677483925,
"tokens_seen": 3070230528
},
{
"epoch": 0.26,
"learning_rate": 7.493584260051326e-05,
"loss": 2.4805,
"theoretical_loss": 3.3148608481453223,
"tokens_seen": 3070361600
},
{
"epoch": 0.26,
"learning_rate": 7.489307100085543e-05,
"loss": 2.4796,
"theoretical_loss": 3.3148500193984454,
"tokens_seen": 3070492672
},
{
"epoch": 0.26,
"learning_rate": 7.48502994011976e-05,
"loss": 2.5145,
"theoretical_loss": 3.3148391912432364,
"tokens_seen": 3070623744
},
{
"epoch": 0.26,
"objective/train/docs_used": 1682472,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.632871150970459,
"objective/train/theoretical_loss": 3.3148337773874896,
"objective/train/tokens_used": 100740576,
"theoretical_loss": 3.3148337773874896,
"tokens_seen": 3070689280
},
{
"epoch": 0.26,
"learning_rate": 7.480752780153978e-05,
"loss": 2.4604,
"theoretical_loss": 3.3148283636796383,
"tokens_seen": 3070754816
},
{
"epoch": 0.26,
"learning_rate": 7.476475620188196e-05,
"loss": 2.4658,
"theoretical_loss": 3.3148175367075927,
"tokens_seen": 3070885888
},
{
"epoch": 0.26,
"learning_rate": 7.472198460222413e-05,
"loss": 2.4759,
"theoretical_loss": 3.314806710327043,
"tokens_seen": 3071016960
},
{
"epoch": 0.26,
"learning_rate": 7.467921300256629e-05,
"loss": 2.6602,
"theoretical_loss": 3.3147958845379306,
"tokens_seen": 3071148032
},
{
"epoch": 0.26,
"learning_rate": 7.463644140290847e-05,
"loss": 2.3896,
"theoretical_loss": 3.3147850593401986,
"tokens_seen": 3071279104
},
{
"epoch": 0.26,
"learning_rate": 7.459366980325065e-05,
"loss": 2.587,
"theoretical_loss": 3.3147742347337896,
"tokens_seen": 3071410176
},
{
"epoch": 0.26,
"learning_rate": 7.455089820359282e-05,
"loss": 2.719,
"theoretical_loss": 3.3147634107186454,
"tokens_seen": 3071541248
},
{
"epoch": 0.26,
"learning_rate": 7.450812660393499e-05,
"loss": 2.5003,
"theoretical_loss": 3.3147525872947092,
"tokens_seen": 3071672320
},
{
"epoch": 0.26,
"learning_rate": 7.446535500427715e-05,
"loss": 2.344,
"theoretical_loss": 3.3147417644619233,
"tokens_seen": 3071803392
},
{
"epoch": 0.26,
"learning_rate": 7.442258340461933e-05,
"loss": 2.5274,
"theoretical_loss": 3.31473094222023,
"tokens_seen": 3071934464
},
{
"epoch": 0.26,
"learning_rate": 7.437981180496151e-05,
"loss": 2.6219,
"theoretical_loss": 3.3147201205695715,
"tokens_seen": 3072065536
},
{
"epoch": 0.26,
"learning_rate": 7.433704020530369e-05,
"loss": 2.4226,
"theoretical_loss": 3.3147092995098912,
"tokens_seen": 3072196608
},
{
"epoch": 0.26,
"objective/train/docs_used": 1683394,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5833144187927246,
"objective/train/theoretical_loss": 3.3146984790411307,
"objective/train/tokens_used": 102378976,
"theoretical_loss": 3.3146984790411307,
"tokens_seen": 3072327680
},
{
"epoch": 0.26,
"learning_rate": 7.429426860564585e-05,
"loss": 2.4399,
"theoretical_loss": 3.3146984790411307,
"tokens_seen": 3072327680
},
{
"epoch": 0.27,
"learning_rate": 7.425149700598802e-05,
"loss": 2.6166,
"theoretical_loss": 3.314687659163233,
"tokens_seen": 3072458752
},
{
"epoch": 0.27,
"learning_rate": 7.420872540633021e-05,
"loss": 2.2906,
"theoretical_loss": 3.3146768398761406,
"tokens_seen": 3072589824
},
{
"epoch": 0.27,
"learning_rate": 7.416595380667238e-05,
"loss": 2.5399,
"theoretical_loss": 3.3146660211797956,
"tokens_seen": 3072720896
},
{
"epoch": 0.27,
"learning_rate": 7.412318220701454e-05,
"loss": 2.4276,
"theoretical_loss": 3.3146552030741416,
"tokens_seen": 3072851968
},
{
"epoch": 0.27,
"learning_rate": 7.408041060735672e-05,
"loss": 2.5037,
"theoretical_loss": 3.31464438555912,
"tokens_seen": 3072983040
},
{
"epoch": 0.27,
"learning_rate": 7.403763900769888e-05,
"loss": 2.4357,
"theoretical_loss": 3.314633568634674,
"tokens_seen": 3073114112
},
{
"epoch": 0.27,
"learning_rate": 7.399486740804107e-05,
"loss": 2.5825,
"theoretical_loss": 3.314622752300746,
"tokens_seen": 3073245184
},
{
"epoch": 0.27,
"learning_rate": 7.395209580838324e-05,
"loss": 2.4643,
"theoretical_loss": 3.3146119365572786,
"tokens_seen": 3073376256
},
{
"epoch": 0.27,
"learning_rate": 7.39093242087254e-05,
"loss": 2.3604,
"theoretical_loss": 3.3146011214042144,
"tokens_seen": 3073507328
},
{
"epoch": 0.27,
"learning_rate": 7.386655260906758e-05,
"loss": 2.4892,
"theoretical_loss": 3.314590306841496,
"tokens_seen": 3073638400
},
{
"epoch": 0.27,
"learning_rate": 7.382378100940975e-05,
"loss": 2.6183,
"theoretical_loss": 3.3145794928690657,
"tokens_seen": 3073769472
},
{
"epoch": 0.27,
"learning_rate": 7.378100940975194e-05,
"loss": 2.5466,
"theoretical_loss": 3.3145686794868667,
"tokens_seen": 3073900544
},
{
"epoch": 0.27,
"objective/train/docs_used": 1684169,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.35479998588562,
"objective/train/theoretical_loss": 3.314563273017086,
"objective/train/tokens_used": 104017376,
"theoretical_loss": 3.314563273017086,
"tokens_seen": 3073966080
},
{
"epoch": 0.27,
"learning_rate": 7.37382378100941e-05,
"loss": 2.5362,
"theoretical_loss": 3.314557866694841,
"tokens_seen": 3074031616
},
{
"epoch": 0.27,
"learning_rate": 7.369546621043627e-05,
"loss": 2.647,
"theoretical_loss": 3.314547054492932,
"tokens_seen": 3074162688
},
{
"epoch": 0.27,
"learning_rate": 7.365269461077845e-05,
"loss": 2.5716,
"theoretical_loss": 3.314536242881082,
"tokens_seen": 3074293760
},
{
"epoch": 0.27,
"learning_rate": 7.360992301112061e-05,
"loss": 2.6096,
"theoretical_loss": 3.3145254318592325,
"tokens_seen": 3074424832
},
{
"epoch": 0.27,
"learning_rate": 7.356715141146279e-05,
"loss": 2.4451,
"theoretical_loss": 3.3145146214273282,
"tokens_seen": 3074555904
},
{
"epoch": 0.27,
"learning_rate": 7.352437981180497e-05,
"loss": 2.4806,
"theoretical_loss": 3.3145038115853103,
"tokens_seen": 3074686976
},
{
"epoch": 0.27,
"learning_rate": 7.348160821214713e-05,
"loss": 2.5374,
"theoretical_loss": 3.314493002333122,
"tokens_seen": 3074818048
},
{
"epoch": 0.27,
"learning_rate": 7.343883661248931e-05,
"loss": 2.6111,
"theoretical_loss": 3.3144821936707056,
"tokens_seen": 3074949120
},
{
"epoch": 0.27,
"learning_rate": 7.339606501283149e-05,
"loss": 2.4581,
"theoretical_loss": 3.3144713855980044,
"tokens_seen": 3075080192
},
{
"epoch": 0.27,
"learning_rate": 7.335329341317365e-05,
"loss": 2.4606,
"theoretical_loss": 3.3144605781149608,
"tokens_seen": 3075211264
},
{
"epoch": 0.27,
"learning_rate": 7.331052181351583e-05,
"loss": 2.4645,
"theoretical_loss": 3.314449771221517,
"tokens_seen": 3075342336
},
{
"epoch": 0.27,
"learning_rate": 7.3267750213858e-05,
"loss": 2.5643,
"theoretical_loss": 3.3144389649176165,
"tokens_seen": 3075473408
},
{
"epoch": 0.27,
"objective/train/docs_used": 1684867,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6634860038757324,
"objective/train/theoretical_loss": 3.3144281592032017,
"objective/train/tokens_used": 105655776,
"theoretical_loss": 3.3144281592032017,
"tokens_seen": 3075604480
},
{
"epoch": 0.28,
"learning_rate": 7.322497861420018e-05,
"loss": 2.6146,
"theoretical_loss": 3.3144281592032017,
"tokens_seen": 3075604480
},
{
"epoch": 0.28,
"learning_rate": 7.318220701454235e-05,
"loss": 2.4308,
"theoretical_loss": 3.3144173540782154,
"tokens_seen": 3075735552
},
{
"epoch": 0.28,
"learning_rate": 7.313943541488452e-05,
"loss": 2.4509,
"theoretical_loss": 3.3144065495426,
"tokens_seen": 3075866624
},
{
"epoch": 0.28,
"learning_rate": 7.30966638152267e-05,
"loss": 2.4845,
"theoretical_loss": 3.3143957455962982,
"tokens_seen": 3075997696
},
{
"epoch": 0.28,
"learning_rate": 7.305389221556886e-05,
"loss": 2.6057,
"theoretical_loss": 3.3143849422392533,
"tokens_seen": 3076128768
},
{
"epoch": 0.28,
"learning_rate": 7.301112061591104e-05,
"loss": 2.4856,
"theoretical_loss": 3.3143741394714077,
"tokens_seen": 3076259840
},
{
"epoch": 0.28,
"learning_rate": 7.296834901625322e-05,
"loss": 2.6096,
"theoretical_loss": 3.314363337292704,
"tokens_seen": 3076390912
},
{
"epoch": 0.28,
"learning_rate": 7.292557741659538e-05,
"loss": 2.5767,
"theoretical_loss": 3.314352535703086,
"tokens_seen": 3076521984
},
{
"epoch": 0.28,
"learning_rate": 7.288280581693756e-05,
"loss": 2.6051,
"theoretical_loss": 3.314341734702495,
"tokens_seen": 3076653056
},
{
"epoch": 0.28,
"learning_rate": 7.284003421727973e-05,
"loss": 2.472,
"theoretical_loss": 3.3143309342908744,
"tokens_seen": 3076784128
},
{
"epoch": 0.28,
"learning_rate": 7.279726261762189e-05,
"loss": 2.5424,
"theoretical_loss": 3.314320134468167,
"tokens_seen": 3076915200
},
{
"epoch": 0.28,
"learning_rate": 7.275449101796408e-05,
"loss": 2.6118,
"theoretical_loss": 3.3143093352343165,
"tokens_seen": 3077046272
},
{
"epoch": 0.28,
"learning_rate": 7.271171941830625e-05,
"loss": 2.4979,
"theoretical_loss": 3.314298536589264,
"tokens_seen": 3077177344
},
{
"epoch": 0.28,
"objective/train/docs_used": 1686119,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6551506519317627,
"objective/train/theoretical_loss": 3.3142931374875197,
"objective/train/tokens_used": 107294176,
"theoretical_loss": 3.3142931374875197,
"tokens_seen": 3077242880
},
{
"epoch": 0.28,
"learning_rate": 7.266894781864843e-05,
"loss": 2.5443,
"theoretical_loss": 3.3142877385329537,
"tokens_seen": 3077308416
},
{
"epoch": 0.28,
"learning_rate": 7.262617621899059e-05,
"loss": 2.5527,
"theoretical_loss": 3.314276941065328,
"tokens_seen": 3077439488
},
{
"epoch": 0.28,
"learning_rate": 7.258340461933276e-05,
"loss": 2.6246,
"theoretical_loss": 3.3142661441863295,
"tokens_seen": 3077570560
},
{
"epoch": 0.28,
"learning_rate": 7.254063301967495e-05,
"loss": 2.5297,
"theoretical_loss": 3.3142553478959007,
"tokens_seen": 3077701632
},
{
"epoch": 0.28,
"learning_rate": 7.249786142001711e-05,
"loss": 2.4888,
"theoretical_loss": 3.3142445521939856,
"tokens_seen": 3077832704
},
{
"epoch": 0.28,
"learning_rate": 7.245508982035929e-05,
"loss": 2.5649,
"theoretical_loss": 3.314233757080526,
"tokens_seen": 3077963776
},
{
"epoch": 0.28,
"learning_rate": 7.241231822070146e-05,
"loss": 2.5155,
"theoretical_loss": 3.3142229625554656,
"tokens_seen": 3078094848
},
{
"epoch": 0.28,
"learning_rate": 7.236954662104363e-05,
"loss": 2.5393,
"theoretical_loss": 3.3142121686187465,
"tokens_seen": 3078225920
},
{
"epoch": 0.28,
"learning_rate": 7.232677502138581e-05,
"loss": 2.5875,
"theoretical_loss": 3.3142013752703123,
"tokens_seen": 3078356992
},
{
"epoch": 0.28,
"learning_rate": 7.228400342172798e-05,
"loss": 2.4805,
"theoretical_loss": 3.314190582510105,
"tokens_seen": 3078488064
},
{
"epoch": 0.28,
"learning_rate": 7.224123182207014e-05,
"loss": 2.5378,
"theoretical_loss": 3.3141797903380685,
"tokens_seen": 3078619136
},
{
"epoch": 0.29,
"learning_rate": 7.219846022241232e-05,
"loss": 2.5374,
"theoretical_loss": 3.314168998754145,
"tokens_seen": 3078750208
},
{
"epoch": 0.29,
"objective/train/docs_used": 1686853,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4723048210144043,
"objective/train/theoretical_loss": 3.314158207758278,
"objective/train/tokens_used": 108932576,
"theoretical_loss": 3.314158207758278,
"tokens_seen": 3078881280
},
{
"epoch": 0.29,
"learning_rate": 7.21556886227545e-05,
"loss": 2.7317,
"theoretical_loss": 3.314158207758278,
"tokens_seen": 3078881280
},
{
"epoch": 0.29,
"learning_rate": 7.211291702309668e-05,
"loss": 2.5378,
"theoretical_loss": 3.3141474173504095,
"tokens_seen": 3079012352
},
{
"epoch": 0.29,
"learning_rate": 7.207014542343884e-05,
"loss": 2.6327,
"theoretical_loss": 3.3141366275304835,
"tokens_seen": 3079143424
},
{
"epoch": 0.29,
"learning_rate": 7.2027373823781e-05,
"loss": 2.5976,
"theoretical_loss": 3.3141258382984424,
"tokens_seen": 3079274496
},
{
"epoch": 0.29,
"learning_rate": 7.198460222412318e-05,
"loss": 2.6818,
"theoretical_loss": 3.3141150496542293,
"tokens_seen": 3079405568
},
{
"epoch": 0.29,
"learning_rate": 7.194183062446536e-05,
"loss": 2.6468,
"theoretical_loss": 3.3141042615977865,
"tokens_seen": 3079536640
},
{
"epoch": 0.29,
"learning_rate": 7.189905902480754e-05,
"loss": 2.4813,
"theoretical_loss": 3.314093474129058,
"tokens_seen": 3079667712
},
{
"epoch": 0.29,
"learning_rate": 7.18562874251497e-05,
"loss": 2.5804,
"theoretical_loss": 3.314082687247986,
"tokens_seen": 3079798784
},
{
"epoch": 0.29,
"learning_rate": 7.181351582549187e-05,
"loss": 2.4319,
"theoretical_loss": 3.314071900954514,
"tokens_seen": 3079929856
},
{
"epoch": 0.29,
"learning_rate": 7.177074422583405e-05,
"loss": 2.5641,
"theoretical_loss": 3.3140611152485846,
"tokens_seen": 3080060928
},
{
"epoch": 0.29,
"learning_rate": 7.172797262617623e-05,
"loss": 2.609,
"theoretical_loss": 3.314050330130141,
"tokens_seen": 3080192000
},
{
"epoch": 0.29,
"learning_rate": 7.16852010265184e-05,
"loss": 2.4717,
"theoretical_loss": 3.314039545599126,
"tokens_seen": 3080323072
},
{
"epoch": 0.29,
"learning_rate": 7.164242942686057e-05,
"loss": 2.5039,
"theoretical_loss": 3.314028761655483,
"tokens_seen": 3080454144
},
{
"epoch": 0.29,
"objective/train/docs_used": 1688263,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.41924786567688,
"objective/train/theoretical_loss": 3.314023369903908,
"objective/train/tokens_used": 110570976,
"theoretical_loss": 3.314023369903908,
"tokens_seen": 3080519680
},
{
"epoch": 0.29,
"learning_rate": 7.159965782720273e-05,
"loss": 2.4467,
"theoretical_loss": 3.3140179782991552,
"tokens_seen": 3080585216
},
{
"epoch": 0.29,
"learning_rate": 7.155688622754491e-05,
"loss": 2.6404,
"theoretical_loss": 3.3140071955300847,
"tokens_seen": 3080716288
},
{
"epoch": 0.29,
"learning_rate": 7.151411462788709e-05,
"loss": 2.5012,
"theoretical_loss": 3.3139964133482147,
"tokens_seen": 3080847360
},
{
"epoch": 0.29,
"learning_rate": 7.147134302822926e-05,
"loss": 2.469,
"theoretical_loss": 3.3139856317534893,
"tokens_seen": 3080978432
},
{
"epoch": 0.29,
"learning_rate": 7.142857142857143e-05,
"loss": 2.5803,
"theoretical_loss": 3.31397485074585,
"tokens_seen": 3081109504
},
{
"epoch": 0.29,
"learning_rate": 7.13857998289136e-05,
"loss": 2.4955,
"theoretical_loss": 3.3139640703252415,
"tokens_seen": 3081240576
},
{
"epoch": 0.29,
"learning_rate": 7.134302822925578e-05,
"loss": 2.5004,
"theoretical_loss": 3.3139532904916056,
"tokens_seen": 3081371648
},
{
"epoch": 0.29,
"learning_rate": 7.130025662959796e-05,
"loss": 2.5855,
"theoretical_loss": 3.3139425112448864,
"tokens_seen": 3081502720
},
{
"epoch": 0.29,
"learning_rate": 7.125748502994012e-05,
"loss": 2.543,
"theoretical_loss": 3.3139317325850257,
"tokens_seen": 3081633792
},
{
"epoch": 0.3,
"learning_rate": 7.12147134302823e-05,
"loss": 2.4956,
"theoretical_loss": 3.3139209545119677,
"tokens_seen": 3081764864
},
{
"epoch": 0.3,
"learning_rate": 7.117194183062446e-05,
"loss": 2.3803,
"theoretical_loss": 3.313910177025655,
"tokens_seen": 3081895936
},
{
"epoch": 0.3,
"learning_rate": 7.112917023096664e-05,
"loss": 2.4136,
"theoretical_loss": 3.3138994001260307,
"tokens_seen": 3082027008
},
{
"epoch": 0.3,
"objective/train/docs_used": 1688735,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.326685667037964,
"objective/train/theoretical_loss": 3.313888623813038,
"objective/train/tokens_used": 112209376,
"theoretical_loss": 3.313888623813038,
"tokens_seen": 3082158080
},
{
"epoch": 0.3,
"learning_rate": 7.108639863130882e-05,
"loss": 2.4642,
"theoretical_loss": 3.313888623813038,
"tokens_seen": 3082158080
},
{
"epoch": 0.3,
"learning_rate": 7.104362703165098e-05,
"loss": 2.5382,
"theoretical_loss": 3.3138778480866202,
"tokens_seen": 3082289152
},
{
"epoch": 0.3,
"learning_rate": 7.100085543199316e-05,
"loss": 2.527,
"theoretical_loss": 3.3138670729467203,
"tokens_seen": 3082420224
},
{
"epoch": 0.3,
"learning_rate": 7.095808383233533e-05,
"loss": 2.5672,
"theoretical_loss": 3.3138562983932816,
"tokens_seen": 3082551296
},
{
"epoch": 0.3,
"learning_rate": 7.09153122326775e-05,
"loss": 2.4753,
"theoretical_loss": 3.313845524426247,
"tokens_seen": 3082682368
},
{
"epoch": 0.3,
"learning_rate": 7.087254063301968e-05,
"loss": 2.6285,
"theoretical_loss": 3.3138347510455595,
"tokens_seen": 3082813440
},
{
"epoch": 0.3,
"learning_rate": 7.082976903336185e-05,
"loss": 2.4997,
"theoretical_loss": 3.3138239782511625,
"tokens_seen": 3082944512
},
{
"epoch": 0.3,
"learning_rate": 7.078699743370403e-05,
"loss": 2.6724,
"theoretical_loss": 3.3138132060429992,
"tokens_seen": 3083075584
},
{
"epoch": 0.3,
"learning_rate": 7.074422583404619e-05,
"loss": 2.5294,
"theoretical_loss": 3.313802434421013,
"tokens_seen": 3083206656
},
{
"epoch": 0.3,
"learning_rate": 7.070145423438837e-05,
"loss": 2.442,
"theoretical_loss": 3.313791663385146,
"tokens_seen": 3083337728
},
{
"epoch": 0.3,
"learning_rate": 7.065868263473055e-05,
"loss": 2.5894,
"theoretical_loss": 3.313780892935343,
"tokens_seen": 3083468800
},
{
"epoch": 0.3,
"learning_rate": 7.061591103507271e-05,
"loss": 2.5735,
"theoretical_loss": 3.313770123071546,
"tokens_seen": 3083599872
},
{
"epoch": 0.3,
"learning_rate": 7.057313943541489e-05,
"loss": 2.5645,
"theoretical_loss": 3.313759353793699,
"tokens_seen": 3083730944
},
{
"epoch": 0.3,
"objective/train/docs_used": 1689720,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.1338090896606445,
"objective/train/theoretical_loss": 3.313753969374489,
"objective/train/tokens_used": 113847776,
"theoretical_loss": 3.313753969374489,
"tokens_seen": 3083796480
},
{
"epoch": 0.3,
"learning_rate": 7.053036783575706e-05,
"loss": 2.4857,
"theoretical_loss": 3.3137485851017447,
"tokens_seen": 3083862016
},
{
"epoch": 0.3,
"learning_rate": 7.048759623609923e-05,
"loss": 2.6054,
"theoretical_loss": 3.3137378169956264,
"tokens_seen": 3083993088
},
{
"epoch": 0.3,
"learning_rate": 7.044482463644141e-05,
"loss": 2.6083,
"theoretical_loss": 3.313727049475287,
"tokens_seen": 3084124160
},
{
"epoch": 0.3,
"learning_rate": 7.040205303678358e-05,
"loss": 2.3527,
"theoretical_loss": 3.3137162825406707,
"tokens_seen": 3084255232
},
{
"epoch": 0.3,
"learning_rate": 7.035928143712576e-05,
"loss": 2.4356,
"theoretical_loss": 3.31370551619172,
"tokens_seen": 3084386304
},
{
"epoch": 0.3,
"learning_rate": 7.031650983746792e-05,
"loss": 2.6893,
"theoretical_loss": 3.313694750428378,
"tokens_seen": 3084517376
},
{
"epoch": 0.3,
"learning_rate": 7.02737382378101e-05,
"loss": 2.3975,
"theoretical_loss": 3.313683985250589,
"tokens_seen": 3084648448
},
{
"epoch": 0.3,
"learning_rate": 7.023096663815228e-05,
"loss": 2.5608,
"theoretical_loss": 3.313673220658295,
"tokens_seen": 3084779520
},
{
"epoch": 0.31,
"learning_rate": 7.018819503849444e-05,
"loss": 2.4643,
"theoretical_loss": 3.31366245665144,
"tokens_seen": 3084910592
},
{
"epoch": 0.31,
"learning_rate": 7.01454234388366e-05,
"loss": 2.395,
"theoretical_loss": 3.3136516932299673,
"tokens_seen": 3085041664
},
{
"epoch": 0.31,
"learning_rate": 7.010265183917878e-05,
"loss": 2.5804,
"theoretical_loss": 3.3136409303938197,
"tokens_seen": 3085172736
},
{
"epoch": 0.31,
"learning_rate": 7.005988023952096e-05,
"loss": 2.5382,
"theoretical_loss": 3.313630168142941,
"tokens_seen": 3085303808
},
{
"epoch": 0.31,
"objective/train/docs_used": 1690994,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.813086748123169,
"objective/train/theoretical_loss": 3.3136194064772746,
"objective/train/tokens_used": 115486176,
"theoretical_loss": 3.3136194064772746,
"tokens_seen": 3085434880
},
{
"epoch": 0.31,
"learning_rate": 7.001710863986314e-05,
"loss": 2.7161,
"theoretical_loss": 3.3136194064772746,
"tokens_seen": 3085434880
},
{
"epoch": 0.31,
"learning_rate": 6.99743370402053e-05,
"loss": 2.5303,
"theoretical_loss": 3.3136086453967635,
"tokens_seen": 3085565952
},
{
"epoch": 0.31,
"learning_rate": 6.993156544054747e-05,
"loss": 2.5129,
"theoretical_loss": 3.313597884901351,
"tokens_seen": 3085697024
},
{
"epoch": 0.31,
"learning_rate": 6.988879384088965e-05,
"loss": 2.4289,
"theoretical_loss": 3.3135871249909803,
"tokens_seen": 3085828096
},
{
"epoch": 0.31,
"learning_rate": 6.984602224123183e-05,
"loss": 2.4337,
"theoretical_loss": 3.3135763656655954,
"tokens_seen": 3085959168
},
{
"epoch": 0.31,
"learning_rate": 6.9803250641574e-05,
"loss": 2.4339,
"theoretical_loss": 3.313565606925139,
"tokens_seen": 3086090240
},
{
"epoch": 0.31,
"learning_rate": 6.976047904191617e-05,
"loss": 2.3467,
"theoretical_loss": 3.313554848769555,
"tokens_seen": 3086221312
},
{
"epoch": 0.31,
"learning_rate": 6.971770744225834e-05,
"loss": 2.5069,
"theoretical_loss": 3.313544091198786,
"tokens_seen": 3086352384
},
{
"epoch": 0.31,
"learning_rate": 6.967493584260051e-05,
"loss": 2.3418,
"theoretical_loss": 3.313533334212776,
"tokens_seen": 3086483456
},
{
"epoch": 0.31,
"learning_rate": 6.963216424294269e-05,
"loss": 2.6456,
"theoretical_loss": 3.3135225778114683,
"tokens_seen": 3086614528
},
{
"epoch": 0.31,
"learning_rate": 6.958939264328487e-05,
"loss": 2.4547,
"theoretical_loss": 3.313511821994806,
"tokens_seen": 3086745600
},
{
"epoch": 0.31,
"learning_rate": 6.954662104362704e-05,
"loss": 2.459,
"theoretical_loss": 3.313501066762733,
"tokens_seen": 3086876672
},
{
"epoch": 0.31,
"learning_rate": 6.95038494439692e-05,
"loss": 2.5895,
"theoretical_loss": 3.3134903121151926,
"tokens_seen": 3087007744
},
{
"epoch": 0.31,
"objective/train/docs_used": 1691628,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.380619525909424,
"objective/train/theoretical_loss": 3.313484935010604,
"objective/train/tokens_used": 117124576,
"theoretical_loss": 3.313484935010604,
"tokens_seen": 3087073280
},
{
"epoch": 0.31,
"learning_rate": 6.946107784431138e-05,
"loss": 2.5606,
"theoretical_loss": 3.3134795580521277,
"tokens_seen": 3087138816
},
{
"epoch": 0.31,
"learning_rate": 6.941830624465356e-05,
"loss": 2.6642,
"theoretical_loss": 3.313468804573482,
"tokens_seen": 3087269888
},
{
"epoch": 0.31,
"learning_rate": 6.937553464499572e-05,
"loss": 2.3761,
"theoretical_loss": 3.313458051679199,
"tokens_seen": 3087400960
},
{
"epoch": 0.31,
"learning_rate": 6.93327630453379e-05,
"loss": 2.6433,
"theoretical_loss": 3.3134472993692223,
"tokens_seen": 3087532032
},
{
"epoch": 0.31,
"learning_rate": 6.928999144568006e-05,
"loss": 2.5497,
"theoretical_loss": 3.3134365476434953,
"tokens_seen": 3087663104
},
{
"epoch": 0.31,
"learning_rate": 6.924721984602224e-05,
"loss": 2.5376,
"theoretical_loss": 3.313425796501961,
"tokens_seen": 3087794176
},
{
"epoch": 0.31,
"learning_rate": 6.920444824636442e-05,
"loss": 2.6459,
"theoretical_loss": 3.3134150459445633,
"tokens_seen": 3087925248
},
{
"epoch": 0.32,
"learning_rate": 6.916167664670659e-05,
"loss": 2.7084,
"theoretical_loss": 3.313404295971245,
"tokens_seen": 3088056320
},
{
"epoch": 0.32,
"learning_rate": 6.911890504704876e-05,
"loss": 2.5354,
"theoretical_loss": 3.313393546581951,
"tokens_seen": 3088187392
},
{
"epoch": 0.32,
"learning_rate": 6.907613344739093e-05,
"loss": 2.4561,
"theoretical_loss": 3.3133827977766237,
"tokens_seen": 3088318464
},
{
"epoch": 0.32,
"learning_rate": 6.903336184773311e-05,
"loss": 2.6013,
"theoretical_loss": 3.3133720495552064,
"tokens_seen": 3088449536
},
{
"epoch": 0.32,
"learning_rate": 6.899059024807529e-05,
"loss": 2.5218,
"theoretical_loss": 3.3133613019176433,
"tokens_seen": 3088580608
},
{
"epoch": 0.32,
"objective/train/docs_used": 1692791,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3386290073394775,
"objective/train/theoretical_loss": 3.3133505548638778,
"objective/train/tokens_used": 118762976,
"theoretical_loss": 3.3133505548638778,
"tokens_seen": 3088711680
},
{
"epoch": 0.32,
"learning_rate": 6.894781864841745e-05,
"loss": 2.5461,
"theoretical_loss": 3.3133505548638778,
"tokens_seen": 3088711680
},
{
"epoch": 0.32,
"learning_rate": 6.890504704875963e-05,
"loss": 2.6487,
"theoretical_loss": 3.3133398083938532,
"tokens_seen": 3088842752
},
{
"epoch": 0.32,
"learning_rate": 6.886227544910179e-05,
"loss": 2.6339,
"theoretical_loss": 3.313329062507513,
"tokens_seen": 3088973824
},
{
"epoch": 0.32,
"learning_rate": 6.881950384944397e-05,
"loss": 2.525,
"theoretical_loss": 3.313318317204801,
"tokens_seen": 3089104896
},
{
"epoch": 0.32,
"learning_rate": 6.877673224978615e-05,
"loss": 2.4559,
"theoretical_loss": 3.3133075724856598,
"tokens_seen": 3089235968
},
{
"epoch": 0.32,
"learning_rate": 6.873396065012831e-05,
"loss": 2.5522,
"theoretical_loss": 3.313296828350034,
"tokens_seen": 3089367040
},
{
"epoch": 0.32,
"learning_rate": 6.869118905047049e-05,
"loss": 2.6742,
"theoretical_loss": 3.313286084797867,
"tokens_seen": 3089498112
},
{
"epoch": 0.32,
"learning_rate": 6.864841745081266e-05,
"loss": 2.548,
"theoretical_loss": 3.3132753418291023,
"tokens_seen": 3089629184
},
{
"epoch": 0.32,
"learning_rate": 6.860564585115484e-05,
"loss": 2.6003,
"theoretical_loss": 3.3132645994436833,
"tokens_seen": 3089760256
},
{
"epoch": 0.32,
"learning_rate": 6.856287425149701e-05,
"loss": 2.6613,
"theoretical_loss": 3.313253857641554,
"tokens_seen": 3089891328
},
{
"epoch": 0.32,
"learning_rate": 6.852010265183918e-05,
"loss": 2.6473,
"theoretical_loss": 3.3132431164226572,
"tokens_seen": 3090022400
},
{
"epoch": 0.32,
"learning_rate": 6.847733105218136e-05,
"loss": 2.3945,
"theoretical_loss": 3.313232375786937,
"tokens_seen": 3090153472
},
{
"epoch": 0.32,
"learning_rate": 6.843455945252352e-05,
"loss": 2.5502,
"theoretical_loss": 3.3132216357343367,
"tokens_seen": 3090284544
},
{
"epoch": 0.32,
"objective/train/docs_used": 1693638,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6819231510162354,
"objective/train/theoretical_loss": 3.3132162659266893,
"objective/train/tokens_used": 120401376,
"theoretical_loss": 3.3132162659266893,
"tokens_seen": 3090350080
},
{
"epoch": 0.32,
"learning_rate": 6.83917878528657e-05,
"loss": 2.4485,
"theoretical_loss": 3.313210896264801,
"tokens_seen": 3090415616
},
{
"epoch": 0.32,
"learning_rate": 6.834901625320788e-05,
"loss": 2.6666,
"theoretical_loss": 3.3132001573782714,
"tokens_seen": 3090546688
},
{
"epoch": 0.32,
"learning_rate": 6.830624465355004e-05,
"loss": 2.5939,
"theoretical_loss": 3.3131894190746936,
"tokens_seen": 3090677760
},
{
"epoch": 0.32,
"learning_rate": 6.826347305389222e-05,
"loss": 2.5552,
"theoretical_loss": 3.3131786813540107,
"tokens_seen": 3090808832
},
{
"epoch": 0.32,
"learning_rate": 6.822070145423439e-05,
"loss": 2.6067,
"theoretical_loss": 3.3131679442161657,
"tokens_seen": 3090939904
},
{
"epoch": 0.33,
"learning_rate": 6.817792985457656e-05,
"loss": 2.6286,
"theoretical_loss": 3.3131572076611024,
"tokens_seen": 3091070976
},
{
"epoch": 0.33,
"learning_rate": 6.813515825491874e-05,
"loss": 2.4823,
"theoretical_loss": 3.313146471688765,
"tokens_seen": 3091202048
},
{
"epoch": 0.33,
"learning_rate": 6.809238665526091e-05,
"loss": 2.5575,
"theoretical_loss": 3.3131357362990967,
"tokens_seen": 3091333120
},
{
"epoch": 0.33,
"learning_rate": 6.804961505560307e-05,
"loss": 2.6222,
"theoretical_loss": 3.3131250014920415,
"tokens_seen": 3091464192
},
{
"epoch": 0.33,
"learning_rate": 6.800684345594526e-05,
"loss": 2.617,
"theoretical_loss": 3.313114267267543,
"tokens_seen": 3091595264
},
{
"epoch": 0.33,
"learning_rate": 6.796407185628743e-05,
"loss": 2.6044,
"theoretical_loss": 3.3131035336255446,
"tokens_seen": 3091726336
},
{
"epoch": 0.33,
"learning_rate": 6.792130025662961e-05,
"loss": 2.4566,
"theoretical_loss": 3.31309280056599,
"tokens_seen": 3091857408
},
{
"epoch": 0.33,
"objective/train/docs_used": 1694249,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 1.87570321559906,
"objective/train/theoretical_loss": 3.3130820680888236,
"objective/train/tokens_used": 122039776,
"theoretical_loss": 3.3130820680888236,
"tokens_seen": 3091988480
},
{
"epoch": 0.33,
"learning_rate": 6.787852865697177e-05,
"loss": 2.5227,
"theoretical_loss": 3.3130820680888236,
"tokens_seen": 3091988480
},
{
"epoch": 0.33,
"learning_rate": 6.783575705731394e-05,
"loss": 2.5406,
"theoretical_loss": 3.313071336193988,
"tokens_seen": 3092119552
},
{
"epoch": 0.33,
"learning_rate": 6.779298545765613e-05,
"loss": 2.659,
"theoretical_loss": 3.313060604881428,
"tokens_seen": 3092250624
},
{
"epoch": 0.33,
"learning_rate": 6.775021385799829e-05,
"loss": 2.4937,
"theoretical_loss": 3.313049874151087,
"tokens_seen": 3092381696
},
{
"epoch": 0.33,
"learning_rate": 6.770744225834047e-05,
"loss": 2.6934,
"theoretical_loss": 3.313039144002908,
"tokens_seen": 3092512768
},
{
"epoch": 0.33,
"learning_rate": 6.766467065868264e-05,
"loss": 2.6567,
"theoretical_loss": 3.313028414436836,
"tokens_seen": 3092643840
},
{
"epoch": 0.33,
"learning_rate": 6.76218990590248e-05,
"loss": 2.5487,
"theoretical_loss": 3.313017685452814,
"tokens_seen": 3092774912
},
{
"epoch": 0.33,
"learning_rate": 6.757912745936699e-05,
"loss": 2.5314,
"theoretical_loss": 3.3130069570507854,
"tokens_seen": 3092905984
},
{
"epoch": 0.33,
"learning_rate": 6.753635585970916e-05,
"loss": 2.529,
"theoretical_loss": 3.312996229230695,
"tokens_seen": 3093037056
},
{
"epoch": 0.33,
"learning_rate": 6.749358426005134e-05,
"loss": 2.6427,
"theoretical_loss": 3.312985501992485,
"tokens_seen": 3093168128
},
{
"epoch": 0.33,
"learning_rate": 6.74508126603935e-05,
"loss": 2.5213,
"theoretical_loss": 3.312974775336101,
"tokens_seen": 3093299200
},
{
"epoch": 0.33,
"learning_rate": 6.740804106073567e-05,
"loss": 2.4606,
"theoretical_loss": 3.312964049261486,
"tokens_seen": 3093430272
},
{
"epoch": 0.33,
"learning_rate": 6.736526946107786e-05,
"loss": 2.6563,
"theoretical_loss": 3.3129533237685838,
"tokens_seen": 3093561344
},
{
"epoch": 0.33,
"objective/train/docs_used": 1695060,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7868711948394775,
"objective/train/theoretical_loss": 3.312947961240257,
"objective/train/tokens_used": 123678176,
"theoretical_loss": 3.312947961240257,
"tokens_seen": 3093626880
},
{
"epoch": 0.33,
"learning_rate": 6.732249786142002e-05,
"loss": 2.7125,
"theoretical_loss": 3.3129425988573376,
"tokens_seen": 3093692416
},
{
"epoch": 0.33,
"learning_rate": 6.727972626176219e-05,
"loss": 2.531,
"theoretical_loss": 3.3129318745276923,
"tokens_seen": 3093823488
},
{
"epoch": 0.33,
"learning_rate": 6.723695466210436e-05,
"loss": 2.5273,
"theoretical_loss": 3.3129211507795913,
"tokens_seen": 3093954560
},
{
"epoch": 0.33,
"learning_rate": 6.719418306244653e-05,
"loss": 2.7121,
"theoretical_loss": 3.312910427612978,
"tokens_seen": 3094085632
},
{
"epoch": 0.34,
"learning_rate": 6.715141146278872e-05,
"loss": 2.5665,
"theoretical_loss": 3.312899705027797,
"tokens_seen": 3094216704
},
{
"epoch": 0.34,
"learning_rate": 6.710863986313089e-05,
"loss": 2.6584,
"theoretical_loss": 3.312888983023991,
"tokens_seen": 3094347776
},
{
"epoch": 0.34,
"learning_rate": 6.706586826347305e-05,
"loss": 2.6025,
"theoretical_loss": 3.3128782616015053,
"tokens_seen": 3094478848
},
{
"epoch": 0.34,
"learning_rate": 6.702309666381523e-05,
"loss": 2.6372,
"theoretical_loss": 3.312867540760283,
"tokens_seen": 3094609920
},
{
"epoch": 0.34,
"learning_rate": 6.698032506415741e-05,
"loss": 2.5192,
"theoretical_loss": 3.3128568205002678,
"tokens_seen": 3094740992
},
{
"epoch": 0.34,
"learning_rate": 6.693755346449959e-05,
"loss": 2.5967,
"theoretical_loss": 3.3128461008214036,
"tokens_seen": 3094872064
},
{
"epoch": 0.34,
"learning_rate": 6.689478186484175e-05,
"loss": 2.6274,
"theoretical_loss": 3.3128353817236347,
"tokens_seen": 3095003136
},
{
"epoch": 0.34,
"learning_rate": 6.685201026518392e-05,
"loss": 2.5711,
"theoretical_loss": 3.312824663206905,
"tokens_seen": 3095134208
},
{
"epoch": 0.34,
"objective/train/docs_used": 1695507,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8500750064849854,
"objective/train/theoretical_loss": 3.312813945271158,
"objective/train/tokens_used": 125316576,
"theoretical_loss": 3.312813945271158,
"tokens_seen": 3095265280
},
{
"epoch": 0.34,
"learning_rate": 6.68092386655261e-05,
"loss": 2.5407,
"theoretical_loss": 3.312813945271158,
"tokens_seen": 3095265280
},
{
"epoch": 0.34,
"learning_rate": 6.676646706586827e-05,
"loss": 2.6406,
"theoretical_loss": 3.3128032279163375,
"tokens_seen": 3095396352
},
{
"epoch": 0.34,
"learning_rate": 6.672369546621044e-05,
"loss": 2.5063,
"theoretical_loss": 3.312792511142388,
"tokens_seen": 3095527424
},
{
"epoch": 0.34,
"learning_rate": 6.668092386655261e-05,
"loss": 2.6621,
"theoretical_loss": 3.3127817949492533,
"tokens_seen": 3095658496
},
{
"epoch": 0.34,
"learning_rate": 6.663815226689478e-05,
"loss": 2.6269,
"theoretical_loss": 3.3127710793368768,
"tokens_seen": 3095789568
},
{
"epoch": 0.34,
"learning_rate": 6.659538066723696e-05,
"loss": 2.6773,
"theoretical_loss": 3.312760364305203,
"tokens_seen": 3095920640
},
{
"epoch": 0.34,
"learning_rate": 6.655260906757914e-05,
"loss": 2.6404,
"theoretical_loss": 3.3127496498541755,
"tokens_seen": 3096051712
},
{
"epoch": 0.34,
"learning_rate": 6.65098374679213e-05,
"loss": 2.5714,
"theoretical_loss": 3.3127389359837385,
"tokens_seen": 3096182784
},
{
"epoch": 0.34,
"learning_rate": 6.646706586826348e-05,
"loss": 2.6192,
"theoretical_loss": 3.3127282226938357,
"tokens_seen": 3096313856
},
{
"epoch": 0.34,
"learning_rate": 6.642429426860564e-05,
"loss": 2.5385,
"theoretical_loss": 3.3127175099844113,
"tokens_seen": 3096444928
},
{
"epoch": 0.34,
"learning_rate": 6.638152266894782e-05,
"loss": 2.632,
"theoretical_loss": 3.3127067978554092,
"tokens_seen": 3096576000
},
{
"epoch": 0.34,
"learning_rate": 6.633875106929e-05,
"loss": 2.5296,
"theoretical_loss": 3.3126960863067736,
"tokens_seen": 3096707072
},
{
"epoch": 0.34,
"learning_rate": 6.629597946963217e-05,
"loss": 2.5782,
"theoretical_loss": 3.3126853753384484,
"tokens_seen": 3096838144
},
{
"epoch": 0.34,
"objective/train/docs_used": 1696585,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7085928916931152,
"objective/train/theoretical_loss": 3.3126800200718844,
"objective/train/tokens_used": 126954976,
"theoretical_loss": 3.3126800200718844,
"tokens_seen": 3096903680
},
{
"epoch": 0.34,
"learning_rate": 6.625320786997434e-05,
"loss": 2.5901,
"theoretical_loss": 3.312674664950377,
"tokens_seen": 3096969216
},
{
"epoch": 0.34,
"learning_rate": 6.621043627031651e-05,
"loss": 2.7204,
"theoretical_loss": 3.3126639551425043,
"tokens_seen": 3097100288
},
{
"epoch": 0.35,
"learning_rate": 6.616766467065869e-05,
"loss": 2.4963,
"theoretical_loss": 3.3126532459147735,
"tokens_seen": 3097231360
},
{
"epoch": 0.35,
"learning_rate": 6.612489307100087e-05,
"loss": 2.5793,
"theoretical_loss": 3.3126425372671293,
"tokens_seen": 3097362432
},
{
"epoch": 0.35,
"learning_rate": 6.608212147134303e-05,
"loss": 2.6098,
"theoretical_loss": 3.3126318291995154,
"tokens_seen": 3097493504
},
{
"epoch": 0.35,
"learning_rate": 6.603934987168521e-05,
"loss": 2.7311,
"theoretical_loss": 3.312621121711876,
"tokens_seen": 3097624576
},
{
"epoch": 0.35,
"learning_rate": 6.599657827202737e-05,
"loss": 2.5035,
"theoretical_loss": 3.312610414804155,
"tokens_seen": 3097755648
},
{
"epoch": 0.35,
"learning_rate": 6.595380667236955e-05,
"loss": 2.4575,
"theoretical_loss": 3.312599708476297,
"tokens_seen": 3097886720
},
{
"epoch": 0.35,
"learning_rate": 6.591103507271173e-05,
"loss": 2.5423,
"theoretical_loss": 3.3125890027282447,
"tokens_seen": 3098017792
},
{
"epoch": 0.35,
"learning_rate": 6.58682634730539e-05,
"loss": 2.5146,
"theoretical_loss": 3.3125782975599436,
"tokens_seen": 3098148864
},
{
"epoch": 0.35,
"learning_rate": 6.582549187339607e-05,
"loss": 2.5976,
"theoretical_loss": 3.312567592971337,
"tokens_seen": 3098279936
},
{
"epoch": 0.35,
"learning_rate": 6.578272027373824e-05,
"loss": 2.4906,
"theoretical_loss": 3.3125568889623693,
"tokens_seen": 3098411008
},
{
"epoch": 0.35,
"objective/train/docs_used": 1697815,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.453221082687378,
"objective/train/theoretical_loss": 3.3125461855329847,
"objective/train/tokens_used": 128593376,
"theoretical_loss": 3.3125461855329847,
"tokens_seen": 3098542080
},
{
"epoch": 0.35,
"learning_rate": 6.573994867408042e-05,
"loss": 2.539,
"theoretical_loss": 3.3125461855329847,
"tokens_seen": 3098542080
},
{
"epoch": 0.35,
"learning_rate": 6.56971770744226e-05,
"loss": 2.6606,
"theoretical_loss": 3.312535482683127,
"tokens_seen": 3098673152
},
{
"epoch": 0.35,
"learning_rate": 6.565440547476476e-05,
"loss": 2.5937,
"theoretical_loss": 3.3125247804127405,
"tokens_seen": 3098804224
},
{
"epoch": 0.35,
"learning_rate": 6.561163387510694e-05,
"loss": 2.4754,
"theoretical_loss": 3.312514078721769,
"tokens_seen": 3098935296
},
{
"epoch": 0.35,
"learning_rate": 6.55688622754491e-05,
"loss": 2.5848,
"theoretical_loss": 3.312503377610157,
"tokens_seen": 3099066368
},
{
"epoch": 0.35,
"learning_rate": 6.552609067579128e-05,
"loss": 2.5223,
"theoretical_loss": 3.3124926770778487,
"tokens_seen": 3099197440
},
{
"epoch": 0.35,
"learning_rate": 6.548331907613346e-05,
"loss": 2.5736,
"theoretical_loss": 3.3124819771247878,
"tokens_seen": 3099328512
},
{
"epoch": 0.35,
"learning_rate": 6.544054747647562e-05,
"loss": 2.6126,
"theoretical_loss": 3.3124712777509187,
"tokens_seen": 3099459584
},
{
"epoch": 0.35,
"learning_rate": 6.539777587681779e-05,
"loss": 2.5122,
"theoretical_loss": 3.312460578956186,
"tokens_seen": 3099590656
},
{
"epoch": 0.35,
"learning_rate": 6.535500427715997e-05,
"loss": 2.5951,
"theoretical_loss": 3.3124498807405325,
"tokens_seen": 3099721728
},
{
"epoch": 0.35,
"learning_rate": 6.531223267750214e-05,
"loss": 2.5596,
"theoretical_loss": 3.312439183103904,
"tokens_seen": 3099852800
},
{
"epoch": 0.35,
"learning_rate": 6.526946107784432e-05,
"loss": 2.6898,
"theoretical_loss": 3.312428486046244,
"tokens_seen": 3099983872
},
{
"epoch": 0.35,
"learning_rate": 6.522668947818649e-05,
"loss": 2.7212,
"theoretical_loss": 3.312417789567496,
"tokens_seen": 3100114944
},
{
"epoch": 0.35,
"objective/train/docs_used": 1698465,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.276474714279175,
"objective/train/theoretical_loss": 3.3124124415451974,
"objective/train/tokens_used": 130231776,
"theoretical_loss": 3.3124124415451974,
"tokens_seen": 3100180480
},
{
"epoch": 0.35,
"learning_rate": 6.518391787852865e-05,
"loss": 2.4624,
"theoretical_loss": 3.3124070936676056,
"tokens_seen": 3100246016
},
{
"epoch": 0.36,
"learning_rate": 6.514114627887083e-05,
"loss": 2.5426,
"theoretical_loss": 3.312396398346516,
"tokens_seen": 3100377088
},
{
"epoch": 0.36,
"learning_rate": 6.509837467921301e-05,
"loss": 2.5892,
"theoretical_loss": 3.3123857036041717,
"tokens_seen": 3100508160
},
{
"epoch": 0.36,
"learning_rate": 6.505560307955519e-05,
"loss": 2.6646,
"theoretical_loss": 3.3123750094405167,
"tokens_seen": 3100639232
},
{
"epoch": 0.36,
"learning_rate": 6.501283147989735e-05,
"loss": 2.4117,
"theoretical_loss": 3.3123643158554956,
"tokens_seen": 3100770304
},
{
"epoch": 0.36,
"learning_rate": 6.497005988023952e-05,
"loss": 2.5704,
"theoretical_loss": 3.312353622849052,
"tokens_seen": 3100901376
},
{
"epoch": 0.36,
"learning_rate": 6.49272882805817e-05,
"loss": 2.4199,
"theoretical_loss": 3.3123429304211314,
"tokens_seen": 3101032448
},
{
"epoch": 0.36,
"learning_rate": 6.488451668092387e-05,
"loss": 2.4503,
"theoretical_loss": 3.3123322385716767,
"tokens_seen": 3101163520
},
{
"epoch": 0.36,
"learning_rate": 6.484174508126605e-05,
"loss": 2.528,
"theoretical_loss": 3.312321547300633,
"tokens_seen": 3101294592
},
{
"epoch": 0.36,
"learning_rate": 6.479897348160822e-05,
"loss": 2.5026,
"theoretical_loss": 3.3123108566079438,
"tokens_seen": 3101425664
},
{
"epoch": 0.36,
"learning_rate": 6.475620188195038e-05,
"loss": 2.607,
"theoretical_loss": 3.3123001664935545,
"tokens_seen": 3101556736
},
{
"epoch": 0.36,
"learning_rate": 6.471343028229256e-05,
"loss": 2.5181,
"theoretical_loss": 3.3122894769574085,
"tokens_seen": 3101687808
},
{
"epoch": 0.36,
"objective/train/docs_used": 1699641,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.600740671157837,
"objective/train/theoretical_loss": 3.3122787879994497,
"objective/train/tokens_used": 131870176,
"theoretical_loss": 3.3122787879994497,
"tokens_seen": 3101818880
},
{
"epoch": 0.36,
"learning_rate": 6.467065868263474e-05,
"loss": 2.6134,
"theoretical_loss": 3.3122787879994497,
"tokens_seen": 3101818880
},
{
"epoch": 0.36,
"learning_rate": 6.46278870829769e-05,
"loss": 2.4917,
"theoretical_loss": 3.3122680996196237,
"tokens_seen": 3101949952
},
{
"epoch": 0.36,
"learning_rate": 6.458511548331908e-05,
"loss": 2.6256,
"theoretical_loss": 3.312257411817874,
"tokens_seen": 3102081024
},
{
"epoch": 0.36,
"learning_rate": 6.454234388366125e-05,
"loss": 2.4842,
"theoretical_loss": 3.3122467245941447,
"tokens_seen": 3102212096
},
{
"epoch": 0.36,
"learning_rate": 6.449957228400342e-05,
"loss": 2.5249,
"theoretical_loss": 3.312236037948381,
"tokens_seen": 3102343168
},
{
"epoch": 0.36,
"learning_rate": 6.44568006843456e-05,
"loss": 2.6317,
"theoretical_loss": 3.312225351880526,
"tokens_seen": 3102474240
},
{
"epoch": 0.36,
"learning_rate": 6.441402908468777e-05,
"loss": 2.468,
"theoretical_loss": 3.312214666390525,
"tokens_seen": 3102605312
},
{
"epoch": 0.36,
"learning_rate": 6.437125748502994e-05,
"loss": 2.5314,
"theoretical_loss": 3.312203981478322,
"tokens_seen": 3102736384
},
{
"epoch": 0.36,
"learning_rate": 6.432848588537211e-05,
"loss": 2.6474,
"theoretical_loss": 3.3121932971438612,
"tokens_seen": 3102867456
},
{
"epoch": 0.36,
"learning_rate": 6.428571428571429e-05,
"loss": 2.5816,
"theoretical_loss": 3.3121826133870873,
"tokens_seen": 3102998528
},
{
"epoch": 0.36,
"learning_rate": 6.424294268605647e-05,
"loss": 2.6009,
"theoretical_loss": 3.3121719302079446,
"tokens_seen": 3103129600
},
{
"epoch": 0.36,
"learning_rate": 6.420017108639863e-05,
"loss": 2.5371,
"theoretical_loss": 3.3121612476063778,
"tokens_seen": 3103260672
},
{
"epoch": 0.36,
"learning_rate": 6.415739948674081e-05,
"loss": 2.5445,
"theoretical_loss": 3.3121505655823302,
"tokens_seen": 3103391744
},
{
"epoch": 0.36,
"objective/train/docs_used": 1699977,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.1121926307678223,
"objective/train/theoretical_loss": 3.312145224786859,
"objective/train/tokens_used": 133508576,
"theoretical_loss": 3.312145224786859,
"tokens_seen": 3103457280
},
{
"epoch": 0.37,
"learning_rate": 6.411462788708297e-05,
"loss": 2.5187,
"theoretical_loss": 3.312139884135747,
"tokens_seen": 3103522816
},
{
"epoch": 0.37,
"learning_rate": 6.407185628742515e-05,
"loss": 2.6903,
"theoretical_loss": 3.3121292032665726,
"tokens_seen": 3103653888
},
{
"epoch": 0.37,
"learning_rate": 6.402908468776733e-05,
"loss": 2.5954,
"theoretical_loss": 3.3121185229747514,
"tokens_seen": 3103784960
},
{
"epoch": 0.37,
"learning_rate": 6.39863130881095e-05,
"loss": 2.4992,
"theoretical_loss": 3.312107843260227,
"tokens_seen": 3103916032
},
{
"epoch": 0.37,
"learning_rate": 6.394354148845167e-05,
"loss": 2.4812,
"theoretical_loss": 3.312097164122945,
"tokens_seen": 3104047104
},
{
"epoch": 0.37,
"learning_rate": 6.390076988879384e-05,
"loss": 2.5708,
"theoretical_loss": 3.3120864855628493,
"tokens_seen": 3104178176
},
{
"epoch": 0.37,
"learning_rate": 6.385799828913602e-05,
"loss": 2.5199,
"theoretical_loss": 3.3120758075798844,
"tokens_seen": 3104309248
},
{
"epoch": 0.37,
"learning_rate": 6.38152266894782e-05,
"loss": 2.5963,
"theoretical_loss": 3.3120651301739947,
"tokens_seen": 3104440320
},
{
"epoch": 0.37,
"learning_rate": 6.377245508982036e-05,
"loss": 2.5702,
"theoretical_loss": 3.312054453345125,
"tokens_seen": 3104571392
},
{
"epoch": 0.37,
"learning_rate": 6.372968349016254e-05,
"loss": 2.5606,
"theoretical_loss": 3.3120437770932183,
"tokens_seen": 3104702464
},
{
"epoch": 0.37,
"learning_rate": 6.36869118905047e-05,
"loss": 2.5872,
"theoretical_loss": 3.312033101418221,
"tokens_seen": 3104833536
},
{
"epoch": 0.37,
"learning_rate": 6.364414029084688e-05,
"loss": 2.6235,
"theoretical_loss": 3.3120224263200764,
"tokens_seen": 3104964608
},
{
"epoch": 0.37,
"objective/train/docs_used": 1701288,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.725125551223755,
"objective/train/theoretical_loss": 3.3120117517987295,
"objective/train/tokens_used": 135146976,
"theoretical_loss": 3.3120117517987295,
"tokens_seen": 3105095680
},
{
"epoch": 0.37,
"learning_rate": 6.360136869118906e-05,
"loss": 2.5687,
"theoretical_loss": 3.3120117517987295,
"tokens_seen": 3105095680
},
{
"epoch": 0.37,
"learning_rate": 6.355859709153122e-05,
"loss": 2.5826,
"theoretical_loss": 3.312001077854125,
"tokens_seen": 3105226752
},
{
"epoch": 0.37,
"learning_rate": 6.35158254918734e-05,
"loss": 2.5514,
"theoretical_loss": 3.311990404486206,
"tokens_seen": 3105357824
},
{
"epoch": 0.37,
"learning_rate": 6.347305389221557e-05,
"loss": 2.5869,
"theoretical_loss": 3.311979731694919,
"tokens_seen": 3105488896
},
{
"epoch": 0.37,
"learning_rate": 6.343028229255775e-05,
"loss": 2.6095,
"theoretical_loss": 3.311969059480207,
"tokens_seen": 3105619968
},
{
"epoch": 0.37,
"learning_rate": 6.338751069289992e-05,
"loss": 2.613,
"theoretical_loss": 3.311958387842015,
"tokens_seen": 3105751040
},
{
"epoch": 0.37,
"learning_rate": 6.334473909324209e-05,
"loss": 2.5923,
"theoretical_loss": 3.311947716780288,
"tokens_seen": 3105882112
},
{
"epoch": 0.37,
"learning_rate": 6.330196749358425e-05,
"loss": 2.6335,
"theoretical_loss": 3.31193704629497,
"tokens_seen": 3106013184
},
{
"epoch": 0.37,
"learning_rate": 6.325919589392643e-05,
"loss": 2.5942,
"theoretical_loss": 3.311926376386005,
"tokens_seen": 3106144256
},
{
"epoch": 0.37,
"learning_rate": 6.321642429426861e-05,
"loss": 2.5971,
"theoretical_loss": 3.311915707053339,
"tokens_seen": 3106275328
},
{
"epoch": 0.37,
"learning_rate": 6.317365269461079e-05,
"loss": 2.5642,
"theoretical_loss": 3.3119050382969153,
"tokens_seen": 3106406400
},
{
"epoch": 0.38,
"learning_rate": 6.313088109495295e-05,
"loss": 2.4445,
"theoretical_loss": 3.311894370116679,
"tokens_seen": 3106537472
},
{
"epoch": 0.38,
"learning_rate": 6.308810949529512e-05,
"loss": 2.5959,
"theoretical_loss": 3.3118837025125747,
"tokens_seen": 3106668544
},
{
"epoch": 0.38,
"objective/train/docs_used": 1702491,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.138563632965088,
"objective/train/theoretical_loss": 3.3118783689265547,
"objective/train/tokens_used": 136785376,
"theoretical_loss": 3.3118783689265547,
"tokens_seen": 3106734080
},
{
"epoch": 0.38,
"learning_rate": 6.30453378956373e-05,
"loss": 2.4946,
"theoretical_loss": 3.311873035484547,
"tokens_seen": 3106799616
},
{
"epoch": 0.38,
"learning_rate": 6.300256629597947e-05,
"loss": 2.4598,
"theoretical_loss": 3.31186236903254,
"tokens_seen": 3106930688
},
{
"epoch": 0.38,
"learning_rate": 6.295979469632165e-05,
"loss": 2.5823,
"theoretical_loss": 3.311851703156499,
"tokens_seen": 3107061760
},
{
"epoch": 0.38,
"learning_rate": 6.291702309666382e-05,
"loss": 2.6785,
"theoretical_loss": 3.311841037856368,
"tokens_seen": 3107192832
},
{
"epoch": 0.38,
"learning_rate": 6.287425149700598e-05,
"loss": 2.4439,
"theoretical_loss": 3.311830373132092,
"tokens_seen": 3107323904
},
{
"epoch": 0.38,
"learning_rate": 6.283147989734816e-05,
"loss": 2.5055,
"theoretical_loss": 3.3118197089836157,
"tokens_seen": 3107454976
},
{
"epoch": 0.38,
"learning_rate": 6.278870829769034e-05,
"loss": 2.4456,
"theoretical_loss": 3.3118090454108833,
"tokens_seen": 3107586048
},
{
"epoch": 0.38,
"learning_rate": 6.274593669803252e-05,
"loss": 2.5729,
"theoretical_loss": 3.3117983824138397,
"tokens_seen": 3107717120
},
{
"epoch": 0.38,
"learning_rate": 6.270316509837468e-05,
"loss": 2.5774,
"theoretical_loss": 3.311787719992429,
"tokens_seen": 3107848192
},
{
"epoch": 0.38,
"learning_rate": 6.266039349871685e-05,
"loss": 2.4742,
"theoretical_loss": 3.311777058146597,
"tokens_seen": 3107979264
},
{
"epoch": 0.38,
"learning_rate": 6.261762189905902e-05,
"loss": 2.4432,
"theoretical_loss": 3.311766396876288,
"tokens_seen": 3108110336
},
{
"epoch": 0.38,
"learning_rate": 6.25748502994012e-05,
"loss": 2.5103,
"theoretical_loss": 3.3117557361814454,
"tokens_seen": 3108241408
},
{
"epoch": 0.38,
"objective/train/docs_used": 1702889,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.351708173751831,
"objective/train/theoretical_loss": 3.3117450760620155,
"objective/train/tokens_used": 138423776,
"theoretical_loss": 3.3117450760620155,
"tokens_seen": 3108372480
},
{
"epoch": 0.38,
"learning_rate": 6.253207869974337e-05,
"loss": 2.4282,
"theoretical_loss": 3.3117450760620155,
"tokens_seen": 3108372480
},
{
"epoch": 0.38,
"learning_rate": 6.248930710008555e-05,
"loss": 2.5714,
"theoretical_loss": 3.311734416517942,
"tokens_seen": 3108503552
},
{
"epoch": 0.38,
"learning_rate": 6.244653550042771e-05,
"loss": 2.4235,
"theoretical_loss": 3.31172375754917,
"tokens_seen": 3108634624
},
{
"epoch": 0.38,
"learning_rate": 6.24037639007699e-05,
"loss": 2.6382,
"theoretical_loss": 3.311713099155644,
"tokens_seen": 3108765696
},
{
"epoch": 0.38,
"learning_rate": 6.236099230111207e-05,
"loss": 2.5431,
"theoretical_loss": 3.311702441337309,
"tokens_seen": 3108896768
},
{
"epoch": 0.38,
"learning_rate": 6.231822070145423e-05,
"loss": 2.3186,
"theoretical_loss": 3.3116917840941094,
"tokens_seen": 3109027840
},
{
"epoch": 0.38,
"learning_rate": 6.227544910179641e-05,
"loss": 2.5123,
"theoretical_loss": 3.3116811274259903,
"tokens_seen": 3109158912
},
{
"epoch": 0.38,
"learning_rate": 6.223267750213857e-05,
"loss": 2.5532,
"theoretical_loss": 3.3116704713328957,
"tokens_seen": 3109289984
},
{
"epoch": 0.38,
"learning_rate": 6.218990590248077e-05,
"loss": 2.6137,
"theoretical_loss": 3.311659815814771,
"tokens_seen": 3109421056
},
{
"epoch": 0.38,
"learning_rate": 6.214713430282293e-05,
"loss": 2.4946,
"theoretical_loss": 3.311649160871561,
"tokens_seen": 3109552128
},
{
"epoch": 0.39,
"learning_rate": 6.21043627031651e-05,
"loss": 2.499,
"theoretical_loss": 3.31163850650321,
"tokens_seen": 3109683200
},
{
"epoch": 0.39,
"learning_rate": 6.206159110350727e-05,
"loss": 2.558,
"theoretical_loss": 3.311627852709663,
"tokens_seen": 3109814272
},
{
"epoch": 0.39,
"learning_rate": 6.201881950384944e-05,
"loss": 2.4069,
"theoretical_loss": 3.3116171994908647,
"tokens_seen": 3109945344
},
{
"epoch": 0.39,
"objective/train/docs_used": 1704007,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.46962571144104,
"objective/train/theoretical_loss": 3.311611873096979,
"objective/train/tokens_used": 140062176,
"theoretical_loss": 3.311611873096979,
"tokens_seen": 3110010880
},
{
"epoch": 0.39,
"learning_rate": 6.197604790419162e-05,
"loss": 2.5208,
"theoretical_loss": 3.3116065468467597,
"tokens_seen": 3110076416
},
{
"epoch": 0.39,
"learning_rate": 6.19332763045338e-05,
"loss": 2.5436,
"theoretical_loss": 3.311595894777293,
"tokens_seen": 3110207488
},
{
"epoch": 0.39,
"learning_rate": 6.189050470487596e-05,
"loss": 2.4587,
"theoretical_loss": 3.3115852432824093,
"tokens_seen": 3110338560
},
{
"epoch": 0.39,
"learning_rate": 6.184773310521814e-05,
"loss": 2.6021,
"theoretical_loss": 3.311574592362054,
"tokens_seen": 3110469632
},
{
"epoch": 0.39,
"learning_rate": 6.18049615055603e-05,
"loss": 2.3898,
"theoretical_loss": 3.311563942016171,
"tokens_seen": 3110600704
},
{
"epoch": 0.39,
"learning_rate": 6.176218990590248e-05,
"loss": 2.603,
"theoretical_loss": 3.311553292244705,
"tokens_seen": 3110731776
},
{
"epoch": 0.39,
"learning_rate": 6.171941830624466e-05,
"loss": 2.5603,
"theoretical_loss": 3.3115426430476016,
"tokens_seen": 3110862848
},
{
"epoch": 0.39,
"learning_rate": 6.167664670658683e-05,
"loss": 2.4966,
"theoretical_loss": 3.3115319944248056,
"tokens_seen": 3110993920
},
{
"epoch": 0.39,
"learning_rate": 6.1633875106929e-05,
"loss": 2.6126,
"theoretical_loss": 3.311521346376261,
"tokens_seen": 3111124992
},
{
"epoch": 0.39,
"learning_rate": 6.159110350727118e-05,
"loss": 2.535,
"theoretical_loss": 3.311510698901914,
"tokens_seen": 3111256064
},
{
"epoch": 0.39,
"learning_rate": 6.154833190761335e-05,
"loss": 2.5019,
"theoretical_loss": 3.311500052001708,
"tokens_seen": 3111387136
},
{
"epoch": 0.39,
"learning_rate": 6.150556030795552e-05,
"loss": 2.6325,
"theoretical_loss": 3.311489405675588,
"tokens_seen": 3111518208
},
{
"epoch": 0.39,
"objective/train/docs_used": 1704747,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4722747802734375,
"objective/train/theoretical_loss": 3.3114787599235003,
"objective/train/tokens_used": 141700576,
"theoretical_loss": 3.3114787599235003,
"tokens_seen": 3111649280
},
{
"epoch": 0.39,
"learning_rate": 6.146278870829769e-05,
"loss": 2.4573,
"theoretical_loss": 3.3114787599235003,
"tokens_seen": 3111649280
},
{
"epoch": 0.39,
"learning_rate": 6.142001710863987e-05,
"loss": 2.6906,
"theoretical_loss": 3.3114681147453884,
"tokens_seen": 3111780352
},
{
"epoch": 0.39,
"learning_rate": 6.137724550898205e-05,
"loss": 2.5696,
"theoretical_loss": 3.311457470141198,
"tokens_seen": 3111911424
},
{
"epoch": 0.39,
"learning_rate": 6.133447390932421e-05,
"loss": 2.3866,
"theoretical_loss": 3.311446826110873,
"tokens_seen": 3112042496
},
{
"epoch": 0.39,
"learning_rate": 6.129170230966639e-05,
"loss": 2.5784,
"theoretical_loss": 3.311436182654359,
"tokens_seen": 3112173568
},
{
"epoch": 0.39,
"learning_rate": 6.124893071000855e-05,
"loss": 2.4109,
"theoretical_loss": 3.311425539771601,
"tokens_seen": 3112304640
},
{
"epoch": 0.39,
"learning_rate": 6.120615911035072e-05,
"loss": 2.5248,
"theoretical_loss": 3.3114148974625435,
"tokens_seen": 3112435712
},
{
"epoch": 0.39,
"learning_rate": 6.116338751069291e-05,
"loss": 2.5607,
"theoretical_loss": 3.3114042557271315,
"tokens_seen": 3112566784
},
{
"epoch": 0.4,
"learning_rate": 6.112061591103508e-05,
"loss": 2.4251,
"theoretical_loss": 3.3113936145653105,
"tokens_seen": 3112697856
},
{
"epoch": 0.4,
"learning_rate": 6.107784431137725e-05,
"loss": 2.6288,
"theoretical_loss": 3.3113829739770244,
"tokens_seen": 3112828928
},
{
"epoch": 0.4,
"learning_rate": 6.103507271171942e-05,
"loss": 2.4883,
"theoretical_loss": 3.311372333962219,
"tokens_seen": 3112960000
},
{
"epoch": 0.4,
"learning_rate": 6.099230111206159e-05,
"loss": 2.5164,
"theoretical_loss": 3.311361694520839,
"tokens_seen": 3113091072
},
{
"epoch": 0.4,
"learning_rate": 6.094952951240377e-05,
"loss": 2.5751,
"theoretical_loss": 3.311351055652829,
"tokens_seen": 3113222144
},
{
"epoch": 0.4,
"objective/train/docs_used": 1705767,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8438971042633057,
"objective/train/theoretical_loss": 3.311345736433821,
"objective/train/tokens_used": 143338976,
"theoretical_loss": 3.311345736433821,
"tokens_seen": 3113287680
},
{
"epoch": 0.4,
"learning_rate": 6.090675791274594e-05,
"loss": 2.5603,
"theoretical_loss": 3.3113404173581342,
"tokens_seen": 3113353216
},
{
"epoch": 0.4,
"learning_rate": 6.086398631308812e-05,
"loss": 2.6501,
"theoretical_loss": 3.3113297796367,
"tokens_seen": 3113484288
},
{
"epoch": 0.4,
"learning_rate": 6.082121471343028e-05,
"loss": 2.3713,
"theoretical_loss": 3.311319142488471,
"tokens_seen": 3113615360
},
{
"epoch": 0.4,
"learning_rate": 6.0778443113772454e-05,
"loss": 2.6465,
"theoretical_loss": 3.311308505913392,
"tokens_seen": 3113746432
},
{
"epoch": 0.4,
"learning_rate": 6.073567151411463e-05,
"loss": 2.5023,
"theoretical_loss": 3.3112978699114084,
"tokens_seen": 3113877504
},
{
"epoch": 0.4,
"learning_rate": 6.0692899914456804e-05,
"loss": 2.5409,
"theoretical_loss": 3.3112872344824646,
"tokens_seen": 3114008576
},
{
"epoch": 0.4,
"learning_rate": 6.065012831479898e-05,
"loss": 2.5392,
"theoretical_loss": 3.3112765996265066,
"tokens_seen": 3114139648
},
{
"epoch": 0.4,
"learning_rate": 6.060735671514115e-05,
"loss": 2.565,
"theoretical_loss": 3.3112659653434786,
"tokens_seen": 3114270720
},
{
"epoch": 0.4,
"learning_rate": 6.056458511548332e-05,
"loss": 2.4205,
"theoretical_loss": 3.3112553316333253,
"tokens_seen": 3114401792
},
{
"epoch": 0.4,
"learning_rate": 6.05218135158255e-05,
"loss": 2.5678,
"theoretical_loss": 3.311244698495993,
"tokens_seen": 3114532864
},
{
"epoch": 0.4,
"learning_rate": 6.047904191616767e-05,
"loss": 2.5991,
"theoretical_loss": 3.3112340659314254,
"tokens_seen": 3114663936
},
{
"epoch": 0.4,
"learning_rate": 6.043627031650983e-05,
"loss": 2.6641,
"theoretical_loss": 3.3112234339395687,
"tokens_seen": 3114795008
},
{
"epoch": 0.4,
"objective/train/docs_used": 1706383,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5129618644714355,
"objective/train/theoretical_loss": 3.311212802520367,
"objective/train/tokens_used": 144977376,
"theoretical_loss": 3.311212802520367,
"tokens_seen": 3114926080
},
{
"epoch": 0.4,
"learning_rate": 6.039349871685202e-05,
"loss": 2.4797,
"theoretical_loss": 3.311212802520367,
"tokens_seen": 3114926080
},
{
"epoch": 0.4,
"learning_rate": 6.035072711719418e-05,
"loss": 2.5095,
"theoretical_loss": 3.311202171673766,
"tokens_seen": 3115057152
},
{
"epoch": 0.4,
"learning_rate": 6.030795551753636e-05,
"loss": 2.5513,
"theoretical_loss": 3.3111915413997104,
"tokens_seen": 3115188224
},
{
"epoch": 0.4,
"learning_rate": 6.026518391787853e-05,
"loss": 2.5783,
"theoretical_loss": 3.3111809116981457,
"tokens_seen": 3115319296
},
{
"epoch": 0.4,
"learning_rate": 6.02224123182207e-05,
"loss": 2.4676,
"theoretical_loss": 3.3111702825690164,
"tokens_seen": 3115450368
},
{
"epoch": 0.4,
"learning_rate": 6.017964071856288e-05,
"loss": 2.4629,
"theoretical_loss": 3.311159654012268,
"tokens_seen": 3115581440
},
{
"epoch": 0.4,
"learning_rate": 6.013686911890505e-05,
"loss": 2.6033,
"theoretical_loss": 3.311149026027845,
"tokens_seen": 3115712512
},
{
"epoch": 0.41,
"learning_rate": 6.0094097519247226e-05,
"loss": 2.5076,
"theoretical_loss": 3.3111383986156935,
"tokens_seen": 3115843584
},
{
"epoch": 0.41,
"learning_rate": 6.00513259195894e-05,
"loss": 2.6581,
"theoretical_loss": 3.3111277717757583,
"tokens_seen": 3115974656
},
{
"epoch": 0.41,
"learning_rate": 6.000855431993156e-05,
"loss": 2.6139,
"theoretical_loss": 3.3111171455079838,
"tokens_seen": 3116105728
},
{
"epoch": 0.41,
"learning_rate": 5.996578272027375e-05,
"loss": 2.6152,
"theoretical_loss": 3.311106519812316,
"tokens_seen": 3116236800
},
{
"epoch": 0.41,
"learning_rate": 5.992301112061591e-05,
"loss": 2.6476,
"theoretical_loss": 3.3110958946887,
"tokens_seen": 3116367872
},
{
"epoch": 0.41,
"learning_rate": 5.988023952095808e-05,
"loss": 2.5176,
"theoretical_loss": 3.31108527013708,
"tokens_seen": 3116498944
},
{
"epoch": 0.41,
"objective/train/docs_used": 1707531,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.416107654571533,
"objective/train/theoretical_loss": 3.3110799580757515,
"objective/train/tokens_used": 146615776,
"theoretical_loss": 3.3110799580757515,
"tokens_seen": 3116564480
},
{
"epoch": 0.41,
"learning_rate": 5.983746792130026e-05,
"loss": 2.6398,
"theoretical_loss": 3.311074646157402,
"tokens_seen": 3116630016
},
{
"epoch": 0.41,
"learning_rate": 5.9794696321642426e-05,
"loss": 2.598,
"theoretical_loss": 3.311064022749611,
"tokens_seen": 3116761088
},
{
"epoch": 0.41,
"learning_rate": 5.975192472198461e-05,
"loss": 2.5575,
"theoretical_loss": 3.311053399913652,
"tokens_seen": 3116892160
},
{
"epoch": 0.41,
"learning_rate": 5.9709153122326776e-05,
"loss": 2.6516,
"theoretical_loss": 3.3110427776494706,
"tokens_seen": 3117023232
},
{
"epoch": 0.41,
"learning_rate": 5.966638152266895e-05,
"loss": 2.5544,
"theoretical_loss": 3.3110321559570117,
"tokens_seen": 3117154304
},
{
"epoch": 0.41,
"learning_rate": 5.9623609923011126e-05,
"loss": 2.5245,
"theoretical_loss": 3.31102153483622,
"tokens_seen": 3117285376
},
{
"epoch": 0.41,
"learning_rate": 5.95808383233533e-05,
"loss": 2.5908,
"theoretical_loss": 3.3110109142870416,
"tokens_seen": 3117416448
},
{
"epoch": 0.41,
"learning_rate": 5.9538066723695476e-05,
"loss": 2.5651,
"theoretical_loss": 3.311000294309421,
"tokens_seen": 3117547520
},
{
"epoch": 0.41,
"learning_rate": 5.949529512403764e-05,
"loss": 2.631,
"theoretical_loss": 3.310989674903304,
"tokens_seen": 3117678592
},
{
"epoch": 0.41,
"learning_rate": 5.945252352437981e-05,
"loss": 2.6308,
"theoretical_loss": 3.310979056068635,
"tokens_seen": 3117809664
},
{
"epoch": 0.41,
"learning_rate": 5.940975192472199e-05,
"loss": 2.3692,
"theoretical_loss": 3.3109684378053603,
"tokens_seen": 3117940736
},
{
"epoch": 0.41,
"learning_rate": 5.936698032506416e-05,
"loss": 2.5465,
"theoretical_loss": 3.3109578201134244,
"tokens_seen": 3118071808
},
{
"epoch": 0.41,
"objective/train/docs_used": 1708692,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6952877044677734,
"objective/train/theoretical_loss": 3.3109472029927725,
"objective/train/tokens_used": 148254176,
"theoretical_loss": 3.3109472029927725,
"tokens_seen": 3118202880
},
{
"epoch": 0.41,
"learning_rate": 5.932420872540634e-05,
"loss": 2.5085,
"theoretical_loss": 3.3109472029927725,
"tokens_seen": 3118202880
},
{
"epoch": 0.41,
"learning_rate": 5.9281437125748505e-05,
"loss": 2.4693,
"theoretical_loss": 3.31093658644335,
"tokens_seen": 3118333952
},
{
"epoch": 0.41,
"learning_rate": 5.9238665526090676e-05,
"loss": 2.5199,
"theoretical_loss": 3.3109259704651026,
"tokens_seen": 3118465024
},
{
"epoch": 0.41,
"learning_rate": 5.9195893926432855e-05,
"loss": 2.5373,
"theoretical_loss": 3.3109153550579746,
"tokens_seen": 3118596096
},
{
"epoch": 0.41,
"learning_rate": 5.9153122326775026e-05,
"loss": 2.625,
"theoretical_loss": 3.3109047402219125,
"tokens_seen": 3118727168
},
{
"epoch": 0.41,
"learning_rate": 5.911035072711719e-05,
"loss": 2.5271,
"theoretical_loss": 3.3108941259568607,
"tokens_seen": 3118858240
},
{
"epoch": 0.42,
"learning_rate": 5.906757912745937e-05,
"loss": 2.4392,
"theoretical_loss": 3.3108835122627647,
"tokens_seen": 3118989312
},
{
"epoch": 0.42,
"learning_rate": 5.902480752780154e-05,
"loss": 2.6036,
"theoretical_loss": 3.3108728991395697,
"tokens_seen": 3119120384
},
{
"epoch": 0.42,
"learning_rate": 5.898203592814372e-05,
"loss": 2.4951,
"theoretical_loss": 3.3108622865872213,
"tokens_seen": 3119251456
},
{
"epoch": 0.42,
"learning_rate": 5.893926432848589e-05,
"loss": 2.5654,
"theoretical_loss": 3.3108516746056647,
"tokens_seen": 3119382528
},
{
"epoch": 0.42,
"learning_rate": 5.8896492728828055e-05,
"loss": 2.4893,
"theoretical_loss": 3.310841063194845,
"tokens_seen": 3119513600
},
{
"epoch": 0.42,
"learning_rate": 5.8853721129170233e-05,
"loss": 2.5437,
"theoretical_loss": 3.3108304523547076,
"tokens_seen": 3119644672
},
{
"epoch": 0.42,
"learning_rate": 5.8810949529512405e-05,
"loss": 2.4567,
"theoretical_loss": 3.310819842085198,
"tokens_seen": 3119775744
},
{
"epoch": 0.42,
"objective/train/docs_used": 1709277,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5585927963256836,
"objective/train/theoretical_loss": 3.3108145371644113,
"objective/train/tokens_used": 149892576,
"theoretical_loss": 3.3108145371644113,
"tokens_seen": 3119841280
},
{
"epoch": 0.42,
"learning_rate": 5.876817792985458e-05,
"loss": 2.4938,
"theoretical_loss": 3.3108092323862617,
"tokens_seen": 3119906816
},
{
"epoch": 0.42,
"learning_rate": 5.8725406330196755e-05,
"loss": 2.5817,
"theoretical_loss": 3.3107986232578437,
"tokens_seen": 3120037888
},
{
"epoch": 0.42,
"learning_rate": 5.868263473053892e-05,
"loss": 2.5277,
"theoretical_loss": 3.310788014699889,
"tokens_seen": 3120168960
},
{
"epoch": 0.42,
"learning_rate": 5.86398631308811e-05,
"loss": 2.5421,
"theoretical_loss": 3.3107774067123437,
"tokens_seen": 3120300032
},
{
"epoch": 0.42,
"learning_rate": 5.859709153122327e-05,
"loss": 2.566,
"theoretical_loss": 3.310766799295153,
"tokens_seen": 3120431104
},
{
"epoch": 0.42,
"learning_rate": 5.855431993156544e-05,
"loss": 2.7047,
"theoretical_loss": 3.3107561924482622,
"tokens_seen": 3120562176
},
{
"epoch": 0.42,
"learning_rate": 5.851154833190762e-05,
"loss": 2.4278,
"theoretical_loss": 3.3107455861716164,
"tokens_seen": 3120693248
},
{
"epoch": 0.42,
"learning_rate": 5.8468776732249784e-05,
"loss": 2.551,
"theoretical_loss": 3.3107349804651616,
"tokens_seen": 3120824320
},
{
"epoch": 0.42,
"learning_rate": 5.842600513259196e-05,
"loss": 2.4814,
"theoretical_loss": 3.3107243753288427,
"tokens_seen": 3120955392
},
{
"epoch": 0.42,
"learning_rate": 5.8383233532934134e-05,
"loss": 2.5773,
"theoretical_loss": 3.310713770762605,
"tokens_seen": 3121086464
},
{
"epoch": 0.42,
"learning_rate": 5.8340461933276305e-05,
"loss": 2.6013,
"theoretical_loss": 3.3107031667663946,
"tokens_seen": 3121217536
},
{
"epoch": 0.42,
"learning_rate": 5.8297690333618484e-05,
"loss": 2.4748,
"theoretical_loss": 3.3106925633401563,
"tokens_seen": 3121348608
},
{
"epoch": 0.42,
"objective/train/docs_used": 1710494,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6660287380218506,
"objective/train/theoretical_loss": 3.3106819604838353,
"objective/train/tokens_used": 151530976,
"theoretical_loss": 3.3106819604838353,
"tokens_seen": 3121479680
},
{
"epoch": 0.42,
"learning_rate": 5.825491873396065e-05,
"loss": 2.6014,
"theoretical_loss": 3.3106819604838353,
"tokens_seen": 3121479680
},
{
"epoch": 0.42,
"learning_rate": 5.8212147134302833e-05,
"loss": 2.3807,
"theoretical_loss": 3.310671358197378,
"tokens_seen": 3121610752
},
{
"epoch": 0.42,
"learning_rate": 5.8169375534645e-05,
"loss": 2.5044,
"theoretical_loss": 3.3106607564807295,
"tokens_seen": 3121741824
},
{
"epoch": 0.42,
"learning_rate": 5.812660393498717e-05,
"loss": 2.4961,
"theoretical_loss": 3.3106501553338346,
"tokens_seen": 3121872896
},
{
"epoch": 0.43,
"learning_rate": 5.808383233532935e-05,
"loss": 2.5321,
"theoretical_loss": 3.3106395547566394,
"tokens_seen": 3122003968
},
{
"epoch": 0.43,
"learning_rate": 5.804106073567151e-05,
"loss": 2.6361,
"theoretical_loss": 3.3106289547490895,
"tokens_seen": 3122135040
},
{
"epoch": 0.43,
"learning_rate": 5.79982891360137e-05,
"loss": 2.5194,
"theoretical_loss": 3.3106183553111297,
"tokens_seen": 3122266112
},
{
"epoch": 0.43,
"learning_rate": 5.795551753635586e-05,
"loss": 2.5992,
"theoretical_loss": 3.310607756442706,
"tokens_seen": 3122397184
},
{
"epoch": 0.43,
"learning_rate": 5.7912745936698034e-05,
"loss": 2.4211,
"theoretical_loss": 3.3105971581437634,
"tokens_seen": 3122528256
},
{
"epoch": 0.43,
"learning_rate": 5.786997433704021e-05,
"loss": 2.4752,
"theoretical_loss": 3.310586560414248,
"tokens_seen": 3122659328
},
{
"epoch": 0.43,
"learning_rate": 5.782720273738238e-05,
"loss": 2.5157,
"theoretical_loss": 3.3105759632541054,
"tokens_seen": 3122790400
},
{
"epoch": 0.43,
"learning_rate": 5.778443113772455e-05,
"loss": 2.4493,
"theoretical_loss": 3.3105653666632806,
"tokens_seen": 3122921472
},
{
"epoch": 0.43,
"learning_rate": 5.774165953806673e-05,
"loss": 2.4614,
"theoretical_loss": 3.310554770641719,
"tokens_seen": 3123052544
},
{
"epoch": 0.43,
"objective/train/docs_used": 1711181,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3176870346069336,
"objective/train/theoretical_loss": 3.310549472844395,
"objective/train/tokens_used": 153169376,
"theoretical_loss": 3.310549472844395,
"tokens_seen": 3123118080
},
{
"epoch": 0.43,
"learning_rate": 5.76988879384089e-05,
"loss": 2.4795,
"theoretical_loss": 3.3105441751893667,
"tokens_seen": 3123183616
},
{
"epoch": 0.43,
"learning_rate": 5.765611633875108e-05,
"loss": 2.5418,
"theoretical_loss": 3.310533580306169,
"tokens_seen": 3123314688
},
{
"epoch": 0.43,
"learning_rate": 5.761334473909324e-05,
"loss": 2.5666,
"theoretical_loss": 3.310522985992071,
"tokens_seen": 3123445760
},
{
"epoch": 0.43,
"learning_rate": 5.757057313943541e-05,
"loss": 2.5606,
"theoretical_loss": 3.310512392247019,
"tokens_seen": 3123576832
},
{
"epoch": 0.43,
"learning_rate": 5.752780153977759e-05,
"loss": 2.6241,
"theoretical_loss": 3.310501799070958,
"tokens_seen": 3123707904
},
{
"epoch": 0.43,
"learning_rate": 5.748502994011976e-05,
"loss": 2.4346,
"theoretical_loss": 3.310491206463834,
"tokens_seen": 3123838976
},
{
"epoch": 0.43,
"learning_rate": 5.744225834046194e-05,
"loss": 2.6249,
"theoretical_loss": 3.3104806144255923,
"tokens_seen": 3123970048
},
{
"epoch": 0.43,
"learning_rate": 5.739948674080411e-05,
"loss": 2.6147,
"theoretical_loss": 3.3104700229561783,
"tokens_seen": 3124101120
},
{
"epoch": 0.43,
"learning_rate": 5.735671514114628e-05,
"loss": 2.6046,
"theoretical_loss": 3.310459432055538,
"tokens_seen": 3124232192
},
{
"epoch": 0.43,
"learning_rate": 5.7313943541488456e-05,
"loss": 2.4672,
"theoretical_loss": 3.3104488417236166,
"tokens_seen": 3124363264
},
{
"epoch": 0.43,
"learning_rate": 5.727117194183063e-05,
"loss": 2.5318,
"theoretical_loss": 3.31043825196036,
"tokens_seen": 3124494336
},
{
"epoch": 0.43,
"learning_rate": 5.7228400342172805e-05,
"loss": 2.433,
"theoretical_loss": 3.310427662765714,
"tokens_seen": 3124625408
},
{
"epoch": 0.43,
"objective/train/docs_used": 1712507,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4671425819396973,
"objective/train/theoretical_loss": 3.310417074139624,
"objective/train/tokens_used": 154807776,
"theoretical_loss": 3.310417074139624,
"tokens_seen": 3124756480
},
{
"epoch": 0.43,
"learning_rate": 5.718562874251498e-05,
"loss": 2.3719,
"theoretical_loss": 3.310417074139624,
"tokens_seen": 3124756480
},
{
"epoch": 0.43,
"learning_rate": 5.714285714285714e-05,
"loss": 2.3982,
"theoretical_loss": 3.3104064860820355,
"tokens_seen": 3124887552
},
{
"epoch": 0.43,
"learning_rate": 5.710008554319932e-05,
"loss": 2.6333,
"theoretical_loss": 3.310395898592894,
"tokens_seen": 3125018624
},
{
"epoch": 0.44,
"learning_rate": 5.705731394354149e-05,
"loss": 2.4311,
"theoretical_loss": 3.310385311672145,
"tokens_seen": 3125149696
},
{
"epoch": 0.44,
"learning_rate": 5.7014542343883656e-05,
"loss": 2.3717,
"theoretical_loss": 3.3103747253197353,
"tokens_seen": 3125280768
},
{
"epoch": 0.44,
"learning_rate": 5.697177074422584e-05,
"loss": 2.582,
"theoretical_loss": 3.310364139535609,
"tokens_seen": 3125411840
},
{
"epoch": 0.44,
"learning_rate": 5.6928999144568006e-05,
"loss": 2.4568,
"theoretical_loss": 3.3103535543197133,
"tokens_seen": 3125542912
},
{
"epoch": 0.44,
"learning_rate": 5.6886227544910184e-05,
"loss": 2.6903,
"theoretical_loss": 3.310342969671993,
"tokens_seen": 3125673984
},
{
"epoch": 0.44,
"learning_rate": 5.6843455945252356e-05,
"loss": 2.5035,
"theoretical_loss": 3.3103323855923934,
"tokens_seen": 3125805056
},
{
"epoch": 0.44,
"learning_rate": 5.680068434559452e-05,
"loss": 2.5451,
"theoretical_loss": 3.3103218020808605,
"tokens_seen": 3125936128
},
{
"epoch": 0.44,
"learning_rate": 5.6757912745936706e-05,
"loss": 2.4153,
"theoretical_loss": 3.3103112191373407,
"tokens_seen": 3126067200
},
{
"epoch": 0.44,
"learning_rate": 5.671514114627887e-05,
"loss": 2.5702,
"theoretical_loss": 3.3103006367617787,
"tokens_seen": 3126198272
},
{
"epoch": 0.44,
"learning_rate": 5.667236954662105e-05,
"loss": 2.4602,
"theoretical_loss": 3.310290054954121,
"tokens_seen": 3126329344
},
{
"epoch": 0.44,
"objective/train/docs_used": 1713045,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.963982105255127,
"objective/train/theoretical_loss": 3.310284764263239,
"objective/train/tokens_used": 156446176,
"theoretical_loss": 3.310284764263239,
"tokens_seen": 3126394880
},
{
"epoch": 0.44,
"learning_rate": 5.662959794696322e-05,
"loss": 2.487,
"theoretical_loss": 3.3102794737143126,
"tokens_seen": 3126460416
},
{
"epoch": 0.44,
"learning_rate": 5.6586826347305385e-05,
"loss": 2.6109,
"theoretical_loss": 3.3102688930423,
"tokens_seen": 3126591488
},
{
"epoch": 0.44,
"learning_rate": 5.654405474764757e-05,
"loss": 2.5076,
"theoretical_loss": 3.3102583129380285,
"tokens_seen": 3126722560
},
{
"epoch": 0.44,
"learning_rate": 5.6501283147989735e-05,
"loss": 2.4448,
"theoretical_loss": 3.3102477334014435,
"tokens_seen": 3126853632
},
{
"epoch": 0.44,
"learning_rate": 5.6458511548331906e-05,
"loss": 2.5856,
"theoretical_loss": 3.3102371544324916,
"tokens_seen": 3126984704
},
{
"epoch": 0.44,
"learning_rate": 5.6415739948674085e-05,
"loss": 2.3335,
"theoretical_loss": 3.3102265760311176,
"tokens_seen": 3127115776
},
{
"epoch": 0.44,
"learning_rate": 5.6372968349016256e-05,
"loss": 2.4991,
"theoretical_loss": 3.310215998197268,
"tokens_seen": 3127246848
},
{
"epoch": 0.44,
"learning_rate": 5.6330196749358434e-05,
"loss": 2.4256,
"theoretical_loss": 3.310205420930888,
"tokens_seen": 3127377920
},
{
"epoch": 0.44,
"learning_rate": 5.62874251497006e-05,
"loss": 2.4177,
"theoretical_loss": 3.310194844231924,
"tokens_seen": 3127508992
},
{
"epoch": 0.44,
"learning_rate": 5.624465355004277e-05,
"loss": 2.4802,
"theoretical_loss": 3.3101842681003215,
"tokens_seen": 3127640064
},
{
"epoch": 0.44,
"learning_rate": 5.620188195038495e-05,
"loss": 2.5473,
"theoretical_loss": 3.310173692536026,
"tokens_seen": 3127771136
},
{
"epoch": 0.44,
"learning_rate": 5.615911035072712e-05,
"loss": 2.6317,
"theoretical_loss": 3.3101631175389836,
"tokens_seen": 3127902208
},
{
"epoch": 0.44,
"objective/train/docs_used": 1713856,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.385249137878418,
"objective/train/theoretical_loss": 3.31015254310914,
"objective/train/tokens_used": 158084576,
"theoretical_loss": 3.31015254310914,
"tokens_seen": 3128033280
},
{
"epoch": 0.44,
"learning_rate": 5.61163387510693e-05,
"loss": 2.5323,
"theoretical_loss": 3.31015254310914,
"tokens_seen": 3128033280
},
{
"epoch": 0.44,
"learning_rate": 5.6073567151411464e-05,
"loss": 2.4468,
"theoretical_loss": 3.310141969246441,
"tokens_seen": 3128164352
},
{
"epoch": 0.45,
"learning_rate": 5.6030795551753635e-05,
"loss": 2.5161,
"theoretical_loss": 3.3101313959508327,
"tokens_seen": 3128295424
},
{
"epoch": 0.45,
"learning_rate": 5.5988023952095813e-05,
"loss": 2.3882,
"theoretical_loss": 3.3101208232222605,
"tokens_seen": 3128426496
},
{
"epoch": 0.45,
"learning_rate": 5.5945252352437985e-05,
"loss": 2.6025,
"theoretical_loss": 3.310110251060671,
"tokens_seen": 3128557568
},
{
"epoch": 0.45,
"learning_rate": 5.590248075278016e-05,
"loss": 2.6315,
"theoretical_loss": 3.3100996794660085,
"tokens_seen": 3128688640
},
{
"epoch": 0.45,
"learning_rate": 5.585970915312233e-05,
"loss": 2.4923,
"theoretical_loss": 3.3100891084382207,
"tokens_seen": 3128819712
},
{
"epoch": 0.45,
"learning_rate": 5.58169375534645e-05,
"loss": 2.4411,
"theoretical_loss": 3.310078537977252,
"tokens_seen": 3128950784
},
{
"epoch": 0.45,
"learning_rate": 5.577416595380668e-05,
"loss": 2.4036,
"theoretical_loss": 3.3100679680830494,
"tokens_seen": 3129081856
},
{
"epoch": 0.45,
"learning_rate": 5.573139435414885e-05,
"loss": 2.5574,
"theoretical_loss": 3.3100573987555575,
"tokens_seen": 3129212928
},
{
"epoch": 0.45,
"learning_rate": 5.5688622754491014e-05,
"loss": 2.4951,
"theoretical_loss": 3.3100468299947234,
"tokens_seen": 3129344000
},
{
"epoch": 0.45,
"learning_rate": 5.564585115483319e-05,
"loss": 2.4359,
"theoretical_loss": 3.3100362618004926,
"tokens_seen": 3129475072
},
{
"epoch": 0.45,
"learning_rate": 5.5603079555175364e-05,
"loss": 2.4827,
"theoretical_loss": 3.3100256941728103,
"tokens_seen": 3129606144
},
{
"epoch": 0.45,
"objective/train/docs_used": 1714507,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3790676593780518,
"objective/train/theoretical_loss": 3.3100204105714086,
"objective/train/tokens_used": 159722976,
"theoretical_loss": 3.3100204105714086,
"tokens_seen": 3129671680
},
{
"epoch": 0.45,
"learning_rate": 5.556030795551754e-05,
"loss": 2.4887,
"theoretical_loss": 3.3100151271116234,
"tokens_seen": 3129737216
},
{
"epoch": 0.45,
"learning_rate": 5.5517536355859714e-05,
"loss": 2.475,
"theoretical_loss": 3.3100045606168775,
"tokens_seen": 3129868288
},
{
"epoch": 0.45,
"learning_rate": 5.547476475620188e-05,
"loss": 2.6397,
"theoretical_loss": 3.3099939946885186,
"tokens_seen": 3129999360
},
{
"epoch": 0.45,
"learning_rate": 5.543199315654406e-05,
"loss": 2.5231,
"theoretical_loss": 3.309983429326492,
"tokens_seen": 3130130432
},
{
"epoch": 0.45,
"learning_rate": 5.538922155688623e-05,
"loss": 2.5931,
"theoretical_loss": 3.309972864530744,
"tokens_seen": 3130261504
},
{
"epoch": 0.45,
"learning_rate": 5.5346449957228407e-05,
"loss": 2.5449,
"theoretical_loss": 3.309962300301221,
"tokens_seen": 3130392576
},
{
"epoch": 0.45,
"learning_rate": 5.530367835757058e-05,
"loss": 2.4872,
"theoretical_loss": 3.3099517366378683,
"tokens_seen": 3130523648
},
{
"epoch": 0.45,
"learning_rate": 5.526090675791274e-05,
"loss": 2.5182,
"theoretical_loss": 3.3099411735406323,
"tokens_seen": 3130654720
},
{
"epoch": 0.45,
"learning_rate": 5.521813515825492e-05,
"loss": 2.4474,
"theoretical_loss": 3.3099306110094586,
"tokens_seen": 3130785792
},
{
"epoch": 0.45,
"learning_rate": 5.517536355859709e-05,
"loss": 2.712,
"theoretical_loss": 3.3099200490442935,
"tokens_seen": 3130916864
},
{
"epoch": 0.45,
"learning_rate": 5.5132591958939264e-05,
"loss": 2.5673,
"theoretical_loss": 3.3099094876450827,
"tokens_seen": 3131047936
},
{
"epoch": 0.45,
"learning_rate": 5.508982035928144e-05,
"loss": 2.6539,
"theoretical_loss": 3.3098989268117722,
"tokens_seen": 3131179008
},
{
"epoch": 0.45,
"objective/train/docs_used": 1715427,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.553161382675171,
"objective/train/theoretical_loss": 3.3098883665443086,
"objective/train/tokens_used": 161361376,
"theoretical_loss": 3.3098883665443086,
"tokens_seen": 3131310080
},
{
"epoch": 0.46,
"learning_rate": 5.504704875962361e-05,
"loss": 2.5297,
"theoretical_loss": 3.3098883665443086,
"tokens_seen": 3131310080
},
{
"epoch": 0.46,
"learning_rate": 5.500427715996579e-05,
"loss": 2.5105,
"theoretical_loss": 3.309877806842637,
"tokens_seen": 3131441152
},
{
"epoch": 0.46,
"learning_rate": 5.496150556030796e-05,
"loss": 2.6815,
"theoretical_loss": 3.3098672477067037,
"tokens_seen": 3131572224
},
{
"epoch": 0.46,
"learning_rate": 5.491873396065013e-05,
"loss": 2.5448,
"theoretical_loss": 3.309856689136455,
"tokens_seen": 3131703296
},
{
"epoch": 0.46,
"learning_rate": 5.487596236099231e-05,
"loss": 2.5363,
"theoretical_loss": 3.3098461311318363,
"tokens_seen": 3131834368
},
{
"epoch": 0.46,
"learning_rate": 5.483319076133447e-05,
"loss": 2.6097,
"theoretical_loss": 3.3098355736927947,
"tokens_seen": 3131965440
},
{
"epoch": 0.46,
"learning_rate": 5.479041916167666e-05,
"loss": 2.3725,
"theoretical_loss": 3.3098250168192753,
"tokens_seen": 3132096512
},
{
"epoch": 0.46,
"learning_rate": 5.474764756201882e-05,
"loss": 2.6798,
"theoretical_loss": 3.3098144605112245,
"tokens_seen": 3132227584
},
{
"epoch": 0.46,
"learning_rate": 5.470487596236099e-05,
"loss": 2.4954,
"theoretical_loss": 3.3098039047685885,
"tokens_seen": 3132358656
},
{
"epoch": 0.46,
"learning_rate": 5.466210436270317e-05,
"loss": 2.6217,
"theoretical_loss": 3.3097933495913128,
"tokens_seen": 3132489728
},
{
"epoch": 0.46,
"learning_rate": 5.4619332763045336e-05,
"loss": 2.6219,
"theoretical_loss": 3.309782794979344,
"tokens_seen": 3132620800
},
{
"epoch": 0.46,
"learning_rate": 5.457656116338752e-05,
"loss": 2.4051,
"theoretical_loss": 3.309772240932628,
"tokens_seen": 3132751872
},
{
"epoch": 0.46,
"learning_rate": 5.4533789563729686e-05,
"loss": 2.6075,
"theoretical_loss": 3.30976168745111,
"tokens_seen": 3132882944
},
{
"epoch": 0.46,
"objective/train/docs_used": 1716696,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7211923599243164,
"objective/train/theoretical_loss": 3.309756410922285,
"objective/train/tokens_used": 162999776,
"theoretical_loss": 3.309756410922285,
"tokens_seen": 3132948480
},
{
"epoch": 0.46,
"learning_rate": 5.449101796407186e-05,
"loss": 2.7641,
"theoretical_loss": 3.3097511345347383,
"tokens_seen": 3133014016
},
{
"epoch": 0.46,
"learning_rate": 5.4448246364414036e-05,
"loss": 2.4953,
"theoretical_loss": 3.3097405821834567,
"tokens_seen": 3133145088
},
{
"epoch": 0.46,
"learning_rate": 5.44054747647562e-05,
"loss": 2.5759,
"theoretical_loss": 3.309730030397213,
"tokens_seen": 3133276160
},
{
"epoch": 0.46,
"learning_rate": 5.436270316509837e-05,
"loss": 2.5484,
"theoretical_loss": 3.309719479175952,
"tokens_seen": 3133407232
},
{
"epoch": 0.46,
"learning_rate": 5.431993156544055e-05,
"loss": 2.594,
"theoretical_loss": 3.309708928519621,
"tokens_seen": 3133538304
},
{
"epoch": 0.46,
"learning_rate": 5.427715996578272e-05,
"loss": 2.589,
"theoretical_loss": 3.309698378428165,
"tokens_seen": 3133669376
},
{
"epoch": 0.46,
"learning_rate": 5.42343883661249e-05,
"loss": 2.4454,
"theoretical_loss": 3.3096878289015303,
"tokens_seen": 3133800448
},
{
"epoch": 0.46,
"learning_rate": 5.419161676646707e-05,
"loss": 2.533,
"theoretical_loss": 3.3096772799396637,
"tokens_seen": 3133931520
},
{
"epoch": 0.46,
"learning_rate": 5.4148845166809236e-05,
"loss": 2.5289,
"theoretical_loss": 3.309666731542511,
"tokens_seen": 3134062592
},
{
"epoch": 0.46,
"learning_rate": 5.4106073567151415e-05,
"loss": 2.4881,
"theoretical_loss": 3.3096561837100187,
"tokens_seen": 3134193664
},
{
"epoch": 0.46,
"learning_rate": 5.4063301967493586e-05,
"loss": 2.5104,
"theoretical_loss": 3.3096456364421325,
"tokens_seen": 3134324736
},
{
"epoch": 0.47,
"learning_rate": 5.4020530367835764e-05,
"loss": 2.5572,
"theoretical_loss": 3.3096350897387983,
"tokens_seen": 3134455808
},
{
"epoch": 0.47,
"objective/train/docs_used": 1717402,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.579829692840576,
"objective/train/theoretical_loss": 3.309624543599963,
"objective/train/tokens_used": 164638176,
"theoretical_loss": 3.309624543599963,
"tokens_seen": 3134586880
},
{
"epoch": 0.47,
"learning_rate": 5.3977758768177936e-05,
"loss": 2.5648,
"theoretical_loss": 3.309624543599963,
"tokens_seen": 3134586880
},
{
"epoch": 0.47,
"learning_rate": 5.39349871685201e-05,
"loss": 2.6332,
"theoretical_loss": 3.3096139980255725,
"tokens_seen": 3134717952
},
{
"epoch": 0.47,
"learning_rate": 5.389221556886228e-05,
"loss": 2.5212,
"theoretical_loss": 3.3096034530155727,
"tokens_seen": 3134849024
},
{
"epoch": 0.47,
"learning_rate": 5.384944396920445e-05,
"loss": 2.6408,
"theoretical_loss": 3.3095929085699103,
"tokens_seen": 3134980096
},
{
"epoch": 0.47,
"learning_rate": 5.380667236954663e-05,
"loss": 2.5513,
"theoretical_loss": 3.309582364688531,
"tokens_seen": 3135111168
},
{
"epoch": 0.47,
"learning_rate": 5.37639007698888e-05,
"loss": 2.5817,
"theoretical_loss": 3.3095718213713816,
"tokens_seen": 3135242240
},
{
"epoch": 0.47,
"learning_rate": 5.3721129170230965e-05,
"loss": 2.5799,
"theoretical_loss": 3.309561278618408,
"tokens_seen": 3135373312
},
{
"epoch": 0.47,
"learning_rate": 5.367835757057314e-05,
"loss": 2.6115,
"theoretical_loss": 3.309550736429556,
"tokens_seen": 3135504384
},
{
"epoch": 0.47,
"learning_rate": 5.3635585970915315e-05,
"loss": 2.5717,
"theoretical_loss": 3.3095401948047725,
"tokens_seen": 3135635456
},
{
"epoch": 0.47,
"learning_rate": 5.359281437125748e-05,
"loss": 2.5582,
"theoretical_loss": 3.3095296537440038,
"tokens_seen": 3135766528
},
{
"epoch": 0.47,
"learning_rate": 5.3550042771599665e-05,
"loss": 2.5156,
"theoretical_loss": 3.3095191132471955,
"tokens_seen": 3135897600
},
{
"epoch": 0.47,
"learning_rate": 5.350727117194183e-05,
"loss": 2.4804,
"theoretical_loss": 3.309508573314294,
"tokens_seen": 3136028672
},
{
"epoch": 0.47,
"learning_rate": 5.346449957228401e-05,
"loss": 2.4863,
"theoretical_loss": 3.309498033945246,
"tokens_seen": 3136159744
},
{
"epoch": 0.47,
"objective/train/docs_used": 1718738,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.53719162940979,
"objective/train/theoretical_loss": 3.30949276447215,
"objective/train/tokens_used": 166276576,
"theoretical_loss": 3.30949276447215,
"tokens_seen": 3136225280
},
{
"epoch": 0.47,
"learning_rate": 5.342172797262618e-05,
"loss": 2.6355,
"theoretical_loss": 3.3094874951399977,
"tokens_seen": 3136290816
},
{
"epoch": 0.47,
"learning_rate": 5.3378956372968344e-05,
"loss": 2.5385,
"theoretical_loss": 3.3094769568984947,
"tokens_seen": 3136421888
},
{
"epoch": 0.47,
"learning_rate": 5.333618477331053e-05,
"loss": 2.5406,
"theoretical_loss": 3.3094664192206844,
"tokens_seen": 3136552960
},
{
"epoch": 0.47,
"learning_rate": 5.3293413173652694e-05,
"loss": 2.4186,
"theoretical_loss": 3.309455882106512,
"tokens_seen": 3136684032
},
{
"epoch": 0.47,
"learning_rate": 5.325064157399487e-05,
"loss": 2.3413,
"theoretical_loss": 3.309445345555925,
"tokens_seen": 3136815104
},
{
"epoch": 0.47,
"learning_rate": 5.3207869974337044e-05,
"loss": 2.5227,
"theoretical_loss": 3.3094348095688684,
"tokens_seen": 3136946176
},
{
"epoch": 0.47,
"learning_rate": 5.3165098374679215e-05,
"loss": 2.3453,
"theoretical_loss": 3.3094242741452895,
"tokens_seen": 3137077248
},
{
"epoch": 0.47,
"learning_rate": 5.312232677502139e-05,
"loss": 2.5183,
"theoretical_loss": 3.3094137392851337,
"tokens_seen": 3137208320
},
{
"epoch": 0.47,
"learning_rate": 5.307955517536356e-05,
"loss": 2.5605,
"theoretical_loss": 3.3094032049883486,
"tokens_seen": 3137339392
},
{
"epoch": 0.48,
"learning_rate": 5.303678357570573e-05,
"loss": 2.3662,
"theoretical_loss": 3.309392671254879,
"tokens_seen": 3137470464
},
{
"epoch": 0.48,
"learning_rate": 5.299401197604791e-05,
"loss": 2.6077,
"theoretical_loss": 3.309382138084673,
"tokens_seen": 3137601536
},
{
"epoch": 0.48,
"learning_rate": 5.295124037639008e-05,
"loss": 2.4003,
"theoretical_loss": 3.3093716054776756,
"tokens_seen": 3137732608
},
{
"epoch": 0.48,
"objective/train/docs_used": 1719408,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7354519367218018,
"objective/train/theoretical_loss": 3.3093610734338332,
"objective/train/tokens_used": 167914976,
"theoretical_loss": 3.3093610734338332,
"tokens_seen": 3137863680
},
{
"epoch": 0.48,
"learning_rate": 5.290846877673226e-05,
"loss": 2.5897,
"theoretical_loss": 3.3093610734338332,
"tokens_seen": 3137863680
},
{
"epoch": 0.48,
"learning_rate": 5.286569717707442e-05,
"loss": 2.5798,
"theoretical_loss": 3.309350541953093,
"tokens_seen": 3137994752
},
{
"epoch": 0.48,
"learning_rate": 5.2822925577416594e-05,
"loss": 2.4144,
"theoretical_loss": 3.3093400110354008,
"tokens_seen": 3138125824
},
{
"epoch": 0.48,
"learning_rate": 5.278015397775877e-05,
"loss": 2.5288,
"theoretical_loss": 3.3093294806807028,
"tokens_seen": 3138256896
},
{
"epoch": 0.48,
"learning_rate": 5.2737382378100944e-05,
"loss": 2.3532,
"theoretical_loss": 3.309318950888946,
"tokens_seen": 3138387968
},
{
"epoch": 0.48,
"learning_rate": 5.269461077844312e-05,
"loss": 2.4541,
"theoretical_loss": 3.3093084216600763,
"tokens_seen": 3138519040
},
{
"epoch": 0.48,
"learning_rate": 5.265183917878529e-05,
"loss": 2.642,
"theoretical_loss": 3.3092978929940404,
"tokens_seen": 3138650112
},
{
"epoch": 0.48,
"learning_rate": 5.260906757912746e-05,
"loss": 2.5889,
"theoretical_loss": 3.309287364890785,
"tokens_seen": 3138781184
},
{
"epoch": 0.48,
"learning_rate": 5.256629597946964e-05,
"loss": 2.5466,
"theoretical_loss": 3.3092768373502555,
"tokens_seen": 3138912256
},
{
"epoch": 0.48,
"learning_rate": 5.252352437981181e-05,
"loss": 2.4441,
"theoretical_loss": 3.3092663103723994,
"tokens_seen": 3139043328
},
{
"epoch": 0.48,
"learning_rate": 5.2480752780153986e-05,
"loss": 2.5501,
"theoretical_loss": 3.3092557839571626,
"tokens_seen": 3139174400
},
{
"epoch": 0.48,
"learning_rate": 5.243798118049615e-05,
"loss": 2.4633,
"theoretical_loss": 3.3092452581044913,
"tokens_seen": 3139305472
},
{
"epoch": 0.48,
"learning_rate": 5.239520958083832e-05,
"loss": 2.5114,
"theoretical_loss": 3.309234732814333,
"tokens_seen": 3139436544
},
{
"epoch": 0.48,
"objective/train/docs_used": 1720490,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6848251819610596,
"objective/train/theoretical_loss": 3.3092294703801786,
"objective/train/tokens_used": 169553376,
"theoretical_loss": 3.3092294703801786,
"tokens_seen": 3139502080
},
{
"epoch": 0.48,
"learning_rate": 5.23524379811805e-05,
"loss": 2.558,
"theoretical_loss": 3.3092242080866323,
"tokens_seen": 3139567616
},
{
"epoch": 0.48,
"learning_rate": 5.230966638152267e-05,
"loss": 2.5359,
"theoretical_loss": 3.3092136839213375,
"tokens_seen": 3139698688
},
{
"epoch": 0.48,
"learning_rate": 5.226689478186484e-05,
"loss": 2.5309,
"theoretical_loss": 3.3092031603183942,
"tokens_seen": 3139829760
},
{
"epoch": 0.48,
"learning_rate": 5.2224123182207016e-05,
"loss": 2.5739,
"theoretical_loss": 3.309192637277749,
"tokens_seen": 3139960832
},
{
"epoch": 0.48,
"learning_rate": 5.218135158254919e-05,
"loss": 2.5384,
"theoretical_loss": 3.3091821147993485,
"tokens_seen": 3140091904
},
{
"epoch": 0.48,
"learning_rate": 5.2138579982891365e-05,
"loss": 2.6171,
"theoretical_loss": 3.3091715928831396,
"tokens_seen": 3140222976
},
{
"epoch": 0.48,
"learning_rate": 5.209580838323354e-05,
"loss": 2.6771,
"theoretical_loss": 3.3091610715290676,
"tokens_seen": 3140354048
},
{
"epoch": 0.48,
"learning_rate": 5.20530367835757e-05,
"loss": 2.5835,
"theoretical_loss": 3.3091505507370798,
"tokens_seen": 3140485120
},
{
"epoch": 0.49,
"learning_rate": 5.201026518391789e-05,
"loss": 2.4421,
"theoretical_loss": 3.3091400305071232,
"tokens_seen": 3140616192
},
{
"epoch": 0.49,
"learning_rate": 5.196749358426005e-05,
"loss": 2.5076,
"theoretical_loss": 3.309129510839143,
"tokens_seen": 3140747264
},
{
"epoch": 0.49,
"learning_rate": 5.192472198460223e-05,
"loss": 2.5753,
"theoretical_loss": 3.309118991733087,
"tokens_seen": 3140878336
},
{
"epoch": 0.49,
"learning_rate": 5.18819503849444e-05,
"loss": 2.5797,
"theoretical_loss": 3.309108473188901,
"tokens_seen": 3141009408
},
{
"epoch": 0.49,
"objective/train/docs_used": 1720925,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6266226768493652,
"objective/train/theoretical_loss": 3.309097955206532,
"objective/train/tokens_used": 171191776,
"theoretical_loss": 3.309097955206532,
"tokens_seen": 3141140480
},
{
"epoch": 0.49,
"learning_rate": 5.1839178785286566e-05,
"loss": 2.5632,
"theoretical_loss": 3.309097955206532,
"tokens_seen": 3141140480
},
{
"epoch": 0.49,
"learning_rate": 5.179640718562875e-05,
"loss": 2.5929,
"theoretical_loss": 3.309087437785926,
"tokens_seen": 3141271552
},
{
"epoch": 0.49,
"learning_rate": 5.1753635585970916e-05,
"loss": 2.5103,
"theoretical_loss": 3.30907692092703,
"tokens_seen": 3141402624
},
{
"epoch": 0.49,
"learning_rate": 5.171086398631309e-05,
"loss": 2.4878,
"theoretical_loss": 3.3090664046297906,
"tokens_seen": 3141533696
},
{
"epoch": 0.49,
"learning_rate": 5.1668092386655266e-05,
"loss": 2.4295,
"theoretical_loss": 3.309055888894154,
"tokens_seen": 3141664768
},
{
"epoch": 0.49,
"learning_rate": 5.162532078699743e-05,
"loss": 2.5133,
"theoretical_loss": 3.309045373720067,
"tokens_seen": 3141795840
},
{
"epoch": 0.49,
"learning_rate": 5.1582549187339616e-05,
"loss": 2.4796,
"theoretical_loss": 3.3090348591074763,
"tokens_seen": 3141926912
},
{
"epoch": 0.49,
"learning_rate": 5.153977758768178e-05,
"loss": 2.4262,
"theoretical_loss": 3.3090243450563284,
"tokens_seen": 3142057984
},
{
"epoch": 0.49,
"learning_rate": 5.149700598802395e-05,
"loss": 2.4909,
"theoretical_loss": 3.30901383156657,
"tokens_seen": 3142189056
},
{
"epoch": 0.49,
"learning_rate": 5.145423438836613e-05,
"loss": 2.3213,
"theoretical_loss": 3.3090033186381476,
"tokens_seen": 3142320128
},
{
"epoch": 0.49,
"learning_rate": 5.1411462788708295e-05,
"loss": 2.4614,
"theoretical_loss": 3.3089928062710072,
"tokens_seen": 3142451200
},
{
"epoch": 0.49,
"learning_rate": 5.136869118905048e-05,
"loss": 2.5389,
"theoretical_loss": 3.3089822944650966,
"tokens_seen": 3142582272
},
{
"epoch": 0.49,
"learning_rate": 5.1325919589392645e-05,
"loss": 2.5776,
"theoretical_loss": 3.308971783220362,
"tokens_seen": 3142713344
},
{
"epoch": 0.49,
"objective/train/docs_used": 1722164,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7013967037200928,
"objective/train/theoretical_loss": 3.3089665278084186,
"objective/train/tokens_used": 172830176,
"theoretical_loss": 3.3089665278084186,
"tokens_seen": 3142778880
},
{
"epoch": 0.49,
"learning_rate": 5.1283147989734816e-05,
"loss": 2.516,
"theoretical_loss": 3.308961272536749,
"tokens_seen": 3142844416
},
{
"epoch": 0.49,
"learning_rate": 5.1240376390076994e-05,
"loss": 2.4546,
"theoretical_loss": 3.308950762414206,
"tokens_seen": 3142975488
},
{
"epoch": 0.49,
"learning_rate": 5.119760479041916e-05,
"loss": 2.5464,
"theoretical_loss": 3.3089402528526786,
"tokens_seen": 3143106560
},
{
"epoch": 0.49,
"learning_rate": 5.1154833190761344e-05,
"loss": 2.4838,
"theoretical_loss": 3.3089297438521137,
"tokens_seen": 3143237632
},
{
"epoch": 0.49,
"learning_rate": 5.111206159110351e-05,
"loss": 2.5751,
"theoretical_loss": 3.308919235412458,
"tokens_seen": 3143368704
},
{
"epoch": 0.49,
"learning_rate": 5.106928999144568e-05,
"loss": 2.6909,
"theoretical_loss": 3.3089087275336575,
"tokens_seen": 3143499776
},
{
"epoch": 0.49,
"learning_rate": 5.102651839178786e-05,
"loss": 2.6497,
"theoretical_loss": 3.30889822021566,
"tokens_seen": 3143630848
},
{
"epoch": 0.5,
"learning_rate": 5.098374679213003e-05,
"loss": 2.5391,
"theoretical_loss": 3.308887713458412,
"tokens_seen": 3143761920
},
{
"epoch": 0.5,
"learning_rate": 5.0940975192472195e-05,
"loss": 2.3483,
"theoretical_loss": 3.308877207261859,
"tokens_seen": 3143892992
},
{
"epoch": 0.5,
"learning_rate": 5.089820359281437e-05,
"loss": 2.5682,
"theoretical_loss": 3.3088667016259494,
"tokens_seen": 3144024064
},
{
"epoch": 0.5,
"learning_rate": 5.0855431993156545e-05,
"loss": 2.5066,
"theoretical_loss": 3.3088561965506287,
"tokens_seen": 3144155136
},
{
"epoch": 0.5,
"learning_rate": 5.081266039349872e-05,
"loss": 2.6193,
"theoretical_loss": 3.3088456920358436,
"tokens_seen": 3144286208
},
{
"epoch": 0.5,
"objective/train/docs_used": 1723267,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3256311416625977,
"objective/train/theoretical_loss": 3.3088351880815416,
"objective/train/tokens_used": 174468576,
"theoretical_loss": 3.3088351880815416,
"tokens_seen": 3144417280
},
{
"epoch": 0.5,
"learning_rate": 5.0769888793840895e-05,
"loss": 2.4757,
"theoretical_loss": 3.3088351880815416,
"tokens_seen": 3144417280
},
{
"epoch": 0.5,
"learning_rate": 5.072711719418306e-05,
"loss": 2.4459,
"theoretical_loss": 3.308824684687669,
"tokens_seen": 3144548352
},
{
"epoch": 0.5,
"learning_rate": 5.068434559452524e-05,
"loss": 2.4645,
"theoretical_loss": 3.308814181854173,
"tokens_seen": 3144679424
},
{
"epoch": 0.5,
"learning_rate": 5.064157399486741e-05,
"loss": 2.4541,
"theoretical_loss": 3.3088036795809996,
"tokens_seen": 3144810496
},
{
"epoch": 0.5,
"learning_rate": 5.059880239520959e-05,
"loss": 2.5162,
"theoretical_loss": 3.308793177868096,
"tokens_seen": 3144941568
},
{
"epoch": 0.5,
"learning_rate": 5.055603079555176e-05,
"loss": 2.5396,
"theoretical_loss": 3.308782676715409,
"tokens_seen": 3145072640
},
{
"epoch": 0.5,
"learning_rate": 5.0513259195893924e-05,
"loss": 2.601,
"theoretical_loss": 3.308772176122885,
"tokens_seen": 3145203712
},
{
"epoch": 0.5,
"learning_rate": 5.04704875962361e-05,
"loss": 2.5741,
"theoretical_loss": 3.3087616760904712,
"tokens_seen": 3145334784
},
{
"epoch": 0.5,
"learning_rate": 5.0427715996578274e-05,
"loss": 2.4807,
"theoretical_loss": 3.308751176618114,
"tokens_seen": 3145465856
},
{
"epoch": 0.5,
"learning_rate": 5.038494439692045e-05,
"loss": 2.5737,
"theoretical_loss": 3.3087406777057606,
"tokens_seen": 3145596928
},
{
"epoch": 0.5,
"learning_rate": 5.0342172797262623e-05,
"loss": 2.5546,
"theoretical_loss": 3.3087301793533577,
"tokens_seen": 3145728000
},
{
"epoch": 0.5,
"learning_rate": 5.029940119760479e-05,
"loss": 2.4823,
"theoretical_loss": 3.3087196815608517,
"tokens_seen": 3145859072
},
{
"epoch": 0.5,
"learning_rate": 5.0256629597946967e-05,
"loss": 2.5493,
"theoretical_loss": 3.3087091843281895,
"tokens_seen": 3145990144
},
{
"epoch": 0.5,
"objective/train/docs_used": 1723860,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.650022506713867,
"objective/train/theoretical_loss": 3.3087039359217836,
"objective/train/tokens_used": 176106976,
"theoretical_loss": 3.3087039359217836,
"tokens_seen": 3146055680
},
{
"epoch": 0.5,
"learning_rate": 5.021385799828914e-05,
"loss": 2.7187,
"theoretical_loss": 3.3086986876553186,
"tokens_seen": 3146121216
},
{
"epoch": 0.5,
"learning_rate": 5.017108639863131e-05,
"loss": 2.5579,
"theoretical_loss": 3.3086881915421853,
"tokens_seen": 3146252288
},
{
"epoch": 0.5,
"learning_rate": 5.012831479897349e-05,
"loss": 2.6554,
"theoretical_loss": 3.3086776959887363,
"tokens_seen": 3146383360
},
{
"epoch": 0.5,
"learning_rate": 5.008554319931565e-05,
"loss": 2.6396,
"theoretical_loss": 3.3086672009949187,
"tokens_seen": 3146514432
},
{
"epoch": 0.5,
"learning_rate": 5.004277159965783e-05,
"loss": 2.5456,
"theoretical_loss": 3.3086567065606793,
"tokens_seen": 3146645504
},
{
"epoch": 0.51,
"learning_rate": 5e-05,
"loss": 2.4661,
"theoretical_loss": 3.3086462126859653,
"tokens_seen": 3146776576
},
{
"epoch": 0.51,
"learning_rate": 4.9957228400342174e-05,
"loss": 2.4853,
"theoretical_loss": 3.3086357193707228,
"tokens_seen": 3146907648
},
{
"epoch": 0.51,
"learning_rate": 4.991445680068435e-05,
"loss": 2.4425,
"theoretical_loss": 3.308625226614899,
"tokens_seen": 3147038720
},
{
"epoch": 0.51,
"learning_rate": 4.987168520102652e-05,
"loss": 2.4538,
"theoretical_loss": 3.308614734418441,
"tokens_seen": 3147169792
},
{
"epoch": 0.51,
"learning_rate": 4.9828913601368695e-05,
"loss": 2.3674,
"theoretical_loss": 3.3086042427812954,
"tokens_seen": 3147300864
},
{
"epoch": 0.51,
"learning_rate": 4.978614200171087e-05,
"loss": 2.5229,
"theoretical_loss": 3.3085937517034094,
"tokens_seen": 3147431936
},
{
"epoch": 0.51,
"learning_rate": 4.974337040205304e-05,
"loss": 2.4467,
"theoretical_loss": 3.30858326118473,
"tokens_seen": 3147563008
},
{
"epoch": 0.51,
"objective/train/docs_used": 1724798,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.843707323074341,
"objective/train/theoretical_loss": 3.308572771225203,
"objective/train/tokens_used": 177745376,
"theoretical_loss": 3.308572771225203,
"tokens_seen": 3147694080
},
{
"epoch": 0.51,
"learning_rate": 4.970059880239521e-05,
"loss": 2.7121,
"theoretical_loss": 3.308572771225203,
"tokens_seen": 3147694080
},
{
"epoch": 0.51,
"learning_rate": 4.965782720273738e-05,
"loss": 2.6234,
"theoretical_loss": 3.308562281824777,
"tokens_seen": 3147825152
},
{
"epoch": 0.51,
"learning_rate": 4.961505560307956e-05,
"loss": 2.5533,
"theoretical_loss": 3.3085517929833976,
"tokens_seen": 3147956224
},
{
"epoch": 0.51,
"learning_rate": 4.957228400342173e-05,
"loss": 2.4984,
"theoretical_loss": 3.308541304701013,
"tokens_seen": 3148087296
},
{
"epoch": 0.51,
"learning_rate": 4.95295124037639e-05,
"loss": 2.5731,
"theoretical_loss": 3.3085308169775685,
"tokens_seen": 3148218368
},
{
"epoch": 0.51,
"learning_rate": 4.9486740804106074e-05,
"loss": 2.4481,
"theoretical_loss": 3.308520329813012,
"tokens_seen": 3148349440
},
{
"epoch": 0.51,
"learning_rate": 4.9443969204448246e-05,
"loss": 2.5747,
"theoretical_loss": 3.3085098432072906,
"tokens_seen": 3148480512
},
{
"epoch": 0.51,
"learning_rate": 4.9401197604790424e-05,
"loss": 2.5293,
"theoretical_loss": 3.308499357160351,
"tokens_seen": 3148611584
},
{
"epoch": 0.51,
"learning_rate": 4.9358426005132596e-05,
"loss": 2.6658,
"theoretical_loss": 3.3084888716721403,
"tokens_seen": 3148742656
},
{
"epoch": 0.51,
"learning_rate": 4.931565440547477e-05,
"loss": 2.4781,
"theoretical_loss": 3.308478386742605,
"tokens_seen": 3148873728
},
{
"epoch": 0.51,
"learning_rate": 4.927288280581694e-05,
"loss": 2.4863,
"theoretical_loss": 3.3084679023716923,
"tokens_seen": 3149004800
},
{
"epoch": 0.51,
"learning_rate": 4.923011120615911e-05,
"loss": 2.5368,
"theoretical_loss": 3.3084574185593496,
"tokens_seen": 3149135872
},
{
"epoch": 0.51,
"learning_rate": 4.918733960650129e-05,
"loss": 2.5071,
"theoretical_loss": 3.3084469353055237,
"tokens_seen": 3149266944
},
{
"epoch": 0.51,
"objective/train/docs_used": 1725490,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.170764684677124,
"objective/train/theoretical_loss": 3.308441693888038,
"objective/train/tokens_used": 179383776,
"theoretical_loss": 3.308441693888038,
"tokens_seen": 3149332480
},
{
"epoch": 0.51,
"learning_rate": 4.914456800684346e-05,
"loss": 2.4342,
"theoretical_loss": 3.3084364526101613,
"tokens_seen": 3149398016
},
{
"epoch": 0.51,
"learning_rate": 4.910179640718563e-05,
"loss": 2.5341,
"theoretical_loss": 3.30842597047321,
"tokens_seen": 3149529088
},
{
"epoch": 0.51,
"learning_rate": 4.90590248075278e-05,
"loss": 2.6495,
"theoretical_loss": 3.3084154888946156,
"tokens_seen": 3149660160
},
{
"epoch": 0.51,
"learning_rate": 4.9016253207869974e-05,
"loss": 2.555,
"theoretical_loss": 3.3084050078743266,
"tokens_seen": 3149791232
},
{
"epoch": 0.52,
"learning_rate": 4.897348160821215e-05,
"loss": 2.4809,
"theoretical_loss": 3.3083945274122892,
"tokens_seen": 3149922304
},
{
"epoch": 0.52,
"learning_rate": 4.893071000855432e-05,
"loss": 2.4281,
"theoretical_loss": 3.3083840475084507,
"tokens_seen": 3150053376
},
{
"epoch": 0.52,
"learning_rate": 4.8887938408896496e-05,
"loss": 2.4226,
"theoretical_loss": 3.308373568162758,
"tokens_seen": 3150184448
},
{
"epoch": 0.52,
"learning_rate": 4.884516680923867e-05,
"loss": 2.5657,
"theoretical_loss": 3.3083630893751588,
"tokens_seen": 3150315520
},
{
"epoch": 0.52,
"learning_rate": 4.8802395209580846e-05,
"loss": 2.5738,
"theoretical_loss": 3.3083526111455988,
"tokens_seen": 3150446592
},
{
"epoch": 0.52,
"learning_rate": 4.875962360992301e-05,
"loss": 2.4448,
"theoretical_loss": 3.308342133474026,
"tokens_seen": 3150577664
},
{
"epoch": 0.52,
"learning_rate": 4.871685201026518e-05,
"loss": 2.4711,
"theoretical_loss": 3.3083316563603877,
"tokens_seen": 3150708736
},
{
"epoch": 0.52,
"learning_rate": 4.867408041060736e-05,
"loss": 2.5539,
"theoretical_loss": 3.30832117980463,
"tokens_seen": 3150839808
},
{
"epoch": 0.52,
"objective/train/docs_used": 1726849,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.238480806350708,
"objective/train/theoretical_loss": 3.3083107038067014,
"objective/train/tokens_used": 181022176,
"theoretical_loss": 3.3083107038067014,
"tokens_seen": 3150970880
},
{
"epoch": 0.52,
"learning_rate": 4.863130881094953e-05,
"loss": 2.5334,
"theoretical_loss": 3.3083107038067014,
"tokens_seen": 3150970880
},
{
"epoch": 0.52,
"learning_rate": 4.858853721129171e-05,
"loss": 2.458,
"theoretical_loss": 3.3083002283665475,
"tokens_seen": 3151101952
},
{
"epoch": 0.52,
"learning_rate": 4.8545765611633875e-05,
"loss": 2.5054,
"theoretical_loss": 3.3082897534841162,
"tokens_seen": 3151233024
},
{
"epoch": 0.52,
"learning_rate": 4.8502994011976046e-05,
"loss": 2.52,
"theoretical_loss": 3.308279279159355,
"tokens_seen": 3151364096
},
{
"epoch": 0.52,
"learning_rate": 4.8460222412318225e-05,
"loss": 2.4724,
"theoretical_loss": 3.30826880539221,
"tokens_seen": 3151495168
},
{
"epoch": 0.52,
"learning_rate": 4.8417450812660396e-05,
"loss": 2.5626,
"theoretical_loss": 3.308258332182629,
"tokens_seen": 3151626240
},
{
"epoch": 0.52,
"learning_rate": 4.837467921300257e-05,
"loss": 2.387,
"theoretical_loss": 3.308247859530559,
"tokens_seen": 3151757312
},
{
"epoch": 0.52,
"learning_rate": 4.833190761334474e-05,
"loss": 2.4943,
"theoretical_loss": 3.308237387435947,
"tokens_seen": 3151888384
},
{
"epoch": 0.52,
"learning_rate": 4.828913601368692e-05,
"loss": 2.5211,
"theoretical_loss": 3.3082269158987403,
"tokens_seen": 3152019456
},
{
"epoch": 0.52,
"learning_rate": 4.824636441402909e-05,
"loss": 2.5913,
"theoretical_loss": 3.308216444918886,
"tokens_seen": 3152150528
},
{
"epoch": 0.52,
"learning_rate": 4.820359281437126e-05,
"loss": 2.4218,
"theoretical_loss": 3.3082059744963317,
"tokens_seen": 3152281600
},
{
"epoch": 0.52,
"learning_rate": 4.816082121471343e-05,
"loss": 2.4526,
"theoretical_loss": 3.3081955046310236,
"tokens_seen": 3152412672
},
{
"epoch": 0.52,
"learning_rate": 4.8118049615055603e-05,
"loss": 2.4867,
"theoretical_loss": 3.3081850353229094,
"tokens_seen": 3152543744
},
{
"epoch": 0.52,
"objective/train/docs_used": 1727307,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4184036254882812,
"objective/train/theoretical_loss": 3.3081798008777836,
"objective/train/tokens_used": 182660576,
"theoretical_loss": 3.3081798008777836,
"tokens_seen": 3152609280
},
{
"epoch": 0.52,
"learning_rate": 4.807527801539778e-05,
"loss": 2.4747,
"theoretical_loss": 3.3081745665719366,
"tokens_seen": 3152674816
},
{
"epoch": 0.52,
"learning_rate": 4.803250641573995e-05,
"loss": 2.4888,
"theoretical_loss": 3.308164098378052,
"tokens_seen": 3152805888
},
{
"epoch": 0.52,
"learning_rate": 4.798973481608212e-05,
"loss": 2.4952,
"theoretical_loss": 3.308153630741203,
"tokens_seen": 3152936960
},
{
"epoch": 0.53,
"learning_rate": 4.7946963216424296e-05,
"loss": 2.505,
"theoretical_loss": 3.3081431636613363,
"tokens_seen": 3153068032
},
{
"epoch": 0.53,
"learning_rate": 4.790419161676647e-05,
"loss": 2.5401,
"theoretical_loss": 3.3081326971384,
"tokens_seen": 3153199104
},
{
"epoch": 0.53,
"learning_rate": 4.7861420017108646e-05,
"loss": 2.5614,
"theoretical_loss": 3.308122231172341,
"tokens_seen": 3153330176
},
{
"epoch": 0.53,
"learning_rate": 4.781864841745082e-05,
"loss": 2.5993,
"theoretical_loss": 3.3081117657631056,
"tokens_seen": 3153461248
},
{
"epoch": 0.53,
"learning_rate": 4.777587681779299e-05,
"loss": 2.6378,
"theoretical_loss": 3.3081013009106424,
"tokens_seen": 3153592320
},
{
"epoch": 0.53,
"learning_rate": 4.773310521813516e-05,
"loss": 2.544,
"theoretical_loss": 3.3080908366148973,
"tokens_seen": 3153723392
},
{
"epoch": 0.53,
"learning_rate": 4.769033361847733e-05,
"loss": 2.4272,
"theoretical_loss": 3.308080372875819,
"tokens_seen": 3153854464
},
{
"epoch": 0.53,
"learning_rate": 4.764756201881951e-05,
"loss": 2.5282,
"theoretical_loss": 3.3080699096933537,
"tokens_seen": 3153985536
},
{
"epoch": 0.53,
"learning_rate": 4.7604790419161675e-05,
"loss": 2.4947,
"theoretical_loss": 3.308059447067449,
"tokens_seen": 3154116608
},
{
"debugging/Self-BLEU-5": 0.4768917357816163,
"debugging/distinct-1-grams": 0.8069642973374905,
"debugging/distinct-2-grams": 0.9493042579915046,
"debugging/entropy-1-grams": 5.952991944891716,
"debugging/entropy-2-grams": 6.680805112573269,
"debugging/length": 514.4615384615385,
"debugging/num_segments": 13,
"debugging/score": 0.008350422819326409,
"debugging/score_std": 0.0069101491985588594,
"epoch": 0.53,
"objective/train/docs_used": 1728625,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.800560712814331,
"objective/train/theoretical_loss": 3.3080489849980523,
"objective/train/tokens_used": 184298976,
"theoretical_loss": 3.3080489849980523,
"tokens_seen": 3154247680
},
{
"epoch": 0.53,
"learning_rate": 4.7562018819503854e-05,
"loss": 2.3931,
"theoretical_loss": 3.3080489849980523,
"tokens_seen": 3154247680
},
{
"epoch": 0.53,
"learning_rate": 4.7519247219846025e-05,
"loss": 2.6028,
"theoretical_loss": 3.3080385234851106,
"tokens_seen": 3154378752
},
{
"epoch": 0.53,
"learning_rate": 4.74764756201882e-05,
"loss": 2.4836,
"theoretical_loss": 3.3080280625285714,
"tokens_seen": 3154509824
},
{
"epoch": 0.53,
"learning_rate": 4.7433704020530375e-05,
"loss": 2.641,
"theoretical_loss": 3.308017602128382,
"tokens_seen": 3154640896
},
{
"epoch": 0.53,
"learning_rate": 4.739093242087254e-05,
"loss": 2.5354,
"theoretical_loss": 3.308007142284489,
"tokens_seen": 3154771968
},
{
"epoch": 0.53,
"learning_rate": 4.734816082121472e-05,
"loss": 2.5539,
"theoretical_loss": 3.307996682996841,
"tokens_seen": 3154903040
},
{
"epoch": 0.53,
"learning_rate": 4.730538922155689e-05,
"loss": 2.4411,
"theoretical_loss": 3.3079862242653846,
"tokens_seen": 3155034112
},
{
"epoch": 0.53,
"learning_rate": 4.726261762189906e-05,
"loss": 2.4431,
"theoretical_loss": 3.307975766090067,
"tokens_seen": 3155165184
},
{
"epoch": 0.53,
"learning_rate": 4.721984602224123e-05,
"loss": 2.3219,
"theoretical_loss": 3.3079653084708354,
"tokens_seen": 3155296256
},
{
"epoch": 0.53,
"learning_rate": 4.7177074422583404e-05,
"loss": 2.5297,
"theoretical_loss": 3.3079548514076373,
"tokens_seen": 3155427328
},
{
"epoch": 0.53,
"learning_rate": 4.713430282292558e-05,
"loss": 2.6636,
"theoretical_loss": 3.307944394900421,
"tokens_seen": 3155558400
},
{
"epoch": 0.53,
"learning_rate": 4.7091531223267754e-05,
"loss": 2.3727,
"theoretical_loss": 3.307933938949132,
"tokens_seen": 3155689472
},
{
"epoch": 0.53,
"learning_rate": 4.7048759623609925e-05,
"loss": 2.6068,
"theoretical_loss": 3.307923483553719,
"tokens_seen": 3155820544
},
{
"epoch": 0.53,
"objective/train/docs_used": 1729266,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3172640800476074,
"objective/train/theoretical_loss": 3.3079182560644496,
"objective/train/tokens_used": 185937376,
"theoretical_loss": 3.3079182560644496,
"tokens_seen": 3155886080
},
{
"epoch": 0.53,
"learning_rate": 4.70059880239521e-05,
"loss": 2.4148,
"theoretical_loss": 3.307913028714129,
"tokens_seen": 3155951616
},
{
"epoch": 0.54,
"learning_rate": 4.696321642429427e-05,
"loss": 2.6612,
"theoretical_loss": 3.3079025744303094,
"tokens_seen": 3156082688
},
{
"epoch": 0.54,
"learning_rate": 4.692044482463645e-05,
"loss": 2.5739,
"theoretical_loss": 3.3078921207022076,
"tokens_seen": 3156213760
},
{
"epoch": 0.54,
"learning_rate": 4.687767322497862e-05,
"loss": 2.4681,
"theoretical_loss": 3.307881667529771,
"tokens_seen": 3156344832
},
{
"epoch": 0.54,
"learning_rate": 4.683490162532079e-05,
"loss": 2.3788,
"theoretical_loss": 3.3078712149129466,
"tokens_seen": 3156475904
},
{
"epoch": 0.54,
"learning_rate": 4.679213002566296e-05,
"loss": 2.6225,
"theoretical_loss": 3.3078607628516825,
"tokens_seen": 3156606976
},
{
"epoch": 0.54,
"learning_rate": 4.674935842600513e-05,
"loss": 2.564,
"theoretical_loss": 3.3078503113459257,
"tokens_seen": 3156738048
},
{
"epoch": 0.54,
"learning_rate": 4.670658682634731e-05,
"loss": 2.3909,
"theoretical_loss": 3.307839860395623,
"tokens_seen": 3156869120
},
{
"epoch": 0.54,
"learning_rate": 4.6663815226689476e-05,
"loss": 2.4339,
"theoretical_loss": 3.307829410000723,
"tokens_seen": 3157000192
},
{
"epoch": 0.54,
"learning_rate": 4.6621043627031654e-05,
"loss": 2.4966,
"theoretical_loss": 3.307818960161173,
"tokens_seen": 3157131264
},
{
"epoch": 0.54,
"learning_rate": 4.6578272027373826e-05,
"loss": 2.5257,
"theoretical_loss": 3.307808510876919,
"tokens_seen": 3157262336
},
{
"epoch": 0.54,
"learning_rate": 4.6535500427716e-05,
"loss": 2.3732,
"theoretical_loss": 3.3077980621479104,
"tokens_seen": 3157393408
},
{
"epoch": 0.54,
"objective/train/docs_used": 1730350,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4390780925750732,
"objective/train/theoretical_loss": 3.307787613974093,
"objective/train/tokens_used": 187575776,
"theoretical_loss": 3.307787613974093,
"tokens_seen": 3157524480
},
{
"epoch": 0.54,
"learning_rate": 4.6492728828058175e-05,
"loss": 2.6069,
"theoretical_loss": 3.307787613974093,
"tokens_seen": 3157524480
},
{
"epoch": 0.54,
"learning_rate": 4.644995722840034e-05,
"loss": 2.5569,
"theoretical_loss": 3.3077771663554154,
"tokens_seen": 3157655552
},
{
"epoch": 0.54,
"learning_rate": 4.640718562874252e-05,
"loss": 2.5445,
"theoretical_loss": 3.3077667192918243,
"tokens_seen": 3157786624
},
{
"epoch": 0.54,
"learning_rate": 4.636441402908469e-05,
"loss": 2.1927,
"theoretical_loss": 3.307756272783268,
"tokens_seen": 3157917696
},
{
"epoch": 0.54,
"learning_rate": 4.632164242942686e-05,
"loss": 2.5662,
"theoretical_loss": 3.3077458268296933,
"tokens_seen": 3158048768
},
{
"epoch": 0.54,
"learning_rate": 4.627887082976903e-05,
"loss": 2.4771,
"theoretical_loss": 3.3077353814310477,
"tokens_seen": 3158179840
},
{
"epoch": 0.54,
"learning_rate": 4.6236099230111205e-05,
"loss": 2.5113,
"theoretical_loss": 3.3077249365872787,
"tokens_seen": 3158310912
},
{
"epoch": 0.54,
"learning_rate": 4.619332763045338e-05,
"loss": 2.3738,
"theoretical_loss": 3.307714492298334,
"tokens_seen": 3158441984
},
{
"epoch": 0.54,
"learning_rate": 4.6150556030795554e-05,
"loss": 2.5683,
"theoretical_loss": 3.307704048564161,
"tokens_seen": 3158573056
},
{
"epoch": 0.54,
"learning_rate": 4.610778443113773e-05,
"loss": 2.429,
"theoretical_loss": 3.3076936053847072,
"tokens_seen": 3158704128
},
{
"epoch": 0.54,
"learning_rate": 4.60650128314799e-05,
"loss": 2.5088,
"theoretical_loss": 3.3076831627599206,
"tokens_seen": 3158835200
},
{
"epoch": 0.54,
"learning_rate": 4.602224123182207e-05,
"loss": 2.4902,
"theoretical_loss": 3.307672720689748,
"tokens_seen": 3158966272
},
{
"epoch": 0.54,
"learning_rate": 4.597946963216425e-05,
"loss": 2.2596,
"theoretical_loss": 3.3076622791741377,
"tokens_seen": 3159097344
},
{
"epoch": 0.54,
"objective/train/docs_used": 1730945,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 3.087862730026245,
"objective/train/theoretical_loss": 3.3076570586242764,
"objective/train/tokens_used": 189214176,
"theoretical_loss": 3.3076570586242764,
"tokens_seen": 3159162880
},
{
"epoch": 0.55,
"learning_rate": 4.593669803250642e-05,
"loss": 2.4972,
"theoretical_loss": 3.307651838213036,
"tokens_seen": 3159228416
},
{
"epoch": 0.55,
"learning_rate": 4.589392643284859e-05,
"loss": 2.6479,
"theoretical_loss": 3.3076413978063917,
"tokens_seen": 3159359488
},
{
"epoch": 0.55,
"learning_rate": 4.585115483319076e-05,
"loss": 2.639,
"theoretical_loss": 3.307630957954152,
"tokens_seen": 3159490560
},
{
"epoch": 0.55,
"learning_rate": 4.580838323353293e-05,
"loss": 2.5289,
"theoretical_loss": 3.307620518656264,
"tokens_seen": 3159621632
},
{
"epoch": 0.55,
"learning_rate": 4.576561163387511e-05,
"loss": 2.3132,
"theoretical_loss": 3.307610079912676,
"tokens_seen": 3159752704
},
{
"epoch": 0.55,
"learning_rate": 4.572284003421728e-05,
"loss": 2.4593,
"theoretical_loss": 3.3075996417233346,
"tokens_seen": 3159883776
},
{
"epoch": 0.55,
"learning_rate": 4.5680068434559455e-05,
"loss": 2.4431,
"theoretical_loss": 3.3075892040881887,
"tokens_seen": 3160014848
},
{
"epoch": 0.55,
"learning_rate": 4.5637296834901626e-05,
"loss": 2.5069,
"theoretical_loss": 3.307578767007185,
"tokens_seen": 3160145920
},
{
"epoch": 0.55,
"learning_rate": 4.5594525235243804e-05,
"loss": 2.4377,
"theoretical_loss": 3.307568330480271,
"tokens_seen": 3160276992
},
{
"epoch": 0.55,
"learning_rate": 4.5551753635585976e-05,
"loss": 2.5466,
"theoretical_loss": 3.3075578945073945,
"tokens_seen": 3160408064
},
{
"epoch": 0.55,
"learning_rate": 4.550898203592814e-05,
"loss": 2.457,
"theoretical_loss": 3.3075474590885032,
"tokens_seen": 3160539136
},
{
"epoch": 0.55,
"learning_rate": 4.546621043627032e-05,
"loss": 2.4887,
"theoretical_loss": 3.307537024223545,
"tokens_seen": 3160670208
},
{
"epoch": 0.55,
"objective/train/docs_used": 1732078,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.740237236022949,
"objective/train/theoretical_loss": 3.307526589912467,
"objective/train/tokens_used": 190852576,
"theoretical_loss": 3.307526589912467,
"tokens_seen": 3160801280
},
{
"epoch": 0.55,
"learning_rate": 4.542343883661249e-05,
"loss": 2.5039,
"theoretical_loss": 3.307526589912467,
"tokens_seen": 3160801280
},
{
"epoch": 0.55,
"learning_rate": 4.538066723695467e-05,
"loss": 2.4584,
"theoretical_loss": 3.3075161561552173,
"tokens_seen": 3160932352
},
{
"epoch": 0.55,
"learning_rate": 4.5337895637296834e-05,
"loss": 2.3226,
"theoretical_loss": 3.307505722951743,
"tokens_seen": 3161063424
},
{
"epoch": 0.55,
"learning_rate": 4.5295124037639005e-05,
"loss": 2.4711,
"theoretical_loss": 3.307495290301992,
"tokens_seen": 3161194496
},
{
"epoch": 0.55,
"learning_rate": 4.5252352437981183e-05,
"loss": 2.5309,
"theoretical_loss": 3.307484858205912,
"tokens_seen": 3161325568
},
{
"epoch": 0.55,
"learning_rate": 4.5209580838323355e-05,
"loss": 2.4896,
"theoretical_loss": 3.3074744266634513,
"tokens_seen": 3161456640
},
{
"epoch": 0.55,
"learning_rate": 4.516680923866553e-05,
"loss": 2.3985,
"theoretical_loss": 3.3074639956745564,
"tokens_seen": 3161587712
},
{
"epoch": 0.55,
"learning_rate": 4.51240376390077e-05,
"loss": 2.5226,
"theoretical_loss": 3.3074535652391757,
"tokens_seen": 3161718784
},
{
"epoch": 0.55,
"learning_rate": 4.5081266039349876e-05,
"loss": 2.3819,
"theoretical_loss": 3.3074431353572566,
"tokens_seen": 3161849856
},
{
"epoch": 0.55,
"learning_rate": 4.503849443969205e-05,
"loss": 2.5918,
"theoretical_loss": 3.307432706028747,
"tokens_seen": 3161980928
},
{
"epoch": 0.55,
"learning_rate": 4.499572284003422e-05,
"loss": 2.5223,
"theoretical_loss": 3.307422277253594,
"tokens_seen": 3162112000
},
{
"epoch": 0.56,
"learning_rate": 4.495295124037639e-05,
"loss": 2.4723,
"theoretical_loss": 3.3074118490317463,
"tokens_seen": 3162243072
},
{
"epoch": 0.56,
"learning_rate": 4.491017964071856e-05,
"loss": 2.4196,
"theoretical_loss": 3.307401421363151,
"tokens_seen": 3162374144
},
{
"epoch": 0.56,
"objective/train/docs_used": 1732556,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6318604946136475,
"objective/train/theoretical_loss": 3.3073962077363066,
"objective/train/tokens_used": 192490976,
"theoretical_loss": 3.3073962077363066,
"tokens_seen": 3162439680
},
{
"epoch": 0.56,
"learning_rate": 4.486740804106074e-05,
"loss": 2.4152,
"theoretical_loss": 3.307390994247756,
"tokens_seen": 3162505216
},
{
"epoch": 0.56,
"learning_rate": 4.482463644140291e-05,
"loss": 2.4101,
"theoretical_loss": 3.3073805676855086,
"tokens_seen": 3162636288
},
{
"epoch": 0.56,
"learning_rate": 4.4781864841745084e-05,
"loss": 2.5474,
"theoretical_loss": 3.3073701416763575,
"tokens_seen": 3162767360
},
{
"epoch": 0.56,
"learning_rate": 4.4739093242087255e-05,
"loss": 2.5322,
"theoretical_loss": 3.3073597162202493,
"tokens_seen": 3162898432
},
{
"epoch": 0.56,
"learning_rate": 4.469632164242943e-05,
"loss": 2.4196,
"theoretical_loss": 3.3073492913171325,
"tokens_seen": 3163029504
},
{
"epoch": 0.56,
"learning_rate": 4.4653550042771605e-05,
"loss": 2.5852,
"theoretical_loss": 3.307338866966955,
"tokens_seen": 3163160576
},
{
"epoch": 0.56,
"learning_rate": 4.4610778443113777e-05,
"loss": 2.6559,
"theoretical_loss": 3.3073284431696632,
"tokens_seen": 3163291648
},
{
"epoch": 0.56,
"learning_rate": 4.456800684345595e-05,
"loss": 2.5197,
"theoretical_loss": 3.3073180199252064,
"tokens_seen": 3163422720
},
{
"epoch": 0.56,
"learning_rate": 4.452523524379812e-05,
"loss": 2.5187,
"theoretical_loss": 3.3073075972335317,
"tokens_seen": 3163553792
},
{
"epoch": 0.56,
"learning_rate": 4.448246364414029e-05,
"loss": 2.5293,
"theoretical_loss": 3.307297175094587,
"tokens_seen": 3163684864
},
{
"epoch": 0.56,
"learning_rate": 4.443969204448247e-05,
"loss": 2.5596,
"theoretical_loss": 3.3072867535083206,
"tokens_seen": 3163815936
},
{
"epoch": 0.56,
"learning_rate": 4.439692044482464e-05,
"loss": 2.4285,
"theoretical_loss": 3.3072763324746792,
"tokens_seen": 3163947008
},
{
"epoch": 0.56,
"objective/train/docs_used": 1733728,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5950536727905273,
"objective/train/theoretical_loss": 3.3072659119936114,
"objective/train/tokens_used": 194129376,
"theoretical_loss": 3.3072659119936114,
"tokens_seen": 3164078080
},
{
"epoch": 0.56,
"learning_rate": 4.435414884516681e-05,
"loss": 2.3195,
"theoretical_loss": 3.3072659119936114,
"tokens_seen": 3164078080
},
{
"epoch": 0.56,
"learning_rate": 4.4311377245508984e-05,
"loss": 2.3918,
"theoretical_loss": 3.3072554920650648,
"tokens_seen": 3164209152
},
{
"epoch": 0.56,
"learning_rate": 4.4268605645851155e-05,
"loss": 2.5205,
"theoretical_loss": 3.3072450726889873,
"tokens_seen": 3164340224
},
{
"epoch": 0.56,
"learning_rate": 4.4225834046193334e-05,
"loss": 2.5269,
"theoretical_loss": 3.3072346538653266,
"tokens_seen": 3164471296
},
{
"epoch": 0.56,
"learning_rate": 4.41830624465355e-05,
"loss": 2.3742,
"theoretical_loss": 3.307224235594031,
"tokens_seen": 3164602368
},
{
"epoch": 0.56,
"learning_rate": 4.414029084687768e-05,
"loss": 2.4977,
"theoretical_loss": 3.3072138178750476,
"tokens_seen": 3164733440
},
{
"epoch": 0.56,
"learning_rate": 4.409751924721985e-05,
"loss": 2.365,
"theoretical_loss": 3.3072034007083246,
"tokens_seen": 3164864512
},
{
"epoch": 0.56,
"learning_rate": 4.405474764756202e-05,
"loss": 2.5296,
"theoretical_loss": 3.3071929840938097,
"tokens_seen": 3164995584
},
{
"epoch": 0.56,
"learning_rate": 4.40119760479042e-05,
"loss": 2.4961,
"theoretical_loss": 3.307182568031451,
"tokens_seen": 3165126656
},
{
"epoch": 0.56,
"learning_rate": 4.396920444824636e-05,
"loss": 2.355,
"theoretical_loss": 3.307172152521196,
"tokens_seen": 3165257728
},
{
"epoch": 0.57,
"learning_rate": 4.392643284858854e-05,
"loss": 2.3241,
"theoretical_loss": 3.307161737562993,
"tokens_seen": 3165388800
},
{
"epoch": 0.57,
"learning_rate": 4.388366124893071e-05,
"loss": 2.3619,
"theoretical_loss": 3.30715132315679,
"tokens_seen": 3165519872
},
{
"epoch": 0.57,
"learning_rate": 4.3840889649272884e-05,
"loss": 2.4652,
"theoretical_loss": 3.307140909302534,
"tokens_seen": 3165650944
},
{
"epoch": 0.57,
"objective/train/docs_used": 1734416,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3513553142547607,
"objective/train/theoretical_loss": 3.3071357025823707,
"objective/train/tokens_used": 195767776,
"theoretical_loss": 3.3071357025823707,
"tokens_seen": 3165716480
},
{
"epoch": 0.57,
"learning_rate": 4.3798118049615056e-05,
"loss": 2.5116,
"theoretical_loss": 3.307130496000174,
"tokens_seen": 3165782016
},
{
"epoch": 0.57,
"learning_rate": 4.375534644995723e-05,
"loss": 2.4344,
"theoretical_loss": 3.3071200832496577,
"tokens_seen": 3165913088
},
{
"epoch": 0.57,
"learning_rate": 4.3712574850299406e-05,
"loss": 2.5286,
"theoretical_loss": 3.3071096710509322,
"tokens_seen": 3166044160
},
{
"epoch": 0.57,
"learning_rate": 4.366980325064158e-05,
"loss": 2.4363,
"theoretical_loss": 3.3070992594039463,
"tokens_seen": 3166175232
},
{
"epoch": 0.57,
"learning_rate": 4.362703165098375e-05,
"loss": 2.4088,
"theoretical_loss": 3.307088848308647,
"tokens_seen": 3166306304
},
{
"epoch": 0.57,
"learning_rate": 4.358426005132592e-05,
"loss": 2.4939,
"theoretical_loss": 3.307078437764983,
"tokens_seen": 3166437376
},
{
"epoch": 0.57,
"learning_rate": 4.354148845166809e-05,
"loss": 2.4801,
"theoretical_loss": 3.3070680277729023,
"tokens_seen": 3166568448
},
{
"epoch": 0.57,
"learning_rate": 4.349871685201027e-05,
"loss": 2.3799,
"theoretical_loss": 3.307057618332353,
"tokens_seen": 3166699520
},
{
"epoch": 0.57,
"learning_rate": 4.345594525235244e-05,
"loss": 2.4907,
"theoretical_loss": 3.3070472094432817,
"tokens_seen": 3166830592
},
{
"epoch": 0.57,
"learning_rate": 4.341317365269461e-05,
"loss": 2.4501,
"theoretical_loss": 3.307036801105638,
"tokens_seen": 3166961664
},
{
"epoch": 0.57,
"learning_rate": 4.3370402053036785e-05,
"loss": 2.3822,
"theoretical_loss": 3.3070263933193687,
"tokens_seen": 3167092736
},
{
"epoch": 0.57,
"learning_rate": 4.3327630453378956e-05,
"loss": 2.6524,
"theoretical_loss": 3.307015986084422,
"tokens_seen": 3167223808
},
{
"epoch": 0.57,
"objective/train/docs_used": 1735648,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.1001577377319336,
"objective/train/theoretical_loss": 3.3070055794007467,
"objective/train/tokens_used": 197406176,
"theoretical_loss": 3.3070055794007467,
"tokens_seen": 3167354880
},
{
"epoch": 0.57,
"learning_rate": 4.3284858853721134e-05,
"loss": 2.3798,
"theoretical_loss": 3.3070055794007467,
"tokens_seen": 3167354880
},
{
"epoch": 0.57,
"learning_rate": 4.32420872540633e-05,
"loss": 2.5349,
"theoretical_loss": 3.3069951732682896,
"tokens_seen": 3167485952
},
{
"epoch": 0.57,
"learning_rate": 4.319931565440548e-05,
"loss": 2.5036,
"theoretical_loss": 3.3069847676869997,
"tokens_seen": 3167617024
},
{
"epoch": 0.57,
"learning_rate": 4.315654405474765e-05,
"loss": 2.5548,
"theoretical_loss": 3.306974362656825,
"tokens_seen": 3167748096
},
{
"epoch": 0.57,
"learning_rate": 4.311377245508982e-05,
"loss": 2.4116,
"theoretical_loss": 3.3069639581777124,
"tokens_seen": 3167879168
},
{
"epoch": 0.57,
"learning_rate": 4.3071000855432e-05,
"loss": 2.551,
"theoretical_loss": 3.3069535542496107,
"tokens_seen": 3168010240
},
{
"epoch": 0.57,
"learning_rate": 4.3028229255774163e-05,
"loss": 2.6564,
"theoretical_loss": 3.3069431508724683,
"tokens_seen": 3168141312
},
{
"epoch": 0.57,
"learning_rate": 4.298545765611634e-05,
"loss": 2.3732,
"theoretical_loss": 3.3069327480462323,
"tokens_seen": 3168272384
},
{
"epoch": 0.57,
"learning_rate": 4.294268605645851e-05,
"loss": 2.4254,
"theoretical_loss": 3.3069223457708516,
"tokens_seen": 3168403456
},
{
"epoch": 0.58,
"learning_rate": 4.289991445680069e-05,
"loss": 2.4169,
"theoretical_loss": 3.3069119440462735,
"tokens_seen": 3168534528
},
{
"epoch": 0.58,
"learning_rate": 4.2857142857142856e-05,
"loss": 2.4854,
"theoretical_loss": 3.306901542872447,
"tokens_seen": 3168665600
},
{
"epoch": 0.58,
"learning_rate": 4.281437125748503e-05,
"loss": 2.4044,
"theoretical_loss": 3.306891142249319,
"tokens_seen": 3168796672
},
{
"epoch": 0.58,
"learning_rate": 4.2771599657827206e-05,
"loss": 2.4397,
"theoretical_loss": 3.3068807421768383,
"tokens_seen": 3168927744
},
{
"epoch": 0.58,
"objective/train/docs_used": 1736895,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7372190952301025,
"objective/train/theoretical_loss": 3.306875542347074,
"objective/train/tokens_used": 199044576,
"theoretical_loss": 3.306875542347074,
"tokens_seen": 3168993280
},
{
"epoch": 0.58,
"learning_rate": 4.272882805816938e-05,
"loss": 2.5245,
"theoretical_loss": 3.306870342654953,
"tokens_seen": 3169058816
},
{
"epoch": 0.58,
"learning_rate": 4.2686056458511556e-05,
"loss": 2.5318,
"theoretical_loss": 3.3068599436836106,
"tokens_seen": 3169189888
},
{
"epoch": 0.58,
"learning_rate": 4.264328485885372e-05,
"loss": 2.4989,
"theoretical_loss": 3.30684954526276,
"tokens_seen": 3169320960
},
{
"epoch": 0.58,
"learning_rate": 4.260051325919589e-05,
"loss": 2.4893,
"theoretical_loss": 3.306839147392348,
"tokens_seen": 3169452032
},
{
"epoch": 0.58,
"learning_rate": 4.255774165953807e-05,
"loss": 2.4955,
"theoretical_loss": 3.3068287500723246,
"tokens_seen": 3169583104
},
{
"epoch": 0.58,
"learning_rate": 4.251497005988024e-05,
"loss": 2.509,
"theoretical_loss": 3.3068183533026367,
"tokens_seen": 3169714176
},
{
"epoch": 0.58,
"learning_rate": 4.2472198460222414e-05,
"loss": 2.498,
"theoretical_loss": 3.3068079570832323,
"tokens_seen": 3169845248
},
{
"epoch": 0.58,
"learning_rate": 4.2429426860564585e-05,
"loss": 2.4562,
"theoretical_loss": 3.30679756141406,
"tokens_seen": 3169976320
},
{
"epoch": 0.58,
"learning_rate": 4.238665526090676e-05,
"loss": 2.4116,
"theoretical_loss": 3.3067871662950674,
"tokens_seen": 3170107392
},
{
"epoch": 0.58,
"learning_rate": 4.2343883661248935e-05,
"loss": 2.4851,
"theoretical_loss": 3.3067767717262035,
"tokens_seen": 3170238464
},
{
"epoch": 0.58,
"learning_rate": 4.2301112061591106e-05,
"loss": 2.535,
"theoretical_loss": 3.3067663777074157,
"tokens_seen": 3170369536
},
{
"epoch": 0.58,
"learning_rate": 4.225834046193328e-05,
"loss": 2.5879,
"theoretical_loss": 3.3067559842386522,
"tokens_seen": 3170500608
},
{
"epoch": 0.58,
"objective/train/docs_used": 1737490,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7611770629882812,
"objective/train/theoretical_loss": 3.306745591319862,
"objective/train/tokens_used": 200682976,
"theoretical_loss": 3.306745591319862,
"tokens_seen": 3170631680
},
{
"epoch": 0.58,
"learning_rate": 4.221556886227545e-05,
"loss": 2.5032,
"theoretical_loss": 3.306745591319862,
"tokens_seen": 3170631680
},
{
"epoch": 0.58,
"learning_rate": 4.217279726261763e-05,
"loss": 2.5335,
"theoretical_loss": 3.3067351989509914,
"tokens_seen": 3170762752
},
{
"epoch": 0.58,
"learning_rate": 4.21300256629598e-05,
"loss": 2.4593,
"theoretical_loss": 3.3067248071319906,
"tokens_seen": 3170893824
},
{
"epoch": 0.58,
"learning_rate": 4.208725406330197e-05,
"loss": 2.4957,
"theoretical_loss": 3.306714415862807,
"tokens_seen": 3171024896
},
{
"epoch": 0.58,
"learning_rate": 4.204448246364414e-05,
"loss": 2.5613,
"theoretical_loss": 3.3067040251433886,
"tokens_seen": 3171155968
},
{
"epoch": 0.58,
"learning_rate": 4.2001710863986314e-05,
"loss": 2.4987,
"theoretical_loss": 3.3066936349736835,
"tokens_seen": 3171287040
},
{
"epoch": 0.58,
"learning_rate": 4.195893926432849e-05,
"loss": 2.3298,
"theoretical_loss": 3.3066832453536406,
"tokens_seen": 3171418112
},
{
"epoch": 0.59,
"learning_rate": 4.191616766467066e-05,
"loss": 2.3801,
"theoretical_loss": 3.306672856283207,
"tokens_seen": 3171549184
},
{
"epoch": 0.59,
"learning_rate": 4.1873396065012835e-05,
"loss": 2.5703,
"theoretical_loss": 3.306662467762332,
"tokens_seen": 3171680256
},
{
"epoch": 0.59,
"learning_rate": 4.183062446535501e-05,
"loss": 2.3197,
"theoretical_loss": 3.3066520797909633,
"tokens_seen": 3171811328
},
{
"epoch": 0.59,
"learning_rate": 4.178785286569718e-05,
"loss": 2.48,
"theoretical_loss": 3.306641692369049,
"tokens_seen": 3171942400
},
{
"epoch": 0.59,
"learning_rate": 4.1745081266039356e-05,
"loss": 2.4383,
"theoretical_loss": 3.3066313054965377,
"tokens_seen": 3172073472
},
{
"epoch": 0.59,
"learning_rate": 4.170230966638152e-05,
"loss": 2.4996,
"theoretical_loss": 3.3066209191733775,
"tokens_seen": 3172204544
},
{
"epoch": 0.59,
"objective/train/docs_used": 1738634,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6035115718841553,
"objective/train/theoretical_loss": 3.306615726217788,
"objective/train/tokens_used": 202321376,
"theoretical_loss": 3.306615726217788,
"tokens_seen": 3172270080
},
{
"epoch": 0.59,
"learning_rate": 4.16595380667237e-05,
"loss": 2.4728,
"theoretical_loss": 3.3066105333995166,
"tokens_seen": 3172335616
},
{
"epoch": 0.59,
"learning_rate": 4.161676646706587e-05,
"loss": 2.4305,
"theoretical_loss": 3.306600148174903,
"tokens_seen": 3172466688
},
{
"epoch": 0.59,
"learning_rate": 4.157399486740804e-05,
"loss": 2.4707,
"theoretical_loss": 3.3065897634994856,
"tokens_seen": 3172597760
},
{
"epoch": 0.59,
"learning_rate": 4.1531223267750214e-05,
"loss": 2.4014,
"theoretical_loss": 3.306579379373212,
"tokens_seen": 3172728832
},
{
"epoch": 0.59,
"learning_rate": 4.1488451668092386e-05,
"loss": 2.5214,
"theoretical_loss": 3.306568995796031,
"tokens_seen": 3172859904
},
{
"epoch": 0.59,
"learning_rate": 4.1445680068434564e-05,
"loss": 2.5563,
"theoretical_loss": 3.3065586127678905,
"tokens_seen": 3172990976
},
{
"epoch": 0.59,
"learning_rate": 4.1402908468776735e-05,
"loss": 2.4841,
"theoretical_loss": 3.3065482302887395,
"tokens_seen": 3173122048
},
{
"epoch": 0.59,
"learning_rate": 4.136013686911891e-05,
"loss": 2.4925,
"theoretical_loss": 3.3065378483585253,
"tokens_seen": 3173253120
},
{
"epoch": 0.59,
"learning_rate": 4.131736526946108e-05,
"loss": 2.5094,
"theoretical_loss": 3.3065274669771965,
"tokens_seen": 3173384192
},
{
"epoch": 0.59,
"learning_rate": 4.127459366980325e-05,
"loss": 2.4376,
"theoretical_loss": 3.3065170861447015,
"tokens_seen": 3173515264
},
{
"epoch": 0.59,
"learning_rate": 4.123182207014543e-05,
"loss": 2.4933,
"theoretical_loss": 3.306506705860989,
"tokens_seen": 3173646336
},
{
"epoch": 0.59,
"learning_rate": 4.11890504704876e-05,
"loss": 2.4276,
"theoretical_loss": 3.306496326126007,
"tokens_seen": 3173777408
},
{
"epoch": 0.59,
"objective/train/docs_used": 1739179,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3437700271606445,
"objective/train/theoretical_loss": 3.306485946939704,
"objective/train/tokens_used": 203959776,
"theoretical_loss": 3.306485946939704,
"tokens_seen": 3173908480
},
{
"epoch": 0.59,
"learning_rate": 4.114627887082977e-05,
"loss": 2.419,
"theoretical_loss": 3.306485946939704,
"tokens_seen": 3173908480
},
{
"epoch": 0.59,
"learning_rate": 4.110350727117194e-05,
"loss": 2.4699,
"theoretical_loss": 3.306475568302028,
"tokens_seen": 3174039552
},
{
"epoch": 0.59,
"learning_rate": 4.1060735671514114e-05,
"loss": 2.5379,
"theoretical_loss": 3.3064651902129274,
"tokens_seen": 3174170624
},
{
"epoch": 0.59,
"learning_rate": 4.101796407185629e-05,
"loss": 2.6357,
"theoretical_loss": 3.3064548126723508,
"tokens_seen": 3174301696
},
{
"epoch": 0.59,
"learning_rate": 4.0975192472198464e-05,
"loss": 2.5486,
"theoretical_loss": 3.3064444356802465,
"tokens_seen": 3174432768
},
{
"epoch": 0.59,
"learning_rate": 4.0932420872540636e-05,
"loss": 2.4297,
"theoretical_loss": 3.3064340592365626,
"tokens_seen": 3174563840
},
{
"epoch": 0.6,
"learning_rate": 4.088964927288281e-05,
"loss": 2.3582,
"theoretical_loss": 3.306423683341248,
"tokens_seen": 3174694912
},
{
"epoch": 0.6,
"learning_rate": 4.084687767322498e-05,
"loss": 2.3793,
"theoretical_loss": 3.3064133079942506,
"tokens_seen": 3174825984
},
{
"epoch": 0.6,
"learning_rate": 4.080410607356716e-05,
"loss": 2.3722,
"theoretical_loss": 3.3064029331955194,
"tokens_seen": 3174957056
},
{
"epoch": 0.6,
"learning_rate": 4.076133447390932e-05,
"loss": 2.4821,
"theoretical_loss": 3.3063925589450016,
"tokens_seen": 3175088128
},
{
"epoch": 0.6,
"learning_rate": 4.07185628742515e-05,
"loss": 2.4909,
"theoretical_loss": 3.3063821852426467,
"tokens_seen": 3175219200
},
{
"epoch": 0.6,
"learning_rate": 4.067579127459367e-05,
"loss": 2.445,
"theoretical_loss": 3.306371812088403,
"tokens_seen": 3175350272
},
{
"epoch": 0.6,
"learning_rate": 4.063301967493584e-05,
"loss": 2.4686,
"theoretical_loss": 3.3063614394822185,
"tokens_seen": 3175481344
},
{
"epoch": 0.6,
"objective/train/docs_used": 1739867,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4862287044525146,
"objective/train/theoretical_loss": 3.3063562533846325,
"objective/train/tokens_used": 205598176,
"theoretical_loss": 3.3063562533846325,
"tokens_seen": 3175546880
},
{
"epoch": 0.6,
"learning_rate": 4.059024807527802e-05,
"loss": 2.5223,
"theoretical_loss": 3.3063510674240417,
"tokens_seen": 3175612416
},
{
"epoch": 0.6,
"learning_rate": 4.0547476475620186e-05,
"loss": 2.495,
"theoretical_loss": 3.3063406959138213,
"tokens_seen": 3175743488
},
{
"epoch": 0.6,
"learning_rate": 4.0504704875962364e-05,
"loss": 2.3905,
"theoretical_loss": 3.3063303249515057,
"tokens_seen": 3175874560
},
{
"epoch": 0.6,
"learning_rate": 4.0461933276304536e-05,
"loss": 2.6257,
"theoretical_loss": 3.3063199545370425,
"tokens_seen": 3176005632
},
{
"epoch": 0.6,
"learning_rate": 4.041916167664671e-05,
"loss": 2.4279,
"theoretical_loss": 3.3063095846703816,
"tokens_seen": 3176136704
},
{
"epoch": 0.6,
"learning_rate": 4.037639007698888e-05,
"loss": 2.4832,
"theoretical_loss": 3.3062992153514705,
"tokens_seen": 3176267776
},
{
"epoch": 0.6,
"learning_rate": 4.033361847733105e-05,
"loss": 2.5037,
"theoretical_loss": 3.306288846580258,
"tokens_seen": 3176398848
},
{
"epoch": 0.6,
"learning_rate": 4.029084687767323e-05,
"loss": 2.5226,
"theoretical_loss": 3.3062784783566923,
"tokens_seen": 3176529920
},
{
"epoch": 0.6,
"learning_rate": 4.02480752780154e-05,
"loss": 2.4342,
"theoretical_loss": 3.306268110680722,
"tokens_seen": 3176660992
},
{
"epoch": 0.6,
"learning_rate": 4.020530367835757e-05,
"loss": 2.4567,
"theoretical_loss": 3.306257743552296,
"tokens_seen": 3176792064
},
{
"epoch": 0.6,
"learning_rate": 4.016253207869974e-05,
"loss": 2.4013,
"theoretical_loss": 3.306247376971362,
"tokens_seen": 3176923136
},
{
"epoch": 0.6,
"learning_rate": 4.0119760479041915e-05,
"loss": 2.5258,
"theoretical_loss": 3.3062370109378696,
"tokens_seen": 3177054208
},
{
"epoch": 0.6,
"objective/train/docs_used": 1740434,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 1.7373958826065063,
"objective/train/theoretical_loss": 3.306226645451766,
"objective/train/tokens_used": 207236576,
"theoretical_loss": 3.306226645451766,
"tokens_seen": 3177185280
},
{
"epoch": 0.6,
"learning_rate": 4.007698887938409e-05,
"loss": 2.3115,
"theoretical_loss": 3.306226645451766,
"tokens_seen": 3177185280
},
{
"epoch": 0.6,
"learning_rate": 4.0034217279726265e-05,
"loss": 2.4377,
"theoretical_loss": 3.3062162805130004,
"tokens_seen": 3177316352
},
{
"epoch": 0.6,
"learning_rate": 3.9991445680068436e-05,
"loss": 2.3801,
"theoretical_loss": 3.3062059161215216,
"tokens_seen": 3177447424
},
{
"epoch": 0.6,
"learning_rate": 3.994867408041061e-05,
"loss": 2.5384,
"theoretical_loss": 3.3061955522772775,
"tokens_seen": 3177578496
},
{
"epoch": 0.6,
"learning_rate": 3.990590248075278e-05,
"loss": 2.4873,
"theoretical_loss": 3.306185188980217,
"tokens_seen": 3177709568
},
{
"epoch": 0.61,
"learning_rate": 3.986313088109496e-05,
"loss": 2.3113,
"theoretical_loss": 3.3061748262302886,
"tokens_seen": 3177840640
},
{
"epoch": 0.61,
"learning_rate": 3.982035928143712e-05,
"loss": 2.5892,
"theoretical_loss": 3.306164464027441,
"tokens_seen": 3177971712
},
{
"epoch": 0.61,
"learning_rate": 3.97775876817793e-05,
"loss": 2.5497,
"theoretical_loss": 3.3061541023716225,
"tokens_seen": 3178102784
},
{
"epoch": 0.61,
"learning_rate": 3.973481608212147e-05,
"loss": 2.3053,
"theoretical_loss": 3.3061437412627814,
"tokens_seen": 3178233856
},
{
"epoch": 0.61,
"learning_rate": 3.969204448246365e-05,
"loss": 2.5014,
"theoretical_loss": 3.306133380700867,
"tokens_seen": 3178364928
},
{
"epoch": 0.61,
"learning_rate": 3.964927288280582e-05,
"loss": 2.671,
"theoretical_loss": 3.306123020685827,
"tokens_seen": 3178496000
},
{
"epoch": 0.61,
"learning_rate": 3.960650128314799e-05,
"loss": 2.5158,
"theoretical_loss": 3.306112661217611,
"tokens_seen": 3178627072
},
{
"epoch": 0.61,
"learning_rate": 3.9563729683490165e-05,
"loss": 2.4479,
"theoretical_loss": 3.306102302296167,
"tokens_seen": 3178758144
},
{
"epoch": 0.61,
"objective/train/docs_used": 1741553,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4893863201141357,
"objective/train/theoretical_loss": 3.3060971230404683,
"objective/train/tokens_used": 208874976,
"theoretical_loss": 3.3060971230404683,
"tokens_seen": 3178823680
},
{
"epoch": 0.61,
"learning_rate": 3.9520958083832336e-05,
"loss": 2.5002,
"theoretical_loss": 3.3060919439214436,
"tokens_seen": 3178889216
},
{
"epoch": 0.61,
"learning_rate": 3.9478186484174515e-05,
"loss": 2.4046,
"theoretical_loss": 3.3060815860933896,
"tokens_seen": 3179020288
},
{
"epoch": 0.61,
"learning_rate": 3.943541488451668e-05,
"loss": 2.458,
"theoretical_loss": 3.306071228811953,
"tokens_seen": 3179151360
},
{
"epoch": 0.61,
"learning_rate": 3.939264328485886e-05,
"loss": 2.4769,
"theoretical_loss": 3.3060608720770834,
"tokens_seen": 3179282432
},
{
"epoch": 0.61,
"learning_rate": 3.934987168520103e-05,
"loss": 2.4564,
"theoretical_loss": 3.306050515888729,
"tokens_seen": 3179413504
},
{
"epoch": 0.61,
"learning_rate": 3.93071000855432e-05,
"loss": 2.4812,
"theoretical_loss": 3.306040160246838,
"tokens_seen": 3179544576
},
{
"epoch": 0.61,
"learning_rate": 3.926432848588538e-05,
"loss": 2.5644,
"theoretical_loss": 3.3060298051513595,
"tokens_seen": 3179675648
},
{
"epoch": 0.61,
"learning_rate": 3.9221556886227544e-05,
"loss": 2.6354,
"theoretical_loss": 3.306019450602242,
"tokens_seen": 3179806720
},
{
"epoch": 0.61,
"learning_rate": 3.917878528656972e-05,
"loss": 2.4243,
"theoretical_loss": 3.306009096599434,
"tokens_seen": 3179937792
},
{
"epoch": 0.61,
"learning_rate": 3.9136013686911894e-05,
"loss": 2.5855,
"theoretical_loss": 3.3059987431428848,
"tokens_seen": 3180068864
},
{
"epoch": 0.61,
"learning_rate": 3.9093242087254065e-05,
"loss": 2.4582,
"theoretical_loss": 3.3059883902325424,
"tokens_seen": 3180199936
},
{
"epoch": 0.61,
"learning_rate": 3.905047048759624e-05,
"loss": 2.4974,
"theoretical_loss": 3.3059780378683556,
"tokens_seen": 3180331008
},
{
"epoch": 0.61,
"objective/train/docs_used": 1742088,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.383901834487915,
"objective/train/theoretical_loss": 3.3059676860502734,
"objective/train/tokens_used": 210513376,
"theoretical_loss": 3.3059676860502734,
"tokens_seen": 3180462080
},
{
"epoch": 0.61,
"learning_rate": 3.900769888793841e-05,
"loss": 2.5039,
"theoretical_loss": 3.3059676860502734,
"tokens_seen": 3180462080
},
{
"epoch": 0.61,
"learning_rate": 3.8964927288280587e-05,
"loss": 2.5339,
"theoretical_loss": 3.305957334778244,
"tokens_seen": 3180593152
},
{
"epoch": 0.61,
"learning_rate": 3.892215568862276e-05,
"loss": 2.4543,
"theoretical_loss": 3.3059469840522167,
"tokens_seen": 3180724224
},
{
"epoch": 0.62,
"learning_rate": 3.887938408896493e-05,
"loss": 2.5014,
"theoretical_loss": 3.3059366338721397,
"tokens_seen": 3180855296
},
{
"epoch": 0.62,
"learning_rate": 3.88366124893071e-05,
"loss": 2.4249,
"theoretical_loss": 3.305926284237962,
"tokens_seen": 3180986368
},
{
"epoch": 0.62,
"learning_rate": 3.879384088964927e-05,
"loss": 2.4925,
"theoretical_loss": 3.305915935149632,
"tokens_seen": 3181117440
},
{
"epoch": 0.62,
"learning_rate": 3.875106928999145e-05,
"loss": 2.4658,
"theoretical_loss": 3.3059055866070985,
"tokens_seen": 3181248512
},
{
"epoch": 0.62,
"learning_rate": 3.870829769033362e-05,
"loss": 2.4208,
"theoretical_loss": 3.3058952386103106,
"tokens_seen": 3181379584
},
{
"epoch": 0.62,
"learning_rate": 3.8665526090675794e-05,
"loss": 2.4329,
"theoretical_loss": 3.305884891159217,
"tokens_seen": 3181510656
},
{
"epoch": 0.62,
"learning_rate": 3.8622754491017966e-05,
"loss": 2.343,
"theoretical_loss": 3.305874544253766,
"tokens_seen": 3181641728
},
{
"epoch": 0.62,
"learning_rate": 3.857998289136014e-05,
"loss": 2.558,
"theoretical_loss": 3.3058641978939063,
"tokens_seen": 3181772800
},
{
"epoch": 0.62,
"learning_rate": 3.8537211291702315e-05,
"loss": 2.5192,
"theoretical_loss": 3.305853852079587,
"tokens_seen": 3181903872
},
{
"epoch": 0.62,
"learning_rate": 3.849443969204448e-05,
"loss": 2.636,
"theoretical_loss": 3.305843506810757,
"tokens_seen": 3182034944
},
{
"epoch": 0.62,
"objective/train/docs_used": 1743107,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.428229570388794,
"objective/train/theoretical_loss": 3.3058383343808844,
"objective/train/tokens_used": 212151776,
"theoretical_loss": 3.3058383343808844,
"tokens_seen": 3182100480
},
{
"epoch": 0.62,
"learning_rate": 3.845166809238666e-05,
"loss": 2.5503,
"theoretical_loss": 3.3058331620873647,
"tokens_seen": 3182166016
},
{
"epoch": 0.62,
"learning_rate": 3.840889649272883e-05,
"loss": 2.5304,
"theoretical_loss": 3.305822817909359,
"tokens_seen": 3182297088
},
{
"epoch": 0.62,
"learning_rate": 3.8366124893071e-05,
"loss": 2.4096,
"theoretical_loss": 3.305812474276689,
"tokens_seen": 3182428160
},
{
"epoch": 0.62,
"learning_rate": 3.832335329341318e-05,
"loss": 2.5894,
"theoretical_loss": 3.305802131189303,
"tokens_seen": 3182559232
},
{
"epoch": 0.62,
"learning_rate": 3.8280581693755344e-05,
"loss": 2.4894,
"theoretical_loss": 3.30579178864715,
"tokens_seen": 3182690304
},
{
"epoch": 0.62,
"learning_rate": 3.823781009409752e-05,
"loss": 2.5676,
"theoretical_loss": 3.3057814466501787,
"tokens_seen": 3182821376
},
{
"epoch": 0.62,
"learning_rate": 3.8195038494439694e-05,
"loss": 2.5241,
"theoretical_loss": 3.3057711051983385,
"tokens_seen": 3182952448
},
{
"epoch": 0.62,
"learning_rate": 3.8152266894781866e-05,
"loss": 2.5246,
"theoretical_loss": 3.3057607642915774,
"tokens_seen": 3183083520
},
{
"epoch": 0.62,
"learning_rate": 3.810949529512404e-05,
"loss": 2.3917,
"theoretical_loss": 3.3057504239298448,
"tokens_seen": 3183214592
},
{
"epoch": 0.62,
"learning_rate": 3.806672369546621e-05,
"loss": 2.4044,
"theoretical_loss": 3.305740084113089,
"tokens_seen": 3183345664
},
{
"epoch": 0.62,
"learning_rate": 3.802395209580839e-05,
"loss": 2.3885,
"theoretical_loss": 3.3057297448412593,
"tokens_seen": 3183476736
},
{
"epoch": 0.62,
"learning_rate": 3.798118049615056e-05,
"loss": 2.7271,
"theoretical_loss": 3.3057194061143047,
"tokens_seen": 3183607808
},
{
"epoch": 0.62,
"objective/train/docs_used": 1744349,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4869487285614014,
"objective/train/theoretical_loss": 3.305709067932173,
"objective/train/tokens_used": 213790176,
"theoretical_loss": 3.305709067932173,
"tokens_seen": 3183738880
},
{
"epoch": 0.62,
"learning_rate": 3.793840889649273e-05,
"loss": 2.487,
"theoretical_loss": 3.305709067932173,
"tokens_seen": 3183738880
},
{
"epoch": 0.62,
"learning_rate": 3.78956372968349e-05,
"loss": 2.4626,
"theoretical_loss": 3.3056987302948144,
"tokens_seen": 3183869952
},
{
"epoch": 0.63,
"learning_rate": 3.785286569717707e-05,
"loss": 2.5541,
"theoretical_loss": 3.3056883932021774,
"tokens_seen": 3184001024
},
{
"epoch": 0.63,
"learning_rate": 3.781009409751925e-05,
"loss": 2.4975,
"theoretical_loss": 3.30567805665421,
"tokens_seen": 3184132096
},
{
"epoch": 0.63,
"learning_rate": 3.776732249786142e-05,
"loss": 2.5569,
"theoretical_loss": 3.3056677206508622,
"tokens_seen": 3184263168
},
{
"epoch": 0.63,
"learning_rate": 3.7724550898203595e-05,
"loss": 2.5492,
"theoretical_loss": 3.3056573851920827,
"tokens_seen": 3184394240
},
{
"epoch": 0.63,
"learning_rate": 3.7681779298545766e-05,
"loss": 2.65,
"theoretical_loss": 3.3056470502778197,
"tokens_seen": 3184525312
},
{
"epoch": 0.63,
"learning_rate": 3.763900769888794e-05,
"loss": 2.4132,
"theoretical_loss": 3.3056367159080224,
"tokens_seen": 3184656384
},
{
"epoch": 0.63,
"learning_rate": 3.7596236099230116e-05,
"loss": 2.5916,
"theoretical_loss": 3.3056263820826404,
"tokens_seen": 3184787456
},
{
"epoch": 0.63,
"learning_rate": 3.755346449957229e-05,
"loss": 2.5077,
"theoretical_loss": 3.3056160488016215,
"tokens_seen": 3184918528
},
{
"epoch": 0.63,
"learning_rate": 3.751069289991446e-05,
"loss": 2.485,
"theoretical_loss": 3.3056057160649153,
"tokens_seen": 3185049600
},
{
"epoch": 0.63,
"learning_rate": 3.746792130025663e-05,
"loss": 2.3922,
"theoretical_loss": 3.3055953838724705,
"tokens_seen": 3185180672
},
{
"epoch": 0.63,
"learning_rate": 3.74251497005988e-05,
"loss": 2.4665,
"theoretical_loss": 3.3055850522242363,
"tokens_seen": 3185311744
},
{
"epoch": 0.63,
"objective/train/docs_used": 1745003,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.075413227081299,
"objective/train/theoretical_loss": 3.3055798866041823,
"objective/train/tokens_used": 215428576,
"theoretical_loss": 3.3055798866041823,
"tokens_seen": 3185377280
},
{
"epoch": 0.63,
"learning_rate": 3.738237810094098e-05,
"loss": 2.3933,
"theoretical_loss": 3.305574721120162,
"tokens_seen": 3185442816
},
{
"epoch": 0.63,
"learning_rate": 3.7339606501283145e-05,
"loss": 2.457,
"theoretical_loss": 3.3055643905601952,
"tokens_seen": 3185573888
},
{
"epoch": 0.63,
"learning_rate": 3.729683490162532e-05,
"loss": 2.4476,
"theoretical_loss": 3.305554060544286,
"tokens_seen": 3185704960
},
{
"epoch": 0.63,
"learning_rate": 3.7254063301967495e-05,
"loss": 2.3651,
"theoretical_loss": 3.305543731072383,
"tokens_seen": 3185836032
},
{
"epoch": 0.63,
"learning_rate": 3.7211291702309666e-05,
"loss": 2.642,
"theoretical_loss": 3.3055334021444356,
"tokens_seen": 3185967104
},
{
"epoch": 0.63,
"learning_rate": 3.7168520102651845e-05,
"loss": 2.5106,
"theoretical_loss": 3.305523073760392,
"tokens_seen": 3186098176
},
{
"epoch": 0.63,
"learning_rate": 3.712574850299401e-05,
"loss": 2.4407,
"theoretical_loss": 3.305512745920202,
"tokens_seen": 3186229248
},
{
"epoch": 0.63,
"learning_rate": 3.708297690333619e-05,
"loss": 2.4805,
"theoretical_loss": 3.305502418623814,
"tokens_seen": 3186360320
},
{
"epoch": 0.63,
"learning_rate": 3.704020530367836e-05,
"loss": 2.5066,
"theoretical_loss": 3.305492091871177,
"tokens_seen": 3186491392
},
{
"epoch": 0.63,
"learning_rate": 3.699743370402054e-05,
"loss": 2.4317,
"theoretical_loss": 3.3054817656622406,
"tokens_seen": 3186622464
},
{
"epoch": 0.63,
"learning_rate": 3.69546621043627e-05,
"loss": 2.5524,
"theoretical_loss": 3.305471439996953,
"tokens_seen": 3186753536
},
{
"epoch": 0.63,
"learning_rate": 3.6911890504704874e-05,
"loss": 2.3725,
"theoretical_loss": 3.3054611148752637,
"tokens_seen": 3186884608
},
{
"epoch": 0.63,
"objective/train/docs_used": 1746088,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.514112710952759,
"objective/train/theoretical_loss": 3.3054507902971215,
"objective/train/tokens_used": 217066976,
"theoretical_loss": 3.3054507902971215,
"tokens_seen": 3187015680
},
{
"epoch": 0.64,
"learning_rate": 3.686911890504705e-05,
"loss": 2.5984,
"theoretical_loss": 3.3054507902971215,
"tokens_seen": 3187015680
},
{
"epoch": 0.64,
"learning_rate": 3.6826347305389224e-05,
"loss": 2.4928,
"theoretical_loss": 3.305440466262476,
"tokens_seen": 3187146752
},
{
"epoch": 0.64,
"learning_rate": 3.6783575705731395e-05,
"loss": 2.4952,
"theoretical_loss": 3.3054301427712756,
"tokens_seen": 3187277824
},
{
"epoch": 0.64,
"learning_rate": 3.6740804106073567e-05,
"loss": 2.5138,
"theoretical_loss": 3.3054198198234697,
"tokens_seen": 3187408896
},
{
"epoch": 0.64,
"learning_rate": 3.6698032506415745e-05,
"loss": 2.4385,
"theoretical_loss": 3.305409497419007,
"tokens_seen": 3187539968
},
{
"epoch": 0.64,
"learning_rate": 3.6655260906757916e-05,
"loss": 2.4016,
"theoretical_loss": 3.305399175557837,
"tokens_seen": 3187671040
},
{
"epoch": 0.64,
"learning_rate": 3.661248930710009e-05,
"loss": 2.6873,
"theoretical_loss": 3.3053888542399084,
"tokens_seen": 3187802112
},
{
"epoch": 0.64,
"learning_rate": 3.656971770744226e-05,
"loss": 2.4365,
"theoretical_loss": 3.3053785334651704,
"tokens_seen": 3187933184
},
{
"epoch": 0.64,
"learning_rate": 3.652694610778443e-05,
"loss": 2.5373,
"theoretical_loss": 3.305368213233572,
"tokens_seen": 3188064256
},
{
"epoch": 0.64,
"learning_rate": 3.648417450812661e-05,
"loss": 2.4138,
"theoretical_loss": 3.3053578935450627,
"tokens_seen": 3188195328
},
{
"epoch": 0.64,
"learning_rate": 3.644140290846878e-05,
"loss": 2.4721,
"theoretical_loss": 3.3053475743995913,
"tokens_seen": 3188326400
},
{
"epoch": 0.64,
"learning_rate": 3.6398631308810946e-05,
"loss": 2.4902,
"theoretical_loss": 3.305337255797107,
"tokens_seen": 3188457472
},
{
"epoch": 0.64,
"learning_rate": 3.6355859709153124e-05,
"loss": 2.5323,
"theoretical_loss": 3.3053269377375587,
"tokens_seen": 3188588544
},
{
"epoch": 0.64,
"objective/train/docs_used": 1746515,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3587210178375244,
"objective/train/theoretical_loss": 3.3053217789113694,
"objective/train/tokens_used": 218705376,
"theoretical_loss": 3.3053217789113694,
"tokens_seen": 3188654080
},
{
"epoch": 0.64,
"learning_rate": 3.6313088109495295e-05,
"loss": 2.5451,
"theoretical_loss": 3.3053166202208955,
"tokens_seen": 3188719616
},
{
"epoch": 0.64,
"learning_rate": 3.6270316509837474e-05,
"loss": 2.6429,
"theoretical_loss": 3.3053063032470664,
"tokens_seen": 3188850688
},
{
"epoch": 0.64,
"learning_rate": 3.6227544910179645e-05,
"loss": 2.7202,
"theoretical_loss": 3.305295986816021,
"tokens_seen": 3188981760
},
{
"epoch": 0.64,
"learning_rate": 3.618477331052182e-05,
"loss": 2.4629,
"theoretical_loss": 3.3052856709277085,
"tokens_seen": 3189112832
},
{
"epoch": 0.64,
"learning_rate": 3.614200171086399e-05,
"loss": 2.46,
"theoretical_loss": 3.3052753555820775,
"tokens_seen": 3189243904
},
{
"epoch": 0.64,
"learning_rate": 3.609923011120616e-05,
"loss": 2.5008,
"theoretical_loss": 3.3052650407790773,
"tokens_seen": 3189374976
},
{
"epoch": 0.64,
"learning_rate": 3.605645851154834e-05,
"loss": 2.3809,
"theoretical_loss": 3.3052547265186574,
"tokens_seen": 3189506048
},
{
"epoch": 0.64,
"learning_rate": 3.60136869118905e-05,
"loss": 2.5,
"theoretical_loss": 3.3052444128007665,
"tokens_seen": 3189637120
},
{
"epoch": 0.64,
"learning_rate": 3.597091531223268e-05,
"loss": 2.5178,
"theoretical_loss": 3.305234099625354,
"tokens_seen": 3189768192
},
{
"epoch": 0.64,
"learning_rate": 3.592814371257485e-05,
"loss": 2.4331,
"theoretical_loss": 3.3052237869923693,
"tokens_seen": 3189899264
},
{
"epoch": 0.64,
"learning_rate": 3.5885372112917024e-05,
"loss": 2.5657,
"theoretical_loss": 3.305213474901761,
"tokens_seen": 3190030336
},
{
"epoch": 0.65,
"learning_rate": 3.58426005132592e-05,
"loss": 2.6933,
"theoretical_loss": 3.305203163353479,
"tokens_seen": 3190161408
},
{
"epoch": 0.65,
"objective/train/docs_used": 1747620,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.320441484451294,
"objective/train/theoretical_loss": 3.305192852347472,
"objective/train/tokens_used": 220343776,
"theoretical_loss": 3.305192852347472,
"tokens_seen": 3190292480
},
{
"epoch": 0.65,
"learning_rate": 3.579982891360137e-05,
"loss": 2.5312,
"theoretical_loss": 3.305192852347472,
"tokens_seen": 3190292480
},
{
"epoch": 0.65,
"learning_rate": 3.5757057313943545e-05,
"loss": 2.4923,
"theoretical_loss": 3.3051825418836893,
"tokens_seen": 3190423552
},
{
"epoch": 0.65,
"learning_rate": 3.571428571428572e-05,
"loss": 2.3546,
"theoretical_loss": 3.3051722319620804,
"tokens_seen": 3190554624
},
{
"epoch": 0.65,
"learning_rate": 3.567151411462789e-05,
"loss": 2.5379,
"theoretical_loss": 3.305161922582594,
"tokens_seen": 3190685696
},
{
"epoch": 0.65,
"learning_rate": 3.562874251497006e-05,
"loss": 2.5875,
"theoretical_loss": 3.3051516137451795,
"tokens_seen": 3190816768
},
{
"epoch": 0.65,
"learning_rate": 3.558597091531223e-05,
"loss": 2.5622,
"theoretical_loss": 3.3051413054497867,
"tokens_seen": 3190947840
},
{
"epoch": 0.65,
"learning_rate": 3.554319931565441e-05,
"loss": 2.5436,
"theoretical_loss": 3.305130997696364,
"tokens_seen": 3191078912
},
{
"epoch": 0.65,
"learning_rate": 3.550042771599658e-05,
"loss": 2.5889,
"theoretical_loss": 3.3051206904848613,
"tokens_seen": 3191209984
},
{
"epoch": 0.65,
"learning_rate": 3.545765611633875e-05,
"loss": 2.5459,
"theoretical_loss": 3.3051103838152276,
"tokens_seen": 3191341056
},
{
"epoch": 0.65,
"learning_rate": 3.5414884516680924e-05,
"loss": 2.4801,
"theoretical_loss": 3.3051000776874115,
"tokens_seen": 3191472128
},
{
"epoch": 0.65,
"learning_rate": 3.5372112917023096e-05,
"loss": 2.5707,
"theoretical_loss": 3.3050897721013635,
"tokens_seen": 3191603200
},
{
"epoch": 0.65,
"learning_rate": 3.5329341317365274e-05,
"loss": 2.5332,
"theoretical_loss": 3.3050794670570323,
"tokens_seen": 3191734272
},
{
"epoch": 0.65,
"learning_rate": 3.5286569717707446e-05,
"loss": 2.584,
"theoretical_loss": 3.305069162554367,
"tokens_seen": 3191865344
},
{
"epoch": 0.65,
"objective/train/docs_used": 1748281,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 3.083997964859009,
"objective/train/theoretical_loss": 3.3050640105061433,
"objective/train/tokens_used": 221982176,
"theoretical_loss": 3.3050640105061433,
"tokens_seen": 3191930880
},
{
"epoch": 0.65,
"learning_rate": 3.524379811804962e-05,
"loss": 2.7437,
"theoretical_loss": 3.3050588585933167,
"tokens_seen": 3191996416
},
{
"epoch": 0.65,
"learning_rate": 3.520102651839179e-05,
"loss": 2.4889,
"theoretical_loss": 3.3050485551738316,
"tokens_seen": 3192127488
},
{
"epoch": 0.65,
"learning_rate": 3.515825491873396e-05,
"loss": 2.5558,
"theoretical_loss": 3.30503825229586,
"tokens_seen": 3192258560
},
{
"epoch": 0.65,
"learning_rate": 3.511548331907614e-05,
"loss": 2.359,
"theoretical_loss": 3.305027949959352,
"tokens_seen": 3192389632
},
{
"epoch": 0.65,
"learning_rate": 3.50727117194183e-05,
"loss": 2.6027,
"theoretical_loss": 3.3050176481642564,
"tokens_seen": 3192520704
},
{
"epoch": 0.65,
"learning_rate": 3.502994011976048e-05,
"loss": 2.4854,
"theoretical_loss": 3.3050073469105223,
"tokens_seen": 3192651776
},
{
"epoch": 0.65,
"learning_rate": 3.498716852010265e-05,
"loss": 2.3569,
"theoretical_loss": 3.3049970461980998,
"tokens_seen": 3192782848
},
{
"epoch": 0.65,
"learning_rate": 3.4944396920444825e-05,
"loss": 2.437,
"theoretical_loss": 3.304986746026938,
"tokens_seen": 3192913920
},
{
"epoch": 0.65,
"learning_rate": 3.4901625320787e-05,
"loss": 2.4806,
"theoretical_loss": 3.304976446396986,
"tokens_seen": 3193044992
},
{
"epoch": 0.65,
"learning_rate": 3.485885372112917e-05,
"loss": 2.4566,
"theoretical_loss": 3.304966147308193,
"tokens_seen": 3193176064
},
{
"epoch": 0.66,
"learning_rate": 3.4816082121471346e-05,
"loss": 2.5142,
"theoretical_loss": 3.3049558487605086,
"tokens_seen": 3193307136
},
{
"epoch": 0.66,
"learning_rate": 3.477331052181352e-05,
"loss": 2.5212,
"theoretical_loss": 3.3049455507538825,
"tokens_seen": 3193438208
},
{
"epoch": 0.66,
"objective/train/docs_used": 1749492,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5565552711486816,
"objective/train/theoretical_loss": 3.304935253288263,
"objective/train/tokens_used": 223620576,
"theoretical_loss": 3.304935253288263,
"tokens_seen": 3193569280
},
{
"epoch": 0.66,
"learning_rate": 3.473053892215569e-05,
"loss": 2.5474,
"theoretical_loss": 3.304935253288263,
"tokens_seen": 3193569280
},
{
"epoch": 0.66,
"learning_rate": 3.468776732249786e-05,
"loss": 2.4504,
"theoretical_loss": 3.304924956363601,
"tokens_seen": 3193700352
},
{
"epoch": 0.66,
"learning_rate": 3.464499572284003e-05,
"loss": 2.5253,
"theoretical_loss": 3.3049146599798447,
"tokens_seen": 3193831424
},
{
"epoch": 0.66,
"learning_rate": 3.460222412318221e-05,
"loss": 2.3926,
"theoretical_loss": 3.3049043641369438,
"tokens_seen": 3193962496
},
{
"epoch": 0.66,
"learning_rate": 3.455945252352438e-05,
"loss": 2.4699,
"theoretical_loss": 3.304894068834848,
"tokens_seen": 3194093568
},
{
"epoch": 0.66,
"learning_rate": 3.4516680923866553e-05,
"loss": 2.4117,
"theoretical_loss": 3.3048837740735064,
"tokens_seen": 3194224640
},
{
"epoch": 0.66,
"learning_rate": 3.4473909324208725e-05,
"loss": 2.488,
"theoretical_loss": 3.3048734798528683,
"tokens_seen": 3194355712
},
{
"epoch": 0.66,
"learning_rate": 3.4431137724550896e-05,
"loss": 2.4476,
"theoretical_loss": 3.3048631861728834,
"tokens_seen": 3194486784
},
{
"epoch": 0.66,
"learning_rate": 3.4388366124893075e-05,
"loss": 2.3744,
"theoretical_loss": 3.304852893033501,
"tokens_seen": 3194617856
},
{
"epoch": 0.66,
"learning_rate": 3.4345594525235246e-05,
"loss": 2.5922,
"theoretical_loss": 3.3048426004346707,
"tokens_seen": 3194748928
},
{
"epoch": 0.66,
"learning_rate": 3.430282292557742e-05,
"loss": 2.5774,
"theoretical_loss": 3.3048323083763416,
"tokens_seen": 3194880000
},
{
"epoch": 0.66,
"learning_rate": 3.426005132591959e-05,
"loss": 2.3775,
"theoretical_loss": 3.304822016858463,
"tokens_seen": 3195011072
},
{
"epoch": 0.66,
"learning_rate": 3.421727972626176e-05,
"loss": 2.42,
"theoretical_loss": 3.304811725880985,
"tokens_seen": 3195142144
},
{
"epoch": 0.66,
"objective/train/docs_used": 1749977,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7795517444610596,
"objective/train/theoretical_loss": 3.3048065805948803,
"objective/train/tokens_used": 225258976,
"theoretical_loss": 3.3048065805948803,
"tokens_seen": 3195207680
},
{
"epoch": 0.66,
"learning_rate": 3.417450812660394e-05,
"loss": 2.5814,
"theoretical_loss": 3.304801435443857,
"tokens_seen": 3195273216
},
{
"epoch": 0.66,
"learning_rate": 3.413173652694611e-05,
"loss": 2.5834,
"theoretical_loss": 3.3047911455470276,
"tokens_seen": 3195404288
},
{
"epoch": 0.66,
"learning_rate": 3.408896492728828e-05,
"loss": 2.5808,
"theoretical_loss": 3.3047808561904475,
"tokens_seen": 3195535360
},
{
"epoch": 0.66,
"learning_rate": 3.4046193327630454e-05,
"loss": 2.607,
"theoretical_loss": 3.304770567374065,
"tokens_seen": 3195666432
},
{
"epoch": 0.66,
"learning_rate": 3.400342172797263e-05,
"loss": 2.4575,
"theoretical_loss": 3.3047602790978305,
"tokens_seen": 3195797504
},
{
"epoch": 0.66,
"learning_rate": 3.3960650128314803e-05,
"loss": 2.4267,
"theoretical_loss": 3.304749991361693,
"tokens_seen": 3195928576
},
{
"epoch": 0.66,
"learning_rate": 3.391787852865697e-05,
"loss": 2.4974,
"theoretical_loss": 3.304739704165602,
"tokens_seen": 3196059648
},
{
"epoch": 0.66,
"learning_rate": 3.3875106928999147e-05,
"loss": 2.5884,
"theoretical_loss": 3.304729417509507,
"tokens_seen": 3196190720
},
{
"epoch": 0.67,
"learning_rate": 3.383233532934132e-05,
"loss": 2.7342,
"theoretical_loss": 3.304719131393358,
"tokens_seen": 3196321792
},
{
"epoch": 0.67,
"learning_rate": 3.3789563729683496e-05,
"loss": 2.4659,
"theoretical_loss": 3.304708845817104,
"tokens_seen": 3196452864
},
{
"epoch": 0.67,
"learning_rate": 3.374679213002567e-05,
"loss": 2.4338,
"theoretical_loss": 3.304698560780695,
"tokens_seen": 3196583936
},
{
"epoch": 0.67,
"learning_rate": 3.370402053036783e-05,
"loss": 2.4709,
"theoretical_loss": 3.3046882762840797,
"tokens_seen": 3196715008
},
{
"epoch": 0.67,
"objective/train/docs_used": 1750966,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7182776927948,
"objective/train/theoretical_loss": 3.3046779923272083,
"objective/train/tokens_used": 226897376,
"theoretical_loss": 3.3046779923272083,
"tokens_seen": 3196846080
},
{
"epoch": 0.67,
"learning_rate": 3.366124893071001e-05,
"loss": 2.5408,
"theoretical_loss": 3.3046779923272083,
"tokens_seen": 3196846080
},
{
"epoch": 0.67,
"learning_rate": 3.361847733105218e-05,
"loss": 2.4637,
"theoretical_loss": 3.30466770891003,
"tokens_seen": 3196977152
},
{
"epoch": 0.67,
"learning_rate": 3.357570573139436e-05,
"loss": 2.5451,
"theoretical_loss": 3.304657426032495,
"tokens_seen": 3197108224
},
{
"epoch": 0.67,
"learning_rate": 3.3532934131736525e-05,
"loss": 2.5917,
"theoretical_loss": 3.304647143694552,
"tokens_seen": 3197239296
},
{
"epoch": 0.67,
"learning_rate": 3.3490162532078704e-05,
"loss": 2.449,
"theoretical_loss": 3.3046368618961512,
"tokens_seen": 3197370368
},
{
"epoch": 0.67,
"learning_rate": 3.3447390932420875e-05,
"loss": 2.6124,
"theoretical_loss": 3.3046265806372417,
"tokens_seen": 3197501440
},
{
"epoch": 0.67,
"learning_rate": 3.340461933276305e-05,
"loss": 2.4882,
"theoretical_loss": 3.3046162999177735,
"tokens_seen": 3197632512
},
{
"epoch": 0.67,
"learning_rate": 3.336184773310522e-05,
"loss": 2.5891,
"theoretical_loss": 3.304606019737696,
"tokens_seen": 3197763584
},
{
"epoch": 0.67,
"learning_rate": 3.331907613344739e-05,
"loss": 2.5998,
"theoretical_loss": 3.304595740096959,
"tokens_seen": 3197894656
},
{
"epoch": 0.67,
"learning_rate": 3.327630453378957e-05,
"loss": 2.5076,
"theoretical_loss": 3.3045854609955114,
"tokens_seen": 3198025728
},
{
"epoch": 0.67,
"learning_rate": 3.323353293413174e-05,
"loss": 2.5564,
"theoretical_loss": 3.304575182433304,
"tokens_seen": 3198156800
},
{
"epoch": 0.67,
"learning_rate": 3.319076133447391e-05,
"loss": 2.5137,
"theoretical_loss": 3.3045649044102854,
"tokens_seen": 3198287872
},
{
"epoch": 0.67,
"learning_rate": 3.314798973481608e-05,
"loss": 2.6105,
"theoretical_loss": 3.3045546269264054,
"tokens_seen": 3198418944
},
{
"epoch": 0.67,
"objective/train/docs_used": 1752106,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8310353755950928,
"objective/train/theoretical_loss": 3.3045494883866264,
"objective/train/tokens_used": 228535776,
"theoretical_loss": 3.3045494883866264,
"tokens_seen": 3198484480
},
{
"epoch": 0.67,
"learning_rate": 3.3105218135158254e-05,
"loss": 2.5698,
"theoretical_loss": 3.304544349981614,
"tokens_seen": 3198550016
},
{
"epoch": 0.67,
"learning_rate": 3.306244653550043e-05,
"loss": 2.4674,
"theoretical_loss": 3.3045340735758604,
"tokens_seen": 3198681088
},
{
"epoch": 0.67,
"learning_rate": 3.3019674935842604e-05,
"loss": 2.6941,
"theoretical_loss": 3.304523797709094,
"tokens_seen": 3198812160
},
{
"epoch": 0.67,
"learning_rate": 3.2976903336184776e-05,
"loss": 2.5911,
"theoretical_loss": 3.3045135223812654,
"tokens_seen": 3198943232
},
{
"epoch": 0.67,
"learning_rate": 3.293413173652695e-05,
"loss": 2.5483,
"theoretical_loss": 3.304503247592324,
"tokens_seen": 3199074304
},
{
"epoch": 0.67,
"learning_rate": 3.289136013686912e-05,
"loss": 2.5965,
"theoretical_loss": 3.304492973342219,
"tokens_seen": 3199205376
},
{
"epoch": 0.67,
"learning_rate": 3.28485885372113e-05,
"loss": 2.5635,
"theoretical_loss": 3.3044826996309,
"tokens_seen": 3199336448
},
{
"epoch": 0.68,
"learning_rate": 3.280581693755347e-05,
"loss": 2.4813,
"theoretical_loss": 3.304472426458317,
"tokens_seen": 3199467520
},
{
"epoch": 0.68,
"learning_rate": 3.276304533789564e-05,
"loss": 2.4929,
"theoretical_loss": 3.30446215382442,
"tokens_seen": 3199598592
},
{
"epoch": 0.68,
"learning_rate": 3.272027373823781e-05,
"loss": 2.4621,
"theoretical_loss": 3.304451881729158,
"tokens_seen": 3199729664
},
{
"epoch": 0.68,
"learning_rate": 3.267750213857998e-05,
"loss": 2.4748,
"theoretical_loss": 3.304441610172481,
"tokens_seen": 3199860736
},
{
"epoch": 0.68,
"learning_rate": 3.263473053892216e-05,
"loss": 2.6285,
"theoretical_loss": 3.304431339154339,
"tokens_seen": 3199991808
},
{
"epoch": 0.68,
"objective/train/docs_used": 1752559,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 3.019050359725952,
"objective/train/theoretical_loss": 3.304421068674681,
"objective/train/tokens_used": 230174176,
"theoretical_loss": 3.304421068674681,
"tokens_seen": 3200122880
},
{
"epoch": 0.68,
"learning_rate": 3.2591958939264326e-05,
"loss": 2.4811,
"theoretical_loss": 3.304421068674681,
"tokens_seen": 3200122880
},
{
"epoch": 0.68,
"learning_rate": 3.2549187339606504e-05,
"loss": 2.4264,
"theoretical_loss": 3.3044107987334574,
"tokens_seen": 3200253952
},
{
"epoch": 0.68,
"learning_rate": 3.2506415739948676e-05,
"loss": 2.5031,
"theoretical_loss": 3.304400529330618,
"tokens_seen": 3200385024
},
{
"epoch": 0.68,
"learning_rate": 3.246364414029085e-05,
"loss": 2.559,
"theoretical_loss": 3.3043902604661115,
"tokens_seen": 3200516096
},
{
"epoch": 0.68,
"learning_rate": 3.2420872540633026e-05,
"loss": 2.4459,
"theoretical_loss": 3.3043799921398884,
"tokens_seen": 3200647168
},
{
"epoch": 0.68,
"learning_rate": 3.237810094097519e-05,
"loss": 2.5738,
"theoretical_loss": 3.304369724351899,
"tokens_seen": 3200778240
},
{
"epoch": 0.68,
"learning_rate": 3.233532934131737e-05,
"loss": 2.4598,
"theoretical_loss": 3.3043594571020916,
"tokens_seen": 3200909312
},
{
"epoch": 0.68,
"learning_rate": 3.229255774165954e-05,
"loss": 2.5795,
"theoretical_loss": 3.3043491903904174,
"tokens_seen": 3201040384
},
{
"epoch": 0.68,
"learning_rate": 3.224978614200171e-05,
"loss": 2.6696,
"theoretical_loss": 3.3043389242168253,
"tokens_seen": 3201171456
},
{
"epoch": 0.68,
"learning_rate": 3.220701454234388e-05,
"loss": 2.4077,
"theoretical_loss": 3.304328658581265,
"tokens_seen": 3201302528
},
{
"epoch": 0.68,
"learning_rate": 3.2164242942686055e-05,
"loss": 2.4908,
"theoretical_loss": 3.304318393483687,
"tokens_seen": 3201433600
},
{
"epoch": 0.68,
"learning_rate": 3.212147134302823e-05,
"loss": 2.4689,
"theoretical_loss": 3.30430812892404,
"tokens_seen": 3201564672
},
{
"epoch": 0.68,
"learning_rate": 3.2078699743370405e-05,
"loss": 2.4558,
"theoretical_loss": 3.304297864902275,
"tokens_seen": 3201695744
},
{
"epoch": 0.68,
"objective/train/docs_used": 1753714,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8162074089050293,
"objective/train/theoretical_loss": 3.3042927330930825,
"objective/train/tokens_used": 231812576,
"theoretical_loss": 3.3042927330930825,
"tokens_seen": 3201761280
},
{
"epoch": 0.68,
"learning_rate": 3.2035928143712576e-05,
"loss": 2.4873,
"theoretical_loss": 3.3042876014183413,
"tokens_seen": 3201826816
},
{
"epoch": 0.68,
"learning_rate": 3.199315654405475e-05,
"loss": 2.3903,
"theoretical_loss": 3.304277338472189,
"tokens_seen": 3201957888
},
{
"epoch": 0.68,
"learning_rate": 3.195038494439692e-05,
"loss": 2.3911,
"theoretical_loss": 3.3042670760637667,
"tokens_seen": 3202088960
},
{
"epoch": 0.68,
"learning_rate": 3.19076133447391e-05,
"loss": 2.5844,
"theoretical_loss": 3.3042568141930255,
"tokens_seen": 3202220032
},
{
"epoch": 0.68,
"learning_rate": 3.186484174508127e-05,
"loss": 2.5279,
"theoretical_loss": 3.3042465528599148,
"tokens_seen": 3202351104
},
{
"epoch": 0.69,
"learning_rate": 3.182207014542344e-05,
"loss": 2.3653,
"theoretical_loss": 3.304236292064384,
"tokens_seen": 3202482176
},
{
"epoch": 0.69,
"learning_rate": 3.177929854576561e-05,
"loss": 2.5646,
"theoretical_loss": 3.304226031806384,
"tokens_seen": 3202613248
},
{
"epoch": 0.69,
"learning_rate": 3.1736526946107784e-05,
"loss": 2.3691,
"theoretical_loss": 3.304215772085864,
"tokens_seen": 3202744320
},
{
"epoch": 0.69,
"learning_rate": 3.169375534644996e-05,
"loss": 2.3526,
"theoretical_loss": 3.304205512902773,
"tokens_seen": 3202875392
},
{
"epoch": 0.69,
"learning_rate": 3.1650983746792127e-05,
"loss": 2.4674,
"theoretical_loss": 3.3041952542570625,
"tokens_seen": 3203006464
},
{
"epoch": 0.69,
"learning_rate": 3.1608212147134305e-05,
"loss": 2.6753,
"theoretical_loss": 3.304184996148681,
"tokens_seen": 3203137536
},
{
"epoch": 0.69,
"learning_rate": 3.1565440547476476e-05,
"loss": 2.4466,
"theoretical_loss": 3.3041747385775793,
"tokens_seen": 3203268608
},
{
"epoch": 0.69,
"objective/train/docs_used": 1754313,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8113200664520264,
"objective/train/theoretical_loss": 3.304164481543707,
"objective/train/tokens_used": 233450976,
"theoretical_loss": 3.304164481543707,
"tokens_seen": 3203399680
},
{
"epoch": 0.69,
"learning_rate": 3.152266894781865e-05,
"loss": 2.624,
"theoretical_loss": 3.304164481543707,
"tokens_seen": 3203399680
},
{
"epoch": 0.69,
"learning_rate": 3.1479897348160826e-05,
"loss": 2.4266,
"theoretical_loss": 3.3041542250470135,
"tokens_seen": 3203530752
},
{
"epoch": 0.69,
"learning_rate": 3.143712574850299e-05,
"loss": 2.4957,
"theoretical_loss": 3.3041439690874492,
"tokens_seen": 3203661824
},
{
"epoch": 0.69,
"learning_rate": 3.139435414884517e-05,
"loss": 2.4742,
"theoretical_loss": 3.3041337136649642,
"tokens_seen": 3203792896
},
{
"epoch": 0.69,
"learning_rate": 3.135158254918734e-05,
"loss": 2.5775,
"theoretical_loss": 3.3041234587795083,
"tokens_seen": 3203923968
},
{
"epoch": 0.69,
"learning_rate": 3.130881094952951e-05,
"loss": 2.4736,
"theoretical_loss": 3.3041132044310304,
"tokens_seen": 3204055040
},
{
"epoch": 0.69,
"learning_rate": 3.1266039349871684e-05,
"loss": 2.3845,
"theoretical_loss": 3.3041029506194817,
"tokens_seen": 3204186112
},
{
"epoch": 0.69,
"learning_rate": 3.1223267750213855e-05,
"loss": 2.4658,
"theoretical_loss": 3.3040926973448115,
"tokens_seen": 3204317184
},
{
"epoch": 0.69,
"learning_rate": 3.1180496150556034e-05,
"loss": 2.4869,
"theoretical_loss": 3.30408244460697,
"tokens_seen": 3204448256
},
{
"epoch": 0.69,
"learning_rate": 3.1137724550898205e-05,
"loss": 2.6231,
"theoretical_loss": 3.304072192405907,
"tokens_seen": 3204579328
},
{
"epoch": 0.69,
"learning_rate": 3.1094952951240383e-05,
"loss": 2.5349,
"theoretical_loss": 3.3040619407415726,
"tokens_seen": 3204710400
},
{
"epoch": 0.69,
"learning_rate": 3.105218135158255e-05,
"loss": 2.498,
"theoretical_loss": 3.304051689613916,
"tokens_seen": 3204841472
},
{
"epoch": 0.69,
"learning_rate": 3.100940975192472e-05,
"loss": 2.3935,
"theoretical_loss": 3.304041439022888,
"tokens_seen": 3204972544
},
{
"epoch": 0.69,
"objective/train/docs_used": 1755636,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.230454206466675,
"objective/train/theoretical_loss": 3.304036313928594,
"objective/train/tokens_used": 235089376,
"theoretical_loss": 3.304036313928594,
"tokens_seen": 3205038080
},
{
"epoch": 0.69,
"learning_rate": 3.09666381522669e-05,
"loss": 2.5012,
"theoretical_loss": 3.3040311889684384,
"tokens_seen": 3205103616
},
{
"epoch": 0.69,
"learning_rate": 3.092386655260907e-05,
"loss": 2.5347,
"theoretical_loss": 3.3040209394505173,
"tokens_seen": 3205234688
},
{
"epoch": 0.69,
"learning_rate": 3.088109495295124e-05,
"loss": 2.2631,
"theoretical_loss": 3.3040106904690747,
"tokens_seen": 3205365760
},
{
"epoch": 0.69,
"learning_rate": 3.083832335329341e-05,
"loss": 2.4597,
"theoretical_loss": 3.30400044202406,
"tokens_seen": 3205496832
},
{
"epoch": 0.7,
"learning_rate": 3.079555175363559e-05,
"loss": 2.3887,
"theoretical_loss": 3.303990194115423,
"tokens_seen": 3205627904
},
{
"epoch": 0.7,
"learning_rate": 3.075278015397776e-05,
"loss": 2.4304,
"theoretical_loss": 3.303979946743115,
"tokens_seen": 3205758976
},
{
"epoch": 0.7,
"learning_rate": 3.0710008554319934e-05,
"loss": 2.4462,
"theoretical_loss": 3.303969699907085,
"tokens_seen": 3205890048
},
{
"epoch": 0.7,
"learning_rate": 3.0667236954662105e-05,
"loss": 2.5555,
"theoretical_loss": 3.303959453607283,
"tokens_seen": 3206021120
},
{
"epoch": 0.7,
"learning_rate": 3.062446535500428e-05,
"loss": 2.4496,
"theoretical_loss": 3.30394920784366,
"tokens_seen": 3206152192
},
{
"epoch": 0.7,
"learning_rate": 3.0581693755346455e-05,
"loss": 2.3901,
"theoretical_loss": 3.3039389626161646,
"tokens_seen": 3206283264
},
{
"epoch": 0.7,
"learning_rate": 3.053892215568863e-05,
"loss": 2.5053,
"theoretical_loss": 3.3039287179247476,
"tokens_seen": 3206414336
},
{
"epoch": 0.7,
"learning_rate": 3.0496150556030795e-05,
"loss": 2.4153,
"theoretical_loss": 3.303918473769359,
"tokens_seen": 3206545408
},
{
"epoch": 0.7,
"objective/train/docs_used": 1756084,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.25111985206604,
"objective/train/theoretical_loss": 3.303908230149949,
"objective/train/tokens_used": 236727776,
"theoretical_loss": 3.303908230149949,
"tokens_seen": 3206676480
},
{
"epoch": 0.7,
"learning_rate": 3.045337895637297e-05,
"loss": 2.5711,
"theoretical_loss": 3.303908230149949,
"tokens_seen": 3206676480
},
{
"epoch": 0.7,
"learning_rate": 3.041060735671514e-05,
"loss": 2.4415,
"theoretical_loss": 3.3038979870664678,
"tokens_seen": 3206807552
},
{
"epoch": 0.7,
"learning_rate": 3.0367835757057316e-05,
"loss": 2.5201,
"theoretical_loss": 3.303887744518865,
"tokens_seen": 3206938624
},
{
"epoch": 0.7,
"learning_rate": 3.032506415739949e-05,
"loss": 2.4766,
"theoretical_loss": 3.30387750250709,
"tokens_seen": 3207069696
},
{
"epoch": 0.7,
"learning_rate": 3.028229255774166e-05,
"loss": 2.413,
"theoretical_loss": 3.303867261031094,
"tokens_seen": 3207200768
},
{
"epoch": 0.7,
"learning_rate": 3.0239520958083834e-05,
"loss": 2.5375,
"theoretical_loss": 3.303857020090827,
"tokens_seen": 3207331840
},
{
"epoch": 0.7,
"learning_rate": 3.019674935842601e-05,
"loss": 2.5067,
"theoretical_loss": 3.3038467796862387,
"tokens_seen": 3207462912
},
{
"epoch": 0.7,
"learning_rate": 3.015397775876818e-05,
"loss": 2.4761,
"theoretical_loss": 3.3038365398172793,
"tokens_seen": 3207593984
},
{
"epoch": 0.7,
"learning_rate": 3.011120615911035e-05,
"loss": 2.5521,
"theoretical_loss": 3.303826300483899,
"tokens_seen": 3207725056
},
{
"epoch": 0.7,
"learning_rate": 3.0068434559452524e-05,
"loss": 2.4859,
"theoretical_loss": 3.3038160616860477,
"tokens_seen": 3207856128
},
{
"epoch": 0.7,
"learning_rate": 3.00256629597947e-05,
"loss": 2.4261,
"theoretical_loss": 3.3038058234236756,
"tokens_seen": 3207987200
},
{
"epoch": 0.7,
"learning_rate": 2.9982891360136873e-05,
"loss": 2.5167,
"theoretical_loss": 3.303795585696733,
"tokens_seen": 3208118272
},
{
"epoch": 0.7,
"learning_rate": 2.994011976047904e-05,
"loss": 2.4994,
"theoretical_loss": 3.30378534850517,
"tokens_seen": 3208249344
},
{
"epoch": 0.7,
"objective/train/docs_used": 1757266,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5266647338867188,
"objective/train/theoretical_loss": 3.30378023011014,
"objective/train/tokens_used": 238366176,
"theoretical_loss": 3.30378023011014,
"tokens_seen": 3208314880
},
{
"epoch": 0.7,
"learning_rate": 2.9897348160821213e-05,
"loss": 2.4878,
"theoretical_loss": 3.3037751118489362,
"tokens_seen": 3208380416
},
{
"epoch": 0.7,
"learning_rate": 2.9854576561163388e-05,
"loss": 2.599,
"theoretical_loss": 3.3037648757279827,
"tokens_seen": 3208511488
},
{
"epoch": 0.7,
"learning_rate": 2.9811804961505563e-05,
"loss": 2.4182,
"theoretical_loss": 3.3037546401422584,
"tokens_seen": 3208642560
},
{
"epoch": 0.71,
"learning_rate": 2.9769033361847738e-05,
"loss": 2.3344,
"theoretical_loss": 3.3037444050917144,
"tokens_seen": 3208773632
},
{
"epoch": 0.71,
"learning_rate": 2.9726261762189906e-05,
"loss": 2.4827,
"theoretical_loss": 3.303734170576301,
"tokens_seen": 3208904704
},
{
"epoch": 0.71,
"learning_rate": 2.968349016253208e-05,
"loss": 2.6349,
"theoretical_loss": 3.3037239365959676,
"tokens_seen": 3209035776
},
{
"epoch": 0.71,
"learning_rate": 2.9640718562874252e-05,
"loss": 2.532,
"theoretical_loss": 3.3037137031506645,
"tokens_seen": 3209166848
},
{
"epoch": 0.71,
"learning_rate": 2.9597946963216427e-05,
"loss": 2.4858,
"theoretical_loss": 3.3037034702403423,
"tokens_seen": 3209297920
},
{
"epoch": 0.71,
"learning_rate": 2.9555175363558595e-05,
"loss": 2.4419,
"theoretical_loss": 3.303693237864951,
"tokens_seen": 3209428992
},
{
"epoch": 0.71,
"learning_rate": 2.951240376390077e-05,
"loss": 2.4977,
"theoretical_loss": 3.303683006024441,
"tokens_seen": 3209560064
},
{
"epoch": 0.71,
"learning_rate": 2.9469632164242945e-05,
"loss": 2.5561,
"theoretical_loss": 3.3036727747187618,
"tokens_seen": 3209691136
},
{
"epoch": 0.71,
"learning_rate": 2.9426860564585117e-05,
"loss": 2.5139,
"theoretical_loss": 3.303662543947864,
"tokens_seen": 3209822208
},
{
"epoch": 0.71,
"objective/train/docs_used": 1757623,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 3.1107144355773926,
"objective/train/theoretical_loss": 3.303652313711699,
"objective/train/tokens_used": 240004576,
"theoretical_loss": 3.303652313711699,
"tokens_seen": 3209953280
},
{
"epoch": 0.71,
"learning_rate": 2.938408896492729e-05,
"loss": 2.6032,
"theoretical_loss": 3.303652313711699,
"tokens_seen": 3209953280
},
{
"epoch": 0.71,
"learning_rate": 2.934131736526946e-05,
"loss": 2.513,
"theoretical_loss": 3.3036420840102148,
"tokens_seen": 3210084352
},
{
"epoch": 0.71,
"learning_rate": 2.9298545765611635e-05,
"loss": 2.5001,
"theoretical_loss": 3.3036318548433634,
"tokens_seen": 3210215424
},
{
"epoch": 0.71,
"learning_rate": 2.925577416595381e-05,
"loss": 2.3906,
"theoretical_loss": 3.303621626211094,
"tokens_seen": 3210346496
},
{
"epoch": 0.71,
"learning_rate": 2.921300256629598e-05,
"loss": 2.4056,
"theoretical_loss": 3.303611398113357,
"tokens_seen": 3210477568
},
{
"epoch": 0.71,
"learning_rate": 2.9170230966638153e-05,
"loss": 2.643,
"theoretical_loss": 3.3036011705501034,
"tokens_seen": 3210608640
},
{
"epoch": 0.71,
"learning_rate": 2.9127459366980324e-05,
"loss": 2.5167,
"theoretical_loss": 3.3035909435212827,
"tokens_seen": 3210739712
},
{
"epoch": 0.71,
"learning_rate": 2.90846877673225e-05,
"loss": 2.5026,
"theoretical_loss": 3.303580717026845,
"tokens_seen": 3210870784
},
{
"epoch": 0.71,
"learning_rate": 2.9041916167664674e-05,
"loss": 2.5411,
"theoretical_loss": 3.3035704910667416,
"tokens_seen": 3211001856
},
{
"epoch": 0.71,
"learning_rate": 2.899914456800685e-05,
"loss": 2.3603,
"theoretical_loss": 3.3035602656409218,
"tokens_seen": 3211132928
},
{
"epoch": 0.71,
"learning_rate": 2.8956372968349017e-05,
"loss": 2.6508,
"theoretical_loss": 3.303550040749336,
"tokens_seen": 3211264000
},
{
"epoch": 0.71,
"learning_rate": 2.891360136869119e-05,
"loss": 2.509,
"theoretical_loss": 3.3035398163919347,
"tokens_seen": 3211395072
},
{
"epoch": 0.71,
"learning_rate": 2.8870829769033363e-05,
"loss": 2.6896,
"theoretical_loss": 3.303529592568668,
"tokens_seen": 3211526144
},
{
"epoch": 0.71,
"objective/train/docs_used": 1758775,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5142648220062256,
"objective/train/theoretical_loss": 3.3035244808573196,
"objective/train/tokens_used": 241642976,
"theoretical_loss": 3.3035244808573196,
"tokens_seen": 3211591680
},
{
"epoch": 0.71,
"learning_rate": 2.882805816937554e-05,
"loss": 2.5199,
"theoretical_loss": 3.3035193692794866,
"tokens_seen": 3211657216
},
{
"epoch": 0.72,
"learning_rate": 2.8785286569717706e-05,
"loss": 2.422,
"theoretical_loss": 3.3035091465243407,
"tokens_seen": 3211788288
},
{
"epoch": 0.72,
"learning_rate": 2.874251497005988e-05,
"loss": 2.4424,
"theoretical_loss": 3.3034989243031805,
"tokens_seen": 3211919360
},
{
"epoch": 0.72,
"learning_rate": 2.8699743370402056e-05,
"loss": 2.3157,
"theoretical_loss": 3.303488702615956,
"tokens_seen": 3212050432
},
{
"epoch": 0.72,
"learning_rate": 2.8656971770744228e-05,
"loss": 2.4144,
"theoretical_loss": 3.303478481462618,
"tokens_seen": 3212181504
},
{
"epoch": 0.72,
"learning_rate": 2.8614200171086403e-05,
"loss": 2.5694,
"theoretical_loss": 3.3034682608431165,
"tokens_seen": 3212312576
},
{
"epoch": 0.72,
"learning_rate": 2.857142857142857e-05,
"loss": 2.3805,
"theoretical_loss": 3.303458040757402,
"tokens_seen": 3212443648
},
{
"epoch": 0.72,
"learning_rate": 2.8528656971770746e-05,
"loss": 2.5355,
"theoretical_loss": 3.303447821205425,
"tokens_seen": 3212574720
},
{
"epoch": 0.72,
"learning_rate": 2.848588537211292e-05,
"loss": 2.3711,
"theoretical_loss": 3.3034376021871354,
"tokens_seen": 3212705792
},
{
"epoch": 0.72,
"learning_rate": 2.8443113772455092e-05,
"loss": 2.4546,
"theoretical_loss": 3.303427383702484,
"tokens_seen": 3212836864
},
{
"epoch": 0.72,
"learning_rate": 2.840034217279726e-05,
"loss": 2.3728,
"theoretical_loss": 3.303417165751421,
"tokens_seen": 3212967936
},
{
"epoch": 0.72,
"learning_rate": 2.8357570573139435e-05,
"loss": 2.5899,
"theoretical_loss": 3.3034069483338966,
"tokens_seen": 3213099008
},
{
"epoch": 0.72,
"objective/train/docs_used": 1759176,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.225017547607422,
"objective/train/theoretical_loss": 3.303396731449862,
"objective/train/tokens_used": 243281376,
"theoretical_loss": 3.303396731449862,
"tokens_seen": 3213230080
},
{
"epoch": 0.72,
"learning_rate": 2.831479897348161e-05,
"loss": 2.4874,
"theoretical_loss": 3.303396731449862,
"tokens_seen": 3213230080
},
{
"epoch": 0.72,
"learning_rate": 2.8272027373823785e-05,
"loss": 2.3283,
"theoretical_loss": 3.303386515099266,
"tokens_seen": 3213361152
},
{
"epoch": 0.72,
"learning_rate": 2.8229255774165953e-05,
"loss": 2.5505,
"theoretical_loss": 3.3033762992820606,
"tokens_seen": 3213492224
},
{
"epoch": 0.72,
"learning_rate": 2.8186484174508128e-05,
"loss": 2.5643,
"theoretical_loss": 3.303366083998195,
"tokens_seen": 3213623296
},
{
"epoch": 0.72,
"learning_rate": 2.81437125748503e-05,
"loss": 2.512,
"theoretical_loss": 3.3033558692476204,
"tokens_seen": 3213754368
},
{
"epoch": 0.72,
"learning_rate": 2.8100940975192475e-05,
"loss": 2.5855,
"theoretical_loss": 3.303345655030287,
"tokens_seen": 3213885440
},
{
"epoch": 0.72,
"learning_rate": 2.805816937553465e-05,
"loss": 2.5781,
"theoretical_loss": 3.303335441346145,
"tokens_seen": 3214016512
},
{
"epoch": 0.72,
"learning_rate": 2.8015397775876818e-05,
"loss": 2.4615,
"theoretical_loss": 3.3033252281951455,
"tokens_seen": 3214147584
},
{
"epoch": 0.72,
"learning_rate": 2.7972626176218992e-05,
"loss": 2.4135,
"theoretical_loss": 3.3033150155772377,
"tokens_seen": 3214278656
},
{
"epoch": 0.72,
"learning_rate": 2.7929854576561164e-05,
"loss": 2.5158,
"theoretical_loss": 3.303304803492373,
"tokens_seen": 3214409728
},
{
"epoch": 0.72,
"learning_rate": 2.788708297690334e-05,
"loss": 2.5429,
"theoretical_loss": 3.3032945919405017,
"tokens_seen": 3214540800
},
{
"epoch": 0.72,
"learning_rate": 2.7844311377245507e-05,
"loss": 2.5166,
"theoretical_loss": 3.303284380921574,
"tokens_seen": 3214671872
},
{
"epoch": 0.72,
"learning_rate": 2.7801539777587682e-05,
"loss": 2.5323,
"theoretical_loss": 3.303274170435541,
"tokens_seen": 3214802944
},
{
"epoch": 0.72,
"objective/train/docs_used": 1760371,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3985843658447266,
"objective/train/theoretical_loss": 3.3032690653923438,
"objective/train/tokens_used": 244919776,
"theoretical_loss": 3.3032690653923438,
"tokens_seen": 3214868480
},
{
"epoch": 0.73,
"learning_rate": 2.7758768177929857e-05,
"loss": 2.3832,
"theoretical_loss": 3.3032639604823517,
"tokens_seen": 3214934016
},
{
"epoch": 0.73,
"learning_rate": 2.771599657827203e-05,
"loss": 2.3234,
"theoretical_loss": 3.303253751061958,
"tokens_seen": 3215065088
},
{
"epoch": 0.73,
"learning_rate": 2.7673224978614203e-05,
"loss": 2.4125,
"theoretical_loss": 3.3032435421743105,
"tokens_seen": 3215196160
},
{
"epoch": 0.73,
"learning_rate": 2.763045337895637e-05,
"loss": 2.4612,
"theoretical_loss": 3.3032333338193585,
"tokens_seen": 3215327232
},
{
"epoch": 0.73,
"learning_rate": 2.7587681779298546e-05,
"loss": 2.5315,
"theoretical_loss": 3.3032231259970533,
"tokens_seen": 3215458304
},
{
"epoch": 0.73,
"learning_rate": 2.754491017964072e-05,
"loss": 2.5663,
"theoretical_loss": 3.303212918707345,
"tokens_seen": 3215589376
},
{
"epoch": 0.73,
"learning_rate": 2.7502138579982896e-05,
"loss": 2.4922,
"theoretical_loss": 3.3032027119501843,
"tokens_seen": 3215720448
},
{
"epoch": 0.73,
"learning_rate": 2.7459366980325064e-05,
"loss": 2.5179,
"theoretical_loss": 3.303192505725522,
"tokens_seen": 3215851520
},
{
"epoch": 0.73,
"learning_rate": 2.7416595380667236e-05,
"loss": 2.4059,
"theoretical_loss": 3.303182300033308,
"tokens_seen": 3215982592
},
{
"epoch": 0.73,
"learning_rate": 2.737382378100941e-05,
"loss": 2.4359,
"theoretical_loss": 3.303172094873493,
"tokens_seen": 3216113664
},
{
"epoch": 0.73,
"learning_rate": 2.7331052181351586e-05,
"loss": 2.4875,
"theoretical_loss": 3.3031618902460282,
"tokens_seen": 3216244736
},
{
"epoch": 0.73,
"learning_rate": 2.728828058169376e-05,
"loss": 2.5865,
"theoretical_loss": 3.303151686150863,
"tokens_seen": 3216375808
},
{
"epoch": 0.73,
"objective/train/docs_used": 1760932,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7095277309417725,
"objective/train/theoretical_loss": 3.303141482587949,
"objective/train/tokens_used": 246558176,
"theoretical_loss": 3.303141482587949,
"tokens_seen": 3216506880
},
{
"epoch": 0.73,
"learning_rate": 2.724550898203593e-05,
"loss": 2.5145,
"theoretical_loss": 3.303141482587949,
"tokens_seen": 3216506880
},
{
"epoch": 0.73,
"learning_rate": 2.72027373823781e-05,
"loss": 2.541,
"theoretical_loss": 3.303131279557236,
"tokens_seen": 3216637952
},
{
"epoch": 0.73,
"learning_rate": 2.7159965782720275e-05,
"loss": 2.4856,
"theoretical_loss": 3.303121077058675,
"tokens_seen": 3216769024
},
{
"epoch": 0.73,
"learning_rate": 2.711719418306245e-05,
"loss": 2.6048,
"theoretical_loss": 3.3031108750922167,
"tokens_seen": 3216900096
},
{
"epoch": 0.73,
"learning_rate": 2.7074422583404618e-05,
"loss": 2.371,
"theoretical_loss": 3.303100673657811,
"tokens_seen": 3217031168
},
{
"epoch": 0.73,
"learning_rate": 2.7031650983746793e-05,
"loss": 2.5231,
"theoretical_loss": 3.303090472755409,
"tokens_seen": 3217162240
},
{
"epoch": 0.73,
"learning_rate": 2.6988879384088968e-05,
"loss": 2.5842,
"theoretical_loss": 3.3030802723849613,
"tokens_seen": 3217293312
},
{
"epoch": 0.73,
"learning_rate": 2.694610778443114e-05,
"loss": 2.3844,
"theoretical_loss": 3.3030700725464177,
"tokens_seen": 3217424384
},
{
"epoch": 0.73,
"learning_rate": 2.6903336184773314e-05,
"loss": 2.5238,
"theoretical_loss": 3.30305987323973,
"tokens_seen": 3217555456
},
{
"epoch": 0.73,
"learning_rate": 2.6860564585115482e-05,
"loss": 2.4149,
"theoretical_loss": 3.303049674464848,
"tokens_seen": 3217686528
},
{
"epoch": 0.73,
"learning_rate": 2.6817792985457657e-05,
"loss": 2.4585,
"theoretical_loss": 3.303039476221723,
"tokens_seen": 3217817600
},
{
"epoch": 0.73,
"learning_rate": 2.6775021385799832e-05,
"loss": 2.5258,
"theoretical_loss": 3.303029278510305,
"tokens_seen": 3217948672
},
{
"epoch": 0.74,
"learning_rate": 2.6732249786142004e-05,
"loss": 2.3461,
"theoretical_loss": 3.3030190813305444,
"tokens_seen": 3218079744
},
{
"epoch": 0.74,
"objective/train/docs_used": 1762106,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7149224281311035,
"objective/train/theoretical_loss": 3.3030139829400205,
"objective/train/tokens_used": 248196576,
"theoretical_loss": 3.3030139829400205,
"tokens_seen": 3218145280
},
{
"epoch": 0.74,
"learning_rate": 2.6689478186484172e-05,
"loss": 2.5382,
"theoretical_loss": 3.3030088846823924,
"tokens_seen": 3218210816
},
{
"epoch": 0.74,
"learning_rate": 2.6646706586826347e-05,
"loss": 2.5688,
"theoretical_loss": 3.3029986885657996,
"tokens_seen": 3218341888
},
{
"epoch": 0.74,
"learning_rate": 2.6603934987168522e-05,
"loss": 2.581,
"theoretical_loss": 3.3029884929807163,
"tokens_seen": 3218472960
},
{
"epoch": 0.74,
"learning_rate": 2.6561163387510697e-05,
"loss": 2.3666,
"theoretical_loss": 3.3029782979270936,
"tokens_seen": 3218604032
},
{
"epoch": 0.74,
"learning_rate": 2.6518391787852865e-05,
"loss": 2.3376,
"theoretical_loss": 3.302968103404882,
"tokens_seen": 3218735104
},
{
"epoch": 0.74,
"learning_rate": 2.647562018819504e-05,
"loss": 2.5427,
"theoretical_loss": 3.302957909414032,
"tokens_seen": 3218866176
},
{
"epoch": 0.74,
"learning_rate": 2.643284858853721e-05,
"loss": 2.5189,
"theoretical_loss": 3.3029477159544935,
"tokens_seen": 3218997248
},
{
"epoch": 0.74,
"learning_rate": 2.6390076988879386e-05,
"loss": 2.3899,
"theoretical_loss": 3.302937523026219,
"tokens_seen": 3219128320
},
{
"epoch": 0.74,
"learning_rate": 2.634730538922156e-05,
"loss": 2.3719,
"theoretical_loss": 3.3029273306291578,
"tokens_seen": 3219259392
},
{
"epoch": 0.74,
"learning_rate": 2.630453378956373e-05,
"loss": 2.4855,
"theoretical_loss": 3.302917138763261,
"tokens_seen": 3219390464
},
{
"epoch": 0.74,
"learning_rate": 2.6261762189905904e-05,
"loss": 2.4274,
"theoretical_loss": 3.3029069474284793,
"tokens_seen": 3219521536
},
{
"epoch": 0.74,
"learning_rate": 2.6218990590248076e-05,
"loss": 2.3962,
"theoretical_loss": 3.3028967566247633,
"tokens_seen": 3219652608
},
{
"epoch": 0.74,
"objective/train/docs_used": 1762646,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.652979850769043,
"objective/train/theoretical_loss": 3.302886566352064,
"objective/train/tokens_used": 249834976,
"theoretical_loss": 3.302886566352064,
"tokens_seen": 3219783680
},
{
"epoch": 0.74,
"learning_rate": 2.617621899059025e-05,
"loss": 2.407,
"theoretical_loss": 3.302886566352064,
"tokens_seen": 3219783680
},
{
"epoch": 0.74,
"learning_rate": 2.613344739093242e-05,
"loss": 2.5723,
"theoretical_loss": 3.3028763766103317,
"tokens_seen": 3219914752
},
{
"epoch": 0.74,
"learning_rate": 2.6090675791274594e-05,
"loss": 2.47,
"theoretical_loss": 3.3028661873995175,
"tokens_seen": 3220045824
},
{
"epoch": 0.74,
"learning_rate": 2.604790419161677e-05,
"loss": 2.5141,
"theoretical_loss": 3.302855998719572,
"tokens_seen": 3220176896
},
{
"epoch": 0.74,
"learning_rate": 2.6005132591958943e-05,
"loss": 2.5068,
"theoretical_loss": 3.3028458105704455,
"tokens_seen": 3220307968
},
{
"epoch": 0.74,
"learning_rate": 2.5962360992301115e-05,
"loss": 2.4223,
"theoretical_loss": 3.3028356229520894,
"tokens_seen": 3220439040
},
{
"epoch": 0.74,
"learning_rate": 2.5919589392643283e-05,
"loss": 2.4377,
"theoretical_loss": 3.302825435864454,
"tokens_seen": 3220570112
},
{
"epoch": 0.74,
"learning_rate": 2.5876817792985458e-05,
"loss": 2.5049,
"theoretical_loss": 3.3028152493074905,
"tokens_seen": 3220701184
},
{
"epoch": 0.74,
"learning_rate": 2.5834046193327633e-05,
"loss": 2.467,
"theoretical_loss": 3.3028050632811494,
"tokens_seen": 3220832256
},
{
"epoch": 0.74,
"learning_rate": 2.5791274593669808e-05,
"loss": 2.4527,
"theoretical_loss": 3.302794877785381,
"tokens_seen": 3220963328
},
{
"epoch": 0.75,
"learning_rate": 2.5748502994011976e-05,
"loss": 2.4822,
"theoretical_loss": 3.3027846928201368,
"tokens_seen": 3221094400
},
{
"epoch": 0.75,
"learning_rate": 2.5705731394354147e-05,
"loss": 2.5557,
"theoretical_loss": 3.3027745083853675,
"tokens_seen": 3221225472
},
{
"epoch": 0.75,
"learning_rate": 2.5662959794696322e-05,
"loss": 2.5684,
"theoretical_loss": 3.302764324481023,
"tokens_seen": 3221356544
},
{
"epoch": 0.75,
"objective/train/docs_used": 1763846,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3604302406311035,
"objective/train/theoretical_loss": 3.3027592327277455,
"objective/train/tokens_used": 251473376,
"theoretical_loss": 3.3027592327277455,
"tokens_seen": 3221422080
},
{
"epoch": 0.75,
"learning_rate": 2.5620188195038497e-05,
"loss": 2.3491,
"theoretical_loss": 3.302754141107056,
"tokens_seen": 3221487616
},
{
"epoch": 0.75,
"learning_rate": 2.5577416595380672e-05,
"loss": 2.3532,
"theoretical_loss": 3.3027439582634153,
"tokens_seen": 3221618688
},
{
"epoch": 0.75,
"learning_rate": 2.553464499572284e-05,
"loss": 2.4439,
"theoretical_loss": 3.3027337759500526,
"tokens_seen": 3221749760
},
{
"epoch": 0.75,
"learning_rate": 2.5491873396065015e-05,
"loss": 2.425,
"theoretical_loss": 3.3027235941669186,
"tokens_seen": 3221880832
},
{
"epoch": 0.75,
"learning_rate": 2.5449101796407187e-05,
"loss": 2.2809,
"theoretical_loss": 3.302713412913964,
"tokens_seen": 3222011904
},
{
"epoch": 0.75,
"learning_rate": 2.540633019674936e-05,
"loss": 2.3808,
"theoretical_loss": 3.3027032321911403,
"tokens_seen": 3222142976
},
{
"epoch": 0.75,
"learning_rate": 2.536355859709153e-05,
"loss": 2.4512,
"theoretical_loss": 3.3026930519983972,
"tokens_seen": 3222274048
},
{
"epoch": 0.75,
"learning_rate": 2.5320786997433705e-05,
"loss": 2.3638,
"theoretical_loss": 3.3026828723356862,
"tokens_seen": 3222405120
},
{
"epoch": 0.75,
"learning_rate": 2.527801539777588e-05,
"loss": 2.4473,
"theoretical_loss": 3.3026726932029584,
"tokens_seen": 3222536192
},
{
"epoch": 0.75,
"learning_rate": 2.523524379811805e-05,
"loss": 2.4028,
"theoretical_loss": 3.302662514600164,
"tokens_seen": 3222667264
},
{
"epoch": 0.75,
"learning_rate": 2.5192472198460226e-05,
"loss": 2.4703,
"theoretical_loss": 3.302652336527254,
"tokens_seen": 3222798336
},
{
"epoch": 0.75,
"learning_rate": 2.5149700598802394e-05,
"loss": 2.3777,
"theoretical_loss": 3.30264215898418,
"tokens_seen": 3222929408
},
{
"epoch": 0.75,
"objective/train/docs_used": 1764511,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 1.997779369354248,
"objective/train/theoretical_loss": 3.302631981970892,
"objective/train/tokens_used": 253111776,
"theoretical_loss": 3.302631981970892,
"tokens_seen": 3223060480
},
{
"epoch": 0.75,
"learning_rate": 2.510692899914457e-05,
"loss": 2.569,
"theoretical_loss": 3.302631981970892,
"tokens_seen": 3223060480
},
{
"epoch": 0.75,
"learning_rate": 2.5064157399486744e-05,
"loss": 2.6162,
"theoretical_loss": 3.3026218054873415,
"tokens_seen": 3223191552
},
{
"epoch": 0.75,
"learning_rate": 2.5021385799828915e-05,
"loss": 2.4105,
"theoretical_loss": 3.302611629533479,
"tokens_seen": 3223322624
},
{
"epoch": 0.75,
"learning_rate": 2.4978614200171087e-05,
"loss": 2.5256,
"theoretical_loss": 3.302601454109255,
"tokens_seen": 3223453696
},
{
"epoch": 0.75,
"learning_rate": 2.493584260051326e-05,
"loss": 2.363,
"theoretical_loss": 3.3025912792146217,
"tokens_seen": 3223584768
},
{
"epoch": 0.75,
"learning_rate": 2.4893071000855433e-05,
"loss": 2.434,
"theoretical_loss": 3.302581104849529,
"tokens_seen": 3223715840
},
{
"epoch": 0.75,
"learning_rate": 2.4850299401197605e-05,
"loss": 2.4288,
"theoretical_loss": 3.3025709310139275,
"tokens_seen": 3223846912
},
{
"epoch": 0.75,
"learning_rate": 2.480752780153978e-05,
"loss": 2.4615,
"theoretical_loss": 3.302560757707769,
"tokens_seen": 3223977984
},
{
"epoch": 0.75,
"learning_rate": 2.476475620188195e-05,
"loss": 2.4484,
"theoretical_loss": 3.302550584931004,
"tokens_seen": 3224109056
},
{
"epoch": 0.76,
"learning_rate": 2.4721984602224123e-05,
"loss": 2.4344,
"theoretical_loss": 3.302540412683584,
"tokens_seen": 3224240128
},
{
"epoch": 0.76,
"learning_rate": 2.4679213002566298e-05,
"loss": 2.4386,
"theoretical_loss": 3.302530240965458,
"tokens_seen": 3224371200
},
{
"epoch": 0.76,
"learning_rate": 2.463644140290847e-05,
"loss": 2.4709,
"theoretical_loss": 3.3025200697765795,
"tokens_seen": 3224502272
},
{
"epoch": 0.76,
"learning_rate": 2.4593669803250644e-05,
"loss": 2.4457,
"theoretical_loss": 3.302509899116898,
"tokens_seen": 3224633344
},
{
"epoch": 0.76,
"objective/train/docs_used": 1765173,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.816251516342163,
"objective/train/theoretical_loss": 3.3025048139854913,
"objective/train/tokens_used": 254750176,
"theoretical_loss": 3.3025048139854913,
"tokens_seen": 3224698880
},
{
"epoch": 0.76,
"learning_rate": 2.4550898203592816e-05,
"loss": 2.4591,
"theoretical_loss": 3.302499728986365,
"tokens_seen": 3224764416
},
{
"epoch": 0.76,
"learning_rate": 2.4508126603934987e-05,
"loss": 2.4954,
"theoretical_loss": 3.302489559384931,
"tokens_seen": 3224895488
},
{
"epoch": 0.76,
"learning_rate": 2.446535500427716e-05,
"loss": 2.4195,
"theoretical_loss": 3.3024793903125476,
"tokens_seen": 3225026560
},
{
"epoch": 0.76,
"learning_rate": 2.4422583404619334e-05,
"loss": 2.531,
"theoretical_loss": 3.3024692217691647,
"tokens_seen": 3225157632
},
{
"epoch": 0.76,
"learning_rate": 2.4379811804961505e-05,
"loss": 2.5572,
"theoretical_loss": 3.3024590537547343,
"tokens_seen": 3225288704
},
{
"epoch": 0.76,
"learning_rate": 2.433704020530368e-05,
"loss": 2.528,
"theoretical_loss": 3.302448886269207,
"tokens_seen": 3225419776
},
{
"epoch": 0.76,
"learning_rate": 2.4294268605645855e-05,
"loss": 2.6383,
"theoretical_loss": 3.302438719312534,
"tokens_seen": 3225550848
},
{
"epoch": 0.76,
"learning_rate": 2.4251497005988023e-05,
"loss": 2.5238,
"theoretical_loss": 3.302428552884666,
"tokens_seen": 3225681920
},
{
"epoch": 0.76,
"learning_rate": 2.4208725406330198e-05,
"loss": 2.4368,
"theoretical_loss": 3.302418386985554,
"tokens_seen": 3225812992
},
{
"epoch": 0.76,
"learning_rate": 2.416595380667237e-05,
"loss": 2.4254,
"theoretical_loss": 3.3024082216151496,
"tokens_seen": 3225944064
},
{
"epoch": 0.76,
"learning_rate": 2.4123182207014544e-05,
"loss": 2.6633,
"theoretical_loss": 3.3023980567734035,
"tokens_seen": 3226075136
},
{
"epoch": 0.76,
"learning_rate": 2.4080410607356716e-05,
"loss": 2.2511,
"theoretical_loss": 3.302387892460266,
"tokens_seen": 3226206208
},
{
"epoch": 0.76,
"objective/train/docs_used": 1765556,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.658133029937744,
"objective/train/theoretical_loss": 3.3023777286756895,
"objective/train/tokens_used": 256388576,
"theoretical_loss": 3.3023777286756895,
"tokens_seen": 3226337280
},
{
"epoch": 0.76,
"learning_rate": 2.403763900769889e-05,
"loss": 2.5316,
"theoretical_loss": 3.3023777286756895,
"tokens_seen": 3226337280
},
{
"epoch": 0.76,
"learning_rate": 2.399486740804106e-05,
"loss": 2.4955,
"theoretical_loss": 3.302367565419624,
"tokens_seen": 3226468352
},
{
"epoch": 0.76,
"learning_rate": 2.3952095808383234e-05,
"loss": 2.6443,
"theoretical_loss": 3.302357402692021,
"tokens_seen": 3226599424
},
{
"epoch": 0.76,
"learning_rate": 2.390932420872541e-05,
"loss": 2.6128,
"theoretical_loss": 3.3023472404928307,
"tokens_seen": 3226730496
},
{
"epoch": 0.76,
"learning_rate": 2.386655260906758e-05,
"loss": 2.5977,
"theoretical_loss": 3.3023370788220054,
"tokens_seen": 3226861568
},
{
"epoch": 0.76,
"learning_rate": 2.3823781009409755e-05,
"loss": 2.5164,
"theoretical_loss": 3.302326917679496,
"tokens_seen": 3226992640
},
{
"epoch": 0.76,
"learning_rate": 2.3781009409751927e-05,
"loss": 2.5272,
"theoretical_loss": 3.3023167570652525,
"tokens_seen": 3227123712
},
{
"epoch": 0.77,
"learning_rate": 2.37382378100941e-05,
"loss": 2.5149,
"theoretical_loss": 3.302306596979227,
"tokens_seen": 3227254784
},
{
"epoch": 0.77,
"learning_rate": 2.369546621043627e-05,
"loss": 2.6254,
"theoretical_loss": 3.302296437421371,
"tokens_seen": 3227385856
},
{
"epoch": 0.77,
"learning_rate": 2.3652694610778445e-05,
"loss": 2.5184,
"theoretical_loss": 3.302286278391634,
"tokens_seen": 3227516928
},
{
"epoch": 0.77,
"learning_rate": 2.3609923011120616e-05,
"loss": 2.5916,
"theoretical_loss": 3.302276119889968,
"tokens_seen": 3227648000
},
{
"epoch": 0.77,
"learning_rate": 2.356715141146279e-05,
"loss": 2.6482,
"theoretical_loss": 3.3022659619163246,
"tokens_seen": 3227779072
},
{
"epoch": 0.77,
"learning_rate": 2.3524379811804963e-05,
"loss": 2.5005,
"theoretical_loss": 3.3022558044706543,
"tokens_seen": 3227910144
},
{
"epoch": 0.77,
"objective/train/docs_used": 1765973,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4675238132476807,
"objective/train/theoretical_loss": 3.3022507259457936,
"objective/train/tokens_used": 258026976,
"theoretical_loss": 3.3022507259457936,
"tokens_seen": 3227975680
},
{
"epoch": 0.77,
"learning_rate": 2.3481608212147134e-05,
"loss": 2.477,
"theoretical_loss": 3.3022456475529083,
"tokens_seen": 3228041216
},
{
"epoch": 0.77,
"learning_rate": 2.343883661248931e-05,
"loss": 2.4768,
"theoretical_loss": 3.3022354911630374,
"tokens_seen": 3228172288
},
{
"epoch": 0.77,
"learning_rate": 2.339606501283148e-05,
"loss": 2.6043,
"theoretical_loss": 3.3022253353009936,
"tokens_seen": 3228303360
},
{
"epoch": 0.77,
"learning_rate": 2.3353293413173656e-05,
"loss": 2.5132,
"theoretical_loss": 3.302215179966727,
"tokens_seen": 3228434432
},
{
"epoch": 0.77,
"learning_rate": 2.3310521813515827e-05,
"loss": 2.4532,
"theoretical_loss": 3.3022050251601898,
"tokens_seen": 3228565504
},
{
"epoch": 0.77,
"learning_rate": 2.3267750213858e-05,
"loss": 2.4456,
"theoretical_loss": 3.3021948708813325,
"tokens_seen": 3228696576
},
{
"epoch": 0.77,
"learning_rate": 2.322497861420017e-05,
"loss": 2.5319,
"theoretical_loss": 3.302184717130106,
"tokens_seen": 3228827648
},
{
"epoch": 0.77,
"learning_rate": 2.3182207014542345e-05,
"loss": 2.5463,
"theoretical_loss": 3.3021745639064624,
"tokens_seen": 3228958720
},
{
"epoch": 0.77,
"learning_rate": 2.3139435414884517e-05,
"loss": 2.6091,
"theoretical_loss": 3.3021644112103523,
"tokens_seen": 3229089792
},
{
"epoch": 0.77,
"learning_rate": 2.309666381522669e-05,
"loss": 2.6879,
"theoretical_loss": 3.3021542590417265,
"tokens_seen": 3229220864
},
{
"epoch": 0.77,
"learning_rate": 2.3053892215568866e-05,
"loss": 2.5093,
"theoretical_loss": 3.302144107400537,
"tokens_seen": 3229351936
},
{
"epoch": 0.77,
"learning_rate": 2.3011120615911034e-05,
"loss": 2.3793,
"theoretical_loss": 3.3021339562867342,
"tokens_seen": 3229483008
},
{
"epoch": 0.77,
"objective/train/docs_used": 1767178,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.406644344329834,
"objective/train/theoretical_loss": 3.30212380570027,
"objective/train/tokens_used": 259665376,
"theoretical_loss": 3.30212380570027,
"tokens_seen": 3229614080
},
{
"epoch": 0.77,
"learning_rate": 2.296834901625321e-05,
"loss": 2.5267,
"theoretical_loss": 3.30212380570027,
"tokens_seen": 3229614080
},
{
"epoch": 0.77,
"learning_rate": 2.292557741659538e-05,
"loss": 2.4837,
"theoretical_loss": 3.3021136556410955,
"tokens_seen": 3229745152
},
{
"epoch": 0.77,
"learning_rate": 2.2882805816937556e-05,
"loss": 2.5635,
"theoretical_loss": 3.3021035061091615,
"tokens_seen": 3229876224
},
{
"epoch": 0.77,
"learning_rate": 2.2840034217279727e-05,
"loss": 2.6972,
"theoretical_loss": 3.302093357104419,
"tokens_seen": 3230007296
},
{
"epoch": 0.77,
"learning_rate": 2.2797262617621902e-05,
"loss": 2.5288,
"theoretical_loss": 3.3020832086268204,
"tokens_seen": 3230138368
},
{
"epoch": 0.77,
"learning_rate": 2.275449101796407e-05,
"loss": 2.4562,
"theoretical_loss": 3.3020730606763156,
"tokens_seen": 3230269440
},
{
"epoch": 0.78,
"learning_rate": 2.2711719418306245e-05,
"loss": 2.4811,
"theoretical_loss": 3.3020629132528567,
"tokens_seen": 3230400512
},
{
"epoch": 0.78,
"learning_rate": 2.2668947818648417e-05,
"loss": 2.5825,
"theoretical_loss": 3.3020527663563946,
"tokens_seen": 3230531584
},
{
"epoch": 0.78,
"learning_rate": 2.2626176218990592e-05,
"loss": 2.5378,
"theoretical_loss": 3.3020426199868806,
"tokens_seen": 3230662656
},
{
"epoch": 0.78,
"learning_rate": 2.2583404619332767e-05,
"loss": 2.5697,
"theoretical_loss": 3.3020324741442657,
"tokens_seen": 3230793728
},
{
"epoch": 0.78,
"learning_rate": 2.2540633019674938e-05,
"loss": 2.5101,
"theoretical_loss": 3.3020223288285018,
"tokens_seen": 3230924800
},
{
"epoch": 0.78,
"learning_rate": 2.249786142001711e-05,
"loss": 2.3868,
"theoretical_loss": 3.3020121840395396,
"tokens_seen": 3231055872
},
{
"epoch": 0.78,
"learning_rate": 2.245508982035928e-05,
"loss": 2.4703,
"theoretical_loss": 3.3020020397773306,
"tokens_seen": 3231186944
},
{
"epoch": 0.78,
"objective/train/docs_used": 1767743,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.440702438354492,
"objective/train/theoretical_loss": 3.3019969678437437,
"objective/train/tokens_used": 261303776,
"theoretical_loss": 3.3019969678437437,
"tokens_seen": 3231252480
},
{
"epoch": 0.78,
"learning_rate": 2.2412318220701456e-05,
"loss": 2.5682,
"theoretical_loss": 3.3019918960418266,
"tokens_seen": 3231318016
},
{
"epoch": 0.78,
"learning_rate": 2.2369546621043628e-05,
"loss": 2.6399,
"theoretical_loss": 3.301981752832978,
"tokens_seen": 3231449088
},
{
"epoch": 0.78,
"learning_rate": 2.2326775021385803e-05,
"loss": 2.6231,
"theoretical_loss": 3.301971610150736,
"tokens_seen": 3231580160
},
{
"epoch": 0.78,
"learning_rate": 2.2284003421727974e-05,
"loss": 2.5494,
"theoretical_loss": 3.301961467995053,
"tokens_seen": 3231711232
},
{
"epoch": 0.78,
"learning_rate": 2.2241231822070146e-05,
"loss": 2.5655,
"theoretical_loss": 3.301951326365879,
"tokens_seen": 3231842304
},
{
"epoch": 0.78,
"learning_rate": 2.219846022241232e-05,
"loss": 2.3956,
"theoretical_loss": 3.301941185263167,
"tokens_seen": 3231973376
},
{
"epoch": 0.78,
"learning_rate": 2.2155688622754492e-05,
"loss": 2.4201,
"theoretical_loss": 3.3019310446868664,
"tokens_seen": 3232104448
},
{
"epoch": 0.78,
"learning_rate": 2.2112917023096667e-05,
"loss": 2.4982,
"theoretical_loss": 3.3019209046369298,
"tokens_seen": 3232235520
},
{
"epoch": 0.78,
"learning_rate": 2.207014542343884e-05,
"loss": 2.563,
"theoretical_loss": 3.301910765113308,
"tokens_seen": 3232366592
},
{
"epoch": 0.78,
"learning_rate": 2.202737382378101e-05,
"loss": 2.4654,
"theoretical_loss": 3.3019006261159527,
"tokens_seen": 3232497664
},
{
"epoch": 0.78,
"learning_rate": 2.198460222412318e-05,
"loss": 2.4671,
"theoretical_loss": 3.301890487644815,
"tokens_seen": 3232628736
},
{
"epoch": 0.78,
"learning_rate": 2.1941830624465356e-05,
"loss": 2.4903,
"theoretical_loss": 3.301880349699846,
"tokens_seen": 3232759808
},
{
"epoch": 0.78,
"objective/train/docs_used": 1769062,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.198112964630127,
"objective/train/theoretical_loss": 3.3018702122809978,
"objective/train/tokens_used": 262942176,
"theoretical_loss": 3.3018702122809978,
"tokens_seen": 3232890880
},
{
"epoch": 0.78,
"learning_rate": 2.1899059024807528e-05,
"loss": 2.4369,
"theoretical_loss": 3.3018702122809978,
"tokens_seen": 3232890880
},
{
"epoch": 0.78,
"learning_rate": 2.1856287425149703e-05,
"loss": 2.4286,
"theoretical_loss": 3.3018600753882215,
"tokens_seen": 3233021952
},
{
"epoch": 0.78,
"learning_rate": 2.1813515825491874e-05,
"loss": 2.4311,
"theoretical_loss": 3.3018499390214675,
"tokens_seen": 3233153024
},
{
"epoch": 0.78,
"learning_rate": 2.1770744225834046e-05,
"loss": 2.4278,
"theoretical_loss": 3.3018398031806884,
"tokens_seen": 3233284096
},
{
"epoch": 0.78,
"learning_rate": 2.172797262617622e-05,
"loss": 2.3558,
"theoretical_loss": 3.3018296678658356,
"tokens_seen": 3233415168
},
{
"epoch": 0.79,
"learning_rate": 2.1685201026518392e-05,
"loss": 2.5817,
"theoretical_loss": 3.3018195330768596,
"tokens_seen": 3233546240
},
{
"epoch": 0.79,
"learning_rate": 2.1642429426860567e-05,
"loss": 2.5169,
"theoretical_loss": 3.3018093988137123,
"tokens_seen": 3233677312
},
{
"epoch": 0.79,
"learning_rate": 2.159965782720274e-05,
"loss": 2.5621,
"theoretical_loss": 3.3017992650763452,
"tokens_seen": 3233808384
},
{
"epoch": 0.79,
"learning_rate": 2.155688622754491e-05,
"loss": 2.6427,
"theoretical_loss": 3.3017891318647097,
"tokens_seen": 3233939456
},
{
"epoch": 0.79,
"learning_rate": 2.1514114627887082e-05,
"loss": 2.6727,
"theoretical_loss": 3.301778999178757,
"tokens_seen": 3234070528
},
{
"epoch": 0.79,
"learning_rate": 2.1471343028229257e-05,
"loss": 2.593,
"theoretical_loss": 3.3017688670184384,
"tokens_seen": 3234201600
},
{
"epoch": 0.79,
"learning_rate": 2.1428571428571428e-05,
"loss": 2.4446,
"theoretical_loss": 3.301758735383706,
"tokens_seen": 3234332672
},
{
"epoch": 0.79,
"learning_rate": 2.1385799828913603e-05,
"loss": 2.5108,
"theoretical_loss": 3.301748604274511,
"tokens_seen": 3234463744
},
{
"epoch": 0.79,
"objective/train/docs_used": 1769583,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.804602861404419,
"objective/train/theoretical_loss": 3.3017435389169743,
"objective/train/tokens_used": 264580576,
"theoretical_loss": 3.3017435389169743,
"tokens_seen": 3234529280
},
{
"epoch": 0.79,
"learning_rate": 2.1343028229255778e-05,
"loss": 2.4721,
"theoretical_loss": 3.3017384736908038,
"tokens_seen": 3234594816
},
{
"epoch": 0.79,
"learning_rate": 2.1300256629597946e-05,
"loss": 2.4311,
"theoretical_loss": 3.3017283436325373,
"tokens_seen": 3234725888
},
{
"epoch": 0.79,
"learning_rate": 2.125748502994012e-05,
"loss": 2.3786,
"theoretical_loss": 3.301718214099662,
"tokens_seen": 3234856960
},
{
"epoch": 0.79,
"learning_rate": 2.1214713430282293e-05,
"loss": 2.4871,
"theoretical_loss": 3.3017080850921303,
"tokens_seen": 3234988032
},
{
"epoch": 0.79,
"learning_rate": 2.1171941830624467e-05,
"loss": 2.3986,
"theoretical_loss": 3.3016979566098925,
"tokens_seen": 3235119104
},
{
"epoch": 0.79,
"learning_rate": 2.112917023096664e-05,
"loss": 2.6495,
"theoretical_loss": 3.301687828652901,
"tokens_seen": 3235250176
},
{
"epoch": 0.79,
"learning_rate": 2.1086398631308814e-05,
"loss": 2.5269,
"theoretical_loss": 3.3016777012211067,
"tokens_seen": 3235381248
},
{
"epoch": 0.79,
"learning_rate": 2.1043627031650985e-05,
"loss": 2.4523,
"theoretical_loss": 3.301667574314462,
"tokens_seen": 3235512320
},
{
"epoch": 0.79,
"learning_rate": 2.1000855431993157e-05,
"loss": 2.5899,
"theoretical_loss": 3.301657447932917,
"tokens_seen": 3235643392
},
{
"epoch": 0.79,
"learning_rate": 2.095808383233533e-05,
"loss": 2.657,
"theoretical_loss": 3.301647322076424,
"tokens_seen": 3235774464
},
{
"epoch": 0.79,
"learning_rate": 2.0915312232677503e-05,
"loss": 2.618,
"theoretical_loss": 3.3016371967449345,
"tokens_seen": 3235905536
},
{
"epoch": 0.79,
"learning_rate": 2.0872540633019678e-05,
"loss": 2.4609,
"theoretical_loss": 3.3016270719384004,
"tokens_seen": 3236036608
},
{
"epoch": 0.79,
"objective/train/docs_used": 1770972,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.1038246154785156,
"objective/train/theoretical_loss": 3.3016169476567727,
"objective/train/tokens_used": 266218976,
"theoretical_loss": 3.3016169476567727,
"tokens_seen": 3236167680
},
{
"epoch": 0.79,
"learning_rate": 2.082976903336185e-05,
"loss": 2.4857,
"theoretical_loss": 3.3016169476567727,
"tokens_seen": 3236167680
},
{
"epoch": 0.79,
"learning_rate": 2.078699743370402e-05,
"loss": 2.528,
"theoretical_loss": 3.301606823900003,
"tokens_seen": 3236298752
},
{
"epoch": 0.79,
"learning_rate": 2.0744225834046193e-05,
"loss": 2.4886,
"theoretical_loss": 3.3015967006680422,
"tokens_seen": 3236429824
},
{
"epoch": 0.8,
"learning_rate": 2.0701454234388368e-05,
"loss": 2.3298,
"theoretical_loss": 3.301586577960843,
"tokens_seen": 3236560896
},
{
"epoch": 0.8,
"learning_rate": 2.065868263473054e-05,
"loss": 2.4334,
"theoretical_loss": 3.3015764557783567,
"tokens_seen": 3236691968
},
{
"epoch": 0.8,
"learning_rate": 2.0615911035072714e-05,
"loss": 2.5871,
"theoretical_loss": 3.301566334120534,
"tokens_seen": 3236823040
},
{
"epoch": 0.8,
"learning_rate": 2.0573139435414886e-05,
"loss": 2.6669,
"theoretical_loss": 3.301556212987328,
"tokens_seen": 3236954112
},
{
"epoch": 0.8,
"learning_rate": 2.0530367835757057e-05,
"loss": 2.5303,
"theoretical_loss": 3.3015460923786883,
"tokens_seen": 3237085184
},
{
"epoch": 0.8,
"learning_rate": 2.0487596236099232e-05,
"loss": 2.5037,
"theoretical_loss": 3.301535972294568,
"tokens_seen": 3237216256
},
{
"epoch": 0.8,
"learning_rate": 2.0444824636441404e-05,
"loss": 2.4185,
"theoretical_loss": 3.301525852734918,
"tokens_seen": 3237347328
},
{
"epoch": 0.8,
"learning_rate": 2.040205303678358e-05,
"loss": 2.5031,
"theoretical_loss": 3.3015157336996905,
"tokens_seen": 3237478400
},
{
"epoch": 0.8,
"learning_rate": 2.035928143712575e-05,
"loss": 2.5396,
"theoretical_loss": 3.3015056151888365,
"tokens_seen": 3237609472
},
{
"epoch": 0.8,
"learning_rate": 2.031650983746792e-05,
"loss": 2.5218,
"theoretical_loss": 3.3014954972023074,
"tokens_seen": 3237740544
},
{
"epoch": 0.8,
"objective/train/docs_used": 1771594,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3041329383850098,
"objective/train/theoretical_loss": 3.30149043840565,
"objective/train/tokens_used": 267857376,
"theoretical_loss": 3.30149043840565,
"tokens_seen": 3237806080
},
{
"epoch": 0.8,
"learning_rate": 2.0273738237810093e-05,
"loss": 2.5805,
"theoretical_loss": 3.3014853797400554,
"tokens_seen": 3237871616
},
{
"epoch": 0.8,
"learning_rate": 2.0230966638152268e-05,
"loss": 2.5418,
"theoretical_loss": 3.301475262802032,
"tokens_seen": 3238002688
},
{
"epoch": 0.8,
"learning_rate": 2.018819503849444e-05,
"loss": 2.5376,
"theoretical_loss": 3.3014651463881886,
"tokens_seen": 3238133760
},
{
"epoch": 0.8,
"learning_rate": 2.0145423438836614e-05,
"loss": 2.4949,
"theoretical_loss": 3.301455030498477,
"tokens_seen": 3238264832
},
{
"epoch": 0.8,
"learning_rate": 2.0102651839178786e-05,
"loss": 2.4963,
"theoretical_loss": 3.3014449151328487,
"tokens_seen": 3238395904
},
{
"epoch": 0.8,
"learning_rate": 2.0059880239520957e-05,
"loss": 2.4281,
"theoretical_loss": 3.301434800291255,
"tokens_seen": 3238526976
},
{
"epoch": 0.8,
"learning_rate": 2.0017108639863132e-05,
"loss": 2.451,
"theoretical_loss": 3.3014246859736485,
"tokens_seen": 3238658048
},
{
"epoch": 0.8,
"learning_rate": 1.9974337040205304e-05,
"loss": 2.5048,
"theoretical_loss": 3.30141457217998,
"tokens_seen": 3238789120
},
{
"epoch": 0.8,
"learning_rate": 1.993156544054748e-05,
"loss": 2.4166,
"theoretical_loss": 3.3014044589102016,
"tokens_seen": 3238920192
},
{
"epoch": 0.8,
"learning_rate": 1.988879384088965e-05,
"loss": 2.4944,
"theoretical_loss": 3.3013943461642645,
"tokens_seen": 3239051264
},
{
"epoch": 0.8,
"learning_rate": 1.9846022241231825e-05,
"loss": 2.5976,
"theoretical_loss": 3.3013842339421213,
"tokens_seen": 3239182336
},
{
"epoch": 0.8,
"learning_rate": 1.9803250641573993e-05,
"loss": 2.5097,
"theoretical_loss": 3.3013741222437227,
"tokens_seen": 3239313408
},
{
"epoch": 0.8,
"objective/train/docs_used": 1772760,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3606889247894287,
"objective/train/theoretical_loss": 3.3013640110690208,
"objective/train/tokens_used": 269495776,
"theoretical_loss": 3.3013640110690208,
"tokens_seen": 3239444480
},
{
"epoch": 0.8,
"learning_rate": 1.9760479041916168e-05,
"loss": 2.4156,
"theoretical_loss": 3.3013640110690208,
"tokens_seen": 3239444480
},
{
"epoch": 0.8,
"learning_rate": 1.971770744225834e-05,
"loss": 2.3081,
"theoretical_loss": 3.301353900417967,
"tokens_seen": 3239575552
},
{
"epoch": 0.81,
"learning_rate": 1.9674935842600515e-05,
"loss": 2.5215,
"theoretical_loss": 3.301343790290513,
"tokens_seen": 3239706624
},
{
"epoch": 0.81,
"learning_rate": 1.963216424294269e-05,
"loss": 2.6333,
"theoretical_loss": 3.3013336806866116,
"tokens_seen": 3239837696
},
{
"epoch": 0.81,
"learning_rate": 1.958939264328486e-05,
"loss": 2.6026,
"theoretical_loss": 3.301323571606213,
"tokens_seen": 3239968768
},
{
"epoch": 0.81,
"learning_rate": 1.9546621043627033e-05,
"loss": 2.5328,
"theoretical_loss": 3.3013134630492695,
"tokens_seen": 3240099840
},
{
"epoch": 0.81,
"learning_rate": 1.9503849443969204e-05,
"loss": 2.5511,
"theoretical_loss": 3.301303355015733,
"tokens_seen": 3240230912
},
{
"epoch": 0.81,
"learning_rate": 1.946107784431138e-05,
"loss": 2.5345,
"theoretical_loss": 3.3012932475055554,
"tokens_seen": 3240361984
},
{
"epoch": 0.81,
"learning_rate": 1.941830624465355e-05,
"loss": 2.489,
"theoretical_loss": 3.301283140518688,
"tokens_seen": 3240493056
},
{
"epoch": 0.81,
"learning_rate": 1.9375534644995725e-05,
"loss": 2.5627,
"theoretical_loss": 3.3012730340550824,
"tokens_seen": 3240624128
},
{
"epoch": 0.81,
"learning_rate": 1.9332763045337897e-05,
"loss": 2.5931,
"theoretical_loss": 3.3012629281146912,
"tokens_seen": 3240755200
},
{
"epoch": 0.81,
"learning_rate": 1.928999144568007e-05,
"loss": 2.4621,
"theoretical_loss": 3.301252822697465,
"tokens_seen": 3240886272
},
{
"epoch": 0.81,
"learning_rate": 1.924721984602224e-05,
"loss": 2.5237,
"theoretical_loss": 3.3012427178033565,
"tokens_seen": 3241017344
},
{
"epoch": 0.81,
"objective/train/docs_used": 1773780,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4805448055267334,
"objective/train/theoretical_loss": 3.301237665552456,
"objective/train/tokens_used": 271134176,
"theoretical_loss": 3.301237665552456,
"tokens_seen": 3241082880
},
{
"epoch": 0.81,
"learning_rate": 1.9204448246364415e-05,
"loss": 2.4985,
"theoretical_loss": 3.3012326134323167,
"tokens_seen": 3241148416
},
{
"epoch": 0.81,
"learning_rate": 1.916167664670659e-05,
"loss": 2.4819,
"theoretical_loss": 3.301222509584298,
"tokens_seen": 3241279488
},
{
"epoch": 0.81,
"learning_rate": 1.911890504704876e-05,
"loss": 2.3771,
"theoretical_loss": 3.301212406259252,
"tokens_seen": 3241410560
},
{
"epoch": 0.81,
"learning_rate": 1.9076133447390933e-05,
"loss": 2.5396,
"theoretical_loss": 3.3012023034571305,
"tokens_seen": 3241541632
},
{
"epoch": 0.81,
"learning_rate": 1.9033361847733104e-05,
"loss": 2.5725,
"theoretical_loss": 3.3011922011778854,
"tokens_seen": 3241672704
},
{
"epoch": 0.81,
"learning_rate": 1.899059024807528e-05,
"loss": 2.4494,
"theoretical_loss": 3.301182099421468,
"tokens_seen": 3241803776
},
{
"epoch": 0.81,
"learning_rate": 1.894781864841745e-05,
"loss": 2.6522,
"theoretical_loss": 3.3011719981878302,
"tokens_seen": 3241934848
},
{
"epoch": 0.81,
"learning_rate": 1.8905047048759626e-05,
"loss": 2.5201,
"theoretical_loss": 3.3011618974769243,
"tokens_seen": 3242065920
},
{
"epoch": 0.81,
"learning_rate": 1.8862275449101797e-05,
"loss": 2.4335,
"theoretical_loss": 3.301151797288702,
"tokens_seen": 3242196992
},
{
"epoch": 0.81,
"learning_rate": 1.881950384944397e-05,
"loss": 2.7051,
"theoretical_loss": 3.301141697623115,
"tokens_seen": 3242328064
},
{
"epoch": 0.81,
"learning_rate": 1.8776732249786144e-05,
"loss": 2.4368,
"theoretical_loss": 3.301131598480115,
"tokens_seen": 3242459136
},
{
"epoch": 0.81,
"learning_rate": 1.8733960650128315e-05,
"loss": 2.485,
"theoretical_loss": 3.301121499859654,
"tokens_seen": 3242590208
},
{
"epoch": 0.81,
"objective/train/docs_used": 1774468,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.227705717086792,
"objective/train/theoretical_loss": 3.3011114017616836,
"objective/train/tokens_used": 272772576,
"theoretical_loss": 3.3011114017616836,
"tokens_seen": 3242721280
},
{
"epoch": 0.81,
"learning_rate": 1.869118905047049e-05,
"loss": 2.5532,
"theoretical_loss": 3.3011114017616836,
"tokens_seen": 3242721280
},
{
"epoch": 0.82,
"learning_rate": 1.864841745081266e-05,
"loss": 2.4939,
"theoretical_loss": 3.301101304186156,
"tokens_seen": 3242852352
},
{
"epoch": 0.82,
"learning_rate": 1.8605645851154833e-05,
"loss": 2.4718,
"theoretical_loss": 3.3010912071330227,
"tokens_seen": 3242983424
},
{
"epoch": 0.82,
"learning_rate": 1.8562874251497005e-05,
"loss": 2.3537,
"theoretical_loss": 3.301081110602236,
"tokens_seen": 3243114496
},
{
"epoch": 0.82,
"learning_rate": 1.852010265183918e-05,
"loss": 2.5119,
"theoretical_loss": 3.301071014593748,
"tokens_seen": 3243245568
},
{
"epoch": 0.82,
"learning_rate": 1.847733105218135e-05,
"loss": 2.4318,
"theoretical_loss": 3.3010609191075093,
"tokens_seen": 3243376640
},
{
"epoch": 0.82,
"learning_rate": 1.8434559452523526e-05,
"loss": 2.3919,
"theoretical_loss": 3.3010508241434726,
"tokens_seen": 3243507712
},
{
"epoch": 0.82,
"learning_rate": 1.8391787852865698e-05,
"loss": 2.4154,
"theoretical_loss": 3.3010407297015902,
"tokens_seen": 3243638784
},
{
"epoch": 0.82,
"learning_rate": 1.8349016253207872e-05,
"loss": 2.5434,
"theoretical_loss": 3.3010306357818138,
"tokens_seen": 3243769856
},
{
"epoch": 0.82,
"learning_rate": 1.8306244653550044e-05,
"loss": 2.5467,
"theoretical_loss": 3.3010205423840944,
"tokens_seen": 3243900928
},
{
"epoch": 0.82,
"learning_rate": 1.8263473053892215e-05,
"loss": 2.5148,
"theoretical_loss": 3.301010449508385,
"tokens_seen": 3244032000
},
{
"epoch": 0.82,
"learning_rate": 1.822070145423439e-05,
"loss": 2.4515,
"theoretical_loss": 3.3010003571546367,
"tokens_seen": 3244163072
},
{
"epoch": 0.82,
"learning_rate": 1.8177929854576562e-05,
"loss": 2.6058,
"theoretical_loss": 3.3009902653228025,
"tokens_seen": 3244294144
},
{
"epoch": 0.82,
"objective/train/docs_used": 1775745,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.896294593811035,
"objective/train/theoretical_loss": 3.3009852196025875,
"objective/train/tokens_used": 274410976,
"theoretical_loss": 3.3009852196025875,
"tokens_seen": 3244359680
},
{
"epoch": 0.82,
"learning_rate": 1.8135158254918737e-05,
"loss": 2.4755,
"theoretical_loss": 3.300980174012833,
"tokens_seen": 3244425216
},
{
"epoch": 0.82,
"learning_rate": 1.809238665526091e-05,
"loss": 2.4185,
"theoretical_loss": 3.300970083224681,
"tokens_seen": 3244556288
},
{
"epoch": 0.82,
"learning_rate": 1.804961505560308e-05,
"loss": 2.5255,
"theoretical_loss": 3.300959992958298,
"tokens_seen": 3244687360
},
{
"epoch": 0.82,
"learning_rate": 1.800684345594525e-05,
"loss": 2.5014,
"theoretical_loss": 3.3009499032136365,
"tokens_seen": 3244818432
},
{
"epoch": 0.82,
"learning_rate": 1.7964071856287426e-05,
"loss": 2.569,
"theoretical_loss": 3.300939813990648,
"tokens_seen": 3244949504
},
{
"epoch": 0.82,
"learning_rate": 1.79213002566296e-05,
"loss": 2.419,
"theoretical_loss": 3.300929725289284,
"tokens_seen": 3245080576
},
{
"epoch": 0.82,
"learning_rate": 1.7878528656971773e-05,
"loss": 2.601,
"theoretical_loss": 3.300919637109498,
"tokens_seen": 3245211648
},
{
"epoch": 0.82,
"learning_rate": 1.7835757057313944e-05,
"loss": 2.3526,
"theoretical_loss": 3.3009095494512404,
"tokens_seen": 3245342720
},
{
"epoch": 0.82,
"learning_rate": 1.7792985457656116e-05,
"loss": 2.5026,
"theoretical_loss": 3.300899462314464,
"tokens_seen": 3245473792
},
{
"epoch": 0.82,
"learning_rate": 1.775021385799829e-05,
"loss": 2.5506,
"theoretical_loss": 3.3008893756991204,
"tokens_seen": 3245604864
},
{
"epoch": 0.82,
"learning_rate": 1.7707442258340462e-05,
"loss": 2.4427,
"theoretical_loss": 3.3008792896051617,
"tokens_seen": 3245735936
},
{
"epoch": 0.83,
"learning_rate": 1.7664670658682637e-05,
"loss": 2.6281,
"theoretical_loss": 3.30086920403254,
"tokens_seen": 3245867008
},
{
"epoch": 0.83,
"objective/train/docs_used": 1776229,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5814051628112793,
"objective/train/theoretical_loss": 3.300859118981207,
"objective/train/tokens_used": 276049376,
"theoretical_loss": 3.300859118981207,
"tokens_seen": 3245998080
},
{
"epoch": 0.83,
"learning_rate": 1.762189905902481e-05,
"loss": 2.3169,
"theoretical_loss": 3.300859118981207,
"tokens_seen": 3245998080
},
{
"epoch": 0.83,
"learning_rate": 1.757912745936698e-05,
"loss": 2.586,
"theoretical_loss": 3.300849034451115,
"tokens_seen": 3246129152
},
{
"epoch": 0.83,
"learning_rate": 1.753635585970915e-05,
"loss": 2.5397,
"theoretical_loss": 3.3008389504422166,
"tokens_seen": 3246260224
},
{
"epoch": 0.83,
"learning_rate": 1.7493584260051327e-05,
"loss": 2.4997,
"theoretical_loss": 3.300828866954463,
"tokens_seen": 3246391296
},
{
"epoch": 0.83,
"learning_rate": 1.74508126603935e-05,
"loss": 2.4944,
"theoretical_loss": 3.300818783987806,
"tokens_seen": 3246522368
},
{
"epoch": 0.83,
"learning_rate": 1.7408041060735673e-05,
"loss": 2.4759,
"theoretical_loss": 3.300808701542198,
"tokens_seen": 3246653440
},
{
"epoch": 0.83,
"learning_rate": 1.7365269461077845e-05,
"loss": 2.4671,
"theoretical_loss": 3.3007986196175914,
"tokens_seen": 3246784512
},
{
"epoch": 0.83,
"learning_rate": 1.7322497861420016e-05,
"loss": 2.5327,
"theoretical_loss": 3.300788538213938,
"tokens_seen": 3246915584
},
{
"epoch": 0.83,
"learning_rate": 1.727972626176219e-05,
"loss": 2.5069,
"theoretical_loss": 3.3007784573311896,
"tokens_seen": 3247046656
},
{
"epoch": 0.83,
"learning_rate": 1.7236954662104362e-05,
"loss": 2.4564,
"theoretical_loss": 3.3007683769692986,
"tokens_seen": 3247177728
},
{
"epoch": 0.83,
"learning_rate": 1.7194183062446537e-05,
"loss": 2.4556,
"theoretical_loss": 3.3007582971282163,
"tokens_seen": 3247308800
},
{
"epoch": 0.83,
"learning_rate": 1.715141146278871e-05,
"loss": 2.3536,
"theoretical_loss": 3.300748217807896,
"tokens_seen": 3247439872
},
{
"epoch": 0.83,
"learning_rate": 1.710863986313088e-05,
"loss": 2.4368,
"theoretical_loss": 3.3007381390082893,
"tokens_seen": 3247570944
},
{
"epoch": 0.83,
"objective/train/docs_used": 1777442,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5164363384246826,
"objective/train/theoretical_loss": 3.3007330998037383,
"objective/train/tokens_used": 277687776,
"theoretical_loss": 3.3007330998037383,
"tokens_seen": 3247636480
},
{
"epoch": 0.83,
"learning_rate": 1.7065868263473055e-05,
"loss": 2.6578,
"theoretical_loss": 3.300728060729348,
"tokens_seen": 3247702016
},
{
"epoch": 0.83,
"learning_rate": 1.7023096663815227e-05,
"loss": 2.4565,
"theoretical_loss": 3.3007179829710243,
"tokens_seen": 3247833088
},
{
"epoch": 0.83,
"learning_rate": 1.6980325064157402e-05,
"loss": 2.5889,
"theoretical_loss": 3.3007079057332698,
"tokens_seen": 3247964160
},
{
"epoch": 0.83,
"learning_rate": 1.6937553464499573e-05,
"loss": 2.4938,
"theoretical_loss": 3.3006978290160376,
"tokens_seen": 3248095232
},
{
"epoch": 0.83,
"learning_rate": 1.6894781864841748e-05,
"loss": 2.4327,
"theoretical_loss": 3.3006877528192797,
"tokens_seen": 3248226304
},
{
"epoch": 0.83,
"learning_rate": 1.6852010265183916e-05,
"loss": 2.3783,
"theoretical_loss": 3.3006776771429474,
"tokens_seen": 3248357376
},
{
"epoch": 0.83,
"learning_rate": 1.680923866552609e-05,
"loss": 2.4708,
"theoretical_loss": 3.3006676019869934,
"tokens_seen": 3248488448
},
{
"epoch": 0.83,
"learning_rate": 1.6766467065868263e-05,
"loss": 2.5233,
"theoretical_loss": 3.30065752735137,
"tokens_seen": 3248619520
},
{
"epoch": 0.83,
"learning_rate": 1.6723695466210438e-05,
"loss": 2.5416,
"theoretical_loss": 3.3006474532360284,
"tokens_seen": 3248750592
},
{
"epoch": 0.83,
"learning_rate": 1.668092386655261e-05,
"loss": 2.4243,
"theoretical_loss": 3.300637379640922,
"tokens_seen": 3248881664
},
{
"epoch": 0.84,
"learning_rate": 1.6638152266894784e-05,
"loss": 2.5114,
"theoretical_loss": 3.300627306566002,
"tokens_seen": 3249012736
},
{
"epoch": 0.84,
"learning_rate": 1.6595380667236956e-05,
"loss": 2.6126,
"theoretical_loss": 3.300617234011221,
"tokens_seen": 3249143808
},
{
"epoch": 0.84,
"objective/train/docs_used": 1777962,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4109323024749756,
"objective/train/theoretical_loss": 3.300607161976531,
"objective/train/tokens_used": 279326176,
"theoretical_loss": 3.300607161976531,
"tokens_seen": 3249274880
},
{
"epoch": 0.84,
"learning_rate": 1.6552609067579127e-05,
"loss": 2.4634,
"theoretical_loss": 3.300607161976531,
"tokens_seen": 3249274880
},
{
"epoch": 0.84,
"learning_rate": 1.6509837467921302e-05,
"loss": 2.5956,
"theoretical_loss": 3.3005970904618844,
"tokens_seen": 3249405952
},
{
"epoch": 0.84,
"learning_rate": 1.6467065868263474e-05,
"loss": 2.5098,
"theoretical_loss": 3.300587019467233,
"tokens_seen": 3249537024
},
{
"epoch": 0.84,
"learning_rate": 1.642429426860565e-05,
"loss": 2.4901,
"theoretical_loss": 3.3005769489925294,
"tokens_seen": 3249668096
},
{
"epoch": 0.84,
"learning_rate": 1.638152266894782e-05,
"loss": 2.4405,
"theoretical_loss": 3.300566879037725,
"tokens_seen": 3249799168
},
{
"epoch": 0.84,
"learning_rate": 1.633875106928999e-05,
"loss": 2.5138,
"theoretical_loss": 3.3005568096027735,
"tokens_seen": 3249930240
},
{
"epoch": 0.84,
"learning_rate": 1.6295979469632163e-05,
"loss": 2.4746,
"theoretical_loss": 3.3005467406876257,
"tokens_seen": 3250061312
},
{
"epoch": 0.84,
"learning_rate": 1.6253207869974338e-05,
"loss": 2.5813,
"theoretical_loss": 3.300536672292234,
"tokens_seen": 3250192384
},
{
"epoch": 0.84,
"learning_rate": 1.6210436270316513e-05,
"loss": 2.5663,
"theoretical_loss": 3.300526604416551,
"tokens_seen": 3250323456
},
{
"epoch": 0.84,
"learning_rate": 1.6167664670658684e-05,
"loss": 2.4411,
"theoretical_loss": 3.300516537060529,
"tokens_seen": 3250454528
},
{
"epoch": 0.84,
"learning_rate": 1.6124893071000856e-05,
"loss": 2.4614,
"theoretical_loss": 3.3005064702241196,
"tokens_seen": 3250585600
},
{
"epoch": 0.84,
"learning_rate": 1.6082121471343027e-05,
"loss": 2.2742,
"theoretical_loss": 3.300496403907276,
"tokens_seen": 3250716672
},
{
"epoch": 0.84,
"learning_rate": 1.6039349871685202e-05,
"loss": 2.5173,
"theoretical_loss": 3.3004863381099496,
"tokens_seen": 3250847744
},
{
"epoch": 0.84,
"objective/train/docs_used": 1779321,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8377304077148438,
"objective/train/theoretical_loss": 3.3004813054060906,
"objective/train/tokens_used": 280964576,
"theoretical_loss": 3.3004813054060906,
"tokens_seen": 3250913280
},
{
"epoch": 0.84,
"learning_rate": 1.5996578272027374e-05,
"loss": 2.6478,
"theoretical_loss": 3.3004762728320927,
"tokens_seen": 3250978816
},
{
"epoch": 0.84,
"learning_rate": 1.595380667236955e-05,
"loss": 2.5887,
"theoretical_loss": 3.300466208073658,
"tokens_seen": 3251109888
},
{
"epoch": 0.84,
"learning_rate": 1.591103507271172e-05,
"loss": 2.5804,
"theoretical_loss": 3.300456143834598,
"tokens_seen": 3251240960
},
{
"epoch": 0.84,
"learning_rate": 1.5868263473053892e-05,
"loss": 2.3951,
"theoretical_loss": 3.300446080114864,
"tokens_seen": 3251372032
},
{
"epoch": 0.84,
"learning_rate": 1.5825491873396063e-05,
"loss": 2.3772,
"theoretical_loss": 3.300436016914409,
"tokens_seen": 3251503104
},
{
"epoch": 0.84,
"learning_rate": 1.5782720273738238e-05,
"loss": 2.5086,
"theoretical_loss": 3.3004259542331846,
"tokens_seen": 3251634176
},
{
"epoch": 0.84,
"learning_rate": 1.5739948674080413e-05,
"loss": 2.5292,
"theoretical_loss": 3.3004158920711437,
"tokens_seen": 3251765248
},
{
"epoch": 0.84,
"learning_rate": 1.5697177074422585e-05,
"loss": 2.5086,
"theoretical_loss": 3.3004058304282387,
"tokens_seen": 3251896320
},
{
"epoch": 0.85,
"learning_rate": 1.5654405474764756e-05,
"loss": 2.5897,
"theoretical_loss": 3.300395769304421,
"tokens_seen": 3252027392
},
{
"epoch": 0.85,
"learning_rate": 1.5611633875106928e-05,
"loss": 2.4933,
"theoretical_loss": 3.300385708699644,
"tokens_seen": 3252158464
},
{
"epoch": 0.85,
"learning_rate": 1.5568862275449103e-05,
"loss": 2.4314,
"theoretical_loss": 3.3003756486138593,
"tokens_seen": 3252289536
},
{
"epoch": 0.85,
"learning_rate": 1.5526090675791274e-05,
"loss": 2.358,
"theoretical_loss": 3.3003655890470194,
"tokens_seen": 3252420608
},
{
"epoch": 0.85,
"objective/train/docs_used": 1779836,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.32458758354187,
"objective/train/theoretical_loss": 3.3003555299990768,
"objective/train/tokens_used": 282602976,
"theoretical_loss": 3.3003555299990768,
"tokens_seen": 3252551680
},
{
"epoch": 0.85,
"learning_rate": 1.548331907613345e-05,
"loss": 2.4358,
"theoretical_loss": 3.3003555299990768,
"tokens_seen": 3252551680
},
{
"epoch": 0.85,
"learning_rate": 1.544054747647562e-05,
"loss": 2.5154,
"theoretical_loss": 3.3003454714699836,
"tokens_seen": 3252682752
},
{
"epoch": 0.85,
"learning_rate": 1.5397775876817795e-05,
"loss": 2.4796,
"theoretical_loss": 3.300335413459692,
"tokens_seen": 3252813824
},
{
"epoch": 0.85,
"learning_rate": 1.5355004277159967e-05,
"loss": 2.4735,
"theoretical_loss": 3.3003253559681545,
"tokens_seen": 3252944896
},
{
"epoch": 0.85,
"learning_rate": 1.531223267750214e-05,
"loss": 2.441,
"theoretical_loss": 3.300315298995324,
"tokens_seen": 3253075968
},
{
"epoch": 0.85,
"learning_rate": 1.5269461077844313e-05,
"loss": 2.3167,
"theoretical_loss": 3.300305242541152,
"tokens_seen": 3253207040
},
{
"epoch": 0.85,
"learning_rate": 1.5226689478186485e-05,
"loss": 2.4886,
"theoretical_loss": 3.3002951866055907,
"tokens_seen": 3253338112
},
{
"epoch": 0.85,
"learning_rate": 1.5183917878528658e-05,
"loss": 2.5997,
"theoretical_loss": 3.300285131188594,
"tokens_seen": 3253469184
},
{
"epoch": 0.85,
"learning_rate": 1.514114627887083e-05,
"loss": 2.4284,
"theoretical_loss": 3.300275076290112,
"tokens_seen": 3253600256
}
],
"max_steps": 2362,
"num_train_epochs": 9223372036854775807,
"total_flos": 1.34316646465536e+17,
"trial_name": null,
"trial_params": null
}