{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5125815470643057,
"eval_steps": 500,
"global_step": 550,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004659832246039142,
"grad_norm": 59.78214918326703,
"learning_rate": 7.763975155279503e-07,
"loss": 10.6865,
"step": 5
},
{
"epoch": 0.009319664492078284,
"grad_norm": 61.61821429517621,
"learning_rate": 1.5527950310559006e-06,
"loss": 10.4783,
"step": 10
},
{
"epoch": 0.013979496738117428,
"grad_norm": 100.18886340338842,
"learning_rate": 2.329192546583851e-06,
"loss": 8.8595,
"step": 15
},
{
"epoch": 0.01863932898415657,
"grad_norm": 28.80282471816165,
"learning_rate": 3.1055900621118013e-06,
"loss": 2.9699,
"step": 20
},
{
"epoch": 0.023299161230195712,
"grad_norm": 2.9469993742141027,
"learning_rate": 3.881987577639752e-06,
"loss": 1.3152,
"step": 25
},
{
"epoch": 0.027958993476234855,
"grad_norm": 1.273876748177396,
"learning_rate": 4.658385093167702e-06,
"loss": 0.9979,
"step": 30
},
{
"epoch": 0.032618825722273995,
"grad_norm": 1.1847847745298432,
"learning_rate": 5.4347826086956525e-06,
"loss": 0.8462,
"step": 35
},
{
"epoch": 0.03727865796831314,
"grad_norm": 0.6184161495875878,
"learning_rate": 6.2111801242236025e-06,
"loss": 0.7823,
"step": 40
},
{
"epoch": 0.04193849021435228,
"grad_norm": 0.4611063105597401,
"learning_rate": 6.9875776397515525e-06,
"loss": 0.7195,
"step": 45
},
{
"epoch": 0.046598322460391424,
"grad_norm": 0.3903179820498685,
"learning_rate": 7.763975155279503e-06,
"loss": 0.6868,
"step": 50
},
{
"epoch": 0.05125815470643057,
"grad_norm": 0.44815192407139737,
"learning_rate": 8.540372670807453e-06,
"loss": 0.6483,
"step": 55
},
{
"epoch": 0.05591798695246971,
"grad_norm": 0.3363352375310443,
"learning_rate": 9.316770186335403e-06,
"loss": 0.61,
"step": 60
},
{
"epoch": 0.06057781919850885,
"grad_norm": 0.31214409078055283,
"learning_rate": 1.0093167701863353e-05,
"loss": 0.5932,
"step": 65
},
{
"epoch": 0.06523765144454799,
"grad_norm": 0.32957450493165075,
"learning_rate": 1.0869565217391305e-05,
"loss": 0.5886,
"step": 70
},
{
"epoch": 0.06989748369058714,
"grad_norm": 0.3311294918877853,
"learning_rate": 1.1645962732919255e-05,
"loss": 0.5704,
"step": 75
},
{
"epoch": 0.07455731593662628,
"grad_norm": 0.3181163049876225,
"learning_rate": 1.2422360248447205e-05,
"loss": 0.5604,
"step": 80
},
{
"epoch": 0.07921714818266543,
"grad_norm": 0.3310250029185392,
"learning_rate": 1.3198757763975155e-05,
"loss": 0.5508,
"step": 85
},
{
"epoch": 0.08387698042870456,
"grad_norm": 0.2864578039803888,
"learning_rate": 1.3975155279503105e-05,
"loss": 0.5606,
"step": 90
},
{
"epoch": 0.08853681267474371,
"grad_norm": 0.2527763382663063,
"learning_rate": 1.4751552795031057e-05,
"loss": 0.542,
"step": 95
},
{
"epoch": 0.09319664492078285,
"grad_norm": 0.30977172064299785,
"learning_rate": 1.5527950310559007e-05,
"loss": 0.5272,
"step": 100
},
{
"epoch": 0.097856477166822,
"grad_norm": 0.3503262906800675,
"learning_rate": 1.630434782608696e-05,
"loss": 0.5214,
"step": 105
},
{
"epoch": 0.10251630941286113,
"grad_norm": 0.3032318880335728,
"learning_rate": 1.7080745341614907e-05,
"loss": 0.5229,
"step": 110
},
{
"epoch": 0.10717614165890028,
"grad_norm": 0.32192998759757896,
"learning_rate": 1.785714285714286e-05,
"loss": 0.5201,
"step": 115
},
{
"epoch": 0.11183597390493942,
"grad_norm": 0.37017049044979194,
"learning_rate": 1.8633540372670807e-05,
"loss": 0.5067,
"step": 120
},
{
"epoch": 0.11649580615097857,
"grad_norm": 0.27625624649694025,
"learning_rate": 1.940993788819876e-05,
"loss": 0.5026,
"step": 125
},
{
"epoch": 0.1211556383970177,
"grad_norm": 0.32732522406326287,
"learning_rate": 2.0186335403726707e-05,
"loss": 0.5083,
"step": 130
},
{
"epoch": 0.12581547064305684,
"grad_norm": 0.3974302327759709,
"learning_rate": 2.096273291925466e-05,
"loss": 0.5069,
"step": 135
},
{
"epoch": 0.13047530288909598,
"grad_norm": 0.49055099062465235,
"learning_rate": 2.173913043478261e-05,
"loss": 0.4918,
"step": 140
},
{
"epoch": 0.13513513513513514,
"grad_norm": 0.3509510737287038,
"learning_rate": 2.2515527950310562e-05,
"loss": 0.5182,
"step": 145
},
{
"epoch": 0.13979496738117428,
"grad_norm": 0.4060738145091598,
"learning_rate": 2.329192546583851e-05,
"loss": 0.4924,
"step": 150
},
{
"epoch": 0.14445479962721341,
"grad_norm": 0.42238178931670933,
"learning_rate": 2.4068322981366462e-05,
"loss": 0.5005,
"step": 155
},
{
"epoch": 0.14911463187325255,
"grad_norm": 0.42361270461040995,
"learning_rate": 2.484472049689441e-05,
"loss": 0.4809,
"step": 160
},
{
"epoch": 0.15377446411929171,
"grad_norm": 0.4419148082648927,
"learning_rate": 2.5621118012422362e-05,
"loss": 0.4922,
"step": 165
},
{
"epoch": 0.15843429636533085,
"grad_norm": 0.37817457175825797,
"learning_rate": 2.639751552795031e-05,
"loss": 0.4682,
"step": 170
},
{
"epoch": 0.16309412861137,
"grad_norm": 0.4612740179437555,
"learning_rate": 2.7173913043478262e-05,
"loss": 0.4812,
"step": 175
},
{
"epoch": 0.16775396085740912,
"grad_norm": 0.4027204736632852,
"learning_rate": 2.795031055900621e-05,
"loss": 0.4743,
"step": 180
},
{
"epoch": 0.1724137931034483,
"grad_norm": 0.3662916369622068,
"learning_rate": 2.8726708074534165e-05,
"loss": 0.4771,
"step": 185
},
{
"epoch": 0.17707362534948742,
"grad_norm": 0.44549022951820444,
"learning_rate": 2.9503105590062114e-05,
"loss": 0.4872,
"step": 190
},
{
"epoch": 0.18173345759552656,
"grad_norm": 0.4421692278535386,
"learning_rate": 3.0279503105590062e-05,
"loss": 0.4768,
"step": 195
},
{
"epoch": 0.1863932898415657,
"grad_norm": 0.4592171701659634,
"learning_rate": 3.1055900621118014e-05,
"loss": 0.4782,
"step": 200
},
{
"epoch": 0.19105312208760486,
"grad_norm": 0.5338610618981041,
"learning_rate": 3.183229813664597e-05,
"loss": 0.4677,
"step": 205
},
{
"epoch": 0.195712954333644,
"grad_norm": 0.679375515050287,
"learning_rate": 3.260869565217392e-05,
"loss": 0.4817,
"step": 210
},
{
"epoch": 0.20037278657968313,
"grad_norm": 0.5075771202954662,
"learning_rate": 3.3385093167701865e-05,
"loss": 0.4632,
"step": 215
},
{
"epoch": 0.20503261882572227,
"grad_norm": 0.5271972882853628,
"learning_rate": 3.4161490683229814e-05,
"loss": 0.4674,
"step": 220
},
{
"epoch": 0.2096924510717614,
"grad_norm": 0.45927485782401883,
"learning_rate": 3.493788819875777e-05,
"loss": 0.4496,
"step": 225
},
{
"epoch": 0.21435228331780057,
"grad_norm": 0.3875430276374643,
"learning_rate": 3.571428571428572e-05,
"loss": 0.4628,
"step": 230
},
{
"epoch": 0.2190121155638397,
"grad_norm": 0.43592470909651004,
"learning_rate": 3.6490683229813665e-05,
"loss": 0.4604,
"step": 235
},
{
"epoch": 0.22367194780987884,
"grad_norm": 0.45541861707423287,
"learning_rate": 3.7267080745341614e-05,
"loss": 0.4578,
"step": 240
},
{
"epoch": 0.22833178005591798,
"grad_norm": 0.5415802082513514,
"learning_rate": 3.804347826086957e-05,
"loss": 0.4628,
"step": 245
},
{
"epoch": 0.23299161230195714,
"grad_norm": 0.48756789908879733,
"learning_rate": 3.881987577639752e-05,
"loss": 0.4551,
"step": 250
},
{
"epoch": 0.23765144454799628,
"grad_norm": 0.48641029082339876,
"learning_rate": 3.9596273291925465e-05,
"loss": 0.4636,
"step": 255
},
{
"epoch": 0.2423112767940354,
"grad_norm": 0.44167537560377024,
"learning_rate": 4.0372670807453414e-05,
"loss": 0.4584,
"step": 260
},
{
"epoch": 0.24697110904007455,
"grad_norm": 0.49523500628083084,
"learning_rate": 4.114906832298137e-05,
"loss": 0.457,
"step": 265
},
{
"epoch": 0.2516309412861137,
"grad_norm": 0.46362590765498735,
"learning_rate": 4.192546583850932e-05,
"loss": 0.4553,
"step": 270
},
{
"epoch": 0.25629077353215285,
"grad_norm": 0.41387594745783063,
"learning_rate": 4.270186335403727e-05,
"loss": 0.4606,
"step": 275
},
{
"epoch": 0.26095060577819196,
"grad_norm": 0.4327868844195844,
"learning_rate": 4.347826086956522e-05,
"loss": 0.4529,
"step": 280
},
{
"epoch": 0.2656104380242311,
"grad_norm": 0.425878793618421,
"learning_rate": 4.425465838509317e-05,
"loss": 0.457,
"step": 285
},
{
"epoch": 0.2702702702702703,
"grad_norm": 0.49167640847055744,
"learning_rate": 4.5031055900621124e-05,
"loss": 0.4586,
"step": 290
},
{
"epoch": 0.2749301025163094,
"grad_norm": 0.6223762427590684,
"learning_rate": 4.580745341614907e-05,
"loss": 0.4627,
"step": 295
},
{
"epoch": 0.27958993476234856,
"grad_norm": 0.6138447122463503,
"learning_rate": 4.658385093167702e-05,
"loss": 0.4643,
"step": 300
},
{
"epoch": 0.2842497670083877,
"grad_norm": 0.5410077879825527,
"learning_rate": 4.736024844720497e-05,
"loss": 0.4531,
"step": 305
},
{
"epoch": 0.28890959925442683,
"grad_norm": 0.5248106064020833,
"learning_rate": 4.8136645962732924e-05,
"loss": 0.4453,
"step": 310
},
{
"epoch": 0.293569431500466,
"grad_norm": 0.4576312238038398,
"learning_rate": 4.891304347826087e-05,
"loss": 0.4531,
"step": 315
},
{
"epoch": 0.2982292637465051,
"grad_norm": 0.7616375740291316,
"learning_rate": 4.968944099378882e-05,
"loss": 0.4387,
"step": 320
},
{
"epoch": 0.30288909599254427,
"grad_norm": 0.5230246761762287,
"learning_rate": 4.994822229892993e-05,
"loss": 0.4367,
"step": 325
},
{
"epoch": 0.30754892823858343,
"grad_norm": 0.4333888474147187,
"learning_rate": 4.986192613047981e-05,
"loss": 0.4468,
"step": 330
},
{
"epoch": 0.31220876048462254,
"grad_norm": 0.6558069118015527,
"learning_rate": 4.977562996202969e-05,
"loss": 0.447,
"step": 335
},
{
"epoch": 0.3168685927306617,
"grad_norm": 0.4781469048268466,
"learning_rate": 4.968933379357957e-05,
"loss": 0.4487,
"step": 340
},
{
"epoch": 0.32152842497670087,
"grad_norm": 0.4582486158349481,
"learning_rate": 4.9603037625129445e-05,
"loss": 0.4433,
"step": 345
},
{
"epoch": 0.32618825722274,
"grad_norm": 0.4234664780262906,
"learning_rate": 4.951674145667933e-05,
"loss": 0.4568,
"step": 350
},
{
"epoch": 0.33084808946877914,
"grad_norm": 0.4766441062632173,
"learning_rate": 4.94304452882292e-05,
"loss": 0.4431,
"step": 355
},
{
"epoch": 0.33550792171481825,
"grad_norm": 0.5294140666427822,
"learning_rate": 4.934414911977908e-05,
"loss": 0.4405,
"step": 360
},
{
"epoch": 0.3401677539608574,
"grad_norm": 0.3838225157756671,
"learning_rate": 4.9257852951328965e-05,
"loss": 0.437,
"step": 365
},
{
"epoch": 0.3448275862068966,
"grad_norm": 0.4289011151413574,
"learning_rate": 4.917155678287884e-05,
"loss": 0.4498,
"step": 370
},
{
"epoch": 0.3494874184529357,
"grad_norm": 0.4739139367556648,
"learning_rate": 4.908526061442872e-05,
"loss": 0.4364,
"step": 375
},
{
"epoch": 0.35414725069897485,
"grad_norm": 0.6531685734098429,
"learning_rate": 4.89989644459786e-05,
"loss": 0.4507,
"step": 380
},
{
"epoch": 0.35880708294501396,
"grad_norm": 0.38736077721702633,
"learning_rate": 4.891266827752848e-05,
"loss": 0.4526,
"step": 385
},
{
"epoch": 0.3634669151910531,
"grad_norm": 0.39074723361967706,
"learning_rate": 4.882637210907836e-05,
"loss": 0.4372,
"step": 390
},
{
"epoch": 0.3681267474370923,
"grad_norm": 0.4998720672786241,
"learning_rate": 4.874007594062824e-05,
"loss": 0.4432,
"step": 395
},
{
"epoch": 0.3727865796831314,
"grad_norm": 0.41449696680501236,
"learning_rate": 4.865377977217811e-05,
"loss": 0.4284,
"step": 400
},
{
"epoch": 0.37744641192917056,
"grad_norm": 0.42798981510455325,
"learning_rate": 4.8567483603728e-05,
"loss": 0.4466,
"step": 405
},
{
"epoch": 0.3821062441752097,
"grad_norm": 0.453510571603012,
"learning_rate": 4.8481187435277875e-05,
"loss": 0.4425,
"step": 410
},
{
"epoch": 0.38676607642124883,
"grad_norm": 0.5613852564226437,
"learning_rate": 4.839489126682776e-05,
"loss": 0.4296,
"step": 415
},
{
"epoch": 0.391425908667288,
"grad_norm": 0.4991311823287778,
"learning_rate": 4.830859509837763e-05,
"loss": 0.4477,
"step": 420
},
{
"epoch": 0.3960857409133271,
"grad_norm": 0.41710093216407557,
"learning_rate": 4.822229892992751e-05,
"loss": 0.4451,
"step": 425
},
{
"epoch": 0.40074557315936626,
"grad_norm": 0.44852195468081424,
"learning_rate": 4.8136002761477395e-05,
"loss": 0.4322,
"step": 430
},
{
"epoch": 0.40540540540540543,
"grad_norm": 0.5873258297489329,
"learning_rate": 4.804970659302727e-05,
"loss": 0.445,
"step": 435
},
{
"epoch": 0.41006523765144454,
"grad_norm": 0.5301184440251494,
"learning_rate": 4.796341042457715e-05,
"loss": 0.439,
"step": 440
},
{
"epoch": 0.4147250698974837,
"grad_norm": 0.554152067322795,
"learning_rate": 4.787711425612703e-05,
"loss": 0.4337,
"step": 445
},
{
"epoch": 0.4193849021435228,
"grad_norm": 0.4875794890348032,
"learning_rate": 4.779081808767691e-05,
"loss": 0.4245,
"step": 450
},
{
"epoch": 0.424044734389562,
"grad_norm": 0.4690783572871423,
"learning_rate": 4.770452191922679e-05,
"loss": 0.4314,
"step": 455
},
{
"epoch": 0.42870456663560114,
"grad_norm": 0.3725289513240759,
"learning_rate": 4.761822575077667e-05,
"loss": 0.4283,
"step": 460
},
{
"epoch": 0.43336439888164024,
"grad_norm": 0.4830268598668616,
"learning_rate": 4.753192958232654e-05,
"loss": 0.4255,
"step": 465
},
{
"epoch": 0.4380242311276794,
"grad_norm": 0.43173494250112954,
"learning_rate": 4.744563341387643e-05,
"loss": 0.4378,
"step": 470
},
{
"epoch": 0.4426840633737186,
"grad_norm": 0.43237002431737065,
"learning_rate": 4.7359337245426306e-05,
"loss": 0.4277,
"step": 475
},
{
"epoch": 0.4473438956197577,
"grad_norm": 0.41385681702794824,
"learning_rate": 4.7273041076976184e-05,
"loss": 0.4394,
"step": 480
},
{
"epoch": 0.45200372786579684,
"grad_norm": 0.40157124060011173,
"learning_rate": 4.718674490852606e-05,
"loss": 0.432,
"step": 485
},
{
"epoch": 0.45666356011183595,
"grad_norm": 0.39938983254093463,
"learning_rate": 4.710044874007594e-05,
"loss": 0.4264,
"step": 490
},
{
"epoch": 0.4613233923578751,
"grad_norm": 0.39732323279012777,
"learning_rate": 4.7014152571625826e-05,
"loss": 0.4321,
"step": 495
},
{
"epoch": 0.4659832246039143,
"grad_norm": 0.4747358464791143,
"learning_rate": 4.6927856403175704e-05,
"loss": 0.435,
"step": 500
},
{
"epoch": 0.4706430568499534,
"grad_norm": 0.3698718174123855,
"learning_rate": 4.684156023472558e-05,
"loss": 0.4221,
"step": 505
},
{
"epoch": 0.47530288909599255,
"grad_norm": 0.4305572996344627,
"learning_rate": 4.675526406627546e-05,
"loss": 0.4303,
"step": 510
},
{
"epoch": 0.47996272134203166,
"grad_norm": 0.6085259797324423,
"learning_rate": 4.666896789782534e-05,
"loss": 0.4281,
"step": 515
},
{
"epoch": 0.4846225535880708,
"grad_norm": 0.5730318489213171,
"learning_rate": 4.658267172937522e-05,
"loss": 0.4321,
"step": 520
},
{
"epoch": 0.48928238583411,
"grad_norm": 0.4332245949035479,
"learning_rate": 4.64963755609251e-05,
"loss": 0.4309,
"step": 525
},
{
"epoch": 0.4939422180801491,
"grad_norm": 0.508102013567185,
"learning_rate": 4.641007939247497e-05,
"loss": 0.428,
"step": 530
},
{
"epoch": 0.49860205032618826,
"grad_norm": 0.34669842614662666,
"learning_rate": 4.632378322402486e-05,
"loss": 0.4283,
"step": 535
},
{
"epoch": 0.5032618825722274,
"grad_norm": 0.3889254150420956,
"learning_rate": 4.6237487055574736e-05,
"loss": 0.4178,
"step": 540
},
{
"epoch": 0.5079217148182665,
"grad_norm": 0.49239466237923585,
"learning_rate": 4.6151190887124615e-05,
"loss": 0.4244,
"step": 545
},
{
"epoch": 0.5125815470643057,
"grad_norm": 0.4397316581317278,
"learning_rate": 4.606489471867449e-05,
"loss": 0.4245,
"step": 550
}
],
"logging_steps": 5,
"max_steps": 3219,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 550,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.708780893462856e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}