{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9983722192078134,
"eval_steps": 500,
"global_step": 460,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002170374389582203,
"grad_norm": 1.069300659689101,
"learning_rate": 4.347826086956522e-06,
"loss": 1.414,
"step": 1
},
{
"epoch": 0.010851871947911014,
"grad_norm": 1.0869761401892497,
"learning_rate": 2.173913043478261e-05,
"loss": 1.3963,
"step": 5
},
{
"epoch": 0.02170374389582203,
"grad_norm": 0.2444703280571688,
"learning_rate": 4.347826086956522e-05,
"loss": 1.3471,
"step": 10
},
{
"epoch": 0.032555615843733045,
"grad_norm": 0.21229931040050687,
"learning_rate": 6.521739130434783e-05,
"loss": 1.2864,
"step": 15
},
{
"epoch": 0.04340748779164406,
"grad_norm": 0.1762825707559893,
"learning_rate": 8.695652173913044e-05,
"loss": 1.2743,
"step": 20
},
{
"epoch": 0.05425935973955507,
"grad_norm": 0.16054804651124344,
"learning_rate": 0.00010869565217391305,
"loss": 1.2327,
"step": 25
},
{
"epoch": 0.06511123168746609,
"grad_norm": 0.1039496796881345,
"learning_rate": 0.00013043478260869567,
"loss": 1.2126,
"step": 30
},
{
"epoch": 0.0759631036353771,
"grad_norm": 0.09387480834859042,
"learning_rate": 0.00015217391304347827,
"loss": 1.1809,
"step": 35
},
{
"epoch": 0.08681497558328811,
"grad_norm": 0.10738381024430724,
"learning_rate": 0.00017391304347826088,
"loss": 1.1958,
"step": 40
},
{
"epoch": 0.09766684753119913,
"grad_norm": 0.07429080271089483,
"learning_rate": 0.0001956521739130435,
"loss": 1.1523,
"step": 45
},
{
"epoch": 0.10851871947911014,
"grad_norm": 0.08592662653519074,
"learning_rate": 0.00019995393663024054,
"loss": 1.1594,
"step": 50
},
{
"epoch": 0.11937059142702117,
"grad_norm": 0.09095607145429864,
"learning_rate": 0.00019976687691905393,
"loss": 1.1614,
"step": 55
},
{
"epoch": 0.13022246337493218,
"grad_norm": 0.09659070176916736,
"learning_rate": 0.00019943621095573586,
"loss": 1.1479,
"step": 60
},
{
"epoch": 0.1410743353228432,
"grad_norm": 0.08577984921023268,
"learning_rate": 0.0001989624147068713,
"loss": 1.1363,
"step": 65
},
{
"epoch": 0.1519262072707542,
"grad_norm": 0.09436954934257955,
"learning_rate": 0.0001983461701633742,
"loss": 1.1407,
"step": 70
},
{
"epoch": 0.16277807921866522,
"grad_norm": 0.07860010966590557,
"learning_rate": 0.00019758836435881746,
"loss": 1.1566,
"step": 75
},
{
"epoch": 0.17362995116657623,
"grad_norm": 0.07271734829149594,
"learning_rate": 0.00019669008809262062,
"loss": 1.139,
"step": 80
},
{
"epoch": 0.18448182311448724,
"grad_norm": 0.08286259363397955,
"learning_rate": 0.0001956526343599335,
"loss": 1.1428,
"step": 85
},
{
"epoch": 0.19533369506239825,
"grad_norm": 0.07039798681596814,
"learning_rate": 0.00019447749649047542,
"loss": 1.128,
"step": 90
},
{
"epoch": 0.20618556701030927,
"grad_norm": 0.07088917306719042,
"learning_rate": 0.00019316636599900946,
"loss": 1.1283,
"step": 95
},
{
"epoch": 0.21703743895822028,
"grad_norm": 0.07293899658768029,
"learning_rate": 0.00019172113015054532,
"loss": 1.1229,
"step": 100
},
{
"epoch": 0.22788931090613132,
"grad_norm": 0.07001959910529978,
"learning_rate": 0.00019014386924377582,
"loss": 1.1341,
"step": 105
},
{
"epoch": 0.23874118285404233,
"grad_norm": 0.07897051254796171,
"learning_rate": 0.00018843685361665723,
"loss": 1.1367,
"step": 110
},
{
"epoch": 0.24959305480195335,
"grad_norm": 0.07980670912072682,
"learning_rate": 0.00018660254037844388,
"loss": 1.1198,
"step": 115
},
{
"epoch": 0.26044492674986436,
"grad_norm": 0.07327615168352668,
"learning_rate": 0.00018464356987288013,
"loss": 1.1226,
"step": 120
},
{
"epoch": 0.27129679869777534,
"grad_norm": 0.07718729819586401,
"learning_rate": 0.00018256276187764197,
"loss": 1.1132,
"step": 125
},
{
"epoch": 0.2821486706456864,
"grad_norm": 0.06955350279895621,
"learning_rate": 0.00018036311154549784,
"loss": 1.1143,
"step": 130
},
{
"epoch": 0.29300054259359737,
"grad_norm": 0.07312017765794143,
"learning_rate": 0.00017804778509303138,
"loss": 1.1055,
"step": 135
},
{
"epoch": 0.3038524145415084,
"grad_norm": 0.06917446214768698,
"learning_rate": 0.00017562011524313185,
"loss": 1.1036,
"step": 140
},
{
"epoch": 0.31470428648941945,
"grad_norm": 0.07416178127350893,
"learning_rate": 0.00017308359642781242,
"loss": 1.1105,
"step": 145
},
{
"epoch": 0.32555615843733043,
"grad_norm": 0.07262036822274683,
"learning_rate": 0.00017044187975826124,
"loss": 1.1219,
"step": 150
},
{
"epoch": 0.3364080303852415,
"grad_norm": 0.07372734639616982,
"learning_rate": 0.0001676987677693659,
"loss": 1.1089,
"step": 155
},
{
"epoch": 0.34725990233315246,
"grad_norm": 0.07467616325072211,
"learning_rate": 0.0001648582089462756,
"loss": 1.1242,
"step": 160
},
{
"epoch": 0.3581117742810635,
"grad_norm": 0.07693038799730868,
"learning_rate": 0.0001619242920408802,
"loss": 1.1035,
"step": 165
},
{
"epoch": 0.3689636462289745,
"grad_norm": 0.07824471460501765,
"learning_rate": 0.00015890124018638638,
"loss": 1.1215,
"step": 170
},
{
"epoch": 0.3798155181768855,
"grad_norm": 0.07888988968904209,
"learning_rate": 0.00015579340481846336,
"loss": 1.1255,
"step": 175
},
{
"epoch": 0.3906673901247965,
"grad_norm": 0.0793792296368889,
"learning_rate": 0.00015260525941170712,
"loss": 1.1172,
"step": 180
},
{
"epoch": 0.40151926207270755,
"grad_norm": 0.0787057550750279,
"learning_rate": 0.00014934139304044033,
"loss": 1.1081,
"step": 185
},
{
"epoch": 0.41237113402061853,
"grad_norm": 0.07367605233129934,
"learning_rate": 0.00014600650377311522,
"loss": 1.1213,
"step": 190
},
{
"epoch": 0.4232230059685296,
"grad_norm": 0.07155404224760639,
"learning_rate": 0.00014260539190982886,
"loss": 1.1023,
"step": 195
},
{
"epoch": 0.43407487791644056,
"grad_norm": 0.07019186009135339,
"learning_rate": 0.00013914295307268396,
"loss": 1.1104,
"step": 200
},
{
"epoch": 0.4449267498643516,
"grad_norm": 0.07069181937055558,
"learning_rate": 0.00013562417115894172,
"loss": 1.1078,
"step": 205
},
{
"epoch": 0.45577862181226264,
"grad_norm": 0.07290667661940653,
"learning_rate": 0.00013205411116710972,
"loss": 1.0888,
"step": 210
},
{
"epoch": 0.4666304937601736,
"grad_norm": 0.07161072440717098,
"learning_rate": 0.0001284379119062912,
"loss": 1.1235,
"step": 215
},
{
"epoch": 0.47748236570808467,
"grad_norm": 0.07060414902470838,
"learning_rate": 0.00012478077859929,
"loss": 1.1172,
"step": 220
},
{
"epoch": 0.48833423765599565,
"grad_norm": 0.07615333425469985,
"learning_rate": 0.00012108797539011847,
"loss": 1.1098,
"step": 225
},
{
"epoch": 0.4991861096039067,
"grad_norm": 0.07182595107605282,
"learning_rate": 0.00011736481776669306,
"loss": 1.1132,
"step": 230
},
{
"epoch": 0.5100379815518177,
"grad_norm": 0.0744050275020255,
"learning_rate": 0.00011361666490962468,
"loss": 1.1058,
"step": 235
},
{
"epoch": 0.5208898534997287,
"grad_norm": 0.06916253752221385,
"learning_rate": 0.00010984891197811687,
"loss": 1.1073,
"step": 240
},
{
"epoch": 0.5317417254476398,
"grad_norm": 0.07255483864108674,
"learning_rate": 0.00010606698234407586,
"loss": 1.1159,
"step": 245
},
{
"epoch": 0.5425935973955507,
"grad_norm": 0.0718643676883612,
"learning_rate": 0.00010227631978561056,
"loss": 1.1067,
"step": 250
},
{
"epoch": 0.5534454693434617,
"grad_norm": 0.07709129734227686,
"learning_rate": 9.848238065115975e-05,
"loss": 1.1059,
"step": 255
},
{
"epoch": 0.5642973412913728,
"grad_norm": 0.07119845087632411,
"learning_rate": 9.469062600552509e-05,
"loss": 1.1072,
"step": 260
},
{
"epoch": 0.5751492132392838,
"grad_norm": 0.07116811249448173,
"learning_rate": 9.09065137691153e-05,
"loss": 1.0919,
"step": 265
},
{
"epoch": 0.5860010851871947,
"grad_norm": 0.07826423399254606,
"learning_rate": 8.713549086171691e-05,
"loss": 1.0896,
"step": 270
},
{
"epoch": 0.5968529571351058,
"grad_norm": 0.06985572465886306,
"learning_rate": 8.33829853620986e-05,
"loss": 1.0826,
"step": 275
},
{
"epoch": 0.6077048290830168,
"grad_norm": 0.07521227026642825,
"learning_rate": 7.965439869473664e-05,
"loss": 1.1015,
"step": 280
},
{
"epoch": 0.6185567010309279,
"grad_norm": 0.07085025384725667,
"learning_rate": 7.595509785490617e-05,
"loss": 1.1082,
"step": 285
},
{
"epoch": 0.6294085729788389,
"grad_norm": 0.07314117620701872,
"learning_rate": 7.229040768333115e-05,
"loss": 1.1191,
"step": 290
},
{
"epoch": 0.6402604449267498,
"grad_norm": 0.07800277021405978,
"learning_rate": 6.866560320151179e-05,
"loss": 1.1217,
"step": 295
},
{
"epoch": 0.6511123168746609,
"grad_norm": 0.07587494199122763,
"learning_rate": 6.508590201876317e-05,
"loss": 1.1187,
"step": 300
},
{
"epoch": 0.6619641888225719,
"grad_norm": 0.07415053941958896,
"learning_rate": 6.155645682189351e-05,
"loss": 1.1124,
"step": 305
},
{
"epoch": 0.672816060770483,
"grad_norm": 0.07124587825867733,
"learning_rate": 5.8082347958333625e-05,
"loss": 1.1062,
"step": 310
},
{
"epoch": 0.6836679327183939,
"grad_norm": 0.07506903523794208,
"learning_rate": 5.466857612339229e-05,
"loss": 1.1227,
"step": 315
},
{
"epoch": 0.6945198046663049,
"grad_norm": 0.07916490012178254,
"learning_rate": 5.1320055162165115e-05,
"loss": 1.1115,
"step": 320
},
{
"epoch": 0.705371676614216,
"grad_norm": 0.07011216352533452,
"learning_rate": 4.804160499645667e-05,
"loss": 1.0982,
"step": 325
},
{
"epoch": 0.716223548562127,
"grad_norm": 0.07092293644080847,
"learning_rate": 4.483794468689728e-05,
"loss": 1.0812,
"step": 330
},
{
"epoch": 0.7270754205100379,
"grad_norm": 0.0718641240464739,
"learning_rate": 4.1713685640242165e-05,
"loss": 1.1113,
"step": 335
},
{
"epoch": 0.737927292457949,
"grad_norm": 0.06926837229200397,
"learning_rate": 3.8673324971628357e-05,
"loss": 1.0968,
"step": 340
},
{
"epoch": 0.74877916440586,
"grad_norm": 0.06788108886080438,
"learning_rate": 3.5721239031346066e-05,
"loss": 1.1169,
"step": 345
},
{
"epoch": 0.759631036353771,
"grad_norm": 0.07009824066249028,
"learning_rate": 3.2861677105440336e-05,
"loss": 1.117,
"step": 350
},
{
"epoch": 0.7704829083016821,
"grad_norm": 0.07291917614935897,
"learning_rate": 3.009875529921181e-05,
"loss": 1.1034,
"step": 355
},
{
"epoch": 0.781334780249593,
"grad_norm": 0.07028920602015883,
"learning_rate": 2.7436450612420095e-05,
"loss": 1.0959,
"step": 360
},
{
"epoch": 0.7921866521975041,
"grad_norm": 0.07447440376259351,
"learning_rate": 2.4878595214718236e-05,
"loss": 1.0976,
"step": 365
},
{
"epoch": 0.8030385241454151,
"grad_norm": 0.06926375563549898,
"learning_rate": 2.242887092955801e-05,
"loss": 1.1142,
"step": 370
},
{
"epoch": 0.8138903960933261,
"grad_norm": 0.06772391666168086,
"learning_rate": 2.0090803934506764e-05,
"loss": 1.1087,
"step": 375
},
{
"epoch": 0.8247422680412371,
"grad_norm": 0.06992654447190966,
"learning_rate": 1.7867759685603114e-05,
"loss": 1.0827,
"step": 380
},
{
"epoch": 0.8355941399891481,
"grad_norm": 0.07112396412052087,
"learning_rate": 1.5762938073058853e-05,
"loss": 1.1133,
"step": 385
},
{
"epoch": 0.8464460119370592,
"grad_norm": 0.0690455035742087,
"learning_rate": 1.3779368815278647e-05,
"loss": 1.0997,
"step": 390
},
{
"epoch": 0.8572978838849702,
"grad_norm": 0.06782571299859211,
"learning_rate": 1.1919907097828653e-05,
"loss": 1.0827,
"step": 395
},
{
"epoch": 0.8681497558328811,
"grad_norm": 0.06820364734874568,
"learning_rate": 1.01872294636304e-05,
"loss": 1.1097,
"step": 400
},
{
"epoch": 0.8790016277807922,
"grad_norm": 0.06714602286547451,
"learning_rate": 8.58382996029652e-06,
"loss": 1.1122,
"step": 405
},
{
"epoch": 0.8898534997287032,
"grad_norm": 0.0684925645871222,
"learning_rate": 7.1120165501533e-06,
"loss": 1.0831,
"step": 410
},
{
"epoch": 0.9007053716766142,
"grad_norm": 0.06878969210961007,
"learning_rate": 5.77390778811796e-06,
"loss": 1.0946,
"step": 415
},
{
"epoch": 0.9115572436245253,
"grad_norm": 0.06870977921001499,
"learning_rate": 4.5714297722121106e-06,
"loss": 1.1182,
"step": 420
},
{
"epoch": 0.9224091155724362,
"grad_norm": 0.0705672247176395,
"learning_rate": 3.5063133711014882e-06,
"loss": 1.1038,
"step": 425
},
{
"epoch": 0.9332609875203473,
"grad_norm": 0.07691125665136789,
"learning_rate": 2.580091732652101e-06,
"loss": 1.1109,
"step": 430
},
{
"epoch": 0.9441128594682583,
"grad_norm": 0.07114346398569787,
"learning_rate": 1.7940980770894122e-06,
"loss": 1.113,
"step": 435
},
{
"epoch": 0.9549647314161693,
"grad_norm": 0.0706109763180513,
"learning_rate": 1.1494637779369766e-06,
"loss": 1.0971,
"step": 440
},
{
"epoch": 0.9658166033640803,
"grad_norm": 0.06965109543647903,
"learning_rate": 6.471167334968886e-07,
"loss": 1.093,
"step": 445
},
{
"epoch": 0.9766684753119913,
"grad_norm": 0.07100289385231237,
"learning_rate": 2.877800312160783e-07,
"loss": 1.109,
"step": 450
},
{
"epoch": 0.9875203472599023,
"grad_norm": 0.07102279530958475,
"learning_rate": 7.197090686119623e-08,
"loss": 1.109,
"step": 455
},
{
"epoch": 0.9983722192078134,
"grad_norm": 0.06941067748225418,
"learning_rate": 0.0,
"loss": 1.1043,
"step": 460
},
{
"epoch": 0.9983722192078134,
"eval_loss": 0.9801881909370422,
"eval_runtime": 2.1363,
"eval_samples_per_second": 3.277,
"eval_steps_per_second": 0.936,
"step": 460
},
{
"epoch": 0.9983722192078134,
"step": 460,
"total_flos": 1.4599365322604544e+16,
"train_loss": 1.1265571905219036,
"train_runtime": 17446.1728,
"train_samples_per_second": 3.379,
"train_steps_per_second": 0.026
}
],
"logging_steps": 5,
"max_steps": 460,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4599365322604544e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}