{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 200,
"global_step": 1480,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006760182524928173,
"grad_norm": 15.915069580078125,
"learning_rate": 6.081081081081082e-07,
"loss": 1.028,
"step": 10
},
{
"epoch": 0.013520365049856346,
"grad_norm": 7.203866004943848,
"learning_rate": 1.2837837837837838e-06,
"loss": 0.8412,
"step": 20
},
{
"epoch": 0.020280547574784518,
"grad_norm": 4.731671333312988,
"learning_rate": 1.9594594594594595e-06,
"loss": 0.6928,
"step": 30
},
{
"epoch": 0.027040730099712692,
"grad_norm": 4.059757709503174,
"learning_rate": 2.6351351351351353e-06,
"loss": 0.5823,
"step": 40
},
{
"epoch": 0.03380091262464086,
"grad_norm": 3.655268430709839,
"learning_rate": 3.310810810810811e-06,
"loss": 0.5195,
"step": 50
},
{
"epoch": 0.040561095149569036,
"grad_norm": 3.749523401260376,
"learning_rate": 3.986486486486487e-06,
"loss": 0.4697,
"step": 60
},
{
"epoch": 0.04732127767449721,
"grad_norm": 3.336225748062134,
"learning_rate": 4.6621621621621625e-06,
"loss": 0.4567,
"step": 70
},
{
"epoch": 0.054081460199425384,
"grad_norm": 3.8789913654327393,
"learning_rate": 5.337837837837838e-06,
"loss": 0.4675,
"step": 80
},
{
"epoch": 0.06084164272435356,
"grad_norm": 3.631592035293579,
"learning_rate": 6.013513513513514e-06,
"loss": 0.4531,
"step": 90
},
{
"epoch": 0.06760182524928172,
"grad_norm": 3.175447702407837,
"learning_rate": 6.689189189189191e-06,
"loss": 0.4368,
"step": 100
},
{
"epoch": 0.0743620077742099,
"grad_norm": 3.1399242877960205,
"learning_rate": 7.3648648648648655e-06,
"loss": 0.4185,
"step": 110
},
{
"epoch": 0.08112219029913807,
"grad_norm": 3.3930342197418213,
"learning_rate": 8.040540540540541e-06,
"loss": 0.4202,
"step": 120
},
{
"epoch": 0.08788237282406625,
"grad_norm": 3.535984516143799,
"learning_rate": 8.716216216216217e-06,
"loss": 0.4299,
"step": 130
},
{
"epoch": 0.09464255534899442,
"grad_norm": 3.0242111682891846,
"learning_rate": 9.391891891891893e-06,
"loss": 0.4134,
"step": 140
},
{
"epoch": 0.1014027378739226,
"grad_norm": 3.216952085494995,
"learning_rate": 9.999986093075303e-06,
"loss": 0.4138,
"step": 150
},
{
"epoch": 0.10816292039885077,
"grad_norm": 2.8959848880767822,
"learning_rate": 9.998317355716393e-06,
"loss": 0.4063,
"step": 160
},
{
"epoch": 0.11492310292377894,
"grad_norm": 2.8924849033355713,
"learning_rate": 9.993868297034709e-06,
"loss": 0.4265,
"step": 170
},
{
"epoch": 0.12168328544870712,
"grad_norm": 3.100831985473633,
"learning_rate": 9.986641391825633e-06,
"loss": 0.4437,
"step": 180
},
{
"epoch": 0.1284434679736353,
"grad_norm": 2.6609292030334473,
"learning_rate": 9.976640660065733e-06,
"loss": 0.3854,
"step": 190
},
{
"epoch": 0.13520365049856345,
"grad_norm": 2.6647753715515137,
"learning_rate": 9.963871664676647e-06,
"loss": 0.3906,
"step": 200
},
{
"epoch": 0.13520365049856345,
"eval_loss": 0.4129255712032318,
"eval_runtime": 1274.8086,
"eval_samples_per_second": 4.126,
"eval_steps_per_second": 0.516,
"step": 200
},
{
"epoch": 0.14196383302349164,
"grad_norm": 2.908745765686035,
"learning_rate": 9.948341508430691e-06,
"loss": 0.416,
"step": 210
},
{
"epoch": 0.1487240155484198,
"grad_norm": 2.6044161319732666,
"learning_rate": 9.93005882999994e-06,
"loss": 0.4055,
"step": 220
},
{
"epoch": 0.15548419807334798,
"grad_norm": 2.6734066009521484,
"learning_rate": 9.909033799150947e-06,
"loss": 0.4093,
"step": 230
},
{
"epoch": 0.16224438059827614,
"grad_norm": 2.681687116622925,
"learning_rate": 9.885278111087803e-06,
"loss": 0.4183,
"step": 240
},
{
"epoch": 0.16900456312320433,
"grad_norm": 2.633845806121826,
"learning_rate": 9.85880497994666e-06,
"loss": 0.3845,
"step": 250
},
{
"epoch": 0.1757647456481325,
"grad_norm": 2.317528486251831,
"learning_rate": 9.829629131445342e-06,
"loss": 0.3886,
"step": 260
},
{
"epoch": 0.18252492817306068,
"grad_norm": 2.4552831649780273,
"learning_rate": 9.797766794692156e-06,
"loss": 0.4016,
"step": 270
},
{
"epoch": 0.18928511069798884,
"grad_norm": 2.5544655323028564,
"learning_rate": 9.76323569315841e-06,
"loss": 0.3855,
"step": 280
},
{
"epoch": 0.19604529322291703,
"grad_norm": 2.6713716983795166,
"learning_rate": 9.726055034819726e-06,
"loss": 0.3987,
"step": 290
},
{
"epoch": 0.2028054757478452,
"grad_norm": 2.450104236602783,
"learning_rate": 9.686245501471555e-06,
"loss": 0.3903,
"step": 300
},
{
"epoch": 0.20956565827277338,
"grad_norm": 2.417210340499878,
"learning_rate": 9.64382923722492e-06,
"loss": 0.4012,
"step": 310
},
{
"epoch": 0.21632584079770154,
"grad_norm": 2.1763546466827393,
"learning_rate": 9.598829836188694e-06,
"loss": 0.3823,
"step": 320
},
{
"epoch": 0.22308602332262972,
"grad_norm": 2.173384428024292,
"learning_rate": 9.551272329345373e-06,
"loss": 0.3722,
"step": 330
},
{
"epoch": 0.22984620584755788,
"grad_norm": 2.3560631275177,
"learning_rate": 9.501183170627535e-06,
"loss": 0.3863,
"step": 340
},
{
"epoch": 0.23660638837248607,
"grad_norm": 2.427529811859131,
"learning_rate": 9.448590222202808e-06,
"loss": 0.3713,
"step": 350
},
{
"epoch": 0.24336657089741423,
"grad_norm": 2.119920492172241,
"learning_rate": 9.393522738975497e-06,
"loss": 0.3817,
"step": 360
},
{
"epoch": 0.2501267534223424,
"grad_norm": 2.0783591270446777,
"learning_rate": 9.336011352313502e-06,
"loss": 0.3826,
"step": 370
},
{
"epoch": 0.2568869359472706,
"grad_norm": 2.054614543914795,
"learning_rate": 9.276088053009578e-06,
"loss": 0.3629,
"step": 380
},
{
"epoch": 0.26364711847219874,
"grad_norm": 2.3898072242736816,
"learning_rate": 9.213786173486403e-06,
"loss": 0.3887,
"step": 390
},
{
"epoch": 0.2704073009971269,
"grad_norm": 2.4764063358306885,
"learning_rate": 9.149140369255377e-06,
"loss": 0.3823,
"step": 400
},
{
"epoch": 0.2704073009971269,
"eval_loss": 0.3794357478618622,
"eval_runtime": 1274.964,
"eval_samples_per_second": 4.126,
"eval_steps_per_second": 0.516,
"step": 400
},
{
"epoch": 0.2771674835220551,
"grad_norm": 2.2685487270355225,
"learning_rate": 9.082186599639429e-06,
"loss": 0.3783,
"step": 410
},
{
"epoch": 0.2839276660469833,
"grad_norm": 2.163623332977295,
"learning_rate": 9.012962107770615e-06,
"loss": 0.3824,
"step": 420
},
{
"epoch": 0.29068784857191143,
"grad_norm": 2.4199347496032715,
"learning_rate": 8.941505399873549e-06,
"loss": 0.3722,
"step": 430
},
{
"epoch": 0.2974480310968396,
"grad_norm": 2.7932002544403076,
"learning_rate": 8.86785622384627e-06,
"loss": 0.3725,
"step": 440
},
{
"epoch": 0.3042082136217678,
"grad_norm": 2.3451616764068604,
"learning_rate": 8.792055547150413e-06,
"loss": 0.368,
"step": 450
},
{
"epoch": 0.31096839614669597,
"grad_norm": 2.3174757957458496,
"learning_rate": 8.714145534022999e-06,
"loss": 0.3576,
"step": 460
},
{
"epoch": 0.31772857867162413,
"grad_norm": 2.2395827770233154,
"learning_rate": 8.634169522022522e-06,
"loss": 0.3585,
"step": 470
},
{
"epoch": 0.3244887611965523,
"grad_norm": 2.222054958343506,
"learning_rate": 8.552171997922358e-06,
"loss": 0.3495,
"step": 480
},
{
"epoch": 0.3312489437214805,
"grad_norm": 2.371980667114258,
"learning_rate": 8.46819857296494e-06,
"loss": 0.3662,
"step": 490
},
{
"epoch": 0.33800912624640866,
"grad_norm": 2.1474437713623047,
"learning_rate": 8.382295957490435e-06,
"loss": 0.3616,
"step": 500
},
{
"epoch": 0.3447693087713368,
"grad_norm": 2.476712465286255,
"learning_rate": 8.294511934954054e-06,
"loss": 0.3595,
"step": 510
},
{
"epoch": 0.351529491296265,
"grad_norm": 2.175936698913574,
"learning_rate": 8.20489533534643e-06,
"loss": 0.3549,
"step": 520
},
{
"epoch": 0.3582896738211932,
"grad_norm": 1.8972084522247314,
"learning_rate": 8.113496008031863e-06,
"loss": 0.3426,
"step": 530
},
{
"epoch": 0.36504985634612136,
"grad_norm": 1.9858911037445068,
"learning_rate": 8.020364794019546e-06,
"loss": 0.351,
"step": 540
},
{
"epoch": 0.3718100388710495,
"grad_norm": 1.843750238418579,
"learning_rate": 7.925553497683169e-06,
"loss": 0.3413,
"step": 550
},
{
"epoch": 0.3785702213959777,
"grad_norm": 2.026078224182129,
"learning_rate": 7.829114857944672e-06,
"loss": 0.3589,
"step": 560
},
{
"epoch": 0.38533040392090584,
"grad_norm": 2.246981620788574,
"learning_rate": 7.731102518938137e-06,
"loss": 0.3483,
"step": 570
},
{
"epoch": 0.39209058644583406,
"grad_norm": 2.0345938205718994,
"learning_rate": 7.631571000170156e-06,
"loss": 0.356,
"step": 580
},
{
"epoch": 0.3988507689707622,
"grad_norm": 2.166692018508911,
"learning_rate": 7.530575666193283e-06,
"loss": 0.3544,
"step": 590
},
{
"epoch": 0.4056109514956904,
"grad_norm": 2.0450754165649414,
"learning_rate": 7.4281726958094146e-06,
"loss": 0.3507,
"step": 600
},
{
"epoch": 0.4056109514956904,
"eval_loss": 0.35060280561447144,
"eval_runtime": 1270.966,
"eval_samples_per_second": 4.139,
"eval_steps_per_second": 0.518,
"step": 600
},
{
"epoch": 0.41237113402061853,
"grad_norm": 2.1229889392852783,
"learning_rate": 7.32441905082026e-06,
"loss": 0.3511,
"step": 610
},
{
"epoch": 0.41913131654554675,
"grad_norm": 1.8597893714904785,
"learning_rate": 7.2193724443422405e-06,
"loss": 0.3547,
"step": 620
},
{
"epoch": 0.4258914990704749,
"grad_norm": 1.9739303588867188,
"learning_rate": 7.113091308703498e-06,
"loss": 0.3313,
"step": 630
},
{
"epoch": 0.43265168159540307,
"grad_norm": 1.9213026762008667,
"learning_rate": 7.005634762940818e-06,
"loss": 0.3513,
"step": 640
},
{
"epoch": 0.43941186412033123,
"grad_norm": 1.9261809587478638,
"learning_rate": 6.897062579914587e-06,
"loss": 0.3311,
"step": 650
},
{
"epoch": 0.44617204664525945,
"grad_norm": 1.9443905353546143,
"learning_rate": 6.787435153060039e-06,
"loss": 0.3335,
"step": 660
},
{
"epoch": 0.4529322291701876,
"grad_norm": 2.0720746517181396,
"learning_rate": 6.676813462793337e-06,
"loss": 0.3339,
"step": 670
},
{
"epoch": 0.45969241169511577,
"grad_norm": 1.9223897457122803,
"learning_rate": 6.565259042591112e-06,
"loss": 0.3446,
"step": 680
},
{
"epoch": 0.4664525942200439,
"grad_norm": 2.407301664352417,
"learning_rate": 6.452833944762385e-06,
"loss": 0.3475,
"step": 690
},
{
"epoch": 0.47321277674497214,
"grad_norm": 1.946742057800293,
"learning_rate": 6.339600705931876e-06,
"loss": 0.3274,
"step": 700
},
{
"epoch": 0.4799729592699003,
"grad_norm": 1.9959310293197632,
"learning_rate": 6.225622312253916e-06,
"loss": 0.3347,
"step": 710
},
{
"epoch": 0.48673314179482846,
"grad_norm": 1.8777397871017456,
"learning_rate": 6.110962164376309e-06,
"loss": 0.3419,
"step": 720
},
{
"epoch": 0.4934933243197566,
"grad_norm": 2.0931403636932373,
"learning_rate": 5.995684042173626e-06,
"loss": 0.3344,
"step": 730
},
{
"epoch": 0.5002535068446848,
"grad_norm": 1.7780061960220337,
"learning_rate": 5.8798520692695605e-06,
"loss": 0.3115,
"step": 740
},
{
"epoch": 0.507013689369613,
"grad_norm": 1.8530755043029785,
"learning_rate": 5.763530677368065e-06,
"loss": 0.3248,
"step": 750
},
{
"epoch": 0.5137738718945412,
"grad_norm": 2.074946165084839,
"learning_rate": 5.64678457041312e-06,
"loss": 0.3381,
"step": 760
},
{
"epoch": 0.5205340544194693,
"grad_norm": 2.1300790309906006,
"learning_rate": 5.529678688597081e-06,
"loss": 0.3272,
"step": 770
},
{
"epoch": 0.5272942369443975,
"grad_norm": 1.9701505899429321,
"learning_rate": 5.412278172237591e-06,
"loss": 0.3283,
"step": 780
},
{
"epoch": 0.5340544194693256,
"grad_norm": 1.8419865369796753,
"learning_rate": 5.294648325543188e-06,
"loss": 0.3201,
"step": 790
},
{
"epoch": 0.5408146019942538,
"grad_norm": 1.948057770729065,
"learning_rate": 5.176854580287744e-06,
"loss": 0.3065,
"step": 800
},
{
"epoch": 0.5408146019942538,
"eval_loss": 0.329855740070343,
"eval_runtime": 1276.1984,
"eval_samples_per_second": 4.122,
"eval_steps_per_second": 0.516,
"step": 800
},
{
"epoch": 0.5475747845191821,
"grad_norm": 1.8223614692687988,
"learning_rate": 5.0589624594139455e-06,
"loss": 0.3207,
"step": 810
},
{
"epoch": 0.5543349670441102,
"grad_norm": 2.12894606590271,
"learning_rate": 4.941037540586057e-06,
"loss": 0.3062,
"step": 820
},
{
"epoch": 0.5610951495690384,
"grad_norm": 1.7402645349502563,
"learning_rate": 4.8231454197122575e-06,
"loss": 0.3114,
"step": 830
},
{
"epoch": 0.5678553320939665,
"grad_norm": 1.9146500825881958,
"learning_rate": 4.705351674456813e-06,
"loss": 0.3267,
"step": 840
},
{
"epoch": 0.5746155146188947,
"grad_norm": 1.9016306400299072,
"learning_rate": 4.587721827762411e-06,
"loss": 0.3231,
"step": 850
},
{
"epoch": 0.5813756971438229,
"grad_norm": 1.9831342697143555,
"learning_rate": 4.47032131140292e-06,
"loss": 0.328,
"step": 860
},
{
"epoch": 0.588135879668751,
"grad_norm": 1.9523651599884033,
"learning_rate": 4.353215429586882e-06,
"loss": 0.3215,
"step": 870
},
{
"epoch": 0.5948960621936792,
"grad_norm": 2.0452113151550293,
"learning_rate": 4.236469322631938e-06,
"loss": 0.328,
"step": 880
},
{
"epoch": 0.6016562447186073,
"grad_norm": 1.8030444383621216,
"learning_rate": 4.12014793073044e-06,
"loss": 0.3036,
"step": 890
},
{
"epoch": 0.6084164272435356,
"grad_norm": 1.8724502325057983,
"learning_rate": 4.004315957826375e-06,
"loss": 0.3024,
"step": 900
},
{
"epoch": 0.6151766097684638,
"grad_norm": 1.9722743034362793,
"learning_rate": 3.889037835623693e-06,
"loss": 0.3144,
"step": 910
},
{
"epoch": 0.6219367922933919,
"grad_norm": 1.9698827266693115,
"learning_rate": 3.7743776877460864e-06,
"loss": 0.3228,
"step": 920
},
{
"epoch": 0.6286969748183201,
"grad_norm": 1.863534688949585,
"learning_rate": 3.6603992940681263e-06,
"loss": 0.3055,
"step": 930
},
{
"epoch": 0.6354571573432483,
"grad_norm": 1.9991436004638672,
"learning_rate": 3.5471660552376176e-06,
"loss": 0.3264,
"step": 940
},
{
"epoch": 0.6422173398681764,
"grad_norm": 1.981311559677124,
"learning_rate": 3.4347409574088896e-06,
"loss": 0.316,
"step": 950
},
{
"epoch": 0.6489775223931046,
"grad_norm": 1.8221795558929443,
"learning_rate": 3.323186537206665e-06,
"loss": 0.3071,
"step": 960
},
{
"epoch": 0.6557377049180327,
"grad_norm": 1.8596502542495728,
"learning_rate": 3.2125648469399628e-06,
"loss": 0.3193,
"step": 970
},
{
"epoch": 0.662497887442961,
"grad_norm": 1.6564629077911377,
"learning_rate": 3.1029374200854167e-06,
"loss": 0.3001,
"step": 980
},
{
"epoch": 0.6692580699678892,
"grad_norm": 2.0445966720581055,
"learning_rate": 2.9943652370591835e-06,
"loss": 0.3059,
"step": 990
},
{
"epoch": 0.6760182524928173,
"grad_norm": 1.694770336151123,
"learning_rate": 2.886908691296504e-06,
"loss": 0.3042,
"step": 1000
},
{
"epoch": 0.6760182524928173,
"eval_loss": 0.30882763862609863,
"eval_runtime": 1270.5948,
"eval_samples_per_second": 4.14,
"eval_steps_per_second": 0.518,
"step": 1000
},
{
"epoch": 0.6827784350177455,
"grad_norm": 2.0789783000946045,
"learning_rate": 2.7806275556577624e-06,
"loss": 0.3064,
"step": 1010
},
{
"epoch": 0.6895386175426736,
"grad_norm": 1.8919354677200317,
"learning_rate": 2.6755809491797426e-06,
"loss": 0.3103,
"step": 1020
},
{
"epoch": 0.6962988000676018,
"grad_norm": 1.8415489196777344,
"learning_rate": 2.5718273041905863e-06,
"loss": 0.2969,
"step": 1030
},
{
"epoch": 0.70305898259253,
"grad_norm": 1.8690521717071533,
"learning_rate": 2.469424333806718e-06,
"loss": 0.3062,
"step": 1040
},
{
"epoch": 0.7098191651174581,
"grad_norm": 2.0415210723876953,
"learning_rate": 2.3684289998298453e-06,
"loss": 0.317,
"step": 1050
},
{
"epoch": 0.7165793476423864,
"grad_norm": 1.8409037590026855,
"learning_rate": 2.268897481061863e-06,
"loss": 0.2845,
"step": 1060
},
{
"epoch": 0.7233395301673146,
"grad_norm": 2.023800849914551,
"learning_rate": 2.1708851420553277e-06,
"loss": 0.3041,
"step": 1070
},
{
"epoch": 0.7300997126922427,
"grad_norm": 1.9210608005523682,
"learning_rate": 2.0744465023168313e-06,
"loss": 0.3023,
"step": 1080
},
{
"epoch": 0.7368598952171709,
"grad_norm": 1.8132646083831787,
"learning_rate": 1.9796352059804565e-06,
"loss": 0.3089,
"step": 1090
},
{
"epoch": 0.743620077742099,
"grad_norm": 1.9919142723083496,
"learning_rate": 1.8865039919681377e-06,
"loss": 0.2941,
"step": 1100
},
{
"epoch": 0.7503802602670272,
"grad_norm": 2.0446407794952393,
"learning_rate": 1.7951046646535714e-06,
"loss": 0.2856,
"step": 1110
},
{
"epoch": 0.7571404427919554,
"grad_norm": 2.0371832847595215,
"learning_rate": 1.705488065045946e-06,
"loss": 0.2919,
"step": 1120
},
{
"epoch": 0.7639006253168835,
"grad_norm": 1.980812430381775,
"learning_rate": 1.6177040425095664e-06,
"loss": 0.2985,
"step": 1130
},
{
"epoch": 0.7706608078418117,
"grad_norm": 1.756753921508789,
"learning_rate": 1.5318014270350617e-06,
"loss": 0.2893,
"step": 1140
},
{
"epoch": 0.77742099036674,
"grad_norm": 1.7378242015838623,
"learning_rate": 1.4478280020776442e-06,
"loss": 0.2862,
"step": 1150
},
{
"epoch": 0.7841811728916681,
"grad_norm": 1.8673665523529053,
"learning_rate": 1.3658304779774784e-06,
"loss": 0.2967,
"step": 1160
},
{
"epoch": 0.7909413554165963,
"grad_norm": 2.001669406890869,
"learning_rate": 1.2858544659770001e-06,
"loss": 0.2924,
"step": 1170
},
{
"epoch": 0.7977015379415244,
"grad_norm": 2.087536573410034,
"learning_rate": 1.2079444528495887e-06,
"loss": 0.2921,
"step": 1180
},
{
"epoch": 0.8044617204664526,
"grad_norm": 1.8734593391418457,
"learning_rate": 1.1321437761537307e-06,
"loss": 0.2805,
"step": 1190
},
{
"epoch": 0.8112219029913807,
"grad_norm": 1.8164645433425903,
"learning_rate": 1.0584946001264523e-06,
"loss": 0.2977,
"step": 1200
},
{
"epoch": 0.8112219029913807,
"eval_loss": 0.294592946767807,
"eval_runtime": 1271.0164,
"eval_samples_per_second": 4.138,
"eval_steps_per_second": 0.518,
"step": 1200
},
{
"epoch": 0.8179820855163089,
"grad_norm": 1.854030966758728,
"learning_rate": 9.870378922293855e-07,
"loss": 0.2859,
"step": 1210
},
{
"epoch": 0.8247422680412371,
"grad_norm": 1.7828032970428467,
"learning_rate": 9.178134003605721e-07,
"loss": 0.2916,
"step": 1220
},
{
"epoch": 0.8315024505661653,
"grad_norm": 2.0216519832611084,
"learning_rate": 8.508596307446254e-07,
"loss": 0.2867,
"step": 1230
},
{
"epoch": 0.8382626330910935,
"grad_norm": 1.7878661155700684,
"learning_rate": 7.862138265135983e-07,
"loss": 0.2886,
"step": 1240
},
{
"epoch": 0.8450228156160217,
"grad_norm": 1.789251685142517,
"learning_rate": 7.239119469904227e-07,
"loss": 0.2814,
"step": 1250
},
{
"epoch": 0.8517829981409498,
"grad_norm": 1.7281427383422852,
"learning_rate": 6.639886476864993e-07,
"loss": 0.2806,
"step": 1260
},
{
"epoch": 0.858543180665878,
"grad_norm": 1.7876800298690796,
"learning_rate": 6.064772610245051e-07,
"loss": 0.2778,
"step": 1270
},
{
"epoch": 0.8653033631908061,
"grad_norm": 1.909987449645996,
"learning_rate": 5.514097777971939e-07,
"loss": 0.2966,
"step": 1280
},
{
"epoch": 0.8720635457157343,
"grad_norm": 1.9117987155914307,
"learning_rate": 4.988168293724654e-07,
"loss": 0.2858,
"step": 1290
},
{
"epoch": 0.8788237282406625,
"grad_norm": 1.9773385524749756,
"learning_rate": 4.4872767065462787e-07,
"loss": 0.2871,
"step": 1300
},
{
"epoch": 0.8855839107655906,
"grad_norm": 1.7200452089309692,
"learning_rate": 4.0117016381130636e-07,
"loss": 0.2982,
"step": 1310
},
{
"epoch": 0.8923440932905189,
"grad_norm": 1.9773083925247192,
"learning_rate": 3.561707627750827e-07,
"loss": 0.2772,
"step": 1320
},
{
"epoch": 0.899104275815447,
"grad_norm": 1.8127182722091675,
"learning_rate": 3.137544985284441e-07,
"loss": 0.2939,
"step": 1330
},
{
"epoch": 0.9058644583403752,
"grad_norm": 1.8259408473968506,
"learning_rate": 2.739449651802756e-07,
"loss": 0.2912,
"step": 1340
},
{
"epoch": 0.9126246408653034,
"grad_norm": 1.96041738986969,
"learning_rate": 2.3676430684159035e-07,
"loss": 0.28,
"step": 1350
},
{
"epoch": 0.9193848233902315,
"grad_norm": 2.008439302444458,
"learning_rate": 2.0223320530784574e-07,
"loss": 0.2832,
"step": 1360
},
{
"epoch": 0.9261450059151597,
"grad_norm": 1.8017557859420776,
"learning_rate": 1.7037086855465902e-07,
"loss": 0.2857,
"step": 1370
},
{
"epoch": 0.9329051884400879,
"grad_norm": 1.8403921127319336,
"learning_rate": 1.4119502005334185e-07,
"loss": 0.2933,
"step": 1380
},
{
"epoch": 0.939665370965016,
"grad_norm": 1.755980134010315,
"learning_rate": 1.1472188891219816e-07,
"loss": 0.289,
"step": 1390
},
{
"epoch": 0.9464255534899443,
"grad_norm": 1.820554256439209,
"learning_rate": 9.096620084905472e-08,
"loss": 0.2908,
"step": 1400
},
{
"epoch": 0.9464255534899443,
"eval_loss": 0.2895006537437439,
"eval_runtime": 1270.2472,
"eval_samples_per_second": 4.141,
"eval_steps_per_second": 0.518,
"step": 1400
},
{
"epoch": 0.9531857360148724,
"grad_norm": 2.062675952911377,
"learning_rate": 6.994117000006185e-08,
"loss": 0.2998,
"step": 1410
},
{
"epoch": 0.9599459185398006,
"grad_norm": 1.8810018301010132,
"learning_rate": 5.165849156930969e-08,
"loss": 0.276,
"step": 1420
},
{
"epoch": 0.9667061010647288,
"grad_norm": 2.032048225402832,
"learning_rate": 3.6128335323353804e-08,
"loss": 0.2761,
"step": 1430
},
{
"epoch": 0.9734662835896569,
"grad_norm": 1.843258023262024,
"learning_rate": 2.335933993426687e-08,
"loss": 0.2995,
"step": 1440
},
{
"epoch": 0.9802264661145851,
"grad_norm": 1.7969026565551758,
"learning_rate": 1.3358608174368626e-08,
"loss": 0.2772,
"step": 1450
},
{
"epoch": 0.9869866486395132,
"grad_norm": 2.0245614051818848,
"learning_rate": 6.1317029652929734e-09,
"loss": 0.3069,
"step": 1460
},
{
"epoch": 0.9937468311644414,
"grad_norm": 1.9319560527801514,
"learning_rate": 1.6826442836082035e-09,
"loss": 0.2764,
"step": 1470
},
{
"epoch": 1.0,
"grad_norm": 4.367832660675049,
"learning_rate": 1.3906924697382195e-11,
"loss": 0.2913,
"step": 1480
},
{
"epoch": 1.0,
"step": 1480,
"total_flos": 760407761879040.0,
"train_loss": 0.35166695085731714,
"train_runtime": 62991.4895,
"train_samples_per_second": 0.751,
"train_steps_per_second": 0.023
}
],
"logging_steps": 10,
"max_steps": 1480,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 760407761879040.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}