{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.21384084894817032,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0010692042447408515,
"grad_norm": 0.4522557854652405,
"learning_rate": 0.00018,
"loss": 2.4398,
"step": 10
},
{
"epoch": 0.002138408489481703,
"grad_norm": 0.5223538875579834,
"learning_rate": 0.0001990954773869347,
"loss": 2.1227,
"step": 20
},
{
"epoch": 0.003207612734222555,
"grad_norm": 0.47437742352485657,
"learning_rate": 0.00019809045226130653,
"loss": 1.9881,
"step": 30
},
{
"epoch": 0.004276816978963406,
"grad_norm": 0.3975315988063812,
"learning_rate": 0.0001970854271356784,
"loss": 2.0059,
"step": 40
},
{
"epoch": 0.0053460212237042585,
"grad_norm": 0.29887351393699646,
"learning_rate": 0.00019608040201005026,
"loss": 1.9811,
"step": 50
},
{
"epoch": 0.00641522546844511,
"grad_norm": 0.3441367745399475,
"learning_rate": 0.00019507537688442212,
"loss": 1.9828,
"step": 60
},
{
"epoch": 0.007484429713185962,
"grad_norm": 0.2983023226261139,
"learning_rate": 0.00019407035175879398,
"loss": 2.0323,
"step": 70
},
{
"epoch": 0.008553633957926812,
"grad_norm": 0.30667832493782043,
"learning_rate": 0.00019306532663316582,
"loss": 2.0422,
"step": 80
},
{
"epoch": 0.009622838202667664,
"grad_norm": 0.3337627947330475,
"learning_rate": 0.0001920603015075377,
"loss": 1.9896,
"step": 90
},
{
"epoch": 0.010692042447408517,
"grad_norm": 0.3218510150909424,
"learning_rate": 0.00019105527638190955,
"loss": 1.976,
"step": 100
},
{
"epoch": 0.011761246692149369,
"grad_norm": 0.3417668044567108,
"learning_rate": 0.00019005025125628142,
"loss": 1.9818,
"step": 110
},
{
"epoch": 0.01283045093689022,
"grad_norm": 0.2790803015232086,
"learning_rate": 0.00018904522613065328,
"loss": 2.0031,
"step": 120
},
{
"epoch": 0.013899655181631072,
"grad_norm": 0.4081987142562866,
"learning_rate": 0.00018804020100502512,
"loss": 1.9905,
"step": 130
},
{
"epoch": 0.014968859426371923,
"grad_norm": 0.36898791790008545,
"learning_rate": 0.00018703517587939698,
"loss": 2.0049,
"step": 140
},
{
"epoch": 0.016038063671112775,
"grad_norm": 0.29053226113319397,
"learning_rate": 0.00018603015075376885,
"loss": 1.9797,
"step": 150
},
{
"epoch": 0.017107267915853624,
"grad_norm": 0.25266537070274353,
"learning_rate": 0.00018502512562814071,
"loss": 1.9509,
"step": 160
},
{
"epoch": 0.018176472160594478,
"grad_norm": 0.2833998501300812,
"learning_rate": 0.00018402010050251258,
"loss": 1.9574,
"step": 170
},
{
"epoch": 0.019245676405335328,
"grad_norm": 0.5657384395599365,
"learning_rate": 0.00018301507537688442,
"loss": 2.0581,
"step": 180
},
{
"epoch": 0.02031488065007618,
"grad_norm": 0.29206541180610657,
"learning_rate": 0.00018201005025125628,
"loss": 1.8847,
"step": 190
},
{
"epoch": 0.021384084894817034,
"grad_norm": 0.33557596802711487,
"learning_rate": 0.00018100502512562815,
"loss": 2.0308,
"step": 200
},
{
"epoch": 0.022453289139557884,
"grad_norm": 0.3242787718772888,
"learning_rate": 0.00018,
"loss": 2.0498,
"step": 210
},
{
"epoch": 0.023522493384298737,
"grad_norm": 0.37068045139312744,
"learning_rate": 0.00017899497487437188,
"loss": 1.9788,
"step": 220
},
{
"epoch": 0.024591697629039587,
"grad_norm": 0.3749759793281555,
"learning_rate": 0.00017798994974874371,
"loss": 1.9289,
"step": 230
},
{
"epoch": 0.02566090187378044,
"grad_norm": 0.3888619840145111,
"learning_rate": 0.00017698492462311558,
"loss": 1.9179,
"step": 240
},
{
"epoch": 0.02673010611852129,
"grad_norm": 0.377247154712677,
"learning_rate": 0.00017597989949748744,
"loss": 2.0299,
"step": 250
},
{
"epoch": 0.027799310363262143,
"grad_norm": 0.49286213517189026,
"learning_rate": 0.0001749748743718593,
"loss": 1.9895,
"step": 260
},
{
"epoch": 0.028868514608002993,
"grad_norm": 0.30507227778434753,
"learning_rate": 0.00017396984924623117,
"loss": 1.9596,
"step": 270
},
{
"epoch": 0.029937718852743846,
"grad_norm": 0.3705558180809021,
"learning_rate": 0.000172964824120603,
"loss": 2.0357,
"step": 280
},
{
"epoch": 0.031006923097484696,
"grad_norm": 0.4542909562587738,
"learning_rate": 0.00017195979899497488,
"loss": 1.9323,
"step": 290
},
{
"epoch": 0.03207612734222555,
"grad_norm": 0.3561468720436096,
"learning_rate": 0.00017095477386934674,
"loss": 1.9695,
"step": 300
},
{
"epoch": 0.0331453315869664,
"grad_norm": 0.39809998869895935,
"learning_rate": 0.0001699497487437186,
"loss": 2.01,
"step": 310
},
{
"epoch": 0.03421453583170725,
"grad_norm": 0.279765784740448,
"learning_rate": 0.00016894472361809047,
"loss": 1.9361,
"step": 320
},
{
"epoch": 0.035283740076448106,
"grad_norm": 0.30650126934051514,
"learning_rate": 0.0001679396984924623,
"loss": 1.9411,
"step": 330
},
{
"epoch": 0.036352944321188956,
"grad_norm": 0.33605650067329407,
"learning_rate": 0.00016693467336683417,
"loss": 1.9908,
"step": 340
},
{
"epoch": 0.037422148565929805,
"grad_norm": 0.3010708689689636,
"learning_rate": 0.00016592964824120604,
"loss": 1.9326,
"step": 350
},
{
"epoch": 0.038491352810670655,
"grad_norm": 0.33158665895462036,
"learning_rate": 0.0001649246231155779,
"loss": 1.9362,
"step": 360
},
{
"epoch": 0.03956055705541151,
"grad_norm": 0.2630426585674286,
"learning_rate": 0.00016391959798994977,
"loss": 1.9345,
"step": 370
},
{
"epoch": 0.04062976130015236,
"grad_norm": 0.2744496762752533,
"learning_rate": 0.0001629145728643216,
"loss": 1.916,
"step": 380
},
{
"epoch": 0.04169896554489321,
"grad_norm": 0.33794867992401123,
"learning_rate": 0.00016190954773869347,
"loss": 1.9082,
"step": 390
},
{
"epoch": 0.04276816978963407,
"grad_norm": 0.2850714921951294,
"learning_rate": 0.00016090452261306533,
"loss": 1.9752,
"step": 400
},
{
"epoch": 0.04383737403437492,
"grad_norm": 0.34638258814811707,
"learning_rate": 0.0001598994974874372,
"loss": 1.8934,
"step": 410
},
{
"epoch": 0.04490657827911577,
"grad_norm": 0.2859291434288025,
"learning_rate": 0.00015889447236180906,
"loss": 1.9893,
"step": 420
},
{
"epoch": 0.04597578252385662,
"grad_norm": 0.31361544132232666,
"learning_rate": 0.0001578894472361809,
"loss": 1.9233,
"step": 430
},
{
"epoch": 0.047044986768597474,
"grad_norm": 0.3515094816684723,
"learning_rate": 0.00015688442211055277,
"loss": 1.9668,
"step": 440
},
{
"epoch": 0.048114191013338324,
"grad_norm": 0.3527173399925232,
"learning_rate": 0.00015587939698492463,
"loss": 1.9771,
"step": 450
},
{
"epoch": 0.049183395258079174,
"grad_norm": 0.289266973733902,
"learning_rate": 0.0001548743718592965,
"loss": 2.0094,
"step": 460
},
{
"epoch": 0.050252599502820024,
"grad_norm": 0.30293336510658264,
"learning_rate": 0.00015386934673366836,
"loss": 1.9822,
"step": 470
},
{
"epoch": 0.05132180374756088,
"grad_norm": 0.31669914722442627,
"learning_rate": 0.0001528643216080402,
"loss": 1.9341,
"step": 480
},
{
"epoch": 0.05239100799230173,
"grad_norm": 0.3566587567329407,
"learning_rate": 0.00015185929648241206,
"loss": 1.8382,
"step": 490
},
{
"epoch": 0.05346021223704258,
"grad_norm": 0.2622707188129425,
"learning_rate": 0.00015085427135678393,
"loss": 1.9165,
"step": 500
},
{
"epoch": 0.05452941648178343,
"grad_norm": 0.41455262899398804,
"learning_rate": 0.0001498492462311558,
"loss": 1.9656,
"step": 510
},
{
"epoch": 0.05559862072652429,
"grad_norm": 0.37541866302490234,
"learning_rate": 0.00014884422110552766,
"loss": 1.9291,
"step": 520
},
{
"epoch": 0.056667824971265136,
"grad_norm": 0.36062073707580566,
"learning_rate": 0.0001478391959798995,
"loss": 1.9518,
"step": 530
},
{
"epoch": 0.057737029216005986,
"grad_norm": 0.30928629636764526,
"learning_rate": 0.00014683417085427136,
"loss": 1.8933,
"step": 540
},
{
"epoch": 0.058806233460746836,
"grad_norm": 0.29138654470443726,
"learning_rate": 0.00014582914572864323,
"loss": 1.9848,
"step": 550
},
{
"epoch": 0.05987543770548769,
"grad_norm": 0.259957492351532,
"learning_rate": 0.0001448241206030151,
"loss": 1.8802,
"step": 560
},
{
"epoch": 0.06094464195022854,
"grad_norm": 0.40919119119644165,
"learning_rate": 0.00014381909547738696,
"loss": 1.9503,
"step": 570
},
{
"epoch": 0.06201384619496939,
"grad_norm": 0.323599249124527,
"learning_rate": 0.0001428140703517588,
"loss": 1.9125,
"step": 580
},
{
"epoch": 0.06308305043971024,
"grad_norm": 0.31641989946365356,
"learning_rate": 0.00014180904522613066,
"loss": 1.8805,
"step": 590
},
{
"epoch": 0.0641522546844511,
"grad_norm": 0.4122176766395569,
"learning_rate": 0.00014080402010050252,
"loss": 1.8804,
"step": 600
},
{
"epoch": 0.06522145892919196,
"grad_norm": 0.3238036334514618,
"learning_rate": 0.0001397989949748744,
"loss": 1.9645,
"step": 610
},
{
"epoch": 0.0662906631739328,
"grad_norm": 0.3590157926082611,
"learning_rate": 0.00013879396984924625,
"loss": 1.9316,
"step": 620
},
{
"epoch": 0.06735986741867366,
"grad_norm": 0.3035522997379303,
"learning_rate": 0.0001377889447236181,
"loss": 1.9626,
"step": 630
},
{
"epoch": 0.0684290716634145,
"grad_norm": 0.3481757640838623,
"learning_rate": 0.00013678391959798996,
"loss": 1.9469,
"step": 640
},
{
"epoch": 0.06949827590815535,
"grad_norm": 0.41229742765426636,
"learning_rate": 0.0001357788944723618,
"loss": 2.0198,
"step": 650
},
{
"epoch": 0.07056748015289621,
"grad_norm": 0.5381227731704712,
"learning_rate": 0.00013477386934673368,
"loss": 1.9417,
"step": 660
},
{
"epoch": 0.07163668439763705,
"grad_norm": 0.3313769996166229,
"learning_rate": 0.00013376884422110555,
"loss": 1.9537,
"step": 670
},
{
"epoch": 0.07270588864237791,
"grad_norm": 0.3597472310066223,
"learning_rate": 0.0001327638190954774,
"loss": 1.9603,
"step": 680
},
{
"epoch": 0.07377509288711877,
"grad_norm": 0.29230740666389465,
"learning_rate": 0.00013175879396984925,
"loss": 1.9189,
"step": 690
},
{
"epoch": 0.07484429713185961,
"grad_norm": 0.31095099449157715,
"learning_rate": 0.0001307537688442211,
"loss": 1.8667,
"step": 700
},
{
"epoch": 0.07591350137660047,
"grad_norm": 0.6215882897377014,
"learning_rate": 0.00012974874371859298,
"loss": 1.9415,
"step": 710
},
{
"epoch": 0.07698270562134131,
"grad_norm": 0.3483082056045532,
"learning_rate": 0.00012874371859296485,
"loss": 1.9062,
"step": 720
},
{
"epoch": 0.07805190986608217,
"grad_norm": 0.3564540147781372,
"learning_rate": 0.00012773869346733668,
"loss": 1.9511,
"step": 730
},
{
"epoch": 0.07912111411082302,
"grad_norm": 0.2901301980018616,
"learning_rate": 0.00012673366834170855,
"loss": 1.9828,
"step": 740
},
{
"epoch": 0.08019031835556387,
"grad_norm": 0.35523131489753723,
"learning_rate": 0.0001257286432160804,
"loss": 1.9864,
"step": 750
},
{
"epoch": 0.08125952260030472,
"grad_norm": 0.3558770716190338,
"learning_rate": 0.00012472361809045228,
"loss": 1.8961,
"step": 760
},
{
"epoch": 0.08232872684504558,
"grad_norm": 0.3421788811683655,
"learning_rate": 0.00012371859296482414,
"loss": 1.9287,
"step": 770
},
{
"epoch": 0.08339793108978642,
"grad_norm": 0.2824752926826477,
"learning_rate": 0.00012271356783919598,
"loss": 1.8427,
"step": 780
},
{
"epoch": 0.08446713533452728,
"grad_norm": 0.3791171908378601,
"learning_rate": 0.00012170854271356785,
"loss": 1.9233,
"step": 790
},
{
"epoch": 0.08553633957926814,
"grad_norm": 0.3605143129825592,
"learning_rate": 0.0001207035175879397,
"loss": 1.9573,
"step": 800
},
{
"epoch": 0.08660554382400898,
"grad_norm": 0.4379942715167999,
"learning_rate": 0.00011969849246231158,
"loss": 1.9844,
"step": 810
},
{
"epoch": 0.08767474806874984,
"grad_norm": 0.2933856248855591,
"learning_rate": 0.00011869346733668343,
"loss": 1.8231,
"step": 820
},
{
"epoch": 0.08874395231349068,
"grad_norm": 0.31451016664505005,
"learning_rate": 0.00011768844221105528,
"loss": 1.9715,
"step": 830
},
{
"epoch": 0.08981315655823154,
"grad_norm": 0.3226332366466522,
"learning_rate": 0.00011668341708542714,
"loss": 1.9178,
"step": 840
},
{
"epoch": 0.09088236080297239,
"grad_norm": 0.28982308506965637,
"learning_rate": 0.000115678391959799,
"loss": 2.0103,
"step": 850
},
{
"epoch": 0.09195156504771324,
"grad_norm": 0.30275505781173706,
"learning_rate": 0.00011467336683417087,
"loss": 1.9218,
"step": 860
},
{
"epoch": 0.09302076929245409,
"grad_norm": 0.3631080687046051,
"learning_rate": 0.00011366834170854272,
"loss": 1.9509,
"step": 870
},
{
"epoch": 0.09408997353719495,
"grad_norm": 0.3824511468410492,
"learning_rate": 0.00011266331658291458,
"loss": 1.9624,
"step": 880
},
{
"epoch": 0.09515917778193579,
"grad_norm": 0.24786897003650665,
"learning_rate": 0.00011165829145728644,
"loss": 1.8977,
"step": 890
},
{
"epoch": 0.09622838202667665,
"grad_norm": 0.39904719591140747,
"learning_rate": 0.00011065326633165829,
"loss": 1.9995,
"step": 900
},
{
"epoch": 0.09729758627141749,
"grad_norm": 0.3084559738636017,
"learning_rate": 0.00010964824120603017,
"loss": 1.9172,
"step": 910
},
{
"epoch": 0.09836679051615835,
"grad_norm": 0.35854995250701904,
"learning_rate": 0.00010864321608040202,
"loss": 1.9198,
"step": 920
},
{
"epoch": 0.0994359947608992,
"grad_norm": 0.29974690079689026,
"learning_rate": 0.00010763819095477387,
"loss": 1.9342,
"step": 930
},
{
"epoch": 0.10050519900564005,
"grad_norm": 0.3411110043525696,
"learning_rate": 0.00010663316582914574,
"loss": 1.9233,
"step": 940
},
{
"epoch": 0.1015744032503809,
"grad_norm": 0.3226073980331421,
"learning_rate": 0.00010562814070351759,
"loss": 1.8911,
"step": 950
},
{
"epoch": 0.10264360749512176,
"grad_norm": 0.35765019059181213,
"learning_rate": 0.00010462311557788944,
"loss": 1.9626,
"step": 960
},
{
"epoch": 0.1037128117398626,
"grad_norm": 0.3223406672477722,
"learning_rate": 0.00010361809045226132,
"loss": 1.8816,
"step": 970
},
{
"epoch": 0.10478201598460346,
"grad_norm": 0.3049243986606598,
"learning_rate": 0.00010261306532663317,
"loss": 1.8941,
"step": 980
},
{
"epoch": 0.10585122022934432,
"grad_norm": 0.2669491469860077,
"learning_rate": 0.00010160804020100503,
"loss": 1.8701,
"step": 990
},
{
"epoch": 0.10692042447408516,
"grad_norm": 0.3321566581726074,
"learning_rate": 0.00010060301507537689,
"loss": 1.9848,
"step": 1000
},
{
"epoch": 0.10798962871882602,
"grad_norm": 0.3456732928752899,
"learning_rate": 9.959798994974875e-05,
"loss": 1.9494,
"step": 1010
},
{
"epoch": 0.10905883296356686,
"grad_norm": 0.32657039165496826,
"learning_rate": 9.85929648241206e-05,
"loss": 1.9775,
"step": 1020
},
{
"epoch": 0.11012803720830772,
"grad_norm": 0.3481104373931885,
"learning_rate": 9.758793969849247e-05,
"loss": 1.9383,
"step": 1030
},
{
"epoch": 0.11119724145304857,
"grad_norm": 0.3254976272583008,
"learning_rate": 9.658291457286432e-05,
"loss": 1.9553,
"step": 1040
},
{
"epoch": 0.11226644569778942,
"grad_norm": 0.365232914686203,
"learning_rate": 9.55778894472362e-05,
"loss": 1.935,
"step": 1050
},
{
"epoch": 0.11333564994253027,
"grad_norm": 0.3333910405635834,
"learning_rate": 9.457286432160805e-05,
"loss": 1.9184,
"step": 1060
},
{
"epoch": 0.11440485418727113,
"grad_norm": 0.3671784996986389,
"learning_rate": 9.35678391959799e-05,
"loss": 1.9303,
"step": 1070
},
{
"epoch": 0.11547405843201197,
"grad_norm": 0.2980143427848816,
"learning_rate": 9.256281407035176e-05,
"loss": 1.9606,
"step": 1080
},
{
"epoch": 0.11654326267675283,
"grad_norm": 0.29698073863983154,
"learning_rate": 9.155778894472362e-05,
"loss": 1.9324,
"step": 1090
},
{
"epoch": 0.11761246692149367,
"grad_norm": 0.35693129897117615,
"learning_rate": 9.055276381909548e-05,
"loss": 1.9567,
"step": 1100
},
{
"epoch": 0.11868167116623453,
"grad_norm": 0.39428821206092834,
"learning_rate": 8.954773869346734e-05,
"loss": 1.9712,
"step": 1110
},
{
"epoch": 0.11975087541097539,
"grad_norm": 0.4319991171360016,
"learning_rate": 8.85427135678392e-05,
"loss": 1.9229,
"step": 1120
},
{
"epoch": 0.12082007965571623,
"grad_norm": 0.271451860666275,
"learning_rate": 8.753768844221106e-05,
"loss": 1.9374,
"step": 1130
},
{
"epoch": 0.12188928390045708,
"grad_norm": 0.3350280523300171,
"learning_rate": 8.653266331658291e-05,
"loss": 1.97,
"step": 1140
},
{
"epoch": 0.12295848814519794,
"grad_norm": 0.23576690256595612,
"learning_rate": 8.552763819095478e-05,
"loss": 1.9165,
"step": 1150
},
{
"epoch": 0.12402769238993878,
"grad_norm": 0.3180292844772339,
"learning_rate": 8.452261306532664e-05,
"loss": 1.9629,
"step": 1160
},
{
"epoch": 0.12509689663467963,
"grad_norm": 0.35042503476142883,
"learning_rate": 8.351758793969849e-05,
"loss": 1.9137,
"step": 1170
},
{
"epoch": 0.12616610087942048,
"grad_norm": 0.33998608589172363,
"learning_rate": 8.251256281407036e-05,
"loss": 1.8604,
"step": 1180
},
{
"epoch": 0.12723530512416134,
"grad_norm": 0.3942660093307495,
"learning_rate": 8.150753768844221e-05,
"loss": 1.9758,
"step": 1190
},
{
"epoch": 0.1283045093689022,
"grad_norm": 0.34661826491355896,
"learning_rate": 8.050251256281407e-05,
"loss": 1.9638,
"step": 1200
},
{
"epoch": 0.12937371361364305,
"grad_norm": 0.31751224398612976,
"learning_rate": 7.949748743718594e-05,
"loss": 1.8879,
"step": 1210
},
{
"epoch": 0.1304429178583839,
"grad_norm": 0.3244868516921997,
"learning_rate": 7.849246231155779e-05,
"loss": 1.9034,
"step": 1220
},
{
"epoch": 0.13151212210312474,
"grad_norm": 0.28967052698135376,
"learning_rate": 7.748743718592966e-05,
"loss": 1.8447,
"step": 1230
},
{
"epoch": 0.1325813263478656,
"grad_norm": 0.23402956128120422,
"learning_rate": 7.64824120603015e-05,
"loss": 1.8901,
"step": 1240
},
{
"epoch": 0.13365053059260645,
"grad_norm": 0.29249387979507446,
"learning_rate": 7.547738693467337e-05,
"loss": 1.958,
"step": 1250
},
{
"epoch": 0.1347197348373473,
"grad_norm": 0.3257281184196472,
"learning_rate": 7.447236180904524e-05,
"loss": 1.9411,
"step": 1260
},
{
"epoch": 0.13578893908208817,
"grad_norm": 0.2677409052848816,
"learning_rate": 7.346733668341709e-05,
"loss": 1.9618,
"step": 1270
},
{
"epoch": 0.136858143326829,
"grad_norm": 0.5253011584281921,
"learning_rate": 7.246231155778895e-05,
"loss": 1.888,
"step": 1280
},
{
"epoch": 0.13792734757156985,
"grad_norm": 0.3806459307670593,
"learning_rate": 7.14572864321608e-05,
"loss": 1.9266,
"step": 1290
},
{
"epoch": 0.1389965518163107,
"grad_norm": 0.4092906415462494,
"learning_rate": 7.045226130653267e-05,
"loss": 1.9766,
"step": 1300
},
{
"epoch": 0.14006575606105157,
"grad_norm": 0.2884989082813263,
"learning_rate": 6.944723618090453e-05,
"loss": 1.946,
"step": 1310
},
{
"epoch": 0.14113496030579242,
"grad_norm": 0.3107132315635681,
"learning_rate": 6.844221105527638e-05,
"loss": 1.8267,
"step": 1320
},
{
"epoch": 0.14220416455053325,
"grad_norm": 0.29931432008743286,
"learning_rate": 6.743718592964824e-05,
"loss": 1.9226,
"step": 1330
},
{
"epoch": 0.1432733687952741,
"grad_norm": 0.32327061891555786,
"learning_rate": 6.64321608040201e-05,
"loss": 1.9389,
"step": 1340
},
{
"epoch": 0.14434257304001497,
"grad_norm": 0.3812110126018524,
"learning_rate": 6.542713567839197e-05,
"loss": 1.9137,
"step": 1350
},
{
"epoch": 0.14541177728475582,
"grad_norm": 0.3208372890949249,
"learning_rate": 6.442211055276383e-05,
"loss": 1.8635,
"step": 1360
},
{
"epoch": 0.14648098152949668,
"grad_norm": 0.3378104567527771,
"learning_rate": 6.341708542713568e-05,
"loss": 1.9773,
"step": 1370
},
{
"epoch": 0.14755018577423754,
"grad_norm": 0.32731178402900696,
"learning_rate": 6.241206030150753e-05,
"loss": 1.9294,
"step": 1380
},
{
"epoch": 0.14861939001897836,
"grad_norm": 0.30876997113227844,
"learning_rate": 6.14070351758794e-05,
"loss": 1.9457,
"step": 1390
},
{
"epoch": 0.14968859426371922,
"grad_norm": 0.2618562579154968,
"learning_rate": 6.0402010050251256e-05,
"loss": 1.9014,
"step": 1400
},
{
"epoch": 0.15075779850846008,
"grad_norm": 0.3791179656982422,
"learning_rate": 5.939698492462312e-05,
"loss": 1.897,
"step": 1410
},
{
"epoch": 0.15182700275320093,
"grad_norm": 0.31716200709342957,
"learning_rate": 5.839195979899498e-05,
"loss": 1.947,
"step": 1420
},
{
"epoch": 0.1528962069979418,
"grad_norm": 0.2968918979167938,
"learning_rate": 5.738693467336683e-05,
"loss": 1.9895,
"step": 1430
},
{
"epoch": 0.15396541124268262,
"grad_norm": 0.3310339152812958,
"learning_rate": 5.63819095477387e-05,
"loss": 1.9455,
"step": 1440
},
{
"epoch": 0.15503461548742348,
"grad_norm": 0.30880674719810486,
"learning_rate": 5.537688442211055e-05,
"loss": 1.9152,
"step": 1450
},
{
"epoch": 0.15610381973216433,
"grad_norm": 0.28629955649375916,
"learning_rate": 5.437185929648242e-05,
"loss": 1.8665,
"step": 1460
},
{
"epoch": 0.1571730239769052,
"grad_norm": 0.3241690695285797,
"learning_rate": 5.3366834170854276e-05,
"loss": 1.9451,
"step": 1470
},
{
"epoch": 0.15824222822164605,
"grad_norm": 0.26445063948631287,
"learning_rate": 5.236180904522613e-05,
"loss": 1.897,
"step": 1480
},
{
"epoch": 0.1593114324663869,
"grad_norm": 0.3061549961566925,
"learning_rate": 5.135678391959799e-05,
"loss": 1.8208,
"step": 1490
},
{
"epoch": 0.16038063671112773,
"grad_norm": 0.334157794713974,
"learning_rate": 5.035175879396985e-05,
"loss": 1.854,
"step": 1500
},
{
"epoch": 0.1614498409558686,
"grad_norm": 0.2809840142726898,
"learning_rate": 4.934673366834171e-05,
"loss": 1.9399,
"step": 1510
},
{
"epoch": 0.16251904520060945,
"grad_norm": 0.331537663936615,
"learning_rate": 4.834170854271357e-05,
"loss": 1.9757,
"step": 1520
},
{
"epoch": 0.1635882494453503,
"grad_norm": 0.30003657937049866,
"learning_rate": 4.733668341708543e-05,
"loss": 1.9077,
"step": 1530
},
{
"epoch": 0.16465745369009116,
"grad_norm": 0.29070165753364563,
"learning_rate": 4.633165829145729e-05,
"loss": 1.9441,
"step": 1540
},
{
"epoch": 0.165726657934832,
"grad_norm": 0.3907763659954071,
"learning_rate": 4.532663316582915e-05,
"loss": 1.8893,
"step": 1550
},
{
"epoch": 0.16679586217957285,
"grad_norm": 0.3012675940990448,
"learning_rate": 4.4321608040201005e-05,
"loss": 1.9539,
"step": 1560
},
{
"epoch": 0.1678650664243137,
"grad_norm": 0.28272444009780884,
"learning_rate": 4.331658291457287e-05,
"loss": 1.9571,
"step": 1570
},
{
"epoch": 0.16893427066905456,
"grad_norm": 0.3389994502067566,
"learning_rate": 4.231155778894473e-05,
"loss": 1.8685,
"step": 1580
},
{
"epoch": 0.17000347491379542,
"grad_norm": 0.3380429744720459,
"learning_rate": 4.1306532663316586e-05,
"loss": 1.9519,
"step": 1590
},
{
"epoch": 0.17107267915853627,
"grad_norm": 0.3569534122943878,
"learning_rate": 4.0301507537688444e-05,
"loss": 1.8919,
"step": 1600
},
{
"epoch": 0.1721418834032771,
"grad_norm": 0.32496699690818787,
"learning_rate": 3.92964824120603e-05,
"loss": 1.8853,
"step": 1610
},
{
"epoch": 0.17321108764801796,
"grad_norm": 0.3131369650363922,
"learning_rate": 3.829145728643217e-05,
"loss": 1.8952,
"step": 1620
},
{
"epoch": 0.17428029189275882,
"grad_norm": 0.3050549626350403,
"learning_rate": 3.7286432160804025e-05,
"loss": 1.85,
"step": 1630
},
{
"epoch": 0.17534949613749967,
"grad_norm": 0.2770765423774719,
"learning_rate": 3.628140703517588e-05,
"loss": 1.9345,
"step": 1640
},
{
"epoch": 0.17641870038224053,
"grad_norm": 0.40778982639312744,
"learning_rate": 3.527638190954774e-05,
"loss": 1.885,
"step": 1650
},
{
"epoch": 0.17748790462698136,
"grad_norm": 0.305587500333786,
"learning_rate": 3.42713567839196e-05,
"loss": 1.9261,
"step": 1660
},
{
"epoch": 0.17855710887172221,
"grad_norm": 0.2813640534877777,
"learning_rate": 3.3266331658291464e-05,
"loss": 1.981,
"step": 1670
},
{
"epoch": 0.17962631311646307,
"grad_norm": 0.2698950469493866,
"learning_rate": 3.2261306532663315e-05,
"loss": 1.9081,
"step": 1680
},
{
"epoch": 0.18069551736120393,
"grad_norm": 0.3598436713218689,
"learning_rate": 3.125628140703517e-05,
"loss": 1.9129,
"step": 1690
},
{
"epoch": 0.18176472160594478,
"grad_norm": 0.25739961862564087,
"learning_rate": 3.0251256281407038e-05,
"loss": 1.9376,
"step": 1700
},
{
"epoch": 0.1828339258506856,
"grad_norm": 0.30413052439689636,
"learning_rate": 2.9246231155778896e-05,
"loss": 1.9186,
"step": 1710
},
{
"epoch": 0.18390313009542647,
"grad_norm": 0.2563576400279999,
"learning_rate": 2.8241206030150757e-05,
"loss": 1.847,
"step": 1720
},
{
"epoch": 0.18497233434016733,
"grad_norm": 0.3041762113571167,
"learning_rate": 2.7236180904522612e-05,
"loss": 1.9606,
"step": 1730
},
{
"epoch": 0.18604153858490818,
"grad_norm": 0.283974826335907,
"learning_rate": 2.6231155778894474e-05,
"loss": 1.9263,
"step": 1740
},
{
"epoch": 0.18711074282964904,
"grad_norm": 0.3330858051776886,
"learning_rate": 2.522613065326633e-05,
"loss": 1.8791,
"step": 1750
},
{
"epoch": 0.1881799470743899,
"grad_norm": 0.31441086530685425,
"learning_rate": 2.422110552763819e-05,
"loss": 1.9351,
"step": 1760
},
{
"epoch": 0.18924915131913073,
"grad_norm": 0.31107625365257263,
"learning_rate": 2.321608040201005e-05,
"loss": 1.9231,
"step": 1770
},
{
"epoch": 0.19031835556387158,
"grad_norm": 0.34114930033683777,
"learning_rate": 2.2211055276381913e-05,
"loss": 1.9559,
"step": 1780
},
{
"epoch": 0.19138755980861244,
"grad_norm": 0.23545467853546143,
"learning_rate": 2.120603015075377e-05,
"loss": 1.849,
"step": 1790
},
{
"epoch": 0.1924567640533533,
"grad_norm": 0.29872679710388184,
"learning_rate": 2.020100502512563e-05,
"loss": 1.9186,
"step": 1800
},
{
"epoch": 0.19352596829809415,
"grad_norm": 0.34200844168663025,
"learning_rate": 1.9195979899497487e-05,
"loss": 1.9165,
"step": 1810
},
{
"epoch": 0.19459517254283498,
"grad_norm": 0.3605504035949707,
"learning_rate": 1.8190954773869348e-05,
"loss": 1.9204,
"step": 1820
},
{
"epoch": 0.19566437678757584,
"grad_norm": 0.3210300803184509,
"learning_rate": 1.7185929648241206e-05,
"loss": 1.9172,
"step": 1830
},
{
"epoch": 0.1967335810323167,
"grad_norm": 0.31484687328338623,
"learning_rate": 1.6180904522613068e-05,
"loss": 1.9356,
"step": 1840
},
{
"epoch": 0.19780278527705755,
"grad_norm": 0.335658460855484,
"learning_rate": 1.5175879396984927e-05,
"loss": 1.9379,
"step": 1850
},
{
"epoch": 0.1988719895217984,
"grad_norm": 0.36707040667533875,
"learning_rate": 1.4170854271356784e-05,
"loss": 1.9189,
"step": 1860
},
{
"epoch": 0.19994119376653927,
"grad_norm": 0.2470160871744156,
"learning_rate": 1.3165829145728645e-05,
"loss": 1.9095,
"step": 1870
},
{
"epoch": 0.2010103980112801,
"grad_norm": 0.24876108765602112,
"learning_rate": 1.2160804020100503e-05,
"loss": 1.8907,
"step": 1880
},
{
"epoch": 0.20207960225602095,
"grad_norm": 0.31706857681274414,
"learning_rate": 1.1155778894472363e-05,
"loss": 1.9588,
"step": 1890
},
{
"epoch": 0.2031488065007618,
"grad_norm": 0.27313029766082764,
"learning_rate": 1.0150753768844223e-05,
"loss": 1.936,
"step": 1900
},
{
"epoch": 0.20421801074550266,
"grad_norm": 0.2927481532096863,
"learning_rate": 9.14572864321608e-06,
"loss": 1.904,
"step": 1910
},
{
"epoch": 0.20528721499024352,
"grad_norm": 0.261416494846344,
"learning_rate": 8.14070351758794e-06,
"loss": 1.9233,
"step": 1920
},
{
"epoch": 0.20635641923498435,
"grad_norm": 0.2947026193141937,
"learning_rate": 7.1356783919597995e-06,
"loss": 1.8992,
"step": 1930
},
{
"epoch": 0.2074256234797252,
"grad_norm": 0.31104397773742676,
"learning_rate": 6.130653266331659e-06,
"loss": 1.9726,
"step": 1940
},
{
"epoch": 0.20849482772446606,
"grad_norm": 0.2600247263908386,
"learning_rate": 5.125628140703518e-06,
"loss": 2.02,
"step": 1950
},
{
"epoch": 0.20956403196920692,
"grad_norm": 0.30064964294433594,
"learning_rate": 4.120603015075377e-06,
"loss": 1.8599,
"step": 1960
},
{
"epoch": 0.21063323621394778,
"grad_norm": 0.2797416150569916,
"learning_rate": 3.1155778894472364e-06,
"loss": 1.9514,
"step": 1970
},
{
"epoch": 0.21170244045868863,
"grad_norm": 0.34935057163238525,
"learning_rate": 2.1105527638190953e-06,
"loss": 1.8716,
"step": 1980
},
{
"epoch": 0.21277164470342946,
"grad_norm": 0.2744124233722687,
"learning_rate": 1.1055276381909548e-06,
"loss": 1.9384,
"step": 1990
},
{
"epoch": 0.21384084894817032,
"grad_norm": 0.2645925283432007,
"learning_rate": 1.0050251256281409e-07,
"loss": 1.9418,
"step": 2000
}
],
"logging_steps": 10,
"max_steps": 2000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.9738373840896e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}