{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 1028,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.019464720194647202,
"grad_norm": 467.6652206388871,
"learning_rate": 1.7475728155339808e-06,
"loss": 5.9691,
"step": 10
},
{
"epoch": 0.038929440389294405,
"grad_norm": 166.7345064857588,
"learning_rate": 3.689320388349515e-06,
"loss": 4.3874,
"step": 20
},
{
"epoch": 0.058394160583941604,
"grad_norm": 41.73023463472394,
"learning_rate": 5.631067961165049e-06,
"loss": 3.5618,
"step": 30
},
{
"epoch": 0.07785888077858881,
"grad_norm": 244.37911336081038,
"learning_rate": 7.572815533980583e-06,
"loss": 3.4526,
"step": 40
},
{
"epoch": 0.09732360097323602,
"grad_norm": 129.8562848389396,
"learning_rate": 9.514563106796117e-06,
"loss": 3.1905,
"step": 50
},
{
"epoch": 0.11678832116788321,
"grad_norm": 175.44705905680223,
"learning_rate": 1.145631067961165e-05,
"loss": 3.2603,
"step": 60
},
{
"epoch": 0.1362530413625304,
"grad_norm": 46.34518249123656,
"learning_rate": 1.3398058252427187e-05,
"loss": 3.0195,
"step": 70
},
{
"epoch": 0.15571776155717762,
"grad_norm": 101.58706380551824,
"learning_rate": 1.533980582524272e-05,
"loss": 3.4084,
"step": 80
},
{
"epoch": 0.17518248175182483,
"grad_norm": 45.705547095931536,
"learning_rate": 1.7281553398058253e-05,
"loss": 3.3306,
"step": 90
},
{
"epoch": 0.19464720194647203,
"grad_norm": 60.90122504323018,
"learning_rate": 1.922330097087379e-05,
"loss": 3.257,
"step": 100
},
{
"epoch": 0.2141119221411192,
"grad_norm": 21.059484514388654,
"learning_rate": 1.999792377815462e-05,
"loss": 3.0869,
"step": 110
},
{
"epoch": 0.23357664233576642,
"grad_norm": 25.875434967697206,
"learning_rate": 1.9985238877782747e-05,
"loss": 3.0562,
"step": 120
},
{
"epoch": 0.25304136253041365,
"grad_norm": 72.06341394647221,
"learning_rate": 1.9961037146461106e-05,
"loss": 2.9625,
"step": 130
},
{
"epoch": 0.2725060827250608,
"grad_norm": 98.87073080892851,
"learning_rate": 1.992534649811862e-05,
"loss": 2.9114,
"step": 140
},
{
"epoch": 0.291970802919708,
"grad_norm": 76.51312208443895,
"learning_rate": 1.9878208097835908e-05,
"loss": 2.7689,
"step": 150
},
{
"epoch": 0.31143552311435524,
"grad_norm": 18.366349202822,
"learning_rate": 1.9819676314366084e-05,
"loss": 2.9176,
"step": 160
},
{
"epoch": 0.3309002433090024,
"grad_norm": 29.9148451700412,
"learning_rate": 1.974981865742661e-05,
"loss": 2.7396,
"step": 170
},
{
"epoch": 0.35036496350364965,
"grad_norm": 72.74727837532194,
"learning_rate": 1.9668715699834553e-05,
"loss": 2.8081,
"step": 180
},
{
"epoch": 0.36982968369829683,
"grad_norm": 63.54808740972455,
"learning_rate": 1.957646098457507e-05,
"loss": 2.7765,
"step": 190
},
{
"epoch": 0.38929440389294406,
"grad_norm": 16.64660992237349,
"learning_rate": 1.9473160916910268e-05,
"loss": 2.8874,
"step": 200
},
{
"epoch": 0.40875912408759124,
"grad_norm": 18.115912598067805,
"learning_rate": 1.9358934641652897e-05,
"loss": 2.9733,
"step": 210
},
{
"epoch": 0.4282238442822384,
"grad_norm": 20.256318489786143,
"learning_rate": 1.923391390574645e-05,
"loss": 2.8468,
"step": 220
},
{
"epoch": 0.44768856447688565,
"grad_norm": 23.826173126914124,
"learning_rate": 1.909824290631012e-05,
"loss": 2.9378,
"step": 230
},
{
"epoch": 0.46715328467153283,
"grad_norm": 52.664784350677984,
"learning_rate": 1.8952078124323922e-05,
"loss": 2.8966,
"step": 240
},
{
"epoch": 0.48661800486618007,
"grad_norm": 20.857131222868926,
"learning_rate": 1.8795588144145784e-05,
"loss": 2.6412,
"step": 250
},
{
"epoch": 0.5060827250608273,
"grad_norm": 31.42812085173246,
"learning_rate": 1.8628953459068766e-05,
"loss": 2.7321,
"step": 260
},
{
"epoch": 0.5255474452554745,
"grad_norm": 65.70219765163803,
"learning_rate": 1.8452366263142694e-05,
"loss": 2.7163,
"step": 270
},
{
"epoch": 0.5450121654501217,
"grad_norm": 13.411159084121095,
"learning_rate": 1.8266030229500307e-05,
"loss": 2.6319,
"step": 280
},
{
"epoch": 0.5644768856447688,
"grad_norm": 23.156038596253314,
"learning_rate": 1.807016027544359e-05,
"loss": 2.6628,
"step": 290
},
{
"epoch": 0.583941605839416,
"grad_norm": 12.026706122091495,
"learning_rate": 1.786498231456125e-05,
"loss": 2.5571,
"step": 300
},
{
"epoch": 0.6034063260340633,
"grad_norm": 35.115708307179496,
"learning_rate": 1.7650732996163246e-05,
"loss": 2.771,
"step": 310
},
{
"epoch": 0.6228710462287105,
"grad_norm": 16.57795060108902,
"learning_rate": 1.7427659432332844e-05,
"loss": 2.6367,
"step": 320
},
{
"epoch": 0.6423357664233577,
"grad_norm": 29.470813056524836,
"learning_rate": 1.7196018912911126e-05,
"loss": 2.6234,
"step": 330
},
{
"epoch": 0.6618004866180048,
"grad_norm": 30.107755508667903,
"learning_rate": 1.6956078608742567e-05,
"loss": 2.7101,
"step": 340
},
{
"epoch": 0.681265206812652,
"grad_norm": 20.433411829181804,
"learning_rate": 1.6708115263524047e-05,
"loss": 2.5234,
"step": 350
},
{
"epoch": 0.7007299270072993,
"grad_norm": 18.14797765461415,
"learning_rate": 1.6452414874612608e-05,
"loss": 2.512,
"step": 360
},
{
"epoch": 0.7201946472019465,
"grad_norm": 19.011628268140942,
"learning_rate": 1.618927236316026e-05,
"loss": 2.6771,
"step": 370
},
{
"epoch": 0.7396593673965937,
"grad_norm": 22.399121490864864,
"learning_rate": 1.5918991233956145e-05,
"loss": 2.663,
"step": 380
},
{
"epoch": 0.7591240875912408,
"grad_norm": 25.84860211873027,
"learning_rate": 1.5641883225368468e-05,
"loss": 2.6363,
"step": 390
},
{
"epoch": 0.7785888077858881,
"grad_norm": 19.260026780424496,
"learning_rate": 1.5358267949789968e-05,
"loss": 2.603,
"step": 400
},
{
"epoch": 0.7980535279805353,
"grad_norm": 34.82940745668127,
"learning_rate": 1.5068472525001554e-05,
"loss": 2.5687,
"step": 410
},
{
"epoch": 0.8175182481751825,
"grad_norm": 17.062285116452156,
"learning_rate": 1.4772831196879383e-05,
"loss": 2.4697,
"step": 420
},
{
"epoch": 0.8369829683698297,
"grad_norm": 14.499086665174008,
"learning_rate": 1.4471684953880458e-05,
"loss": 2.5395,
"step": 430
},
{
"epoch": 0.8564476885644768,
"grad_norm": 10.249310145977729,
"learning_rate": 1.416538113375145e-05,
"loss": 2.6447,
"step": 440
},
{
"epoch": 0.8759124087591241,
"grad_norm": 35.81446244884903,
"learning_rate": 1.3854273022914333e-05,
"loss": 2.612,
"step": 450
},
{
"epoch": 0.8953771289537713,
"grad_norm": 36.08647745411907,
"learning_rate": 1.3538719448990905e-05,
"loss": 2.591,
"step": 460
},
{
"epoch": 0.9148418491484185,
"grad_norm": 13.136666748439112,
"learning_rate": 1.3219084366936172e-05,
"loss": 2.4204,
"step": 470
},
{
"epoch": 0.9343065693430657,
"grad_norm": 32.46036792908647,
"learning_rate": 1.2895736439257933e-05,
"loss": 2.489,
"step": 480
},
{
"epoch": 0.9537712895377128,
"grad_norm": 15.024934704537909,
"learning_rate": 1.256904861080674e-05,
"loss": 2.5419,
"step": 490
},
{
"epoch": 0.9732360097323601,
"grad_norm": 35.70123140670445,
"learning_rate": 1.223939767862668e-05,
"loss": 2.5546,
"step": 500
},
{
"epoch": 0.9927007299270073,
"grad_norm": 19.440166678844516,
"learning_rate": 1.190716385736307e-05,
"loss": 2.4077,
"step": 510
},
{
"epoch": 1.0116788321167882,
"grad_norm": 14.212810026975665,
"learning_rate": 1.1572730340728362e-05,
"loss": 2.0042,
"step": 520
},
{
"epoch": 1.0311435523114356,
"grad_norm": 29.760352735741566,
"learning_rate": 1.1236482859532019e-05,
"loss": 1.8801,
"step": 530
},
{
"epoch": 1.0506082725060828,
"grad_norm": 16.61510553154965,
"learning_rate": 1.0898809236784152e-05,
"loss": 1.8509,
"step": 540
},
{
"epoch": 1.07007299270073,
"grad_norm": 26.85804078008826,
"learning_rate": 1.0560098940386028e-05,
"loss": 1.9301,
"step": 550
},
{
"epoch": 1.0895377128953772,
"grad_norm": 14.355921769601613,
"learning_rate": 1.0220742633923393e-05,
"loss": 1.8723,
"step": 560
},
{
"epoch": 1.1090024330900243,
"grad_norm": 18.597689511400556,
"learning_rate": 9.88113172608072e-06,
"loss": 1.8816,
"step": 570
},
{
"epoch": 1.1284671532846715,
"grad_norm": 12.629375488202491,
"learning_rate": 9.541657919196049e-06,
"loss": 1.9158,
"step": 580
},
{
"epoch": 1.1479318734793187,
"grad_norm": 14.90971801614589,
"learning_rate": 9.202712757477145e-06,
"loss": 1.8408,
"step": 590
},
{
"epoch": 1.1673965936739659,
"grad_norm": 9.39274930443642,
"learning_rate": 8.864687175400045e-06,
"loss": 1.8544,
"step": 600
},
{
"epoch": 1.186861313868613,
"grad_norm": 17.042793241075625,
"learning_rate": 8.527971046810845e-06,
"loss": 1.8067,
"step": 610
},
{
"epoch": 1.2063260340632604,
"grad_norm": 33.97884279567901,
"learning_rate": 8.192952735250815e-06,
"loss": 1.9536,
"step": 620
},
{
"epoch": 1.2257907542579076,
"grad_norm": 24.082936845827216,
"learning_rate": 7.86001864602348e-06,
"loss": 1.7986,
"step": 630
},
{
"epoch": 1.2452554744525548,
"grad_norm": 12.692502249821047,
"learning_rate": 7.529552780520292e-06,
"loss": 1.7786,
"step": 640
},
{
"epoch": 1.264720194647202,
"grad_norm": 19.25902210538214,
"learning_rate": 7.201936293318946e-06,
"loss": 1.8931,
"step": 650
},
{
"epoch": 1.2841849148418492,
"grad_norm": 16.012762519773602,
"learning_rate": 6.877547052565177e-06,
"loss": 1.8869,
"step": 660
},
{
"epoch": 1.3036496350364963,
"grad_norm": 12.414650889891156,
"learning_rate": 6.556759204145069e-06,
"loss": 1.8596,
"step": 670
},
{
"epoch": 1.3231143552311435,
"grad_norm": 8.664945004312923,
"learning_rate": 6.239942740150571e-06,
"loss": 1.9327,
"step": 680
},
{
"epoch": 1.3425790754257907,
"grad_norm": 6.557048308984662,
"learning_rate": 5.927463072135936e-06,
"loss": 1.788,
"step": 690
},
{
"epoch": 1.3620437956204379,
"grad_norm": 8.009979100261784,
"learning_rate": 5.619680609657294e-06,
"loss": 1.7406,
"step": 700
},
{
"epoch": 1.381508515815085,
"grad_norm": 9.81447987045354,
"learning_rate": 5.316950344581439e-06,
"loss": 1.7733,
"step": 710
},
{
"epoch": 1.4009732360097322,
"grad_norm": 20.26417655558776,
"learning_rate": 5.019621441643336e-06,
"loss": 1.8627,
"step": 720
},
{
"epoch": 1.4204379562043796,
"grad_norm": 10.193673037660284,
"learning_rate": 4.728036835724512e-06,
"loss": 1.7829,
"step": 730
},
{
"epoch": 1.4399026763990268,
"grad_norm": 34.687716750512784,
"learning_rate": 4.442532836316909e-06,
"loss": 1.8428,
"step": 740
},
{
"epoch": 1.459367396593674,
"grad_norm": 16.272882238934493,
"learning_rate": 4.163438739628359e-06,
"loss": 1.7732,
"step": 750
},
{
"epoch": 1.4788321167883212,
"grad_norm": 23.631506760918857,
"learning_rate": 3.891076448777046e-06,
"loss": 1.8549,
"step": 760
},
{
"epoch": 1.4982968369829683,
"grad_norm": 18.67119103579321,
"learning_rate": 3.625760102513103e-06,
"loss": 1.7962,
"step": 770
},
{
"epoch": 1.5177615571776155,
"grad_norm": 8.089337911474972,
"learning_rate": 3.367795712895483e-06,
"loss": 1.8366,
"step": 780
},
{
"epoch": 1.537226277372263,
"grad_norm": 9.287748017940578,
"learning_rate": 3.117480812342054e-06,
"loss": 1.7671,
"step": 790
},
{
"epoch": 1.55669099756691,
"grad_norm": 17.5908812394651,
"learning_rate": 2.8751041104599818e-06,
"loss": 1.7606,
"step": 800
},
{
"epoch": 1.5761557177615573,
"grad_norm": 11.662802800948311,
"learning_rate": 2.6409451610522287e-06,
"loss": 1.8264,
"step": 810
},
{
"epoch": 1.5956204379562045,
"grad_norm": 23.001123043377426,
"learning_rate": 2.4152740396842044e-06,
"loss": 1.7828,
"step": 820
},
{
"epoch": 1.6150851581508516,
"grad_norm": 14.067182434026916,
"learning_rate": 2.1983510321825053e-06,
"loss": 1.7528,
"step": 830
},
{
"epoch": 1.6345498783454988,
"grad_norm": 5.048858419169921,
"learning_rate": 1.9904263344249743e-06,
"loss": 1.7638,
"step": 840
},
{
"epoch": 1.654014598540146,
"grad_norm": 4.150019848481595,
"learning_rate": 1.7917397637683799e-06,
"loss": 1.7929,
"step": 850
},
{
"epoch": 1.6734793187347932,
"grad_norm": 18.939088328160533,
"learning_rate": 1.602520482446519e-06,
"loss": 1.7464,
"step": 860
},
{
"epoch": 1.6929440389294403,
"grad_norm": 15.458459329408187,
"learning_rate": 1.4229867332577962e-06,
"loss": 1.7771,
"step": 870
},
{
"epoch": 1.7124087591240875,
"grad_norm": 11.89614505037642,
"learning_rate": 1.2533455878471158e-06,
"loss": 1.7895,
"step": 880
},
{
"epoch": 1.7318734793187347,
"grad_norm": 10.562392557248652,
"learning_rate": 1.0937927078724242e-06,
"loss": 1.7229,
"step": 890
},
{
"epoch": 1.7513381995133819,
"grad_norm": 22.418236930599566,
"learning_rate": 9.445121193313678e-07,
"loss": 1.785,
"step": 900
},
{
"epoch": 1.770802919708029,
"grad_norm": 9.965628962980276,
"learning_rate": 8.056760003083519e-07,
"loss": 1.8255,
"step": 910
},
{
"epoch": 1.7902676399026762,
"grad_norm": 27.744934957716747,
"learning_rate": 6.774444823868153e-07,
"loss": 1.802,
"step": 920
},
{
"epoch": 1.8097323600973236,
"grad_norm": 14.923494078289714,
"learning_rate": 5.59965465955763e-07,
"loss": 1.7966,
"step": 930
},
{
"epoch": 1.8291970802919708,
"grad_norm": 30.987342851593098,
"learning_rate": 4.533744496235859e-07,
"loss": 1.8231,
"step": 940
},
{
"epoch": 1.848661800486618,
"grad_norm": 9.228802981541063,
"learning_rate": 3.577943739359102e-07,
"loss": 1.769,
"step": 950
},
{
"epoch": 1.8681265206812652,
"grad_norm": 19.13021811644498,
"learning_rate": 2.7333547957774545e-07,
"loss": 1.8473,
"step": 960
},
{
"epoch": 1.8875912408759126,
"grad_norm": 13.567665700168476,
"learning_rate": 2.0009518022346075e-07,
"loss": 1.8161,
"step": 970
},
{
"epoch": 1.9070559610705597,
"grad_norm": 3.085249894515279,
"learning_rate": 1.3815795018125133e-07,
"loss": 1.7546,
"step": 980
},
{
"epoch": 1.926520681265207,
"grad_norm": 25.270283293442702,
"learning_rate": 8.759522696168865e-08,
"loss": 1.7881,
"step": 990
},
{
"epoch": 1.945985401459854,
"grad_norm": 19.015252808776353,
"learning_rate": 4.846532888272304e-08,
"loss": 1.7485,
"step": 1000
},
{
"epoch": 1.9654501216545013,
"grad_norm": 12.37689664623936,
"learning_rate": 2.081338780617337e-08,
"loss": 1.7304,
"step": 1010
},
{
"epoch": 1.9849148418491485,
"grad_norm": 13.01810227471341,
"learning_rate": 4.671297083285176e-09,
"loss": 1.7671,
"step": 1020
}
],
"logging_steps": 10,
"max_steps": 1028,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 354034804457472.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}