{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0175438596491229,
"eval_steps": 500,
"global_step": 58,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.017543859649122806,
"grad_norm": 0.2277740020067137,
"learning_rate": 1.3333333333333334e-06,
"loss": 1.0079,
"step": 1
},
{
"epoch": 0.03508771929824561,
"grad_norm": 0.22928538645450053,
"learning_rate": 2.666666666666667e-06,
"loss": 1.0155,
"step": 2
},
{
"epoch": 0.05263157894736842,
"grad_norm": 0.23409780929414992,
"learning_rate": 4.000000000000001e-06,
"loss": 1.0421,
"step": 3
},
{
"epoch": 0.07017543859649122,
"grad_norm": 0.23025958832232085,
"learning_rate": 5.333333333333334e-06,
"loss": 1.0149,
"step": 4
},
{
"epoch": 0.08771929824561403,
"grad_norm": 0.2295803794644868,
"learning_rate": 6.666666666666667e-06,
"loss": 1.0157,
"step": 5
},
{
"epoch": 0.10526315789473684,
"grad_norm": 0.23515471946848018,
"learning_rate": 8.000000000000001e-06,
"loss": 1.0483,
"step": 6
},
{
"epoch": 0.12280701754385964,
"grad_norm": 0.22329817289145018,
"learning_rate": 9.333333333333334e-06,
"loss": 1.0304,
"step": 7
},
{
"epoch": 0.14035087719298245,
"grad_norm": 0.22637948060018379,
"learning_rate": 1.0666666666666667e-05,
"loss": 1.0359,
"step": 8
},
{
"epoch": 0.15789473684210525,
"grad_norm": 0.21383156578456822,
"learning_rate": 1.2e-05,
"loss": 1.019,
"step": 9
},
{
"epoch": 0.17543859649122806,
"grad_norm": 0.20957936278573103,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.0361,
"step": 10
},
{
"epoch": 0.19298245614035087,
"grad_norm": 0.20598224772002247,
"learning_rate": 1.4666666666666666e-05,
"loss": 1.0221,
"step": 11
},
{
"epoch": 0.21052631578947367,
"grad_norm": 0.1930203246860445,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.9957,
"step": 12
},
{
"epoch": 0.22807017543859648,
"grad_norm": 0.18271011907622706,
"learning_rate": 1.7333333333333336e-05,
"loss": 1.0246,
"step": 13
},
{
"epoch": 0.24561403508771928,
"grad_norm": 0.17796002507846193,
"learning_rate": 1.866666666666667e-05,
"loss": 0.9837,
"step": 14
},
{
"epoch": 0.2631578947368421,
"grad_norm": 0.16795411728492177,
"learning_rate": 2e-05,
"loss": 0.9983,
"step": 15
},
{
"epoch": 0.2807017543859649,
"grad_norm": 0.1650788608050199,
"learning_rate": 1.9994965423831853e-05,
"loss": 0.9913,
"step": 16
},
{
"epoch": 0.2982456140350877,
"grad_norm": 0.1512189065589105,
"learning_rate": 1.9979866764718846e-05,
"loss": 0.9859,
"step": 17
},
{
"epoch": 0.3157894736842105,
"grad_norm": 0.13287398099914535,
"learning_rate": 1.9954719225730847e-05,
"loss": 0.9611,
"step": 18
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.13136472842938843,
"learning_rate": 1.9919548128307954e-05,
"loss": 0.9668,
"step": 19
},
{
"epoch": 0.3508771929824561,
"grad_norm": 0.14548926421813196,
"learning_rate": 1.9874388886763944e-05,
"loss": 0.9588,
"step": 20
},
{
"epoch": 0.3684210526315789,
"grad_norm": 0.13144155394538015,
"learning_rate": 1.9819286972627066e-05,
"loss": 0.9471,
"step": 21
},
{
"epoch": 0.38596491228070173,
"grad_norm": 0.1196142624795924,
"learning_rate": 1.9754297868854075e-05,
"loss": 0.9695,
"step": 22
},
{
"epoch": 0.40350877192982454,
"grad_norm": 0.12131818178632287,
"learning_rate": 1.9679487013963566e-05,
"loss": 0.9262,
"step": 23
},
{
"epoch": 0.42105263157894735,
"grad_norm": 0.11310397976940662,
"learning_rate": 1.9594929736144978e-05,
"loss": 0.9122,
"step": 24
},
{
"epoch": 0.43859649122807015,
"grad_norm": 0.1155410896041276,
"learning_rate": 1.9500711177409456e-05,
"loss": 0.9317,
"step": 25
},
{
"epoch": 0.45614035087719296,
"grad_norm": 0.1112297080718884,
"learning_rate": 1.9396926207859085e-05,
"loss": 0.8907,
"step": 26
},
{
"epoch": 0.47368421052631576,
"grad_norm": 0.09467229725540632,
"learning_rate": 1.9283679330160726e-05,
"loss": 0.8948,
"step": 27
},
{
"epoch": 0.49122807017543857,
"grad_norm": 0.0886221809302906,
"learning_rate": 1.9161084574320696e-05,
"loss": 0.9253,
"step": 28
},
{
"epoch": 0.5087719298245614,
"grad_norm": 0.08146640996640883,
"learning_rate": 1.9029265382866216e-05,
"loss": 0.9183,
"step": 29
},
{
"epoch": 0.5263157894736842,
"grad_norm": 0.08408705864629097,
"learning_rate": 1.8888354486549238e-05,
"loss": 0.9032,
"step": 30
},
{
"epoch": 0.543859649122807,
"grad_norm": 0.0817987458580947,
"learning_rate": 1.873849377069785e-05,
"loss": 0.9052,
"step": 31
},
{
"epoch": 0.5614035087719298,
"grad_norm": 0.0814844611264812,
"learning_rate": 1.8579834132349773e-05,
"loss": 0.9016,
"step": 32
},
{
"epoch": 0.5789473684210527,
"grad_norm": 0.08641094467764004,
"learning_rate": 1.8412535328311813e-05,
"loss": 0.8773,
"step": 33
},
{
"epoch": 0.5964912280701754,
"grad_norm": 0.07481802668162577,
"learning_rate": 1.8236765814298328e-05,
"loss": 0.8848,
"step": 34
},
{
"epoch": 0.6140350877192983,
"grad_norm": 0.07366727308629874,
"learning_rate": 1.8052702575310588e-05,
"loss": 0.8861,
"step": 35
},
{
"epoch": 0.631578947368421,
"grad_norm": 0.07604732280098149,
"learning_rate": 1.7860530947427878e-05,
"loss": 0.8798,
"step": 36
},
{
"epoch": 0.6491228070175439,
"grad_norm": 0.06608570283088218,
"learning_rate": 1.766044443118978e-05,
"loss": 0.8873,
"step": 37
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.06442356154926354,
"learning_rate": 1.745264449675755e-05,
"loss": 0.8802,
"step": 38
},
{
"epoch": 0.6842105263157895,
"grad_norm": 0.06437802994991032,
"learning_rate": 1.72373403810507e-05,
"loss": 0.8958,
"step": 39
},
{
"epoch": 0.7017543859649122,
"grad_norm": 0.060024266341070824,
"learning_rate": 1.7014748877063212e-05,
"loss": 0.8909,
"step": 40
},
{
"epoch": 0.7192982456140351,
"grad_norm": 0.06147598388170884,
"learning_rate": 1.6785094115571323e-05,
"loss": 0.8678,
"step": 41
},
{
"epoch": 0.7368421052631579,
"grad_norm": 0.05901942544394341,
"learning_rate": 1.6548607339452853e-05,
"loss": 0.866,
"step": 42
},
{
"epoch": 0.7543859649122807,
"grad_norm": 0.060690090313505786,
"learning_rate": 1.6305526670845225e-05,
"loss": 0.866,
"step": 43
},
{
"epoch": 0.7719298245614035,
"grad_norm": 0.057568056665357546,
"learning_rate": 1.6056096871376667e-05,
"loss": 0.8614,
"step": 44
},
{
"epoch": 0.7894736842105263,
"grad_norm": 0.05836987079061958,
"learning_rate": 1.5800569095711983e-05,
"loss": 0.8695,
"step": 45
},
{
"epoch": 0.8070175438596491,
"grad_norm": 0.05225892385784994,
"learning_rate": 1.5539200638661106e-05,
"loss": 0.8854,
"step": 46
},
{
"epoch": 0.8245614035087719,
"grad_norm": 0.06514120114392602,
"learning_rate": 1.5272254676105026e-05,
"loss": 0.8763,
"step": 47
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.05838519437169604,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.8819,
"step": 48
},
{
"epoch": 0.8596491228070176,
"grad_norm": 0.055147055816124176,
"learning_rate": 1.472271074772683e-05,
"loss": 0.8659,
"step": 49
},
{
"epoch": 0.8771929824561403,
"grad_norm": 0.05286874702563172,
"learning_rate": 1.4440666126057743e-05,
"loss": 0.8734,
"step": 50
},
{
"epoch": 0.8947368421052632,
"grad_norm": 0.052084420196546105,
"learning_rate": 1.4154150130018867e-05,
"loss": 0.8849,
"step": 51
},
{
"epoch": 0.9122807017543859,
"grad_norm": 0.04814148180474802,
"learning_rate": 1.3863451256931286e-05,
"loss": 0.852,
"step": 52
},
{
"epoch": 0.9298245614035088,
"grad_norm": 0.050892470070269695,
"learning_rate": 1.356886221591872e-05,
"loss": 0.8675,
"step": 53
},
{
"epoch": 0.9473684210526315,
"grad_norm": 0.04691348989818088,
"learning_rate": 1.3270679633174219e-05,
"loss": 0.8723,
"step": 54
},
{
"epoch": 0.9649122807017544,
"grad_norm": 0.046602019196036536,
"learning_rate": 1.296920375328275e-05,
"loss": 0.8539,
"step": 55
},
{
"epoch": 0.9824561403508771,
"grad_norm": 0.0654888374071316,
"learning_rate": 1.266473813690035e-05,
"loss": 0.8633,
"step": 56
},
{
"epoch": 1.0,
"grad_norm": 0.04722014793729854,
"learning_rate": 1.2357589355094275e-05,
"loss": 0.8473,
"step": 57
},
{
"epoch": 1.0175438596491229,
"grad_norm": 0.04905237240990574,
"learning_rate": 1.2048066680651908e-05,
"loss": 0.8564,
"step": 58
}
],
"logging_steps": 1,
"max_steps": 114,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 29,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.038361147539456e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}