{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 169,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.029585798816568046,
"grad_norm": 0.9565215877406359,
"learning_rate": 1.940828402366864e-05,
"loss": 1.1887,
"step": 5
},
{
"epoch": 0.05917159763313609,
"grad_norm": 0.4750293559517379,
"learning_rate": 1.881656804733728e-05,
"loss": 1.0401,
"step": 10
},
{
"epoch": 0.08875739644970414,
"grad_norm": 0.32953737249996196,
"learning_rate": 1.822485207100592e-05,
"loss": 0.968,
"step": 15
},
{
"epoch": 0.11834319526627218,
"grad_norm": 0.318886344340766,
"learning_rate": 1.7633136094674557e-05,
"loss": 0.9185,
"step": 20
},
{
"epoch": 0.14792899408284024,
"grad_norm": 0.2638389003046881,
"learning_rate": 1.70414201183432e-05,
"loss": 0.9058,
"step": 25
},
{
"epoch": 0.17751479289940827,
"grad_norm": 0.32122906405989515,
"learning_rate": 1.6449704142011837e-05,
"loss": 0.9017,
"step": 30
},
{
"epoch": 0.20710059171597633,
"grad_norm": 0.23483871965844322,
"learning_rate": 1.5857988165680475e-05,
"loss": 0.8819,
"step": 35
},
{
"epoch": 0.23668639053254437,
"grad_norm": 0.20244306887706498,
"learning_rate": 1.5266272189349113e-05,
"loss": 0.8606,
"step": 40
},
{
"epoch": 0.26627218934911245,
"grad_norm": 0.22372494047589192,
"learning_rate": 1.4674556213017752e-05,
"loss": 0.8706,
"step": 45
},
{
"epoch": 0.2958579881656805,
"grad_norm": 0.21466269339189736,
"learning_rate": 1.4082840236686392e-05,
"loss": 0.8513,
"step": 50
},
{
"epoch": 0.3254437869822485,
"grad_norm": 0.20954818106403592,
"learning_rate": 1.349112426035503e-05,
"loss": 0.8265,
"step": 55
},
{
"epoch": 0.35502958579881655,
"grad_norm": 0.21146922721065695,
"learning_rate": 1.2899408284023668e-05,
"loss": 0.8576,
"step": 60
},
{
"epoch": 0.38461538461538464,
"grad_norm": 0.19665190091249535,
"learning_rate": 1.230769230769231e-05,
"loss": 0.8218,
"step": 65
},
{
"epoch": 0.41420118343195267,
"grad_norm": 0.20585290386463395,
"learning_rate": 1.1715976331360948e-05,
"loss": 0.8216,
"step": 70
},
{
"epoch": 0.4437869822485207,
"grad_norm": 0.18556862617304073,
"learning_rate": 1.1124260355029586e-05,
"loss": 0.8382,
"step": 75
},
{
"epoch": 0.47337278106508873,
"grad_norm": 0.18750887001217453,
"learning_rate": 1.0532544378698226e-05,
"loss": 0.8172,
"step": 80
},
{
"epoch": 0.5029585798816568,
"grad_norm": 0.19315058093994988,
"learning_rate": 9.940828402366864e-06,
"loss": 0.8314,
"step": 85
},
{
"epoch": 0.5325443786982249,
"grad_norm": 0.2050825812729317,
"learning_rate": 9.349112426035503e-06,
"loss": 0.8135,
"step": 90
},
{
"epoch": 0.5621301775147929,
"grad_norm": 0.2099801364496423,
"learning_rate": 8.757396449704143e-06,
"loss": 0.8203,
"step": 95
},
{
"epoch": 0.591715976331361,
"grad_norm": 0.19404392312346527,
"learning_rate": 8.165680473372781e-06,
"loss": 0.7997,
"step": 100
},
{
"epoch": 0.591715976331361,
"eval_loss": 0.8431870937347412,
"eval_runtime": 1.6461,
"eval_samples_per_second": 77.758,
"eval_steps_per_second": 2.43,
"step": 100
},
{
"epoch": 0.621301775147929,
"grad_norm": 0.18171103310198067,
"learning_rate": 7.573964497041421e-06,
"loss": 0.8093,
"step": 105
},
{
"epoch": 0.650887573964497,
"grad_norm": 0.19297652713486121,
"learning_rate": 6.98224852071006e-06,
"loss": 0.8127,
"step": 110
},
{
"epoch": 0.6804733727810651,
"grad_norm": 0.18560671609846813,
"learning_rate": 6.3905325443786995e-06,
"loss": 0.8045,
"step": 115
},
{
"epoch": 0.7100591715976331,
"grad_norm": 0.1849552337643038,
"learning_rate": 5.7988165680473375e-06,
"loss": 0.7871,
"step": 120
},
{
"epoch": 0.7396449704142012,
"grad_norm": 0.19462300584500286,
"learning_rate": 5.207100591715976e-06,
"loss": 0.7974,
"step": 125
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.18273706345405402,
"learning_rate": 4.615384615384616e-06,
"loss": 0.809,
"step": 130
},
{
"epoch": 0.7988165680473372,
"grad_norm": 0.1774041623216232,
"learning_rate": 4.023668639053255e-06,
"loss": 0.8001,
"step": 135
},
{
"epoch": 0.8284023668639053,
"grad_norm": 0.1912069740049753,
"learning_rate": 3.4319526627218935e-06,
"loss": 0.8038,
"step": 140
},
{
"epoch": 0.8579881656804734,
"grad_norm": 0.1757803683582901,
"learning_rate": 2.840236686390533e-06,
"loss": 0.799,
"step": 145
},
{
"epoch": 0.8875739644970414,
"grad_norm": 0.1722960937134139,
"learning_rate": 2.2485207100591717e-06,
"loss": 0.7891,
"step": 150
},
{
"epoch": 0.9171597633136095,
"grad_norm": 0.1845277131426741,
"learning_rate": 1.656804733727811e-06,
"loss": 0.8253,
"step": 155
},
{
"epoch": 0.9467455621301775,
"grad_norm": 0.17325421407966338,
"learning_rate": 1.06508875739645e-06,
"loss": 0.792,
"step": 160
},
{
"epoch": 0.9763313609467456,
"grad_norm": 0.17868163897456654,
"learning_rate": 4.733727810650888e-07,
"loss": 0.7757,
"step": 165
},
{
"epoch": 1.0,
"step": 169,
"total_flos": 76916824473600.0,
"train_loss": 0.0,
"train_runtime": 2.1926,
"train_samples_per_second": 9855.825,
"train_steps_per_second": 77.077
}
],
"logging_steps": 5,
"max_steps": 169,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 76916824473600.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}