{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 169,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.029585798816568046,
"grad_norm": 2.345981653748066,
"learning_rate": 5.882352941176471e-06,
"loss": 1.0993,
"step": 5
},
{
"epoch": 0.05917159763313609,
"grad_norm": 1.634179708813976,
"learning_rate": 1.1764705882352942e-05,
"loss": 1.0403,
"step": 10
},
{
"epoch": 0.08875739644970414,
"grad_norm": 0.8897315348931364,
"learning_rate": 1.7647058823529414e-05,
"loss": 0.9533,
"step": 15
},
{
"epoch": 0.11834319526627218,
"grad_norm": 0.6063370817330525,
"learning_rate": 1.9980782984658682e-05,
"loss": 0.8923,
"step": 20
},
{
"epoch": 0.14792899408284024,
"grad_norm": 0.5397066772901767,
"learning_rate": 1.9863613034027224e-05,
"loss": 0.8552,
"step": 25
},
{
"epoch": 0.17751479289940827,
"grad_norm": 0.42679502726222557,
"learning_rate": 1.9641197940012136e-05,
"loss": 0.8283,
"step": 30
},
{
"epoch": 0.20710059171597633,
"grad_norm": 0.3759506880163052,
"learning_rate": 1.9315910880512792e-05,
"loss": 0.823,
"step": 35
},
{
"epoch": 0.23668639053254437,
"grad_norm": 0.4006437728829451,
"learning_rate": 1.8891222681391853e-05,
"loss": 0.8225,
"step": 40
},
{
"epoch": 0.26627218934911245,
"grad_norm": 0.344421083366018,
"learning_rate": 1.8371664782625287e-05,
"loss": 0.8074,
"step": 45
},
{
"epoch": 0.2958579881656805,
"grad_norm": 0.33565940861995097,
"learning_rate": 1.7762780887657576e-05,
"loss": 0.7978,
"step": 50
},
{
"epoch": 0.3254437869822485,
"grad_norm": 0.36708091137013027,
"learning_rate": 1.7071067811865477e-05,
"loss": 0.7877,
"step": 55
},
{
"epoch": 0.35502958579881655,
"grad_norm": 0.37230966234423446,
"learning_rate": 1.6303906161279554e-05,
"loss": 0.7981,
"step": 60
},
{
"epoch": 0.38461538461538464,
"grad_norm": 0.3421829082351356,
"learning_rate": 1.5469481581224274e-05,
"loss": 0.7721,
"step": 65
},
{
"epoch": 0.41420118343195267,
"grad_norm": 0.34582476617881,
"learning_rate": 1.4576697415156818e-05,
"loss": 0.7741,
"step": 70
},
{
"epoch": 0.4437869822485207,
"grad_norm": 0.33588032841998,
"learning_rate": 1.3635079705638298e-05,
"loss": 0.7852,
"step": 75
},
{
"epoch": 0.47337278106508873,
"grad_norm": 0.3495964302146845,
"learning_rate": 1.2654675551080724e-05,
"loss": 0.7666,
"step": 80
},
{
"epoch": 0.5029585798816568,
"grad_norm": 0.36116559595414593,
"learning_rate": 1.164594590280734e-05,
"loss": 0.7706,
"step": 85
},
{
"epoch": 0.5325443786982249,
"grad_norm": 0.3603638720069939,
"learning_rate": 1.0619653946285948e-05,
"loss": 0.77,
"step": 90
},
{
"epoch": 0.5621301775147929,
"grad_norm": 0.36188344962869246,
"learning_rate": 9.586750257511868e-06,
"loss": 0.7771,
"step": 95
},
{
"epoch": 0.591715976331361,
"grad_norm": 0.3343058399259233,
"learning_rate": 8.558255959926533e-06,
"loss": 0.753,
"step": 100
},
{
"epoch": 0.591715976331361,
"eval_loss": 0.7880676984786987,
"eval_runtime": 0.7823,
"eval_samples_per_second": 163.614,
"eval_steps_per_second": 5.113,
"step": 100
},
{
"epoch": 0.621301775147929,
"grad_norm": 0.33384320420975744,
"learning_rate": 7.545145128592009e-06,
"loss": 0.7569,
"step": 105
},
{
"epoch": 0.650887573964497,
"grad_norm": 0.34359828662237785,
"learning_rate": 6.558227696373617e-06,
"loss": 0.7681,
"step": 110
},
{
"epoch": 0.6804733727810651,
"grad_norm": 0.3079853122828334,
"learning_rate": 5.608034111526298e-06,
"loss": 0.7623,
"step": 115
},
{
"epoch": 0.7100591715976331,
"grad_norm": 0.29736335547076115,
"learning_rate": 4.704702977392914e-06,
"loss": 0.7513,
"step": 120
},
{
"epoch": 0.7396449704142012,
"grad_norm": 0.34221166685459126,
"learning_rate": 3.857872873103322e-06,
"loss": 0.7537,
"step": 125
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.3103567660476024,
"learning_rate": 3.0765795095517026e-06,
"loss": 0.7555,
"step": 130
},
{
"epoch": 0.7988165680473372,
"grad_norm": 0.3224055867351352,
"learning_rate": 2.369159318001937e-06,
"loss": 0.7584,
"step": 135
},
{
"epoch": 0.8284023668639053,
"grad_norm": 0.2993910120400221,
"learning_rate": 1.743160500034443e-06,
"loss": 0.7498,
"step": 140
},
{
"epoch": 0.8579881656804734,
"grad_norm": 0.3117989616206348,
"learning_rate": 1.2052624879351105e-06,
"loss": 0.7566,
"step": 145
},
{
"epoch": 0.8875739644970414,
"grad_norm": 0.29531838618560163,
"learning_rate": 7.612046748871327e-07,
"loss": 0.7665,
"step": 150
},
{
"epoch": 0.9171597633136095,
"grad_norm": 0.2988610021134082,
"learning_rate": 4.1572517541747294e-07,
"loss": 0.7613,
"step": 155
},
{
"epoch": 0.9467455621301775,
"grad_norm": 0.2858748779050992,
"learning_rate": 1.7251026952640583e-07,
"loss": 0.7607,
"step": 160
},
{
"epoch": 0.9763313609467456,
"grad_norm": 0.2755238703170473,
"learning_rate": 3.4155069933301535e-08,
"loss": 0.7446,
"step": 165
},
{
"epoch": 1.0,
"step": 169,
"total_flos": 76916824473600.0,
"train_loss": 0.8026506548097148,
"train_runtime": 496.5718,
"train_samples_per_second": 43.518,
"train_steps_per_second": 0.34
}
],
"logging_steps": 5,
"max_steps": 169,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 76916824473600.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}