{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.964539007092199,
"eval_steps": 500,
"global_step": 170,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.28368794326241137,
"grad_norm": 1.467705249786377,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.5187,
"num_tokens": 634122.0,
"step": 5
},
{
"epoch": 0.5673758865248227,
"grad_norm": 0.786109983921051,
"learning_rate": 5e-05,
"loss": 0.4691,
"num_tokens": 627314.0,
"step": 10
},
{
"epoch": 0.851063829787234,
"grad_norm": 0.529224157333374,
"learning_rate": 4.989299698973645e-05,
"loss": 0.4195,
"num_tokens": 1266402.0,
"step": 15
},
{
"epoch": 1.1702127659574468,
"grad_norm": 0.36877477169036865,
"learning_rate": 4.957300570509739e-05,
"loss": 0.4707,
"num_tokens": 721307.0,
"step": 20
},
{
"epoch": 1.4539007092198581,
"grad_norm": 0.3088724613189697,
"learning_rate": 4.9043069704368563e-05,
"loss": 0.3746,
"num_tokens": 1347576.0,
"step": 25
},
{
"epoch": 1.7375886524822695,
"grad_norm": 0.25784584879875183,
"learning_rate": 4.83082294095344e-05,
"loss": 0.3654,
"num_tokens": 1978085.0,
"step": 30
},
{
"epoch": 2.0,
"grad_norm": 0.23836882412433624,
"learning_rate": 4.7375474164915813e-05,
"loss": 0.3578,
"num_tokens": 2571704.0,
"step": 35
},
{
"epoch": 2.2836879432624113,
"grad_norm": 0.20160914957523346,
"learning_rate": 4.625367575886955e-05,
"loss": 0.3419,
"num_tokens": 3218590.0,
"step": 40
},
{
"epoch": 2.5673758865248226,
"grad_norm": 0.19208775460720062,
"learning_rate": 4.4953504040849445e-05,
"loss": 0.3445,
"num_tokens": 3863566.0,
"step": 45
},
{
"epoch": 2.851063829787234,
"grad_norm": 0.16179953515529633,
"learning_rate": 4.348732543642775e-05,
"loss": 0.3352,
"num_tokens": 4488605.0,
"step": 50
},
{
"epoch": 3.1134751773049647,
"grad_norm": 0.30722367763519287,
"learning_rate": 4.1869085325538425e-05,
"loss": 0.3326,
"num_tokens": 5057805.0,
"step": 55
},
{
"epoch": 3.397163120567376,
"grad_norm": 0.1563473492860794,
"learning_rate": 4.011417540268764e-05,
"loss": 0.33,
"num_tokens": 5676903.0,
"step": 60
},
{
"epoch": 3.6808510638297873,
"grad_norm": 0.13698497414588928,
"learning_rate": 3.8239287280718634e-05,
"loss": 0.3386,
"num_tokens": 6321151.0,
"step": 65
},
{
"epoch": 3.9645390070921986,
"grad_norm": 0.2415415197610855,
"learning_rate": 3.626225373056101e-05,
"loss": 0.3162,
"num_tokens": 6962012.0,
"step": 70
},
{
"epoch": 4.226950354609929,
"grad_norm": 0.13087180256843567,
"learning_rate": 3.420187906699333e-05,
"loss": 0.3179,
"num_tokens": 7568220.0,
"step": 75
},
{
"epoch": 4.51063829787234,
"grad_norm": 0.13442984223365784,
"learning_rate": 3.207776029368427e-05,
"loss": 0.3153,
"num_tokens": 8215106.0,
"step": 80
},
{
"epoch": 4.794326241134752,
"grad_norm": 0.13201209902763367,
"learning_rate": 2.99101007086695e-05,
"loss": 0.3088,
"num_tokens": 8842420.0,
"step": 85
},
{
"epoch": 5.056737588652482,
"grad_norm": 1.6477094888687134,
"learning_rate": 2.7719517743133312e-05,
"loss": 0.3266,
"num_tokens": 9397379.0,
"step": 90
},
{
"epoch": 5.567375886524823,
"grad_norm": 0.127790629863739,
"learning_rate": 2.5526846861212987e-05,
"loss": 0.3057,
"num_tokens": 618338.0,
"step": 95
},
{
"epoch": 5.851063829787234,
"grad_norm": 0.12059980630874634,
"learning_rate": 2.3352943386009223e-05,
"loss": 0.3081,
"num_tokens": 1273698.0,
"step": 100
},
{
"epoch": 6.170212765957447,
"grad_norm": 0.13467751443386078,
"learning_rate": 2.1218484136710372e-05,
"loss": 0.3696,
"num_tokens": 1988765.0,
"step": 105
},
{
"epoch": 6.453900709219858,
"grad_norm": 0.12073035538196564,
"learning_rate": 1.914377076353511e-05,
"loss": 0.2971,
"num_tokens": 625541.0,
"step": 110
},
{
"epoch": 6.73758865248227,
"grad_norm": 2.8106677532196045,
"learning_rate": 1.7148536651049078e-05,
"loss": 0.3054,
"num_tokens": 1270517.0,
"step": 115
},
{
"epoch": 7.056737588652482,
"grad_norm": 0.21665821969509125,
"learning_rate": 1.5251759226471556e-05,
"loss": 0.3581,
"num_tokens": 1987709.0,
"step": 120
},
{
"epoch": 7.340425531914893,
"grad_norm": 0.11727333068847656,
"learning_rate": 1.3471479458178499e-05,
"loss": 0.3057,
"num_tokens": 2609416.0,
"step": 125
},
{
"epoch": 7.624113475177305,
"grad_norm": 0.13120116293430328,
"learning_rate": 1.1824630261220466e-05,
"loss": 0.304,
"num_tokens": 3264776.0,
"step": 130
},
{
"epoch": 7.907801418439716,
"grad_norm": 0.11633836477994919,
"learning_rate": 1.0326875441955191e-05,
"loss": 0.3001,
"num_tokens": 635071.0,
"step": 135
},
{
"epoch": 8.22695035460993,
"grad_norm": 0.17760534584522247,
"learning_rate": 8.99246071365363e-06,
"loss": 0.3615,
"num_tokens": 1349625.0,
"step": 140
},
{
"epoch": 8.51063829787234,
"grad_norm": 0.2086412012577057,
"learning_rate": 7.834078200126287e-06,
"loss": 0.2978,
"num_tokens": 1973689.0,
"step": 145
},
{
"epoch": 8.794326241134751,
"grad_norm": 0.1149413138628006,
"learning_rate": 6.862745716127312e-06,
"loss": 0.2979,
"num_tokens": 634122.0,
"step": 150
},
{
"epoch": 9.113475177304965,
"grad_norm": 0.1138206422328949,
"learning_rate": 6.087701972745999e-06,
"loss": 0.3572,
"num_tokens": 714739.0,
"step": 155
},
{
"epoch": 9.397163120567376,
"grad_norm": 0.1209757998585701,
"learning_rate": 5.51631870452704e-06,
"loss": 0.2932,
"num_tokens": 1354872.0,
"step": 160
},
{
"epoch": 9.680851063829786,
"grad_norm": 0.11537827551364899,
"learning_rate": 5.154030554111651e-06,
"loss": 0.2973,
"num_tokens": 2002733.0,
"step": 165
},
{
"epoch": 9.964539007092199,
"grad_norm": 1.9767428636550903,
"learning_rate": 5.004283381293366e-06,
"loss": 0.2981,
"num_tokens": 2639235.0,
"step": 170
},
{
"epoch": 9.964539007092199,
"step": 170,
"total_flos": 4.719554866967808e+16,
"train_loss": 0.0366410073112039,
"train_runtime": 308.342,
"train_samples_per_second": 4.573,
"train_steps_per_second": 0.551
}
],
"logging_steps": 5,
"max_steps": 170,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 5,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.719554866967808e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}