Checkpoints / checkpoint-200 / trainer_state.json
KrafterDen's picture
Training in progress, step 200, checkpoint
63e5ac8 verified
raw
history blame
3.55 kB
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4922698253980463,
"eval_steps": 500,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.42,
"grad_norm": 0.1815568506717682,
"learning_rate": 2.9999999999999997e-05,
"loss": 3.8657,
"step": 10
},
{
"epoch": 0.84,
"grad_norm": 0.23455914855003357,
"learning_rate": 5.9999999999999995e-05,
"loss": 3.8223,
"step": 20
},
{
"epoch": 1.25,
"grad_norm": 0.32260793447494507,
"learning_rate": 8.999999999999999e-05,
"loss": 3.7255,
"step": 30
},
{
"epoch": 1.67,
"grad_norm": 0.38705918192863464,
"learning_rate": 0.00011999999999999999,
"loss": 3.4952,
"step": 40
},
{
"epoch": 2.09,
"grad_norm": 0.5273059606552124,
"learning_rate": 0.00015,
"loss": 3.098,
"step": 50
},
{
"epoch": 2.51,
"grad_norm": 0.6030514240264893,
"learning_rate": 0.00017999999999999998,
"loss": 2.5299,
"step": 60
},
{
"epoch": 2.92,
"grad_norm": 0.459722101688385,
"learning_rate": 0.00020999999999999998,
"loss": 1.899,
"step": 70
},
{
"epoch": 3.34,
"grad_norm": 0.1655016839504242,
"learning_rate": 0.00023999999999999998,
"loss": 1.6018,
"step": 80
},
{
"epoch": 3.76,
"grad_norm": 0.10938003659248352,
"learning_rate": 0.00027,
"loss": 1.4726,
"step": 90
},
{
"epoch": 4.18,
"grad_norm": 0.09813433140516281,
"learning_rate": 0.0003,
"loss": 1.4336,
"step": 100
},
{
"epoch": 0.27,
"grad_norm": 0.2995990812778473,
"learning_rate": 0.000285,
"loss": 3.3878,
"step": 110
},
{
"epoch": 0.3,
"grad_norm": 0.2459421455860138,
"learning_rate": 0.00027,
"loss": 3.0843,
"step": 120
},
{
"epoch": 0.32,
"grad_norm": 0.2377060502767563,
"learning_rate": 0.00025499999999999996,
"loss": 2.8413,
"step": 130
},
{
"epoch": 0.34,
"grad_norm": 0.1750001609325409,
"learning_rate": 0.00023999999999999998,
"loss": 2.7303,
"step": 140
},
{
"epoch": 0.37,
"grad_norm": 0.1821776032447815,
"learning_rate": 0.000225,
"loss": 2.6535,
"step": 150
},
{
"epoch": 0.39,
"grad_norm": 0.16587179899215698,
"learning_rate": 0.00020999999999999998,
"loss": 2.6147,
"step": 160
},
{
"epoch": 0.42,
"grad_norm": 0.15111136436462402,
"learning_rate": 0.000195,
"loss": 2.5929,
"step": 170
},
{
"epoch": 0.44,
"grad_norm": 0.13922317326068878,
"learning_rate": 0.00017999999999999998,
"loss": 2.554,
"step": 180
},
{
"epoch": 0.47,
"grad_norm": 0.14242495596408844,
"learning_rate": 0.000165,
"loss": 2.5381,
"step": 190
},
{
"epoch": 0.49,
"grad_norm": 0.1816890388727188,
"learning_rate": 0.00015,
"loss": 2.5145,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 300,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 3.2322500059336704e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}