{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.451612903225806,
"eval_steps": 200,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03225806451612903,
"eval_loss": 2.5740277767181396,
"eval_runtime": 31.6195,
"eval_samples_per_second": 47.471,
"eval_steps_per_second": 1.992,
"step": 1
},
{
"epoch": 0.3225806451612903,
"grad_norm": 2.625,
"learning_rate": 8e-05,
"loss": 1.9901,
"step": 10
},
{
"epoch": 0.6451612903225806,
"grad_norm": 2.28125,
"learning_rate": 0.00016,
"loss": 2.0923,
"step": 20
},
{
"epoch": 0.967741935483871,
"grad_norm": 2.5625,
"learning_rate": 0.00019994532573409262,
"loss": 2.3185,
"step": 30
},
{
"epoch": 1.2903225806451613,
"grad_norm": 2.03125,
"learning_rate": 0.00019950829025450114,
"loss": 1.3841,
"step": 40
},
{
"epoch": 1.6129032258064515,
"grad_norm": 2.28125,
"learning_rate": 0.00019863613034027224,
"loss": 1.2813,
"step": 50
},
{
"epoch": 1.935483870967742,
"grad_norm": 2.15625,
"learning_rate": 0.0001973326597248006,
"loss": 1.3828,
"step": 60
},
{
"epoch": 2.258064516129032,
"grad_norm": 2.046875,
"learning_rate": 0.00019560357815343577,
"loss": 0.8247,
"step": 70
},
{
"epoch": 2.5806451612903225,
"grad_norm": 2.140625,
"learning_rate": 0.0001934564464599461,
"loss": 0.7192,
"step": 80
},
{
"epoch": 2.903225806451613,
"grad_norm": 2.109375,
"learning_rate": 0.00019090065350491626,
"loss": 0.7919,
"step": 90
},
{
"epoch": 3.225806451612903,
"grad_norm": 1.53125,
"learning_rate": 0.0001879473751206489,
"loss": 0.4979,
"step": 100
},
{
"epoch": 3.5483870967741935,
"grad_norm": 2.140625,
"learning_rate": 0.00018460952524209355,
"loss": 0.435,
"step": 110
},
{
"epoch": 3.870967741935484,
"grad_norm": 1.6953125,
"learning_rate": 0.00018090169943749476,
"loss": 0.4868,
"step": 120
},
{
"epoch": 4.193548387096774,
"grad_norm": 1.328125,
"learning_rate": 0.00017684011108568592,
"loss": 0.3416,
"step": 130
},
{
"epoch": 4.516129032258064,
"grad_norm": 2.0,
"learning_rate": 0.00017244252047910892,
"loss": 0.29,
"step": 140
},
{
"epoch": 4.838709677419355,
"grad_norm": 1.4453125,
"learning_rate": 0.00016772815716257412,
"loss": 0.3182,
"step": 150
},
{
"epoch": 5.161290322580645,
"grad_norm": 1.234375,
"learning_rate": 0.0001627176358473537,
"loss": 0.2422,
"step": 160
},
{
"epoch": 5.483870967741936,
"grad_norm": 1.1875,
"learning_rate": 0.00015743286626829437,
"loss": 0.1847,
"step": 170
},
{
"epoch": 5.806451612903226,
"grad_norm": 1.234375,
"learning_rate": 0.00015189695737812152,
"loss": 0.2181,
"step": 180
},
{
"epoch": 6.129032258064516,
"grad_norm": 1.03125,
"learning_rate": 0.0001461341162978688,
"loss": 0.1774,
"step": 190
},
{
"epoch": 6.451612903225806,
"grad_norm": 1.03125,
"learning_rate": 0.00014016954246529696,
"loss": 0.1334,
"step": 200
},
{
"epoch": 6.451612903225806,
"eval_loss": 4.735217571258545,
"eval_runtime": 31.1163,
"eval_samples_per_second": 48.238,
"eval_steps_per_second": 2.025,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 17,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.1941092488380416e+17,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}