{
"best_metric": 1.7699204683303833,
"best_model_checkpoint": "gen_test/checkpoint-31250",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 31250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 173470.46875,
"learning_rate": 5.9040000000000004e-05,
"loss": 2.0402,
"step": 500
},
{
"epoch": 0.03,
"grad_norm": 170031.4375,
"learning_rate": 5.808e-05,
"loss": 1.8985,
"step": 1000
},
{
"epoch": 0.05,
"grad_norm": 113037.2421875,
"learning_rate": 5.712e-05,
"loss": 1.8577,
"step": 1500
},
{
"epoch": 0.06,
"grad_norm": 116889.296875,
"learning_rate": 5.6160000000000004e-05,
"loss": 1.8269,
"step": 2000
},
{
"epoch": 0.08,
"grad_norm": 96221.234375,
"learning_rate": 5.520000000000001e-05,
"loss": 1.8069,
"step": 2500
},
{
"epoch": 0.1,
"grad_norm": 81969.984375,
"learning_rate": 5.424e-05,
"loss": 1.7914,
"step": 3000
},
{
"epoch": 0.11,
"grad_norm": 90032.578125,
"learning_rate": 5.3280000000000005e-05,
"loss": 1.7792,
"step": 3500
},
{
"epoch": 0.13,
"grad_norm": 77830.828125,
"learning_rate": 5.232e-05,
"loss": 1.7666,
"step": 4000
},
{
"epoch": 0.14,
"grad_norm": 65369.70703125,
"learning_rate": 5.136e-05,
"loss": 1.7696,
"step": 4500
},
{
"epoch": 0.16,
"grad_norm": 77884.03125,
"learning_rate": 5.04e-05,
"loss": 1.7607,
"step": 5000
},
{
"epoch": 0.18,
"grad_norm": 57507.2421875,
"learning_rate": 4.944e-05,
"loss": 1.7514,
"step": 5500
},
{
"epoch": 0.19,
"grad_norm": 65232.98828125,
"learning_rate": 4.8480000000000003e-05,
"loss": 1.7409,
"step": 6000
},
{
"epoch": 0.21,
"grad_norm": 58623.87109375,
"learning_rate": 4.7520000000000006e-05,
"loss": 1.7378,
"step": 6500
},
{
"epoch": 0.22,
"grad_norm": 57649.65625,
"learning_rate": 4.656e-05,
"loss": 1.7372,
"step": 7000
},
{
"epoch": 0.24,
"grad_norm": 61488.49609375,
"learning_rate": 4.5600000000000004e-05,
"loss": 1.7237,
"step": 7500
},
{
"epoch": 0.26,
"grad_norm": 57711.09765625,
"learning_rate": 4.464e-05,
"loss": 1.7231,
"step": 8000
},
{
"epoch": 0.27,
"grad_norm": 50364.1796875,
"learning_rate": 4.368e-05,
"loss": 1.7217,
"step": 8500
},
{
"epoch": 0.29,
"grad_norm": 50957.51171875,
"learning_rate": 4.272e-05,
"loss": 1.7173,
"step": 9000
},
{
"epoch": 0.3,
"grad_norm": 72140.7578125,
"learning_rate": 4.176e-05,
"loss": 1.7154,
"step": 9500
},
{
"epoch": 0.32,
"grad_norm": 49330.06640625,
"learning_rate": 4.08e-05,
"loss": 1.7086,
"step": 10000
},
{
"epoch": 0.34,
"grad_norm": 50961.6015625,
"learning_rate": 3.9840000000000005e-05,
"loss": 1.7096,
"step": 10500
},
{
"epoch": 0.35,
"grad_norm": 48586.94921875,
"learning_rate": 3.888e-05,
"loss": 1.7041,
"step": 11000
},
{
"epoch": 0.37,
"grad_norm": 55556.12890625,
"learning_rate": 3.792e-05,
"loss": 1.7032,
"step": 11500
},
{
"epoch": 0.38,
"grad_norm": 52399.66796875,
"learning_rate": 3.696e-05,
"loss": 1.6997,
"step": 12000
},
{
"epoch": 0.4,
"grad_norm": 48360.08203125,
"learning_rate": 3.6e-05,
"loss": 1.6936,
"step": 12500
},
{
"epoch": 0.42,
"grad_norm": 44732.66015625,
"learning_rate": 3.5039999999999997e-05,
"loss": 1.6921,
"step": 13000
},
{
"epoch": 0.43,
"grad_norm": 54025.0390625,
"learning_rate": 3.408e-05,
"loss": 1.6905,
"step": 13500
},
{
"epoch": 0.45,
"grad_norm": 54268.91796875,
"learning_rate": 3.312e-05,
"loss": 1.6876,
"step": 14000
},
{
"epoch": 0.46,
"grad_norm": 46711.734375,
"learning_rate": 3.2160000000000004e-05,
"loss": 1.688,
"step": 14500
},
{
"epoch": 0.48,
"grad_norm": 47818.2265625,
"learning_rate": 3.12e-05,
"loss": 1.6817,
"step": 15000
},
{
"epoch": 0.5,
"grad_norm": 49987.921875,
"learning_rate": 3.0240000000000002e-05,
"loss": 1.6786,
"step": 15500
},
{
"epoch": 0.51,
"grad_norm": 46060.59375,
"learning_rate": 2.928e-05,
"loss": 1.6736,
"step": 16000
},
{
"epoch": 0.53,
"grad_norm": 44150.3671875,
"learning_rate": 2.832e-05,
"loss": 1.6753,
"step": 16500
},
{
"epoch": 0.54,
"grad_norm": 43254.55859375,
"learning_rate": 2.7360000000000002e-05,
"loss": 1.6732,
"step": 17000
},
{
"epoch": 0.56,
"grad_norm": 50895.7421875,
"learning_rate": 2.64e-05,
"loss": 1.6727,
"step": 17500
},
{
"epoch": 0.58,
"grad_norm": 49795.7421875,
"learning_rate": 2.544e-05,
"loss": 1.6682,
"step": 18000
},
{
"epoch": 0.59,
"grad_norm": 57421.609375,
"learning_rate": 2.448e-05,
"loss": 1.6673,
"step": 18500
},
{
"epoch": 0.61,
"grad_norm": 44257.9296875,
"learning_rate": 2.3520000000000002e-05,
"loss": 1.6651,
"step": 19000
},
{
"epoch": 0.62,
"grad_norm": 48974.69921875,
"learning_rate": 2.256e-05,
"loss": 1.6608,
"step": 19500
},
{
"epoch": 0.64,
"grad_norm": 61821.53125,
"learning_rate": 2.16e-05,
"loss": 1.6637,
"step": 20000
},
{
"epoch": 0.66,
"grad_norm": 51516.53515625,
"learning_rate": 2.064e-05,
"loss": 1.6628,
"step": 20500
},
{
"epoch": 0.67,
"grad_norm": 50740.87890625,
"learning_rate": 1.968e-05,
"loss": 1.6651,
"step": 21000
},
{
"epoch": 0.69,
"grad_norm": 49265.2890625,
"learning_rate": 1.872e-05,
"loss": 1.6604,
"step": 21500
},
{
"epoch": 0.7,
"grad_norm": 44264.171875,
"learning_rate": 1.776e-05,
"loss": 1.6557,
"step": 22000
},
{
"epoch": 0.72,
"grad_norm": 50852.16015625,
"learning_rate": 1.6800000000000002e-05,
"loss": 1.6579,
"step": 22500
},
{
"epoch": 0.74,
"grad_norm": 58869.0625,
"learning_rate": 1.584e-05,
"loss": 1.655,
"step": 23000
},
{
"epoch": 0.75,
"grad_norm": 41634.01953125,
"learning_rate": 1.488e-05,
"loss": 1.6533,
"step": 23500
},
{
"epoch": 0.77,
"grad_norm": 42209.58984375,
"learning_rate": 1.392e-05,
"loss": 1.6506,
"step": 24000
},
{
"epoch": 0.78,
"grad_norm": 48772.890625,
"learning_rate": 1.296e-05,
"loss": 1.6501,
"step": 24500
},
{
"epoch": 0.8,
"grad_norm": 47330.41796875,
"learning_rate": 1.2e-05,
"loss": 1.65,
"step": 25000
},
{
"epoch": 0.82,
"grad_norm": 46274.1953125,
"learning_rate": 1.104e-05,
"loss": 1.6465,
"step": 25500
},
{
"epoch": 0.83,
"grad_norm": 48066.18359375,
"learning_rate": 1.008e-05,
"loss": 1.6462,
"step": 26000
},
{
"epoch": 0.85,
"grad_norm": 44796.63671875,
"learning_rate": 9.12e-06,
"loss": 1.6491,
"step": 26500
},
{
"epoch": 0.86,
"grad_norm": 51596.578125,
"learning_rate": 8.160000000000001e-06,
"loss": 1.6485,
"step": 27000
},
{
"epoch": 0.88,
"grad_norm": 42668.67578125,
"learning_rate": 7.2e-06,
"loss": 1.6413,
"step": 27500
},
{
"epoch": 0.9,
"grad_norm": 48424.36328125,
"learning_rate": 6.2399999999999995e-06,
"loss": 1.6477,
"step": 28000
},
{
"epoch": 0.91,
"grad_norm": 50913.546875,
"learning_rate": 5.279999999999999e-06,
"loss": 1.6455,
"step": 28500
},
{
"epoch": 0.93,
"grad_norm": 49008.09375,
"learning_rate": 4.32e-06,
"loss": 1.644,
"step": 29000
},
{
"epoch": 0.94,
"grad_norm": 43177.625,
"learning_rate": 3.36e-06,
"loss": 1.6416,
"step": 29500
},
{
"epoch": 0.96,
"grad_norm": 49836.08984375,
"learning_rate": 2.4000000000000003e-06,
"loss": 1.6419,
"step": 30000
},
{
"epoch": 0.98,
"grad_norm": 49725.71484375,
"learning_rate": 1.44e-06,
"loss": 1.6402,
"step": 30500
},
{
"epoch": 0.99,
"grad_norm": 44681.953125,
"learning_rate": 4.800000000000001e-07,
"loss": 1.6425,
"step": 31000
},
{
"epoch": 1.0,
"eval_loss": 1.7699204683303833,
"eval_runtime": 56.1676,
"eval_samples_per_second": 178.039,
"eval_steps_per_second": 2.795,
"step": 31250
}
],
"logging_steps": 500,
"max_steps": 31250,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 1.045167082831872e+18,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}