{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 15000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"grad_norm": 9.451863288879395,
"learning_rate": 1.933866666666667e-05,
"loss": 0.4302,
"step": 500
},
{
"epoch": 0.07,
"grad_norm": 0.5204328894615173,
"learning_rate": 1.8672e-05,
"loss": 0.3988,
"step": 1000
},
{
"epoch": 0.1,
"grad_norm": 6.495779037475586,
"learning_rate": 1.8005333333333335e-05,
"loss": 0.3613,
"step": 1500
},
{
"epoch": 0.13,
"grad_norm": 0.3106362223625183,
"learning_rate": 1.7338666666666667e-05,
"loss": 0.3268,
"step": 2000
},
{
"epoch": 0.17,
"grad_norm": 16.08853530883789,
"learning_rate": 1.6673333333333335e-05,
"loss": 0.323,
"step": 2500
},
{
"epoch": 0.2,
"grad_norm": 7.482870101928711,
"learning_rate": 1.6006666666666667e-05,
"loss": 0.3025,
"step": 3000
},
{
"epoch": 0.23,
"grad_norm": 21.62608528137207,
"learning_rate": 1.5340000000000002e-05,
"loss": 0.2941,
"step": 3500
},
{
"epoch": 0.27,
"grad_norm": 7.615144729614258,
"learning_rate": 1.4673333333333336e-05,
"loss": 0.2858,
"step": 4000
},
{
"epoch": 0.3,
"grad_norm": 0.15844333171844482,
"learning_rate": 1.4008000000000002e-05,
"loss": 0.2939,
"step": 4500
},
{
"epoch": 0.33,
"grad_norm": 0.4972567856311798,
"learning_rate": 1.3341333333333336e-05,
"loss": 0.2877,
"step": 5000
},
{
"epoch": 0.37,
"grad_norm": 1.6350613832473755,
"learning_rate": 1.2674666666666669e-05,
"loss": 0.2917,
"step": 5500
},
{
"epoch": 0.4,
"grad_norm": 9.023903846740723,
"learning_rate": 1.2008000000000003e-05,
"loss": 0.3021,
"step": 6000
},
{
"epoch": 0.43,
"grad_norm": 25.3885498046875,
"learning_rate": 1.1341333333333336e-05,
"loss": 0.235,
"step": 6500
},
{
"epoch": 0.47,
"grad_norm": 9.231255531311035,
"learning_rate": 1.0674666666666666e-05,
"loss": 0.2774,
"step": 7000
},
{
"epoch": 0.5,
"grad_norm": 15.598228454589844,
"learning_rate": 1.0008e-05,
"loss": 0.2379,
"step": 7500
},
{
"epoch": 0.53,
"grad_norm": 0.8757464289665222,
"learning_rate": 9.341333333333335e-06,
"loss": 0.2538,
"step": 8000
},
{
"epoch": 0.57,
"grad_norm": 1.9172446727752686,
"learning_rate": 8.674666666666668e-06,
"loss": 0.2385,
"step": 8500
},
{
"epoch": 0.6,
"grad_norm": 0.49066710472106934,
"learning_rate": 8.008e-06,
"loss": 0.2457,
"step": 9000
},
{
"epoch": 0.63,
"grad_norm": 0.23613671958446503,
"learning_rate": 7.342666666666667e-06,
"loss": 0.218,
"step": 9500
},
{
"epoch": 0.67,
"grad_norm": 10.944806098937988,
"learning_rate": 6.676e-06,
"loss": 0.2328,
"step": 10000
},
{
"epoch": 0.7,
"grad_norm": 9.770712852478027,
"learning_rate": 6.009333333333334e-06,
"loss": 0.2245,
"step": 10500
},
{
"epoch": 0.73,
"grad_norm": 0.422610342502594,
"learning_rate": 5.342666666666667e-06,
"loss": 0.2362,
"step": 11000
},
{
"epoch": 0.77,
"grad_norm": 0.11568822711706161,
"learning_rate": 4.676000000000001e-06,
"loss": 0.2314,
"step": 11500
},
{
"epoch": 0.8,
"grad_norm": 1.0529944896697998,
"learning_rate": 4.010666666666667e-06,
"loss": 0.2175,
"step": 12000
},
{
"epoch": 0.83,
"grad_norm": 0.467757910490036,
"learning_rate": 3.344e-06,
"loss": 0.2315,
"step": 12500
},
{
"epoch": 0.87,
"grad_norm": 0.4078325927257538,
"learning_rate": 2.6773333333333336e-06,
"loss": 0.2231,
"step": 13000
},
{
"epoch": 0.9,
"grad_norm": 6.3522562980651855,
"learning_rate": 2.0106666666666667e-06,
"loss": 0.2241,
"step": 13500
},
{
"epoch": 0.93,
"grad_norm": 0.3300648629665375,
"learning_rate": 1.344e-06,
"loss": 0.1883,
"step": 14000
},
{
"epoch": 0.97,
"grad_norm": 0.18304280936717987,
"learning_rate": 6.786666666666668e-07,
"loss": 0.2217,
"step": 14500
},
{
"epoch": 1.0,
"grad_norm": 6.273829460144043,
"learning_rate": 1.3333333333333334e-08,
"loss": 0.2346,
"step": 15000
},
{
"epoch": 1.0,
"eval_accuracy": 0.945,
"eval_loss": 0.21884675323963165,
"eval_runtime": 20.7763,
"eval_samples_per_second": 365.801,
"eval_steps_per_second": 45.725,
"step": 15000
}
],
"logging_steps": 500,
"max_steps": 15000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 7893614960640000.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}