{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"global_step": 22176,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 2.9323593073593074e-05,
"loss": 2.5375,
"step": 500
},
{
"epoch": 0.09,
"learning_rate": 2.8647186147186148e-05,
"loss": 1.7414,
"step": 1000
},
{
"epoch": 0.14,
"learning_rate": 2.797077922077922e-05,
"loss": 1.6079,
"step": 1500
},
{
"epoch": 0.18,
"learning_rate": 2.7294372294372295e-05,
"loss": 1.4997,
"step": 2000
},
{
"epoch": 0.23,
"learning_rate": 2.661796536796537e-05,
"loss": 1.4646,
"step": 2500
},
{
"epoch": 0.27,
"learning_rate": 2.594155844155844e-05,
"loss": 1.3928,
"step": 3000
},
{
"epoch": 0.32,
"learning_rate": 2.5265151515151516e-05,
"loss": 1.3433,
"step": 3500
},
{
"epoch": 0.36,
"learning_rate": 2.458874458874459e-05,
"loss": 1.2997,
"step": 4000
},
{
"epoch": 0.41,
"learning_rate": 2.3912337662337663e-05,
"loss": 1.2845,
"step": 4500
},
{
"epoch": 0.45,
"learning_rate": 2.3235930735930737e-05,
"loss": 1.2874,
"step": 5000
},
{
"epoch": 0.5,
"learning_rate": 2.255952380952381e-05,
"loss": 1.2555,
"step": 5500
},
{
"epoch": 0.54,
"learning_rate": 2.1883116883116884e-05,
"loss": 1.226,
"step": 6000
},
{
"epoch": 0.59,
"learning_rate": 2.1206709956709957e-05,
"loss": 1.2317,
"step": 6500
},
{
"epoch": 0.63,
"learning_rate": 2.053030303030303e-05,
"loss": 1.1745,
"step": 7000
},
{
"epoch": 0.68,
"learning_rate": 1.9853896103896105e-05,
"loss": 1.2023,
"step": 7500
},
{
"epoch": 0.72,
"learning_rate": 1.9177489177489178e-05,
"loss": 1.1795,
"step": 8000
},
{
"epoch": 0.77,
"learning_rate": 1.8501082251082252e-05,
"loss": 1.1303,
"step": 8500
},
{
"epoch": 0.81,
"learning_rate": 1.7824675324675322e-05,
"loss": 1.1206,
"step": 9000
},
{
"epoch": 0.86,
"learning_rate": 1.71482683982684e-05,
"loss": 1.1459,
"step": 9500
},
{
"epoch": 0.9,
"learning_rate": 1.6471861471861473e-05,
"loss": 1.0793,
"step": 10000
},
{
"epoch": 0.95,
"learning_rate": 1.5795454545454546e-05,
"loss": 1.0946,
"step": 10500
},
{
"epoch": 0.99,
"learning_rate": 1.511904761904762e-05,
"loss": 1.0956,
"step": 11000
},
{
"epoch": 1.04,
"learning_rate": 1.4442640692640693e-05,
"loss": 0.8725,
"step": 11500
},
{
"epoch": 1.08,
"learning_rate": 1.3766233766233767e-05,
"loss": 0.8071,
"step": 12000
},
{
"epoch": 1.13,
"learning_rate": 1.308982683982684e-05,
"loss": 0.8175,
"step": 12500
},
{
"epoch": 1.17,
"learning_rate": 1.2413419913419914e-05,
"loss": 0.7927,
"step": 13000
},
{
"epoch": 1.22,
"learning_rate": 1.1737012987012986e-05,
"loss": 0.8195,
"step": 13500
},
{
"epoch": 1.26,
"learning_rate": 1.106060606060606e-05,
"loss": 0.8075,
"step": 14000
},
{
"epoch": 1.31,
"learning_rate": 1.0384199134199135e-05,
"loss": 0.8079,
"step": 14500
},
{
"epoch": 1.35,
"learning_rate": 9.707792207792209e-06,
"loss": 0.7964,
"step": 15000
},
{
"epoch": 1.4,
"learning_rate": 9.031385281385282e-06,
"loss": 0.7764,
"step": 15500
},
{
"epoch": 1.44,
"learning_rate": 8.354978354978356e-06,
"loss": 0.7663,
"step": 16000
},
{
"epoch": 1.49,
"learning_rate": 7.678571428571428e-06,
"loss": 0.8211,
"step": 16500
},
{
"epoch": 1.53,
"learning_rate": 7.002164502164502e-06,
"loss": 0.7904,
"step": 17000
},
{
"epoch": 1.58,
"learning_rate": 6.325757575757576e-06,
"loss": 0.8048,
"step": 17500
},
{
"epoch": 1.62,
"learning_rate": 5.64935064935065e-06,
"loss": 0.7706,
"step": 18000
},
{
"epoch": 1.67,
"learning_rate": 4.972943722943723e-06,
"loss": 0.7539,
"step": 18500
},
{
"epoch": 1.71,
"learning_rate": 4.2965367965367965e-06,
"loss": 0.7587,
"step": 19000
},
{
"epoch": 1.76,
"learning_rate": 3.62012987012987e-06,
"loss": 0.7575,
"step": 19500
},
{
"epoch": 1.8,
"learning_rate": 2.943722943722944e-06,
"loss": 0.7534,
"step": 20000
},
{
"epoch": 1.85,
"learning_rate": 2.2673160173160173e-06,
"loss": 0.7477,
"step": 20500
},
{
"epoch": 1.89,
"learning_rate": 1.590909090909091e-06,
"loss": 0.7358,
"step": 21000
},
{
"epoch": 1.94,
"learning_rate": 9.145021645021645e-07,
"loss": 0.7233,
"step": 21500
},
{
"epoch": 1.98,
"learning_rate": 2.380952380952381e-07,
"loss": 0.7334,
"step": 22000
},
{
"epoch": 2.0,
"step": 22176,
"total_flos": 5.214986800612762e+16,
"train_loss": 1.056515955477738,
"train_runtime": 6821.0331,
"train_samples_per_second": 39.013,
"train_steps_per_second": 3.251
}
],
"max_steps": 22176,
"num_train_epochs": 2,
"total_flos": 5.214986800612762e+16,
"trial_name": null,
"trial_params": null
}