{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 39,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07692307692307693,
"grad_norm": 0.011006172946138796,
"learning_rate": 2e-05,
"loss": 0.2391,
"step": 1
},
{
"epoch": 0.15384615384615385,
"grad_norm": 0.009920294569357094,
"learning_rate": 4e-05,
"loss": 0.2609,
"step": 2
},
{
"epoch": 0.23076923076923078,
"grad_norm": 0.01158092107894398,
"learning_rate": 6.000000000000001e-05,
"loss": 0.2587,
"step": 3
},
{
"epoch": 0.3076923076923077,
"grad_norm": 0.0201005612255337,
"learning_rate": 8e-05,
"loss": 0.2712,
"step": 4
},
{
"epoch": 0.38461538461538464,
"grad_norm": 0.013449641741916138,
"learning_rate": 7.983897175980957e-05,
"loss": 0.2408,
"step": 5
},
{
"epoch": 0.46153846153846156,
"grad_norm": 0.017403662565344077,
"learning_rate": 7.93571835439452e-05,
"loss": 0.2277,
"step": 6
},
{
"epoch": 0.5384615384615384,
"grad_norm": 0.016783900205254377,
"learning_rate": 7.855851442783414e-05,
"loss": 0.2281,
"step": 7
},
{
"epoch": 0.6153846153846154,
"grad_norm": 0.013041582389936975,
"learning_rate": 7.74493948255895e-05,
"loss": 0.2127,
"step": 8
},
{
"epoch": 0.6923076923076923,
"grad_norm": 0.008599268410488753,
"learning_rate": 7.603875471609677e-05,
"loss": 0.2198,
"step": 9
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.009272083398470401,
"learning_rate": 7.433795174407465e-05,
"loss": 0.2537,
"step": 10
},
{
"epoch": 0.8461538461538461,
"grad_norm": 0.017009886825312504,
"learning_rate": 7.236067977499791e-05,
"loss": 0.1931,
"step": 11
},
{
"epoch": 0.9230769230769231,
"grad_norm": 0.019269587613227883,
"learning_rate": 7.012285864014445e-05,
"loss": 0.23,
"step": 12
},
{
"epoch": 1.0,
"grad_norm": 0.01921119861962993,
"learning_rate": 6.76425059594746e-05,
"loss": 0.2169,
"step": 13
},
{
"epoch": 1.0769230769230769,
"grad_norm": 0.016008391623032813,
"learning_rate": 6.493959207434934e-05,
"loss": 0.2231,
"step": 14
},
{
"epoch": 1.1538461538461537,
"grad_norm": 0.013505093256517267,
"learning_rate": 6.20358792580841e-05,
"loss": 0.216,
"step": 15
},
{
"epoch": 1.2307692307692308,
"grad_norm": 0.009118977222006231,
"learning_rate": 5.895474649891995e-05,
"loss": 0.1955,
"step": 16
},
{
"epoch": 1.3076923076923077,
"grad_norm": 0.008130055798965707,
"learning_rate": 5.572100126615695e-05,
"loss": 0.2379,
"step": 17
},
{
"epoch": 1.3846153846153846,
"grad_norm": 0.008040382267002854,
"learning_rate": 5.23606797749979e-05,
"loss": 0.1926,
"step": 18
},
{
"epoch": 1.4615384615384617,
"grad_norm": 0.007985366963191003,
"learning_rate": 4.890083735825258e-05,
"loss": 0.196,
"step": 19
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.009022742061822532,
"learning_rate": 4.5369330632706223e-05,
"loss": 0.173,
"step": 20
},
{
"epoch": 1.6153846153846154,
"grad_norm": 0.010255624399866037,
"learning_rate": 4.17945932140206e-05,
"loss": 0.2147,
"step": 21
},
{
"epoch": 1.6923076923076923,
"grad_norm": 0.009741555715518118,
"learning_rate": 3.820540678597942e-05,
"loss": 0.1592,
"step": 22
},
{
"epoch": 1.7692307692307692,
"grad_norm": 0.009844925309939856,
"learning_rate": 3.4630669367293797e-05,
"loss": 0.2045,
"step": 23
},
{
"epoch": 1.8461538461538463,
"grad_norm": 0.010438901552927702,
"learning_rate": 3.109916264174743e-05,
"loss": 0.1494,
"step": 24
},
{
"epoch": 1.9230769230769231,
"grad_norm": 0.010622270315970113,
"learning_rate": 2.7639320225002108e-05,
"loss": 0.2134,
"step": 25
},
{
"epoch": 2.0,
"grad_norm": 0.009659972608976171,
"learning_rate": 2.427899873384306e-05,
"loss": 0.205,
"step": 26
},
{
"epoch": 2.076923076923077,
"grad_norm": 0.010916832572563411,
"learning_rate": 2.1045253501080058e-05,
"loss": 0.1858,
"step": 27
},
{
"epoch": 2.1538461538461537,
"grad_norm": 0.009749675363732882,
"learning_rate": 1.7964120741915905e-05,
"loss": 0.1672,
"step": 28
},
{
"epoch": 2.230769230769231,
"grad_norm": 0.008711108100307416,
"learning_rate": 1.5060407925650662e-05,
"loss": 0.1883,
"step": 29
},
{
"epoch": 2.3076923076923075,
"grad_norm": 0.009348284679650916,
"learning_rate": 1.2357494040525416e-05,
"loss": 0.1577,
"step": 30
},
{
"epoch": 2.3846153846153846,
"grad_norm": 0.008395179602647181,
"learning_rate": 9.877141359855567e-06,
"loss": 0.1929,
"step": 31
},
{
"epoch": 2.4615384615384617,
"grad_norm": 0.00858698129154695,
"learning_rate": 7.639320225002106e-06,
"loss": 0.1485,
"step": 32
},
{
"epoch": 2.5384615384615383,
"grad_norm": 0.008945513764739278,
"learning_rate": 5.662048255925357e-06,
"loss": 0.2167,
"step": 33
},
{
"epoch": 2.6153846153846154,
"grad_norm": 0.008857533620105983,
"learning_rate": 3.961245283903239e-06,
"loss": 0.1906,
"step": 34
},
{
"epoch": 2.6923076923076925,
"grad_norm": 0.00837899269292367,
"learning_rate": 2.550605174410512e-06,
"loss": 0.1738,
"step": 35
},
{
"epoch": 2.769230769230769,
"grad_norm": 0.008534100646481335,
"learning_rate": 1.4414855721658705e-06,
"loss": 0.1838,
"step": 36
},
{
"epoch": 2.8461538461538463,
"grad_norm": 0.008482103570619792,
"learning_rate": 6.428164560548134e-07,
"loss": 0.1508,
"step": 37
},
{
"epoch": 2.9230769230769234,
"grad_norm": 0.00892411300486341,
"learning_rate": 1.6102824019043728e-07,
"loss": 0.1676,
"step": 38
},
{
"epoch": 3.0,
"grad_norm": 0.008871456118818033,
"learning_rate": 0.0,
"loss": 0.1878,
"step": 39
},
{
"epoch": 3.0,
"step": 39,
"total_flos": 88667793129472.0,
"train_loss": 0.20370503381276742,
"train_runtime": 1178.0684,
"train_samples_per_second": 0.509,
"train_steps_per_second": 0.033
}
],
"logging_steps": 1,
"max_steps": 39,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 88667793129472.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}