{
"best_metric": 0.6983290314674377,
"best_model_checkpoint": "./xlam_1epoch_pythonic_turn5e-6/checkpoint-294",
"epoch": 0.9981543004798819,
"eval_steps": 42,
"global_step": 338,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.029531192321889995,
"grad_norm": 0.27272307872772217,
"learning_rate": 4.5454545454545455e-06,
"loss": 0.249,
"step": 10
},
{
"epoch": 0.05906238464377999,
"grad_norm": 0.2998972237110138,
"learning_rate": 4.990660391382924e-06,
"loss": 0.2557,
"step": 20
},
{
"epoch": 0.08859357696567,
"grad_norm": 0.20621594786643982,
"learning_rate": 4.958464904721778e-06,
"loss": 0.2502,
"step": 30
},
{
"epoch": 0.11812476928755998,
"grad_norm": 0.11265894770622253,
"learning_rate": 4.903595102807423e-06,
"loss": 0.2291,
"step": 40
},
{
"epoch": 0.12403100775193798,
"eval_loss": 0.7797804474830627,
"eval_runtime": 40.6918,
"eval_samples_per_second": 2.335,
"eval_steps_per_second": 2.335,
"step": 42
},
{
"epoch": 0.14765596160945,
"grad_norm": 0.27329352498054504,
"learning_rate": 4.826557047444564e-06,
"loss": 0.2228,
"step": 50
},
{
"epoch": 0.17718715393134,
"grad_norm": 0.44345590472221375,
"learning_rate": 4.72806125727857e-06,
"loss": 0.2571,
"step": 60
},
{
"epoch": 0.20671834625322996,
"grad_norm": 0.364957332611084,
"learning_rate": 4.609016154712806e-06,
"loss": 0.2054,
"step": 70
},
{
"epoch": 0.23624953857511996,
"grad_norm": 0.39865991473197937,
"learning_rate": 4.470519687568185e-06,
"loss": 0.238,
"step": 80
},
{
"epoch": 0.24806201550387597,
"eval_loss": 0.7582277655601501,
"eval_runtime": 40.6918,
"eval_samples_per_second": 2.335,
"eval_steps_per_second": 2.335,
"step": 84
},
{
"epoch": 0.26578073089701,
"grad_norm": 0.45341649651527405,
"learning_rate": 4.3138492027580035e-06,
"loss": 0.1948,
"step": 90
},
{
"epoch": 0.2953119232189,
"grad_norm": 0.40793997049331665,
"learning_rate": 4.140449665372726e-06,
"loss": 0.2363,
"step": 100
},
{
"epoch": 0.32484311554079,
"grad_norm": 0.4665416479110718,
"learning_rate": 3.951920331829593e-06,
"loss": 0.2035,
"step": 110
},
{
"epoch": 0.35437430786268,
"grad_norm": 0.2355736345052719,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.1848,
"step": 120
},
{
"epoch": 0.37209302325581395,
"eval_loss": 0.7323265671730042,
"eval_runtime": 40.6881,
"eval_samples_per_second": 2.335,
"eval_steps_per_second": 2.335,
"step": 126
},
{
"epoch": 0.3839055001845699,
"grad_norm": 0.2675202786922455,
"learning_rate": 3.5365509723521046e-06,
"loss": 0.1868,
"step": 130
},
{
"epoch": 0.4134366925064599,
"grad_norm": 0.521271824836731,
"learning_rate": 3.3135418800158776e-06,
"loss": 0.2172,
"step": 140
},
{
"epoch": 0.4429678848283499,
"grad_norm": 0.3350629210472107,
"learning_rate": 3.0830295261835276e-06,
"loss": 0.1834,
"step": 150
},
{
"epoch": 0.4724990771502399,
"grad_norm": 0.27388790249824524,
"learning_rate": 2.8471399163028102e-06,
"loss": 0.1907,
"step": 160
},
{
"epoch": 0.49612403100775193,
"eval_loss": 0.7150933742523193,
"eval_runtime": 40.7241,
"eval_samples_per_second": 2.333,
"eval_steps_per_second": 2.333,
"step": 168
},
{
"epoch": 0.5020302694721299,
"grad_norm": 0.36372843384742737,
"learning_rate": 2.608048650020935e-06,
"loss": 0.1778,
"step": 170
},
{
"epoch": 0.53156146179402,
"grad_norm": 0.27681082487106323,
"learning_rate": 2.3679608557233246e-06,
"loss": 0.1482,
"step": 180
},
{
"epoch": 0.5610926541159099,
"grad_norm": 0.28764036297798157,
"learning_rate": 2.129090852730094e-06,
"loss": 0.1521,
"step": 190
},
{
"epoch": 0.5906238464378,
"grad_norm": 0.3415660560131073,
"learning_rate": 1.8936417287249447e-06,
"loss": 0.1634,
"step": 200
},
{
"epoch": 0.6201550387596899,
"grad_norm": 0.4162391722202301,
"learning_rate": 1.6637850207729484e-06,
"loss": 0.2028,
"step": 210
},
{
"epoch": 0.6201550387596899,
"eval_loss": 0.7053791880607605,
"eval_runtime": 40.7096,
"eval_samples_per_second": 2.334,
"eval_steps_per_second": 2.334,
"step": 210
},
{
"epoch": 0.64968623108158,
"grad_norm": 0.3097800016403198,
"learning_rate": 1.4416406873283273e-06,
"loss": 0.1813,
"step": 220
},
{
"epoch": 0.6792174234034699,
"grad_norm": 0.19078122079372406,
"learning_rate": 1.2292575559495143e-06,
"loss": 0.1805,
"step": 230
},
{
"epoch": 0.70874861572536,
"grad_norm": 0.3096450865268707,
"learning_rate": 1.0285944270513929e-06,
"loss": 0.1666,
"step": 240
},
{
"epoch": 0.7382798080472499,
"grad_norm": 0.31330665946006775,
"learning_rate": 8.415020079739761e-07,
"loss": 0.1457,
"step": 250
},
{
"epoch": 0.7441860465116279,
"eval_loss": 0.6999288201332092,
"eval_runtime": 40.6457,
"eval_samples_per_second": 2.337,
"eval_steps_per_second": 2.337,
"step": 252
},
{
"epoch": 0.7678110003691399,
"grad_norm": 0.28197088837623596,
"learning_rate": 6.697058439888285e-07,
"loss": 0.1615,
"step": 260
},
{
"epoch": 0.7973421926910299,
"grad_norm": 0.3669886589050293,
"learning_rate": 5.147904036698176e-07,
"loss": 0.143,
"step": 270
},
{
"epoch": 0.8268733850129198,
"grad_norm": 0.3345685303211212,
"learning_rate": 3.7818446540814014e-07,
"loss": 0.1705,
"step": 280
},
{
"epoch": 0.8564045773348099,
"grad_norm": 0.3705250322818756,
"learning_rate": 2.611479398511518e-07,
"loss": 0.1507,
"step": 290
},
{
"epoch": 0.8682170542635659,
"eval_loss": 0.6983290314674377,
"eval_runtime": 40.667,
"eval_samples_per_second": 2.336,
"eval_steps_per_second": 2.336,
"step": 294
},
{
"epoch": 0.8859357696566998,
"grad_norm": 0.3097485601902008,
"learning_rate": 1.6476024980107392e-07,
"loss": 0.1624,
"step": 300
},
{
"epoch": 0.9154669619785899,
"grad_norm": 0.23429091274738312,
"learning_rate": 8.991037474527897e-08,
"loss": 0.1345,
"step": 310
},
{
"epoch": 0.9449981543004798,
"grad_norm": 0.43888694047927856,
"learning_rate": 3.728865183701275e-08,
"loss": 0.1846,
"step": 320
},
{
"epoch": 0.9745293466223699,
"grad_norm": 0.35810691118240356,
"learning_rate": 7.380408945750483e-09,
"loss": 0.1663,
"step": 330
},
{
"epoch": 0.9922480620155039,
"eval_loss": 0.6983665227890015,
"eval_runtime": 40.727,
"eval_samples_per_second": 2.333,
"eval_steps_per_second": 2.333,
"step": 336
},
{
"epoch": 0.9981543004798819,
"step": 338,
"total_flos": 6.26298436248168e+18,
"train_loss": 0.1894198331607164,
"train_runtime": 51464.0129,
"train_samples_per_second": 0.211,
"train_steps_per_second": 0.007
}
],
"logging_steps": 10,
"max_steps": 338,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 42,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.26298436248168e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}