{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 100,
"global_step": 936,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.21367521367521367,
"grad_norm": 0.20097456629216162,
"learning_rate": 9.998747147528374e-05,
"loss": 0.0895,
"step": 100
},
{
"epoch": 0.21367521367521367,
"eval_loss": 0.0701778456568718,
"eval_runtime": 32.5253,
"eval_samples_per_second": 0.584,
"eval_steps_per_second": 0.154,
"step": 100
},
{
"epoch": 0.42735042735042733,
"grad_norm": 0.47146535267265166,
"learning_rate": 9.614025209023084e-05,
"loss": 0.055,
"step": 200
},
{
"epoch": 0.42735042735042733,
"eval_loss": 0.06085389107465744,
"eval_runtime": 18.8879,
"eval_samples_per_second": 1.006,
"eval_steps_per_second": 0.265,
"step": 200
},
{
"epoch": 0.6410256410256411,
"grad_norm": 0.7842776649469202,
"learning_rate": 8.594394348255238e-05,
"loss": 0.0355,
"step": 300
},
{
"epoch": 0.6410256410256411,
"eval_loss": 0.022945081815123558,
"eval_runtime": 17.1293,
"eval_samples_per_second": 1.109,
"eval_steps_per_second": 0.292,
"step": 300
},
{
"epoch": 0.8547008547008547,
"grad_norm": 0.0404153814396507,
"learning_rate": 7.080159982206471e-05,
"loss": 0.0193,
"step": 400
},
{
"epoch": 0.8547008547008547,
"eval_loss": 0.004422679543495178,
"eval_runtime": 17.1656,
"eval_samples_per_second": 1.107,
"eval_steps_per_second": 0.291,
"step": 400
},
{
"epoch": 1.0683760683760684,
"grad_norm": 0.007439995471600861,
"learning_rate": 5.279687012637799e-05,
"loss": 0.0097,
"step": 500
},
{
"epoch": 1.0683760683760684,
"eval_loss": 0.02561795338988304,
"eval_runtime": 17.0727,
"eval_samples_per_second": 1.113,
"eval_steps_per_second": 0.293,
"step": 500
},
{
"epoch": 1.282051282051282,
"grad_norm": 0.012610177137456059,
"learning_rate": 3.4407279551696846e-05,
"loss": 0.005,
"step": 600
},
{
"epoch": 1.282051282051282,
"eval_loss": 0.0007839644094929099,
"eval_runtime": 17.103,
"eval_samples_per_second": 1.111,
"eval_steps_per_second": 0.292,
"step": 600
},
{
"epoch": 1.4957264957264957,
"grad_norm": 0.005782197107455585,
"learning_rate": 1.8163311700448898e-05,
"loss": 0.0029,
"step": 700
},
{
"epoch": 1.4957264957264957,
"eval_loss": 0.0003309359890408814,
"eval_runtime": 17.0932,
"eval_samples_per_second": 1.112,
"eval_steps_per_second": 0.293,
"step": 700
},
{
"epoch": 1.7094017094017095,
"grad_norm": 0.0027516000176519896,
"learning_rate": 6.300203628022272e-06,
"loss": 0.0003,
"step": 800
},
{
"epoch": 1.7094017094017095,
"eval_loss": 0.00017731569823808968,
"eval_runtime": 17.0859,
"eval_samples_per_second": 1.112,
"eval_steps_per_second": 0.293,
"step": 800
},
{
"epoch": 1.9230769230769231,
"grad_norm": 0.009537392750392705,
"learning_rate": 4.5036799358910697e-07,
"loss": 0.0002,
"step": 900
},
{
"epoch": 1.9230769230769231,
"eval_loss": 0.0001645805168664083,
"eval_runtime": 18.8331,
"eval_samples_per_second": 1.009,
"eval_steps_per_second": 0.265,
"step": 900
},
{
"epoch": 2.0,
"step": 936,
"total_flos": 34102773284864.0,
"train_loss": 0.023250845168382883,
"train_runtime": 4455.8048,
"train_samples_per_second": 0.84,
"train_steps_per_second": 0.21
}
],
"logging_steps": 100,
"max_steps": 936,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 34102773284864.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}