{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 94,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10638297872340426,
"grad_norm": 6.875,
"learning_rate": 8.8845e-06,
"loss": 2.3618,
"step": 5
},
{
"epoch": 0.2127659574468085,
"grad_norm": 4.96875,
"learning_rate": 1.9990125e-05,
"loss": 2.1356,
"step": 10
},
{
"epoch": 0.3191489361702128,
"grad_norm": 4.71875,
"learning_rate": 3.1095750000000005e-05,
"loss": 1.9607,
"step": 15
},
{
"epoch": 0.425531914893617,
"grad_norm": 3.9375,
"learning_rate": 4.2201375e-05,
"loss": 1.8407,
"step": 20
},
{
"epoch": 0.5319148936170213,
"grad_norm": 3.890625,
"learning_rate": 5.3307e-05,
"loss": 1.7943,
"step": 25
},
{
"epoch": 0.6382978723404256,
"grad_norm": 3.921875,
"learning_rate": 6.4412625e-05,
"loss": 1.7089,
"step": 30
},
{
"epoch": 0.7446808510638298,
"grad_norm": 3.703125,
"learning_rate": 7.551825e-05,
"loss": 1.679,
"step": 35
},
{
"epoch": 0.851063829787234,
"grad_norm": 3.765625,
"learning_rate": 7.753475804658967e-05,
"loss": 1.6499,
"step": 40
},
{
"epoch": 0.9574468085106383,
"grad_norm": 3.28125,
"learning_rate": 7.670842209011892e-05,
"loss": 1.6324,
"step": 45
},
{
"epoch": 1.0,
"eval_loss": 1.5716543197631836,
"eval_runtime": 5.0246,
"eval_samples_per_second": 39.804,
"eval_steps_per_second": 39.804,
"step": 47
},
{
"epoch": 1.0638297872340425,
"grad_norm": 3.59375,
"learning_rate": 7.526567749442305e-05,
"loss": 1.4545,
"step": 50
},
{
"epoch": 1.1702127659574468,
"grad_norm": 3.171875,
"learning_rate": 7.323814868052365e-05,
"loss": 1.319,
"step": 55
},
{
"epoch": 1.2765957446808511,
"grad_norm": 3.5,
"learning_rate": 7.067027832041926e-05,
"loss": 1.3079,
"step": 60
},
{
"epoch": 1.3829787234042552,
"grad_norm": 3.25,
"learning_rate": 6.761835317036523e-05,
"loss": 1.3272,
"step": 65
},
{
"epoch": 1.4893617021276595,
"grad_norm": 3.125,
"learning_rate": 6.414927028626436e-05,
"loss": 1.2809,
"step": 70
},
{
"epoch": 1.5957446808510638,
"grad_norm": 3.265625,
"learning_rate": 6.033907066526388e-05,
"loss": 1.2853,
"step": 75
},
{
"epoch": 1.702127659574468,
"grad_norm": 2.875,
"learning_rate": 5.627127245558645e-05,
"loss": 1.256,
"step": 80
},
{
"epoch": 1.8085106382978724,
"grad_norm": 2.90625,
"learning_rate": 5.203504027001068e-05,
"loss": 1.2676,
"step": 85
},
{
"epoch": 1.9148936170212765,
"grad_norm": 2.890625,
"learning_rate": 4.7723230730964036e-05,
"loss": 1.2441,
"step": 90
},
{
"epoch": 2.0,
"eval_loss": 1.4222824573516846,
"eval_runtime": 4.9323,
"eval_samples_per_second": 40.549,
"eval_steps_per_second": 40.549,
"step": 94
}
],
"logging_steps": 5,
"max_steps": 141,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.124053825880064e+17,
"train_batch_size": 100,
"trial_name": null,
"trial_params": null
}