{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 94,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10638297872340426,
"grad_norm": 6.875,
"learning_rate": 8.8845e-06,
"loss": 2.3619,
"step": 5
},
{
"epoch": 0.2127659574468085,
"grad_norm": 4.96875,
"learning_rate": 1.9990125e-05,
"loss": 2.1355,
"step": 10
},
{
"epoch": 0.3191489361702128,
"grad_norm": 4.75,
"learning_rate": 3.1095750000000005e-05,
"loss": 1.9609,
"step": 15
},
{
"epoch": 0.425531914893617,
"grad_norm": 3.9375,
"learning_rate": 4.2201375e-05,
"loss": 1.8411,
"step": 20
},
{
"epoch": 0.5319148936170213,
"grad_norm": 3.890625,
"learning_rate": 5.3307e-05,
"loss": 1.7944,
"step": 25
},
{
"epoch": 0.6382978723404256,
"grad_norm": 3.9375,
"learning_rate": 6.4412625e-05,
"loss": 1.7092,
"step": 30
},
{
"epoch": 0.7446808510638298,
"grad_norm": 3.703125,
"learning_rate": 7.551825e-05,
"loss": 1.679,
"step": 35
},
{
"epoch": 0.851063829787234,
"grad_norm": 3.75,
"learning_rate": 7.753475804658967e-05,
"loss": 1.65,
"step": 40
},
{
"epoch": 0.9574468085106383,
"grad_norm": 3.28125,
"learning_rate": 7.670842209011892e-05,
"loss": 1.6324,
"step": 45
},
{
"epoch": 1.0,
"eval_loss": 1.5718683004379272,
"eval_runtime": 5.0419,
"eval_samples_per_second": 39.668,
"eval_steps_per_second": 39.668,
"step": 47
},
{
"epoch": 1.0638297872340425,
"grad_norm": 3.5625,
"learning_rate": 7.526567749442305e-05,
"loss": 1.4548,
"step": 50
},
{
"epoch": 1.1702127659574468,
"grad_norm": 3.203125,
"learning_rate": 7.323814868052365e-05,
"loss": 1.3195,
"step": 55
},
{
"epoch": 1.2765957446808511,
"grad_norm": 3.515625,
"learning_rate": 7.067027832041926e-05,
"loss": 1.3081,
"step": 60
},
{
"epoch": 1.3829787234042552,
"grad_norm": 3.28125,
"learning_rate": 6.761835317036523e-05,
"loss": 1.3275,
"step": 65
},
{
"epoch": 1.4893617021276595,
"grad_norm": 3.140625,
"learning_rate": 6.414927028626436e-05,
"loss": 1.2813,
"step": 70
},
{
"epoch": 1.5957446808510638,
"grad_norm": 3.265625,
"learning_rate": 6.033907066526388e-05,
"loss": 1.2849,
"step": 75
},
{
"epoch": 1.702127659574468,
"grad_norm": 2.859375,
"learning_rate": 5.627127245558645e-05,
"loss": 1.2558,
"step": 80
},
{
"epoch": 1.8085106382978724,
"grad_norm": 2.90625,
"learning_rate": 5.203504027001068e-05,
"loss": 1.2672,
"step": 85
},
{
"epoch": 1.9148936170212765,
"grad_norm": 2.875,
"learning_rate": 4.7723230730964036e-05,
"loss": 1.2442,
"step": 90
},
{
"epoch": 2.0,
"eval_loss": 1.4226864576339722,
"eval_runtime": 4.9284,
"eval_samples_per_second": 40.581,
"eval_steps_per_second": 40.581,
"step": 94
}
],
"logging_steps": 5,
"max_steps": 141,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.124053825880064e+17,
"train_batch_size": 100,
"trial_name": null,
"trial_params": null
}