{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.050746268656716415,
"eval_steps": 500,
"global_step": 17,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0029850746268656717,
"grad_norm": 8.253287036233855,
"learning_rate": 9.090909090909091e-07,
"loss": 2.2247,
"step": 1
},
{
"epoch": 0.005970149253731343,
"grad_norm": 7.395570676912434,
"learning_rate": 1.8181818181818183e-06,
"loss": 2.1255,
"step": 2
},
{
"epoch": 0.008955223880597015,
"grad_norm": 8.228093047465732,
"learning_rate": 2.7272727272727272e-06,
"loss": 2.1028,
"step": 3
},
{
"epoch": 0.011940298507462687,
"grad_norm": 7.134217440402169,
"learning_rate": 3.6363636363636366e-06,
"loss": 2.0029,
"step": 4
},
{
"epoch": 0.014925373134328358,
"grad_norm": 5.937798020763942,
"learning_rate": 4.5454545454545455e-06,
"loss": 1.907,
"step": 5
},
{
"epoch": 0.01791044776119403,
"grad_norm": 5.559897081671283,
"learning_rate": 5.4545454545454545e-06,
"loss": 1.859,
"step": 6
},
{
"epoch": 0.020895522388059702,
"grad_norm": 5.0306441137723485,
"learning_rate": 6.363636363636364e-06,
"loss": 1.8198,
"step": 7
},
{
"epoch": 0.023880597014925373,
"grad_norm": 3.514372505025641,
"learning_rate": 7.272727272727273e-06,
"loss": 1.5112,
"step": 8
},
{
"epoch": 0.026865671641791045,
"grad_norm": 3.1293116769998894,
"learning_rate": 8.181818181818183e-06,
"loss": 1.4412,
"step": 9
},
{
"epoch": 0.029850746268656716,
"grad_norm": 2.7656396768471456,
"learning_rate": 9.090909090909091e-06,
"loss": 1.4058,
"step": 10
},
{
"epoch": 0.03283582089552239,
"grad_norm": 5.352646410247892,
"learning_rate": 1e-05,
"loss": 1.4371,
"step": 11
},
{
"epoch": 0.03582089552238806,
"grad_norm": 4.381930202952016,
"learning_rate": 9.99976495753613e-06,
"loss": 1.4176,
"step": 12
},
{
"epoch": 0.03880597014925373,
"grad_norm": 3.1514253661202765,
"learning_rate": 9.999059852242508e-06,
"loss": 1.2973,
"step": 13
},
{
"epoch": 0.041791044776119404,
"grad_norm": 2.6485142204402696,
"learning_rate": 9.997884750411004e-06,
"loss": 1.1784,
"step": 14
},
{
"epoch": 0.04477611940298507,
"grad_norm": 2.7946518315041007,
"learning_rate": 9.996239762521152e-06,
"loss": 1.3108,
"step": 15
},
{
"epoch": 0.04776119402985075,
"grad_norm": 2.6114018043960003,
"learning_rate": 9.994125043229753e-06,
"loss": 1.102,
"step": 16
},
{
"epoch": 0.050746268656716415,
"grad_norm": 2.3027638181918495,
"learning_rate": 9.991540791356342e-06,
"loss": 1.0699,
"step": 17
}
],
"logging_steps": 1.0,
"max_steps": 335,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 17,
"total_flos": 1382858137600.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}