{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.20298507462686566,
"eval_steps": 500,
"global_step": 68,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0029850746268656717,
"grad_norm": 8.253287036233855,
"learning_rate": 9.090909090909091e-07,
"loss": 2.2247,
"step": 1
},
{
"epoch": 0.005970149253731343,
"grad_norm": 7.395570676912434,
"learning_rate": 1.8181818181818183e-06,
"loss": 2.1255,
"step": 2
},
{
"epoch": 0.008955223880597015,
"grad_norm": 8.228093047465732,
"learning_rate": 2.7272727272727272e-06,
"loss": 2.1028,
"step": 3
},
{
"epoch": 0.011940298507462687,
"grad_norm": 7.134217440402169,
"learning_rate": 3.6363636363636366e-06,
"loss": 2.0029,
"step": 4
},
{
"epoch": 0.014925373134328358,
"grad_norm": 5.937798020763942,
"learning_rate": 4.5454545454545455e-06,
"loss": 1.907,
"step": 5
},
{
"epoch": 0.01791044776119403,
"grad_norm": 5.559897081671283,
"learning_rate": 5.4545454545454545e-06,
"loss": 1.859,
"step": 6
},
{
"epoch": 0.020895522388059702,
"grad_norm": 5.0306441137723485,
"learning_rate": 6.363636363636364e-06,
"loss": 1.8198,
"step": 7
},
{
"epoch": 0.023880597014925373,
"grad_norm": 3.514372505025641,
"learning_rate": 7.272727272727273e-06,
"loss": 1.5112,
"step": 8
},
{
"epoch": 0.026865671641791045,
"grad_norm": 3.1293116769998894,
"learning_rate": 8.181818181818183e-06,
"loss": 1.4412,
"step": 9
},
{
"epoch": 0.029850746268656716,
"grad_norm": 2.7656396768471456,
"learning_rate": 9.090909090909091e-06,
"loss": 1.4058,
"step": 10
},
{
"epoch": 0.03283582089552239,
"grad_norm": 5.352646410247892,
"learning_rate": 1e-05,
"loss": 1.4371,
"step": 11
},
{
"epoch": 0.03582089552238806,
"grad_norm": 4.381930202952016,
"learning_rate": 9.99976495753613e-06,
"loss": 1.4176,
"step": 12
},
{
"epoch": 0.03880597014925373,
"grad_norm": 3.1514253661202765,
"learning_rate": 9.999059852242508e-06,
"loss": 1.2973,
"step": 13
},
{
"epoch": 0.041791044776119404,
"grad_norm": 2.6485142204402696,
"learning_rate": 9.997884750411004e-06,
"loss": 1.1784,
"step": 14
},
{
"epoch": 0.04477611940298507,
"grad_norm": 2.7946518315041007,
"learning_rate": 9.996239762521152e-06,
"loss": 1.3108,
"step": 15
},
{
"epoch": 0.04776119402985075,
"grad_norm": 2.6114018043960003,
"learning_rate": 9.994125043229753e-06,
"loss": 1.102,
"step": 16
},
{
"epoch": 0.050746268656716415,
"grad_norm": 2.3027638181918495,
"learning_rate": 9.991540791356342e-06,
"loss": 1.0699,
"step": 17
},
{
"epoch": 0.05373134328358209,
"grad_norm": 2.204185879725156,
"learning_rate": 9.98848724986449e-06,
"loss": 1.1473,
"step": 18
},
{
"epoch": 0.056716417910447764,
"grad_norm": 2.2090761147091817,
"learning_rate": 9.98496470583896e-06,
"loss": 1.1816,
"step": 19
},
{
"epoch": 0.05970149253731343,
"grad_norm": 2.0223479706782737,
"learning_rate": 9.980973490458728e-06,
"loss": 1.1217,
"step": 20
},
{
"epoch": 0.0626865671641791,
"grad_norm": 2.080473927553376,
"learning_rate": 9.976513978965829e-06,
"loss": 1.0251,
"step": 21
},
{
"epoch": 0.06567164179104477,
"grad_norm": 2.3488846800450744,
"learning_rate": 9.971586590630094e-06,
"loss": 1.0279,
"step": 22
},
{
"epoch": 0.06865671641791045,
"grad_norm": 2.1532960090217212,
"learning_rate": 9.966191788709716e-06,
"loss": 1.0126,
"step": 23
},
{
"epoch": 0.07164179104477612,
"grad_norm": 2.055129377186085,
"learning_rate": 9.960330080407712e-06,
"loss": 1.0072,
"step": 24
},
{
"epoch": 0.07462686567164178,
"grad_norm": 2.156877426415811,
"learning_rate": 9.954002016824226e-06,
"loss": 1.0349,
"step": 25
},
{
"epoch": 0.07761194029850746,
"grad_norm": 1.9914221696004062,
"learning_rate": 9.947208192904722e-06,
"loss": 1.0244,
"step": 26
},
{
"epoch": 0.08059701492537313,
"grad_norm": 1.9887204088063186,
"learning_rate": 9.939949247384046e-06,
"loss": 0.9912,
"step": 27
},
{
"epoch": 0.08358208955223881,
"grad_norm": 2.110799590941847,
"learning_rate": 9.93222586272637e-06,
"loss": 1.0377,
"step": 28
},
{
"epoch": 0.08656716417910448,
"grad_norm": 2.0583073599588473,
"learning_rate": 9.924038765061042e-06,
"loss": 1.0139,
"step": 29
},
{
"epoch": 0.08955223880597014,
"grad_norm": 2.1723065119138196,
"learning_rate": 9.915388724114301e-06,
"loss": 0.9376,
"step": 30
},
{
"epoch": 0.09253731343283582,
"grad_norm": 2.064005981064233,
"learning_rate": 9.906276553136924e-06,
"loss": 1.0226,
"step": 31
},
{
"epoch": 0.0955223880597015,
"grad_norm": 2.122144912833922,
"learning_rate": 9.896703108827758e-06,
"loss": 0.9483,
"step": 32
},
{
"epoch": 0.09850746268656717,
"grad_norm": 2.1876975651489246,
"learning_rate": 9.886669291253178e-06,
"loss": 0.8942,
"step": 33
},
{
"epoch": 0.10149253731343283,
"grad_norm": 1.9606368757720636,
"learning_rate": 9.876176043762467e-06,
"loss": 0.885,
"step": 34
},
{
"epoch": 0.1044776119402985,
"grad_norm": 2.27926831852693,
"learning_rate": 9.86522435289912e-06,
"loss": 0.9491,
"step": 35
},
{
"epoch": 0.10746268656716418,
"grad_norm": 2.030550748336965,
"learning_rate": 9.853815248308101e-06,
"loss": 0.982,
"step": 36
},
{
"epoch": 0.11044776119402985,
"grad_norm": 2.0075365017841995,
"learning_rate": 9.841949802639031e-06,
"loss": 0.9843,
"step": 37
},
{
"epoch": 0.11343283582089553,
"grad_norm": 2.459727689278767,
"learning_rate": 9.829629131445342e-06,
"loss": 0.8796,
"step": 38
},
{
"epoch": 0.11641791044776119,
"grad_norm": 1.980360220017672,
"learning_rate": 9.816854393079402e-06,
"loss": 0.8793,
"step": 39
},
{
"epoch": 0.11940298507462686,
"grad_norm": 2.0803429090292433,
"learning_rate": 9.803626788583603e-06,
"loss": 0.8749,
"step": 40
},
{
"epoch": 0.12238805970149254,
"grad_norm": 2.0518013006773446,
"learning_rate": 9.789947561577445e-06,
"loss": 0.9123,
"step": 41
},
{
"epoch": 0.1253731343283582,
"grad_norm": 2.0868313477992695,
"learning_rate": 9.775817998140615e-06,
"loss": 0.884,
"step": 42
},
{
"epoch": 0.12835820895522387,
"grad_norm": 2.133757511141339,
"learning_rate": 9.761239426692077e-06,
"loss": 0.8846,
"step": 43
},
{
"epoch": 0.13134328358208955,
"grad_norm": 2.5042273900185332,
"learning_rate": 9.74621321786517e-06,
"loss": 0.9755,
"step": 44
},
{
"epoch": 0.13432835820895522,
"grad_norm": 1.9478417312433638,
"learning_rate": 9.730740784378755e-06,
"loss": 0.8857,
"step": 45
},
{
"epoch": 0.1373134328358209,
"grad_norm": 1.9676701142325428,
"learning_rate": 9.71482358090438e-06,
"loss": 0.8642,
"step": 46
},
{
"epoch": 0.14029850746268657,
"grad_norm": 1.920433325570768,
"learning_rate": 9.698463103929542e-06,
"loss": 0.8977,
"step": 47
},
{
"epoch": 0.14328358208955225,
"grad_norm": 1.9460499143315086,
"learning_rate": 9.681660891616967e-06,
"loss": 0.902,
"step": 48
},
{
"epoch": 0.14626865671641792,
"grad_norm": 1.968155074678708,
"learning_rate": 9.664418523660004e-06,
"loss": 0.9046,
"step": 49
},
{
"epoch": 0.14925373134328357,
"grad_norm": 1.9490820429685933,
"learning_rate": 9.646737621134112e-06,
"loss": 0.943,
"step": 50
},
{
"epoch": 0.15223880597014924,
"grad_norm": 2.08429080101051,
"learning_rate": 9.628619846344453e-06,
"loss": 0.9337,
"step": 51
},
{
"epoch": 0.15522388059701492,
"grad_norm": 1.930901797857114,
"learning_rate": 9.610066902669593e-06,
"loss": 0.8928,
"step": 52
},
{
"epoch": 0.1582089552238806,
"grad_norm": 1.9013217162350948,
"learning_rate": 9.591080534401371e-06,
"loss": 0.8905,
"step": 53
},
{
"epoch": 0.16119402985074627,
"grad_norm": 1.9982220077949842,
"learning_rate": 9.571662526580898e-06,
"loss": 0.8843,
"step": 54
},
{
"epoch": 0.16417910447761194,
"grad_norm": 1.9851369521318882,
"learning_rate": 9.551814704830734e-06,
"loss": 0.935,
"step": 55
},
{
"epoch": 0.16716417910447762,
"grad_norm": 2.1245353835937437,
"learning_rate": 9.531538935183252e-06,
"loss": 0.8666,
"step": 56
},
{
"epoch": 0.1701492537313433,
"grad_norm": 2.0723319083497245,
"learning_rate": 9.51083712390519e-06,
"loss": 0.9155,
"step": 57
},
{
"epoch": 0.17313432835820897,
"grad_norm": 1.7064471958774803,
"learning_rate": 9.48971121731844e-06,
"loss": 0.7976,
"step": 58
},
{
"epoch": 0.1761194029850746,
"grad_norm": 2.205553839755198,
"learning_rate": 9.468163201617063e-06,
"loss": 0.8575,
"step": 59
},
{
"epoch": 0.1791044776119403,
"grad_norm": 2.123341193411675,
"learning_rate": 9.446195102680531e-06,
"loss": 0.8779,
"step": 60
},
{
"epoch": 0.18208955223880596,
"grad_norm": 2.019140032242156,
"learning_rate": 9.423808985883289e-06,
"loss": 0.8087,
"step": 61
},
{
"epoch": 0.18507462686567164,
"grad_norm": 1.9693330173987202,
"learning_rate": 9.401006955900555e-06,
"loss": 0.8838,
"step": 62
},
{
"epoch": 0.1880597014925373,
"grad_norm": 1.7933631080982435,
"learning_rate": 9.377791156510456e-06,
"loss": 0.8881,
"step": 63
},
{
"epoch": 0.191044776119403,
"grad_norm": 2.064611639976522,
"learning_rate": 9.35416377039246e-06,
"loss": 0.9213,
"step": 64
},
{
"epoch": 0.19402985074626866,
"grad_norm": 2.108631144160596,
"learning_rate": 9.330127018922195e-06,
"loss": 0.8718,
"step": 65
},
{
"epoch": 0.19701492537313434,
"grad_norm": 2.0324645592765624,
"learning_rate": 9.305683161962569e-06,
"loss": 0.9008,
"step": 66
},
{
"epoch": 0.2,
"grad_norm": 2.433917861345567,
"learning_rate": 9.280834497651334e-06,
"loss": 0.8843,
"step": 67
},
{
"epoch": 0.20298507462686566,
"grad_norm": 2.1425405132153377,
"learning_rate": 9.255583362184998e-06,
"loss": 0.91,
"step": 68
}
],
"logging_steps": 1.0,
"max_steps": 335,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 17,
"total_flos": 5685247049728.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}