{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 201,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07462686567164178,
      "grad_norm": 0.555541455745697,
      "learning_rate": 7.058823529411765e-06,
      "loss": 1.9119,
      "step": 5
    },
    {
      "epoch": 0.14925373134328357,
      "grad_norm": 0.6075835227966309,
      "learning_rate": 1.5882352941176473e-05,
      "loss": 1.8409,
      "step": 10
    },
    {
      "epoch": 0.22388059701492538,
      "grad_norm": 0.5370075702667236,
      "learning_rate": 2.4705882352941174e-05,
      "loss": 1.8208,
      "step": 15
    },
    {
      "epoch": 0.29850746268656714,
      "grad_norm": 0.4811812937259674,
      "learning_rate": 2.9997072124327365e-05,
      "loss": 1.7775,
      "step": 20
    },
    {
      "epoch": 0.373134328358209,
      "grad_norm": 0.4486791491508484,
      "learning_rate": 2.9964146648174195e-05,
      "loss": 1.7374,
      "step": 25
    },
    {
      "epoch": 0.44776119402985076,
      "grad_norm": 0.46206820011138916,
      "learning_rate": 2.9894716440202756e-05,
      "loss": 1.6671,
      "step": 30
    },
    {
      "epoch": 0.5223880597014925,
      "grad_norm": 0.4804733991622925,
      "learning_rate": 2.978895087399522e-05,
      "loss": 1.6429,
      "step": 35
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 0.5382269620895386,
      "learning_rate": 2.9647107962502205e-05,
      "loss": 1.5443,
      "step": 40
    },
    {
      "epoch": 0.6716417910447762,
      "grad_norm": 0.5486980080604553,
      "learning_rate": 2.946953372862538e-05,
      "loss": 1.5278,
      "step": 45
    },
    {
      "epoch": 0.746268656716418,
      "grad_norm": 0.5177947282791138,
      "learning_rate": 2.9256661361101666e-05,
      "loss": 1.4649,
      "step": 50
    },
    {
      "epoch": 0.8208955223880597,
      "grad_norm": 0.6433851718902588,
      "learning_rate": 2.9009010157748082e-05,
      "loss": 1.4577,
      "step": 55
    },
    {
      "epoch": 0.8955223880597015,
      "grad_norm": 0.6647917032241821,
      "learning_rate": 2.8727184258645276e-05,
      "loss": 1.4189,
      "step": 60
    },
    {
      "epoch": 0.9701492537313433,
      "grad_norm": 0.7281741499900818,
      "learning_rate": 2.841187117235008e-05,
      "loss": 1.364,
      "step": 65
    },
    {
      "epoch": 1.044776119402985,
      "grad_norm": 0.8785673975944519,
      "learning_rate": 2.8063840098732322e-05,
      "loss": 1.2399,
      "step": 70
    },
    {
      "epoch": 1.1194029850746268,
      "grad_norm": 1.0373804569244385,
      "learning_rate": 2.768394005252739e-05,
      "loss": 1.2203,
      "step": 75
    },
    {
      "epoch": 1.1940298507462686,
      "grad_norm": 0.9147239923477173,
      "learning_rate": 2.7273097792182038e-05,
      "loss": 1.1517,
      "step": 80
    },
    {
      "epoch": 1.2686567164179103,
      "grad_norm": 1.0866647958755493,
      "learning_rate": 2.6832315559045938e-05,
      "loss": 1.1163,
      "step": 85
    },
    {
      "epoch": 1.3432835820895521,
      "grad_norm": 1.0413241386413574,
      "learning_rate": 2.6362668632424302e-05,
      "loss": 1.0881,
      "step": 90
    },
    {
      "epoch": 1.417910447761194,
      "grad_norm": 1.0607777833938599,
      "learning_rate": 2.586530270645584e-05,
      "loss": 1.0298,
      "step": 95
    },
    {
      "epoch": 1.4925373134328357,
      "grad_norm": 1.1334776878356934,
      "learning_rate": 2.534143109521518e-05,
      "loss": 0.9926,
      "step": 100
    },
    {
      "epoch": 1.5671641791044775,
      "grad_norm": 1.3326873779296875,
      "learning_rate": 2.4792331772857826e-05,
      "loss": 0.9665,
      "step": 105
    },
    {
      "epoch": 1.6417910447761193,
      "grad_norm": 1.3480701446533203,
      "learning_rate": 2.421934425602816e-05,
      "loss": 0.8914,
      "step": 110
    },
    {
      "epoch": 1.716417910447761,
      "grad_norm": 1.232779622077942,
      "learning_rate": 2.3623866336135806e-05,
      "loss": 0.8639,
      "step": 115
    },
    {
      "epoch": 1.7910447761194028,
      "grad_norm": 1.363123893737793,
      "learning_rate": 2.3007350669471866e-05,
      "loss": 0.8752,
      "step": 120
    },
    {
      "epoch": 1.8656716417910446,
      "grad_norm": 1.403874158859253,
      "learning_rate": 2.237130123348338e-05,
      "loss": 0.8342,
      "step": 125
    },
    {
      "epoch": 1.9402985074626866,
      "grad_norm": 1.5426722764968872,
      "learning_rate": 2.171726965785095e-05,
      "loss": 0.8016,
      "step": 130
    },
    {
      "epoch": 2.014925373134328,
      "grad_norm": 2.370891571044922,
      "learning_rate": 2.1046851439319587e-05,
      "loss": 0.7699,
      "step": 135
    },
    {
      "epoch": 2.08955223880597,
      "grad_norm": 1.4926725625991821,
      "learning_rate": 2.0361682049516837e-05,
      "loss": 0.7015,
      "step": 140
    },
    {
      "epoch": 2.1641791044776117,
      "grad_norm": 1.7558742761611938,
      "learning_rate": 1.966343294525297e-05,
      "loss": 0.6799,
      "step": 145
    },
    {
      "epoch": 2.2388059701492535,
      "grad_norm": 2.0960466861724854,
      "learning_rate": 1.8953807491036015e-05,
      "loss": 0.6271,
      "step": 150
    },
    {
      "epoch": 2.3134328358208958,
      "grad_norm": 1.3451811075210571,
      "learning_rate": 1.8234536803748657e-05,
      "loss": 0.6382,
      "step": 155
    },
    {
      "epoch": 2.388059701492537,
      "grad_norm": 1.6828371286392212,
      "learning_rate": 1.7507375529623748e-05,
      "loss": 0.5823,
      "step": 160
    },
    {
      "epoch": 2.4626865671641793,
      "grad_norm": 1.7383309602737427,
      "learning_rate": 1.6774097563820486e-05,
      "loss": 0.6091,
      "step": 165
    },
    {
      "epoch": 2.5373134328358207,
      "grad_norm": 1.7802586555480957,
      "learning_rate": 1.603649172304317e-05,
      "loss": 0.5538,
      "step": 170
    },
    {
      "epoch": 2.611940298507463,
      "grad_norm": 1.7985855340957642,
      "learning_rate": 1.5296357381759197e-05,
      "loss": 0.5826,
      "step": 175
    },
    {
      "epoch": 2.6865671641791042,
      "grad_norm": 1.6440534591674805,
      "learning_rate": 1.4555500082661603e-05,
      "loss": 0.5603,
      "step": 180
    },
    {
      "epoch": 2.7611940298507465,
      "grad_norm": 1.6013951301574707,
      "learning_rate": 1.3815727132084322e-05,
      "loss": 0.5432,
      "step": 185
    },
    {
      "epoch": 2.835820895522388,
      "grad_norm": 1.7982008457183838,
      "learning_rate": 1.3078843191115099e-05,
      "loss": 0.5615,
      "step": 190
    },
    {
      "epoch": 2.91044776119403,
      "grad_norm": 1.8606013059616089,
      "learning_rate": 1.234664587316141e-05,
      "loss": 0.5094,
      "step": 195
    },
    {
      "epoch": 2.9850746268656714,
      "grad_norm": 1.781867265701294,
      "learning_rate": 1.1620921358709076e-05,
      "loss": 0.5122,
      "step": 200
    }
  ],
  "logging_steps": 5,
  "max_steps": 335,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 2000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.415390346033234e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}