{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 201,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07462686567164178,
      "grad_norm": 0.6940034031867981,
      "learning_rate": 7.058823529411765e-06,
      "loss": 1.9269,
      "step": 5
    },
    {
      "epoch": 0.14925373134328357,
      "grad_norm": 0.6618649959564209,
      "learning_rate": 1.5882352941176473e-05,
      "loss": 1.8649,
      "step": 10
    },
    {
      "epoch": 0.22388059701492538,
      "grad_norm": 0.46657705307006836,
      "learning_rate": 2.4705882352941174e-05,
      "loss": 1.8309,
      "step": 15
    },
    {
      "epoch": 0.29850746268656714,
      "grad_norm": 0.48200997710227966,
      "learning_rate": 2.9997072124327365e-05,
      "loss": 1.7657,
      "step": 20
    },
    {
      "epoch": 0.373134328358209,
      "grad_norm": 0.46101176738739014,
      "learning_rate": 2.9964146648174195e-05,
      "loss": 1.6862,
      "step": 25
    },
    {
      "epoch": 0.44776119402985076,
      "grad_norm": 0.5005717873573303,
      "learning_rate": 2.9894716440202756e-05,
      "loss": 1.6617,
      "step": 30
    },
    {
      "epoch": 0.5223880597014925,
      "grad_norm": 0.46145084500312805,
      "learning_rate": 2.978895087399522e-05,
      "loss": 1.6202,
      "step": 35
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 0.5659571886062622,
      "learning_rate": 2.9647107962502205e-05,
      "loss": 1.6208,
      "step": 40
    },
    {
      "epoch": 0.6716417910447762,
      "grad_norm": 0.5763270258903503,
      "learning_rate": 2.946953372862538e-05,
      "loss": 1.5594,
      "step": 45
    },
    {
      "epoch": 0.746268656716418,
      "grad_norm": 0.5741795301437378,
      "learning_rate": 2.9256661361101666e-05,
      "loss": 1.5137,
      "step": 50
    },
    {
      "epoch": 0.8208955223880597,
      "grad_norm": 0.6271291971206665,
      "learning_rate": 2.9009010157748082e-05,
      "loss": 1.4261,
      "step": 55
    },
    {
      "epoch": 0.8955223880597015,
      "grad_norm": 0.7769139409065247,
      "learning_rate": 2.8727184258645276e-05,
      "loss": 1.3797,
      "step": 60
    },
    {
      "epoch": 0.9701492537313433,
      "grad_norm": 0.7528422474861145,
      "learning_rate": 2.841187117235008e-05,
      "loss": 1.309,
      "step": 65
    },
    {
      "epoch": 1.044776119402985,
      "grad_norm": 1.1243112087249756,
      "learning_rate": 2.8063840098732322e-05,
      "loss": 1.2633,
      "step": 70
    },
    {
      "epoch": 1.1194029850746268,
      "grad_norm": 1.0037925243377686,
      "learning_rate": 2.768394005252739e-05,
      "loss": 1.1682,
      "step": 75
    },
    {
      "epoch": 1.1940298507462686,
      "grad_norm": 0.8513566255569458,
      "learning_rate": 2.7273097792182038e-05,
      "loss": 1.1586,
      "step": 80
    },
    {
      "epoch": 1.2686567164179103,
      "grad_norm": 0.923832356929779,
      "learning_rate": 2.6832315559045938e-05,
      "loss": 1.0996,
      "step": 85
    },
    {
      "epoch": 1.3432835820895521,
      "grad_norm": 1.0570091009140015,
      "learning_rate": 2.6362668632424302e-05,
      "loss": 1.0817,
      "step": 90
    },
    {
      "epoch": 1.417910447761194,
      "grad_norm": 1.1966323852539062,
      "learning_rate": 2.586530270645584e-05,
      "loss": 1.0424,
      "step": 95
    },
    {
      "epoch": 1.4925373134328357,
      "grad_norm": 1.1580686569213867,
      "learning_rate": 2.534143109521518e-05,
      "loss": 1.0203,
      "step": 100
    },
    {
      "epoch": 1.5671641791044775,
      "grad_norm": 1.1940172910690308,
      "learning_rate": 2.4792331772857826e-05,
      "loss": 0.9807,
      "step": 105
    },
    {
      "epoch": 1.6417910447761193,
      "grad_norm": 1.1056792736053467,
      "learning_rate": 2.421934425602816e-05,
      "loss": 0.8966,
      "step": 110
    },
    {
      "epoch": 1.716417910447761,
      "grad_norm": 1.3053609132766724,
      "learning_rate": 2.3623866336135806e-05,
      "loss": 0.8862,
      "step": 115
    },
    {
      "epoch": 1.7910447761194028,
      "grad_norm": 1.788570761680603,
      "learning_rate": 2.3007350669471866e-05,
      "loss": 0.877,
      "step": 120
    },
    {
      "epoch": 1.8656716417910446,
      "grad_norm": 1.2819461822509766,
      "learning_rate": 2.237130123348338e-05,
      "loss": 0.8625,
      "step": 125
    },
    {
      "epoch": 1.9402985074626866,
      "grad_norm": 1.5122873783111572,
      "learning_rate": 2.171726965785095e-05,
      "loss": 0.8049,
      "step": 130
    },
    {
      "epoch": 2.014925373134328,
      "grad_norm": 1.8568962812423706,
      "learning_rate": 2.1046851439319587e-05,
      "loss": 0.7773,
      "step": 135
    },
    {
      "epoch": 2.08955223880597,
      "grad_norm": 1.5969818830490112,
      "learning_rate": 2.0361682049516837e-05,
      "loss": 0.6641,
      "step": 140
    },
    {
      "epoch": 2.1641791044776117,
      "grad_norm": 1.5156569480895996,
      "learning_rate": 1.966343294525297e-05,
      "loss": 0.6781,
      "step": 145
    },
    {
      "epoch": 2.2388059701492535,
      "grad_norm": 1.4276643991470337,
      "learning_rate": 1.8953807491036015e-05,
      "loss": 0.6448,
      "step": 150
    },
    {
      "epoch": 2.3134328358208958,
      "grad_norm": 1.506522536277771,
      "learning_rate": 1.8234536803748657e-05,
      "loss": 0.6514,
      "step": 155
    },
    {
      "epoch": 2.388059701492537,
      "grad_norm": 1.7856731414794922,
      "learning_rate": 1.7507375529623748e-05,
      "loss": 0.6337,
      "step": 160
    },
    {
      "epoch": 2.4626865671641793,
      "grad_norm": 1.6359249353408813,
      "learning_rate": 1.6774097563820486e-05,
      "loss": 0.6236,
      "step": 165
    },
    {
      "epoch": 2.5373134328358207,
      "grad_norm": 1.951614499092102,
      "learning_rate": 1.603649172304317e-05,
      "loss": 0.6058,
      "step": 170
    },
    {
      "epoch": 2.611940298507463,
      "grad_norm": 1.7510719299316406,
      "learning_rate": 1.5296357381759197e-05,
      "loss": 0.5782,
      "step": 175
    },
    {
      "epoch": 2.6865671641791042,
      "grad_norm": 2.2878072261810303,
      "learning_rate": 1.4555500082661603e-05,
      "loss": 0.5659,
      "step": 180
    },
    {
      "epoch": 2.7611940298507465,
      "grad_norm": 2.4104154109954834,
      "learning_rate": 1.3815727132084322e-05,
      "loss": 0.58,
      "step": 185
    },
    {
      "epoch": 2.835820895522388,
      "grad_norm": 1.8931833505630493,
      "learning_rate": 1.3078843191115099e-05,
      "loss": 0.5332,
      "step": 190
    },
    {
      "epoch": 2.91044776119403,
      "grad_norm": 1.8558038473129272,
      "learning_rate": 1.234664587316141e-05,
      "loss": 0.5385,
      "step": 195
    },
    {
      "epoch": 2.9850746268656714,
      "grad_norm": 1.8396011590957642,
      "learning_rate": 1.1620921358709076e-05,
      "loss": 0.5161,
      "step": 200
    }
  ],
  "logging_steps": 5,
  "max_steps": 335,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 2000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.5088951811178496e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}