{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10101010101010101,
      "grad_norm": 0.7899569869041443,
      "learning_rate": 9.230769230769232e-06,
      "loss": 1.8635,
      "step": 5
    },
    {
      "epoch": 0.20202020202020202,
      "grad_norm": 0.5808775424957275,
      "learning_rate": 2.076923076923077e-05,
      "loss": 1.8714,
      "step": 10
    },
    {
      "epoch": 0.30303030303030304,
      "grad_norm": 0.5451018214225769,
      "learning_rate": 2.9998682174346518e-05,
      "loss": 1.8024,
      "step": 15
    },
    {
      "epoch": 0.40404040404040403,
      "grad_norm": 0.5068315267562866,
      "learning_rate": 2.995258258522044e-05,
      "loss": 1.7618,
      "step": 20
    },
    {
      "epoch": 0.5050505050505051,
      "grad_norm": 0.5028562545776367,
      "learning_rate": 2.9840823085861047e-05,
      "loss": 1.6499,
      "step": 25
    },
    {
      "epoch": 0.6060606060606061,
      "grad_norm": 0.49116823077201843,
      "learning_rate": 2.9663894435550477e-05,
      "loss": 1.5963,
      "step": 30
    },
    {
      "epoch": 0.7070707070707071,
      "grad_norm": 0.5087969303131104,
      "learning_rate": 2.9422573564911305e-05,
      "loss": 1.5512,
      "step": 35
    },
    {
      "epoch": 0.8080808080808081,
      "grad_norm": 0.5537960529327393,
      "learning_rate": 2.911792016424208e-05,
      "loss": 1.4422,
      "step": 40
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 0.6070820093154907,
      "learning_rate": 2.8751272030196054e-05,
      "loss": 1.4552,
      "step": 45
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.8924161791801453,
      "learning_rate": 2.832423919123698e-05,
      "loss": 1.3777,
      "step": 50
    },
    {
      "epoch": 1.101010101010101,
      "grad_norm": 0.8165251612663269,
      "learning_rate": 2.7838696837668128e-05,
      "loss": 1.2406,
      "step": 55
    },
    {
      "epoch": 1.202020202020202,
      "grad_norm": 0.8177559971809387,
      "learning_rate": 2.7296777087280396e-05,
      "loss": 1.1993,
      "step": 60
    },
    {
      "epoch": 1.303030303030303,
      "grad_norm": 0.9653021097183228,
      "learning_rate": 2.6700859622778184e-05,
      "loss": 1.1785,
      "step": 65
    },
    {
      "epoch": 1.404040404040404,
      "grad_norm": 0.9458374381065369,
      "learning_rate": 2.605356124209607e-05,
      "loss": 1.0942,
      "step": 70
    },
    {
      "epoch": 1.5050505050505052,
      "grad_norm": 1.3656435012817383,
      "learning_rate": 2.5357724367493052e-05,
      "loss": 1.0857,
      "step": 75
    },
    {
      "epoch": 1.606060606060606,
      "grad_norm": 1.0354753732681274,
      "learning_rate": 2.4616404563883302e-05,
      "loss": 0.9707,
      "step": 80
    },
    {
      "epoch": 1.7070707070707072,
      "grad_norm": 1.0912060737609863,
      "learning_rate": 2.3832857121212992e-05,
      "loss": 0.9663,
      "step": 85
    },
    {
      "epoch": 1.808080808080808,
      "grad_norm": 1.1176700592041016,
      "learning_rate": 2.3010522759802922e-05,
      "loss": 0.8917,
      "step": 90
    },
    {
      "epoch": 1.9090909090909092,
      "grad_norm": 1.512507438659668,
      "learning_rate": 2.2153012521427593e-05,
      "loss": 0.9028,
      "step": 95
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.6655423641204834,
      "learning_rate": 2.1264091912477285e-05,
      "loss": 0.8271,
      "step": 100
    },
    {
      "epoch": 2.101010101010101,
      "grad_norm": 1.374399185180664,
      "learning_rate": 2.0347664368833765e-05,
      "loss": 0.7512,
      "step": 105
    },
    {
      "epoch": 2.202020202020202,
      "grad_norm": 1.506851077079773,
      "learning_rate": 1.9407754115068814e-05,
      "loss": 0.6869,
      "step": 110
    },
    {
      "epoch": 2.303030303030303,
      "grad_norm": 1.575244665145874,
      "learning_rate": 1.8448488493234402e-05,
      "loss": 0.6779,
      "step": 115
    },
    {
      "epoch": 2.404040404040404,
      "grad_norm": 1.4995136260986328,
      "learning_rate": 1.7474079838842513e-05,
      "loss": 0.665,
      "step": 120
    },
    {
      "epoch": 2.505050505050505,
      "grad_norm": 1.683546781539917,
      "learning_rate": 1.6488806983620927e-05,
      "loss": 0.6238,
      "step": 125
    },
    {
      "epoch": 2.606060606060606,
      "grad_norm": 1.7205966711044312,
      "learning_rate": 1.5496996466270265e-05,
      "loss": 0.5835,
      "step": 130
    },
    {
      "epoch": 2.707070707070707,
      "grad_norm": 1.4404793977737427,
      "learning_rate": 1.450300353372974e-05,
      "loss": 0.5943,
      "step": 135
    },
    {
      "epoch": 2.808080808080808,
      "grad_norm": 1.5309523344039917,
      "learning_rate": 1.3511193016379079e-05,
      "loss": 0.595,
      "step": 140
    },
    {
      "epoch": 2.909090909090909,
      "grad_norm": 1.3346279859542847,
      "learning_rate": 1.2525920161157491e-05,
      "loss": 0.5422,
      "step": 145
    },
    {
      "epoch": 3.0,
      "grad_norm": 2.2044155597686768,
      "learning_rate": 1.1551511506765599e-05,
      "loss": 0.5507,
      "step": 150
    }
  ],
  "logging_steps": 5,
  "max_steps": 250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 2000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.097466740985037e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}