{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 228,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 0.3891168855130672,
      "epoch": 0.043859649122807015,
      "grad_norm": 18.536834716796875,
      "learning_rate": 2e-05,
      "loss": 4.281,
      "mean_token_accuracy": 0.5703265547752381,
      "num_tokens": 29513.0,
      "step": 10
    },
    {
      "entropy": 0.749189381301403,
      "epoch": 0.08771929824561403,
      "grad_norm": 3.8398051261901855,
      "learning_rate": 2e-05,
      "loss": 1.9625,
      "mean_token_accuracy": 0.6897866070270539,
      "num_tokens": 58932.0,
      "step": 20
    },
    {
      "entropy": 1.169826951622963,
      "epoch": 0.13157894736842105,
      "grad_norm": 2.5061917304992676,
      "learning_rate": 2e-05,
      "loss": 1.4255,
      "mean_token_accuracy": 0.7357227891683579,
      "num_tokens": 88352.0,
      "step": 30
    },
    {
      "entropy": 1.1334790736436844,
      "epoch": 0.17543859649122806,
      "grad_norm": 1.9473565816879272,
      "learning_rate": 2e-05,
      "loss": 1.1315,
      "mean_token_accuracy": 0.7862708762288093,
      "num_tokens": 117877.0,
      "step": 40
    },
    {
      "entropy": 0.8736076682806015,
      "epoch": 0.21929824561403508,
      "grad_norm": 1.7826578617095947,
      "learning_rate": 2e-05,
      "loss": 0.8549,
      "mean_token_accuracy": 0.8343304082751274,
      "num_tokens": 147392.0,
      "step": 50
    },
    {
      "entropy": 0.5955807730555535,
      "epoch": 0.2631578947368421,
      "grad_norm": 1.6632131338119507,
      "learning_rate": 2e-05,
      "loss": 0.58,
      "mean_token_accuracy": 0.8876266479492188,
      "num_tokens": 176728.0,
      "step": 60
    },
    {
      "entropy": 0.34382262006402015,
      "epoch": 0.30701754385964913,
      "grad_norm": 1.4843252897262573,
      "learning_rate": 2e-05,
      "loss": 0.3445,
      "mean_token_accuracy": 0.9318736225366593,
      "num_tokens": 206184.0,
      "step": 70
    },
    {
      "entropy": 0.19373956136405468,
      "epoch": 0.3508771929824561,
      "grad_norm": 0.904003918170929,
      "learning_rate": 2e-05,
      "loss": 0.189,
      "mean_token_accuracy": 0.9710342198610306,
      "num_tokens": 235688.0,
      "step": 80
    },
    {
      "entropy": 0.12052652426064014,
      "epoch": 0.39473684210526316,
      "grad_norm": 0.7406700849533081,
      "learning_rate": 2e-05,
      "loss": 0.1056,
      "mean_token_accuracy": 0.9886028110980988,
      "num_tokens": 265172.0,
      "step": 90
    },
    {
      "entropy": 0.0992697212845087,
      "epoch": 0.43859649122807015,
      "grad_norm": 0.606019139289856,
      "learning_rate": 2e-05,
      "loss": 0.0817,
      "mean_token_accuracy": 0.989163076877594,
      "num_tokens": 294687.0,
      "step": 100
    },
    {
      "entropy": 0.08367474023252726,
      "epoch": 0.4824561403508772,
      "grad_norm": 0.4705955386161804,
      "learning_rate": 2e-05,
      "loss": 0.0737,
      "mean_token_accuracy": 0.9905414953827858,
      "num_tokens": 324088.0,
      "step": 110
    },
    {
      "entropy": 0.07386345528066159,
      "epoch": 0.5263157894736842,
      "grad_norm": 0.4300910234451294,
      "learning_rate": 2e-05,
      "loss": 0.0631,
      "mean_token_accuracy": 0.9910139158368111,
      "num_tokens": 353582.0,
      "step": 120
    },
    {
      "entropy": 0.06888655042275786,
      "epoch": 0.5701754385964912,
      "grad_norm": 0.45082923769950867,
      "learning_rate": 2e-05,
      "loss": 0.0661,
      "mean_token_accuracy": 0.9910464301705361,
      "num_tokens": 383080.0,
      "step": 130
    },
    {
      "entropy": 0.06553246006369591,
      "epoch": 0.6140350877192983,
      "grad_norm": 0.4258216619491577,
      "learning_rate": 2e-05,
      "loss": 0.0615,
      "mean_token_accuracy": 0.9914803236722947,
      "num_tokens": 412559.0,
      "step": 140
    },
    {
      "entropy": 0.06352790119126439,
      "epoch": 0.6578947368421053,
      "grad_norm": 0.37360644340515137,
      "learning_rate": 2e-05,
      "loss": 0.0579,
      "mean_token_accuracy": 0.9916400715708733,
      "num_tokens": 442008.0,
      "step": 150
    },
    {
      "entropy": 0.060972847137600185,
      "epoch": 0.7017543859649122,
      "grad_norm": 0.3406882584095001,
      "learning_rate": 2e-05,
      "loss": 0.0576,
      "mean_token_accuracy": 0.9914845108985901,
      "num_tokens": 471495.0,
      "step": 160
    },
    {
      "entropy": 0.05891128294169903,
      "epoch": 0.7456140350877193,
      "grad_norm": 0.2854160964488983,
      "learning_rate": 2e-05,
      "loss": 0.057,
      "mean_token_accuracy": 0.9915445119142532,
      "num_tokens": 500954.0,
      "step": 170
    },
    {
      "entropy": 0.0579888011328876,
      "epoch": 0.7894736842105263,
      "grad_norm": 0.40168505907058716,
      "learning_rate": 2e-05,
      "loss": 0.0507,
      "mean_token_accuracy": 0.9923512250185013,
      "num_tokens": 530401.0,
      "step": 180
    },
    {
      "entropy": 0.0573225624859333,
      "epoch": 0.8333333333333334,
      "grad_norm": 0.4589792490005493,
      "learning_rate": 2e-05,
      "loss": 0.0561,
      "mean_token_accuracy": 0.9916056364774704,
      "num_tokens": 559940.0,
      "step": 190
    },
    {
      "entropy": 0.05970530286431312,
      "epoch": 0.8771929824561403,
      "grad_norm": 0.5275149941444397,
      "learning_rate": 2e-05,
      "loss": 0.0573,
      "mean_token_accuracy": 0.9915869951248169,
      "num_tokens": 589427.0,
      "step": 200
    },
    {
      "entropy": 0.059507152158766986,
      "epoch": 0.9210526315789473,
      "grad_norm": 0.2661688029766083,
      "learning_rate": 2e-05,
      "loss": 0.0515,
      "mean_token_accuracy": 0.9923170626163482,
      "num_tokens": 618838.0,
      "step": 210
    },
    {
      "entropy": 0.05687206219881773,
      "epoch": 0.9649122807017544,
      "grad_norm": 0.35179731249809265,
      "learning_rate": 2e-05,
      "loss": 0.0506,
      "mean_token_accuracy": 0.9918064430356026,
      "num_tokens": 648258.0,
      "step": 220
    }
  ],
  "logging_steps": 10,
  "max_steps": 1824,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 419741502833664.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}