{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9977046671767407,
  "eval_steps": 100,
  "global_step": 163,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.030604437643458302,
      "grad_norm": 2.3956207575517907,
      "learning_rate": 5.882352941176471e-06,
      "loss": 1.0865,
      "mean_token_accuracy": 0.7095605345838285,
      "step": 5
    },
    {
      "epoch": 0.061208875286916604,
      "grad_norm": 2.0418371914182054,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 0.9585,
      "mean_token_accuracy": 0.7301618747737846,
      "step": 10
    },
    {
      "epoch": 0.09181331293037491,
      "grad_norm": 1.11257855286638,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 0.8486,
      "mean_token_accuracy": 0.7503171317905428,
      "step": 15
    },
    {
      "epoch": 0.12241775057383321,
      "grad_norm": 0.8299724465639433,
      "learning_rate": 1.9979171608653926e-05,
      "loss": 0.8141,
      "mean_token_accuracy": 0.7560959624442517,
      "step": 20
    },
    {
      "epoch": 0.1530221882172915,
      "grad_norm": 0.8916010345037251,
      "learning_rate": 1.9852201067560607e-05,
      "loss": 0.7739,
      "mean_token_accuracy": 0.765613780191612,
      "step": 25
    },
    {
      "epoch": 0.18362662586074982,
      "grad_norm": 0.8550116014281596,
      "learning_rate": 1.961129783872301e-05,
      "loss": 0.7502,
      "mean_token_accuracy": 0.7706232570272338,
      "step": 30
    },
    {
      "epoch": 0.21423106350420812,
      "grad_norm": 0.7246688121931725,
      "learning_rate": 1.92592477719385e-05,
      "loss": 0.7359,
      "mean_token_accuracy": 0.7731495622693539,
      "step": 35
    },
    {
      "epoch": 0.24483550114766642,
      "grad_norm": 0.796594402129789,
      "learning_rate": 1.880012203973536e-05,
      "loss": 0.7416,
      "mean_token_accuracy": 0.7720866443990716,
      "step": 40
    },
    {
      "epoch": 0.2754399387911247,
      "grad_norm": 0.6589999071952135,
      "learning_rate": 1.8239230057575542e-05,
      "loss": 0.7229,
      "mean_token_accuracy": 0.7758317882296452,
      "step": 45
    },
    {
      "epoch": 0.306044376434583,
      "grad_norm": 0.6670732927253004,
      "learning_rate": 1.7583058084785626e-05,
      "loss": 0.7134,
      "mean_token_accuracy": 0.7779923586639284,
      "step": 50
    },
    {
      "epoch": 0.3366488140780413,
      "grad_norm": 0.6402899859627691,
      "learning_rate": 1.683919421624611e-05,
      "loss": 0.7078,
      "mean_token_accuracy": 0.7796091069874582,
      "step": 55
    },
    {
      "epoch": 0.36725325172149964,
      "grad_norm": 0.6682592985735738,
      "learning_rate": 1.6016240632249224e-05,
      "loss": 0.6944,
      "mean_token_accuracy": 0.7828741558176683,
      "step": 60
    },
    {
      "epoch": 0.3978576893649579,
      "grad_norm": 0.5747503915274951,
      "learning_rate": 1.512371412128424e-05,
      "loss": 0.6828,
      "mean_token_accuracy": 0.7866091268932821,
      "step": 65
    },
    {
      "epoch": 0.42846212700841624,
      "grad_norm": 0.6017366721008394,
      "learning_rate": 1.417193602612317e-05,
      "loss": 0.6833,
      "mean_token_accuracy": 0.7854777704763246,
      "step": 70
    },
    {
      "epoch": 0.4590665646518745,
      "grad_norm": 0.6909656637620125,
      "learning_rate": 1.3171912885891063e-05,
      "loss": 0.6867,
      "mean_token_accuracy": 0.7841435922488781,
      "step": 75
    },
    {
      "epoch": 0.48967100229533284,
      "grad_norm": 0.5680972717743182,
      "learning_rate": 1.2135209154397962e-05,
      "loss": 0.6704,
      "mean_token_accuracy": 0.7890687545099153,
      "step": 80
    },
    {
      "epoch": 0.5202754399387911,
      "grad_norm": 0.5077512809088303,
      "learning_rate": 1.1073813466641633e-05,
      "loss": 0.6871,
      "mean_token_accuracy": 0.7840379169126969,
      "step": 85
    },
    {
      "epoch": 0.5508798775822494,
      "grad_norm": 0.536075320882483,
      "learning_rate": 1e-05,
      "loss": 0.671,
      "mean_token_accuracy": 0.7886130547081162,
      "step": 90
    },
    {
      "epoch": 0.5814843152257078,
      "grad_norm": 0.4904573809215121,
      "learning_rate": 8.92618653335837e-06,
      "loss": 0.6777,
      "mean_token_accuracy": 0.7871433307114094,
      "step": 95
    },
    {
      "epoch": 0.612088752869166,
      "grad_norm": 0.55059789821494,
      "learning_rate": 7.86479084560204e-06,
      "loss": 0.6751,
      "mean_token_accuracy": 0.7871067609279663,
      "step": 100
    },
    {
      "epoch": 0.612088752869166,
      "eval_loss": 0.6865678429603577,
      "eval_mean_token_accuracy": 0.7828014183630538,
      "eval_runtime": 3.2986,
      "eval_samples_per_second": 37.895,
      "eval_steps_per_second": 1.213,
      "step": 100
    },
    {
      "epoch": 0.6426931905126243,
      "grad_norm": 0.5176104318935184,
      "learning_rate": 6.8280871141089415e-06,
      "loss": 0.673,
      "mean_token_accuracy": 0.7875675071968407,
      "step": 105
    },
    {
      "epoch": 0.6732976281560826,
      "grad_norm": 0.49658972838719107,
      "learning_rate": 5.828063973876834e-06,
      "loss": 0.669,
      "mean_token_accuracy": 0.7885898620912861,
      "step": 110
    },
    {
      "epoch": 0.703902065799541,
      "grad_norm": 0.48151170624177525,
      "learning_rate": 4.876285878715764e-06,
      "loss": 0.6591,
      "mean_token_accuracy": 0.7915397758950073,
      "step": 115
    },
    {
      "epoch": 0.7345065034429993,
      "grad_norm": 0.4786459845507803,
      "learning_rate": 3.983759367750772e-06,
      "loss": 0.6553,
      "mean_token_accuracy": 0.792250694192391,
      "step": 120
    },
    {
      "epoch": 0.7651109410864575,
      "grad_norm": 0.4512983509051795,
      "learning_rate": 3.1608057837538976e-06,
      "loss": 0.6708,
      "mean_token_accuracy": 0.7879882868675088,
      "step": 125
    },
    {
      "epoch": 0.7957153787299158,
      "grad_norm": 0.42751771255008625,
      "learning_rate": 2.416941915214377e-06,
      "loss": 0.6557,
      "mean_token_accuracy": 0.7919469636759972,
      "step": 130
    },
    {
      "epoch": 0.8263198163733741,
      "grad_norm": 0.45008874478679334,
      "learning_rate": 1.7607699424244583e-06,
      "loss": 0.6589,
      "mean_token_accuracy": 0.7912565925036045,
      "step": 135
    },
    {
      "epoch": 0.8569242540168325,
      "grad_norm": 0.43122456587499,
      "learning_rate": 1.1998779602646438e-06,
      "loss": 0.6519,
      "mean_token_accuracy": 0.7932468473712841,
      "step": 140
    },
    {
      "epoch": 0.8875286916602907,
      "grad_norm": 0.41656433519146435,
      "learning_rate": 7.40752228061502e-07,
      "loss": 0.6549,
      "mean_token_accuracy": 0.7921480402911436,
      "step": 145
    },
    {
      "epoch": 0.918133129303749,
      "grad_norm": 0.4259733563062634,
      "learning_rate": 3.887021612769937e-07,
      "loss": 0.662,
      "mean_token_accuracy": 0.7900667175944671,
      "step": 150
    },
    {
      "epoch": 0.9487375669472073,
      "grad_norm": 0.426432499566953,
      "learning_rate": 1.4779893243939358e-07,
      "loss": 0.657,
      "mean_token_accuracy": 0.7918590477702575,
      "step": 155
    },
    {
      "epoch": 0.9793420045906657,
      "grad_norm": 0.4327175151190677,
      "learning_rate": 2.082839134607828e-08,
      "loss": 0.6544,
      "mean_token_accuracy": 0.7918795329068727,
      "step": 160
    },
    {
      "epoch": 0.9977046671767407,
      "mean_token_accuracy": 0.792194867744806,
      "step": 163,
      "total_flos": 136410845675520.0,
      "train_loss": 0.7151586380472944,
      "train_runtime": 1873.0311,
      "train_samples_per_second": 11.163,
      "train_steps_per_second": 0.087
    }
  ],
  "logging_steps": 5,
  "max_steps": 163,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 136410845675520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}