{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.05959475566150179,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0011918951132300357,
      "grad_norm": 177782670.4326833,
      "learning_rate": 2e-05,
      "loss": 2.136,
      "step": 1
    },
    {
      "epoch": 0.0023837902264600714,
      "grad_norm": 8.95409622744013,
      "learning_rate": 1.9979453927503366e-05,
      "loss": 2.1457,
      "step": 2
    },
    {
      "epoch": 0.003575685339690107,
      "grad_norm": 3.4343548487673266,
      "learning_rate": 1.991790013823246e-05,
      "loss": 2.0768,
      "step": 3
    },
    {
      "epoch": 0.004767580452920143,
      "grad_norm": 6.262035692357495,
      "learning_rate": 1.9815591569910654e-05,
      "loss": 2.1149,
      "step": 4
    },
    {
      "epoch": 0.0059594755661501785,
      "grad_norm": 3.6066762749504586,
      "learning_rate": 1.9672948630390296e-05,
      "loss": 2.058,
      "step": 5
    },
    {
      "epoch": 0.007151370679380214,
      "grad_norm": 1.6607457646380352,
      "learning_rate": 1.949055747010669e-05,
      "loss": 2.0418,
      "step": 6
    },
    {
      "epoch": 0.00834326579261025,
      "grad_norm": 1.1060259280766658,
      "learning_rate": 1.926916757346022e-05,
      "loss": 2.0053,
      "step": 7
    },
    {
      "epoch": 0.009535160905840286,
      "grad_norm": 1.123824033082117,
      "learning_rate": 1.900968867902419e-05,
      "loss": 1.9669,
      "step": 8
    },
    {
      "epoch": 0.010727056019070322,
      "grad_norm": 0.8925102181405017,
      "learning_rate": 1.8713187041233896e-05,
      "loss": 1.9809,
      "step": 9
    },
    {
      "epoch": 0.011918951132300357,
      "grad_norm": 0.5370556344286816,
      "learning_rate": 1.8380881048918406e-05,
      "loss": 1.9545,
      "step": 10
    },
    {
      "epoch": 0.013110846245530394,
      "grad_norm": 0.8591076931477116,
      "learning_rate": 1.8014136218679566e-05,
      "loss": 1.9587,
      "step": 11
    },
    {
      "epoch": 0.014302741358760428,
      "grad_norm": 0.7518926066158684,
      "learning_rate": 1.7614459583691346e-05,
      "loss": 1.9442,
      "step": 12
    },
    {
      "epoch": 0.015494636471990465,
      "grad_norm": 0.4754532185659767,
      "learning_rate": 1.7183493500977277e-05,
      "loss": 1.9315,
      "step": 13
    },
    {
      "epoch": 0.0166865315852205,
      "grad_norm": 0.5739699684266744,
      "learning_rate": 1.672300890261317e-05,
      "loss": 1.9354,
      "step": 14
    },
    {
      "epoch": 0.017878426698450536,
      "grad_norm": 0.6661639036007014,
      "learning_rate": 1.6234898018587336e-05,
      "loss": 1.9424,
      "step": 15
    },
    {
      "epoch": 0.01907032181168057,
      "grad_norm": 0.5505191930099311,
      "learning_rate": 1.5721166601221697e-05,
      "loss": 1.9213,
      "step": 16
    },
    {
      "epoch": 0.02026221692491061,
      "grad_norm": 0.3672191630130131,
      "learning_rate": 1.5183925683105254e-05,
      "loss": 1.9377,
      "step": 17
    },
    {
      "epoch": 0.021454112038140644,
      "grad_norm": 0.4558866292924925,
      "learning_rate": 1.4625382902408356e-05,
      "loss": 1.9303,
      "step": 18
    },
    {
      "epoch": 0.02264600715137068,
      "grad_norm": 0.4785937312948391,
      "learning_rate": 1.4047833431223938e-05,
      "loss": 1.9428,
      "step": 19
    },
    {
      "epoch": 0.023837902264600714,
      "grad_norm": 0.343635118158467,
      "learning_rate": 1.3453650544213078e-05,
      "loss": 1.9512,
      "step": 20
    },
    {
      "epoch": 0.025029797377830752,
      "grad_norm": 0.27986353875534126,
      "learning_rate": 1.2845275866310325e-05,
      "loss": 1.9116,
      "step": 21
    },
    {
      "epoch": 0.026221692491060787,
      "grad_norm": 0.3507652782068489,
      "learning_rate": 1.2225209339563144e-05,
      "loss": 1.9196,
      "step": 22
    },
    {
      "epoch": 0.027413587604290822,
      "grad_norm": 0.41481744836140944,
      "learning_rate": 1.1595998950333794e-05,
      "loss": 1.9354,
      "step": 23
    },
    {
      "epoch": 0.028605482717520857,
      "grad_norm": 0.3457895319852949,
      "learning_rate": 1.0960230259076819e-05,
      "loss": 1.9264,
      "step": 24
    },
    {
      "epoch": 0.029797377830750895,
      "grad_norm": 0.27419676814468613,
      "learning_rate": 1.0320515775716556e-05,
      "loss": 1.9333,
      "step": 25
    },
    {
      "epoch": 0.03098927294398093,
      "grad_norm": 0.2462847195962197,
      "learning_rate": 9.67948422428345e-06,
      "loss": 1.9329,
      "step": 26
    },
    {
      "epoch": 0.03218116805721097,
      "grad_norm": 0.2879155277883314,
      "learning_rate": 9.039769740923183e-06,
      "loss": 1.9355,
      "step": 27
    },
    {
      "epoch": 0.033373063170441,
      "grad_norm": 0.2851897532268947,
      "learning_rate": 8.404001049666211e-06,
      "loss": 1.9412,
      "step": 28
    },
    {
      "epoch": 0.03456495828367104,
      "grad_norm": 0.23668999310579258,
      "learning_rate": 7.774790660436857e-06,
      "loss": 1.9088,
      "step": 29
    },
    {
      "epoch": 0.03575685339690107,
      "grad_norm": 0.20150753286470247,
      "learning_rate": 7.154724133689677e-06,
      "loss": 1.8864,
      "step": 30
    },
    {
      "epoch": 0.03694874851013111,
      "grad_norm": 0.19732527709920555,
      "learning_rate": 6.546349455786926e-06,
      "loss": 1.9214,
      "step": 31
    },
    {
      "epoch": 0.03814064362336114,
      "grad_norm": 0.20799892741645987,
      "learning_rate": 5.952166568776062e-06,
      "loss": 1.9352,
      "step": 32
    },
    {
      "epoch": 0.03933253873659118,
      "grad_norm": 0.22149899694065844,
      "learning_rate": 5.37461709759165e-06,
      "loss": 1.9105,
      "step": 33
    },
    {
      "epoch": 0.04052443384982122,
      "grad_norm": 0.2116500916920916,
      "learning_rate": 4.81607431689475e-06,
      "loss": 1.9485,
      "step": 34
    },
    {
      "epoch": 0.041716328963051254,
      "grad_norm": 0.1892174172626481,
      "learning_rate": 4.278833398778306e-06,
      "loss": 1.9311,
      "step": 35
    },
    {
      "epoch": 0.04290822407628129,
      "grad_norm": 0.18247102552206337,
      "learning_rate": 3.7651019814126656e-06,
      "loss": 1.9581,
      "step": 36
    },
    {
      "epoch": 0.04410011918951132,
      "grad_norm": 0.17834023507786856,
      "learning_rate": 3.2769910973868314e-06,
      "loss": 1.9185,
      "step": 37
    },
    {
      "epoch": 0.04529201430274136,
      "grad_norm": 0.17663777171439146,
      "learning_rate": 2.8165064990227255e-06,
      "loss": 1.9363,
      "step": 38
    },
    {
      "epoch": 0.04648390941597139,
      "grad_norm": 0.17672059688599515,
      "learning_rate": 2.3855404163086558e-06,
      "loss": 1.9329,
      "step": 39
    },
    {
      "epoch": 0.04767580452920143,
      "grad_norm": 0.17340671604131572,
      "learning_rate": 1.9858637813204352e-06,
      "loss": 1.8915,
      "step": 40
    },
    {
      "epoch": 0.04886769964243146,
      "grad_norm": 0.18391040636248215,
      "learning_rate": 1.6191189510815942e-06,
      "loss": 1.9172,
      "step": 41
    },
    {
      "epoch": 0.050059594755661505,
      "grad_norm": 0.16529400970579494,
      "learning_rate": 1.286812958766106e-06,
      "loss": 1.8757,
      "step": 42
    },
    {
      "epoch": 0.05125148986889154,
      "grad_norm": 0.16212347626037402,
      "learning_rate": 9.903113209758098e-07,
      "loss": 1.9082,
      "step": 43
    },
    {
      "epoch": 0.052443384982121574,
      "grad_norm": 0.1578390908067507,
      "learning_rate": 7.308324265397837e-07,
      "loss": 1.9252,
      "step": 44
    },
    {
      "epoch": 0.05363528009535161,
      "grad_norm": 0.1585342280034931,
      "learning_rate": 5.094425298933136e-07,
      "loss": 1.9208,
      "step": 45
    },
    {
      "epoch": 0.054827175208581644,
      "grad_norm": 0.15955775315502635,
      "learning_rate": 3.2705136960970554e-07,
      "loss": 1.9389,
      "step": 46
    },
    {
      "epoch": 0.05601907032181168,
      "grad_norm": 0.16110050987254387,
      "learning_rate": 1.844084300893456e-07,
      "loss": 1.9164,
      "step": 47
    },
    {
      "epoch": 0.057210965435041714,
      "grad_norm": 0.1536329478387249,
      "learning_rate": 8.209986176753947e-08,
      "loss": 1.92,
      "step": 48
    },
    {
      "epoch": 0.058402860548271755,
      "grad_norm": 0.16199777981854935,
      "learning_rate": 2.054607249663665e-08,
      "loss": 1.9132,
      "step": 49
    },
    {
      "epoch": 0.05959475566150179,
      "grad_norm": 0.1591368984467094,
      "learning_rate": 0.0,
      "loss": 1.9537,
      "step": 50
    },
    {
      "epoch": 0.05959475566150179,
      "step": 50,
      "total_flos": 106890730143744.0,
      "train_loss": 1.951620864868164,
      "train_runtime": 1599.1299,
      "train_samples_per_second": 58.032,
      "train_steps_per_second": 0.031
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 106890730143744.0,
  "train_batch_size": 58,
  "trial_name": null,
  "trial_params": null
}