{
  "best_global_step": 150,
  "best_metric": 0.34326156973838806,
  "best_model_checkpoint": "/content/output/checkpoint-150",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 375,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13333333333333333,
      "grad_norm": 0.9156391620635986,
      "learning_rate": 1.7543859649122806e-05,
      "loss": 1.2171,
      "mean_token_accuracy": 0.7375583708286285,
      "num_tokens": 32413.0,
      "step": 10
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 0.815642774105072,
      "learning_rate": 3.508771929824561e-05,
      "loss": 0.9632,
      "mean_token_accuracy": 0.7655525088310242,
      "num_tokens": 64714.0,
      "step": 20
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.39404281973838806,
      "learning_rate": 5.2631578947368424e-05,
      "loss": 0.5951,
      "mean_token_accuracy": 0.8334747165441513,
      "num_tokens": 96352.0,
      "step": 30
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 0.41063451766967773,
      "learning_rate": 7.017543859649122e-05,
      "loss": 0.4305,
      "mean_token_accuracy": 0.8707121074199676,
      "num_tokens": 125717.0,
      "step": 40
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.3393928110599518,
      "learning_rate": 8.771929824561403e-05,
      "loss": 0.3874,
      "mean_token_accuracy": 0.8828857451677322,
      "num_tokens": 157446.0,
      "step": 50
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.2939395606517792,
      "learning_rate": 9.905660377358492e-05,
      "loss": 0.3613,
      "mean_token_accuracy": 0.8902347713708878,
      "num_tokens": 187947.0,
      "step": 60
    },
    {
      "epoch": 0.9333333333333333,
      "grad_norm": 0.30780646204948425,
      "learning_rate": 9.59119496855346e-05,
      "loss": 0.3545,
      "mean_token_accuracy": 0.8908359378576278,
      "num_tokens": 219528.0,
      "step": 70
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.35836032032966614,
      "eval_mean_token_accuracy": 0.8900894594192504,
      "eval_num_tokens": 234547.0,
      "eval_runtime": 47.5564,
      "eval_samples_per_second": 2.103,
      "eval_steps_per_second": 0.526,
      "step": 75
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 0.29364290833473206,
      "learning_rate": 9.276729559748428e-05,
      "loss": 0.3282,
      "mean_token_accuracy": 0.8956525355577469,
      "num_tokens": 249942.0,
      "step": 80
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.3256814479827881,
      "learning_rate": 8.962264150943397e-05,
      "loss": 0.3033,
      "mean_token_accuracy": 0.9027387470006942,
      "num_tokens": 280660.0,
      "step": 90
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.3860395848751068,
      "learning_rate": 8.647798742138365e-05,
      "loss": 0.3139,
      "mean_token_accuracy": 0.9029639422893524,
      "num_tokens": 312269.0,
      "step": 100
    },
    {
      "epoch": 1.4666666666666668,
      "grad_norm": 0.35767263174057007,
      "learning_rate": 8.333333333333334e-05,
      "loss": 0.3188,
      "mean_token_accuracy": 0.9010519236326218,
      "num_tokens": 343451.0,
      "step": 110
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.37099793553352356,
      "learning_rate": 8.018867924528302e-05,
      "loss": 0.3053,
      "mean_token_accuracy": 0.9043950855731964,
      "num_tokens": 375605.0,
      "step": 120
    },
    {
      "epoch": 1.7333333333333334,
      "grad_norm": 0.42942672967910767,
      "learning_rate": 7.704402515723272e-05,
      "loss": 0.2819,
      "mean_token_accuracy": 0.9102458357810974,
      "num_tokens": 407072.0,
      "step": 130
    },
    {
      "epoch": 1.8666666666666667,
      "grad_norm": 0.43511271476745605,
      "learning_rate": 7.389937106918238e-05,
      "loss": 0.2928,
      "mean_token_accuracy": 0.9080147057771683,
      "num_tokens": 439028.0,
      "step": 140
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.4053526818752289,
      "learning_rate": 7.075471698113208e-05,
      "loss": 0.2847,
      "mean_token_accuracy": 0.9118992000818252,
      "num_tokens": 469094.0,
      "step": 150
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.34326156973838806,
      "eval_mean_token_accuracy": 0.8937170529365539,
      "eval_num_tokens": 469094.0,
      "eval_runtime": 47.6074,
      "eval_samples_per_second": 2.101,
      "eval_steps_per_second": 0.525,
      "step": 150
    },
    {
      "epoch": 2.1333333333333333,
      "grad_norm": 0.47702592611312866,
      "learning_rate": 6.761006289308176e-05,
      "loss": 0.2118,
      "mean_token_accuracy": 0.9336899906396866,
      "num_tokens": 500956.0,
      "step": 160
    },
    {
      "epoch": 2.2666666666666666,
      "grad_norm": 0.5411467552185059,
      "learning_rate": 6.446540880503145e-05,
      "loss": 0.2049,
      "mean_token_accuracy": 0.9344588816165924,
      "num_tokens": 531097.0,
      "step": 170
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.5084139704704285,
      "learning_rate": 6.132075471698113e-05,
      "loss": 0.218,
      "mean_token_accuracy": 0.930207297205925,
      "num_tokens": 562566.0,
      "step": 180
    },
    {
      "epoch": 2.533333333333333,
      "grad_norm": 0.5238709449768066,
      "learning_rate": 5.817610062893082e-05,
      "loss": 0.2164,
      "mean_token_accuracy": 0.930533480644226,
      "num_tokens": 594889.0,
      "step": 190
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.5833751559257507,
      "learning_rate": 5.503144654088051e-05,
      "loss": 0.212,
      "mean_token_accuracy": 0.9320303469896316,
      "num_tokens": 625913.0,
      "step": 200
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.6267488598823547,
      "learning_rate": 5.188679245283019e-05,
      "loss": 0.1922,
      "mean_token_accuracy": 0.9386632025241852,
      "num_tokens": 658003.0,
      "step": 210
    },
    {
      "epoch": 2.9333333333333336,
      "grad_norm": 0.5038567781448364,
      "learning_rate": 4.8742138364779875e-05,
      "loss": 0.2032,
      "mean_token_accuracy": 0.9358770161867142,
      "num_tokens": 688971.0,
      "step": 220
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.35934150218963623,
      "eval_mean_token_accuracy": 0.8932444596290589,
      "eval_num_tokens": 703641.0,
      "eval_runtime": 47.592,
      "eval_samples_per_second": 2.101,
      "eval_steps_per_second": 0.525,
      "step": 225
    },
    {
      "epoch": 3.066666666666667,
      "grad_norm": 0.5663590431213379,
      "learning_rate": 4.559748427672956e-05,
      "loss": 0.1781,
      "mean_token_accuracy": 0.9423291176557541,
      "num_tokens": 720008.0,
      "step": 230
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.6952011585235596,
      "learning_rate": 4.245283018867925e-05,
      "loss": 0.1373,
      "mean_token_accuracy": 0.9570749044418335,
      "num_tokens": 750286.0,
      "step": 240
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 0.6387498378753662,
      "learning_rate": 3.9308176100628936e-05,
      "loss": 0.1362,
      "mean_token_accuracy": 0.956162053346634,
      "num_tokens": 782161.0,
      "step": 250
    },
    {
      "epoch": 3.466666666666667,
      "grad_norm": 0.7637629508972168,
      "learning_rate": 3.6163522012578616e-05,
      "loss": 0.1207,
      "mean_token_accuracy": 0.9621568471193314,
      "num_tokens": 813190.0,
      "step": 260
    },
    {
      "epoch": 3.6,
      "grad_norm": 0.7092714309692383,
      "learning_rate": 3.30188679245283e-05,
      "loss": 0.1273,
      "mean_token_accuracy": 0.9595555752515793,
      "num_tokens": 844684.0,
      "step": 270
    },
    {
      "epoch": 3.7333333333333334,
      "grad_norm": 0.7261990904808044,
      "learning_rate": 2.9874213836477987e-05,
      "loss": 0.1218,
      "mean_token_accuracy": 0.9604292154312134,
      "num_tokens": 876896.0,
      "step": 280
    },
    {
      "epoch": 3.8666666666666667,
      "grad_norm": 0.7577289938926697,
      "learning_rate": 2.6729559748427674e-05,
      "loss": 0.1296,
      "mean_token_accuracy": 0.9597998350858689,
      "num_tokens": 907432.0,
      "step": 290
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.6601764559745789,
      "learning_rate": 2.358490566037736e-05,
      "loss": 0.1278,
      "mean_token_accuracy": 0.961115637421608,
      "num_tokens": 938188.0,
      "step": 300
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.4141911566257477,
      "eval_mean_token_accuracy": 0.8913846659660339,
      "eval_num_tokens": 938188.0,
      "eval_runtime": 47.5859,
      "eval_samples_per_second": 2.101,
      "eval_steps_per_second": 0.525,
      "step": 300
    },
    {
      "epoch": 4.133333333333334,
      "grad_norm": 0.8669018745422363,
      "learning_rate": 2.0440251572327044e-05,
      "loss": 0.081,
      "mean_token_accuracy": 0.9762738227844239,
      "num_tokens": 969846.0,
      "step": 310
    },
    {
      "epoch": 4.266666666666667,
      "grad_norm": 0.8257538080215454,
      "learning_rate": 1.7295597484276728e-05,
      "loss": 0.0816,
      "mean_token_accuracy": 0.9748408764600753,
      "num_tokens": 1001376.0,
      "step": 320
    },
    {
      "epoch": 4.4,
      "grad_norm": 0.7576424479484558,
      "learning_rate": 1.4150943396226415e-05,
      "loss": 0.0779,
      "mean_token_accuracy": 0.9772728830575943,
      "num_tokens": 1033904.0,
      "step": 330
    },
    {
      "epoch": 4.533333333333333,
      "grad_norm": 0.7902347445487976,
      "learning_rate": 1.1006289308176102e-05,
      "loss": 0.082,
      "mean_token_accuracy": 0.9764015346765518,
      "num_tokens": 1064634.0,
      "step": 340
    },
    {
      "epoch": 4.666666666666667,
      "grad_norm": 0.8522145748138428,
      "learning_rate": 7.861635220125786e-06,
      "loss": 0.0761,
      "mean_token_accuracy": 0.9762265920639038,
      "num_tokens": 1094465.0,
      "step": 350
    },
    {
      "epoch": 4.8,
      "grad_norm": 0.5950550436973572,
      "learning_rate": 4.716981132075472e-06,
      "loss": 0.0826,
      "mean_token_accuracy": 0.974476671218872,
      "num_tokens": 1125816.0,
      "step": 360
    },
    {
      "epoch": 4.933333333333334,
      "grad_norm": 0.8709597587585449,
      "learning_rate": 1.5723270440251573e-06,
      "loss": 0.0836,
      "mean_token_accuracy": 0.975111848115921,
      "num_tokens": 1157854.0,
      "step": 370
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.47538331151008606,
      "eval_mean_token_accuracy": 0.8894158458709717,
      "eval_num_tokens": 1172735.0,
      "eval_runtime": 47.5654,
      "eval_samples_per_second": 2.102,
      "eval_steps_per_second": 0.526,
      "step": 375
    }
  ],
  "logging_steps": 10,
  "max_steps": 375,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.547412120671027e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}