{
  "best_global_step": 138,
  "best_metric": 2.2118425369262695,
  "best_model_checkpoint": "tiny_bert_km_100_v2_stsb/checkpoint-138",
  "epoch": 11.0,
  "eval_steps": 500,
  "global_step": 253,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 3.3725574016571045,
      "learning_rate": 4.9e-05,
      "loss": 3.6374,
      "step": 23
    },
    {
      "epoch": 1.0,
      "eval_combined_score": 0.10596566185927887,
      "eval_loss": 2.2224326133728027,
      "eval_pearson": 0.11104903258166131,
      "eval_runtime": 0.4837,
      "eval_samples_per_second": 3101.398,
      "eval_spearmanr": 0.10088229113689644,
      "eval_steps_per_second": 12.406,
      "step": 23
    },
    {
      "epoch": 2.0,
      "grad_norm": 4.5145392417907715,
      "learning_rate": 4.8e-05,
      "loss": 2.0858,
      "step": 46
    },
    {
      "epoch": 2.0,
      "eval_combined_score": 0.1421620216198273,
      "eval_loss": 2.510441780090332,
      "eval_pearson": 0.1490113294508295,
      "eval_runtime": 0.4923,
      "eval_samples_per_second": 3047.166,
      "eval_spearmanr": 0.1353127137888251,
      "eval_steps_per_second": 12.189,
      "step": 46
    },
    {
      "epoch": 3.0,
      "grad_norm": 3.4198648929595947,
      "learning_rate": 4.7e-05,
      "loss": 1.9616,
      "step": 69
    },
    {
      "epoch": 3.0,
      "eval_combined_score": 0.18882478983967088,
      "eval_loss": 2.2581183910369873,
      "eval_pearson": 0.19950411236940105,
      "eval_runtime": 0.4745,
      "eval_samples_per_second": 3161.064,
      "eval_spearmanr": 0.17814546730994069,
      "eval_steps_per_second": 12.644,
      "step": 69
    },
    {
      "epoch": 4.0,
      "grad_norm": 5.960690021514893,
      "learning_rate": 4.600000000000001e-05,
      "loss": 1.8487,
      "step": 92
    },
    {
      "epoch": 4.0,
      "eval_combined_score": 0.23515762587025904,
      "eval_loss": 2.3268494606018066,
      "eval_pearson": 0.244859037156418,
      "eval_runtime": 0.4407,
      "eval_samples_per_second": 3403.407,
      "eval_spearmanr": 0.2254562145841001,
      "eval_steps_per_second": 13.614,
      "step": 92
    },
    {
      "epoch": 5.0,
      "grad_norm": 4.597402095794678,
      "learning_rate": 4.5e-05,
      "loss": 1.6866,
      "step": 115
    },
    {
      "epoch": 5.0,
      "eval_combined_score": 0.2359440746771323,
      "eval_loss": 2.441965103149414,
      "eval_pearson": 0.24396977891477728,
      "eval_runtime": 0.5074,
      "eval_samples_per_second": 2956.101,
      "eval_spearmanr": 0.22791837043948734,
      "eval_steps_per_second": 11.824,
      "step": 115
    },
    {
      "epoch": 6.0,
      "grad_norm": 10.052696228027344,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 1.5138,
      "step": 138
    },
    {
      "epoch": 6.0,
      "eval_combined_score": 0.29826980724670565,
      "eval_loss": 2.2118425369262695,
      "eval_pearson": 0.30374885148112496,
      "eval_runtime": 0.4639,
      "eval_samples_per_second": 3233.126,
      "eval_spearmanr": 0.29279076301228635,
      "eval_steps_per_second": 12.933,
      "step": 138
    },
    {
      "epoch": 7.0,
      "grad_norm": 8.082393646240234,
      "learning_rate": 4.3e-05,
      "loss": 1.2926,
      "step": 161
    },
    {
      "epoch": 7.0,
      "eval_combined_score": 0.3204148913897177,
      "eval_loss": 2.4205052852630615,
      "eval_pearson": 0.32316026294842765,
      "eval_runtime": 0.4572,
      "eval_samples_per_second": 3280.617,
      "eval_spearmanr": 0.3176695198310078,
      "eval_steps_per_second": 13.122,
      "step": 161
    },
    {
      "epoch": 8.0,
      "grad_norm": 6.976232051849365,
      "learning_rate": 4.2e-05,
      "loss": 1.0946,
      "step": 184
    },
    {
      "epoch": 8.0,
      "eval_combined_score": 0.31205417114735745,
      "eval_loss": 2.5488226413726807,
      "eval_pearson": 0.3149398831861949,
      "eval_runtime": 0.5298,
      "eval_samples_per_second": 2831.125,
      "eval_spearmanr": 0.3091684591085199,
      "eval_steps_per_second": 11.324,
      "step": 184
    },
    {
      "epoch": 9.0,
      "grad_norm": 10.516256332397461,
      "learning_rate": 4.1e-05,
      "loss": 0.9053,
      "step": 207
    },
    {
      "epoch": 9.0,
      "eval_combined_score": 0.3010749795301204,
      "eval_loss": 2.582094430923462,
      "eval_pearson": 0.30275703258582504,
      "eval_runtime": 0.4705,
      "eval_samples_per_second": 3187.994,
      "eval_spearmanr": 0.2993929264744158,
      "eval_steps_per_second": 12.752,
      "step": 207
    },
    {
      "epoch": 10.0,
      "grad_norm": 7.436727046966553,
      "learning_rate": 4e-05,
      "loss": 0.7569,
      "step": 230
    },
    {
      "epoch": 10.0,
      "eval_combined_score": 0.3180409751918106,
      "eval_loss": 2.504795789718628,
      "eval_pearson": 0.3204168496240031,
      "eval_runtime": 0.4883,
      "eval_samples_per_second": 3071.637,
      "eval_spearmanr": 0.3156651007596181,
      "eval_steps_per_second": 12.287,
      "step": 230
    },
    {
      "epoch": 11.0,
      "grad_norm": 8.786745071411133,
      "learning_rate": 3.9000000000000006e-05,
      "loss": 0.6373,
      "step": 253
    },
    {
      "epoch": 11.0,
      "eval_combined_score": 0.31257973894008273,
      "eval_loss": 2.5968363285064697,
      "eval_pearson": 0.31349662563099656,
      "eval_runtime": 0.4421,
      "eval_samples_per_second": 3392.544,
      "eval_spearmanr": 0.31166285224916884,
      "eval_steps_per_second": 13.57,
      "step": 253
    },
    {
      "epoch": 11.0,
      "step": 253,
      "total_flos": 1658303025916416.0,
      "train_loss": 1.5836853585224377,
      "train_runtime": 47.2972,
      "train_samples_per_second": 6077.531,
      "train_steps_per_second": 24.314
    }
  ],
  "logging_steps": 1,
  "max_steps": 1150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1658303025916416.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}