{
  "best_global_step": 92,
  "best_metric": 2.3171796798706055,
  "best_model_checkpoint": "distilbert_rand_100_v2_stsb/checkpoint-92",
  "epoch": 9.0,
  "eval_steps": 500,
  "global_step": 207,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 6.45198392868042,
      "learning_rate": 4.9e-05,
      "loss": 2.7709,
      "step": 23
    },
    {
      "epoch": 1.0,
      "eval_combined_score": 0.12841265650500888,
      "eval_loss": 2.532341718673706,
      "eval_pearson": 0.1403550216341048,
      "eval_runtime": 0.641,
      "eval_samples_per_second": 2340.114,
      "eval_spearmanr": 0.11647029137591298,
      "eval_steps_per_second": 9.36,
      "step": 23
    },
    {
      "epoch": 2.0,
      "grad_norm": 15.66421890258789,
      "learning_rate": 4.8e-05,
      "loss": 1.9319,
      "step": 46
    },
    {
      "epoch": 2.0,
      "eval_combined_score": 0.1742201484465158,
      "eval_loss": 2.3231194019317627,
      "eval_pearson": 0.18676369113081998,
      "eval_runtime": 0.7031,
      "eval_samples_per_second": 2133.33,
      "eval_spearmanr": 0.1616766057622116,
      "eval_steps_per_second": 8.533,
      "step": 46
    },
    {
      "epoch": 3.0,
      "grad_norm": 6.417102813720703,
      "learning_rate": 4.7e-05,
      "loss": 1.6901,
      "step": 69
    },
    {
      "epoch": 3.0,
      "eval_combined_score": 0.21461132936351818,
      "eval_loss": 2.5205445289611816,
      "eval_pearson": 0.219567089823671,
      "eval_runtime": 0.6702,
      "eval_samples_per_second": 2238.16,
      "eval_spearmanr": 0.2096555689033653,
      "eval_steps_per_second": 8.953,
      "step": 69
    },
    {
      "epoch": 4.0,
      "grad_norm": 13.033140182495117,
      "learning_rate": 4.600000000000001e-05,
      "loss": 1.4033,
      "step": 92
    },
    {
      "epoch": 4.0,
      "eval_combined_score": 0.2873649472701957,
      "eval_loss": 2.3171796798706055,
      "eval_pearson": 0.29063065304298175,
      "eval_runtime": 0.6287,
      "eval_samples_per_second": 2386.001,
      "eval_spearmanr": 0.2840992414974096,
      "eval_steps_per_second": 9.544,
      "step": 92
    },
    {
      "epoch": 5.0,
      "grad_norm": 11.78853988647461,
      "learning_rate": 4.5e-05,
      "loss": 1.0661,
      "step": 115
    },
    {
      "epoch": 5.0,
      "eval_combined_score": 0.275515292992803,
      "eval_loss": 2.488051652908325,
      "eval_pearson": 0.2763992719431185,
      "eval_runtime": 0.6318,
      "eval_samples_per_second": 2374.257,
      "eval_spearmanr": 0.2746313140424874,
      "eval_steps_per_second": 9.497,
      "step": 115
    },
    {
      "epoch": 6.0,
      "grad_norm": 9.148160934448242,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.8233,
      "step": 138
    },
    {
      "epoch": 6.0,
      "eval_combined_score": 0.3079887068893455,
      "eval_loss": 2.4506001472473145,
      "eval_pearson": 0.31048843320822817,
      "eval_runtime": 0.6293,
      "eval_samples_per_second": 2383.776,
      "eval_spearmanr": 0.3054889805704628,
      "eval_steps_per_second": 9.535,
      "step": 138
    },
    {
      "epoch": 7.0,
      "grad_norm": 9.518824577331543,
      "learning_rate": 4.3e-05,
      "loss": 0.6527,
      "step": 161
    },
    {
      "epoch": 7.0,
      "eval_combined_score": 0.30662081869418634,
      "eval_loss": 2.6286911964416504,
      "eval_pearson": 0.3095195146289885,
      "eval_runtime": 0.7124,
      "eval_samples_per_second": 2105.438,
      "eval_spearmanr": 0.3037221227593841,
      "eval_steps_per_second": 8.422,
      "step": 161
    },
    {
      "epoch": 8.0,
      "grad_norm": 13.455754280090332,
      "learning_rate": 4.2e-05,
      "loss": 0.5076,
      "step": 184
    },
    {
      "epoch": 8.0,
      "eval_combined_score": 0.3029343491723508,
      "eval_loss": 2.6804022789001465,
      "eval_pearson": 0.30656147252147564,
      "eval_runtime": 0.6246,
      "eval_samples_per_second": 2401.55,
      "eval_spearmanr": 0.29930722582322594,
      "eval_steps_per_second": 9.606,
      "step": 184
    },
    {
      "epoch": 9.0,
      "grad_norm": 8.446640968322754,
      "learning_rate": 4.1e-05,
      "loss": 0.4436,
      "step": 207
    },
    {
      "epoch": 9.0,
      "eval_combined_score": 0.31715983192654207,
      "eval_loss": 2.5318737030029297,
      "eval_pearson": 0.32015210561348145,
      "eval_runtime": 0.6773,
      "eval_samples_per_second": 2214.774,
      "eval_spearmanr": 0.31416755823960263,
      "eval_steps_per_second": 8.859,
      "step": 207
    },
    {
      "epoch": 9.0,
      "step": 207,
      "total_flos": 3426936721288704.0,
      "train_loss": 1.2543938493958995,
      "train_runtime": 66.8187,
      "train_samples_per_second": 4301.938,
      "train_steps_per_second": 17.211
    }
  ],
  "logging_steps": 1,
  "max_steps": 1150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3426936721288704.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}