{
  "best_global_step": 2000,
  "best_metric": 0.33797791600227356,
  "best_model_checkpoint": "Assignment4_Distilled_ModernBERT/run-3/checkpoint-2000",
  "epoch": 4.1928721174004195,
  "eval_steps": 100,
  "global_step": 2000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.20964360587002095,
      "grad_norm": 9.00671100616455,
      "learning_rate": 5.982303202015292e-05,
      "loss": 5.2947,
      "step": 100
    },
    {
      "epoch": 0.20964360587002095,
      "eval_accuracy": 0.7319354838709677,
      "eval_loss": 3.0118744373321533,
      "eval_runtime": 24.6185,
      "eval_samples_per_second": 125.921,
      "eval_steps_per_second": 15.76,
      "step": 100
    },
    {
      "epoch": 0.4192872117400419,
      "grad_norm": 9.74803352355957,
      "learning_rate": 5.9287097231582686e-05,
      "loss": 2.1846,
      "step": 200
    },
    {
      "epoch": 0.4192872117400419,
      "eval_accuracy": 0.9187096774193548,
      "eval_loss": 1.5048528909683228,
      "eval_runtime": 24.5137,
      "eval_samples_per_second": 126.46,
      "eval_steps_per_second": 15.828,
      "step": 200
    },
    {
      "epoch": 0.6289308176100629,
      "grad_norm": 2.313565731048584,
      "learning_rate": 5.8398628698129085e-05,
      "loss": 1.2384,
      "step": 300
    },
    {
      "epoch": 0.6289308176100629,
      "eval_accuracy": 0.9496774193548387,
      "eval_loss": 0.9920670986175537,
      "eval_runtime": 24.4854,
      "eval_samples_per_second": 126.606,
      "eval_steps_per_second": 15.846,
      "step": 300
    },
    {
      "epoch": 0.8385744234800838,
      "grad_norm": 6.652754306793213,
      "learning_rate": 5.71683210658621e-05,
      "loss": 0.8701,
      "step": 400
    },
    {
      "epoch": 0.8385744234800838,
      "eval_accuracy": 0.9574193548387097,
      "eval_loss": 0.8129040002822876,
      "eval_runtime": 24.4845,
      "eval_samples_per_second": 126.611,
      "eval_steps_per_second": 15.847,
      "step": 400
    },
    {
      "epoch": 1.0482180293501049,
      "grad_norm": 6.9664835929870605,
      "learning_rate": 5.561098375592486e-05,
      "loss": 0.6485,
      "step": 500
    },
    {
      "epoch": 1.0482180293501049,
      "eval_accuracy": 0.964516129032258,
      "eval_loss": 0.6442162990570068,
      "eval_runtime": 24.8893,
      "eval_samples_per_second": 124.552,
      "eval_steps_per_second": 15.589,
      "step": 500
    },
    {
      "epoch": 1.2578616352201257,
      "grad_norm": 1.474057912826538,
      "learning_rate": 5.3745362701025445e-05,
      "loss": 0.464,
      "step": 600
    },
    {
      "epoch": 1.2578616352201257,
      "eval_accuracy": 0.9658064516129032,
      "eval_loss": 0.5599253177642822,
      "eval_runtime": 25.4446,
      "eval_samples_per_second": 121.833,
      "eval_steps_per_second": 15.249,
      "step": 600
    },
    {
      "epoch": 1.4675052410901468,
      "grad_norm": 1.928739309310913,
      "learning_rate": 5.1593914697473044e-05,
      "loss": 0.4216,
      "step": 700
    },
    {
      "epoch": 1.4675052410901468,
      "eval_accuracy": 0.9648387096774194,
      "eval_loss": 0.5214470624923706,
      "eval_runtime": 25.3207,
      "eval_samples_per_second": 122.43,
      "eval_steps_per_second": 15.323,
      "step": 700
    },
    {
      "epoch": 1.6771488469601676,
      "grad_norm": 0.9871991276741028,
      "learning_rate": 4.918253708892097e-05,
      "loss": 0.3627,
      "step": 800
    },
    {
      "epoch": 1.6771488469601676,
      "eval_accuracy": 0.9664516129032258,
      "eval_loss": 0.48103204369544983,
      "eval_runtime": 24.854,
      "eval_samples_per_second": 124.728,
      "eval_steps_per_second": 15.611,
      "step": 800
    },
    {
      "epoch": 1.8867924528301887,
      "grad_norm": 3.7150447368621826,
      "learning_rate": 4.654025603565916e-05,
      "loss": 0.3641,
      "step": 900
    },
    {
      "epoch": 1.8867924528301887,
      "eval_accuracy": 0.9716129032258064,
      "eval_loss": 0.4549710154533386,
      "eval_runtime": 29.4013,
      "eval_samples_per_second": 105.438,
      "eval_steps_per_second": 13.197,
      "step": 900
    },
    {
      "epoch": 2.0964360587002098,
      "grad_norm": 1.0180896520614624,
      "learning_rate": 4.3698877121811395e-05,
      "loss": 0.3114,
      "step": 1000
    },
    {
      "epoch": 2.0964360587002098,
      "eval_accuracy": 0.97,
      "eval_loss": 0.42610129714012146,
      "eval_runtime": 28.5631,
      "eval_samples_per_second": 108.532,
      "eval_steps_per_second": 13.584,
      "step": 1000
    },
    {
      "epoch": 2.3060796645702304,
      "grad_norm": 0.8552842140197754,
      "learning_rate": 4.069260250613759e-05,
      "loss": 0.2549,
      "step": 1100
    },
    {
      "epoch": 2.3060796645702304,
      "eval_accuracy": 0.97,
      "eval_loss": 0.4119759500026703,
      "eval_runtime": 24.7756,
      "eval_samples_per_second": 125.123,
      "eval_steps_per_second": 15.661,
      "step": 1100
    },
    {
      "epoch": 2.5157232704402515,
      "grad_norm": 1.075974464416504,
      "learning_rate": 3.755761922486161e-05,
      "loss": 0.2496,
      "step": 1200
    },
    {
      "epoch": 2.5157232704402515,
      "eval_accuracy": 0.9703225806451613,
      "eval_loss": 0.3999555706977844,
      "eval_runtime": 24.7123,
      "eval_samples_per_second": 125.444,
      "eval_steps_per_second": 15.701,
      "step": 1200
    },
    {
      "epoch": 2.7253668763102725,
      "grad_norm": 0.907684326171875,
      "learning_rate": 3.433166360219327e-05,
      "loss": 0.2276,
      "step": 1300
    },
    {
      "epoch": 2.7253668763102725,
      "eval_accuracy": 0.9709677419354839,
      "eval_loss": 0.3826442062854767,
      "eval_runtime": 24.7314,
      "eval_samples_per_second": 125.347,
      "eval_steps_per_second": 15.689,
      "step": 1300
    },
    {
      "epoch": 2.9350104821802936,
      "grad_norm": 0.8015017509460449,
      "learning_rate": 3.1053567011808615e-05,
      "loss": 0.2209,
      "step": 1400
    },
    {
      "epoch": 2.9350104821802936,
      "eval_accuracy": 0.9712903225806452,
      "eval_loss": 0.37278029322624207,
      "eval_runtime": 24.835,
      "eval_samples_per_second": 124.824,
      "eval_steps_per_second": 15.623,
      "step": 1400
    },
    {
      "epoch": 3.1446540880503147,
      "grad_norm": 0.8900270462036133,
      "learning_rate": 2.776278845703461e-05,
      "loss": 0.1986,
      "step": 1500
    },
    {
      "epoch": 3.1446540880503147,
      "eval_accuracy": 0.9696774193548388,
      "eval_loss": 0.35799843072891235,
      "eval_runtime": 24.6469,
      "eval_samples_per_second": 125.776,
      "eval_steps_per_second": 15.742,
      "step": 1500
    },
    {
      "epoch": 3.3542976939203353,
      "grad_norm": 0.7829304337501526,
      "learning_rate": 2.44989395961494e-05,
      "loss": 0.185,
      "step": 1600
    },
    {
      "epoch": 3.3542976939203353,
      "eval_accuracy": 0.9716129032258064,
      "eval_loss": 0.3530636131763458,
      "eval_runtime": 24.982,
      "eval_samples_per_second": 124.089,
      "eval_steps_per_second": 15.531,
      "step": 1600
    },
    {
      "epoch": 3.5639412997903563,
      "grad_norm": 0.6377654671669006,
      "learning_rate": 2.130130793014917e-05,
      "loss": 0.1777,
      "step": 1700
    },
    {
      "epoch": 3.5639412997903563,
      "eval_accuracy": 0.97,
      "eval_loss": 0.35114002227783203,
      "eval_runtime": 25.0031,
      "eval_samples_per_second": 123.985,
      "eval_steps_per_second": 15.518,
      "step": 1700
    },
    {
      "epoch": 3.7735849056603774,
      "grad_norm": 0.7244699597358704,
      "learning_rate": 1.8208383892451047e-05,
      "loss": 0.175,
      "step": 1800
    },
    {
      "epoch": 3.7735849056603774,
      "eval_accuracy": 0.9719354838709677,
      "eval_loss": 0.34239596128463745,
      "eval_runtime": 25.13,
      "eval_samples_per_second": 123.358,
      "eval_steps_per_second": 15.44,
      "step": 1800
    },
    {
      "epoch": 3.9832285115303985,
      "grad_norm": 0.7429079413414001,
      "learning_rate": 1.525739753303325e-05,
      "loss": 0.1747,
      "step": 1900
    },
    {
      "epoch": 3.9832285115303985,
      "eval_accuracy": 0.9712903225806452,
      "eval_loss": 0.3403235673904419,
      "eval_runtime": 26.1406,
      "eval_samples_per_second": 118.59,
      "eval_steps_per_second": 14.843,
      "step": 1900
    },
    {
      "epoch": 4.1928721174004195,
      "grad_norm": 0.5660148859024048,
      "learning_rate": 1.2483870374024092e-05,
      "loss": 0.1555,
      "step": 2000
    },
    {
      "epoch": 4.1928721174004195,
      "eval_accuracy": 0.9709677419354839,
      "eval_loss": 0.33797791600227356,
      "eval_runtime": 31.949,
      "eval_samples_per_second": 97.03,
      "eval_steps_per_second": 12.144,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 2862,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 896601881570736.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.41727432608641335,
    "num_train_epochs": 6,
    "temperature": 10
  }
}