{
  "best_global_step": 2000,
  "best_metric": 0.37039539217948914,
  "best_model_checkpoint": "Assignment4_Distilled_ModernBERT/run-0/checkpoint-2000",
  "epoch": 4.1928721174004195,
  "eval_steps": 100,
  "global_step": 2000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.20964360587002095,
      "grad_norm": 13.524835586547852,
      "learning_rate": 5.982303202015292e-05,
      "loss": 7.1794,
      "step": 100
    },
    {
      "epoch": 0.20964360587002095,
      "eval_accuracy": 0.7551612903225806,
      "eval_loss": 3.656074285507202,
      "eval_runtime": 27.0212,
      "eval_samples_per_second": 114.725,
      "eval_steps_per_second": 14.359,
      "step": 100
    },
    {
      "epoch": 0.4192872117400419,
      "grad_norm": 7.738944053649902,
      "learning_rate": 5.9287097231582686e-05,
      "loss": 2.6619,
      "step": 200
    },
    {
      "epoch": 0.4192872117400419,
      "eval_accuracy": 0.9035483870967742,
      "eval_loss": 1.7683824300765991,
      "eval_runtime": 24.4159,
      "eval_samples_per_second": 126.967,
      "eval_steps_per_second": 15.891,
      "step": 200
    },
    {
      "epoch": 0.6289308176100629,
      "grad_norm": 5.298769950866699,
      "learning_rate": 5.8398628698129085e-05,
      "loss": 1.4594,
      "step": 300
    },
    {
      "epoch": 0.6289308176100629,
      "eval_accuracy": 0.9451612903225807,
      "eval_loss": 1.1510082483291626,
      "eval_runtime": 24.2312,
      "eval_samples_per_second": 127.934,
      "eval_steps_per_second": 16.012,
      "step": 300
    },
    {
      "epoch": 0.8385744234800838,
      "grad_norm": 7.6655049324035645,
      "learning_rate": 5.71683210658621e-05,
      "loss": 1.0566,
      "step": 400
    },
    {
      "epoch": 0.8385744234800838,
      "eval_accuracy": 0.9551612903225807,
      "eval_loss": 0.9296484589576721,
      "eval_runtime": 24.2932,
      "eval_samples_per_second": 127.608,
      "eval_steps_per_second": 15.972,
      "step": 400
    },
    {
      "epoch": 1.0482180293501049,
      "grad_norm": 14.238273620605469,
      "learning_rate": 5.561098375592486e-05,
      "loss": 0.8012,
      "step": 500
    },
    {
      "epoch": 1.0482180293501049,
      "eval_accuracy": 0.96,
      "eval_loss": 0.7582064270973206,
      "eval_runtime": 24.3409,
      "eval_samples_per_second": 127.358,
      "eval_steps_per_second": 15.94,
      "step": 500
    },
    {
      "epoch": 1.2578616352201257,
      "grad_norm": 2.1679141521453857,
      "learning_rate": 5.3745362701025445e-05,
      "loss": 0.5647,
      "step": 600
    },
    {
      "epoch": 1.2578616352201257,
      "eval_accuracy": 0.9661290322580646,
      "eval_loss": 0.6347441077232361,
      "eval_runtime": 25.9162,
      "eval_samples_per_second": 119.616,
      "eval_steps_per_second": 14.971,
      "step": 600
    },
    {
      "epoch": 1.4675052410901468,
      "grad_norm": 3.5543723106384277,
      "learning_rate": 5.1593914697473044e-05,
      "loss": 0.5087,
      "step": 700
    },
    {
      "epoch": 1.4675052410901468,
      "eval_accuracy": 0.9593548387096774,
      "eval_loss": 0.6069635152816772,
      "eval_runtime": 24.4377,
      "eval_samples_per_second": 126.853,
      "eval_steps_per_second": 15.877,
      "step": 700
    },
    {
      "epoch": 1.6771488469601676,
      "grad_norm": 2.1209726333618164,
      "learning_rate": 4.918253708892097e-05,
      "loss": 0.4459,
      "step": 800
    },
    {
      "epoch": 1.6771488469601676,
      "eval_accuracy": 0.9619354838709677,
      "eval_loss": 0.5518479347229004,
      "eval_runtime": 24.5853,
      "eval_samples_per_second": 126.092,
      "eval_steps_per_second": 15.782,
      "step": 800
    },
    {
      "epoch": 1.8867924528301887,
      "grad_norm": 7.709955215454102,
      "learning_rate": 4.654025603565916e-05,
      "loss": 0.4246,
      "step": 900
    },
    {
      "epoch": 1.8867924528301887,
      "eval_accuracy": 0.9693548387096774,
      "eval_loss": 0.5302325487136841,
      "eval_runtime": 24.3067,
      "eval_samples_per_second": 127.537,
      "eval_steps_per_second": 15.963,
      "step": 900
    },
    {
      "epoch": 2.0964360587002098,
      "grad_norm": 4.7413787841796875,
      "learning_rate": 4.3698877121811395e-05,
      "loss": 0.3684,
      "step": 1000
    },
    {
      "epoch": 2.0964360587002098,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.4881534278392792,
      "eval_runtime": 24.3233,
      "eval_samples_per_second": 127.45,
      "eval_steps_per_second": 15.952,
      "step": 1000
    },
    {
      "epoch": 2.3060796645702304,
      "grad_norm": 1.3662949800491333,
      "learning_rate": 4.069260250613759e-05,
      "loss": 0.2996,
      "step": 1100
    },
    {
      "epoch": 2.3060796645702304,
      "eval_accuracy": 0.9696774193548388,
      "eval_loss": 0.4698588252067566,
      "eval_runtime": 26.4097,
      "eval_samples_per_second": 117.381,
      "eval_steps_per_second": 14.692,
      "step": 1100
    },
    {
      "epoch": 2.5157232704402515,
      "grad_norm": 2.2415976524353027,
      "learning_rate": 3.755761922486161e-05,
      "loss": 0.3004,
      "step": 1200
    },
    {
      "epoch": 2.5157232704402515,
      "eval_accuracy": 0.9690322580645161,
      "eval_loss": 0.4617905616760254,
      "eval_runtime": 24.5627,
      "eval_samples_per_second": 126.208,
      "eval_steps_per_second": 15.796,
      "step": 1200
    },
    {
      "epoch": 2.7253668763102725,
      "grad_norm": 1.3881778717041016,
      "learning_rate": 3.433166360219327e-05,
      "loss": 0.2721,
      "step": 1300
    },
    {
      "epoch": 2.7253668763102725,
      "eval_accuracy": 0.9696774193548388,
      "eval_loss": 0.4343847632408142,
      "eval_runtime": 24.5311,
      "eval_samples_per_second": 126.37,
      "eval_steps_per_second": 15.817,
      "step": 1300
    },
    {
      "epoch": 2.9350104821802936,
      "grad_norm": 1.2978781461715698,
      "learning_rate": 3.1053567011808615e-05,
      "loss": 0.2591,
      "step": 1400
    },
    {
      "epoch": 2.9350104821802936,
      "eval_accuracy": 0.97,
      "eval_loss": 0.418453186750412,
      "eval_runtime": 24.631,
      "eval_samples_per_second": 125.858,
      "eval_steps_per_second": 15.752,
      "step": 1400
    },
    {
      "epoch": 3.1446540880503147,
      "grad_norm": 1.3791582584381104,
      "learning_rate": 2.776278845703461e-05,
      "loss": 0.2313,
      "step": 1500
    },
    {
      "epoch": 3.1446540880503147,
      "eval_accuracy": 0.9693548387096774,
      "eval_loss": 0.40362659096717834,
      "eval_runtime": 24.4944,
      "eval_samples_per_second": 126.56,
      "eval_steps_per_second": 15.84,
      "step": 1500
    },
    {
      "epoch": 3.3542976939203353,
      "grad_norm": 1.2525160312652588,
      "learning_rate": 2.44989395961494e-05,
      "loss": 0.2179,
      "step": 1600
    },
    {
      "epoch": 3.3542976939203353,
      "eval_accuracy": 0.9706451612903226,
      "eval_loss": 0.3979440927505493,
      "eval_runtime": 26.4009,
      "eval_samples_per_second": 117.42,
      "eval_steps_per_second": 14.696,
      "step": 1600
    },
    {
      "epoch": 3.5639412997903563,
      "grad_norm": 0.8602608442306519,
      "learning_rate": 2.130130793014917e-05,
      "loss": 0.2074,
      "step": 1700
    },
    {
      "epoch": 3.5639412997903563,
      "eval_accuracy": 0.9690322580645161,
      "eval_loss": 0.3927501142024994,
      "eval_runtime": 24.7417,
      "eval_samples_per_second": 125.295,
      "eval_steps_per_second": 15.682,
      "step": 1700
    },
    {
      "epoch": 3.7735849056603774,
      "grad_norm": 0.9303811192512512,
      "learning_rate": 1.8208383892451047e-05,
      "loss": 0.204,
      "step": 1800
    },
    {
      "epoch": 3.7735849056603774,
      "eval_accuracy": 0.97,
      "eval_loss": 0.3800348937511444,
      "eval_runtime": 24.5036,
      "eval_samples_per_second": 126.512,
      "eval_steps_per_second": 15.834,
      "step": 1800
    },
    {
      "epoch": 3.9832285115303985,
      "grad_norm": 2.1014115810394287,
      "learning_rate": 1.525739753303325e-05,
      "loss": 0.202,
      "step": 1900
    },
    {
      "epoch": 3.9832285115303985,
      "eval_accuracy": 0.9709677419354839,
      "eval_loss": 0.37354010343551636,
      "eval_runtime": 24.7707,
      "eval_samples_per_second": 125.148,
      "eval_steps_per_second": 15.664,
      "step": 1900
    },
    {
      "epoch": 4.1928721174004195,
      "grad_norm": 0.7623395919799805,
      "learning_rate": 1.2483870374024092e-05,
      "loss": 0.1776,
      "step": 2000
    },
    {
      "epoch": 4.1928721174004195,
      "eval_accuracy": 0.9703225806451613,
      "eval_loss": 0.37039539217948914,
      "eval_runtime": 24.6812,
      "eval_samples_per_second": 125.602,
      "eval_steps_per_second": 15.72,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 2862,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 896601881570736.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.43742261757664425,
    "num_train_epochs": 6,
    "temperature": 7
  }
}