{
  "best_metric": 0.10353731364011765,
  "best_model_checkpoint": "bert-base-uncased_rxnorm_babbage/checkpoint-2500",
  "epoch": 49.76958525345622,
  "eval_steps": 100,
  "global_step": 2700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.8433179723502304,
      "grad_norm": 0.49460217356681824,
      "learning_rate": 4.8166666666666674e-05,
      "loss": 1.0876,
      "step": 100
    },
    {
      "epoch": 1.8433179723502304,
      "eval_accuracy": 0.861022820273691,
      "eval_loss": 0.6498942375183105,
      "eval_runtime": 1.5825,
      "eval_samples_per_second": 921.986,
      "eval_steps_per_second": 7.583,
      "step": 100
    },
    {
      "epoch": 3.686635944700461,
      "grad_norm": 0.4745028614997864,
      "learning_rate": 4.631481481481481e-05,
      "loss": 0.6229,
      "step": 200
    },
    {
      "epoch": 3.686635944700461,
      "eval_accuracy": 0.8976233828461907,
      "eval_loss": 0.45648184418678284,
      "eval_runtime": 2.1968,
      "eval_samples_per_second": 664.134,
      "eval_steps_per_second": 5.462,
      "step": 200
    },
    {
      "epoch": 5.529953917050691,
      "grad_norm": 0.4159073233604431,
      "learning_rate": 4.4462962962962966e-05,
      "loss": 0.4851,
      "step": 300
    },
    {
      "epoch": 5.529953917050691,
      "eval_accuracy": 0.9128444732034104,
      "eval_loss": 0.3751491904258728,
      "eval_runtime": 2.5186,
      "eval_samples_per_second": 579.279,
      "eval_steps_per_second": 4.764,
      "step": 300
    },
    {
      "epoch": 7.373271889400922,
      "grad_norm": 0.40352028608322144,
      "learning_rate": 4.261111111111111e-05,
      "loss": 0.4009,
      "step": 400
    },
    {
      "epoch": 7.373271889400922,
      "eval_accuracy": 0.926327338644278,
      "eval_loss": 0.3037292957305908,
      "eval_runtime": 2.2024,
      "eval_samples_per_second": 662.468,
      "eval_steps_per_second": 5.449,
      "step": 400
    },
    {
      "epoch": 9.216589861751151,
      "grad_norm": 0.3955836296081543,
      "learning_rate": 4.0759259259259264e-05,
      "loss": 0.34,
      "step": 500
    },
    {
      "epoch": 9.216589861751151,
      "eval_accuracy": 0.9363742622825171,
      "eval_loss": 0.256952166557312,
      "eval_runtime": 2.2081,
      "eval_samples_per_second": 660.734,
      "eval_steps_per_second": 5.434,
      "step": 500
    },
    {
      "epoch": 11.059907834101383,
      "grad_norm": 0.35034212470054626,
      "learning_rate": 3.890740740740741e-05,
      "loss": 0.302,
      "step": 600
    },
    {
      "epoch": 11.059907834101383,
      "eval_accuracy": 0.9409385419824189,
      "eval_loss": 0.23246321082115173,
      "eval_runtime": 2.205,
      "eval_samples_per_second": 661.68,
      "eval_steps_per_second": 5.442,
      "step": 600
    },
    {
      "epoch": 12.903225806451612,
      "grad_norm": 0.3660498261451721,
      "learning_rate": 3.705555555555556e-05,
      "loss": 0.2701,
      "step": 700
    },
    {
      "epoch": 12.903225806451612,
      "eval_accuracy": 0.9453203622945321,
      "eval_loss": 0.21490083634853363,
      "eval_runtime": 1.8882,
      "eval_samples_per_second": 772.706,
      "eval_steps_per_second": 6.355,
      "step": 700
    },
    {
      "epoch": 14.746543778801843,
      "grad_norm": 0.35468629002571106,
      "learning_rate": 3.520370370370371e-05,
      "loss": 0.2488,
      "step": 800
    },
    {
      "epoch": 14.746543778801843,
      "eval_accuracy": 0.9499065420560747,
      "eval_loss": 0.19009685516357422,
      "eval_runtime": 2.5126,
      "eval_samples_per_second": 580.675,
      "eval_steps_per_second": 4.776,
      "step": 800
    },
    {
      "epoch": 16.589861751152075,
      "grad_norm": 0.33421748876571655,
      "learning_rate": 3.3351851851851854e-05,
      "loss": 0.23,
      "step": 900
    },
    {
      "epoch": 16.589861751152075,
      "eval_accuracy": 0.9532317494434636,
      "eval_loss": 0.17756867408752441,
      "eval_runtime": 2.5048,
      "eval_samples_per_second": 582.491,
      "eval_steps_per_second": 4.791,
      "step": 900
    },
    {
      "epoch": 18.433179723502302,
      "grad_norm": 0.33518287539482117,
      "learning_rate": 3.15e-05,
      "loss": 0.2151,
      "step": 1000
    },
    {
      "epoch": 18.433179723502302,
      "eval_accuracy": 0.9545267509401824,
      "eval_loss": 0.16874656081199646,
      "eval_runtime": 2.1981,
      "eval_samples_per_second": 663.764,
      "eval_steps_per_second": 5.459,
      "step": 1000
    },
    {
      "epoch": 20.276497695852534,
      "grad_norm": 0.34771665930747986,
      "learning_rate": 2.9648148148148146e-05,
      "loss": 0.2035,
      "step": 1100
    },
    {
      "epoch": 20.276497695852534,
      "eval_accuracy": 0.9563604730084985,
      "eval_loss": 0.1599314957857132,
      "eval_runtime": 1.8901,
      "eval_samples_per_second": 771.908,
      "eval_steps_per_second": 6.349,
      "step": 1100
    },
    {
      "epoch": 22.119815668202765,
      "grad_norm": 0.36460649967193604,
      "learning_rate": 2.77962962962963e-05,
      "loss": 0.1931,
      "step": 1200
    },
    {
      "epoch": 22.119815668202765,
      "eval_accuracy": 0.9590011289404363,
      "eval_loss": 0.1495586484670639,
      "eval_runtime": 2.1981,
      "eval_samples_per_second": 663.751,
      "eval_steps_per_second": 5.459,
      "step": 1200
    },
    {
      "epoch": 23.963133640552996,
      "grad_norm": 0.31464290618896484,
      "learning_rate": 2.5944444444444444e-05,
      "loss": 0.1848,
      "step": 1300
    },
    {
      "epoch": 23.963133640552996,
      "eval_accuracy": 0.9585790320110277,
      "eval_loss": 0.1497500240802765,
      "eval_runtime": 2.2017,
      "eval_samples_per_second": 662.667,
      "eval_steps_per_second": 5.45,
      "step": 1300
    },
    {
      "epoch": 25.806451612903224,
      "grad_norm": 0.36158451437950134,
      "learning_rate": 2.4092592592592593e-05,
      "loss": 0.1766,
      "step": 1400
    },
    {
      "epoch": 25.806451612903224,
      "eval_accuracy": 0.962577764207473,
      "eval_loss": 0.13511128723621368,
      "eval_runtime": 1.8934,
      "eval_samples_per_second": 770.57,
      "eval_steps_per_second": 6.338,
      "step": 1400
    },
    {
      "epoch": 27.649769585253456,
      "grad_norm": 0.29482343792915344,
      "learning_rate": 2.2240740740740743e-05,
      "loss": 0.1695,
      "step": 1500
    },
    {
      "epoch": 27.649769585253456,
      "eval_accuracy": 0.9624521660736379,
      "eval_loss": 0.13419833779335022,
      "eval_runtime": 2.1989,
      "eval_samples_per_second": 663.518,
      "eval_steps_per_second": 5.457,
      "step": 1500
    },
    {
      "epoch": 29.493087557603687,
      "grad_norm": 0.3058522939682007,
      "learning_rate": 2.0388888888888892e-05,
      "loss": 0.1648,
      "step": 1600
    },
    {
      "epoch": 29.493087557603687,
      "eval_accuracy": 0.9629643832989789,
      "eval_loss": 0.12756001949310303,
      "eval_runtime": 1.8879,
      "eval_samples_per_second": 772.823,
      "eval_steps_per_second": 6.356,
      "step": 1600
    },
    {
      "epoch": 31.336405529953918,
      "grad_norm": 0.302731990814209,
      "learning_rate": 1.8537037037037037e-05,
      "loss": 0.1587,
      "step": 1700
    },
    {
      "epoch": 31.336405529953918,
      "eval_accuracy": 0.9647220525351252,
      "eval_loss": 0.1241796463727951,
      "eval_runtime": 2.1929,
      "eval_samples_per_second": 665.32,
      "eval_steps_per_second": 5.472,
      "step": 1700
    },
    {
      "epoch": 33.17972350230415,
      "grad_norm": 0.2924918830394745,
      "learning_rate": 1.6685185185185187e-05,
      "loss": 0.1535,
      "step": 1800
    },
    {
      "epoch": 33.17972350230415,
      "eval_accuracy": 0.9656390433391728,
      "eval_loss": 0.11920326948165894,
      "eval_runtime": 2.5148,
      "eval_samples_per_second": 580.167,
      "eval_steps_per_second": 4.772,
      "step": 1800
    },
    {
      "epoch": 35.02304147465438,
      "grad_norm": 0.28826281428337097,
      "learning_rate": 1.4833333333333336e-05,
      "loss": 0.1513,
      "step": 1900
    },
    {
      "epoch": 35.02304147465438,
      "eval_accuracy": 0.9667637762291024,
      "eval_loss": 0.11383702605962753,
      "eval_runtime": 2.2026,
      "eval_samples_per_second": 662.398,
      "eval_steps_per_second": 5.448,
      "step": 1900
    },
    {
      "epoch": 36.866359447004605,
      "grad_norm": 0.3061148226261139,
      "learning_rate": 1.2981481481481483e-05,
      "loss": 0.1475,
      "step": 2000
    },
    {
      "epoch": 36.866359447004605,
      "eval_accuracy": 0.9674900966719034,
      "eval_loss": 0.11254725605249405,
      "eval_runtime": 2.1953,
      "eval_samples_per_second": 664.587,
      "eval_steps_per_second": 5.466,
      "step": 2000
    },
    {
      "epoch": 38.70967741935484,
      "grad_norm": 0.29151493310928345,
      "learning_rate": 1.1129629629629631e-05,
      "loss": 0.1451,
      "step": 2100
    },
    {
      "epoch": 38.70967741935484,
      "eval_accuracy": 0.9666907144077611,
      "eval_loss": 0.11542753875255585,
      "eval_runtime": 2.1993,
      "eval_samples_per_second": 663.397,
      "eval_steps_per_second": 5.456,
      "step": 2100
    },
    {
      "epoch": 40.55299539170507,
      "grad_norm": 0.2895043194293976,
      "learning_rate": 9.277777777777778e-06,
      "loss": 0.1434,
      "step": 2200
    },
    {
      "epoch": 40.55299539170507,
      "eval_accuracy": 0.9673665255454876,
      "eval_loss": 0.11377675086259842,
      "eval_runtime": 2.5118,
      "eval_samples_per_second": 580.85,
      "eval_steps_per_second": 4.777,
      "step": 2200
    },
    {
      "epoch": 42.3963133640553,
      "grad_norm": 0.2832992374897003,
      "learning_rate": 7.425925925925927e-06,
      "loss": 0.1413,
      "step": 2300
    },
    {
      "epoch": 42.3963133640553,
      "eval_accuracy": 0.9687953762491814,
      "eval_loss": 0.10766085237264633,
      "eval_runtime": 2.5152,
      "eval_samples_per_second": 580.081,
      "eval_steps_per_second": 4.771,
      "step": 2300
    },
    {
      "epoch": 44.23963133640553,
      "grad_norm": 0.2880547046661377,
      "learning_rate": 5.574074074074074e-06,
      "loss": 0.1398,
      "step": 2400
    },
    {
      "epoch": 44.23963133640553,
      "eval_accuracy": 0.9683431416498955,
      "eval_loss": 0.10695406049489975,
      "eval_runtime": 2.1984,
      "eval_samples_per_second": 663.665,
      "eval_steps_per_second": 5.459,
      "step": 2400
    },
    {
      "epoch": 46.08294930875576,
      "grad_norm": 0.2785598337650299,
      "learning_rate": 3.722222222222222e-06,
      "loss": 0.1373,
      "step": 2500
    },
    {
      "epoch": 46.08294930875576,
      "eval_accuracy": 0.9688496396065625,
      "eval_loss": 0.10353731364011765,
      "eval_runtime": 1.8905,
      "eval_samples_per_second": 771.736,
      "eval_steps_per_second": 6.347,
      "step": 2500
    },
    {
      "epoch": 47.92626728110599,
      "grad_norm": 0.2812165319919586,
      "learning_rate": 1.8703703703703707e-06,
      "loss": 0.1366,
      "step": 2600
    },
    {
      "epoch": 47.92626728110599,
      "eval_accuracy": 0.9693371955635172,
      "eval_loss": 0.10568127781152725,
      "eval_runtime": 1.8904,
      "eval_samples_per_second": 771.79,
      "eval_steps_per_second": 6.348,
      "step": 2600
    },
    {
      "epoch": 49.76958525345622,
      "grad_norm": 0.2685796022415161,
      "learning_rate": 1.8518518518518518e-08,
      "loss": 0.1368,
      "step": 2700
    },
    {
      "epoch": 49.76958525345622,
      "eval_accuracy": 0.9689754071024974,
      "eval_loss": 0.10547203570604324,
      "eval_runtime": 1.8901,
      "eval_samples_per_second": 771.907,
      "eval_steps_per_second": 6.349,
      "step": 2700
    }
  ],
  "logging_steps": 100,
  "max_steps": 2700,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.638543188324516e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}