{
  "best_metric": 0.5377358490566038,
  "best_model_checkpoint": "./Validated_Balanced_Raw_Data_model_boost5_outputs/checkpoint-640",
  "epoch": 30.0,
  "eval_steps": 500,
  "global_step": 2400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.625,
      "grad_norm": 9.25554370880127,
      "learning_rate": 4.166666666666667e-06,
      "loss": 1.3971,
      "step": 50
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.3584905660377358,
      "eval_loss": 1.3380550146102905,
      "eval_runtime": 1.7516,
      "eval_samples_per_second": 121.03,
      "eval_steps_per_second": 15.414,
      "step": 80
    },
    {
      "epoch": 1.25,
      "grad_norm": 14.525150299072266,
      "learning_rate": 8.333333333333334e-06,
      "loss": 1.3703,
      "step": 100
    },
    {
      "epoch": 1.875,
      "grad_norm": 21.691543579101562,
      "learning_rate": 1.25e-05,
      "loss": 1.3299,
      "step": 150
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.3632075471698113,
      "eval_loss": 1.2896958589553833,
      "eval_runtime": 1.7536,
      "eval_samples_per_second": 120.895,
      "eval_steps_per_second": 15.397,
      "step": 160
    },
    {
      "epoch": 2.5,
      "grad_norm": 16.006181716918945,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 1.2782,
      "step": 200
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.41509433962264153,
      "eval_loss": 1.242240071296692,
      "eval_runtime": 1.7596,
      "eval_samples_per_second": 120.481,
      "eval_steps_per_second": 15.344,
      "step": 240
    },
    {
      "epoch": 3.125,
      "grad_norm": 15.849879264831543,
      "learning_rate": 1.9998942319271076e-05,
      "loss": 1.2726,
      "step": 250
    },
    {
      "epoch": 3.75,
      "grad_norm": 18.681062698364258,
      "learning_rate": 1.9961946980917457e-05,
      "loss": 1.2159,
      "step": 300
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.44339622641509435,
      "eval_loss": 1.1955242156982422,
      "eval_runtime": 1.7631,
      "eval_samples_per_second": 120.246,
      "eval_steps_per_second": 15.314,
      "step": 320
    },
    {
      "epoch": 4.375,
      "grad_norm": 18.359554290771484,
      "learning_rate": 1.9872291131173743e-05,
      "loss": 1.2168,
      "step": 350
    },
    {
      "epoch": 5.0,
      "grad_norm": 34.79191589355469,
      "learning_rate": 1.973044870579824e-05,
      "loss": 1.1707,
      "step": 400
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.5047169811320755,
      "eval_loss": 1.1768745183944702,
      "eval_runtime": 1.7634,
      "eval_samples_per_second": 120.223,
      "eval_steps_per_second": 15.311,
      "step": 400
    },
    {
      "epoch": 5.625,
      "grad_norm": 24.4814395904541,
      "learning_rate": 1.953716950748227e-05,
      "loss": 1.1314,
      "step": 450
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.4858490566037736,
      "eval_loss": 1.184369683265686,
      "eval_runtime": 1.7518,
      "eval_samples_per_second": 121.017,
      "eval_steps_per_second": 15.413,
      "step": 480
    },
    {
      "epoch": 6.25,
      "grad_norm": 16.425092697143555,
      "learning_rate": 1.9293475242268224e-05,
      "loss": 1.0748,
      "step": 500
    },
    {
      "epoch": 6.875,
      "grad_norm": 29.872549057006836,
      "learning_rate": 1.900065411864121e-05,
      "loss": 1.0789,
      "step": 550
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.5141509433962265,
      "eval_loss": 1.1869956254959106,
      "eval_runtime": 1.7589,
      "eval_samples_per_second": 120.532,
      "eval_steps_per_second": 15.351,
      "step": 560
    },
    {
      "epoch": 7.5,
      "grad_norm": 25.275009155273438,
      "learning_rate": 1.866025403784439e-05,
      "loss": 1.0419,
      "step": 600
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.5377358490566038,
      "eval_loss": 1.1317145824432373,
      "eval_runtime": 1.7618,
      "eval_samples_per_second": 120.332,
      "eval_steps_per_second": 15.325,
      "step": 640
    },
    {
      "epoch": 8.125,
      "grad_norm": 26.997371673583984,
      "learning_rate": 1.8274074411415104e-05,
      "loss": 1.0718,
      "step": 650
    },
    {
      "epoch": 8.75,
      "grad_norm": 34.84120559692383,
      "learning_rate": 1.784415664919576e-05,
      "loss": 1.0222,
      "step": 700
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.49056603773584906,
      "eval_loss": 1.2061761617660522,
      "eval_runtime": 1.7546,
      "eval_samples_per_second": 120.825,
      "eval_steps_per_second": 15.388,
      "step": 720
    },
    {
      "epoch": 9.375,
      "grad_norm": 25.939376831054688,
      "learning_rate": 1.737277336810124e-05,
      "loss": 0.9666,
      "step": 750
    },
    {
      "epoch": 10.0,
      "grad_norm": 31.671573638916016,
      "learning_rate": 1.686241637868734e-05,
      "loss": 1.0077,
      "step": 800
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.47641509433962265,
      "eval_loss": 1.1582320928573608,
      "eval_runtime": 1.7731,
      "eval_samples_per_second": 119.563,
      "eval_steps_per_second": 15.227,
      "step": 800
    },
    {
      "epoch": 10.625,
      "grad_norm": 24.509418487548828,
      "learning_rate": 1.6315783513024977e-05,
      "loss": 0.9161,
      "step": 850
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.5047169811320755,
      "eval_loss": 1.1841322183609009,
      "eval_runtime": 1.7646,
      "eval_samples_per_second": 120.142,
      "eval_steps_per_second": 15.301,
      "step": 880
    },
    {
      "epoch": 11.25,
      "grad_norm": 22.128129959106445,
      "learning_rate": 1.573576436351046e-05,
      "loss": 0.917,
      "step": 900
    },
    {
      "epoch": 11.875,
      "grad_norm": 25.539560317993164,
      "learning_rate": 1.5125425007998653e-05,
      "loss": 0.9066,
      "step": 950
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.5235849056603774,
      "eval_loss": 1.1602777242660522,
      "eval_runtime": 1.7679,
      "eval_samples_per_second": 119.918,
      "eval_steps_per_second": 15.273,
      "step": 960
    },
    {
      "epoch": 12.5,
      "grad_norm": 26.286453247070312,
      "learning_rate": 1.4487991802004625e-05,
      "loss": 0.8948,
      "step": 1000
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.49056603773584906,
      "eval_loss": 1.1681482791900635,
      "eval_runtime": 1.7536,
      "eval_samples_per_second": 120.891,
      "eval_steps_per_second": 15.397,
      "step": 1040
    },
    {
      "epoch": 13.125,
      "grad_norm": 27.811683654785156,
      "learning_rate": 1.3826834323650899e-05,
      "loss": 0.8493,
      "step": 1050
    },
    {
      "epoch": 13.75,
      "grad_norm": 25.57184410095215,
      "learning_rate": 1.3145447561516138e-05,
      "loss": 0.8382,
      "step": 1100
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.49528301886792453,
      "eval_loss": 1.1979925632476807,
      "eval_runtime": 1.7633,
      "eval_samples_per_second": 120.231,
      "eval_steps_per_second": 15.312,
      "step": 1120
    },
    {
      "epoch": 14.375,
      "grad_norm": 13.473695755004883,
      "learning_rate": 1.2447433439543239e-05,
      "loss": 0.8182,
      "step": 1150
    },
    {
      "epoch": 15.0,
      "grad_norm": 39.860130310058594,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 0.8279,
      "step": 1200
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.49528301886792453,
      "eval_loss": 1.2144849300384521,
      "eval_runtime": 1.7677,
      "eval_samples_per_second": 119.929,
      "eval_steps_per_second": 15.274,
      "step": 1200
    },
    {
      "epoch": 15.625,
      "grad_norm": 31.376667022705078,
      "learning_rate": 1.101635078182802e-05,
      "loss": 0.7839,
      "step": 1250
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.5,
      "eval_loss": 1.21422278881073,
      "eval_runtime": 1.7592,
      "eval_samples_per_second": 120.508,
      "eval_steps_per_second": 15.348,
      "step": 1280
    },
    {
      "epoch": 16.25,
      "grad_norm": 50.575355529785156,
      "learning_rate": 1.0290847187431115e-05,
      "loss": 0.8003,
      "step": 1300
    },
    {
      "epoch": 16.875,
      "grad_norm": 23.401227951049805,
      "learning_rate": 9.563806126346643e-06,
      "loss": 0.7797,
      "step": 1350
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.5,
      "eval_loss": 1.228758454322815,
      "eval_runtime": 1.7545,
      "eval_samples_per_second": 120.83,
      "eval_steps_per_second": 15.389,
      "step": 1360
    },
    {
      "epoch": 17.5,
      "grad_norm": 27.069194793701172,
      "learning_rate": 8.839070858747697e-06,
      "loss": 0.7794,
      "step": 1400
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.5,
      "eval_loss": 1.2374235391616821,
      "eval_runtime": 1.7476,
      "eval_samples_per_second": 121.308,
      "eval_steps_per_second": 15.45,
      "step": 1440
    },
    {
      "epoch": 18.125,
      "grad_norm": 33.219261169433594,
      "learning_rate": 8.120472455998882e-06,
      "loss": 0.7616,
      "step": 1450
    },
    {
      "epoch": 18.75,
      "grad_norm": 18.872343063354492,
      "learning_rate": 7.411809548974792e-06,
      "loss": 0.7447,
      "step": 1500
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.5047169811320755,
      "eval_loss": 1.207104206085205,
      "eval_runtime": 1.7675,
      "eval_samples_per_second": 119.94,
      "eval_steps_per_second": 15.275,
      "step": 1520
    },
    {
      "epoch": 19.375,
      "grad_norm": 25.904451370239258,
      "learning_rate": 6.716828247864391e-06,
      "loss": 0.7465,
      "step": 1550
    },
    {
      "epoch": 20.0,
      "grad_norm": 12.064042091369629,
      "learning_rate": 6.039202339608432e-06,
      "loss": 0.7101,
      "step": 1600
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.5141509433962265,
      "eval_loss": 1.1943110227584839,
      "eval_runtime": 1.7594,
      "eval_samples_per_second": 120.498,
      "eval_steps_per_second": 15.346,
      "step": 1600
    },
    {
      "epoch": 20.625,
      "grad_norm": 35.04800796508789,
      "learning_rate": 5.382513867649663e-06,
      "loss": 0.7229,
      "step": 1650
    },
    {
      "epoch": 21.0,
      "eval_accuracy": 0.5141509433962265,
      "eval_loss": 1.1929620504379272,
      "eval_runtime": 1.7687,
      "eval_samples_per_second": 119.86,
      "eval_steps_per_second": 15.265,
      "step": 1680
    },
    {
      "epoch": 21.25,
      "grad_norm": 8.097602844238281,
      "learning_rate": 4.7502341966544e-06,
      "loss": 0.6702,
      "step": 1700
    },
    {
      "epoch": 21.875,
      "grad_norm": 33.80415725708008,
      "learning_rate": 4.1457056623005954e-06,
      "loss": 0.6906,
      "step": 1750
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.5188679245283019,
      "eval_loss": 1.2124418020248413,
      "eval_runtime": 1.7626,
      "eval_samples_per_second": 120.276,
      "eval_steps_per_second": 15.318,
      "step": 1760
    },
    {
      "epoch": 22.5,
      "grad_norm": 36.851356506347656,
      "learning_rate": 3.5721239031346067e-06,
      "loss": 0.6743,
      "step": 1800
    },
    {
      "epoch": 23.0,
      "eval_accuracy": 0.5283018867924528,
      "eval_loss": 1.2044041156768799,
      "eval_runtime": 1.7597,
      "eval_samples_per_second": 120.478,
      "eval_steps_per_second": 15.344,
      "step": 1840
    },
    {
      "epoch": 23.125,
      "grad_norm": 25.65793228149414,
      "learning_rate": 3.032520967893453e-06,
      "loss": 0.7163,
      "step": 1850
    },
    {
      "epoch": 23.75,
      "grad_norm": 29.638307571411133,
      "learning_rate": 2.529749287590042e-06,
      "loss": 0.7181,
      "step": 1900
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.5330188679245284,
      "eval_loss": 1.2020004987716675,
      "eval_runtime": 1.755,
      "eval_samples_per_second": 120.8,
      "eval_steps_per_second": 15.385,
      "step": 1920
    },
    {
      "epoch": 24.375,
      "grad_norm": 30.933732986450195,
      "learning_rate": 2.0664665970876496e-06,
      "loss": 0.6549,
      "step": 1950
    },
    {
      "epoch": 25.0,
      "grad_norm": 46.00556564331055,
      "learning_rate": 1.6451218858706374e-06,
      "loss": 0.7323,
      "step": 2000
    },
    {
      "epoch": 25.0,
      "eval_accuracy": 0.5235849056603774,
      "eval_loss": 1.2088254690170288,
      "eval_runtime": 1.762,
      "eval_samples_per_second": 120.319,
      "eval_steps_per_second": 15.324,
      "step": 2000
    },
    {
      "epoch": 25.625,
      "grad_norm": 14.342933654785156,
      "learning_rate": 1.2679424522780426e-06,
      "loss": 0.6597,
      "step": 2050
    },
    {
      "epoch": 26.0,
      "eval_accuracy": 0.5188679245283019,
      "eval_loss": 1.2148332595825195,
      "eval_runtime": 1.7676,
      "eval_samples_per_second": 119.936,
      "eval_steps_per_second": 15.275,
      "step": 2080
    },
    {
      "epoch": 26.25,
      "grad_norm": 26.21175765991211,
      "learning_rate": 9.369221296335007e-07,
      "loss": 0.6757,
      "step": 2100
    },
    {
      "epoch": 26.875,
      "grad_norm": 5.322547435760498,
      "learning_rate": 6.538107465101162e-07,
      "loss": 0.6476,
      "step": 2150
    },
    {
      "epoch": 27.0,
      "eval_accuracy": 0.5188679245283019,
      "eval_loss": 1.214443564414978,
      "eval_runtime": 1.7533,
      "eval_samples_per_second": 120.914,
      "eval_steps_per_second": 15.399,
      "step": 2160
    },
    {
      "epoch": 27.5,
      "grad_norm": 26.5172061920166,
      "learning_rate": 4.2010487684511105e-07,
      "loss": 0.6792,
      "step": 2200
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.5141509433962265,
      "eval_loss": 1.2132720947265625,
      "eval_runtime": 1.7682,
      "eval_samples_per_second": 119.897,
      "eval_steps_per_second": 15.27,
      "step": 2240
    },
    {
      "epoch": 28.125,
      "grad_norm": 11.484118461608887,
      "learning_rate": 2.370399288006664e-07,
      "loss": 0.6336,
      "step": 2250
    },
    {
      "epoch": 28.75,
      "grad_norm": 43.180728912353516,
      "learning_rate": 1.055836141905553e-07,
      "loss": 0.6455,
      "step": 2300
    },
    {
      "epoch": 29.0,
      "eval_accuracy": 0.5283018867924528,
      "eval_loss": 1.2133941650390625,
      "eval_runtime": 1.7584,
      "eval_samples_per_second": 120.565,
      "eval_steps_per_second": 15.355,
      "step": 2320
    },
    {
      "epoch": 29.375,
      "grad_norm": 18.96051788330078,
      "learning_rate": 2.643083299427751e-08,
      "loss": 0.6814,
      "step": 2350
    },
    {
      "epoch": 30.0,
      "grad_norm": 22.531360626220703,
      "learning_rate": 0.0,
      "loss": 0.6719,
      "step": 2400
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.5283018867924528,
      "eval_loss": 1.2133402824401855,
      "eval_runtime": 3.0896,
      "eval_samples_per_second": 68.618,
      "eval_steps_per_second": 8.739,
      "step": 2400
    },
    {
      "epoch": 30.0,
      "step": 2400,
      "total_flos": 1.5090549662800282e+18,
      "train_loss": 0.8874085402488708,
      "train_runtime": 581.5804,
      "train_samples_per_second": 32.807,
      "train_steps_per_second": 4.127
    }
  ],
  "logging_steps": 50,
  "max_steps": 2400,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5090549662800282e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}