{
  "best_metric": 0.5094339622641509,
  "best_model_checkpoint": "./Validated_Balanced_Raw_Data_model_vit_outputs/checkpoint-1440",
  "epoch": 25.0,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.625,
      "grad_norm": 10.029894828796387,
      "learning_rate": 1.47e-05,
      "loss": 1.4452,
      "step": 50
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.24056603773584906,
      "eval_loss": 1.4536809921264648,
      "eval_runtime": 1.1349,
      "eval_samples_per_second": 186.798,
      "eval_steps_per_second": 23.79,
      "step": 80
    },
    {
      "epoch": 1.25,
      "grad_norm": 13.045843124389648,
      "learning_rate": 2.97e-05,
      "loss": 1.4021,
      "step": 100
    },
    {
      "epoch": 1.875,
      "grad_norm": 9.291085243225098,
      "learning_rate": 2.9950795096316707e-05,
      "loss": 1.3534,
      "step": 150
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.35377358490566035,
      "eval_loss": 1.4003523588180542,
      "eval_runtime": 1.1414,
      "eval_samples_per_second": 185.736,
      "eval_steps_per_second": 23.655,
      "step": 160
    },
    {
      "epoch": 2.5,
      "grad_norm": 8.364015579223633,
      "learning_rate": 2.979948160693239e-05,
      "loss": 1.2977,
      "step": 200
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.37735849056603776,
      "eval_loss": 1.3416857719421387,
      "eval_runtime": 1.1295,
      "eval_samples_per_second": 187.7,
      "eval_steps_per_second": 23.905,
      "step": 240
    },
    {
      "epoch": 3.125,
      "grad_norm": 10.082795143127441,
      "learning_rate": 2.9547072651695803e-05,
      "loss": 1.2979,
      "step": 250
    },
    {
      "epoch": 3.75,
      "grad_norm": 9.592968940734863,
      "learning_rate": 2.9195292439710517e-05,
      "loss": 1.2604,
      "step": 300
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.37735849056603776,
      "eval_loss": 1.3131976127624512,
      "eval_runtime": 1.1222,
      "eval_samples_per_second": 188.91,
      "eval_steps_per_second": 24.059,
      "step": 320
    },
    {
      "epoch": 4.375,
      "grad_norm": 9.461295127868652,
      "learning_rate": 2.874654398652483e-05,
      "loss": 1.2509,
      "step": 350
    },
    {
      "epoch": 5.0,
      "grad_norm": 20.441808700561523,
      "learning_rate": 2.8203892699098956e-05,
      "loss": 1.2428,
      "step": 400
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.4009433962264151,
      "eval_loss": 1.2442667484283447,
      "eval_runtime": 1.1294,
      "eval_samples_per_second": 187.712,
      "eval_steps_per_second": 23.907,
      "step": 400
    },
    {
      "epoch": 5.625,
      "grad_norm": 7.7924604415893555,
      "learning_rate": 2.757104543596717e-05,
      "loss": 1.213,
      "step": 450
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.419811320754717,
      "eval_loss": 1.2147694826126099,
      "eval_runtime": 1.1426,
      "eval_samples_per_second": 185.546,
      "eval_steps_per_second": 23.631,
      "step": 480
    },
    {
      "epoch": 6.25,
      "grad_norm": 9.47819709777832,
      "learning_rate": 2.6852325185635357e-05,
      "loss": 1.2163,
      "step": 500
    },
    {
      "epoch": 6.875,
      "grad_norm": 9.0564546585083,
      "learning_rate": 2.605264153618602e-05,
      "loss": 1.1426,
      "step": 550
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.4669811320754717,
      "eval_loss": 1.2095811367034912,
      "eval_runtime": 1.125,
      "eval_samples_per_second": 188.45,
      "eval_steps_per_second": 24.001,
      "step": 560
    },
    {
      "epoch": 7.5,
      "grad_norm": 6.277278900146484,
      "learning_rate": 2.517745713781345e-05,
      "loss": 1.1657,
      "step": 600
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.4669811320754717,
      "eval_loss": 1.2065520286560059,
      "eval_runtime": 1.1354,
      "eval_samples_per_second": 186.725,
      "eval_steps_per_second": 23.781,
      "step": 640
    },
    {
      "epoch": 8.125,
      "grad_norm": 9.455373764038086,
      "learning_rate": 2.4252284842435667e-05,
      "loss": 1.1988,
      "step": 650
    },
    {
      "epoch": 8.75,
      "grad_norm": 9.699421882629395,
      "learning_rate": 2.3245704321242494e-05,
      "loss": 1.1249,
      "step": 700
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.4386792452830189,
      "eval_loss": 1.220900297164917,
      "eval_runtime": 1.1295,
      "eval_samples_per_second": 187.686,
      "eval_steps_per_second": 23.903,
      "step": 720
    },
    {
      "epoch": 9.375,
      "grad_norm": 10.024123191833496,
      "learning_rate": 2.218279727850104e-05,
      "loss": 1.0891,
      "step": 750
    },
    {
      "epoch": 10.0,
      "grad_norm": 14.003544807434082,
      "learning_rate": 2.107082444708681e-05,
      "loss": 1.1622,
      "step": 800
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.4811320754716981,
      "eval_loss": 1.1445928812026978,
      "eval_runtime": 1.1414,
      "eval_samples_per_second": 185.734,
      "eval_steps_per_second": 23.655,
      "step": 800
    },
    {
      "epoch": 10.625,
      "grad_norm": 10.41913890838623,
      "learning_rate": 1.9917381728963962e-05,
      "loss": 1.0625,
      "step": 850
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.4669811320754717,
      "eval_loss": 1.1742380857467651,
      "eval_runtime": 1.1293,
      "eval_samples_per_second": 187.73,
      "eval_steps_per_second": 23.909,
      "step": 880
    },
    {
      "epoch": 11.25,
      "grad_norm": 9.851762771606445,
      "learning_rate": 1.8730348307472828e-05,
      "loss": 1.043,
      "step": 900
    },
    {
      "epoch": 11.875,
      "grad_norm": 8.781569480895996,
      "learning_rate": 1.7517832824518216e-05,
      "loss": 1.1157,
      "step": 950
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.44339622641509435,
      "eval_loss": 1.2199500799179077,
      "eval_runtime": 1.1331,
      "eval_samples_per_second": 187.094,
      "eval_steps_per_second": 23.828,
      "step": 960
    },
    {
      "epoch": 12.5,
      "grad_norm": 10.422264099121094,
      "learning_rate": 1.628811799032326e-05,
      "loss": 1.0807,
      "step": 1000
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.4669811320754717,
      "eval_loss": 1.2117412090301514,
      "eval_runtime": 1.1321,
      "eval_samples_per_second": 187.259,
      "eval_steps_per_second": 23.849,
      "step": 1040
    },
    {
      "epoch": 13.125,
      "grad_norm": 8.4778470993042,
      "learning_rate": 1.5049604004119928e-05,
      "loss": 1.0891,
      "step": 1050
    },
    {
      "epoch": 13.75,
      "grad_norm": 11.677724838256836,
      "learning_rate": 1.381075117227066e-05,
      "loss": 1.0629,
      "step": 1100
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.4811320754716981,
      "eval_loss": 1.2296439409255981,
      "eval_runtime": 1.1234,
      "eval_samples_per_second": 188.721,
      "eval_steps_per_second": 24.035,
      "step": 1120
    },
    {
      "epoch": 14.375,
      "grad_norm": 11.892010688781738,
      "learning_rate": 1.2580022115797237e-05,
      "loss": 1.0673,
      "step": 1150
    },
    {
      "epoch": 15.0,
      "grad_norm": 16.539581298828125,
      "learning_rate": 1.1365823962098208e-05,
      "loss": 1.0323,
      "step": 1200
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.49056603773584906,
      "eval_loss": 1.1886956691741943,
      "eval_runtime": 1.1303,
      "eval_samples_per_second": 187.563,
      "eval_steps_per_second": 23.888,
      "step": 1200
    },
    {
      "epoch": 15.625,
      "grad_norm": 6.841868877410889,
      "learning_rate": 1.0176450915744072e-05,
      "loss": 1.0128,
      "step": 1250
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.49528301886792453,
      "eval_loss": 1.2074586153030396,
      "eval_runtime": 1.1321,
      "eval_samples_per_second": 187.267,
      "eval_steps_per_second": 23.85,
      "step": 1280
    },
    {
      "epoch": 16.25,
      "grad_norm": 9.490548133850098,
      "learning_rate": 9.020027600649825e-06,
      "loss": 0.9715,
      "step": 1300
    },
    {
      "epoch": 16.875,
      "grad_norm": 9.569696426391602,
      "learning_rate": 7.904453560655389e-06,
      "loss": 1.0266,
      "step": 1350
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.5,
      "eval_loss": 1.2082122564315796,
      "eval_runtime": 1.1449,
      "eval_samples_per_second": 185.172,
      "eval_steps_per_second": 23.583,
      "step": 1360
    },
    {
      "epoch": 17.5,
      "grad_norm": 9.353178977966309,
      "learning_rate": 6.837349297631114e-06,
      "loss": 1.004,
      "step": 1400
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.5094339622641509,
      "eval_loss": 1.2154083251953125,
      "eval_runtime": 1.1238,
      "eval_samples_per_second": 188.649,
      "eval_steps_per_second": 24.026,
      "step": 1440
    },
    {
      "epoch": 18.125,
      "grad_norm": 9.470197677612305,
      "learning_rate": 5.826004215722937e-06,
      "loss": 1.002,
      "step": 1450
    },
    {
      "epoch": 18.75,
      "grad_norm": 6.417470932006836,
      "learning_rate": 4.87732682733072e-06,
      "loss": 0.9543,
      "step": 1500
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.5047169811320755,
      "eval_loss": 1.2047548294067383,
      "eval_runtime": 1.1355,
      "eval_samples_per_second": 186.702,
      "eval_steps_per_second": 23.778,
      "step": 1520
    },
    {
      "epoch": 19.375,
      "grad_norm": 8.318925857543945,
      "learning_rate": 3.997797560963404e-06,
      "loss": 1.0218,
      "step": 1550
    },
    {
      "epoch": 20.0,
      "grad_norm": 8.360044479370117,
      "learning_rate": 3.193424493341213e-06,
      "loss": 0.9439,
      "step": 1600
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.49056603773584906,
      "eval_loss": 1.2217515707015991,
      "eval_runtime": 1.1305,
      "eval_samples_per_second": 187.525,
      "eval_steps_per_second": 23.883,
      "step": 1600
    },
    {
      "epoch": 20.625,
      "grad_norm": 12.746959686279297,
      "learning_rate": 2.4697023081395654e-06,
      "loss": 0.9891,
      "step": 1650
    },
    {
      "epoch": 21.0,
      "eval_accuracy": 0.49056603773584906,
      "eval_loss": 1.2135605812072754,
      "eval_runtime": 1.1547,
      "eval_samples_per_second": 183.601,
      "eval_steps_per_second": 23.383,
      "step": 1680
    },
    {
      "epoch": 21.25,
      "grad_norm": 7.136344909667969,
      "learning_rate": 1.8315747617280382e-06,
      "loss": 0.9586,
      "step": 1700
    },
    {
      "epoch": 21.875,
      "grad_norm": 8.792285919189453,
      "learning_rate": 1.283400912301434e-06,
      "loss": 0.9801,
      "step": 1750
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.4858490566037736,
      "eval_loss": 1.216606855392456,
      "eval_runtime": 1.133,
      "eval_samples_per_second": 187.117,
      "eval_steps_per_second": 23.831,
      "step": 1760
    },
    {
      "epoch": 22.5,
      "grad_norm": 8.172861099243164,
      "learning_rate": 8.289253430923128e-07,
      "loss": 0.9632,
      "step": 1800
    },
    {
      "epoch": 23.0,
      "eval_accuracy": 0.49056603773584906,
      "eval_loss": 1.2148730754852295,
      "eval_runtime": 1.1383,
      "eval_samples_per_second": 186.244,
      "eval_steps_per_second": 23.72,
      "step": 1840
    },
    {
      "epoch": 23.125,
      "grad_norm": 8.669483184814453,
      "learning_rate": 4.7125258307053385e-07,
      "loss": 0.9956,
      "step": 1850
    },
    {
      "epoch": 23.75,
      "grad_norm": 8.101666450500488,
      "learning_rate": 2.1282589986245494e-07,
      "loss": 0.9584,
      "step": 1900
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.49056603773584906,
      "eval_loss": 1.213520884513855,
      "eval_runtime": 1.1315,
      "eval_samples_per_second": 187.37,
      "eval_steps_per_second": 23.863,
      "step": 1920
    },
    {
      "epoch": 24.375,
      "grad_norm": 25.165281295776367,
      "learning_rate": 5.5410609755597306e-08,
      "loss": 0.948,
      "step": 1950
    },
    {
      "epoch": 25.0,
      "grad_norm": 10.21505069732666,
      "learning_rate": 8.201879839297988e-11,
      "loss": 0.9561,
      "step": 2000
    },
    {
      "epoch": 25.0,
      "eval_accuracy": 0.49056603773584906,
      "eval_loss": 1.2136294841766357,
      "eval_runtime": 2.3217,
      "eval_samples_per_second": 91.313,
      "eval_steps_per_second": 11.63,
      "step": 2000
    },
    {
      "epoch": 25.0,
      "step": 2000,
      "total_flos": 1.2321447211229184e+18,
      "train_loss": 1.1025613689422606,
      "train_runtime": 425.612,
      "train_samples_per_second": 37.358,
      "train_steps_per_second": 4.699
    }
  ],
  "logging_steps": 50,
  "max_steps": 2000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 25,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2321447211229184e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}