{
  "best_global_step": 24806,
  "best_metric": 0.13123206794261932,
  "best_model_checkpoint": "Model1-v1-Rival/checkpoint-24806",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 24806,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04031282754172378,
      "grad_norm": 35.80685043334961,
      "learning_rate": 2.011285771866183e-07,
      "loss": 0.7492,
      "step": 500
    },
    {
      "epoch": 0.08062565508344756,
      "grad_norm": 27.830657958984375,
      "learning_rate": 4.0266021765417167e-07,
      "loss": 0.564,
      "step": 1000
    },
    {
      "epoch": 0.12093848262517133,
      "grad_norm": 22.993253707885742,
      "learning_rate": 6.041918581217252e-07,
      "loss": 0.497,
      "step": 1500
    },
    {
      "epoch": 0.16125131016689512,
      "grad_norm": 31.184377670288086,
      "learning_rate": 8.057234985892785e-07,
      "loss": 0.4362,
      "step": 2000
    },
    {
      "epoch": 0.2015641377086189,
      "grad_norm": 31.91282844543457,
      "learning_rate": 9.991937290033595e-07,
      "loss": 0.41,
      "step": 2500
    },
    {
      "epoch": 0.24187696525034266,
      "grad_norm": 42.18364715576172,
      "learning_rate": 9.767973124300112e-07,
      "loss": 0.3815,
      "step": 3000
    },
    {
      "epoch": 0.28218979279206646,
      "grad_norm": 31.624061584472656,
      "learning_rate": 9.544008958566627e-07,
      "loss": 0.3568,
      "step": 3500
    },
    {
      "epoch": 0.32250262033379024,
      "grad_norm": 36.55485534667969,
      "learning_rate": 9.320044792833146e-07,
      "loss": 0.341,
      "step": 4000
    },
    {
      "epoch": 0.362815447875514,
      "grad_norm": 34.942264556884766,
      "learning_rate": 9.096080627099664e-07,
      "loss": 0.3293,
      "step": 4500
    },
    {
      "epoch": 0.4031282754172378,
      "grad_norm": 30.162443161010742,
      "learning_rate": 8.872116461366181e-07,
      "loss": 0.3184,
      "step": 5000
    },
    {
      "epoch": 0.44344110295896155,
      "grad_norm": 42.151607513427734,
      "learning_rate": 8.648152295632698e-07,
      "loss": 0.3149,
      "step": 5500
    },
    {
      "epoch": 0.4837539305006853,
      "grad_norm": 21.629465103149414,
      "learning_rate": 8.424188129899216e-07,
      "loss": 0.2969,
      "step": 6000
    },
    {
      "epoch": 0.5240667580424091,
      "grad_norm": 45.252803802490234,
      "learning_rate": 8.200223964165734e-07,
      "loss": 0.2962,
      "step": 6500
    },
    {
      "epoch": 0.5643795855841329,
      "grad_norm": 31.193838119506836,
      "learning_rate": 7.97625979843225e-07,
      "loss": 0.2858,
      "step": 7000
    },
    {
      "epoch": 0.6046924131258566,
      "grad_norm": 96.9237289428711,
      "learning_rate": 7.752295632698768e-07,
      "loss": 0.268,
      "step": 7500
    },
    {
      "epoch": 0.6450052406675805,
      "grad_norm": 21.470144271850586,
      "learning_rate": 7.528331466965285e-07,
      "loss": 0.2622,
      "step": 8000
    },
    {
      "epoch": 0.6853180682093042,
      "grad_norm": 35.70467758178711,
      "learning_rate": 7.304367301231803e-07,
      "loss": 0.2741,
      "step": 8500
    },
    {
      "epoch": 0.725630895751028,
      "grad_norm": 19.73318862915039,
      "learning_rate": 7.080403135498319e-07,
      "loss": 0.2626,
      "step": 9000
    },
    {
      "epoch": 0.7659437232927517,
      "grad_norm": 23.529918670654297,
      "learning_rate": 6.856438969764838e-07,
      "loss": 0.2518,
      "step": 9500
    },
    {
      "epoch": 0.8062565508344756,
      "grad_norm": 41.51838302612305,
      "learning_rate": 6.632474804031355e-07,
      "loss": 0.2495,
      "step": 10000
    },
    {
      "epoch": 0.8465693783761993,
      "grad_norm": 14.217453956604004,
      "learning_rate": 6.408510638297872e-07,
      "loss": 0.2437,
      "step": 10500
    },
    {
      "epoch": 0.8868822059179231,
      "grad_norm": 18.972307205200195,
      "learning_rate": 6.184546472564389e-07,
      "loss": 0.2482,
      "step": 11000
    },
    {
      "epoch": 0.9271950334596468,
      "grad_norm": 47.28855514526367,
      "learning_rate": 5.960582306830907e-07,
      "loss": 0.2419,
      "step": 11500
    },
    {
      "epoch": 0.9675078610013707,
      "grad_norm": 48.309165954589844,
      "learning_rate": 5.736618141097424e-07,
      "loss": 0.2421,
      "step": 12000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9340860036526648,
      "eval_loss": 0.17090356349945068,
      "eval_model_preparation_time": 0.0026,
      "eval_runtime": 109.2506,
      "eval_samples_per_second": 441.087,
      "eval_steps_per_second": 55.139,
      "step": 12403
    },
    {
      "epoch": 1.0078206885430945,
      "grad_norm": 47.087913513183594,
      "learning_rate": 5.512653975363942e-07,
      "loss": 0.2372,
      "step": 12500
    },
    {
      "epoch": 1.0481335160848182,
      "grad_norm": 40.675697326660156,
      "learning_rate": 5.288689809630459e-07,
      "loss": 0.2249,
      "step": 13000
    },
    {
      "epoch": 1.088446343626542,
      "grad_norm": 46.77396774291992,
      "learning_rate": 5.064725643896977e-07,
      "loss": 0.2207,
      "step": 13500
    },
    {
      "epoch": 1.1287591711682659,
      "grad_norm": 69.7470703125,
      "learning_rate": 4.840761478163493e-07,
      "loss": 0.2276,
      "step": 14000
    },
    {
      "epoch": 1.1690719987099896,
      "grad_norm": 35.32638168334961,
      "learning_rate": 4.6167973124300107e-07,
      "loss": 0.2235,
      "step": 14500
    },
    {
      "epoch": 1.2093848262517133,
      "grad_norm": 63.080291748046875,
      "learning_rate": 4.3928331466965287e-07,
      "loss": 0.2219,
      "step": 15000
    },
    {
      "epoch": 1.249697653793437,
      "grad_norm": 45.029239654541016,
      "learning_rate": 4.1688689809630457e-07,
      "loss": 0.2184,
      "step": 15500
    },
    {
      "epoch": 1.2900104813351607,
      "grad_norm": 20.416336059570312,
      "learning_rate": 3.944904815229563e-07,
      "loss": 0.2081,
      "step": 16000
    },
    {
      "epoch": 1.3303233088768847,
      "grad_norm": 28.367952346801758,
      "learning_rate": 3.72094064949608e-07,
      "loss": 0.2157,
      "step": 16500
    },
    {
      "epoch": 1.3706361364186084,
      "grad_norm": 13.548885345458984,
      "learning_rate": 3.4969764837625977e-07,
      "loss": 0.212,
      "step": 17000
    },
    {
      "epoch": 1.4109489639603323,
      "grad_norm": 94.12673950195312,
      "learning_rate": 3.273012318029115e-07,
      "loss": 0.2075,
      "step": 17500
    },
    {
      "epoch": 1.451261791502056,
      "grad_norm": 38.12338638305664,
      "learning_rate": 3.049048152295632e-07,
      "loss": 0.2148,
      "step": 18000
    },
    {
      "epoch": 1.4915746190437797,
      "grad_norm": 45.156455993652344,
      "learning_rate": 2.82508398656215e-07,
      "loss": 0.2075,
      "step": 18500
    },
    {
      "epoch": 1.5318874465855035,
      "grad_norm": 18.083568572998047,
      "learning_rate": 2.601119820828667e-07,
      "loss": 0.2092,
      "step": 19000
    },
    {
      "epoch": 1.5722002741272272,
      "grad_norm": 27.93409538269043,
      "learning_rate": 2.3771556550951847e-07,
      "loss": 0.205,
      "step": 19500
    },
    {
      "epoch": 1.612513101668951,
      "grad_norm": 39.768192291259766,
      "learning_rate": 2.153191489361702e-07,
      "loss": 0.2039,
      "step": 20000
    },
    {
      "epoch": 1.6528259292106748,
      "grad_norm": 28.78324317932129,
      "learning_rate": 1.9292273236282195e-07,
      "loss": 0.204,
      "step": 20500
    },
    {
      "epoch": 1.6931387567523988,
      "grad_norm": 94.95115661621094,
      "learning_rate": 1.7052631578947368e-07,
      "loss": 0.2154,
      "step": 21000
    },
    {
      "epoch": 1.7334515842941225,
      "grad_norm": 51.76600646972656,
      "learning_rate": 1.4812989921612543e-07,
      "loss": 0.208,
      "step": 21500
    },
    {
      "epoch": 1.7737644118358462,
      "grad_norm": 24.089155197143555,
      "learning_rate": 1.2573348264277713e-07,
      "loss": 0.2071,
      "step": 22000
    },
    {
      "epoch": 1.81407723937757,
      "grad_norm": 46.96974182128906,
      "learning_rate": 1.0333706606942888e-07,
      "loss": 0.198,
      "step": 22500
    },
    {
      "epoch": 1.8543900669192936,
      "grad_norm": 66.37223815917969,
      "learning_rate": 8.094064949608062e-08,
      "loss": 0.2015,
      "step": 23000
    },
    {
      "epoch": 1.8947028944610174,
      "grad_norm": 42.39664077758789,
      "learning_rate": 5.854423292273236e-08,
      "loss": 0.1982,
      "step": 23500
    },
    {
      "epoch": 1.9350157220027413,
      "grad_norm": 42.68307113647461,
      "learning_rate": 3.61478163493841e-08,
      "loss": 0.1958,
      "step": 24000
    },
    {
      "epoch": 1.975328549544465,
      "grad_norm": 17.30171012878418,
      "learning_rate": 1.3751399776035833e-08,
      "loss": 0.2024,
      "step": 24500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9520795284741823,
      "eval_loss": 0.13123206794261932,
      "eval_model_preparation_time": 0.0026,
      "eval_runtime": 100.8699,
      "eval_samples_per_second": 477.734,
      "eval_steps_per_second": 59.72,
      "step": 24806
    }
  ],
  "logging_steps": 500,
  "max_steps": 24806,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.6484442538990895e+19,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}