{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.0,
  "global_step": 3104,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 0.00019677835051546392,
      "loss": 0.9382,
      "step": 50
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019355670103092784,
      "loss": 0.4059,
      "step": 100
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00019033505154639175,
      "loss": 0.3835,
      "step": 150
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001871134020618557,
      "loss": 0.3911,
      "step": 200
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0001838917525773196,
      "loss": 0.3482,
      "step": 250
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0001806701030927835,
      "loss": 0.3409,
      "step": 300
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00017744845360824743,
      "loss": 0.362,
      "step": 350
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00017422680412371134,
      "loss": 0.2979,
      "step": 400
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00017100515463917525,
      "loss": 0.2824,
      "step": 450
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0001677835051546392,
      "loss": 0.2422,
      "step": 500
    },
    {
      "epoch": 0.64,
      "eval_loss": 0.260538786649704,
      "eval_runtime": 181.4734,
      "eval_samples_per_second": 8.299,
      "eval_steps_per_second": 1.041,
      "step": 500
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.0001645618556701031,
      "loss": 0.2609,
      "step": 550
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00016134020618556702,
      "loss": 0.2699,
      "step": 600
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00015811855670103093,
      "loss": 0.2714,
      "step": 650
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00015489690721649487,
      "loss": 0.3153,
      "step": 700
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.00015167525773195875,
      "loss": 0.2316,
      "step": 750
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.0001484536082474227,
      "loss": 0.1855,
      "step": 800
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.0001452319587628866,
      "loss": 0.1853,
      "step": 850
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00014201030927835052,
      "loss": 0.194,
      "step": 900
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00013878865979381443,
      "loss": 0.2174,
      "step": 950
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.00013556701030927837,
      "loss": 0.2246,
      "step": 1000
    },
    {
      "epoch": 1.29,
      "eval_loss": 0.1958554983139038,
      "eval_runtime": 180.3002,
      "eval_samples_per_second": 8.353,
      "eval_steps_per_second": 1.048,
      "step": 1000
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00013234536082474226,
      "loss": 0.1895,
      "step": 1050
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.0001291237113402062,
      "loss": 0.1964,
      "step": 1100
    },
    {
      "epoch": 1.48,
      "learning_rate": 0.0001259020618556701,
      "loss": 0.1666,
      "step": 1150
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.00012268041237113402,
      "loss": 0.2008,
      "step": 1200
    },
    {
      "epoch": 1.61,
      "learning_rate": 0.00011945876288659794,
      "loss": 0.1952,
      "step": 1250
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00011623711340206188,
      "loss": 0.2173,
      "step": 1300
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.00011301546391752577,
      "loss": 0.1722,
      "step": 1350
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.0001097938144329897,
      "loss": 0.1882,
      "step": 1400
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.00010657216494845361,
      "loss": 0.2224,
      "step": 1450
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00010335051546391754,
      "loss": 0.1697,
      "step": 1500
    },
    {
      "epoch": 1.93,
      "eval_loss": 0.28020888566970825,
      "eval_runtime": 182.4392,
      "eval_samples_per_second": 8.255,
      "eval_steps_per_second": 1.036,
      "step": 1500
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.00010012886597938145,
      "loss": 0.1388,
      "step": 1550
    },
    {
      "epoch": 2.06,
      "learning_rate": 9.690721649484537e-05,
      "loss": 0.1353,
      "step": 1600
    },
    {
      "epoch": 2.13,
      "learning_rate": 9.368556701030928e-05,
      "loss": 0.0905,
      "step": 1650
    },
    {
      "epoch": 2.19,
      "learning_rate": 9.04639175257732e-05,
      "loss": 0.1694,
      "step": 1700
    },
    {
      "epoch": 2.26,
      "learning_rate": 8.724226804123712e-05,
      "loss": 0.1674,
      "step": 1750
    },
    {
      "epoch": 2.32,
      "learning_rate": 8.402061855670103e-05,
      "loss": 0.1464,
      "step": 1800
    },
    {
      "epoch": 2.38,
      "learning_rate": 8.079896907216496e-05,
      "loss": 0.1125,
      "step": 1850
    },
    {
      "epoch": 2.45,
      "learning_rate": 7.757731958762887e-05,
      "loss": 0.1548,
      "step": 1900
    },
    {
      "epoch": 2.51,
      "learning_rate": 7.43556701030928e-05,
      "loss": 0.0984,
      "step": 1950
    },
    {
      "epoch": 2.58,
      "learning_rate": 7.113402061855671e-05,
      "loss": 0.1224,
      "step": 2000
    },
    {
      "epoch": 2.58,
      "eval_loss": 0.1549469381570816,
      "eval_runtime": 183.076,
      "eval_samples_per_second": 8.226,
      "eval_steps_per_second": 1.032,
      "step": 2000
    },
    {
      "epoch": 2.64,
      "learning_rate": 6.791237113402062e-05,
      "loss": 0.0856,
      "step": 2050
    },
    {
      "epoch": 2.71,
      "learning_rate": 6.469072164948455e-05,
      "loss": 0.1656,
      "step": 2100
    },
    {
      "epoch": 2.77,
      "learning_rate": 6.146907216494846e-05,
      "loss": 0.1391,
      "step": 2150
    },
    {
      "epoch": 2.84,
      "learning_rate": 5.824742268041238e-05,
      "loss": 0.1165,
      "step": 2200
    },
    {
      "epoch": 2.9,
      "learning_rate": 5.50257731958763e-05,
      "loss": 0.1023,
      "step": 2250
    },
    {
      "epoch": 2.96,
      "learning_rate": 5.180412371134021e-05,
      "loss": 0.1263,
      "step": 2300
    },
    {
      "epoch": 3.03,
      "learning_rate": 4.8582474226804124e-05,
      "loss": 0.0729,
      "step": 2350
    },
    {
      "epoch": 3.09,
      "learning_rate": 4.536082474226804e-05,
      "loss": 0.1125,
      "step": 2400
    },
    {
      "epoch": 3.16,
      "learning_rate": 4.213917525773196e-05,
      "loss": 0.13,
      "step": 2450
    },
    {
      "epoch": 3.22,
      "learning_rate": 3.8917525773195875e-05,
      "loss": 0.1385,
      "step": 2500
    },
    {
      "epoch": 3.22,
      "eval_loss": 0.14271119236946106,
      "eval_runtime": 181.9727,
      "eval_samples_per_second": 8.276,
      "eval_steps_per_second": 1.039,
      "step": 2500
    },
    {
      "epoch": 3.29,
      "learning_rate": 3.5695876288659795e-05,
      "loss": 0.0899,
      "step": 2550
    },
    {
      "epoch": 3.35,
      "learning_rate": 3.2474226804123714e-05,
      "loss": 0.0811,
      "step": 2600
    },
    {
      "epoch": 3.41,
      "learning_rate": 2.925257731958763e-05,
      "loss": 0.0897,
      "step": 2650
    },
    {
      "epoch": 3.48,
      "learning_rate": 2.603092783505155e-05,
      "loss": 0.0789,
      "step": 2700
    },
    {
      "epoch": 3.54,
      "learning_rate": 2.2809278350515466e-05,
      "loss": 0.0745,
      "step": 2750
    },
    {
      "epoch": 3.61,
      "learning_rate": 1.9587628865979382e-05,
      "loss": 0.0523,
      "step": 2800
    },
    {
      "epoch": 3.67,
      "learning_rate": 1.6365979381443298e-05,
      "loss": 0.0735,
      "step": 2850
    },
    {
      "epoch": 3.74,
      "learning_rate": 1.3144329896907218e-05,
      "loss": 0.0909,
      "step": 2900
    },
    {
      "epoch": 3.8,
      "learning_rate": 9.922680412371134e-06,
      "loss": 0.064,
      "step": 2950
    },
    {
      "epoch": 3.87,
      "learning_rate": 6.701030927835052e-06,
      "loss": 0.0589,
      "step": 3000
    },
    {
      "epoch": 3.87,
      "eval_loss": 0.13845542073249817,
      "eval_runtime": 183.5137,
      "eval_samples_per_second": 8.206,
      "eval_steps_per_second": 1.03,
      "step": 3000
    },
    {
      "epoch": 3.93,
      "learning_rate": 3.4793814432989694e-06,
      "loss": 0.0887,
      "step": 3050
    },
    {
      "epoch": 3.99,
      "learning_rate": 2.577319587628866e-07,
      "loss": 0.0577,
      "step": 3100
    },
    {
      "epoch": 4.0,
      "step": 3104,
      "total_flos": 3.845910334361518e+18,
      "train_loss": 0.188343092505148,
      "train_runtime": 9039.9113,
      "train_samples_per_second": 5.49,
      "train_steps_per_second": 0.343
    }
  ],
  "max_steps": 3104,
  "num_train_epochs": 4,
  "total_flos": 3.845910334361518e+18,
  "trial_name": null,
  "trial_params": null
}