{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 50,
  "global_step": 1065,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.009396288466055908,
      "grad_norm": 77.32342529296875,
      "learning_rate": 8.411214953271029e-07,
      "loss": 1.6798,
      "step": 10
    },
    {
      "epoch": 0.018792576932111817,
      "grad_norm": 33.60771179199219,
      "learning_rate": 1.7757009345794394e-06,
      "loss": 1.4918,
      "step": 20
    },
    {
      "epoch": 0.028188865398167725,
      "grad_norm": 15.923619270324707,
      "learning_rate": 2.7102803738317757e-06,
      "loss": 0.832,
      "step": 30
    },
    {
      "epoch": 0.03758515386422363,
      "grad_norm": 25.03349494934082,
      "learning_rate": 3.6448598130841123e-06,
      "loss": 0.6918,
      "step": 40
    },
    {
      "epoch": 0.04698144233027954,
      "grad_norm": 10.101052284240723,
      "learning_rate": 4.579439252336449e-06,
      "loss": 0.5121,
      "step": 50
    },
    {
      "epoch": 0.04698144233027954,
      "eval_loss": 0.4876191020011902,
      "eval_runtime": 472.5825,
      "eval_samples_per_second": 2.002,
      "eval_steps_per_second": 1.001,
      "step": 50
    },
    {
      "epoch": 0.05637773079633545,
      "grad_norm": 8.925775527954102,
      "learning_rate": 5.514018691588785e-06,
      "loss": 0.4886,
      "step": 60
    },
    {
      "epoch": 0.06577401926239136,
      "grad_norm": 7.684209823608398,
      "learning_rate": 6.448598130841122e-06,
      "loss": 0.4985,
      "step": 70
    },
    {
      "epoch": 0.07517030772844727,
      "grad_norm": 14.134685516357422,
      "learning_rate": 7.383177570093458e-06,
      "loss": 0.3483,
      "step": 80
    },
    {
      "epoch": 0.08456659619450317,
      "grad_norm": 9.419997215270996,
      "learning_rate": 8.317757009345795e-06,
      "loss": 0.2995,
      "step": 90
    },
    {
      "epoch": 0.09396288466055908,
      "grad_norm": 9.725701332092285,
      "learning_rate": 9.252336448598132e-06,
      "loss": 0.4615,
      "step": 100
    },
    {
      "epoch": 0.09396288466055908,
      "eval_loss": 0.3848985433578491,
      "eval_runtime": 485.2521,
      "eval_samples_per_second": 1.95,
      "eval_steps_per_second": 0.975,
      "step": 100
    },
    {
      "epoch": 0.10335917312661498,
      "grad_norm": 7.37769889831543,
      "learning_rate": 9.999892460708615e-06,
      "loss": 0.372,
      "step": 110
    },
    {
      "epoch": 0.1127554615926709,
      "grad_norm": 5.251932144165039,
      "learning_rate": 9.996129071205167e-06,
      "loss": 0.3499,
      "step": 120
    },
    {
      "epoch": 0.1221517500587268,
      "grad_norm": 6.633662700653076,
      "learning_rate": 9.98699334211771e-06,
      "loss": 0.3497,
      "step": 130
    },
    {
      "epoch": 0.13154803852478272,
      "grad_norm": 9.925003051757812,
      "learning_rate": 9.972495097099379e-06,
      "loss": 0.3527,
      "step": 140
    },
    {
      "epoch": 0.14094432699083861,
      "grad_norm": 2.5581600666046143,
      "learning_rate": 9.952649926118827e-06,
      "loss": 0.37,
      "step": 150
    },
    {
      "epoch": 0.14094432699083861,
      "eval_loss": 0.32813695073127747,
      "eval_runtime": 450.9285,
      "eval_samples_per_second": 2.098,
      "eval_steps_per_second": 1.049,
      "step": 150
    },
    {
      "epoch": 0.15034061545689453,
      "grad_norm": 8.05836009979248,
      "learning_rate": 9.927479168696327e-06,
      "loss": 0.4364,
      "step": 160
    },
    {
      "epoch": 0.15973690392295042,
      "grad_norm": 8.204833984375,
      "learning_rate": 9.897009890957382e-06,
      "loss": 0.3828,
      "step": 170
    },
    {
      "epoch": 0.16913319238900634,
      "grad_norm": 4.883147716522217,
      "learning_rate": 9.861274856528504e-06,
      "loss": 0.3503,
      "step": 180
    },
    {
      "epoch": 0.17852948085506226,
      "grad_norm": 4.023066520690918,
      "learning_rate": 9.820312491306471e-06,
      "loss": 0.4124,
      "step": 190
    },
    {
      "epoch": 0.18792576932111815,
      "grad_norm": 3.1607863903045654,
      "learning_rate": 9.77416684213896e-06,
      "loss": 0.3749,
      "step": 200
    },
    {
      "epoch": 0.18792576932111815,
      "eval_loss": 0.2891885042190552,
      "eval_runtime": 458.85,
      "eval_samples_per_second": 2.062,
      "eval_steps_per_second": 1.031,
      "step": 200
    },
    {
      "epoch": 0.19732205778717407,
      "grad_norm": 9.990116119384766,
      "learning_rate": 9.722887529460928e-06,
      "loss": 0.235,
      "step": 210
    },
    {
      "epoch": 0.20671834625322996,
      "grad_norm": 4.4412760734558105,
      "learning_rate": 9.666529693937763e-06,
      "loss": 0.2777,
      "step": 220
    },
    {
      "epoch": 0.21611463471928588,
      "grad_norm": 5.4278154373168945,
      "learning_rate": 9.605153937172495e-06,
      "loss": 0.3091,
      "step": 230
    },
    {
      "epoch": 0.2255109231853418,
      "grad_norm": 4.043374061584473,
      "learning_rate": 9.538826256540866e-06,
      "loss": 0.287,
      "step": 240
    },
    {
      "epoch": 0.2349072116513977,
      "grad_norm": 5.319589614868164,
      "learning_rate": 9.467617974224326e-06,
      "loss": 0.2863,
      "step": 250
    },
    {
      "epoch": 0.2349072116513977,
      "eval_loss": 0.2756798267364502,
      "eval_runtime": 504.1073,
      "eval_samples_per_second": 1.877,
      "eval_steps_per_second": 0.938,
      "step": 250
    },
    {
      "epoch": 0.2443035001174536,
      "grad_norm": 8.62342643737793,
      "learning_rate": 9.391605660517268e-06,
      "loss": 0.2357,
      "step": 260
    },
    {
      "epoch": 0.2536997885835095,
      "grad_norm": 2.7996938228607178,
      "learning_rate": 9.310871051490953e-06,
      "loss": 0.2237,
      "step": 270
    },
    {
      "epoch": 0.26309607704956545,
      "grad_norm": 4.890498638153076,
      "learning_rate": 9.225500961102685e-06,
      "loss": 0.2974,
      "step": 280
    },
    {
      "epoch": 0.27249236551562134,
      "grad_norm": 5.307254314422607,
      "learning_rate": 9.135587187844727e-06,
      "loss": 0.2556,
      "step": 290
    },
    {
      "epoch": 0.28188865398167723,
      "grad_norm": 4.411228656768799,
      "learning_rate": 9.041226416033344e-06,
      "loss": 0.3078,
      "step": 300
    },
    {
      "epoch": 0.28188865398167723,
      "eval_loss": 0.25494658946990967,
      "eval_runtime": 518.8808,
      "eval_samples_per_second": 1.823,
      "eval_steps_per_second": 0.912,
      "step": 300
    },
    {
      "epoch": 0.2912849424477331,
      "grad_norm": 2.772766351699829,
      "learning_rate": 8.942520111844117e-06,
      "loss": 0.2227,
      "step": 310
    },
    {
      "epoch": 0.30068123091378907,
      "grad_norm": 5.582266807556152,
      "learning_rate": 8.839574414205335e-06,
      "loss": 0.2668,
      "step": 320
    },
    {
      "epoch": 0.31007751937984496,
      "grad_norm": 4.721141815185547,
      "learning_rate": 8.732500020666748e-06,
      "loss": 0.2726,
      "step": 330
    },
    {
      "epoch": 0.31947380784590085,
      "grad_norm": 4.359829425811768,
      "learning_rate": 8.621412068366455e-06,
      "loss": 0.2807,
      "step": 340
    },
    {
      "epoch": 0.3288700963119568,
      "grad_norm": 2.9552581310272217,
      "learning_rate": 8.506430010223899e-06,
      "loss": 0.2921,
      "step": 350
    },
    {
      "epoch": 0.3288700963119568,
      "eval_loss": 0.23158399760723114,
      "eval_runtime": 528.3567,
      "eval_samples_per_second": 1.79,
      "eval_steps_per_second": 0.895,
      "step": 350
    },
    {
      "epoch": 0.3382663847780127,
      "grad_norm": 4.709650039672852,
      "learning_rate": 8.38767748649208e-06,
      "loss": 0.306,
      "step": 360
    },
    {
      "epoch": 0.3476626732440686,
      "grad_norm": 2.7597694396972656,
      "learning_rate": 8.26528219180716e-06,
      "loss": 0.2148,
      "step": 370
    },
    {
      "epoch": 0.3570589617101245,
      "grad_norm": 6.239573001861572,
      "learning_rate": 8.139375737878356e-06,
      "loss": 0.1558,
      "step": 380
    },
    {
      "epoch": 0.3664552501761804,
      "grad_norm": 2.6109910011291504,
      "learning_rate": 8.01009351196582e-06,
      "loss": 0.1945,
      "step": 390
    },
    {
      "epoch": 0.3758515386422363,
      "grad_norm": 5.080913543701172,
      "learning_rate": 7.877574531298666e-06,
      "loss": 0.3191,
      "step": 400
    },
    {
      "epoch": 0.3758515386422363,
      "eval_loss": 0.23533165454864502,
      "eval_runtime": 491.1933,
      "eval_samples_per_second": 1.926,
      "eval_steps_per_second": 0.963,
      "step": 400
    },
    {
      "epoch": 0.38524782710829225,
      "grad_norm": 3.903905153274536,
      "learning_rate": 7.741961293589693e-06,
      "loss": 0.2265,
      "step": 410
    },
    {
      "epoch": 0.39464411557434814,
      "grad_norm": 5.586989879608154,
      "learning_rate": 7.603399623807519e-06,
      "loss": 0.2608,
      "step": 420
    },
    {
      "epoch": 0.40404040404040403,
      "grad_norm": 3.9632835388183594,
      "learning_rate": 7.462038517370962e-06,
      "loss": 0.2456,
      "step": 430
    },
    {
      "epoch": 0.4134366925064599,
      "grad_norm": 7.482256889343262,
      "learning_rate": 7.318029979934181e-06,
      "loss": 0.2898,
      "step": 440
    },
    {
      "epoch": 0.42283298097251587,
      "grad_norm": 3.7885382175445557,
      "learning_rate": 7.17152886393495e-06,
      "loss": 0.313,
      "step": 450
    },
    {
      "epoch": 0.42283298097251587,
      "eval_loss": 0.22310128808021545,
      "eval_runtime": 516.4179,
      "eval_samples_per_second": 1.832,
      "eval_steps_per_second": 0.916,
      "step": 450
    },
    {
      "epoch": 0.43222926943857176,
      "grad_norm": 4.9588093757629395,
      "learning_rate": 7.022692702081766e-06,
      "loss": 0.2691,
      "step": 460
    },
    {
      "epoch": 0.44162555790462765,
      "grad_norm": 4.674664497375488,
      "learning_rate": 6.871681537958862e-06,
      "loss": 0.2254,
      "step": 470
    },
    {
      "epoch": 0.4510218463706836,
      "grad_norm": 5.224850654602051,
      "learning_rate": 6.718657753931284e-06,
      "loss": 0.2315,
      "step": 480
    },
    {
      "epoch": 0.4604181348367395,
      "grad_norm": 4.065497398376465,
      "learning_rate": 6.56378589653506e-06,
      "loss": 0.2944,
      "step": 490
    },
    {
      "epoch": 0.4698144233027954,
      "grad_norm": 4.546512603759766,
      "learning_rate": 6.40723249954025e-06,
      "loss": 0.2037,
      "step": 500
    },
    {
      "epoch": 0.4698144233027954,
      "eval_loss": 0.213790163397789,
      "eval_runtime": 486.7052,
      "eval_samples_per_second": 1.944,
      "eval_steps_per_second": 0.972,
      "step": 500
    },
    {
      "epoch": 0.4792107117688513,
      "grad_norm": 3.3908934593200684,
      "learning_rate": 6.2491659048771215e-06,
      "loss": 0.2189,
      "step": 510
    },
    {
      "epoch": 0.4886070002349072,
      "grad_norm": 4.911809921264648,
      "learning_rate": 6.089756081617987e-06,
      "loss": 0.222,
      "step": 520
    },
    {
      "epoch": 0.4980032887009631,
      "grad_norm": 2.711010694503784,
      "learning_rate": 5.929174443209416e-06,
      "loss": 0.2179,
      "step": 530
    },
    {
      "epoch": 0.507399577167019,
      "grad_norm": 3.0764307975769043,
      "learning_rate": 5.767593663151265e-06,
      "loss": 0.2111,
      "step": 540
    },
    {
      "epoch": 0.5167958656330749,
      "grad_norm": 4.922618389129639,
      "learning_rate": 5.605187489320815e-06,
      "loss": 0.1729,
      "step": 550
    },
    {
      "epoch": 0.5167958656330749,
      "eval_loss": 0.20741979777812958,
      "eval_runtime": 508.5854,
      "eval_samples_per_second": 1.86,
      "eval_steps_per_second": 0.93,
      "step": 550
    },
    {
      "epoch": 0.5261921540991309,
      "grad_norm": 3.6055169105529785,
      "learning_rate": 5.442130557141595e-06,
      "loss": 0.1785,
      "step": 560
    },
    {
      "epoch": 0.5355884425651868,
      "grad_norm": 4.116625785827637,
      "learning_rate": 5.278598201797844e-06,
      "loss": 0.1761,
      "step": 570
    },
    {
      "epoch": 0.5449847310312427,
      "grad_norm": 6.054320335388184,
      "learning_rate": 5.1147662696965254e-06,
      "loss": 0.2185,
      "step": 580
    },
    {
      "epoch": 0.5543810194972986,
      "grad_norm": 3.523534059524536,
      "learning_rate": 4.9508109293796015e-06,
      "loss": 0.211,
      "step": 590
    },
    {
      "epoch": 0.5637773079633545,
      "grad_norm": 6.204962730407715,
      "learning_rate": 4.7869084820899455e-06,
      "loss": 0.289,
      "step": 600
    },
    {
      "epoch": 0.5637773079633545,
      "eval_loss": 0.1954464614391327,
      "eval_runtime": 498.5386,
      "eval_samples_per_second": 1.898,
      "eval_steps_per_second": 0.949,
      "step": 600
    },
    {
      "epoch": 0.5731735964294103,
      "grad_norm": 4.921229839324951,
      "learning_rate": 4.623235172194532e-06,
      "loss": 0.2154,
      "step": 610
    },
    {
      "epoch": 0.5825698848954662,
      "grad_norm": 4.28684139251709,
      "learning_rate": 4.459966997668812e-06,
      "loss": 0.2147,
      "step": 620
    },
    {
      "epoch": 0.5919661733615222,
      "grad_norm": 4.43740177154541,
      "learning_rate": 4.297279520846002e-06,
      "loss": 0.1491,
      "step": 630
    },
    {
      "epoch": 0.6013624618275781,
      "grad_norm": 6.9565863609313965,
      "learning_rate": 4.135347679634849e-06,
      "loss": 0.2285,
      "step": 640
    },
    {
      "epoch": 0.610758750293634,
      "grad_norm": 1.496799349784851,
      "learning_rate": 3.974345599408833e-06,
      "loss": 0.2775,
      "step": 650
    },
    {
      "epoch": 0.610758750293634,
      "eval_loss": 0.18974561989307404,
      "eval_runtime": 488.3923,
      "eval_samples_per_second": 1.937,
      "eval_steps_per_second": 0.968,
      "step": 650
    },
    {
      "epoch": 0.6201550387596899,
      "grad_norm": 2.838442087173462,
      "learning_rate": 3.814446405769069e-06,
      "loss": 0.214,
      "step": 660
    },
    {
      "epoch": 0.6295513272257458,
      "grad_norm": 7.474781513214111,
      "learning_rate": 3.6558220383822824e-06,
      "loss": 0.2392,
      "step": 670
    },
    {
      "epoch": 0.6389476156918017,
      "grad_norm": 5.7680511474609375,
      "learning_rate": 3.4986430660940283e-06,
      "loss": 0.2062,
      "step": 680
    },
    {
      "epoch": 0.6483439041578577,
      "grad_norm": 3.3340532779693604,
      "learning_rate": 3.3430785035159297e-06,
      "loss": 0.1738,
      "step": 690
    },
    {
      "epoch": 0.6577401926239136,
      "grad_norm": 2.487337350845337,
      "learning_rate": 3.1892956292842103e-06,
      "loss": 0.1546,
      "step": 700
    },
    {
      "epoch": 0.6577401926239136,
      "eval_loss": 0.18141551315784454,
      "eval_runtime": 504.0426,
      "eval_samples_per_second": 1.877,
      "eval_steps_per_second": 0.938,
      "step": 700
    },
    {
      "epoch": 0.6671364810899695,
      "grad_norm": 4.324577331542969,
      "learning_rate": 3.0374598061848936e-06,
      "loss": 0.2037,
      "step": 710
    },
    {
      "epoch": 0.6765327695560254,
      "grad_norm": 3.728818893432617,
      "learning_rate": 2.8877343033391523e-06,
      "loss": 0.14,
      "step": 720
    },
    {
      "epoch": 0.6859290580220813,
      "grad_norm": 3.1286537647247314,
      "learning_rate": 2.7402801206399454e-06,
      "loss": 0.1721,
      "step": 730
    },
    {
      "epoch": 0.6953253464881372,
      "grad_norm": 7.396164417266846,
      "learning_rate": 2.595255815628774e-06,
      "loss": 0.2586,
      "step": 740
    },
    {
      "epoch": 0.704721634954193,
      "grad_norm": 1.8540730476379395,
      "learning_rate": 2.4528173329986855e-06,
      "loss": 0.1613,
      "step": 750
    },
    {
      "epoch": 0.704721634954193,
      "eval_loss": 0.1746373176574707,
      "eval_runtime": 482.272,
      "eval_samples_per_second": 1.962,
      "eval_steps_per_second": 0.981,
      "step": 750
    },
    {
      "epoch": 0.714117923420249,
      "grad_norm": 1.6450875997543335,
      "learning_rate": 2.313117836906897e-06,
      "loss": 0.2041,
      "step": 760
    },
    {
      "epoch": 0.7235142118863049,
      "grad_norm": 1.6683011054992676,
      "learning_rate": 2.1763075462773002e-06,
      "loss": 0.2033,
      "step": 770
    },
    {
      "epoch": 0.7329105003523608,
      "grad_norm": 5.101831912994385,
      "learning_rate": 2.0425335732700075e-06,
      "loss": 0.2497,
      "step": 780
    },
    {
      "epoch": 0.7423067888184167,
      "grad_norm": 10.428893089294434,
      "learning_rate": 1.9119397650915774e-06,
      "loss": 0.2354,
      "step": 790
    },
    {
      "epoch": 0.7517030772844726,
      "grad_norm": 3.380574941635132,
      "learning_rate": 1.7846665493160548e-06,
      "loss": 0.0956,
      "step": 800
    },
    {
      "epoch": 0.7517030772844726,
      "eval_loss": 0.1724964827299118,
      "eval_runtime": 494.611,
      "eval_samples_per_second": 1.913,
      "eval_steps_per_second": 0.956,
      "step": 800
    },
    {
      "epoch": 0.7610993657505285,
      "grad_norm": 3.29054594039917,
      "learning_rate": 1.6608507828831572e-06,
      "loss": 0.1711,
      "step": 810
    },
    {
      "epoch": 0.7704956542165845,
      "grad_norm": 1.7452706098556519,
      "learning_rate": 1.5406256049359359e-06,
      "loss": 0.2362,
      "step": 820
    },
    {
      "epoch": 0.7798919426826404,
      "grad_norm": 3.282259464263916,
      "learning_rate": 1.4241202936562164e-06,
      "loss": 0.1692,
      "step": 830
    },
    {
      "epoch": 0.7892882311486963,
      "grad_norm": 2.6481103897094727,
      "learning_rate": 1.311460127251708e-06,
      "loss": 0.1608,
      "step": 840
    },
    {
      "epoch": 0.7986845196147522,
      "grad_norm": 4.806708335876465,
      "learning_rate": 1.202766249244306e-06,
      "loss": 0.1692,
      "step": 850
    },
    {
      "epoch": 0.7986845196147522,
      "eval_loss": 0.1683204621076584,
      "eval_runtime": 536.8401,
      "eval_samples_per_second": 1.762,
      "eval_steps_per_second": 0.881,
      "step": 850
    },
    {
      "epoch": 0.8080808080808081,
      "grad_norm": 3.949918031692505,
      "learning_rate": 1.098155538204404e-06,
      "loss": 0.1718,
      "step": 860
    },
    {
      "epoch": 0.817477096546864,
      "grad_norm": 4.87109899520874,
      "learning_rate": 9.977404820713315e-07,
      "loss": 0.1425,
      "step": 870
    },
    {
      "epoch": 0.8268733850129198,
      "grad_norm": 2.2142727375030518,
      "learning_rate": 9.016290571950171e-07,
      "loss": 0.1196,
      "step": 880
    },
    {
      "epoch": 0.8362696734789759,
      "grad_norm": 1.4838513135910034,
      "learning_rate": 8.099246122289861e-07,
      "loss": 0.2077,
      "step": 890
    },
    {
      "epoch": 0.8456659619450317,
      "grad_norm": 3.8143913745880127,
      "learning_rate": 7.227257569995061e-07,
      "loss": 0.1885,
      "step": 900
    },
    {
      "epoch": 0.8456659619450317,
      "eval_loss": 0.16525517404079437,
      "eval_runtime": 525.2309,
      "eval_samples_per_second": 1.801,
      "eval_steps_per_second": 0.901,
      "step": 900
    },
    {
      "epoch": 0.8550622504110876,
      "grad_norm": 4.804526329040527,
      "learning_rate": 6.401262564704019e-07,
      "loss": 0.2379,
      "step": 910
    },
    {
      "epoch": 0.8644585388771435,
      "grad_norm": 4.881964683532715,
      "learning_rate": 5.622149299175539e-07,
      "loss": 0.2253,
      "step": 920
    },
    {
      "epoch": 0.8738548273431994,
      "grad_norm": 3.667691230773926,
      "learning_rate": 4.890755554214954e-07,
      "loss": 0.1952,
      "step": 930
    },
    {
      "epoch": 0.8832511158092553,
      "grad_norm": 2.946258306503296,
      "learning_rate": 4.207867797808102e-07,
      "loss": 0.1522,
      "step": 940
    },
    {
      "epoch": 0.8926474042753113,
      "grad_norm": 6.754327774047852,
      "learning_rate": 3.5742203394319606e-07,
      "loss": 0.2799,
      "step": 950
    },
    {
      "epoch": 0.8926474042753113,
      "eval_loss": 0.163659006357193,
      "eval_runtime": 541.8924,
      "eval_samples_per_second": 1.746,
      "eval_steps_per_second": 0.873,
      "step": 950
    },
    {
      "epoch": 0.9020436927413672,
      "grad_norm": 6.084792137145996,
      "learning_rate": 2.9904945404513606e-07,
      "loss": 0.173,
      "step": 960
    },
    {
      "epoch": 0.9114399812074231,
      "grad_norm": 7.379616737365723,
      "learning_rate": 2.457318081450899e-07,
      "loss": 0.184,
      "step": 970
    },
    {
      "epoch": 0.920836269673479,
      "grad_norm": 1.6431665420532227,
      "learning_rate": 1.9752642872897078e-07,
      "loss": 0.1533,
      "step": 980
    },
    {
      "epoch": 0.9302325581395349,
      "grad_norm": 2.5774223804473877,
      "learning_rate": 1.5448515106050165e-07,
      "loss": 0.2073,
      "step": 990
    },
    {
      "epoch": 0.9396288466055908,
      "grad_norm": 3.7589077949523926,
      "learning_rate": 1.166542574427354e-07,
      "loss": 0.1971,
      "step": 1000
    },
    {
      "epoch": 0.9396288466055908,
      "eval_loss": 0.16281382739543915,
      "eval_runtime": 517.5907,
      "eval_samples_per_second": 1.828,
      "eval_steps_per_second": 0.914,
      "step": 1000
    },
    {
      "epoch": 0.9490251350716467,
      "grad_norm": 2.6644127368927,
      "learning_rate": 8.407442745067552e-08,
      "loss": 0.1787,
      "step": 1010
    },
    {
      "epoch": 0.9584214235377027,
      "grad_norm": 2.4552407264709473,
      "learning_rate": 5.678069418851351e-08,
      "loss": 0.1581,
      "step": 1020
    },
    {
      "epoch": 0.9678177120037585,
      "grad_norm": 4.062498569488525,
      "learning_rate": 3.4802406618518195e-08,
      "loss": 0.1776,
      "step": 1030
    },
    {
      "epoch": 0.9772140004698144,
      "grad_norm": 5.595784664154053,
      "learning_rate": 1.816319800208466e-08,
      "loss": 0.1669,
      "step": 1040
    },
    {
      "epoch": 0.9866102889358703,
      "grad_norm": 3.4040722846984863,
      "learning_rate": 6.880960486877475e-09,
      "loss": 0.1464,
      "step": 1050
    },
    {
      "epoch": 0.9866102889358703,
      "eval_loss": 0.16257165372371674,
      "eval_runtime": 534.9845,
      "eval_samples_per_second": 1.768,
      "eval_steps_per_second": 0.884,
      "step": 1050
    },
    {
      "epoch": 0.9960065774019262,
      "grad_norm": 2.6516175270080566,
      "learning_rate": 9.678258673995189e-10,
      "loss": 0.2124,
      "step": 1060
    },
    {
      "epoch": 1.0,
      "step": 1065,
      "total_flos": 57141526265856.0,
      "train_loss": 0.2802479102018294,
      "train_runtime": 34553.4877,
      "train_samples_per_second": 0.246,
      "train_steps_per_second": 0.031
    }
  ],
  "logging_steps": 10,
  "max_steps": 1065,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 57141526265856.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}