{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 10,
  "global_step": 471,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06369426751592357,
      "grad_norm": 5.655877590179443,
      "learning_rate": 4.893842887473461e-05,
      "loss": 0.9888,
      "step": 10
    },
    {
      "epoch": 0.06369426751592357,
      "eval_loss": 0.6204623579978943,
      "eval_runtime": 260.4945,
      "eval_samples_per_second": 1.332,
      "eval_steps_per_second": 0.334,
      "step": 10
    },
    {
      "epoch": 0.12738853503184713,
      "grad_norm": 5.2134904861450195,
      "learning_rate": 4.787685774946922e-05,
      "loss": 0.6134,
      "step": 20
    },
    {
      "epoch": 0.12738853503184713,
      "eval_loss": 0.5212348699569702,
      "eval_runtime": 261.046,
      "eval_samples_per_second": 1.329,
      "eval_steps_per_second": 0.333,
      "step": 20
    },
    {
      "epoch": 0.1910828025477707,
      "grad_norm": 6.658833026885986,
      "learning_rate": 4.681528662420383e-05,
      "loss": 0.5704,
      "step": 30
    },
    {
      "epoch": 0.1910828025477707,
      "eval_loss": 0.5708295106887817,
      "eval_runtime": 266.4556,
      "eval_samples_per_second": 1.302,
      "eval_steps_per_second": 0.327,
      "step": 30
    },
    {
      "epoch": 0.25477707006369427,
      "grad_norm": 4.93190336227417,
      "learning_rate": 4.575371549893843e-05,
      "loss": 0.5118,
      "step": 40
    },
    {
      "epoch": 0.25477707006369427,
      "eval_loss": 0.45317262411117554,
      "eval_runtime": 263.8344,
      "eval_samples_per_second": 1.315,
      "eval_steps_per_second": 0.33,
      "step": 40
    },
    {
      "epoch": 0.3184713375796178,
      "grad_norm": 6.612313747406006,
      "learning_rate": 4.469214437367304e-05,
      "loss": 0.5768,
      "step": 50
    },
    {
      "epoch": 0.3184713375796178,
      "eval_loss": 0.5027761459350586,
      "eval_runtime": 317.3646,
      "eval_samples_per_second": 1.093,
      "eval_steps_per_second": 0.274,
      "step": 50
    },
    {
      "epoch": 0.3821656050955414,
      "grad_norm": 6.43605375289917,
      "learning_rate": 4.3630573248407646e-05,
      "loss": 0.4945,
      "step": 60
    },
    {
      "epoch": 0.3821656050955414,
      "eval_loss": 0.45588889718055725,
      "eval_runtime": 269.9949,
      "eval_samples_per_second": 1.285,
      "eval_steps_per_second": 0.322,
      "step": 60
    },
    {
      "epoch": 0.445859872611465,
      "grad_norm": 3.744384527206421,
      "learning_rate": 4.256900212314226e-05,
      "loss": 0.4173,
      "step": 70
    },
    {
      "epoch": 0.445859872611465,
      "eval_loss": 0.44179391860961914,
      "eval_runtime": 263.2415,
      "eval_samples_per_second": 1.318,
      "eval_steps_per_second": 0.33,
      "step": 70
    },
    {
      "epoch": 0.5095541401273885,
      "grad_norm": 4.705707550048828,
      "learning_rate": 4.150743099787686e-05,
      "loss": 0.4277,
      "step": 80
    },
    {
      "epoch": 0.5095541401273885,
      "eval_loss": 0.37640380859375,
      "eval_runtime": 264.4226,
      "eval_samples_per_second": 1.312,
      "eval_steps_per_second": 0.329,
      "step": 80
    },
    {
      "epoch": 0.5732484076433121,
      "grad_norm": 2.7133395671844482,
      "learning_rate": 4.044585987261147e-05,
      "loss": 0.4374,
      "step": 90
    },
    {
      "epoch": 0.5732484076433121,
      "eval_loss": 0.35762831568717957,
      "eval_runtime": 264.9493,
      "eval_samples_per_second": 1.31,
      "eval_steps_per_second": 0.328,
      "step": 90
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 4.4099650382995605,
      "learning_rate": 3.9384288747346076e-05,
      "loss": 0.3717,
      "step": 100
    },
    {
      "epoch": 0.6369426751592356,
      "eval_loss": 0.35183703899383545,
      "eval_runtime": 278.8216,
      "eval_samples_per_second": 1.245,
      "eval_steps_per_second": 0.312,
      "step": 100
    },
    {
      "epoch": 0.7006369426751592,
      "grad_norm": 15.860236167907715,
      "learning_rate": 3.8322717622080686e-05,
      "loss": 0.4415,
      "step": 110
    },
    {
      "epoch": 0.7006369426751592,
      "eval_loss": 0.6824913620948792,
      "eval_runtime": 281.0438,
      "eval_samples_per_second": 1.235,
      "eval_steps_per_second": 0.31,
      "step": 110
    },
    {
      "epoch": 0.7643312101910829,
      "grad_norm": 5.011375427246094,
      "learning_rate": 3.7261146496815283e-05,
      "loss": 0.3639,
      "step": 120
    },
    {
      "epoch": 0.7643312101910829,
      "eval_loss": 0.3454379737377167,
      "eval_runtime": 281.28,
      "eval_samples_per_second": 1.234,
      "eval_steps_per_second": 0.309,
      "step": 120
    },
    {
      "epoch": 0.8280254777070064,
      "grad_norm": 3.8196499347686768,
      "learning_rate": 3.6199575371549894e-05,
      "loss": 0.3604,
      "step": 130
    },
    {
      "epoch": 0.8280254777070064,
      "eval_loss": 0.33031439781188965,
      "eval_runtime": 266.1981,
      "eval_samples_per_second": 1.304,
      "eval_steps_per_second": 0.327,
      "step": 130
    },
    {
      "epoch": 0.89171974522293,
      "grad_norm": 8.475601196289062,
      "learning_rate": 3.51380042462845e-05,
      "loss": 0.3782,
      "step": 140
    },
    {
      "epoch": 0.89171974522293,
      "eval_loss": 0.33289575576782227,
      "eval_runtime": 265.8273,
      "eval_samples_per_second": 1.305,
      "eval_steps_per_second": 0.327,
      "step": 140
    },
    {
      "epoch": 0.9554140127388535,
      "grad_norm": 4.658049583435059,
      "learning_rate": 3.407643312101911e-05,
      "loss": 0.36,
      "step": 150
    },
    {
      "epoch": 0.9554140127388535,
      "eval_loss": 0.33607012033462524,
      "eval_runtime": 277.0995,
      "eval_samples_per_second": 1.252,
      "eval_steps_per_second": 0.314,
      "step": 150
    },
    {
      "epoch": 1.019108280254777,
      "grad_norm": 2.7650649547576904,
      "learning_rate": 3.301486199575371e-05,
      "loss": 0.3244,
      "step": 160
    },
    {
      "epoch": 1.019108280254777,
      "eval_loss": 0.3019406497478485,
      "eval_runtime": 281.1357,
      "eval_samples_per_second": 1.234,
      "eval_steps_per_second": 0.309,
      "step": 160
    },
    {
      "epoch": 1.0828025477707006,
      "grad_norm": 7.399433612823486,
      "learning_rate": 3.1953290870488323e-05,
      "loss": 0.3337,
      "step": 170
    },
    {
      "epoch": 1.0828025477707006,
      "eval_loss": 0.33017274737358093,
      "eval_runtime": 279.943,
      "eval_samples_per_second": 1.24,
      "eval_steps_per_second": 0.311,
      "step": 170
    },
    {
      "epoch": 1.1464968152866242,
      "grad_norm": 6.1573567390441895,
      "learning_rate": 3.089171974522293e-05,
      "loss": 0.2967,
      "step": 180
    },
    {
      "epoch": 1.1464968152866242,
      "eval_loss": 0.286271870136261,
      "eval_runtime": 279.1484,
      "eval_samples_per_second": 1.243,
      "eval_steps_per_second": 0.312,
      "step": 180
    },
    {
      "epoch": 1.2101910828025477,
      "grad_norm": 5.677030086517334,
      "learning_rate": 2.9830148619957538e-05,
      "loss": 0.3236,
      "step": 190
    },
    {
      "epoch": 1.2101910828025477,
      "eval_loss": 0.3013075590133667,
      "eval_runtime": 280.1463,
      "eval_samples_per_second": 1.239,
      "eval_steps_per_second": 0.311,
      "step": 190
    },
    {
      "epoch": 1.2738853503184713,
      "grad_norm": 3.6576144695281982,
      "learning_rate": 2.8768577494692145e-05,
      "loss": 0.2848,
      "step": 200
    },
    {
      "epoch": 1.2738853503184713,
      "eval_loss": 0.28381872177124023,
      "eval_runtime": 278.4374,
      "eval_samples_per_second": 1.246,
      "eval_steps_per_second": 0.312,
      "step": 200
    },
    {
      "epoch": 1.3375796178343948,
      "grad_norm": 3.5609078407287598,
      "learning_rate": 2.7707006369426753e-05,
      "loss": 0.2138,
      "step": 210
    },
    {
      "epoch": 1.3375796178343948,
      "eval_loss": 0.27076515555381775,
      "eval_runtime": 269.6079,
      "eval_samples_per_second": 1.287,
      "eval_steps_per_second": 0.323,
      "step": 210
    },
    {
      "epoch": 1.4012738853503186,
      "grad_norm": 5.47592306137085,
      "learning_rate": 2.664543524416136e-05,
      "loss": 0.2646,
      "step": 220
    },
    {
      "epoch": 1.4012738853503186,
      "eval_loss": 0.2966860830783844,
      "eval_runtime": 267.6504,
      "eval_samples_per_second": 1.296,
      "eval_steps_per_second": 0.325,
      "step": 220
    },
    {
      "epoch": 1.4649681528662422,
      "grad_norm": 5.770505428314209,
      "learning_rate": 2.5583864118895967e-05,
      "loss": 0.2784,
      "step": 230
    },
    {
      "epoch": 1.4649681528662422,
      "eval_loss": 0.25654563307762146,
      "eval_runtime": 270.188,
      "eval_samples_per_second": 1.284,
      "eval_steps_per_second": 0.322,
      "step": 230
    },
    {
      "epoch": 1.5286624203821657,
      "grad_norm": 9.245145797729492,
      "learning_rate": 2.4522292993630575e-05,
      "loss": 0.3004,
      "step": 240
    },
    {
      "epoch": 1.5286624203821657,
      "eval_loss": 0.2588840425014496,
      "eval_runtime": 270.9119,
      "eval_samples_per_second": 1.281,
      "eval_steps_per_second": 0.321,
      "step": 240
    },
    {
      "epoch": 1.5923566878980893,
      "grad_norm": 6.825544834136963,
      "learning_rate": 2.3460721868365182e-05,
      "loss": 0.2585,
      "step": 250
    },
    {
      "epoch": 1.5923566878980893,
      "eval_loss": 0.2777319848537445,
      "eval_runtime": 271.4967,
      "eval_samples_per_second": 1.278,
      "eval_steps_per_second": 0.32,
      "step": 250
    },
    {
      "epoch": 1.6560509554140128,
      "grad_norm": 5.504091262817383,
      "learning_rate": 2.239915074309979e-05,
      "loss": 0.239,
      "step": 260
    },
    {
      "epoch": 1.6560509554140128,
      "eval_loss": 0.2845991551876068,
      "eval_runtime": 275.4719,
      "eval_samples_per_second": 1.26,
      "eval_steps_per_second": 0.316,
      "step": 260
    },
    {
      "epoch": 1.7197452229299364,
      "grad_norm": 1.7976171970367432,
      "learning_rate": 2.1337579617834397e-05,
      "loss": 0.324,
      "step": 270
    },
    {
      "epoch": 1.7197452229299364,
      "eval_loss": 0.2418244183063507,
      "eval_runtime": 273.3976,
      "eval_samples_per_second": 1.269,
      "eval_steps_per_second": 0.318,
      "step": 270
    },
    {
      "epoch": 1.78343949044586,
      "grad_norm": 7.518586158752441,
      "learning_rate": 2.0276008492569004e-05,
      "loss": 0.2661,
      "step": 280
    },
    {
      "epoch": 1.78343949044586,
      "eval_loss": 0.2677413821220398,
      "eval_runtime": 273.2159,
      "eval_samples_per_second": 1.27,
      "eval_steps_per_second": 0.318,
      "step": 280
    },
    {
      "epoch": 1.8471337579617835,
      "grad_norm": 3.743008613586426,
      "learning_rate": 1.921443736730361e-05,
      "loss": 0.2763,
      "step": 290
    },
    {
      "epoch": 1.8471337579617835,
      "eval_loss": 0.2228710651397705,
      "eval_runtime": 272.9055,
      "eval_samples_per_second": 1.272,
      "eval_steps_per_second": 0.319,
      "step": 290
    },
    {
      "epoch": 1.910828025477707,
      "grad_norm": 7.198996543884277,
      "learning_rate": 1.8152866242038215e-05,
      "loss": 0.1957,
      "step": 300
    },
    {
      "epoch": 1.910828025477707,
      "eval_loss": 0.25310221314430237,
      "eval_runtime": 272.8452,
      "eval_samples_per_second": 1.272,
      "eval_steps_per_second": 0.319,
      "step": 300
    },
    {
      "epoch": 1.9745222929936306,
      "grad_norm": 7.609084606170654,
      "learning_rate": 1.7091295116772823e-05,
      "loss": 0.251,
      "step": 310
    },
    {
      "epoch": 1.9745222929936306,
      "eval_loss": 0.250274658203125,
      "eval_runtime": 274.2439,
      "eval_samples_per_second": 1.265,
      "eval_steps_per_second": 0.317,
      "step": 310
    },
    {
      "epoch": 2.038216560509554,
      "grad_norm": 5.631246566772461,
      "learning_rate": 1.602972399150743e-05,
      "loss": 0.2124,
      "step": 320
    },
    {
      "epoch": 2.038216560509554,
      "eval_loss": 0.23672077059745789,
      "eval_runtime": 273.0475,
      "eval_samples_per_second": 1.271,
      "eval_steps_per_second": 0.319,
      "step": 320
    },
    {
      "epoch": 2.1019108280254777,
      "grad_norm": 8.399256706237793,
      "learning_rate": 1.4968152866242039e-05,
      "loss": 0.1832,
      "step": 330
    },
    {
      "epoch": 2.1019108280254777,
      "eval_loss": 0.2170046865940094,
      "eval_runtime": 274.911,
      "eval_samples_per_second": 1.262,
      "eval_steps_per_second": 0.316,
      "step": 330
    },
    {
      "epoch": 2.1656050955414012,
      "grad_norm": 6.787008285522461,
      "learning_rate": 1.3906581740976646e-05,
      "loss": 0.1968,
      "step": 340
    },
    {
      "epoch": 2.1656050955414012,
      "eval_loss": 0.25750404596328735,
      "eval_runtime": 274.5796,
      "eval_samples_per_second": 1.264,
      "eval_steps_per_second": 0.317,
      "step": 340
    },
    {
      "epoch": 2.229299363057325,
      "grad_norm": 4.561034202575684,
      "learning_rate": 1.2845010615711253e-05,
      "loss": 0.2169,
      "step": 350
    },
    {
      "epoch": 2.229299363057325,
      "eval_loss": 0.21437929570674896,
      "eval_runtime": 274.1716,
      "eval_samples_per_second": 1.266,
      "eval_steps_per_second": 0.317,
      "step": 350
    },
    {
      "epoch": 2.2929936305732483,
      "grad_norm": 2.4580085277557373,
      "learning_rate": 1.178343949044586e-05,
      "loss": 0.1645,
      "step": 360
    },
    {
      "epoch": 2.2929936305732483,
      "eval_loss": 0.21549557149410248,
      "eval_runtime": 274.4038,
      "eval_samples_per_second": 1.265,
      "eval_steps_per_second": 0.317,
      "step": 360
    },
    {
      "epoch": 2.356687898089172,
      "grad_norm": 5.2749786376953125,
      "learning_rate": 1.0721868365180468e-05,
      "loss": 0.1292,
      "step": 370
    },
    {
      "epoch": 2.356687898089172,
      "eval_loss": 0.24459926784038544,
      "eval_runtime": 275.4779,
      "eval_samples_per_second": 1.26,
      "eval_steps_per_second": 0.316,
      "step": 370
    },
    {
      "epoch": 2.4203821656050954,
      "grad_norm": 11.909132957458496,
      "learning_rate": 9.660297239915075e-06,
      "loss": 0.2064,
      "step": 380
    },
    {
      "epoch": 2.4203821656050954,
      "eval_loss": 0.2202015370130539,
      "eval_runtime": 276.3076,
      "eval_samples_per_second": 1.256,
      "eval_steps_per_second": 0.315,
      "step": 380
    },
    {
      "epoch": 2.484076433121019,
      "grad_norm": 8.857806205749512,
      "learning_rate": 8.598726114649681e-06,
      "loss": 0.2967,
      "step": 390
    },
    {
      "epoch": 2.484076433121019,
      "eval_loss": 0.2075081467628479,
      "eval_runtime": 275.5237,
      "eval_samples_per_second": 1.259,
      "eval_steps_per_second": 0.316,
      "step": 390
    },
    {
      "epoch": 2.5477707006369426,
      "grad_norm": 8.795026779174805,
      "learning_rate": 7.537154989384289e-06,
      "loss": 0.2349,
      "step": 400
    },
    {
      "epoch": 2.5477707006369426,
      "eval_loss": 0.2141169160604477,
      "eval_runtime": 274.8429,
      "eval_samples_per_second": 1.263,
      "eval_steps_per_second": 0.317,
      "step": 400
    },
    {
      "epoch": 2.611464968152866,
      "grad_norm": 4.121735572814941,
      "learning_rate": 6.4755838641188965e-06,
      "loss": 0.161,
      "step": 410
    },
    {
      "epoch": 2.611464968152866,
      "eval_loss": 0.2091439962387085,
      "eval_runtime": 275.865,
      "eval_samples_per_second": 1.258,
      "eval_steps_per_second": 0.315,
      "step": 410
    },
    {
      "epoch": 2.6751592356687897,
      "grad_norm": 7.995586395263672,
      "learning_rate": 5.414012738853504e-06,
      "loss": 0.1786,
      "step": 420
    },
    {
      "epoch": 2.6751592356687897,
      "eval_loss": 0.21283170580863953,
      "eval_runtime": 274.9723,
      "eval_samples_per_second": 1.262,
      "eval_steps_per_second": 0.316,
      "step": 420
    },
    {
      "epoch": 2.738853503184713,
      "grad_norm": 6.569371700286865,
      "learning_rate": 4.35244161358811e-06,
      "loss": 0.2665,
      "step": 430
    },
    {
      "epoch": 2.738853503184713,
      "eval_loss": 0.19985607266426086,
      "eval_runtime": 275.6861,
      "eval_samples_per_second": 1.259,
      "eval_steps_per_second": 0.316,
      "step": 430
    },
    {
      "epoch": 2.802547770700637,
      "grad_norm": 2.53202223777771,
      "learning_rate": 3.2908704883227177e-06,
      "loss": 0.1377,
      "step": 440
    },
    {
      "epoch": 2.802547770700637,
      "eval_loss": 0.2025289386510849,
      "eval_runtime": 275.6519,
      "eval_samples_per_second": 1.259,
      "eval_steps_per_second": 0.316,
      "step": 440
    },
    {
      "epoch": 2.8662420382165603,
      "grad_norm": 2.821432590484619,
      "learning_rate": 2.229299363057325e-06,
      "loss": 0.2082,
      "step": 450
    },
    {
      "epoch": 2.8662420382165603,
      "eval_loss": 0.20033259689807892,
      "eval_runtime": 276.9112,
      "eval_samples_per_second": 1.253,
      "eval_steps_per_second": 0.314,
      "step": 450
    },
    {
      "epoch": 2.9299363057324843,
      "grad_norm": 3.187981605529785,
      "learning_rate": 1.167728237791932e-06,
      "loss": 0.1408,
      "step": 460
    },
    {
      "epoch": 2.9299363057324843,
      "eval_loss": 0.19696064293384552,
      "eval_runtime": 276.0427,
      "eval_samples_per_second": 1.257,
      "eval_steps_per_second": 0.315,
      "step": 460
    },
    {
      "epoch": 2.9936305732484074,
      "grad_norm": 3.8218815326690674,
      "learning_rate": 1.0615711252653928e-07,
      "loss": 0.1506,
      "step": 470
    },
    {
      "epoch": 2.9936305732484074,
      "eval_loss": 0.19708757102489471,
      "eval_runtime": 274.6555,
      "eval_samples_per_second": 1.263,
      "eval_steps_per_second": 0.317,
      "step": 470
    }
  ],
  "logging_steps": 10,
  "max_steps": 471,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 495041961535488.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}