{
"best_global_step": 400,
"best_metric": 0.16972463,
"best_model_checkpoint": "/Qwen/Qwen-VL-master/ckp/checkpoints_IO_3B/v0-20250504-043938/checkpoint-400",
"epoch": 0.9996103390050656,
"eval_steps": 100,
"global_step": 481,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002078191972983504,
"grad_norm": 49.5,
"learning_rate": 2.0000000000000002e-07,
"loss": 2.024962902069092,
"memory(GiB)": 30.5,
"step": 1,
"token_acc": 0.5925925925925926,
"train_speed(iter/s)": 0.096714
},
{
"epoch": 0.010390959864917521,
"grad_norm": 48.25,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.9107239246368408,
"memory(GiB)": 36.99,
"step": 5,
"token_acc": 0.6030968247745983,
"train_speed(iter/s)": 0.104169
},
{
"epoch": 0.020781919729835042,
"grad_norm": 46.0,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.8856124877929688,
"memory(GiB)": 36.99,
"step": 10,
"token_acc": 0.6060725241878392,
"train_speed(iter/s)": 0.10431
},
{
"epoch": 0.031172879594752564,
"grad_norm": 41.25,
"learning_rate": 3e-06,
"loss": 1.6671241760253905,
"memory(GiB)": 36.99,
"step": 15,
"token_acc": 0.6276759587247805,
"train_speed(iter/s)": 0.104357
},
{
"epoch": 0.041563839459670085,
"grad_norm": 35.75,
"learning_rate": 4.000000000000001e-06,
"loss": 1.4283610343933106,
"memory(GiB)": 36.99,
"step": 20,
"token_acc": 0.644587791063562,
"train_speed(iter/s)": 0.104096
},
{
"epoch": 0.05195479932458761,
"grad_norm": 24.125,
"learning_rate": 5e-06,
"loss": 0.8581354141235351,
"memory(GiB)": 36.99,
"step": 25,
"token_acc": 0.7361944076641483,
"train_speed(iter/s)": 0.103919
},
{
"epoch": 0.06234575918950513,
"grad_norm": 10.4375,
"learning_rate": 4.998516877114918e-06,
"loss": 0.3941912889480591,
"memory(GiB)": 36.99,
"step": 30,
"token_acc": 0.8509957048028114,
"train_speed(iter/s)": 0.103913
},
{
"epoch": 0.07273671905442265,
"grad_norm": 3.6875,
"learning_rate": 4.994069268182461e-06,
"loss": 0.24166102409362794,
"memory(GiB)": 36.99,
"step": 35,
"token_acc": 0.9107004193197702,
"train_speed(iter/s)": 0.103983
},
{
"epoch": 0.08312767891934017,
"grad_norm": 1.828125,
"learning_rate": 4.986662450283107e-06,
"loss": 0.2008065700531006,
"memory(GiB)": 36.99,
"step": 40,
"token_acc": 0.9205524090309566,
"train_speed(iter/s)": 0.103895
},
{
"epoch": 0.0935186387842577,
"grad_norm": 2.265625,
"learning_rate": 4.976305211593758e-06,
"loss": 0.19015437364578247,
"memory(GiB)": 36.99,
"step": 45,
"token_acc": 0.9180647678501755,
"train_speed(iter/s)": 0.103752
},
{
"epoch": 0.10390959864917522,
"grad_norm": 1.9921875,
"learning_rate": 4.963009840960598e-06,
"loss": 0.19012608528137206,
"memory(GiB)": 36.99,
"step": 50,
"token_acc": 0.9183419043166589,
"train_speed(iter/s)": 0.103736
},
{
"epoch": 0.11430055851409274,
"grad_norm": 1.9921875,
"learning_rate": 4.9467921133183864e-06,
"loss": 0.1876815915107727,
"memory(GiB)": 36.99,
"step": 55,
"token_acc": 0.9174203696421549,
"train_speed(iter/s)": 0.103693
},
{
"epoch": 0.12469151837901025,
"grad_norm": 1.640625,
"learning_rate": 4.927671270973533e-06,
"loss": 0.18475005626678467,
"memory(GiB)": 36.99,
"step": 60,
"token_acc": 0.9194869417400711,
"train_speed(iter/s)": 0.103749
},
{
"epoch": 0.13508247824392777,
"grad_norm": 2.03125,
"learning_rate": 4.905670000773126e-06,
"loss": 0.18700193166732787,
"memory(GiB)": 36.99,
"step": 65,
"token_acc": 0.9183423809159722,
"train_speed(iter/s)": 0.103789
},
{
"epoch": 0.1454734381088453,
"grad_norm": 1.8984375,
"learning_rate": 4.880814407187037e-06,
"loss": 0.1833273410797119,
"memory(GiB)": 36.99,
"step": 70,
"token_acc": 0.9198803956145059,
"train_speed(iter/s)": 0.103777
},
{
"epoch": 0.15586439797376284,
"grad_norm": 1.8046875,
"learning_rate": 4.8531339813349995e-06,
"loss": 0.17667144536972046,
"memory(GiB)": 36.99,
"step": 75,
"token_acc": 0.9221237549996079,
"train_speed(iter/s)": 0.101758
},
{
"epoch": 0.16625535783868034,
"grad_norm": 1.8203125,
"learning_rate": 4.822661565995454e-06,
"loss": 0.17785824537277223,
"memory(GiB)": 36.99,
"step": 80,
"token_acc": 0.9208750290900628,
"train_speed(iter/s)": 0.096873
},
{
"epoch": 0.17664631770359787,
"grad_norm": 1.5390625,
"learning_rate": 4.789433316637644e-06,
"loss": 0.18061867952346802,
"memory(GiB)": 36.99,
"step": 85,
"token_acc": 0.9204729886389984,
"train_speed(iter/s)": 0.092995
},
{
"epoch": 0.1870372775685154,
"grad_norm": 1.8828125,
"learning_rate": 4.753488658523212e-06,
"loss": 0.17701296806335448,
"memory(GiB)": 36.99,
"step": 90,
"token_acc": 0.9225134926754048,
"train_speed(iter/s)": 0.089657
},
{
"epoch": 0.1974282374334329,
"grad_norm": 3.21875,
"learning_rate": 4.714870239928195e-06,
"loss": 0.1849340558052063,
"memory(GiB)": 36.99,
"step": 95,
"token_acc": 0.9169326586220319,
"train_speed(iter/s)": 0.086893
},
{
"epoch": 0.20781919729835044,
"grad_norm": 3.3125,
"learning_rate": 4.673623881540917e-06,
"loss": 0.1811493992805481,
"memory(GiB)": 36.99,
"step": 100,
"token_acc": 0.9194093158432174,
"train_speed(iter/s)": 0.084511
},
{
"epoch": 0.20781919729835044,
"eval_loss": 0.17230889201164246,
"eval_runtime": 1320.3184,
"eval_samples_per_second": 60.881,
"eval_steps_per_second": 3.805,
"eval_token_acc": 0.9252880661892655,
"step": 100
},
{
"epoch": 0.21821015716326797,
"grad_norm": 1.9375,
"learning_rate": 4.629798522095818e-06,
"loss": 0.1755787253379822,
"memory(GiB)": 36.99,
"step": 105,
"token_acc": 0.925254316998113,
"train_speed(iter/s)": 0.03748
},
{
"epoch": 0.22860111702818547,
"grad_norm": 1.6484375,
"learning_rate": 4.583446160307729e-06,
"loss": 0.16902718544006348,
"memory(GiB)": 36.99,
"step": 110,
"token_acc": 0.9260815437286025,
"train_speed(iter/s)": 0.0381
},
{
"epoch": 0.238992076893103,
"grad_norm": 3.0,
"learning_rate": 4.534621793175488e-06,
"loss": 0.17990721464157106,
"memory(GiB)": 36.99,
"step": 115,
"token_acc": 0.9196615695102073,
"train_speed(iter/s)": 0.038616
},
{
"epoch": 0.2493830367580205,
"grad_norm": 1.984375,
"learning_rate": 4.4833833507280884e-06,
"loss": 0.17740323543548583,
"memory(GiB)": 36.99,
"step": 120,
"token_acc": 0.9212245219170536,
"train_speed(iter/s)": 0.039106
},
{
"epoch": 0.25977399662293804,
"grad_norm": 2.03125,
"learning_rate": 4.429791627290802e-06,
"loss": 0.17831547260284425,
"memory(GiB)": 36.99,
"step": 125,
"token_acc": 0.9183422870299309,
"train_speed(iter/s)": 0.039569
},
{
"epoch": 0.27016495648785555,
"grad_norm": 1.5546875,
"learning_rate": 4.373910209352816e-06,
"loss": 0.1763285517692566,
"memory(GiB)": 36.99,
"step": 130,
"token_acc": 0.9201423817998917,
"train_speed(iter/s)": 0.040384
},
{
"epoch": 0.2805559163527731,
"grad_norm": 1.625,
"learning_rate": 4.315805400121963e-06,
"loss": 0.17515565156936647,
"memory(GiB)": 36.99,
"step": 135,
"token_acc": 0.9202636948673678,
"train_speed(iter/s)": 0.041134
},
{
"epoch": 0.2909468762176906,
"grad_norm": 2.09375,
"learning_rate": 4.255546140856087e-06,
"loss": 0.18066967725753785,
"memory(GiB)": 36.99,
"step": 140,
"token_acc": 0.9194688203774171,
"train_speed(iter/s)": 0.041899
},
{
"epoch": 0.3013378360826081,
"grad_norm": 1.140625,
"learning_rate": 4.1932039290643534e-06,
"loss": 0.18014354705810548,
"memory(GiB)": 36.99,
"step": 145,
"token_acc": 0.9178993503952414,
"train_speed(iter/s)": 0.042363
},
{
"epoch": 0.31172879594752567,
"grad_norm": 1.3515625,
"learning_rate": 4.128852733675572e-06,
"loss": 0.17886133193969728,
"memory(GiB)": 36.99,
"step": 150,
"token_acc": 0.9210793106137409,
"train_speed(iter/s)": 0.042968
},
{
"epoch": 0.3221197558124432,
"grad_norm": 1.7734375,
"learning_rate": 4.062568907274197e-06,
"loss": 0.18089871406555175,
"memory(GiB)": 36.99,
"step": 155,
"token_acc": 0.9191895486192599,
"train_speed(iter/s)": 0.043361
},
{
"epoch": 0.3325107156773607,
"grad_norm": 1.7421875,
"learning_rate": 3.994431095508102e-06,
"loss": 0.17732292413711548,
"memory(GiB)": 36.99,
"step": 160,
"token_acc": 0.9188111067657411,
"train_speed(iter/s)": 0.043692
},
{
"epoch": 0.34290167554227824,
"grad_norm": 1.78125,
"learning_rate": 3.9245201437756655e-06,
"loss": 0.1741779327392578,
"memory(GiB)": 36.99,
"step": 165,
"token_acc": 0.9216403392732083,
"train_speed(iter/s)": 0.043984
},
{
"epoch": 0.35329263540719574,
"grad_norm": 1.875,
"learning_rate": 3.852919001302833e-06,
"loss": 0.17619532346725464,
"memory(GiB)": 36.99,
"step": 170,
"token_acc": 0.9227966824277187,
"train_speed(iter/s)": 0.044279
},
{
"epoch": 0.36368359527211325,
"grad_norm": 3.71875,
"learning_rate": 3.779712622724003e-06,
"loss": 0.17437649965286256,
"memory(GiB)": 36.99,
"step": 175,
"token_acc": 0.9230951628201075,
"train_speed(iter/s)": 0.044551
},
{
"epoch": 0.3740745551370308,
"grad_norm": 1.84375,
"learning_rate": 3.704987867283499e-06,
"loss": 0.17724401950836183,
"memory(GiB)": 36.99,
"step": 180,
"token_acc": 0.9202978056426332,
"train_speed(iter/s)": 0.044808
},
{
"epoch": 0.3844655150019483,
"grad_norm": 2.1875,
"learning_rate": 3.628833395777224e-06,
"loss": 0.18116596937179566,
"memory(GiB)": 36.99,
"step": 185,
"token_acc": 0.9170886075949367,
"train_speed(iter/s)": 0.045056
},
{
"epoch": 0.3948564748668658,
"grad_norm": 2.046875,
"learning_rate": 3.551339565356769e-06,
"loss": 0.17593677043914796,
"memory(GiB)": 36.99,
"step": 190,
"token_acc": 0.9202586206896551,
"train_speed(iter/s)": 0.045325
},
{
"epoch": 0.4052474347317834,
"grad_norm": 2.0,
"learning_rate": 3.4725983223208155e-06,
"loss": 0.1823140025138855,
"memory(GiB)": 36.99,
"step": 195,
"token_acc": 0.917639683410391,
"train_speed(iter/s)": 0.04554
},
{
"epoch": 0.4156383945967009,
"grad_norm": 1.7109375,
"learning_rate": 3.392703093021e-06,
"loss": 0.18079570531845093,
"memory(GiB)": 36.99,
"step": 200,
"token_acc": 0.9180621543733936,
"train_speed(iter/s)": 0.045874
},
{
"epoch": 0.4156383945967009,
"eval_loss": 0.17064262926578522,
"eval_runtime": 1310.4171,
"eval_samples_per_second": 61.341,
"eval_steps_per_second": 3.834,
"eval_token_acc": 0.9238987477359346,
"step": 200
},
{
"epoch": 0.4260293544616184,
"grad_norm": 1.7890625,
"learning_rate": 3.3117486730117092e-06,
"loss": 0.181607449054718,
"memory(GiB)": 36.99,
"step": 205,
"token_acc": 0.9238299582974754,
"train_speed(iter/s)": 0.034354
},
{
"epoch": 0.43642031432653594,
"grad_norm": 2.0625,
"learning_rate": 3.229831114575315e-06,
"loss": 0.17884130477905275,
"memory(GiB)": 36.99,
"step": 210,
"token_acc": 0.9180160352145889,
"train_speed(iter/s)": 0.034671
},
{
"epoch": 0.44681127419145344,
"grad_norm": 1.484375,
"learning_rate": 3.147047612756302e-06,
"loss": 0.174602210521698,
"memory(GiB)": 36.99,
"step": 215,
"token_acc": 0.920204921214003,
"train_speed(iter/s)": 0.034976
},
{
"epoch": 0.45720223405637095,
"grad_norm": 1.234375,
"learning_rate": 3.063496390039516e-06,
"loss": 0.1783522129058838,
"memory(GiB)": 36.99,
"step": 220,
"token_acc": 0.9198404629702042,
"train_speed(iter/s)": 0.035377
},
{
"epoch": 0.46759319392128845,
"grad_norm": 1.78125,
"learning_rate": 2.9792765798093466e-06,
"loss": 0.179638671875,
"memory(GiB)": 36.99,
"step": 225,
"token_acc": 0.9207520783155932,
"train_speed(iter/s)": 0.035838
},
{
"epoch": 0.477984153786206,
"grad_norm": 1.921875,
"learning_rate": 2.8944881087281375e-06,
"loss": 0.18421536684036255,
"memory(GiB)": 36.99,
"step": 230,
"token_acc": 0.916609076249712,
"train_speed(iter/s)": 0.036335
},
{
"epoch": 0.4883751136511235,
"grad_norm": 1.7890625,
"learning_rate": 2.80923157817337e-06,
"loss": 0.17683053016662598,
"memory(GiB)": 36.99,
"step": 235,
"token_acc": 0.9213091922005571,
"train_speed(iter/s)": 0.036825
},
{
"epoch": 0.498766073516041,
"grad_norm": 1.84375,
"learning_rate": 2.723608144874298e-06,
"loss": 0.17713736295700072,
"memory(GiB)": 36.99,
"step": 240,
"token_acc": 0.92035536159601,
"train_speed(iter/s)": 0.037143
},
{
"epoch": 0.5091570333809585,
"grad_norm": 1.5234375,
"learning_rate": 2.637719400889664e-06,
"loss": 0.17735506296157838,
"memory(GiB)": 36.99,
"step": 245,
"token_acc": 0.9191973969631236,
"train_speed(iter/s)": 0.037405
},
{
"epoch": 0.5195479932458761,
"grad_norm": 1.3828125,
"learning_rate": 2.5516672530688864e-06,
"loss": 0.18297756910324098,
"memory(GiB)": 36.99,
"step": 250,
"token_acc": 0.9193918758413177,
"train_speed(iter/s)": 0.037652
},
{
"epoch": 0.5299389531107936,
"grad_norm": 2.203125,
"learning_rate": 2.4655538021397592e-06,
"loss": 0.17588781118392943,
"memory(GiB)": 36.99,
"step": 255,
"token_acc": 0.9216273752266814,
"train_speed(iter/s)": 0.037896
},
{
"epoch": 0.5403299129757111,
"grad_norm": 1.6796875,
"learning_rate": 2.3794812215661134e-06,
"loss": 0.18080203533172606,
"memory(GiB)": 36.99,
"step": 260,
"token_acc": 0.921203216826477,
"train_speed(iter/s)": 0.038125
},
{
"epoch": 0.5507208728406287,
"grad_norm": 2.6875,
"learning_rate": 2.2935516363191695e-06,
"loss": 0.18104695081710814,
"memory(GiB)": 36.99,
"step": 265,
"token_acc": 0.9180170831005029,
"train_speed(iter/s)": 0.038349
},
{
"epoch": 0.5611118327055462,
"grad_norm": 1.7578125,
"learning_rate": 2.2078670017064366e-06,
"loss": 0.1768229365348816,
"memory(GiB)": 36.99,
"step": 270,
"token_acc": 0.9203719621786356,
"train_speed(iter/s)": 0.038567
},
{
"epoch": 0.5715027925704637,
"grad_norm": 1.34375,
"learning_rate": 2.1225289824019077e-06,
"loss": 0.17726024389266967,
"memory(GiB)": 36.99,
"step": 275,
"token_acc": 0.9191293295201779,
"train_speed(iter/s)": 0.038795
},
{
"epoch": 0.5818937524353812,
"grad_norm": 1.71875,
"learning_rate": 2.037638831821104e-06,
"loss": 0.18150538206100464,
"memory(GiB)": 36.99,
"step": 280,
"token_acc": 0.9174561267277528,
"train_speed(iter/s)": 0.03901
},
{
"epoch": 0.5922847123002988,
"grad_norm": 1.546875,
"learning_rate": 1.953297271984061e-06,
"loss": 0.1750028371810913,
"memory(GiB)": 36.99,
"step": 285,
"token_acc": 0.9228211275230067,
"train_speed(iter/s)": 0.039295
},
{
"epoch": 0.6026756721652162,
"grad_norm": 1.234375,
"learning_rate": 1.8696043740088236e-06,
"loss": 0.17536247968673707,
"memory(GiB)": 36.99,
"step": 290,
"token_acc": 0.9208355815925383,
"train_speed(iter/s)": 0.039587
},
{
"epoch": 0.6130666320301338,
"grad_norm": 1.5390625,
"learning_rate": 1.7866594393772375e-06,
"loss": 0.1767476439476013,
"memory(GiB)": 36.99,
"step": 295,
"token_acc": 0.9220417633410672,
"train_speed(iter/s)": 0.039803
},
{
"epoch": 0.6234575918950513,
"grad_norm": 1.9140625,
"learning_rate": 1.7045608821139045e-06,
"loss": 0.17351619005203248,
"memory(GiB)": 36.99,
"step": 300,
"token_acc": 0.9228345246330029,
"train_speed(iter/s)": 0.040008
},
{
"epoch": 0.6234575918950513,
"eval_loss": 0.16996929049491882,
"eval_runtime": 1338.6589,
"eval_samples_per_second": 60.047,
"eval_steps_per_second": 3.753,
"eval_token_acc": 0.9252277899263542,
"step": 300
},
{
"epoch": 0.6338485517599688,
"grad_norm": 1.7109375,
"learning_rate": 1.6234061120181144e-06,
"loss": 0.1729782223701477,
"memory(GiB)": 36.99,
"step": 305,
"token_acc": 0.9252146572948351,
"train_speed(iter/s)": 0.033422
},
{
"epoch": 0.6442395116248864,
"grad_norm": 2.296875,
"learning_rate": 1.5432914190872757e-06,
"loss": 0.1758588194847107,
"memory(GiB)": 36.99,
"step": 310,
"token_acc": 0.9202410211686355,
"train_speed(iter/s)": 0.033685
},
{
"epoch": 0.6546304714898039,
"grad_norm": 2.03125,
"learning_rate": 1.464311859269003e-06,
"loss": 0.17637253999710084,
"memory(GiB)": 36.99,
"step": 315,
"token_acc": 0.9202949713658115,
"train_speed(iter/s)": 0.034017
},
{
"epoch": 0.6650214313547214,
"grad_norm": 2.296875,
"learning_rate": 1.3865611416773921e-06,
"loss": 0.1803775191307068,
"memory(GiB)": 36.99,
"step": 320,
"token_acc": 0.9197953561589925,
"train_speed(iter/s)": 0.034317
},
{
"epoch": 0.6754123912196389,
"grad_norm": 2.046875,
"learning_rate": 1.3101315174073162e-06,
"loss": 0.17441234588623047,
"memory(GiB)": 36.99,
"step": 325,
"token_acc": 0.9233814625058221,
"train_speed(iter/s)": 0.034642
},
{
"epoch": 0.6858033510845565,
"grad_norm": 1.640625,
"learning_rate": 1.235113670078658e-06,
"loss": 0.17859526872634887,
"memory(GiB)": 36.99,
"step": 330,
"token_acc": 0.9203665665981988,
"train_speed(iter/s)": 0.034906
},
{
"epoch": 0.6961943109494739,
"grad_norm": 1.375,
"learning_rate": 1.161596608240349e-06,
"loss": 0.17650686502456664,
"memory(GiB)": 36.99,
"step": 335,
"token_acc": 0.9200734956361966,
"train_speed(iter/s)": 0.035109
},
{
"epoch": 0.7065852708143915,
"grad_norm": 1.7734375,
"learning_rate": 1.0896675597618725e-06,
"loss": 0.18125290870666505,
"memory(GiB)": 36.99,
"step": 340,
"token_acc": 0.9172004012655297,
"train_speed(iter/s)": 0.035306
},
{
"epoch": 0.716976230679309,
"grad_norm": 1.046875,
"learning_rate": 1.0194118683375502e-06,
"loss": 0.1810152769088745,
"memory(GiB)": 36.99,
"step": 345,
"token_acc": 0.9158885657431501,
"train_speed(iter/s)": 0.035495
},
{
"epoch": 0.7273671905442265,
"grad_norm": 2.203125,
"learning_rate": 9.509128922263886e-07,
"loss": 0.17332950830459595,
"memory(GiB)": 36.99,
"step": 350,
"token_acc": 0.9222935923546921,
"train_speed(iter/s)": 0.035686
},
{
"epoch": 0.737758150409144,
"grad_norm": 1.28125,
"learning_rate": 8.842519053476476e-07,
"loss": 0.17291887998580932,
"memory(GiB)": 36.99,
"step": 355,
"token_acc": 0.9233901951744276,
"train_speed(iter/s)": 0.035869
},
{
"epoch": 0.7481491102740616,
"grad_norm": 1.5859375,
"learning_rate": 8.195080008494744e-07,
"loss": 0.17527254819869995,
"memory(GiB)": 36.99,
"step": 360,
"token_acc": 0.9194406927782632,
"train_speed(iter/s)": 0.03605
},
{
"epoch": 0.7585400701389791,
"grad_norm": 1.8984375,
"learning_rate": 7.567579972650116e-07,
"loss": 0.1758326292037964,
"memory(GiB)": 36.99,
"step": 365,
"token_acc": 0.9222837796244998,
"train_speed(iter/s)": 0.036228
},
{
"epoch": 0.7689310300038966,
"grad_norm": 1.4453125,
"learning_rate": 6.960763473673451e-07,
"loss": 0.17296208143234254,
"memory(GiB)": 36.99,
"step": 370,
"token_acc": 0.9199661069172701,
"train_speed(iter/s)": 0.036459
},
{
"epoch": 0.7793219898688142,
"grad_norm": 2.15625,
"learning_rate": 6.375350498314075e-07,
"loss": 0.17531843185424806,
"memory(GiB)": 36.99,
"step": 375,
"token_acc": 0.921526620645878,
"train_speed(iter/s)": 0.036681
},
{
"epoch": 0.7897129497337316,
"grad_norm": 1.453125,
"learning_rate": 5.812035638076785e-07,
"loss": 0.17355836629867555,
"memory(GiB)": 36.99,
"step": 380,
"token_acc": 0.9211911683821796,
"train_speed(iter/s)": 0.036859
},
{
"epoch": 0.8001039095986492,
"grad_norm": 2.03125,
"learning_rate": 5.271487265090163e-07,
"loss": 0.1834742784500122,
"memory(GiB)": 36.99,
"step": 385,
"token_acc": 0.9166990442147797,
"train_speed(iter/s)": 0.037022
},
{
"epoch": 0.8104948694635667,
"grad_norm": 1.78125,
"learning_rate": 4.754346739084173e-07,
"loss": 0.17866382598876954,
"memory(GiB)": 36.99,
"step": 390,
"token_acc": 0.9190055507778907,
"train_speed(iter/s)": 0.037179
},
{
"epoch": 0.8208858293284842,
"grad_norm": 1.7265625,
"learning_rate": 4.2612276464179673e-07,
"loss": 0.16837071180343627,
"memory(GiB)": 36.99,
"step": 395,
"token_acc": 0.925252047889099,
"train_speed(iter/s)": 0.037337
},
{
"epoch": 0.8312767891934018,
"grad_norm": 1.625,
"learning_rate": 3.7927150720606596e-07,
"loss": 0.17911137342453004,
"memory(GiB)": 36.99,
"step": 400,
"token_acc": 0.920388196307401,
"train_speed(iter/s)": 0.037494
},
{
"epoch": 0.8312767891934018,
"eval_loss": 0.16972462832927704,
"eval_runtime": 1325.8316,
"eval_samples_per_second": 60.628,
"eval_steps_per_second": 3.789,
"eval_token_acc": 0.9239007240068497,
"step": 400
},
{
"epoch": 0.8416677490583193,
"grad_norm": 1.546875,
"learning_rate": 3.3493649053890325e-07,
"loss": 0.17124630212783815,
"memory(GiB)": 36.99,
"step": 405,
"token_acc": 0.9238681922697054,
"train_speed(iter/s)": 0.032973
},
{
"epoch": 0.8520587089232368,
"grad_norm": 2.25,
"learning_rate": 2.931703180625736e-07,
"loss": 0.17123721837997435,
"memory(GiB)": 36.99,
"step": 410,
"token_acc": 0.9216517506672947,
"train_speed(iter/s)": 0.033214
},
{
"epoch": 0.8624496687881543,
"grad_norm": 1.5859375,
"learning_rate": 2.5402254527005286e-07,
"loss": 0.17746515274047853,
"memory(GiB)": 36.99,
"step": 415,
"token_acc": 0.9180823830579474,
"train_speed(iter/s)": 0.033458
},
{
"epoch": 0.8728406286530719,
"grad_norm": 1.3984375,
"learning_rate": 2.1753962092752545e-07,
"loss": 0.1777236580848694,
"memory(GiB)": 36.99,
"step": 420,
"token_acc": 0.9190801349972529,
"train_speed(iter/s)": 0.033705
},
{
"epoch": 0.8832315885179893,
"grad_norm": 1.53125,
"learning_rate": 1.837648319629956e-07,
"loss": 0.17560485601425171,
"memory(GiB)": 36.99,
"step": 425,
"token_acc": 0.9201813013751249,
"train_speed(iter/s)": 0.033868
},
{
"epoch": 0.8936225483829069,
"grad_norm": 1.7578125,
"learning_rate": 1.5273825210642608e-07,
"loss": 0.17915327548980714,
"memory(GiB)": 36.99,
"step": 430,
"token_acc": 0.9182488623882002,
"train_speed(iter/s)": 0.034029
},
{
"epoch": 0.9040135082478243,
"grad_norm": 1.3671875,
"learning_rate": 1.2449669434232202e-07,
"loss": 0.17606736421585084,
"memory(GiB)": 36.99,
"step": 435,
"token_acc": 0.9228688461241209,
"train_speed(iter/s)": 0.034183
},
{
"epoch": 0.9144044681127419,
"grad_norm": 1.546875,
"learning_rate": 9.907366723118678e-08,
"loss": 0.17585525512695313,
"memory(GiB)": 36.99,
"step": 440,
"token_acc": 0.9183866393571766,
"train_speed(iter/s)": 0.034333
},
{
"epoch": 0.9247954279776595,
"grad_norm": 1.5,
"learning_rate": 7.649933515167407e-08,
"loss": 0.16930280923843383,
"memory(GiB)": 36.99,
"step": 445,
"token_acc": 0.9238362821778419,
"train_speed(iter/s)": 0.034485
},
{
"epoch": 0.9351863878425769,
"grad_norm": 2.078125,
"learning_rate": 5.6800482510601937e-08,
"loss": 0.1705850124359131,
"memory(GiB)": 36.99,
"step": 450,
"token_acc": 0.925625,
"train_speed(iter/s)": 0.03464
},
{
"epoch": 0.9455773477074945,
"grad_norm": 1.5703125,
"learning_rate": 4.000048196330014e-08,
"loss": 0.1817856550216675,
"memory(GiB)": 36.99,
"step": 455,
"token_acc": 0.9208294062205467,
"train_speed(iter/s)": 0.03485
},
{
"epoch": 0.955968307572412,
"grad_norm": 1.234375,
"learning_rate": 2.611926668199316e-08,
"loss": 0.1783198595046997,
"memory(GiB)": 36.99,
"step": 460,
"token_acc": 0.9211605415860735,
"train_speed(iter/s)": 0.035001
},
{
"epoch": 0.9663592674373295,
"grad_norm": 1.59375,
"learning_rate": 1.517330670512629e-08,
"loss": 0.17677730321884155,
"memory(GiB)": 36.99,
"step": 465,
"token_acc": 0.9180868609125893,
"train_speed(iter/s)": 0.03514
},
{
"epoch": 0.976750227302247,
"grad_norm": 1.984375,
"learning_rate": 7.175589395692351e-09,
"loss": 0.17313094139099122,
"memory(GiB)": 36.99,
"step": 470,
"token_acc": 0.9224636548382054,
"train_speed(iter/s)": 0.03528
},
{
"epoch": 0.9871411871671646,
"grad_norm": 2.0625,
"learning_rate": 2.1356040317474512e-09,
"loss": 0.1789810061454773,
"memory(GiB)": 36.99,
"step": 475,
"token_acc": 0.918305714728202,
"train_speed(iter/s)": 0.035421
},
{
"epoch": 0.997532147032082,
"grad_norm": 1.296875,
"learning_rate": 5.933054739837296e-11,
"loss": 0.17718052864074707,
"memory(GiB)": 36.99,
"step": 480,
"token_acc": 0.9196187450357427,
"train_speed(iter/s)": 0.03556
},
{
"epoch": 0.9996103390050656,
"eval_loss": 0.16983823478221893,
"eval_runtime": 1324.7962,
"eval_samples_per_second": 60.675,
"eval_steps_per_second": 3.792,
"eval_token_acc": 0.9239886680625727,
"step": 481
}
],
"logging_steps": 5,
"max_steps": 481,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.1002858766178714e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}