{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9974145360528583,
"eval_steps": 100,
"global_step": 870,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02298190175237001,
"grad_norm": 21.890032542057828,
"learning_rate": 3.703703703703703e-07,
"loss": 1.5181,
"step": 10
},
{
"epoch": 0.04596380350474002,
"grad_norm": 17.89946711651998,
"learning_rate": 7.407407407407406e-07,
"loss": 1.3746,
"step": 20
},
{
"epoch": 0.06894570525711002,
"grad_norm": 15.887341020456326,
"learning_rate": 9.999687519737637e-07,
"loss": 0.865,
"step": 30
},
{
"epoch": 0.09192760700948004,
"grad_norm": 1.0163819462384687,
"learning_rate": 9.994133401546208e-07,
"loss": 0.3276,
"step": 40
},
{
"epoch": 0.11490950876185005,
"grad_norm": 1.0056706102857214,
"learning_rate": 9.981644155508344e-07,
"loss": 0.2775,
"step": 50
},
{
"epoch": 0.13789141051422005,
"grad_norm": 1.0068467950472904,
"learning_rate": 9.962237124876827e-07,
"loss": 0.2712,
"step": 60
},
{
"epoch": 0.16087331226659007,
"grad_norm": 0.9154605564263402,
"learning_rate": 9.935939259319937e-07,
"loss": 0.2657,
"step": 70
},
{
"epoch": 0.18385521401896007,
"grad_norm": 1.121367655313322,
"learning_rate": 9.902787077497684e-07,
"loss": 0.2578,
"step": 80
},
{
"epoch": 0.20683711577133007,
"grad_norm": 1.0702818495131539,
"learning_rate": 9.86282661634998e-07,
"loss": 0.2541,
"step": 90
},
{
"epoch": 0.2298190175237001,
"grad_norm": 1.0093420472045618,
"learning_rate": 9.816113367167225e-07,
"loss": 0.2571,
"step": 100
},
{
"epoch": 0.25280091927607007,
"grad_norm": 1.006545415842139,
"learning_rate": 9.76271219853204e-07,
"loss": 0.2466,
"step": 110
},
{
"epoch": 0.2757828210284401,
"grad_norm": 1.1026022447408952,
"learning_rate": 9.70269726623921e-07,
"loss": 0.2489,
"step": 120
},
{
"epoch": 0.2987647227808101,
"grad_norm": 0.8739243814672304,
"learning_rate": 9.636151910318863e-07,
"loss": 0.2472,
"step": 130
},
{
"epoch": 0.32174662453318015,
"grad_norm": 0.9738602026104726,
"learning_rate": 9.563168539305926e-07,
"loss": 0.2437,
"step": 140
},
{
"epoch": 0.3447285262855501,
"grad_norm": 1.000438187236006,
"learning_rate": 9.483848501916577e-07,
"loss": 0.2441,
"step": 150
},
{
"epoch": 0.36771042803792015,
"grad_norm": 0.8389273932609584,
"learning_rate": 9.398301946309833e-07,
"loss": 0.2411,
"step": 160
},
{
"epoch": 0.39069232979029017,
"grad_norm": 1.0032600749058809,
"learning_rate": 9.306647667129779e-07,
"loss": 0.246,
"step": 170
},
{
"epoch": 0.41367423154266014,
"grad_norm": 0.9211746122929653,
"learning_rate": 9.209012940540805e-07,
"loss": 0.2474,
"step": 180
},
{
"epoch": 0.43665613329503017,
"grad_norm": 1.1059285797814897,
"learning_rate": 9.105533347484926e-07,
"loss": 0.2419,
"step": 190
},
{
"epoch": 0.4596380350474002,
"grad_norm": 1.0804224791941734,
"learning_rate": 8.996352585406653e-07,
"loss": 0.2511,
"step": 200
},
{
"epoch": 0.48261993679977017,
"grad_norm": 0.8337177518200303,
"learning_rate": 8.881622268706824e-07,
"loss": 0.2419,
"step": 210
},
{
"epoch": 0.5056018385521401,
"grad_norm": 1.362346904919404,
"learning_rate": 8.761501718202527e-07,
"loss": 0.2472,
"step": 220
},
{
"epoch": 0.5285837403045102,
"grad_norm": 1.3389339778682794,
"learning_rate": 8.636157739885461e-07,
"loss": 0.2406,
"step": 230
},
{
"epoch": 0.5515656420568802,
"grad_norm": 1.1000293691138154,
"learning_rate": 8.505764393285983e-07,
"loss": 0.2498,
"step": 240
},
{
"epoch": 0.5745475438092502,
"grad_norm": 1.0710195845349588,
"learning_rate": 8.370502749764485e-07,
"loss": 0.2364,
"step": 250
},
{
"epoch": 0.5975294455616202,
"grad_norm": 0.922079995841667,
"learning_rate": 8.230560641065758e-07,
"loss": 0.2462,
"step": 260
},
{
"epoch": 0.6205113473139903,
"grad_norm": 1.2020153580162134,
"learning_rate": 8.086132398485523e-07,
"loss": 0.2391,
"step": 270
},
{
"epoch": 0.6434932490663603,
"grad_norm": 0.9590899100670737,
"learning_rate": 7.937418583011324e-07,
"loss": 0.2417,
"step": 280
},
{
"epoch": 0.6664751508187302,
"grad_norm": 1.0736359668429754,
"learning_rate": 7.784625706812521e-07,
"loss": 0.2375,
"step": 290
},
{
"epoch": 0.6894570525711002,
"grad_norm": 1.1460847311389701,
"learning_rate": 7.627965946466166e-07,
"loss": 0.2295,
"step": 300
},
{
"epoch": 0.7124389543234703,
"grad_norm": 0.9143741882547864,
"learning_rate": 7.467656848316945e-07,
"loss": 0.2348,
"step": 310
},
{
"epoch": 0.7354208560758403,
"grad_norm": 0.9513901903630219,
"learning_rate": 7.303921026380388e-07,
"loss": 0.2354,
"step": 320
},
{
"epoch": 0.7584027578282103,
"grad_norm": 0.9652157742697586,
"learning_rate": 7.136985853208823e-07,
"loss": 0.2348,
"step": 330
},
{
"epoch": 0.7813846595805803,
"grad_norm": 1.080040265385343,
"learning_rate": 6.967083144149364e-07,
"loss": 0.2365,
"step": 340
},
{
"epoch": 0.8043665613329503,
"grad_norm": 0.9632445450114392,
"learning_rate": 6.794448835432393e-07,
"loss": 0.2406,
"step": 350
},
{
"epoch": 0.8273484630853203,
"grad_norm": 1.0092046798837944,
"learning_rate": 6.619322656537552e-07,
"loss": 0.2455,
"step": 360
},
{
"epoch": 0.8503303648376903,
"grad_norm": 0.9979460678237817,
"learning_rate": 6.441947797292236e-07,
"loss": 0.2402,
"step": 370
},
{
"epoch": 0.8733122665900603,
"grad_norm": 1.2291176835124817,
"learning_rate": 6.262570570164836e-07,
"loss": 0.2469,
"step": 380
},
{
"epoch": 0.8962941683424304,
"grad_norm": 0.9838745623679475,
"learning_rate": 6.081440068221722e-07,
"loss": 0.2416,
"step": 390
},
{
"epoch": 0.9192760700948004,
"grad_norm": 1.2545357047588914,
"learning_rate": 5.898807819222944e-07,
"loss": 0.2525,
"step": 400
},
{
"epoch": 0.9422579718471703,
"grad_norm": 1.0290119343425803,
"learning_rate": 5.714927436336963e-07,
"loss": 0.241,
"step": 410
},
{
"epoch": 0.9652398735995403,
"grad_norm": 1.349338299959401,
"learning_rate": 5.530054265959485e-07,
"loss": 0.2325,
"step": 420
},
{
"epoch": 0.9882217753519104,
"grad_norm": 1.1767063307657506,
"learning_rate": 5.344445033125437e-07,
"loss": 0.24,
"step": 430
},
{
"epoch": 1.009192760700948,
"grad_norm": 1.3236537841278382,
"learning_rate": 5.15835748500649e-07,
"loss": 0.2161,
"step": 440
},
{
"epoch": 1.032174662453318,
"grad_norm": 1.0102024212077714,
"learning_rate": 4.972050032989174e-07,
"loss": 0.2341,
"step": 450
},
{
"epoch": 1.055156564205688,
"grad_norm": 1.0307237113556675,
"learning_rate": 4.785781393830657e-07,
"loss": 0.2363,
"step": 460
},
{
"epoch": 1.078138465958058,
"grad_norm": 0.9294997507108657,
"learning_rate": 4.5998102303904327e-07,
"loss": 0.2411,
"step": 470
},
{
"epoch": 1.101120367710428,
"grad_norm": 1.1787927936886144,
"learning_rate": 4.4143947924368765e-07,
"loss": 0.2384,
"step": 480
},
{
"epoch": 1.124102269462798,
"grad_norm": 1.0331361385053468,
"learning_rate": 4.2297925580274513e-07,
"loss": 0.2326,
"step": 490
},
{
"epoch": 1.147084171215168,
"grad_norm": 0.7582355320386759,
"learning_rate": 4.0462598759605194e-07,
"loss": 0.2368,
"step": 500
},
{
"epoch": 1.170066072967538,
"grad_norm": 1.1513908345563186,
"learning_rate": 3.8640516097953404e-07,
"loss": 0.2404,
"step": 510
},
{
"epoch": 1.193047974719908,
"grad_norm": 1.1163970644132746,
"learning_rate": 3.683420783934537e-07,
"loss": 0.2375,
"step": 520
},
{
"epoch": 1.2160298764722781,
"grad_norm": 1.212072403924197,
"learning_rate": 3.50461823226051e-07,
"loss": 0.2334,
"step": 530
},
{
"epoch": 1.2390117782246481,
"grad_norm": 0.8278352713319073,
"learning_rate": 3.3278922498137454e-07,
"loss": 0.2369,
"step": 540
},
{
"epoch": 1.2619936799770182,
"grad_norm": 1.2571750371248345,
"learning_rate": 3.153488247996686e-07,
"loss": 0.2382,
"step": 550
},
{
"epoch": 1.284975581729388,
"grad_norm": 1.2851970626830909,
"learning_rate": 2.981648413781984e-07,
"loss": 0.231,
"step": 560
},
{
"epoch": 1.3079574834817582,
"grad_norm": 1.0730055633289826,
"learning_rate": 2.8126113733983646e-07,
"loss": 0.2388,
"step": 570
},
{
"epoch": 1.330939385234128,
"grad_norm": 1.1721323197103255,
"learning_rate": 2.6466118609611595e-07,
"loss": 0.2349,
"step": 580
},
{
"epoch": 1.353921286986498,
"grad_norm": 1.1634241170511743,
"learning_rate": 2.4838803925076145e-07,
"loss": 0.2365,
"step": 590
},
{
"epoch": 1.376903188738868,
"grad_norm": 1.2271083897584125,
"learning_rate": 2.3246429458896632e-07,
"loss": 0.2389,
"step": 600
},
{
"epoch": 1.399885090491238,
"grad_norm": 1.0613668223532369,
"learning_rate": 2.1691206469686806e-07,
"loss": 0.2367,
"step": 610
},
{
"epoch": 1.4228669922436081,
"grad_norm": 1.1652161586747365,
"learning_rate": 2.0175294625479694e-07,
"loss": 0.2397,
"step": 620
},
{
"epoch": 1.4458488939959782,
"grad_norm": 1.186909418739316,
"learning_rate": 1.8700799004693917e-07,
"loss": 0.2329,
"step": 630
},
{
"epoch": 1.4688307957483482,
"grad_norm": 1.3370986612612854,
"learning_rate": 1.7269767172906352e-07,
"loss": 0.2378,
"step": 640
},
{
"epoch": 1.4918126975007182,
"grad_norm": 1.357733016572047,
"learning_rate": 1.5884186339490007e-07,
"loss": 0.2344,
"step": 650
},
{
"epoch": 1.5147945992530882,
"grad_norm": 1.1772497942529265,
"learning_rate": 1.4545980598066088e-07,
"loss": 0.2375,
"step": 660
},
{
"epoch": 1.537776501005458,
"grad_norm": 0.9971294241414196,
"learning_rate": 1.325700825460192e-07,
"loss": 0.2355,
"step": 670
},
{
"epoch": 1.5607584027578283,
"grad_norm": 1.1231296302062765,
"learning_rate": 1.201905924686518e-07,
"loss": 0.238,
"step": 680
},
{
"epoch": 1.583740304510198,
"grad_norm": 1.0602709178915006,
"learning_rate": 1.0833852658818165e-07,
"loss": 0.2396,
"step": 690
},
{
"epoch": 1.6067222062625683,
"grad_norm": 1.0422028688805935,
"learning_rate": 9.703034333403315e-08,
"loss": 0.2349,
"step": 700
},
{
"epoch": 1.6297041080149381,
"grad_norm": 0.8320196355947896,
"learning_rate": 8.628174587035341e-08,
"loss": 0.2273,
"step": 710
},
{
"epoch": 1.6526860097673084,
"grad_norm": 0.9555868196565838,
"learning_rate": 7.610766028973709e-08,
"loss": 0.2296,
"step": 720
},
{
"epoch": 1.6756679115196782,
"grad_norm": 0.9937435581681181,
"learning_rate": 6.65222148860341e-08,
"loss": 0.2308,
"step": 730
},
{
"epoch": 1.6986498132720482,
"grad_norm": 1.020911625506034,
"learning_rate": 5.7538720535024675e-08,
"loss": 0.239,
"step": 740
},
{
"epoch": 1.7216317150244183,
"grad_norm": 0.8215715679494953,
"learning_rate": 4.916965221020752e-08,
"loss": 0.2342,
"step": 750
},
{
"epoch": 1.7446136167767883,
"grad_norm": 1.1007061186951645,
"learning_rate": 4.142663165936577e-08,
"loss": 0.2384,
"step": 760
},
{
"epoch": 1.7675955185291583,
"grad_norm": 1.102579872508456,
"learning_rate": 3.4320411265970126e-08,
"loss": 0.2385,
"step": 770
},
{
"epoch": 1.7905774202815283,
"grad_norm": 1.1528090007618998,
"learning_rate": 2.7860859117828982e-08,
"loss": 0.2302,
"step": 780
},
{
"epoch": 1.8135593220338984,
"grad_norm": 0.8163219245894903,
"learning_rate": 2.2056945303719654e-08,
"loss": 0.2349,
"step": 790
},
{
"epoch": 1.8365412237862682,
"grad_norm": 0.9797862840490112,
"learning_rate": 1.6916729457030876e-08,
"loss": 0.2435,
"step": 800
},
{
"epoch": 1.8595231255386384,
"grad_norm": 0.772366931661589,
"learning_rate": 1.2447349563713182e-08,
"loss": 0.2302,
"step": 810
},
{
"epoch": 1.8825050272910082,
"grad_norm": 0.9839548226895065,
"learning_rate": 8.655012050079568e-09,
"loss": 0.2359,
"step": 820
},
{
"epoch": 1.9054869290433785,
"grad_norm": 1.0212584518939574,
"learning_rate": 5.5449831642214174e-09,
"loss": 0.2399,
"step": 830
},
{
"epoch": 1.9284688307957483,
"grad_norm": 0.8008215994748586,
"learning_rate": 3.1215816630071335e-09,
"loss": 0.2289,
"step": 840
},
{
"epoch": 1.9514507325481185,
"grad_norm": 1.1503304447686444,
"learning_rate": 1.3881728148191773e-09,
"loss": 0.2384,
"step": 850
},
{
"epoch": 1.9744326343004883,
"grad_norm": 0.9158421449356217,
"learning_rate": 3.471637263576799e-10,
"loss": 0.2313,
"step": 860
},
{
"epoch": 1.9974145360528583,
"grad_norm": 1.2218975161834726,
"learning_rate": 0.0,
"loss": 0.2368,
"step": 870
}
],
"logging_steps": 10,
"max_steps": 870,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 296919333601280.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}