{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 54,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018604651162790697,
"grad_norm": 51.7990608215332,
"learning_rate": 0.0,
"loss": 0.71331787109375,
"step": 1
},
{
"epoch": 0.037209302325581395,
"grad_norm": 56.753963470458984,
"learning_rate": 7.8125e-08,
"loss": 0.71588134765625,
"step": 2
},
{
"epoch": 0.05581395348837209,
"grad_norm": 45.70652389526367,
"learning_rate": 1.5625e-07,
"loss": 0.7108154296875,
"step": 3
},
{
"epoch": 0.07441860465116279,
"grad_norm": 53.1850700378418,
"learning_rate": 2.3437500000000003e-07,
"loss": 0.7137451171875,
"step": 4
},
{
"epoch": 0.09302325581395349,
"grad_norm": 49.760826110839844,
"learning_rate": 3.125e-07,
"loss": 0.71337890625,
"step": 5
},
{
"epoch": 0.11162790697674418,
"grad_norm": 57.31139373779297,
"learning_rate": 3.90625e-07,
"loss": 0.71978759765625,
"step": 6
},
{
"epoch": 0.13023255813953488,
"grad_norm": 56.42349624633789,
"learning_rate": 4.6875000000000006e-07,
"loss": 0.7164306640625,
"step": 7
},
{
"epoch": 0.14883720930232558,
"grad_norm": 54.40520477294922,
"learning_rate": 5.468750000000001e-07,
"loss": 0.71539306640625,
"step": 8
},
{
"epoch": 0.16744186046511628,
"grad_norm": 50.69281768798828,
"learning_rate": 6.25e-07,
"loss": 0.71405029296875,
"step": 9
},
{
"epoch": 0.18604651162790697,
"grad_norm": 58.078224182128906,
"learning_rate": 7.03125e-07,
"loss": 0.71514892578125,
"step": 10
},
{
"epoch": 0.20465116279069767,
"grad_norm": 60.195770263671875,
"learning_rate": 7.8125e-07,
"loss": 0.71466064453125,
"step": 11
},
{
"epoch": 0.22325581395348837,
"grad_norm": 53.095191955566406,
"learning_rate": 8.59375e-07,
"loss": 0.71002197265625,
"step": 12
},
{
"epoch": 0.24186046511627907,
"grad_norm": 49.17010498046875,
"learning_rate": 9.375000000000001e-07,
"loss": 0.70770263671875,
"step": 13
},
{
"epoch": 0.26046511627906976,
"grad_norm": 50.612850189208984,
"learning_rate": 1.0156250000000001e-06,
"loss": 0.7069091796875,
"step": 14
},
{
"epoch": 0.27906976744186046,
"grad_norm": 52.69828414916992,
"learning_rate": 1.0937500000000001e-06,
"loss": 0.71014404296875,
"step": 15
},
{
"epoch": 0.29767441860465116,
"grad_norm": 59.4550895690918,
"learning_rate": 1.1718750000000001e-06,
"loss": 0.701416015625,
"step": 16
},
{
"epoch": 0.31627906976744186,
"grad_norm": 55.300506591796875,
"learning_rate": 1.25e-06,
"loss": 0.6976318359375,
"step": 17
},
{
"epoch": 0.33488372093023255,
"grad_norm": 47.445289611816406,
"learning_rate": 1.328125e-06,
"loss": 0.69403076171875,
"step": 18
},
{
"epoch": 0.35348837209302325,
"grad_norm": 52.989654541015625,
"learning_rate": 1.40625e-06,
"loss": 0.69329833984375,
"step": 19
},
{
"epoch": 0.37209302325581395,
"grad_norm": 45.23550033569336,
"learning_rate": 1.484375e-06,
"loss": 0.692138671875,
"step": 20
},
{
"epoch": 0.39069767441860465,
"grad_norm": 51.775882720947266,
"learning_rate": 1.5625e-06,
"loss": 0.69146728515625,
"step": 21
},
{
"epoch": 0.40930232558139534,
"grad_norm": 44.17149353027344,
"learning_rate": 1.640625e-06,
"loss": 0.66351318359375,
"step": 22
},
{
"epoch": 0.42790697674418604,
"grad_norm": 44.85835266113281,
"learning_rate": 1.71875e-06,
"loss": 0.65997314453125,
"step": 23
},
{
"epoch": 0.44651162790697674,
"grad_norm": 43.70960998535156,
"learning_rate": 1.796875e-06,
"loss": 0.65948486328125,
"step": 24
},
{
"epoch": 0.46511627906976744,
"grad_norm": 43.53929138183594,
"learning_rate": 1.8750000000000003e-06,
"loss": 0.6571044921875,
"step": 25
},
{
"epoch": 0.48372093023255813,
"grad_norm": 41.27622985839844,
"learning_rate": 1.953125e-06,
"loss": 0.65191650390625,
"step": 26
},
{
"epoch": 0.5023255813953489,
"grad_norm": 45.456512451171875,
"learning_rate": 2.0312500000000002e-06,
"loss": 0.6461181640625,
"step": 27
},
{
"epoch": 0.5209302325581395,
"grad_norm": 47.72049331665039,
"learning_rate": 2.109375e-06,
"loss": 0.639892578125,
"step": 28
},
{
"epoch": 0.5395348837209303,
"grad_norm": 39.66932678222656,
"learning_rate": 2.1875000000000002e-06,
"loss": 0.64697265625,
"step": 29
},
{
"epoch": 0.5581395348837209,
"grad_norm": 32.290470123291016,
"learning_rate": 2.265625e-06,
"loss": 0.61138916015625,
"step": 30
},
{
"epoch": 0.5767441860465117,
"grad_norm": 33.039894104003906,
"learning_rate": 2.3437500000000002e-06,
"loss": 0.6019287109375,
"step": 31
},
{
"epoch": 0.5953488372093023,
"grad_norm": 38.35325622558594,
"learning_rate": 2.421875e-06,
"loss": 0.5806884765625,
"step": 32
},
{
"epoch": 0.6139534883720931,
"grad_norm": 33.649505615234375,
"learning_rate": 2.5e-06,
"loss": 0.58551025390625,
"step": 33
},
{
"epoch": 0.6325581395348837,
"grad_norm": 30.9255428314209,
"learning_rate": 2.5781250000000004e-06,
"loss": 0.58953857421875,
"step": 34
},
{
"epoch": 0.6511627906976745,
"grad_norm": 35.72899627685547,
"learning_rate": 2.65625e-06,
"loss": 0.572021484375,
"step": 35
},
{
"epoch": 0.6697674418604651,
"grad_norm": 35.09605026245117,
"learning_rate": 2.7343750000000004e-06,
"loss": 0.555633544921875,
"step": 36
},
{
"epoch": 0.6883720930232559,
"grad_norm": 29.248594284057617,
"learning_rate": 2.8125e-06,
"loss": 0.567535400390625,
"step": 37
},
{
"epoch": 0.7069767441860465,
"grad_norm": 31.37779426574707,
"learning_rate": 2.8906250000000004e-06,
"loss": 0.5550537109375,
"step": 38
},
{
"epoch": 0.7255813953488373,
"grad_norm": 30.69561767578125,
"learning_rate": 2.96875e-06,
"loss": 0.540008544921875,
"step": 39
},
{
"epoch": 0.7441860465116279,
"grad_norm": 30.249013900756836,
"learning_rate": 3.0468750000000004e-06,
"loss": 0.526031494140625,
"step": 40
},
{
"epoch": 0.7627906976744186,
"grad_norm": 30.84266471862793,
"learning_rate": 3.125e-06,
"loss": 0.4935302734375,
"step": 41
},
{
"epoch": 0.7813953488372093,
"grad_norm": 23.530658721923828,
"learning_rate": 3.2031250000000004e-06,
"loss": 0.506683349609375,
"step": 42
},
{
"epoch": 0.8,
"grad_norm": 26.80575180053711,
"learning_rate": 3.28125e-06,
"loss": 0.476959228515625,
"step": 43
},
{
"epoch": 0.8186046511627907,
"grad_norm": 27.92676544189453,
"learning_rate": 3.3593750000000003e-06,
"loss": 0.452056884765625,
"step": 44
},
{
"epoch": 0.8372093023255814,
"grad_norm": 14.391761779785156,
"learning_rate": 3.4375e-06,
"loss": 0.516387939453125,
"step": 45
},
{
"epoch": 0.8558139534883721,
"grad_norm": 21.352672576904297,
"learning_rate": 3.5156250000000003e-06,
"loss": 0.4576416015625,
"step": 46
},
{
"epoch": 0.8744186046511628,
"grad_norm": 17.331666946411133,
"learning_rate": 3.59375e-06,
"loss": 0.467803955078125,
"step": 47
},
{
"epoch": 0.8930232558139535,
"grad_norm": 18.73495864868164,
"learning_rate": 3.6718750000000003e-06,
"loss": 0.446685791015625,
"step": 48
},
{
"epoch": 0.9116279069767442,
"grad_norm": 16.758777618408203,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.442779541015625,
"step": 49
},
{
"epoch": 0.9302325581395349,
"grad_norm": 17.156814575195312,
"learning_rate": 3.828125000000001e-06,
"loss": 0.4298095703125,
"step": 50
},
{
"epoch": 0.9488372093023256,
"grad_norm": 11.690564155578613,
"learning_rate": 3.90625e-06,
"loss": 0.448089599609375,
"step": 51
},
{
"epoch": 0.9674418604651163,
"grad_norm": 12.62314510345459,
"learning_rate": 3.984375e-06,
"loss": 0.424957275390625,
"step": 52
},
{
"epoch": 0.986046511627907,
"grad_norm": 8.785666465759277,
"learning_rate": 4.0625000000000005e-06,
"loss": 0.44073486328125,
"step": 53
},
{
"epoch": 1.0,
"grad_norm": 9.357829093933105,
"learning_rate": 4.140625000000001e-06,
"loss": 0.413665771484375,
"step": 54
},
{
"epoch": 1.0,
"step": 54,
"total_flos": 1.4367917178722714e+17,
"train_loss": 0.6073235405815972,
"train_runtime": 2167.6199,
"train_samples_per_second": 3.168,
"train_steps_per_second": 0.025
}
],
"logging_steps": 1.0,
"max_steps": 54,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4367917178722714e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}