{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.7225433526011561,
"eval_steps": 500,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0072254335260115606,
"grad_norm": 1.9609375,
"learning_rate": 0.00019800000000000002,
"loss": 0.9903,
"step": 50
},
{
"epoch": 0.014450867052023121,
"grad_norm": 2.4375,
"learning_rate": 0.000196,
"loss": 0.6379,
"step": 100
},
{
"epoch": 0.02167630057803468,
"grad_norm": 0.3046875,
"learning_rate": 0.000194,
"loss": 0.411,
"step": 150
},
{
"epoch": 0.028901734104046242,
"grad_norm": 0.4765625,
"learning_rate": 0.000192,
"loss": 0.3963,
"step": 200
},
{
"epoch": 0.036127167630057806,
"grad_norm": 0.439453125,
"learning_rate": 0.00019,
"loss": 0.3769,
"step": 250
},
{
"epoch": 0.04335260115606936,
"grad_norm": 0.41015625,
"learning_rate": 0.000188,
"loss": 0.346,
"step": 300
},
{
"epoch": 0.05057803468208093,
"grad_norm": 2.15625,
"learning_rate": 0.00018600000000000002,
"loss": 0.3799,
"step": 350
},
{
"epoch": 0.057803468208092484,
"grad_norm": 0.36328125,
"learning_rate": 0.00018400000000000003,
"loss": 0.3492,
"step": 400
},
{
"epoch": 0.06502890173410404,
"grad_norm": 2.75,
"learning_rate": 0.000182,
"loss": 0.4643,
"step": 450
},
{
"epoch": 0.07225433526011561,
"grad_norm": 18.25,
"learning_rate": 0.00018,
"loss": 0.4841,
"step": 500
},
{
"epoch": 0.07947976878612717,
"grad_norm": 0.0849609375,
"learning_rate": 0.00017800000000000002,
"loss": 0.3582,
"step": 550
},
{
"epoch": 0.08670520231213873,
"grad_norm": 0.0390625,
"learning_rate": 0.00017600000000000002,
"loss": 0.6567,
"step": 600
},
{
"epoch": 0.09393063583815028,
"grad_norm": 13.375,
"learning_rate": 0.000174,
"loss": 0.6067,
"step": 650
},
{
"epoch": 0.10115606936416185,
"grad_norm": 0.08935546875,
"learning_rate": 0.000172,
"loss": 0.6571,
"step": 700
},
{
"epoch": 0.10838150289017341,
"grad_norm": 9.25,
"learning_rate": 0.00017,
"loss": 0.6869,
"step": 750
},
{
"epoch": 0.11560693641618497,
"grad_norm": 1.28125,
"learning_rate": 0.000168,
"loss": 0.5443,
"step": 800
},
{
"epoch": 0.12283236994219653,
"grad_norm": 0.353515625,
"learning_rate": 0.000166,
"loss": 0.3886,
"step": 850
},
{
"epoch": 0.13005780346820808,
"grad_norm": 1.4375,
"learning_rate": 0.000164,
"loss": 0.4815,
"step": 900
},
{
"epoch": 0.13728323699421965,
"grad_norm": 0.13671875,
"learning_rate": 0.000162,
"loss": 0.4896,
"step": 950
},
{
"epoch": 0.14450867052023122,
"grad_norm": 0.30859375,
"learning_rate": 0.00016,
"loss": 0.5187,
"step": 1000
},
{
"epoch": 0.15173410404624277,
"grad_norm": 0.0947265625,
"learning_rate": 0.00015800000000000002,
"loss": 0.3277,
"step": 1050
},
{
"epoch": 0.15895953757225434,
"grad_norm": 50.75,
"learning_rate": 0.00015600000000000002,
"loss": 0.4379,
"step": 1100
},
{
"epoch": 0.16618497109826588,
"grad_norm": 0.01385498046875,
"learning_rate": 0.000154,
"loss": 0.2361,
"step": 1150
},
{
"epoch": 0.17341040462427745,
"grad_norm": 0.02783203125,
"learning_rate": 0.000152,
"loss": 0.4492,
"step": 1200
},
{
"epoch": 0.18063583815028902,
"grad_norm": 0.07373046875,
"learning_rate": 0.00015000000000000001,
"loss": 0.3762,
"step": 1250
},
{
"epoch": 0.18786127167630057,
"grad_norm": 1.5,
"learning_rate": 0.000148,
"loss": 0.6323,
"step": 1300
},
{
"epoch": 0.19508670520231214,
"grad_norm": 0.01171875,
"learning_rate": 0.000146,
"loss": 0.2352,
"step": 1350
},
{
"epoch": 0.2023121387283237,
"grad_norm": 0.171875,
"learning_rate": 0.000144,
"loss": 0.5015,
"step": 1400
},
{
"epoch": 0.20953757225433525,
"grad_norm": 0.0206298828125,
"learning_rate": 0.000142,
"loss": 0.1863,
"step": 1450
},
{
"epoch": 0.21676300578034682,
"grad_norm": 0.1953125,
"learning_rate": 0.00014,
"loss": 0.3393,
"step": 1500
},
{
"epoch": 0.2239884393063584,
"grad_norm": 0.0859375,
"learning_rate": 0.000138,
"loss": 0.3873,
"step": 1550
},
{
"epoch": 0.23121387283236994,
"grad_norm": 0.0306396484375,
"learning_rate": 0.00013600000000000003,
"loss": 0.2486,
"step": 1600
},
{
"epoch": 0.2384393063583815,
"grad_norm": 0.09130859375,
"learning_rate": 0.000134,
"loss": 0.2862,
"step": 1650
},
{
"epoch": 0.24566473988439305,
"grad_norm": 0.0537109375,
"learning_rate": 0.000132,
"loss": 0.2258,
"step": 1700
},
{
"epoch": 0.25289017341040465,
"grad_norm": 0.126953125,
"learning_rate": 0.00013000000000000002,
"loss": 0.4188,
"step": 1750
},
{
"epoch": 0.26011560693641617,
"grad_norm": 0.06884765625,
"learning_rate": 0.00012800000000000002,
"loss": 0.1568,
"step": 1800
},
{
"epoch": 0.26734104046242774,
"grad_norm": 0.0211181640625,
"learning_rate": 0.000126,
"loss": 0.3317,
"step": 1850
},
{
"epoch": 0.2745664739884393,
"grad_norm": 0.0311279296875,
"learning_rate": 0.000124,
"loss": 0.4656,
"step": 1900
},
{
"epoch": 0.2817919075144509,
"grad_norm": 11.3125,
"learning_rate": 0.000122,
"loss": 0.1262,
"step": 1950
},
{
"epoch": 0.28901734104046245,
"grad_norm": 0.0303955078125,
"learning_rate": 0.00012,
"loss": 0.25,
"step": 2000
},
{
"epoch": 0.29624277456647397,
"grad_norm": 0.83984375,
"learning_rate": 0.000118,
"loss": 0.1817,
"step": 2050
},
{
"epoch": 0.30346820809248554,
"grad_norm": 0.01007080078125,
"learning_rate": 0.000116,
"loss": 0.3367,
"step": 2100
},
{
"epoch": 0.3106936416184971,
"grad_norm": 0.74609375,
"learning_rate": 0.00011399999999999999,
"loss": 0.3013,
"step": 2150
},
{
"epoch": 0.3179190751445087,
"grad_norm": 0.13671875,
"learning_rate": 0.00011200000000000001,
"loss": 0.1856,
"step": 2200
},
{
"epoch": 0.32514450867052025,
"grad_norm": 0.017822265625,
"learning_rate": 0.00011000000000000002,
"loss": 0.1866,
"step": 2250
},
{
"epoch": 0.33236994219653176,
"grad_norm": 0.035888671875,
"learning_rate": 0.00010800000000000001,
"loss": 0.3231,
"step": 2300
},
{
"epoch": 0.33959537572254334,
"grad_norm": 6.875,
"learning_rate": 0.00010600000000000002,
"loss": 0.3213,
"step": 2350
},
{
"epoch": 0.3468208092485549,
"grad_norm": 188.0,
"learning_rate": 0.00010400000000000001,
"loss": 0.2325,
"step": 2400
},
{
"epoch": 0.3540462427745665,
"grad_norm": 0.01708984375,
"learning_rate": 0.00010200000000000001,
"loss": 0.1456,
"step": 2450
},
{
"epoch": 0.36127167630057805,
"grad_norm": 10.625,
"learning_rate": 0.0001,
"loss": 0.1886,
"step": 2500
},
{
"epoch": 0.3684971098265896,
"grad_norm": 0.01416015625,
"learning_rate": 9.8e-05,
"loss": 0.1784,
"step": 2550
},
{
"epoch": 0.37572254335260113,
"grad_norm": 0.050048828125,
"learning_rate": 9.6e-05,
"loss": 0.2939,
"step": 2600
},
{
"epoch": 0.3829479768786127,
"grad_norm": 92.0,
"learning_rate": 9.4e-05,
"loss": 0.2655,
"step": 2650
},
{
"epoch": 0.3901734104046243,
"grad_norm": 0.01263427734375,
"learning_rate": 9.200000000000001e-05,
"loss": 0.1456,
"step": 2700
},
{
"epoch": 0.39739884393063585,
"grad_norm": 0.0771484375,
"learning_rate": 9e-05,
"loss": 0.1656,
"step": 2750
},
{
"epoch": 0.4046242774566474,
"grad_norm": 0.0146484375,
"learning_rate": 8.800000000000001e-05,
"loss": 0.0131,
"step": 2800
},
{
"epoch": 0.41184971098265893,
"grad_norm": 0.0103759765625,
"learning_rate": 8.6e-05,
"loss": 0.2122,
"step": 2850
},
{
"epoch": 0.4190751445086705,
"grad_norm": 0.0311279296875,
"learning_rate": 8.4e-05,
"loss": 0.186,
"step": 2900
},
{
"epoch": 0.4263005780346821,
"grad_norm": 0.01806640625,
"learning_rate": 8.2e-05,
"loss": 0.3008,
"step": 2950
},
{
"epoch": 0.43352601156069365,
"grad_norm": 4.09375,
"learning_rate": 8e-05,
"loss": 0.1292,
"step": 3000
},
{
"epoch": 0.4407514450867052,
"grad_norm": 0.032958984375,
"learning_rate": 7.800000000000001e-05,
"loss": 0.2945,
"step": 3050
},
{
"epoch": 0.4479768786127168,
"grad_norm": 0.03857421875,
"learning_rate": 7.6e-05,
"loss": 0.1122,
"step": 3100
},
{
"epoch": 0.4552023121387283,
"grad_norm": 0.0281982421875,
"learning_rate": 7.4e-05,
"loss": 0.3546,
"step": 3150
},
{
"epoch": 0.4624277456647399,
"grad_norm": 0.09375,
"learning_rate": 7.2e-05,
"loss": 0.2244,
"step": 3200
},
{
"epoch": 0.46965317919075145,
"grad_norm": 0.02783203125,
"learning_rate": 7e-05,
"loss": 0.1002,
"step": 3250
},
{
"epoch": 0.476878612716763,
"grad_norm": 0.03466796875,
"learning_rate": 6.800000000000001e-05,
"loss": 0.285,
"step": 3300
},
{
"epoch": 0.4841040462427746,
"grad_norm": 0.04443359375,
"learning_rate": 6.6e-05,
"loss": 0.1826,
"step": 3350
},
{
"epoch": 0.4913294797687861,
"grad_norm": 0.07177734375,
"learning_rate": 6.400000000000001e-05,
"loss": 0.0866,
"step": 3400
},
{
"epoch": 0.4985549132947977,
"grad_norm": 0.0908203125,
"learning_rate": 6.2e-05,
"loss": 0.1351,
"step": 3450
},
{
"epoch": 0.5057803468208093,
"grad_norm": 0.01263427734375,
"learning_rate": 6e-05,
"loss": 0.124,
"step": 3500
},
{
"epoch": 0.5130057803468208,
"grad_norm": 69.5,
"learning_rate": 5.8e-05,
"loss": 0.2068,
"step": 3550
},
{
"epoch": 0.5202312138728323,
"grad_norm": 0.04833984375,
"learning_rate": 5.6000000000000006e-05,
"loss": 0.0863,
"step": 3600
},
{
"epoch": 0.5274566473988439,
"grad_norm": 0.1669921875,
"learning_rate": 5.4000000000000005e-05,
"loss": 0.2219,
"step": 3650
},
{
"epoch": 0.5346820809248555,
"grad_norm": 0.03759765625,
"learning_rate": 5.2000000000000004e-05,
"loss": 0.0932,
"step": 3700
},
{
"epoch": 0.541907514450867,
"grad_norm": 12.1875,
"learning_rate": 5e-05,
"loss": 0.1892,
"step": 3750
},
{
"epoch": 0.5491329479768786,
"grad_norm": 0.0498046875,
"learning_rate": 4.8e-05,
"loss": 0.1246,
"step": 3800
},
{
"epoch": 0.5563583815028902,
"grad_norm": 1.6953125,
"learning_rate": 4.600000000000001e-05,
"loss": 0.1856,
"step": 3850
},
{
"epoch": 0.5635838150289018,
"grad_norm": 0.026123046875,
"learning_rate": 4.4000000000000006e-05,
"loss": 0.2242,
"step": 3900
},
{
"epoch": 0.5708092485549133,
"grad_norm": 0.04833984375,
"learning_rate": 4.2e-05,
"loss": 0.103,
"step": 3950
},
{
"epoch": 0.5780346820809249,
"grad_norm": 0.08642578125,
"learning_rate": 4e-05,
"loss": 0.1996,
"step": 4000
},
{
"epoch": 0.5852601156069365,
"grad_norm": 0.1513671875,
"learning_rate": 3.8e-05,
"loss": 0.1966,
"step": 4050
},
{
"epoch": 0.5924855491329479,
"grad_norm": 0.02490234375,
"learning_rate": 3.6e-05,
"loss": 0.0311,
"step": 4100
},
{
"epoch": 0.5997109826589595,
"grad_norm": 0.07666015625,
"learning_rate": 3.4000000000000007e-05,
"loss": 0.2937,
"step": 4150
},
{
"epoch": 0.6069364161849711,
"grad_norm": 0.0654296875,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.1919,
"step": 4200
},
{
"epoch": 0.6141618497109826,
"grad_norm": 0.13671875,
"learning_rate": 3e-05,
"loss": 0.1952,
"step": 4250
},
{
"epoch": 0.6213872832369942,
"grad_norm": 0.0164794921875,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.2093,
"step": 4300
},
{
"epoch": 0.6286127167630058,
"grad_norm": 0.026611328125,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.1218,
"step": 4350
},
{
"epoch": 0.6358381502890174,
"grad_norm": 0.0272216796875,
"learning_rate": 2.4e-05,
"loss": 0.2465,
"step": 4400
},
{
"epoch": 0.6430635838150289,
"grad_norm": 0.12158203125,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.1993,
"step": 4450
},
{
"epoch": 0.6502890173410405,
"grad_norm": 0.8046875,
"learning_rate": 2e-05,
"loss": 0.1368,
"step": 4500
},
{
"epoch": 0.6575144508670521,
"grad_norm": 0.0673828125,
"learning_rate": 1.8e-05,
"loss": 0.0891,
"step": 4550
},
{
"epoch": 0.6647398843930635,
"grad_norm": 0.07666015625,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.072,
"step": 4600
},
{
"epoch": 0.6719653179190751,
"grad_norm": 0.0098876953125,
"learning_rate": 1.4000000000000001e-05,
"loss": 0.1378,
"step": 4650
},
{
"epoch": 0.6791907514450867,
"grad_norm": 0.0179443359375,
"learning_rate": 1.2e-05,
"loss": 0.1816,
"step": 4700
},
{
"epoch": 0.6864161849710982,
"grad_norm": 0.045166015625,
"learning_rate": 1e-05,
"loss": 0.2036,
"step": 4750
},
{
"epoch": 0.6936416184971098,
"grad_norm": 0.036376953125,
"learning_rate": 8.000000000000001e-06,
"loss": 0.2402,
"step": 4800
},
{
"epoch": 0.7008670520231214,
"grad_norm": 0.0234375,
"learning_rate": 6e-06,
"loss": 0.2079,
"step": 4850
},
{
"epoch": 0.708092485549133,
"grad_norm": 0.048095703125,
"learning_rate": 4.000000000000001e-06,
"loss": 0.1239,
"step": 4900
},
{
"epoch": 0.7153179190751445,
"grad_norm": 0.0272216796875,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0889,
"step": 4950
},
{
"epoch": 0.7225433526011561,
"grad_norm": 0.0184326171875,
"learning_rate": 0.0,
"loss": 0.1328,
"step": 5000
}
],
"logging_steps": 50,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.6245324486656e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}