{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.08347245409015025,
"eval_steps": 500,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0008347245409015025,
"grad_norm": 5.03125,
"learning_rate": 0.00019800000000000002,
"loss": 1.2979,
"step": 50
},
{
"epoch": 0.001669449081803005,
"grad_norm": 3.734375,
"learning_rate": 0.000196,
"loss": 0.8508,
"step": 100
},
{
"epoch": 0.0025041736227045075,
"grad_norm": 0.58984375,
"learning_rate": 0.000194,
"loss": 0.7959,
"step": 150
},
{
"epoch": 0.00333889816360601,
"grad_norm": 0.578125,
"learning_rate": 0.000192,
"loss": 0.726,
"step": 200
},
{
"epoch": 0.004173622704507512,
"grad_norm": 0.328125,
"learning_rate": 0.00019,
"loss": 0.9015,
"step": 250
},
{
"epoch": 0.005008347245409015,
"grad_norm": 69.5,
"learning_rate": 0.000188,
"loss": 0.4003,
"step": 300
},
{
"epoch": 0.005843071786310518,
"grad_norm": 3.015625,
"learning_rate": 0.00018600000000000002,
"loss": 0.5518,
"step": 350
},
{
"epoch": 0.00667779632721202,
"grad_norm": 0.034912109375,
"learning_rate": 0.00018400000000000003,
"loss": 0.4198,
"step": 400
},
{
"epoch": 0.007512520868113523,
"grad_norm": 5.90625,
"learning_rate": 0.000182,
"loss": 0.8151,
"step": 450
},
{
"epoch": 0.008347245409015025,
"grad_norm": 17.125,
"learning_rate": 0.00018,
"loss": 0.5379,
"step": 500
},
{
"epoch": 0.009181969949916527,
"grad_norm": 0.0654296875,
"learning_rate": 0.00017800000000000002,
"loss": 0.5603,
"step": 550
},
{
"epoch": 0.01001669449081803,
"grad_norm": 0.10400390625,
"learning_rate": 0.00017600000000000002,
"loss": 0.7589,
"step": 600
},
{
"epoch": 0.010851419031719533,
"grad_norm": 0.265625,
"learning_rate": 0.000174,
"loss": 0.3531,
"step": 650
},
{
"epoch": 0.011686143572621035,
"grad_norm": 4.59375,
"learning_rate": 0.000172,
"loss": 0.2717,
"step": 700
},
{
"epoch": 0.012520868113522538,
"grad_norm": 0.25390625,
"learning_rate": 0.00017,
"loss": 0.3344,
"step": 750
},
{
"epoch": 0.01335559265442404,
"grad_norm": 4.6875,
"learning_rate": 0.000168,
"loss": 0.3585,
"step": 800
},
{
"epoch": 0.014190317195325543,
"grad_norm": 0.04736328125,
"learning_rate": 0.000166,
"loss": 0.3218,
"step": 850
},
{
"epoch": 0.015025041736227046,
"grad_norm": 0.046875,
"learning_rate": 0.000164,
"loss": 0.3344,
"step": 900
},
{
"epoch": 0.015859766277128547,
"grad_norm": 0.015625,
"learning_rate": 0.000162,
"loss": 0.4233,
"step": 950
},
{
"epoch": 0.01669449081803005,
"grad_norm": 0.08056640625,
"learning_rate": 0.00016,
"loss": 0.6014,
"step": 1000
},
{
"epoch": 0.017529215358931552,
"grad_norm": 3.390625,
"learning_rate": 0.00015800000000000002,
"loss": 0.4044,
"step": 1050
},
{
"epoch": 0.018363939899833055,
"grad_norm": 0.31640625,
"learning_rate": 0.00015600000000000002,
"loss": 0.9183,
"step": 1100
},
{
"epoch": 0.019198664440734557,
"grad_norm": 0.076171875,
"learning_rate": 0.000154,
"loss": 0.2508,
"step": 1150
},
{
"epoch": 0.02003338898163606,
"grad_norm": 0.01055908203125,
"learning_rate": 0.000152,
"loss": 0.3563,
"step": 1200
},
{
"epoch": 0.020868113522537562,
"grad_norm": 54.0,
"learning_rate": 0.00015000000000000001,
"loss": 0.4328,
"step": 1250
},
{
"epoch": 0.021702838063439065,
"grad_norm": 19.5,
"learning_rate": 0.000148,
"loss": 0.312,
"step": 1300
},
{
"epoch": 0.022537562604340568,
"grad_norm": 0.0380859375,
"learning_rate": 0.000146,
"loss": 0.3068,
"step": 1350
},
{
"epoch": 0.02337228714524207,
"grad_norm": 0.006622314453125,
"learning_rate": 0.000144,
"loss": 0.3987,
"step": 1400
},
{
"epoch": 0.024207011686143573,
"grad_norm": 8.0625,
"learning_rate": 0.000142,
"loss": 0.4942,
"step": 1450
},
{
"epoch": 0.025041736227045076,
"grad_norm": 26.125,
"learning_rate": 0.00014,
"loss": 0.4011,
"step": 1500
},
{
"epoch": 0.02587646076794658,
"grad_norm": 0.58203125,
"learning_rate": 0.000138,
"loss": 0.5129,
"step": 1550
},
{
"epoch": 0.02671118530884808,
"grad_norm": 0.0267333984375,
"learning_rate": 0.00013600000000000003,
"loss": 0.1984,
"step": 1600
},
{
"epoch": 0.027545909849749584,
"grad_norm": 0.0157470703125,
"learning_rate": 0.000134,
"loss": 0.4052,
"step": 1650
},
{
"epoch": 0.028380634390651086,
"grad_norm": 4.5625,
"learning_rate": 0.000132,
"loss": 0.4439,
"step": 1700
},
{
"epoch": 0.02921535893155259,
"grad_norm": 21.125,
"learning_rate": 0.00013000000000000002,
"loss": 0.5354,
"step": 1750
},
{
"epoch": 0.03005008347245409,
"grad_norm": 15.375,
"learning_rate": 0.00012800000000000002,
"loss": 0.5931,
"step": 1800
},
{
"epoch": 0.030884808013355594,
"grad_norm": 1.3515625,
"learning_rate": 0.000126,
"loss": 0.1363,
"step": 1850
},
{
"epoch": 0.03171953255425709,
"grad_norm": 0.53125,
"learning_rate": 0.000124,
"loss": 0.4885,
"step": 1900
},
{
"epoch": 0.0325542570951586,
"grad_norm": 0.0908203125,
"learning_rate": 0.000122,
"loss": 0.265,
"step": 1950
},
{
"epoch": 0.0333889816360601,
"grad_norm": 0.0142822265625,
"learning_rate": 0.00012,
"loss": 0.265,
"step": 2000
},
{
"epoch": 0.034223706176961605,
"grad_norm": 3.25,
"learning_rate": 0.000118,
"loss": 0.5515,
"step": 2050
},
{
"epoch": 0.035058430717863104,
"grad_norm": 3.5,
"learning_rate": 0.000116,
"loss": 0.3973,
"step": 2100
},
{
"epoch": 0.03589315525876461,
"grad_norm": 1.0703125,
"learning_rate": 0.00011399999999999999,
"loss": 0.4916,
"step": 2150
},
{
"epoch": 0.03672787979966611,
"grad_norm": 0.26171875,
"learning_rate": 0.00011200000000000001,
"loss": 0.4639,
"step": 2200
},
{
"epoch": 0.037562604340567615,
"grad_norm": 0.10693359375,
"learning_rate": 0.00011000000000000002,
"loss": 0.4581,
"step": 2250
},
{
"epoch": 0.038397328881469114,
"grad_norm": 5.75,
"learning_rate": 0.00010800000000000001,
"loss": 0.3952,
"step": 2300
},
{
"epoch": 0.03923205342237062,
"grad_norm": 0.058837890625,
"learning_rate": 0.00010600000000000002,
"loss": 0.5916,
"step": 2350
},
{
"epoch": 0.04006677796327212,
"grad_norm": 69.5,
"learning_rate": 0.00010400000000000001,
"loss": 0.394,
"step": 2400
},
{
"epoch": 0.040901502504173626,
"grad_norm": 0.04833984375,
"learning_rate": 0.00010200000000000001,
"loss": 0.2667,
"step": 2450
},
{
"epoch": 0.041736227045075125,
"grad_norm": 0.005615234375,
"learning_rate": 0.0001,
"loss": 0.4901,
"step": 2500
},
{
"epoch": 0.04257095158597663,
"grad_norm": 0.004180908203125,
"learning_rate": 9.8e-05,
"loss": 0.3322,
"step": 2550
},
{
"epoch": 0.04340567612687813,
"grad_norm": 0.048095703125,
"learning_rate": 9.6e-05,
"loss": 0.3777,
"step": 2600
},
{
"epoch": 0.04424040066777963,
"grad_norm": 0.08447265625,
"learning_rate": 9.4e-05,
"loss": 0.4049,
"step": 2650
},
{
"epoch": 0.045075125208681135,
"grad_norm": 0.0712890625,
"learning_rate": 9.200000000000001e-05,
"loss": 0.5967,
"step": 2700
},
{
"epoch": 0.045909849749582635,
"grad_norm": 5.78125,
"learning_rate": 9e-05,
"loss": 0.2451,
"step": 2750
},
{
"epoch": 0.04674457429048414,
"grad_norm": 0.0081787109375,
"learning_rate": 8.800000000000001e-05,
"loss": 0.4497,
"step": 2800
},
{
"epoch": 0.04757929883138564,
"grad_norm": 0.0260009765625,
"learning_rate": 8.6e-05,
"loss": 0.4576,
"step": 2850
},
{
"epoch": 0.048414023372287146,
"grad_norm": 0.01025390625,
"learning_rate": 8.4e-05,
"loss": 0.4427,
"step": 2900
},
{
"epoch": 0.049248747913188645,
"grad_norm": 0.04296875,
"learning_rate": 8.2e-05,
"loss": 0.4576,
"step": 2950
},
{
"epoch": 0.05008347245409015,
"grad_norm": 0.0634765625,
"learning_rate": 8e-05,
"loss": 0.3468,
"step": 3000
},
{
"epoch": 0.05091819699499165,
"grad_norm": 14.0625,
"learning_rate": 7.800000000000001e-05,
"loss": 0.491,
"step": 3050
},
{
"epoch": 0.05175292153589316,
"grad_norm": 0.0277099609375,
"learning_rate": 7.6e-05,
"loss": 0.3008,
"step": 3100
},
{
"epoch": 0.052587646076794656,
"grad_norm": 0.03466796875,
"learning_rate": 7.4e-05,
"loss": 0.3269,
"step": 3150
},
{
"epoch": 0.05342237061769616,
"grad_norm": 0.024169921875,
"learning_rate": 7.2e-05,
"loss": 0.4641,
"step": 3200
},
{
"epoch": 0.05425709515859766,
"grad_norm": 0.02099609375,
"learning_rate": 7e-05,
"loss": 0.2215,
"step": 3250
},
{
"epoch": 0.05509181969949917,
"grad_norm": 0.04833984375,
"learning_rate": 6.800000000000001e-05,
"loss": 0.3736,
"step": 3300
},
{
"epoch": 0.055926544240400666,
"grad_norm": 0.040283203125,
"learning_rate": 6.6e-05,
"loss": 0.114,
"step": 3350
},
{
"epoch": 0.05676126878130217,
"grad_norm": 0.45703125,
"learning_rate": 6.400000000000001e-05,
"loss": 0.2316,
"step": 3400
},
{
"epoch": 0.05759599332220367,
"grad_norm": 0.06982421875,
"learning_rate": 6.2e-05,
"loss": 0.6308,
"step": 3450
},
{
"epoch": 0.05843071786310518,
"grad_norm": 2.765625,
"learning_rate": 6e-05,
"loss": 0.2929,
"step": 3500
},
{
"epoch": 0.05926544240400668,
"grad_norm": 0.06396484375,
"learning_rate": 5.8e-05,
"loss": 0.5138,
"step": 3550
},
{
"epoch": 0.06010016694490818,
"grad_norm": 0.07861328125,
"learning_rate": 5.6000000000000006e-05,
"loss": 0.5067,
"step": 3600
},
{
"epoch": 0.06093489148580968,
"grad_norm": 3.875,
"learning_rate": 5.4000000000000005e-05,
"loss": 0.6174,
"step": 3650
},
{
"epoch": 0.06176961602671119,
"grad_norm": 0.17578125,
"learning_rate": 5.2000000000000004e-05,
"loss": 0.3295,
"step": 3700
},
{
"epoch": 0.06260434056761269,
"grad_norm": 0.08251953125,
"learning_rate": 5e-05,
"loss": 0.3084,
"step": 3750
},
{
"epoch": 0.06343906510851419,
"grad_norm": 0.33203125,
"learning_rate": 4.8e-05,
"loss": 0.3046,
"step": 3800
},
{
"epoch": 0.0642737896494157,
"grad_norm": 0.126953125,
"learning_rate": 4.600000000000001e-05,
"loss": 0.1521,
"step": 3850
},
{
"epoch": 0.0651085141903172,
"grad_norm": 0.08447265625,
"learning_rate": 4.4000000000000006e-05,
"loss": 0.4219,
"step": 3900
},
{
"epoch": 0.0659432387312187,
"grad_norm": 105.5,
"learning_rate": 4.2e-05,
"loss": 0.2136,
"step": 3950
},
{
"epoch": 0.0667779632721202,
"grad_norm": 0.0830078125,
"learning_rate": 4e-05,
"loss": 0.1255,
"step": 4000
},
{
"epoch": 0.0676126878130217,
"grad_norm": 3.84375,
"learning_rate": 3.8e-05,
"loss": 0.3222,
"step": 4050
},
{
"epoch": 0.06844741235392321,
"grad_norm": 0.04833984375,
"learning_rate": 3.6e-05,
"loss": 0.4844,
"step": 4100
},
{
"epoch": 0.06928213689482471,
"grad_norm": 3.65625,
"learning_rate": 3.4000000000000007e-05,
"loss": 0.2207,
"step": 4150
},
{
"epoch": 0.07011686143572621,
"grad_norm": 0.0478515625,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.2303,
"step": 4200
},
{
"epoch": 0.0709515859766277,
"grad_norm": 0.10595703125,
"learning_rate": 3e-05,
"loss": 0.3358,
"step": 4250
},
{
"epoch": 0.07178631051752922,
"grad_norm": 0.08349609375,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.6106,
"step": 4300
},
{
"epoch": 0.07262103505843072,
"grad_norm": 0.09130859375,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.4787,
"step": 4350
},
{
"epoch": 0.07345575959933222,
"grad_norm": 0.042236328125,
"learning_rate": 2.4e-05,
"loss": 0.145,
"step": 4400
},
{
"epoch": 0.07429048414023372,
"grad_norm": 0.020263671875,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.3278,
"step": 4450
},
{
"epoch": 0.07512520868113523,
"grad_norm": 0.00701904296875,
"learning_rate": 2e-05,
"loss": 0.3238,
"step": 4500
},
{
"epoch": 0.07595993322203673,
"grad_norm": 0.01080322265625,
"learning_rate": 1.8e-05,
"loss": 0.3776,
"step": 4550
},
{
"epoch": 0.07679465776293823,
"grad_norm": 0.049072265625,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.3402,
"step": 4600
},
{
"epoch": 0.07762938230383973,
"grad_norm": 0.0712890625,
"learning_rate": 1.4000000000000001e-05,
"loss": 0.429,
"step": 4650
},
{
"epoch": 0.07846410684474124,
"grad_norm": 0.53125,
"learning_rate": 1.2e-05,
"loss": 0.5878,
"step": 4700
},
{
"epoch": 0.07929883138564274,
"grad_norm": 3.828125,
"learning_rate": 1e-05,
"loss": 0.4823,
"step": 4750
},
{
"epoch": 0.08013355592654424,
"grad_norm": 0.059326171875,
"learning_rate": 8.000000000000001e-06,
"loss": 0.1802,
"step": 4800
},
{
"epoch": 0.08096828046744574,
"grad_norm": 0.00628662109375,
"learning_rate": 6e-06,
"loss": 0.4154,
"step": 4850
},
{
"epoch": 0.08180300500834725,
"grad_norm": 0.06005859375,
"learning_rate": 4.000000000000001e-06,
"loss": 0.295,
"step": 4900
},
{
"epoch": 0.08263772954924875,
"grad_norm": 0.01287841796875,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.3699,
"step": 4950
},
{
"epoch": 0.08347245409015025,
"grad_norm": 0.283203125,
"learning_rate": 0.0,
"loss": 0.3094,
"step": 5000
}
],
"logging_steps": 50,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.145082707738624e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}