{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 12.5,
"eval_steps": 500,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.125,
"grad_norm": 0.044921875,
"learning_rate": 0.00019800000000000002,
"loss": 1.0694,
"step": 50
},
{
"epoch": 0.25,
"grad_norm": 0.01007080078125,
"learning_rate": 0.000196,
"loss": 0.3955,
"step": 100
},
{
"epoch": 0.375,
"grad_norm": 0.51171875,
"learning_rate": 0.000194,
"loss": 0.2071,
"step": 150
},
{
"epoch": 0.5,
"grad_norm": 2.515625,
"learning_rate": 0.000192,
"loss": 0.5064,
"step": 200
},
{
"epoch": 0.625,
"grad_norm": 0.0179443359375,
"learning_rate": 0.00019,
"loss": 0.4261,
"step": 250
},
{
"epoch": 0.75,
"grad_norm": 4.625,
"learning_rate": 0.000188,
"loss": 0.3917,
"step": 300
},
{
"epoch": 0.875,
"grad_norm": 7.53125,
"learning_rate": 0.00018600000000000002,
"loss": 0.5011,
"step": 350
},
{
"epoch": 1.0,
"grad_norm": 0.00019168853759765625,
"learning_rate": 0.00018400000000000003,
"loss": 0.1436,
"step": 400
},
{
"epoch": 1.125,
"grad_norm": 4.38690185546875e-05,
"learning_rate": 0.000182,
"loss": 0.2189,
"step": 450
},
{
"epoch": 1.25,
"grad_norm": 0.0032196044921875,
"learning_rate": 0.00018,
"loss": 0.3036,
"step": 500
},
{
"epoch": 1.375,
"grad_norm": 5.745887756347656e-05,
"learning_rate": 0.00017800000000000002,
"loss": 0.0851,
"step": 550
},
{
"epoch": 1.5,
"grad_norm": 3.796875,
"learning_rate": 0.00017600000000000002,
"loss": 0.2121,
"step": 600
},
{
"epoch": 1.625,
"grad_norm": 0.0037994384765625,
"learning_rate": 0.000174,
"loss": 0.0688,
"step": 650
},
{
"epoch": 1.75,
"grad_norm": 0.000759124755859375,
"learning_rate": 0.000172,
"loss": 0.1541,
"step": 700
},
{
"epoch": 1.875,
"grad_norm": 0.00113677978515625,
"learning_rate": 0.00017,
"loss": 0.1641,
"step": 750
},
{
"epoch": 2.0,
"grad_norm": 5.602836608886719e-06,
"learning_rate": 0.000168,
"loss": 0.0284,
"step": 800
},
{
"epoch": 2.125,
"grad_norm": 6.96875,
"learning_rate": 0.000166,
"loss": 0.0413,
"step": 850
},
{
"epoch": 2.25,
"grad_norm": 6.496906280517578e-06,
"learning_rate": 0.000164,
"loss": 0.0338,
"step": 900
},
{
"epoch": 2.375,
"grad_norm": 7.711350917816162e-07,
"learning_rate": 0.000162,
"loss": 0.0603,
"step": 950
},
{
"epoch": 2.5,
"grad_norm": 7.62939453125e-06,
"learning_rate": 0.00016,
"loss": 0.0454,
"step": 1000
},
{
"epoch": 2.625,
"grad_norm": 3.6656856536865234e-06,
"learning_rate": 0.00015800000000000002,
"loss": 0.0683,
"step": 1050
},
{
"epoch": 2.75,
"grad_norm": 14.4375,
"learning_rate": 0.00015600000000000002,
"loss": 0.0216,
"step": 1100
},
{
"epoch": 2.875,
"grad_norm": 4.947185516357422e-06,
"learning_rate": 0.000154,
"loss": 0.037,
"step": 1150
},
{
"epoch": 3.0,
"grad_norm": 4.9591064453125e-05,
"learning_rate": 0.000152,
"loss": 0.033,
"step": 1200
},
{
"epoch": 3.125,
"grad_norm": 7.003545761108398e-07,
"learning_rate": 0.00015000000000000001,
"loss": 0.0115,
"step": 1250
},
{
"epoch": 3.25,
"grad_norm": 0.0009918212890625,
"learning_rate": 0.000148,
"loss": 0.0295,
"step": 1300
},
{
"epoch": 3.375,
"grad_norm": 0.000263214111328125,
"learning_rate": 0.000146,
"loss": 0.0269,
"step": 1350
},
{
"epoch": 3.5,
"grad_norm": 7.674098014831543e-07,
"learning_rate": 0.000144,
"loss": 0.0203,
"step": 1400
},
{
"epoch": 3.625,
"grad_norm": 2.86102294921875e-06,
"learning_rate": 0.000142,
"loss": 0.0215,
"step": 1450
},
{
"epoch": 3.75,
"grad_norm": 6.51925802230835e-07,
"learning_rate": 0.00014,
"loss": 0.0245,
"step": 1500
},
{
"epoch": 3.875,
"grad_norm": 1.1548399925231934e-06,
"learning_rate": 0.000138,
"loss": 0.0204,
"step": 1550
},
{
"epoch": 4.0,
"grad_norm": 2.637505531311035e-06,
"learning_rate": 0.00013600000000000003,
"loss": 0.0108,
"step": 1600
},
{
"epoch": 4.125,
"grad_norm": 1.6689300537109375e-06,
"learning_rate": 0.000134,
"loss": 0.0068,
"step": 1650
},
{
"epoch": 4.25,
"grad_norm": 2.3096799850463867e-07,
"learning_rate": 0.000132,
"loss": 0.0191,
"step": 1700
},
{
"epoch": 4.375,
"grad_norm": 1.6689300537109375e-06,
"learning_rate": 0.00013000000000000002,
"loss": 0.0179,
"step": 1750
},
{
"epoch": 4.5,
"grad_norm": 5.5730342864990234e-06,
"learning_rate": 0.00012800000000000002,
"loss": 0.0049,
"step": 1800
},
{
"epoch": 4.625,
"grad_norm": 3.910064697265625e-05,
"learning_rate": 0.000126,
"loss": 0.028,
"step": 1850
},
{
"epoch": 4.75,
"grad_norm": 3.609375,
"learning_rate": 0.000124,
"loss": 0.0099,
"step": 1900
},
{
"epoch": 4.875,
"grad_norm": 0.00012111663818359375,
"learning_rate": 0.000122,
"loss": 0.0053,
"step": 1950
},
{
"epoch": 5.0,
"grad_norm": 2.0116567611694336e-06,
"learning_rate": 0.00012,
"loss": 0.0147,
"step": 2000
},
{
"epoch": 5.125,
"grad_norm": 0.31640625,
"learning_rate": 0.000118,
"loss": 0.0121,
"step": 2050
},
{
"epoch": 5.25,
"grad_norm": 3.1739473342895508e-06,
"learning_rate": 0.000116,
"loss": 0.0057,
"step": 2100
},
{
"epoch": 5.375,
"grad_norm": 2.3562461137771606e-07,
"learning_rate": 0.00011399999999999999,
"loss": 0.0116,
"step": 2150
},
{
"epoch": 5.5,
"grad_norm": 2.473592758178711e-06,
"learning_rate": 0.00011200000000000001,
"loss": 0.0064,
"step": 2200
},
{
"epoch": 5.625,
"grad_norm": 0.30859375,
"learning_rate": 0.00011000000000000002,
"loss": 0.005,
"step": 2250
},
{
"epoch": 5.75,
"grad_norm": 2.816319465637207e-06,
"learning_rate": 0.00010800000000000001,
"loss": 0.0147,
"step": 2300
},
{
"epoch": 5.875,
"grad_norm": 4.9114227294921875e-05,
"learning_rate": 0.00010600000000000002,
"loss": 0.0083,
"step": 2350
},
{
"epoch": 6.0,
"grad_norm": 2.428889274597168e-06,
"learning_rate": 0.00010400000000000001,
"loss": 0.0071,
"step": 2400
},
{
"epoch": 6.125,
"grad_norm": 5.602836608886719e-06,
"learning_rate": 0.00010200000000000001,
"loss": 0.0059,
"step": 2450
},
{
"epoch": 6.25,
"grad_norm": 0.412109375,
"learning_rate": 0.0001,
"loss": 0.0103,
"step": 2500
},
{
"epoch": 6.375,
"grad_norm": 0.6796875,
"learning_rate": 9.8e-05,
"loss": 0.0087,
"step": 2550
},
{
"epoch": 6.5,
"grad_norm": 0.45703125,
"learning_rate": 9.6e-05,
"loss": 0.0038,
"step": 2600
},
{
"epoch": 6.625,
"grad_norm": 2.7120113372802734e-06,
"learning_rate": 9.4e-05,
"loss": 0.0046,
"step": 2650
},
{
"epoch": 6.75,
"grad_norm": 0.37890625,
"learning_rate": 9.200000000000001e-05,
"loss": 0.0066,
"step": 2700
},
{
"epoch": 6.875,
"grad_norm": 4.5299530029296875e-05,
"learning_rate": 9e-05,
"loss": 0.0114,
"step": 2750
},
{
"epoch": 7.0,
"grad_norm": 2.3096799850463867e-07,
"learning_rate": 8.800000000000001e-05,
"loss": 0.0028,
"step": 2800
},
{
"epoch": 7.125,
"grad_norm": 2.4586915969848633e-06,
"learning_rate": 8.6e-05,
"loss": 0.0083,
"step": 2850
},
{
"epoch": 7.25,
"grad_norm": 0.353515625,
"learning_rate": 8.4e-05,
"loss": 0.008,
"step": 2900
},
{
"epoch": 7.375,
"grad_norm": 1.2665987014770508e-07,
"learning_rate": 8.2e-05,
"loss": 0.0042,
"step": 2950
},
{
"epoch": 7.5,
"grad_norm": 1.2479722499847412e-07,
"learning_rate": 8e-05,
"loss": 0.0028,
"step": 3000
},
{
"epoch": 7.625,
"grad_norm": 1.3317912817001343e-07,
"learning_rate": 7.800000000000001e-05,
"loss": 0.0077,
"step": 3050
},
{
"epoch": 7.75,
"grad_norm": 2.384185791015625e-06,
"learning_rate": 7.6e-05,
"loss": 0.0047,
"step": 3100
},
{
"epoch": 7.875,
"grad_norm": 1.0943040251731873e-07,
"learning_rate": 7.4e-05,
"loss": 0.002,
"step": 3150
},
{
"epoch": 8.0,
"grad_norm": 1.3969838619232178e-07,
"learning_rate": 7.2e-05,
"loss": 0.0106,
"step": 3200
},
{
"epoch": 8.125,
"grad_norm": 2.2649765014648438e-06,
"learning_rate": 7e-05,
"loss": 0.0037,
"step": 3250
},
{
"epoch": 8.25,
"grad_norm": 2.8312206268310547e-06,
"learning_rate": 6.800000000000001e-05,
"loss": 0.0074,
"step": 3300
},
{
"epoch": 8.375,
"grad_norm": 4.4405460357666016e-06,
"learning_rate": 6.6e-05,
"loss": 0.0046,
"step": 3350
},
{
"epoch": 8.5,
"grad_norm": 2.518296241760254e-06,
"learning_rate": 6.400000000000001e-05,
"loss": 0.0052,
"step": 3400
},
{
"epoch": 8.625,
"grad_norm": 2.8014183044433594e-06,
"learning_rate": 6.2e-05,
"loss": 0.0088,
"step": 3450
},
{
"epoch": 8.75,
"grad_norm": 4.544854164123535e-07,
"learning_rate": 6e-05,
"loss": 0.0065,
"step": 3500
},
{
"epoch": 8.875,
"grad_norm": 3.1851232051849365e-07,
"learning_rate": 5.8e-05,
"loss": 0.0079,
"step": 3550
},
{
"epoch": 9.0,
"grad_norm": 5.304813385009766e-06,
"learning_rate": 5.6000000000000006e-05,
"loss": 0.0022,
"step": 3600
},
{
"epoch": 9.125,
"grad_norm": 3.6954879760742188e-06,
"learning_rate": 5.4000000000000005e-05,
"loss": 0.0049,
"step": 3650
},
{
"epoch": 9.25,
"grad_norm": 1.3131648302078247e-07,
"learning_rate": 5.2000000000000004e-05,
"loss": 0.0019,
"step": 3700
},
{
"epoch": 9.375,
"grad_norm": 2.5480985641479492e-06,
"learning_rate": 5e-05,
"loss": 0.0036,
"step": 3750
},
{
"epoch": 9.5,
"grad_norm": 0.439453125,
"learning_rate": 4.8e-05,
"loss": 0.0094,
"step": 3800
},
{
"epoch": 9.625,
"grad_norm": 2.551823854446411e-07,
"learning_rate": 4.600000000000001e-05,
"loss": 0.0097,
"step": 3850
},
{
"epoch": 9.75,
"grad_norm": 1.4808028936386108e-07,
"learning_rate": 4.4000000000000006e-05,
"loss": 0.0066,
"step": 3900
},
{
"epoch": 9.875,
"grad_norm": 1.4156103134155273e-07,
"learning_rate": 4.2e-05,
"loss": 0.0041,
"step": 3950
},
{
"epoch": 10.0,
"grad_norm": 2.682209014892578e-06,
"learning_rate": 4e-05,
"loss": 0.0046,
"step": 4000
},
{
"epoch": 10.125,
"grad_norm": 1.2479722499847412e-07,
"learning_rate": 3.8e-05,
"loss": 0.0053,
"step": 4050
},
{
"epoch": 10.25,
"grad_norm": 2.130866050720215e-06,
"learning_rate": 3.6e-05,
"loss": 0.0081,
"step": 4100
},
{
"epoch": 10.375,
"grad_norm": 2.4028122425079346e-07,
"learning_rate": 3.4000000000000007e-05,
"loss": 0.0067,
"step": 4150
},
{
"epoch": 10.5,
"grad_norm": 1.8477439880371094e-06,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.0023,
"step": 4200
},
{
"epoch": 10.625,
"grad_norm": 2.6226043701171875e-06,
"learning_rate": 3e-05,
"loss": 0.0052,
"step": 4250
},
{
"epoch": 10.75,
"grad_norm": 2.7120113372802734e-06,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.0042,
"step": 4300
},
{
"epoch": 10.875,
"grad_norm": 0.447265625,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.0056,
"step": 4350
},
{
"epoch": 11.0,
"grad_norm": 2.905726432800293e-07,
"learning_rate": 2.4e-05,
"loss": 0.0058,
"step": 4400
},
{
"epoch": 11.125,
"grad_norm": 2.9653310775756836e-06,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.0022,
"step": 4450
},
{
"epoch": 11.25,
"grad_norm": 3.0100345611572266e-06,
"learning_rate": 2e-05,
"loss": 0.0069,
"step": 4500
},
{
"epoch": 11.375,
"grad_norm": 2.086162567138672e-07,
"learning_rate": 1.8e-05,
"loss": 0.0031,
"step": 4550
},
{
"epoch": 11.5,
"grad_norm": 1.2889504432678223e-06,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.0088,
"step": 4600
},
{
"epoch": 11.625,
"grad_norm": 2.1606683731079102e-07,
"learning_rate": 1.4000000000000001e-05,
"loss": 0.0036,
"step": 4650
},
{
"epoch": 11.75,
"grad_norm": 3.7066638469696045e-07,
"learning_rate": 1.2e-05,
"loss": 0.0063,
"step": 4700
},
{
"epoch": 11.875,
"grad_norm": 0.44921875,
"learning_rate": 1e-05,
"loss": 0.0067,
"step": 4750
},
{
"epoch": 12.0,
"grad_norm": 1.6205012798309326e-07,
"learning_rate": 8.000000000000001e-06,
"loss": 0.005,
"step": 4800
},
{
"epoch": 12.125,
"grad_norm": 3.0100345611572266e-06,
"learning_rate": 6e-06,
"loss": 0.0058,
"step": 4850
},
{
"epoch": 12.25,
"grad_norm": 1.1920928955078125e-07,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0047,
"step": 4900
},
{
"epoch": 12.375,
"grad_norm": 2.4139881134033203e-06,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0043,
"step": 4950
},
{
"epoch": 12.5,
"grad_norm": 8.153915405273438e-05,
"learning_rate": 0.0,
"loss": 0.0032,
"step": 5000
}
],
"logging_steps": 50,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 13,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.281807837380608e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}