{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.7472527472527473,
"eval_steps": 500,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.027472527472527472,
"grad_norm": 0.490234375,
"learning_rate": 0.00019800000000000002,
"loss": 1.0178,
"step": 50
},
{
"epoch": 0.054945054945054944,
"grad_norm": 2.796875,
"learning_rate": 0.000196,
"loss": 1.1549,
"step": 100
},
{
"epoch": 0.08241758241758242,
"grad_norm": 0.00921630859375,
"learning_rate": 0.000194,
"loss": 0.6677,
"step": 150
},
{
"epoch": 0.10989010989010989,
"grad_norm": 5.5625,
"learning_rate": 0.000192,
"loss": 0.932,
"step": 200
},
{
"epoch": 0.13736263736263737,
"grad_norm": 3.59375,
"learning_rate": 0.00019,
"loss": 0.5178,
"step": 250
},
{
"epoch": 0.16483516483516483,
"grad_norm": 5.5625,
"learning_rate": 0.000188,
"loss": 0.697,
"step": 300
},
{
"epoch": 0.19230769230769232,
"grad_norm": 0.000156402587890625,
"learning_rate": 0.00018600000000000002,
"loss": 0.7214,
"step": 350
},
{
"epoch": 0.21978021978021978,
"grad_norm": 4.84375,
"learning_rate": 0.00018400000000000003,
"loss": 0.7486,
"step": 400
},
{
"epoch": 0.24725274725274726,
"grad_norm": 0.447265625,
"learning_rate": 0.000182,
"loss": 0.6696,
"step": 450
},
{
"epoch": 0.27472527472527475,
"grad_norm": 0.000614166259765625,
"learning_rate": 0.00018,
"loss": 0.6315,
"step": 500
},
{
"epoch": 0.3021978021978022,
"grad_norm": 8.4375,
"learning_rate": 0.00017800000000000002,
"loss": 0.652,
"step": 550
},
{
"epoch": 0.32967032967032966,
"grad_norm": 0.000438690185546875,
"learning_rate": 0.00017600000000000002,
"loss": 0.5758,
"step": 600
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.0093994140625,
"learning_rate": 0.000174,
"loss": 0.562,
"step": 650
},
{
"epoch": 0.38461538461538464,
"grad_norm": 0.2109375,
"learning_rate": 0.000172,
"loss": 0.5937,
"step": 700
},
{
"epoch": 0.41208791208791207,
"grad_norm": 0.0032501220703125,
"learning_rate": 0.00017,
"loss": 0.6657,
"step": 750
},
{
"epoch": 0.43956043956043955,
"grad_norm": 8.8125,
"learning_rate": 0.000168,
"loss": 0.7224,
"step": 800
},
{
"epoch": 0.46703296703296704,
"grad_norm": 2.703125,
"learning_rate": 0.000166,
"loss": 0.9437,
"step": 850
},
{
"epoch": 0.4945054945054945,
"grad_norm": 0.00090789794921875,
"learning_rate": 0.000164,
"loss": 0.5451,
"step": 900
},
{
"epoch": 0.521978021978022,
"grad_norm": 87.0,
"learning_rate": 0.000162,
"loss": 0.4207,
"step": 950
},
{
"epoch": 0.5494505494505495,
"grad_norm": 0.00164031982421875,
"learning_rate": 0.00016,
"loss": 0.5841,
"step": 1000
},
{
"epoch": 0.5769230769230769,
"grad_norm": 0.00034332275390625,
"learning_rate": 0.00015800000000000002,
"loss": 0.5337,
"step": 1050
},
{
"epoch": 0.6043956043956044,
"grad_norm": 0.1201171875,
"learning_rate": 0.00015600000000000002,
"loss": 0.7521,
"step": 1100
},
{
"epoch": 0.6318681318681318,
"grad_norm": 0.05078125,
"learning_rate": 0.000154,
"loss": 0.6175,
"step": 1150
},
{
"epoch": 0.6593406593406593,
"grad_norm": 0.55078125,
"learning_rate": 0.000152,
"loss": 0.5597,
"step": 1200
},
{
"epoch": 0.6868131868131868,
"grad_norm": 0.06591796875,
"learning_rate": 0.00015000000000000001,
"loss": 0.9526,
"step": 1250
},
{
"epoch": 0.7142857142857143,
"grad_norm": 2.703125,
"learning_rate": 0.000148,
"loss": 0.416,
"step": 1300
},
{
"epoch": 0.7417582417582418,
"grad_norm": 0.01177978515625,
"learning_rate": 0.000146,
"loss": 0.6454,
"step": 1350
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.423828125,
"learning_rate": 0.000144,
"loss": 0.5772,
"step": 1400
},
{
"epoch": 0.7967032967032966,
"grad_norm": 2.171875,
"learning_rate": 0.000142,
"loss": 0.5839,
"step": 1450
},
{
"epoch": 0.8241758241758241,
"grad_norm": 0.003021240234375,
"learning_rate": 0.00014,
"loss": 0.7548,
"step": 1500
},
{
"epoch": 0.8516483516483516,
"grad_norm": 4.6875,
"learning_rate": 0.000138,
"loss": 0.678,
"step": 1550
},
{
"epoch": 0.8791208791208791,
"grad_norm": 0.00433349609375,
"learning_rate": 0.00013600000000000003,
"loss": 0.5795,
"step": 1600
},
{
"epoch": 0.9065934065934066,
"grad_norm": 0.00156402587890625,
"learning_rate": 0.000134,
"loss": 0.6352,
"step": 1650
},
{
"epoch": 0.9340659340659341,
"grad_norm": 6.875,
"learning_rate": 0.000132,
"loss": 0.5606,
"step": 1700
},
{
"epoch": 0.9615384615384616,
"grad_norm": 3.46875,
"learning_rate": 0.00013000000000000002,
"loss": 0.5033,
"step": 1750
},
{
"epoch": 0.989010989010989,
"grad_norm": 7.53125,
"learning_rate": 0.00012800000000000002,
"loss": 0.3787,
"step": 1800
},
{
"epoch": 1.0164835164835164,
"grad_norm": 8.625,
"learning_rate": 0.000126,
"loss": 0.5048,
"step": 1850
},
{
"epoch": 1.043956043956044,
"grad_norm": 0.000518798828125,
"learning_rate": 0.000124,
"loss": 0.5134,
"step": 1900
},
{
"epoch": 1.0714285714285714,
"grad_norm": 0.008056640625,
"learning_rate": 0.000122,
"loss": 0.2342,
"step": 1950
},
{
"epoch": 1.098901098901099,
"grad_norm": 4.78125,
"learning_rate": 0.00012,
"loss": 0.4388,
"step": 2000
},
{
"epoch": 1.1263736263736264,
"grad_norm": 7.343292236328125e-05,
"learning_rate": 0.000118,
"loss": 0.4928,
"step": 2050
},
{
"epoch": 1.1538461538461537,
"grad_norm": 7.34375,
"learning_rate": 0.000116,
"loss": 0.4759,
"step": 2100
},
{
"epoch": 1.1813186813186813,
"grad_norm": 5.3125,
"learning_rate": 0.00011399999999999999,
"loss": 0.3002,
"step": 2150
},
{
"epoch": 1.2087912087912087,
"grad_norm": 0.00025177001953125,
"learning_rate": 0.00011200000000000001,
"loss": 0.1603,
"step": 2200
},
{
"epoch": 1.2362637362637363,
"grad_norm": 0.0003719329833984375,
"learning_rate": 0.00011000000000000002,
"loss": 0.3096,
"step": 2250
},
{
"epoch": 1.2637362637362637,
"grad_norm": 0.000732421875,
"learning_rate": 0.00010800000000000001,
"loss": 0.2216,
"step": 2300
},
{
"epoch": 1.2912087912087913,
"grad_norm": 7.033348083496094e-06,
"learning_rate": 0.00010600000000000002,
"loss": 0.3195,
"step": 2350
},
{
"epoch": 1.3186813186813187,
"grad_norm": 2.0384788513183594e-05,
"learning_rate": 0.00010400000000000001,
"loss": 0.2727,
"step": 2400
},
{
"epoch": 1.3461538461538463,
"grad_norm": 0.00054931640625,
"learning_rate": 0.00010200000000000001,
"loss": 0.3244,
"step": 2450
},
{
"epoch": 1.3736263736263736,
"grad_norm": 10.9375,
"learning_rate": 0.0001,
"loss": 0.2455,
"step": 2500
},
{
"epoch": 1.401098901098901,
"grad_norm": 8.535385131835938e-05,
"learning_rate": 9.8e-05,
"loss": 0.3644,
"step": 2550
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.00012969970703125,
"learning_rate": 9.6e-05,
"loss": 0.3234,
"step": 2600
},
{
"epoch": 1.456043956043956,
"grad_norm": 6.389617919921875e-05,
"learning_rate": 9.4e-05,
"loss": 0.1711,
"step": 2650
},
{
"epoch": 1.4835164835164836,
"grad_norm": 7.96875,
"learning_rate": 9.200000000000001e-05,
"loss": 0.3783,
"step": 2700
},
{
"epoch": 1.510989010989011,
"grad_norm": 15.25,
"learning_rate": 9e-05,
"loss": 0.2034,
"step": 2750
},
{
"epoch": 1.5384615384615383,
"grad_norm": 4.96875,
"learning_rate": 8.800000000000001e-05,
"loss": 0.2156,
"step": 2800
},
{
"epoch": 1.565934065934066,
"grad_norm": 9.107589721679688e-05,
"learning_rate": 8.6e-05,
"loss": 0.3119,
"step": 2850
},
{
"epoch": 1.5934065934065935,
"grad_norm": 0.0002079010009765625,
"learning_rate": 8.4e-05,
"loss": 0.2044,
"step": 2900
},
{
"epoch": 1.620879120879121,
"grad_norm": 0.11376953125,
"learning_rate": 8.2e-05,
"loss": 0.2696,
"step": 2950
},
{
"epoch": 1.6483516483516483,
"grad_norm": 10.375,
"learning_rate": 8e-05,
"loss": 0.2998,
"step": 3000
},
{
"epoch": 1.6758241758241759,
"grad_norm": 0.00054931640625,
"learning_rate": 7.800000000000001e-05,
"loss": 0.2754,
"step": 3050
},
{
"epoch": 1.7032967032967035,
"grad_norm": 8.0108642578125e-05,
"learning_rate": 7.6e-05,
"loss": 0.2065,
"step": 3100
},
{
"epoch": 1.7307692307692308,
"grad_norm": 0.00016307830810546875,
"learning_rate": 7.4e-05,
"loss": 0.2248,
"step": 3150
},
{
"epoch": 1.7582417582417582,
"grad_norm": 0.00012969970703125,
"learning_rate": 7.2e-05,
"loss": 0.3116,
"step": 3200
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.0002231597900390625,
"learning_rate": 7e-05,
"loss": 0.2603,
"step": 3250
},
{
"epoch": 1.8131868131868132,
"grad_norm": 0.00012874603271484375,
"learning_rate": 6.800000000000001e-05,
"loss": 0.2854,
"step": 3300
},
{
"epoch": 1.8406593406593408,
"grad_norm": 3.96875,
"learning_rate": 6.6e-05,
"loss": 0.2222,
"step": 3350
},
{
"epoch": 1.8681318681318682,
"grad_norm": 0.0033416748046875,
"learning_rate": 6.400000000000001e-05,
"loss": 0.2902,
"step": 3400
},
{
"epoch": 1.8956043956043955,
"grad_norm": 6.008148193359375e-05,
"learning_rate": 6.2e-05,
"loss": 0.2299,
"step": 3450
},
{
"epoch": 1.9230769230769231,
"grad_norm": 18.75,
"learning_rate": 6e-05,
"loss": 0.2612,
"step": 3500
},
{
"epoch": 1.9505494505494505,
"grad_norm": 6.90625,
"learning_rate": 5.8e-05,
"loss": 0.2367,
"step": 3550
},
{
"epoch": 1.978021978021978,
"grad_norm": 5.650520324707031e-05,
"learning_rate": 5.6000000000000006e-05,
"loss": 0.2223,
"step": 3600
},
{
"epoch": 2.0054945054945055,
"grad_norm": 0.03564453125,
"learning_rate": 5.4000000000000005e-05,
"loss": 0.2567,
"step": 3650
},
{
"epoch": 2.032967032967033,
"grad_norm": 0.0002574920654296875,
"learning_rate": 5.2000000000000004e-05,
"loss": 0.1277,
"step": 3700
},
{
"epoch": 2.0604395604395602,
"grad_norm": 0.00070953369140625,
"learning_rate": 5e-05,
"loss": 0.1751,
"step": 3750
},
{
"epoch": 2.087912087912088,
"grad_norm": 0.00015735626220703125,
"learning_rate": 4.8e-05,
"loss": 0.1713,
"step": 3800
},
{
"epoch": 2.1153846153846154,
"grad_norm": 0.00021648406982421875,
"learning_rate": 4.600000000000001e-05,
"loss": 0.1682,
"step": 3850
},
{
"epoch": 2.142857142857143,
"grad_norm": 4.380941390991211e-06,
"learning_rate": 4.4000000000000006e-05,
"loss": 0.1259,
"step": 3900
},
{
"epoch": 2.17032967032967,
"grad_norm": 8.3125,
"learning_rate": 4.2e-05,
"loss": 0.1822,
"step": 3950
},
{
"epoch": 2.197802197802198,
"grad_norm": 0.0003376007080078125,
"learning_rate": 4e-05,
"loss": 0.105,
"step": 4000
},
{
"epoch": 2.2252747252747254,
"grad_norm": 8.726119995117188e-05,
"learning_rate": 3.8e-05,
"loss": 0.0887,
"step": 4050
},
{
"epoch": 2.2527472527472527,
"grad_norm": 0.00012493133544921875,
"learning_rate": 3.6e-05,
"loss": 0.1371,
"step": 4100
},
{
"epoch": 2.28021978021978,
"grad_norm": 3.25,
"learning_rate": 3.4000000000000007e-05,
"loss": 0.1725,
"step": 4150
},
{
"epoch": 2.3076923076923075,
"grad_norm": 0.00016117095947265625,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.1363,
"step": 4200
},
{
"epoch": 2.3351648351648353,
"grad_norm": 4.125,
"learning_rate": 3e-05,
"loss": 0.203,
"step": 4250
},
{
"epoch": 2.3626373626373627,
"grad_norm": 3.796875,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.1607,
"step": 4300
},
{
"epoch": 2.39010989010989,
"grad_norm": 2.8908252716064453e-06,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.0885,
"step": 4350
},
{
"epoch": 2.4175824175824174,
"grad_norm": 2.046875,
"learning_rate": 2.4e-05,
"loss": 0.1202,
"step": 4400
},
{
"epoch": 2.4450549450549453,
"grad_norm": 1.0132789611816406e-05,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.2216,
"step": 4450
},
{
"epoch": 2.4725274725274726,
"grad_norm": 2.265625,
"learning_rate": 2e-05,
"loss": 0.1367,
"step": 4500
},
{
"epoch": 2.5,
"grad_norm": 2.5,
"learning_rate": 1.8e-05,
"loss": 0.1546,
"step": 4550
},
{
"epoch": 2.5274725274725274,
"grad_norm": 0.00122833251953125,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.1059,
"step": 4600
},
{
"epoch": 2.5549450549450547,
"grad_norm": 0.0004730224609375,
"learning_rate": 1.4000000000000001e-05,
"loss": 0.0869,
"step": 4650
},
{
"epoch": 2.5824175824175826,
"grad_norm": 9.625,
"learning_rate": 1.2e-05,
"loss": 0.1142,
"step": 4700
},
{
"epoch": 2.60989010989011,
"grad_norm": 4.15625,
"learning_rate": 1e-05,
"loss": 0.2082,
"step": 4750
},
{
"epoch": 2.6373626373626373,
"grad_norm": 5.03125,
"learning_rate": 8.000000000000001e-06,
"loss": 0.1374,
"step": 4800
},
{
"epoch": 2.6648351648351647,
"grad_norm": 2.46875,
"learning_rate": 6e-06,
"loss": 0.1475,
"step": 4850
},
{
"epoch": 2.6923076923076925,
"grad_norm": 3.125,
"learning_rate": 4.000000000000001e-06,
"loss": 0.1593,
"step": 4900
},
{
"epoch": 2.71978021978022,
"grad_norm": 7.295608520507812e-05,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.1695,
"step": 4950
},
{
"epoch": 2.7472527472527473,
"grad_norm": 6.6875,
"learning_rate": 0.0,
"loss": 0.1467,
"step": 5000
}
],
"logging_steps": 50,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.36023173482496e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}