{
"best_metric": 0.5233827829360962,
"best_model_checkpoint": "./output_v2/7b_cluster00_Nous-Hermes-llama-2-7b_codellama_blob_1/checkpoint-400",
"epoch": 1.1228070175438596,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 2e-06,
"loss": 0.7795,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 4e-06,
"loss": 0.6789,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 5.999999999999999e-06,
"loss": 0.6847,
"step": 3
},
{
"epoch": 0.01,
"learning_rate": 8e-06,
"loss": 0.6876,
"step": 4
},
{
"epoch": 0.01,
"learning_rate": 9.999999999999999e-06,
"loss": 0.5964,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 1.1999999999999999e-05,
"loss": 0.6592,
"step": 6
},
{
"epoch": 0.02,
"learning_rate": 1.4e-05,
"loss": 0.6261,
"step": 7
},
{
"epoch": 0.02,
"learning_rate": 1.6e-05,
"loss": 0.6892,
"step": 8
},
{
"epoch": 0.03,
"learning_rate": 1.7999999999999997e-05,
"loss": 0.538,
"step": 9
},
{
"epoch": 0.03,
"learning_rate": 1.9999999999999998e-05,
"loss": 0.6015,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 2.2e-05,
"loss": 0.7061,
"step": 11
},
{
"epoch": 0.03,
"learning_rate": 2.3999999999999997e-05,
"loss": 0.6329,
"step": 12
},
{
"epoch": 0.04,
"learning_rate": 2.6e-05,
"loss": 0.5633,
"step": 13
},
{
"epoch": 0.04,
"learning_rate": 2.8e-05,
"loss": 0.5499,
"step": 14
},
{
"epoch": 0.04,
"learning_rate": 2.9999999999999997e-05,
"loss": 0.5846,
"step": 15
},
{
"epoch": 0.04,
"learning_rate": 3.2e-05,
"loss": 0.7056,
"step": 16
},
{
"epoch": 0.05,
"learning_rate": 3.399999999999999e-05,
"loss": 0.9478,
"step": 17
},
{
"epoch": 0.05,
"learning_rate": 3.5999999999999994e-05,
"loss": 0.7119,
"step": 18
},
{
"epoch": 0.05,
"learning_rate": 3.8e-05,
"loss": 0.5115,
"step": 19
},
{
"epoch": 0.06,
"learning_rate": 3.9999999999999996e-05,
"loss": 0.6481,
"step": 20
},
{
"epoch": 0.06,
"learning_rate": 4.2e-05,
"loss": 1.0479,
"step": 21
},
{
"epoch": 0.06,
"learning_rate": 4.4e-05,
"loss": 0.9266,
"step": 22
},
{
"epoch": 0.06,
"learning_rate": 4.599999999999999e-05,
"loss": 0.7517,
"step": 23
},
{
"epoch": 0.07,
"learning_rate": 4.7999999999999994e-05,
"loss": 0.7024,
"step": 24
},
{
"epoch": 0.07,
"learning_rate": 4.9999999999999996e-05,
"loss": 0.6996,
"step": 25
},
{
"epoch": 0.07,
"eval_loss": 0.6741535663604736,
"eval_runtime": 26.372,
"eval_samples_per_second": 7.584,
"eval_steps_per_second": 1.896,
"step": 25
},
{
"dharma_eval_accuracy": 0.47090526975371527,
"dharma_eval_accuracy_ARC-Challenge": 0.6296296296296297,
"dharma_eval_accuracy_ARC-Easy": 0.7592592592592593,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.4444444444444444,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.4098360655737705,
"dharma_eval_accuracy_openbookqa": 0.2962962962962963,
"dharma_eval_accuracy_truthful_qa": 0.37037037037037035,
"dharma_eval_accuracy_winogrande": 0.37037037037037035,
"dharma_loss": 3.7834435443878176,
"epoch": 0.07,
"step": 25
},
{
"epoch": 0.07,
"learning_rate": 5.2e-05,
"loss": 0.6123,
"step": 26
},
{
"epoch": 0.08,
"learning_rate": 5.399999999999999e-05,
"loss": 0.608,
"step": 27
},
{
"epoch": 0.08,
"learning_rate": 5.6e-05,
"loss": 0.4845,
"step": 28
},
{
"epoch": 0.08,
"learning_rate": 5.7999999999999994e-05,
"loss": 0.6026,
"step": 29
},
{
"epoch": 0.08,
"learning_rate": 5.9999999999999995e-05,
"loss": 0.87,
"step": 30
},
{
"epoch": 0.09,
"learning_rate": 6.199999999999999e-05,
"loss": 0.9898,
"step": 31
},
{
"epoch": 0.09,
"learning_rate": 6.4e-05,
"loss": 0.7354,
"step": 32
},
{
"epoch": 0.09,
"learning_rate": 6.599999999999999e-05,
"loss": 0.5053,
"step": 33
},
{
"epoch": 0.1,
"learning_rate": 6.799999999999999e-05,
"loss": 0.6644,
"step": 34
},
{
"epoch": 0.1,
"learning_rate": 7e-05,
"loss": 0.6932,
"step": 35
},
{
"epoch": 0.1,
"learning_rate": 7.199999999999999e-05,
"loss": 0.7425,
"step": 36
},
{
"epoch": 0.1,
"learning_rate": 7.4e-05,
"loss": 0.8268,
"step": 37
},
{
"epoch": 0.11,
"learning_rate": 7.6e-05,
"loss": 0.5957,
"step": 38
},
{
"epoch": 0.11,
"learning_rate": 7.8e-05,
"loss": 0.5579,
"step": 39
},
{
"epoch": 0.11,
"learning_rate": 7.999999999999999e-05,
"loss": 0.7978,
"step": 40
},
{
"epoch": 0.12,
"learning_rate": 8.199999999999999e-05,
"loss": 0.8261,
"step": 41
},
{
"epoch": 0.12,
"learning_rate": 8.4e-05,
"loss": 0.7618,
"step": 42
},
{
"epoch": 0.12,
"learning_rate": 8.6e-05,
"loss": 0.6062,
"step": 43
},
{
"epoch": 0.12,
"learning_rate": 8.8e-05,
"loss": 0.5791,
"step": 44
},
{
"epoch": 0.13,
"learning_rate": 8.999999999999999e-05,
"loss": 0.6008,
"step": 45
},
{
"epoch": 0.13,
"learning_rate": 9.199999999999999e-05,
"loss": 0.5143,
"step": 46
},
{
"epoch": 0.13,
"learning_rate": 9.4e-05,
"loss": 0.5329,
"step": 47
},
{
"epoch": 0.13,
"learning_rate": 9.599999999999999e-05,
"loss": 0.6973,
"step": 48
},
{
"epoch": 0.14,
"learning_rate": 9.799999999999998e-05,
"loss": 0.615,
"step": 49
},
{
"epoch": 0.14,
"learning_rate": 9.999999999999999e-05,
"loss": 0.6362,
"step": 50
},
{
"epoch": 0.14,
"eval_loss": 0.6042094826698303,
"eval_runtime": 26.344,
"eval_samples_per_second": 7.592,
"eval_steps_per_second": 1.898,
"step": 50
},
{
"dharma_eval_accuracy": 0.4825425901342051,
"dharma_eval_accuracy_ARC-Challenge": 0.6666666666666666,
"dharma_eval_accuracy_ARC-Easy": 0.6851851851851852,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.42592592592592593,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.45901639344262296,
"dharma_eval_accuracy_openbookqa": 0.3333333333333333,
"dharma_eval_accuracy_truthful_qa": 0.35185185185185186,
"dharma_eval_accuracy_winogrande": 0.46296296296296297,
"dharma_loss": 2.1548793907165527,
"epoch": 0.14,
"step": 50
},
{
"epoch": 0.14,
"learning_rate": 0.000102,
"loss": 0.6567,
"step": 51
},
{
"epoch": 0.15,
"learning_rate": 0.000104,
"loss": 0.9786,
"step": 52
},
{
"epoch": 0.15,
"learning_rate": 0.00010599999999999999,
"loss": 0.6146,
"step": 53
},
{
"epoch": 0.15,
"learning_rate": 0.00010799999999999998,
"loss": 0.6402,
"step": 54
},
{
"epoch": 0.15,
"learning_rate": 0.00010999999999999998,
"loss": 0.5906,
"step": 55
},
{
"epoch": 0.16,
"learning_rate": 0.000112,
"loss": 0.6441,
"step": 56
},
{
"epoch": 0.16,
"learning_rate": 0.00011399999999999999,
"loss": 0.6887,
"step": 57
},
{
"epoch": 0.16,
"learning_rate": 0.00011599999999999999,
"loss": 0.6283,
"step": 58
},
{
"epoch": 0.17,
"learning_rate": 0.00011799999999999998,
"loss": 0.5929,
"step": 59
},
{
"epoch": 0.17,
"learning_rate": 0.00011999999999999999,
"loss": 0.6939,
"step": 60
},
{
"epoch": 0.17,
"learning_rate": 0.000122,
"loss": 0.5535,
"step": 61
},
{
"epoch": 0.17,
"learning_rate": 0.00012399999999999998,
"loss": 0.5457,
"step": 62
},
{
"epoch": 0.18,
"learning_rate": 0.00012599999999999997,
"loss": 0.6425,
"step": 63
},
{
"epoch": 0.18,
"learning_rate": 0.000128,
"loss": 0.6855,
"step": 64
},
{
"epoch": 0.18,
"learning_rate": 0.00013,
"loss": 0.5396,
"step": 65
},
{
"epoch": 0.19,
"learning_rate": 0.00013199999999999998,
"loss": 0.6307,
"step": 66
},
{
"epoch": 0.19,
"learning_rate": 0.00013399999999999998,
"loss": 0.6396,
"step": 67
},
{
"epoch": 0.19,
"learning_rate": 0.00013599999999999997,
"loss": 0.5727,
"step": 68
},
{
"epoch": 0.19,
"learning_rate": 0.000138,
"loss": 0.4812,
"step": 69
},
{
"epoch": 0.2,
"learning_rate": 0.00014,
"loss": 0.5658,
"step": 70
},
{
"epoch": 0.2,
"learning_rate": 0.00014199999999999998,
"loss": 0.6314,
"step": 71
},
{
"epoch": 0.2,
"learning_rate": 0.00014399999999999998,
"loss": 0.6175,
"step": 72
},
{
"epoch": 0.2,
"learning_rate": 0.000146,
"loss": 0.5468,
"step": 73
},
{
"epoch": 0.21,
"learning_rate": 0.000148,
"loss": 0.6233,
"step": 74
},
{
"epoch": 0.21,
"learning_rate": 0.00015,
"loss": 0.5276,
"step": 75
},
{
"epoch": 0.21,
"eval_loss": 0.5642657279968262,
"eval_runtime": 26.2701,
"eval_samples_per_second": 7.613,
"eval_steps_per_second": 1.903,
"step": 75
},
{
"dharma_eval_accuracy": 0.45038002097061713,
"dharma_eval_accuracy_ARC-Challenge": 0.6111111111111112,
"dharma_eval_accuracy_ARC-Easy": 0.6111111111111112,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.42592592592592593,
"dharma_eval_accuracy_agieval": 0.2033898305084746,
"dharma_eval_accuracy_bigbench": 0.4426229508196721,
"dharma_eval_accuracy_openbookqa": 0.14814814814814814,
"dharma_eval_accuracy_truthful_qa": 0.42592592592592593,
"dharma_eval_accuracy_winogrande": 0.48148148148148145,
"dharma_loss": 2.056876979827881,
"epoch": 0.21,
"step": 75
},
{
"epoch": 0.21,
"learning_rate": 0.000152,
"loss": 0.5308,
"step": 76
},
{
"epoch": 0.22,
"learning_rate": 0.00015399999999999998,
"loss": 0.6546,
"step": 77
},
{
"epoch": 0.22,
"learning_rate": 0.000156,
"loss": 0.6451,
"step": 78
},
{
"epoch": 0.22,
"learning_rate": 0.00015799999999999996,
"loss": 0.5255,
"step": 79
},
{
"epoch": 0.22,
"learning_rate": 0.00015999999999999999,
"loss": 0.5629,
"step": 80
},
{
"epoch": 0.23,
"learning_rate": 0.000162,
"loss": 0.6642,
"step": 81
},
{
"epoch": 0.23,
"learning_rate": 0.00016399999999999997,
"loss": 0.6393,
"step": 82
},
{
"epoch": 0.23,
"learning_rate": 0.000166,
"loss": 0.5073,
"step": 83
},
{
"epoch": 0.24,
"learning_rate": 0.000168,
"loss": 0.5628,
"step": 84
},
{
"epoch": 0.24,
"learning_rate": 0.00016999999999999999,
"loss": 0.5566,
"step": 85
},
{
"epoch": 0.24,
"learning_rate": 0.000172,
"loss": 0.4653,
"step": 86
},
{
"epoch": 0.24,
"learning_rate": 0.00017399999999999997,
"loss": 0.5322,
"step": 87
},
{
"epoch": 0.25,
"learning_rate": 0.000176,
"loss": 0.7131,
"step": 88
},
{
"epoch": 0.25,
"learning_rate": 0.000178,
"loss": 0.5586,
"step": 89
},
{
"epoch": 0.25,
"learning_rate": 0.00017999999999999998,
"loss": 0.7246,
"step": 90
},
{
"epoch": 0.26,
"learning_rate": 0.00018199999999999998,
"loss": 0.6527,
"step": 91
},
{
"epoch": 0.26,
"learning_rate": 0.00018399999999999997,
"loss": 0.8067,
"step": 92
},
{
"epoch": 0.26,
"learning_rate": 0.000186,
"loss": 0.6427,
"step": 93
},
{
"epoch": 0.26,
"learning_rate": 0.000188,
"loss": 0.6309,
"step": 94
},
{
"epoch": 0.27,
"learning_rate": 0.00018999999999999998,
"loss": 0.6111,
"step": 95
},
{
"epoch": 0.27,
"learning_rate": 0.00019199999999999998,
"loss": 0.5911,
"step": 96
},
{
"epoch": 0.27,
"learning_rate": 0.00019399999999999997,
"loss": 0.6372,
"step": 97
},
{
"epoch": 0.28,
"learning_rate": 0.00019599999999999997,
"loss": 0.5664,
"step": 98
},
{
"epoch": 0.28,
"learning_rate": 0.000198,
"loss": 0.5618,
"step": 99
},
{
"epoch": 0.28,
"learning_rate": 0.00019999999999999998,
"loss": 0.7154,
"step": 100
},
{
"epoch": 0.28,
"eval_loss": 0.5470997095108032,
"eval_runtime": 26.3144,
"eval_samples_per_second": 7.6,
"eval_steps_per_second": 1.9,
"step": 100
},
{
"dharma_eval_accuracy": 0.398478315307064,
"dharma_eval_accuracy_ARC-Challenge": 0.5740740740740741,
"dharma_eval_accuracy_ARC-Easy": 0.48148148148148145,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.4074074074074074,
"dharma_eval_accuracy_agieval": 0.2711864406779661,
"dharma_eval_accuracy_bigbench": 0.4262295081967213,
"dharma_eval_accuracy_openbookqa": 0.07407407407407407,
"dharma_eval_accuracy_truthful_qa": 0.3888888888888889,
"dharma_eval_accuracy_winogrande": 0.25925925925925924,
"dharma_loss": 2.5759267053604127,
"epoch": 0.28,
"step": 100
},
{
"epoch": 0.28,
"learning_rate": 0.00020199999999999998,
"loss": 0.5837,
"step": 101
},
{
"epoch": 0.29,
"learning_rate": 0.000204,
"loss": 0.5707,
"step": 102
},
{
"epoch": 0.29,
"learning_rate": 0.00020599999999999997,
"loss": 0.7798,
"step": 103
},
{
"epoch": 0.29,
"learning_rate": 0.000208,
"loss": 0.4922,
"step": 104
},
{
"epoch": 0.29,
"learning_rate": 0.00020999999999999998,
"loss": 0.7033,
"step": 105
},
{
"epoch": 0.3,
"learning_rate": 0.00021199999999999998,
"loss": 0.7297,
"step": 106
},
{
"epoch": 0.3,
"learning_rate": 0.000214,
"loss": 0.5376,
"step": 107
},
{
"epoch": 0.3,
"learning_rate": 0.00021599999999999996,
"loss": 0.5372,
"step": 108
},
{
"epoch": 0.31,
"learning_rate": 0.00021799999999999999,
"loss": 0.7453,
"step": 109
},
{
"epoch": 0.31,
"learning_rate": 0.00021999999999999995,
"loss": 0.4582,
"step": 110
},
{
"epoch": 0.31,
"learning_rate": 0.00022199999999999998,
"loss": 0.54,
"step": 111
},
{
"epoch": 0.31,
"learning_rate": 0.000224,
"loss": 0.643,
"step": 112
},
{
"epoch": 0.32,
"learning_rate": 0.00022599999999999996,
"loss": 0.5972,
"step": 113
},
{
"epoch": 0.32,
"learning_rate": 0.00022799999999999999,
"loss": 0.5765,
"step": 114
},
{
"epoch": 0.32,
"learning_rate": 0.00023,
"loss": 0.6007,
"step": 115
},
{
"epoch": 0.33,
"learning_rate": 0.00023199999999999997,
"loss": 0.5343,
"step": 116
},
{
"epoch": 0.33,
"learning_rate": 0.000234,
"loss": 0.8235,
"step": 117
},
{
"epoch": 0.33,
"learning_rate": 0.00023599999999999996,
"loss": 0.6687,
"step": 118
},
{
"epoch": 0.33,
"learning_rate": 0.00023799999999999998,
"loss": 0.6696,
"step": 119
},
{
"epoch": 0.34,
"learning_rate": 0.00023999999999999998,
"loss": 0.77,
"step": 120
},
{
"epoch": 0.34,
"learning_rate": 0.00024199999999999997,
"loss": 0.5792,
"step": 121
},
{
"epoch": 0.34,
"learning_rate": 0.000244,
"loss": 0.3653,
"step": 122
},
{
"epoch": 0.35,
"learning_rate": 0.00024599999999999996,
"loss": 0.5344,
"step": 123
},
{
"epoch": 0.35,
"learning_rate": 0.00024799999999999996,
"loss": 0.5772,
"step": 124
},
{
"epoch": 0.35,
"learning_rate": 0.00025,
"loss": 0.7189,
"step": 125
},
{
"epoch": 0.35,
"eval_loss": 0.5514405369758606,
"eval_runtime": 26.3447,
"eval_samples_per_second": 7.592,
"eval_steps_per_second": 1.898,
"step": 125
},
{
"dharma_eval_accuracy": 0.3371043854202756,
"dharma_eval_accuracy_ARC-Challenge": 0.37037037037037035,
"dharma_eval_accuracy_ARC-Easy": 0.42592592592592593,
"dharma_eval_accuracy_BoolQ": 0.37037037037037035,
"dharma_eval_accuracy_MMLU": 0.35185185185185186,
"dharma_eval_accuracy_agieval": 0.22033898305084745,
"dharma_eval_accuracy_bigbench": 0.29508196721311475,
"dharma_eval_accuracy_openbookqa": 0.18518518518518517,
"dharma_eval_accuracy_truthful_qa": 0.3888888888888889,
"dharma_eval_accuracy_winogrande": 0.42592592592592593,
"dharma_loss": 2.1871530780792234,
"epoch": 0.35,
"step": 125
},
{
"epoch": 0.35,
"learning_rate": 0.00025199999999999995,
"loss": 0.5891,
"step": 126
},
{
"epoch": 0.36,
"learning_rate": 0.000254,
"loss": 0.8225,
"step": 127
},
{
"epoch": 0.36,
"learning_rate": 0.000256,
"loss": 0.6104,
"step": 128
},
{
"epoch": 0.36,
"learning_rate": 0.000258,
"loss": 0.6571,
"step": 129
},
{
"epoch": 0.36,
"learning_rate": 0.00026,
"loss": 0.7562,
"step": 130
},
{
"epoch": 0.37,
"learning_rate": 0.00026199999999999997,
"loss": 0.7172,
"step": 131
},
{
"epoch": 0.37,
"learning_rate": 0.00026399999999999997,
"loss": 0.4353,
"step": 132
},
{
"epoch": 0.37,
"learning_rate": 0.000266,
"loss": 0.5576,
"step": 133
},
{
"epoch": 0.38,
"learning_rate": 0.00026799999999999995,
"loss": 0.553,
"step": 134
},
{
"epoch": 0.38,
"learning_rate": 0.00027,
"loss": 0.5062,
"step": 135
},
{
"epoch": 0.38,
"learning_rate": 0.00027199999999999994,
"loss": 0.4819,
"step": 136
},
{
"epoch": 0.38,
"learning_rate": 0.000274,
"loss": 0.6195,
"step": 137
},
{
"epoch": 0.39,
"learning_rate": 0.000276,
"loss": 0.7592,
"step": 138
},
{
"epoch": 0.39,
"learning_rate": 0.000278,
"loss": 0.6118,
"step": 139
},
{
"epoch": 0.39,
"learning_rate": 0.00028,
"loss": 0.7897,
"step": 140
},
{
"epoch": 0.4,
"learning_rate": 0.00028199999999999997,
"loss": 0.5879,
"step": 141
},
{
"epoch": 0.4,
"learning_rate": 0.00028399999999999996,
"loss": 0.6055,
"step": 142
},
{
"epoch": 0.4,
"learning_rate": 0.00028599999999999996,
"loss": 0.541,
"step": 143
},
{
"epoch": 0.4,
"learning_rate": 0.00028799999999999995,
"loss": 0.7665,
"step": 144
},
{
"epoch": 0.41,
"learning_rate": 0.00029,
"loss": 0.612,
"step": 145
},
{
"epoch": 0.41,
"learning_rate": 0.000292,
"loss": 0.5397,
"step": 146
},
{
"epoch": 0.41,
"learning_rate": 0.000294,
"loss": 0.5727,
"step": 147
},
{
"epoch": 0.42,
"learning_rate": 0.000296,
"loss": 1.1184,
"step": 148
},
{
"epoch": 0.42,
"learning_rate": 0.000298,
"loss": 0.5912,
"step": 149
},
{
"epoch": 0.42,
"learning_rate": 0.0003,
"loss": 0.5581,
"step": 150
},
{
"epoch": 0.42,
"eval_loss": 0.5423073768615723,
"eval_runtime": 26.3108,
"eval_samples_per_second": 7.601,
"eval_steps_per_second": 1.9,
"step": 150
},
{
"dharma_eval_accuracy": 0.3860708907481159,
"dharma_eval_accuracy_ARC-Challenge": 0.42592592592592593,
"dharma_eval_accuracy_ARC-Easy": 0.4444444444444444,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.37037037037037035,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.4426229508196721,
"dharma_eval_accuracy_openbookqa": 0.18518518518518517,
"dharma_eval_accuracy_truthful_qa": 0.2962962962962963,
"dharma_eval_accuracy_winogrande": 0.35185185185185186,
"dharma_loss": 2.471666145801544,
"epoch": 0.42,
"step": 150
},
{
"epoch": 0.42,
"learning_rate": 0.0002999999685313931,
"loss": 0.656,
"step": 151
},
{
"epoch": 0.43,
"learning_rate": 0.00029999987412558584,
"loss": 0.5075,
"step": 152
},
{
"epoch": 0.43,
"learning_rate": 0.0002999997167826177,
"loss": 0.4696,
"step": 153
},
{
"epoch": 0.43,
"learning_rate": 0.00029999949650255474,
"loss": 0.7349,
"step": 154
},
{
"epoch": 0.44,
"learning_rate": 0.0002999992132854894,
"loss": 0.5935,
"step": 155
},
{
"epoch": 0.44,
"learning_rate": 0.0002999988671315404,
"loss": 0.4127,
"step": 156
},
{
"epoch": 0.44,
"learning_rate": 0.0002999984580408531,
"loss": 0.5503,
"step": 157
},
{
"epoch": 0.44,
"learning_rate": 0.00029999798601359915,
"loss": 0.5742,
"step": 158
},
{
"epoch": 0.45,
"learning_rate": 0.00029999745104997654,
"loss": 0.5606,
"step": 159
},
{
"epoch": 0.45,
"learning_rate": 0.0002999968531502098,
"loss": 0.6209,
"step": 160
},
{
"epoch": 0.45,
"learning_rate": 0.0002999961923145497,
"loss": 0.4977,
"step": 161
},
{
"epoch": 0.45,
"learning_rate": 0.0002999954685432736,
"loss": 0.7474,
"step": 162
},
{
"epoch": 0.46,
"learning_rate": 0.0002999946818366852,
"loss": 0.574,
"step": 163
},
{
"epoch": 0.46,
"learning_rate": 0.00029999383219511444,
"loss": 0.3991,
"step": 164
},
{
"epoch": 0.46,
"learning_rate": 0.0002999929196189179,
"loss": 0.6335,
"step": 165
},
{
"epoch": 0.47,
"learning_rate": 0.0002999919441084786,
"loss": 0.6144,
"step": 166
},
{
"epoch": 0.47,
"learning_rate": 0.0002999909056642057,
"loss": 0.4695,
"step": 167
},
{
"epoch": 0.47,
"learning_rate": 0.00029998980428653496,
"loss": 0.5322,
"step": 168
},
{
"epoch": 0.47,
"learning_rate": 0.00029998863997592843,
"loss": 0.6111,
"step": 169
},
{
"epoch": 0.48,
"learning_rate": 0.00029998741273287477,
"loss": 0.5517,
"step": 170
},
{
"epoch": 0.48,
"learning_rate": 0.0002999861225578888,
"loss": 0.5407,
"step": 171
},
{
"epoch": 0.48,
"learning_rate": 0.00029998476945151183,
"loss": 0.4942,
"step": 172
},
{
"epoch": 0.49,
"learning_rate": 0.00029998335341431174,
"loss": 0.4563,
"step": 173
},
{
"epoch": 0.49,
"learning_rate": 0.0002999818744468825,
"loss": 0.4406,
"step": 174
},
{
"epoch": 0.49,
"learning_rate": 0.0002999803325498448,
"loss": 0.5319,
"step": 175
},
{
"epoch": 0.49,
"eval_loss": 0.5312690734863281,
"eval_runtime": 26.3302,
"eval_samples_per_second": 7.596,
"eval_steps_per_second": 1.899,
"step": 175
},
{
"dharma_eval_accuracy": 0.40200352864364475,
"dharma_eval_accuracy_ARC-Challenge": 0.42592592592592593,
"dharma_eval_accuracy_ARC-Easy": 0.5,
"dharma_eval_accuracy_BoolQ": 0.7592592592592593,
"dharma_eval_accuracy_MMLU": 0.4074074074074074,
"dharma_eval_accuracy_agieval": 0.22033898305084745,
"dharma_eval_accuracy_bigbench": 0.36065573770491804,
"dharma_eval_accuracy_openbookqa": 0.2222222222222222,
"dharma_eval_accuracy_truthful_qa": 0.2962962962962963,
"dharma_eval_accuracy_winogrande": 0.42592592592592593,
"dharma_loss": 2.2439456572532652,
"epoch": 0.49,
"step": 175
},
{
"epoch": 0.49,
"learning_rate": 0.0002999787277238455,
"loss": 0.7389,
"step": 176
},
{
"epoch": 0.5,
"learning_rate": 0.000299977059969558,
"loss": 0.6848,
"step": 177
},
{
"epoch": 0.5,
"learning_rate": 0.00029997532928768204,
"loss": 0.5144,
"step": 178
},
{
"epoch": 0.5,
"learning_rate": 0.00029997353567894384,
"loss": 0.5707,
"step": 179
},
{
"epoch": 0.51,
"learning_rate": 0.0002999716791440959,
"loss": 0.668,
"step": 180
},
{
"epoch": 0.51,
"learning_rate": 0.00029996975968391715,
"loss": 0.4851,
"step": 181
},
{
"epoch": 0.51,
"learning_rate": 0.000299967777299213,
"loss": 0.725,
"step": 182
},
{
"epoch": 0.51,
"learning_rate": 0.0002999657319908153,
"loss": 0.6281,
"step": 183
},
{
"epoch": 0.52,
"learning_rate": 0.0002999636237595821,
"loss": 0.6357,
"step": 184
},
{
"epoch": 0.52,
"learning_rate": 0.00029996145260639806,
"loss": 0.5517,
"step": 185
},
{
"epoch": 0.52,
"learning_rate": 0.0002999592185321741,
"loss": 0.6792,
"step": 186
},
{
"epoch": 0.52,
"learning_rate": 0.0002999569215378477,
"loss": 0.619,
"step": 187
},
{
"epoch": 0.53,
"learning_rate": 0.0002999545616243825,
"loss": 0.5693,
"step": 188
},
{
"epoch": 0.53,
"learning_rate": 0.00029995213879276876,
"loss": 0.5759,
"step": 189
},
{
"epoch": 0.53,
"learning_rate": 0.000299949653044023,
"loss": 0.4382,
"step": 190
},
{
"epoch": 0.54,
"learning_rate": 0.00029994710437918824,
"loss": 0.7183,
"step": 191
},
{
"epoch": 0.54,
"learning_rate": 0.0002999444927993338,
"loss": 0.7328,
"step": 192
},
{
"epoch": 0.54,
"learning_rate": 0.00029994181830555555,
"loss": 0.645,
"step": 193
},
{
"epoch": 0.54,
"learning_rate": 0.00029993908089897555,
"loss": 0.5784,
"step": 194
},
{
"epoch": 0.55,
"learning_rate": 0.00029993628058074245,
"loss": 0.4547,
"step": 195
},
{
"epoch": 0.55,
"learning_rate": 0.00029993341735203114,
"loss": 0.5243,
"step": 196
},
{
"epoch": 0.55,
"learning_rate": 0.00029993049121404303,
"loss": 0.5059,
"step": 197
},
{
"epoch": 0.56,
"learning_rate": 0.0002999275021680058,
"loss": 0.6225,
"step": 198
},
{
"epoch": 0.56,
"learning_rate": 0.0002999244502151737,
"loss": 0.5956,
"step": 199
},
{
"epoch": 0.56,
"learning_rate": 0.00029992133535682725,
"loss": 0.553,
"step": 200
},
{
"epoch": 0.56,
"eval_loss": 0.5266188979148865,
"eval_runtime": 26.3,
"eval_samples_per_second": 7.605,
"eval_steps_per_second": 1.901,
"step": 200
},
{
"dharma_eval_accuracy": 0.4282802607491564,
"dharma_eval_accuracy_ARC-Challenge": 0.5185185185185185,
"dharma_eval_accuracy_ARC-Easy": 0.5740740740740741,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.4074074074074074,
"dharma_eval_accuracy_agieval": 0.22033898305084745,
"dharma_eval_accuracy_bigbench": 0.39344262295081966,
"dharma_eval_accuracy_openbookqa": 0.14814814814814814,
"dharma_eval_accuracy_truthful_qa": 0.3148148148148148,
"dharma_eval_accuracy_winogrande": 0.5740740740740741,
"dharma_loss": 2.507861177444458,
"epoch": 0.56,
"step": 200
},
{
"epoch": 0.56,
"learning_rate": 0.0002999181575942733,
"loss": 0.4542,
"step": 201
},
{
"epoch": 0.57,
"learning_rate": 0.0002999149169288452,
"loss": 0.568,
"step": 202
},
{
"epoch": 0.57,
"learning_rate": 0.0002999116133619028,
"loss": 0.462,
"step": 203
},
{
"epoch": 0.57,
"learning_rate": 0.0002999082468948321,
"loss": 0.4854,
"step": 204
},
{
"epoch": 0.58,
"learning_rate": 0.00029990481752904563,
"loss": 0.474,
"step": 205
},
{
"epoch": 0.58,
"learning_rate": 0.0002999013252659823,
"loss": 0.5331,
"step": 206
},
{
"epoch": 0.58,
"learning_rate": 0.0002998977701071074,
"loss": 0.606,
"step": 207
},
{
"epoch": 0.58,
"learning_rate": 0.00029989415205391263,
"loss": 0.5452,
"step": 208
},
{
"epoch": 0.59,
"learning_rate": 0.00029989047110791595,
"loss": 0.6128,
"step": 209
},
{
"epoch": 0.59,
"learning_rate": 0.0002998867272706619,
"loss": 0.7108,
"step": 210
},
{
"epoch": 0.59,
"learning_rate": 0.0002998829205437214,
"loss": 0.5557,
"step": 211
},
{
"epoch": 0.6,
"learning_rate": 0.0002998790509286915,
"loss": 0.556,
"step": 212
},
{
"epoch": 0.6,
"learning_rate": 0.000299875118427196,
"loss": 0.6482,
"step": 213
},
{
"epoch": 0.6,
"learning_rate": 0.00029987112304088483,
"loss": 0.6108,
"step": 214
},
{
"epoch": 0.6,
"learning_rate": 0.0002998670647714343,
"loss": 0.7099,
"step": 215
},
{
"epoch": 0.61,
"learning_rate": 0.0002998629436205473,
"loss": 0.5939,
"step": 216
},
{
"epoch": 0.61,
"learning_rate": 0.00029985875958995296,
"loss": 0.6238,
"step": 217
},
{
"epoch": 0.61,
"learning_rate": 0.00029985451268140683,
"loss": 0.5144,
"step": 218
},
{
"epoch": 0.61,
"learning_rate": 0.00029985020289669077,
"loss": 0.7351,
"step": 219
},
{
"epoch": 0.62,
"learning_rate": 0.00029984583023761317,
"loss": 0.6136,
"step": 220
},
{
"epoch": 0.62,
"learning_rate": 0.0002998413947060086,
"loss": 0.7544,
"step": 221
},
{
"epoch": 0.62,
"learning_rate": 0.00029983689630373825,
"loss": 0.5588,
"step": 222
},
{
"epoch": 0.63,
"learning_rate": 0.0002998323350326895,
"loss": 0.5725,
"step": 223
},
{
"epoch": 0.63,
"learning_rate": 0.0002998277108947762,
"loss": 0.6262,
"step": 224
},
{
"epoch": 0.63,
"learning_rate": 0.00029982302389193856,
"loss": 0.6217,
"step": 225
},
{
"epoch": 0.63,
"eval_loss": 0.527147114276886,
"eval_runtime": 26.3496,
"eval_samples_per_second": 7.59,
"eval_steps_per_second": 1.898,
"step": 225
},
{
"dharma_eval_accuracy": 0.39483532805751936,
"dharma_eval_accuracy_ARC-Challenge": 0.48148148148148145,
"dharma_eval_accuracy_ARC-Easy": 0.6111111111111112,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.5185185185185185,
"dharma_eval_accuracy_agieval": 0.2711864406779661,
"dharma_eval_accuracy_bigbench": 0.39344262295081966,
"dharma_eval_accuracy_openbookqa": 0.037037037037037035,
"dharma_eval_accuracy_truthful_qa": 0.18518518518518517,
"dharma_eval_accuracy_winogrande": 0.35185185185185186,
"dharma_loss": 2.334738277435303,
"epoch": 0.63,
"step": 225
},
{
"epoch": 0.63,
"learning_rate": 0.0002998182740261432,
"loss": 0.6376,
"step": 226
},
{
"epoch": 0.64,
"learning_rate": 0.0002998134612993829,
"loss": 0.5943,
"step": 227
},
{
"epoch": 0.64,
"learning_rate": 0.0002998085857136772,
"loss": 0.4096,
"step": 228
},
{
"epoch": 0.64,
"learning_rate": 0.00029980364727107166,
"loss": 0.5825,
"step": 229
},
{
"epoch": 0.65,
"learning_rate": 0.00029979864597363845,
"loss": 0.5372,
"step": 230
},
{
"epoch": 0.65,
"learning_rate": 0.0002997935818234759,
"loss": 0.5376,
"step": 231
},
{
"epoch": 0.65,
"learning_rate": 0.00029978845482270906,
"loss": 0.4369,
"step": 232
},
{
"epoch": 0.65,
"learning_rate": 0.0002997832649734889,
"loss": 0.5717,
"step": 233
},
{
"epoch": 0.66,
"learning_rate": 0.0002997780122779931,
"loss": 0.6981,
"step": 234
},
{
"epoch": 0.66,
"learning_rate": 0.0002997726967384255,
"loss": 0.5764,
"step": 235
},
{
"epoch": 0.66,
"learning_rate": 0.0002997673183570165,
"loss": 0.5937,
"step": 236
},
{
"epoch": 0.67,
"learning_rate": 0.00029976187713602273,
"loss": 0.2708,
"step": 237
},
{
"epoch": 0.67,
"learning_rate": 0.0002997563730777273,
"loss": 0.6383,
"step": 238
},
{
"epoch": 0.67,
"learning_rate": 0.00029975080618443946,
"loss": 0.491,
"step": 239
},
{
"epoch": 0.67,
"learning_rate": 0.00029974517645849503,
"loss": 0.5049,
"step": 240
},
{
"epoch": 0.68,
"learning_rate": 0.0002997394839022562,
"loss": 0.5768,
"step": 241
},
{
"epoch": 0.68,
"learning_rate": 0.00029973372851811145,
"loss": 0.566,
"step": 242
},
{
"epoch": 0.68,
"learning_rate": 0.00029972791030847553,
"loss": 0.7687,
"step": 243
},
{
"epoch": 0.68,
"learning_rate": 0.0002997220292757898,
"loss": 0.6399,
"step": 244
},
{
"epoch": 0.69,
"learning_rate": 0.0002997160854225217,
"loss": 0.545,
"step": 245
},
{
"epoch": 0.69,
"learning_rate": 0.00029971007875116527,
"loss": 0.6108,
"step": 246
},
{
"epoch": 0.69,
"learning_rate": 0.0002997040092642407,
"loss": 0.6478,
"step": 247
},
{
"epoch": 0.7,
"learning_rate": 0.0002996978769642947,
"loss": 0.5959,
"step": 248
},
{
"epoch": 0.7,
"learning_rate": 0.0002996916818539003,
"loss": 0.5995,
"step": 249
},
{
"epoch": 0.7,
"learning_rate": 0.0002996854239356567,
"loss": 0.5721,
"step": 250
},
{
"epoch": 0.7,
"eval_loss": 0.5253523588180542,
"eval_runtime": 26.3162,
"eval_samples_per_second": 7.6,
"eval_steps_per_second": 1.9,
"step": 250
},
{
"dharma_eval_accuracy": 0.35750614311016893,
"dharma_eval_accuracy_ARC-Challenge": 0.48148148148148145,
"dharma_eval_accuracy_ARC-Easy": 0.5370370370370371,
"dharma_eval_accuracy_BoolQ": 0.6851851851851852,
"dharma_eval_accuracy_MMLU": 0.3888888888888889,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.29508196721311475,
"dharma_eval_accuracy_openbookqa": 0.018518518518518517,
"dharma_eval_accuracy_truthful_qa": 0.2222222222222222,
"dharma_eval_accuracy_winogrande": 0.35185185185185186,
"dharma_loss": 3.018469988822937,
"epoch": 0.7,
"step": 250
},
{
"epoch": 0.7,
"learning_rate": 0.0002996791032121898,
"loss": 0.3632,
"step": 251
},
{
"epoch": 0.71,
"learning_rate": 0.0002996727196861515,
"loss": 0.5306,
"step": 252
},
{
"epoch": 0.71,
"learning_rate": 0.00029966627336022034,
"loss": 0.4332,
"step": 253
},
{
"epoch": 0.71,
"learning_rate": 0.000299659764237101,
"loss": 0.4484,
"step": 254
},
{
"epoch": 0.72,
"learning_rate": 0.00029965319231952455,
"loss": 0.5353,
"step": 255
},
{
"epoch": 0.72,
"learning_rate": 0.00029964655761024854,
"loss": 0.6232,
"step": 256
},
{
"epoch": 0.72,
"learning_rate": 0.00029963986011205675,
"loss": 0.672,
"step": 257
},
{
"epoch": 0.72,
"learning_rate": 0.0002996330998277593,
"loss": 0.5865,
"step": 258
},
{
"epoch": 0.73,
"learning_rate": 0.0002996262767601926,
"loss": 0.8846,
"step": 259
},
{
"epoch": 0.73,
"learning_rate": 0.0002996193909122197,
"loss": 0.4847,
"step": 260
},
{
"epoch": 0.73,
"learning_rate": 0.00029961244228672953,
"loss": 0.5886,
"step": 261
},
{
"epoch": 0.74,
"learning_rate": 0.0002996054308866378,
"loss": 0.6324,
"step": 262
},
{
"epoch": 0.74,
"learning_rate": 0.0002995983567148862,
"loss": 0.4812,
"step": 263
},
{
"epoch": 0.74,
"learning_rate": 0.000299591219774443,
"loss": 0.6549,
"step": 264
},
{
"epoch": 0.74,
"learning_rate": 0.00029958402006830274,
"loss": 0.4712,
"step": 265
},
{
"epoch": 0.75,
"learning_rate": 0.0002995767575994863,
"loss": 0.452,
"step": 266
},
{
"epoch": 0.75,
"learning_rate": 0.00029956943237104084,
"loss": 0.5716,
"step": 267
},
{
"epoch": 0.75,
"learning_rate": 0.0002995620443860399,
"loss": 0.6757,
"step": 268
},
{
"epoch": 0.76,
"learning_rate": 0.0002995545936475833,
"loss": 0.5403,
"step": 269
},
{
"epoch": 0.76,
"learning_rate": 0.0002995470801587973,
"loss": 0.6785,
"step": 270
},
{
"epoch": 0.76,
"learning_rate": 0.0002995395039228343,
"loss": 0.6292,
"step": 271
},
{
"epoch": 0.76,
"learning_rate": 0.00029953186494287336,
"loss": 0.5181,
"step": 272
},
{
"epoch": 0.77,
"learning_rate": 0.0002995241632221195,
"loss": 0.5219,
"step": 273
},
{
"epoch": 0.77,
"learning_rate": 0.00029951639876380425,
"loss": 0.5475,
"step": 274
},
{
"epoch": 0.77,
"learning_rate": 0.0002995085715711854,
"loss": 0.5605,
"step": 275
},
{
"epoch": 0.77,
"eval_loss": 0.5327661633491516,
"eval_runtime": 26.3174,
"eval_samples_per_second": 7.6,
"eval_steps_per_second": 1.9,
"step": 275
},
{
"dharma_eval_accuracy": 0.4329237545408704,
"dharma_eval_accuracy_ARC-Challenge": 0.5555555555555556,
"dharma_eval_accuracy_ARC-Easy": 0.6111111111111112,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.37037037037037035,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.47540983606557374,
"dharma_eval_accuracy_openbookqa": 0.09259259259259259,
"dharma_eval_accuracy_truthful_qa": 0.3148148148148148,
"dharma_eval_accuracy_winogrande": 0.5185185185185185,
"dharma_loss": 2.377507550239563,
"epoch": 0.77,
"step": 275
},
{
"epoch": 0.77,
"learning_rate": 0.0002995006816475471,
"loss": 0.5067,
"step": 276
},
{
"epoch": 0.78,
"learning_rate": 0.00029949272899619994,
"loss": 0.6751,
"step": 277
},
{
"epoch": 0.78,
"learning_rate": 0.0002994847136204805,
"loss": 0.5557,
"step": 278
},
{
"epoch": 0.78,
"learning_rate": 0.0002994766355237521,
"loss": 0.6013,
"step": 279
},
{
"epoch": 0.79,
"learning_rate": 0.0002994684947094039,
"loss": 0.4763,
"step": 280
},
{
"epoch": 0.79,
"learning_rate": 0.00029946029118085193,
"loss": 0.5,
"step": 281
},
{
"epoch": 0.79,
"learning_rate": 0.000299452024941538,
"loss": 0.4948,
"step": 282
},
{
"epoch": 0.79,
"learning_rate": 0.0002994436959949306,
"loss": 0.4705,
"step": 283
},
{
"epoch": 0.8,
"learning_rate": 0.0002994353043445244,
"loss": 0.8139,
"step": 284
},
{
"epoch": 0.8,
"learning_rate": 0.0002994268499938403,
"loss": 0.4713,
"step": 285
},
{
"epoch": 0.8,
"learning_rate": 0.0002994183329464256,
"loss": 0.5527,
"step": 286
},
{
"epoch": 0.81,
"learning_rate": 0.00029940975320585396,
"loss": 0.5425,
"step": 287
},
{
"epoch": 0.81,
"learning_rate": 0.00029940111077572526,
"loss": 0.5926,
"step": 288
},
{
"epoch": 0.81,
"learning_rate": 0.00029939240565966574,
"loss": 0.5987,
"step": 289
},
{
"epoch": 0.81,
"learning_rate": 0.00029938363786132774,
"loss": 0.6077,
"step": 290
},
{
"epoch": 0.82,
"learning_rate": 0.00029937480738439023,
"loss": 0.4822,
"step": 291
},
{
"epoch": 0.82,
"learning_rate": 0.00029936591423255826,
"loss": 0.6527,
"step": 292
},
{
"epoch": 0.82,
"learning_rate": 0.00029935695840956327,
"loss": 0.5724,
"step": 293
},
{
"epoch": 0.83,
"learning_rate": 0.00029934793991916295,
"loss": 0.6617,
"step": 294
},
{
"epoch": 0.83,
"learning_rate": 0.00029933885876514115,
"loss": 0.5795,
"step": 295
},
{
"epoch": 0.83,
"learning_rate": 0.0002993297149513083,
"loss": 0.5049,
"step": 296
},
{
"epoch": 0.83,
"learning_rate": 0.00029932050848150105,
"loss": 0.5826,
"step": 297
},
{
"epoch": 0.84,
"learning_rate": 0.0002993112393595821,
"loss": 0.4766,
"step": 298
},
{
"epoch": 0.84,
"learning_rate": 0.0002993019075894406,
"loss": 0.4514,
"step": 299
},
{
"epoch": 0.84,
"learning_rate": 0.0002992925131749921,
"loss": 0.5825,
"step": 300
},
{
"epoch": 0.84,
"eval_loss": 0.5207599401473999,
"eval_runtime": 26.3377,
"eval_samples_per_second": 7.594,
"eval_steps_per_second": 1.898,
"step": 300
},
{
"dharma_eval_accuracy": 0.42874678265681937,
"dharma_eval_accuracy_ARC-Challenge": 0.5,
"dharma_eval_accuracy_ARC-Easy": 0.6111111111111112,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.3333333333333333,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.4918032786885246,
"dharma_eval_accuracy_openbookqa": 0.09259259259259259,
"dharma_eval_accuracy_truthful_qa": 0.2962962962962963,
"dharma_eval_accuracy_winogrande": 0.5925925925925926,
"dharma_loss": 2.452057005882263,
"epoch": 0.84,
"step": 300
},
{
"epoch": 0.84,
"learning_rate": 0.00029928305612017823,
"loss": 0.4251,
"step": 301
},
{
"epoch": 0.85,
"learning_rate": 0.0002992735364289671,
"loss": 0.4762,
"step": 302
},
{
"epoch": 0.85,
"learning_rate": 0.0002992639541053528,
"loss": 0.4531,
"step": 303
},
{
"epoch": 0.85,
"learning_rate": 0.0002992543091533561,
"loss": 0.64,
"step": 304
},
{
"epoch": 0.86,
"learning_rate": 0.00029924460157702376,
"loss": 0.6483,
"step": 305
},
{
"epoch": 0.86,
"learning_rate": 0.0002992348313804289,
"loss": 0.5646,
"step": 306
},
{
"epoch": 0.86,
"learning_rate": 0.00029922499856767094,
"loss": 0.4947,
"step": 307
},
{
"epoch": 0.86,
"learning_rate": 0.00029921510314287545,
"loss": 0.4793,
"step": 308
},
{
"epoch": 0.87,
"learning_rate": 0.00029920514511019456,
"loss": 0.3957,
"step": 309
},
{
"epoch": 0.87,
"learning_rate": 0.00029919512447380625,
"loss": 0.6941,
"step": 310
},
{
"epoch": 0.87,
"learning_rate": 0.0002991850412379151,
"loss": 0.6013,
"step": 311
},
{
"epoch": 0.88,
"learning_rate": 0.0002991748954067519,
"loss": 0.6011,
"step": 312
},
{
"epoch": 0.88,
"learning_rate": 0.0002991646869845736,
"loss": 0.7579,
"step": 313
},
{
"epoch": 0.88,
"learning_rate": 0.0002991544159756634,
"loss": 0.4339,
"step": 314
},
{
"epoch": 0.88,
"learning_rate": 0.0002991440823843309,
"loss": 0.4666,
"step": 315
},
{
"epoch": 0.89,
"learning_rate": 0.0002991336862149119,
"loss": 0.5458,
"step": 316
},
{
"epoch": 0.89,
"learning_rate": 0.00029912322747176835,
"loss": 0.6499,
"step": 317
},
{
"epoch": 0.89,
"learning_rate": 0.0002991127061592887,
"loss": 0.7208,
"step": 318
},
{
"epoch": 0.9,
"learning_rate": 0.00029910212228188734,
"loss": 0.7648,
"step": 319
},
{
"epoch": 0.9,
"learning_rate": 0.0002990914758440051,
"loss": 0.9322,
"step": 320
},
{
"epoch": 0.9,
"learning_rate": 0.00029908076685010915,
"loss": 0.6769,
"step": 321
},
{
"epoch": 0.9,
"learning_rate": 0.0002990699953046926,
"loss": 0.6241,
"step": 322
},
{
"epoch": 0.91,
"learning_rate": 0.00029905916121227515,
"loss": 0.409,
"step": 323
},
{
"epoch": 0.91,
"learning_rate": 0.00029904826457740247,
"loss": 0.3138,
"step": 324
},
{
"epoch": 0.91,
"learning_rate": 0.00029903730540464666,
"loss": 0.5241,
"step": 325
},
{
"epoch": 0.91,
"eval_loss": 0.5201582908630371,
"eval_runtime": 26.3949,
"eval_samples_per_second": 7.577,
"eval_steps_per_second": 1.894,
"step": 325
},
{
"dharma_eval_accuracy": 0.3616831149942199,
"dharma_eval_accuracy_ARC-Challenge": 0.4444444444444444,
"dharma_eval_accuracy_ARC-Easy": 0.5,
"dharma_eval_accuracy_BoolQ": 0.7222222222222222,
"dharma_eval_accuracy_MMLU": 0.4444444444444444,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.2786885245901639,
"dharma_eval_accuracy_openbookqa": 0.05555555555555555,
"dharma_eval_accuracy_truthful_qa": 0.3888888888888889,
"dharma_eval_accuracy_winogrande": 0.16666666666666666,
"dharma_loss": 2.3925395412445067,
"epoch": 0.91,
"step": 325
},
{
"epoch": 0.92,
"learning_rate": 0.0002990262836986059,
"loss": 0.358,
"step": 326
},
{
"epoch": 0.92,
"learning_rate": 0.0002990151994639048,
"loss": 0.5683,
"step": 327
},
{
"epoch": 0.92,
"learning_rate": 0.000299004052705194,
"loss": 0.4744,
"step": 328
},
{
"epoch": 0.92,
"learning_rate": 0.00029899284342715054,
"loss": 0.6101,
"step": 329
},
{
"epoch": 0.93,
"learning_rate": 0.00029898157163447763,
"loss": 0.5381,
"step": 330
},
{
"epoch": 0.93,
"learning_rate": 0.0002989702373319047,
"loss": 0.5269,
"step": 331
},
{
"epoch": 0.93,
"learning_rate": 0.00029895884052418735,
"loss": 0.4993,
"step": 332
},
{
"epoch": 0.93,
"learning_rate": 0.00029894738121610755,
"loss": 0.5623,
"step": 333
},
{
"epoch": 0.94,
"learning_rate": 0.0002989358594124733,
"loss": 0.5988,
"step": 334
},
{
"epoch": 0.94,
"learning_rate": 0.0002989242751181191,
"loss": 0.5623,
"step": 335
},
{
"epoch": 0.94,
"learning_rate": 0.0002989126283379054,
"loss": 0.4471,
"step": 336
},
{
"epoch": 0.95,
"learning_rate": 0.000298900919076719,
"loss": 0.489,
"step": 337
},
{
"epoch": 0.95,
"learning_rate": 0.00029888914733947275,
"loss": 0.5404,
"step": 338
},
{
"epoch": 0.95,
"learning_rate": 0.00029887731313110613,
"loss": 0.5639,
"step": 339
},
{
"epoch": 0.95,
"learning_rate": 0.0002988654164565843,
"loss": 0.6034,
"step": 340
},
{
"epoch": 0.96,
"learning_rate": 0.00029885345732089905,
"loss": 0.5529,
"step": 341
},
{
"epoch": 0.96,
"learning_rate": 0.0002988414357290681,
"loss": 0.6584,
"step": 342
},
{
"epoch": 0.96,
"learning_rate": 0.0002988293516861356,
"loss": 0.5119,
"step": 343
},
{
"epoch": 0.97,
"learning_rate": 0.0002988172051971717,
"loss": 0.4797,
"step": 344
},
{
"epoch": 0.97,
"learning_rate": 0.0002988049962672728,
"loss": 0.619,
"step": 345
},
{
"epoch": 0.97,
"learning_rate": 0.0002987927249015616,
"loss": 0.621,
"step": 346
},
{
"epoch": 0.97,
"learning_rate": 0.00029878039110518704,
"loss": 0.6019,
"step": 347
},
{
"epoch": 0.98,
"learning_rate": 0.000298767994883324,
"loss": 0.5099,
"step": 348
},
{
"epoch": 0.98,
"learning_rate": 0.00029875553624117375,
"loss": 0.3973,
"step": 349
},
{
"epoch": 0.98,
"learning_rate": 0.00029874301518396376,
"loss": 0.6068,
"step": 350
},
{
"epoch": 0.98,
"eval_loss": 0.517635703086853,
"eval_runtime": 26.3545,
"eval_samples_per_second": 7.589,
"eval_steps_per_second": 1.897,
"step": 350
},
{
"dharma_eval_accuracy": 0.36985182212251455,
"dharma_eval_accuracy_ARC-Challenge": 0.37037037037037035,
"dharma_eval_accuracy_ARC-Easy": 0.4444444444444444,
"dharma_eval_accuracy_BoolQ": 0.6851851851851852,
"dharma_eval_accuracy_MMLU": 0.2962962962962963,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.29508196721311475,
"dharma_eval_accuracy_openbookqa": 0.09259259259259259,
"dharma_eval_accuracy_truthful_qa": 0.35185185185185186,
"dharma_eval_accuracy_winogrande": 0.5555555555555556,
"dharma_loss": 2.8203083801269533,
"epoch": 0.98,
"step": 350
},
{
"epoch": 0.99,
"learning_rate": 0.00029873043171694755,
"loss": 0.5346,
"step": 351
},
{
"epoch": 0.99,
"learning_rate": 0.0002987177858454049,
"loss": 0.3792,
"step": 352
},
{
"epoch": 0.99,
"learning_rate": 0.00029870507757464193,
"loss": 0.5227,
"step": 353
},
{
"epoch": 0.99,
"learning_rate": 0.0002986923069099906,
"loss": 0.4218,
"step": 354
},
{
"epoch": 1.0,
"learning_rate": 0.00029867947385680936,
"loss": 0.4866,
"step": 355
},
{
"epoch": 1.0,
"learning_rate": 0.00029866657842048274,
"loss": 0.4593,
"step": 356
},
{
"epoch": 1.0,
"learning_rate": 0.00029865362060642136,
"loss": 0.6367,
"step": 357
},
{
"epoch": 1.0,
"learning_rate": 0.0002986406004200621,
"loss": 0.5064,
"step": 358
},
{
"epoch": 1.01,
"learning_rate": 0.00029862751786686797,
"loss": 0.4206,
"step": 359
},
{
"epoch": 1.01,
"learning_rate": 0.0002986143729523282,
"loss": 0.6043,
"step": 360
},
{
"epoch": 1.01,
"learning_rate": 0.0002986011656819582,
"loss": 0.6889,
"step": 361
},
{
"epoch": 1.02,
"learning_rate": 0.0002985878960612993,
"loss": 0.6266,
"step": 362
},
{
"epoch": 1.02,
"learning_rate": 0.0002985745640959194,
"loss": 0.6016,
"step": 363
},
{
"epoch": 1.02,
"learning_rate": 0.00029856116979141224,
"loss": 0.387,
"step": 364
},
{
"epoch": 1.02,
"learning_rate": 0.00029854771315339785,
"loss": 0.4954,
"step": 365
},
{
"epoch": 1.03,
"learning_rate": 0.0002985341941875224,
"loss": 0.5165,
"step": 366
},
{
"epoch": 1.03,
"learning_rate": 0.0002985206128994581,
"loss": 0.459,
"step": 367
},
{
"epoch": 1.03,
"learning_rate": 0.0002985069692949036,
"loss": 0.5966,
"step": 368
},
{
"epoch": 1.04,
"learning_rate": 0.0002984932633795833,
"loss": 0.4271,
"step": 369
},
{
"epoch": 1.04,
"learning_rate": 0.00029847949515924806,
"loss": 0.4243,
"step": 370
},
{
"epoch": 1.04,
"learning_rate": 0.00029846566463967477,
"loss": 0.4879,
"step": 371
},
{
"epoch": 1.04,
"learning_rate": 0.0002984517718266664,
"loss": 0.4225,
"step": 372
},
{
"epoch": 1.05,
"learning_rate": 0.00029843781672605216,
"loss": 0.385,
"step": 373
},
{
"epoch": 1.05,
"learning_rate": 0.00029842379934368735,
"loss": 0.5161,
"step": 374
},
{
"epoch": 1.05,
"learning_rate": 0.0002984097196854534,
"loss": 0.5034,
"step": 375
},
{
"epoch": 1.05,
"eval_loss": 0.5201339721679688,
"eval_runtime": 26.3832,
"eval_samples_per_second": 7.581,
"eval_steps_per_second": 1.895,
"step": 375
},
{
"dharma_eval_accuracy": 0.40587691825690037,
"dharma_eval_accuracy_ARC-Challenge": 0.24074074074074073,
"dharma_eval_accuracy_ARC-Easy": 0.2962962962962963,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2037037037037037,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.24074074074074073,
"dharma_eval_accuracy_truthful_qa": 0.7037037037037037,
"dharma_eval_accuracy_winogrande": 0.5185185185185185,
"dharma_loss": 2.7163280713558198,
"epoch": 1.05,
"step": 375
},
{
"epoch": 1.06,
"learning_rate": 0.0002983955777572578,
"loss": 0.6131,
"step": 376
},
{
"epoch": 1.06,
"learning_rate": 0.0002983813735650344,
"loss": 0.546,
"step": 377
},
{
"epoch": 1.06,
"learning_rate": 0.00029836710711474287,
"loss": 0.3439,
"step": 378
},
{
"epoch": 1.06,
"learning_rate": 0.0002983527784123692,
"loss": 0.6843,
"step": 379
},
{
"epoch": 1.07,
"learning_rate": 0.0002983383874639254,
"loss": 0.4989,
"step": 380
},
{
"epoch": 1.07,
"learning_rate": 0.0002983239342754498,
"loss": 0.4311,
"step": 381
},
{
"epoch": 1.07,
"learning_rate": 0.0002983094188530065,
"loss": 0.6035,
"step": 382
},
{
"epoch": 1.08,
"learning_rate": 0.000298294841202686,
"loss": 0.3606,
"step": 383
},
{
"epoch": 1.08,
"learning_rate": 0.0002982802013306048,
"loss": 0.3556,
"step": 384
},
{
"epoch": 1.08,
"learning_rate": 0.00029826549924290557,
"loss": 0.4458,
"step": 385
},
{
"epoch": 1.08,
"learning_rate": 0.0002982507349457569,
"loss": 0.4737,
"step": 386
},
{
"epoch": 1.09,
"learning_rate": 0.00029823590844535366,
"loss": 0.5703,
"step": 387
},
{
"epoch": 1.09,
"learning_rate": 0.0002982210197479169,
"loss": 0.3601,
"step": 388
},
{
"epoch": 1.09,
"learning_rate": 0.00029820606885969347,
"loss": 0.337,
"step": 389
},
{
"epoch": 1.09,
"learning_rate": 0.00029819105578695655,
"loss": 0.4757,
"step": 390
},
{
"epoch": 1.1,
"learning_rate": 0.0002981759805360054,
"loss": 0.5072,
"step": 391
},
{
"epoch": 1.1,
"learning_rate": 0.0002981608431131653,
"loss": 0.4409,
"step": 392
},
{
"epoch": 1.1,
"learning_rate": 0.00029814564352478753,
"loss": 0.5252,
"step": 393
},
{
"epoch": 1.11,
"learning_rate": 0.00029813038177724965,
"loss": 0.5016,
"step": 394
},
{
"epoch": 1.11,
"learning_rate": 0.00029811505787695524,
"loss": 0.3186,
"step": 395
},
{
"epoch": 1.11,
"learning_rate": 0.0002980996718303338,
"loss": 0.5822,
"step": 396
},
{
"epoch": 1.11,
"learning_rate": 0.00029808422364384113,
"loss": 0.54,
"step": 397
},
{
"epoch": 1.12,
"learning_rate": 0.00029806871332395895,
"loss": 0.4582,
"step": 398
},
{
"epoch": 1.12,
"learning_rate": 0.0002980531408771951,
"loss": 0.5073,
"step": 399
},
{
"epoch": 1.12,
"learning_rate": 0.00029803750631008356,
"loss": 0.5227,
"step": 400
},
{
"epoch": 1.12,
"eval_loss": 0.5233827829360962,
"eval_runtime": 26.3434,
"eval_samples_per_second": 7.592,
"eval_steps_per_second": 1.898,
"step": 400
},
{
"dharma_eval_accuracy": 0.3770703338947604,
"dharma_eval_accuracy_ARC-Challenge": 0.24074074074074073,
"dharma_eval_accuracy_ARC-Easy": 0.3148148148148148,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2037037037037037,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.09259259259259259,
"dharma_eval_accuracy_truthful_qa": 0.5555555555555556,
"dharma_eval_accuracy_winogrande": 0.5370370370370371,
"dharma_loss": 2.5418553066253664,
"epoch": 1.12,
"step": 400
}
],
"max_steps": 5000,
"num_train_epochs": 15,
"total_flos": 1.2906498742913434e+17,
"trial_name": null,
"trial_params": null
}