{
"best_metric": 0.510773241519928,
"best_model_checkpoint": "./output_v2/7b_cluster00_Nous-Hermes-llama-2-7b_codellama_blob_1/checkpoint-600",
"epoch": 1.6842105263157894,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 2e-06,
"loss": 0.7795,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 4e-06,
"loss": 0.6789,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 5.999999999999999e-06,
"loss": 0.6847,
"step": 3
},
{
"epoch": 0.01,
"learning_rate": 8e-06,
"loss": 0.6876,
"step": 4
},
{
"epoch": 0.01,
"learning_rate": 9.999999999999999e-06,
"loss": 0.5964,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 1.1999999999999999e-05,
"loss": 0.6592,
"step": 6
},
{
"epoch": 0.02,
"learning_rate": 1.4e-05,
"loss": 0.6261,
"step": 7
},
{
"epoch": 0.02,
"learning_rate": 1.6e-05,
"loss": 0.6892,
"step": 8
},
{
"epoch": 0.03,
"learning_rate": 1.7999999999999997e-05,
"loss": 0.538,
"step": 9
},
{
"epoch": 0.03,
"learning_rate": 1.9999999999999998e-05,
"loss": 0.6015,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 2.2e-05,
"loss": 0.7061,
"step": 11
},
{
"epoch": 0.03,
"learning_rate": 2.3999999999999997e-05,
"loss": 0.6329,
"step": 12
},
{
"epoch": 0.04,
"learning_rate": 2.6e-05,
"loss": 0.5633,
"step": 13
},
{
"epoch": 0.04,
"learning_rate": 2.8e-05,
"loss": 0.5499,
"step": 14
},
{
"epoch": 0.04,
"learning_rate": 2.9999999999999997e-05,
"loss": 0.5846,
"step": 15
},
{
"epoch": 0.04,
"learning_rate": 3.2e-05,
"loss": 0.7056,
"step": 16
},
{
"epoch": 0.05,
"learning_rate": 3.399999999999999e-05,
"loss": 0.9478,
"step": 17
},
{
"epoch": 0.05,
"learning_rate": 3.5999999999999994e-05,
"loss": 0.7119,
"step": 18
},
{
"epoch": 0.05,
"learning_rate": 3.8e-05,
"loss": 0.5115,
"step": 19
},
{
"epoch": 0.06,
"learning_rate": 3.9999999999999996e-05,
"loss": 0.6481,
"step": 20
},
{
"epoch": 0.06,
"learning_rate": 4.2e-05,
"loss": 1.0479,
"step": 21
},
{
"epoch": 0.06,
"learning_rate": 4.4e-05,
"loss": 0.9266,
"step": 22
},
{
"epoch": 0.06,
"learning_rate": 4.599999999999999e-05,
"loss": 0.7517,
"step": 23
},
{
"epoch": 0.07,
"learning_rate": 4.7999999999999994e-05,
"loss": 0.7024,
"step": 24
},
{
"epoch": 0.07,
"learning_rate": 4.9999999999999996e-05,
"loss": 0.6996,
"step": 25
},
{
"epoch": 0.07,
"eval_loss": 0.6741535663604736,
"eval_runtime": 26.372,
"eval_samples_per_second": 7.584,
"eval_steps_per_second": 1.896,
"step": 25
},
{
"dharma_eval_accuracy": 0.47090526975371527,
"dharma_eval_accuracy_ARC-Challenge": 0.6296296296296297,
"dharma_eval_accuracy_ARC-Easy": 0.7592592592592593,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.4444444444444444,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.4098360655737705,
"dharma_eval_accuracy_openbookqa": 0.2962962962962963,
"dharma_eval_accuracy_truthful_qa": 0.37037037037037035,
"dharma_eval_accuracy_winogrande": 0.37037037037037035,
"dharma_loss": 3.7834435443878176,
"epoch": 0.07,
"step": 25
},
{
"epoch": 0.07,
"learning_rate": 5.2e-05,
"loss": 0.6123,
"step": 26
},
{
"epoch": 0.08,
"learning_rate": 5.399999999999999e-05,
"loss": 0.608,
"step": 27
},
{
"epoch": 0.08,
"learning_rate": 5.6e-05,
"loss": 0.4845,
"step": 28
},
{
"epoch": 0.08,
"learning_rate": 5.7999999999999994e-05,
"loss": 0.6026,
"step": 29
},
{
"epoch": 0.08,
"learning_rate": 5.9999999999999995e-05,
"loss": 0.87,
"step": 30
},
{
"epoch": 0.09,
"learning_rate": 6.199999999999999e-05,
"loss": 0.9898,
"step": 31
},
{
"epoch": 0.09,
"learning_rate": 6.4e-05,
"loss": 0.7354,
"step": 32
},
{
"epoch": 0.09,
"learning_rate": 6.599999999999999e-05,
"loss": 0.5053,
"step": 33
},
{
"epoch": 0.1,
"learning_rate": 6.799999999999999e-05,
"loss": 0.6644,
"step": 34
},
{
"epoch": 0.1,
"learning_rate": 7e-05,
"loss": 0.6932,
"step": 35
},
{
"epoch": 0.1,
"learning_rate": 7.199999999999999e-05,
"loss": 0.7425,
"step": 36
},
{
"epoch": 0.1,
"learning_rate": 7.4e-05,
"loss": 0.8268,
"step": 37
},
{
"epoch": 0.11,
"learning_rate": 7.6e-05,
"loss": 0.5957,
"step": 38
},
{
"epoch": 0.11,
"learning_rate": 7.8e-05,
"loss": 0.5579,
"step": 39
},
{
"epoch": 0.11,
"learning_rate": 7.999999999999999e-05,
"loss": 0.7978,
"step": 40
},
{
"epoch": 0.12,
"learning_rate": 8.199999999999999e-05,
"loss": 0.8261,
"step": 41
},
{
"epoch": 0.12,
"learning_rate": 8.4e-05,
"loss": 0.7618,
"step": 42
},
{
"epoch": 0.12,
"learning_rate": 8.6e-05,
"loss": 0.6062,
"step": 43
},
{
"epoch": 0.12,
"learning_rate": 8.8e-05,
"loss": 0.5791,
"step": 44
},
{
"epoch": 0.13,
"learning_rate": 8.999999999999999e-05,
"loss": 0.6008,
"step": 45
},
{
"epoch": 0.13,
"learning_rate": 9.199999999999999e-05,
"loss": 0.5143,
"step": 46
},
{
"epoch": 0.13,
"learning_rate": 9.4e-05,
"loss": 0.5329,
"step": 47
},
{
"epoch": 0.13,
"learning_rate": 9.599999999999999e-05,
"loss": 0.6973,
"step": 48
},
{
"epoch": 0.14,
"learning_rate": 9.799999999999998e-05,
"loss": 0.615,
"step": 49
},
{
"epoch": 0.14,
"learning_rate": 9.999999999999999e-05,
"loss": 0.6362,
"step": 50
},
{
"epoch": 0.14,
"eval_loss": 0.6042094826698303,
"eval_runtime": 26.344,
"eval_samples_per_second": 7.592,
"eval_steps_per_second": 1.898,
"step": 50
},
{
"dharma_eval_accuracy": 0.4825425901342051,
"dharma_eval_accuracy_ARC-Challenge": 0.6666666666666666,
"dharma_eval_accuracy_ARC-Easy": 0.6851851851851852,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.42592592592592593,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.45901639344262296,
"dharma_eval_accuracy_openbookqa": 0.3333333333333333,
"dharma_eval_accuracy_truthful_qa": 0.35185185185185186,
"dharma_eval_accuracy_winogrande": 0.46296296296296297,
"dharma_loss": 2.1548793907165527,
"epoch": 0.14,
"step": 50
},
{
"epoch": 0.14,
"learning_rate": 0.000102,
"loss": 0.6567,
"step": 51
},
{
"epoch": 0.15,
"learning_rate": 0.000104,
"loss": 0.9786,
"step": 52
},
{
"epoch": 0.15,
"learning_rate": 0.00010599999999999999,
"loss": 0.6146,
"step": 53
},
{
"epoch": 0.15,
"learning_rate": 0.00010799999999999998,
"loss": 0.6402,
"step": 54
},
{
"epoch": 0.15,
"learning_rate": 0.00010999999999999998,
"loss": 0.5906,
"step": 55
},
{
"epoch": 0.16,
"learning_rate": 0.000112,
"loss": 0.6441,
"step": 56
},
{
"epoch": 0.16,
"learning_rate": 0.00011399999999999999,
"loss": 0.6887,
"step": 57
},
{
"epoch": 0.16,
"learning_rate": 0.00011599999999999999,
"loss": 0.6283,
"step": 58
},
{
"epoch": 0.17,
"learning_rate": 0.00011799999999999998,
"loss": 0.5929,
"step": 59
},
{
"epoch": 0.17,
"learning_rate": 0.00011999999999999999,
"loss": 0.6939,
"step": 60
},
{
"epoch": 0.17,
"learning_rate": 0.000122,
"loss": 0.5535,
"step": 61
},
{
"epoch": 0.17,
"learning_rate": 0.00012399999999999998,
"loss": 0.5457,
"step": 62
},
{
"epoch": 0.18,
"learning_rate": 0.00012599999999999997,
"loss": 0.6425,
"step": 63
},
{
"epoch": 0.18,
"learning_rate": 0.000128,
"loss": 0.6855,
"step": 64
},
{
"epoch": 0.18,
"learning_rate": 0.00013,
"loss": 0.5396,
"step": 65
},
{
"epoch": 0.19,
"learning_rate": 0.00013199999999999998,
"loss": 0.6307,
"step": 66
},
{
"epoch": 0.19,
"learning_rate": 0.00013399999999999998,
"loss": 0.6396,
"step": 67
},
{
"epoch": 0.19,
"learning_rate": 0.00013599999999999997,
"loss": 0.5727,
"step": 68
},
{
"epoch": 0.19,
"learning_rate": 0.000138,
"loss": 0.4812,
"step": 69
},
{
"epoch": 0.2,
"learning_rate": 0.00014,
"loss": 0.5658,
"step": 70
},
{
"epoch": 0.2,
"learning_rate": 0.00014199999999999998,
"loss": 0.6314,
"step": 71
},
{
"epoch": 0.2,
"learning_rate": 0.00014399999999999998,
"loss": 0.6175,
"step": 72
},
{
"epoch": 0.2,
"learning_rate": 0.000146,
"loss": 0.5468,
"step": 73
},
{
"epoch": 0.21,
"learning_rate": 0.000148,
"loss": 0.6233,
"step": 74
},
{
"epoch": 0.21,
"learning_rate": 0.00015,
"loss": 0.5276,
"step": 75
},
{
"epoch": 0.21,
"eval_loss": 0.5642657279968262,
"eval_runtime": 26.2701,
"eval_samples_per_second": 7.613,
"eval_steps_per_second": 1.903,
"step": 75
},
{
"dharma_eval_accuracy": 0.45038002097061713,
"dharma_eval_accuracy_ARC-Challenge": 0.6111111111111112,
"dharma_eval_accuracy_ARC-Easy": 0.6111111111111112,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.42592592592592593,
"dharma_eval_accuracy_agieval": 0.2033898305084746,
"dharma_eval_accuracy_bigbench": 0.4426229508196721,
"dharma_eval_accuracy_openbookqa": 0.14814814814814814,
"dharma_eval_accuracy_truthful_qa": 0.42592592592592593,
"dharma_eval_accuracy_winogrande": 0.48148148148148145,
"dharma_loss": 2.056876979827881,
"epoch": 0.21,
"step": 75
},
{
"epoch": 0.21,
"learning_rate": 0.000152,
"loss": 0.5308,
"step": 76
},
{
"epoch": 0.22,
"learning_rate": 0.00015399999999999998,
"loss": 0.6546,
"step": 77
},
{
"epoch": 0.22,
"learning_rate": 0.000156,
"loss": 0.6451,
"step": 78
},
{
"epoch": 0.22,
"learning_rate": 0.00015799999999999996,
"loss": 0.5255,
"step": 79
},
{
"epoch": 0.22,
"learning_rate": 0.00015999999999999999,
"loss": 0.5629,
"step": 80
},
{
"epoch": 0.23,
"learning_rate": 0.000162,
"loss": 0.6642,
"step": 81
},
{
"epoch": 0.23,
"learning_rate": 0.00016399999999999997,
"loss": 0.6393,
"step": 82
},
{
"epoch": 0.23,
"learning_rate": 0.000166,
"loss": 0.5073,
"step": 83
},
{
"epoch": 0.24,
"learning_rate": 0.000168,
"loss": 0.5628,
"step": 84
},
{
"epoch": 0.24,
"learning_rate": 0.00016999999999999999,
"loss": 0.5566,
"step": 85
},
{
"epoch": 0.24,
"learning_rate": 0.000172,
"loss": 0.4653,
"step": 86
},
{
"epoch": 0.24,
"learning_rate": 0.00017399999999999997,
"loss": 0.5322,
"step": 87
},
{
"epoch": 0.25,
"learning_rate": 0.000176,
"loss": 0.7131,
"step": 88
},
{
"epoch": 0.25,
"learning_rate": 0.000178,
"loss": 0.5586,
"step": 89
},
{
"epoch": 0.25,
"learning_rate": 0.00017999999999999998,
"loss": 0.7246,
"step": 90
},
{
"epoch": 0.26,
"learning_rate": 0.00018199999999999998,
"loss": 0.6527,
"step": 91
},
{
"epoch": 0.26,
"learning_rate": 0.00018399999999999997,
"loss": 0.8067,
"step": 92
},
{
"epoch": 0.26,
"learning_rate": 0.000186,
"loss": 0.6427,
"step": 93
},
{
"epoch": 0.26,
"learning_rate": 0.000188,
"loss": 0.6309,
"step": 94
},
{
"epoch": 0.27,
"learning_rate": 0.00018999999999999998,
"loss": 0.6111,
"step": 95
},
{
"epoch": 0.27,
"learning_rate": 0.00019199999999999998,
"loss": 0.5911,
"step": 96
},
{
"epoch": 0.27,
"learning_rate": 0.00019399999999999997,
"loss": 0.6372,
"step": 97
},
{
"epoch": 0.28,
"learning_rate": 0.00019599999999999997,
"loss": 0.5664,
"step": 98
},
{
"epoch": 0.28,
"learning_rate": 0.000198,
"loss": 0.5618,
"step": 99
},
{
"epoch": 0.28,
"learning_rate": 0.00019999999999999998,
"loss": 0.7154,
"step": 100
},
{
"epoch": 0.28,
"eval_loss": 0.5470997095108032,
"eval_runtime": 26.3144,
"eval_samples_per_second": 7.6,
"eval_steps_per_second": 1.9,
"step": 100
},
{
"dharma_eval_accuracy": 0.398478315307064,
"dharma_eval_accuracy_ARC-Challenge": 0.5740740740740741,
"dharma_eval_accuracy_ARC-Easy": 0.48148148148148145,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.4074074074074074,
"dharma_eval_accuracy_agieval": 0.2711864406779661,
"dharma_eval_accuracy_bigbench": 0.4262295081967213,
"dharma_eval_accuracy_openbookqa": 0.07407407407407407,
"dharma_eval_accuracy_truthful_qa": 0.3888888888888889,
"dharma_eval_accuracy_winogrande": 0.25925925925925924,
"dharma_loss": 2.5759267053604127,
"epoch": 0.28,
"step": 100
},
{
"epoch": 0.28,
"learning_rate": 0.00020199999999999998,
"loss": 0.5837,
"step": 101
},
{
"epoch": 0.29,
"learning_rate": 0.000204,
"loss": 0.5707,
"step": 102
},
{
"epoch": 0.29,
"learning_rate": 0.00020599999999999997,
"loss": 0.7798,
"step": 103
},
{
"epoch": 0.29,
"learning_rate": 0.000208,
"loss": 0.4922,
"step": 104
},
{
"epoch": 0.29,
"learning_rate": 0.00020999999999999998,
"loss": 0.7033,
"step": 105
},
{
"epoch": 0.3,
"learning_rate": 0.00021199999999999998,
"loss": 0.7297,
"step": 106
},
{
"epoch": 0.3,
"learning_rate": 0.000214,
"loss": 0.5376,
"step": 107
},
{
"epoch": 0.3,
"learning_rate": 0.00021599999999999996,
"loss": 0.5372,
"step": 108
},
{
"epoch": 0.31,
"learning_rate": 0.00021799999999999999,
"loss": 0.7453,
"step": 109
},
{
"epoch": 0.31,
"learning_rate": 0.00021999999999999995,
"loss": 0.4582,
"step": 110
},
{
"epoch": 0.31,
"learning_rate": 0.00022199999999999998,
"loss": 0.54,
"step": 111
},
{
"epoch": 0.31,
"learning_rate": 0.000224,
"loss": 0.643,
"step": 112
},
{
"epoch": 0.32,
"learning_rate": 0.00022599999999999996,
"loss": 0.5972,
"step": 113
},
{
"epoch": 0.32,
"learning_rate": 0.00022799999999999999,
"loss": 0.5765,
"step": 114
},
{
"epoch": 0.32,
"learning_rate": 0.00023,
"loss": 0.6007,
"step": 115
},
{
"epoch": 0.33,
"learning_rate": 0.00023199999999999997,
"loss": 0.5343,
"step": 116
},
{
"epoch": 0.33,
"learning_rate": 0.000234,
"loss": 0.8235,
"step": 117
},
{
"epoch": 0.33,
"learning_rate": 0.00023599999999999996,
"loss": 0.6687,
"step": 118
},
{
"epoch": 0.33,
"learning_rate": 0.00023799999999999998,
"loss": 0.6696,
"step": 119
},
{
"epoch": 0.34,
"learning_rate": 0.00023999999999999998,
"loss": 0.77,
"step": 120
},
{
"epoch": 0.34,
"learning_rate": 0.00024199999999999997,
"loss": 0.5792,
"step": 121
},
{
"epoch": 0.34,
"learning_rate": 0.000244,
"loss": 0.3653,
"step": 122
},
{
"epoch": 0.35,
"learning_rate": 0.00024599999999999996,
"loss": 0.5344,
"step": 123
},
{
"epoch": 0.35,
"learning_rate": 0.00024799999999999996,
"loss": 0.5772,
"step": 124
},
{
"epoch": 0.35,
"learning_rate": 0.00025,
"loss": 0.7189,
"step": 125
},
{
"epoch": 0.35,
"eval_loss": 0.5514405369758606,
"eval_runtime": 26.3447,
"eval_samples_per_second": 7.592,
"eval_steps_per_second": 1.898,
"step": 125
},
{
"dharma_eval_accuracy": 0.3371043854202756,
"dharma_eval_accuracy_ARC-Challenge": 0.37037037037037035,
"dharma_eval_accuracy_ARC-Easy": 0.42592592592592593,
"dharma_eval_accuracy_BoolQ": 0.37037037037037035,
"dharma_eval_accuracy_MMLU": 0.35185185185185186,
"dharma_eval_accuracy_agieval": 0.22033898305084745,
"dharma_eval_accuracy_bigbench": 0.29508196721311475,
"dharma_eval_accuracy_openbookqa": 0.18518518518518517,
"dharma_eval_accuracy_truthful_qa": 0.3888888888888889,
"dharma_eval_accuracy_winogrande": 0.42592592592592593,
"dharma_loss": 2.1871530780792234,
"epoch": 0.35,
"step": 125
},
{
"epoch": 0.35,
"learning_rate": 0.00025199999999999995,
"loss": 0.5891,
"step": 126
},
{
"epoch": 0.36,
"learning_rate": 0.000254,
"loss": 0.8225,
"step": 127
},
{
"epoch": 0.36,
"learning_rate": 0.000256,
"loss": 0.6104,
"step": 128
},
{
"epoch": 0.36,
"learning_rate": 0.000258,
"loss": 0.6571,
"step": 129
},
{
"epoch": 0.36,
"learning_rate": 0.00026,
"loss": 0.7562,
"step": 130
},
{
"epoch": 0.37,
"learning_rate": 0.00026199999999999997,
"loss": 0.7172,
"step": 131
},
{
"epoch": 0.37,
"learning_rate": 0.00026399999999999997,
"loss": 0.4353,
"step": 132
},
{
"epoch": 0.37,
"learning_rate": 0.000266,
"loss": 0.5576,
"step": 133
},
{
"epoch": 0.38,
"learning_rate": 0.00026799999999999995,
"loss": 0.553,
"step": 134
},
{
"epoch": 0.38,
"learning_rate": 0.00027,
"loss": 0.5062,
"step": 135
},
{
"epoch": 0.38,
"learning_rate": 0.00027199999999999994,
"loss": 0.4819,
"step": 136
},
{
"epoch": 0.38,
"learning_rate": 0.000274,
"loss": 0.6195,
"step": 137
},
{
"epoch": 0.39,
"learning_rate": 0.000276,
"loss": 0.7592,
"step": 138
},
{
"epoch": 0.39,
"learning_rate": 0.000278,
"loss": 0.6118,
"step": 139
},
{
"epoch": 0.39,
"learning_rate": 0.00028,
"loss": 0.7897,
"step": 140
},
{
"epoch": 0.4,
"learning_rate": 0.00028199999999999997,
"loss": 0.5879,
"step": 141
},
{
"epoch": 0.4,
"learning_rate": 0.00028399999999999996,
"loss": 0.6055,
"step": 142
},
{
"epoch": 0.4,
"learning_rate": 0.00028599999999999996,
"loss": 0.541,
"step": 143
},
{
"epoch": 0.4,
"learning_rate": 0.00028799999999999995,
"loss": 0.7665,
"step": 144
},
{
"epoch": 0.41,
"learning_rate": 0.00029,
"loss": 0.612,
"step": 145
},
{
"epoch": 0.41,
"learning_rate": 0.000292,
"loss": 0.5397,
"step": 146
},
{
"epoch": 0.41,
"learning_rate": 0.000294,
"loss": 0.5727,
"step": 147
},
{
"epoch": 0.42,
"learning_rate": 0.000296,
"loss": 1.1184,
"step": 148
},
{
"epoch": 0.42,
"learning_rate": 0.000298,
"loss": 0.5912,
"step": 149
},
{
"epoch": 0.42,
"learning_rate": 0.0003,
"loss": 0.5581,
"step": 150
},
{
"epoch": 0.42,
"eval_loss": 0.5423073768615723,
"eval_runtime": 26.3108,
"eval_samples_per_second": 7.601,
"eval_steps_per_second": 1.9,
"step": 150
},
{
"dharma_eval_accuracy": 0.3860708907481159,
"dharma_eval_accuracy_ARC-Challenge": 0.42592592592592593,
"dharma_eval_accuracy_ARC-Easy": 0.4444444444444444,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.37037037037037035,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.4426229508196721,
"dharma_eval_accuracy_openbookqa": 0.18518518518518517,
"dharma_eval_accuracy_truthful_qa": 0.2962962962962963,
"dharma_eval_accuracy_winogrande": 0.35185185185185186,
"dharma_loss": 2.471666145801544,
"epoch": 0.42,
"step": 150
},
{
"epoch": 0.42,
"learning_rate": 0.0002999999685313931,
"loss": 0.656,
"step": 151
},
{
"epoch": 0.43,
"learning_rate": 0.00029999987412558584,
"loss": 0.5075,
"step": 152
},
{
"epoch": 0.43,
"learning_rate": 0.0002999997167826177,
"loss": 0.4696,
"step": 153
},
{
"epoch": 0.43,
"learning_rate": 0.00029999949650255474,
"loss": 0.7349,
"step": 154
},
{
"epoch": 0.44,
"learning_rate": 0.0002999992132854894,
"loss": 0.5935,
"step": 155
},
{
"epoch": 0.44,
"learning_rate": 0.0002999988671315404,
"loss": 0.4127,
"step": 156
},
{
"epoch": 0.44,
"learning_rate": 0.0002999984580408531,
"loss": 0.5503,
"step": 157
},
{
"epoch": 0.44,
"learning_rate": 0.00029999798601359915,
"loss": 0.5742,
"step": 158
},
{
"epoch": 0.45,
"learning_rate": 0.00029999745104997654,
"loss": 0.5606,
"step": 159
},
{
"epoch": 0.45,
"learning_rate": 0.0002999968531502098,
"loss": 0.6209,
"step": 160
},
{
"epoch": 0.45,
"learning_rate": 0.0002999961923145497,
"loss": 0.4977,
"step": 161
},
{
"epoch": 0.45,
"learning_rate": 0.0002999954685432736,
"loss": 0.7474,
"step": 162
},
{
"epoch": 0.46,
"learning_rate": 0.0002999946818366852,
"loss": 0.574,
"step": 163
},
{
"epoch": 0.46,
"learning_rate": 0.00029999383219511444,
"loss": 0.3991,
"step": 164
},
{
"epoch": 0.46,
"learning_rate": 0.0002999929196189179,
"loss": 0.6335,
"step": 165
},
{
"epoch": 0.47,
"learning_rate": 0.0002999919441084786,
"loss": 0.6144,
"step": 166
},
{
"epoch": 0.47,
"learning_rate": 0.0002999909056642057,
"loss": 0.4695,
"step": 167
},
{
"epoch": 0.47,
"learning_rate": 0.00029998980428653496,
"loss": 0.5322,
"step": 168
},
{
"epoch": 0.47,
"learning_rate": 0.00029998863997592843,
"loss": 0.6111,
"step": 169
},
{
"epoch": 0.48,
"learning_rate": 0.00029998741273287477,
"loss": 0.5517,
"step": 170
},
{
"epoch": 0.48,
"learning_rate": 0.0002999861225578888,
"loss": 0.5407,
"step": 171
},
{
"epoch": 0.48,
"learning_rate": 0.00029998476945151183,
"loss": 0.4942,
"step": 172
},
{
"epoch": 0.49,
"learning_rate": 0.00029998335341431174,
"loss": 0.4563,
"step": 173
},
{
"epoch": 0.49,
"learning_rate": 0.0002999818744468825,
"loss": 0.4406,
"step": 174
},
{
"epoch": 0.49,
"learning_rate": 0.0002999803325498448,
"loss": 0.5319,
"step": 175
},
{
"epoch": 0.49,
"eval_loss": 0.5312690734863281,
"eval_runtime": 26.3302,
"eval_samples_per_second": 7.596,
"eval_steps_per_second": 1.899,
"step": 175
},
{
"dharma_eval_accuracy": 0.40200352864364475,
"dharma_eval_accuracy_ARC-Challenge": 0.42592592592592593,
"dharma_eval_accuracy_ARC-Easy": 0.5,
"dharma_eval_accuracy_BoolQ": 0.7592592592592593,
"dharma_eval_accuracy_MMLU": 0.4074074074074074,
"dharma_eval_accuracy_agieval": 0.22033898305084745,
"dharma_eval_accuracy_bigbench": 0.36065573770491804,
"dharma_eval_accuracy_openbookqa": 0.2222222222222222,
"dharma_eval_accuracy_truthful_qa": 0.2962962962962963,
"dharma_eval_accuracy_winogrande": 0.42592592592592593,
"dharma_loss": 2.2439456572532652,
"epoch": 0.49,
"step": 175
},
{
"epoch": 0.49,
"learning_rate": 0.0002999787277238455,
"loss": 0.7389,
"step": 176
},
{
"epoch": 0.5,
"learning_rate": 0.000299977059969558,
"loss": 0.6848,
"step": 177
},
{
"epoch": 0.5,
"learning_rate": 0.00029997532928768204,
"loss": 0.5144,
"step": 178
},
{
"epoch": 0.5,
"learning_rate": 0.00029997353567894384,
"loss": 0.5707,
"step": 179
},
{
"epoch": 0.51,
"learning_rate": 0.0002999716791440959,
"loss": 0.668,
"step": 180
},
{
"epoch": 0.51,
"learning_rate": 0.00029996975968391715,
"loss": 0.4851,
"step": 181
},
{
"epoch": 0.51,
"learning_rate": 0.000299967777299213,
"loss": 0.725,
"step": 182
},
{
"epoch": 0.51,
"learning_rate": 0.0002999657319908153,
"loss": 0.6281,
"step": 183
},
{
"epoch": 0.52,
"learning_rate": 0.0002999636237595821,
"loss": 0.6357,
"step": 184
},
{
"epoch": 0.52,
"learning_rate": 0.00029996145260639806,
"loss": 0.5517,
"step": 185
},
{
"epoch": 0.52,
"learning_rate": 0.0002999592185321741,
"loss": 0.6792,
"step": 186
},
{
"epoch": 0.52,
"learning_rate": 0.0002999569215378477,
"loss": 0.619,
"step": 187
},
{
"epoch": 0.53,
"learning_rate": 0.0002999545616243825,
"loss": 0.5693,
"step": 188
},
{
"epoch": 0.53,
"learning_rate": 0.00029995213879276876,
"loss": 0.5759,
"step": 189
},
{
"epoch": 0.53,
"learning_rate": 0.000299949653044023,
"loss": 0.4382,
"step": 190
},
{
"epoch": 0.54,
"learning_rate": 0.00029994710437918824,
"loss": 0.7183,
"step": 191
},
{
"epoch": 0.54,
"learning_rate": 0.0002999444927993338,
"loss": 0.7328,
"step": 192
},
{
"epoch": 0.54,
"learning_rate": 0.00029994181830555555,
"loss": 0.645,
"step": 193
},
{
"epoch": 0.54,
"learning_rate": 0.00029993908089897555,
"loss": 0.5784,
"step": 194
},
{
"epoch": 0.55,
"learning_rate": 0.00029993628058074245,
"loss": 0.4547,
"step": 195
},
{
"epoch": 0.55,
"learning_rate": 0.00029993341735203114,
"loss": 0.5243,
"step": 196
},
{
"epoch": 0.55,
"learning_rate": 0.00029993049121404303,
"loss": 0.5059,
"step": 197
},
{
"epoch": 0.56,
"learning_rate": 0.0002999275021680058,
"loss": 0.6225,
"step": 198
},
{
"epoch": 0.56,
"learning_rate": 0.0002999244502151737,
"loss": 0.5956,
"step": 199
},
{
"epoch": 0.56,
"learning_rate": 0.00029992133535682725,
"loss": 0.553,
"step": 200
},
{
"epoch": 0.56,
"eval_loss": 0.5266188979148865,
"eval_runtime": 26.3,
"eval_samples_per_second": 7.605,
"eval_steps_per_second": 1.901,
"step": 200
},
{
"dharma_eval_accuracy": 0.4282802607491564,
"dharma_eval_accuracy_ARC-Challenge": 0.5185185185185185,
"dharma_eval_accuracy_ARC-Easy": 0.5740740740740741,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.4074074074074074,
"dharma_eval_accuracy_agieval": 0.22033898305084745,
"dharma_eval_accuracy_bigbench": 0.39344262295081966,
"dharma_eval_accuracy_openbookqa": 0.14814814814814814,
"dharma_eval_accuracy_truthful_qa": 0.3148148148148148,
"dharma_eval_accuracy_winogrande": 0.5740740740740741,
"dharma_loss": 2.507861177444458,
"epoch": 0.56,
"step": 200
},
{
"epoch": 0.56,
"learning_rate": 0.0002999181575942733,
"loss": 0.4542,
"step": 201
},
{
"epoch": 0.57,
"learning_rate": 0.0002999149169288452,
"loss": 0.568,
"step": 202
},
{
"epoch": 0.57,
"learning_rate": 0.0002999116133619028,
"loss": 0.462,
"step": 203
},
{
"epoch": 0.57,
"learning_rate": 0.0002999082468948321,
"loss": 0.4854,
"step": 204
},
{
"epoch": 0.58,
"learning_rate": 0.00029990481752904563,
"loss": 0.474,
"step": 205
},
{
"epoch": 0.58,
"learning_rate": 0.0002999013252659823,
"loss": 0.5331,
"step": 206
},
{
"epoch": 0.58,
"learning_rate": 0.0002998977701071074,
"loss": 0.606,
"step": 207
},
{
"epoch": 0.58,
"learning_rate": 0.00029989415205391263,
"loss": 0.5452,
"step": 208
},
{
"epoch": 0.59,
"learning_rate": 0.00029989047110791595,
"loss": 0.6128,
"step": 209
},
{
"epoch": 0.59,
"learning_rate": 0.0002998867272706619,
"loss": 0.7108,
"step": 210
},
{
"epoch": 0.59,
"learning_rate": 0.0002998829205437214,
"loss": 0.5557,
"step": 211
},
{
"epoch": 0.6,
"learning_rate": 0.0002998790509286915,
"loss": 0.556,
"step": 212
},
{
"epoch": 0.6,
"learning_rate": 0.000299875118427196,
"loss": 0.6482,
"step": 213
},
{
"epoch": 0.6,
"learning_rate": 0.00029987112304088483,
"loss": 0.6108,
"step": 214
},
{
"epoch": 0.6,
"learning_rate": 0.0002998670647714343,
"loss": 0.7099,
"step": 215
},
{
"epoch": 0.61,
"learning_rate": 0.0002998629436205473,
"loss": 0.5939,
"step": 216
},
{
"epoch": 0.61,
"learning_rate": 0.00029985875958995296,
"loss": 0.6238,
"step": 217
},
{
"epoch": 0.61,
"learning_rate": 0.00029985451268140683,
"loss": 0.5144,
"step": 218
},
{
"epoch": 0.61,
"learning_rate": 0.00029985020289669077,
"loss": 0.7351,
"step": 219
},
{
"epoch": 0.62,
"learning_rate": 0.00029984583023761317,
"loss": 0.6136,
"step": 220
},
{
"epoch": 0.62,
"learning_rate": 0.0002998413947060086,
"loss": 0.7544,
"step": 221
},
{
"epoch": 0.62,
"learning_rate": 0.00029983689630373825,
"loss": 0.5588,
"step": 222
},
{
"epoch": 0.63,
"learning_rate": 0.0002998323350326895,
"loss": 0.5725,
"step": 223
},
{
"epoch": 0.63,
"learning_rate": 0.0002998277108947762,
"loss": 0.6262,
"step": 224
},
{
"epoch": 0.63,
"learning_rate": 0.00029982302389193856,
"loss": 0.6217,
"step": 225
},
{
"epoch": 0.63,
"eval_loss": 0.527147114276886,
"eval_runtime": 26.3496,
"eval_samples_per_second": 7.59,
"eval_steps_per_second": 1.898,
"step": 225
},
{
"dharma_eval_accuracy": 0.39483532805751936,
"dharma_eval_accuracy_ARC-Challenge": 0.48148148148148145,
"dharma_eval_accuracy_ARC-Easy": 0.6111111111111112,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.5185185185185185,
"dharma_eval_accuracy_agieval": 0.2711864406779661,
"dharma_eval_accuracy_bigbench": 0.39344262295081966,
"dharma_eval_accuracy_openbookqa": 0.037037037037037035,
"dharma_eval_accuracy_truthful_qa": 0.18518518518518517,
"dharma_eval_accuracy_winogrande": 0.35185185185185186,
"dharma_loss": 2.334738277435303,
"epoch": 0.63,
"step": 225
},
{
"epoch": 0.63,
"learning_rate": 0.0002998182740261432,
"loss": 0.6376,
"step": 226
},
{
"epoch": 0.64,
"learning_rate": 0.0002998134612993829,
"loss": 0.5943,
"step": 227
},
{
"epoch": 0.64,
"learning_rate": 0.0002998085857136772,
"loss": 0.4096,
"step": 228
},
{
"epoch": 0.64,
"learning_rate": 0.00029980364727107166,
"loss": 0.5825,
"step": 229
},
{
"epoch": 0.65,
"learning_rate": 0.00029979864597363845,
"loss": 0.5372,
"step": 230
},
{
"epoch": 0.65,
"learning_rate": 0.0002997935818234759,
"loss": 0.5376,
"step": 231
},
{
"epoch": 0.65,
"learning_rate": 0.00029978845482270906,
"loss": 0.4369,
"step": 232
},
{
"epoch": 0.65,
"learning_rate": 0.0002997832649734889,
"loss": 0.5717,
"step": 233
},
{
"epoch": 0.66,
"learning_rate": 0.0002997780122779931,
"loss": 0.6981,
"step": 234
},
{
"epoch": 0.66,
"learning_rate": 0.0002997726967384255,
"loss": 0.5764,
"step": 235
},
{
"epoch": 0.66,
"learning_rate": 0.0002997673183570165,
"loss": 0.5937,
"step": 236
},
{
"epoch": 0.67,
"learning_rate": 0.00029976187713602273,
"loss": 0.2708,
"step": 237
},
{
"epoch": 0.67,
"learning_rate": 0.0002997563730777273,
"loss": 0.6383,
"step": 238
},
{
"epoch": 0.67,
"learning_rate": 0.00029975080618443946,
"loss": 0.491,
"step": 239
},
{
"epoch": 0.67,
"learning_rate": 0.00029974517645849503,
"loss": 0.5049,
"step": 240
},
{
"epoch": 0.68,
"learning_rate": 0.0002997394839022562,
"loss": 0.5768,
"step": 241
},
{
"epoch": 0.68,
"learning_rate": 0.00029973372851811145,
"loss": 0.566,
"step": 242
},
{
"epoch": 0.68,
"learning_rate": 0.00029972791030847553,
"loss": 0.7687,
"step": 243
},
{
"epoch": 0.68,
"learning_rate": 0.0002997220292757898,
"loss": 0.6399,
"step": 244
},
{
"epoch": 0.69,
"learning_rate": 0.0002997160854225217,
"loss": 0.545,
"step": 245
},
{
"epoch": 0.69,
"learning_rate": 0.00029971007875116527,
"loss": 0.6108,
"step": 246
},
{
"epoch": 0.69,
"learning_rate": 0.0002997040092642407,
"loss": 0.6478,
"step": 247
},
{
"epoch": 0.7,
"learning_rate": 0.0002996978769642947,
"loss": 0.5959,
"step": 248
},
{
"epoch": 0.7,
"learning_rate": 0.0002996916818539003,
"loss": 0.5995,
"step": 249
},
{
"epoch": 0.7,
"learning_rate": 0.0002996854239356567,
"loss": 0.5721,
"step": 250
},
{
"epoch": 0.7,
"eval_loss": 0.5253523588180542,
"eval_runtime": 26.3162,
"eval_samples_per_second": 7.6,
"eval_steps_per_second": 1.9,
"step": 250
},
{
"dharma_eval_accuracy": 0.35750614311016893,
"dharma_eval_accuracy_ARC-Challenge": 0.48148148148148145,
"dharma_eval_accuracy_ARC-Easy": 0.5370370370370371,
"dharma_eval_accuracy_BoolQ": 0.6851851851851852,
"dharma_eval_accuracy_MMLU": 0.3888888888888889,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.29508196721311475,
"dharma_eval_accuracy_openbookqa": 0.018518518518518517,
"dharma_eval_accuracy_truthful_qa": 0.2222222222222222,
"dharma_eval_accuracy_winogrande": 0.35185185185185186,
"dharma_loss": 3.018469988822937,
"epoch": 0.7,
"step": 250
},
{
"epoch": 0.7,
"learning_rate": 0.0002996791032121898,
"loss": 0.3632,
"step": 251
},
{
"epoch": 0.71,
"learning_rate": 0.0002996727196861515,
"loss": 0.5306,
"step": 252
},
{
"epoch": 0.71,
"learning_rate": 0.00029966627336022034,
"loss": 0.4332,
"step": 253
},
{
"epoch": 0.71,
"learning_rate": 0.000299659764237101,
"loss": 0.4484,
"step": 254
},
{
"epoch": 0.72,
"learning_rate": 0.00029965319231952455,
"loss": 0.5353,
"step": 255
},
{
"epoch": 0.72,
"learning_rate": 0.00029964655761024854,
"loss": 0.6232,
"step": 256
},
{
"epoch": 0.72,
"learning_rate": 0.00029963986011205675,
"loss": 0.672,
"step": 257
},
{
"epoch": 0.72,
"learning_rate": 0.0002996330998277593,
"loss": 0.5865,
"step": 258
},
{
"epoch": 0.73,
"learning_rate": 0.0002996262767601926,
"loss": 0.8846,
"step": 259
},
{
"epoch": 0.73,
"learning_rate": 0.0002996193909122197,
"loss": 0.4847,
"step": 260
},
{
"epoch": 0.73,
"learning_rate": 0.00029961244228672953,
"loss": 0.5886,
"step": 261
},
{
"epoch": 0.74,
"learning_rate": 0.0002996054308866378,
"loss": 0.6324,
"step": 262
},
{
"epoch": 0.74,
"learning_rate": 0.0002995983567148862,
"loss": 0.4812,
"step": 263
},
{
"epoch": 0.74,
"learning_rate": 0.000299591219774443,
"loss": 0.6549,
"step": 264
},
{
"epoch": 0.74,
"learning_rate": 0.00029958402006830274,
"loss": 0.4712,
"step": 265
},
{
"epoch": 0.75,
"learning_rate": 0.0002995767575994863,
"loss": 0.452,
"step": 266
},
{
"epoch": 0.75,
"learning_rate": 0.00029956943237104084,
"loss": 0.5716,
"step": 267
},
{
"epoch": 0.75,
"learning_rate": 0.0002995620443860399,
"loss": 0.6757,
"step": 268
},
{
"epoch": 0.76,
"learning_rate": 0.0002995545936475833,
"loss": 0.5403,
"step": 269
},
{
"epoch": 0.76,
"learning_rate": 0.0002995470801587973,
"loss": 0.6785,
"step": 270
},
{
"epoch": 0.76,
"learning_rate": 0.0002995395039228343,
"loss": 0.6292,
"step": 271
},
{
"epoch": 0.76,
"learning_rate": 0.00029953186494287336,
"loss": 0.5181,
"step": 272
},
{
"epoch": 0.77,
"learning_rate": 0.0002995241632221195,
"loss": 0.5219,
"step": 273
},
{
"epoch": 0.77,
"learning_rate": 0.00029951639876380425,
"loss": 0.5475,
"step": 274
},
{
"epoch": 0.77,
"learning_rate": 0.0002995085715711854,
"loss": 0.5605,
"step": 275
},
{
"epoch": 0.77,
"eval_loss": 0.5327661633491516,
"eval_runtime": 26.3174,
"eval_samples_per_second": 7.6,
"eval_steps_per_second": 1.9,
"step": 275
},
{
"dharma_eval_accuracy": 0.4329237545408704,
"dharma_eval_accuracy_ARC-Challenge": 0.5555555555555556,
"dharma_eval_accuracy_ARC-Easy": 0.6111111111111112,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.37037037037037035,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.47540983606557374,
"dharma_eval_accuracy_openbookqa": 0.09259259259259259,
"dharma_eval_accuracy_truthful_qa": 0.3148148148148148,
"dharma_eval_accuracy_winogrande": 0.5185185185185185,
"dharma_loss": 2.377507550239563,
"epoch": 0.77,
"step": 275
},
{
"epoch": 0.77,
"learning_rate": 0.0002995006816475471,
"loss": 0.5067,
"step": 276
},
{
"epoch": 0.78,
"learning_rate": 0.00029949272899619994,
"loss": 0.6751,
"step": 277
},
{
"epoch": 0.78,
"learning_rate": 0.0002994847136204805,
"loss": 0.5557,
"step": 278
},
{
"epoch": 0.78,
"learning_rate": 0.0002994766355237521,
"loss": 0.6013,
"step": 279
},
{
"epoch": 0.79,
"learning_rate": 0.0002994684947094039,
"loss": 0.4763,
"step": 280
},
{
"epoch": 0.79,
"learning_rate": 0.00029946029118085193,
"loss": 0.5,
"step": 281
},
{
"epoch": 0.79,
"learning_rate": 0.000299452024941538,
"loss": 0.4948,
"step": 282
},
{
"epoch": 0.79,
"learning_rate": 0.0002994436959949306,
"loss": 0.4705,
"step": 283
},
{
"epoch": 0.8,
"learning_rate": 0.0002994353043445244,
"loss": 0.8139,
"step": 284
},
{
"epoch": 0.8,
"learning_rate": 0.0002994268499938403,
"loss": 0.4713,
"step": 285
},
{
"epoch": 0.8,
"learning_rate": 0.0002994183329464256,
"loss": 0.5527,
"step": 286
},
{
"epoch": 0.81,
"learning_rate": 0.00029940975320585396,
"loss": 0.5425,
"step": 287
},
{
"epoch": 0.81,
"learning_rate": 0.00029940111077572526,
"loss": 0.5926,
"step": 288
},
{
"epoch": 0.81,
"learning_rate": 0.00029939240565966574,
"loss": 0.5987,
"step": 289
},
{
"epoch": 0.81,
"learning_rate": 0.00029938363786132774,
"loss": 0.6077,
"step": 290
},
{
"epoch": 0.82,
"learning_rate": 0.00029937480738439023,
"loss": 0.4822,
"step": 291
},
{
"epoch": 0.82,
"learning_rate": 0.00029936591423255826,
"loss": 0.6527,
"step": 292
},
{
"epoch": 0.82,
"learning_rate": 0.00029935695840956327,
"loss": 0.5724,
"step": 293
},
{
"epoch": 0.83,
"learning_rate": 0.00029934793991916295,
"loss": 0.6617,
"step": 294
},
{
"epoch": 0.83,
"learning_rate": 0.00029933885876514115,
"loss": 0.5795,
"step": 295
},
{
"epoch": 0.83,
"learning_rate": 0.0002993297149513083,
"loss": 0.5049,
"step": 296
},
{
"epoch": 0.83,
"learning_rate": 0.00029932050848150105,
"loss": 0.5826,
"step": 297
},
{
"epoch": 0.84,
"learning_rate": 0.0002993112393595821,
"loss": 0.4766,
"step": 298
},
{
"epoch": 0.84,
"learning_rate": 0.0002993019075894406,
"loss": 0.4514,
"step": 299
},
{
"epoch": 0.84,
"learning_rate": 0.0002992925131749921,
"loss": 0.5825,
"step": 300
},
{
"epoch": 0.84,
"eval_loss": 0.5207599401473999,
"eval_runtime": 26.3377,
"eval_samples_per_second": 7.594,
"eval_steps_per_second": 1.898,
"step": 300
},
{
"dharma_eval_accuracy": 0.42874678265681937,
"dharma_eval_accuracy_ARC-Challenge": 0.5,
"dharma_eval_accuracy_ARC-Easy": 0.6111111111111112,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.3333333333333333,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.4918032786885246,
"dharma_eval_accuracy_openbookqa": 0.09259259259259259,
"dharma_eval_accuracy_truthful_qa": 0.2962962962962963,
"dharma_eval_accuracy_winogrande": 0.5925925925925926,
"dharma_loss": 2.452057005882263,
"epoch": 0.84,
"step": 300
},
{
"epoch": 0.84,
"learning_rate": 0.00029928305612017823,
"loss": 0.4251,
"step": 301
},
{
"epoch": 0.85,
"learning_rate": 0.0002992735364289671,
"loss": 0.4762,
"step": 302
},
{
"epoch": 0.85,
"learning_rate": 0.0002992639541053528,
"loss": 0.4531,
"step": 303
},
{
"epoch": 0.85,
"learning_rate": 0.0002992543091533561,
"loss": 0.64,
"step": 304
},
{
"epoch": 0.86,
"learning_rate": 0.00029924460157702376,
"loss": 0.6483,
"step": 305
},
{
"epoch": 0.86,
"learning_rate": 0.0002992348313804289,
"loss": 0.5646,
"step": 306
},
{
"epoch": 0.86,
"learning_rate": 0.00029922499856767094,
"loss": 0.4947,
"step": 307
},
{
"epoch": 0.86,
"learning_rate": 0.00029921510314287545,
"loss": 0.4793,
"step": 308
},
{
"epoch": 0.87,
"learning_rate": 0.00029920514511019456,
"loss": 0.3957,
"step": 309
},
{
"epoch": 0.87,
"learning_rate": 0.00029919512447380625,
"loss": 0.6941,
"step": 310
},
{
"epoch": 0.87,
"learning_rate": 0.0002991850412379151,
"loss": 0.6013,
"step": 311
},
{
"epoch": 0.88,
"learning_rate": 0.0002991748954067519,
"loss": 0.6011,
"step": 312
},
{
"epoch": 0.88,
"learning_rate": 0.0002991646869845736,
"loss": 0.7579,
"step": 313
},
{
"epoch": 0.88,
"learning_rate": 0.0002991544159756634,
"loss": 0.4339,
"step": 314
},
{
"epoch": 0.88,
"learning_rate": 0.0002991440823843309,
"loss": 0.4666,
"step": 315
},
{
"epoch": 0.89,
"learning_rate": 0.0002991336862149119,
"loss": 0.5458,
"step": 316
},
{
"epoch": 0.89,
"learning_rate": 0.00029912322747176835,
"loss": 0.6499,
"step": 317
},
{
"epoch": 0.89,
"learning_rate": 0.0002991127061592887,
"loss": 0.7208,
"step": 318
},
{
"epoch": 0.9,
"learning_rate": 0.00029910212228188734,
"loss": 0.7648,
"step": 319
},
{
"epoch": 0.9,
"learning_rate": 0.0002990914758440051,
"loss": 0.9322,
"step": 320
},
{
"epoch": 0.9,
"learning_rate": 0.00029908076685010915,
"loss": 0.6769,
"step": 321
},
{
"epoch": 0.9,
"learning_rate": 0.0002990699953046926,
"loss": 0.6241,
"step": 322
},
{
"epoch": 0.91,
"learning_rate": 0.00029905916121227515,
"loss": 0.409,
"step": 323
},
{
"epoch": 0.91,
"learning_rate": 0.00029904826457740247,
"loss": 0.3138,
"step": 324
},
{
"epoch": 0.91,
"learning_rate": 0.00029903730540464666,
"loss": 0.5241,
"step": 325
},
{
"epoch": 0.91,
"eval_loss": 0.5201582908630371,
"eval_runtime": 26.3949,
"eval_samples_per_second": 7.577,
"eval_steps_per_second": 1.894,
"step": 325
},
{
"dharma_eval_accuracy": 0.3616831149942199,
"dharma_eval_accuracy_ARC-Challenge": 0.4444444444444444,
"dharma_eval_accuracy_ARC-Easy": 0.5,
"dharma_eval_accuracy_BoolQ": 0.7222222222222222,
"dharma_eval_accuracy_MMLU": 0.4444444444444444,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.2786885245901639,
"dharma_eval_accuracy_openbookqa": 0.05555555555555555,
"dharma_eval_accuracy_truthful_qa": 0.3888888888888889,
"dharma_eval_accuracy_winogrande": 0.16666666666666666,
"dharma_loss": 2.3925395412445067,
"epoch": 0.91,
"step": 325
},
{
"epoch": 0.92,
"learning_rate": 0.0002990262836986059,
"loss": 0.358,
"step": 326
},
{
"epoch": 0.92,
"learning_rate": 0.0002990151994639048,
"loss": 0.5683,
"step": 327
},
{
"epoch": 0.92,
"learning_rate": 0.000299004052705194,
"loss": 0.4744,
"step": 328
},
{
"epoch": 0.92,
"learning_rate": 0.00029899284342715054,
"loss": 0.6101,
"step": 329
},
{
"epoch": 0.93,
"learning_rate": 0.00029898157163447763,
"loss": 0.5381,
"step": 330
},
{
"epoch": 0.93,
"learning_rate": 0.0002989702373319047,
"loss": 0.5269,
"step": 331
},
{
"epoch": 0.93,
"learning_rate": 0.00029895884052418735,
"loss": 0.4993,
"step": 332
},
{
"epoch": 0.93,
"learning_rate": 0.00029894738121610755,
"loss": 0.5623,
"step": 333
},
{
"epoch": 0.94,
"learning_rate": 0.0002989358594124733,
"loss": 0.5988,
"step": 334
},
{
"epoch": 0.94,
"learning_rate": 0.0002989242751181191,
"loss": 0.5623,
"step": 335
},
{
"epoch": 0.94,
"learning_rate": 0.0002989126283379054,
"loss": 0.4471,
"step": 336
},
{
"epoch": 0.95,
"learning_rate": 0.000298900919076719,
"loss": 0.489,
"step": 337
},
{
"epoch": 0.95,
"learning_rate": 0.00029888914733947275,
"loss": 0.5404,
"step": 338
},
{
"epoch": 0.95,
"learning_rate": 0.00029887731313110613,
"loss": 0.5639,
"step": 339
},
{
"epoch": 0.95,
"learning_rate": 0.0002988654164565843,
"loss": 0.6034,
"step": 340
},
{
"epoch": 0.96,
"learning_rate": 0.00029885345732089905,
"loss": 0.5529,
"step": 341
},
{
"epoch": 0.96,
"learning_rate": 0.0002988414357290681,
"loss": 0.6584,
"step": 342
},
{
"epoch": 0.96,
"learning_rate": 0.0002988293516861356,
"loss": 0.5119,
"step": 343
},
{
"epoch": 0.97,
"learning_rate": 0.0002988172051971717,
"loss": 0.4797,
"step": 344
},
{
"epoch": 0.97,
"learning_rate": 0.0002988049962672728,
"loss": 0.619,
"step": 345
},
{
"epoch": 0.97,
"learning_rate": 0.0002987927249015616,
"loss": 0.621,
"step": 346
},
{
"epoch": 0.97,
"learning_rate": 0.00029878039110518704,
"loss": 0.6019,
"step": 347
},
{
"epoch": 0.98,
"learning_rate": 0.000298767994883324,
"loss": 0.5099,
"step": 348
},
{
"epoch": 0.98,
"learning_rate": 0.00029875553624117375,
"loss": 0.3973,
"step": 349
},
{
"epoch": 0.98,
"learning_rate": 0.00029874301518396376,
"loss": 0.6068,
"step": 350
},
{
"epoch": 0.98,
"eval_loss": 0.517635703086853,
"eval_runtime": 26.3545,
"eval_samples_per_second": 7.589,
"eval_steps_per_second": 1.897,
"step": 350
},
{
"dharma_eval_accuracy": 0.36985182212251455,
"dharma_eval_accuracy_ARC-Challenge": 0.37037037037037035,
"dharma_eval_accuracy_ARC-Easy": 0.4444444444444444,
"dharma_eval_accuracy_BoolQ": 0.6851851851851852,
"dharma_eval_accuracy_MMLU": 0.2962962962962963,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.29508196721311475,
"dharma_eval_accuracy_openbookqa": 0.09259259259259259,
"dharma_eval_accuracy_truthful_qa": 0.35185185185185186,
"dharma_eval_accuracy_winogrande": 0.5555555555555556,
"dharma_loss": 2.8203083801269533,
"epoch": 0.98,
"step": 350
},
{
"epoch": 0.99,
"learning_rate": 0.00029873043171694755,
"loss": 0.5346,
"step": 351
},
{
"epoch": 0.99,
"learning_rate": 0.0002987177858454049,
"loss": 0.3792,
"step": 352
},
{
"epoch": 0.99,
"learning_rate": 0.00029870507757464193,
"loss": 0.5227,
"step": 353
},
{
"epoch": 0.99,
"learning_rate": 0.0002986923069099906,
"loss": 0.4218,
"step": 354
},
{
"epoch": 1.0,
"learning_rate": 0.00029867947385680936,
"loss": 0.4866,
"step": 355
},
{
"epoch": 1.0,
"learning_rate": 0.00029866657842048274,
"loss": 0.4593,
"step": 356
},
{
"epoch": 1.0,
"learning_rate": 0.00029865362060642136,
"loss": 0.6367,
"step": 357
},
{
"epoch": 1.0,
"learning_rate": 0.0002986406004200621,
"loss": 0.5064,
"step": 358
},
{
"epoch": 1.01,
"learning_rate": 0.00029862751786686797,
"loss": 0.4206,
"step": 359
},
{
"epoch": 1.01,
"learning_rate": 0.0002986143729523282,
"loss": 0.6043,
"step": 360
},
{
"epoch": 1.01,
"learning_rate": 0.0002986011656819582,
"loss": 0.6889,
"step": 361
},
{
"epoch": 1.02,
"learning_rate": 0.0002985878960612993,
"loss": 0.6266,
"step": 362
},
{
"epoch": 1.02,
"learning_rate": 0.0002985745640959194,
"loss": 0.6016,
"step": 363
},
{
"epoch": 1.02,
"learning_rate": 0.00029856116979141224,
"loss": 0.387,
"step": 364
},
{
"epoch": 1.02,
"learning_rate": 0.00029854771315339785,
"loss": 0.4954,
"step": 365
},
{
"epoch": 1.03,
"learning_rate": 0.0002985341941875224,
"loss": 0.5165,
"step": 366
},
{
"epoch": 1.03,
"learning_rate": 0.0002985206128994581,
"loss": 0.459,
"step": 367
},
{
"epoch": 1.03,
"learning_rate": 0.0002985069692949036,
"loss": 0.5966,
"step": 368
},
{
"epoch": 1.04,
"learning_rate": 0.0002984932633795833,
"loss": 0.4271,
"step": 369
},
{
"epoch": 1.04,
"learning_rate": 0.00029847949515924806,
"loss": 0.4243,
"step": 370
},
{
"epoch": 1.04,
"learning_rate": 0.00029846566463967477,
"loss": 0.4879,
"step": 371
},
{
"epoch": 1.04,
"learning_rate": 0.0002984517718266664,
"loss": 0.4225,
"step": 372
},
{
"epoch": 1.05,
"learning_rate": 0.00029843781672605216,
"loss": 0.385,
"step": 373
},
{
"epoch": 1.05,
"learning_rate": 0.00029842379934368735,
"loss": 0.5161,
"step": 374
},
{
"epoch": 1.05,
"learning_rate": 0.0002984097196854534,
"loss": 0.5034,
"step": 375
},
{
"epoch": 1.05,
"eval_loss": 0.5201339721679688,
"eval_runtime": 26.3832,
"eval_samples_per_second": 7.581,
"eval_steps_per_second": 1.895,
"step": 375
},
{
"dharma_eval_accuracy": 0.40587691825690037,
"dharma_eval_accuracy_ARC-Challenge": 0.24074074074074073,
"dharma_eval_accuracy_ARC-Easy": 0.2962962962962963,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2037037037037037,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.24074074074074073,
"dharma_eval_accuracy_truthful_qa": 0.7037037037037037,
"dharma_eval_accuracy_winogrande": 0.5185185185185185,
"dharma_loss": 2.7163280713558198,
"epoch": 1.05,
"step": 375
},
{
"epoch": 1.06,
"learning_rate": 0.0002983955777572578,
"loss": 0.6131,
"step": 376
},
{
"epoch": 1.06,
"learning_rate": 0.0002983813735650344,
"loss": 0.546,
"step": 377
},
{
"epoch": 1.06,
"learning_rate": 0.00029836710711474287,
"loss": 0.3439,
"step": 378
},
{
"epoch": 1.06,
"learning_rate": 0.0002983527784123692,
"loss": 0.6843,
"step": 379
},
{
"epoch": 1.07,
"learning_rate": 0.0002983383874639254,
"loss": 0.4989,
"step": 380
},
{
"epoch": 1.07,
"learning_rate": 0.0002983239342754498,
"loss": 0.4311,
"step": 381
},
{
"epoch": 1.07,
"learning_rate": 0.0002983094188530065,
"loss": 0.6035,
"step": 382
},
{
"epoch": 1.08,
"learning_rate": 0.000298294841202686,
"loss": 0.3606,
"step": 383
},
{
"epoch": 1.08,
"learning_rate": 0.0002982802013306048,
"loss": 0.3556,
"step": 384
},
{
"epoch": 1.08,
"learning_rate": 0.00029826549924290557,
"loss": 0.4458,
"step": 385
},
{
"epoch": 1.08,
"learning_rate": 0.0002982507349457569,
"loss": 0.4737,
"step": 386
},
{
"epoch": 1.09,
"learning_rate": 0.00029823590844535366,
"loss": 0.5703,
"step": 387
},
{
"epoch": 1.09,
"learning_rate": 0.0002982210197479169,
"loss": 0.3601,
"step": 388
},
{
"epoch": 1.09,
"learning_rate": 0.00029820606885969347,
"loss": 0.337,
"step": 389
},
{
"epoch": 1.09,
"learning_rate": 0.00029819105578695655,
"loss": 0.4757,
"step": 390
},
{
"epoch": 1.1,
"learning_rate": 0.0002981759805360054,
"loss": 0.5072,
"step": 391
},
{
"epoch": 1.1,
"learning_rate": 0.0002981608431131653,
"loss": 0.4409,
"step": 392
},
{
"epoch": 1.1,
"learning_rate": 0.00029814564352478753,
"loss": 0.5252,
"step": 393
},
{
"epoch": 1.11,
"learning_rate": 0.00029813038177724965,
"loss": 0.5016,
"step": 394
},
{
"epoch": 1.11,
"learning_rate": 0.00029811505787695524,
"loss": 0.3186,
"step": 395
},
{
"epoch": 1.11,
"learning_rate": 0.0002980996718303338,
"loss": 0.5822,
"step": 396
},
{
"epoch": 1.11,
"learning_rate": 0.00029808422364384113,
"loss": 0.54,
"step": 397
},
{
"epoch": 1.12,
"learning_rate": 0.00029806871332395895,
"loss": 0.4582,
"step": 398
},
{
"epoch": 1.12,
"learning_rate": 0.0002980531408771951,
"loss": 0.5073,
"step": 399
},
{
"epoch": 1.12,
"learning_rate": 0.00029803750631008356,
"loss": 0.5227,
"step": 400
},
{
"epoch": 1.12,
"eval_loss": 0.5233827829360962,
"eval_runtime": 26.3434,
"eval_samples_per_second": 7.592,
"eval_steps_per_second": 1.898,
"step": 400
},
{
"dharma_eval_accuracy": 0.3770703338947604,
"dharma_eval_accuracy_ARC-Challenge": 0.24074074074074073,
"dharma_eval_accuracy_ARC-Easy": 0.3148148148148148,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2037037037037037,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.09259259259259259,
"dharma_eval_accuracy_truthful_qa": 0.5555555555555556,
"dharma_eval_accuracy_winogrande": 0.5370370370370371,
"dharma_loss": 2.5418553066253664,
"epoch": 1.12,
"step": 400
},
{
"epoch": 1.13,
"learning_rate": 0.00029802180962918426,
"loss": 0.3211,
"step": 401
},
{
"epoch": 1.13,
"learning_rate": 0.00029800605084108315,
"loss": 0.5834,
"step": 402
},
{
"epoch": 1.13,
"learning_rate": 0.0002979902299523925,
"loss": 0.6354,
"step": 403
},
{
"epoch": 1.13,
"learning_rate": 0.00029797434696975035,
"loss": 0.4923,
"step": 404
},
{
"epoch": 1.14,
"learning_rate": 0.0002979584018998209,
"loss": 0.4609,
"step": 405
},
{
"epoch": 1.14,
"learning_rate": 0.0002979423947492944,
"loss": 0.4805,
"step": 406
},
{
"epoch": 1.14,
"learning_rate": 0.0002979263255248872,
"loss": 0.4223,
"step": 407
},
{
"epoch": 1.15,
"learning_rate": 0.0002979101942333416,
"loss": 0.4872,
"step": 408
},
{
"epoch": 1.15,
"learning_rate": 0.00029789400088142605,
"loss": 0.4437,
"step": 409
},
{
"epoch": 1.15,
"learning_rate": 0.000297877745475935,
"loss": 0.7126,
"step": 410
},
{
"epoch": 1.15,
"learning_rate": 0.00029786142802368877,
"loss": 0.3855,
"step": 411
},
{
"epoch": 1.16,
"learning_rate": 0.00029784504853153397,
"loss": 0.4374,
"step": 412
},
{
"epoch": 1.16,
"learning_rate": 0.0002978286070063431,
"loss": 0.4274,
"step": 413
},
{
"epoch": 1.16,
"learning_rate": 0.0002978121034550148,
"loss": 0.406,
"step": 414
},
{
"epoch": 1.16,
"learning_rate": 0.00029779553788447357,
"loss": 0.4755,
"step": 415
},
{
"epoch": 1.17,
"learning_rate": 0.00029777891030167,
"loss": 0.5286,
"step": 416
},
{
"epoch": 1.17,
"learning_rate": 0.00029776222071358074,
"loss": 0.6529,
"step": 417
},
{
"epoch": 1.17,
"learning_rate": 0.0002977454691272084,
"loss": 0.4096,
"step": 418
},
{
"epoch": 1.18,
"learning_rate": 0.0002977286555495818,
"loss": 0.4292,
"step": 419
},
{
"epoch": 1.18,
"learning_rate": 0.00029771177998775536,
"loss": 0.4458,
"step": 420
},
{
"epoch": 1.18,
"learning_rate": 0.0002976948424488099,
"loss": 0.5428,
"step": 421
},
{
"epoch": 1.18,
"learning_rate": 0.0002976778429398521,
"loss": 0.5953,
"step": 422
},
{
"epoch": 1.19,
"learning_rate": 0.00029766078146801453,
"loss": 0.3686,
"step": 423
},
{
"epoch": 1.19,
"learning_rate": 0.000297643658040456,
"loss": 0.6322,
"step": 424
},
{
"epoch": 1.19,
"learning_rate": 0.0002976264726643611,
"loss": 0.35,
"step": 425
},
{
"epoch": 1.19,
"eval_loss": 0.5209073424339294,
"eval_runtime": 26.4295,
"eval_samples_per_second": 7.567,
"eval_steps_per_second": 1.892,
"step": 425
},
{
"dharma_eval_accuracy": 0.3832431734009332,
"dharma_eval_accuracy_ARC-Challenge": 0.25925925925925924,
"dharma_eval_accuracy_ARC-Easy": 0.35185185185185186,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2222222222222222,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.1111111111111111,
"dharma_eval_accuracy_truthful_qa": 0.5370370370370371,
"dharma_eval_accuracy_winogrande": 0.5185185185185185,
"dharma_loss": 2.4492057392597197,
"epoch": 1.19,
"step": 425
},
{
"epoch": 1.2,
"learning_rate": 0.00029760922534694055,
"loss": 0.5357,
"step": 426
},
{
"epoch": 1.2,
"learning_rate": 0.00029759191609543095,
"loss": 0.5112,
"step": 427
},
{
"epoch": 1.2,
"learning_rate": 0.0002975745449170949,
"loss": 0.3245,
"step": 428
},
{
"epoch": 1.2,
"learning_rate": 0.0002975571118192212,
"loss": 0.6413,
"step": 429
},
{
"epoch": 1.21,
"learning_rate": 0.00029753961680912427,
"loss": 0.8456,
"step": 430
},
{
"epoch": 1.21,
"learning_rate": 0.00029752205989414475,
"loss": 0.5009,
"step": 431
},
{
"epoch": 1.21,
"learning_rate": 0.0002975044410816492,
"loss": 0.6083,
"step": 432
},
{
"epoch": 1.22,
"learning_rate": 0.0002974867603790302,
"loss": 0.5866,
"step": 433
},
{
"epoch": 1.22,
"learning_rate": 0.0002974690177937062,
"loss": 0.5901,
"step": 434
},
{
"epoch": 1.22,
"learning_rate": 0.00029745121333312163,
"loss": 0.4596,
"step": 435
},
{
"epoch": 1.22,
"learning_rate": 0.00029743334700474693,
"loss": 0.4795,
"step": 436
},
{
"epoch": 1.23,
"learning_rate": 0.00029741541881607854,
"loss": 0.5234,
"step": 437
},
{
"epoch": 1.23,
"learning_rate": 0.00029739742877463865,
"loss": 0.4583,
"step": 438
},
{
"epoch": 1.23,
"learning_rate": 0.0002973793768879757,
"loss": 0.4084,
"step": 439
},
{
"epoch": 1.24,
"learning_rate": 0.0002973612631636638,
"loss": 0.5177,
"step": 440
},
{
"epoch": 1.24,
"learning_rate": 0.0002973430876093033,
"loss": 0.5485,
"step": 441
},
{
"epoch": 1.24,
"learning_rate": 0.00029732485023252015,
"loss": 0.7071,
"step": 442
},
{
"epoch": 1.24,
"learning_rate": 0.0002973065510409665,
"loss": 0.4424,
"step": 443
},
{
"epoch": 1.25,
"learning_rate": 0.00029728819004232036,
"loss": 0.5394,
"step": 444
},
{
"epoch": 1.25,
"learning_rate": 0.0002972697672442856,
"loss": 0.4934,
"step": 445
},
{
"epoch": 1.25,
"learning_rate": 0.0002972512826545922,
"loss": 0.5253,
"step": 446
},
{
"epoch": 1.25,
"learning_rate": 0.0002972327362809958,
"loss": 0.414,
"step": 447
},
{
"epoch": 1.26,
"learning_rate": 0.0002972141281312782,
"loss": 0.3736,
"step": 448
},
{
"epoch": 1.26,
"learning_rate": 0.000297195458213247,
"loss": 0.471,
"step": 449
},
{
"epoch": 1.26,
"learning_rate": 0.0002971767265347358,
"loss": 0.5088,
"step": 450
},
{
"epoch": 1.26,
"eval_loss": 0.5178927779197693,
"eval_runtime": 26.409,
"eval_samples_per_second": 7.573,
"eval_steps_per_second": 1.893,
"step": 450
},
{
"dharma_eval_accuracy": 0.38306879940358374,
"dharma_eval_accuracy_ARC-Challenge": 0.25925925925925924,
"dharma_eval_accuracy_ARC-Easy": 0.35185185185185186,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2777777777777778,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.18518518518518517,
"dharma_eval_accuracy_truthful_qa": 0.46296296296296297,
"dharma_eval_accuracy_winogrande": 0.4444444444444444,
"dharma_loss": 2.727772394180298,
"epoch": 1.26,
"step": 450
},
{
"epoch": 1.27,
"learning_rate": 0.0002971579331036041,
"loss": 0.5458,
"step": 451
},
{
"epoch": 1.27,
"learning_rate": 0.00029713907792773716,
"loss": 0.6182,
"step": 452
},
{
"epoch": 1.27,
"learning_rate": 0.0002971201610150463,
"loss": 0.2314,
"step": 453
},
{
"epoch": 1.27,
"learning_rate": 0.0002971011823734688,
"loss": 0.479,
"step": 454
},
{
"epoch": 1.28,
"learning_rate": 0.00029708214201096755,
"loss": 0.3984,
"step": 455
},
{
"epoch": 1.28,
"learning_rate": 0.0002970630399355317,
"loss": 0.542,
"step": 456
},
{
"epoch": 1.28,
"learning_rate": 0.00029704387615517606,
"loss": 0.3715,
"step": 457
},
{
"epoch": 1.29,
"learning_rate": 0.00029702465067794144,
"loss": 0.4325,
"step": 458
},
{
"epoch": 1.29,
"learning_rate": 0.00029700536351189445,
"loss": 0.5227,
"step": 459
},
{
"epoch": 1.29,
"learning_rate": 0.0002969860146651276,
"loss": 0.6047,
"step": 460
},
{
"epoch": 1.29,
"learning_rate": 0.0002969666041457594,
"loss": 0.598,
"step": 461
},
{
"epoch": 1.3,
"learning_rate": 0.00029694713196193404,
"loss": 0.5142,
"step": 462
},
{
"epoch": 1.3,
"learning_rate": 0.0002969275981218218,
"loss": 0.3851,
"step": 463
},
{
"epoch": 1.3,
"learning_rate": 0.0002969080026336186,
"loss": 0.5747,
"step": 464
},
{
"epoch": 1.31,
"learning_rate": 0.00029688834550554646,
"loss": 0.4373,
"step": 465
},
{
"epoch": 1.31,
"learning_rate": 0.00029686862674585307,
"loss": 0.3868,
"step": 466
},
{
"epoch": 1.31,
"learning_rate": 0.00029684884636281203,
"loss": 0.6756,
"step": 467
},
{
"epoch": 1.31,
"learning_rate": 0.00029682900436472286,
"loss": 0.5035,
"step": 468
},
{
"epoch": 1.32,
"learning_rate": 0.00029680910075991087,
"loss": 0.4708,
"step": 469
},
{
"epoch": 1.32,
"learning_rate": 0.0002967891355567273,
"loss": 0.5669,
"step": 470
},
{
"epoch": 1.32,
"learning_rate": 0.0002967691087635491,
"loss": 0.4667,
"step": 471
},
{
"epoch": 1.32,
"learning_rate": 0.0002967490203887793,
"loss": 0.5232,
"step": 472
},
{
"epoch": 1.33,
"learning_rate": 0.00029672887044084636,
"loss": 0.408,
"step": 473
},
{
"epoch": 1.33,
"learning_rate": 0.000296708658928205,
"loss": 0.3419,
"step": 474
},
{
"epoch": 1.33,
"learning_rate": 0.00029668838585933556,
"loss": 0.4646,
"step": 475
},
{
"epoch": 1.33,
"eval_loss": 0.5185211300849915,
"eval_runtime": 26.3942,
"eval_samples_per_second": 7.577,
"eval_steps_per_second": 1.894,
"step": 475
},
{
"dharma_eval_accuracy": 0.39335686524720515,
"dharma_eval_accuracy_ARC-Challenge": 0.2962962962962963,
"dharma_eval_accuracy_ARC-Easy": 0.3333333333333333,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.25925925925925924,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.16666666666666666,
"dharma_eval_accuracy_truthful_qa": 0.5185185185185185,
"dharma_eval_accuracy_winogrande": 0.5,
"dharma_loss": 2.565536605358124,
"epoch": 1.33,
"step": 475
},
{
"epoch": 1.34,
"learning_rate": 0.00029666805124274425,
"loss": 0.3971,
"step": 476
},
{
"epoch": 1.34,
"learning_rate": 0.00029664765508696306,
"loss": 0.4487,
"step": 477
},
{
"epoch": 1.34,
"learning_rate": 0.0002966271974005499,
"loss": 0.5002,
"step": 478
},
{
"epoch": 1.34,
"learning_rate": 0.00029660667819208836,
"loss": 0.5285,
"step": 479
},
{
"epoch": 1.35,
"learning_rate": 0.0002965860974701879,
"loss": 0.5782,
"step": 480
},
{
"epoch": 1.35,
"learning_rate": 0.00029656545524348396,
"loss": 0.5334,
"step": 481
},
{
"epoch": 1.35,
"learning_rate": 0.0002965447515206375,
"loss": 0.2758,
"step": 482
},
{
"epoch": 1.36,
"learning_rate": 0.00029652398631033547,
"loss": 0.3964,
"step": 483
},
{
"epoch": 1.36,
"learning_rate": 0.0002965031596212905,
"loss": 0.5087,
"step": 484
},
{
"epoch": 1.36,
"learning_rate": 0.0002964822714622412,
"loss": 0.6133,
"step": 485
},
{
"epoch": 1.36,
"learning_rate": 0.0002964613218419517,
"loss": 0.3947,
"step": 486
},
{
"epoch": 1.37,
"learning_rate": 0.0002964403107692122,
"loss": 0.4799,
"step": 487
},
{
"epoch": 1.37,
"learning_rate": 0.00029641923825283854,
"loss": 0.5146,
"step": 488
},
{
"epoch": 1.37,
"learning_rate": 0.0002963981043016723,
"loss": 0.348,
"step": 489
},
{
"epoch": 1.38,
"learning_rate": 0.000296376908924581,
"loss": 0.5181,
"step": 490
},
{
"epoch": 1.38,
"learning_rate": 0.0002963556521304577,
"loss": 0.4437,
"step": 491
},
{
"epoch": 1.38,
"learning_rate": 0.0002963343339282214,
"loss": 0.5235,
"step": 492
},
{
"epoch": 1.38,
"learning_rate": 0.0002963129543268168,
"loss": 0.3413,
"step": 493
},
{
"epoch": 1.39,
"learning_rate": 0.00029629151333521446,
"loss": 0.4321,
"step": 494
},
{
"epoch": 1.39,
"learning_rate": 0.00029627001096241057,
"loss": 0.3405,
"step": 495
},
{
"epoch": 1.39,
"learning_rate": 0.0002962484472174271,
"loss": 0.4895,
"step": 496
},
{
"epoch": 1.4,
"learning_rate": 0.0002962268221093118,
"loss": 0.4285,
"step": 497
},
{
"epoch": 1.4,
"learning_rate": 0.0002962051356471383,
"loss": 0.4785,
"step": 498
},
{
"epoch": 1.4,
"learning_rate": 0.0002961833878400056,
"loss": 0.618,
"step": 499
},
{
"epoch": 1.4,
"learning_rate": 0.0002961615786970389,
"loss": 0.663,
"step": 500
},
{
"epoch": 1.4,
"eval_loss": 0.5207934379577637,
"eval_runtime": 26.4199,
"eval_samples_per_second": 7.57,
"eval_steps_per_second": 1.893,
"step": 500
},
{
"dharma_eval_accuracy": 0.39952970475337796,
"dharma_eval_accuracy_ARC-Challenge": 0.3148148148148148,
"dharma_eval_accuracy_ARC-Easy": 0.3888888888888889,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.3148148148148148,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.1111111111111111,
"dharma_eval_accuracy_truthful_qa": 0.5185185185185185,
"dharma_eval_accuracy_winogrande": 0.48148148148148145,
"dharma_loss": 2.4424794154167175,
"epoch": 1.4,
"step": 500
},
{
"epoch": 1.41,
"learning_rate": 0.00029613970822738874,
"loss": 0.365,
"step": 501
},
{
"epoch": 1.41,
"learning_rate": 0.0002961177764402317,
"loss": 0.6666,
"step": 502
},
{
"epoch": 1.41,
"learning_rate": 0.00029609578334476987,
"loss": 0.4039,
"step": 503
},
{
"epoch": 1.41,
"learning_rate": 0.0002960737289502311,
"loss": 0.3414,
"step": 504
},
{
"epoch": 1.42,
"learning_rate": 0.00029605161326586916,
"loss": 0.7406,
"step": 505
},
{
"epoch": 1.42,
"learning_rate": 0.00029602943630096325,
"loss": 0.5311,
"step": 506
},
{
"epoch": 1.42,
"learning_rate": 0.00029600719806481844,
"loss": 0.5669,
"step": 507
},
{
"epoch": 1.43,
"learning_rate": 0.0002959848985667655,
"loss": 0.6251,
"step": 508
},
{
"epoch": 1.43,
"learning_rate": 0.00029596253781616084,
"loss": 0.5971,
"step": 509
},
{
"epoch": 1.43,
"learning_rate": 0.0002959401158223867,
"loss": 0.6097,
"step": 510
},
{
"epoch": 1.43,
"learning_rate": 0.00029591763259485083,
"loss": 0.7195,
"step": 511
},
{
"epoch": 1.44,
"learning_rate": 0.0002958950881429869,
"loss": 0.344,
"step": 512
},
{
"epoch": 1.44,
"learning_rate": 0.000295872482476254,
"loss": 0.5158,
"step": 513
},
{
"epoch": 1.44,
"learning_rate": 0.00029584981560413717,
"loss": 0.4253,
"step": 514
},
{
"epoch": 1.45,
"learning_rate": 0.0002958270875361469,
"loss": 0.5837,
"step": 515
},
{
"epoch": 1.45,
"learning_rate": 0.0002958042982818196,
"loss": 0.4485,
"step": 516
},
{
"epoch": 1.45,
"learning_rate": 0.0002957814478507171,
"loss": 0.2953,
"step": 517
},
{
"epoch": 1.45,
"learning_rate": 0.00029575853625242704,
"loss": 0.446,
"step": 518
},
{
"epoch": 1.46,
"learning_rate": 0.00029573556349656277,
"loss": 0.4378,
"step": 519
},
{
"epoch": 1.46,
"learning_rate": 0.0002957125295927631,
"loss": 0.4399,
"step": 520
},
{
"epoch": 1.46,
"learning_rate": 0.00029568943455069276,
"loss": 0.4011,
"step": 521
},
{
"epoch": 1.47,
"learning_rate": 0.00029566627838004193,
"loss": 0.5164,
"step": 522
},
{
"epoch": 1.47,
"learning_rate": 0.0002956430610905265,
"loss": 0.454,
"step": 523
},
{
"epoch": 1.47,
"learning_rate": 0.00029561978269188814,
"loss": 0.6102,
"step": 524
},
{
"epoch": 1.47,
"learning_rate": 0.0002955964431938939,
"loss": 0.4802,
"step": 525
},
{
"epoch": 1.47,
"eval_loss": 0.5176565051078796,
"eval_runtime": 26.4201,
"eval_samples_per_second": 7.57,
"eval_steps_per_second": 1.892,
"step": 525
},
{
"dharma_eval_accuracy": 0.41669896873502815,
"dharma_eval_accuracy_ARC-Challenge": 0.35185185185185186,
"dharma_eval_accuracy_ARC-Easy": 0.48148148148148145,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.37037037037037035,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.45901639344262296,
"dharma_eval_accuracy_openbookqa": 0.12962962962962962,
"dharma_eval_accuracy_truthful_qa": 0.5185185185185185,
"dharma_eval_accuracy_winogrande": 0.48148148148148145,
"dharma_loss": 2.5137911858558657,
"epoch": 1.47,
"step": 525
},
{
"epoch": 1.48,
"learning_rate": 0.0002955730426063365,
"loss": 0.364,
"step": 526
},
{
"epoch": 1.48,
"learning_rate": 0.00029554958093903466,
"loss": 0.3682,
"step": 527
},
{
"epoch": 1.48,
"learning_rate": 0.00029552605820183235,
"loss": 0.3183,
"step": 528
},
{
"epoch": 1.48,
"learning_rate": 0.00029550247440459916,
"loss": 0.5107,
"step": 529
},
{
"epoch": 1.49,
"learning_rate": 0.0002954788295572305,
"loss": 0.3863,
"step": 530
},
{
"epoch": 1.49,
"learning_rate": 0.00029545512366964735,
"loss": 0.5892,
"step": 531
},
{
"epoch": 1.49,
"learning_rate": 0.00029543135675179626,
"loss": 0.5724,
"step": 532
},
{
"epoch": 1.5,
"learning_rate": 0.00029540752881364925,
"loss": 0.5255,
"step": 533
},
{
"epoch": 1.5,
"learning_rate": 0.00029538363986520425,
"loss": 0.6092,
"step": 534
},
{
"epoch": 1.5,
"learning_rate": 0.00029535968991648454,
"loss": 0.4652,
"step": 535
},
{
"epoch": 1.5,
"learning_rate": 0.00029533567897753905,
"loss": 0.4963,
"step": 536
},
{
"epoch": 1.51,
"learning_rate": 0.0002953116070584424,
"loss": 0.4462,
"step": 537
},
{
"epoch": 1.51,
"learning_rate": 0.00029528747416929463,
"loss": 0.599,
"step": 538
},
{
"epoch": 1.51,
"learning_rate": 0.00029526328032022155,
"loss": 0.6611,
"step": 539
},
{
"epoch": 1.52,
"learning_rate": 0.00029523902552137433,
"loss": 0.5107,
"step": 540
},
{
"epoch": 1.52,
"learning_rate": 0.00029521470978292994,
"loss": 0.4466,
"step": 541
},
{
"epoch": 1.52,
"learning_rate": 0.00029519033311509077,
"loss": 0.3413,
"step": 542
},
{
"epoch": 1.52,
"learning_rate": 0.0002951658955280848,
"loss": 0.3824,
"step": 543
},
{
"epoch": 1.53,
"learning_rate": 0.0002951413970321657,
"loss": 0.4518,
"step": 544
},
{
"epoch": 1.53,
"learning_rate": 0.0002951168376376124,
"loss": 0.4747,
"step": 545
},
{
"epoch": 1.53,
"learning_rate": 0.0002950922173547296,
"loss": 0.526,
"step": 546
},
{
"epoch": 1.54,
"learning_rate": 0.00029506753619384766,
"loss": 0.571,
"step": 547
},
{
"epoch": 1.54,
"learning_rate": 0.00029504279416532223,
"loss": 0.5026,
"step": 548
},
{
"epoch": 1.54,
"learning_rate": 0.00029501799127953465,
"loss": 0.5395,
"step": 549
},
{
"epoch": 1.54,
"learning_rate": 0.0002949931275468917,
"loss": 0.2888,
"step": 550
},
{
"epoch": 1.54,
"eval_loss": 0.521288275718689,
"eval_runtime": 26.3822,
"eval_samples_per_second": 7.581,
"eval_steps_per_second": 1.895,
"step": 550
},
{
"dharma_eval_accuracy": 0.40364493109082655,
"dharma_eval_accuracy_ARC-Challenge": 0.3148148148148148,
"dharma_eval_accuracy_ARC-Easy": 0.46296296296296297,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.37037037037037035,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.1111111111111111,
"dharma_eval_accuracy_truthful_qa": 0.4444444444444444,
"dharma_eval_accuracy_winogrande": 0.46296296296296297,
"dharma_loss": 2.6077233877182007,
"epoch": 1.54,
"step": 550
},
{
"epoch": 1.55,
"learning_rate": 0.00029496820297782574,
"loss": 0.5497,
"step": 551
},
{
"epoch": 1.55,
"learning_rate": 0.00029494321758279465,
"loss": 0.4195,
"step": 552
},
{
"epoch": 1.55,
"learning_rate": 0.0002949181713722819,
"loss": 0.5639,
"step": 553
},
{
"epoch": 1.56,
"learning_rate": 0.0002948930643567964,
"loss": 0.5605,
"step": 554
},
{
"epoch": 1.56,
"learning_rate": 0.00029486789654687253,
"loss": 0.5815,
"step": 555
},
{
"epoch": 1.56,
"learning_rate": 0.00029484266795307037,
"loss": 0.4015,
"step": 556
},
{
"epoch": 1.56,
"learning_rate": 0.0002948173785859752,
"loss": 0.4135,
"step": 557
},
{
"epoch": 1.57,
"learning_rate": 0.000294792028456198,
"loss": 0.4335,
"step": 558
},
{
"epoch": 1.57,
"learning_rate": 0.0002947666175743753,
"loss": 0.512,
"step": 559
},
{
"epoch": 1.57,
"learning_rate": 0.00029474114595116896,
"loss": 0.4886,
"step": 560
},
{
"epoch": 1.57,
"learning_rate": 0.00029471561359726645,
"loss": 0.4351,
"step": 561
},
{
"epoch": 1.58,
"learning_rate": 0.0002946900205233807,
"loss": 0.4052,
"step": 562
},
{
"epoch": 1.58,
"learning_rate": 0.00029466436674024997,
"loss": 0.4267,
"step": 563
},
{
"epoch": 1.58,
"learning_rate": 0.0002946386522586382,
"loss": 0.5737,
"step": 564
},
{
"epoch": 1.59,
"learning_rate": 0.00029461287708933473,
"loss": 0.2997,
"step": 565
},
{
"epoch": 1.59,
"learning_rate": 0.00029458704124315425,
"loss": 0.523,
"step": 566
},
{
"epoch": 1.59,
"learning_rate": 0.0002945611447309371,
"loss": 0.4795,
"step": 567
},
{
"epoch": 1.59,
"learning_rate": 0.00029453518756354885,
"loss": 0.5125,
"step": 568
},
{
"epoch": 1.6,
"learning_rate": 0.0002945091697518808,
"loss": 0.4387,
"step": 569
},
{
"epoch": 1.6,
"learning_rate": 0.0002944830913068494,
"loss": 0.6114,
"step": 570
},
{
"epoch": 1.6,
"learning_rate": 0.0002944569522393968,
"loss": 0.3893,
"step": 571
},
{
"epoch": 1.61,
"learning_rate": 0.00029443075256049036,
"loss": 0.6212,
"step": 572
},
{
"epoch": 1.61,
"learning_rate": 0.0002944044922811231,
"loss": 0.333,
"step": 573
},
{
"epoch": 1.61,
"learning_rate": 0.0002943781714123132,
"loss": 0.4745,
"step": 574
},
{
"epoch": 1.61,
"learning_rate": 0.00029435178996510455,
"loss": 0.5085,
"step": 575
},
{
"epoch": 1.61,
"eval_loss": 0.5148590207099915,
"eval_runtime": 26.387,
"eval_samples_per_second": 7.58,
"eval_steps_per_second": 1.895,
"step": 575
},
{
"dharma_eval_accuracy": 0.42216344960934504,
"dharma_eval_accuracy_ARC-Challenge": 0.3148148148148148,
"dharma_eval_accuracy_ARC-Easy": 0.37037037037037035,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.35185185185185186,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.18518518518518517,
"dharma_eval_accuracy_truthful_qa": 0.5925925925925926,
"dharma_eval_accuracy_winogrande": 0.5185185185185185,
"dharma_loss": 2.7804353098869323,
"epoch": 1.61,
"step": 575
},
{
"epoch": 1.62,
"learning_rate": 0.0002943253479505662,
"loss": 0.4255,
"step": 576
},
{
"epoch": 1.62,
"learning_rate": 0.0002942988453797928,
"loss": 0.4885,
"step": 577
},
{
"epoch": 1.62,
"learning_rate": 0.00029427228226390424,
"loss": 0.5474,
"step": 578
},
{
"epoch": 1.63,
"learning_rate": 0.00029424565861404606,
"loss": 0.5646,
"step": 579
},
{
"epoch": 1.63,
"learning_rate": 0.00029421897444138897,
"loss": 0.3399,
"step": 580
},
{
"epoch": 1.63,
"learning_rate": 0.00029419222975712915,
"loss": 0.4264,
"step": 581
},
{
"epoch": 1.63,
"learning_rate": 0.00029416542457248816,
"loss": 0.5138,
"step": 582
},
{
"epoch": 1.64,
"learning_rate": 0.000294138558898713,
"loss": 0.5768,
"step": 583
},
{
"epoch": 1.64,
"learning_rate": 0.000294111632747076,
"loss": 0.475,
"step": 584
},
{
"epoch": 1.64,
"learning_rate": 0.0002940846461288748,
"loss": 0.4983,
"step": 585
},
{
"epoch": 1.64,
"learning_rate": 0.00029405759905543256,
"loss": 0.4168,
"step": 586
},
{
"epoch": 1.65,
"learning_rate": 0.00029403049153809774,
"loss": 0.6149,
"step": 587
},
{
"epoch": 1.65,
"learning_rate": 0.0002940033235882441,
"loss": 0.4848,
"step": 588
},
{
"epoch": 1.65,
"learning_rate": 0.0002939760952172708,
"loss": 0.5926,
"step": 589
},
{
"epoch": 1.66,
"learning_rate": 0.0002939488064366024,
"loss": 0.4797,
"step": 590
},
{
"epoch": 1.66,
"learning_rate": 0.00029392145725768874,
"loss": 0.5197,
"step": 591
},
{
"epoch": 1.66,
"learning_rate": 0.0002938940476920051,
"loss": 0.4958,
"step": 592
},
{
"epoch": 1.66,
"learning_rate": 0.0002938665777510519,
"loss": 0.7065,
"step": 593
},
{
"epoch": 1.67,
"learning_rate": 0.0002938390474463551,
"loss": 0.4155,
"step": 594
},
{
"epoch": 1.67,
"learning_rate": 0.00029381145678946584,
"loss": 0.522,
"step": 595
},
{
"epoch": 1.67,
"learning_rate": 0.00029378380579196076,
"loss": 0.4253,
"step": 596
},
{
"epoch": 1.68,
"learning_rate": 0.00029375609446544165,
"loss": 0.6296,
"step": 597
},
{
"epoch": 1.68,
"learning_rate": 0.0002937283228215356,
"loss": 0.515,
"step": 598
},
{
"epoch": 1.68,
"learning_rate": 0.00029370049087189514,
"loss": 0.6629,
"step": 599
},
{
"epoch": 1.68,
"learning_rate": 0.00029367259862819804,
"loss": 0.5582,
"step": 600
},
{
"epoch": 1.68,
"eval_loss": 0.510773241519928,
"eval_runtime": 26.4135,
"eval_samples_per_second": 7.572,
"eval_steps_per_second": 1.893,
"step": 600
},
{
"dharma_eval_accuracy": 0.4201058364406208,
"dharma_eval_accuracy_ARC-Challenge": 0.2962962962962963,
"dharma_eval_accuracy_ARC-Easy": 0.3888888888888889,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2962962962962963,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.24074074074074073,
"dharma_eval_accuracy_truthful_qa": 0.5740740740740741,
"dharma_eval_accuracy_winogrande": 0.5185185185185185,
"dharma_loss": 2.6925288438796997,
"epoch": 1.68,
"step": 600
}
],
"max_steps": 5000,
"num_train_epochs": 15,
"total_flos": 1.9307426648459674e+17,
"trial_name": null,
"trial_params": null
}