{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9966329966329966,
"eval_steps": 500,
"global_step": 185,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0053872053872053875,
"grad_norm": 0.5911839008331299,
"learning_rate": 0.0,
"loss": 0.834,
"step": 1
},
{
"epoch": 0.05387205387205387,
"grad_norm": 0.4561307728290558,
"learning_rate": 9.473684210526316e-05,
"loss": 1.4016,
"step": 10
},
{
"epoch": 0.10774410774410774,
"grad_norm": 0.06328851729631424,
"learning_rate": 0.0002,
"loss": 0.8905,
"step": 20
},
{
"epoch": 0.16161616161616163,
"grad_norm": 0.022817041724920273,
"learning_rate": 0.00018795180722891569,
"loss": 0.6956,
"step": 30
},
{
"epoch": 0.21548821548821548,
"grad_norm": 0.018048042431473732,
"learning_rate": 0.00017590361445783134,
"loss": 0.6707,
"step": 40
},
{
"epoch": 0.26936026936026936,
"grad_norm": 0.028622902929782867,
"learning_rate": 0.00016385542168674699,
"loss": 0.6629,
"step": 50
},
{
"epoch": 0.32323232323232326,
"grad_norm": 0.012167639099061489,
"learning_rate": 0.00015180722891566266,
"loss": 0.6721,
"step": 60
},
{
"epoch": 0.3771043771043771,
"grad_norm": 0.01069930475205183,
"learning_rate": 0.00013975903614457834,
"loss": 0.6565,
"step": 70
},
{
"epoch": 0.43097643097643096,
"grad_norm": 0.015863914042711258,
"learning_rate": 0.00012771084337349396,
"loss": 0.6458,
"step": 80
},
{
"epoch": 0.48484848484848486,
"grad_norm": 0.012852534651756287,
"learning_rate": 0.00011566265060240964,
"loss": 0.6363,
"step": 90
},
{
"epoch": 0.5387205387205387,
"grad_norm": 0.025411192327737808,
"learning_rate": 0.0001036144578313253,
"loss": 0.6416,
"step": 100
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.01106669008731842,
"learning_rate": 9.156626506024096e-05,
"loss": 0.6575,
"step": 110
},
{
"epoch": 0.6464646464646465,
"grad_norm": 0.010959242470562458,
"learning_rate": 7.951807228915663e-05,
"loss": 0.6438,
"step": 120
},
{
"epoch": 0.7003367003367004,
"grad_norm": 0.010998900979757309,
"learning_rate": 6.746987951807229e-05,
"loss": 0.6374,
"step": 130
},
{
"epoch": 0.7542087542087542,
"grad_norm": 0.010141533799469471,
"learning_rate": 5.5421686746987955e-05,
"loss": 0.6316,
"step": 140
},
{
"epoch": 0.8080808080808081,
"grad_norm": 0.023810530081391335,
"learning_rate": 4.337349397590362e-05,
"loss": 0.6369,
"step": 150
},
{
"epoch": 0.8619528619528619,
"grad_norm": 0.010837017558515072,
"learning_rate": 3.132530120481928e-05,
"loss": 0.6532,
"step": 160
},
{
"epoch": 0.9158249158249159,
"grad_norm": 0.009072311222553253,
"learning_rate": 1.927710843373494e-05,
"loss": 0.6411,
"step": 170
},
{
"epoch": 0.9696969696969697,
"grad_norm": 0.013384884223341942,
"learning_rate": 7.228915662650602e-06,
"loss": 0.629,
"step": 180
}
],
"logging_steps": 10,
"max_steps": 185,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.109138039878451e+16,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}