{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9983579638752053,
"eval_steps": 500,
"global_step": 19,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.052545155993431854,
"grad_norm": 16.077190802187676,
"learning_rate": 0.0,
"loss": 2.3057,
"step": 1
},
{
"epoch": 0.10509031198686371,
"grad_norm": 17.39692052150077,
"learning_rate": 5e-06,
"loss": 2.39,
"step": 2
},
{
"epoch": 0.15763546798029557,
"grad_norm": 12.305210679711815,
"learning_rate": 1e-05,
"loss": 2.2061,
"step": 3
},
{
"epoch": 0.21018062397372742,
"grad_norm": 8.35782605169056,
"learning_rate": 9.91486549841951e-06,
"loss": 1.9225,
"step": 4
},
{
"epoch": 0.2627257799671593,
"grad_norm": 11.34251016444334,
"learning_rate": 9.66236114702178e-06,
"loss": 1.667,
"step": 5
},
{
"epoch": 0.31527093596059114,
"grad_norm": 15.501119915951904,
"learning_rate": 9.251085678648072e-06,
"loss": 1.5072,
"step": 6
},
{
"epoch": 0.367816091954023,
"grad_norm": 6.363923937713399,
"learning_rate": 8.695044586103297e-06,
"loss": 1.2449,
"step": 7
},
{
"epoch": 0.42036124794745483,
"grad_norm": 4.647071328375543,
"learning_rate": 8.013173181896283e-06,
"loss": 1.1704,
"step": 8
},
{
"epoch": 0.4729064039408867,
"grad_norm": 3.465580340666149,
"learning_rate": 7.2286917788826926e-06,
"loss": 1.1505,
"step": 9
},
{
"epoch": 0.5254515599343186,
"grad_norm": 2.7262505628699203,
"learning_rate": 6.368314950360416e-06,
"loss": 1.0885,
"step": 10
},
{
"epoch": 0.5779967159277504,
"grad_norm": 2.7528743802546627,
"learning_rate": 5.46134179731651e-06,
"loss": 1.1462,
"step": 11
},
{
"epoch": 0.6305418719211823,
"grad_norm": 2.4196115928746784,
"learning_rate": 4.53865820268349e-06,
"loss": 1.0478,
"step": 12
},
{
"epoch": 0.6830870279146142,
"grad_norm": 2.2650596225267834,
"learning_rate": 3.6316850496395863e-06,
"loss": 1.0811,
"step": 13
},
{
"epoch": 0.735632183908046,
"grad_norm": 2.344241062074149,
"learning_rate": 2.771308221117309e-06,
"loss": 1.0594,
"step": 14
},
{
"epoch": 0.7881773399014779,
"grad_norm": 2.3269744895106927,
"learning_rate": 1.9868268181037186e-06,
"loss": 1.0291,
"step": 15
},
{
"epoch": 0.8407224958949097,
"grad_norm": 2.140419827300594,
"learning_rate": 1.3049554138967052e-06,
"loss": 0.931,
"step": 16
},
{
"epoch": 0.8932676518883416,
"grad_norm": 2.062028584753248,
"learning_rate": 7.489143213519301e-07,
"loss": 1.0292,
"step": 17
},
{
"epoch": 0.9458128078817734,
"grad_norm": 2.0801310407367026,
"learning_rate": 3.3763885297822153e-07,
"loss": 1.0591,
"step": 18
},
{
"epoch": 0.9983579638752053,
"grad_norm": 2.1425834658879146,
"learning_rate": 8.513450158049109e-08,
"loss": 1.0019,
"step": 19
}
],
"logging_steps": 1,
"max_steps": 19,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3084490588160.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}