{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.703625319517357,
"eval_steps": 1000,
"global_step": 800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04397658246983481,
"grad_norm": 1.0395877361297607,
"learning_rate": 0.00019138082673702725,
"loss": 1.3457,
"step": 50
},
{
"epoch": 0.08795316493966962,
"grad_norm": 1.0277217626571655,
"learning_rate": 0.00018258575197889184,
"loss": 1.0388,
"step": 100
},
{
"epoch": 0.13192974740950444,
"grad_norm": 0.9229634404182434,
"learning_rate": 0.0001737906772207564,
"loss": 0.9245,
"step": 150
},
{
"epoch": 0.17590632987933924,
"grad_norm": 0.8828799724578857,
"learning_rate": 0.00016499560246262094,
"loss": 0.8399,
"step": 200
},
{
"epoch": 0.21988291234917406,
"grad_norm": 1.018609642982483,
"learning_rate": 0.0001562005277044855,
"loss": 0.8015,
"step": 250
},
{
"epoch": 0.2638594948190089,
"grad_norm": 0.9031912684440613,
"learning_rate": 0.00014740545294635005,
"loss": 0.7248,
"step": 300
},
{
"epoch": 0.3078360772888437,
"grad_norm": 0.8781272768974304,
"learning_rate": 0.0001386103781882146,
"loss": 0.684,
"step": 350
},
{
"epoch": 0.3518126597586785,
"grad_norm": 0.8193464279174805,
"learning_rate": 0.00012981530343007916,
"loss": 0.6587,
"step": 400
},
{
"epoch": 0.3957892422285133,
"grad_norm": 0.84972083568573,
"learning_rate": 0.00012102022867194372,
"loss": 0.6357,
"step": 450
},
{
"epoch": 0.43976582469834813,
"grad_norm": 0.9738919734954834,
"learning_rate": 0.00011222515391380828,
"loss": 0.6107,
"step": 500
},
{
"epoch": 0.48374240716818295,
"grad_norm": 0.9879764318466187,
"learning_rate": 0.00010343007915567282,
"loss": 0.573,
"step": 550
},
{
"epoch": 0.5277189896380178,
"grad_norm": 0.9049842953681946,
"learning_rate": 9.463500439753739e-05,
"loss": 0.5274,
"step": 600
},
{
"epoch": 0.5716955721078526,
"grad_norm": 0.9793794751167297,
"learning_rate": 8.583992963940193e-05,
"loss": 0.527,
"step": 650
},
{
"epoch": 0.6156721545776874,
"grad_norm": 1.096916913986206,
"learning_rate": 7.704485488126649e-05,
"loss": 0.5072,
"step": 700
},
{
"epoch": 0.6596487370475222,
"grad_norm": 0.9264512062072754,
"learning_rate": 6.824978012313104e-05,
"loss": 0.4914,
"step": 750
},
{
"epoch": 0.703625319517357,
"grad_norm": 0.8605396151542664,
"learning_rate": 5.94547053649956e-05,
"loss": 0.4893,
"step": 800
}
],
"logging_steps": 50,
"max_steps": 1137,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.5520273893154816e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}