{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 185,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05405405405405406,
"grad_norm": 1.8533293008804321,
"learning_rate": 0.0001,
"loss": 0.7553,
"step": 10
},
{
"epoch": 0.10810810810810811,
"grad_norm": 1.8566731214523315,
"learning_rate": 9.919647942993148e-05,
"loss": 0.2456,
"step": 20
},
{
"epoch": 0.16216216216216217,
"grad_norm": 1.426089882850647,
"learning_rate": 9.681174353198687e-05,
"loss": 0.1699,
"step": 30
},
{
"epoch": 0.21621621621621623,
"grad_norm": 0.7362193465232849,
"learning_rate": 9.292243968009331e-05,
"loss": 0.1341,
"step": 40
},
{
"epoch": 0.2702702702702703,
"grad_norm": 1.2846084833145142,
"learning_rate": 8.765357330018056e-05,
"loss": 0.1093,
"step": 50
},
{
"epoch": 0.32432432432432434,
"grad_norm": 0.5028020739555359,
"learning_rate": 8.117449009293668e-05,
"loss": 0.106,
"step": 60
},
{
"epoch": 0.3783783783783784,
"grad_norm": 1.2113736867904663,
"learning_rate": 7.369343312364993e-05,
"loss": 0.0977,
"step": 70
},
{
"epoch": 0.43243243243243246,
"grad_norm": 0.6515302658081055,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0861,
"step": 80
},
{
"epoch": 0.4864864864864865,
"grad_norm": 0.5156140923500061,
"learning_rate": 5.6711663290882776e-05,
"loss": 0.0856,
"step": 90
},
{
"epoch": 0.5405405405405406,
"grad_norm": 0.4655522108078003,
"learning_rate": 4.775675848247427e-05,
"loss": 0.065,
"step": 100
},
{
"epoch": 0.5945945945945946,
"grad_norm": 0.6544004678726196,
"learning_rate": 3.887395330218429e-05,
"loss": 0.0742,
"step": 110
},
{
"epoch": 0.6486486486486487,
"grad_norm": 0.5006158947944641,
"learning_rate": 3.0348748417303823e-05,
"loss": 0.0641,
"step": 120
},
{
"epoch": 0.7027027027027027,
"grad_norm": 0.4036032259464264,
"learning_rate": 2.245515092739488e-05,
"loss": 0.0562,
"step": 130
},
{
"epoch": 0.7567567567567568,
"grad_norm": 0.6734123229980469,
"learning_rate": 1.544686755065677e-05,
"loss": 0.0609,
"step": 140
},
{
"epoch": 0.8108108108108109,
"grad_norm": 0.7298457622528076,
"learning_rate": 9.549150281252633e-06,
"loss": 0.065,
"step": 150
},
{
"epoch": 0.8648648648648649,
"grad_norm": 0.47052305936813354,
"learning_rate": 4.951556604879048e-06,
"loss": 0.0577,
"step": 160
},
{
"epoch": 0.918918918918919,
"grad_norm": 0.46818605065345764,
"learning_rate": 1.8018569652073381e-06,
"loss": 0.0547,
"step": 170
},
{
"epoch": 0.972972972972973,
"grad_norm": 0.6747378706932068,
"learning_rate": 2.012853002380466e-07,
"loss": 0.0502,
"step": 180
},
{
"epoch": 1.0,
"step": 185,
"total_flos": 2.671639181415504e+16,
"train_loss": 0.12806547799625911,
"train_runtime": 362.2784,
"train_samples_per_second": 32.53,
"train_steps_per_second": 0.511
}
],
"logging_steps": 10,
"max_steps": 185,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.671639181415504e+16,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}