{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.008923830643542047,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 0.8500534296035767,
"learning_rate": 4.997769042339115e-05,
"loss": 5.7067,
"step": 100
},
{
"epoch": 0.0,
"grad_norm": 0.29711076617240906,
"learning_rate": 4.995538084678229e-05,
"loss": 4.9888,
"step": 200
},
{
"epoch": 0.0,
"grad_norm": 0.21760140359401703,
"learning_rate": 4.9933071270173436e-05,
"loss": 4.7622,
"step": 300
},
{
"epoch": 0.0,
"grad_norm": 0.2236577719449997,
"learning_rate": 4.991076169356459e-05,
"loss": 4.7175,
"step": 400
},
{
"epoch": 0.0,
"grad_norm": 0.18726830184459686,
"learning_rate": 4.9888452116955725e-05,
"loss": 4.6896,
"step": 500
},
{
"epoch": 0.0,
"grad_norm": 0.3301438093185425,
"learning_rate": 4.986614254034687e-05,
"loss": 4.6776,
"step": 600
},
{
"epoch": 0.0,
"grad_norm": 0.23270975053310394,
"learning_rate": 4.984383296373802e-05,
"loss": 4.6656,
"step": 700
},
{
"epoch": 0.0,
"grad_norm": 0.26907047629356384,
"learning_rate": 4.982152338712916e-05,
"loss": 4.6493,
"step": 800
},
{
"epoch": 0.0,
"grad_norm": 0.22385388612747192,
"learning_rate": 4.979921381052031e-05,
"loss": 4.6447,
"step": 900
},
{
"epoch": 0.0,
"grad_norm": 0.28366366028785706,
"learning_rate": 4.9776904233911454e-05,
"loss": 4.632,
"step": 1000
},
{
"epoch": 0.0,
"grad_norm": 0.26614850759506226,
"learning_rate": 4.975459465730259e-05,
"loss": 4.6319,
"step": 1100
},
{
"epoch": 0.01,
"grad_norm": 0.3065544068813324,
"learning_rate": 4.973228508069374e-05,
"loss": 4.6223,
"step": 1200
},
{
"epoch": 0.01,
"grad_norm": 0.23633620142936707,
"learning_rate": 4.970997550408489e-05,
"loss": 4.619,
"step": 1300
},
{
"epoch": 0.01,
"grad_norm": 0.2925296127796173,
"learning_rate": 4.968766592747603e-05,
"loss": 4.616,
"step": 1400
},
{
"epoch": 0.01,
"grad_norm": 0.3636851906776428,
"learning_rate": 4.9665356350867176e-05,
"loss": 4.6118,
"step": 1500
},
{
"epoch": 0.01,
"grad_norm": 0.3567110598087311,
"learning_rate": 4.964304677425832e-05,
"loss": 4.6092,
"step": 1600
},
{
"epoch": 0.01,
"grad_norm": 0.35951370000839233,
"learning_rate": 4.9620737197649465e-05,
"loss": 4.6004,
"step": 1700
},
{
"epoch": 0.01,
"grad_norm": 0.2747381329536438,
"learning_rate": 4.959842762104061e-05,
"loss": 4.5976,
"step": 1800
},
{
"epoch": 0.01,
"grad_norm": 0.33775028586387634,
"learning_rate": 4.9576118044431754e-05,
"loss": 4.594,
"step": 1900
},
{
"epoch": 0.01,
"grad_norm": 0.2855284810066223,
"learning_rate": 4.95538084678229e-05,
"loss": 4.5928,
"step": 2000
}
],
"logging_steps": 100,
"max_steps": 224119,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 1.5251220029472768e+16,
"train_batch_size": 256,
"trial_name": null,
"trial_params": null
}