Upload folder using huggingface_hub
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full commit.
- README.md +267 -0
- adapter_config.json +21 -0
- adapter_model.bin +3 -0
- checkpoint-120/README.md +21 -0
- checkpoint-120/adapter_config.json +21 -0
- checkpoint-120/adapter_model.safetensors +3 -0
- checkpoint-120/optimizer.pt +3 -0
- checkpoint-120/rng_state.pth +3 -0
- checkpoint-120/scheduler.pt +3 -0
- checkpoint-120/trainer_state.json +355 -0
- checkpoint-120/training_args.bin +3 -0
- checkpoint-130/README.md +21 -0
- checkpoint-130/adapter_config.json +21 -0
- checkpoint-130/adapter_model.safetensors +3 -0
- checkpoint-130/optimizer.pt +3 -0
- checkpoint-130/rng_state.pth +3 -0
- checkpoint-130/scheduler.pt +3 -0
- checkpoint-130/trainer_state.json +383 -0
- checkpoint-130/training_args.bin +3 -0
- checkpoint-140/README.md +21 -0
- checkpoint-140/adapter_config.json +21 -0
- checkpoint-140/adapter_model.safetensors +3 -0
- checkpoint-140/optimizer.pt +3 -0
- checkpoint-140/rng_state.pth +3 -0
- checkpoint-140/scheduler.pt +3 -0
- checkpoint-140/trainer_state.json +411 -0
- checkpoint-140/training_args.bin +3 -0
- checkpoint-60/README.md +23 -0
- checkpoint-60/adapter_config.json +21 -0
- checkpoint-60/adapter_model.safetensors +3 -0
- checkpoint-60/optimizer.pt +3 -0
- checkpoint-60/rng_state.pth +3 -0
- checkpoint-60/scheduler.pt +3 -0
- checkpoint-60/trainer_state.json +201 -0
- checkpoint-60/training_args.bin +3 -0
- checkpoint-70/README.md +68 -0
- checkpoint-70/adapter_config.json +21 -0
- checkpoint-70/adapter_model.safetensors +3 -0
- checkpoint-70/optimizer.pt +3 -0
- checkpoint-70/rng_state.pth +3 -0
- checkpoint-70/scheduler.pt +3 -0
- checkpoint-70/trainer_state.json +231 -0
- checkpoint-70/training_args.bin +3 -0
- config.json +42 -0
- logs/events.out.tfevents.1699989718.node0370.palmetto.clemson.edu.1119956.2 +3 -0
- logs/events.out.tfevents.1699989813.node0370.palmetto.clemson.edu.1119956.4 +3 -0
- logs/events.out.tfevents.1699994287.node0370.palmetto.clemson.edu.1126403.1 +3 -0
- logs/events.out.tfevents.1699995040.node0370.palmetto.clemson.edu.1127435.1 +3 -0
- logs/events.out.tfevents.1700064888.node0277.palmetto.clemson.edu.1971495.1 +3 -0
- logs/events.out.tfevents.1700066157.node0277.palmetto.clemson.edu.1973537.1 +3 -0
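The commit title says the folder was pushed with `huggingface_hub`. A minimal sketch of how such an upload is typically done; the local path is taken from the checkpoint paths recorded in `trainer_state.json` below and the repo id is a placeholder, neither is confirmed by the commit itself:

```python
# Hypothetical sketch of the kind of upload that produces a commit like this one.
# folder_path is assumed from the trainer_state paths; repo_id is a placeholder.
from huggingface_hub import HfApi

api = HfApi()  # uses the token stored by `huggingface-cli login`
api.upload_folder(
    folder_path="/scratch/kwamea/llama-output",  # local Trainer output dir (assumed)
    repo_id="<username>/<repo-name>",            # target model repo on the Hub
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```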
README.md
ADDED
@@ -0,0 +1,267 @@
---
library_name: peft
---
## Training procedure


The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- _load_in_8bit: False
- _load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
- load_in_4bit: False
- load_in_8bit: False

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- _load_in_8bit: False
- _load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
- load_in_4bit: False
- load_in_8bit: False

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- _load_in_8bit: False
- _load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
- load_in_4bit: False
- load_in_8bit: False

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- _load_in_8bit: False
- _load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
- load_in_4bit: False
- load_in_8bit: False

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- _load_in_8bit: False
- _load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
- load_in_4bit: False
- load_in_8bit: False

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- _load_in_8bit: False
- _load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
- load_in_4bit: False
- load_in_8bit: False

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
### Framework versions

- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0

- PEFT 0.5.0
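The card above only records the `bitsandbytes` configs appended on each training run and the PEFT version, so usage has to be inferred from the adapter files in this repo. Below is a minimal sketch of loading the base model in 8-bit (matching the most recent config above) and attaching this LoRA adapter. Note that the root `adapter_config.json` below names `meta-llama/Llama-2-70b-hf` while the checkpoint configs name `meta-llama/Llama-2-7b-hf`; the sketch assumes the 7b name, and the repo id is a placeholder.

```python
# Minimal sketch, not from the model card: load the 8-bit base model and attach the adapter.
# "<username>/<repo-name>" is a placeholder for this repository's id on the Hub.
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "meta-llama/Llama-2-7b-hf"                 # per the checkpoint adapter_config.json files
bnb_config = BitsAndBytesConfig(load_in_8bit=True)   # mirrors the last config block in the card

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, quantization_config=bnb_config, device_map="auto")
model = PeftModel.from_pretrained(base, "<username>/<repo-name>")  # loads the adapter weights

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0], skip_special_tokens=True))
```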
adapter_config.json
ADDED
@@ -0,0 +1,21 @@
{
  "auto_mapping": null,
  "base_model_name_or_path": "meta-llama/Llama-2-70b-hf",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 8,
  "revision": null,
  "target_modules": [
    "q_proj",
    "v_proj"
  ],
  "task_type": "CAUSAL_LM"
}
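For reference, the JSON above is the serialized form of a PEFT `LoraConfig`. A sketch of the roughly equivalent config object (only fields present above are set explicitly):

```python
# Sketch of the LoraConfig that would serialize to the adapter_config.json above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                                   # LoRA rank
    lora_alpha=32,                         # scaling numerator (effective scale alpha / r = 4)
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],   # attention query/value projections
    task_type="CAUSAL_LM",
)
```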
adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2fcc18a4181c065d107e66cca9058af5a18b263f6263db5b9e223ccd13b5a128
size 65652106
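The three lines above are a Git LFS pointer, not the weights themselves: the actual `adapter_model.bin` (~66 MB) is stored in LFS and identified by its SHA-256 and size. A small sketch for checking a downloaded copy against the pointer:

```python
# Sketch: verify a downloaded adapter_model.bin against the LFS pointer's oid and size.
import hashlib, os

path = "adapter_model.bin"  # local copy of the LFS object
expected_oid = "2fcc18a4181c065d107e66cca9058af5a18b263f6263db5b9e223ccd13b5a128"
expected_size = 65652106

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == expected_size
assert sha.hexdigest() == expected_oid
print("LFS pointer matches the downloaded file.")
```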
checkpoint-120/README.md
ADDED
@@ -0,0 +1,21 @@
---
library_name: peft
---
## Training procedure


The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
### Framework versions


- PEFT 0.5.0
checkpoint-120/adapter_config.json
ADDED
@@ -0,0 +1,21 @@
{
  "auto_mapping": null,
  "base_model_name_or_path": "meta-llama/Llama-2-7b-hf",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 8,
  "revision": null,
  "target_modules": [
    "q_proj",
    "v_proj"
  ],
  "task_type": "CAUSAL_LM"
}
checkpoint-120/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:62efd5c2cab302e216422bdba2e55c8799578254d233788eab87a5ab9d4e7aa6
size 16794200
checkpoint-120/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5b492af0c9917e531befd23e336bf7606d5ab7c02fc99d746779bf82c4104a8d
size 33663866
checkpoint-120/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:02fba8e88cedb0246f54d160518d95e35995cb5dc7cd1fa4e39f15fddf38af99
size 14244
checkpoint-120/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8d483870968c555f36109349b17935837f24c25f4c05d770d0123f84163b943e
size 1064
checkpoint-120/trainer_state.json
ADDED
@@ -0,0 +1,355 @@
{
  "best_metric": 1.6305923461914062,
  "best_model_checkpoint": "/scratch/kwamea/llama-output/checkpoint-120",
  "epoch": 8.571428571428571,
  "eval_steps": 5,
  "global_step": 120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.36, "learning_rate": 9.642857142857143e-05, "loss": 2.2959, "step": 5},
    {"epoch": 0.36, "eval_loss": 2.26009202003479, "eval_runtime": 2.5228, "eval_samples_per_second": 2.378, "eval_steps_per_second": 0.396, "step": 5},
    {"epoch": 0.71, "learning_rate": 9.285714285714286e-05, "loss": 2.2056, "step": 10},
    {"epoch": 0.71, "eval_loss": 2.15522837638855, "eval_runtime": 2.5338, "eval_samples_per_second": 2.368, "eval_steps_per_second": 0.395, "step": 10},
    {"epoch": 1.07, "learning_rate": 8.92857142857143e-05, "loss": 2.097, "step": 15},
    {"epoch": 1.07, "eval_loss": 2.0667991638183594, "eval_runtime": 2.5411, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.394, "step": 15},
    {"epoch": 1.43, "learning_rate": 8.571428571428571e-05, "loss": 2.0293, "step": 20},
    {"epoch": 1.43, "eval_loss": 2.0328972339630127, "eval_runtime": 2.5419, "eval_samples_per_second": 2.36, "eval_steps_per_second": 0.393, "step": 20},
    {"epoch": 1.79, "learning_rate": 8.214285714285714e-05, "loss": 2.0228, "step": 25},
    {"epoch": 1.79, "eval_loss": 1.998112678527832, "eval_runtime": 2.5416, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.393, "step": 25},
    {"epoch": 2.14, "learning_rate": 7.857142857142858e-05, "loss": 1.9493, "step": 30},
    {"epoch": 2.14, "eval_loss": 1.968154788017273, "eval_runtime": 2.5414, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.393, "step": 30},
    {"epoch": 2.5, "learning_rate": 7.500000000000001e-05, "loss": 1.9252, "step": 35},
    {"epoch": 2.5, "eval_loss": 1.937127947807312, "eval_runtime": 2.5401, "eval_samples_per_second": 2.362, "eval_steps_per_second": 0.394, "step": 35},
    {"epoch": 2.86, "learning_rate": 7.142857142857143e-05, "loss": 1.8848, "step": 40},
    {"epoch": 2.86, "eval_loss": 1.9035807847976685, "eval_runtime": 2.5391, "eval_samples_per_second": 2.363, "eval_steps_per_second": 0.394, "step": 40},
    {"epoch": 3.21, "learning_rate": 6.785714285714286e-05, "loss": 1.8708, "step": 45},
    {"epoch": 3.21, "eval_loss": 1.8712326288223267, "eval_runtime": 2.5413, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.394, "step": 45},
    {"epoch": 3.57, "learning_rate": 6.428571428571429e-05, "loss": 1.795, "step": 50},
    {"epoch": 3.57, "eval_loss": 1.8411849737167358, "eval_runtime": 2.5425, "eval_samples_per_second": 2.36, "eval_steps_per_second": 0.393, "step": 50},
    {"epoch": 3.93, "learning_rate": 6.0714285714285715e-05, "loss": 1.7851, "step": 55},
    {"epoch": 3.93, "eval_loss": 1.8131023645401, "eval_runtime": 2.5418, "eval_samples_per_second": 2.36, "eval_steps_per_second": 0.393, "step": 55},
    {"epoch": 4.29, "learning_rate": 5.714285714285714e-05, "loss": 1.737, "step": 60},
    {"epoch": 4.29, "eval_loss": 1.7883529663085938, "eval_runtime": 2.541, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.394, "step": 60},
    {"epoch": 4.64, "learning_rate": 5.3571428571428575e-05, "loss": 1.7509, "step": 65},
    {"epoch": 4.64, "eval_loss": 1.7668665647506714, "eval_runtime": 2.5422, "eval_samples_per_second": 2.36, "eval_steps_per_second": 0.393, "step": 65},
    {"epoch": 5.0, "learning_rate": 5e-05, "loss": 1.7, "step": 70},
    {"epoch": 5.0, "eval_loss": 1.7516651153564453, "eval_runtime": 2.5412, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.394, "step": 70},
    {"epoch": 5.36, "learning_rate": 4.642857142857143e-05, "loss": 1.6734, "step": 75},
    {"epoch": 5.36, "eval_loss": 1.7334843873977661, "eval_runtime": 2.5431, "eval_samples_per_second": 2.359, "eval_steps_per_second": 0.393, "step": 75},
    {"epoch": 5.71, "learning_rate": 4.2857142857142856e-05, "loss": 1.6719, "step": 80},
    {"epoch": 5.71, "eval_loss": 1.7061794996261597, "eval_runtime": 2.5412, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.394, "step": 80},
    {"epoch": 6.07, "learning_rate": 3.928571428571429e-05, "loss": 1.5932, "step": 85},
    {"epoch": 6.07, "eval_loss": 1.6727076768875122, "eval_runtime": 2.5367, "eval_samples_per_second": 2.365, "eval_steps_per_second": 0.394, "step": 85},
    {"epoch": 6.43, "learning_rate": 3.571428571428572e-05, "loss": 1.5866, "step": 90},
    {"epoch": 6.43, "eval_loss": 1.66474187374115, "eval_runtime": 2.5383, "eval_samples_per_second": 2.364, "eval_steps_per_second": 0.394, "step": 90},
    {"epoch": 6.79, "learning_rate": 3.2142857142857144e-05, "loss": 1.577, "step": 95},
    {"epoch": 6.79, "eval_loss": 1.6590815782546997, "eval_runtime": 2.5384, "eval_samples_per_second": 2.364, "eval_steps_per_second": 0.394, "step": 95},
    {"epoch": 7.14, "learning_rate": 2.857142857142857e-05, "loss": 1.5532, "step": 100},
    {"epoch": 7.14, "eval_loss": 1.6509045362472534, "eval_runtime": 2.5429, "eval_samples_per_second": 2.359, "eval_steps_per_second": 0.393, "step": 100},
    {"epoch": 7.5, "learning_rate": 2.5e-05, "loss": 1.5099, "step": 105},
    {"epoch": 7.5, "eval_loss": 1.6463295221328735, "eval_runtime": 2.5379, "eval_samples_per_second": 2.364, "eval_steps_per_second": 0.394, "step": 105},
    {"epoch": 7.86, "learning_rate": 2.1428571428571428e-05, "loss": 1.5717, "step": 110},
    {"epoch": 7.86, "eval_loss": 1.6409095525741577, "eval_runtime": 2.5359, "eval_samples_per_second": 2.366, "eval_steps_per_second": 0.394, "step": 110},
    {"epoch": 8.21, "learning_rate": 1.785714285714286e-05, "loss": 1.5354, "step": 115},
    {"epoch": 8.21, "eval_loss": 1.6344412565231323, "eval_runtime": 2.5372, "eval_samples_per_second": 2.365, "eval_steps_per_second": 0.394, "step": 115},
    {"epoch": 8.57, "learning_rate": 1.4285714285714285e-05, "loss": 1.5127, "step": 120},
    {"epoch": 8.57, "eval_loss": 1.6305923461914062, "eval_runtime": 2.537, "eval_samples_per_second": 2.365, "eval_steps_per_second": 0.394, "step": 120}
  ],
  "logging_steps": 5,
  "max_steps": 140,
  "num_train_epochs": 10,
  "save_steps": 10,
  "total_flos": 3.89964374212608e+16,
  "trial_name": null,
  "trial_params": null
}
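`trainer_state.json` is the `transformers` Trainer's record of the run (eval every 5 steps, a checkpoint every 10 steps, 140 steps total, best checkpoint tracked by eval loss). A short sketch of pulling the loss curves out of it, for example to tabulate or plot them:

```python
# Sketch: extract the train/eval loss curves from a checkpoint's trainer_state.json.
import json

with open("checkpoint-120/trainer_state.json") as f:
    state = json.load(f)

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("best eval loss:", state["best_metric"], "at", state["best_model_checkpoint"])
for (step, train_loss), (_, eval_loss) in zip(train, evals):
    print(f"step {step:3d}  train {train_loss:.4f}  eval {eval_loss:.4f}")
```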
checkpoint-120/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:584bdb07d1eef7bc6a4f12879793b3487ecc8146c1335851f27ac0cac2080d65
size 4600
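`training_args.bin` is the pickled `TrainingArguments` object the Trainer saves next to each checkpoint. A sketch of inspecting it, assuming a compatible `transformers` version is installed locally:

```python
# Sketch: inspect the hyperparameters stored in training_args.bin.
import torch

# weights_only=False is needed on newer PyTorch because this is a pickled Python object,
# not a plain tensor file.
args = torch.load("checkpoint-120/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```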
checkpoint-130/README.md
ADDED
@@ -0,0 +1,21 @@
---
library_name: peft
---
## Training procedure


The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
### Framework versions


- PEFT 0.5.0
checkpoint-130/adapter_config.json
ADDED
@@ -0,0 +1,21 @@
{
  "auto_mapping": null,
  "base_model_name_or_path": "meta-llama/Llama-2-7b-hf",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 8,
  "revision": null,
  "target_modules": [
    "q_proj",
    "v_proj"
  ],
  "task_type": "CAUSAL_LM"
}
checkpoint-130/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0bf971da943c24cf2309c9781a35c85b99251124517b3700a11fad54a7449bdf
size 16794200
checkpoint-130/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:85432cbb3394c47f731a428717c461cd6c0b05fcbdb32de6ebcc7368f8892fe5
size 33663866
checkpoint-130/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:685328dbc242eb3fbc87935b960d8b4f81528ca5a1536e2981f75930a9a32737
size 14244
checkpoint-130/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:17c5fa4f32b4713c8e2d7526e153039e69cef4d5ae6474766f511c2326d33908
size 1064
checkpoint-130/trainer_state.json
ADDED
@@ -0,0 +1,383 @@
{
  "best_metric": 1.6272777318954468,
  "best_model_checkpoint": "/scratch/kwamea/llama-output/checkpoint-130",
  "epoch": 9.285714285714286,
  "eval_steps": 5,
  "global_step": 130,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.36, "learning_rate": 9.642857142857143e-05, "loss": 2.2959, "step": 5},
    {"epoch": 0.36, "eval_loss": 2.26009202003479, "eval_runtime": 2.5228, "eval_samples_per_second": 2.378, "eval_steps_per_second": 0.396, "step": 5},
    {"epoch": 0.71, "learning_rate": 9.285714285714286e-05, "loss": 2.2056, "step": 10},
    {"epoch": 0.71, "eval_loss": 2.15522837638855, "eval_runtime": 2.5338, "eval_samples_per_second": 2.368, "eval_steps_per_second": 0.395, "step": 10},
    {"epoch": 1.07, "learning_rate": 8.92857142857143e-05, "loss": 2.097, "step": 15},
    {"epoch": 1.07, "eval_loss": 2.0667991638183594, "eval_runtime": 2.5411, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.394, "step": 15},
    {"epoch": 1.43, "learning_rate": 8.571428571428571e-05, "loss": 2.0293, "step": 20},
    {"epoch": 1.43, "eval_loss": 2.0328972339630127, "eval_runtime": 2.5419, "eval_samples_per_second": 2.36, "eval_steps_per_second": 0.393, "step": 20},
    {"epoch": 1.79, "learning_rate": 8.214285714285714e-05, "loss": 2.0228, "step": 25},
    {"epoch": 1.79, "eval_loss": 1.998112678527832, "eval_runtime": 2.5416, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.393, "step": 25},
    {"epoch": 2.14, "learning_rate": 7.857142857142858e-05, "loss": 1.9493, "step": 30},
    {"epoch": 2.14, "eval_loss": 1.968154788017273, "eval_runtime": 2.5414, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.393, "step": 30},
    {"epoch": 2.5, "learning_rate": 7.500000000000001e-05, "loss": 1.9252, "step": 35},
    {"epoch": 2.5, "eval_loss": 1.937127947807312, "eval_runtime": 2.5401, "eval_samples_per_second": 2.362, "eval_steps_per_second": 0.394, "step": 35},
    {"epoch": 2.86, "learning_rate": 7.142857142857143e-05, "loss": 1.8848, "step": 40},
    {"epoch": 2.86, "eval_loss": 1.9035807847976685, "eval_runtime": 2.5391, "eval_samples_per_second": 2.363, "eval_steps_per_second": 0.394, "step": 40},
    {"epoch": 3.21, "learning_rate": 6.785714285714286e-05, "loss": 1.8708, "step": 45},
    {"epoch": 3.21, "eval_loss": 1.8712326288223267, "eval_runtime": 2.5413, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.394, "step": 45},
    {"epoch": 3.57, "learning_rate": 6.428571428571429e-05, "loss": 1.795, "step": 50},
    {"epoch": 3.57, "eval_loss": 1.8411849737167358, "eval_runtime": 2.5425, "eval_samples_per_second": 2.36, "eval_steps_per_second": 0.393, "step": 50},
    {"epoch": 3.93, "learning_rate": 6.0714285714285715e-05, "loss": 1.7851, "step": 55},
    {"epoch": 3.93, "eval_loss": 1.8131023645401, "eval_runtime": 2.5418, "eval_samples_per_second": 2.36, "eval_steps_per_second": 0.393, "step": 55},
    {"epoch": 4.29, "learning_rate": 5.714285714285714e-05, "loss": 1.737, "step": 60},
    {"epoch": 4.29, "eval_loss": 1.7883529663085938, "eval_runtime": 2.541, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.394, "step": 60},
    {"epoch": 4.64, "learning_rate": 5.3571428571428575e-05, "loss": 1.7509, "step": 65},
    {"epoch": 4.64, "eval_loss": 1.7668665647506714, "eval_runtime": 2.5422, "eval_samples_per_second": 2.36, "eval_steps_per_second": 0.393, "step": 65},
    {"epoch": 5.0, "learning_rate": 5e-05, "loss": 1.7, "step": 70},
    {"epoch": 5.0, "eval_loss": 1.7516651153564453, "eval_runtime": 2.5412, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.394, "step": 70},
    {"epoch": 5.36, "learning_rate": 4.642857142857143e-05, "loss": 1.6734, "step": 75},
    {"epoch": 5.36, "eval_loss": 1.7334843873977661, "eval_runtime": 2.5431, "eval_samples_per_second": 2.359, "eval_steps_per_second": 0.393, "step": 75},
    {"epoch": 5.71, "learning_rate": 4.2857142857142856e-05, "loss": 1.6719, "step": 80},
    {"epoch": 5.71, "eval_loss": 1.7061794996261597, "eval_runtime": 2.5412, "eval_samples_per_second": 2.361, "eval_steps_per_second": 0.394, "step": 80},
    {"epoch": 6.07, "learning_rate": 3.928571428571429e-05, "loss": 1.5932, "step": 85},
    {"epoch": 6.07, "eval_loss": 1.6727076768875122, "eval_runtime": 2.5367, "eval_samples_per_second": 2.365, "eval_steps_per_second": 0.394, "step": 85},
    {"epoch": 6.43, "learning_rate": 3.571428571428572e-05, "loss": 1.5866, "step": 90},
    {"epoch": 6.43, "eval_loss": 1.66474187374115, "eval_runtime": 2.5383, "eval_samples_per_second": 2.364, "eval_steps_per_second": 0.394, "step": 90},
    {"epoch": 6.79, "learning_rate": 3.2142857142857144e-05, "loss": 1.577, "step": 95},
    {"epoch": 6.79, "eval_loss": 1.6590815782546997, "eval_runtime": 2.5384, "eval_samples_per_second": 2.364, "eval_steps_per_second": 0.394, "step": 95},
    {"epoch": 7.14, "learning_rate": 2.857142857142857e-05, "loss": 1.5532, "step": 100},
    {"epoch": 7.14, "eval_loss": 1.6509045362472534, "eval_runtime": 2.5429, "eval_samples_per_second": 2.359, "eval_steps_per_second": 0.393, "step": 100},
    {"epoch": 7.5, "learning_rate": 2.5e-05, "loss": 1.5099, "step": 105},
    {"epoch": 7.5, "eval_loss": 1.6463295221328735, "eval_runtime": 2.5379, "eval_samples_per_second": 2.364, "eval_steps_per_second": 0.394, "step": 105},
    {"epoch": 7.86, "learning_rate": 2.1428571428571428e-05, "loss": 1.5717, "step": 110},
    {"epoch": 7.86, "eval_loss": 1.6409095525741577, "eval_runtime": 2.5359, "eval_samples_per_second": 2.366, "eval_steps_per_second": 0.394, "step": 110},
    {"epoch": 8.21, "learning_rate": 1.785714285714286e-05, "loss": 1.5354, "step": 115},
    {"epoch": 8.21, "eval_loss": 1.6344412565231323, "eval_runtime": 2.5372, "eval_samples_per_second": 2.365, "eval_steps_per_second": 0.394, "step": 115},
    {"epoch": 8.57, "learning_rate": 1.4285714285714285e-05, "loss": 1.5127, "step": 120},
    {"epoch": 8.57, "eval_loss": 1.6305923461914062, "eval_runtime": 2.537, "eval_samples_per_second": 2.365, "eval_steps_per_second": 0.394, "step": 120},
    {"epoch": 8.93, "learning_rate": 1.0714285714285714e-05, "loss": 1.5234, "step": 125},
    {"epoch": 8.93, "eval_loss": 1.6271618604660034, "eval_runtime": 2.5403, "eval_samples_per_second": 2.362, "eval_steps_per_second": 0.394, "step": 125},
    {"epoch": 9.29, "learning_rate": 7.142857142857143e-06, "loss": 1.4811, "step": 130},
    {"epoch": 9.29, "eval_loss": 1.6272777318954468, "eval_runtime": 2.5362, "eval_samples_per_second": 2.366, "eval_steps_per_second": 0.394, "step": 130}
  ],
  "logging_steps": 5,
  "max_steps": 140,
  "num_train_epochs": 10,
  "save_steps": 10,
  "total_flos": 4.22461405396992e+16,
  "trial_name": null,
  "trial_params": null
}
checkpoint-130/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:584bdb07d1eef7bc6a4f12879793b3487ecc8146c1335851f27ac0cac2080d65
size 4600
checkpoint-140/README.md
ADDED
@@ -0,0 +1,21 @@
---
library_name: peft
---
## Training procedure


The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
### Framework versions


- PEFT 0.5.0
checkpoint-140/adapter_config.json
ADDED
@@ -0,0 +1,21 @@
{
  "auto_mapping": null,
  "base_model_name_or_path": "meta-llama/Llama-2-7b-hf",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 8,
  "revision": null,
  "target_modules": [
    "q_proj",
    "v_proj"
  ],
  "task_type": "CAUSAL_LM"
}
checkpoint-140/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:deacdec1e19e3dc341f8b2842de0a46ea150ef194e7c21c513d50e197684b64b
size 16794200
checkpoint-140/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ee1e452a5a0ffd2da36e28a66280aed0ac73a5d69bf887705cedcede4fe70297
size 33663866
checkpoint-140/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8bf7d2b3876e2e10b18c6cac5ddf00968ca6923be08b3bd84e271aa516fea7de
size 14244
checkpoint-140/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f8c98fcf42f494fa75f135fb338a20acefff4d78f4f44abc68b10fde88458a02
size 1064
checkpoint-140/trainer_state.json
ADDED
|
@@ -0,0 +1,411 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"best_metric": 1.623923897743225,
|
| 3 |
+
"best_model_checkpoint": "/scratch/kwamea/llama-output/checkpoint-140",
|
| 4 |
+
"epoch": 10.0,
|
| 5 |
+
"eval_steps": 5,
|
| 6 |
+
"global_step": 140,
|
| 7 |
+
"is_hyper_param_search": false,
|
| 8 |
+
"is_local_process_zero": true,
|
| 9 |
+
"is_world_process_zero": true,
|
| 10 |
+
"log_history": [
|
| 11 |
+
{
|
| 12 |
+
"epoch": 0.36,
|
| 13 |
+
"learning_rate": 9.642857142857143e-05,
|
| 14 |
+
"loss": 2.2959,
"step": 5
},
{
"epoch": 0.36,
"eval_loss": 2.26009202003479,
"eval_runtime": 2.5228,
"eval_samples_per_second": 2.378,
"eval_steps_per_second": 0.396,
"step": 5
},
{
"epoch": 0.71,
"learning_rate": 9.285714285714286e-05,
"loss": 2.2056,
"step": 10
},
{
"epoch": 0.71,
"eval_loss": 2.15522837638855,
"eval_runtime": 2.5338,
"eval_samples_per_second": 2.368,
"eval_steps_per_second": 0.395,
"step": 10
},
{
"epoch": 1.07,
"learning_rate": 8.92857142857143e-05,
"loss": 2.097,
"step": 15
},
{
"epoch": 1.07,
"eval_loss": 2.0667991638183594,
"eval_runtime": 2.5411,
"eval_samples_per_second": 2.361,
"eval_steps_per_second": 0.394,
"step": 15
},
{
"epoch": 1.43,
"learning_rate": 8.571428571428571e-05,
"loss": 2.0293,
"step": 20
},
{
"epoch": 1.43,
"eval_loss": 2.0328972339630127,
"eval_runtime": 2.5419,
"eval_samples_per_second": 2.36,
"eval_steps_per_second": 0.393,
"step": 20
},
{
"epoch": 1.79,
"learning_rate": 8.214285714285714e-05,
"loss": 2.0228,
"step": 25
},
{
"epoch": 1.79,
"eval_loss": 1.998112678527832,
"eval_runtime": 2.5416,
"eval_samples_per_second": 2.361,
"eval_steps_per_second": 0.393,
"step": 25
},
{
"epoch": 2.14,
"learning_rate": 7.857142857142858e-05,
"loss": 1.9493,
"step": 30
},
{
"epoch": 2.14,
"eval_loss": 1.968154788017273,
"eval_runtime": 2.5414,
"eval_samples_per_second": 2.361,
"eval_steps_per_second": 0.393,
"step": 30
},
{
"epoch": 2.5,
"learning_rate": 7.500000000000001e-05,
"loss": 1.9252,
"step": 35
},
{
"epoch": 2.5,
"eval_loss": 1.937127947807312,
"eval_runtime": 2.5401,
"eval_samples_per_second": 2.362,
"eval_steps_per_second": 0.394,
"step": 35
},
{
"epoch": 2.86,
"learning_rate": 7.142857142857143e-05,
"loss": 1.8848,
"step": 40
},
{
"epoch": 2.86,
"eval_loss": 1.9035807847976685,
"eval_runtime": 2.5391,
"eval_samples_per_second": 2.363,
"eval_steps_per_second": 0.394,
"step": 40
},
{
"epoch": 3.21,
"learning_rate": 6.785714285714286e-05,
"loss": 1.8708,
"step": 45
},
{
"epoch": 3.21,
"eval_loss": 1.8712326288223267,
"eval_runtime": 2.5413,
"eval_samples_per_second": 2.361,
"eval_steps_per_second": 0.394,
"step": 45
},
{
"epoch": 3.57,
"learning_rate": 6.428571428571429e-05,
"loss": 1.795,
"step": 50
},
{
"epoch": 3.57,
"eval_loss": 1.8411849737167358,
"eval_runtime": 2.5425,
"eval_samples_per_second": 2.36,
"eval_steps_per_second": 0.393,
"step": 50
},
{
"epoch": 3.93,
"learning_rate": 6.0714285714285715e-05,
"loss": 1.7851,
"step": 55
},
{
"epoch": 3.93,
"eval_loss": 1.8131023645401,
"eval_runtime": 2.5418,
"eval_samples_per_second": 2.36,
"eval_steps_per_second": 0.393,
"step": 55
},
{
"epoch": 4.29,
"learning_rate": 5.714285714285714e-05,
"loss": 1.737,
"step": 60
},
{
"epoch": 4.29,
"eval_loss": 1.7883529663085938,
"eval_runtime": 2.541,
"eval_samples_per_second": 2.361,
"eval_steps_per_second": 0.394,
"step": 60
},
{
"epoch": 4.64,
"learning_rate": 5.3571428571428575e-05,
"loss": 1.7509,
"step": 65
},
{
"epoch": 4.64,
"eval_loss": 1.7668665647506714,
"eval_runtime": 2.5422,
"eval_samples_per_second": 2.36,
"eval_steps_per_second": 0.393,
"step": 65
},
{
"epoch": 5.0,
"learning_rate": 5e-05,
"loss": 1.7,
"step": 70
},
{
"epoch": 5.0,
"eval_loss": 1.7516651153564453,
"eval_runtime": 2.5412,
"eval_samples_per_second": 2.361,
"eval_steps_per_second": 0.394,
"step": 70
},
{
"epoch": 5.36,
"learning_rate": 4.642857142857143e-05,
"loss": 1.6734,
"step": 75
},
{
"epoch": 5.36,
"eval_loss": 1.7334843873977661,
"eval_runtime": 2.5431,
"eval_samples_per_second": 2.359,
"eval_steps_per_second": 0.393,
"step": 75
},
{
"epoch": 5.71,
"learning_rate": 4.2857142857142856e-05,
"loss": 1.6719,
"step": 80
},
{
"epoch": 5.71,
"eval_loss": 1.7061794996261597,
"eval_runtime": 2.5412,
"eval_samples_per_second": 2.361,
"eval_steps_per_second": 0.394,
"step": 80
},
{
"epoch": 6.07,
"learning_rate": 3.928571428571429e-05,
"loss": 1.5932,
"step": 85
},
{
"epoch": 6.07,
"eval_loss": 1.6727076768875122,
"eval_runtime": 2.5367,
"eval_samples_per_second": 2.365,
"eval_steps_per_second": 0.394,
"step": 85
},
{
"epoch": 6.43,
"learning_rate": 3.571428571428572e-05,
"loss": 1.5866,
"step": 90
},
{
"epoch": 6.43,
"eval_loss": 1.66474187374115,
"eval_runtime": 2.5383,
"eval_samples_per_second": 2.364,
"eval_steps_per_second": 0.394,
"step": 90
},
{
"epoch": 6.79,
"learning_rate": 3.2142857142857144e-05,
"loss": 1.577,
"step": 95
},
{
"epoch": 6.79,
"eval_loss": 1.6590815782546997,
"eval_runtime": 2.5384,
"eval_samples_per_second": 2.364,
"eval_steps_per_second": 0.394,
"step": 95
},
{
"epoch": 7.14,
"learning_rate": 2.857142857142857e-05,
"loss": 1.5532,
"step": 100
},
{
"epoch": 7.14,
"eval_loss": 1.6509045362472534,
"eval_runtime": 2.5429,
"eval_samples_per_second": 2.359,
"eval_steps_per_second": 0.393,
"step": 100
},
{
"epoch": 7.5,
"learning_rate": 2.5e-05,
"loss": 1.5099,
"step": 105
},
{
"epoch": 7.5,
"eval_loss": 1.6463295221328735,
"eval_runtime": 2.5379,
"eval_samples_per_second": 2.364,
"eval_steps_per_second": 0.394,
"step": 105
},
{
"epoch": 7.86,
"learning_rate": 2.1428571428571428e-05,
"loss": 1.5717,
"step": 110
},
{
"epoch": 7.86,
"eval_loss": 1.6409095525741577,
"eval_runtime": 2.5359,
"eval_samples_per_second": 2.366,
"eval_steps_per_second": 0.394,
"step": 110
},
{
"epoch": 8.21,
"learning_rate": 1.785714285714286e-05,
"loss": 1.5354,
"step": 115
},
{
"epoch": 8.21,
"eval_loss": 1.6344412565231323,
"eval_runtime": 2.5372,
"eval_samples_per_second": 2.365,
"eval_steps_per_second": 0.394,
"step": 115
},
{
"epoch": 8.57,
"learning_rate": 1.4285714285714285e-05,
"loss": 1.5127,
"step": 120
},
{
"epoch": 8.57,
"eval_loss": 1.6305923461914062,
"eval_runtime": 2.537,
"eval_samples_per_second": 2.365,
"eval_steps_per_second": 0.394,
"step": 120
},
{
"epoch": 8.93,
"learning_rate": 1.0714285714285714e-05,
"loss": 1.5234,
"step": 125
},
{
"epoch": 8.93,
"eval_loss": 1.6271618604660034,
"eval_runtime": 2.5403,
"eval_samples_per_second": 2.362,
"eval_steps_per_second": 0.394,
"step": 125
},
{
"epoch": 9.29,
"learning_rate": 7.142857142857143e-06,
"loss": 1.4811,
"step": 130
},
{
"epoch": 9.29,
"eval_loss": 1.6272777318954468,
"eval_runtime": 2.5362,
"eval_samples_per_second": 2.366,
"eval_steps_per_second": 0.394,
"step": 130
},
{
"epoch": 9.64,
"learning_rate": 3.5714285714285714e-06,
"loss": 1.5167,
"step": 135
},
{
"epoch": 9.64,
"eval_loss": 1.625247597694397,
"eval_runtime": 2.5383,
"eval_samples_per_second": 2.364,
"eval_steps_per_second": 0.394,
"step": 135
},
{
"epoch": 10.0,
"learning_rate": 0.0,
"loss": 1.4972,
"step": 140
},
{
"epoch": 10.0,
"eval_loss": 1.623923897743225,
"eval_runtime": 2.536,
"eval_samples_per_second": 2.366,
"eval_steps_per_second": 0.394,
"step": 140
}
],
"logging_steps": 5,
"max_steps": 140,
"num_train_epochs": 10,
"save_steps": 10,
"total_flos": 4.54958436581376e+16,
"trial_name": null,
"trial_params": null
}
checkpoint-140/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:584bdb07d1eef7bc6a4f12879793b3487ecc8146c1335851f27ac0cac2080d65
size 4600
checkpoint-60/README.md
ADDED
@@ -0,0 +1,23 @@
---
library_name: peft
---
## Training procedure


The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- _load_in_8bit: False
- _load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
- load_in_4bit: False
- load_in_8bit: False
### Framework versions


- PEFT 0.5.0
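The quantization settings listed in that README map one-to-one onto `transformers.BitsAndBytesConfig`. As a minimal sketch (not a file in this repository, and only assuming the field values shown above), the same config could be rebuilt like this; note that with both `load_in_8bit` and `load_in_4bit` set to `False` it effectively applies no quantization:

```python
import torch
from transformers import BitsAndBytesConfig

# Rebuild of the quantization config recorded in the README (sketch only).
# All values mirror the list above; with load_in_8bit and load_in_4bit both
# False, bitsandbytes quantization is effectively disabled.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)
```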
checkpoint-60/adapter_config.json
ADDED
@@ -0,0 +1,21 @@
{
"auto_mapping": null,
"base_model_name_or_path": "meta-llama/Llama-2-70b-hf",
"bias": "none",
"fan_in_fan_out": false,
"inference_mode": true,
"init_lora_weights": true,
"layers_pattern": null,
"layers_to_transform": null,
"lora_alpha": 32,
"lora_dropout": 0.05,
"modules_to_save": null,
"peft_type": "LORA",
"r": 8,
"revision": null,
"target_modules": [
"q_proj",
"v_proj"
],
"task_type": "CAUSAL_LM"
}
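For reference, the adapter config above corresponds to the following `peft.LoraConfig` — a sketch assuming PEFT 0.5.x semantics, not a file shipped in this repository:

```python
from peft import LoraConfig

# LoRA settings equivalent to checkpoint-60/adapter_config.json (sketch only):
# rank-8 adapters with alpha 32 and 5% dropout on the q_proj/v_proj attention
# projections of meta-llama/Llama-2-70b-hf, for causal language modeling.
lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
```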
checkpoint-60/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7bd8f64fda45ff6615b3e4252c72be74e77270ca7b822f9dcb75c103285df211
size 65578776
checkpoint-60/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f3fc876fe0e006834631446ad7f0e991808e2435e346516e2ba28c5184fae096
size 131345914
checkpoint-60/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f2b438a27eddee154866003dcf691d9169d17b12b3ded46c944feca5f44c9583
size 14244
checkpoint-60/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:532a066b51f06d04618044a51287ede2469b5330f8f8e15d3cb8c13dc71684e6
size 1064
checkpoint-60/trainer_state.json
ADDED
@@ -0,0 +1,201 @@
{
"best_metric": 1.6068811416625977,
"best_model_checkpoint": "/scratch/kwamea/llama-output/checkpoint-60",
"epoch": 8.275862068965518,
"eval_steps": 5,
"global_step": 60,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.69,
"grad_norm": 0.27489739656448364,
"learning_rate": 9.285714285714286e-05,
"loss": 1.9945,
"step": 5
},
{
"epoch": 0.69,
"eval_loss": 2.0204718112945557,
"eval_runtime": 14.7113,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 5
},
{
"epoch": 1.38,
"grad_norm": 0.36750176548957825,
"learning_rate": 8.571428571428571e-05,
"loss": 1.9427,
"step": 10
},
{
"epoch": 1.38,
"eval_loss": 1.9357692003250122,
"eval_runtime": 14.713,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 10
},
{
"epoch": 2.07,
"grad_norm": 0.4677024185657501,
"learning_rate": 7.857142857142858e-05,
"loss": 1.8651,
"step": 15
},
{
"epoch": 2.07,
"eval_loss": 1.8461824655532837,
"eval_runtime": 14.7203,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 15
},
{
"epoch": 2.76,
"grad_norm": 0.40606027841567993,
"learning_rate": 7.142857142857143e-05,
"loss": 1.7948,
"step": 20
},
{
"epoch": 2.76,
"eval_loss": 1.801284670829773,
"eval_runtime": 14.7281,
"eval_samples_per_second": 0.407,
"eval_steps_per_second": 0.068,
"step": 20
},
{
"epoch": 3.45,
"grad_norm": 0.3058153986930847,
"learning_rate": 6.428571428571429e-05,
"loss": 1.7233,
"step": 25
},
{
"epoch": 3.45,
"eval_loss": 1.766973853111267,
"eval_runtime": 14.6916,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 25
},
{
"epoch": 4.14,
"grad_norm": 0.2967859208583832,
"learning_rate": 5.714285714285714e-05,
"loss": 1.7117,
"step": 30
},
{
"epoch": 4.14,
"eval_loss": 1.7350422143936157,
"eval_runtime": 14.7126,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 30
},
{
"epoch": 4.83,
"grad_norm": 0.31578946113586426,
"learning_rate": 5e-05,
"loss": 1.6904,
"step": 35
},
{
"epoch": 4.83,
"eval_loss": 1.7066936492919922,
"eval_runtime": 14.7265,
"eval_samples_per_second": 0.407,
"eval_steps_per_second": 0.068,
"step": 35
},
{
"epoch": 5.52,
"grad_norm": 0.34217244386672974,
"learning_rate": 4.2857142857142856e-05,
"loss": 1.6459,
"step": 40
},
{
"epoch": 5.52,
"eval_loss": 1.6809816360473633,
"eval_runtime": 14.7004,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 40
},
{
"epoch": 6.21,
"grad_norm": 0.36268118023872375,
"learning_rate": 3.571428571428572e-05,
"loss": 1.5992,
"step": 45
},
{
"epoch": 6.21,
"eval_loss": 1.6565723419189453,
"eval_runtime": 14.6987,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 45
},
{
"epoch": 6.9,
"grad_norm": 0.3854450583457947,
"learning_rate": 2.857142857142857e-05,
"loss": 1.6088,
"step": 50
},
{
"epoch": 6.9,
"eval_loss": 1.6376428604125977,
"eval_runtime": 14.7228,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 50
},
{
"epoch": 7.59,
"grad_norm": 0.39184266328811646,
"learning_rate": 2.1428571428571428e-05,
"loss": 1.5722,
"step": 55
},
{
"epoch": 7.59,
"eval_loss": 1.6203418970108032,
"eval_runtime": 14.7373,
"eval_samples_per_second": 0.407,
"eval_steps_per_second": 0.068,
"step": 55
},
{
"epoch": 8.28,
"grad_norm": 0.41270822286605835,
"learning_rate": 1.4285714285714285e-05,
"loss": 1.5593,
"step": 60
},
{
"epoch": 8.28,
"eval_loss": 1.6068811416625977,
"eval_runtime": 14.7065,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 60
}
],
"logging_steps": 5,
"max_steps": 70,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10,
"total_flos": 4.053912739695821e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
checkpoint-60/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f4c74a2b62a8cab8f69632f5511d4781e140b9b59c7c1d8108fbd66233b3d511
size 4920
checkpoint-70/README.md
ADDED
@@ -0,0 +1,68 @@
---
library_name: peft
---
## Training procedure


The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- _load_in_8bit: False
- _load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
- load_in_4bit: False
- load_in_8bit: False

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- _load_in_8bit: False
- _load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
- load_in_4bit: False
- load_in_8bit: False

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- _load_in_8bit: False
- _load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
- load_in_4bit: False
- load_in_8bit: False

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- _load_in_8bit: False
- _load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32
- load_in_4bit: False
- load_in_8bit: False
### Framework versions

- PEFT 0.5.0
- PEFT 0.5.0
- PEFT 0.5.0

- PEFT 0.5.0
checkpoint-70/adapter_config.json
ADDED
@@ -0,0 +1,21 @@
{
"auto_mapping": null,
"base_model_name_or_path": "meta-llama/Llama-2-70b-hf",
"bias": "none",
"fan_in_fan_out": false,
"inference_mode": true,
"init_lora_weights": true,
"layers_pattern": null,
"layers_to_transform": null,
"lora_alpha": 32,
"lora_dropout": 0.05,
"modules_to_save": null,
"peft_type": "LORA",
"r": 8,
"revision": null,
"target_modules": [
"q_proj",
"v_proj"
],
"task_type": "CAUSAL_LM"
}
checkpoint-70/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:151c72fda5d538af519864c3bc1a2b3cd0d654e54208865ae1c7ae608be4af9a
size 65578776
checkpoint-70/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:54c0d5b07ddbceee7338bc556836c713b321200dc211a359184f6293a0d66b40
size 131345914
checkpoint-70/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b5f0e1881495fd4cc456489a0d9fe631a628cf2bf127bb12be7abb9a7388d623
size 14244
checkpoint-70/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b6972eef268c24c724483865d9931d7303b58c4b5768be8129b5fb16010a7eb9
size 1064
checkpoint-70/trainer_state.json
ADDED
@@ -0,0 +1,231 @@
{
"best_metric": 1.5954241752624512,
"best_model_checkpoint": "/scratch/kwamea/llama-output/checkpoint-70",
"epoch": 9.655172413793103,
"eval_steps": 5,
"global_step": 70,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.69,
"grad_norm": 0.27489739656448364,
"learning_rate": 9.285714285714286e-05,
"loss": 1.9945,
"step": 5
},
{
"epoch": 0.69,
"eval_loss": 2.0204718112945557,
"eval_runtime": 14.7113,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 5
},
{
"epoch": 1.38,
"grad_norm": 0.36750176548957825,
"learning_rate": 8.571428571428571e-05,
"loss": 1.9427,
"step": 10
},
{
"epoch": 1.38,
"eval_loss": 1.9357692003250122,
"eval_runtime": 14.713,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 10
},
{
"epoch": 2.07,
"grad_norm": 0.4677024185657501,
"learning_rate": 7.857142857142858e-05,
"loss": 1.8651,
"step": 15
},
{
"epoch": 2.07,
"eval_loss": 1.8461824655532837,
"eval_runtime": 14.7203,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 15
},
{
"epoch": 2.76,
"grad_norm": 0.40606027841567993,
"learning_rate": 7.142857142857143e-05,
"loss": 1.7948,
"step": 20
},
{
"epoch": 2.76,
"eval_loss": 1.801284670829773,
"eval_runtime": 14.7281,
"eval_samples_per_second": 0.407,
"eval_steps_per_second": 0.068,
"step": 20
},
{
"epoch": 3.45,
"grad_norm": 0.3058153986930847,
"learning_rate": 6.428571428571429e-05,
"loss": 1.7233,
"step": 25
},
{
"epoch": 3.45,
"eval_loss": 1.766973853111267,
"eval_runtime": 14.6916,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 25
},
{
"epoch": 4.14,
"grad_norm": 0.2967859208583832,
"learning_rate": 5.714285714285714e-05,
"loss": 1.7117,
"step": 30
},
{
"epoch": 4.14,
"eval_loss": 1.7350422143936157,
"eval_runtime": 14.7126,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 30
},
{
"epoch": 4.83,
"grad_norm": 0.31578946113586426,
"learning_rate": 5e-05,
"loss": 1.6904,
"step": 35
},
{
"epoch": 4.83,
"eval_loss": 1.7066936492919922,
"eval_runtime": 14.7265,
"eval_samples_per_second": 0.407,
"eval_steps_per_second": 0.068,
"step": 35
},
{
"epoch": 5.52,
"grad_norm": 0.34217244386672974,
"learning_rate": 4.2857142857142856e-05,
"loss": 1.6459,
"step": 40
},
{
"epoch": 5.52,
"eval_loss": 1.6809816360473633,
"eval_runtime": 14.7004,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 40
},
{
"epoch": 6.21,
"grad_norm": 0.36268118023872375,
"learning_rate": 3.571428571428572e-05,
"loss": 1.5992,
"step": 45
},
{
"epoch": 6.21,
"eval_loss": 1.6565723419189453,
"eval_runtime": 14.6987,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 45
},
{
"epoch": 6.9,
"grad_norm": 0.3854450583457947,
"learning_rate": 2.857142857142857e-05,
"loss": 1.6088,
"step": 50
},
{
"epoch": 6.9,
"eval_loss": 1.6376428604125977,
"eval_runtime": 14.7228,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 50
},
{
"epoch": 7.59,
"grad_norm": 0.39184266328811646,
"learning_rate": 2.1428571428571428e-05,
"loss": 1.5722,
"step": 55
},
{
"epoch": 7.59,
"eval_loss": 1.6203418970108032,
"eval_runtime": 14.7373,
"eval_samples_per_second": 0.407,
"eval_steps_per_second": 0.068,
"step": 55
},
{
"epoch": 8.28,
"grad_norm": 0.41270822286605835,
"learning_rate": 1.4285714285714285e-05,
"loss": 1.5593,
"step": 60
},
{
"epoch": 8.28,
"eval_loss": 1.6068811416625977,
"eval_runtime": 14.7065,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 60
},
{
"epoch": 8.97,
"grad_norm": 0.44460129737854004,
"learning_rate": 7.142857142857143e-06,
"loss": 1.5212,
"step": 65
},
{
"epoch": 8.97,
"eval_loss": 1.5982898473739624,
"eval_runtime": 14.6929,
"eval_samples_per_second": 0.408,
"eval_steps_per_second": 0.068,
"step": 65
},
{
"epoch": 9.66,
"grad_norm": 0.44520503282546997,
"learning_rate": 0.0,
"loss": 1.5438,
"step": 70
},
{
"epoch": 9.66,
"eval_loss": 1.5954241752624512,
"eval_runtime": 14.7317,
"eval_samples_per_second": 0.407,
"eval_steps_per_second": 0.068,
"step": 70
}
],
"logging_steps": 5,
"max_steps": 70,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10,
"total_flos": 4.7295648629784576e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
checkpoint-70/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f4c74a2b62a8cab8f69632f5511d4781e140b9b59c7c1d8108fbd66233b3d511
size 4920
config.json
ADDED
@@ -0,0 +1,42 @@
{
"_name_or_path": "meta-llama/Llama-2-70b-hf",
"architectures": [
"LlamaForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 1,
"eos_token_id": 2,
"hidden_act": "silu",
"hidden_size": 8192,
"initializer_range": 0.02,
"intermediate_size": 28672,
"max_position_embeddings": 4096,
"model_type": "llama",
"num_attention_heads": 64,
"num_hidden_layers": 80,
"num_key_value_heads": 8,
"pretraining_tp": 1,
"quantization_config": {
"_load_in_4bit": false,
"_load_in_8bit": false,
"bnb_4bit_compute_dtype": "float32",
"bnb_4bit_quant_type": "fp4",
"bnb_4bit_use_double_quant": false,
"llm_int8_enable_fp32_cpu_offload": false,
"llm_int8_has_fp16_weight": false,
"llm_int8_skip_modules": null,
"llm_int8_threshold": 6.0,
"load_in_4bit": false,
"load_in_8bit": false,
"quant_method": "bitsandbytes"
},
"rms_norm_eps": 1e-05,
"rope_scaling": null,
"rope_theta": 10000.0,
"tie_word_embeddings": false,
"torch_dtype": "float16",
"transformers_version": "4.38.1",
"use_cache": true,
"vocab_size": 32000
}
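config.json identifies the base model (meta-llama/Llama-2-70b-hf) and embeds the bitsandbytes quantization config shown above. A minimal loading sketch follows; the adapter repository id is a placeholder to replace with this repo's actual name, and access to the gated Llama 2 weights is assumed:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Llama-2-70b-hf"
adapter_id = "path/to/this-adapter-repo"  # placeholder: use this repository's id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.float16, device_map="auto"
)
# Attach the LoRA adapter weights stored in this repository on top of the base model.
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()
```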
logs/events.out.tfevents.1699989718.node0370.palmetto.clemson.edu.1119956.2
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a8338f7ab492bb36430b90fde21a7822be3a5f94276aa93f72ad83aeaf872e40
size 4868
logs/events.out.tfevents.1699989813.node0370.palmetto.clemson.edu.1119956.4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4773b11c0e593bac1fe819e5c6324bb2a3542c475a7d375bb608e46408b2b009
size 4602
logs/events.out.tfevents.1699994287.node0370.palmetto.clemson.edu.1126403.1
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:edff87033b1a80ab70d0a0af49d7943790a698e659a0875f929d78a3299c699d
size 7962
logs/events.out.tfevents.1699995040.node0370.palmetto.clemson.edu.1127435.1
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f054998bd37e1977597fdb66e495fb58c70a3b0042fa2ef4428756252ee49a4c
size 33937
logs/events.out.tfevents.1700064888.node0277.palmetto.clemson.edu.1971495.1
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:587a2145d6c92f9cbc7c114d9b2d452e13239b0a925f3bdcaa5631c48bdca19a
size 9908
logs/events.out.tfevents.1700066157.node0277.palmetto.clemson.edu.1973537.1
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5fcffea7a0e86207d5c20daaabb71040aa71cb529a4bbddc4a309e37d9666f12
size 14528