Upload LlamaForCausalLM
Files changed:
- config.json (+4 -4)
- model.safetensors (+2 -2)
config.json CHANGED

@@ -25,8 +25,8 @@
   "pad_token_id": 128004,
   "pretraining_tp": 1,
   "quantization_config": {
-    "_load_in_4bit":
-    "_load_in_8bit":
+    "_load_in_4bit": true,
+    "_load_in_8bit": false,
     "bnb_4bit_compute_dtype": "float32",
     "bnb_4bit_quant_storage": "uint8",
     "bnb_4bit_quant_type": "fp4",
@@ -35,8 +35,8 @@
     "llm_int8_has_fp16_weight": false,
     "llm_int8_skip_modules": null,
     "llm_int8_threshold": 6.0,
-    "load_in_4bit":
-    "load_in_8bit":
+    "load_in_4bit": true,
+    "load_in_8bit": false,
     "quant_method": "bitsandbytes"
   },
   "rms_norm_eps": 1e-05,
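For reference, this quantization_config is what transformers reads when loading the checkpoint with bitsandbytes 4-bit quantization. A minimal sketch of the equivalent loading call, assuming transformers and bitsandbytes are installed; the repository id below is a placeholder, not this repo's actual id:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirror the fields written into config.json by this commit.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # "load_in_4bit": true
    bnb_4bit_quant_type="fp4",             # "bnb_4bit_quant_type": "fp4"
    bnb_4bit_compute_dtype=torch.float32,  # "bnb_4bit_compute_dtype": "float32"
    bnb_4bit_quant_storage=torch.uint8,    # "bnb_4bit_quant_storage": "uint8"
)

model = AutoModelForCausalLM.from_pretrained(
    "namespace/llama-model",  # placeholder repo id
    quantization_config=bnb_config,
)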
model.safetensors CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c4c8de245510d5c99f62be6ada94d69c319f6aa8d5fc83b05fbd4b45ad60f38a
+size 1072901744
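The model.safetensors entry is a Git LFS pointer: the repository stores only the object's sha256 and byte size, while the weights themselves live in LFS storage. A minimal sketch of checking a downloaded copy against the new pointer values; the local file path is an assumption:

import hashlib
from pathlib import Path

path = Path("model.safetensors")  # assumed local download path
expected_oid = "c4c8de245510d5c99f62be6ada94d69c319f6aa8d5fc83b05fbd4b45ad60f38a"
expected_size = 1072901744

sha256 = hashlib.sha256()
size = 0
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)
        size += len(chunk)

assert size == expected_size, f"size {size} != pointer size {expected_size}"
assert sha256.hexdigest() == expected_oid, "sha256 does not match pointer oid"
print("model.safetensors matches its LFS pointer")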