smokxy committed
Commit cf43346 · verified · 1 parent: e16dad3

Upload folder using huggingface_hub

Files changed (2):
1. config.json (+16 −1)
2. model.safetensors (+2 −2)
config.json CHANGED
@@ -42,9 +42,24 @@
   "num_hidden_layers": 4,
   "num_mel_bins": 80,
   "pad_token_id": 50257,
+  "quantization_config": {
+    "_load_in_4bit": true,
+    "_load_in_8bit": false,
+    "bnb_4bit_compute_dtype": "float32",
+    "bnb_4bit_quant_storage": "uint8",
+    "bnb_4bit_quant_type": "fp4",
+    "bnb_4bit_use_double_quant": false,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": null,
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  },
   "scale_embedding": false,
   "suppress_tokens": [],
-  "torch_dtype": "float32",
+  "torch_dtype": "float16",
   "transformers_version": "4.41.1",
   "use_cache": true,
   "use_weighted_layer_sum": false,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b8573cbe71fa4c903fe787bc065eef5d244a50e0d097680fd87fa785f9081a82
-size 151061672
+oid sha256:e008631c2eac939b0f7cf9c28375df7e726d29c3c767646c01908e59a0a0a898
+size 51832696
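For scale: the LFS pointer records the checkpoint shrinking from 151,061,672 bytes to 51,832,696 bytes, a roughly 2.9× reduction (151,061,672 / 51,832,696 ≈ 2.91). That is plausibly consistent with the config change above: linear weights packed as 4-bit fp4 (two values per uint8 byte) while embeddings, norms, and quantization metadata stay in higher precision, rather than the full 8× a pure float32-to-4-bit conversion would give.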