Sefika committed
Commit 45846ff (verified) · Parent: e3a3848

Upload T5ForConditionalGeneration

Files changed (2)
  1. config.json +1 -16
  2. model.safetensors +2 -2
config.json CHANGED
@@ -23,21 +23,6 @@
   "num_layers": 12,
   "output_past": true,
   "pad_token_id": 0,
-  "quantization_config": {
-    "_load_in_4bit": true,
-    "_load_in_8bit": false,
-    "bnb_4bit_compute_dtype": "float16",
-    "bnb_4bit_quant_storage": "uint8",
-    "bnb_4bit_quant_type": "nf4",
-    "bnb_4bit_use_double_quant": false,
-    "llm_int8_enable_fp32_cpu_offload": false,
-    "llm_int8_has_fp16_weight": false,
-    "llm_int8_skip_modules": null,
-    "llm_int8_threshold": 6.0,
-    "load_in_4bit": true,
-    "load_in_8bit": false,
-    "quant_method": "bitsandbytes"
-  },
   "relative_attention_max_distance": 128,
   "relative_attention_num_buckets": 32,
   "task_specific_params": {
@@ -70,7 +55,7 @@
     }
   },
   "tie_word_embeddings": false,
-  "torch_dtype": "float16",
+  "torch_dtype": "float32",
   "transformers_version": "4.45.2",
   "use_cache": true,
   "vocab_size": 32128
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e8aff9bbac06c837845bb03c21d478991977e9a1d9ffd2f863c995e1a80a7cc3
-size 340168944
+oid sha256:4adf9dc9eb3a582fdba9fca8370c7709cf235cb72cbb6643821e452827c09c70
+size 990345064
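
The diff drops the bitsandbytes 4-bit `quantization_config` block and records `torch_dtype` as `float32`, which is consistent with `model.safetensors` growing from roughly 340 MB to roughly 990 MB (full-precision weights instead of a 4-bit quantized export). Below is a minimal sketch of how a commit like this can be produced with `transformers`, assuming the model is available in full precision in memory; the checkpoint and repo ids are placeholders, not taken from this commit.

```python
# Hedged sketch, not the author's actual script: re-upload a T5 checkpoint
# in full float32 precision, without a bitsandbytes quantization block.
import torch
from transformers import T5ForConditionalGeneration

# Load a full-precision T5 checkpoint; the dtype used here is what gets
# recorded as "torch_dtype" in the pushed config.json.
model = T5ForConditionalGeneration.from_pretrained(
    "google-t5/t5-base",          # placeholder base checkpoint
    torch_dtype=torch.float32,
)

# If the in-memory config still carries a bitsandbytes quantization block
# (e.g. after dequantizing an earlier 4-bit export), removing it keeps it
# out of the uploaded config.json.
if hasattr(model.config, "quantization_config"):
    del model.config.quantization_config

# push_to_hub writes config.json and model.safetensors; its default commit
# message is "Upload T5ForConditionalGeneration".
model.push_to_hub("Sefika/example-repo")   # placeholder target repo
```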