comoZ committed
Commit f33e3be · verified · Parent: d803bf1

Upload Exaone4ForCausalLM

config.json ADDED
@@ -0,0 +1,116 @@
+ {
+   "architectures": [
+     "Exaone4ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "dtype": "float16",
+   "eos_token_id": 361,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 27392,
+   "layer_types": [
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention"
+   ],
+   "max_position_embeddings": 131072,
+   "model_type": "exaone4",
+   "num_attention_heads": 40,
+   "num_hidden_layers": 64,
+   "num_key_value_heads": 8,
+   "pad_token_id": 0,
+   "quantization_config": {
+     "_load_in_4bit": true,
+     "_load_in_8bit": false,
+     "bnb_4bit_compute_dtype": "bfloat16",
+     "bnb_4bit_quant_storage": "uint8",
+     "bnb_4bit_quant_type": "nf4",
+     "bnb_4bit_use_double_quant": false,
+     "llm_int8_enable_fp32_cpu_offload": false,
+     "llm_int8_has_fp16_weight": false,
+     "llm_int8_skip_modules": null,
+     "llm_int8_threshold": 6.0,
+     "load_in_4bit": true,
+     "load_in_8bit": false,
+     "quant_method": "bitsandbytes"
+   },
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 16.0,
+     "high_freq_factor": 4.0,
+     "low_freq_factor": 1.0,
+     "original_max_position_embeddings": 8192,
+     "rope_type": "llama3"
+   },
+   "rope_theta": 1000000,
+   "sliding_window": 4096,
+   "sliding_window_pattern": "LLLG",
+   "tie_word_embeddings": false,
+   "transformers_version": "4.57.3",
+   "use_cache": true,
+   "vocab_size": 102400
+ }
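Taken together, config.json describes a hybrid attention stack (the repeating "LLLG" pattern: three 4096-token sliding-window layers per full-attention layer), llama3-style RoPE scaling from 8192 to 131072 positions, and bitsandbytes NF4 4-bit quantization baked into the checkpoint. Because the quantization settings live in the config itself, `from_pretrained` applies them without an explicit `BitsAndBytesConfig`. A minimal loading sketch; the repo id is a hypothetical placeholder, not confirmed by the commit:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical repository path; substitute the actual repo id.
repo_id = "comoZ/EXAONE-4.0-bnb-4bit"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
# The bitsandbytes NF4 settings are embedded in config.json above,
# so from_pretrained quantizes on load with no extra arguments.
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    device_map="auto",  # place the 4-bit weights on available GPUs
)
```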
generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "cache_implementation": "hybrid",
+   "eos_token_id": 361,
+   "pad_token_id": 0,
+   "transformers_version": "4.57.3"
+ }
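generation_config.json pins `cache_implementation` to `"hybrid"`, which pairs a fixed-size window cache for the sliding_attention layers with a full KV cache for the full_attention layers, matching the layer_types above. A hedged generation example continuing the loading sketch (it reuses `model` and `tokenizer`; the prompt is illustrative):

```python
# generate() reads generation_config.json, so the hybrid cache and the
# bos/eos/pad token ids apply automatically.
inputs = tokenizer(
    "Explain sliding-window attention briefly.", return_tensors="pt"
).to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```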
pytorch_model-00001-of-00010.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60a1a6dfa8647e94b266208edab763e92d396b79164778ab249fb9a05bb01d0d
+ size 1979133074
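Each shard is committed as a Git LFS pointer rather than the binary itself: three lines giving the spec version, the sha256 oid of the payload, and its size in bytes. A minimal sketch for checking a downloaded shard against the pointer's oid; the local path assumes the file sits in the current directory:

```python
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks to avoid loading ~2 GB into memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# oid from the LFS pointer above
expected = "60a1a6dfa8647e94b266208edab763e92d396b79164778ab249fb9a05bb01d0d"
assert sha256_of("pytorch_model-00001-of-00010.bin") == expected
```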
pytorch_model-00002-of-00010.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:401e58245de7455954df2339c0346f4e207880029acb1ce4772f3aff6e6cb703
+ size 1983518121
pytorch_model-00003-of-00010.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4ead56587630ed3c043dd9980de04dcabe1f91bf5068edf94a7e5a9a4f99269
+ size 1998286683
pytorch_model-00004-of-00010.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bcd75d001a69cef7c436aa38392dcd34c0fa26a2ba0a396d76b4d1e81e19f55
+ size 1995401466
pytorch_model-00005-of-00010.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad788db14104b7aa4410ef96419e0785f3995755dcedcd9de063b4d6a6b8b20d
+ size 1992284962
pytorch_model-00006-of-00010.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1e1cf3b98d8f507598587bf1d07050e974bfa57fc8c6b76c2924c47270122b6
+ size 1998286683
pytorch_model-00007-of-00010.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e0ab144bb15023596130d85333e06ed1b393ac824ad0a01ee6126f159063298
+ size 1995401466
pytorch_model-00008-of-00010.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f22d5bec1440c0b8ca6f34fc5f952880d6e6e11ed73ea495e99e2441be0d6008
+ size 1992284962
pytorch_model-00009-of-00010.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e89b4bb35cbfc5ac7f8ba3e61e60f063043f9e8bcf7ca0a343e113e6ea049c9a
+ size 1998286683
pytorch_model-00010-of-00010.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0334cd5f0d03cd71baa98923bd5c492e71fce9f7ce66e32cf51c74f924f6db65
+ size 1578019645
pytorch_model.bin.index.json ADDED
The diff for this file is too large to render.
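The index file is the standard transformers sharded-checkpoint map from each tensor name to the shard that holds it, plus a total size in metadata. A short inspection sketch, assuming the file has been downloaded to the current directory:

```python
import json
from collections import Counter

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # total bytes across all shards
print(Counter(index["weight_map"].values()))  # tensor count per shard file
```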