allenxiao committed
Commit e5cfb57 · verified · 1 Parent(s): ccf5ecf

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "architectures": [
+     "MixtralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 15,
+   "eos_token_id": 16,
+   "head_dim": null,
+   "hidden_act": "silu",
+   "hidden_size": 1024,
+   "initializer_range": 0.01,
+   "intermediate_size": 4096,
+   "iter": 118698,
+   "max_position_embeddings": 1048576,
+   "model_type": "mixtral",
+   "num_attention_heads": 16,
+   "num_experts_per_tok": 2,
+   "num_hidden_layers": 12,
+   "num_key_value_heads": 8,
+   "num_local_experts": 8,
+   "output_router_logits": false,
+   "pad_token_id": 14,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 50000000,
+   "router_aux_loss_coef": 0.001,
+   "router_jitter_noise": 0.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.4",
+   "use_cache": true,
+   "vocab_size": 128
+ }
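
This config describes a small Mixtral-style mixture-of-experts model: 12 layers, hidden size 1024, 16 attention heads with 8 KV heads, 8 local experts with 2 routed per token, a padded 128-entry vocabulary, and a 1M-token position range. A minimal sketch of instantiating the architecture from this file with transformers (the pinned version is 4.52.4); note this builds a randomly initialized skeleton, and loading the checkpoint from model.safetensors is a separate step:

```python
# Minimal sketch: build the model skeleton from config.json.
# The local file path is an assumption; weights are not loaded here.
from transformers import MixtralConfig, MixtralForCausalLM

config = MixtralConfig.from_json_file("config.json")
model = MixtralForCausalLM(config)  # random init; real weights are in model.safetensors
print(f"{model.num_parameters():,} parameters")
```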
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a325f2cdf8071f2039f9a3b1073302cdd3d371b4a47722643c01cc8a54db211
+ size 4984424416
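
The weights file is stored via Git LFS, so the repository itself only carries this pointer; the oid field is the SHA-256 of the real ~5.0 GB payload. A quick way to verify an already-downloaded copy against the pointer (the local path is an assumption):

```python
# Minimal sketch: hash a downloaded model.safetensors and compare it
# against the oid recorded in the git-lfs pointer above.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "9a325f2cdf8071f2039f9a3b1073302cdd3d371b4a47722643c01cc8a54db211"
assert sha256_of("model.safetensors") == expected, "LFS object mismatch"
```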
special_tokens_map.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "additional_special_tokens": [
+     "<EOD>"
+   ],
+   "bos_token": "<s>",
+   "cls_token": "<CLS>",
+   "eos_token": "</s>",
+   "mask_token": "<MASK>",
+   "pad_token": "<PAD>",
+   "sep_token": "<SEP>",
+   "unk_token": "<UNK>"
+ }
tokenizer.json ADDED
@@ -0,0 +1,163 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 10,
+       "content": "<CLS>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 11,
+       "content": "<SEP>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 12,
+       "content": "<EOD>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 13,
+       "content": "<MASK>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 14,
+       "content": "<PAD>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 15,
+       "content": "<s>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 16,
+       "content": "</s>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 17,
+       "content": "<UNK>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": null,
+   "pre_tokenizer": {
+     "type": "ByteLevel",
+     "add_prefix_space": false,
+     "trim_offsets": true,
+     "use_regex": true
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 0
+         }
+       }
+     ],
+     "special_tokens": {
+       "<CLS>": {
+         "id": "<CLS>",
+         "ids": [
+           10
+         ],
+         "tokens": [
+           "<CLS>"
+         ]
+       },
+       "<SEP>": {
+         "id": "<SEP>",
+         "ids": [
+           11
+         ],
+         "tokens": [
+           "<SEP>"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "BPE",
+     "dropout": null,
+     "unk_token": "<UNK>",
+     "continuing_subword_prefix": null,
+     "end_of_word_suffix": null,
+     "fuse_unk": false,
+     "byte_fallback": false,
+     "ignore_merges": false,
+     "vocab": {
+       "[PAD]": 0,
+       "[UNK]": 1,
+       "[CLS]": 2,
+       "[SEP]": 3,
+       "[MASK]": 4,
+       "C": 5,
+       "G": 6,
+       "T": 7,
+       "A": 8,
+       "N": 9,
+       "<CLS>": 10,
+       "<SEP>": 11,
+       "<EOD>": 12,
+       "<MASK>": 13,
+       "<PAD>": 14,
+       "<s>": 15,
+       "</s>": 16,
+       "<UNK>": 17
+     },
+     "merges": []
+   }
+ }
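
The vocab covers the five nucleotide symbols (A, C, G, T, N) plus special tokens, and merges is empty, so the BPE model here degenerates to character-level tokenization of DNA sequences. A minimal sketch with the tokenizers library, assuming tokenizer.json sits in the working directory:

```python
# Minimal sketch: character-level encoding of a DNA string with the
# tokenizer defined above (empty merges => one token per nucleotide).
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")
enc = tok.encode("ACGTN")
print(enc.tokens)  # expected: ['A', 'C', 'G', 'T', 'N']
print(enc.ids)     # expected: [8, 5, 6, 7, 9], per the vocab above
```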
tokenizer_config.json ADDED
@@ -0,0 +1,82 @@
+ {
+   "added_tokens_decoder": {
+     "10": {
+       "content": "<CLS>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "11": {
+       "content": "<SEP>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "12": {
+       "content": "<EOD>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13": {
+       "content": "<MASK>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "14": {
+       "content": "<PAD>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "15": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "16": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "17": {
+       "content": "<UNK>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<EOD>"
+   ],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "<CLS>",
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "mask_token": "<MASK>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<PAD>",
+   "sep_token": "<SEP>",
+   "tokenizer_class": "PreTrainedTokenizer",
+   "unk_token": "<UNK>"
+ }
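
With all four files in one directory, the stack should load through AutoTokenizer: the "PreTrainedTokenizer" class name resolves to the fast tokenizer backed by tokenizer.json, and the template post-processor adds no implicit bos/eos. A hedged sketch, assuming a local checkout of this repo:

```python
# Minimal sketch: load the tokenizer from a local checkout containing the
# files above. The directory path "./" is an assumption.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./")
ids = tokenizer("ACGT")["input_ids"]
print(ids, tokenizer.decode(ids))                # round-trips to "ACGT"
print(tokenizer.pad_token, tokenizer.eos_token)  # <PAD> </s>
```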