kalsun committed on
Commit
d70bfa7
·
verified ·
1 Parent(s): 7ff990e

Upload folder using huggingface_hub

Browse files
mlc-chat-config.json ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "0.1.0",
3
+ "model_type": "gemma3_text",
4
+ "quantization": "q4f16_1",
5
+ "model_config": {
6
+ "text_config": {
7
+ "hidden_size": 640,
8
+ "intermediate_size": 2048,
9
+ "num_hidden_layers": 18,
10
+ "attention_bias": false,
11
+ "num_attention_heads": 4,
12
+ "num_key_value_heads": 1,
13
+ "head_dim": 256,
14
+ "rms_norm_eps": 1e-06,
15
+ "hidden_activation": "gelu_pytorch_tanh",
16
+ "position_embedding_base": 1000000,
17
+ "rope_scaling": 0,
18
+ "context_window_size": 8192,
19
+ "prefill_chunk_size": 8192,
20
+ "query_pre_attn_scalar": 256,
21
+ "sliding_window_size": 512,
22
+ "kwargs": {
23
+ "_sliding_window_pattern": 6,
24
+ "architectures": [
25
+ "Gemma3ForCausalLM"
26
+ ],
27
+ "attention_dropout": 0.0,
28
+ "attn_logit_softcapping": null,
29
+ "bos_token_id": 2,
30
+ "dtype": "bfloat16",
31
+ "eos_token_id": 1,
32
+ "final_logit_softcapping": null,
33
+ "initializer_range": 0.02,
34
+ "layer_types": [
35
+ "sliding_attention",
36
+ "sliding_attention",
37
+ "sliding_attention",
38
+ "sliding_attention",
39
+ "sliding_attention",
40
+ "full_attention",
41
+ "sliding_attention",
42
+ "sliding_attention",
43
+ "sliding_attention",
44
+ "sliding_attention",
45
+ "sliding_attention",
46
+ "full_attention",
47
+ "sliding_attention",
48
+ "sliding_attention",
49
+ "sliding_attention",
50
+ "sliding_attention",
51
+ "sliding_attention",
52
+ "full_attention"
53
+ ],
54
+ "max_position_embeddings": 32768,
55
+ "model_type": "gemma3_text",
56
+ "pad_token_id": 0,
57
+ "rope_parameters": {
58
+ "full_attention": {
59
+ "rope_theta": 1000000.0,
60
+ "rope_type": "default"
61
+ },
62
+ "sliding_attention": {
63
+ "rope_theta": 10000.0,
64
+ "rope_type": "default"
65
+ }
66
+ },
67
+ "sliding_window": 512,
68
+ "tie_word_embeddings": true,
69
+ "transformers_version": "5.0.0",
70
+ "use_bidirectional_attention": false,
71
+ "use_cache": false
72
+ }
73
+ },
74
+ "vocab_size": 262144,
75
+ "tensor_parallel_shards": 1,
76
+ "max_batch_size": 128,
77
+ "context_window_size": 8192,
78
+ "sliding_window_size": 512,
79
+ "prefill_chunk_size": 8192,
80
+ "is_text_model": true
81
+ },
82
+ "vocab_size": 262144,
83
+ "context_window_size": 8192,
84
+ "sliding_window_size": 512,
85
+ "prefill_chunk_size": 8192,
86
+ "attention_sink_size": -1,
87
+ "tensor_parallel_shards": 1,
88
+ "pipeline_parallel_stages": 1,
89
+ "active_vocab_size": 262146,
90
+ "temperature": 1.0,
91
+ "presence_penalty": 0.0,
92
+ "frequency_penalty": 0.0,
93
+ "repetition_penalty": 1.0,
94
+ "top_p": 0.95,
95
+ "tokenizer_files": [
96
+ "tokenizer.json",
97
+ "tokenizer_config.json"
98
+ ],
99
+ "tokenizer_info": {
100
+ "token_postproc_method": "byte_fallback",
101
+ "prepend_space_in_encode": false,
102
+ "strip_space_in_decode": false
103
+ },
104
+ "conv_template": {
105
+ "name": "gemma_instruction",
106
+ "system_template": "{system_message}",
107
+ "system_message": "",
108
+ "system_prefix_token_ids": [
109
+ 2
110
+ ],
111
+ "add_role_after_system_message": true,
112
+ "roles": {
113
+ "user": "<start_of_turn>user",
114
+ "assistant": "<start_of_turn>model"
115
+ },
116
+ "role_templates": {
117
+ "user": "{user_message}",
118
+ "assistant": "{assistant_message}",
119
+ "tool": "{tool_message}"
120
+ },
121
+ "messages": [],
122
+ "seps": [
123
+ "<end_of_turn>\n"
124
+ ],
125
+ "role_content_sep": "\n",
126
+ "role_empty_sep": "\n",
127
+ "stop_str": [
128
+ "<end_of_turn>"
129
+ ],
130
+ "stop_token_ids": [
131
+ 1,
132
+ 107
133
+ ],
134
+ "function_string": "",
135
+ "use_function_calling": false
136
+ },
137
+ "pad_token_id": 0,
138
+ "bos_token_id": 2,
139
+ "eos_token_id": [
140
+ 1,
141
+ 1,
142
+ 50,
143
+ 106
144
+ ]
145
+ }
params_shard_0.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a63fe74046a66116dcfbf9b0503b86b86df987a705794e9629e00c22df4748ea
3
+ size 83886080
params_shard_1.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31a20437b88ffb782cd68ab083ffb5866a0be32b842f1cca208ff5ea04419cc8
3
+ size 33201408
params_shard_2.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d4d34900c3c0cdb0b4439e473c93998dd1b1c65a0217975e282aaf3a078a797
3
+ size 33336064
params_shard_3.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8c57f33402f02f2da07100586020cc02fba6a883ad1414d761c3d1039d4a1a8
3
+ size 462080
tensor-cache.json ADDED
The diff for this file is too large to render. See raw diff