ALmonster commited on
Commit
1d38fd1
·
verified ·
1 Parent(s): bebd8e4

Upload 10 files

Browse files
added_tokens.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "<|endoftext|>": 151643,
3
+ "<|im_end|>": 151645,
4
+ "<|im_start|>": 151644
5
+ }
config.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "/root/lwt/tech/mcmd-72b",
3
+ "architectures": [
4
+ "mcmdForCausalLM"
5
+ ],
6
+ "auto_map": {
7
+ "AutoConfig": "configuration_mcmd.mcmdConfig",
8
+ "AutoModel": "modeling_mcmd.mcmdForCausalLM",
9
+ "AutoModelForCausalLM": "modeling_mcmd.mcmdForCausalLM"
10
+ },
11
+ "clip_path": "/data1/model/DFN5B-CLIP-ViT-H-14-378",
12
+ "hidden_size": 8192,
13
+ "hidden_sizes": 8192,
14
+ "input_img_size": 378,
15
+ "lm_model": {
16
+ "attention_dropout": 0.0,
17
+ "bos_token_id": 151643,
18
+ "eos_token_id": 151645,
19
+ "hidden_act": "silu",
20
+ "hidden_size": 8192,
21
+ "initializer_range": 0.02,
22
+ "intermediate_size": 29696,
23
+ "max_position_embeddings": 32768,
24
+ "max_window_layers": 70,
25
+ "model_type": "qwen2",
26
+ "num_attention_heads": 64,
27
+ "num_hidden_layers": 80,
28
+ "num_key_value_heads": 8,
29
+ "rms_norm_eps": 1e-06,
30
+ "rope_theta": 1000000.0,
31
+ "sliding_window": 131072,
32
+ "tie_word_embeddings": false,
33
+ "torch_dtype": "bfloat16",
34
+ "transformers_version": "4.41.2",
35
+ "use_cache": true,
36
+ "use_sliding_window": false,
37
+ "vocab_size": 152064
38
+ },
39
+ "lm_path": "/data1/model/Qwen2-72B-Instruct",
40
+ "max_length": 1024,
41
+ "model_type": "mcmd",
42
+ "torch_dtype": "bfloat16",
43
+ "transformers_version": "4.40.0",
44
+ "vision_config": "mlp2x_gelu",
45
+ "vocab_size": 152064
46
+ }
configuration_mcmd.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from transformers.configuration_utils import PretrainedConfig


class mcmdConfig(PretrainedConfig):
    """Configuration container for the mcmd model.

    A thin subclass of ``PretrainedConfig`` that only pins the model
    type and the ``AutoConfig`` registration hook; every configuration
    field (hidden sizes, paths, nested ``lm_model`` dict, etc.) is
    accepted and stored by the base class via keyword arguments.
    """

    # Identifier used by transformers' Auto* machinery to map a config
    # to this class (matches "model_type": "mcmd" in config.json).
    model_type = "mcmd"
    # Registers the class for AutoConfig.from_pretrained with
    # trust_remote_code (matches the "auto_map" entry in config.json).
    _auto_class = "AutoConfig"

    def __init__(self, **kwargs):
        """Forward every keyword argument to ``PretrainedConfig``.

        No mcmd-specific defaults are declared here; all fields come
        from the serialized config.json and are handled generically by
        the base class.
        """
        super().__init__(**kwargs)
latest ADDED
@@ -0,0 +1 @@
 
 
1
+ global_step100000
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0574798991a644b5858d4db2e7fdfb3f4860c24bfefed012581ae666d71e4ea1
3
+ size 4695909400
model-00002-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50b1debb07d460f8e18ad0c22dee3e8f3433dce74cbc83375d522d8f80bc6287
3
+ size 4781670640
model-00003-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba6bcf17507be8c7d4ad96d7102b6c35e3cc9435aabd150e113ed700a44ef629
3
+ size 4964101648
model-00004-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ffe86370a452e24971078f9751f7645eafaf13f2a9ac003c3c759817652063f4
3
+ size 4781637624
model-00005-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4dd1ef77fc8afe107ee5637f9d02bc63a1990e062aad653e8d28bf3e1b419d67
3
+ size 4781670664