qgallouedec (HF Staff) committed
Commit 4582191 · verified · 1 Parent(s): c837622

Upload Qwen2ForCausalLM
Files changed (3):
  1. config.json (+69 -5)
  2. generation_config.json (+3 -1)
  3. model.safetensors (+2 -2)
config.json CHANGED
@@ -3,28 +3,92 @@
     "Qwen2ForCausalLM"
   ],
   "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "dtype": "float32",
+  "eos_token_id": 151645,
   "hidden_act": "silu",
   "hidden_size": 8,
   "initializer_range": 0.02,
   "intermediate_size": 32,
   "layer_types": [
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
     "full_attention",
     "full_attention"
   ],
   "max_position_embeddings": 32768,
-  "max_window_layers": 28,
+  "max_window_layers": 70,
   "model_type": "qwen2",
   "num_attention_heads": 4,
   "num_hidden_layers": 2,
   "num_key_value_heads": 2,
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
-  "rope_theta": 10000.0,
+  "rope_theta": 1000000.0,
   "sliding_window": null,
   "tie_word_embeddings": false,
-  "torch_dtype": "float32",
-  "transformers_version": "4.55.0.dev0",
+  "transformers_version": "4.57.0.dev0",
   "use_cache": true,
   "use_sliding_window": false,
-  "vocab_size": 151665
+  "vocab_size": 152064
 }
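
For reference, the config as updated by this commit can be loaded and spot-checked with transformers. A minimal sketch; "your-org/your-repo" is a placeholder for this repository's actual id:

    # Load the config at this revision and spot-check the changed fields.
    from transformers import AutoConfig

    config = AutoConfig.from_pretrained("your-org/your-repo", revision="4582191")
    print(config.rope_theta)        # 1000000.0 after this commit
    print(config.vocab_size)        # 152064 after this commit
    print(len(config.layer_types))  # 64 "full_attention" entries
    print(config.bos_token_id, config.eos_token_id)  # 151643 151645
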
generation_config.json CHANGED
@@ -1,4 +1,6 @@
 {
   "_from_model_config": true,
-  "transformers_version": "4.55.0.dev0"
+  "bos_token_id": 151643,
+  "eos_token_id": 151645,
+  "transformers_version": "4.57.0.dev0"
 }
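
The new generation defaults can likewise be read back with GenerationConfig (again a sketch, with the repo id as a placeholder):

    # Read the generation defaults added by this commit.
    from transformers import GenerationConfig

    gen_config = GenerationConfig.from_pretrained("your-org/your-repo", revision="4582191")
    print(gen_config.bos_token_id)  # 151643
    print(gen_config.eos_token_id)  # 151645
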
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:44bbf78764fc96ba18a5749184100b3a5411d7ce4c1367b3b4ea77bb8151804f
-size 9717288
+oid sha256:906793531715db547acf52632fb05baed225e2e40ee4fe1420aaf4f293ef4306
+size 9742824
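
The safetensors entry is a Git LFS pointer: oid is the SHA-256 of the file contents and size is its length in bytes, so a local download at this revision can be checked against it. A minimal sketch, assuming model.safetensors has already been downloaded:

    # Verify a downloaded model.safetensors against the LFS pointer above.
    import hashlib
    import os

    path = "model.safetensors"
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    assert digest == "906793531715db547acf52632fb05baed225e2e40ee4fe1420aaf4f293ef4306"
    assert os.path.getsize(path) == 9742824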