qgallouedec (HF Staff) committed
Commit 67ab26a · verified · 1 Parent(s): fb99b79

Upload Qwen2ForSequenceClassification

Files changed (2):
  1. config.json +70 -6
  2. model.safetensors +2 -2
config.json CHANGED
@@ -3,9 +3,11 @@
     "Qwen2ForSequenceClassification"
   ],
   "attention_dropout": 0.0,
-  "dtype": "float32",
+  "bos_token_id": 151643,
+  "dtype": "bfloat16",
+  "eos_token_id": 151645,
   "hidden_act": "silu",
-  "hidden_size": 8,
+  "hidden_size": 16,
   "id2label": {
     "0": "LABEL_0"
   },
@@ -15,22 +17,84 @@
     "LABEL_0": 0
   },
   "layer_types": [
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
     "full_attention",
     "full_attention"
   ],
   "max_position_embeddings": 32768,
-  "max_window_layers": 28,
+  "max_window_layers": 70,
   "model_type": "qwen2",
   "num_attention_heads": 4,
   "num_hidden_layers": 2,
   "num_key_value_heads": 2,
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
-  "rope_theta": 10000.0,
+  "rope_theta": 1000000.0,
   "sliding_window": null,
   "tie_word_embeddings": false,
-  "transformers_version": "4.57.0.dev0",
+  "transformers_version": "4.56.2",
   "use_cache": true,
   "use_sliding_window": false,
-  "vocab_size": 151665
+  "vocab_size": 152064
 }
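
For context, the substantive config changes are the serialized dtype (float32 → bfloat16), hidden_size (8 → 16), rope_theta (10000.0 → 1000000.0), vocab_size (151665 → 152064), and the recorded transformers version (4.57.0.dev0 → 4.56.2). Below is a minimal sketch, not part of this commit, of loading the checkpoint so these fields take effect; the repo id "user/tiny-qwen2" is a hypothetical placeholder for the actual repository.

# Minimal sketch, not part of this commit. "user/tiny-qwen2" is a
# hypothetical placeholder repo id; substitute the real one.
from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained("user/tiny-qwen2")
print(config.hidden_size)  # 16 after this commit (was 8)
print(config.vocab_size)   # 152064 after this commit (was 151665)
print(config.rope_theta)   # 1000000.0 after this commit (was 10000.0)

# torch_dtype="auto" loads the weights in the dtype recorded in
# config.json, i.e. bfloat16 after this commit.
model = AutoModelForSequenceClassification.from_pretrained(
    "user/tiny-qwen2", torch_dtype="auto"
)
print(model.dtype)  # torch.bfloat16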
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:168ec3590d3ecfefc9e1aec931ae574f018e190b2afc83cbd8a60a3981bea04e
-size 4864032
+oid sha256:f9bbe7474ae75a83b203a58943280bb97fa618a22b8482f16d298e895ad238ef
+size 4878392
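
The model.safetensors change only updates the Git LFS pointer (new sha256 oid and size). A minimal sketch, using only the Python standard library, for checking a locally downloaded model.safetensors against the new pointer; the local path is an assumption.

# Minimal sketch, not part of this commit: verify a downloaded
# model.safetensors against the LFS pointer's new oid and size.
import hashlib
import os

path = "model.safetensors"  # assumed local download path
expected_oid = "f9bbe7474ae75a83b203a58943280bb97fa618a22b8482f16d298e895ad238ef"
expected_size = 4878392

# LFS records the raw byte size of the stored object.
assert os.path.getsize(path) == expected_size, "size mismatch"

# The oid is the sha256 digest of the file contents.
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")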