jiaruz2 committed
Commit 71c1118 · verified · 1 Parent(s): bef21c5

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
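The commit message indicates the folder was pushed with huggingface_hub, and the new .gitattributes rule keeps tokenizer.json in Git LFS alongside the other large files. A minimal sketch of that upload flow; the local folder path and repo id below are placeholders, not taken from this commit:

    from huggingface_hub import HfApi

    api = HfApi()
    # Files matching the LFS patterns in .gitattributes (tokenizer.json, *.safetensors, ...)
    # are stored as LFS objects; everything else is committed as regular blobs.
    api.upload_folder(
        folder_path="./SFT-GRPO-7B",      # placeholder local directory
        repo_id="jiaruz2/SFT-GRPO-7B",    # placeholder repo id
        commit_message="Upload folder using huggingface_hub",
    )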
added_tokens.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "<R>": 151646,
+   "<S>": 151647,
+   "<X>": 151648,
+   "<extra_0>": 151651,
+   "<mask>": 151649,
+   "<sep>": 151650,
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
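added_tokens.json pins the extra special tokens to fixed ids just above the base Qwen2 vocabulary. A quick sanity check of that mapping (the repo id is a placeholder; a local clone of this folder works as well):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("jiaruz2/SFT-GRPO-7B")  # placeholder repo id
    # Ids should match added_tokens.json
    print(tok.convert_tokens_to_ids(["<R>", "<S>", "<X>", "<mask>", "<sep>", "<extra_0>"]))
    # expected: [151646, 151647, 151648, 151649, 151650, 151651]
    print(tok.convert_tokens_to_ids("<|im_start|>"))  # expected: 151644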
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "SFT-GRPO-7B",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "config_prm.Qwen2RMConfig",
+     "AutoModel": "modeling_prm.Qwen2ForProcessRewardModel"
+   },
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 3584,
+   "initializer_range": 0.02,
+   "intermediate_size": 18944,
+   "max_position_embeddings": 4096,
+   "max_window_layers": 28,
+   "model_type": "qwen2",
+   "num_attention_heads": 28,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 4,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.49.0",
+   "use_cache": true,
+   "use_mrope": false,
+   "use_sliding_window": false,
+   "vocab_size": 152064
+ }
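Because auto_map routes AutoConfig and AutoModel to the bundled config_prm.py and modeling_prm.py, loading this process reward model goes through trust_remote_code. A minimal loading sketch, with a placeholder repo id:

    import torch
    from transformers import AutoModel, AutoTokenizer

    repo = "jiaruz2/SFT-GRPO-7B"  # placeholder; substitute the actual repo id or local path
    tok = AutoTokenizer.from_pretrained(repo)
    # Resolves to modeling_prm.Qwen2ForProcessRewardModel via the auto_map entry above
    model = AutoModel.from_pretrained(repo, torch_dtype=torch.bfloat16, trust_remote_code=True)
    model.eval()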
config_prm.py ADDED
@@ -0,0 +1,124 @@
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class Qwen2RMConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a
+     Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of
+     Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 151936):
+             Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`Qwen2Model`]
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 22016):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*, defaults to 32):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details checkout [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 32768):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether the model's input and output word embeddings should be tied.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         use_sliding_window (`bool`, *optional*, defaults to `False`):
+             Whether to use sliding window attention.
+         sliding_window (`int`, *optional*, defaults to 4096):
+             Sliding window attention (SWA) window size. If not specified, will default to `4096`.
+         max_window_layers (`int`, *optional*, defaults to 28):
+             The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     ```python
+     >>> from transformers import Qwen2Model, Qwen2Config
+
+     >>> # Initializing a Qwen2 style configuration
+     >>> configuration = Qwen2Config()
+
+     >>> # Initializing a model from the Qwen2-7B style configuration
+     >>> model = Qwen2Model(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "qwen2"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=4096,
+         intermediate_size=22016,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=32,
+         hidden_act="silu",
+         max_position_embeddings=32768,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         use_sliding_window=False,
+         sliding_window=4096,
+         max_window_layers=28,
+         attention_dropout=0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.use_sliding_window = use_sliding_window
+         self.sliding_window = sliding_window if use_sliding_window else None
+         self.max_window_layers = max_window_layers
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.attention_dropout = attention_dropout
+
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
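For reference, the values in config.json above correspond to instantiating Qwen2RMConfig directly as sketched below; the token-id kwargs are not explicit parameters of this class and are passed through **kwargs to PretrainedConfig:

    from config_prm import Qwen2RMConfig

    config = Qwen2RMConfig(
        vocab_size=152064,
        hidden_size=3584,
        intermediate_size=18944,
        num_hidden_layers=28,
        num_attention_heads=28,
        num_key_value_heads=4,
        max_position_embeddings=4096,
        rms_norm_eps=1e-5,
        rope_theta=10000.0,
        use_sliding_window=False,   # with this off, sliding_window is stored as None
        max_window_layers=28,
        bos_token_id=151643,        # handled by PretrainedConfig via **kwargs
        eos_token_id=151645,
    )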
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9cddf6943eb317b2784574fc975c6df94c904561fb29a0c9ffd7737904cd42bc
+ size 4877660776
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34022e8b3756fa42fa78a3d6717a972504243ce0ed2a70047019eded672cb27c
+ size 4932751008
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56f9fc60b755294d643bda1ef305dda679e4e5439ed351dad3117a348e560d84
+ size 4356577172
model.safetensors.index.json ADDED
@@ -0,0 +1,349 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 14166949892
4
+ },
5
+ "weight_map": {
6
+ "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
7
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
8
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
9
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
10
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
11
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
12
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
13
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
14
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
15
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
16
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
17
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
18
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
19
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
20
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
21
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
22
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
23
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
24
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
25
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
26
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
27
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
28
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
29
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
30
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
31
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
32
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
33
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
34
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
35
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
36
+ "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
37
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
38
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
39
+ "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
40
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
41
+ "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
42
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
43
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
44
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
45
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
46
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
47
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
48
+ "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
49
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
50
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
51
+ "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
52
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
53
+ "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
54
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
55
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
56
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
57
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
58
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
59
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
60
+ "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
61
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
62
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
63
+ "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
64
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
65
+ "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
66
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
67
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
68
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
69
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
70
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
71
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
72
+ "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
73
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
74
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
75
+ "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
76
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
77
+ "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
78
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
79
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
80
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
81
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
82
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
83
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
84
+ "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
85
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
86
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
87
+ "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
88
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
89
+ "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
90
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
91
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
92
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
93
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
94
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
95
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
96
+ "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
97
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
98
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
99
+ "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
100
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
101
+ "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
102
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
103
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
104
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
105
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
106
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
107
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
108
+ "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
109
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
110
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
111
+ "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
112
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
113
+ "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
114
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
115
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
116
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
117
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
118
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
119
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
120
+ "model.layers.17.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
121
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
122
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
123
+ "model.layers.17.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
124
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
125
+ "model.layers.17.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
126
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
127
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00003.safetensors",
128
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
129
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
130
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
131
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
132
+ "model.layers.18.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
133
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
134
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
135
+ "model.layers.18.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
136
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
137
+ "model.layers.18.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
138
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
139
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00003.safetensors",
140
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
141
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
142
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
143
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
144
+ "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
145
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
146
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
147
+ "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
148
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
149
+ "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
150
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
151
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
152
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
153
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
154
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
155
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
156
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
157
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
158
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
159
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
160
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
161
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
162
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
163
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00003.safetensors",
164
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
165
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
166
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
167
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
168
+ "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
169
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
170
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
171
+ "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
172
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
173
+ "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
174
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
175
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00003.safetensors",
176
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
177
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
178
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
179
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
180
+ "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
181
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
182
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
183
+ "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
184
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
185
+ "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
186
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
187
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
188
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
189
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
190
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
191
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
192
+ "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
193
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
194
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
195
+ "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
196
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
197
+ "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
198
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
199
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
200
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
201
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
202
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
203
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
204
+ "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
205
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
206
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
207
+ "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
208
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
209
+ "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
210
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
211
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
212
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
213
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
214
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
215
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
216
+ "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
217
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
218
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
219
+ "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
220
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
221
+ "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
222
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
223
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
224
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
225
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
226
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
227
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
228
+ "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
229
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
230
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
231
+ "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
232
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
233
+ "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
234
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
235
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
236
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
237
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
238
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
239
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
240
+ "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
241
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
242
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
243
+ "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
244
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
245
+ "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
246
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
247
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
248
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
249
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
250
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
251
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
252
+ "model.layers.27.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
253
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
254
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
255
+ "model.layers.27.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
256
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
257
+ "model.layers.27.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
258
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
259
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
260
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
261
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
262
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
263
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
264
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
265
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
266
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
267
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
268
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
269
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
270
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
271
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
272
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
273
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
274
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
275
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
276
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
277
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
278
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
279
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
280
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
281
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
282
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
283
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
284
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
285
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
286
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
287
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
288
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
289
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
290
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
291
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
292
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
293
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
294
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
295
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
296
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
297
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
298
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
299
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
300
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
301
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
302
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
303
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
304
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
305
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
306
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
307
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
308
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
309
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
310
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
311
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
312
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
313
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
314
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
315
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
316
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
317
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
318
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
319
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00003.safetensors",
320
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
321
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
322
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
323
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
324
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
325
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
326
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
327
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
328
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
329
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
330
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
331
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
332
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
333
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
334
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
335
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
336
+ "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
337
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
338
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
339
+ "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
340
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
341
+ "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
342
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
343
+ "model.norm.weight": "model-00003-of-00003.safetensors",
344
+ "score.0.bias": "model-00003-of-00003.safetensors",
345
+ "score.0.weight": "model-00003-of-00003.safetensors",
346
+ "score.2.bias": "model-00003-of-00003.safetensors",
347
+ "score.2.weight": "model-00003-of-00003.safetensors"
348
+ }
349
+ }
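model.safetensors.index.json maps each parameter name to the shard that stores it, so a single tensor can be read without loading all three files. A small sketch of using the weight map directly; the paths assume a local clone of this folder:

    import json
    from safetensors import safe_open

    with open("model.safetensors.index.json") as f:
        index = json.load(f)

    name = "model.layers.0.self_attn.q_proj.weight"
    shard = index["weight_map"][name]          # e.g. "model-00001-of-00003.safetensors"
    with safe_open(shard, framework="pt") as f:
        tensor = f.get_tensor(name)
    print(tensor.shape, tensor.dtype)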
modeling_prm.py ADDED
@@ -0,0 +1,1609 @@
1
+ import math
2
+ from typing import List, Optional, Tuple, Union
3
+
4
+ import torch
5
+ import torch.utils.checkpoint
6
+ from torch import nn
7
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
8
+
9
+ from transformers.activations import ACT2FN
10
+ from transformers.cache_utils import Cache, DynamicCache#, StaticCache
11
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
12
+ from transformers.modeling_outputs import (
13
+ BaseModelOutputWithPast,
14
+ CausalLMOutputWithPast,
15
+ SequenceClassifierOutputWithPast,
16
+ TokenClassifierOutput,
17
+ )
18
+ from transformers.modeling_utils import PreTrainedModel
19
+ from transformers.utils import (
20
+ add_start_docstrings,
21
+ add_start_docstrings_to_model_forward,
22
+ is_flash_attn_2_available,
23
+ is_flash_attn_greater_or_equal_2_10,
24
+ logging,
25
+ replace_return_docstrings,
26
+ )
27
+ from .config_prm import Qwen2RMConfig as Qwen2Config
28
+
29
+
30
+ if is_flash_attn_2_available():
31
+ from transformers.modeling_flash_attention_utils import _flash_attention_forward
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ _CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta"
38
+ _CONFIG_FOR_DOC = "Qwen2Config"
39
+
40
+
41
+ # Copied from transformers.models.llama.modeling_llama._prepare_4d_causal_attention_mask_with_cache_position
42
+ def _prepare_4d_causal_attention_mask_with_cache_position(
43
+ attention_mask: torch.Tensor,
44
+ sequence_length: int,
45
+ target_length: int,
46
+ dtype: torch.dtype,
47
+ device: torch.device,
48
+ min_dtype: float,
49
+ cache_position: torch.Tensor,
50
+ batch_size: int,
51
+ ):
52
+ """
53
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
54
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
55
+
56
+ Args:
57
+ attention_mask (`torch.Tensor`):
58
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
59
+ sequence_length (`int`):
60
+ The sequence length being processed.
61
+ target_length (`int`):
62
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
63
+ dtype (`torch.dtype`):
64
+ The dtype to use for the 4D attention mask.
65
+ device (`torch.device`):
66
+ The device to place the 4D attention mask on.
67
+ min_dtype (`float`):
68
+ The minimum value representable with the dtype `dtype`.
69
+ cache_position (`torch.Tensor`):
70
+ Indices depicting the position of the input sequence tokens in the sequence.
71
+ batch_size (`torch.Tensor`):
72
+ Batch size.
73
+ """
74
+ if attention_mask is not None and attention_mask.dim() == 4:
75
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
76
+ causal_mask = attention_mask
77
+ else:
78
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
79
+ if sequence_length != 1:
80
+ causal_mask = torch.triu(causal_mask, diagonal=1)
81
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
82
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
83
+ if attention_mask is not None:
84
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
85
+ mask_length = attention_mask.shape[-1]
86
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
87
+ padding_mask = padding_mask == 0
88
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
89
+ padding_mask, min_dtype
90
+ )
91
+
92
+ return causal_mask
93
+
94
+
95
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2
96
+ class Qwen2RMSNorm(nn.Module):
97
+ def __init__(self, hidden_size, eps=1e-6):
98
+ """
99
+ Qwen2RMSNorm is equivalent to T5LayerNorm
100
+ """
101
+ super().__init__()
102
+ self.weight = nn.Parameter(torch.ones(hidden_size))
103
+ self.variance_epsilon = eps
104
+
105
+ def forward(self, hidden_states):
106
+ input_dtype = hidden_states.dtype
107
+ hidden_states = hidden_states.to(torch.float32)
108
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
109
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
110
+ return self.weight * hidden_states.to(input_dtype)
111
+
112
+ def extra_repr(self):
113
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
114
+
115
+
116
+ # Copied from transformers.models.mixtral.modeling_mixtral.MixtralRotaryEmbedding with Mixtral->Qwen2
117
+ class Qwen2RotaryEmbedding(nn.Module):
118
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
119
+ super().__init__()
120
+
121
+ self.dim = dim
122
+ self.max_position_embeddings = max_position_embeddings
123
+ self.base = base
124
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
125
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
126
+
127
+ # Build here to make `torch.jit.trace` work.
128
+ self._set_cos_sin_cache(
129
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
130
+ )
131
+
132
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
133
+ self.max_seq_len_cached = seq_len
134
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
135
+
136
+ freqs = torch.outer(t, self.inv_freq)
137
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
138
+ emb = torch.cat((freqs, freqs), dim=-1)
139
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
140
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
141
+
142
+ def forward(self, x, seq_len=None):
143
+ # x: [bs, num_attention_heads, seq_len, head_size]
144
+ if seq_len > self.max_seq_len_cached:
145
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
146
+
147
+ return (
148
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
149
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
150
+ )
151
+
152
+
153
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
154
+ def rotate_half(x):
155
+ """Rotates half the hidden dims of the input."""
156
+ x1 = x[..., : x.shape[-1] // 2]
157
+ x2 = x[..., x.shape[-1] // 2 :]
158
+ return torch.cat((-x2, x1), dim=-1)
159
+
160
+
161
+ # Copied from transformers.models.mixtral.modeling_mixtral.apply_rotary_pos_emb
162
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
163
+ """Applies Rotary Position Embedding to the query and key tensors.
164
+
165
+ Args:
166
+ q (`torch.Tensor`): The query tensor.
167
+ k (`torch.Tensor`): The key tensor.
168
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
169
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
170
+ position_ids (`torch.Tensor`):
171
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
172
+ used to pass offsetted position ids when working with a KV-cache.
173
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
174
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
175
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
176
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
177
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
178
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
179
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
180
+ Returns:
181
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
182
+ """
183
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
184
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
185
+ q_embed = (q * cos) + (rotate_half(q) * sin)
186
+ k_embed = (k * cos) + (rotate_half(k) * sin)
187
+ return q_embed, k_embed
188
+
189
+
190
+ # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2
191
+ class Qwen2MLP(nn.Module):
192
+ def __init__(self, config):
193
+ super().__init__()
194
+ self.hidden_size = config.hidden_size
195
+ self.intermediate_size = config.intermediate_size
196
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
197
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
198
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
199
+ self.act_fn = ACT2FN[config.hidden_act]
200
+
201
+ def forward(self, hidden_state):
202
+ return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state))
203
+
204
+
205
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
206
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
207
+ """
208
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
209
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
210
+ """
211
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
212
+ if n_rep == 1:
213
+ return hidden_states
214
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
215
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
216
+
217
+
218
+ class Qwen2Attention(nn.Module):
219
+ """
220
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
221
+ and "Generating Long Sequences with Sparse Transformers".
222
+ """
223
+
224
+ def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None):
225
+ super().__init__()
226
+ self.config = config
227
+ self.layer_idx = layer_idx
228
+ if layer_idx is None:
229
+ logger.warning_once(
230
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
231
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
232
+ "when creating this class."
233
+ )
234
+
235
+ self.hidden_size = config.hidden_size
236
+ self.num_heads = config.num_attention_heads
237
+ self.head_dim = self.hidden_size // self.num_heads
238
+ self.num_key_value_heads = config.num_key_value_heads
239
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
240
+ self.max_position_embeddings = config.max_position_embeddings
241
+ self.rope_theta = config.rope_theta
242
+ self.is_causal = True
243
+ self.attention_dropout = config.attention_dropout
244
+
245
+ if (self.head_dim * self.num_heads) != self.hidden_size:
246
+ raise ValueError(
247
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
248
+ f" and `num_heads`: {self.num_heads})."
249
+ )
250
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
251
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
252
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
253
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
254
+
255
+ self.rotary_emb = Qwen2RotaryEmbedding(
256
+ self.head_dim,
257
+ max_position_embeddings=self.max_position_embeddings,
258
+ base=self.rope_theta,
259
+ )
260
+
261
+ def forward(
262
+ self,
263
+ hidden_states: torch.Tensor,
264
+ attention_mask: Optional[torch.Tensor] = None,
265
+ position_ids: Optional[torch.LongTensor] = None,
266
+ past_key_value: Optional[Cache] = None,
267
+ output_attentions: bool = False,
268
+ use_cache: bool = False,
269
+ cache_position: Optional[torch.LongTensor] = None,
270
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
271
+ bsz, q_len, _ = hidden_states.size()
272
+
273
+ query_states = self.q_proj(hidden_states)
274
+ key_states = self.k_proj(hidden_states)
275
+ value_states = self.v_proj(hidden_states)
276
+
277
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
278
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
279
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
280
+
281
+ kv_seq_len = key_states.shape[-2]
282
+ if past_key_value is not None:
283
+ if self.layer_idx is None:
284
+ raise ValueError(
285
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
286
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
287
+ "with a layer index."
288
+ )
289
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
290
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
291
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
292
+
293
+ if past_key_value is not None:
294
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
295
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
296
+
297
+ # repeat k/v heads if n_kv_heads < n_heads
298
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
299
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
300
+
301
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
302
+
303
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
304
+ raise ValueError(
305
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
306
+ f" {attn_weights.size()}"
307
+ )
308
+
309
+ if attention_mask is not None: # no matter the length, we just slice it
310
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
311
+ attn_weights = attn_weights + causal_mask
312
+
313
+ # upcast attention to fp32
314
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
315
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
316
+ attn_output = torch.matmul(attn_weights, value_states)
317
+
318
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
319
+ raise ValueError(
320
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
321
+ f" {attn_output.size()}"
322
+ )
323
+
324
+ attn_output = attn_output.transpose(1, 2).contiguous()
325
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
326
+
327
+ attn_output = self.o_proj(attn_output)
328
+
329
+ if not output_attentions:
330
+ attn_weights = None
331
+
332
+ return attn_output, attn_weights, past_key_value
333
+
334
+
335
+ class Qwen2FlashAttention2(Qwen2Attention):
336
+ """
337
+ Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention`
338
+ as the weights of the module stays untouched. The only required change would be on the forward pass
339
+ where it needs to correctly call the public API of flash attention and deal with padding tokens
340
+ in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
341
+ config.max_window_layers layers.
342
+ """
343
+
344
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
345
+ def __init__(self, *args, **kwargs):
346
+ super().__init__(*args, **kwargs)
347
+
348
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
349
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
350
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
351
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
352
+
353
+ def forward(
354
+ self,
355
+ hidden_states: torch.Tensor,
356
+ attention_mask: Optional[torch.Tensor] = None,
357
+ position_ids: Optional[torch.LongTensor] = None,
358
+ past_key_value: Optional[Cache] = None,
359
+ output_attentions: bool = False,
360
+ use_cache: bool = False,
361
+ cache_position: Optional[torch.LongTensor] = None,
362
+ ):
363
+ bsz, q_len, _ = hidden_states.size()
364
+
365
+ query_states = self.q_proj(hidden_states)
366
+ key_states = self.k_proj(hidden_states)
367
+ value_states = self.v_proj(hidden_states)
368
+
369
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
370
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
371
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
372
+
373
+ kv_seq_len = key_states.shape[-2]
374
+ if past_key_value is not None:
375
+ if self.layer_idx is None:
376
+ raise ValueError(
377
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
378
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
379
+ "with a layer index."
380
+ )
381
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
382
+
383
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
384
+ rotary_seq_len = (
385
+ max(kv_seq_len, position_ids[:, -1].max().item() + 1) if position_ids is not None else kv_seq_len
386
+ )
387
+
388
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
389
+
390
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
391
+
392
+ if past_key_value is not None:
393
+ # Activate cache slicing only if the config has a `sliding_window` attribute set
394
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
395
+ if (
396
+ getattr(self.config, "sliding_window", None) is not None
397
+ and kv_seq_len > self.config.sliding_window
398
+ and cache_has_contents
399
+ ):
400
+ slicing_tokens = 1 - self.config.sliding_window
401
+
402
+ past_key = past_key_value[self.layer_idx][0]
403
+ past_value = past_key_value[self.layer_idx][1]
404
+
405
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
406
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
407
+
408
+ if past_key.shape[-2] != self.config.sliding_window - 1:
409
+ raise ValueError(
410
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
411
+ f" {past_key.shape}"
412
+ )
413
+
414
+ if attention_mask is not None:
415
+ attention_mask = attention_mask[:, slicing_tokens:]
416
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
417
+
418
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
419
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
420
+
421
+ # repeat k/v heads if n_kv_heads < n_heads
422
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
423
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
424
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
425
+
426
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
427
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
428
+ # cast them back in float16 just to be sure everything works as expected.
429
+ input_dtype = query_states.dtype
430
+ if input_dtype == torch.float32:
431
+ if torch.is_autocast_enabled():
432
+ target_dtype = torch.get_autocast_gpu_dtype()
433
+ # Handle the case where the model is quantized
434
+ elif hasattr(self.config, "_pre_quantization_dtype"):
435
+ target_dtype = self.config._pre_quantization_dtype
436
+ else:
437
+ target_dtype = self.q_proj.weight.dtype
438
+
439
+ logger.warning_once(
440
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
441
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
442
+ f" {target_dtype}."
443
+ )
444
+
445
+ query_states = query_states.to(target_dtype)
446
+ key_states = key_states.to(target_dtype)
447
+ value_states = value_states.to(target_dtype)
448
+
449
+ # Reshape to the expected shape for Flash Attention
450
+ query_states = query_states.transpose(1, 2)
451
+ key_states = key_states.transpose(1, 2)
452
+ value_states = value_states.transpose(1, 2)
453
+
454
+ if (
455
+ self.config.use_sliding_window
456
+ and getattr(self.config, "sliding_window", None) is not None
457
+ and self.layer_idx >= self.config.max_window_layers
458
+ ):
459
+ sliding_window = self.config.sliding_window
460
+ else:
461
+ sliding_window = None
462
+
463
+ attn_output = _flash_attention_forward(
464
+ query_states,
465
+ key_states,
466
+ value_states,
467
+ attention_mask,
468
+ q_len,
469
+ position_ids=position_ids,
470
+ dropout=dropout_rate,
471
+ sliding_window=sliding_window,
472
+ is_causal=self.is_causal,
473
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
474
+ )
475
+
476
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
477
+ attn_output = self.o_proj(attn_output)
478
+
479
+ if not output_attentions:
480
+ attn_weights = None
481
+
482
+ return attn_output, attn_weights, past_key_value
483
+
484
+
485
+ # Copied from transformers.models.mixtral.modeling_mixtral.MixtralSdpaAttention with Mixtral->Qwen2
486
+ class Qwen2SdpaAttention(Qwen2Attention):
487
+ """
488
+ Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
489
+ `Qwen2Attention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
490
+ SDPA API.
491
+ """
492
+
493
+ # Adapted from Qwen2Attention.forward
494
+ def forward(
495
+ self,
496
+ hidden_states: torch.Tensor,
497
+ attention_mask: Optional[torch.Tensor] = None,
498
+ position_ids: Optional[torch.LongTensor] = None,
499
+ past_key_value: Optional[Cache] = None,
500
+ output_attentions: bool = False,
501
+ use_cache: bool = False,
502
+ cache_position: Optional[torch.LongTensor] = None,
503
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
504
+ if output_attentions:
505
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
506
+ logger.warning_once(
507
+ "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
508
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
509
+ )
510
+ return super().forward(
511
+ hidden_states=hidden_states,
512
+ attention_mask=attention_mask,
513
+ position_ids=position_ids,
514
+ past_key_value=past_key_value,
515
+ output_attentions=output_attentions,
516
+ use_cache=use_cache,
517
+ )
518
+
519
+ bsz, q_len, _ = hidden_states.size()
520
+
521
+ query_states = self.q_proj(hidden_states)
522
+ key_states = self.k_proj(hidden_states)
523
+ value_states = self.v_proj(hidden_states)
524
+
525
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
526
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
527
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
528
+
529
+ kv_seq_len = key_states.shape[-2]
530
+ if past_key_value is not None:
531
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
532
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
533
+
534
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
535
+
536
+ if past_key_value is not None:
537
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
538
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
539
+
540
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
541
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
542
+
543
+ causal_mask = attention_mask
544
+ if attention_mask is not None: # no matter the length, we just slice it
545
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
546
+
547
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
548
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
549
+ if query_states.device.type == "cuda" and attention_mask is not None:
550
+ query_states = query_states.contiguous()
551
+ key_states = key_states.contiguous()
552
+ value_states = value_states.contiguous()
553
+
554
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
555
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
556
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
557
+ is_causal = True if causal_mask is None and q_len > 1 else False
558
+
559
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
560
+ query_states,
561
+ key_states,
562
+ value_states,
563
+ attn_mask=causal_mask,
564
+ dropout_p=self.attention_dropout if self.training else 0.0,
565
+ is_causal=is_causal,
566
+ )
567
+
568
+ attn_output = attn_output.transpose(1, 2).contiguous()
569
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
570
+
571
+ attn_output = self.o_proj(attn_output)
572
+
573
+ return attn_output, None, past_key_value
574
+
575
+
576
+ QWEN2_ATTENTION_CLASSES = {
577
+ "eager": Qwen2Attention,
578
+ "flash_attention_2": Qwen2FlashAttention2,
579
+ "sdpa": Qwen2SdpaAttention,
580
+ }
581
+
582
+
583
+ class Qwen2DecoderLayer(nn.Module):
584
+ def __init__(self, config: Qwen2Config, layer_idx: int):
585
+ super().__init__()
586
+ self.hidden_size = config.hidden_size
587
+
588
+ if config.sliding_window and config._attn_implementation != "flash_attention_2":
589
+ logger.warning_once(
590
+ f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
591
+ "unexpected results may be encountered."
592
+ )
593
+ self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
594
+
595
+ self.mlp = Qwen2MLP(config)
596
+ self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
597
+ self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
598
+
599
+ def forward(
600
+ self,
601
+ hidden_states: torch.Tensor,
602
+ attention_mask: Optional[torch.Tensor] = None,
603
+ position_ids: Optional[torch.LongTensor] = None,
604
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
605
+ output_attentions: Optional[bool] = False,
606
+ use_cache: Optional[bool] = False,
607
+ cache_position: Optional[torch.LongTensor] = None,
608
+ **kwargs,
609
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
610
+ """
611
+ Args:
612
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
613
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
614
+ `(batch, sequence_length)` where padding elements are indicated by 0.
615
+ output_attentions (`bool`, *optional*):
616
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
617
+ returned tensors for more detail.
618
+ use_cache (`bool`, *optional*):
619
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
620
+ (see `past_key_values`).
621
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
622
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
623
+ Indices depicting the position of the input sequence tokens in the sequence.
624
+ kwargs (`dict`, *optional*):
625
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
626
+ into the model
627
+ """
628
+
629
+ residual = hidden_states
630
+
631
+ hidden_states = self.input_layernorm(hidden_states)
632
+
633
+ # Self Attention
634
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
635
+ hidden_states=hidden_states,
636
+ attention_mask=attention_mask,
637
+ position_ids=position_ids,
638
+ past_key_value=past_key_value,
639
+ output_attentions=output_attentions,
640
+ use_cache=use_cache,
641
+ cache_position=cache_position,
642
+ )
643
+ hidden_states = residual + hidden_states
644
+
645
+ # Fully Connected
646
+ residual = hidden_states
647
+ hidden_states = self.post_attention_layernorm(hidden_states)
648
+ hidden_states = self.mlp(hidden_states)
649
+ hidden_states = residual + hidden_states
650
+
651
+ outputs = (hidden_states,)
652
+
653
+ if output_attentions:
654
+ outputs += (self_attn_weights,)
655
+
656
+ if use_cache:
657
+ outputs += (present_key_value,)
658
+
659
+ return outputs
660
+
661
+
662
+ QWEN2_START_DOCSTRING = r"""
663
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
664
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
665
+ etc.)
666
+
667
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
668
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
669
+ and behavior.
670
+
671
+ Parameters:
672
+ config ([`Qwen2Config`]):
673
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
674
+ load the weights associated with the model, only the configuration. Check out the
675
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
676
+ """
677
+
678
+
679
+ @add_start_docstrings(
680
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
681
+ QWEN2_START_DOCSTRING,
682
+ )
683
+ class Qwen2PreTrainedModel(PreTrainedModel):
684
+ config_class = Qwen2Config
685
+ base_model_prefix = "model"
686
+ supports_gradient_checkpointing = True
687
+ _no_split_modules = ["Qwen2DecoderLayer"]
688
+ _skip_keys_device_placement = "past_key_values"
689
+ _supports_flash_attn_2 = True
690
+ _supports_sdpa = True
691
+ _supports_cache_class = True
692
+
693
+ def _init_weights(self, module):
694
+ std = self.config.initializer_range
695
+ if isinstance(module, nn.Linear):
696
+ module.weight.data.normal_(mean=0.0, std=std)
697
+ if module.bias is not None:
698
+ module.bias.data.zero_()
699
+ elif isinstance(module, nn.Embedding):
700
+ module.weight.data.normal_(mean=0.0, std=std)
701
+ if module.padding_idx is not None:
702
+ module.weight.data[module.padding_idx].zero_()
703
+
704
+
705
+ QWEN2_INPUTS_DOCSTRING = r"""
706
+ Args:
707
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
708
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
709
+ it.
710
+
711
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
712
+ [`PreTrainedTokenizer.__call__`] for details.
713
+
714
+ [What are input IDs?](../glossary#input-ids)
715
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
716
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
717
+
718
+ - 1 for tokens that are **not masked**,
719
+ - 0 for tokens that are **masked**.
720
+
721
+ [What are attention masks?](../glossary#attention-mask)
722
+
723
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
724
+ [`PreTrainedTokenizer.__call__`] for details.
725
+
726
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
727
+ `past_key_values`).
728
+
729
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
730
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
731
+ information on the default strategy.
732
+
733
+ - 1 indicates the head is **not masked**,
734
+ - 0 indicates the head is **masked**.
735
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
736
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
737
+ config.n_positions - 1]`.
738
+
739
+ [What are position IDs?](../glossary#position-ids)
740
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
741
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
742
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
743
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
744
+
745
+ Two formats are allowed:
746
+ - a [`~cache_utils.Cache`] instance;
747
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
748
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
749
+ cache format.
750
+
751
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
752
+ legacy cache format will be returned.
753
+
754
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
755
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
756
+ of shape `(batch_size, sequence_length)`.
757
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
758
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
759
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
760
+ model's internal embedding lookup matrix.
761
+ use_cache (`bool`, *optional*):
762
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
763
+ `past_key_values`).
764
+ output_attentions (`bool`, *optional*):
765
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
766
+ tensors for more detail.
767
+ output_hidden_states (`bool`, *optional*):
768
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
769
+ more detail.
770
+ return_dict (`bool`, *optional*):
771
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
772
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
773
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
774
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
775
+ the complete sequence length.
776
+ """
777
+
778
+
779
+ @add_start_docstrings(
780
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
781
+ QWEN2_START_DOCSTRING,
782
+ )
783
+ class Qwen2Model(Qwen2PreTrainedModel):
784
+ """
785
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
786
+
787
+ Args:
788
+ config: Qwen2Config
789
+ """
790
+
791
+ def __init__(self, config: Qwen2Config):
792
+ super().__init__(config)
793
+ self.padding_idx = config.pad_token_id
794
+ self.vocab_size = config.vocab_size
795
+
796
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
797
+ self.layers = nn.ModuleList(
798
+ [Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
799
+ )
800
+ self._attn_implementation = config._attn_implementation
801
+ self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
802
+
803
+ self.gradient_checkpointing = False
804
+ # Initialize weights and apply final processing
805
+ self.post_init()
806
+
807
+ def get_input_embeddings(self):
808
+ return self.embed_tokens
809
+
810
+ def set_input_embeddings(self, value):
811
+ self.embed_tokens = value
812
+
813
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
814
+ def forward(
815
+ self,
816
+ input_ids: torch.LongTensor = None,
817
+ attention_mask: Optional[torch.Tensor] = None,
818
+ position_ids: Optional[torch.LongTensor] = None,
819
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
820
+ inputs_embeds: Optional[torch.FloatTensor] = None,
821
+ use_cache: Optional[bool] = None,
822
+ output_attentions: Optional[bool] = None,
823
+ output_hidden_states: Optional[bool] = None,
824
+ return_dict: Optional[bool] = None,
825
+ cache_position: Optional[torch.LongTensor] = None,
826
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
827
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
828
+ output_hidden_states = (
829
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
830
+ )
831
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
832
+
833
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
834
+
835
+ if (input_ids is None) ^ (inputs_embeds is not None):
836
+ raise ValueError(
837
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
838
+ )
839
+
840
+ if self.gradient_checkpointing and self.training:
841
+ if use_cache:
842
+ logger.warning_once(
843
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
844
+ )
845
+ use_cache = False
846
+
847
+ use_legacy_cache = False
848
+ if use_cache and not isinstance(past_key_values, Cache) and not self.training:
849
+ use_legacy_cache = True
850
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
851
+ logger.warning_once(
852
+ "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. "
853
+ "Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)"
854
+ )
855
+
856
+ if inputs_embeds is None:
857
+ inputs_embeds = self.embed_tokens(input_ids)
858
+
859
+ if cache_position is None:
860
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
861
+ cache_position = torch.arange(
862
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
863
+ )
864
+ if position_ids is None:
865
+ position_ids = cache_position.unsqueeze(0)
866
+
867
+ causal_mask = self._update_causal_mask(
868
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
869
+ )
870
+
871
+ hidden_states = inputs_embeds
872
+
873
+ # decoder layers
874
+ all_hidden_states = () if output_hidden_states else None
875
+ all_self_attns = () if output_attentions else None
876
+ next_decoder_cache = None
877
+
878
+ for decoder_layer in self.layers:
879
+ if output_hidden_states:
880
+ all_hidden_states += (hidden_states,)
881
+
882
+ if self.gradient_checkpointing and self.training:
883
+ layer_outputs = self._gradient_checkpointing_func(
884
+ decoder_layer.__call__,
885
+ hidden_states,
886
+ causal_mask,
887
+ position_ids,
888
+ past_key_values,
889
+ output_attentions,
890
+ use_cache,
891
+ cache_position,
892
+ )
893
+ else:
894
+ layer_outputs = decoder_layer(
895
+ hidden_states,
896
+ attention_mask=causal_mask,
897
+ position_ids=position_ids,
898
+ past_key_value=past_key_values,
899
+ output_attentions=output_attentions,
900
+ use_cache=use_cache,
901
+ cache_position=cache_position,
902
+ )
903
+
904
+ hidden_states = layer_outputs[0]
905
+
906
+ if use_cache:
907
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
908
+
909
+ if output_attentions:
910
+ all_self_attns += (layer_outputs[1],)
911
+
912
+ hidden_states = self.norm(hidden_states)
913
+
914
+ # add hidden states from the last decoder layer
915
+ if output_hidden_states:
916
+ all_hidden_states += (hidden_states,)
917
+
918
+ next_cache = None
919
+ if use_cache:
920
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
921
+
922
+ if not return_dict:
923
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
924
+ return BaseModelOutputWithPast(
925
+ last_hidden_state=hidden_states,
926
+ past_key_values=next_cache,
927
+ hidden_states=all_hidden_states,
928
+ attentions=all_self_attns,
929
+ )
930
+
931
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
932
+ def _update_causal_mask(
933
+ self,
934
+ attention_mask: torch.Tensor,
935
+ input_tensor: torch.Tensor,
936
+ cache_position: torch.Tensor,
937
+ past_key_values: Cache,
938
+ output_attentions: bool,
939
+ ):
940
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
941
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
942
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
943
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
944
+
945
+ if self.config._attn_implementation == "flash_attention_2":
946
+ if attention_mask is not None and 0.0 in attention_mask:
947
+ return attention_mask
948
+ return None
949
+
950
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
951
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
952
+ # to infer the attention mask.
953
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
954
+ using_static_cache = False  # isinstance(past_key_values, StaticCache) check disabled in this standalone copy
955
+
956
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
957
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
958
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
959
+ attention_mask,
960
+ inputs_embeds=input_tensor,
961
+ past_key_values_length=past_seen_tokens,
962
+ is_training=self.training,
963
+ ):
964
+ return None
965
+
966
+ dtype, device = input_tensor.dtype, input_tensor.device
967
+ min_dtype = torch.finfo(dtype).min
968
+ sequence_length = input_tensor.shape[1]
969
+ if using_static_cache:
970
+ target_length = past_key_values.get_max_length()
971
+ else:
972
+ target_length = (
973
+ attention_mask.shape[-1]
974
+ if isinstance(attention_mask, torch.Tensor)
975
+ else past_seen_tokens + sequence_length + 1
976
+ )
977
+
978
+ # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
979
+ causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(
980
+ attention_mask,
981
+ sequence_length=sequence_length,
982
+ target_length=target_length,
983
+ dtype=dtype,
984
+ device=device,
985
+ min_dtype=min_dtype,
986
+ cache_position=cache_position,
987
+ batch_size=input_tensor.shape[0],
988
+ )
989
+
990
+ if (
991
+ self.config._attn_implementation == "sdpa"
992
+ and attention_mask is not None
993
+ and attention_mask.device.type == "cuda"
994
+ and not output_attentions
995
+ ):
996
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
997
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
998
+ # Details: https://github.com/pytorch/pytorch/issues/110213
999
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1000
+
1001
+ return causal_mask
1002
+
1003
+
1004
+ class Qwen2ForCausalLM(Qwen2PreTrainedModel):
1005
+ _tied_weights_keys = ["lm_head.weight"]
1006
+
1007
+ def __init__(self, config):
1008
+ super().__init__(config)
1009
+ self.model = Qwen2Model(config)
1010
+ self.vocab_size = config.vocab_size
1011
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1012
+
1013
+ # Initialize weights and apply final processing
1014
+ self.post_init()
1015
+
1016
+ def get_input_embeddings(self):
1017
+ return self.model.embed_tokens
1018
+
1019
+ def set_input_embeddings(self, value):
1020
+ self.model.embed_tokens = value
1021
+
1022
+ def get_output_embeddings(self):
1023
+ return self.lm_head
1024
+
1025
+ def set_output_embeddings(self, new_embeddings):
1026
+ self.lm_head = new_embeddings
1027
+
1028
+ def set_decoder(self, decoder):
1029
+ self.model = decoder
1030
+
1031
+ def get_decoder(self):
1032
+ return self.model
1033
+
1034
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1035
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1036
+ def forward(
1037
+ self,
1038
+ input_ids: torch.LongTensor = None,
1039
+ attention_mask: Optional[torch.Tensor] = None,
1040
+ position_ids: Optional[torch.LongTensor] = None,
1041
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1042
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1043
+ labels: Optional[torch.LongTensor] = None,
1044
+ use_cache: Optional[bool] = None,
1045
+ output_attentions: Optional[bool] = None,
1046
+ output_hidden_states: Optional[bool] = None,
1047
+ return_dict: Optional[bool] = None,
1048
+ cache_position: Optional[torch.LongTensor] = None,
1049
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1050
+ r"""
1051
+ Args:
1052
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1053
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1054
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1055
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1056
+
1057
+ Returns:
1058
+
1059
+ Example:
1060
+
1061
+ ```python
1062
+ >>> from transformers import AutoTokenizer, Qwen2ForCausalLM
1063
+
1064
+ >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1065
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1066
+
1067
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1068
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1069
+
1070
+ >>> # Generate
1071
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1072
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1073
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1074
+ ```"""
1075
+
1076
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1077
+ output_hidden_states = (
1078
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1079
+ )
1080
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1081
+
1082
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1083
+ outputs = self.model(
1084
+ input_ids=input_ids,
1085
+ attention_mask=attention_mask,
1086
+ position_ids=position_ids,
1087
+ past_key_values=past_key_values,
1088
+ inputs_embeds=inputs_embeds,
1089
+ use_cache=use_cache,
1090
+ output_attentions=output_attentions,
1091
+ output_hidden_states=output_hidden_states,
1092
+ return_dict=return_dict,
1093
+ cache_position=cache_position,
1094
+ )
1095
+
1096
+ hidden_states = outputs[0]
1097
+ logits = self.lm_head(hidden_states)
1098
+ logits = logits.float()
1099
+
1100
+ loss = None
1101
+ if labels is not None:
1102
+ # Shift so that tokens < n predict n
1103
+ shift_logits = logits[..., :-1, :].contiguous()
1104
+ shift_labels = labels[..., 1:].contiguous()
1105
+ # Flatten the tokens
1106
+ loss_fct = CrossEntropyLoss()
1107
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1108
+ shift_labels = shift_labels.view(-1)
1109
+ # Enable model parallelism
1110
+ shift_labels = shift_labels.to(shift_logits.device)
1111
+ loss = loss_fct(shift_logits, shift_labels)
1112
+
1113
+ if not return_dict:
1114
+ output = (logits,) + outputs[1:]
1115
+ return (loss,) + output if loss is not None else output
1116
+
1117
+ return CausalLMOutputWithPast(
1118
+ loss=loss,
1119
+ logits=logits,
1120
+ past_key_values=outputs.past_key_values,
1121
+ hidden_states=outputs.hidden_states,
1122
+ attentions=outputs.attentions,
1123
+ )
1124
+
1125
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.prepare_inputs_for_generation
1126
+ def prepare_inputs_for_generation(
1127
+ self,
1128
+ input_ids,
1129
+ past_key_values=None,
1130
+ attention_mask=None,
1131
+ inputs_embeds=None,
1132
+ cache_position=None,
1133
+ position_ids=None,
1134
+ use_cache=True,
1135
+ **kwargs,
1136
+ ):
1137
+ # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
1138
+ # Exception 1: when passing input_embeds, input_ids may be missing entries
1139
+ # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
1140
+ if past_key_values is not None:
1141
+ if inputs_embeds is not None: # Exception 1
1142
+ input_ids = input_ids[:, -cache_position.shape[0] :]
1143
+ elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
1144
+ input_ids = input_ids[:, cache_position]
1145
+
1146
+ if attention_mask is not None and position_ids is None:
1147
+ # create position_ids on the fly for batch generation
1148
+ position_ids = attention_mask.long().cumsum(-1) - 1
1149
+ position_ids.masked_fill_(attention_mask == 0, 1)
1150
+ if past_key_values:
1151
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1152
+
1153
+ # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s `mode="reduce-overhead"`, as otherwise the input `position_ids` would have varying strides during decoding. Here, simply using `.contiguous()` is not sufficient as in the batch size = 1 case, `position_ids` is already contiguous but with varying stride, which retriggers a capture.
1154
+ position_ids = position_ids.clone(memory_format=torch.contiguous_format)
1155
+
1156
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1157
+ if inputs_embeds is not None and cache_position[0] == 0:
1158
+ model_inputs = {"inputs_embeds": inputs_embeds}
1159
+ else:
1160
+ model_inputs = {"input_ids": input_ids}
1161
+
1162
+ if False and isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:  # StaticCache branch disabled in this standalone copy
1163
+ if inputs_embeds is not None:
1164
+ batch_size, sequence_length = inputs_embeds.shape
1165
+ device = inputs_embeds.device
1166
+ else:
1167
+ batch_size, sequence_length = input_ids.shape
1168
+ device = input_ids.device
1169
+
1170
+ dtype = self.lm_head.weight.dtype
1171
+ min_dtype = torch.finfo(dtype).min
1172
+
1173
+ attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(
1174
+ attention_mask,
1175
+ sequence_length=sequence_length,
1176
+ target_length=past_key_values.get_max_length(),
1177
+ dtype=dtype,
1178
+ device=device,
1179
+ min_dtype=min_dtype,
1180
+ cache_position=cache_position,
1181
+ batch_size=batch_size,
1182
+ )
1183
+
1184
+ model_inputs.update(
1185
+ {
1186
+ "position_ids": position_ids,
1187
+ "cache_position": cache_position,
1188
+ "past_key_values": past_key_values,
1189
+ "use_cache": use_cache,
1190
+ "attention_mask": attention_mask,
1191
+ }
1192
+ )
1193
+ return model_inputs
1194
+
1195
+
1196
+ @add_start_docstrings(
1197
+ """
1198
+ The Qwen2 Model transformer with a sequence classification head on top (linear layer).
1199
+
1200
+ [`Qwen2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1201
+ (e.g. GPT-2) do.
1202
+
1203
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1204
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1205
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1206
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1207
+ each row of the batch).
1208
+ """,
1209
+ QWEN2_START_DOCSTRING,
1210
+ )
1211
+ class Qwen2ForSequenceClassification(Qwen2PreTrainedModel):
1212
+ def __init__(self, config):
1213
+ super().__init__(config)
1214
+ self.num_labels = config.num_labels
1215
+ self.model = Qwen2Model(config)
1216
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1217
+
1218
+ # Initialize weights and apply final processing
1219
+ self.post_init()
1220
+
1221
+ def get_input_embeddings(self):
1222
+ return self.model.embed_tokens
1223
+
1224
+ def set_input_embeddings(self, value):
1225
+ self.model.embed_tokens = value
1226
+
1227
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1228
+ def forward(
1229
+ self,
1230
+ input_ids: torch.LongTensor = None,
1231
+ attention_mask: Optional[torch.Tensor] = None,
1232
+ position_ids: Optional[torch.LongTensor] = None,
1233
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1234
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1235
+ labels: Optional[torch.LongTensor] = None,
1236
+ use_cache: Optional[bool] = None,
1237
+ output_attentions: Optional[bool] = None,
1238
+ output_hidden_states: Optional[bool] = None,
1239
+ return_dict: Optional[bool] = None,
1240
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1241
+ r"""
1242
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1243
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1244
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1245
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1246
+ """
1247
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1248
+
1249
+ transformer_outputs = self.model(
1250
+ input_ids,
1251
+ attention_mask=attention_mask,
1252
+ position_ids=position_ids,
1253
+ past_key_values=past_key_values,
1254
+ inputs_embeds=inputs_embeds,
1255
+ use_cache=use_cache,
1256
+ output_attentions=output_attentions,
1257
+ output_hidden_states=output_hidden_states,
1258
+ return_dict=return_dict,
1259
+ )
1260
+ hidden_states = transformer_outputs[0]
1261
+ logits = self.score(hidden_states)
1262
+
1263
+ if input_ids is not None:
1264
+ batch_size = input_ids.shape[0]
1265
+ else:
1266
+ batch_size = inputs_embeds.shape[0]
1267
+
1268
+ if self.config.pad_token_id is None and batch_size != 1:
1269
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1270
+ if self.config.pad_token_id is None:
1271
+ sequence_lengths = -1
1272
+ else:
1273
+ if input_ids is not None:
1274
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1275
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1276
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1277
+ sequence_lengths = sequence_lengths.to(logits.device)
1278
+ else:
1279
+ sequence_lengths = -1
1280
+
1281
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1282
+
1283
+ loss = None
1284
+ if labels is not None:
1285
+ labels = labels.to(logits.device)
1286
+ if self.config.problem_type is None:
1287
+ if self.num_labels == 1:
1288
+ self.config.problem_type = "regression"
1289
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1290
+ self.config.problem_type = "single_label_classification"
1291
+ else:
1292
+ self.config.problem_type = "multi_label_classification"
1293
+
1294
+ if self.config.problem_type == "regression":
1295
+ loss_fct = MSELoss()
1296
+ if self.num_labels == 1:
1297
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1298
+ else:
1299
+ loss = loss_fct(pooled_logits, labels)
1300
+ elif self.config.problem_type == "single_label_classification":
1301
+ loss_fct = CrossEntropyLoss()
1302
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1303
+ elif self.config.problem_type == "multi_label_classification":
1304
+ loss_fct = BCEWithLogitsLoss()
1305
+ loss = loss_fct(pooled_logits, labels)
1306
+ if not return_dict:
1307
+ output = (pooled_logits,) + transformer_outputs[1:]
1308
+ return ((loss,) + output) if loss is not None else output
1309
+
1310
+ return SequenceClassifierOutputWithPast(
1311
+ loss=loss,
1312
+ logits=pooled_logits,
1313
+ past_key_values=transformer_outputs.past_key_values,
1314
+ hidden_states=transformer_outputs.hidden_states,
1315
+ attentions=transformer_outputs.attentions,
1316
+ )
1317
+
1318
+
1319
+ @add_start_docstrings(
1320
+ """
1321
+ The Qwen2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1322
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1323
+ """,
1324
+ QWEN2_START_DOCSTRING,
1325
+ )
1326
+ # Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Qwen2, LLAMA->QWEN2
1327
+ class Qwen2ForTokenClassification(Qwen2PreTrainedModel):
1328
+ def __init__(self, config):
1329
+ super().__init__(config)
1330
+ self.num_labels = config.num_labels
1331
+ self.model = Qwen2Model(config)
1332
+ if getattr(config, "classifier_dropout", None) is not None:
1333
+ classifier_dropout = config.classifier_dropout
1334
+ elif getattr(config, "hidden_dropout", None) is not None:
1335
+ classifier_dropout = config.hidden_dropout
1336
+ else:
1337
+ classifier_dropout = 0.1
1338
+ self.dropout = nn.Dropout(classifier_dropout)
1339
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1340
+
1341
+ # Initialize weights and apply final processing
1342
+ self.post_init()
1343
+
1344
+ def get_input_embeddings(self):
1345
+ return self.model.embed_tokens
1346
+
1347
+ def set_input_embeddings(self, value):
1348
+ self.model.embed_tokens = value
1349
+
1350
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1351
+ def forward(
1352
+ self,
1353
+ input_ids: Optional[torch.LongTensor] = None,
1354
+ attention_mask: Optional[torch.Tensor] = None,
1355
+ position_ids: Optional[torch.LongTensor] = None,
1356
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1357
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1358
+ labels: Optional[torch.LongTensor] = None,
1359
+ use_cache: Optional[bool] = None,
1360
+ output_attentions: Optional[bool] = None,
1361
+ output_hidden_states: Optional[bool] = None,
1362
+ return_dict: Optional[bool] = None,
1363
+ ) -> Union[Tuple, TokenClassifierOutput]:
1364
+ r"""
1365
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1366
+ Labels for computing the token classification loss. Indices should be in `[0, ...,
1367
+ config.num_labels - 1]`. Positions whose label is set to `-100` are ignored when computing the
1368
+ cross-entropy loss.
1369
+ """
1370
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1371
+
1372
+ outputs = self.model(
1373
+ input_ids,
1374
+ attention_mask=attention_mask,
1375
+ position_ids=position_ids,
1376
+ past_key_values=past_key_values,
1377
+ inputs_embeds=inputs_embeds,
1378
+ use_cache=use_cache,
1379
+ output_attentions=output_attentions,
1380
+ output_hidden_states=output_hidden_states,
1381
+ return_dict=return_dict,
1382
+ )
1383
+ sequence_output = outputs[0]
1384
+ sequence_output = self.dropout(sequence_output)
1385
+ logits = self.score(sequence_output)
1386
+
1387
+ loss = None
1388
+ if labels is not None:
1389
+ loss_fct = CrossEntropyLoss()
1390
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1391
+
1392
+ if not return_dict:
1393
+ output = (logits,) + outputs[2:]
1394
+ return ((loss,) + output) if loss is not None else output
1395
+
1396
+ return TokenClassifierOutput(
1397
+ loss=loss,
1398
+ logits=logits,
1399
+ hidden_states=outputs.hidden_states,
1400
+ attentions=outputs.attentions,
1401
+ )
1402
+
1403
+
1404
+ @add_start_docstrings(
1405
+ """
1406
+ The Qwen2 Model transformer with a scalar reward head on top (a two-layer MLP).
1407
+
1408
+ [`Qwen2ForRewardModel`] uses the last token in order to compute the reward, as other causal models
1409
+ (e.g. GPT-2) do.
1410
+
1411
+ Since it scores the last token, it needs to know the position of the last token. If a
1412
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1413
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1414
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1415
+ each row of the batch).
1416
+ """,
1417
+ QWEN2_START_DOCSTRING,
1418
+ )
1419
+ class Qwen2ForRewardModel(Qwen2PreTrainedModel):
1420
+ def __init__(self, config):
1421
+ super().__init__(config)
1422
+ self.num_labels = 1  # config.num_labels is ignored; the reward head outputs a single scalar
1423
+ self.model = Qwen2Model(config)
1424
+ self.score = nn.Sequential(
1425
+ nn.Linear(config.hidden_size, config.hidden_size),
1426
+ nn.ReLU(),
1427
+ nn.Linear(config.hidden_size, self.num_labels)
1428
+ )
1429
+
1430
+ # Initialize weights and apply final processing
1431
+ self.post_init()
1432
+
1433
+ def get_input_embeddings(self):
1434
+ return self.model.embed_tokens
1435
+
1436
+ def set_input_embeddings(self, value):
1437
+ self.model.embed_tokens = value
1438
+
1439
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1440
+ def forward(
1441
+ self,
1442
+ input_ids: torch.LongTensor = None,
1443
+ attention_mask: Optional[torch.Tensor] = None,
1444
+ position_ids: Optional[torch.LongTensor] = None,
1445
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1446
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1447
+ labels: Optional[torch.LongTensor] = None,
1448
+ use_cache: Optional[bool] = None,
1449
+ output_attentions: Optional[bool] = None,
1450
+ output_hidden_states: Optional[bool] = None,
1451
+ return_dict: Optional[bool] = None,
1452
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1453
+ r"""
1454
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1455
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1456
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1457
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1458
+ """
1459
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1460
+
1461
+ transformer_outputs = self.model(
1462
+ input_ids,
1463
+ attention_mask=attention_mask,
1464
+ position_ids=position_ids,
1465
+ past_key_values=past_key_values,
1466
+ inputs_embeds=inputs_embeds,
1467
+ use_cache=use_cache,
1468
+ output_attentions=output_attentions,
1469
+ output_hidden_states=output_hidden_states,
1470
+ return_dict=return_dict,
1471
+ )
1472
+ hidden_states = transformer_outputs[0]
1473
+ logits = self.score(hidden_states)
1474
+
1475
+ if input_ids is not None:
1476
+ batch_size = input_ids.shape[0]
1477
+ else:
1478
+ batch_size = inputs_embeds.shape[0]
1479
+
1480
+ if self.config.pad_token_id is None and batch_size != 1:
1481
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1482
+ if self.config.pad_token_id is None:
1483
+ sequence_lengths = -1
1484
+ else:
1485
+ if input_ids is not None:
1486
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1487
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1488
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1489
+ sequence_lengths = sequence_lengths.to(logits.device)
1490
+ else:
1491
+ sequence_lengths = -1
1492
+
1493
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1494
+
1495
+ loss = None
1496
+ if labels is not None:
1497
+ labels = labels.to(logits.device)
1498
+ if self.config.problem_type is None:
1499
+ if self.num_labels == 1:
1500
+ self.config.problem_type = "regression"
1501
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1502
+ self.config.problem_type = "single_label_classification"
1503
+ else:
1504
+ self.config.problem_type = "multi_label_classification"
1505
+
1506
+ if self.config.problem_type == "regression":
1507
+ loss_fct = MSELoss()
1508
+ if self.num_labels == 1:
1509
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1510
+ else:
1511
+ loss = loss_fct(pooled_logits, labels)
1512
+ elif self.config.problem_type == "single_label_classification":
1513
+ loss_fct = CrossEntropyLoss()
1514
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1515
+ elif self.config.problem_type == "multi_label_classification":
1516
+ loss_fct = BCEWithLogitsLoss()
1517
+ loss = loss_fct(pooled_logits, labels)
1518
+ if not return_dict:
1519
+ output = (pooled_logits,) + transformer_outputs[1:]
1520
+ return ((loss,) + output) if loss is not None else output
1521
+
1522
+ return SequenceClassifierOutputWithPast(
1523
+ loss=loss,
1524
+ logits=pooled_logits,
1525
+ past_key_values=transformer_outputs.past_key_values,
1526
+ hidden_states=transformer_outputs.hidden_states,
1527
+ attentions=transformer_outputs.attentions,
1528
+ )
1529
+
1530
+
1531
+ @add_start_docstrings(
1532
+ """
1533
+ The Qwen2 Model transformer with a token-level classification head on top (a two-layer MLP on the hidden-states
1534
+ output), used here to score intermediate reasoning steps (process rewards).
1535
+ """,
1536
+ QWEN2_START_DOCSTRING,
1537
+ )
1538
+ # Adapted from transformers.models.llama.modeling_llama.LlamaForTokenClassification (Llama->Qwen2), with a two-class MLP head for per-step process rewards
1539
+ class Qwen2ForProcessRewardModel(Qwen2PreTrainedModel):
1540
+ def __init__(self, config):
1541
+ super().__init__(config)
1542
+ self.num_labels = 2
1543
+ self.model = Qwen2Model(config)
1544
+ self.score = nn.Sequential(
1545
+ nn.Linear(config.hidden_size, config.hidden_size),
1546
+ nn.ReLU(),
1547
+ nn.Linear(config.hidden_size, self.num_labels)
1548
+ )
1549
+
1550
+ # Initialize weights and apply final processing
1551
+ self.post_init()
1552
+
1553
+ def get_input_embeddings(self):
1554
+ return self.model.embed_tokens
1555
+
1556
+ def set_input_embeddings(self, value):
1557
+ self.model.embed_tokens = value
1558
+
1559
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1560
+ def forward(
1561
+ self,
1562
+ input_ids: Optional[torch.LongTensor] = None,
1563
+ attention_mask: Optional[torch.Tensor] = None,
1564
+ position_ids: Optional[torch.LongTensor] = None,
1565
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1566
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1567
+ labels: Optional[torch.LongTensor] = None,
1568
+ use_cache: Optional[bool] = None,
1569
+ output_attentions: Optional[bool] = None,
1570
+ output_hidden_states: Optional[bool] = None,
1571
+ return_dict: Optional[bool] = None,
1572
+ ) -> Union[Tuple, TokenClassifierOutput]:
1573
+ r"""
1574
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1575
+ Labels for computing the per-token classification loss. Indices should be in `[0, ...,
1576
+ config.num_labels - 1]` (here `num_labels == 2`); positions whose label is `-100` are ignored by the
1577
+ cross-entropy loss.
1578
+ """
1579
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1580
+
1581
+ outputs = self.model(
1582
+ input_ids,
1583
+ attention_mask=attention_mask,
1584
+ position_ids=position_ids,
1585
+ past_key_values=past_key_values,
1586
+ inputs_embeds=inputs_embeds,
1587
+ use_cache=use_cache,
1588
+ output_attentions=output_attentions,
1589
+ output_hidden_states=output_hidden_states,
1590
+ return_dict=return_dict,
1591
+ )
1592
+ hidden_states = outputs[0]
1593
+ logits = self.score(hidden_states)
1594
+
1595
+ loss = None
1596
+ if labels is not None:
1597
+ loss_fct = CrossEntropyLoss()
1598
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1599
+
1600
+ if not return_dict:
1601
+ output = (logits,) + outputs[2:]
1602
+ return ((loss,) + output) if loss is not None else output
1603
+
1604
+ return TokenClassifierOutput(
1605
+ loss=loss,
1606
+ logits=logits,
1607
+ hidden_states=outputs.hidden_states,
1608
+ attentions=outputs.attentions,
1609
+ )
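For reference, the snippet below is a minimal, hypothetical sketch of how the `Qwen2ForProcessRewardModel` head defined above might be queried. `config.json` maps `AutoModel` to this class and the tokenizer adds an `<extra_0>` token; the repository id, the convention of ending every reasoning step with `<extra_0>`, and the choice of class index 1 as the "correct step" label are assumptions for illustration, not documented behaviour.

```python
import torch
from transformers import AutoModel, AutoTokenizer

repo = "path/to/this-repo"  # placeholder id, not the actual repository name
tokenizer = AutoTokenizer.from_pretrained(repo)
# auto_map in config.json resolves AutoModel to modeling_prm.Qwen2ForProcessRewardModel
model = AutoModel.from_pretrained(repo, torch_dtype=torch.bfloat16, trust_remote_code=True).eval()

question = "What is 3 * (2 + 5)?"
steps = ["2 + 5 = 7.", "3 * 7 = 21.", "The answer is 21."]
# Assumed input convention: terminate each reasoning step with <extra_0>
text = question + "\n" + "<extra_0>".join(steps) + "<extra_0>"
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, seq_len, 2): a 2-way score for every token

sep_id = tokenizer.convert_tokens_to_ids("<extra_0>")
step_positions = inputs["input_ids"][0] == sep_id
# Assumption: class index 1 is the "correct step" class
step_scores = logits[0, step_positions].softmax(dim=-1)[:, 1]
print(step_scores.tolist())  # one score per <extra_0> marker, i.e. per reasoning step
```

Reading scores only at the `<extra_0>` positions follows naturally from the token-level, two-class head, but the exact separator convention used during training is not stated in this commit.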
special_tokens_map.json ADDED
@@ -0,0 +1,26 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<R>",
6
+ "<S>",
7
+ "<X>",
8
+ "<mask>",
9
+ "<sep>",
10
+ "<extra_0>"
11
+ ],
12
+ "eos_token": {
13
+ "content": "<|im_end|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false
18
+ },
19
+ "pad_token": {
20
+ "content": "<|endoftext|>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false
25
+ }
26
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f87f84cd744477c28ec88506449627f5caff7040f3cd320bc0f4f2b8de36279
3
+ size 11419357
tokenizer_config.json ADDED
@@ -0,0 +1,98 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "151646": {
29
+ "content": "<R>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "151647": {
37
+ "content": "<S>",
38
+ "lstrip": false,
39
+ "normalized": false,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ },
44
+ "151648": {
45
+ "content": "<X>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false,
50
+ "special": true
51
+ },
52
+ "151649": {
53
+ "content": "<mask>",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false,
58
+ "special": true
59
+ },
60
+ "151650": {
61
+ "content": "<sep>",
62
+ "lstrip": false,
63
+ "normalized": false,
64
+ "rstrip": false,
65
+ "single_word": false,
66
+ "special": true
67
+ },
68
+ "151651": {
69
+ "content": "<extra_0>",
70
+ "lstrip": false,
71
+ "normalized": false,
72
+ "rstrip": false,
73
+ "single_word": false,
74
+ "special": true
75
+ }
76
+ },
77
+ "additional_special_tokens": [
78
+ "<|im_start|>",
79
+ "<|im_end|>",
80
+ "<R>",
81
+ "<S>",
82
+ "<X>",
83
+ "<mask>",
84
+ "<sep>",
85
+ "<extra_0>"
86
+ ],
87
+ "bos_token": null,
88
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{% if loop.last %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>'}}{% else %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endif %}{% endfor %}{{ '<|endoftext|>' }}",
89
+ "clean_up_tokenization_spaces": false,
90
+ "eos_token": "<|im_end|>",
91
+ "errors": "replace",
92
+ "extra_special_tokens": {},
93
+ "model_max_length": 131072,
94
+ "pad_token": "<|endoftext|>",
95
+ "split_special_tokens": false,
96
+ "tokenizer_class": "Qwen2Tokenizer",
97
+ "unk_token": null
98
+ }
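The `chat_template` above wraps every turn in `<|im_start|> … <|im_end|>`, injects a default system prompt when the first message is not a system message, and appends `<|endoftext|>` after the last turn. A minimal sketch of the resulting rendering (the repository id is again a placeholder):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this-repo")  # placeholder id
messages = [{"role": "user", "content": "Is 17 prime?"}]
text = tokenizer.apply_chat_template(messages, tokenize=False)
# Per the template above, `text` renders as:
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Is 17 prime?<|im_end|><|endoftext|>
print(text)
```

Note that the template ends with `<|endoftext|>` and defines no assistant generation prompt, which fits a model used for scoring complete conversations rather than generating continuations.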
vocab.json ADDED
The diff for this file is too large to render. See raw diff