ZaandaTeika committed
Commit ad1c020 · verified · 1 Parent(s): 679b476

Upload model
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "</tool_call>": 151658,
+ "<extra_0>": 151665,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
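
Note: added_tokens.json maps the Qwen2 special tokens (ChatML markers, tool-calling tags, FIM and vision placeholders) to fixed ids at the top of the vocabulary. A minimal sanity check, assuming the tokenizer is loaded from this repository (the repo id below is a hypothetical placeholder):

    from transformers import AutoTokenizer

    # Hypothetical repo id; substitute the actual repository name.
    tok = AutoTokenizer.from_pretrained("ZaandaTeika/your-model", trust_remote_code=True)

    # Ids should match added_tokens.json above.
    assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
    assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643
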
chat_template.jinja ADDED
@@ -0,0 +1,54 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- messages[0]['content'] }}
+ {%- else %}
+ {{- 'Please reason step by step, and put your final answer within \\boxed{}.' }}
+ {%- endif %}
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+ {%- else %}
+ {{- '<|im_start|>system\nPlease reason step by step, and put your final answer within \\boxed{}.<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- for message in messages %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {{- '<|im_start|>' + message.role }}
+ {%- if message.content %}
+ {{- '\n' + message.content }}
+ {%- endif %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '\n<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {{- tool_call.arguments | tojson }}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
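
Note: this is the Qwen2.5-style ChatML template with optional tool use: a system block (defaulting to the "reason step by step ... \boxed{}" instruction), <tool_call> JSON blocks for assistant tool calls, and <tool_response> blocks folded into user turns. A rough sketch of rendering it through the tokenizer (hypothetical repo id, illustrative messages):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("ZaandaTeika/your-model", trust_remote_code=True)

    messages = [
        {"role": "user", "content": "What is 2 + 2?"},
        {"role": "assistant", "content": "2 + 2 = 4, so the answer is \\boxed{4}."},
        {"role": "user", "content": "And 3 + 3?"},
    ]

    # add_generation_prompt=True appends the trailing '<|im_start|>assistant\n'.
    text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    print(text)
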
config.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "architectures": [
+ "Qwen2ForProcessRewardModel"
+ ],
+ "attention_dropout": 0.0,
+ "auto_map": {
+ "AutoConfig": "configuration_qwen2_rm.Qwen2RMConfig",
+ "AutoModel": "modeling_qwen2_rm.Qwen2ForProcessRewardModel"
+ },
+ "dtype": "bfloat16",
+ "eos_token_id": 151645,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 4096,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "transformers_version": "4.57.5",
+ "use_cache": false,
+ "use_sliding_window": false,
+ "vocab_size": 151666
+ }
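
Note: the auto_map entries point AutoConfig/AutoModel at the two custom files added below, so the checkpoint loads as Qwen2ForProcessRewardModel (a Qwen2-1.5B-scale backbone: 28 layers, hidden size 1536, 12 attention heads with 2 KV heads) only when remote code is trusted. A minimal loading sketch with a hypothetical repo id:

    import torch
    from transformers import AutoConfig, AutoModel

    repo = "ZaandaTeika/your-model"  # hypothetical repo id
    config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
    model = AutoModel.from_pretrained(
        repo,
        torch_dtype=torch.bfloat16,  # matches "dtype": "bfloat16"
        trust_remote_code=True,      # lets auto_map import the *_rm.py modules
    )
    print(type(model).__name__)      # expected: Qwen2ForProcessRewardModel
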
configuration_qwen2_rm.py ADDED
@@ -0,0 +1,140 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Qwen2 model configuration"""
16
+
17
+ from transformers.configuration_utils import PretrainedConfig
18
+ from transformers.utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ class Qwen2RMConfig(PretrainedConfig):
25
+ r"""
26
+ This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a
27
+ Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
28
+ with the defaults will yield a similar configuration to that of
29
+ Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta).
30
+
31
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
32
+ documentation from [`PretrainedConfig`] for more information.
33
+
34
+
35
+ Args:
36
+ vocab_size (`int`, *optional*, defaults to 151936):
37
+ Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the
38
+ `inputs_ids` passed when calling [`Qwen2Model`]
39
+ hidden_size (`int`, *optional*, defaults to 4096):
40
+ Dimension of the hidden representations.
41
+ intermediate_size (`int`, *optional*, defaults to 22016):
42
+ Dimension of the MLP representations.
43
+ num_hidden_layers (`int`, *optional*, defaults to 32):
44
+ Number of hidden layers in the Transformer encoder.
45
+ num_attention_heads (`int`, *optional*, defaults to 32):
46
+ Number of attention heads for each attention layer in the Transformer encoder.
47
+ num_key_value_heads (`int`, *optional*, defaults to 32):
48
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
49
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
50
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
51
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
52
+ by meanpooling all the original heads within that group. For more details checkout [this
53
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
54
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
55
+ The non-linear activation function (function or string) in the decoder.
56
+ max_position_embeddings (`int`, *optional*, defaults to 32768):
57
+ The maximum sequence length that this model might ever be used with.
58
+ initializer_range (`float`, *optional*, defaults to 0.02):
59
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
60
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
61
+ The epsilon used by the rms normalization layers.
62
+ use_cache (`bool`, *optional*, defaults to `True`):
63
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
64
+ relevant if `config.is_decoder=True`.
65
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
66
+ Whether the model's input and output word embeddings should be tied.
67
+ rope_theta (`float`, *optional*, defaults to 10000.0):
68
+ The base period of the RoPE embeddings.
69
+ use_sliding_window (`bool`, *optional*, defaults to `False`):
70
+ Whether to use sliding window attention.
71
+ sliding_window (`int`, *optional*, defaults to 4096):
72
+ Sliding window attention (SWA) window size. If not specified, will default to `4096`.
73
+ max_window_layers (`int`, *optional*, defaults to 28):
74
+ The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
75
+ attention_dropout (`float`, *optional*, defaults to 0.0):
76
+ The dropout ratio for the attention probabilities.
77
+
78
+ ```python
79
+ >>> from transformers import Qwen2Model, Qwen2Config
80
+
81
+ >>> # Initializing a Qwen2 style configuration
82
+ >>> configuration = Qwen2Config()
83
+
84
+ >>> # Initializing a model from the Qwen2-7B style configuration
85
+ >>> model = Qwen2Model(configuration)
86
+
87
+ >>> # Accessing the model configuration
88
+ >>> configuration = model.config
89
+ ```"""
90
+
91
+ model_type = "qwen2"
92
+ keys_to_ignore_at_inference = ["past_key_values"]
93
+
94
+ def __init__(
95
+ self,
96
+ vocab_size=151936,
97
+ hidden_size=4096,
98
+ intermediate_size=22016,
99
+ num_hidden_layers=32,
100
+ num_attention_heads=32,
101
+ num_key_value_heads=32,
102
+ hidden_act="silu",
103
+ max_position_embeddings=32768,
104
+ initializer_range=0.02,
105
+ rms_norm_eps=1e-6,
106
+ use_cache=True,
107
+ tie_word_embeddings=False,
108
+ rope_theta=10000.0,
109
+ use_sliding_window=False,
110
+ sliding_window=4096,
111
+ max_window_layers=28,
112
+ attention_dropout=0.0,
113
+ **kwargs,
114
+ ):
115
+ self.vocab_size = vocab_size
116
+ self.max_position_embeddings = max_position_embeddings
117
+ self.hidden_size = hidden_size
118
+ self.intermediate_size = intermediate_size
119
+ self.num_hidden_layers = num_hidden_layers
120
+ self.num_attention_heads = num_attention_heads
121
+ self.use_sliding_window = use_sliding_window
122
+ self.sliding_window = sliding_window if use_sliding_window else None
123
+ self.max_window_layers = max_window_layers
124
+
125
+ # for backward compatibility
126
+ if num_key_value_heads is None:
127
+ num_key_value_heads = num_attention_heads
128
+
129
+ self.num_key_value_heads = num_key_value_heads
130
+ self.hidden_act = hidden_act
131
+ self.initializer_range = initializer_range
132
+ self.rms_norm_eps = rms_norm_eps
133
+ self.use_cache = use_cache
134
+ self.rope_theta = rope_theta
135
+ self.attention_dropout = attention_dropout
136
+
137
+ super().__init__(
138
+ tie_word_embeddings=tie_word_embeddings,
139
+ **kwargs,
140
+ )
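
Note: the class defaults above describe a Qwen2-7B-scale model; the uploaded config.json overrides them (vocab 151666, hidden size 1536, 28 layers, 12 heads with 2 KV heads, i.e. GQA with groups of 6, 4096 positions, tied embeddings). A small sketch of building the same configuration directly, assuming this file is importable from the working directory:

    from configuration_qwen2_rm import Qwen2RMConfig

    # Values mirror the uploaded config.json rather than the class defaults.
    config = Qwen2RMConfig(
        vocab_size=151666,
        hidden_size=1536,
        intermediate_size=8960,
        num_hidden_layers=28,
        num_attention_heads=12,
        num_key_value_heads=2,
        max_position_embeddings=4096,
        tie_word_embeddings=True,
        use_sliding_window=False,
    )
    print(config.sliding_window)  # None, because use_sliding_window is False
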
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:347dfd6f2608bca7764ccdd6938b9489468e85f7c50de753eb6bfd4cba6b0060
+ size 3092195308
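
Note: this is a Git LFS pointer, not the weights themselves; the ~3.1 GB model.safetensors is stored in LFS and identified by the SHA-256 above. A hedged sketch for verifying a locally downloaded copy against the pointer:

    import hashlib

    # Path to a local model.safetensors obtained via `git lfs pull` or huggingface_hub.
    path = "model.safetensors"

    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)

    print(h.hexdigest())  # should equal the oid sha256 value above
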
modeling_qwen2_rm.py ADDED
@@ -0,0 +1,1637 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """PyTorch Qwen2 model."""
21
+
22
+ import math
23
+ from typing import List, Optional, Tuple, Union
24
+
25
+ import torch
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+
30
+ from transformers.activations import ACT2FN
31
+ from transformers.cache_utils import Cache, DynamicCache#, StaticCache
32
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
33
+ from transformers.modeling_outputs import (
34
+ BaseModelOutputWithPast,
35
+ CausalLMOutputWithPast,
36
+ SequenceClassifierOutputWithPast,
37
+ TokenClassifierOutput,
38
+ )
39
+ from transformers.modeling_utils import PreTrainedModel
40
+ from transformers.utils import (
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ is_flash_attn_2_available,
44
+ is_flash_attn_greater_or_equal_2_10,
45
+ logging,
46
+ replace_return_docstrings,
47
+ )
48
+ try:
49
+ from .configuration_qwen2_rm import Qwen2RMConfig as Qwen2Config
50
+ except ImportError:
51
+ # Fallback for direct import
52
+ import sys
53
+ import os
54
+ sys.path.insert(0, os.path.dirname(__file__))
55
+ from configuration_qwen2_rm import Qwen2RMConfig as Qwen2Config
56
+
57
+
58
+ if is_flash_attn_2_available():
59
+ from transformers.modeling_flash_attention_utils import _flash_attention_forward
60
+
61
+
62
+ logger = logging.get_logger(__name__)
63
+
64
+
65
+ _CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta"
66
+ _CONFIG_FOR_DOC = "Qwen2Config"
67
+
68
+
69
+ # Copied from transformers.models.llama.modeling_llama._prepare_4d_causal_attention_mask_with_cache_position
70
+ def _prepare_4d_causal_attention_mask_with_cache_position(
71
+ attention_mask: torch.Tensor,
72
+ sequence_length: int,
73
+ target_length: int,
74
+ dtype: torch.dtype,
75
+ device: torch.device,
76
+ min_dtype: float,
77
+ cache_position: torch.Tensor,
78
+ batch_size: int,
79
+ ):
80
+ """
81
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
82
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
83
+
84
+ Args:
85
+ attention_mask (`torch.Tensor`):
86
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
87
+ sequence_length (`int`):
88
+ The sequence length being processed.
89
+ target_length (`int`):
90
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
91
+ dtype (`torch.dtype`):
92
+ The dtype to use for the 4D attention mask.
93
+ device (`torch.device`):
94
+ The device to place the 4D attention mask on.
95
+ min_dtype (`float`):
96
+ The minimum value representable with the dtype `dtype`.
97
+ cache_position (`torch.Tensor`):
98
+ Indices depicting the position of the input sequence tokens in the sequence.
99
+ batch_size (`torch.Tensor`):
100
+ Batch size.
101
+ """
102
+ if attention_mask is not None and attention_mask.dim() == 4:
103
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
104
+ causal_mask = attention_mask
105
+ else:
106
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
107
+ if sequence_length != 1:
108
+ causal_mask = torch.triu(causal_mask, diagonal=1)
109
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
110
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
111
+ if attention_mask is not None:
112
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
113
+ mask_length = attention_mask.shape[-1]
114
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
115
+ padding_mask = padding_mask == 0
116
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
117
+ padding_mask, min_dtype
118
+ )
119
+
120
+ return causal_mask
121
+
122
+
123
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2
124
+ class Qwen2RMSNorm(nn.Module):
125
+ def __init__(self, hidden_size, eps=1e-6):
126
+ """
127
+ Qwen2RMSNorm is equivalent to T5LayerNorm
128
+ """
129
+ super().__init__()
130
+ self.weight = nn.Parameter(torch.ones(hidden_size))
131
+ self.variance_epsilon = eps
132
+
133
+ def forward(self, hidden_states):
134
+ input_dtype = hidden_states.dtype
135
+ hidden_states = hidden_states.to(torch.float32)
136
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
137
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
138
+ return self.weight * hidden_states.to(input_dtype)
139
+
140
+ def extra_repr(self):
141
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
142
+
143
+
144
+ # Copied from transformers.models.mixtral.modeling_mixtral.MixtralRotaryEmbedding with Mixtral->Qwen2
145
+ class Qwen2RotaryEmbedding(nn.Module):
146
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
147
+ super().__init__()
148
+
149
+ self.dim = dim
150
+ self.max_position_embeddings = max_position_embeddings
151
+ self.base = base
152
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
153
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
154
+
155
+ # Build here to make `torch.jit.trace` work.
156
+ self._set_cos_sin_cache(
157
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
158
+ )
159
+
160
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
161
+ self.max_seq_len_cached = seq_len
162
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
163
+
164
+ freqs = torch.outer(t, self.inv_freq)
165
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
166
+ emb = torch.cat((freqs, freqs), dim=-1)
167
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
168
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
169
+
170
+ def forward(self, x, seq_len=None):
171
+ # x: [bs, num_attention_heads, seq_len, head_size]
172
+ if seq_len > self.max_seq_len_cached:
173
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
174
+
175
+ return (
176
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
177
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
178
+ )
179
+
180
+
181
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
182
+ def rotate_half(x):
183
+ """Rotates half the hidden dims of the input."""
184
+ x1 = x[..., : x.shape[-1] // 2]
185
+ x2 = x[..., x.shape[-1] // 2 :]
186
+ return torch.cat((-x2, x1), dim=-1)
187
+
188
+
189
+ # Copied from transformers.models.mixtral.modeling_mixtral.apply_rotary_pos_emb
190
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
191
+ """Applies Rotary Position Embedding to the query and key tensors.
192
+
193
+ Args:
194
+ q (`torch.Tensor`): The query tensor.
195
+ k (`torch.Tensor`): The key tensor.
196
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
197
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
198
+ position_ids (`torch.Tensor`):
199
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
200
+ used to pass offsetted position ids when working with a KV-cache.
201
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
202
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
203
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
204
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
205
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
206
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
207
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
208
+ Returns:
209
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
210
+ """
211
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
212
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
213
+ q_embed = (q * cos) + (rotate_half(q) * sin)
214
+ k_embed = (k * cos) + (rotate_half(k) * sin)
215
+ return q_embed, k_embed
216
+
217
+
218
+ # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2
219
+ class Qwen2MLP(nn.Module):
220
+ def __init__(self, config):
221
+ super().__init__()
222
+ self.hidden_size = config.hidden_size
223
+ self.intermediate_size = config.intermediate_size
224
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
225
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
226
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
227
+ self.act_fn = ACT2FN[config.hidden_act]
228
+
229
+ def forward(self, hidden_state):
230
+ return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state))
231
+
232
+
233
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
234
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
235
+ """
236
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
237
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
238
+ """
239
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
240
+ if n_rep == 1:
241
+ return hidden_states
242
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
243
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
244
+
245
+
246
+ class Qwen2Attention(nn.Module):
247
+ """
248
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
249
+ and "Generating Long Sequences with Sparse Transformers".
250
+ """
251
+
252
+ def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None):
253
+ super().__init__()
254
+ self.config = config
255
+ self.layer_idx = layer_idx
256
+ if layer_idx is None:
257
+ logger.warning_once(
258
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
259
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
260
+ "when creating this class."
261
+ )
262
+
263
+ self.hidden_size = config.hidden_size
264
+ self.num_heads = config.num_attention_heads
265
+ self.head_dim = self.hidden_size // self.num_heads
266
+ self.num_key_value_heads = config.num_key_value_heads
267
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
268
+ self.max_position_embeddings = config.max_position_embeddings
269
+ self.rope_theta = config.rope_theta
270
+ self.is_causal = True
271
+ self.attention_dropout = config.attention_dropout
272
+
273
+ if (self.head_dim * self.num_heads) != self.hidden_size:
274
+ raise ValueError(
275
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
276
+ f" and `num_heads`: {self.num_heads})."
277
+ )
278
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
279
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
280
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
281
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
282
+
283
+ self.rotary_emb = Qwen2RotaryEmbedding(
284
+ self.head_dim,
285
+ max_position_embeddings=self.max_position_embeddings,
286
+ base=self.rope_theta,
287
+ )
288
+
289
+ def forward(
290
+ self,
291
+ hidden_states: torch.Tensor,
292
+ attention_mask: Optional[torch.Tensor] = None,
293
+ position_ids: Optional[torch.LongTensor] = None,
294
+ past_key_value: Optional[Cache] = None,
295
+ output_attentions: bool = False,
296
+ use_cache: bool = False,
297
+ cache_position: Optional[torch.LongTensor] = None,
298
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
299
+ bsz, q_len, _ = hidden_states.size()
300
+
301
+ query_states = self.q_proj(hidden_states)
302
+ key_states = self.k_proj(hidden_states)
303
+ value_states = self.v_proj(hidden_states)
304
+
305
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
306
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
307
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
308
+
309
+ kv_seq_len = key_states.shape[-2]
310
+ if past_key_value is not None:
311
+ if self.layer_idx is None:
312
+ raise ValueError(
313
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
314
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
315
+ "with a layer index."
316
+ )
317
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
318
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
319
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
320
+
321
+ if past_key_value is not None:
322
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
323
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
324
+
325
+ # repeat k/v heads if n_kv_heads < n_heads
326
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
327
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
328
+
329
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
330
+
331
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
332
+ raise ValueError(
333
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
334
+ f" {attn_weights.size()}"
335
+ )
336
+
337
+ if attention_mask is not None: # no matter the length, we just slice it
338
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
339
+ attn_weights = attn_weights + causal_mask
340
+
341
+ # upcast attention to fp32
342
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
343
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
344
+ attn_output = torch.matmul(attn_weights, value_states)
345
+
346
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
347
+ raise ValueError(
348
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
349
+ f" {attn_output.size()}"
350
+ )
351
+
352
+ attn_output = attn_output.transpose(1, 2).contiguous()
353
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
354
+
355
+ attn_output = self.o_proj(attn_output)
356
+
357
+ if not output_attentions:
358
+ attn_weights = None
359
+
360
+ return attn_output, attn_weights, past_key_value
361
+
362
+
363
+ class Qwen2FlashAttention2(Qwen2Attention):
364
+ """
365
+ Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention`
366
+ as the weights of the module stays untouched. The only required change would be on the forward pass
367
+ where it needs to correctly call the public API of flash attention and deal with padding tokens
368
+ in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
369
+ config.max_window_layers layers.
370
+ """
371
+
372
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
373
+ def __init__(self, *args, **kwargs):
374
+ super().__init__(*args, **kwargs)
375
+
376
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
377
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
378
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
379
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
380
+
381
+ def forward(
382
+ self,
383
+ hidden_states: torch.Tensor,
384
+ attention_mask: Optional[torch.Tensor] = None,
385
+ position_ids: Optional[torch.LongTensor] = None,
386
+ past_key_value: Optional[Cache] = None,
387
+ output_attentions: bool = False,
388
+ use_cache: bool = False,
389
+ cache_position: Optional[torch.LongTensor] = None,
390
+ ):
391
+ bsz, q_len, _ = hidden_states.size()
392
+
393
+ query_states = self.q_proj(hidden_states)
394
+ key_states = self.k_proj(hidden_states)
395
+ value_states = self.v_proj(hidden_states)
396
+
397
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
398
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
399
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
400
+
401
+ kv_seq_len = key_states.shape[-2]
402
+ if past_key_value is not None:
403
+ if self.layer_idx is None:
404
+ raise ValueError(
405
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
406
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
407
+ "with a layer index."
408
+ )
409
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
410
+
411
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
412
+ rotary_seq_len = (
413
+ max(kv_seq_len, position_ids[:, -1].max().item() + 1) if position_ids is not None else kv_seq_len
414
+ )
415
+
416
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
417
+
418
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
419
+
420
+ if past_key_value is not None:
421
+ # Activate slicing cache only if the config has a value `sliding_windows` attribute
422
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
423
+ if (
424
+ getattr(self.config, "sliding_window", None) is not None
425
+ and kv_seq_len > self.config.sliding_window
426
+ and cache_has_contents
427
+ ):
428
+ slicing_tokens = 1 - self.config.sliding_window
429
+
430
+ past_key = past_key_value[self.layer_idx][0]
431
+ past_value = past_key_value[self.layer_idx][1]
432
+
433
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
434
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
435
+
436
+ if past_key.shape[-2] != self.config.sliding_window - 1:
437
+ raise ValueError(
438
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
439
+ f" {past_key.shape}"
440
+ )
441
+
442
+ if attention_mask is not None:
443
+ attention_mask = attention_mask[:, slicing_tokens:]
444
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
445
+
446
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
447
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
448
+
449
+ # repeat k/v heads if n_kv_heads < n_heads
450
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
451
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
452
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
453
+
454
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
455
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
456
+ # cast them back to float16 just to be sure everything works as expected.
457
+ input_dtype = query_states.dtype
458
+ if input_dtype == torch.float32:
459
+ if torch.is_autocast_enabled():
460
+ target_dtype = torch.get_autocast_gpu_dtype()
461
+ # Handle the case where the model is quantized
462
+ elif hasattr(self.config, "_pre_quantization_dtype"):
463
+ target_dtype = self.config._pre_quantization_dtype
464
+ else:
465
+ target_dtype = self.q_proj.weight.dtype
466
+
467
+ logger.warning_once(
468
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
469
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
470
+ f" {target_dtype}."
471
+ )
472
+
473
+ query_states = query_states.to(target_dtype)
474
+ key_states = key_states.to(target_dtype)
475
+ value_states = value_states.to(target_dtype)
476
+
477
+ # Reshape to the expected shape for Flash Attention
478
+ query_states = query_states.transpose(1, 2)
479
+ key_states = key_states.transpose(1, 2)
480
+ value_states = value_states.transpose(1, 2)
481
+
482
+ if (
483
+ self.config.use_sliding_window
484
+ and getattr(self.config, "sliding_window", None) is not None
485
+ and self.layer_idx >= self.config.max_window_layers
486
+ ):
487
+ sliding_window = self.config.sliding_window
488
+ else:
489
+ sliding_window = None
490
+
491
+ attn_output = _flash_attention_forward(
492
+ query_states,
493
+ key_states,
494
+ value_states,
495
+ attention_mask,
496
+ q_len,
497
+ position_ids=position_ids,
498
+ dropout=dropout_rate,
499
+ sliding_window=sliding_window,
500
+ is_causal=self.is_causal,
501
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
502
+ )
503
+
504
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
505
+ attn_output = self.o_proj(attn_output)
506
+
507
+ if not output_attentions:
508
+ attn_weights = None
509
+
510
+ return attn_output, attn_weights, past_key_value
511
+
512
+
513
+ # Copied from transformers.models.mixtral.modeling_mixtral.MixtralSdpaAttention with Mixtral->Qwen2
514
+ class Qwen2SdpaAttention(Qwen2Attention):
515
+ """
516
+ Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
517
+ `Qwen2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
518
+ SDPA API.
519
+ """
520
+
521
+ # Adapted from Qwen2Attention.forward
522
+ def forward(
523
+ self,
524
+ hidden_states: torch.Tensor,
525
+ attention_mask: Optional[torch.Tensor] = None,
526
+ position_ids: Optional[torch.LongTensor] = None,
527
+ past_key_value: Optional[Cache] = None,
528
+ output_attentions: bool = False,
529
+ use_cache: bool = False,
530
+ cache_position: Optional[torch.LongTensor] = None,
531
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
532
+ if output_attentions:
533
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
534
+ logger.warning_once(
535
+ "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
536
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
537
+ )
538
+ return super().forward(
539
+ hidden_states=hidden_states,
540
+ attention_mask=attention_mask,
541
+ position_ids=position_ids,
542
+ past_key_value=past_key_value,
543
+ output_attentions=output_attentions,
544
+ use_cache=use_cache,
545
+ )
546
+
547
+ bsz, q_len, _ = hidden_states.size()
548
+
549
+ query_states = self.q_proj(hidden_states)
550
+ key_states = self.k_proj(hidden_states)
551
+ value_states = self.v_proj(hidden_states)
552
+
553
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
554
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
555
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
556
+
557
+ kv_seq_len = key_states.shape[-2]
558
+ if past_key_value is not None:
559
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
560
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
561
+
562
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
563
+
564
+ if past_key_value is not None:
565
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
566
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
567
+
568
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
569
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
570
+
571
+ causal_mask = attention_mask
572
+ if attention_mask is not None: # no matter the length, we just slice it
573
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
574
+
575
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
576
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
577
+ if query_states.device.type == "cuda" and attention_mask is not None:
578
+ query_states = query_states.contiguous()
579
+ key_states = key_states.contiguous()
580
+ value_states = value_states.contiguous()
581
+
582
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
583
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
584
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
585
+ is_causal = True if causal_mask is None and q_len > 1 else False
586
+
587
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
588
+ query_states,
589
+ key_states,
590
+ value_states,
591
+ attn_mask=causal_mask,
592
+ dropout_p=self.attention_dropout if self.training else 0.0,
593
+ is_causal=is_causal,
594
+ )
595
+
596
+ attn_output = attn_output.transpose(1, 2).contiguous()
597
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
598
+
599
+ attn_output = self.o_proj(attn_output)
600
+
601
+ return attn_output, None, past_key_value
602
+
603
+
604
+ QWEN2_ATTENTION_CLASSES = {
605
+ "eager": Qwen2Attention,
606
+ "flash_attention_2": Qwen2FlashAttention2,
607
+ "sdpa": Qwen2SdpaAttention,
608
+ }
609
+
610
+
611
+ class Qwen2DecoderLayer(nn.Module):
612
+ def __init__(self, config: Qwen2Config, layer_idx: int):
613
+ super().__init__()
614
+ self.hidden_size = config.hidden_size
615
+
616
+ if config.sliding_window and config._attn_implementation != "flash_attention_2":
617
+ logger.warning_once(
618
+ f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
619
+ "unexpected results may be encountered."
620
+ )
621
+ self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
622
+
623
+ self.mlp = Qwen2MLP(config)
624
+ self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
625
+ self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
626
+
627
+ def forward(
628
+ self,
629
+ hidden_states: torch.Tensor,
630
+ attention_mask: Optional[torch.Tensor] = None,
631
+ position_ids: Optional[torch.LongTensor] = None,
632
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
633
+ output_attentions: Optional[bool] = False,
634
+ use_cache: Optional[bool] = False,
635
+ cache_position: Optional[torch.LongTensor] = None,
636
+ **kwargs,
637
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
638
+ """
639
+ Args:
640
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
641
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
642
+ `(batch, sequence_length)` where padding elements are indicated by 0.
643
+ output_attentions (`bool`, *optional*):
644
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
645
+ returned tensors for more detail.
646
+ use_cache (`bool`, *optional*):
647
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
648
+ (see `past_key_values`).
649
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
650
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
651
+ Indices depicting the position of the input sequence tokens in the sequence.
652
+ kwargs (`dict`, *optional*):
653
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
654
+ into the model
655
+ """
656
+
657
+ residual = hidden_states
658
+
659
+ hidden_states = self.input_layernorm(hidden_states)
660
+
661
+ # Self Attention
662
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
663
+ hidden_states=hidden_states,
664
+ attention_mask=attention_mask,
665
+ position_ids=position_ids,
666
+ past_key_value=past_key_value,
667
+ output_attentions=output_attentions,
668
+ use_cache=use_cache,
669
+ cache_position=cache_position,
670
+ )
671
+ hidden_states = residual + hidden_states
672
+
673
+ # Fully Connected
674
+ residual = hidden_states
675
+ hidden_states = self.post_attention_layernorm(hidden_states)
676
+ hidden_states = self.mlp(hidden_states)
677
+ hidden_states = residual + hidden_states
678
+
679
+ outputs = (hidden_states,)
680
+
681
+ if output_attentions:
682
+ outputs += (self_attn_weights,)
683
+
684
+ if use_cache:
685
+ outputs += (present_key_value,)
686
+
687
+ return outputs
688
+
689
+
690
+ QWEN2_START_DOCSTRING = r"""
691
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
692
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
693
+ etc.)
694
+
695
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
696
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
697
+ and behavior.
698
+
699
+ Parameters:
700
+ config ([`Qwen2Config`]):
701
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
702
+ load the weights associated with the model, only the configuration. Check out the
703
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
704
+ """
705
+
706
+
707
+ @add_start_docstrings(
708
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
709
+ QWEN2_START_DOCSTRING,
710
+ )
711
+ class Qwen2PreTrainedModel(PreTrainedModel):
712
+ config_class = Qwen2Config
713
+ base_model_prefix = "model"
714
+ supports_gradient_checkpointing = True
715
+ _no_split_modules = ["Qwen2DecoderLayer"]
716
+ _skip_keys_device_placement = "past_key_values"
717
+ _supports_flash_attn_2 = True
718
+ _supports_sdpa = True
719
+ _supports_cache_class = True
720
+
721
+ def _init_weights(self, module):
722
+ std = self.config.initializer_range
723
+ if isinstance(module, nn.Linear):
724
+ module.weight.data.normal_(mean=0.0, std=std)
725
+ if module.bias is not None:
726
+ module.bias.data.zero_()
727
+ elif isinstance(module, nn.Embedding):
728
+ module.weight.data.normal_(mean=0.0, std=std)
729
+ if module.padding_idx is not None:
730
+ module.weight.data[module.padding_idx].zero_()
731
+
732
+
733
+ QWEN2_INPUTS_DOCSTRING = r"""
734
+ Args:
735
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
736
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
737
+ it.
738
+
739
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
740
+ [`PreTrainedTokenizer.__call__`] for details.
741
+
742
+ [What are input IDs?](../glossary#input-ids)
743
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
744
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
745
+
746
+ - 1 for tokens that are **not masked**,
747
+ - 0 for tokens that are **masked**.
748
+
749
+ [What are attention masks?](../glossary#attention-mask)
750
+
751
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
752
+ [`PreTrainedTokenizer.__call__`] for details.
753
+
754
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
755
+ `past_key_values`).
756
+
757
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
758
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
759
+ information on the default strategy.
760
+
761
+ - 1 indicates the head is **not masked**,
762
+ - 0 indicates the head is **masked**.
763
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
764
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
765
+ config.n_positions - 1]`.
766
+
767
+ [What are position IDs?](../glossary#position-ids)
768
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
769
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
770
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
771
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
772
+
773
+ Two formats are allowed:
774
+ - a [`~cache_utils.Cache`] instance;
775
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
776
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
777
+ cache format.
778
+
779
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
780
+ legacy cache format will be returned.
781
+
782
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
783
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
784
+ of shape `(batch_size, sequence_length)`.
785
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
786
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
787
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
788
+ model's internal embedding lookup matrix.
789
+ use_cache (`bool`, *optional*):
790
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
791
+ `past_key_values`).
792
+ output_attentions (`bool`, *optional*):
793
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
794
+ tensors for more detail.
795
+ output_hidden_states (`bool`, *optional*):
796
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
797
+ more detail.
798
+ return_dict (`bool`, *optional*):
799
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
800
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
801
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
802
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
803
+ the complete sequence length.
804
+ """
805
+
806
+
807
+ @add_start_docstrings(
808
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
809
+ QWEN2_START_DOCSTRING,
810
+ )
811
+ class Qwen2Model(Qwen2PreTrainedModel):
812
+ """
813
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
814
+
815
+ Args:
816
+ config: Qwen2Config
817
+ """
818
+
819
+ def __init__(self, config: Qwen2Config):
820
+ super().__init__(config)
821
+ self.padding_idx = config.pad_token_id
822
+ self.vocab_size = config.vocab_size
823
+
824
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
825
+ self.layers = nn.ModuleList(
826
+ [Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
827
+ )
828
+ self._attn_implementation = config._attn_implementation
829
+ self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
830
+
831
+ self.gradient_checkpointing = False
832
+ # Initialize weights and apply final processing
833
+ self.post_init()
834
+
835
+ def get_input_embeddings(self):
836
+ return self.embed_tokens
837
+
838
+ def set_input_embeddings(self, value):
839
+ self.embed_tokens = value
840
+
841
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
842
+ def forward(
843
+ self,
844
+ input_ids: torch.LongTensor = None,
845
+ attention_mask: Optional[torch.Tensor] = None,
846
+ position_ids: Optional[torch.LongTensor] = None,
847
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
848
+ inputs_embeds: Optional[torch.FloatTensor] = None,
849
+ use_cache: Optional[bool] = None,
850
+ output_attentions: Optional[bool] = None,
851
+ output_hidden_states: Optional[bool] = None,
852
+ return_dict: Optional[bool] = None,
853
+ cache_position: Optional[torch.LongTensor] = None,
854
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
855
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
856
+ output_hidden_states = (
857
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
858
+ )
859
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
860
+
861
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
862
+
863
+ if (input_ids is None) ^ (inputs_embeds is not None):
864
+ raise ValueError(
865
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
866
+ )
867
+
868
+ if self.gradient_checkpointing and self.training:
869
+ if use_cache:
870
+ logger.warning_once(
871
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
872
+ )
873
+ use_cache = False
874
+
875
+ use_legacy_cache = False
876
+ if use_cache and not isinstance(past_key_values, Cache) and not self.training:
877
+ use_legacy_cache = True
878
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
879
+ logger.warning_once(
880
+ "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. "
881
+ "Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)"
882
+ )
883
+
884
+ if inputs_embeds is None:
885
+ inputs_embeds = self.embed_tokens(input_ids)
886
+
887
+ if cache_position is None:
888
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
889
+ cache_position = torch.arange(
890
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
891
+ )
892
+ if position_ids is None:
893
+ position_ids = cache_position.unsqueeze(0)
894
+
895
+ causal_mask = self._update_causal_mask(
896
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
897
+ )
898
+
899
+ hidden_states = inputs_embeds
900
+
901
+ # decoder layers
902
+ all_hidden_states = () if output_hidden_states else None
903
+ all_self_attns = () if output_attentions else None
904
+ next_decoder_cache = None
905
+
906
+ for decoder_layer in self.layers:
907
+ if output_hidden_states:
908
+ all_hidden_states += (hidden_states,)
909
+
910
+ if self.gradient_checkpointing and self.training:
911
+ layer_outputs = self._gradient_checkpointing_func(
912
+ decoder_layer.__call__,
913
+ hidden_states,
914
+ causal_mask,
915
+ position_ids,
916
+ past_key_values,
917
+ output_attentions,
918
+ use_cache,
919
+ cache_position,
920
+ )
921
+ else:
922
+ layer_outputs = decoder_layer(
923
+ hidden_states,
924
+ attention_mask=causal_mask,
925
+ position_ids=position_ids,
926
+ past_key_value=past_key_values,
927
+ output_attentions=output_attentions,
928
+ use_cache=use_cache,
929
+ cache_position=cache_position,
930
+ )
931
+
932
+ hidden_states = layer_outputs[0]
933
+
934
+ if use_cache:
935
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
936
+
937
+ if output_attentions:
938
+ all_self_attns += (layer_outputs[1],)
939
+
940
+ hidden_states = self.norm(hidden_states)
941
+
942
+ # add hidden states from the last decoder layer
943
+ if output_hidden_states:
944
+ all_hidden_states += (hidden_states,)
945
+
946
+ next_cache = None
947
+ if use_cache:
948
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
949
+
950
+ if not return_dict:
951
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
952
+ return BaseModelOutputWithPast(
953
+ last_hidden_state=hidden_states,
954
+ past_key_values=next_cache,
955
+ hidden_states=all_hidden_states,
956
+ attentions=all_self_attns,
957
+ )
958
+
959
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
960
+ def _update_causal_mask(
961
+ self,
962
+ attention_mask: torch.Tensor,
963
+ input_tensor: torch.Tensor,
964
+ cache_position: torch.Tensor,
965
+ past_key_values: Cache,
966
+ output_attentions: bool,
967
+ ):
968
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
969
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
970
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
971
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
972
+
973
+ if self.config._attn_implementation == "flash_attention_2":
974
+ if attention_mask is not None and 0.0 in attention_mask:
975
+ return attention_mask
976
+ return None
977
+
978
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
979
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
980
+ # to infer the attention mask.
981
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
982
+ using_static_cache = False  # was: isinstance(past_key_values, StaticCache); static-cache handling is disabled in this file
983
+
984
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
985
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
986
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
987
+ attention_mask,
988
+ inputs_embeds=input_tensor,
989
+ past_key_values_length=past_seen_tokens,
990
+ is_training=self.training,
991
+ ):
992
+ return None
993
+
994
+ dtype, device = input_tensor.dtype, input_tensor.device
995
+ min_dtype = torch.finfo(dtype).min
996
+ sequence_length = input_tensor.shape[1]
997
+ if using_static_cache:
998
+ target_length = past_key_values.get_max_length()
999
+ else:
1000
+ target_length = (
1001
+ attention_mask.shape[-1]
1002
+ if isinstance(attention_mask, torch.Tensor)
1003
+ else past_seen_tokens + sequence_length + 1
1004
+ )
1005
+
1006
+ # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
1007
+ causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(
1008
+ attention_mask,
1009
+ sequence_length=sequence_length,
1010
+ target_length=target_length,
1011
+ dtype=dtype,
1012
+ device=device,
1013
+ min_dtype=min_dtype,
1014
+ cache_position=cache_position,
1015
+ batch_size=input_tensor.shape[0],
1016
+ )
1017
+
1018
+ if (
1019
+ self.config._attn_implementation == "sdpa"
1020
+ and attention_mask is not None
1021
+ and attention_mask.device.type == "cuda"
1022
+ and not output_attentions
1023
+ ):
1024
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1025
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1026
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1027
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1028
+
1029
+ return causal_mask
1030
+
1031
+
1032
+ class Qwen2ForCausalLM(Qwen2PreTrainedModel):
1033
+ _tied_weights_keys = ["lm_head.weight"]
1034
+
1035
+ def __init__(self, config):
1036
+ super().__init__(config)
1037
+ self.model = Qwen2Model(config)
1038
+ self.vocab_size = config.vocab_size
1039
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1040
+
1041
+ # Initialize weights and apply final processing
1042
+ self.post_init()
1043
+
1044
+ def get_input_embeddings(self):
1045
+ return self.model.embed_tokens
1046
+
1047
+ def set_input_embeddings(self, value):
1048
+ self.model.embed_tokens = value
1049
+
1050
+ def get_output_embeddings(self):
1051
+ return self.lm_head
1052
+
1053
+ def set_output_embeddings(self, new_embeddings):
1054
+ self.lm_head = new_embeddings
1055
+
1056
+ def set_decoder(self, decoder):
1057
+ self.model = decoder
1058
+
1059
+ def get_decoder(self):
1060
+ return self.model
1061
+
1062
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1063
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1064
+ def forward(
1065
+ self,
1066
+ input_ids: torch.LongTensor = None,
1067
+ attention_mask: Optional[torch.Tensor] = None,
1068
+ position_ids: Optional[torch.LongTensor] = None,
1069
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1070
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1071
+ labels: Optional[torch.LongTensor] = None,
1072
+ use_cache: Optional[bool] = None,
1073
+ output_attentions: Optional[bool] = None,
1074
+ output_hidden_states: Optional[bool] = None,
1075
+ return_dict: Optional[bool] = None,
1076
+ cache_position: Optional[torch.LongTensor] = None,
1077
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1078
+ r"""
1079
+ Args:
1080
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1081
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1082
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1083
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1084
+
1085
+ Returns:
1086
+
1087
+ Example:
1088
+
1089
+ ```python
1090
+ >>> from transformers import AutoTokenizer, Qwen2ForCausalLM
1091
+
1092
+ >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1093
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1094
+
1095
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1096
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1097
+
1098
+ >>> # Generate
1099
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1100
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1101
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1102
+ ```"""
1103
+
1104
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1105
+ output_hidden_states = (
1106
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1107
+ )
1108
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1109
+
1110
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1111
+ outputs = self.model(
1112
+ input_ids=input_ids,
1113
+ attention_mask=attention_mask,
1114
+ position_ids=position_ids,
1115
+ past_key_values=past_key_values,
1116
+ inputs_embeds=inputs_embeds,
1117
+ use_cache=use_cache,
1118
+ output_attentions=output_attentions,
1119
+ output_hidden_states=output_hidden_states,
1120
+ return_dict=return_dict,
1121
+ cache_position=cache_position,
1122
+ )
1123
+
1124
+ hidden_states = outputs[0]
1125
+ logits = self.lm_head(hidden_states)
1126
+ logits = logits.float()
1127
+
1128
+ loss = None
1129
+ if labels is not None:
1130
+ # Shift so that tokens < n predict n
1131
+ shift_logits = logits[..., :-1, :].contiguous()
1132
+ shift_labels = labels[..., 1:].contiguous()
1133
+ # Flatten the tokens
1134
+ loss_fct = CrossEntropyLoss()
1135
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1136
+ shift_labels = shift_labels.view(-1)
1137
+ # Enable model parallelism
1138
+ shift_labels = shift_labels.to(shift_logits.device)
1139
+ loss = loss_fct(shift_logits, shift_labels)
1140
+
1141
+ if not return_dict:
1142
+ output = (logits,) + outputs[1:]
1143
+ return (loss,) + output if loss is not None else output
1144
+
1145
+ return CausalLMOutputWithPast(
1146
+ loss=loss,
1147
+ logits=logits,
1148
+ past_key_values=outputs.past_key_values,
1149
+ hidden_states=outputs.hidden_states,
1150
+ attentions=outputs.attentions,
1151
+ )
1152
+
1153
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.prepare_inputs_for_generation
1154
+ def prepare_inputs_for_generation(
1155
+ self,
1156
+ input_ids,
1157
+ past_key_values=None,
1158
+ attention_mask=None,
1159
+ inputs_embeds=None,
1160
+ cache_position=None,
1161
+ position_ids=None,
1162
+ use_cache=True,
1163
+ **kwargs,
1164
+ ):
1165
+ # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
1166
+ # Exception 1: when passing input_embeds, input_ids may be missing entries
1167
+ # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
1168
+ if past_key_values is not None:
1169
+ if inputs_embeds is not None: # Exception 1
1170
+ input_ids = input_ids[:, -cache_position.shape[0] :]
1171
+ elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
1172
+ input_ids = input_ids[:, cache_position]
1173
+
1174
+ if attention_mask is not None and position_ids is None:
1175
+ # create position_ids on the fly for batch generation
1176
+ position_ids = attention_mask.long().cumsum(-1) - 1
1177
+ position_ids.masked_fill_(attention_mask == 0, 1)
1178
+ if past_key_values:
1179
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1180
+
1181
+ # This `clone` call is needed to avoid recapturing CUDA graphs with `torch.compile`'s `mode="reduce-overhead"`, as otherwise the input `position_ids` would have a varying stride during decoding. Simply using `.contiguous()` is not sufficient: in the batch size = 1 case, `position_ids` is already contiguous but its varying stride still retriggers a capture.
1182
+ position_ids = position_ids.clone(memory_format=torch.contiguous_format)
1183
+
1184
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1185
+ if inputs_embeds is not None and cache_position[0] == 0:
1186
+ model_inputs = {"inputs_embeds": inputs_embeds}
1187
+ else:
1188
+ model_inputs = {"input_ids": input_ids}
1189
+
1190
+ if False and isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:  # static-cache mask path intentionally disabled here
1191
+ if inputs_embeds is not None:
1192
+ batch_size, sequence_length = inputs_embeds.shape
1193
+ device = inputs_embeds.device
1194
+ else:
1195
+ batch_size, sequence_length = input_ids.shape
1196
+ device = input_ids.device
1197
+
1198
+ dtype = self.lm_head.weight.dtype
1199
+ min_dtype = torch.finfo(dtype).min
1200
+
1201
+ attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(
1202
+ attention_mask,
1203
+ sequence_length=sequence_length,
1204
+ target_length=past_key_values.get_max_length(),
1205
+ dtype=dtype,
1206
+ device=device,
1207
+ min_dtype=min_dtype,
1208
+ cache_position=cache_position,
1209
+ batch_size=batch_size,
1210
+ )
1211
+
1212
+ model_inputs.update(
1213
+ {
1214
+ "position_ids": position_ids,
1215
+ "cache_position": cache_position,
1216
+ "past_key_values": past_key_values,
1217
+ "use_cache": use_cache,
1218
+ "attention_mask": attention_mask,
1219
+ }
1220
+ )
1221
+ return model_inputs
1222
+
1223
+
1224
+ @add_start_docstrings(
1225
+ """
1226
+ The Qwen2 Model transformer with a sequence classification head on top (linear layer).
1227
+
1228
+ [`Qwen2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1229
+ (e.g. GPT-2) do.
1230
+
1231
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1232
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1233
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1234
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1235
+ each row of the batch).
1236
+ """,
1237
+ QWEN2_START_DOCSTRING,
1238
+ )
1239
+ class Qwen2ForSequenceClassification(Qwen2PreTrainedModel):
1240
+ def __init__(self, config):
1241
+ super().__init__(config)
1242
+ self.num_labels = config.num_labels
1243
+ self.model = Qwen2Model(config)
1244
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1245
+
1246
+ # Initialize weights and apply final processing
1247
+ self.post_init()
1248
+
1249
+ def get_input_embeddings(self):
1250
+ return self.model.embed_tokens
1251
+
1252
+ def set_input_embeddings(self, value):
1253
+ self.model.embed_tokens = value
1254
+
1255
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1256
+ def forward(
1257
+ self,
1258
+ input_ids: torch.LongTensor = None,
1259
+ attention_mask: Optional[torch.Tensor] = None,
1260
+ position_ids: Optional[torch.LongTensor] = None,
1261
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1262
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1263
+ labels: Optional[torch.LongTensor] = None,
1264
+ use_cache: Optional[bool] = None,
1265
+ output_attentions: Optional[bool] = None,
1266
+ output_hidden_states: Optional[bool] = None,
1267
+ return_dict: Optional[bool] = None,
1268
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1269
+ r"""
1270
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1271
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1272
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1273
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1274
+ """
1275
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1276
+
1277
+ transformer_outputs = self.model(
1278
+ input_ids,
1279
+ attention_mask=attention_mask,
1280
+ position_ids=position_ids,
1281
+ past_key_values=past_key_values,
1282
+ inputs_embeds=inputs_embeds,
1283
+ use_cache=use_cache,
1284
+ output_attentions=output_attentions,
1285
+ output_hidden_states=output_hidden_states,
1286
+ return_dict=return_dict,
1287
+ )
1288
+ hidden_states = transformer_outputs[0]
1289
+ logits = self.score(hidden_states)
1290
+
1291
+ if input_ids is not None:
1292
+ batch_size = input_ids.shape[0]
1293
+ else:
1294
+ batch_size = inputs_embeds.shape[0]
1295
+
1296
+ if self.config.pad_token_id is None and batch_size != 1:
1297
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1298
+ if self.config.pad_token_id is None:
1299
+ sequence_lengths = -1
1300
+ else:
1301
+ if input_ids is not None:
1302
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1303
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1304
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1305
+ sequence_lengths = sequence_lengths.to(logits.device)
1306
+ else:
1307
+ sequence_lengths = -1
1308
+
1309
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1310
+
1311
+ loss = None
1312
+ if labels is not None:
1313
+ labels = labels.to(logits.device)
1314
+ if self.config.problem_type is None:
1315
+ if self.num_labels == 1:
1316
+ self.config.problem_type = "regression"
1317
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1318
+ self.config.problem_type = "single_label_classification"
1319
+ else:
1320
+ self.config.problem_type = "multi_label_classification"
1321
+
1322
+ if self.config.problem_type == "regression":
1323
+ loss_fct = MSELoss()
1324
+ if self.num_labels == 1:
1325
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1326
+ else:
1327
+ loss = loss_fct(pooled_logits, labels)
1328
+ elif self.config.problem_type == "single_label_classification":
1329
+ loss_fct = CrossEntropyLoss()
1330
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1331
+ elif self.config.problem_type == "multi_label_classification":
1332
+ loss_fct = BCEWithLogitsLoss()
1333
+ loss = loss_fct(pooled_logits, labels)
1334
+ if not return_dict:
1335
+ output = (pooled_logits,) + transformer_outputs[1:]
1336
+ return ((loss,) + output) if loss is not None else output
1337
+
1338
+ return SequenceClassifierOutputWithPast(
1339
+ loss=loss,
1340
+ logits=pooled_logits,
1341
+ past_key_values=transformer_outputs.past_key_values,
1342
+ hidden_states=transformer_outputs.hidden_states,
1343
+ attentions=transformer_outputs.attentions,
1344
+ )
1345
+
1346
+
1347
+ @add_start_docstrings(
1348
+ """
1349
+ The Qwen2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1350
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1351
+ """,
1352
+ QWEN2_START_DOCSTRING,
1353
+ )
1354
+ # Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Qwen2, LLAMA->QWEN2
1355
+ class Qwen2ForTokenClassification(Qwen2PreTrainedModel):
1356
+ def __init__(self, config):
1357
+ super().__init__(config)
1358
+ self.num_labels = config.num_labels
1359
+ self.model = Qwen2Model(config)
1360
+ if getattr(config, "classifier_dropout", None) is not None:
1361
+ classifier_dropout = config.classifier_dropout
1362
+ elif getattr(config, "hidden_dropout", None) is not None:
1363
+ classifier_dropout = config.hidden_dropout
1364
+ else:
1365
+ classifier_dropout = 0.1
1366
+ self.dropout = nn.Dropout(classifier_dropout)
1367
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1368
+
1369
+ # Initialize weights and apply final processing
1370
+ self.post_init()
1371
+
1372
+ def get_input_embeddings(self):
1373
+ return self.model.embed_tokens
1374
+
1375
+ def set_input_embeddings(self, value):
1376
+ self.model.embed_tokens = value
1377
+
1378
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1379
+ def forward(
1380
+ self,
1381
+ input_ids: Optional[torch.LongTensor] = None,
1382
+ attention_mask: Optional[torch.Tensor] = None,
1383
+ position_ids: Optional[torch.LongTensor] = None,
1384
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1385
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1386
+ labels: Optional[torch.LongTensor] = None,
1387
+ use_cache: Optional[bool] = None,
1388
+ output_attentions: Optional[bool] = None,
1389
+ output_hidden_states: Optional[bool] = None,
1390
+ return_dict: Optional[bool] = None,
1391
+ ) -> Union[Tuple, TokenClassifierOutput]:
1392
+ r"""
1393
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1394
+ Labels for computing the token classification loss. Indices should be in `[0, ...,
1395
+ config.num_labels - 1]`. Tokens with indices set to `-100` are ignored (masked); the loss is only
1396
+ computed for the tokens with labels in `[0, ..., config.num_labels - 1]`.
1397
+ """
1398
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1399
+
1400
+ outputs = self.model(
1401
+ input_ids,
1402
+ attention_mask=attention_mask,
1403
+ position_ids=position_ids,
1404
+ past_key_values=past_key_values,
1405
+ inputs_embeds=inputs_embeds,
1406
+ use_cache=use_cache,
1407
+ output_attentions=output_attentions,
1408
+ output_hidden_states=output_hidden_states,
1409
+ return_dict=return_dict,
1410
+ )
1411
+ sequence_output = outputs[0]
1412
+ sequence_output = self.dropout(sequence_output)
1413
+ logits = self.score(sequence_output)
1414
+
1415
+ loss = None
1416
+ if labels is not None:
1417
+ loss_fct = CrossEntropyLoss()
1418
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1419
+
1420
+ if not return_dict:
1421
+ output = (logits,) + outputs[2:]
1422
+ return ((loss,) + output) if loss is not None else output
1423
+
1424
+ return TokenClassifierOutput(
1425
+ loss=loss,
1426
+ logits=logits,
1427
+ hidden_states=outputs.hidden_states,
1428
+ attentions=outputs.attentions,
1429
+ )
1430
+
1431
+
1432
+ @add_start_docstrings(
1433
+ """
1434
+ The Qwen2 Model transformer with a reward-model head on top (a two-layer MLP producing a single score).
1435
+
1436
+ [`Qwen2ForRewardModel`] scores the sequence using the last token, in the same way that other causal models
1437
+ (e.g. GPT-2) do.
1438
+
1439
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1440
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1441
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1442
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1443
+ each row of the batch).
1444
+ """,
1445
+ QWEN2_START_DOCSTRING,
1446
+ )
1447
+ class Qwen2ForRewardModel(Qwen2PreTrainedModel):
1448
+ def __init__(self, config):
1449
+ super().__init__(config)
1450
+ self.num_labels = 1  # fixed to a single scalar reward; config.num_labels is intentionally ignored
1451
+ self.model = Qwen2Model(config)
1452
+ self.score = nn.Sequential(
1453
+ nn.Linear(config.hidden_size, config.hidden_size),
1454
+ nn.ReLU(),
1455
+ nn.Linear(config.hidden_size, self.num_labels)
1456
+ )
1457
+
1458
+ # Initialize weights and apply final processing
1459
+ self.post_init()
1460
+
1461
+ def get_input_embeddings(self):
1462
+ return self.model.embed_tokens
1463
+
1464
+ def set_input_embeddings(self, value):
1465
+ self.model.embed_tokens = value
1466
+
1467
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1468
+ def forward(
1469
+ self,
1470
+ input_ids: torch.LongTensor = None,
1471
+ attention_mask: Optional[torch.Tensor] = None,
1472
+ position_ids: Optional[torch.LongTensor] = None,
1473
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1474
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1475
+ labels: Optional[torch.LongTensor] = None,
1476
+ use_cache: Optional[bool] = None,
1477
+ output_attentions: Optional[bool] = None,
1478
+ output_hidden_states: Optional[bool] = None,
1479
+ return_dict: Optional[bool] = None,
1480
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1481
+ r"""
1482
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1483
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1484
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1485
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1486
+ """
1487
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1488
+
1489
+ transformer_outputs = self.model(
1490
+ input_ids,
1491
+ attention_mask=attention_mask,
1492
+ position_ids=position_ids,
1493
+ past_key_values=past_key_values,
1494
+ inputs_embeds=inputs_embeds,
1495
+ use_cache=use_cache,
1496
+ output_attentions=output_attentions,
1497
+ output_hidden_states=output_hidden_states,
1498
+ return_dict=return_dict,
1499
+ )
1500
+ hidden_states = transformer_outputs[0]
1501
+ logits = self.score(hidden_states)
1502
+
1503
+ if input_ids is not None:
1504
+ batch_size = input_ids.shape[0]
1505
+ else:
1506
+ batch_size = inputs_embeds.shape[0]
1507
+
1508
+ if self.config.pad_token_id is None and batch_size != 1:
1509
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1510
+ if self.config.pad_token_id is None:
1511
+ sequence_lengths = -1
1512
+ else:
1513
+ if input_ids is not None:
1514
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1515
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1516
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1517
+ sequence_lengths = sequence_lengths.to(logits.device)
1518
+ else:
1519
+ sequence_lengths = -1
1520
+
1521
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1522
+
1523
+ loss = None
1524
+ if labels is not None:
1525
+ labels = labels.to(logits.device)
1526
+ if self.config.problem_type is None:
1527
+ if self.num_labels == 1:
1528
+ self.config.problem_type = "regression"
1529
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1530
+ self.config.problem_type = "single_label_classification"
1531
+ else:
1532
+ self.config.problem_type = "multi_label_classification"
1533
+
1534
+ if self.config.problem_type == "regression":
1535
+ loss_fct = MSELoss()
1536
+ if self.num_labels == 1:
1537
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1538
+ else:
1539
+ loss = loss_fct(pooled_logits, labels)
1540
+ elif self.config.problem_type == "single_label_classification":
1541
+ loss_fct = CrossEntropyLoss()
1542
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1543
+ elif self.config.problem_type == "multi_label_classification":
1544
+ loss_fct = BCEWithLogitsLoss()
1545
+ loss = loss_fct(pooled_logits, labels)
1546
+ if not return_dict:
1547
+ output = (pooled_logits,) + transformer_outputs[1:]
1548
+ return ((loss,) + output) if loss is not None else output
1549
+
1550
+ return SequenceClassifierOutputWithPast(
1551
+ loss=loss,
1552
+ logits=pooled_logits,
1553
+ past_key_values=transformer_outputs.past_key_values,
1554
+ hidden_states=transformer_outputs.hidden_states,
1555
+ attentions=transformer_outputs.attentions,
1556
+ )
1557
+
1558
+
1559
+ @add_start_docstrings(
1560
+ """
1561
+ The Qwen2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1562
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1563
+ """,
1564
+ QWEN2_START_DOCSTRING,
1565
+ )
1566
+ # Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Qwen2, LLAMA->QWEN2
1567
+ class Qwen2ForProcessRewardModel(Qwen2PreTrainedModel):
1568
+ def __init__(self, config):
1569
+ super().__init__(config)
1570
+ self.num_labels = 2
1571
+ self.model = Qwen2Model(config)
1572
+ self.score = nn.Sequential(
1573
+ nn.Linear(config.hidden_size, config.hidden_size),
1574
+ nn.ReLU(),
1575
+ nn.Linear(config.hidden_size, self.num_labels)
1576
+ )
1577
+
1578
+ # Initialize weights and apply final processing
1579
+ self.post_init()
1580
+
1581
+ def get_input_embeddings(self):
1582
+ return self.model.embed_tokens
1583
+
1584
+ def set_input_embeddings(self, value):
1585
+ self.model.embed_tokens = value
1586
+
1587
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1588
+ def forward(
1589
+ self,
1590
+ input_ids: Optional[torch.LongTensor] = None,
1591
+ attention_mask: Optional[torch.Tensor] = None,
1592
+ position_ids: Optional[torch.LongTensor] = None,
1593
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1594
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1595
+ labels: Optional[torch.LongTensor] = None,
1596
+ use_cache: Optional[bool] = None,
1597
+ output_attentions: Optional[bool] = None,
1598
+ output_hidden_states: Optional[bool] = None,
1599
+ return_dict: Optional[bool] = None,
1600
+ ) -> Union[Tuple, TokenClassifierOutput]:
1601
+ r"""
1602
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1603
+ Labels for computing the per-token process-reward (token classification) loss. Indices should be in
1604
+ `[0, ..., config.num_labels - 1]`. Tokens with indices set to `-100` are ignored (masked); the loss is
1605
+ only computed for the tokens with labels in `[0, ..., config.num_labels - 1]`.
1606
+ """
1607
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1608
+
1609
+ outputs = self.model(
1610
+ input_ids,
1611
+ attention_mask=attention_mask,
1612
+ position_ids=position_ids,
1613
+ past_key_values=past_key_values,
1614
+ inputs_embeds=inputs_embeds,
1615
+ use_cache=use_cache,
1616
+ output_attentions=output_attentions,
1617
+ output_hidden_states=output_hidden_states,
1618
+ return_dict=return_dict,
1619
+ )
1620
+ hidden_states = outputs[0]
1621
+ logits = self.score(hidden_states)
1622
+
1623
+ loss = None
1624
+ if labels is not None:
1625
+ loss_fct = CrossEntropyLoss()
1626
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1627
+
1628
+ if not return_dict:
1629
+ output = (logits,) + outputs[2:]
1630
+ return ((loss,) + output) if loss is not None else output
1631
+
1632
+ return TokenClassifierOutput(
1633
+ loss=loss,
1634
+ logits=logits,
1635
+ hidden_states=outputs.hidden_states,
1636
+ attentions=outputs.attentions,
1637
+ )
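Beyond the standard causal-LM and classification heads, the modeling file above also defines a `Qwen2ForRewardModel` (a scalar score read off at the last non-padding token) and a `Qwen2ForProcessRewardModel` (two-way logits for every token). A minimal usage sketch for the process-reward head follows; the checkpoint path, loading through `AutoModel` with `trust_remote_code`, the `<extra_0>` step-separator convention, and the label order are all assumptions, not something this diff states.

```python
# Hedged sketch: step scoring with the Qwen2ForProcessRewardModel defined above.
# Assumptions: the repo's config maps AutoModel to this custom class, steps are
# separated with the <extra_0> special token, and class index 1 means "correct".
import torch
from transformers import AutoModel, AutoTokenizer

repo = "path/or/repo-id"  # hypothetical checkpoint location
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModel.from_pretrained(repo, trust_remote_code=True, torch_dtype=torch.bfloat16).eval()

text = "Step 1: 7 * 8 = 56.<extra_0>Step 2: Therefore the answer is 56.<extra_0>"
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits            # (1, seq_len, 2) token-level logits
probs = torch.softmax(logits, dim=-1)          # per-token class probabilities

sep_id = tokenizer.convert_tokens_to_ids("<extra_0>")
step_mask = inputs["input_ids"][0] == sep_id   # positions of the step separators
step_scores = probs[0, step_mask, 1]           # one score per step (label order assumed)
print(step_scores)
```

For the scalar `Qwen2ForRewardModel`, the same pattern applies, except that the score is taken at the last non-padding position, as the pooling code above shows.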
special_tokens_map.json ADDED
@@ -0,0 +1,116 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ {
4
+ "content": "<|im_start|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ },
10
+ {
11
+ "content": "<|im_end|>",
12
+ "lstrip": false,
13
+ "normalized": false,
14
+ "rstrip": false,
15
+ "single_word": false
16
+ },
17
+ {
18
+ "content": "<|object_ref_start|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ {
25
+ "content": "<|object_ref_end|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ },
31
+ {
32
+ "content": "<|box_start|>",
33
+ "lstrip": false,
34
+ "normalized": false,
35
+ "rstrip": false,
36
+ "single_word": false
37
+ },
38
+ {
39
+ "content": "<|box_end|>",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": false,
43
+ "single_word": false
44
+ },
45
+ {
46
+ "content": "<|quad_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false
51
+ },
52
+ {
53
+ "content": "<|quad_end|>",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false
58
+ },
59
+ {
60
+ "content": "<|vision_start|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false
65
+ },
66
+ {
67
+ "content": "<|vision_end|>",
68
+ "lstrip": false,
69
+ "normalized": false,
70
+ "rstrip": false,
71
+ "single_word": false
72
+ },
73
+ {
74
+ "content": "<|vision_pad|>",
75
+ "lstrip": false,
76
+ "normalized": false,
77
+ "rstrip": false,
78
+ "single_word": false
79
+ },
80
+ {
81
+ "content": "<|image_pad|>",
82
+ "lstrip": false,
83
+ "normalized": false,
84
+ "rstrip": false,
85
+ "single_word": false
86
+ },
87
+ {
88
+ "content": "<|video_pad|>",
89
+ "lstrip": false,
90
+ "normalized": false,
91
+ "rstrip": false,
92
+ "single_word": false
93
+ },
94
+ {
95
+ "content": "<extra_0>",
96
+ "lstrip": false,
97
+ "normalized": false,
98
+ "rstrip": false,
99
+ "single_word": false
100
+ }
101
+ ],
102
+ "eos_token": {
103
+ "content": "<|im_end|>",
104
+ "lstrip": false,
105
+ "normalized": false,
106
+ "rstrip": false,
107
+ "single_word": false
108
+ },
109
+ "pad_token": {
110
+ "content": "<|endoftext|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false
115
+ }
116
+ }
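For reference, the map above registers the chat and vision markers as additional special tokens and fixes `<|im_end|>` as the end-of-sequence token and `<|endoftext|>` as the padding token. A quick sanity check with a loaded tokenizer might look like this (the repo path is a placeholder):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/or/repo-id")  # hypothetical path

# These values come from special_tokens_map.json / tokenizer_config.json in this commit.
print(tokenizer.eos_token)  # '<|im_end|>'
print(tokenizer.pad_token)  # '<|endoftext|>'
print(tokenizer.convert_tokens_to_ids("<extra_0>"))  # 151665 per added_tokens_decoder below
```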
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95c6876df803c949f96a170e0750b4d26e8dcbaf7849fc40a9079617fa8ccadb
3
+ size 11422349
tokenizer_config.json ADDED
@@ -0,0 +1,216 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<extra_0>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": true
188
+ }
189
+ },
190
+ "additional_special_tokens": [
191
+ "<|im_start|>",
192
+ "<|im_end|>",
193
+ "<|object_ref_start|>",
194
+ "<|object_ref_end|>",
195
+ "<|box_start|>",
196
+ "<|box_end|>",
197
+ "<|quad_start|>",
198
+ "<|quad_end|>",
199
+ "<|vision_start|>",
200
+ "<|vision_end|>",
201
+ "<|vision_pad|>",
202
+ "<|image_pad|>",
203
+ "<|video_pad|>",
204
+ "<extra_0>"
205
+ ],
206
+ "bos_token": null,
207
+ "clean_up_tokenization_spaces": false,
208
+ "eos_token": "<|im_end|>",
209
+ "errors": "replace",
210
+ "extra_special_tokens": {},
211
+ "model_max_length": 131072,
212
+ "pad_token": "<|endoftext|>",
213
+ "split_special_tokens": false,
214
+ "tokenizer_class": "Qwen2Tokenizer",
215
+ "unk_token": null
216
+ }
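The tokenizer configuration above uses `Qwen2Tokenizer`, no BOS token, `<|im_end|>` as EOS, `<|endoftext|>` as padding, and a `model_max_length` of 131072. The sketch below shows one way to build a chat prompt with it; whether `apply_chat_template` picks up the repo's separately uploaded template file is an assumption, and the repo path is a placeholder.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/or/repo-id")  # hypothetical path

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 7 * 8?"},
]

# Assumes the repo's chat template is attached to the tokenizer; otherwise the
# <|im_start|>/<|im_end|> framing defined by the special tokens above would have
# to be applied manually.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(prompt, return_tensors="pt")
print(inputs["input_ids"].shape)
```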
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:faaaf92bedfaa37c75b8aaa9bc69ede68dd3d1627ba3ff6c7bcc16228899a5fe
3
+ size 5841
vocab.json ADDED
The diff for this file is too large to render. See raw diff