atrost committed
Commit c6a079c · verified · 1 Parent(s): 7121266

Add steerable Qwen2 (post-block adapters) with auto_map + code

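Usage sketch (not part of the committed files): because config.json registers the custom classes under auto_map, the steered model can be loaded directly with trust_remote_code=True. The repo path below is a placeholder, and the adapter toggle uses the helper defined in qwen2_postblock_steering_fixed.py further down.

from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "path/to/this-repo"  # placeholder: local path or Hub id of this repository

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype="auto",
    device_map="auto",
    trust_remote_code=True,  # lets auto_map resolve Qwen2ForCausalLMPostBlockSteeringFixed
)

# The adapters start as a no-op (zero-initialized up-projection); they can also be
# switched off globally via the steered backbone:
model.model.set_adapter_enabled(False)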
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
chat_template.jinja ADDED
@@ -0,0 +1 @@
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\n'}}{% endif %}
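For reference, a minimal sketch of how this template is exercised through the tokenizer from the loading sketch above (the messages are illustrative only):

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]

# add_generation_prompt=True appends '<|Assistant|><think>\n', matching the template's
# final branch, so the model starts a fresh reasoning turn.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)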
config.json ADDED
@@ -0,0 +1,63 @@
+{
+  "architectures": [
+    "Qwen2ForCausalLMPostBlockSteeringFixed"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "dtype": "bfloat16",
+  "eos_token_id": 151643,
+  "hidden_act": "silu",
+  "hidden_size": 1536,
+  "initializer_range": 0.02,
+  "intermediate_size": 8960,
+  "layer_types": [
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention"
+  ],
+  "max_position_embeddings": 131072,
+  "max_window_layers": 21,
+  "model_type": "qwen2",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 2,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
+  "transformers_version": "4.57.3",
+  "use_cache": true,
+  "use_mrope": false,
+  "use_sliding_window": false,
+  "vocab_size": 151936,
+  "auto_map": {
+    "AutoModel": "qwen2_postblock_steering_fixed.Qwen2ModelPostBlockSteering",
+    "AutoModelForCausalLM": "qwen2_postblock_steering_fixed.Qwen2ForCausalLMPostBlockSteeringFixed"
+  }
+}
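A quick back-of-the-envelope check, assuming the defaults hardcoded in qwen2_postblock_steering_fixed.py below (rank 8, adapters on every layer): each post-block adapter is a bias-free down/up projection pair, so the added steering parameters are a tiny fraction of the base model.

hidden_size, rank, num_layers = 1536, 8, 28  # hidden_size/num_hidden_layers from config.json, rank from the adapter defaults
per_adapter = hidden_size * rank + rank * hidden_size  # down + up projections, no biases
print(per_adapter, per_adapter * num_layers)  # 24576 per layer, 688128 in total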
generation_config.json ADDED
@@ -0,0 +1,9 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 151646,
+  "do_sample": true,
+  "eos_token_id": 151643,
+  "temperature": 0.6,
+  "top_p": 0.95,
+  "transformers_version": "4.57.3"
+}
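These defaults (sampling with temperature 0.6 and top_p 0.95) are picked up automatically by generate(). A minimal sketch, reusing model, tokenizer, and prompt from the sketches above:

import torch

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=256)  # sampling settings come from generation_config.json
print(tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))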
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af9185649e044f52b38c5986479f764e8adda71b5c65a6b6434b23b6eb214a94
+size 3555597480
qwen2_postblock_steering_fixed.py ADDED
@@ -0,0 +1,328 @@
+import torch
+import os
+import torch.nn as nn
+from typing import Optional, Tuple, Iterable, Union
+
+from transformers.models.qwen2.modeling_qwen2 import (
+    Qwen2ForCausalLM,
+    Qwen2Model,
+    Qwen2DecoderLayer,
+)
+
+# -------------------------
+# Low-rank adapter
+# -------------------------
+
+def _get_activation(name: str):
+    name = name.lower()
+    if name in ("silu", "swish"):
+        return nn.SiLU()
+    if name == "relu":
+        return nn.ReLU()
+    if name == "gelu":
+        return nn.GELU()
+    if name == "tanh":
+        return nn.Tanh()
+    raise ValueError(f"Unknown activation: {name}")
+
+class LowRankAdapter(nn.Module):
+    """
+    Δh = α * W_up( act(W_down(h)) )
+    """
+    def __init__(self, hidden_size: int, rank: int, alpha: float, activation: str):
+        super().__init__()
+        self.alpha = float(alpha)
+        self.act = _get_activation(activation)
+        self.down = nn.Linear(hidden_size, rank, bias=False)
+        self.up = nn.Linear(rank, hidden_size, bias=False)
+
+        # start as a no-op => preserves pretrained behavior at init
+        nn.init.zeros_(self.up.weight)
+
+    def forward(self, h: torch.Tensor) -> torch.Tensor:
+        return self.alpha * self.up(self.act(self.down(h)))
+
+
+# -------------------------
+# Steered Decoder Layer (post-block only)
+# -------------------------
+
+class Qwen2DecoderLayerPostBlockSteering(Qwen2DecoderLayer):
+    """
+    Drop-in Qwen2DecoderLayer that adds an adapter AFTER the block output.
+
+    apply_to:
+      - "last": apply only to the last token of (B, S, H), i.e. position -1
+      - "all": apply to all tokens
+    """
+    def __init__(
+        self,
+        config,
+        layer_idx: int,
+        enable: bool = True,
+        rank: int = 8,
+        alpha: float = 1.0,
+        activation: str = "silu",
+        apply_to: str = "all",
+    ):
+        super().__init__(config, layer_idx)
+        assert apply_to in ("last", "all")
+        self.apply_to = apply_to
+        self._adapter_enabled = True
+
+        self.adapter_block = (
+            LowRankAdapter(
+                hidden_size=config.hidden_size,
+                rank=rank,
+                alpha=alpha,
+                activation=activation,
+            )
+            if enable
+            else None
+        )
+
+    def set_adapter_enabled(self, enabled: bool):
+        self._adapter_enabled = bool(enabled)
+
+    def _apply_last(self, x: torch.Tensor, adapter: nn.Module) -> torch.Tensor:
+        if x.ndim != 3:
+            return x
+        last = x[:, -1, :]  # (B, H)
+        new_last = (last + adapter(last)).unsqueeze(1)  # (B, 1, H)
+        return torch.cat([x[:, :-1, :], new_last], dim=1)
+
+    def _apply_all(self, x: torch.Tensor, adapter: nn.Module) -> torch.Tensor:
+        if x.ndim != 3:
+            return x
+        b, s, h = x.shape
+        flat = x.reshape(b * s, h)
+        delta = adapter(flat).reshape(b, s, h)
+        return x + delta
+
+    def _apply_adapter(self, x: torch.Tensor) -> torch.Tensor:
+        if (self.adapter_block is None) or (not self._adapter_enabled):
+            return x
+        if self.apply_to == "last":
+            return self._apply_last(x, self.adapter_block)
+        return self._apply_all(x, self.adapter_block)
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_value: Optional[Tuple[torch.Tensor]] = None,
+        output_attentions: Optional[bool] = False,
+        use_cache: Optional[bool] = False,
+        cache_position: Optional[torch.LongTensor] = None,
+        position_embeddings: Optional[Tuple[torch.Tensor, torch.FloatTensor]] = None,
+        **kwargs,
+    ):
+        # Standard Qwen2 layer; inject the adapter at the very end (post-block).
+        residual = hidden_states
+        hidden_states = self.input_layernorm(hidden_states)
+
+        hidden_states, self_attn_weights, present_key_value = self.self_attn(
+            hidden_states=hidden_states,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_value=past_key_value,
+            output_attentions=output_attentions,
+            use_cache=use_cache,
+            cache_position=cache_position,
+            position_embeddings=position_embeddings,
+        )
+        hidden_states = residual + hidden_states
+
+        residual = hidden_states
+        hidden_states = self.post_attention_layernorm(hidden_states)
+        mlp_out = self.mlp(hidden_states)
+        hidden_states = residual + mlp_out
+
+        # ✅ post-block steering
+        hidden_states = self._apply_adapter(hidden_states)
+
+        outputs = (hidden_states,)
+        if output_attentions:
+            outputs += (self_attn_weights,)
+        if use_cache:
+            outputs += (present_key_value,)
+        return outputs
+
+
+# -------------------------
+# Qwen2Model + hardcoded steering config
+# -------------------------
+
+class Qwen2ModelPostBlockSteering(Qwen2Model):
+    def __init__(
+        self,
+        config,
+        layers_to_steer: Union[str, Iterable[int]],
+        rank: int,
+        apply_to: str,
+        alpha: float,
+        activation: str,
+    ):
+        super().__init__(config)
+
+        if layers_to_steer == "all":
+            layer_ids = set(range(config.num_hidden_layers))
+        else:
+            layer_ids = set(int(i) for i in layers_to_steer)
+
+        new_layers = nn.ModuleList()
+        for i in range(config.num_hidden_layers):
+            new_layers.append(
+                Qwen2DecoderLayerPostBlockSteering(
+                    config=config,
+                    layer_idx=i,
+                    enable=(i in layer_ids),
+                    rank=rank,
+                    alpha=alpha,
+                    activation=activation,
+                    apply_to=apply_to,
+                )
+            )
+        self.layers = new_layers
+
+    def set_adapter_enabled(self, enabled: bool):
+        for layer in self.layers:
+            if hasattr(layer, "set_adapter_enabled"):
+                layer.set_adapter_enabled(enabled)
+
+
+# -------------------------
+# Qwen2ForCausalLM with hardcoded knobs + base frozen by default
+# -------------------------
+
+class Qwen2ForCausalLMPostBlockSteeringFixed(Qwen2ForCausalLM):
+    """
+    Hardcoded steering config + base frozen by default.
+
+    Change these class constants to match what you want globally.
+    """
+    STEER_RANK: int = 8
+    STEER_APPLY_TO: str = "last"  # "last" or "all"
+    STEER_LAYERS: Union[str, Iterable[int]] = "all"  # or e.g. [0, 5, 10]
+    STEER_ALPHA: float = 1.0
+    STEER_ACTIVATION: str = "silu"
+
+    def __init__(self, config):
+        super().__init__(config)
+
+        # Replace the base transformer with the steered one, using the hardcoded config
+        self.model = Qwen2ModelPostBlockSteering(
+            config,
+            layers_to_steer=self.STEER_LAYERS,
+            rank=self.STEER_RANK,
+            apply_to=self.STEER_APPLY_TO,
+            alpha=self.STEER_ALPHA,
+            activation=self.STEER_ACTIVATION,
+        )
+
+        # Freeze the base by default (only the steering adapters stay trainable)
+        self.freeze_base_keep_steering_trainable()
+
+    # ---- freezing / params ----
+
+    def freeze_base_keep_steering_trainable(self):
+        for n, p in self.named_parameters():
+            p.requires_grad = ("adapter_block" in n)
+
+    def steering_parameters(self):
+        for n, p in self.named_parameters():
+            if "adapter_block" in n:
+                yield p
+
+    # ---- dtype/device correctness for device_map="auto" ----
+
+    def cast_adapters_like_base(self):
+        """
+        If you load with torch_dtype="auto" and/or device_map="auto",
+        the adapters are newly created modules and need to match each layer's dtype/device.
+        """
+        for layer in self.model.layers:
+            ref = layer.input_layernorm.weight
+            if getattr(layer, "adapter_block", None) is not None:
+                layer.adapter_block.to(device=ref.device, dtype=ref.dtype)
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        model = super().from_pretrained(*args, **kwargs)
+        # Ensure adapters land on the right shards/dtype, then freeze the base
+        if hasattr(model, "cast_adapters_like_base"):
+            model.cast_adapters_like_base()
+        if hasattr(model, "freeze_base_keep_steering_trainable"):
+            model.freeze_base_keep_steering_trainable()
+        return model
+
+    def _prepare_for_serialization(self):
+        """
+        If the model was loaded with device_map/offload, Accelerate attaches hooks that
+        can break save_pretrained for newly added params (like adapter_block.*).
+        This removes those hooks and consolidates the model to CPU.
+        """
+        try:
+            from accelerate.hooks import remove_hook_from_module
+            remove_hook_from_module(self, recurse=True)
+        except Exception:
+            pass
+
+        # Clean up common Accelerate attributes if present
+        for attr in ("hf_device_map", "_hf_hook"):
+            if hasattr(self, attr):
+                try:
+                    delattr(self, attr)
+                except Exception:
+                    pass
+
+        # Ensure all params are materialized on CPU for a normal state_dict save
+        self.to("cpu")
+
+    def _strip_accelerate_offload_hooks(self):
+        """
+        Remove Accelerate's device_map/offload hooks so saving doesn't go through
+        get_state_dict_from_offload (which doesn't know about the new adapter params).
+        """
+        # Best-effort removal via the documented Accelerate API
+        try:
+            from accelerate.hooks import remove_hook_from_module
+            remove_hook_from_module(self, recurse=True)
+        except Exception:
+            pass
+
+        # Hard removal: delete _hf_hook from every submodule if still present
+        for m in self.modules():
+            if hasattr(m, "_hf_hook"):
+                # try to detach cleanly if possible
+                try:
+                    m._hf_hook.detach_hook(m)
+                except Exception:
+                    pass
+                try:
+                    delattr(m, "_hf_hook")
+                except Exception:
+                    pass
+
+        # device_map bookkeeping (common with big-model inference)
+        if hasattr(self, "hf_device_map"):
+            try:
+                delattr(self, "hf_device_map")
+            except Exception:
+                pass
+
+    def save_pretrained(self, save_directory, **kwargs):
+        os.makedirs(save_directory, exist_ok=True)
+
+        # 1) remove Accelerate offload hooks
+        self._strip_accelerate_offload_hooks()
+
+        # 2) consolidate to CPU (sharded/offloaded weights cannot be saved "in place")
+        self.to("cpu")
+
+        # 3) build a normal state_dict and pass it explicitly to bypass Accelerate's
+        #    offload-aware saving (save_pretrained supports state_dict=...)
+        sd = {k: v.cpu() for k, v in self.state_dict().items()}
+
+        return super().save_pretrained(save_directory, state_dict=sd, **kwargs)
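A minimal training sketch against this module (an assumed workflow, not part of the commit): the base weights are frozen in __init__, so an optimizer only needs the parameters exposed by steering_parameters(), and the save_pretrained() override above consolidates everything to CPU before writing.

import torch

# only adapter_block.* tensors require grad after loading
optimizer = torch.optim.AdamW(model.steering_parameters(), lr=1e-4)

model.train()
batch = tokenizer("example training text", return_tensors="pt").to(model.device)
out = model(**batch, labels=batch["input_ids"])
out.loss.backward()
optimizer.step()
optimizer.zero_grad()

# Saves the config and steered weights; copy qwen2_postblock_steering_fixed.py alongside
# the checkpoint if it is meant to be reloaded with trust_remote_code.
model.save_pretrained("steered-qwen2-checkpoint")  # output directory name is illustrative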
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+{
+  "bos_token": {
+    "content": "<|begin▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|end▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|end▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
tokenizer_config.json ADDED
@@ -0,0 +1,194 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "add_prefix_space": null,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|end▁of▁sentence|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|User|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151645": {
+      "content": "<|Assistant|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151646": {
+      "content": "<|begin▁of▁sentence|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151647": {
+      "content": "<|EOT|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151648": {
+      "content": "<think>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151649": {
+      "content": "</think>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151650": {
+      "content": "<|quad_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151651": {
+      "content": "<|quad_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151652": {
+      "content": "<|vision_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151653": {
+      "content": "<|vision_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151654": {
+      "content": "<|vision_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151655": {
+      "content": "<|image_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151656": {
+      "content": "<|video_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151657": {
+      "content": "<tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151658": {
+      "content": "</tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151659": {
+      "content": "<|fim_prefix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151660": {
+      "content": "<|fim_middle|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151661": {
+      "content": "<|fim_suffix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151662": {
+      "content": "<|fim_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151663": {
+      "content": "<|repo_name|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151664": {
+      "content": "<|file_sep|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "bos_token": "<|begin▁of▁sentence|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|end▁of▁sentence|>",
+  "extra_special_tokens": {},
+  "legacy": true,
+  "model_max_length": 16384,
+  "pad_token": "<|end▁of▁sentence|>",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizerFast",
+  "unk_token": null,
+  "use_default_system_prompt": false
+}