MosRat committed (verified)
Commit 322823c · 1 Parent(s): 82f590b

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Gext_Pt1-596M-F16.gguf filter=lfs diff=lfs merge=lfs -text
+Gext_Pt1-596M-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
Gext_Pt1-596M-F16.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47fcfa69c3dc0481d80da53fb5644b12e7678e8bb8c2d49303f7222ecde61247
+size 1198181952
Gext_Pt1-596M-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:458a00f8db4925da41b4dfe1d7157a568c54823b61a06e195b0f874cc7621765
+size 396704320
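
The two .gguf entries above are Git LFS pointers to full-precision (F16) and 4-bit (Q4_K_M) GGUF exports of the 596M model. A minimal loading sketch with the llama-cpp-python bindings, assuming the installed llama.cpp build actually supports this custom architecture and that the file has been downloaded locally:

    # Sketch: load the quantized GGUF via llama-cpp-python.
    # Assumptions: local download, and llama.cpp support for this architecture.
    from llama_cpp import Llama

    llm = Llama(model_path="Gext_Pt1-596M-Q4_K_M.gguf", n_ctx=4096)
    out = llm("<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n", max_tokens=64)
    print(out["choices"][0]["text"])
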
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+{
+  "</think>": 151668,
+  "</tool_call>": 151658,
+  "</tool_response>": 151666,
+  "<think>": 151667,
+  "<tool_call>": 151657,
+  "<tool_response>": 151665,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
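
These entries extend the base Qwen vocabulary with tool-call, thinking, FIM, and vision tokens. A quick sanity check that the tokenizer resolves them to the ids above (the local path ./gext is an assumption; substitute wherever the repo is downloaded):

    # Sketch: confirm the added tokens map to the ids listed in added_tokens.json.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("./gext")  # assumed local download
    assert tok.convert_tokens_to_ids("<think>") == 151667
    assert tok.convert_tokens_to_ids("<|image_pad|>") == 151655
    assert tok.convert_tokens_to_ids("<|vision_start|>") == 151652
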
config.json ADDED
@@ -0,0 +1,58 @@
+{
+  "architectures": [
+    "GexTQwenForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_gex.GexTConfig",
+    "AutoModel": "modeling_gex.GexTQwenForCausalLM",
+    "AutoModelForCausalLM": "modeling_gex.GexTQwenForCausalLM"
+  },
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 28,
+  "model_type": "gext",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 8,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 1000000,
+  "sliding_window": null,
+  "tie_word_embeddings": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.51.3",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 151936,
+  "vision_config": {
+    "hidden_size": 768,
+    "in_chans": 3,
+    "intermediate_size": 2073,
+    "depth": 12,
+    "fullatt_block_indexes": [
+      2,
+      5,
+      8,
+      11
+    ],
+    "window_size": 14,
+    "model_type": "gotvary",
+    "out_hidden_size": 5120,
+    "spatial_patch_size": 14,
+    "tokens_per_second": 2,
+    "torch_dtype": "bfloat16",
+    "image_size": 1024,
+    "patch_size": 16,
+    "num_attention_heads": 12,
+    "n_head": 12,
+    "num_hidden_layers": 12
+  }
+}
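
The auto_map block routes AutoConfig and AutoModelForCausalLM to the custom classes shipped in this commit, so the checkpoint loads with trust_remote_code=True. A minimal sketch ("MosRat/Gext" is a placeholder repo id; substitute the actual Hub path or a local directory):

    # Sketch: load through the auto_map entries; trust_remote_code pulls in
    # configuration_gex.py and modeling_gex.py from the repo.
    import torch
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained(
        "MosRat/Gext",  # placeholder repo id
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
    )
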
configuration_gex.py ADDED
@@ -0,0 +1,7 @@
+from transformers import Qwen2Config, Qwen3Config
+
+class GexConfig(Qwen2Config):
+    model_type = "gex"
+
+class GexTConfig(Qwen3Config):
+    model_type = "gext"
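
When the two modules are imported directly instead of via trust_remote_code, the same wiring can be reproduced with the transformers registration API. A sketch, assuming flat local imports (the shipped modeling_gex.py uses a relative import, so this presumes the files live in an importable package or have been adjusted):

    # Sketch: manual equivalent of the auto_map entries in config.json.
    from transformers import AutoConfig, AutoModelForCausalLM
    from configuration_gex import GexTConfig
    from modeling_gex import GexTQwenForCausalLM

    AutoConfig.register("gext", GexTConfig)
    AutoModelForCausalLM.register(GexTConfig, GexTQwenForCausalLM)
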
generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "transformers_version": "4.51.3"
+}
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:515cd3cc86762a87b22ea6d54ad1ea542e9bfaaeede52139305620c60e17951d
+size 1389802360
modeling_gex.py ADDED
@@ -0,0 +1,763 @@
+from typing import List, Optional, Tuple, Union
+from functools import partial
+
+import torch
+import torch.nn.functional as F
+import torch.nn as nn
+from torch.nn.attention import SDPBackend, sdpa_kernel
+
+from torchvision import transforms
+from transformers.cache_utils import Cache
+from transformers.modeling_outputs import (
+    BaseModelOutputWithPast,
+    CausalLMOutputWithPast,
+)
+
+from torchvision.transforms.functional import InterpolationMode
+from transformers import (
+    Qwen2Config,
+    Qwen2Model,
+    Qwen2ForCausalLM,
+    Qwen3ForCausalLM,
+    Qwen3Model,
+    Qwen3Config,
+)
+
+try:
+    from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+    from liger_kernel.transformers import LigerLayerNorm
+    from liger_kernel.transformers.layer_norm import LigerLayerNormFunction
+
+    def liger_layer_norm(input, normalized_shape, weight, bias, eps):
+        return LigerLayerNormFunction.apply(input, weight, bias, eps)
+
+    use_liger = True
+except ImportError:
+    use_liger = False
+
+
+from .configuration_gex import GexConfig, GexTConfig
+
+
+LayerNorm = (
+    partial(LigerLayerNorm, bias=True) if use_liger else partial(nn.LayerNorm, eps=1e-6)
+)
+layer_norm = liger_layer_norm if use_liger else torch.nn.functional.layer_norm
+
+BOS_TOKEN_IDS: int = 151652
+EOS_TOKEN_IDS: int = 151643
+IMG_PAD_IDS: int = 151655
+IMG_END_IDS: int = 25
+
+
+@torch.no_grad
+def process_batch_labels(labels, pad_token_id=EOS_TOKEN_IDS):
+    # Build a mask flagging every pad_token_id position
+    pad_mask = labels == pad_token_id
+
+    # Locate the first pad_token_id in each sample
+    # (argmax yields 0 when no pad is present; fall back to 256)
+    first_pad_pos = pad_mask.int().argmax(dim=1, keepdim=True)  # shape: (bsz, 1)
+    first_pad_pos[first_pad_pos == 0] = 256
+
+    # Mask of positions to be replaced with -100
+    replace_mask = torch.arange(labels.size(1), device=labels.device) > first_pad_pos
+
+    # Apply the replacement (the first pad_token_id itself is kept)
+    labels[replace_mask] = -100
+
+    return labels
+
+
+class GexImageEvalProcessor:
+    def __init__(self, image_size=1024, mean=None, std=None):
+        if mean is None:
+            mean = (0.48145466, 0.4578275, 0.40821073)
+        if std is None:
+            std = (0.26862954, 0.26130258, 0.27577711)
+
+        self.normalize = transforms.Normalize(mean, std)
+
+        self.transform = transforms.Compose(
+            [
+                transforms.Resize(
+                    (image_size, image_size), interpolation=InterpolationMode.BICUBIC
+                ),
+                transforms.ToTensor(),
+                self.normalize,
+            ]
+        )
+
+    def __call__(self, item):
+        return self.transform(item)
+
+
+class LayerNorm2d(nn.Module):
+    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
+        super().__init__()
+        self.weight = nn.Parameter(torch.ones(num_channels))
+        self.bias = nn.Parameter(torch.zeros(num_channels))
+        self.num_channels = num_channels
+        self.eps = eps
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = x.permute(0, 2, 3, 1)
+        return layer_norm(
+            x,
+            normalized_shape=(self.num_channels,),
+            weight=self.weight,
+            bias=self.bias,
+            eps=self.eps,
+        ).permute(0, 3, 1, 2)
+
+
+class PatchEmbed(nn.Module):
+    """
+    Image to Patch Embedding.
+    """
+
+    def __init__(
+        self,
+        kernel_size: Tuple[int, int] = (16, 16),
+        stride: Tuple[int, int] = (16, 16),
+        in_chans: int = 3,
+        embed_dim: int = 768,
+    ) -> None:
+        """
+        Args:
+            kernel_size (Tuple): kernel size of the projection layer.
+            stride (Tuple): stride of the projection layer.
+            in_chans (int): Number of input image channels.
+            embed_dim (int): Patch embedding dimension.
+        """
+        super().__init__()
+
+        self.proj = nn.Conv2d(
+            in_chans, embed_dim, kernel_size=kernel_size, stride=stride
+        )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.proj(x)
+        # B C H W -> B H W C
+        x = x.permute(0, 2, 3, 1)
+        return x
+
+
+class Attention(nn.Module):
+    def __init__(
+        self,
+        dim: int,
+        num_heads: int = 8,
+        input_size: Optional[Tuple[int, int]] = None,
+    ) -> None:
+        super().__init__()
+        self.num_heads = num_heads
+        self.head_dim = 64
+        self.scale = 64**-0.5
+        self.seq_len = input_size[0] * input_size[1]
+        self.input_size = input_size
+
+        self.qkv = nn.Linear(dim, dim * 3, bias=True)
+        self.proj = nn.Linear(dim, dim)
+
+        # self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, self.head_dim))
+        # self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, self.head_dim))
+        self.rel_pos_h = nn.Parameter(
+            torch.zeros(input_size[0], input_size[0], self.head_dim)
+        )
+        self.rel_pos_w = nn.Parameter(
+            torch.zeros(input_size[1], input_size[1], self.head_dim)
+        )
+
+    def init_rel_pos(self):
+        q_size, k_size = self.input_size
+        q_coords = torch.arange(q_size)[:, None]
+
+        k_coords = torch.arange(k_size)[None, :]
+        relative_coords = (q_coords - k_coords) + (k_size - 1)
+
+        self.rel_pos_h = nn.Parameter(self.rel_pos_h.data[relative_coords.long()])
+        self.rel_pos_w = nn.Parameter(self.rel_pos_w.data[relative_coords.long()])
+
+    def get_attn_bias(self, q: torch.Tensor):
+        q = q.view(-1, *self.input_size, 64)
+
+        rel_h = torch.einsum("bhwc,hkc->bhwk", q, self.rel_pos_h)
+        rel_w = torch.einsum("bhwc,wkc->bhwk", q, self.rel_pos_w)
+
+        return (rel_h.unsqueeze(-1) + rel_w.unsqueeze(-2)).reshape(
+            -1, self.num_heads, self.seq_len, self.seq_len
+        )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        qkv = torch.split(
+            self.qkv(x).view(-1, self.seq_len, 3 * 768),
+            768,
+            dim=2,
+        )
+
+        q, k, v = (
+            i.unflatten(-1, (self.num_heads, -1)).transpose(1, 2).contiguous()
+            for i in qkv
+        )
+
+        attn_bias = self.get_attn_bias(q)
+        with sdpa_kernel(
+            [
+                SDPBackend.FLASH_ATTENTION,
+                SDPBackend.CUDNN_ATTENTION,
+                SDPBackend.EFFICIENT_ATTENTION,
+            ],
+            set_priority=True,
+        ):
+            attn_output = torch.nn.functional.scaled_dot_product_attention(
+                q, k, v, attn_mask=attn_bias, is_causal=False
+            )
+        attn_output = attn_output.transpose(1, 2).flatten(-2)
+
+        x = self.proj(attn_output)
+
+        return x.view(-1, *self.input_size, 768)
+
+
+class MLP(nn.Module):
+    def __init__(
+        self,
+    ):
+        super().__init__()
+        self.lin1 = nn.Linear(768, 4 * 768)
+        self.lin2 = nn.Linear(4 * 768, 768)
+        self.act = nn.GELU()
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.lin2(self.act(self.lin1(x)))
+
+
+class Block(nn.Module):
+    def __init__(self, idx: int, window_size: int = 14):
+        super().__init__()
+
+        self.idx = idx
+        self.window_size = window_size
+
+        self.norm1 = LayerNorm(768)
+
+        self.attn = Attention(
+            dim=768,
+            num_heads=12,
+            input_size=(64, 64) if window_size == 0 else (14, 14),
+        )
+
+        self.norm2 = LayerNorm(768)
+        self.mlp = MLP()
+
+    @staticmethod
+    def window_partition(x: torch.Tensor) -> torch.Tensor:
+        x = F.pad(x, (0, 0, 0, 6, 0, 6))
+        x = (
+            x.view(-1, 5, 14, 5, 14, 768)
+            .permute(0, 1, 3, 2, 4, 5)
+            .contiguous()
+            .view(-1, 14, 14, 768)
+        )
+        return x
+
+    @staticmethod
+    def window_unpartition(x: torch.Tensor) -> torch.Tensor:
+        x = (
+            x.view(-1, 5, 5, 14, 14, 768)
+            .permute(0, 1, 3, 2, 4, 5)
+            .contiguous()
+            .view(-1, 70, 70, 768)
+        )
+        return x[:, :64, :64, :].contiguous()
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        shortcut = x
+        x = self.norm1(x)
+        if self.window_size > 0:
+            x = self.window_partition(x)
+
+        x = self.attn(x)
+
+        if self.window_size > 0:
+            x = self.window_unpartition(x)
+
+        x = shortcut + x
+        x = x + self.mlp(self.norm2(x))
+
+        return x
+
+
+class GexVit(nn.Module):
+    def __init__(self, global_attn_indexes=[2, 5, 8, 11], **kwargs):
+        super().__init__()
+        self.global_attn_indexes = global_attn_indexes
+        self.patch_embed = PatchEmbed()
+
+        self.pos_embed = nn.Parameter(torch.zeros(1, 64, 64, 768))
+
+        self.blocks = nn.ModuleList(
+            [
+                Block(idx=i, window_size=14 if i not in global_attn_indexes else 0)
+                for i in range(12)
+            ]
+        )
+
+        self.neck = nn.ModuleList(
+            [
+                nn.Conv2d(
+                    768,
+                    256,
+                    kernel_size=1,
+                    bias=False,
+                ),
+                LayerNorm2d(256),
+                nn.Conv2d(
+                    256,
+                    256,
+                    kernel_size=3,
+                    padding=1,
+                    bias=False,
+                ),
+                LayerNorm2d(256),
+            ]
+        )
+
+        self.net_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False)
+        self.net_3 = nn.Conv2d(
+            512, 1024, kernel_size=3, stride=2, padding=1, bias=False
+        )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.patch_embed(x)
+        x = x + self.pos_embed
+
+        for blk in self.blocks:
+            x = blk(x)
+
+        x = x.permute(0, 3, 1, 2)
+
+        for m in self.neck:
+            x = m(x)
+
+        x = self.net_2(x)
+        x = self.net_3(x)
+
+        return x
+
+
+class GexQwenModel(Qwen2Model):
+    config_class = GexConfig
+    _auto_class = "AutoModel"
+
+    def __init__(self, config: Qwen2Config):
+        super().__init__(config)
+        self.vit = GexVit()
+        self.vit_proj = nn.Linear(1024, 1024)
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        images: Optional[torch.FloatTensor] = None,
+        return_dict: Optional[bool] = None,
+        **kwargs,
+    ) -> Union[Tuple, BaseModelOutputWithPast]:
+        if inputs_embeds is None and input_ids is not None:
+            inputs_embeds = self.embed_tokens(input_ids)
+            assert images is not None
+            # img_pos = input_ids == IMG_PAD_IDS
+            # if torch.any(img_pos):
+            vit_feature = self.vit(images).flatten(2).permute(0, 2, 1)
+            vit_feature = self.vit_proj(vit_feature)
+            # img_ids = img_pos.nonzero().squeeze_()
+            # inputs_embeds[img_ids[:, 0], img_ids[:, 1]] = vit_feature.view(-1, 1024)
+            inputs_embeds[:, 1:257, :] = vit_feature
+        with sdpa_kernel(SDPBackend.FLASH_ATTENTION):
+            return super().forward(
+                input_ids=None,
+                attention_mask=attention_mask,
+                past_key_values=past_key_values,
+                inputs_embeds=inputs_embeds,
+                use_cache=use_cache,
+                position_ids=position_ids,
+                output_attentions=output_attentions,
+                output_hidden_states=output_hidden_states,
+                return_dict=return_dict,
+                **kwargs,
+            )
+
+
+class GexQwenForCausalLM(Qwen2ForCausalLM):
+    config_class = GexConfig
+    # supports_gradient_checkpointing = True
+    _auto_class = "AutoModelForCausalLM"
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = GexQwenModel(config)
+
+        self.vocab_size = config.vocab_size
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+        self.image_preprocess = GexImageEvalProcessor()
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        logits_to_keep: Union[int, torch.Tensor] = 0,
+        images: Optional[torch.FloatTensor] = None,
+        **kwargs,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        output_attentions = (
+            output_attentions
+            if output_attentions is not None
+            else self.config.output_attentions
+        )
+        output_hidden_states = (
+            output_hidden_states
+            if output_hidden_states is not None
+            else self.config.output_hidden_states
+        )
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+
+        if labels is not None and input_ids is None:
+            input_ids: torch.Tensor = labels
+            shifted_input_ids = input_ids.new_zeros(
+                (input_ids.shape[0], input_ids.shape[1] + 256), device=input_ids.device
+            )
+            shifted_input_ids[:, 257:].copy_(input_ids[:, :-1])
+            decoder_start_token_id = BOS_TOKEN_IDS
+            shifted_input_ids[:, 0] = decoder_start_token_id
+            shifted_input_ids[:, 1:257] = IMG_PAD_IDS
+            input_ids = shifted_input_ids
+            imgs_pad: torch.Tensor = torch.full(
+                (1, 256), IMG_PAD_IDS, device=self.device, dtype=torch.long
+            )
+            labels = torch.cat(
+                [
+                    imgs_pad.expand(labels.shape[0], -1),
+                    process_batch_labels(labels),
+                ],
+                dim=-1,
+            )
+            # labels = process_batch_labels(labels)
+
+        outputs = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            cache_position=cache_position,
+            images=images,
+            **kwargs,
+        )
+
+        hidden_states = outputs[0]
+        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+        slice_indices = (
+            slice(-logits_to_keep, None)
+            if isinstance(logits_to_keep, int)
+            else logits_to_keep
+        )
+
+        logits = self.lm_head(hidden_states[:, slice_indices, :])
+        # if (past_key_values is None or len(past_key_values) <= 0):
+        #     logits = self.lm_head(hidden_states[:, 256:, :])
+        #     # if labels is not None:
+        #     #     lb = labels[:, 256:].contiguous()
+        #     #     del labels
+        #     #     labels = lb
+        # else:
+        #     slice_indices = (
+        #         slice(-logits_to_keep, None)
+        #         if isinstance(logits_to_keep, int)
+        #         else logits_to_keep
+        #     )
+        #     logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+        loss = None
+        if labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=None,
+                shift_labels=labels,
+                vocab_size=self.config.vocab_size,
+                **kwargs,
+            )
+
+        if not return_dict:
+            output = (logits,) + outputs[1:]
+            return (loss,) + output if loss is not None else output
+
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+    def generate(self, *args, images, **kwargs):
+        pad = torch.tensor(
+            [[BOS_TOKEN_IDS] + [IMG_PAD_IDS] * 256],
+            dtype=torch.long,
+            device=self.device,
+        )
+        if (input_ids := kwargs.pop("input_ids", None)) is not None:
+            input_ids = torch.cat(
+                [pad.expand(input_ids.shape[0], -1), input_ids], dim=-1
+            )
+        else:
+            input_ids = pad.expand(images.shape[0], -1)
+
+        res = super().generate(
+            *args,
+            input_ids=input_ids,
+            images=images,
+            max_length=kwargs.pop("max_length", 10) + 257,
+            **kwargs,
+        )
+        return res
+
+
+class GexTQwenModel(Qwen3Model):
+    config_class = GexTConfig
+    _auto_class = "AutoModel"
+
+    def __init__(self, config: Qwen3Config):
+        super().__init__(config)
+        self.vit = GexVit()
+        self.vit_proj = nn.Linear(1024, 1024)
+
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Cache] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        images: Optional[torch.FloatTensor] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        **flash_attn_kwargs,
+    ) -> Union[Tuple, BaseModelOutputWithPast]:
+        if inputs_embeds is None and input_ids is not None:
+            inputs_embeds = self.embed_tokens(input_ids)
+            assert images is not None
+            # img_pos = input_ids == IMG_PAD_IDS
+            # if torch.any(img_pos):
+            vit_feature = self.vit(images).flatten(2).permute(0, 2, 1)
+            vit_feature = self.vit_proj(vit_feature)
+            # img_ids = img_pos.nonzero().squeeze_()
+            # inputs_embeds[img_ids[:, 0], img_ids[:, 1]] = vit_feature.view(-1, 1024)
+            inputs_embeds[:, 1:257, :] = vit_feature
+        with sdpa_kernel(SDPBackend.FLASH_ATTENTION):
+            return super().forward(
+                input_ids=None,
+                attention_mask=attention_mask,
+                past_key_values=past_key_values,
+                inputs_embeds=inputs_embeds,
+                use_cache=use_cache,
+                position_ids=position_ids,
+                output_attentions=output_attentions,
+                output_hidden_states=output_hidden_states,
+                cache_position=cache_position,
+                **flash_attn_kwargs,
+            )
+
+
+class GexTQwenForCausalLM(Qwen3ForCausalLM):
+    config_class = GexTConfig
+    # supports_gradient_checkpointing = True
+    _auto_class = "AutoModelForCausalLM"
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = GexTQwenModel(config)
+
+        self.vocab_size = config.vocab_size
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+        self.image_preprocess = GexImageEvalProcessor()
+
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        logits_to_keep: Union[int, torch.Tensor] = 0,
+        images: Optional[torch.FloatTensor] = None,
+        **kwargs,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        output_attentions = (
+            output_attentions
+            if output_attentions is not None
+            else self.config.output_attentions
+        )
+        output_hidden_states = (
+            output_hidden_states
+            if output_hidden_states is not None
+            else self.config.output_hidden_states
+        )
+
+        if labels is not None and input_ids is None:
+            input_ids: torch.Tensor = labels
+            shifted_input_ids = input_ids.new_zeros(
+                (input_ids.shape[0], input_ids.shape[1] + 257), device=input_ids.device
+            )
+            shifted_input_ids[:, 258:].copy_(input_ids[:, :-1])
+            decoder_start_token_id = BOS_TOKEN_IDS
+            shifted_input_ids[:, 0] = decoder_start_token_id
+            shifted_input_ids[:, 257] = IMG_END_IDS
+            shifted_input_ids[:, 1:257] = IMG_PAD_IDS
+            input_ids = shifted_input_ids
+            imgs_pad: torch.Tensor = torch.full(
+                (1, 257), IMG_PAD_IDS, device=self.device, dtype=torch.long
+            )
+            imgs_pad[:, -1] = IMG_END_IDS
+            labels = torch.cat(
+                [
+                    imgs_pad.expand(labels.shape[0], -1),
+                    process_batch_labels(labels),
+                ],
+                dim=-1,
+            )
+            # labels = process_batch_labels(labels)
+
+        outputs = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            cache_position=cache_position,
+            images=images,
+            **kwargs,
+        )
+
+        hidden_states = outputs.last_hidden_state
+        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+
+        # if (past_key_values is None or len(past_key_values) <= 0):
+        #     logits = self.lm_head(hidden_states[:, 256:, :])
+        #     # if labels is not None:
+        #     #     lb = labels[:, 256:].contiguous()
+        #     #     del labels
+        #     #     labels = lb
+        # else:
+        #     slice_indices = (
+        #         slice(-logits_to_keep, None)
+        #         if isinstance(logits_to_keep, int)
+        #         else logits_to_keep
+        #     )
+        #     logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+        loss = None
+        if labels is not None:
+            if self.training and use_liger:
+                loss = LigerForCausalLMLoss(
+                    hidden_states=hidden_states,
+                    lm_head_weight=self.lm_head.weight,
+                    labels=None,
+                    shift_labels=labels,
+                    hidden_size=self.config.hidden_size,
+                    **kwargs,
+                )
+                logits = None
+
+            else:
+                slice_indices = (
+                    slice(-logits_to_keep, None)
+                    if isinstance(logits_to_keep, int)
+                    else logits_to_keep
+                )
+
+                logits = self.lm_head(hidden_states[:, slice_indices, :])
+                loss = self.loss_function(
+                    logits=logits,
+                    labels=None,
+                    shift_labels=labels,
+                    vocab_size=self.config.vocab_size,
+                    **kwargs,
+                )
+        else:
+            slice_indices = (
+                slice(-logits_to_keep, None)
+                if isinstance(logits_to_keep, int)
+                else logits_to_keep
+            )
+
+            logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+    def generate(self, *args, images, **kwargs):
+        pad = torch.tensor(
+            [[BOS_TOKEN_IDS] + [IMG_PAD_IDS] * 256 + [IMG_END_IDS]],
+            dtype=torch.long,
+            device=self.device,
+        )
+        if (input_ids := kwargs.pop("input_ids", None)) is not None:
+            input_ids = torch.cat(
+                [pad.expand(input_ids.shape[0], -1), input_ids], dim=-1
+            )
+        else:
+            input_ids = pad.expand(images.shape[0], -1)
+
+        res = super().generate(
+            *args,
+            input_ids=input_ids,
+            images=images,
+            max_length=kwargs.pop("max_length", 25) + 258,
+            **kwargs,
+        )
+        return res
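
GexTQwenForCausalLM.generate prepends <|vision_start|> (BOS_TOKEN_IDS), 256 <|image_pad|> slots, and the IMG_END token before any prompt, and GexTQwenModel splices the projected ViT features into those 256 positions. An end-to-end inference sketch, under the assumptions of a local download at ./gext, a CUDA device, and an input image page.png:

    # Sketch: end-to-end inference with the classes above (paths and device are assumptions).
    import torch
    from PIL import Image
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained("./gext")
    model = AutoModelForCausalLM.from_pretrained(
        "./gext", torch_dtype=torch.bfloat16, trust_remote_code=True
    ).cuda().eval()

    image = Image.open("page.png").convert("RGB")
    pixels = model.image_preprocess(image).unsqueeze(0).to("cuda", torch.bfloat16)

    with torch.inference_mode():
        out = model.generate(images=pixels, max_length=512)
    # The first 258 positions are the image prefix that generate() prepended.
    print(tok.decode(out[0, 258:], skip_special_tokens=True))
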
preprocessor_config.json ADDED
@@ -0,0 +1,14 @@
+{
+  "image_size": 1024,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "image_processor_type": "GexImageProcessor"
+}
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
+size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,240 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151646": {
+      "content": "<|object_ref_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151647": {
+      "content": "<|object_ref_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151648": {
+      "content": "<|box_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151649": {
+      "content": "<|box_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151650": {
+      "content": "<|quad_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151651": {
+      "content": "<|quad_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151652": {
+      "content": "<|vision_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151653": {
+      "content": "<|vision_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151654": {
+      "content": "<|vision_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151655": {
+      "content": "<|image_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151656": {
+      "content": "<|video_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151657": {
+      "content": "<tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151658": {
+      "content": "</tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151659": {
+      "content": "<|fim_prefix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151660": {
+      "content": "<|fim_middle|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151661": {
+      "content": "<|fim_suffix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151662": {
+      "content": "<|fim_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151663": {
+      "content": "<|repo_name|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151664": {
+      "content": "<|file_sep|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151665": {
+      "content": "<tool_response>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151666": {
+      "content": "</tool_response>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151667": {
+      "content": "<think>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151668": {
+      "content": "</think>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "bos_token": null,
+  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0].role == 'system' %}\n        {{- messages[0].content + '\\n\\n' }}\n    {%- endif %}\n    {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0].role == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n    {%- set index = (messages|length - 1) - loop.index0 %}\n    {%- if ns.multi_step_tool and message.role == \"user\" and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n        {%- set ns.multi_step_tool = false %}\n        {%- set ns.last_query_index = index %}\n    {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {%- set content = message.content %}\n        {%- set reasoning_content = '' %}\n        {%- if message.reasoning_content is defined and message.reasoning_content is not none %}\n            {%- set reasoning_content = message.reasoning_content %}\n        {%- else %}\n            {%- if '</think>' in message.content %}\n                {%- set content = message.content.split('</think>')[-1].lstrip('\\n') %}\n                {%- set reasoning_content = message.content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n            {%- endif %}\n        {%- endif %}\n        {%- if loop.index0 > ns.last_query_index %}\n            {%- if loop.last or (not loop.last and reasoning_content) %}\n                {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n            {%- else %}\n                {{- '<|im_start|>' + message.role + '\\n' + content }}\n            {%- endif %}\n        {%- else %}\n            {{- '<|im_start|>' + message.role + '\\n' + content }}\n        {%- endif %}\n        {%- if message.tool_calls %}\n            {%- for tool_call in message.tool_calls %}\n                {%- if (loop.first and content) or (not loop.first) %}\n                    {{- '\\n' }}\n                {%- endif %}\n                {%- if tool_call.function %}\n                    {%- set tool_call = tool_call.function %}\n                {%- endif %}\n                {{- '<tool_call>\\n{\"name\": \"' }}\n                {{- tool_call.name }}\n                {{- '\", \"arguments\": ' }}\n                {%- if tool_call.arguments is string %}\n                    {{- tool_call.arguments }}\n                {%- else %}\n                    {{- tool_call.arguments | tojson }}\n                {%- endif %}\n                {{- '}\\n</tool_call>' }}\n            {%- endfor %}\n        {%- endif %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n    {%- if enable_thinking is defined and enable_thinking is false %}\n        {{- '<think>\\n\\n</think>\\n\\n' }}\n    {%- endif %}\n{%- endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
+  "extra_special_tokens": {},
+  "model_max_length": 131072,
+  "pad_token": "<|endoftext|>",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
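
The chat_template above is the Qwen3-style ChatML template with <think> handling; passing enable_thinking=False at render time injects an empty think block. A short rendering sketch (the local path ./gext is an assumption):

    # Sketch: render a chat prompt with the template above.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("./gext")  # assumed local download
    text = tok.apply_chat_template(
        [{"role": "user", "content": "Describe the image."}],
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False,  # emits an empty <think>...</think> block
    )
    print(text)
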
vocab.json ADDED
The diff for this file is too large to render. See raw diff