littlebird13 committed
Commit 1e6d4ea · verified · 1 Parent(s): 82baf00

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+{
+  "</think>": 151668,
+  "</tool_call>": 151658,
+  "</tool_response>": 151666,
+  "<think>": 151667,
+  "<tool_call>": 151657,
+  "<tool_response>": 151665,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
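For orientation, a minimal sketch (the local checkout path is a placeholder) of how these ID mappings surface once the tokenizer is loaded:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./")  # placeholder: path to this repo checkout
assert tokenizer.convert_tokens_to_ids("<|im_end|>") == 151645
assert tokenizer.convert_tokens_to_ids("</think>") == 151668
print(tokenizer.decode([151644]))  # -> "<|im_start|>"
```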
chat_template.jinja ADDED
@@ -0,0 +1,125 @@
+{%- set default_system_message = 'Represent the user\'s input.' -%}
+{%- if tools %}
+{{- '<|im_start|>system\n' }}
+{%- if messages[0].role == 'system' %}
+{%- if messages[0].content is string %}
+{{- messages[0].content }}
+{%- else %}
+{%- for content in messages[0].content %}
+{%- if 'text' in content %}
+{{- content.text }}
+{%- endif %}
+{%- endfor %}
+{%- endif %}
+{{- '\n\n' }}
+{%- else %}
+{{- default_system_message + '\n\n' }}
+{%- endif %}
+{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+{%- for tool in tools %}
+{{- "\n" }}
+{{- tool | tojson }}
+{%- endfor %}
+{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+{%- else %}
+{%- if messages[0].role == 'system' %}
+{{- '<|im_start|>system\n' }}
+{%- if messages[0].content is string %}
+{{- messages[0].content }}
+{%- else %}
+{%- for content in messages[0].content %}
+{%- if 'text' in content %}
+{{- content.text }}
+{%- endif %}
+{%- endfor %}
+{%- endif %}
+{{- '<|im_end|>\n' }}
+{%- else %}
+{{- '<|im_start|>system\n' + default_system_message + '<|im_end|>\n' }}
+{%- endif %}
+{%- endif %}
+{%- set image_count = namespace(value=0) %}
+{%- set video_count = namespace(value=0) %}
+{%- for message in messages %}
+{%- if message.role == "user" %}
+{{- '<|im_start|>' + message.role + '\n' }}
+{%- if message.content is string %}
+{{- message.content }}
+{%- else %}
+{%- for content in message.content %}
+{%- if content.type == 'image' or 'image' in content or 'image_url' in content %}
+{%- set image_count.value = image_count.value + 1 %}
+{%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}
+<|vision_start|><|image_pad|><|vision_end|>
+{%- elif content.type == 'video' or 'video' in content %}
+{%- set video_count.value = video_count.value + 1 %}
+{%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}
+<|vision_start|><|video_pad|><|vision_end|>
+{%- elif 'text' in content %}
+{{- content.text }}
+{%- endif %}
+{%- endfor %}
+{%- endif %}
+{{- '<|im_end|>\n' }}
+{%- elif message.role == "assistant" %}
+{{- '<|im_start|>' + message.role + '\n' }}
+{%- if message.content is string %}
+{{- message.content }}
+{%- else %}
+{%- for content_item in message.content %}
+{%- if 'text' in content_item %}
+{{- content_item.text }}
+{%- endif %}
+{%- endfor %}
+{%- endif %}
+{%- if message.tool_calls %}
+{%- for tool_call in message.tool_calls %}
+{%- if (loop.first and message.content) or (not loop.first) %}
+{{- '\n' }}
+{%- endif %}
+{%- if tool_call.function %}
+{%- set tool_call = tool_call.function %}
+{%- endif %}
+{{- '<tool_call>\n{"name": "' }}
+{{- tool_call.name }}
+{{- '", "arguments": ' }}
+{%- if tool_call.arguments is string %}
+{{- tool_call.arguments }}
+{%- else %}
+{{- tool_call.arguments | tojson }}
+{%- endif %}
+{{- '}\n</tool_call>' }}
+{%- endfor %}
+{%- endif %}
+{{- '<|im_end|>\n' }}
+{%- elif message.role == "tool" %}
+{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+{{- '<|im_start|>user' }}
+{%- endif %}
+{{- '\n<tool_response>\n' }}
+{%- if message.content is string %}
+{{- message.content }}
+{%- else %}
+{%- for content in message.content %}
+{%- if content.type == 'image' or 'image' in content or 'image_url' in content %}
+{%- set image_count.value = image_count.value + 1 %}
+{%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}
+<|vision_start|><|image_pad|><|vision_end|>
+{%- elif content.type == 'video' or 'video' in content %}
+{%- set video_count.value = video_count.value + 1 %}
+{%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}
+<|vision_start|><|video_pad|><|vision_end|>
+{%- elif 'text' in content %}
+{{- content.text }}
+{%- endif %}
+{%- endfor %}
+{%- endif %}
+{{- '\n</tool_response>' }}
+{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+{{- '<|im_end|>\n' }}
+{%- endif %}
+{%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+{{- '<|im_start|>assistant\n' }}
+{%- endif %}
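To see what this template produces, here is a small sketch (the local path and image file are placeholders; the commented output is what the branches above should render for one user turn with a single image, no tools, and no explicit system message):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("./")  # placeholder: path to this repo checkout
messages = [{
    "role": "user",
    "content": [
        {"type": "image", "image": "file:///tmp/example.jpg"},  # hypothetical image
        {"type": "text", "text": "Two dogs playing in the snow."},
    ],
}]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)
# Expected shape of the output:
# <|im_start|>system
# Represent the user's input.<|im_end|>
# <|im_start|>user
# <|vision_start|><|image_pad|><|vision_end|>Two dogs playing in the snow.<|im_end|>
# <|im_start|>assistant
```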
config.json ADDED
@@ -0,0 +1,66 @@
+{
+  "architectures": [
+    "Qwen3VLForConditionalGeneration"
+  ],
+  "dtype": "bfloat16",
+  "image_token_id": 151655,
+  "model_type": "qwen3_vl",
+  "text_config": {
+    "attention_bias": false,
+    "attention_dropout": 0.0,
+    "bos_token_id": 151643,
+    "dtype": "bfloat16",
+    "eos_token_id": 151645,
+    "head_dim": 128,
+    "hidden_act": "silu",
+    "hidden_size": 2048,
+    "initializer_range": 0.02,
+    "intermediate_size": 6144,
+    "max_position_embeddings": 262144,
+    "model_type": "qwen3_vl_text",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 28,
+    "num_key_value_heads": 8,
+    "rms_norm_eps": 1e-06,
+    "rope_scaling": {
+      "mrope_interleaved": true,
+      "mrope_section": [
+        24,
+        20,
+        20
+      ],
+      "rope_type": "default"
+    },
+    "rope_theta": 5000000,
+    "tie_word_embeddings": true,
+    "use_cache": true,
+    "vocab_size": 151936
+  },
+  "tie_word_embeddings": true,
+  "transformers_version": "4.57.1",
+  "use_cache": false,
+  "video_token_id": 151656,
+  "vision_config": {
+    "deepstack_visual_indexes": [
+      5,
+      11,
+      17
+    ],
+    "depth": 24,
+    "dtype": "bfloat16",
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1024,
+    "in_channels": 3,
+    "initializer_range": 0.02,
+    "intermediate_size": 4096,
+    "model_type": "qwen3_vl",
+    "num_heads": 16,
+    "num_position_embeddings": 2304,
+    "out_hidden_size": 2048,
+    "patch_size": 16,
+    "spatial_merge_size": 2,
+    "temporal_patch_size": 2
+  },
+  "vision_end_token_id": 151653,
+  "vision_start_token_id": 151652
+}
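A few of these fields are worth reading programmatically. A short sketch (placeholder path) that pulls out the dimensions downstream code cares about:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./")  # placeholder: path to this repo checkout
text_cfg = config.text_config
vision_cfg = config.vision_config

print(text_cfg.hidden_size)            # 2048: width of the pooled embeddings
print(text_cfg.num_attention_heads,    # 16 query heads over...
      text_cfg.num_key_value_heads)    # ...8 KV heads (grouped-query attention)
print(vision_cfg.patch_size * vision_cfg.spatial_merge_size)  # 32 px per merged vision token
```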
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c73fa9caeddeb3ff831d46c085a7a5708343248ca777e90f2d486964464509c1
+size 4255140312
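This is a Git LFS pointer, not the weights themselves; after `git lfs pull`, the ~4.3 GB file can be checked against the pointer's oid. A small verification sketch:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream in 1 MiB chunks so the 4.3 GB checkpoint never sits fully in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "c73fa9caeddeb3ff831d46c085a7a5708343248ca777e90f2d486964464509c1"
assert sha256_of("model.safetensors") == expected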
preprocessor_config.json ADDED
@@ -0,0 +1,39 @@
+{
+  "crop_size": null,
+  "data_format": "channels_first",
+  "default_to_square": true,
+  "device": null,
+  "disable_grouping": null,
+  "do_center_crop": null,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_pad": null,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "Qwen2VLImageProcessorFast",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "input_data_format": null,
+  "max_pixels": 1310720,
+  "merge_size": 2,
+  "min_pixels": 4096,
+  "pad_size": null,
+  "patch_size": 16,
+  "processor_class": "Qwen3VLProcessor",
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "return_tensors": null,
+  "size": {
+    "longest_edge": 1310720,
+    "shortest_edge": 4096
+  },
+  "temporal_patch_size": 2
+}
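The `min_pixels`/`max_pixels` pair (4096 and 1310720) bounds how many pixels an image contributes after resizing, with both sides snapped to `patch_size * merge_size = 32`. A rough sketch of that budget logic (an illustration of the constraint, not the processor's exact resize routine):

```python
import math

FACTOR = 16 * 2                       # patch_size * merge_size: sides snap to this grid
MIN_PIXELS, MAX_PIXELS = 4096, 1310720

def budgeted_size(height: int, width: int) -> tuple:
    # Scale so the total pixel count lands in [MIN_PIXELS, MAX_PIXELS],
    # preserving aspect ratio, then round each side to the 32-px grid.
    area = height * width
    scale = 1.0
    if area > MAX_PIXELS:
        scale = math.sqrt(MAX_PIXELS / area)
    elif area < MIN_PIXELS:
        scale = math.sqrt(MIN_PIXELS / area)
    h = max(FACTOR, round(height * scale / FACTOR) * FACTOR)
    w = max(FACTOR, round(width * scale / FACTOR) * FACTOR)
    return h, w

print(budgeted_size(3024, 4032))  # a 12 MP photo lands near the 1.3 MP cap
```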
scripts/qwen3_vl_embedding.py ADDED
@@ -0,0 +1,326 @@
+import torch
+import torch.nn.functional as F
+import unicodedata
+import numpy as np
+import logging
+
+from PIL import Image
+from dataclasses import dataclass
+from typing import Optional, List, Union, Dict, Any
+from transformers.models.qwen3_vl.modeling_qwen3_vl import Qwen3VLPreTrainedModel, Qwen3VLModel, Qwen3VLConfig
+from transformers.models.qwen3_vl.processing_qwen3_vl import Qwen3VLProcessor
+from transformers.modeling_outputs import ModelOutput
+from transformers.processing_utils import Unpack
+from transformers.utils import TransformersKwargs
+from transformers.cache_utils import Cache
+from transformers.utils.generic import check_model_inputs
+from qwen_vl_utils.vision_process import process_vision_info
+
+logger = logging.getLogger(__name__)
+
+# Constants for configuration
+MAX_LENGTH = 8192
+IMAGE_BASE_FACTOR = 16
+IMAGE_FACTOR = IMAGE_BASE_FACTOR * 2
+MIN_PIXELS = 4 * IMAGE_FACTOR * IMAGE_FACTOR
+MAX_PIXELS = 1800 * IMAGE_FACTOR * IMAGE_FACTOR
+FPS = 1
+MAX_FRAMES = 64
+FRAME_MAX_PIXELS = 768 * IMAGE_FACTOR * IMAGE_FACTOR
+MAX_TOTAL_PIXELS = 10 * FRAME_MAX_PIXELS
+PAD_TOKEN = "<|endoftext|>"
+
+
+# Define output structure for embeddings
+@dataclass
+class Qwen3VLForEmbeddingOutput(ModelOutput):
+    last_hidden_state: Optional[torch.FloatTensor] = None
+    attention_mask: Optional[torch.Tensor] = None
+
+
+# Define model class to compute embeddings
+class Qwen3VLForEmbedding(Qwen3VLPreTrainedModel):
+    _checkpoint_conversion_mapping = {}
+    accepts_loss_kwargs = False
+    config: Qwen3VLConfig
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = Qwen3VLModel(config)
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.get_input_embeddings()
+
+    def set_input_embeddings(self, value):
+        self.model.set_input_embeddings(value)
+
+    def set_decoder(self, decoder):
+        self.model.set_decoder(decoder)
+
+    def get_decoder(self):
+        return self.model.get_decoder()
+
+    # Extract video features from model
+    def get_video_features(self, pixel_values_videos: torch.FloatTensor,
+                           video_grid_thw: Optional[torch.LongTensor] = None):
+        return self.model.get_video_features(pixel_values_videos, video_grid_thw)
+
+    # Extract image features from model
+    def get_image_features(self, pixel_values: torch.FloatTensor,
+                           image_grid_thw: Optional[torch.LongTensor] = None):
+        return self.model.get_image_features(pixel_values, image_grid_thw)
+
+    # Make modules accessible through properties
+    @property
+    def language_model(self):
+        return self.model.language_model
+
+    @property
+    def visual(self):
+        return self.model.visual
+
+    # Forward pass through model with input parameters
+    # @check_model_inputs
+    def forward(self,
+                input_ids: torch.LongTensor = None,
+                attention_mask: Optional[torch.Tensor] = None,
+                position_ids: Optional[torch.LongTensor] = None,
+                past_key_values: Optional[Cache] = None,
+                inputs_embeds: Optional[torch.FloatTensor] = None,
+                pixel_values: Optional[torch.Tensor] = None,
+                pixel_values_videos: Optional[torch.FloatTensor] = None,
+                image_grid_thw: Optional[torch.LongTensor] = None,
+                video_grid_thw: Optional[torch.LongTensor] = None,
+                cache_position: Optional[torch.LongTensor] = None,
+                logits_to_keep: Union[int, torch.Tensor] = 0,
+                **kwargs: Unpack[TransformersKwargs],
+                ) -> Union[tuple, Qwen3VLForEmbeddingOutput]:
+        # Pass inputs through the model
+        outputs = self.model(
+            input_ids=input_ids,
+            pixel_values=pixel_values,
+            pixel_values_videos=pixel_values_videos,
+            image_grid_thw=image_grid_thw,
+            video_grid_thw=video_grid_thw,
+            position_ids=position_ids,
+            attention_mask=attention_mask,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            cache_position=cache_position,
+            **kwargs,
+        )
+        # Return the model output
+        return Qwen3VLForEmbeddingOutput(
+            last_hidden_state=outputs.last_hidden_state,
+            attention_mask=attention_mask,
+        )
+
+
+# Define embedder class for processing inputs and generating embeddings
+class Qwen3VLEmbedder:
+    def __init__(self, model_name_or_path: str, max_length: int = MAX_LENGTH,
+                 instruction: Optional[str] = None, **kwargs):
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.max_length = max_length
+        self.instruction = instruction or "Represent the user's input."
+        # Set pixel and frame configurations
+        self.min_pixels = kwargs.pop('min_pixels', MIN_PIXELS)
+        self.max_pixels = kwargs.pop('max_pixels', MAX_PIXELS)
+        self.total_pixels = kwargs.pop('total_pixels', MAX_TOTAL_PIXELS)
+        self.fps = kwargs.pop('fps', FPS)
+        self.num_frames = kwargs.pop('num_frames', MAX_FRAMES)
+        self.max_frames = kwargs.pop('max_frames', MAX_FRAMES)
+
+        # Initialize model and processor
+        self.model = Qwen3VLForEmbedding.from_pretrained(
+            model_name_or_path, trust_remote_code=True, **kwargs
+        ).to(device)
+        self.processor = Qwen3VLProcessor.from_pretrained(
+            model_name_or_path, padding_side='right'
+        )
+
+        self.model.eval()  # Set model to evaluation mode
+
+    # Forward pass for the embedder model
+    @torch.no_grad()
+    def forward(self, inputs: Dict[str, Any]) -> Dict[str, torch.Tensor]:
+        outputs = self.model(**inputs)
+        return {
+            'last_hidden_state': outputs.last_hidden_state,
+            'attention_mask': inputs.get('attention_mask')
+        }
+
+    # Sample frames from video files
+    def _sample_frames(self, frames: List[str], num_segments: int, max_segments: int) -> List[str]:
+        duration = len(frames)
+        frame_id_array = np.linspace(0, duration - 1, num_segments, dtype=int)
+        frame_id_list = frame_id_array.tolist()
+        last_frame_id = frame_id_list[-1]
+
+        # Create a list of sampled frames
+        sampled_frames = []
+        for frame_idx in frame_id_list:
+            try:
+                sampled_frames.append(frames[frame_idx])
+            except IndexError:  # a bare except here would also mask unrelated errors
+                break
+        # Ensure the sampled list meets the required segment count
+        while len(sampled_frames) < num_segments:
+            sampled_frames.append(frames[last_frame_id])
+        return sampled_frames[:max_segments]
+
+    # Truncate token sequence to a specified max length
+    def _truncate_tokens(self, token_ids: List[int], max_length: int) -> List[int]:
+        if len(token_ids) <= max_length:
+            return token_ids
+
+        special_token_ids = set(self.processor.tokenizer.all_special_ids)
+        num_special = sum(1 for token_idx in token_ids if token_idx in special_token_ids)
+        num_non_special_to_keep = max_length - num_special
+
+        final_token_ids = []
+        non_special_kept_count = 0
+        # Ensure retention of special tokens while truncating the rest
+        for token_idx in token_ids:
+            if token_idx in special_token_ids:
+                final_token_ids.append(token_idx)
+            elif non_special_kept_count < num_non_special_to_keep:
+                final_token_ids.append(token_idx)
+                non_special_kept_count += 1
+        return final_token_ids
+
+    # Format input based on provided text, image, video, and instruction
+    def format_model_input(self, text: Optional[str] = None,
+                           image: Optional[Union[str, Image.Image]] = None,
+                           video: Optional[Union[str, List[str]]] = None,
+                           instruction: Optional[str] = None,
+                           fps: Optional[float] = None,
+                           max_frames: Optional[int] = None) -> List[Dict]:
+
+        # Ensure instruction ends with punctuation
+        if instruction:
+            instruction = instruction.strip()
+            if instruction and not unicodedata.category(instruction[-1]).startswith('P'):
+                instruction = instruction + '.'
+
+        # Initialize conversation with system prompt
+        content = []
+        conversation = [
+            {"role": "system", "content": [{"type": "text", "text": instruction or self.instruction}]},
+            {"role": "user", "content": content}
+        ]
+
+        # Add text, image, or video content to conversation
+        if not text and not image and not video:
+            content.append({'type': 'text', 'text': ""})
+            return conversation
+
+        if video:
+            video_content = None
+            if isinstance(video, list):
+                video_content = video
+                if self.num_frames is not None or self.max_frames is not None:
+                    video_content = self._sample_frames(video_content, self.num_frames, self.max_frames)
+                video_content = ['file://' + ele for ele in video_content]
+            elif isinstance(video, str):
+                video_content = video if video.startswith(('http', 'oss')) else 'file://' + video
+            else:
+                video_content = video
+
+            # Add video input details to content
+            if video_content:
+                content.append({
+                    'type': 'video', 'video': video_content,
+                    'total_pixels': self.total_pixels,
+                    'max_frames': max_frames or self.max_frames,
+                    'fps': fps or self.fps,
+                    'sample_fps': fps or self.fps,
+                })
+
+        if image:
+            image_content = None
+            if isinstance(image, Image.Image):
+                image_content = image
+            elif isinstance(image, str):
+                image_content = image if image.startswith(('http', 'oss')) else 'file://' + image
+            else:
+                image_content = image
+
+            # Add image input details to content
+            if image_content:
+                content.append({
+                    'type': 'image', 'image': image_content,
+                    "min_pixels": self.min_pixels,
+                    "max_pixels": self.max_pixels
+                })
+
+        if text:
+            content.append({'type': 'text', 'text': text})
+
+        return conversation
+
+    # Preprocess input conversations for model consumption
+    def _preprocess_inputs(self, conversations: List[List[Dict]]) -> Dict[str, torch.Tensor]:
+        text = self.processor.apply_chat_template(
+            conversations, add_generation_prompt=True, tokenize=False
+        )
+
+        try:
+            images, video_inputs, video_kwargs = process_vision_info(
+                conversations, image_patch_size=16,
+                return_video_metadata=True, return_video_kwargs=True
+            )
+        except Exception as e:
+            logger.warning(f"Error in processing vision info: {e}")
+            images = None
+            video_inputs = None
+            video_kwargs = {'do_sample_frames': False}
+            text = self.processor.apply_chat_template(
+                [{'role': 'user', 'content': [{'type': 'text', 'text': 'NULL'}]}],
+                add_generation_prompt=True, tokenize=False
+            )
+
+        if video_inputs is not None:
+            videos, video_metadata = zip(*video_inputs)
+            videos = list(videos)
+            video_metadata = list(video_metadata)
+        else:
+            videos, video_metadata = None, None
+
+        inputs = self.processor(
+            text=text, images=images, videos=videos, video_metadata=video_metadata, truncation=True,
+            max_length=self.max_length, padding=True, do_resize=False, return_tensors='pt',
+            **video_kwargs
+        )
+        return inputs
+
+    # Pool the last hidden state by attention mask for embeddings
+    @staticmethod
+    def _pooling_last(hidden_state: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
+        flipped_tensor = attention_mask.flip(dims=[1])
+        last_one_positions = flipped_tensor.argmax(dim=1)
+        col = attention_mask.shape[1] - last_one_positions - 1
+        row = torch.arange(hidden_state.shape[0], device=hidden_state.device)
+        return hidden_state[row, col]
+
+    # Process inputs to generate normalized embeddings
+    def process(self, inputs: List[Dict[str, Any]], normalize: bool = True) -> torch.Tensor:
+        conversations = [self.format_model_input(
+            text=ele.get('text'),
+            image=ele.get('image'),
+            video=ele.get('video'),
+            instruction=ele.get('instruction'),
+            fps=ele.get('fps'),
+            max_frames=ele.get('max_frames')
+        ) for ele in inputs]
+
+        processed_inputs = self._preprocess_inputs(conversations)
+        processed_inputs = {k: v.to(self.model.device) for k, v in processed_inputs.items()}
+
+        outputs = self.forward(processed_inputs)
+        embeddings = self._pooling_last(outputs['last_hidden_state'], outputs['attention_mask'])
+
+        # Normalize the embeddings if specified
+        if normalize:
+            embeddings = F.normalize(embeddings, p=2, dim=-1)
+
+        return embeddings
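End to end, the embedder above is meant to be used roughly like this (a usage sketch; the checkpoint path and media files are placeholders, and `qwen-vl-utils` must be installed for `process_vision_info`):

```python
embedder = Qwen3VLEmbedder("./", max_length=8192)  # placeholder: path to this repo checkout

batch = [
    {"text": "a cat sitting on a windowsill"},
    {"image": "/tmp/cat.jpg"},  # hypothetical local image
    {"text": "pets at home", "instruction": "Represent the query for retrieval"},
]
embeddings = embedder.process(batch, normalize=True)  # (3, 2048) given hidden_size 2048
scores = embeddings @ embeddings.T                    # cosine similarities, since normalized
print(scores)
```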
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:def76fb086971c7867b829c23a26261e38d9d74e02139253b38aeb9df8b4b50a
+size 11423705
tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151646": {
+      "content": "<|object_ref_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151647": {
+      "content": "<|object_ref_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151648": {
+      "content": "<|box_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151649": {
+      "content": "<|box_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151650": {
+      "content": "<|quad_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151651": {
+      "content": "<|quad_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151652": {
+      "content": "<|vision_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151653": {
+      "content": "<|vision_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151654": {
+      "content": "<|vision_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151655": {
+      "content": "<|image_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151656": {
+      "content": "<|video_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151657": {
+      "content": "<tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151658": {
+      "content": "</tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151659": {
+      "content": "<|fim_prefix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151660": {
+      "content": "<|fim_middle|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151661": {
+      "content": "<|fim_suffix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151662": {
+      "content": "<|fim_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151663": {
+      "content": "<|repo_name|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151664": {
+      "content": "<|file_sep|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151665": {
+      "content": "<tool_response>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151666": {
+      "content": "</tool_response>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151667": {
+      "content": "<think>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151668": {
+      "content": "</think>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "bos_token": null,
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
+  "extra_special_tokens": {},
+  "model_max_length": 262144,
+  "pad_token": "<|endoftext|>",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
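Two settings here interact directly with the embedding script: `pad_token` (`<|endoftext|>`) and right-padding. A quick check sketch (placeholder path):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./")  # placeholder: path to this repo checkout
print(tokenizer.eos_token)  # <|im_end|>
print(tokenizer.pad_token)  # <|endoftext|>
# With right-padding, pad positions have attention_mask == 0, which is exactly
# what _pooling_last in scripts/qwen3_vl_embedding.py relies on to find the
# last real token of each sequence.
```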
video_preprocessor_config.json ADDED
@@ -0,0 +1,41 @@
+{
+  "crop_size": null,
+  "data_format": "channels_first",
+  "default_to_square": true,
+  "device": null,
+  "do_center_crop": null,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "do_sample_frames": true,
+  "fps": 2,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "input_data_format": null,
+  "max_frames": 768,
+  "merge_size": 2,
+  "min_frames": 4,
+  "num_frames": null,
+  "pad_size": null,
+  "patch_size": 16,
+  "processor_class": "Qwen3VLProcessor",
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "return_metadata": false,
+  "size": {
+    "longest_edge": 25165824,
+    "shortest_edge": 4096
+  },
+  "temporal_patch_size": 2,
+  "video_metadata": null,
+  "video_processor_type": "Qwen3VLVideoProcessor"
+}
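An illustrative frame-budget calculation under these settings (`fps: 2`, `min_frames: 4`, `max_frames: 768`, `temporal_patch_size: 2`); this mirrors the constraints in the config, not the video processor's exact sampling code:

```python
def sampled_frame_count(duration_s: float, fps: float = 2.0,
                        min_frames: int = 4, max_frames: int = 768,
                        temporal_patch: int = 2) -> int:
    n = round(duration_s * fps)
    n = max(min_frames, min(n, max_frames))
    # Keep the count even so frames pair up into temporal patches of 2.
    return n - (n % temporal_patch)

print(sampled_frame_count(10.0))    # 20 frames for a 10 s clip at 2 fps
print(sampled_frame_count(3600.0))  # capped at 768 frames for an hour-long video
```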
vocab.json ADDED
The diff for this file is too large to render. See raw diff