yjj23 committed
Commit d04461f · verified · 1 Parent(s): 0932fdf

Initial model upload

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,88 @@
+ ---
+ language: en
+ tags:
+ - vision-language-model
+ - multimodal
+ - vision
+ - qwen
+ - siglip
+ license: apache-2.0
+ datasets:
+ - cc3m
+ ---
+
+ # VLM Model: Qwen2.5 + SigLIP
+
+ This model combines:
+ - Vision encoder: google/siglip-base-patch16-224
+ - Language model: Qwen/Qwen2.5-0.5B-Instruct
+
+ ## Model Details
+
+ - **Developed by:** [Your name or organization]
+ - **Model type:** Vision-Language Model (VLM)
+ - **Language(s):** English
+ - **License:** Apache 2.0
+
+ ## Usage
+
+ ```python
+ from transformers import AutoProcessor, AutoTokenizer
+ from vlm_model import VLMConfig, VLM
+
+ # Load configuration, model, and tokenizer
+ config = VLMConfig.from_pretrained("your-username/vlm-qwen-siglip")
+ model = VLM.from_pretrained("your-username/vlm-qwen-siglip")
+ tokenizer = AutoTokenizer.from_pretrained("your-username/vlm-qwen-siglip")
+ processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
+
+ # Usage example with an image
+ from PIL import Image
+ import torch
+
+ # Load image
+ image = Image.open("path/to/image.jpg").convert("RGB")
+
+ # Process image
+ processor_output = processor(text=None, images=image, return_tensors="pt")
+ pixel_values = processor_output['pixel_values']
+
+ # Create chat input
+ chat = [
+     {"role": "system", "content": "You are a helpful assistant."},
+     {"role": "user", "content": "What's in this image?" + "<|image_pad|>" * config.image_pad_num}
+ ]
+
+ # Apply chat template
+ input_text = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+ input_ids = tokenizer(input_text, return_tensors="pt").input_ids
+
+ # Generate response
+ with torch.no_grad():
+     generated_ids = model.generate(
+         input_ids=input_ids,
+         pixel_values=pixel_values,
+         max_new_tokens=200,
+         do_sample=True,
+         temperature=0.7,
+         top_p=0.9,
+     )
+
+ # Decode response (generation runs from inputs_embeds, so the output contains only new tokens)
+ response = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+ print(response)
+ ```
+
+ ## Training Procedure
+
+ This model was trained on the CC3M dataset (see the `datasets` tag above) using a custom training pipeline that:
+ 1. Processes images with the SigLIP vision encoder
+ 2. Projects the image features into the LLM embedding space
+ 3. Inserts the image features at the image-token positions in the text prompt
+ 4. Fine-tunes the projection layers while keeping the vision and language models frozen (see the sketch below)
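+
+ The projection in steps 2-4 is a small two-layer MLP. The sketch below is illustrative rather than a training script: it mirrors the projection step in `vlm_model/model.py`, with hidden sizes taken from the SigLIP-base and Qwen2.5-0.5B configs (768 and 896), and shows how 196 patch embeddings become the 49 `<|image_pad|>` slots set by `image_pad_num`.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ vision_hidden, llm_hidden = 768, 896        # SigLIP-base / Qwen2.5-0.5B hidden sizes
+ linear1 = nn.Linear(vision_hidden * 4, llm_hidden)
+ linear2 = nn.Linear(llm_hidden, llm_hidden)
+
+ patch_embeds = torch.randn(1, 196, vision_hidden)          # [batch, patches, dim] from the vision encoder
+ compressed = patch_embeds.view(1, -1, vision_hidden * 4)   # -> [1, 49, 3072]: 4 neighbouring patches per slot
+ image_features = linear2(F.silu(linear1(compressed)))      # -> [1, 49, 896], inserted at <|image_pad|> positions
+ ```
+
+ Because the compression is a fixed reshape, prompts must contain exactly `image_pad_num` (49) `<|image_pad|>` tokens per image, as in the usage example above.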
+
+ ## Limitations
+
+ - [List any known limitations of your model]
+ - [Performance characteristics]
+ - [Known issues]
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "architectures": [
+     "VLM"
+   ],
+   "freeze_vision_model": true,
+   "image_pad_num": 49,
+   "llm_model_id": "Qwen/Qwen2.5-0.5B-Instruct",
+   "model_type": "vlm_model",
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.2",
+   "vision_model_id": "google/siglip-base-patch16-224"
+ }
example.py ADDED
@@ -0,0 +1,38 @@
+ from vlm_model import VLMConfig, VLM
+ from transformers import AutoProcessor, AutoTokenizer
+ from PIL import Image
+ import torch
+
+ # Load model and tokenizers
+ config = VLMConfig.from_pretrained("YOUR_USERNAME/vlm-qwen-siglip")
+ model = VLM.from_pretrained("YOUR_USERNAME/vlm-qwen-siglip")
+ tokenizer = AutoTokenizer.from_pretrained("YOUR_USERNAME/vlm-qwen-siglip")
+ processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
+
+ # Load image
+ image = Image.open("your_image.jpg").convert("RGB")
+ processor_output = processor(text=None, images=image, return_tensors="pt")
+ pixel_values = processor_output['pixel_values']
+
+ # Create input with image placeholder
+ chat = [
+     {"role": "system", "content": "You are a helpful assistant."},
+     {"role": "user", "content": f"What's in this image?{('<|image_pad|>' * config.image_pad_num)}"}
+ ]
+ input_text = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+ input_ids = tokenizer(input_text, return_tensors="pt").input_ids
+
+ # Generate response
+ with torch.no_grad():
+     generated_ids = model.generate(
+         input_ids=input_ids,
+         pixel_values=pixel_values,
+         max_new_tokens=200,
+         do_sample=True,
+         temperature=0.7,
+         top_p=0.9,
+     )
+
+ # Decode response (generation starts from inputs_embeds, so the output contains only new tokens)
+ response = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+ print(response)
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a35ba549cd40c52fff2a7376cbee1b545f16ce0756df42e0591c6c8673e7f11
+ size 2803230786
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ transformers>=4.35.0
+ torch>=2.0.0
+ pillow>=9.0.0
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,208 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94bb7a6bce525b1ac9a3402c658f0ad0bb7971ce8f7c676d0dcb804c19587b5b
+ size 5304
vlm_model/__init__.py ADDED
@@ -0,0 +1 @@
+ from .model import VLMConfig, VLM
vlm_model/model.py ADDED
@@ -0,0 +1,139 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers import PreTrainedModel, PretrainedConfig, AutoTokenizer, AutoModelForCausalLM
+ from transformers import AutoProcessor, AutoModel
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+
+ # VLM Config and Model classes
+ class VLMConfig(PretrainedConfig):
+     model_type = "vlm_model"
+     def __init__(self,
+                  llm_model_id = "Qwen/Qwen2.5-0.5B-Instruct",
+                  vision_model_id = "google/siglip-base-patch16-224",
+                  freeze_vision_model = True,
+                  image_pad_num = 49,
+                  **kwargs):
+         self.vision_model_id = vision_model_id
+         self.llm_model_id = llm_model_id
+         self.freeze_vision_model = freeze_vision_model
+         self.image_pad_num = image_pad_num
+         super().__init__(**kwargs)
+
+ class VLM(PreTrainedModel):
+     config_class = VLMConfig
+     def __init__(self, config):
+         super().__init__(config)
+         self.config = config
+
+         # Load models from Hugging Face
+         self.vision_model = AutoModel.from_pretrained(self.config.vision_model_id)
+         self.processor = AutoProcessor.from_pretrained(self.config.vision_model_id)
+         self.llm_model = AutoModelForCausalLM.from_pretrained(self.config.llm_model_id)
+         self.tokenizer = AutoTokenizer.from_pretrained(self.config.llm_model_id)
+
+         # Projection layers
+         self.linear1 = nn.Linear(self.vision_model.config.vision_config.hidden_size*4, self.llm_model.config.hidden_size)
+         self.linear2 = nn.Linear(self.llm_model.config.hidden_size, self.llm_model.config.hidden_size)
+
+         # Freeze models
+         if self.config.freeze_vision_model:
+             for param in self.vision_model.parameters():
+                 param.requires_grad = False
+
+         for param in self.llm_model.parameters():
+             param.requires_grad = False
+
+     def forward(self, input_ids, labels=None, pixel_values=None, attention_mask=None):
+         # Get text embeddings
+         text_embeds = self.llm_model.get_input_embeddings()(input_ids)
+
+         if pixel_values is not None:
+             # Ensure pixel_values has the right shape [batch_size, channels, height, width]
+             if len(pixel_values.shape) == 3:
+                 pixel_values = pixel_values.unsqueeze(0)
+             # Handle case where pixel_values might have extra dimensions
+             elif len(pixel_values.shape) > 4:
+                 # Reshape to expected 4D format (assuming first dim is batch)
+                 b = pixel_values.shape[0]
+                 pixel_values = pixel_values.view(b, 3, 224, 224)  # Assuming standard 224x224 image size
+
+             # Get image embeddings
+             image_embeds = self.vision_model.vision_model(pixel_values).last_hidden_state
+             b, s, d = image_embeds.shape
+             # Compress image tokens
+             image_embeds = image_embeds.view(b, -1, d*4)
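+             # With google/siglip-base-patch16-224 this regroups 196 patch embeddings of dim 768
+             # into 49 tokens of dim 3072, matching the 49 <|image_pad|> slots (image_pad_num)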
+             image_features = self.linear2(F.silu(self.linear1(image_embeds)))
+
+             # Match dtype
+             text_embeds = text_embeds.to(image_features.dtype)
+
+             # Merge embeddings
+             inputs_embeds = self.merge_input_ids_with_image_features(image_features, text_embeds, input_ids)
+         else:
+             inputs_embeds = text_embeds
+
+         # Forward pass
+         outputs = self.llm_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask)
+         logits = outputs[0]
+
+         # Calculate loss
+         loss = None
+         if labels is not None:
+             loss_fct = nn.CrossEntropyLoss(ignore_index=self.tokenizer.pad_token_id)
+             loss = loss_fct(
+                 logits.view(-1, logits.size(-1)), labels.view(-1).to(logits.device)
+             )
+         return CausalLMOutputWithPast(loss=loss, logits=logits)
+
+     def merge_input_ids_with_image_features(self, image_features, inputs_embeds, input_ids):
+         # Replace image placeholder tokens with image features
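+         # Each prompt is expected to carry image_pad_num (49) <|image_pad|> tokens per image; every
+         # such position in the text embeddings is overwritten with one row of the projected image features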
91
+ batch_indices, image_indices = torch.where(input_ids == self.tokenizer('<|image_pad|>')['input_ids'][0])
92
+
93
+ # Handle the case of multiple batches with multiple images
94
+ if len(batch_indices) > 0:
95
+ num_images, num_image_patches, embed_dim = image_features.shape
96
+ # Group indices by batch
97
+ for b_idx in range(input_ids.shape[0]):
98
+ batch_mask = (batch_indices == b_idx)
99
+ if batch_mask.sum() > 0:
100
+ # Get indices for this batch
101
+ img_indices = image_indices[batch_mask]
102
+ # Replace tokens with image features
103
+ img_idx = min(b_idx, num_images-1) # Prevent out of bounds
104
+ inputs_embeds[b_idx, img_indices] = image_features[img_idx].repeat(len(img_indices), 1)[:len(img_indices)]
105
+
106
+ return inputs_embeds
107
+
108
+ def generate(self, input_ids=None, pixel_values=None, attention_mask=None, **kwargs):
109
+ # Process the input just like in forward pass
110
+ text_embeds = self.llm_model.get_input_embeddings()(input_ids)
111
+
112
+ if pixel_values is not None:
113
+ # Ensure pixel_values has the right shape
114
+ if len(pixel_values.shape) == 3:
115
+ pixel_values = pixel_values.unsqueeze(0)
116
+ elif len(pixel_values.shape) > 4:
117
+ b = pixel_values.shape[0]
118
+ pixel_values = pixel_values.view(b, 3, 224, 224)
119
+
120
+ # Get image embeddings and project
121
+ image_embeds = self.vision_model.vision_model(pixel_values).last_hidden_state
122
+ b, s, d = image_embeds.shape
123
+ image_embeds = image_embeds.view(b, -1, d*4)
124
+ image_features = self.linear2(F.silu(self.linear1(image_embeds)))
125
+
126
+ # Match dtype
127
+ text_embeds = text_embeds.to(image_features.dtype)
128
+
129
+ # Merge embeddings
130
+ inputs_embeds = self.merge_input_ids_with_image_features(image_features, text_embeds, input_ids)
131
+ else:
132
+ inputs_embeds = text_embeds
133
+
134
+ # Use the LLM's generate method with the processed inputs
135
+ return self.llm_model.generate(
136
+ inputs_embeds=inputs_embeds,
137
+ attention_mask=attention_mask,
138
+ **kwargs
139
+ )
vocab.json ADDED
The diff for this file is too large to render. See raw diff