Fahad-S committed on
Commit b25349d · verified · 1 Parent(s): c83c97e

Upload A_MobileO_With_Edit/llava_arch.py with huggingface_hub

Files changed (1)
  1. A_MobileO_With_Edit/llava_arch.py +486 -0
A_MobileO_With_Edit/llava_arch.py ADDED
@@ -0,0 +1,486 @@
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from abc import ABC, abstractmethod
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from diffusers import FlowMatchEulerDiscreteScheduler, DPMSolverMultistepScheduler
from diffusers.models.embeddings import PixArtAlphaTextProjection
from diffusers.models.normalization import RMSNorm

from blip3o.constants import DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, DEFAULT_IMAGE_PATCH_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX

from .mobile_block import MobileConditioningProjector
from .multimodal_llava_encoder.builder import build_vision_tower
from .multimodal_llava_projector.builder import build_vision_projector
from .multimodal_projector.builder import build_down_projector
from .multimodal_decoder.builder import build_vae, build_sana

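# A small MLP bridge from LLM hidden states to the DiT conditioning width
# (896 -> hidden -> 2304, RMS-normalized). Kept for reference: the model
# below actually instantiates MobileConditioningProjector instead.
#
# Minimal usage sketch (shapes are illustrative, not taken from a config):
#
#   connector = DiffusionConnector(input_dim=896, hidden_dim=1024, output_dim=2304)
#   hidden = torch.randn(2, 64, 896)   # [batch, tokens, llm_hidden]
#   cond = connector(hidden)           # -> [2, 64, 2304], fed to the DiT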
class DiffusionConnector(nn.Module):
    def __init__(self, input_dim=896, hidden_dim=1024, output_dim=2304, eps=1e-5):
        super().__init__()
        self.linear1 = nn.Linear(input_dim, hidden_dim)
        self.act = nn.GELU(approximate="tanh")
        self.linear2 = nn.Linear(hidden_dim, output_dim)
        self.norm = RMSNorm(output_dim, eps=eps, elementwise_affine=True)

        nn.init.xavier_uniform_(self.linear1.weight)
        nn.init.zeros_(self.linear1.bias)
        nn.init.xavier_uniform_(self.linear2.weight)
        nn.init.zeros_(self.linear2.bias)
        with torch.no_grad():
            self.norm.weight.fill_(math.sqrt(5.5))

    def forward(self, x):
        x = self.linear1(x)
        x = self.act(x)
        x = self.linear2(x)
        x = self.norm(x)
        return x

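# LlavaMetaModel is a mixin: it has no nn.Module base of its own and is meant
# to be combined with a transformers PreTrainedModel subclass that supplies
# self.config and self.device, which is why __init__ chains to super() with a
# config argument. (Assumption based on the standard LLaVA layout; the
# concrete model class lives elsewhere in the package.)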
class LlavaMetaModel:

    def __init__(self, config):
        super(LlavaMetaModel, self).__init__(config)

        if hasattr(config, "mm_vision_tower"):
            self.vision_tower = build_vision_tower(config, delay_load=True)
            self.mm_projector = build_vision_projector(config)
        if hasattr(config, "diffusion_name_or_path"):
            self.dit = build_sana(config)
            self.vae = build_vae(config)
            #self.diffusion_connector = DiffusionConnector(input_dim=self.config.hidden_size,hidden_dim=1024,output_dim=2304)
            self.diffusion_connector = MobileConditioningProjector(input_dim=896, hidden_dim=512, output_dim=2304, num_layers=config.vlm_num_layers)

            '''
            norm = RMSNorm(896, eps=1e-5, elementwise_affine=True)
            with torch.no_grad():
                norm.weight.fill_(math.sqrt(5.5))
            self.diffusion_connector = nn.Sequential(
                nn.Linear(config.hidden_size, 896),
                nn.GELU(approximate="tanh"),
                nn.Linear(896, 896),
                norm,
            )
            '''
            if hasattr(config, "is_train"):
                if config.is_train:
                    print("FLOW MATCHING !!")
                    self.noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(config.diffusion_name_or_path, subfolder="scheduler")
                else:
                    print("DPM SOLVER !!")
                    self.noise_scheduler = DPMSolverMultistepScheduler.from_pretrained(config.diffusion_name_or_path, subfolder="scheduler")
            else:
                print("FLOW MATCHING !!")
                self.noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(config.diffusion_name_or_path, subfolder="scheduler")

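    # The getters below unwrap the one-element list that initialize_vision_modules
    # uses to keep these submodules out of FSDP's parameter sharding.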
    def get_vision_tower(self):
        vision_tower = getattr(self, 'vision_tower', None)
        if type(vision_tower) is list:
            vision_tower = vision_tower[0]
        return vision_tower

    def get_sana(self):
        dit = getattr(self, 'dit', None)
        if type(dit) is list:
            dit = dit[0]
        if dit is not None:
            dit.to(self.device)
        return dit

    def get_sana_vae(self):
        vae = getattr(self, 'vae', None)
        if type(vae) is list:
            vae = vae[0]
        if vae is not None:
            vae.to(self.device)
        return vae

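    # Builds (or reuses) the SANA DiT, its VAE, the vision tower, and the
    # connector/projector modules, then records the multimodal settings on
    # self.config. Passing a non-empty fsdp list wraps freshly built modules
    # in a one-element list, as consumed by the getters above.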
    def initialize_vision_modules(self, model_args, fsdp=None):
        mm_vision_select_layer = model_args.mm_vision_select_layer
        mm_vision_select_feature = model_args.mm_vision_select_feature
        mm_patch_merge_type = model_args.mm_patch_merge_type

        if self.get_sana() is None:
            dit = build_sana(model_args)
            if hasattr(model_args, "is_train"):
                if model_args.is_train:
                    print("FLOW MATCHING !!")
                    self.noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(model_args.diffusion_name_or_path, subfolder="scheduler")
                else:
                    print("DPM SOLVER !!")
                    self.noise_scheduler = DPMSolverMultistepScheduler.from_pretrained(model_args.diffusion_name_or_path, subfolder="scheduler")

            if fsdp is not None and len(fsdp) > 0:
                self.dit = [dit]
            else:
                self.dit = dit
        else:
            if fsdp is not None and len(fsdp) > 0:
                dit = self.dit[0]
            else:
                dit = self.dit
            for p in dit.parameters():
                p.requires_grad = False

        if self.get_sana_vae() is None:
            vae = build_vae(model_args)

            if fsdp is not None and len(fsdp) > 0:
                self.vae = [vae]
            else:
                self.vae = vae
        else:
            if fsdp is not None and len(fsdp) > 0:
                vae = self.vae[0]
            else:
                vae = self.vae
            for p in vae.parameters():
                p.requires_grad = False

        if self.get_vision_tower() is None:
            print("=" * 20, "Building vision tower", "=" * 20)
            vision_tower = build_vision_tower(model_args)

            if fsdp is not None and len(fsdp) > 0:
                self.vision_tower = [vision_tower]
            else:
                self.vision_tower = vision_tower
        else:
            if fsdp is not None and len(fsdp) > 0:
                vision_tower = self.vision_tower[0]
            else:
                vision_tower = self.vision_tower
            vision_tower.load_model()

        if getattr(self, 'diffusion_connector', None) is None:
            #self.diffusion_connector = DiffusionConnector(input_dim=self.config.hidden_size,hidden_dim=1024,output_dim=2304)
            self.diffusion_connector = MobileConditioningProjector(input_dim=896, hidden_dim=512, output_dim=2304, num_layers=model_args.vlm_num_layers)

            '''
            norm = RMSNorm(2304, eps=1e-5, elementwise_affine=True)
            with torch.no_grad():
                norm.weight.fill_(math.sqrt(5.5))
            self.diffusion_connector = nn.Sequential(
                nn.Linear(self.config.hidden_size, 1024),
                nn.GELU(approximate="tanh"),
                nn.Linear(1024, 2304),
                norm,
            )
            '''
        else:
            for p in self.diffusion_connector.parameters():
                p.requires_grad = True

        # freeze all parameters in dit except for caption_projection
        # (use the local `dit`, which is already unwrapped from the FSDP list;
        # self.dit may be a plain list here and has no named_parameters())
        for name, param in dit.named_parameters():
            if "caption" in name:
                param.requires_grad = True
            else:
                param.requires_grad = False

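        # NOTE: the loops below re-enable gradients for *all* DiT and
        # vision-tower parameters, overriding the selective caption-only
        # freeze above; this appears intentional for full fine-tuning.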
        for p in dit.parameters():
            p.requires_grad = True
        for p in vision_tower.parameters():
            p.requires_grad = True

        self.config.use_mm_proj = True
        self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
        self.config.mm_vision_select_layer = mm_vision_select_layer
        self.config.mm_vision_select_feature = mm_vision_select_feature
        self.config.mm_patch_merge_type = mm_patch_merge_type
        self.config.diffusion_name_or_path = model_args.diffusion_name_or_path
        self.config.is_train = False  #model_args.is_train

        if getattr(self, 'down_projector', None) is None:
            self.down_projector = build_down_projector(self.config)
        else:
            # In case it is frozen by LoRA
            for p in self.down_projector.parameters():
                p.requires_grad = True

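# Worked example for unpad_image (illustrative numbers): a 640x480 image
# letterboxed into a 336x336 tensor has aspect ratio 4:3 > 1:1, so
# scale_factor = 336 / 640 = 0.525, new_height = int(480 * 0.525) = 252,
# padding = (336 - 252) // 2 = 42, and the function returns
# tensor[:, 42:294, :] with shape [C, 252, 336].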
def unpad_image(tensor, original_size):
    """
    Unpads a PyTorch tensor of a padded and resized image.

    Args:
        tensor (torch.Tensor): The image tensor, assumed to be in CxHxW format.
        original_size (tuple): The original size of the PIL image (width, height).

    Returns:
        torch.Tensor: The unpadded image tensor.
    """
    original_width, original_height = original_size
    current_height, current_width = tensor.shape[1:]

    original_aspect_ratio = original_width / original_height
    current_aspect_ratio = current_width / current_height

    if original_aspect_ratio > current_aspect_ratio:
        scale_factor = current_width / original_width
        new_height = int(original_height * scale_factor)
        padding = (current_height - new_height) // 2
        unpadded_tensor = tensor[:, padding:current_height - padding, :]
    else:
        scale_factor = current_height / original_height
        new_width = int(original_width * scale_factor)
        padding = (current_width - new_width) // 2
        unpadded_tensor = tensor[:, :, padding:current_width - padding]

    return unpadded_tensor

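# Mixin for the causal-LM side: the concrete model implements get_model() to
# expose the LlavaMetaModel above, and everything else here builds on that.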
class LlavaMetaForCausalLM(ABC):

    @abstractmethod
    def get_model(self):
        pass

    def get_vision_tower(self):
        return self.get_model().get_vision_tower()

    def visual(self, pixel_values: torch.Tensor) -> torch.Tensor:
        image_features = self.get_model().get_vision_tower()(pixel_values)
        image_features = self.get_model().mm_projector(image_features)
        return image_features

    def get_mm_projector(self):
        return self.get_model().mm_projector

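    # get_sigmas looks up the noise level sigma for each sampled timestep by
    # matching it against the scheduler's timestep table, then appends
    # singleton dims so it broadcasts against a latent of rank n_dim.
    # Sketch (hypothetical usage): timesteps of shape [B] yield sigma of
    # shape [B, 1, 1, 1] for n_dim=4, so an interpolation such as
    # `noisy = (1 - sigma) * latents + sigma * noise` broadcasts over
    # [B, C, H, W] latents.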
    def get_sigmas(self, timesteps, device, n_dim=4, dtype=torch.float32):
        sigmas = self.get_model().noise_scheduler.sigmas.to(device=device, dtype=dtype)
        schedule_timesteps = self.get_model().noise_scheduler.timesteps.to(device=device)
        timesteps = timesteps.to(device)
        step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < n_dim:
            sigma = sigma.unsqueeze(-1)
        return sigma

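    # mask_drop implements conditioning dropout for classifier-free guidance:
    # with probability drop_prob an entire sample's conditioning is zeroed.
    # E.g. with drop_prob=0.1, on average 10% of the batch rows become zeros.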
    def mask_drop(self, latents, drop_prob=0.1):
        if drop_prob <= 0:
            return latents
        mask = torch.bernoulli(torch.zeros(latents.shape[0], device=latents.device, dtype=latents.dtype) + drop_prob)
        while len(mask.shape) < len(latents.shape):
            mask = mask.unsqueeze(-1)
        mask = 1 - mask  # need to flip 0 <-> 1
        return latents * mask

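    # Splices projected image features into the token-embedding sequence:
    # each IMAGE_TOKEN_INDEX placeholder in input_ids is replaced by that
    # image's feature rows (with labels set to IGNORE_INDEX there), sequences
    # are re-padded to the batch max length, and, when gen_images are given,
    # scaled VAE latents are returned as the diffusion target.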
    def prepare_inputs_labels_for_multimodal(
        self, input_ids, position_ids, attention_mask, past_key_values, labels,
        gen_images=None, und_images=None
    ):
        if (gen_images is None and und_images is None) or input_ids.shape[1] == 1 or self.get_vision_tower() is None:
            # Return the same 7-tuple shape as the full path below.
            return input_ids, position_ids, attention_mask, past_key_values, None, labels, None
        if gen_images is not None:
            vae = self.get_model().get_sana_vae()
            vae_device = vae.device
            prompt_image_embeds = vae.encode(gen_images.to(vae_device)).latent
            prompt_image_embeds = prompt_image_embeds * vae.config.scaling_factor
            target_image_embeds = torch.clone(prompt_image_embeds).detach()
        else:
            target_image_embeds = None

        images = und_images
        if type(images) is list or images.ndim == 5:
            if type(images) is list:
                images = [x.unsqueeze(0) if x.ndim == 3 else x for x in images]
            concat_images = torch.cat([image for image in images], dim=0)
            image_features = self.visual(concat_images)
            split_sizes = [image.shape[0] for image in images]
            image_features = torch.split(image_features, split_sizes, dim=0)
            image_features = [x.flatten(0, 1) for x in image_features]
        else:
            image_features = self.visual(images)  # [B, image_tokens, hidden_size]

        # Let's just add dummy tensors if they do not exist,
        # it is a headache to deal with None all the time.
        # But it is not ideal, and if you have a better idea,
        # please open an issue / submit a PR, thanks.
        _labels = labels
        _position_ids = position_ids
        _attention_mask = attention_mask
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
        else:
            attention_mask = attention_mask.bool()
        if position_ids is None:
            position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
        if labels is None:
            labels = torch.full_like(input_ids, IGNORE_INDEX)

        # remove the padding using attention_mask -- FIXME
        input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
        labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]

        new_input_embeds = []
        new_labels = []
        new_input_ids = []
        cur_image_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids):
            num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()
            if num_images == 0:
                cur_image_features = image_features[cur_image_idx]
                cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids)
                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)
                new_input_embeds.append(cur_input_embeds)
                new_labels.append(labels[batch_idx])
                cur_image_idx += 1
                continue
            image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]]
            cur_input_ids_noim = []
            cur_labels = labels[batch_idx]
            cur_labels_noim = []
            for i in range(len(image_token_indices) - 1):
                cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]])
                cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]])
            split_sizes = [x.shape[0] for x in cur_labels_noim]
            cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim))
            cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)
            cur_new_input_embeds = []
            cur_new_labels = []
            cur_new_input_ids = []

            for i in range(num_images + 1):
                cur_new_input_embeds.append(cur_input_embeds_no_im[i])
                cur_new_labels.append(cur_labels_noim[i])
                cur_new_input_ids.append(cur_input_ids_noim[i])
                if i < num_images:
                    # len() works for both the tensor and the list form of image_features
                    if cur_image_idx < len(image_features):
                        cur_image_features = image_features[cur_image_idx]
                    else:
                        cur_image_features = image_features[-1]
                    cur_image_idx += 1
                    cur_new_input_embeds.append(cur_image_features)
                    cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))
                    cur_new_input_ids.append(torch.full((cur_image_features.shape[0],), IMAGE_TOKEN_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))

            cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds]

            cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
            cur_new_labels = torch.cat(cur_new_labels, dim=0)
            cur_new_input_ids = torch.cat(cur_new_input_ids, dim=0)

            new_input_embeds.append(cur_new_input_embeds)
            new_labels.append(cur_new_labels)
            new_input_ids.append(cur_new_input_ids)

        # Combine them
        max_len = max(x.shape[0] for x in new_input_embeds)
        batch_size = len(new_input_embeds)

        new_input_embeds_padded = []
        new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device)
        attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
        position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)
        new_input_ids_padded = torch.full((batch_size, max_len), -300, dtype=new_input_ids[0].dtype, device=new_input_ids[0].device) if len(new_input_ids) > 0 else None

        for i, (cur_new_embed, cur_new_labels, cur_new_input_ids) in enumerate(zip(new_input_embeds, new_labels, new_input_ids)):
            cur_len = cur_new_embed.shape[0]
            new_input_embeds_padded.append(torch.cat((
                cur_new_embed,
                torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)
            ), dim=0))
            if cur_len > 0:
                new_labels_padded[i, :cur_len] = cur_new_labels
                attention_mask[i, :cur_len] = True
                position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
                new_input_ids_padded[i, :cur_len] = cur_new_input_ids

        new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)

        if _labels is None:
            new_labels = None
        else:
            new_labels = new_labels_padded

        if _attention_mask is None:
            attention_mask = None
        else:
            attention_mask = attention_mask.to(dtype=_attention_mask.dtype)

        if _position_ids is None:
            position_ids = None

        return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels, target_image_embeds

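    # Adds the image special tokens to the tokenizer and resizes the embedding
    # matrices; new rows are initialized to the mean of the existing
    # embeddings, a common trick that keeps the new tokens' initial logits
    # in distribution.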
    def initialize_vision_tokenizer(self, model_args, tokenizer):
        if model_args.mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

        if model_args.mm_use_im_start_end:
            num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

            if num_new_tokens > 0:
                input_embeddings = self.get_input_embeddings().weight.data
                output_embeddings = self.get_output_embeddings().weight.data

                input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)
                output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)

                input_embeddings[-num_new_tokens:] = input_embeddings_avg
                output_embeddings[-num_new_tokens:] = output_embeddings_avg

            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = True
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False

            if model_args.pretrain_mm_mlp_adapter:
                mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
                embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']
                assert num_new_tokens == 2
                if input_embeddings.shape == embed_tokens_weight.shape:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
                elif embed_tokens_weight.shape[0] == num_new_tokens:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight
                else:
                    raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Number of new tokens: {num_new_tokens}.")
        elif model_args.mm_use_im_patch_token:
            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = False
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False