Fahad-S committed
Commit b68c324 · verified · 1 Parent(s): e07a694

Upload teacher_code/llava_arch.py with huggingface_hub

Files changed (1):
  teacher_code/llava_arch.py +434 -0
teacher_code/llava_arch.py ADDED
@@ -0,0 +1,434 @@

# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import math
from abc import ABC, abstractmethod

import torch
import torch.nn as nn
import torch.nn.functional as F
from diffusers import FlowMatchEulerDiscreteScheduler
from diffusers.models.embeddings import PixArtAlphaTextProjection
from diffusers.models.normalization import RMSNorm

from .multimodal_llava_encoder.builder import build_vision_tower
from .multimodal_llava_projector.builder import build_vision_projector
from .multimodal_projector.builder import build_down_projector
from .multimodal_decoder.builder import build_vae, build_sana

from blip3o.constants import DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_TOKEN_IDX, UND_IMAGE_TOKEN_IDX, DEFAULT_IMAGE_PATCH_TOKEN

class DiffusionConnector(nn.Module):
    """MLP that maps LLM hidden states into the DiT caption-embedding space."""

    def __init__(self, input_dim=896, hidden_dim=1024, output_dim=2304, eps=1e-5):
        super().__init__()
        self.linear1 = nn.Linear(input_dim, hidden_dim)
        self.act = nn.GELU(approximate="tanh")
        self.linear2 = nn.Linear(hidden_dim, output_dim)
        self.norm = RMSNorm(output_dim, eps=eps, elementwise_affine=True)

        nn.init.xavier_uniform_(self.linear1.weight)
        nn.init.zeros_(self.linear1.bias)
        nn.init.xavier_uniform_(self.linear2.weight)
        nn.init.zeros_(self.linear2.bias)
        # Scale the RMSNorm gain above the unit-gain default so outputs start
        # at a larger magnitude.
        with torch.no_grad():
            self.norm.weight.fill_(math.sqrt(5.5))

    def forward(self, x):
        x = self.linear1(x)
        x = self.act(x)
        x = self.linear2(x)
        x = self.norm(x)
        return x
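
# A minimal shape sketch for DiffusionConnector (illustrative only; the default
# dims are assumptions tied to this repo's configs, with 2304 matching the
# caption-embedding width the Sana DiT expects below):
#
#     conn = DiffusionConnector(input_dim=896, hidden_dim=1024, output_dim=2304)
#     out = conn(torch.randn(2, 64, 896))   # -> torch.Size([2, 64, 2304])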


class LlavaMetaModel:

    def __init__(self, config):
        super(LlavaMetaModel, self).__init__(config)

        if hasattr(config, "mm_vision_tower"):
            self.vision_tower = build_vision_tower(config, delay_load=True)
            self.mm_projector = build_vision_projector(config)
        if hasattr(config, "diffusion_name_or_path"):
            self.dit, _, self.noise_scheduler, _ = build_sana(config, load_teacher=False)
            self.vae = build_vae(config)
            self.diffusion_connector = DiffusionConnector(input_dim=self.config.hidden_size, hidden_dim=1024, output_dim=2304)
            '''
            norm = RMSNorm(896, eps=1e-5, elementwise_affine=True)
            with torch.no_grad():
                norm.weight.fill_(math.sqrt(5.5))
            self.diffusion_connector = nn.Sequential(
                nn.Linear(config.hidden_size, 896),
                nn.GELU(approximate="tanh"),
                nn.Linear(896, 896),
                norm,
            )
            '''
            self.latent_queries = nn.Parameter(torch.randn(1, self.config.n_query, self.config.hidden_size))
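            # latent_queries is a bank of learned query embeddings. In
            # prepare_inputs_labels_for_multimodal below they are written into
            # the embedded input sequence at generation-image token positions;
            # the LLM hidden states produced there are (presumably, via
            # diffusion_connector) what conditions the DiT outside this file.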



    def get_vision_tower(self):
        vision_tower = getattr(self, 'vision_tower', None)
        if type(vision_tower) is list:
            # Under FSDP the module may be wrapped in a single-element list.
            vision_tower = vision_tower[0]
        return vision_tower

    def get_sana(self):
        dit = getattr(self, 'dit', None)
        if type(dit) is list:
            dit = dit[0]
        if dit is not None:
            dit.to(self.device)
        return dit

    def get_sana_vae(self):
        vae = getattr(self, 'vae', None)
        if type(vae) is list:
            vae = vae[0]
        if vae is not None:
            vae.to(self.device)
        return vae

    def initialize_vision_modules(self, model_args, fsdp=None):
        vision_tower = model_args.vision_tower
        mm_vision_select_layer = model_args.mm_vision_select_layer
        mm_vision_select_feature = model_args.mm_vision_select_feature
        mm_patch_merge_type = model_args.mm_patch_merge_type

        self.config.mm_vision_tower = vision_tower
        self.config.vision_tower_pretrained = getattr(model_args, "vision_tower_pretrained", "")

        if self.get_sana() is None:
            dit, self.dit_teacher, self.noise_scheduler, self.text_encoder = build_sana(model_args, device=self.device)

            if fsdp is not None and len(fsdp) > 0:
                self.dit = [dit]
            else:
                self.dit = dit
        else:
            if fsdp is not None and len(fsdp) > 0:
                dit = self.dit[0]
            else:
                dit = self.dit
            # dit_teacher = self.dit_teacher

        # Freeze the text encoder and the teacher DiT.
        for p in self.text_encoder.parameters():
            p.requires_grad = False

        for p in self.dit_teacher.parameters():
            p.requires_grad = False

        if self.get_sana_vae() is None:
            vae = build_vae(model_args)

            if fsdp is not None and len(fsdp) > 0:
                self.vae = [vae]
            else:
                self.vae = vae
        else:
            if fsdp is not None and len(fsdp) > 0:
                vae = self.vae[0]
            else:
                vae = self.vae
        # The VAE is always frozen.
        for p in vae.parameters():
            p.requires_grad = False


        if self.get_vision_tower() is None:
            print("=" * 20, "Building vision tower", "=" * 20)
            vision_tower = build_vision_tower(model_args)

            if fsdp is not None and len(fsdp) > 0:
                self.vision_tower = [vision_tower]
            else:
                self.vision_tower = vision_tower
        else:
            if fsdp is not None and len(fsdp) > 0:
                vision_tower = self.vision_tower[0]
            else:
                vision_tower = self.vision_tower
            vision_tower.load_model()

        if getattr(self, 'diffusion_connector', None) is None:
            self.diffusion_connector = DiffusionConnector(input_dim=self.config.hidden_size, hidden_dim=1024, output_dim=2304)
            '''
            norm = RMSNorm(2304, eps=1e-5, elementwise_affine=True)
            with torch.no_grad():
                norm.weight.fill_(math.sqrt(5.5))
            self.diffusion_connector = nn.Sequential(
                nn.Linear(self.config.hidden_size, 1024),
                nn.GELU(approximate="tanh"),
                nn.Linear(1024, 2304),
                norm,
            )
            '''
        else:
            # In case these were frozen (e.g. by LoRA), re-enable gradients.
            for p in self.diffusion_connector.parameters():
                p.requires_grad = True

            for p in self.diffusion_connector_teacher.parameters():
                p.requires_grad = True

        # Freeze all parameters in the DiT except the caption projection.
        # Use the local handle: self.dit may be wrapped in a list under FSDP.
        for name, param in dit.named_parameters():
            if "caption" in name:
                param.requires_grad = True
            else:
                param.requires_grad = False

        # for p in dit.parameters():
        #     p.requires_grad = True
        #     if param.ndim > 1:
        #         nn.init.xavier_uniform_(param)
        #     else:
        #         nn.init.zeros_(param)

        # for p in dit.parameters():
        #     p.requires_grad = True

        '''
        for p in dit_teacher.parameters():
            p.requires_grad = False

        for name, param in self.dit_teacher.named_parameters():
            if "caption" in name:
                param.requires_grad = True
            else:
                param.requires_grad = False
        '''

        self.config.use_mm_proj = True
        self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
        self.config.mm_vision_select_layer = mm_vision_select_layer
        self.config.mm_vision_select_feature = mm_vision_select_feature
        self.config.mm_patch_merge_type = mm_patch_merge_type
        self.config.n_query = model_args.n_query
        self.config.gen_pooling = model_args.gen_pooling
        self.config.diffusion_name_or_path = model_args.diffusion_name_or_path

        if getattr(self, 'down_projector', None) is None:
            self.down_projector = build_down_projector(self.config)
        else:
            # In case it was frozen by LoRA.
            for p in self.down_projector.parameters():
                p.requires_grad = True

        if getattr(self, 'latent_queries', None) is None:
            print("Randomly initializing latent_queries!")
            self.latent_queries = nn.Parameter(torch.randn(1, self.config.n_query, self.config.hidden_size))
        else:
            print("latent_queries loaded from checkpoint!")
            self.latent_queries.requires_grad = True
        if not hasattr(self, 'dit_teacher') or self.dit_teacher is None:
            print("Teacher model not properly initialized!")


def unpad_image(tensor, original_size):
    """
    Unpads a PyTorch tensor of a padded and resized image.

    Args:
        tensor (torch.Tensor): The image tensor, assumed to be in CxHxW format.
        original_size (tuple): The original size of the PIL image (width, height).

    Returns:
        torch.Tensor: The unpadded image tensor.
    """
    original_width, original_height = original_size
    current_height, current_width = tensor.shape[1:]

    original_aspect_ratio = original_width / original_height
    current_aspect_ratio = current_width / current_height

    if original_aspect_ratio > current_aspect_ratio:
        # Width-bound: padding was added top and bottom.
        scale_factor = current_width / original_width
        new_height = int(original_height * scale_factor)
        padding = (current_height - new_height) // 2
        unpadded_tensor = tensor[:, padding:current_height - padding, :]
    else:
        # Height-bound: padding was added left and right.
        scale_factor = current_height / original_height
        new_width = int(original_width * scale_factor)
        padding = (current_width - new_width) // 2
        unpadded_tensor = tensor[:, :, padding:current_width - padding]

    return unpadded_tensor
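
# Worked example (illustrative): a 640x480 image letterboxed into a 336x336
# tensor has original_aspect_ratio 4/3 > 1, so it is width-bound:
# scale_factor = 336/640 = 0.525, new_height = int(480 * 0.525) = 252,
# padding = (336 - 252) // 2 = 42, and rows 42:294 are kept.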


class LlavaMetaForCausalLM(ABC):

    @abstractmethod
    def get_model(self):
        pass

    def get_vision_tower(self):
        return self.get_model().get_vision_tower()

    def encode_image(self, images):
        vision_tower = self.get_vision_tower()
        device = vision_tower.device
        images = images.to(device)
        prompt_image_embeds = vision_tower(images)
        if 'early' in self.get_gen_pooling():
            prompt_image_embeds = self.pool_img(prompt_image_embeds)

        # ------------- compute similarity (diagnostic only; result unused) -------------
        # Average nearest-neighbor distance between each patch embedding and
        # the embeddings that precede it.
        all_dist = 0
        count = 0
        for i in range(2, prompt_image_embeds.shape[1] - 1):
            diff = prompt_image_embeds[:, i, :].unsqueeze(1) - prompt_image_embeds[:, :i, :]
            dist = torch.sqrt(diff.square().sum(-1)).min().item()
            all_dist += dist
            count += 1
        all_dist /= count
        return prompt_image_embeds

    def get_mm_projector(self):
        return self.get_model().mm_projector

    def get_gen_projector(self):
        return None

    def get_n_query(self):
        return self.get_model().config.n_query

    def get_gen_pooling(self):
        return self.get_model().config.gen_pooling

    def pool_img(self, image_features):
        # (num_img, n, c) -> (num_img, c, sqrt(n), sqrt(n)), then average-pool;
        # the stride is parsed from the trailing integer of the gen_pooling string.
        num_img, n, c = image_features.shape
        gen_pooling = self.get_gen_pooling()
        stride = int(gen_pooling.split('_')[-1])
        sqrt_n = int(n**0.5)
        image_features = image_features.permute(0, 2, 1).view(num_img, c, sqrt_n, sqrt_n)
        image_features = F.avg_pool2d(image_features, kernel_size=(stride, stride), stride=stride)
        return image_features
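
    # Note: pool_img returns features in (num_img, c, h', w') layout. A caller
    # that needs a token sequence again would flatten back, e.g. (a sketch,
    # not something this file does):
    #
    #     pooled = pooled.flatten(2).permute(0, 2, 1)   # -> (num_img, h'*w', c)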

    def get_sigmas(self, timesteps, device, n_dim=4, dtype=torch.float32):
        # Look up the scheduler sigma for each sampled timestep and reshape it
        # to n_dim dimensions so it broadcasts against the latent tensor.
        sigmas = self.get_model().noise_scheduler.sigmas.to(device=device, dtype=dtype)
        schedule_timesteps = self.get_model().noise_scheduler.timesteps.to(device=device)
        timesteps = timesteps.to(device)
        step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < n_dim:
            sigma = sigma.unsqueeze(-1)
        return sigma
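
    # With FlowMatchEulerDiscreteScheduler, sigma is the interpolation weight
    # of the flow-matching forward process (in the usual convention),
    #     x_t = (1 - sigma_t) * x_0 + sigma_t * noise,
    # so with n_dim=4 the returned tensor broadcasts against (B, C, H, W) latents.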

    def mask_drop(self, latents, drop_prob=0.1):
        # Randomly zero out whole samples with probability drop_prob
        # (condition dropout for classifier-free guidance).
        if drop_prob <= 0:
            return latents
        mask = torch.bernoulli(torch.zeros(latents.shape[0], device=latents.device, dtype=latents.dtype) + drop_prob)
        while len(mask.shape) < len(latents.shape):
            mask = mask.unsqueeze(-1)
        mask = 1 - mask  # flip so 1 = keep, 0 = drop
        return latents * mask
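
    # Typical (assumed) usage: dropping the conditioning on ~10% of samples
    # lets the same DiT also model the unconditional distribution, which
    # classifier-free guidance combines at sampling time as
    #     pred = uncond_pred + guidance_scale * (cond_pred - uncond_pred).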

    def prepare_inputs_labels_for_multimodal(
        self, input_ids, position_ids, attention_mask, past_key_values, labels,
        gen_images, und_images, grid_thw, i_s_pos, image_sizes=None
    ):
        vision_tower = self.visual
        if (gen_images is None and und_images is None) or input_ids.shape[1] == 1:
            # Match the arity of the main return below (seven values).
            return input_ids, position_ids, attention_mask, past_key_values, None, labels, None

        # Encode target images into scaled VAE latents; these become the
        # diffusion targets.
        vae = self.get_model().get_sana_vae()
        vae_device = vae.device
        prompt_image_embeds = vae.encode(gen_images.to(vae_device)).latent if gen_images is not None else None
        prompt_image_embeds = prompt_image_embeds * vae.config.scaling_factor if prompt_image_embeds is not None else None
        target_image_embeds = torch.clone(prompt_image_embeds).detach()
        latent_queries = self.get_model().latent_queries.repeat(input_ids.shape[0], 1, 1)
        H = latent_queries.shape[-1]
        latent_queries = latent_queries.contiguous().view(-1, H)

        # vocab_size = self.get_model().embed_tokens.num_embeddings
        # max_token_id = input_ids.max().item()
        # min_token_id = input_ids.min().item()
        # print(f"Vocab size: {vocab_size}")
        # print(f"Max token ID: {max_token_id}")
        # print(f"Min token ID: {min_token_id}")

        if und_images is not None:
            und_image_embeds = vision_tower(und_images, grid_thw=grid_thw)

        image_idx = (input_ids == IMAGE_TOKEN_IDX)
        und_image_idx = (input_ids == UND_IMAGE_TOKEN_IDX)
        output_indicator = labels != -100
        input_indicator = labels == -100
        text_embeds = self.get_model().embed_tokens(input_ids)
        # Generation-side image tokens (supervised positions) are replaced by
        # the learned latent queries; understanding-side image tokens (input
        # positions) are replaced by vision-tower features.
        gen_img_idx = torch.logical_and(output_indicator, image_idx)
        text_embeds = text_embeds.clone()
        text_embeds[gen_img_idx] = latent_queries.to(text_embeds.dtype)
        und_img_idx = torch.logical_and(input_indicator, und_image_idx)

        if und_images is not None:
            text_embeds[und_img_idx] = und_image_embeds.to(text_embeds.device)[:und_img_idx.sum(), :]

        # Image positions never contribute to the LM loss.
        labels[image_idx] = -100
        return None, position_ids, attention_mask, past_key_values, text_embeds, labels, target_image_embeds
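
    # Callers unpack seven values: (input_ids, position_ids, attention_mask,
    # past_key_values, inputs_embeds, labels, target_image_embeds). text_embeds
    # is fed to the LM as inputs_embeds, and target_image_embeds is the scaled
    # VAE latent that (presumably) serves as the diffusion target in the
    # training loop outside this file.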


    def initialize_vision_tokenizer(self, model_args, tokenizer):
        if model_args.mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

        if model_args.mm_use_im_start_end:
            num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

            if num_new_tokens > 0:
                # Initialize the new token embeddings to the mean of the
                # existing ones.
                input_embeddings = self.get_input_embeddings().weight.data
                output_embeddings = self.get_output_embeddings().weight.data

                input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)
                output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)

                input_embeddings[-num_new_tokens:] = input_embeddings_avg
                output_embeddings[-num_new_tokens:] = output_embeddings_avg

            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = True
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False

            if model_args.pretrain_mm_mlp_adapter:
                mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
                embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']
                assert num_new_tokens == 2
                if input_embeddings.shape == embed_tokens_weight.shape:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
                elif embed_tokens_weight.shape[0] == num_new_tokens:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight
                else:
                    raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Number of new tokens: {num_new_tokens}.")
        elif model_args.mm_use_im_patch_token:
            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = False
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False
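
# Minimal integration sketch (illustrative; every class name other than the two
# mixins above is an assumption): as in upstream LLaVA, a concrete model mixes
# these classes into a HF backbone, e.g.
#
#     class Blip3oModel(LlavaMetaModel, Qwen2Model):
#         def __init__(self, config):
#             super().__init__(config)
#
#     class Blip3oForCausalLM(Qwen2ForCausalLM, LlavaMetaForCausalLM):
#         def get_model(self):
#             return self.model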