Fahad-S committed
Commit 951a8f6 · verified · 1 Parent(s): dd15a4b

Upload noqueries_code/llava_arch.py with huggingface_hub

Files changed (1)
  1. noqueries_code/llava_arch.py +381 -0
noqueries_code/llava_arch.py ADDED
@@ -0,0 +1,381 @@
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import math
from abc import ABC, abstractmethod

import torch
import torch.nn as nn
import torch.nn.functional as F
from diffusers import FlowMatchEulerDiscreteScheduler, DPMSolverMultistepScheduler
from diffusers.models.embeddings import PixArtAlphaTextProjection
from diffusers.models.normalization import RMSNorm

from .multimodal_llava_encoder.builder import build_vision_tower
from .multimodal_llava_projector.builder import build_vision_projector
from .multimodal_projector.builder import build_down_projector
from .multimodal_decoder.builder import build_vae, build_sana

from blip3o.constants import DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_TOKEN_IDX, UND_IMAGE_TOKEN_IDX, DEFAULT_IMAGE_PATCH_TOKEN


class DiffusionConnector(nn.Module):
    """Two-layer MLP that maps LLM hidden states to the DiT conditioning width."""

    def __init__(self, input_dim=896, hidden_dim=1024, output_dim=2304, eps=1e-5):
        super().__init__()
        self.linear1 = nn.Linear(input_dim, hidden_dim)
        self.act = nn.GELU(approximate="tanh")
        self.linear2 = nn.Linear(hidden_dim, output_dim)
        self.norm = RMSNorm(output_dim, eps=eps, elementwise_affine=True)

        nn.init.xavier_uniform_(self.linear1.weight)
        nn.init.zeros_(self.linear1.bias)
        nn.init.xavier_uniform_(self.linear2.weight)
        nn.init.zeros_(self.linear2.bias)
        # Scale the RMSNorm gain so the connector output magnitude matches
        # what the diffusion decoder expects.
        with torch.no_grad():
            self.norm.weight.fill_(math.sqrt(5.5))

    def forward(self, x):
        x = self.linear1(x)
        x = self.act(x)
        x = self.linear2(x)
        x = self.norm(x)
        return x
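

# Minimal usage sketch (illustrative shapes only, not from the original repo;
# assumes an LLM hidden size of 896 and a SANA cross-attention width of 2304):
def _demo_diffusion_connector():
    connector = DiffusionConnector(input_dim=896, hidden_dim=1024, output_dim=2304)
    hidden_states = torch.randn(2, 64, 896)  # (batch, num_tokens, llm_hidden)
    cond = connector(hidden_states)
    assert cond.shape == (2, 64, 2304)  # per-token conditioning for the DiT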


class LlavaMetaModel:

    def __init__(self, config):
        super(LlavaMetaModel, self).__init__(config)

        if hasattr(config, "mm_vision_tower"):
            self.vision_tower = build_vision_tower(config, delay_load=True)
            self.mm_projector = build_vision_projector(config)
        if hasattr(config, "diffusion_name_or_path"):
            self.dit = build_sana(config)
            self.vae = build_vae(config)
            self.diffusion_connector = DiffusionConnector(input_dim=self.config.hidden_size, hidden_dim=1024, output_dim=2304)
            '''
            norm = RMSNorm(896, eps=1e-5, elementwise_affine=True)
            with torch.no_grad():
                norm.weight.fill_(math.sqrt(5.5))
            self.diffusion_connector = nn.Sequential(
                nn.Linear(config.hidden_size, 896),
                nn.GELU(approximate="tanh"),
                nn.Linear(896, 896),
                norm,
            )
            '''
            self.noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(config.diffusion_name_or_path, subfolder="scheduler")
            # self.noise_scheduler = DPMSolverMultistepScheduler.from_pretrained(config.diffusion_name_or_path, subfolder="scheduler")

        # self.latent_queries = nn.Parameter(torch.randn(1, self.config.n_query, self.config.hidden_size))

    def get_vision_tower(self):
        vision_tower = getattr(self, 'vision_tower', None)
        if type(vision_tower) is list:
            vision_tower = vision_tower[0]
        return vision_tower

    def get_sana(self):
        dit = getattr(self, 'dit', None)
        if type(dit) is list:
            dit = dit[0]
        if dit is not None:
            dit.to(self.device)
        return dit

    def get_sana_vae(self):
        vae = getattr(self, 'vae', None)
        if type(vae) is list:
            vae = vae[0]
        if vae is not None:
            vae.to(self.device)
        return vae

    def initialize_vision_modules(self, model_args, fsdp=None):
        vision_tower = model_args.vision_tower
        mm_vision_select_layer = model_args.mm_vision_select_layer
        mm_vision_select_feature = model_args.mm_vision_select_feature
        mm_patch_merge_type = model_args.mm_patch_merge_type

        self.config.mm_vision_tower = vision_tower
        self.config.vision_tower_pretrained = getattr(model_args, "vision_tower_pretrained", "")

        if self.get_sana() is None:
            dit = build_sana(model_args)
            self.noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(model_args.diffusion_name_or_path, subfolder="scheduler")
            self.scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(model_args.diffusion_name_or_path, subfolder="scheduler")

            # self.noise_scheduler = DPMSolverMultistepScheduler.from_pretrained(model_args.diffusion_name_or_path, subfolder="scheduler")
            # self.scheduler = DPMSolverMultistepScheduler.from_pretrained(model_args.diffusion_name_or_path, subfolder="scheduler")

            if fsdp is not None and len(fsdp) > 0:
                self.dit = [dit]
            else:
                self.dit = dit
        else:
            if fsdp is not None and len(fsdp) > 0:
                dit = self.dit[0]
            else:
                dit = self.dit
            for p in dit.parameters():
                p.requires_grad = False

        if self.get_sana_vae() is None:
            vae = build_vae(model_args)

            if fsdp is not None and len(fsdp) > 0:
                self.vae = [vae]
            else:
                self.vae = vae
        else:
            if fsdp is not None and len(fsdp) > 0:
                vae = self.vae[0]
            else:
                vae = self.vae
            for p in vae.parameters():
                p.requires_grad = False

        if self.get_vision_tower() is None:
            print("=" * 20, "Building vision tower", "=" * 20)
            vision_tower = build_vision_tower(model_args)

            if fsdp is not None and len(fsdp) > 0:
                self.vision_tower = [vision_tower]
            else:
                self.vision_tower = vision_tower
        else:
            if fsdp is not None and len(fsdp) > 0:
                vision_tower = self.vision_tower[0]
            else:
                vision_tower = self.vision_tower
            vision_tower.load_model()

        if getattr(self, 'diffusion_connector', None) is None:
            self.diffusion_connector = DiffusionConnector(input_dim=self.config.hidden_size, hidden_dim=1024, output_dim=2304)
            '''
            norm = RMSNorm(2304, eps=1e-5, elementwise_affine=True)
            with torch.no_grad():
                norm.weight.fill_(math.sqrt(5.5))
            self.diffusion_connector = nn.Sequential(
                nn.Linear(self.config.hidden_size, 1024),
                nn.GELU(approximate="tanh"),
                nn.Linear(1024, 2304),
                norm,
            )
            '''
        else:
            for p in self.diffusion_connector.parameters():
                p.requires_grad = True

        # Freeze all parameters in the DiT except for the caption projection.
        # Use the local `dit` handle so this also works when FSDP wraps
        # `self.dit` in a list.
        for name, param in dit.named_parameters():
            if "caption" in name:
                param.requires_grad = True
            else:
                param.requires_grad = False

        # for p in dit.parameters():
        #     p.requires_grad = True

        self.config.use_mm_proj = True
        self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
        self.config.mm_vision_select_layer = mm_vision_select_layer
        self.config.mm_vision_select_feature = mm_vision_select_feature
        self.config.mm_patch_merge_type = mm_patch_merge_type
        self.config.gen_pooling = model_args.gen_pooling

        if getattr(self, 'down_projector', None) is None:
            self.down_projector = build_down_projector(self.config)
        else:
            # In case it is frozen by LoRA
            for p in self.down_projector.parameters():
                p.requires_grad = True


def unpad_image(tensor, original_size):
    """
    Unpads a PyTorch tensor of a padded and resized image.

    Args:
        tensor (torch.Tensor): The image tensor, assumed to be in CxHxW format.
        original_size (tuple): The original size of the PIL image (width, height).

    Returns:
        torch.Tensor: The unpadded image tensor.
    """
    original_width, original_height = original_size
    current_height, current_width = tensor.shape[1:]

    original_aspect_ratio = original_width / original_height
    current_aspect_ratio = current_width / current_height

    if original_aspect_ratio > current_aspect_ratio:
        # The image was padded on the top and bottom; crop the height.
        scale_factor = current_width / original_width
        new_height = int(original_height * scale_factor)
        padding = (current_height - new_height) // 2
        unpadded_tensor = tensor[:, padding:current_height - padding, :]
    else:
        # The image was padded on the left and right; crop the width.
        scale_factor = current_height / original_height
        new_width = int(original_width * scale_factor)
        padding = (current_width - new_width) // 2
        unpadded_tensor = tensor[:, :, padding:current_width - padding]

    return unpadded_tensor
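

# Minimal sketch of unpad_image on assumed sizes: a 448x224 (width, height)
# source letterboxed into a 336x336 square leaves 84-pixel bands at the top
# and bottom, which the crop removes.
def _demo_unpad_image():
    padded = torch.zeros(3, 336, 336)           # CxHxW, square-padded input
    unpadded = unpad_image(padded, (448, 224))  # original (width, height)
    assert unpadded.shape == (3, 168, 336)      # 224 * (336 / 448) == 168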


class LlavaMetaForCausalLM(ABC):

    @abstractmethod
    def get_model(self):
        pass

    def get_vision_tower(self):
        return self.get_model().get_vision_tower()

    def encode_image(self, images):
        vision_tower = self.get_vision_tower()
        device = vision_tower.device
        images = images.to(device)
        prompt_image_embeds = vision_tower(images)
        if 'early' in self.get_gen_pooling():
            prompt_image_embeds = self.pool_img(prompt_image_embeds)

        # ------- diagnostic only: mean of min pairwise token distances -------
        # Measures how far each token embedding is from its nearest
        # predecessor; the result is computed but not returned or logged.
        all_dist = 0
        count = 0
        for i in range(2, prompt_image_embeds.shape[1] - 1):
            diff = prompt_image_embeds[:, i, :].unsqueeze(1) - prompt_image_embeds[:, :i, :]
            dist = torch.sqrt(diff.square().sum(-1)).min().item()
            all_dist += dist
            count += 1
        all_dist /= count
        return prompt_image_embeds

    def get_mm_projector(self):
        return self.get_model().mm_projector

    def get_gen_projector(self):
        return None

    def get_gen_pooling(self):
        return self.get_model().config.gen_pooling

    def pool_img(self, image_features):
        num_img, n, c = image_features.shape
        gen_pooling = self.get_gen_pooling()
        # The pooling stride is encoded as the suffix of the gen_pooling name.
        stride = int(gen_pooling.split('_')[-1])
        sqrt_n = int(n ** 0.5)
        # Restore the square token grid, then average-pool it down.
        image_features = image_features.permute(0, 2, 1).view(num_img, c, sqrt_n, sqrt_n)
        image_features = F.avg_pool2d(image_features, kernel_size=(stride, stride), stride=stride)
        return image_features
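
    # Minimal sketch of the pooling step under assumed sizes (not from the
    # original repo): 576 ViT tokens form a 24x24 grid, and stride-2 average
    # pooling reduces it to 12x12. Note the result stays in (num_img, c, h, w)
    # layout rather than (num_img, n, c).
    @staticmethod
    def _demo_pool_img():
        feats = torch.randn(1, 576, 1024)                    # (num_img, n, c)
        grid = feats.permute(0, 2, 1).view(1, 1024, 24, 24)  # restore the grid
        pooled = F.avg_pool2d(grid, kernel_size=2, stride=2)
        assert pooled.shape == (1, 1024, 12, 12)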

    def get_sigmas(self, timesteps, device, n_dim=4, dtype=torch.float32):
        sigmas = self.get_model().noise_scheduler.sigmas.to(device=device, dtype=dtype)
        schedule_timesteps = self.get_model().noise_scheduler.timesteps.to(device=device)
        timesteps = timesteps.to(device)
        # Map each sampled timestep to its index in the scheduler's table,
        # then look up the matching sigma.
        step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        # Append singleton dims so sigma broadcasts against the latent tensor.
        while len(sigma.shape) < n_dim:
            sigma = sigma.unsqueeze(-1)
        return sigma
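
    # Minimal sketch of the sigma lookup with a toy 4-step table (the real
    # values come from FlowMatchEulerDiscreteScheduler): match each timestep
    # to its table index, then reshape for broadcasting over (B, C, H, W).
    @staticmethod
    def _demo_get_sigmas():
        schedule_timesteps = torch.tensor([1000., 750., 500., 250.])
        sigmas = torch.tensor([1.0, 0.75, 0.5, 0.25])
        timesteps = torch.tensor([500., 1000.])
        step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while sigma.dim() < 4:
            sigma = sigma.unsqueeze(-1)
        assert sigma.shape == (2, 1, 1, 1) and sigma[0].item() == 0.5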

    def mask_drop(self, latents, drop_prob=0.1):
        if drop_prob <= 0:
            return latents
        # Bernoulli(drop_prob) per sample: 1 marks a dropped conditioning.
        mask = torch.bernoulli(torch.zeros(latents.shape[0], device=latents.device, dtype=latents.dtype) + drop_prob)
        while len(mask.shape) < len(latents.shape):
            mask = mask.unsqueeze(-1)
        mask = 1 - mask  # flip so kept samples are 1 and dropped samples are 0
        return latents * mask
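
    # Minimal sketch of mask_drop: classifier-free-guidance style condition
    # dropout that zeroes entire samples with probability drop_prob (forced
    # to 1.0 here so the outcome is deterministic).
    @staticmethod
    def _demo_mask_drop():
        latents = torch.ones(4, 8, 16)
        mask = torch.bernoulli(torch.full((4,), 1.0))  # drop_prob = 1.0
        mask = (1 - mask).view(4, 1, 1)                # flip: 0 = dropped
        assert torch.all(latents * mask == 0)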

    def prepare_inputs_labels_for_multimodal(
        self, input_ids, position_ids, attention_mask, past_key_values, labels,
        gen_images, und_images, grid_thw, i_s_pos, image_sizes=None
    ):
        if (gen_images is None and und_images is None) or input_ids.shape[1] == 1:
            return input_ids, position_ids, attention_mask, past_key_values, None, labels, None, None, None

        # Encode the target images into scaled VAE latents for diffusion training.
        vae = self.get_model().get_sana_vae()
        vae_device = vae.device
        prompt_image_embeds = vae.encode(gen_images.to(vae_device)).latent if gen_images is not None else None
        prompt_image_embeds = prompt_image_embeds * vae.config.scaling_factor if prompt_image_embeds is not None else None
        target_image_embeds = prompt_image_embeds.clone().detach() if prompt_image_embeds is not None else None
        # Mask image-token positions out of the LM loss.
        image_idx = (input_ids == IMAGE_TOKEN_IDX)
        text_embeds = self.get_model().embed_tokens(input_ids)
        labels[image_idx] = -100
        return None, position_ids, attention_mask, past_key_values, text_embeds, labels, target_image_embeds

    def initialize_vision_tokenizer(self, model_args, tokenizer):
        if model_args.mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

        if model_args.mm_use_im_start_end:
            num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

            if num_new_tokens > 0:
                input_embeddings = self.get_input_embeddings().weight.data
                output_embeddings = self.get_output_embeddings().weight.data

                # Initialize the new token rows with the mean of the existing
                # embeddings instead of random values.
                input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)
                output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)

                input_embeddings[-num_new_tokens:] = input_embeddings_avg
                output_embeddings[-num_new_tokens:] = output_embeddings_avg

            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = True
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False

            if model_args.pretrain_mm_mlp_adapter:
                mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
                embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']
                assert num_new_tokens == 2
                if input_embeddings.shape == embed_tokens_weight.shape:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
                elif embed_tokens_weight.shape[0] == num_new_tokens:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight
                else:
                    raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Number of new tokens: {num_new_tokens}.")
        elif model_args.mm_use_im_patch_token:
            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = False
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False
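

# Minimal sketch of the mean-initialization used above, on a toy embedding
# table (toy sizes; the real rows come from resize_token_embeddings):
def _demo_mean_init_new_tokens():
    embed = nn.Embedding(10, 4)  # 8 existing rows + 2 freshly added tokens
    num_new_tokens = 2
    with torch.no_grad():
        avg = embed.weight[:-num_new_tokens].mean(dim=0, keepdim=True)
        embed.weight[-num_new_tokens:] = avg
    assert torch.allclose(embed.weight[-1], avg.squeeze(0))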