from abc import ABC, abstractmethod

import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from diffusers import FlowMatchEulerDiscreteScheduler
from diffusers.models.normalization import RMSNorm

from .multimodal_llava_encoder.builder import build_vision_tower
from .multimodal_llava_projector.builder import build_vision_projector
from .multimodal_projector.builder import build_down_projector
from .multimodal_decoder.builder import build_vae, build_sana

from blip3o.constants import DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_TOKEN_IDX, UND_IMAGE_TOKEN_IDX, DEFAULT_IMAGE_PATCH_TOKEN
|
|
class DiffusionConnector(nn.Module):
    """Two-layer MLP that projects LLM hidden states into the DiT caption
    embedding space, followed by an RMSNorm."""

    def __init__(self, input_dim=896, hidden_dim=1024, output_dim=2304, eps=1e-5):
        super().__init__()
        self.linear1 = nn.Linear(input_dim, hidden_dim)
        self.act = nn.GELU(approximate="tanh")
        self.linear2 = nn.Linear(hidden_dim, output_dim)
        self.norm = RMSNorm(output_dim, eps=eps, elementwise_affine=True)

        nn.init.xavier_uniform_(self.linear1.weight)
        nn.init.zeros_(self.linear1.bias)
        nn.init.xavier_uniform_(self.linear2.weight)
        nn.init.zeros_(self.linear2.bias)
        # Scale up the RMSNorm gain (an empirical choice kept from the
        # original code).
        with torch.no_grad():
            self.norm.weight.fill_(math.sqrt(5.5))

    def forward(self, x):
        x = self.linear1(x)
        x = self.act(x)
        x = self.linear2(x)
        x = self.norm(x)
        return x
|
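# Shape sketch (illustrative only; the dims are this file's defaults, where
# 896 matches the LLM hidden size and 2304 the SANA caption channel):
#
#     connector = DiffusionConnector(input_dim=896, hidden_dim=1024, output_dim=2304)
#     out = connector(torch.randn(2, 64, 896))   # -> torch.Size([2, 64, 2304])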
|
|
class LlavaMetaModel:

    def __init__(self, config):
        super(LlavaMetaModel, self).__init__(config)

        if hasattr(config, "mm_vision_tower"):
            self.vision_tower = build_vision_tower(config, delay_load=True)
            self.mm_projector = build_vision_projector(config)
        if hasattr(config, "diffusion_name_or_path"):
            self.dit = build_sana(config)
            self.vae = build_vae(config)
            self.diffusion_connector = DiffusionConnector(
                input_dim=self.config.hidden_size, hidden_dim=1024, output_dim=2304
            )
            self.noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
                config.diffusion_name_or_path, subfolder="scheduler"
            )
|
    def get_vision_tower(self):
        vision_tower = getattr(self, 'vision_tower', None)
        if type(vision_tower) is list:
            vision_tower = vision_tower[0]
        return vision_tower

    def get_sana(self):
        dit = getattr(self, 'dit', None)
        if type(dit) is list:
            dit = dit[0]
        if dit is not None:
            dit.to(self.device)
        return dit

    def get_sana_vae(self):
        vae = getattr(self, 'vae', None)
        if type(vae) is list:
            vae = vae[0]
        if vae is not None:
            vae.to(self.device)
        return vae
|
    def initialize_vision_modules(self, model_args, fsdp=None):
        vision_tower = model_args.vision_tower
        mm_vision_select_layer = model_args.mm_vision_select_layer
        mm_vision_select_feature = model_args.mm_vision_select_feature
        mm_patch_merge_type = model_args.mm_patch_merge_type

        self.config.mm_vision_tower = vision_tower
        self.config.vision_tower_pretrained = getattr(model_args, "vision_tower_pretrained", "")

        if self.get_sana() is None:
            dit = build_sana(model_args)
            self.noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
                model_args.diffusion_name_or_path, subfolder="scheduler"
            )
            self.scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
                model_args.diffusion_name_or_path, subfolder="scheduler"
            )

            if fsdp is not None and len(fsdp) > 0:
                self.dit = [dit]
            else:
                self.dit = dit
        else:
            if fsdp is not None and len(fsdp) > 0:
                dit = self.dit[0]
            else:
                dit = self.dit
            for p in dit.parameters():
                p.requires_grad = False
|
        if self.get_sana_vae() is None:
            vae = build_vae(model_args)
            if fsdp is not None and len(fsdp) > 0:
                self.vae = [vae]
            else:
                self.vae = vae
        else:
            if fsdp is not None and len(fsdp) > 0:
                vae = self.vae[0]
            else:
                vae = self.vae
            for p in vae.parameters():
                p.requires_grad = False
|
        if self.get_vision_tower() is None:
            print("=" * 20, "Building vision tower", "=" * 20)
            vision_tower = build_vision_tower(model_args)
            if fsdp is not None and len(fsdp) > 0:
                self.vision_tower = [vision_tower]
            else:
                self.vision_tower = vision_tower
        else:
            if fsdp is not None and len(fsdp) > 0:
                vision_tower = self.vision_tower[0]
            else:
                vision_tower = self.vision_tower
            vision_tower.load_model()
|
        if getattr(self, 'diffusion_connector', None) is None:
            self.diffusion_connector = DiffusionConnector(
                input_dim=self.config.hidden_size, hidden_dim=1024, output_dim=2304
            )
        else:
            for p in self.diffusion_connector.parameters():
                p.requires_grad = True

        # Train only the caption-related DiT parameters; freeze the rest.
        # Note: iterate over the local `dit` handle, since under FSDP
        # `self.dit` is wrapped in a list.
        for name, param in dit.named_parameters():
            if "caption" in name:
                param.requires_grad = True
            else:
                param.requires_grad = False
|
        self.config.use_mm_proj = True
        self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
        self.config.mm_vision_select_layer = mm_vision_select_layer
        self.config.mm_vision_select_feature = mm_vision_select_feature
        self.config.mm_patch_merge_type = mm_patch_merge_type
        self.config.gen_pooling = model_args.gen_pooling
|
        if getattr(self, 'down_projector', None) is None:
            self.down_projector = build_down_projector(self.config)
        else:
            for p in self.down_projector.parameters():
                p.requires_grad = True
|
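# Typical call site for the initializer above (a sketch; the exact
# `model_args` / `training_args` names are assumptions based on the
# attributes read inside initialize_vision_modules):
#
#     model.get_model().initialize_vision_modules(model_args, fsdp=training_args.fsdp)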
|
def unpad_image(tensor, original_size):
    """
    Unpads a PyTorch tensor of a padded and resized image.

    Args:
        tensor (torch.Tensor): The image tensor, assumed to be in CxHxW format.
        original_size (tuple): The original size of the PIL image (width, height).

    Returns:
        torch.Tensor: The unpadded image tensor.
    """
    original_width, original_height = original_size
    current_height, current_width = tensor.shape[1:]

    original_aspect_ratio = original_width / original_height
    current_aspect_ratio = current_width / current_height

    if original_aspect_ratio > current_aspect_ratio:
        # Width-bound: padding was added above and below.
        scale_factor = current_width / original_width
        new_height = int(original_height * scale_factor)
        padding = (current_height - new_height) // 2
        unpadded_tensor = tensor[:, padding:current_height - padding, :]
    else:
        # Height-bound: padding was added on the left and right.
        scale_factor = current_height / original_height
        new_width = int(original_width * scale_factor)
        padding = (current_width - new_width) // 2
        unpadded_tensor = tensor[:, :, padding:current_width - padding]

    return unpadded_tensor
|
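# Worked example (illustrative): a 1000x750 (width x height) image letterboxed
# into a square 336x336 tensor has original aspect 4/3 > 1, so the padding is
# vertical: scale_factor = 336/1000 = 0.336, new_height = int(750 * 0.336) = 252,
# padding = (336 - 252) // 2 = 42, and the function returns rows [42:294],
# i.e. a (C, 252, 336) tensor.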
|
|
class LlavaMetaForCausalLM(ABC):

    @abstractmethod
    def get_model(self):
        pass

    def get_vision_tower(self):
        return self.get_model().get_vision_tower()
|
    def encode_image(self, images):
        vision_tower = self.get_vision_tower()
        device = vision_tower.device
        images = images.to(device)
        prompt_image_embeds = vision_tower(images)
        # Optionally downsample the vision features when the configured
        # pooling strategy requests early pooling.
        if 'early' in self.get_gen_pooling():
            prompt_image_embeds = self.pool_img(prompt_image_embeds)
        return prompt_image_embeds
|
    def get_mm_projector(self):
        return self.get_model().mm_projector

    def get_gen_projector(self):
        return None

    def get_gen_pooling(self):
        return self.get_model().config.gen_pooling
|
    def pool_img(self, image_features):
        num_img, n, c = image_features.shape
        gen_pooling = self.get_gen_pooling()
        stride = int(gen_pooling.split('_')[-1])
        sqrt_n = int(n ** 0.5)
        # Fold the token sequence back into a square grid, average-pool it,
        # then flatten back to a (num_img, tokens, channels) sequence, which
        # is the layout the callers index into.
        image_features = image_features.permute(0, 2, 1).view(num_img, c, sqrt_n, sqrt_n)
        image_features = F.avg_pool2d(image_features, kernel_size=(stride, stride), stride=stride)
        image_features = image_features.flatten(2).permute(0, 2, 1)
        return image_features
|
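# Shape sketch (illustrative): with n = 729 tokens (a 27x27 grid) and a
# gen_pooling string ending in "_3" (stride 3, an assumed config value),
# (B, 729, C) -> (B, C, 27, 27) -> avg_pool2d -> (B, C, 9, 9) -> (B, 81, C).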
|
    def get_sigmas(self, timesteps, device, n_dim=4, dtype=torch.float32):
        sigmas = self.get_model().noise_scheduler.sigmas.to(device=device, dtype=dtype)
        schedule_timesteps = self.get_model().noise_scheduler.timesteps.to(device=device)
        timesteps = timesteps.to(device)
        step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        # Right-pad with singleton dims so sigma broadcasts against the latents.
        while len(sigma.shape) < n_dim:
            sigma = sigma.unsqueeze(-1)
        return sigma
|
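# Sketch: for (B,) timesteps and n_dim=4 the result has shape (B, 1, 1, 1),
# so it broadcasts against (B, C, H, W) latents, e.g. in the flow-matching
# interpolation noisy = (1 - sigma) * latents + sigma * noise.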
|
    def mask_drop(self, latents, drop_prob=0.1):
        """Randomly zero out whole samples with probability `drop_prob`
        (classifier-free-guidance-style condition dropout)."""
        if drop_prob <= 0:
            return latents
        mask = torch.bernoulli(torch.zeros(latents.shape[0], device=latents.device, dtype=latents.dtype) + drop_prob)
        while len(mask.shape) < len(latents.shape):
            mask = mask.unsqueeze(-1)
        mask = 1 - mask  # 1 = keep the sample, 0 = drop it
        return latents * mask
|
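# Usage sketch (assumption: applied to the conditioning stream during training
# so the diffusion head also sees unconditional samples, enabling
# classifier-free guidance at inference):
#
#     cond_embeds = self.mask_drop(cond_embeds, drop_prob=0.1)  # ~10% zeroed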
|
    def prepare_inputs_labels_for_multimodal(
        self, input_ids, position_ids, attention_mask, past_key_values, labels,
        gen_images, und_images, grid_thw, i_s_pos, image_sizes=None
    ):
        if (gen_images is None and und_images is None) or input_ids.shape[1] == 1:
            # Nothing multimodal to do; mirror the arity of the return below.
            return input_ids, position_ids, attention_mask, past_key_values, None, labels, None

        # Encode the generation targets into scaled VAE latents.
        vae = self.get_model().get_sana_vae()
        vae_device = vae.device
        prompt_image_embeds = vae.encode(gen_images.to(vae_device)).latent if gen_images is not None else None
        prompt_image_embeds = prompt_image_embeds * vae.config.scaling_factor if prompt_image_embeds is not None else None
        target_image_embeds = prompt_image_embeds.clone().detach() if prompt_image_embeds is not None else None

        # Embed the text and mask image-token positions out of the LM loss.
        image_idx = (input_ids == IMAGE_TOKEN_IDX)
        text_embeds = self.get_model().embed_tokens(input_ids)
        labels[image_idx] = -100
        return None, position_ids, attention_mask, past_key_values, text_embeds, labels, target_image_embeds
|
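# Downstream sketch (an assumption, not this file's training loop): the scaled
# VAE latents returned as target_image_embeds would typically feed a
# flow-matching objective such as
#
#     noise = torch.randn_like(latents)
#     sigma = self.get_sigmas(timesteps, latents.device, n_dim=latents.ndim)
#     noisy = (1 - sigma) * latents + sigma * noise
#     target = noise - latents   # velocity target for FlowMatchEuler training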
|
    def initialize_vision_tokenizer(self, model_args, tokenizer):
        if model_args.mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

        if model_args.mm_use_im_start_end:
            num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

            if num_new_tokens > 0:
                input_embeddings = self.get_input_embeddings().weight.data
                output_embeddings = self.get_output_embeddings().weight.data

                # Initialize the new token embeddings to the mean of the
                # existing vocabulary embeddings.
                input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)
                output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)

                input_embeddings[-num_new_tokens:] = input_embeddings_avg
                output_embeddings[-num_new_tokens:] = output_embeddings_avg

            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = True
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False

            if model_args.pretrain_mm_mlp_adapter:
                mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
                embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']
                assert num_new_tokens == 2
                if input_embeddings.shape == embed_tokens_weight.shape:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
                elif embed_tokens_weight.shape[0] == num_new_tokens:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight
                else:
                    raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Number of new tokens: {num_new_tokens}.")
        elif model_args.mm_use_im_patch_token:
            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = False
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False