# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import shutil

import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig

from llava.model import *
from llava.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN

def map_keys(model, pretrained_ckpt_loc, stage_depths=(3, 3, 9, 3)):
    """Copy ConvNeXt-style weights from a raw checkpoint into `model`.

    The checkpoint is expected to hold `downsample_layers.*` and `stages.*`
    tensors. `stage_depths` gives the number of blocks per stage; the default
    (3, 3, 9, 3) assumes a ConvNeXt-T layout.
    """
    ckpt = torch.load(pretrained_ckpt_loc, map_location='cpu')
    with torch.no_grad():
        # Stem plus the three intermediate downsampling layers (conv + norm each).
        for i in range(4):
            for p in range(2):
                model.downsample_layers[i][p].weight.copy_(ckpt[f'downsample_layers.{i}.{p}.weight'])
                model.downsample_layers[i][p].bias.copy_(ckpt[f'downsample_layers.{i}.{p}.bias'])
        # Per-block weights of each stage: layer scale (gamma), depthwise conv,
        # layer norm, and the two pointwise convs of the MLP.
        for j in range(4):
            for k in range(stage_depths[j]):
                block = model.stages[j][k]
                block.gamma.copy_(ckpt[f'stages.{j}.{k}.gamma'])
                block.dwconv.weight.copy_(ckpt[f'stages.{j}.{k}.dwconv.weight'])
                block.dwconv.bias.copy_(ckpt[f'stages.{j}.{k}.dwconv.bias'])
                block.norm.weight.copy_(ckpt[f'stages.{j}.{k}.norm.weight'])
                block.norm.bias.copy_(ckpt[f'stages.{j}.{k}.norm.bias'])
                block.pwconv1.weight.copy_(ckpt[f'stages.{j}.{k}.pwconv1.weight'])
                block.pwconv1.bias.copy_(ckpt[f'stages.{j}.{k}.pwconv1.bias'])
                block.pwconv2.weight.copy_(ckpt[f'stages.{j}.{k}.pwconv2.weight'])
                block.pwconv2.bias.copy_(ckpt[f'stages.{j}.{k}.pwconv2.bias'])
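
# A minimal usage sketch (the builder function and checkpoint path below are
# hypothetical, not part of this file): copy robust ConvNeXt weights into a
# freshly built vision backbone before wiring it into the multimodal model.
#
#   vision_backbone = build_convnext()  # hypothetical model constructor
#   map_keys(vision_backbone, '/path/to/robust_convnext.pt')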


class ClipVisionModel(torch.nn.Module):
    """Thin wrapper around a CLIP visual tower that applies input
    normalization and optionally returns all patch tokens."""

    def __init__(self, model, normalize, all_tokens=False, proj=True):
        super().__init__()
        self.model = model
        self.normalize = normalize
        self.proj = model.proj
        if all_tokens:
            # Return (pooled embedding, patch tokens) instead of pooled only.
            self.model.output_tokens = True
        if not proj:
            # Skip the final projection into the joint CLIP embedding space.
            self.model.proj = None

    def forward(self, vision_, output_normalize):
        embedding = self.model(self.normalize(vision_))
        if output_normalize:
            embedding = F.normalize(embedding, dim=-1)
        if self.model.output_tokens:
            # Flatten and concatenate the pooled embedding and all patch tokens.
            return torch.hstack([embedding[0].flatten(1), embedding[1].flatten(1)])
        else:
            return embedding
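
# A minimal usage sketch, assuming an open_clip backbone (the model name and
# normalization constants below are the standard OpenAI CLIP ones, shown for
# illustration):
#
#   import open_clip
#   from torchvision import transforms
#
#   clip_model, _, _ = open_clip.create_model_and_transforms('ViT-L-14', pretrained='openai')
#   normalize = transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073),
#                                    std=(0.26862954, 0.26130258, 0.27577711))
#   vision = ClipVisionModel(clip_model.visual, normalize, all_tokens=False)
#   emb = vision(images, output_normalize=True)  # images: (B, 3, 224, 224) in [0, 1]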


def load_pretrained_model(model_path, model_base, model_name, pretrained_rob_path=None, dtype=None, device_map="auto", device="cuda"):
    kwargs = {"device_map": device_map}
    # Quantized loading is hardcoded off here; flip these flags to enable it.
    load_8bit = False
    load_4bit = False
    if load_8bit:
        kwargs['load_in_8bit'] = True
    elif load_4bit:
        kwargs['load_in_4bit'] = True
        kwargs['quantization_config'] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type='nf4'
        )
    else:
        assert dtype is not None, 'dtype must be provided when not loading quantized'
        if dtype == 'float16':
            kwargs['torch_dtype'] = torch.float16
        elif dtype == 'float32':
            kwargs['torch_dtype'] = torch.float32
        else:
            raise ValueError(f"Unknown dtype {dtype}, must be float16 or float32")
    if 'llava' in model_name.lower():
        # Load LLaVA model
        if 'lora' in model_name.lower() and model_base is None:
            warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')
        if 'lora' in model_name.lower() and model_base is not None:
            lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            print('Loading LLaVA from base model...')
            model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
            token_num, token_dim = model.lm_head.out_features, model.lm_head.in_features
            if model.lm_head.weight.shape[0] != token_num:
                model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype))
                model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype))

            print('Loading additional LLaVA weights...')
            if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
                non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
            else:
                # this is probably from HF Hub
                from huggingface_hub import hf_hub_download

                def load_from_hf(repo_id, filename, subfolder=None):
                    cache_file = hf_hub_download(
                        repo_id=repo_id,
                        filename=filename,
                        subfolder=subfolder)
                    return torch.load(cache_file, map_location='cpu')

                non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
            non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
            if any(k.startswith('model.model.') for k in non_lora_trainables):
                non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
            model.load_state_dict(non_lora_trainables, strict=False)

            from peft import PeftModel
            print('Loading LoRA weights...')
            model = PeftModel.from_pretrained(model, model_path)
            print('Merging LoRA weights...')
            model = model.merge_and_unload()
            print('Model is loaded...')
        elif model_base is not None:
            # this may be mm projector only
            print('Loading LLaVA from base model...')
            if 'mpt' in model_name.lower():
                if not os.path.isfile(os.path.join(model_path, 'configuration_mpt.py')):
                    shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py'))
                tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)
                cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
                model = LlavaMPTForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
            else:
                tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
                cfg_pretrained = AutoConfig.from_pretrained(model_path)
                model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)

            mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')
            mm_projector_weights = {k: v.to(kwargs["torch_dtype"]) for k, v in mm_projector_weights.items()}
            model.load_state_dict(mm_projector_weights, strict=False)
        else:
            if 'mpt' in model_name.lower():
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
                model = LlavaMPTForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
            else:
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
                model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
    else:
        # Load plain language model (no vision tower)
        if model_base is not None:
            # PEFT model
            from peft import PeftModel
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=kwargs["torch_dtype"], low_cpu_mem_usage=True, device_map="auto")
            print(f"Loading LoRA weights from {model_path}")
            model = PeftModel.from_pretrained(model, model_path)
            print("Merging weights...")
            model = model.merge_and_unload()
            if kwargs["torch_dtype"] == torch.float16:
                print('Convert to FP16...')
                model.to(torch.float16)
        else:
            if 'mpt' in model_name.lower():
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
                model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)
            else:
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
                model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
    image_processor = None

    if 'llava' in model_name.lower():
        mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
        mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
        if mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
        if mm_use_im_start_end:
            tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
        model.resize_token_embeddings(len(tokenizer))

        vision_tower = model.get_vision_tower()
        # Load robust (non-LLaVA) vision weights when a checkpoint path is given.
        non_llava = pretrained_rob_path not in (None, 'None', 'none')
        if not vision_tower.is_loaded:
            vision_tower.load_model(non_llava, pretrained_rob_path)
        vision_tower.to(device=device, dtype=kwargs["torch_dtype"])
        image_processor = vision_tower.image_processor

    if hasattr(model.config, "max_sequence_length"):
        context_len = model.config.max_sequence_length
    else:
        context_len = 2048

    return model, image_processor, tokenizer, context_len
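

# A minimal end-to-end sketch (the model paths below are illustrative, not
# shipped with this file): load a LLaVA checkpoint in fp16 on a single GPU.
if __name__ == '__main__':
    model, image_processor, tokenizer, context_len = load_pretrained_model(
        model_path='liuhaotian/llava-v1.5-7b',  # assumed HF repo id
        model_base=None,
        model_name='llava-v1.5-7b',
        pretrained_rob_path=None,               # no robust vision checkpoint
        dtype='float16',
        device='cuda',
    )
    print(type(model).__name__, context_len)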