import os
import shutil

import torch
from llava.constants import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
                             DEFAULT_IMAGE_PATCH_TOKEN)
from llava.model import *
from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
                          BitsAndBytesConfig)


def load_pretrained_model(
    model_path,
    model_base,
    model_name,
    load_8bit=False,
    load_4bit=False,
    device_map="auto",
):
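    """Load a pretrained model and its tokenizer.

    Depending on ``model_name`` and ``model_base``, this loads a full LLaVA
    checkpoint, a LoRA checkpoint merged onto a base model, or a plain
    language model. Returns ``(tokenizer, model, image_processor,
    context_len)``; ``image_processor`` is ``None`` for non-LLaVA models.
    """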
    kwargs = {"device_map": device_map}

    # Pick a weight-loading strategy: 8-bit, 4-bit (NF4 via bitsandbytes),
    # or plain fp16 when no quantization is requested.
    if load_8bit:
        kwargs["load_in_8bit"] = True
    elif load_4bit:
        kwargs["load_in_4bit"] = True
        kwargs["quantization_config"] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
        )
    else:
        kwargs["torch_dtype"] = torch.float16

    if "llava" in model_name.lower():
        # Load LLaVA model
        if "lora" in model_name.lower() and model_base is not None:
            # LoRA case: load the base LLM first, then apply the LoRA deltas.
            lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            print("Loading LLaVA from base model...")
            model = LlavaLlamaForCausalLM.from_pretrained(
                model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs
            )
            token_num, token_dim = model.lm_head.out_features, model.lm_head.in_features
            if model.lm_head.weight.shape[0] != token_num:
                # Vocabulary size changed during fine-tuning (e.g., added
                # tokens); re-allocate the output head and input embeddings
                # so the checkpoint weights can be loaded into them.
                model.lm_head.weight = torch.nn.Parameter(
                    torch.empty(
                        token_num, token_dim, device=model.device, dtype=model.dtype
                    )
                )
                model.model.embed_tokens.weight = torch.nn.Parameter(
                    torch.empty(
                        token_num, token_dim, device=model.device, dtype=model.dtype
                    )
                )

            print("Loading additional LLaVA weights...")
            if os.path.exists(os.path.join(model_path, "non_lora_trainables.bin")):
                non_lora_trainables = torch.load(
                    os.path.join(model_path, "non_lora_trainables.bin"),
                    map_location="cpu",
                )
            else:
                # this is probably from HF Hub
                from huggingface_hub import hf_hub_download

                def load_from_hf(repo_id, filename, subfolder=None):
                    cache_file = hf_hub_download(
                        repo_id=repo_id, filename=filename, subfolder=subfolder
                    )
                    return torch.load(cache_file, map_location="cpu")

                non_lora_trainables = load_from_hf(
                    model_path, "non_lora_trainables.bin"
                )
            # Strip the "base_model." (and possibly "model.") prefixes added by
            # the training wrapper so the keys match this model's state dict.
            non_lora_trainables = {
                (k[11:] if k.startswith("base_model.") else k): v
                for k, v in non_lora_trainables.items()
            }
            if any(k.startswith("model.model.") for k in non_lora_trainables):
                non_lora_trainables = {
                    (k[6:] if k.startswith("model.") else k): v
                    for k, v in non_lora_trainables.items()
                }
            model.load_state_dict(non_lora_trainables, strict=False)

            from peft import PeftModel

            print("Loading LoRA weights...")
            model = PeftModel.from_pretrained(model, model_path)
            print("Merging LoRA weights...")
            model = model.merge_and_unload()
            print("Model is loaded...")
        elif model_base is not None:
            # this may be mm projector only
            print("Loading LLaVA from base model...")
            if "mpt" in model_name.lower():
                if not os.path.isfile(os.path.join(model_path, "configuration_mpt.py")):
                    shutil.copyfile(
                        os.path.join(model_base, "configuration_mpt.py"),
                        os.path.join(model_path, "configuration_mpt.py"),
                    )
                tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)
                cfg_pretrained = AutoConfig.from_pretrained(
                    model_path, trust_remote_code=True
                )
                model = LlavaMPTForCausalLM.from_pretrained(
                    model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs
                )
            else:
                tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
                cfg_pretrained = AutoConfig.from_pretrained(model_path)
                model = LlavaLlamaForCausalLM.from_pretrained(
                    model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs
                )

            # Attach the multimodal projector weights stored in model_path.
            mm_projector_weights = torch.load(
                os.path.join(model_path, "mm_projector.bin"), map_location="cpu"
            )
            mm_projector_weights = {
                k: v.to(torch.float16) for k, v in mm_projector_weights.items()
            }
            model.load_state_dict(mm_projector_weights, strict=False)
        else:
            # Full LLaVA checkpoint: everything lives in model_path.
            if "mpt" in model_name.lower():
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
                model = LlavaMPTForCausalLM.from_pretrained(
                    model_path, low_cpu_mem_usage=True, **kwargs
                )
            else:
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
                model = LlavaLlamaForCausalLM.from_pretrained(
                    model_path, low_cpu_mem_usage=True, **kwargs
                )
    else:
        # Load language model
        if model_base is not None:
            # PEFT model
            from peft import PeftModel

            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            model = AutoModelForCausalLM.from_pretrained(
                model_base,
                torch_dtype=torch.float16,
                low_cpu_mem_usage=True,
                device_map="auto",
            )
            print(f"Loading LoRA weights from {model_path}")
            model = PeftModel.from_pretrained(model, model_path)
            print("Merging weights")
            model = model.merge_and_unload()
            print("Convert to FP16...")
            model.to(torch.float16)
        else:
            if "mpt" in model_name.lower():
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
                model = AutoModelForCausalLM.from_pretrained(
                    model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs
                )
            else:
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
                model = AutoModelForCausalLM.from_pretrained(
                    model_path, low_cpu_mem_usage=True, **kwargs
                )

    image_processor = None

    if "llava" in model_name.lower():
        # Register the extra image tokens the checkpoint was trained with and
        # resize the embedding table accordingly.
        mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
        mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
        if mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
        if mm_use_im_start_end:
            tokenizer.add_tokens(
                [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True
            )
        model.resize_token_embeddings(len(tokenizer))

        vision_tower = model.get_vision_tower()
        if not vision_tower.is_loaded:
            vision_tower.load_model()
        vision_tower.to(device="cuda", dtype=torch.float16)
        image_processor = vision_tower.image_processor

    if hasattr(model.config, "max_sequence_length"):
        context_len = model.config.max_sequence_length
    else:
        context_len = 2048

    return tokenizer, model, image_processor, context_len
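

if __name__ == "__main__":
    # Minimal usage sketch, not part of the library API. The checkpoint id
    # below is illustrative, and the call assumes a CUDA-capable machine
    # since the vision tower is moved to "cuda" above.
    from llava.mm_utils import get_model_name_from_path

    model_path = "liuhaotian/llava-v1.5-7b"
    tokenizer, model, image_processor, context_len = load_pretrained_model(
        model_path=model_path,
        model_base=None,
        model_name=get_model_name_from_path(model_path),
        load_4bit=True,
    )
    print(f"Loaded {model_path} with context length {context_len}")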