import os
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig
from ChatUniVi.model import *
from ChatUniVi.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN


def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto"):
    kwargs = {"device_map": device_map}

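    # Choose the load precision: 8-bit or 4-bit NF4 quantization through
    # bitsandbytes' BitsAndBytesConfig, otherwise full fp16 weights.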
    if load_8bit:
        kwargs['load_in_8bit'] = True
    elif load_4bit:
        kwargs['load_in_4bit'] = True
        kwargs['quantization_config'] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type='nf4'
        )
    else:
        kwargs['torch_dtype'] = torch.float16

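    # ChatUniVi checkpoints come in three forms: a LoRA adapter over a base
    # model, a multimodal-projector-only checkpoint over a base model, or a
    # fully fine-tuned model.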
    if 'chatunivi' in model_name.lower():
        if 'lora' in model_name.lower() and model_base is not None:
            # LoRA checkpoint: build the base LLM with the fine-tuned config,
            # then merge the LoRA deltas into it below.
            lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            print('Loading ChatUniVi from base model...')
            model = ChatUniViLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
            token_num, token_dim = model.lm_head.out_features, model.lm_head.in_features
            if model.lm_head.weight.shape[0] != token_num:
                # The fine-tuned vocabulary grew: re-allocate the output head and
                # input embeddings so the checkpoint weights fit when loaded.
                model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype))
                model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype))

            print('Loading additional ChatUniVi weights...')
            if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
                non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
            else:
                # No local file: the checkpoint is probably on the Hugging Face Hub.
                from huggingface_hub import hf_hub_download

                def load_from_hf(repo_id, filename, subfolder=None):
                    cache_file = hf_hub_download(
                        repo_id=repo_id,
                        filename=filename,
                        subfolder=subfolder)
                    return torch.load(cache_file, map_location='cpu')

                non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
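            # The non-LoRA weights were saved from a PEFT-wrapped model, so keys
            # may carry 'base_model.' and 'model.' prefixes; strip them to match
            # the bare model's state dict.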
            non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
            if any(k.startswith('model.model.') for k in non_lora_trainables):
                non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
            model.load_state_dict(non_lora_trainables, strict=False)

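            # merge_and_unload() folds the adapter deltas into the base weights,
            # so inference afterwards needs no PEFT wrapper.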
            from peft import PeftModel
            print('Loading LoRA weights...')
            model = PeftModel.from_pretrained(model, model_path)
            print('Merging LoRA weights...')
            model = model.merge_and_unload()
            print('Model is loaded...')
        elif model_base is not None:
            # The checkpoint may contain the multimodal projector only.
            print('Loading ChatUniVi from base model...')
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            cfg_pretrained = AutoConfig.from_pretrained(model_path)
            model = ChatUniViLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)

            mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')
            mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}
            model.load_state_dict(mm_projector_weights, strict=False)
        else:
            # Fully fine-tuned ChatUniVi checkpoint.
            tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
            model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
    else:
        # Plain language model (no ChatUniVi vision components).
        if model_base is not None:
            # PEFT (LoRA) checkpoint on top of a base LLM.
            from peft import PeftModel
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto")
            print(f"Loading LoRA weights from {model_path}")
            model = PeftModel.from_pretrained(model, model_path)
            print("Merging weights")
            model = model.merge_and_unload()
            print('Convert to FP16...')
            model.to(torch.float16)
        else:
            tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
            model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)

    image_processor = None

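    # For ChatUniVi models, register the special image tokens, resize the
    # token embeddings to match, and bring up the vision tower.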
    if 'chatunivi' in model_name.lower():
        mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
        mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
        if mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
        if mm_use_im_start_end:
            tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
        model.resize_token_embeddings(len(tokenizer))

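        # Materialize the vision tower if it was loaded lazily, and move it to
        # GPU in fp16 (the device is hard-coded to 'cuda' here).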
        vision_tower = model.get_vision_tower()
        if not vision_tower.is_loaded:
            vision_tower.load_model()
        vision_tower.to(device='cuda', dtype=torch.float16)

        image_processor = vision_tower.image_eval_processor

    if hasattr(model.config, "max_sequence_length"):
        context_len = model.config.max_sequence_length
    else:
        context_len = 2048

    return tokenizer, model, image_processor, context_len


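# Minimal usage sketch: the repo id below is an assumption, not a value from
# this module -- substitute a checkpoint you actually have, on disk or on the
# Hugging Face Hub.
if __name__ == "__main__":
    tokenizer, model, image_processor, context_len = load_pretrained_model(
        model_path="Chat-UniVi/Chat-UniVi",  # hypothetical Hub repo id
        model_base=None,
        model_name="ChatUniVi",
    )
    print(type(model).__name__, context_len)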