File size: 4,774 Bytes
fca4fc0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import os
import shutil

from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig
import torch
from vtimellm.model import *
from peft import PeftModel

def load_lora(model, lora_path):
    """Attach LoRA adapter weights from ``lora_path`` onto ``model``.

    If the checkpoint directory also contains ``non_lora_trainables.bin``
    (extra tensors trained outside the LoRA adapter), those are loaded into
    the model first, after normalising their key prefixes to match the bare
    model's state dict.
    """
    extras_file = os.path.join(lora_path, 'non_lora_trainables.bin')
    if os.path.exists(extras_file):
        extras = torch.load(extras_file, map_location='cpu')
        # Checkpoints saved from a PEFT-wrapped model prefix every key with
        # 'base_model.' — strip it so the keys match the plain model.
        extras = {
            (key[11:] if key.startswith('base_model.') else key): tensor
            for key, tensor in extras.items()
        }
        # Some checkpoints carry one additional 'model.' nesting level.
        if any(key.startswith('model.model.') for key in extras):
            extras = {
                (key[6:] if key.startswith('model.') else key): tensor
                for key, tensor in extras.items()
            }
        # strict=False: the file holds only a subset of the state dict.
        model.load_state_dict(extras, strict=False)
    print('Loading LoRA weights...')
    return PeftModel.from_pretrained(model, lora_path)

def load_pretrained_model(args, stage2=None, stage3=None, stage4=None, stage5=None):
    """
    Load a VTimeLLM model with proper GPU device handling.

    Builds the tokenizer and base causal-LM (ChatGLM or Llama variant,
    chosen by substring match on ``args.model_base``), moves the model to
    the current CUDA device when available, initializes the vision modules,
    and then sequentially loads and merges up to four LoRA stage
    checkpoints.

    Args:
        args: Namespace providing at least ``model_base`` and whatever
            ``initialize_vision_modules`` requires.
        stage2..stage5: Optional paths to LoRA checkpoint directories;
            each is loaded via ``load_lora`` and merged into the weights.

    Returns:
        (tokenizer, model, context_len) where ``context_len`` falls back
        to 2048 if the config has no ``max_sequence_length``.
    """
    kwargs = {'torch_dtype': torch.float16}

    model_base = args.model_base

    # Pin every subsequent .to() call to the already-selected CUDA device
    # so model loading does not fan out across multiple GPUs.
    if torch.cuda.is_available():
        current_device = torch.cuda.current_device()
        print(f'Using GPU device: {current_device}')
        print(f'GPU name: {torch.cuda.get_device_name(current_device)}')
        device = f'cuda:{current_device}'
    else:
        device = 'cpu'
        print('No CUDA available, using CPU')

    print('Loading VTimeLLM from base model...')
    if 'chatglm' in model_base:
        tokenizer = AutoTokenizer.from_pretrained(model_base, trust_remote_code=True)
        model = VTimeLLMChatGLMForCausalLM.from_pretrained(model_base)
    else:
        tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
        model = VTimeLLMLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, **kwargs)
        token_num, token_dim = model.lm_head.out_features, model.lm_head.in_features
        # NOTE(review): for a standard nn.Linear lm_head, weight.shape[0]
        # equals out_features, so this branch looks unreachable (likely a
        # vestige of LLaVA-style loading where token_num came from a saved
        # config). Kept for safety with non-standard heads — confirm.
        if model.lm_head.weight.shape[0] != token_num:
            model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype))
            model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype))

    # Move the whole model to the single chosen device before any LoRA work.
    if torch.cuda.is_available():
        model = model.to(device)
        print(f'Model moved to {device}')
    else:
        print('Model loaded on CPU')

    model.get_model().initialize_vision_modules(args)

    # NOTE(review): stages 3-5 are only applied when stage2 is given — the
    # checks are nested under the stage2 branch. Presumably intentional
    # (each stage builds on the previous merge); verify against callers.
    if stage2 is not None and stage2 != "":
        print('Loading stage2 weights...')
        model = load_lora(model, stage2)
        print('Merging stage2 weights...')
        model = model.merge_and_unload()

        if stage3 is not None and stage3 != "" :
            print('Loading stage3 weights...')
            model = load_lora(model, stage3)
            print('Merging stage3 weights...')
            model = model.merge_and_unload()

        if stage4 is not None and stage4 != "":
            print('Loading stage4 weights...')
            model = load_lora(model, stage4)
            print('Merging stage4 weights...')
            model = model.merge_and_unload()

        if stage5 is not None and stage5 != "":
            print('Loading stage5 weights...')
            model = load_lora(model, stage5)
            print('Merging stage5 weights...')
            model = model.merge_and_unload()

    # Context window: trust the model config when present, else default.
    if hasattr(model.config, "max_sequence_length"):
        context_len = model.config.max_sequence_length
    else:
        context_len = 2048

    return tokenizer, model, context_len

def load_pretrained_model_single_gpu(args, stage2=None, stage3=None, stage4=None, stage5=None):
    """
    Load a VTimeLLM model while forcing single-GPU usage.

    Sets ``CUDA_VISIBLE_DEVICES`` and ``OMPI_COMM_WORLD_SIZE`` to restrict
    execution to GPU 0, selects that device in torch, then delegates to
    ``load_pretrained_model`` with the same stage arguments.

    NOTE(review): setting CUDA_VISIBLE_DEVICES only takes effect if the
    CUDA context has not been initialized yet in this process — confirm
    this is called before any other torch.cuda use.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    os.environ['OMPI_COMM_WORLD_SIZE'] = '1'

    if torch.cuda.is_available():
        torch.cuda.set_device(0)
        print(f'Forced single GPU usage: {torch.cuda.get_device_name(0)}')

    return load_pretrained_model(args, stage2, stage3, stage4, stage5)