import os
import torch
import safetensors.torch
import matplotlib
import numpy as np
import torch.nn.functional as F
from torchvision import transforms as tt
from omegaconf import OmegaConf
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM
from transformers import Blip2Processor, Blip2ForConditionalGeneration  # used only by the disabled BLIP2 loader below

from PrimeDepth_inference.ldm.util import instantiate_from_config


def get_state_dict(d):
    # Checkpoints may nest the weights under a 'state_dict' key; unwrap if so
    return d.get('state_dict', d)


def load_state_dict(ckpt_path, location='cpu'):
    _, extension = os.path.splitext(ckpt_path)
    if extension.lower() == ".safetensors":
        state_dict = safetensors.torch.load_file(ckpt_path, device=location)
    else:
        state_dict = torch.load(ckpt_path, map_location=torch.device(location))
    # Unwrap a possible 'state_dict' wrapper regardless of checkpoint format
    state_dict = get_state_dict(state_dict)
    print(f'Loaded state_dict from [{ckpt_path}]')
    return state_dict


def create_model(config_path, device="cpu"):
    config = OmegaConf.load(config_path)
    model = instantiate_from_config(config.model).to(device)
    print(f'Loaded model config from [{config_path}]')
    return model


def get_image(path):
    image = Image.open(path)
    if image.mode != "RGB":
        image = image.convert("RGB")
    return image


class InferenceEngine:
    """
    Utility class for obtaining PrimeDepth predictions

    """

    def __init__(self, pd_config_path, blip2_cache_dir=None, cmap="Spectral", device="cuda"):
        """
        pd_config_path : str
            Path to the model configuration file
        blip2_cache_dir : str, optional
            Path to the cache directory for the BLIP2 model, by default None
        cmap: str, optional
            Matplotlib colormap name, by default "Spectral"
        device : str, optional
            Device to run the model on, by default "cuda"

        """

        self.pd = create_model(pd_config_path, device)
        self.processor, self.model = self.load_BLIP2(blip2_cache_dir, device)
        self.cm = matplotlib.colormaps[cmap]

    # Original BLIP2 loader, disabled in favor of the lighter GIT model below:
    '''
    def load_BLIP2(self, cache_dir=None, device="cuda"):
        processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b", cache_dir=cache_dir)
        model = Blip2ForConditionalGeneration.from_pretrained(
            "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16, cache_dir=cache_dir
        )
        model = model.to(device)
        return processor, model
    '''

    def load_BLIP2(self, cache_dir=None, device="cuda"):
        # Use microsoft/git-base as a lightweight replacement for BLIP2
        # (the original BLIP2 loader is kept above for reference)
        model_name = "microsoft/git-base"

        try:
            print(f"Loading alternative captioning model: {model_name}")
            processor = AutoProcessor.from_pretrained(model_name, cache_dir=cache_dir)
            model = AutoModelForCausalLM.from_pretrained(model_name, cache_dir=cache_dir)
            model = model.to(device)
            print("Captioning model loaded successfully!")
            return processor, model

        except Exception as e:
            print(f"Error loading captioning model: {e}")
            raise

    def captionize(self, image):
        # 1. Preprocess the input image into model tensors
        inputs = self.processor(images=image, return_tensors="pt")
        # 2. Move tensors to the model's device; cast floating-point tensors
        #    (pixel values) to the model's dtype, leaving integer tensors
        #    (token ids, attention masks) untouched
        model_dtype = next(self.model.parameters()).dtype
        inputs = {
            k: v.to(device=self.model.device,
                    dtype=model_dtype if v.is_floating_point() else v.dtype)
            if torch.is_tensor(v) else v
            for k, v in inputs.items()
        }

        # 3. Generate caption token ids with beam search
        generated_ids = self.model.generate(
            **inputs,
            max_length=30,
            num_beams=4,
            num_return_sequences=1
        )

        # 4. Decode the generated ids into a text caption
        caption = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
        return caption

    # Original BLIP2 captioning, disabled together with the BLIP2 loader above:
    '''
    def captionize(self, image):
        inputs = self.processor(images=image, return_tensors="pt").to('cuda', torch.float16)
        generated_ids = self.model.generate(**inputs)
        caption = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
        return caption
    '''

    def predict(self, image_path, max_size=1024):
        """
        Parameters
        ----------
        image_path : str
            Path to the image
        max_size : int, optional
            Maximum size of the longer image edge during processing, by default 1024

        Returns
        -------
        depth_ssi : np.ndarray
            Scale- and shift-invariant depth map prediction
        depth_color : PIL.Image
            Colorized depth map prediction

        """

        image = get_image(image_path)
        caption = self.captionize(image)
        w, h = image.size

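        # Cap the longer edge at max_size: torchvision's Resize(size, max_size=...)
        # scales the shorter edge to `size` unless the longer edge would then
        # exceed `max_size`, in which case the longer edge is clamped to max_size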
        if max_size is not None and max(h, w) > max_size:
            if h == w:
                image = tt.Resize(max_size)(image)
            else:
                image = tt.Resize(max_size-1, max_size=max_size)(image)

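        # Query PrimeDepth conditioned on the generated caption; the raw depth
        # labels are averaged over dim 1 (channels) to obtain a single-channel map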
        with torch.no_grad():
            labels = self.pd.get_label_from_image(image, prompt=caption)
        depth_ssi = labels['depth'].clone().mean(dim=1)

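        # If the image was downscaled for inference, bilinearly upsample the
        # prediction back to the original (h, w) resolution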
        if max_size is not None and max(h, w) > max_size:
            depth_ssi = F.interpolate(depth_ssi[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
        else:
            depth_ssi = depth_ssi[0]

        depth_ssi = depth_ssi.cpu().numpy().astype(np.float32)

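        # Min-max normalize to [0, 1] and apply the colormap for visualization
        # (assumes a non-constant depth map; min == max would divide by zero)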
        depth_color = depth_ssi.copy()
        depth_color = (depth_color - depth_color.min()) / (depth_color.max() - depth_color.min())
        depth_color = self.cm(depth_color)[:, :, :3]
        depth_color = (depth_color * 255).astype(np.uint8)
        depth_color = Image.fromarray(depth_color)

        return depth_ssi, depth_color
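

# Minimal usage sketch, assuming hypothetical paths for the PrimeDepth config
# and the input image (neither path ships with this file; adjust to your setup):
if __name__ == "__main__":
    engine = InferenceEngine(
        pd_config_path="configs/primedepth_inference.yaml",  # hypothetical config path
        device="cuda",
    )
    depth_ssi, depth_color = engine.predict("example.jpg", max_size=1024)  # hypothetical image
    print(f"Depth map: shape={depth_ssi.shape}, dtype={depth_ssi.dtype}")
    depth_color.save("example_depth.png")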