from transformers import AutoModelForCausalLM, AutoProcessor
from qwen_vl_utils import process_vision_info

# Path to the checkpoint directory; the script is assumed to sit one level
# below the model weights.
model_path = "../"

print(f"LOAD MODEL FROM: {model_path}")



# Remap checkpoint weight names to the module layout expected by recent
# transformers releases: the vision tower goes under `model.visual`, and all
# other `model.*` weights under `model.language_model`.
key_mapping = {
    "^visual": "model.visual",
    r"^model(?!\.(language_model|visual))": "model.language_model",
}

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
    torch_dtype="auto",
    key_mapping=key_mapping,
).eval().cuda()

conversation = [
    {
        "role": "system",
        "content": [
            {"type": "text", "text": "You are a multimodal large model developed by Huawei; your name is openPangu-VL-7B. You can process text and visual inputs and produce text outputs."},
        ],
    },
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Hello, who are you?"},
        ],
    },
]


processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)

# Render the conversation with the model's chat template, leaving the
# generation prompt open for the assistant's reply.
text = processor.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)

# Collect any image/video inputs referenced in the conversation (none in this
# text-only example, so both are None).
image_inputs, video_inputs = process_vision_info(conversation)

inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=False,
    return_tensors="pt",
)
inputs = inputs.to(model.device)

generated_ids = model.generate(**inputs, max_new_tokens=128)
# Strip the prompt tokens so only the newly generated reply is decoded.
generated_ids_trimmed = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
res = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(f"OUTPUT: {res}")