---
library_name: transformers
pipeline_tag: image-text-to-text
base_model: Qwen/Qwen2-VL-72B
tags:
  - chat
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
---

This model is intended for debugging. It is randomly initialized, using the config from [Qwen/QVQ-72B-Preview](https://huggingface.co/Qwen/QVQ-72B-Preview), but is much smaller in size.

Code to create and test this model:
```python
import os

import torch
from transformers import (AutoConfig, AutoProcessor, GenerationConfig,
                          enable_full_determinism)
from transformers.models.qwen2_vl import Qwen2VLForConditionalGeneration

model_id = "Qwen/QVQ-72B-Preview"
repo_id = "yujiepan/qvq-preview-tiny-random"
save_path = f"/tmp/{repo_id}"

# Load the original config, then shrink every dimension to get a tiny model.
config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
config.hidden_size = 16
config.intermediate_size = 32
config.num_attention_heads = 2
config.num_hidden_layers = 2
config.num_key_value_heads = 1
config.vision_config.embed_dim = 16
config.vision_config.num_heads = 2
config.vision_config.hidden_size = 16
config.vision_config.depth = 2
config.rope_scaling['mrope_section'] = [1, 1, 2]  # must sum to head_dim // 2 = 4

# Deterministic random init so the checkpoint is reproducible.
enable_full_determinism(42)
model = Qwen2VLForConditionalGeneration(config=config)
model = model.to(torch.bfloat16).cuda().eval()
model.generation_config = GenerationConfig.from_pretrained(
    model_id, trust_remote_code=True,
)

# Reuse the original processor and save model + processor to a local folder.
processor = AutoProcessor.from_pretrained(model_id)
model.save_pretrained(save_path)
processor.save_pretrained(save_path)
os.system(f"ls -alh {save_path}")


def try_inference(model_id):
    # Reload from disk and run a single image+text generation as a smoke test.
    torch.use_deterministic_algorithms(False)
    from qwen_vl_utils import process_vision_info

    model = Qwen2VLForConditionalGeneration.from_pretrained(
        model_id, device_map="cuda"
    )
    processor = AutoProcessor.from_pretrained(model_id)
    messages = [
        {
            "role": "system",
            "content": [
                {"type": "text", "text": "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step."}
            ],
        },
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/QVQ/demo.png",
                },
                {"type": "text", "text": "What value should be filled in the blank space?"},
            ],
        }
    ]
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")
    generated_ids = model.generate(**inputs, max_new_tokens=32)
    output_text = processor.batch_decode(
        generated_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False
    )
    print(output_text)


try_inference(save_path)
```
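
The published checkpoint can also be loaded straight from the Hub for a quick text-only smoke test. This is a minimal sketch, assuming the repo above has been pushed and a CUDA device is available; the prompt is just an illustrative placeholder:

```python
import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

repo_id = "yujiepan/qvq-preview-tiny-random"
model = Qwen2VLForConditionalGeneration.from_pretrained(
    repo_id, torch_dtype=torch.bfloat16, device_map="cuda"
)
processor = AutoProcessor.from_pretrained(repo_id)

# Text-only chat message; the processor also accepts image-free input.
messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}]}]
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
inputs = processor(text=[text], return_tensors="pt").to("cuda")
generated_ids = model.generate(**inputs, max_new_tokens=16)
print(processor.batch_decode(generated_ids))
```

Since the weights are random, the decoded text is meaningless; the point is only that the forward pass and generation loop run end to end.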