shilinxu committed on
Commit
5a1fa52
·
verified ·
1 Parent(s): 064e549

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +50 -0
README.md ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ ---
4
+
5
```python
import io

import requests
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

# Load the model and its processor. trust_remote_code=True is required because
# the repository ships custom modeling code; bfloat16 halves GPU memory use.
model_path = 'shilinxu/EFVLM-36200'
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    device_map='cuda:0',
    trust_remote_code=True,
)
processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)

# Fetch the demo image. raise_for_status() surfaces HTTP errors instead of
# letting PIL choke on an error body, and BytesIO over .content avoids the
# undecoded-raw-stream pitfall of Image.open(response.raw).
url = 'https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg'
response = requests.get(url, timeout=30)
response.raise_for_status()
image = Image.open(io.BytesIO(response.content))

# One user turn containing a text instruction and an image placeholder that
# the processor pairs with the image passed below.
messages = [
    {
        'role': 'user',
        'content': [
            {'type': 'text', 'text': 'Describe this image in detail.'},
            {'type': 'image'},
        ],
    }
]

# Render the chat template to a prompt string, then tokenize text + image.
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
inputs = processor(
    text=text,
    images=[image],
    padding=False,
    return_tensors="pt",
)
# BatchFeature.to casts only floating-point tensors (pixel values) to bfloat16;
# integer tensors such as input_ids keep their dtype.
inputs = inputs.to(model.device, dtype=torch.bfloat16)

# NOTE: temperature only takes effect with do_sample=True; as written this is
# greedy decoding with a repetition penalty.
generated_ids = model.generate(
    **inputs, max_new_tokens=128, temperature=1.0, repetition_penalty=1.2
)
# Strip the prompt tokens so only the newly generated text is decoded.
generated_ids = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
```