Update README.md
README.md CHANGED
````diff
@@ -222,8 +222,6 @@ print(answer)
 
 ## 🤗 Transformers:
 ```python
-from __future__ import annotations
-
 import torch
 from PIL import Image
 from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration
@@ -244,7 +242,7 @@ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     trust_remote_code=True,
 )
 
-img = Image.open("
+img = Image.open("image.png").convert("RGB")
 messages = [{
     "role": "user",
     "content": [
@@ -252,14 +250,14 @@ messages = [{
     ],
 }]
 prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-
+model_input = processor(text=prompt, images=[img], return_tensors="pt").to(model.device)
 
 with torch.no_grad():
-
-
-out = processor.decode(out[0])
+    model_output = model.generate(**model_input, temperature=0.7, max_new_tokens=5000)
 
-
-
+result = processor.decode(model_output[0])
+reasoning = result.split("<think>")[1].split("</think>")[0]
+answer = result.split("<answer>")[1].split("</answer>")[0]
+print(answer)
 ```
 
````
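The added tail of the snippet assumes the decoded output always contains both a `<think>…</think>` and an `<answer>…</answer>` block; a plain `str.split` will raise `IndexError` if either tag is missing. A minimal sketch of a more defensive parse is below; the `extract_tag` helper and its fall-back-to-raw-text behavior are illustrative assumptions, not something the README defines.

```python
import re

def extract_tag(text: str, tag: str) -> str | None:
    """Return the content of <tag>...</tag> from decoded model output, or None if the tag is absent."""
    match = re.search(rf"<{tag}>(.*?)</{tag}>", text, flags=re.DOTALL)
    return match.group(1).strip() if match else None

# `result` stands in for processor.decode(model_output[0]) from the snippet above.
result = "<think>The sign in the image reads 'EXIT'.</think><answer>EXIT</answer>"
reasoning = extract_tag(result, "think")
answer = extract_tag(result, "answer") or result  # fall back to the raw text if no <answer> tag was emitted
print(answer)  # EXIT
```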