Fix inference script image download and formatting
Browse files
README.md
CHANGED
|
@@ -136,6 +136,7 @@ Then you can run the following code:
|
|
| 136 |
```python
|
| 137 |
import torch
|
| 138 |
from PIL import Image
|
|
|
|
| 139 |
import requests
|
| 140 |
|
| 141 |
from transformers import AutoModelForCausalLM, AutoProcessor
|
|
@@ -146,11 +147,11 @@ processor = AutoProcessor.from_pretrained("microsoft/Magma-8B", trust_remote_cod
|
|
| 146 |
model.to("cuda")
|
| 147 |
|
| 148 |
# Inference
|
| 149 |
-
url = "https://assets-c4akfrf5b4d3f4b7.z01.azurefd.net/assets/2024/04/BMDataViz_661fb89f3845e.png"
|
| 150 |
-
image = Image.open(requests.get(url, stream=True).raw)
|
| 151 |
|
| 152 |
convs = [
|
| 153 |
-
{"role": "system", "content": "You are agent that can see, talk and act."},
|
| 154 |
{"role": "user", "content": "<image_start><image><image_end>\nWhat is in this image?"},
|
| 155 |
]
|
| 156 |
prompt = processor.tokenizer.apply_chat_template(convs, tokenize=False, add_generation_prompt=True)
|
|
|
|
| 136 |
```python
|
| 137 |
import torch
|
| 138 |
from PIL import Image
|
| 139 |
+
from io import BytesIO
|
| 140 |
import requests
|
| 141 |
|
| 142 |
from transformers import AutoModelForCausalLM, AutoProcessor
|
|
|
|
| 147 |
model.to("cuda")
|
| 148 |
|
| 149 |
# Inference
|
| 150 |
+
url = "https://assets-c4akfrf5b4d3f4b7.z01.azurefd.net/assets/2024/04/BMDataViz_661fb89f3845e.png"
|
| 151 |
+
image = Image.open(BytesIO(requests.get(url, stream=True).content))
|
| 152 |
|
| 153 |
convs = [
|
| 154 |
+
{"role": "system", "content": "You are agent that can see, talk and act."},
|
| 155 |
{"role": "user", "content": "<image_start><image><image_end>\nWhat is in this image?"},
|
| 156 |
]
|
| 157 |
prompt = processor.tokenizer.apply_chat_template(convs, tokenize=False, add_generation_prompt=True)
|