Commit: Update backPrompt.py
File changed: backPrompt.py (+2 lines, −2 lines)
@@ -20,7 +20,7 @@ def build_transform(input_size):
     return transform


-def load_image(image_file
+def load_image(image_file):
     transform = build_transform(input_size=input_size)
     pixel_values = transform(image_file).unsqueeze(0)  # Add batch dimension
     return pixel_values
@@ -36,7 +36,7 @@ def main(image_path):
         use_flash_attn=True,
         trust_remote_code=True).eval()
     tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
-    pixel_values = load_image(image_path
+    pixel_values = load_image(image_path).to(torch.bfloat16).cuda()
     generation_config = dict(max_new_tokens=1024, do_sample=True)

     question = """<image>\n**Instruction:**

NOTE(review): the two removed lines above appear truncated in the page extraction
(`def load_image(image_file` and `pixel_values = load_image(image_path`); the
original removed text past those characters is not recoverable from this capture.