#!/usr/bin/env python3
# Batch-caption local images with a LLaVA checkpoint.
from llava.eval.run_llava import eval_model
from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
import os
from types import SimpleNamespace

# Pretrained LLaVA checkpoint to run inference with.
model_path = "liuhaotian/llava-v1.5-7b"

# Query sent to the model for every image.
prompt = "Could you help describe the main object of the input image?"
# Other prompts tried during experimentation, kept for reference:
#   "What are the things I should be cautious about when I visit here?"
#   "Could you help describe the input image?"
#   "In this view, identify and describe the object that is most likely for human interaction"
#   "Please describe the object with the green mask in the input image."
#   "Please describe the object covered by the green mask."
#   "what is the object covered by the green mask?"
#   "what is the object in the red bounding box of the image"
#   "What is the object that is most likely interative with people?"

# Alternative hand-picked test set (unused; switch `images` below to use it).
image_file_list = [
    "images/WechatIMG2241.jpg",
    "images/WechatIMG2242.jpg",
    "images/WechatIMG2243.jpg",
    "images/WechatIMG2244.jpg",
]

# Collect ego- and exo-centric test images. sorted() makes the run order
# deterministic (os.listdir order is filesystem-arbitrary).
ego_list = sorted(os.path.join("images/ego", f) for f in os.listdir("./images/ego"))
exo_list = sorted(os.path.join("images/exo", f) for f in os.listdir("./images/exo"))

# Evaluate the egocentric set.
images = ego_list
print(ego_list)

# Argument object expected by eval_model; SimpleNamespace replaces the original
# anonymous-class hack with the same attribute interface.
# NOTE(review): image_file is a list here, while run_llava typically expects a
# sep-joined string — this matches the original script; confirm eval_model
# handles a list upstream.
args = SimpleNamespace(
    model_path=model_path,
    model_base=None,
    model_name=get_model_name_from_path(model_path),
    query=prompt,
    conv_mode=None,
    image_file=images,
    sep=",",
    temperature=0,          # greedy decoding
    top_p=None,
    num_beams=1,
    max_new_tokens=512,
)

eval_model(args)