# FUNPosterEval / run / qwen_32b.py
# Uploaded by BruceFeng98 ("Upload 2 files", commit a661d53 verified, 4.49 kB)
from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
from tqdm import tqdm
import json
import os
from pathlib import Path
from threading import Lock
from typing import Any
# Serializes writers so concurrent saves cannot interleave on the same path.
_json_write_lock = Lock()


def save_json_file(
    data: Any,
    file_path: str,
    indent: int = 4,
    temp_suffix: str = ".tmp"
) -> None:
    """Atomically write ``data`` as JSON to ``file_path``.

    The payload is first written to a sibling temp file (flushed and
    fsync'ed), then moved into place with ``os.replace`` so readers never
    observe a partially written file. A module-level lock makes concurrent
    calls from multiple threads safe.

    :param data: Any JSON-serializable object.
    :param file_path: Destination path; parent directories are created.
    :param indent: Indentation level passed to ``json.dump``.
    :param temp_suffix: Suffix appended to build the temp-file name.
    :raises RuntimeError: If serialization or any file operation fails;
        the temp file is removed on a best-effort basis first.
    """
    path = Path(file_path)
    path.parent.mkdir(parents=True, exist_ok=True)
    temp_path = f"{file_path}{temp_suffix}"
    with _json_write_lock:
        try:
            with open(temp_path, "w", encoding="utf-8") as f:
                json.dump(data, f, ensure_ascii=False, indent=indent)
                f.flush()
                os.fsync(f.fileno())  # ensure bytes hit disk before the rename
            # Rename after the handle is closed; os.replace is atomic on
            # both POSIX and Windows.
            os.replace(temp_path, file_path)
        except Exception as e:
            # Best-effort cleanup of the partial temp file.
            try:
                if os.path.exists(temp_path):
                    os.remove(temp_path)
            except OSError:
                pass
            raise RuntimeError(f"save json failed: {e}") from e
def read_json_file(file_path):
    """Load and return the contents of a JSON file.

    :param file_path: Path of the JSON document to read.
    :return: The deserialized Python object.
    """
    with open(file_path, mode='r', encoding='utf-8') as handle:
        return json.load(handle)
def merge_json_lists(folder_path):
    """Concatenate the list contents of every ``.json`` file in a folder.

    Files whose top-level value is not a list are skipped with a warning;
    unreadable or unparsable files are reported and skipped (best effort,
    so one bad file does not abort the merge).

    :param folder_path: Directory scanned (non-recursively) for ``*.json``.
    :return: A single list holding the items of all valid files, visited
        in sorted filename order so repeated runs yield the same ordering.
    """
    # Sort for determinism: os.listdir order is platform/filesystem dependent.
    json_list = sorted(
        os.path.join(folder_path, f)
        for f in os.listdir(folder_path)
        if f.lower().endswith('.json') and os.path.isfile(os.path.join(folder_path, f))
    )
    merged_list = []
    for file_path in json_list:
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            if isinstance(data, list):
                merged_list.extend(data)
            else:
                print(f"警告: {file_path} 的内容不是列表,已跳过")
        except Exception as e:
            print(f"处理文件 {file_path} 时出错: {str(e)}")
    return merged_list
def build():
    """Load the Qwen2.5-VL-32B-Instruct model and its processor.

    The model is loaded with automatic dtype selection and automatic
    device placement across the available device(s).

    :return: ``(model, processor)`` tuple ready for inference.
    """
    model_id = "Qwen/Qwen2.5-VL-32B-Instruct"
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        model_id,
        torch_dtype="auto",
        device_map="auto",
    )
    processor = AutoProcessor.from_pretrained(model_id)
    return model, processor
def conversation(model, processor, image_path, prompt, max_new_tokens=128):
    """Run one single-turn, single-image inference with a Qwen2.5-VL model.

    :param model: A loaded ``Qwen2_5_VLForConditionalGeneration`` instance.
    :param processor: The matching ``AutoProcessor``.
    :param image_path: Path (or URI) of the image to attach to the turn.
    :param prompt: The user's text prompt.
    :param max_new_tokens: Generation budget; defaults to 128 to preserve
        the previous hard-coded behavior.
    :return: List of decoded output strings (one per batch element; the
        batch size here is 1).
    """
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_path,
                },
                {"type": "text", "text": prompt},
            ],
        }
    ]
    # Preparation for inference: render the chat template, then extract the
    # vision inputs referenced by the messages.
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    # NOTE(review): hard-coded "cuda" assumes a GPU is present even though
    # the model was loaded with device_map="auto" — confirm on CPU-only hosts.
    inputs = inputs.to("cuda")
    # Inference: generate, then strip the echoed prompt tokens so only the
    # newly generated continuation is decoded.
    generated_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    generated_ids_trimmed = [
        out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
    return output_text
if __name__ == "__main__":
    folder_path = r"your_path/poster/data"
    save_dir = r"your_path/poster/result"
    saved_json = os.path.join(save_dir, "qwen_32b_bench.json")

    # Build the task list once, then always reload from the checkpoint file
    # so items that already carry a "response" survive across runs.
    if not os.path.exists(saved_json):
        tasks = merge_json_lists(folder_path)
        save_json_file(tasks, saved_json)
    tasks = read_json_file(saved_json)

    model, processor = build()
    print("build model successful")

    for item in tqdm(tasks):
        if "response" in item:
            continue  # answered in a previous run — skip
        prompt = item["prompt"]
        image_path = os.path.join(folder_path, item["path"])
        try:
            item["response"] = conversation(model, processor, image_path, prompt)
            print(item["response"])
            # Checkpoint after every item so progress survives a crash.
            # (Removed a stray `break` that stopped after the first item —
            # it forced one task per run, almost certainly a debug leftover.)
            save_json_file(tasks, saved_json)
        except Exception as e:
            print(f"[Warning] failed: {e}")