# Author: Pu Miao
# Evaluation code
# Commit: 4a192d8
import json
import logging
import os
import re
from tqdm import tqdm
# Module-level logger writing to stderr at INFO level.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# Prompt prefixes that instruct the model to answer inside a ```json fenced
# block so the reply can be recovered by parse_model_answer below.
COUNTING_BASE_PROMPT = 'You should output a json string with format {"answer": a int number}.Your output should be directly parsed by json.loads function. eg.```json{"answer": 1}```.\nNow the question is:'
RELATION_BASE_PROMPT = 'You should output a json string with format {"answer": "str"}, where str must be one of ["up", "under", "back", "front", "left", "right"]. Your output should be directly parsed by json.loads function. eg.```json{"answer": "left"}```.\nNow the question is:'
# Closed set of acceptable answers for the relation task.
VALID_RELATIONS = ["up", "under", "back", "front", "left", "right"]
# Visual separator used by callers when printing grouped output.
SPLIT_SYMBOL = "="*50
def parse_model_answer(model_output, task_type="counting"):
    """
    Parse the JSON-formatted answer out of a model's raw output.

    :param model_output: raw text produced by the model; expected to contain
        a ```json ... ``` fenced block such as ```json{"answer": 1}```.
        A bare JSON object is accepted as a fallback when the fence is missing.
    :param task_type: "counting" (answer must be a non-negative integer,
        given as an int or a digit string) or any other value for the
        relation task (answer must be one of VALID_RELATIONS).
    :return: the parsed answer (int for counting, str for relation), or
        None when no valid answer can be extracted.
    """
    match = re.search(r'```json\s*(\{.*?\})\s*```', model_output, re.DOTALL)
    if match is None:
        # Fallback: some models emit the JSON object without the code fence.
        match = re.search(r'(\{.*?\})', model_output, re.DOTALL)
    if match is None:
        return None
    try:
        result = json.loads(match.group(1))
        answer = result.get('answer')
    except Exception as e:
        # Malformed JSON or a non-dict payload; log and treat as unparseable.
        logger.error("%s;\n%s", model_output, e)
        return None
    if task_type == "counting":
        # bool is a subclass of int, so exclude it explicitly:
        # {"answer": true} must not be interpreted as the count 1.
        if isinstance(answer, bool):
            return None
        if isinstance(answer, int):
            return answer
        if isinstance(answer, str) and answer.isdigit():
            return int(answer)
        return None
    # Relation task: only the fixed vocabulary is accepted.
    return answer if answer in VALID_RELATIONS else None
def eval_loop(model, dataset, process_fn, task_type="counting", **kwargs):
    """
    Run `process_fn` over every sample in `dataset` and collect the results.

    :param model: model handle forwarded unchanged to `process_fn`.
    :param dataset: iterable of sample dicts carrying "id", "question",
        "image_path" and "answer" keys.
    :param process_fn: callable(model, sample, task_type=..., **kwargs)
        returning a dict with "model_answer" and "parsed_answer" keys.
    :param task_type: task name forwarded to `process_fn`.
    :param kwargs: extra keyword arguments forwarded to `process_fn`.
    :return: list of per-sample result dicts.
    """
    records = []
    for sample in tqdm(dataset):
        outcome = process_fn(model, sample, task_type=task_type, **kwargs)
        record = {
            "id": sample["id"],
            "question": sample["question"],
            "image_path": sample['image_path'],
            "model_answer": outcome['model_answer'],
            "parsed_answer": outcome['parsed_answer'],
            "ground_truth": sample['answer'],
        }
        # Log each sample's full record as pretty-printed JSON.
        logger.info(f"""\n{json.dumps(record, indent=4)}\n""")
        records.append(record)
    return records
def eval_pipeline(model_name, current_dir, params):
    """
    Evaluate a model on the Counting, Relation and Combination datasets and
    persist all results to ./result/{model_name}_results.json under current_dir.

    :param model_name: name used to build the output result filename.
    :param current_dir: directory of the running script; the dataset JSON
        files are expected one level above it, and results are written to
        its ./result/ subdirectory (created if missing).
    :param params: keyword arguments forwarded to eval_loop (model,
        process_fn, ...); must not contain "dataset" or "task_type".
    :return: None. Results are written to disk.
    """
    def _load_json(path):
        # Context manager guarantees the descriptor is closed (the previous
        # json.load(open(...)) pattern leaked one handle per dataset).
        with open(path, 'r', encoding='utf-8') as f:
            return json.load(f)

    results = {}
    parent_dir = os.path.split(current_dir)[0]
    # Counting (default task_type="counting")
    print("process counting dataset...")
    counting_data = _load_json(os.path.join(parent_dir, 'Counting.json'))
    results["counting_results"] = eval_loop(dataset=counting_data, **params)
    # Relation
    print("process relations dataset...")
    relations_data = _load_json(os.path.join(parent_dir, 'Relation.json'))
    results["relations_results"] = eval_loop(dataset=relations_data, task_type="relation", **params)
    # Combination — answers are counts, so the default task_type applies.
    print("process combination dataset...")
    combination_data = _load_json(os.path.join(parent_dir, 'Combination.json'))
    results["combination_results"] = eval_loop(dataset=combination_data, **params)
    # Persist everything as one JSON file under ./result/.
    result_parent_path = os.path.join(current_dir, 'result')
    os.makedirs(result_parent_path, exist_ok=True)
    result_path = os.path.join(result_parent_path, f'{model_name}_results.json')
    with open(result_path, 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=2)
    print(f"The process has finished. The evaluation results are saved to ./result/{model_name}_results.json")
    print(f"The number of counting samples processed successfully: {len(results['counting_results'])}")
    print(f"The number of relationship samples processed successfully: {len(results['relations_results'])}")
    print(f"The number of combination samples processed successfully: {len(results['combination_results'])}")