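"""Eval-Agent driver for VBench (open-model variant).

Plans evaluation steps with an SFT planning agent, has a prompt agent design
test prompts, samples videos from the target model, and scores them with
VBench tools. Optionally appends a model recommendation report built from the
VBench leaderboard.

Example invocation (the query string is illustrative):
    python eval_agent_for_vbench_open.py \
        --user_query "Evaluate the subject consistency of the model" \
        --model latte1 --recommend
"""
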
import re, time, os
from tqdm import tqdm
import json
from datetime import datetime
import argparse
import Levenshtein
from base_agent import BaseAgent_SFT, BaseAgent_Open
from system_prompts import sys_prompts
from tools import ToolCalling
from vbench_leaderboard import VBenchLeaderboard
import pandas as pd
from process import *


def parse_args():
    parser = argparse.ArgumentParser(description='Eval-Agent-VBench', formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        "--user_query",
        type=str,
        required=True,
        help="user query",
    )
    parser.add_argument(
        "--model",
        type=str,
        default="latte1",
        help="target model",
    )
    parser.add_argument(
        "--recommend",
        action="store_true",
        help="recommend model",
    )
    args = parser.parse_args()
    return args


class EvalAgent:
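    """Plans VBench sub-aspect evaluations, samples and scores videos, and iterates until a summary is produced."""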

    def __init__(self, sample_model="latte1", save_mode="video", refer_file="vbench_dimension_scores.tsv", recommend=False):
        self.tools = ToolCalling(sample_model=sample_model, save_mode=save_mode)
        self.sample_model = sample_model
        self.user_query = ""
        self.tsv_file_path = refer_file
        self.recommend = recommend

    def init_agent(self):
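        """Create the planning agent and the prompt-design agent, the latter pointed at a local /v1/chat/completions endpoint."""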
        self.eval_agent = BaseAgent_SFT(system_prompt=sys_prompts["eval-agent-vbench-training-sys"], use_history=True, temp=0.5)
        # self.prompt_agent = BaseAgent_Open(system_prompt=sys_prompts["vbench-prompt-sys"], use_history=True, temp=0.5)
        self.prompt_agent = BaseAgent_SFT(system_prompt=sys_prompts["vbench-prompt-sys-open"], use_history=True, temp=0.5, model_name_or_path="http://0.0.0.0:12334/v1/chat/completions")

    def recommend_model(self, query):
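        """Return a leaderboard-based recommendation report for the top-3 models matching the query."""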
        leaderboard = VBenchLeaderboard()
        recommendations = leaderboard.recommend_model(query, top_k=3)
        report = leaderboard.generate_recommendation_report(query, recommendations)
        return report

    def search_auxiliary(self, designed_prompts, prompt):
        for _, value in designed_prompts.items():
            if value['prompt'] == prompt:
                return value["auxiliary_info"]
        # Raising a bare string is a TypeError in Python 3; raise a proper exception.
        raise ValueError("Didn't find auxiliary info, please check your json.")

    def sample_and_eval(self, designed_prompts, save_path, tool_name):
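        """Sample videos for the designed prompts and score them with the named eval tool.

        `designed_prompts` may be a dict keyed by step (e.g. "Step 1"), a list,
        or a raw string that still needs parse_json.
        """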
        try:
            prompts = [item["prompt"] for _, item in designed_prompts.items()]
        except AttributeError:
            # The prompt agent returned a raw string (or a list); parse it first.
            designed_prompts = parse_json(designed_prompts)
            if isinstance(designed_prompts, list):
                prompts = [item["prompt"] for item in designed_prompts]
            else:
                prompts = [item["prompt"] for _, item in designed_prompts.items()]
        video_pairs = self.tools.sample(prompts, save_path)
        # Auxiliary info only exists in dict-style plans keyed by step name.
        if isinstance(designed_prompts, dict) and 'auxiliary_info' in designed_prompts.get("Step 1", {}):
            for item in video_pairs:
                item["auxiliary_info"] = self.search_auxiliary(designed_prompts, item["prompt"])
        eval_results = self.tools.eval(tool_name, video_pairs)
        return eval_results

    def reference_prompt(self, search_dim):
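        """Return VBench reference prompts (plus any auxiliary info) for the given dimension."""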
        file_path = "./eval_tools/vbench/VBench_full_info.json"
        with open(file_path, "r") as f:
            data = json.load(f)
        results = []
        for item in data:
            if search_dim in item["dimension"]:
                item.pop("dimension")
                item["Prompt"] = item.pop("prompt_en")
                if 'auxiliary_info' in item and search_dim in item['auxiliary_info']:
                    item["auxiliary_info"] = list(item["auxiliary_info"][search_dim].values())[0]
                results.append(item)
        return results

    # def format_eval_result(self, results, reference_table):
    #     question = results["Sub-aspect"]
    #     tool_name = results["Tool"]
    #     average_score = results["eval_results"]["score"][0]
    #     video_results = results["eval_results"]["score"][1]
    #     output = f"Sub-aspect: {question}\n"
    #     output += f"The score categorization table for the numerical results evaluated by the '{tool_name}' is as follows:\n{reference_table}\n\n"
    #     output += f"Observation: The evaluation results using '{tool_name}' are summarized below.\n"
    #     output += f"Average Score: {average_score:.4f}\n"
    #     output += "Detailed Results:\n"
    #     for i, video in enumerate(video_results, 1):
    #         prompt = video["prompt"]
    #         score = video["video_results"]
    #         output += f"\t{i}. Prompt: {prompt}\n"
    #         output += f"\tScore: {score:.4f}\n"
    #     return output

    def format_eval_results(self, results, reference_table):
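        """Render one round of scores (plus the scoring reference table) as the next query for the planning agent."""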
        tool_name = results["tool"]
        average_score = results["eval_results"]["score"][0]
        video_results = results["eval_results"]["score"][1]
        # More concise and structured format for SFT
        output = f"Scoring Reference Table of '{tool_name}': {reference_table}\n\n"
        output += "Results:\n"
        output += f"- Overall score: {average_score:.4f}\n"
        output += "- Per-prompt scores:\n"
        for video in video_results:
            prompt = video["prompt"]
            score = video["video_results"]
            output += f"  • \"{prompt}\": {score:.4f}\n"
        return output

    def update_info(self):
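        """Set up the per-run output folder, the video subfolder, and the results JSON path."""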
        # FOLDER_NAME (if set) groups runs of the same model into rounds;
        # otherwise fall back to a timestamped folder derived from the query.
        folder_name = os.environ.get("FOLDER_NAME", datetime.now().strftime('%Y-%m-%d-%H:%M:%S') + "-" + self.user_query.replace(" ", "_"))
        self.save_path = f"./eval_vbench_results/{self.sample_model}/{folder_name}"
        os.makedirs(self.save_path, exist_ok=True)
        self.video_folder = os.path.join(self.save_path, "videos")
        self.file_name = os.path.join(self.save_path, "eval_results.json")

    def explore(self, query, all_chat=None):
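        """Run the plan → design prompts → sample → eval loop until the agent emits </summary> (or the round cap is reached)."""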
        if all_chat is None:
            all_chat = []  # fresh history per call; a mutable [] default would be shared across calls
        self.user_query = query
        self.update_info()
        self.init_agent()
        df = pd.read_csv(self.tsv_file_path, sep='\t')
        plan_query = query
        all_chat.append(plan_query)
        n = 0
        start_time = time.time()
        while True:
            plans_str = self.eval_agent(plan_query)
            plans = format_plans(plans_str)
            if '</summary>' in plans_str:
                print(f"Finish! Time: {time.time() - start_time:.2f}s")
                plans["eval_time"] = time.time() - start_time
                if self.recommend:
                    print("Generating recommendation report...")
                    report = self.recommend_model(query)
                    plans["recommendation_report"] = report
                    print(f"\nQuery: {query}")
                    print("-" * 40)
                    print(report)
                    print("\n" + "=" * 80)
                all_chat.append(plans)
                break
            for _ in range(3):
                try:
                    tool = plans.get('tool', None)
                    if tool and tool_existence(tool):
                        plans["tool"] = tool_existence(tool)
                        break
                    else:
                        # If the tool does not exist, regenerate the plan
                        plans_str = self.eval_agent(plan_query)
                        plans = format_plans(plans_str)
                except Exception as e:
                    # Safe error message that doesn't assume the 'tool' key exists
                    tool_name = plans.get("tool", "UNKNOWN")
                    print(f"❌ Tool '{tool_name}' not found or not valid.")
                    print(f"Generated plan: {plans_str[:200]}...")
                    print(f"Parsed result: {plans}")
                    print(f"Error: {e}")
                    continue  # Try again
            reference_table = format_dimension_as_string(df, plans["tool"])
            prompt_list = self.reference_prompt(plans["tool"])
            prompt_query = f"## Context:\n{json.dumps(plans)}\n\n## Prompt list:\n{json.dumps(prompt_list)}"
            designed_prompts = self.prompt_agent(prompt_query)
            plans["eval_results"] = self.sample_and_eval(designed_prompts, self.video_folder, plans["tool"])
            plan_query = self.format_eval_results(plans, reference_table=reference_table)  # the new, simpler plan query for the next round
            all_chat.append(plans)
            if n > 9:
                break  # hard cap on the number of evaluation rounds
            n += 1
        all_chat.append(self.eval_agent.messages)
        save_json(all_chat, self.file_name)


def main():
    args = parse_args()
    user_query = args.user_query
    eval_agent = EvalAgent(sample_model=args.model, save_mode="video", recommend=args.recommend)
    eval_agent.explore(user_query)


if __name__ == "__main__":
    main()