Datasets:

Modalities:
Image
Text
Formats:
parquet
Size:
< 1K
ArXiv:
License:
tianerb's picture
Upload folder using huggingface_hub
26b362d verified
# from evalscope.backend.vlm_eval_kit import VLMEvalKitBackendManager
# print(f'** All models from VLMEvalKit backend: {VLMEvalKitBackendManager.list_supported_datasets()}')
import argparse
import yaml
import importlib.util
import sys
import os
from evalscope.run import run_task
from evalscope.summarizer import Summarizer
from data_configs import DATASET_CONFIGS
def import_custom_dataset(dataset_name):
    """Dynamically import the custom dataset module registered for *dataset_name*.

    Looks the dataset up in ``DATASET_CONFIGS``; when the entry declares both a
    custom script and a custom eval class, the script is (re)loaded via
    importlib so its import-time side effects run (the module is expected to
    patch ``CustomVQADataset`` when executed — TODO confirm against the
    custom scripts themselves).

    Args:
        dataset_name: Key into ``DATASET_CONFIGS`` identifying the dataset.

    Returns:
        True when the custom module was imported successfully, False otherwise
        (unknown dataset, incomplete config entry, or import failure).
    """
    if dataset_name not in DATASET_CONFIGS:
        return False
    config = DATASET_CONFIGS[dataset_name]
    # Use .get() so a malformed config entry (missing keys) is treated as
    # "no custom script" instead of raising KeyError.
    script_path = config.get("custom_script")
    if not (script_path and config.get("custom_eval_class")):
        return False

    print(f"Importing custom dataset module for {dataset_name}")
    module_name = f"custom_dataset_{dataset_name}"
    try:
        # Drop any previously loaded copy to force a fresh reload.
        sys.modules.pop(module_name, None)
        spec = importlib.util.spec_from_file_location(module_name, script_path)
        if not (spec and spec.loader):
            print(f"Failed to create module spec for {dataset_name}")
            return False
        module = importlib.util.module_from_spec(spec)
        sys.modules[module_name] = module
        # Executing the module triggers its import-time patching side effects.
        spec.loader.exec_module(module)
        print(f"Successfully imported custom dataset module for {dataset_name}")
        return True
    except Exception as e:
        print(f"Failed to import custom dataset module for {dataset_name}: {str(e)}")
        return False
def run_eval(config, analysis_report=False):
    """Run an evalscope evaluation described by the YAML file at *config*.

    Before delegating to evalscope's ``run_task``, two best-effort monkey
    patches are installed:

    1. ``vlmeval.dataset.build_dataset`` is wrapped so the matching custom
       dataset module is imported right before each dataset is built.
    2. ``vlmeval.run.run_task`` is wrapped to supply a default ``reuse_aux``
       attribute — some VLMEvalKit versions read ``args.reuse_aux`` while
       evalscope constructs an Arguments object without that field.

    Args:
        config: Path to the evaluation YAML config file.
        analysis_report: When truthy, print a summary report afterwards.
    """
    # Read the dataset list up front so that, if hooking into VLMEvalKit
    # fails, we can fall back to eagerly importing every custom dataset.
    with open(config, "r") as f:
        parsed = yaml.safe_load(f)
    dataset_names = parsed.get("eval_config", {}).get("data", [])

    try:
        from vlmeval.config import supported_VLM
        from vlmeval.dataset import build_dataset
        from vlmeval.dataset import DATASET_TYPE

        original_build_dataset = build_dataset

        def custom_build_dataset(dataset_name, **kwargs):
            # Import the matching custom dataset lazily, then delegate.
            import_custom_dataset(dataset_name)
            return original_build_dataset(dataset_name, **kwargs)

        # Monkey patch so every dataset build goes through the wrapper above.
        import vlmeval.dataset
        vlmeval.dataset.build_dataset = custom_build_dataset
    except Exception as e:
        print(f"Warning: Could not hook into VLMEvalKit dataset loading: {e}")
        # Fall back to importing all custom datasets (with the overwriting issue)
        for dataset_name in dataset_names:
            import_custom_dataset(dataset_name)

    # Compatibility patch: ensure VLMEval receives a `reuse_aux` attribute.
    try:
        import vlmeval.run as _vlm_run

        _orig_run_task = _vlm_run.run_task

        def _run_task_with_reuse_aux(args):
            if not hasattr(args, "reuse_aux"):
                setattr(args, "reuse_aux", True)
            return _orig_run_task(args)

        _vlm_run.run_task = _run_task_with_reuse_aux
    except Exception as _e:
        print(f"Warning: Failed to patch vlmeval.run.run_task for reuse_aux: {_e}")

    run_task(task_cfg=config)

    if analysis_report:
        report_list = Summarizer.get_report_from_cfg(config)
        print(f"\n>> The report list: {report_list}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, required=True, help="Path to evaluation config file")
parser.add_argument(
"--analysis_report", type=str, choices=["True", "False"], default="True", help="Generate analysis report"
)
args = parser.parse_args()
analysis_report = args.analysis_report == "True"
run_eval(args.config, analysis_report)