| |
| |
|
|
| import argparse |
| import yaml |
| import importlib.util |
| import sys |
| import os |
| from evalscope.run import run_task |
| from evalscope.summarizer import Summarizer |
|
|
| from data_configs import DATASET_CONFIGS |
|
|
|
|
def import_custom_dataset(dataset_name):
    """Dynamically import the custom dataset module registered for *dataset_name*.

    Looks up ``dataset_name`` in ``DATASET_CONFIGS``; when the entry declares
    both a ``custom_script`` path and a ``custom_eval_class``, the script file
    is (re)loaded as module ``custom_dataset_<dataset_name>``.

    Args:
        dataset_name: Key into ``DATASET_CONFIGS``.

    Returns:
        bool: True if the module was imported successfully; False for an
        unknown dataset, an incomplete config entry, a missing script file,
        or any import-time error.
    """
    config = DATASET_CONFIGS.get(dataset_name)
    if not config:
        return False

    # Use .get() so a partially-filled config entry cannot raise KeyError.
    script_path = config.get("custom_script")
    if not (script_path and config.get("custom_eval_class")):
        return False

    print(f"Importing custom dataset module for {dataset_name}")

    # Fail fast with a precise message instead of a generic import error.
    if not os.path.isfile(script_path):
        print(f"Custom script not found for {dataset_name}: {script_path}")
        return False

    module_name = f"custom_dataset_{dataset_name}"
    try:
        # Drop any previously loaded copy so the script is re-executed fresh.
        sys.modules.pop(module_name, None)

        spec = importlib.util.spec_from_file_location(module_name, script_path)
        if spec and spec.loader:
            module = importlib.util.module_from_spec(spec)
            # Register before exec_module so the script can reference itself.
            sys.modules[module_name] = module
            spec.loader.exec_module(module)
            print(f"Successfully imported custom dataset module for {dataset_name}")
            return True
        print(f"Failed to create module spec for {dataset_name}")
        return False
    except Exception as e:
        print(f"Failed to import custom dataset module for {dataset_name}: {str(e)}")
        return False
|
|
|
|
def _hook_vlmeval_dataset_loading(datasets):
    """Wrap vlmeval's ``build_dataset`` so custom dataset modules load on demand.

    If the hook cannot be installed (e.g. vlmeval is not importable), fall
    back to eagerly importing the custom module of every configured dataset.

    Args:
        datasets: Dataset names from the evaluation config, used only by the
            eager-import fallback path.
    """
    try:
        from vlmeval.dataset import build_dataset

        original_build_dataset = build_dataset

        def custom_build_dataset(dataset_name, **kwargs):
            # Ensure any custom module for this dataset is loaded before
            # vlmeval constructs the dataset object.
            import_custom_dataset(dataset_name)
            return original_build_dataset(dataset_name, **kwargs)

        import vlmeval.dataset

        vlmeval.dataset.build_dataset = custom_build_dataset
    except Exception as e:
        print(f"Warning: Could not hook into VLMEvalKit dataset loading: {e}")
        for dataset_name in datasets:
            import_custom_dataset(dataset_name)


def _patch_reuse_aux():
    """Patch ``vlmeval.run.run_task`` to default ``reuse_aux=True`` on its args.

    Best-effort: a failure to patch is reported as a warning, never raised.
    """
    try:
        import vlmeval.run as _vlm_run

        _orig_run_task = _vlm_run.run_task

        def _run_task_with_reuse_aux(args):
            # Only supply a default; an explicitly set reuse_aux is untouched.
            if not hasattr(args, "reuse_aux"):
                setattr(args, "reuse_aux", True)
            return _orig_run_task(args)

        _vlm_run.run_task = _run_task_with_reuse_aux
    except Exception as _e:
        print(f"Warning: Failed to patch vlmeval.run.run_task for reuse_aux: {_e}")


def run_eval(config, analysis_report=False):
    """Run an evalscope evaluation described by a YAML config file.

    Args:
        config: Path to the YAML evaluation config file.
        analysis_report: When True, print a summarized report after the run.
    """
    with open(config, "r") as f:
        cfg = yaml.safe_load(f)

    datasets = cfg.get("eval_config", {}).get("data", [])

    # Install best-effort patches before kicking off the evaluation.
    _hook_vlmeval_dataset_loading(datasets)
    _patch_reuse_aux()

    run_task(task_cfg=config)

    if analysis_report:
        report_list = Summarizer.get_report_from_cfg(config)
        print(f"\n>> The report list: {report_list}")
|
|
|
|
if __name__ == "__main__":
    # CLI entry point: parse arguments and hand off to run_eval.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--config", type=str, required=True, help="Path to evaluation config file"
    )
    arg_parser.add_argument(
        "--analysis_report",
        type=str,
        choices=["True", "False"],
        default="True",
        help="Generate analysis report",
    )
    cli_args = arg_parser.parse_args()

    # The flag arrives as the string "True"/"False"; convert to bool here.
    run_eval(cli_args.config, cli_args.analysis_report == "True")
|
|