| |
| import json |
| import os |
| import base64 |
| import logging |
| import time |
| import random |
| import pandas as pd |
| from typing import Optional |
|
|
| logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') |
| logger = logging.getLogger(__name__) |
|
|
# Directory containing the source annotation files (JSON/CSV) to process.
BASE_JSON_DIR = '/data1/luyt/code_omniquality/mnt/petrelfs/luyiting/MultiAgentEval/data_process_v1'
# Directory where the processed JSONs (with embedded base64 images) are written.
OUTPUT_DIR = '/data1/luyt/code_omniquality/mnt/petrelfs/luyiting/MultiAgentEval/data_process_v1/test_json'


# Per-dataset processing configuration, consumed by process_dataset() and
# resolve_image_path(). Recognized keys:
#   json_file     - annotation JSON inside BASE_JSON_DIR (None when csv_file is used)
#   csv_file      - CSV alternative to json_file (AGIQA-3k only)
#   image_base    - local root directory holding this dataset's images
#   path_type     - how an item's 'image' field maps onto image_base:
#                     "relative"          -> join(image_base, field)
#                     "relative_basename" -> join(image_base, basename(field))
#                     "filename_only"     -> join(image_base, field)
#                     "absolute_remap"    -> replace remap_prefix with remap_target
#   remap_prefix /
#   remap_target  - prefix substitution pair used by "absolute_remap"
#   sample_n      - randomly subsample to this many items (fixed seed 42)
#   must_find     - with sample_n: pre-filter to items whose image exists locally
#                   before sampling, so the sample contains only usable items
#   output_name   - override for the output filename (defaults to json_file,
#                   then to "test_<dataset>.json")
DATASET_CONFIGS = {
    "koniq": {
        "json_file": "test_koniq.json",
        "image_base": "/data1/datasets/IQA/koniq1",
        "path_type": "relative",
    },
    "kadid": {
        "json_file": "test_kadid.json",
        "image_base": "/data1/datasets/IQA/kadid10k/distorted_images",
        "path_type": "relative_basename",
    },
    "spaq": {
        "json_file": "test_spaq.json",
        "image_base": "/data1/datasets/IQA/SPAQ/512x384",
        "path_type": "relative_basename",
    },
    "ava": {
        "json_file": "test_ava.json",
        "image_base": "/data2/datasets/AVA",
        "path_type": "relative",
        "sample_n": 2000,
    },
    "tad66k": {
        "json_file": "test_TAD66k_forDG.json",
        "image_base": "/data2/datasets/TAD66k",
        "path_type": "absolute_remap",
        "remap_prefix": "/mnt/petrelfs/luyiting/data/IQA/TAD66K/",
        "remap_target": "/data2/datasets/TAD66k/",
        "sample_n": 2000,
    },
    "evalmuse": {
        "json_file": "train_evalmuse_llava_style.json",
        "image_base": "/data2/datasets/EvalMuse/dataset/images",
        "path_type": "absolute_remap",
        "remap_prefix": "/mnt/petrelfs/luyiting/data/IQA/EvalMuse/dataset/images/",
        "remap_target": "/data2/datasets/EvalMuse/dataset/images/",
        "output_name": "test_evalmuse.json",
        "sample_n": 2000,
    },
    "agiqa3k": {
        "json_file": None,
        "csv_file": "AGIQA3K_data.csv",
        "image_base": "/data2/datasets/AGIQA-3k",
        "path_type": "filename_only",
    },
    "evalmi": {
        "json_file": "test_Evalmi50k_1_5_sampled.json",
        "image_base": "/data2/datasets/EvalMi-50K",
        "path_type": "absolute_remap",
        "remap_prefix": "/mnt/petrelfs/luyiting/data/IQA/EvalMi-50K/",
        "remap_target": "/data2/datasets/EvalMi-50K/",
        "sample_n": 2000,
        "must_find": True,
    },
}
|
|
|
|
def load_image_as_base64(img_path: str) -> Optional[str]:
    """Read the file at *img_path* and return its contents base64-encoded.

    Returns None when the file does not exist or cannot be read; read
    failures are logged rather than raised.
    """
    if not os.path.exists(img_path):
        return None
    try:
        with open(img_path, 'rb') as fh:
            raw = fh.read()
    except Exception as e:
        logger.error(f"Error loading image {img_path}: {e}")
        return None
    return base64.b64encode(raw).decode('utf-8')
|
|
|
|
def resolve_image_path(image_field: str, config: dict) -> Optional[str]:
    """Map an item's raw 'image' field to a local filesystem path.

    The resolution strategy is selected by config["path_type"]; returns
    None when no image base is configured or the type is unrecognized.
    """
    base = config["image_base"]
    if base is None:
        return None

    kind = config["path_type"]

    # Both types simply join the field onto the base directory.
    if kind in ("relative", "filename_only"):
        return os.path.join(base, image_field)

    if kind == "relative_basename":
        return os.path.join(base, os.path.basename(image_field))

    if kind == "absolute":
        # Use the field as-is, but only if it actually exists.
        return image_field if os.path.exists(image_field) else None

    if kind == "absolute_remap":
        prefix = config["remap_prefix"]
        target = config["remap_target"]
        if image_field.startswith(prefix):
            return target + image_field[len(prefix):]
        # Unexpected prefix: fall back to basename under the target dir.
        return os.path.join(target, os.path.basename(image_field))

    return None
|
|
|
|
def generate_agiqa3k_json(csv_path: str) -> list:
    """Generate AGIQA-3k test JSON from CSV.

    Rows missing 'name' or 'prompt' are skipped; missing score columns
    fall back to 0.0.
    """
    df = pd.read_csv(csv_path)
    logger.info(f"[agiqa3k] Loaded CSV: {len(df)} rows")

    def _score(value) -> float:
        # NaN / absent score columns become 0.0.
        return float(value) if pd.notna(value) else 0.0

    records = []
    for _, row in df.iterrows():
        if pd.isna(row.get('name')) or pd.isna(row.get('prompt')):
            continue
        question = (
            f"Judge the image alignment with the prompt: \"{row['prompt']}\"\n"
            "Please evaluate how well the image matches each element of provided prompt.\n\n"
            "And answer with the final alignment rating.\n"
            "Pick from [bad, poor, fair, good, excellent]."
        )
        records.append({
            "image": str(row['name']),
            "gt_score": _score(row.get('mos_align')),
            "gt_score1": _score(row.get('mos_quality')),
            "gt_score2": _score(row.get('std_align')),
            "prompt": str(row['prompt']),
            "conversations": [
                {
                    "from": "human",
                    "value": question,
                }
            ],
        })
    return records
|
|
|
|
def process_dataset(name: str, config: dict):
    """Process one dataset end-to-end.

    Loads the dataset's annotations (CSV or JSON), optionally pre-filters
    and subsamples them, embeds each locally-available image as base64
    under the 'image_byte' key, and writes the result into OUTPUT_DIR.
    Missing image directories or annotation files are logged and skipped
    rather than raised.

    Args:
        name:   dataset key, used for logging and default output naming.
        config: the dataset's entry from DATASET_CONFIGS.
    """
    if config["image_base"] is None:
        logger.warning(f"[{name}] No image base configured (images not available locally), skipping")
        return
    if not os.path.exists(config["image_base"]):
        logger.warning(f"[{name}] Image base dir not found: {config['image_base']}, skipping")
        return

    data = _load_items(name, config)
    if data is None:
        return

    data = _select_items(name, config, data)
    _embed_images(name, config, data)
    _save_output(name, config, data)


def _load_items(name: str, config: dict):
    """Load the dataset's item list from its CSV or JSON source; None if the file is missing."""
    if config.get("csv_file"):
        csv_path = os.path.join(BASE_JSON_DIR, config["csv_file"])
        if not os.path.exists(csv_path):
            logger.warning(f"[{name}] CSV file not found: {csv_path}, skipping")
            return None
        return generate_agiqa3k_json(csv_path)

    json_path = os.path.join(BASE_JSON_DIR, config["json_file"])
    if not os.path.exists(json_path):
        logger.warning(f"[{name}] JSON file not found: {json_path}, skipping")
        return None
    logger.info(f"[{name}] Loading JSON: {json_path}")
    with open(json_path, 'r', encoding='utf-8') as f:
        return json.load(f)


def _select_items(name: str, config: dict, data: list) -> list:
    """Optionally pre-filter to locally-available images, then subsample to sample_n."""
    sample_n = config.get("sample_n")

    # must_find: only sample from items whose image actually exists locally,
    # so the final sample is fully usable.
    if config.get("must_find") and sample_n:
        logger.info(f"[{name}] Pre-filtering items with existing local images...")
        valid_data = []
        for item in data:
            if 'image' not in item:
                continue
            img_path = resolve_image_path(item['image'], config)
            if img_path and os.path.exists(img_path):
                valid_data.append(item)
        logger.info(f"[{name}] Found {len(valid_data)}/{len(data)} items with local images")
        data = valid_data

    if sample_n and len(data) > sample_n:
        logger.info(f"[{name}] Randomly sampling {sample_n} from {len(data)} items")
        random.seed(42)  # fixed seed: sampling is reproducible across runs
        data = random.sample(data, sample_n)
    return data


def _embed_images(name: str, config: dict, data: list):
    """Attach base64-encoded image bytes ('image_byte') to each item in-place."""
    total = len(data)
    found = 0
    not_found = 0
    start_time = time.time()

    for i, item in enumerate(data):
        if 'image' not in item:
            continue

        img_path = resolve_image_path(item['image'], config)
        encoded = None
        if img_path and os.path.exists(img_path):
            encoded = load_image_as_base64(img_path)
        if encoded:
            item['image_byte'] = encoded
            found += 1
        else:
            not_found += 1

        # Periodic progress so long-running datasets are observable.
        if (i + 1) % 500 == 0:
            elapsed = time.time() - start_time
            logger.info(f"[{name}] Progress: {i+1}/{total}, found: {found}, not_found: {not_found}, elapsed: {elapsed:.1f}s")

    elapsed = time.time() - start_time
    logger.info(f"[{name}] Done: total={total}, found={found}, not_found={not_found}, elapsed={elapsed:.1f}s")


def _save_output(name: str, config: dict, data: list):
    """Serialize the processed items to OUTPUT_DIR under the configured output name."""
    output_name = config.get("output_name") or config.get("json_file") or f"test_{name}.json"
    output_path = os.path.join(OUTPUT_DIR, output_name)
    logger.info(f"[{name}] Saving to: {output_path}")
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False)
    logger.info(f"[{name}] Saved successfully ({os.path.getsize(output_path) / 1024 / 1024:.1f} MB)")
|
|
|
def main():
    """Run every configured dataset, skipping outputs that already exist."""
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    # Any .json already present in OUTPUT_DIR counts as a finished dataset.
    already_done = {f for f in os.listdir(OUTPUT_DIR) if f.endswith('.json')}

    for name, config in DATASET_CONFIGS.items():
        output_name = config.get("output_name") or config.get("json_file") or f"test_{name}.json"
        if output_name in already_done:
            logger.info(f"[{name}] Already processed ({output_name}), skipping. Delete to reprocess.")
            continue
        # One dataset failing must not abort the rest of the run.
        try:
            process_dataset(name, config)
        except Exception as e:
            logger.error(f"[{name}] Failed: {e}")
            import traceback
            traceback.print_exc()

    logger.info("All datasets processed.")


if __name__ == "__main__":
    main()
|
|