|
|
|
|
|
""" |
|
|
Verify xView2 dataset integrity |
|
|
Checks that all files referenced in JSON metadata exist |
|
|
""" |
|
|
|
|
|
import json |
|
|
from pathlib import Path |
|
|
from tqdm import tqdm |
|
|
from collections import defaultdict |
|
|
from typing import Dict, List, Tuple |
|
|
|
|
|
|
|
|
def verify_dataset_split(
    json_file: Path,
    base_dir: Path,
    split_name: str,
    verbose: bool = False
) -> Tuple[bool, Dict]:
    """
    Verify a single dataset split.

    Loads the JSON metadata for the split, checks that every entry has all
    required fields, and that each referenced image/mask file exists under
    ``base_dir``.

    Args:
        json_file: Path to JSON metadata file
        base_dir: Base directory containing the dataset
        split_name: Name of the split (train/test)
        verbose: Print detailed statistics and show a progress bar

    Returns:
        Tuple of (all_valid, statistics). ``statistics`` is an empty dict
        when the JSON file itself does not exist.
    """
    if not json_file.exists():
        if verbose:
            print(f"❌ JSON file not found: {json_file}")
        return False, {}

    with open(json_file, 'r', encoding='utf-8') as f:
        data = json.load(f)

    stats = {
        'total_entries': len(data),
        'missing_files': [],
        'disaster_types': defaultdict(int),
        'valid_entries': 0,
        'invalid_entries': 0
    }

    all_valid = True

    # Every entry must carry these keys; hoisted out of the loop since the
    # list is constant (the original rebuilt it once per entry).
    required_fields = [
        'pre_disaster_image',
        'post_disaster_image',
        'pre_disaster_mask',
        'post_disaster_mask',
        'disaster',
        'disaster_type'
    ]

    # Progress bar only in verbose mode; plain iteration otherwise.
    # (The original also passed disable=not verbose, which is always False
    # on this branch — dropped as redundant.)
    iterator = tqdm(data, desc=f"Checking {split_name}", unit="entry") if verbose else data

    for idx, entry in enumerate(iterator):
        entry_valid = True

        disaster_type = entry.get('disaster_type', 'unknown')
        stats['disaster_types'][disaster_type] += 1

        for field in required_fields:
            if field not in entry:
                stats['missing_files'].append({
                    'entry_idx': idx,
                    'field': field,
                    'reason': 'Field missing from JSON'
                })
                entry_valid = False
                continue  # no path to check for a missing field

            # Only path-like fields are verified on disk.
            if field.endswith(('_image', '_mask', '_colormask')):
                file_path = base_dir / entry[field]
                if not file_path.exists():
                    stats['missing_files'].append({
                        'entry_idx': idx,
                        'field': field,
                        'path': str(file_path),
                        'reason': 'File not found'
                    })
                    entry_valid = False

        if entry_valid:
            stats['valid_entries'] += 1
        else:
            stats['invalid_entries'] += 1
            all_valid = False

    if not all_valid and verbose:
        print(f"\n✗ Invalid entries: {stats['invalid_entries']}")
        print(f"✗ Missing files: {len(stats['missing_files'])}")
        if stats['missing_files']:
            print("\nFirst 5 missing files:")
            for missing in stats['missing_files'][:5]:
                print(f"  - Entry {missing['entry_idx']}: {missing['field']} - {missing['reason']}")
                if 'path' in missing:
                    print(f"    Path: {missing['path']}")

    return all_valid, stats
|
|
|
|
|
|
|
|
def verify_sharegpt_format(
    sharegpt_file: Path,
    split_name: str,
    verbose: bool = False
) -> Tuple[bool, Dict]:
    """
    Verify ShareGPT format file.

    Each conversation must have an ``id``, a non-empty ``images`` list, and
    exactly four messages alternating human/gpt, with an ``<image>`` tag in
    the first human message.

    Args:
        sharegpt_file: Path to ShareGPT JSON file
        split_name: Name of the split
        verbose: Print detailed statistics and show a progress bar

    Returns:
        Tuple of (all_valid, statistics). ``statistics`` is an empty dict
        when the ShareGPT file itself does not exist.
    """
    if not sharegpt_file.exists():
        if verbose:
            print(f"❌ ShareGPT file not found: {sharegpt_file}")
        return False, {}

    with open(sharegpt_file, 'r', encoding='utf-8') as f:
        conversations = json.load(f)

    stats = {
        'total_conversations': len(conversations),
        'valid_conversations': 0,
        'invalid_conversations': 0,
        'languages': defaultdict(int),
        'image_types': defaultdict(int),
        'issues': []
    }

    all_valid = True

    # Progress bar only in verbose mode (the original's disable=not verbose
    # was always False on this branch — dropped as redundant).
    iterator = tqdm(conversations, desc=f"Checking ShareGPT {split_name}", unit="conv") if verbose else conversations

    for idx, conv in enumerate(iterator):
        conv_valid = True

        if 'id' not in conv:
            stats['issues'].append(f"Entry {idx}: Missing 'id' field")
            conv_valid = False
        else:
            # Underscore-delimited id; the last two segments are counted as
            # image type and language when at least 4 segments are present.
            parts = conv['id'].split('_')
            if len(parts) >= 4:
                img_type = parts[-2]
                lang = parts[-1]
                stats['languages'][lang] += 1
                stats['image_types'][img_type] += 1

        if 'images' not in conv or not conv['images']:
            stats['issues'].append(f"Entry {idx}: Missing or empty 'images' field")
            conv_valid = False

        if 'messages' not in conv or len(conv['messages']) != 4:
            stats['issues'].append(f"Entry {idx}: Expected 4 messages, got {len(conv.get('messages', []))}")
            conv_valid = False
        else:
            messages = conv['messages']
            expected_pattern = ['human', 'gpt', 'human', 'gpt']
            actual_pattern = [m.get('from', '') for m in messages]

            if actual_pattern != expected_pattern:
                stats['issues'].append(f"Entry {idx}: Unexpected message pattern {actual_pattern}")
                conv_valid = False

            # Idiomatic membership test replaces .find(...) == -1.
            if '<image>' not in messages[0].get('value', ''):
                stats['issues'].append(f"Entry {idx}: First message missing <image> tag")
                conv_valid = False

        if conv_valid:
            stats['valid_conversations'] += 1
        else:
            stats['invalid_conversations'] += 1
            all_valid = False

    if not all_valid and verbose:
        print(f"\n✗ Invalid conversations: {stats['invalid_conversations']}")
        print("\nFirst 5 issues:")
        for issue in stats['issues'][:5]:
            print(f"  - {issue}")

    return all_valid, stats
|
|
|
|
|
|
|
|
def main():
    """Main verification function.

    Parses command-line arguments, locates the metadata and ShareGPT JSON
    files (explicit names or glob-based auto-detection under the base
    directory), runs both verification passes, prints a summary, and exits
    with status 1 if any check fails.
    """

    # Imported locally: only needed when run as a CLI entry point.
    import sys
    import argparse

    parser = argparse.ArgumentParser(
        description='Verify xView2 dataset integrity',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
Examples:
  # Verify from current directory with default file patterns
  %(prog)s

  # Verify specific files
  %(prog)s --train-json xview2_train.json --test-json xview2_test.json

  # Verify with verbose output
  %(prog)s --verbose

  # Specify custom base directory
  %(prog)s --base-dir /path/to/xview2
'''
    )

    parser.add_argument(
        '--base-dir',
        type=Path,
        default=None,
        help='Base directory containing the dataset (default: current working directory)'
    )

    parser.add_argument(
        '--train-json',
        type=str,
        default=None,
        help='Training metadata JSON file name (default: auto-detect xview2_train*.json)'
    )

    parser.add_argument(
        '--test-json',
        type=str,
        default=None,
        help='Test metadata JSON file name (default: auto-detect xview2_test*.json)'
    )

    parser.add_argument(
        '--train-sharegpt',
        type=str,
        default=None,
        help='Training ShareGPT JSON file name (default: auto-detect xview2_train*_sharegpt.json)'
    )

    parser.add_argument(
        '--test-sharegpt',
        type=str,
        default=None,
        help='Test ShareGPT JSON file name (default: auto-detect xview2_test*_sharegpt.json)'
    )

    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Print detailed verification statistics'
    )

    parser.add_argument(
        '--skip-original',
        action='store_true',
        help='Skip verification of original metadata files'
    )

    parser.add_argument(
        '--skip-sharegpt',
        action='store_true',
        help='Skip verification of ShareGPT format files'
    )

    args = parser.parse_args()

    # Resolve the dataset root; defaults to the current working directory.
    base_dir = args.base_dir if args.base_dir else Path.cwd()
    base_dir = base_dir.resolve()

    if not base_dir.exists():
        print(f"❌ Base directory does not exist: {base_dir}")
        sys.exit(1)

    if not args.verbose:
        # Single-line status while checks run; the newline is printed later.
        print("Verifying dataset integrity...", end=" ", flush=True)

    def find_file(pattern: str, description: str) -> Path | None:
        """Find a file matching the pattern in base_dir.

        Returns the first match (warning in verbose mode if there are
        several), or None if nothing matches.
        """
        matches = list(base_dir.glob(pattern))
        if not matches:
            if args.verbose:
                print(f"⚠️  No {description} found matching pattern: {pattern}")
            return None
        if len(matches) > 1:
            if args.verbose:
                print(f"⚠️  Multiple {description} found, using: {matches[0].name}")
        return matches[0]

    train_json = None
    test_json = None
    train_sharegpt = None
    test_sharegpt = None

    if not args.skip_original:
        if args.train_json:
            train_json = base_dir / args.train_json
        else:
            # NOTE(review): help text says "xview2_train*.json" but the glob
            # is broader ("*train*.json", excluding ShareGPT files) — confirm
            # which is intended.
            candidates = [f for f in base_dir.glob("*train*.json") if 'sharegpt' not in f.name.lower()]
            train_json = candidates[0] if candidates else None

        if args.test_json:
            test_json = base_dir / args.test_json
        else:
            candidates = [f for f in base_dir.glob("*test*.json") if 'sharegpt' not in f.name.lower()]
            test_json = candidates[0] if candidates else None

    if not args.skip_sharegpt:
        if args.train_sharegpt:
            train_sharegpt = base_dir / args.train_sharegpt
        else:
            train_sharegpt = find_file("*train*sharegpt.json", "training ShareGPT file")

        if args.test_sharegpt:
            test_sharegpt = base_dir / args.test_sharegpt
        else:
            test_sharegpt = find_file("*test*sharegpt.json", "test ShareGPT file")

    # Defaults are "valid" so that skipped/missing splits do not fail the run.
    train_valid = True
    test_valid = True
    train_stats = {}
    test_stats = {}

    if not args.skip_original:
        if train_json and train_json.exists():
            train_valid, train_stats = verify_dataset_split(train_json, base_dir, "train", verbose=args.verbose)
        elif args.verbose:
            print(f"⚠️  Skipping train verification: file not found")

        if test_json and test_json.exists():
            test_valid, test_stats = verify_dataset_split(test_json, base_dir, "test", verbose=args.verbose)
        elif args.verbose:
            print(f"⚠️  Skipping test verification: file not found")

    train_sharegpt_valid = True
    test_sharegpt_valid = True
    train_sharegpt_stats = {}
    test_sharegpt_stats = {}

    if not args.skip_sharegpt:
        if train_sharegpt and train_sharegpt.exists():
            train_sharegpt_valid, train_sharegpt_stats = verify_sharegpt_format(train_sharegpt, "train", verbose=args.verbose)
        elif args.verbose:
            print(f"⚠️  Skipping train ShareGPT verification: file not found")

        if test_sharegpt and test_sharegpt.exists():
            test_sharegpt_valid, test_sharegpt_stats = verify_sharegpt_format(test_sharegpt, "test", verbose=args.verbose)
        elif args.verbose:
            print(f"⚠️  Skipping test ShareGPT verification: file not found")

    all_checks_passed = train_valid and test_valid and train_sharegpt_valid and test_sharegpt_valid

    if not args.verbose:
        # Terminates the "Verifying dataset integrity..." status line.
        print("")

    if all_checks_passed:
        print("✅ Dataset is ready")
        if args.verbose:
            print(f"\nVerified in directory: {base_dir}")
            if train_json:
                print(f"  Train JSON: {train_json.name}")
            if test_json:
                print(f"  Test JSON: {test_json.name}")
            if train_sharegpt:
                print(f"  Train ShareGPT: {train_sharegpt.name}")
            if test_sharegpt:
                print(f"  Test ShareGPT: {test_sharegpt.name}")
    else:
        print("❌ Dataset verification failed")
        print("\nIssues found:")
        if not train_valid:
            print(f"  - Training metadata has issues")
        if not test_valid:
            print(f"  - Test metadata has issues")
        if not train_sharegpt_valid:
            print(f"  - Training ShareGPT format has issues")
        if not test_sharegpt_valid:
            print(f"  - Test ShareGPT format has issues")
        print("\nRun with --verbose flag for detailed information")
        sys.exit(1)
|
|
|
|
|
|
# Standard entry-point guard: run verification only when executed as a script.
if __name__ == "__main__":
    main()
|
|
|
|
|
|