# solarVQA / preprocess.py
import os
from pathlib import Path
import shutil
import pandas as pd
import random
# ==========================================
# 1. CONFIGURATION & PATHS
# ==========================================
# Set random seed for reproducible splits
random.seed(42)
# Base dataset directory
base_dir = Path("/path/to/UCF-EL-Defect")
# Input Paths
csv_path = base_dir / "AnnotationsCombined.csv"  # one annotation row per region
train_folder = base_dir / "training/data/train/images"  # original train images
test_folder = base_dir / "training/data/test/images"  # original test images
combined_folder = base_dir / "Test_Images"  # extra image pool; loses on filename clashes
# Output Paths
output_base = '/path/to/output/filtered_ucf_el_defect_dataset'
out_train_dir = os.path.join(output_base, 'train')
out_val_dir = os.path.join(output_base, 'val')
out_test_dir = os.path.join(output_base, 'test')
# Fraction of the valid original training images carved off as the validation split.
TRAIN_TO_VAL_RATIO = 0.10
# ==========================================
# 2. HELPER FUNCTIONS
# ==========================================
def get_images_from_folder(folder_path):
    """Return the set of image filenames found directly inside *folder_path*.

    A missing folder yields an empty set instead of raising, so callers can
    probe optional directories safely. Matching is by (case-insensitive)
    file extension only.
    """
    if not os.path.exists(folder_path):
        return set()
    image_exts = ('.png', '.jpg', '.jpeg', '.bmp', '.tif')
    return {name for name in os.listdir(folder_path) if name.lower().endswith(image_exts)}
def print_distribution(df, train_set, val_set, test_set, title):
    """Print per-class image counts for the current train/val/test splits.

    Args:
        df: DataFrame with at least 'filename' and 'Defect_Class' columns.
        train_set / val_set / test_set: sets of filenames in each split.
        title: heading printed above the table.

    The input DataFrame is NOT modified. (The previous version temporarily
    added a 'temp_split' column to *df* and dropped it afterwards; an
    exception mid-way would have leaked that column into the caller's data.)
    """
    print(f"\n{title}")
    print("-" * 50)
    # Build the split label in a local Series instead of mutating df.
    # Assignment order matters: a filename in several sets keeps the last label.
    split = pd.Series('none', index=df.index)
    split.loc[df['filename'].isin(train_set)] = 'train'
    split.loc[df['filename'].isin(val_set)] = 'val'
    split.loc[df['filename'].isin(test_set)] = 'test'
    # .assign returns a copy, so the caller's df stays untouched.
    tagged = df.assign(temp_split=split)
    active_df = tagged[tagged['temp_split'] != 'none']
    # Unique images per class per split.
    dist = active_df.groupby(['Defect_Class', 'temp_split'])['filename'].nunique().unstack(fill_value=0)
    # Ensure all three split columns exist even when a split is empty.
    for col in ['train', 'val', 'test']:
        if col not in dist.columns:
            dist[col] = 0
    dist = dist[['train', 'val', 'test']]  # fixed column order
    dist['Total'] = dist.sum(axis=1)
    print(dist.to_string())
    print("-" * 50)
# Create the three output split directories up front (idempotent).
for path in [out_train_dir, out_val_dir, out_test_dir]:
    os.makedirs(path, exist_ok=True)
# ==========================================
# 3. IDENTIFY GLOBAL ISSUES IN CSV
# ==========================================
print("--- 1. Analyzing CSV for Issues ---")
df = pd.read_csv(csv_path)
# Rows whose region_id is not numeric. Only counted/reported here; the actual
# repair happens in step 8, where region_id is re-indexed via cumcount().
malformed_id_mask = pd.to_numeric(df['region_id'], errors='coerce').isna()
print(f"[FIX PENDING] Found {malformed_id_mask.sum()} malformed region_id entries.")
# Strip whitespace from the JSON-like attribute strings so "{}" compares reliably.
shape_attrs = df['region_shape_attributes'].astype(str).str.replace(" ", "")
region_attrs = df['region_attributes'].astype(str).str.replace(" ", "")
# Conflict: a drawn region shape with no class attributes attached to it.
conflict_mask = (shape_attrs != "{}") & (region_attrs == "{}")
global_conflict_files = set(df[conflict_mask]['filename'].unique())
# Files containing at least one exact duplicate annotation row.
duplicate_mask = df.duplicated(keep=False)
global_duplicate_files = set(df[duplicate_mask]['filename'].unique())
# ==========================================
# 4. GATHER FILES & APPLY FILTERS
# ==========================================
print("\n--- 2. Filtering Files Across All Folders ---")
all_orig_train = get_images_from_folder(train_folder)
all_orig_test = get_images_from_folder(test_folder)
all_new_images = get_images_from_folder(combined_folder)
# Map each filename to the folder it will be copied from later. Original
# train/test folders win; the combined folder only contributes new names.
file_source_map = {}
for f in all_orig_train: file_source_map[f] = train_folder
for f in all_orig_test: file_source_map[f] = test_folder
for f in all_new_images:
    if f not in file_source_map: file_source_map[f] = combined_folder
all_files = set(file_source_map.keys())
csv_files = set(df['filename'].unique())
files_missing_from_csv = all_files - csv_files
# NOTE(review): valid filenames are assumed to start with 'M' — presumably a
# module/serial naming convention of this dataset; confirm before changing.
files_not_starting_with_M = {f for f in all_files if not f.startswith('M')}
files_with_conflicts = all_files.intersection(global_conflict_files)
files_with_duplicates = all_files.intersection(global_duplicate_files)
# Union of every exclusion reason; a file is kept only if it passes all checks.
bad_files = (files_missing_from_csv
             .union(files_not_starting_with_M)
             .union(files_with_conflicts)
             .union(files_with_duplicates))
valid_files = all_files - bad_files
print(f"Total initial unique files across all folders: {len(all_files)}")
print(f"[-] Removed {len(files_missing_from_csv)} files not present in the CSV")
print(f"[-] Removed {len(files_not_starting_with_M)} files not starting with 'M'")
print(f"[-] Removed {len(files_with_conflicts)} files with shape/attribute conflicts")
print(f"[-] Removed {len(files_with_duplicates)} files with exact duplicate CSV rows")
print(f"Total valid, clean files remaining: {len(valid_files)}\n")
# ==========================================
# 5. CREATE BASE SPLITS & DISTRIBUTE NEW FILES
# ==========================================
print("--- 3. Creating Splits & Distributing ---")
valid_orig_train = list(all_orig_train.intersection(valid_files))
# Sort before shuffling so the seeded shuffle is reproducible regardless of
# set iteration order (which is not stable across runs).
valid_orig_train.sort()
random.shuffle(valid_orig_train)
# Carve the validation split out of the original training images.
val_split_index = int(len(valid_orig_train) * TRAIN_TO_VAL_RATIO)
base_val = set(valid_orig_train[:val_split_index])
base_train = set(valid_orig_train[val_split_index:])
base_test = set(all_orig_test.intersection(valid_files))
# Distribute brand-new images proportionally to the base split sizes.
# NOTE(review): this division raises ZeroDivisionError if no valid base
# images exist at all — acceptable for a one-off script, but worth a guard.
total_base_images = len(base_train) + len(base_val) + len(base_test)
weight_train = len(base_train) / total_base_images
weight_val = len(base_val) / total_base_images
# Only images not already present in the original train/test folders count as new.
orig_union = set(valid_orig_train).union(base_test)
unique_new_images = list(all_new_images.intersection(valid_files) - orig_union)
unique_new_images.sort()
random.shuffle(unique_new_images)
new_to_train_count = int(len(unique_new_images) * weight_train)
new_to_val_count = int(len(unique_new_images) * weight_val)
new_to_train = set(unique_new_images[:new_to_train_count])
new_to_val = set(unique_new_images[new_to_train_count : new_to_train_count + new_to_val_count])
# The rounding remainder goes to test.
new_to_test = set(unique_new_images[new_to_train_count + new_to_val_count:])
final_train_set = base_train.union(new_to_train)
final_val_set = base_val.union(new_to_val)
final_test_set = base_test.union(new_to_test)
# ==========================================
# 6. CLASS MODIFICATIONS (DELETE & REDISTRIBUTE)
# ==========================================
# Create a clean DF with a readable class column for processing
clean_df = df[df['filename'].isin(valid_files)].copy()
# Pull the class name out of the raw JSON-like region_attributes string.
clean_df['Defect_Class'] = clean_df['region_attributes'].astype(str).str.extract(r'"Defect_Class"\s*:\s*"([^"]+)"')
# Rows without a Defect_Class key are labeled as background.
clean_df['Defect_Class'] = clean_df['Defect_Class'].fillna('Background / No Defect')
print_distribution(clean_df, final_train_set, final_val_set, final_test_set, "📊 DISTRIBUTION BEFORE TASK MODIFICATIONS")
# --- TASK A: Delete Unwanted Classes ---
# An image containing ANY region of these classes is dropped entirely,
# including its other (valid) regions.
classes_to_delete = ['Unknown', 'Contact_BeltMarks']
images_to_delete = set(clean_df[clean_df['Defect_Class'].isin(classes_to_delete)]['filename'].unique())
final_train_set -= images_to_delete
final_val_set -= images_to_delete
final_test_set -= images_to_delete
clean_df = clean_df[~clean_df['filename'].isin(images_to_delete)]
print(f"🗑️ Removed {len(images_to_delete)} images containing 'Unknown' or 'Contact_BeltMarks'.")
# --- TASK B: Redistribute Target Class ---
# Target roughly 10% of the class's images in val and 20% in test, topping up
# from train only — images already in val/test are never moved out.
target_class = 'Interconnect_Disconnected'
target_images = clean_df[clean_df['Defect_Class'] == target_class]['filename'].unique()
total_target_images = len(target_images)
if total_target_images > 0:
    target_val_count = int(total_target_images * 0.10)
    target_test_count = int(total_target_images * 0.20)
    val_imgs = [img for img in target_images if img in final_val_set]
    test_imgs = [img for img in target_images if img in final_test_set]
    train_imgs = [img for img in target_images if img in final_train_set]
    # Shortfall per split; max(0, ...) means an already-full split moves nothing.
    need_val = max(0, target_val_count - len(val_imgs))
    need_test = max(0, target_test_count - len(test_imgs))
    print(f"🔄 Total '{target_class}' images: {total_target_images}")
    print(f" Moving {need_val} images from Train -> Val")
    print(f" Moving {need_test} images from Train -> Test")
    # Shuffle so the moved images are a random sample of the train pool.
    random.shuffle(train_imgs)
    imgs_to_val = train_imgs[:need_val]
    imgs_to_test = train_imgs[need_val : need_val + need_test]
    for img in imgs_to_val:
        final_train_set.remove(img)
        final_val_set.add(img)
    for img in imgs_to_test:
        final_train_set.remove(img)
        final_test_set.add(img)
print_distribution(clean_df, final_train_set, final_val_set, final_test_set, "📊 DISTRIBUTION AFTER TASK MODIFICATIONS")
# ==========================================
# 7. COPY FILES TO NEW DATASET
# ==========================================
print("\n--- 4. Copying Files to New Dataset Folders ---")
def copy_files(file_set, dest_dir):
    """Copy every file in *file_set* from its recorded source folder into *dest_dir*.

    Source folders come from the module-level ``file_source_map`` built in
    step 4; metadata is preserved via ``shutil.copy2``.
    """
    for name in file_set:
        shutil.copy2(os.path.join(file_source_map[name], name),
                     os.path.join(dest_dir, name))
copy_files(final_train_set, out_train_dir)
copy_files(final_val_set, out_val_dir)
copy_files(final_test_set, out_test_dir)
print("Files successfully copied.")
# ==========================================
# 8. CREATE CLEAN, RE-INDEXED CSV FILES
# ==========================================
print("\n--- 5. Generating Clean CSVs for Each Split ---")
# Drop the helper Defect_Class column to preserve original CSV schema
clean_df = clean_df.drop(columns=['Defect_Class'], errors='ignore')
# Fix 1: Recalculate Region ID perfectly from 0 -> N for each file
# (this also repairs the malformed region_id entries flagged in step 3).
clean_df['region_id'] = clean_df.groupby('filename').cumcount()
# Fix 2: Recalculate Region Count to ensure it matches the actual remaining rows per image
clean_df['region_count'] = clean_df.groupby('filename')['filename'].transform('count')
train_csv_path = os.path.join(output_base, 'train_annotations.csv')
val_csv_path = os.path.join(output_base, 'val_annotations.csv')
test_csv_path = os.path.join(output_base, 'test_annotations.csv')
# One annotation CSV per split, restricted to that split's images.
clean_df[clean_df['filename'].isin(final_train_set)].to_csv(train_csv_path, index=False)
clean_df[clean_df['filename'].isin(final_val_set)].to_csv(val_csv_path, index=False)
clean_df[clean_df['filename'].isin(final_test_set)].to_csv(test_csv_path, index=False)
print(f"CSVs saved to: \n - {train_csv_path}\n - {val_csv_path}\n - {test_csv_path}")
print("\n[SUCCESS] Master Dataset Generation Complete!")