|
|
import csv |
|
|
import re |
|
|
import shutil |
|
|
from pathlib import Path |
|
|
import argparse |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# CLI arguments
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description="Clean dataset filenames and metadata.")
parser.add_argument("dir_path", type=str, help="Path to the source directory containing batches and metadata.csv")
args = parser.parse_args()

# Strip a trailing separator so "<dir>/" does not turn the destination into
# "<dir>/_clean" (the string concatenation below appends the suffix verbatim).
dir_path = args.dir_path.rstrip("/")

# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
SOURCE_ROOT = Path(dir_path)            # dataset root holding the batch_* dirs
CSV_NAME = "metadata.csv"               # metadata file inside SOURCE_ROOT

DEST_ROOT = Path(dir_path + "_clean")   # cleaned copy written next to the source
DEST_CSV_NAME = "metadata.csv"          # metadata file written inside DEST_ROOT

REPLACEMENT_CHAR = "_"                  # substitute for disallowed filename chars
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def clean_filename(filename: str, replacement: str = "_") -> str:
    """Sanitize a filename while preserving its extension.

    Only the final path component is considered.  Every character in the
    stem outside ``A-Z a-z 0-9 . _ -`` is replaced with *replacement*, and
    runs of the replacement character are collapsed to a single one.

    Args:
        filename: Filename, optionally with a directory prefix.
        replacement: Character substituted for each disallowed character.

    Returns:
        The cleaned basename (stem + original suffix).  If nothing
        meaningful survives cleaning, the stem falls back to ``"file"``.
    """
    p = Path(filename)
    stem = p.stem
    suffix = p.suffix

    # Keep letters, digits, dot, underscore and dash.  The dash was always
    # documented as allowed but the previous pattern stripped it.
    cleaned_stem = re.sub(r"[^A-Za-z0-9._-]", replacement, stem)

    # Collapse runs of the replacement character into a single occurrence.
    cleaned_stem = re.sub(rf"{re.escape(replacement)}+", replacement, cleaned_stem)

    # Fall back to a generic name when the stem is empty or consists only of
    # replacement characters (e.g. a stem of nothing but disallowed chars).
    if not cleaned_stem.strip(replacement):
        cleaned_stem = "file"

    return cleaned_stem + suffix
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _read_metadata(source_csv_path):
    """Read the metadata CSV; return (original-path -> cleaned-name map, rewritten rows)."""
    filename_map = {}
    rows = []
    with open(source_csv_path, newline="", encoding="utf-8") as f:
        for row in csv.reader(f):
            if not row:
                # Tolerate stray blank lines in the CSV instead of IndexError-ing.
                continue

            original_name = row[0]
            cleaned_name = clean_filename(original_name, REPLACEMENT_CHAR)
            filename_map[original_name] = cleaned_name

            new_row = row.copy()
            # Preserve the full directory prefix (if any) in front of the
            # cleaned basename.  rpartition keeps nested prefixes intact and
            # leaves a bare filename bare, instead of duplicating it into
            # "name/cleaned" as the old split("/")[0] logic did.
            head, sep, _tail = original_name.rpartition("/")
            new_row[0] = head + sep + cleaned_name
            rows.append(new_row)
    return filename_map, rows


def _copy_batch_files(filename_map):
    """Copy every CSV-listed file from SOURCE_ROOT's batch_* dirs into DEST_ROOT."""
    for batch_dir in SOURCE_ROOT.iterdir():
        if not (batch_dir.is_dir() and batch_dir.name.startswith("batch_")):
            continue

        dest_batch_dir = DEST_ROOT / batch_dir.name
        dest_batch_dir.mkdir(parents=True, exist_ok=True)

        for file_path in batch_dir.iterdir():
            if not file_path.is_file():
                continue

            # Build the lookup key with a literal "/" so it matches the CSV
            # paths on every OS (str(Path(...)) would use "\\" on Windows).
            key = f"{batch_dir.name}/{file_path.name}"
            if key not in filename_map:
                print(f"WARNING: {key} not found in CSV. Skipping.")
                continue

            # copy2 preserves file metadata (timestamps) alongside contents.
            shutil.copy2(file_path, dest_batch_dir / filename_map[key])


def _write_metadata(dest_csv_path, rows):
    """Write the rewritten metadata rows to the destination CSV."""
    with open(dest_csv_path, "w", newline="", encoding="utf-8") as f:
        csv.writer(f).writerows(rows)


def main():
    """Create a cleaned copy of the dataset.

    Reads ``SOURCE_ROOT/CSV_NAME``, sanitizes every filename referenced in
    column 0, copies each file from its ``batch_*`` directory into
    ``DEST_ROOT`` under the cleaned name, and writes the updated CSV to
    ``DEST_ROOT/DEST_CSV_NAME``.
    """
    source_csv_path = SOURCE_ROOT / CSV_NAME
    dest_csv_path = DEST_ROOT / DEST_CSV_NAME

    DEST_ROOT.mkdir(parents=True, exist_ok=True)

    filename_map, rows = _read_metadata(source_csv_path)
    _copy_batch_files(filename_map)
    _write_metadata(dest_csv_path, rows)

    print("Clean dataset created successfully:")
    print(f" Folder: {DEST_ROOT}")
    print(f" CSV: {dest_csv_path}")
|
|
|
|
|
|
|
|
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":


    main()
|
|
|