# FreeSound_Popularity / clean_dataset.py
# Uploaded by MeysamSh using the upload-large-folder tool (commit ea3051f, verified).
import csv
import re
import shutil
from pathlib import Path
import argparse
# ==========================
# Configuration
# ==========================
# NOTE: CLI arguments are parsed at import time — this file is a script,
# not an importable module.
parser = argparse.ArgumentParser(description="Clean dataset filenames and metadata.")
parser.add_argument("dir_path", type=str, help="Path to the source directory containing batches and metadata.csv")
args = parser.parse_args()
dir_path = args.dir_path
SOURCE_ROOT = Path(dir_path)  # folder containing batch_00x + metadata.csv
CSV_NAME = "metadata.csv"
# The cleaned copy is written to a sibling directory named "<dir_path>_clean".
DEST_ROOT = Path(dir_path+"_clean")
DEST_CSV_NAME = "metadata.csv"
REPLACEMENT_CHAR = "_"  # character to replace special characters with
# ==========================
# Filename cleaning function
# ==========================
def clean_filename(filename: str, replacement: str = "_") -> str:
    """
    Replace special characters in a filename while preserving the extension.

    Allowed characters: letters, numbers, dot, underscore, dash.
    Any directory prefix in *filename* is dropped — only the final path
    segment (``Path(filename).stem`` + ``suffix``) is used.

    Args:
        filename: Original filename, optionally with a directory prefix
            (e.g. ``"batch_001/my song.wav"``).
        replacement: Character substituted for each disallowed character.

    Returns:
        Cleaned filename. Falls back to ``"file" + suffix`` when the
        cleaned stem would be empty.
    """
    p = Path(filename)
    stem = p.stem
    suffix = p.suffix
    # Replace unwanted characters. Dash is kept: the documented contract
    # allows it, but the original pattern ([^A-Za-z0-9._]) omitted it.
    cleaned_stem = re.sub(r"[^A-Za-z0-9._-]", replacement, stem)
    # Collapse runs of the replacement character into a single occurrence
    cleaned_stem = re.sub(rf"{re.escape(replacement)}+", replacement, cleaned_stem)
    # Avoid producing an empty name (e.g. input was only a suffix)
    if not cleaned_stem:
        cleaned_stem = "file"
    return cleaned_stem + suffix
# ==========================
# Main process
# ==========================
def main():
    """
    Build a cleaned copy of the dataset.

    Reads ``SOURCE_ROOT/metadata.csv`` (no header row expected), cleans the
    filename stored in column 0 of each row, copies every file found in the
    ``batch_*`` subdirectories into ``DEST_ROOT`` under its cleaned name,
    and writes the rewritten metadata CSV into ``DEST_ROOT``.
    """
    source_csv_path = SOURCE_ROOT / CSV_NAME
    dest_csv_path = DEST_ROOT / DEST_CSV_NAME
    DEST_ROOT.mkdir(parents=True, exist_ok=True)

    # Build mapping: original "batch_xxx/name" key -> cleaned bare filename,
    # and collect the rewritten CSV rows in input order.
    filename_map = {}
    rows = []
    with open(source_csv_path, newline="", encoding="utf-8") as f:
        reader = csv.reader(f)
        for row in reader:
            original_name = row[0]  # e.g. "batch_001/some file.wav"
            cleaned_name = clean_filename(original_name, REPLACEMENT_CHAR)
            filename_map[original_name] = cleaned_name
            new_row = row.copy()
            # Keep the batch prefix, swap in the cleaned filename.
            new_row[0] = original_name.split("/")[0] + "/" + cleaned_name
            rows.append(new_row)

    # Copy and rename files batch by batch.
    for batch_dir in SOURCE_ROOT.iterdir():
        if not (batch_dir.is_dir() and batch_dir.name.startswith("batch_")):
            continue
        dest_batch_dir = DEST_ROOT / batch_dir.name
        dest_batch_dir.mkdir(parents=True, exist_ok=True)
        for file_path in batch_dir.iterdir():
            if not file_path.is_file():
                continue
            # Build the key with an explicit "/" so it matches the CSV
            # entries on every OS (Path would use "\\" on Windows and the
            # lookup would always miss).
            key = f"{batch_dir.name}/{file_path.name}"
            cleaned_name = filename_map.get(key)
            if cleaned_name is None:
                print(f"WARNING: {key} not found in CSV. Skipping.")
                continue
            # copy2 preserves the source file's metadata (timestamps).
            shutil.copy2(file_path, dest_batch_dir / cleaned_name)

    # Write the cleaned CSV (no header, mirroring the input file).
    with open(dest_csv_path, "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerows(rows)

    print("Clean dataset created successfully:")
    print(f" Folder: {DEST_ROOT}")
    print(f" CSV: {dest_csv_path}")
# Script entry point: run the cleaning pipeline when executed directly.
if __name__ == "__main__":
    main()