# neodata/clean_data.py
# Uploaded by ChipYTY: "Upload neonatal MRI dataset (DICOM images, clinical data, analysis reports)"
# commit 75e9074 (verified)
#!/usr/bin/env python3
"""
Data Cleaning Script for Neonatal Brain MRI Dataset
- Removes PII (patient name, hospital ID)
- Translates column headers to English
- Converts xlsx to CSV
- Renames folders to English
- Generates a combined dataset and cleaning report
"""
import os
import shutil
import csv
import json
from datetime import datetime
import openpyxl
# ============================================================
# Configuration
# ============================================================
# Root of the raw dataset on the author's machine.
# NOTE(review): hard-coded absolute path — the script must run against a
# matching local layout.
BASE_DIR = "/Users/yty/Desktop/med_paper/feng_dataset"

# Mapping: original Chinese folder name -> English name
# (正常 = normal, 扩张 = ventricular dilation/LVM, 软化 = leukomalacia/PVL)
FOLDER_MAP = {
    "正常 2026-2-6": "normal",
    "扩张 2026-2-6": "LVM",
    "软化 2026-2-6": "PVL",
    "扩张+软化 2026-2-7": "LVM_PVL",
}

# Mapping: original Chinese folder name -> xlsx filename found inside it
XLSX_MAP = {
    "正常 2026-2-6": "正常 2026-2-6.xlsx",
    "扩张 2026-2-6": "扩张 2026-2-6.xlsx",
    "软化 2026-2-6": "软化 2026-2-6.xlsx",
    "扩张+软化 2026-2-7": "扩张+软化 2026-2-7 严老师.xlsx",
}

# Group labels: numeric class label plus Chinese/English display names,
# keyed by the English folder name.
GROUP_LABELS = {
    "normal": {"label": 0, "cn": "正常", "en": "Normal"},
    "LVM": {"label": 1, "cn": "侧脑室扩张", "en": "Lateral Ventricular Megaly (LVM)"},
    "PVL": {"label": 2, "cn": "脑白质软化", "en": "Periventricular Leukomalacia (PVL)"},
    "LVM_PVL": {"label": 3, "cn": "扩张+软化", "en": "LVM + PVL"},
}

# Target MRI channels: sub-folder names expected under each patient folder.
TARGET_CHANNELS = ["FLAIR", "T1WI", "T2-SAG", "T2WI"]

# Column name translation (Chinese xlsx header -> English CSV column).
COLUMN_MAP = {
    "编号": "patient_id",
    "检查描述": "exam_description",
    "检查结果": "diagnosis",
}
# ============================================================
# Step 1: Process xlsx -> CSV (remove PII, translate headers)
# ============================================================
def process_xlsx(folder_cn, folder_en):
    """Read one group's xlsx, drop PII columns, and return cleaned records.

    Only columns 1 (编号 / patient id), 4 (检查描述 / exam description) and
    5 (检查结果 / diagnosis) are read; the name / hospital-ID columns are
    never touched, so no PII reaches the output.

    Args:
        folder_cn: original Chinese folder name (key into XLSX_MAP).
        folder_en: English group name (key into GROUP_LABELS).

    Returns:
        list[dict]: one dict per data row with keys patient_id, group,
        group_label, group_name_en, exam_description, diagnosis.
    """
    xlsx_path = os.path.join(BASE_DIR, folder_cn, XLSX_MAP[folder_cn])
    # data_only=True yields cached formula *values* instead of formula
    # strings; a no-op for plain-value sheets.  NOTE(review): if a formula
    # cell was saved without a cached value it reads as None.
    wb = openpyxl.load_workbook(xlsx_path, data_only=True)
    try:
        # Fall back to the active sheet in case the workbook was saved with
        # a sheet name other than the default "Sheet1".
        ws = wb["Sheet1"] if "Sheet1" in wb.sheetnames else wb.active
        records = []
        for row_idx in range(2, ws.max_row + 1):  # row 1 is the header
            patient_id = ws.cell(row=row_idx, column=1).value
            exam_desc = ws.cell(row=row_idx, column=4).value
            diagnosis = ws.cell(row=row_idx, column=5).value
            if patient_id is None:
                # Skip trailing/empty rows that openpyxl counts in max_row.
                continue
            # Clean text: strip whitespace, normalize non-breaking spaces.
            patient_id = str(patient_id).strip()
            exam_desc = (exam_desc or "").strip().replace("\xa0", " ")
            diagnosis = (diagnosis or "").strip().replace("\xa0", " ")
            records.append({
                "patient_id": patient_id,
                "group": folder_en,
                "group_label": GROUP_LABELS[folder_en]["label"],
                "group_name_en": GROUP_LABELS[folder_en]["en"],
                "exam_description": exam_desc,
                "diagnosis": diagnosis,
            })
        return records
    finally:
        wb.close()  # release the underlying zip file handle
# ============================================================
# Step 2: Rename folders
# ============================================================
def rename_folders(base_dir=None, folder_map=None):
    """Rename Chinese-named group folders to their English equivalents.

    Args:
        base_dir: directory containing the group folders; defaults to
            BASE_DIR (parameterized so the function can run on any layout).
        folder_map: {chinese_name: english_name} mapping; defaults to
            FOLDER_MAP.

    Returns:
        list[tuple[str, str]]: (original, new) pairs for every folder that
        was renamed, or (original, "<new> (already exists)") when the target
        already existed.  Entries where neither source nor target exists are
        silently skipped.
    """
    if base_dir is None:
        base_dir = BASE_DIR
    if folder_map is None:
        folder_map = FOLDER_MAP
    renamed = []
    for cn_name, en_name in folder_map.items():
        src = os.path.join(base_dir, cn_name)
        dst = os.path.join(base_dir, en_name)
        if os.path.exists(src) and not os.path.exists(dst):
            os.rename(src, dst)
            renamed.append((cn_name, en_name))
        elif os.path.exists(dst):
            # Idempotent re-run: the folder was renamed on a previous run.
            renamed.append((cn_name, f"{en_name} (already exists)"))
    return renamed
# ============================================================
# Step 3: Scan data integrity
# ============================================================
def scan_integrity(folder_en, base_dir=None, channels=None):
    """Check each patient folder for the target MRI channels.

    Args:
        folder_en: English group folder name under ``base_dir``.
        base_dir: dataset root; defaults to BASE_DIR (parameterized so the
            scan can run on any layout).
        channels: expected channel sub-folder names; defaults to
            TARGET_CHANNELS.

    Returns:
        list[dict]: one entry per patient (sorted by folder name) with the
        DCM count per expected channel, the names of missing and extra
        channel folders, and the total DCM count over expected channels.
    """
    if base_dir is None:
        base_dir = BASE_DIR
    if channels is None:
        channels = TARGET_CHANNELS
    folder_path = os.path.join(base_dir, folder_en)
    # Hidden entries (.DS_Store etc.) are ignored throughout.
    patients = sorted(
        d for d in os.listdir(folder_path)
        if os.path.isdir(os.path.join(folder_path, d)) and not d.startswith(".")
    )
    results = []
    for p in patients:
        p_path = os.path.join(folder_path, p)
        subdirs = [
            d for d in os.listdir(p_path)
            if os.path.isdir(os.path.join(p_path, d)) and not d.startswith(".")
        ]
        channel_info = {}
        missing_channels = []
        for ch in channels:
            ch_path = os.path.join(p_path, ch)
            if os.path.isdir(ch_path):
                # Case-insensitive extension match (.dcm / .DCM both occur).
                channel_info[ch] = sum(
                    1 for f in os.listdir(ch_path) if f.upper().endswith(".DCM")
                )
            else:
                missing_channels.append(ch)
                channel_info[ch] = 0
        # Any non-hidden sub-folder that is not an expected channel.
        extra_channels = [d for d in subdirs if d not in channels]
        results.append({
            "patient_id": p,
            "channels": channel_info,
            "missing_channels": missing_channels,
            "extra_channels": extra_channels,
            "total_target_dcm": sum(channel_info.values()),
        })
    return results
# ============================================================
# Main
# ============================================================
def main():
    """Run the full cleaning pipeline against BASE_DIR.

    Steps: (1) xlsx -> cleaned records, (2) rename group folders to
    English, (3) write per-group and combined CSVs, (4) scan MRI channel
    integrity and write a Markdown report (cleaning_report.md).
    """
    print("=" * 60)
    print(" Neonatal Brain MRI Dataset - Data Cleaning")
    print("=" * 60)
    # --- Step 1: Process xlsx files ---
    print("\n[1/4] Processing xlsx files -> CSV ...")
    all_records = []
    group_stats = {}  # english group name -> record count
    for cn_name, en_name in FOLDER_MAP.items():
        records = process_xlsx(cn_name, en_name)
        all_records.extend(records)
        group_stats[en_name] = len(records)
        print(f" {cn_name} -> {en_name}: {len(records)} records")
    # --- Step 2: Rename folders ---
    # Must happen before Step 3, which writes CSVs into the English-named
    # folders.
    print("\n[2/4] Renaming folders to English ...")
    renamed = rename_folders()
    for cn, en in renamed:
        print(f" {cn} -> {en}")
    # --- Step 3: Save individual and combined CSVs ---
    print("\n[3/4] Saving CSV files ...")
    csv_fields = ["patient_id", "group", "group_label", "group_name_en",
                  "exam_description", "diagnosis"]
    # Individual CSVs: one clinical_data.csv inside each group folder.
    for en_name in FOLDER_MAP.values():
        group_records = [r for r in all_records if r["group"] == en_name]
        csv_path = os.path.join(BASE_DIR, en_name, "clinical_data.csv")
        with open(csv_path, "w", newline="", encoding="utf-8") as f:
            writer = csv.DictWriter(f, fieldnames=csv_fields)
            writer.writeheader()
            writer.writerows(group_records)
        print(f" Saved: {en_name}/clinical_data.csv ({len(group_records)} records)")
    # Combined CSV with every group's records.
    combined_path = os.path.join(BASE_DIR, "clinical_data_all.csv")
    with open(combined_path, "w", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=csv_fields)
        writer.writeheader()
        writer.writerows(all_records)
    print(f" Saved: clinical_data_all.csv ({len(all_records)} total records)")
    # --- Step 4: Integrity scan & report ---
    # The report is assembled as a list of Markdown lines and joined once
    # at the end.
    print("\n[4/4] Scanning data integrity ...")
    report_lines = []
    report_lines.append("# Data Cleaning Report")
    report_lines.append(f"**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
    report_lines.append("## 1. Dataset Overview\n")
    report_lines.append("| Group | Folder | Patients | Label |")
    report_lines.append("|-------|--------|----------|-------|")
    for en_name, info in GROUP_LABELS.items():
        count = group_stats.get(en_name, 0)
        report_lines.append(
            f"| {info['en']} | `{en_name}/` | {count} | {info['label']} |"
        )
    # NOTE(review): this counts records, which matches patients only if the
    # xlsx files hold one row per patient — confirm against the data.
    report_lines.append(f"\n**Total patients**: {len(all_records)}\n")
    report_lines.append("## 2. Data Cleaning Actions\n")
    report_lines.append("| Action | Details |")
    report_lines.append("|--------|---------|")
    report_lines.append("| Removed PII | `姓名` (patient name), `住院号` (hospital ID) |")
    report_lines.append("| Column translation | `编号`→`patient_id`, `检查描述`→`exam_description`, `检查结果`→`diagnosis` |")
    report_lines.append("| Added columns | `group`, `group_label` (0-3), `group_name_en` |")
    report_lines.append("| Format conversion | `.xlsx` → `.csv` (UTF-8) |")
    report_lines.append("| Folder renaming | Chinese → English (see below) |")
    report_lines.append("| Text cleaning | Removed non-breaking spaces (`\\xa0`), stripped whitespace |\n")
    report_lines.append("### Folder Renaming\n")
    report_lines.append("| Original (Chinese) | New (English) |")
    report_lines.append("|-------------------|---------------|")
    for cn, en in FOLDER_MAP.items():
        report_lines.append(f"| `{cn}` | `{en}` |")
    report_lines.append("\n## 3. MRI Channel Integrity\n")
    report_lines.append(f"**Target channels**: {', '.join(TARGET_CHANNELS)}\n")
    total_issues = 0
    all_integrity = {}
    # Per-group integrity scan: count DCM files and flag missing channels.
    for en_name in FOLDER_MAP.values():
        integrity = scan_integrity(en_name)
        all_integrity[en_name] = integrity
        issues = [r for r in integrity if r["missing_channels"]]
        total_issues += len(issues)
        total_dcm = sum(r["total_target_dcm"] for r in integrity)
        # Guard against an empty group to avoid division by zero.
        avg_dcm = total_dcm / len(integrity) if integrity else 0
        report_lines.append(f"### {en_name} ({GROUP_LABELS[en_name]['en']})\n")
        report_lines.append(f"- Patients: {len(integrity)}")
        report_lines.append(f"- Total target-channel DCM files: {total_dcm}")
        report_lines.append(f"- Average DCM per patient (4 channels): {avg_dcm:.1f}")
        if issues:
            report_lines.append(f"- **Missing channels ({len(issues)} patients)**:")
            for r in issues:
                report_lines.append(
                    f" - `{r['patient_id']}`: missing {', '.join(r['missing_channels'])}"
                )
        else:
            report_lines.append("- All patients have complete target channels")
        # Extra channels summary: union of non-target sub-folders seen.
        all_extras = set()
        for r in integrity:
            all_extras.update(r["extra_channels"])
        if all_extras:
            report_lines.append(
                f"- Extra channels present (not used): {', '.join(sorted(all_extras))}"
            )
        report_lines.append("")
    report_lines.append("## 4. Output File Structure\n")
    report_lines.append("```")
    report_lines.append("feng_dataset/")
    report_lines.append("├── clinical_data_all.csv # Combined dataset (all groups)")
    report_lines.append("├── clean_data.py # This cleaning script")
    report_lines.append("├── cleaning_report.md # This report")
    report_lines.append("├── normal/ # Normal controls")
    report_lines.append("│ ├── clinical_data.csv")
    report_lines.append("│ └── 001-441/ # Patient folders")
    report_lines.append("│ ├── FLAIR/ (*.DCM)")
    report_lines.append("│ ├── T1WI/ (*.DCM)")
    report_lines.append("│ ├── T2-SAG/ (*.DCM)")
    report_lines.append("│ └── T2WI/ (*.DCM)")
    report_lines.append("├── LVM/ # Lateral Ventricular Megaly")
    report_lines.append("│ ├── clinical_data.csv")
    report_lines.append("│ └── 100-xxx/ ...")
    report_lines.append("├── PVL/ # Periventricular Leukomalacia")
    report_lines.append("│ ├── clinical_data.csv")
    report_lines.append("│ └── 010-xxx/ ...")
    report_lines.append("└── LVM_PVL/ # LVM + PVL")
    report_lines.append(" ├── clinical_data.csv")
    report_lines.append(" └── 110-xxx/ ...")
    report_lines.append("```\n")
    report_lines.append("## 5. CSV Column Description\n")
    report_lines.append("| Column | Type | Description |")
    report_lines.append("|--------|------|-------------|")
    report_lines.append("| `patient_id` | string | Unique patient identifier (e.g., `001-441`) |")
    report_lines.append("| `group` | string | Group folder name: `normal`, `LVM`, `PVL`, `LVM_PVL` |")
    report_lines.append("| `group_label` | int | Numeric label: 0=Normal, 1=LVM, 2=PVL, 3=LVM+PVL |")
    report_lines.append("| `group_name_en` | string | Full English group name |")
    report_lines.append("| `exam_description` | string | Radiologist's MRI examination description (Chinese) |")
    report_lines.append("| `diagnosis` | string | Final diagnosis conclusion (Chinese) |\n")
    report_lines.append("## 6. Notes\n")
    report_lines.append("- **Medical text** (`exam_description`, `diagnosis`) is kept in the original Chinese "
                        "to preserve clinical accuracy. Machine translation of specialized medical "
                        "radiology reports may introduce errors.")
    report_lines.append("- **Original xlsx files** are retained in each folder as backup.")
    report_lines.append(f"- **Data integrity issues**: {total_issues} patient(s) with missing target channels.")
    report_lines.append("- **Disease description document**: `新生儿侧脑室扩张+脑白质软化疾病描述 AI+医生审核版本.docx` "
                        "is preserved as-is (reference document, not patient data).")
    # Write report to BASE_DIR as UTF-8 Markdown.
    report_path = os.path.join(BASE_DIR, "cleaning_report.md")
    with open(report_path, "w", encoding="utf-8") as f:
        f.write("\n".join(report_lines))
    print(f" Report saved: cleaning_report.md")
    # Summary printed to the console.
    print("\n" + "=" * 60)
    print(" CLEANING COMPLETE")
    print("=" * 60)
    print(f" Total patients: {len(all_records)}")
    print(f" Groups: {len(FOLDER_MAP)}")
    print(f" Integrity issues: {total_issues} patient(s) with missing channels")
    print(f" Output files:")
    print(f" - clinical_data_all.csv (combined)")
    print(f" - */clinical_data.csv (per-group)")
    print(f" - cleaning_report.md (report)")
    print()
# Run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()