File size: 14,264 Bytes
75e9074
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
#!/usr/bin/env python3
"""
Data Cleaning Script for Neonatal Brain MRI Dataset
- Removes PII (patient name, hospital ID)
- Translates column headers to English
- Converts xlsx to CSV
- Renames folders to English
- Generates a combined dataset and cleaning report
"""

import os
import shutil
import csv
import json
from datetime import datetime

import openpyxl

# ============================================================
# Configuration
# ============================================================
# Root of the dataset on the local machine; every path below is joined
# onto this. NOTE(review): hard-coded absolute path — adjust per machine.
BASE_DIR = "/Users/yty/Desktop/med_paper/feng_dataset"

# Mapping: original Chinese folder name -> English name
# (normal / lateral-ventricular-megaly / periventricular-leukomalacia / both)
FOLDER_MAP = {
    "正常 2026-2-6":       "normal",
    "扩张 2026-2-6":       "LVM",
    "软化 2026-2-6":       "PVL",
    "扩张+软化 2026-2-7":  "LVM_PVL",
}

# Mapping: original xlsx filename (inside each folder) -> group info
# Keyed by the ORIGINAL Chinese folder name, since the xlsx is read before
# folders are renamed (see main(), step 1 vs step 2).
XLSX_MAP = {
    "正常 2026-2-6":       "正常 2026-2-6.xlsx",
    "扩张 2026-2-6":       "扩张 2026-2-6.xlsx",
    "软化 2026-2-6":       "软化 2026-2-6.xlsx",
    "扩张+软化 2026-2-7":  "扩张+软化 2026-2-7 严老师.xlsx",
}

# Group labels: numeric class label plus Chinese/English display names,
# keyed by the English folder name.
GROUP_LABELS = {
    "normal":   {"label": 0, "cn": "正常",       "en": "Normal"},
    "LVM":      {"label": 1, "cn": "侧脑室扩张",  "en": "Lateral Ventricular Megaly (LVM)"},
    "PVL":      {"label": 2, "cn": "脑白质软化",   "en": "Periventricular Leukomalacia (PVL)"},
    "LVM_PVL":  {"label": 3, "cn": "扩张+软化",   "en": "LVM + PVL"},
}

# Target MRI channels: the sub-folders expected inside each patient folder.
TARGET_CHANNELS = ["FLAIR", "T1WI", "T2-SAG", "T2WI"]

# Column name translation (Chinese xlsx header -> English CSV column).
# NOTE(review): currently informational only — process_xlsx() reads columns
# by position, not by header name.
COLUMN_MAP = {
    "编号": "patient_id",
    "检查描述": "exam_description",
    "检查结果": "diagnosis",
}


# ============================================================
# Step 1: Process xlsx -> CSV (remove PII, translate headers)
# ============================================================
def process_xlsx(folder_cn, folder_en):
    """Read one group's xlsx sheet, drop PII, and return cleaned records.

    Only columns 1 (patient id), 4 (exam description) and 5 (diagnosis)
    are used; the name / hospital-id columns (2, 3) are never copied into
    the output, so PII is removed by construction.

    Args:
        folder_cn: Original Chinese folder name (key into XLSX_MAP).
        folder_en: English group name (key into GROUP_LABELS).

    Returns:
        List of dicts, one per patient row, matching the output CSV columns.
    """
    xlsx_path = os.path.join(BASE_DIR, folder_cn, XLSX_MAP[folder_cn])
    # read_only streams rows instead of loading the whole sheet; data_only
    # yields computed cell values rather than formula strings.
    wb = openpyxl.load_workbook(xlsx_path, read_only=True, data_only=True)
    try:
        ws = wb["Sheet1"]

        def _clean(value):
            # Normalize a free-text cell: tolerate None and non-string cells,
            # convert NBSP to plain space, strip surrounding whitespace.
            if value is None:
                return ""
            return str(value).replace("\xa0", " ").strip()

        records = []
        # min_row=2 skips the header row; max_col=5 pads short rows with None.
        for row in ws.iter_rows(min_row=2, max_col=5, values_only=True):
            patient_id = row[0]
            # Rows without an id are trailing blanks in the sheet.
            if patient_id is None:
                continue

            records.append({
                "patient_id": str(patient_id).strip(),
                "group": folder_en,
                "group_label": GROUP_LABELS[folder_en]["label"],
                "group_name_en": GROUP_LABELS[folder_en]["en"],
                "exam_description": _clean(row[3]),
                "diagnosis": _clean(row[4]),
            })

        return records
    finally:
        # read_only workbooks keep the file handle open until closed.
        wb.close()


# ============================================================
# Step 2: Rename folders
# ============================================================
def rename_folders():
    """Rename the original Chinese group folders to their English names.

    Returns:
        List of (original_name, outcome) tuples; the outcome is either the
        new English name or "<name> (already exists)" when the target folder
        was already present. Folders where neither source nor target exists
        are silently skipped.
    """
    outcome = []
    for original, target in FOLDER_MAP.items():
        src_path = os.path.join(BASE_DIR, original)
        dst_path = os.path.join(BASE_DIR, target)
        if os.path.exists(dst_path):
            # Already renamed on a previous run — record it, do nothing.
            outcome.append((original, f"{target} (already exists)"))
        elif os.path.exists(src_path):
            os.rename(src_path, dst_path)
            outcome.append((original, target))
    return outcome


# ============================================================
# Step 3: Scan data integrity
# ============================================================
def scan_integrity(folder_en):
    """Check each patient folder for target channels and count DCM files.

    Args:
        folder_en: English group folder name under BASE_DIR.

    Returns:
        List of per-patient dicts with keys: patient_id, channels
        (channel -> DCM count), missing_channels, extra_channels,
        total_target_dcm.
    """
    group_dir = os.path.join(BASE_DIR, folder_en)

    def _visible_subdirs(path):
        # Sub-directories only; hidden entries (e.g. dot-files) are skipped.
        return [
            name for name in os.listdir(path)
            if not name.startswith(".") and os.path.isdir(os.path.join(path, name))
        ]

    report = []
    for patient in sorted(_visible_subdirs(group_dir)):
        patient_dir = os.path.join(group_dir, patient)
        present = _visible_subdirs(patient_dir)

        counts = {}
        missing = []
        for channel in TARGET_CHANNELS:
            channel_dir = os.path.join(patient_dir, channel)
            if not os.path.isdir(channel_dir):
                missing.append(channel)
                counts[channel] = 0
                continue
            # Case-insensitive match on the .DCM extension.
            counts[channel] = sum(
                1 for fname in os.listdir(channel_dir)
                if fname.upper().endswith(".DCM")
            )

        report.append({
            "patient_id": patient,
            "channels": counts,
            "missing_channels": missing,
            "extra_channels": [d for d in present if d not in TARGET_CHANNELS],
            "total_target_dcm": sum(counts.values()),
        })

    return report


# ============================================================
# Main
# ============================================================
def main():
    """Run the full cleaning pipeline.

    Steps: (1) read the per-group xlsx files into PII-free records,
    (2) rename group folders to English, (3) write per-group and combined
    CSVs, (4) scan MRI channel integrity and write a markdown report.
    """
    print("=" * 60)
    print("  Neonatal Brain MRI Dataset - Data Cleaning")
    print("=" * 60)

    # --- Step 1: Process xlsx files ---
    # Must run before folder renaming: process_xlsx resolves paths via the
    # ORIGINAL Chinese folder names.
    print("\n[1/4] Processing xlsx files -> CSV ...")
    all_records = []
    group_stats = {}

    for cn_name, en_name in FOLDER_MAP.items():
        records = process_xlsx(cn_name, en_name)
        all_records.extend(records)
        group_stats[en_name] = len(records)
        print(f"  {cn_name} -> {en_name}: {len(records)} records")

    # --- Step 2: Rename folders ---
    print("\n[2/4] Renaming folders to English ...")
    renamed = rename_folders()
    for cn, en in renamed:
        print(f"  {cn} -> {en}")

    # --- Step 3: Save individual and combined CSVs ---
    # CSVs are written into the RENAMED (English) folders.
    print("\n[3/4] Saving CSV files ...")
    csv_fields = ["patient_id", "group", "group_label", "group_name_en",
                   "exam_description", "diagnosis"]

    # Individual CSVs
    for en_name in FOLDER_MAP.values():
        group_records = [r for r in all_records if r["group"] == en_name]
        csv_path = os.path.join(BASE_DIR, en_name, "clinical_data.csv")
        with open(csv_path, "w", newline="", encoding="utf-8") as f:
            writer = csv.DictWriter(f, fieldnames=csv_fields)
            writer.writeheader()
            writer.writerows(group_records)
        print(f"  Saved: {en_name}/clinical_data.csv ({len(group_records)} records)")

    # Combined CSV
    combined_path = os.path.join(BASE_DIR, "clinical_data_all.csv")
    with open(combined_path, "w", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=csv_fields)
        writer.writeheader()
        writer.writerows(all_records)
    print(f"  Saved: clinical_data_all.csv ({len(all_records)} total records)")

    # --- Step 4: Integrity scan & report ---
    # The report is accumulated as a list of markdown lines and joined once
    # at the end.
    print("\n[4/4] Scanning data integrity ...")
    report_lines = []
    report_lines.append("# Data Cleaning Report")
    report_lines.append(f"**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")

    # Section 1: one table row per group (count comes from the xlsx records).
    report_lines.append("## 1. Dataset Overview\n")
    report_lines.append("| Group | Folder | Patients | Label |")
    report_lines.append("|-------|--------|----------|-------|")
    for en_name, info in GROUP_LABELS.items():
        count = group_stats.get(en_name, 0)
        report_lines.append(
            f"| {info['en']} | `{en_name}/` | {count} | {info['label']} |"
        )
    report_lines.append(f"\n**Total patients**: {len(all_records)}\n")

    # Section 2: fixed summary of the cleaning actions performed above.
    report_lines.append("## 2. Data Cleaning Actions\n")
    report_lines.append("| Action | Details |")
    report_lines.append("|--------|---------|")
    report_lines.append("| Removed PII | `姓名` (patient name), `住院号` (hospital ID) |")
    report_lines.append("| Column translation | `编号`→`patient_id`, `检查描述`→`exam_description`, `检查结果`→`diagnosis` |")
    report_lines.append("| Added columns | `group`, `group_label` (0-3), `group_name_en` |")
    report_lines.append("| Format conversion | `.xlsx` → `.csv` (UTF-8) |")
    report_lines.append("| Folder renaming | Chinese → English (see below) |")
    report_lines.append("| Text cleaning | Removed non-breaking spaces (`\\xa0`), stripped whitespace |\n")

    report_lines.append("### Folder Renaming\n")
    report_lines.append("| Original (Chinese) | New (English) |")
    report_lines.append("|-------------------|---------------|")
    for cn, en in FOLDER_MAP.items():
        report_lines.append(f"| `{cn}` | `{en}` |")

    # Section 3: per-group channel integrity (runs scan_integrity per group).
    report_lines.append("\n## 3. MRI Channel Integrity\n")
    report_lines.append(f"**Target channels**: {', '.join(TARGET_CHANNELS)}\n")

    total_issues = 0
    all_integrity = {}

    for en_name in FOLDER_MAP.values():
        integrity = scan_integrity(en_name)
        all_integrity[en_name] = integrity

        # A patient counts as an issue only when a TARGET channel is missing;
        # extra (unused) channels are reported but not counted as issues.
        issues = [r for r in integrity if r["missing_channels"]]
        total_issues += len(issues)

        total_dcm = sum(r["total_target_dcm"] for r in integrity)
        avg_dcm = total_dcm / len(integrity) if integrity else 0

        report_lines.append(f"### {en_name} ({GROUP_LABELS[en_name]['en']})\n")
        report_lines.append(f"- Patients: {len(integrity)}")
        report_lines.append(f"- Total target-channel DCM files: {total_dcm}")
        report_lines.append(f"- Average DCM per patient (4 channels): {avg_dcm:.1f}")

        if issues:
            report_lines.append(f"- **Missing channels ({len(issues)} patients)**:")
            for r in issues:
                report_lines.append(
                    f"  - `{r['patient_id']}`: missing {', '.join(r['missing_channels'])}"
                )
        else:
            report_lines.append("- All patients have complete target channels")

        # Extra channels summary
        all_extras = set()
        for r in integrity:
            all_extras.update(r["extra_channels"])
        if all_extras:
            report_lines.append(
                f"- Extra channels present (not used): {', '.join(sorted(all_extras))}"
            )
        report_lines.append("")

    # Section 4: static description of the post-cleaning directory layout.
    report_lines.append("## 4. Output File Structure\n")
    report_lines.append("```")
    report_lines.append("feng_dataset/")
    report_lines.append("├── clinical_data_all.csv          # Combined dataset (all groups)")
    report_lines.append("├── clean_data.py                  # This cleaning script")
    report_lines.append("├── cleaning_report.md             # This report")
    report_lines.append("├── normal/                        # Normal controls")
    report_lines.append("│   ├── clinical_data.csv")
    report_lines.append("│   └── 001-441/                   # Patient folders")
    report_lines.append("│       ├── FLAIR/  (*.DCM)")
    report_lines.append("│       ├── T1WI/   (*.DCM)")
    report_lines.append("│       ├── T2-SAG/ (*.DCM)")
    report_lines.append("│       └── T2WI/   (*.DCM)")
    report_lines.append("├── LVM/                           # Lateral Ventricular Megaly")
    report_lines.append("│   ├── clinical_data.csv")
    report_lines.append("│   └── 100-xxx/ ...")
    report_lines.append("├── PVL/                           # Periventricular Leukomalacia")
    report_lines.append("│   ├── clinical_data.csv")
    report_lines.append("│   └── 010-xxx/ ...")
    report_lines.append("└── LVM_PVL/                       # LVM + PVL")
    report_lines.append("    ├── clinical_data.csv")
    report_lines.append("    └── 110-xxx/ ...")
    report_lines.append("```\n")

    # Section 5: column dictionary for the generated CSVs.
    report_lines.append("## 5. CSV Column Description\n")
    report_lines.append("| Column | Type | Description |")
    report_lines.append("|--------|------|-------------|")
    report_lines.append("| `patient_id` | string | Unique patient identifier (e.g., `001-441`) |")
    report_lines.append("| `group` | string | Group folder name: `normal`, `LVM`, `PVL`, `LVM_PVL` |")
    report_lines.append("| `group_label` | int | Numeric label: 0=Normal, 1=LVM, 2=PVL, 3=LVM+PVL |")
    report_lines.append("| `group_name_en` | string | Full English group name |")
    report_lines.append("| `exam_description` | string | Radiologist's MRI examination description (Chinese) |")
    report_lines.append("| `diagnosis` | string | Final diagnosis conclusion (Chinese) |\n")

    report_lines.append("## 6. Notes\n")
    report_lines.append("- **Medical text** (`exam_description`, `diagnosis`) is kept in the original Chinese "
                        "to preserve clinical accuracy. Machine translation of specialized medical "
                        "radiology reports may introduce errors.")
    report_lines.append("- **Original xlsx files** are retained in each folder as backup.")
    report_lines.append(f"- **Data integrity issues**: {total_issues} patient(s) with missing target channels.")
    report_lines.append("- **Disease description document**: `新生儿侧脑室扩张+脑白质软化疾病描述 AI+医生审核版本.docx` "
                        "is preserved as-is (reference document, not patient data).")

    # Write report
    report_path = os.path.join(BASE_DIR, "cleaning_report.md")
    with open(report_path, "w", encoding="utf-8") as f:
        f.write("\n".join(report_lines))
    print(f"  Report saved: cleaning_report.md")

    # Summary
    print("\n" + "=" * 60)
    print("  CLEANING COMPLETE")
    print("=" * 60)
    print(f"  Total patients:  {len(all_records)}")
    print(f"  Groups:          {len(FOLDER_MAP)}")
    print(f"  Integrity issues: {total_issues} patient(s) with missing channels")
    print(f"  Output files:")
    print(f"    - clinical_data_all.csv (combined)")
    print(f"    - */clinical_data.csv   (per-group)")
    print(f"    - cleaning_report.md    (report)")
    print()


# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()