"""Provide typed access to ISLES24 cases."""

from __future__ import annotations

import re
import shutil
import tempfile
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Self

from stroke_deepisles_demo.core.exceptions import DataLoadError
from stroke_deepisles_demo.core.logging import get_logger

if TYPE_CHECKING:
    from collections.abc import Iterator

    from stroke_deepisles_demo.core.types import CaseFiles

logger = get_logger(__name__)


@dataclass
class LocalDataset:
    """File-based dataset for local ISLES24 data.

    Can be used as a context manager for consistency with HuggingFaceDataset,
    though no cleanup is needed for local files.

    Example:
        with build_local_dataset(path) as ds:
            case = ds.get_case(0)
    """

    data_dir: Path
    cases: dict[str, CaseFiles]  # subject_id -> files

    def __len__(self) -> int:
        return len(self.cases)

    def __iter__(self) -> Iterator[str]:
        return iter(self.cases.keys())

    def __enter__(self) -> Self:
        return self

    def __exit__(self, *args: object) -> None:
        # No cleanup needed for local files
        pass

    def list_case_ids(self) -> list[str]:
        """Return sorted list of subject IDs."""
        return sorted(self.cases.keys())

    def get_case(self, case_id: str | int) -> CaseFiles:
        """Get files for a case by ID or index."""
        if isinstance(case_id, int):
            case_id = self.list_case_ids()[case_id]
        return self.cases[case_id]
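
    # Example (illustrative IDs): get_case(0) and get_case("sub-stroke0001")
    # return the same CaseFiles mapping when that ID sorts first, since integer
    # indices resolve against the sorted ID list above.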

    def cleanup(self) -> None:
        """No-op for local dataset (files are not temporary)."""


# Subject ID extraction
SUBJECT_PATTERN = re.compile(r"sub-(stroke\d{4})_ses-\d+_.*\.nii\.gz")


def parse_subject_id(filename: str) -> str | None:
    """Extract subject ID from BIDS filename."""
    match = SUBJECT_PATTERN.match(filename)
    return f"sub-{match.group(1)}" if match else None


def build_local_dataset(data_dir: Path) -> LocalDataset:
    """
    Scan directory and build case mapping.

    Matches DWI + ADC + Mask files by subject ID.
    Logs warnings for incomplete cases that are skipped.

    Raises:
        FileNotFoundError: If DWI subdirectory (Images-DWI) is missing
    """
    dwi_dir = data_dir / "Images-DWI"
    adc_dir = data_dir / "Images-ADC"
    mask_dir = data_dir / "Masks"

    if not dwi_dir.exists():
        raise FileNotFoundError(f"Data directory not found or invalid: {dwi_dir}")

    cases: dict[str, CaseFiles] = {}
    skipped_no_subject_id = 0
    skipped_no_adc: list[str] = []

    # Scan DWI files to get subject IDs
    for dwi_file in dwi_dir.glob("*.nii.gz"):
        subject_id = parse_subject_id(dwi_file.name)
        if not subject_id:
            skipped_no_subject_id += 1
            continue

        # Find matching ADC and Mask
        adc_file = adc_dir / dwi_file.name.replace("_dwi.", "_adc.")
        mask_file = mask_dir / dwi_file.name.replace("_dwi.", "_lesion-msk.")

        if not adc_file.exists():
            skipped_no_adc.append(subject_id)
            continue

        case_files: CaseFiles = {
            "dwi": dwi_file,
            "adc": adc_file,
        }
        if mask_file.exists():
            case_files["ground_truth"] = mask_file

        cases[subject_id] = case_files

    # Log skipped cases for debugging
    if skipped_no_subject_id > 0:
        logger.warning(
            "Skipped %d DWI files: could not parse subject ID from filename",
            skipped_no_subject_id,
        )
    if skipped_no_adc:
        logger.warning(
            "Skipped %d cases missing ADC file: %s",
            len(skipped_no_adc),
            ", ".join(skipped_no_adc[:5]) + ("..." if len(skipped_no_adc) > 5 else ""),
        )

    logger.info("Loaded %d cases from %s", len(cases), data_dir)
    return LocalDataset(data_dir=data_dir, cases=cases)


# =============================================================================
# HuggingFace Dataset Adapter
# =============================================================================


@dataclass
class HuggingFaceDataset:
    """Dataset adapter for HuggingFace ISLES24 dataset.

    Wraps the HuggingFace dataset and provides the same interface as LocalDataset.
    When get_case() is called, it downloads the NIfTI bytes from that case's
    individual parquet file and writes them to temp files.

    This implementation bypasses `load_dataset()` entirely to avoid:
    1. A PyArrow streaming bug (apache/arrow#45214) that hangs on parquet iteration
    2. Memory pressure from downloading the full 99GB dataset

    IMPORTANT: Use as a context manager to ensure temp files are cleaned up:

        with build_huggingface_dataset(dataset_id) as ds:
            case = ds.get_case(0)
            # ... process case ...
        # temp files automatically cleaned up

    Or call cleanup() manually when done.
    """

    dataset_id: str
    _case_ids: list[str] = field(default_factory=list)
    _case_index: dict[str, int] = field(default_factory=dict)
    _temp_dir: Path | None = field(default=None, repr=False)
    _cached_cases: dict[str, CaseFiles] = field(default_factory=dict, repr=False)

    def __len__(self) -> int:
        return len(self._case_ids)

    def __iter__(self) -> Iterator[str]:
        return iter(self._case_ids)

    def __enter__(self) -> Self:
        return self

    def __exit__(self, *args: object) -> None:
        self.cleanup()

    def list_case_ids(self) -> list[str]:
        """Return sorted list of subject IDs."""
        return sorted(self._case_ids)

    def get_case(self, case_id: str | int) -> CaseFiles:
        """Get files for a case by ID or index.

        Downloads NIfTI bytes from the individual parquet file for this case
        and writes to temp files. Returns cached paths on subsequent calls.

        This uses HfFileSystem + pyarrow to download only the single case (~50MB)
        instead of the full dataset (99GB), completing in ~2 seconds.

        Raises:
            DataLoadError: If HuggingFace data is malformed or missing required fields.
            KeyError: If case_id is not found in the dataset.
        """
        # Resolve case_id to subject_id and file index
        if isinstance(case_id, int):
            if case_id < 0 or case_id >= len(self._case_ids):
                raise IndexError(f"Case index {case_id} out of range [0, {len(self._case_ids)})")
            subject_id = self._case_ids[case_id]
            file_idx = case_id
        else:
            subject_id = case_id
            if subject_id not in self._case_index:
                raise KeyError(f"Case ID '{subject_id}' not found in dataset")
            file_idx = self._case_index[subject_id]

        # Return cached case if already materialized
        if subject_id in self._cached_cases:
            return self._cached_cases[subject_id]

        # Create shared temp directory on first use
        if self._temp_dir is None:
            self._temp_dir = Path(tempfile.mkdtemp(prefix="isles24_hf_"))
            logger.debug("Created temp directory: %s", self._temp_dir)

        # Download case data from individual parquet file
        logger.info("Downloading case %s from HuggingFace...", subject_id)
        case_data = self._download_case_from_parquet(file_idx, subject_id)

        # Create case subdirectory
        case_dir = self._temp_dir / subject_id
        case_dir.mkdir(exist_ok=True)

        # Write NIfTI files to temp directory
        dwi_path = case_dir / f"{subject_id}_ses-02_dwi.nii.gz"
        adc_path = case_dir / f"{subject_id}_ses-02_adc.nii.gz"
        mask_path = case_dir / f"{subject_id}_ses-02_lesion-msk.nii.gz"

        # Write the gzipped NIfTI bytes
        dwi_path.write_bytes(case_data["dwi_bytes"])
        adc_path.write_bytes(case_data["adc_bytes"])

        case_files: CaseFiles = {
            "dwi": dwi_path,
            "adc": adc_path,
        }

        # Write lesion mask if available
        if case_data.get("mask_bytes"):
            mask_path.write_bytes(case_data["mask_bytes"])
            case_files["ground_truth"] = mask_path

        # Cache for subsequent calls
        self._cached_cases[subject_id] = case_files
        logger.info(
            "Case %s ready: DWI=%.1fMB, ADC=%.1fMB",
            subject_id,
            len(case_data["dwi_bytes"]) / 1024 / 1024,
            len(case_data["adc_bytes"]) / 1024 / 1024,
        )

        return case_files

    def _download_case_from_parquet(self, file_idx: int, subject_id: str) -> dict[str, bytes]:
        """Download case data directly from individual parquet file.

        Uses HfFileSystem + pyarrow to read only the columns we need from
        a single parquet file, avoiding the need to download the full dataset.

        Args:
            file_idx: Index of the parquet file (0-148)
            subject_id: Expected subject ID (for validation)

        Returns:
            Dict with dwi_bytes, adc_bytes, and optionally mask_bytes
        """
        import pyarrow.parquet as pq
        from huggingface_hub import HfFileSystem

        from stroke_deepisles_demo.data.constants import ISLES24_NUM_FILES

        # Construct path to the specific parquet file
        fpath = f"datasets/{self.dataset_id}/data/train-{file_idx:05d}-of-{ISLES24_NUM_FILES:05d}.parquet"

        try:
            fs = HfFileSystem()
            with fs.open(fpath, "rb") as f:
                pf = pq.ParquetFile(f)
                # Read only the columns we need
                table = pf.read(columns=["subject_id", "dwi", "adc", "lesion_mask"])
                df = table.to_pandas()

                if len(df) != 1:
                    raise DataLoadError(f"Expected 1 row in parquet file, got {len(df)}: {fpath}")

                row = df.iloc[0]

                # Validate subject_id matches
                actual_subject_id = row["subject_id"]
                if actual_subject_id != subject_id:
                    raise DataLoadError(
                        f"Subject ID mismatch: expected {subject_id}, got {actual_subject_id} in {fpath}"
                    )

                # Extract bytes with defensive error handling
                try:
                    dwi_bytes = row["dwi"]["bytes"]
                    adc_bytes = row["adc"]["bytes"]
                except (KeyError, TypeError) as e:
                    raise DataLoadError(
                        f"Malformed HuggingFace data for {subject_id}: missing 'dwi' or 'adc' bytes. "
                        f"The dataset schema may have changed. Error: {e}"
                    ) from e

                result: dict[str, bytes] = {
                    "dwi_bytes": dwi_bytes,
                    "adc_bytes": adc_bytes,
                }

                # Extract mask if available
                mask_data = row.get("lesion_mask")
                if isinstance(mask_data, dict) and mask_data.get("bytes"):
                    result["mask_bytes"] = mask_data["bytes"]

                return result

        except DataLoadError:
            raise
        except Exception as e:
            raise DataLoadError(f"Failed to download case {subject_id} from {fpath}: {e}") from e

    def cleanup(self) -> None:
        """Remove temp directory and clear cache."""
        if self._temp_dir is not None and self._temp_dir.exists():
            try:
                shutil.rmtree(self._temp_dir)
                logger.debug("Cleaned up temp directory: %s", self._temp_dir)
            except OSError as e:
                logger.warning("Failed to cleanup temp directory %s: %s", self._temp_dir, e)
        self._temp_dir = None
        self._cached_cases.clear()


def build_huggingface_dataset(dataset_id: str) -> HuggingFaceDataset:
    """
    Build ISLES24 dataset adapter for HuggingFace Hub.

    Uses pre-computed case IDs to avoid streaming enumeration (which hangs
    due to PyArrow bug apache/arrow#45214). Actual data is downloaded lazily
    from individual parquet files when get_case() is called.

    Args:
        dataset_id: HuggingFace dataset identifier (e.g., "hugging-science/isles24-stroke")

    Returns:
        HuggingFaceDataset providing case access
    """
    from stroke_deepisles_demo.data.constants import (
        ISLES24_CASE_IDS,
        ISLES24_CASE_INDEX,
        ISLES24_DATASET_ID,
    )

    # Validate dataset_id matches our pre-computed constants
    if dataset_id != ISLES24_DATASET_ID:
        logger.warning(
            "Dataset ID '%s' does not match pre-computed constants for '%s'. "
            "Case IDs may be incorrect.",
            dataset_id,
            ISLES24_DATASET_ID,
        )

    logger.info(
        "Building HuggingFace dataset adapter: %s (%d cases, pre-computed)",
        dataset_id,
        len(ISLES24_CASE_IDS),
    )

    return HuggingFaceDataset(
        dataset_id=dataset_id,
        _case_ids=list(ISLES24_CASE_IDS),
        _case_index=dict(ISLES24_CASE_INDEX),
    )
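

# Usage sketch (dataset ID from the docstring example above; the context manager
# guarantees temp-file cleanup):
#
#     with build_huggingface_dataset("hugging-science/isles24-stroke") as ds:
#         case = ds.get_case(0)  # lazily downloads a single case (~50MB)
#         print(case["dwi"], case["adc"])
#     # temp directory removed on exit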