import pickle
from pathlib import Path

import pandas as pd
import pyarrow.parquet as pq
from datasets import (DatasetInfo, Features, GeneratorBasedBuilder, Sequence,
                      Split, SplitGenerator, Value)


class FcsParquetIO:
    """Methods to read the parquet files that FileTransformer.fcs_to_parquet
    produces from a single fcs file.
    """
    @staticmethod
    def read_parquets(read_dir: Path) -> tuple[dict, pd.DataFrame]:
        """Read a group of parquet files that were transformed and partitioned
        from one fcs file by FileTransformer.fcs_to_parquet.

        Args:
            read_dir (Path): The directory containing all parquet partitions of one fcs file.

        Returns:
            tuple[dict, pd.DataFrame]: The metadata of the original fcs file and
                its event data, concatenated across all partitions in filename order.
        """
        parquet_filepaths = sorted(read_dir.glob("*.parquet"))
        result = {"meta": None, "event": pd.DataFrame()}
        for parquet_filepath in parquet_filepaths:
            table = pq.read_table(parquet_filepath)

            # The fcs metadata travels in the Arrow schema metadata of the
            # first partition only.
            if "part_001" in parquet_filepath.stem:
                raw_meta = table.schema.metadata or {}
                result["meta"] = {k.decode("utf8"): v.decode("utf8")
                                  for k, v in raw_meta.items()}

            result["event"] = pd.concat(
                [result["event"], table.to_pandas()], axis=0, ignore_index=True)

        return result["meta"], result["event"]
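
# A minimal usage sketch (hypothetical paths; assumes FileTransformer.fcs_to_parquet
# wrote partitions named part_001.parquet, part_002.parquet, ... into one
# directory per tube):
#
#     meta, events = FcsParquetIO.read_parquets(Path("fcs/FLOW_0001/tube_01"))
#     print(len(meta), events.shape)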


feature_fields = {
    # Sample-level metadata taken from sample_metas.csv.
    "flow_id": Value("string"),
    "original_id": Value("string"),
    "specimen": Value("string"),
    "purpose": Value("string"),
    "site": Value("string"),
    "machine": Value("string"),
    "color_counts": Value("int32"),
    "sampling_date": Value("date32"),
    "measuring_date": Value("date32"),
    "panel": Value("string"),
    "label": Value("string"),
    "sublabel": Sequence(Value("string")),
    "RDP": Value("float32"),

    # Per-tube summary fields.
    "tube_counts": Value("int32"),
    "tube_event_counts": Value("string"),
    "tube_channels": Value("string"),

    # Pickled dict of every tube's meta and event data; see _generate_examples.
    "data": Value("binary")
}
features = Features(feature_fields)
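
# For reference, one generated row can be checked against this schema with
# Features.encode_example (illustrative values only; in the real builder the
# "data" field carries a pickled dict of tubes):
#
#     row = {key: None for key in feature_fields}
#     row.update(flow_id="F001", sublabel=["AML"], data=pickle.dumps({}))
#     features.encode_example(row)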


class BCCasebook_binary(GeneratorBasedBuilder):
    def _info(self):
        return DatasetInfo(
            features=features,
            description="This dataset is a project of BC Casebook data labeled for Malignant and Non-malignant.",
            homepage="",
            citation="",
            license=""
        )

    def _split_generators(self, dl_manager):
        data_dir = Path(self.base_path) / "fcs"
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"data_dir": data_dir},
            )
        ]

    def _generate_examples(self, data_dir: Path):
        sample_metas = pd.read_csv(Path(self.base_path) / "sample_metas.csv")

        # Cast csv columns to the declared feature types. Equality (`==`) is
        # required here: `Value("int32")` builds a new object on every call,
        # so an identity check with `is` would always be False.
        for column in sample_metas.columns:
            if column in feature_fields:
                feature_type = feature_fields[column]
                if feature_type == Value("int32"):
                    # Nullable pandas Int32 tolerates missing values.
                    sample_metas[column] = sample_metas[column].astype("Int32")
                elif feature_type == Value("float32"):
                    sample_metas[column] = sample_metas[column].astype("float32")
                elif feature_type == Value("date32"):
                    sample_metas[column] = pd.to_datetime(
                        sample_metas[column], format="%m/%d/%Y").dt.date
                elif feature_type == Value("string"):
                    sample_metas[column] = sample_metas[column].astype("string")

        # Declared features missing from the csv default to None.
        for column in feature_fields:
            if column not in sample_metas.columns:
                sample_metas[column] = None

        # Collapse every "sublabel*" csv column (e.g. "sublabel_1") into the
        # single list-valued "sublabel" column declared as Sequence(Value("string")).
        # The target column itself is excluded so it is not dropped below.
        sublabel_columns = [
            column for column in sample_metas.columns
            if "sublabel" in column and column != "sublabel"]
        if sublabel_columns:
            sample_metas["sublabel"] = sample_metas.apply(
                lambda row: [str(row[col]) for col in sublabel_columns], axis=1)
            sample_metas = sample_metas.drop(columns=sublabel_columns)

        # One sub-directory of data_dir per sample; each sample directory
        # holds one sub-directory per tube.
        for sample_index, sample_dir in enumerate(sorted(data_dir.glob('*'))):
            if sample_dir.is_dir():
                # Assumes every sample directory name matches exactly one
                # flow_id row in sample_metas.csv.
                sample_meta = sample_metas[sample_metas["flow_id"]
                                           == str(sample_dir.name)].to_dict(orient="records")[0]
                sample_tubes = {}
                for tube_dir in sorted(sample_dir.glob('*')):
                    if tube_dir.is_dir():
                        meta, event = FcsParquetIO.read_parquets(tube_dir)
                        sample_tubes[tube_dir.name] = {
                            "meta": meta, "event": event}
                # All tubes are pickled together into the binary "data" feature.
                sample_meta["data"] = pickle.dumps(sample_tubes)

                yield sample_index, sample_meta
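

# A minimal consumption sketch (hypothetical path; assumes this file serves as
# a `datasets` loading script for a local dataset directory that also contains
# sample_metas.csv and the fcs/ parquet tree):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/this/script", split="train")
#     tubes = pickle.loads(ds[0]["data"])
#     for tube_name, tube in tubes.items():
#         print(tube_name, tube["event"].shape)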