import pickle
from pathlib import Path

import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from datasets import (DatasetInfo, Features, GeneratorBasedBuilder, Sequence,
                      Split, SplitGenerator, Value)


class FcsParquetIO:
    """Utility methods for reading and writing the parquets produced from a single fcs file by FileTransformer.fcs_to_parquet."""

    @staticmethod
    def read_parquets(read_dir: Path) -> tuple[dict, pd.DataFrame]:
        """Read the group of parquets that FileTransformer.fcs_to_parquet produced by transforming and partitioning one fcs file.

        Args:
            read_dir (Path): The directory containing all parquets transformed and partitioned from one fcs file.

        Returns:
            tuple[dict, pd.DataFrame]: The meta data of the original fcs file, and its event data as a single DataFrame.
        """
        parquet_filepaths = sorted(read_dir.glob("*.parquet"))
        result = {"meta": None, "event": pd.DataFrame()}
        for parquet_filepath in parquet_filepaths:
            table = pq.read_table(parquet_filepath)

            # The fcs meta data is stored only once, as schema metadata on the
            # first partition ("part_001").
            if "part_001" in parquet_filepath.stem:
                raw_meta = table.schema.metadata or {}
                result["meta"] = {k.decode("utf8"): v.decode("utf8")
                                  for k, v in raw_meta.items()}

            result["event"] = pd.concat(
                [result["event"], table.to_pandas()], axis=0, ignore_index=True)

        return result["meta"], result["event"]
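
# Illustrative usage of FcsParquetIO.read_parquets (the path is a placeholder):
#
#     meta, event = FcsParquetIO.read_parquets(Path("fcs/FLOW001/tube_01"))
#     # meta:  dict of fcs keywords read from the first partition's schema metadata
#     # event: pd.DataFrame with the events of all partitions concatenated
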
feature_fields = {
    "flow_id": Value("string"),
    "original_id": Value("string"),
    "specimen": Value("string"),
    "purpose": Value("string"),
    "site": Value("string"),
    "machine": Value("string"),
    "color_counts": Value("int32"),
    "sampling_date": Value("date32"),
    "measuring_date": Value("date32"),
    "panel": Value("string"),
    "label": Value("string"),
    "sublabel": Sequence(Value("string")),
    "RDP": Value("float32"),

    "tube_counts": Value("int32"),
    "tube_event_counts": Value("string"),
    "tube_channels": Value("string"),

    "data": Value("binary")
}
features = Features(feature_fields)
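# "data" holds the per-tube measurements as a pickled dict (see
# _generate_examples below): {tube_name: {"meta": dict, "event": pd.DataFrame}}.
# The remaining fields are taken from sample_metas.csv.
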
class BCCasebook_binary(GeneratorBasedBuilder):
    def _info(self):
        return DatasetInfo(
            features=features,
            description="BC Casebook data labeled as Malignant or Non-malignant.",
            homepage="",
            citation="",
            license=""
        )

    def _split_generators(self, dl_manager):
        data_dir = Path(self.base_path) / "fcs"
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"data_dir": data_dir},
            )
        ]
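
    # Expected layout under self.base_path (inferred from the loading code;
    # the file and directory names below are illustrative):
    #
    #     base_path/
    #         sample_metas.csv              # one row of meta data per flow_id
    #         fcs/
    #             <flow_id>/                # one directory per sample
    #                 <tube_name>/          # one directory per tube
    #                     part_001.parquet  # first partition carries the fcs meta data
    #                     part_002.parquet
    #                     ...
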
    def _generate_examples(self, data_dir: Path):
        sample_metas = pd.read_csv(Path(self.base_path) / "sample_metas.csv")

        # Cast the csv columns to dtypes matching the declared features.
        # Value is a plain dataclass, so equality (==) is required here; an
        # identity check (is) against a fresh Value(...) is never true.
        for column in sample_metas.columns.to_list():
            if column in feature_fields:
                feature_field = column
                feature_type = feature_fields[feature_field]
                if feature_type == Value("int32"):
                    sample_metas[feature_field] = sample_metas[feature_field].astype("Int32")
                elif feature_type == Value("float32"):
                    sample_metas[feature_field] = sample_metas[feature_field].astype("float32")
                elif feature_type == Value("date32"):
                    sample_metas[feature_field] = pd.to_datetime(
                        sample_metas[feature_field], format="%m/%d/%Y").dt.date
                elif feature_type == Value("string"):
                    sample_metas[feature_field] = sample_metas[feature_field].astype("string")

        # Add any declared feature columns that the csv does not provide.
        for column in feature_fields:
            if column not in sample_metas.columns.to_list():
                sample_metas[column] = None

        # Collapse the columns whose names contain "sublabel" into the single
        # list-valued "sublabel" column declared in the features. The exact
        # name "sublabel" is excluded so the collapsed column is not dropped.
        sublabel_columns = [
            column for column in sample_metas.columns.to_list()
            if "sublabel" in column and column != "sublabel"]
        if len(sublabel_columns) > 0:
            sample_metas["sublabel"] = sample_metas.apply(
                lambda row: [str(row[col]) for col in sublabel_columns], axis=1)
            sample_metas = sample_metas.drop(columns=sublabel_columns)

        for sample_index, sample_dir in enumerate(sorted(data_dir.glob('*'))):
            if sample_dir.is_dir():
                sample_meta = sample_metas[sample_metas["flow_id"]
                                           == str(sample_dir.name)].to_dict(orient="records")[0]
                sample_tubes = {}
                for tube_dir in sorted(sample_dir.glob('*')):
                    if tube_dir.is_dir():
                        meta, event = FcsParquetIO.read_parquets(tube_dir)
                        sample_tubes[tube_dir.name] = {
                            "meta": meta, "event": event}
                # Bundle all tubes of the sample into one binary field.
                sample_meta["data"] = pickle.dumps(sample_tubes)

                yield sample_index, sample_meta
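
# Illustrative usage (a sketch assuming this file is used as a local dataset
# script; the path below is a placeholder):
#
#     from datasets import load_dataset
#     ds = load_dataset("path/to/this_script.py", split="train")
#     sample = ds[0]
#     tubes = pickle.loads(sample["data"])
#     # tubes: {tube_name: {"meta": dict, "event": pd.DataFrame}}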