# BD_LST/BD_LST.py
import pickle
from pathlib import Path

import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Sequence, Value
class fcsParquetIO:
    """Writes and reads one tube's worth of FCS event data as parquet files,
    carrying the FCS text-segment metadata in the parquet schema metadata."""

    def write_parquets(self, tables: dict[str, pa.Table], dump_dir: Path):
        # exist_ok=True already tolerates a pre-existing directory,
        # so no separate exists() check is needed.
        dump_dir.mkdir(parents=True, exist_ok=True)
        for parquet_filename, table in tables.items():
            pq.write_table(table, dump_dir / f"{parquet_filename}.parquet")
    def read_parquets(self, read_dir: Path) -> tuple[dict, pd.DataFrame]:
        # Collect into a result dict rather than reusing the name "table",
        # which the per-file pa.Table would otherwise shadow.
        result = {"meta": None, "event": pd.DataFrame()}
        for parquet_filepath in sorted(read_dir.glob("*.parquet")):
            table = pq.read_table(parquet_filepath)
            parquet_filename = parquet_filepath.stem
            # The FCS metadata is stored once, on the schema of the first part.
            if "part_001" in parquet_filename:
                raw_meta = table.schema.metadata or {}
                result["meta"] = {k.decode("utf8"): v.decode("utf8")
                                  for k, v in raw_meta.items()}
            result["event"] = pd.concat(
                [result["event"], table.to_pandas()], axis=0, ignore_index=True)
        return result["meta"], result["event"]
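
# A round-trip sketch of fcsParquetIO (the FCS keyword and channel name below
# are illustrative assumptions, not values taken from this repository):
#
#   io = fcsParquetIO()
#   schema = pa.schema([("FSC-A", pa.float32())], metadata={"$CYT": "BD FACSLyric"})
#   part = pa.table({"FSC-A": [1.0, 2.0]}, schema=schema)
#   io.write_parquets({"part_001": part}, Path("some_tube_dir"))
#   meta, events = io.read_parquets(Path("some_tube_dir"))
#   # meta == {"$CYT": "BD FACSLyric"}; events is a 2-row DataFrame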
features = Features({
    "flow_id": Value("string"),
    "original_id": Value("string"),
    "specimen": Value("string"),
    "purpose": Value("string"),
    "site": Value("string"),
    "machine": Value("string"),
    "color_counts": Value("int16"),
    "sampling_date": Value("date32"),
    "measuring_date": Value("date32"),
    "panel": Value("string"),
    "label": Value("string"),
    "sublabel": Sequence(Value("string")),
    "RDP": Value("float64"),
    "tube_counts": Value("int16"),
    "tube_event_counts": Value("string"),
    "tube_channels": Value("string"),
    "data": Value("binary"),
})
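
# "data" is the pickled per-sample payload built in _generate_examples below:
# pickle.dumps({tube_name: {"meta": <FCS metadata dict>, "event": <DataFrame>}}).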
class BD_LST(GeneratorBasedBuilder):
    """Hugging Face datasets builder for BD LST flow-cytometry samples."""

    def _info(self):
        return DatasetInfo(
            features=features,
            description=(
                "BD LST flow-cytometry data exported from the datalake. "
                "Variants for different training conditions are kept on "
                "separate branches of this repository."
            ),
            homepage="",
            citation="",
            license="",
        )
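
    # NOTE: as uploaded, this script defines no _split_generators, which
    # GeneratorBasedBuilder requires before _generate_examples can run.
    # A minimal sketch, assuming a single train split and a dataset root
    # passed through the builder config's data_dir (an assumption, not
    # something this script confirms):
    def _split_generators(self, dl_manager):
        from datasets import Split, SplitGenerator
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"dataset_dir": Path(self.config.data_dir)},
            )
        ]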
    def _generate_examples(self, dataset_dir: Path):
        sample_metas = pd.read_csv(dataset_dir / "sample_metas.csv")
        data_dir = dataset_dir / "fcs"
        for sample_dir in sorted(data_dir.glob("*")):
            if not sample_dir.is_dir():
                continue
            # Each sample directory is named by its flow_id and matches one
            # row of sample_metas.csv; take that row as a flat dict.
            sample_meta = sample_metas[
                sample_metas["flow_id"] == sample_dir.name].iloc[0].to_dict()
            sample_tubes = {}
            for tube_dir in sorted(sample_dir.glob("*")):
                if tube_dir.is_dir():
                    meta, event = fcsParquetIO().read_parquets(tube_dir)
                    sample_tubes[tube_dir.name] = {"meta": meta, "event": event}
            # Pack the per-tube tables into the binary "data" feature and
            # yield the (key, example) pair GeneratorBasedBuilder expects.
            sample_data = pickle.dumps(sample_tubes)
            yield sample_dir.name, sample_meta | {"data": sample_data}
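
# Loading sketch (the paths and the trust_remote_code flag are assumptions
# about how this script would be consumed, not confirmed by the script):
#
#   from datasets import load_dataset
#   ds = load_dataset("BD_LST.py", data_dir="path/to/dataset_root",
#                     split="train", trust_remote_code=True)
#   tubes = pickle.loads(ds[0]["data"])  # {tube_name: {"meta": ..., "event": ...}}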