import datasets
import os
import pandas as pd
from huggingface_hub import list_repo_files
import glob
class MERFISHConfig(datasets.BuilderConfig):
    """BuilderConfig for the MERFISH dataset.

    Accepts an optional ``gene_subset`` keyword, which is stored on the
    config and stripped from the kwargs forwarded to the base class; every
    other keyword argument is passed through to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        gene_subset = kwargs.pop("gene_subset", None)
        super().__init__(**kwargs)
        self.gene_subset = gene_subset
class MERFISH(datasets.GeneratorBasedBuilder):
    """MERFISH mouse-brain dataset builder.

    Two configs are exposed ("raw" and "processed"); each yields one example
    per cell: the cell identifier, its per-gene expression vector, and the
    shared list of gene names.
    """

    BUILDER_CONFIGS = [
        MERFISHConfig(name="raw", description="Raw MERFISH counts per gene"),
        MERFISHConfig(name="processed", description="Processed MERFISH data"),
    ]

    def _info(self):
        """Declare the dataset schema: one row per cell."""
        return datasets.DatasetInfo(
            description="MERFISH dataset of mouse brain slices",
            features=datasets.Features({
                "cell_identifier": datasets.Value("string"),
                "expression": datasets.Sequence(datasets.Value("float32")),
                "gene_names": datasets.Sequence(datasets.Value("string")),
            }),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Resolve the parquet files for the selected config.

        Streaming mode hands glob patterns to the download manager and must
        open the resulting paths through ``dl_manager.fs``; non-streaming
        mode lists the exact files on the Hub and downloads them locally.

        Fix vs. previous revision: ``fs`` was only ever passed in the
        non-streaming branch, guarded by ``dl_manager.is_streaming`` — which
        is always False there, so it was dead code and the streaming branch
        (the one that needs a filesystem handle) never received it. ``fs``
        is now supplied where it is actually used.
        """
        expression_prefix = f"{self.config.name}/expression"
        repo_id = "data4science/merfish"
        if dl_manager.is_streaming:
            data_files = {
                "expression": os.path.join(self.config.name, "expression", "*.parquet"),
                "gene_metadata": os.path.join(self.config.name, "gene_metadata.parquet"),
                "cell_metadata": os.path.join(self.config.name, "cell_metadata.parquet"),
            }
            downloaded = dl_manager.download(data_files)
            # NOTE(review): glob.glob only expands *local* paths; if streaming
            # download returns remote (e.g. hf://) patterns this will match
            # nothing — confirm against the datasets streaming backend.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "expression_files": sorted(glob.glob(downloaded["expression"])),
                        "gene_metadata_path": downloaded["gene_metadata"],
                        "cell_metadata_path": downloaded["cell_metadata"],
                        # Streaming paths must be opened via the manager's fs.
                        "fs": dl_manager.fs,
                    },
                ),
            ]
        else:
            # List exact files from the Hub, then download each one.
            all_files = list_repo_files(repo_id, repo_type="dataset")
            expression_files = [
                f for f in all_files
                if f.startswith(expression_prefix) and f.endswith(".parquet")
            ]
            # Sort for a deterministic example order, matching the
            # streaming branch above.
            expression_files = sorted(dl_manager.download(expression_files))
            gene_metadata = dl_manager.download(f"{self.config.name}/gene_metadata.parquet")
            cell_metadata = dl_manager.download(f"{self.config.name}/cell_metadata.parquet")
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "expression_files": expression_files,
                        "gene_metadata_path": gene_metadata,
                        "cell_metadata_path": cell_metadata,
                        # Local files: pandas reads them directly, no fs needed.
                        "fs": None,
                    },
                ),
            ]

    def _generate_examples(self, expression_files, gene_metadata_path, cell_metadata_path, fs=None):
        """Yield ``(key, example)`` pairs, one per cell.

        Args:
            expression_files: parquet files, one row per cell, one column
                per gene.
            gene_metadata_path: parquet file providing gene names (from a
                ``gene_identifier`` column if present, else the index).
            cell_metadata_path: parquet file of cell metadata (loaded but
                currently unused beyond validation of readability).
            fs: optional fsspec-like filesystem; when given, all paths are
                opened through ``fs.open`` (streaming mode).
        """
        if fs is not None:
            gene_df = pd.read_parquet(fs.open(gene_metadata_path, "rb"))
            cell_df = pd.read_parquet(fs.open(cell_metadata_path, "rb"))
        else:
            gene_df = pd.read_parquet(gene_metadata_path)
            cell_df = pd.read_parquet(cell_metadata_path)
        gene_names = gene_df["gene_identifier"].tolist() if "gene_identifier" in gene_df.columns else gene_df.index.tolist()
        idx = 0  # monotonically increasing key across all files
        for filepath in expression_files:
            if fs is not None:
                with fs.open(filepath, "rb") as f:
                    df = pd.read_parquet(f)
            else:
                df = pd.read_parquet(filepath)
            for idx_row, row in df.iterrows():
                yield idx, {
                    "cell_identifier": str(idx_row),
                    "expression": row.to_numpy(dtype="float32").tolist(),
                    "gene_names": gene_names,
                }
                idx += 1