snoroozi commited on
Commit
23c1ea5
·
verified ·
1 Parent(s): 9b1cd05

Delete survhte_bench.py

Browse files
Files changed (1) hide show
  1. survhte_bench.py +0 -102
survhte_bench.py DELETED
@@ -1,102 +0,0 @@
1
- """
2
- HuggingFace dataset loading script for SurvHTE-Bench.
3
-
4
- Usage
5
- -----
6
- # Full data (default) — reconstruct experiment_setups / experiment_repeat_setups
7
- from datasets import load_dataset
8
- ds = load_dataset("snoroozi/SurvHTE-Bench", "synthetic")
9
-
10
- # Pre-computed splits — get a specific train/val/test fold
11
- ds = load_dataset("snoroozi/SurvHTE-Bench", "synthetic_splits")
12
- train0 = ds["train_0"].to_pandas()
13
-
14
- # Or use the helper wrappers (hf_load.py):
15
- from hf_load import load_data, load_splits
16
- experiment_setups, experiment_repeat_setups = load_data("synthetic")
17
- results = load_splits("synthetic")
18
- """
19
-
20
- import datasets
21
-
22
- _DESCRIPTION = """\
23
- SurvHTE-Bench: A Benchmark for Heterogeneous Treatment Effect Estimation in Survival Analysis.
24
- Includes synthetic, semi-synthetic, and real-world datasets with pre-computed train/val/test splits.
25
- Published at ICLR 2026.
26
- """
27
-
28
- _HOMEPAGE = "https://huggingface.co/datasets/snoroozi/SurvHTE-Benchmark"
29
- _LICENSE = "MIT"
30
- _REPO_URL = "https://huggingface.co/datasets/snoroozi/SurvHTE-Benchmark/resolve/main"
31
-
32
- # ── dataset names ──────────────────────────────────────────────────────────
33
- _DATASETS = {
34
- "synthetic": "synthetic",
35
- "actg_syn": "semi-synthetic",
36
- "twin": "real",
37
- "actgHC": "real",
38
- "actgLC": "real",
39
- }
40
-
41
- _ALL_CONFIGS = list(_DATASETS.keys()) + [f"{n}_repeats" for n in _DATASETS] + \
42
- [f"{n}_splits" for n in _DATASETS]
43
-
44
-
45
class SurvHTEBenchConfig(datasets.BuilderConfig):
    """BuilderConfig for SurvHTE-Bench; every config is pinned to version 1.0.0."""

    def __init__(self, name, **kwargs):
        version = datasets.Version("1.0.0")
        super().__init__(name=name, version=version, **kwargs)
48
-
49
-
50
class SurvHTEBench(datasets.GeneratorBasedBuilder):
    """SurvHTE-Bench dataset loader.

    For each base name in ``_DATASETS`` three configs exist:

    * ``<name>``          — full data, exposed as a single ``train`` split.
    * ``<name>_repeats``  — repeat setups, also a single ``train`` split.
    * ``<name>_splits``   — pre-computed folds, exposed as splits named
      ``train_0`` … ``train_9``, ``val_0`` … ``val_9``, ``test_0`` … ``test_9``.
    """

    BUILDER_CONFIGS = [SurvHTEBenchConfig(name=n) for n in _ALL_CONFIGS]
    DEFAULT_CONFIG_NAME = "synthetic"

    # Number of pre-computed train/val/test repetitions in the *_splits configs.
    _N_SPLIT_REPEATS = 10

    def _info(self):
        # Features are discovered at runtime from parquet; return minimal info here.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _single_train_split(self, dl_manager, name):
        """Download ``<name>/train/0000.parquet`` and expose it as one train split."""
        parquet_url = f"{_REPO_URL}/{name}/train/0000.parquet"
        path = dl_manager.download(parquet_url)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                        gen_kwargs={"path": path})]

    def _split_generators(self, dl_manager):
        """Return the split generators for the selected config.

        ``*_splits`` configs yield one generator per pre-computed fold; the
        ``*_repeats`` and plain full-data configs share the same layout on the
        Hub (``<config>/train/0000.parquet``) and both become a single train
        split via :meth:`_single_train_split`.
        """
        name = self.config.name

        # ── splits config: pre-computed train/val/test folds ───────────────
        if name.endswith("_splits"):
            base = name[: -len("_splits")]
            splits = []
            for split_type in ("train", "val", "test"):
                for r in range(self._N_SPLIT_REPEATS):
                    split_name = f"{split_type}_{r}"
                    url = f"{_REPO_URL}/{base}_splits/{split_name}/0000.parquet"
                    path = dl_manager.download(url)
                    splits.append(
                        datasets.SplitGenerator(
                            name=split_name,
                            gen_kwargs={"path": path},
                        )
                    )
            return splits

        # ── repeats and full-data configs: one train split each ────────────
        return self._single_train_split(dl_manager, name)

    def _generate_examples(self, path):
        """Yield ``(index, row_dict)`` pairs from one downloaded parquet file."""
        # Local import so the module can be imported without pandas installed.
        import pandas as pd

        df = pd.read_parquet(path)
        for idx, row in df.iterrows():
            yield idx, row.to_dict()
102
-