snoroozi committed on
Commit
bc024bd
·
verified ·
1 Parent(s): 23c1ea5

Added hf_load.py with helper functions for loading the dataset.

Browse files
Files changed (1) hide show
  1. hf_load.py +171 -0
hf_load.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Load SurvHTE-Bench from HuggingFace Hub.
3
+
4
+ Two interfaces:
5
+
6
+ 1. load_data(dataset_name) → experiment_setups, experiment_repeat_setups
7
+ (identical output to the original local load_data())
8
+
9
+ 2. load_splits(dataset_name) → nested dict mirroring prepare_data_split output
10
+ results[config_name][scenario_key][rand_idx]["train"|"val"|"test"]
11
+ = (X, W, Y, cate_true)
12
+ """
13
+
14
+ import json
15
+ import numpy as np
16
+ import pandas as pd
17
+ import datasets as hf_datasets
18
+
19
# HuggingFace Hub repository that hosts SurvHTE-Bench.
REPO_ID = "snoroozi/SurvHTE-Bench"

# Per-dataset column layout.
#   X_cols   = None means "resolve dynamically" (columns named X<digit...>).
#   y_cols   = None means the outcomes come as repeated t/e column pairs.
#   cate_col = None means the true CATE is computed as T1 - T0.
SCHEMA = {
    "synthetic": {
        "X_cols": None,  # resolved dynamically: cols starting with X + digit
        "W_col": "W",
        "y_cols": ["observed_time", "event"],
        "cate_col": None,  # computed as T1 - T0
        "idx_col": "id",
    },
    "actg_syn": {
        "X_cols": ["age","wtkg","hemo","homo","drugs","karnof","oprior","z30","zprior","preanti",
                   "race","gender","str2","strat","symptom","treat","offtrt",
                   "cd40","cd420","cd496","r","cd80","cd820"],
        "W_col": "W",
        "y_cols": ["observed_time", "event"],
        "cate_col": "true_cate",
        "idx_col": "idx",
    },
    "twin": {
        "X_cols": ["anemia","cardiac","lung","diabetes","herpes","hydra","hemo","chyper","phyper",
                   "eclamp","incervix","pre4000","preterm","renal","rh","uterine","othermr",
                   "gestat","dmage","dmeduc","dmar","nprevist","adequacy",
                   "dtotord","cigar","drink","wtgain",
                   "pldel_2","pldel_3","pldel_4","pldel_5","resstatb_2","resstatb_3","resstatb_4",
                   "mpcb_1","mpcb_2","mpcb_3","mpcb_4","mpcb_5","mpcb_6","mpcb_7","mpcb_8","mpcb_9"],
        "W_col": "W",
        "y_cols": ["observed_time", "event"],
        "cate_col": "true_cate",
        "idx_col": "idx",
    },
    "actgHC": {
        "X_cols": ["gender","race","hemo","homo","drugs","str2","symptom","age","wtkg","karnof","cd40","cd80"],
        "W_col": "trt",
        "y_cols": None,  # 10 versions: t0/e0 .. t9/e9 — caller slices Y[:, r*2:(r+1)*2]
        "cate_col": "cate_base",
        "idx_col": "id",
    },
    "actgLC": {
        "X_cols": ["gender","race","hemo","homo","drugs","str2","symptom","age","wtkg","karnof","cd40","cd80"],
        "W_col": "trt",
        "y_cols": ["observed_time_month", "effect_non_censor"],
        "cate_col": "cate_base",
        "idx_col": "id",
    },
}

# Mirrors SPLIT_SIZES in hf_upload.py
# (absolute counts for "synthetic", train/val/test fractions otherwise).
SPLIT_SIZES = {
    "synthetic": (5000, 2500, 2500),
    "actg_syn": (0.50, 0.25, 0.25),
    "twin": (0.50, 0.25, 0.25),
    "actgHC": (0.50, 0.25, 0.25),
    "actgLC": (0.50, 0.25, 0.25),
}
75
+
76
def load_data(
    dataset_name: str,
    repo_id: str = REPO_ID,
) -> tuple[dict, dict]:
    """Load a dataset's experiment setups and repeat indices from the Hub.

    Produces output identical to the original local ``load_data()``.

    Args:
        dataset_name: One of the SurvHTE-Bench dataset names (a SCHEMA key).
        repo_id: HuggingFace Hub repository to pull from.

    Returns:
        experiment_setups: ``{setup_key: {scenario: {"dataset": DataFrame,
            "summary": dict[, "metadata": dict]}}}``.
        experiment_repeat_setups: a DataFrame of repeat indices, or — for
            actgHC / actgLC — a ``{repeat_key: DataFrame}`` mapping.
    """
    setups_df = hf_datasets.load_dataset(repo_id, name=dataset_name, split="train").to_pandas()

    # Columns that are bookkeeping, not part of the dataset proper.
    meta_cols = {"setup_key", "scenario", "summary_json", "metadata_json"}
    data_cols = [col for col in setups_df.columns if col not in meta_cols]

    experiment_setups: dict = {}
    for (setup_key, scenario), group in setups_df.groupby(["setup_key", "scenario"], sort=False):
        entry = {
            "dataset": group[data_cols].reset_index(drop=True),
            "summary": json.loads(group["summary_json"].iloc[0]),
        }
        # Metadata is optional: attach it only when at least one row carries it.
        if "metadata_json" in group.columns and group["metadata_json"].notna().any():
            entry["metadata"] = json.loads(group["metadata_json"].iloc[0])
        if setup_key not in experiment_setups:
            experiment_setups[setup_key] = {}
        experiment_setups[setup_key][scenario] = entry

    repeats_df = hf_datasets.load_dataset(repo_id, name=f"{dataset_name}_repeats", split="train").to_pandas()
    idx_cols = [col for col in repeats_df.columns if col != "repeat_key"]

    if dataset_name in ("actgHC", "actgLC"):
        # These datasets store one index table per repeat key.
        experiment_repeat_setups: dict = {
            repeat_key: group[idx_cols].reset_index(drop=True)
            for repeat_key, group in repeats_df.groupby("repeat_key", sort=False)
        }
    else:
        experiment_repeat_setups = repeats_df[idx_cols].reset_index(drop=True)

    return experiment_setups, experiment_repeat_setups
107
+
108
+
109
def load_splits(
    dataset_name: str,
    num_repeats: int = 10,
    repo_id: str = REPO_ID,
) -> dict:
    """
    Load pre-computed train/val/test splits for every repeat from the Hub.

    Returns nested dict mirroring the experiment loop:

        results[config_name][scenario_key][rand_idx]["train"|"val"|"test"]
            = (X, W, Y, cate_true)

    where X/W/Y/cate_true are numpy arrays extracted according to
    SCHEMA[dataset_name]. For actgHC (y_cols is None), Y holds all t/e
    column pairs and the caller slices Y[:, r*2:(r+1)*2].
    """
    schema = SCHEMA[dataset_name]
    W_col = schema["W_col"]
    y_cols = schema["y_cols"]
    cate_col = schema["cate_col"]

    # Load all 30 splits once (3 split types x num_repeats), keyed by
    # (split_type, repeat index), so each Hub fetch happens exactly once.
    raw: dict[tuple, pd.DataFrame] = {}
    for split_type in ("train", "val", "test"):
        for r in range(num_repeats):
            raw[(split_type, r)] = hf_datasets.load_dataset(
                repo_id, name=f"{dataset_name}_splits", split=f"{split_type}_{r}"
            ).to_pandas()

    # Discover (config_name, scenario) pairs from train_0; assumes every
    # split contains the same pairs as train_0 — TODO confirm upstream.
    pairs = list(
        raw[("train", 0)].groupby(["setup_key", "scenario"], sort=False).groups.keys()
    )

    # Resolve X columns from train_0: either taken from the schema, or
    # (synthetic) every column named X followed by digits only.
    sample_df = raw[("train", 0)]
    if schema["X_cols"] is None:
        X_cols = [c for c in sample_df.columns if c.startswith("X") and c[1:].isdigit()]
    else:
        X_cols = schema["X_cols"]

    def _extract(df_grp: pd.DataFrame) -> tuple:
        # Pull (X, W, Y, cate_true) arrays from one filtered split frame.
        # Closes over X_cols / W_col / y_cols / cate_col resolved above.
        X = df_grp[X_cols].to_numpy()
        W = df_grp[W_col].to_numpy()
        if y_cols is not None:
            Y = df_grp[y_cols].to_numpy()
        else:
            # actgHC: return all t/e cols, caller slices Y[:, r*2:(r+1)*2]
            te_cols = [col for i in range(10) for col in (f"t{i}", f"e{i}") if col in df_grp.columns]
            Y = df_grp[te_cols].to_numpy()
        if cate_col is not None and cate_col in df_grp.columns:
            cate_true = df_grp[cate_col].to_numpy()
        else:
            # Fallback for synthetic data: true CATE is the difference of
            # the two potential outcome times T1 and T0.
            cate_true = (df_grp["T1"] - df_grp["T0"]).to_numpy()
        return (X, W, Y, cate_true)

    # Assemble the nested result dict: config -> scenario -> repeat -> split.
    results: dict = {}
    for config_name, scenario_key in pairs:
        results.setdefault(config_name, {})[scenario_key] = {r: {} for r in range(num_repeats)}
        for split_type in ("train", "val", "test"):
            for r in range(num_repeats):
                df = raw[(split_type, r)]
                # Restrict each split frame to the rows of this pair;
                # reset_index keeps positional indexing consistent downstream.
                grp = df[
                    (df["setup_key"] == config_name) & (df["scenario"] == scenario_key)
                ].reset_index(drop=True)
                results[config_name][scenario_key][r][split_type] = _extract(grp)

    return results