Datasets:

Modalities:
Tabular
Text
Size:
< 1K
DOI:
License:
shakxy42 committed on
Commit
f3a51e0
·
verified ·
1 Parent(s): 408f3fb

Update BubbleML_2.py

Browse files
Files changed (1) hide show
  1. BubbleML_2.py +57 -50
BubbleML_2.py CHANGED
@@ -19,9 +19,8 @@ from datasets import (
19
 
20
  _CITATION = "" # optional
21
  _DESCRIPTION = """
22
- BubbleML: high-fidelity boiling simulations for 3 Liquids- (FC72 & R515B & LN2)
23
- and Flow Boiling Regimes.
24
- Pre-defined train/test splits across all benchmarks.
25
  """
26
 
27
  class BubbleMLConfig(BuilderConfig):
@@ -48,7 +47,7 @@ class BubbleMLDataset(GeneratorBasedBuilder):
48
  BubbleMLConfig(
49
  name="single-bubble",
50
  description="Single-bubble (FC72 & R515B) train/test split",
51
- data_dir="", # repo root when loading remotely; overridden by --data_dir in CLI
52
  data_files={
53
  "train": [
54
  # FC72 train
@@ -85,7 +84,6 @@ class BubbleMLDataset(GeneratorBasedBuilder):
85
  ]
86
 
87
  def _info(self) -> DatasetInfo:
88
- # Nested Sequence to represent 4D arrays (time, channel, H, W) of floats
89
  features = Features({
90
  "input": Sequence(Sequence(Sequence(Sequence(Value("float32"))))),
91
  "output": Sequence(Sequence(Sequence(Sequence(Value("float32"))))),
@@ -101,60 +99,69 @@ class BubbleMLDataset(GeneratorBasedBuilder):
101
  )
102
 
103
  def _split_generators(self, dl_manager: datasets.DownloadManager):
104
- base_dir = (
105
- self.config.data_dir
106
- if self.config.data_dir
107
- else os.path.dirname(os.path.abspath(__file__))
108
- )
109
 
110
- def resolve(split_name: str):
111
- out = []
112
- for rel in self.config.data_files[split_name]:
113
- full = os.path.join(base_dir, rel)
114
- if not os.path.isfile(full):
115
- raise FileNotFoundError(f"Expected data file at {full}, but not found.")
116
- out.append(full)
117
- return out
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
118
 
119
  return [
120
- datasets.SplitGenerator(
121
- name=Split.TRAIN, gen_kwargs={"files": resolve("train")}
122
- ),
123
- datasets.SplitGenerator(
124
- name=Split.TEST, gen_kwargs={"files": resolve("test")}
125
- ),
126
  ]
127
 
128
- def _generate_examples(self, files):
129
- """Yield examples as dicts with input, output, fluid_params, filename."""
130
- for idx, path in enumerate(files):
131
- # Load HDF5 arrays
132
- with h5py.File(path, "r") as h5f:
133
  inp = np.stack([h5f[k][:5] for k in ["dfun", "temperature", "velx", "vely"]])
134
  out = np.stack([h5f[k][5:10] for k in ["dfun", "temperature", "velx", "vely"]])
135
-
136
- # Load metadata JSON
137
- meta_path = path.replace(".hdf5", ".json")
138
- if os.path.isfile(meta_path):
139
- with open(meta_path, "r") as jf:
140
- p = json.load(jf)
141
- fluid_params = [
142
- p["inv_reynolds"],
143
- p["cpgas"],
144
- p["mugas"],
145
- p["rhogas"],
146
- p["thcogas"],
147
- p.get("stefan", 0.0),
148
- p["prandtl"],
149
- p["heater"]["nucWaitTime"],
150
- p["heater"].get("wallTemp", 0.0),
151
- ]
152
- else:
153
- fluid_params = []
154
-
155
  yield idx, {
156
  "input": inp.astype(np.float32).tolist(),
157
  "output": out.astype(np.float32).tolist(),
158
  "fluid_params": fluid_params,
159
- "filename": os.path.basename(path),
160
  }
 
19
 
20
  _CITATION = "" # optional
21
  _DESCRIPTION = """
22
+ BubbleML: high-fidelity single-bubble boiling simulations (FC72 & R515B).
23
+ Pre-defined train/test splits across two directories.
 
24
  """
25
 
26
  class BubbleMLConfig(BuilderConfig):
 
47
  BubbleMLConfig(
48
  name="single-bubble",
49
  description="Single-bubble (FC72 & R515B) train/test split",
50
+ data_dir="", # use repo root locally; if empty, remote download is used
51
  data_files={
52
  "train": [
53
  # FC72 train
 
84
  ]
85
 
86
  def _info(self) -> DatasetInfo:
 
87
  features = Features({
88
  "input": Sequence(Sequence(Sequence(Sequence(Value("float32"))))),
89
  "output": Sequence(Sequence(Sequence(Sequence(Value("float32"))))),
 
99
  )
100
 
101
  def _split_generators(self, dl_manager: datasets.DownloadManager):
102
+ cfg = self.config
 
 
 
 
103
 
104
+ if cfg.data_dir:
105
+ # Local testing: use the provided data_dir
106
+ base_dir = cfg.data_dir
107
+ def resolve_local(split):
108
+ paths = []
109
+ for rel in cfg.data_files[split]:
110
+ full = os.path.join(base_dir, rel)
111
+ if not os.path.isfile(full):
112
+ raise FileNotFoundError(f"Expected {full}, but it was not found.")
113
+ paths.append((full, full.replace(".hdf5", ".json")))
114
+ return paths
115
+ train_pairs = resolve_local("train")
116
+ test_pairs = resolve_local("test")
117
+ else:
118
+ # Remote download: fetch each .hdf5 and .json from the Hub
119
+ base_url = "https://huggingface.co/datasets/hpcforge/BubbleML_2/resolve/main/"
120
+ # build map of rel -> url
121
+ url_map = {}
122
+ for split in ["train", "test"]:
123
+ for rel in cfg.data_files[split]:
124
+ url_map[rel] = base_url + rel
125
+ meta = rel.replace(".hdf5", ".json")
126
+ url_map[meta] = base_url + meta
127
+ # download all
128
+ downloaded = dl_manager.download(url_map)
129
+ # resolve into pairs
130
+ train_pairs = []
131
+ test_pairs = []
132
+ for split in ["train", "test"]:
133
+ for rel in cfg.data_files[split]:
134
+ h5_path = downloaded[rel]
135
+ json_path = downloaded[rel.replace(".hdf5", ".json")]
136
+ train_pairs.append((h5_path, json_path)) if split == "train" else test_pairs.append((h5_path, json_path))
137
 
138
  return [
139
+ datasets.SplitGenerator(name=Split.TRAIN, gen_kwargs={"file_pairs": train_pairs}),
140
+ datasets.SplitGenerator(name=Split.TEST, gen_kwargs={"file_pairs": test_pairs}),
 
 
 
 
141
  ]
142
 
143
+ def _generate_examples(self, file_pairs):
144
+ """Yield examples from each (hdf5,json) pair."""
145
+ for idx, (h5_path, json_path) in enumerate(file_pairs):
146
+ with h5py.File(h5_path, "r") as h5f:
 
147
  inp = np.stack([h5f[k][:5] for k in ["dfun", "temperature", "velx", "vely"]])
148
  out = np.stack([h5f[k][5:10] for k in ["dfun", "temperature", "velx", "vely"]])
149
+ with open(json_path, "r") as jf:
150
+ p = json.load(jf)
151
+ fluid_params = [
152
+ p["inv_reynolds"],
153
+ p["cpgas"],
154
+ p["mugas"],
155
+ p["rhogas"],
156
+ p["thcogas"],
157
+ p["stefan"],
158
+ p["prandtl"],
159
+ p["heater"]["nucWaitTime"],
160
+ p["heater"]["wallTemp"],
161
+ ]
 
 
 
 
 
 
 
162
  yield idx, {
163
  "input": inp.astype(np.float32).tolist(),
164
  "output": out.astype(np.float32).tolist(),
165
  "fluid_params": fluid_params,
166
+ "filename": os.path.basename(h5_path),
167
  }