"""
DrivAerML Preprocessing Script
================================
Downloads boundary_i.vtp files from neashton/drivaerml, samples 100k cell
centres with CFD fields, applies local coordinate normalisation, and saves
as boundary_i.pt in a staging directory for upload to HuggingFace.
Usage:
pip install pyvista huggingface_hub torch numpy requests
export HF_TOKEN=your_token_here
python preprocess_drivaerml.py [--start 1] [--end 500]
"""
import os, sys, argparse, tempfile, shutil, requests
import numpy as np
import torch
SOURCE_REPO = "neashton/drivaerml"  # HF dataset holding the raw boundary_*.vtp meshes
TARGET_REPO = "JrHoss/DrivaerML-PCTR"  # destination dataset — presumably used by a later upload step; not referenced in this script
N_POINTS = 100_000  # number of cell centres sampled per run
SEED = 42  # base RNG seed; combined with run_id for per-run determinism
STAGING_DIR = "./staging"  # local directory where .pt files are staged before upload
# Run IDs known to be absent from the source dataset; skipped up front so
# they are not counted as failures.
MISSING_RUNS = {
    167, 211, 218, 221, 248, 282, 291, 295,
    316, 325, 329, 364, 370, 376, 403, 473
}
# Cell-data fields extracted from each mesh; list order fixes the column
# order of the targets array.
TARGET_FIELDS = [
    "CpMeanTrim",
    "pMeanTrim",
    "pPrime2MeanTrim",
    "wallShearStressMeanTrim",
]
# ── Core processing ────────────────────────────────────────────────────────────
def download_vtp(run_id, dest, token):
    """Stream boundary_<run_id>.vtp from the source HF dataset to *dest*.

    Returns True on success and False when the file does not exist
    (HTTP 404); any other HTTP error propagates via raise_for_status().
    Uses a streaming download with large chunks to keep memory flat.
    """
    url = (
        f"https://huggingface.co/datasets/{SOURCE_REPO}"
        f"/resolve/main/run_{run_id}/boundary_{run_id}.vtp"
    )
    headers = {"Authorization": f"Bearer {token}"}
    with requests.get(url, headers=headers, stream=True, timeout=300) as resp:
        if resp.status_code == 404:
            return False
        resp.raise_for_status()
        with open(dest, "wb") as out:
            for block in resp.iter_content(chunk_size=8192 * 1024):
                out.write(block)
    return True
def process_vtp(vtp_path, run_id):
    """Extract cell centres and target fields from a VTP surface mesh.

    Parameters
    ----------
    vtp_path : str
        Path to a boundary_<run_id>.vtp file on disk.
    run_id : int
        Run identifier (kept for interface compatibility; unused here).

    Returns
    -------
    (coords, targets) : tuple of np.ndarray
        coords  -- float32, shape [M, 3]: cell-centre positions.
        targets -- float32, shape [M, len(TARGET_FIELDS)]: one column per
                   field; 2-D (vector) fields are reduced to their magnitude.
    """
    # pyvista is heavy; import lazily so the module imports without it.
    import pyvista as pv
    mesh = pv.read(vtp_path)
    coords = np.array(mesh.cell_centers().points, dtype=np.float32)  # [M, 3]
    # Derive the column count from TARGET_FIELDS instead of hard-coding 4,
    # so adding/removing a field only requires editing the constant.
    targets = np.zeros((len(coords), len(TARGET_FIELDS)), dtype=np.float32)
    for i, field in enumerate(TARGET_FIELDS):
        data = np.array(mesh.cell_data[field], dtype=np.float32)
        if data.ndim == 2:  # vector field (e.g. wall shear stress) -> magnitude
            data = np.linalg.norm(data, axis=1)
        targets[:, i] = data
    return coords, targets
def sample_and_normalise(coords, targets, run_id):
    """Draw N_POINTS random cells and z-score the coordinates per axis.

    Sampling is deterministic per run (seeded with run_id + SEED). When the
    mesh has fewer than N_POINTS cells, sampling falls back to drawing with
    replacement. Returns (coords_norm, targets, idx, mean, std).
    """
    generator = np.random.default_rng(run_id + SEED)
    n_cells = len(coords)
    chosen = generator.choice(n_cells, size=N_POINTS, replace=n_cells < N_POINTS)

    sampled_coords = coords[chosen]
    sampled_targets = targets[chosen]

    mean = sampled_coords.mean(axis=0)
    spread = sampled_coords.std(axis=0)
    # Guard degenerate axes (near-zero spread) against division by zero.
    std = np.where(spread < 1e-8, 1.0, spread)

    return (sampled_coords - mean) / std, sampled_targets, chosen, mean, std
def make_pt(coords_norm, targets, idx, mean, std, run_id):
    """Package the sampled arrays into the dict saved as boundary_<run_id>.pt.

    Expected shapes (when fed from sample_and_normalise): coords [N, 3],
    targets [N, 4], sample_idx [N], coords_mean/coords_std [3]; run_id is
    stored as a plain int.
    """
    def _f32(arr):
        # All float payloads are stored as float32 tensors (copies the input).
        return torch.tensor(arr, dtype=torch.float32)

    return {
        "coords": _f32(coords_norm),
        "targets": _f32(targets),
        "sample_idx": torch.tensor(idx, dtype=torch.int64),
        "coords_mean": _f32(mean),
        "coords_std": _f32(std),
        "run_id": run_id,
    }
# ── Per-run pipeline ───────────────────────────────────────────────────────────
def process_run(run_id, token, tmp_dir):
    """Download, sample, normalise and stage one run as a .pt file.

    Returns True when the run is staged (or already was), False when the
    source file is missing or any step raises. The temporary VTP download
    is always removed, success or failure.
    """
    staged = os.path.join(STAGING_DIR, f"run_{run_id}", f"boundary_{run_id}.pt")
    if os.path.exists(staged):
        print(f"[{run_id:03d}] already staged — skip")
        return True

    vtp_file = os.path.join(tmp_dir, f"boundary_{run_id}.vtp")
    try:
        print(f"[{run_id:03d}] downloading ...", end=" ", flush=True)
        if not download_vtp(run_id, vtp_file, token):
            print("NOT FOUND")
            return False
        size_mb = os.path.getsize(vtp_file) / 1024**2
        print(f"done ({size_mb:.0f} MB)")

        coords, targets = process_vtp(vtp_file, run_id)
        coords_n, targets, idx, mu, sigma = sample_and_normalise(coords, targets, run_id)

        os.makedirs(os.path.dirname(staged), exist_ok=True)
        torch.save(make_pt(coords_n, targets, idx, mu, sigma, run_id), staged)
        print(f"[{run_id:03d}] saved ({os.path.getsize(staged)/1024**2:.1f} MB)")
        return True
    except Exception as e:
        # Best-effort pipeline: report and move on to the next run.
        print(f"[{run_id:03d}] ERROR: {e}")
        return False
    finally:
        if os.path.exists(vtp_file):
            os.remove(vtp_file)
# ── Main ───────────────────────────────────────────────────────────────────────
def main():
    """CLI entry point: stage runs --start..--end, skipping known-missing IDs."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--start", type=int, default=1)
    parser.add_argument("--end", type=int, default=500)
    args = parser.parse_args()

    token = os.environ.get("HF_TOKEN")
    if not token:
        sys.exit("Set HF_TOKEN environment variable first.")

    os.makedirs(STAGING_DIR, exist_ok=True)
    tmp_dir = tempfile.mkdtemp(prefix="drivaerml_")

    processed, skipped, failed = [], [], []
    try:
        for run_id in range(args.start, args.end + 1):
            if run_id in MISSING_RUNS:
                skipped.append(run_id)
                continue
            if process_run(run_id, token, tmp_dir):
                processed.append(run_id)
            else:
                failed.append(run_id)
    except KeyboardInterrupt:
        print("\nInterrupted.")
    finally:
        # Always clean up the scratch directory, even on Ctrl-C.
        shutil.rmtree(tmp_dir, ignore_errors=True)

    print(f"\nDone — processed: {len(processed)} skipped: {len(skipped)} failed: {len(failed)}")
    if failed:
        print(f"Failed IDs: {failed}")
if __name__ == "__main__":
main() |