Datasets
Tasks: Tabular Regression
Sub-tasks: tabular-single-column-regression
Languages: English
Size: 100K<n<1M
Upload preprocess/qmof_preprocessor.py
data/lmdb/PMT/preprocess/qmof_preprocessor.py
ADDED
@@ -0,0 +1,532 @@
"""
MOFTransformer Preprocessor — LMDB Edition

Prepares a raw MOF dataset for training by processing CIF files and packing
the results into three LMDB files (train / val / test), one file per split.

All numeric target columns from id_prop.csv are stored in the LMDB.
The target variable to predict is chosen at training time, not here.

LMDB schema per file:
    b'__metadata__' → pickle dict {target_columns: [...], n_samples: int}
    b'__keys__'     → pickle list [cif_id, ...]
    b'__targets__'  → pickle dict {cif_id: {col: float_or_nan, ...}}
    b'{cif_id}'     → pickle dict {cif_id,
                                   atom_num, nbr_idx, nbr_dist,
                                   uni_idx, uni_count,
                                   grid_header, griddata16}

Author: MOFTransformer Team
Date: 2026-03-16
"""

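# A minimal read-back sketch for the schema above (illustrative; the file
# name is an example, but any split produced by this script reads the same way):
#
#   env = lmdb.open("qmof_pmt_lmdb_train.lmdb", subdir=False,
#                   readonly=True, lock=False)
#   with env.begin() as txn:
#       meta   = pickle.loads(txn.get(b"__metadata__"))   # target columns etc.
#       keys   = pickle.loads(txn.get(b"__keys__"))       # ordered cif_ids
#       sample = pickle.loads(txn.get(keys[0].encode()))  # one structure dict
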
import os
import sys
import json
import shutil
import pickle
import argparse
from pathlib import Path
from typing import Tuple, List

import pandas as pd
import numpy as np
import lmdb

# Use the pip-installed moftransformer for preprocessing (has compiled GRIDAY).
# The local MOFTransformer copy is only used by trainer.py (patched modules).
from moftransformer.utils.prepare_data import prepare_data
from moftransformer.utils.install_griday import install_griday

# Dummy downstream name used internally for prepare_data splitting.
_DUMMY_DOWNSTREAM = "lmdb_split"


# ---------------------------------------------------------------------------
# GRIDAY helpers
# ---------------------------------------------------------------------------

def verify_griday_installation() -> None:
    """Verify that GRIDAY is installed; install it if necessary."""
    try:
        from moftransformer import __root_dir__
        griday_path = os.path.join(__root_dir__, "libs/GRIDAY/scripts/grid_gen")
        if not os.path.exists(griday_path):
            print("GRIDAY not found. Installing GRIDAY...")
            install_griday()
    except ImportError as e:
        print(f"Error importing GRIDAY: {e}")
        print("Attempting to install GRIDAY...")
        install_griday()


# ---------------------------------------------------------------------------
# CSV / CIF validation helpers
# ---------------------------------------------------------------------------

def load_all_targets(data_dir: Path) -> Tuple[pd.DataFrame, List[str]]:
    """
    Load id_prop.csv and return a DataFrame with all numeric target columns.

    Returns
    -------
    Tuple[pd.DataFrame, List[str]]
        DataFrame indexed by cif_id with one column per numeric target,
        plus the ordered list of target column names. Values may be NaN
        where data is missing.
    """
    id_prop_path = data_dir / "id_prop.csv"
    if not id_prop_path.exists():
        raise FileNotFoundError(f"id_prop.csv not found at {id_prop_path}")

    df = pd.read_csv(id_prop_path)
    if df.empty:
        raise ValueError("id_prop.csv is empty")
    if df.shape[1] < 2:
        raise ValueError(
            f"id_prop.csv must have at least 2 columns. Found {df.shape[1]}."
        )

    cif_id_col = df.columns[0]

    # Detect numeric columns (excluding the cif_id column).
    numeric_cols = []
    for col in df.columns[1:]:
        converted = pd.to_numeric(df[col], errors="coerce")
        if converted.notna().sum() > 0:
            numeric_cols.append(col)
            df[col] = converted  # ensure numeric dtype

    if not numeric_cols:
        raise ValueError("No numeric target columns found in id_prop.csv")

    result = df[[cif_id_col] + numeric_cols].copy()
    result = result.rename(columns={cif_id_col: "cif_id"})
    result["cif_id"] = result["cif_id"].astype(str)
    result = result.set_index("cif_id")

    print(f"Loaded {len(result)} rows from id_prop.csv")
    print(f"Found {len(numeric_cols)} numeric target columns:")
    for col in numeric_cols:
        n_valid = result[col].notna().sum()
        print(f"  {col:50s} {n_valid}/{len(result)} non-NaN")

    return result, numeric_cols


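# Expected id_prop.csv layout (illustrative; the column names here are made
# up, but the first column is always the CIF id and every other numeric
# column is kept as a target):
#
#   cif_id,band_gap,pld,lcd
#   qmof-0001,2.15,4.8,6.1
#   qmof-0002,0.00,3.2,5.4
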
def verify_cif_files(data_dir: Path, df: pd.DataFrame) -> pd.DataFrame:
    """Drop rows whose CIF file is missing; return the filtered DataFrame."""
    raw_dir = data_dir / "raw"
    if not raw_dir.exists():
        raise FileNotFoundError(f"Raw CIF directory not found at {raw_dir}")

    cif_files = {f.stem for f in raw_dir.glob("*.cif")}
    print(f"\nFound {len(cif_files)} CIF files in raw/")

    valid_ids = set(df.index) & cif_files
    missing = set(df.index) - cif_files
    if missing:
        print(f"Warning: {len(missing)} entries have no CIF file and will be skipped")

    # Sort for a deterministic row order (set iteration order is arbitrary).
    filtered = df.loc[sorted(valid_ids)].copy()
    print(f"Valid samples: {len(filtered)}")

    if len(filtered) == 0:
        raise ValueError("No valid samples found after CIF verification.")
    return filtered


def create_dummy_raw_json(df: pd.DataFrame, raw_dir: Path) -> None:
    """
    Write raw_{DUMMY}.json (cif_id → 0.0) so prepare_data can split the data.
    All CIF IDs present in the DataFrame are included.
    """
    dummy_data = {cif_id: 0.0 for cif_id in df.index}
    json_path = raw_dir / f"raw_{_DUMMY_DOWNSTREAM}.json"
    with open(json_path, "w") as f:
        json.dump(dummy_data, f, indent=2)
    print(f"Created dummy split JSON: {json_path} ({len(dummy_data)} entries)")


def create_filtered_id_prop(df: pd.DataFrame, raw_dir: Path) -> None:
    """Write a minimal id_prop.csv (cif_id, dummy_target) for prepare_data."""
    csv_df = pd.DataFrame({"cif_id": df.index, _DUMMY_DOWNSTREAM: 0.0})
    csv_path = raw_dir / "id_prop.csv"
    csv_df.to_csv(csv_path, index=False)
    print(f"Created {csv_path}")


# ---------------------------------------------------------------------------
# Data preparation (calls MOFTransformer's prepare_data)
# ---------------------------------------------------------------------------

def run_data_preparation(
    raw_dir: Path,
    processed_dir: Path,
    train_fraction: float,
    test_fraction: float,
    seed: int,
) -> None:
    """Run MOFTransformer's prepare_data to build graph / grid embeddings."""
    print("\nStarting data preparation with MOFTransformer utilities...")
    print(f"  Raw directory : {raw_dir}")
    print(f"  Processed dir : {processed_dir}")
    print(f"  Downstream    : {_DUMMY_DOWNSTREAM}")

    prepare_data(
        root_cifs=raw_dir,
        root_dataset=processed_dir,
        downstream=_DUMMY_DOWNSTREAM,
        train_fraction=train_fraction,
        test_fraction=test_fraction,
        seed=seed,
    )

    expected = [
        processed_dir / f"train_{_DUMMY_DOWNSTREAM}.json",
        processed_dir / f"val_{_DUMMY_DOWNSTREAM}.json",
        processed_dir / f"test_{_DUMMY_DOWNSTREAM}.json",
    ]
    print("\nVerifying processed split files...")
    for p in expected:
        if p.exists() and p.stat().st_size > 0:
            print(f"  OK {p.name}")
        else:
            raise RuntimeError(
                f"Data preparation did not produce expected file: {p}"
            )
    print("Data preparation completed!")


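# Resulting processed-directory layout (inferred from what pack_split_to_lmdb
# below consumes; exact file set comes from prepare_data):
#
#   processed/
#     train_lmdb_split.json, val_lmdb_split.json, test_lmdb_split.json
#     train/ val/ test/           # per split:
#       {cif_id}.graphdata        #   pickled graph tuple
#       {cif_id}.grid             #   text grid header
#       {cif_id}.griddata16       #   pickled grid data
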
# ---------------------------------------------------------------------------
# LMDB packing
# ---------------------------------------------------------------------------

def _estimate_map_size(split_dir: Path, cif_ids: list) -> int:
    """Estimate LMDB map_size as total file size × 4 safety margin
    (virtual memory only), with a 1 GiB floor."""
    total = 0
    for cif_id in cif_ids:
        for ext in (".graphdata", ".griddata16", ".grid"):
            f = split_dir / f"{cif_id}{ext}"
            if f.exists():
                total += f.stat().st_size
    return max(int(total * 4), 1 << 30)


def pack_split_to_lmdb(
    processed_dir: Path,
    split: str,
    targets_df: pd.DataFrame,
    target_columns: List[str],
    output_path: Path,
) -> None:
    """
    Pack one split into an LMDB file.

    Parameters
    ----------
    processed_dir : Path
        Directory produced by prepare_data.
    split : str
        'train', 'val', or 'test'.
    targets_df : pd.DataFrame
        Index = cif_id, columns = all numeric target columns (may contain NaN).
    target_columns : List[str]
        Ordered list of target column names to store.
    output_path : Path
        Destination LMDB file path.
    """
    json_path = processed_dir / f"{split}_{_DUMMY_DOWNSTREAM}.json"
    if not json_path.exists():
        raise FileNotFoundError(f"Split JSON not found: {json_path}")

    with open(json_path) as f:
        split_cif_ids: list = list(json.load(f).keys())

    split_dir = processed_dir / split
    if not split_dir.exists():
        raise FileNotFoundError(f"Split directory not found: {split_dir}")

    n = len(split_cif_ids)
    print(f"\n  Packing '{split}' split → {output_path.name}")
    print(f"  Samples in split: {n}")

    # Build targets lookup: {cif_id: {col: float_or_nan}}
    all_targets: dict = {}
    for cif_id in split_cif_ids:
        if cif_id in targets_df.index:
            row = targets_df.loc[cif_id]
            all_targets[cif_id] = {
                col: float(row[col]) if pd.notna(row[col]) else float("nan")
                for col in target_columns
            }
        else:
            all_targets[cif_id] = {col: float("nan") for col in target_columns}

    # Coverage stats
    for col in target_columns[:5]:
        n_valid = sum(1 for v in all_targets.values() if not np.isnan(v[col]))
        print(f"    {col[:50]:50s} {n_valid}/{n} non-NaN")
    if len(target_columns) > 5:
        print(f"    ... ({len(target_columns) - 5} more columns)")

    map_size = _estimate_map_size(split_dir, split_cif_ids)
    print(f"  LMDB map_size  : {map_size / 1e9:.2f} GB (virtual)")

    metadata = {
        "target_columns": target_columns,
        "n_samples": n,
    }

    env = lmdb.open(
        str(output_path),
        map_size=map_size,
        subdir=False,
        readonly=False,
        meminit=False,
        map_async=True,
    )

    missing_files = []
    written = 0

    with env.begin(write=True) as txn:
        txn.put(b"__metadata__", pickle.dumps(metadata, protocol=4))
        txn.put(b"__keys__", pickle.dumps(split_cif_ids, protocol=4))
        txn.put(b"__targets__", pickle.dumps(all_targets, protocol=4))

        for cif_id in split_cif_ids:
            graph_path = split_dir / f"{cif_id}.graphdata"
            grid_path = split_dir / f"{cif_id}.grid"
            griddata_path = split_dir / f"{cif_id}.griddata16"

            missing = [p for p in (graph_path, grid_path, griddata_path) if not p.exists()]
            if missing:
                missing_files.extend(str(p) for p in missing)
                continue

            with open(graph_path, "rb") as fh:
                graphdata = pickle.load(fh)

            grid_header = grid_path.read_text()

            with open(griddata_path, "rb") as fh:
                griddata16 = pickle.load(fh)

            sample = {
                "cif_id": cif_id,
                "atom_num": graphdata[1],
                "nbr_idx": graphdata[2],
                "nbr_dist": graphdata[3],
                "uni_idx": graphdata[4],
                "uni_count": graphdata[5],
                "grid_header": grid_header,
                "griddata16": griddata16,
            }

            txn.put(cif_id.encode(), pickle.dumps(sample, protocol=4))
            written += 1

    env.sync()
    env.close()

    if missing_files:
        print(f"  WARNING: {len(missing_files)} files missing, samples skipped:")
        for mf in missing_files[:10]:
            print(f"      {mf}")

    lmdb_size_mb = output_path.stat().st_size / 1e6
    print(f"  Written {written}/{n} samples ({lmdb_size_mb:.1f} MB on disk)")


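# Training-time target lookup (sketch; "band_gap" is an example column name,
# the kind of value trainer.py receives via its --target-column flag):
#
#   with env.begin() as txn:
#       targets = pickle.loads(txn.get(b"__targets__"))
#   y = targets[cif_id]["band_gap"]   # float, possibly NaN
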
def pack_all_splits_to_lmdb(
    processed_dir: Path,
    targets_df: pd.DataFrame,
    target_columns: List[str],
    output_prefix: str,
) -> dict:
    """Pack train / val / test into separate LMDB files; return {split: path}."""
    prefix = Path(output_prefix)
    prefix.parent.mkdir(parents=True, exist_ok=True)

    paths = {}
    for split in ("train", "val", "test"):
        out = prefix.parent / f"{prefix.name}_{split}.lmdb"
        pack_split_to_lmdb(processed_dir, split, targets_df, target_columns, out)
        paths[split] = out

    return paths


# ---------------------------------------------------------------------------
# Main preprocessing pipeline
# ---------------------------------------------------------------------------

def preprocess_dataset(
    data_dir: str,
    output_prefix: str,
    train_fraction: float = 0.8,
    test_fraction: float = 0.1,
    seed: int = 42,
) -> dict:
    """
    Main preprocessing function.

    Reads ALL numeric columns from id_prop.csv and stores them in the LMDB.
    The target variable is chosen at training time via --target-column.

    Produces:
        {output_prefix}_train.lmdb
        {output_prefix}_val.lmdb
        {output_prefix}_test.lmdb
    """
    print("=" * 60)
    print("MOFTransformer Preprocessor — LMDB Edition")
    print("=" * 60)

    data_dir = Path(data_dir).resolve()
    output_prefix = str(Path(output_prefix).resolve())

    print(f"\nData directory : {data_dir}")
    print(f"Output prefix  : {output_prefix}")
    print(f"Train / Test   : {train_fraction} / {test_fraction}  seed={seed}")

    # Step 1
    print("\nStep 1: Verifying GRIDAY installation...")
    verify_griday_installation()

    # Step 2
    print("\nStep 2: Loading all numeric targets from id_prop.csv...")
    targets_df, target_columns = load_all_targets(data_dir)

    # Step 3
    print("\nStep 3: Verifying CIF files...")
    targets_df = verify_cif_files(data_dir, targets_df)

    # Step 4
    print("\nStep 4: Setting up working directory...")
    work_dir = data_dir / "preprocessed_work"
    raw_dir = work_dir / "raw"
    processed_dir = work_dir / "processed"

    if work_dir.exists():
        shutil.rmtree(work_dir)
    for d in (work_dir, raw_dir, processed_dir):
        d.mkdir(parents=True, exist_ok=True)

    source_raw = data_dir / "raw"
    for cif_id in targets_df.index:
        shutil.copy2(source_raw / f"{cif_id}.cif", raw_dir / f"{cif_id}.cif")
    print(f"Copied {len(targets_df)} CIF files")

    # Step 5
    print("\nStep 5: Creating dummy split JSON for prepare_data...")
    create_dummy_raw_json(targets_df, raw_dir)
    create_filtered_id_prop(targets_df, raw_dir)

    # Step 6
    print("\nStep 6: Running data preparation (graph + grid embeddings)...")
    run_data_preparation(
        raw_dir=raw_dir,
        processed_dir=processed_dir,
        train_fraction=train_fraction,
        test_fraction=test_fraction,
        seed=seed,
    )

    # Step 7
    print("\nStep 7: Packing into LMDB files...")
    lmdb_paths = pack_all_splits_to_lmdb(
        processed_dir=processed_dir,
        targets_df=targets_df,
        target_columns=target_columns,
        output_prefix=output_prefix,
    )

    # Step 8
    print("\nStep 8: Cleaning up working directory...")
    shutil.rmtree(work_dir)
    print("Working directory removed")

    print("\n" + "=" * 60)
    print("Preprocessing completed!")
    print(f"Stored {len(target_columns)} target columns:")
    for col in target_columns:
        print(f"  {col}")
    print("\nOutput LMDB files:")
    for split, path in lmdb_paths.items():
        size_mb = path.stat().st_size / 1e6
        print(f"  {split:5s}: {path} ({size_mb:.1f} MB)")
    print("=" * 60)

    return lmdb_paths


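# Programmatic use (sketch; paths are illustrative):
#
#   paths = preprocess_dataset(
#       data_dir="./qmof_cif",
#       output_prefix="./output/qmof_pmt_lmdb",
#   )
#   # → {"train": Path(".../qmof_pmt_lmdb_train.lmdb"), "val": ..., "test": ...}
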
# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------

def parse_arguments() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description=(
            "Preprocess MOF dataset and store ALL numeric targets in LMDB files. "
            "The target variable to predict is chosen at training time."
        ),
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python qmof_preprocessor.py \\
      --data-dir ./qmof_cif/ \\
      --output-prefix ./output/qmof_pmt_lmdb \\
      --train-fraction 0.8 --test-fraction 0.1

  # Produces:
  #   ./output/qmof_pmt_lmdb_train.lmdb
  #   ./output/qmof_pmt_lmdb_val.lmdb
  #   ./output/qmof_pmt_lmdb_test.lmdb
  #
  # Each LMDB stores ALL numeric columns from id_prop.csv.
  # Choose which target to train on via --target-column in trainer.py.
""",
    )
    parser.add_argument(
        "--data-dir", type=str, required=True,
        help="Dataset directory with id_prop.csv and raw/ folder",
    )
    parser.add_argument(
        "--output-prefix", type=str, required=True,
        help="Base path prefix for output LMDB files (no extension)",
    )
    parser.add_argument(
        "--train-fraction", type=float, default=0.8,
        help="Fraction for training (default: 0.8)",
    )
    parser.add_argument(
        "--test-fraction", type=float, default=0.1,
        help="Fraction for testing (default: 0.1)",
    )
    parser.add_argument(
        "--seed", type=int, default=42,
        help="Random seed (default: 42)",
    )
    return parser.parse_args()


def main():
    args = parse_arguments()
    try:
        preprocess_dataset(
            data_dir=args.data_dir,
            output_prefix=args.output_prefix,
            train_fraction=args.train_fraction,
            test_fraction=args.test_fraction,
            seed=args.seed,
        )
    except Exception as e:
        print(f"\nERROR: Preprocessing failed: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    main()