# qmof_project/data/util/preprocess_hmof.py
# Source: hermanhugging — "Upload 4 files" (commit 2044f69, verified)
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import csv
import errno
import json
import os
import shutil
from pathlib import Path
from typing import Any, Dict, Iterable, List
# Default hMOF property columns pulled from each structure's JSON sidecar and
# written to id_prop.csv; overridable via the --target-columns CLI option.
DEFAULT_TARGET_COLUMNS = [
    "lcd",
    "pld",
    "void_fraction",
    "surface_area_m2g",
    "surface_area_m2cm3",
]
def parse_target_columns(raw: str) -> List[str]:
    """Split a comma-separated column spec into a list of non-empty names.

    Surrounding whitespace is trimmed from each name; empty pieces
    (e.g. from trailing commas) are dropped.

    Raises:
        ValueError: if no non-empty column name remains after splitting.
    """
    columns: List[str] = []
    for piece in raw.split(","):
        name = piece.strip()
        if name:
            columns.append(name)
    if not columns:
        raise ValueError("Список target-колонок пуст.")
    return columns
def get_nested_value(data: Any, path: str) -> Any:
    """Walk *data* along a dot-separated *path* and return the value found.

    Dict segments are looked up by key; list segments accept purely numeric
    path parts as indices. Returns None as soon as any segment cannot be
    resolved (missing key, index out of range, or an unsupported node type).
    """
    node = data
    for segment in path.split("."):
        if isinstance(node, dict):
            try:
                node = node[segment]
            except KeyError:
                return None
        elif isinstance(node, list) and segment.isdigit():
            # isdigit() guarantees a non-negative integer index.
            index = int(segment)
            if index >= len(node):
                return None
            node = node[index]
        else:
            return None
    return node
def normalize_scalar(value: Any) -> Any:
    """Coerce *value* into something the csv writer can serialize.

    None becomes an empty string, plain scalars pass through unchanged,
    and anything else (lists, dicts, ...) is JSON-encoded.
    """
    if value is None:
        return ""
    if not isinstance(value, (str, int, float, bool)):
        return json.dumps(value, ensure_ascii=False)
    return value
def try_parse_float(value: Any) -> float | None:
    """Best-effort conversion of *value* to float.

    Booleans are deliberately rejected (bool is an int subclass), numbers
    are converted directly, and strings are stripped and parsed. Anything
    unparsable yields None.
    """
    if isinstance(value, bool):
        return None
    if isinstance(value, (int, float)):
        return float(value)
    if not isinstance(value, str):
        return None
    text = value.strip()
    if not text:
        return None
    try:
        return float(text)
    except ValueError:
        return None
def all_targets_are_zero(row: Dict[str, Any], target_columns: List[str]) -> bool:
    """Return True iff every target column in *row* parses to exactly 0.0.

    A missing or unparsable value counts as non-zero, so such rows are
    never treated as all-zero.
    """
    return all(
        (parsed := try_parse_float(row.get(column))) is not None and parsed == 0.0
        for column in target_columns
    )
def safe_link_or_copy(src: Path, dst: Path) -> bool:
    """Hard-link *src* to *dst*, copying instead when linking is not possible.

    Returns:
        True when the fallback copy was used, False when the hard link
        succeeded.

    Raises:
        OSError: for link failures other than cross-device, permission, or
            unsupported-operation errors.
    """
    # Errors that mean "this filesystem/OS won't hardlink here" — fall back.
    recoverable = {errno.EXDEV, errno.EPERM, errno.EOPNOTSUPP, errno.EACCES}
    try:
        os.link(src, dst)
    except OSError as exc:
        if exc.errno not in recoverable:
            raise
        shutil.copy2(src, dst)
        return True
    return False
def place_cif_file(src: Path, dst: Path, file_op: str) -> bool:
    """
    Place *src* at *dst* using the requested operation.

    Returns True if fallback to copy happened (only for hardlink mode), else False.

    Raises:
        ValueError: for an unrecognized *file_op*.
    """
    if file_op == "hardlink":
        return safe_link_or_copy(src, dst)
    if file_op == "copy":
        shutil.copy2(src, dst)
    elif file_op == "move":
        shutil.move(str(src), str(dst))
    elif file_op == "symlink":
        # Resolve to an absolute target so the link survives relocation of cwd.
        os.symlink(src.resolve(), dst)
    else:
        raise ValueError(f"Неизвестный file-op: {file_op}")
    return False
def iter_cif_files(hmof_dir: Path) -> Iterable[Path]:
    """Return the ``*.cif`` files directly inside *hmof_dir*, sorted by path.

    The suffix check is case-insensitive, so ``.CIF`` files are included;
    subdirectories are not descended into.
    """
    candidates = []
    for entry in hmof_dir.iterdir():
        if entry.is_file() and entry.suffix.lower() == ".cif":
            candidates.append(entry)
    return sorted(candidates)
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the hMOF preprocessing script.

    Returns the populated argparse namespace; defaults mirror the
    CoreMof/ASR dataset layout (out dir defaults to ``<hmof-dir>/ASR``
    downstream, hardlink placement, progress every 5000 files).
    """
    parser = argparse.ArgumentParser(
        description=(
            "Создать структуру датасета для hMOF в стиле CoreMof/ASR: "
            "<out-dir>/raw/*.cif + <out-dir>/id_prop.csv"
        )
    )
    parser.add_argument(
        "--hmof-dir",
        type=Path,
        default=Path("hMOF"),
        help="Папка с исходными файлами *.cif и *.json (по умолчанию: hMOF)",
    )
    parser.add_argument(
        "--out-dir",
        type=Path,
        default=None,
        help="Куда собрать датасет (по умолчанию: <hmof-dir>/ASR)",
    )
    parser.add_argument(
        "--target-columns",
        default=",".join(DEFAULT_TARGET_COLUMNS),
        help=(
            "Target-колонки из JSON (через запятую). "
            "Поддерживаются и вложенные пути, например: adsorbent.id"
        ),
    )
    parser.add_argument(
        "--file-op",
        choices=["hardlink", "copy", "move", "symlink"],
        default="hardlink",
        help=(
            "Как поместить CIF в raw: hardlink/copy/move/symlink "
            "(по умолчанию: hardlink)"
        ),
    )
    parser.add_argument(
        "--overwrite-files",
        action="store_true",
        help="Перезаписывать уже существующие CIF в raw",
    )
    parser.add_argument(
        "--max-files",
        type=int,
        default=None,
        help="Ограничить количество обрабатываемых CIF (для теста)",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Проверка без записи файлов",
    )
    parser.add_argument(
        "--progress-every",
        type=int,
        default=5000,
        help="Как часто печатать прогресс (по умолчанию: 5000)",
    )
    return parser.parse_args()
def main() -> None:
    """Build an ASR-style dataset layout from an hMOF source directory.

    For every ``*.cif`` in ``--hmof-dir``, reads the same-named ``*.json``
    sidecar, extracts the configured target columns into
    ``<out-dir>/id_prop.csv``, and places the CIF under ``<out-dir>/raw``
    using the operation selected by ``--file-op``. Structures whose target
    values are all exactly 0.0 are skipped (and a stale copy in ``raw`` is
    removed). Prints a summary of counters at the end.
    """
    args = parse_args()
    hmof_dir = args.hmof_dir
    # Default output location sits next to the input, mirroring CoreMof/ASR.
    out_dir = args.out_dir or (hmof_dir / "ASR")
    raw_dir = out_dir / "raw"
    id_prop_path = out_dir / "id_prop.csv"
    target_columns = parse_target_columns(args.target_columns)
    if not hmof_dir.is_dir():
        raise FileNotFoundError(f"Папка не найдена: {hmof_dir}")
    cif_files = list(iter_cif_files(hmof_dir))
    if args.max_files is not None:
        cif_files = cif_files[: args.max_files]
    total = len(cif_files)
    if total == 0:
        raise RuntimeError(f"В {hmof_dir} не найдено ни одного .cif файла.")
    if not args.dry_run:
        raw_dir.mkdir(parents=True, exist_ok=True)
        out_dir.mkdir(parents=True, exist_ok=True)
    # Run counters reported in the summary at the bottom.
    stats: Dict[str, int] = {
        "total_cif": total,
        "written_rows": 0,
        "missing_json": 0,
        "bad_json": 0,
        "skipped_all_zero_targets": 0,
        "removed_all_zero_from_raw": 0,
        "placed_files": 0,
        "already_present": 0,
        "hardlink_fallback_copy": 0,
    }
    # Per-column count of JSON payloads that lacked that target.
    missing_targets: Dict[str, int] = {col: 0 for col in target_columns}
    # Dry-run still exercises the full CSV code path, but writes to devnull.
    if args.dry_run:
        sink = open(os.devnull, "w", newline="", encoding="utf-8")
    else:
        sink = id_prop_path.open("w", newline="", encoding="utf-8")
    try:
        writer = csv.DictWriter(sink, fieldnames=["mof_id", *target_columns])
        writer.writeheader()
        for idx, cif_path in enumerate(cif_files, start=1):
            mof_id = cif_path.stem
            # Sidecar JSON is expected alongside the CIF with the same stem.
            json_path = hmof_dir / f"{mof_id}.json"
            if not json_path.is_file():
                stats["missing_json"] += 1
                continue
            try:
                with json_path.open("r", encoding="utf-8") as f:
                    payload = json.load(f)
            except json.JSONDecodeError:
                stats["bad_json"] += 1
                continue
            row: Dict[str, Any] = {"mof_id": mof_id}
            for col in target_columns:
                value = get_nested_value(payload, col)
                if value is None:
                    missing_targets[col] += 1
                row[col] = normalize_scalar(value)
            raw_cif_path = raw_dir / cif_path.name
            # Skip structures where every target value equals 0.0.
            if all_targets_are_zero(row, target_columns):
                # Also clean up a stale copy left in raw/ by a previous run.
                if not args.dry_run and raw_cif_path.exists():
                    raw_cif_path.unlink()
                    stats["removed_all_zero_from_raw"] += 1
                stats["skipped_all_zero_targets"] += 1
                continue
            if not args.dry_run:
                if raw_cif_path.exists():
                    if args.overwrite_files:
                        raw_cif_path.unlink()
                        fallback_copy = place_cif_file(cif_path, raw_cif_path, args.file_op)
                        if fallback_copy:
                            stats["hardlink_fallback_copy"] += 1
                        stats["placed_files"] += 1
                    else:
                        stats["already_present"] += 1
                else:
                    fallback_copy = place_cif_file(cif_path, raw_cif_path, args.file_op)
                    if fallback_copy:
                        stats["hardlink_fallback_copy"] += 1
                    stats["placed_files"] += 1
            writer.writerow(row)
            stats["written_rows"] += 1
            if args.progress_every > 0 and idx % args.progress_every == 0:
                print(
                    f"[{idx}/{total}] rows={stats['written_rows']} "
                    f"missing_json={stats['missing_json']} bad_json={stats['bad_json']}"
                )
    finally:
        sink.close()
    print("=== hMOF preprocess done ===")
    print(f"hmof dir: {hmof_dir}")
    print(f"out dir: {out_dir}")
    print(f"raw dir: {raw_dir}")
    print(f"id_prop: {id_prop_path}")
    print(f"dry run: {args.dry_run}")
    print(f"file op: {args.file_op}")
    print(f"total cif: {stats['total_cif']}")
    print(f"id_prop rows written: {stats['written_rows']}")
    print(f"missing json: {stats['missing_json']}")
    print(f"bad json: {stats['bad_json']}")
    print(f"skipped all-zero targets: {stats['skipped_all_zero_targets']}")
    print(f"removed all-zero from raw: {stats['removed_all_zero_from_raw']}")
    print(f"placed cif files: {stats['placed_files']}")
    print(f"already present in raw: {stats['already_present']}")
    print(f"hardlink fallback copy: {stats['hardlink_fallback_copy']}")
    print(f"target columns: {', '.join(target_columns)}")
    for col in target_columns:
        print(f"missing target '{col}': {missing_targets[col]}")
# Script entry point.
if __name__ == "__main__":
    main()