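"""Dataset and loader utilities for PubChemQCR geometry-optimisation trajectories.

Reads gzip-compressed, pickled trajectory records from LMDB shards, converts the
requested optimisation stage into torch_geometric ``Data`` frames (atomic numbers,
positions, per-atom energies, forces), and exposes train/val/test DataLoaders built
from precomputed split files.
"""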
import bisect
import gzip
import json
import os
import pickle
import random
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from itertools import product
from pathlib import Path
from typing import Optional

import lmdb
import numpy as np
import periodictable
import torch
from torch.utils.data import Subset, SubsetRandomSampler
from torch.utils.data.distributed import DistributedSampler
from torch_geometric.data import Data, Dataset
from torch_geometric.loader import DataLoader
from tqdm import tqdm
HARTREE_2_EV = 27.2114            # 1 Hartree in eV
BOHR_2_ANGSTROM = 1.8897          # 1 Angstrom ~= 1.8897 Bohr; with HARTREE_2_EV, converts Hartree/Bohr gradients to eV/Angstrom
_MEAN_ENERGY = -4.269320623583757  # mean of the per-atom energy target, used for normalisation
_STD_ENERGY = 1.0
_STD_FORCE_SCALE = 1.0

# Element symbol -> atomic number, tolerant of upper/lower-case symbols.
atomic_number_mapping = {}
for element in periodictable.elements:
    atomic_number_mapping[element.symbol] = element.number
    atomic_number_mapping[element.symbol.upper()] = element.number
    atomic_number_mapping[element.symbol.lower()] = element.number

# Isolated-atom reference energies (Hartree), keyed by atomic number; subtracted
# from molecular energies to obtain atomization-style targets.
atom_energy = {
1: -0.5002727762,
4: -14.6684425428,
5: -24.6543539532,
6: -37.8462799513,
7: -54.5844893657,
8: -75.0606214015,
9: -99.7155354215,
14: -289.3723539998,
15: -341.2580898032,
16: -398.1049925382,
17: -460.1362417086,
21: -760.5813501324,
22: -849.3013849537,
23: -943.8255794204,
24: -1044.2810289455,
25: -1150.8680174849,
26: -1263.5207828239406,
27: -1382.5485719267936,
28: -1508.0542451335,
29: -1640.1731641564784,
31: -1924.5926070018,
32: -2076.6914561594,
33: -2235.5683127287,
34: -2401.2347730327,
35: -2573.8397377628
}
def find_last_index_with_key(objects, key):
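    """Return the index of the last item whose ``key`` entry exists and is not None, or -1."""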
last_index = -1
for i in range(len(objects) - 1, -1, -1):
if key in objects[i] and objects[i][key] is not None:
last_index = i
break
return last_index
def data_to_pyg(data, key, stage='1st', filter=False):
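    """Convert one raw PubChemQCR record into a list of torch_geometric ``Data`` frames.

    ``data`` is the decompressed, unpickled LMDB value: a dict whose entries
    ('DFT_1st', 'DFT_2nd', 'hf', 'pm3') are trajectories, i.e. lists of frames with
    'coordinates' (per-atom dicts with 'atom', 'x', 'y', 'z'), 'energy' and 'gradient'
    (per-atom dicts with 'dx', 'dy', 'dz'). ``stage`` selects which trajectory (or mix)
    to convert, and ``key`` is stored on each frame as its cid. Each returned frame
    carries per-atom energies in eV (referenced to isolated atoms), forces in
    eV/Angstrom, and the converged final geometry/energy as last_pos/last_y.
    """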
def process_data(phase):
nonlocal data
nonlocal key
nonlocal stage
datas = []
if phase is None or len(phase) == 0:
return datas
if stage == 'mixing':
if len(data['DFT_2nd']) != 0:
last_index = find_last_index_with_key(data['DFT_2nd'], 'energy')
if last_index == -1:
if data['DFT_1st'] is None or len(data['DFT_1st']) == 0:
return datas
last_index = find_last_index_with_key(data['DFT_1st'], 'energy')
last_data = data['DFT_1st'][last_index]
else:
last_data = data['DFT_2nd'][last_index]
else:
if data['DFT_1st'] is None or len(data['DFT_1st']) == 0:
return datas
last_index = find_last_index_with_key(data['DFT_1st'], 'energy')
last_data = data['DFT_1st'][last_index]
elif stage == '1st':
last_index = find_last_index_with_key(data['DFT_1st'], 'energy')
if last_index == -1:
return datas
last_data = phase[last_index]
elif stage == '1st_smash':
last_index = find_last_index_with_key(data['DFT_1st'], 'energy')
if last_index == -1:
return datas
last_data = phase[last_index]
elif stage == '2nd':
last_index = find_last_index_with_key(data['DFT_2nd'], 'energy')
if last_index == -1:
return datas
last_data = phase[last_index]
elif stage == 'hf':
last_index = find_last_index_with_key(data['hf'], 'energy')
if last_index == -1:
return datas
last_data = phase[last_index]
elif stage == 'pm3':
last_index = find_last_index_with_key(data['pm3'], 'energy')
if last_index == -1:
return datas
last_data = phase[last_index]
else:
raise Exception('Unknown stage')
last_coordinates = last_data['coordinates']
last_energy = last_data['energy']
if stage == '1st_smash':
if 'charge' in last_coordinates[0]:
return datas
for d in phase:
coords = d['coordinates']
energy = d['energy']
gradient = d['gradient']
formation_energies = []
atomic_numbers = []
positions = []
last_positions = []
forces = []
            if coords is None or len(coords) == 0:
                continue
if stage == '1st_smash':
if 'charge' in coords[0]:
continue
if energy is None:
continue
if len(coords) != len(last_coordinates):
continue
if len(gradient) != len(coords):
continue
for i, atom_info in enumerate(coords):
atom = atom_info['atom']
atomic_number = atomic_number_mapping[atom]
x = atom_info['x']
y = atom_info['y']
z = atom_info['z']
atomic_numbers.append(atomic_number)
formation_energies.append(atom_energy[atomic_number])
positions.append([x, y, z])
                last_positions.append([last_coordinates[i]['x'], last_coordinates[i]['y'], last_coordinates[i]['z']])
                forces.append([-gradient[i]['dx'] * HARTREE_2_EV * BOHR_2_ANGSTROM,
                               -gradient[i]['dy'] * HARTREE_2_EV * BOHR_2_ANGSTROM,
                               -gradient[i]['dz'] * HARTREE_2_EV * BOHR_2_ANGSTROM])
            x = torch.tensor(atomic_numbers, dtype=torch.long).view(-1, 1)
            pos = torch.tensor(positions, dtype=torch.float)
            last_pos = torch.tensor(last_positions, dtype=torch.float)
            # Per-atom energy in eV, referenced to the isolated atoms and divided by the atom count.
            y = torch.tensor([(energy - sum(formation_energies)) * HARTREE_2_EV / x.size(0)], dtype=torch.float)
            last_y = torch.tensor([(last_energy - sum(formation_energies)) * HARTREE_2_EV / x.size(0)], dtype=torch.float)
            y_force = torch.tensor(forces, dtype=torch.float)
            if (torch.isnan(x).any() or torch.isnan(pos).any() or torch.isnan(last_pos).any()
                    or torch.isnan(y).any() or torch.isnan(last_y).any() or torch.isnan(y_force).any()):
                continue
ds = Data(x=x, natoms=x.size(0), pos=pos, last_pos=last_pos, y=y, last_y=last_y, y_force=y_force, cid=str(key))
datas.append(ds)
return datas
if stage == '1st':
return process_data(data['DFT_1st'])
elif stage == '1st_smash':
return process_data(data['DFT_1st'])
elif stage == '2nd':
return process_data(data['DFT_2nd'])
elif stage == 'mixing':
return process_data(data['DFT_1st']) + process_data(data['DFT_2nd'])
elif stage == 'pm3':
return process_data(data['pm3'])
elif stage == 'hf':
return process_data(data['hf'])
else:
raise Exception('Unknown stage')
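

# Multiprocessing helpers used by filter_valid_keys / get_valid_nums below: each
# worker opens its own read-only LMDB handle rather than sharing a single
# environment across forked processes.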
def process_key(key, db_path, stage, filtering):
    """Return ``key`` if its record yields at least one valid frame for ``stage``, else None."""
    env = lmdb.open(str(db_path), subdir=False, readonly=True, lock=False)
    with env.begin(write=False) as txn:
        datapoint_pickled = txn.get(key)
        data_objects = data_to_pyg(pickle.loads(gzip.decompress(datapoint_pickled)), key, stage=stage, filter=filtering)
    env.close()
    if len(data_objects) > 0:
        return key
    return None


def process_num(key, db_path, stage, filtering):
    """Return the number of valid frames in the record for ``key``, or None if there are none."""
    env = lmdb.open(str(db_path), subdir=False, readonly=True, lock=False)
    with env.begin(write=False) as txn:
        datapoint_pickled = txn.get(key)
        data_objects = data_to_pyg(pickle.loads(gzip.decompress(datapoint_pickled)), key, stage=stage, filter=filtering)
    env.close()
    if len(data_objects) > 0:
        return len(data_objects)
    return None
def get_valid_nums(db_path, keys, stage, filtering):
valid_nums = []
worker_func = partial(process_num, db_path=db_path, stage=stage, filtering=filtering)
with ProcessPoolExecutor(max_workers=32) as executor:
results = executor.map(worker_func, keys)
for maybe_len in tqdm(results, total=len(keys), desc="Get valid numbers"):
if maybe_len is not None:
valid_nums.append(maybe_len)
return valid_nums
def filter_valid_keys(db_path, keys, stage, filtering):
valid_keys = []
worker_func = partial(process_key, db_path=db_path, stage=stage, filtering=filtering)
with ProcessPoolExecutor(max_workers=32) as executor:
results = executor.map(worker_func, keys)
for maybe_key in tqdm(results, total=len(keys), desc="Filtering valid keys"):
if maybe_key is not None:
valid_keys.append(maybe_key)
return valid_keys
class LMDBDataset(Dataset):
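    """Dataset over one or more LMDB shards of gzip-pickled PubChemQCR records.

    With ``total_traj=False`` each sample is a molecule (a random frame is drawn per
    access); with ``total_traj=True`` every trajectory frame is an individual sample.
    Valid keys and per-key frame counts are cached as text files next to the LMDBs, so
    the expensive filtering pass only runs once per (shard, stage) combination.
    """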
    def __init__(self, path, transform=None, keys_file='valid_keys', stage='1st', total_traj=True,
                 SubsetOnly=False, getTest=False, stochastic_frame=False) -> None:
super(LMDBDataset, self).__init__()
self.path = Path(path)
self.keys_file = keys_file
self.stage = stage
self.total_traj = total_traj
self.stochastic_frame = stochastic_frame
assert self.path.is_dir(), "Path is not a directory"
db_paths = sorted(self.path.glob("*.lmdb"))
assert len(db_paths) > 0, f"No LMDBs found in '{self.path}'"
self._keys = []
if total_traj:
self._nums = []
self.envs = []
self.SubsetOnly = SubsetOnly
self.postfix = ""
if SubsetOnly:
self.postfix = "_Subset"
for i, db_path in enumerate(db_paths):
if SubsetOnly:
if 'Data06.lmdb' not in str(db_path):
continue
# If we're generating the test set, skip all lmdbs that aren't the test otherwise skip only the test lmdb
if getTest:
if 'test.lmdb' not in str(db_path):
continue
else:
if 'test.lmdb' in str(db_path):
continue
cur_env = self.connect_db(db_path)
self.envs.append(cur_env)
lmdb_name = Path(str(db_path)).stem
if os.path.exists(self.path / Path(self.keys_file + f'_{lmdb_name}_{self.stage}{self.postfix}.txt')):
self._keys.append(self.load_keys(lmdb_name))
else:
with cur_env.begin() as txn:
all_keys = [key for key in tqdm(txn.cursor().iternext(values=False))]
filter_keys = filter_valid_keys(db_path, all_keys, self.stage, not self.SubsetOnly)
self._keys.append(filter_keys)
self.save_keys(filter_keys, lmdb_name)
if total_traj:
                if os.path.exists(self.path / Path(self.keys_file + f'_{lmdb_name}_{self.stage}{self.postfix}_number.txt')):
self._nums.append(self.load_nums(lmdb_name))
else:
numbers = get_valid_nums(db_path, self._keys[-1], self.stage, not self.SubsetOnly)
self._nums.append(numbers)
self.save_numbers(numbers, lmdb_name)
        if not total_traj:
            # One sample per molecule; a random frame of that molecule is drawn in __getitem__.
            keylens = [len(k) for k in self._keys]
            self._keylen_cumulative = np.cumsum(keylens).tolist()
            self.num_samples = sum(keylens)
        else:
            # One sample per trajectory frame; cumulative frame counts map a flat index
            # to (lmdb shard, molecule key, frame within the trajectory).
            keylens = [sum(k) for k in self._nums]
            self._keylen_cumulative = np.cumsum(keylens).tolist()
            self._num_cumulative = [np.cumsum(k).tolist() for k in self._nums]
            self.num_samples = sum(keylens)
            # (start, end) global frame ranges for every trajectory, used to build the splits.
            nums_flat = np.concatenate([np.array(nums) for nums in self._nums])
            cumulative_nums = np.cumsum(nums_flat)
            start_indices = np.concatenate(([0], cumulative_nums[:-1]))
            self.trajectory_indices = list(zip(start_indices.tolist(), cumulative_nums.tolist()))
self.transform = transform
self.maximum_dist = 0
def save_keys(self, keys, lmdb_name):
with open(self.path / Path(self.keys_file + f'_{lmdb_name}_{self.stage}{self.postfix}.txt'), 'w') as f:
for key in keys:
f.write(key.hex() + '\n')
def save_numbers(self, numbers, lmdb_name):
with open(self.path / Path(self.keys_file + f'_{lmdb_name}_{self.stage}{self.postfix}_number.txt'), 'w') as f:
for num in numbers:
f.write(str(num) + '\n')
def load_keys(self, lmdb_name):
with open(self.path / Path(self.keys_file + f'_{lmdb_name}_{self.stage}{self.postfix}.txt'), 'r') as f:
keys = [bytes.fromhex(line.strip()) for line in f]
return keys
def load_nums(self, lmdb_name):
with open(self.path / Path(self.keys_file + f'_{lmdb_name}_{self.stage}{self.postfix}_number.txt'), 'r') as f:
nums = [int(line.strip()) for line in f]
return nums
def __len__(self) -> int:
return self.num_samples
def __getitem__(self, idx: int):
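        # Bisect the cumulative lengths to find the LMDB shard, then the molecule key
        # (and, in total_traj mode, the frame index within that molecule's trajectory).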
db_idx = bisect.bisect(self._keylen_cumulative, idx)
el_idx = idx
if db_idx != 0:
el_idx = idx - self._keylen_cumulative[db_idx - 1]
assert el_idx >= 0
if not self.total_traj:
datapoint_pickled = (
self.envs[db_idx]
.begin()
.get(self._keys[db_idx][el_idx])
)
            data_objects = data_to_pyg(pickle.loads(gzip.decompress(datapoint_pickled)), self._keys[db_idx][el_idx], stage=self.stage, filter=not self.SubsetOnly)
if len(data_objects) == 0:
return None
if self.transform is not None:
data_objects = [self.transform(data_object, self.stochastic_frame) for data_object in data_objects]
return random.choice(data_objects)
else:
num_idx = bisect.bisect(self._num_cumulative[db_idx], el_idx)
data_idx = el_idx
if num_idx != 0:
data_idx = el_idx - self._num_cumulative[db_idx][num_idx - 1]
assert data_idx >= 0
datapoint_pickled = (
self.envs[db_idx]
.begin()
.get(self._keys[db_idx][num_idx])
)
            data_objects = data_to_pyg(pickle.loads(gzip.decompress(datapoint_pickled)), self._keys[db_idx][num_idx], stage=self.stage, filter=not self.SubsetOnly)
data_object = data_objects[data_idx]
if self.transform is not None:
data_object = self.transform(data_object, self.stochastic_frame)
return data_object
def connect_db(self, lmdb_path: Optional[Path] = None) -> lmdb.Environment:
env = lmdb.open(
str(lmdb_path),
subdir=False,
readonly=True,
lock=False,
readahead=False,
meminit=False,
max_readers=128,
)
return env
    def close_db(self) -> None:
        for env in self.envs:
            env.close()
class CommonLMDBDataset(Dataset):
    def __init__(self, path, transform=None) -> None:
        super(CommonLMDBDataset, self).__init__()
        self.path = Path(path)
        assert self.path.is_file(), "Path is not a file"
        self.env = self.connect_db(self.path)
        self.transform = transform
        # Load the key list once up front so __getitem__ works regardless of call order.
        with self.env.begin() as txn:
            self.all_keys = [key for key in tqdm(txn.cursor().iternext(values=False))]

    def __len__(self) -> int:
        return len(self.all_keys)
def __getitem__(self, idx: int):
datapoint_pickled = self.env.begin().get(self.all_keys[idx])
data_object = pickle.loads(gzip.decompress(datapoint_pickled))
        # The stored object holds several position frames; keep one chosen at random.
        data_object.pos = random.choice([pos for pos in data_object.pos])
        if self.transform is not None:
            data_object = self.transform(data_object)
return data_object
def connect_db(self, lmdb_path: Optional[Path] = None) -> lmdb.Environment:
env = lmdb.open(
str(lmdb_path),
subdir=False,
readonly=True,
lock=False,
readahead=False,
meminit=False,
max_readers=128,
)
return env
def close_db(self) -> None:
self.env.close()
def initialize_datasets(root, transform, stage, total_traj, SubsetOnly, stochastic_frame):
lmdb_dataset = LMDBDataset(
root,
transform=transform,
stage=stage,
total_traj=total_traj,
SubsetOnly=SubsetOnly,
stochastic_frame=stochastic_frame
)
    if not total_traj:
        # Molecule-level split: train/val indices come from a precomputed JSON split file.
        with open('splits/new_split.json' if SubsetOnly else 'splits/new_split_full.json', 'r') as f:
            split = json.load(f)
        mol_indices_np = np.array(list(range(len(lmdb_dataset))))
        train_indices = mol_indices_np[split['train']].tolist()
        val_indices = mol_indices_np[split['val']].tolist()
        train_dataset = Subset(lmdb_dataset, train_indices)
        val_dataset = Subset(lmdb_dataset, val_indices)
else:
num_trajectories = len(lmdb_dataset.trajectory_indices)
trajectory_indices = list(range(num_trajectories))
with open('splits/new_split.json' if SubsetOnly else 'splits/new_split_full.json', 'r') as f:
split = json.load(f)
trajectory_indices_np = np.array(trajectory_indices)
train_trajectory_indices = (trajectory_indices_np[split['train']]).tolist()
val_trajectory_indices = (trajectory_indices_np[split['val']]).tolist()
train_snapshot_indices = []
val_snapshot_indices = []
for idx_set, snapshot_indices_set in zip(
[train_trajectory_indices, val_trajectory_indices],
[train_snapshot_indices, val_snapshot_indices],
):
for traj_idx in idx_set:
start_idx, end_idx = lmdb_dataset.trajectory_indices[traj_idx]
snapshot_indices_set.extend(range(start_idx, end_idx))
train_dataset = Subset(lmdb_dataset, train_snapshot_indices)
val_dataset = Subset(lmdb_dataset, val_snapshot_indices)
lmdb_test_dataset = LMDBDataset(
root,
transform=transform,
stage=stage,
total_traj=True,
SubsetOnly=False,
getTest=True,
stochastic_frame=stochastic_frame
)
test_snapshot_indices = []
for start_idx, end_idx in lmdb_test_dataset.trajectory_indices:
test_snapshot_indices.extend(range(start_idx, end_idx))
test_dataset = Subset(lmdb_test_dataset, test_snapshot_indices)
return {"train": train_dataset, "val": val_dataset, "test": test_dataset}
def scale_transform(data, stochastic_frame=False):
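    """Normalise the energy target, scale forces, and centre atomic positions.

    With ``stochastic_frame=True``, attach a per-sample 3x3 frame ``Q`` built from the
    eigenvectors of pos^T pos with randomly flipped signs (one of the eight equivalent
    orthogonal frames).
    """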
y_scale = (data.y - _MEAN_ENERGY) / _STD_ENERGY
data.y = y_scale
data.y_force = data.y_force / _STD_FORCE_SCALE
data.pos = data.pos - data.pos.mean(0, keepdim=True)
data.num_atoms = data.pos.size(0)
    if stochastic_frame:
        plus_minus_list = list(product([1, -1], repeat=3))
        signs = random.choice(plus_minus_list)
        # pos.T @ pos is symmetric, so its eigenvectors are real; the float32 cast drops
        # the numerically negligible imaginary part returned by torch.linalg.eig.
        Q = torch.linalg.eig(data.pos.T @ data.pos)[1] * torch.tensor(signs).unsqueeze(0)
        data.Q = Q.to(torch.float32).unsqueeze(0).expand(data.pos.size(0), 3, 3)
return data
class LMDBDataLoader:
def __init__(
self,
root,
batch_size=32,
num_workers=4,
stage='1st',
total_traj=False,
SubsetOnly=False,
stochastic_frame=False
) -> None:
self.batch_size = batch_size
self.num_workers = num_workers
self.datasets = initialize_datasets(root, scale_transform, stage, total_traj,
SubsetOnly=SubsetOnly, stochastic_frame=stochastic_frame)
def train_loader(self, distributed=False):
if distributed:
sampler = DistributedSampler(self.datasets["train"])
else:
subset_indices = torch.randperm(len(self.datasets["train"]))
sampler = SubsetRandomSampler(subset_indices)
return DataLoader(
self.datasets["train"],
batch_size=self.batch_size,
drop_last=False,
num_workers=self.num_workers,
sampler=sampler,
pin_memory=True,
)
def val_loader(self, distributed=False):
if distributed:
sampler = DistributedSampler(self.datasets["val"])
return DataLoader(
self.datasets["val"],
batch_size=self.batch_size,
drop_last=False,
num_workers=self.num_workers,
sampler=sampler,
pin_memory=True,
)
return DataLoader(
self.datasets["val"],
batch_size=self.batch_size,
drop_last=False,
num_workers=self.num_workers,
pin_memory=True,
)
def test_loader(self, distributed=False):
if distributed:
sampler = DistributedSampler(self.datasets["test"])
return DataLoader(
self.datasets["test"],
batch_size=self.batch_size,
drop_last=False,
num_workers=self.num_workers,
sampler=sampler,
pin_memory=True,
)
return DataLoader(
self.datasets["test"],
batch_size=self.batch_size,
drop_last=False,
num_workers=self.num_workers,
pin_memory=True,
)
def serialize_and_compress(data: Data):
    """Serialize a Data object with pickle and compress it with gzip."""
    return gzip.compress(pickle.dumps(data))
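

# --- Illustrative usage (sketch) --------------------------------------------------
# A minimal, hedged sketch of how the pieces above fit together. The root path and
# loader settings below are assumptions for illustration only; LMDBDataLoader also
# expects the 'splits/new_split*.json' files and a 'test.lmdb' shard referenced in
# initialize_datasets to be present.
if __name__ == "__main__":
    loader = LMDBDataLoader(
        root="path/to/lmdb_dir",   # directory containing the *.lmdb shards (assumed)
        batch_size=32,
        num_workers=4,
        stage="1st",               # one of: '1st', '1st_smash', '2nd', 'mixing', 'hf', 'pm3'
        total_traj=True,
    )
    for batch in loader.train_loader():
        # Each batch is a torch_geometric Batch with x (atomic numbers), pos, y
        # (normalised per-atom energy), y_force, last_pos and last_y.
        print(batch)
        break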