Datasets:
ArXiv:
License:
"""
===============================================================================
File: painn
Date: 6/16/2024
Description: Code is adapted from the PyTorch Geometric SchNet implementation:
    https://github.com/pyg-team/pytorch_geometric/blob/master/torch_geometric/nn/models/schnet.py
    All rights reserved to the original authors.
===============================================================================
"""
| import os | |
| import os.path as osp | |
| import warnings | |
| from math import pi as PI | |
| from typing import Callable, Dict, Optional, Tuple | |
| import numpy as np | |
| import torch | |
| import torch.nn.functional as F | |
| from torch import Tensor, nn | |
| from torch.nn import Embedding, Linear, ModuleList, Sequential | |
| from torch_geometric.nn import MessagePassing, SumAggregation, radius_graph | |
| from torch_geometric.nn.resolver import aggregation_resolver as aggr_resolver | |
| from torch_scatter import scatter | |
| from torch_geometric.utils import remove_isolated_nodes | |
class SchNet(torch.nn.Module):
    r"""SchNet continuous-filter convolutional network, adapted from the
    PyTorch Geometric reference implementation.

    Predicts one scalar per graph (e.g. an energy) and per-atom forces,
    where the forces are the negative gradient of the scalar with respect
    to the atom positions.

    Args:
        hidden_channels (int): Hidden embedding size per atom.
        num_filters (int): Number of filters in each continuous-filter
            convolution.
        num_interactions (int): Number of stacked interaction blocks.
        num_gaussians (int): Number of Gaussians used to expand the
            interatomic distances.
        cutoff (float): Cutoff distance for building the radius graph
            (same units as ``data.pos``).
        max_num_neighbors (int): Maximum number of neighbors collected per
            atom inside the cutoff.
        readout (str): Aggregation resolved into ``self.readout``.
            NOTE(review): the forward pass below aggregates with a sum
            ``scatter`` and does not use ``self.readout``; the attribute is
            kept for interface compatibility.
    """

    def __init__(
        self,
        hidden_channels: int = 128,
        num_filters: int = 128,
        num_interactions: int = 4,
        num_gaussians: int = 128,
        cutoff: float = 5.0,
        max_num_neighbors: int = 100,
        readout: str = 'mean',
    ):
        super().__init__()
        self.max_num_neighbors = max_num_neighbors
        self.hidden_channels = hidden_channels
        self.num_filters = num_filters
        self.num_interactions = num_interactions
        self.num_gaussians = num_gaussians
        self.cutoff = cutoff
        self.sum_aggr = SumAggregation()
        self.readout = aggr_resolver(readout)
        # Embedding table of size 80: supports atomic numbers 0..79.
        self.embedding = Embedding(80, hidden_channels)
        self.interaction_graph = RadiusInteractionGraph(cutoff, max_num_neighbors)
        self.distance_expansion = GaussianSmearing(0.0, cutoff, num_gaussians)
        self.interactions = ModuleList()
        for _ in range(num_interactions):
            block = InteractionBlock(hidden_channels, num_gaussians,
                                     num_filters, cutoff)
            self.interactions.append(block)
        # Output head: hidden -> hidden // 2 -> one scalar per atom.
        self.lin1 = Linear(hidden_channels, hidden_channels // 2)
        self.act = ShiftedSoftplus()
        self.lin2 = Linear(hidden_channels // 2, 1)
        self.reset_parameters()

    def reset_parameters(self):
        r"""Resets all learnable parameters of the module."""
        self.embedding.reset_parameters()
        for interaction in self.interactions:
            interaction.reset_parameters()
        torch.nn.init.xavier_uniform_(self.lin1.weight)
        self.lin1.bias.data.fill_(0)
        torch.nn.init.xavier_uniform_(self.lin2.weight)
        self.lin2.bias.data.fill_(0)

    def forward(self, data):
        """Run the network on a (batched) molecular graph.

        Args:
            data: A PyG ``Data``/``Batch`` object with ``pos`` (atom
                coordinates), ``x`` (atomic numbers), ``batch`` (graph
                assignment per atom) and ``num_nodes``.

        Returns:
            Tuple ``(out, forces, mask)``: the per-graph scalar, the
            negative position gradients of that scalar for the retained
            atoms, and the boolean mask of atoms kept after removing
            isolated nodes (so callers can align per-atom targets).
        """
        batch_size = data.batch.max().item() + 1
        # Build the radius graph, then drop atoms that have no neighbor
        # inside the cutoff; edge indices are relabeled to the kept atoms.
        edge_index = radius_graph(data.pos, r=self.cutoff, batch=data.batch,
                                  max_num_neighbors=self.max_num_neighbors)
        edge_index, _, mask = remove_isolated_nodes(edge_index,
                                                    num_nodes=data.num_nodes)
        pos = data.pos[mask]
        x = data.x[mask]
        batch = data.batch[mask]
        # Positions must track gradients so forces can come from autograd.
        pos.requires_grad_(True)
        z = x.long().squeeze(-1)
        h = self.embedding(z)
        row, col = edge_index
        edge_weight = (pos[row] - pos[col]).norm(dim=-1)
        edge_attr = self.distance_expansion(edge_weight)
        # Residual stack of interaction blocks.
        for interaction in self.interactions:
            h = h + interaction(h, edge_index, edge_weight, edge_attr)
        h = self.lin1(h)
        h = self.act(h)
        h = self.lin2(h)
        # Sum per-atom contributions into one scalar per graph.
        out = scatter(h, batch, dim=0, dim_size=batch_size,
                      reduce='sum').squeeze()
        # Forces = -d(out)/d(pos). create_graph=True keeps the graph so a
        # loss on the forces can itself be backpropagated during training.
        forces = -torch.autograd.grad(
            out,
            pos,
            grad_outputs=torch.ones_like(out),
            create_graph=True,
        )[0]
        return out, forces, mask
class RadiusInteractionGraph(torch.nn.Module):
    r"""Builds the interaction graph by connecting every pair of atoms that
    lie within :obj:`cutoff` distance of each other.

    Args:
        cutoff (float, optional): Cutoff distance for interatomic
            interactions. (default: :obj:`10.0`)
        max_num_neighbors (int, optional): Maximum number of neighbors
            collected for each atom within the :attr:`cutoff` distance.
            (default: :obj:`32`)
    """

    def __init__(self, cutoff: float = 10.0, max_num_neighbors: int = 32):
        super().__init__()
        self.cutoff = cutoff
        self.max_num_neighbors = max_num_neighbors

    def forward(self, pos: Tensor, batch: Tensor) -> Tuple[Tensor, Tensor]:
        r"""Compute edges and their lengths for the given atom positions.

        Args:
            pos (Tensor): Coordinates of each atom.
            batch (LongTensor, optional): Batch indices assigning each atom
                to a separate molecule.

        :rtype: (:class:`LongTensor`, :class:`Tensor`)
        """
        edge_index = radius_graph(
            pos,
            r=self.cutoff,
            batch=batch,
            max_num_neighbors=self.max_num_neighbors,
        )
        src, dst = edge_index
        # Euclidean length of each edge.
        dist = torch.norm(pos[src] - pos[dst], dim=-1)
        return edge_index, dist
class InteractionBlock(torch.nn.Module):
    """One SchNet interaction block: a continuous-filter convolution
    followed by a shifted-softplus nonlinearity and a linear layer."""

    def __init__(self, hidden_channels: int, num_gaussians: int,
                 num_filters: int, cutoff: float):
        super().__init__()
        # Filter-generating network: maps Gaussian-expanded distances to
        # one filter vector per edge.
        self.mlp = Sequential(
            Linear(num_gaussians, num_filters),
            ShiftedSoftplus(),
            Linear(num_filters, num_filters),
        )
        self.conv = CFConv(hidden_channels, hidden_channels, num_filters,
                           self.mlp, cutoff)
        self.act = ShiftedSoftplus()
        self.lin = Linear(hidden_channels, hidden_channels)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize all parameters: Xavier weights, zero biases."""
        for layer in (self.mlp[0], self.mlp[2]):
            torch.nn.init.xavier_uniform_(layer.weight)
            layer.bias.data.fill_(0)
        self.conv.reset_parameters()
        torch.nn.init.xavier_uniform_(self.lin.weight)
        self.lin.bias.data.fill_(0)

    def forward(self, x: Tensor, edge_index: Tensor, edge_weight: Tensor,
                edge_attr: Tensor) -> Tensor:
        """Apply the block to the node features ``x``."""
        out = self.conv(x, edge_index, edge_weight, edge_attr)
        out = self.act(out)
        return self.lin(out)
class CFConv(MessagePassing):
    """Continuous-filter convolution: each neighbor's features are gated by
    an edge-dependent filter, and messages are summed ('add' aggregation)."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        num_filters: int,
        nn: Sequential,
        cutoff: float,
    ):
        super().__init__(aggr='add')
        self.lin1 = Linear(in_channels, num_filters, bias=False)
        self.lin2 = Linear(num_filters, out_channels)
        # Filter-generating network applied to the edge attributes.
        self.nn = nn
        self.cutoff = cutoff
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize both linear layers; zero the output bias."""
        torch.nn.init.xavier_uniform_(self.lin1.weight)
        torch.nn.init.xavier_uniform_(self.lin2.weight)
        self.lin2.bias.data.fill_(0)

    def forward(self, x: Tensor, edge_index: Tensor, edge_weight: Tensor,
                edge_attr: Tensor) -> Tensor:
        # Smooth cosine envelope so filters decay to zero at self.cutoff.
        envelope = 0.5 * (torch.cos(edge_weight * PI / self.cutoff) + 1.0)
        filters = self.nn(edge_attr) * envelope.view(-1, 1)
        h = self.lin1(x)
        h = self.propagate(edge_index, x=h, W=filters)
        return self.lin2(h)

    def message(self, x_j: Tensor, W: Tensor) -> Tensor:
        # Message = neighbor features gated by the per-edge filter.
        return x_j * W
class GaussianSmearing(torch.nn.Module):
    """Expands scalar distances into a vector of Gaussian radial basis
    functions with evenly spaced centers on ``[start, stop]``."""

    def __init__(
        self,
        start: float = 0.0,
        stop: float = 5.0,
        num_gaussians: int = 50,
    ):
        super().__init__()
        centers = torch.linspace(start, stop, num_gaussians)
        # Gaussian width is derived from the spacing between centers.
        spacing = (centers[1] - centers[0]).item()
        self.coeff = -0.5 / spacing ** 2
        self.register_buffer('offset', centers)

    def forward(self, dist: Tensor) -> Tensor:
        """Return RBF features of shape ``(len(dist), num_gaussians)``."""
        delta = dist.view(-1, 1) - self.offset.view(1, -1)
        return torch.exp(self.coeff * delta * delta)
class ShiftedSoftplus(torch.nn.Module):
    """Softplus shifted down by ``log(2)`` so the activation is zero at the
    origin: ``f(x) = softplus(x) - log(2)``."""

    def __init__(self):
        super().__init__()
        # log(2), precomputed once as a plain Python float.
        self.shift = float(torch.log(torch.tensor(2.0)))

    def forward(self, x: Tensor) -> Tensor:
        return F.softplus(x) - self.shift