Commit 3176131 · verified · 1 Parent(s): d0c0b13
ZacharyyyK committed: Upload 2 files

Files changed (2):
  1. Code/models/painn.py +392 -0
  2. Code/models/schnet.py +260 -0
Code/models/painn.py ADDED
@@ -0,0 +1,392 @@
"""
===============================================================================
File: painn
Date: 6/16/2024
Description: Code is adapted from the PaiNN OC20 implementation:
https://github.com/facebookresearch/fairchem/tree/fairchem_core-1.10.0/src/fairchem/core/models/painn.

All rights reserved to the original authors.
===============================================================================
"""

from __future__ import annotations

import math
import typing

import torch
from torch import nn
from torch.nn import SiLU
from torch_cluster import radius_graph

if typing.TYPE_CHECKING:
    from torch_geometric.data.batch import Batch

from torch_geometric.nn import MessagePassing
from torch_scatter import scatter, segment_coo

from torch_geometric.utils import remove_isolated_nodes


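# GaussianSmearing expands a scalar distance d into `num_gaussians` radial basis
# features phi_k(d) = exp(-gamma * (d - mu_k)^2), with centers mu_k spaced evenly
# on [start, stop] and gamma fixed by the center spacing, so an (E,) distance
# tensor becomes an (E, num_gaussians) feature tensor.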
class GaussianSmearing(torch.nn.Module):
    def __init__(
        self,
        start: float = 0.0,
        stop: float = 5.0,
        num_gaussians: int = 50,
    ):
        super().__init__()
        offset = torch.linspace(start, stop, num_gaussians)
        self.coeff = -0.5 / (offset[1] - offset[0]).item() ** 2
        self.register_buffer('offset', offset)

    def forward(self, dist):
        dist = dist.view(-1, 1) - self.offset.view(1, -1)
        return torch.exp(self.coeff * torch.pow(dist, 2))


class RadiusInteractionGraph(torch.nn.Module):
    r"""Creates edges based on atom positions :obj:`pos` to all points within
    the cutoff distance.

    Args:
        cutoff (float, optional): Cutoff distance for interatomic interactions.
            (default: :obj:`10.0`)
        max_num_neighbors (int, optional): The maximum number of neighbors to
            collect for each node within the :attr:`cutoff` distance with the
            default interaction graph method.
            (default: :obj:`32`)
    """

    def __init__(self, cutoff: float = 10.0, max_num_neighbors: int = 32):
        super().__init__()
        self.cutoff = cutoff
        self.max_num_neighbors = max_num_neighbors

    def forward(self, pos, batch):
        r"""Forward pass.

        Args:
            pos (Tensor): Coordinates of each atom.
            batch (LongTensor, optional): Batch indices assigning each atom to
                a separate molecule.

        :rtype: (:class:`LongTensor`, :class:`Tensor`)
        """
        edge_index = radius_graph(pos, r=self.cutoff, batch=batch,
                                  max_num_neighbors=self.max_num_neighbors)
        row, col = edge_index
        edge_weight = (pos[row] - pos[col]).norm(dim=-1)
        return edge_index, edge_weight


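# PaiNN keeps two feature sets per atom: invariant scalars x of shape (N, C) and
# equivariant vectors vec of shape (N, 3, C). Each layer applies a message block
# and an update block with residual connections; per-atom energies from a small
# MLP are mean-pooled per molecule. With gradient_force=True, forces come from
# the negative gradient of the energy w.r.t. positions; otherwise a PaiNNOutput
# head predicts them directly.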
class PaiNN(nn.Module):
    r"""PaiNN model based on the description in Schütt et al. (2021):
    Equivariant message passing for the prediction of tensorial properties
    and molecular spectra, https://arxiv.org/abs/2102.03150.
    """

    def __init__(
        self,
        hidden_channels: int = 512,
        num_layers: int = 6,
        num_rbf: int = 128,
        cutoff: float = 12.0,
        max_neighbors: int = 100,
        gradient_force: bool = True,
        num_elements: int = 80,
    ) -> None:
        super().__init__()

        self.hidden_channels = hidden_channels
        self.num_layers = num_layers
        self.num_rbf = num_rbf
        self.cutoff = cutoff
        self.max_neighbors = max_neighbors
        self.gradient_force = gradient_force

        #### Learnable parameters #############################################

        self.atom_emb = nn.Embedding(num_elements, hidden_channels)
        self.interaction_graph = RadiusInteractionGraph(cutoff, max_neighbors)

        self.radial_basis = GaussianSmearing(
            stop=cutoff,
            num_gaussians=num_rbf,
        )

        self.message_layers = nn.ModuleList()
        self.update_layers = nn.ModuleList()

        for i in range(num_layers):
            self.message_layers.append(
                PaiNNMessage(hidden_channels, num_rbf).jittable()
            )
            self.update_layers.append(PaiNNUpdate(hidden_channels))

        self.out_energy = nn.Sequential(
            nn.Linear(hidden_channels, hidden_channels // 2),
            SiLU(),
            nn.Linear(hidden_channels // 2, 1),
        )

        if self.gradient_force is False:
            self.out_forces = PaiNNOutput(hidden_channels)

        self.inv_sqrt_2 = 1 / math.sqrt(2.0)

        self.reset_parameters()

    def reset_parameters(self) -> None:
        nn.init.xavier_uniform_(self.out_energy[0].weight)
        self.out_energy[0].bias.data.fill_(0)
        nn.init.xavier_uniform_(self.out_energy[2].weight)
        self.out_energy[2].bias.data.fill_(0)

    def forward(self, data):
        batch_size = data.batch.max().item() + 1

        edge_index = radius_graph(data.pos, self.cutoff, data.batch,
                                  max_num_neighbors=self.max_neighbors)
        edge_index, _, mask = remove_isolated_nodes(edge_index, num_nodes=data.num_nodes)

        pos = data.pos[mask]
        batch = data.batch[mask]
        z = data.x[mask].long().squeeze()
        if self.gradient_force:
            pos = pos.requires_grad_(True)

        edge_index, edge_dist = self.interaction_graph(pos, batch)
        edge_vector = pos[edge_index[1]] - pos[edge_index[0]]
        assert z.dim() == 1
        assert z.dtype == torch.long

        edge_rbf = self.radial_basis(edge_dist)  # rbf * envelope

        x = self.atom_emb(z)
        vec = torch.zeros(x.size(0), 3, x.size(1), device=x.device)

        #### Interaction blocks ###############################################

        for i in range(self.num_layers):
            dx, dvec = self.message_layers[i](x, vec, edge_index, edge_rbf, edge_vector)

            x = x + dx
            vec = vec + dvec
            x = x * self.inv_sqrt_2

            dx, dvec = self.update_layers[i](x, vec)

            x = x + dx
            vec = vec + dvec

        #### Output block #####################################################

        per_atom_energy = self.out_energy(x).squeeze(1)
        energy = scatter(per_atom_energy, batch, dim=0, reduce='mean', dim_size=batch_size)

        if self.gradient_force:
            forces = -1 * (
                torch.autograd.grad(
                    energy,
                    pos,
                    grad_outputs=torch.ones_like(energy),
                    create_graph=True,
                )[0]
            )
        else:
            forces = self.out_forces(x, vec)

        return energy, forces, mask


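# PaiNNMessage implements the continuous-filter message step: node features are
# projected to 3*C channels and gated by an RBF-derived edge filter, and the
# three C-sized splits drive (i) the scalar message, (ii) a rescaling of the
# neighbor's vector features, and (iii) a new vector contribution along the
# edge vector r_ij.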
class PaiNNMessage(MessagePassing):
    def __init__(
        self,
        hidden_channels,
        num_rbf,
    ) -> None:
        super().__init__(aggr="add", node_dim=0)

        self.hidden_channels = hidden_channels

        self.x_proj = nn.Sequential(
            nn.Linear(hidden_channels, hidden_channels),
            SiLU(),
            nn.Linear(hidden_channels, hidden_channels * 3),
        )
        self.rbf_proj = nn.Linear(num_rbf, hidden_channels * 3)

        self.inv_sqrt_3 = 1 / math.sqrt(3.0)
        self.inv_sqrt_h = 1 / math.sqrt(hidden_channels)
        self.x_layernorm = nn.LayerNorm(hidden_channels)

        self.reset_parameters()

    def reset_parameters(self) -> None:
        nn.init.xavier_uniform_(self.x_proj[0].weight)
        self.x_proj[0].bias.data.fill_(0)
        nn.init.xavier_uniform_(self.x_proj[2].weight)
        self.x_proj[2].bias.data.fill_(0)
        nn.init.xavier_uniform_(self.rbf_proj.weight)
        self.rbf_proj.bias.data.fill_(0)
        self.x_layernorm.reset_parameters()

    def forward(self, x, vec, edge_index, edge_rbf, edge_vector):
        xh = self.x_proj(self.x_layernorm(x))

        # TODO(@abhshkdz): Nans out with AMP here during backprop. Debug / fix.
        rbfh = self.rbf_proj(edge_rbf)

        # propagate_type: (xh: Tensor, vec: Tensor, rbfh_ij: Tensor, r_ij: Tensor)
        dx, dvec = self.propagate(
            edge_index,
            xh=xh,
            vec=vec,
            rbfh_ij=rbfh,
            r_ij=edge_vector,
            size=None,
        )

        return dx, dvec

    def message(self, xh_j, vec_j, rbfh_ij, r_ij):
        x, xh2, xh3 = torch.split(xh_j * rbfh_ij, self.hidden_channels, dim=-1)
        xh2 = xh2 * self.inv_sqrt_3

        vec = vec_j * xh2.unsqueeze(1) + xh3.unsqueeze(1) * r_ij.unsqueeze(2)
        vec = vec * self.inv_sqrt_h

        return x, vec

    def aggregate(
        self,
        features: tuple[torch.Tensor, torch.Tensor],
        index: torch.Tensor,
        dim_size: int,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        x, vec = features
        x = scatter(x, index, dim=self.node_dim, dim_size=dim_size)
        vec = scatter(vec, index, dim=self.node_dim, dim_size=dim_size)
        return x, vec

    def update(
        self, inputs: tuple[torch.Tensor, torch.Tensor]
    ) -> tuple[torch.Tensor, torch.Tensor]:
        return inputs


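# PaiNNUpdate is the per-atom update step: vector features are projected into two
# channel sets (vec1, vec2); the invariant dot product <vec1, vec2> and the norms
# of vec2 are mixed with the scalars through an MLP, yielding a scalar residual
# dx and a vector residual dvec directed along vec1.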
class PaiNNUpdate(nn.Module):
    def __init__(self, hidden_channels) -> None:
        super().__init__()
        self.hidden_channels = hidden_channels

        self.vec_proj = nn.Linear(hidden_channels, hidden_channels * 2, bias=False)
        self.xvec_proj = nn.Sequential(
            nn.Linear(hidden_channels * 2, hidden_channels),
            SiLU(),
            nn.Linear(hidden_channels, hidden_channels * 3),
        )

        self.inv_sqrt_2 = 1 / math.sqrt(2.0)
        self.inv_sqrt_h = 1 / math.sqrt(hidden_channels)

        self.reset_parameters()

    def reset_parameters(self) -> None:
        nn.init.xavier_uniform_(self.vec_proj.weight)
        nn.init.xavier_uniform_(self.xvec_proj[0].weight)
        self.xvec_proj[0].bias.data.fill_(0)
        nn.init.xavier_uniform_(self.xvec_proj[2].weight)
        self.xvec_proj[2].bias.data.fill_(0)

    def forward(self, x, vec):
        vec1, vec2 = torch.split(self.vec_proj(vec), self.hidden_channels, dim=-1)
        vec_dot = (vec1 * vec2).sum(dim=1) * self.inv_sqrt_h

        # NOTE: Can't use torch.norm because the gradient is NaN for input = 0.
        # Add an epsilon offset to make sure sqrt is always positive.
        x_vec_h = self.xvec_proj(
            torch.cat([x, torch.sqrt(torch.sum(vec2**2, dim=-2) + 1e-8)], dim=-1)
        )
        xvec1, xvec2, xvec3 = torch.split(x_vec_h, self.hidden_channels, dim=-1)

        dx = xvec1 + xvec2 * vec_dot
        dx = dx * self.inv_sqrt_2

        dvec = xvec3.unsqueeze(1) * vec1

        return dx, dvec


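# PaiNNOutput is the direct (non-conservative) force head used when
# gradient_force=False: two gated equivariant blocks shrink the channel
# dimension until a single 3-vector per atom remains.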
class PaiNNOutput(nn.Module):
    def __init__(self, hidden_channels) -> None:
        super().__init__()
        self.hidden_channels = hidden_channels

        self.output_network = nn.ModuleList(
            [
                GatedEquivariantBlock(
                    hidden_channels,
                    hidden_channels // 2,
                ),
                GatedEquivariantBlock(hidden_channels // 2, 1),
            ]
        )

        self.reset_parameters()

    def reset_parameters(self) -> None:
        for layer in self.output_network:
            layer.reset_parameters()

    def forward(self, x, vec):
        for layer in self.output_network:
            x, vec = layer(x, vec)
        return vec.squeeze()


# Borrowed from TorchMD-Net
class GatedEquivariantBlock(nn.Module):
    """Gated Equivariant Block as defined in Schütt et al. (2021):
    Equivariant message passing for the prediction of tensorial properties
    and molecular spectra.
    """

    def __init__(
        self,
        hidden_channels,
        out_channels,
    ) -> None:
        super().__init__()
        self.out_channels = out_channels

        self.vec1_proj = nn.Linear(hidden_channels, hidden_channels, bias=False)
        self.vec2_proj = nn.Linear(hidden_channels, out_channels, bias=False)

        self.update_net = nn.Sequential(
            nn.Linear(hidden_channels * 2, hidden_channels),
            SiLU(),
            nn.Linear(hidden_channels, out_channels * 2),
        )

        self.act = SiLU()

    def reset_parameters(self) -> None:
        nn.init.xavier_uniform_(self.vec1_proj.weight)
        nn.init.xavier_uniform_(self.vec2_proj.weight)
        nn.init.xavier_uniform_(self.update_net[0].weight)
        self.update_net[0].bias.data.fill_(0)
        nn.init.xavier_uniform_(self.update_net[2].weight)
        self.update_net[2].bias.data.fill_(0)

    def forward(self, x, v):
        vec1 = torch.norm(self.vec1_proj(v), dim=-2)
        vec2 = self.vec2_proj(v)

        x = torch.cat([x, vec1], dim=-1)
        x, v = torch.split(self.update_net(x), self.out_channels, dim=-1)
        v = v.unsqueeze(1) * vec2

        x = self.act(x)
        return x, v
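
A minimal smoke-test sketch for the PaiNN wrapper above, assuming `data.x`
carries atomic numbers with shape (N, 1) and `data.pos` Cartesian coordinates,
as the forward pass expects, and that torch_geometric, torch_scatter, and
torch_cluster are installed with a PyG version that still provides
MessagePassing.jittable():

import torch
from torch_geometric.data import Batch, Data

# Toy water molecule: atomic numbers in x (N, 1), positions in pos (N, 3).
z = torch.tensor([[8.0], [1.0], [1.0]])
pos = torch.tensor([[0.00, 0.00, 0.24],
                    [0.00, 0.76, -0.37],
                    [0.00, -0.76, -0.37]])
batch = Batch.from_data_list([Data(x=z, pos=pos)])

model = PaiNN(hidden_channels=64, num_layers=2, num_rbf=16)
energy, forces, mask = model(batch)
print(energy.shape)  # torch.Size([1]): one energy per molecule
print(forces.shape)  # torch.Size([3, 3]): one force vector per kept atom
print(mask.sum())    # number of atoms with at least one neighbor in the cutoff
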
Code/models/schnet.py ADDED
@@ -0,0 +1,260 @@
"""
===============================================================================
File: schnet
Date: 6/16/2024
Description: Code is adapted from the torch_geometric implementation:
https://github.com/pyg-team/pytorch_geometric/blob/master/torch_geometric/nn/models/schnet.py.

All rights reserved to the original authors.
===============================================================================
"""
import os
import os.path as osp
import warnings
from math import pi as PI
from typing import Callable, Dict, Optional, Tuple

import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Embedding, Linear, ModuleList, Sequential

from torch_geometric.nn import MessagePassing, SumAggregation, radius_graph
from torch_geometric.nn.resolver import aggregation_resolver as aggr_resolver

from torch_scatter import scatter

from torch_geometric.utils import remove_isolated_nodes


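# SchNet pipeline: embed atomic numbers, apply `num_interactions` residual
# interaction blocks built on continuous-filter convolutions, map to per-atom
# energies with an atomwise MLP, and sum over each molecule. Forces are the
# negative gradient of the pooled energy w.r.t. positions.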
class SchNet(torch.nn.Module):
    def __init__(
        self,
        hidden_channels: int = 128,
        num_filters: int = 128,
        num_interactions: int = 4,
        num_gaussians: int = 128,
        cutoff: float = 5.0,
        max_num_neighbors: int = 100,
        readout: str = 'mean',
    ):
        super().__init__()

        self.max_num_neighbors = max_num_neighbors

        self.hidden_channels = hidden_channels
        self.num_filters = num_filters
        self.num_interactions = num_interactions
        self.num_gaussians = num_gaussians
        self.cutoff = cutoff
        self.sum_aggr = SumAggregation()
        self.readout = aggr_resolver(readout)

        self.embedding = Embedding(80, hidden_channels)
        self.interaction_graph = RadiusInteractionGraph(cutoff, max_num_neighbors)
        self.distance_expansion = GaussianSmearing(0.0, cutoff, num_gaussians)

        self.interactions = ModuleList()
        for _ in range(num_interactions):
            block = InteractionBlock(hidden_channels, num_gaussians,
                                     num_filters, cutoff)
            self.interactions.append(block)

        self.lin1 = Linear(hidden_channels, hidden_channels // 2)
        self.act = ShiftedSoftplus()
        self.lin2 = Linear(hidden_channels // 2, 1)
        # self.force_decoder = nn.Sequential(Linear(hidden_channels, hidden_channels), ShiftedSoftplus(),
        #                                    Linear(hidden_channels, 3))
        self.reset_parameters()

    def reset_parameters(self):
        r"""Resets all learnable parameters of the module."""
        self.embedding.reset_parameters()
        for interaction in self.interactions:
            interaction.reset_parameters()
        torch.nn.init.xavier_uniform_(self.lin1.weight)
        self.lin1.bias.data.fill_(0)
        torch.nn.init.xavier_uniform_(self.lin2.weight)
        self.lin2.bias.data.fill_(0)

    def forward(self, data):
        batch_size = data.batch.max().item() + 1

        edge_index = radius_graph(data.pos, r=self.cutoff, batch=data.batch,
                                  max_num_neighbors=self.max_num_neighbors)
        edge_index, _, mask = remove_isolated_nodes(edge_index, num_nodes=data.num_nodes)

        pos = data.pos[mask]
        x = data.x[mask]
        batch = data.batch[mask]

        pos.requires_grad_(True)

        z = x.long().squeeze(-1)

        h = self.embedding(z)
        # edge_index, edge_weight = self.interaction_graph(pos, batch)

        row, col = edge_index
        edge_weight = (pos[row] - pos[col]).norm(dim=-1)

        edge_attr = self.distance_expansion(edge_weight)

        for interaction in self.interactions:
            h = h + interaction(h, edge_index, edge_weight, edge_attr)
        # forces = self.force_decoder(h)
        h = self.lin1(h)
        h = self.act(h)
        h = self.lin2(h)
        # out = self.readout(h, batch, dim=0).squeeze()

        out = scatter(h, batch, dim=0, dim_size=batch_size, reduce='sum').squeeze()

        forces = -1 * (
            torch.autograd.grad(
                out,
                pos,
                grad_outputs=torch.ones_like(out),
                create_graph=True,
            )[0]
        )
        return out, forces, mask


class RadiusInteractionGraph(torch.nn.Module):
    r"""Creates edges based on atom positions :obj:`pos` to all points within
    the cutoff distance.

    Args:
        cutoff (float, optional): Cutoff distance for interatomic interactions.
            (default: :obj:`10.0`)
        max_num_neighbors (int, optional): The maximum number of neighbors to
            collect for each node within the :attr:`cutoff` distance with the
            default interaction graph method.
            (default: :obj:`32`)
    """

    def __init__(self, cutoff: float = 10.0, max_num_neighbors: int = 32):
        super().__init__()
        self.cutoff = cutoff
        self.max_num_neighbors = max_num_neighbors

    def forward(self, pos: Tensor, batch: Tensor) -> Tuple[Tensor, Tensor]:
        r"""Forward pass.

        Args:
            pos (Tensor): Coordinates of each atom.
            batch (LongTensor, optional): Batch indices assigning each atom to
                a separate molecule.

        :rtype: (:class:`LongTensor`, :class:`Tensor`)
        """
        edge_index = radius_graph(pos, r=self.cutoff, batch=batch,
                                  max_num_neighbors=self.max_num_neighbors)
        row, col = edge_index
        edge_weight = (pos[row] - pos[col]).norm(dim=-1)
        return edge_index, edge_weight


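# InteractionBlock is one SchNet interaction step: an MLP maps Gaussian edge
# features to CFConv filter weights, followed by a shifted softplus and a linear
# layer; the residual connection is applied by the caller in SchNet.forward.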
class InteractionBlock(torch.nn.Module):
    def __init__(self, hidden_channels: int, num_gaussians: int,
                 num_filters: int, cutoff: float):
        super().__init__()
        self.mlp = Sequential(
            Linear(num_gaussians, num_filters),
            ShiftedSoftplus(),
            Linear(num_filters, num_filters),
        )
        self.conv = CFConv(hidden_channels, hidden_channels, num_filters,
                           self.mlp, cutoff)
        self.act = ShiftedSoftplus()
        self.lin = Linear(hidden_channels, hidden_channels)

        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.xavier_uniform_(self.mlp[0].weight)
        self.mlp[0].bias.data.fill_(0)
        torch.nn.init.xavier_uniform_(self.mlp[2].weight)
        self.mlp[2].bias.data.fill_(0)
        self.conv.reset_parameters()
        torch.nn.init.xavier_uniform_(self.lin.weight)
        self.lin.bias.data.fill_(0)

    def forward(self, x: Tensor, edge_index: Tensor, edge_weight: Tensor,
                edge_attr: Tensor) -> Tensor:
        x = self.conv(x, edge_index, edge_weight, edge_attr)
        x = self.act(x)
        x = self.lin(x)
        return x


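# CFConv is the continuous-filter convolution: edge filters W = nn(edge_attr)
# are damped by the smooth cutoff C(d) = 0.5 * (cos(pi * d / cutoff) + 1), which
# falls from 1 at d = 0 to 0 at d = cutoff, and messages x_j * W are summed over
# neighbors.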
class CFConv(MessagePassing):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        num_filters: int,
        nn: Sequential,
        cutoff: float,
    ):
        super().__init__(aggr='add')
        self.lin1 = Linear(in_channels, num_filters, bias=False)
        self.lin2 = Linear(num_filters, out_channels)
        self.nn = nn
        self.cutoff = cutoff

        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.xavier_uniform_(self.lin1.weight)
        torch.nn.init.xavier_uniform_(self.lin2.weight)
        self.lin2.bias.data.fill_(0)

    def forward(self, x: Tensor, edge_index: Tensor, edge_weight: Tensor,
                edge_attr: Tensor) -> Tensor:
        C = 0.5 * (torch.cos(edge_weight * PI / self.cutoff) + 1.0)
        W = self.nn(edge_attr) * C.view(-1, 1)

        x = self.lin1(x)
        x = self.propagate(edge_index, x=x, W=W)
        x = self.lin2(x)
        return x

    def message(self, x_j: Tensor, W: Tensor) -> Tensor:
        return x_j * W


class GaussianSmearing(torch.nn.Module):
    def __init__(
        self,
        start: float = 0.0,
        stop: float = 5.0,
        num_gaussians: int = 50,
    ):
        super().__init__()
        offset = torch.linspace(start, stop, num_gaussians)
        self.coeff = -0.5 / (offset[1] - offset[0]).item() ** 2
        self.register_buffer('offset', offset)

    def forward(self, dist: Tensor) -> Tensor:
        dist = dist.view(-1, 1) - self.offset.view(1, -1)
        return torch.exp(self.coeff * torch.pow(dist, 2))


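# ShiftedSoftplus computes ssp(x) = softplus(x) - log(2), shifted so ssp(0) = 0.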
class ShiftedSoftplus(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.shift = torch.log(torch.tensor(2.0)).item()

    def forward(self, x: Tensor) -> Tensor:
        return F.softplus(x) - self.shift
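
As with PaiNN, a minimal usage sketch for SchNet, under the same assumptions
about `data.x` and `data.pos`:

import torch
from torch_geometric.data import Batch, Data

z = torch.tensor([[8.0], [1.0], [1.0]])        # atomic numbers, (N, 1)
pos = torch.tensor([[0.00, 0.00, 0.24],
                    [0.00, 0.76, -0.37],
                    [0.00, -0.76, -0.37]])     # coordinates, (N, 3)
batch = Batch.from_data_list([Data(x=z, pos=pos)])

model = SchNet(hidden_channels=64, num_interactions=2, num_gaussians=32)
energy, forces, mask = model(batch)
print(energy)        # 0-dim tensor: the squeezed summed energy of the molecule
print(forces.shape)  # torch.Size([3, 3])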