repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
tps-torch | tps-torch-main/dimer_solv/ml_test/nn_initialization.py | #import sys
#sys.path.insert(0,"/global/home/users/muhammad_hasyim/tps-torch/build")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from dimer_ftsus import FTSLayerUSCustom as FTSLayer
from committor_nn import CommittorNetDR, CommittorNetBP, SchNet
#from tpstorch.ml.data import FTSSimulation, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam#, FTSImplicitUpdate, FTSUpdate
#from tpstorch.ml.nn import BKELossFTS, BKELossEXP, FTSCommittorLoss, FTSLayer
import numpy as np
#Grab the MPI group handles that tpstorch sets up at import time
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size  # number of replicas (umbrella windows / string nodes)
rank = tpstorch._rank              # this replica's index
#Import any other thing
import tqdm, sys
torch.manual_seed(5070)  # fixed seeds so every run is reproducible
np.random.seed(5070)
Np = 32            # total particle count: 2 dimer + 30 solvent
prefix = 'simple'  # filename prefix for output files
def initializer(s):
    """Linearly interpolate between the module-level `start` (reactant) and
    `end` (product) configurations: s=0 gives `start`, s=1 gives `end`."""
    react_weight = 1 - s
    return react_weight*start + s*end
#Initialization
r0 = 2**(1/6.0)  # Lennard-Jones minimum distance
width = 0.25
#Reactant basin: dimer bond length for the hand-built two-particle template.
#Bug fix: the CubicLattice calls further down reference dist_init_start and
#dist_init_end, which were never defined (NameError at runtime) — bind them
#here alongside the original dist_init assignments.
dist_init_start = 0.98
dist_init = dist_init_start
start = torch.zeros((2,3))
start[0][2] = -0.5*dist_init
start[1][2] = 0.5*dist_init
#Product state
dist_init_end = 1.75
dist_init = dist_init_end
end = torch.zeros((2,3))
end[0][2] = -0.5*dist_init
end[1][2] = 0.5*dist_init
#scale down/up the distance of one of the particle dimer
#Periodic cubic box edge lengths (x, y, z)
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
def CubicLattice(dist_init):
    """Place the global Np particles on a simple cubic lattice in the global
    periodic `box`, then set the first two particles (the dimer) a distance
    `dist_init` apart and shift the dimer's center of mass to the origin.

    Returns an (Np, 3) coordinate tensor wrapped back into the box.
    """
    state = torch.zeros(Np, 3);
    # Smallest integer lattice dimension that can hold Np sites.
    num_spacing = np.ceil(Np**(1/3.0))
    spacing_x = box[0]/num_spacing;
    spacing_y = box[1]/num_spacing;
    spacing_z = box[2]/num_spacing;
    count = 0;
    id_x = 0;
    id_y = 0;
    id_z = 0;
    # Fill lattice sites with id_z varying fastest; the flat index equals count.
    while Np > count:
        state[int(id_z+id_y*num_spacing+id_x*num_spacing*num_spacing)][0] = spacing_x*id_x-0.5*box[0];
        state[int(id_z+id_y*num_spacing+id_x*num_spacing*num_spacing)][1] = spacing_y*id_y-0.5*box[1];
        state[int(id_z+id_y*num_spacing+id_x*num_spacing*num_spacing)][2] = spacing_z*id_z-0.5*box[2];
        count += 1;
        id_z += 1;
        if(id_z==num_spacing):
            id_z = 0;
            id_y += 1;
            if(id_y==num_spacing):
                id_y = 0;
                id_x += 1;
    #Compute the pair distance (minimum image)
    dx = (state[0]-state[1])
    dx = dx-torch.round(dx/box[0])*box[0]
    #Re-compute one of the coordinates so the dimer bond length is dist_init
    state[0] = dx/torch.norm(dx)*dist_init+state[1]
    # Shift so the dimer's center of mass is at the origin, then re-wrap.
    # NOTE(review): wrapping uses box[0] for all three axes — fine only for a
    # cubic box, which is what this file uses.
    x_com = 0.5*(state[0]+state[1])
    for i in range(Np):
        state[i] -= x_com
        state[i] -= torch.round(state[i]/box[0])*box[0]
    return state;
#Build the reactant and product solvated configurations, then seed this rank's
#replica with a linear interpolation between them.
#NOTE(review): dist_init_start / dist_init_end must be defined above — confirm.
start = CubicLattice(dist_init_start)
end = CubicLattice(dist_init_end)
initial_config = initializer(rank/(world_size-1))
#SchNet committor model acting on all Np = 32 particles
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
ftslayer = FTSLayer(react_config=start.flatten(),prod_config=end.flatten(),num_nodes=world_size,boxsize=10.0,kappa_perpend=0.0,kappa_parallel=0.0).to('cpu')
#Initial Training Loss
initloss = nn.MSELoss()
initoptimizer = ParallelAdam(committor.parameters(), lr=1.0e-4)#,momentum=0.95, nesterov=True)
#initoptimizer = ParallelSGD(committor.parameters(), lr=1e-2,momentum=0.95, nesterov=True)
#from torchsummary import summary
running_loss = 0.0
tolerance = 1e-3
#Initial training: fit the committor to the initial (window-target) condition.
#NOTE(review): tolerance is assigned twice; only this 1e-4 value takes effect.
tolerance = 1e-4
#batch_sizes = [64]
#for size in batch_sizes:
loss_io = []
if rank == 0:
    loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
if rank == 0:
    print("Before training")
#Fit committor(initial_config) of this window toward rank/(world_size-1)
for i in range(10**7):
    # zero the parameter gradients
    initoptimizer.zero_grad()
    # forward + backward + optimize
    q_vals = committor(initial_config.view(-1,3))
    targets = torch.ones_like(q_vals)*rank/(dist.get_world_size()-1)
    cost = initloss(q_vals, targets)#,committor,config,cx)
    cost.backward()
    #Stepping up
    initoptimizer.step()
    with torch.no_grad():
        dist.all_reduce(cost)  # sum the loss over all replicas
        #if i % 10 == 0 and rank == 0:
        #    print(i,cost.item() / world_size, committor(ftslayer.string[-1]))
        #    torch.save(committor.state_dict(), "initial_1hl_nn")
        if rank == 0:
            loss_io.write("Step {:d} Loss {:.5E}\n".format(i,cost.item()))
            loss_io.flush()
        #Stop once the replica-averaged loss drops below tolerance
        if cost.item() / world_size < tolerance:
            if rank == 0:
                torch.save(committor.state_dict(), "initial_1hl_nn")
                torch.save(ftslayer.state_dict(), "test_string_config")
                print("Early Break!")
            break
committor.zero_grad()
| 4,668 | 32.589928 | 181 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/plotmodel.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
#from brownian_ml import CommittorNet
from committor_nn import CommittorNet, CommittorNetBP, CommittorNetDR
import numpy as np
#Import any other thing
import tqdm, sys
Np = 32            # total particle count: 2 dimer + 30 solvent
prefix = 'simple'  # filename prefix matching the training run's outputs
#Initialize neural net
#committor = CommittorNetDR(d=1,num_nodes=200).to('cpu')
box = [14.736125994561544, 14.736125994561544, 14.736125994561544]  # cubic box edges
#committor = CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu')
#TorchScript-compiled Behler-Parrinello committor; weights come from a
#checkpoint written by the FTS umbrella-sampling training run.
committor = torch.jit.script(CommittorNetBP(num_nodes=200, boxsize=box[0], Np=32, rc=2.5, sigma=1.0).to('cpu'))
committor.load_state_dict(torch.load("ftsus/{}_params_t_631_0".format(prefix)))
def initializer(s):
    """Interpolate linearly between the module-level `start` and `end`
    configurations (s in [0, 1])."""
    react_weight = 1 - s
    return react_weight*start + s*end
#Initialization
r0 = 2**(1/6.0)   # Lennard-Jones minimum distance
width = 0.5*r0    # half the bond-length gap between reactant and product
#Reactant: dimer bond at the LJ minimum
dist_init_start = r0
#Product state: bond stretched by twice the width
dist_init_end = r0+2*width
#scale down/up the distance of one of the particle dimer
def CubicLattice(dist_init):
    """Simple-cubic placement of the global Np particles in the global `box`,
    with the dimer (particles 0 and 1) set `dist_init` apart and centered at
    the origin. Returns an (Np, 3) wrapped coordinate tensor."""
    state = torch.zeros(Np, 3);
    # Smallest per-edge site count that fits Np particles.
    num_spacing = np.ceil(Np**(1/3.0))
    spacing_x = box[0]/num_spacing;
    spacing_y = box[1]/num_spacing;
    spacing_z = box[2]/num_spacing;
    count = 0;
    id_x = 0;
    id_y = 0;
    id_z = 0;
    # Fill sites with id_z varying fastest; flat index equals count.
    while Np > count:
        state[int(id_z+id_y*num_spacing+id_x*num_spacing*num_spacing)][0] = spacing_x*id_x-0.5*box[0];
        state[int(id_z+id_y*num_spacing+id_x*num_spacing*num_spacing)][1] = spacing_y*id_y-0.5*box[1];
        state[int(id_z+id_y*num_spacing+id_x*num_spacing*num_spacing)][2] = spacing_z*id_z-0.5*box[2];
        count += 1;
        id_z += 1;
        if(id_z==num_spacing):
            id_z = 0;
            id_y += 1;
            if(id_y==num_spacing):
                id_y = 0;
                id_x += 1;
    #Compute the pair distance (minimum image)
    dx = (state[0]-state[1])
    dx = dx-torch.round(dx/box[0])*box[0]
    #Re-compute one of the coordinates so the bond length is dist_init
    state[0] = dx/torch.norm(dx)*dist_init+state[1]
    # Center on the dimer COM and re-wrap (box[0] used for all axes: cubic box).
    x_com = 0.5*(state[0]+state[1])
    for i in range(Np):
        state[i] -= x_com
        state[i] -= torch.round(state[i]/box[0])*box[0]
    return state;
#Scan the committor prediction along the straight-line interpolation between
#the reactant and product configurations and compare it to the exact solution.
start = CubicLattice(dist_init_start)
end = CubicLattice(dist_init_end)
s = torch.linspace(0,1,100)
x = []  # dimer bond length at each interpolation point
y = []  # committor prediction at each interpolation point
#NOTE(review): boxsize here (10.0) differs from box[0] (= 14.736...) used to
#build the lattice above — confirm which box the minimum-image wrap should use.
boxsize = 10.0
for val in s:
    r = initializer(val)
    dr = r[1]-r[0]
    dr -= torch.round(dr/boxsize)*boxsize
    dr = torch.norm(dr)#.view(-1,1)
    x.append(dr)
    y.append(committor(r).item())
#Load exact solution
data = np.loadtxt('committor.txt')
import matplotlib.pyplot as plt
plt.figure(figsize=(5,5))
#Neural net solution vs. exact solution
plt.plot(x,y,'-')
plt.plot(data[:,0],data[:,1],'--')
#plt.savefig('test.png')
#plt.close()
plt.show()
| 2,791 | 27.489796 | 112 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/dimer_us.py | import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_solv_ml import MyMLEXPSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayerUS
#Import any other thing
import tqdm, sys
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class DimerUS(MyMLEXPSampler):
    """Umbrella-sampling driver for the solvated dimer.

    Wraps the C++/pybind sampler MyMLEXPSampler with the bookkeeping needed
    for ML-driven umbrella sampling: a harmonic bias on the committor value,
    reactant/product basin tests based on the dimer bond length, and periodic
    configuration dumps.
    """
    def __init__(self,param,config,rank,beta,kappa, mpi_group,output_time, save_config=False):
        """Args:
            param: parameter-file path handed to the C++ base class.
            config: initial configuration tensor.
            rank: MPI rank of this replica.
            beta: inverse temperature 1/kT.
            kappa: umbrella spring constant on the committor value.
            mpi_group: tpstorch MPI group.
            output_time: dump period (in timesteps) used by save().
            save_config: enable periodic configuration dumps.
        """
        super(DimerUS, self).__init__(param,config.detach().clone(),rank,beta,kappa, mpi_group)
        self.output_time = output_time
        self.save_config = save_config
        self.timestep = 0
        self.torch_config = config
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta
        self.setConfig(config)
        # 30 solvent particles + 2 dimer particles
        self.Np = 30+2
        self.dt = 0
        self.gamma = 0
        #Read the local param file to get info on step size and friction constant
        #NOTE(review): this reads the file literally named "param", not the
        #`param` argument — confirm that is intentional.
        with open("param","r") as f:
            for line in f:
                test = line.strip()
                test = test.split()
                if (len(test) == 0):
                    continue
                else:
                    if test[0] == "gamma":
                        self.gamma = float(test[1])
                    elif test[0] == "dt":
                        self.dt = float(test[1])
    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        # Don't have to worry about all that much, can just push it into the
        # underlying C++ sampler.
        self.setConfig(config.detach().clone())
    def computeWForce(self, committor_val, qval):
        # Harmonic umbrella force -kappa * dq/dx * (q(x) - q_target).
        # Relies on self.torch_config.grad holding dq/dx from a prior backward().
        return -self.kappa*self.torch_config.grad.data*(committor_val-qval)#/self.gamma
    def step(self, committor_val, onlytst=False):
        """One biased MD step: onlytst=True biases toward the transition state
        (q = 0.5); otherwise toward this rank's window target self.qvals[_rank]."""
        with torch.no_grad():
            #state_old = self.getConfig().detach().clone()
            if onlytst:
                self.stepBiased(self.computeWForce(committor_val, 0.5))
            else:
                self.stepBiased(self.computeWForce(committor_val, self.qvals[_rank]))
        # Re-arm autograd and clear the stale committor gradient for the next step.
        self.torch_config.requires_grad_()
        self.torch_config.grad.data.zero_()
    def step_unbiased(self):
        # One unbiased MD step; a fresh config may have no .grad yet, so the
        # zeroing is best-effort.
        with torch.no_grad():
            self.stepUnbiased()
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except:
            pass
    @torch.no_grad()
    def isReactant(self, x = None):
        """True when the dimer bond length is at or below the LJ minimum r0."""
        r0 = 2**(1/6.0)
        s = 0.25  # basin half-width (unused here; kept parallel to isProduct)
        #Compute the pair distance
        if x is None:
            if self.getBondLength() <= r0:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) <= r0:
                return True
            else:
                return False
    @torch.no_grad()
    def isProduct(self,x = None):
        """True when the dimer bond length is at or beyond r0 + 2*s."""
        r0 = 2**(1/6.0)
        s = 0.25
        if x is None:
            if self.getBondLength() >= r0+2*s:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) >= r0+2*s:
                return True
            else:
                return False
    @torch.no_grad()
    def step_bc(self):
        # Boundary-condition step: unbiased move, but reject anything that
        # leaves both basins so sampling stays pinned to reactant/product.
        state_old = self.getConfig().detach().clone()
        self.step_unbiased()
        if self.isReactant() or self.isProduct():
            pass
        else:
            #If it's not in the reactant or product state, reset!
            self.setConfig(state_old)
    def save(self):
        # Dump the configuration every `output_time` calls when enabled.
        self.timestep += 1
        if self.save_config and self.timestep % self.output_time == 0:
            self.dumpConfig(self.timestep)
| 4,070 | 32.097561 | 107 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/run_us_sl.py | import sys
sys.path.append("..")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from dimer_us import DimerUS
from committor_nn import CommittorNetDR
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelAdam
from tpstorch.ml.nn import BKELossEXP, CommittorLoss2
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(5070)  # fixed seeds for reproducibility across replicas
np.random.seed(5070)
prefix = 'simple'  # filename prefix for statistics/checkpoint outputs
Np = 30+2          # 30 solvent particles + 2 dimer particles
#Initialize neural net
def initializer(s):
    """Linear interpolation between the module-level `start` (s=0) and
    `end` (s=1) configurations."""
    react_weight = 1 - s
    return react_weight*start + s*end
#Initialization
r0 = 2**(1/6.0)   # Lennard-Jones minimum distance
width = 0.5*r0    # half the bond-length gap between reactant and product
#Reactant: dimer bond at the LJ minimum
dist_init_start = r0
#Product state: bond stretched by twice the width
dist_init_end = r0+2*width
#scale down/up the distance of one of the particle dimer
box = [14.736125994561544, 14.736125994561544, 14.736125994561544]  # cubic box edges
def CubicLattice(dist_init):
    """Simple-cubic placement of the global Np particles in the global `box`,
    with the dimer (particles 0 and 1) set `dist_init` apart and centered at
    the origin. Returns an (Np, 3) wrapped coordinate tensor."""
    state = torch.zeros(Np, 3);
    # Smallest per-edge site count that fits Np particles.
    num_spacing = np.ceil(Np**(1/3.0))
    spacing_x = box[0]/num_spacing;
    spacing_y = box[1]/num_spacing;
    spacing_z = box[2]/num_spacing;
    count = 0;
    id_x = 0;
    id_y = 0;
    id_z = 0;
    # Fill sites with id_z varying fastest; flat index equals count.
    while Np > count:
        state[int(id_z+id_y*num_spacing+id_x*num_spacing*num_spacing)][0] = spacing_x*id_x-0.5*box[0];
        state[int(id_z+id_y*num_spacing+id_x*num_spacing*num_spacing)][1] = spacing_y*id_y-0.5*box[1];
        state[int(id_z+id_y*num_spacing+id_x*num_spacing*num_spacing)][2] = spacing_z*id_z-0.5*box[2];
        count += 1;
        id_z += 1;
        if(id_z==num_spacing):
            id_z = 0;
            id_y += 1;
            if(id_y==num_spacing):
                id_y = 0;
                id_x += 1;
    #Compute the pair distance (minimum image)
    dx = (state[0]-state[1])
    dx = dx-torch.round(dx/box[0])*box[0]
    #Re-compute one of the coordinates so the bond length is dist_init
    state[0] = dx/torch.norm(dx)*dist_init+state[1]
    # Center on the dimer COM and re-wrap (box[0] used for all axes: cubic box).
    x_com = 0.5*(state[0]+state[1])
    for i in range(Np):
        state[i] -= x_com
        state[i] -= torch.round(state[i]/box[0])*box[0]
    return state;
#Build the reactant/product configurations and seed this rank's replica with a
#linear interpolation between them.
start = CubicLattice(dist_init_start)
end = CubicLattice(dist_init_end)
initial_config = initializer(rank/(world_size-1))
#Initialize neural net
#NOTE(review): boxsize=10 here while box[0] = 14.736... above — confirm which
#box the committor's minimum-image wrap should use.
committor = CommittorNetDR(num_nodes=2500, boxsize=10).to('cpu')
kappa = 600#10  # umbrella spring constant on the committor value
#Load the pre-initialized neural network
committor.load_state_dict(torch.load("../initial_1hl_nn"))
kT = 1.0
n_boundary_samples = 100
batch_size = 8
period = 25  # MD steps between committor-gradient updates
#Three samplers: boundary-condition sampling (unbiased, basin-pinned),
#the biased umbrella sampler, and an unbiased sampler for the committor loss.
dimer_sim_bc = DimerUS(param="param_bc",config=initial_config.clone().detach(), rank=rank, beta=1/kT, kappa = 0.0, save_config=False, mpi_group = mpi_group, output_time=batch_size*period)
dimer_sim = DimerUS(param="param",config=initial_config.clone().detach(), rank=rank, beta=1/kT, kappa = kappa, save_config=True, mpi_group = mpi_group, output_time=batch_size*period)
dimer_sim_com = DimerUS(param="param",config=initial_config.clone().detach(), rank=rank, beta=1/kT, kappa = 0.0,save_config=True, mpi_group = mpi_group, output_time=batch_size*period)
#Construct the data runner (EXP-reweighted sampling of committor gradients)
datarunner = EXPReweightSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=Np*3)
#Initialize main loss function and optimizers
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossEXP( bc_sampler = dimer_sim_bc,
                   committor = committor,
                   lambda_A = 1e4,
                   lambda_B = 1e4,
                   start_react = start,
                   start_prod = end,
                   n_bc_samples = 100,
                   bc_period = 10,
                   batch_size_bc = 0.5,
                   )
cmloss = CommittorLoss2( cl_sampler = dimer_sim_com,
                         committor = committor,
                         lambda_cl=100.0,
                         cl_start=10,
                         cl_end=200,
                         cl_rate=10,
                         cl_trials=50,
                         batch_size_cl=0.5
                         )
#Ramp the committor-loss weight linearly from its initial value up to
#lambda_cl_end between iterations cl_start and cl_end.
lambda_cl_end = 10**3
cl_start=200
cl_end=10000
cl_stepsize = (lambda_cl_end-cmloss.lambda_cl)/(cl_end-cl_start)
loss_io = []
loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
#Training loop
optimizer = ParallelAdam(committor.parameters(), lr=1.5e-3)
#We can train in terms of epochs, but we will keep it in one epoch
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    for i in tqdm.tqdm(range(10000)):#20000)):
        # get data and reweighting factors
        if (i > cl_start) and (i <= cl_end):
            cmloss.lambda_cl += cl_stepsize
        elif i > cl_end:
            cmloss.lambda_cl = lambda_cl_end
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        # forward + backward + optimize
        bkecost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cmcost = cmloss(i, dimer_sim.getConfig())
        cost = bkecost+cmcost
        cost.backward()
        optimizer.step()
        # print statistics
        with torch.no_grad():
            #if counter % 10 == 0:
            main_loss = loss.main_loss
            cm_loss = cmloss.cl_loss
            bc_loss = loss.bc_loss
            loss_io.write('{:d} {:.5E} {:.5E} {:.5E} \n'.format(i+1,main_loss.item(),bc_loss.item(),cm_loss.item()))
            loss_io.flush()
            #Checkpoint the committor parameters (rank 0 only)
            if rank == 0:
                torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
| 5,785 | 32.062857 | 187 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/dimer_fts_nosolv.py | import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
import scipy.spatial
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_solv_ml import MyMLFTSSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayer
#Import any other thing
import tqdm, sys
#This function only assumes that the string consists of the dimer without solvent particles
@torch.no_grad()
def dimer_reorient(vec,x,boxsize):
Np = 2
##(1) Pre-processing so that dimer is at the center
old_x = x.view(2,3).clone()
#Compute the pair distance
dx = (old_x[0]-old_x[1])
dx = dx-torch.round(dx/boxsize)*boxsize
#Re-compute one of the coordinates and shift to origin
old_x[0] = dx+old_x[1]
x_com = 0.5*(old_x[0]+old_x[1])
new_vec = vec.view(Np,3).clone()
#Compute the pair distance
ds = (new_vec[0]-new_vec[1])
ds = ds-torch.round(ds/boxsize)*boxsize
#Re-compute one of the coordinates and shift to origin
new_vec[0] = ds+new_vec[1]
s_com = 0.5*(new_vec[0]+new_vec[1])
for i in range(Np):
old_x[i] -= x_com
new_vec[i] -= s_com
old_x[i] -= torch.round(old_x[i]/boxsize)*boxsize
new_vec[i] -= torch.round(new_vec[i]/boxsize)*boxsize
##(2) Rotate the system using Kabsch algorithm
weights = np.ones(Np)
rotate,rmsd = scipy.spatial.transform.Rotation.align_vectors(new_vec.numpy(),old_x.numpy(), weights=weights)
for i in range(Np):
old_x[i] = torch.tensor(rotate.apply(old_x[i].numpy()))
old_x[i] -= torch.round(old_x[i]/boxsize)*boxsize
return old_x.flatten()
class FTSLayerCustom(FTSLayer):
    r""" A linear layer, where the parameters correspond to the string obtained
    by the general FTS method. Customized to take into account rotational and
    translational symmetry of the dimer problem: configurations are compared to
    string nodes through the Kabsch-aligned RMSD of the dimer pair only.

    Args:
        react_config (torch.Tensor): starting configuration in the reactant basin.
        prod_config (torch.Tensor): starting configuration in the product basin.
        num_nodes: number of string nodes (one per MPI rank).
        boxsize: periodic cubic box edge length.
        num_particles: particles per string node (effectively 2, the dimer).
    """
    def __init__(self, react_config, prod_config, num_nodes,boxsize,num_particles):
        super(FTSLayerCustom,self).__init__(react_config, prod_config, num_nodes)
        self.boxsize = boxsize
        self.Np = num_particles
    @torch.no_grad()
    def compute_metric(self,x):
        """Squared Kabsch RMSD between configuration `x` and every string node;
        returns a tensor of length _world_size.

        NOTE(review): `x.view(32,3)[:2]` hard-codes 32 particles, and the
        centering loop runs over self.Np rows of a 2-row tensor — this only
        works when num_particles == 2; confirm.
        """
        ##(1) Pre-processing so that dimer is at the center
        old_x = x.view(32,3)[:2].clone()
        #Compute the pair distance (minimum image)
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        #Do the same thing to all string configurations at once
        new_string = self.string.view(_world_size, self.Np,3).clone()
        #Compute the pair distance
        ds = (new_string[:,0]-new_string[:,1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[:,0] = ds+new_string[:,1]
        s_com = 0.5*(new_string[:,0]+new_string[:,1])
        for i in range(self.Np):
            old_x[i] -= x_com
            new_string[:,i] -= s_com
            old_x[i] -= torch.round(old_x[i]/self.boxsize)*self.boxsize
            new_string[:,i] -= torch.round(new_string[:,i]/self.boxsize)*self.boxsize
        ##(2) Rotate the system using Kabsch algorithm; rmsd is the residual
        ##distance after the optimal rotation.
        dist_sq_list = torch.zeros(_world_size)
        weights = np.ones(self.Np)#np.ones(self.Np)/(self.Np-2)
        for i in range(_world_size):
            _, rmsd = scipy.spatial.transform.Rotation.align_vectors(old_x.numpy(), new_string[i].numpy(),weights=weights)
            dist_sq_list[i] = rmsd**2
        return dist_sq_list
    #WARNING! Needs to be changed
    #Only testing if configurations are constrained properly
    @torch.no_grad()
    def forward(self,x):
        """Squared Kabsch RMSD between `x` and this rank's own string node."""
        ##(1) Pre-processing so that dimer is at the center
        old_x = x.view(32,3)[:2].clone()
        #Compute the pair distance (minimum image)
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        #Do the same thing to this rank's string configuration
        new_string = self.string[_rank].view(self.Np,3).clone()
        #Compute the pair distance
        ds = (new_string[0]-new_string[1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[0] = ds+new_string[1]
        s_com = 0.5*(new_string[0]+new_string[1])
        for i in range(self.Np):
            old_x[i] -= x_com
            new_string[i] -= s_com
            old_x[i] -= torch.round(old_x[i]/self.boxsize)*self.boxsize
            new_string[i] -= torch.round(new_string[i]/self.boxsize)*self.boxsize
        ##(2) Rotate the system using Kabsch algorithm
        #NOTE(review): dist_sq_list is never used in this method.
        dist_sq_list = torch.zeros(_world_size)
        weights = np.ones(self.Np)
        _, rmsd = scipy.spatial.transform.Rotation.align_vectors(old_x.numpy(), new_string.numpy(),weights=weights)
        return rmsd**2
class DimerFTS(MyMLFTSSampler):
    """Finite-temperature-string (FTS) sampling driver for the solvated dimer.

    Wraps the C++/pybind sampler MyMLFTSSampler: unbiased MD steps are
    accepted only while the configuration stays inside this rank's Voronoi
    cell of the string (checkFTSCell); otherwise the move is rejected or the
    dimer bond is rescaled back onto the string node.
    """
    def __init__(self,param,config,rank,beta,mpi_group,ftslayer,output_time, save_config=False):
        """Args:
            param: parameter-file path handed to the C++ base class.
            config: initial configuration tensor.
            rank: MPI rank of this replica.
            beta: inverse temperature 1/kT.
            mpi_group: tpstorch MPI group.
            ftslayer: FTS string layer providing the metric and node states.
            output_time: dump period (in timesteps) used by save().
            save_config: enable periodic configuration dumps.
        """
        super(DimerFTS, self).__init__(param,config.detach().clone(),rank,beta,mpi_group)
        self.output_time = output_time
        self.save_config = save_config
        self.timestep = 0
        self.torch_config = config
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta
        self.ftslayer = ftslayer
        self.setConfig(config)
        # 30 solvent particles + 2 dimer particles
        self.Np = 30+2
    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        # Don't have to worry about all that much, can just push it into the
        # underlying C++ sampler.
        self.setConfig(config.detach().clone())
    @torch.no_grad()
    def reset(self):
        """Restart the step counter and, if outside this rank's FTS cell,
        rescale the dimer bond so its length matches the string node's."""
        self.steps = 0
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            #This teleports the dimer to the origin. But it should be okay?
            #Going to set config so that the dimer is that of the string, but rotated in frame
            state_old = self.getConfig().detach().clone()
            #state_old[:2] = self.ftslayer.string[_rank].view(2,3).detach().clone()
            string_old = self.ftslayer.string[_rank].view(2,3).detach().clone()
            #NOTE(review): bond length of the string node measured along z only
            #— assumes string nodes keep the dimer aligned with z; confirm.
            distance = torch.abs(string_old[1,2]-string_old[0,2])
            dx = state_old[0]-state_old[1]
            boxsize = self.ftslayer.boxsize
            dx = dx-torch.round(dx/boxsize)*boxsize
            distance_ref = torch.norm(dx)
            dx_norm = dx/distance_ref
            # Move each dimer particle half the bond-length mismatch along the
            # current bond axis, then re-wrap into the box.
            mod_dist = 0.5*(distance_ref-distance)
            state_old[0] = state_old[0]-mod_dist*dx_norm
            state_old[1] = state_old[1]+mod_dist*dx_norm
            state_old[0] -= torch.round(state_old[0]/boxsize)*boxsize
            state_old[1] -= torch.round(state_old[1]/boxsize)*boxsize
            self.setConfig(state_old)
    def computeMetric(self):
        # Distance (squared RMSD) of the current configuration to every string node.
        self.distance_sq_list = self.ftslayer.compute_metric(self.getConfig().flatten())
    @torch.no_grad()
    def step(self):
        # Unbiased MD step, rejected if it leaves this rank's FTS cell.
        state_old = self.getConfig().detach().clone()
        self.stepUnbiased()
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            self.setConfig(state_old)
        # Re-arm autograd; a fresh config may have no .grad yet.
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except:
            pass
    def step_unbiased(self):
        # Plain unbiased MD step (no cell constraint).
        with torch.no_grad():
            self.stepUnbiased()
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except:
            pass
    @torch.no_grad()
    def isReactant(self, x = None):
        """True when the dimer bond length is at or below the LJ minimum r0."""
        r0 = 2**(1/6.0)
        s = 0.25  # basin half-width (unused here; kept parallel to isProduct)
        #Compute the pair distance
        if x is None:
            if self.getBondLength() <= r0:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) <= r0:
                return True
            else:
                return False
    @torch.no_grad()
    def isProduct(self,x = None):
        """True when the dimer bond length is at or beyond r0 + 2*s."""
        r0 = 2**(1/6.0)
        s = 0.25
        if x is None:
            if self.getBondLength() >= r0+2*s:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) >= r0+2*s:
                return True
            else:
                return False
    def step_bc(self):
        # Boundary-condition step: unbiased move, rejected unless the system
        # stays in the reactant or product basin.
        with torch.no_grad():
            state_old = self.getConfig().detach().clone()
            self.step_unbiased()
            if self.isReactant() or self.isProduct():
                pass
            else:
                #If it's not in the reactant or product state, reset!
                self.setConfig(state_old)
    def save(self):
        # Dump the configuration every `output_time` calls when enabled.
        self.timestep += 1
        if self.save_config and self.timestep % self.output_time == 0:
            self.dumpConfig(self.timestep)
| 9,628 | 36.034615 | 122 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/committor_nn.py | #Import necessarry tools from torch
import torch
import torch.nn as nn
import numpy as np
#Import any other thing
import tqdm, sys
# SchNet imports
from typing import Optional
import os
import warnings
import os.path as osp
from math import pi as PI
import torch.nn.functional as F
from torch.nn import Embedding, Sequential, Linear, ModuleList
from torch_scatter import scatter
from torch_geometric.data.makedirs import makedirs
from torch_geometric.data import download_url, extract_zip, Dataset
from torch_geometric.nn import radius_graph, MessagePassing
#Initialize neural net
def initializer(s, start, end):
    """Return the linear interpolation (1-s)*start + s*end.

    s=0 gives the reactant configuration `start`, s=1 the product `end`.
    """
    react_weight = 1 - s
    return react_weight*start + s*end
def CubicLattice(dist_init, box, Np):
    """Place Np particles on a simple cubic lattice inside a periodic box and
    set the first two particles (the dimer) a distance `dist_init` apart.

    Args:
        dist_init: target dimer bond length.
        box: [Lx, Ly, Lz] edge lengths (cubic in practice; the minimum-image
            wrapping below uses box[0] for every axis).
        Np: number of particles.

    Returns:
        (Np, 3) coordinate tensor, shifted so the dimer's center of mass is
        at the origin and wrapped back into the box.
    """
    state = torch.zeros(Np, 3)
    # Smallest per-edge site count that can hold Np particles.
    cells = int(np.ceil(Np**(1/3.0)))
    spacing = (box[0]/cells, box[1]/cells, box[2]/cells)
    # Fill lattice sites in (x, y, z)-major order until all Np are placed.
    site = 0
    for ix in range(cells):
        for iy in range(cells):
            for iz in range(cells):
                if site == Np:
                    break
                state[site][0] = spacing[0]*ix - 0.5*box[0]
                state[site][1] = spacing[1]*iy - 0.5*box[1]
                state[site][2] = spacing[2]*iz - 0.5*box[2]
                site += 1
    # Minimum-image separation of the dimer pair, rescaled to dist_init.
    bond = state[0] - state[1]
    bond = bond - torch.round(bond/box[0])*box[0]
    state[0] = bond/torch.norm(bond)*dist_init + state[1]
    # Shift so the dimer COM sits at the origin, then re-wrap everything.
    com = 0.5*(state[0] + state[1])
    state -= com
    state -= torch.round(state/box[0])*box[0]
    return state
def initializeConfig(s, r0, width, boxsize, Np):
    """Build the reactant, product, and interpolated configurations.

    Args:
        s: interpolation fraction in [0, 1].
        r0: reactant dimer bond length (LJ minimum).
        width: half the bond-length gap; the product bond is r0 + 2*width.
        boxsize: [Lx, Ly, Lz] periodic box edges.
        Np: number of particles.

    Returns:
        (start, end, interpolated) configuration tensors.
    """
    # Reactant: bond at the LJ minimum.
    react_bond = r0
    # Product state: bond stretched by twice the width.
    prod_bond = r0 + 2*width
    start = CubicLattice(react_bond, boxsize, Np)
    end = CubicLattice(prod_bond, boxsize, Np)
    return start, end, initializer(s, start, end)
class CommittorNet(nn.Module):
    """One-hidden-layer committor network: Linear -> activation -> Linear ->
    sigmoid, producing a value in (0, 1)."""

    def __init__(self, d, num_nodes, unit=torch.relu):
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit  # hidden-layer activation
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)

    def forward(self, x):
        # Flatten to (batch, 6).
        # NOTE(review): hard-codes 6 rather than self.d — confirm d == 6
        # for all callers.
        flat = x.view(-1, 6)
        hidden = self.unit(self.lin1(flat))
        return torch.sigmoid(self.lin2(hidden))
class CommittorNetDR(nn.Module):
    """Committor as a function of the dimer bond length only.

    The input is a flattened configuration of 32 particles in 3D; the
    minimum-image distance between particles 0 and 1 feeds a single hidden
    layer whose sigmoid output is the committor estimate.
    """

    def __init__(self, num_nodes, boxsize, unit=torch.relu):
        super(CommittorNetDR, self).__init__()
        self.num_nodes = num_nodes
        self.unit = unit  # hidden-layer activation
        self.lin1 = nn.Linear(1, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        self.boxsize = boxsize  # periodic cubic box edge

    def forward(self, x):
        # Minimum-image bond length of the dimer (particles 0 and 1).
        coords = x.view(-1, 32, 3)
        bond = coords[:, 0] - coords[:, 1]
        bond = bond - torch.round(bond / self.boxsize) * self.boxsize
        bond_len = torch.norm(bond, dim=1).view(-1, 1)
        # Single hidden layer on the scalar descriptor.
        hidden = self.unit(self.lin1(bond_len))
        return torch.sigmoid(self.lin2(hidden))
class CommittorNetBP(nn.Module):
    """Behler-Parrinello-style committor network.

    Each particle i gets a scalar symmetry-function descriptor

        G_i = sum_{j != i, r_ij < rc} exp(-r_ij^2/sigma^2) * 0.5*(cos(pi*r_ij/rc)+1)

    with minimum-image pair distances r_ij; the Np descriptors feed a single
    hidden layer whose sigmoid output is the committor estimate.

    Args:
        num_nodes: hidden-layer width.
        boxsize: periodic cubic box edge length.
        Np: number of particles per configuration.
        rc: radial cutoff of the symmetry function.
        sigma: Gaussian width of the symmetry function.
        unit: hidden activation (default: relu).
    """

    def __init__(self, num_nodes, boxsize, Np, rc, sigma, unit=torch.relu):
        super(CommittorNetBP, self).__init__()
        self.num_nodes = num_nodes
        self.unit = unit
        self.Np = Np
        self.rc = rc
        self.factor = 1/(sigma**2)
        self.lin1 = nn.Linear(Np, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        self.boxsize = boxsize

    def forward(self, x):
        # float32 value of pi, matching the literal used previously.
        PI = 3.1415927410125732
        x = x.view(-1, self.Np, 3)
        # Vectorized pairwise minimum-image distances. The previous version
        # ran a triple Python loop over (i, j, batch) — O(Np^2 * batch)
        # interpreter-level work — and always built `inputt` on the CPU; the
        # broadcast form below does the same math in one fused computation on
        # the input's device.
        dx = x.unsqueeze(1) - x.unsqueeze(2)            # dx[b, i, j] = x[b, j] - x[b, i]
        dx = dx - torch.round(dx/self.boxsize)*self.boxsize
        r = torch.norm(dx, dim=3)                        # (batch, Np, Np)
        # Gaussian times smooth cosine cutoff envelope.
        g = torch.exp(-self.factor*r**2)*0.5*(torch.cos(PI*r/self.rc)+1)
        # Zero out pairs beyond the cutoff and the j == i diagonal.
        mask = (r < self.rc).to(g.dtype)
        eye = torch.eye(self.Np, dtype=g.dtype, device=g.device)
        g = g*mask*(1.0 - eye)
        inputt = g.sum(dim=2)                            # (batch, Np) descriptors
        # Single hidden layer on the per-particle descriptors.
        hidden = self.unit(self.lin1(inputt))
        return torch.sigmoid(self.lin2(hidden))
class CommittorNetTwoHidden(nn.Module):
    """Two-hidden-layer committor network: Linear -> act -> Linear -> act ->
    Linear -> sigmoid."""

    def __init__(self, d, num_nodes, unit=torch.relu):
        super(CommittorNetTwoHidden, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit  # hidden-layer activation
        # Creation order (lin1, lin3, lin2) is preserved: it fixes both the
        # state_dict key layout and the RNG stream used for initialization.
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin3 = nn.Linear(num_nodes, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)

    def forward(self, x):
        hidden = self.unit(self.lin1(x))
        hidden = self.unit(self.lin3(hidden))
        return torch.sigmoid(self.lin2(hidden))
class SchNet(torch.nn.Module):
r"""The continuous-filter convolutional neural network SchNet from the
`"SchNet: A Continuous-filter Convolutional Neural Network for Modeling
Quantum Interactions" <https://arxiv.org/abs/1706.08566>`_ paper that uses
the interactions blocks of the form
.. math::
\mathbf{x}^{\prime}_i = \sum_{j \in \mathcal{N}(i)} \mathbf{x}_j \odot
h_{\mathbf{\Theta}} ( \exp(-\gamma(\mathbf{e}_{j,i} - \mathbf{\mu}))),
here :math:`h_{\mathbf{\Theta}}` denotes an MLP and
:math:`\mathbf{e}_{j,i}` denotes the interatomic distances between atoms.
.. note::
For an example of using a pretrained SchNet variant, see
`examples/qm9_pretrained_schnet.py
<https://github.com/pyg-team/pytorch_geometric/blob/master/examples/
qm9_pretrained_schnet.py>`_.
Args:
hidden_channels (int, optional): Hidden embedding size.
(default: :obj:`128`)
num_filters (int, optional): The number of filters to use.
(default: :obj:`128`)
num_interactions (int, optional): The number of interaction blocks.
(default: :obj:`6`)
num_gaussians (int, optional): The number of gaussians :math:`\mu`.
(default: :obj:`50`)
cutoff (float, optional): Cutoff distance for interatomic interactions.
(default: :obj:`10.0`)
max_num_neighbors (int, optional): The maximum number of neighbors to
collect for each node within the :attr:`cutoff` distance.
(default: :obj:`32`)
readout (string, optional): Whether to apply :obj:`"add"` or
:obj:`"mean"` global aggregation. (default: :obj:`"add"`)
dipole (bool, optional): If set to :obj:`True`, will use the magnitude
of the dipole moment to make the final prediction, *e.g.*, for
target 0 of :class:`torch_geometric.datasets.QM9`.
(default: :obj:`False`)
mean (float, optional): The mean of the property to predict.
(default: :obj:`None`)
std (float, optional): The standard deviation of the property to
predict. (default: :obj:`None`)
atomref (torch.Tensor, optional): The reference of single-atom
properties.
Expects a vector of shape :obj:`(max_atomic_number, )`.
"""
url = 'http://www.quantum-machine.org/datasets/trained_schnet_models.zip'
def __init__(self, hidden_channels: int = 128, num_filters: int = 128,
num_interactions: int = 6, num_gaussians: int = 50,
cutoff: float = 10.0, max_num_neighbors: int = 32,
readout: str = 'add', dipole: bool = False,
mean: Optional[float] = None, std: Optional[float] = None,
atomref: Optional[torch.Tensor] = None, boxsize: float = 5.0,
Np: int = 32, dim: int = 3):
super().__init__()
self.hidden_channels = hidden_channels
self.num_filters = num_filters
self.num_interactions = num_interactions
self.num_gaussians = num_gaussians
self.cutoff = cutoff
self.max_num_neighbors = max_num_neighbors
self.readout = readout
self.dipole = dipole
self.readout = 'add' if self.dipole else self.readout
self.mean = mean
self.std = std
self.scale = None
self.boxsize = boxsize
self.Np = Np
self.dim = dim
atomic_mass = torch.from_numpy(np.array([1,2]))
self.register_buffer('atomic_mass', atomic_mass)
self.embedding = Embedding(2, hidden_channels)
self.distance_expansion = GaussianSmearing(0.0, cutoff, num_gaussians)
self.interactions = ModuleList()
for _ in range(num_interactions):
block = InteractionBlock(hidden_channels, num_gaussians,
num_filters, cutoff)
self.interactions.append(block)
self.lin1 = Linear(hidden_channels, hidden_channels // 2)
self.act = ShiftedSoftplus()
self.lin2 = Linear(hidden_channels // 2, 1)
self.register_buffer('initial_atomref', atomref)
self.atomref = None
if atomref is not None:
self.atomref = Embedding(100, 1)
self.atomref.weight.data.copy_(atomref)
self.reset_parameters()
def reset_parameters(self):
self.embedding.reset_parameters()
for interaction in self.interactions:
interaction.reset_parameters()
torch.nn.init.xavier_uniform_(self.lin1.weight)
self.lin1.bias.data.fill_(0)
torch.nn.init.xavier_uniform_(self.lin2.weight)
self.lin2.bias.data.fill_(0)
if self.atomref is not None:
self.atomref.weight.data.copy_(self.initial_atomref)
    def forward(self, pos: torch.Tensor) -> torch.Tensor:
        """Evaluate the committor for a batch of configurations.

        Args:
            pos: flattened coordinates; reshaped to (num_configs*Np, dim).

        Returns:
            Tensor of shape (num_configs, 1) with values in (0, 1).
        """
        pos = pos.view(-1,self.dim)
        total_positions = pos.size(dim=0)
        num_configs = total_positions//self.Np
        # Species labels: the first two particles of every configuration are
        # the dimer (type 1), the remaining ones are solvent (type 0).
        z = torch.zeros((total_positions,1), dtype=torch.int)
        z = z.view(-1,self.Np)
        z[:,0] = 1
        z[:,1] = 1
        z = z.view(-1)
        # Batch vector mapping every atom to its configuration index.
        batch = torch.zeros(int(num_configs*self.Np), dtype=torch.int64)
        for i in range(num_configs):
            batch[(self.Np*i):(self.Np*(i+1))] = i
        h = self.embedding(z)
        # r = 2*boxsize makes the graph effectively complete up to
        # max_num_neighbors. NOTE(review): radius_graph is not periodic-aware;
        # only the edge distances below are minimum-imaged — confirm intended.
        edge_index = radius_graph(pos, r=2*self.boxsize, batch=batch,
                                  max_num_neighbors=self.max_num_neighbors)
        row, col = edge_index
        dx = pos[row] - pos[col]
        # Minimum-image convention for the periodic cubic box.
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        edge_weight = (dx).norm(dim=-1)
        edge_attr = self.distance_expansion(edge_weight)
        # Residual message-passing updates of the per-atom features.
        for interaction in self.interactions:
            h = h + interaction(h, edge_index, edge_weight, edge_attr)
        h = self.lin1(h)
        h = self.act(h)
        h = self.lin2(h)
        if self.dipole:
            # Get center of mass.
            mass = self.atomic_mass[z].view(-1, 1)
            c = scatter(mass * pos, batch, dim=0) / scatter(mass, batch, dim=0)
            h = h * (pos - c.index_select(0, batch))
        if not self.dipole and self.mean is not None and self.std is not None:
            h = h * self.std + self.mean
        if not self.dipole and self.atomref is not None:
            h = h + self.atomref(z)
        # Pool per-atom contributions into one scalar per configuration.
        out = scatter(h, batch, dim=0, reduce=self.readout)
        if self.dipole:
            out = torch.norm(out, dim=-1, keepdim=True)
        if self.scale is not None:
            out = self.scale * out
        # Sigmoid squashes the pooled score into a committor-like probability.
        return torch.sigmoid(out)
def __repr__(self):
return (f'{self.__class__.__name__}('
f'hidden_channels={self.hidden_channels}, '
f'num_filters={self.num_filters}, '
f'num_interactions={self.num_interactions}, '
f'num_gaussians={self.num_gaussians}, '
f'cutoff={self.cutoff})')
class InteractionBlock(torch.nn.Module):
    """One SchNet interaction block: a continuous-filter convolution whose
    filters are generated from the expanded pair distances, followed by a
    nonlinearity and a linear mixing layer.
    """

    def __init__(self, hidden_channels, num_gaussians, num_filters, cutoff):
        super().__init__()
        # Filter-generating network: RBF-expanded distances -> filter weights.
        self.mlp = Sequential(
            Linear(num_gaussians, num_filters),
            ShiftedSoftplus(),
            Linear(num_filters, num_filters),
        )
        self.conv = CFConv(hidden_channels, hidden_channels, num_filters,
                           self.mlp, cutoff)
        self.act = ShiftedSoftplus()
        self.lin = Linear(hidden_channels, hidden_channels)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier weights / zero biases for every linear layer in the block."""
        for layer in (self.mlp[0], self.mlp[2]):
            torch.nn.init.xavier_uniform_(layer.weight)
            layer.bias.data.fill_(0)
        self.conv.reset_parameters()
        torch.nn.init.xavier_uniform_(self.lin.weight)
        self.lin.bias.data.fill_(0)

    def forward(self, x, edge_index, edge_weight, edge_attr):
        """Return the updated per-atom features (residual added by caller)."""
        out = self.conv(x, edge_index, edge_weight, edge_attr)
        return self.lin(self.act(out))
class CFConv(MessagePassing):
    """Continuous-filter convolution: messages are neighbor features scaled by
    distance-dependent filters, with a smooth cosine cutoff envelope.
    """

    def __init__(self, in_channels, out_channels, num_filters, nn, cutoff):
        super().__init__(aggr='add')
        self.lin1 = Linear(in_channels, num_filters, bias=False)
        self.lin2 = Linear(num_filters, out_channels)
        self.nn = nn            # filter-generating network (shared MLP)
        self.cutoff = cutoff
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier weights for both projections; zero bias on the output one."""
        for lin in (self.lin1, self.lin2):
            torch.nn.init.xavier_uniform_(lin.weight)
        self.lin2.bias.data.fill_(0)

    def forward(self, x, edge_index, edge_weight, edge_attr):
        # Smooth envelope in [0, 1], exactly zero beyond the cutoff radius.
        envelope = 0.5 * (torch.cos(edge_weight * PI / self.cutoff) + 1.0)
        envelope = envelope * (edge_weight < self.cutoff).float()
        filters = self.nn(edge_attr) * envelope.view(-1, 1)
        out = self.propagate(edge_index, x=self.lin1(x), W=filters)
        return self.lin2(out)

    def message(self, x_j, W):
        # Element-wise filtering of the neighbor's projected features.
        return W * x_j
class GaussianSmearing(torch.nn.Module):
    """Expand scalar distances in a basis of equally spaced Gaussians.

    The Gaussian width is tied to the grid spacing ``delta`` so that
    ``out[i, j] = exp(-(d_i - mu_j)^2 / (2 * delta^2))``.
    """

    def __init__(self, start=0.0, stop=5.0, num_gaussians=50):
        super().__init__()
        centers = torch.linspace(start, stop, num_gaussians)
        spacing = (centers[1] - centers[0]).item()
        self.coeff = -0.5 / spacing**2
        self.register_buffer('offset', centers)

    def forward(self, dist):
        # Broadcast (N, 1) distances against (1, G) centers.
        diff = dist.view(-1, 1) - self.offset.view(1, -1)
        return torch.exp(self.coeff * diff * diff)
class ShiftedSoftplus(torch.nn.Module):
    """``softplus(x) - log(2)``: a smooth activation that vanishes at x = 0."""

    def __init__(self):
        super().__init__()
        # Computed through torch (float32) rather than math.log so the shift
        # constant is bit-identical to the original implementation.
        self.shift = torch.log(torch.tensor(2.0)).item()

    def forward(self, x):
        out = F.softplus(x)
        return out - self.shift
| 15,044 | 34.736342 | 107 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/dimer_ftsus_nosolv.py | import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
import scipy.spatial
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_solv_ml import MyMLEXPStringSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayerUS
#Import any other thing
import tqdm, sys
#This function only assumes that the string consists of the dimer without solvent particles
@torch.no_grad()
def dimer_reorient(vec, x, boxsize):
    """Map the dimer configuration ``x`` onto the frame of the string image
    ``vec``: unwrap and center both pairs, then rotate ``x`` onto ``vec`` with
    the Kabsch algorithm. Assumes both inputs hold exactly two particles
    (flattened length-6 tensors); solvent particles are not involved.
    """
    n_beads = 2

    def _center(flat):
        # Unwrap the pair across the periodic boundary, then shift its center
        # of mass to the origin and re-wrap each bead into the box.
        pair = flat.view(n_beads, 3).clone()
        bond = pair[0] - pair[1]
        bond = bond - torch.round(bond / boxsize) * boxsize
        pair[0] = bond + pair[1]
        com = 0.5 * (pair[0] + pair[1])
        for k in range(n_beads):
            pair[k] -= com
            pair[k] -= torch.round(pair[k] / boxsize) * boxsize
        return pair

    old_x = _center(x)
    ref = _center(vec)
    # Kabsch: best rotation taking the configuration onto the string image.
    rotate, _ = scipy.spatial.transform.Rotation.align_vectors(
        ref.numpy(), old_x.numpy(), weights=np.ones(n_beads))
    for k in range(n_beads):
        old_x[k] = torch.tensor(rotate.apply(old_x[k].numpy()))
        old_x[k] -= torch.round(old_x[k] / boxsize) * boxsize
    return old_x.flatten()
class FTSLayerUSCustom(FTSLayerUS):
    r""" A linear layer, where the parameters correspond to the string obtained by the
        general FTS method. Customized to take into account rotational and translational
        symmetry of the dimer problem.

        The umbrella restraint acts only on the two dimer particles (rows 0 and 1
        of the hard-coded 32-particle configuration); solvent particles are left
        unrestrained.

        Args:
            react_config (torch.Tensor): starting configuration in the reactant basin.
            prod_config (torch.Tensor): starting configuration in the product basin.
            num_nodes (int): number of string images (one per MPI rank).
            boxsize (float): edge length of the periodic cubic box.
            kappa_perpend (float): spring constant perpendicular to the string.
            kappa_parallel (float): spring constant parallel to the string.
            num_particles (int): particles per string image (2 for the dimer).
    """
    def __init__(self, react_config, prod_config, num_nodes,boxsize,kappa_perpend, kappa_parallel, num_particles):
        super(FTSLayerUSCustom,self).__init__(react_config, prod_config, num_nodes, kappa_perpend, kappa_parallel)
        self.boxsize = boxsize
        self.Np = num_particles

    @torch.no_grad()
    def compute_metric(self,x):
        """Return the squared Kabsch-aligned distance from configuration ``x``
        to every image of the string (one entry per rank)."""
        ##(1) Pre-processing so that the dimer is at the center
        old_x = x.view(32,3)[:2].clone()
        #Compute the pair distance (minimum image)
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        #Do the same thing to the string configurations
        new_string = self.string.view(_world_size, self.Np,3).clone()
        ds = (new_string[:,0]-new_string[:,1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        new_string[:,0] = ds+new_string[:,1]
        s_com = 0.5*(new_string[:,0]+new_string[:,1])
        for i in range(self.Np):
            old_x[i] -= x_com
            new_string[:,i] -= s_com
            old_x[i] -= torch.round(old_x[i]/self.boxsize)*self.boxsize
            new_string[:,i] -= torch.round(new_string[:,i]/self.boxsize)*self.boxsize
        ##(2) Rotate the system using the Kabsch algorithm
        dist_sq_list = torch.zeros(_world_size)
        #Only the dimer particles carry weight in the alignment.
        weights = np.zeros(self.Np)
        weights[0] = 1.0
        weights[1] = 1.0
        for i in range(_world_size):
            _, rmsd = scipy.spatial.transform.Rotation.align_vectors(old_x.numpy(), new_string[i].numpy(),weights=weights)
            dist_sq_list[i] = rmsd**2
        return dist_sq_list

    @torch.no_grad()
    def compute_umbrellaforce(self,x):
        """Harmonic umbrella force restraining the dimer to this rank's string
        image. Returns a flattened (32*3,) force vector; only the first six
        entries (the dimer) are non-zero."""
        ##(1) Pre-processing so that the dimer is at the center
        old_x = x.view(32,3)[:2].clone()
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        #Do the same thing to this rank's string image
        new_string = self.string[_rank].view(self.Np,3).clone()
        ds = (new_string[0]-new_string[1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        new_string[0] = ds+new_string[1]
        s_com = 0.5*(new_string[0]+new_string[1])
        for i in range(self.Np):
            old_x[i] -= x_com
            new_string[i] -= s_com
            old_x[i] -= torch.round(old_x[i]/self.boxsize)*self.boxsize
            new_string[i] -= torch.round(new_string[i]/self.boxsize)*self.boxsize
        ##(2) Rotate the string image onto the configuration (Kabsch)
        weights = np.zeros(self.Np)
        weights[0] = 1.0
        weights[1] = 1.0
        rotate, _ = scipy.spatial.transform.Rotation.align_vectors(old_x.numpy(), new_string.numpy(),weights=weights)
        force = torch.zeros_like(old_x)
        dX = torch.zeros_like(old_x)
        for i in range(self.Np):
            new_string[i] = torch.tensor(rotate.apply(new_string[i].numpy()))
            dX[i] = old_x[i]-new_string[i]
            dX[i] -= torch.round(dX[i]/self.boxsize)*self.boxsize
            force[i] += -self.kappa_perpend*dX[i]
        #Extra restoring force along the string tangent so the parallel spring
        #constant becomes kappa_parallel instead of kappa_perpend.
        tangent_dx = torch.dot(self.tangent[_rank],dX.flatten())
        force += -(self.kappa_parallel-self.kappa_perpend)*self.tangent[_rank].view(2,3)*tangent_dx
        #Embed the 2-particle force into the full 32-particle force vector.
        allforce = torch.zeros(32*3)
        allforce[:6] = force.flatten()
        return allforce

    def forward(self,x):
        """Umbrella bias coordinate of configuration ``x`` relative to this
        rank's string image: the Kabsch-aligned squared distance plus the
        anisotropic tangential correction.

        NOTE(review): the sampler presumably multiplies this value by
        kappa_perpend/2 to form the bias energy -- confirm against the sampler.
        """
        ##(1) Pre-processing so that the dimer is at the center
        old_x = x.view(32,3)[:2].clone()
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        #Do the same thing to this rank's string image
        new_string = self.string[_rank].view(self.Np,3).clone()
        ds = (new_string[0]-new_string[1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        new_string[0] = ds+new_string[1]
        s_com = 0.5*(new_string[0]+new_string[1])
        for i in range(self.Np):
            old_x[i] -= x_com
            new_string[i] -= s_com
            old_x[i] -= torch.round(old_x[i]/self.boxsize)*self.boxsize
            new_string[i] -= torch.round(new_string[i]/self.boxsize)*self.boxsize
        ##(2) Rotate the string image onto the configuration (Kabsch)
        weights = np.zeros(self.Np)
        weights[0] = 1.0
        weights[1] = 1.0
        #BUGFIX: the rotation was previously discarded ("_, rmsd = ...") while
        #the loop below referenced an undefined name "rotate", and dX was
        #allocated from an undefined "new_x" (NameError on every call). Capture
        #the rotation, allocate dX from new_string, and rotate the string image
        #exactly as compute_umbrellaforce does.
        rotate, rmsd = scipy.spatial.transform.Rotation.align_vectors(old_x.numpy(), new_string.numpy(),weights=weights)
        dist_sq = rmsd**2
        dX = torch.zeros_like(new_string)
        for i in range(self.Np):
            new_string[i] = torch.tensor(rotate.apply(new_string[i].numpy()))
            dX[i] = old_x[i]-new_string[i]
            dX[i] -= torch.round(dX[i]/self.boxsize)*self.boxsize
        tangent_dx = torch.dot(self.tangent[_rank],dX.flatten())
        return dist_sq+(self.kappa_parallel-self.kappa_perpend)*tangent_dx**2/self.kappa_perpend
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class DimerFTSUS(MyMLEXPStringSampler):
    """Dimer-in-solvent sampler biased by a harmonic umbrella toward one image
    of the FTS string (supplied by ``ftslayer``).

    NOTE(review): the stepping/config primitives (``setConfig``, ``getConfig``,
    ``stepBiased``, ``stepUnbiased``, ``checkFTSCell``, ``dumpConfig``, ...)
    live in the project base class ``MyMLEXPStringSampler``; comments about
    them below are inferred from their names and call sites — confirm against
    the base-class source.
    """
    def __init__(self,param,config,rank,beta,kappa, mpi_group,ftslayer,output_time, save_config=False):
        super(DimerFTSUS, self).__init__(param,config.detach().clone(),rank,beta,kappa, mpi_group)
        self.output_time = output_time  # dump period, in steps
        self.save_config = save_config  # whether save() writes trajectory dumps
        self.timestep = 0
        self.torch_config = config
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta  # beta = 1/kT
        self.ftslayer = ftslayer  # FTS string layer supplying the umbrella force
        #tconfig = ftslayer.string[_rank].view(2,3).detach().clone()
        #tconfig.requires_grad = False
        self.setConfig(config)
        self.Np = 30+2  # 30 solvent particles + 2 dimer particles
    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        """Overwrite the sampler state with the given configuration."""
        # Don't have to worry about all that much all, can just set it
        self.setConfig(config.detach().clone())
    @torch.no_grad()
    def reset(self):
        """Re-seat the sampler in its FTS cell.

        If the current configuration is no longer closest to this rank's
        string image, rescale the dimer bond so that its length matches the
        image's bond length (solvent particles are left untouched).
        """
        self.steps = 0
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            #This teleports the dimer to the origin. But it should be okay?
            #Going to set config so that the dimer is that of the string, but rotated in frame
            state_old = self.getConfig().detach().clone()
            #state_old[:2] = self.ftslayer.string[_rank].view(2,3).detach().clone()
            string_old = self.ftslayer.string[_rank].view(2,3).detach().clone()
            # Target bond length from the string image's z-separation.
            # NOTE(review): assumes the string dimer is aligned along z.
            distance = torch.abs(string_old[1,2]-string_old[0,2])
            dx = state_old[0]-state_old[1]
            boxsize = self.ftslayer.boxsize
            dx = dx-torch.round(dx/boxsize)*boxsize  # minimum image
            distance_ref = torch.norm(dx)
            dx_norm = dx/distance_ref
            # Move both beads symmetrically along the bond to match `distance`.
            mod_dist = 0.5*(distance_ref-distance)
            state_old[0] = state_old[0]-mod_dist*dx_norm
            state_old[1] = state_old[1]+mod_dist*dx_norm
            state_old[0] -= torch.round(state_old[0]/boxsize)*boxsize
            state_old[1] -= torch.round(state_old[1]/boxsize)*boxsize
            self.setConfig(state_old)
    def computeMetric(self):
        """Cache the squared distances from the current config to all string images."""
        self.distance_sq_list = self.ftslayer.compute_metric(self.getConfig().flatten())
    def computeWForce(self,x):
        """Umbrella (window) force for configuration ``x``, from the string layer."""
        return self.ftslayer.compute_umbrellaforce(x)
    @torch.no_grad()
    def step(self):
        """Advance one biased step under the umbrella force."""
        state_old = self.getConfig().detach().clone()
        self.stepBiased(self.computeWForce(state_old.flatten()))
        self.torch_config.requires_grad_()
        try:
            # Clear stale gradients; no-op before the first backward pass.
            self.torch_config.grad.data.zero_()
        except:
            pass
    def step_unbiased(self):
        """Advance one step of the unbiased dynamics."""
        with torch.no_grad():
            self.stepUnbiased()
        self.torch_config.requires_grad_()
        try:
            # Clear stale gradients; no-op before the first backward pass.
            self.torch_config.grad.data.zero_()
        except:
            pass
    @torch.no_grad()
    def isReactant(self, x = None):
        """True if the dimer bond is at/below the compact (reactant) distance r0."""
        r0 = 2**(1/6.0)
        s = 0.25  # well width; unused here but kept alongside isProduct's definition
        #Compute the pair distance
        if x is None:
            if self.getBondLength() <= r0:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) <= r0:
                return True
            else:
                return False
    @torch.no_grad()
    def isProduct(self,x = None):
        """True if the dimer bond is at/above the extended (product) distance r0+2s."""
        r0 = 2**(1/6.0)
        s = 0.25
        if x is None:
            if self.getBondLength() >= r0+2*s:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) >= r0+2*s:
                return True
            else:
                return False
    @torch.no_grad()
    def step_bc(self):
        """Unbiased step that is rejected unless it stays in a basin (A or B)."""
        state_old = self.getConfig().detach().clone()
        self.step_unbiased()
        if self.isReactant() or self.isProduct():
            pass
        else:
            #If it's not in the reactant or product state, reset!
            self.setConfig(state_old)
    def save(self):
        """Advance the step counter and periodically dump the configuration."""
        self.timestep += 1
        if self.save_config and self.timestep % self.output_time == 0:
            self.dumpConfig(self.timestep)
| 12,403 | 38.129338 | 122 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/ftsme/run_ftsme_r.py | import sys
sys.path.append("..")
import time
t0 = time.time()  # wall-clock start; used to stop before the job time limit
#Import necessary tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
import scipy.spatial
#Import necessary tools from tpstorch
from dimer_fts_nosolv import dimer_reorient, DimerFTS
from committor_nn import initializeConfig, CommittorNet, CommittorNetBP, CommittorNetDR, SchNet
from dimer_fts_nosolv import FTSLayerCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSUpdate
from tpstorch.ml.nn import BKELossFTS
import numpy as np
#Grab the MPI group from tpstorch; one rank per string image
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
# Restart script: reload the iteration count and seed the RNGs with it so a
# resumed run does not replay the random stream of the previous chunk.
count = int(np.genfromtxt("count.txt"))
torch.manual_seed(count)
np.random.seed(count)
prefix = 'simple'
#(1) Initialization
r0 = 2**(1/6.0)  # LJ minimum distance
width = 0.25
Np = 30+2  # 30 solvent particles + 2 dimer particles
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
kT = 1.0
# Per-rank restart / reactant / product configurations (skip the type column).
initial_config = np.genfromtxt("../restart/config_"+str(rank)+".xyz", usecols=(1,2,3))
start = np.genfromtxt("../restart_bc/config_"+str(rank)+"_react.xyz", usecols=(1,2,3))
end = np.genfromtxt("../restart_bc/config_"+str(rank)+"_prod.xyz", usecols=(1,2,3))
initial_config = torch.from_numpy(initial_config)
start = torch.from_numpy(start)
end = torch.from_numpy(end)
initial_config = initial_config.float()
start = start.float()
end = end.float()
#Initialize neural net
#committor = torch.jit.script(CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu'))
#committor = torch.jit.script(CommittorNetBP(num_nodes=200, boxsize=box[0], Np=32,rc=2.5,sigma=1.0).to('cpu'))
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
#Initialize the string for FTS method; only the two dimer particles are on the string
ftslayer = FTSLayer(react_config=start[:2].flatten(),prod_config=end[:2].flatten(),num_nodes=world_size,boxsize=box[0],num_particles=2).to('cpu')
# Resume the committor and string from the previous chunk's checkpoints.
committor.load_state_dict(torch.load("simple_params", map_location=torch.device('cpu')))
ftslayer.load_state_dict(torch.load("simple_string"))
n_boundary_samples = 100
batch_size = 8
period = 25
#Initialize the dimer simulation
dimer_sim_bc = DimerFTS(param="param_bc",
                        config=initial_config.detach().clone(),
                        rank=rank,
                        beta=1/kT,
                        save_config=False,
                        mpi_group = mpi_group,
                        ftslayer=ftslayer,
                        output_time=batch_size*period
                        )
dimer_sim = DimerFTS(   param="param",
                        config=initial_config.detach().clone(),
                        rank=rank,
                        beta=1/kT,
                        save_config=True,
                        mpi_group = mpi_group,
                        ftslayer=ftslayer,
                        output_time=batch_size*period
                        )
dimer_sim.useRestart()
#Construct datarunner
datarunner = FTSSimulation(dimer_sim, committor = committor, nn_training = True, period=period, batch_size=batch_size, dimN=Np*3)
#Construct optimizers
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=0.01,momentum=0.9,nesterov=True,kappa=0.1,periodic=True,dim=3)
optimizer = ParallelAdam(committor.parameters(), lr=1e-4)
# Resume optimizer state as well.
optimizer.load_state_dict(torch.load("optimizer_params"))
ftsoptimizer.load_state_dict(torch.load("ftsoptimizer_params"))
#Initialize main loss function
loss = BKELossFTS(  bc_sampler = dimer_sim_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 0,
                    bc_period = 100,
                    batch_size_bc = 0.5,
                    tol = 5e-10,
                    mode= 'shift')
# Reload the boundary-condition samples saved by the fresh-run script
# (n_bc_samples = 0 above prevents regenerating them).
loss.react_configs = torch.load("react_configs_"+str(rank+1)+".pt")
loss.prod_configs = torch.load("prod_configs_"+str(rank+1)+".pt")
loss.n_bc_samples = torch.load("n_bc_samples_"+str(rank+1)+".pt")
loss_io = []
if rank == 0:
    # Append ('a') so the statistics file continues across restarts.
    loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'a')
#Training loop
with open("string_{}_config.xyz".format(rank),"a") as f, open("string_{}_log.txt".format(rank),"a") as g:
    for epoch in range(1):
        if rank == 0:
            print("epoch: [{}]".format(epoch+1))
        time_max = 9.0*60  # stop after ~9 minutes (job chunking)
        time_out = True
        #for i in range(count,count+100):#20000)):
        i = count
        while(time_out):
            # get data and reweighting factors
            configs, grad_xs = datarunner.runSimulation()
            dimer_sim.dumpRestart()
            # zero the parameter gradients
            optimizer.zero_grad()
            # (2) Update the neural network
            # forward + backward + optimize
            cost = loss(grad_xs, dimer_sim.rejection_count)
            cost.backward()
            optimizer.step()
            #ftsoptimizer.step(configs[:,:6],len(configs),boxsize=box[0],reorient_sample=dimer_reorient)
            # print statistics
            with torch.no_grad():
                # Write this rank's string image (the dimer) as an XYZ frame.
                string_temp = ftslayer.string[rank].view(2,3)
                f.write("2 \n")
                f.write('Lattice=\"10.0 0.0 0.0 0.0 10.0 0.0 0.0 0.0 10.0\" ')
                f.write('Origin=\"-5.0 -5.0 -5.0\" ')
                f.write("Properties=type:S:1:pos:R:3:aux1:R:1 \n")
                f.write("2 {} {} {} {} \n".format(string_temp[0,0],string_temp[0,1], string_temp[0,2],0.25))
                f.write("2 {} {} {} {} \n".format(string_temp[1,0],string_temp[1,1], string_temp[1,2],0.25))
                f.flush()
                g.write("{} {} \n".format((i+1)*period,torch.norm(string_temp[0]-string_temp[1])))
                g.flush()
                main_loss = loss.main_loss
                bc_loss = loss.bc_loss
            #Print statistics
            if rank == 0:
                if i%100 == 0:
                    # Periodic historical snapshots.
                    torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
                    torch.save(ftslayer.state_dict(), "{}_string_t_{}_{}".format(prefix,i,rank))
                # Rolling checkpoints consumed by the next restart.
                torch.save(committor.state_dict(), "{}_params".format(prefix))
                torch.save(ftslayer.state_dict(), "{}_string".format(prefix))
                torch.save(optimizer.state_dict(), "optimizer_params")
                torch.save(ftsoptimizer.state_dict(), "ftsoptimizer_params")
                np.savetxt("count.txt", np.array((i+1,)))
                loss_io.write('{:d} {:.5E} {:.5E} \n'.format(i+1,main_loss.item(),bc_loss.item()))
                loss_io.flush()
            i = i+1
            # Synchronized time check: every rank exits together once the
            # slowest rank exceeds time_max.
            t1 = time.time()
            time_diff = t1-t0
            time_diff = torch.tensor(time_diff)
            dist.all_reduce(time_diff,op=dist.ReduceOp.MAX)
            if time_diff > time_max:
                time_out = False
| 7,280 | 40.135593 | 181 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/ftsme/run_ftsme.py | import sys
sys.path.append("..")
import time
t0 = time.time()  # wall-clock start; used to stop before the job time limit
#Import necessary tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
import scipy.spatial
#Import necessary tools from tpstorch
from dimer_fts_nosolv import dimer_reorient, DimerFTS
from committor_nn import initializeConfig, CommittorNet, CommittorNetBP, CommittorNetDR, SchNet
from dimer_fts_nosolv import FTSLayerCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSUpdate
from tpstorch.ml.nn import BKELossFTS
import numpy as np
#Grab the MPI group from tpstorch; one rank per string image
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
# Fresh run: fixed seed (the companion restart script reseeds with the count).
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
#(1) Initialization
r0 = 2**(1/6.0)  # LJ minimum distance
width = 0.25
Np = 30+2  # 30 solvent particles + 2 dimer particles
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
kT = 1.0
# Per-rank initial / reactant / product configurations (skip the type column).
initial_config = np.genfromtxt("../restart/config_"+str(rank)+".xyz", usecols=(1,2,3))
start = np.genfromtxt("../restart_bc/config_"+str(rank)+"_react.xyz", usecols=(1,2,3))
end = np.genfromtxt("../restart_bc/config_"+str(rank)+"_prod.xyz", usecols=(1,2,3))
initial_config = torch.from_numpy(initial_config)
start = torch.from_numpy(start)
end = torch.from_numpy(end)
initial_config = initial_config.float()
start = start.float()
end = end.float()
#Initialize neural net
#committor = torch.jit.script(CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu'))
#committor = torch.jit.script(CommittorNetBP(num_nodes=200, boxsize=box[0], Np=32,rc=2.5,sigma=1.0).to('cpu'))
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
#Initialize the string for FTS method; only the two dimer particles are on the string
ftslayer = FTSLayer(react_config=start[:2].flatten(),prod_config=end[:2].flatten(),num_nodes=world_size,boxsize=box[0],num_particles=2).to('cpu')
# Pre-initialized network and string (produced by the initialization scripts).
committor.load_state_dict(torch.load("initial_1hl_nn", map_location=torch.device('cpu')))
ftslayer.load_state_dict(torch.load("../test_string_config"))
n_boundary_samples = 100
batch_size = 8
period = 25
#Initialize the dimer simulation
dimer_sim_bc = DimerFTS(param="param_bc",config=initial_config.detach().clone(),rank=rank,beta=1/kT, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period, save_config=False)
dimer_sim = DimerFTS(param="param",config=initial_config.detach().clone(), rank=rank, beta=1/kT, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period, save_config=True)
#Construct datarunner
datarunner = FTSSimulation(dimer_sim, committor = committor, nn_training = True, period=period, batch_size=batch_size, dimN=Np*3)
#Construct optimizers
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=0.01,momentum=0.9,nesterov=True,kappa=0.1,periodic=True,dim=3)
optimizer = ParallelAdam(committor.parameters(), lr=1e-4)
#Initialize main loss function
loss = BKELossFTS(  bc_sampler = dimer_sim_bc,committor = committor,lambda_A = 1e4,lambda_B = 1e4,start_react = start,start_prod = end,n_bc_samples = 100, bc_period = 100,batch_size_bc = 0.5,tol = 5e-10, mode= 'shift')
loss_io = []
if rank == 0:
    loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
#Training loop
# Save reactant, product configurations so the restart script can reload them.
torch.save(loss.react_configs, "react_configs_"+str(rank+1)+".pt")
torch.save(loss.prod_configs, "prod_configs_"+str(rank+1)+".pt")
torch.save(loss.n_bc_samples, "n_bc_samples_"+str(rank+1)+".pt")
with open("string_{}_config.xyz".format(rank),"w") as f, open("string_{}_log.txt".format(rank),"w") as g:
    for epoch in range(1):
        if rank == 0:
            print("epoch: [{}]".format(epoch+1))
        time_max = 9.0*60  # stop after ~9 minutes (job chunking)
        time_out = True
        #for i in range(count,count+100):#20000)):
        i = 0
        while(time_out):
            # get data and reweighting factors
            configs, grad_xs = datarunner.runSimulation()
            dimer_sim.dumpRestart()
            # zero the parameter gradients
            optimizer.zero_grad()
            # (2) Update the neural network
            # forward + backward + optimize
            cost = loss(grad_xs, dimer_sim.rejection_count)
            cost.backward()
            optimizer.step()
            #ftsoptimizer.step(configs[:,:6],len(configs),boxsize=box[0],reorient_sample=dimer_reorient)
            # print statistics
            with torch.no_grad():
                # Write this rank's string image (the dimer) as an XYZ frame.
                string_temp = ftslayer.string[rank].view(2,3)
                f.write("2 \n")
                f.write('Lattice=\"10.0 0.0 0.0 0.0 10.0 0.0 0.0 0.0 10.0\" ')
                f.write('Origin=\"-5.0 -5.0 -5.0\" ')
                f.write("Properties=type:S:1:pos:R:3:aux1:R:1 \n")
                f.write("2 {} {} {} {} \n".format(string_temp[0,0],string_temp[0,1], string_temp[0,2],0.25))
                f.write("2 {} {} {} {} \n".format(string_temp[1,0],string_temp[1,1], string_temp[1,2],0.25))
                f.flush()
                g.write("{} {} \n".format((i+1)*period,torch.norm(string_temp[0]-string_temp[1])))
                g.flush()
                main_loss = loss.main_loss
                bc_loss = loss.bc_loss
            #Print statistics
            if rank == 0:
                if i%100 == 0:
                    # Periodic historical snapshots.
                    torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
                    torch.save(ftslayer.state_dict(), "{}_string_t_{}_{}".format(prefix,i,rank))
                # Rolling checkpoints consumed by the restart script.
                torch.save(committor.state_dict(), "{}_params".format(prefix))
                torch.save(ftslayer.state_dict(), "{}_string".format(prefix))
                torch.save(optimizer.state_dict(), "optimizer_params")
                torch.save(ftsoptimizer.state_dict(), "ftsoptimizer_params")
                np.savetxt("count.txt", np.array((i+1,)))
                loss_io.write('{:d} {:.5E} {:.5E} \n'.format(i+1,main_loss.item(),bc_loss.item()))
                loss_io.flush()
            i = i+1
            # Synchronized time check: every rank exits together once the
            # slowest rank exceeds time_max.
            t1 = time.time()
            time_diff = t1-t0
            time_diff = torch.tensor(time_diff)
            dist.all_reduce(time_diff,op=dist.ReduceOp.MAX)
            if time_diff > time_max:
                time_out = False
| 6,468 | 43.308219 | 218 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/ftsus/run_ftsus.py | import sys
sys.path.append("..")
import time
t0 = time.time()  # wall-clock start; used to stop before the job time limit
#Import necessary tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
import scipy.spatial
#Import necessary tools from tpstorch
from dimer_ftsus_nosolv import DimerFTSUS
from committor_nn import initializeConfig, CommittorNet, CommittorNetBP, CommittorNetDR, SchNet
from dimer_ftsus_nosolv import FTSLayerUSCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam
from tpstorch.ml.nn import BKELossEXP, CommittorLoss2
import numpy as np
#Grab the MPI group from tpstorch; one rank per umbrella window
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
# Fresh run: fixed seed (the companion restart script reseeds with the count).
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
#(1) Initialization
r0 = 2**(1/6.0)  # LJ minimum distance
width = 0.25
Np = 30+2  # 30 solvent particles + 2 dimer particles
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
# Umbrella spring constants perpendicular / parallel to the string.
kappa_perp = 1200.0
kappa_par = 1200.0
kT = 1.0
# Per-rank initial / reactant / product configurations (skip the type column).
initial_config = np.genfromtxt("../restart/config_"+str(rank)+".xyz", usecols=(1,2,3))
start = np.genfromtxt("../restart_bc/config_"+str(rank)+"_react.xyz", usecols=(1,2,3))
end = np.genfromtxt("../restart_bc/config_"+str(rank)+"_prod.xyz", usecols=(1,2,3))
initial_config = torch.from_numpy(initial_config)
start = torch.from_numpy(start)
end = torch.from_numpy(end)
initial_config = initial_config.float()
start = start.float()
end = end.float()
#Initialize neural net
#committor = torch.jit.script(CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu'))
#committor = torch.jit.script(CommittorNetBP(num_nodes=200, boxsize=box[0], Np=32,rc=2.5,sigma=1.0).to('cpu'))
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
#Initialize the string for FTS method; only the two dimer particles are on the string
ftslayer = FTSLayer(react_config=start[:2].flatten(),prod_config=end[:2].flatten(),num_nodes=world_size,boxsize=box[0],kappa_perpend=kappa_perp, kappa_parallel=kappa_par, num_particles=2).to('cpu')
#Load the pre-initialized neural network and string
committor.load_state_dict(torch.load("initial_1hl_nn", map_location=torch.device('cpu')))
ftslayer.load_state_dict(torch.load("../test_string_config"))
ftslayer.set_tangent()
n_boundary_samples = 100
batch_size = 8
period = 25
# kappa = 0.0 for the boundary-condition sampler (no umbrella bias there).
dimer_sim_bc = DimerFTSUS(  param="param_bc",config=initial_config.clone().detach(), rank=rank, beta=1/kT, kappa = 0.0, save_config=False, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
dimer_sim = DimerFTSUS( param="param",config=initial_config.clone().detach(), rank=rank, beta=1/kT, kappa = kappa_perp, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
#Construct datarunner
datarunner = EXPReweightStringSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=Np*3)
#Construct optimizers
optimizer = ParallelAdam(committor.parameters(), lr=1e-4)
#Initialize main loss function and optimizers
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossEXP(  bc_sampler = dimer_sim_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 100,
                    batch_size_bc = 0.5,
                    )
loss_io = []
if rank == 0:
    loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
# Save reactant, product configurations so the restart script can reload them.
torch.save(loss.react_configs, "react_configs_"+str(rank+1)+".pt")
torch.save(loss.prod_configs, "prod_configs_"+str(rank+1)+".pt")
torch.save(loss.n_bc_samples, "n_bc_samples_"+str(rank+1)+".pt")
#Training loop
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    time_max = 9.0*60  # stop after ~9 minutes (job chunking)
    time_out = True
    #for i in range(count,count+100):#20000)):
    i = 0
    while(time_out):
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        dimer_sim.dumpRestart()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        # forward + backward + optimize
        bkecost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cost = bkecost
        cost.backward()
        optimizer.step()
        # print statistics
        with torch.no_grad():
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
        #Print statistics
        if rank == 0:
            if i%100 == 0:
                # Periodic historical snapshot.
                torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
            # Rolling checkpoints consumed by the restart script.
            torch.save(committor.state_dict(), "{}_params".format(prefix))
            torch.save(optimizer.state_dict(), "optimizer_params")
            np.savetxt("count.txt", np.array((i+1,)))
            loss_io.write('{:d} {:.5E} {:.5E}\n'.format(i+1,main_loss.item(),bc_loss.item()))
            loss_io.flush()
        i = i+1
        # Synchronized time check: every rank exits together once the slowest
        # rank exceeds time_max.
        t1 = time.time()
        time_diff = t1-t0
        time_diff = torch.tensor(time_diff)
        dist.all_reduce(time_diff,op=dist.ReduceOp.MAX)
        if time_diff > time_max:
            time_out = False
| 5,510 | 37.538462 | 210 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/ftsus/run_ftsus_r.py | import sys
# -----------------------------------------------------------------------------
# Restart driver: resumes training of a SchNet committor network for the dimer
# system using the FTS-US sampler (string method with umbrella-sampling
# restraints) and the EXP-reweighted BKE loss. All mutable state (iteration
# counter, network weights, Adam state, boundary-condition sample sets) is
# reloaded from disk, then training continues until a wall-clock budget shared
# across all MPI ranks is exhausted.
# -----------------------------------------------------------------------------
sys.path.append("..")
import time
# Wall-clock reference for the time budget enforced at the bottom of the loop.
t0 = time.time()
#Import necessary tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
import scipy.spatial
#Import necessary tools from tpstorch
from dimer_ftsus_nosolv import DimerFTSUS
from committor_nn import initializeConfig, CommittorNet, CommittorNetBP, CommittorNetDR, SchNet
from dimer_ftsus_nosolv import FTSLayerUSCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam
from tpstorch.ml.nn import BKELossEXP, CommittorLoss2
import numpy as np
#Grab the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
# reload count: iteration index saved by the previous run. It also seeds the
# RNGs so that each restart draws a different random stream.
count = int(np.genfromtxt("count.txt"))
torch.manual_seed(count)
np.random.seed(count)
prefix = 'simple'
#(1) Initialization
r0 = 2**(1/6.0)
width = 0.25
Np = 30+2  # total particle count; the first two rows are the dimer pair
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
# Umbrella spring constants perpendicular/parallel to the string.
kappa_perp = 1200.0
kappa_par = 1200.0
kT = 1.0
# Per-rank restart configuration plus reactant/product reference configurations
# (xyz files: column 0 is the particle type, columns 1-3 are coordinates).
initial_config = np.genfromtxt("../restart/config_"+str(rank)+".xyz", usecols=(1,2,3))
start = np.genfromtxt("../restart_bc/config_"+str(rank)+"_react.xyz", usecols=(1,2,3))
end = np.genfromtxt("../restart_bc/config_"+str(rank)+"_prod.xyz", usecols=(1,2,3))
initial_config = torch.from_numpy(initial_config)
start = torch.from_numpy(start)
end = torch.from_numpy(end)
initial_config = initial_config.float()
start = start.float()
end = end.float()
#Initialize neural net
#committor = torch.jit.script(CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu'))
#committor = torch.jit.script(CommittorNetBP(num_nodes=200, boxsize=box[0], Np=32,rc=2.5,sigma=1.0).to('cpu'))
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
#Initialize the string for FTS method. Only the two dimer particles
#(start[:2]) define the string collective variable.
ftslayer = FTSLayer(react_config=start[:2].flatten(),prod_config=end[:2].flatten(),num_nodes=world_size,boxsize=box[0],kappa_perpend=kappa_perp, kappa_parallel=kappa_par, num_particles=2).to('cpu')
#Load the pre-initialized neural network and string
committor.load_state_dict(torch.load("simple_params", map_location=torch.device('cpu')))
ftslayer.load_state_dict(torch.load("../test_string_config"))
ftslayer.set_tangent()
n_boundary_samples = 100
batch_size = 8
period = 25  # sampler steps between saved samples; output period is batch_size*period
# Boundary-condition sampler runs unrestrained (kappa = 0.0); the production
# sampler keeps the umbrella restraint and resumes from its dumped state.
dimer_sim_bc = DimerFTSUS( param="param_bc",config=initial_config.clone().detach(), rank=rank, beta=1/kT, kappa = 0.0, save_config=False, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
dimer_sim = DimerFTSUS( param="param",config=initial_config.clone().detach(), rank=rank, beta=1/kT, kappa = kappa_perp, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
dimer_sim.useRestart()
#Construct datarunner
datarunner = EXPReweightStringSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=Np*3)
#Construct optimizers
optimizer = ParallelAdam(committor.parameters(), lr=1e-4)
optimizer.load_state_dict(torch.load("optimizer_params"))
#Initialize main loss function and optimizers
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
# n_bc_samples = 0: no fresh boundary samples are generated on restart; the
# stored sample sets are reloaded from disk below. batch_size_bc = 0.5 is
# presumably the fraction of stored boundary samples used per step -- confirm
# against BKELossEXP.
loss = BKELossEXP( bc_sampler = dimer_sim_bc,
                   committor = committor,
                   lambda_A = 1e4,
                   lambda_B = 1e4,
                   start_react = start,
                   start_prod = end,
                   n_bc_samples = 0,
                   bc_period = 100,
                   batch_size_bc = 0.5,
                   )
# Only rank 0 writes the statistics file; other ranks keep the placeholder.
loss_io = []
if rank == 0:
    loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'a')
# Reload reactant, product configurations saved by the initial run.
loss.react_configs = torch.load("react_configs_"+str(rank+1)+".pt")
loss.prod_configs = torch.load("prod_configs_"+str(rank+1)+".pt")
loss.n_bc_samples = torch.load("n_bc_samples_"+str(rank+1)+".pt")
#Training loop
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    time_max = 9.0*60  # wall-clock budget in seconds (~9 minutes)
    time_out = True
    #for i in range(count,count+100):#20000)):
    i = count
    while(time_out):
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        # Persist sampler state every iteration so the job can be resumed.
        dimer_sim.dumpRestart()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        # forward + backward + optimize
        bkecost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cost = bkecost
        cost.backward()
        optimizer.step()
        # print statistics
        with torch.no_grad():
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            #Print statistics
            if rank == 0:
                # Periodic timestamped checkpoint plus a rolling "latest" one;
                # count.txt records the next iteration index for restarts.
                if i%100 == 0:
                    torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
                torch.save(committor.state_dict(), "{}_params".format(prefix))
                torch.save(optimizer.state_dict(), "optimizer_params")
                np.savetxt("count.txt", np.array((i+1,)))
                loss_io.write('{:d} {:.5E} {:.5E}\n'.format(i+1,main_loss.item(),bc_loss.item()))
                loss_io.flush()
        i = i+1
        t1 = time.time()
        time_diff = t1-t0
        time_diff = torch.tensor(time_diff)
        # Use the slowest rank's elapsed time so every rank stops in the same
        # iteration (avoids a hang in the collectives above).
        dist.all_reduce(time_diff,op=dist.ReduceOp.MAX)
        if time_diff > time_max:
            time_out = False
| 5,652 | 37.455782 | 210 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/us/run_us_r.py | import sys
# -----------------------------------------------------------------------------
# Restart driver: resumes training of a SchNet committor network for the dimer
# system using plain umbrella sampling (DimerUS) and the EXP-reweighted BKE
# loss. Reloads the iteration counter, network weights, Adam state, and
# boundary-condition samples, then trains until a shared wall-clock budget
# across all MPI ranks is exhausted.
# -----------------------------------------------------------------------------
sys.path.append("..")
import time
# Wall-clock reference for the time budget enforced at the bottom of the loop.
t0 = time.time()
#Import necessary tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessary tools from tpstorch
from dimer_us import DimerUS
from committor_nn import initializeConfig, CommittorNet, CommittorNetBP, CommittorNetDR, SchNet
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelAdam
from tpstorch.ml.nn import BKELossEXP, CommittorLoss2
import numpy as np
#Grab the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
# reload count: iteration index saved by the previous run; also seeds the RNGs
# so each restart draws a different random stream.
count = int(np.genfromtxt("count.txt"))
torch.manual_seed(count)
np.random.seed(count)
prefix = 'simple'
#(1) Initialization
r0 = 2**(1/6.0)
width = 0.25
Np = 30+2  # total particle count
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
kappa= 100  # umbrella spring constant for the production sampler
kT = 1.0
# Per-rank restart configuration plus reactant/product reference configurations
# (xyz files: column 0 is the particle type, columns 1-3 are coordinates).
initial_config = np.genfromtxt("../restart/config_"+str(rank)+".xyz", usecols=(1,2,3))
start = np.genfromtxt("../restart_bc/config_"+str(rank)+"_react.xyz", usecols=(1,2,3))
end = np.genfromtxt("../restart_bc/config_"+str(rank)+"_prod.xyz", usecols=(1,2,3))
initial_config = torch.from_numpy(initial_config)
start = torch.from_numpy(start)
end = torch.from_numpy(end)
initial_config = initial_config.float()
start = start.float()
end = end.float()
#Initialize neural net
#committor = torch.jit.script(CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu'))
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
# Resume from the rolling checkpoint written by the previous run.
committor.load_state_dict(torch.load("simple_params", map_location=torch.device('cpu')))
n_boundary_samples = 100
batch_size = 8
period = 25  # sampler steps between saved samples; output period is batch_size*period
# Boundary-condition sampler runs unrestrained (kappa = 0.0); the production
# sampler keeps the umbrella restraint and resumes from its dumped state.
dimer_sim_bc = DimerUS( param="param_bc",
                        config=initial_config.clone().detach(),
                        rank=rank,
                        beta=1/kT,
                        kappa = 0.0,
                        save_config=False,
                        mpi_group = mpi_group,
                        output_time=batch_size*period
                        )
dimer_sim = DimerUS( param="param",
                        config=initial_config.clone().detach(),
                        rank=rank,
                        beta=1/kT,
                        kappa = kappa,
                        save_config=True,
                        mpi_group = mpi_group,
                        output_time=batch_size*period
                        )
dimer_sim.useRestart()
#Construct datarunner
datarunner = EXPReweightSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=Np*3)
#Construct optimizers
optimizer = ParallelAdam(committor.parameters(), lr=1e-4)
optimizer.load_state_dict(torch.load("optimizer_params"))
#The BKE Loss function, with EXP Reweighting
# n_bc_samples = 0: no fresh boundary samples on restart; the stored sample
# sets are reloaded from disk below.
loss = BKELossEXP( bc_sampler = dimer_sim_bc,
                   committor = committor,
                   lambda_A = 1e4,
                   lambda_B = 1e4,
                   start_react = start,
                   start_prod = end,
                   n_bc_samples = 0,
                   bc_period = 100,
                   batch_size_bc = 0.5,
                   )
# Only rank 0 writes the statistics file; other ranks keep the placeholder.
loss_io = []
if rank == 0:
    loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'a')
# Reload reactant, product configurations saved by the initial run.
loss.react_configs = torch.load("react_configs_"+str(rank+1)+".pt")
loss.prod_configs = torch.load("prod_configs_"+str(rank+1)+".pt")
loss.n_bc_samples = torch.load("n_bc_samples_"+str(rank+1)+".pt")
#Training loop
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    time_max = 9.0*60  # wall-clock budget in seconds (~9 minutes)
    time_out = True
    #for i in range(count,count+100):#20000)):
    i = count
    while(time_out):
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        # Persist sampler state every iteration so the job can be resumed.
        dimer_sim.dumpRestart()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        # forward + backward + optimize
        bkecost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cost = bkecost
        cost.backward()
        optimizer.step()
        # print statistics
        with torch.no_grad():
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            #Print statistics
            if rank == 0:
                # Periodic timestamped checkpoint plus a rolling "latest" one;
                # count.txt records the next iteration index for restarts.
                if i%100 == 0:
                    torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
                torch.save(committor.state_dict(), "{}_params".format(prefix))
                torch.save(optimizer.state_dict(), "optimizer_params")
                np.savetxt("count.txt", np.array((i+1,)))
                loss_io.write('{:d} {:.5E} {:.5E}\n'.format(i+1,main_loss.item(),bc_loss.item()))
                loss_io.flush()
        i = i+1
        t1 = time.time()
        time_diff = t1-t0
        time_diff = torch.tensor(time_diff)
        # Use the slowest rank's elapsed time so every rank stops in the same
        # iteration (avoids a hang in the collectives above).
        dist.all_reduce(time_diff,op=dist.ReduceOp.MAX)
        if time_diff > time_max:
            time_out = False
| 5,262 | 33.625 | 181 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/us/run_us.py | import sys
# -----------------------------------------------------------------------------
# Fresh-start driver: trains a SchNet committor network for the dimer system
# from scratch using plain umbrella sampling (DimerUS) and the EXP-reweighted
# BKE loss. Uses a fixed RNG seed, generates the boundary-condition sample
# sets (n_bc_samples = 100) and saves them to disk so the companion restart
# script (run_us_r.py) can reload them. Runs until a shared wall-clock budget
# is exhausted.
# -----------------------------------------------------------------------------
sys.path.append("..")
import time
# Wall-clock reference for the time budget enforced at the bottom of the loop.
t0 = time.time()
#Import necessary tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessary tools from tpstorch
from dimer_us import DimerUS
from committor_nn import initializeConfig, CommittorNet, CommittorNetBP, CommittorNetDR, SchNet
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelAdam
from tpstorch.ml.nn import BKELossEXP, CommittorLoss2
import numpy as np
#Grab the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
# Fixed seed for the initial run (restarts seed from count.txt instead).
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
#(1) Initialization
r0 = 2**(1/6.0)
width = 0.25
Np = 30+2  # total particle count
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
kappa= 100  # umbrella spring constant for the production sampler
kT = 1.0
# Per-rank starting configuration plus reactant/product reference
# configurations (xyz files: column 0 is type, columns 1-3 are coordinates).
initial_config = np.genfromtxt("../restart/config_"+str(rank)+".xyz", usecols=(1,2,3))
start = np.genfromtxt("../restart_bc/config_"+str(rank)+"_react.xyz", usecols=(1,2,3))
end = np.genfromtxt("../restart_bc/config_"+str(rank)+"_prod.xyz", usecols=(1,2,3))
initial_config = torch.from_numpy(initial_config)
start = torch.from_numpy(start)
end = torch.from_numpy(end)
initial_config = initial_config.float()
start = start.float()
end = end.float()
#Initialize neural net
#committor = torch.jit.script(CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu'))
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
# Start from the pre-trained initial network (not a training checkpoint).
committor.load_state_dict(torch.load("initial_1hl_nn", map_location=torch.device('cpu')))
n_boundary_samples = 100
batch_size = 8
period = 25  # sampler steps between saved samples; output period is batch_size*period
# Boundary-condition sampler runs unrestrained (kappa = 0.0); the production
# sampler keeps the umbrella restraint.
dimer_sim_bc = DimerUS( param="param_bc",
                        config=initial_config.clone().detach(),
                        rank=rank,
                        beta=1/kT,
                        kappa = 0.0,
                        save_config=False,
                        mpi_group = mpi_group,
                        output_time=batch_size*period
                        )
dimer_sim = DimerUS( param="param",
                        config=initial_config.clone().detach(),
                        rank=rank,
                        beta=1/kT,
                        kappa = kappa,
                        save_config=True,
                        mpi_group = mpi_group,
                        output_time=batch_size*period
                        )
#Construct datarunner
datarunner = EXPReweightSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=Np*3)
#Construct optimizers
optimizer = ParallelAdam(committor.parameters(), lr=1e-4)
#The BKE Loss function, with EXP Reweighting
# n_bc_samples = 100: this initial run generates the boundary sample sets,
# which are saved to disk below for later restarts.
loss = BKELossEXP( bc_sampler = dimer_sim_bc,
                   committor = committor,
                   lambda_A = 1e4,
                   lambda_B = 1e4,
                   start_react = start,
                   start_prod = end,
                   n_bc_samples = 100,
                   bc_period = 100,
                   batch_size_bc = 0.5,
                   )
# Only rank 0 writes the statistics file; other ranks keep the placeholder.
loss_io = []
if rank == 0:
    loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
# Save reactant, product configurations so restart runs can reload them.
torch.save(loss.react_configs, "react_configs_"+str(rank+1)+".pt")
torch.save(loss.prod_configs, "prod_configs_"+str(rank+1)+".pt")
torch.save(loss.n_bc_samples, "n_bc_samples_"+str(rank+1)+".pt")
#Training loop
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    time_max = 9.0*60  # wall-clock budget in seconds (~9 minutes)
    time_out = True
    #for i in range(count,count+100):#20000)):
    i = 0
    while(time_out):
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        # Persist sampler state every iteration so the job can be resumed.
        dimer_sim.dumpRestart()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        # forward + backward + optimize
        bkecost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cost = bkecost
        cost.backward()
        optimizer.step()
        # print statistics
        with torch.no_grad():
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            #Print statistics
            if rank == 0:
                # Periodic timestamped checkpoint plus a rolling "latest" one;
                # count.txt records the next iteration index for restarts.
                if i%100 == 0:
                    torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
                torch.save(committor.state_dict(), "{}_params".format(prefix))
                torch.save(optimizer.state_dict(), "optimizer_params")
                np.savetxt("count.txt", np.array((i+1,)))
                loss_io.write('{:d} {:.5E} {:.5E}\n'.format(i+1,main_loss.item(),bc_loss.item()))
                loss_io.flush()
        i = i+1
        t1 = time.time()
        time_diff = t1-t0
        time_diff = torch.tensor(time_diff)
        # Use the slowest rank's elapsed time so every rank stops in the same
        # iteration (avoids a hang in the collectives above).
        dist.all_reduce(time_diff,op=dist.ReduceOp.MAX)
        if time_diff > time_max:
            time_out = False
| 5,120 | 33.601351 | 181 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/ftsus_sl/run_ftsus_sl_r.py | import sys
# -----------------------------------------------------------------------------
# Restart driver: resumes FTS-US training of a SchNet committor network with an
# additional supervised committor-loss term (CommittorLoss2), whose weight
# lambda_cl is ramped linearly from its reloaded value up to lambda_cl_end
# between iterations cl_start and cl_end. All mutable state (iteration
# counter, network/optimizer state, boundary samples, supervised-loss sample
# bank) is reloaded from disk, and training runs until a shared wall-clock
# budget is exhausted.
# -----------------------------------------------------------------------------
sys.path.append("..")
import time
# Wall-clock reference for the time budget enforced at the bottom of the loop.
t0 = time.time()
#Import necessary tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
import scipy.spatial
#Import necessary tools from tpstorch
from dimer_ftsus_nosolv import DimerFTSUS
from committor_nn import initializeConfig, CommittorNet, CommittorNetBP, CommittorNetDR, SchNet
from dimer_ftsus_nosolv import FTSLayerUSCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam
from tpstorch.ml.nn import BKELossEXP, CommittorLoss2
import numpy as np
#Grab the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
# reload count: iteration index saved by the previous run; also seeds the RNGs
# so each restart draws a different random stream.
count = int(np.genfromtxt("count.txt"))
torch.manual_seed(count)
np.random.seed(count)
prefix = 'simple'
#(1) Initialization
r0 = 2**(1/6.0)
width = 0.25
Np = 30+2  # total particle count; the first two rows are the dimer pair
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
# Umbrella spring constants perpendicular/parallel to the string.
kappa_perp = 1200.0
kappa_par = 1200.0
kT = 1.0
# Per-rank restart configuration plus reactant/product reference configurations
# (xyz files: column 0 is the particle type, columns 1-3 are coordinates).
initial_config = np.genfromtxt("../restart/config_"+str(rank)+".xyz", usecols=(1,2,3))
start = np.genfromtxt("../restart_bc/config_"+str(rank)+"_react.xyz", usecols=(1,2,3))
end = np.genfromtxt("../restart_bc/config_"+str(rank)+"_prod.xyz", usecols=(1,2,3))
initial_config = torch.from_numpy(initial_config)
start = torch.from_numpy(start)
end = torch.from_numpy(end)
initial_config = initial_config.float()
start = start.float()
end = end.float()
#Initialize neural net
#committor = torch.jit.script(CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu'))
#committor = torch.jit.script(CommittorNetBP(num_nodes=200, boxsize=box[0], Np=32,rc=2.5,sigma=1.0).to('cpu'))
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
#Initialize the string for FTS method. Only the two dimer particles
#(start[:2]) define the string collective variable.
ftslayer = FTSLayer(react_config=start[:2].flatten(),prod_config=end[:2].flatten(),num_nodes=world_size,boxsize=box[0],kappa_perpend=kappa_perp, kappa_parallel=kappa_par, num_particles=2).to('cpu')
#Load the pre-initialized neural network and string
committor.load_state_dict(torch.load("simple_params", map_location=torch.device('cpu')))
ftslayer.load_state_dict(torch.load("../test_string_config"))
ftslayer.set_tangent()
n_boundary_samples = 100
batch_size = 8
period = 25  # sampler steps between saved samples; output period is batch_size*period
# Three samplers: boundary-condition (unrestrained), production (restrained,
# resumed from its dumped state), and an unrestrained one used by the
# supervised committor loss to launch trial trajectories.
dimer_sim_bc = DimerFTSUS( param="param_bc",
                           config=initial_config.clone().detach(),
                           rank=rank,
                           beta=1/kT,
                           kappa = 0.0,
                           save_config=False,
                           mpi_group = mpi_group,
                           ftslayer=ftslayer,
                           output_time=batch_size*period
                           )
dimer_sim = DimerFTSUS( param="param",
                        config=initial_config.clone().detach(),
                        rank=rank,
                        beta=1/kT,
                        kappa = kappa_perp,
                        save_config=True,
                        mpi_group = mpi_group,
                        ftslayer=ftslayer,
                        output_time=batch_size*period
                        )
dimer_sim.useRestart()
dimer_sim_com = DimerFTSUS( param="param",
                            config=initial_config.clone().detach(),
                            rank=rank,
                            beta=1/kT,
                            kappa = 0.0,
                            save_config=False,
                            mpi_group = mpi_group,
                            ftslayer=ftslayer,
                            output_time=batch_size*period
                            )
#Construct datarunner
datarunner = EXPReweightStringSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=Np*3)
#Construct optimizers
optimizer = ParallelAdam(committor.parameters(), lr=1e-4)
optimizer.load_state_dict(torch.load("optimizer_params"))
#Initialize main loss function and optimizers
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
# n_bc_samples = 0: no fresh boundary samples on restart; the stored sample
# sets are reloaded from disk below.
loss = BKELossEXP( bc_sampler = dimer_sim_bc,
                   committor = committor,
                   lambda_A = 1e4,
                   lambda_B = 1e4,
                   start_react = start,
                   start_prod = end,
                   n_bc_samples = 0,
                   bc_period = 100,
                   batch_size_bc = 0.5,
                   )
# Supervised committor loss. NOTE(review): the cl_start/cl_end passed here
# (10/5000) differ from the module-level ramp bounds set below (200/10000);
# the constructor values appear to be superseded -- confirm against
# CommittorLoss2.
cmloss = CommittorLoss2( cl_sampler = dimer_sim_com,
                         committor = committor,
                         lambda_cl=100.0,
                         cl_start=10,
                         cl_end=5000,
                         cl_rate=10,
                         cl_trials=100,
                         batch_size_cl=0.5
                         )
# Only rank 0 writes the statistics file; other ranks keep the placeholder.
loss_io = []
if rank == 0:
    loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'a')
# Linear ramp of the supervised-loss weight between iterations cl_start and
# cl_end, from the reloaded lambda_cl up to lambda_cl_end.
lambda_cl_end = 10**3
cl_start=200
cl_end=10000
cl_stepsize = (lambda_cl_end-cmloss.lambda_cl)/(cl_end-cl_start)
# Reload reactant, product configurations saved by the initial run.
loss.react_configs = torch.load("react_configs_"+str(rank+1)+".pt")
loss.prod_configs = torch.load("prod_configs_"+str(rank+1)+".pt")
loss.n_bc_samples = torch.load("n_bc_samples_"+str(rank+1)+".pt")
# cmloss variables: sample bank and weight saved by the previous run.
cmloss.lambda_cl = torch.load("lambda_cl_"+str(rank+1)+".pt")
cmloss.cl_configs = torch.load("cl_configs_"+str(rank+1)+".pt")
cmloss.cl_configs_values = torch.load("cl_configs_values_"+str(rank+1)+".pt")
cmloss.cl_configs_count = torch.load("cl_configs_count_"+str(rank+1)+".pt")
#Training loop
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    time_max = 9.0*60  # wall-clock budget in seconds (~9 minutes)
    time_out = True
    #for i in range(count,count+100):#20000)):
    i = count
    while(time_out):
        # Advance the supervised-loss weight along its linear ramp.
        if (i > cl_start) and (i <= cl_end):
            cmloss.lambda_cl += cl_stepsize
        elif i > cl_end:
            cmloss.lambda_cl = lambda_cl_end
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        # Persist sampler state every iteration so the job can be resumed.
        dimer_sim.dumpRestart()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        # forward + backward + optimize
        bkecost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cmcost = cmloss(i, dimer_sim.getConfig())
        cost = bkecost+cmcost
        cost.backward()
        optimizer.step()
        # print statistics
        with torch.no_grad():
            main_loss = loss.main_loss
            cm_loss = cmloss.cl_loss
            bc_loss = loss.bc_loss
            #Print statistics
            if rank == 0:
                # Periodic timestamped checkpoint plus a rolling "latest" one;
                # count.txt records the next iteration index for restarts.
                if i%100 == 0:
                    torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
                torch.save(committor.state_dict(), "{}_params".format(prefix))
                torch.save(optimizer.state_dict(), "optimizer_params")
                np.savetxt("count.txt", np.array((i+1,)))
                loss_io.write('{:d} {:.5E} {:.5E} {:.5E}\n'.format(i+1,main_loss.item(),bc_loss.item(),cm_loss.item()))
                loss_io.flush()
                # Persist the supervised-loss sample bank for restarts.
                torch.save(cmloss.lambda_cl, "lambda_cl_"+str(rank+1)+".pt")
                torch.save(cmloss.cl_configs, "cl_configs_"+str(rank+1)+".pt")
                torch.save(cmloss.cl_configs_values, "cl_configs_values_"+str(rank+1)+".pt")
                torch.save(cmloss.cl_configs_count, "cl_configs_count_"+str(rank+1)+".pt")
        i = i+1
        t1 = time.time()
        time_diff = t1-t0
        time_diff = torch.tensor(time_diff)
        # Use the slowest rank's elapsed time so every rank stops in the same
        # iteration (avoids a hang in the collectives above).
        dist.all_reduce(time_diff,op=dist.ReduceOp.MAX)
        if time_diff > time_max:
            time_out = False
| 7,980 | 37.742718 | 197 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/ftsus_sl/run_ftsus_sl.py | import sys
# -----------------------------------------------------------------------------
# Fresh-start driver: trains a SchNet committor network from scratch with the
# FTS-US sampler plus a supervised committor-loss term (CommittorLoss2) whose
# weight lambda_cl ramps linearly from 100 to 10**3 between iterations 200 and
# 10000. Uses a fixed RNG seed, generates and saves boundary-condition sample
# sets for later restarts (run_ftsus_sl_r.py), and runs until a shared
# wall-clock budget is exhausted.
# -----------------------------------------------------------------------------
sys.path.append("..")
import time
# Wall-clock reference for the time budget enforced at the bottom of the loop.
t0 = time.time()
#Import necessary tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
import scipy.spatial
#Import necessary tools from tpstorch
from dimer_ftsus_nosolv import DimerFTSUS
from committor_nn import initializeConfig, CommittorNet, CommittorNetBP, CommittorNetDR, SchNet
from dimer_ftsus_nosolv import FTSLayerUSCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam
from tpstorch.ml.nn import BKELossEXP, CommittorLoss2
import numpy as np
#Grab the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
# Fixed seed for the initial run (restarts seed from count.txt instead).
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
#(1) Initialization
r0 = 2**(1/6.0)
width = 0.25
Np = 30+2  # total particle count; the first two rows are the dimer pair
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
# Umbrella spring constants perpendicular/parallel to the string.
kappa_perp = 1200.0
kappa_par = 1200.0
kT = 1.0
# Per-rank starting configuration plus reactant/product reference
# configurations (xyz files: column 0 is type, columns 1-3 are coordinates).
initial_config = np.genfromtxt("../restart/config_"+str(rank)+".xyz", usecols=(1,2,3))
start = np.genfromtxt("../restart_bc/config_"+str(rank)+"_react.xyz", usecols=(1,2,3))
end = np.genfromtxt("../restart_bc/config_"+str(rank)+"_prod.xyz", usecols=(1,2,3))
initial_config = torch.from_numpy(initial_config)
start = torch.from_numpy(start)
end = torch.from_numpy(end)
initial_config = initial_config.float()
start = start.float()
end = end.float()
#Initialize neural net
#committor = torch.jit.script(CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu'))
#committor = torch.jit.script(CommittorNetBP(num_nodes=200, boxsize=box[0], Np=32,rc=2.5,sigma=1.0).to('cpu'))
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
#Initialize the string for FTS method. Only the two dimer particles
#(start[:2]) define the string collective variable.
ftslayer = FTSLayer(react_config=start[:2].flatten(),prod_config=end[:2].flatten(),num_nodes=world_size,boxsize=box[0],kappa_perpend=kappa_perp, kappa_parallel=kappa_par, num_particles=2).to('cpu')
#Load the pre-initialized neural network and string
committor.load_state_dict(torch.load("initial_1hl_nn", map_location=torch.device('cpu')))
ftslayer.load_state_dict(torch.load("../test_string_config"))
ftslayer.set_tangent()
n_boundary_samples = 100
batch_size = 8
period = 25  # sampler steps between saved samples; output period is batch_size*period
# Three samplers: boundary-condition (unrestrained), production (restrained),
# and an unrestrained one used by the supervised committor loss to launch
# trial trajectories.
dimer_sim_bc = DimerFTSUS( param="param_bc",
                           config=initial_config.clone().detach(),
                           rank=rank,
                           beta=1/kT,
                           kappa = 0.0,
                           save_config=False,
                           mpi_group = mpi_group,
                           ftslayer=ftslayer,
                           output_time=batch_size*period
                           )
dimer_sim = DimerFTSUS( param="param",
                        config=initial_config.clone().detach(),
                        rank=rank,
                        beta=1/kT,
                        kappa = kappa_perp,
                        save_config=True,
                        mpi_group = mpi_group,
                        ftslayer=ftslayer,
                        output_time=batch_size*period
                        )
dimer_sim_com = DimerFTSUS( param="param",
                            config=initial_config.clone().detach(),
                            rank=rank,
                            beta=1/kT,
                            kappa = 0.0,
                            save_config=False,
                            mpi_group = mpi_group,
                            ftslayer=ftslayer,
                            output_time=batch_size*period
                            )
#Construct datarunner
datarunner = EXPReweightStringSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=Np*3)
#Construct optimizers
optimizer = ParallelAdam(committor.parameters(), lr=1e-4)
#Initialize main loss function and optimizers
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
# n_bc_samples = 100: this initial run generates the boundary sample sets,
# which are saved to disk below for later restarts.
loss = BKELossEXP( bc_sampler = dimer_sim_bc,
                   committor = committor,
                   lambda_A = 1e4,
                   lambda_B = 1e4,
                   start_react = start,
                   start_prod = end,
                   n_bc_samples = 100,
                   bc_period = 100,
                   batch_size_bc = 0.5,
                   )
# Supervised committor loss. NOTE(review): the cl_start/cl_end passed here
# (10/5000) differ from the module-level ramp bounds set below (200/10000);
# the constructor values appear to be superseded -- confirm against
# CommittorLoss2.
cmloss = CommittorLoss2( cl_sampler = dimer_sim_com,
                         committor = committor,
                         lambda_cl=100.0,
                         cl_start=10,
                         cl_end=5000,
                         cl_rate=10,
                         cl_trials=100,
                         batch_size_cl=0.5
                         )
# Only rank 0 writes the statistics file; other ranks keep the placeholder.
loss_io = []
if rank == 0:
    loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
# Linear ramp of the supervised-loss weight between iterations cl_start and
# cl_end, from its initial value (100.0) up to lambda_cl_end.
lambda_cl_end = 10**3
cl_start=200
cl_end=10000
cl_stepsize = (lambda_cl_end-cmloss.lambda_cl)/(cl_end-cl_start)
# Save reactant, product configurations so restart runs can reload them.
torch.save(loss.react_configs, "react_configs_"+str(rank+1)+".pt")
torch.save(loss.prod_configs, "prod_configs_"+str(rank+1)+".pt")
torch.save(loss.n_bc_samples, "n_bc_samples_"+str(rank+1)+".pt")
#Training loop
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    time_max = 9.0*60  # wall-clock budget in seconds (~9 minutes)
    time_out = True
    #for i in range(count,count+100):#20000)):
    i = 0
    while(time_out):
        # Advance the supervised-loss weight along its linear ramp.
        if (i > cl_start) and (i <= cl_end):
            cmloss.lambda_cl += cl_stepsize
        elif i > cl_end:
            cmloss.lambda_cl = lambda_cl_end
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        # Persist sampler state every iteration so the job can be resumed.
        dimer_sim.dumpRestart()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        # forward + backward + optimize
        bkecost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cmcost = cmloss(i, dimer_sim.getConfig())
        cost = bkecost+cmcost
        cost.backward()
        optimizer.step()
        # print statistics
        with torch.no_grad():
            main_loss = loss.main_loss
            cm_loss = cmloss.cl_loss
            bc_loss = loss.bc_loss
            #Print statistics
            if rank == 0:
                # Periodic timestamped checkpoint plus a rolling "latest" one;
                # count.txt records the next iteration index for restarts.
                if i%100 == 0:
                    torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
                torch.save(committor.state_dict(), "{}_params".format(prefix))
                torch.save(optimizer.state_dict(), "optimizer_params")
                np.savetxt("count.txt", np.array((i+1,)))
                loss_io.write('{:d} {:.5E} {:.5E} {:.5E}\n'.format(i+1,main_loss.item(),bc_loss.item(),cm_loss.item()))
                loss_io.flush()
                # Persist the supervised-loss sample bank for restarts.
                torch.save(cmloss.lambda_cl, "lambda_cl_"+str(rank+1)+".pt")
                torch.save(cmloss.cl_configs, "cl_configs_"+str(rank+1)+".pt")
                torch.save(cmloss.cl_configs_values, "cl_configs_values_"+str(rank+1)+".pt")
                torch.save(cmloss.cl_configs_count, "cl_configs_count_"+str(rank+1)+".pt")
        i = i+1
        t1 = time.time()
        time_diff = t1-t0
        time_diff = torch.tensor(time_diff)
        # Use the slowest rank's elapsed time so every rank stops in the same
        # iteration (avoids a hang in the collectives above).
        dist.all_reduce(time_diff,op=dist.ReduceOp.MAX)
        if time_diff > time_max:
            time_out = False
| 7,539 | 37.274112 | 197 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/ftsme_sl/run_ftsme_sl_r.py | import sys
# -----------------------------------------------------------------------------
# Restart driver: resumes training of a SchNet committor network with the
# finite-temperature string method (DimerFTS, BKELossFTS) plus a supervised
# committor-loss term (CommittorLoss2). Reloads all mutable state from disk
# and runs until a shared wall-clock budget is exhausted. Each iteration the
# current string node for this rank is appended to an extended-XYZ trajectory
# and the dimer separation along the string is logged.
# NOTE(review): the string-update step (ftsoptimizer.step) is commented out in
# the loop, so the string itself stays frozen during this run.
# -----------------------------------------------------------------------------
sys.path.append("..")
import time
# Wall-clock reference for the time budget enforced at the bottom of the loop.
t0 = time.time()
#Import necessary tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
import scipy.spatial
#Import necessary tools from tpstorch
from dimer_fts_nosolv import dimer_reorient, DimerFTS
from committor_nn import initializeConfig, CommittorNet, CommittorNetBP, CommittorNetDR, SchNet
from dimer_fts_nosolv import FTSLayerCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSUpdate
from tpstorch.ml.nn import BKELossFTS, CommittorLoss2
import numpy as np
#Grab the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
# reload count: iteration index saved by the previous run; also seeds the RNGs
# so each restart draws a different random stream.
count = int(np.genfromtxt("count.txt"))
torch.manual_seed(count)
np.random.seed(count)
prefix = 'simple'
#(1) Initialization
r0 = 2**(1/6.0)
width = 0.25
Np = 30+2  # total particle count; the first two rows are the dimer pair
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
kT = 1.0
# Per-rank restart configuration plus reactant/product reference configurations
# (xyz files: column 0 is the particle type, columns 1-3 are coordinates).
initial_config = np.genfromtxt("../restart/config_"+str(rank)+".xyz", usecols=(1,2,3))
start = np.genfromtxt("../restart_bc/config_"+str(rank)+"_react.xyz", usecols=(1,2,3))
end = np.genfromtxt("../restart_bc/config_"+str(rank)+"_prod.xyz", usecols=(1,2,3))
initial_config = torch.from_numpy(initial_config)
start = torch.from_numpy(start)
end = torch.from_numpy(end)
initial_config = initial_config.float()
start = start.float()
end = end.float()
#Initialize neural net
#committor = torch.jit.script(CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu'))
#committor = torch.jit.script(CommittorNetBP(num_nodes=200, boxsize=box[0], Np=32,rc=2.5,sigma=1.0).to('cpu'))
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
#Initialize the string for FTS method. Only the two dimer particles
#(start[:2]) define the string collective variable.
ftslayer = FTSLayer(react_config=start[:2].flatten(),prod_config=end[:2].flatten(),num_nodes=world_size,boxsize=box[0],num_particles=2).to('cpu')
# Resume both the network and the string from the rolling checkpoints.
committor.load_state_dict(torch.load("simple_params", map_location=torch.device('cpu')))
ftslayer.load_state_dict(torch.load("simple_string"))
n_boundary_samples = 100
batch_size = 8
period = 25  # sampler steps between saved samples; output period is batch_size*period
#Initialize the dimer simulation
# Three samplers: boundary-condition, production (resumed from its dumped
# state), and one used by the supervised committor loss for trial runs.
dimer_sim_bc = DimerFTS(param="param_bc",
                        config=initial_config.detach().clone(),
                        rank=rank,
                        beta=1/kT,
                        save_config=False,
                        mpi_group = mpi_group,
                        ftslayer=ftslayer,
                        output_time=batch_size*period
                        )
dimer_sim = DimerFTS( param="param",
                      config=initial_config.detach().clone(),
                      rank=rank,
                      beta=1/kT,
                      save_config=True,
                      mpi_group = mpi_group,
                      ftslayer=ftslayer,
                      output_time=batch_size*period
                      )
dimer_sim.useRestart()
dimer_sim_com = DimerFTS(param="param",
                         config=initial_config.detach().clone(),
                         rank=rank,
                         beta=1/kT,
                         save_config=False,
                         mpi_group = mpi_group,
                         ftslayer=ftslayer,
                         output_time=batch_size*period
                         )
#Construct datarunner
datarunner = FTSSimulation(dimer_sim, committor = committor, nn_training = True, period=period, batch_size=batch_size, dimN=Np*3)
#Construct optimizers
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=0.01,momentum=0.9,nesterov=True,kappa=0.1,periodic=True,dim=3)
optimizer = ParallelAdam(committor.parameters(), lr=1e-4)
optimizer.load_state_dict(torch.load("optimizer_params"))
ftsoptimizer.load_state_dict(torch.load("ftsoptimizer_params"))
#Initialize main loss function
# n_bc_samples = 0: no fresh boundary samples on restart; the stored sample
# sets are reloaded from disk below.
loss = BKELossFTS( bc_sampler = dimer_sim_bc,
                   committor = committor,
                   lambda_A = 1e4,
                   lambda_B = 1e4,
                   start_react = start,
                   start_prod = end,
                   n_bc_samples = 0,
                   bc_period = 100,
                   batch_size_bc = 0.5,
                   tol = 5e-10,
                   mode= 'shift')
# Supervised committor loss. NOTE(review): the cl_start/cl_end passed here
# (10/5000) differ from the module-level ramp bounds set below (200/10000);
# the constructor values appear to be superseded -- confirm against
# CommittorLoss2.
cmloss = CommittorLoss2( cl_sampler = dimer_sim_com,
                         committor = committor,
                         lambda_cl=100.0,
                         cl_start=10,
                         cl_end=5000,
                         cl_rate=10,
                         cl_trials=100,
                         batch_size_cl=0.5
                         )
# Only rank 0 writes the statistics file; other ranks keep the placeholder.
loss_io = []
if rank == 0:
    loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'a')
#Training loop
# Linear ramp of the supervised-loss weight between iterations cl_start and
# cl_end, from the reloaded lambda_cl up to lambda_cl_end.
lambda_cl_end = 10**3
cl_start=200
cl_end=10000
cl_stepsize = (lambda_cl_end-cmloss.lambda_cl)/(cl_end-cl_start)
# Reload reactant, product configurations saved by the initial run.
loss.react_configs = torch.load("react_configs_"+str(rank+1)+".pt")
loss.prod_configs = torch.load("prod_configs_"+str(rank+1)+".pt")
loss.n_bc_samples = torch.load("n_bc_samples_"+str(rank+1)+".pt")
# cmloss variables: sample bank and weight saved by the previous run.
cmloss.lambda_cl = torch.load("lambda_cl_"+str(rank+1)+".pt")
cmloss.cl_configs = torch.load("cl_configs_"+str(rank+1)+".pt")
cmloss.cl_configs_values = torch.load("cl_configs_values_"+str(rank+1)+".pt")
cmloss.cl_configs_count = torch.load("cl_configs_count_"+str(rank+1)+".pt")
#We can train in terms of epochs, but we will keep it in one epoch
# f: per-rank string-node trajectory (extended XYZ); g: per-rank log of the
# dimer separation along the string. Both opened in append mode for restarts.
with open("string_{}_config.xyz".format(rank),"a") as f, open("string_{}_log.txt".format(rank),"a") as g:
    for epoch in range(1):
        if rank == 0:
            print("epoch: [{}]".format(epoch+1))
        time_max = 9.0*60  # wall-clock budget in seconds (~9 minutes)
        time_out = True
        #for i in range(count,count+100):#20000)):
        i = count
        while(time_out):
            # Advance the supervised-loss weight along its linear ramp.
            if (i > cl_start) and (i <= cl_end):
                cmloss.lambda_cl += cl_stepsize
            elif i > cl_end:
                cmloss.lambda_cl = lambda_cl_end
            # get data and reweighting factors
            configs, grad_xs = datarunner.runSimulation()
            # Persist sampler state every iteration so the job can be resumed.
            dimer_sim.dumpRestart()
            # zero the parameter gradients
            optimizer.zero_grad()
            # (2) Update the neural network
            # forward + backward + optimize
            bkecost = loss(grad_xs, dimer_sim.rejection_count)
            cmcost = cmloss(i, dimer_sim.getConfig())
            cost = bkecost+cmcost
            cost.backward()
            optimizer.step()
            #ftsoptimizer.step(configs[:,:6],len(configs),boxsize=box[0],reorient_sample=dimer_reorient)
            # print statistics
            with torch.no_grad():
                # Dump this rank's string node (two dimer beads) as one
                # extended-XYZ frame.
                string_temp = ftslayer.string[rank].view(2,3)
                f.write("2 \n")
                f.write('Lattice=\"10.0 0.0 0.0 0.0 10.0 0.0 0.0 0.0 10.0\" ')
                f.write('Origin=\"-5.0 -5.0 -5.0\" ')
                f.write("Properties=type:S:1:pos:R:3:aux1:R:1 \n")
                f.write("2 {} {} {} {} \n".format(string_temp[0,0],string_temp[0,1], string_temp[0,2],0.25))
                f.write("2 {} {} {} {} \n".format(string_temp[1,0],string_temp[1,1], string_temp[1,2],0.25))
                f.flush()
                g.write("{} {} \n".format((i+1)*period,torch.norm(string_temp[0]-string_temp[1])))
                g.flush()
                main_loss = loss.main_loss
                bc_loss = loss.bc_loss
                cm_loss = cmloss.cl_loss
                #Print statistics
                if rank == 0:
                    # Periodic timestamped checkpoints (network and string)
                    # plus rolling "latest" ones; count.txt records the next
                    # iteration index for restarts.
                    if i%100 == 0:
                        torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
                        torch.save(ftslayer.state_dict(), "{}_string_t_{}_{}".format(prefix,i,rank))
                    torch.save(committor.state_dict(), "{}_params".format(prefix))
                    torch.save(ftslayer.state_dict(), "{}_string".format(prefix))
                    torch.save(optimizer.state_dict(), "optimizer_params")
                    torch.save(ftsoptimizer.state_dict(), "ftsoptimizer_params")
                    np.savetxt("count.txt", np.array((i+1,)))
                    loss_io.write('{:d} {:.5E} {:.5E} {:.5E}\n'.format(i+1,main_loss.item(),bc_loss.item(),cm_loss.item()))
                    loss_io.flush()
                    # Persist the supervised-loss sample bank for restarts.
                    torch.save(cmloss.lambda_cl, "lambda_cl_"+str(rank+1)+".pt")
                    torch.save(cmloss.cl_configs, "cl_configs_"+str(rank+1)+".pt")
                    torch.save(cmloss.cl_configs_values, "cl_configs_values_"+str(rank+1)+".pt")
                    torch.save(cmloss.cl_configs_count, "cl_configs_count_"+str(rank+1)+".pt")
            i = i+1
            t1 = time.time()
            time_diff = t1-t0
            time_diff = torch.tensor(time_diff)
            # Use the slowest rank's elapsed time so every rank stops in the
            # same iteration (avoids a hang in the collectives above).
            dist.all_reduce(time_diff,op=dist.ReduceOp.MAX)
            if time_diff > time_max:
                time_out = False
| 9,196 | 40.995434 | 181 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/ftsme_sl/run_ftsme_sl.py | import sys
sys.path.append("..")
import time
t0 = time.time()
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
import scipy.spatial
#Import necessarry tools from tpstorch
from dimer_fts_nosolv import dimer_reorient, DimerFTS
from committor_nn import initializeConfig, CommittorNet, CommittorNetBP, CommittorNetDR, SchNet
from dimer_fts_nosolv import FTSLayerCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSUpdate
from tpstorch.ml.nn import BKELossFTS, CommittorLoss2
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
#(1) Initialization
r0 = 2**(1/6.0)
width = 0.25
Np = 30+2
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
kT = 1.0
initial_config = np.genfromtxt("../restart/config_"+str(rank)+".xyz", usecols=(1,2,3))
start = np.genfromtxt("../restart_bc/config_"+str(rank)+"_react.xyz", usecols=(1,2,3))
end = np.genfromtxt("../restart_bc/config_"+str(rank)+"_prod.xyz", usecols=(1,2,3))
initial_config = torch.from_numpy(initial_config)
start = torch.from_numpy(start)
end = torch.from_numpy(end)
initial_config = initial_config.float()
start = start.float()
end = end.float()
#Initialize neural net
#committor = torch.jit.script(CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu'))
#committor = torch.jit.script(CommittorNetBP(num_nodes=200, boxsize=box[0], Np=32,rc=2.5,sigma=1.0).to('cpu'))
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
#Initialize the string for FTS method
ftslayer = FTSLayer(react_config=start[:2].flatten(),prod_config=end[:2].flatten(),num_nodes=world_size,boxsize=box[0],num_particles=2).to('cpu')
committor.load_state_dict(torch.load("initial_1hl_nn", map_location=torch.device('cpu')))
ftslayer.load_state_dict(torch.load("../test_string_config"))
n_boundary_samples = 100
batch_size = 8
period = 25
#Initialize the dimer simulation
dimer_sim_bc = DimerFTS(param="param_bc",
config=initial_config.detach().clone(),
rank=rank,
beta=1/kT,
save_config=False,
mpi_group = mpi_group,
ftslayer=ftslayer,
output_time=batch_size*period
)
dimer_sim = DimerFTS( param="param",
config=initial_config.detach().clone(),
rank=rank,
beta=1/kT,
save_config=True,
mpi_group = mpi_group,
ftslayer=ftslayer,
output_time=batch_size*period
)
dimer_sim_com = DimerFTS(param="param",
config=initial_config.detach().clone(),
rank=rank,
beta=1/kT,
save_config=False,
mpi_group = mpi_group,
ftslayer=ftslayer,
output_time=batch_size*period
)
#Construct datarunner
datarunner = FTSSimulation(dimer_sim, committor = committor, nn_training = True, period=period, batch_size=batch_size, dimN=Np*3)
#Construct optimizers
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=0.01,momentum=0.9,nesterov=True,kappa=0.1,periodic=True,dim=3)
optimizer = ParallelAdam(committor.parameters(), lr=1e-4)
#Initialize main loss function
loss = BKELossFTS( bc_sampler = dimer_sim_bc,
committor = committor,
lambda_A = 1e4,
lambda_B = 1e4,
start_react = start,
start_prod = end,
n_bc_samples = 100,
bc_period = 100,
batch_size_bc = 0.5,
tol = 5e-10,
mode= 'shift')
cmloss = CommittorLoss2( cl_sampler = dimer_sim_com,
committor = committor,
lambda_cl=100.0,
cl_start=10,
cl_end=5000,
cl_rate=10,
cl_trials=100,
batch_size_cl=0.5
)
loss_io = []
if rank == 0:
loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
#Training loop
lambda_cl_end = 10**3
cl_start=200
cl_end=10000
cl_stepsize = (lambda_cl_end-cmloss.lambda_cl)/(cl_end-cl_start)
# Save reactant, product configurations
torch.save(loss.react_configs, "react_configs_"+str(rank+1)+".pt")
torch.save(loss.prod_configs, "prod_configs_"+str(rank+1)+".pt")
torch.save(loss.n_bc_samples, "n_bc_samples_"+str(rank+1)+".pt")
#We can train in terms of epochs, but we will keep it in one epoch
with open("string_{}_config.xyz".format(rank),"w") as f, open("string_{}_log.txt".format(rank),"w") as g:
for epoch in range(1):
if rank == 0:
print("epoch: [{}]".format(epoch+1))
time_max = 9.0*60
time_out = True
#for i in range(count,count+100):#20000)):
i = 0
while(time_out):
if (i > cl_start) and (i <= cl_end):
cmloss.lambda_cl += cl_stepsize
elif i > cl_end:
cmloss.lambda_cl = lambda_cl_end
# get data and reweighting factors
configs, grad_xs = datarunner.runSimulation()
dimer_sim.dumpRestart()
# zero the parameter gradients
optimizer.zero_grad()
# (2) Update the neural network
# forward + backward + optimize
bkecost = loss(grad_xs, dimer_sim.rejection_count)
cmcost = cmloss(i, dimer_sim.getConfig())
cost = bkecost+cmcost
cost.backward()
optimizer.step()
#ftsoptimizer.step(configs[:,:6],len(configs),boxsize=box[0],reorient_sample=dimer_reorient)
# print statistics
with torch.no_grad():
string_temp = ftslayer.string[rank].view(2,3)
f.write("2 \n")
f.write('Lattice=\"10.0 0.0 0.0 0.0 10.0 0.0 0.0 0.0 10.0\" ')
f.write('Origin=\"-5.0 -5.0 -5.0\" ')
f.write("Properties=type:S:1:pos:R:3:aux1:R:1 \n")
f.write("2 {} {} {} {} \n".format(string_temp[0,0],string_temp[0,1], string_temp[0,2],0.25))
f.write("2 {} {} {} {} \n".format(string_temp[1,0],string_temp[1,1], string_temp[1,2],0.25))
f.flush()
g.write("{} {} \n".format((i+1)*period,torch.norm(string_temp[0]-string_temp[1])))
g.flush()
main_loss = loss.main_loss
bc_loss = loss.bc_loss
cm_loss = cmloss.cl_loss
#Print statistics
if rank == 0:
if i%100 == 0:
torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
torch.save(ftslayer.state_dict(), "{}_string_t_{}_{}".format(prefix,i,rank))
torch.save(committor.state_dict(), "{}_params".format(prefix))
torch.save(ftslayer.state_dict(), "{}_string".format(prefix))
torch.save(optimizer.state_dict(), "optimizer_params")
torch.save(ftsoptimizer.state_dict(), "ftsoptimizer_params")
np.savetxt("count.txt", np.array((i+1,)))
loss_io.write('{:d} {:.5E} {:.5E} {:.5E}\n'.format(i+1,main_loss.item(),bc_loss.item(),cm_loss.item()))
loss_io.flush()
torch.save(cmloss.lambda_cl, "lambda_cl_"+str(rank+1)+".pt")
torch.save(cmloss.cl_configs, "cl_configs_"+str(rank+1)+".pt")
torch.save(cmloss.cl_configs_values, "cl_configs_values_"+str(rank+1)+".pt")
torch.save(cmloss.cl_configs_count, "cl_configs_count_"+str(rank+1)+".pt")
i = i+1
t1 = time.time()
time_diff = t1-t0
time_diff = torch.tensor(time_diff)
dist.all_reduce(time_diff,op=dist.ReduceOp.MAX)
if time_diff > time_max:
time_out = False
| 8,699 | 40.626794 | 181 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/ftme_sl/dimer_fts_nosolv.py | import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
import scipy.spatial
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_solv_ml import MyMLFTSSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayer#US
#Import any other thing
import tqdm, sys
class FTSLayerCustom(FTSLayer):
    r"""A linear layer whose parameters correspond to the string obtained by the
    general FTS method, customized to take into account the rotational and
    translational symmetry of the dimer problem.

    Distances between a sampled configuration and string nodes are measured on
    the dimer only (the first two particles): the minimum-image pair vector is
    rebuilt, the pair is shifted to its center of mass, and the residual
    rigid-body rotation is removed via the Kabsch algorithm
    (``scipy.spatial.transform.Rotation.align_vectors``) with weight on the two
    dimer particles only.

    Args:
        react_config (torch.Tensor): starting configuration in the reactant basin.
        prod_config (torch.Tensor): starting configuration in the product basin.
        num_nodes (int): number of nodes on the string.
        boxsize (float): edge length of the cubic periodic box.
        num_particles (int): particles stored per string node (2 for the dimer).
    """
    def __init__(self, react_config, prod_config, num_nodes, boxsize, num_particles):
        super(FTSLayerCustom, self).__init__(react_config, prod_config, num_nodes)
        self.boxsize = boxsize
        self.Np = num_particles

    @torch.no_grad()
    def compute_metric(self, x):
        """Return a (_world_size,) tensor of squared RMSDs between the dimer in
        the flattened configuration `x` and every node of the string."""
        ##(1) Pre-processing so that dimer is at the center
        # view(-1, 3) generalizes the original hard-coded view(32, 3); only the
        # first two rows (the dimer) are used, so behavior is identical for the
        # 32-particle systems while also supporting other particle counts.
        old_x = x.view(-1, 3)[:2].clone()
        #Compute the pair distance (minimum image)
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        #Do the same thing to the string configurations
        new_string = self.string.view(_world_size, self.Np, 3).clone()
        #Compute the pair distance
        ds = (new_string[:,0]-new_string[:,1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[:,0] = ds+new_string[:,1]
        s_com = 0.5*(new_string[:,0]+new_string[:,1])
        for i in range(self.Np):
            old_x[i] -= x_com
            new_string[:,i] -= s_com
            old_x[i] -= torch.round(old_x[i]/self.boxsize)*self.boxsize
            new_string[:,i] -= torch.round(new_string[:,i]/self.boxsize)*self.boxsize
        ##(2) Rotate the system using Kabsch algorithm; only the two dimer
        ## particles carry weight in the alignment.
        dist_sq_list = torch.zeros(_world_size)
        weights = np.zeros(self.Np)
        weights[0] = 1.0
        weights[1] = 1.0
        for i in range(_world_size):
            _, rmsd = scipy.spatial.transform.Rotation.align_vectors(old_x.numpy(), new_string[i].numpy(), weights=weights)
            dist_sq_list[i] = rmsd**2
        return dist_sq_list

    #WARNING! Needs to be changed
    #Only testing if configurations are constrained properly
    @torch.no_grad()
    def forward(self, x):
        """Return the squared RMSD between the dimer in `x` and this rank's
        string node only (cf. compute_metric, which scans all nodes)."""
        ##(1) Pre-processing so that dimer is at the center
        old_x = x.view(-1, 3)[:2].clone()  # was view(32, 3); identical for 32 particles
        #Compute the pair distance
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        #Do the same thing to this rank's string node
        new_string = self.string[_rank].view(self.Np, 3).clone()
        #Compute the pair distance
        ds = (new_string[0]-new_string[1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[0] = ds+new_string[1]
        s_com = 0.5*(new_string[0]+new_string[1])
        for i in range(self.Np):
            old_x[i] -= x_com
            new_string[i] -= s_com
            old_x[i] -= torch.round(old_x[i]/self.boxsize)*self.boxsize
            new_string[i] -= torch.round(new_string[i]/self.boxsize)*self.boxsize
        ##(2) Rotate the system using Kabsch algorithm
        # (Removed an unused `dist_sq_list` local that was never read here.)
        weights = np.zeros(self.Np)
        weights[0] = 1.0
        weights[1] = 1.0
        _, rmsd = scipy.spatial.transform.Rotation.align_vectors(old_x.numpy(), new_string.numpy(), weights=weights)
        return rmsd**2
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class DimerFTS(MyMLFTSSampler):
    """Sampler for the solvated dimer used with the FTS method.

    Wraps the project sampler `MyMLFTSSampler` and constrains sampling to this
    rank's Voronoi cell of the string held by `ftslayer`: after each unbiased
    step the move is rolled back unless the new configuration is still closest
    to this rank's string node (see `step`).

    Args:
        param (str): name of the parameter file read by the underlying sampler.
        config (torch.Tensor): initial configuration, shape (Np, 3).
        rank (int): this process's rank (selects the string node).
        beta (float): inverse temperature 1/kT.
        mpi_group: MPI communicator group used by the underlying sampler.
        ftslayer (FTSLayerCustom): string holder used for the Voronoi metric.
        output_time (int): dump period (in steps) used when save_config is True.
        save_config (bool): if True, periodically dump configurations.
    """
    def __init__(self, param, config, rank, beta, mpi_group, ftslayer, output_time, save_config=False):
        super(DimerFTS, self).__init__(param, config.detach().clone(), rank, beta, mpi_group)
        self.output_time = output_time
        self.save_config = save_config
        self.timestep = 0
        self.torch_config = config
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta
        self.ftslayer = ftslayer
        self.setConfig(config)
        self.Np = 30+2  # 30 solvent particles plus the 2 dimer particles

    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        # Don't have to worry about all that much; can just set it
        self.setConfig(config.detach().clone())

    @torch.no_grad()
    def reset(self):
        """Zero the step counter and, if the current configuration has left
        this rank's Voronoi cell, teleport the dimer onto this rank's node."""
        self.steps = 0
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            #This teleports the dimer to the origin. But it should be okay?
            state_old = self.getConfig().detach().clone()
            state_old[:2] = self.ftslayer.string[_rank].view(2,3).detach().clone()
            self.setConfig(state_old)

    def computeMetric(self):
        # Squared RMSD of the current dimer to every string node.
        self.distance_sq_list = self.ftslayer.compute_metric(self.getConfig().flatten())

    @torch.no_grad()
    def step(self):
        """One unbiased step, rejected if it leaves this rank's Voronoi cell."""
        state_old = self.getConfig().detach().clone()
        self.stepUnbiased()
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            self.setConfig(state_old)
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            # .grad is None until a backward pass has populated it.
            pass

    def step_unbiased(self):
        """One plain (unconstrained) step."""
        with torch.no_grad():
            self.stepUnbiased()
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            # .grad is None until a backward pass has populated it.
            pass

    @torch.no_grad()
    def isReactant(self, x=None):
        """True if the dimer bond length is at most r0 = 2^(1/6)."""
        r0 = 2**(1/6.0)
        # (Removed an unused local `s = 0.5*r0` that was never read here.)
        if x is None:
            if self.getBondLength() <= r0:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) <= r0:
                return True
            else:
                return False

    @torch.no_grad()
    def isProduct(self, x=None):
        """True if the dimer bond length is at least r0 + 2*s, s = 0.5*r0."""
        r0 = 2**(1/6.0)
        s = 0.5*r0
        if x is None:
            if self.getBondLength() >= r0+2*s:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) >= r0+2*s:
                return True
            else:
                return False

    def step_bc(self):
        """Basin-restricted step for boundary-condition sampling: moves that
        leave both the reactant and product states are rejected."""
        with torch.no_grad():
            state_old = self.getConfig().detach().clone()
            self.step_unbiased()
            if self.isReactant() or self.isProduct():
                pass
            else:
                #If it's not in the reactant or product state, reset!
                self.setConfig(state_old)

    def save(self):
        """Advance the internal clock and periodically dump the configuration."""
        self.timestep += 1
        if self.save_config and self.timestep % self.output_time == 0:
            self.dumpConfig(self.timestep)
| 8,134 | 35.644144 | 122 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/ftme_sl/run_ftsme_sl.py | import sys
sys.path.append("..")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
import scipy.spatial
#Import necessarry tools from tpstorch
from dimer_fts_nosolv import DimerFTS
from committor_nn import CommittorNet, CommittorNetBP, CommittorNetDR
from dimer_fts_nosolv import FTSLayerCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation#, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSUpdate
#from tpstorch.ml.nn import BKELossEXP
from tpstorch.ml.nn import BKELossFTS, CommittorLoss2
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
Np = 32
def initializer(s):
    # Linear interpolation between the module-level reactant (`start`) and
    # product (`end`) configurations; s in [0, 1] is the fractional position
    # of a string node (rank/(world_size-1) at the call site).
    return (1-s)*start+s*end
#This function only assumes that the string consists of the dimer without solvent particles
@torch.no_grad()
def dimer_nullspace(vec,x,boxsize):
Np = 2
##(1) Pre-processing so that dimer is at the center
old_x = x.view(2,3).clone()
#Compute the pair distance
dx = (old_x[0]-old_x[1])
dx = dx-torch.round(dx/boxsize)*boxsize
#Re-compute one of the coordinates and shift to origin
old_x[0] = dx+old_x[1]
x_com = 0.5*(old_x[0]+old_x[1])
new_vec = vec.view(Np,3).clone()
#Compute the pair distance
ds = (new_vec[0]-new_vec[1])
ds = ds-torch.round(ds/boxsize)*boxsize
#Re-compute one of the coordinates and shift to origin
new_vec[0] = ds+new_vec[1]
s_com = 0.5*(new_vec[0]+new_vec[1])
for i in range(Np):
old_x[i] -= x_com
new_vec[i] -= s_com
old_x[i] -= torch.round(old_x[i]/boxsize)*boxsize
new_vec[i] -= torch.round(new_vec[i]/boxsize)*boxsize
##(2) Rotate the system using Kabsch algorithm
#weights = np.ones(Np)
#weights = np.zeros(Np)
#weights[0] = 1.0
#weights[1] = 1.0
#weights = np.ones(Np)/(Np-2)
weights = np.zeros(Np)#np.ones(self.Np)/(self.Np-2)
weights[0] = 1.0
weights[1] = 1.0
rotate,rmsd = scipy.spatial.transform.Rotation.align_vectors(new_vec.numpy(),old_x.numpy(), weights=weights)
for i in range(Np):
old_x[i] = torch.tensor(rotate.apply(old_x[i].numpy()))
old_x[i] -= torch.round(old_x[i]/boxsize)*boxsize
return old_x.flatten()
#Initialization
r0 = 2**(1/6.0)
width = 0.5*r0
#Reactant
dist_init_start = r0
#Product state
dist_init_end = r0+2*width
#scale down/up the distance of one of the particle dimer
box = [14.736125994561544, 14.736125994561544, 14.736125994561544]
def CubicLattice(dist_init):
    """Build an Np-particle simple-cubic lattice inside the periodic box `box`,
    set the dimer (particles 0 and 1) to separation `dist_init`, and shift all
    particles so the dimer's center of mass sits at the origin.

    Uses the module-level globals `Np` and `box`; returns an (Np, 3) tensor.
    """
    state = torch.zeros(Np, 3);
    num_spacing = np.ceil(Np**(1/3.0))  # lattice sites per box edge
    spacing_x = box[0]/num_spacing;
    spacing_y = box[1]/num_spacing;
    spacing_z = box[2]/num_spacing;
    count = 0;
    id_x = 0;
    id_y = 0;
    id_z = 0;
    # Fill lattice sites (z-fastest ordering) until all Np particles are placed.
    while Np > count:
        state[int(id_z+id_y*num_spacing+id_x*num_spacing*num_spacing)][0] = spacing_x*id_x-0.5*box[0];
        state[int(id_z+id_y*num_spacing+id_x*num_spacing*num_spacing)][1] = spacing_y*id_y-0.5*box[1];
        state[int(id_z+id_y*num_spacing+id_x*num_spacing*num_spacing)][2] = spacing_z*id_z-0.5*box[2];
        count += 1;
        id_z += 1;
        if(id_z==num_spacing):
            id_z = 0;
            id_y += 1;
            if(id_y==num_spacing):
                id_y = 0;
                id_x += 1;
    #Compute the pair distance (minimum image)
    dx = (state[0]-state[1])
    dx = dx-torch.round(dx/box[0])*box[0]
    #Re-compute one of the coordinates and shift to origin
    state[0] = dx/torch.norm(dx)*dist_init+state[1]
    x_com = 0.5*(state[0]+state[1])
    for i in range(Np):
        state[i] -= x_com
        state[i] -= torch.round(state[i]/box[0])*box[0]
    return state;
start = CubicLattice(dist_init_start)
end = CubicLattice(dist_init_end)
initial_config = initializer(rank/(world_size-1))
#Initialize neural net
#committor = CommittorNetDR(num_nodes=2500, boxsize=10).to('cpu')
committor = torch.jit.script(CommittorNetBP(num_nodes=200, boxsize=box[0], Np=32,rc=2.5,sigma=1.0).to('cpu'))
#Initialize the string for FTS method
ftslayer = FTSLayer(react_config=start[:2].flatten(),prod_config=end[:2].flatten(),num_nodes=world_size,boxsize=box[0],num_particles=2).to('cpu')
#Load the pre-initialized neural network and string
committor.load_state_dict(torch.load("../initial_1hl_nn_bp"))
kT = 1.0
ftslayer.load_state_dict(torch.load("../test_string_config"))
n_boundary_samples = 100
batch_size = 8
period = 25
#Initialize the dimer simulation
dimer_sim_bc = DimerFTS(param="param_bc",config=initial_config.detach().clone(), rank=rank, beta=1/kT, save_config=False, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
dimer_sim = DimerFTS(param="param",config=initial_config.detach().clone(), rank=rank, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
dimer_sim_com = DimerFTS(param="param",config=initial_config.detach().clone(), rank=rank, beta=1/kT, save_config=False, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
#Construct FTSSimulation
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=0.02,momentum=0.9,nesterov=True,kappa=0.1,periodic=True,dim=3)
datarunner = FTSSimulation(dimer_sim, committor = committor, nn_training = True, period=period, batch_size=batch_size, dimN=Np*3)
#Initialize main loss function and optimizers
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossFTS( bc_sampler = dimer_sim_bc,
committor = committor,
lambda_A = 1e4,
lambda_B = 1e4,
start_react = start,
start_prod = end,
n_bc_samples = 100,
bc_period = 10,
batch_size_bc = 0.5,
tol = 5e-10,
mode= 'shift')
cmloss = CommittorLoss2( cl_sampler = dimer_sim_com,
committor = committor,
lambda_cl=100.0,
cl_start=10,
cl_end=200,
cl_rate=10,
cl_trials=50,
batch_size_cl=0.5
)
loss_io = []
loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
#Training loop
optimizer = ParallelAdam(committor.parameters(), lr=3e-3)
lambda_cl_end = 10**3
cl_start=200
cl_end=10000
cl_stepsize = (lambda_cl_end-cmloss.lambda_cl)/(cl_end-cl_start)
#We can train in terms of epochs, but we will keep it in one epoch
with open("string_{}_config.xyz".format(rank),"w") as f, open("string_{}_log.txt".format(rank),"w") as g:
for epoch in range(1):
if rank == 0:
print("epoch: [{}]".format(epoch+1))
for i in tqdm.tqdm(range(10000)):#20000)):
if (i > cl_start) and (i <= cl_end):
cmloss.lambda_cl += cl_stepsize
elif i > cl_end:
cmloss.lambda_cl = lambda_cl_end
# get data and reweighting factors
configs, grad_xs = datarunner.runSimulation()
# zero the parameter gradients
optimizer.zero_grad()
# (2) Update the neural network
# forward + backward + optimize
bkecost = loss(grad_xs, dimer_sim.rejection_count)
bkecost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
cmcost = cmloss(i, dimer_sim.getConfig())
cost = bkecost+cmcost
cost.backward()
optimizer.step()
ftsoptimizer.step(configs[:,:6],len(configs),boxsize=box[0],remove_nullspace=dimer_nullspace)
# print statistics
with torch.no_grad():
string_temp = ftslayer.string[rank].view(2,3)
f.write("2 \n")
f.write('Lattice=\"10.0 0.0 0.0 0.0 10.0 0.0 0.0 0.0 10.0\" ')
f.write('Origin=\"-5.0 -5.0 -5.0\" ')
f.write("Properties=type:S:1:pos:R:3:aux1:R:1 \n")
f.write("2 {} {} {} {} \n".format(string_temp[0,0],string_temp[0,1], string_temp[0,2],0.5*r0))
f.write("2 {} {} {} {} \n".format(string_temp[1,0],string_temp[1,1], string_temp[1,2],0.5*r0))
f.flush()
g.write("{} {} \n".format((i+1)*period,torch.norm(string_temp[0]-string_temp[1])))
g.flush()
#if counter % 10 == 0:
main_loss = loss.main_loss
bc_loss = loss.bc_loss
loss_io.write('{:d} {:.5E} {:.5E} \n'.format(i+1,main_loss.item(),bc_loss.item()))
loss_io.flush()
#Print statistics
if rank == 0:
torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
| 9,052 | 37.198312 | 193 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/us_sl/run_us_sl.py | import sys
sys.path.append("..")
import time
t0 = time.time()
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from dimer_us import DimerUS
from committor_nn import initializeConfig, CommittorNet, CommittorNetBP, CommittorNetDR, SchNet
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelAdam
from tpstorch.ml.nn import BKELossEXP, CommittorLoss2
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
#(1) Initialization
r0 = 2**(1/6.0)
width = 0.25
Np = 30+2
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
kappa= 100
kT = 1.0
initial_config = np.genfromtxt("../restart/config_"+str(rank)+".xyz", usecols=(1,2,3))
start = np.genfromtxt("../restart_bc/config_"+str(rank)+"_react.xyz", usecols=(1,2,3))
end = np.genfromtxt("../restart_bc/config_"+str(rank)+"_prod.xyz", usecols=(1,2,3))
initial_config = torch.from_numpy(initial_config)
start = torch.from_numpy(start)
end = torch.from_numpy(end)
initial_config = initial_config.float()
start = start.float()
end = end.float()
#Initialize neural net
#committor = torch.jit.script(CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu'))
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
committor.load_state_dict(torch.load("initial_1hl_nn", map_location=torch.device('cpu')))
n_boundary_samples = 100
batch_size = 8
period = 25
dimer_sim_bc = DimerUS( param="param_bc",
config=initial_config.clone().detach(),
rank=rank,
beta=1/kT,
kappa = 0.0,
save_config=False,
mpi_group = mpi_group,
output_time=batch_size*period
)
dimer_sim = DimerUS( param="param",
config=initial_config.clone().detach(),
rank=rank,
beta=1/kT,
kappa = kappa,
save_config=True,
mpi_group = mpi_group,
output_time=batch_size*period
)
dimer_sim_com = DimerUS(param="param",
config=initial_config.clone().detach(),
rank=rank,
beta=1/kT,
kappa = 0.0,
save_config=False,
mpi_group = mpi_group,
output_time=batch_size*period
)
#Construct datarunner
datarunner = EXPReweightSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=Np*3)
#Construct optimizers
optimizer = ParallelAdam(committor.parameters(), lr=1e-4)
#The BKE Loss function, with EXP Reweighting
loss = BKELossEXP( bc_sampler = dimer_sim_bc,
committor = committor,
lambda_A = 1e4,
lambda_B = 1e4,
start_react = start,
start_prod = end,
n_bc_samples = 100,
bc_period = 100,
batch_size_bc = 0.5,
)
#The supervised learning loss
cmloss = CommittorLoss2( cl_sampler = dimer_sim_com,
committor = committor,
lambda_cl=100.0,
cl_start=10,
cl_end=5000,
cl_rate=10,
cl_trials=100,
batch_size_cl=0.5
)
loss_io = []
if rank == 0:
loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
lambda_cl_end = 10**3
cl_start=200
cl_end=10000
cl_stepsize = (lambda_cl_end-cmloss.lambda_cl)/(cl_end-cl_start)
# Save reactant, product configurations
torch.save(loss.react_configs, "react_configs_"+str(rank+1)+".pt")
torch.save(loss.prod_configs, "prod_configs_"+str(rank+1)+".pt")
torch.save(loss.n_bc_samples, "n_bc_samples_"+str(rank+1)+".pt")
#Training loop
# Training loop: runs until the wall-clock budget (time_max) is exhausted, so
# the job can be resubmitted and resumed from the saved count/optimizer state.
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    time_max = 9.0*60  # wall-clock budget in seconds before a clean stop
    time_out = True
    #for i in range(count,count+100):#20000)):
    i = 0
    while(time_out):
        # Ramp the supervised-learning penalty lambda_cl linearly between
        # iterations cl_start and cl_end, then hold it at lambda_cl_end.
        if (i > cl_start) and (i <= cl_end):
            cmloss.lambda_cl += cl_stepsize
        elif i > cl_end:
            cmloss.lambda_cl = lambda_cl_end
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        dimer_sim.dumpRestart()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        # forward + backward + optimize
        bkecost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cmcost = cmloss(i, dimer_sim.getConfig())
        cost = bkecost+cmcost
        cost.backward()
        optimizer.step()
        # print statistics
        with torch.no_grad():
            main_loss = loss.main_loss
            cm_loss = cmloss.cl_loss
            bc_loss = loss.bc_loss
        #Print statistics (rank 0 writes the shared checkpoints and the log)
        if rank == 0:
            if i%100 == 0:
                torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
            torch.save(committor.state_dict(), "{}_params".format(prefix))
            torch.save(optimizer.state_dict(), "optimizer_params")
            np.savetxt("count.txt", np.array((i+1,)))
            loss_io.write('{:d} {:.5E} {:.5E} {:.5E}\n'.format(i+1,main_loss.item(),bc_loss.item(),cm_loss.item()))
            loss_io.flush()
        # Every rank saves its own supervised-learning state (rank+1 suffix).
        torch.save(cmloss.lambda_cl, "lambda_cl_"+str(rank+1)+".pt")
        torch.save(cmloss.cl_configs, "cl_configs_"+str(rank+1)+".pt")
        torch.save(cmloss.cl_configs_values, "cl_configs_values_"+str(rank+1)+".pt")
        torch.save(cmloss.cl_configs_count, "cl_configs_count_"+str(rank+1)+".pt")
        i = i+1
        # Stop all ranks together once the slowest rank exceeds the budget.
        t1 = time.time()
        time_diff = t1-t0
        time_diff = torch.tensor(time_diff)
        dist.all_reduce(time_diff,op=dist.ReduceOp.MAX)
        if time_diff > time_max:
            time_out = False
| 6,609 | 34.923913 | 181 | py |
tps-torch | tps-torch-main/dimer_solv/ml_test/us_sl/run_us_sl_r.py | import sys
sys.path.append("..")
import time
t0 = time.time()
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from dimer_us import DimerUS
from committor_nn import initializeConfig, CommittorNet, CommittorNetBP, CommittorNetDR, SchNet
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelAdam
from tpstorch.ml.nn import BKELossEXP, CommittorLoss2
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
# reload count
count = int(np.genfromtxt("count.txt"))
torch.manual_seed(count)
np.random.seed(count)
prefix = 'simple'
#(1) Initialization
r0 = 2**(1/6.0)
width = 0.25
Np = 30+2
box = [8.617738760127533, 8.617738760127533, 8.617738760127533]
kappa= 100
kT = 1.0
initial_config = np.genfromtxt("../restart/config_"+str(rank)+".xyz", usecols=(1,2,3))
start = np.genfromtxt("../restart_bc/config_"+str(rank)+"_react.xyz", usecols=(1,2,3))
end = np.genfromtxt("../restart_bc/config_"+str(rank)+"_prod.xyz", usecols=(1,2,3))
initial_config = torch.from_numpy(initial_config)
start = torch.from_numpy(start)
end = torch.from_numpy(end)
initial_config = initial_config.float()
start = start.float()
end = end.float()
#Initialize neural net
#committor = torch.jit.script(CommittorNetDR(num_nodes=2500, boxsize=box[0]).to('cpu'))
committor = SchNet(hidden_channels = 64, num_filters = 64, num_interactions = 3, num_gaussians = 50, cutoff = box[0], max_num_neighbors = 31, boxsize=box[0], Np=32, dim=3).to('cpu')
committor.load_state_dict(torch.load("simple_params", map_location=torch.device('cpu')))
n_boundary_samples = 100
batch_size = 8
period = 25
dimer_sim_bc = DimerUS( param="param_bc",
config=initial_config.clone().detach(),
rank=rank,
beta=1/kT,
kappa = 0.0,
save_config=False,
mpi_group = mpi_group,
output_time=batch_size*period
)
dimer_sim = DimerUS( param="param",
config=initial_config.clone().detach(),
rank=rank,
beta=1/kT,
kappa = kappa,
save_config=True,
mpi_group = mpi_group,
output_time=batch_size*period
)
dimer_sim.useRestart()
dimer_sim_com = DimerUS(param="param",
config=initial_config.clone().detach(),
rank=rank,
beta=1/kT,
kappa = 0.0,
save_config=False,
mpi_group = mpi_group,
output_time=batch_size*period
)
#Construct datarunner
datarunner = EXPReweightSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=Np*3)
#Construct optimizers
optimizer = ParallelAdam(committor.parameters(), lr=1e-4)
optimizer.load_state_dict(torch.load("optimizer_params"))
#The BKE Loss function, with EXP Reweighting
loss = BKELossEXP( bc_sampler = dimer_sim_bc,
committor = committor,
lambda_A = 1e4,
lambda_B = 1e4,
start_react = start,
start_prod = end,
n_bc_samples = 0,
bc_period = 100,
batch_size_bc = 0.5,
)
#The supervised learning loss
cmloss = CommittorLoss2( cl_sampler = dimer_sim_com,
committor = committor,
lambda_cl=100.0,
cl_start=10,
cl_end=5000,
cl_rate=10,
cl_trials=100,
batch_size_cl=0.5
)
loss_io = []
if rank == 0:
loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'a')
lambda_cl_end = 10**3
cl_start=200
cl_end=10000
cl_stepsize = (lambda_cl_end-cmloss.lambda_cl)/(cl_end-cl_start)
# Save reactant, product configurations
loss.react_configs = torch.load("react_configs_"+str(rank+1)+".pt")
loss.prod_configs = torch.load("prod_configs_"+str(rank+1)+".pt")
loss.n_bc_samples = torch.load("n_bc_samples_"+str(rank+1)+".pt")
# cmloss variables
cmloss.lambda_cl = torch.load("lambda_cl_"+str(rank+1)+".pt")
cmloss.cl_configs = torch.load("cl_configs_"+str(rank+1)+".pt")
cmloss.cl_configs_values = torch.load("cl_configs_values_"+str(rank+1)+".pt")
cmloss.cl_configs_count = torch.load("cl_configs_count_"+str(rank+1)+".pt")
#Training loop
for epoch in range(1):
if rank == 0:
print("epoch: [{}]".format(epoch+1))
time_max = 9.0*60
time_out = True
#for i in range(count,count+100):#20000)):
i = count
while(time_out):
if (i > cl_start) and (i <= cl_end):
cmloss.lambda_cl += cl_stepsize
elif i > cl_end:
cmloss.lambda_cl = lambda_cl_end
# get data and reweighting factors
config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
dimer_sim.dumpRestart()
# zero the parameter gradients
optimizer.zero_grad()
# (2) Update the neural network
# forward + backward + optimize
bkecost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
cmcost = cmloss(i, dimer_sim.getConfig())
cost = bkecost+cmcost
cost.backward()
optimizer.step()
# print statistics
with torch.no_grad():
main_loss = loss.main_loss
cm_loss = cmloss.cl_loss
bc_loss = loss.bc_loss
#Print statistics
if rank == 0:
if i%100 == 0:
torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
torch.save(committor.state_dict(), "{}_params".format(prefix))
torch.save(optimizer.state_dict(), "optimizer_params")
np.savetxt("count.txt", np.array((i+1,)))
loss_io.write('{:d} {:.5E} {:.5E} {:.5E}\n'.format(i+1,main_loss.item(),bc_loss.item(),cm_loss.item()))
loss_io.flush()
torch.save(cmloss.lambda_cl, "lambda_cl_"+str(rank+1)+".pt")
torch.save(cmloss.cl_configs, "cl_configs_"+str(rank+1)+".pt")
torch.save(cmloss.cl_configs_values, "cl_configs_values_"+str(rank+1)+".pt")
torch.save(cmloss.cl_configs_count, "cl_configs_count_"+str(rank+1)+".pt")
i = i+1
t1 = time.time()
time_diff = t1-t0
time_diff = torch.tensor(time_diff)
dist.all_reduce(time_diff,op=dist.ReduceOp.MAX)
if time_diff > time_max:
time_out = False
| 7,050 | 35.533679 | 181 | py |
tps-torch | tps-torch-main/muller-brown/committor_test.py | import torch
import numpy as np
import tpstorch.fts as fts
import mullerbrown as mb
# Basically I will read in averaged Voronoi cells, and then determine the committor at the Voronoi cells
committor_vals = np.zeros((32,))
committor_std = np.zeros((32,))
def myprod_checker(config):
    """Return True when `config` = (x, y) has committed to the product basin.

    The product basin is reached either by coming within 0.2 of the product
    minimum at (0.5, 0.0) or by dropping below the line y = x + 0.8.
    """
    basin_center = torch.tensor([[0.5, 0.0]])
    basin_radius = 0.2
    dist_to_center = ((config - basin_center) ** 2).sum() ** 0.5
    below_cut_line = config[1] < config[0] + 0.8
    return bool(dist_to_center <= basin_radius) or bool(below_cut_line)
def myreact_checker(config):
    """Return True if config lies in the reactant basin: within 0.2 of
    [-0.5, 1.5] or above the line y = 0.5*x + 1.5."""
    center = torch.tensor([[-0.5, 1.5]])
    radius = 0.2
    displacement = config - center
    within_radius = displacement.pow(2).sum() ** 0.5 <= radius
    above_line = config[1] > 0.5 * config[0] + 1.5
    return bool(within_radius or above_line)
# Estimate the committor at each of the 32 string images by brute-force
# shooting: launch many unbiased trajectories from the time-averaged
# configuration of each image and count the fraction that commit to product.
for i in range(32):
    # Get config to test: average sampled configurations of image i
    # (every other row starting at index 1) from its trajectory file.
    configs = np.genfromtxt(str(i)+"/config_"+str(i)+".xyz", skip_header=19000,usecols=(1,2))
    config_avg = np.mean(configs[1::2],axis=0)
    start = torch.from_numpy(config_avg)
    start = start.float()
    mb_sim = mb.MySampler("param_test",start, int(i), int(0))
    counts = []
    for j in range(1000):
        hitting = False
        mb_sim.setConfig(start)
        # Run unbiased dynamics until the trajectory commits to a basin.
        while hitting is False:
            mb_sim.runStep()
            config_ = mb_sim.getConfig()
            if myprod_checker(config_) is True:
                counts.append(1.0)  # reached product first
                hitting = True
            if myreact_checker(config_) is True:
                counts.append(0.0)  # reached reactant first
                hitting = True
    # Now compute the committor estimate and its 95% confidence interval.
    counts = np.array(counts)
    mean_count = np.mean(counts)
    conf_count = 1.96*np.std(counts)/len(counts)**0.5
    committor_vals[i] = mean_count
    committor_std[i] = conf_count
    print("{:.8E} {:.8E} {:.8E} {:.8E}".format(config_avg[0],config_avg[1],mean_count,conf_count))
| 1,828 | 30 | 104 | py |
tps-torch | tps-torch-main/muller-brown/test.py | import torch
import tpstorch.fts
import mullerbrown as mb
# Smoke test: a single sampler pinned to rank 0, cell index 0.
a = mb.MySampler("param",torch.tensor([[0.0,1.0]]),0,0)
#These are just mock tensors
#Interpret the MD potential as a 2D system for one particle
left_weight = torch.tensor([[1.0,-1.0]])
left_bias = torch.tensor(0.0)
right_weight = torch.tensor([[1.0,-1.0]])
right_bias = torch.tensor(2.0)
# Hyperplanes w.x + b bound the Voronoi cell for the duration of the run.
a.runSimulation(10**6,left_weight,right_weight,left_bias,right_bias)
| 417 | 31.153846 | 68 | py |
tps-torch | tps-torch-main/muller-brown/test_vor.py | import torch
import tpstorch.fts as fts
import mullerbrown as mb
import torch.distributed as dist
dist.init_process_group(backend='mpi')
mygroup = dist.distributed_c10d._get_default_group()
rank = dist.get_rank()
#Override FTS class and modify routines as needed
class CustomFTSMethod(fts.FTSMethodVor):
    """FTSMethodVor subclass whose dump() also writes the Voronoi cells."""

    def __init__(self, sampler, initial_config, final_config, num_nodes, deltatau, kappa, update_rule):
        super().__init__(sampler, initial_config, final_config, num_nodes, deltatau, kappa, update_rule)

    def dump(self):
        """Synchronize the string across ranks, then dump the configuration
        together with the stacked Voronoi-cell tensors."""
        self.send_strings()
        cells = torch.stack(self.voronoi, dim=0)
        self.sampler.dumpConfigVor(self.rank, cells)
# cooked up an easy system with basin at [0.0,0.0] and [1.0,1.0]
start = torch.tensor([[0.0,0.0]])
end = torch.tensor([[1.0,1.0]])
def initializer(s, start, end):
    """Linearly interpolate: returns start at s=0 and end at s=1."""
    weight_start = 1 - s
    weight_end = s
    return weight_start * start + weight_end * end
# One interpolation parameter per MPI rank, spanning [0, 1].
alphas = torch.linspace(0.0,1,dist.get_world_size())
mb_sim = mb.MySampler("param_test",initializer(alphas[rank],start,end), rank, 0)
# if(rank==0):
#     mb_sim = mb.MySampler("param_test",initializer(alphas[rank],start,end), rank, 1)
if(rank==0):
    print(alphas)
    print(alphas.size())
# Now do FTS method
fts_method = CustomFTSMethod(sampler=mb_sim,initial_config=start,final_config=end,num_nodes=dist.get_world_size(),deltatau=0.1,kappa=0.1,update_rule=1)
print(fts_method.string)
# Main loop: advance the string and periodically dump config + Voronoi cells.
for i in range(100000):
    fts_method.run(1)
    if(i%50==0):
        fts_method.dump()
        if(rank == 0):
            print(i)
            print(fts_method.voronoi)
tps-torch | tps-torch-main/muller-brown/test_fts.py | import torch
import tpstorch.fts as fts
import mullerbrown as mb
import torch.distributed as dist
dist.init_process_group(backend='mpi')
mygroup = dist.distributed_c10d._get_default_group()
rank = dist.get_rank()
#Override FTS class and modify routines as needed
class CustomFTSMethod(fts.FTSMethod):
    """FTSMethod subclass with a dump() hook that forwards to the sampler."""

    def __init__(self, sampler, initial_config, final_config, num_nodes, deltatau, kappa):
        super().__init__(sampler, initial_config, final_config, num_nodes, deltatau, kappa)

    def dump(self, biases):
        # Delegate dumping of the current configuration to the sampler.
        self.sampler.dumpConfig(biases)
# cooked up an easy system with basin at [0.0,0.0] and [1.0,1.0]
# Toy system with basins at [0.0, 0.0] and [1.0, 1.0].
start = torch.tensor([[0.0,0.0]])
end = torch.tensor([[1.0,1.0]])

def initializer(s,start,end):
    """Linear interpolation between start (s=0) and end (s=1)."""
    return (1-s)*start+s*end

# Interior interpolation parameters only; the two endpoints are the fixed
# nodal points, so slice off the first and last values.
alphas = torch.linspace(0.0,1,dist.get_world_size()+2)[1:-1]
mb_sim = mb.MySampler("param_test",initializer(alphas[rank],start,end), rank, 0)
# if(rank==0):
#     mb_sim = mb.MySampler("param_test",initializer(alphas[rank],start,end), rank, 1)
if(rank==0):
    print(alphas)
    # BUGFIX: alphas.size is a bound method; it must be called to print the
    # shape (consistent with the sibling test_vor.py script).
    print(alphas.size())
# Now do FTS method
fts_method = CustomFTSMethod(sampler=mb_sim,initial_config=start,final_config=end,num_nodes=dist.get_world_size()+2,deltatau=0.1,kappa=0.1)
for i in range(100000):
    fts_method.run(1)
    if(i%50==0):
        fts_method.dump(1)
        if(rank == 0):
            print(i)
            print(fts_method.biases)
| 1,384 | 34.512821 | 139 | py |
tps-torch | tps-torch-main/1dbrownian/fts_test/generate_validate.py | #Start up torch dist package
import torch
import torch.distributed as dist
dist.init_process_group(backend='mpi')
#Load classes for simulations and controls
from brownian_fts import BrownianParticle
import numpy as np
#Starting and ending configuration.
start = torch.tensor([[-1.0]])
end = torch.tensor([[1.0]])

def initializer(s):
    """Linear interpolation between start (s=0) and end (s=1)."""
    return (1-s)*start+s*end

alphas = torch.linspace(0.0,1,dist.get_world_size()+2)[1:-1]
bp_simulator = BrownianParticle(dt=2e-3,gamma=1.0, kT = 0.4, initial=initializer(alphas[dist.get_rank()]),prefix='test',save_config=False)
#Generate data for validation test
#For this 1D brownian example case, the TSE is generated by the middle replica.
#Note that this assumes an odd number of MPI processes.
data = np.loadtxt('test_bp_{}.txt'.format(int((dist.get_world_size()+1)/2)))
#Each of the W processes handles num_configs initial configurations.
num_configs = 10
trials = 500
validation_io = open("test_validation_{}.txt".format(dist.get_rank()+1),"w")
import tqdm
#For loop over initial states
if dist.get_rank() == 0:
    # BUGFIX: the original called .format() on a string with no placeholders,
    # which was a no-op; the dead call has been removed.
    print("Ready to generate validation test!")
for i in range(num_configs):
    counts = []
    # Draw a random starting position from the sampled transition-state data.
    initial_config = torch.from_numpy(np.array([[np.random.choice(data[:,0])]]).astype(np.float32)).detach().clone()
    for j in tqdm.tqdm(range(trials)):
        hitting = False
        bp_simulator.qt = initial_config.detach().clone()
        #Run simulation until it falls into the product or reactant state
        while hitting is False:
            bp_simulator.runUnbiased()
            if np.abs(bp_simulator.qt.item()) >= 1.0:
                if bp_simulator.qt.item() < 0:
                    counts.append(0.0)  # reactant (x <= -1)
                elif bp_simulator.qt.item() > 0:
                    counts.append(1.0)  # product (x >= 1)
                hitting = True
    #Compute the committor estimate over the trials
    counts = np.array(counts)
    mean_count = np.mean(counts)
    conf_count = 1.96*np.std(counts)/len(counts)**0.5 #95% confidence interval
    #Save into io
    if validation_io is not None:
        validation_io.write('{} {} {} \n'.format(mean_count, conf_count,initial_config.item()))
        validation_io.flush()
tps-torch | tps-torch-main/1dbrownian/fts_test/brownian_fts.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
#Import necessarry tools from tpstorch
#import tpstorch.fts as fts
from tpstorch.fts import FTSSampler, AltFTSMethod
import numpy as np
#Import any other thing
import tqdm, sys
class BrownianParticle(FTSSampler):
    """Overdamped Brownian particle in the 1D double-well V(x) = (x^2 - 1)^2,
    sampled with MPI-parallel FTS: each rank is confined to its Voronoi cell,
    bounded by the hyperplanes passed to runSimulation()."""
    def __init__(self, dt, gamma, kT, initial,prefix='',save_config=False):
        super(BrownianParticle, self).__init__()
        #Timestep
        self.dt = dt
        #Noise amplitude sqrt(2 kT / gamma)
        self.coeff = np.sqrt(2*kT/gamma)
        self.gamma = gamma
        #The current position. We make sure that its gradient is zero.
        self.qt = initial.detach().clone()
        #IO for BP position and committor values
        self.save_config = save_config
        if self.save_config:
            self.qt_io = open("{}_bp_{}.txt".format(prefix,dist.get_rank()+1),"w")
        #Inverse temperature 1/kT (also written out by dumpConfig)
        self.invkT = 1/kT
        #Tracking steps
        self.timestep = 0
    #Runs dynamics for a given number of steps. The *_weight and *_bias
    #parameters define the boundaries of this rank's Voronoi cell as
    #hyperplanes; proposed moves that leave the cell are rejected.
    def runSimulation(self, nsteps, left_weight, right_weight, left_bias, right_bias):
        for i in range(nsteps):
            #Euler-Maruyama proposal: drift -V'(q)*dt/gamma plus Gaussian noise
            q0 = self.qt-4*self.qt*(-1+self.qt**2)*self.dt/self.gamma + self.coeff*torch.normal(torch.tensor([[0.0]]),std=np.sqrt(self.dt))
            if dist.get_rank() == 0:
                #First rank only has a right-hand boundary
                if (torch.sum(q0*right_weight)+right_bias).item() < 0:
                    self.qt = q0.detach().clone()
            elif dist.get_rank() == dist.get_world_size()-1:
                #Last rank only has a left-hand boundary
                if (torch.sum(q0*left_weight)+left_bias).item() > 0:
                    self.qt = q0.detach().clone()
            else:
                #Interior ranks are bounded on both sides
                if (torch.sum(q0*left_weight)+left_bias).item() > 0 and (torch.sum(q0*right_weight)+right_bias).item() < 0:
                    self.qt = q0.detach().clone()
            self.timestep += 1
    #An unbiased simulation run (single step, no cell constraint)
    def runUnbiased(self):
        q0 = self.qt-4*self.qt*(-1+self.qt**2)*self.dt/self.gamma + self.coeff*torch.normal(torch.tensor([[0.0]]),std=np.sqrt(self.dt))
        self.qt = q0.detach().clone()
    def getConfig(self):
        #Return a detached copy of the current position
        return self.qt.detach().clone()
    def dumpConfig(self):
        #Write the current position and temperature if saving is enabled
        #Update timestep counter
        #self.timestep += 1
        if self.save_config:
            # if self.timestep % 10 == 0:
            self.qt_io.write("{} {}\n".format(self.qt.item(),1/self.invkT))
            self.qt_io.flush()
#Override the class and modify the routine which dumps the transition path
class CustomFTSMethod(AltFTSMethod):
    """AltFTSMethod subclass that can also record the string to its own file."""

    def __init__(self, sampler, initial_config, final_config, num_nodes, deltatau, kappa):
        super().__init__(sampler, initial_config, final_config, num_nodes, deltatau, kappa)

    def dump(self, dumpstring=False):
        """Dump the sampler configuration; optionally append the first string
        coordinate to the string file."""
        if dumpstring:
            self.string_io.write("{} \n".format(self.string[0, 0]))
        self.sampler.dumpConfig()
| 3,089 | 38.113924 | 149 | py |
tps-torch | tps-torch-main/1dbrownian/fts_test/run.py | #Start up torch dist package
import torch
import torch.distributed as dist
dist.init_process_group(backend='mpi')
#Load classes for simulations and controls
from brownian_fts import BrownianParticle, CustomFTSMethod
import numpy as np
#Starting and ending configuration (reactant x=-1, product x=+1).
start = torch.tensor([[-1.0]])
end = torch.tensor([[1.0]])
def initializer(s):
    #Linear interpolation between start (s=0) and end (s=1)
    return (1-s)*start+s*end
# Interior interpolation parameters; the fixed endpoints are excluded.
alphas = torch.linspace(0.0,1,dist.get_world_size()+2)[1:-1]
bp_simulator = BrownianParticle(dt=2e-3,gamma=1.0, kT = 0.4, initial=initializer(alphas[dist.get_rank()]),prefix='test',save_config=True)
fts_method = CustomFTSMethod(sampler=bp_simulator,initial_config=start,final_config=end,num_nodes=dist.get_world_size()+2,deltatau=0.01,kappa=0.01)
import tqdm
for i in tqdm.tqdm(range(40000)):
    #Run the simulation a single time-step
    fts_method.run(1)
    # Dump the string and configuration every 10 steps.
    if i % 10 == 0:
        fts_method.dump(True)
| 891 | 33.307692 | 147 | py |
tps-torch | tps-torch-main/1dbrownian/ml_test/brownian_ml_fts.py | #Import necessarry tools from torch
import torch
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml import MLSamplerFTS
from tpstorch.ml.nn import FTSLayer
from tpstorch import dist, _rank, _world_size
from brownian_ml import CommittorNet
import numpy as np
#Import any other thing
import tqdm, sys
#The 1D Brownian particle simulator
class BrownianParticle(MLSamplerFTS):
    """Overdamped Brownian particle in the 1D double-well V(x) = (x^2 - 1)^2,
    constrained to this rank's Voronoi cell of the FTS string.

    Args:
        dt: integration timestep.
        ftslayer: FTS layer holding the current string nodes; used for the
            cell-membership test via self.checkFTSCell.
        gamma: friction coefficient.
        kT: thermal energy (sets the noise amplitude sqrt(2 kT / gamma)).
        initial: initial configuration (2D column tensor, e.g. shape (1, 1)).
        prefix: filename prefix used when save_config is True.
        save_config: if True, write positions to '<prefix>_bp_<rank+1>.txt'.
    """
    def __init__(self, dt, ftslayer, gamma, kT, initial, prefix='', save_config=False):
        super(BrownianParticle, self).__init__(initial.detach().clone())
        #Timestep
        self.dt = dt
        self.ftslayer = ftslayer
        #Noise amplitude sqrt(2 kT / gamma)
        self.coeff = np.sqrt(2*kT/gamma)
        self.gamma = gamma
        #The current position, detached from any autograd graph
        self.qt = initial.detach().clone()
        #IO for BP position
        self.save_config = save_config
        if self.save_config:
            self.qt_io = open("{}_bp_{}.txt".format(prefix, _rank+1), "w")
        self.invkT = 1/kT
        #Step counter, used by save() to subsample output
        self.timestep = 0
        #Remember the structured and flattened configuration sizes
        self.config_size = initial.size()
        self.flattened_size = initial.flatten().size()
        self.torch_config = (self.qt.flatten()).detach().clone()
        self.torch_config.requires_grad_()

    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        """Set the internal state from a flat 1D tensor produced elsewhere.

        Raises:
            RuntimeError: if config is not flat or is inconsistent in size.
        """
        #By implementation, the torch config must be a flat 1D tensor
        if config.size() != self.flattened_size:
            raise RuntimeError("Config is not flat! Check implementation")
        else:
            self.qt = config.view(-1,1)
            self.torch_config = config.detach().clone()
            if self.qt.size() != self.config_size:
                raise RuntimeError("New config has inconsistent size compared to previous simulation! Check implementation")

    @torch.no_grad()
    def reset(self):
        """Reset the step counter and, if the current configuration has left
        this rank's FTS cell, re-seed it from the rank's string node."""
        self.steps = 0
        self.distance_sq_list = self.ftslayer(self.getConfig().flatten())
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            self.setConfig(self.ftslayer.string[_rank])
        pass

    def step(self):
        """One constrained Brownian step: propose an unbiased Euler-Maruyama
        move and accept it only if it stays inside this rank's Voronoi cell."""
        with torch.no_grad():
            config_test = self.qt-4*self.qt*(-1+self.qt**2)*self.dt/self.gamma + self.coeff*torch.normal(torch.tensor([[0.0]]),std=np.sqrt(self.dt))
            self.distance_sq_list = self.ftslayer(config_test.flatten())
            inftscell = self.checkFTSCell(_rank, _world_size)
            if inftscell:
                self.qt = config_test
            else:
                pass
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        try:
            #BUGFIX: was self.torch.grad.data.zero_() (nonexistent attribute).
            #Zero any stale gradient; a fresh clone has .grad None, so the
            #AttributeError is expected and swallowed.
            self.torch_config.grad.data.zero_()
        except:
            pass

    #These functions check whether the config is in a basin.
    @torch.no_grad()
    def isProduct(self, config):
        """Return True if config is in the product basin (x >= 1)."""
        end = torch.tensor([[1.0]])
        if config.item() >= end.item():
            return True
        else:
            return False

    @torch.no_grad()
    def isReactant(self, config):
        """Return True if config is in the reactant basin (x <= -1)."""
        start = torch.tensor([[-1.0]])
        if config.item() <= start.item():
            return True
        else:
            return False

    def step_bc(self):
        """One unbiased step that is accepted only if it lands inside a basin;
        used for sampling boundary-condition configurations."""
        with torch.no_grad():
            config_test = self.qt-4*self.qt*(-1+self.qt**2)*self.dt/self.gamma + self.coeff*torch.normal(torch.tensor([[0.0]]),std=np.sqrt(self.dt))
            if self.isReactant(config_test.flatten()) or self.isProduct(config_test.flatten()):
                self.qt = config_test.detach().clone()
            else:
                pass
        #Refresh the flat config and zero out stale gradient data
        self.torch_config = (self.qt.flatten()).detach().clone()
        self.torch_config.requires_grad_()
        try:
            #BUGFIX: was self.torch.grad.data.zero_()
            self.torch_config.grad.data.zero_()
        except:
            pass

    @torch.no_grad()
    def setConfig(self, config):
        """Overwrite the current position from a tensor (stored detached)."""
        self.qt = config.detach().clone()
        self.torch_config = (self.qt.flatten()).detach().clone()
        self.torch_config.requires_grad_()
        try:
            #BUGFIX: was self.torch.grad.data.zero_()
            self.torch_config.grad.data.zero_()
        except:
            pass

    @torch.no_grad()
    def getConfig(self):
        """Return a detached, flattened copy of the current position."""
        config = (self.qt.flatten()).detach().clone()
        return config

    def save(self):
        """Write the current position (every 100 steps) if save_config is set."""
        self.timestep += 1
        if self.save_config:
            if self.timestep % 100 == 0:
                self.qt_io.write("{} {}\n".format(self.torch_config[0],1/self.invkT))
                self.qt_io.flush()

    def step_unbiased(self):
        """One unconstrained Brownian step (no cell or basin restriction)."""
        self.qt += -4*self.qt*(-1+self.qt**2)*self.dt/self.gamma + self.coeff*torch.normal(torch.tensor([[0.0]]),std=np.sqrt(self.dt))
        self.torch_config = (self.qt.flatten()).detach().clone()
        self.torch_config.requires_grad_()
        try:
            #BUGFIX: was self.torch.grad.data.zero_()
            self.torch_config.grad.data.zero_()
        except:
            pass
| 5,596 | 34.424051 | 148 | py |
tps-torch | tps-torch-main/1dbrownian/ml_test/brownian_ml.py | #Import necessarry tools from torch
import torch
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml import MLSamplerEXP
from tpstorch.ml.nn import FTSLayer
from tpstorch import dist, _rank, _world_size
import numpy as np
#Import any other thing
import tqdm, sys
#The Neural Network for 1D should be very simple.
class CommittorNet(nn.Module):
    """Single-hidden-layer committor network: sigmoid(lin2(unit(lin1(x)))).

    Attribute names lin1/lin2 are kept because they define the state_dict
    keys used by load_state_dict elsewhere.
    """

    def __init__(self, d, num_nodes, unit=torch.relu):
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit  # hidden-layer nonlinearity
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=True)
        # Make rank-0 parameters authoritative across all processes.
        self.broadcast()

    def forward(self, x):
        hidden = self.unit(self.lin1(x))
        return torch.sigmoid(self.lin2(hidden))

    def broadcast(self):
        for _, param in self.named_parameters():
            dist.broadcast(param.data, src=0)
""""
#In this simple 1D case, torch treates the flattened array into
#a 0D array, which doesn't work with current FTSLayer format.
#So, we change the way forward calculation is done a little bit.
class FTSLayer1D(FTSLayer):
def __init(self,start, end,fts_size):
super(FTSLayer1D).__init(start,end_fts_size)
def forward(self, x):
w_times_x= torch.matmul(x,(self.string[1:]-self.string[:-1]).t())
bias = -torch.sum(0.5*(self.string[1:]+self.string[:-1])*(self.string[1:]-self.string[:-1]),dim=1)
return torch.add(w_times_x, bias)
"""
#The Neural Network for 1D should be very simple.
class CommittorFTSNet(nn.Module):
    """Committor network whose hidden layer concatenates an FTS-layer branch
    (lin1) with an ordinary dense branch (lin3) before the final linear +
    sigmoid (lin2). Attribute names are kept: they are state_dict keys.
    """

    def __init__(self, d, num_nodes, start, end, fts_size, unit=torch.relu):
        super(CommittorFTSNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit  # hidden nonlinearity
        self.lin1 = FTSLayer(start, end, fts_size)
        self.lin3 = nn.Linear(d, num_nodes - fts_size + 1, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=True)
        # Make rank-0 parameters authoritative across all processes.
        self.broadcast()

    def forward(self, x):
        # A bare d-vector is promoted to a column so both branches see a
        # batch dimension.
        if x.shape == torch.Size([self.d]):
            x = x.view(-1, 1)
        fts_branch = self.unit(self.lin1(x))
        dense_branch = self.unit(self.lin3(x))
        merged = torch.cat((fts_branch, dense_branch), dim=1)
        return torch.sigmoid(self.lin2(merged))

    def broadcast(self):
        # Only trainable parameters are synchronized.
        for _, param in self.named_parameters():
            if param.requires_grad:
                dist.broadcast(param.data, src=0)
#The 1D Brownian particle simulator
class BrownianParticle(MLSamplerEXP):
    """Overdamped Brownian particle in the 1D double-well V(x) = (x^2 - 1)^2,
    with a harmonic umbrella restraint on the committor value (strength
    kappa) used for EXP-reweighted sampling.

    Args:
        dt: integration timestep.
        gamma: friction coefficient.
        kT: thermal energy (sets the noise amplitude sqrt(2 kT / gamma)).
        initial: initial configuration (2D column tensor, e.g. shape (1, 1)).
        kappa: umbrella spring constant coupling to the committor value.
        prefix: filename prefix used when save_config is True.
        save_config: if True, write positions to '<prefix>_bp_<rank+1>.txt'.
    """
    def __init__(self, dt, gamma, kT, initial, kappa, prefix='', save_config=False):
        super(BrownianParticle, self).__init__(initial.detach().clone())
        #Timestep
        self.dt = dt
        self.kappa = kappa
        #Noise amplitude sqrt(2 kT / gamma)
        self.coeff = np.sqrt(2*kT/gamma)
        self.gamma = gamma
        #The current position, detached from any autograd graph
        self.qt = initial.detach().clone()
        #IO for BP position
        self.save_config = save_config
        if self.save_config:
            self.qt_io = open("{}_bp_{}.txt".format(prefix,_rank+1),"w")
        self.invkT = 1/kT
        #Step counter, used by save() to subsample output
        self.timestep = 0
        #Remember the structured and flattened configuration sizes
        self.config_size = initial.size()
        self.flattened_size = initial.flatten().size()
        self.torch_config = (self.qt.flatten()).detach().clone()
        self.torch_config.requires_grad_()

    def initialize_from_torchconfig(self, config):
        """Set the internal state from a flat 1D tensor produced elsewhere.

        Raises:
            RuntimeError: if config is not flat or is inconsistent in size.
        """
        #By implementation, the torch config must be a flat 1D tensor
        if config.size() != self.flattened_size:
            raise RuntimeError("Config is not flat! Check implementation")
        else:
            self.qt = config.view(-1,1)
            self.torch_config = config.detach().clone()
            if self.qt.size() != self.config_size:
                raise RuntimeError("New config has inconsistent size compared to previous simulation! Check implementation")

    def computeWForce(self, committor_val, qval):
        """Umbrella force -dt*kappa*grad(q)*(q(x)-qval)/gamma restraining the
        committor value toward the window center qval."""
        return -self.dt*self.kappa*self.torch_config.grad.data*(committor_val-qval)/self.gamma

    def step(self, committor_val, onlytst=False):
        """One biased Brownian step. If onlytst, restrain toward the
        transition state (q = 0.5); otherwise use this rank's window center."""
        with torch.no_grad():
            #Update one step
            if onlytst:
                self.qt += self.computeWForce(committor_val,0.5)-4*self.qt*(-1+self.qt**2)*self.dt/self.gamma + self.coeff*torch.normal(torch.tensor([[0.0]]),std=np.sqrt(self.dt))
            else:
                self.qt += self.computeWForce(committor_val, self.qvals[_rank])-4*self.qt*(-1+self.qt**2)*self.dt/self.gamma + self.coeff*torch.normal(torch.tensor([[0.0]]),std=np.sqrt(self.dt))
        #Refresh the flat config and zero out stale gradient data
        self.torch_config = (self.qt.flatten()).detach().clone()
        self.torch_config.requires_grad_()
        try:
            #BUGFIX: was self.torch.grad.data.zero_() (nonexistent attribute).
            #A fresh clone has .grad None, so the AttributeError is expected
            #and swallowed.
            self.torch_config.grad.data.zero_()
        except:
            pass

    #These functions check whether the config is in a basin.
    def isProduct(self, config):
        """Return True if config is in the product basin (x >= 1)."""
        end = torch.tensor([[1.0]])
        if config.item() >= end.item():
            return True
        else:
            return False

    def isReactant(self, config):
        """Return True if config is in the reactant basin (x <= -1)."""
        start = torch.tensor([[-1.0]])
        if config.item() <= start.item():
            return True
        else:
            return False

    def step_bc(self):
        """One unbiased step that is accepted only if it lands inside a basin;
        used for sampling boundary-condition configurations."""
        with torch.no_grad():
            #Update one step
            config_test = self.qt-4*self.qt*(-1+self.qt**2)*self.dt/self.gamma + self.coeff*torch.normal(torch.tensor([[0.0]]),std=np.sqrt(self.dt))
            if self.isReactant(config_test.flatten()) or self.isProduct(config_test.flatten()):
                self.qt = config_test.detach().clone()
            else:
                pass
        self.torch_config = (self.qt.flatten()).detach().clone()
        self.torch_config.requires_grad_()
        try:
            #BUGFIX: was self.torch.grad.data.zero_()
            self.torch_config.grad.data.zero_()
        except:
            pass

    def setConfig(self, config):
        """Overwrite the current position from a tensor (stored detached)."""
        self.qt = config.detach().clone()
        self.torch_config = (self.qt.flatten()).detach().clone()
        self.torch_config.requires_grad_()
        try:
            #BUGFIX: was self.torch.grad.data.zero_()
            self.torch_config.grad.data.zero_()
        except:
            pass

    def getConfig(self):
        """Return a detached, flattened copy of the current position."""
        config = (self.qt.flatten()).detach().clone()
        return config

    def save(self):
        """Write the current position (every 100 steps) if save_config is set."""
        self.timestep += 1
        if self.save_config:
            if self.timestep % 100 == 0:
                self.qt_io.write("{} {}\n".format(self.torch_config[0],1/self.invkT))
                self.qt_io.flush()

    def step_unbiased(self):
        """One unconstrained, unbiased Brownian step."""
        self.qt += -4*self.qt*(-1+self.qt**2)*self.dt/self.gamma + self.coeff*torch.normal(torch.tensor([[0.0]]),std=np.sqrt(self.dt))
        self.torch_config = (self.qt.flatten()).detach().clone()
        self.torch_config.requires_grad_()
        try:
            #BUGFIX: was self.torch.grad.data.zero_()
            self.torch_config.grad.data.zero_()
        except:
            pass
"""
class BrownianLoss(CommittorLossEXP):
def __init__(self, lagrange_bc, world_size, n_boundary_samples, react_configs, prod_configs, mode="random", ref_index=None, batch_size_bc = 0.5):
super(BrownianLoss,self).__init__()
#Store the MPI world size (number of MPI processes)
self.world_size = world_size
# Batch size for BC Loss
self.batch_size_bc = batch_size_bc
# Stuff for boundary condition loss
self.react_configs = react_configs #list of reactant basin configurations
self.prod_configs = prod_configs #list of product basin configurations
self.lagrange_bc = lagrange_bc #strength for quadratic BC loss
self.mean_recipnormconst = torch.zeros(1)
#Storing a 1D Tensor of reweighting factors
self.reweight = [torch.zeros(1) for i in range(_world_size)]
#Choose whether to sample window references randomly or not
self.mode = mode
if mode != "random":
if ref_index is None or ref_index < 0:
raise ValueError("For non-random choice of window reference, you need to set ref_index!")
else:
self.ref_index = torch.tensor(ref_index)
def compute_bc(self, committor):
#Assume that first dimension is batch dimension
loss_bc = torch.zeros(1)
#Randomly sample from available BC configurations
indices_react = torch.randperm(len(self.react_configs))[:int(self.batch_size_bc*len(self.react_configs))]
indices_prod = torch.randperm(len(self.prod_configs))[:int(self.batch_size_bc*len(self.prod_configs))]
#Computing the BC loss
react_penalty = torch.sum(committor(self.react_configs[indices_react,:])**2)
prod_penalty = torch.sum((1.0-committor(self.prod_configs[indices_prod,:]))**2)
loss_bc += 0.5*self.lagrange_bc*react_penalty#-self.react_lambda*react_penalty
loss_bc += 0.5*self.lagrange_bc*prod_penalty#-self.prod_lambda*prod_penalty
return loss_bc/self.world_size
def computeZl(self,k,fwd_meanwgtfactor,bwrd_meanwgtfactor):
with torch.no_grad():
empty = []
for l in range(_world_size):
if l > k:
empty.append(torch.prod(fwd_meanwgtfactor[k:l]))
elif l < k:
empty.append(torch.prod(bwrd_meanwgtfactor[l:k]))
else:
empty.append(torch.tensor(1.0))
return torch.tensor(empty).flatten()
def forward(self, gradients, committor, config, invnormconstants, fwd_weightfactors, bwrd_weightfactors, reciprocal_normconstants):
self.main_loss = self.compute_loss(gradients, invnormconstants)
self.bc_loss = self.compute_bc(committor)#, config, invnormconstants)
#renormalize losses
#get prefactors
self.reweight = [torch.zeros(1) for i in range(_world_size)]
fwd_meanwgtfactor = self.reweight.copy()
dist.all_gather(fwd_meanwgtfactor,torch.mean(fwd_weightfactors))
fwd_meanwgtfactor = torch.tensor(fwd_meanwgtfactor[:-1])
bwrd_meanwgtfactor = self.reweight.copy()
dist.all_gather(bwrd_meanwgtfactor,torch.mean(bwrd_weightfactors))
bwrd_meanwgtfactor = torch.tensor(bwrd_meanwgtfactor[1:])
#Randomly select a window as a free energy reference and broadcast that index across all processes
if self.mode == "random":
self.ref_index = torch.randint(low=0,high=_world_size,size=(1,))
dist.broadcast(self.ref_index, src=0)
#Computing the reweighting factors, z_l in our notation
self.reweight = self.computeZl(self.ref_index,fwd_meanwgtfactor,bwrd_meanwgtfactor)
self.reweight.div_(torch.sum(self.reweight)) #normalize
#Use it first to compute the mean inverse normalizing constant
mean_recipnormconst = torch.mean(invnormconstants)
mean_recipnormconst.mul_(self.reweight[_rank])
#All reduce the mean invnormalizing constant
dist.all_reduce(mean_recipnormconst)
#renormalize main_loss
self.main_loss.mul_(self.reweight[_rank])
dist.all_reduce(self.main_loss)
self.main_loss.div_(mean_recipnormconst)
#normalize bc_loss
dist.all_reduce(self.bc_loss)
return self.main_loss+self.bc_loss
"""
| 12,395 | 39.37785 | 194 | py |
tps-torch | tps-torch-main/1dbrownian/ml_test/run_vanilla.py | #Import necessarry tools from torch
import torch
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelAdam, ParallelSGD
from tpstorch.ml.nn import BKELossEXP, BKELossFTS
#Import model-specific classes
from brownian_ml import CommittorNet, BrownianParticle
import numpy as np
#Save the rank and world size
from tpstorch import _rank, _world_size
rank = _rank
world_size = _world_size
#Import any other things
import tqdm, sys
torch.manual_seed(0)
np.random.seed(0)
prefix = 'vanilla'
#Set initial configuration and BP simulator (reactant x=-1, product x=+1)
start = torch.tensor([[-1.0]])
end = torch.tensor([[1.0]])
def initializer(s):
    #Linear interpolation between start (s=0) and end (s=1)
    return (1-s)*start+s*end
#Each rank starts from an evenly-spaced point along the pathway
initial_config = initializer(_rank/(_world_size-1))
kT = 1/15.0
#Biased sampler (kappa=50) plus an unbiased sampler for the BC loss
bp_sampler = BrownianParticle(dt=5e-3,gamma=1.0,kT=kT, kappa=50,initial = initial_config,prefix=prefix,save_config=True)
bp_sampler_bc = BrownianParticle(dt=5e-3,gamma=1.0,kT=kT, kappa=0.0,initial = initial_config,prefix=prefix,save_config=True)
#Initialize neural net from a shared starting parameter file
committor = CommittorNet(d=1,num_nodes=200).to('cpu')
committor.load_state_dict(torch.load("initial_nn"))
#Construct EXPSimulation
batch_size = 4
datarunner = EXPReweightSimulation(bp_sampler, committor, period=100, batch_size=batch_size, dimN=1)
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossEXP( bc_sampler = bp_sampler_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5)
#optimizer = ParallelAdam(committor.parameters(), lr=1e-2)#, momentum=0.90,weight_decay=1e-3
optimizer = ParallelSGD(committor.parameters(), lr=5e-4,momentum=0.95)
#Save loss function statistics (only rank 0 writes the loss file)
loss_io = []
if _rank == 0:
    loss_io = open("{}_loss.txt".format(prefix),'w')
#Save timing statistics (every rank writes its own timing file)
import time
time_io = open("{}_timing_{}.txt".format(prefix,rank),'w')
#Training loop
for epoch in range(1):
    if _rank == 0:
        print("epoch: [{}]".format(epoch+1))
    actual_counter = 0
    while actual_counter <= 2500:
        t0 = time.time()
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        t1 = time.time()
        sampling_time = t1-t0
        t0 = time.time()
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        cost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cost.backward()
        optimizer.step()
        t1 = time.time()
        optimization_time = t1-t0
        time_io.write('{:d} {:.5E} {:.5E} \n'.format(actual_counter+1,sampling_time, optimization_time))#main_loss.item(),bc_loss.item()))
        time_io.flush()
        # print statistics
        with torch.no_grad():
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            if _rank == 0:
                #Print statistics
                print('[{}] loss: {:.5E} penalty: {:.5E} lr: {:.3E}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), optimizer.param_groups[0]['lr']))
                #Also print the reweighting factors
                print(loss.zl)
                loss_io.write('{:d} {:.5E} {:.5E} \n'.format(actual_counter+1,main_loss.item(),bc_loss.item()))
                loss_io.flush()
                torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,actual_counter,rank))
                #Only save parameters from rank 0
                torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
        actual_counter += 1
| 3,912 | 32.732759 | 163 | py |
tps-torch | tps-torch-main/1dbrownian/ml_test/run_cl.py | #Import necessarry tools from torch
import torch
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelAdam, ParallelSGD
from tpstorch.ml.nn import BKELossEXP, BKELossFTS, CommittorLoss2
from brownian_ml import CommittorNet, BrownianParticle
import numpy as np
from tpstorch import _rank, _world_size
rank = _rank
world_size = _world_size
#Import any other thing
import tqdm, sys
torch.manual_seed(0)
np.random.seed(0)
prefix = 'cl'
#Set initial configuration and BP simulator (reactant x=-1, product x=+1)
start = torch.tensor([[-1.0]])
end = torch.tensor([[1.0]])
def initializer(s):
    #Linear interpolation between start (s=0) and end (s=1)
    return (1-s)*start+s*end
initial_config = initializer(_rank/(_world_size-1))
kT = 1/15.0
#Biased sampler (kappa=50) plus an unbiased sampler for BC/committor losses
bp_sampler = BrownianParticle(dt=5e-3,gamma=1.0,kT=kT, kappa=50,initial = initial_config,prefix=prefix,save_config=True)
bp_sampler_bc = BrownianParticle(dt=5e-3,gamma=1.0,kT=kT, kappa=0.0,initial = initial_config,prefix=prefix,save_config=True)
#Initialize neural net
committor = CommittorNet(d=1,num_nodes=200).to('cpu')
committor.load_state_dict(torch.load("initial_nn"))
#Construct EXPSimulation
batch_size = 4
datarunner = EXPReweightSimulation(bp_sampler, committor, period=100, batch_size=batch_size, dimN=1)
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
bkeloss = BKELossEXP( bc_sampler = bp_sampler_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5)
#Supervised committor loss, ramped in between iterations cl_start and cl_end
cmloss = CommittorLoss2( cl_sampler = bp_sampler_bc,
                        committor = committor,
                        lambda_cl=100.0,
                        cl_start=10,
                        cl_end=5000,
                        cl_rate=40,
                        cl_trials=100,
                        batch_size_cl=0.5
                        )
#optimizer = ParallelAdam(committor.parameters(), lr=1e-2)#, momentum=0.90,weight_decay=1e-3
optimizer = ParallelSGD(committor.parameters(), lr=5e-4,momentum=0.95)#,nesterov=True)
#Save loss function statistics (only rank 0 writes the loss file)
loss_io = []
if _rank == 0:
    loss_io = open("{}_loss.txt".format(prefix),'w')
#Save timing statistics (every rank writes its own timing file)
import time
time_io = open("{}_timing_{}.txt".format(prefix,rank),'w')
#Training loop
for epoch in range(1):
    if _rank == 0:
        print("epoch: [{}]".format(epoch+1))
    actual_counter = 0
    while actual_counter <= 2500:
        t0 = time.time()
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        t1 = time.time()
        sampling_time = t1-t0
        t0 = time.time()
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        bkecost = bkeloss(grad_xs,invc,fwd_wl,bwrd_wl)
        t1 = time.time()
        optimization_time = t1-t0
        t0 = time.time()
        # supervised committor-matching cost at the current configuration
        cmcost = cmloss(actual_counter, bp_sampler.getConfig())
        t1 = time.time()
        supervised_time = t1-t0
        t0 = time.time()
        cost = cmcost+bkecost
        cost.backward()
        optimizer.step()
        t1 = time.time()
        optimization_time += t1-t0
        time_io.write('{:d} {:.5E} {:.5E} {:.5E} \n'.format(actual_counter+1,sampling_time, optimization_time, supervised_time))#main_loss.item(),bc_loss.item()))
        time_io.flush()
        # print statistics
        with torch.no_grad():
            main_loss = bkeloss.main_loss
            bc_loss = bkeloss.bc_loss
            cl_loss = cmloss.cl_loss
            if _rank == 0:
                #Print statistics
                print('[{}] loss: {:.5E} bc: {:.5E} cm: {:.5E} lr: {:.3E}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), cl_loss.item(), optimizer.param_groups[0]['lr']))
                #Also print the reweighting factors
                print(bkeloss.zl)
                loss_io.write('{:d} {:.5E} {:.5E} {:.5E} \n'.format(actual_counter+1,main_loss.item(),bc_loss.item(),cl_loss.item()))
                loss_io.flush()
                torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,actual_counter,rank))
                torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
        actual_counter += 1
| 4,611 | 33.939394 | 185 | py |
tps-torch | tps-torch-main/1dbrownian/ml_test/plot.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.ticker import AutoMinorLocator
#Use LaTeX text rendering for the figures
mpl.rcParams['text.usetex'] = True
params= {'text.latex.preamble' : [r'\usepackage{bm}',r'\usepackage{mathtools,amsmath}']}
mpl.rcParams.update(params)
#Import necessary tools from tpstorch
from brownian_ml import CommittorNet
from brownian_ml import CommittorFTSNet
import numpy as np
#Import any other thing
import tqdm, sys
#Which training run to plot; the prefix selects the saved parameter/data files
#prefix = 'vanilla_highT'
prefix = 'fts_highT'
#prefix = 'cl_highT'
#Computing solution from neural network
#Reactant (-1) and product (+1) endpoints of the 1D Brownian system
start = torch.tensor([[-1.0]])
end = torch.tensor([[1.0]])
def initializer(s):
    """Linearly interpolate between the global `start` and `end` configurations at fraction s in [0, 1]."""
    weight_start = 1 - s
    weight_end = s
    return weight_start*start + weight_end*end
#Number of string/umbrella windows used during training
size = 11
#Initialize neural net (the FTS variant carries the string inside the net)
if prefix == 'fts_highT':
    committor = CommittorFTSNet(d=1,start=start.flatten(),end=end.flatten(),num_nodes=200, fts_size=size).to('cpu')
else:
    committor = CommittorNet(d=1,num_nodes=200).to('cpu')
committor.load_state_dict(torch.load("{}_params_1".format(prefix)))
#Read kT from the last row of the first sampler's output file
data = np.loadtxt("{}_bp_1.txt".format(prefix))
kT = data[-1,1]
#Evaluate the trained committor along the straight line from start to end
s = torch.linspace(0,1,200)
x = []
y = []
for val in s:
    x.append(initializer(val))
    y.append(committor(x[-1]).item())
#Computing exact solution: q(x) = int_{-1}^{x} e^{V/kT} dx' / int_{-1}^{1} e^{V/kT} dx'
from scipy.integrate import quad
newx = torch.linspace(-1.0,1.0,100)
yexact = []
def integrand(x):
    return np.exp((1-x**2)**2/kT)
norm = quad(integrand,-1,1)[0]
def exact(x):
    return quad(integrand,-1,x)[0]/norm
for val in newx:
    yexact.append(exact(val.item()))
plt.figure(figsize=(6,3))
#Neural net solution vs. exact solution
plt.subplot(121)
plt.plot(x,y,label='NN')
plt.plot(newx,yexact,label='Exact')
plt.xlabel('$x$',fontsize=14)
plt.ylabel('$q(x)$',fontsize=14)
plt.legend(loc=0)
#The loss function over iterations (only strictly positive entries)
plt.subplot(122)
data = np.loadtxt("{}_loss.txt".format(prefix))
index = data[:,1] > 0
plt.semilogy(data[index,0],data[index,1],label='$\\frac{1}{2}| \\nabla q(x)|^2$')
plt.legend(loc=0)
plt.savefig('solution_{}.png'.format(prefix),dpi=300)
print(kT)
#Plotting Histogram of particle trajectories (one trace per window)
plt.figure(figsize=(6,2))
for i in range(size):
    data = np.loadtxt("{}_bp_{}.txt".format(prefix,i+1))
    #plt.hist(data[:,0],bins='auto',histtype='step')
    plt.plot(data[:,0],'-')#,bins='auto',histtype='step')#x,y)
plt.savefig('hist_{}.png'.format(prefix),dpi=300)
#Plot validation result (disabled)
"""
plt.figure(figsize=(5,5))
for i in range(11):
    data = np.loadtxt("{}_validation_{}.txt".format(prefix,i+1))
    plt.errorbar(data[:,0],data[:,1],yerr=data[:,2],ls='None',color='k',marker='o')
x = np.linspace(0.3,0.7)
plt.plot(x,x,'--')
plt.ylim([0.3,0.7])
plt.xlim([0.3,0.7])
"""
plt.show()
| 2,752 | 25.728155 | 115 | py |
tps-torch | tps-torch-main/1dbrownian/ml_test/run_fts.py | #Import necessarry tools from torch
import torch
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.data import FTSSimulation
from tpstorch.ml.optim import ParallelAdam, FTSUpdate, ParallelSGD
from tpstorch.ml.nn import BKELossFTS, FTSCommittorLoss, FTSLayer
from brownian_ml_fts import CommittorNet, BrownianParticle
import numpy as np
from tpstorch import _rank, _world_size
from tpstorch import dist
world_size = _world_size
rank = _rank
#Import any other thing
import tqdm, sys
#Fix both RNGs for reproducibility across runs
torch.manual_seed(0)
np.random.seed(0)
#Prefix for all output files produced by this run
prefix = 'fts'
#Set initial configuration and BP simulator
#Reactant (-1) and product (+1) endpoints of the 1D Brownian system
start = torch.tensor([[-1.0]])
end = torch.tensor([[1.0]])
def initializer(s):
    """Linearly interpolate between the global `start` and `end` configurations at fraction s in [0, 1]."""
    weight_start = 1 - s
    weight_end = s
    return weight_start*start + weight_end*end
#Spread the ranks' initial configurations evenly along the start-end line
initial_config = initializer(_rank/(_world_size-1))
#Initialize neural net
committor = CommittorNet(d=1,num_nodes=200).to('cpu')
#Initialize the string for FTS method (one node per rank)
ftslayer = FTSLayer(react_config=start,prod_config=end,num_nodes=world_size).to('cpu')
kT = 1/15.0
#Main sampler plus a separate one used only for boundary-condition samples
bp_sampler = BrownianParticle(dt=5e-3,ftslayer=ftslayer , gamma=1.0,kT=kT, initial = initial_config,prefix=prefix,save_config=True)
bp_sampler_bc = BrownianParticle(dt=5e-3,ftslayer=ftslayer, gamma=1.0,kT=kT, initial = initial_config,prefix=prefix,save_config=True)
#Warm-start from the pre-trained committor
committor.load_state_dict(torch.load("initial_nn"))
#Construct EXPSimulation
batch_size = 4
period = 100
datarunner = FTSSimulation(bp_sampler, committor, period=period, batch_size=batch_size, dimN=1)#,mode='adaptive',min_count=250)#,max_period=100)#,max_steps=10**3)#,max_period=10)
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossFTS(  bc_sampler = bp_sampler_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5,
                    tol = 2e-9,
                    mode='shift')
#optimizer = ParallelAdam(committor.parameters(), lr=5e-3)#, momentum=0.90,weight_decay=1e-3
optimizer = ParallelSGD(committor.parameters(), lr=5e-4,momentum=0.95)
#NOTE(review): ftsoptimizer is constructed here but never stepped in the
#training loop below — confirm the string is updated elsewhere or this is an
#oversight.
ftsoptimizer = FTSUpdate(committor.lin1.parameters(), deltatau=1e-2,momentum=0.9,nesterov=True,kappa=0.1)
#Save loss function statistics
loss_io = []
if _rank == 0:
    loss_io = open("{}_loss.txt".format(prefix),'w')
#Save timing statistics
import time
time_io = open("{}_timing_{}.txt".format(prefix,rank),'w')
#Training loop: one epoch of 2501 iterations of sample -> BKE loss -> SGD step,
#with per-rank timing and rank-0 loss logging.
for epoch in range(1):
    if _rank == 0:
        print("epoch: [{}]".format(epoch+1))
    actual_counter = 0
    while actual_counter <= 2500:
        t0 = time.time()
        # get data and reweighting factors
        config, grad_xs = datarunner.runSimulation()
        t1 = time.time()
        sampling_time = t1-t0
        t0 = time.time()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        cost = loss(grad_xs, bp_sampler.rejection_count)
        cost.backward()
        optimizer.step()
        t1 = time.time()
        optimization_time = t1-t0
        time_io.write('{:d} {:.5E} {:.5E} \n'.format(actual_counter+1,sampling_time, optimization_time))#main_loss.item(),bc_loss.item()))
        time_io.flush()
        # print statistics
        with torch.no_grad():
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            #Track the average number of sampling period across ranks
            test = torch.tensor([float(datarunner.period)])
            dist.all_reduce(test)
            test /= float(world_size)
            if rank == 0:
                print(test)
            if _rank == 0:
                #Print statistics
                print('[{}] main_loss: {:.5E} bc_loss: {:.5E} lr: {:.3E} period: {:.3f}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), optimizer.param_groups[0]['lr'], test.item()),flush=True)
                print(ftslayer.string,flush=True)
                #Also print the reweighting factors
                print(loss.zl)
                loss_io.write('{:d} {:.5E} {:.5E} \n'.format(actual_counter+1,main_loss.item(),bc_loss.item()))
                loss_io.flush()
            #Checkpoint committor and string every iteration (per-iteration + latest)
            torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,actual_counter,rank))
            torch.save(ftslayer.state_dict(), "{}_string_t_{}_{}".format(prefix,actual_counter,rank))
            torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
            torch.save(ftslayer.state_dict(), "{}_string_{}".format(prefix,rank+1))
        #scheduler.step()
        actual_counter += 1
| 4,850 | 35.473684 | 207 | py |
tps-torch | tps-torch-main/1dbrownian/ml_test/run_fts_cl.py | #Import necessarry tools from torch
import torch
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.data import FTSSimulation
from tpstorch.ml.optim import ParallelAdam, FTSUpdate, ParallelSGD
from tpstorch.ml.nn import BKELossFTS, FTSCommittorLoss, CommittorLoss2, FTSLayer
from brownian_ml_fts import CommittorNet, BrownianParticle
import numpy as np
from tpstorch import _rank, _world_size
from tpstorch import dist
world_size = _world_size
rank = _rank
#Import any other thing
import tqdm, sys
#Fix both RNGs for reproducibility across runs
torch.manual_seed(0)
np.random.seed(0)
#Prefix for all output files produced by this run
prefix = 'fts_cl'
#Set initial configuration and BP simulator
#Reactant (-1) and product (+1) endpoints of the 1D Brownian system
start = torch.tensor([[-1.0]])
end = torch.tensor([[1.0]])
def initializer(s):
    """Linearly interpolate between the global `start` and `end` configurations at fraction s in [0, 1]."""
    weight_start = 1 - s
    weight_end = s
    return weight_start*start + weight_end*end
#Spread the ranks' initial configurations evenly along the start-end line
initial_config = initializer(_rank/(_world_size-1))
#Initialize neural net
committor = CommittorNet(d=1,num_nodes=200).to('cpu')
#Initialize the string for FTS method (one node per rank)
ftslayer = FTSLayer(react_config=start,prod_config=end,num_nodes=world_size).to('cpu')
kT = 1/15.0
#Main sampler plus a separate one reused for boundary-condition and committor samples
bp_sampler = BrownianParticle(dt=5e-3,ftslayer=ftslayer , gamma=1.0,kT=kT, initial = initial_config,prefix=prefix,save_config=True)
bp_sampler_bc = BrownianParticle(dt=5e-3,ftslayer=ftslayer, gamma=1.0,kT=kT, initial = initial_config,prefix=prefix,save_config=True)
#Warm-start from the pre-trained committor
committor.load_state_dict(torch.load("initial_nn"))
#Construct EXPSimulation
batch_size = 4
period = 100
datarunner = FTSSimulation(bp_sampler, committor, period=period, batch_size=batch_size, dimN=1)
#,mode='adaptive',min_rejection_count=1)#,max_period=100)#,max_steps=10**3)#,max_period=10)
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossFTS(  bc_sampler = bp_sampler_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5,
                    tol = 2e-9,
                    mode='shift')
#Supervised committor loss: periodically estimates committor values by brute force
cmloss = CommittorLoss2( cl_sampler = bp_sampler_bc,
                        committor = committor,
                        lambda_cl=100.0,
                        cl_start=10,
                        cl_end=5000,
                        cl_rate=40,
                        cl_trials=100,
                        batch_size_cl=0.5
                        )
#optimizer = ParallelAdam(committor.parameters(), lr=5e-3)#, momentum=0.90,weight_decay=1e-3
optimizer = ParallelSGD(committor.parameters(), lr=5e-4,momentum=0.95)#,nesterov=True)
#NOTE(review): ftsoptimizer is constructed here but never stepped in the
#training loop below — confirm the string is updated elsewhere or this is an
#oversight.
ftsoptimizer = FTSUpdate(committor.lin1.parameters(), deltatau=1e-2,momentum=0.9,nesterov=True,kappa=0.1)
#Save loss function statistics
loss_io = []
if _rank == 0:
    loss_io = open("{}_loss.txt".format(prefix),'w')
#Save timing statistics
import time
time_io = open("{}_timing_{}.txt".format(prefix,rank),'w')
#Training loop: one epoch of 2501 iterations of sample -> BKE loss + supervised
#committor loss -> SGD step, with per-rank timing and rank-0 loss logging.
for epoch in range(1):
    if _rank == 0:
        print("epoch: [{}]".format(epoch+1))
    actual_counter = 0
    while actual_counter <= 2500:
        t0 = time.time()
        # get data and reweighting factors
        config, grad_xs = datarunner.runSimulation()
        t1 = time.time()
        sampling_time = t1-t0
        t0 = time.time()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        cost = loss(grad_xs, bp_sampler.rejection_count)
        t1 = time.time()
        optimization_time = t1-t0
        t0 = time.time()
        # supervised committor loss, evaluated on the sampler's current config
        cmcost = cmloss(actual_counter, bp_sampler.getConfig())
        t1 = time.time()
        supervised_time = t1-t0
        t0 = time.time()
        totalcost = cost+cmcost
        totalcost.backward()
        optimizer.step()
        t1 = time.time()
        optimization_time += t1-t0
        time_io.write('{:d} {:.5E} {:.5E} {:.5E} \n'.format(actual_counter+1,sampling_time, optimization_time, supervised_time))#main_loss.item(),bc_loss.item()))
        time_io.flush()
        # print statistics
        with torch.no_grad():
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            #Track the average number of sampling period across ranks
            test = torch.tensor([float(datarunner.period)])
            dist.all_reduce(test)
            test /= float(world_size)
            if rank == 0:
                print(test)
            if _rank == 0:
                #Print statistics
                print('[{}] main_loss: {:.5E} bc_loss: {:.5E} fts_loss: {:.5E} lr: {:.3E} period: {:.3f}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), cmcost.item(), optimizer.param_groups[0]['lr'], test.item()),flush=True)
                print(ftslayer.string,flush=True)
                #Also print the reweighting factors
                print(loss.zl)
                loss_io.write('{:d} {:.5E} {:.5E} {:.5E} \n'.format(actual_counter+1,main_loss.item(),bc_loss.item(),cmcost.item()))
                loss_io.flush()
            #Checkpoint committor and string every iteration (per-iteration + latest)
            torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,actual_counter,rank))
            torch.save(ftslayer.state_dict(), "{}_string_t_{}_{}".format(prefix,actual_counter,rank))
            torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
            torch.save(ftslayer.state_dict(), "{}_string_{}".format(prefix,rank+1))
        #scheduler.step()
        actual_counter += 1
| 5,616 | 35.474026 | 239 | py |
tps-torch | tps-torch-main/muller-brown-ml/mullerbrown_prod.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.optim import project_simplex
from tpstorch.ml import MLSamplerEXP
from tpstorch.ml.nn import CommittorLossEXP
from mullerbrown_ml import MySampler
import numpy as np
#Import any other thing
import tqdm, sys
class CommittorNet(nn.Module):
    """One-hidden-layer committor network: sigmoid(lin2(unit(lin1(x)))).

    On construction the read-out weights are projected onto the simplex and
    all parameters are broadcast from rank 0 so every worker starts identical.
    Attribute names (lin1/lin2) are kept stable: they define the state-dict keys.
    """
    def __init__(self, d, num_nodes, unit=torch.relu):
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        # Hidden layer (with bias) and linear read-out (no bias).
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        self.thresh = torch.sigmoid
        # Project the read-out weights, then sync all parameters across ranks.
        self.project()
        self.broadcast()
    def forward(self, x):
        # x is expected to be a flat configuration vector (or a batch of them).
        hidden = self.unit(self.lin1(x))
        return self.thresh(self.lin2(hidden))
    def broadcast(self):
        # Make every rank start from rank 0's parameters.
        for _, param in self.named_parameters():
            if param.requires_grad:
                dist.broadcast(param.data, src=0)
    def project(self):
        # Keep the read-out weights on the probability simplex.
        with torch.no_grad():
            self.lin2.weight.data = project_simplex(self.lin2.weight.data)
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class MullerBrown(MySampler):
    """Muller-Brown MC sampler driven by the C++ MySampler backend.

    Keeps an autograd-enabled torch copy of the current configuration in
    `self.torch_config` so the committor can be differentiated with respect
    to it, and optionally dumps the configuration every 100 timesteps.
    """
    def __init__(self,param,config,rank,dump,beta,kappa,mpi_group,committor,save_config=False):
        super(MullerBrown, self).__init__(param,config.detach().clone(),rank,dump,beta,kappa,mpi_group)
        self.committor = committor
        self.save_config = save_config
        self.timestep = 0
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.torch_config = config
    def initialize_from_torchconfig(self, config):
        # Push a torch configuration straight into the backend sampler state.
        self.setConfig(config)
    def _refresh_torch_config(self):
        # Re-sync the autograd-enabled torch copy with the backend state.
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        # BUGFIX: the original called `self.torch.grad.data.zero_()` — an
        # attribute that does not exist — inside a bare `except: pass`, so the
        # AttributeError was silently swallowed and gradients were never
        # cleared. Zero the gradient of the actual config tensor if present.
        if self.torch_config.grad is not None:
            self.torch_config.grad.data.zero_()
    def step(self, committor_val, onlytst=False):
        """One biased MC move: propose, re-evaluate the committor, accept/reject."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, committor_val, onlytst)
            committor_val_ = self.committor(config_test)
            self.acceptReject(config_test, committor_val_, onlytst, True)
        self._refresh_torch_config()
    def step_unbiased(self):
        """One unbiased MC move (no committor bias)."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.acceptReject(config_test, 0.0, False, False)
        self._refresh_torch_config()
    def save(self):
        """Advance the timestep counter and dump the config every 100 steps (if enabled)."""
        self.timestep += 1
        if self.save_config:
            if self.timestep % 100 == 0:
                self.dumpConfig(0)
class MullerBrownLoss(CommittorLossEXP):
    """Total training loss for the Muller-Brown committor.

    Combines three terms, each reduced across ranks in forward():
      * main_loss — BKE loss from the parent class (compute_loss),
      * bc_loss   — penalty enforcing q=0 on reactant and q=1 on product configs,
      * cm_loss   — supervised penalty matching the network to brute-force
                    committor estimates generated on the fly with sim_committor.
    """
    def __init__(self, lagrange_bc, batch_size,start,end,radii,world_size,n_boundary_samples,react_configs,prod_configs,committor_start,committor_end,committor_rate,final_count, k_committor, sim_committor, committor_trials, mode="random", ref_index=None, batch_size_bc = 0.5, batch_size_cm = 0.5):
        super(MullerBrownLoss,self).__init__()
        # Committor loss stuff: pre-allocate storage for every committor sample
        # collected between committor_start and committor_end (one every
        # committor_rate iterations, plus slack of 2).
        self.cm_loss = torch.zeros(1)
        self.committor_start = committor_start
        self.committor_end = committor_end
        self.final_count = final_count
        self.committor_rate = committor_rate
        self.committor_configs = torch.zeros(int((self.committor_end-self.committor_start)/committor_rate+2),react_configs.shape[1], dtype=torch.float)
        self.committor_configs_values = torch.zeros(int((self.committor_end-self.committor_start)/committor_rate+2), dtype=torch.float)
        self.committor_configs_count = 0
        self.k_committor = k_committor
        self.sim_committor = sim_committor
        self.committor_trials = committor_trials
        # Batch size for BC, CM losses (fractions of the stored samples)
        self.batch_size_bc = batch_size_bc
        self.batch_size_cm = batch_size_cm
        # Other stuff
        self.lagrange_bc = lagrange_bc
        self.start = start
        self.end = end
        self.radii = radii
        self.world_size = world_size
        self.react_configs = react_configs
        self.prod_configs = prod_configs
        # Lagrange multipliers for the boundary penalties (currently static;
        # the update lines in compute_bc are commented out).
        self.react_lambda = torch.zeros(1)
        self.prod_lambda = torch.zeros(1)
        self.mean_recipnormconst = torch.zeros(1)
        #Storing a 1D Tensor of reweighting factors
        self.reweight = [torch.zeros(1) for i in range(dist.get_world_size())]
        #Choose whether to sample window references randomly or not
        self.mode = mode
        if mode != "random":
            if ref_index is None or ref_index < 0:
                raise ValueError("For non-random choice of window reference, you need to set ref_index!")
            else:
                self.ref_index = torch.tensor(ref_index)
    def compute_bc(self, committor, configs, invnormconstants):
        """Boundary-condition penalty on a random subsample of the stored reactant/product configs."""
        #Assume that first dimension is batch dimension
        loss_bc = torch.zeros(1)
        # if dist.get_rank() == 0:
        # print(self.react_configs)
        # print(committor(self.react_configs))
        # print(torch.mean(committor(self.react_configs)))
        # print(self.prod_configs)
        # print(committor(self.prod_configs))
        # print(torch.mean(committor(self.prod_configs)))
        self.react_lambda = self.react_lambda.detach()
        self.prod_lambda = self.prod_lambda.detach()
        # Sub-sample a fraction batch_size_bc of each boundary set
        indices_react = torch.randperm(len(self.react_configs))[:int(self.batch_size_bc*len(self.react_configs))]
        indices_prod = torch.randperm(len(self.prod_configs))[:int(self.batch_size_bc*len(self.prod_configs))]
        react_penalty = torch.mean(committor(self.react_configs[indices_react,:]))
        prod_penalty = torch.mean(1.0-committor(self.prod_configs[indices_prod,:]))
        #self.react_lambda -= self.lagrange_bc*react_penalty
        #self.prod_lambda -= self.lagrange_bc*prod_penalty
        # Augmented-Lagrangian form: quadratic penalty minus multiplier term
        loss_bc += 0.5*self.lagrange_bc*react_penalty**2-self.react_lambda*react_penalty
        loss_bc += 0.5*self.lagrange_bc*prod_penalty**2-self.prod_lambda*prod_penalty
        return loss_bc/self.world_size
    def computeZl(self,k,fwd_meanwgtfactor,bwrd_meanwgtfactor):
        """Free-energy-style reweighting factors z_l for every window, relative to reference window k."""
        with torch.no_grad():
            empty = []
            for l in range(dist.get_world_size()):
                if l > k:
                    empty.append(torch.prod(fwd_meanwgtfactor[k:l]))
                elif l < k:
                    empty.append(torch.prod(bwrd_meanwgtfactor[l:k]))
                else:
                    empty.append(torch.tensor(1.0))
            return torch.tensor(empty).flatten()
    def forward(self, gradients, committor, config, invnormconstants, fwd_weightfactors, bwrd_weightfactors, reciprocal_normconstants, counter, config_current):
        """Assemble the reweighted, all-reduced total loss (main + bc + cm)."""
        self.main_loss = self.compute_loss(gradients, invnormconstants)
        self.bc_loss = self.compute_bc(committor, config, invnormconstants)
        self.cm_loss = self.compute_cl(config_current, committor, counter)
        #renormalize losses
        #get prefactors: gather each rank's mean forward/backward weight factor
        self.reweight = [torch.zeros(1) for i in range(dist.get_world_size())]
        fwd_meanwgtfactor = self.reweight.copy()
        dist.all_gather(fwd_meanwgtfactor,torch.mean(fwd_weightfactors))
        fwd_meanwgtfactor = torch.tensor(fwd_meanwgtfactor[:-1])
        bwrd_meanwgtfactor = self.reweight.copy()
        dist.all_gather(bwrd_meanwgtfactor,torch.mean(bwrd_weightfactors))
        bwrd_meanwgtfactor = torch.tensor(bwrd_meanwgtfactor[1:])
        #Randomly select a window as a free energy reference and broadcast that index across all processes
        if self.mode == "random":
            self.ref_index = torch.randint(low=0,high=dist.get_world_size(),size=(1,))
            dist.broadcast(self.ref_index, src=0)
        #Computing the reweighting factors, z_l in our notation
        self.reweight = self.computeZl(self.ref_index,fwd_meanwgtfactor,bwrd_meanwgtfactor)
        self.reweight.div_(torch.sum(self.reweight)) #normalize
        #Use it first to compute the mean inverse normalizing constant
        mean_recipnormconst = torch.mean(reciprocal_normconstants)#invnormconstants)
        mean_recipnormconst.mul_(self.reweight[dist.get_rank()])
        #All reduce the mean invnormalizing constant
        dist.all_reduce(mean_recipnormconst)
        #renormalize main_loss
        self.main_loss *= self.reweight[dist.get_rank()]
        dist.all_reduce(self.main_loss)
        self.main_loss /= mean_recipnormconst
        #normalize bc_loss
        dist.all_reduce(self.bc_loss)
        #normalize cm_loss
        dist.all_reduce(self.cm_loss)
        return self.main_loss+self.bc_loss+self.cm_loss
    def myprod_checker(self, config):
        """True when config is within 0.025 of the product minimum (0.5, 0.0).

        NOTE(review): basin center/radius are hard-coded here rather than taken
        from self.end/self.radii — confirm that is intentional.
        """
        end = torch.tensor([[0.5,0.0]])
        radii = 0.025
        end_ = config-end
        end_ = end_.pow(2).sum()**0.5
        if end_ <= radii:
            return True
        else:
            return False
    def myreact_checker(self, config):
        """True when config is within 0.025 of the reactant minimum (-0.5, 1.5).

        NOTE(review): basin center/radius are hard-coded here rather than taken
        from self.start/self.radii — confirm that is intentional.
        """
        start = torch.tensor([[-0.5,1.5]])
        radii = 0.025
        start_ = config-start
        start_ = start_.pow(2).sum()**0.5
        if start_ <= radii:
            return True
        else:
            return False
    def compute_cl(self, config, committor, counter):
        """Supervised committor loss.

        Before committor_start: zero. At committor_start: generate the first
        brute-force committor estimate (fraction of committor_trials unbiased
        trajectories from `config` that reach the product first) and penalize
        the network's prediction against it. Afterwards: add a new estimate
        every committor_rate iterations (until committor_end) and penalize on
        a random subsample of all stored estimates.
        """
        loss_cl = torch.zeros(1)
        if(counter<self.committor_start):
            return loss_cl
        elif(counter==self.committor_start):
            # Generate first committor config
            counts = []
            for i in range(self.committor_trials):
                self.sim_committor.initialize_from_torchconfig(config.detach().clone())
                hitting = False
                #Run simulation and stop until it falls into the product or reactant state
                while hitting is False:
                    self.sim_committor.step_unbiased()
                    if self.myreact_checker(self.sim_committor.getConfig()):
                        hitting = True
                        counts.append(0)
                    elif self.myprod_checker(self.sim_committor.getConfig()):
                        hitting = True
                        counts.append(1)
            counts = np.array(counts)
            self.committor_configs_values[0] = np.mean(counts)
            self.committor_configs[0] = config.detach().clone()
            self.committor_configs_count += 1
            # Now compute loss
            committor_penalty = committor(self.committor_configs[0])-self.committor_configs_values[0]
            loss_cl += 0.5*self.k_committor*committor_penalty**2
            return loss_cl/self.world_size
        else:
            # Generate new committor configs and keep on generating the loss
            if counter%self.committor_rate==0 and counter < self.committor_end:
                # Generate first committor config
                counts = []
                for i in range(self.committor_trials):
                    self.sim_committor.initialize_from_torchconfig(config.detach().clone())
                    hitting = False
                    #Run simulation and stop until it falls into the product or reactant state
                    while hitting is False:
                        self.sim_committor.step_unbiased()
                        if self.myreact_checker(self.sim_committor.getConfig()):
                            hitting = True
                            counts.append(0)
                        elif self.myprod_checker(self.sim_committor.getConfig()):
                            hitting = True
                            counts.append(1)
                counts = np.array(counts)
                configs_count = self.committor_configs_count
                self.committor_configs_values[configs_count] = np.mean(counts)
                self.committor_configs[configs_count] = config.detach().clone()
                self.committor_configs_count += 1
            # Compute loss on a random subsample of the stored estimates
            indices_committor = torch.randperm(self.committor_configs_count)[:int(self.batch_size_cm*self.committor_configs_count)]
            if self.committor_configs_count == 1:
                indices_committor = 0
            committor_penalty = torch.mean(committor(self.committor_configs[indices_committor])-self.committor_configs_values[indices_committor])
            print(str(dist.get_rank())+" "+str(committor_penalty))
            loss_cl += 0.5*self.k_committor*committor_penalty**2
            return loss_cl/self.world_size
| 13,332 | 43.591973 | 297 | py |
tps-torch | tps-torch-main/muller-brown-ml/plot_tricky.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.distributed as dist
#The CommittorNet constructor broadcasts parameters, so a process group is needed
dist.init_process_group('mpi')
#Import necessary tools from tpstorch
from mullerbrown import CommittorNet
import numpy as np
#Import any other thing
import tqdm, sys
#Initialize neural net and load the trained parameters
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
committor.load_state_dict(torch.load("simple_params_1"))
#Muller-Brown potential parameters: amplitudes A, quadratic coefficients
#(a, b, c) and Gaussian centers (x_, y_) for the four terms
A = np.array([-20,-10,-17,1.5])
a = np.array([-1,-1,-6.5,0.7])
b = np.array([0,0,11,0.6])
c = np.array([-10,-10,-6.5,0.7])
x_ = np.array([1,0,-0.5,-1])
y_ = np.array([0,0.5,1.5,1])
def energy(x, y, A, a, b, c, x_, y_):
    """Evaluate the Muller-Brown-style potential on the grid (x, y).

    Each of the len(A) terms contributes
    A[i] * exp(a[i]*dx**2 + b[i]*dx*dy + c[i]*dy**2) with dx = x - x_[i]
    and dy = y - y_[i]. A, a, b, c, x_, y_ are equal-length sequences;
    the result has the same shape as x.
    """
    total = np.zeros(x.shape)
    for i in range(len(A)):
        dx = x - x_[i]
        dy = y - y_[i]
        total += A[i] * np.exp(a[i] * dx**2 + b[i] * dx * dy + c[i] * dy**2)
    return total
def q(xval, yval):
    """Evaluate the global committor network at every point of a meshgrid.

    Parameters
    ----------
    xval, yval : 2D numpy arrays from np.meshgrid(X, Y).

    Returns
    -------
    Array of the same shape holding committor([x, y]) at each grid point.

    Fix: the original looped `for i in range(nx)` over the FIRST axis and
    `for j in range(ny)` over the second, but np.meshgrid(X, Y) returns
    arrays of shape (ny, nx) — that only worked because nx == ny in this
    script. Loop over the actual array shape instead, which is correct for
    any grid size.
    """
    qvals = np.zeros_like(xval)
    for i, j in np.ndindex(xval.shape):
        point = np.array([xval[i, j], yval[i, j]], dtype=np.float32)
        qvals[i, j] = committor(torch.from_numpy(point)).item()
    return qvals
#Build a 100x100 grid covering the Muller-Brown landscape
nx, ny = 100,100
X = np.linspace(-2.0, 1.5, nx)
Y = np.linspace(-1.0, 2.5, ny)
print(X.shape)
xv, yv = np.meshgrid(X, Y)
#Filled contours of the potential energy
z = energy(xv,yv,A,a,b,c,x_,y_)
h = plt.contourf(X,Y,z,levels=[-15+i for i in range(16)])
print(np.shape(z),np.shape(xv))
#Overlay committor iso-levels from the trained network
qvals = q(xv,yv)
CS = plt.contour(X, Y, qvals,levels=[0.01,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99])
plt.colorbar()
plt.clabel(CS, fontsize=10, inline=1)#
plt.show()
| 1,485 | 27.037736 | 92 | py |
tps-torch | tps-torch-main/muller-brown-ml/test.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.data import EXPReweightSimulation, TSTValidation
from tpstorch.ml.optim import UnweightedSGD, EXPReweightSGD
from torch.distributed import distributed_c10d
from mullerbrown import CommittorNet, MullerBrown, MullerBrownLoss
import numpy as np
#Initialize the MPI process group and grab the default group handle
dist.init_process_group(backend='mpi')
mpi_group = dist.distributed_c10d._get_default_group()
#Import any other thing
import tqdm, sys
#Prefix for all output files produced by this run
prefix = 'simple'
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
#Set initial configuration and BP simulator
start = torch.tensor([[0.0,0.0]])
end = torch.tensor([[1.0,1.0]])
def initializer(s):
    """Linearly interpolate between the global `start` and `end` configurations at fraction s in [0, 1]."""
    weight_start = 1 - s
    weight_end = s
    return weight_start*start + weight_end*end
#Spread the ranks' initial configurations evenly along the start-end line
initial_config = initializer(dist.get_rank()/(dist.get_world_size()-1))
mb_sim = MullerBrown(param="param",config=initial_config, rank=dist.get_rank(), dump=1, beta=0.5, kappa=200, save_config=True, mpi_group = mpi_group, committor=committor)
#Committor Loss
initloss = nn.MSELoss()
initoptimizer = UnweightedSGD(committor.parameters(), lr=1e-2)#,momentum=0.9,nesterov=True)#, weight_decay=1e-3)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition:
#rank l is trained towards the target value l/(world_size-1)
for i in tqdm.tqdm(range(10**5)):
    # zero the parameter gradients
    initoptimizer.zero_grad()
    # forward + backward + optimize
    q_vals = committor(initial_config)
    targets = torch.ones_like(q_vals)*dist.get_rank()/(dist.get_world_size()-1)
    cost = initloss(q_vals, targets)#,committor,config,cx)
    cost.backward()
    #Stepping up
    initoptimizer.step()
    #committor.renormalize()
    committor.project()
    committor.zero_grad()
from torch.optim import lr_scheduler
#Construct EXPReweightSimulation
batch_size = 128
dataset = EXPReweightSimulation(mb_sim, committor, period=10)
loader = DataLoader(dataset,batch_size=batch_size)
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = MullerBrownLoss(lagrange_bc = 100.0,batch_size=batch_size,start=start,end=end,radii=0.1)
optimizer = EXPReweightSGD(committor.parameters(), lr=0.05, momentum=0.80)
#lr_lambda = lambda epoch : 0.9**epoch
#scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1, verbose=False)
loss_io = []
if dist.get_rank() == 0:
    loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#1 epoch: 200 iterations, 200 time-windows
for epoch in range(1):
    if dist.get_rank() == 0:
        print("epoch: [{}]".format(epoch+1))
    actual_counter = 0
    for counter, batch in enumerate(loader):
        if counter > 200:
            break
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = batch
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        cost = loss(grad_xs,committor,config,invc)
        cost.backward()
        meaninvc, reweight = optimizer.step(fwd_weightfactors=fwd_wl, bwrd_weightfactors=bwrd_wl, reciprocal_normconstants=invc)
        committor.project()
        # print statistics
        with torch.no_grad():
            #if counter % 10 == 0:
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            #What we need to do now is to compute with its respective weight
            main_loss.mul_(reweight[dist.get_rank()])
            bc_loss.mul_(reweight[dist.get_rank()])
            #All reduce the gradients
            dist.all_reduce(main_loss)
            dist.all_reduce(bc_loss)
            #Divide in-place by the mean inverse normalizing constant
            main_loss.div_(meaninvc)
            bc_loss.div_(meaninvc)
            #Print statistics
            if dist.get_rank() == 0:
                print('[{}] loss: {:.5E} penalty: {:.5E} lr: {:.3E}'.format(counter + 1, main_loss.item(), bc_loss.item(), optimizer.param_groups[0]['lr']))
                #Also print the reweighting factors
                print(reweight)
                loss_io.write('{:d} {:.5E} \n'.format(actual_counter+1,main_loss))
                loss_io.flush()
            #Only save parameters from rank 0
            torch.save(committor.state_dict(), "{}_params_{}".format(prefix,dist.get_rank()+1))
        actual_counter += 1
##Perform Validation Test
if dist.get_rank() == 0:
    print("Finished Training! Now performing validation through committor analysis")
#Construct TSTValidation
#Relax the sampler onto the transition-state (q = 1/2) surface first
print("Generating transition state")
for i in range(40000):
    config_cur = mb_sim.getConfig()
    mb_sim.step(committor_val=committor(config_cur), onlytst=True)
init_config = mb_sim.getConfig()
print("q value is "+str(committor(init_config)))
#Fresh sampler restrained to the transition-state ensemble
mb_sim = MullerBrown(param="param_tst",config=init_config, rank=dist.get_rank(), dump=1, beta=0.5, kappa=100, save_config=True, mpi_group = mpi_group, committor=committor)
#mb_sim.setConfig(init_config)
#mb_sim = MullerBrown(param="param",config=init_config, rank=dist.get_rank(), dump=1, beta=0.20, kappa=80, save_config=True, mpi_group = mpi_group, committor=committor)
batch_size = 100 #batch of initial configuration to do the committor analysis per rank
dataset = TSTValidation(mb_sim, committor, period=20)
loader = DataLoader(dataset,batch_size=batch_size)
#Save validation scores and
myval_io = open("{}_validation_{}.txt".format(prefix,dist.get_rank()+1),'w')
radii = 0.1
def myreact_checker(config):
    """Return True when config lies in the reactant basin, i.e. x + y <= 0.4."""
    return bool(config[0] + config[1] <= 0.4)
def myprod_checker(config):
    """Return True when config lies in the product basin, i.e. x + y >= 1.6."""
    return bool(config[0] + config[1] >= 1.6)
#Run validation loop: up to two batches of transition-state configurations,
#each validated by shooting 100 unbiased trajectories per configuration.
actual_counter = 0
for epoch, batch in enumerate(loader):
    if epoch > 1:
        break
    if dist.get_rank() == 0:
        print("epoch: [{}]".format(epoch+1))
    #Call the validation function
    configs, committor_values = batch
    dataset.validate(batch, trials=100, validation_io=myval_io, product_checker=myprod_checker, reactant_checker=myreact_checker)
| 6,259 | 35.823529 | 171 | py |
tps-torch | tps-torch-main/muller-brown-ml/mb_fts.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.mullerbrown_ml import MyMLFTSSampler
from tpstorch import _rank, _world_size
import numpy as np
#Import any other thing
import tqdm, sys
#A single hidden layer NN, where some nodes possess the string configurations
class CommittorNet(nn.Module):
    """Two-hidden-layer committor net: sigmoid(lin2(unit(lin3(unit(lin1(x)))))).

    Parameters are broadcast from rank 0 at construction so every worker
    starts from the same weights. Attribute names (lin1/lin3/lin2) are kept
    stable: they define the state-dict keys used when saving/loading.
    """
    def __init__(self, d, num_nodes, unit=torch.relu):
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        # Input layer, hidden layer, and linear read-out (no bias).
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin3 = nn.Linear(num_nodes, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        self.broadcast()
    def forward(self, x):
        hidden = self.unit(self.lin1(x))
        hidden = self.unit(self.lin3(hidden))
        return torch.sigmoid(self.lin2(hidden))
    def broadcast(self):
        # Synchronize every trainable parameter with rank 0's copy.
        for _, param in self.named_parameters():
            if param.requires_grad:
                dist.broadcast(param.data, src=0)
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class MullerBrown(MyMLFTSSampler):
    """Muller-Brown MC sampler restrained to FTS cells via the C++ backend.

    Keeps an autograd-enabled torch copy of the current configuration in
    `self.torch_config` and appends visited configurations to a plain-text
    trajectory file (one "x y" pair per line) instead of the default XYZ dump.
    """
    def __init__(self,param,config,rank,dump,beta,mpi_group,ftslayer,save_config=False):
        super(MullerBrown, self).__init__(param,config.detach().clone(),rank,dump,beta,mpi_group)
        self.save_config = save_config
        self.timestep = 0
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.torch_config = config
        self.ftslayer = ftslayer
        #Configs file Save Alternative, since the default XYZ format is an overkill
        #NOTE(review): this handle is never closed explicitly; acceptable for a
        #long-running driver script but worth confirming.
        self.file = open("configs_{}.txt".format(dist.get_rank()), "w")
    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        # Push a torch configuration straight into the backend sampler state.
        self.setConfig(config)
    @torch.no_grad()
    def reset(self):
        """Reset the step count; snap back to this rank's string node if outside its FTS cell."""
        self.steps = 0
        self.distance_sq_list = self.ftslayer(self.getConfig().flatten())
        inftscell = self.checkFTSCell(_rank, _world_size)
        if not inftscell:
            self.setConfig(self.ftslayer.string[_rank])
    def _refresh_torch_config(self):
        # Re-sync the autograd-enabled torch copy with the backend state.
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        # BUGFIX: the original called `self.torch.grad.data.zero_()` — an
        # attribute that does not exist — inside a bare `except: pass`, so the
        # AttributeError was silently swallowed and gradients were never
        # cleared. Zero the gradient of the actual config tensor if present.
        if self.torch_config.grad is not None:
            self.torch_config.grad.data.zero_()
    def step(self):
        """One MC move, accepted only if the proposal stays inside this rank's FTS cell."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.distance_sq_list = self.ftslayer(config_test.flatten())
            inftscell = self.checkFTSCell(_rank, _world_size)
            if inftscell:
                self.acceptReject(config_test)
        self._refresh_torch_config()
    def step_unbiased(self):
        """One unrestrained MC move (no FTS-cell rejection)."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.acceptReject(config_test)
        self._refresh_torch_config()
    @torch.no_grad()
    def isProduct(self,config):
        """Return True if config lies within the product basin (disk of radius 0.025 around (0.6, 0.08))."""
        end = torch.tensor([[0.6,0.08]])
        radii = 0.025
        dist_to_end = (config-end).pow(2).sum()**0.5
        return bool(dist_to_end <= radii)
    @torch.no_grad()
    def isReactant(self,config):
        """Return True if config lies within the reactant basin (disk of radius 0.025 around (-0.5, 1.5))."""
        start = torch.tensor([[-0.5,1.5]])
        radii = 0.025
        dist_to_start = (config-start).pow(2).sum()**0.5
        return bool(dist_to_start <= radii)
    def step_bc(self):
        """One MC move accepted only if the proposal lands in the reactant or product basin."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            if self.isReactant(config_test.flatten()) or self.isProduct(config_test.flatten()):
                self.acceptReject(config_test)
        self._refresh_torch_config()
    def save(self):
        """Record the current configuration every 10 timesteps (if enabled)."""
        self.timestep += 1
        if self.save_config and self.timestep % 10 == 0:
            self.file.write("{} {} \n".format(self.torch_config[0].item(), self.torch_config[1].item()))
            self.file.flush()
| 4,992 | 32.965986 | 108 | py |
tps-torch | tps-torch-main/muller-brown-ml/mullerbrown.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.optim import project_simplex
from tpstorch.ml import MLSamplerEXP
from tpstorch.ml.nn import CommittorLossEXP
from mullerbrown_ml import MySampler
import numpy as np
#Import any other thing
import tqdm, sys
class CommittorNet(nn.Module):
    """Single-hidden-layer committor network.

    The readout layer has no bias and its weights are projected onto the
    probability simplex so the output interpolates between 0 and 1; all
    parameters are broadcast from rank 0 at construction time.
    """
    def __init__(self, d, num_nodes, unit=torch.sigmoid):
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        self.project()
        self.broadcast()

    def forward(self, x):
        # x arrives flattened; reshape here if a 2x1 or 3x4 layout is ever needed.
        return self.lin2(self.unit(self.lin1(x)))

    def broadcast(self):
        """Synchronize all trainable parameters across ranks from rank 0."""
        for _name, param in self.named_parameters():
            if not param.requires_grad:
                continue
            dist.broadcast(param.data, src=0)

    def project(self):
        """Project the readout weights so the output stays between zero and one."""
        with torch.no_grad():
            self.lin2.weight.data = project_simplex(self.lin2.weight.data)
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class MullerBrown(MySampler):
    """Muller-Brown MC sampler with a committor-biased accept/reject step.

    Wraps the C++ base sampler (MySampler); the committor network scores the
    proposed configuration for the biased acceptReject call in `step`.
    """
    def __init__(self,param,config,rank,dump,beta,kappa,mpi_group,committor,save_config=False):
        super(MullerBrown, self).__init__(param,config.detach().clone(),rank,dump,beta,kappa,mpi_group)

        self.committor = committor
        self.save_config = save_config
        self.timestep = 0

        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()

        self.torch_config = config

    def initialize_from_torchconfig(self, config):
        """Set the sampler's internal configuration from a torch tensor."""
        # Don't have to worry about all that much all, can just set it
        self.setConfig(config)

    def step(self, committor_val, onlytst=False):
        """One biased MC step; the proposal is scored with the current committor."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, committor_val, onlytst)
            committor_val_ = self.committor(config_test)
            self.acceptReject(config_test, committor_val_, onlytst, True)
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        # BUGFIX: was `self.torch.grad.data.zero_()`, which always raised and
        # was silently swallowed by the bare except, so the gradient was never
        # cleared. Zero the gradient on the new leaf tensor instead.
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            # A fresh leaf tensor has grad == None; nothing to clear.
            pass

    def step_unbiased(self):
        """One unbiased MC step."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.acceptReject(config_test, 0.0, False, False)
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        # Same gradient-clearing fix as in step().
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            pass

    def save(self):
        """Dump the configuration via the base class every 100 timesteps."""
        self.timestep += 1
        if self.save_config:
            if self.timestep % 100 == 0:
                self.dumpConfig(0)
class MullerBrownLoss(CommittorLossEXP):
    """Committor loss with quadratic boundary-condition penalties.

    Penalizes committor values away from 0 on stored reactant configurations
    and away from 1 on stored product configurations, scaled by `lagrange_bc`.
    """
    def __init__(self, lagrange_bc, batch_size,start,end,radii,world_size,n_boundary_samples,react_configs,prod_configs):
        super(MullerBrownLoss,self).__init__()
        self.lagrange_bc = lagrange_bc
        self.start = start
        self.end = end
        self.radii = radii
        self.world_size = world_size
        self.react_configs = react_configs
        self.prod_configs = prod_configs

    def compute_bc(self, committor, configs, invnormconstants):
        """Return the boundary-condition penalty for the current committor.

        `configs` and `invnormconstants` are unused here but kept for
        interface compatibility with the base class.
        """
        #Assume that first dimension is batch dimension
        loss_bc = torch.zeros(1)
        # CLEANUP: removed leftover rank-0 debug prints that dumped the full
        # reactant/product tensors and committor outputs on every call.
        loss_bc += 0.5*self.lagrange_bc*torch.mean(committor(self.react_configs)**2)
        loss_bc += 0.5*self.lagrange_bc*torch.mean((1.0-committor(self.prod_configs))**2)
        # Divide by world size — presumably so an all-reduce sum over ranks
        # recovers the single-copy value; verify against the training loop.
        return loss_bc/self.world_size
| 4,508 | 36.264463 | 121 | py |
tps-torch | tps-torch-main/muller-brown-ml/run_fts_c.py | #import sys
#sys.path.insert(0,"/global/home/users/muhammad_hasyim/tps-torch/build")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from mb_fts import MullerBrown, CommittorNet
from tpstorch.ml.data import FTSSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSUpdate
from tpstorch.ml.nn import BKELossFTS, FTSCommittorLoss, FTSLayer
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
# Fixed seeds for reproducibility of both torch and numpy RNG streams.
torch.manual_seed(5090)
np.random.seed(5090)
# Prefix for all output filenames (parameters, string, loss log).
prefix = 'simple'
# Linear interpolation between the reactant and product endpoints below.
def initializer(s):
    """Return the linear interpolation (1 - s)*start + s*end of the endpoints.

    `start` and `end` are resolved from module globals at call time.
    """
    return (1 - s) * start + s * end

# String endpoints: reactant and product states of the Muller-Brown surface.
start = torch.tensor([-0.5, 1.5])
end = torch.tensor([0.6, 0.08])
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
#Initialize the string for FTS method
ftslayer = FTSLayer(react_config=start,prod_config=end,num_nodes=world_size).to('cpu')
# Each rank starts at an evenly spaced point along the reactant-product segment.
initial_config = initializer(rank/(dist.get_world_size()-1))
# Redefine the endpoints with an explicit batch dimension for the BC loss below.
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.6,0.08]])
kT = 10.0
mb_sim = MullerBrown(param="param",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer)
mb_sim_bc = MullerBrown(param="param_bc",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer)
#Initial Training Loss
initloss = nn.MSELoss()
initoptimizer = ParallelSGD(committor.parameters(), lr=1e-3)#,momentum=0.95, nesterov=True)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition
# (rank r is fit to the target value r/(world_size-1) at its starting point)
for i in range(3*10**3):
    # zero the parameter gradients
    initoptimizer.zero_grad()
    # forward + backward + optimize
    q_vals = committor(initial_config.view(-1,2))
    targets = torch.ones_like(q_vals)*rank/(dist.get_world_size()-1)
    cost = initloss(q_vals, targets)#,committor,config,cx)
    cost.backward()
    #Stepping up
    initoptimizer.step()
    if i%1000 == 0 and rank == 0:
        print("Init step "+str(i),cost)
    # NOTE(review): this save executes every iteration, not just at the end.
    if rank == 0:
        #Only save parameters from rank 0
        torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
committor.zero_grad()
#Construct FTSSimulation
n_boundary_samples = 100
batch_size = 512
period = 10
# Batch generator: runs the FTS-constrained sampler and collects configurations.
datarunner = FTSSimulation(mb_sim, committor, period=period, batch_size=batch_size, dimN=2)
#Initialize main loss function and optimizers
# BKE loss with quadratic boundary penalties (lambda_A/lambda_B) sampled from
# the unconstrained bc sampler around the reactant/product states.
loss = BKELossFTS( bc_sampler = mb_sim_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5,
                    tol = 5e-9,
                    mode = 'shift')
#This is the new committor loss! Feel free to comment this out if you don't need it
cmloss = FTSCommittorLoss( fts_sampler = mb_sim,
                            committor = committor,
                            fts_layer=ftslayer,
                            dimN = 2,
                            lambda_fts=100,
                            fts_start=100,
                            fts_end=5000,
                            fts_max_steps=batch_size*period*10, #To estimate the committor, we'll run longer
                            fts_rate=10, #In turn, we will only sample more committor value estimate after 10 iterations
                            fts_min_count=2000, #Minimum count so that simulation doesn't (potentially) run too long
                            batch_size_fts=0.5,
                            tol = 5e-9,
                            mode = 'shift'
                            )
#optimizer = ParallelAdam(committor.parameters(), lr=1e-2)
optimizer = ParallelSGD(committor.parameters(), lr=1e-3)#,momentum=0.95,nesterov=True)
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=0.005,momentum=0.5,nesterov=True, kappa=0.2)
#FTS Needs a scheduler because we're doing stochastic gradient descent, i.e., we're not accumulating a running average
#But only computes mini-batch averages
from torch.optim.lr_scheduler import LambdaLR
lr_lambda = lambda epoch: 0.999**epoch
scheduler = LambdaLR(ftsoptimizer, lr_lambda)
# Only rank 0 writes the loss log.
loss_io = []
if rank == 0:
    loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#We can train in terms of epochs, but we will keep it in one epoch
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    actual_counter = 0
    while actual_counter <= 20000:
        # get data and reweighting factors
        configs, grad_xs = datarunner.runSimulation()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        cost = loss(grad_xs, mb_sim.rejection_count)
        #We can skip the new committor loss calculation
        cmcost = cmloss(actual_counter, ftslayer.string)
        totalcost = cost+cmcost
        totalcost.backward()
        optimizer.step()
        # (1) Update the string
        ftsoptimizer.step(configs,batch_size)
        # print statistics
        with torch.no_grad():
            #if counter % 10 == 0:
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            #Track the average number of sampling period
            test = torch.tensor([float(datarunner.period)])
            dist.all_reduce(test)
            test /= float(world_size)
            if rank == 0:
                print(test)
            #Print statistics
            if rank == 0:
                print('[{}] main_loss: {:.5E} bc_loss: {:.5E} fts_loss: {:.5E} lr: {:.3E} period: {:.3f}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), cmcost.item(), optimizer.param_groups[0]['lr'], test.item()),flush=True)
                print(ftslayer.string,flush=True)
                print(loss.zl)
                loss_io.write('{:d} {:.5E} {:.5E} {:.5E} \n'.format(actual_counter+1,main_loss.item(),bc_loss.item(),cmcost.item()))
                loss_io.flush()
            # Periodic per-iteration checkpoints, plus a rolling "latest" copy.
            if actual_counter % 4 == 0:
                torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,actual_counter,rank))
                torch.save(ftslayer.state_dict(), "{}_string_t_{}_{}".format(prefix,actual_counter,rank))
            torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
            torch.save(ftslayer.state_dict(), "{}_string_{}".format(prefix,rank+1))
        scheduler.step()
        actual_counter += 1
    if rank == 0:
        print('FTS step size: {}'.format(ftsoptimizer.param_groups[0]['lr']))
| 6,959 | 38.101124 | 239 | py |
tps-torch | tps-torch-main/muller-brown-ml/test_sampler_2.py | import mullerbrown_ml as mb
import torch
import torch.distributed as dist
from torch.distributed import distributed_c10d
# Initialize torch.distributed with the MPI backend; required before any
# process-group access below.
dist.init_process_group(backend='mpi')
mpi_group = dist.distributed_c10d._get_default_group()
# Smoke test of the C++ sampler binding. Argument order matches the
# MySampler usage elsewhere in this project:
# (param_file, config, rank, dump, beta, kappa, mpi_group).
a = mb.MySampler('param_tst',torch.tensor([[0.5,0.5]]),0,0,2.0,0.0,distributed_c10d._get_default_group())
for i in range(100000):
    config = torch.tensor([[0.0,0.0]])
    # Propose a move into `config` (filled in place), then accept/reject it
    # and print the sampler's resulting configuration.
    a.propose(config, 0, False)
    a.acceptReject(config, 0, False, False)
    test = a.getConfig()
    print(test)
| 504 | 30.5625 | 105 | py |
tps-torch | tps-torch-main/muller-brown-ml/run_fts_c2.py | #import sys
#sys.path.insert(0,"/global/home/users/muhammad_hasyim/tps-torch/build")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from mb_fts import MullerBrown, CommittorNet
from tpstorch.ml.data import FTSSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSUpdate
from tpstorch.ml.nn import BKELossFTS, FTSCommittorLoss, CommittorLoss, FTSLayer
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(5090)
np.random.seed(5090)
prefix = 'simple'
#Initialize neural net
def initializer(s):
    """Interpolate linearly along the reactant-to-product segment (s in [0, 1])."""
    # `start` and `end` are looked up from module globals at call time.
    return (1 - s) * start + s * end

# Endpoints of the initial string: reactant and product states.
start = torch.tensor([-0.5, 1.5])
end = torch.tensor([0.6, 0.08])
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
#Initialize the string for FTS method
ftslayer = FTSLayer(react_config=start,prod_config=end,num_nodes=world_size).to('cpu')
initial_config = initializer(rank/(dist.get_world_size()-1))
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.6,0.08]])
kT = 10.0
mb_sim = MullerBrown(param="param",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer)
mb_sim_bc = MullerBrown(param="param_bc",config=initial_config, rank=rank, dump=0, beta=1/kT, save_config=False, mpi_group = mpi_group, ftslayer=ftslayer)
mb_sim_com = MullerBrown(param="param_tst",config=initial_config, rank=rank, dump=0, beta=1/kT, save_config=False, mpi_group = mpi_group, ftslayer=ftslayer)
#Initial Training Loss
initloss = nn.MSELoss()
initoptimizer = ParallelSGD(committor.parameters(), lr=1e-3)#,momentum=0.95, nesterov=True)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition
for i in range(3*10**3):
# zero the parameter gradients
initoptimizer.zero_grad()
# forward + backward + optimize
q_vals = committor(initial_config.view(-1,2))
targets = torch.ones_like(q_vals)*rank/(dist.get_world_size()-1)
cost = initloss(q_vals, targets)#,committor,config,cx)
cost.backward()
#Stepping up
initoptimizer.step()
if i%1000 == 0 and rank == 0:
print("Init step "+str(i),cost)
if rank == 0:
#Only save parameters from rank 0
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
committor.zero_grad()
#Construct FTSSimulation
n_boundary_samples = 100
batch_size = 128
period = 10
datarunner = FTSSimulation(mb_sim, committor, period=period, batch_size=batch_size, dimN=2)
#Initialize main loss function and optimizers
loss = BKELossFTS( bc_sampler = mb_sim_bc,
committor = committor,
lambda_A = 1e4,
lambda_B = 1e4,
start_react = start,
start_prod = end,
n_bc_samples = 100,
bc_period = 10,
batch_size_bc = 0.5,
tol = 5e-9,
mode = 'shift')
#This is the new committor loss! Feel free to comment this out if you don't need it
cmloss = FTSCommittorLoss( fts_sampler = mb_sim,
committor = committor,
fts_layer=ftslayer,
dimN = 2,
lambda_fts=100,
fts_start=100,
fts_end=2000,
fts_max_steps=batch_size*period*10, #To estimate the committor, we'll run longer
fts_rate=10, #In turn, we will only sample more committor value estimate after 10 iterations
fts_min_count=2000, #Minimum count so that simulation doesn't (potentially) run too long
batch_size_fts=0.5,
tol = 5e-9,
mode = 'shift'
)
cmloss2 = CommittorLoss( cl_sampler = mb_sim_com,
committor = committor,
lambda_cl=100.0,
cl_start=10,
cl_end=2000,
cl_rate=10,
cl_trials=50,
batch_size_cl=0.5
)
#optimizer = ParallelAdam(committor.parameters(), lr=1e-2)
optimizer = ParallelSGD(committor.parameters(), lr=1e-3)#,momentum=0.95,nesterov=True)
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=0.005,momentum=0.5,nesterov=True, kappa=0.02)
#FTS Needs a scheduler because we're doing stochastic gradient descent, i.e., we're not accumulating a running average
#But only computes mini-batch averages
from torch.optim.lr_scheduler import LambdaLR
lr_lambda = lambda epoch: 0.999**epoch
scheduler = LambdaLR(ftsoptimizer, lr_lambda)
loss_io = []
if rank == 0:
loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#We can train in terms of epochs, but we will keep it in one epoch
for epoch in range(1):
if rank == 0:
print("epoch: [{}]".format(epoch+1))
actual_counter = 0
while actual_counter <= 20000:
# get data and reweighting factors
configs, grad_xs = datarunner.runSimulation()
# zero the parameter gradients
optimizer.zero_grad()
# (2) Update the neural network
cost = loss(grad_xs, mb_sim.rejection_count)
#We can skip the new committor loss calculation
cmcost = cmloss(actual_counter, ftslayer.string)
cmcost2 = cmloss2(actual_counter, mb_sim.getConfig())
totalcost = cost+cmcost+cmcost2
totalcost.backward()
optimizer.step()
# (1) Update the string
ftsoptimizer.step(configs,batch_size)
# print statistics
with torch.no_grad():
#if counter % 10 == 0:
main_loss = loss.main_loss
bc_loss = loss.bc_loss
#Track the average number of sampling period
test = torch.tensor([float(datarunner.period)])
dist.all_reduce(test)
test /= float(world_size)
if rank == 0:
print(test)
#Print statistics
if rank == 0:
print('[{}] main_loss: {:.5E} bc_loss: {:.5E} fts_loss: {:.5E} cl_loss: {:.5E} lr: {:.3E} period: {:.3f}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), cmcost.item(), cmcost2.item(), optimizer.param_groups[0]['lr'], test.item()),flush=True)
print(ftslayer.string,flush=True)
print(loss.zl)
loss_io.write('{:d} {:.5E} {:.5E} {:.5E} {:.5E} \n'.format(actual_counter+1,main_loss.item(),bc_loss.item(),cmcost.item(),cmcost2.item()))
loss_io.flush()
if actual_counter % 4 == 0:
torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,actual_counter,rank))
torch.save(ftslayer.state_dict(), "{}_string_t_{}_{}".format(prefix,actual_counter,rank))
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
torch.save(ftslayer.state_dict(), "{}_string_{}".format(prefix,rank+1))
scheduler.step()
actual_counter += 1
if rank == 0:
print('FTS step size: {}'.format(ftsoptimizer.param_groups[0]['lr']))
| 7,612 | 39.068421 | 271 | py |
tps-torch | tps-torch-main/muller-brown-ml/run_vanilla.py | #import sys
#sys.path.insert(0,"/global/home/users/muhammad_hasyim/tps-torch/build")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from mb_ml import MullerBrown, CommittorNet
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam
from tpstorch.ml.nn import BKELossEXP
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(3046)
np.random.seed(3046)
prefix = 'simple'
#Initialize neural net
def initializer(s):
    """Point a fraction s of the way from the reactant to the product state."""
    # Uses module-level `start`/`end` (resolved when called, not when defined).
    return (1 - s) * start + s * end

# Reactant and product endpoints.
start = torch.tensor([-0.5, 1.5])
end = torch.tensor([0.6, 0.08])
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
initial_config = initializer(rank/(dist.get_world_size()-1))
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.6,0.08]])
kT = 10.0
mb_sim = MullerBrown(param="param",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, kappa=20000, committor=committor)
mb_sim_bc = MullerBrown(param="param_bc",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, kappa=0, committor=committor)
#Initial Training Loss
initloss = nn.MSELoss()
initoptimizer = ParallelSGD(committor.parameters(), lr=1e-3)#,momentum=0.95, nesterov=True)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition
for i in range(10**5):
# zero the parameter gradients
initoptimizer.zero_grad()
# forward + backward + optimize
q_vals = committor(initial_config.view(-1,2))
targets = torch.ones_like(q_vals)*rank/(dist.get_world_size()-1)
cost = initloss(q_vals, targets)#,committor,config,cx)
cost.backward()
#Stepping up
initoptimizer.step()
if i%1000 == 0 and rank == 0:
print("Init step "+str(i),cost)
if rank == 0:
#Only save parameters from rank 0
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
committor.zero_grad()
n_boundary_samples = 100
batch_size = 128
period = 10
datarunner = EXPReweightSimulation(mb_sim, committor, period=period, batch_size=batch_size, dimN=2)
#Initialize main loss function and optimizers
loss = BKELossEXP( bc_sampler = mb_sim_bc,
committor = committor,
lambda_A = 1e4,
lambda_B = 1e4,
start_react = start,
start_prod = end,
n_bc_samples = 100,
bc_period = 10,
batch_size_bc = 0.5)
#This is the new committor loss! Feel free to comment this out if you don't need it
#cmloss = FTSCommittorLoss( fts_sampler = mb_sim,
# committor = committor,
# fts_layer=ftslayer,
# dimN = 2,
# lambda_fts=1e-1,
# fts_start=200,
# fts_end=2000,
# fts_max_steps=batch_size*period*4, #To estimate the committor, we'll run foru times as fast
# fts_rate=4, #In turn, we will only sample more committor value estimate after 4 iterations
# fts_min_count=2000, #Minimum count so that simulation doesn't (potentially) run too long
# batch_size_fts=0.5,
# tol = 1e-6,
# mode = 'shift'
# )
#optimizer = ParallelAdam(committor.parameters(), lr=1e-2)
optimizer = ParallelSGD(committor.parameters(), lr=1e-3)#,momentum=0.95,nesterov=True)
loss_io = []
if rank == 0:
loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#We can train in terms of epochs, but we will keep it in one epoch
for epoch in range(1):
if rank == 0:
print("epoch: [{}]".format(epoch+1))
actual_counter = 0
while actual_counter <= 20000:
# get data and reweighting factors
configs, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
# zero the parameter gradients
optimizer.zero_grad()
# (2) Update the neural network
cost = loss(grad_xs, invc, fwd_wl, bwrd_wl)
totalcost = cost#+cmcost
totalcost.backward()
optimizer.step()
# print statistics
with torch.no_grad():
#if counter % 10 == 0:
main_loss = loss.main_loss
bc_loss = loss.bc_loss
#Print statistics
if rank == 0:
print('[{}] main_loss: {:.5E} bc_loss: {:.5E} lr: {:.3E}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), optimizer.param_groups[0]['lr']),flush=True)
print(loss.zl)
loss_io.write('{:d} {:.5E} {:.5E} \n'.format(actual_counter+1,main_loss.item(),bc_loss.item()))
loss_io.flush()
if actual_counter % 100 == 0:
torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,actual_counter,rank))
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
actual_counter += 1
| 5,494 | 36.128378 | 180 | py |
tps-torch | tps-torch-main/muller-brown-ml/test_sampler.py | import mullerbrown_ml as mb
import torch
import torch.distributed as dist
# Initialize torch.distributed with the MPI backend before touching groups.
dist.init_process_group(backend='mpi')
mpi_group = dist.distributed_c10d._get_default_group()
# Construction-only smoke test of the C++ sampler binding; argument order is
# (param_file, config, rank, dump, beta, kappa, mpi_group).
a = mb.MySampler('param',torch.tensor([[0.0,0.0]]),0,0,0.0,0.0,dist.distributed_c10d._get_default_group())
| 277 | 29.888889 | 106 | py |
tps-torch | tps-torch-main/muller-brown-ml/plot_tricky_string.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.distributed as dist
#Import necessarry tools from tpstorch
from mb_fts import CommittorNet
from tpstorch.ml.nn import FTSLayer
import numpy as np
#Import any other thing
import tqdm, sys
#Initialize neural net and load the trained committor parameters (rank-0 file)
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
committor.load_state_dict(torch.load("simple_params_1"))
# Reactant and product endpoints used to build the string layer.
start = torch.tensor([-0.5,1.5])
end = torch.tensor([0.6,0.08])
# 48-node string; load the trained string configuration from disk.
ftslayer = FTSLayer(react_config=start,prod_config=end,num_nodes=48).to('cpu')
ftslayer.load_state_dict(torch.load("simple_string_1"))
print(ftslayer.string)
ftslayer_np = ftslayer.string.cpu().detach().numpy()
print(ftslayer_np)
# Muller-Brown potential parameters: four Gaussian terms with amplitudes A,
# quadratic-form coefficients (a, b, c), and centers (x_, y_).
A = np.array([-20,-10,-17,1.5])
a = np.array([-1,-1,-6.5,0.7])
b = np.array([0,0,11,0.6])
c = np.array([-10,-10,-6.5,0.7])
x_ = np.array([1,0,-0.5,-1])
y_ = np.array([0,0.5,1.5,1])
def energy(x, y, A, a, b, c, x_, y_):
    """Evaluate the Muller-Brown potential on the grids x, y.

    The surface is a sum of len(A) Gaussian terms,
    A[i]*exp(a[i]*dx^2 + b[i]*dx*dy + c[i]*dy^2) with dx = x - x_[i] and
    dy = y - y_[i], accumulated elementwise over the grid.
    """
    total = np.zeros(x.shape)
    for Ai, ai, bi, ci, xi, yi in zip(A, a, b, c, x_, y_):
        dx = x - xi
        dy = y - yi
        total += Ai * np.exp(ai * dx**2 + bi * dx * dy + ci * dy**2)
    return total
def q(xval, yval):
    """Evaluate the trained committor network pointwise over a 2D grid.

    GENERALIZED: loop bounds now come from the grid's own shape rather than
    the module-level `nx`, `ny` globals (equivalent for the square meshgrid
    built in this script, but correct for any grid shape).
    Relies on the module-level `committor` network.
    """
    qvals = np.zeros_like(xval)
    rows, cols = xval.shape
    for i in range(rows):
        for j in range(cols):
            # Pack the (x, y) point as float32 so it matches the net's dtype.
            point = np.array([xval[i, j], yval[i, j]]).astype(np.float32)
            qvals[i, j] = committor(torch.from_numpy(point)).item()
    return qvals
# 100x100 evaluation grid over the plotting window.
nx, ny = 100,100
X = np.linspace(-2.0, 1.5, nx)
Y = np.linspace(-1.0, 2.5, ny)
print(X.shape)
xv, yv = np.meshgrid(X, Y)
# Filled contours of the potential energy surface (16 levels from -15 to 0).
z = energy(xv,yv,A,a,b,c,x_,y_)
h = plt.contourf(X,Y,z,levels=[-15+i for i in range(16)])
print(np.shape(z),np.shape(xv))
# Isocommittor lines overlaid on the energy surface.
qvals = q(xv,yv)
CS = plt.contour(X, Y, qvals,levels=[0.01,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99])
plt.colorbar()
plt.clabel(CS, fontsize=10, inline=1)#
# Plot the converged string nodes on top (blue circles connected by lines).
plt.plot(ftslayer_np[:,0], ftslayer_np[:,1], 'bo-')
plt.show()
| 1,831 | 29.032787 | 92 | py |
tps-torch | tps-torch-main/muller-brown-ml/run_cl.py | #import sys
#sys.path.insert(0,"/global/home/users/muhammad_hasyim/tps-torch/build")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from mb_ml import MullerBrown, CommittorNet
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam
from tpstorch.ml.nn import BKELossEXP, CommittorLoss
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(3036)
np.random.seed(3036)
prefix = 'simple'
#Initialize neural net
def initializer(s):
    """Return the convex combination (1 - s)*start + s*end of the endpoints."""
    # Globals `start`/`end` are read at call time.
    return (1 - s) * start + s * end

# Reactant (s = 0) and product (s = 1) states.
start = torch.tensor([-0.5, 1.5])
end = torch.tensor([0.6, 0.08])
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
initial_config = initializer(rank/(dist.get_world_size()-1))
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.6,0.08]])
kT = 10.0
mb_sim = MullerBrown(param="param",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, kappa=20000, committor=committor)
mb_sim_bc = MullerBrown(param="param_bc",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, kappa=0, committor=committor)
mb_sim_com = MullerBrown(param="param_tst",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, kappa=0, committor=committor)
#Initial Training Loss
initloss = nn.MSELoss()
initoptimizer = ParallelSGD(committor.parameters(), lr=1e-3)#,momentum=0.95, nesterov=True)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition
for i in range(10**5):
# zero the parameter gradients
initoptimizer.zero_grad()
# forward + backward + optimize
q_vals = committor(initial_config.view(-1,2))
targets = torch.ones_like(q_vals)*rank/(dist.get_world_size()-1)
cost = initloss(q_vals, targets)#,committor,config,cx)
cost.backward()
#Stepping up
initoptimizer.step()
if i%1000 == 0 and rank == 0:
print("Init step "+str(i),cost)
if rank == 0:
#Only save parameters from rank 0
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
committor.zero_grad()
n_boundary_samples = 100
batch_size = 128
period = 10
datarunner = EXPReweightSimulation(mb_sim, committor, period=period, batch_size=batch_size, dimN=2)
#Initialize main loss function and optimizers
bkeloss = BKELossEXP( bc_sampler = mb_sim_bc,
committor = committor,
lambda_A = 1e4,
lambda_B = 1e4,
start_react = start,
start_prod = end,
n_bc_samples = 100,
bc_period = 10,
batch_size_bc = 0.5)
cmloss = CommittorLoss( cl_sampler = mb_sim_com,
committor = committor,
lambda_cl=100.0,
cl_start=2000,
cl_end=4000,
cl_rate=10,
cl_trials=50,
batch_size_cl=0.5
)
#This is the new committor loss! Feel free to comment this out if you don't need it
#cmloss = FTSCommittorLoss( fts_sampler = mb_sim,
# committor = committor,
# fts_layer=ftslayer,
# dimN = 2,
# lambda_fts=1e-1,
# fts_start=200,
# fts_end=2000,
# fts_max_steps=batch_size*period*4, #To estimate the committor, we'll run foru times as fast
# fts_rate=4, #In turn, we will only sample more committor value estimate after 4 iterations
# fts_min_count=2000, #Minimum count so that simulation doesn't (potentially) run too long
# batch_size_fts=0.5,
# tol = 1e-6,
# mode = 'shift'
# )
#optimizer = ParallelAdam(committor.parameters(), lr=1e-2)
optimizer = ParallelSGD(committor.parameters(), lr=1e-3)#,momentum=0.95,nesterov=True)
loss_io = []
if rank == 0:
loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#We can train in terms of epochs, but we will keep it in one epoch
for epoch in range(1):
if rank == 0:
print("epoch: [{}]".format(epoch+1))
actual_counter = 0
while actual_counter <= 20000:
# get data and reweighting factors
configs, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
# zero the parameter gradients
optimizer.zero_grad()
# (2) Update the neural network
bkecost = bkeloss(grad_xs, invc, fwd_wl, bwrd_wl)
cmcost = cmloss(actual_counter, mb_sim.getConfig())
cost = bkecost+cmcost
cost.backward()
optimizer.step()
# print statistics
with torch.no_grad():
#if counter % 10 == 0:
main_loss = bkeloss.main_loss
bc_loss = bkeloss.bc_loss
cl_loss = cmloss.cl_loss
#Print statistics
if rank == 0:
print('[{}] loss: {:.5E} bc: {:.5E} cm: {:.5E} lr: {:.3E}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), cl_loss.item(), optimizer.param_groups[0]['lr']))
#Also print the reweighting factors
print(bkeloss.zl)
loss_io.write('{:d} {:.5E} {:.5E} {:.5E} \n'.format(actual_counter+1,main_loss.item(),bc_loss.item(),cl_loss.item()))
loss_io.flush()
if actual_counter % 25 == 0:
torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,actual_counter,rank))
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
actual_counter += 1
| 6,208 | 37.565217 | 185 | py |
tps-torch | tps-torch-main/muller-brown-ml/test_prod.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.data import EXPReweightSimulation, EXPReweightSimulationManual, TSTValidation
from tpstorch.ml.optim import UnweightedSGD, EXPReweightSGD
from torch.distributed import distributed_c10d
from mullerbrown import CommittorNet, MullerBrown, MullerBrownLoss
import numpy as np
dist.init_process_group(backend='mpi')
mpi_group = dist.distributed_c10d._get_default_group()
#Import any other thing
import tqdm, sys
torch.manual_seed(0)
np.random.seed(0)
prefix = 'simple'
#Initialize neural net
#Thinking about having Grant's initialization procedure...
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
#Set initial configuration and BP simulator
# Endpoints for the initial committor fit.
start = torch.tensor([-1.2, 0.9])
end = torch.tensor([-0.5, 0.5])
# Alternative (batched) endpoints kept for reference:
#   start = [[-0.5, 1.5]], end = [[0.5, 0.0]]
def initializer(s):
    """Linearly interpolate between module-level `start` (s=0) and `end` (s=1)."""
    return (1 - s) * start + s * end
initial_config = initializer(dist.get_rank()/(dist.get_world_size()-1))
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.5,0.0]])
mb_sim = MullerBrown(param="param",config=initial_config, rank=dist.get_rank(), dump=1, beta=0.1, kappa=10000, save_config=True, mpi_group = mpi_group, committor=committor)
mb_sim_committor = MullerBrown(param="param_tst",config=initial_config, rank=dist.get_rank(), dump=1, beta=0.1, kappa=10000, save_config=True, mpi_group = mpi_group, committor=committor)
#Generate unbiased configurations in reactant, product regions
n_boundary_samples = 100
react_data = torch.zeros(n_boundary_samples, start.shape[0]*start.shape[1], dtype=torch.float)
prod_data = torch.zeros(n_boundary_samples, end.shape[0]*end.shape[1], dtype=torch.float)
#Reactant: unbiased sampler (kappa=0) started at the reactant endpoint.
mb_sim_react = MullerBrown(param="param",config=start, rank=dist.get_rank(), dump=1, beta=0.1, kappa=0.0, save_config=True, mpi_group = mpi_group, committor=committor)
for i in range(n_boundary_samples):
    # Run 200 unbiased MC steps between stored samples.
    for j in range(200):
        mb_sim_react.step_unbiased()
    react_data[i] = mb_sim_react.getConfig()
#Product: same procedure started at the product endpoint.
mb_sim_prod = MullerBrown(param="param",config=end, rank=dist.get_rank(), dump=1, beta=0.1, kappa=0.0, save_config=True, mpi_group = mpi_group, committor=committor)
for i in range(n_boundary_samples):
    for j in range(200):
        mb_sim_prod.step_unbiased()
    prod_data[i] = mb_sim_prod.getConfig()
#Committor Loss
initloss = nn.MSELoss()
initoptimizer = UnweightedSGD(committor.parameters(), lr=5e-2)#,momentum=0.9,nesterov=True)#, weight_decay=1e-3)
#from torchsummary import summary
running_loss = 0.0  # NOTE(review): never updated below — appears to be leftover scaffolding
#Initial training: fit the committor so that each MPI rank's output equals its
#fractional position along the string, i.e. q(initial_config) = rank/(world_size-1).
for i in range(10**5):
    if i%1000 == 0 and dist.get_rank() == 0:
        print("Init step "+str(i))
    # zero the parameter gradients
    initoptimizer.zero_grad()
    # forward + backward + optimize
    q_vals = committor(initial_config)
    # Per-rank target: evenly spaced committor values along the initial path
    targets = torch.ones_like(q_vals)*dist.get_rank()/(dist.get_world_size()-1)
    cost = initloss(q_vals, targets)#,committor,config,cx)
    cost.backward()
    #Stepping up
    initoptimizer.step()
    #committor.renormalize()
    #committor.project()
# Clear any leftover gradients before the main BKE training phase
committor.zero_grad()
from torch.optim import lr_scheduler
#Construct EXPReweightSimulation
batch_size = 128
#dataset = EXPReweightSimulation(mb_sim, committor, period=10)
#loader = DataLoader(dataset,batch_size=batch_size)
datarunner = EXPReweightSimulationManual(mb_sim, committor, period=10, batch_size=batch_size, dimN=2)
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = MullerBrownLoss(lagrange_bc = 25.0,batch_size=batch_size,start=start,end=end,radii=0.5,world_size=dist.get_world_size(),n_boundary_samples=n_boundary_samples,react_configs=react_data,prod_configs=prod_data, committor_start=200, committor_end=10000, committor_rate=40, final_count=20000, k_committor=100, sim_committor=mb_sim_committor, committor_trials=50, batch_size_bc=0.5, batch_size_cm=0.5)
if dist.get_rank() == 0:
loss.compute_bc(committor, 0, 0)
#optimizer = EXPReweightSGD(committor.parameters(), lr=0.001, momentum=0.90, nesterov=True)
optimizer = UnweightedSGD(committor.parameters(), lr=0.001, momentum=0.9, nesterov=True)#, weight_decay=1e-3)
#lr_lambda = lambda epoch : 0.9**epoch
#scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1, verbose=False)
loss_io = []
if dist.get_rank() == 0:
loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#1 epoch: 200 iterations, 200 time-windows
bc_end = 250000
bc_step_start = 10
bc_step_end = 10000
bc_stepsize = (bc_end-loss.lagrange_bc)/(bc_step_end-bc_step_start)
cm_end = 250000
cm_step_start = 300
cm_step_end = 10000
cm_stepsize = (cm_end-loss.k_committor)/(cm_step_end-cm_step_start)
for epoch in range(1):
if dist.get_rank() == 0:
print("epoch: [{}]".format(epoch+1))
actual_counter = 0
while actual_counter <= 20000:
if (actual_counter > bc_step_start) and (actual_counter <= bc_step_end):
loss.lagrange_bc += bc_stepsize
if dist.get_rank() == 0:
print("lagrange_bc is now "+str(loss.lagrange_bc))
if (actual_counter > cm_step_start) and (actual_counter <= cm_step_end):
loss.k_committor += cm_stepsize
if dist.get_rank() == 0:
print("k_committor is now "+str(loss.k_committor))
if actual_counter == cm_step_end:
loss.batch_size_bc = 1.0
loss.batch_size_cm = 1.0
# get data and reweighting factors
config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
# print("CONFIGS")
# print(grad_xs.size())
# print(config.size())
# print(invc.size())
# print(fwd_wl.size())
# print(bwrd_wl.size())
# print("END CONFIGS")
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
cost = loss(grad_xs,committor,config,invc,fwd_wl,bwrd_wl,invc, actual_counter,mb_sim.getConfig())
cost.backward()
# print(cost.size())
#print("FACTORS")
#print(fwd_wl)
#print(bwrd_wl)
#print(invc)
#meaninvc, reweight = optimizer.step(fwd_weightfactors=fwd_wl, bwrd_weightfactors=bwrd_wl, reciprocal_normconstants=invc)
optimizer.step()
#print(meaninvc)
#print(reweight)
#committor.project()
# print statistics
with torch.no_grad():
#if counter % 10 == 0:
main_loss = loss.main_loss
bc_loss = loss.bc_loss
cm_loss = loss.cm_loss
#What we need to do now is to compute with its respective weight
#main_loss.mul_(reweight[dist.get_rank()])
#bc_loss.mul_(reweight[dist.get_rank()])
#All reduce the gradients
#dist.all_reduce(main_loss)
#dist.all_reduce(bc_loss)
#dist.all_reduce(cm_loss)
#Divide in-place by the mean inverse normalizing constant
#main_loss.div_(meaninvc)
#bc_loss.div_(meaninvc)
# print("LOSS")
# print(main_loss.size())
# print(bc_loss.size())
# print("END LOSS")
#Print statistics
if dist.get_rank() == 0:
print(cm_loss)
print('[{}] loss: {:.5E} penalty: {:.5E} {:.5E} lr: {:.3E}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), cm_loss.item(), optimizer.param_groups[0]['lr']))
#Also print the reweighting factors
print(loss.reweight)
loss_io.write('{:d} {:.5E} \n'.format(actual_counter+1,main_loss))
loss_io.flush()
#Only save parameters from rank 0
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,dist.get_rank()+1))
if actual_counter%20 == 0:
torch.save(committor.state_dict(), "{}_params_t_{}".format(prefix,actual_counter))
actual_counter += 1
##Perform Validation Test
if dist.get_rank() == 0:
print("Finished Training! Now performing validation through committor analysis")
#Construct TSTValidation
print("Generating transition state")
for i in range(40000):
config_cur = mb_sim.getConfig()
mb_sim.step(committor_val=committor(config_cur), onlytst=True)
init_config = mb_sim.getConfig()
print("q value is "+str(committor(init_config)))
mb_sim = MullerBrown(param="param_tst",config=init_config, rank=dist.get_rank(), dump=1, beta=0.1, kappa=10000, save_config=True, mpi_group = mpi_group, committor=committor)
#mb_sim.setConfig(init_config)
#mb_sim = MullerBrown(param="param",config=init_config, rank=dist.get_rank(), dump=1, beta=0.20, kappa=80, save_config=True, mpi_group = mpi_group, committor=committor)
batch_size = 10 #batch of initial configuration to do the committor analysis per rank
dataset = TSTValidation(mb_sim, committor, period=20)
loader = DataLoader(dataset,batch_size=batch_size)
#Save validation scores and
myval_io = open("{}_validation_{}.txt".format(prefix,dist.get_rank()+1),'w')
def myprod_checker(config):
    """Product-state indicator: True when *config* lies within radius 0.1
    of the product basin center (0.5, 0.0)."""
    center = torch.tensor([[0.5, 0.0]])
    cutoff = 0.1
    separation = (config - center).pow(2).sum().sqrt()
    return bool(separation <= cutoff)
def myreact_checker(config):
    """Reactant-state indicator: True when *config* lies within radius 0.1
    of the reactant basin center (-0.5, 1.5)."""
    center = torch.tensor([[-0.5, 1.5]])
    cutoff = 0.1
    separation = (config - center).pow(2).sum().sqrt()
    return bool(separation <= cutoff)
#Run validation loop
actual_counter = 0
for epoch, batch in enumerate(loader):
if epoch > 1:
break
if dist.get_rank() == 0:
print("epoch: [{}]".format(epoch+1))
#Call the validation function
configs, committor_values = batch
dataset.validate(batch, trials=100, validation_io=myval_io, product_checker=myprod_checker, reactant_checker=myreact_checker)
| 10,128 | 39.678715 | 401 | py |
tps-torch | tps-torch-main/muller-brown-ml/plot.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.distributed as dist
dist.init_process_group('mpi')
#Import necessarry tools from tpstorch
from mullerbrown import CommittorNet
import numpy as np
#Import any other thing
import tqdm, sys
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
committor.load_state_dict(torch.load("simple_params_1"))
# Two-well Gaussian test potential: a pair of depth-A Gaussian wells
# centered at (x[i], y[i]) with curvatures a (x-direction) and c (y-direction).
A = -10
a = -2
b = 0
c = -2
x = [0, 1]
y = [0, 1]
def V(xval, yval):
    """Evaluate the double-Gaussian potential at (xval, yval); accepts scalars or arrays."""
    total = 0
    for cx, cy in zip(x, y):
        total += A * np.exp(a * (xval - cx) ** 2 + c * (yval - cy) ** 2)
    return total
def q(xval, yval):
    """Evaluate the trained committor network on a 2-D grid.

    xval, yval: 2-D arrays of identical shape (e.g. from np.meshgrid).
    Returns an array of the same shape holding the committor value at each node.

    Fix: the grid size is now taken from ``xval.shape`` instead of the module
    globals ``nx``/``ny``, so the function works for any grid resolution.
    """
    qvals = np.zeros_like(xval)
    rows, cols = xval.shape
    for i in range(rows):
        for j in range(cols):
            point = np.array([xval[i, j], yval[i, j]]).astype(np.float32)
            # ``committor`` is the module-level trained network loaded above
            qvals[i, j] = committor(torch.from_numpy(point)).item()
    return qvals
nx, ny = 100,100
X = np.linspace(-1.0, 2.0, nx)
Y = np.linspace(-1.0, 2.0, ny)
print(X.shape)
xv, yv = np.meshgrid(X, Y)
z = V(xv,yv)
h = plt.contourf(X,Y,z)
print(np.shape(z),np.shape(xv))
qvals = q(xv,yv)
CS = plt.contour(X, Y, qvals,levels=[0.01,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99])
plt.colorbar()
plt.clabel(CS, fontsize=10, inline=1)#
plt.show()
| 1,220 | 22.037736 | 84 | py |
tps-torch | tps-torch-main/muller-brown-ml/mb_ml.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml import MLSamplerEXP
from tpstorch.examples.mullerbrown_ml import MyMLSampler
import numpy as np
#Import any other thing
import tqdm, sys
#dist.init_process_group(backend='mpi')
class CommittorNet(nn.Module):
    """Two-hidden-layer committor network with a sigmoid output in (0, 1).

    Parameters are broadcast from rank 0 at construction so every MPI worker
    starts from the same weights.
    """
    def __init__(self, d, num_nodes, unit=torch.relu):
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin3 = nn.Linear(num_nodes, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        self.broadcast()

    def forward(self, x):
        hidden = self.unit(self.lin1(x))
        hidden = self.unit(self.lin3(hidden))
        return torch.sigmoid(self.lin2(hidden))

    def broadcast(self):
        """Copy rank 0's trainable parameters to every rank in the group."""
        for param in self.parameters():
            if param.requires_grad:
                dist.broadcast(param.data, src=0)
class MullerBrown(MyMLSampler):
    """Muller-Brown MC sampler driven by a committor network.

    Wraps the C++/pybind ``MyMLSampler``, which owns the configuration and the
    propose/accept-reject machinery, and mirrors the configuration into
    ``self.torch_config`` as an autograd-ready flat tensor after every step.
    """
    def __init__(self, param, config, rank, dump, beta, kappa, mpi_group, committor, save_config=False):
        super(MullerBrown, self).__init__(param, config.detach().clone(), rank, dump, beta, kappa, mpi_group)
        self.committor = committor
        self.save_config = save_config
        self.timestep = 0
        # Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.torch_config = config
        # Plain-text per-rank trajectory file; the default XYZ format is overkill here
        self.file = open("configs_{}.txt".format(dist.get_rank()), "w")

    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        """Overwrite the sampler's internal configuration with ``config``."""
        self.setConfig(config)

    def _refresh_torch_config(self):
        """Mirror the sampler's current configuration into an autograd-ready tensor.

        Bug fix: the original attempted ``self.torch.grad.data.zero_()`` inside a
        bare try/except; ``self.torch`` does not exist, so the intended gradient
        reset silently never ran and the bare except masked the typo.
        """
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        if self.torch_config.grad is not None:
            self.torch_config.grad.data.zero_()

    def step(self, committor_val, onlytst=False):
        """One biased MC step: propose, re-evaluate the committor at the proposal,
        accept/reject, then refresh the mirrored configuration."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, committor_val, onlytst)
            committor_val_ = self.committor(config_test)
            self.acceptReject(config_test, committor_val_, onlytst, True)
        self._refresh_torch_config()

    def step_unbiased(self):
        """One unbiased MC step (no committor bias)."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.acceptReject(config_test, 0.0, False, False)
        self._refresh_torch_config()

    @torch.no_grad()
    def isProduct(self, config):
        """True when ``config`` lies within 0.025 of the product minimum (0.6, 0.08)."""
        end = torch.tensor([[0.6, 0.08]])
        radii = 0.025
        return bool((config - end).pow(2).sum()**0.5 <= radii)

    @torch.no_grad()
    def isReactant(self, config):
        """True when ``config`` lies within 0.025 of the reactant minimum (-0.5, 1.5)."""
        start = torch.tensor([[-0.5, 1.5]])
        radii = 0.025
        return bool((config - start).pow(2).sum()**0.5 <= radii)

    def step_bc(self):
        """MC step restricted to the boundary basins: the proposal only goes through
        accept/reject when it lands in the reactant or product region; otherwise
        the current configuration is kept unchanged."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            if self.isReactant(config_test.flatten()) or self.isProduct(config_test.flatten()):
                self.acceptReject(config_test, 0.0, False, False)
        self._refresh_torch_config()

    def save(self):
        """Append the current (x, y) configuration to the per-rank file every 100 steps."""
        self.timestep += 1
        if self.save_config and self.timestep % 100 == 0:
            self.file.write("{} {} \n".format(self.torch_config[0].item(), self.torch_config[1].item()))
            self.file.flush()
| 4,567 | 33.345865 | 108 | py |
tps-torch | tps-torch-main/muller-brown-ml/mullerbrown_tricky.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.optim import project_simplex
from tpstorch.ml import MLSamplerEXP
from tpstorch.ml.nn import CommittorLossEXP
from mullerbrown_ml import MySampler
import numpy as np
#Import any other thing
import tqdm, sys
class CommittorNet(nn.Module):
    """One-hidden-layer committor network.

    The output layer has no bias and its weights are kept on the probability
    simplex by project(), so the output is a convex combination of sigmoid
    hidden units and hence lies in [0, 1] without a final squashing function.
    """
    def __init__(self, d, num_nodes, unit=torch.sigmoid):
        # d: input dimension; num_nodes: hidden-layer width; unit: hidden activation
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        # Constrain the output weights, then sync parameters across MPI ranks
        self.project()
        self.broadcast()
    def forward(self, x):
        #At the moment, x is flat. So if you want it to be 2x1 or 3x4 arrays, then you do it here!
        x = self.lin1(x)
        x = self.unit(x)
        x = self.lin2(x)
        # No final sigmoid: project() keeps lin2's weights on the simplex, so the
        # output is already a convex combination of values in [0, 1].
        return x
    def broadcast(self):
        """Copy rank 0's parameters to all ranks so every worker starts identical."""
        for name, param in self.named_parameters():
            if param.requires_grad:
                dist.broadcast(param.data,src=0)
    def project(self):
        #Project the coefficients so that they are make the output go from zero to one
        with torch.no_grad():
            self.lin2.weight.data = project_simplex(self.lin2.weight.data)
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class MullerBrown(MySampler):
    """Muller-Brown MC sampler bound to a committor network.

    Wraps the C++ ``MySampler`` (which owns the configuration and the
    propose/accept-reject machinery) and mirrors the configuration into
    ``self.torch_config`` so the network can be evaluated and differentiated.
    """
    def __init__(self,param,config,rank,dump,beta,kappa,mpi_group,committor,save_config=False):
        super(MullerBrown, self).__init__(param,config.detach().clone(),rank,dump,beta,kappa,mpi_group)
        self.committor = committor
        self.save_config = save_config
        self.timestep = 0
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.torch_config = config
    def initialize_from_torchconfig(self, config):
        # Don't have to worry about all that much all, can just set it
        self.setConfig(config)
    def step(self, committor_val, onlytst=False):
        """One biased MC step: propose, evaluate the committor at the proposal,
        then accept/reject on the C++ side."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, committor_val, onlytst)
            committor_val_ = self.committor(config_test)
            self.acceptReject(config_test, committor_val_, onlytst, True)
        # Mirror the (possibly updated) configuration into an autograd-ready tensor
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        try:
            # NOTE(review): ``self.torch`` does not exist -- this line always raises
            # and the bare except swallows it, so the intended gradient reset never
            # runs. Probably meant ``self.torch_config.grad`` -- confirm before fixing.
            self.torch.grad.data.zero_()
        except:
            pass
    def step_unbiased(self):
        """One unbiased MC step (committor value 0.0, no bias applied)."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.acceptReject(config_test, 0.0, False, False)
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        try:
            # NOTE(review): same dead grad-reset as in step() above
            self.torch.grad.data.zero_()
        except:
            pass
    def save(self):
        """Dump the configuration via the C++ writer every 100 timesteps."""
        self.timestep += 1
        if self.save_config:
            if self.timestep % 100 == 0:
                self.dumpConfig(0)
class MullerBrownLoss(CommittorLossEXP):
    """BKE loss with a quadratic boundary-condition penalty for the Muller-Brown system."""

    def __init__(self, lagrange_bc, batch_size, start, end, radii):
        super(MullerBrownLoss, self).__init__()
        self.lagrange_bc = lagrange_bc  # penalty strength for the boundary conditions
        self.start = start              # reactant minimum
        self.end = end                  # product minimum
        self.radii = radii              # basin radius around each minimum

    def compute_bc(self, committor, configs, invnormconstants):
        """Mean penalty pushing q -> 0 in/near the reactant basin and q -> 1
        in/near the product basin. Assumes the first dimension of ``configs``
        is the batch dimension.

        Fix: the original returned ``loss_bc/(i+1)`` using the loop variable,
        which raised NameError for an empty batch; we divide by ``len(configs)``
        and guard against the empty case.
        """
        loss_bc = torch.zeros(1)
        n_configs = len(configs)
        if n_configs == 0:
            return loss_bc
        for i, config in enumerate(configs):
            start_ = (config - self.start).pow(2).sum()**0.5
            end_ = (config - self.end).pow(2).sum()**0.5
            # Reactant side: inside the start basin or above the line y = 0.5 x + 1.5
            if ((start_ <= self.radii) or (config[1] > 0.5*config[0] + 1.5)):
                loss_bc += 0.5*self.lagrange_bc*(committor(config.flatten())**2)*invnormconstants[i]
            # Product side: inside the end basin or below the line y = x + 0.8
            elif ((end_ <= self.radii) or (config[1] < config[0] + 0.8)):
                loss_bc += 0.5*self.lagrange_bc*(committor(config.flatten())-1.0)**2*invnormconstants[i]
        return loss_bc/n_configs
| 4,466 | 36.225 | 104 | py |
tps-torch | tps-torch-main/muller-brown-ml/run_fts.py | #import sys
#sys.path.insert(0,"/global/home/users/muhammad_hasyim/tps-torch/build")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from mb_fts import MullerBrown, CommittorNet
from tpstorch.ml.data import FTSSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSUpdate
from tpstorch.ml.nn import BKELossFTS, FTSCommittorLoss, FTSLayer
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(0)
np.random.seed(0)
prefix = 'simple'
#Initialize neural net
def initializer(s):
    """Linearly interpolate between the module-level ``start`` and ``end``
    anchors; s is the fractional position of a rank along the string."""
    weight = 1 - s
    return weight * start + s * end
start = torch.tensor([-0.5,1.5])
end = torch.tensor([0.6,0.08])
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=100).to('cpu')
#Initialize the string for FTS method
ftslayer = FTSLayer(react_config=start,prod_config=end,num_nodes=world_size).to('cpu')
initial_config = initializer(rank/(dist.get_world_size()-1))
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.6,0.08]])
kT = 10.0
mb_sim = MullerBrown(param="param",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer)
mb_sim_bc = MullerBrown(param="param_bc",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer)
#Initial Training Loss
initloss = nn.MSELoss()
initoptimizer = ParallelSGD(committor.parameters(), lr=1e-3,momentum=0.95, nesterov=True)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition
for i in range(3*10**3):
# zero the parameter gradients
initoptimizer.zero_grad()
# forward + backward + optimize
q_vals = committor(initial_config.view(-1,2))
targets = torch.ones_like(q_vals)*rank/(dist.get_world_size()-1)
cost = initloss(q_vals, targets)#,committor,config,cx)
cost.backward()
#Stepping up
initoptimizer.step()
if i%1000 == 0 and rank == 0:
print("Init step "+str(i),cost)
if rank == 0:
#Only save parameters from rank 0
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
committor.zero_grad()
#Construct FTSSimulation
n_boundary_samples = 100
batch_size = 32
period = 40
datarunner = FTSSimulation(mb_sim, committor, period=period, batch_size=batch_size, dimN=2)
#Initialize main loss function and optimizers
loss = BKELossFTS( bc_sampler = mb_sim_bc,
committor = committor,
lambda_A = 1e4,
lambda_B = 1e4,
start_react = start,
start_prod = end,
n_bc_samples = 100,
bc_period = 10,
batch_size_bc = 0.5,
tol = 1e-6,
mode = 'shift')
#This is the new committor loss! Feel free to comment this out if you don't need it
cmloss = FTSCommittorLoss( fts_sampler = mb_sim,
committor = committor,
fts_layer=ftslayer,
dimN = 2,
lambda_fts=1e-1,
fts_start=200,
fts_end=2000,
fts_max_steps=batch_size*period*4, #To estimate the committor, we'll run foru times as fast
fts_rate=4, #In turn, we will only sample more committor value estimate after 4 iterations
fts_min_count=2000, #Minimum count so that simulation doesn't (potentially) run too long
batch_size_fts=0.5,
tol = 1e-6,
mode = 'shift'
)
optimizer = ParallelAdam(committor.parameters(), lr=1e-2)
#optimizer = ParallelSGD(committor.parameters(), lr=1e-3,momentum=0.95,nesterov=True)
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=1.0/batch_size,momentum=0.95,nesterov=True, kappa=0.1)
#FTS Needs a scheduler because we're doing stochastic gradient descent, i.e., we're not accumulating a running average
#But only computes mini-batch averages
from torch.optim.lr_scheduler import LambdaLR
lr_lambda = lambda epoch: 1/(epoch+1)
scheduler = LambdaLR(ftsoptimizer, lr_lambda)
loss_io = []
if rank == 0:
loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#We can train in terms of epochs, but we will keep it in one epoch
for epoch in range(1):
if rank == 0:
print("epoch: [{}]".format(epoch+1))
actual_counter = 0
while actual_counter <= 5000:
# get data and reweighting factors
configs, grad_xs = datarunner.runSimulation()
# zero the parameter gradients
optimizer.zero_grad()
# (2) Update the neural network
cost = loss(grad_xs, mb_sim.rejection_count)
#We can skip the new committor loss calculation
cmcost = cmloss(actual_counter, ftslayer.string)
totalcost = cost+cmcost
totalcost.backward()
optimizer.step()
# (1) Update the string
ftsoptimizer.step(configs,batch_size)
# print statistics
with torch.no_grad():
#if counter % 10 == 0:
main_loss = loss.main_loss
bc_loss = loss.bc_loss
#Track the average number of sampling period
test = torch.tensor([float(datarunner.period)])
dist.all_reduce(test)
test /= float(world_size)
if rank == 0:
print(test)
#Print statistics
if rank == 0:
print('[{}] main_loss: {:.5E} bc_loss: {:.5E} fts_loss: {:.5E} lr: {:.3E} period: {:.3f}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), cmcost.item(), optimizer.param_groups[0]['lr'], test.item()),flush=True)
print(ftslayer.string,flush=True)
print(loss.zl)
loss_io.write('{:d} {:.5E} \n'.format(actual_counter+1,main_loss))
loss_io.flush()
torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,actual_counter,rank))
torch.save(ftslayer.state_dict(), "{}_string_t_{}_{}".format(prefix,actual_counter,rank))
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
torch.save(ftslayer.state_dict(), "{}_string_{}".format(prefix,rank+1))
scheduler.step()
actual_counter += 1
if rank == 0:
print('FTS step size: {}'.format(ftsoptimizer.param_groups[0]['lr']))
| 6,865 | 37.79096 | 239 | py |
tps-torch | tps-torch-main/muller-brown-ml/test_tricky.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.data import EXPReweightSimulation, EXPReweightSimulationManual, TSTValidation
from tpstorch.ml.optim import UnweightedSGD, EXPReweightSGD
from torch.distributed import distributed_c10d
from mullerbrown import CommittorNet, MullerBrown, MullerBrownLoss
import numpy as np
dist.init_process_group(backend='mpi')
mpi_group = dist.distributed_c10d._get_default_group()
#Import any other thing
import tqdm, sys
prefix = 'simple'
#Initialize neural net
#Thinking about having Grant's initialization procedure...
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
#Set initial configuration and BP simulator
start = torch.tensor([-1.2,0.9])
end = torch.tensor([-0.5,0.5])
def initializer(s):
return (1-s)*start+s*end
initial_config = initializer(dist.get_rank()/(dist.get_world_size()-1))
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.5,0.0]])
mb_sim = MullerBrown(param="param",config=initial_config, rank=dist.get_rank(), dump=1, beta=0.025, kappa=20000, save_config=True, mpi_group = mpi_group, committor=committor)
#Generate unbiased configurations in reactant, product regions
n_boundary_samples = 25
react_data = torch.zeros(n_boundary_samples, start.shape[0]*start.shape[1], dtype=torch.float)
prod_data = torch.zeros(n_boundary_samples, end.shape[0]*end.shape[1], dtype=torch.float)
#Reactant
mb_sim_react = MullerBrown(param="param_tst",config=start, rank=dist.get_rank(), dump=1, beta=0.1, kappa=0.0, save_config=True, mpi_group = mpi_group, committor=committor)
for i in range(n_boundary_samples):
for j in range(2000):
mb_sim_react.step_unbiased()
react_data[i] = mb_sim_react.getConfig()
#Product
mb_sim_prod = MullerBrown(param="param_tst",config=end, rank=dist.get_rank(), dump=1, beta=0.1, kappa=0.0, save_config=True, mpi_group = mpi_group, committor=committor)
for i in range(n_boundary_samples):
for j in range(2000):
mb_sim_prod.step_unbiased()
prod_data[i] = mb_sim_prod.getConfig()
#Committor Loss
initloss = nn.MSELoss()
initoptimizer = UnweightedSGD(committor.parameters(), lr=1e-2)#,momentum=0.9,nesterov=True)#, weight_decay=1e-3)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition
for i in tqdm.tqdm(range(10**5)):
# zero the parameter gradients
initoptimizer.zero_grad()
# forward + backward + optimize
q_vals = committor(initial_config)
targets = torch.ones_like(q_vals)*dist.get_rank()/(dist.get_world_size()-1)
cost = initloss(q_vals, targets)#,committor,config,cx)
cost.backward()
#Stepping up
initoptimizer.step()
#committor.renormalize()
#committor.project()
committor.zero_grad()
from torch.optim import lr_scheduler
#Construct EXPReweightSimulation
batch_size = 128
#dataset = EXPReweightSimulation(mb_sim, committor, period=10)
#loader = DataLoader(dataset,batch_size=batch_size)
datarunner = EXPReweightSimulationManual(mb_sim, committor, period=10, batch_size=batch_size, dimN=2)
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = MullerBrownLoss(lagrange_bc = 25.0,batch_size=batch_size,start=start,end=end,radii=0.5,world_size=dist.get_world_size(),n_boundary_samples=n_boundary_samples,react_configs=react_data,prod_configs=prod_data)
if dist.get_rank() == 0:
loss.compute_bc(committor, 0, 0)
optimizer = EXPReweightSGD(committor.parameters(), lr=0.001, momentum=0.90, nesterov=True)
#lr_lambda = lambda epoch : 0.9**epoch
#scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1, verbose=False)
loss_io = []
if dist.get_rank() == 0:
loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#1 epoch: 200 iterations, 200 time-windows
for epoch in range(1):
if dist.get_rank() == 0:
print("epoch: [{}]".format(epoch+1))
actual_counter = 0
while actual_counter <= 200:
# get data and reweighting factors
config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
cost = loss(grad_xs,committor,config,invc)
cost.backward()
meaninvc, reweight = optimizer.step(fwd_weightfactors=fwd_wl, bwrd_weightfactors=bwrd_wl, reciprocal_normconstants=invc)
#committor.project()
# print statistics
with torch.no_grad():
#if counter % 10 == 0:
main_loss = loss.main_loss
bc_loss = loss.bc_loss
#What we need to do now is to compute with its respective weight
main_loss.mul_(reweight[dist.get_rank()])
#bc_loss.mul_(reweight[dist.get_rank()])
#All reduce the gradients
dist.all_reduce(main_loss)
dist.all_reduce(bc_loss)
#Divide in-place by the mean inverse normalizing constant
main_loss.div_(meaninvc)
#bc_loss.div_(meaninvc)
#Print statistics
if dist.get_rank() == 0:
print('[{}] loss: {:.5E} penalty: {:.5E} lr: {:.3E}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), optimizer.param_groups[0]['lr']))
#Also print the reweighting factors
print(reweight)
loss_io.write('{:d} {:.5E} \n'.format(actual_counter+1,main_loss))
loss_io.flush()
#Only save parameters from rank 0
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,dist.get_rank()+1))
actual_counter += 1
##Perform Validation Test
if dist.get_rank() == 0:
print("Finished Training! Now performing validation through committor analysis")
#Construct TSTValidation
print("Generating transition state")
for i in range(40000):
config_cur = mb_sim.getConfig()
mb_sim.step(committor_val=committor(config_cur), onlytst=True)
init_config = mb_sim.getConfig()
print("q value is "+str(committor(init_config)))
mb_sim = MullerBrown(param="param_tst",config=init_config, rank=dist.get_rank(), dump=1, beta=0.025, kappa=20000, save_config=True, mpi_group = mpi_group, committor=committor)
#mb_sim.setConfig(init_config)
#mb_sim = MullerBrown(param="param",config=init_config, rank=dist.get_rank(), dump=1, beta=0.20, kappa=80, save_config=True, mpi_group = mpi_group, committor=committor)
batch_size = 100 #batch of initial configuration to do the committor analysis per rank
dataset = TSTValidation(mb_sim, committor, period=20)
loader = DataLoader(dataset,batch_size=batch_size)
#Save validation scores and
myval_io = open("{}_validation_{}.txt".format(prefix,dist.get_rank()+1),'w')
def myprod_checker(config):
    """Product-state indicator: True when *config* is within 0.3 of either
    product minimum, or sits below the dividing line y = x + 0.8."""
    minima = (torch.tensor([[0.5, 0.0]]), torch.tensor([[0.0, 0.5]]))
    cutoff = 0.3
    near_minimum = any((config - m).pow(2).sum().sqrt() <= cutoff for m in minima)
    below_line = config[1] < (config[0] + 0.8)
    return bool(near_minimum or below_line)
def myreact_checker(config):
    """Reactant-state indicator: True when *config* is within 0.3 of the
    reactant minimum, or sits above the dividing line y = 0.5 x + 1.5."""
    basin = torch.tensor([[-0.5, 1.5]])
    cutoff = 0.3
    near_basin = (config - basin).pow(2).sum().sqrt() <= cutoff
    above_line = config[1] > (0.5 * config[0] + 1.5)
    return bool(near_basin or above_line)
#Run validation loop
actual_counter = 0
for epoch, batch in enumerate(loader):
if epoch > 1:
break
if dist.get_rank() == 0:
print("epoch: [{}]".format(epoch+1))
#Call the validation function
configs, committor_values = batch
dataset.validate(batch, trials=25, validation_io=myval_io, product_checker=myprod_checker, reactant_checker=myreact_checker)
| 7,966 | 38.835 | 213 | py |
tps-torch | tps-torch-main/muller-brown-ml/mullerbrown_relu.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.optim import project_simplex
from tpstorch.ml import MLSamplerEXP
from tpstorch.ml.nn import CommittorLossEXP
from mullerbrown_ml import MySampler
import numpy as np
#Import any other thing
import tqdm, sys
class CommittorNet(nn.Module):
    """Committor approximator: one ReLU hidden layer, a bias-free output layer
    whose weights are projected onto the probability simplex, and a final
    sigmoid threshold mapping the output into (0, 1)."""

    def __init__(self, d, num_nodes, unit=torch.relu):
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        self.thresh = torch.sigmoid
        # Constrain the output weights, then sync parameters across MPI ranks
        self.project()
        self.broadcast()

    def forward(self, x):
        hidden = self.unit(self.lin1(x))
        return self.thresh(self.lin2(hidden))

    def broadcast(self):
        """Copy rank 0's trainable parameters to every rank in the group."""
        for param in self.parameters():
            if param.requires_grad:
                dist.broadcast(param.data, src=0)

    def project(self):
        """Project the output-layer weights onto the probability simplex."""
        with torch.no_grad():
            self.lin2.weight.data = project_simplex(self.lin2.weight.data)
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class MullerBrown(MySampler):
    """Muller-Brown MC sampler bound to a committor network (ReLU variant).

    Wraps the C++ ``MySampler`` (which owns the configuration and the
    propose/accept-reject machinery) and mirrors the configuration into
    ``self.torch_config`` so the network can be evaluated and differentiated.
    """
    def __init__(self,param,config,rank,dump,beta,kappa,mpi_group,committor,save_config=False):
        super(MullerBrown, self).__init__(param,config.detach().clone(),rank,dump,beta,kappa,mpi_group)
        self.committor = committor
        self.save_config = save_config
        self.timestep = 0
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.torch_config = config
    def initialize_from_torchconfig(self, config):
        # Don't have to worry about all that much all, can just set it
        self.setConfig(config)
    def step(self, committor_val, onlytst=False):
        """One biased MC step: propose, evaluate the committor at the proposal,
        then accept/reject on the C++ side."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, committor_val, onlytst)
            committor_val_ = self.committor(config_test)
            self.acceptReject(config_test, committor_val_, onlytst, True)
        # Mirror the (possibly updated) configuration into an autograd-ready tensor
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        try:
            # NOTE(review): ``self.torch`` does not exist -- this line always raises
            # and the bare except swallows it, so the intended gradient reset never
            # runs. Probably meant ``self.torch_config.grad`` -- confirm before fixing.
            self.torch.grad.data.zero_()
        except:
            pass
    def step_unbiased(self):
        """One unbiased MC step (committor value 0.0, no bias applied)."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.acceptReject(config_test, 0.0, False, False)
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        try:
            # NOTE(review): same dead grad-reset as in step() above
            self.torch.grad.data.zero_()
        except:
            pass
    def save(self):
        """Dump the configuration via the C++ writer every 100 timesteps."""
        self.timestep += 1
        if self.save_config:
            if self.timestep % 100 == 0:
                self.dumpConfig(0)
class MullerBrownLoss(CommittorLossEXP):
    """BKE loss with augmented-Lagrangian-style boundary penalties.

    ``react_configs``/``prod_configs`` are pre-sampled unbiased configurations
    from the reactant and product basins; compute_bc pushes the committor
    toward 0 on the former and toward 1 on the latter.
    """
    def __init__(self, lagrange_bc, batch_size,start,end,radii,world_size,n_boundary_samples,react_configs,prod_configs):
        super(MullerBrownLoss,self).__init__()
        self.lagrange_bc = lagrange_bc
        self.start = start
        self.end = end
        self.radii = radii
        self.world_size = world_size
        self.react_configs = react_configs
        self.prod_configs = prod_configs
        # Lagrange multipliers for the two boundary constraints; their update
        # rules are commented out in compute_bc below, so they remain zero.
        self.react_lambda = torch.zeros(1)
        self.prod_lambda = torch.zeros(1)
    def compute_bc(self, committor, configs, invnormconstants):
        #Assume that first dimension is batch dimension
        # NOTE: ``configs`` and ``invnormconstants`` are unused here (kept for
        # interface compatibility); the penalty uses the stored boundary samples.
        loss_bc = torch.zeros(1)
        if dist.get_rank() == 0:
            # Debug output: committor values on the stored boundary configurations
            print(self.react_configs)
            print(committor(self.react_configs))
            print(torch.mean(committor(self.react_configs)))
            print(self.prod_configs)
            print(committor(self.prod_configs))
            print(torch.mean(committor(self.prod_configs)))
        # Detach the multipliers so backward() treats them as constants
        self.react_lambda = self.react_lambda.detach()
        self.prod_lambda = self.prod_lambda.detach()
        react_penalty = torch.mean(committor(self.react_configs))
        prod_penalty = torch.mean(1.0-committor(self.prod_configs))
        #self.react_lambda -= self.lagrange_bc*react_penalty
        #self.prod_lambda -= self.lagrange_bc*prod_penalty
        loss_bc += 0.5*self.lagrange_bc*react_penalty**2-self.react_lambda*react_penalty
        loss_bc += 0.5*self.lagrange_bc*prod_penalty**2-self.prod_lambda*prod_penalty
        return loss_bc/self.world_size
| 5,015 | 37.290076 | 121 | py |
tps-torch | tps-torch-main/muller-brown-ml/test_relu.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.data import EXPReweightSimulation, TSTValidation
from tpstorch.ml.optim import UnweightedSGD, EXPReweightSGD
from torch.distributed import distributed_c10d
from mullerbrown import CommittorNet, MullerBrown, MullerBrownLoss
import numpy as np
# Driver script: train a committor network on the Muller-Brown potential with
# EXP-reweighted umbrella sampling (one MPI rank per window), then validate by
# committor analysis from a transition-state ensemble.
dist.init_process_group(backend='mpi')
mpi_group = dist.distributed_c10d._get_default_group()
#Import any other thing
import tqdm, sys
prefix = 'simple'
#Initialize neural net
#Thinking about having Grant's initialization procedure...
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
#Set initial configuration and BP simulator
start = torch.tensor([-1.2,0.9])
end = torch.tensor([-0.5,0.5])
def initializer(s):
    """Linear interpolation between `start` and `end` at fraction s in [0,1]."""
    return (1-s)*start+s*end
# Each rank gets an initial configuration evenly spaced along the start->end line.
initial_config = initializer(dist.get_rank()/(dist.get_world_size()-1))
# `start`/`end` are rebound here to the reactant/product basin centers used below.
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.5,0.0]])
mb_sim = MullerBrown(param="param",config=initial_config, rank=dist.get_rank(), dump=1, beta=0.025, kappa=20000, save_config=True, mpi_group = mpi_group, committor=committor)
#Generate unbiased configurations in reactant, product regions
n_boundary_samples = 25
react_data = torch.zeros(n_boundary_samples, start.shape[0]*start.shape[1], dtype=torch.float)
prod_data = torch.zeros(n_boundary_samples, end.shape[0]*end.shape[1], dtype=torch.float)
#Reactant
mb_sim_react = MullerBrown(param="param_tst",config=start, rank=dist.get_rank(), dump=1, beta=0.1, kappa=0.0, save_config=True, mpi_group = mpi_group, committor=committor)
for i in range(n_boundary_samples):
    # 2000 unbiased MC steps between stored samples to decorrelate them.
    for j in range(2000):
        mb_sim_react.step_unbiased()
    react_data[i] = mb_sim_react.getConfig()
#Product
mb_sim_prod = MullerBrown(param="param_tst",config=end, rank=dist.get_rank(), dump=1, beta=0.1, kappa=0.0, save_config=True, mpi_group = mpi_group, committor=committor)
for i in range(n_boundary_samples):
    for j in range(2000):
        mb_sim_prod.step_unbiased()
    prod_data[i] = mb_sim_prod.getConfig()
#Committor Loss
initloss = nn.MSELoss()
initoptimizer = UnweightedSGD(committor.parameters(), lr=1e-2)#,momentum=0.9,nesterov=True)#, weight_decay=1e-3)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition
# i.e. pin q(initial_config) of rank r to r/(world_size-1).
for i in tqdm.tqdm(range(10**5)):
    # zero the parameter gradients
    initoptimizer.zero_grad()
    # forward + backward + optimize
    q_vals = committor(initial_config)
    targets = torch.ones_like(q_vals)*dist.get_rank()/(dist.get_world_size()-1)
    cost = initloss(q_vals, targets)#,committor,config,cx)
    cost.backward()
    #Stepping up
    initoptimizer.step()
    #committor.renormalize()
    #committor.project()
committor.zero_grad()
from torch.optim import lr_scheduler
#Construct EXPReweightSimulation
batch_size = 128
dataset = EXPReweightSimulation(mb_sim, committor, period=10)
loader = DataLoader(dataset,batch_size=batch_size)
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = MullerBrownLoss(lagrange_bc = 25.0,batch_size=batch_size,start=start,end=end,radii=0.5,world_size=dist.get_world_size(),n_boundary_samples=n_boundary_samples,react_configs=react_data,prod_configs=prod_data)
# NOTE(review): one-off compute_bc call on rank 0 only — looks like a
# sanity-check/debug invocation; confirm it is intentional.
if dist.get_rank() == 0:
    loss.compute_bc(committor, 0, 0)
optimizer = EXPReweightSGD(committor.parameters(), lr=0.001, momentum=0.90, nesterov=True)
#lr_lambda = lambda epoch : 0.9**epoch
#scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1, verbose=False)
loss_io = []
if dist.get_rank() == 0:
    loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#1 epoch: 200 iterations, 200 time-windows
for epoch in range(1):
    if dist.get_rank() == 0:
        print("epoch: [{}]".format(epoch+1))
    actual_counter = 0
    for counter, batch in enumerate(loader):
        if counter > 200:
            break
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = batch
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        cost = loss(grad_xs,committor,config,invc)
        cost.backward()
        meaninvc, reweight = optimizer.step(fwd_weightfactors=fwd_wl, bwrd_weightfactors=bwrd_wl, reciprocal_normconstants=invc)
        #committor.project()
        # print statistics
        with torch.no_grad():
            #if counter % 10 == 0:
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            #What we need to do now is to compute with its respective weight
            main_loss.mul_(reweight[dist.get_rank()])
            #bc_loss.mul_(reweight[dist.get_rank()])
            #All reduce the gradients
            dist.all_reduce(main_loss)
            dist.all_reduce(bc_loss)
            #Divide in-place by the mean inverse normalizing constant
            main_loss.div_(meaninvc)
            #bc_loss.div_(meaninvc)
            #Print statistics
            if dist.get_rank() == 0:
                print('[{}] loss: {:.5E} penalty: {:.5E} lr: {:.3E}'.format(counter + 1, main_loss.item(), bc_loss.item(), optimizer.param_groups[0]['lr']))
                #Also print the reweighting factors
                print(reweight)
                loss_io.write('{:d} {:.5E} \n'.format(actual_counter+1,main_loss))
                loss_io.flush()
                #Only save parameters from rank 0
                torch.save(committor.state_dict(), "{}_params_{}".format(prefix,dist.get_rank()+1))
        actual_counter += 1
##Perform Validation Test
if dist.get_rank() == 0:
    print("Finished Training! Now performing validation through committor analysis")
#Construct TSTValidation
print("Generating transition state")
# Drive the sampler onto the transition-state ensemble (onlytst=True).
for i in range(40000):
    config_cur = mb_sim.getConfig()
    mb_sim.step(committor_val=committor(config_cur), onlytst=True)
init_config = mb_sim.getConfig()
print("q value is "+str(committor(init_config)))
mb_sim = MullerBrown(param="param_tst",config=init_config, rank=dist.get_rank(), dump=1, beta=0.025, kappa=20000, save_config=True, mpi_group = mpi_group, committor=committor)
#mb_sim.setConfig(init_config)
#mb_sim = MullerBrown(param="param",config=init_config, rank=dist.get_rank(), dump=1, beta=0.20, kappa=80, save_config=True, mpi_group = mpi_group, committor=committor)
batch_size = 100 #batch of initial configuration to do the committor analysis per rank
dataset = TSTValidation(mb_sim, committor, period=20)
loader = DataLoader(dataset,batch_size=batch_size)
#Save validation scores and
myval_io = open("{}_validation_{}.txt".format(prefix,dist.get_rank()+1),'w')
def myprod_checker(config):
    """Return True when config is inside either product basin (or past the dividing line)."""
    end = torch.tensor([[0.5,0.0]])
    end_2 = torch.tensor([[0.0,0.5]])
    radii = 0.3
    end_ = config-end
    end_ = end_.pow(2).sum()**0.5
    end_2_ = config-end_2
    end_2_ = end_2_.pow(2).sum()**0.5
    if ((end_ <= radii) or (end_2_ <= radii) or (config[1]<(config[0]+0.8))):
        return True
    else:
        return False
def myreact_checker(config):
    """Return True when config is inside the reactant basin (or past its dividing line)."""
    start = torch.tensor([[-0.5,1.5]])
    radii = 0.3
    start_ = config-start
    start_ = start_.pow(2).sum()**0.5
    if ((start_ <= radii) or (config[1]>(0.5*config[0]+1.5))):
        return True
    else:
        return False
#Run validation loop
actual_counter = 0
for epoch, batch in enumerate(loader):
    if epoch > 1:
        break
    if dist.get_rank() == 0:
        print("epoch: [{}]".format(epoch+1))
    #Call the validation function
    configs, committor_values = batch
    dataset.validate(batch, trials=25, validation_io=myval_io, product_checker=myprod_checker, reactant_checker=myreact_checker)
| 7,861 | 38.114428 | 213 | py |
tps-torch | tps-torch-main/muller-brown-ml/mrh_tests/mb_fts.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.mullerbrown_ml import MyMLFTSSampler
from tpstorch import _rank, _world_size
import numpy as np
#Import any other thing
import tqdm, sys
#A single hidden layer NN, where some nodes possess the string configurations
class CommittorNet(nn.Module):
    """One-hidden-layer committor network: sigmoid(W2 * unit(W1 x + b1))."""

    def __init__(self, d, num_nodes, unit=torch.relu):
        super().__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        # Ensure every MPI rank starts from rank 0's parameters.
        self.broadcast()

    def forward(self, x):
        hidden = self.unit(self.lin1(x))
        logit = self.lin2(hidden)
        return torch.sigmoid(logit)

    def broadcast(self):
        # Synchronize all trainable parameters from rank 0.
        for _, weights in self.named_parameters():
            if weights.requires_grad:
                dist.broadcast(weights.data, src=0)
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class MullerBrown(MyMLFTSSampler):
    """Muller-Brown MC sampler restrained to this rank's FTS string cell."""

    def __init__(self,param,config,rank,dump,beta,mpi_group,ftslayer,save_config=False):
        super(MullerBrown, self).__init__(param,config.detach().clone(),rank,dump,beta,mpi_group)

        self.save_config = save_config
        self.timestep = 0

        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.torch_config = config
        self.ftslayer = ftslayer
        #Configs file Save Alternative, since the default XYZ format is an overkill
        self.file = open("configs_{}.txt".format(dist.get_rank()), "w")

    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        # Don't have to worry about all that much all, can just set it
        self.setConfig(config)

    @torch.no_grad()
    def reset(self):
        """Zero the step counter and re-seat the walker inside its FTS cell."""
        self.steps = 0
        self.distance_sq_list = self.ftslayer(self.getConfig().flatten())
        inftscell = self.checkFTSCell(_rank, _world_size)
        if not inftscell:
            # Fell outside this rank's Voronoi cell: restart from its string node.
            self.setConfig(self.ftslayer.string[_rank])

    def _sync_torch_config(self):
        """Refresh ``self.torch_config`` as a grad-enabled leaf and clear stale grads."""
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        # BUGFIX: previously read ``self.torch.grad`` (nonexistent attribute)
        # inside a bare try/except, so gradients were silently never cleared.
        if self.torch_config.grad is not None:
            self.torch_config.grad.data.zero_()

    def step(self):
        """One MC step, accepted only if the proposal stays in this rank's cell."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.distance_sq_list = self.ftslayer(config_test.flatten())
            inftscell = self.checkFTSCell(_rank, _world_size)
            if inftscell:
                self.acceptReject(config_test)
        self._sync_torch_config()

    def step_unbiased(self):
        """One unbiased MC step (no cell restraint)."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.acceptReject(config_test)
        self._sync_torch_config()

    @torch.no_grad()
    def isProduct(self,config):
        """True when config is within 0.025 of the product minimum."""
        end = torch.tensor([[0.6,0.08]])
        radii = 0.025
        end_ = config-end
        end_ = end_.pow(2).sum()**0.5
        return bool(end_ <= radii)

    @torch.no_grad()
    def isReactant(self,config):
        """True when config is within 0.025 of the reactant minimum."""
        start = torch.tensor([[-0.5,1.5]])
        radii = 0.025
        start_ = config-start
        start_ = start_.pow(2).sum()**0.5
        return bool(start_ <= radii)

    def step_bc(self):
        """MC step restricted to the reactant/product basins (boundary sampling)."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            if self.isReactant(config_test.flatten()) or self.isProduct(config_test.flatten()):
                self.acceptReject(config_test)
        self._sync_torch_config()

    def save(self):
        """Append the current 2D configuration to the per-rank text file every 10 steps."""
        self.timestep += 1
        if self.save_config:
            if self.timestep % 10 == 0:
                # self.dumpConfig(0)
                self.file.write("{} {} \n".format(self.torch_config[0].item(), self.torch_config[1].item()))
                self.file.flush()
| 4,995 | 32.986395 | 108 | py |
tps-torch | tps-torch-main/muller-brown-ml/mrh_tests/run_vanilla.py | #Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from mb_ml import MullerBrown, CommittorNet
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam
from tpstorch.ml.nn import BKELossEXP
import numpy as np
# Driver script: vanilla (non-FTS) committor training on the Muller-Brown
# potential using EXP-reweighted sampling and the BKE loss, one MPI rank per window.
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(3046)
np.random.seed(3046)
prefix = 'vanilla'
#Initialize neural net
def initializer(s):
    """Linear interpolation between `start` and `end` at fraction s in [0,1]."""
    return (1-s)*start+s*end
start = torch.tensor([-0.5,1.5])
end = torch.tensor([0.6,0.08])
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
# Each rank starts evenly spaced along the reactant->product segment.
initial_config = initializer(rank/(dist.get_world_size()-1))
# Rebound to the basin centers used by the BC loss below.
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.6,0.08]])
kT = 10.0
mb_sim = MullerBrown(param="param",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, kappa=20000, committor=committor)
# kappa=0: unbiased sampler used for the boundary-condition loss.
mb_sim_bc = MullerBrown(param="param_bc",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, kappa=0, committor=committor)
#Initial Training Loss
initloss = nn.MSELoss()
initoptimizer = ParallelSGD(committor.parameters(), lr=1e-3)#,momentum=0.95, nesterov=True)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition
for i in range(10**3):
    # zero the parameter gradients
    initoptimizer.zero_grad()
    # forward + backward + optimize
    q_vals = committor(initial_config.view(-1,2))
    targets = torch.ones_like(q_vals)*rank/(dist.get_world_size()-1)
    cost = initloss(q_vals, targets)#,committor,config,cx)
    cost.backward()
    #Stepping up
    initoptimizer.step()
    if i%1000 == 0 and rank == 0:
        print("Init step "+str(i),cost)
if rank == 0:
    #Only save parameters from rank 0
    torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
committor.zero_grad()
n_boundary_samples = 100
batch_size = 16#32#128
period = 25
datarunner = EXPReweightSimulation(mb_sim, committor, period=period, batch_size=batch_size, dimN=2)
#Initialize main loss function and optimizers
loss = BKELossEXP(  bc_sampler = mb_sim_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5)
#This is the new committor loss! Feel free to comment this out if you don't need it
#cmloss = FTSCommittorLoss( fts_sampler = mb_sim,
#                            committor = committor,
#                            fts_layer=ftslayer,
#                            dimN = 2,
#                            lambda_fts=1e-1,
#                            fts_start=200,
#                            fts_end=2000,
#                            fts_max_steps=batch_size*period*4, #To estimate the committor, we'll run foru times as fast
#                            fts_rate=4, #In turn, we will only sample more committor value estimate after 4 iterations
#                            fts_min_count=2000, #Minimum count so that simulation doesn't (potentially) run too long
#                            batch_size_fts=0.5,
#                            tol = 1e-6,
#                            mode = 'shift'
#                            )
optimizer = ParallelAdam(committor.parameters(), lr=1e-3)
#optimizer = ParallelSGD(committor.parameters(), lr=1e-4,momentum=0.95,nesterov=True)
loss_io = []
if rank == 0:
    loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#We can train in terms of epochs, but we will keep it in one epoch
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    actual_counter = 0
    while actual_counter <= 20000:
        # get data and reweighting factors
        configs, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        cost = loss(grad_xs, invc, fwd_wl, bwrd_wl)
        totalcost = cost#+cmcost
        totalcost.backward()
        optimizer.step()
        # print statistics
        with torch.no_grad():
            #if counter % 10 == 0:
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            #Print statistics
            if rank == 0:
                print('[{}] main_loss: {:.5E} bc_loss: {:.5E} lr: {:.3E}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), optimizer.param_groups[0]['lr']),flush=True)
                print(loss.zl)
                loss_io.write('{:d} {:.5E} {:.5E} \n'.format(actual_counter+1,main_loss.item(),bc_loss.item()))
                loss_io.flush()
        # Periodic checkpoints per rank, plus a rolling "latest" checkpoint.
        if actual_counter % 100 == 0:
            torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,actual_counter,rank))
        torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
        actual_counter += 1
| 5,413 | 36.337931 | 180 | py |
tps-torch | tps-torch-main/muller-brown-ml/mrh_tests/mb_ml.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml import MLSamplerEXP
from tpstorch.examples.mullerbrown_ml import MyMLSampler
import numpy as np
#Import any other thing
import tqdm, sys
#dist.init_process_group(backend='mpi')
class CommittorNet(nn.Module):
    """Two-hidden-layer committor network with a sigmoid output."""

    def __init__(self, d, num_nodes, unit=torch.relu):
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin3 = nn.Linear(num_nodes, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        # Ensure every MPI rank starts from rank 0's parameters.
        self.broadcast()

    def forward(self, x):
        hidden = self.unit(self.lin1(x))
        hidden = self.unit(self.lin3(hidden))
        return torch.sigmoid(self.lin2(hidden))

    def broadcast(self):
        # Synchronize all trainable parameters from rank 0.
        for _, weights in self.named_parameters():
            if weights.requires_grad:
                dist.broadcast(weights.data, src=0)
class MullerBrown(MyMLSampler):
    """Muller-Brown MC sampler with a committor-value umbrella bias (kappa)."""

    def __init__(self,param,config,rank,dump,beta,kappa,mpi_group,committor,save_config=False):
        super(MullerBrown, self).__init__(param,config.detach().clone(),rank,dump,beta,kappa,mpi_group)
        #super(MullerBrown, self).__init__(config.detach().clone())
        #super(MullerBrown, self).__init__(param,config.detach().clone(),rank,dump,beta,kappa,mpi_group)
        self.committor = committor
        self.save_config = save_config
        self.timestep = 0

        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.torch_config = config

        #Configs file Save Alternative, since the default XYZ format is an overkill
        self.file = open("configs_{}.txt".format(dist.get_rank()), "w")

    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        # Don't have to worry about all that much all, can just set it
        self.setConfig(config)

    def _sync_torch_config(self):
        """Refresh ``self.torch_config`` as a grad-enabled leaf and clear stale grads."""
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        # BUGFIX: previously read ``self.torch.grad`` (nonexistent attribute)
        # inside a bare try/except, so gradients were silently never cleared.
        if self.torch_config.grad is not None:
            self.torch_config.grad.data.zero_()

    def step(self, committor_val, onlytst=False):
        """One biased MC step; the proposal is re-scored with the committor."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, committor_val, onlytst)
            committor_val_ = self.committor(config_test)
            self.acceptReject(config_test, committor_val_, onlytst, True)
        self._sync_torch_config()

    def step_unbiased(self):
        """One unbiased MC step."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.acceptReject(config_test, 0.0, False, False)
        self._sync_torch_config()

    @torch.no_grad()
    def isProduct(self,config):
        """True when config is within 0.025 of the product minimum."""
        end = torch.tensor([[0.6,0.08]])
        radii = 0.025
        end_ = config-end
        end_ = end_.pow(2).sum()**0.5
        return bool(end_ <= radii)

    @torch.no_grad()
    def isReactant(self,config):
        """True when config is within 0.025 of the reactant minimum."""
        start = torch.tensor([[-0.5,1.5]])
        radii = 0.025
        start_ = config-start
        start_ = start_.pow(2).sum()**0.5
        return bool(start_ <= radii)

    def step_bc(self):
        """MC step restricted to the reactant/product basins (boundary sampling)."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            if self.isReactant(config_test.flatten()) or self.isProduct(config_test.flatten()):
                self.acceptReject(config_test, 0.0, False, False)
        self._sync_torch_config()

    def save(self):
        """Append the current 2D configuration to the per-rank text file every 100 steps."""
        self.timestep += 1
        if self.save_config:
            if self.timestep % 100 == 0:
                #self.dumpConfig(0)
                self.file.write("{} {} \n".format(self.torch_config[0].item(), self.torch_config[1].item()))
                self.file.flush()
| 4,527 | 33.30303 | 108 | py |
tps-torch | tps-torch-main/muller-brown-ml/mrh_tests/run_fts.py | #import sys
#sys.path.insert(0,"/global/home/users/muhammad_hasyim/tps-torch/build")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from mb_fts import MullerBrown, CommittorNet
from tpstorch.ml.data import FTSSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSUpdate
from tpstorch.ml.nn import BKELossFTS, FTSCommittorLoss, FTSLayer
import numpy as np
# Driver script: FTS-method committor training on the Muller-Brown potential;
# alternates string updates (FTSUpdate) with neural-network updates (BKE loss).
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(0)
np.random.seed(0)
prefix = 'simple_7500'
#Initialize neural net
def initializer(s):
    """Linear interpolation between `start` and `end` at fraction s in [0,1]."""
    return (1-s)*start+s*end
start = torch.tensor([-0.5,1.5])
end = torch.tensor([0.6,0.08])
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=7500).to('cpu')
#Initialize the string for FTS method
ftslayer = FTSLayer(react_config=start,prod_config=end,num_nodes=world_size).to('cpu')
# Each rank starts evenly spaced along the reactant->product segment.
initial_config = initializer(rank/(dist.get_world_size()-1))
# Rebound to the basin centers used by the BC loss below.
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.6,0.08]])
kT = 10.0
mb_sim = MullerBrown(param="param",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer)
mb_sim_bc = MullerBrown(param="param_bc",config=initial_config, rank=rank, dump=1, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer)
#Initial Training Loss
initloss = nn.MSELoss()
initoptimizer = ParallelSGD(committor.parameters(), lr=1e-3)#,momentum=0.95, nesterov=True)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition
for i in range(10**3):
    # zero the parameter gradients
    initoptimizer.zero_grad()
    # forward + backward + optimize
    q_vals = committor(initial_config.view(-1,2))
    targets = torch.ones_like(q_vals)*rank/(dist.get_world_size()-1)
    cost = initloss(q_vals, targets)#,committor,config,cx)
    cost.backward()
    #Stepping up
    initoptimizer.step()
    if i%1000 == 0 and rank == 0:
        print("Init step "+str(i),cost)
if rank == 0:
    #Only save parameters from rank 0
    torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
committor.zero_grad()
#Construct FTSSimulation
n_boundary_samples = 100
batch_size = 4#16
period = 100#25
datarunner = FTSSimulation(mb_sim, committor, period=period, batch_size=batch_size, dimN=2,mode='nonadaptive')
#Initialize main loss function and optimizers
loss = BKELossFTS(  bc_sampler = mb_sim_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5,
                    tol = 2e-9,
                    mode= 'shift')
cmloss = FTSCommittorLoss(  fts_sampler = mb_sim,
                            committor = committor,
                            fts_layer=ftslayer,
                            dimN = 2,
                            lambda_fts=1.0,
                            fts_start=200,
                            fts_end=2000,
                            fts_max_steps=10**8,#batch_size*period*40, #To estimate the committor, we'll run foru times as fast
                            fts_rate=40, #In turn, we will only sample more committor value estimate after 4 iterations
                            fts_min_count=500, #Minimum count so that simulation doesn't (potentially) run too long
                            batch_size_fts=0.5,
                            tol = 2e-9,
                            mode = 'shift'
                            )
optimizer = ParallelAdam(committor.parameters(), lr=1e-3)
#optimizer = ParallelSGD(committor.parameters(), lr=1e-4,momentum=0.95)#,nesterov=True)
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=1.0/batch_size,momentum=0.95,nesterov=True, kappa=0.1)
#FTS Needs a scheduler because we're doing stochastic gradient descent, i.e., we're not accumulating a running average
#But only computes mini-batch averages
from torch.optim.lr_scheduler import LambdaLR
lr_lambda = lambda epoch: 1/(epoch+1)
scheduler = LambdaLR(ftsoptimizer, lr_lambda)
loss_io = []
if rank == 0:
    loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#We can train in terms of epochs, but we will keep it in one epoch
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    actual_counter = 0
    while actual_counter <= 500:
        # get data and reweighting factors
        configs, grad_xs = datarunner.runSimulation()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        cost = loss(grad_xs, mb_sim.rejection_count)
        #Don't compute the new FTS Committor Loss for this run!
        #cmcost = cmloss(actual_counter, ftslayer.string)
        totalcost = cost#+cmcost
        totalcost.backward()
        optimizer.step()
        # (1) Update the string
        ftsoptimizer.step(configs,batch_size)
        # print statistics
        with torch.no_grad():
            #if counter % 10 == 0:
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            #Track the average number of sampling period
            test = torch.tensor([float(datarunner.period)])
            dist.all_reduce(test)
            test /= float(world_size)
            if rank == 0:
                print(test)
            #Print statistics
            if rank == 0:
                print('[{}] main_loss: {:.5E} bc_loss: {:.5E} fts_loss: {:.5E} lr: {:.3E} period: {:.3f}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), 0.0, optimizer.param_groups[0]['lr'], test.item()),flush=True)
                #print('[{}] main_loss: {:.5E} bc_loss: {:.5E} fts_loss: {:.5E} lr: {:.3E} period: {:.3f}'.format(actual_counter + 1, main_loss.item(), bc_loss.item(), cmcost.item(), optimizer.param_groups[0]['lr'], test.item()),flush=True)
                print(ftslayer.string,flush=True)
                print(loss.zl)
                loss_io.write('{:d} {:.5E} \n'.format(actual_counter+1,main_loss))
                loss_io.flush()
            # Per-rank checkpoints of the network and the string.
            torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,actual_counter,rank))
            torch.save(ftslayer.state_dict(), "{}_string_t_{}_{}".format(prefix,actual_counter,rank))
            torch.save(committor.state_dict(), "{}_params_{}".format(prefix,rank+1))
            torch.save(ftslayer.state_dict(), "{}_string_{}".format(prefix,rank+1))
        scheduler.step()
        actual_counter += 1
if rank == 0:
    print('FTS step size: {}'.format(ftsoptimizer.param_groups[0]['lr']))
| 7,051 | 39.068182 | 240 | py |
tps-torch | tps-torch-main/muller-brown-ml/examples/0/mullerbrown.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.optim import project_simplex
from tpstorch.ml import MLSamplerEXP
from tpstorch.ml.nn import CommittorLossEXP
from mullerbrown_ml import MySampler
import numpy as np
#Import any other thing
import tqdm, sys
class CommittorNet(nn.Module):
    """Two-hidden-layer committor network with simplex-projected output weights.

    Note: unlike the other variants, ``forward`` returns the raw linear output
    (no sigmoid); boundedness comes from projecting ``lin2`` onto the simplex.
    """

    def __init__(self, d, num_nodes, unit=torch.sigmoid):
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin3 = nn.Linear(num_nodes, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        self.project()
        self.broadcast()

    def forward(self, x):
        # x is expected flat; reshape here if a structured input is ever needed.
        hidden = self.unit(self.lin1(x))
        hidden = self.unit(self.lin3(hidden))
        return self.lin2(hidden)

    def broadcast(self):
        # Synchronize all trainable parameters from rank 0.
        for _, weights in self.named_parameters():
            if weights.requires_grad:
                dist.broadcast(weights.data, src=0)

    def project(self):
        # Keep the output layer on the probability simplex so outputs stay in [0, 1].
        with torch.no_grad():
            self.lin2.weight.data = project_simplex(self.lin2.weight.data)
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class MullerBrown(MySampler):
    """Muller-Brown potential MC sampler biased by a committor-based umbrella."""

    def __init__(self,param,config,rank,dump,beta,kappa,mpi_group,committor,save_config=False):
        super(MullerBrown, self).__init__(param,config.detach().clone(),rank,dump,beta,kappa,mpi_group)

        self.committor = committor
        self.save_config = save_config
        self.timestep = 0

        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.torch_config = config

    def initialize_from_torchconfig(self, config):
        # Don't have to worry about all that much all, can just set it
        self.setConfig(config)

    def _sync_torch_config(self):
        """Refresh ``self.torch_config`` as a grad-enabled leaf and clear stale grads."""
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        # BUGFIX: previously read ``self.torch.grad`` (nonexistent attribute)
        # inside a bare try/except, so gradients were silently never cleared.
        if self.torch_config.grad is not None:
            self.torch_config.grad.data.zero_()

    def step(self, committor_val, onlytst=False):
        """One biased MC step; the proposal is re-scored with the committor."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, committor_val, onlytst)
            committor_val_ = self.committor(config_test)
            self.acceptReject(config_test, committor_val_, onlytst, True)
        self._sync_torch_config()

    def step_unbiased(self):
        """One unbiased MC step."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.acceptReject(config_test, 0.0, False, False)
        self._sync_torch_config()

    def save(self):
        """Dump the configuration every 100 timesteps when saving is enabled."""
        self.timestep += 1
        if self.save_config:
            if self.timestep % 100 == 0:
                self.dumpConfig(0)
class MullerBrownLoss(CommittorLossEXP):
    """Per-configuration boundary-condition penalty for the Muller-Brown committor.

    Configurations classified as reactant are pushed toward q=0, product
    toward q=1; the result is the reweighted batch average of the penalties.
    """

    def __init__(self, lagrange_bc, batch_size,start,end,radii):
        super(MullerBrownLoss,self).__init__()
        self.lagrange_bc = lagrange_bc
        self.start = start
        self.end = end
        self.radii = radii

    def compute_bc(self, committor, configs, invnormconstants):
        # The first dimension of `configs` is the batch dimension.
        loss_bc = torch.zeros(1)
        for idx, cfg in enumerate(configs):
            dist_react = (cfg - self.start).pow(2).sum() ** 0.5
            dist_prod = (cfg - self.end).pow(2).sum() ** 0.5
            in_reactant = (dist_react <= self.radii) or (cfg[1] > 0.5 * cfg[0] + 1.5)
            in_product = (dist_prod <= self.radii) or (cfg[1] < cfg[0] + 0.8)
            if in_reactant:
                # Reactant region: penalize q away from 0.
                loss_bc += 0.5 * self.lagrange_bc * (committor(cfg.flatten()) ** 2) * invnormconstants[idx]
            elif in_product:
                # Product region: penalize q away from 1.
                loss_bc += 0.5 * self.lagrange_bc * (committor(cfg.flatten()) - 1.0) ** 2 * invnormconstants[idx]
        # idx + 1 == number of configurations in the batch.
        return loss_bc / (idx + 1)
| 4,579 | 36.235772 | 104 | py |
tps-torch | tps-torch-main/muller-brown-ml/examples/0/run.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.data import EXPReweightSimulation, TSTValidation
from tpstorch.ml.optim import UnweightedSGD, EXPReweightSGD
from torch.distributed import distributed_c10d
from mullerbrown import CommittorNet, MullerBrown, MullerBrownLoss
import numpy as np
# Driver script: committor training on the Muller-Brown potential with
# EXP-reweighted umbrella sampling, one MPI rank per window.
dist.init_process_group(backend='mpi')
mpi_group = dist.distributed_c10d._get_default_group()
#Import any other thing
import tqdm, sys
prefix = 'simple'
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=40).to('cpu')
#Set initial configuration and BP simulator
start = torch.tensor([-1.2,0.9])
end = torch.tensor([-0.5,0.5])
def initializer(s):
    """Linear interpolation between `start` and `end` at fraction s in [0,1]."""
    return (1-s)*start+s*end
# Each rank gets an initial configuration evenly spaced along the start->end line.
initial_config = initializer(dist.get_rank()/(dist.get_world_size()-1))
# Rebound to the basin centers used by the BC loss below.
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.5,0.0]])
mb_sim = MullerBrown(param="param",config=initial_config, rank=dist.get_rank(), dump=1, beta=0.025, kappa=20000, save_config=True, mpi_group = mpi_group, committor=committor)
#Committor Loss
initloss = nn.MSELoss()
initoptimizer = UnweightedSGD(committor.parameters(), lr=1e-2)#,momentum=0.9,nesterov=True)#, weight_decay=1e-3)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition
for i in tqdm.tqdm(range(10**5)):
    # zero the parameter gradients
    initoptimizer.zero_grad()
    # forward + backward + optimize
    q_vals = committor(initial_config)
    targets = torch.ones_like(q_vals)*dist.get_rank()/(dist.get_world_size()-1)
    cost = initloss(q_vals, targets)#,committor,config,cx)
    cost.backward()
    #Stepping up
    initoptimizer.step()
    #committor.renormalize()
    committor.project()
committor.zero_grad()
from torch.optim import lr_scheduler
#Construct EXPReweightSimulation
batch_size = 128
dataset = EXPReweightSimulation(mb_sim, committor, period=10)
loader = DataLoader(dataset,batch_size=batch_size)
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = MullerBrownLoss(lagrange_bc = 800.0,batch_size=batch_size,start=start,end=end,radii=0.5)
optimizer = EXPReweightSGD(committor.parameters(), lr=0.001, momentum=0.90, nesterov=True)
#lr_lambda = lambda epoch : 0.9**epoch
#scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1, verbose=False)
loss_io = []
if dist.get_rank() == 0:
    loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#1 epoch: 200 iterations, 200 time-windows
for epoch in range(1):
if dist.get_rank() == 0:
print("epoch: [{}]".format(epoch+1))
actual_counter = 0
for counter, batch in enumerate(loader):
if counter > 300:
break
# get data and reweighting factors
config, grad_xs, invc, fwd_wl, bwrd_wl = batch
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
cost = loss(grad_xs,committor,config,invc)
cost.backward()
meaninvc, reweight = optimizer.step(fwd_weightfactors=fwd_wl, bwrd_weightfactors=bwrd_wl, reciprocal_normconstants=invc)
committor.project()
# print statistics
with torch.no_grad():
#if counter % 10 == 0:
main_loss = loss.main_loss
bc_loss = loss.bc_loss
#What we need to do now is to compute with its respective weight
main_loss.mul_(reweight[dist.get_rank()])
bc_loss.mul_(reweight[dist.get_rank()])
#All reduce the gradients
dist.all_reduce(main_loss)
dist.all_reduce(bc_loss)
#Divide in-place by the mean inverse normalizing constant
main_loss.div_(meaninvc)
bc_loss.div_(meaninvc)
#Print statistics
if dist.get_rank() == 0:
print('[{}] loss: {:.5E} penalty: {:.5E} lr: {:.3E}'.format(counter + 1, main_loss.item(), bc_loss.item(), optimizer.param_groups[0]['lr']))
#Also print the reweighting factors
print(reweight)
loss_io.write('{:d} {:.5E} \n'.format(actual_counter+1,main_loss))
loss_io.flush()
#Only save parameters from rank 0
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,dist.get_rank()+1))
actual_counter += 1
##Perform Validation Test
if dist.get_rank() == 0:
print("Finished Training! Now performing validation through committor analysis")
#Construct TSTValidation
print("Generating transition state")
for i in range(80000):
config_cur = mb_sim.getConfig()
mb_sim.step(committor_val=committor(config_cur), onlytst=True)
init_config = mb_sim.getConfig()
print("q value is "+str(committor(init_config)))
mb_sim = MullerBrown(param="param_tst",config=init_config, rank=dist.get_rank(), dump=1, beta=0.025, kappa=20000, save_config=True, mpi_group = mpi_group, committor=committor)
#mb_sim.setConfig(init_config)
#mb_sim = MullerBrown(param="param",config=init_config, rank=dist.get_rank(), dump=1, beta=0.20, kappa=80, save_config=True, mpi_group = mpi_group, committor=committor)
batch_size = 100 #batch of initial configuration to do the committor analysis per rank
dataset = TSTValidation(mb_sim, committor, period=20)
loader = DataLoader(dataset,batch_size=batch_size)
#Save validation scores and
myval_io = open("{}_validation_{}.txt".format(prefix,dist.get_rank()+1),'w')
def myprod_checker(config):
    """Return True when `config` lies in the product region.

    Product membership: within radius 0.3 of either product minimum
    (0.5, 0.0) or (0.0, 0.5), or below the diagonal line y = x + 0.8.
    """
    basin_a = torch.tensor([[0.5, 0.0]])
    basin_b = torch.tensor([[0.0, 0.5]])
    cutoff = 0.3
    dist_a = (config - basin_a).pow(2).sum()**0.5
    dist_b = (config - basin_b).pow(2).sum()**0.5
    below_diagonal = config[1] < (config[0] + 0.8)
    return bool(dist_a <= cutoff or dist_b <= cutoff or below_diagonal)
def myreact_checker(config):
    """Return True when `config` lies in the reactant region.

    Reactant membership: within radius 0.3 of the reactant minimum
    (-0.5, 1.5), or above the line y = 0.5*x + 1.5.
    """
    basin = torch.tensor([[-0.5, 1.5]])
    cutoff = 0.3
    separation = (config - basin).pow(2).sum()**0.5
    above_line = config[1] > (0.5*config[0] + 1.5)
    return bool(separation <= cutoff or above_line)
#Run validation loop
actual_counter = 0
for epoch, batch in enumerate(loader):
if epoch > 1:
break
if dist.get_rank() == 0:
print("epoch: [{}]".format(epoch+1))
#Call the validation function
configs, committor_values = batch
dataset.validate(batch, trials=25, validation_io=myval_io, product_checker=myprod_checker, reactant_checker=myreact_checker)
| 6,698 | 36.216667 | 175 | py |
tps-torch | tps-torch-main/muller-brown-ml/examples/2/mullerbrown.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.optim import project_simplex
from tpstorch.ml import MLSamplerEXP
from tpstorch.ml.nn import CommittorLossEXP
from mullerbrown_ml import MySampler
import numpy as np
#Import any other thing
import tqdm, sys
class CommittorNet(nn.Module):
    """Single-hidden-layer network mapping a configuration to a committor value.

    The output-layer weights are projected onto the probability simplex, and
    all parameters are broadcast from rank 0 so every MPI rank starts from
    identical weights.
    """
    def __init__(self, d, num_nodes, unit=torch.sigmoid):
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        self.project()
        self.broadcast()

    def forward(self, x):
        # x is expected flat; reshape here if a structured input is ever needed.
        hidden = self.unit(self.lin1(x))
        return self.lin2(hidden)

    def broadcast(self):
        # Synchronize every trainable parameter from rank 0 across the group.
        for _, param in self.named_parameters():
            if param.requires_grad:
                dist.broadcast(param.data, src=0)

    def project(self):
        # Constrain output-layer weights to the simplex so the output stays
        # within the normalized committor range.
        with torch.no_grad():
            self.lin2.weight.data = project_simplex(self.lin2.weight.data)
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class MullerBrown(MySampler):
    """Monte-Carlo sampler on the Muller-Brown surface biased by a committor net.

    Proposal and accept/reject moves are delegated to the C++ `MySampler`
    base class; the committor network is evaluated in torch to supply the
    bias/restraint value for each trial move.
    """
    def __init__(self, param, config, rank, dump, beta, kappa, mpi_group, committor, save_config=False):
        super(MullerBrown, self).__init__(param, config.detach().clone(), rank, dump, beta, kappa, mpi_group)
        self.committor = committor
        self.save_config = save_config
        self.timestep = 0
        # Save config size and its flattened version for later reshaping.
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.torch_config = config

    def initialize_from_torchconfig(self, config):
        # Hand the torch tensor straight to the C++ sampler state.
        self.setConfig(config)

    def step(self, committor_val, onlytst=False):
        """Advance one biased MC step; `onlytst` restrains sampling near the transition state."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, committor_val, onlytst)
            committor_val_ = self.committor(config_test)
            self.acceptReject(config_test, committor_val_, onlytst, True)
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        # BUG FIX: previously read `self.torch.grad` (a nonexistent attribute),
        # so the stale gradient was never cleared -- the bare `except` hid the typo.
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            pass  # no gradient populated yet; nothing to clear

    def step_unbiased(self):
        """Advance one unbiased MC step (no committor restraint)."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.acceptReject(config_test, 0.0, False, False)
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        # Same gradient-clearing fix as in step().
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            pass

    def save(self):
        """Dump the configuration every 100 timesteps when saving is enabled."""
        self.timestep += 1
        if self.save_config:
            if self.timestep % 100 == 0:
                self.dumpConfig(0)
class MullerBrownLoss(CommittorLossEXP):
    """Boundary-condition penalty for the committor on the Muller-Brown surface.

    Penalizes committor values away from 0 in the reactant region and away
    from 1 in the product region, with each sample reweighted by its inverse
    normalizing constant.
    """
    def __init__(self, lagrange_bc, batch_size, start, end, radii):
        super(MullerBrownLoss, self).__init__()
        self.lagrange_bc = lagrange_bc  # penalty strength
        self.start = start              # reactant basin center
        self.end = end                  # product basin center
        self.radii = radii              # basin radius

    def compute_bc(self, committor, configs, invnormconstants):
        """Average boundary-condition penalty over a batch of configurations.

        ROBUSTNESS FIX: the original returned `loss_bc/(i+1)`, using the loop
        variable after the loop -- a NameError on an empty batch. We divide by
        `len(configs)` and return zero for an empty batch.
        """
        # Assume that first dimension is batch dimension.
        loss_bc = torch.zeros(1)
        n = len(configs)
        if n == 0:
            return loss_bc
        for i, config in enumerate(configs):
            start_ = (config - self.start).pow(2).sum()**0.5
            end_ = (config - self.end).pow(2).sum()**0.5
            # Reactant region (near basin, or above the upper diagonal): q -> 0.
            if (start_ <= self.radii) or (config[1] > 0.5*config[0] + 1.5):
                loss_bc += 0.5*self.lagrange_bc*(committor(config.flatten())**2)*invnormconstants[i]
            # Product region (near basin, or below the lower diagonal): q -> 1.
            elif (end_ <= self.radii) or (config[1] < config[0] + 0.8):
                loss_bc += 0.5*self.lagrange_bc*(committor(config.flatten())-1.0)**2*invnormconstants[i]
        return loss_bc/n
| 4,466 | 36.225 | 104 | py |
tps-torch | tps-torch-main/muller-brown-ml/examples/2/run.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.data import EXPReweightSimulation, TSTValidation
from tpstorch.ml.optim import UnweightedSGD, EXPReweightSGD
from torch.distributed import distributed_c10d
from mullerbrown import CommittorNet, MullerBrown, MullerBrownLoss
import numpy as np
dist.init_process_group(backend='mpi')
mpi_group = dist.distributed_c10d._get_default_group()
#Import any other thing
import tqdm, sys
prefix = 'simple'
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=400).to('cpu')
#Set initial configuration and BP simulator
start = torch.tensor([-1.2,0.9])
end = torch.tensor([-0.5,0.5])
def initializer(s):
    # Linearly interpolate between the module-level `start` and `end`
    # configurations; s in [0,1] picks this rank's position along the path.
    return (1-s)*start+s*end
initial_config = initializer(dist.get_rank()/(dist.get_world_size()-1))
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.5,0.0]])
mb_sim = MullerBrown(param="param",config=initial_config, rank=dist.get_rank(), dump=1, beta=0.025, kappa=20000, save_config=True, mpi_group = mpi_group, committor=committor)
#Committor Loss
initloss = nn.MSELoss()
initoptimizer = UnweightedSGD(committor.parameters(), lr=1e-2)#,momentum=0.9,nesterov=True)#, weight_decay=1e-3)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition
for i in tqdm.tqdm(range(10**5)):
# zero the parameter gradients
initoptimizer.zero_grad()
# forward + backward + optimize
q_vals = committor(initial_config)
targets = torch.ones_like(q_vals)*dist.get_rank()/(dist.get_world_size()-1)
cost = initloss(q_vals, targets)#,committor,config,cx)
cost.backward()
#Stepping up
initoptimizer.step()
#committor.renormalize()
committor.project()
committor.zero_grad()
from torch.optim import lr_scheduler
#Construct EXPReweightSimulation
batch_size = 128
dataset = EXPReweightSimulation(mb_sim, committor, period=10)
loader = DataLoader(dataset,batch_size=batch_size)
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = MullerBrownLoss(lagrange_bc = 800.0,batch_size=batch_size,start=start,end=end,radii=0.5)
optimizer = EXPReweightSGD(committor.parameters(), lr=0.001, momentum=0.90, nesterov=True)
#lr_lambda = lambda epoch : 0.9**epoch
#scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1, verbose=False)
loss_io = []
if dist.get_rank() == 0:
loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#1 epoch: 200 iterations, 200 time-windows
for epoch in range(1):
if dist.get_rank() == 0:
print("epoch: [{}]".format(epoch+1))
actual_counter = 0
for counter, batch in enumerate(loader):
if counter > 300:
break
# get data and reweighting factors
config, grad_xs, invc, fwd_wl, bwrd_wl = batch
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
cost = loss(grad_xs,committor,config,invc)
cost.backward()
meaninvc, reweight = optimizer.step(fwd_weightfactors=fwd_wl, bwrd_weightfactors=bwrd_wl, reciprocal_normconstants=invc)
committor.project()
# print statistics
with torch.no_grad():
#if counter % 10 == 0:
main_loss = loss.main_loss
bc_loss = loss.bc_loss
#What we need to do now is to compute with its respective weight
main_loss.mul_(reweight[dist.get_rank()])
bc_loss.mul_(reweight[dist.get_rank()])
#All reduce the gradients
dist.all_reduce(main_loss)
dist.all_reduce(bc_loss)
#Divide in-place by the mean inverse normalizing constant
main_loss.div_(meaninvc)
bc_loss.div_(meaninvc)
#Print statistics
if dist.get_rank() == 0:
print('[{}] loss: {:.5E} penalty: {:.5E} lr: {:.3E}'.format(counter + 1, main_loss.item(), bc_loss.item(), optimizer.param_groups[0]['lr']))
#Also print the reweighting factors
print(reweight)
loss_io.write('{:d} {:.5E} \n'.format(actual_counter+1,main_loss))
loss_io.flush()
#Only save parameters from rank 0
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,dist.get_rank()+1))
actual_counter += 1
##Perform Validation Test
if dist.get_rank() == 0:
print("Finished Training! Now performing validation through committor analysis")
#Construct TSTValidation
print("Generating transition state")
for i in range(80000):
config_cur = mb_sim.getConfig()
mb_sim.step(committor_val=committor(config_cur), onlytst=True)
init_config = mb_sim.getConfig()
print("q value is "+str(committor(init_config)))
mb_sim = MullerBrown(param="param_tst",config=init_config, rank=dist.get_rank(), dump=1, beta=0.025, kappa=20000, save_config=True, mpi_group = mpi_group, committor=committor)
#mb_sim.setConfig(init_config)
#mb_sim = MullerBrown(param="param",config=init_config, rank=dist.get_rank(), dump=1, beta=0.20, kappa=80, save_config=True, mpi_group = mpi_group, committor=committor)
batch_size = 100 #batch of initial configuration to do the committor analysis per rank
dataset = TSTValidation(mb_sim, committor, period=20)
loader = DataLoader(dataset,batch_size=batch_size)
#Save validation scores and
myval_io = open("{}_validation_{}.txt".format(prefix,dist.get_rank()+1),'w')
def myprod_checker(config):
    """True iff `config` is in the product region: inside radius 0.3 of
    either product minimum, or below the line y = x + 0.8."""
    targets = [torch.tensor([[0.5, 0.0]]), torch.tensor([[0.0, 0.5]])]
    cutoff = 0.3
    in_basin = any((config - t).pow(2).sum()**0.5 <= cutoff for t in targets)
    if in_basin or config[1] < (config[0] + 0.8):
        return True
    return False
def myreact_checker(config):
    """True iff `config` is in the reactant region: inside radius 0.3 of the
    reactant minimum, or above the line y = 0.5*x + 1.5."""
    minimum = torch.tensor([[-0.5, 1.5]])
    cutoff = 0.3
    if (config - minimum).pow(2).sum()**0.5 <= cutoff:
        return True
    if config[1] > (0.5*config[0] + 1.5):
        return True
    return False
#Run validation loop
actual_counter = 0
for epoch, batch in enumerate(loader):
if epoch > 1:
break
if dist.get_rank() == 0:
print("epoch: [{}]".format(epoch+1))
#Call the validation function
configs, committor_values = batch
dataset.validate(batch, trials=25, validation_io=myval_io, product_checker=myprod_checker, reactant_checker=myreact_checker)
| 6,699 | 36.222222 | 175 | py |
tps-torch | tps-torch-main/muller-brown-ml/examples/1/mullerbrown.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.optim import project_simplex
from tpstorch.ml import MLSamplerEXP
from tpstorch.ml.nn import CommittorLossEXP
from mullerbrown_ml import MySampler
import numpy as np
#Import any other thing
import tqdm, sys
class CommittorNet(nn.Module):
    """Single-hidden-layer network mapping a configuration to a committor value.

    The output-layer weights are projected onto the probability simplex, and
    all parameters are broadcast from rank 0 so every MPI rank starts from
    identical weights.
    """
    def __init__(self, d, num_nodes, unit=torch.sigmoid):
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        self.project()
        self.broadcast()

    def forward(self, x):
        # x is expected flat; reshape here if a structured input is ever needed.
        hidden = self.unit(self.lin1(x))
        return self.lin2(hidden)

    def broadcast(self):
        # Synchronize every trainable parameter from rank 0 across the group.
        for _, param in self.named_parameters():
            if param.requires_grad:
                dist.broadcast(param.data, src=0)

    def project(self):
        # Constrain output-layer weights to the simplex so the output stays
        # within the normalized committor range.
        with torch.no_grad():
            self.lin2.weight.data = project_simplex(self.lin2.weight.data)
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class MullerBrown(MySampler):
    """Monte-Carlo sampler on the Muller-Brown surface biased by a committor net.

    Proposal and accept/reject moves are delegated to the C++ `MySampler`
    base class; the committor network is evaluated in torch to supply the
    bias/restraint value for each trial move.
    """
    def __init__(self, param, config, rank, dump, beta, kappa, mpi_group, committor, save_config=False):
        super(MullerBrown, self).__init__(param, config.detach().clone(), rank, dump, beta, kappa, mpi_group)
        self.committor = committor
        self.save_config = save_config
        self.timestep = 0
        # Save config size and its flattened version for later reshaping.
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.torch_config = config

    def initialize_from_torchconfig(self, config):
        # Hand the torch tensor straight to the C++ sampler state.
        self.setConfig(config)

    def step(self, committor_val, onlytst=False):
        """Advance one biased MC step; `onlytst` restrains sampling near the transition state."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, committor_val, onlytst)
            committor_val_ = self.committor(config_test)
            self.acceptReject(config_test, committor_val_, onlytst, True)
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        # BUG FIX: previously read `self.torch.grad` (a nonexistent attribute),
        # so the stale gradient was never cleared -- the bare `except` hid the typo.
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            pass  # no gradient populated yet; nothing to clear

    def step_unbiased(self):
        """Advance one unbiased MC step (no committor restraint)."""
        with torch.no_grad():
            config_test = torch.zeros_like(self.torch_config)
            self.propose(config_test, 0.0, False)
            self.acceptReject(config_test, 0.0, False, False)
        self.torch_config = (self.getConfig().flatten()).detach().clone()
        self.torch_config.requires_grad_()
        # Same gradient-clearing fix as in step().
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            pass

    def save(self):
        """Dump the configuration every 100 timesteps when saving is enabled."""
        self.timestep += 1
        if self.save_config:
            if self.timestep % 100 == 0:
                self.dumpConfig(0)
class MullerBrownLoss(CommittorLossEXP):
    """Boundary-condition penalty for the committor on the Muller-Brown surface.

    Penalizes committor values away from 0 in the reactant region and away
    from 1 in the product region, with each sample reweighted by its inverse
    normalizing constant.
    """
    def __init__(self, lagrange_bc, batch_size, start, end, radii):
        super(MullerBrownLoss, self).__init__()
        self.lagrange_bc = lagrange_bc  # penalty strength
        self.start = start              # reactant basin center
        self.end = end                  # product basin center
        self.radii = radii              # basin radius

    def compute_bc(self, committor, configs, invnormconstants):
        """Average boundary-condition penalty over a batch of configurations.

        ROBUSTNESS FIX: the original returned `loss_bc/(i+1)`, using the loop
        variable after the loop -- a NameError on an empty batch. We divide by
        `len(configs)` and return zero for an empty batch.
        """
        # Assume that first dimension is batch dimension.
        loss_bc = torch.zeros(1)
        n = len(configs)
        if n == 0:
            return loss_bc
        for i, config in enumerate(configs):
            start_ = (config - self.start).pow(2).sum()**0.5
            end_ = (config - self.end).pow(2).sum()**0.5
            # Reactant region (near basin, or above the upper diagonal): q -> 0.
            if (start_ <= self.radii) or (config[1] > 0.5*config[0] + 1.5):
                loss_bc += 0.5*self.lagrange_bc*(committor(config.flatten())**2)*invnormconstants[i]
            # Product region (near basin, or below the lower diagonal): q -> 1.
            elif (end_ <= self.radii) or (config[1] < config[0] + 0.8):
                loss_bc += 0.5*self.lagrange_bc*(committor(config.flatten())-1.0)**2*invnormconstants[i]
        return loss_bc/n
| 4,466 | 36.225 | 104 | py |
tps-torch | tps-torch-main/muller-brown-ml/examples/1/run.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.ml.data import EXPReweightSimulation, TSTValidation
from tpstorch.ml.optim import UnweightedSGD, EXPReweightSGD
from torch.distributed import distributed_c10d
from mullerbrown import CommittorNet, MullerBrown, MullerBrownLoss
import numpy as np
dist.init_process_group(backend='mpi')
mpi_group = dist.distributed_c10d._get_default_group()
#Import any other thing
import tqdm, sys
prefix = 'simple'
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
#Set initial configuration and BP simulator
start = torch.tensor([-1.2,0.9])
end = torch.tensor([-0.5,0.5])
def initializer(s):
    # Linearly interpolate between the module-level `start` and `end`
    # configurations; s in [0,1] picks this rank's position along the path.
    return (1-s)*start+s*end
initial_config = initializer(dist.get_rank()/(dist.get_world_size()-1))
start = torch.tensor([[-0.5,1.5]])
end = torch.tensor([[0.5,0.0]])
mb_sim = MullerBrown(param="param",config=initial_config, rank=dist.get_rank(), dump=1, beta=0.025, kappa=20000, save_config=True, mpi_group = mpi_group, committor=committor)
#Committor Loss
initloss = nn.MSELoss()
initoptimizer = UnweightedSGD(committor.parameters(), lr=1e-2)#,momentum=0.9,nesterov=True)#, weight_decay=1e-3)
#from torchsummary import summary
running_loss = 0.0
#Initial training try to fit the committor to the initial condition
for i in tqdm.tqdm(range(10**5)):
# zero the parameter gradients
initoptimizer.zero_grad()
# forward + backward + optimize
q_vals = committor(initial_config)
targets = torch.ones_like(q_vals)*dist.get_rank()/(dist.get_world_size()-1)
cost = initloss(q_vals, targets)#,committor,config,cx)
cost.backward()
#Stepping up
initoptimizer.step()
#committor.renormalize()
committor.project()
committor.zero_grad()
from torch.optim import lr_scheduler
#Construct EXPReweightSimulation
batch_size = 128
dataset = EXPReweightSimulation(mb_sim, committor, period=10)
loader = DataLoader(dataset,batch_size=batch_size)
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = MullerBrownLoss(lagrange_bc = 800.0,batch_size=batch_size,start=start,end=end,radii=0.5)
optimizer = EXPReweightSGD(committor.parameters(), lr=0.001, momentum=0.90, nesterov=True)
#lr_lambda = lambda epoch : 0.9**epoch
#scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1, verbose=False)
loss_io = []
if dist.get_rank() == 0:
loss_io = open("{}_loss.txt".format(prefix),'w')
#Training loop
#1 epoch: 200 iterations, 200 time-windows
for epoch in range(1):
if dist.get_rank() == 0:
print("epoch: [{}]".format(epoch+1))
actual_counter = 0
for counter, batch in enumerate(loader):
if counter > 300:
break
# get data and reweighting factors
config, grad_xs, invc, fwd_wl, bwrd_wl = batch
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
cost = loss(grad_xs,committor,config,invc)
cost.backward()
meaninvc, reweight = optimizer.step(fwd_weightfactors=fwd_wl, bwrd_weightfactors=bwrd_wl, reciprocal_normconstants=invc)
committor.project()
# print statistics
with torch.no_grad():
#if counter % 10 == 0:
main_loss = loss.main_loss
bc_loss = loss.bc_loss
#What we need to do now is to compute with its respective weight
main_loss.mul_(reweight[dist.get_rank()])
bc_loss.mul_(reweight[dist.get_rank()])
#All reduce the gradients
dist.all_reduce(main_loss)
dist.all_reduce(bc_loss)
#Divide in-place by the mean inverse normalizing constant
main_loss.div_(meaninvc)
bc_loss.div_(meaninvc)
#Print statistics
if dist.get_rank() == 0:
print('[{}] loss: {:.5E} penalty: {:.5E} lr: {:.3E}'.format(counter + 1, main_loss.item(), bc_loss.item(), optimizer.param_groups[0]['lr']))
#Also print the reweighting factors
print(reweight)
loss_io.write('{:d} {:.5E} \n'.format(actual_counter+1,main_loss))
loss_io.flush()
#Only save parameters from rank 0
torch.save(committor.state_dict(), "{}_params_{}".format(prefix,dist.get_rank()+1))
actual_counter += 1
##Perform Validation Test
if dist.get_rank() == 0:
print("Finished Training! Now performing validation through committor analysis")
#Construct TSTValidation
print("Generating transition state")
for i in range(80000):
config_cur = mb_sim.getConfig()
mb_sim.step(committor_val=committor(config_cur), onlytst=True)
init_config = mb_sim.getConfig()
print("q value is "+str(committor(init_config)))
mb_sim = MullerBrown(param="param_tst",config=init_config, rank=dist.get_rank(), dump=1, beta=0.025, kappa=20000, save_config=True, mpi_group = mpi_group, committor=committor)
#mb_sim.setConfig(init_config)
#mb_sim = MullerBrown(param="param",config=init_config, rank=dist.get_rank(), dump=1, beta=0.20, kappa=80, save_config=True, mpi_group = mpi_group, committor=committor)
batch_size = 100 #batch of initial configuration to do the committor analysis per rank
dataset = TSTValidation(mb_sim, committor, period=20)
loader = DataLoader(dataset,batch_size=batch_size)
#Save validation scores and
myval_io = open("{}_validation_{}.txt".format(prefix,dist.get_rank()+1),'w')
def myprod_checker(config):
    """True iff `config` is in the product region: inside radius 0.3 of
    either product minimum, or below the line y = x + 0.8."""
    first_minimum = torch.tensor([[0.5, 0.0]])
    second_minimum = torch.tensor([[0.0, 0.5]])
    cutoff = 0.3
    d1 = (config - first_minimum).pow(2).sum()**0.5
    d2 = (config - second_minimum).pow(2).sum()**0.5
    return bool((d1 <= cutoff) or (d2 <= cutoff) or (config[1] < config[0] + 0.8))
def myreact_checker(config):
    """True iff `config` is in the reactant region: inside radius 0.3 of the
    reactant minimum, or above the line y = 0.5*x + 1.5."""
    reactant_minimum = torch.tensor([[-0.5, 1.5]])
    cutoff = 0.3
    distance = (config - reactant_minimum).pow(2).sum()**0.5
    return bool((distance <= cutoff) or (config[1] > 0.5*config[0] + 1.5))
#Run validation loop
actual_counter = 0
for epoch, batch in enumerate(loader):
if epoch > 1:
break
if dist.get_rank() == 0:
print("epoch: [{}]".format(epoch+1))
#Call the validation function
configs, committor_values = batch
dataset.validate(batch, trials=25, validation_io=myval_io, product_checker=myprod_checker, reactant_checker=myreact_checker)
| 6,699 | 36.222222 | 175 | py |
tps-torch | tps-torch-main/muller-brown-ml/analysis_scripts/plot_tricky_2.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.distributed as dist
from torch.autograd import grad
#Import necessarry tools from tpstorch
from mb_ml import CommittorNet
import numpy as np
#Import any other thing
import tqdm, sys
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
committor.load_state_dict(torch.load("simple_params_1"))
A = np.array([-20,-10,-17,1.5])
a = np.array([-1,-1,-6.5,0.7])
b = np.array([0,0,11,0.6])
c = np.array([-10,-10,-6.5,0.7])
x_ = np.array([1,0,-0.5,-1])
y_ = np.array([0,0.5,1.5,1])
def energy(x, y, A, a, b, c, x_, y_):
    """Muller-Brown potential: elementwise sum of four anisotropic Gaussian
    wells over the coordinate grids x, y."""
    total = np.zeros(x.shape)
    for Ai, ai, bi, ci, xi, yi in zip(A, a, b, c, x_, y_):
        dx = x - xi
        dy = y - yi
        total += Ai * np.exp(ai*dx**2 + bi*dx*dy + ci*dy**2)
    return total
def q(xval,yval):
    # Evaluate the trained committor network at every node of the (xval, yval)
    # meshgrid. Relies on module-level globals: nx, ny (grid dimensions, which
    # must match xval/yval's shape) and `committor` (the loaded network).
    qvals = np.zeros_like(xval)
    for i in range(nx):
        for j in range(ny):
            # The network expects a flat float32 tensor [x, y].
            Array = np.array([xval[i,j],yval[i,j]]).astype(np.float32)
            Array = torch.from_numpy(Array)
            qvals[i,j] = committor(Array).item()
    return qvals
nx, ny = 100,100
X = np.linspace(-1.75, 1.25, nx)
Y = np.linspace(-0.5, 2.25, ny)
print(X.shape)
xv, yv = np.meshgrid(X, Y)
z = energy(xv,yv,A,a,b,c,x_,y_)
h = plt.contourf(X,Y,z,levels=[-15+i for i in range(16)])
print(np.shape(z),np.shape(xv))
qvals = q(xv,yv)
CS = plt.contour(X, Y, qvals,levels=[0.01,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99], cmap='inferno')
plt.clabel(CS, fontsize=10, inline=1)#
#Now evaluate gradient stuff
n_struct = 100
points_x = np.linspace(-1.75,1.25,n_struct)
points_y = np.linspace(-0.5,2.25,n_struct)
xx, yy = np.meshgrid(points_x,points_y)
zz = np.zeros_like(xx)
grad_2_zz = np.zeros_like(xx)
for i in range(n_struct):
for j in range(n_struct):
test = torch.tensor([xx[i][j],yy[i][j]], dtype=torch.float32, requires_grad=True)
test_y = committor(test)
dy_dx = grad(outputs=test_y, inputs=test)
zz[i][j] = test_y.item()
grad_2_zz_ = dy_dx[0][0]**2+dy_dx[0][1]**2
grad_2_zz[i][j] = grad_2_zz_.item()
energies = energy(xx,yy,A,a,b,c,x_,y_)
from scipy.integrate import simps
energy_int = simps(simps(grad_2_zz*np.exp(-1.0*energies),points_y),points_x)
with open('energy.txt', 'w') as f:
print(energy_int, file=f)
values = np.genfromtxt("../../analysis_scripts/values_of_interest.txt")
q_fem = np.genfromtxt("../../analysis_scripts/zz_structured.txt")
indices = np.nonzero(values)
q_metric = values[indices]*np.abs(qvals[indices]-q_fem[indices])
q_int = np.array((np.mean(q_metric),np.std(q_metric)/len(q_metric)**0.5))
np.savetxt("q_int.txt", q_int)
# plot energies
h = plt.contourf(xx,yy,energies,levels=[-15+i for i in range(16)])
plt.colorbar()
CS = plt.contour(xx,yy,grad_2_zz*np.exp(-1.0*energies), cmap='Greys')
plt.colorbar()
plt.tick_params(axis='both', which='major', labelsize=9)
plt.tick_params(axis='both', which='minor', labelsize=9)
plt.savefig('energies.pdf', bbox_inches='tight')
plt.close()
| 2,969 | 31.637363 | 100 | py |
tps-torch | tps-torch-main/muller-brown-ml/analysis_scripts/plot_tricky.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.distributed as dist
dist.init_process_group('mpi')
#Import necessarry tools from tpstorch
from mullerbrown import CommittorNet
import numpy as np
#Import any other thing
import tqdm, sys
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
committor.load_state_dict(torch.load("simple_params_1"))
A = np.array([-20,-10,-17,1.5])
a = np.array([-1,-1,-6.5,0.7])
b = np.array([0,0,11,0.6])
c = np.array([-10,-10,-6.5,0.7])
x_ = np.array([1,0,-0.5,-1])
y_ = np.array([0,0.5,1.5,1])
def energy(x, y, A, a, b, c, x_, y_):
    """Muller-Brown potential: elementwise sum of four anisotropic Gaussian
    wells over the coordinate grids x, y."""
    total = np.zeros(x.shape)
    for Ai, ai, bi, ci, xi, yi in zip(A, a, b, c, x_, y_):
        dx = x - xi
        dy = y - yi
        total += Ai * np.exp(ai*dx**2 + bi*dx*dy + ci*dy**2)
    return total
def q(xval,yval):
    # Evaluate the trained committor network at every node of the (xval, yval)
    # meshgrid. Relies on module-level globals: nx, ny (grid dimensions, which
    # must match xval/yval's shape) and `committor` (the loaded network).
    qvals = np.zeros_like(xval)
    for i in range(nx):
        for j in range(ny):
            # The network expects a flat float32 tensor [x, y].
            Array = np.array([xval[i,j],yval[i,j]]).astype(np.float32)
            Array = torch.from_numpy(Array)
            qvals[i,j] = committor(Array).item()
    return qvals
nx, ny = 100,100
X = np.linspace(-2.0, 1.5, nx)
Y = np.linspace(-1.0, 2.5, ny)
print(X.shape)
xv, yv = np.meshgrid(X, Y)
z = energy(xv,yv,A,a,b,c,x_,y_)
h = plt.contourf(X,Y,z,levels=[-15+i for i in range(16)])
print(np.shape(z),np.shape(xv))
qvals = q(xv,yv)
CS = plt.contour(X, Y, qvals,levels=[0.001,0.01,0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0.999])
plt.colorbar()
plt.clabel(CS, fontsize=10, inline=1)#
plt.show()
| 1,507 | 27.45283 | 106 | py |
tps-torch | tps-torch-main/muller-brown-ml/analysis_scripts/plot_tricky_string_2.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.distributed as dist
from torch.autograd import grad
#Import necessarry tools from tpstorch
from mb_fts import CommittorNet
from tpstorch.ml.nn import FTSLayer
import numpy as np
#Import any other thing
import tqdm, sys
#Initialize neural net
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
committor.load_state_dict(torch.load("simple_params_1"))
start = torch.tensor([-0.5,1.5])
end = torch.tensor([0.6,0.08])
ftslayer = FTSLayer(react_config=start,prod_config=end,num_nodes=48).to('cpu')
ftslayer.load_state_dict(torch.load("simple_string_1"))
print(ftslayer.string)
ftslayer_np = ftslayer.string.cpu().detach().numpy()
print(ftslayer_np)
A = np.array([-20,-10,-17,1.5])
a = np.array([-1,-1,-6.5,0.7])
b = np.array([0,0,11,0.6])
c = np.array([-10,-10,-6.5,0.7])
x_ = np.array([1,0,-0.5,-1])
y_ = np.array([0,0.5,1.5,1])
def energy(x, y, A, a, b, c, x_, y_):
    """Muller-Brown potential: elementwise sum of four anisotropic Gaussian
    wells over the coordinate grids x, y."""
    total = np.zeros(x.shape)
    for Ai, ai, bi, ci, xi, yi in zip(A, a, b, c, x_, y_):
        dx = x - xi
        dy = y - yi
        total += Ai * np.exp(ai*dx**2 + bi*dx*dy + ci*dy**2)
    return total
def q(xval,yval):
    # Evaluate the trained committor network at every node of the (xval, yval)
    # meshgrid. Relies on module-level globals: nx, ny (grid dimensions, which
    # must match xval/yval's shape) and `committor` (the loaded network).
    qvals = np.zeros_like(xval)
    for i in range(nx):
        for j in range(ny):
            # The network expects a flat float32 tensor [x, y].
            Array = np.array([xval[i,j],yval[i,j]]).astype(np.float32)
            Array = torch.from_numpy(Array)
            qvals[i,j] = committor(Array).item()
    return qvals
nx, ny = 100,100
X = np.linspace(-1.75, 1.25, nx)
Y = np.linspace(-0.5, 2.25, ny)
print(X.shape)
xv, yv = np.meshgrid(X, Y)
z = energy(xv,yv,A,a,b,c,x_,y_)
h = plt.contourf(X,Y,z,levels=[-15+i for i in range(16)])
print(np.shape(z),np.shape(xv))
qvals = q(xv,yv)
CS = plt.contour(X, Y, qvals,levels=[0.01,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99], cmap='inferno')
plt.colorbar()
plt.clabel(CS, fontsize=10, inline=1)#
plt.plot(ftslayer_np[:,0], ftslayer_np[:,1], 'bo-')
plt.tick_params(axis='both', which='major', labelsize=9)
plt.tick_params(axis='both', which='minor', labelsize=9)
plt.savefig('energies.pdf', bbox_inches='tight')
#plt.show()
plt.close()
#Now evaluate gradient stuff
n_struct = 100
points_x = np.linspace(-1.75,1.25,n_struct)
points_y = np.linspace(-0.5,2.25,n_struct)
xx, yy = np.meshgrid(points_x,points_y)
zz = np.zeros_like(xx)
grad_2_zz = np.zeros_like(xx)
for i in range(n_struct):
for j in range(n_struct):
test = torch.tensor([xx[i][j],yy[i][j]], dtype=torch.float32, requires_grad=True)
test_y = committor(test)
dy_dx = grad(outputs=test_y, inputs=test)
zz[i][j] = test_y.item()
grad_2_zz_ = dy_dx[0][0]**2+dy_dx[0][1]**2
grad_2_zz[i][j] = grad_2_zz_.item()
energies = energy(xx,yy,A,a,b,c,x_,y_)
from scipy.integrate import simps
energy_int = simps(simps(grad_2_zz*np.exp(-1.0*energies),points_y),points_x)
with open('energy.txt', 'w') as f:
print(energy_int, file=f)
values = np.genfromtxt("../../analysis_scripts/values_of_interest.txt")
q_fem = np.genfromtxt("../../analysis_scripts/zz_structured.txt")
indices = np.nonzero(values)
q_metric = values[indices]*np.abs(qvals[indices]-q_fem[indices])
q_int = np.array((np.mean(q_metric),np.std(q_metric)/len(q_metric)**0.5))
np.savetxt("q_int.txt", q_int)
| 3,195 | 32.291667 | 100 | py |
tps-torch | tps-torch-main/muller-brown-ml/analysis_scripts/plot_tricky_t.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.distributed as dist
# This plotting script is launched under MPI; set up the process group first.
dist.init_process_group('mpi')
#Import necessary tools from tpstorch
from mullerbrown import CommittorNet
import numpy as np
#Import any other thing
import tqdm, sys
#Initialize neural net and load previously trained committor parameters.
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
committor.load_state_dict(torch.load("simple_params_t_220"))
# Muller-Brown potential constants: amplitudes A, Gaussian coefficients a, b, c,
# and the Gaussian centers (x_, y_).
A = np.array([-20,-10,-17,1.5])
a = np.array([-1,-1,-6.5,0.7])
b = np.array([0,0,11,0.6])
c = np.array([-10,-10,-6.5,0.7])
x_ = np.array([1,0,-0.5,-1])
y_ = np.array([0,0.5,1.5,1])
def energy(x,y,A,a,b,c,x_,y_):
    """Evaluate the Muller-Brown-style potential on arrays of (x, y) points.

    The potential is a sum of len(A) anisotropic Gaussians with amplitudes A,
    quadratic-form coefficients (a, b, c), and centers (x_, y_). The result
    has the same shape as x.
    """
    total = np.zeros(x.shape)
    for i, amp in enumerate(A):
        dx = x - x_[i]
        dy = y - y_[i]
        total += amp * np.exp(a[i] * dx**2 + b[i] * dx * dy + c[i] * dy**2)
    return total
def q(xval,yval):
    """Evaluate the committor network on an nx-by-ny grid of (x, y) points.

    Relies on the module-level globals `committor`, `nx`, and `ny`; returns
    an array of committor values with the same shape as xval.
    """
    out = np.zeros_like(xval)
    for i in range(nx):
        for j in range(ny):
            point = np.array([xval[i, j], yval[i, j]]).astype(np.float32)
            out[i, j] = committor(torch.from_numpy(point)).item()
    return out
# Plotting grid over the Muller-Brown landscape.
nx, ny = 100,100
X = np.linspace(-2.0, 1.5, nx)
Y = np.linspace(-1.0, 2.5, ny)
print(X.shape)
xv, yv = np.meshgrid(X, Y)
# Filled contours of the potential, overlaid with committor level sets.
z = energy(xv,yv,A,a,b,c,x_,y_)
h = plt.contourf(X,Y,z,levels=[-15+i for i in range(16)])
print(np.shape(z),np.shape(xv))
qvals = q(xv,yv)
CS = plt.contour(X, Y, qvals,levels=[0.001,0.01,0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0.999])
plt.colorbar()
plt.clabel(CS, fontsize=10, inline=1)#
plt.show()
| 1,511 | 27.528302 | 106 | py |
tps-torch | tps-torch-main/muller-brown-ml/analysis_scripts/plot_tricky_string.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.distributed as dist
#Import necessary tools from tpstorch
from mb_fts import CommittorNet
from tpstorch.ml.nn import FTSLayer
import numpy as np
#Import any other thing
import tqdm, sys
#Initialize neural net and load trained committor parameters.
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
committor.load_state_dict(torch.load("simple_params_1"))
# Reactant/product anchor points used to build the FTS string layer, then load
# the converged string from disk.
start = torch.tensor([-0.5,1.5])
end = torch.tensor([0.6,0.08])
ftslayer = FTSLayer(react_config=start,prod_config=end,num_nodes=48).to('cpu')
ftslayer.load_state_dict(torch.load("simple_string_1"))
print(ftslayer.string)
ftslayer_np = ftslayer.string.cpu().detach().numpy()
print(ftslayer_np)
# Muller-Brown potential constants (amplitudes, Gaussian coefficients, centers).
A = np.array([-20,-10,-17,1.5])
a = np.array([-1,-1,-6.5,0.7])
b = np.array([0,0,11,0.6])
c = np.array([-10,-10,-6.5,0.7])
x_ = np.array([1,0,-0.5,-1])
y_ = np.array([0,0.5,1.5,1])
def energy(x,y,A,a,b,c,x_,y_):
    """Sum-of-Gaussians (Muller-Brown-type) potential evaluated elementwise.

    Each of the len(A) terms is A[i]*exp(a[i]*dx^2 + b[i]*dx*dy + c[i]*dy^2)
    with dx = x - x_[i], dy = y - y_[i]. Returns an array shaped like x.
    """
    acc = np.zeros(x.shape)
    for idx in range(len(A)):
        dx, dy = x - x_[idx], y - y_[idx]
        acc = acc + A[idx] * np.exp(a[idx] * dx**2 + b[idx] * dx * dy + c[idx] * dy**2)
    return acc
def q(xval,yval):
    """Return committor-network values over the nx-by-ny evaluation grid.

    Uses the module-level `committor`, `nx`, and `ny`; output matches the
    shape of xval.
    """
    result = np.zeros_like(xval)
    for row in range(nx):
        for col in range(ny):
            feat = np.array([xval[row, col], yval[row, col]]).astype(np.float32)
            result[row, col] = committor(torch.from_numpy(feat)).item()
    return result
# Plotting grid over the Muller-Brown landscape.
nx, ny = 100,100
X = np.linspace(-2.0, 1.5, nx)
Y = np.linspace(-1.0, 2.5, ny)
print(X.shape)
xv, yv = np.meshgrid(X, Y)
# Energy contours, committor level sets, and the FTS string nodes on top.
z = energy(xv,yv,A,a,b,c,x_,y_)
h = plt.contourf(X,Y,z,levels=[-15+i for i in range(16)])
print(np.shape(z),np.shape(xv))
qvals = q(xv,yv)
CS = plt.contour(X, Y, qvals,levels=[0.01,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99])
plt.colorbar()
plt.clabel(CS, fontsize=10, inline=1)#
plt.plot(ftslayer_np[:,0], ftslayer_np[:,1], 'bo-')
plt.show()
| 1,831 | 29.032787 | 92 | py |
tps-torch | tps-torch-main/muller-brown-ml/analysis_scripts/energies.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.distributed as dist
from torch.autograd import grad
#Import necessary tools from tpstorch
from mb_ml import CommittorNet
import numpy as np
#Import any other thing
import tqdm, sys
#Initialize neural net; its parameters are loaded checkpoint-by-checkpoint in
#the frame loop below.
committor = CommittorNet(d=2,num_nodes=200).to('cpu')
# Muller-Brown potential constants (amplitudes, Gaussian coefficients, centers).
A = np.array([-20,-10,-17,1.5])
a = np.array([-1,-1,-6.5,0.7])
b = np.array([0,0,11,0.6])
c = np.array([-10,-10,-6.5,0.7])
x_ = np.array([1,0,-0.5,-1])
y_ = np.array([0,0.5,1.5,1])
def energy(x,y,A,a,b,c,x_,y_):
    """Elementwise Muller-Brown-type potential: a sum of anisotropic Gaussians.

    Term i contributes A[i]*exp(a[i]*(x-x_[i])^2 + b[i]*(x-x_[i])*(y-y_[i])
    + c[i]*(y-y_[i])^2). The returned array has the shape of x.
    """
    pot = np.zeros(x.shape)
    n_terms = len(A)
    for k in range(n_terms):
        xs = x - x_[k]
        ys = y - y_[k]
        pot += A[k] * np.exp(a[k] * xs * xs + b[k] * xs * ys + c[k] * ys * ys)
    return pot
def q(xval, yval):
    """Evaluate the committor network at every point of the nx-by-ny grid.

    Depends on the module-level globals `committor`, `nx`, and `ny`.
    """
    grid_vals = np.zeros_like(xval)
    for i in range(nx):
        for j in range(ny):
            sample = torch.from_numpy(
                np.array([xval[i, j], yval[i, j]]).astype(np.float32))
            grid_vals[i, j] = committor(sample).item()
    return grid_vals
# Grid sizes: nx/ny are used by q(); n_struct is the grid for the integrals below.
nx, ny = 100,100
X = np.linspace(-1.75, 1.25, nx)
Y = np.linspace(-0.5, 2.25, ny)
from scipy.integrate import simps
n_struct = 100
points_x = np.linspace(-1.75,1.25,n_struct)
points_y = np.linspace(-0.5,2.25,n_struct)
xx, yy = np.meshgrid(points_x,points_y)
#Now evaluate gradient stuff
# The potential does not change between frames, so compute it once.
energies = energy(xx,yy,A,a,b,c,x_,y_)
# Checkpoint filenames of the committor parameters saved during training.
string_list = ["simple_params_t_" + str(i) + "_0" for i in range(0, 20000, 4)]
# FEM reference committor and the per-point weights for the error metric.
values = np.genfromtxt("values_of_interest.txt")
q_fem = np.genfromtxt("zz_structured.txt")
indices = np.nonzero(values)
integral_eq = np.zeros((len(string_list),))
q_eq = np.zeros((len(string_list),2))
# Context managers guarantee the per-frame data files are flushed and closed
# (they were previously left open for the life of the process).
with open("energies_data.txt", 'a+') as f, open("q_int_data.txt", 'a+') as f2:
    for frame in range(len(string_list)):
        print(frame)
        zz = np.zeros_like(xx)
        committor.load_state_dict(torch.load(string_list[frame]))
        grad_2_zz = np.zeros_like(xx)
        # Autograd gives (dq/dx, dq/dy); store q and |grad q|^2 at each grid point.
        for i in range(n_struct):
            for j in range(n_struct):
                test = torch.tensor([xx[i][j],yy[i][j]], dtype=torch.float32, requires_grad=True)
                test_y = committor(test)
                dy_dx = grad(outputs=test_y, inputs=test)
                zz[i][j] = test_y.item()
                grad_2_zz_ = dy_dx[0][0]**2+dy_dx[0][1]**2
                grad_2_zz[i][j] = grad_2_zz_.item()
        # 2D Simpson integral of |grad q|^2 exp(-beta*V) for this frame.
        integral_eq[frame] = simps(simps(grad_2_zz*np.exp(-1.0*energies),points_y),points_x)
        f.write("{:.5e}\n".format(integral_eq[frame]))
        # Weighted deviation from the FEM reference committor (mean and SEM).
        q_metric = values[indices]*np.abs(zz[indices]-q_fem[indices])
        q_int = np.array((np.mean(q_metric),np.std(q_metric)/len(q_metric)**0.5))
        q_eq[frame,:] = q_int[:]
        f2.write("{:.5e} {:.5E}\n".format(q_eq[frame,0],q_eq[frame,1]))
# Save the per-frame time series. BUGFIX: both files previously saved the static
# `energies` grid instead of the accumulated results.
np.savetxt("energies_time.txt", integral_eq)
np.savetxt("q_time.txt", q_eq)
| 2,741 | 31.258824 | 93 | py |
tps-torch | tps-torch-main/tpstorch/__init__.py | #Always initialize MPI when importing this module
import torch
import torch.distributed as dist
# Initialize the MPI backend as soon as tpstorch is imported, so every
# submodule can rely on torch.distributed being ready.
dist.init_process_group(backend='mpi')
#Save the MPI group, rank, and size
_mpi_group = dist.distributed_c10d._get_default_group()
_rank = dist.get_rank()
_world_size = dist.get_world_size()
from . import _tpstorch
from . import fts
from . import ml
#A helper function to project a set of vectors into a simplex
#Unashamedly copied from https://github.com/smatmo/ProjectionOntoSimplex/blob/master/project_simplex_pytorch.py
def project_simplex(v, z=1.0, axis=-1):
    """
    Implements the algorithm in Figure 1 of
    John Duchi, Shai Shalev-Shwartz, Yoram Singer, Tushar Chandra,
    "Efficient Projections onto the l1-Ball for Learning in High Dimensions", ICML 2008.
    https://stanford.edu/~jduchi/projects/DuchiShSiCh08.pdf
    This algorithm project vectors v onto the simplex w >= 0, \sum w_i = z.
    :param v: A torch tensor, will be interpreted as a collection of vectors.
    :param z: Vectors will be projected onto the z-Simplex: \sum w_i = z.
    :param axis: Indicates the axis of v, which defines the vectors to be projected.
    :return: w: result of the projection
    """
    def _project_simplex_2d(v, z):
        """
        Helper function, assuming that all vectors are arranged in rows of v.
        :param v: NxD torch tensor; Duchi et al. algorithm is applied to each row in vecotrized form
        :param z: Vectors will be projected onto the z-Simplex: \sum w_i = z.
        :return: w: result of the projection
        """
        with torch.no_grad():
            shape = v.shape
            # Degenerate 1-dimensional simplex: the only feasible point is z.
            if shape[1] == 1:
                w = v.clone().detach()
                w[:] = z
                return w
            # Sort each row in decreasing order (sort ascending, then flip).
            mu = torch.sort(v, dim=1)[0]
            mu = torch.flip(mu, dims=(1,))
            cum_sum = torch.cumsum(mu, dim=1)
            j = torch.unsqueeze(torch.arange(1, shape[1] + 1, dtype=mu.dtype, device=mu.device), 0)
            # rho: 0-based index of the last coordinate with mu_j*j - cumsum_j + z > 0
            # (the support size of the projection, per row).
            rho = torch.sum(mu * j - cum_sum + z > 0.0, dim=1, keepdim=True) - 1.
            max_nn = cum_sum[torch.arange(shape[0],dtype=torch.long), rho[:, 0].type(torch.long)]
            # theta: per-row shift subtracted from v before clamping at zero.
            theta = (torch.unsqueeze(max_nn, -1) - z) / (rho.type(max_nn.dtype) + 1)
            w = torch.clamp(v - theta, min=0.0)
            return w
    with torch.no_grad():
        shape = v.shape
        if len(shape) == 1:
            # Single vector: promote to a 1xD batch, project, and squeeze back.
            return _project_simplex_2d(torch.unsqueeze(v, 0), z)[0, :]
        else:
            # Move `axis` to the end, flatten all other axes into rows, project,
            # then restore the original layout with the inverse permutation.
            axis = axis % len(shape)
            t_shape = tuple(range(axis)) + tuple(range(axis + 1, len(shape))) + (axis,)
            tt_shape = tuple(range(axis)) + (len(shape) - 1,) + tuple(range(axis, len(shape) - 1))
            v_t = v.permute(t_shape)
            v_t_shape = v_t.shape
            v_t_unroll = torch.reshape(v_t, (-1, v_t_shape[-1]))
            w_t = _project_simplex_2d(v_t_unroll, z)
            w_t_reroll = torch.reshape(w_t, v_t_shape)
            return w_t_reroll.permute(tt_shape)
| 2,980 | 40.985915 | 111 | py |
tps-torch | tps-torch-main/tpstorch/fts/__init__.py | import tpstorch
import torch
import torch.distributed as dist
from tpstorch.fts import _fts
#A class that interfaces with an existing MD/MC code. Its main job is to streamline
#An existing MD code with an FTS method Class.
class FTSSampler(_fts.FTSSampler):
    """Python-facing subclass of the compiled _fts.FTSSampler; adds no behavior.

    Subclass this to hook an existing MD/MC code into the FTS method drivers
    defined in this module.
    """
    pass
#Class for Handling Finite-Temperature String Method (non-CVs)
class FTSMethod:
    """Finite-temperature string method driver with a master/worker layout.

    Rank 0 stores the entire string (`self.string`), updates it, and sends each
    worker rank the pair of hyperplanes (weights/biases) that confine that
    worker's sampler; workers send sampled configurations back to rank 0 for
    the running averages used in the string update.
    """
    def __init__(self, sampler, initial_config, final_config, num_nodes, deltatau, kappa):
        #The MD Simulation object, which interfaces with an MD Library
        self.sampler = sampler
        #String timestep
        self.deltatau = deltatau
        #Regularization strength
        self.kappa = kappa*num_nodes*deltatau
        #Number of nodes including endpoints
        self.num_nodes = num_nodes
        #Number of samples in the running average
        self.nsamples = 0
        #Timestep
        self.timestep = 0
        #Saving the typical configuration size
        #TO DO: assert the config_size as defining a rank-2 tensor. Or else abort the simulation!
        self.config_size = initial_config.size()
        #Nodal parameters
        self.alpha = torch.linspace(0,1,num_nodes)
        #Store rank and world size
        self.rank = dist.get_rank()
        self.world = dist.get_world_size()
        self.string = []
        self.avgconfig = []
        self.string_io = []
        if self.rank == 0:
            # Initial string: linear interpolation between the two basins.
            self.string = torch.zeros(self.num_nodes, self.config_size[0],self.config_size[1])
            for i in range(self.num_nodes):
                self.string[i] = torch.lerp(initial_config,final_config,self.alpha[i])
                if i > 0 and i < self.num_nodes-1:
                    self.string_io.append(open("string_{}.xyz".format(i),"w"))
            #savenodal configurations and running average.
            #Note that there's no need to compute running averages on the two end nodes (because they don't move)
            self.avgconfig = torch.zeros_like(self.string[1:-1])
        #The weights constraining hyperplanes
        self.weights = torch.stack((torch.zeros(self.config_size), torch.zeros(self.config_size)))
        #The biases constraining hyperplanes
        self.biases = torch.zeros(2)
    #Sends the weights and biases of the hyperplanes used to restrict the MD simulation
    #It performs point-to-point communication with every sampler
    def compute_hyperplanes(self):
        if self.rank == 0:
            #String configurations are pre-processed to create new weights and biases
            #For the hyerplanes. Then they're sent to the other ranks
            for i in range(1,self.world):
                self.compute_weights(i+1)
                dist.send(self.weights, dst=i, tag=2*i)
                self.compute_biases(i+1)
                dist.send(self.biases, dst=i, tag=2*i+1)
            self.compute_weights(1)
            self.compute_biases(1)
        else:
            dist.recv(self.weights, src = 0, tag = 2*self.rank )
            dist.recv(self.biases, src = 0, tag = 2*self.rank+1 )
    #Helper function for creating weights
    def compute_weights(self,i):
        if self.rank == 0:
            # Normals of the two half-way hyperplanes that bracket node i.
            self.weights = torch.stack((0.5*(self.string[i]-self.string[i-1]), 0.5*(self.string[i+1]-self.string[i])))
        else:
            raise RuntimeError('String is not stored in Rank-{}'.format(self.rank))
    #Helper function for creating biases
    def compute_biases(self,i):
        if self.rank == 0:
            # Offsets chosen so each plane passes through the midpoint of its edge.
            self.biases = torch.tensor([torch.sum(-0.5*(self.string[i]-self.string[i-1])*0.5*(self.string[i]+self.string[i-1])),
                                        torch.sum(-0.5*(self.string[i+1]-self.string[i])*0.5*(self.string[i+1]+self.string[i]))],
                                        )
        else:
            raise RuntimeError('String is not stored in Rank-{}'.format(self.rank))
    #Update the string. Since it only exists in the first rank, only the first rank gets to do this
    def update(self):
        if self.rank == 0:
            ## (1) Regularized Gradient Descent
            self.string[1:-1] += -self.deltatau*(self.string[1:-1]-self.avgconfig)+self.kappa*self.deltatau*self.num_nodes*(self.string[0:-2]-2*self.string[1:-1]+self.string[2:])
            ## (2) Re-parameterization/Projection
            #print(self.string)
            #Compute the new intermediate nodal variables
            #which doesn't obey equal arc-length parametrization
            ell_k = torch.norm(self.string[1:]-self.string[:-1],dim=(1,2))
            ellsum = torch.sum(ell_k)
            ell_k /= ellsum
            intm_alpha = torch.zeros_like(self.alpha)
            for i in range(1,self.num_nodes):
                intm_alpha[i] += ell_k[i-1]+intm_alpha[i-1]
            #Now interpolate back to the correct parametrization
            #TO DO: Figure out how to avoid unnecessary copy, i.e., newstring copy
            # NOTE(review): AltFTSMethod.update calls torch.bucketize(self.alpha,
            # intm_alpha), i.e. the reverse argument order of the call below —
            # confirm which bucketization is the intended one.
            index = torch.bucketize(intm_alpha,self.alpha)
            newstring = torch.zeros_like(self.string)
            for counter, item in enumerate(index[1:-1]):
                weight = (self.alpha[counter+1]-intm_alpha[item-1])/(intm_alpha[item]-intm_alpha[item-1])
                newstring[counter+1] = torch.lerp(self.string[item-1],self.string[item],weight)
            self.string[1:-1] = newstring[1:-1].detach().clone()
            del newstring
    #Will make MD simulation run on each window
    def run(self, n_steps):
        self.compute_hyperplanes()
        #Do one step in MD simulation, constrained to pre-defined hyperplanes
        self.sampler.runSimulation(n_steps,self.weights[0],self.weights[1],self.biases[0],self.biases[1])
        config = self.sampler.getConfig()
        #Accumulate running average
        #Note that configurations must be sent back to the master rank and thus,
        #it performs point-to-point communication with every sampler
        #TO DO: Try to not accumulate running average and use the more conventional
        #Stochastic gradient descent
        # NOTE(review): avgconfig has num_nodes-2 rows but this loop writes rows
        # 0..world-1, which assumes world_size == num_nodes-2 — confirm.
        if self.rank == 0:
            temp_config = torch.zeros_like(self.avgconfig[0])
            self.avgconfig[0] = (config+self.nsamples*self.avgconfig[0])/(self.nsamples+1)
            for i in range(1,self.world):
                dist.recv(temp_config, src=i)
                self.avgconfig[i] = (temp_config+self.nsamples*self.avgconfig[i])/(self.nsamples+1)
            self.nsamples += 1
        else:
            dist.send(config, dst=0)
        #Update the string
        self.update()
        self.timestep += 1
    #Dump the string into a file
    def dump(self,dumpstring=False):
        if dumpstring and self.rank == 0:
            for counter, io in enumerate(self.string_io):
                io.write("{} \n".format(self.config_size[0]))
                io.write("# step {} \n".format(self.timestep))
                for i in range(self.config_size[0]):
                    for j in range(self.config_size[1]):
                        io.write("{} ".format(self.string[counter+1,i,j]))
                    io.write("\n")
        self.sampler.dumpConfig()
#FTSMethod but with different parallelization strategy
class AltFTSMethod:
    """Finite-temperature string method where each MPI rank owns one string node.

    Unlike FTSMethod, the string is distributed: every rank stores only its own
    nodal configuration (`self.string`) plus copies of its neighbors' nodes
    (`self.lstring`/`self.rstring`), which are refreshed with point-to-point
    sends and receives in send_strings().
    """
    def __init__(self, sampler, initial_config, final_config, num_nodes, deltatau, kappa):
        #The MD Simulation object, which interfaces with an MD Library
        self.sampler = sampler
        #String timestep
        self.deltatau = deltatau
        #Regularization strength
        self.kappa = kappa*num_nodes*deltatau
        #Number of nodes including endpoints
        self.num_nodes = dist.get_world_size()
        #Number of samples in the running average
        self.nsamples = 0
        #Timestep
        self.timestep = 0
        #Saving the typical configuration size
        #TO DO: assert the config_size as defining a rank-2 tensor. Or else abort the simulation!
        self.config_size = initial_config.size()
        #Store rank and world size
        self.rank = dist.get_rank()
        self.world = dist.get_world_size()
        #Nodal parameters
        self.alpha = self.rank/(self.world-1)
        self.string = torch.lerp(initial_config,final_config,dist.get_rank()/(dist.get_world_size()-1))
        # Neighbor-node buffers: interior ranks hold both, endpoints only one.
        if self.rank > 0 and self.rank < self.world-1:
            self.lstring = -torch.ones_like(self.string)
            self.rstring = -torch.ones_like(self.string)
        elif self.rank == 0:
            self.rstring = -torch.ones_like(self.string)
        elif self.rank == self.world-1:
            self.lstring = -torch.ones_like(self.string)
        self.avgconfig = torch.zeros_like(self.string)
        self.string_io = open("string_{}.xyz".format(dist.get_rank()+1),"w")
        #Initialize the weights and biases that constrain the MD simulation
        if self.rank > 0 and self.rank < self.world-1:
            self.weights = torch.stack((torch.zeros(self.config_size), torch.zeros(self.config_size)))
            self.biases = torch.zeros(2)
        else:
            self.weights = torch.stack((torch.zeros(self.config_size),))
            #The biases constraining hyperplanes
            self.biases = torch.zeros(1)
    def send_strings(self):
        #Send to left and right neighbors
        req = None
        if dist.get_rank() < dist.get_world_size()-1:
            dist.send(self.string,dst=dist.get_rank()+1,tag=2*dist.get_rank())
            if dist.get_rank() >= 0:
                dist.recv(self.rstring,src=dist.get_rank()+1,tag=2*(dist.get_rank()+1)+1)
        if dist.get_rank() > 0:
            if dist.get_rank() <= dist.get_world_size()-1:
                dist.recv(self.lstring,src=dist.get_rank()-1,tag=2*(dist.get_rank()-1))
            dist.send(self.string,dst=dist.get_rank()-1,tag=2*dist.get_rank()+1)
    #Sends the weights and biases of the hyperplanes used to restrict the MD simulation
    #It performs point-to-point communication with every sampler
    def compute_hyperplanes(self):
        self.send_strings()
        # Half-way hyperplanes between this node and its neighbor(s).
        if self.rank > 0 and self.rank < self.world-1:
            self.weights = torch.stack((0.5*(self.string-self.lstring), 0.5*(self.rstring-self.string)))
            self.biases = torch.tensor([torch.sum(-0.5*(self.string-self.lstring)*0.5*(self.string+self.lstring)),
                                        torch.sum(-0.5*(self.rstring-self.string)*0.5*(self.rstring+self.string))],
                                        )
        elif self.rank == 0:
            self.weights = torch.stack((0.5*(self.rstring-self.string),))
            self.biases = torch.tensor([torch.sum(-0.5*(self.rstring-self.string)*0.5*(self.rstring+self.string))])
        elif self.rank == self.world-1:
            self.weights = torch.stack((0.5*(self.string-self.lstring),))
            self.biases = torch.tensor([torch.sum(-0.5*(self.string-self.lstring)*0.5*(self.lstring+self.string))])
    #Update the string. Since it only exists in the first rank, only the first rank gets to do this
    def update(self):
        ## (1) Regularized Gradient Descent
        if self.rank > 0 and self.rank < self.world-1:
            self.string += -self.deltatau*(self.string-self.avgconfig)+self.kappa*(self.rstring-2*self.string+self.lstring)
        ## (2) Re-parameterization/Projection
        ## Fist, Send the new intermediate string configurations
        self.send_strings()
        ## Next, compute the length segment of each string
        ell_k = torch.tensor(0.0)
        if self.rank >= 0 and self.rank < self.world -1:
            ell_k = torch.norm(self.rstring-self.string)
        ## Next, compute the arc-length parametrization of the intermediate configuration
        list_of_ell = []
        for i in range(self.world):
            list_of_ell.append(torch.tensor(0.0))
        dist.all_gather(tensor_list=list_of_ell, tensor=ell_k)
        #REALLY REALLY IMPORTANT. this interpolation assumes that the intermediate configuration lies between the left and right neighbors
        #of the desired configuration,
        if self.rank > 0 and self.rank < self.world-1:
            del list_of_ell[-1]
            ellsum = sum(list_of_ell)
            intm_alpha = torch.zeros(self.num_nodes)
            for i in range(1,self.num_nodes):
                intm_alpha[i] += list_of_ell[i-1].detach().clone()/ellsum+intm_alpha[i-1].detach().clone()
            #Now interpolate back to the correct parametrization
            index = torch.bucketize(self.alpha,intm_alpha)
            weight = (self.alpha-intm_alpha[index-1])/(intm_alpha[index]-intm_alpha[index-1])
            if index == self.rank+1:
                self.string = torch.lerp(self.string,self.rstring,weight)
            elif index == self.rank:
                self.string = torch.lerp(self.lstring,self.string,weight)
            else:
                raise RuntimeError("You need to interpolate from points beyond your nearest neighbors. \n \
                        Reduce your timestep for the string update!")
        #REALLY REALLY IMPORTANT. this interpolation assumes that the intermediate configuration lies between the left and right neighbors
        #of the desired configuration,
    #Will make MD simulation run on each window
    def run(self, n_steps):
        self.compute_hyperplanes()
        #Do one step in MD simulation, constrained to pre-defined hyperplanes
        if self.rank > 0 and self.rank < self.world-1:
            self.sampler.runSimulation(n_steps,self.weights[0],self.weights[1],self.biases[0],self.biases[1])
        elif self.rank == 0:
            self.sampler.runSimulation(n_steps,torch.zeros_like(self.weights[0]),self.weights[0],torch.zeros_like(self.biases[0]),self.biases[0])
        elif self.rank == self.world-1:
            self.sampler.runSimulation(n_steps,self.weights[0],torch.zeros_like(self.weights[0]),self.biases[0],torch.zeros_like(self.biases[0]))
        #Compute the running average
        # NOTE(review): unlike FTSMethod.run, nsamples is never incremented here,
        # so this "running average" always equals the latest configuration —
        # confirm whether the increment was dropped intentionally.
        self.avgconfig = (self.sampler.getConfig()+self.nsamples*self.avgconfig).detach().clone()/(self.nsamples+1)
        #Update the string
        self.update()
        self.timestep += 1
    #Dump the string into a file
    def dump(self,dumpstring=False):
        # NOTE(review): here string_io is a single file object (see __init__), so
        # enumerate(self.string_io) iterates its lines rather than a list of
        # files; combined with the rank-0-only guard, this dump appears never to
        # write the string — confirm against FTSMethod.dump.
        if dumpstring and self.rank == 0:
            for counter, io in enumerate(self.string_io):
                io.write("{} \n".format(self.config_size[0]))
                io.write("# step {} \n".format(self.timestep))
                for i in range(self.config_size[0]):
                    for j in range(self.config_size[1]):
                        io.write("{} ".format(self.string[counter+1,i,j]))
                    io.write("\n")
        self.sampler.dumpConfig()
#FTSMethod but matches that used in 2009 paper
# A few things I liked to try here as well that I'll try to get with options, but bare for now
class FTSMethodVor:
    """Distributed string method using Voronoi-cell confinement (2009-paper style).

    Each rank owns one string node; samplers are confined to the Voronoi cell
    of their node (runSimulationVor) instead of hyperplanes. The string update
    is either explicit gradient descent (update_rule == 0) or an implicit
    scheme that solves a precomputed tridiagonal system (update_matrix).
    """
    def __init__(self, sampler, initial_config, final_config, num_nodes, deltatau, kappa, update_rule):
        #The MD Simulation object, which interfaces with an MD Library
        self.sampler = sampler
        #String timestep
        self.deltatau = deltatau
        #Regularization strength
        self.kappa = kappa*num_nodes*deltatau
        #Number of nodes including endpoints
        self.num_nodes = dist.get_world_size()
        #Number of samples in the running average
        self.nsamples = 0
        #Timestep
        self.timestep = 0
        #Update rule
        self.update_rule = update_rule
        #Saving the typical configuration size
        #TO DO: assert the config_size as defining a rank-2 tensor. Or else abort the simulation!
        self.config_size = initial_config.size()
        self.config_size_abs = self.config_size[0]*self.config_size[1]
        #Store rank and world size
        self.rank = dist.get_rank()
        self.world = dist.get_world_size()
        # Matrix used for inversing
        # Construct only on rank 0
        # Note that it always stays the same, so invert here and use at each iteration
        # Kinda confusing notation, but essentially to make this tridiagonal order is
        # we go through each direction in order
        # zeros
        self.matrix = torch.zeros(self.config_size_abs*self.num_nodes, self.config_size_abs*self.num_nodes, dtype=torch.float)
        # first, last row
        for i in range(self.config_size_abs):
            self.matrix[i*self.num_nodes,i*self.num_nodes] = 1.0
            self.matrix[(i+1)*self.num_nodes-1,(i+1)*self.num_nodes-1] = 1.0
        # rest of rows
        for i in range(self.config_size_abs):
            for j in range(1,self.num_nodes-1):
                self.matrix[i*self.num_nodes+j,i*self.num_nodes+j] = 1.0+2.0*self.kappa
                self.matrix[i*self.num_nodes+j,i*self.num_nodes+j-1] = -1.0*self.kappa
                self.matrix[i*self.num_nodes+j,i*self.num_nodes+j+1] = -1.0*self.kappa
        # inverse
        self.matrix_inverse = torch.inverse(self.matrix)
        #Nodal parameters
        self.alpha = self.rank/(self.world-1)
        self.string = torch.lerp(initial_config,final_config,dist.get_rank()/(dist.get_world_size()-1))
        self.avgconfig = torch.zeros_like(self.string)
        self.string_io = open("string_{}.xyz".format(dist.get_rank()),"w")
        #Initialize the Voronoi cell
        # Could maybe make more efficient by looking at only the closest nodes
        #self.voronoi = torch.empty(self.world, self.config_size_abs, dtype=torch.float)
        self.voronoi = [torch.empty(self.config_size[0], self.config_size[1], dtype=torch.float) for i in range(self.world)]
    def send_strings(self):
        # Use an all-gather to communicate all strings to each other
        dist.all_gather(self.voronoi, self.string)
    #Update the string. Since it only exists in the first rank, only the first rank gets to do this
    def update(self):
        ## (1) Regularized Gradient Descent
        # Will use matrix solving in the near future, but for now use original update scheme
        # Will probably make it an option to use explicit or implicit scheme
        if self.rank > 0 and self.rank < self.world-1:
            self.string += -self.deltatau*(self.string-self.avgconfig)+self.kappa*(self.voronoi[self.rank-1]-2*self.string+self.voronoi[self.rank+1])
        elif self.rank == 0:
            self.string -= self.deltatau*(self.string-self.avgconfig)
        else:
            self.string -= self.deltatau*(self.string-self.avgconfig)
        ## (2) Re-parameterization/Projection
        ## Fist, Send the new intermediate string configurations
        self.send_strings()
        ## Next, compute the length segment of each string
        ell_k = torch.tensor(0.0)
        if self.rank >= 0 and self.rank < self.world -1:
            ell_k = torch.norm(self.voronoi[self.rank+1]-self.string)
        ## Next, compute the arc-length parametrization of the intermediate configuration
        list_of_ell = []
        for i in range(self.world):
            list_of_ell.append(torch.tensor(0.0))
        dist.all_gather(tensor_list=list_of_ell, tensor=ell_k)
        #REALLY REALLY IMPORTANT. this interpolation assumes that the intermediate configuration lies between the left and right neighbors
        #of the desired configuration,
        if self.rank > 0 and self.rank < self.world-1:
            del list_of_ell[-1]
            ellsum = sum(list_of_ell)
            intm_alpha = torch.zeros(self.num_nodes)
            for i in range(1,self.num_nodes):
                intm_alpha[i] += list_of_ell[i-1].detach().clone()/ellsum+intm_alpha[i-1].detach().clone()
            #Now interpolate back to the correct parametrization
            index = torch.bucketize(self.alpha,intm_alpha)
            weight = (self.alpha-intm_alpha[index-1])/(intm_alpha[index]-intm_alpha[index-1])
            if index == self.rank+1:
                self.string = torch.lerp(self.string,self.voronoi[self.rank+1],weight)
            elif index == self.rank:
                self.string = torch.lerp(self.voronoi[self.rank-1],self.string,weight)
            else:
                raise RuntimeError("You need to interpolate from points beyond your nearest neighbors. \n \
                        Reduce your timestep for the string update!")
        #REALLY REALLY IMPORTANT. this interpolation assumes that the intermediate configuration lies between the left and right neighbors
        #of the desired configuration,
        #If not the case, set config equal to string center
        #Not ideal, but will leave that to code implementation as there are a
        #bunch of tricky things I don't want to assume here (namely periodic
        #boundary condition)
    #Update the string. Since it only exists in the first rank, only the first rank gets to do this
    def update_matrix(self):
        #Matrix solving scheme
        #Make forcing side, then invert and communicate back
        #Doing some shuffling to make it compatible with the matrix
        #Solving it on all processors, not good practice but it works
        force = self.string-self.deltatau*(self.string-self.avgconfig)
        forces = [torch.empty(self.config_size[0], self.config_size[1], dtype=torch.float) for i in range(self.world)]
        dist.all_gather(forces, force)
        forces_solve = torch.empty(self.world*self.config_size[0]*self.config_size[1], dtype=torch.float)
        # Flatten in the same (direction, node, particle) ordering the matrix uses.
        for i in range(self.config_size[1]):
            for j in range(self.world):
                for k in range(self.config_size[0]):
                    forces_solve[k+j*self.config_size[0]+i*self.config_size[0]*self.world] = forces[j][k,i]
        new_string = torch.matmul(self.matrix_inverse, forces_solve)
        for i in range(self.config_size[1]):
            for k in range(self.config_size[0]):
                self.string[k,i] = new_string[k+self.rank*self.config_size[0]+i*self.config_size[0]*self.world]
        ## (2) Re-parameterization/Projection
        ## Fist, Send the new intermediate string configurations
        self.send_strings()
        ## Next, compute the length segment of each string
        ell_k = torch.tensor(0.0)
        if self.rank >= 0 and self.rank < self.world -1:
            ell_k = torch.norm(self.voronoi[self.rank+1]-self.string)
        ## Next, compute the arc-length parametrization of the intermediate configuration
        list_of_ell = []
        for i in range(self.world):
            list_of_ell.append(torch.tensor(0.0))
        dist.all_gather(tensor_list=list_of_ell, tensor=ell_k)
        #REALLY REALLY IMPORTANT. this interpolation assumes that the intermediate configuration lies between the left and right neighbors
        #of the desired configuration,
        if self.rank > 0 and self.rank < self.world-1:
            del list_of_ell[-1]
            ellsum = sum(list_of_ell)
            intm_alpha = torch.zeros(self.num_nodes)
            for i in range(1,self.num_nodes):
                intm_alpha[i] += list_of_ell[i-1].detach().clone()/ellsum+intm_alpha[i-1].detach().clone()
            #Now interpolate back to the correct parametrization
            index = torch.bucketize(self.alpha,intm_alpha)
            weight = (self.alpha-intm_alpha[index-1])/(intm_alpha[index]-intm_alpha[index-1])
            if index == self.rank+1:
                self.string = torch.lerp(self.string,self.voronoi[self.rank+1],weight)
            elif index == self.rank:
                self.string = torch.lerp(self.voronoi[self.rank-1],self.string,weight)
            else:
                raise RuntimeError("You need to interpolate from points beyond your nearest neighbors. \n \
                        Reduce your timestep for the string update!")
        #REALLY REALLY IMPORTANT. this interpolation assumes that the intermediate configuration lies between the left and right neighbors
        #of the desired configuration,
        #If not the case, set config equal to string center
        #Not ideal, but will leave that to code implementation as there are a
        #bunch of tricky things I don't want to assume here (namely periodic
        #boundary condition)
    #Will make MD simulation run on each window
    def run(self, n_steps):
        #Do one step in MD simulation, constrained to Voronoi cells
        self.send_strings()
        voronoi_list = torch.stack(self.voronoi, dim=0)
        self.sampler.runSimulationVor(n_steps,self.rank,voronoi_list)
        #Compute the running average
        # NOTE(review): nsamples is never incremented, so this always equals the
        # latest configuration rather than a running average — confirm.
        self.avgconfig = (self.sampler.getConfig()+self.nsamples*self.avgconfig).detach().clone()/(self.nsamples+1)
        #Update the string
        if self.update_rule == 0:
            self.update()
        else:
            self.update_matrix()
        self.timestep += 1
    #Dump the string into a file
    def dump(self,dumpstring=False):
        # NOTE(review): string_io is a single file object here, so
        # enumerate(self.string_io) iterates its lines, and self.string is a
        # rank-2 tensor while self.string[counter+1,i,j] indexes it as rank-3 —
        # this dump appears broken; confirm against FTSMethod.dump.
        if dumpstring and self.rank == 0:
            for counter, io in enumerate(self.string_io):
                io.write("{} \n".format(self.config_size[0]))
                io.write("# step {} \n".format(self.timestep))
                for i in range(self.config_size[0]):
                    for j in range(self.config_size[1]):
                        io.write("{} ".format(self.string[counter+1,i,j]))
                    io.write("\n")
        self.sampler.dumpConfig()
| 25,508 | 50.742394 | 178 | py |
tps-torch | tps-torch-main/tpstorch/examples/mullerbrown_ml/__init__.py | from tpstorch.ml import _ml
from . import _mullerbrown_ml
#Just something to pass the class
class MyMLSampler(_mullerbrown_ml.MySampler):
    """Thin Python subclass exposing _mullerbrown_ml.MySampler; adds no behavior."""
    pass
#Just something to pass the class
class MyMLEXPStringSampler(_mullerbrown_ml.MySamplerEXPString):
    """Thin Python subclass exposing _mullerbrown_ml.MySamplerEXPString; adds no behavior."""
    pass
#Just something to pass the class
class MyMLFTSSampler(_mullerbrown_ml.MySamplerFTS):
    """Thin Python subclass exposing _mullerbrown_ml.MySamplerFTS; adds no behavior."""
    pass
| 351 | 24.142857 | 63 | py |
tps-torch | tps-torch-main/tpstorch/examples/dimer_ml/__init__.py | from tpstorch.ml import _ml
from . import _dimer_ml
#Just something to pass the class
class MyMLEXPSampler(_dimer_ml.DimerEXP):
    """Thin Python subclass exposing _dimer_ml.DimerEXP; adds no behavior."""
    pass
class MyMLEXPStringSampler(_dimer_ml.DimerEXPString):
    """Thin Python subclass exposing _dimer_ml.DimerEXPString; adds no behavior."""
    pass
class MyMLFTSSampler(_dimer_ml.DimerFTS):
    """Thin Python subclass exposing _dimer_ml.DimerFTS; adds no behavior."""
    pass
| 254 | 18.615385 | 53 | py |
tps-torch | tps-torch-main/tpstorch/examples/dimer_solv_ml/__init__.py | from tpstorch.ml import _ml
from . import _dimer_solv_ml
#Just something to pass the class
class MyMLEXPSampler(_dimer_solv_ml.DimerSolvEXP):
    """Thin Python subclass exposing _dimer_solv_ml.DimerSolvEXP; adds no behavior."""
    pass
class MyMLEXPStringSampler(_dimer_solv_ml.DimerSolvEXPString):
    """Thin Python subclass exposing _dimer_solv_ml.DimerSolvEXPString; adds no behavior."""
    pass
class MyMLFTSSampler(_dimer_solv_ml.DimerSolvFTS):
    """Thin Python subclass exposing _dimer_solv_ml.DimerSolvFTS; adds no behavior."""
    pass
| 286 | 21.076923 | 62 | py |
tps-torch | tps-torch-main/tpstorch/ml/deprecated.py | """
A place to put classes and method which we will discard for all future implementations. We'll still include in case we want to revert back
"""
from itertools import cycle

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim.functional as F
import tqdm
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.utils.data import IterableDataset
class FTSLayer(nn.Module):
    r"""A linear layer whose parameters are the nodal configurations (the
    "string") of the general finite-temperature string (FTS) method.

    Args:
        react_config (torch.Tensor): starting configuration in the reactant basin.
        prod_config (torch.Tensor): starting configuration in the product basin.
        num_nodes (int): number of string nodes, including both endpoints.
    """
    def __init__(self, react_config, prod_config, num_nodes):
        super().__init__()
        # Linear interpolation between the two basins; stored as a parameter so
        # the string is saved with state_dict, but kept out of autograd.
        fractions = np.linspace(0, 1, num_nodes)
        nodes = torch.vstack([(1 - s) * react_config + s * prod_config for s in fractions])
        param = nn.Parameter(nodes)
        param.requires_grad = False
        self.string = param

    def forward(self, x):
        # Each row of `edges` connects consecutive string nodes and serves as
        # the normal of a hyperplane wedged between them.
        edges = self.string[1:] - self.string[:-1]
        midpoints = 0.5 * (self.string[1:] + self.string[:-1])
        # Offsets chosen so the output is exactly zero at each midpoint.
        offsets = -torch.sum(midpoints * edges, dim=1)
        return torch.matmul(x, edges.t()) + offsets
class EXPReweightSGD(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum).

    This implementation has an additional step which is reweighting the
    computed gradients through free-energy estimation techniques. Currently
    only exponential averaging (EXP) is implemented because it is cheap.
    One MPI rank corresponds to one umbrella window; per-window gradients are
    combined using the reweighting factors z_l.

    Any more detailed implementation should be consulted on torch.optim.SGD

    Args:
        params: model parameters to optimize.
        sampler: unused; kept for interface compatibility.
        lr (float): learning rate.
        momentum, dampening, weight_decay, nesterov: as in torch.optim.SGD.
        mode (str): "random" picks a random reference window on each step;
            any other value uses the fixed ``ref_index``.
        ref_index (int, optional): fixed reference window when mode != "random".
    """
    def __init__(self, params, sampler=required, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, mode="random", ref_index=None):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(EXPReweightSGD, self).__init__(params, defaults)
        #Storing a 1D Tensor of reweighting factors, one entry per MPI rank
        self.reweight = [torch.zeros(1) for i in range(dist.get_world_size())]
        #Choose whether to sample window references randomly or not
        self.mode = mode
        if mode != "random":
            if ref_index is None or ref_index < 0:
                raise ValueError("For non-random choice of window reference, you need to set ref_index!")
            else:
                self.ref_index = torch.tensor(ref_index)
    def __setstate__(self, state):
        super(EXPReweightSGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
    @torch.no_grad()
    def step(self, closure=None, fwd_weightfactors=required, bwrd_weightfactors=required, reciprocal_normconstants=required):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
            fwd_weightfactors: minibatch of forward weight factors w_{l+1}/w_l.
            bwrd_weightfactors: minibatch of backward weight factors w_{l-1}/w_l.
            reciprocal_normconstants: minibatch of 1/c(x) values.

        Returns:
            tuple: (mean reciprocal normalizing constant, reweighting factors).
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        #Average out the batch of weighting factors (unique to each process)
        #and distribute them across all processes.
        #TO DO: combine weight factors into a single array so that we have one contigous memory to distribute
        self.reweight = [torch.zeros(1) for i in range(dist.get_world_size())]
        fwd_meanwgtfactor = self.reweight.copy()
        dist.all_gather(fwd_meanwgtfactor,torch.mean(fwd_weightfactors))
        fwd_meanwgtfactor = torch.tensor(fwd_meanwgtfactor[:-1])  #last window has no forward neighbor
        bwrd_meanwgtfactor = self.reweight.copy()
        dist.all_gather(bwrd_meanwgtfactor,torch.mean(bwrd_weightfactors))
        bwrd_meanwgtfactor = torch.tensor(bwrd_meanwgtfactor[1:])  #first window has no backward neighbor
        #Randomly select a window as a free energy reference and broadcast that index across all processes
        if self.mode == "random":
            self.ref_index = torch.randint(low=0,high=dist.get_world_size(),size=(1,))
            dist.broadcast(self.ref_index, src=0)
        #Computing the reweighting factors, z_l in our notation
        #NOTE(review): computeZl is neither defined nor imported in this module;
        #this deprecated code would raise NameError if actually executed.
        self.reweight = computeZl(self.ref_index.item(),fwd_meanwgtfactor,bwrd_meanwgtfactor)#newcontainer)
        self.reweight.div_(torch.sum(self.reweight)) #normalize
        #Use it first to compute the mean inverse normalizing constant
        mean_recipnormconst = torch.mean(reciprocal_normconstants)#invnormconstants)
        mean_recipnormconst.mul_(self.reweight[dist.get_rank()])
        #All reduce the mean invnormalizing constant
        dist.all_reduce(mean_recipnormconst)
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                #Gradient of parameters
                #p.grad should be the average of grad(x)/c(x) over the minibatch
                d_p = p.grad
                #Multiply with the window's respective reweighting factor
                d_p.mul_(self.reweight[dist.get_rank()])
                #All reduce the gradients
                dist.all_reduce(d_p)
                #Divide in-place by the mean inverse normalizing constant
                d_p.div_(mean_recipnormconst)
                if weight_decay != 0:
                    d_p = d_p.add(p, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.add_(d_p, alpha=-group['lr'])
        return mean_recipnormconst, self.reweight
class OldEXPReweightSimulation(IterableDataset):
    """Deprecated iterable dataset streaming sampler configurations together
    with EXP reweighting factors.

    Each yielded item is a tuple of (configuration, committor gradient,
    1/c(x), forward weight factor, backward weight factor). Iteration cycles
    indefinitely through ``runSimulation``.
    """
    def __init__(self, sampler, committor, period):
        ## Store the MD/MC Simulator, which samples our data
        self.sampler = sampler
        ## The number of timesteps we do per iteration of the optimization
        self.period = period
        ## A flag which is set False when we detect something wrong
        self.continue_simulation = True
        ## The committor, which we will continuously call for gradient computations
        self.committor = committor
        ## We also run the first forward-backward pass here, using the torch_config
        ## saved in our sampler
        #Zero out any gradients
        self.committor.zero_grad()
        #Forward pass
        self.out = self.committor(self.sampler.torch_config)
        #Compute the first set of reweighting factors from our initial condition
        self.sampler.computeFactors(self.out)
        #Backprop to compute gradients w.r.t. x
        self.out.backward()
    def runSimulation(self):
        """Generator: advance the sampler ``period`` steps, then yield one sample."""
        while self.continue_simulation:
            for i in range(self.period):
                #Take one step; the sampler receives the current committor output
                self.sampler.step(self.out,onlytst=False)
                #Save config
                self.sampler.save()
                #Zero out any gradients
                self.committor.zero_grad()
                #Forward pass
                self.out = self.committor(self.sampler.torch_config)
                #Compute the new set of reweighting factors from this new step
                self.sampler.computeFactors(self.out)
                #Backprop to compute gradients of x
                self.out.backward()
            if torch.sum(torch.isnan(self.out)) > 0:
                raise ValueError("Committor value is NaN!")
            else:
                yield ( self.sampler.torch_config, #The configuration of the system
                        torch.autograd.grad(self.committor(self.sampler.torch_config), self.sampler.torch_config, create_graph=True)[0], #The gradient of commmittor with respect to input
                        self.sampler.reciprocal_normconstant,  #Inverse of normalizing constant, denoted as 1/c(x) in the manuscript
                        self.sampler.fwd_weightfactor, #this is the un-normalized weighting factor, this should compute w_{l+1}/w_{l} where l is the l-th window
                        self.sampler.bwrd_weightfactor, #this is the un-normalized weighting factor, this should compute w_{l-1}/w_{l} where l is the l-th window
                        )
    def __iter__(self):
        #Cycle through every period indefinitely
        return cycle(self.runSimulation())
## TO DO: Revamp how we do committor analysis!
class TSTValidation(IterableDataset):
    """Deprecated iterable dataset for validating a committor model by direct
    committor analysis.

    ``generateInitialConfigs`` streams configurations sampled in the
    transition-state region; ``validate`` then shoots unbiased trajectories
    from each configuration and compares the empirical committor against the
    model's prediction.
    """
    def __init__(self, sampler, committor, period):
        ## Store the MD/MC Simulator, which samples our data
        self.sampler = sampler
        ## The number of timesteps we do per iteration of the optimization
        self.period = period
        ## A flag which is set False when we detect something wrong
        self.continue_simulation = True
        ## The committor, which we will continuously call for gradient computations
        self.committor = committor
        ## We also run the first forward-backward pass here, using the torch_config
        ## saved in our sampler
        #Zero out any gradients
        self.committor.zero_grad()
        #Forward pass
        self.out = self.committor(self.sampler.torch_config)
        #Backprop to compute gradients w.r.t. x
        self.out.backward()
    def generateInitialConfigs(self):
        """Generator yielding (configuration, predicted committor) pairs
        sampled every ``period`` steps while constrained to the TST region."""
        while self.continue_simulation:
            for i in range(self.period):
                #Take one step in the transition state region
                self.sampler.step(self.out,onlytst=True)
                #Zero out any gradients
                self.committor.zero_grad()
                #Forward pass
                self.out = self.committor(self.sampler.torch_config)
                #Backprop to compute gradients of x
                self.out.backward()
            if torch.sum(torch.isnan(self.out)) > 0:
                raise ValueError("Committor value is NaN!")
            else:
                yield ( self.sampler.torch_config, #The configuration of the system
                        self.committor(self.sampler.torch_config), #the commmittor value at that point, which we will compare it with.
                        )
    #Validation for-loop, performing committor analysis
    def validate(self, batch, trials=10, validation_io=None, product_checker= None, reactant_checker = None):
        """Estimate the empirical committor for each configuration in ``batch``.

        For every configuration, ``trials`` unbiased trajectories are run
        until each commits to the product basin (counted as 1.0) or the
        reactant basin (counted as 0.0). The sample mean and a 95% confidence
        interval are optionally written to ``validation_io``.

        Raises:
            RuntimeError: if product_checker or reactant_checker is not given.
        """
        #Separate out batch data from configurations and the neural net's prediction
        configs, committor_values = batch
        if product_checker is None or reactant_checker is None:
            raise RuntimeError("User must supply a function that checks if a configuration is in the product state or not!")
        else:
            #Use tqdm to track the progress
            for idx, initial_config in tqdm.tqdm(enumerate(configs)):
                counts = []
                for i in range(trials):
                    hitting = False
                    #Override the current configuration using a fixed initialization routine
                    self.sampler.initialize_from_torchconfig(initial_config.detach().clone())
                    #Zero out any gradients
                    self.committor.zero_grad()
                    #Forward pass
                    self.out = self.committor(self.sampler.torch_config)
                    #Backprop to compute gradients of x
                    self.out.backward()
                    #Run simulation and stop until it falls into the product or reactant state
                    while hitting is False:
                        self.sampler.step_unbiased()
                        #Zero out any gradients
                        self.committor.zero_grad()
                        #Forward pass
                        self.out = self.committor(self.sampler.torch_config)
                        #Backprop to compute gradients of x
                        self.out.backward()
                        if product_checker(self.sampler.torch_config) is True:
                            counts.append(1.0)
                            hitting = True
                        if reactant_checker(self.sampler.torch_config) is True:
                            counts.append(0.0)
                            hitting = True
                #Compute the committor after a certain number of trials
                counts = np.array(counts)
                mean_count = np.mean(counts)
                conf_count = 1.96*np.std(counts)/len(counts)**0.5 #do 95 % confidence interval
                #Save into io
                if validation_io is not None:
                    validation_io.write('{} {} {} \n'.format(committor_values[idx].item(),mean_count, conf_count))
                    validation_io.flush()
    def __iter__(self):
        #Cycle through every period indefinitely
        return cycle(self.generateInitialConfigs())
| 14,663 | 43.302115 | 186 | py |
tps-torch | tps-torch-main/tpstorch/ml/optim.py | import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from tpstorch import _rank, _world_size
from tpstorch import dist
# Stolen adam implementation from PyTorch, weird *, operator kills us everytime
import math
from torch import Tensor
from typing import List, Optional
def adam(params: List[Tensor],
         grads: List[Tensor],
         exp_avgs: List[Tensor],
         exp_avg_sqs: List[Tensor],
         max_exp_avg_sqs: List[Tensor],
         state_steps: List[int],
         amsgrad: bool,
         beta1: float,
         beta2: float,
         lr: float,
         weight_decay: float,
         eps: float):
    r"""Functional API that performs Adam algorithm computation.

    Updates each tensor in ``params`` (and its moment buffers) in place.
    See :class:`~torch.optim.Adam` for details.
    """
    for i, (param, grad, exp_avg, exp_avg_sq, step) in enumerate(
            zip(params, grads, exp_avgs, exp_avg_sqs, state_steps)):
        # Bias corrections for the first and second moment estimates.
        correction1 = 1 - beta1 ** step
        correction2 = 1 - beta2 ** step

        # Classic (coupled) L2 weight decay folded into the gradient.
        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        # Exponential moving averages of the gradient and its square.
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)

        if amsgrad:
            # AMSGrad variant: track the running maximum of the second moment
            # and use it for normalizing the running average of the gradient.
            torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
            denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(correction2)).add_(eps)
        else:
            denom = (exp_avg_sq.sqrt() / math.sqrt(correction2)).add_(eps)

        param.addcdiv_(exp_avg, denom, value=-(lr / correction1))
class ParallelAdam(Optimizer):
    r"""Implements Adam algorithm.

    This implementation has an additional step which is to collect gradients
    computed in different MPI processes through a distributed all-reduce.
    This is useful when you're running many unbiased simulations.
    NOTE(review): dist.all_reduce defaults to a SUM, not an average; if
    averaging across ranks is intended, the learning rate implicitly absorbs
    the factor of world size.

    Any more detailed implementation should be consulted on torch.optim.Adam
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(ParallelAdam, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(ParallelAdam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            state_sums = []  #NOTE(review): unused; carried over from torch's Adam
            max_exp_avg_sqs = []
            state_steps = []
            beta1, beta2 = group['betas']
            for p in group['params']:
                if p.grad is not None:
                    params_with_grad.append(p)
                    if p.grad.is_sparse:
                        raise RuntimeError('ParallelAdam does not support sparse gradients!')
                    #This is the new part from the original Adam implementation
                    #We just do an all reduce on the gradients
                    d_p = p.grad
                    dist.all_reduce(d_p)
                    grads.append(d_p)
                    state = self.state[p]
                    # Lazy state initialization
                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group['amsgrad']:
                            # Maintains max of all exp. moving avg. of sq. grad. values
                            state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])
                    if group['amsgrad']:
                        max_exp_avg_sqs.append(state['max_exp_avg_sq'])
                    # update the steps for each param group update
                    state['step'] += 1
                    # record the step after step update
                    state_steps.append(state['step'])
            #Delegate the actual parameter update to the functional adam() above
            adam(params_with_grad,
                 grads,
                 exp_avgs,
                 exp_avg_sqs,
                 max_exp_avg_sqs,
                 state_steps,
                 group['amsgrad'],
                 beta1,
                 beta2,
                 group['lr'],
                 group['weight_decay'],
                 group['eps']
                 )
        return loss
class ParallelSGD(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum).

    This implementation has an additional step which is to collect gradients
    computed in different MPI processes through a distributed all-reduce.
    This is useful when you're running many unbiased simulations.
    NOTE(review): dist.all_reduce defaults to a SUM, not an average.

    Any more detailed implementation should be consulted on torch.optim.SGD
    """
    def __init__(self, params, sampler=required, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        #NOTE(review): `sampler` is accepted but never used; kept for interface
        #compatibility with the other optimizers in this module.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(ParallelSGD, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(ParallelSGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                #Gradient of parameters
                #p.grad should be the average of grad(x)/c(x) over the minibatch
                d_p = p.grad
                #This is the new part from the original SGD implementation
                #We just do an all reduce on the gradients
                dist.all_reduce(d_p)
                if weight_decay != 0:
                    d_p = d_p.add(p, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.add_(d_p, alpha=-group['lr'])
        return loss
class FTSImplicitUpdate(Optimizer):
    r"""Implements the Finite-Temperature String Method update with a fully
    implicit time discretization, which gives a larger stability region than
    the explicit update in :class:`FTSUpdate`.

    The implicit step requires solving a linear system. The system matrix
    depends only on the step size, the smoothing strength, and the number of
    nodes, so it is assembled and inverted once in the constructor and the
    inverse is re-used at every iteration.

    Args:
        params: iterable holding the string tensor of shape (_world_size, dimN).
        sampler: unused; kept for signature compatibility with FTSUpdate.
        deltatau (float): string time step (stored under 'lr').
        dimN (int): dimensionality of a single string node.
        kappa (float): strength of the smoothing (second-difference) term.
        freeze (bool): stored in defaults but not used by this updater.
        periodic (bool): whether to wrap configurations back into the box.
        dim (int): spatial dimension (default 3); not used in the update.
    """
    def __init__(self, params, sampler=required, deltatau=required, dimN=required, kappa = 0.1, freeze=False, periodic=False,dim = 3):
        if deltatau is not required and deltatau < 0.0:
            raise ValueError("Invalid step size: {}".format(deltatau))
        defaults = dict(lr=deltatau, kappa=kappa, freeze=freeze)
        super(FTSImplicitUpdate, self).__init__(params, defaults)
        self.avgconfig = 0
        self.nsamples = 0.0
        self.periodic = periodic
        self.dim = dim
        # Assemble the (dimN*_world_size) x (dimN*_world_size) implicit-update
        # matrix. It is block tridiagonal: each spatial component gets one
        # _world_size x _world_size tridiagonal block (component-major order).
        # It never changes, so it is inverted once here and reused every step.
        self.matrix = torch.zeros(dimN*_world_size, dimN*_world_size, dtype=torch.float)
        # Endpoint nodes: implicit-Euler diagonal only (no smoothing term).
        for i in range(dimN):
            self.matrix[i*_world_size,i*_world_size] = 1.0+deltatau
            self.matrix[(i+1)*_world_size-1,(i+1)*_world_size-1] = 1.0+deltatau
        # Interior nodes: implicit-Euler diagonal plus the discrete Laplacian
        # stemming from the kappa smoothing term.
        for i in range(dimN):
            for j in range(1,_world_size-1):
                self.matrix[i*_world_size+j,i*_world_size+j] = 1.0+deltatau+2.0*kappa*deltatau*_world_size
                self.matrix[i*_world_size+j,i*_world_size+j-1] = -1.0*kappa*deltatau*_world_size
                self.matrix[i*_world_size+j,i*_world_size+j+1] = -1.0*kappa*deltatau*_world_size
        self.dimN = dimN
        self.matrix_inverse = torch.inverse(self.matrix)
    def __setstate__(self, state):
        super(FTSImplicitUpdate, self).__setstate__(state)
    @torch.no_grad()
    def step(self, configs, batch_size,boxsize,reorient_sample):
        """Performs a single implicit string-update step.

        Args:
            configs: batch of sampled configurations for this rank's node.
            batch_size (int): number of configurations in ``configs``.
            boxsize: simulation box length, used for periodic wrapping.
            reorient_sample: callable that rotates/translates a sample onto
                the reference node configuration.
        """
        for group in self.param_groups:
            kappa = group['kappa']
            freeze = group['freeze']
            for p in group['params']:
                if p.requires_grad is True:
                    print("Warning! String stored in Rank [{}] has gradient enabled. Make sure that the string is not being updated during NN training!".format(_rank))
                ## (1) Compute the rotated and translated average configuration
                avgconfig = torch.zeros_like(p)
                if self.periodic == True:
                    for num in range(batch_size):
                        #BUGFIX: the box size used to be hard-coded to 10.0 here;
                        #use the caller-supplied boxsize, as FTSUpdate does.
                        configs[num] = reorient_sample(p[_rank].clone(), configs[num], boxsize)
                avgconfig[_rank] = torch.mean(configs,dim=0)
                dist.all_reduce(avgconfig)
                ## (2) Implicit stochastic gradient descent: apply the
                ## pre-computed inverse to solve the linear system.
                force = p.clone()+group['lr']*(avgconfig)
                p.zero_()
                p.add_(torch.matmul(self.matrix_inverse, force.t().flatten()).view(-1,_world_size).t())
                ## (3) Re-parameterization/Projection
                #Compute the new intermediate nodal variables,
                #which don't obey equal arc-length parametrization
                alpha = torch.linspace(0,1,_world_size)
                ell_k = torch.norm(p[1:].clone()-p[:-1].clone(),dim=1)
                ellsum = torch.sum(ell_k)
                ell_k /= ellsum
                intm_alpha = torch.zeros_like(alpha)
                for i in range(1, p.shape[0]):
                    intm_alpha[i] += ell_k[i-1]+intm_alpha[i-1]
                #If we need to account for periodic boundary conditions
                if self.periodic == True:
                    p.add_(-boxsize*torch.round(p/boxsize))
                #REALLY IMPORTANT: this interpolation assumes that the
                #intermediate configuration lies between the left and right
                #neighbors of the desired configuration.
                #Interpolate back to the equal arc-length parametrization.
                #Endpoints are divided by _world_size because the all_reduce
                #below sums identical copies from every rank.
                newstring = torch.zeros_like(p)
                newstring[0] = p[0].clone()/_world_size
                newstring[-1] = p[-1].clone()/_world_size
                if _rank > 0 and _rank < _world_size-1:
                    index = torch.bucketize(alpha[_rank],intm_alpha)
                    weight = (alpha[_rank]-intm_alpha[index-1])/(intm_alpha[index]-intm_alpha[index-1])
                    if index == _rank+1:
                        newstring[_rank] = torch.lerp(p.clone()[_rank],p.clone()[_rank+1],weight)
                    elif index == _rank:
                        newstring[_rank] = torch.lerp(p.clone()[_rank-1],p.clone()[_rank],weight)
                    elif index == _rank-1:
                        newstring[_rank] = torch.lerp(p.clone()[_rank-2],p.clone()[_rank],weight)
                    else:
                        raise RuntimeError("Rank [{}]: You need to interpolate from points beyond your nearest neighbors. \n \
                                Reduce your timestep for the string update!".format(_rank))
                dist.all_reduce(newstring)
                p.zero_()
                p.add_(newstring.clone().detach())
                del newstring
class FTSUpdate(Optimizer):
    r"""Implements the Finite-Temperature String Method update.

    It can be shown that the FTS method update is just stochastic gradient
    descent. Thus, one can also opt to compute the update with momentum to
    accelerate convergence.

    Args:
        params: iterable holding the string tensor of shape (_world_size, dimN).
        sampler: unused; kept for signature compatibility.
        deltatau (float): string time step (stored under 'lr').
        momentum, nesterov: momentum options, as in SGD.
        kappa (float): strength of the smoothing (second-difference) term.
        freeze (bool): if True, the endpoint nodes receive no update.
        periodic (bool): whether to wrap configurations back into the box.
        dim (int): spatial dimension (default 3); not used in the update.
    """
    def __init__(self, params, sampler=required, deltatau=required, momentum=0, nesterov=False, kappa = 0.1, freeze=False,periodic=False,dim=3):
        if deltatau is not required and deltatau < 0.0:
            raise ValueError("Invalid step size: {}".format(deltatau))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        defaults = dict(lr=deltatau, momentum=momentum, nesterov=nesterov, kappa=kappa, freeze=freeze)
        super(FTSUpdate, self).__init__(params, defaults)
        self.avgconfig = 0
        self.nsamples = 0.0
        self.periodic = periodic
        self.dim = dim
    def __setstate__(self, state):
        super(FTSUpdate, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
    @torch.no_grad()
    def step(self, configs, batch_size,boxsize,reorient_sample):
        """Performs a single optimization step.

        Args:
            configs: batch of sampled configurations for this rank's node.
            batch_size (int): number of configurations in ``configs``.
            boxsize: simulation box length, used for periodic wrapping.
            reorient_sample: callable that rotates/translates a sample onto
                the reference node configuration.
        """
        for group in self.param_groups:
            momentum = group['momentum']
            kappa = group['kappa']
            nesterov = group['nesterov']
            freeze = group['freeze']
            for p in group['params']:
                if p.requires_grad is True:
                    #print("Warning! String stored in Rank [{}] has gradient enabled. Make sure that the string is not being updated during NN training!")
                    print("Warning! String stored in Rank [{}] has gradient enabled. Make sure that the string is not being updated during NN training!".format(_rank))
                ## (1) Compute the rotated and translated average configuration
                avgconfig = torch.zeros_like(p)
                if self.periodic == True:
                    for num in range(batch_size):
                        configs[num] = reorient_sample(p[_rank].clone(), configs[num], boxsize)
                avgconfig[_rank] = torch.mean(configs,dim=0)
                if self.periodic == True:
                    avgconfig[_rank] = reorient_sample(p[_rank].clone(),avgconfig[_rank],boxsize)
                dist.all_reduce(avgconfig)
                ## (2) Stochastic Gradient Descent
                d_p = torch.zeros_like(p)
                #Add the gradient of cost function: distance from the running
                #average plus a discrete-Laplacian smoothing term
                d_p[1:-1] = (p[1:-1]-avgconfig[1:-1])-kappa*_world_size*(p[0:-2]-2*p[1:-1]+p[2:])
                if freeze is False:
                    #Endpoints feel no smoothing term, only the attraction
                    d_p[0] = (p[0]-avgconfig[0])
                    d_p[-1] = (p[-1]-avgconfig[-1])
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.add_(d_p, alpha=-group['lr'])
                #If we need to account for periodic boundary conditions
                if self.periodic == True:
                    p.add_(-boxsize*torch.round(p/boxsize))
                ## (3) Re-parameterization/Projection
                #Compute the new intermediate nodal variables
                #which doesn't obey equal arc-length parametrization
                alpha = torch.linspace(0,1,_world_size)
                ell_k = torch.norm(p[1:].clone()-p[:-1].clone(),dim=1)
                ellsum = torch.sum(ell_k)
                ell_k /= ellsum
                intm_alpha = torch.zeros_like(alpha)
                for i in range(1, p.shape[0]):
                    intm_alpha[i] += ell_k[i-1]+intm_alpha[i-1]
                #REALLY REALLY IMPORTANT. this interpolation assumes that the intermediate configuration lies between the left and right neighbors
                #of the desired configuration,
                #Now interpolate back to the correct parametrization.
                #Endpoints are divided by _world_size because the all_reduce
                #below sums identical copies from every rank.
                newstring = torch.zeros_like(p)
                newstring[0] = p[0].clone()/_world_size
                newstring[-1] = p[-1].clone()/_world_size
                if _rank > 0 and _rank < _world_size-1:
                    index = torch.bucketize(alpha[_rank],intm_alpha)
                    weight = (alpha[_rank]-intm_alpha[index-1])/(intm_alpha[index]-intm_alpha[index-1])
                    if index == _rank+1:
                        newstring[_rank] = torch.lerp(p[_rank].clone(),p[_rank+1].clone(),weight)
                    elif index == _rank:
                        newstring[_rank] = torch.lerp(p[_rank-1].clone(),p[_rank].clone(),weight)
                    elif index == _rank-1:
                        newstring[_rank] = torch.lerp(p[_rank-2].clone(),p[_rank].clone(),weight)
                    else:
                        raise RuntimeError("Rank [{}]: You need to interpolate from points beyond your nearest neighbors. \n \
                                Reduce your timestep for the string update!".format(_rank))
                dist.all_reduce(newstring)
                p.zero_()
                p.add_(newstring.clone().detach())
                del newstring
| 20,999 | 43.680851 | 170 | py |
tps-torch | tps-torch-main/tpstorch/ml/data.py | import torch
import numpy as np
from tpstorch import _rank, _world_size
class FTSSimulation:
    """Batched sampler driver for training with the finite-temperature string
    (FTS) method.

    Repeatedly advances the MD/MC sampler and collects ``batch_size``
    configurations (and, when ``nn_training`` is True, committor gradients).
    In 'adaptive' mode, extra steps are taken until each neighboring cell has
    accumulated at least ``min_count`` rejection events, and the sampling
    period is re-tuned accordingly.
    """
    def __init__(self, sampler, batch_size, dimN, period=10,mode='nonadaptive', committor = None, nn_training=True,**kwargs):#, nn_training=True):
        ## Store the MD/MC Simulator, which samples our data
        self.sampler = sampler
        ## The number of timesteps we do per iteration of the optimization
        self.period = period
        ## The number of iterations we do per optimization step
        self.batch_size = batch_size
        ## The size of the problem
        self.dimN = dimN
        ## A flag which is set False when we detect something wrong
        self.continue_simulation = True
        self.nn_training = nn_training
        if nn_training:# == True:
            ## The committor, which we will continuously call for gradient computations
            self.committor = committor
            ## We also run the first forward-backward pass here, using the torch_config
            ## saved in our sampler
            #Zero out any gradients
            self.committor.zero_grad()
            #No more backprop because gradients will never be needed during sampling
            #Forward pass
            self.out = self.committor(self.sampler.torch_config)
            #Backprop to compute gradients w.r.t. x
            self.out.backward()
        #Mode of sampling
        self.mode = mode
        #Default values for adaptive mode sampling
        self.min_count = 1
        self.min_period = 1
        self.max_period = np.inf
        self.max_steps = 10**6
        if mode == 'adaptive':
            #NOTE(review): these use substring matching over the kwarg names,
            #so any key containing the string triggers the branch while the
            #lookup itself still requires the exact key.
            if any("min_count" in s for s in kwargs):
                self.min_count = kwargs["min_count"]
            if any("min_period" in s for s in kwargs):
                self.period = kwargs["min_period"]
            if any("max_period" in s for s in kwargs):
                self.max_period = kwargs["max_period"]
            if any("max_steps" in s for s in kwargs):
                self.max_steps = kwargs["max_steps"]
        elif mode == 'nonadaptive':
            pass
        else:
            raise RuntimeError("There are only two options for mode, 'adaptive' or 'nonadaptive")
    def runSimulation(self):
        """Run one minibatch of sampling.

        Returns:
            (configs, grads) when ``nn_training`` is True, otherwise configs.
        """
        ## Create storage entries
        #Configurations for one mini-batch
        configs = torch.zeros(self.batch_size,self.dimN)
        if self.nn_training:
            #Gradient of committor w.r.t. x for every x in configs
            grads = torch.zeros(self.batch_size,self.dimN)
        #Reset the simulation if it is outside of the cell
        self.sampler.reset()
        for i in range(self.batch_size):
            for j in range(self.period):
                #Take one step
                self.sampler.step()
                #Save config
                self.sampler.save()
            if self.nn_training:# == True:
                #No more backprop because gradients will never be needed during sampling
                #Zero out any gradients
                self.committor.zero_grad()
                #Forward pass
                self.out = self.committor(self.sampler.torch_config)
                #Backprop to compute gradients of x
                self.out.backward()
                if torch.sum(torch.isnan(self.out)) > 0:
                    raise ValueError("Committor value is NaN!")
                else:
                    #Compute all for all storage entries
                    configs[i,:] = self.sampler.torch_config.flatten()
                    grads[i,:] = torch.autograd.grad(self.committor(self.sampler.torch_config), self.sampler.torch_config, create_graph=True)[0].reshape(-1)
            else:
                configs[i,:] = self.sampler.torch_config.flatten()
        with torch.no_grad():
            if self.mode == 'adaptive' and self.min_count != 0:
                #Here we check if we have enough rejection counts,
                #If we don't, then we need to run the simulation a little longer
                for i in range(self.max_steps):
                    if _rank == 0:
                        #First window: only the right neighbor matters
                        if self.sampler.rejection_count[_rank+1].item() >= self.min_count:
                            break
                        else:
                            self.sampler.step()
                            self.sampler.save()
                    elif _rank == _world_size-1:
                        #Last window: only the left neighbor matters
                        if self.sampler.rejection_count[_rank-1].item() >= self.min_count:
                            break
                        else:
                            self.sampler.step()
                            self.sampler.save()
                    else:
                        #Interior windows need counts on both sides
                        if self.sampler.rejection_count[_rank-1].item() >= self.min_count and self.sampler.rejection_count[_rank+1].item() >= self.min_count:
                            break
                        else:
                            self.sampler.step()
                            self.sampler.save()
                #Update the sampling period so that you only need to run the minimal amount of simulation time to get the minimum number of rejection counts
                if _rank == 0:
                    if self.sampler.rejection_count[_rank+1].item() >= self.min_count:
                        self.period = int(np.round((self.min_count/(self.sampler.rejection_count[_rank+1].item()*self.batch_size)*self.sampler.steps)))
                elif _rank == _world_size-1:
                    if self.sampler.rejection_count[_rank-1].item() >= self.min_count:
                        self.period = int(np.round((self.min_count/(self.sampler.rejection_count[_rank-1].item()*self.batch_size)*self.sampler.steps)))
                else:
                    if self.sampler.rejection_count[_rank-1].item() >= self.min_count and self.sampler.rejection_count[_rank+1].item() >= self.min_count:
                        #Use the larger rejection count (smaller resulting period)
                        if self.sampler.rejection_count[_rank-1].item() < self.sampler.rejection_count[_rank+1].item():
                            self.period = int(np.round((self.min_count/(self.sampler.rejection_count[_rank-1].item()*self.batch_size)*self.sampler.steps)))
                        else:
                            self.period = int(np.round((self.min_count/(self.sampler.rejection_count[_rank+1].item()*self.batch_size)*self.sampler.steps)))
                #See if the new sampling period is larger than what's indicated. If so, we reset to this upper bound.
                if self.period > self.max_period:
                    self.period = self.max_period
            #Since simulations may run in un-equal amount of times, we have to normalize rejection counts by the number of timesteps taken
            self.sampler.normalizeRejectionCounts()
        if self.nn_training:
            #Zero out any gradients in the parameters as the last remaining step
            self.committor.zero_grad()
            return configs, grads
        else:
            return configs
class EMUSReweightStringSimulation:
    """Generates minibatches for string-method training with EMUS reweighting.

    Runs the underlying MD/MC sampler for ``period`` steps per sample and
    collects ``batch_size`` samples per call of :meth:`runSimulation`,
    together with committor gradients and the EMUS reweighting quantities.

    Args:
        sampler: MD/MC simulator exposing step()/save()/computeFactors(),
            a ``torch_config`` tensor, and the EMUS factors
            ``reciprocal_normconstant`` and ``overlapprob_row``.
        committor: neural-network committor model.
        period (int): number of sampler timesteps between stored samples.
        batch_size (int): number of samples per minibatch.
        dimN (int): flattened dimensionality of a configuration.
    """
    def __init__(self, sampler, committor, period, batch_size, dimN):
        ## Store the MD/MC Simulator, which samples our data
        self.sampler = sampler
        ## The number of timesteps we do per iteration of the optimization
        self.period = period
        ## The number of iterations we do per optimization step
        self.batch_size = batch_size
        ## The size of the problem
        self.dimN = dimN
        ## A flag which is set False when we detect something wrong
        self.continue_simulation = True
        ## The committor, which we will continuously call for gradient computations
        self.committor = committor
        ## Run the first forward-backward pass here, using the torch_config
        ## saved in our sampler
        #Zero out any gradients
        self.committor.zero_grad()
        #Forward pass
        self.out = self.committor(self.sampler.torch_config)
        #Compute the first set of reweighting factors from our initial condition
        self.sampler.computeFactors()#self.out)
        #Backprop to compute gradients w.r.t. x
        self.out.backward()
    def runSimulation(self):
        """Run one minibatch of sampling.

        Returns:
            tuple: (configs, grads, reciprocal_normconstant, overlapprob_row).
        """
        ## Create storage entries
        #Configurations for one mini-batch
        configs = torch.zeros(self.batch_size,self.dimN)
        #Gradient of committor w.r.t. x for every x in configs
        grads = torch.zeros(self.batch_size,self.dimN)
        #1/c(x) constant for every x in configs
        reciprocal_normconstant = torch.zeros(self.batch_size)
        #w_{l+1}/w_{l} computed for every x in configs
        overlapprob_row = torch.zeros(self.batch_size, _world_size)
        for i in range(self.batch_size):
            for j in range(self.period):
                #Take one step
                self.sampler.step()
                #Save config
                self.sampler.save()
            #Compute the new set of reweighting factors from this new step
            self.sampler.computeFactors()
            #Zero out any gradients
            self.committor.zero_grad()
            #Forward pass
            self.out = self.committor(self.sampler.torch_config)
            #Backprop to compute gradients of x
            self.out.backward()
            if torch.sum(torch.isnan(self.out)) > 0:
                raise ValueError("Committor value is NaN!")
            else:
                #Compute all for all storage entries
                configs[i,:] = self.sampler.torch_config.flatten()
                #BUGFIX: flatten the gradient so multi-dimensional configs are
                #stored consistently with configs[i,:] (matches FTSSimulation).
                grads[i,:] = torch.autograd.grad(self.committor(self.sampler.torch_config), self.sampler.torch_config, create_graph=True)[0].reshape(-1)
                reciprocal_normconstant[i] = self.sampler.reciprocal_normconstant
                overlapprob_row[i,:] = self.sampler.overlapprob_row
        #Zero out any gradients in the parameters as the last remaining step
        self.committor.zero_grad()
        return configs, grads, reciprocal_normconstant, overlapprob_row
class EXPReweightStringSimulation:
    """Data-collecting driver for string-based sampling with EXP reweighting.

    Wraps an MD/MC sampler and a committor network: advances the sampler,
    evaluates the committor and its configurational gradient, and records
    the free-energy-perturbation weight factors needed for reweighting.
    """
    def __init__(self, sampler, committor, period, batch_size, dimN):
        #The MD/MC simulator that generates our samples
        self.sampler = sampler
        #Timesteps taken between two stored samples
        self.period = period
        #Number of samples stored per optimization step
        self.batch_size = batch_size
        #Flattened system size
        self.dimN = dimN
        #Flag flipped to False if something goes wrong
        self.continue_simulation = True
        #The committor network, called repeatedly for gradient evaluations
        self.committor = committor
        #Prime the pipeline on the sampler's stored configuration so that
        #self.out and the reweighting factors are initialized
        self.committor.zero_grad()
        self.out = self.committor(self.sampler.torch_config)
        self.sampler.computeMetric()
        self.sampler.computeFactors()
    def runSimulation(self):
        """Sample one mini-batch and return configs, gradients and weights.

        Raises:
            ValueError: if the committor evaluates to NaN.
        """
        nbatch = self.batch_size
        #Storage: configurations, committor gradients, 1/c(x), and the
        #forward/backward umbrella weight ratios w_{l+1}/w_l and w_{l-1}/w_l
        stored_configs = torch.zeros(nbatch, self.dimN)
        stored_grads = torch.zeros(nbatch, self.dimN)
        inv_normconst = torch.zeros(nbatch)
        fwd_wf = torch.zeros(nbatch, 1)
        bwrd_wf = torch.zeros(nbatch, 1)
        for sample_id in range(nbatch):
            #Advance the sampler 'period' timesteps before storing anything
            for _ in range(self.period):
                self.sampler.step()
            self.sampler.save()
            self.sampler.computeMetric()
            #Refresh the reweighting factors for the new configuration
            self.sampler.computeFactors()
            self.committor.zero_grad()
            self.out = self.committor(self.sampler.torch_config)
            if torch.sum(torch.isnan(self.out)) > 0:
                raise ValueError("Committor value is NaN!")
            #Record everything for this sample
            stored_configs[sample_id, :] = self.sampler.torch_config.flatten()
            stored_grads[sample_id, :] = torch.autograd.grad(self.committor(self.sampler.torch_config), self.sampler.torch_config, create_graph=True)[0].reshape(-1)
            inv_normconst[sample_id] = self.sampler.reciprocal_normconstant
            fwd_wf[sample_id, :] = self.sampler.fwd_weightfactor
            bwrd_wf[sample_id, :] = self.sampler.bwrd_weightfactor
        #Leave parameter gradients clean for the optimizer
        self.committor.zero_grad()
        return stored_configs, stored_grads, inv_normconst, fwd_wf, bwrd_wf
class EXPReweightSimulation:
    """Data-collecting driver for umbrella-window sampling with EXP reweighting.

    Advances an MD/MC sampler whose stepping is biased by the current
    committor output, evaluates the committor and its configurational
    gradient, and records the umbrella-window weight factors.
    """
    def __init__(self, sampler, committor, period, batch_size, dimN):
        #The MD/MC simulator that generates our samples
        self.sampler = sampler
        #Timesteps taken between two stored samples
        self.period = period
        #Number of samples stored per optimization step
        self.batch_size = batch_size
        #Flattened system size
        self.dimN = dimN
        #Flag flipped to False if something goes wrong
        self.continue_simulation = True
        #The committor network, called repeatedly for gradient evaluations
        self.committor = committor
        #Prime the pipeline: one forward/backward pass on the sampler's
        #stored configuration, plus the initial reweighting factors
        self.committor.zero_grad()
        self.out = self.committor(self.sampler.torch_config)
        self.sampler.computeFactors(self.out)
        self.out.backward()
    def runSimulation(self):
        """Sample one mini-batch and return configs, gradients and weights.

        Raises:
            ValueError: if the committor evaluates to NaN.
        """
        nbatch = self.batch_size
        #Storage: configurations, committor gradients, 1/c(x), and the
        #forward/backward umbrella weight ratios w_{l+1}/w_l and w_{l-1}/w_l
        stored_configs = torch.zeros(nbatch, self.dimN)
        stored_grads = torch.zeros(nbatch, self.dimN)
        inv_normconst = torch.zeros(nbatch)
        fwd_wf = torch.zeros(nbatch, 1)
        bwrd_wf = torch.zeros(nbatch, 1)
        for sample_id in range(nbatch):
            #Advance the sampler 'period' biased timesteps before storing
            for _ in range(self.period):
                self.sampler.step(self.out,onlytst=False)
            self.sampler.save()
            self.committor.zero_grad()
            self.out = self.committor(self.sampler.torch_config)
            #Refresh the reweighting factors for the new configuration
            self.sampler.computeFactors(self.out)
            self.out.backward()
            if torch.sum(torch.isnan(self.out)) > 0:
                raise ValueError("Committor value is NaN!")
            #Record everything for this sample
            stored_configs[sample_id, :] = self.sampler.torch_config.flatten()
            stored_grads[sample_id, :] = torch.autograd.grad(self.committor(self.sampler.torch_config), self.sampler.torch_config, create_graph=True)[0].reshape(-1)
            inv_normconst[sample_id] = self.sampler.reciprocal_normconstant
            fwd_wf[sample_id, :] = self.sampler.fwd_weightfactor
            bwrd_wf[sample_id, :] = self.sampler.bwrd_weightfactor
        #Leave parameter gradients clean for the optimizer
        self.committor.zero_grad()
        return stored_configs, stored_grads, inv_normconst, fwd_wf, bwrd_wf
| 17,425 | 41.921182 | 157 | py |
tps-torch | tps-torch-main/tpstorch/ml/nn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.nn.modules.loss import _Loss
#import scipy.sparse.linalg
from tpstorch import _rank, _world_size
from tpstorch import dist
from numpy.linalg import svd
import scipy.linalg
#Helper function to obtain null space
def nullspace(A, atol=1e-13, rtol=0):
A = np.atleast_2d(A)
u, s, vh = svd(A)
tol = max(atol, rtol * s[0])
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
ss = s[nnz:]
return ns, ss
class FTSLayer(nn.Module):
    r"""Linear-interpolation string stored as frozen network parameters.

    The parameters hold the nodal configurations of the string used by the
    finite-temperature string (FTS) method.

    Args:
        react_config (torch.Tensor): starting configuration in the reactant basin.
        prod_config (torch.Tensor): starting configuration in the product basin.
        num_nodes (int): number of nodes discretizing the string.
    """
    def __init__(self, react_config, prod_config, num_nodes):
        super().__init__()
        #Linearly interpolate between the end configurations; gradients are
        #disabled because the string is updated by the FTS method itself,
        #not by backpropagation.
        alphas = np.linspace(0, 1, num_nodes)
        nodes = torch.vstack([(1 - a) * react_config + a * prod_config for a in alphas])
        self.string = nn.Parameter(nodes)
        self.string.requires_grad = False
    def compute_metric(self, x):
        #Squared distance from x to every node of the string
        return torch.sum((self.string - x) ** 2, dim=1)
    def forward(self, x):
        #Squared distance from x to this rank's own node
        return torch.sum((self.string[_rank] - x) ** 2)
class FTSLayerUS(FTSLayer):
    r"""FTS string layer with an umbrella-sampling (US) restraint metric.

    Extends :class:`FTSLayer` with anisotropic spring constants: the
    isotropic squared distance is corrected so that the component along the
    local string tangent is effectively penalized with ``kappa_parallel``
    while the rest uses ``kappa_perpend``.

    Args:
        react_config (torch.Tensor): starting configuration in the reactant basin.
        prod_config (torch.Tensor): starting configuration in the product basin.
        num_nodes (int): number of nodes discretizing the string.
        kappa_perpend (float): spring constant perpendicular to the string.
        kappa_parallel (float): spring constant parallel to the string.
    """
    def __init__(self, react_config, prod_config, num_nodes,kappa_perpend,kappa_parallel):
        super(FTSLayerUS,self).__init__(react_config, prod_config, num_nodes)
        self.kappa_perpend = kappa_perpend
        self.kappa_parallel = kappa_parallel
        #Build the finite-difference tangent vectors along the string.
        #This computation was previously duplicated inline here; it is now
        #delegated to set_tangent() so there is a single implementation.
        self.set_tangent()
    def compute_metric(self, x):
        #Anisotropic squared distance from x to every node of the string:
        #isotropic part plus a correction that rescales the tangential
        #component by kappa_parallel/kappa_perpend
        with torch.no_grad():
            dist_sq = torch.sum((self.string-x)**2,dim=1)
            tangent_sq = torch.sum(self.tangent*(x-self.string),dim=1)
            return dist_sq+(self.kappa_parallel-self.kappa_perpend)*tangent_sq**2/self.kappa_perpend
    def forward(self, x):
        #Same anisotropic metric, but only for this rank's own node
        with torch.no_grad():
            dist_sq = torch.sum((self.string[_rank]-x)**2)
            tangent_sq = torch.sum(self.tangent[_rank]*(x-self.string[_rank]))
            return dist_sq+(self.kappa_parallel-self.kappa_perpend)*tangent_sq**2/self.kappa_perpend
    def set_tangent(self):
        """Recompute the string tangent vectors by finite differences.

        One-sided differences at the endpoints, central differences inside.
        Assumes (approximately) equal node spacing: ds is taken from the
        first segment only.
        """
        ds = torch.norm(self.string[1]-self.string[0])
        self.tangent = torch.zeros_like(self.string)
        self.tangent[0] = (self.string[1]-self.string[0])/ds
        self.tangent[-1] = (self.string[-1]-self.string[-2])/ds
        self.tangent[1:-1] = 0.5*(self.string[2:]-self.string[:-2])/ds
    #NOTE(review): a dead, commented-out alternative forward() that computed
    #the tangent on-the-fly per rank was removed from the class body.
class FTSCommittorLoss(_Loss):
    r"""Loss function which implements the MSE loss for the committor function.

    This loss function automatically collects approximate values of the committor around a string
    by running biased simulations and solving for the cell-to-cell rejection statistics.

    Args:
        fts_sampler (tpstorch.MLSampler): the MC/MD sampler to perform biased simulations in the string.
        committor (torch.nn.Module): the committor function, represented as a neural network.
        fts_layer: the FTS layer holding the string (stored, not used directly here).
        dimN (int): flattened size of a single configuration.
        lambda_fts (float): the penalty strength of the MSE loss. Defaults to one.
        fts_start (int): iteration number where we start collecting samples for the committor loss.
        fts_end (int): iteration number where we stop collecting samples for the committor loss.
        fts_rate (int): sampling rate for collecting committor samples during the iterations.
        fts_max_steps (int): number of maximum timesteps to compute the committor per initial configuration.
        fts_min_count (int): minimum number of rejection counts before we stop the simulation.
        batch_size_fts (float): size of mini-batch used during training, expressed as the fraction of total batch collected at that point.
        tol (float): absolute tolerance for null-space detection of the rate matrix.
        mode (str): 'shift' adds tol to the off-diagonal rate-matrix entries; 'noshift' leaves them as-is.
    """
    def __init__(self, fts_sampler, committor, fts_layer, dimN, lambda_fts=1.0, fts_start=200, fts_end=2000000, fts_rate=100, fts_max_steps=10**6, fts_min_count=0, batch_size_fts=0.5, tol = 5e-9, mode='noshift'):
        super(FTSCommittorLoss, self).__init__()
        self.fts_loss = torch.zeros(1)
        self.fts_layer = fts_layer
        self.committor = committor
        self.fts_sampler = fts_sampler
        self.fts_start = fts_start
        self.fts_end = fts_end
        self.lambda_fts = lambda_fts
        self.fts_rate = fts_rate
        self.batch_size_fts = batch_size_fts
        #Pre-allocate storage for every committor sample we could ever collect
        #(+2 leaves room for the samples taken at fts_start and fts_end)
        self.fts_configs = torch.zeros(int((self.fts_end-self.fts_start)/fts_rate+2), dimN, dtype=torch.float)
        self.fts_configs_values = torch.zeros(int((self.fts_end-self.fts_start)/fts_rate+2), dtype=torch.float)
        self.fts_configs_count = 0
        self.min_count = fts_min_count
        self.max_steps = fts_max_steps
        self.tol = tol
        self.mode = mode
        if mode != 'noshift' and mode != 'shift':
            raise RuntimeError("The only available modes are 'shift' or 'noshift'!")
    def runSimulation(self, strings):
        """Run a biased simulation until enough cell-rejection events are recorded.

        NOTE(review): the 'strings' argument is unused here; all sampling
        state lives inside self.fts_sampler.
        """
        with torch.no_grad():
            #Initialize
            self.fts_sampler.reset()
            for i in range(self.max_steps):
                self.fts_sampler.step()
                self.fts_sampler.save()
                #If we don't, then we need to run the simulation a little longer
                if self.min_count != 0:
                    #Here we check if we have enough rejection counts, looking
                    #only at the neighboring cells that exist for this rank
                    if _rank == 0:
                        if self.fts_sampler.rejection_count[_rank+1].item() >= self.min_count:
                            break
                    elif _rank == _world_size-1:
                        if self.fts_sampler.rejection_count[_rank-1].item() >= self.min_count:
                            break
                    else:
                        if self.fts_sampler.rejection_count[_rank-1].item() >= self.min_count and self.fts_sampler.rejection_count[_rank+1].item() >= self.min_count:
                            break
        #Since simulations may run in un-equal amount of times, we have to normalize rejection counts by the number of timesteps taken
        self.fts_sampler.normalizeRejectionCounts()
    @torch.no_grad()
    def compute_qalpha(self):
        """ Computes the reweighting factor z_l by solving an eigenvalue problem.

            The per-rank rejection counts are assembled into a rate matrix K
            (via all_reduce); z is the null vector of K^T - diag(row sums).
            The approximate committor values along the string are then the
            solution of a tridiagonal linear system built from z, with the
            boundary values pinned to q(0)=0 and q(S-1)=1.

            Returns the approximate committor value for this rank's cell.

            Formula and maths is based on our paper.
        """
        qalpha = torch.linspace(0,1,_world_size)
        #Set the row according to rank
        Kmatrix = torch.zeros(_world_size,_world_size)
        Kmatrix[_rank] = self.fts_sampler.rejection_count
        #Add tolerance values to the off-diagonal elements (shift mode only)
        if self.mode == 'shift':
            if _rank == 0:
                Kmatrix[_rank+1] += self.tol
            elif _rank == _world_size-1:
                Kmatrix[_rank-1] += self.tol
            else:
                Kmatrix[_rank-1] += self.tol
                Kmatrix[_rank+1] += self.tol
        #All reduce to collect the results
        dist.all_reduce(Kmatrix)
        #Finallly, build the final matrix
        Kmatrix = Kmatrix.t()-torch.diag(torch.sum(Kmatrix,dim=1))
        #Compute the reweighting factors via the (numerical) null space
        v, w = nullspace(Kmatrix.numpy(), atol=self.tol, rtol=0)
        try:
            index = np.argmin(w)
            zl = np.real(v[:,index])
        except:
            #Shift value may not be small enough so we choose a default higher tolerance detection
            v, w = nullspace(Kmatrix.numpy(), atol=10**(-5), rtol=0)
            index = np.argmin(w)
            zl = np.real(v[:,index])
        #Normalize
        zl = zl/np.sum(zl)
        #Alright, now compute approximate committor values around the string
        #Which solves a linear equation to solve Ax=b
        #Build the A matrix (each interior rank fills its own row; all_reduce merges)
        A = torch.zeros(_world_size-2,_world_size-2)
        if _rank > 0 and _rank < _world_size-1:
            i = _rank-1
            A[i,i] = float(-zl[_rank-1]-zl[_rank+1])
            if i != 0:
                A[i,i-1] = float(zl[_rank-1])
            if i != _world_size-3:
                A[i,i+1] = float(zl[_rank+1])
        dist.all_reduce(A)
        #Build the b vector (only the last interior node couples to q=1)
        b = np.zeros(_world_size-2)
        b[-1] = -zl[-1]
        #Approximate committor values
        qalpha[1:-1] = torch.from_numpy(scipy.linalg.solve(a=A.numpy(),b=b))
        #Print out the estimated committor values
        dist.barrier()
        if _rank == 0:
            print("Estimated committor values: \n {}".format(qalpha.numpy()))
        return qalpha[_rank]
    def compute_fts(self,counter, strings):
        """Computes the committor loss function.

        Before fts_start nothing is done; at fts_start the first committor
        sample is generated; afterwards new samples are added every fts_rate
        iterations (until fts_end) and the MSE penalty is evaluated on a
        random sub-batch of all samples collected so far.
        """
        #Initialize loss to zero
        loss_fts = torch.zeros(1)
        if ( counter < self.fts_start):
            #Not the time yet to compute the committor loss
            return loss_fts
        elif (counter==self.fts_start):
            #Generate the first committor sample
            self.runSimulation(strings)
            print("Rank [{}] finishes simulation for committor calculation".format(_rank),flush=True)
            #Save the committor values and initial configuration
            self.fts_configs_values[0] = self.compute_qalpha()
            self.fts_configs[0] = strings[_rank].detach().clone()
            self.fts_configs_count += 1
            # Now compute loss
            committor_penalty = torch.mean((self.committor(self.fts_configs[0])-self.fts_configs_values[0])**2)
            loss_fts += 0.5*self.lambda_fts*committor_penalty
            #Collect all the results
            dist.all_reduce(loss_fts)
            return loss_fts/_world_size
        else:
            if counter % self.fts_rate==0 and counter < self.fts_end:
                # Generate new committor configs and keep on generating the loss
                self.runSimulation(strings)
                print("Rank [{}] finishes simulation for committor calculation".format(_rank),flush=True)
                configs_count = self.fts_configs_count
                self.fts_configs_values[configs_count] = self.compute_qalpha()
                self.fts_configs[configs_count] = strings[_rank].detach().clone()
                self.fts_configs_count += 1
            # Compute loss by sub-sampling however many batches we have at the moment
            indices_committor = torch.randperm(self.fts_configs_count)[:int(self.batch_size_fts*self.fts_configs_count)]
            if self.fts_configs_count == 1:
                indices_committor = 0
            committor_penalty = torch.mean((self.committor(self.fts_configs[indices_committor])-self.fts_configs_values[indices_committor])**2)
            loss_fts += 0.5*self.lambda_fts*committor_penalty
            #Collect all the results
            dist.all_reduce(loss_fts)
            return loss_fts/_world_size
    def forward(self, counter, strings):
        #Evaluate (and cache) the committor loss for this iteration
        self.fts_loss = self.compute_fts(counter, strings)
        return self.fts_loss
class CommittorLoss(_Loss):
    r"""Loss function which implements the MSE loss for the committor function.

    This loss function automatically collects the committor values through brute-force simulation.

    Args:
        cl_sampler (tpstorch.MLSampler): the MC/MD sampler to perform unbiased simulations.
        committor (tpstorch.nn.Module): the committor function, represented as a neural network.
        lambda_cl (float): the penalty strength of the MSE loss. Defaults to one.
        cl_start (int): iteration number where we start collecting samples for the committor loss.
        cl_end (int): iteration number where we stop collecting samples for the committor loss.
        cl_rate (int): sampling rate for collecting committor samples during the iterations.
        cl_trials (int): number of trials to compute the committor per initial configuration.
        batch_size_cl (float): size of mini-batch used during training, expressed as the fraction of total batch collected at that point.
    """
    def __init__(self, cl_sampler, committor, lambda_cl=1.0, cl_start=200, cl_end=2000000, cl_rate=100, cl_trials=50, batch_size_cl=0.5):
        super(CommittorLoss, self).__init__()
        self.cl_loss = torch.zeros(1)
        self.committor = committor
        self.cl_sampler = cl_sampler
        self.cl_start = cl_start
        self.cl_end = cl_end
        self.lambda_cl = lambda_cl
        self.cl_rate = cl_rate
        self.cl_trials = cl_trials
        self.batch_size_cl = batch_size_cl
        #Pre-allocate storage for every committor sample we could ever collect;
        #configurations are stored flattened
        self.cl_configs = torch.zeros(int((self.cl_end-self.cl_start)/cl_rate+2), cl_sampler.torch_config.shape[0]*cl_sampler.torch_config.shape[1], dtype=torch.float)
        self.cl_configs_values = torch.zeros(int((self.cl_end-self.cl_start)/cl_rate+2), dtype=torch.float)
        self.cl_configs_count = 0
    def runTrials(self, config):
        """Estimate the committor at 'config' by brute force.

        Launches cl_trials unbiased trajectories from 'config'; each trial
        records 1 if it first reaches the product state and 0 for the
        reactant state.

        Returns:
            np.ndarray of 0/1 hitting outcomes, one per trial.
        """
        counts = []
        for i in range(self.cl_trials):
            self.cl_sampler.initialize_from_torchconfig(config.detach().clone())
            hitting = False
            #Run simulation and stop until it falls into the product or reactant state
            steps = 0
            while hitting is False:
                if self.cl_sampler.isReactant(self.cl_sampler.getConfig()):
                    hitting = True
                    counts.append(0)
                elif self.cl_sampler.isProduct(self.cl_sampler.getConfig()):
                    hitting = True
                    counts.append(1)
                #NOTE(review): one extra unbiased step is taken after hitting
                #is detected; harmless since each trial re-initializes the sampler
                self.cl_sampler.step_unbiased()
                steps += 1
        return np.array(counts)
    def compute_cl(self,counter,initial_config):
        """Computes the committor loss function.

        Before cl_start nothing is done; at cl_start the first brute-force
        committor estimate is generated; afterwards new estimates are added
        every cl_rate iterations (until cl_end) and the MSE penalty is
        evaluated on a random sub-batch of all samples collected so far.
        """
        #Initialize loss to zero
        loss_cl = torch.zeros(1)
        if ( counter < self.cl_start):
            #Not the time yet to compute the committor loss
            return loss_cl
        elif (counter==self.cl_start):
            #Generate the first committor sample
            counts = self.runTrials(initial_config)
            print("Rank [{}] finishes committor calculation: {} +/- {}".format(_rank, np.mean(counts), np.std(counts)/len(counts)**0.5),flush=True)
            #Save the committor values and initial configuration
            self.cl_configs_values[0] = np.mean(counts)
            self.cl_configs[0] = initial_config.detach().clone().reshape(-1)
            self.cl_configs_count += 1
            # Now compute loss
            committor_penalty = torch.mean((self.committor(self.cl_configs[0])-self.cl_configs_values[0])**2)
            loss_cl += 0.5*self.lambda_cl*committor_penalty
            #Collect all the results
            dist.all_reduce(loss_cl)
            return loss_cl/_world_size
        else:
            if counter % self.cl_rate==0 and counter < self.cl_end:
                # Generate new committor configs and keep on generating the loss
                counts = self.runTrials(initial_config)
                print("Rank [{}] finishes committor calculation: {} +/- {}".format(_rank, np.mean(counts), np.std(counts)/len(counts)**0.5),flush=True)
                configs_count = self.cl_configs_count
                self.cl_configs_values[configs_count] = np.mean(counts)
                self.cl_configs[configs_count] = initial_config.detach().clone().reshape(-1)
                self.cl_configs_count += 1
            # Compute loss by sub-sampling however many batches we have at the moment
            indices_committor = torch.randperm(self.cl_configs_count)[:int(self.batch_size_cl*self.cl_configs_count)]
            if self.cl_configs_count == 1:
                indices_committor = 0
            committor_penalty = torch.mean((self.committor(self.cl_configs[indices_committor])-self.cl_configs_values[indices_committor])**2)
            loss_cl += 0.5*self.lambda_cl*committor_penalty
            #Collect all the results
            dist.all_reduce(loss_cl)
            return loss_cl/_world_size
    def forward(self, counter, initial_config):
        #Evaluate (and cache) the committor loss for this iteration
        self.cl_loss = self.compute_cl(counter, initial_config)
        return self.cl_loss
class CommittorLoss2(_Loss):
    r"""Loss function which implements the MSE loss for the committor function.

    This loss function automatically collects the committor values through brute-force simulation.
    Variant of CommittorLoss: the penalty here squares the *mean* error
    (a bias-style penalty) rather than averaging the squared errors.

    Args:
        cl_sampler (tpstorch.MLSampler): the MC/MD sampler to perform unbiased simulations.
        committor (tpstorch.nn.Module): the committor function, represented as a neural network.
        lambda_cl (float): the penalty strength of the MSE loss. Defaults to one.
        cl_start (int): iteration number where we start collecting samples for the committor loss.
        cl_end (int): iteration number where we stop collecting samples for the committor loss.
        cl_rate (int): sampling rate for collecting committor samples during the iterations.
        cl_trials (int): number of trials to compute the committor per initial configuration.
        batch_size_cl (float): size of mini-batch used during training, expressed as the fraction of total batch collected at that point.
    """
    def __init__(self, cl_sampler, committor, lambda_cl=1.0, cl_start=200, cl_end=2000000, cl_rate=100, cl_trials=50, batch_size_cl=0.5):
        super(CommittorLoss2, self).__init__()
        self.cl_loss = torch.zeros(1)
        self.committor = committor
        self.cl_sampler = cl_sampler
        self.cl_start = cl_start
        self.cl_end = cl_end
        self.lambda_cl = lambda_cl
        self.cl_rate = cl_rate
        self.cl_trials = cl_trials
        self.batch_size_cl = batch_size_cl
        #Pre-allocate storage for every committor sample we could ever collect;
        #configurations are stored flattened
        self.cl_configs = torch.zeros(int((self.cl_end-self.cl_start)/cl_rate+2), cl_sampler.torch_config.shape[0]*cl_sampler.torch_config.shape[1], dtype=torch.float)
        self.cl_configs_values = torch.zeros(int((self.cl_end-self.cl_start)/cl_rate+2), dtype=torch.float)
        self.cl_configs_count = 0
    def runTrials(self, config):
        """Estimate the committor at 'config' by brute force.

        Launches cl_trials unbiased trajectories from 'config'; each trial
        records 1 if it first reaches the product state and 0 for the
        reactant state.

        Returns:
            np.ndarray of 0/1 hitting outcomes, one per trial.
        """
        counts = []
        for i in range(self.cl_trials):
            self.cl_sampler.initialize_from_torchconfig(config.detach().clone())
            hitting = False
            #Run simulation and stop until it falls into the product or reactant state
            steps = 0
            while hitting is False:
                if self.cl_sampler.isReactant(self.cl_sampler.getConfig()):
                    hitting = True
                    counts.append(0)
                elif self.cl_sampler.isProduct(self.cl_sampler.getConfig()):
                    hitting = True
                    counts.append(1)
                #NOTE(review): one extra unbiased step is taken after hitting
                #is detected; harmless since each trial re-initializes the sampler
                self.cl_sampler.step_unbiased()
                steps += 1
        return np.array(counts)
    def compute_cl(self,counter,initial_config):
        """Computes the committor loss function.

        Before cl_start nothing is done; at cl_start the first brute-force
        committor estimate is generated; afterwards new estimates are added
        every cl_rate iterations (until cl_end) and the squared-mean-error
        penalty is evaluated on a random sub-batch of all samples so far.
        """
        #Initialize loss to zero
        loss_cl = torch.zeros(1)
        if ( counter < self.cl_start):
            #Not the time yet to compute the committor loss
            return loss_cl
        elif (counter==self.cl_start):
            #Generate the first committor sample
            counts = self.runTrials(initial_config)
            print("Rank [{}] finishes committor calculation: {} +/- {}".format(_rank, np.mean(counts), np.std(counts)/len(counts)**0.5),flush=True)
            #Save the committor values and initial configuration
            self.cl_configs_values[0] = np.mean(counts)
            self.cl_configs[0] = initial_config.detach().clone().reshape(-1)
            self.cl_configs_count += 1
            # Now compute loss; note: mean error squared, not mean squared error
            committor_penalty = torch.mean((self.committor(self.cl_configs[0])-self.cl_configs_values[0]))
            loss_cl += 0.5*self.lambda_cl*committor_penalty**2
            #Collect all the results
            dist.all_reduce(loss_cl)
            return loss_cl/_world_size
        else:
            if counter % self.cl_rate==0 and counter < self.cl_end:
                # Generate new committor configs and keep on generating the loss
                counts = self.runTrials(initial_config)
                print("Rank [{}] finishes committor calculation: {} +/- {}".format(_rank, np.mean(counts), np.std(counts)/len(counts)**0.5),flush=True)
                configs_count = self.cl_configs_count
                self.cl_configs_values[configs_count] = np.mean(counts)
                self.cl_configs[configs_count] = initial_config.detach().clone().reshape(-1)
                self.cl_configs_count += 1
            # Compute loss by sub-sampling however many batches we have at the moment
            indices_committor = torch.randperm(self.cl_configs_count)[:int(self.batch_size_cl*self.cl_configs_count)]
            if self.cl_configs_count == 1:
                indices_committor = 0
            #Note: mean error squared, not mean squared error
            committor_penalty = torch.mean((self.committor(self.cl_configs[indices_committor])-self.cl_configs_values[indices_committor]))
            loss_cl += 0.5*self.lambda_cl*committor_penalty**2
            #Collect all the results
            dist.all_reduce(loss_cl)
            return loss_cl/_world_size
    def forward(self, counter, initial_config):
        #Evaluate (and cache) the committor loss for this iteration
        self.cl_loss = self.compute_cl(counter, initial_config)
        return self.cl_loss
class _BKELoss(_Loss):
    r"""Base class for computing the loss function corresponding to the variational form
    of the Backward Kolmogorov Equation. This base class includes default implementation
    for boundary conditions.

    On construction it samples n_bc_samples configurations from each basin by
    running the supplied sampler, and stores them (flattened) for later use
    in compute_bc.

    Args:
        bc_sampler (tpstorch.MLSamplerEXP): the MD/MC sampler used for obtaining configurations in
            product and reactant basin.
        committor (tpstorch.nn.Module): the committor function, represented as a neural network.
        lambda_A (float): penalty strength for enforcing boundary conditions at the reactant basin.
        lambda_B (float): penalty strength for enforcing boundary conditions at the product basin.
            If None is given, lambda_B=lambda_A
        start_react (torch.Tensor): starting configuration to sample reactant basin.
        start_prod (torch.Tensor): starting configuration to sample product basin.
        n_bc_samples (int, optional): total number of samples to collect at both product and
            reactant basin.
        bc_period (int, optional): the number of timesteps to collect one configuration during
            sampling at either product and reactant basin.
        batch_size_bc (float, optional): size of mini-batch for the boundary condition loss during
            gradient descent, expressed as fraction of n_bc_samples.
    """
    def __init__(self, bc_sampler, committor, lambda_A, lambda_B, start_react,
                 start_prod, n_bc_samples=320, bc_period=10, batch_size_bc=0.1):
        super(_BKELoss, self).__init__()
        self.main_loss = torch.zeros(1)
        self.bc_loss = torch.zeros(1)
        self.committor = committor
        self.lambda_A = lambda_A
        self.lambda_B = lambda_B
        self.n_bc_samples = n_bc_samples
        self.batch_size_bc = batch_size_bc
        #Make sure to flatten the configs
        self.prod_configs = torch.zeros(self.n_bc_samples, start_prod.shape[0]*start_prod.shape[1], dtype=torch.float)
        self.react_configs = torch.zeros_like(self.prod_configs)
        #Per-window reweighting factors; filled in by subclasses' computeZl
        self.zl = [torch.zeros(1) for i in range(_world_size)]
        #Sample product basin first
        if _rank == 0:
            print("Sampling the product basin",flush=True)
        bc_sampler.setConfig(start_prod)
        for i in range(self.n_bc_samples):
            for j in range(bc_period):
                bc_sampler.step_bc()
            self.prod_configs[i] = bc_sampler.getConfig().view(-1)
        #Next, sample reactant basin
        if _rank == 0:
            print("Sampling the reactant basin",flush=True)
        bc_sampler.setConfig(start_react)
        for i in range(self.n_bc_samples):
            for j in range(bc_period):
                bc_sampler.step_bc()
            self.react_configs[i] = bc_sampler.getConfig().view(-1)
    def compute_bc(self):
        """Computes the loss due to the boundary conditions.

        Default implementation is to apply the penalty method, where the loss at every MPI
        process is computed as:

        .. math::
            \ell_{BC,i} = \frac{\lambda_A}{2} \avg_{x \in M_A} (q(x))^2
                        + \frac{\lambda_B}{2} \avg_{x \in M_B} (1-q(x))^2

        where 'i' is the index of the MPI process, :math: 'q(x)' is the
        committor function, :math: 'M_A' is the mini-batch for the reactant
        basin and 'M_B' is the the mini-batch for the product basin.
        The result is then collected (MPI collective communication) as:

        .. math::
            \ell_{BC} = \frac{1}{S} \sum_{i=0}^{S-1} \ell_{BC,i}

        where 'S' is the MPI world size.

        Note that PyTorch does not track arithmetic operations during MPI
        collective calls; the final gradients will be collected in each
        respective optimizer.
        """
        loss_bc = torch.zeros(1)
        #Compute random indices to sub-sample the list of reactant and product
        #configurations
        indices_react = torch.randperm(len(self.react_configs))[:int(self.batch_size_bc*len(self.react_configs))]
        indices_prod = torch.randperm(len(self.prod_configs))[:int(self.batch_size_bc*len(self.prod_configs))]
        react_penalty = torch.mean(self.committor(self.react_configs[indices_react,:])**2)
        prod_penalty = torch.mean((1.0-self.committor(self.prod_configs[indices_prod,:]))**2)
        loss_bc += 0.5*self.lambda_A*react_penalty
        loss_bc += 0.5*self.lambda_B*prod_penalty
        return loss_bc/_world_size
    @torch.no_grad()
    def computeZl(self):
        #Subclasses implement the reweighting-factor computation
        raise NotImplementedError
    def compute_bkeloss(self):
        #Subclasses implement the variational BKE loss
        raise NotImplementedError
    def forward(self):
        r"""Default implementation computes the boundary condition loss only"""
        self.bc_loss = self.compute_bc()
        return self.bc_loss
class BKELossEMUS(_BKELoss):
r"""Loss function corresponding to the variational form of the Backward Kolmogorov Equation,
which includes reweighting by exponential (EXP) averaging.
Args:
bc_sampler (tpstorch.MLSamplerEXP): the MD/MC sampler used for obtaining configurations in
product and reactant basin.
committor (tpstorch.nn.Module): the committor function, represented as a neural network.
lambda_A (float): penalty strength for enforcing boundary conditions at the reactant basin.
lambda_B (float): penalty strength for enforcing boundary conditions at the product basin.
If None is given, lambda_B=lambda_A
start_react (torch.Tensor): starting configuration to sample reactant basin.
start_prod (torch.Tensor): starting configuration to sample product basin.
n_bc_samples (int, optional): total number of samples to collect at both product and
reactant basin.
bc_period (int, optional): the number of timesteps to collect one configuration during
sampling at either product and reactant basin.
batch_size_bc (float, optional): size of mini-batch for the boundary condition loss during
gradient descent, expressed as fraction of n_bc_samples.
mode (string, optional): the mode for EXP reweighting. If mode is 'random', then the
reference umbrella window is chosen randomly at every iteration. If it's not random,
then ref_index must be supplied.
ref_index (int, optional): a fixed chosen umbrella window for computing the reweighting
factors.
"""
    def __init__(self, bc_sampler, committor, lambda_A, lambda_B, start_react,
                 start_prod, n_bc_samples=320, bc_period=10, batch_size_bc=0.1, tol=2e-9):
        """Initialize the EMUS-reweighted BKE loss.

        See _BKELoss for the shared arguments; 'tol' is the absolute
        tolerance used for null-space detection in computeZl.
        """
        super(BKELossEMUS, self).__init__(bc_sampler, committor, lambda_A, lambda_B, start_react,
                start_prod, n_bc_samples, bc_period, batch_size_bc)
        self.tol = tol
    @torch.no_grad()
    def computeZl(self, overlapprob_rows):
        """Compute the per-window reweighting factors z_l (EMUS estimator).

        Each rank contributes its row of mean overlap probabilities; the rows
        are merged across ranks via all_reduce into a matrix F, and z is the
        normalized (numerical) null vector of F^T - I.

        Args:
            overlapprob_rows (torch.Tensor): mini-batch of overlap-probability
                rows for this rank's window, shape (batch, world_size).

        Returns:
            np.ndarray of normalized reweighting factors, one per window.
        """
        #Set the row according to rank
        Kmatrix = torch.zeros(_world_size,_world_size)
        Kmatrix[_rank] = torch.mean(overlapprob_rows,dim=0)
        #All reduce to collect the results
        dist.all_reduce(Kmatrix)
        #Finallly, build the final matrix
        Kmatrix = Kmatrix.t()-torch.eye(_world_size)
        #Compute the reweighting factors via the numerical null space
        v, w = nullspace(Kmatrix.numpy(), atol=self.tol)
        try:
            index = np.argmin(w)
            zl = np.real(v[:,index])
        except:
            #Shift value may not be small enough so we choose a default higher tolerance detection
            v, w = nullspace(Kmatrix.numpy(), atol=10**(-5), rtol=0)
            index = np.argmin(w)
            zl = np.real(v[:,index])
        #Normalize
        zl = zl/np.sum(zl)
        return zl
    def compute_bkeloss(self, gradients, inv_normconstants, overlapprob_rows):
        r"""Computes the loss corresponding to the variational form of the BKE
        including the EMUS reweighting factors.

        Independent computation is first done on each MPI process. At the
        'l'-th process we compute

        .. math::
            L_l = \frac{1}{2} \sum_{x \in M_l} |\grad q(x)|^2/c(x) ,

            c_l = \sum_{ x \in M_l} 1/c(x) ,

        where :math: $M_l$ is the mini-batch collected by the l-th MPI
        process. The main loss is then assembled across processes as

        .. math::
            \ell_{main} = \frac{\sum_{l=1}^{S-1} L_l z_l}{\sum_{l=1}^{S-1} c_l z_l}

        where :math: 'S' is the MPI world size and z_l are the EMUS weights
        from computeZl.

        Args:
            gradients (torch.Tensor): mini-batch of \grad q(x). First dimension is the size of
                the mini-batch while the second is system size (flattened).
            inv_normconstants (torch.Tensor): mini-batch of 1/c(x).
            overlapprob_rows (torch.Tensor): mini-batch of this window's row of
                overlap probabilities, forwarded to computeZl.

        Note that PyTorch does not track arithmetic operations during MPI
        collective calls. Thus, the last sum containing L_l is not reflected
        in the computational graph tracked by individual MPI process. The
        final gradients will be collected in each respective optimizer.
        """
        #NOTE(review): this zero initialization is immediately overwritten below.
        main_loss = torch.zeros(1)
        #Batch-averaged 0.5*|grad q|^2 / c(x) term via a matrix-vector product
        main_loss = 0.5*torch.sum(((gradients*gradients).t() @ inv_normconstants)/len(inv_normconstants))
        #Computing the reweighting factors, z_l in our notation
        self.zl = self.computeZl(overlapprob_rows)
        #Use it first to compute the mean inverse normalizing constant
        mean_recipnormconst = torch.mean(inv_normconstants)
        mean_recipnormconst.mul_(self.zl[_rank])
        #All reduce the mean inverse normalizing constant over all windows
        dist.all_reduce(mean_recipnormconst)
        #Reweight this window's contribution and sum over all windows
        main_loss *= self.zl[_rank]
        dist.all_reduce(main_loss)
        main_loss /= mean_recipnormconst
        return main_loss
def forward(self, gradients, inv_normconstants, overlapprob_rows):#fwd_weightfactors, bwrd_weightfactors):
self.main_loss = self.compute_bkeloss(gradients, inv_normconstants, overlapprob_rows)#fwd_weightfactors, bwrd_weightfactors)
self.bc_loss = self.compute_bc()
return self.main_loss+self.bc_loss
class BKELossEXP(_BKELoss):
    r"""Loss function corresponding to the variational form of the Backward Kolmogorov Equation,
    which includes reweighting by exponential (EXP) averaging.

    The per-window weights z_l are recomputed from ratios of umbrella-potential
    Boltzmann factors on every call to forward().

    Args:
        bc_sampler (tpstorch.MLSamplerEXP): the MD/MC sampler used for obtaining configurations in
            product and reactant basin.
        committor (tpstorch.nn.Module): the committor function, represented as a neural network.
        lambda_A (float): penalty strength for enforcing boundary conditions at the reactant basin.
        lambda_B (float): penalty strength for enforcing boundary conditions at the product basin.
            If None is given, lambda_B=lambda_A
        start_react (torch.Tensor): starting configuration to sample reactant basin.
        start_prod (torch.Tensor): starting configuration to sample product basin.
        n_bc_samples (int, optional): total number of samples to collect at both product and
            reactant basin.
        bc_period (int, optional): the number of timesteps to collect one configuration during
            sampling at either product and reactant basin.
        batch_size_bc (float, optional): size of mini-batch for the boundary condition loss during
            gradient descent, expressed as fraction of n_bc_samples.
        mode (string, optional): the mode for EXP reweighting. If mode is 'random', then the
            reference umbrella window is chosen randomly at every iteration. Otherwise
            ref_index must be supplied.
        ref_index (int, optional): a fixed chosen umbrella window for computing the reweighting
            factors; required (and only used) when mode != 'random'.
    """
    def __init__(self, bc_sampler, committor, lambda_A, lambda_B, start_react,
            start_prod, n_bc_samples=320, bc_period=10, batch_size_bc=0.1,
            mode='random', ref_index=None):
        super(BKELossEXP, self).__init__(bc_sampler, committor, lambda_A, lambda_B, start_react,
            start_prod, n_bc_samples, bc_period, batch_size_bc)
        self.mode = mode
        if self.mode != 'random':
            #A fixed reference window is mandatory when it is not drawn at random
            if ref_index is not None:
                self.ref_index = int(ref_index)
            else:
                raise TypeError
    @torch.no_grad()
    def computeZl(self, fwd_weightfactors, bwrd_weightfactors):
        """Computes the reweighting factor z_l needed for computing gradient
        averages.

        Args:
            fwd_weightfactors (torch.Tensor): mini-batch of w_{l+1}/w_{l}, which are forward
                ratios of the umbrella potential Boltzmann factors stored by the l-th umbrella
                window/MPI process
            bwrd_weightfactors (torch.Tensor): mini-batch of w_{l-1}/w_{l}, which are backward
                ratios of the umbrella potential Boltzmann factors stored by the l-th umbrella
                window/MPI process

        Formula is based on Eq. REF of our paper.
        """
        #Randomly select a window as a free energy reference and broadcast that index across all processes
        if self.mode == "random":
            self.ref_index = torch.randint(low=0,high=_world_size,size=(1,))
            dist.broadcast(self.ref_index, src=0)
        #Compute the average of forward and backward ratios of Boltzmann factors;
        #the last forward and first backward entries are never used and are dropped
        fwd_meanwgtfactor = [torch.zeros(1) for i in range(_world_size)]
        dist.all_gather(fwd_meanwgtfactor,torch.mean(fwd_weightfactors))
        fwd_meanwgtfactor = torch.tensor(fwd_meanwgtfactor[:-1])
        bwrd_meanwgtfactor = [torch.zeros(1) for i in range(_world_size)]
        dist.all_gather(bwrd_meanwgtfactor,torch.mean(bwrd_weightfactors))
        bwrd_meanwgtfactor = torch.tensor(bwrd_meanwgtfactor[1:])
        #Compute the reweighting factor as a running product of mean ratios,
        #walking away from the reference window in either direction
        zl = []
        for l in range(_world_size):
            if l > self.ref_index:
                zl.append(torch.prod(fwd_meanwgtfactor[self.ref_index:l]))
            elif l < self.ref_index:
                zl.append(torch.prod(bwrd_meanwgtfactor[l:self.ref_index]))
            else:
                #The reference window itself carries unit weight by construction
                zl.append(torch.tensor(1.0))
        #Normalize the reweighting factor
        zl = torch.tensor(zl).flatten()
        zl.div_(torch.sum(zl))
        return zl
    def compute_bkeloss(self, gradients, inv_normconstants, fwd_weightfactors, bwrd_weightfactors):
        r"""Computes the loss corresponding to the variational form of the BKE including
        the EXP reweighting factors.

        Independent computation is first done on each MPI process. At the
        'l'-th process we compute

        .. math::
            L_l = \frac{1}{2} \sum_{x \in M_l} |\grad q(x)|^2/c(x) ,

            c_l = \sum_{ x \in M_l} 1/c(x) ,

        where :math: $M_l$ is the mini-batch collected by the l-th MPI
        process. We then collect the computation to compute the main loss as

        .. math::
            \ell_{main} = \frac{\sum_{l=1}^{S-1} L_l z_l}{\sum_{l=1}^{S-1} c_l z_l}

        where :math: 'S' is the MPI world size.

        Args:
            gradients (torch.Tensor): mini-batch of \grad q(x). First dimension is the size of
                the mini-batch while the second is system size (flattened).
            inv_normconstants (torch.Tensor): mini-batch of 1/c(x).
            fwd_weightfactors (torch.Tensor): mini-batch of w_{l+1}/w_{l}, which are forward
                ratios of the umbrella potential Boltzmann factors stored by the l-th umbrella
                window/MPI process
            bwrd_weightfactors (torch.Tensor): mini-batch of w_{l-1}/w_{l}, which are backward
                ratios of the umbrella potential Boltzmann factors stored by the l-th umbrella
                window/MPI process

        Note that PyTorch does not track arithmetic operations during MPI
        collective calls. Thus, the last sum containing L_l is not reflected
        in the computational graph tracked by individual MPI process. The
        final gradients will be collected in each respective optimizer.
        """
        #NOTE(review): this zero initialization is immediately overwritten below.
        main_loss = torch.zeros(1)
        #Batch-averaged 0.5*|grad q|^2 / c(x) term via a matrix-vector product
        main_loss = 0.5*torch.sum(((gradients*gradients).t() @ inv_normconstants)/len(inv_normconstants))
        #Computing the reweighting factors, z_l in our notation
        self.zl = self.computeZl(fwd_weightfactors, bwrd_weightfactors)
        #Use it first to compute the mean inverse normalizing constant
        mean_recipnormconst = torch.mean(inv_normconstants)
        mean_recipnormconst.mul_(self.zl[_rank])
        #All reduce the mean inverse normalizing constant over all windows
        dist.all_reduce(mean_recipnormconst)
        #Reweight this window's contribution and sum over all windows
        main_loss *= self.zl[_rank]
        dist.all_reduce(main_loss)
        main_loss /= mean_recipnormconst
        return main_loss
    def forward(self, gradients, inv_normconstants, fwd_weightfactors, bwrd_weightfactors):
        """Return the total loss (reweighted BKE term + boundary-condition
        penalty); the two terms are cached on self.main_loss / self.bc_loss."""
        self.main_loss = self.compute_bkeloss(gradients, inv_normconstants, fwd_weightfactors, bwrd_weightfactors)
        self.bc_loss = self.compute_bc()
        return self.main_loss+self.bc_loss
class BKELossEXP_2(_BKELoss):
    r"""Loss function corresponding to the variational form of the Backward Kolmogorov Equation,
    which uses fixed (externally supplied) reweighting/free energy terms.

    Unlike BKELossEXP, the per-window weights z_l are NOT recomputed here;
    they must be passed directly to forward().

    Args:
        bc_sampler (tpstorch.MLSamplerEXP): the MD/MC sampler used for obtaining configurations in
            product and reactant basin.
        committor (tpstorch.nn.Module): the committor function, represented as a neural network.
        lambda_A (float): penalty strength for enforcing boundary conditions at the reactant basin.
        lambda_B (float): penalty strength for enforcing boundary conditions at the product basin.
            If None is given, lambda_B=lambda_A
        start_react (torch.Tensor): starting configuration to sample reactant basin.
        start_prod (torch.Tensor): starting configuration to sample product basin.
        n_bc_samples (int, optional): total number of samples to collect at both product and
            reactant basin.
        bc_period (int, optional): the number of timesteps to collect one configuration during
            sampling at either product and reactant basin.
        batch_size_bc (float, optional): size of mini-batch for the boundary condition loss during
            gradient descent, expressed as fraction of n_bc_samples.
        mode (string, optional): retained for interface parity with BKELossEXP;
            stored but not consulted when z_l is supplied externally.
        ref_index (int, optional): retained for interface parity with BKELossEXP;
            required when mode != 'random'.
    """
    def __init__(self, bc_sampler, committor, lambda_A, lambda_B, start_react,
            start_prod, n_bc_samples=320, bc_period=10, batch_size_bc=0.1,
            mode='random', ref_index=None):
        super(BKELossEXP_2, self).__init__(bc_sampler, committor, lambda_A, lambda_B, start_react,
            start_prod, n_bc_samples, bc_period, batch_size_bc)
        self.mode = mode
        if self.mode != 'random':
            #A fixed reference window is mandatory when it is not drawn at random
            if ref_index is not None:
                self.ref_index = int(ref_index)
            else:
                raise TypeError
    def compute_bkeloss(self, gradients, inv_normconstants, zl):
        r"""Computes the loss corresponding to the variational form of the BKE using
        externally supplied reweighting factors.

        Independent computation is first done on each MPI process. At the
        'l'-th process we compute

        .. math::
            L_l = \frac{1}{2} \sum_{x \in M_l} |\grad q(x)|^2/c(x) ,

            c_l = \sum_{ x \in M_l} 1/c(x) ,

        where :math: $M_l$ is the mini-batch collected by the l-th MPI
        process. We then collect the computation to compute the main loss as

        .. math::
            \ell_{main} = \frac{\sum_{l=1}^{S-1} L_l z_l}{\sum_{l=1}^{S-1} c_l z_l}

        where :math: 'S' is the MPI world size.

        Args:
            gradients (torch.Tensor): mini-batch of \grad q(x). First dimension is the size of
                the mini-batch while the second is system size (flattened).
            inv_normconstants (torch.Tensor): mini-batch of 1/c(x).
            zl (torch.Tensor or array): precomputed reweighting factors z_l,
                indexed by MPI rank.

        Note that PyTorch does not track arithmetic operations during MPI
        collective calls. Thus, the last sum containing L_l is not reflected
        in the computational graph tracked by individual MPI process. The
        final gradients will be collected in each respective optimizer.
        """
        #NOTE(review): this zero initialization is immediately overwritten below.
        main_loss = torch.zeros(1)
        #Batch-averaged 0.5*|grad q|^2 / c(x) term via a matrix-vector product
        main_loss = 0.5*torch.sum(((gradients*gradients).t() @ inv_normconstants)/len(inv_normconstants))
        #Use the supplied z_l to compute the mean inverse normalizing constant
        mean_recipnormconst = torch.mean(inv_normconstants)
        mean_recipnormconst.mul_(zl[_rank])
        #All reduce the mean inverse normalizing constant over all windows
        dist.all_reduce(mean_recipnormconst)
        #Reweight this window's contribution and sum over all windows
        main_loss *= zl[_rank]
        dist.all_reduce(main_loss)
        main_loss /= mean_recipnormconst
        return main_loss
    def forward(self, gradients, inv_normconstants, zl):
        """Return the total loss (reweighted BKE term + boundary-condition
        penalty) using the externally supplied z_l factors."""
        self.main_loss = self.compute_bkeloss(gradients, inv_normconstants, zl)
        self.bc_loss = self.compute_bc()
        return self.main_loss+self.bc_loss
class BKELossFTS(_BKELoss):
    r"""Loss function corresponding to the variational form of the Backward
    Kolmogorov Equation, with per-cell reweighting factors computed from the
    rejection counts of the finite-temperature string (FTS) simulation.

    (Note: despite superficial similarity to BKELossEXP, no exponential
    averaging is done here -- z_l comes from a null-space eigenproblem built
    from cell-transition rejection counts.)

    Args:
        bc_sampler (tpstorch.MLSamplerEXP): the MD/MC sampler used for obtaining
            configurations in product and reactant basin.
        committor (tpstorch.nn.Module): the committor function, represented
            as a neural network.
        lambda_A (float): penalty strength for enforcing boundary conditions at
            the reactant basin.
        lambda_B (float): penalty strength for enforcing boundary
            conditions at the product basin. If None is given, lambda_B=lambda_A
        start_react (torch.Tensor): starting configuration to sample reactant
            basin.
        start_prod (torch.Tensor): starting configuration to sample product
            basin
        n_bc_samples (int, optional): total number of samples to collect at both
            product and reactant basin.
        bc_period (int, optional): the number of timesteps to collect one
            configuration during sampling at either product and reactant basin.
        batch_size_bc (float, optional): size of mini-batch for the boundary
            condition loss during gradient descent, expressed as fraction of
            n_bc_samples.
        tol (float, optional): tolerance for the null-space eigensolver.
        mode (string, optional): 'shift' adds tol to the off-diagonal rejection
            counts adjacent to this rank before solving; 'noshift' leaves them as-is.
    """
    def __init__(self, bc_sampler, committor, lambda_A, lambda_B, start_react,
            start_prod, n_bc_samples=320, bc_period=10, batch_size_bc=0.1, tol=5e-9, mode='noshift'):
        super(BKELossFTS, self).__init__(bc_sampler, committor, lambda_A, lambda_B, start_react,
            start_prod, n_bc_samples, bc_period, batch_size_bc)
        self.tol = tol
        self.mode = mode
        if mode != 'noshift' and mode != 'shift':
            raise RuntimeError("The only available modes are 'shift' or 'noshift'!")
    @torch.no_grad()
    def computeZl(self,rejection_counts):
        """ Computes the reweighting factor z_l by solving an eigenvalue problem

        Args:
            rejection_counts (torch.Tensor): an array of rejection counts, i.e., how many times
                a system steps out of its cell in the FTS simulation, stored by an MPI process.

        Formula and maths is based on our paper.
        """
        #Each rank fills only its own row of the transition-count matrix
        Kmatrix = torch.zeros(_world_size,_world_size)
        Kmatrix[_rank] = rejection_counts
        #Add tolerance values to the off-diagonal elements neighboring this rank
        if self.mode == 'shift':
            if _rank == 0:
                Kmatrix[_rank+1] += self.tol
            elif _rank == _world_size-1:
                Kmatrix[_rank-1] += self.tol
            else:
                Kmatrix[_rank-1] += self.tol
                Kmatrix[_rank+1] += self.tol
        #All reduce to collect the results (other rows were zero locally)
        dist.all_reduce(Kmatrix)
        #Finally, build the generator-like matrix K^T - diag(row sums)
        Kmatrix = Kmatrix.t()-torch.diag(torch.sum(Kmatrix,dim=1))
        #Compute the reweighting factors using an eigensolver
        #NOTE(review): nullspace() is assumed to return (basis vectors, residuals);
        #its definition is not visible here -- confirm the contract.
        v, w = nullspace(Kmatrix.numpy(), atol=self.tol)
        try:
            index = np.argmin(w)
            zl = np.real(v[:,index])
        except:
            #Shift value may not be small enough so we choose a default higher tolerance detection
            v, w = nullspace(Kmatrix.numpy(), atol=10**(-5), rtol=0)
            index = np.argmin(w)
            zl = np.real(v[:,index])
        #Normalize so the factors sum to one
        zl = zl/np.sum(zl)
        return zl
    def compute_bkeloss(self, gradients, rejection_counts):
        r"""Computes the loss corresponding to the variational form of the BKE
        including the FTS reweighting factors.

        Independent computation is first done on individual MPI process.
        First, we compute the following at every 'l'-th MPI process:

        .. math::
            L_l = \frac{1}{2 M_l} \sum_{x \in M_l} |\grad q(x)|^2,

        where :math: $M_l$ is the mini-batch collected by the l-th MPI
        process. We then collect the computation to compute the main loss as

        .. math::
            \ell_{main} = \sum_{l=1}^{S-1} L_l z_l

        where :math: 'S' is the MPI world size.

        Note that PyTorch does not track arithmetic operations during MPI
        collective calls. Thus, the last sum containing L_l z_l is not reflected
        in the computational graph tracked by individual MPI process. The
        final gradients will be collected in each respective optimizer.
        """
        #NOTE(review): this zero initialization is immediately overwritten below.
        main_loss = torch.zeros(1)
        #Batch-averaged 0.5*|grad q|^2 term for this cell
        main_loss = 0.5*torch.sum(torch.mean(gradients*gradients,dim=0))
        #Computing the reweighting factors, z_l in our notation
        self.zl = self.computeZl(rejection_counts)
        #Reweight this cell's contribution and sum over all cells
        main_loss *= self.zl[_rank]
        dist.all_reduce(main_loss)
        return main_loss
    def forward(self, gradients, rejection_counts):
        """Return the total loss (FTS-reweighted BKE term + boundary-condition
        penalty); the two terms are cached on self.main_loss / self.bc_loss."""
        self.main_loss = self.compute_bkeloss(gradients, rejection_counts)
        self.bc_loss = self.compute_bc()
        return self.main_loss+self.bc_loss
class BKELossFTS_2(_BKELoss):
    r"""Loss function corresponding to the variational form of the Backward
    Kolmogorov Equation, using FTS-style reweighting factors that are supplied
    externally rather than recomputed.

    Unlike BKELossFTS, this class does not solve the eigenvalue problem itself;
    the z_l weights are passed directly to forward().

    Args:
        bc_sampler (tpstorch.MLSamplerEXP): the MD/MC sampler used for obtaining
            configurations in product and reactant basin.
        committor (tpstorch.nn.Module): the committor function, represented
            as a neural network.
        lambda_A (float): penalty strength for enforcing boundary conditions at
            the reactant basin.
        lambda_B (float): penalty strength for enforcing boundary
            conditions at the product basin. If None is given, lambda_B=lambda_A
        start_react (torch.Tensor): starting configuration to sample reactant
            basin.
        start_prod (torch.Tensor): starting configuration to sample product
            basin
        n_bc_samples (int, optional): total number of samples to collect at both
            product and reactant basin.
        bc_period (int, optional): the number of timesteps to collect one
            configuration during sampling at either product and reactant basin.
        batch_size_bc (float, optional): size of mini-batch for the boundary
            condition loss during gradient descent, expressed as fraction of
            n_bc_samples.
        tol (float, optional): retained for interface parity with BKELossFTS;
            stored but not used in this class (no eigensolve happens here).
        mode (string, optional): retained for interface parity with BKELossFTS;
            validated and stored but otherwise unused here.
    """
    def __init__(self, bc_sampler, committor, lambda_A, lambda_B, start_react,
            start_prod, n_bc_samples=320, bc_period=10, batch_size_bc=0.1, tol=5e-9, mode='noshift'):
        super(BKELossFTS_2, self).__init__(bc_sampler, committor, lambda_A, lambda_B, start_react,
            start_prod, n_bc_samples, bc_period, batch_size_bc)
        self.tol = tol
        self.mode = mode
        if mode != 'noshift' and mode != 'shift':
            raise RuntimeError("The only available modes are 'shift' or 'noshift'!")
    def compute_bkeloss(self, gradients, zl):
        r"""Computes the loss corresponding to the variational form of the BKE
        using externally supplied FTS reweighting factors.

        Independent computation is first done on individual MPI process.
        First, we compute the following at every 'l'-th MPI process:

        .. math::
            L_l = \frac{1}{2 M_l} \sum_{x \in M_l} |\grad q(x)|^2,

        where :math: $M_l$ is the mini-batch collected by the l-th MPI
        process. We then collect the computation to compute the main loss as

        .. math::
            \ell_{main} = \sum_{l=1}^{S-1} L_l z_l

        where :math: 'S' is the MPI world size.

        Args:
            gradients (torch.Tensor): mini-batch of \grad q(x).
            zl (torch.Tensor or array): precomputed reweighting factors z_l,
                indexed by MPI rank.

        Note that PyTorch does not track arithmetic operations during MPI
        collective calls. Thus, the last sum containing L_l z_l is not reflected
        in the computational graph tracked by individual MPI process. The
        final gradients will be collected in each respective optimizer.
        """
        #NOTE(review): this zero initialization is immediately overwritten below.
        main_loss = torch.zeros(1)
        #Batch-averaged 0.5*|grad q|^2 term for this cell
        main_loss = 0.5*torch.sum(torch.mean(gradients*gradients,dim=0))
        #Reweight this cell's contribution and sum over all cells
        main_loss *= zl[_rank]
        dist.all_reduce(main_loss)
        return main_loss
    def forward(self, gradients, zl):
        """Return the total loss (reweighted BKE term + boundary-condition
        penalty) using the externally supplied z_l factors."""
        self.main_loss = self.compute_bkeloss(gradients, zl)
        self.bc_loss = self.compute_bc()
        return self.main_loss+self.bc_loss
class BKELossFTS2(_BKELoss):
    r"""Loss function corresponding to the variational form of the Backward
    Kolmogorov Equation, with FTS rejection-count reweighting and an optional
    override of the weights by an exact, externally supplied z_l.

    Variant of BKELossFTS whose compute_bkeloss/forward accept a `start_exact`
    switch: when nonzero, the supplied `zl_exact` replaces the internally
    computed weights for the reweighting step.

    Args:
        bc_sampler (tpstorch.MLSamplerEXP): the MD/MC sampler used for obtaining
            configurations in product and reactant basin.
        committor (tpstorch.nn.Module): the committor function, represented
            as a neural network.
        lambda_A (float): penalty strength for enforcing boundary conditions at
            the reactant basin.
        lambda_B (float): penalty strength for enforcing boundary
            conditions at the product basin. If None is given, lambda_B=lambda_A
        start_react (torch.Tensor): starting configuration to sample reactant
            basin.
        start_prod (torch.Tensor): starting configuration to sample product
            basin
        n_bc_samples (int, optional): total number of samples to collect at both
            product and reactant basin.
        bc_period (int, optional): the number of timesteps to collect one
            configuration during sampling at either product and reactant basin.
        batch_size_bc (float, optional): size of mini-batch for the boundary
            condition loss during gradient descent, expressed as fraction of
            n_bc_samples.
        tol (float, optional): tolerance for the null-space eigensolver and
            the 'shift' regularization.
        mode (string, optional): 'shift' or 'noshift' (see computeZl).
    """
    def __init__(self, bc_sampler, committor, lambda_A, lambda_B, start_react,
            start_prod, n_bc_samples=320, bc_period=10, batch_size_bc=0.1, tol=5e-9, mode='noshift'):
        super(BKELossFTS2, self).__init__(bc_sampler, committor, lambda_A, lambda_B, start_react,
            start_prod, n_bc_samples, bc_period, batch_size_bc)
        self.tol = tol
        self.mode = mode
        if mode != 'noshift' and mode != 'shift':
            raise RuntimeError("The only available modes are 'shift' or 'noshift'!")
    @torch.no_grad()
    def computeZl(self,rejection_counts):
        """ Computes the reweighting factor z_l by solving an eigenvalue problem

        Args:
            rejection_counts (torch.Tensor): an array of rejection counts, i.e., how many times
                a system steps out of its cell in the FTS simulation, stored by an MPI process.

        Formula and maths is based on our paper.
        """
        #Each rank fills only its own row of the transition-count matrix
        Kmatrix = torch.zeros(_world_size,_world_size)
        Kmatrix[_rank] = rejection_counts
        #Set tolerance values on the off-diagonal elements neighboring this rank
        #NOTE(review): unlike BKELossFTS.computeZl these use '=' rather than '+=',
        #so the shifted entries REPLACE the rejection counts instead of adding to
        #them, and the diagonal below is shifted by -self.tol (BKELossFTS does
        #neither) -- confirm these differences are intended.
        if self.mode == 'shift':
            if _rank == 0:
                Kmatrix[_rank+1] = self.tol
            elif _rank == _world_size-1:
                Kmatrix[_rank-1] = self.tol
            else:
                Kmatrix[_rank-1] = self.tol
                Kmatrix[_rank+1] = self.tol
        #All reduce to collect the results (other rows were zero locally)
        dist.all_reduce(Kmatrix)
        #Finally, build the generator-like matrix with a -tol diagonal shift
        Kmatrix = Kmatrix.t()-torch.diag(torch.sum(Kmatrix,dim=1)-self.tol)
        #Compute the reweighting factors using an eigensolver
        #NOTE(review): nullspace() is assumed to return (basis vectors, residuals);
        #its definition is not visible here -- confirm the contract.
        v, w = nullspace(Kmatrix.numpy(), atol=self.tol)
        try:
            index = np.argmin(w)
            zl = np.real(v[:,index])
        except:
            #Shift value may not be small enough so we choose a default higher tolerance detection
            v, w = nullspace(Kmatrix.numpy(), atol=10**(-5), rtol=0)
            index = np.argmin(w)
            zl = np.real(v[:,index])
        #Normalize so the factors sum to one
        zl = zl/np.sum(zl)
        return zl
    def compute_bkeloss(self, gradients, rejection_counts, start_exact, zl_exact=None):
        r"""Computes the loss corresponding to the variational form of the BKE
        including the FTS reweighting factors.

        Independent computation is first done on individual MPI process.
        First, we compute the following at every 'l'-th MPI process:

        .. math::
            L_l = \frac{1}{2 M_l} \sum_{x \in M_l} |\grad q(x)|^2,

        where :math: $M_l$ is the mini-batch collected by the l-th MPI
        process. We then collect the computation to compute the main loss as

        .. math::
            \ell_{main} = \sum_{l=1}^{S-1} L_l z_l

        where :math: 'S' is the MPI world size.

        Args:
            gradients (torch.Tensor): mini-batch of \grad q(x).
            rejection_counts (torch.Tensor): this rank's FTS rejection counts,
                forwarded to computeZl (which always runs and updates self.zl).
            start_exact (int): when 0, reweight with the freshly computed
                self.zl; otherwise use the supplied zl_exact instead.
            zl_exact (array, optional): exact reweighting factors, required
                when start_exact != 0.

        Note that PyTorch does not track arithmetic operations during MPI
        collective calls. Thus, the last sum containing L_l z_l is not reflected
        in the computational graph tracked by individual MPI process. The
        final gradients will be collected in each respective optimizer.
        """
        #NOTE(review): this zero initialization is immediately overwritten below.
        main_loss = torch.zeros(1)
        #Batch-averaged 0.5*|grad q|^2 term for this cell
        main_loss = 0.5*torch.sum(torch.mean(gradients*gradients,dim=0))
        #Computing the reweighting factors, z_l in our notation
        #(done unconditionally so self.zl stays up to date even when overridden)
        self.zl = self.computeZl(rejection_counts)
        #Reweight this cell's contribution and sum over all cells
        if start_exact == 0:
            main_loss *= self.zl[_rank]
            dist.all_reduce(main_loss)
        else:
            main_loss *= zl_exact[_rank]
            dist.all_reduce(main_loss)
        return main_loss
    def forward(self, gradients, rejection_counts, start_exact, zl_exact=None):
        """Return the total loss (FTS-reweighted BKE term + boundary-condition
        penalty); see compute_bkeloss for the start_exact/zl_exact switch."""
        self.main_loss = self.compute_bkeloss(gradients, rejection_counts, start_exact, zl_exact)
        self.bc_loss = self.compute_bc()
        return self.main_loss+self.bc_loss
| 64,002 | 44.586182 | 212 | py |
tps-torch | tps-torch-main/tpstorch/ml/__init__.py | from . import _ml, optim, nn, data
from tpstorch import _mpi_group
#Inherit the C++ compiled sampler class, and add the MPI process group by default
class MLSamplerEXP(_ml.MLSamplerEXP):
    """Compiled EXP sampler bound to the package-wide MPI process group."""
    def __init__(self, initial_config):
        #Forward to the C++ base class, injecting the default MPI group
        super().__init__(initial_config, _mpi_group)
#Inherit the C++ compiled sampler class, and add the MPI process group by default
class MLSamplerEXPString(_ml.MLSamplerEXPString):
    """Compiled EXP string sampler bound to the package-wide MPI process group."""
    def __init__(self, initial_config):
        #Forward to the C++ base class, injecting the default MPI group
        super().__init__(initial_config, _mpi_group)
#Inherit the C++ compiled sampler class, and add the MPI process group by default
class MLSamplerEMUSString(_ml.MLSamplerEMUSString):
    """Compiled EMUS string sampler bound to the package-wide MPI process group."""
    def __init__(self, initial_config):
        #Forward to the C++ base class, injecting the default MPI group
        super().__init__(initial_config, _mpi_group)
#Inherit the C++ compiled sampler class, and add the MPI process group by default
class MLSamplerFTS(_ml.MLSamplerFTS):
    """Compiled FTS sampler bound to the package-wide MPI process group."""
    def __init__(self, initial_config):
        #Forward to the C++ base class, injecting the default MPI group
        super().__init__(initial_config, _mpi_group)
| 1,030 | 43.826087 | 81 | py |
tps-torch | tps-torch-main/dimer/fts_test/dimer_fts.py | import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_ml import MyMLFTSSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayer#US
#Import any other thing
import tqdm, sys
class FTSLayerCustom(FTSLayer):
    r"""A linear layer whose parameters correspond to the string of nodal
    configurations obtained by the general FTS method, customized to take
    into account the rotational and translational symmetry of the dimer.

    Before measuring distances to the string, configurations are mapped to a
    symmetry-reduced frame: the pair separation is wrapped with the minimum
    image convention, the center of mass is moved to the origin, and the
    configuration is rotated (Rodrigues-type formula) so its bond axis is
    aligned with the node it is compared against.

    Args:
        react_config (torch.Tensor): starting configuration in the reactant basin.
        prod_config (torch.Tensor): starting configuration in the product basin.
        num_nodes (int): number of nodes on the string.
        boxsize (float or torch.Tensor): periodic box edge length(s) used for
            minimum-image wrapping.
    """
    def __init__(self, react_config, prod_config, num_nodes,boxsize):
        super(FTSLayerCustom,self).__init__(react_config, prod_config, num_nodes)
        self.boxsize = boxsize
    @torch.no_grad()
    def compute_metric(self,x):
        """Return squared distances (one per string node) from configuration x
        to every node, measured in the symmetry-reduced frame."""
        ##(1) Remove center of mass
        old_x = x.view(2,3).clone()
        #Compute the pair distance (minimum image convention)
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        old_x[0] -= x_com
        old_x[1] -= x_com
        new_string = self.string.view(_world_size,2,3).clone()
        #Compute the pair distance of every node (minimum image convention)
        ds = (new_string[:,0]-new_string[:,1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[:,0] = ds+new_string[:,1]
        s_com = 0.5*(new_string[:,0]+new_string[:,1])
        new_string[:,0] -= s_com
        new_string[:,1] -= s_com
        ##(2) Rotate the configuration onto each node's bond axis
        dx /= torch.norm(dx)
        new_x = torch.zeros_like(new_string)
        for i in range(_world_size):
            ds[i] /= torch.norm(ds[i])
            #Rodrigues-type rotation; singular when the axes are exactly
            #anti-parallel (cosine == -1)
            v = torch.cross(dx,ds[i])
            cosine = torch.dot(ds[i],dx)
            new_x[i,0] = old_x[0] +torch.cross(v,old_x[0])+torch.cross(v,torch.cross(v,old_x[0]))/(1+cosine)
            new_x[i,1] = old_x[1] +torch.cross(v,old_x[1])+torch.cross(v,torch.cross(v,old_x[1]))/(1+cosine)
        return torch.sum((new_string.view(_world_size,6)-new_x.view(_world_size,6))**2,dim=1)
    def forward(self,x):
        """Return the squared distance from configuration x to THIS rank's
        string node, in the symmetry-reduced frame; differentiable with
        respect to the string parameters."""
        ##(1) Remove center of mass
        new_x = x.view(2,3).clone()
        #Compute the pair distance (minimum image convention)
        dx = (new_x[0]-new_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_x[0] = dx+new_x[1]
        x_com = 0.5*(new_x[0]+new_x[1])
        new_x[0] -= x_com
        new_x[1] -= x_com
        new_string = self.string[_rank].view(2,3).clone()
        #Compute the pair distance of this rank's node (minimum image convention)
        ds = (new_string[0]-new_string[1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[0] = ds+new_string[1]
        s_com = 0.5*(new_string[0]+new_string[1])
        new_string[0] -= s_com
        new_string[1] -= s_com
        ##(2) Rotate the configuration onto the node's bond axis
        dx /= torch.norm(dx)
        ds /= torch.norm(ds)
        v = torch.cross(dx,ds)
        cosine = torch.dot(ds,dx)
        new_x[0] += torch.cross(v,new_x[0])+torch.cross(v,torch.cross(v,new_x[0]))/(1+cosine)
        new_x[1] += torch.cross(v,new_x[1])+torch.cross(v,torch.cross(v,new_x[1]))/(1+cosine)
        #BUGFIX: new_string holds this rank's single node (6 entries);
        #the previous `new_string.view(_world_size,6)` is only a legal view
        #when _world_size == 1 and raises a RuntimeError otherwise.
        #Compare against this node alone.
        return torch.sum((new_string.flatten()-new_x.flatten())**2)
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class DimerFTS(MyMLFTSSampler):
    """Dimer sampler whose dynamics are confined to FTS cells.

    Wraps the compiled C++ sampler MyMLFTSSampler: stepping, configuration
    storage, bond-length queries, and cell-membership checks happen in the
    base class, while this class implements the FTS rejection logic and the
    reactant/product basin indicators used for boundary conditions.
    """
    def __init__(self,param,config,rank,beta,mpi_group,ftslayer,output_time, save_config=False):
        """
        Args:
            param: parameter input forwarded to the C++ sampler.
            config (torch.Tensor): initial (2,3) dimer configuration.
            rank (int): this process' rank.
            beta (float): inverse temperature 1/kT.
            mpi_group: MPI process group used by the sampler.
            ftslayer: FTS layer holding the string; provides compute_metric().
            output_time (int): dump a configuration every output_time calls to save().
            save_config (bool, optional): whether to dump configurations at all.
        """
        super(DimerFTS, self).__init__(param,config.detach().clone(),rank,beta,mpi_group)
        self.output_time = output_time
        self.save_config = save_config
        self.timestep = 0
        self.torch_config = config
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta
        self.ftslayer = ftslayer
        self.setConfig(config)
    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        #Directly overwrite the sampler's internal configuration
        self.setConfig(config.detach().clone())
    @torch.no_grad()
    def reset(self):
        """Reset the step counter and, if the current configuration has left
        this rank's FTS cell, restart from this rank's string node."""
        self.steps = 0
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            self.setConfig(self.ftslayer.string[_rank].view(2,3).detach().clone())
    def computeMetric(self):
        #Squared distances from the current configuration to every string node
        self.distance_sq_list = self.ftslayer.compute_metric(self.getConfig().flatten())
    @torch.no_grad()
    def step(self):
        """Take one unbiased step; reject it (restore the previous state) if
        it leaves this rank's FTS cell."""
        state_old = self.getConfig().detach().clone()
        self.stepUnbiased()
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            #Step left the cell: reject and restore the previous state
            self.setConfig(state_old)
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except:
            #No gradient buffer has been allocated yet; nothing to clear
            pass
    def step_unbiased(self):
        """Take one unconstrained step (no FTS cell rejection)."""
        with torch.no_grad():
            self.stepUnbiased()
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except:
            #No gradient buffer has been allocated yet; nothing to clear
            pass
    @torch.no_grad()
    def isReactant(self, x = None):
        """Return True if the current (or given) configuration lies in the
        reactant basin, i.e., the bond length is at most r0 = 2^(1/6)."""
        r0 = 2**(1/6.0)
        s = 0.5*r0
        #NOTE(review): `s` is unused in this method (it is used in isProduct).
        if x is None:
            if self.getBondLength() <= r0:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) <= r0:
                return True
            else:
                return False
    @torch.no_grad()
    def isProduct(self,x = None):
        """Return True if the current (or given) configuration lies in the
        product basin, i.e., the bond length is at least r0 + 2*s, s = 0.5*r0."""
        r0 = 2**(1/6.0)
        s = 0.5*r0
        if x is None:
            if self.getBondLength() >= r0+2*s:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) >= r0+2*s:
                return True
            else:
                return False
    def step_bc(self):
        """Take one step restricted to the basins: the move is undone unless
        the system remains in the reactant or product basin."""
        with torch.no_grad():
            state_old = self.getConfig().detach().clone()
            self.step_unbiased()
            if self.isReactant() or self.isProduct():
                pass
            else:
                #If it's not in the reactant or product state, reset!
                self.setConfig(state_old)
    def save(self):
        """Advance the internal timestep counter and periodically dump the
        configuration when save_config is enabled."""
        self.timestep += 1
        if self.save_config and self.timestep % self.output_time == 0:
            self.dumpConfig(self.timestep)
| 7,580 | 34.425234 | 118 | py |
tps-torch | tps-torch-main/dimer/fts_test/fts_simulation.py | #import sys
#sys.path.insert(0,"/global/home/users/muhammad_hasyim/tps-torch/build")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
#from mb_fts import MullerBrown as MullerBrownFTS#, CommittorNet
from dimer_fts import DimerFTS
from dimer_fts import FTSLayerCustom as FTSLayer
#from tpstorch.ml.data import FTSSimulation#, EXPReweightStringSimulation
from tpstorch.ml.data import FTSSimulation#, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSImplicitUpdate, FTSUpdate
#from tpstorch.ml.nn import FTSLayer#BKELossFTS, BKELossEXP, FTSCommittorLoss, FTSLayer
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
#Initialize neural net
def initializer(s):
    """Linearly interpolate between the reactant (s=0) and product (s=1) endpoints."""
    return start*(1 - s) + end*s
@torch.no_grad()
def dimer_nullspace(vec,x,boxsize):
    """Align configuration `x` into the frame of string node `vec`.

    Removes the dimer's translational and rotational null space: both the
    configuration and the node are shifted to their centers of mass (with
    minimum-image wrapping in the periodic box), then the configuration is
    rotated so its bond axis matches the node's. Returns the aligned
    configuration flattened to 6 components.
    """
    ##(1) Remove center of mass
    old_x = x.view(2,3).clone()
    #Compute the pair distance
    dx = (old_x[0]-old_x[1])
    dx = dx-torch.round(dx/boxsize)*boxsize
    #Re-compute one of the coordinates and shift to origin
    old_x[0] = dx+old_x[1]
    x_com = 0.5*(old_x[0]+old_x[1])
    old_x[0] -= x_com
    old_x[1] -= x_com
    vec = vec.view(2,3).clone()
    #Compute the pair distance
    ds = (vec[0]-vec[1])
    ds = ds-torch.round(ds/boxsize)*boxsize
    #Re-compute one of the coordinates and shift to origin
    vec[0] = ds+vec[1]
    s_com = 0.5*(vec[0]+vec[1])#.detach().clone()#,dim=1)
    vec[0] -= s_com
    vec[1] -= s_com
    ##(2) Rotate the configuration
    dx /= torch.norm(dx)
    ds /= torch.norm(ds)
    new_x = torch.zeros_like(old_x)
    # Rodrigues-style rotation taking axis dx onto axis ds.
    # NOTE(review): unlike the run_ftsme variant, no sign flip is applied when
    # cosine < 0, so anti-parallel bond axes would hit the 1/(1+cosine)
    # singularity — presumably reset_orientation keeps cosine positive; verify.
    v = torch.cross(dx,ds)
    cosine = torch.dot(ds,dx)
    new_x[0] = old_x[0] +torch.cross(v,old_x[0])+torch.cross(v,torch.cross(v,old_x[0]))/(1+cosine)
    new_x[1] = old_x[1] +torch.cross(v,old_x[1])+torch.cross(v,torch.cross(v,old_x[1]))/(1+cosine)
    return new_x.flatten()
@torch.no_grad()
def reset_orientation(vec,boxsize):
    """Re-orient string node `vec`: shift to its center of mass and rotate so
    the bond axis lies along the z reference axis; returns it flattened.
    """
    #Remove center of mass
    vec = vec.view(2,3).clone()
    s_com = (0.5*(vec[0]+vec[1])).detach().clone()#,dim=1)
    vec[0] -= s_com
    vec[1] -= s_com
    #Create the orientation vector
    ds = (vec[0]-vec[1])#/torch.norm(vec[0]-vec[1])
    ds = ds-torch.round(ds/boxsize)*boxsize
    ds /= torch.norm(ds)
    #We want to remove center of mass in x and string
    # Reference dimer with bond along z (particle 0 at -z, particle 1 at +z).
    x = torch.zeros((2,3))
    x[0,2] = -1.0
    x[1,2] = 1.0
    dx = (x[0]-x[1])
    dx /= torch.norm(x[0]-x[1])
    #Rotate the configuration
    # Rodrigues-style rotation taking ds onto the reference axis dx.
    v = torch.cross(ds,dx)
    cosine = torch.dot(ds,dx)
    vec[0] += torch.cross(v,vec[0])+torch.cross(v,torch.cross(v,vec[0]))/(1+cosine)
    #vec[0] -= torch.round(vec[0]/boxsize)*boxsize
    vec[1] += torch.cross(v,vec[1])+torch.cross(v,torch.cross(v,vec[1]))/(1+cosine)
    #vec[1] -= torch.round(vec[1]/boxsize)*boxsize
    return vec.flatten()
# Driver: run the string (FTS) method for the dimer and log the string nodes.
#Initialization
r0 = 2**(1/6.0)
width = 0.5*r0
#Reactant
dist_init = r0
start = torch.zeros((2,3))
start[0][2] = -0.5*dist_init
start[1][2] = 0.5*dist_init
#Product state
dist_init = r0+2*width
end = torch.zeros((2,3))
end[0][2] = -0.5*dist_init
end[1][2] = 0.5*dist_init
#Initialize the string
ftslayer = FTSLayer(react_config=start.flatten(),prod_config=end.flatten(),num_nodes=world_size,boxsize=10.0).to('cpu')
#Construct FTSSimulation
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=0.05,momentum=0.9,nesterov=True, kappa=0.1,periodic=True,dim=3)
kT = 1.0
batch_size = 10
period = 10
#Initialize the dimer
dimer_sim_fts = DimerFTS(param="param",config=ftslayer.string[rank].view(2,3).detach().clone(), rank=rank, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
datarunner_fts = FTSSimulation(dimer_sim_fts, nn_training = False, period=period, batch_size=batch_size, dimN=6)
#FTS Simulation Training Loop
# Each rank writes its own string node as an extended-XYZ trajectory plus a
# bond-length log; rank 0 also saves the final string state dict.
with open("string_{}_config.xyz".format(rank),"w") as f, open("string_{}_log.txt".format(rank),"w") as g:
    for i in tqdm.tqdm(range(500)):
        # get data and reweighting factors
        configs = datarunner_fts.runSimulation()
        ftsoptimizer.step(configs,len(configs),boxsize=10.0,remove_nullspace=dimer_nullspace,reset_orient=reset_orientation)
        string_temp = ftslayer.string[rank].view(2,3)
        f.write("2 \n")
        f.write('Lattice=\"10.0 0.0 0.0 0.0 10.0 0.0 0.0 0.0 10.0\" ')
        f.write('Origin=\"-5.0 -5.0 -5.0\" ')
        f.write("Properties=type:S:1:pos:R:3:aux1:R:1 \n")
        f.write("2 {} {} {} {} \n".format(string_temp[0,0],string_temp[0,1], string_temp[0,2],0.5*r0))
        f.write("2 {} {} {} {} \n".format(string_temp[1,0],string_temp[1,1], string_temp[1,2],0.5*r0))
        f.flush()
        g.write("{} {} \n".format((i+1)*period,torch.norm(string_temp[0]-string_temp[1])))
        g.flush()
if rank == 0:
    torch.save(ftslayer.state_dict(), "test_string_config")
| 5,130 | 33.206667 | 207 | py |
tps-torch | tps-torch-main/dimer/ml_test/nn_initialization.py | #import sys
#sys.path.insert(0,"/global/home/users/muhammad_hasyim/tps-torch/build")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from dimer_ftsus import FTSLayerUSCustom as FTSLayer
from committor_nn import CommittorNetDR
#from tpstorch.ml.data import FTSSimulation, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam#, FTSImplicitUpdate, FTSUpdate
#from tpstorch.ml.nn import BKELossFTS, BKELossEXP, FTSCommittorLoss, FTSLayer
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
#Initialize neural net
def initializer(s):
    """Linearly interpolate between the reactant (s=0) and product (s=1) endpoints."""
    return start*(1 - s) + end*s
# Driver: pre-train the committor network so that rank k's string node maps to
# the target value k/(world_size-1), giving a monotone initial committor.
#Initialization
r0 = 2**(1/6.0)
width = 0.5*r0
#Reactant
dist_init = r0-0.20*r0
start = torch.zeros((2,3))
start[0][2] = -0.5*dist_init
start[1][2] = 0.5*dist_init
#Product state
dist_init = r0+2*width+0.20*r0
end = torch.zeros((2,3))
end[0][2] = -0.5*dist_init
end[1][2] = 0.5*dist_init
if rank == 0:
    print("At NN start stuff")
committor = CommittorNetDR(num_nodes=50, boxsize=10).to('cpu')
ftslayer = FTSLayer(react_config=start.flatten(),prod_config=end.flatten(),num_nodes=world_size,boxsize=10.0,kappa_perpend=0.0,kappa_parallel=0.0).to('cpu')
#Initial Training Loss
initloss = nn.MSELoss()
initoptimizer = ParallelAdam(committor.parameters(), lr=1e-3)
#from torchsummary import summary
running_loss = 0.0
tolerance = 1e-3
#Initial training try to fit the committor to the initial condition
tolerance = 1e-4
#batch_sizes = [64]
#for size in batch_sizes:
loss_io = []
if rank == 0:
    loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
if rank == 0:
    print("Before training")
for i in range(10**7):
    # zero the parameter gradients
    initoptimizer.zero_grad()
    # forward + backward + optimize
    q_vals = committor(ftslayer.string[rank])#initial_config.view(-1,2))
    targets = torch.ones_like(q_vals)*rank/(dist.get_world_size()-1)
    cost = initloss(q_vals, targets)#,committor,config,cx)
    cost.backward()
    #Stepping up
    initoptimizer.step()
    with torch.no_grad():
        # Sum the per-rank losses so the stopping test uses the global mean.
        dist.all_reduce(cost)
        #if i % 10 == 0 and rank == 0:
        #    print(i,cost.item() / world_size, committor(ftslayer.string[-1]))
        #    torch.save(committor.state_dict(), "initial_1hl_nn")#_{}".format(size))#prefix,rank+1))
        if rank == 0:
            loss_io.write("Step {:d} Loss {:.5E}\n".format(i,cost.item()))
            loss_io.flush()
        if cost.item() / world_size < tolerance:
            if rank == 0:
                torch.save(committor.state_dict(), "initial_1hl_nn")#_{}".format(size))#prefix,rank+1))
                torch.save(ftslayer.state_dict(), "test_string_config")#_{}".format(size))#prefix,rank+1))
            print("Early Break!")
            break
    committor.zero_grad()
| 3,069 | 29.7 | 156 | py |
tps-torch | tps-torch-main/dimer/ml_test/plotmodel.py | #Import necessarry tools from torch
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.nn as nn
#Import necessarry tools from tpstorch
#from brownian_ml import CommittorNet
from committor_nn import CommittorNet, CommittorNetDR
import numpy as np
#Import any other thing
import tqdm, sys
prefix = 'simple'
#Initialize neural net
#committor = CommittorNetDR(d=1,num_nodes=200).to('cpu')
# Load a trained committor network (2500 hidden nodes, periodic box of 10).
committor = CommittorNetDR(num_nodes=2500, boxsize=10).to('cpu')
committor.load_state_dict(torch.load("ftsus_sl/{}_params_t_180_0".format(prefix)))
#Computing solution from neural network
#Reactant
r0 = 2**(1/6.0)
width = 0.5*r0
dist_init = r0
start = torch.zeros((2,3))
start[0][2] = -0.5*dist_init
start[1][2] = 0.5*dist_init
#Product state
dist_init = r0+2*width
end = torch.zeros((2,3))
end[0][2] = -0.5*dist_init
end[1][2] = 0.5*dist_init
def initializer(s):
    """Linearly interpolate between the reactant (s=0) and product (s=1) endpoints."""
    return start*(1 - s) + end*s
# Sweep the linear reactant->product path, recording the minimum-image bond
# length and the network's committor prediction at each point.
s = torch.linspace(0,1,100)
x = []
y = []
boxsize = 10.0
for val in s:
    r = initializer(val)
    dr = r[1]-r[0]
    dr -= torch.round(dr/boxsize)*boxsize
    dr = torch.norm(dr)#.view(-1,1)
    x.append(dr)
    y.append(committor(r).item())
#Load exact solution
data = np.loadtxt('committor.txt')
import matplotlib.pyplot as plt
plt.figure(figsize=(5,5))
#Neural net solution vs. exact solution
plt.plot(x,y,'-')
plt.plot(data[:,0],data[:,1],'--')
plt.show()
| 1,397 | 22.694915 | 82 | py |
tps-torch | tps-torch-main/dimer/ml_test/committor_nn.py | #Import necessarry tools from torch
import torch
import torch.nn as nn
import numpy as np
#Import any other thing
import tqdm, sys
class CommittorNet(nn.Module):
    """One-hidden-layer committor network on the flattened 6D dimer configuration.

    The output passes through a sigmoid, so predictions lie in (0, 1).
    """
    def __init__(self, d, num_nodes, unit=torch.relu):
        super(CommittorNet, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
    def forward(self, x):
        # Flatten any (..., 2, 3) configuration into rows of 6 coordinates.
        flat = x.view(-1, 6)
        hidden = self.unit(self.lin1(flat))
        return torch.sigmoid(self.lin2(hidden))
class CommittorNetDR(nn.Module):
    """Committor network acting on the minimum-image pair distance of the dimer.

    Each configuration is reduced to its scalar bond length (periodic box of
    edge `boxsize`) before the one-hidden-layer network and sigmoid output.
    """
    def __init__(self, num_nodes, boxsize, unit=torch.relu):
        super(CommittorNetDR, self).__init__()
        self.num_nodes = num_nodes
        self.unit = unit
        self.lin1 = nn.Linear(1, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
        self.boxsize = boxsize
    def forward(self, x):
        # Reduce each configuration to the bond length under minimum-image PBC.
        pair = x.view(-1, 2, 3)
        sep = pair[:, 0] - pair[:, 1]
        sep = sep - torch.round(sep/self.boxsize)*self.boxsize
        bond = torch.norm(sep, dim=1).view(-1, 1)
        hidden = self.unit(self.lin1(bond))
        return torch.sigmoid(self.lin2(hidden))
class CommittorNetTwoHidden(nn.Module):
    """Two-hidden-layer committor network on a d-dimensional input, sigmoid output."""
    def __init__(self, d, num_nodes, unit=torch.relu):
        super(CommittorNetTwoHidden, self).__init__()
        self.num_nodes = num_nodes
        self.d = d
        self.unit = unit
        # Layer creation order (lin1, lin3, lin2) is kept for RNG/state_dict parity.
        self.lin1 = nn.Linear(d, num_nodes, bias=True)
        self.lin3 = nn.Linear(num_nodes, num_nodes, bias=True)
        self.lin2 = nn.Linear(num_nodes, 1, bias=False)
    def forward(self, x):
        out = self.unit(self.lin1(x))
        out = self.unit(self.lin3(out))
        return torch.sigmoid(self.lin2(out))
| 2,015 | 28.647059 | 62 | py |
tps-torch | tps-torch-main/dimer/ml_test/ftsme/dimer_fts.py | import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_ml import MyMLFTSSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayer#US
#Import any other thing
import tqdm, sys
class FTSLayerCustom(FTSLayer):
    r"""A linear layer whose parameters are the nodes of an FTS string.

    Customized for the dimer: before comparing a configuration to the string,
    both are mapped into a common frame by (1) removing the center of mass
    (with minimum-image wrapping in the periodic box) and (2) rotating so the
    bond axes coincide, i.e. the dimer's translational/rotational null space
    is projected out.

    Args:
        react_config (torch.Tensor): flattened starting configuration in the reactant basin.
        prod_config (torch.Tensor): flattened starting configuration in the product basin.
        num_nodes (int): number of nodes on the string.
        boxsize (float): periodic box edge length.
    """
    def __init__(self, react_config, prod_config, num_nodes,boxsize):
        super(FTSLayerCustom,self).__init__(react_config, prod_config, num_nodes)
        self.boxsize = boxsize
    @torch.no_grad()
    def compute_metric(self,x):
        """Return per-node squared distances between config x and every string
        node, each computed after aligning x into that node's frame."""
        ##(1) Remove center of mass
        old_x = x.view(2,3).detach().clone()
        #Compute the pair distance
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        old_x[0] -= x_com
        old_x[1] -= x_com
        new_string = self.string.view(_world_size,2,3).detach().clone()
        #Compute the pair distance
        ds = (new_string[:,0]-new_string[:,1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[:,0] = ds+new_string[:,1]
        s_com = 0.5*(new_string[:,0]+new_string[:,1])
        new_string[:,0] -= s_com
        new_string[:,1] -= s_com
        ##(2) Rotate the configuration
        dx /= torch.norm(dx)
        new_x = torch.zeros_like(new_string)
        for i in range(_world_size):
            ds[i] /= torch.norm(ds[i])
            v = torch.cross(dx,ds[i])
            cosine = torch.dot(ds[i],dx)
            if cosine < 0:
                # Flip anti-parallel nodes to avoid the 1/(1+cosine) singularity.
                new_string[i] *= -1
                ds[i] *= -1
                v *= -1
                cosine = torch.dot(ds[i],dx)
            new_x[i,0] = old_x[0] +torch.cross(v,old_x[0])+torch.cross(v,torch.cross(v,old_x[0]))/(1+cosine)
            new_x[i,1] = old_x[1] +torch.cross(v,old_x[1])+torch.cross(v,torch.cross(v,old_x[1]))/(1+cosine)
        return torch.sum((new_string.view(_world_size,6)-new_x.view(_world_size,6))**2,dim=1)
    def forward(self,x):
        """Squared distance between config x and this rank's string node after
        null-space alignment."""
        ##(1) Remove center of mass
        new_x = x.view(2,3).detach().clone()
        #Compute the pair distance
        dx = (new_x[0]-new_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_x[0] = dx+new_x[1]
        x_com = 0.5*(new_x[0]+new_x[1])
        new_x[0] -= x_com
        new_x[1] -= x_com
        new_string = self.string[_rank].view(2,3).detach().clone()
        #Compute the pair distance
        ds = (new_string[0]-new_string[1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[0] = ds+new_string[1]
        s_com = 0.5*(new_string[0]+new_string[1])
        new_string[0] -= s_com
        new_string[1] -= s_com
        ##(2) Rotate the configuration
        dx /= torch.norm(dx)
        ds /= torch.norm(ds)
        v = torch.cross(dx,ds)
        cosine = torch.dot(ds,dx)
        if cosine < 0:
            ds *= -1
            new_string *= -1
            v *= -1
            cosine = torch.dot(ds,dx)
        new_x[0] += torch.cross(v,new_x[0])+torch.cross(v,torch.cross(v,new_x[0]))/(1+cosine)
        new_x[1] += torch.cross(v,new_x[1])+torch.cross(v,torch.cross(v,new_x[1]))/(1+cosine)
        # BUG FIX: new_string holds a single node (6 components); the original
        # new_string.view(_world_size,6) only works when _world_size == 1 and
        # raises a RuntimeError on multi-rank runs. Flatten both instead
        # (matches the dimer_ftsus variant of this layer).
        return torch.sum((new_string.flatten()-new_x.flatten())**2)
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class DimerFTS(MyMLFTSSampler):
    """Dimer sampler coupled to an FTS string (one Voronoi cell per MPI rank).

    Wraps the C++ MyMLFTSSampler: runs dynamics while rejecting moves that
    leave this rank's string cell, and provides reactant/product indicator
    functions for the boundary conditions.
    """
    def __init__(self,param,config,rank,beta,mpi_group,ftslayer,output_time, save_config=False):
        super(DimerFTS, self).__init__(param,config.detach().clone(),rank,beta,mpi_group)
        self.output_time = output_time      # dump interval, in timesteps
        self.save_config = save_config
        self.timestep = 0
        self.torch_config = config
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta
        self.ftslayer = ftslayer            # string layer providing the cell metric
        self.setConfig(config)
    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        """Set the sampler state directly from a torch tensor."""
        self.setConfig(config.detach().clone())
    @torch.no_grad()
    def reset(self):
        """Reset step counter; if outside this rank's FTS cell, restart from the string node."""
        self.steps = 0
        self.computeMetric()
        if not self.checkFTSCell(_rank, _world_size):
            self.setConfig(self.ftslayer.string[_rank].view(2,3).detach().clone())
    def computeMetric(self):
        """Cache squared distances from the current config to every string node."""
        self.distance_sq_list = self.ftslayer.compute_metric(self.getConfig().flatten())
    @torch.no_grad()
    def step(self):
        """One unbiased step, rejected if it leaves this rank's FTS cell."""
        state_old = self.getConfig().detach().clone()
        self.stepUnbiased()
        self.computeMetric()
        if not self.checkFTSCell(_rank, _world_size):
            # Move left this rank's Voronoi cell of the string: reject it.
            self.setConfig(state_old)
        self._track_gradients()
    def step_unbiased(self):
        """One unbiased step with no cell constraint."""
        with torch.no_grad():
            self.stepUnbiased()
        self._track_gradients()
    def _track_gradients(self):
        """Re-enable autograd on the configuration and clear any stale gradient."""
        self.torch_config.requires_grad_()
        # grad is None before the first backward pass; guard explicitly rather
        # than swallowing every exception with a bare except.
        if self.torch_config.grad is not None:
            self.torch_config.grad.zero_()
    @torch.no_grad()
    def isReactant(self, x = None):
        """True if bond length <= r0 = 2^(1/6) (reactant basin); tests x if given."""
        r0 = 2**(1/6.0)
        length = self.getBondLength() if x is None else self.getBondLengthConfig(x)
        return bool(length <= r0)
    @torch.no_grad()
    def isProduct(self, x = None):
        """True if bond length >= r0 + 2*(0.5*r0) = 2*r0 (product basin); tests x if given."""
        r0 = 2**(1/6.0)
        s = 0.5*r0
        length = self.getBondLength() if x is None else self.getBondLengthConfig(x)
        return bool(length >= r0+2*s)
    def step_bc(self):
        """One unbiased step restricted to the reactant/product basins."""
        with torch.no_grad():
            state_old = self.getConfig().detach().clone()
            self.step_unbiased()
            if not (self.isReactant() or self.isProduct()):
                #If it's not in the reactant or product state, reset!
                self.setConfig(state_old)
    def save(self):
        """Advance the step counter and periodically dump the configuration."""
        self.timestep += 1
        if self.save_config and self.timestep % self.output_time == 0:
            self.dumpConfig(self.timestep)
| 7,906 | 34.457399 | 118 | py |
tps-torch | tps-torch-main/dimer/ml_test/ftsme/run_ftsme.py | import sys
sys.path.append("..")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from dimer_fts import DimerFTS
from committor_nn import CommittorNet, CommittorNetDR
from dimer_fts import FTSLayerCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation#, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSUpdate
#from tpstorch.ml.nn import BKELossEXP
from tpstorch.ml.nn import BKELossFTS
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
@torch.no_grad()
def dimer_nullspace(vec,x,boxsize):
    """Align configuration `x` into the frame of string node `vec`.

    Removes the dimer's translational and rotational null space: both tensors
    are shifted to their centers of mass (with minimum-image wrapping in the
    periodic box), then the configuration is rotated so its bond axis matches
    the node's. Returns the aligned configuration flattened to 6 components.
    """
    ##(1) Remove center of mass
    old_x = x.view(2,3).clone()
    #Compute the pair distance
    dx = (old_x[0]-old_x[1])
    dx = dx-torch.round(dx/boxsize)*boxsize
    #Re-compute one of the coordinates and shift to origin
    old_x[0] = dx+old_x[1]
    x_com = 0.5*(old_x[0]+old_x[1])
    old_x[0] -= x_com
    old_x[1] -= x_com
    vec = vec.view(2,3).clone()
    #Compute the pair distance
    ds = (vec[0]-vec[1])
    ds = ds-torch.round(ds/boxsize)*boxsize
    #Re-compute one of the coordinates and shift to origin
    vec[0] = ds+vec[1]
    s_com = 0.5*(vec[0]+vec[1])#.detach().clone()#,dim=1)
    vec[0] -= s_com
    vec[1] -= s_com
    ##(2) Rotate the configuration
    dx /= torch.norm(dx)
    ds /= torch.norm(ds)
    new_x = torch.zeros_like(old_x)
    # Rodrigues-style rotation taking axis dx onto axis ds.
    v = torch.cross(dx,ds)
    cosine = torch.dot(ds,dx)
    if cosine < 0:
        # Flip the configuration when axes are anti-parallel to avoid the
        # 1/(1+cosine) singularity.
        dx *= -1
        v *= -1
        old_x *= -1
        cosine = torch.dot(ds,dx)
    new_x[0] = old_x[0] +torch.cross(v,old_x[0])+torch.cross(v,torch.cross(v,old_x[0]))/(1+cosine)
    new_x[1] = old_x[1] +torch.cross(v,old_x[1])+torch.cross(v,torch.cross(v,old_x[1]))/(1+cosine)
    return new_x.flatten()
# Driver: train the committor with the FTS-based BKE loss while evolving the
# string; logs per-rank string nodes and loss statistics.
#Initialize neural net
#Initialization
r0 = 2**(1/6.0)
width = 0.5*r0
#Reactant
dist_init = r0-0.95*r0
start = torch.zeros((2,3))
start[0][2] = -0.5*dist_init
start[1][2] = 0.5*dist_init
#Product state
dist_init = r0+2*width+0.95*r0
end = torch.zeros((2,3))
end[0][2] = -0.5*dist_init
end[1][2] = 0.5*dist_init
#Initialize neural net
#committor = CommittorNet(d=6,num_nodes=2500).to('cpu')
committor = CommittorNetDR(num_nodes=2500, boxsize=10).to('cpu')
#Initialize the string for FTS method
ftslayer = FTSLayer(react_config=start.flatten(),prod_config=end.flatten(),num_nodes=world_size,boxsize=10.0).to('cpu')
#Load the pre-initialized neural network and string
committor.load_state_dict(torch.load("../initial_1hl_nn"))
kT = 1.0
ftslayer.load_state_dict(torch.load("../test_string_config"))
n_boundary_samples = 100
batch_size = 8
period = 25
dimer_sim_bc = DimerFTS(param="param_bc",config=ftslayer.string[rank].view(2,3).clone().detach(), rank=rank, beta=1/kT, save_config=False, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
dimer_sim = DimerFTS(param="param",config=ftslayer.string[rank].view(2,3).clone().detach(), rank=rank, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
#Construct FTSSimulation
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=0.02,momentum=0.9,nesterov=True,kappa=0.1,periodic=True,dim=3)
datarunner = FTSSimulation(dimer_sim, committor = committor, nn_training = True, period=period, batch_size=batch_size, dimN=6)
#Initialize main loss function and optimizers
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossFTS(  bc_sampler = dimer_sim_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5,
                    tol = 5e-10,
                    mode= 'shift')
loss_io = []
loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
#Training loop
optimizer = ParallelAdam(committor.parameters(), lr=3e-3)
#We can train in terms of epochs, but we will keep it in one epoch
with open("string_{}_config.xyz".format(rank),"w") as f, open("string_{}_log.txt".format(rank),"w") as g:
    for epoch in range(1):
        if rank == 0:
            print("epoch: [{}]".format(epoch+1))
        for i in tqdm.tqdm(range(1000)):#20000)):
            # get data and reweighting factors
            configs, grad_xs = datarunner.runSimulation()
            # zero the parameter gradients
            optimizer.zero_grad()
            # (2) Update the neural network
            # forward + backward + optimize
            cost = loss(grad_xs, dimer_sim.rejection_count)
            cost.backward()
            optimizer.step()
            ftsoptimizer.step(configs,len(configs),boxsize=10.0,remove_nullspace=dimer_nullspace)
            # print statistics
            with torch.no_grad():
                string_temp = ftslayer.string[rank].view(2,3)
                f.write("2 \n")
                f.write('Lattice=\"10.0 0.0 0.0 0.0 10.0 0.0 0.0 0.0 10.0\" ')
                f.write('Origin=\"-5.0 -5.0 -5.0\" ')
                f.write("Properties=type:S:1:pos:R:3:aux1:R:1 \n")
                f.write("2 {} {} {} {} \n".format(string_temp[0,0],string_temp[0,1], string_temp[0,2],0.5*r0))
                f.write("2 {} {} {} {} \n".format(string_temp[1,0],string_temp[1,1], string_temp[1,2],0.5*r0))
                f.flush()
                g.write("{} {} \n".format((i+1)*period,torch.norm(string_temp[0]-string_temp[1])))
                g.flush()
                #if counter % 10 == 0:
                main_loss = loss.main_loss
                bc_loss = loss.bc_loss
                loss_io.write('{:d} {:.5E} {:.5E} \n'.format(i+1,main_loss.item(),bc_loss.item()))
                loss_io.flush()
        #Print statistics
        if rank == 0:
            torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
| 6,330 | 34.768362 | 210 | py |
tps-torch | tps-torch-main/dimer/ml_test/ftsus/run_ftsus.py | import sys
sys.path.append("..")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from dimer_ftsus import DimerFTSUS
from committor_nn import CommittorNet, CommittorNetDR
from dimer_ftsus import FTSLayerUSCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam
from tpstorch.ml.nn import BKELossEXP
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
# Driver: train the committor with umbrella sampling along a fixed FTS string,
# using EXP reweighting; logs per-rank loss statistics.
#Initialize neural net
#Initialization
r0 = 2**(1/6.0)
width = 0.5*r0
#Reactant
dist_init = r0
start = torch.zeros((2,3))
start[0][2] = -0.5*dist_init
start[1][2] = 0.5*dist_init
#Product state
dist_init = r0+2*width
end = torch.zeros((2,3))
end[0][2] = -0.5*dist_init
end[1][2] = 0.5*dist_init
#Initialize neural net
#committor = CommittorNet(d=6,num_nodes=2500).to('cpu')
committor = CommittorNetDR(num_nodes=2500, boxsize=10).to('cpu')
kappa_perp = 200.0#60#10
kappa_par = 500.0#60
#Initialize the string for FTS method
ftslayer = FTSLayer(react_config=start.flatten(),prod_config=end.flatten(),num_nodes=world_size,boxsize=10.0,kappa_perpend=kappa_perp,kappa_parallel=kappa_par).to('cpu')
#Load the pre-initialized neural network and string
committor.load_state_dict(torch.load("../initial_1hl_ftsus_nn"))
kT = 1.0
ftslayer.load_state_dict(torch.load("../test_string_config"))
ftslayer.set_tangent()
print(ftslayer.string)
print(ftslayer.tangent)
n_boundary_samples = 100
batch_size = 8
period = 25
dimer_sim_bc = DimerFTSUS(param="param_bc",config=ftslayer.string[rank].view(2,3).clone().detach(), rank=rank, beta=1/kT, kappa = 0.0, save_config=False, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
dimer_sim = DimerFTSUS(param="param",config=ftslayer.string[rank].view(2,3).clone().detach(), rank=rank, beta=1/kT, kappa = kappa_perp, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
#Construct FTSSimulation
datarunner = EXPReweightStringSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=6)
#Initialize main loss function and optimizers
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossEXP(  bc_sampler = dimer_sim_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5,
                    )
loss_io = []
loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
#Training loop
optimizer = ParallelAdam(committor.parameters(), lr=1e-3)
#We can train in terms of epochs, but we will keep it in one epoch
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    for i in tqdm.tqdm(range(1000)):
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        # forward + backward + optimize
        cost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cost.backward()#retain_graph=True)
        optimizer.step()
        # print statistics
        with torch.no_grad():
            #if counter % 10 == 0:
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            loss_io.write('{:d} {:.5E} {:.5E} \n'.format(i+1,main_loss.item(),bc_loss.item()))
            loss_io.flush()
    #Print statistics
    if rank == 0:
        torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
| 4,082 | 32.195122 | 225 | py |
tps-torch | tps-torch-main/dimer/ml_test/ftsus/dimer_ftsus.py | import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_ml import MyMLEXPStringSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayerUS
#Import any other thing
import tqdm, sys
class FTSLayerUSCustom(FTSLayerUS):
    r"""A linear layer whose parameters are the FTS string nodes, with umbrella
    (restraint) stiffnesses perpendicular and parallel to the string tangent.
    Customized to take into account the rotational and translational symmetry
    of the dimer problem.

    Args:
        react_config (torch.Tensor): starting configuration in the reactant basin.
        prod_config (torch.Tensor): starting configuration in the product basin.
    """
    def __init__(self, react_config, prod_config, num_nodes,boxsize,kappa_perpend, kappa_parallel):
        super(FTSLayerUSCustom,self).__init__(react_config, prod_config, num_nodes, kappa_perpend, kappa_parallel)
        self.boxsize = boxsize
    @torch.no_grad()
    def compute_metric(self,x):
        """Squared distance from config x to each string node after aligning x
        into that node's frame (COM removal + rotation, minimum-image PBC)."""
        ##(1) Remove center of mass
        old_x = x.view(2,3).detach().clone()
        #Compute the pair distance
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        old_x[0] -= x_com
        old_x[1] -= x_com
        new_string = self.string.view(_world_size,2,3).detach().clone()
        #Compute the pair distance
        ds = (new_string[:,0]-new_string[:,1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[:,0] = ds+new_string[:,1]
        s_com = 0.5*(new_string[:,0]+new_string[:,1])#.detach().clone()#,dim=1)
        new_string[:,0] -= s_com
        new_string[:,1] -= s_com
        ##(2) Rotate the configuration
        dx /= torch.norm(dx)
        new_x = torch.zeros_like(new_string)
        for i in range(_world_size):
            ds[i] /= torch.norm(ds[i])
            v = torch.cross(dx,ds[i])
            cosine = torch.dot(ds[i],dx)
            if cosine < 0:
                # Flip anti-parallel nodes to avoid the 1/(1+cosine) singularity.
                new_string[i] *= -1
                ds[i] *= -1
                v *= -1
                cosine = torch.dot(ds[i],dx)
            new_x[i,0] = old_x[0] +torch.cross(v,old_x[0])+torch.cross(v,torch.cross(v,old_x[0]))/(1+cosine)
            new_x[i,1] = old_x[1] +torch.cross(v,old_x[1])+torch.cross(v,torch.cross(v,old_x[1]))/(1+cosine)
        return torch.sum((new_string.view(_world_size,6)-new_x.view(_world_size,6))**2,dim=1)
    @torch.no_grad()
    def compute_umbrellaforce(self,x):
        """Umbrella force on config x toward this rank's node: stiffness
        kappa_perpend off-string plus kappa_parallel along the string tangent."""
        #For now, don't rotate or translate the system
        ##(1) Remove center of mass
        new_x = x.view(2,3).detach().clone()
        new_string = self.string[_rank].view(2,3).detach().clone()
        #Compute the pair distance
        dx = (new_x[0]-new_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_x[0] = dx+new_x[1]
        x_com = 0.5*(new_x[0]+new_x[1])
        new_x[0] -= x_com
        new_x[1] -= x_com
        # NOTE(review): new_string is re-assigned identically below; the first
        # assignment above appears redundant.
        new_string = self.string[_rank].view(2,3).detach().clone()
        #Compute the pair distance
        ds = (new_string[0]-new_string[1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[0] = ds+new_string[1]
        s_com = 0.5*(new_string[0]+new_string[1])#.detach().clone()#,dim=1)
        new_string[0] -= s_com
        new_string[1] -= s_com
        ##(2) Rotate the configuration
        dx /= torch.norm(dx)
        ds /= torch.norm(ds)
        #v = torch.cross(dx,ds)
        v = torch.cross(ds,dx)
        cosine = torch.dot(ds,dx)
        if cosine < 0:
            ds *= -1
            new_string *= -1
            v *= -1
            cosine = torch.dot(ds,dx)
        #new_x[0] += torch.cross(v,new_x[0])+torch.cross(v,torch.cross(v,new_x[0]))/(1+cosine)
        #new_x[1] += torch.cross(v,new_x[1])+torch.cross(v,torch.cross(v,new_x[1]))/(1+cosine)
        # Rotate the string node into the configuration's frame (axis v = ds x dx).
        new_string[0] += torch.cross(v,new_string[0])+torch.cross(v,torch.cross(v,new_string[0]))/(1+cosine)
        new_string[1] += torch.cross(v,new_string[1])+torch.cross(v,torch.cross(v,new_string[1]))/(1+cosine)
        dX = new_x.flatten()-new_string.flatten()
        dX = dX-torch.round(dX/self.boxsize)*self.boxsize
        tangent_dx = torch.dot(self.tangent[_rank],dX)
        # Harmonic restoring force, stiffer (kappa_parallel) along the tangent.
        return -self.kappa_perpend*dX-(self.kappa_parallel-self.kappa_perpend)*self.tangent[_rank]*tangent_dx
    def forward(self,x):
        """Restraint energy surrogate of config x relative to this rank's
        aligned string node (matches compute_umbrellaforce up to the factor
        kappa_perpend/2)."""
        ##(1) Remove center of mass
        new_x = x.view(2,3).detach().clone()
        #new_string = self.string[_rank].view(2,3).detach().clone()
        #Compute the pair distance
        dx = (new_x[0]-new_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_x[0] = dx+new_x[1]
        x_com = 0.5*(new_x[0]+new_x[1])
        new_x[0] -= x_com
        new_x[1] -= x_com
        new_string = self.string[_rank].view(2,3).detach().clone()
        #Compute the pair distance
        ds = (new_string[0]-new_string[1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[0] = ds+new_string[1]
        s_com = 0.5*(new_string[0]+new_string[1])#.detach().clone()#,dim=1)
        new_string[0] -= s_com
        new_string[1] -= s_com
        ##(2) Rotate the configuration
        dx /= torch.norm(dx)
        ds /= torch.norm(ds)
        v = torch.cross(ds,dx)
        #v = torch.cross(dx,ds)
        cosine = torch.dot(ds,dx)
        if cosine < 0:
            ds *= -1
            new_string *= -1
            v *= -1
            cosine = torch.dot(ds,dx)
        #new_x[0] += torch.cross(v,new_x[0])+torch.cross(v,torch.cross(v,new_x[0]))/(1+cosine)
        #new_x[1] += torch.cross(v,new_x[1])+torch.cross(v,torch.cross(v,new_x[1]))/(1+cosine)
        new_string[0] += torch.cross(v,new_string[0])+torch.cross(v,torch.cross(v,new_string[0]))/(1+cosine)
        new_string[1] += torch.cross(v,new_string[1])+torch.cross(v,torch.cross(v,new_string[1]))/(1+cosine)
        dX = new_x.flatten()-new_string.flatten()
        dX = dX-torch.round(dX/self.boxsize)*self.boxsize
        dist_sq = torch.sum(dX**2)
        tangent_dx = torch.sum(self.tangent[_rank]*dX)
        return dist_sq+(self.kappa_parallel-self.kappa_perpend)*tangent_dx**2/self.kappa_perpend#torch.sum((tangent*(self.string-x))**2,dim=1)
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class DimerFTSUS(MyMLEXPStringSampler):
    """Dimer sampler biased by umbrella restraints tied to an FTS string node."""
    def __init__(self,param,config,rank,beta,kappa, mpi_group,ftslayer,output_time, save_config=False):
        super(DimerFTSUS, self).__init__(param,config.detach().clone(),rank,beta,kappa, mpi_group)
        self.output_time = output_time      # dump interval, in timesteps
        self.save_config = save_config
        self.timestep = 0
        self.torch_config = config
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta
        self.ftslayer = ftslayer            # string layer providing metric + umbrella force
        self.setConfig(config)
    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        # Don't have to worry about all that much all, can just set it
        self.setConfig(config.detach().clone())
    @torch.no_grad()
    def reset(self):
        # Restart from this rank's string node if the walker has left its cell.
        self.steps = 0
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            self.setConfig(self.ftslayer.string[_rank].view(2,3).detach().clone())
    def computeMetric(self):
        # Cache squared distances from the current config to every string node.
        self.distance_sq_list = self.ftslayer.compute_metric(self.getConfig().flatten())
    def computeWForce(self,x):
        # Umbrella (restraint) force toward this rank's string node.
        return self.ftslayer.compute_umbrellaforce(x)
@torch.no_grad()
def step(self):
state_old = self.getConfig().detach().clone()
self.stepBiased(self.computeWForce(state_old.flatten()))
self.torch_config.requires_grad_()
try:
self.torch_config.grad.data.zero_()
except:
pass
def step_unbiased(self):
with torch.no_grad():
self.stepUnbiased()
self.torch_config.requires_grad_()
try:
self.torch_config.grad.data.zero_()
except:
pass
@torch.no_grad()
def isReactant(self, x = None):
r0 = 2**(1/6.0)
s = 0.5*r0
#Compute the pair distance
if x is None:
if self.getBondLength() <= r0:
return True
else:
return False
else:
if self.getBondLengthConfig(x) <= r0:
return True
else:
return False
@torch.no_grad()
def isProduct(self,x = None):
r0 = 2**(1/6.0)
s = 0.5*r0
if x is None:
if self.getBondLength() >= r0+2*s:
return True
else:
return False
else:
if self.getBondLengthConfig(x) >= r0+2*s:
return True
else:
return False
@torch.no_grad()
def step_bc(self):
state_old = self.getConfig().detach().clone()
self.step_unbiased()
if self.isReactant() or self.isProduct():
pass
else:
#If it's not in the reactant or product state, reset!
self.setConfig(state_old)
def save(self):
self.timestep += 1
if self.save_config and self.timestep % self.output_time == 0:
self.dumpConfig(self.timestep)
| 10,322 | 36.538182 | 142 | py |
tps-torch | tps-torch-main/dimer/ml_test/us/dimer_us.py | import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_ml import MyMLEXPSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayerUS
#Import any other thing
import tqdm, sys
# This is a sketch of what it should be like; we still have to make the committor
# play nicely within this function.
# Perhaps keep an ominous placeholder committor here that the actual committor overrides?
class DimerUS(MyMLEXPSampler):
    """Dimer sampler with an umbrella bias acting on the committor value.

    The bias force is ``-kappa*(q(x)-q_target)*dq/dx`` — the force of a harmonic
    restraint on the committor — where ``dq/dx`` is read from
    ``torch_config.grad``. Callers must populate that gradient (backward pass on
    the committor) before calling :meth:`step`.

    Args:
        param (str): name of the parameter file for the underlying sampler;
            also read locally for ``dt`` and ``gamma``.
        config (torch.Tensor): initial (2,3) dimer configuration.
        rank (int): rank of this process (index of the umbrella window).
        beta (float): inverse temperature.
        kappa (float): umbrella spring constant.
        mpi_group: MPI group used for inter-replica communication.
        output_time (int): interval (in timesteps) between configuration dumps.
        save_config (bool): if True, periodically dump configurations.
    """
    def __init__(self,param,config,rank,beta,kappa, mpi_group,output_time, save_config=False):
        super(DimerUS, self).__init__(param,config.detach().clone(),rank,beta,kappa, mpi_group)
        self.output_time = output_time
        self.save_config = save_config
        self.timestep = 0
        self.torch_config = config
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta
        self.setConfig(config)
        self.dt = 0
        self.gamma = 0
        #Read the parameter file to get the step size and friction constant.
        #Bugfix: this used to open the hard-coded name "param", which gave wrong
        #dt/gamma for samplers constructed with a different file (e.g. "param_bc").
        with open(param,"r") as f:
            for line in f:
                tokens = line.strip().split()
                if not tokens:
                    continue
                if tokens[0] == "gamma":
                    self.gamma = float(tokens[1])
                elif tokens[0] == "dt":
                    self.dt = float(tokens[1])

    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        """Overwrite the sampler state with the given torch configuration."""
        self.setConfig(config.detach().clone())

    def computeWForce(self, committor_val, qval):
        #Harmonic umbrella force on the committor: -kappa*(q-qval)*dq/dx,
        #with dq/dx taken from the committor gradient stored in torch_config.grad
        return -self.kappa*self.torch_config.grad.data*(committor_val-qval)#/self.gamma

    def step(self, committor_val, onlytst=False):
        """One biased step; restrain toward q=0.5 when ``onlytst`` (transition
        state), otherwise toward this rank's window value ``self.qvals[_rank]``."""
        with torch.no_grad():
            if onlytst:
                self.stepBiased(self.computeWForce(committor_val, 0.5))
            else:
                self.stepBiased(self.computeWForce(committor_val, self.qvals[_rank]))
        self.torch_config.requires_grad_()
        self.torch_config.grad.data.zero_()

    def step_unbiased(self):
        """One unbiased step."""
        with torch.no_grad():
            self.stepUnbiased()
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            #.grad is None until the first backward pass; nothing to zero yet
            pass

    @torch.no_grad()
    def isReactant(self, x = None):
        """Return True when the dimer bond length is at or below r0 = 2^(1/6)."""
        r0 = 2**(1/6.0)
        if x is None:
            return bool(self.getBondLength() <= r0)
        return bool(self.getBondLengthConfig(x) <= r0)

    @torch.no_grad()
    def isProduct(self,x = None):
        """Return True when the dimer bond length is at or beyond r0+2s, s = 0.5*r0."""
        r0 = 2**(1/6.0)
        s = 0.5*r0
        if x is None:
            return bool(self.getBondLength() >= r0+2*s)
        return bool(self.getBondLengthConfig(x) >= r0+2*s)

    @torch.no_grad()
    def step_bc(self):
        """One unbiased step, rejected (old state restored) unless the walker
        remains inside the reactant or product basin."""
        state_old = self.getConfig().detach().clone()
        self.step_unbiased()
        if not (self.isReactant() or self.isProduct()):
            #If it's not in the reactant or product state, reset!
            self.setConfig(state_old)

    def save(self):
        #Dump the configuration every `output_time` timesteps when enabled
        self.timestep += 1
        if self.save_config and self.timestep % self.output_time == 0:
            self.dumpConfig(self.timestep)
tps-torch | tps-torch-main/dimer/ml_test/us/run_us.py | import sys
sys.path.append("..")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from dimer_us import DimerUS
from committor_nn import CommittorNetDR
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelAdam
from tpstorch.ml.nn import BKELossEXP
import numpy as np
#Grab the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
#Seed torch and numpy for reproducibility
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
#Initialize neural net
#Initialization: r0 = 2^(1/6) is the dimer bond length in the compact state
r0 = 2**(1/6.0)
width = 0.5*r0
#Reactant (compact) configuration, bond aligned with the z axis
dist_init = r0#-0.95*r0
start = torch.zeros((2,3))
start[0][2] = -0.5*dist_init
start[1][2] = 0.5*dist_init
#Product (extended) state
dist_init = r0+2*width#+0.95*r0
end = torch.zeros((2,3))
end[0][2] = -0.5*dist_init
end[1][2] = 0.5*dist_init
def initializer(s):
    #Linear interpolation between reactant (s=0) and product (s=1)
    return (1-s)*start+s*end
#Each rank starts from its own point along the linear interpolation
initial_config = initializer(rank/(world_size-1))
#Initialize neural net
committor = CommittorNetDR(num_nodes=2500, boxsize=10).to('cpu')
kappa= 600#10
#Initialize the string for FTS method
#Load the pre-initialized neural network and string
committor.load_state_dict(torch.load("../initial_1hl_nn"))
kT = 1.0
n_boundary_samples = 100
batch_size = 8
period = 25
#Unbiased sampler (kappa=0) for the boundary-condition loss, plus the
#umbrella-biased production sampler
dimer_sim_bc = DimerUS(param="param_bc",config=initial_config.clone().detach(), rank=rank, beta=1/kT, kappa = 0.0, save_config=False, mpi_group = mpi_group, output_time=batch_size*period)
dimer_sim = DimerUS(param="param",config=initial_config.clone().detach(), rank=rank, beta=1/kT, kappa = kappa, save_config=True, mpi_group = mpi_group, output_time=batch_size*period)
#Construct FTSSimulation
datarunner = EXPReweightSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=6)
#Initialize main loss function and optimizers
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossEXP( bc_sampler = dimer_sim_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5,
                    )
#NOTE(review): the empty-list assignment below is immediately overwritten by open()
loss_io = []
loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
#Training loop
optimizer = ParallelAdam(committor.parameters(), lr=1.5e-3)
#We can train in terms of epochs, but we will keep it in one epoch
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    for i in tqdm.tqdm(range(1000)):
        # get data and reweighting factors
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        # forward + backward + optimize
        cost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cost.backward()
        optimizer.step()
        # print statistics
        with torch.no_grad():
            #if counter % 10 == 0:
            main_loss = loss.main_loss
            bc_loss = loss.bc_loss
            loss_io.write('{:d} {:.5E} {:.5E} \n'.format(i+1,main_loss.item(),bc_loss.item()))
            loss_io.flush()
            #Print statistics
            if rank == 0:
                torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
| 3,591 | 29.700855 | 187 | py |
tps-torch | tps-torch-main/dimer/ml_test/ftsus_sl/dimer_ftsus.py | import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_ml import MyMLEXPStringSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayerUS
#Import any other thing
import tqdm, sys
class FTSLayerUSCustom(FTSLayerUS):
    r"""A linear layer whose parameters are the images of the string obtained by
    the finite-temperature string (FTS) method, customized to remove the
    rotational and translational symmetry of the dimer problem: the center of
    mass is removed (with minimum-image periodic wrapping) and configurations
    are rotated so the dimer bond axes coincide.

    Args:
        react_config (torch.Tensor): starting configuration in the reactant basin.
        prod_config (torch.Tensor): starting configuration in the product basin.
        num_nodes (int): number of string images.
        boxsize (float): periodic box edge length for minimum-image wrapping.
        kappa_perpend (float): umbrella spring constant perpendicular to the string.
        kappa_parallel (float): umbrella spring constant parallel to the string.
    """
    def __init__(self, react_config, prod_config, num_nodes,boxsize,kappa_perpend, kappa_parallel):
        super(FTSLayerUSCustom,self).__init__(react_config, prod_config, num_nodes, kappa_perpend, kappa_parallel)
        self.boxsize = boxsize

    @torch.no_grad()
    def compute_metric(self,x):
        """Return the squared distances between configuration ``x`` and every
        string image, after removing the center of mass and rotating ``x`` onto
        each image's bond axis."""
        ##(1) Remove center of mass
        old_x = x.view(2,3).detach().clone()
        #Pair distance under the minimum image convention
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        old_x[0] -= x_com
        old_x[1] -= x_com
        new_string = self.string.view(_world_size,2,3).detach().clone()
        #Pair distance of every string image
        ds = (new_string[:,0]-new_string[:,1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[:,0] = ds+new_string[:,1]
        s_com = 0.5*(new_string[:,0]+new_string[:,1])
        new_string[:,0] -= s_com
        new_string[:,1] -= s_com
        ##(2) Rotate the configuration onto each image's bond axis
        dx /= torch.norm(dx)
        new_x = torch.zeros_like(new_string)
        for i in range(_world_size):
            ds[i] /= torch.norm(ds[i])
            v = torch.cross(dx,ds[i])
            cosine = torch.dot(ds[i],dx)
            if cosine < 0:
                #Flip the image so the two bond axes are within 90 degrees
                new_string[i] *= -1
                ds[i] *= -1
                v *= -1
                cosine = torch.dot(ds[i],dx)
            #Rotation formula mapping dx onto ds[i]
            new_x[i,0] = old_x[0] +torch.cross(v,old_x[0])+torch.cross(v,torch.cross(v,old_x[0]))/(1+cosine)
            new_x[i,1] = old_x[1] +torch.cross(v,old_x[1])+torch.cross(v,torch.cross(v,old_x[1]))/(1+cosine)
        return torch.sum((new_string.view(_world_size,6)-new_x.view(_world_size,6))**2,dim=1)

    @torch.no_grad()
    def compute_umbrellaforce(self,x):
        """Return the anisotropic umbrella force restraining ``x`` toward this
        rank's string image:
        ``-kappa_perp*dX - (kappa_par-kappa_perp)*t*(t.dX)``,
        with dX evaluated after COM removal and rotation."""
        ##(1) Remove center of mass
        new_x = x.view(2,3).detach().clone()
        #Pair distance under the minimum image convention
        dx = (new_x[0]-new_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_x[0] = dx+new_x[1]
        x_com = 0.5*(new_x[0]+new_x[1])
        new_x[0] -= x_com
        new_x[1] -= x_com
        #This rank's string image, COM-removed (a redundant duplicate of this
        #assignment was removed)
        new_string = self.string[_rank].view(2,3).detach().clone()
        ds = (new_string[0]-new_string[1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        new_string[0] = ds+new_string[1]
        s_com = 0.5*(new_string[0]+new_string[1])
        new_string[0] -= s_com
        new_string[1] -= s_com
        ##(2) Rotate the string image onto the configuration's bond axis
        dx /= torch.norm(dx)
        ds /= torch.norm(ds)
        v = torch.cross(ds,dx)
        cosine = torch.dot(ds,dx)
        if cosine < 0:
            #Flip the image so the two bond axes are within 90 degrees
            ds *= -1
            new_string *= -1
            v *= -1
            cosine = torch.dot(ds,dx)
        new_string[0] += torch.cross(v,new_string[0])+torch.cross(v,torch.cross(v,new_string[0]))/(1+cosine)
        new_string[1] += torch.cross(v,new_string[1])+torch.cross(v,torch.cross(v,new_string[1]))/(1+cosine)
        dX = new_x.flatten()-new_string.flatten()
        dX = dX-torch.round(dX/self.boxsize)*self.boxsize
        tangent_dx = torch.dot(self.tangent[_rank],dX)
        return -self.kappa_perpend*dX-(self.kappa_parallel-self.kappa_perpend)*self.tangent[_rank]*tangent_dx

    def forward(self,x):
        """Anisotropic squared distance between ``x`` and this rank's string
        image; 0.5*kappa_perpend times this value is the umbrella energy whose
        negative gradient matches compute_umbrellaforce.

        NOTE(review): ``x`` is detached here, so no gradient flows back through
        this forward — confirm that is intended.
        """
        ##(1) Remove center of mass
        new_x = x.view(2,3).detach().clone()
        dx = (new_x[0]-new_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        new_x[0] = dx+new_x[1]
        x_com = 0.5*(new_x[0]+new_x[1])
        new_x[0] -= x_com
        new_x[1] -= x_com
        new_string = self.string[_rank].view(2,3).detach().clone()
        ds = (new_string[0]-new_string[1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        new_string[0] = ds+new_string[1]
        s_com = 0.5*(new_string[0]+new_string[1])
        new_string[0] -= s_com
        new_string[1] -= s_com
        ##(2) Rotate the string image onto the configuration's bond axis
        dx /= torch.norm(dx)
        ds /= torch.norm(ds)
        v = torch.cross(ds,dx)
        cosine = torch.dot(ds,dx)
        if cosine < 0:
            ds *= -1
            new_string *= -1
            v *= -1
            cosine = torch.dot(ds,dx)
        new_string[0] += torch.cross(v,new_string[0])+torch.cross(v,torch.cross(v,new_string[0]))/(1+cosine)
        new_string[1] += torch.cross(v,new_string[1])+torch.cross(v,torch.cross(v,new_string[1]))/(1+cosine)
        dX = new_x.flatten()-new_string.flatten()
        dX = dX-torch.round(dX/self.boxsize)*self.boxsize
        dist_sq = torch.sum(dX**2)
        tangent_dx = torch.sum(self.tangent[_rank]*dX)
        return dist_sq+(self.kappa_parallel-self.kappa_perpend)*tangent_dx**2/self.kappa_perpend
# This is a sketch of what it should be like; we still have to make the committor
# play nicely within this function.
# Perhaps keep an ominous placeholder committor here that the actual committor overrides?
class DimerFTSUS(MyMLEXPStringSampler):
    """Dimer sampler whose dynamics are biased by an umbrella potential that
    restrains the walker around this rank's image of the FTS string.

    Args:
        param (str): name of the parameter file read by the underlying sampler.
        config (torch.Tensor): initial (2,3) dimer configuration.
        rank (int): rank of this process (index of the string image).
        beta (float): inverse temperature.
        kappa (float): spring constant handed to the underlying sampler.
        mpi_group: MPI group used for inter-replica communication.
        ftslayer: FTS layer holding the string images and tangents.
        output_time (int): interval (in timesteps) between configuration dumps.
        save_config (bool): if True, periodically dump configurations.
    """
    def __init__(self,param,config,rank,beta,kappa, mpi_group,ftslayer,output_time, save_config=False):
        super(DimerFTSUS, self).__init__(param,config.detach().clone(),rank,beta,kappa, mpi_group)
        self.output_time = output_time
        self.save_config = save_config
        self.timestep = 0
        self.torch_config = config
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta
        self.ftslayer = ftslayer
        self.setConfig(config)

    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        """Overwrite the sampler state with the given torch configuration."""
        self.setConfig(config.detach().clone())

    @torch.no_grad()
    def reset(self):
        """Reset the step counter; if the walker left this rank's FTS cell,
        put it back onto the corresponding string image."""
        self.steps = 0
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if not inftscell:
            self.setConfig(self.ftslayer.string[_rank].view(2,3).detach().clone())

    def computeMetric(self):
        #Squared distances from the current configuration to every string image
        self.distance_sq_list = self.ftslayer.compute_metric(self.getConfig().flatten())

    def computeWForce(self,x):
        #Umbrella (restraining) force supplied by the FTS layer
        return self.ftslayer.compute_umbrellaforce(x)

    @torch.no_grad()
    def step(self):
        """Advance the dynamics by one biased step under the umbrella force."""
        state_old = self.getConfig().detach().clone()
        self.stepBiased(self.computeWForce(state_old.flatten()))
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            #.grad is None until the first backward pass; nothing to zero yet
            pass

    def step_unbiased(self):
        """Advance the dynamics by one unbiased step."""
        with torch.no_grad():
            self.stepUnbiased()
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            #.grad is None until the first backward pass; nothing to zero yet
            pass

    @torch.no_grad()
    def isReactant(self, x = None):
        """Return True when the dimer bond length is at or below r0 = 2^(1/6)."""
        r0 = 2**(1/6.0)
        if x is None:
            return bool(self.getBondLength() <= r0)
        return bool(self.getBondLengthConfig(x) <= r0)

    @torch.no_grad()
    def isProduct(self,x = None):
        """Return True when the dimer bond length is at or beyond r0+2s, s = 0.5*r0."""
        r0 = 2**(1/6.0)
        s = 0.5*r0
        if x is None:
            return bool(self.getBondLength() >= r0+2*s)
        return bool(self.getBondLengthConfig(x) >= r0+2*s)

    @torch.no_grad()
    def step_bc(self):
        """One unbiased step, rejected (old state restored) unless the walker
        remains inside the reactant or product basin."""
        state_old = self.getConfig().detach().clone()
        self.step_unbiased()
        if not (self.isReactant() or self.isProduct()):
            #If it's not in the reactant or product state, reset!
            self.setConfig(state_old)

    def save(self):
        #Dump the configuration every `output_time` timesteps when enabled
        self.timestep += 1
        if self.save_config and self.timestep % self.output_time == 0:
            self.dumpConfig(self.timestep)
| 10,322 | 36.538182 | 142 | py |
tps-torch | tps-torch-main/dimer/ml_test/ftsus_sl/run_ftsus_sl.py | import sys
sys.path.append("..")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from dimer_ftsus import DimerFTSUS
from committor_nn import CommittorNet, CommittorNetDR
from dimer_ftsus import FTSLayerUSCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelAdam
from tpstorch.ml.nn import BKELossEXP, CommittorLoss2
import numpy as np
#Grab the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
#Seed torch and numpy for reproducibility
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
#Initialize neural net
#Initialization: r0 = 2^(1/6) is the dimer bond length in the compact state
r0 = 2**(1/6.0)
width = 0.5*r0
#Reactant (compact) configuration, bond aligned with the z axis
dist_init = r0
start = torch.zeros((2,3))
start[0][2] = -0.5*dist_init
start[1][2] = 0.5*dist_init
#Product (extended) state
dist_init = r0+2*width
end = torch.zeros((2,3))
end[0][2] = -0.5*dist_init
end[1][2] = 0.5*dist_init
#Initialize neural net
#committor = CommittorNet(d=6,num_nodes=2500).to('cpu')
committor = CommittorNetDR(num_nodes=2500, boxsize=10).to('cpu')
#Perpendicular/parallel umbrella spring constants for the FTS layer
kappa_perp = 300#10
kappa_par = 600
#Initialize the string for FTS method
ftslayer = FTSLayer(react_config=start.flatten(),prod_config=end.flatten(),num_nodes=world_size,boxsize=10.0,kappa_perpend=kappa_perp,kappa_parallel=kappa_par).to('cpu')
#Load the pre-initialized neural network and string
committor.load_state_dict(torch.load("../initial_1hl_nn"))
kT = 1.0
ftslayer.load_state_dict(torch.load("../test_string_config"))
ftslayer.set_tangent()
print(ftslayer.string)
print(ftslayer.tangent)
n_boundary_samples = 100
batch_size = 8
period = 25
#Three samplers: unbiased boundary-condition sampler (kappa=0), the
#umbrella-biased production sampler, and an unbiased sampler for the committor loss
dimer_sim_bc = DimerFTSUS(param="param_bc",config=ftslayer.string[rank].view(2,3).clone().detach(), rank=rank, beta=1/kT, kappa =0.0, save_config=False, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
dimer_sim = DimerFTSUS(param="param",config=ftslayer.string[rank].view(2,3).clone().detach(), rank=rank, beta=1/kT, kappa = kappa_perp, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
dimer_sim_com = DimerFTSUS(param="param",config=ftslayer.string[rank].view(2,3).clone().detach(), rank=rank, beta=1/kT, kappa = 0.0,save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
#Construct FTSSimulation
datarunner = EXPReweightStringSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=6)
#Initialize main loss function and optimizers
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossEXP( bc_sampler = dimer_sim_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5,
                    )
cmloss = CommittorLoss2( cl_sampler = dimer_sim_com,
                        committor = committor,
                        lambda_cl=100.0,
                        cl_start=10,
                        cl_end=200,
                        cl_rate=10,
                        cl_trials=50,
                        batch_size_cl=0.5
                        )
#NOTE(review): the empty-list assignment below is immediately overwritten by open()
loss_io = []
loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
#Training loop
optimizer = ParallelAdam(committor.parameters(), lr=3e-3)
#Ramp the committor-loss weight linearly from its initial value to
#lambda_cl_end between iterations cl_start and cl_end
lambda_cl_end = 10**3
cl_start=200
cl_end=10000
cl_stepsize = (lambda_cl_end-cmloss.lambda_cl)/(cl_end-cl_start)
#We can train in terms of epochs, but we will keep it in one epoch
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    for i in tqdm.tqdm(range(20000)):
        if (i > cl_start) and (i <= cl_end):
            cmloss.lambda_cl += cl_stepsize
        elif i > cl_end:
            cmloss.lambda_cl = lambda_cl_end
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        # forward + backward + optimize
        bkecost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cmcost = cmloss(i, dimer_sim.getConfig())
        cost = bkecost+cmcost
        cost.backward()
        optimizer.step()
        # print statistics
        with torch.no_grad():
            #if counter % 10 == 0:
            main_loss = loss.main_loss
            cm_loss = cmloss.cl_loss
            bc_loss = loss.bc_loss
            loss_io.write('{:d} {:.5E} {:.5E} {:.5E} \n'.format(i+1,main_loss.item(),bc_loss.item(),cm_loss.item()))#/cmloss.lambda_cl))
            loss_io.flush()
            #Print statistics
            if rank == 0:
                torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
| 5,023 | 33.176871 | 225 | py |
tps-torch | tps-torch-main/dimer/ml_test/ftsme_sl/dimer_fts.py | import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_ml import MyMLFTSSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayer#US
#Import any other thing
import tqdm, sys
class FTSLayerCustom(FTSLayer):
    r"""A linear layer whose parameters are the images of the string obtained by
    the general FTS method, customized to remove the rotational and
    translational symmetry of the dimer problem (center of mass removal with
    minimum-image wrapping, plus rotation so the dimer bond axes coincide).

    NOTE(review): this variant rotates the configuration onto the string's bond
    axis (``v = torch.cross(dx,ds)``), whereas the umbrella-sampling variant
    rotates the string onto the configuration — confirm the sign convention.

    Args:
        react_config (torch.Tensor): starting configuration in the reactant basin.
        prod_config (torch.Tensor): starting configuration in the product basin.
        num_nodes (int): number of string images.
        boxsize (float): periodic box edge length for minimum-image wrapping.
    """
    def __init__(self, react_config, prod_config, num_nodes,boxsize):
        super(FTSLayerCustom,self).__init__(react_config, prod_config, num_nodes)
        self.boxsize = boxsize

    @torch.no_grad()
    def compute_metric(self,x):
        """Return the squared distances between configuration ``x`` and every
        string image, after removing the center of mass and rotating ``x`` onto
        each image's bond axis."""
        ##(1) Remove center of mass
        old_x = x.view(2,3).detach().clone()
        #Pair distance under the minimum image convention
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        old_x[0] -= x_com
        old_x[1] -= x_com
        new_string = self.string.view(_world_size,2,3).detach().clone()
        #Pair distance of every string image
        ds = (new_string[:,0]-new_string[:,1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[:,0] = ds+new_string[:,1]
        s_com = 0.5*(new_string[:,0]+new_string[:,1])
        new_string[:,0] -= s_com
        new_string[:,1] -= s_com
        ##(2) Rotate the configuration onto each image's bond axis
        dx /= torch.norm(dx)
        new_x = torch.zeros_like(new_string)
        for i in range(_world_size):
            ds[i] /= torch.norm(ds[i])
            v = torch.cross(dx,ds[i])
            cosine = torch.dot(ds[i],dx)
            if cosine < 0:
                #Flip the image so the two bond axes are within 90 degrees
                new_string[i] *= -1
                ds[i] *= -1
                v *= -1
                cosine = torch.dot(ds[i],dx)
            new_x[i,0] = old_x[0] +torch.cross(v,old_x[0])+torch.cross(v,torch.cross(v,old_x[0]))/(1+cosine)
            new_x[i,1] = old_x[1] +torch.cross(v,old_x[1])+torch.cross(v,torch.cross(v,old_x[1]))/(1+cosine)
        return torch.sum((new_string.view(_world_size,6)-new_x.view(_world_size,6))**2,dim=1)

    def forward(self,x):
        """Squared distance between configuration ``x`` and this rank's string
        image, after COM removal and rotation.

        NOTE(review): ``x`` is detached here, so no gradient flows back through
        this forward — confirm that is intended.
        """
        ##(1) Remove center of mass
        new_x = x.view(2,3).detach().clone()
        dx = (new_x[0]-new_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        new_x[0] = dx+new_x[1]
        x_com = 0.5*(new_x[0]+new_x[1])
        new_x[0] -= x_com
        new_x[1] -= x_com
        new_string = self.string[_rank].view(2,3).detach().clone()
        ds = (new_string[0]-new_string[1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        new_string[0] = ds+new_string[1]
        s_com = 0.5*(new_string[0]+new_string[1])
        new_string[0] -= s_com
        new_string[1] -= s_com
        ##(2) Rotate the configuration onto the string's bond axis
        dx /= torch.norm(dx)
        ds /= torch.norm(ds)
        v = torch.cross(dx,ds)
        cosine = torch.dot(ds,dx)
        if cosine < 0:
            ds *= -1
            new_string *= -1
            v *= -1
            cosine = torch.dot(ds,dx)
        new_x[0] += torch.cross(v,new_x[0])+torch.cross(v,torch.cross(v,new_x[0]))/(1+cosine)
        new_x[1] += torch.cross(v,new_x[1])+torch.cross(v,torch.cross(v,new_x[1]))/(1+cosine)
        #Bugfix: new_string holds only this rank's image (6 elements), so
        #view(_world_size,6) raised a shape error whenever _world_size > 1.
        return torch.sum((new_string.flatten()-new_x.flatten())**2)
# This is a sketch of what it should be like; we still have to make the committor
# play nicely within this function.
# Perhaps keep an ominous placeholder committor here that the actual committor overrides?
class DimerFTS(MyMLFTSSampler):
    """Dimer sampler for the FTS method: it runs unbiased dynamics but rejects
    any move that exits this rank's FTS cell, keeping the walker tethered to
    its string image.

    Args:
        param (str): name of the parameter file read by the underlying sampler.
        config (torch.Tensor): initial (2,3) dimer configuration.
        rank (int): rank of this process (index of the string image).
        beta (float): inverse temperature.
        mpi_group: MPI group used for inter-replica communication.
        ftslayer: FTS layer holding the string images.
        output_time (int): interval (in timesteps) between configuration dumps.
        save_config (bool): if True, periodically dump configurations.
    """
    def __init__(self,param,config,rank,beta,mpi_group,ftslayer,output_time, save_config=False):
        super(DimerFTS, self).__init__(param,config.detach().clone(),rank,beta,mpi_group)
        self.output_time = output_time
        self.save_config = save_config
        self.timestep = 0
        self.torch_config = config
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta
        self.ftslayer = ftslayer
        self.setConfig(config)

    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        """Overwrite the sampler state with the given torch configuration."""
        self.setConfig(config.detach().clone())

    @torch.no_grad()
    def reset(self):
        """Reset the step counter; if the walker left this rank's FTS cell,
        put it back onto the corresponding string image."""
        self.steps = 0
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if not inftscell:
            self.setConfig(self.ftslayer.string[_rank].view(2,3).detach().clone())

    def computeMetric(self):
        #Squared distances from the current configuration to every string image
        self.distance_sq_list = self.ftslayer.compute_metric(self.getConfig().flatten())

    @torch.no_grad()
    def step(self):
        """One unbiased step, rejected (old state restored) if it leaves this
        rank's FTS cell."""
        state_old = self.getConfig().detach().clone()
        self.stepUnbiased()
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if not inftscell:
            self.setConfig(state_old)
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            #.grad is None until the first backward pass; nothing to zero yet
            pass

    def step_unbiased(self):
        """One unbiased step with no cell constraint."""
        with torch.no_grad():
            self.stepUnbiased()
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            #.grad is None until the first backward pass; nothing to zero yet
            pass

    @torch.no_grad()
    def isReactant(self, x = None):
        """Return True when the dimer bond length is at or below r0 = 2^(1/6)."""
        r0 = 2**(1/6.0)
        if x is None:
            return bool(self.getBondLength() <= r0)
        return bool(self.getBondLengthConfig(x) <= r0)

    @torch.no_grad()
    def isProduct(self,x = None):
        """Return True when the dimer bond length is at or beyond r0+2s, s = 0.5*r0."""
        r0 = 2**(1/6.0)
        s = 0.5*r0
        if x is None:
            return bool(self.getBondLength() >= r0+2*s)
        return bool(self.getBondLengthConfig(x) >= r0+2*s)

    def step_bc(self):
        """One unbiased step, rejected unless the walker remains inside the
        reactant or product basin (used for boundary-condition sampling)."""
        with torch.no_grad():
            state_old = self.getConfig().detach().clone()
            self.step_unbiased()
            if not (self.isReactant() or self.isProduct()):
                #If it's not in the reactant or product state, reset!
                self.setConfig(state_old)

    def save(self):
        #Dump the configuration every `output_time` timesteps when enabled
        self.timestep += 1
        if self.save_config and self.timestep % self.output_time == 0:
            self.dumpConfig(self.timestep)
| 7,906 | 34.457399 | 118 | py |
tps-torch | tps-torch-main/dimer/ml_test/ftsme_sl/run_ftsme_sl.py | import sys
sys.path.append("..")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from dimer_fts import DimerFTS
from committor_nn import CommittorNet, CommittorNetDR
from dimer_fts import FTSLayerCustom as FTSLayer
from tpstorch.ml.data import FTSSimulation#, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSUpdate
#from tpstorch.ml.nn import BKELossEXP
from tpstorch.ml.nn import CommittorLoss2, BKELossFTS
import numpy as np
#Grab the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
#Seed torch and numpy for reproducibility
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
@torch.no_grad()
def dimer_nullspace(vec, x, boxsize):
    """Map configuration ``x`` into the frame of the reference ``vec``:
    remove the dimer's center of mass (with minimum-image wrapping) and
    rotate its bond onto the reference bond direction.

    Args:
        vec (torch.Tensor): reference configuration, flattenable to (2,3).
        x (torch.Tensor): configuration to align, flattenable to (2,3).
        boxsize (float): periodic box edge length.

    Returns:
        torch.Tensor: the aligned configuration, flattened to shape (6,).
    """
    # --- center the configuration at the origin ---
    conf = x.view(2, 3).clone()
    bond = conf[0] - conf[1]
    bond = bond - torch.round(bond / boxsize) * boxsize  # minimum image
    conf[0] = bond + conf[1]
    conf -= 0.5 * (conf[0] + conf[1])
    # --- center the reference at the origin ---
    ref = vec.view(2, 3).clone()
    ref_bond = ref[0] - ref[1]
    ref_bond = ref_bond - torch.round(ref_bond / boxsize) * boxsize
    ref[0] = ref_bond + ref[1]
    ref -= 0.5 * (ref[0] + ref[1])
    # --- rotate the configuration's bond axis onto the reference axis ---
    bond = bond / torch.norm(bond)
    ref_bond = ref_bond / torch.norm(ref_bond)
    axis = torch.cross(bond, ref_bond)
    cos_angle = torch.dot(ref_bond, bond)
    if cos_angle < 0:
        # flip so the two axes are within 90 degrees before rotating
        bond = -bond
        axis = -axis
        conf = -conf
        cos_angle = torch.dot(ref_bond, bond)
    aligned = torch.zeros_like(conf)
    for k in range(2):
        spun = torch.cross(axis, conf[k])
        aligned[k] = conf[k] + spun + torch.cross(axis, spun) / (1 + cos_angle)
    return aligned.flatten()
#Initialize neural net
#Initialization: r0 = 2^(1/6) is the dimer bond length in the compact state
r0 = 2**(1/6.0)
width = 0.5*r0
#Reactant (compact) configuration, bond aligned with the z axis
dist_init = r0-0.95*r0
start = torch.zeros((2,3))
start[0][2] = -0.5*dist_init
start[1][2] = 0.5*dist_init
#Product (extended) state
dist_init = r0+2*width+0.95*r0
end = torch.zeros((2,3))
end[0][2] = -0.5*dist_init
end[1][2] = 0.5*dist_init
#Initialize neural net
#committor = CommittorNet(d=6,num_nodes=2500).to('cpu')
committor = CommittorNetDR(num_nodes=2500, boxsize=10).to('cpu')
#Initialize the string for FTS method
ftslayer = FTSLayer(react_config=start.flatten(),prod_config=end.flatten(),num_nodes=world_size,boxsize=10.0).to('cpu')
#Load the pre-initialized neural network and string
committor.load_state_dict(torch.load("../initial_1hl_nn"))
kT = 1.0
ftslayer.load_state_dict(torch.load("../test_string_config"))
n_boundary_samples = 100
batch_size = 8
period = 25
#Three samplers: boundary-condition sampler, production sampler, and a sampler
#for the committor loss; each starts on its rank's string image
dimer_sim_bc = DimerFTS(param="param_bc",config=ftslayer.string[rank].view(2,3).clone().detach(), rank=rank, beta=1/kT, save_config=False, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
dimer_sim = DimerFTS(param="param",config=ftslayer.string[rank].view(2,3).clone().detach(), rank=rank, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
dimer_sim_com = DimerFTS(param="param",config=ftslayer.string[rank].view(2,3).clone().detach(), rank=rank, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
#Construct FTSSimulation
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=0.001,momentum=0.5,nesterov=True,kappa=0.2,periodic=True,dim=3)
datarunner = FTSSimulation(dimer_sim, committor = committor, nn_training = True, period=period, batch_size=batch_size, dimN=6)
#Initialize main loss function and optimizers
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossFTS(  bc_sampler = dimer_sim_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5,
                    tol = 5e-10,
                    mode= 'shift')
cmloss = CommittorLoss2( cl_sampler = dimer_sim_com,
                        committor = committor,
                        lambda_cl=100.0,
                        cl_start=10,
                        cl_end=200,
                        cl_rate=10,
                        cl_trials=50,
                        batch_size_cl=0.5
                        )
#Ramp the committor-loss weight linearly up to lambda_cl_end between
#iterations cl_start and cl_end
lambda_cl_end = 10**3
cl_start=200
cl_end=10000
cl_stepsize = (lambda_cl_end-cmloss.lambda_cl)/(cl_end-cl_start)
#NOTE(review): the empty-list assignment below is immediately overwritten by open()
loss_io = []
loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
#Training loop
optimizer = ParallelAdam(committor.parameters(), lr=3e-3)
#We can train in terms of epochs, but we will keep it in one epoch
with open("string_{}_config.xyz".format(rank),"w") as f, open("string_{}_log.txt".format(rank),"w") as g:
    for epoch in range(1):
        if rank == 0:
            print("epoch: [{}]".format(epoch+1))
        for i in tqdm.tqdm(range(10000)):
            if (i > cl_start) and (i <= cl_end):
                cmloss.lambda_cl += cl_stepsize
            elif i > cl_end:
                cmloss.lambda_cl = lambda_cl_end
            configs, grad_xs = datarunner.runSimulation()
            # zero the parameter gradients
            optimizer.zero_grad()
            # (2) Update the neural network
            # forward + backward + optimize
            bkecost = loss(grad_xs, dimer_sim.rejection_count)
            cmcost = cmloss(i, dimer_sim.getConfig())
            cost = bkecost+cmcost
            cost.backward()
            optimizer.step()
            #Update the string images from the sampled configurations,
            #removing rigid-body components via dimer_nullspace
            ftsoptimizer.step(configs,len(configs),boxsize=10.0,remove_nullspace=dimer_nullspace)
            # print statistics
            with torch.no_grad():
                #Dump this rank's string image in extended-XYZ format
                string_temp = ftslayer.string[rank].view(2,3)
                f.write("2 \n")
                f.write('Lattice=\"10.0 0.0 0.0 0.0 10.0 0.0 0.0 0.0 10.0\" ')
                f.write('Origin=\"-5.0 -5.0 -5.0\" ')
                f.write("Properties=type:S:1:pos:R:3:aux1:R:1 \n")
                f.write("2 {} {} {} {} \n".format(string_temp[0,0],string_temp[0,1], string_temp[0,2],0.5*r0))
                f.write("2 {} {} {} {} \n".format(string_temp[1,0],string_temp[1,1], string_temp[1,2],0.5*r0))
                f.flush()
                g.write("{} {} \n".format((i+1)*period,torch.norm(string_temp[0]-string_temp[1])))
                g.flush()
                main_loss = loss.main_loss
                cm_loss = cmloss.cl_loss
                bc_loss = loss.bc_loss
                loss_io.write('{:d} {:.5E} {:.5E} {:.5E} \n'.format(i+1,main_loss.item(),bc_loss.item(),cm_loss.item()))#/cmloss.lambda_cl))
                loss_io.flush()
                #Print statistics
                if rank == 0:
                    torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
| 7,300 | 35.323383 | 210 | py |
tps-torch | tps-torch-main/dimer/ml_test/us_sl/dimer_us.py | import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_ml import MyMLEXPSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayerUS
#Import any other thing
import tqdm, sys
# This is a sketch of what it should be like, but basically have to also make committor play nicely
# within function
# have omnious committor part that actual committor overrides?
class DimerUS(MyMLEXPSampler):
    """Umbrella-sampling (US) dimer sampler steered by a committor network.

    Thin Python wrapper over the C++/MPI sampler ``MyMLEXPSampler`` that adds
    the harmonic committor-restraint force used for umbrella sampling,
    reactant/product boundary tests, and periodic configuration dumping.
    """

    def __init__(self, param, config, rank, beta, kappa, mpi_group, output_time, save_config=False):
        """Set up sampler state and read integrator settings from disk.

        Args:
            param: parameter-file name forwarded to the base sampler.
            config: torch tensor (2, 3) holding the two dimer particle positions.
            rank: MPI rank of this walker.
            beta: inverse temperature 1/kT.
            kappa: spring constant of the umbrella restraint.
            mpi_group: MPI communicator group for the base sampler.
            output_time: dump a configuration every this many timesteps.
            save_config: whether config dumping is enabled at all.
        """
        super(DimerUS, self).__init__(param, config.detach().clone(), rank, beta, kappa, mpi_group)
        self.output_time = output_time
        self.save_config = save_config
        self.timestep = 0
        self.torch_config = config
        # Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta
        self.setConfig(config)
        self.dt = 0
        self.gamma = 0
        # Read the local param file to get info on step size and friction constant.
        # NOTE(review): this always opens the file literally named "param", not
        # the `param` argument (e.g. "param_bc") — confirm that is intentional.
        with open("param", "r") as f:
            for line in f:
                tokens = line.split()
                if not tokens:
                    continue
                if tokens[0] == "gamma":
                    self.gamma = float(tokens[1])
                elif tokens[0] == "dt":
                    self.dt = float(tokens[1])

    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        """Overwrite the sampler's configuration from a torch tensor."""
        self.setConfig(config.detach().clone())

    def computeWForce(self, committor_val, qval):
        """Harmonic restraint force ``-kappa * grad(q) * (q - qval)``.

        Assumes the committor gradient w.r.t. the configuration is already
        stored in ``self.torch_config.grad`` by an earlier backward pass.
        """
        return -self.kappa * self.torch_config.grad.data * (committor_val - qval)  # /self.gamma

    def step(self, committor_val, onlytst=False):
        """Advance one biased Langevin step.

        With ``onlytst`` the restraint pulls toward the transition state
        (q = 0.5); otherwise toward this rank's window value
        ``self.qvals[_rank]`` (provided by the base sampler).
        """
        with torch.no_grad():
            if onlytst:
                self.stepBiased(self.computeWForce(committor_val, 0.5))
            else:
                self.stepBiased(self.computeWForce(committor_val, self.qvals[_rank]))
        # Re-enable autograd on the config and clear the stale gradient.
        self.torch_config.requires_grad_()
        self.torch_config.grad.data.zero_()

    def step_unbiased(self):
        """Advance one unbiased Langevin step."""
        with torch.no_grad():
            self.stepUnbiased()
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except AttributeError:
            # .grad is None before the first backward pass; nothing to zero.
            pass

    @torch.no_grad()
    def isReactant(self, x=None):
        """Return True if the (given or current) config is in the reactant basin.

        Reactant: dimer bond length <= r0, the Lennard-Jones minimum 2^(1/6).
        """
        r0 = 2**(1/6.0)
        if x is None:
            if self.getBondLength() <= r0:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) <= r0:
                return True
            else:
                return False

    @torch.no_grad()
    def isProduct(self, x=None):
        """Return True if the (given or current) config is in the product basin.

        Product: dimer bond length >= r0 + 2*s with s = 0.5*r0.
        """
        r0 = 2**(1/6.0)
        s = 0.5*r0
        if x is None:
            if self.getBondLength() >= r0 + 2*s:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) >= r0 + 2*s:
                return True
            else:
                return False

    @torch.no_grad()
    def step_bc(self):
        """Unbiased step restricted to the reactant/product basins.

        If the step leaves both basins, the configuration is reset to the
        pre-step state (rejection sampling of the boundary ensembles).
        """
        state_old = self.getConfig().detach().clone()
        self.step_unbiased()
        if self.isReactant() or self.isProduct():
            pass
        else:
            # If it's not in the reactant or product state, reset!
            self.setConfig(state_old)

    def save(self):
        """Advance the internal clock and dump the config when scheduled."""
        self.timestep += 1
        if self.save_config and self.timestep % self.output_time == 0:
            self.dumpConfig(self.timestep)
| 4,046 | 32.172131 | 107 | py |
tps-torch | tps-torch-main/dimer/ml_test/us_sl/run_us_sl.py | import sys
sys.path.append("..")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from dimer_us import DimerUS
from committor_nn import CommittorNetDR
from tpstorch.ml.data import EXPReweightSimulation
from tpstorch.ml.optim import ParallelAdam
from tpstorch.ml.nn import BKELossEXP, CommittorLoss2
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
#Initialize neural net
#Initialization
# r0 is the Lennard-Jones minimum distance; width sets the basin separation.
r0 = 2**(1/6.0)
width = 0.5*r0
#Reactant: dimer at bond length r0, aligned along z.
dist_init = r0
start = torch.zeros((2,3))
start[0][2] = -0.5*dist_init
start[1][2] = 0.5*dist_init
#Product state: dimer stretched to r0 + 2*width.
dist_init = r0+2*width
end = torch.zeros((2,3))
end[0][2] = -0.5*dist_init
end[1][2] = 0.5*dist_init
def initializer(s):
    # Linear interpolation between reactant (s=0) and product (s=1).
    return (1-s)*start+s*end
# Each MPI rank starts at an evenly spaced point along the reactant->product path.
initial_config = initializer(rank/(world_size-1))
#Initialize neural net
committor = CommittorNetDR(num_nodes=2500, boxsize=10).to('cpu')
# Umbrella-restraint spring constant.
kappa= 600#10
#Initialize the string for FTS method
#Load the pre-initialized neural network and string
committor.load_state_dict(torch.load("../initial_1hl_nn"))
kT = 1.0
n_boundary_samples = 100
batch_size = 8
period = 25
# Three samplers over the same initial config:
#   dimer_sim_bc  -> boundary-condition sampling (kappa=0, no dumps)
#   dimer_sim     -> main US sampling with the restraint (kappa=600)
#   dimer_sim_com -> committor-validation sampling (kappa=0)
dimer_sim_bc = DimerUS(param="param_bc",config=initial_config.clone().detach(), rank=rank, beta=1/kT, kappa = 0.0, save_config=False, mpi_group = mpi_group, output_time=batch_size*period)
dimer_sim = DimerUS(param="param",config=initial_config.clone().detach(), rank=rank, beta=1/kT, kappa = kappa, save_config=True, mpi_group = mpi_group, output_time=batch_size*period)
dimer_sim_com = DimerUS(param="param",config=initial_config.clone().detach(), rank=rank, beta=1/kT, kappa = 0.0,save_config=True, mpi_group = mpi_group, output_time=batch_size*period)
#Construct FTSSimulation
datarunner = EXPReweightSimulation(dimer_sim, committor, period=period, batch_size=batch_size, dimN=6)
#Initialize main loss function and optimizers
#Optimizer, doing EXP Reweighting. We can do SGD (integral control), or Heavy-Ball (PID control)
loss = BKELossEXP(  bc_sampler = dimer_sim_bc,
                    committor = committor,
                    lambda_A = 1e4,
                    lambda_B = 1e4,
                    start_react = start,
                    start_prod = end,
                    n_bc_samples = 100,
                    bc_period = 10,
                    batch_size_bc = 0.5,
                    )
cmloss = CommittorLoss2( cl_sampler = dimer_sim_com,
                        committor = committor,
                        lambda_cl=100.0,
                        cl_start=10,
                        cl_end=200,
                        cl_rate=10,
                        cl_trials=50,
                        batch_size_cl=0.5
                        )
# Linearly ramp the committor-loss weight to lambda_cl_end between
# iterations cl_start and cl_end, then hold it constant.
lambda_cl_end = 10**3
cl_start=200
cl_end=10000
cl_stepsize = (lambda_cl_end-cmloss.lambda_cl)/(cl_end-cl_start)
# NOTE(review): the list assigned here is immediately overwritten by the
# file handle on the next line; the first assignment is dead code.
loss_io = []
loss_io = open("{}_statistic_{}.txt".format(prefix,rank+1),'w')
#Training loop
optimizer = ParallelAdam(committor.parameters(), lr=1.5e-3)
#We can train in terms of epochs, but we will keep it in one epoch
for epoch in range(1):
    if rank == 0:
        print("epoch: [{}]".format(epoch+1))
    for i in tqdm.tqdm(range(10000)):#20000)):
        # get data and reweighting factors
        if (i > cl_start) and (i <= cl_end):
            cmloss.lambda_cl += cl_stepsize
        elif i > cl_end:
            cmloss.lambda_cl = lambda_cl_end
        config, grad_xs, invc, fwd_wl, bwrd_wl = datarunner.runSimulation()
        # zero the parameter gradients
        optimizer.zero_grad()
        # (2) Update the neural network
        # forward + backward + optimize
        bkecost = loss(grad_xs,invc,fwd_wl,bwrd_wl)
        cmcost = cmloss(i, dimer_sim.getConfig())
        cost = bkecost+cmcost
        cost.backward()
        optimizer.step()
        # print statistics
        with torch.no_grad():
            #if counter % 10 == 0:
            main_loss = loss.main_loss
            cm_loss = cmloss.cl_loss
            bc_loss = loss.bc_loss
            loss_io.write('{:d} {:.5E} {:.5E} {:.5E} \n'.format(i+1,main_loss.item(),bc_loss.item(),cm_loss.item()))
            loss_io.flush()
            #Print statistics
            if rank == 0:
                torch.save(committor.state_dict(), "{}_params_t_{}_{}".format(prefix,i,rank))
| 4,555 | 31.542857 | 187 | py |
AutoAE | AutoAE-master/attack_ops.py | import numpy as np
from itertools import product, repeat
import PIL
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from torchvision import transforms
from tv_utils import SpatialAffine, GaussianSmoothing
from attack_utils import projection_linf, check_shape, dlr_loss, get_diff_logits_grads_batch
from imagenet_c import corrupt
import torch.optim as optim
import math
from advertorch.attacks import LinfSPSAAttack
import os
import time
from fab_projections import fab_projection_linf, projection_l2
#from torch.autograd.gradcheck import zero_gradients #for torch 1.9
def zero_gradients(x):
    """Recursively zero the ``.grad`` buffers of a tensor or iterable of tensors.

    Drop-in replacement for ``torch.autograd.gradcheck.zero_gradients``,
    which was removed in torch >= 1.9 (see the commented-out import above).
    Tensors without an accumulated gradient are left untouched.
    """
    # BUGFIX: `collections` is never imported at module level, so the
    # iterable branch raised NameError; import it locally here.
    import collections.abc
    if isinstance(x, torch.Tensor):
        if x.grad is not None:
            x.grad.detach_()
            x.grad.zero_()
    elif isinstance(x, collections.abc.Iterable):
        for elem in x:
            zero_gradients(elem)
def predict_from_logits(logits, dim=1):
    """Return the predicted class indices (argmax of the logits along `dim`)."""
    _, predicted = torch.max(logits, dim=dim, keepdim=False)
    return predicted
def check_oscillation(x, j, k, y5, k3=0.5):
    """Per-column oscillation test over the last `k` steps.

    Counts, for each column of the step-history matrix `x`, how many of the
    last `k` transitions ending at row `j` were increases, and flags the
    columns where at most ``k * k3`` increases occurred. (`y5` is unused;
    kept for call-site compatibility.)
    """
    increase_counts = np.zeros(x.shape[1])
    for back in range(k):
        increase_counts = increase_counts + (x[j - back] > x[j - back - 1])
    threshold = k * k3 * np.ones(increase_counts.shape)
    return increase_counts <= threshold
def CW_Attack_adaptive_stepsize(x, y, model, magnitude, previous_p, max_eps, max_iters=20, target=None, _type='l2', gpu_idx=None):
    """CW-margin attack with an APGD-style adaptive step size.

    Maximizes the margin loss ``-relu(z_y - max_{j!=y} z_j + 50)`` starting
    from a random point in the `magnitude` ball, halving each sample's step
    size when its loss oscillates (same schedule as Auto-PGD). Only samples
    the model still classifies correctly are attacked; already-misclassified
    inputs are returned unchanged. The one-hot construction below hard-codes
    10 classes.

    Args:
        x, y: input batch and true labels (moved to ``cuda:{gpu_idx}``).
        model: classifier returning logits.
        magnitude: per-operation perturbation radius.
        previous_p: accumulated perturbation from earlier ops in an attack
            sequence, or None; the combined perturbation is clamped to stay
            within `max_eps`.
        max_eps: overall sequence-level budget.
        max_iters: number of attack iterations.
        target: optional target labels; only moved to device, otherwise unused.
        _type: 'linf' or 'l2' threat model.
        gpu_idx: CUDA device index.

    Returns:
        (adv, p): adversarial batch and the updated accumulated perturbation.
        NOTE(review): when previous_p is None, the returned perturbation only
        covers the attacked (still-correct) subset — confirm callers expect
        this shape.
    """
    model.eval()
    device = 'cuda:{}'.format(gpu_idx)
    x = x.to(device)
    y = y.to(device)
    if target is not None:
        target = target.to(device)
    adv = x.clone()
    pred = predict_from_logits(model(x))
    if torch.sum((pred==y)).item() == 0:
        # Whole batch already misclassified; nothing to do.
        return adv, previous_p
    # Keep only the samples the model still gets right.
    ind_non_suc = (pred==y).nonzero().squeeze()
    x = x[ind_non_suc]
    y = y[ind_non_suc]
    x = x if len(x.shape) == 4 else x.unsqueeze(0)
    y = y if len(y.shape) == 1 else y.unsqueeze(0)
    # print(x.shape)
    if previous_p is not None:
        # Bounds that keep (previous_p + new perturbation) inside max_eps.
        previous_p = previous_p.to(device)
        previous_p_c = previous_p.clone()
        previous_p = previous_p[ind_non_suc]
        previous_p = previous_p if len(previous_p.shape) == 4 else previous_p.unsqueeze(0)
        max_x = x - previous_p + max_eps
        min_x = x - previous_p - max_eps
    else:
        max_x = x + max_eps
        min_x = x - max_eps
    # 10-class one-hot encoding of the true labels.
    one_hot_y = torch.zeros(y.size(0), 10).to(device)
    one_hot_y[torch.arange(y.size(0)), y] = 1
    x.requires_grad = True
    # APGD checkpoint schedule: initial window, minimum window, decrement.
    n_iter_2, n_iter_min, size_decr = max(int(0.22 * max_iters), 1), max(int(0.06 * max_iters), 1), max(int(0.03 * max_iters), 1)
    # Random start inside the `magnitude` ball of the chosen norm.
    if _type == 'linf':
        t = 2 * torch.rand(x.shape).to(device).detach() - 1
        x_adv = x.detach() + magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * t / (t.reshape([t.shape[0], -1]).abs().max(dim=1, keepdim=True)[0].reshape([-1, 1, 1, 1]))
        x_adv = torch.clamp(torch.min(torch.max(x_adv, min_x), max_x), 0.0, 1.0)
    elif _type == 'l2':
        t = torch.randn(x.shape).to(device).detach()
        x_adv = x.detach() + magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * t / ((t ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
        if previous_p is not None:
            # Re-project so the total (previous + new) l2 norm stays <= max_eps.
            x_adv = torch.clamp(x - previous_p + (x_adv - x + previous_p) / (((x_adv - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                max_eps * torch.ones(x.shape).to(device).detach(), ((x_adv - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
    x_adv = x_adv.clamp(0., 1.)
    x_best = x_adv.clone()
    x_best_adv = x_adv.clone()
    loss_steps = torch.zeros([max_iters, x.shape[0]])
    loss_best_steps = torch.zeros([max_iters + 1, x.shape[0]])
    acc_steps = torch.zeros_like(loss_best_steps)
    # Initial loss/gradient at the random start.
    x_adv.requires_grad_()
    with torch.enable_grad():
        logits = model(x_adv) # 1 forward pass (eot_iter = 1)
        correct_logit = torch.sum(one_hot_y * logits, dim=1)
        # Best wrong-class logit (true class masked out with -1e4).
        wrong_logit,_ = torch.max((1-one_hot_y) * logits-1e4*one_hot_y, dim=1)
        loss_indiv = -F.relu(correct_logit-wrong_logit+50)
        loss = loss_indiv.sum()
        grad = torch.autograd.grad(loss, [x_adv])[0].detach()
    grad_best = grad.clone()
    acc = logits.detach().max(1)[1] == y
    acc_steps[0] = acc + 0
    loss_best = loss_indiv.detach().clone()
    # Per-sample step size, initialized to 2 * magnitude.
    step_size = magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * torch.Tensor([2.0]).to(device).detach().reshape([1, 1, 1, 1])
    x_adv_old = x_adv.clone()
    k = n_iter_2 + 0
    u = np.arange(x.shape[0])
    counter3 = 0
    loss_best_last_check = loss_best.clone()
    reduced_last_check = np.zeros(loss_best.shape) == np.zeros(loss_best.shape)
    n_reduced = 0
    for i in range(max_iters):
        with torch.no_grad():
            x_adv = x_adv.detach()
            x_adv_old = x_adv.clone()
            if _type == 'linf':
                # Sign-gradient step, then project into the magnitude ball
                # and the sequence-level [min_x, max_x] box.
                x_adv_1 = x_adv + step_size * torch.sign(grad)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, x - magnitude), x + magnitude), 0.0, 1.0)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv + (x_adv_1 - x_adv), x - magnitude), x + magnitude), 0.0, 1.0)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, min_x), max_x), 0.0, 1.0)
            elif _type == 'l2':
                # Normalized-gradient step, then project onto the l2 ball(s).
                x_adv_1 = x_adv + step_size[0] * grad / ((grad ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
                x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                    magnitude * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt()), 0.0, 1.0)
                x_adv_1 = x_adv + (x_adv_1 - x_adv)
                x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                    magnitude * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
                if previous_p is not None:
                    x_adv_1 = torch.clamp(x - previous_p + (x_adv_1 - x + previous_p) / (((x_adv_1 - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                        max_eps * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
            x_adv = x_adv_1 + 0.
        # Fresh loss/gradient at the new iterate.
        x_adv.requires_grad_()
        with torch.enable_grad():
            logits = model(x_adv) # 1 forward pass (eot_iter = 1)
            correct_logit = torch.sum(one_hot_y * logits, dim=1)
            wrong_logit,_ = torch.max((1-one_hot_y) * logits-1e4*one_hot_y, dim=1)
            loss_indiv = -F.relu(correct_logit-wrong_logit+50)
            loss = loss_indiv.sum()
            grad = torch.autograd.grad(loss, [x_adv])[0].detach() # 1 backward pass (eot_iter = 1)
        pred = logits.detach().max(1)[1] == y
        acc = torch.min(acc, pred)
        acc_steps[i + 1] = acc + 0
        # Keep the latest successful adversarial example per sample.
        x_best_adv[(pred == 0).nonzero().squeeze()] = x_adv[(pred == 0).nonzero().squeeze()] + 0.
        ### check step size
        with torch.no_grad():
            y1 = loss_indiv.detach().clone()
            loss_steps[i] = y1.cpu() + 0
            ind = (y1 > loss_best).nonzero().squeeze()
            x_best[ind] = x_adv[ind].clone()
            grad_best[ind] = grad[ind].clone()
            loss_best[ind] = y1[ind] + 0
            loss_best_steps[i + 1] = loss_best + 0
            counter3 += 1
            if counter3 == k:
                # Checkpoint reached: halve the step size for samples whose
                # loss oscillated or failed to improve, and restart those
                # samples from their best-so-far point.
                fl_oscillation = check_oscillation(loss_steps.detach().cpu().numpy(), i, k, loss_best.detach().cpu().numpy(), k3=.75)
                fl_reduce_no_impr = (~reduced_last_check) * (loss_best_last_check.cpu().numpy() >= loss_best.cpu().numpy())
                fl_oscillation = ~(~fl_oscillation * ~fl_reduce_no_impr)
                reduced_last_check = np.copy(fl_oscillation)
                loss_best_last_check = loss_best.clone()
                if np.sum(fl_oscillation) > 0:
                    step_size[u[fl_oscillation]] /= 2.0
                    n_reduced = fl_oscillation.astype(float).sum()
                    fl_oscillation = np.where(fl_oscillation)
                    x_adv[fl_oscillation] = x_best[fl_oscillation].clone()
                    grad[fl_oscillation] = grad_best[fl_oscillation].clone()
                counter3 = 0
                k = np.maximum(k - size_decr, n_iter_min)
    # Scatter the attacked subset back into the full batch.
    adv[ind_non_suc] = x_best_adv
    now_p = x_best_adv-x
    if previous_p is not None:
        previous_p_c[ind_non_suc] = previous_p + now_p
        return adv, previous_p_c
    return adv, now_p
def Record_CW_Attack_adaptive_stepsize(x, y, model, magnitude, previous_p, max_eps, max_iters=20, target=None, _type='l2', gpu_idx=None):
    """CW-margin attack (adaptive step size) that also records per-iteration success.

    Same algorithm as ``CW_Attack_adaptive_stepsize``, but the attack is run
    on the FULL batch (no subsetting to still-correct samples) and, at every
    iteration, a 0/1 mask of "still correctly classified" is appended to
    `record_list`. Entries for samples that were already misclassified on
    entry are zeroed at the end.

    Returns:
        (adv, now_p, record_list) when previous_p is None.
        NOTE(review): the previous_p branch returns only (adv, previous_p_c)
        — no record_list — which is inconsistent with the other path; and
        `previous_p` is subset by ind_non_suc while x is not, so the shapes
        mismatch whenever some samples are already misclassified. Confirm
        callers only use previous_p=None with this variant.
    """
    model.eval()
    device = 'cuda:{}'.format(gpu_idx)
    x = x.to(device)
    y = y.to(device)
    if target is not None:
        target = target.to(device)
    adv = x.clone()
    pred = predict_from_logits(model(x))
    if torch.sum((pred==y)).item() == 0:
        return adv, previous_p
    # Indices of still-correct / already-misclassified samples.
    ind_non_suc = (pred==y).nonzero().squeeze()
    ind_suc = (pred!=y).nonzero().squeeze()
    record_list = []
    x = x if len(x.shape) == 4 else x.unsqueeze(0)
    y = y if len(y.shape) == 1 else y.unsqueeze(0)
    if previous_p is not None:
        previous_p = previous_p.to(device)
        previous_p_c = previous_p.clone()
        previous_p = previous_p[ind_non_suc]
        previous_p = previous_p if len(previous_p.shape) == 4 else previous_p.unsqueeze(0)
        max_x = x - previous_p + max_eps
        min_x = x - previous_p - max_eps
    else:
        max_x = x + max_eps
        min_x = x - max_eps
    # 10-class one-hot encoding of the true labels.
    one_hot_y = torch.zeros(y.size(0), 10).to(device)
    one_hot_y[torch.arange(y.size(0)), y] = 1
    x.requires_grad = True
    # APGD checkpoint schedule: initial window, minimum window, decrement.
    n_iter_2, n_iter_min, size_decr = max(int(0.22 * max_iters), 1), max(int(0.06 * max_iters), 1), max(int(0.03 * max_iters), 1)
    # Random start inside the `magnitude` ball of the chosen norm.
    if _type == 'linf':
        t = 2 * torch.rand(x.shape).to(device).detach() - 1
        x_adv = x.detach() + magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * t / (t.reshape([t.shape[0], -1]).abs().max(dim=1, keepdim=True)[0].reshape([-1, 1, 1, 1]))
        x_adv = torch.clamp(torch.min(torch.max(x_adv, min_x), max_x), 0.0, 1.0)
    elif _type == 'l2':
        t = torch.randn(x.shape).to(device).detach()
        x_adv = x.detach() + magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * t / ((t ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
        if previous_p is not None:
            x_adv = torch.clamp(x - previous_p + (x_adv - x + previous_p) / (((x_adv - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                max_eps * torch.ones(x.shape).to(device).detach(), ((x_adv - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
    x_adv = x_adv.clamp(0., 1.)
    x_best = x_adv.clone()
    x_best_adv = x_adv.clone()
    loss_steps = torch.zeros([max_iters, x.shape[0]])
    loss_best_steps = torch.zeros([max_iters + 1, x.shape[0]])
    acc_steps = torch.zeros_like(loss_best_steps)
    # Initial loss/gradient at the random start.
    x_adv.requires_grad_()
    with torch.enable_grad():
        logits = model(x_adv) # 1 forward pass (eot_iter = 1)
        correct_logit = torch.sum(one_hot_y * logits, dim=1)
        wrong_logit,_ = torch.max((1-one_hot_y) * logits-1e4*one_hot_y, dim=1)
        loss_indiv = -F.relu(correct_logit-wrong_logit+50)
        loss = loss_indiv.sum()
        grad = torch.autograd.grad(loss, [x_adv])[0].detach()
    grad_best = grad.clone()
    acc = logits.detach().max(1)[1] == y
    acc_steps[0] = acc + 0
    loss_best = loss_indiv.detach().clone()
    step_size = magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * torch.Tensor([2.0]).to(device).detach().reshape([1, 1, 1, 1])
    x_adv_old = x_adv.clone()
    k = n_iter_2 + 0
    u = np.arange(x.shape[0])
    counter3 = 0
    loss_best_last_check = loss_best.clone()
    reduced_last_check = np.zeros(loss_best.shape) == np.zeros(loss_best.shape)
    n_reduced = 0
    for i in range(max_iters):
        with torch.no_grad():
            x_adv = x_adv.detach()
            x_adv_old = x_adv.clone()
            if _type == 'linf':
                x_adv_1 = x_adv + step_size * torch.sign(grad)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, x - magnitude), x + magnitude), 0.0, 1.0)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv + (x_adv_1 - x_adv), x - magnitude), x + magnitude), 0.0, 1.0)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, min_x), max_x), 0.0, 1.0)
            elif _type == 'l2':
                x_adv_1 = x_adv + step_size[0] * grad / ((grad ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
                x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                    magnitude * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt()), 0.0, 1.0)
                x_adv_1 = x_adv + (x_adv_1 - x_adv)
                x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                    magnitude * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
                if previous_p is not None:
                    x_adv_1 = torch.clamp(x - previous_p + (x_adv_1 - x + previous_p) / (((x_adv_1 - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                        max_eps * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
            x_adv = x_adv_1 + 0.
        x_adv.requires_grad_()
        with torch.enable_grad():
            logits = model(x_adv) # 1 forward pass (eot_iter = 1)
            # Record which samples are still classified correctly this iteration.
            pred_after_attack = predict_from_logits(logits)
            record = np.ones(len(pred_after_attack))
            record = record * (pred_after_attack==y).cpu().numpy()
            record_list.append(record)
            correct_logit = torch.sum(one_hot_y * logits, dim=1)
            wrong_logit,_ = torch.max((1-one_hot_y) * logits-1e4*one_hot_y, dim=1)
            loss_indiv = -F.relu(correct_logit-wrong_logit+50)
            loss = loss_indiv.sum()
            grad = torch.autograd.grad(loss, [x_adv])[0].detach() # 1 backward pass (eot_iter = 1)
        pred = logits.detach().max(1)[1] == y
        acc = torch.min(acc, pred)
        acc_steps[i + 1] = acc + 0
        x_best_adv[(pred == 0).nonzero().squeeze()] = x_adv[(pred == 0).nonzero().squeeze()] + 0.
        ### check step size
        with torch.no_grad():
            y1 = loss_indiv.detach().clone()
            loss_steps[i] = y1.cpu() + 0
            ind = (y1 > loss_best).nonzero().squeeze()
            x_best[ind] = x_adv[ind].clone()
            grad_best[ind] = grad[ind].clone()
            loss_best[ind] = y1[ind] + 0
            loss_best_steps[i + 1] = loss_best + 0
            counter3 += 1
            if counter3 == k:
                # Checkpoint: halve step size of oscillating/stalled samples
                # and restart them from their best-so-far point.
                fl_oscillation = check_oscillation(loss_steps.detach().cpu().numpy(), i, k, loss_best.detach().cpu().numpy(), k3=.75)
                fl_reduce_no_impr = (~reduced_last_check) * (loss_best_last_check.cpu().numpy() >= loss_best.cpu().numpy())
                fl_oscillation = ~(~fl_oscillation * ~fl_reduce_no_impr)
                reduced_last_check = np.copy(fl_oscillation)
                loss_best_last_check = loss_best.clone()
                if np.sum(fl_oscillation) > 0:
                    step_size[u[fl_oscillation]] /= 2.0
                    n_reduced = fl_oscillation.astype(float).sum()
                    fl_oscillation = np.where(fl_oscillation)
                    x_adv[fl_oscillation] = x_best[fl_oscillation].clone()
                    grad[fl_oscillation] = grad_best[fl_oscillation].clone()
                counter3 = 0
                k = np.maximum(k - size_decr, n_iter_min)
    # Only overwrite the samples that were still correct on entry.
    adv[ind_non_suc] = x_best_adv[ind_non_suc]
    now_p = x_best_adv-x
    if previous_p is not None:
        previous_p_c[ind_non_suc] = previous_p + now_p
        return adv, previous_p_c
    # Zero the records of samples that were already misclassified on entry.
    for item in record_list:
        item[ind_suc.cpu().numpy()]=0
    return adv, now_p, record_list
def MultiTargetedAttack(x, y, model, magnitude, previous_p, max_eps, max_iters=20, target=None, _type='l2', gpu_idx=None):
    """Multi-targeted APGD-style attack: one targeted DLR run per candidate class.

    For each of the 9 highest-ranked non-true classes of the clean logits,
    runs a targeted attack with momentum and adaptive step size (`run_once`),
    keeping the first successful adversarial example per sample. Only samples
    the model still classifies correctly are attacked.

    Args:
        x, y: input batch and true labels (moved to ``cuda:{gpu_idx}``).
        model: classifier returning logits.
        magnitude: per-operation perturbation radius.
        previous_p: accumulated perturbation from earlier ops in an attack
            sequence, or None; the combined perturbation is kept within
            `max_eps`.
        max_eps: overall sequence-level budget.
        max_iters: iterations per targeted run.
        target: optional target labels; only moved to device, otherwise unused.
        _type: 'linf' or 'l2' threat model.
        gpu_idx: CUDA device index.

    Returns:
        (adv, p): adversarial batch and the updated accumulated perturbation.
    """
    device = 'cuda:{}'.format(gpu_idx)
    x = x.to(device)
    y = y.to(device)
    if target is not None:
        target = target.to(device)
    adv_out = x.clone()
    pred = predict_from_logits(model(x))
    if torch.sum((pred==y)).item() == 0:
        # Whole batch already misclassified; nothing to attack.
        # BUGFIX: this previously returned the undefined name `adv`
        # (NameError at runtime); `adv_out` is the intended value, matching
        # RecordMultiTargetedAttack.
        return adv_out, previous_p
    # Keep only the samples the model still gets right.
    ind_non_suc = (pred==y).nonzero().squeeze()
    x = x[ind_non_suc]
    y = y[ind_non_suc]
    x = x if len(x.shape) == 4 else x.unsqueeze(0)
    y = y if len(y.shape) == 1 else y.unsqueeze(0)
    if previous_p is not None:
        previous_p = previous_p.to(device)
        previous_p_c = previous_p.clone()
        previous_p = previous_p[ind_non_suc]
        previous_p = previous_p if len(previous_p.shape) == 4 else previous_p.unsqueeze(0)

    def run_once(model, x_in, y_in, magnitude, max_iters, _type, target_class, max_eps, previous_p):
        """One targeted APGD run toward the `target_class`-th ranked logit."""
        x = x_in.clone() if len(x_in.shape) == 4 else x_in.clone().unsqueeze(0)
        y = y_in.clone() if len(y_in.shape) == 1 else y_in.clone().unsqueeze(0)
        # Sequence-level box that keeps previous_p + new perturbation in max_eps.
        if previous_p is not None:
            max_x = x - previous_p + max_eps
            min_x = x - previous_p - max_eps
        else:
            max_x = x + max_eps
            min_x = x - max_eps
        # APGD checkpoint schedule: initial window, minimum window, decrement.
        n_iter_2, n_iter_min, size_decr = max(int(0.22 * max_iters), 1), max(int(0.06 * max_iters), 1), max(int(0.03 * max_iters), 1)
        # Random start inside the `magnitude` ball of the chosen norm.
        if _type == 'linf':
            t = 2 * torch.rand(x.shape).to(device).detach() - 1
            x_adv = x.detach() + magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * t / (t.reshape([t.shape[0], -1]).abs().max(dim=1, keepdim=True)[0].reshape([-1, 1, 1, 1]))
            x_adv = torch.clamp(torch.min(torch.max(x_adv, min_x), max_x), 0.0, 1.0)
        elif _type == 'l2':
            t = torch.randn(x.shape).to(device).detach()
            x_adv = x.detach() + magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * t / ((t ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
            if previous_p is not None:
                x_adv = torch.clamp(x - previous_p + (x_adv - x + previous_p) / (((x_adv - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                    max_eps * torch.ones(x.shape).to(device).detach(), ((x_adv - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
        x_adv = x_adv.clamp(0., 1.)
        x_best = x_adv.clone()
        x_best_adv = x_adv.clone()
        loss_steps = torch.zeros([max_iters, x.shape[0]])
        loss_best_steps = torch.zeros([max_iters + 1, x.shape[0]])
        acc_steps = torch.zeros_like(loss_best_steps)
        # Target = the `target_class`-th largest clean logit per sample.
        output = model(x)
        y_target = output.sort(dim=1)[1][:, -target_class]
        # Initial targeted-DLR loss and gradient at the random start.
        x_adv.requires_grad_()
        grad = torch.zeros_like(x)
        for _ in range(1):
            with torch.enable_grad():
                logits = model(x_adv) # 1 forward pass (eot_iter = 1)
                loss_indiv = dlr_loss(logits, y, y_target)
                loss = loss_indiv.sum()
            grad += torch.autograd.grad(loss, [x_adv])[0].detach()
        grad_best = grad.clone()
        acc = logits.detach().max(1)[1] == y
        acc_steps[0] = acc + 0
        loss_best = loss_indiv.detach().clone()
        # Per-sample step size, initialized to 2 * magnitude.
        step_size = magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * torch.Tensor([2.0]).to(device).detach().reshape([1, 1, 1, 1])
        x_adv_old = x_adv.clone()
        counter = 0
        k = n_iter_2 + 0
        u = np.arange(x.shape[0])
        counter3 = 0
        loss_best_last_check = loss_best.clone()
        reduced_last_check = np.zeros(loss_best.shape) == np.zeros(loss_best.shape)
        n_reduced = 0
        for i in range(max_iters):
            with torch.no_grad():
                x_adv = x_adv.detach()
                grad2 = x_adv - x_adv_old  # momentum term
                x_adv_old = x_adv.clone()
                a = 0.75 if i > 0 else 1.0
                if _type == 'linf':
                    x_adv_1 = x_adv + step_size * torch.sign(grad)
                    x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, x - magnitude), x + magnitude), 0.0, 1.0)
                    x_adv_1 = torch.clamp(torch.min(torch.max(x_adv + (x_adv_1 - x_adv)*a + grad2*(1 - a), x - magnitude), x + magnitude), 0.0, 1.0)
                    x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, min_x), max_x), 0.0, 1.0)
                elif _type == 'l2':
                    x_adv_1 = x_adv + step_size[0] * grad / ((grad ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
                    x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                        magnitude * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt()), 0.0, 1.0)
                    x_adv_1 = x_adv + (x_adv_1 - x_adv)*a + grad2*(1 - a)
                    x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                        magnitude * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
                    if previous_p is not None:
                        x_adv_1 = torch.clamp(x - previous_p + (x_adv_1 - x + previous_p) / (((x_adv_1 - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                            max_eps * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
                x_adv = x_adv_1 + 0.
            # Fresh targeted-DLR loss and gradient at the new iterate.
            x_adv.requires_grad_()
            grad = torch.zeros_like(x)
            for _ in range(1):
                with torch.enable_grad():
                    logits = model(x_adv) # 1 forward pass (eot_iter = 1)
                    loss_indiv = dlr_loss(logits, y, y_target)
                    loss = loss_indiv.sum()
                grad += torch.autograd.grad(loss, [x_adv])[0].detach() # 1 backward pass (eot_iter = 1)
            pred = logits.detach().max(1)[1] == y
            acc = torch.min(acc, pred)
            acc_steps[i + 1] = acc + 0
            x_best_adv[(pred == 0).nonzero().squeeze()] = x_adv[(pred == 0).nonzero().squeeze()] + 0.
            ### check step size
            with torch.no_grad():
                y1 = loss_indiv.detach().clone()
                loss_steps[i] = y1.cpu() + 0
                ind = (y1 > loss_best).nonzero().squeeze()
                x_best[ind] = x_adv[ind].clone()
                grad_best[ind] = grad[ind].clone()
                loss_best[ind] = y1[ind] + 0
                loss_best_steps[i + 1] = loss_best + 0
                counter3 += 1
                if counter3 == k:
                    # Checkpoint: halve step size of oscillating/stalled
                    # samples and restart them from their best-so-far point.
                    fl_oscillation = check_oscillation(loss_steps.detach().cpu().numpy(), i, k, loss_best.detach().cpu().numpy(), k3=.75)
                    fl_reduce_no_impr = (~reduced_last_check) * (loss_best_last_check.cpu().numpy() >= loss_best.cpu().numpy())
                    fl_oscillation = ~(~fl_oscillation * ~fl_reduce_no_impr)
                    reduced_last_check = np.copy(fl_oscillation)
                    loss_best_last_check = loss_best.clone()
                    if np.sum(fl_oscillation) > 0:
                        step_size[u[fl_oscillation]] /= 2.0
                        n_reduced = fl_oscillation.astype(float).sum()
                        fl_oscillation = np.where(fl_oscillation)
                        x_adv[fl_oscillation] = x_best[fl_oscillation].clone()
                        grad[fl_oscillation] = grad_best[fl_oscillation].clone()
                    counter3 = 0
                    k = np.maximum(k - size_decr, n_iter_min)
        return acc, x_best_adv

    adv = x.clone()
    # Try the 2nd through 10th ranked classes as targets, keeping successes.
    for target_class in range(2, 9 + 2):
        acc_curr, adv_curr = run_once(model, x, y, magnitude, max_iters, _type, target_class, max_eps, previous_p)
        ind_curr = (acc_curr == 0).nonzero().squeeze()
        adv[ind_curr] = adv_curr[ind_curr].clone()
    now_p = adv-x
    # Scatter the attacked subset back into the full batch.
    adv_out[ind_non_suc] = adv
    if previous_p is not None:
        previous_p_c[ind_non_suc] = previous_p + now_p
        return adv_out, previous_p_c
    return adv_out, now_p
def RecordMultiTargetedAttack(x, y, model, magnitude, previous_p, max_eps, max_iters=20, target=None, _type='l2', gpu_idx=None):
    """Multi-targeted APGD-style attack that also records, per iteration, which
    samples are still classified correctly.

    Runs one targeted attack per candidate target class (2nd..10th-ranked
    logits), keeps the adversarial example of any run that fools the model,
    and AND-combines the per-iteration success records across runs.

    Returns (adv_out, previous_p_c) when previous_p is given, otherwise
    (adv_out, now_p, result_record_list).
    """
    device = 'cuda:{}'.format(gpu_idx)
    x = x.to(device)
    y = y.to(device)
    if target is not None:
        target = target.to(device)
    adv_out = x.clone()
    pred = predict_from_logits(model(x))
    # Nothing to attack: every sample is already misclassified.
    if torch.sum((pred==y)).item() == 0:
        return adv_out, previous_p
    ind_non_suc = (pred==y).nonzero().squeeze()  # still-correct samples
    ind_suc = (pred!=y).nonzero().squeeze()      # already-misclassified samples
    x = x if len(x.shape) == 4 else x.unsqueeze(0)
    y = y if len(y.shape) == 1 else y.unsqueeze(0)
    if previous_p is not None:
        previous_p = previous_p.to(device)
        previous_p_c = previous_p.clone()
        previous_p = previous_p[ind_non_suc]
        previous_p = previous_p if len(previous_p.shape) == 4 else previous_p.unsqueeze(0)
    def run_once(model, x_in, y_in, magnitude, max_iters, _type, target_class, max_eps, previous_p):
        # One APGD run targeting the `target_class`-th ranked logit.
        # NOTE: appends to the enclosing `record_list` (rebound by the caller
        # before each invocation) via closure.
        x = x_in.clone() if len(x_in.shape) == 4 else x_in.clone().unsqueeze(0)
        y = y_in.clone() if len(y_in.shape) == 1 else y_in.clone().unsqueeze(0)
        # print(x.shape)
        # Feasible box: the accumulated perturbation (previous_p + new) must
        # stay within max_eps of the original clean input.
        if previous_p is not None:
            max_x = x - previous_p + max_eps
            min_x = x - previous_p - max_eps
        else:
            max_x = x + max_eps
            min_x = x - max_eps
        # Schedule constants for the adaptive step-size checkpoints.
        n_iter_2, n_iter_min, size_decr = max(int(0.22 * max_iters), 1), max(int(0.06 * max_iters), 1), max(int(0.03 * max_iters), 1)
        # Random start on the norm ball surface.
        if _type == 'linf':
            t = 2 * torch.rand(x.shape).to(device).detach() - 1
            x_adv = x.detach() + magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * t / (t.reshape([t.shape[0], -1]).abs().max(dim=1, keepdim=True)[0].reshape([-1, 1, 1, 1]))
            x_adv = torch.clamp(torch.min(torch.max(x_adv, min_x), max_x), 0.0, 1.0)
        elif _type == 'l2':
            t = torch.randn(x.shape).to(device).detach()
            x_adv = x.detach() + magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * t / ((t ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
            if previous_p is not None:
                x_adv = torch.clamp(x - previous_p + (x_adv - x + previous_p) / (((x_adv - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                    max_eps * torch.ones(x.shape).to(device).detach(), ((x_adv - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
        x_adv = x_adv.clamp(0., 1.)
        x_best = x_adv.clone()      # best-loss iterate
        x_best_adv = x_adv.clone()  # best misclassifying iterate
        loss_steps = torch.zeros([max_iters, x.shape[0]])
        loss_best_steps = torch.zeros([max_iters + 1, x.shape[0]])
        acc_steps = torch.zeros_like(loss_best_steps)
        output = model(x)
        # Target = the class ranked `target_class`-th by the clean logits.
        y_target = output.sort(dim=1)[1][:, -target_class]
        x_adv.requires_grad_()
        grad = torch.zeros_like(x)
        for _ in range(1):
            with torch.enable_grad():
                logits = model(x_adv) # 1 forward pass (eot_iter = 1)
                loss_indiv = dlr_loss(logits, y, y_target)
                loss = loss_indiv.sum()
            grad += torch.autograd.grad(loss, [x_adv])[0].detach()
        grad_best = grad.clone()
        acc = logits.detach().max(1)[1] == y
        acc_steps[0] = acc + 0
        loss_best = loss_indiv.detach().clone()
        # Initial step size = 2 * magnitude per sample.
        step_size = magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * torch.Tensor([2.0]).to(device).detach().reshape([1, 1, 1, 1])
        x_adv_old = x_adv.clone()
        counter = 0
        k = n_iter_2 + 0  # iterations until the next step-size checkpoint
        u = np.arange(x.shape[0])
        counter3 = 0
        loss_best_last_check = loss_best.clone()
        # All-True boolean array: no sample has had its step size halved yet.
        reduced_last_check = np.zeros(loss_best.shape) == np.zeros(loss_best.shape)
        n_reduced = 0
        for i in range(max_iters):
            with torch.no_grad():
                x_adv = x_adv.detach()
                grad2 = x_adv - x_adv_old  # previous update direction (momentum term)
                x_adv_old = x_adv.clone()
                a = 0.75 if i > 0 else 1.0  # momentum mixing coefficient
                if _type == 'linf':
                    x_adv_1 = x_adv + step_size * torch.sign(grad)
                    x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, x - magnitude), x + magnitude), 0.0, 1.0)
                    x_adv_1 = torch.clamp(torch.min(torch.max(x_adv + (x_adv_1 - x_adv)*a + grad2*(1 - a), x - magnitude), x + magnitude), 0.0, 1.0)
                    x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, min_x), max_x), 0.0, 1.0)
                elif _type == 'l2':
                    x_adv_1 = x_adv + step_size[0] * grad / ((grad ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
                    x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                        magnitude * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt()), 0.0, 1.0)
                    x_adv_1 = x_adv + (x_adv_1 - x_adv)*a + grad2*(1 - a)
                    x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                        magnitude * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
                    if previous_p is not None:
                        # Re-project so previous_p + new perturbation stays within max_eps.
                        x_adv_1 = torch.clamp(x - previous_p + (x_adv_1 - x + previous_p) / (((x_adv_1 - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                            max_eps * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
                x_adv = x_adv_1 + 0.
            x_adv.requires_grad_()
            grad = torch.zeros_like(x)
            for _ in range(1):
                with torch.enable_grad():
                    logits = model(x_adv) # 1 forward pass (eot_iter = 1)
                    # Record which samples the model still classifies correctly.
                    pred_after_attack = predict_from_logits(logits)
                    record = np.ones(len(pred_after_attack))
                    record = record * (pred_after_attack==y).cpu().numpy()
                    record_list.append(record)
                    loss_indiv = dlr_loss(logits, y, y_target)
                    loss = loss_indiv.sum()
                grad += torch.autograd.grad(loss, [x_adv])[0].detach() # 1 backward pass (eot_iter = 1)
            pred = logits.detach().max(1)[1] == y
            acc = torch.min(acc, pred)  # once fooled, a sample stays fooled
            acc_steps[i + 1] = acc + 0
            x_best_adv[(pred == 0).nonzero().squeeze()] = x_adv[(pred == 0).nonzero().squeeze()] + 0.
            ### check step size
            with torch.no_grad():
                y1 = loss_indiv.detach().clone()
                loss_steps[i] = y1.cpu() + 0
                ind = (y1 > loss_best).nonzero().squeeze()
                x_best[ind] = x_adv[ind].clone()
                grad_best[ind] = grad[ind].clone()
                loss_best[ind] = y1[ind] + 0
                loss_best_steps[i + 1] = loss_best + 0
                counter3 += 1
                if counter3 == k:
                    # Halve the step size for samples whose loss oscillates or
                    # has not improved since the last checkpoint.
                    fl_oscillation = check_oscillation(loss_steps.detach().cpu().numpy(), i, k, loss_best.detach().cpu().numpy(), k3=.75)
                    fl_reduce_no_impr = (~reduced_last_check) * (loss_best_last_check.cpu().numpy() >= loss_best.cpu().numpy())
                    fl_oscillation = ~(~fl_oscillation * ~fl_reduce_no_impr)
                    reduced_last_check = np.copy(fl_oscillation)
                    loss_best_last_check = loss_best.clone()
                    if np.sum(fl_oscillation) > 0:
                        step_size[u[fl_oscillation]] /= 2.0
                        n_reduced = fl_oscillation.astype(float).sum()
                        fl_oscillation = np.where(fl_oscillation)
                        # Restart halved samples from their best-loss iterate.
                        x_adv[fl_oscillation] = x_best[fl_oscillation].clone()
                        grad[fl_oscillation] = grad_best[fl_oscillation].clone()
                    counter3 = 0
                    k = np.maximum(k - size_decr, n_iter_min)
        # Samples that were already misclassified before the attack are marked
        # 0 (not "still correct") in every record.
        for item in record_list:
            item[ind_suc.cpu().numpy()]=0
            #item[ind_suc]=0
        return acc, x_best_adv,record_list
    adv = x.clone()
    result_record_list =[]
    for target_class in range(2, 9 + 2):
        record_list = []  # rebound per run; run_once appends to it via closure
        acc_curr, adv_curr,new_record_list= run_once(model, x, y, magnitude, max_iters, _type, target_class, max_eps, previous_p)
        if len(result_record_list)==0:
            result_record_list = new_record_list
        else:
            # AND the records: a sample counts as "still correct" only if no
            # target class has fooled it by that iteration.
            for i in range(len(result_record_list)):
                for j in range(len(result_record_list[i])):
                    result_record_list[i][j] = int(result_record_list[i][j])&int(new_record_list[i][j])
        ind_curr = (acc_curr == 0).nonzero().squeeze()
        adv[ind_curr] = adv_curr[ind_curr].clone()
    now_p = adv-x
    adv_out[ind_non_suc]= adv[ind_non_suc]
    #adv_out[ind_non_suc] = adv
    if previous_p is not None:
        # NOTE(review): this branch returns 2 values while the branch below
        # returns 3 (no record list) — confirm callers handle both shapes.
        previous_p_c[ind_non_suc] = previous_p + now_p
        return adv_out, previous_p_c
    return adv_out, now_p,result_record_list
def ApgdCeAttack(x,y,model,magnitude,previous_p,max_eps,max_iters=20,target=None,_type='l2',gpu_idx=None,seed=time.time(),n_restarts=1):
    """APGD attack with cross-entropy loss and random restarts.

    Each restart attacks only the samples the model still classifies
    correctly. previous_p, max_eps and target are accepted for interface
    parity with the other attack wrappers but are not used here.
    Returns (adv, None).
    """
    device = 'cuda:{}'.format(gpu_idx)
    # Promote 0-d labels (single sample) to batched form.
    if y is not None and len(y.shape) == 0:
        x.unsqueeze_(0)
        y.unsqueeze_(0)
    x = x.detach().clone().float().to(device)
    y_pred = model(x).max(1)[1]
    if y is None:
        y = y_pred.detach().clone().long().to(device)
    else:
        y = y.detach().clone().long().to(device)
    adv = x.clone()
    acc = y_pred == y
    loss = -1e10 * torch.ones_like(acc).float()
    torch.random.manual_seed(seed)
    torch.cuda.random.manual_seed(seed)
    for _ in range(n_restarts):
        remaining = acc.nonzero().squeeze()
        if len(remaining.shape) == 0:
            remaining = remaining.unsqueeze(0)
        if remaining.numel() == 0:
            continue
        res_curr = apgd_attack_single_run(x=x[remaining].clone(), y=y[remaining].clone(),
            model=model, _type=_type, gpu_idx=gpu_idx, n_iter=max_iters, loss='ce', eps=magnitude)
        _, acc_run, _, adv_run = res_curr
        fooled = (acc_run == 0).nonzero().squeeze()
        acc[remaining[fooled]] = 0
        adv[remaining[fooled]] = adv_run[fooled].clone()
    return adv, None
def apgd_attack_single_run(x, y,model,eps,x_init=None,_type=None,gpu_idx=None,n_iter=100,loss='ce',eot_iter=20,thr_decr=.75):
    """Single APGD run (AutoAttack-style) with momentum and adaptive step size.

    loss selects the objective ('ce' or 'dlr'); eot_iter averages the gradient
    over multiple forward/backward passes (Expectation over Transformation).
    Returns (x_best, acc, loss_best, x_best_adv).
    """
    device = 'cuda:{}'.format(gpu_idx)
    orig_dim = list(x.shape[1:])
    ndims = len(orig_dim)
    # NOTE(review): len(x.shape) < ndims is never true since ndims ==
    # len(x.shape) - 1; this unsqueeze branch looks dead — confirm intent.
    if len(x.shape) < ndims:
        x = x.unsqueeze(0)
        y = y.unsqueeze(0)
    # Random start on the eps-ball surface.
    if _type == 'linf':
        t = 2 * torch.rand(x.shape).to(device).detach() - 1
        x_adv = x + eps * torch.ones_like(x).detach() * normalize(t,_type)
    elif _type == 'l2':
        t = torch.randn(x.shape).to(device).detach()
        x_adv = x + eps * torch.ones_like(x).detach() * normalize(t,_type)
    if not x_init is None:
        x_adv = x_init.clone()
    x_adv = x_adv.clamp(0., 1.)
    x_best = x_adv.clone()      # best-loss iterate
    x_best_adv = x_adv.clone()  # best misclassifying iterate
    loss_steps = torch.zeros([n_iter, x.shape[0]]).to(device)
    loss_best_steps = torch.zeros([n_iter + 1, x.shape[0]]).to(device)
    acc_steps = torch.zeros_like(loss_best_steps)
    if loss == 'ce':
        criterion_indiv = nn.CrossEntropyLoss(reduction='none')
    elif loss == 'dlr':
        criterion_indiv = auto_attack_dlr_loss
    else:
        raise ValueError('unknowkn loss')
    x_adv.requires_grad_()
    grad = torch.zeros_like(x)
    # Initial EOT-averaged gradient. NOTE: `loss` (the string parameter) is
    # shadowed by the scalar loss below.
    for _ in range(eot_iter):
        with torch.enable_grad():
            logits = model(x_adv)
            loss_indiv = criterion_indiv(logits, y)
            loss = loss_indiv.sum()
        grad += torch.autograd.grad(loss, [x_adv])[0].detach()
    grad /= float(eot_iter)
    grad_best = grad.clone()
    acc = logits.detach().max(1)[1] == y
    acc_steps[0] = acc + 0
    loss_best = loss_indiv.detach().clone()
    # Initial step size multiplier per norm type.
    alpha = 2. if _type in ['linf', 'l2'] else 1. if _type in ['L1'] else 2e-2
    orig_dim = list(x.shape[1:])
    ndims = len(orig_dim)
    step_size = alpha * eps * torch.ones([x.shape[0], *([1] * ndims)]).to(device).detach()
    x_adv_old = x_adv.clone()
    counter = 0
    n_iter_2 = max(int(0.22 * n_iter), 1)
    k = n_iter_2 + 0  # iterations until the next step-size checkpoint
    counter3 = 0
    loss_best_last_check = loss_best.clone()
    reduced_last_check = torch.ones_like(loss_best)
    n_reduced = 0
    n_fts = x.shape[-3] * x.shape[-2] * x.shape[-1]
    u = torch.arange(x.shape[0], device=device)
    for i in range(n_iter):
        ### gradient step
        with torch.no_grad():
            x_adv = x_adv.detach()
            grad2 = x_adv - x_adv_old  # previous update direction (momentum term)
            x_adv_old = x_adv.clone()
            a = 0.75 if i > 0 else 1.0  # momentum mixing coefficient
            if _type == 'linf':
                x_adv_1 = x_adv + step_size * torch.sign(grad)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1,
                    x - eps), x + eps), 0.0, 1.0)
                x_adv_1 = torch.clamp(torch.min(torch.max(
                    x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a),
                    x - eps), x + eps), 0.0, 1.0)
            elif _type == 'l2':
                x_adv_1 = x_adv + step_size * normalize(grad, _type)
                x_adv_1 = torch.clamp(x + normalize(x_adv_1 - x,_type
                    ) * torch.min(eps * torch.ones_like(x).detach(),
                    lp_norm(x_adv_1 - x,_type=_type)), 0.0, 1.0)
                x_adv_1 = x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a)
                x_adv_1 = torch.clamp(x + normalize(x_adv_1 - x, _type
                    ) * torch.min(eps * torch.ones_like(x).detach(),
                    lp_norm(x_adv_1 - x,_type=_type)), 0.0, 1.0)
            x_adv = x_adv_1 + 0.
        ### get gradient
        x_adv.requires_grad_()
        grad = torch.zeros_like(x)
        for _ in range(eot_iter):
            with torch.enable_grad():
                logits = model(x_adv)
                loss_indiv = criterion_indiv(logits, y)
                loss = loss_indiv.sum()
            grad += torch.autograd.grad(loss, [x_adv])[0].detach()
        grad /= float(eot_iter)
        pred = logits.detach().max(1)[1] == y
        acc = torch.min(acc, pred)  # once fooled, a sample stays fooled
        acc_steps[i + 1] = acc + 0
        ind_pred = (pred == 0).nonzero().squeeze()
        x_best_adv[ind_pred] = x_adv[ind_pred] + 0.
        # if self.verbose:
        #     str_stats = ' - step size: {:.5f} - topk: {:.2f}'.format(
        #         step_size.mean(), topk.mean() * n_fts) if self.norm in ['L1'] else ''
        #     print('[m] iteration: {} - best loss: {:.6f} - robust accuracy: {:.2%}{}'.format(
        #         i, loss_best.sum(), acc.float().mean(), str_stats))
        ### check step size
        with torch.no_grad():
            y1 = loss_indiv.detach().clone()
            loss_steps[i] = y1 + 0
            ind = (y1 > loss_best).nonzero().squeeze()
            x_best[ind] = x_adv[ind].clone()
            grad_best[ind] = grad[ind].clone()
            loss_best[ind] = y1[ind] + 0
            loss_best_steps[i + 1] = loss_best + 0
            counter3 += 1
            if counter3 == k:
                # Halve step size for samples whose loss oscillates or has
                # not improved since the last checkpoint; restart them from
                # their best-loss iterate.
                if _type in ['linf', 'l2']:
                    fl_oscillation = apgd_check_oscillation(loss_steps, i, k,
                        loss_best, k3=thr_decr,gpu_idx=gpu_idx)
                    fl_reduce_no_impr = (1. - reduced_last_check) * (
                        loss_best_last_check >= loss_best).float()
                    fl_oscillation = torch.max(fl_oscillation,
                        fl_reduce_no_impr)
                    reduced_last_check = fl_oscillation.clone()
                    loss_best_last_check = loss_best.clone()
                    if fl_oscillation.sum() > 0:
                        ind_fl_osc = (fl_oscillation > 0).nonzero().squeeze()
                        step_size[ind_fl_osc] /= 2.0
                        n_reduced = fl_oscillation.sum()
                        x_adv[ind_fl_osc] = x_best[ind_fl_osc].clone()
                        grad[ind_fl_osc] = grad_best[ind_fl_osc].clone()
                    size_decr = max(int(0.03 * n_iter), 1)
                    n_iter_min = max(int(0.06 * n_iter), 1)
                    k = max(k - size_decr, n_iter_min)
                counter3 = 0
    return (x_best, acc, loss_best, x_best_adv)
def RecordApgdCeAttack(x,y,model,magnitude,previous_p,max_eps,max_iters=20,target=None,_type='l2',gpu_idx=None,seed=time.time(),n_restarts=1):
    """APGD-CE attack that also returns per-iteration success records.

    Records for samples that were misclassified before the attack are zeroed.
    Returns (adv, None, record_list).
    """
    device = 'cuda:{}'.format(gpu_idx)
    # Promote 0-d labels (single sample) to batched form.
    if y is not None and len(y.shape) == 0:
        x.unsqueeze_(0)
        y.unsqueeze_(0)
    x = x.detach().clone().float().to(device)
    y_pred = model(x).max(1)[1]
    if y is None:
        y = y_pred.detach().clone().long().to(device)
    else:
        y = y.detach().clone().long().to(device)
    adv = x.clone()
    acc = y_pred == y
    ori_non_acc = y_pred != y
    loss = -1e10 * torch.ones_like(acc).float()
    for _ in range(n_restarts):
        res_curr = record_apgd_attack_single_run(x=x.clone(), y=y.clone(),
            model=model, _type=_type, gpu_idx=gpu_idx, n_iter=max_iters, loss='ce', eps=magnitude)
        _, acc_run, _, adv_run, record_list = res_curr
        fooled = (acc_run == 0).nonzero().squeeze()
        acc[fooled] = 0
        adv[fooled] = adv_run[fooled].clone()
        misclassified = ori_non_acc.nonzero().squeeze()
        for rec in record_list:
            rec[misclassified.cpu().numpy()] = 0
    return adv, None, record_list
def record_apgd_attack_single_run(x, y,model,eps,x_init=None,_type=None,gpu_idx=None,n_iter=100,loss='ce',eot_iter=20,thr_decr=.75):
    """Single APGD run that additionally records, per iteration, which samples
    the model still classifies correctly.

    Same algorithm as apgd_attack_single_run; returns
    (x_best, acc, loss_best, x_best_adv, record_list).
    """
    device = 'cuda:{}'.format(gpu_idx)
    orig_dim = list(x.shape[1:])
    ndims = len(orig_dim)
    # NOTE(review): len(x.shape) < ndims is never true since ndims ==
    # len(x.shape) - 1; this unsqueeze branch looks dead — confirm intent.
    if len(x.shape) < ndims:
        x = x.unsqueeze(0)
        y = y.unsqueeze(0)
    # Random start on the eps-ball surface.
    if _type == 'linf':
        t = 2 * torch.rand(x.shape).to(device).detach() - 1
        x_adv = x + eps * torch.ones_like(x).detach() * normalize(t,_type)
    elif _type == 'l2':
        t = torch.randn(x.shape).to(device).detach()
        x_adv = x + eps * torch.ones_like(x).detach() * normalize(t,_type)
    if not x_init is None:
        x_adv = x_init.clone()
    x_adv = x_adv.clamp(0., 1.)
    x_best = x_adv.clone()      # best-loss iterate
    x_best_adv = x_adv.clone()  # best misclassifying iterate
    loss_steps = torch.zeros([n_iter, x.shape[0]]).to(device)
    loss_best_steps = torch.zeros([n_iter + 1, x.shape[0]]).to(device)
    acc_steps = torch.zeros_like(loss_best_steps)
    if loss == 'ce':
        criterion_indiv = nn.CrossEntropyLoss(reduction='none')
    elif loss == 'dlr':
        criterion_indiv = auto_attack_dlr_loss
    else:
        raise ValueError('unknowkn loss')
    x_adv.requires_grad_()
    grad = torch.zeros_like(x)
    # Initial EOT-averaged gradient; `loss` (the string parameter) is shadowed
    # by the scalar loss below.
    for _ in range(eot_iter):
        with torch.enable_grad():
            logits = model(x_adv)
            loss_indiv = criterion_indiv(logits, y)
            loss = loss_indiv.sum()
        grad += torch.autograd.grad(loss, [x_adv])[0].detach()
    grad /= float(eot_iter)
    grad_best = grad.clone()
    acc = logits.detach().max(1)[1] == y
    acc_steps[0] = acc + 0
    loss_best = loss_indiv.detach().clone()
    alpha = 2. if _type in ['linf', 'l2'] else 1. if _type in ['L1'] else 2e-2
    orig_dim = list(x.shape[1:])
    ndims = len(orig_dim)
    step_size = alpha * eps * torch.ones([x.shape[0], *([1] * ndims)]).to(device).detach()
    x_adv_old = x_adv.clone()
    counter = 0
    n_iter_2 = max(int(0.22 * n_iter), 1)
    k = n_iter_2 + 0  # iterations until the next step-size checkpoint
    counter3 = 0
    loss_best_last_check = loss_best.clone()
    reduced_last_check = torch.ones_like(loss_best)
    n_reduced = 0
    n_fts = x.shape[-3] * x.shape[-2] * x.shape[-1]
    u = torch.arange(x.shape[0], device=device)
    record_list = []  # one np.ndarray of 0/1 flags per recorded pass
    for i in range(n_iter):
        ### gradient step
        with torch.no_grad():
            x_adv = x_adv.detach()
            grad2 = x_adv - x_adv_old  # previous update direction (momentum term)
            x_adv_old = x_adv.clone()
            a = 0.75 if i > 0 else 1.0  # momentum mixing coefficient
            if _type == 'linf':
                x_adv_1 = x_adv + step_size * torch.sign(grad)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1,
                    x - eps), x + eps), 0.0, 1.0)
                x_adv_1 = torch.clamp(torch.min(torch.max(
                    x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a),
                    x - eps), x + eps), 0.0, 1.0)
            elif _type == 'l2':
                x_adv_1 = x_adv + step_size * normalize(grad, _type)
                x_adv_1 = torch.clamp(x + normalize(x_adv_1 - x,_type
                    ) * torch.min(eps * torch.ones_like(x).detach(),
                    lp_norm(x_adv_1 - x,_type=_type)), 0.0, 1.0)
                x_adv_1 = x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a)
                x_adv_1 = torch.clamp(x + normalize(x_adv_1 - x, _type
                    ) * torch.min(eps * torch.ones_like(x).detach(),
                    lp_norm(x_adv_1 - x,_type=_type)), 0.0, 1.0)
            x_adv = x_adv_1 + 0.
        ### get gradient
        x_adv.requires_grad_()
        grad = torch.zeros_like(x)
        for _ in range(eot_iter):
            with torch.enable_grad():
                logits = model(x_adv)
                loss_indiv = criterion_indiv(logits, y) #loss function
                loss = loss_indiv.sum()
            grad += torch.autograd.grad(loss, [x_adv])[0].detach()
            # NOTE(review): with eot_iter > 1 a record is appended once per
            # EOT pass, i.e. multiple records per attack iteration — confirm
            # downstream consumers expect that.
            pred_after_attack = predict_from_logits(logits)
            record = np.ones(len(pred_after_attack))
            record= record * (pred_after_attack==y).cpu().numpy()
            record_list.append(record)
        grad /= float(eot_iter)
        pred = logits.detach().max(1)[1] == y
        acc = torch.min(acc, pred)  # once fooled, a sample stays fooled
        acc_steps[i + 1] = acc + 0
        ind_pred = (pred == 0).nonzero().squeeze()
        x_best_adv[ind_pred] = x_adv[ind_pred] + 0.
        # if self.verbose:
        #     str_stats = ' - step size: {:.5f} - topk: {:.2f}'.format(
        #         step_size.mean(), topk.mean() * n_fts) if self.norm in ['L1'] else ''
        #     print('[m] iteration: {} - best loss: {:.6f} - robust accuracy: {:.2%}{}'.format(
        #         i, loss_best.sum(), acc.float().mean(), str_stats))
        ### check step size
        with torch.no_grad():
            y1 = loss_indiv.detach().clone()
            loss_steps[i] = y1 + 0
            ind = (y1 > loss_best).nonzero().squeeze()
            x_best[ind] = x_adv[ind].clone()
            grad_best[ind] = grad[ind].clone()
            loss_best[ind] = y1[ind] + 0
            loss_best_steps[i + 1] = loss_best + 0
            counter3 += 1
            if counter3 == k:
                # Adaptive step-size checkpoint: halve step size for samples
                # whose loss oscillates or has not improved, and restart them
                # from their best-loss iterate.
                if _type in ['linf', 'l2']:
                    fl_oscillation = apgd_check_oscillation(loss_steps, i, k,
                        loss_best, k3=thr_decr,gpu_idx=gpu_idx)
                    fl_reduce_no_impr = (1. - reduced_last_check) * (
                        loss_best_last_check >= loss_best).float()
                    fl_oscillation = torch.max(fl_oscillation,
                        fl_reduce_no_impr)
                    reduced_last_check = fl_oscillation.clone()
                    loss_best_last_check = loss_best.clone()
                    if fl_oscillation.sum() > 0:
                        ind_fl_osc = (fl_oscillation > 0).nonzero().squeeze()
                        step_size[ind_fl_osc] /= 2.0
                        n_reduced = fl_oscillation.sum()
                        x_adv[ind_fl_osc] = x_best[ind_fl_osc].clone()
                        grad[ind_fl_osc] = grad_best[ind_fl_osc].clone()
                    size_decr = max(int(0.03 * n_iter), 1)
                    n_iter_min = max(int(0.06 * n_iter), 1)
                    k = max(k - size_decr, n_iter_min)
                counter3 = 0
    return (x_best, acc, loss_best, x_best_adv,record_list)
def normalize(x, _type):
    """Normalize each sample of a batch to unit norm of the given type.

    Args:
        x: batched tensor; dim 0 is the batch dimension.
        _type: 'linf' (divide by per-sample max |value|) or
               'l2' (divide by per-sample Euclidean norm).

    Returns:
        Tensor of the same shape as x, normalized per sample; a small
        1e-12 offset in the denominator avoids division by zero.

    Raises:
        ValueError: for an unsupported norm type (the original silently
        returned None, which surfaced later as an opaque TypeError).
    """
    orig_dim = list(x.shape[1:])
    ndims = len(orig_dim)
    if _type == 'linf':
        t = x.abs().view(x.shape[0], -1).max(1)[0]
        return x / (t.view(-1, *([1] * ndims)) + 1e-12)
    elif _type == 'l2':
        t = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt()
        return x / (t.view(-1, *([1] * ndims)) + 1e-12)
    raise ValueError('unsupported norm type: {}'.format(_type))
def auto_attack_dlr_loss(x, y):
    """Untargeted Difference-of-Logits-Ratio (DLR) loss, per sample.

    The margin between the true-class logit and the strongest other logit is
    divided by the spread between the top and third-highest logits, so the
    loss is shift- and scale-invariant in the logits.
    """
    sorted_logits, sorted_idx = x.sort(dim=1)
    batch = torch.arange(x.shape[0])
    # 1.0 where the true class is currently the top prediction, else 0.0.
    correct_on_top = (sorted_idx[:, -1] == y).float()
    top = sorted_logits[:, -1]
    runner_up = sorted_logits[:, -2]
    third = sorted_logits[:, -3]
    margin = x[batch, y] - runner_up * correct_on_top - top * (1. - correct_on_top)
    return -margin / (top - third + 1e-12)
def lp_norm(x,_type):
    """Return the per-sample Lp norm of a batched tensor.

    Args:
        x: batched tensor; dim 0 is the batch dimension.
        _type: only 'l2' is supported.

    Returns:
        Tensor of shape (batch, 1, ..., 1) (one trailing singleton dim per
        non-batch dim of x) holding each sample's Euclidean norm, so it
        broadcasts against x.

    Raises:
        ValueError: for an unsupported norm type (the original silently
        returned None for anything other than 'l2').
    """
    if _type == 'l2':
        orig_dim = list(x.shape[1:])
        ndims = len(orig_dim)
        t = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt()
        return t.view(-1, *([1] * ndims))
    raise ValueError('unsupported norm type: {}'.format(_type))
def apgd_check_oscillation(x, j, k, y5, k3=0.75,gpu_idx=None):
    """Flag samples whose loss is oscillating over the last k APGD steps.

    Args:
        x: (n_iter, batch) tensor of per-iteration losses.
        j: index of the current iteration.
        k: window length (number of consecutive steps inspected).
        y5: unused; kept for signature compatibility with callers.
        k3: fraction threshold — a sample is flagged when the loss increased
            in at most k*k3 of the last k steps.
        gpu_idx: unused; kept for signature compatibility. The device is now
            taken from x itself, so this also works on CPU tensors (the old
            code built 'cuda:{gpu_idx}' unconditionally and crashed on
            CPU-only machines or gpu_idx=None).

    Returns:
        Float tensor of shape (batch,) with 1.0 for flagged samples.
    """
    t = torch.zeros(x.shape[1], device=x.device)
    for counter5 in range(k):
        # Count steps in the window where the loss went up.
        t += (x[j - counter5] > x[j - counter5 - 1]).float()
    return (t <= k * k3 * torch.ones_like(t)).float()
def ApgdDlrAttack(x,y,model,magnitude,previous_p,max_eps,max_iters=20,target=None,_type='l2',gpu_idx=None,seed=time.time(),n_restarts=1):
    """APGD attack with the DLR loss and random restarts.

    Each restart attacks only the samples still classified correctly.
    previous_p, max_eps and target are accepted for interface parity with
    the other attack wrappers but are not used here. Returns (adv, None).
    """
    device = 'cuda:{}'.format(gpu_idx)
    # Promote 0-d labels (single sample) to batched form.
    if y is not None and len(y.shape) == 0:
        x.unsqueeze_(0)
        y.unsqueeze_(0)
    x = x.detach().clone().float().to(device)
    y_pred = model(x).max(1)[1]
    if y is None:
        y = y_pred.detach().clone().long().to(device)
    else:
        y = y.detach().clone().long().to(device)
    adv = x.clone()
    acc = y_pred == y
    loss = -1e10 * torch.ones_like(acc).float()
    startt = time.time()
    torch.random.manual_seed(seed)
    torch.cuda.random.manual_seed(seed)
    for _ in range(n_restarts):
        remaining = acc.nonzero().squeeze()
        if len(remaining.shape) == 0:
            remaining = remaining.unsqueeze(0)
        if remaining.numel() == 0:
            continue
        res_curr = apgd_attack_single_run(x=x[remaining].clone(), y=y[remaining].clone(),
            model=model, _type=_type, gpu_idx=gpu_idx, loss='dlr', eps=magnitude, n_iter=max_iters)
        _, acc_run, _, adv_run = res_curr
        fooled = (acc_run == 0).nonzero().squeeze()
        acc[remaining[fooled]] = 0
        adv[remaining[fooled]] = adv_run[fooled].clone()
    return adv, None
def RecordApgdDlrAttack(x,y,model,magnitude,previous_p,max_eps,max_iters=20,target=None,_type='l2',gpu_idx=None,seed=time.time(),n_restarts=1):
    """APGD-DLR attack that also returns per-iteration success records.

    Records for samples that were misclassified before the attack are zeroed.
    Returns (adv, None, record_list).
    """
    device = 'cuda:{}'.format(gpu_idx)
    # Promote 0-d labels (single sample) to batched form.
    if y is not None and len(y.shape) == 0:
        x.unsqueeze_(0)
        y.unsqueeze_(0)
    x = x.detach().clone().float().to(device)
    y_pred = model(x).max(1)[1]
    if y is None:
        y = y_pred.detach().clone().long().to(device)
    else:
        y = y.detach().clone().long().to(device)
    adv = x.clone()
    acc = y_pred == y
    ori_non_acc = y_pred != y
    loss = -1e10 * torch.ones_like(acc).float()
    for _ in range(n_restarts):
        res_curr = record_apgd_attack_single_run(x=x.clone(), y=y.clone(),
            model=model, _type=_type, gpu_idx=gpu_idx, n_iter=max_iters, loss='dlr', eps=magnitude)
        _, acc_run, _, adv_run, record_list = res_curr
        fooled = (acc_run == 0).nonzero().squeeze()
        acc[fooled] = 0
        adv[fooled] = adv_run[fooled].clone()
        misclassified = ori_non_acc.nonzero().squeeze()
        for rec in record_list:
            rec[misclassified.cpu().numpy()] = 0
    return adv, None, record_list
def FabAttack(x,y,model,magnitude,previous_p,max_eps,max_iters=20,target=None,_type='l2',gpu_idx=None,seed=time.time(),n_restarts=1):
    """FAB (minimum-norm boundary) attack with random restarts.

    Each restart runs only on samples still classified correctly; an
    adversarial example is accepted only if the model is fooled AND the
    perturbation norm is within `magnitude`. Returns (adv, None).
    """
    device = 'cuda:{}'.format(gpu_idx)
    #self.device = x.device
    adv = x.clone()
    with torch.no_grad():
        acc = model(x).max(1)[1] == y
    startt = time.time()
    torch.random.manual_seed(seed)
    torch.cuda.random.manual_seed(seed)
    for counter in range(n_restarts):
        remaining = acc.nonzero().squeeze()
        if len(remaining.shape) == 0:
            remaining = remaining.unsqueeze(0)
        if remaining.numel() == 0:
            continue
        x_run = x[remaining].clone()
        y_run = y[remaining].clone()
        adv_run = fab_attack_single_run(x_run, y_run, model=model, magnitude=magnitude, _type=_type, gpu_idx=gpu_idx, n_iter=max_iters, use_rand_start=(counter > 0), is_targeted=False)
        still_correct = model(adv_run).max(1)[1] == y_run
        if _type == 'linf':
            dist = (x_run - adv_run).abs().reshape(x_run.shape[0], -1).max(1)[0]
        elif _type == 'l2':
            dist = ((x_run - adv_run) ** 2).reshape(x_run.shape[0], -1).sum(dim=-1).sqrt()
        # Reject adversarials whose perturbation exceeds the budget.
        still_correct = torch.max(still_correct, dist > magnitude)
        fooled = (still_correct == 0).nonzero().squeeze()
        acc[remaining[fooled]] = 0
        adv[remaining[fooled]] = adv_run[fooled].clone()
    return adv, None
def fab_attack_single_run(x, y,model,magnitude,_type,use_rand_start=False, is_targeted=False,gpu_idx=None,n_iter=100,alpha_max=0.1,eta=1.05,beta=0.9):
    """
    Single run of the FAB (minimum-norm boundary) attack.

    :param x: clean images
    :param y: clean labels, if None we use the predicted labels
    :param is_targeted: True if we use the targeted version (target class
        supplied externally); only the untargeted path is exercised here.

    Returns the batch with each correctly classified sample replaced by the
    smallest-norm adversarial found (within this run), others unchanged.
    """
    device = 'cuda:{}'.format(gpu_idx)
    orig_dim = list(x.shape[1:])
    ndims = len(orig_dim)
    x = x.detach().clone().float().to(device)
    y_pred = fab_get_predicted_label(x,model)
    if y is None:
        y = y_pred.detach().clone().long().to(device)
    else:
        y = y.detach().clone().long().to(device)
    pred = y_pred == y
    corr_classified = pred.float().sum()
    # if self.verbose:
    #     print('Clean accuracy: {:.2%}'.format(pred.float().mean()))
    # Nothing to attack: every sample is already misclassified.
    if pred.sum() == 0:
        return x
    pred = fab_check_shape(pred.nonzero().squeeze())
    startt = time.time()
    # runs the attack only on correctly classified points
    im2 = x[pred].detach().clone()
    la2 = y[pred].detach().clone()
    if len(im2.shape) == ndims:
        im2 = im2.unsqueeze(0)
    bs = im2.shape[0]
    u1 = torch.arange(bs)
    adv = im2.clone()
    adv_c = x.clone()
    res2 = 1e10 * torch.ones([bs]).to(device)  # best adversarial distance so far
    res_c = torch.zeros([x.shape[0]]).to(device)
    x1 = im2.clone()
    x0 = im2.clone().reshape([bs, -1])
    counter_restarts = 0
    while counter_restarts < 1:
        # Optional random start inside the current best-distance ball.
        if use_rand_start:
            if _type == 'linf':
                t = 2 * torch.rand(x1.shape).to(device) - 1
                x1 = im2 + (torch.min(res2,
                                      magnitude * torch.ones(res2.shape)
                                      .to(device)
                                      ).reshape([-1, *[1]*ndims])
                            ) * t / (t.reshape([t.shape[0], -1]).abs()
                                     .max(dim=1, keepdim=True)[0]
                                     .reshape([-1, *[1]*ndims])) * .5
            elif _type == 'l2':
                t = torch.randn(x1.shape).to(device)
                x1 = im2 + (torch.min(res2,
                                      magnitude * torch.ones(res2.shape)
                                      .to(device)
                                      ).reshape([-1, *[1]*ndims])
                            ) * t / ((t ** 2)
                                     .view(t.shape[0], -1)
                                     .sum(dim=-1)
                                     .sqrt()
                                     .view(t.shape[0], *[1]*ndims)) * .5
            x1 = x1.clamp(0.0, 1.0)
        counter_iter = 0
        while counter_iter < n_iter:
            with torch.no_grad():
                # Linearize the classifier: per-class logit differences (df)
                # and their input gradients (dg) w.r.t. the true class.
                df, dg = fab_get_diff_logits_grads_batch(x1, la2,model,gpu_idx)
                if _type == 'linf':
                    dist1 = df.abs() / (1e-12 +
                                        dg.abs()
                                        .reshape(dg.shape[0], dg.shape[1], -1)
                                        .sum(dim=-1))
                elif _type == 'l2':
                    dist1 = df.abs() / (1e-12 + (dg ** 2)
                                        .reshape(dg.shape[0], dg.shape[1], -1)
                                        .sum(dim=-1).sqrt())
                else:
                    raise ValueError('norm not supported')
                # Pick, per sample, the class whose decision boundary is closest.
                ind = dist1.min(dim=1)[1]
                dg2 = dg[u1, ind]
                b = (- df[u1, ind] + (dg2 * x1).reshape(x1.shape[0], -1)
                                                .sum(dim=-1))
                w = dg2.reshape([bs, -1])
                # Project both the current iterate (x1) and the clean point
                # (x0) onto the approximated boundary hyperplane.
                if _type == 'linf':
                    d3 = fab_projection_linf(
                        torch.cat((x1.reshape([bs, -1]), x0), 0),
                        torch.cat((w, w), 0),
                        torch.cat((b, b), 0))
                elif _type == 'l2':
                    d3 = projection_l2(
                        torch.cat((x1.reshape([bs, -1]), x0), 0),
                        torch.cat((w, w), 0),
                        torch.cat((b, b), 0))
                d1 = torch.reshape(d3[:bs], x1.shape)
                d2 = torch.reshape(d3[-bs:], x1.shape)
                if _type == 'linf':
                    a0 = d3.abs().max(dim=1, keepdim=True)[0]\
                        .view(-1, *[1]*ndims)
                elif _type == 'l2':
                    a0 = (d3 ** 2).sum(dim=1, keepdim=True).sqrt()\
                        .view(-1, *[1]*ndims)
                a0 = torch.max(a0, 1e-8 * torch.ones(
                    a0.shape).to(device))
                a1 = a0[:bs]
                a2 = a0[-bs:]
                # Convex combination weight biased toward the clean point,
                # capped at alpha_max.
                alpha = torch.min(torch.max(a1 / (a1 + a2),
                                            torch.zeros(a1.shape)
                                            .to(device)),
                                  alpha_max * torch.ones(a1.shape)
                                  .to(device))
                # eta > 1 slightly overshoots past the boundary.
                x1 = ((x1 + eta * d1) * (1 - alpha) +
                      (im2 + d2 * eta) * alpha).clamp(0.0, 1.0)
                is_adv = fab_get_predicted_label(x1,model) != la2
                if is_adv.sum() > 0:
                    ind_adv = is_adv.nonzero().squeeze()
                    ind_adv = fab_check_shape(ind_adv)
                    if _type == 'linf':
                        t = (x1[ind_adv] - im2[ind_adv]).reshape(
                            [ind_adv.shape[0], -1]).abs().max(dim=1)[0]
                    elif _type == 'l2':
                        t = ((x1[ind_adv] - im2[ind_adv]) ** 2)\
                            .reshape(ind_adv.shape[0], -1).sum(dim=-1).sqrt()
                    # Keep only adversarials that improve the best distance.
                    adv[ind_adv] = x1[ind_adv] * (t < res2[ind_adv]).\
                        float().reshape([-1, *[1]*ndims]) + adv[ind_adv]\
                        * (t >= res2[ind_adv]).float().reshape(
                        [-1, *[1]*ndims])
                    res2[ind_adv] = t * (t < res2[ind_adv]).float()\
                        + res2[ind_adv] * (t >= res2[ind_adv]).float()
                    # Pull adversarial iterates back toward the clean point
                    # (backward step with factor beta).
                    x1[ind_adv] = im2[ind_adv] + (
                        x1[ind_adv] - im2[ind_adv]) * beta
                counter_iter += 1
        counter_restarts += 1
    ind_succ = res2 < 1e10
    # if self.verbose:
    #     print('success rate: {:.0f}/{:.0f}'
    #           .format(ind_succ.float().sum(), corr_classified) +
    #           ' (on correctly classified points) in {:.1f} s'
    #           .format(time.time() - startt))
    res_c[pred] = res2 * ind_succ.float() + 1e10 * (1 - ind_succ.float())
    ind_succ = fab_check_shape(ind_succ.nonzero().squeeze())
    adv_c[pred[ind_succ]] = adv[ind_succ].clone()
    return adv_c
def RecordFabAttack(x,y,model,magnitude,previous_p,max_eps,max_iters=20,target=None,_type='l2',gpu_idx=None,seed=time.time(),n_restarts=1):
    """FAB attack that also returns per-iteration success records.

    Unlike FabAttack, every restart attacks the full batch (the record
    variant needs per-sample flags for all inputs). Returns
    (adv, None, record_list) where record_list comes from the last restart.
    """
    device = 'cuda:{}'.format(gpu_idx)
    adv = x.clone()
    with torch.no_grad():
        acc = model(x).max(1)[1] == y
    startt = time.time()
    torch.random.manual_seed(seed)
    torch.cuda.random.manual_seed(seed)
    for counter in range(n_restarts):
        x_run = x.clone()
        y_run = y.clone()
        adv_run, record_list = record_fab_attack_single_run(x_run, y_run, model=model, magnitude=magnitude, _type=_type, gpu_idx=gpu_idx, n_iter=max_iters, use_rand_start=(counter > 0), is_targeted=False)
        still_correct = model(adv_run).max(1)[1] == y_run
        if _type == 'linf':
            dist = (x_run - adv_run).abs().reshape(x_run.shape[0], -1).max(1)[0]
        elif _type == 'l2':
            dist = ((x_run - adv_run) ** 2).reshape(x_run.shape[0], -1).sum(dim=-1).sqrt()
        # Reject adversarials whose perturbation exceeds the budget.
        still_correct = torch.max(still_correct, dist > magnitude)
        fooled = (still_correct == 0).nonzero().squeeze()
        acc[fooled] = 0
        adv[fooled] = adv_run[fooled].clone()
    return adv, None, record_list
def record_fab_attack_single_run(x, y,model,magnitude,_type,use_rand_start=False, is_targeted=False,gpu_idx=None,n_iter=100,alpha_max=0.1,eta=1.05,beta=0.9):
    """
    Single FAB run that additionally records, per iteration, which samples
    remain correctly classified (within the perturbation budget).

    :param x: clean images
    :param y: clean labels, if None we use the predicted labels
    :param is_targeted: True if we use the targeted version (target class
        supplied externally); only the untargeted path is exercised here.

    Returns (adv_c, record_list).
    """
    device = 'cuda:{}'.format(gpu_idx)
    orig_dim = list(x.shape[1:])
    ndims = len(orig_dim)
    x = x.detach().clone().float().to(device)
    y_pred = fab_get_predicted_label(x,model)
    if y is None:
        y = y_pred.detach().clone().long().to(device)
    else:
        y = y.detach().clone().long().to(device)
    pred = y_pred == y
    not_pred = y_pred!=y  # already-misclassified mask (zeroed in records)
    corr_classified = pred.float().sum()
    # if self.verbose:
    #     print('Clean accuracy: {:.2%}'.format(pred.float().mean()))
    if pred.sum() == 0:
        # NOTE(review): bare `return x` here, but the normal exit returns
        # (adv_c, record_list) — a caller unpacking two values will fail on
        # this path. Confirm callers never hit it / fix in a follow-up.
        return x
    pred = fab_check_shape(pred.nonzero().squeeze())
    startt = time.time()
    # runs the attack only on correctly classified points
    im2 = x[pred].detach().clone()
    la2 = y[pred].detach().clone()
    if len(im2.shape) == ndims:
        im2 = im2.unsqueeze(0)
    bs = im2.shape[0]
    u1 = torch.arange(bs)
    adv = im2.clone()
    adv_c = x.clone()
    res2 = 1e10 * torch.ones([bs]).to(device)  # best adversarial distance so far
    res_c = torch.zeros([x.shape[0]]).to(device)  # per-sample distances, full batch
    x1 = im2.clone()
    x0 = im2.clone().reshape([bs, -1])
    counter_restarts = 0
    while counter_restarts < 1:
        # Optional random start inside the current best-distance ball.
        if use_rand_start:
            if _type == 'linf':
                t = 2 * torch.rand(x1.shape).to(device) - 1
                x1 = im2 + (torch.min(res2,
                                      magnitude * torch.ones(res2.shape)
                                      .to(device)
                                      ).reshape([-1, *[1]*ndims])
                            ) * t / (t.reshape([t.shape[0], -1]).abs()
                                     .max(dim=1, keepdim=True)[0]
                                     .reshape([-1, *[1]*ndims])) * .5
            elif _type == 'l2':
                t = torch.randn(x1.shape).to(device)
                x1 = im2 + (torch.min(res2,
                                      magnitude * torch.ones(res2.shape)
                                      .to(device)
                                      ).reshape([-1, *[1]*ndims])
                            ) * t / ((t ** 2)
                                     .view(t.shape[0], -1)
                                     .sum(dim=-1)
                                     .sqrt()
                                     .view(t.shape[0], *[1]*ndims)) * .5
            x1 = x1.clamp(0.0, 1.0)
        record_list=[]  # one 0/1 flag array per iteration
        counter_iter = 0
        while counter_iter < n_iter:
            with torch.no_grad():
                # Linearize the classifier: per-class logit differences (df)
                # and their input gradients (dg) w.r.t. the true class.
                df, dg = fab_get_diff_logits_grads_batch(x1, la2,model,gpu_idx)
                if _type == 'linf':
                    dist1 = df.abs() / (1e-12 +
                                        dg.abs()
                                        .reshape(dg.shape[0], dg.shape[1], -1)
                                        .sum(dim=-1))
                elif _type == 'l2':
                    dist1 = df.abs() / (1e-12 + (dg ** 2)
                                        .reshape(dg.shape[0], dg.shape[1], -1)
                                        .sum(dim=-1).sqrt())
                else:
                    raise ValueError('norm not supported')
                # Pick, per sample, the class whose decision boundary is closest.
                ind = dist1.min(dim=1)[1]
                dg2 = dg[u1, ind]
                b = (- df[u1, ind] + (dg2 * x1).reshape(x1.shape[0], -1)
                                                .sum(dim=-1))
                w = dg2.reshape([bs, -1])
                # Project both the current iterate (x1) and the clean point
                # (x0) onto the approximated boundary hyperplane.
                if _type == 'linf':
                    d3 = fab_projection_linf(
                        torch.cat((x1.reshape([bs, -1]), x0), 0),
                        torch.cat((w, w), 0),
                        torch.cat((b, b), 0))
                elif _type == 'l2':
                    d3 = projection_l2(
                        torch.cat((x1.reshape([bs, -1]), x0), 0),
                        torch.cat((w, w), 0),
                        torch.cat((b, b), 0))
                d1 = torch.reshape(d3[:bs], x1.shape)
                d2 = torch.reshape(d3[-bs:], x1.shape)
                if _type == 'linf':
                    a0 = d3.abs().max(dim=1, keepdim=True)[0]\
                        .view(-1, *[1]*ndims)
                elif _type == 'l2':
                    a0 = (d3 ** 2).sum(dim=1, keepdim=True).sqrt()\
                        .view(-1, *[1]*ndims)
                a0 = torch.max(a0, 1e-8 * torch.ones(
                    a0.shape).to(device))
                a1 = a0[:bs]
                a2 = a0[-bs:]
                # Convex combination weight biased toward the clean point,
                # capped at alpha_max; eta > 1 slightly overshoots.
                alpha = torch.min(torch.max(a1 / (a1 + a2),
                                            torch.zeros(a1.shape)
                                            .to(device)),
                                  alpha_max * torch.ones(a1.shape)
                                  .to(device))
                x1 = ((x1 + eta * d1) * (1 - alpha) +
                      (im2 + d2 * eta) * alpha).clamp(0.0, 1.0)
                is_adv = fab_get_predicted_label(x1,model) != la2
                if is_adv.sum() > 0:
                    ind_adv = is_adv.nonzero().squeeze()
                    ind_adv = fab_check_shape(ind_adv)
                    if _type == 'linf':
                        t = (x1[ind_adv] - im2[ind_adv]).reshape(
                            [ind_adv.shape[0], -1]).abs().max(dim=1)[0]
                    elif _type == 'l2':
                        t = ((x1[ind_adv] - im2[ind_adv]) ** 2)\
                            .reshape(ind_adv.shape[0], -1).sum(dim=-1).sqrt()
                    # Keep only adversarials that improve the best distance.
                    adv[ind_adv] = x1[ind_adv] * (t < res2[ind_adv]).\
                        float().reshape([-1, *[1]*ndims]) + adv[ind_adv]\
                        * (t >= res2[ind_adv]).float().reshape(
                        [-1, *[1]*ndims])
                    res2[ind_adv] = t * (t < res2[ind_adv]).float()\
                        + res2[ind_adv] * (t >= res2[ind_adv]).float()
                    # Backward step: pull adversarial iterates toward the
                    # clean point with factor beta.
                    x1[ind_adv] = im2[ind_adv] + (
                        x1[ind_adv] - im2[ind_adv]) * beta
                # Build this iteration's record: 1 = still correctly
                # classified (or perturbation over budget), 0 otherwise;
                # originally-misclassified samples are forced to 0.
                ind_succ = res2 < 1e10
                ind_succ = fab_check_shape(ind_succ.nonzero().squeeze())
                acc_curr = model(adv).max(1)[1] == la2
                if _type == 'linf':
                    res = (im2 - adv).abs().reshape(im2.shape[0], -1).max(1)[0]
                elif _type == 'l2':
                    res = ((im2 - adv) ** 2).reshape(im2.shape[0], -1).sum(dim=-1).sqrt()
                acc_curr = torch.max(acc_curr, res > magnitude).cpu().numpy()
                record = np.ones(len(y))
                record[pred[ind_succ.cpu().numpy()].cpu().numpy()] = acc_curr[ind_succ.cpu().numpy()]
                record[not_pred.cpu().numpy()]=0
                record_list.append(record)
                counter_iter += 1
        counter_restarts += 1
    ind_succ = res2 < 1e10
    res_c[pred] = res2 * ind_succ.float() + 1e10 * (1 - ind_succ.float())
    ind_succ = fab_check_shape(ind_succ.nonzero().squeeze())
    adv_c[pred[ind_succ]] = adv[ind_succ].clone()
    return adv_c,record_list
def fab_get_diff_logits_grads_batch(imgs, la, model, gpu_idx):
    """Per-class logit differences and their input-gradients (FAB helper).

    For each class c, computes df[:, c] = logit_c - logit_la and
    dg[:, c] = d(logit_c)/d(imgs) - d(logit_la)/d(imgs).  The entry for the
    true label `la` itself is masked with 1e10 so it is never selected as
    the closest decision boundary.
    """
    device = 'cuda:{}'.format(gpu_idx)
    probe = imgs.clone().requires_grad_()
    with torch.enable_grad():
        out = model(probe)
        n_classes = out.shape[-1]
        grads = torch.zeros([n_classes, *imgs.size()]).to(device)
        onehot = torch.zeros_like(out)
        # One backward pass per class: select logit column c via a one-hot
        # grad-output mask, keeping the graph alive between passes.
        for cls_idx in range(n_classes):
            zero_gradients(probe)
            onehot[:, cls_idx] = 1.0
            out.backward(onehot, retain_graph=True)
            onehot[:, cls_idx] = 0.0
            grads[cls_idx] = probe.grad.data
    # Reorder to (batch, class, *image_dims) and drop graph bookkeeping.
    grads = torch.transpose(grads, 0, 1).detach()
    #y2 = self.predict(imgs).detach()
    logits = out.detach()
    batch_idx = torch.arange(imgs.shape[0])
    df = logits - logits[batch_idx, la].unsqueeze(1)
    dg = grads - grads[batch_idx, la].unsqueeze(1)
    df[batch_idx, la] = 1e10
    return df, dg
def fab_get_predicted_label(x, model):
    """Return the class index with the largest logit for each sample (no grad)."""
    with torch.no_grad():
        return torch.argmax(model(x), dim=1)
def fab_check_shape(x):
    """Promote a 0-d tensor to shape (1,); leave higher-rank tensors untouched."""
    if len(x.shape) > 0:
        return x
    return x.unsqueeze(0)
def PGD_Attack_adaptive_stepsize(x, y, model, magnitude, previous_p, max_eps, max_iters=20, target=None, _type='linf', gpu_idx=None):
    """PGD with an APGD-style adaptive (halving) step size.

    Only samples that `model` still classifies correctly are attacked; already
    misclassified samples are returned unchanged.  When `previous_p` (the
    perturbation accumulated by earlier attacks in a composite sequence) is
    given, the *total* perturbation relative to the original clean image is
    kept within `max_eps`.

    Returns (adv, p): the full adversarial batch, and either the updated
    cumulative perturbation (when `previous_p` is given) or this attack's own
    perturbation for the attacked subset.

    NOTE(review): `F.cross_entropy(..., reduce=False)` is deprecated in modern
    torch (use `reduction='none'`); left untouched here.
    """
    model.eval()
    device = 'cuda:{}'.format(gpu_idx)
    x = x.to(device)
    y = y.to(device)
    if target is not None:
        target = target.to(device)
    adv = x.clone()
    pred = predict_from_logits(model(x))
    # Nothing left to attack: every sample is already misclassified.
    if torch.sum((pred==y)).item() == 0:
        return adv, previous_p
    # Restrict the working batch to still-correct samples; restore batch/label
    # dims that squeeze() may have collapsed for a single-sample subset.
    ind_non_suc = (pred==y).nonzero().squeeze()
    x = x[ind_non_suc]
    y = y[ind_non_suc]
    x = x if len(x.shape) == 4 else x.unsqueeze(0)
    y = y if len(y.shape) == 1 else y.unsqueeze(0)
    # print(x.shape)
    if previous_p is not None:
        previous_p = previous_p.to(device)
        previous_p_c = previous_p.clone()
        previous_p = previous_p[ind_non_suc]
        previous_p = previous_p if len(previous_p.shape) == 4 else previous_p.unsqueeze(0)
        # Box bounds so that (this perturbation + previous_p) stays within
        # max_eps of the original clean image.
        max_x = x - previous_p + max_eps
        min_x = x - previous_p - max_eps
    else:
        max_x = x + max_eps
        min_x = x - max_eps
    x.requires_grad = True
    # APGD schedule: initial checkpoint interval, its floor, and its decrement.
    n_iter_2, n_iter_min, size_decr = max(int(0.22 * max_iters), 1), max(int(0.06 * max_iters), 1), max(int(0.03 * max_iters), 1)
    # Random start inside the magnitude-ball of the chosen norm.
    if _type == 'linf':
        t = 2 * torch.rand(x.shape).to(device).detach() - 1
        x_adv = x.detach() + magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * t / (t.reshape([t.shape[0], -1]).abs().max(dim=1, keepdim=True)[0].reshape([-1, 1, 1, 1]))
        x_adv = torch.clamp(torch.min(torch.max(x_adv, min_x), max_x), 0.0, 1.0)
    elif _type == 'l2':
        t = torch.randn(x.shape).to(device).detach()
        x_adv = x.detach() + magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * t / ((t ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
        if previous_p is not None:
            # Project the *cumulative* perturbation back into the max_eps L2 ball.
            x_adv = torch.clamp(x - previous_p + (x_adv - x + previous_p) / (((x_adv - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                max_eps * torch.ones(x.shape).to(device).detach(), ((x_adv - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
    x_adv = x_adv.clamp(0., 1.)
    x_best = x_adv.clone()
    x_best_adv = x_adv.clone()
    loss_steps = torch.zeros([max_iters, x.shape[0]])
    loss_best_steps = torch.zeros([max_iters + 1, x.shape[0]])
    acc_steps = torch.zeros_like(loss_best_steps)
    x_adv.requires_grad_()
    with torch.enable_grad():
        logits = model(x_adv) # 1 forward pass (eot_iter = 1)
        if target is not None:
            # Targeted: minimize CE toward `target` (hence the negation).
            loss_indiv = -F.cross_entropy(logits, target, reduce=False)
        else:
            loss_indiv = F.cross_entropy(logits, y, reduce=False)
        loss = loss_indiv.sum()
    grad = torch.autograd.grad(loss, [x_adv])[0].detach()
    grad_best = grad.clone()
    acc = logits.detach().max(1)[1] == y
    acc_steps[0] = acc + 0
    loss_best = loss_indiv.detach().clone()
    # Initial step size = 2 * magnitude per sample.
    step_size = magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * torch.Tensor([2.0]).to(device).detach().reshape([1, 1, 1, 1])
    x_adv_old = x_adv.clone()
    k = n_iter_2 + 0
    u = np.arange(x.shape[0])
    counter3 = 0
    loss_best_last_check = loss_best.clone()
    reduced_last_check = np.zeros(loss_best.shape) == np.zeros(loss_best.shape)
    n_reduced = 0
    for i in range(max_iters):
        with torch.no_grad():
            x_adv = x_adv.detach()
            x_adv_old = x_adv.clone()
            if _type == 'linf':
                x_adv_1 = x_adv + step_size * torch.sign(grad)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, x - magnitude), x + magnitude), 0.0, 1.0)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv + (x_adv_1 - x_adv), x - magnitude), x + magnitude), 0.0, 1.0)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, min_x), max_x), 0.0, 1.0)
            elif _type == 'l2':
                x_adv_1 = x_adv + step_size[0] * grad / ((grad ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
                x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                    magnitude * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt()), 0.0, 1.0)
                x_adv_1 = x_adv + (x_adv_1 - x_adv)
                x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                    magnitude * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
                if previous_p is not None:
                    x_adv_1 = torch.clamp(x - previous_p + (x_adv_1 - x + previous_p) / (((x_adv_1 - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                        max_eps * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
            x_adv = x_adv_1 + 0.
        x_adv.requires_grad_()
        with torch.enable_grad():
            logits = model(x_adv) # 1 forward pass (eot_iter = 1)
            if target is not None:
                loss_indiv = -F.cross_entropy(logits, target, reduce=False)
            else:
                loss_indiv = F.cross_entropy(logits, y, reduce=False)
            loss = loss_indiv.sum()
        grad = torch.autograd.grad(loss, [x_adv])[0].detach() # 1 backward pass (eot_iter = 1)
        pred = logits.detach().max(1)[1] == y
        acc = torch.min(acc, pred)
        acc_steps[i + 1] = acc + 0
        # Remember the latest iterate that fooled the model, per sample.
        x_best_adv[(pred == 0).nonzero().squeeze()] = x_adv[(pred == 0).nonzero().squeeze()] + 0.
        ### check step size
        with torch.no_grad():
            y1 = loss_indiv.detach().clone()
            loss_steps[i] = y1.cpu() + 0
            ind = (y1 > loss_best).nonzero().squeeze()
            x_best[ind] = x_adv[ind].clone()
            grad_best[ind] = grad[ind].clone()
            loss_best[ind] = y1[ind] + 0
            loss_best_steps[i + 1] = loss_best + 0
            counter3 += 1
            # Checkpoint: halve the step for samples whose loss oscillated or
            # stopped improving, and restart them from their best iterate.
            if counter3 == k:
                fl_oscillation = check_oscillation(loss_steps.detach().cpu().numpy(), i, k, loss_best.detach().cpu().numpy(), k3=.75)
                fl_reduce_no_impr = (~reduced_last_check) * (loss_best_last_check.cpu().numpy() >= loss_best.cpu().numpy())
                fl_oscillation = ~(~fl_oscillation * ~fl_reduce_no_impr)
                reduced_last_check = np.copy(fl_oscillation)
                loss_best_last_check = loss_best.clone()
                if np.sum(fl_oscillation) > 0:
                    step_size[u[fl_oscillation]] /= 2.0
                    n_reduced = fl_oscillation.astype(float).sum()
                    fl_oscillation = np.where(fl_oscillation)
                    x_adv[fl_oscillation] = x_best[fl_oscillation].clone()
                    grad[fl_oscillation] = grad_best[fl_oscillation].clone()
                counter3 = 0
                k = np.maximum(k - size_decr, n_iter_min)
    # Write the attacked subset back into the full batch.
    adv[ind_non_suc] = x_best_adv
    now_p = x_best_adv-x
    if previous_p is not None:
        previous_p_c[ind_non_suc] = previous_p + now_p
        return adv, previous_p_c
    return adv, now_p
def Record_PGD_Attack_adaptive_stepsize(x, y, model, magnitude, previous_p, max_eps, max_iters=20, target=None, _type='l2', gpu_idx=None):
    """Recording variant of `PGD_Attack_adaptive_stepsize`.

    Identical adaptive-step PGD, but additionally records, at every iteration,
    a 0/1 mask per sample (1 = still correctly classified) in `record_list`.
    Unlike the non-recording variant, the working batch is NOT subset to the
    still-correct samples; instead already-fooled samples are zeroed out of
    the records at the end.

    Returns (adv, previous_p_c) when `previous_p` is given — note the records
    are then discarded — otherwise (adv, now_p, record_list).

    NOTE(review): when `previous_p` is given it is indexed by `ind_non_suc`
    while `x`/`now_p` keep the full batch — presumably callers only pass
    `previous_p=None` here; verify against call sites.
    """
    model.eval()
    device = 'cuda:{}'.format(gpu_idx)
    x = x.to(device)
    y = y.to(device)
    if target is not None:
        target = target.to(device)
    adv = x.clone()
    pred = predict_from_logits(model(x))
    if torch.sum((pred==y)).item() == 0:
        return adv, previous_p
    ind_non_suc = (pred==y).nonzero().squeeze()
    ind_suc = (pred!=y).nonzero().squeeze()
    record_list = []
    x = x if len(x.shape) == 4 else x.unsqueeze(0)
    y = y if len(y.shape) == 1 else y.unsqueeze(0)
    if previous_p is not None:
        previous_p = previous_p.to(device)
        previous_p_c = previous_p.clone()
        previous_p = previous_p[ind_non_suc]
        previous_p = previous_p if len(previous_p.shape) == 4 else previous_p.unsqueeze(0)
        max_x = x - previous_p + max_eps
        min_x = x - previous_p - max_eps
    else:
        max_x = x + max_eps
        min_x = x - max_eps
    x.requires_grad = True
    # APGD schedule: initial checkpoint interval, its floor, and its decrement.
    n_iter_2, n_iter_min, size_decr = max(int(0.22 * max_iters), 1), max(int(0.06 * max_iters), 1), max(int(0.03 * max_iters), 1)
    # Random start inside the magnitude-ball of the chosen norm.
    if _type == 'linf':
        t = 2 * torch.rand(x.shape).to(device).detach() - 1
        x_adv = x.detach() + magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * t / (t.reshape([t.shape[0], -1]).abs().max(dim=1, keepdim=True)[0].reshape([-1, 1, 1, 1]))
        x_adv = torch.clamp(torch.min(torch.max(x_adv, min_x), max_x), 0.0, 1.0)
    elif _type == 'l2':
        t = torch.randn(x.shape).to(device).detach()
        x_adv = x.detach() + magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * t / ((t ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
        if previous_p is not None:
            x_adv = torch.clamp(x - previous_p + (x_adv - x + previous_p) / (((x_adv - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                max_eps * torch.ones(x.shape).to(device).detach(), ((x_adv - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
    x_adv = x_adv.clamp(0., 1.)
    x_best = x_adv.clone()
    x_best_adv = x_adv.clone()
    loss_steps = torch.zeros([max_iters, x.shape[0]])
    loss_best_steps = torch.zeros([max_iters + 1, x.shape[0]])
    acc_steps = torch.zeros_like(loss_best_steps)
    x_adv.requires_grad_()
    with torch.enable_grad():
        logits = model(x_adv) # 1 forward pass (eot_iter = 1)
        if target is not None:
            loss_indiv = -F.cross_entropy(logits, target, reduce=False)
        else:
            loss_indiv = F.cross_entropy(logits, y, reduce=False)
        loss = loss_indiv.sum()
    grad = torch.autograd.grad(loss, [x_adv])[0].detach()
    grad_best = grad.clone()
    acc = logits.detach().max(1)[1] == y
    acc_steps[0] = acc + 0
    loss_best = loss_indiv.detach().clone()
    step_size = magnitude * torch.ones([x.shape[0], 1, 1, 1]).to(device).detach() * torch.Tensor([2.0]).to(device).detach().reshape([1, 1, 1, 1])
    x_adv_old = x_adv.clone()
    k = n_iter_2 + 0
    u = np.arange(x.shape[0])
    counter3 = 0
    loss_best_last_check = loss_best.clone()
    reduced_last_check = np.zeros(loss_best.shape) == np.zeros(loss_best.shape)
    n_reduced = 0
    for i in range(max_iters):
        with torch.no_grad():
            x_adv = x_adv.detach()
            x_adv_old = x_adv.clone()
            if _type == 'linf':
                x_adv_1 = x_adv + step_size * torch.sign(grad)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, x - magnitude), x + magnitude), 0.0, 1.0)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv + (x_adv_1 - x_adv), x - magnitude), x + magnitude), 0.0, 1.0)
                x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, min_x), max_x), 0.0, 1.0)
            elif _type == 'l2':
                x_adv_1 = x_adv + step_size[0] * grad / ((grad ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
                x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                    magnitude * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt()), 0.0, 1.0)
                x_adv_1 = x_adv + (x_adv_1 - x_adv)
                x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                    magnitude * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
                if previous_p is not None:
                    x_adv_1 = torch.clamp(x - previous_p + (x_adv_1 - x + previous_p) / (((x_adv_1 - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
                        max_eps * torch.ones(x.shape).to(device).detach(), ((x_adv_1 - x + previous_p) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)
            x_adv = x_adv_1 + 0.
        x_adv.requires_grad_()
        with torch.enable_grad():
            logits = model(x_adv) # 1 forward pass (eot_iter = 1)
            # Record per-sample survival (1 = still correctly classified).
            pred_after_attack = predict_from_logits(logits)
            record = np.ones(len(pred_after_attack))
            record = record * (pred_after_attack==y).cpu().numpy()
            record_list.append(record)
            if target is not None:
                loss_indiv = -F.cross_entropy(logits, target, reduce=False)
            else:
                loss_indiv = F.cross_entropy(logits, y, reduce=False)
            loss = loss_indiv.sum()
        grad = torch.autograd.grad(loss, [x_adv])[0].detach() # 1 backward pass (eot_iter = 1)
        pred = logits.detach().max(1)[1] == y
        acc = torch.min(acc, pred)
        acc_steps[i + 1] = acc + 0
        x_best_adv[(pred == 0).nonzero().squeeze()] = x_adv[(pred == 0).nonzero().squeeze()] + 0.
        ### check step size
        with torch.no_grad():
            y1 = loss_indiv.detach().clone()
            loss_steps[i] = y1.cpu() + 0
            ind = (y1 > loss_best).nonzero().squeeze()
            x_best[ind] = x_adv[ind].clone()
            grad_best[ind] = grad[ind].clone()
            loss_best[ind] = y1[ind] + 0
            loss_best_steps[i + 1] = loss_best + 0
            counter3 += 1
            # Checkpoint: halve the step for oscillating/stalled samples and
            # restart them from their best iterate.
            if counter3 == k:
                fl_oscillation = check_oscillation(loss_steps.detach().cpu().numpy(), i, k, loss_best.detach().cpu().numpy(), k3=.75)
                fl_reduce_no_impr = (~reduced_last_check) * (loss_best_last_check.cpu().numpy() >= loss_best.cpu().numpy())
                fl_oscillation = ~(~fl_oscillation * ~fl_reduce_no_impr)
                reduced_last_check = np.copy(fl_oscillation)
                loss_best_last_check = loss_best.clone()
                if np.sum(fl_oscillation) > 0:
                    step_size[u[fl_oscillation]] /= 2.0
                    n_reduced = fl_oscillation.astype(float).sum()
                    fl_oscillation = np.where(fl_oscillation)
                    x_adv[fl_oscillation] = x_best[fl_oscillation].clone()
                    grad[fl_oscillation] = grad_best[fl_oscillation].clone()
                counter3 = 0
                k = np.maximum(k - size_decr, n_iter_min)
    adv[ind_non_suc] = x_best_adv[ind_non_suc]
    now_p = x_best_adv-x
    if previous_p is not None:
        previous_p_c[ind_non_suc] = previous_p + now_p
        return adv, previous_p_c
    # Samples that were already misclassified before the attack contribute 0
    # to every per-iteration record.
    for item in record_list:
        item[ind_suc.cpu().numpy()]=0
    return adv, now_p, record_list
def DDNL2Attack(x, y, model, magnitude, previous_p, max_eps, max_iters=20, target=None, _type='l2', gpu_idx=None):
    """Decoupled Direction and Norm (DDN) L2 attack.

    Optimizes a perturbation `delta` with SGD + cosine-annealed LR, growing or
    shrinking its L2 norm by 5% each step depending on whether the current
    iterate is adversarial; the final perturbation is additionally projected
    into the `max_eps` L2 ball (relative to the original clean image when
    `previous_p` is given).  `magnitude` is unused by this attack.

    Returns (adv, p) like the other attackers in this module.
    """
    device = 'cuda:{}'.format(gpu_idx)
    x = x.to(device)
    y = y.to(device)
    if target is not None:
        target = target.to(device)
    adv = x.clone()
    pred = predict_from_logits(model(x))
    # Nothing left to attack: every sample is already misclassified.
    if torch.sum((pred==y)).item() == 0:
        return adv, previous_p
    ind_non_suc = (pred==y).nonzero().squeeze()
    x = x[ind_non_suc]
    y = y[ind_non_suc]
    x = x if len(x.shape) == 4 else x.unsqueeze(0)
    y = y if len(y.shape) == 1 else y.unsqueeze(0)
    if previous_p is not None:
        previous_p = previous_p.to(device)
        previous_p_c = previous_p.clone()
        previous_p = previous_p[ind_non_suc]
        previous_p = previous_p if len(previous_p.shape) == 4 else previous_p.unsqueeze(0)
    batch_size = x.shape[0]
    data_dims = (1,) * (x.dim() - 1)
    # Per-sample target norm for delta; multiplicatively adapted each step.
    norm = torch.full((batch_size,), 1, dtype=torch.float).to(device)
    # Largest possible L2 distance from x inside the [0,1] box (fallback best).
    worst_norm = torch.max(x - 0, 1 - x).flatten(1).norm(p=2, dim=1)
    delta = torch.zeros_like(x, requires_grad=True)
    optimizer = torch.optim.SGD([delta], lr=1)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=max_iters, eta_min=0.01)
    best_l2 = worst_norm.clone()
    best_delta = torch.zeros_like(x)
    for i in range(max_iters):
        l2 = delta.data.flatten(1).norm(p=2, dim=1)
        logits = model(x + delta)
        pred_labels = logits.argmax(1)
        if target is not None:
            loss = F.cross_entropy(logits, target)
        else:
            loss = -F.cross_entropy(logits, y)
        is_adv = (pred_labels == target) if target is not None else (
            pred_labels != y)
        # Track the smallest-norm delta that is adversarial, per sample.
        is_smaller = l2 < best_l2
        is_both = is_adv * is_smaller
        best_l2[is_both] = l2[is_both]
        best_delta[is_both] = delta.data[is_both]
        optimizer.zero_grad()
        loss.backward()
        # renorming gradient
        grad_norms = delta.grad.flatten(1).norm(p=2, dim=1)
        delta.grad.div_(grad_norms.view(-1, *data_dims))
        # avoid nan or inf if gradient is 0
        if (grad_norms == 0).any():
            delta.grad[grad_norms == 0] = torch.randn_like(
                delta.grad[grad_norms == 0])
        optimizer.step()
        scheduler.step()
        # Decouple norm from direction: shrink by 5% if adversarial, grow otherwise.
        norm.mul_(1 - (2 * is_adv.float() - 1) * 0.05)
        delta.data.mul_((norm / delta.data.flatten(1).norm(
            p=2, dim=1)).view(-1, *data_dims))
        # Quantize x+delta to the 8-bit image grid and clamp to [0,1].
        delta.data.add_(x)
        delta.data.mul_(255).round_().div_(255)
        delta.data.clamp_(0, 1).sub_(x)
    # print(best_l2)
    adv_imgs = x + best_delta
    # Project this attack's perturbation into the max_eps L2 ball.
    dist = (adv_imgs - x)
    dist = dist.view(x.shape[0], -1)
    dist_norm = torch.norm(dist, dim=1, keepdim=True)
    mask = (dist_norm > max_eps).unsqueeze(2).unsqueeze(3)
    dist = dist / dist_norm
    dist *= max_eps
    dist = dist.view(x.shape)
    adv_imgs = (x + dist) * mask.float() + adv_imgs * (1 - mask.float())
    if previous_p is not None:
        # Also project the *cumulative* perturbation w.r.t. the clean image.
        original_image = x - previous_p
        global_dist = adv_imgs - original_image
        global_dist = global_dist.view(x.shape[0], -1)
        dist_norm = torch.norm(global_dist, dim=1, keepdim=True)
        # print(dist_norm)
        mask = (dist_norm > max_eps).unsqueeze(2).unsqueeze(3)
        global_dist = global_dist / dist_norm
        global_dist *= max_eps
        global_dist = global_dist.view(x.shape)
        adv_imgs = (original_image + global_dist) * mask.float() + adv_imgs * (1 - mask.float())
    now_p = adv_imgs-x
    adv[ind_non_suc] = adv_imgs
    if previous_p is not None:
        previous_p_c[ind_non_suc] = previous_p + now_p
        return adv, previous_p_c
    return adv, now_p
def RecordDDNL2Attack(x, y, model, magnitude, previous_p, max_eps, max_iters=20, target=None, _type='l2', gpu_idx=None):
    """Recording variant of `DDNL2Attack`.

    Same DDN optimization, but (unlike the non-recording variant) the working
    batch is NOT subset to still-correct samples, and a single 0/1 survival
    record (1 = still correctly classified after the final, eps-projected
    adversarial image) is appended to `record_list`; pre-fooled samples are
    zeroed out of it at the end.

    Returns (adv, previous_p_c) when `previous_p` is given — records are then
    discarded — otherwise (adv, now_p, record_list).
    """
    device = 'cuda:{}'.format(gpu_idx)
    x = x.to(device)
    y = y.to(device)
    if target is not None:
        target = target.to(device)
    adv = x.clone()
    pred = predict_from_logits(model(x))
    if torch.sum((pred==y)).item() == 0:
        return adv, previous_p
    ind_non_suc = (pred==y).nonzero().squeeze()
    ind_suc = (pred!=y).nonzero().squeeze()
    record_list = []
    x = x if len(x.shape) == 4 else x.unsqueeze(0)
    y = y if len(y.shape) == 1 else y.unsqueeze(0)
    if previous_p is not None:
        previous_p = previous_p.to(device)
        previous_p_c = previous_p.clone()
        previous_p = previous_p[ind_non_suc]
        previous_p = previous_p if len(previous_p.shape) == 4 else previous_p.unsqueeze(0)
    batch_size = x.shape[0]
    data_dims = (1,) * (x.dim() - 1)
    norm = torch.full((batch_size,), 1, dtype=torch.float).to(device)
    worst_norm = torch.max(x - 0, 1 - x).flatten(1).norm(p=2, dim=1)
    delta = torch.zeros_like(x, requires_grad=True)
    optimizer = torch.optim.SGD([delta], lr=1)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=max_iters, eta_min=0.01)
    best_l2 = worst_norm.clone()
    best_delta = torch.zeros_like(x)
    for i in range(max_iters):
        l2 = delta.data.flatten(1).norm(p=2, dim=1)
        logits = model(x + delta)
        pred_labels = logits.argmax(1)
        if target is not None:
            loss = F.cross_entropy(logits, target)
        else:
            loss = -F.cross_entropy(logits, y)
        is_adv = (pred_labels == target) if target is not None else (
            pred_labels != y)
        is_smaller = l2 < best_l2
        is_both = is_adv * is_smaller
        best_l2[is_both] = l2[is_both]
        best_delta[is_both] = delta.data[is_both]
        optimizer.zero_grad()
        loss.backward()
        # renorming gradient
        grad_norms = delta.grad.flatten(1).norm(p=2, dim=1)
        delta.grad.div_(grad_norms.view(-1, *data_dims))
        # avoid nan or inf if gradient is 0
        if (grad_norms == 0).any():
            delta.grad[grad_norms == 0] = torch.randn_like(
                delta.grad[grad_norms == 0])
        optimizer.step()
        scheduler.step()
        # Decouple norm from direction: shrink by 5% if adversarial, grow otherwise.
        norm.mul_(1 - (2 * is_adv.float() - 1) * 0.05)
        delta.data.mul_((norm / delta.data.flatten(1).norm(
            p=2, dim=1)).view(-1, *data_dims))
        # Quantize x+delta to the 8-bit image grid and clamp to [0,1].
        delta.data.add_(x)
        delta.data.mul_(255).round_().div_(255)
        delta.data.clamp_(0, 1).sub_(x)
    # print(best_l2)
    adv_imgs = x + best_delta
    # Project this attack's perturbation into the max_eps L2 ball.
    dist = (adv_imgs - x)
    dist = dist.view(x.shape[0], -1)
    dist_norm = torch.norm(dist, dim=1, keepdim=True)
    mask = (dist_norm > max_eps).unsqueeze(2).unsqueeze(3)
    dist = dist / dist_norm
    dist *= max_eps
    dist = dist.view(x.shape)
    adv_imgs = (x + dist) * mask.float() + adv_imgs * (1 - mask.float()) #?
    # Single survival record, taken on the projected adversarial batch.
    logits = model(adv_imgs)
    pred_after_attack = predict_from_logits(logits)
    record = np.ones(len(pred_after_attack))
    record = record * (pred_after_attack==y).cpu().numpy()
    record_list.append(record)
    if previous_p is not None: #None
        original_image = x - previous_p
        global_dist = adv_imgs - original_image
        global_dist = global_dist.view(x.shape[0], -1)
        dist_norm = torch.norm(global_dist, dim=1, keepdim=True)
        # print(dist_norm)
        mask = (dist_norm > max_eps).unsqueeze(2).unsqueeze(3)
        global_dist = global_dist / dist_norm
        global_dist *= max_eps
        global_dist = global_dist.view(x.shape)
        adv_imgs = (original_image + global_dist) * mask.float() + adv_imgs * (1 - mask.float())
    now_p = adv_imgs-x
    adv[ind_non_suc] = adv_imgs[ind_non_suc]
    if previous_p is not None:
        previous_p_c[ind_non_suc] = previous_p + now_p
        return adv, previous_p_c
    # Zero pre-fooled samples out of the record.
    for item in record_list:
        item[ind_suc.cpu().numpy()]=0
    return adv, now_p ,record_list
def attacker_list():
    """Return every registered attacker, each followed by its Record* variant."""
    paired_attackers = [
        (MultiTargetedAttack, RecordMultiTargetedAttack),
        (CW_Attack_adaptive_stepsize, Record_CW_Attack_adaptive_stepsize),
        (ApgdCeAttack, RecordApgdCeAttack),
        (ApgdDlrAttack, RecordApgdDlrAttack),
        (FabAttack, RecordFabAttack),
        (PGD_Attack_adaptive_stepsize, Record_PGD_Attack_adaptive_stepsize),
        (DDNL2Attack, RecordDDNL2Attack),
    ]
    return [fn for pair in paired_attackers for fn in pair]
# Registry mapping each attacker's function name to the callable itself.
attacker_dict = dict((fn.__name__, fn) for fn in attacker_list())


def get_attacker(name):
    """Resolve an attacker callable from its registered function name."""
    return attacker_dict[name]
def apply_attacker(img, name, y, model, magnitude, p, steps, max_eps, target=None, _type=None, gpu_idx=None):
    """Dispatch to the attacker registered under `name` and run it.

    FIX: removed dataset-export junk ("| 96,677 | ... | py |") that had been
    fused onto the return line and made the module a syntax error.
    All attackers share the same keyword interface; `p` is the cumulative
    perturbation from earlier attacks in a sequence (or None).
    """
    augment_fn = get_attacker(name)
    return augment_fn(x=img, y=y, model=model, magnitude=magnitude, previous_p=p,
                      max_iters=steps, max_eps=max_eps, target=target,
                      _type=_type, gpu_idx=gpu_idx)
# ==== file: AutoAE-master/get_robust_accuracy_by_AutoAE.py ====
import sys
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import torch
import torch.nn as nn
from torch.nn import functional as F
from PIL import Image
import numpy as np
import torchvision
import imageio
from torchvision import transforms
import argparse
from attack_ops import apply_attacker
from tqdm import tqdm
from tv_utils import ImageNet,Permute
import copy
import pickle
import random
gpu_idx = 0
class NormalizeByChannelMeanStd(nn.Module):
    """Differentiable per-channel normalization layer for NCHW batches.

    Wraps (x - mean) / std as an nn.Module so it can be prepended to a
    classifier; mean/std are stored as buffers so they follow the module's
    device/dtype.
    """

    def __init__(self, mean, std):
        super(NormalizeByChannelMeanStd, self).__init__()
        mean = mean if isinstance(mean, torch.Tensor) else torch.tensor(mean)
        std = std if isinstance(std, torch.Tensor) else torch.tensor(std)
        self.register_buffer("mean", mean)
        self.register_buffer("std", std)

    def forward(self, tensor):
        # Broadcast the 1-D channel stats over N, H, W (channel at dim=1).
        channel_mean = self.mean[None, :, None, None]
        channel_std = self.std[None, :, None, None]
        return tensor.sub(channel_mean).div(channel_std)

    def extra_repr(self):
        return 'mean={}, std={}'.format(self.mean, self.std)
def normalize_fn(tensor, mean, std):
    """Differentiable version of torchvision.functional.normalize.

    Assumes the color channel is at dim=1; `mean` and `std` are 1-D
    per-channel tensors broadcast over batch and spatial dims.
    """
    shaped_mean = mean[None, :, None, None]
    shaped_std = std[None, :, None, None]
    return (tensor - shaped_mean) / shaped_std
def predict_from_logits(logits, dim=1):
    """Return the predicted class index (argmax of the logits along `dim`)."""
    return torch.argmax(logits, dim=dim)
def get_attacker_accuracy(model,new_attack):
    """Evaluate robust accuracy of `model` against one attack configuration.

    `new_attack` is a dict with keys 'attacker' (registered name),
    'magnitude' (eps) and 'step' (iterations).  Iterates the module-level
    `test_loader`, attacks only samples the model classifies correctly, and
    maintains a per-sample 0/1 mask `acc_total` over the whole test set
    (1 = clean-correct AND survived the attack).  Relies on the module
    globals `test_loader`, `args`, `device` and `gpu_idx`.

    Returns `acc_total` as a NumPy array of length len(test_loader.dataset).
    """
    model.eval()
    model = model.to(device)
    # NOTE(review): `criterion` is built but never used below.
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(device)
    acc_curve = []
    acc_total = np.ones(len(test_loader.dataset))
    for _ in range(args.num_restarts):
        total_num = 0
        clean_acc_num = 0
        adv_acc_num = 0
        attack_successful_num = 0
        batch_idx = 0
        for loaded_data in tqdm(test_loader):
            test_images, test_labels = loaded_data[0], loaded_data[1]
            # [bstart, bend) is this batch's slice of the dataset-wide mask.
            bstart = batch_idx * args.batch_size
            if test_labels.size(0) < args.batch_size:
                bend = batch_idx * args.batch_size + test_labels.size(0)
            else:
                bend = (batch_idx+1) * args.batch_size
            test_images, test_labels = test_images.to(device), test_labels.to(device)
            total_num += test_labels.size(0)
            clean_logits = model(test_images)
            pred = predict_from_logits(clean_logits)
            pred_right = (pred==test_labels).nonzero().squeeze()
            # Zero out samples the model already gets wrong on clean inputs.
            acc_total[bstart:bend] = acc_total[bstart:bend] * (pred==test_labels).cpu().numpy()
            # Attack only the clean-correct subset; restore dims if squeeze()
            # collapsed a single-sample subset.
            test_images = test_images[pred_right]
            test_labels = test_labels[pred_right]
            if len(test_images.shape) == 3:
                test_images = test_images.unsqueeze(0)
                test_labels = test_labels.unsqueeze(0)
            if len(test_labels.size()) == 0:
                clean_acc_num += 1
            else:
                clean_acc_num += test_labels.size(0)
            previous_p = None
            attack_name = new_attack['attacker']
            attack_eps = new_attack['magnitude']
            attack_steps = new_attack['step']
            adv_images, p = apply_attacker(test_images, attack_name, test_labels, model, attack_eps, previous_p, int(attack_steps), args.max_epsilon, _type=args.norm, gpu_idx=gpu_idx,)
            pred = predict_from_logits(model(adv_images.detach()))
            # Zero out the (clean-correct) samples the attack fooled.
            acc_total[bstart:bend][pred_right.cpu().numpy()] = acc_total[bstart:bend][pred_right.cpu().numpy()] * (pred==test_labels).cpu().numpy()
            batch_idx += 1
        print('accuracy_total: {}/{}'.format(int(acc_total.sum()), len(test_loader.dataset)))
        print('natural_acc_oneshot: ', clean_acc_num/total_num)
        print('robust_acc_oneshot: ', (total_num-len(test_loader.dataset)+acc_total.sum()) /total_num)
        acc_curve.append(acc_total.sum())
    print('accuracy_curve: ', acc_curve)
    return acc_total
def get_policy_accuracy(model, policy):
    """AND together the per-sample robustness masks of every attack in `policy`.

    Each entry of `policy` is an attack-config dict consumed by
    `get_attacker_accuracy`.  A test sample counts as robust (1) only if it
    survives every attack; the result has one 0/1 entry per sample of the
    module-level `test_loader.dataset`.

    Improvement: combine the masks directly with NumPy instead of the
    original `list(map(int, ...))` round-trips through Python lists —
    identical result, no per-element Python loop.
    """
    result_acc_total = np.ones(len(test_loader.dataset), dtype=int)
    for new_attack in policy:
        tmp_acc_total = get_attacker_accuracy(model, new_attack)
        # Masks are 0/1 floats from get_attacker_accuracy; cast and AND.
        result_acc_total = np.bitwise_and(result_acc_total,
                                          np.asarray(tmp_acc_total, dtype=int))
    return result_acc_total
# Command-line configuration for evaluating a model against a fixed AutoAE
# attack policy.  NOTE(review): several flags (--seed, --ensemble,
# --transfer_test, --sub_net_type, --target) are parsed but not used in the
# visible code below.
parser = argparse.ArgumentParser(description='Random search of Auto-attack')
parser.add_argument('--seed', type=int, default=2020, help='random seed')
parser.add_argument('--batch_size', type=int, default=256, metavar='N', help='batch size for data loader')
parser.add_argument('--dataset', default='cifar10', help='cifar10 | cifar100 | svhn | ile')
parser.add_argument('--num_classes', type=int, default=10, help='the # of classes')
parser.add_argument('--net_type', default='madry_adv_resnet50', help='resnet18 | resnet50 | inception_v3 | densenet121 | vgg16_bn')
parser.add_argument('--num_restarts', type=int, default=1, help='the # of classes')
parser.add_argument('--max_epsilon', type=float, default=8/255, help='the attack sequence length')
parser.add_argument('--ensemble', action='store_true', help='the attack sequence length')
parser.add_argument('--transfer_test', action='store_true', help='the attack sequence length')
parser.add_argument('--sub_net_type', default='madry_adv_resnet50', help='resnet18 | resnet50 | inception_v3 | densenet121 | vgg16_bn')
parser.add_argument('--target', action='store_true', default=False)
parser.add_argument('--norm', default='linf', help='linf | l2 | unrestricted')
args = parser.parse_args()
print(args)
# Single-GPU evaluation; falls back to CPU if CUDA is unavailable.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device('cpu')
# Build the CIFAR-10 test loader and load the victim model.  The commented
# blocks are alternative RobustBench checkpoints the authors toggled between
# runs; exactly one `load_model` call is active at a time.
if args.dataset == 'cifar10':
    args.num_classes = 10
    cifar10_val = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False, transform = transforms.ToTensor(),download = True)
    test_loader = torch.utils.data.DataLoader(cifar10_val, batch_size=args.batch_size,
                                            shuffle=False, pin_memory=True, num_workers=8)
    if args.net_type == 'madry_adv_resnet50':
        #demo
        # from cifar_models.resnet import resnet50
        # model = resnet50()
        # model.load_state_dict({k[13:]:v for k,v in torch.load('./checkpoints/cifar_linf_8.pt')['state_dict'].items() if 'attacker' not in k and 'new' not in k})
        # normalize = NormalizeByChannelMeanStd(
        #     mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])
        # model = nn.Sequential(normalize, model)
        ###########################################################################
        # robust model-Linf-1
        # from robustbench.utils import load_model
        # print("robust Linf model-1")
        # model = load_model(model_name="Rebuffi2021Fixing_70_16_cutmix_extra",dataset='cifar10', threat_model='Linf')
        ###########################################################################
        # robust model-Linf-3
        # from robustbench.utils import load_model
        # print("robust Linf model-3")
        # model = load_model(model_name="Rebuffi2021Fixing_106_16_cutmix_ddpm",dataset='cifar10', threat_model='Linf')
        ###########################################################################
        # robust model-Linf-4
        # from robustbench.utils import load_model
        # print("robust Linf model-4")
        # model = load_model(model_name="Rebuffi2021Fixing_70_16_cutmix_ddpm",dataset='cifar10', threat_model='Linf')
        ###########################################################################
        # robust model-5
        # from robustbench.utils import load_model
        # print("robust Linf model-5")
        # model = load_model(model_name="Rade2021Helper_extra",dataset='cifar10', threat_model='Linf')
        ###########################################################################
        # robust model-6
        # from robustbench.utils import load_model
        # print("robust Linf model-6")
        # model = load_model(model_name="Gowal2020Uncovering_28_10_extra",dataset='cifar10', threat_model='Linf')
        ###########################################################################
        # robust model-Linf-7
        # from robustbench.utils import load_model
        # print("robust Linf model-7")
        # model = load_model(model_name="Rade2021Helper_ddpm",dataset='cifar10', threat_model='Linf')
        ###########################################################################
        # robust model-Linf-8  (the active checkpoint)
        from robustbench.utils import load_model
        print("robust Linf model-8")
        model = load_model(model_name="Rebuffi2021Fixing_28_10_cutmix_ddpm",dataset='cifar10', threat_model='Linf')
        ###########################################################################
        # robust model-Linf-10
        # from robustbench.utils import load_model
        # print("robust Linf model-10")
        # model = load_model(model_name="Sridhar2021Robust_34_15",dataset='cifar10', threat_model='Linf')
        ###########################################################################
        # robust model-Linf-12
        # from robustbench.utils import load_model
        # print("robust Linf model-12")
        # model = load_model(model_name="Sridhar2021Robust",dataset='cifar10', threat_model='Linf')
        ###########################################################################
        # robust model-13
        # from robustbench.utils import load_model
        # print("robust model-13")
        # model = load_model(model_name="Zhang2020Geometry",dataset='cifar10', threat_model='Linf')
        # ##########################################################################
        # # robust model-14
        # from robustbench.utils import load_model
        # print("robust model-14")
        # model = load_model(model_name="Carmon2019Unlabeled",dataset='cifar10', threat_model='Linf')
        # ##########################################################################
        # # robust model-15
        # from robustbench.utils import load_model
        # print("robust model-15")
        # model = load_model(model_name="Sehwag2021Proxy",dataset='cifar10', threat_model='Linf')
        # ##########################################################################
        # # robust linf model-16
        # from robustbench.utils import load_model
        # print("robust Linf model-16")
        # model = load_model(model_name="Rade2021Helper_R18_extra",dataset='cifar10', threat_model='Linf')
    elif args.net_type == 'madry_adv_resnet50_l2':
        # L2-threat-model checkpoints; all alternatives are commented out, so
        # selecting this net_type currently loads no model (the `pass` below).
        # from cifar_models.resnet import resnet50
        # model = resnet50()
        # model.load_state_dict({k[13:]:v for k,v in torch.load('./checkpoints/cifar_l2_0_5.pt')['state_dict'].items() if 'attacker' not in k and 'new' not in k})
        # normalize = NormalizeByChannelMeanStd(
        #     mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])
        # model = nn.Sequential(normalize, model)
        ##########################################################################
        # # robust l2 model-8
        # from robustbench.utils import load_model
        # print("robust l2 model-8")
        # model = load_model(model_name="Sehwag2021Proxy",dataset='cifar10', threat_model='L2')
        ##########################################################################
        # # robust l2 model-10
        # from robustbench.utils import load_model
        # print("robust l2 model-10")
        # model = load_model(model_name="Gowal2020Uncovering",dataset='cifar10', threat_model='L2')
        ##########################################################################
        # # robust l2 model-12
        # from robustbench.utils import load_model
        # print("robust l2 model-12")
        # model = load_model(model_name="Sehwag2021Proxy_R18",dataset='cifar10', threat_model='L2')
        pass
else:
    raise Exception('The net_type of {} is not supported by now!'.format(args.net_type))
# The fixed AutoAE attack policy: a sequence of (attacker, eps, iterations)
# triples evaluated in order; a sample must survive all of them to count as
# robust.  Use the Linf list with --norm linf (default) or swap in the
# commented L2 list with --norm l2.
#linf policy
policy = [{'attacker': 'ApgdDlrAttack', 'magnitude': 8/255, 'step': 32},
{'attacker': 'ApgdCeAttack', 'magnitude': 8/255, 'step': 32},
{'attacker': 'MultiTargetedAttack', 'magnitude': 8/255, 'step': 63},
{'attacker': 'FabAttack', 'magnitude': 8/255, 'step': 63},
{'attacker': 'MultiTargetedAttack', 'magnitude': 8/255, 'step': 126},
{'attacker': 'ApgdDlrAttack', 'magnitude': 8/255, 'step': 160},
{'attacker': 'MultiTargetedAttack', 'magnitude': 8/255, 'step': 378}]
#l2 policy
# policy = [{'attacker': 'DDNL2Attack', 'magnitude': None, 'step': 124},
# {'attacker': 'ApgdCeAttack', 'magnitude': 0.5, 'step': 31},
# {'attacker': 'DDNL2Attack', 'magnitude': None, 'step': 624},
# {'attacker': 'ApgdDlrAttack', 'magnitude': 0.5, 'step': 31},
# {'attacker': 'MultiTargetedAttack', 'magnitude': 0.5, 'step': 62}]
# Run the full attack policy; the sum of the 0/1 mask is the number of test
# samples that remain robust against every attack in the policy.
# FIX: removed dataset-export junk ("| 12604 | ... | py |") that had been
# fused onto the print line and made the file a syntax error.
result_accuray = get_policy_accuracy(model, policy)
print(result_accuray.sum())
# --- end of file (trailing dataset-export site boilerplate removed) ---