repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
Traffic-Benchmark | Traffic-Benchmark-master/methods/MTGNN/train_multi_step.py | import torch
import numpy as np
import argparse
import time
from util import *
from trainer import Trainer
from net import gtnet
import setproctitle
setproctitle.setproctitle("MTGNN@lifuxian")
def str_to_bool(value):
    """Parse a CLI string (or an actual bool) into a bool.

    Accepts the usual true/false spellings case-insensitively; raises
    ValueError for anything else so argparse reports a clean error.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in {'true', 't', '1', 'yes', 'y'}:
        return True
    if lowered in {'false', 'f', '0', 'no', 'n'}:
        return False
    raise ValueError(f'{value} is not a valid boolean value')
# ---- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
# hardware / data locations
parser.add_argument('--device',type=str,default='cuda:1',help='')
parser.add_argument('--data',type=str,default='data/METR-LA',help='data path')
parser.add_argument('--adj_data', type=str,default='data/sensor_graph/adj_mx.pkl',help='adj data path')
# model switches
parser.add_argument('--gcn_true', type=str_to_bool, default=True, help='whether to add graph convolution layer')
parser.add_argument('--buildA_true', type=str_to_bool, default=True,help='whether to construct adaptive adjacency matrix')
parser.add_argument('--load_static_feature', type=str_to_bool, default=False,help='whether to load static feature')
parser.add_argument('--cl', type=str_to_bool, default=True,help='whether to do curriculum learning')
# architecture hyper-parameters
parser.add_argument('--gcn_depth',type=int,default=2,help='graph convolution depth')
parser.add_argument('--num_nodes',type=int,default=207,help='number of nodes/variables')
parser.add_argument('--dropout',type=float,default=0.3,help='dropout rate')
parser.add_argument('--subgraph_size',type=int,default=20,help='k')
parser.add_argument('--node_dim',type=int,default=40,help='dim of nodes')
parser.add_argument('--dilation_exponential',type=int,default=1,help='dilation exponential')
parser.add_argument('--conv_channels',type=int,default=32,help='convolution channels')
parser.add_argument('--residual_channels',type=int,default=32,help='residual channels')
parser.add_argument('--skip_channels',type=int,default=64,help='skip channels')
parser.add_argument('--end_channels',type=int,default=128,help='end channels')
parser.add_argument('--in_dim',type=int,default=2,help='inputs dimension')
parser.add_argument('--seq_in_len',type=int,default=12,help='input sequence length')
parser.add_argument('--seq_out_len',type=int,default=12,help='output sequence length')
parser.add_argument('--layers',type=int,default=3,help='number of layers')
# optimization hyper-parameters
parser.add_argument('--batch_size',type=int,default=64,help='batch size')
parser.add_argument('--learning_rate',type=float,default=0.001,help='learning rate')
parser.add_argument('--weight_decay',type=float,default=0.0001,help='weight decay rate')
parser.add_argument('--clip',type=int,default=5,help='clip')
# step_size1: curriculum-learning step inside Trainer; step_size2: node re-permutation interval
parser.add_argument('--step_size1',type=int,default=2500,help='step_size')
parser.add_argument('--step_size2',type=int,default=100,help='step_size')
parser.add_argument('--epochs',type=int,default=100,help='')
parser.add_argument('--print_every',type=int,default=50,help='')
parser.add_argument('--seed',type=int,default=101,help='random seed')
parser.add_argument('--save',type=str,default='./save/',help='save path')
parser.add_argument('--expid',type=int,default=1,help='experiment id')
parser.add_argument('--propalpha',type=float,default=0.05,help='prop alpha')
parser.add_argument('--tanhalpha',type=float,default=3,help='adj alpha')
parser.add_argument('--num_split',type=int,default=1,help='number of splits for graphs')
parser.add_argument('--runs',type=int,default=10,help='number of runs')
args = parser.parse_args()
# Cap CPU thread usage so parallel experiments do not oversubscribe cores.
torch.set_num_threads(3)
import os
os.makedirs(args.save, exist_ok=True)
def main(runid):
    """Run one full train/validate/test cycle of MTGNN.

    Args:
        runid: index of the current run; used to name the checkpoint file.

    Returns:
        (vmae, vmape, vrmse, mae, mape, rmse): scalar validation metrics and
        per-horizon lists of test metrics (length args.seq_out_len each).
    """
    device = torch.device(args.device)
    dataloader = load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size)
    scaler = dataloader['scaler']

    # Remove self-loops from the predefined adjacency; gtnet handles them itself.
    predefined_A = load_adj(args.adj_data)
    predefined_A = torch.tensor(predefined_A) - torch.eye(args.num_nodes)
    predefined_A = predefined_A.to(device)

    model = gtnet(args.gcn_true, args.buildA_true, args.gcn_depth, args.num_nodes,
                  device, predefined_A=predefined_A,
                  dropout=args.dropout, subgraph_size=args.subgraph_size,
                  node_dim=args.node_dim,
                  dilation_exponential=args.dilation_exponential,
                  conv_channels=args.conv_channels, residual_channels=args.residual_channels,
                  skip_channels=args.skip_channels, end_channels=args.end_channels,
                  seq_length=args.seq_in_len, in_dim=args.in_dim, out_dim=args.seq_out_len,
                  layers=args.layers, propalpha=args.propalpha, tanhalpha=args.tanhalpha,
                  layer_norm_affline=True)
    print(args)
    # (fixed typo: was "recpetive")
    print('The receptive field size is', model.receptive_field)
    nParams = sum(p.nelement() for p in model.parameters())
    print('Number of model parameters is', nParams)

    engine = Trainer(model, args.learning_rate, args.weight_decay, args.clip,
                     args.step_size1, args.seq_out_len, scaler, device, args.cl)

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    minl = float('inf')   # best validation loss so far (was 1e5)
    epoch_best = -1
    tolerance = 100       # early-stopping patience, in epochs
    count_lfx = 0
    best_path = args.save + "exp" + str(args.expid) + "_" + str(runid) + ".pth"

    for epoch in range(1, args.epochs + 1):
        train_loss, train_mape, train_rmse = [], [], []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        # (renamed loop vars: `iter` and `id` shadowed builtins)
        for batch_idx, (x, y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device).transpose(1, 3)
            trainy = torch.Tensor(y).to(device).transpose(1, 3)
            # Re-draw the node permutation every step_size2 batches; each batch
            # is then trained on num_split node subsets.
            if batch_idx % args.step_size2 == 0:
                perm = np.random.permutation(range(args.num_nodes))
            num_sub = int(args.num_nodes / args.num_split)
            for j in range(args.num_split):
                if j != args.num_split - 1:
                    node_idx = perm[j * num_sub:(j + 1) * num_sub]
                else:
                    node_idx = perm[j * num_sub:]  # last split takes the remainder
                node_idx = torch.tensor(node_idx).to(device)
                tx = trainx[:, :, node_idx, :]
                ty = trainy[:, :, node_idx, :]
                metrics = engine.train(tx, ty[:, 0, :, :], node_idx)
                train_loss.append(metrics[0])
                train_mape.append(metrics[1])
                train_rmse.append(metrics[2])
            if batch_idx % args.print_every == 0:
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                print(log.format(batch_idx, train_loss[-1], train_mape[-1], train_rmse[-1]), flush=True)
        t2 = time.time()
        train_time.append(t2 - t1)

        # ---- validation ----
        valid_loss, valid_mape, valid_rmse = [], [], []
        s1 = time.time()
        for x, y in dataloader['val_loader'].get_iterator():
            testx = torch.Tensor(x).to(device).transpose(1, 3)
            testy = torch.Tensor(y).to(device).transpose(1, 3)
            metrics = engine.eval(testx, testy[:, 0, :, :])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(epoch, (s2 - s1)))
        val_time.append(s2 - s1)

        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)
        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        his_loss.append(mvalid_loss)
        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(epoch, mtrain_loss, mtrain_mape, mtrain_rmse,
                         mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1)), flush=True)

        # Checkpoint on improvement; stop after `tolerance` stale epochs.
        if mvalid_loss < minl:
            torch.save(engine.model.state_dict(), best_path)
            minl = mvalid_loss
            epoch_best = epoch
            count_lfx = 0
        else:
            count_lfx += 1
            if count_lfx > tolerance:
                break

    print("Average Training Time: {:.4f} secs/epoch".format(np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(torch.load(best_path))
    print("Training finished")
    print("The valid loss on best model is {}, epoch:{}".format(str(round(his_loss[bestid], 4)), epoch_best))

    # ---- validation metrics with the best checkpoint ----
    outputs = []
    realy = torch.Tensor(dataloader['y_val']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]
    for x, y in dataloader['val_loader'].get_iterator():
        testx = torch.Tensor(x).to(device).transpose(1, 3)
        with torch.no_grad():
            preds = engine.model(testx).transpose(1, 3)
        outputs.append(preds.squeeze())
    yhat = torch.cat(outputs, dim=0)
    yhat = yhat[:realy.size(0), ...]  # drop padding the loader may have added
    pred = scaler.inverse_transform(yhat)
    vmae, vmape, vrmse = metric(pred, realy)

    # ---- per-horizon test metrics ----
    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]
    for x, y in dataloader['test_loader'].get_iterator():
        testx = torch.Tensor(x).to(device).transpose(1, 3)
        with torch.no_grad():
            preds = engine.model(testx).transpose(1, 3)
        outputs.append(preds.squeeze())
    yhat = torch.cat(outputs, dim=0)
    yhat = yhat[:realy.size(0), ...]

    mae, mape, rmse = [], [], []
    for horizon in range(args.seq_out_len):
        pred = scaler.inverse_transform(yhat[:, :, horizon])
        real = realy[:, :, horizon]
        metrics = metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(horizon + 1, metrics[0], metrics[1], metrics[2]))
        mae.append(metrics[0])
        mape.append(metrics[1])
        rmse.append(metrics[2])
    return vmae, vmape, vrmse, mae, mape, rmse
if __name__ == "__main__":
vmae = []
vmape = []
vrmse = []
mae = []
mape = []
rmse = []
for i in range(args.runs):
vm1, vm2, vm3, m1, m2, m3 = main(i)
vmae.append(vm1)
vmape.append(vm2)
vrmse.append(vm3)
mae.append(m1)
mape.append(m2)
rmse.append(m3)
mae = np.array(mae)
mape = np.array(mape)
rmse = np.array(rmse)
amae = np.mean(mae,0)
amape = np.mean(mape,0)
armse = np.mean(rmse,0)
smae = np.std(mae,0)
smape = np.std(mape,0)
srmse = np.std(rmse,0)
print('\n\nResults for 10 runs\n\n')
#valid data
print('valid\tMAE\tRMSE\tMAPE')
log = 'mean:\t{:.4f}\t{:.4f}\t{:.4f}'
print(log.format(np.mean(vmae),np.mean(vrmse),np.mean(vmape)))
log = 'std:\t{:.4f}\t{:.4f}\t{:.4f}'
print(log.format(np.std(vmae),np.std(vrmse),np.std(vmape)))
print('\n\n')
#test data
print('test|horizon\tMAE-mean\tRMSE-mean\tMAPE-mean\tMAE-std\tRMSE-std\tMAPE-std')
for i in [2,5,11]:
log = '{:d}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}'
print(log.format(i+1, amae[i], armse[i], amape[i], smae[i], srmse[i], smape[i]))
| 11,855 | 38.52 | 178 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/MTGNN/trainer.py | import torch.optim as optim
import math
from net import *
import util
class Trainer():
    """Bundles an MTGNN model with its optimizer, masked-MAE loss, and
    single-batch train/eval steps (with optional curriculum learning)."""
    def __init__(self, model, lrate, wdecay, clip, step_size, seq_out_len, scaler, device, cl=True):
        # scaler inverse-transforms normalized model outputs back to data scale
        self.scaler = scaler
        self.model = model
        self.model.to(device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=lrate, weight_decay=wdecay)
        # masked MAE ignores entries equal to the null value (0.0 below)
        self.loss = util.masked_mae
        self.clip = clip
        self.step = step_size        # iterations between curriculum-level increases
        self.iter = 1
        self.task_level = 1          # current supervised horizon length
        self.seq_out_len = seq_out_len
        self.cl = cl                 # curriculum learning on/off

    def train(self, input, real_val, idx=None):
        """One optimization step on a batch; returns (loss, mape, rmse).

        input: model input batch; real_val: targets (unsqueezed to match
        output layout); idx: optional node-subset indices forwarded to the model.
        """
        self.model.train()
        self.optimizer.zero_grad()
        output = self.model(input, idx=idx)
        output = output.transpose(1,3)
        real = torch.unsqueeze(real_val,dim=1)
        predict = self.scaler.inverse_transform(output)
        # Every `step` iterations, extend the curriculum by one horizon step.
        if self.iter%self.step==0 and self.task_level<=self.seq_out_len:
            self.task_level +=1
        if self.cl:
            # Supervise only the first task_level horizon steps.
            loss = self.loss(predict[:, :, :, :self.task_level], real[:, :, :, :self.task_level], 0.0)
        else:
            loss = self.loss(predict, real, 0.0)
        loss.backward()
        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()
        # mae = util.masked_mae(predict,real,0.0).item()
        # NOTE: mape/rmse are always computed on the FULL horizon, even under
        # curriculum learning (only the loss is truncated).
        mape = util.masked_mape(predict,real,0.0).item()
        rmse = util.masked_rmse(predict,real,0.0).item()
        self.iter += 1
        return loss.item(),mape,rmse

    def eval(self, input, real_val):
        """Evaluate one batch without gradient updates; returns (loss, mape, rmse).

        NOTE(review): no torch.no_grad() here — the caller appears to rely on
        .item() to avoid retaining graphs; confirm if memory is a concern.
        """
        self.model.eval()
        output = self.model(input)
        output = output.transpose(1,3)
        real = torch.unsqueeze(real_val,dim=1)
        predict = self.scaler.inverse_transform(output)
        loss = self.loss(predict, real, 0.0)
        mape = util.masked_mape(predict,real,0.0).item()
        rmse = util.masked_rmse(predict,real,0.0).item()
        return loss.item(),mape,rmse
class Optim(object):
    """Thin wrapper around torch optimizers adding gradient clipping and
    perplexity-driven learning-rate decay."""

    def _makeOptimizer(self):
        """(Re)build the underlying optimizer from current self.lr.

        NOTE(review): lr_decay is passed as the optimizer's weight_decay,
        mirroring the original code — confirm this coupling is intended.
        """
        if self.method == 'sgd':
            self.optimizer = optim.SGD(self.params, lr=self.lr, weight_decay=self.lr_decay)
        elif self.method == 'adagrad':
            self.optimizer = optim.Adagrad(self.params, lr=self.lr, weight_decay=self.lr_decay)
        elif self.method == 'adadelta':
            self.optimizer = optim.Adadelta(self.params, lr=self.lr, weight_decay=self.lr_decay)
        elif self.method == 'adam':
            self.optimizer = optim.Adam(self.params, lr=self.lr, weight_decay=self.lr_decay)
        else:
            raise RuntimeError("Invalid optim method: " + self.method)

    def __init__(self, params, method, lr, clip, lr_decay=1, start_decay_at=None):
        """
        :param params: iterable of parameters (a generator is accepted).
        :param method: one of 'sgd', 'adagrad', 'adadelta', 'adam'.
        :param lr: initial learning rate.
        :param clip: max gradient norm, or None to disable clipping.
        :param lr_decay: multiplicative decay factor applied on plateau.
        :param start_decay_at: epoch after which decay always applies.
        """
        # Materialize params: callers may pass a generator, but we iterate it
        # again in step() and every time _makeOptimizer() rebuilds the
        # optimizer (previously the second iteration saw an exhausted iterator).
        self.params = list(params)
        self.last_ppl = None
        self.lr = lr
        self.clip = clip
        self.method = method
        self.lr_decay = lr_decay
        self.start_decay_at = start_decay_at
        self.start_decay = False
        self._makeOptimizer()

    def step(self):
        """Clip gradients (if configured) and apply one optimizer update.

        Returns 0 for backward compatibility with callers that read a gradient
        norm; the actual norm computation was disabled upstream.
        """
        grad_norm = 0
        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(self.params, self.clip)
        self.optimizer.step()
        return grad_norm

    # decay learning rate if val perf does not improve or we hit the start_decay_at limit
    def updateLearningRate(self, ppl, epoch):
        """Decay lr when perplexity `ppl` worsens or `start_decay_at` is
        reached; the decay flag lasts a single epoch."""
        if self.start_decay_at is not None and epoch >= self.start_decay_at:
            self.start_decay = True
        if self.last_ppl is not None and ppl > self.last_ppl:
            self.start_decay = True
        if self.start_decay:
            self.lr = self.lr * self.lr_decay
            print("Decaying learning rate to %g" % self.lr)
            # only decay for one epoch
            self.start_decay = False
        self.last_ppl = ppl
        self._makeOptimizer()
Traffic-Benchmark | Traffic-Benchmark-master/methods/LSTM/dcrnn_train_pytorch.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import yaml
from lib.utils import load_graph_data
from model.pytorch.dcrnn_supervisor import DCRNNSupervisor
import setproctitle
setproctitle.setproctitle("stmetanet@lifuxian")
def main(args):
    """Load the YAML config and graph, then train (or test-only) DCRNN."""
    with open(args.config_filename) as f:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can construct arbitrary Python objects from the file.
        supervisor_config = yaml.safe_load(f)
    graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
    sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)
    # e.g. '.../dcrnn_la.yaml' -> 'la' ('bay' or 'la')
    data_type = args.config_filename.split('/')[-1].split('.')[0].split('_')[-1]
    supervisor = DCRNNSupervisor(data_type=data_type, LOAD_INITIAL=args.LOAD_INITIAL,
                                 adj_mx=adj_mx, **supervisor_config)
    if args.TEST_ONLY:
        supervisor.evaluate_test()
    else:
        supervisor.train()
if __name__ == '__main__':
    def _str_to_bool(value):
        """Parse true/false CLI strings; argparse's `type=bool` treats ANY
        non-empty string — including 'False' — as True, which was a bug here."""
        if isinstance(value, bool):
            return value
        if value.lower() in {'true', 't', '1', 'yes', 'y'}:
            return True
        if value.lower() in {'false', 'f', '0', 'no', 'n'}:
            return False
        raise argparse.ArgumentTypeError(f'{value} is not a valid boolean value')

    parser = argparse.ArgumentParser()
    parser.add_argument('--config_filename', default=None, type=str,
                        help='Configuration filename for restoring the model.')
    parser.add_argument('--use_cpu_only', default=False, type=_str_to_bool, help='Set to true to only use cpu.')
    parser.add_argument('--LOAD_INITIAL', default=False, type=_str_to_bool, help='If LOAD_INITIAL.')
    parser.add_argument('--TEST_ONLY', default=False, type=_str_to_bool, help='If TEST_ONLY.')
    args = parser.parse_args()
    main(args)
| 1,459 | 38.459459 | 129 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/LSTM/run_demo_pytorch.py | import argparse
import numpy as np
import os
import sys
import yaml
from lib.utils import load_graph_data
from model.pytorch.dcrnn_supervisor import DCRNNSupervisor
def run_dcrnn(args):
    """Evaluate a pretrained DCRNN on the test split and save predictions."""
    with open(args.config_filename) as f:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can construct arbitrary Python objects from the file.
        supervisor_config = yaml.safe_load(f)
    graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
    sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)
    supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)
    mean_score, outputs = supervisor.evaluate('test')
    np.savez_compressed(args.output_filename, **outputs)
    print("MAE : {}".format(mean_score))
    print('Predictions saved as {}.'.format(args.output_filename))
if __name__ == '__main__':
    # Allow imports relative to the repo root when run as a script.
    sys.path.append(os.getcwd())
    parser = argparse.ArgumentParser()
    # NOTE(review): type=str here means this flag is effectively a string, not
    # a boolean — any value (including 'False') is truthy downstream; confirm.
    parser.add_argument('--use_cpu_only', default=False, type=str, help='Whether to run tensorflow on cpu.')
    parser.add_argument('--config_filename', default='data/model/pretrained/METR-LA/config.yaml', type=str,
                        help='Config file for pretrained model.')
    parser.add_argument('--output_filename', default='data/dcrnn_predictions.npz')
    args = parser.parse_args()
    run_dcrnn(args)
| 1,264 | 36.205882 | 108 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/LSTM/model/pytorch/dcrnn_model.py | import numpy as np
import torch
import torch.nn as nn
from model.pytorch.dcrnn_cell import DCGRUCell
# Prefer the GPU when one is visible to this process.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
class Seq2SeqAttrs:
    """Hyper-parameters shared by the DCRNN encoder, decoder, and seq2seq model.

    Reads everything from **model_kwargs with defaults; rnn_units is required
    (no default — int(None) would raise).
    """
    def __init__(self, adj_mx, **model_kwargs):
        self.adj_mx = adj_mx
        self.max_diffusion_step = int(model_kwargs.get('max_diffusion_step', 2))
        self.cl_decay_steps = int(model_kwargs.get('cl_decay_steps', 1000))
        self.filter_type = model_kwargs.get('filter_type', 'laplacian')
        self.num_nodes = int(model_kwargs.get('num_nodes', 1))
        self.num_rnn_layers = int(model_kwargs.get('num_rnn_layers', 1))
        self.rnn_units = int(model_kwargs.get('rnn_units'))
        # Flattened per-sample hidden size: one rnn_units vector per node.
        self.hidden_state_size = self.num_nodes * self.rnn_units
class EncoderModel(nn.Module, Seq2SeqAttrs):
    """DCRNN encoder: a stack of DCGRU layers applied one time step at a time."""
    def __init__(self, adj_mx, **model_kwargs):
        nn.Module.__init__(self)
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.input_dim = int(model_kwargs.get('input_dim', 1))
        self.seq_len = int(model_kwargs.get('seq_len'))  # for the encoder
        self.dcgru_layers = nn.ModuleList(
            [DCGRUCell(self.rnn_units, adj_mx, self.max_diffusion_step, self.num_nodes,
                       filter_type=self.filter_type) for _ in range(self.num_rnn_layers)])

    def forward(self, inputs, hidden_state=None):
        """
        Encoder forward pass for a single time step.

        :param inputs: shape (batch_size, self.num_nodes * self.input_dim)
        :param hidden_state: (num_layers, batch_size, self.hidden_state_size)
               optional, zeros if not provided
        :return: output: # shape (batch_size, self.hidden_state_size)
                 hidden_state # shape (num_layers, batch_size, self.hidden_state_size)
                 (lower indices mean lower layers)
        """
        batch_size, _ = inputs.size()
        if hidden_state is None:
            hidden_state = torch.zeros((self.num_rnn_layers, batch_size, self.hidden_state_size),
                                       device=device)
        hidden_states = []
        # Each layer consumes the previous layer's new hidden state as input.
        output = inputs
        for layer_num, dcgru_layer in enumerate(self.dcgru_layers):
            next_hidden_state = dcgru_layer(output, hidden_state[layer_num])
            hidden_states.append(next_hidden_state)
            output = next_hidden_state
        return output, torch.stack(hidden_states)  # runs in O(num_layers) so not too slow
class DecoderModel(nn.Module, Seq2SeqAttrs):
    """DCRNN decoder: DCGRU stack plus a linear projection to output_dim."""
    def __init__(self, adj_mx, **model_kwargs):
        # super().__init__(is_training, adj_mx, **model_kwargs)
        nn.Module.__init__(self)
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.output_dim = int(model_kwargs.get('output_dim', 1))
        self.horizon = int(model_kwargs.get('horizon', 1))  # for the decoder
        self.projection_layer = nn.Linear(self.rnn_units, self.output_dim)
        self.dcgru_layers = nn.ModuleList(
            [DCGRUCell(self.rnn_units, adj_mx, self.max_diffusion_step, self.num_nodes,
                       filter_type=self.filter_type) for _ in range(self.num_rnn_layers)])

    def forward(self, inputs, hidden_state=None):
        """
        Decoder forward pass for a single time step.

        :param inputs: shape (batch_size, self.num_nodes * self.output_dim)
        :param hidden_state: (num_layers, batch_size, self.hidden_state_size)
               optional, zeros if not provided
        :return: output: # shape (batch_size, self.num_nodes * self.output_dim)
                 hidden_state # shape (num_layers, batch_size, self.hidden_state_size)
                 (lower indices mean lower layers)
        """
        hidden_states = []
        output = inputs
        for layer_num, dcgru_layer in enumerate(self.dcgru_layers):
            next_hidden_state = dcgru_layer(output, hidden_state[layer_num])
            hidden_states.append(next_hidden_state)
            output = next_hidden_state
        # Project per-node hidden vectors to the output dimension, then
        # flatten back to (batch, num_nodes * output_dim).
        projected = self.projection_layer(output.view(-1, self.rnn_units))
        output = projected.view(-1, self.num_nodes * self.output_dim)
        return output, torch.stack(hidden_states)
class DCRNNModel(nn.Module, Seq2SeqAttrs):
    """Full DCRNN seq2seq model: encoder + decoder with scheduled sampling."""
    def __init__(self, adj_mx, logger, **model_kwargs):
        super().__init__()
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.encoder_model = EncoderModel(adj_mx, **model_kwargs)
        self.decoder_model = DecoderModel(adj_mx, **model_kwargs)
        self.cl_decay_steps = int(model_kwargs.get('cl_decay_steps', 1000))
        self.use_curriculum_learning = bool(model_kwargs.get('use_curriculum_learning', False))
        self._logger = logger

    def _compute_sampling_threshold(self, batches_seen):
        # Inverse-sigmoid schedule: the probability of feeding ground truth to
        # the decoder decays as batches_seen grows.
        return self.cl_decay_steps / (
                self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps))

    def encoder(self, inputs):
        """
        encoder forward pass on t time steps
        :param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
        :return: encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        """
        encoder_hidden_state = None
        for t in range(self.encoder_model.seq_len):
            _, encoder_hidden_state = self.encoder_model(inputs[t], encoder_hidden_state)
        return encoder_hidden_state

    def decoder(self, encoder_hidden_state, labels=None, batches_seen=None):
        """
        Decoder forward pass
        :param encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        :param labels: (self.horizon, batch_size, self.num_nodes * self.output_dim) [optional, not exist for inference]
        :param batches_seen: global step [optional, not exist for inference]
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        batch_size = encoder_hidden_state.size(1)
        # Start decoding from an all-zero "GO" symbol.
        go_symbol = torch.zeros((batch_size, self.num_nodes * self.decoder_model.output_dim),
                                device=device)
        decoder_hidden_state = encoder_hidden_state
        decoder_input = go_symbol
        outputs = []
        for t in range(self.decoder_model.horizon):
            decoder_output, decoder_hidden_state = self.decoder_model(decoder_input,
                                                                      decoder_hidden_state)
            decoder_input = decoder_output
            outputs.append(decoder_output)
            # Scheduled sampling: during training, sometimes feed the ground
            # truth instead of the model's own prediction.
            if self.training and self.use_curriculum_learning:
                c = np.random.uniform(0, 1)
                if c < self._compute_sampling_threshold(batches_seen):
                    decoder_input = labels[t]
        outputs = torch.stack(outputs)
        return outputs

    def forward(self, inputs, labels=None, batches_seen=None):
        """
        seq2seq forward pass
        :param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
        :param labels: shape (horizon, batch_size, num_sensor * output)
        :param batches_seen: batches seen till now
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        encoder_hidden_state = self.encoder(inputs)
        self._logger.debug("Encoder complete, starting decoder")
        outputs = self.decoder(encoder_hidden_state, labels, batches_seen=batches_seen)
        self._logger.debug("Decoder complete")
        if batches_seen == 0:
            self._logger.info(
                "Total trainable parameters {}".format(count_parameters(self))
            )
        return outputs
####################################################################################################################
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class TimeBlock(nn.Module):
    """Gated temporal convolution applied to every graph node in isolation."""

    def __init__(self, in_channels, out_channels, kernel_size=3):
        """
        :param in_channels: number of input features per node per time step.
        :param out_channels: number of output channels per node per time step.
        :param kernel_size: size of the 1D temporal kernel.
        """
        super(TimeBlock, self).__init__()
        kernel = (1, kernel_size)  # convolve along time only, never across nodes
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel)
        self.conv2 = nn.Conv2d(in_channels, out_channels, kernel)
        self.conv3 = nn.Conv2d(in_channels, out_channels, kernel)

    def forward(self, X):
        """
        :param X: input of shape (batch, num_nodes, num_timesteps, in_channels).
        :return: output of shape (batch, num_nodes, num_timesteps_out, out_channels).
        """
        # NHWC -> NCHW so Conv2d can operate.
        nchw = X.permute(0, 3, 1, 2)
        gated = self.conv1(nchw) + torch.sigmoid(self.conv2(nchw))
        activated = F.relu(gated + self.conv3(nchw))
        # NCHW -> NHWC for the caller.
        return activated.permute(0, 2, 3, 1)
class STGCNBlock(nn.Module):
    """
    Neural network block that applies a temporal convolution on each node in
    isolation, followed by a graph convolution, followed by another temporal
    convolution on each node.
    """
    def __init__(self, in_channels, spatial_channels, out_channels,
                 num_nodes):
        """
        :param in_channels: Number of input features at each node in each time
        step.
        :param spatial_channels: Number of output channels of the graph
        convolutional, spatial sub-block.
        :param out_channels: Desired number of output features at each node in
        each time step.
        :param num_nodes: Number of nodes in the graph.
        """
        super(STGCNBlock, self).__init__()
        self.temporal1 = TimeBlock(in_channels=in_channels,
                                   out_channels=out_channels)
        # Graph-convolution weight: mixes out_channels -> spatial_channels.
        self.Theta1 = nn.Parameter(torch.FloatTensor(out_channels,
                                                     spatial_channels))
        self.temporal2 = TimeBlock(in_channels=spatial_channels,
                                   out_channels=out_channels)
        # BatchNorm over the node axis (input layout is B, N, T, C).
        self.batch_norm = nn.BatchNorm2d(num_nodes)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init scaled by 1/sqrt(fan) of Theta1's second dimension.
        stdv = 1. / math.sqrt(self.Theta1.shape[1])
        self.Theta1.data.uniform_(-stdv, stdv)

    def forward(self, X, A_hat):
        """
        :param X: Input data of shape (batch_size, num_nodes, num_timesteps,
        num_features=in_channels).
        :param A_hat: Normalized adjacency matrix.
        :return: Output data of shape (batch_size, num_nodes,
        num_timesteps_out, num_features=out_channels).
        """
        t = self.temporal1(X)
        # Neighborhood aggregation: contract A_hat against the node axis.
        lfs = torch.einsum("ij,jklm->kilm", [A_hat, t.permute(1, 0, 2, 3)])
        # t2 = F.relu(torch.einsum("ijkl,lp->ijkp", [lfs, self.Theta1]))
        t2 = F.relu(torch.matmul(lfs, self.Theta1))
        t3 = self.temporal2(t2)
        return self.batch_norm(t3)
        # return t3
class STGCN(nn.Module):
    """
    Spatio-temporal graph convolutional network as described in
    https://arxiv.org/abs/1709.04875v3 by Yu et al.
    Input should have shape (batch_size, num_nodes, num_input_time_steps,
    num_features).
    """
    def __init__(self, num_nodes, num_features, num_timesteps_input,
                 num_timesteps_output):
        """
        :param num_nodes: Number of nodes in the graph.
        :param num_features: Number of features at each node in each time step.
        :param num_timesteps_input: Number of past time steps fed into the
        network.
        :param num_timesteps_output: Desired number of future time steps
        output by the network.
        """
        super(STGCN, self).__init__()
        self.block1 = STGCNBlock(in_channels=num_features, out_channels=64,
                                 spatial_channels=16, num_nodes=num_nodes)
        self.block2 = STGCNBlock(in_channels=64, out_channels=64,
                                 spatial_channels=16, num_nodes=num_nodes)
        self.last_temporal = TimeBlock(in_channels=64, out_channels=64)
        # Each kernel-3 temporal conv trims 2 time steps; five such convs
        # (2 per STGCNBlock + the final TimeBlock) remove 2*5 steps in total.
        self.fully = nn.Linear((num_timesteps_input - 2 * 5) * 64,
                               num_timesteps_output)
        self.num_nodes = num_nodes
        self.input_dim = num_features
        self.seq_len = num_timesteps_input
        self.horizon = num_timesteps_output

    def forward(self, A_hat, X):
        """
        :param X: Input data of shape (batch_size, num_nodes, num_timesteps,
        num_features=in_channels).
        :param A_hat: Normalized adjacency matrix.
        """
        # Reshape from the seq2seq layout (seq_len, batch, nodes*input_dim)
        # into STGCN's (batch, nodes, time, features) layout.
        X = X.view(self.seq_len, -1, self.num_nodes, self.input_dim).permute(1, 2, 0, 3).contiguous()
        out1 = self.block1(X, A_hat)
        out2 = self.block2(out1, A_hat)
        out3 = self.last_temporal(out2)
        out4 = self.fully(out3.reshape((out3.shape[0], out3.shape[1], -1)))
        # Back to (horizon, batch, nodes) for the seq2seq-style caller.
        return out4.permute(2, 0, 1).contiguous()
        # :return: x: shape (seq_len, batch_size, num_sensor * input_dim)
        # y: shape (horizon, batch_size, num_sensor * output_dim)
####################################################################################################################
import math
import random
from typing import List, Tuple
import numpy as np
import dgl
import torch
from dgl import DGLGraph, init
from torch import nn, Tensor
class MultiLayerPerception(nn.Sequential):
    """MLP built as nn.Sequential: Linear layers sized by `hiddens`, with
    `hidden_act` between layers and, iff `out_act`, after the last one too."""

    def __init__(self, hiddens: List[int], hidden_act, out_act: bool):
        super(MultiLayerPerception, self).__init__()
        n_layers = len(hiddens) - 1
        for idx in range(n_layers):
            self.add_module(f'Layer{idx + 1}', nn.Linear(hiddens[idx], hiddens[idx + 1]))
            # Inner layers always get an activation; the last only if out_act.
            if idx + 1 < n_layers or out_act:
                self.add_module(f'Activation{idx + 1}', hidden_act())
class MetaDense(nn.Module):
    """Dense layer whose per-node weights and bias are generated by MLPs from
    that node's meta-feature vector."""
    def __init__(self, f_in: int, f_out: int, feat_size: int, meta_hiddens: List[int]):
        super(MetaDense, self).__init__()
        # Meta-learners: map a node feature vector to its own W and b.
        self.weights_mlp = MultiLayerPerception([feat_size] + meta_hiddens + [f_in * f_out], nn.Sigmoid, False)
        self.bias_mlp = MultiLayerPerception([feat_size] + meta_hiddens + [f_out], nn.Sigmoid, False)

    def forward(self, feature: Tensor, data: Tensor) -> Tensor:
        """
        :param feature: tensor, [N, F]
        :param data: tensor, [B, N, F_in]
        :return: tensor, [B, N, F_out]
        """
        b, n, f_in = data.shape
        data = data.reshape(b, n, 1, f_in)
        weights = self.weights_mlp(feature).reshape(1, n, f_in, -1)  # [F_in, F_out]
        bias = self.bias_mlp(feature)  # [n, F_out]
        # Per-node matmul: [B,N,1,F_in] @ [1,N,F_in,F_out] -> squeeze -> [B,N,F_out]
        return data.matmul(weights).squeeze(2) + bias
class RNNCell(nn.Module):
    """Abstract recurrent cell; forward() unrolls one_step() over the time axis."""

    def __init__(self):
        super(RNNCell, self).__init__()

    def one_step(self, feature: Tensor, data: Tensor, begin_state: Tensor = None) -> Tuple[Tensor, Tensor]:
        """
        Single time-step update; subclasses must override.

        :param feature: tensor, [N, F]
        :param data: tensor, [B, N, F]
        :param begin_state: None or tensor, [B, N, F]
        :return: (output [B, N, F], new state [B, N, F])
        """
        raise NotImplementedError("Not Implemented")

    def forward(self, feature: Tensor, data: Tensor, begin_state: Tensor = None) -> Tuple[Tensor, Tensor]:
        """
        Unroll the cell over data[:, t] for every time step t.

        :param feature: tensor, [N, F]
        :param data: tensor, [B, T, N, F]
        :param begin_state: None or tensor, [B, N, F]
        :return: (stacked outputs [B, T, N, F], final state [B, N, F])
        """
        num_steps = data.shape[1]
        state = begin_state
        step_outputs = []
        for step in range(num_steps):
            out, state = self.one_step(feature, data[:, step], state)
            step_outputs.append(out)
        return torch.stack(step_outputs, 1), state
class MetaGRUCell(RNNCell):
    """GRU-style cell whose gate transforms are MetaDense layers (weights
    generated per node from meta features)."""
    def __init__(self, f_in: int, hid_size: int, feat_size: int, meta_hiddens: List[int]):
        super(MetaGRUCell, self).__init__()
        self.hidden_size = hid_size
        # Update (z) and reset (r) gates computed jointly from [input, state].
        self.dense_zr = MetaDense(f_in + hid_size, 2 * hid_size, feat_size, meta_hiddens=meta_hiddens)
        self.dense_i2h = MetaDense(f_in, hid_size, feat_size, meta_hiddens=meta_hiddens)
        self.dense_h2h = MetaDense(hid_size, hid_size, feat_size, meta_hiddens=meta_hiddens)

    def one_step(self, feature: Tensor, data: Tensor, begin_state: Tensor = None) -> Tuple[Tensor, Tensor]:
        b, n, _ = data.shape
        if begin_state is None:
            begin_state = torch.zeros(b, n, self.hidden_size, dtype=data.dtype, device=data.device)
        data_and_state = torch.cat([data, begin_state], -1)
        zr = torch.sigmoid(self.dense_zr(feature, data_and_state))
        z, r = zr.split(self.hidden_size, -1)
        c = torch.tanh(self.dense_i2h(feature, data))
        # NOTE(review): h2h(r*state) is added OUTSIDE the tanh, which differs
        # from the textbook GRU candidate tanh(i2h(x) + h2h(r*h)); kept as written.
        h = self.dense_h2h(feature, r * begin_state)
        state = z * begin_state + torch.sub(1., z) * c + h
        return state, state
class NormalGRUCell(RNNCell):
    """Plain (non-meta) GRU cell; the node meta-feature argument is ignored."""

    def __init__(self, f_in: int, hid_size: int):
        super(NormalGRUCell, self).__init__()
        self.cell = nn.GRUCell(f_in, hid_size)

    def one_step(self, feature: Tensor, data: Tensor, begin_state: Tensor = None) -> Tuple[Tensor, Tensor]:
        batch, nodes, _ = data.shape
        # Fold the node axis into the batch so one GRUCell serves all nodes.
        flat_in = data.reshape(batch * nodes, -1)
        flat_state = None if begin_state is None else begin_state.reshape(batch * nodes, -1)
        new_state = self.cell(flat_in, flat_state)
        new_state = new_state.reshape(batch, nodes, -1)
        return new_state, new_state
import sys
class GraphAttNet(nn.Module):
    """Attention over a fixed DGL graph built from a distance matrix and an
    edge list; message/reduce functions are supplied by subclasses."""
    def __init__(self, dist: np.ndarray, edge: list, hid_size: int, feat_size: int, meta_hiddens: List[int]):
        super(GraphAttNet, self).__init__()
        self.hidden_size = hid_size
        self.feature_size = feat_size
        self.meta_hiddens = meta_hiddens
        self.num_nodes = n = dist.shape[0]
        # Build directed edges j -> i for every neighbor j listed in edge[i],
        # carrying the pairwise distance as edge data.
        src, dst, dis = list(), list(), list()
        for i in range(n):
            for j in edge[i]:
                src.append(j)
                dst.append(i)
                dis.append(dist[j, i])
        dist = torch.tensor(dis).unsqueeze_(1)
        g = DGLGraph()
        g.set_n_initializer(init.zero_initializer)
        g.add_nodes(n)
        g.add_edges(src, dst, {'dist': dist})
        self.graph = g

    def forward(self, state: Tensor, feature: Tensor) -> Tensor:
        """
        :param state: tensor, [B, T, N, F] or [B, N, F]
        :param feature: tensor, [N, F]
        :return: tensor, [B, T, N, F]
        """
        # Move the node axis to the front: [B, T, N, F] -> [N, B, T, F]
        # (or [B, N, F] -> [N, B, F]).
        state = state.unsqueeze(0).transpose(0, -2).squeeze(-2)
        g = self.graph.local_var()
        # NOTE(review): in recent DGL versions graph.to(device) RETURNS a new
        # graph instead of moving in place — verify against the pinned DGL.
        g.to(state.device)
        g.ndata['state'] = state
        g.ndata['feature'] = feature
        g.update_all(self.msg_edge, self.msg_reduce)
        state = g.ndata.pop('new_state')
        # Move the node axis back: [N, B, T, F] -> [B, T, N, F].
        return state.unsqueeze(-2).transpose(0, -2).squeeze(0)

    def msg_edge(self, edge: dgl.EdgeBatch):
        """
        :param edge: a dictionary of edge data.
        edge.src['state'] and edge.dst['state']: hidden states of the nodes, with shape [e, b, t, d] or [e, b, d]
        edge.src['feature'] and edge.dst['state']: features of the nodes, with shape [e, d]
        edge.data['dist']: distance matrix of the edges, with shape [e, d]
        :return: a dictionray of messages
        """
        raise NotImplementedError('Not implemented.')

    def msg_reduce(self, node: dgl.NodeBatch):
        """
        :param node:
        node.mailbox['state'], tensor, [n, e, b, t, d] or [n, e, b, d]
        node.mailbox['alpha'], tensor, [n, e, b, t, d] or [n, e, b, d]
        :return: tensor, [n, b, t, d] or [n, b, d]
        """
        raise NotImplementedError('Not implemented.')
class MetaGAT(GraphAttNet):
    """Graph attention whose per-edge projection weights are meta-generated
    from the two endpoint features plus the edge distance."""

    def __init__(self, *args, **kwargs):
        super(MetaGAT, self).__init__(*args, **kwargs)
        # Maps [src_feat, dst_feat, dist] -> a flattened (2H x H) weight per edge.
        self.w_mlp = MultiLayerPerception(
            [self.feature_size * 2 + 1] + self.meta_hiddens + [self.hidden_size * 2 * self.hidden_size],
            nn.Sigmoid, False)
        self.act = nn.LeakyReLU()
        # Learnable scalar gate on the aggregated message; initialised to 0,
        # so the initial gate value is sigmoid(0) = 0.5.
        self.weight = nn.Parameter(torch.tensor(0.0), requires_grad=True)

    def msg_edge(self, edge: dgl.EdgeBatch):
        # Attention logits from the two endpoint states, projected by the
        # meta-generated per-edge weight matrix.
        state = torch.cat([edge.src['state'], edge.dst['state']], -1)  # [X, B, T, 2H] or [X, B, 2H]
        feature = torch.cat([edge.src['feature'], edge.dst['feature'], edge.data['dist']], -1)  # [X, 2F + 1]
        weight = self.w_mlp(feature).reshape(-1, self.hidden_size * 2, self.hidden_size)  # [X, 2H, H]
        shape = state.shape
        state = state.reshape(shape[0], -1, shape[-1])
        # [X, ?, 2H] * [X. 2H, H] => [X, ?, H]
        alpha = self.act(torch.bmm(state, weight))
        alpha = alpha.reshape(*shape[:-1], self.hidden_size)
        return {'alpha': alpha, 'state': edge.src['state']}

    def msg_reduce(self, node: dgl.NodeBatch):
        # Softmax attention over incoming edges, then gated ReLU aggregation.
        state = node.mailbox['state']
        alpha = node.mailbox['alpha']
        alpha = torch.softmax(alpha, 1)
        new_state = torch.relu(torch.sum(alpha * state, dim=1)) * torch.sigmoid(self.weight)
        return {'new_state': new_state}
class STMetaEncoder(nn.Module):
    """Encoder: a stack of recurrent layers with (currently disabled) graph
    attention pairs between consecutive layers."""

    def __init__(self, input_dim: int, rnn_types: List[str], rnn_hiddens: List[int], feat_size: int,
                 meta_hiddens: List[int], graph: Tuple[np.ndarray, list, list]):
        """
        :param input_dim: per-node input feature dimension.
        :param rnn_types: one of 'NormalGRU' / 'MetaGRU' for each layer.
        :param rnn_hiddens: hidden size of each recurrent layer.
        :param feat_size: node meta-feature dimension.
        :param meta_hiddens: hidden sizes of the meta-learner MLPs.
        :param graph: (distance matrix, in-edge adjacency list, out-edge adjacency list).
        """
        super(STMetaEncoder, self).__init__()
        dist, e_in, e_out = graph
        grus, gats = list(), list()
        rnn_hiddens = [input_dim] + rnn_hiddens
        for i, rnn_type in enumerate(rnn_types):
            in_dim, out_dim = rnn_hiddens[i], rnn_hiddens[i + 1]
            if rnn_type == 'NormalGRU':
                grus.append(NormalGRUCell(in_dim, out_dim))
            elif rnn_type == 'MetaGRU':
                grus.append(MetaGRUCell(in_dim, out_dim, feat_size, meta_hiddens))
            else:
                raise ValueError(f'{rnn_type} is not implemented.')
            # No graph-attention pair after the last recurrent layer.
            if i == len(rnn_types) - 1:
                break
            g1 = MetaGAT(dist.T, e_in, out_dim, feat_size, meta_hiddens)
            g2 = MetaGAT(dist, e_out, out_dim, feat_size, meta_hiddens)
            gats.append(nn.ModuleList([g1, g2]))
        self.grus = nn.ModuleList(grus)
        self.gats = nn.ModuleList(gats)

    def forward(self, feature: Tensor, data: Tensor) -> List[Tensor]:
        """
        :param feature: tensor, [N, F]
        :param data: tensor, [B, T, N, F]
        :return: list of tensors
        """
        states = list()
        for depth, (g1, g2) in enumerate(self.gats):
            data, state = self.grus[depth](feature, data)
            states.append(state)
            # data = g1(data, feature) + g2(data, feature)
        # for/else: the else branch always executes (the loop never breaks);
        # it applies the final recurrent layer, which has no GAT pair.
        else:
            _, state = self.grus[-1](feature, data)
            states.append(state)
        return states
class STMetaDecoder(nn.Module):
    """Autoregressive decoder producing `n_preds` steps from the encoder states."""

    def __init__(self, n_preds: int, output_dim: int, rnn_types: List[str], rnn_hiddens: List[int], feat_size: int,
                 meta_hiddens: List[int], graph: Tuple[np.ndarray, list, list], input_dim):
        """
        :param n_preds: number of decoding steps (prediction horizon).
        :param output_dim: per-node predicted output dimension.
        :param rnn_types: 'NormalGRU' or 'MetaGRU' per layer.
        :param rnn_hiddens: hidden size of each recurrent layer.
        :param feat_size: node meta-feature dimension.
        :param meta_hiddens: hidden sizes of the meta-learner MLPs.
        :param graph: (distance matrix, in-edges, out-edges).
        :param input_dim: per-node decoder input dimension (prediction + auxiliary channels).
        """
        super(STMetaDecoder, self).__init__()
        self.output_dim = output_dim
        self.n_preds = n_preds
        dist, e_in, e_out = graph
        grus, gats = list(), list()
        # rnn_hiddens = [output_dim] + rnn_hiddens
        rnn_hiddens = [input_dim] + rnn_hiddens
        self.input_dim = input_dim
        for i, rnn_type in enumerate(rnn_types):
            in_dim, out_dim = rnn_hiddens[i], rnn_hiddens[i + 1]
            if rnn_type == 'NormalGRU':
                grus.append(NormalGRUCell(in_dim, out_dim))
            elif rnn_type == 'MetaGRU':
                grus.append(MetaGRUCell(in_dim, out_dim, feat_size, meta_hiddens))
            else:
                raise ValueError(f'{rnn_type} is not implemented.')
            # No graph-attention pair after the last recurrent layer.
            if i == len(rnn_types) - 1:
                break
            g1 = MetaGAT(dist.T, e_in, out_dim, feat_size, meta_hiddens)
            g2 = MetaGAT(dist, e_out, out_dim, feat_size, meta_hiddens)
            gats.append(nn.ModuleList([g1, g2]))
        self.grus = nn.ModuleList(grus)
        self.gats = nn.ModuleList(gats)
        # Projects the top hidden state to the per-node output dimension.
        self.out = nn.Linear(rnn_hiddens[1], output_dim)

    # def sampling(self):
    #     """ Schedule sampling: sampling the ground truth. """
    #     threshold = self.cl_decay_steps / (self.cl_decay_steps + math.exp(self.global_steps / self.cl_decay_steps))
    #     return float(random.random() < threshold)

    def forward(self, feature: Tensor, begin_states: List[Tensor], targets: Tensor = None,
                teacher_force: bool = 0.5) -> Tensor:
        """
        :param feature: tensor, [N, F]
        :param begin_states: list of tensors, each of [B, N, hidden_size]
        :param targets: none or tensor, [B, T, N, input_dim]
        :param teacher_force: float, random to use targets as decoder inputs

        NOTE(review): `targets` is dereferenced unconditionally below (the `aux`
        slice), so despite its None default it is effectively required — confirm
        against the callers.
        :return:
        """
        b, n, _ = begin_states[0].shape
        label = targets
        # Auxiliary (known) future channels beyond the predicted ones.
        aux = label[:,:,:, self.output_dim:]  # [b,t,n,d]
        # label = label[:,:,:, :self.output_dim]  # [n,b,t,d]
        # "GO" symbol: an all-zero first decoder input.
        go = torch.zeros(b, n, self.input_dim, device=feature.device, dtype=feature.dtype)
        # outputs = list()
        outputs, states = [], begin_states
        for i_pred in range(self.n_preds):
            if i_pred == 0:
                inputs = go
            else:
                # Rebuild a full input_dim input: previous prediction (output_dim)
                # concatenated with the previous step's auxiliary channels.
                inputs = torch.cat([inputs, aux[:, i_pred - 1, :, :]], -1)
            # NOTE(review): states[0]/states[1] index fixed layers — this assumes
            # exactly two recurrent layers; confirm for other configurations.
            for depth, (g1, g2) in enumerate(self.gats):
                inputs, states[0] = self.grus[depth].one_step(feature, inputs, states[0])
                # inputs = (g1(state, feature) + g2(state, feature)) / 2
            # for/else: the else branch always executes (no break) and applies
            # the final recurrent layer.
            else:
                # print(len(self.grus), len(states))
                inputs, states[1] = self.grus[-1].one_step(feature, inputs, states[1])
            inputs = self.out(inputs)
            outputs.append(inputs)
            # if self.training and (targets is not None) and (random.random() < teacher_force):
            #     inputs = targets[:, i_pred]
        return torch.stack(outputs, 1)
class STMetaNet(nn.Module):
    """ST-MetaNet-style seq2seq model: a geo-feature MLP encoder plus
    spatio-temporal encoder/decoder stacks."""

    def __init__(self,
                 graph: Tuple[np.ndarray, list, list],
                 n_preds: int,
                 input_dim: int,
                 output_dim: int,
                 cl_decay_steps: int,
                 rnn_types: List[str],
                 rnn_hiddens: List[int],
                 meta_hiddens: List[int],
                 geo_hiddens: List[int],
                 num_nodes: int):
        """
        :param graph: (distance matrix, in-edge list, out-edge list).
        :param n_preds: prediction horizon (decoder steps).
        :param input_dim: per-node input channels.
        :param output_dim: per-node predicted channels.
        :param cl_decay_steps: decay constant of the curriculum-learning schedule.
        :param rnn_types: recurrent cell type per layer.
        :param rnn_hiddens: hidden size per recurrent layer.
        :param meta_hiddens: hidden sizes of the meta-learner MLPs.
        :param geo_hiddens: MLP sizes for the geo-feature encoder; the first
            element is the raw geo-feature input dimension.
        :param num_nodes: number of graph nodes.
        """
        super(STMetaNet, self).__init__()
        feat_size = geo_hiddens[-1]
        self.cl_decay_steps = cl_decay_steps
        self.encoder = STMetaEncoder(input_dim, rnn_types, rnn_hiddens, feat_size, meta_hiddens, graph)
        self.decoder = STMetaDecoder(n_preds, output_dim, rnn_types, rnn_hiddens, feat_size, meta_hiddens, graph, input_dim)
        # Embeds raw geo features into feat_size-dimensional meta features.
        self.geo_encoder = MultiLayerPerception(geo_hiddens, hidden_act=nn.ReLU, out_act=True)
        features = graph[0]
        # self.num_nodes = features.shape[0]
        self.num_nodes = num_nodes
        self.input_dim = input_dim
        self.output_dim = output_dim
        # NOTE(review): encoder length is hard-coded — confirm it matches the data pipeline.
        self.seq_len = 12
        self.horizon = n_preds

    def forward(self, feature: Tensor, inputs: Tensor, targets: Tensor = None, batch_seen: int = 0) -> Tensor:
        """
        dynamic convolutional recurrent neural network
        :param feature: [N, d]
        :param inputs: [B, n_hist, N, input_dim]
        :param targets: exists for training, tensor, [B, n_pred, N, output_dim]
        :param batch_seen: int, the number of batches the model has seen
        :return: [B, n_pred, N, output_dim]
        """
        # Incoming tensors are (seq_len, batch, nodes*dim); reshape to [B, T, N, D].
        inputs = inputs.view(self.seq_len, -1, self.num_nodes, self.input_dim).permute(1, 0, 2, 3).contiguous()
        targets = targets.view(self.horizon, -1, self.num_nodes, self.input_dim).permute(1, 0, 2, 3).contiguous()
        feature = self.geo_encoder(feature.float())
        states = self.encoder(feature, inputs)
        # targets = None
        outputs = self.decoder(feature, states, targets, self._compute_sampling_threshold(batch_seen))
        # Back to (horizon, batch, nodes*output_dim) for the training framework.
        return outputs.permute(1, 0, 2, 3).contiguous().view(self.horizon, -1, self.num_nodes * self.output_dim)

    def _compute_sampling_threshold(self, batches_seen: int):
        # Inverse-sigmoid decay of the teacher-forcing probability.
        return self.cl_decay_steps / (self.cl_decay_steps + math.exp(batches_seen / self.cl_decay_steps))
# :return: x: shape (seq_len, batch_size, num_sensor * input_dim)
# y: shape (horizon, batch_size, num_sensor * output_dim)
# def test():
# dist = np.random.randn(207, 207)
# edge1, edge2 = [[] for _ in range(207)], [[] for _ in range(207)]
# for i in range(207):
# for j in range(207):
# if np.random.random() < 0.2:
# edge1[i].append(j)
# edge2[j].append(i)
# me = STMetaEncoder(2, 32, 32, 32, [32, 4], (dist, edge1, edge2), 32)
# md = STMetaDecoder(12, 1, 32, 32, 32, [32, 4], (dist, edge1, edge2), 32)
# data = torch.randn(31, 12, 207, 2)
# feature = torch.randn(207, 32)
# states = me(feature, data)
# print(states[0].shape, states[1].shape)
# outputs = md(feature, states)
# m = STMetaNet((dist, edge1, edge2), 12, 2, 1, 2000, ['NormalGRU', 'MetaGRU'], [], [16, 2], [32, 32])
| 29,536 | 40.31049 | 124 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/LSTM/model/pytorch/dcrnn_cell.py | import numpy as np
import torch
from lib import utils
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LayerParams:
    """Shape-keyed lazy weight/bias store.

    Each tensor is created on first request, Xavier/constant initialised, cached,
    and registered as a parameter on the owning RNN module so the optimiser
    picks it up.
    """

    def __init__(self, rnn_network: torch.nn.Module, layer_type: str):
        self._rnn_network = rnn_network
        self._params_dict = {}
        self._biases_dict = {}
        self._type = layer_type

    def get_weights(self, shape):
        """Return the (cached) Xavier-initialised weight tensor of `shape`."""
        try:
            return self._params_dict[shape]
        except KeyError:
            weight = torch.nn.Parameter(torch.empty(*shape, device=device))
            torch.nn.init.xavier_normal_(weight)
            self._params_dict[shape] = weight
            self._rnn_network.register_parameter(
                '{}_weight_{}'.format(self._type, str(shape)), weight)
            return weight

    def get_biases(self, length, bias_start=0.0):
        """Return the (cached) bias vector of `length`, filled with `bias_start`."""
        try:
            return self._biases_dict[length]
        except KeyError:
            bias = torch.nn.Parameter(torch.empty(length, device=device))
            torch.nn.init.constant_(bias, bias_start)
            self._biases_dict[length] = bias
            self._rnn_network.register_parameter(
                '{}_biases_{}'.format(self._type, str(length)), bias)
            return bias
class DCGRUCell(torch.nn.Module):
    """Diffusion-convolutional GRU cell (DCRNN): GRU gates computed with a
    K-step diffusion convolution over the sensor graph."""

    def __init__(self, num_units, adj_mx, max_diffusion_step, num_nodes, nonlinearity='tanh',
                 filter_type="laplacian", use_gc_for_ru=True):
        """
        :param num_units: hidden units per node.
        :param adj_mx: adjacency matrix of the sensor graph.
        :param max_diffusion_step: number of diffusion steps K.
        :param num_nodes: number of graph nodes.
        :param nonlinearity: 'tanh'; any other value selects relu.
        :param filter_type: "laplacian", "random_walk", "dual_random_walk".
        :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
        """
        super().__init__()
        self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu
        # support other nonlinearities up here?
        self._num_nodes = num_nodes
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._supports = []
        self._use_gc_for_ru = use_gc_for_ru
        supports = []
        # Build the graph filter(s) ("supports") for the chosen diffusion type.
        if filter_type == "laplacian":
            supports.append(utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
        elif filter_type == "random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
        elif filter_type == "dual_random_walk":
            # forward and backward random-walk matrices
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
            supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
        else:
            supports.append(utils.calculate_scaled_laplacian(adj_mx))
        for support in supports:
            self._supports.append(self._build_sparse_matrix(support))
        # Lazily-created parameters for the dense / graph-conv projections.
        self._fc_params = LayerParams(self, 'fc')
        self._gconv_params = LayerParams(self, 'gconv')

    @staticmethod
    def _build_sparse_matrix(L):
        """Convert a scipy sparse matrix to a torch sparse COO tensor on `device`."""
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        # this is to ensure row-major ordering to equal torch.sparse.sparse_reorder(L)
        indices = indices[np.lexsort((indices[:, 0], indices[:, 1]))]
        L = torch.sparse_coo_tensor(indices.T, L.data, L.shape, device=device)
        return L

    def forward(self, inputs, hx):
        """Gated recurrent unit (GRU) with Graph Convolution.
        :param inputs: (B, num_nodes * input_dim)
        :param hx: (B, num_nodes * rnn_units)

        :return
        - Output: A `2-D` tensor with shape `(B, num_nodes * rnn_units)`.
        """
        output_size = 2 * self._num_units
        # Reset/update gates via graph convolution or a plain dense layer.
        if self._use_gc_for_ru:
            fn = self._gconv
        else:
            fn = self._fc
        value = torch.sigmoid(fn(inputs, hx, output_size, bias_start=1.0))
        value = torch.reshape(value, (-1, self._num_nodes, output_size))
        r, u = torch.split(tensor=value, split_size_or_sections=self._num_units, dim=-1)
        r = torch.reshape(r, (-1, self._num_nodes * self._num_units))
        u = torch.reshape(u, (-1, self._num_nodes * self._num_units))
        # Candidate state computed from the reset-gated previous state.
        c = self._gconv(inputs, r * hx, self._num_units)
        if self._activation is not None:
            c = self._activation(c)
        new_state = u * hx + (1.0 - u) * c
        return new_state

    @staticmethod
    def _concat(x, x_):
        # Append x_ as a new leading "diffusion order" slice of x.
        x_ = x_.unsqueeze(0)
        return torch.cat([x, x_], dim=0)

    def _fc(self, inputs, state, output_size, bias_start=0.0):
        """Per-node dense projection of [inputs, state] (no graph structure)."""
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size * self._num_nodes, -1))
        state = torch.reshape(state, (batch_size * self._num_nodes, -1))
        inputs_and_state = torch.cat([inputs, state], dim=-1)
        input_size = inputs_and_state.shape[-1]
        weights = self._fc_params.get_weights((input_size, output_size))
        value = torch.sigmoid(torch.matmul(inputs_and_state, weights))
        biases = self._fc_params.get_biases(output_size, bias_start)
        value += biases
        return value

    def _gconv(self, inputs, state, output_size, bias_start=0.0):
        """Diffusion graph convolution of [inputs, state].

        Uses the Chebyshev-style recursion x_k = 2*S@x_{k-1} - x_{k-2} for each
        support matrix S up to `self._max_diffusion_step`.
        """
        # Reshape input and state to (batch_size, num_nodes, input_dim/state_dim)
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size, self._num_nodes, -1))
        state = torch.reshape(state, (batch_size, self._num_nodes, -1))
        inputs_and_state = torch.cat([inputs, state], dim=2)
        input_size = inputs_and_state.size(2)
        x = inputs_and_state
        x0 = x.permute(1, 2, 0)  # (num_nodes, total_arg_size, batch_size)
        x0 = torch.reshape(x0, shape=[self._num_nodes, input_size * batch_size])
        x = torch.unsqueeze(x0, 0)
        if self._max_diffusion_step == 0:
            pass
        else:
            for support in self._supports:
                x1 = torch.sparse.mm(support, x0)
                x = self._concat(x, x1)
                for k in range(2, self._max_diffusion_step + 1):
                    x2 = 2 * torch.sparse.mm(support, x1) - x0
                    x = self._concat(x, x2)
                    # NOTE(review): this also rebinds x0, so for multiple supports
                    # with K >= 2 the next support no longer starts from the raw
                    # features — same as the reference DCRNN port; confirm intended.
                    x1, x0 = x2, x1
        num_matrices = len(self._supports) * self._max_diffusion_step + 1  # Adds for x itself.
        x = torch.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size])
        x = x.permute(3, 1, 2, 0)  # (batch_size, num_nodes, input_size, order)
        x = torch.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices])
        weights = self._gconv_params.get_weights((input_size * num_matrices, output_size))
        x = torch.matmul(x, weights)  # (batch_size * self._num_nodes, output_size)
        biases = self._gconv_params.get_biases(output_size, bias_start)
        x += biases
        # Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim)
        return torch.reshape(x, [batch_size, self._num_nodes * output_size])
| 6,939 | 41.576687 | 105 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/LSTM/model/pytorch/utils.py | import torch
import numpy as np
def masked_mae_loss(y_pred, y_true):
    """Mean absolute error that ignores entries where the label is exactly 0."""
    weight = (y_true != 0).float()
    weight = weight / weight.mean()
    err = torch.abs(y_pred - y_true) * weight
    # trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
    err[err != err] = 0
    return err.mean()
def masked_mse(preds, labels, null_val=np.nan):
    """Mean squared error over entries whose label differs from `null_val`.

    When `null_val` is NaN, NaN labels are treated as missing instead.
    """
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = labels != null_val
    weight = valid.float()
    weight = weight / torch.mean(weight)
    weight = torch.where(torch.isnan(weight), torch.zeros_like(weight), weight)
    sq_err = (preds - labels) ** 2 * weight
    sq_err = torch.where(torch.isnan(sq_err), torch.zeros_like(sq_err), sq_err)
    return torch.mean(sq_err)
def masked_rmse(preds, labels, null_val=np.nan):
    """Root of the masked MSE (see `masked_mse`)."""
    mse = masked_mse(preds=preds, labels=labels, null_val=null_val)
    return torch.sqrt(mse)
def masked_mae(preds, labels, null_val=np.nan):
    """Mean absolute error over entries whose label differs from `null_val`.

    When `null_val` is NaN, NaN labels are treated as missing instead.
    """
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = labels != null_val
    weight = valid.float()
    weight = weight / torch.mean(weight)
    weight = torch.where(torch.isnan(weight), torch.zeros_like(weight), weight)
    abs_err = torch.abs(preds - labels) * weight
    abs_err = torch.where(torch.isnan(abs_err), torch.zeros_like(abs_err), abs_err)
    return torch.mean(abs_err)
def masked_mape(preds, labels, null_val=np.nan):
    """Masked mean absolute percentage error.

    Entries whose label equals `null_val` (or is NaN when `null_val` is NaN)
    are excluded by zero-weighting them and re-normalising the rest.

    :param preds: predicted tensor.
    :param labels: ground-truth tensor.
    :param null_val: sentinel marking missing labels (NaN by default).
    :return: scalar tensor with the mean absolute percentage error.
    """
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = (labels!=null_val)
    mask = mask.float()
    mask /= torch.mean((mask))
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    # Bug fix: take the absolute value of the whole ratio. The previous
    # `torch.abs(preds - labels) / labels` produced *negative* percentage
    # errors whenever a label was negative.
    loss = torch.abs((preds - labels) / labels)
    loss = loss * mask
    loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
    return torch.mean(loss)
def metric(pred, real):
    """Return (MAE, MAPE, RMSE) as floats, treating zeros in `real` as missing."""
    return (
        masked_mae(pred, real, 0.0).item(),
        masked_mape(pred, real, 0.0).item(),
        masked_rmse(pred, real, 0.0).item(),
    )
def get_normalized_adj(A):
    """
    Returns the degree normalized adjacency matrix D^{-1/2} (A + I) D^{-1/2}.
    """
    n = A.shape[0]
    A_hat = A + np.diag(np.ones(n, dtype=np.float32))  # add self-loops
    degree = np.array(A_hat.sum(axis=1)).reshape(-1)
    degree[degree <= 10e-5] = 10e-5  # Prevent infs
    inv_sqrt = np.reciprocal(np.sqrt(degree))
    return inv_sqrt.reshape(-1, 1) * A_hat * inv_sqrt.reshape(1, -1)
Traffic-Benchmark | Traffic-Benchmark-master/methods/LSTM/model/pytorch/dcrnn_supervisor.py | import os
import time
import numpy as np
import torch
import torch.nn as nn
# from torch.utils.tensorboard import SummaryWriter
from lib import utils
# from model.pytorch.dcrnn_model import DCRNNModel
from model.pytorch.dcrnn_model import STMetaNet
from model.pytorch.utils import masked_mae_loss, metric, get_normalized_adj
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DCRNNSupervisor:
    """Training/evaluation harness: builds the model from a YAML config,
    runs the training loop with LR scheduling and early stopping, and
    reports per-horizon test metrics."""

    def __init__(self, data_type, LOAD_INITIAL, adj_mx, **kwargs):
        """
        :param data_type: dataset tag (e.g. 'la' / 'bay'); used in checkpoint names.
        :param LOAD_INITIAL: if True, resume from the saved best checkpoint.
        :param adj_mx: adjacency matrix of the sensor graph.
        :param kwargs: parsed YAML config with 'data', 'model', 'train' sections.
        """
        self._kwargs = kwargs
        self._data_kwargs = kwargs.get('data')
        self._model_kwargs = kwargs.get('model')
        self._train_kwargs = kwargs.get('train')
        self.max_grad_norm = self._train_kwargs.get('max_grad_norm', 1.)
        # logging.
        self._log_dir = self._get_log_dir(kwargs)
        # self._writer = SummaryWriter('runs/' + self._log_dir)
        log_level = self._kwargs.get('log_level', 'INFO')
        self._logger = utils.get_logger(self._log_dir, __name__, 'info.log', level=log_level)
        # data set
        self._data = utils.load_dataset(**self._data_kwargs)
        self.standard_scaler = self._data['scaler']
        self.num_nodes = int(self._model_kwargs.get('num_nodes', 1))
        self.input_dim = int(self._model_kwargs.get('input_dim', 1))
        self.seq_len = int(self._model_kwargs.get('seq_len'))  # for the encoder
        self.output_dim = int(self._model_kwargs.get('output_dim', 1))
        self.use_curriculum_learning = bool(
            self._model_kwargs.get('use_curriculum_learning', False))
        self.horizon = int(self._model_kwargs.get('horizon', 1))  # for the decoder
        # NOTE(review): the tuple unpacks twice into the same name `e_in_out`,
        # so the first element is discarded — confirm the in/out edge lists
        # are meant to be identical here.
        # features, (dist, e_in_out, e_in_out) = np.load('./data/feat_stmetanet.npy', allow_pickle=True)
        features, (dist, e_in_out, e_in_out) = np.load('./data/feat_stmetanet_metrla.npy', allow_pickle=True)
        # features, (dist, e_in_out, e_in_out) = np.load('/home/lifuxian/BikeNYC/feat_stmetanet.npy', allow_pickle=True)
        self.features = torch.from_numpy(features).to(device)
        # setup model
        # dcrnn_model = DCRNNModel(adj_mx, self._logger, **self._model_kwargs)
        dcrnn_model = STMetaNet(
            graph = (dist, e_in_out, e_in_out),#Tuple[np.ndarray, list, list],
            n_preds = self.horizon,
            input_dim = self.input_dim,
            output_dim = self.output_dim,
            cl_decay_steps = 2000,
            # rnn_types = ['NormalGRU', 'MetaGRU'],
            rnn_types = ['NormalGRU', 'NormalGRU'],
            rnn_hiddens = [32, 32],
            meta_hiddens = [16, 2],
            # geo_hiddens = [20, 32, 32]
            geo_hiddens = [20, 32, 32],  # the first element is the geo-feature input dimension
            num_nodes = self.num_nodes
        )
        self.dcrnn_model = dcrnn_model.cuda() if torch.cuda.is_available() else dcrnn_model
        self._logger.info("Model created")
        self._epoch_num = self._train_kwargs.get('epoch', 0)
        # if self._epoch_num > 0:  # the preset value of self._epoch_num is indeed 0
        #     self.load_model()
        self.data_type = data_type
        self.LOAD_INITIAL = LOAD_INITIAL
        if LOAD_INITIAL:
            self.load_lfx()
        # self.features = torch.from_numpy(get_normalized_adj(adj_mx)).to(device)

    @staticmethod
    def _get_log_dir(kwargs):
        """Return (creating if needed) a run-specific log directory derived
        from the config, unless 'train.log_dir' is given explicitly."""
        log_dir = kwargs['train'].get('log_dir')
        if log_dir is None:
            batch_size = kwargs['data'].get('batch_size')
            learning_rate = kwargs['train'].get('base_lr')
            max_diffusion_step = kwargs['model'].get('max_diffusion_step')
            num_rnn_layers = kwargs['model'].get('num_rnn_layers')
            rnn_units = kwargs['model'].get('rnn_units')
            structure = '-'.join(
                ['%d' % rnn_units for _ in range(num_rnn_layers)])
            horizon = kwargs['model'].get('horizon')
            filter_type = kwargs['model'].get('filter_type')
            filter_type_abbr = 'L'
            if filter_type == 'random_walk':
                filter_type_abbr = 'R'
            elif filter_type == 'dual_random_walk':
                filter_type_abbr = 'DR'
            run_id = 'dcrnn_%s_%d_h_%d_%s_lr_%g_bs_%d_%s/' % (
                filter_type_abbr, max_diffusion_step, horizon,
                structure, learning_rate, batch_size,
                time.strftime('%m%d%H%M%S'))
            base_dir = kwargs.get('base_dir')
            log_dir = os.path.join(base_dir, run_id)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        return log_dir

    # def save_model(self, epoch):
    #     if not os.path.exists('models/'):
    #         os.makedirs('models/')
    #
    #     config = dict(self._kwargs)
    #     config['model_state_dict'] = self.dcrnn_model.state_dict()
    #     config['epoch'] = epoch
    #     torch.save(config, 'models/epo%d.tar' % epoch)
    #     self._logger.info("Saved model at {}".format(epoch))
    #     return 'models/epo%d.tar' % epoch

    def save_model(self, epoch):
        """Save config + weights as a single rolling 'best' checkpoint."""
        path = 'models/%s_best.tar' % self.data_type
        if not os.path.exists('models/'):
            os.makedirs('models/')
        config = dict(self._kwargs)
        config['model_state_dict'] = self.dcrnn_model.state_dict()
        config['epoch'] = epoch
        torch.save(config, path)
        self._logger.info("Saved model at {}".format(epoch))
        return path

    # def load_model(self):
    #     self._setup_graph()
    #     assert os.path.exists('models/epo%d.tar' % self._epoch_num), 'Weights at epoch %d not found' % self._epoch_num
    #     checkpoint = torch.load('models/epo%d.tar' % self._epoch_num, map_location='cpu')
    #     self.dcrnn_model.load_state_dict(checkpoint['model_state_dict'])
    #     self._logger.info("Loaded model at {}".format(self._epoch_num))

    def load_lfx(self):
        """Load the rolling 'best' checkpoint and resume its epoch counter."""
        path = 'models/%s_best.tar' % self.data_type
        # self._setup_graph()
        assert os.path.exists(path), 'Weights not found'
        checkpoint = torch.load(path, map_location='cpu')
        self.dcrnn_model.load_state_dict(checkpoint['model_state_dict'])
        self._logger.info("Loaded model successfully!")
        self._epoch_num = checkpoint['epoch']

    def _setup_graph(self):
        """Run one forward pass so dynamically-registered parameters exist
        before a state dict is loaded."""
        with torch.no_grad():
            self.dcrnn_model = self.dcrnn_model.eval()
            val_iterator = self._data['val_loader'].get_iterator()
            for _, (x, y) in enumerate(val_iterator):
                x, y, target = self._prepare_data(x, y)
                output = self.dcrnn_model(x)  # why is this step needed??
                break

    def train(self, **kwargs):
        # Merge config-file training options over any caller overrides.
        kwargs.update(self._train_kwargs)
        return self._train(**kwargs)

    def evaluate(self, dataset='val', batches_seen=0):
        """
        Computes mean L1Loss
        :return: mean L1Loss
        """
        with torch.no_grad():
            self.dcrnn_model = self.dcrnn_model.eval()
            val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
            losses = []
            y_truths = []
            y_preds = []
            for _, (x, y) in enumerate(val_iterator):
                x, y, target = self._prepare_data(x, y)
                # output = self.dcrnn_model(x)
                output = self.dcrnn_model(self.features, x, target, batches_seen)
                loss = self._compute_loss(y, output)
                losses.append(loss.item())
                y_truths.append(y.cpu())
                y_preds.append(output.cpu())
            mean_loss = np.mean(losses)
            # self._writer.add_scalar('{} loss'.format(dataset), mean_loss, batches_seen)
            y_preds = np.concatenate(y_preds, axis=1)
            y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension
            y_truths_scaled = []
            y_preds_scaled = []
            for t in range(y_preds.shape[0]):
                # Only predictions are inverse-transformed; truths are kept as-is.
                # y_truth = self.standard_scaler.inverse_transform(y_truths[t])
                y_pred = self.standard_scaler.inverse_transform(y_preds[t])
                # y_truths_scaled.append(y_truth)
                y_truths_scaled.append(y_truths[t])
                y_preds_scaled.append(y_pred)
            return mean_loss, {'prediction': y_preds_scaled, 'truth': y_truths_scaled}

    def evaluate_test(self, dataset='test'):
        """
        Computes mean L1Loss
        :return: mean L1Loss
        """
        with torch.no_grad():
            self.dcrnn_model = self.dcrnn_model.eval()
            val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
            # losses = []
            y_truths = []
            y_preds = []
            for _, (x, y) in enumerate(val_iterator):
                x, y, target = self._prepare_data(x, y)
                # output = self.dcrnn_model(x)
                output = self.dcrnn_model(self.features, x, target)
                # losses.append(loss.item())
                y_truths.append(y.cpu())
                y_preds.append(output.cpu())
            # mean_loss = np.mean(losses)
            # y_preds = np.concatenate(y_preds, axis=1)
            # y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension
            y_preds = torch.cat(y_preds, dim=1)
            y_truths = torch.cat(y_truths, dim=1)  # concatenate on batch dimension
            # y_truths_scaled = []
            # y_preds_scaled = []
            # Report MAE / MAPE / RMSE separately for each prediction horizon.
            for t in range(y_preds.shape[0]):
                # y_truth = self.standard_scaler.inverse_transform(y_truths[t])
                # y_pred = self.standard_scaler.inverse_transform(y_preds[t])
                # y_truths_scaled.append(y_truth)
                # y_preds_scaled.append(y_pred)
                # loss = self._compute_loss(y_truths[t], y_preds[t])
                # log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}'
                # print(log.format(t + 1, loss.item()))
                metrics = self._compute_metrics(y_truths[t], y_preds[t])
                log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
                print(log.format(t + 1, metrics[0], metrics[1], metrics[2]))

    def _train(self, base_lr,
               steps, patience=50, epochs=100, lr_decay_ratio=0.1, log_every=1, save_model=1,
               test_every_n_epochs=10, epsilon=1e-8, **kwargs):
        """Main training loop with MultiStepLR decay, gradient clipping,
        best-on-validation checkpointing and early stopping."""
        # steps is used in learning rate - will see if need to use it?
        if self.LOAD_INITIAL:
            min_val_loss, _ = self.evaluate(dataset='val')
        else:
            min_val_loss = float('inf')
        wait = 0
        optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr, eps=epsilon)
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=steps,
                                                            gamma=lr_decay_ratio)
        self._logger.info('Start training ...')
        # this will fail if model is loaded with a changed batch_size
        num_batches = self._data['train_loader'].num_batch
        self._logger.info("num_batches:{}".format(num_batches))
        batches_seen = num_batches * self._epoch_num
        # NOTE(review): this overrides the `epochs` argument (default 100) —
        # confirm the hard-coded 1000 is intentional.
        epochs = 1000
        for epoch_num in range(self._epoch_num, epochs):
            self.dcrnn_model = self.dcrnn_model.train()
            train_iterator = self._data['train_loader'].get_iterator()
            losses = []
            start_time = time.time()
            for _, (x, y) in enumerate(train_iterator):
                optimizer.zero_grad()
                x, y, target = self._prepare_data(x, y)
                # output = self.dcrnn_model(x, y, batches_seen)
                output = self.dcrnn_model(self.features, x, target, batches_seen)
                if batches_seen == 0:
                    # this is a workaround to accommodate dynamically registered parameters in DCGRUCell
                    optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr, eps=epsilon)
                # loss = self._compute_loss(y, output)
                loss = self._compute_loss(y, output)
                self._logger.debug(loss.item())
                losses.append(loss.item())
                batches_seen += 1
                loss.backward()
                # gradient clipping - this does it in place
                torch.nn.utils.clip_grad_norm_(self.dcrnn_model.parameters(), self.max_grad_norm)
                optimizer.step()
            self._logger.info("epoch complete")
            lr_scheduler.step()
            self._logger.info("evaluating now!")
            val_loss, _ = self.evaluate(dataset='val', batches_seen=batches_seen)
            end_time = time.time()
            # self._writer.add_scalar('training loss',
            #                         np.mean(losses),
            #                         batches_seen)
            if (epoch_num % log_every) == log_every - 1:
                message = 'Epoch [{}/{}] ({}) train_loss: {:.4f}, val_mae: {:.4f}, lr: {:.6f}, ' \
                          '{:.1f}s'.format(epoch_num, epochs, batches_seen,
                                           np.mean(losses), val_loss, lr_scheduler.get_lr()[0],
                                           (end_time - start_time))
                self._logger.info(message)
            # if (epoch_num % test_every_n_epochs) == test_every_n_epochs - 1:
            #     test_loss, _ = self.evaluate(dataset='test', batches_seen=batches_seen)
            #     message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, test_mae: {:.4f}, lr: {:.6f}, ' \
            #               '{:.1f}s'.format(epoch_num, epochs, batches_seen,
            #                                np.mean(losses), test_loss, lr_scheduler.get_lr()[0],
            #                                (end_time - start_time))
            #     self._logger.info(message)
            # Checkpoint on validation improvement; early-stop after `patience`
            # epochs without improvement.
            if val_loss < min_val_loss:
                wait = 0
                if save_model:
                    model_file_name = self.save_model(epoch_num)
                    self._logger.info(
                        'Val loss decrease from {:.4f} to {:.4f}, '
                        'saving to {}'.format(min_val_loss, val_loss, model_file_name))
                min_val_loss = val_loss
            elif val_loss >= min_val_loss:
                wait += 1
                if wait == patience:
                    self._logger.warning('Early stopping at epoch: %d' % epoch_num)
                    break
        # Reload the best checkpoint and report final test metrics.
        self.load_lfx()
        self.evaluate_test(dataset='test')

    def _prepare_data(self, x, y):
        """Convert numpy batches to device tensors in the model's layout."""
        x, y = self._get_x_y(x, y)
        x, y, target = self._get_x_y_in_correct_dims(x, y)
        return x.to(device), y.to(device), target.to(device)

    def _get_x_y(self, x, y):
        """
        :param x: shape (batch_size, seq_len, num_sensor, input_dim)
        :param y: shape (batch_size, horizon, num_sensor, input_dim)
        :returns x shape (seq_len, batch_size, num_sensor, input_dim)
                 y shape (horizon, batch_size, num_sensor, input_dim)
        """
        x = torch.from_numpy(x).float()
        y = torch.from_numpy(y).float()
        self._logger.debug("X: {}".format(x.size()))
        self._logger.debug("y: {}".format(y.size()))
        x = x.permute(1, 0, 2, 3)
        y = y.permute(1, 0, 2, 3)
        return x, y

    def _get_x_y_in_correct_dims(self, x, y):
        """
        :param x: shape (seq_len, batch_size, num_sensor, input_dim)
        :param y: shape (horizon, batch_size, num_sensor, input_dim)
        :return: x: shape (seq_len, batch_size, num_sensor * input_dim)
                 y: shape (horizon, batch_size, num_sensor * output_dim)
        """
        batch_size = x.size(1)
        x = x.view(self.seq_len, batch_size, self.num_nodes * self.input_dim)
        # `target`: scaled prediction channel + raw auxiliary channels,
        # used as decoder inputs; `y` keeps the unscaled prediction channel(s).
        target = torch.cat([self.standard_scaler.transform(y[..., :1]), y[..., 1:]], -1).view(self.horizon, batch_size,
                                                                                              self.num_nodes * self.input_dim)
        y = y[..., :self.output_dim].view(self.horizon, batch_size,
                                          self.num_nodes * self.output_dim)
        return x, y, target

    def _compute_loss(self, y_true, y_predicted):
        # Predictions are inverse-transformed to the original scale; y_true is
        # already unscaled (see _get_x_y_in_correct_dims).
        # y_true = self.standard_scaler.inverse_transform(y_true)
        y_predicted = self.standard_scaler.inverse_transform(y_predicted)
        return masked_mae_loss(y_predicted, y_true)

    def _compute_loss_mse(self, y_true, y_predicted):
        # y_true = self.standard_scaler.inverse_transform(y_true)
        y_predicted = self.standard_scaler.inverse_transform(y_predicted)
        return nn.MSELoss()(y_predicted, y_true)

    def _compute_metrics(self, y_true, y_predicted):
        # y_true = self.standard_scaler.inverse_transform(y_true)
        y_predicted = self.standard_scaler.inverse_transform(y_predicted)
        return metric(y_predicted, y_true)
Traffic-Benchmark | Traffic-Benchmark-master/methods/STGCN/dcrnn_train_pytorch.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import yaml
from lib.utils import load_graph_data
from model.pytorch.dcrnn_supervisor import DCRNNSupervisor
import setproctitle
setproctitle.setproctitle("stgcn@lifuxian")
def main(args):
    """Load the experiment config, build a DCRNNSupervisor, then train or test.

    :param args: argparse namespace with config_filename, LOAD_INITIAL, TEST_ONLY.
    """
    with open(args.config_filename) as f:
        # safe_load only constructs plain Python objects, avoiding arbitrary
        # code execution from the YAML file and the PyYAML deprecation warning
        # for yaml.load() without an explicit Loader.
        supervisor_config = yaml.safe_load(f)

    graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
    sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)
    # e.g. '.../dcrnn_la.yaml' -> 'la' (dataset tag used in checkpoint names)
    data_type = args.config_filename.split('/')[-1].split('.')[0].split('_')[-1]  # 'bay' or 'la'
    supervisor = DCRNNSupervisor(data_type=data_type, LOAD_INITIAL=args.LOAD_INITIAL,
                                 adj_mx=adj_mx, **supervisor_config)
    if args.TEST_ONLY:
        supervisor.evaluate_test()
    else:
        supervisor.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_filename', default=None, type=str,
help='Configuration filename for restoring the model.')
parser.add_argument('--use_cpu_only', default=False, type=bool, help='Set to true to only use cpu.')
parser.add_argument('--LOAD_INITIAL', default=False, type=bool, help='If LOAD_INITIAL.')
parser.add_argument('--TEST_ONLY', default=False, type=bool, help='If TEST_ONLY.')
args = parser.parse_args()
main(args)
| 1,455 | 38.351351 | 129 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/STGCN/run_demo_pytorch.py | import argparse
import numpy as np
import os
import sys
import yaml
from lib.utils import load_graph_data
from model.pytorch.dcrnn_supervisor import DCRNNSupervisor
def run_dcrnn(args):
    """Load a pretrained DCRNN from config, evaluate it on the test split and
    save the predictions to an .npz file.

    :param args: parsed CLI namespace with config_filename and output_filename.
    """
    with open(args.config_filename) as f:
        # BUG FIX: yaml.load without an explicit Loader is deprecated (unsafe)
        # and raises a TypeError on PyYAML >= 6; FullLoader preserves the old
        # behavior for trusted config files.
        supervisor_config = yaml.load(f, Loader=yaml.FullLoader)

        graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)

        supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)
        mean_score, outputs = supervisor.evaluate('test')
        np.savez_compressed(args.output_filename, **outputs)
        print("MAE : {}".format(mean_score))
        print('Predictions saved as {}.'.format(args.output_filename))
if __name__ == '__main__':
    # Make project-local modules importable when this is run as a script.
    sys.path.append(os.getcwd())
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--use_cpu_only', default=False, type=str, help='Whether to run tensorflow on cpu.')
    arg_parser.add_argument('--config_filename', default='data/model/pretrained/METR-LA/config.yaml', type=str,
                            help='Config file for pretrained model.')
    arg_parser.add_argument('--output_filename', default='data/dcrnn_predictions.npz')
    cli_args = arg_parser.parse_args()
    run_dcrnn(cli_args)
| 1,264 | 36.205882 | 108 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/STGCN/model/pytorch/dcrnn_model.py | import numpy as np
import torch
import torch.nn as nn
from model.pytorch.dcrnn_cell import DCGRUCell
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
class Seq2SeqAttrs:
    """Mixin holding hyper-parameters shared by the DCRNN encoder/decoder.

    Values are parsed from **model_kwargs with the original defaults;
    'rnn_units' is required (int(None) raises if missing).
    """

    def __init__(self, adj_mx, **model_kwargs):
        get = model_kwargs.get
        self.adj_mx = adj_mx
        self.max_diffusion_step = int(get('max_diffusion_step', 2))
        self.cl_decay_steps = int(get('cl_decay_steps', 1000))
        self.filter_type = get('filter_type', 'laplacian')
        self.num_nodes = int(get('num_nodes', 1))
        self.num_rnn_layers = int(get('num_rnn_layers', 1))
        self.rnn_units = int(get('rnn_units'))
        # Flattened hidden-state size across all graph nodes.
        self.hidden_state_size = self.num_nodes * self.rnn_units
class EncoderModel(nn.Module, Seq2SeqAttrs):
    """DCRNN encoder: a stack of DCGRU cells applied to one input time step."""

    def __init__(self, adj_mx, **model_kwargs):
        nn.Module.__init__(self)
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.input_dim = int(model_kwargs.get('input_dim', 1))
        self.seq_len = int(model_kwargs.get('seq_len'))  # for the encoder
        cells = [DCGRUCell(self.rnn_units, adj_mx, self.max_diffusion_step,
                           self.num_nodes, filter_type=self.filter_type)
                 for _ in range(self.num_rnn_layers)]
        self.dcgru_layers = nn.ModuleList(cells)

    def forward(self, inputs, hidden_state=None):
        """One encoder step through the DCGRU stack.

        :param inputs: shape (batch_size, num_nodes * input_dim)
        :param hidden_state: (num_rnn_layers, batch_size, hidden_state_size),
            zeros when not provided.
        :return: (output, stacked hidden states); lower indices mean lower layers.
        """
        batch_size, _ = inputs.size()
        if hidden_state is None:
            hidden_state = torch.zeros(
                (self.num_rnn_layers, batch_size, self.hidden_state_size),
                device=device)
        new_states = []
        layer_input = inputs
        for layer_idx, cell in enumerate(self.dcgru_layers):
            layer_state = cell(layer_input, hidden_state[layer_idx])
            new_states.append(layer_state)
            layer_input = layer_state
        # O(num_rnn_layers) per call, so not too slow.
        return layer_input, torch.stack(new_states)
class DecoderModel(nn.Module, Seq2SeqAttrs):
    """DCRNN decoder: DCGRU stack followed by a linear projection to output_dim."""

    def __init__(self, adj_mx, **model_kwargs):
        nn.Module.__init__(self)
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.output_dim = int(model_kwargs.get('output_dim', 1))
        self.horizon = int(model_kwargs.get('horizon', 1))  # for the decoder
        self.projection_layer = nn.Linear(self.rnn_units, self.output_dim)
        cells = [DCGRUCell(self.rnn_units, adj_mx, self.max_diffusion_step,
                           self.num_nodes, filter_type=self.filter_type)
                 for _ in range(self.num_rnn_layers)]
        self.dcgru_layers = nn.ModuleList(cells)

    def forward(self, inputs, hidden_state=None):
        """One decoder step through the DCGRU stack plus output projection.

        :param inputs: shape (batch_size, num_nodes * output_dim)
        :param hidden_state: (num_rnn_layers, batch_size, hidden_state_size)
        :return: (projected output of shape (batch_size, num_nodes * output_dim),
            stacked hidden states); lower indices mean lower layers.
        """
        new_states = []
        layer_input = inputs
        for layer_idx, cell in enumerate(self.dcgru_layers):
            layer_state = cell(layer_input, hidden_state[layer_idx])
            new_states.append(layer_state)
            layer_input = layer_state
        projected = self.projection_layer(layer_input.view(-1, self.rnn_units))
        output = projected.view(-1, self.num_nodes * self.output_dim)
        return output, torch.stack(new_states)
class DCRNNModel(nn.Module, Seq2SeqAttrs):
    # Full encoder-decoder DCRNN; during training it can use scheduled
    # sampling (curriculum learning) to mix ground-truth frames into the
    # decoder input.
    def __init__(self, adj_mx, logger, **model_kwargs):
        super().__init__()
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.encoder_model = EncoderModel(adj_mx, **model_kwargs)
        self.decoder_model = DecoderModel(adj_mx, **model_kwargs)
        self.cl_decay_steps = int(model_kwargs.get('cl_decay_steps', 1000))
        self.use_curriculum_learning = bool(model_kwargs.get('use_curriculum_learning', False))
        self._logger = logger

    def _compute_sampling_threshold(self, batches_seen):
        # Inverse-sigmoid decay of the teacher-forcing probability: close to 1
        # early in training, decaying toward 0 as batches_seen grows.
        return self.cl_decay_steps / (
                self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps))

    def encoder(self, inputs):
        """
        encoder forward pass on t time steps
        :param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
        :return: encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        """
        encoder_hidden_state = None
        # Unroll the encoder over the input sequence; only the final hidden
        # state is kept and handed to the decoder.
        for t in range(self.encoder_model.seq_len):
            _, encoder_hidden_state = self.encoder_model(inputs[t], encoder_hidden_state)

        return encoder_hidden_state

    def decoder(self, encoder_hidden_state, labels=None, batches_seen=None):
        """
        Decoder forward pass
        :param encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        :param labels: (self.horizon, batch_size, self.num_nodes * self.output_dim) [optional, not exist for inference]
        :param batches_seen: global step [optional, not exist for inference]
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        batch_size = encoder_hidden_state.size(1)
        # The GO symbol (an all-zero frame) seeds the first decoder step.
        go_symbol = torch.zeros((batch_size, self.num_nodes * self.decoder_model.output_dim),
                                device=device)
        decoder_hidden_state = encoder_hidden_state
        decoder_input = go_symbol

        outputs = []

        for t in range(self.decoder_model.horizon):
            decoder_output, decoder_hidden_state = self.decoder_model(decoder_input,
                                                                      decoder_hidden_state)
            decoder_input = decoder_output
            outputs.append(decoder_output)
            # Scheduled sampling: with decaying probability, feed the ground
            # truth instead of the model's own prediction (training only).
            if self.training and self.use_curriculum_learning:
                c = np.random.uniform(0, 1)
                if c < self._compute_sampling_threshold(batches_seen):
                    decoder_input = labels[t]
        outputs = torch.stack(outputs)
        return outputs

    def forward(self, inputs, labels=None, batches_seen=None):
        """
        seq2seq forward pass
        :param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
        :param labels: shape (horizon, batch_size, num_sensor * output)
        :param batches_seen: batches seen till now
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        encoder_hidden_state = self.encoder(inputs)
        self._logger.debug("Encoder complete, starting decoder")
        outputs = self.decoder(encoder_hidden_state, labels, batches_seen=batches_seen)
        self._logger.debug("Decoder complete")
        # Parameter count is only meaningful after the first forward pass
        # (DCGRUCell registers parameters lazily), hence the batches_seen == 0 check.
        if batches_seen == 0:
            self._logger.info(
                "Total trainable parameters {}".format(count_parameters(self))
            )
        return outputs
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class TimeBlock(nn.Module):
    """
    Gated temporal convolution applied to every graph node in isolation.

    Layout is NHWC-like: (batch, nodes, time, channels); the time dimension
    shrinks by kernel_size - 1.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3):
        """
        :param in_channels: Number of input features at each node in each time
        step.
        :param out_channels: Desired number of output channels at each node in
        each time step.
        :param kernel_size: Size of the 1D temporal kernel.
        """
        super(TimeBlock, self).__init__()
        kernel = (1, kernel_size)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel)
        self.conv2 = nn.Conv2d(in_channels, out_channels, kernel)
        self.conv3 = nn.Conv2d(in_channels, out_channels, kernel)

    def forward(self, X):
        """
        :param X: (batch_size, num_nodes, num_timesteps, in_channels)
        :return: (batch_size, num_nodes, num_timesteps_out, out_channels)
        """
        # NHWC -> NCHW so Conv2d can run the temporal convolutions.
        nchw = X.permute(0, 3, 1, 2)
        # GLU-style gate plus residual-ish third conv.
        gated = self.conv1(nchw) + torch.sigmoid(self.conv2(nchw))
        activated = F.relu(gated + self.conv3(nchw))
        # Back from NCHW to NHWC.
        return activated.permute(0, 2, 3, 1)
class STGCNBlock(nn.Module):
    """
    Sandwich block: temporal convolution -> graph convolution -> temporal
    convolution -> batch norm, matching the ST-Conv block of STGCN.
    """

    def __init__(self, in_channels, spatial_channels, out_channels,
                 num_nodes):
        """
        :param in_channels: Number of input features at each node in each time
        step.
        :param spatial_channels: Number of output channels of the graph
        convolutional, spatial sub-block.
        :param out_channels: Desired number of output features at each node in
        each time step.
        :param num_nodes: Number of nodes in the graph.
        """
        super(STGCNBlock, self).__init__()
        self.temporal1 = TimeBlock(in_channels=in_channels,
                                   out_channels=out_channels)
        self.Theta1 = nn.Parameter(torch.FloatTensor(out_channels,
                                                     spatial_channels))
        self.temporal2 = TimeBlock(in_channels=spatial_channels,
                                   out_channels=out_channels)
        self.batch_norm = nn.BatchNorm2d(num_nodes)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init scaled by the fan-out of the spatial weight matrix.
        bound = 1. / math.sqrt(self.Theta1.shape[1])
        self.Theta1.data.uniform_(-bound, bound)

    def forward(self, X, A_hat):
        """
        :param X: (batch_size, num_nodes, num_timesteps, in_channels)
        :param A_hat: Normalized adjacency matrix, (num_nodes, num_nodes)
        :return: (batch_size, num_nodes, num_timesteps_out, out_channels)
        """
        after_t1 = self.temporal1(X)
        # Mix node features along the graph: A_hat @ features for every
        # (batch, time) slice.
        mixed = torch.einsum("ij,jklm->kilm", [A_hat, after_t1.permute(1, 0, 2, 3)])
        spatial = F.relu(torch.matmul(mixed, self.Theta1))
        after_t2 = self.temporal2(spatial)
        return self.batch_norm(after_t2)
class STGCN(nn.Module):
    """
    Spatio-temporal graph convolutional network as described in
    https://arxiv.org/abs/1709.04875v3 by Yu et al.

    forward() accepts a flattened time-major sequence of shape
    (seq_len, batch, num_nodes * num_features) and returns predictions of
    shape (horizon, batch, num_nodes).
    """

    def __init__(self, num_nodes, num_features, num_timesteps_input,
                 num_timesteps_output):
        """
        :param num_nodes: Number of nodes in the graph.
        :param num_features: Number of features at each node in each time step.
        :param num_timesteps_input: Number of past time steps fed into the
        network.
        :param num_timesteps_output: Desired number of future time steps
        output by the network.
        """
        super(STGCN, self).__init__()
        self.block1 = STGCNBlock(in_channels=num_features, out_channels=64,
                                 spatial_channels=16, num_nodes=num_nodes)
        self.block2 = STGCNBlock(in_channels=64, out_channels=64,
                                 spatial_channels=16, num_nodes=num_nodes)
        self.last_temporal = TimeBlock(in_channels=64, out_channels=64)
        # Five temporal convolutions in total (two per STGCN block plus the
        # last one), each trimming 2 time steps -> input length shrinks by 10.
        self.fully = nn.Linear((num_timesteps_input - 2 * 5) * 64,
                               num_timesteps_output)
        self.num_nodes = num_nodes
        self.input_dim = num_features
        self.seq_len = num_timesteps_input
        self.horizon = num_timesteps_output

    def forward(self, A_hat, X):
        """
        :param A_hat: Normalized adjacency matrix, (num_nodes, num_nodes)
        :param X: (seq_len, batch, num_nodes * input_dim) flattened input
        :return: (horizon, batch, num_nodes)
        """
        # Unflatten to (batch, nodes, time, features) as the blocks expect.
        batched = X.view(self.seq_len, -1, self.num_nodes, self.input_dim) \
            .permute(1, 2, 0, 3).contiguous()
        hidden = self.block2(self.block1(batched, A_hat), A_hat)
        temporal_out = self.last_temporal(hidden)
        flattened = temporal_out.reshape((temporal_out.shape[0], temporal_out.shape[1], -1))
        predictions = self.fully(flattened)
        # (batch, nodes, horizon) -> (horizon, batch, nodes).
        return predictions.permute(2, 0, 1).contiguous()
# :return: x: shape (seq_len, batch_size, num_sensor * input_dim)
# y: shape (horizon, batch_size, num_sensor * output_dim)
| 13,218 | 41.779935 | 119 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/STGCN/model/pytorch/dcrnn_cell.py | import numpy as np
import torch
from lib import utils
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LayerParams:
    """Lazily-created, shape-keyed weight/bias store registered on an nn.Module.

    Parameters are created on first request for a given shape/length and
    reused afterwards, so the owning module's parameter set grows on demand.
    """

    def __init__(self, rnn_network: torch.nn.Module, layer_type: str):
        self._rnn_network = rnn_network
        self._params_dict = {}
        self._biases_dict = {}
        self._type = layer_type

    def get_weights(self, shape):
        """Return (creating and registering if needed) a Xavier-normal weight of `shape`."""
        if shape not in self._params_dict:
            weight = torch.nn.Parameter(torch.empty(*shape, device=device))
            torch.nn.init.xavier_normal_(weight)
            self._params_dict[shape] = weight
            self._rnn_network.register_parameter(
                '{}_weight_{}'.format(self._type, str(shape)), weight)
        return self._params_dict[shape]

    def get_biases(self, length, bias_start=0.0):
        """Return (creating and registering if needed) a bias vector filled with bias_start."""
        if length not in self._biases_dict:
            bias = torch.nn.Parameter(torch.empty(length, device=device))
            torch.nn.init.constant_(bias, bias_start)
            self._biases_dict[length] = bias
            self._rnn_network.register_parameter(
                '{}_biases_{}'.format(self._type, str(length)), bias)
        return self._biases_dict[length]
class DCGRUCell(torch.nn.Module):
    # Diffusion-convolutional GRU cell (DCRNN): the GRU gates and candidate
    # state are computed with graph diffusion convolutions instead of dense
    # matrix multiplications.
    def __init__(self, num_units, adj_mx, max_diffusion_step, num_nodes, nonlinearity='tanh',
                 filter_type="laplacian", use_gc_for_ru=True):
        """
        :param num_units: hidden units per graph node.
        :param adj_mx: adjacency matrix used to build the diffusion supports.
        :param max_diffusion_step: K, number of diffusion steps per support.
        :param num_nodes: number of graph nodes.
        :param nonlinearity: 'tanh' (default) or anything else for relu.
        :param filter_type: "laplacian", "random_walk", "dual_random_walk".
        :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
        """

        super().__init__()
        self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu
        # support other nonlinearities up here?
        self._num_nodes = num_nodes
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._supports = []
        self._use_gc_for_ru = use_gc_for_ru
        supports = []
        # Build one or two support matrices depending on the filter type;
        # unknown types fall back to the scaled Laplacian.
        if filter_type == "laplacian":
            supports.append(utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
        elif filter_type == "random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
        elif filter_type == "dual_random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
            supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
        else:
            supports.append(utils.calculate_scaled_laplacian(adj_mx))
        for support in supports:
            self._supports.append(self._build_sparse_matrix(support))

        # Lazily-created parameter stores (see LayerParams).
        self._fc_params = LayerParams(self, 'fc')
        self._gconv_params = LayerParams(self, 'gconv')

    @staticmethod
    def _build_sparse_matrix(L):
        # Convert a scipy sparse matrix into a torch sparse COO tensor.
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        # this is to ensure row-major ordering to equal torch.sparse.sparse_reorder(L)
        indices = indices[np.lexsort((indices[:, 0], indices[:, 1]))]
        L = torch.sparse_coo_tensor(indices.T, L.data, L.shape, device=device)
        return L

    def forward(self, inputs, hx):
        """Gated recurrent unit (GRU) with Graph Convolution.
        :param inputs: (B, num_nodes * input_dim)
        :param hx: (B, num_nodes * rnn_units)

        :return
        - Output: A `2-D` tensor with shape `(B, num_nodes * rnn_units)`.
        """
        output_size = 2 * self._num_units
        # Reset/update gates come from either a graph conv or a plain FC layer.
        if self._use_gc_for_ru:
            fn = self._gconv
        else:
            fn = self._fc
        value = torch.sigmoid(fn(inputs, hx, output_size, bias_start=1.0))
        value = torch.reshape(value, (-1, self._num_nodes, output_size))
        r, u = torch.split(tensor=value, split_size_or_sections=self._num_units, dim=-1)
        r = torch.reshape(r, (-1, self._num_nodes * self._num_units))
        u = torch.reshape(u, (-1, self._num_nodes * self._num_units))

        # Candidate state from the reset-gated previous state.
        c = self._gconv(inputs, r * hx, self._num_units)
        if self._activation is not None:
            c = self._activation(c)

        # Standard GRU blend of previous state and candidate.
        new_state = u * hx + (1.0 - u) * c
        return new_state

    @staticmethod
    def _concat(x, x_):
        # Stack a new diffusion order onto the accumulated tensor (dim 0).
        x_ = x_.unsqueeze(0)
        return torch.cat([x, x_], dim=0)

    def _fc(self, inputs, state, output_size, bias_start=0.0):
        # Dense (non-graph) alternative for the gate computation.
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size * self._num_nodes, -1))
        state = torch.reshape(state, (batch_size * self._num_nodes, -1))
        inputs_and_state = torch.cat([inputs, state], dim=-1)
        input_size = inputs_and_state.shape[-1]
        weights = self._fc_params.get_weights((input_size, output_size))
        value = torch.sigmoid(torch.matmul(inputs_and_state, weights))
        biases = self._fc_params.get_biases(output_size, bias_start)
        value += biases
        return value

    def _gconv(self, inputs, state, output_size, bias_start=0.0):
        # Diffusion graph convolution over [inputs, state] using a
        # Chebyshev-style recursion x2 = 2*L*x1 - x0 per support matrix.
        # Reshape input and state to (batch_size, num_nodes, input_dim/state_dim)
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size, self._num_nodes, -1))
        state = torch.reshape(state, (batch_size, self._num_nodes, -1))
        inputs_and_state = torch.cat([inputs, state], dim=2)
        input_size = inputs_and_state.size(2)

        x = inputs_and_state
        x0 = x.permute(1, 2, 0)  # (num_nodes, total_arg_size, batch_size)
        x0 = torch.reshape(x0, shape=[self._num_nodes, input_size * batch_size])
        x = torch.unsqueeze(x0, 0)

        if self._max_diffusion_step == 0:
            # K = 0: keep only the identity term (x itself).
            pass
        else:
            for support in self._supports:
                x1 = torch.sparse.mm(support, x0)
                x = self._concat(x, x1)

                for k in range(2, self._max_diffusion_step + 1):
                    x2 = 2 * torch.sparse.mm(support, x1) - x0
                    x = self._concat(x, x2)
                    x1, x0 = x2, x1

        num_matrices = len(self._supports) * self._max_diffusion_step + 1  # Adds for x itself.
        x = torch.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size])
        x = x.permute(3, 1, 2, 0)  # (batch_size, num_nodes, input_size, order)
        x = torch.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices])

        weights = self._gconv_params.get_weights((input_size * num_matrices, output_size))
        x = torch.matmul(x, weights)  # (batch_size * self._num_nodes, output_size)

        biases = self._gconv_params.get_biases(output_size, bias_start)
        x += biases
        # Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim)
        return torch.reshape(x, [batch_size, self._num_nodes * output_size]) | 6,939 | 41.576687 | 105 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/STGCN/model/pytorch/utils.py | import torch
import numpy as np
def masked_mae_loss(y_pred, y_true):
    """Mean absolute error over entries where y_true != 0 (zero targets masked out)."""
    mask = (y_true != 0).float()
    mask /= mask.mean()
    abs_err = torch.abs(y_pred - y_true) * mask
    # Zero out NaNs (e.g. from an all-zero mask):
    # https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
    abs_err[abs_err != abs_err] = 0
    return abs_err.mean()
def masked_mse_loss(y_pred, y_true):
    """Mean squared error over entries where y_true != 0 (zero targets masked out)."""
    mask = (y_true != 0).float()
    mask /= mask.mean()
    sq_err = torch.abs(y_pred - y_true) ** 2
    sq_err = sq_err * mask
    # Zero out NaNs (e.g. from an all-zero mask):
    # https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
    sq_err[sq_err != sq_err] = 0
    return sq_err.mean()
def masked_mape_loss(y_pred, y_true):
    """Mean absolute percentage error over entries where y_true != 0."""
    mask = (y_true != 0).float()
    mask /= mask.mean()
    pct_err = torch.abs(y_pred - y_true) / y_true
    pct_err = pct_err * mask
    # Division by a zero target yields inf; inf * 0 (mask) yields NaN -- zero
    # those out: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
    pct_err[pct_err != pct_err] = 0
    return pct_err.mean()
def masked_mse(preds, labels, null_val=np.nan):
    """Mean squared error ignoring label entries equal to `null_val`.

    When null_val is NaN, NaN labels are masked instead.
    """
    if np.isnan(null_val):
        valid = (~torch.isnan(labels)).float()
    else:
        valid = (labels != null_val).float()
    valid = valid / torch.mean(valid)
    valid = torch.where(torch.isnan(valid), torch.zeros_like(valid), valid)
    err = (preds - labels) ** 2 * valid
    err = torch.where(torch.isnan(err), torch.zeros_like(err), err)
    return torch.mean(err)
def masked_rmse(preds, labels, null_val=np.nan):
    """Root of masked_mse, with the same null-value masking."""
    mse = masked_mse(preds=preds, labels=labels, null_val=null_val)
    return torch.sqrt(mse)
def masked_mae(preds, labels, null_val=np.nan):
    """Mean absolute error ignoring label entries equal to `null_val`.

    When null_val is NaN, NaN labels are masked instead.
    """
    if np.isnan(null_val):
        valid = (~torch.isnan(labels)).float()
    else:
        valid = (labels != null_val).float()
    valid = valid / torch.mean(valid)
    valid = torch.where(torch.isnan(valid), torch.zeros_like(valid), valid)
    err = torch.abs(preds - labels) * valid
    err = torch.where(torch.isnan(err), torch.zeros_like(err), err)
    return torch.mean(err)
def masked_mape(preds, labels, null_val=np.nan):
    """Mean absolute percentage error ignoring label entries equal to `null_val`.

    When null_val is NaN, NaN labels are masked instead. NaNs produced by
    division against zero labels are zeroed out.
    """
    if np.isnan(null_val):
        valid = (~torch.isnan(labels)).float()
    else:
        valid = (labels != null_val).float()
    valid = valid / torch.mean(valid)
    valid = torch.where(torch.isnan(valid), torch.zeros_like(valid), valid)
    err = torch.abs(preds - labels) / labels * valid
    err = torch.where(torch.isnan(err), torch.zeros_like(err), err)
    return torch.mean(err)
def metric(pred, real):
    """Return (MAE, MAPE, RMSE) as floats, masking out zeros in `real`."""
    mae = masked_mae(pred, real, 0.0).item()
    mape = masked_mape(pred, real, 0.0).item()
    rmse = masked_rmse(pred, real, 0.0).item()
    return mae, mape, rmse
def get_normalized_adj(A):
    """
    Returns the degree normalized adjacency matrix.
    """
    # Add self-loops, then apply symmetric normalization:
    # D^(-1/2) (A + I) D^(-1/2).
    A = A + np.diag(np.ones(A.shape[0], dtype=np.float32))
    D = np.array(np.sum(A, axis=1)).reshape((-1,))
    D[D <= 10e-5] = 10e-5    # Prevent infs
    diag = np.reciprocal(np.sqrt(D))
    A_wave = np.multiply(np.multiply(diag.reshape((-1, 1)), A),
                         diag.reshape((1, -1)))
    return A_wave | 3,175 | 30.76 | 88 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/STGCN/model/pytorch/dcrnn_supervisor.py | import os
import time
import numpy as np
import torch
import torch.nn as nn
# from torch.utils.tensorboard import SummaryWriter
from lib import utils
# from model.pytorch.dcrnn_model import DCRNNModel
from model.pytorch.dcrnn_model import STGCN
from model.pytorch.utils import masked_mae_loss, metric, get_normalized_adj, masked_mse_loss
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DCRNNSupervisor:
def __init__(self, data_type, LOAD_INITIAL, adj_mx, **kwargs):
    """
    :param data_type: dataset tag (e.g. 'bay' or 'la'), used to name checkpoints.
    :param LOAD_INITIAL: when True, restore weights from the per-dataset checkpoint.
    :param adj_mx: sensor adjacency matrix; normalized into self.A_wave for STGCN.
    :param kwargs: parsed config with 'data', 'model' and 'train' sections.
    """
    self._kwargs = kwargs
    self._data_kwargs = kwargs.get('data')
    self._model_kwargs = kwargs.get('model')
    self._train_kwargs = kwargs.get('train')

    self.max_grad_norm = self._train_kwargs.get('max_grad_norm', 1.)

    # logging.
    self._log_dir = self._get_log_dir(kwargs)
    # self._writer = SummaryWriter('runs/' + self._log_dir)

    log_level = self._kwargs.get('log_level', 'INFO')
    self._logger = utils.get_logger(self._log_dir, __name__, 'info.log', level=log_level)

    # data set
    self._data = utils.load_dataset(**self._data_kwargs)
    self.standard_scaler = self._data['scaler']

    self.num_nodes = int(self._model_kwargs.get('num_nodes', 1))
    self.input_dim = int(self._model_kwargs.get('input_dim', 1))
    self.seq_len = int(self._model_kwargs.get('seq_len'))  # for the encoder
    self.output_dim = int(self._model_kwargs.get('output_dim', 1))
    self.use_curriculum_learning = bool(
        self._model_kwargs.get('use_curriculum_learning', False))
    self.horizon = int(self._model_kwargs.get('horizon', 1))  # for the decoder

    # setup model
    # NOTE: despite the class name, this supervisor drives an STGCN model;
    # the original DCRNN construction is kept below for reference.
    # dcrnn_model = DCRNNModel(adj_mx, self._logger, **self._model_kwargs)
    dcrnn_model = STGCN(self.num_nodes, self.input_dim, self.seq_len, self.horizon)
    self.dcrnn_model = dcrnn_model.cuda() if torch.cuda.is_available() else dcrnn_model
    self._logger.info("Model created")

    self._epoch_num = self._train_kwargs.get('epoch', 0)
    # if self._epoch_num > 0:  # (in fact the preset value of self._epoch_num is indeed 0)
    #     self.load_model()
    self.data_type = data_type
    self.LOAD_INITIAL = LOAD_INITIAL
    if LOAD_INITIAL:
        self.load_lfx()
    # Degree-normalized adjacency used by STGCN's graph convolutions.
    self.A_wave = torch.from_numpy(get_normalized_adj(adj_mx)).to(device)
@staticmethod
def _get_log_dir(kwargs):
    """Return (creating if needed) the log directory; auto-name it from
    hyper-parameters when the config does not set one."""
    log_dir = kwargs['train'].get('log_dir')
    if log_dir is None:
        batch_size = kwargs['data'].get('batch_size')
        learning_rate = kwargs['train'].get('base_lr')
        max_diffusion_step = kwargs['model'].get('max_diffusion_step')
        num_rnn_layers = kwargs['model'].get('num_rnn_layers')
        rnn_units = kwargs['model'].get('rnn_units')
        structure = '-'.join(['%d' % rnn_units for _ in range(num_rnn_layers)])
        horizon = kwargs['model'].get('horizon')
        filter_type = kwargs['model'].get('filter_type')
        # Abbreviate the diffusion filter type for the run id ('L' default).
        abbr_by_type = {'random_walk': 'R', 'dual_random_walk': 'DR'}
        filter_type_abbr = abbr_by_type.get(filter_type, 'L')
        run_id = 'dcrnn_%s_%d_h_%d_%s_lr_%g_bs_%d_%s/' % (
            filter_type_abbr, max_diffusion_step, horizon,
            structure, learning_rate, batch_size,
            time.strftime('%m%d%H%M%S'))
        base_dir = kwargs.get('base_dir')
        log_dir = os.path.join(base_dir, run_id)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    return log_dir
# def save_model(self, epoch):
# if not os.path.exists('models/'):
# os.makedirs('models/')
#
# config = dict(self._kwargs)
# config['model_state_dict'] = self.dcrnn_model.state_dict()
# config['epoch'] = epoch
# torch.save(config, 'models/epo%d.tar' % epoch)
# self._logger.info("Saved model at {}".format(epoch))
# return 'models/epo%d.tar' % epoch
def save_model(self, epoch):
    """Persist the model weights (plus config and epoch) to the per-dataset
    'best' checkpoint and return its path."""
    path = 'models/%s_best.tar' % self.data_type
    if not os.path.exists('models/'):
        os.makedirs('models/')
    checkpoint = dict(self._kwargs)
    checkpoint['model_state_dict'] = self.dcrnn_model.state_dict()
    checkpoint['epoch'] = epoch
    torch.save(checkpoint, path)
    self._logger.info("Saved model at {}".format(epoch))
    return path
# def load_model(self):
# self._setup_graph()
# assert os.path.exists('models/epo%d.tar' % self._epoch_num), 'Weights at epoch %d not found' % self._epoch_num
# checkpoint = torch.load('models/epo%d.tar' % self._epoch_num, map_location='cpu')
# self.dcrnn_model.load_state_dict(checkpoint['model_state_dict'])
# self._logger.info("Loaded model at {}".format(self._epoch_num))
def load_lfx(self):
    """Restore model weights and the epoch counter from the per-dataset
    'best' checkpoint; asserts that the checkpoint file exists."""
    path = 'models/%s_best.tar' % self.data_type
    assert os.path.exists(path), 'Weights not found'
    checkpoint = torch.load(path, map_location='cpu')
    self.dcrnn_model.load_state_dict(checkpoint['model_state_dict'])
    self._logger.info("Loaded model successfully!")
    self._epoch_num = checkpoint['epoch']
def _setup_graph(self):
    """Run a single eval-mode forward pass on one validation batch.

    NOTE(review): presumably needed so lazily-registered parameters exist
    before loading a state dict -- confirm.
    """
    with torch.no_grad():
        self.dcrnn_model = self.dcrnn_model.eval()
        val_iterator = self._data['val_loader'].get_iterator()
        for _, (x, y) in enumerate(val_iterator):
            x, y = self._prepare_data(x, y)
            output = self.dcrnn_model(x)
            break  # one batch is enough
def train(self, **kwargs):
    """Public entry point: overlay the config's train kwargs and delegate to _train."""
    kwargs.update(self._train_kwargs)
    return self._train(**kwargs)
def evaluate(self, dataset='val', batches_seen=0):
    """
    Evaluate the model on one split with the masked-MAE loss.

    :param dataset: split name ('val' or 'test'); selects '<dataset>_loader'.
    :param batches_seen: global step (kept for the disabled TensorBoard hook).
    :return: (mean loss, dict of per-horizon 'prediction'/'truth' lists,
              both mapped back to original units)
    """
    with torch.no_grad():
        self.dcrnn_model = self.dcrnn_model.eval()

        val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
        losses = []

        y_truths = []
        y_preds = []

        for _, (x, y) in enumerate(val_iterator):
            x, y = self._prepare_data(x, y)

            # STGCN forward takes the normalized adjacency alongside the input.
            # output = self.dcrnn_model(x)
            output = self.dcrnn_model(self.A_wave, x)
            loss = self._compute_loss(y, output)
            losses.append(loss.item())

            y_truths.append(y.cpu())
            y_preds.append(output.cpu())

        mean_loss = np.mean(losses)

        # self._writer.add_scalar('{} loss'.format(dataset), mean_loss, batches_seen)

        y_preds = np.concatenate(y_preds, axis=1)
        y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension

        y_truths_scaled = []
        y_preds_scaled = []
        # Undo the standard scaling per prediction horizon for reporting.
        for t in range(y_preds.shape[0]):
            y_truth = self.standard_scaler.inverse_transform(y_truths[t])
            y_pred = self.standard_scaler.inverse_transform(y_preds[t])
            y_truths_scaled.append(y_truth)
            y_preds_scaled.append(y_pred)

        return mean_loss, {'prediction': y_preds_scaled, 'truth': y_truths_scaled}
def evaluate_test(self, dataset='test'):
    """
    Run the model over the test loader and print MAE/MAPE/RMSE per
    prediction horizon. Returns nothing; results go to stdout.
    """
    with torch.no_grad():
        self.dcrnn_model = self.dcrnn_model.eval()

        val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
        # losses = []

        y_truths = []
        y_preds = []

        for _, (x, y) in enumerate(val_iterator):
            x, y = self._prepare_data(x, y)

            # STGCN forward takes the normalized adjacency alongside the input.
            # output = self.dcrnn_model(x)
            output = self.dcrnn_model(self.A_wave, x)

            # losses.append(loss.item())

            y_truths.append(y.cpu())
            y_preds.append(output.cpu())

        # mean_loss = np.mean(losses)

        # y_preds = np.concatenate(y_preds, axis=1)
        # y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension
        y_preds = torch.cat(y_preds, dim=1)
        # print(y_preds)
        # print(y_preds.shape) #torch.Size([12, 1472, 1540])
        y_truths = torch.cat(y_truths, dim=1)  # concatenate on batch dimension

        # y_truths_scaled = []
        # y_preds_scaled = []
        # Report metrics separately for each horizon step.
        # NOTE(review): inverse scaling is presumably handled inside
        # _compute_metrics (not visible here) -- confirm.
        for t in range(y_preds.shape[0]):
            # y_truth = self.standard_scaler.inverse_transform(y_truths[t])
            # y_pred = self.standard_scaler.inverse_transform(y_preds[t])
            # y_truths_scaled.append(y_truth)
            # y_preds_scaled.append(y_pred)

            # loss = self._compute_loss(y_truths[t], y_preds[t])
            # log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}'
            # print(log.format(t + 1, loss.item()))
            metrics = self._compute_metrics(y_truths[t], y_preds[t])
            log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
            print(log.format(t + 1, metrics[0], metrics[1], metrics[2]))
def _train(self, base_lr,
           steps, patience=50, epochs=100, lr_decay_ratio=0.1, log_every=1, save_model=1,
           test_every_n_epochs=10, epsilon=1e-8, **kwargs):
    """Main training loop: Adam + MultiStepLR decay, best-checkpoint saving
    on val-loss improvement, early stopping after `patience` bad epochs,
    and a final test evaluation from the best checkpoint.
    """
    # steps is used in learning rate - will see if need to use it?
    if self.LOAD_INITIAL:
        # Resuming: seed the best-so-far val loss from the loaded weights.
        min_val_loss, _ = self.evaluate(dataset='val')
    else:
        min_val_loss = float('inf')
    wait = 0

    optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr, eps=epsilon)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=steps,
                                                        gamma=lr_decay_ratio)

    self._logger.info('Start training ...')

    # this will fail if model is loaded with a changed batch_size
    num_batches = self._data['train_loader'].num_batch
    self._logger.info("num_batches:{}".format(num_batches))

    batches_seen = num_batches * self._epoch_num
    # NOTE(review): hard-codes the epoch budget, overriding the `epochs`
    # parameter (default 100) -- confirm this is intended.
    epochs = 1000
    for epoch_num in range(self._epoch_num, epochs):

        self.dcrnn_model = self.dcrnn_model.train()

        train_iterator = self._data['train_loader'].get_iterator()
        losses = []

        start_time = time.time()

        for _, (x, y) in enumerate(train_iterator):
            optimizer.zero_grad()

            x, y = self._prepare_data(x, y)

            # STGCN forward takes the normalized adjacency alongside the input.
            # output = self.dcrnn_model(x, y, batches_seen)
            output = self.dcrnn_model(self.A_wave, x)

            if batches_seen == 0:
                # this is a workaround to accommodate dynamically registered parameters in DCGRUCell
                optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr, eps=epsilon)

            # loss = self._compute_loss(y, output)
            loss = self._compute_loss_mse(y, output)

            self._logger.debug(loss.item())

            losses.append(loss.item())

            batches_seen += 1
            loss.backward()

            # gradient clipping - this does it in place
            torch.nn.utils.clip_grad_norm_(self.dcrnn_model.parameters(), self.max_grad_norm)

            optimizer.step()
        self._logger.info("epoch complete")
        lr_scheduler.step()
        self._logger.info("evaluating now!")

        val_loss, _ = self.evaluate(dataset='val', batches_seen=batches_seen)

        end_time = time.time()

        # self._writer.add_scalar('training loss',
        #                         np.mean(losses),
        #                         batches_seen)

        if (epoch_num % log_every) == log_every - 1:
            message = 'Epoch [{}/{}] ({}) train_loss: {:.4f}, val_mae: {:.4f}, lr: {:.6f}, ' \
                      '{:.1f}s'.format(epoch_num, epochs, batches_seen,
                                       np.mean(losses), val_loss, lr_scheduler.get_lr()[0],
                                       (end_time - start_time))
            self._logger.info(message)

        # if (epoch_num % test_every_n_epochs) == test_every_n_epochs - 1:
        #     test_loss, _ = self.evaluate(dataset='test', batches_seen=batches_seen)
        #     message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, test_mae: {:.4f}, lr: {:.6f}, ' \
        #               '{:.1f}s'.format(epoch_num, epochs, batches_seen,
        #                                np.mean(losses), test_loss, lr_scheduler.get_lr()[0],
        #                                (end_time - start_time))
        #     self._logger.info(message)

        if val_loss < min_val_loss:
            # Improvement: reset patience and checkpoint the model.
            wait = 0
            if save_model:
                model_file_name = self.save_model(epoch_num)
                self._logger.info(
                    'Val loss decrease from {:.4f} to {:.4f}, '
                    'saving to {}'.format(min_val_loss, val_loss, model_file_name))
            min_val_loss = val_loss

        elif val_loss >= min_val_loss:
            wait += 1
            if wait == patience:
                self._logger.warning('Early stopping at epoch: %d' % epoch_num)
                break
    # Restore the best checkpoint and report final test metrics.
    self.load_lfx()
    self.evaluate_test(dataset='test')
def _prepare_data(self, x, y):
    """Convert numpy batches to flattened, time-major tensors on the target device."""
    x, y = self._get_x_y(x, y)
    x, y = self._get_x_y_in_correct_dims(x, y)
    return x.to(device), y.to(device)
def _get_x_y(self, x, y):
    """Tensorize numpy batches and swap them to time-major layout.

    :param x: shape (batch_size, seq_len, num_sensor, input_dim)
    :param y: shape (batch_size, horizon, num_sensor, input_dim)
    :returns: x shaped (seq_len, batch_size, num_sensor, input_dim) and
              y shaped (horizon, batch_size, num_sensor, input_dim)
    """
    x = torch.from_numpy(x).float()
    y = torch.from_numpy(y).float()
    self._logger.debug("X: {}".format(x.size()))
    self._logger.debug("y: {}".format(y.size()))
    return x.permute(1, 0, 2, 3), y.permute(1, 0, 2, 3)
def _get_x_y_in_correct_dims(self, x, y):
"""
:param x: shape (seq_len, batch_size, num_sensor, input_dim)
:param y: shape (horizon, batch_size, num_sensor, input_dim)
:return: x: shape (seq_len, batch_size, num_sensor * input_dim)
y: shape (horizon, batch_size, num_sensor * output_dim)
"""
batch_size = x.size(1)
x = x.view(self.seq_len, batch_size, self.num_nodes * self.input_dim)
y = y[..., :self.output_dim].view(self.horizon, batch_size,
self.num_nodes * self.output_dim)
return x, y
def _compute_loss(self, y_true, y_predicted):
# y_true = self.standard_scaler.inverse_transform(y_true)
y_predicted = self.standard_scaler.inverse_transform(y_predicted)
return masked_mae_loss(y_predicted, y_true)
# def _compute_loss_mse(self, y_true, y_predicted):
# y_true = self.standard_scaler.inverse_transform(y_true)
# y_predicted = self.standard_scaler.inverse_transform(y_predicted)
# return nn.MSELoss()(y_predicted, y_true)
    # NOTE: A serious mistake was made here previously!! The MAE loss already carries a mask; computing this loss without the mask lets zero values that should be excluded from the loss take part in the computation!
# Evaluate best model on test data for horizon 1, Test MAE: 5.1383, Test MAPE: 24294.3398, Test RMSE: 8.3837
# Evaluate best model on test data for horizon 2, Test MAE: 5.1264, Test MAPE: 24150.6680, Test RMSE: 8.3845
# Evaluate best model on test data for horizon 3, Test MAE: 5.1331, Test MAPE: 24014.9238, Test RMSE: 8.4222
# Evaluate best model on test data for horizon 4, Test MAE: 5.1451, Test MAPE: 23861.4805, Test RMSE: 8.4651
# Evaluate best model on test data for horizon 5, Test MAE: 5.1562, Test MAPE: 23847.7773, Test RMSE: 8.5100
# Evaluate best model on test data for horizon 6, Test MAE: 5.1783, Test MAPE: 23548.7598, Test RMSE: 8.5621
# Evaluate best model on test data for horizon 7, Test MAE: 5.2065, Test MAPE: 23437.7461, Test RMSE: 8.6286
# Evaluate best model on test data for horizon 8, Test MAE: 5.2424, Test MAPE: 23596.6816, Test RMSE: 8.7054
# Evaluate best model on test data for horizon 9, Test MAE: 5.2675, Test MAPE: 23671.2520, Test RMSE: 8.7606
# Evaluate best model on test data for horizon 10, Test MAE: 5.3108, Test MAPE: 23697.0801, Test RMSE: 8.8295
# Evaluate best model on test data for horizon 11, Test MAE: 5.3616, Test MAPE: 23790.2695, Test RMSE: 8.9047
# Evaluate best model on test data for horizon 12, Test MAE: 5.4234, Test MAPE: 23957.7305, Test RMSE: 8.9975
def _compute_loss_mse(self, y_true, y_predicted):
# y_true = self.standard_scaler.inverse_transform(y_true)
y_predicted = self.standard_scaler.inverse_transform(y_predicted)
return masked_mse_loss(y_predicted, y_true)
def _compute_metrics(self, y_true, y_predicted):
# y_true = self.standard_scaler.inverse_transform(y_true)
y_predicted = self.standard_scaler.inverse_transform(y_predicted)
return metric(y_predicted, y_true) | 17,411 | 41.8867 | 129 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DGCRN/layer.py | from __future__ import division
import torch
import torch.nn as nn
from torch.nn import init
import numbers
import torch.nn.functional as F
from collections import OrderedDict
class gconv_RNN(nn.Module):
    """Graph convolution with a per-sample (batched) adjacency matrix."""

    def __init__(self):
        super(gconv_RNN, self).__init__()

    def forward(self, x, A):
        # out[n, w, c] = sum_v x[n, v, c] * A[n, v, w]
        mixed = torch.einsum('nvc,nvw->nwc', (x, A))
        return mixed.contiguous()
class gconv_hyper(nn.Module):
    """Graph convolution with a single adjacency shared across the batch."""

    def __init__(self):
        super(gconv_hyper, self).__init__()

    def forward(self, x, A):
        # out[n, w, c] = sum_v x[n, v, c] * A[v, w]
        mixed = torch.einsum('nvc,vw->nwc', (x, A))
        return mixed.contiguous()
class gcn(nn.Module):
    """Multi-hop graph convolution.

    Hop outputs 0..gdep are concatenated on the feature axis and projected by
    an MLP. ``type='RNN'`` mixes each hop with both a dynamic (per-sample)
    adjacency and a predefined one; ``type='hyper'`` uses a single static
    adjacency and a deeper sigmoid MLP head.
    """

    def __init__(self, dims, gdep, dropout, alpha, beta, gamma, type=None):
        super(gcn, self).__init__()
        # Submodules are created in the same order as before so parameter
        # initialization draws from the RNG identically.
        if type == 'RNN':
            self.gconv = gconv_RNN()
            self.gconv_preA = gconv_hyper()
            self.mlp = nn.Linear((gdep + 1) * dims[0], dims[1])
        elif type == 'hyper':
            self.gconv = gconv_hyper()
            self.mlp = nn.Sequential(
                OrderedDict([('fc1', nn.Linear((gdep + 1) * dims[0], dims[1])),
                             ('sigmoid1', nn.Sigmoid()),
                             ('fc2', nn.Linear(dims[1], dims[2])),
                             ('sigmoid2', nn.Sigmoid()),
                             ('fc3', nn.Linear(dims[2], dims[3]))]))
        self.gdep = gdep
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.type_GNN = type

    def forward(self, x, adj):
        hop = x
        collected = [hop]
        if self.type_GNN == 'RNN':
            # adj is a pair: (dynamic per-sample adjacency, predefined adjacency).
            for _ in range(self.gdep):
                hop = (self.alpha * x + self.beta * self.gconv(hop, adj[0]) +
                       self.gamma * self.gconv_preA(hop, adj[1]))
                collected.append(hop)
        else:
            for _ in range(self.gdep):
                hop = self.alpha * x + self.gamma * self.gconv(hop, adj)
                collected.append(hop)
        return self.mlp(torch.cat(collected, dim=-1))
| 2,007 | 27.28169 | 79 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DGCRN/net.py | import torch.utils.data as utils
import torch.nn.functional as F
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import numpy as np
import pandas as pd
import math
import time
from layer import *
import sys
from collections import OrderedDict
class DGCRN(nn.Module):
    """Dynamic Graph Convolutional Recurrent Network.

    Encoder/decoder GRU in which every gate is a graph convolution; the
    adjacency used by the gates is generated at every step from static node
    embeddings modulated by hyper-GCN filters over the current input+state.
    """
    def __init__(self,
                 gcn_depth,
                 num_nodes,
                 device,
                 predefined_A=None,
                 dropout=0.3,
                 subgraph_size=20,
                 node_dim=40,
                 middle_dim=2,
                 seq_length=12,
                 in_dim=2,
                 out_dim=12,
                 layers=3,
                 list_weight=[0.05, 0.95, 0.95],
                 tanhalpha=3,
                 cl_decay_steps=4000,
                 rnn_size=64,
                 hyperGNN_dim=16):
        # NOTE(review): list_weight is a mutable default argument; it is only
        # unpacked below, never mutated, but a tuple default would be safer.
        super(DGCRN, self).__init__()
        self.output_dim = 1
        self.num_nodes = num_nodes
        self.dropout = dropout
        self.predefined_A = predefined_A
        self.seq_length = seq_length
        # Static node embeddings; two sets so the generated adjacency can be
        # asymmetric.
        self.emb1 = nn.Embedding(self.num_nodes, node_dim)
        self.emb2 = nn.Embedding(self.num_nodes, node_dim)
        self.lin1 = nn.Linear(node_dim, node_dim)
        self.lin2 = nn.Linear(node_dim, node_dim)
        self.idx = torch.arange(self.num_nodes).to(device)
        self.rnn_size = rnn_size
        self.in_dim = in_dim
        hidden_size = self.rnn_size
        self.hidden_size = self.rnn_size
        # Hyper-GCN layer widths; its input is [input, hidden state] per node.
        dims_hyper = [
            self.hidden_size + in_dim, hyperGNN_dim, middle_dim, node_dim
        ]
        # Hyper-GCN pairs producing embedding filters: separate modules for
        # encoder (..._tg) and decoder (..._tg_de), and for each of the two
        # predefined transition matrices (suffix _1).
        self.GCN1_tg = gcn(dims_hyper, gcn_depth, dropout, *list_weight,
                           'hyper')
        self.GCN2_tg = gcn(dims_hyper, gcn_depth, dropout, *list_weight,
                           'hyper')
        self.GCN1_tg_de = gcn(dims_hyper, gcn_depth, dropout, *list_weight,
                              'hyper')
        self.GCN2_tg_de = gcn(dims_hyper, gcn_depth, dropout, *list_weight,
                              'hyper')
        self.GCN1_tg_1 = gcn(dims_hyper, gcn_depth, dropout, *list_weight,
                             'hyper')
        self.GCN2_tg_1 = gcn(dims_hyper, gcn_depth, dropout, *list_weight,
                             'hyper')
        self.GCN1_tg_de_1 = gcn(dims_hyper, gcn_depth, dropout, *list_weight,
                                'hyper')
        self.GCN2_tg_de_1 = gcn(dims_hyper, gcn_depth, dropout, *list_weight,
                                'hyper')
        self.fc_final = nn.Linear(self.hidden_size, self.output_dim)
        self.alpha = tanhalpha
        self.device = device
        self.k = subgraph_size
        dims = [in_dim + self.hidden_size, self.hidden_size]
        # GRU gates as graph convolutions: z (update), r (reset), c (candidate);
        # one set for the encoder and a mirrored set (_de) for the decoder.
        self.gz1 = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gz2 = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gr1 = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gr2 = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gc1 = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gc2 = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gz1_de = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gz2_de = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gr1_de = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gr2_de = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gc1_de = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gc2_de = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.use_curriculum_learning = True
        self.cl_decay_steps = cl_decay_steps
        self.gcn_depth = gcn_depth
    def preprocessing(self, adj, predefined_A):
        """Add self-loops and row-normalize the dynamic adjacency; return it
        paired with the predefined transition matrix (format the RNN-type gcn
        expects)."""
        adj = adj + torch.eye(self.num_nodes).to(self.device)
        adj = adj / torch.unsqueeze(adj.sum(-1), -1)
        return [adj, predefined_A]
    def step(self,
             input,
             Hidden_State,
             Cell_State,
             predefined_A,
             type='encoder',
             idx=None,
             i=None):
        # One recurrent step: build the dynamic adjacency from the current
        # (input, hidden) pair, then run the graph-convolutional GRU cell.
        x = input
        x = x.transpose(1, 2).contiguous()
        nodevec1 = self.emb1(self.idx)
        nodevec2 = self.emb2(self.idx)
        hyper_input = torch.cat(
            (x, Hidden_State.view(-1, self.num_nodes, self.hidden_size)), 2)
        # Encoder and decoder use separate hyper-GCNs to generate the filters.
        if type == 'encoder':
            filter1 = self.GCN1_tg(hyper_input,
                                   predefined_A[0]) + self.GCN1_tg_1(
                                       hyper_input, predefined_A[1])
            filter2 = self.GCN2_tg(hyper_input,
                                   predefined_A[0]) + self.GCN2_tg_1(
                                       hyper_input, predefined_A[1])
        if type == 'decoder':
            filter1 = self.GCN1_tg_de(hyper_input,
                                      predefined_A[0]) + self.GCN1_tg_de_1(
                                          hyper_input, predefined_A[1])
            filter2 = self.GCN2_tg_de(hyper_input,
                                      predefined_A[0]) + self.GCN2_tg_de_1(
                                          hyper_input, predefined_A[1])
        # Modulate the static embeddings and form a signed similarity matrix;
        # relu(tanh(...)) keeps only one direction of each pairwise difference.
        nodevec1 = torch.tanh(self.alpha * torch.mul(nodevec1, filter1))
        nodevec2 = torch.tanh(self.alpha * torch.mul(nodevec2, filter2))
        a = torch.matmul(nodevec1, nodevec2.transpose(2, 1)) - torch.matmul(
            nodevec2, nodevec1.transpose(2, 1))
        adj = F.relu(torch.tanh(self.alpha * a))
        adp = self.preprocessing(adj, predefined_A[0])
        adpT = self.preprocessing(adj.transpose(1, 2), predefined_A[1])
        Hidden_State = Hidden_State.view(-1, self.num_nodes, self.hidden_size)
        Cell_State = Cell_State.view(-1, self.num_nodes, self.hidden_size)
        combined = torch.cat((x, Hidden_State), -1)
        # GRU cell. NOTE(review): F.sigmoid/F.tanh are deprecated aliases of
        # torch.sigmoid/torch.tanh.
        if type == 'encoder':
            z = F.sigmoid(self.gz1(combined, adp) + self.gz2(combined, adpT))
            r = F.sigmoid(self.gr1(combined, adp) + self.gr2(combined, adpT))
            temp = torch.cat((x, torch.mul(r, Hidden_State)), -1)
            Cell_State = F.tanh(self.gc1(temp, adp) + self.gc2(temp, adpT))
        elif type == 'decoder':
            z = F.sigmoid(
                self.gz1_de(combined, adp) + self.gz2_de(combined, adpT))
            r = F.sigmoid(
                self.gr1_de(combined, adp) + self.gr2_de(combined, adpT))
            temp = torch.cat((x, torch.mul(r, Hidden_State)), -1)
            Cell_State = F.tanh(
                self.gc1_de(temp, adp) + self.gc2_de(temp, adpT))
        Hidden_State = torch.mul(z, Hidden_State) + torch.mul(
            1 - z, Cell_State)
        return Hidden_State.view(-1, self.hidden_size), Cell_State.view(
            -1, self.hidden_size)
    def forward(self,
                input,
                idx=None,
                ycl=None,
                batches_seen=None,
                task_level=12):
        """Encode the input sequence, then decode `task_level` steps.

        :param input: tensor indexed as [batch, feature, node, time]
        :param ycl: target sequence supplying decoder time features and
            ground truth for scheduled sampling
        :param batches_seen: global step for the sampling-probability schedule
        :param task_level: curriculum horizon (number of decoder steps)
        """
        predefined_A = self.predefined_A
        x = input
        batch_size = x.size(0)
        Hidden_State, Cell_State = self.initHidden(batch_size * self.num_nodes,
                                                   self.hidden_size)
        outputs = None
        for i in range(self.seq_length):
            Hidden_State, Cell_State = self.step(torch.squeeze(x[..., i]),
                                                 Hidden_State, Cell_State,
                                                 predefined_A, 'encoder', idx,
                                                 i)
            if outputs is None:
                outputs = Hidden_State.unsqueeze(1)
            else:
                outputs = torch.cat((outputs, Hidden_State.unsqueeze(1)), 1)
        # Decoder starts from a zero "GO" symbol concatenated with the target
        # time features taken from ycl.
        go_symbol = torch.zeros((batch_size, self.output_dim, self.num_nodes),
                                device=self.device)
        timeofday = ycl[:, 1:, :, :]
        decoder_input = go_symbol
        outputs_final = []
        for i in range(task_level):
            try:
                decoder_input = torch.cat([decoder_input, timeofday[..., i]],
                                          dim=1)
            except:
                print(decoder_input.shape, timeofday.shape)
                sys.exit(0)
            Hidden_State, Cell_State = self.step(decoder_input, Hidden_State,
                                                 Cell_State, predefined_A,
                                                 'decoder', idx, None)
            decoder_output = self.fc_final(Hidden_State)
            decoder_input = decoder_output.view(batch_size, self.num_nodes,
                                                self.output_dim).transpose(
                                                    1, 2)
            outputs_final.append(decoder_output)
            # Scheduled sampling: with decaying probability, feed the ground
            # truth back in instead of the model's own prediction.
            if self.training and self.use_curriculum_learning:
                c = np.random.uniform(0, 1)
                if c < self._compute_sampling_threshold(batches_seen):
                    decoder_input = ycl[:, :1, :, i]
        outputs_final = torch.stack(outputs_final, dim=1)
        outputs_final = outputs_final.view(batch_size, self.num_nodes,
                                           task_level,
                                           self.output_dim).transpose(1, 2)
        return outputs_final
    def initHidden(self, batch_size, hidden_size):
        # NOTE(review): nn.init.orthogonal is the deprecated alias of
        # nn.init.orthogonal_; it overwrites the zero tensors in place, so the
        # GPU path starts from orthogonal states while the CPU path keeps zeros.
        use_gpu = torch.cuda.is_available()
        if use_gpu:
            Hidden_State = Variable(
                torch.zeros(batch_size, hidden_size).to(self.device))
            Cell_State = Variable(
                torch.zeros(batch_size, hidden_size).to(self.device))
            nn.init.orthogonal(Hidden_State)
            nn.init.orthogonal(Cell_State)
            return Hidden_State, Cell_State
        else:
            Hidden_State = Variable(torch.zeros(batch_size, hidden_size))
            Cell_State = Variable(torch.zeros(batch_size, hidden_size))
            return Hidden_State, Cell_State
    def _compute_sampling_threshold(self, batches_seen):
        # Inverse-sigmoid decay of the teacher-forcing probability.
        return self.cl_decay_steps / (
            self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps))
| 10,196 | 36.215328 | 79 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DGCRN/util.py | import pickle
import numpy as np
import os
import scipy.sparse as sp
import torch
from scipy.sparse import linalg
from torch.autograd import Variable
def normal_std(x):
    """Population-style std: rescale the tensor's (unbiased) std by sqrt((n-1)/n)."""
    n = len(x)
    return x.std() * np.sqrt((n - 1.) / n)
class DataLoaderS(object):
    """Single-step loader for the plain CSV benchmark format: sliding windows
    of length `window` predict the row `horizon` steps ahead."""
    def __init__(self,
                 file_name,
                 train,
                 valid,
                 device,
                 horizon,
                 window,
                 normalize=2):
        self.P = window
        self.h = horizon
        # NOTE(review): the file handle is never closed.
        fin = open(file_name)
        self.rawdat = np.loadtxt(fin, delimiter=',')
        self.dat = np.zeros(self.rawdat.shape)
        self.n, self.m = self.dat.shape
        # NOTE(review): hard-coded 2 shadows the `normalize` argument on the
        # attribute, although _normalized() below still receives the argument.
        self.normalize = 2
        self.scale = np.ones(self.m)
        self._normalized(normalize)
        self._split(int(train * self.n), int((train + valid) * self.n), self.n)
        self.scale = torch.from_numpy(self.scale).float()
        # rse/rae denominators are computed on un-normalized test targets.
        tmp = self.test[1] * self.scale.expand(self.test[1].size(0), self.m)
        self.scale = self.scale.to(device)
        self.scale = Variable(self.scale)
        self.rse = normal_std(tmp)
        self.rae = torch.mean(torch.abs(tmp - torch.mean(tmp)))
        self.device = device
    def _normalized(self, normalize):
        # 0: raw values, 1: global max scaling, 2: per-column max-abs scaling
        # (per-column factors are remembered in self.scale).
        if (normalize == 0):
            self.dat = self.rawdat
        if (normalize == 1):
            self.dat = self.rawdat / np.max(self.rawdat)
        if (normalize == 2):
            for i in range(self.m):
                self.scale[i] = np.max(np.abs(self.rawdat[:, i]))
                self.dat[:, i] = self.rawdat[:, i] / np.max(
                    np.abs(self.rawdat[:, i]))
    def _split(self, train, valid, test):
        # Row-index ranges; the first usable sample needs P+h-1 preceding rows.
        train_set = range(self.P + self.h - 1, train)
        valid_set = range(train, valid)
        test_set = range(valid, self.n)
        self.train = self._batchify(train_set, self.h)
        self.valid = self._batchify(valid_set, self.h)
        self.test = self._batchify(test_set, self.h)
    def _batchify(self, idx_set, horizon):
        # Build (window, target) pairs: Y is the single row h steps ahead of X.
        n = len(idx_set)
        X = torch.zeros((n, self.P, self.m))
        Y = torch.zeros((n, self.m))
        for i in range(n):
            end = idx_set[i] - self.h + 1
            start = end - self.P
            X[i, :, :] = torch.from_numpy(self.dat[start:end, :])
            Y[i, :] = torch.from_numpy(self.dat[idx_set[i], :])
        return [X, Y]
    def get_batches(self, inputs, targets, batch_size, shuffle=True):
        """Yield (X, Y) mini-batches moved to `self.device`."""
        length = len(inputs)
        if shuffle:
            index = torch.randperm(length)
        else:
            index = torch.LongTensor(range(length))
        start_idx = 0
        while (start_idx < length):
            end_idx = min(length, start_idx + batch_size)
            excerpt = index[start_idx:end_idx]
            X = inputs[excerpt]
            Y = targets[excerpt]
            X = X.to(self.device)
            Y = Y.to(self.device)
            yield Variable(X), Variable(Y)
            start_idx += batch_size
class DataLoaderM(object):
    """In-memory mini-batch iterator over paired (inputs, targets) arrays."""

    def __init__(self, xs, ys, batch_size, pad_with_last_sample=True):
        """
        :param xs: input samples (first axis is the sample axis)
        :param ys: target samples, aligned with xs
        :param batch_size:
        :param pad_with_last_sample: pad with the last sample to make number of samples divisible to batch_size.
        """
        self.batch_size = batch_size
        self.current_ind = 0
        if pad_with_last_sample:
            # Repeat the final sample so len(xs) becomes a multiple of batch_size.
            shortfall = (batch_size - (len(xs) % batch_size)) % batch_size
            xs = np.concatenate([xs, np.repeat(xs[-1:], shortfall, axis=0)],
                                axis=0)
            ys = np.concatenate([ys, np.repeat(ys[-1:], shortfall, axis=0)],
                                axis=0)
        self.size = len(xs)
        self.num_batch = int(self.size // self.batch_size)
        self.xs = xs
        self.ys = ys

    def shuffle(self):
        """Apply one shared random permutation to inputs and targets."""
        order = np.random.permutation(self.size)
        self.xs = self.xs[order]
        self.ys = self.ys[order]

    def get_iterator(self):
        """Return a generator of (x, y) batches in order; resets the cursor."""
        self.current_ind = 0

        def _wrapper():
            while self.current_ind < self.num_batch:
                lo = self.batch_size * self.current_ind
                hi = min(self.size, self.batch_size * (self.current_ind + 1))
                yield (self.xs[lo:hi, ...], self.ys[lo:hi, ...])
                self.current_ind += 1

        return _wrapper()
class DataLoaderM_new(object):
    """Mini-batch iterator that also carries a curriculum-learning copy of the
    targets (ycl, a normalized version of ys)."""

    def __init__(self, xs, ys, ycl, batch_size, pad_with_last_sample=True):
        """
        :param xs: input samples
        :param ys: target samples (real units)
        :param ycl: curriculum-learning targets (normalized copy of ys)
        :param batch_size:
        :param pad_with_last_sample: pad with the last sample to make number of samples divisible to batch_size.
        """
        self.batch_size = batch_size
        self.current_ind = 0
        if pad_with_last_sample:
            num_padding = (batch_size - (len(xs) % batch_size)) % batch_size
            x_padding = np.repeat(xs[-1:], num_padding, axis=0)
            y_padding = np.repeat(ys[-1:], num_padding, axis=0)
            # Bug fix: ycl must be padded with its OWN last sample. It was
            # previously padded with ys' last sample, so padded entries of the
            # curriculum targets were un-normalized.
            ycl_padding = np.repeat(ycl[-1:], num_padding, axis=0)
            xs = np.concatenate([xs, x_padding], axis=0)
            ys = np.concatenate([ys, y_padding], axis=0)
            ycl = np.concatenate([ycl, ycl_padding], axis=0)
        self.size = len(xs)
        self.num_batch = int(self.size // self.batch_size)
        self.xs = xs
        self.ys = ys
        self.ycl = ycl

    def shuffle(self):
        """Apply one shared random permutation to xs, ys, and ycl."""
        permutation = np.random.permutation(self.size)
        xs, ys, ycl = self.xs[permutation], self.ys[permutation], self.ycl[
            permutation]
        self.xs = xs
        self.ys = ys
        self.ycl = ycl

    def get_iterator(self):
        """Return a generator of (x, y, ycl) batches in order; resets the cursor."""
        self.current_ind = 0

        def _wrapper():
            while self.current_ind < self.num_batch:
                start_ind = self.batch_size * self.current_ind
                end_ind = min(self.size,
                              self.batch_size * (self.current_ind + 1))
                x_i = self.xs[start_ind:end_ind, ...]
                y_i = self.ys[start_ind:end_ind, ...]
                y_i_cl = self.ycl[start_ind:end_ind, ...]
                yield (x_i, y_i, y_i_cl)
                self.current_ind += 1

        return _wrapper()
class StandardScaler():
    """
    Standard the input
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def transform(self, data):
        # Map raw values into z-score space.
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        # Undo transform(): map z-scores back to the original units.
        return data * self.std + self.mean
def sym_adj(adj):
    """Symmetrically normalize adjacency matrix: D^{-1/2} A^T D^{-1/2}, dense float32."""
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1))
    inv_sqrt = np.power(degrees, -0.5).flatten()
    # Isolated nodes (zero degree) would give inf; zero them out instead.
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_half = sp.diags(inv_sqrt)
    return adj.dot(d_half).transpose().dot(d_half).astype(np.float32).todense()
def asym_adj(adj):
    """Asymmetrically (row-)normalize adjacency matrix: D^{-1} A, dense float32."""
    adj = sp.coo_matrix(adj)
    row_sums = np.array(adj.sum(1)).flatten()
    inv = np.power(row_sums, -1).flatten()
    # Rows that sum to zero would give inf; zero them out instead.
    inv[np.isinf(inv)] = 0.
    return sp.diags(inv).dot(adj).astype(np.float32).todense()
def calculate_normalized_laplacian(adj):
    """
    Compute L = I - D^{-1/2} A D^{-1/2} (symmetric normalized Laplacian).

    :param adj: dense or sparse adjacency matrix
    :return: scipy sparse matrix
    """
    adj = sp.coo_matrix(adj)
    degree = np.array(adj.sum(1))
    inv_sqrt = np.power(degree, -0.5).flatten()
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_half = sp.diags(inv_sqrt)
    sym_norm = adj.dot(d_half).transpose().dot(d_half)
    return sp.eye(adj.shape[0]) - sym_norm.tocoo()
def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True):
    """Rescale the normalized Laplacian to roughly [-1, 1]: 2L/lambda_max - I."""
    if undirected:
        # Symmetrize by taking the element-wise maximum of A and A^T.
        adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])
    lap = calculate_normalized_laplacian(adj_mx)
    if lambda_max is None:
        # Compute the largest eigenvalue on demand.
        eigvals, _ = linalg.eigsh(lap, 1, which='LM')
        lambda_max = eigvals[0]
    lap = sp.csr_matrix(lap)
    n, _ = lap.shape
    identity = sp.identity(n, format='csr', dtype=lap.dtype)
    scaled = (2 / lambda_max * lap) - identity
    return scaled.astype(np.float32).todense()
def load_pickle(pickle_file):
    """Load a pickle file, retrying with latin1 encoding for py2-era pickles."""
    try:
        with open(pickle_file, 'rb') as f:
            pickle_data = pickle.load(f)
    except UnicodeDecodeError:
        with open(pickle_file, 'rb') as f:
            pickle_data = pickle.load(f, encoding='latin1')
    except Exception as e:
        # Report which file failed before propagating the error.
        print('Unable to load data ', pickle_file, ':', e)
        raise
    return pickle_data
def load_adj(pkl_filename):
    """Load the sensor-graph pickle and return the pair of row-normalized
    transition matrices (forward and backward)."""
    _, _, adj_mx = load_pickle(pkl_filename)
    return [asym_adj(adj_mx), asym_adj(np.transpose(adj_mx))]
def load_dataset(dataset_dir,
                 batch_size,
                 valid_batch_size=None,
                 test_batch_size=None):
    """Load train/val/test .npz splits, z-score the first input feature with
    train statistics, and wrap the arrays in batch iterators.

    The training loader additionally carries `y_train_cl`, a normalized copy
    of the training targets used for curriculum learning.

    :param dataset_dir: directory containing train.npz / val.npz / test.npz
    :param batch_size: training batch size
    :param valid_batch_size: validation batch size
    :param test_batch_size: test batch size
    :return: dict with raw arrays, loaders, and the fitted scaler
    """
    data = {}
    for category in ['train', 'val', 'test']:
        cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
        data['x_' + category] = cat_data['x']
        data['y_' + category] = cat_data['y']
    # The scaler is fit on the training inputs' first feature only.
    scaler = StandardScaler(mean=data['x_train'][..., 0].mean(),
                            std=data['x_train'][..., 0].std())
    for category in ['train', 'val', 'test']:
        data['x_' + category][...,
                              0] = scaler.transform(data['x_' + category][...,
                                                                          0])
    import copy
    # Deep copy so normalizing the curriculum targets leaves y_train intact.
    data['y_train_cl'] = copy.deepcopy(data['y_train'])
    data['y_train_cl'][..., 0] = scaler.transform(data['y_train'][..., 0])
    data['train_loader'] = DataLoaderM_new(data['x_train'], data['y_train'],
                                           data['y_train_cl'], batch_size)
    data['val_loader'] = DataLoaderM(data['x_val'], data['y_val'],
                                     valid_batch_size)
    data['test_loader'] = DataLoaderM(data['x_test'], data['y_test'],
                                      test_batch_size)
    data['scaler'] = scaler
    return data
def masked_mse(preds, labels, null_val=np.nan):
    """MSE that ignores entries where labels equal null_val (or are NaN)."""
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = (labels != null_val)
    mask = mask.float()
    # Rescale so the masked mean equals an average over valid entries only.
    mask = mask / torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    sq_err = (preds - labels)**2
    sq_err = sq_err * mask
    sq_err = torch.where(torch.isnan(sq_err), torch.zeros_like(sq_err), sq_err)
    return torch.mean(sq_err)
def masked_rmse(preds, labels, null_val=np.nan):
    # Root of the masked MSE; entries where labels == null_val are ignored.
    return torch.sqrt(masked_mse(preds=preds, labels=labels,
                                 null_val=null_val))
def masked_mae(preds, labels, null_val=np.nan):
    """MAE that ignores entries where labels equal null_val (or are NaN)."""
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = (labels != null_val)
    mask = mask.float()
    # Rescale so the masked mean equals an average over valid entries only.
    mask = mask / torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    abs_err = torch.abs(preds - labels)
    abs_err = abs_err * mask
    abs_err = torch.where(torch.isnan(abs_err), torch.zeros_like(abs_err),
                          abs_err)
    return torch.mean(abs_err)
def masked_mape(preds, labels, null_val=np.nan):
    """MAPE that ignores entries where labels equal null_val (or are NaN)."""
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = (labels != null_val)
    mask = mask.float()
    # Rescale so the masked mean equals an average over valid entries only.
    mask = mask / torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    rel_err = torch.abs(preds - labels) / labels
    rel_err = rel_err * mask
    # Division by masked zeros yields NaN after multiplying by a zero mask
    # entry; replace those with 0 so they don't poison the mean.
    rel_err = torch.where(torch.isnan(rel_err), torch.zeros_like(rel_err),
                          rel_err)
    return torch.mean(rel_err)
def metric(pred, real):
    """Return (MAE, MAPE, RMSE) as floats, treating zeros in `real` as missing."""
    return (masked_mae(pred, real, 0.0).item(),
            masked_mape(pred, real, 0.0).item(),
            masked_rmse(pred, real, 0.0).item())
def load_node_feature(path):
    """Read node features from a CSV-like file (first column is an id, the
    rest are floats) and return them z-score normalized per column.

    :param path: path to the feature file
    :return: torch.float tensor of shape (num_nodes, num_features)
    """
    x = []
    # Bug fix: the file handle was opened and never closed; `with` guarantees
    # closure even if a line fails to parse.
    with open(path) as fi:
        for li in fi:
            li = li.strip()
            li = li.split(",")
            e = [float(t) for t in li[1:]]
            x.append(e)
    x = np.array(x)
    mean = np.mean(x, axis=0)
    std = np.std(x, axis=0)
    z = torch.tensor((x - mean) / std, dtype=torch.float)
    return z
def normal_std(x):
    # NOTE(review): duplicate of the normal_std defined at the top of this
    # module; kept for backward compatibility.
    count = len(x)
    return x.std() * np.sqrt((count - 1.) / count)
| 12,210 | 31.562667 | 112 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DGCRN/train.py | import torch
import numpy as np
import argparse
import time
from util import *
from trainer import Trainer
from net import DGCRN
import setproctitle
import os
setproctitle.setproctitle("DGCRN@lifuxian")
def str_to_bool(value):
    """Parse common true/false spellings into a bool; raise on anything else."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in {'false', 'f', '0', 'no', 'n'}:
        return False
    if lowered in {'true', 't', '1', 'yes', 'y'}:
        return True
    raise ValueError(f'{value} is not a valid boolean value')
parser = argparse.ArgumentParser()
# --- Experiment control ---
parser.add_argument('--runs', type=int, default=10, help='number of runs')
parser.add_argument('--LOAD_INITIAL',
                    default=False,
                    type=str_to_bool,
                    help='If LOAD_INITIAL.')
parser.add_argument('--TEST_ONLY',
                    default=False,
                    type=str_to_bool,
                    help='If TEST_ONLY.')
parser.add_argument('--tolerance',
                    type=int,
                    default=100,
                    help='tolerance for earlystopping')
parser.add_argument('--OUTPUT_PREDICTION',
                    default=False,
                    type=str_to_bool,
                    help='If OUTPUT_PREDICTION.')
parser.add_argument('--cl_decay_steps',
                    default=2000,
                    type=float,
                    help='cl_decay_steps.')
parser.add_argument('--new_training_method',
                    default=False,
                    type=str_to_bool,
                    help='new_training_method.')
parser.add_argument('--rnn_size', default=64, type=int, help='rnn_size.')
parser.add_argument('--hyperGNN_dim',
                    default=16,
                    type=int,
                    help='hyperGNN_dim.')
# --- Data and model topology ---
parser.add_argument('--device', type=str, default='cuda:1', help='')
parser.add_argument('--data',
                    type=str,
                    default='data/METR-LA',
                    help='data path')
parser.add_argument('--adj_data',
                    type=str,
                    default='data/sensor_graph/adj_mx.pkl',
                    help='adj data path')
parser.add_argument('--propalpha', type=float, default=0.05, help='prop alpha')
parser.add_argument('--cl',
                    type=str_to_bool,
                    default=True,
                    help='whether to do curriculum learning')
parser.add_argument('--gcn_depth',
                    type=int,
                    default=2,
                    help='graph convolution depth')
parser.add_argument('--num_nodes',
                    type=int,
                    default=207,
                    help='number of nodes/variables')
parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate')
parser.add_argument('--subgraph_size', type=int, default=20, help='k')
parser.add_argument('--node_dim', type=int, default=40, help='dim of nodes')
parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension')
parser.add_argument('--seq_in_len',
                    type=int,
                    default=12,
                    help='input sequence length')
parser.add_argument('--seq_out_len',
                    type=int,
                    default=12,
                    help='output sequence length')
parser.add_argument('--layers', type=int, default=3, help='number of layers')
# --- Optimization ---
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--tanhalpha', type=float, default=3, help='adj alpha')
parser.add_argument('--learning_rate',
                    type=float,
                    default=0.001,
                    help='learning rate')
parser.add_argument('--weight_decay',
                    type=float,
                    default=0.0001,
                    help='weight decay rate')
parser.add_argument('--clip', type=int, default=5, help='clip')
parser.add_argument('--step_size1', type=int, default=2500, help='step_size')
parser.add_argument('--epochs', type=int, default=100, help='')
parser.add_argument('--print_every', type=int, default=50, help='')
parser.add_argument('--save', type=str, default='./save/', help='save path')
parser.add_argument('--expid', type=str, default='1', help='experiment id')
args = parser.parse_args()
torch.set_num_threads(3)
os.makedirs(args.save, exist_ok=True)
rnn_size = args.rnn_size
device = torch.device(args.device)
# Module-level globals shared by main(): data iterators, the scaler, and the
# pair of predefined transition matrices moved to the target device.
dataloader = load_dataset(args.data, args.batch_size, args.batch_size,
                          args.batch_size)
scaler = dataloader['scaler']
predefined_A = load_adj(args.adj_data)
predefined_A = [torch.tensor(adj).to(device) for adj in predefined_A]
def main(runid):
    """Run one experiment (train + test, or test-only).

    :param runid: index of the run; used to name the checkpoint file
    :return: (vmae, vmape, vrmse, mae, mape, rmse); the last three are lists
        over horizons 3/6/9/12. In TEST_ONLY mode the test lists are returned
        in both positions.
    """
    model = DGCRN(args.gcn_depth,
                  args.num_nodes,
                  device,
                  predefined_A=predefined_A,
                  dropout=args.dropout,
                  subgraph_size=args.subgraph_size,
                  node_dim=args.node_dim,
                  middle_dim=2,
                  seq_length=args.seq_in_len,
                  in_dim=args.in_dim,
                  out_dim=args.seq_out_len,
                  layers=args.layers,
                  list_weight=[0.05, 0.95, 0.95],
                  tanhalpha=args.tanhalpha,
                  cl_decay_steps=args.cl_decay_steps,
                  rnn_size=rnn_size,
                  hyperGNN_dim=args.hyperGNN_dim)
    print(args)
    nParams = sum([p.nelement() for p in model.parameters()])
    print('Number of model parameters is', nParams)
    engine = Trainer(model, args.learning_rate, args.weight_decay, args.clip,
                     args.step_size1, args.seq_out_len, scaler, device,
                     args.cl, args.new_training_method)
    if args.LOAD_INITIAL:
        # Warm-start from the checkpoint of the same expid/run.
        engine.model.load_state_dict(
            torch.load(args.save + "exp" + str(args.expid) + "_" + str(runid) +
                       ".pth",
                       map_location='cpu'))
        print('model load success!')
    if args.TEST_ONLY:
        # Evaluation-only path: run the test split and report 4 horizons.
        outputs = []
        realy = torch.Tensor(dataloader['y_test']).to(device)
        realy = realy.transpose(1, 3)[:, 0, :, :]
        for iter, (x,
                   y) in enumerate(dataloader['test_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            with torch.no_grad():
                engine.model.eval()
                preds = engine.model(testx, ycl=testy)
                preds = preds.transpose(1, 3)
            outputs.append(preds.squeeze(dim=1))
        yhat = torch.cat(outputs, dim=0)
        # Drop predictions that came from the loader's padding samples.
        yhat = yhat[:realy.size(0), ...]
        if args.OUTPUT_PREDICTION:
            pred_all = scaler.inverse_transform(yhat).cpu()
            path_savepred = args.save + 'result_pred/' + "exp" + str(
                args.expid) + "_" + str(runid)
            os.makedirs(args.save + 'result_pred/', exist_ok=True)
            np.save(path_savepred, pred_all)
            print('result of prediction has been saved, path: ' + os.getcwd() +
                  path_savepred[1:] + '.npy' + ", shape: " +
                  str(pred_all.shape))
        mae = []
        mape = []
        rmse = []
        # Horizons 3/6/9/12 correspond to indices 2/5/8/11.
        for i in [2, 5, 8, 11]:
            pred = scaler.inverse_transform(yhat[:, :, i])
            real = realy[:, :, i]
            metrics = metric(pred, real)
            log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
            print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
            mae.append(metrics[0])
            mape.append(metrics[1])
            rmse.append(metrics[2])
        return mae, mape, rmse, mae, mape, rmse
    else:
        print("start training...", flush=True)
        his_loss = []
        val_time = []
        train_time = []
        minl = 1e5
        epoch_best = -1
        tolerance = args.tolerance
        count_lfx = 0
        batches_seen = 0
        for i in range(1, args.epochs + 1):
            train_loss = []
            train_mape = []
            train_rmse = []
            t1 = time.time()
            dataloader['train_loader'].shuffle()
            for iter, (x, y, ycl) in enumerate(
                    dataloader['train_loader'].get_iterator()):
                batches_seen += 1
                trainx = torch.Tensor(x).to(device)
                trainx = trainx.transpose(1, 3)
                trainy = torch.Tensor(y).to(device)
                trainy = trainy.transpose(1, 3)
                trainycl = torch.Tensor(ycl).to(device)
                trainycl = trainycl.transpose(1, 3)
                metrics = engine.train(trainx,
                                       trainy[:, 0, :, :],
                                       trainycl,
                                       idx=None,
                                       batches_seen=batches_seen)
                train_loss.append(metrics[0])
                train_mape.append(metrics[1])
                train_rmse.append(metrics[2])
            t2 = time.time()
            train_time.append(t2 - t1)
            # Validation pass after every epoch.
            valid_loss = []
            valid_mape = []
            valid_rmse = []
            s1 = time.time()
            for iter, (x, y) in enumerate(
                    dataloader['val_loader'].get_iterator()):
                testx = torch.Tensor(x).to(device)
                testx = testx.transpose(1, 3)
                testy = torch.Tensor(y).to(device)
                testy = testy.transpose(1, 3)
                metrics = engine.eval(testx, testy[:, 0, :, :], testy)
                valid_loss.append(metrics[0])
                valid_mape.append(metrics[1])
                valid_rmse.append(metrics[2])
            s2 = time.time()
            val_time.append(s2 - s1)
            mtrain_loss = np.mean(train_loss)
            mtrain_mape = np.mean(train_mape)
            mtrain_rmse = np.mean(train_rmse)
            mvalid_loss = np.mean(valid_loss)
            mvalid_mape = np.mean(valid_mape)
            mvalid_rmse = np.mean(valid_rmse)
            his_loss.append(mvalid_loss)
            if (i - 1) % args.print_every == 0:
                log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
                print(log.format(i, (s2 - s1)))
                log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
                print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse,
                                 mvalid_loss, mvalid_mape, mvalid_rmse,
                                 (t2 - t1)),
                      flush=True)
            # Checkpoint on best validation loss; early-stop after `tolerance`
            # consecutive epochs without improvement.
            if mvalid_loss < minl:
                torch.save(
                    engine.model.state_dict(), args.save + "exp" +
                    str(args.expid) + "_" + str(runid) + ".pth")
                minl = mvalid_loss
                epoch_best = i
                count_lfx = 0
            else:
                count_lfx += 1
                if count_lfx > tolerance:
                    break
        print("Average Training Time: {:.4f} secs/epoch".format(
            np.mean(train_time)))
        print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))
        bestid = np.argmin(his_loss)
        # Restore the best checkpoint before final evaluation.
        engine.model.load_state_dict(
            torch.load(args.save + "exp" + str(args.expid) + "_" + str(runid) +
                       ".pth",
                       map_location='cpu'))
        print("Training finished")
        print("The valid loss on best model is {}, epoch:{}".format(
            str(round(his_loss[bestid], 4)), epoch_best))
        # Validation metrics with the restored best model.
        outputs = []
        realy = torch.Tensor(dataloader['y_val']).to(device)
        realy = realy.transpose(1, 3)[:, 0, :, :]
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            with torch.no_grad():
                preds = engine.model(testx, ycl=testy)
                preds = preds.transpose(1, 3)
            outputs.append(preds.squeeze(dim=1))
        yhat = torch.cat(outputs, dim=0)
        yhat = yhat[:realy.size(0), ...]
        pred = scaler.inverse_transform(yhat)
        vmae, vmape, vrmse = metric(pred, realy)
        # Test metrics at horizons 3/6/9/12.
        outputs = []
        realy = torch.Tensor(dataloader['y_test']).to(device)
        realy = realy.transpose(1, 3)[:, 0, :, :]
        for iter, (x,
                   y) in enumerate(dataloader['test_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            with torch.no_grad():
                preds = engine.model(testx, ycl=testy)
                preds = preds.transpose(1, 3)
            outputs.append(preds.squeeze(dim=1))
        yhat = torch.cat(outputs, dim=0)
        yhat = yhat[:realy.size(0), ...]
        mae = []
        mape = []
        rmse = []
        for i in [2, 5, 8, 11]:
            pred = scaler.inverse_transform(yhat[:, :, i])
            real = realy[:, :, i]
            metrics = metric(pred, real)
            log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
            print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
            mae.append(metrics[0])
            mape.append(metrics[1])
            rmse.append(metrics[2])
        return vmae, vmape, vrmse, mae, mape, rmse
if __name__ == "__main__":
    # Collect per-run validation scalars and per-horizon test metrics.
    vmae = []
    vmape = []
    vrmse = []
    mae = []
    mape = []
    rmse = []
    for i in range(args.runs):
        # main() handles both TEST_ONLY and training modes internally, so the
        # former `if args.TEST_ONLY:` branch (identical to its else branch)
        # was collapsed into a single call.
        vm1, vm2, vm3, m1, m2, m3 = main(i)
        vmae.append(vm1)
        vmape.append(vm2)
        vrmse.append(vm3)
        mae.append(m1)
        mape.append(m2)
        rmse.append(m3)
    mae = np.array(mae)
    mape = np.array(mape)
    rmse = np.array(rmse)
    # Mean/std across runs (axis 0 = runs, axis 1 = horizon).
    amae = np.mean(mae, 0)
    amape = np.mean(mape, 0)
    armse = np.mean(rmse, 0)
    smae = np.std(mae, 0)
    smape = np.std(mape, 0)
    srmse = np.std(rmse, 0)
    print('\n\nResults for ' + str(args.runs) + ' runs\n\n')
    print('valid\tMAE\tRMSE\tMAPE')
    log = 'mean:\t{:.4f}\t{:.4f}\t{:.4f}'
    print(log.format(np.mean(vmae), np.mean(vrmse), np.mean(vmape)))
    log = 'std:\t{:.4f}\t{:.4f}\t{:.4f}'
    print(log.format(np.std(vmae), np.std(vrmse), np.std(vmape)))
    print('\n\n')
    print(
        'test|horizon\tMAE-mean\tRMSE-mean\tMAPE-mean\tMAE-std\tRMSE-std\tMAPE-std'
    )
    for i in range(4):
        log = '{:d}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}'
        print(
            log.format([3, 6, 9, 12][i], amae[i], armse[i], amape[i], smae[i],
                       srmse[i], smape[i]))
| 15,124 | 35.184211 | 186 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DGCRN/trainer.py | import torch.optim as optim
import math
from net import *
import util
class Trainer():
    """Training/evaluation wrapper for the DGCRN model.

    Supports curriculum learning (CL): when enabled, the loss is computed
    only on the first ``task_level`` horizon steps, and ``task_level`` is
    incremented every ``step_size`` iterations until it reaches
    ``seq_out_len``.
    """
    def __init__(self,
                 model,
                 lrate,
                 wdecay,
                 clip,
                 step_size,
                 seq_out_len,
                 scaler,
                 device,
                 cl=True,
                 new_training_method=False):
        # scaler inverse-transforms normalized model outputs back to the
        # original value range before metrics are computed.
        self.scaler = scaler
        self.model = model
        self.model.to(device)
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=lrate,
                                    weight_decay=wdecay)
        self.loss = util.masked_mae
        self.clip = clip  # max gradient norm; None disables clipping
        self.step = step_size  # iterations between curriculum-level bumps
        self.iter = 0
        self.task_level = 1  # number of horizon steps currently supervised
        self.seq_out_len = seq_out_len
        self.cl = cl
        self.new_training_method = new_training_method

    def train(self, input, real_val, ycl, idx=None, batches_seen=None):
        """One optimization step.

        Returns (masked-MAE loss, MAPE, RMSE) as Python floats, computed
        on inverse-transformed predictions with 0.0 treated as missing.
        """
        self.iter += 1
        # Curriculum learning: widen the supervised horizon by one step
        # every `self.step` iterations.
        if self.iter % self.step == 0 and self.task_level < self.seq_out_len:
            self.task_level += 1
            if self.new_training_method:
                # Reset the counter so every level lasts a full window.
                self.iter = 0
        self.model.train()
        self.optimizer.zero_grad()
        if self.cl:
            output = self.model(input,
                                idx=idx,
                                ycl=ycl,
                                batches_seen=self.iter,
                                task_level=self.task_level)
        else:
            output = self.model(input,
                                idx=idx,
                                ycl=ycl,
                                batches_seen=self.iter,
                                task_level=self.seq_out_len)
        output = output.transpose(1, 3)
        real = torch.unsqueeze(real_val, dim=1)
        predict = self.scaler.inverse_transform(output)
        if self.cl:
            # Supervise only the first `task_level` horizon steps.
            loss = self.loss(predict[:, :, :, :self.task_level],
                             real[:, :, :, :self.task_level], 0.0)
            mape = util.masked_mape(predict[:, :, :, :self.task_level],
                                    real[:, :, :, :self.task_level],
                                    0.0).item()
            rmse = util.masked_rmse(predict[:, :, :, :self.task_level],
                                    real[:, :, :, :self.task_level],
                                    0.0).item()
        else:
            loss = self.loss(predict, real, 0.0)
            mape = util.masked_mape(predict, real, 0.0).item()
            rmse = util.masked_rmse(predict, real, 0.0).item()
        loss.backward()
        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()
        return loss.item(), mape, rmse

    def eval(self, input, real_val, ycl):
        """Evaluate one batch without gradients; returns (mae, mape, rmse)."""
        self.model.eval()
        with torch.no_grad():
            output = self.model(input, ycl=ycl)
        output = output.transpose(1, 3)
        real = torch.unsqueeze(real_val, dim=1)
        predict = self.scaler.inverse_transform(output)
        loss = self.loss(predict, real, 0.0)
        mape = util.masked_mape(predict, real, 0.0).item()
        rmse = util.masked_rmse(predict, real, 0.0).item()
        return loss.item(), mape, rmse
Traffic-Benchmark | Traffic-Benchmark-master/methods/AGCRN/model/AGCRN.py | import torch
import torch.nn as nn
from model.AGCRNCell import AGCRNCell
class AVWDCRNN(nn.Module):
    """Stacked adaptive-graph recurrent encoder built from AGCRNCell layers."""
    def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim, num_layers=1):
        super(AVWDCRNN, self).__init__()
        assert num_layers >= 1, 'At least one DCRNN layer in the Encoder.'
        self.node_num = node_num
        self.input_dim = dim_in
        self.num_layers = num_layers
        # First cell maps dim_in -> dim_out; subsequent cells are dim_out -> dim_out.
        self.dcrnn_cells = nn.ModuleList()
        self.dcrnn_cells.append(AGCRNCell(node_num, dim_in, dim_out, cheb_k, embed_dim))
        for _ in range(1, num_layers):
            self.dcrnn_cells.append(AGCRNCell(node_num, dim_out, dim_out, cheb_k, embed_dim))
    def forward(self, x, init_state, node_embeddings):
        """Run the stacked recurrent encoder over a full sequence."""
        #shape of x: (B, T, N, D)
        #shape of init_state: (num_layers, B, N, hidden_dim)
        assert x.shape[2] == self.node_num and x.shape[3] == self.input_dim
        seq_length = x.shape[1]
        current_inputs = x
        output_hidden = []
        for i in range(self.num_layers):
            state = init_state[i]
            inner_states = []
            # Unroll layer i over time; its per-step outputs feed layer i+1.
            for t in range(seq_length):
                state = self.dcrnn_cells[i](current_inputs[:, t, :, :], state, node_embeddings)
                inner_states.append(state)
            output_hidden.append(state)
            current_inputs = torch.stack(inner_states, dim=1)
        #current_inputs: the outputs of last layer: (B, T, N, hidden_dim)
        #output_hidden: the last state for each layer: (num_layers, B, N, hidden_dim)
        #last_state: (B, N, hidden_dim)
        return current_inputs, output_hidden
    def init_hidden(self, batch_size):
        """Return zero-initialized hidden states for all layers."""
        init_states = []
        for i in range(self.num_layers):
            init_states.append(self.dcrnn_cells[i].init_hidden_state(batch_size))
        return torch.stack(init_states, dim=0)      #(num_layers, B, N, hidden_dim)
class AGCRN(nn.Module):
    """Adaptive Graph Convolutional Recurrent Network.

    Encodes the input sequence with AVWDCRNN using learned node embeddings
    (the adaptive graph), then predicts all horizon steps at once from the
    last encoder state via a 1x`hidden_dim` convolution.
    """
    def __init__(self, args):
        super(AGCRN, self).__init__()
        self.num_node = args.num_nodes
        self.input_dim = args.input_dim
        self.hidden_dim = args.rnn_units
        self.output_dim = args.output_dim
        self.horizon = args.horizon
        self.num_layers = args.num_layers
        self.default_graph = args.default_graph
        # Learned per-node embeddings that define the adaptive adjacency.
        self.node_embeddings = nn.Parameter(torch.randn(self.num_node, args.embed_dim), requires_grad=True)
        self.encoder = AVWDCRNN(args.num_nodes, args.input_dim, args.rnn_units, args.cheb_k,
                                args.embed_dim, args.num_layers)
        #predictor: maps the final hidden state to horizon*output_dim values per node
        self.end_conv = nn.Conv2d(1, args.horizon * self.output_dim, kernel_size=(1, self.hidden_dim), bias=True)

    def forward(self, source, targets, teacher_forcing_ratio=0.5):
        """Forward pass; `targets`/`teacher_forcing_ratio` are unused here.

        source: (B, T_1, N, D) -> returns (B, horizon, N, output_dim).
        """
        init_state = self.encoder.init_hidden(source.shape[0])
        output, _ = self.encoder(source, init_state, self.node_embeddings)      #B, T, N, hidden
        output = output[:, -1:, :, :]                                   #B, 1, N, hidden
        #CNN based predictor
        output = self.end_conv((output))                         #B, T*C, N, 1
        output = output.squeeze(-1).reshape(-1, self.horizon, self.output_dim, self.num_node)
        output = output.permute(0, 1, 3, 2)                             #B, T, N, C
        return output
Traffic-Benchmark | Traffic-Benchmark-master/methods/AGCRN/model/Run_PEMS-BAY.py | import os
import sys
file_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(file_dir)
sys.path.append(file_dir)
import torch
import numpy as np
import torch.nn as nn
import argparse
import configparser
from datetime import datetime
from model.AGCRN import AGCRN as Network
from model.BasicTrainer import Trainer
from lib.TrainInits import init_seed
from lib.dataloader import get_dataloader
from lib.TrainInits import print_model_parameters
Mode = 'train'
DEBUG = 'False'
DATASET = 'PEMS-BAY'
MODEL = 'AGCRN'
config_file = './{}_{}.conf'.format(DATASET, MODEL)
config = configparser.ConfigParser()
config.read(config_file)
from lib.metrics import MAE_torch
def masked_mae_loss(scaler, mask_value):
    """Build a masked-MAE criterion that de-normalizes predictions first.

    If `scaler` is truthy, predictions are passed through its
    inverse_transform before the masked MAE against raw labels.
    """
    def loss(preds, labels):
        unscaled = scaler.inverse_transform(preds) if scaler else preds
        return MAE_torch(pred=unscaled, true=labels, mask_value=mask_value)
    return loss
args = argparse.ArgumentParser(description='arguments')
args.add_argument('--dataset', default=DATASET, type=str)
args.add_argument('--mode', default=Mode, type=str)
args.add_argument('--debug', default=DEBUG, type=eval)
args.add_argument('--model', default=MODEL, type=str)
args.add_argument('--cuda', default=True, type=bool)
args.add_argument('--val_ratio',
default=config['data']['val_ratio'],
type=float)
args.add_argument('--test_ratio',
default=config['data']['test_ratio'],
type=float)
args.add_argument('--lag', default=config['data']['lag'], type=int)
args.add_argument('--horizon', default=config['data']['horizon'], type=int)
args.add_argument('--num_nodes', default=config['data']['num_nodes'], type=int)
args.add_argument('--tod', default=config['data']['tod'], type=eval)
args.add_argument('--normalizer',
default=config['data']['normalizer'],
type=str)
args.add_argument('--column_wise',
default=config['data']['column_wise'],
type=eval)
args.add_argument('--default_graph',
default=config['data']['default_graph'],
type=eval)
args.add_argument('--input_dim',
default=config['model']['input_dim'],
type=int)
args.add_argument('--output_dim',
default=config['model']['output_dim'],
type=int)
args.add_argument('--embed_dim',
default=config['model']['embed_dim'],
type=int)
args.add_argument('--rnn_units',
default=config['model']['rnn_units'],
type=int)
args.add_argument('--num_layers',
default=config['model']['num_layers'],
type=int)
args.add_argument('--cheb_k', default=config['model']['cheb_order'], type=int)
args.add_argument('--loss_func',
default=config['train']['loss_func'],
type=str)
args.add_argument('--seed', default=config['train']['seed'], type=int)
args.add_argument('--batch_size',
default=config['train']['batch_size'],
type=int)
args.add_argument('--epochs', default=config['train']['epochs'], type=int)
args.add_argument('--lr_init', default=config['train']['lr_init'], type=float)
args.add_argument('--lr_decay', default=config['train']['lr_decay'], type=eval)
args.add_argument('--lr_decay_rate',
default=config['train']['lr_decay_rate'],
type=float)
args.add_argument('--lr_decay_step',
default=config['train']['lr_decay_step'],
type=str)
args.add_argument('--early_stop',
default=config['train']['early_stop'],
type=eval)
args.add_argument('--early_stop_patience',
default=config['train']['early_stop_patience'],
type=int)
args.add_argument('--grad_norm',
default=config['train']['grad_norm'],
type=eval)
args.add_argument('--max_grad_norm',
default=config['train']['max_grad_norm'],
type=int)
args.add_argument('--teacher_forcing', default=False, type=bool)
args.add_argument('--real_value',
default=config['train']['real_value'],
type=eval,
help='use real value for loss calculation')
args.add_argument('--mae_thresh',
default=config['test']['mae_thresh'],
type=eval)
args.add_argument('--mape_thresh',
default=config['test']['mape_thresh'],
type=float)
args.add_argument('--log_dir', default='./', type=str)
args.add_argument('--log_step', default=config['log']['log_step'], type=int)
args.add_argument('--plot', default=config['log']['plot'], type=eval)
args.add_argument('--dataset_dir',
type=str,
default='data/METR-LA',
help='data path')
args.add_argument('--device', type=str, default='cuda:1', help='')
args = args.parse_args()
init_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.set_device(int(args.device[5]))
else:
args.device = 'cpu'
model = Network(args)
model = model.to(args.device)
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
else:
nn.init.uniform_(p)
print_model_parameters(model, only_num=False)
train_loader, val_loader, test_loader, scaler = get_dataloader(
args,
normalizer=args.normalizer,
tod=args.tod,
dow=False,
weather=False,
single=False)
loss = masked_mae_loss(scaler, mask_value=0.0)
optimizer = torch.optim.Adam(params=model.parameters(),
lr=args.lr_init,
eps=1.0e-8,
weight_decay=0,
amsgrad=False)
lr_scheduler = None
if args.lr_decay:
print('Applying learning rate decay.')
lr_decay_steps = [int(i) for i in list(args.lr_decay_step.split(','))]
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer=optimizer,
milestones=lr_decay_steps,
gamma=args.lr_decay_rate)
current_time = datetime.now().strftime('%Y%m%d%H%M%S')
current_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(current_dir, 'experiments', args.dataset, current_time)
args.log_dir = log_dir
trainer = Trainer(model,
loss,
optimizer,
train_loader,
val_loader,
test_loader,
scaler,
args,
lr_scheduler=lr_scheduler)
if args.mode == 'train':
trainer.train()
elif args.mode == 'test':
model.load_state_dict(
torch.load('../pre-trained/{}.pth'.format(args.dataset)))
print("Load saved model")
trainer.test(model, trainer.args, test_loader, scaler, trainer.logger)
else:
raise ValueError
| 6,953 | 32.921951 | 79 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/AGCRN/model/AGCRN_debug.py | import torch
import torch.nn as nn
from model.AGCRNCell import AGCRNCell
class AVWDCRNN(nn.Module):
    """Stacked AGCRN recurrent encoder.

    Runs `num_layers` AGCRNCell layers over a (B, T, N, D) sequence and
    returns the last layer's per-step outputs plus each layer's final
    hidden state.
    """
    def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim, num_layers=1):
        super(AVWDCRNN, self).__init__()
        assert num_layers >= 1, 'At least one DCRNN layer in the Encoder.'
        self.node_num = node_num
        self.input_dim = dim_in
        self.num_layers = num_layers
        # First cell consumes dim_in; deeper cells consume dim_out.
        cells = [AGCRNCell(node_num, dim_in, dim_out, cheb_k, embed_dim)]
        for _ in range(1, num_layers):
            cells.append(AGCRNCell(node_num, dim_out, dim_out, cheb_k, embed_dim))
        self.dcrnn_cells = nn.ModuleList(cells)

    def forward(self, x, init_state, node_embeddings):
        # x: (B, T, N, D); init_state: (num_layers, B, N, hidden_dim)
        assert x.shape[2] == self.node_num and x.shape[3] == self.input_dim
        steps = x.shape[1]
        layer_input = x
        final_states = []
        for layer_idx in range(self.num_layers):
            cell = self.dcrnn_cells[layer_idx]
            hidden = init_state[layer_idx]
            per_step = []
            for t in range(steps):
                hidden = cell(layer_input[:, t, :, :], hidden, node_embeddings)
                per_step.append(hidden)
            final_states.append(hidden)
            layer_input = torch.stack(per_step, dim=1)
        # layer_input: (B, T, N, hidden_dim) from the last layer
        # final_states: list of (B, N, hidden_dim), one per layer
        return layer_input, final_states

    def init_hidden(self, batch_size):
        states = [cell.init_hidden_state(batch_size) for cell in self.dcrnn_cells]
        return torch.stack(states, dim=0)  # (num_layers, B, N, hidden_dim)
import sys
class AGCRN(nn.Module):
    """Debug-instrumented variant of AGCRN.

    Identical wiring to the production AGCRN, but forward() prints NaN
    counts at every stage and then calls sys.exit(0) — it never returns
    normally. Keep for diagnostics only; do not use for training.
    """
    def __init__(self, args):
        super(AGCRN, self).__init__()
        self.num_node = args.num_nodes
        self.input_dim = args.input_dim
        self.hidden_dim = args.rnn_units
        self.output_dim = args.output_dim
        self.horizon = args.horizon
        self.num_layers = args.num_layers
        self.default_graph = args.default_graph
        # Learned node embeddings defining the adaptive adjacency.
        self.node_embeddings = nn.Parameter(torch.randn(self.num_node, args.embed_dim), requires_grad=True)
        self.encoder = AVWDCRNN(args.num_nodes, args.input_dim, args.rnn_units, args.cheb_k,
                                args.embed_dim, args.num_layers)
        #predictor
        self.end_conv = nn.Conv2d(1, args.horizon * self.output_dim, kernel_size=(1, self.hidden_dim), bias=True)

    def forward(self, source, targets, teacher_forcing_ratio=0.5):
        #source: B, T_1, N, D
        #target: B, T_2, N, D
        # Each print reports how many NaNs appear at that stage, to locate
        # where NaNs are introduced in the pipeline.
        print(source.shape)
        print('0: ', torch.sum(torch.isnan(source)))
        init_state = self.encoder.init_hidden(source.shape[0])
        print('1: ', torch.sum(torch.isnan(init_state)))
        output, _ = self.encoder(source, init_state, self.node_embeddings)      #B, T, N, hidden
        print('2: ', torch.sum(torch.isnan(output)))
        print('2 node embedding: ', torch.sum(torch.isnan(self.node_embeddings)))
        output = output[:, -1:, :, :]                                   #B, 1, N, hidden
        print('3: ', torch.sum(torch.isnan(output)))
        #CNN based predictor
        output = self.end_conv((output))                         #B, T*C, N, 1
        print('4: ', torch.sum(torch.isnan(output)))
        output = output.squeeze(-1).reshape(-1, self.horizon, self.output_dim, self.num_node)
        output = output.permute(0, 1, 3, 2)                             #B, T, N, C
        print('5: ', torch.sum(torch.isnan(output)))
        sys.exit(0)
        # Sample debug output preserved from the original run logs:
        # torch.Size([64, 12, 207, 1])
        # 0: tensor(0, device='cuda:0')
        # 1: tensor(0)
        # 2: tensor(10174464, device='cuda:0')
        # 2 node embedding: tensor(0, device='cuda:0')
        # 3: tensor(847872, device='cuda:0')
        # 4: tensor(158976, device='cuda:0')
        # 5: tensor(158976, device='cuda:0')
        # torch.Size([64, 12, 207, 2])
        # 0: tensor(0, device='cuda:1')
        # 1: tensor(0)
        # 2: tensor(0, device='cuda:1')
        # 2 node embedding: tensor(0, device='cuda:1')
        # 3: tensor(0, device='cuda:1')
        # 4: tensor(0, device='cuda:1')
        # 5: tensor(0, device='cuda:1')
        return output  # unreachable: sys.exit(0) above always fires
Traffic-Benchmark | Traffic-Benchmark-master/methods/AGCRN/model/AGCN.py | import torch
import torch.nn.functional as F
import torch.nn as nn
class AVWGCN(nn.Module):
    """Adaptive Vertex-Wise Graph Convolution.

    The adjacency is derived from learned node embeddings
    (softmax(relu(E @ E^T))) and per-node convolution weights/biases are
    generated from the same embeddings via learned parameter pools.
    """
    def __init__(self, dim_in, dim_out, cheb_k, embed_dim):
        super(AVWGCN, self).__init__()
        self.cheb_k = cheb_k
        # NOTE(review): torch.FloatTensor(...) is uninitialized memory —
        # callers are expected to initialize these parameters externally.
        self.weights_pool = nn.Parameter(torch.FloatTensor(embed_dim, cheb_k, dim_in, dim_out))
        self.bias_pool = nn.Parameter(torch.FloatTensor(embed_dim, dim_out))
    def forward(self, x, node_embeddings):
        #x shaped[B, N, C], node_embeddings shaped [N, D] -> supports shaped [N, N]
        #output shape [B, N, C]
        node_num = node_embeddings.shape[0]
        # Row-normalized adaptive adjacency from the embedding similarity.
        supports = F.softmax(F.relu(torch.mm(node_embeddings, node_embeddings.transpose(0, 1))), dim=1)
        support_set = [torch.eye(node_num).to(supports.device), supports]
        # Chebyshev recurrence: T_k = 2*A*T_{k-1} - T_{k-2} (default cheb_k = 3).
        for k in range(2, self.cheb_k):
            support_set.append(torch.matmul(2 * supports, support_set[-1]) - support_set[-2])
        supports = torch.stack(support_set, dim=0)
        weights = torch.einsum('nd,dkio->nkio', node_embeddings, self.weights_pool)  #N, cheb_k, dim_in, dim_out
        bias = torch.matmul(node_embeddings, self.bias_pool)                       #N, dim_out
        x_g = torch.einsum("knm,bmc->bknc", supports, x)      #B, cheb_k, N, dim_in
        x_g = x_g.permute(0, 2, 1, 3)  # B, N, cheb_k, dim_in
        x_gconv = torch.einsum('bnki,nkio->bno', x_g, weights) + bias     #b, N, dim_out
        return x_gconv
Traffic-Benchmark | Traffic-Benchmark-master/methods/AGCRN/model/AGCRNCell.py | import torch
import torch.nn as nn
from model.AGCN import AVWGCN
class AGCRNCell(nn.Module):
    """GRU-style recurrent cell whose gates are adaptive graph convolutions.

    Both the combined update/reset gate and the candidate state are
    computed by AVWGCN over [input, hidden] concatenated on the feature
    axis, using the shared learned node embeddings.
    """
    def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim):
        super(AGCRNCell, self).__init__()
        self.node_num = node_num
        self.hidden_dim = dim_out
        # Single conv produces both gates (2*dim_out), split in forward().
        self.gate = AVWGCN(dim_in+self.hidden_dim, 2*dim_out, cheb_k, embed_dim)
        self.update = AVWGCN(dim_in+self.hidden_dim, dim_out, cheb_k, embed_dim)
    def forward(self, x, state, node_embeddings):
        #x: B, num_nodes, input_dim
        #state: B, num_nodes, hidden_dim
        state = state.to(x.device)
        input_and_state = torch.cat((x, state), dim=-1)
        z_r = torch.sigmoid(self.gate(input_and_state, node_embeddings))
        z, r = torch.split(z_r, self.hidden_dim, dim=-1)
        candidate = torch.cat((x, z*state), dim=-1)
        hc = torch.tanh(self.update(candidate, node_embeddings))
        # Convex combination of previous state and candidate (GRU-like).
        h = r*state + (1-r)*hc
        return h
    def init_hidden_state(self, batch_size):
        """Zero hidden state of shape (batch_size, node_num, hidden_dim)."""
        return torch.zeros(batch_size, self.node_num, self.hidden_dim)
Traffic-Benchmark | Traffic-Benchmark-master/methods/AGCRN/model/Run_METR-LA.py | import os
import sys
file_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(file_dir)
sys.path.append(file_dir)
import torch
import numpy as np
import torch.nn as nn
import argparse
import configparser
from datetime import datetime
from model.AGCRN import AGCRN as Network
from model.BasicTrainer import Trainer
from lib.TrainInits import init_seed
from lib.dataloader import get_dataloader
from lib.TrainInits import print_model_parameters
Mode = 'train'
DEBUG = 'False'
DATASET = 'METR-LA'
MODEL = 'AGCRN'
config_file = './{}_{}.conf'.format(DATASET, MODEL)
config = configparser.ConfigParser()
config.read(config_file)
from lib.metrics import MAE_torch
def masked_mae_loss(scaler, mask_value):
    """Factory for a masked-MAE loss that inverts the data scaler first."""
    def loss(preds, labels):
        # A falsy scaler means predictions are already in the label scale.
        outputs = preds if not scaler else scaler.inverse_transform(preds)
        mae = MAE_torch(pred=outputs, true=labels, mask_value=mask_value)
        return mae
    return loss
args = argparse.ArgumentParser(description='arguments')
args.add_argument('--dataset', default=DATASET, type=str)
args.add_argument('--mode', default=Mode, type=str)
args.add_argument('--debug', default=DEBUG, type=eval)
args.add_argument('--model', default=MODEL, type=str)
args.add_argument('--cuda', default=True, type=bool)
args.add_argument('--val_ratio',
default=config['data']['val_ratio'],
type=float)
args.add_argument('--test_ratio',
default=config['data']['test_ratio'],
type=float)
args.add_argument('--lag', default=config['data']['lag'], type=int)
args.add_argument('--horizon', default=config['data']['horizon'], type=int)
args.add_argument('--num_nodes', default=config['data']['num_nodes'], type=int)
args.add_argument('--tod', default=config['data']['tod'], type=eval)
args.add_argument('--normalizer',
default=config['data']['normalizer'],
type=str)
args.add_argument('--column_wise',
default=config['data']['column_wise'],
type=eval)
args.add_argument('--default_graph',
default=config['data']['default_graph'],
type=eval)
args.add_argument('--input_dim',
default=config['model']['input_dim'],
type=int)
args.add_argument('--output_dim',
default=config['model']['output_dim'],
type=int)
args.add_argument('--embed_dim',
default=config['model']['embed_dim'],
type=int)
args.add_argument('--rnn_units',
default=config['model']['rnn_units'],
type=int)
args.add_argument('--num_layers',
default=config['model']['num_layers'],
type=int)
args.add_argument('--cheb_k', default=config['model']['cheb_order'], type=int)
args.add_argument('--loss_func',
default=config['train']['loss_func'],
type=str)
args.add_argument('--seed', default=config['train']['seed'], type=int)
args.add_argument('--batch_size',
default=config['train']['batch_size'],
type=int)
args.add_argument('--epochs', default=config['train']['epochs'], type=int)
args.add_argument('--lr_init', default=config['train']['lr_init'], type=float)
args.add_argument('--lr_decay', default=config['train']['lr_decay'], type=eval)
args.add_argument('--lr_decay_rate',
default=config['train']['lr_decay_rate'],
type=float)
args.add_argument('--lr_decay_step',
default=config['train']['lr_decay_step'],
type=str)
args.add_argument('--early_stop',
default=config['train']['early_stop'],
type=eval)
args.add_argument('--early_stop_patience',
default=config['train']['early_stop_patience'],
type=int)
args.add_argument('--grad_norm',
default=config['train']['grad_norm'],
type=eval)
args.add_argument('--max_grad_norm',
default=config['train']['max_grad_norm'],
type=int)
args.add_argument('--teacher_forcing', default=False, type=bool)
args.add_argument('--real_value',
default=config['train']['real_value'],
type=eval,
help='use real value for loss calculation')
args.add_argument('--mae_thresh',
default=config['test']['mae_thresh'],
type=eval)
args.add_argument('--mape_thresh',
default=config['test']['mape_thresh'],
type=float)
args.add_argument('--log_dir', default='./', type=str)
args.add_argument('--log_step', default=config['log']['log_step'], type=int)
args.add_argument('--plot', default=config['log']['plot'], type=eval)
args.add_argument('--dataset_dir',
type=str,
default='data/METR-LA',
help='data path')
args.add_argument('--device', type=str, default='cuda:1', help='')
args = args.parse_args()
init_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.set_device(int(args.device[5]))
else:
args.device = 'cpu'
model = Network(args)
model = model.to(args.device)
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
else:
nn.init.uniform_(p)
print_model_parameters(model, only_num=False)
train_loader, val_loader, test_loader, scaler = get_dataloader(
args,
normalizer=args.normalizer,
tod=args.tod,
dow=False,
weather=False,
single=False)
loss = masked_mae_loss(scaler, mask_value=0.0)
optimizer = torch.optim.Adam(params=model.parameters(),
lr=args.lr_init,
eps=1.0e-8,
weight_decay=0,
amsgrad=False)
lr_scheduler = None
if args.lr_decay:
print('Applying learning rate decay.')
lr_decay_steps = [int(i) for i in list(args.lr_decay_step.split(','))]
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer=optimizer,
milestones=lr_decay_steps,
gamma=args.lr_decay_rate)
current_time = datetime.now().strftime('%Y%m%d%H%M%S')
current_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(current_dir, 'experiments', args.dataset, current_time)
args.log_dir = log_dir
trainer = Trainer(model,
loss,
optimizer,
train_loader,
val_loader,
test_loader,
scaler,
args,
lr_scheduler=lr_scheduler)
if args.mode == 'train':
trainer.train()
elif args.mode == 'test':
model.load_state_dict(
torch.load('../pre-trained/{}.pth'.format(args.dataset)))
print("Load saved model")
trainer.test(model, trainer.args, test_loader, scaler, trainer.logger)
else:
raise ValueError
| 6,953 | 32.757282 | 79 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/AGCRN/model/BasicTrainer.py | import torch
import math
import os
import time
import copy
import numpy as np
from lib.logger import get_logger
from lib.metrics import All_Metrics
class Trainer(object):
    """Generic train/validate/test driver for AGCRN.

    Tracks the best validation loss, supports early stopping, optional LR
    decay and teacher forcing, and saves the best model state to
    `args.log_dir/best_model.pth`.
    """
    def __init__(self, model, loss, optimizer, train_loader, val_loader, test_loader,
                 scaler, args, lr_scheduler=None):
        super(Trainer, self).__init__()
        self.model = model
        self.loss = loss
        self.optimizer = optimizer
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.test_loader = test_loader
        self.scaler = scaler
        self.args = args
        self.lr_scheduler = lr_scheduler
        self.train_per_epoch = len(train_loader)
        if val_loader != None:
            self.val_per_epoch = len(val_loader)
        self.best_path = os.path.join(self.args.log_dir, 'best_model.pth')
        self.loss_figure_path = os.path.join(self.args.log_dir, 'loss.png')
        #log
        if os.path.isdir(args.log_dir) == False and not args.debug:
            os.makedirs(args.log_dir, exist_ok=True)
        self.logger = get_logger(args.log_dir, name=args.model, debug=args.debug)
        self.logger.info('Experiment log path in: {}'.format(args.log_dir))
        #if not args.debug:
        #self.logger.info("Argument: %r", args)
        # for arg, value in sorted(vars(args).items()):
        #     self.logger.info("Argument %s: %r", arg, value)

    def val_epoch(self, epoch, val_dataloader):
        """Run one validation epoch; returns the mean (non-NaN) batch loss."""
        self.model.eval()
        total_val_loss = 0
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(val_dataloader):
                data = data[..., :self.args.input_dim]
                label = target[..., :self.args.output_dim]
                output = self.model(data, target, teacher_forcing_ratio=0.)
                if self.args.real_value:
                    # label = self.scaler.inverse_transform(label)
                    # output = self.scaler.inverse_transform(output)
                    pass
                loss = self.loss(output.cuda(), label)
                #a whole batch of Metr_LA is filtered
                if not torch.isnan(loss):
                    total_val_loss += loss.item()
        val_loss = total_val_loss / len(val_dataloader)
        self.logger.info('**********Val Epoch {}: average Loss: {:.6f}'.format(epoch, val_loss))
        return val_loss

    def train_epoch(self, epoch):
        """Run one training epoch; returns the mean batch training loss."""
        self.model.train()
        total_loss = 0
        for batch_idx, (data, target) in enumerate(self.train_loader):
            data = data[..., :self.args.input_dim]
            label = target[..., :self.args.output_dim]  # (..., 1)
            self.optimizer.zero_grad()
            #teacher_forcing for RNN encoder-decoder model
            #if teacher_forcing_ratio = 1: use label as input in the decoder for all steps
            if self.args.teacher_forcing:
                global_step = (epoch - 1) * self.train_per_epoch + batch_idx
                teacher_forcing_ratio = self._compute_sampling_threshold(global_step, self.args.tf_decay_steps)
            else:
                teacher_forcing_ratio = 1.
            #data and target shape: B, T, N, F; output shape: B, T, N, F
            output = self.model(data, target, teacher_forcing_ratio=teacher_forcing_ratio)
            if self.args.real_value:
                # label = self.scaler.inverse_transform(label)
                # output = self.scaler.inverse_transform(output)
                pass
            # loss = self.loss(output.cuda(), label)
            loss = self.loss(output, label)
            loss.backward()
            # add max grad clipping
            if self.args.grad_norm:
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm)
            self.optimizer.step()
            total_loss += loss.item()
            #log information
            if batch_idx % self.args.log_step == 0:
                self.logger.info('Train Epoch {}: {}/{} Loss: {:.6f}'.format(
                    epoch, batch_idx, self.train_per_epoch, loss.item()))
        train_epoch_loss = total_loss/self.train_per_epoch
        self.logger.info('**********Train Epoch {}: averaged Loss: {:.6f}, tf_ratio: {:.6f}'.format(epoch, train_epoch_loss, teacher_forcing_ratio))
        #learning rate decay
        if self.args.lr_decay:
            self.lr_scheduler.step()
        return train_epoch_loss

    def train(self):
        """Full training loop with best-model tracking and early stopping."""
        best_model = None
        best_loss = float('inf')
        not_improved_count = 0
        train_loss_list = []
        val_loss_list = []
        start_time = time.time()
        for epoch in range(1, self.args.epochs + 1):
            #epoch_time = time.time()
            train_epoch_loss = self.train_epoch(epoch)
            #print(time.time()-epoch_time)
            #exit()
            # Without a dedicated validation split, validate on the test set.
            if self.val_loader == None:
                val_dataloader = self.test_loader
            else:
                val_dataloader = self.val_loader
            val_epoch_loss = self.val_epoch(epoch, val_dataloader)
            #print('LR:', self.optimizer.param_groups[0]['lr'])
            train_loss_list.append(train_epoch_loss)
            val_loss_list.append(val_epoch_loss)
            if train_epoch_loss > 1e6:
                self.logger.warning('Gradient explosion detected. Ending...')
                break
            #if self.val_loader == None:
            #val_epoch_loss = train_epoch_loss
            if val_epoch_loss < best_loss:
                best_loss = val_epoch_loss
                not_improved_count = 0
                best_state = True
            else:
                not_improved_count += 1
                best_state = False
            # early stop
            if self.args.early_stop:
                if not_improved_count == self.args.early_stop_patience:
                    self.logger.info("Validation performance didn\'t improve for {} epochs. "
                                    "Training stops.".format(self.args.early_stop_patience))
                    break
            # save the best state
            if best_state == True:
                self.logger.info('*********************************Current best model saved!')
                best_model = copy.deepcopy(self.model.state_dict())
        training_time = time.time() - start_time
        self.logger.info("Total training time: {:.4f}min, best loss: {:.6f}".format((training_time / 60), best_loss))
        #save the best model to file
        if not self.args.debug:
            torch.save(best_model, self.best_path)
            self.logger.info("Saving current best model to " + self.best_path)
        #test
        self.model.load_state_dict(best_model)
        #self.val_epoch(self.args.epochs, self.test_loader)
        self.test(self.model, self.args, self.test_loader, self.scaler, self.logger)

    def save_checkpoint(self):
        """Persist model/optimizer state and config to the best-model path."""
        state = {
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'config': self.args
        }
        torch.save(state, self.best_path)
        self.logger.info("Saving current best model to " + self.best_path)

    @staticmethod
    def test(model, args, data_loader, scaler, logger, path=None):
        """Evaluate on the test set, dump predictions, and log per-horizon metrics."""
        if path != None:
            check_point = torch.load(path)
            state_dict = check_point['state_dict']
            args = check_point['config']
            model.load_state_dict(state_dict)
            model.to(args.device)
        model.eval()
        y_pred = []
        y_true = []
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(data_loader):
                data = data[..., :args.input_dim]
                label = target[..., :args.output_dim]
                output = model(data, target, teacher_forcing_ratio=0)
                y_true.append(label)
                y_pred.append(output)
        # Labels stay in the original scale; only predictions are inverse-transformed.
        # y_true = scaler.inverse_transform(torch.cat(y_true, dim=0))
        y_true = torch.cat(y_true, dim=0)
        y_pred = scaler.inverse_transform(torch.cat(y_pred, dim=0))
        # if args.real_value:
        #     y_pred = torch.cat(y_pred, dim=0)
        # else:
        #     y_pred = scaler.inverse_transform(torch.cat(y_pred, dim=0))
        np.save('./{}_true.npy'.format(args.dataset), y_true.cpu().numpy())
        np.save('./{}_pred.npy'.format(args.dataset), y_pred.cpu().numpy())
        for t in range(y_true.shape[1]):
            mae, rmse, mape, _, _ = All_Metrics(y_pred[:, t, ...], y_true[:, t, ...],
                                                args.mae_thresh, args.mape_thresh)
            logger.info("Horizon {:02d}, MAE: {:.2f}, RMSE: {:.2f}, MAPE: {:.4f}%".format(
                t + 1, mae, rmse, mape*100))
        mae, rmse, mape, _, _ = All_Metrics(y_pred, y_true, args.mae_thresh, args.mape_thresh)
        logger.info("Average Horizon, MAE: {:.2f}, RMSE: {:.2f}, MAPE: {:.4f}%".format(
            mae, rmse, mape*100))

    @staticmethod
    def _compute_sampling_threshold(global_step, k):
        """
        Computes the sampling probability for scheduled sampling using inverse sigmoid.
        :param global_step:
        :param k:
        :return:
        """
        return k / (k + math.exp(global_step / k))
Traffic-Benchmark | Traffic-Benchmark-master/methods/AGCRN/model/Run_BJ.py | import os
import sys
file_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(file_dir)
sys.path.append(file_dir)
import torch
import numpy as np
import torch.nn as nn
import argparse
import configparser
from datetime import datetime
from model.AGCRN import AGCRN as Network
from model.BasicTrainer import Trainer
from lib.TrainInits import init_seed
from lib.dataloader import get_dataloader
from lib.TrainInits import print_model_parameters
Mode = 'train'
DEBUG = 'False'
DATASET = 'BJ'
MODEL = 'AGCRN'
config_file = './{}_{}.conf'.format(DATASET, MODEL)
config = configparser.ConfigParser()
config.read(config_file)
from lib.metrics import MAE_torch
def masked_mae_loss(scaler, mask_value):
    """Create a masked-MAE loss closure over the given scaler and mask value."""
    def loss(preds, labels):
        # De-normalize predictions when a scaler is provided.
        if scaler:
            predictions = scaler.inverse_transform(preds)
        else:
            predictions = preds
        return MAE_torch(pred=predictions, true=labels, mask_value=mask_value)
    return loss
# Command-line interface; defaults for most options come from the .conf file.
# NOTE(review): type=eval runs eval() on raw CLI strings (accepted here as the
# repo's convention for parsing booleans/None, but unsafe for untrusted input),
# and type=bool is a known argparse pitfall (bool('False') is True).
args = argparse.ArgumentParser(description='arguments')
args.add_argument('--dataset', default=DATASET, type=str)
args.add_argument('--mode', default=Mode, type=str)
args.add_argument('--debug', default=DEBUG, type=eval)
args.add_argument('--model', default=MODEL, type=str)
args.add_argument('--cuda', default=True, type=bool)
# Data split / windowing options.
args.add_argument('--val_ratio',
                  default=config['data']['val_ratio'],
                  type=float)
args.add_argument('--test_ratio',
                  default=config['data']['test_ratio'],
                  type=float)
args.add_argument('--lag', default=config['data']['lag'], type=int)
args.add_argument('--horizon', default=config['data']['horizon'], type=int)
args.add_argument('--num_nodes', default=config['data']['num_nodes'], type=int)
args.add_argument('--tod', default=config['data']['tod'], type=eval)
args.add_argument('--normalizer',
                  default=config['data']['normalizer'],
                  type=str)
args.add_argument('--column_wise',
                  default=config['data']['column_wise'],
                  type=eval)
args.add_argument('--default_graph',
                  default=config['data']['default_graph'],
                  type=eval)
# Model architecture options.
args.add_argument('--input_dim',
                  default=config['model']['input_dim'],
                  type=int)
args.add_argument('--output_dim',
                  default=config['model']['output_dim'],
                  type=int)
args.add_argument('--embed_dim',
                  default=config['model']['embed_dim'],
                  type=int)
args.add_argument('--rnn_units',
                  default=config['model']['rnn_units'],
                  type=int)
args.add_argument('--num_layers',
                  default=config['model']['num_layers'],
                  type=int)
args.add_argument('--cheb_k', default=config['model']['cheb_order'], type=int)
# Training options.
args.add_argument('--loss_func',
                  default=config['train']['loss_func'],
                  type=str)
args.add_argument('--seed', default=config['train']['seed'], type=int)
args.add_argument('--batch_size',
                  default=config['train']['batch_size'],
                  type=int)
args.add_argument('--epochs', default=config['train']['epochs'], type=int)
args.add_argument('--lr_init', default=config['train']['lr_init'], type=float)
args.add_argument('--lr_decay', default=config['train']['lr_decay'], type=eval)
args.add_argument('--lr_decay_rate',
                  default=config['train']['lr_decay_rate'],
                  type=float)
args.add_argument('--lr_decay_step',
                  default=config['train']['lr_decay_step'],
                  type=str)
args.add_argument('--early_stop',
                  default=config['train']['early_stop'],
                  type=eval)
args.add_argument('--early_stop_patience',
                  default=config['train']['early_stop_patience'],
                  type=int)
args.add_argument('--grad_norm',
                  default=config['train']['grad_norm'],
                  type=eval)
args.add_argument('--max_grad_norm',
                  default=config['train']['max_grad_norm'],
                  type=int)
args.add_argument('--teacher_forcing', default=False, type=bool)
args.add_argument('--real_value',
                  default=config['train']['real_value'],
                  type=eval,
                  help='use real value for loss calculation')
# Test / logging options.
args.add_argument('--mae_thresh',
                  default=config['test']['mae_thresh'],
                  type=eval)
args.add_argument('--mape_thresh',
                  default=config['test']['mape_thresh'],
                  type=float)
args.add_argument('--log_dir', default='./', type=str)
args.add_argument('--log_step', default=config['log']['log_step'], type=int)
args.add_argument('--plot', default=config['log']['plot'], type=eval)
args.add_argument('--dataset_dir',
                  type=str,
                  default='data/METR-LA',
                  help='data path')
args.add_argument('--device', type=str, default='cuda:1', help='')
args = args.parse_args()
# --- experiment setup: seeding, device, model, data, optimizer, trainer ---
init_seed(args.seed)
if torch.cuda.is_available():
    # NOTE(review): parses the GPU index from position 5 of the device
    # string, i.e. it assumes the exact format 'cuda:N' with one digit.
    torch.cuda.set_device(int(args.device[5]))
else:
    args.device = 'cpu'
model = Network(args)
model = model.to(args.device)
# Xavier init for weight matrices, uniform init for 1-D params (biases etc.).
for p in model.parameters():
    if p.dim() > 1:
        nn.init.xavier_uniform_(p)
    else:
        nn.init.uniform_(p)
print_model_parameters(model, only_num=False)
train_loader, val_loader, test_loader, scaler = get_dataloader(
    args,
    normalizer=args.normalizer,
    tod=args.tod,
    dow=False,
    weather=False,
    single=False)
# Masked MAE: zero labels are treated as missing readings.
loss = masked_mae_loss(scaler, mask_value=0.0)
optimizer = torch.optim.Adam(params=model.parameters(),
                             lr=args.lr_init,
                             eps=1.0e-8,
                             weight_decay=0,
                             amsgrad=False)
lr_scheduler = None
if args.lr_decay:
    print('Applying learning rate decay.')
    # lr_decay_step is a comma-separated list of epoch milestones.
    lr_decay_steps = [int(i) for i in list(args.lr_decay_step.split(','))]
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer=optimizer,
        milestones=lr_decay_steps,
        gamma=args.lr_decay_rate)
# Per-run log directory: ./experiments/<dataset>/<timestamp>.
current_time = datetime.now().strftime('%Y%m%d%H%M%S')
current_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(current_dir, 'experiments', args.dataset, current_time)
args.log_dir = log_dir
trainer = Trainer(model,
                  loss,
                  optimizer,
                  train_loader,
                  val_loader,
                  test_loader,
                  scaler,
                  args,
                  lr_scheduler=lr_scheduler)
if args.mode == 'train':
    trainer.train()
elif args.mode == 'test':
    # Evaluate a pre-trained checkpoint instead of training.
    model.load_state_dict(
        torch.load('../pre-trained/{}.pth'.format(args.dataset)))
    print("Load saved model")
    trainer.test(model, trainer.args, test_loader, scaler, trainer.logger)
else:
    raise ValueError
# (corrupted file-metadata residue removed: end of Run_BJ.py)
# --- methods/AGCRN/lib/TrainInits.py ---
import torch
import random
import numpy as np
def init_seed(seed):
    '''
    Seed every RNG (python, numpy, torch CPU and CUDA) for reproducibility,
    and disable cuDNN's non-deterministic kernels.

    Fix: the original set ``torch.cuda.cudnn_enabled``, which is not a real
    attribute and silently had no effect; ``torch.backends.cudnn`` is the
    documented switch for this.
    '''
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.deterministic = True
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
def init_device(opt):
    # Select GPU vs CPU execution based on availability and record the
    # choice on the options object.
    # NOTE(review): parses the device index from opt.device[5], i.e. it
    # assumes the exact format 'cuda:N' with a single-digit N — confirm.
    if torch.cuda.is_available():
        opt.cuda = True
        torch.cuda.set_device(int(opt.device[5]))
    else:
        opt.cuda = False
        opt.device = 'cpu'
    return opt
def init_optim(model, opt):
    '''
    Build an Adam optimizer over all model parameters with lr opt.lr_init.
    '''
    return torch.optim.Adam(model.parameters(), lr=opt.lr_init)
def init_lr_scheduler(optim, opt):
    '''
    Initialize the learning rate scheduler
    '''
    # NOTE(review): expects opt.lr_decay_steps / opt.lr_scheduler_rate, which
    # differ from the lr_decay_step / lr_decay_rate attribute names built in
    # Run_BJ.py — confirm callers provide these exact attributes.
    #return torch.optim.lr_scheduler.StepLR(optimizer=optim,gamma=opt.lr_scheduler_rate,step_size=opt.lr_scheduler_step)
    return torch.optim.lr_scheduler.MultiStepLR(optimizer=optim, milestones=opt.lr_decay_steps,
                                        gamma = opt.lr_scheduler_rate)
def print_model_parameters(model, only_num = True):
    """Print the model's total parameter count.

    With ``only_num=False``, also list every named parameter's shape and
    whether it is trainable.
    """
    print('*****************Model Parameter*****************')
    if not only_num:
        for name, param in model.named_parameters():
            print(name, param.shape, param.requires_grad)
    total = 0
    for param in model.parameters():
        total += param.nelement()
    print('Total params num: {}'.format(total))
    print('*****************Finish Parameter****************')
def get_memory_usage(device):
    # Return (allocated, cached) CUDA memory on `device`, in MiB.
    # NOTE(review): torch.cuda.memory_cached is deprecated in favor of
    # torch.cuda.memory_reserved in recent PyTorch releases.
    allocated_memory = torch.cuda.memory_allocated(device) / (1024*1024.)
    cached_memory = torch.cuda.memory_cached(device) / (1024*1024.)
    return allocated_memory, cached_memory
    #print('Allocated Memory: {:.2f} MB, Cached Memory: {:.2f} MB'.format(allocated_memory, cached_memory))
# --- methods/AGCRN/lib/dataloader.py ---
import torch
import numpy as np
import torch.utils.data
from lib.add_window import Add_Window_Horizon
from lib.load_dataset import load_st_dataset
from lib.normalization import NScaler, MinMax01Scaler, MinMax11Scaler, StandardScaler, ColumnMinMaxScaler
def normalize_dataset(data, normalizer, column_wise=False):
    """Normalize ``data`` with the scheme named by ``normalizer`` and return
    (normalized_data, fitted_scaler).

    normalizer is one of 'max01', 'max11', 'std', 'None', 'cmax'. With
    column_wise=True the statistics are computed per column (axis 0);
    otherwise they are global scalars.
    """
    if normalizer == 'max01':
        if column_wise:
            minimum = data.min(axis=0, keepdims=True)
            maximum = data.max(axis=0, keepdims=True)
        else:
            minimum = data.min()
            maximum = data.max()
        scaler = MinMax01Scaler(minimum, maximum)
        data = scaler.transform(data)
        print('Normalize the dataset by MinMax01 Normalization')
    elif normalizer == 'max11':
        if column_wise:
            minimum = data.min(axis=0, keepdims=True)
            maximum = data.max(axis=0, keepdims=True)
        else:
            minimum = data.min()
            maximum = data.max()
        scaler = MinMax11Scaler(minimum, maximum)
        data = scaler.transform(data)
        print('Normalize the dataset by MinMax11 Normalization')
    elif normalizer == 'std':
        if column_wise:
            mean = data.mean(axis=0, keepdims=True)
            std = data.std(axis=0, keepdims=True)
        else:
            mean = data.mean()
            std = data.std()
        scaler = StandardScaler(mean, std)
        data = scaler.transform(data)
        print('Normalize the dataset by Standard Normalization')
    elif normalizer == 'None':
        scaler = NScaler()
        data = scaler.transform(data)
        print('Does not normalize the dataset')
    elif normalizer == 'cmax':
        #column min max, to be depressed
        #note: axis must be the spatial dimension, please check !
        scaler = ColumnMinMaxScaler(data.min(axis=0), data.max(axis=0))
        data = scaler.transform(data)
        print('Normalize the dataset by Column Min-Max Normalization')
    else:
        raise ValueError
    return data, scaler
def split_data_by_days(data, val_days, test_days, interval=60):
    '''
    Chronologically split ``data`` into train/val/test by whole days.

    :param data: [B, *]
    :param val_days: number of trailing days reserved for validation
    :param test_days: number of trailing days reserved for testing
    :param interval: sampling interval in minutes (15, 30, 60)
    :return: (train, val, test) slices of ``data``
    '''
    steps_per_day = int((24 * 60) / interval)
    n_test = steps_per_day * test_days
    n_val = steps_per_day * val_days
    test_data = data[-n_test:]
    val_data = data[-(n_test + n_val):-n_test]
    train_data = data[:-(n_test + n_val)]
    return train_data, val_data, test_data
def split_data_by_ratio(data, val_ratio, test_ratio):
    """Chronologically split ``data`` along axis 0 into train/val/test.

    Fix: the original used negative slices like ``data[-int(L*ratio):]``;
    when a ratio rounds to zero samples that becomes ``data[-0:]`` — the
    *whole* array — instead of an empty split. Positive indices avoid that.
    """
    data_len = data.shape[0]
    n_test = int(data_len * test_ratio)
    n_val_test = int(data_len * (test_ratio + val_ratio))
    test_data = data[data_len - n_test:]
    val_data = data[data_len - n_val_test:data_len - n_test]
    train_data = data[:data_len - n_val_test]
    return train_data, val_data, test_data
def data_loader(X, Y, batch_size, shuffle=True, drop_last=True):
    """Wrap (X, Y) arrays in a float32 TensorDataset DataLoader.

    Tensors are placed on the GPU when one is available, matching the
    original behaviour but without the deprecated legacy tensor-type
    constructors (``torch.cuda.FloatTensor`` / ``torch.FloatTensor``).
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    X = torch.as_tensor(X, dtype=torch.float32, device=device)
    Y = torch.as_tensor(Y, dtype=torch.float32, device=device)
    data = torch.utils.data.TensorDataset(X, Y)
    dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size,
                                             shuffle=shuffle, drop_last=drop_last)
    return dataloader
def generate_data(dataset_dir):
    """Load train/val/test .npz windows from ``dataset_dir`` and return
    (x_train, y_train, x_val, y_val, x_test, y_test, mean, std).

    x arrays keep only the first feature channel and are permuted to
    (B, N, 1, T); y arrays take channel 0 and are permuted to (B, N, T).
    x splits are standardized with the *training* mean/std; y values are
    returned unnormalized.
    """
    data = {}
    mean = None
    std = None
    for category in ['train', 'val', 'test']:
        cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
        data['x_' + category] = np.transpose(cat_data['x'][..., :1], (0, 2, 3, 1))
        data['y_' + category] = np.transpose(cat_data['y'][..., 0], (0, 2, 1))
        # x = cat_data['x'][..., :1]
        # y = cat_data['y'][..., :1]
        # 'train' is loaded first, so these statistics come from x_train only.
        if mean is None:
            mean = data['x_train'].mean()
        if std is None:
            std = data['x_train'].std()
        # yield (x - mean) / std, y
    # return (data['x_train'] - mean) / std, (data['y_train'] - mean) / std, (data['x_val'] - mean) / std, (data['y_val'] - mean) / std, (data['x_test'] - mean) / std, (data['y_test'] - mean) / std, mean, std
    return (data['x_train'] - mean) / std, data['y_train'], (data['x_val'] - mean) / std, data['y_val'], (data['x_test'] - mean) / std, data['y_test'], mean, std
    # scaler = StandardScaler(mean=data['x_train'][..., 0].mean(), std=data['x_train'][..., 0].std())
    # # Data format
    # for category in ['train', 'val', 'test']:
    #     data['x_' + category][..., 0] = scaler.transform(data['x_' + category][..., 0])
    #
    # data['train_loader'] = DataLoaderM(data['x_train'], data['y_train'], batch_size)
    # data['val_loader'] = DataLoaderM(data['x_val'], data['y_val'], valid_batch_size)
    # data['test_loader'] = DataLoaderM(data['x_test'], data['y_test'], test_batch_size)
    # data['scaler'] = scaler
    # return data
import os
def get_dataloader(args, normalizer = 'std', tod=False, dow=False, weather=False, single=True):
    """Build train/val/test DataLoaders from pre-windowed .npz files in
    args.dataset_dir and return them with the fitted StandardScaler.

    Only feature channel 0 of x is normalized (in place), using statistics
    from x_train; the normalizer/tod/dow/weather/single arguments are kept
    for interface compatibility but unused in this npz-based path.
    """
    # #load raw st dataset
    # data = load_st_dataset(args.dataset)        # B, N, D
    # #normalize st data
    # data, scaler = normalize_dataset(data, normalizer, args.column_wise)
    # #spilit dataset by days or by ratio
    # if args.test_ratio > 1:
    #     data_train, data_val, data_test = split_data_by_days(data, args.val_ratio, args.test_ratio)
    # else:
    #     data_train, data_val, data_test = split_data_by_ratio(data, args.val_ratio, args.test_ratio)
    # #add time window
    # x_tra, y_tra = Add_Window_Horizon(data_train, args.lag, args.horizon, single)
    # x_val, y_val = Add_Window_Horizon(data_val, args.lag, args.horizon, single)
    # x_test, y_test = Add_Window_Horizon(data_test, args.lag, args.horizon, single)
    data = {}
    for category in ['train', 'val', 'test']:
        cat_data = np.load(os.path.join(args.dataset_dir, category + '.npz'))
        data['x_' + category] = cat_data['x']
        data['y_' + category] = cat_data['y']
    scaler = StandardScaler(mean=data['x_train'][..., 0].mean(), std=data['x_train'][..., 0].std())
    # Data format
    for category in ['train', 'val', 'test']:
        data['x_' + category][..., 0] = scaler.transform(data['x_' + category][..., 0])
    x_tra, y_tra, x_val, y_val, x_test, y_test = data['x_train'], data['y_train'], data['x_val'], data['y_val'], data['x_test'], data['y_test']
    print('Train: ', x_tra.shape, y_tra.shape)
    print('Val: ', x_val.shape, y_val.shape)
    print('Test: ', x_test.shape, y_test.shape)
    ##############get dataloader######################
    # Validation/train drop the last incomplete batch; test keeps it so every
    # sample is evaluated.
    train_dataloader = data_loader(x_tra, y_tra, args.batch_size, shuffle=True, drop_last=True)
    if len(x_val) == 0:
        val_dataloader = None
    else:
        val_dataloader = data_loader(x_val, y_val, args.batch_size, shuffle=False, drop_last=True)
    test_dataloader = data_loader(x_test, y_test, args.batch_size, shuffle=False, drop_last=False)
    return train_dataloader, val_dataloader, test_dataloader, scaler
# def get_dataloader(args, normalizer = 'std', tod=False, dow=False, weather=False, single=True):
# #load raw st dataset
# data = load_st_dataset(args.dataset) # B, N, D
# #normalize st data
# data, scaler = normalize_dataset(data, normalizer, args.column_wise)
# #spilit dataset by days or by ratio
# if args.test_ratio > 1:
# data_train, data_val, data_test = split_data_by_days(data, args.val_ratio, args.test_ratio)
# else:
# data_train, data_val, data_test = split_data_by_ratio(data, args.val_ratio, args.test_ratio)
# #add time window
# x_tra, y_tra = Add_Window_Horizon(data_train, args.lag, args.horizon, single)
# x_val, y_val = Add_Window_Horizon(data_val, args.lag, args.horizon, single)
# x_test, y_test = Add_Window_Horizon(data_test, args.lag, args.horizon, single)
# print('Train: ', x_tra.shape, y_tra.shape)
# print('Val: ', x_val.shape, y_val.shape)
# print('Test: ', x_test.shape, y_test.shape)
# ##############get dataloader######################
# train_dataloader = data_loader(x_tra, y_tra, args.batch_size, shuffle=True, drop_last=True)
# if len(x_val) == 0:
# val_dataloader = None
# else:
# val_dataloader = data_loader(x_val, y_val, args.batch_size, shuffle=False, drop_last=True)
# test_dataloader = data_loader(x_test, y_test, args.batch_size, shuffle=False, drop_last=False)
# return train_dataloader, val_dataloader, test_dataloader, scaler
if __name__ == '__main__':
    import argparse
    # Smoke-test for the dataloader.
    # Node counts: MetrLA 207; BikeNYC 128; SIGIR_solar 137; SIGIR_electric 321.
    DATASET = 'SIGIR_electric'
    if DATASET == 'MetrLA':
        NODE_NUM = 207
    elif DATASET == 'BikeNYC':
        NODE_NUM = 128
    elif DATASET == 'SIGIR_solar':
        NODE_NUM = 137
    elif DATASET == 'SIGIR_electric':
        NODE_NUM = 321
    parser = argparse.ArgumentParser(description='PyTorch dataloader')
    parser.add_argument('--dataset', default=DATASET, type=str)
    parser.add_argument('--num_nodes', default=NODE_NUM, type=int)
    parser.add_argument('--val_ratio', default=0.1, type=float)
    parser.add_argument('--test_ratio', default=0.2, type=float)
    parser.add_argument('--lag', default=12, type=int)
    parser.add_argument('--horizon', default=12, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    # Fix: get_dataloader reads args.dataset_dir and args.batch_size; the
    # original parser never defined --dataset_dir, so this smoke test crashed.
    parser.add_argument('--dataset_dir', default='data/METR-LA', type=str)
    args = parser.parse_args()
    # Restored: the original call carried garbled trailing tokens.
    train_dataloader, val_dataloader, test_dataloader, scaler = get_dataloader(
        args, normalizer='std', tod=False, dow=False, weather=False,
        single=True)
# --- methods/AGCRN/lib/metrics.py ---
'''
Always evaluate the model with MAE, RMSE, MAPE, RRSE, PNBI, and oPNBI.
Why add mask to MAE and RMSE?
Filter the 0 that may be caused by error (such as loop sensor)
Why add mask to MAPE and MARE?
Ignore very small values (e.g., 0.5/0.5=100%)
'''
import numpy as np
import torch
def MAE_torch(pred, true, mask_value=None):
    """Mean absolute error; entries with ``true <= mask_value`` are ignored."""
    if mask_value is not None:  # identity test instead of the unidiomatic != None
        mask = torch.gt(true, mask_value)
        pred = torch.masked_select(pred, mask)
        true = torch.masked_select(true, mask)
    return torch.mean(torch.abs(true - pred))
def MSE_torch(pred, true, mask_value=None):
    """Mean squared error; entries with ``true <= mask_value`` are ignored."""
    if mask_value is not None:
        mask = torch.gt(true, mask_value)
        pred = torch.masked_select(pred, mask)
        true = torch.masked_select(true, mask)
    return torch.mean((pred - true) ** 2)
def RMSE_torch(pred, true, mask_value=None):
    """Root mean squared error; entries with ``true <= mask_value`` are ignored."""
    if mask_value is not None:
        mask = torch.gt(true, mask_value)
        pred = torch.masked_select(pred, mask)
        true = torch.masked_select(true, mask)
    return torch.sqrt(torch.mean((pred - true) ** 2))
def RRSE_torch(pred, true, mask_value=None):
    """Root relative squared error; entries with ``true <= mask_value`` are ignored.

    Fix: the denominator now measures the ground truth's deviation from its
    mean, ``sum((true - true.mean())**2)`` — the original used
    ``pred - true.mean()``, diverging from both RRSE_np below and the
    standard RRSE definition.
    """
    if mask_value is not None:
        mask = torch.gt(true, mask_value)
        pred = torch.masked_select(pred, mask)
        true = torch.masked_select(true, mask)
    return torch.sqrt(torch.sum((pred - true) ** 2)) / torch.sqrt(
        torch.sum((true - true.mean()) ** 2))
def CORR_torch(pred, true, mask_value=None):
    """Mean Pearson correlation over the trailing channel axis.

    NOTE(review): mask_value is accepted but never applied in this function.
    Channels whose ground truth is constant (std == 0) are excluded from the
    mean to avoid 0/0.
    """
    #input B, T, N, D or B, N, D or B, N
    # All branches reshape to 4-D so that stats can be reduced over dims (0,1,2),
    # leaving one correlation value per entry of the last axis.
    if len(pred.shape) == 2:
        pred = pred.unsqueeze(dim=1).unsqueeze(dim=1)
        true = true.unsqueeze(dim=1).unsqueeze(dim=1)
    elif len(pred.shape) == 3:
        pred = pred.transpose(1, 2).unsqueeze(dim=1)
        true = true.transpose(1, 2).unsqueeze(dim=1)
    elif len(pred.shape) == 4:
        #B, T, N, D -> B, T, D, N
        pred = pred.transpose(2, 3)
        true = true.transpose(2, 3)
    else:
        raise ValueError
    dims = (0, 1, 2)
    pred_mean = pred.mean(dim=dims)
    true_mean = true.mean(dim=dims)
    pred_std = pred.std(dim=dims)
    true_std = true.std(dim=dims)
    correlation = ((pred - pred_mean)*(true - true_mean)).mean(dim=dims) / (pred_std*true_std)
    index = (true_std != 0)
    correlation = (correlation[index]).mean()
    return correlation
def MAPE_torch(pred, true, mask_value=None):
    """Mean absolute percentage error (as a fraction, not *100);
    entries with ``true <= mask_value`` are ignored."""
    if mask_value is not None:
        mask = torch.gt(true, mask_value)
        pred = torch.masked_select(pred, mask)
        true = torch.masked_select(true, mask)
    return torch.mean(torch.abs(torch.div((true - pred), true)))
def PNBI_torch(pred, true, mask_value=None):
    """Fraction of over-predictions (pred > true); 0 means every prediction
    is below truth, 1 means every prediction is above."""
    if mask_value is not None:
        mask = torch.gt(true, mask_value)
        pred = torch.masked_select(pred, mask)
        true = torch.masked_select(true, mask)
    indicator = torch.gt(pred - true, 0).float()
    return indicator.mean()
def oPNBI_torch(pred, true, mask_value=None):
    """Mean of (true + pred) / (2 * true): > 1 means over-prediction on
    average, < 1 under-prediction. Sensitive to small true values."""
    if mask_value is not None:
        mask = torch.gt(true, mask_value)
        pred = torch.masked_select(pred, mask)
        true = torch.masked_select(true, mask)
    bias = (true + pred) / (2 * true)
    return bias.mean()
def MARE_torch(pred, true, mask_value=None):
    """Mean absolute relative error: sum(|true - pred|) / sum(true)."""
    if mask_value is not None:
        mask = torch.gt(true, mask_value)
        pred = torch.masked_select(pred, mask)
        true = torch.masked_select(true, mask)
    return torch.div(torch.sum(torch.abs((true - pred))), torch.sum(true))
def SMAPE_torch(pred, true, mask_value=None):
    """Symmetric MAPE: mean of |true - pred| / (|true| + |pred|)."""
    if mask_value is not None:
        mask = torch.gt(true, mask_value)
        pred = torch.masked_select(pred, mask)
        true = torch.masked_select(true, mask)
    return torch.mean(torch.abs(true - pred) / (torch.abs(true) + torch.abs(pred)))
def MAE_np(pred, true, mask_value=None):
    """Mean absolute error (numpy); entries with ``true <= mask_value`` are
    ignored. The redundant ``np.where(cond, True, False)`` is replaced by the
    boolean condition itself."""
    if mask_value is not None:
        mask = true > mask_value
        true = true[mask]
        pred = pred[mask]
    return np.mean(np.absolute(pred - true))
def RMSE_np(pred, true, mask_value=None):
    """Root mean squared error (numpy); entries with ``true <= mask_value``
    are ignored."""
    if mask_value is not None:
        mask = true > mask_value
        true = true[mask]
        pred = pred[mask]
    return np.sqrt(np.mean(np.square(pred - true)))
#Root Relative Squared Error
def RRSE_np(pred, true, mask_value=None):
    """RRSE: sqrt(sum((pred-true)^2)) / sqrt(sum((true-mean(true))^2));
    entries with ``true <= mask_value`` are ignored."""
    if mask_value is not None:
        mask = true > mask_value
        true = true[mask]
        pred = pred[mask]
    mean = true.mean()
    return np.divide(np.sqrt(np.sum((pred - true) ** 2)),
                     np.sqrt(np.sum((true - mean) ** 2)))
def MAPE_np(pred, true, mask_value=None):
    """Mean absolute percentage error (as a fraction, not *100);
    entries with ``true <= mask_value`` are ignored."""
    if mask_value is not None:
        mask = true > mask_value
        true = true[mask]
        pred = pred[mask]
    return np.mean(np.absolute(np.divide((true - pred), true)))
def PNBI_np(pred, true, mask_value=None):
    """Fraction of over-predictions.

    PNBI == 0: all predictions are smaller than truth;
    PNBI == 1: all predictions are bigger than truth.
    """
    if mask_value is not None:
        mask = true > mask_value
        true = true[mask]
        pred = pred[mask]
    indicator = (pred - true) > 0
    return indicator.mean()
def oPNBI_np(pred, true, mask_value=None):
    """Mean of (true + pred) / (2 * true).

    > 1 means over-prediction on average, < 1 under-prediction; note this
    metric is very sensitive to small true values.
    """
    if mask_value is not None:
        mask = true > mask_value
        true = true[mask]
        pred = pred[mask]
    bias = (true + pred) / (2 * true)
    return bias.mean()
def MARE_np(pred, true, mask_value=None):
    """Mean absolute relative error: sum(|true - pred|) / sum(true)."""
    if mask_value is not None:
        mask = true > mask_value
        true = true[mask]
        pred = pred[mask]
    return np.divide(np.sum(np.absolute((true - pred))), np.sum(true))
def CORR_np(pred, true, mask_value=None):
    """Mean Pearson correlation per entry of the trailing axis (numpy).

    Input B, T, N, D or B, N, D or B, N. Fixes two defects of the original:
    the 2-D branch called torch's ``.unsqueeze`` on numpy arrays
    (AttributeError), and the 4-D branch applied the identity permutation
    (0, 1, 2, 3) although its comment — and the torch twin CORR_torch — swap
    the last two axes.

    NOTE(review): mask_value is accepted but never applied, matching the
    original and CORR_torch.
    """
    if len(pred.shape) == 2:
        # B, N -> B, 1, 1, N
        pred = np.expand_dims(np.expand_dims(pred, axis=1), axis=1)
        true = np.expand_dims(np.expand_dims(true, axis=1), axis=1)
    elif len(pred.shape) == 3:
        # B, T, N -> B, 1, N, T
        pred = np.expand_dims(pred.transpose(0, 2, 1), axis=1)
        true = np.expand_dims(true.transpose(0, 2, 1), axis=1)
    elif len(pred.shape) == 4:
        # B, T, N, D -> B, T, D, N
        pred = pred.transpose(0, 1, 3, 2)
        true = true.transpose(0, 1, 3, 2)
    else:
        raise ValueError
    dims = (0, 1, 2)
    pred_mean = pred.mean(axis=dims)
    true_mean = true.mean(axis=dims)
    pred_std = pred.std(axis=dims)
    true_std = true.std(axis=dims)
    correlation = ((pred - pred_mean) * (true - true_mean)).mean(axis=dims) / (pred_std * true_std)
    # Exclude constant ground-truth channels (std == 0) to avoid 0/0.
    index = (true_std != 0)
    correlation = (correlation[index]).mean()
    return correlation
def All_Metrics(pred, true, mask1, mask2):
    """Compute (MAE, RMSE, MAPE, RRSE, CORR) for numpy arrays or torch tensors.

    mask1 filters out near-zero/error readings for MAE/RMSE/RRSE; mask2
    filters out values below a threshold for MAPE. Type dispatch now uses
    isinstance() instead of ``type(x) ==`` comparisons.
    """
    #mask1 filter the very small value, mask2 filter the value lower than a defined threshold
    assert type(pred) == type(true)
    if isinstance(pred, np.ndarray):
        mae = MAE_np(pred, true, mask1)
        rmse = RMSE_np(pred, true, mask1)
        mape = MAPE_np(pred, true, mask2)
        rrse = RRSE_np(pred, true, mask1)
        corr = 0  # numpy CORR kept disabled, as in the original
        #corr = CORR_np(pred, true, mask1)
        #pnbi = PNBI_np(pred, true, mask1)
        #opnbi = oPNBI_np(pred, true, mask2)
    elif isinstance(pred, torch.Tensor):
        mae = MAE_torch(pred, true, mask1)
        rmse = RMSE_torch(pred, true, mask1)
        mape = MAPE_torch(pred, true, mask2)
        rrse = RRSE_torch(pred, true, mask1)
        corr = CORR_torch(pred, true, mask1)
        #pnbi = PNBI_torch(pred, true, mask1)
        #opnbi = oPNBI_torch(pred, true, mask2)
    else:
        raise TypeError
    return mae, rmse, mape, rrse, corr
def SIGIR_Metrics(pred, true, mask1, mask2):
    # Returns (RRSE, CORR) for torch tensors; CORR uses mask 0.
    # NOTE(review): mask2 is accepted but unused.
    rrse = RRSE_torch(pred, true, mask1)
    corr = CORR_torch(pred, true, 0)
    return rrse, corr
if __name__ == '__main__':
    # Quick self-check of the torch metric implementations.
    pred = torch.Tensor([1, 2, 3,4])
    true = torch.Tensor([2, 1, 4,5])
    print(All_Metrics(pred, true, None, None))
# (corrupted file-metadata residue removed: end of metrics.py)
Traffic-Benchmark | Traffic-Benchmark-master/methods/AGCRN/lib/normalization.py | import numpy as np
import torch
class NScaler(object):
    """No-op scaler: both transform and inverse_transform return the data
    unchanged (used when normalization is disabled)."""

    def transform(self, data):
        return data

    def inverse_transform(self, data):
        return data
class StandardScaler:
    """Z-score normalization: (x - mean) / std.

    ``inverse_transform`` lazily converts numpy statistics to tensors on the
    data's device/dtype the first time it receives a torch tensor, and keeps
    them converted for subsequent calls.
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def transform(self, data):
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        needs_cast = type(data) == torch.Tensor and type(self.mean) == np.ndarray
        if needs_cast:
            # Cache tensor versions of the statistics for later calls.
            self.std = torch.from_numpy(self.std).to(data.device).type(data.dtype)
            self.mean = torch.from_numpy(self.mean).to(data.device).type(data.dtype)
        return data * self.std + self.mean
class MinMax01Scaler:
    """Min-max normalization onto [0, 1]: (x - min) / (max - min).

    ``inverse_transform`` lazily converts numpy statistics to tensors on the
    data's device/dtype the first time it receives a torch tensor.
    """

    def __init__(self, min, max):
        self.min = min
        self.max = max

    def transform(self, data):
        span = self.max - self.min
        return (data - self.min) / span

    def inverse_transform(self, data):
        needs_cast = type(data) == torch.Tensor and type(self.min) == np.ndarray
        if needs_cast:
            self.min = torch.from_numpy(self.min).to(data.device).type(data.dtype)
            self.max = torch.from_numpy(self.max).to(data.device).type(data.dtype)
        return data * (self.max - self.min) + self.min
class MinMax11Scaler:
    """Min-max normalization onto [-1, 1]: 2 * (x - min) / (max - min) - 1.

    ``inverse_transform`` lazily converts numpy statistics to tensors on the
    data's device/dtype the first time it receives a torch tensor.
    """

    def __init__(self, min, max):
        self.min = min
        self.max = max

    def transform(self, data):
        unit = (data - self.min) / (self.max - self.min)
        return unit * 2. - 1.

    def inverse_transform(self, data):
        needs_cast = type(data) == torch.Tensor and type(self.min) == np.ndarray
        if needs_cast:
            self.min = torch.from_numpy(self.min).to(data.device).type(data.dtype)
            self.max = torch.from_numpy(self.max).to(data.device).type(data.dtype)
        return ((data + 1.) / 2.) * (self.max - self.min) + self.min
class ColumnMinMaxScaler():
    #Note: to use this scale, must init the min and max with column min and column max
    # Per-column min-max scaling. Columns with zero range get divisor 1,
    # so transform leaves them at 0 instead of dividing by zero.
    def __init__(self, min, max):
        self.min = min
        self.min_max = max - self.min
        self.min_max[self.min_max==0] = 1
    def transform(self, data):
        # Debug print of operand shapes kept from the original.
        print(data.shape, self.min_max.shape)
        return (data - self.min) / self.min_max
    def inverse_transform(self, data):
        # Lazily convert numpy statistics to float32 tensors on the data's
        # device the first time a torch tensor is inverse-transformed.
        if type(data) == torch.Tensor and type(self.min) == np.ndarray:
            self.min_max = torch.from_numpy(self.min_max).to(data.device).type(torch.float32)
            self.min = torch.from_numpy(self.min).to(data.device).type(torch.float32)
        #print(data.dtype, self.min_max.dtype, self.min.dtype)
        return (data * self.min_max + self.min)
def one_hot_by_column(data):
    """One-hot encode each column of a 2-D integer array and stack the
    encodings horizontally.

    Each column's code range is [col.min(), col.max()]. Fixes the original's
    shadowing of the builtins ``len``/``max``/``min`` and its unbound
    ``encoded`` variable when ``data`` has zero columns.
    """
    n_rows = data.shape[0]
    blocks = []
    for i in range(data.shape[1]):
        column = data[:, i]
        col_max = column.max()
        col_min = column.min()
        block = np.zeros((n_rows, col_max - col_min + 1))
        block[np.arange(n_rows), column - col_min] = 1
        blocks.append(block)
    return np.hstack(blocks) if blocks else np.zeros((n_rows, 0))
def minmax_by_column(data):
    """Min-max scale each column of a 2-D array independently onto [0, 1].

    Fixes the original's shadowing of the builtins ``max``/``min`` and its
    unbound accumulator when ``data`` has zero columns.
    """
    columns = []
    for i in range(data.shape[1]):
        column = data[:, i]
        col_max = column.max()
        col_min = column.min()
        # NOTE: a constant column (col_max == col_min) divides by zero,
        # exactly as in the original implementation.
        column = (column - col_min) / (col_max - col_min)
        columns.append(column[:, np.newaxis])
    return np.hstack(columns) if columns else np.zeros((data.shape[0], 0))
if __name__ == '__main__':
    # Ad-hoc developer scratch code for the scalers.
    # NOTE(review): `test_data - minimum` mis-broadcasts ((3,4) against (3,))
    # and raises ValueError — this block appears broken as written.
    test_data = np.array([[0,0,0, 1], [0, 1, 3, 2], [0, 2, 1, 3]])
    print(test_data)
    minimum = test_data.min(axis=1)
    print(minimum, minimum.shape, test_data.shape)
    maximum = test_data.max(axis=1)
    print(maximum)
    print(test_data-minimum)
    test_data = (test_data-minimum) / (maximum-minimum)
    print(test_data)
    print(0 == 0)
    print(0.00 == 0)
    print(0 == 0.00)
    #print(one_hot_by_column(test_data))
    #print(minmax_by_column(test_data))
# --- methods/DGCRN_BJ/layer.py ---
from __future__ import division
import torch
import torch.nn as nn
from torch.nn import init
import numbers
import torch.nn.functional as F
from collections import OrderedDict
class gconv_RNN(nn.Module):
    """Graph convolution with a per-sample adjacency.

    x: [B, V, C] node features, A: [B, V, W] adjacency;
    returns [B, W, C] aggregated features.
    """

    def __init__(self):
        super(gconv_RNN, self).__init__()

    def forward(self, x, A):
        aggregated = torch.einsum('nvc,nvw->nwc', (x, A))
        return aggregated.contiguous()
class gconv_hyper(nn.Module):
    """Graph convolution with a single shared adjacency.

    x: [B, V, C] node features, A: [V, W] adjacency shared across the batch;
    returns [B, W, C] aggregated features.
    """

    def __init__(self):
        super(gconv_hyper, self).__init__()

    def forward(self, x, A):
        aggregated = torch.einsum('nvc,vw->nwc', (x, A))
        return aggregated.contiguous()
class gcn(nn.Module):
    """Multi-hop graph convolution used by DGCRN.

    type='RNN': propagates over a (dynamic, predefined) adjacency pair with
    mixing weights alpha/beta/gamma and a single linear output layer;
    dims = [in_dim, out_dim].
    type='hyper': propagates over one shared adjacency with weights
    alpha/gamma and a 3-layer sigmoid MLP head; dims has 4 entries
    [in_dim, h1, h2, out_dim].
    Hop outputs (gdep of them, plus the input) are concatenated before the
    output layer, so the head's input width is (gdep + 1) * dims[0].
    """
    def __init__(self, dims, gdep, dropout, alpha, beta, gamma, type=None):
        super(gcn, self).__init__()
        if type == 'RNN':
            self.gconv = gconv_RNN()
            self.gconv_preA = gconv_hyper()
            self.mlp = nn.Linear((gdep + 1) * dims[0], dims[1])
        elif type == 'hyper':
            self.gconv = gconv_hyper()
            self.mlp = nn.Sequential(
                OrderedDict([('fc1', nn.Linear((gdep + 1) * dims[0], dims[1])),
                             ('sigmoid1', nn.Sigmoid()),
                             ('fc2', nn.Linear(dims[1], dims[2])),
                             ('sigmoid2', nn.Sigmoid()),
                             ('fc3', nn.Linear(dims[2], dims[3]))]))
        self.gdep = gdep
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.type_GNN = type
    def forward(self, x, adj):
        # Mix-hop propagation: each hop blends the original input (alpha)
        # with neighbor aggregations (beta/gamma), then all hops are
        # concatenated on the feature axis.
        # For type='RNN', `adj` is the [dynamic_adj, predefined_adj] pair
        # produced by DGCRN.preprocessing.
        h = x
        out = [h]
        if self.type_GNN == 'RNN':
            for _ in range(self.gdep):
                h = self.alpha * x + self.beta * self.gconv(
                    h, adj[0]) + self.gamma * self.gconv_preA(h, adj[1])
                out.append(h)
        else:
            for _ in range(self.gdep):
                h = self.alpha * x + self.gamma * self.gconv(h, adj)
                out.append(h)
        ho = torch.cat(out, dim=-1)
        ho = self.mlp(ho)
        return ho
# (corrupted file-metadata residue removed: end of layer.py)
# --- methods/DGCRN_BJ/net.py ---
import torch.utils.data as utils
import torch.nn.functional as F
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import numpy as np
import pandas as pd
import math
import time
from layer import *
import random
import sys
from collections import OrderedDict
class DGCRN(nn.Module):
    def __init__(self,
                 gcn_depth,
                 num_nodes,
                 device,
                 predefined_A=None,
                 dropout=0.3,
                 subgraph_size=20,
                 node_dim=40,
                 middle_dim=2,
                 seq_length=12,
                 in_dim=2,
                 out_dim=12,
                 layers=3,
                 list_weight=[0.05, 0.95, 0.95],
                 tanhalpha=3,
                 cl_decay_steps=2000,
                 rnn_size=64,
                 hyperGNN_dim=32):
        """Dynamic Graph Convolutional Recurrent Network.

        Encoder/decoder GRU whose gates are graph convolutions over an
        adjacency generated step-by-step by hyper-GCNs from node embeddings
        and the current hidden state.

        NOTE(review): list_weight is a mutable default argument — it is only
        read (never mutated) here, but callers should not rely on sharing it.
        """
        super(DGCRN, self).__init__()
        self.output_dim = 1
        self.num_nodes = num_nodes
        self.dropout = dropout
        self.predefined_A = predefined_A
        self.seq_length = seq_length
        # Two learnable node-embedding tables feed the dynamic-graph generator.
        self.emb1 = nn.Embedding(self.num_nodes, node_dim)
        self.emb2 = nn.Embedding(self.num_nodes, node_dim)
        self.lin1 = nn.Linear(node_dim, node_dim)
        self.lin2 = nn.Linear(node_dim, node_dim)
        self.idx = torch.arange(self.num_nodes).to(device)
        self.rnn_size = rnn_size
        hyperGNN_dim = hyperGNN_dim
        self.in_dim = in_dim
        hidden_size = self.rnn_size
        self.hidden_size = self.rnn_size
        # Hyper-GCN dims: input is [hidden_state ; raw input] per node.
        dims_hyper = [
            self.hidden_size + in_dim, hyperGNN_dim, middle_dim, node_dim
        ]
        # Separate graph generators for encoder and decoder.
        self.GCN1_tg = gcn(dims_hyper, gcn_depth, dropout, *list_weight,
                           'hyper')
        self.GCN2_tg = gcn(dims_hyper, gcn_depth, dropout, *list_weight,
                           'hyper')
        self.GCN1_tg_de = gcn(dims_hyper, gcn_depth, dropout, *list_weight,
                              'hyper')
        self.GCN2_tg_de = gcn(dims_hyper, gcn_depth, dropout, *list_weight,
                              'hyper')
        self.fc_final = nn.Linear(self.hidden_size, self.output_dim)
        self.alpha = tanhalpha
        self.device = device
        self.k = subgraph_size
        dims = [in_dim + self.hidden_size, self.hidden_size]
        # GRU gate GCNs (z: update, r: reset, c: candidate), one pair per
        # propagation direction, separately for encoder and decoder.
        self.gz1 = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gz2 = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gr1 = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gr2 = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gc1 = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gc2 = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gz1_de = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gz2_de = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gr1_de = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gr2_de = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gc1_de = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.gc2_de = gcn(dims, gcn_depth, dropout, *list_weight, 'RNN')
        self.use_curriculum_learning = True
        self.cl_decay_steps = cl_decay_steps
        self.gcn_depth = gcn_depth
def preprocessing(self, adj, predefined_A):
adj = adj + torch.eye(self.num_nodes).to(self.device)
adj = adj / torch.unsqueeze(adj.sum(-1), -1)
return [adj, predefined_A]
    def step(self,
             input,
             Hidden_State,
             Cell_State,
             predefined_A,
             type='encoder',
             idx=None,
             i=None):
        """One graph-GRU step: generate the dynamic adjacency from the node
        embeddings, the current input and hidden state, then update the
        hidden/cell states with graph-convolutional GRU gates.

        NOTE(review): the `idx` and `i` parameters are accepted but unused in
        this implementation.
        """
        x = input
        x = x.transpose(1, 2).contiguous()
        nodevec1 = self.emb1(self.idx)
        nodevec2 = self.emb2(self.idx)
        # Hyper-GCN input: [input ; hidden] per node.
        hyper_input = torch.cat(
            (x, Hidden_State.view(-1, self.num_nodes, self.hidden_size)), 2)
        # Encoder and decoder keep separate graph-generator weights.
        if type == 'encoder':
            filter1 = self.GCN1_tg(hyper_input, predefined_A)
            filter2 = self.GCN2_tg(hyper_input, predefined_A)
        elif type == 'decoder':
            filter1 = self.GCN1_tg_de(hyper_input, predefined_A)
            filter2 = self.GCN2_tg_de(hyper_input, predefined_A)
        # Modulate static embeddings by the state-dependent filters, then
        # build an antisymmetric score and a sparse (top-k) dynamic adjacency.
        nodevec1 = torch.tanh(self.alpha * torch.mul(nodevec1, filter1))
        nodevec2 = torch.tanh(self.alpha * torch.mul(nodevec2, filter2))
        a = torch.matmul(nodevec1, nodevec2.transpose(2, 1)) - torch.matmul(
            nodevec2, nodevec1.transpose(2, 1))
        adj = F.relu(torch.tanh(self.alpha * a))
        # Keep only each node's top-k neighbors; mask entries become 1/0.
        mask = torch.zeros_like(adj).to(self.device)
        mask.fill_(float('0'))
        s1, t1 = adj.topk(self.k, 2)
        mask.scatter_(2, t1, s1.fill_(1))
        adj = adj * mask
        # Row-normalized adjacency pairs for both propagation directions.
        adp = self.preprocessing(adj, predefined_A)
        adpT = self.preprocessing(adj.transpose(1, 2), predefined_A)
        Hidden_State = Hidden_State.view(-1, self.num_nodes, self.hidden_size)
        Cell_State = Cell_State.view(-1, self.num_nodes, self.hidden_size)
        combined = torch.cat((x, Hidden_State), -1)
        # GRU gates realized as graph convolutions over both directions.
        if type == 'encoder':
            z = F.sigmoid(self.gz1(combined, adp) + self.gz2(combined, adpT))
            r = F.sigmoid(self.gr1(combined, adp) + self.gr2(combined, adpT))
            temp = torch.cat((x, torch.mul(r, Hidden_State)), -1)
            Cell_State = F.tanh(self.gc1(temp, adp) + self.gc2(temp, adpT))
        elif type == 'decoder':
            z = F.sigmoid(
                self.gz1_de(combined, adp) + self.gz2_de(combined, adpT))
            r = F.sigmoid(
                self.gr1_de(combined, adp) + self.gr2_de(combined, adpT))
            temp = torch.cat((x, torch.mul(r, Hidden_State)), -1)
            Cell_State = F.tanh(
                self.gc1_de(temp, adp) + self.gc2_de(temp, adpT))
        Hidden_State = torch.mul(z, Hidden_State) + torch.mul(
            1 - z, Cell_State)
        return Hidden_State.view(-1, self.hidden_size), Cell_State.view(
            -1, self.hidden_size)
def forward(self,
input,
idx=None,
ycl=None,
batches_seen=None,
task_level=12):
predefined_A = self.predefined_A
x = input
batch_size = x.size(0)
Hidden_State, Cell_State = self.initHidden(batch_size * self.num_nodes,
self.hidden_size)
for i in range(self.seq_length):
Hidden_State, Cell_State = self.step(torch.squeeze(x[..., i]),
Hidden_State, Cell_State,
predefined_A, 'encoder', idx,
i)
go_symbol = torch.zeros((batch_size, self.output_dim, self.num_nodes),
device=self.device)
timeofday = ycl[:, 1:, :, :]
decoder_input = go_symbol
outputs_final = []
for i in range(task_level):
try:
decoder_input = torch.cat([decoder_input, timeofday[..., i]],
dim=1)
except:
print(decoder_input.shape, timeofday.shape)
sys.exit(0)
Hidden_State, Cell_State = self.step(decoder_input, Hidden_State,
Cell_State, predefined_A,
'decoder', idx, None)
decoder_output = self.fc_final(Hidden_State)
decoder_input = decoder_output.view(batch_size, self.num_nodes,
self.output_dim).transpose(
1, 2)
outputs_final.append(decoder_output)
if self.training and self.use_curriculum_learning:
c = np.random.uniform(0, 1)
if c < self._compute_sampling_threshold(batches_seen):
decoder_input = ycl[:, :1, :, i]
outputs_final = torch.stack(outputs_final, dim=1)
outputs_final = outputs_final.view(batch_size, self.num_nodes,
task_level,
self.output_dim).transpose(1, 2)
return outputs_final
def initHidden(self, batch_size, hidden_size):
use_gpu = torch.cuda.is_available()
if use_gpu:
Hidden_State = Variable(
torch.zeros(batch_size, hidden_size).to(self.device))
Cell_State = Variable(
torch.zeros(batch_size, hidden_size).to(self.device))
return Hidden_State, Cell_State
else:
Hidden_State = Variable(torch.zeros(batch_size, hidden_size))
Cell_State = Variable(torch.zeros(batch_size, hidden_size))
return Hidden_State, Cell_State
def _compute_sampling_threshold(self, batches_seen):
threshold = self.cl_decay_steps / (
self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps))
if threshold < 0.7:
threshold = 0.7
return threshold
| 9,248 | 33.901887 | 79 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DGCRN_BJ/util.py | import pickle
import numpy as np
import os
import scipy.sparse as sp
import torch
from scipy.sparse import linalg
from torch.autograd import Variable
def normal_std(x):
    """Standard deviation rescaled by sqrt((n-1)/n) for a length-n series."""
    n = len(x)
    return x.std() * np.sqrt((n - 1.) / n)
class DataLoaderS(object):
    """Single-step forecasting loader for a CSV matrix shaped (time, nodes).

    Splits the series into train/valid/test index ranges, normalizes each
    column (max-abs by default), and materializes (X, Y) pairs where X is a
    length-`window` history and Y is the value `horizon` steps ahead.  Also
    precomputes the RSE/RAE normalizers from the test targets.
    """
    def __init__(self,
                 file_name,
                 train,
                 valid,
                 device,
                 horizon,
                 window,
                 normalize=2):
        self.P = window
        self.h = horizon
        # np.loadtxt accepts a path and manages the file itself; the
        # original opened a handle here that was never closed.
        self.rawdat = np.loadtxt(file_name, delimiter=',')
        self.dat = np.zeros(self.rawdat.shape)
        self.n, self.m = self.dat.shape
        # Record the scheme actually requested (was hard-coded to 2,
        # silently ignoring the parameter).
        self.normalize = normalize
        self.scale = np.ones(self.m)
        self._normalized(normalize)
        self._split(int(train * self.n), int((train + valid) * self.n), self.n)
        self.scale = torch.from_numpy(self.scale).float()
        tmp = self.test[1] * self.scale.expand(self.test[1].size(0), self.m)
        self.scale = self.scale.to(device)
        self.scale = Variable(self.scale)
        # Denominators for relative squared / absolute error metrics.
        self.rse = normal_std(tmp)
        self.rae = torch.mean(torch.abs(tmp - torch.mean(tmp)))
        self.device = device

    def _normalized(self, normalize):
        # 0: raw values; 1: scale by the global max; 2: per-column max-abs.
        if (normalize == 0):
            self.dat = self.rawdat
        if (normalize == 1):
            self.dat = self.rawdat / np.max(self.rawdat)
        if (normalize == 2):
            for i in range(self.m):
                self.scale[i] = np.max(np.abs(self.rawdat[:, i]))
                self.dat[:, i] = self.rawdat[:, i] / np.max(
                    np.abs(self.rawdat[:, i]))

    def _split(self, train, valid, test):
        # The first usable index needs P history steps plus the h-step lead.
        train_set = range(self.P + self.h - 1, train)
        valid_set = range(train, valid)
        test_set = range(valid, self.n)
        self.train = self._batchify(train_set, self.h)
        self.valid = self._batchify(valid_set, self.h)
        self.test = self._batchify(test_set, self.h)

    def _batchify(self, idx_set, horizon):
        """Materialize [X, Y]: X[i] is the window ending h steps before idx."""
        n = len(idx_set)
        X = torch.zeros((n, self.P, self.m))
        Y = torch.zeros((n, self.m))
        for i in range(n):
            end = idx_set[i] - self.h + 1
            start = end - self.P
            X[i, :, :] = torch.from_numpy(self.dat[start:end, :])
            Y[i, :] = torch.from_numpy(self.dat[idx_set[i], :])
        return [X, Y]

    def get_batches(self, inputs, targets, batch_size, shuffle=True):
        """Yield (X, Y) mini-batches moved to self.device, optionally shuffled."""
        length = len(inputs)
        if shuffle:
            index = torch.randperm(length)
        else:
            index = torch.LongTensor(range(length))
        start_idx = 0
        while (start_idx < length):
            end_idx = min(length, start_idx + batch_size)
            excerpt = index[start_idx:end_idx]
            X = inputs[excerpt]
            Y = targets[excerpt]
            X = X.to(self.device)
            Y = Y.to(self.device)
            yield Variable(X), Variable(Y)
            start_idx += batch_size
class DataLoaderM(object):
    """Mini-batch iterator over paired (x, y) arrays.

    Optionally pads both arrays by repeating the last sample so the total
    sample count becomes divisible by the batch size.
    """
    def __init__(self, xs, ys, batch_size, pad_with_last_sample=True):
        """
        :param xs: input samples (num_samples first axis)
        :param ys: target samples (same leading length as xs)
        :param batch_size: samples per batch
        :param pad_with_last_sample: pad with the last sample to make number of samples divisible to batch_size.
        """
        self.batch_size = batch_size
        self.current_ind = 0
        if pad_with_last_sample:
            n_pad = (batch_size - (len(xs) % batch_size)) % batch_size
            xs = np.concatenate([xs, np.repeat(xs[-1:], n_pad, axis=0)],
                                axis=0)
            ys = np.concatenate([ys, np.repeat(ys[-1:], n_pad, axis=0)],
                                axis=0)
        self.size = len(xs)
        self.num_batch = self.size // self.batch_size
        self.xs = xs
        self.ys = ys
    def shuffle(self):
        """Apply one random permutation consistently to xs and ys."""
        order = np.random.permutation(self.size)
        self.xs = self.xs[order]
        self.ys = self.ys[order]
    def get_iterator(self):
        """Return a generator over (x, y) batches; resets the batch cursor."""
        self.current_ind = 0
        def _iter():
            while self.current_ind < self.num_batch:
                lo = self.batch_size * self.current_ind
                hi = min(self.size, lo + self.batch_size)
                yield (self.xs[lo:hi, ...], self.ys[lo:hi, ...])
                self.current_ind += 1
        return _iter()
class DataLoaderM_new(object):
    """Mini-batch iterator over (x, y, ycl) triples, where `ycl` is the
    scaled copy of the targets used for curriculum learning.

    Bug fix: `ycl` is now padded with its *own* last sample; previously it
    was padded with `ys`'s last sample even though the two arrays hold
    differently-scaled values.
    """
    def __init__(self, xs, ys, ycl, batch_size, pad_with_last_sample=True):
        """
        :param xs: input samples (num_samples first axis)
        :param ys: target samples
        :param ycl: scaled targets, same leading length as ys
        :param batch_size: samples per batch
        :param pad_with_last_sample: pad with the last sample to make number of samples divisible to batch_size.
        """
        self.batch_size = batch_size
        self.current_ind = 0
        if pad_with_last_sample:
            num_padding = (batch_size - (len(xs) % batch_size)) % batch_size
            x_padding = np.repeat(xs[-1:], num_padding, axis=0)
            y_padding = np.repeat(ys[-1:], num_padding, axis=0)
            ycl_padding = np.repeat(ycl[-1:], num_padding, axis=0)
            xs = np.concatenate([xs, x_padding], axis=0)
            ys = np.concatenate([ys, y_padding], axis=0)
            ycl = np.concatenate([ycl, ycl_padding], axis=0)
        self.size = len(xs)
        self.num_batch = int(self.size // self.batch_size)
        self.xs = xs
        self.ys = ys
        self.ycl = ycl
    def shuffle(self):
        """Apply one random permutation consistently to xs, ys and ycl."""
        permutation = np.random.permutation(self.size)
        self.xs = self.xs[permutation]
        self.ys = self.ys[permutation]
        self.ycl = self.ycl[permutation]
    def get_iterator(self):
        """Return a generator over (x, y, ycl) batches; resets the cursor."""
        self.current_ind = 0
        def _wrapper():
            while self.current_ind < self.num_batch:
                start_ind = self.batch_size * self.current_ind
                end_ind = min(self.size,
                              self.batch_size * (self.current_ind + 1))
                x_i = self.xs[start_ind:end_ind, ...]
                y_i = self.ys[start_ind:end_ind, ...]
                y_i_cl = self.ycl[start_ind:end_ind, ...]
                yield (x_i, y_i, y_i_cl)
                self.current_ind += 1
        return _wrapper()
class StandardScaler():
    """Z-score normalization helper: x -> (x - mean) / std, and back."""
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
    def transform(self, data):
        """Map raw values into standardized space."""
        return (data - self.mean) / self.std
    def inverse_transform(self, data):
        """Map standardized values back to the raw scale."""
        return data * self.std + self.mean
def sym_adj(adj):
    """Symmetrically normalize an adjacency matrix.

    Computes D^{-1/2} A^T D^{-1/2} (with D the row-sum degree matrix) and
    returns it as a dense float32 matrix.  Zero-degree rows map to zero.
    """
    adj = sp.coo_matrix(adj)
    degree = np.array(adj.sum(1))
    d_inv_sqrt = np.power(degree, -0.5).flatten()
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
    d_half = sp.diags(d_inv_sqrt)
    normalized = adj.dot(d_half).transpose().dot(d_half)
    return normalized.astype(np.float32).todense()
def asym_adj(adj):
    """Row-normalize an adjacency matrix: D^{-1} A, dense float32.

    Zero-degree rows are left as zeros instead of dividing by zero.
    """
    adj = sp.coo_matrix(adj)
    degree = np.array(adj.sum(1)).flatten()
    d_inv = np.power(degree, -1).flatten()
    d_inv[np.isinf(d_inv)] = 0.
    return sp.diags(d_inv).dot(adj).astype(np.float32).todense()
def calculate_normalized_laplacian(adj):
    """Compute the symmetric normalized Laplacian L = I - D^-1/2 A D^-1/2.

    :param adj: dense or sparse adjacency matrix
    :return: sparse matrix holding the normalized Laplacian
    """
    adj = sp.coo_matrix(adj)
    degree = np.array(adj.sum(1))
    d_inv_sqrt = np.power(degree, -0.5).flatten()
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
    d_half = sp.diags(d_inv_sqrt)
    sym_norm = adj.dot(d_half).transpose().dot(d_half).tocoo()
    return sp.eye(adj.shape[0]) - sym_norm
def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True):
    """Rescale the normalized Laplacian to roughly [-1, 1]: 2L/lambda_max - I.

    When lambda_max is None the largest eigenvalue is computed with eigsh.
    Returns a dense float32 matrix suitable for Chebyshev polynomial filters.
    """
    if undirected:
        # Symmetrize by taking the elementwise maximum of A and A^T.
        adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])
    lap = calculate_normalized_laplacian(adj_mx)
    if lambda_max is None:
        eigval, _ = linalg.eigsh(lap, 1, which='LM')
        lambda_max = eigval[0]
    lap = sp.csr_matrix(lap)
    n, _ = lap.shape
    identity = sp.identity(n, format='csr', dtype=lap.dtype)
    scaled = (2 / lambda_max * lap) - identity
    return scaled.astype(np.float32).todense()
def load_pickle(pickle_file):
    """Load a pickle file, retrying with latin1 encoding for py2-era files.

    Any other failure is reported and re-raised.
    """
    try:
        with open(pickle_file, 'rb') as f:
            return pickle.load(f)
    except UnicodeDecodeError:
        with open(pickle_file, 'rb') as f:
            return pickle.load(f, encoding='latin1')
    except Exception as e:
        print('Unable to load data ', pickle_file, ':', e)
        raise
def load_adj(pkl_filename):
    """Load an adjacency matrix from a (sensor_ids, id_map, adj) pickle."""
    _, _, adj = load_pickle(pkl_filename)
    return adj
def load_dataset(dataset_dir,
                 batch_size,
                 valid_batch_size=None,
                 test_batch_size=None):
    """Load {train,val,test}.npz from dataset_dir and build batch loaders.

    Channel 0 of every x array is z-scored with the *training* mean/std.
    A scaled copy of the training targets ('y_train_cl') is prepared for
    curriculum learning.  Returns a dict with the raw arrays, the three
    loaders and the fitted StandardScaler.
    """
    data = {}
    for category in ['train', 'val', 'test']:
        cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
        data['x_' + category] = cat_data['x'].astype(np.float32)
        data['y_' + category] = cat_data['y'].astype(np.float32)
    # Scaler is fitted on the training inputs' first channel only.
    scaler = StandardScaler(mean=data['x_train'][..., 0].mean(),
                            std=data['x_train'][..., 0].std())
    for category in ['train', 'val', 'test']:
        data['x_' + category][...,
                              0] = scaler.transform(data['x_' + category][...,
                                                                          0])
    import copy
    # Deep copy so the unscaled y_train stays available for metric targets.
    data['y_train_cl'] = copy.deepcopy(data['y_train'])
    data['y_train_cl'][..., 0] = scaler.transform(data['y_train'][..., 0])
    data['train_loader'] = DataLoaderM_new(data['x_train'], data['y_train'],
                                           data['y_train_cl'], batch_size)
    data['val_loader'] = DataLoaderM(data['x_val'], data['y_val'],
                                     valid_batch_size)
    data['test_loader'] = DataLoaderM(data['x_test'], data['y_test'],
                                      test_batch_size)
    data['scaler'] = scaler
    return data
def masked_mse(preds, labels, null_val=np.nan):
    """MSE over entries whose label differs from null_val (NaN-aware).

    Valid entries are re-weighted so the mean is taken over them only;
    NaNs produced by an all-masked batch are zeroed out.
    """
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = labels != null_val
    weights = valid.float()
    weights = weights / torch.mean(weights)
    weights = torch.where(torch.isnan(weights), torch.zeros_like(weights),
                          weights)
    err = (preds - labels) ** 2 * weights
    err = torch.where(torch.isnan(err), torch.zeros_like(err), err)
    return torch.mean(err)
def masked_rmse(preds, labels, null_val=np.nan):
    """Root of the masked MSE (same null_val semantics as masked_mse)."""
    mse = masked_mse(preds=preds, labels=labels, null_val=null_val)
    return torch.sqrt(mse)
def masked_mae(preds, labels, null_val=np.nan):
    """MAE over entries whose label differs from null_val (NaN-aware).

    Valid entries are re-weighted so the mean is taken over them only;
    NaNs produced by an all-masked batch are zeroed out.
    """
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = labels != null_val
    weights = valid.float()
    weights = weights / torch.mean(weights)
    weights = torch.where(torch.isnan(weights), torch.zeros_like(weights),
                          weights)
    err = torch.abs(preds - labels) * weights
    err = torch.where(torch.isnan(err), torch.zeros_like(err), err)
    return torch.mean(err)
def masked_mape(preds, labels, null_val=np.nan):
    """MAPE over entries whose label differs from null_val (NaN-aware).

    Division by a zero label inside a masked position produces NaN, which
    the final torch.where zeroes out.
    """
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = labels != null_val
    weights = valid.float()
    weights = weights / torch.mean(weights)
    weights = torch.where(torch.isnan(weights), torch.zeros_like(weights),
                          weights)
    err = torch.abs(preds - labels) / labels
    err = err * weights
    err = torch.where(torch.isnan(err), torch.zeros_like(err), err)
    return torch.mean(err)
def metric(pred, real):
    """Return (MAE, MAPE, RMSE) treating zeros in `real` as missing values."""
    return (masked_mae(pred, real, 0.0).item(),
            masked_mape(pred, real, 0.0).item(),
            masked_rmse(pred, real, 0.0).item())
def load_node_feature(path):
    """Read per-node features from a CSV whose first column is a node id.

    Each line has the form "<node_id>,v1,v2,...".  The id column is dropped
    and the remaining columns are z-scored column-wise.

    :param path: path to the CSV file
    :return: torch.FloatTensor of shape (num_nodes, num_features)
    """
    x = []
    # `with` closes the file (the original leaked the open handle).
    with open(path) as fi:
        for li in fi:
            parts = li.strip().split(",")
            x.append([float(t) for t in parts[1:]])
    x = np.array(x)
    mean = np.mean(x, axis=0)
    std = np.std(x, axis=0)
    return torch.tensor((x - mean) / std, dtype=torch.float)
def normal_std(x):
    # NOTE(review): exact duplicate of the normal_std defined near the top
    # of this module; this redefinition harmlessly shadows it and could be
    # removed.
    return x.std() * np.sqrt((len(x) - 1.) / (len(x)))
| 12,198 | 31.530667 | 112 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DGCRN_BJ/train.py | import torch
import numpy as np
import argparse
import time
from util import *
from trainer import Trainer
from net import DGCRN
import setproctitle
import os
import random
setproctitle.setproctitle("DGCRN@lifuxian")
def str_to_bool(value):
    """Parse a boolean flag value for argparse.

    Accepts real bools unchanged, common yes/no spellings case-insensitively,
    and raises ValueError for anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', '1', 'yes', 'y'):
        return True
    if lowered in ('false', 'f', '0', 'no', 'n'):
        return False
    raise ValueError(f'{value} is not a valid boolean value')
# Command-line configuration for DGCRN training.
parser = argparse.ArgumentParser()
parser.add_argument('--runs', type=int, default=10, help='number of runs')
# NOTE(review): type=bool parses any non-empty string (including "False")
# as True; str_to_bool above would be safer for these flags.
parser.add_argument('--LOAD_INITIAL',
                    default=False,
                    type=bool,
                    help='If LOAD_INITIAL.')
parser.add_argument('--TEST_ONLY',
                    default=False,
                    type=bool,
                    help='If TEST_ONLY.')
parser.add_argument('--tolerance',
                    type=int,
                    default=100,
                    help='tolerance for earlystopping')
parser.add_argument('--OUTPUT_PREDICTION',
                    default=False,
                    type=bool,
                    help='If OUTPUT_PREDICTION.')
parser.add_argument('--cl_decay_steps',
                    default=2000,
                    type=float,
                    help='cl_decay_steps.')
parser.add_argument('--new_training_method',
                    default=False,
                    type=bool,
                    help='new_training_method.')
parser.add_argument('--rnn_size', type=int, default=64, help='rnn_size')
parser.add_argument('--hyperGNN_dim',
                    type=int,
                    default=32,
                    help='hyperGNN_dim')
parser.add_argument('--device', type=str, default='cuda:1', help='')
parser.add_argument('--data',
                    type=str,
                    default='data/METR-LA',
                    help='data path')
parser.add_argument('--adj_data',
                    type=str,
                    default='data/sensor_graph/adj_mx.pkl',
                    help='adj data path')
parser.add_argument('--propalpha', type=float, default=0.05, help='prop alpha')
parser.add_argument('--cl',
                    type=str_to_bool,
                    default=True,
                    help='whether to do curriculum learning')
parser.add_argument('--gcn_depth',
                    type=int,
                    default=2,
                    help='graph convolution depth')
parser.add_argument('--num_nodes',
                    type=int,
                    default=207,
                    help='number of nodes/variables')
parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate')
parser.add_argument('--subgraph_size', type=int, default=20, help='k')
parser.add_argument('--node_dim', type=int, default=40, help='dim of nodes')
parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension')
parser.add_argument('--seq_in_len',
                    type=int,
                    default=12,
                    help='input sequence length')
parser.add_argument('--seq_out_len',
                    type=int,
                    default=12,
                    help='output sequence length')
parser.add_argument('--layers', type=int, default=3, help='number of layers')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--tanhalpha', type=float, default=3, help='adj alpha')
parser.add_argument('--learning_rate',
                    type=float,
                    default=0.001,
                    help='learning rate')
parser.add_argument('--weight_decay',
                    type=float,
                    default=0.0001,
                    help='weight decay rate')
parser.add_argument('--clip', type=int, default=5, help='clip')
parser.add_argument('--step_size1', type=int, default=2500, help='step_size')
parser.add_argument('--epochs', type=int, default=100, help='')
parser.add_argument('--print_every', type=int, default=50, help='')
parser.add_argument('--save', type=str, default='./save/', help='save path')
parser.add_argument('--expid', type=str, default='1', help='experiment id')
args = parser.parse_args()
torch.set_num_threads(3)
os.makedirs(args.save, exist_ok=True)
device = torch.device(args.device)
# Same batch size is used for train, validation and test loaders.
dataloader = load_dataset(args.data, args.batch_size, args.batch_size,
                          args.batch_size)
scaler = dataloader['scaler']
# Row-normalize the predefined adjacency before moving it to the device.
predefined_A = load_adj(args.adj_data)
predefined_A = torch.tensor(predefined_A)
predefined_A = predefined_A / predefined_A.sum(-1).view(-1, 1)
predefined_A = predefined_A.to(device)
def main(runid):
    """Build DGCRN and either evaluate a saved checkpoint (TEST_ONLY) or
    train with early stopping, then report validation and test metrics.

    Returns a 9-tuple: validation (MAE, MAPE, RMSE), per-horizon test lists
    (MAE, MAPE, RMSE for horizons 3/6/9/12) and overall test
    (MAE, MAPE, RMSE).
    """
    model = DGCRN(args.gcn_depth,
                  args.num_nodes,
                  device,
                  predefined_A=predefined_A,
                  dropout=args.dropout,
                  subgraph_size=args.subgraph_size,
                  node_dim=args.node_dim,
                  middle_dim=2,
                  seq_length=args.seq_in_len,
                  in_dim=args.in_dim,
                  out_dim=args.seq_out_len,
                  layers=args.layers,
                  list_weight=[0.05, 0.95, 0.95],
                  tanhalpha=args.tanhalpha,
                  cl_decay_steps=args.cl_decay_steps,
                  rnn_size=args.rnn_size,
                  hyperGNN_dim=args.hyperGNN_dim)
    print(args)
    nParams = sum([p.nelement() for p in model.parameters()])
    print('Number of model parameters is', nParams)
    engine = Trainer(model, args.learning_rate, args.weight_decay, args.clip,
                     args.step_size1, args.seq_out_len, scaler, device,
                     args.cl, args.new_training_method)
    if args.LOAD_INITIAL:
        # Checkpoint path encodes experiment id and run id.
        engine.model.load_state_dict(
            torch.load(args.save + "exp" + str(args.expid) + "_" + str(runid) +
                       ".pth",
                       map_location='cpu'))
        print('model load success!')
    if args.TEST_ONLY:
        # Evaluation-only path: run the test loader and report metrics.
        outputs = []
        realy = torch.Tensor(dataloader['y_test']).to(device)
        realy = realy.transpose(1, 3)[:, 0, :, :]
        for iter, (x,
                   y) in enumerate(dataloader['test_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            with torch.no_grad():
                engine.model.eval()
                preds = engine.model(testx, ycl=testy)
                preds = preds.transpose(1, 3)
            outputs.append(preds.squeeze(dim=1))
        yhat = torch.cat(outputs, dim=0)
        # Drop padded samples appended by the data loader.
        yhat = yhat[:realy.size(0), ...]
        if args.OUTPUT_PREDICTION:
            pred_all = scaler.inverse_transform(yhat).cpu()
            path_savepred = args.save + 'result_pred/' + "exp" + str(
                args.expid) + "_" + str(runid)
            os.makedirs(args.save + 'result_pred/', exist_ok=True)
            np.save(path_savepred, pred_all)
            print('result of prediction has been saved, path: ' + os.getcwd() +
                  path_savepred[1:] + '.npy' + ", shape: " +
                  str(pred_all.shape))
        mae = []
        mape = []
        rmse = []
        pred = scaler.inverse_transform(yhat)
        tmae, tmape, trmse = metric(pred, realy)
        # Per-horizon metrics at steps 3, 6, 9, 12 (0-based 2, 5, 8, 11).
        for i in [2, 5, 8, 11]:
            pred = scaler.inverse_transform(yhat[:, :, i])
            real = realy[:, :, i]
            metrics = metric(pred, real)
            log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
            print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
            mae.append(metrics[0])
            mape.append(metrics[1])
            rmse.append(metrics[2])
        return mae, mape, rmse, mae, mape, rmse, tmae, tmape, trmse
    else:
        print("start training...", flush=True)
        his_loss = []
        val_time = []
        train_time = []
        minl = 1e5
        # NOTE(review): minl_test appears unused below.
        minl_test = 1e5
        epoch_best = -1
        tolerance = args.tolerance
        count_lfx = 0
        batches_seen = 0
        for i in range(1, args.epochs + 1):
            train_loss = []
            train_mape = []
            train_rmse = []
            t1 = time.time()
            dataloader['train_loader'].shuffle()
            for iter, (x, y, ycl) in enumerate(
                    dataloader['train_loader'].get_iterator()):
                batches_seen += 1
                trainx = torch.Tensor(x).to(device)
                trainx = trainx.transpose(1, 3)
                trainy = torch.Tensor(y).to(device)
                trainy = trainy.transpose(1, 3)
                trainycl = torch.Tensor(ycl).to(device)
                trainycl = trainycl.transpose(1, 3)
                metrics = engine.train(trainx,
                                       trainy[:, 0, :, :],
                                       trainycl,
                                       idx=None,
                                       batches_seen=batches_seen)
                train_loss.append(metrics[0])
                train_mape.append(metrics[1])
                train_rmse.append(metrics[2])
            t2 = time.time()
            train_time.append(t2 - t1)
            valid_loss = []
            valid_mape = []
            valid_rmse = []
            s1 = time.time()
            for iter, (x, y) in enumerate(
                    dataloader['val_loader'].get_iterator()):
                testx = torch.Tensor(x).to(device)
                testx = testx.transpose(1, 3)
                testy = torch.Tensor(y).to(device)
                testy = testy.transpose(1, 3)
                metrics = engine.eval(testx, testy[:, 0, :, :], testy)
                valid_loss.append(metrics[0])
                valid_mape.append(metrics[1])
                valid_rmse.append(metrics[2])
            s2 = time.time()
            val_time.append(s2 - s1)
            mtrain_loss = np.mean(train_loss)
            mtrain_mape = np.mean(train_mape)
            mtrain_rmse = np.mean(train_rmse)
            mvalid_loss = np.mean(valid_loss)
            mvalid_mape = np.mean(valid_mape)
            mvalid_rmse = np.mean(valid_rmse)
            his_loss.append(mvalid_loss)
            if (i - 1) % args.print_every == 0:
                log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
                print(log.format(i, (s2 - s1)))
                log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
                print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse,
                                 mvalid_loss, mvalid_mape, mvalid_rmse,
                                 (t2 - t1)),
                      flush=True)
            # Early stopping: save the best-validation checkpoint, stop
            # after `tolerance` epochs without improvement.
            if mvalid_loss < minl:
                torch.save(
                    engine.model.state_dict(), args.save + "exp" +
                    str(args.expid) + "_" + str(runid) + ".pth")
                minl = mvalid_loss
                epoch_best = i
                count_lfx = 0
            else:
                count_lfx += 1
                if count_lfx > tolerance:
                    break
        print("Average Training Time: {:.4f} secs/epoch".format(
            np.mean(train_time)))
        print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))
        bestid = np.argmin(his_loss)
        # Reload the best checkpoint before final evaluation.
        engine.model.load_state_dict(
            torch.load(args.save + "exp" + str(args.expid) + "_" + str(runid) +
                       ".pth",
                       map_location='cpu'))
        print("Training finished")
        print("The valid loss on best model is {}, epoch:{}".format(
            str(round(his_loss[bestid], 4)), epoch_best))
        outputs = []
        realy = torch.Tensor(dataloader['y_val']).to(device)
        realy = realy.transpose(1, 3)[:, 0, :, :]
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            with torch.no_grad():
                preds = engine.model(testx, ycl=testy)
                preds = preds.transpose(1, 3)
            outputs.append(preds.squeeze(dim=1))
        yhat = torch.cat(outputs, dim=0)
        yhat = yhat[:realy.size(0), ...]
        pred = scaler.inverse_transform(yhat)
        vmae, vmape, vrmse = metric(pred, realy)
        outputs = []
        realy = torch.Tensor(dataloader['y_test']).to(device)
        realy = realy.transpose(1, 3)[:, 0, :, :]
        for iter, (x,
                   y) in enumerate(dataloader['test_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            with torch.no_grad():
                preds = engine.model(testx, ycl=testy)
                preds = preds.transpose(1, 3)
            outputs.append(preds.squeeze(dim=1))
        yhat = torch.cat(outputs, dim=0)
        yhat = yhat[:realy.size(0), ...]
        mae = []
        mape = []
        rmse = []
        pred = scaler.inverse_transform(yhat)
        tmae, tmape, trmse = metric(pred, realy)
        for i in [2, 5, 8, 11]:
            pred = scaler.inverse_transform(yhat[:, :, i])
            real = realy[:, :, i]
            metrics = metric(pred, real)
            log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
            print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
            mae.append(metrics[0])
            mape.append(metrics[1])
            rmse.append(metrics[2])
        return vmae, vmape, vrmse, mae, mape, rmse, tmae, tmape, trmse
if __name__ == "__main__":
    vmae = []
    vmape = []
    vrmse = []
    mae = []
    mape = []
    rmse = []
    tmae = []
    tmape = []
    trmse = []
    for i in range(args.runs):
        # NOTE(review): the loop index is remapped to a run id (offset by 3,
        # special-cased for 1 or 2 runs) — presumably to pick particular
        # checkpoint names/seeds; confirm the intended mapping.
        if args.runs == 1:
            i = 2
        elif args.runs == 2:
            i += 1
        i += 3
        vm1, vm2, vm3, m1, m2, m3, tm1, tm2, tm3 = main(i)
        vmae.append(vm1)
        vmape.append(vm2)
        vrmse.append(vm3)
        mae.append(m1)
        mape.append(m2)
        rmse.append(m3)
        tmae.append(tm1)
        tmape.append(tm2)
        trmse.append(tm3)
    # Per-horizon arrays: rows are runs, columns are horizons 3/6/9/12.
    mae = np.array(mae)
    mape = np.array(mape)
    rmse = np.array(rmse)
    amae = np.mean(mae, 0)
    amape = np.mean(mape, 0)
    armse = np.mean(rmse, 0)
    smae = np.std(mae, 0)
    smape = np.std(mape, 0)
    srmse = np.std(rmse, 0)
    print('\n\nResults for ' + str(args.runs) + ' runs\n\n')
    print('valid\tMAE\tRMSE\tMAPE')
    log = 'mean:\t{:.4f}\t{:.4f}\t{:.4f}'
    print(log.format(np.mean(vmae), np.mean(vrmse), np.mean(vmape)))
    log = 'std:\t{:.4f}\t{:.4f}\t{:.4f}'
    print(log.format(np.std(vmae), np.std(vrmse), np.std(vmape)))
    print('\n\n')
    print('test\tMAE\tRMSE\tMAPE')
    log = 'mean:\t{:.4f}\t{:.4f}\t{:.4f}'
    print(log.format(np.mean(tmae), np.mean(trmse), np.mean(tmape)))
    log = 'std:\t{:.4f}\t{:.4f}\t{:.4f}'
    print(log.format(np.std(tmae), np.std(trmse), np.std(tmape)))
    print('\n\n')
    print(
        'test|horizon\tMAE-mean\tRMSE-mean\tMAPE-mean\tMAE-std\tRMSE-std\tMAPE-std'
    )
    for i in range(4):
        log = '{:d}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}'
        print(
            log.format([3, 6, 9, 12][i], amae[i], armse[i], amape[i], smae[i],
                       srmse[i], smape[i]))
| 15,842 | 34.76298 | 186 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DGCRN_BJ/trainer.py | import torch.optim as optim
import math
from net import *
import util
class Trainer():
    """Optimization wrapper for the model: Adam optimizer, gradient
    clipping, masked-MAE loss, and an optional curriculum that grows the
    supervised prediction horizon over time."""
    def __init__(self,
                 model,
                 lrate,
                 wdecay,
                 clip,
                 step_size,
                 seq_out_len,
                 scaler,
                 device,
                 cl=True,
                 new_training_method=False):
        self.scaler = scaler
        self.model = model
        self.model.to(device)
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=lrate,
                                    weight_decay=wdecay)
        self.loss = util.masked_mae
        self.clip = clip
        self.step = step_size
        self.iter = 0
        # Curriculum starts by supervising only the first horizon step.
        self.task_level = 1
        self.seq_out_len = seq_out_len
        self.cl = cl
        self.new_training_method = new_training_method
    def train(self, input, real_val, ycl, idx=None, batches_seen=None):
        """Run one optimization step; returns (loss, MAPE, RMSE).

        `real_val` holds the unscaled targets, `ycl` the scaled target
        sequence fed to the decoder.  `batches_seen` is accepted for
        interface compatibility; the internal counter is what is forwarded
        to the model.
        """
        self.iter += 1
        # Every `step` iterations, supervise one more horizon step.
        if self.iter % self.step == 0 and self.task_level < self.seq_out_len:
            self.task_level += 1
            if self.new_training_method:
                self.iter = 0
        self.model.train()
        self.optimizer.zero_grad()
        if self.cl:
            output = self.model(input,
                                idx=idx,
                                ycl=ycl,
                                batches_seen=self.iter,
                                task_level=self.task_level)
        else:
            output = self.model(input,
                                idx=idx,
                                ycl=ycl,
                                batches_seen=self.iter,
                                task_level=self.seq_out_len)
        output = output.transpose(1, 3)
        real = torch.unsqueeze(real_val, dim=1)
        # Metrics are computed in the original (unscaled) data space.
        predict = self.scaler.inverse_transform(output)
        if self.cl:
            # Only the first `task_level` horizon steps contribute.
            loss = self.loss(predict[:, :, :, :self.task_level],
                             real[:, :, :, :self.task_level], 0.0)
            mape = util.masked_mape(predict[:, :, :, :self.task_level],
                                    real[:, :, :, :self.task_level],
                                    0.0).item()
            rmse = util.masked_rmse(predict[:, :, :, :self.task_level],
                                    real[:, :, :, :self.task_level],
                                    0.0).item()
        else:
            loss = self.loss(predict, real, 0.0)
            mape = util.masked_mape(predict, real, 0.0).item()
            rmse = util.masked_rmse(predict, real, 0.0).item()
        loss.backward()
        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()
        return loss.item(), mape, rmse
    def eval(self, input, real_val, ycl):
        """Evaluate one batch without gradients; returns (loss, MAPE, RMSE)."""
        self.model.eval()
        with torch.no_grad():
            output = self.model(input, ycl=ycl)
        output = output.transpose(1, 3)
        real = torch.unsqueeze(real_val, dim=1)
        predict = self.scaler.inverse_transform(output)
        loss = self.loss(predict, real, 0.0)
        mape = util.masked_mape(predict, real, 0.0).item()
        rmse = util.masked_rmse(predict, real, 0.0).item()
        return loss.item(), mape, rmse
Traffic-Benchmark | Traffic-Benchmark-master/methods/ASTGCN/train_MSTGCN_r.py | #!/usr/bin/env python
# coding: utf-8
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import os
from time import time
import shutil
import argparse
import configparser
from model.MSTGCN_r import make_model
from lib.utils import load_graphdata_channel1, get_adjacency_matrix, evaluate_on_test_mstgcn, compute_val_loss_mstgcn, predict_and_save_results_mstgcn
from tensorboardX import SummaryWriter
# Read the experiment configuration (.conf) file and build the MSTGCN model.
parser = argparse.ArgumentParser()
parser.add_argument("--config", default='configurations/PEMS04_astgcn.conf', type=str,
                    help="configuration file path")
args = parser.parse_args()
config = configparser.ConfigParser()
print('Read configuration file: %s' % (args.config))
config.read(args.config)
data_config = config['Data']
training_config = config['Training']
adj_filename = data_config['adj_filename']
graph_signal_matrix_filename = data_config['graph_signal_matrix_filename']
if config.has_option('Data', 'id_filename'):
    id_filename = data_config['id_filename']
else:
    id_filename = None
num_of_vertices = int(data_config['num_of_vertices'])
points_per_hour = int(data_config['points_per_hour'])
num_for_predict = int(data_config['num_for_predict'])
len_input = int(data_config['len_input'])
dataset_name = data_config['dataset_name']
model_name = training_config['model_name']
ctx = training_config['ctx']
# GPU selection is done through CUDA_VISIBLE_DEVICES, so 'cuda:0' below
# refers to the (single) visible device.
os.environ["CUDA_VISIBLE_DEVICES"] = ctx
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device('cuda:0')
print("CUDA:", USE_CUDA, DEVICE)
learning_rate = float(training_config['learning_rate'])
epochs = int(training_config['epochs'])
start_epoch = int(training_config['start_epoch'])
batch_size = int(training_config['batch_size'])
num_of_weeks = int(training_config['num_of_weeks'])
num_of_days = int(training_config['num_of_days'])
num_of_hours = int(training_config['num_of_hours'])
time_strides = num_of_hours
nb_chev_filter = int(training_config['nb_chev_filter'])
nb_time_filter = int(training_config['nb_time_filter'])
in_channels = int(training_config['in_channels'])
nb_block = int(training_config['nb_block'])
K = int(training_config['K'])
# Experiment directory name encodes the model and key hyperparameters.
folder_dir = '%s_h%dd%dw%d_channel%d_%e' % (model_name, num_of_hours, num_of_days, num_of_weeks, in_channels, learning_rate)
print('folder_dir:', folder_dir)
params_path = os.path.join('../experiments', dataset_name, folder_dir)
print('params_path:', params_path)
train_loader, train_target_tensor, val_loader, val_target_tensor, test_loader, test_target_tensor, _mean, _std = load_graphdata_channel1(
    graph_signal_matrix_filename, num_of_hours,
    num_of_days, num_of_weeks, DEVICE, batch_size)
adj_mx, distance_mx = get_adjacency_matrix(adj_filename, num_of_vertices, id_filename)
net = make_model(DEVICE, nb_block, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, adj_mx,
                 num_for_predict, len_input)
def train_main():
    """Train from `start_epoch`, checkpointing the best-validation weights,
    then evaluate that best checkpoint on the test set."""
    # Prepare (or reuse) the experiment directory.
    if (start_epoch == 0) and (not os.path.exists(params_path)):
        os.makedirs(params_path)
        print('create params directory %s' % (params_path))
    elif (start_epoch == 0) and (os.path.exists(params_path)):
        shutil.rmtree(params_path)
        os.makedirs(params_path)
        print('delete the old one and create params directory %s' % (params_path))
    elif (start_epoch > 0) and (os.path.exists(params_path)):
        print('train from params directory %s' % (params_path))
    else:
        raise SystemExit('Wrong type of model!')
    print('param list:')
    print('CUDA\t', DEVICE)
    print('in_channels\t', in_channels)
    print('nb_block\t', nb_block)
    print('nb_chev_filter\t', nb_chev_filter)
    print('nb_time_filter\t', nb_time_filter)
    print('time_strides\t', time_strides)
    print('batch_size\t', batch_size)
    print('graph_signal_matrix_filename\t', graph_signal_matrix_filename)
    print('start_epoch\t', start_epoch)
    print('epochs\t', epochs)
    criterion = nn.MSELoss().to(DEVICE)
    optimizer = optim.Adam(net.parameters(), lr=learning_rate)
    sw = SummaryWriter(logdir=params_path, flush_secs=5)
    print(net)
    print('Net\'s state_dict:')
    total_param = 0
    for param_tensor in net.state_dict():
        print(param_tensor, '\t', net.state_dict()[param_tensor].size())
        total_param += np.prod(net.state_dict()[param_tensor].size())
    print('Net\'s total params:', total_param)
    print('Optimizer\'s state_dict:')
    for var_name in optimizer.state_dict():
        print(var_name, '\t', optimizer.state_dict()[var_name])
    global_step = 0
    best_epoch = 0
    best_val_loss = np.inf
    start_time = time()
    if start_epoch > 0:
        params_filename = os.path.join(params_path, 'epoch_%s.params' % start_epoch)
        net.load_state_dict(torch.load(params_filename))
        print('start epoch:', start_epoch)
        print('load weight from: ', params_filename)
    # train model
    for epoch in range(start_epoch, epochs):
        params_filename = os.path.join(params_path, 'epoch_%s.params' % epoch)
        # NOTE(review): evaluation and checkpointing happen *before* this
        # epoch's training pass, so the saved weights are those produced by
        # the previous epoch.
        evaluate_on_test_mstgcn(net, test_loader, test_target_tensor, sw, epoch, _mean, _std)
        val_loss = compute_val_loss_mstgcn(net, val_loader, criterion, sw, epoch)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_epoch = epoch
            torch.save(net.state_dict(), params_filename)
            print('save parameters to file: %s' % params_filename)
        net.train()
        for batch_index, batch_data in enumerate(train_loader):
            encoder_inputs, labels = batch_data
            optimizer.zero_grad()
            outputs = net(encoder_inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            training_loss = loss.item()
            global_step += 1
            sw.add_scalar('training_loss', training_loss, global_step)
            if global_step % 1000 == 0:
                print('global step: %s, training loss: %.2f, time: %.2fs' % (global_step, training_loss, time() - start_time))
    print('best epoch:', best_epoch)
    # apply the best model on the test set
    predict_main(best_epoch, test_loader, test_target_tensor, _mean, _std, 'test')
def predict_main(global_step, data_loader, data_target_tensor, _mean, _std, type):
    """Restore the checkpoint saved at `global_step` and run prediction.

    :param global_step: int, epoch index of the checkpoint to load
    :param data_loader: torch.utils.data.utils.DataLoader
    :param data_target_tensor: tensor of ground-truth targets
    :param _mean: (1, 1, 3, 1) normalization mean
    :param _std: (1, 1, 3, 1) normalization std
    :param type: string tag used when saving the results
    """
    checkpoint = os.path.join(params_path, 'epoch_%s.params' % global_step)
    print('load weight from:', checkpoint)
    net.load_state_dict(torch.load(checkpoint))
    predict_and_save_results_mstgcn(net, data_loader, data_target_tensor,
                                    global_step, _mean, _std, params_path,
                                    type)
if __name__ == "__main__":
    # Run the full training loop; the best checkpoint is evaluated inside.
    train_main()
    # predict_main(224, test_loader, test_target_tensor, _mean, _std, 'test')
| 6,970 | 31.423256 | 150 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/ASTGCN/train_ASTGCN_r.py | #!/usr/bin/env python
# coding: utf-8
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import os
from time import time
import shutil
import argparse
import configparser
from model.ASTGCN_r import make_model
from lib.utils import load_graphdata_channel1, get_adjacency_matrix, compute_val_loss_mstgcn, predict_and_save_results_mstgcn, load_adj, load_data_new
from tensorboardX import SummaryWriter
# ---- command line / configuration-file parsing ------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--config", default='configurations/PEMS04_astgcn.conf', type=str,
                    help="configuration file path")
args = parser.parse_args()
config = configparser.ConfigParser()
print('Read configuration file: %s' % (args.config))
config.read(args.config)
data_config = config['Data']
training_config = config['Training']
# ---- [Data] section ---------------------------------------------------------
adj_filename = data_config['adj_filename']
graph_signal_matrix_filename = data_config['graph_signal_matrix_filename']
if config.has_option('Data', 'id_filename'):
    id_filename = data_config['id_filename']
else:
    id_filename = None
num_of_vertices = int(data_config['num_of_vertices'])
points_per_hour = int(data_config['points_per_hour'])
num_for_predict = int(data_config['num_for_predict'])
len_input = int(data_config['len_input'])
dataset_name = data_config['dataset_name']
# ---- [Training] section -----------------------------------------------------
model_name = training_config['model_name']
ctx = training_config['ctx']  # GPU index used below to build the device string
# os.environ["CUDA_VISIBLE_DEVICES"] = ctx
USE_CUDA = torch.cuda.is_available()
# NOTE(review): DEVICE is always a CUDA device here, even when
# torch.cuda.is_available() is False — CPU-only runs appear unsupported; confirm.
DEVICE = torch.device('cuda:' + str(ctx))
print("CUDA:", USE_CUDA, DEVICE)
learning_rate = float(training_config['learning_rate'])
epochs = int(training_config['epochs'])
start_epoch = int(training_config['start_epoch'])
batch_size = int(training_config['batch_size'])
num_of_weeks = int(training_config['num_of_weeks'])
num_of_days = int(training_config['num_of_days'])
num_of_hours = int(training_config['num_of_hours'])
time_strides = num_of_hours  # first block's temporal stride equals the hour window
nb_chev_filter = int(training_config['nb_chev_filter'])
nb_time_filter = int(training_config['nb_time_filter'])
in_channels = int(training_config['in_channels'])
nb_block = int(training_config['nb_block'])
K = int(training_config['K'])  # Chebyshev polynomial order
# ---- experiment directory, data loaders, model ------------------------------
folder_dir = '%s_h%dd%dw%d_channel%d_%e' % (model_name, num_of_hours, num_of_days, num_of_weeks, in_channels, learning_rate)
print('folder_dir:', folder_dir)
params_path = os.path.join('experiments', dataset_name, folder_dir)
print('params_path:', params_path)
train_loader, train_target_tensor, val_loader, val_target_tensor, test_loader, test_target_tensor, _mean, _std = load_data_new(
    graph_signal_matrix_filename, num_of_hours,
    num_of_days, num_of_weeks, DEVICE, batch_size)
# adj_mx, distance_mx = get_adjacency_matrix(adj_filename, num_of_vertices, id_filename)
adj_mx = load_adj(adj_filename)
net = make_model(DEVICE, nb_block, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, adj_mx,
                 num_for_predict, len_input, num_of_vertices)
def train_main():
    """Train the model end-to-end with per-epoch validation and checkpointing.

    Relies on module-level globals built at import time: net, train_loader,
    val_loader, test_loader, test_target_tensor, params_path, start_epoch,
    epochs, learning_rate, DEVICE, _mean, _std.
    """
    # Prepare (or reuse) the checkpoint directory depending on start_epoch.
    if (start_epoch == 0) and (not os.path.exists(params_path)):
        os.makedirs(params_path)
        print('create params directory %s' % (params_path))
    elif (start_epoch == 0) and (os.path.exists(params_path)):
        # Fresh run: discard any stale checkpoints first.
        shutil.rmtree(params_path)
        os.makedirs(params_path)
        print('delete the old one and create params directory %s' % (params_path))
    elif (start_epoch > 0) and (os.path.exists(params_path)):
        # Resuming: keep the directory so earlier epoch_*.params stay available.
        print('train from params directory %s' % (params_path))
    else:
        raise SystemExit('Wrong type of model!')
    print('param list:')
    print('CUDA\t', DEVICE)
    print('in_channels\t', in_channels)
    print('nb_block\t', nb_block)
    print('nb_chev_filter\t', nb_chev_filter)
    print('nb_time_filter\t', nb_time_filter)
    print('time_strides\t', time_strides)
    print('batch_size\t', batch_size)
    print('graph_signal_matrix_filename\t', graph_signal_matrix_filename)
    print('start_epoch\t', start_epoch)
    print('epochs\t', epochs)
    criterion = nn.MSELoss().to(DEVICE)  # training objective: mean squared error
    optimizer = optim.Adam(net.parameters(), lr=learning_rate)
    sw = SummaryWriter(logdir=params_path, flush_secs=5)  # tensorboard logging
    print(net)
    # Dump parameter shapes and count the total parameter size for bookkeeping.
    print('Net\'s state_dict:')
    total_param = 0
    for param_tensor in net.state_dict():
        print(param_tensor, '\t', net.state_dict()[param_tensor].size())
        total_param += np.prod(net.state_dict()[param_tensor].size())
    print('Net\'s total params:', total_param)
    print('Optimizer\'s state_dict:')
    for var_name in optimizer.state_dict():
        print(var_name, '\t', optimizer.state_dict()[var_name])
    global_step = 0
    best_epoch = 0
    best_val_loss = np.inf
    start_time = time()
    if start_epoch > 0:
        # Resume the weights saved at `start_epoch`.
        params_filename = os.path.join(params_path, 'epoch_%s.params' % start_epoch)
        net.load_state_dict(torch.load(params_filename))
        print('start epoch:', start_epoch)
        print('load weight from: ', params_filename)
    # train model
    for epoch in range(start_epoch, epochs):
        params_filename = os.path.join(params_path, 'epoch_%s.params' % epoch)
        # Validation (and potential checkpointing) runs BEFORE this epoch's
        # training pass, so epoch_N.params holds the weights as of the end of
        # epoch N-1.
        val_loss = compute_val_loss_mstgcn(net, val_loader, criterion, sw, epoch)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_epoch = epoch
            torch.save(net.state_dict(), params_filename)
            print('save parameters to file: %s' % params_filename)
        net.train()  # ensure dropout layers are in train mode
        for batch_index, batch_data in enumerate(train_loader):
            encoder_inputs, labels = batch_data
            optimizer.zero_grad()
            outputs = net(encoder_inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            training_loss = loss.item()
            global_step += 1
            sw.add_scalar('training_loss', training_loss, global_step)
            if global_step % 1000 == 0:
                print('global step: %s, training loss: %.2f, time: %.2fs' % (global_step, training_loss, time() - start_time))
    print('best epoch:', best_epoch)
    # apply the best model on the test set
    predict_main(best_epoch, test_loader, test_target_tensor, _mean, _std, 'test')
def predict_main(global_step, data_loader, data_target_tensor, _mean, _std, type):
    '''
    Restore the checkpoint saved at epoch `global_step` into the module-level
    `net` and export predictions/metrics for the given data set.
    :param global_step: int, epoch index of the checkpoint to load
    :param data_loader: torch.utils.data.utils.DataLoader
    :param data_target_tensor: tensor, ground truth targets
    :param _mean: (1, 1, 3, 1) normalization mean
    :param _std: (1, 1, 3, 1) normalization std
    :param type: string tag for the output files
    :return: None
    '''
    weight_file = os.path.join(params_path, 'epoch_%s.params' % global_step)
    print('load weight from:', weight_file)
    net.load_state_dict(torch.load(weight_file))
    predict_and_save_results_mstgcn(net, data_loader, data_target_tensor, global_step, _mean, _std, params_path, type)
if __name__ == "__main__":
    train_main()
    # predict_main(31, test_loader, test_target_tensor, _mean, _std, 'test')
| 6,972 | 30.840183 | 150 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/ASTGCN/model/ASTGCN_r.py | # -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.utils import scaled_Laplacian, cheb_polynomial
class Spatial_Attention_layer(nn.Module):
    '''
    compute spatial attention scores

    Produces a (B, N, N) matrix of attention weights over vertex pairs.
    '''
    def __init__(self, DEVICE, in_channels, num_of_vertices, num_of_timesteps):
        super(Spatial_Attention_layer, self).__init__()
        # NOTE(review): parameters are allocated with uninitialized memory
        # here; they are expected to be initialized externally (see
        # make_model's xavier/uniform initialization pass).
        self.W1 = nn.Parameter(torch.FloatTensor(num_of_timesteps).to(DEVICE))
        self.W2 = nn.Parameter(torch.FloatTensor(in_channels, num_of_timesteps).to(DEVICE))
        self.W3 = nn.Parameter(torch.FloatTensor(in_channels).to(DEVICE))
        self.bs = nn.Parameter(torch.FloatTensor(1, num_of_vertices, num_of_vertices).to(DEVICE))
        self.Vs = nn.Parameter(torch.FloatTensor(num_of_vertices, num_of_vertices).to(DEVICE))
    def forward(self, x):
        '''
        :param x: (batch_size, N, F_in, T)
        :return: (B, N, N) spatial attention, softmax-normalized over dim 1
        '''
        lhs = torch.matmul(torch.matmul(x, self.W1), self.W2)  # (b,N,F,T)(T)->(b,N,F)(F,T)->(b,N,T)
        rhs = torch.matmul(self.W3, x).transpose(-1, -2)  # (F)(b,N,F,T)->(b,N,T)->(b,T,N)
        product = torch.matmul(lhs, rhs)  # (b,N,T)(b,T,N) -> (B, N, N)
        S = torch.matmul(self.Vs, torch.sigmoid(product + self.bs))  # (N,N)(B, N, N)->(B,N,N)
        S_normalized = F.softmax(S, dim=1)  # each column of the (N, N) map sums to 1
        return S_normalized
class cheb_conv_withSAt(nn.Module):
    '''
    K-order chebyshev graph convolution, modulated by spatial attention
    '''
    def __init__(self, K, cheb_polynomials, in_channels, out_channels):
        '''
        :param K: int, order of the Chebyshev expansion
        :param cheb_polynomials: list of K precomputed (N, N) tensors T_0..T_{K-1}
        :param in_channels: int, num of channels in the input sequence
        :param out_channels: int, num of channels in the output sequence
        '''
        super(cheb_conv_withSAt, self).__init__()
        self.K = K
        self.cheb_polynomials = cheb_polynomials
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.DEVICE = cheb_polynomials[0].device
        # one (F_in, F_out) weight matrix per Chebyshev order; initialized externally
        self.Theta = nn.ParameterList([nn.Parameter(torch.FloatTensor(in_channels, out_channels).to(self.DEVICE)) for _ in range(K)])
    def forward(self, x, spatial_attention):
        '''
        Chebyshev graph convolution operation
        :param x: (batch_size, N, F_in, T)
        :param spatial_attention: (batch_size, N, N) scores from Spatial_Attention_layer
        :return: (batch_size, N, F_out, T)
        '''
        batch_size, num_of_vertices, in_channels, num_of_timesteps = x.shape
        outputs = []
        for time_step in range(num_of_timesteps):  # convolve each time slice independently
            graph_signal = x[:, :, :, time_step]  # (b, N, F_in)
            output = torch.zeros(batch_size, num_of_vertices, self.out_channels).to(self.DEVICE)  # (b, N, F_out)
            for k in range(self.K):
                T_k = self.cheb_polynomials[k]  # (N,N)
                T_k_with_at = T_k.mul(spatial_attention)  # (N,N)*(B,N,N) -> (B,N,N); attention columns sum to 1
                theta_k = self.Theta[k]  # (in_channel, out_channel)
                # transpose before left-multiplying so rows (not columns) sum to 1
                rhs = T_k_with_at.permute(0, 2, 1).matmul(graph_signal)  # (B,N,N)(b,N,F_in) = (b,N,F_in)
                output = output + rhs.matmul(theta_k)  # (b, N, F_in)(F_in, F_out) = (b, N, F_out)
            outputs.append(output.unsqueeze(-1))  # (b, N, F_out, 1)
        return F.relu(torch.cat(outputs, dim=-1))  # (b, N, F_out, T)
class Temporal_Attention_layer(nn.Module):
    '''
    compute temporal attention scores

    Produces a (B, T, T) matrix of attention weights over time-step pairs.
    '''
    def __init__(self, DEVICE, in_channels, num_of_vertices, num_of_timesteps):
        super(Temporal_Attention_layer, self).__init__()
        # NOTE(review): parameters are allocated uninitialized; make_model is
        # expected to run the initialization pass over them.
        self.U1 = nn.Parameter(torch.FloatTensor(num_of_vertices).to(DEVICE))
        self.U2 = nn.Parameter(torch.FloatTensor(in_channels, num_of_vertices).to(DEVICE))
        self.U3 = nn.Parameter(torch.FloatTensor(in_channels).to(DEVICE))
        self.be = nn.Parameter(torch.FloatTensor(1, num_of_timesteps, num_of_timesteps).to(DEVICE))
        self.Ve = nn.Parameter(torch.FloatTensor(num_of_timesteps, num_of_timesteps).to(DEVICE))
    def forward(self, x):
        '''
        :param x: (batch_size, N, F_in, T)
        :return: (B, T, T) temporal attention, softmax-normalized over dim 1
        '''
        _, num_of_vertices, num_of_features, num_of_timesteps = x.shape
        lhs = torch.matmul(torch.matmul(x.permute(0, 3, 2, 1), self.U1), self.U2)
        # x:(B, N, F_in, T) -> (B, T, F_in, N)
        # (B, T, F_in, N)(N) -> (B,T,F_in)
        # (B,T,F_in)(F_in,N)->(B,T,N)
        rhs = torch.matmul(self.U3, x)  # (F)(B,N,F,T)->(B, N, T)
        product = torch.matmul(lhs, rhs)  # (B,T,N)(B,N,T)->(B,T,T)
        E = torch.matmul(self.Ve, torch.sigmoid(product + self.be))  # (B, T, T)
        E_normalized = F.softmax(E, dim=1)
        return E_normalized
class cheb_conv(nn.Module):
    '''
    K-order chebyshev graph convolution (no spatial attention)
    '''
    def __init__(self, K, cheb_polynomials, in_channels, out_channels):
        '''
        :param K: int, order of the Chebyshev expansion
        :param cheb_polynomials: list of K precomputed (N, N) tensors T_0..T_{K-1}
        :param in_channels: int, num of channels in the input sequence
        :param out_channels: int, num of channels in the output sequence
        '''
        super(cheb_conv, self).__init__()
        self.K = K
        self.cheb_polynomials = cheb_polynomials
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.DEVICE = cheb_polynomials[0].device
        # one (F_in, F_out) weight matrix per Chebyshev order; initialized externally
        self.Theta = nn.ParameterList([nn.Parameter(torch.FloatTensor(in_channels, out_channels).to(self.DEVICE)) for _ in range(K)])
    def forward(self, x):
        '''
        Chebyshev graph convolution operation
        :param x: (batch_size, N, F_in, T)
        :return: (batch_size, N, F_out, T)
        '''
        batch_size, num_of_vertices, in_channels, num_of_timesteps = x.shape
        outputs = []
        for time_step in range(num_of_timesteps):  # convolve each time slice independently
            graph_signal = x[:, :, :, time_step]  # (b, N, F_in)
            output = torch.zeros(batch_size, num_of_vertices, self.out_channels).to(self.DEVICE)  # (b, N, F_out)
            for k in range(self.K):
                T_k = self.cheb_polynomials[k]  # (N,N)
                theta_k = self.Theta[k]  # (in_channel, out_channel)
                # right-multiply by T_k in (b, F_in, N) layout, then restore (b, N, F_in)
                rhs = graph_signal.permute(0, 2, 1).matmul(T_k).permute(0, 2, 1)
                output = output + rhs.matmul(theta_k)  # (b, N, F_in)(F_in, F_out)
            outputs.append(output.unsqueeze(-1))  # (b, N, F_out, 1)
        return F.relu(torch.cat(outputs, dim=-1))  # (b, N, F_out, T)
class ASTGCN_block(nn.Module):
    '''
    One ASTGCN block: temporal attention -> spatial attention ->
    attention-weighted Chebyshev graph conv -> temporal conv, with a
    1x1-conv residual shortcut and LayerNorm.
    '''
    def __init__(self, DEVICE, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, cheb_polynomials, num_of_vertices, num_of_timesteps):
        super(ASTGCN_block, self).__init__()
        self.TAt = Temporal_Attention_layer(DEVICE, in_channels, num_of_vertices, num_of_timesteps)
        self.SAt = Spatial_Attention_layer(DEVICE, in_channels, num_of_vertices, num_of_timesteps)
        self.cheb_conv_SAt = cheb_conv_withSAt(K, cheb_polynomials, in_channels, nb_chev_filter)
        # temporal convolution with a (1, 3) kernel; stride shrinks T by time_strides
        self.time_conv = nn.Conv2d(nb_chev_filter, nb_time_filter, kernel_size=(1, 3), stride=(1, time_strides), padding=(0, 1))
        # 1x1 conv so the residual matches nb_time_filter channels and the strided T
        self.residual_conv = nn.Conv2d(in_channels, nb_time_filter, kernel_size=(1, 1), stride=(1, time_strides))
        self.ln = nn.LayerNorm(nb_time_filter)  # normalizes over channels; applied channels-last
    def forward(self, x):
        '''
        :param x: (batch_size, N, F_in, T)
        :return: (batch_size, N, nb_time_filter, T)
        '''
        batch_size, num_of_vertices, num_of_features, num_of_timesteps = x.shape
        # TAt: reweight time steps before computing spatial attention
        temporal_At = self.TAt(x)  # (b, T, T)
        x_TAt = torch.matmul(x.reshape(batch_size, -1, num_of_timesteps), temporal_At).reshape(batch_size, num_of_vertices, num_of_features, num_of_timesteps)
        # SAt: spatial attention computed on the temporally-reweighted signal
        spatial_At = self.SAt(x_TAt)
        # cheb gcn on the ORIGINAL x, modulated by the spatial attention
        spatial_gcn = self.cheb_conv_SAt(x, spatial_At)  # (b,N,F,T)
        # spatial_gcn = self.cheb_conv(x)
        # convolution along the time axis with the (1, 3) kernel
        time_conv_output = self.time_conv(spatial_gcn.permute(0, 2, 1, 3))  # (b,N,F,T)->(b,F,N,T)->(b,F,N,T)
        # residual shortcut via the (1, 1) conv
        x_residual = self.residual_conv(x.permute(0, 2, 1, 3))  # (b,N,F,T)->(b,F,N,T)->(b,F,N,T)
        # add + relu, move channels last for LayerNorm, then back to (b,N,F,T)
        x_residual = self.ln(F.relu(x_residual + time_conv_output).permute(0, 3, 2, 1)).permute(0, 2, 3, 1)
        # (b,F,N,T)->(b,T,N,F) -ln-> (b,T,N,F)->(b,N,F,T)
        return x_residual
class ASTGCN_submodule(nn.Module):
    def __init__(self, DEVICE, nb_block, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, cheb_polynomials, num_for_predict, len_input, num_of_vertices):
        '''
        Stack of ASTGCN blocks followed by a final conv that maps the (reduced)
        temporal dimension to the prediction horizon.
        :param nb_block: number of stacked ASTGCN_block modules
        :param in_channels: input feature count F_in
        :param K: Chebyshev order
        :param nb_chev_filter: graph-conv output channels
        :param nb_time_filter: temporal-conv output channels
        :param time_strides: temporal stride of the first block
        :param cheb_polynomials: list of K (N, N) tensors
        :param num_for_predict: prediction horizon T_out
        :param len_input: input sequence length T_in
        :param num_of_vertices: N
        '''
        super(ASTGCN_submodule, self).__init__()
        # first block consumes the raw input and applies the temporal stride...
        self.BlockList = nn.ModuleList([ASTGCN_block(DEVICE, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, cheb_polynomials, num_of_vertices, len_input)])
        # ...remaining blocks run at stride 1 on the reduced time axis
        self.BlockList.extend([ASTGCN_block(DEVICE, nb_time_filter, K, nb_chev_filter, nb_time_filter, 1, cheb_polynomials, num_of_vertices, len_input//time_strides) for _ in range(nb_block-1)])
        self.final_conv = nn.Conv2d(int(len_input/time_strides), num_for_predict, kernel_size=(1, nb_time_filter))
        self.DEVICE = DEVICE
        self.to(DEVICE)
    def forward(self, x):
        '''
        :param x: (B, N_nodes, F_in, T_in)
        :return: (B, N_nodes, T_out)
        '''
        for block in self.BlockList:
            x = block(x)
        output = self.final_conv(x.permute(0, 3, 1, 2))[:, :, :, -1].permute(0, 2, 1)
        # (b,N,F,T)->(b,T,N,F)-conv<1,F>->(b,T_out,N,1)->(b,T_out,N)->(b,N,T_out)
        return output
def make_model(DEVICE, nb_block, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, adj_mx, num_for_predict, len_input, num_of_vertices):
    '''
    Build an ASTGCN_submodule with Chebyshev polynomials precomputed from the
    adjacency matrix, and initialize every parameter.
    :param DEVICE: torch.device
    :param nb_block: number of stacked ASTGCN blocks
    :param in_channels: input feature count F_in
    :param K: Chebyshev order
    :param nb_chev_filter: graph-conv output channels
    :param nb_time_filter: temporal-conv output channels
    :param time_strides: temporal stride of the first block
    :param adj_mx: (N, N) adjacency matrix (numpy)
    :param num_for_predict: prediction horizon T_out
    :param len_input: input sequence length T_in
    :param num_of_vertices: N
    :return: initialized ASTGCN_submodule (already moved to DEVICE)
    '''
    L_tilde = scaled_Laplacian(adj_mx)
    cheb_polynomials = [torch.from_numpy(i).type(torch.FloatTensor).to(DEVICE) for i in cheb_polynomial(L_tilde, K)]
    model = ASTGCN_submodule(DEVICE, nb_block, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, cheb_polynomials, num_for_predict, len_input, num_of_vertices)
    # The attention/Theta parameters are allocated uninitialized inside the
    # submodules, so every parameter must be initialized here:
    # xavier for >=2-D weights, uniform for 1-D vectors.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
        else:
            nn.init.uniform_(p)
    return model
Traffic-Benchmark | Traffic-Benchmark-master/methods/ASTGCN/model/MSTGCN_r.py | # -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.utils import scaled_Laplacian, cheb_polynomial
class cheb_conv(nn.Module):
    '''
    K-order chebyshev graph convolution
    '''
    def __init__(self, K, cheb_polynomials, in_channels, out_channels):
        '''
        :param K: int, order of the Chebyshev expansion
        :param cheb_polynomials: list of K precomputed (N, N) tensors T_0..T_{K-1}
        :param in_channels: int, num of channels in the input sequence
        :param out_channels: int, num of channels in the output sequence
        '''
        super(cheb_conv, self).__init__()
        self.K = K
        self.cheb_polynomials = cheb_polynomials
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.DEVICE = cheb_polynomials[0].device
        # one (F_in, F_out) weight matrix per Chebyshev order; initialized externally
        self.Theta = nn.ParameterList([nn.Parameter(torch.FloatTensor(in_channels, out_channels).to(self.DEVICE)) for _ in range(K)])
    def forward(self, x):
        '''
        Chebyshev graph convolution operation
        :param x: (batch_size, N, F_in, T)
        :return: (batch_size, N, F_out, T)
        '''
        batch_size, num_of_vertices, in_channels, num_of_timesteps = x.shape
        outputs = []
        for time_step in range(num_of_timesteps):  # convolve each time slice independently
            graph_signal = x[:, :, :, time_step]  # (b, N, F_in)
            output = torch.zeros(batch_size, num_of_vertices, self.out_channels).to(self.DEVICE)  # (b, N, F_out)
            for k in range(self.K):
                T_k = self.cheb_polynomials[k]  # (N,N)
                theta_k = self.Theta[k]  # (in_channel, out_channel)
                # right-multiply by T_k in (b, F_in, N) layout, then restore (b, N, F_in)
                rhs = graph_signal.permute(0, 2, 1).matmul(T_k).permute(0, 2, 1)
                output = output + rhs.matmul(theta_k)  # (b, N, F_in)(F_in, F_out)
            outputs.append(output.unsqueeze(-1))  # (b, N, F_out, 1)
        return F.relu(torch.cat(outputs, dim=-1))  # (b, N, F_out, T)
class MSTGCN_block(nn.Module):
    '''
    One MSTGCN block: Chebyshev graph conv -> temporal conv, with a
    1x1-conv residual shortcut and LayerNorm (no attention).
    '''
    def __init__(self, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, cheb_polynomials):
        super(MSTGCN_block, self).__init__()
        self.cheb_conv = cheb_conv(K, cheb_polynomials, in_channels, nb_chev_filter)
        # temporal convolution with a (1, 3) kernel; stride shrinks T by time_strides
        self.time_conv = nn.Conv2d(nb_chev_filter, nb_time_filter, kernel_size=(1, 3), stride=(1, time_strides), padding=(0, 1))
        # 1x1 conv so the residual matches nb_time_filter channels and the strided T
        self.residual_conv = nn.Conv2d(in_channels, nb_time_filter, kernel_size=(1, 1), stride=(1, time_strides))
        self.ln = nn.LayerNorm(nb_time_filter)  # normalizes over channels; applied channels-last
    def forward(self, x):
        '''
        :param x: (batch_size, N, F_in, T)
        :return: (batch_size, N, nb_time_filter, T)
        '''
        # cheb gcn
        spatial_gcn = self.cheb_conv(x)  # (b,N,F,T)
        # convolution along the time axis
        time_conv_output = self.time_conv(spatial_gcn.permute(0, 2, 1, 3))  # (b,F,N,T)
        # residual shortcut
        x_residual = self.residual_conv(x.permute(0, 2, 1, 3))  # (b,F,N,T)
        # add + relu, move channels last for LayerNorm, then back to (b,N,F,T)
        x_residual = self.ln(F.relu(x_residual + time_conv_output).permute(0, 3, 2, 1)).permute(0, 2, 3, 1)  # (b,N,F,T)
        return x_residual
class MSTGCN_submodule(nn.Module):
    def __init__(self, DEVICE, nb_block, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, cheb_polynomials, num_for_predict, len_input):
        '''
        Stack of MSTGCN blocks followed by a final conv that maps the (reduced)
        temporal dimension to the prediction horizon.
        :param nb_block: number of stacked MSTGCN_block modules
        :param in_channels: input feature count F_in
        :param K: Chebyshev order
        :param nb_chev_filter: graph-conv output channels
        :param nb_time_filter: temporal-conv output channels
        :param time_strides: temporal stride of the first block
        :param cheb_polynomials: list of K (N, N) tensors
        :param num_for_predict: prediction horizon T_out
        :param len_input: input sequence length T_in
        '''
        super(MSTGCN_submodule, self).__init__()
        # first block applies the temporal stride; the rest run at stride 1
        self.BlockList = nn.ModuleList([MSTGCN_block(in_channels, K, nb_chev_filter, nb_time_filter, time_strides, cheb_polynomials)])
        self.BlockList.extend([MSTGCN_block(nb_time_filter, K, nb_chev_filter, nb_time_filter, 1, cheb_polynomials) for _ in range(nb_block-1)])
        self.final_conv = nn.Conv2d(int(len_input/time_strides), num_for_predict, kernel_size=(1, nb_time_filter))
        self.DEVICE = DEVICE
        self.to(DEVICE)
    def forward(self, x):
        '''
        :param x: (B, N_nodes, F_in, T_in)
        :return: (B, N_nodes, T_out)
        '''
        for block in self.BlockList:
            x = block(x)
        # (b,N,F,T)->(b,T,N,F)-conv<1,F>->(b,T_out,N,1)->(b,T_out,N)->(b,N,T_out)
        output = self.final_conv(x.permute(0, 3, 1, 2))[:, :, :, -1].permute(0, 2, 1)
        return output
def make_model(DEVICE, nb_block, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, adj_mx, num_for_predict, len_input):
    '''
    Build an MSTGCN_submodule with Chebyshev polynomials precomputed from the
    adjacency matrix, and xavier-initialize its weight matrices.
    :param DEVICE: torch.device
    :param nb_block: number of stacked MSTGCN blocks
    :param in_channels: input feature count F_in
    :param K: Chebyshev order
    :param nb_chev_filter: graph-conv output channels
    :param nb_time_filter: temporal-conv output channels
    :param time_strides: temporal stride of the first block
    :param adj_mx: (N, N) adjacency matrix (numpy)
    :param num_for_predict: prediction horizon T_out
    :param len_input: input sequence length T_in
    :return: initialized MSTGCN_submodule (already moved to DEVICE)
    '''
    L_tilde = scaled_Laplacian(adj_mx)
    cheb_polynomials = [torch.from_numpy(i).type(torch.FloatTensor).to(DEVICE) for i in cheb_polynomial(L_tilde, K)]
    model = MSTGCN_submodule(DEVICE, nb_block, in_channels, K, nb_chev_filter, nb_time_filter, time_strides, cheb_polynomials, num_for_predict, len_input)
    # Xavier-init all matrix-shaped parameters. The hand-allocated parameters
    # in MSTGCN (cheb_conv.Theta) are 2-D so they are covered; 1-D conv/LN
    # parameters keep their library defaults (unlike the ASTGCN variant,
    # which also uniform-inits 1-D parameters).
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
Traffic-Benchmark | Traffic-Benchmark-master/methods/ASTGCN/lib/utils.py | import os
import numpy as np
import torch
import torch.utils.data
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from .metrics import masked_mape_np
from scipy.sparse.linalg import eigs
import pickle
def load_pickle(pickle_file):
    """Unpickle `pickle_file`, retrying with latin1 for py2-era pickles.

    Any other failure is reported and re-raised.
    """
    try:
        with open(pickle_file, 'rb') as f:
            return pickle.load(f)
    except UnicodeDecodeError:
        # pickles written by Python 2 need an explicit latin1 decoding
        with open(pickle_file, 'rb') as f:
            return pickle.load(f, encoding='latin1')
    except Exception as e:
        print('Unable to load data ', pickle_file, ':', e)
        raise
def load_adj(pkl_filename):
    """Return only the adjacency matrix from a METR-LA-style pickle.

    The pickle is expected to hold (sensor_ids, sensor_id_to_ind, adj).
    """
    _, _, adj = load_pickle(pkl_filename)
    return adj
def re_normalization(x, mean, std):
    """Invert z-score normalization: return x * std + mean."""
    return x * std + mean
def max_min_normalization(x, _max, _min):
    """Scale x from [_min, _max] into [-1, 1]."""
    scaled = 1. * (x - _min) / (_max - _min)  # first to [0, 1]
    return scaled * 2. - 1.                   # then to [-1, 1]
def re_max_min_normalization(x, _max, _min):
    """Invert max_min_normalization: map x from [-1, 1] back to [_min, _max]."""
    unit = (x + 1.) / 2.                 # back to [0, 1]
    return 1. * unit * (_max - _min) + _min
def get_adjacency_matrix(distance_df_filename, num_of_vertices, id_filename=None):
    '''
    Build the graph connectivity from an edge-list CSV (or load a saved .npy).

    Parameters
    ----------
    distance_df_filename: str, path of the csv file containing edge information
        (header line, then rows "from,to,distance"); if the path contains
        'npy' it is loaded directly as a dense matrix.
    num_of_vertices: int, the number of vertices
    id_filename: optional str; file with one raw node id per line, used to
        remap arbitrary node ids onto 0-based indices.

    Returns
    ----------
    (A, distance_A): np.ndarray binary adjacency and edge-distance matrices,
        or (adj, None) for the .npy path.
    '''
    if 'npy' in distance_df_filename:
        return np.load(distance_df_filename), None
    import csv
    n = int(num_of_vertices)
    A = np.zeros((n, n), dtype=np.float32)
    distance_A = np.zeros((n, n), dtype=np.float32)
    id_map = None
    if id_filename:
        # map raw node ids onto consecutive 0-based indices
        with open(id_filename, 'r') as f:
            id_map = {int(node): idx for idx, node in enumerate(f.read().strip().split('\n'))}
    with open(distance_df_filename, 'r') as f:
        f.readline()  # skip the header line
        for row in csv.reader(f):
            if len(row) != 3:
                continue
            i, j, distance = int(row[0]), int(row[1]), float(row[2])
            if id_map is not None:
                i, j = id_map[i], id_map[j]
            A[i, j] = 1
            distance_A[i, j] = distance
    return A, distance_A
def scaled_Laplacian(W):
    '''
    compute \tilde{L} = 2L / lambda_max - I for the Chebyshev expansion

    Parameters
    ----------
    W: np.ndarray, shape (N, N), adjacency matrix (N = num of vertices)

    Returns
    ----------
    scaled_Laplacian: np.ndarray, shape (N, N)
    '''
    assert W.shape[0] == W.shape[1]
    degree = np.diag(np.sum(W, axis=1))
    laplacian = degree - W
    # largest (real part) eigenvalue of the combinatorial Laplacian
    lambda_max = eigs(laplacian, k=1, which='LR')[0].real
    return (2 * laplacian) / lambda_max - np.identity(W.shape[0])
def cheb_polynomial(L_tilde, K):
    '''
    compute a list of chebyshev polynomials from T_0 to T_{K-1}

    Parameters
    ----------
    L_tilde: scaled Laplacian, np.ndarray, shape (N, N)
    K: the maximum order of chebyshev polynomials (K >= 1)

    Returns
    ----------
    cheb_polynomials: list(np.ndarray), length: K, from T_0 to T_{K-1}
    '''
    N = L_tilde.shape[0]
    # Seed the recurrence with T_0 = I and T_1 = L_tilde.
    cheb_polynomials = [np.identity(N), L_tilde.copy()]
    for i in range(2, K):
        # NOTE(review): `*` is ELEMENT-WISE multiplication, not a matrix
        # product, so for K > 2 these are not the textbook matrix Chebyshev
        # polynomials T_k = 2 L @ T_{k-1} - T_{k-2}. Kept as-is to preserve
        # the behavior the models were trained with — confirm before changing.
        cheb_polynomials.append(2 * L_tilde * cheb_polynomials[i - 1] - cheb_polynomials[i - 2])
    # Bug fix: the seed list always held two entries, so K == 1 previously
    # returned [T_0, T_1] instead of the documented single polynomial.
    # Truncate to exactly K entries (a no-op for K >= 2).
    return cheb_polynomials[:K]
def generate_data(dataset_dir):
    """Load {train,val,test}.npz splits and z-score the inputs by train stats.

    Each npz holds arrays `x` (B, T, N, F) and `y` (B, T, N, F). Only the
    first input feature is kept; x is reordered to (B, N, 1, T) and y to
    (B, N, T). The scalar mean/std are computed from the training inputs
    only and applied to all three splits; targets are returned un-normalized.

    :param dataset_dir: directory containing train.npz / val.npz / test.npz
    :return: (x_train, y_train, x_val, y_val, x_test, y_test, mean, std)
    """
    splits = {}
    for category in ('train', 'val', 'test'):
        raw = np.load(os.path.join(dataset_dir, category + '.npz'))
        # keep only the first feature; (B, T, N, 1) -> (B, N, 1, T)
        splits['x_' + category] = np.transpose(raw['x'][..., :1], (0, 2, 3, 1))
        # (B, T, N) -> (B, N, T)
        splits['y_' + category] = np.transpose(raw['y'][..., 0], (0, 2, 1))
    # normalization statistics come from the training inputs only
    mean = splits['x_train'].mean()
    std = splits['x_train'].std()
    return ((splits['x_train'] - mean) / std, splits['y_train'],
            (splits['x_val'] - mean) / std, splits['y_val'],
            (splits['x_test'] - mean) / std, splits['y_test'],
            mean, std)
def load_data_new(graph_signal_matrix_filename, num_of_hours, num_of_days, num_of_weeks, DEVICE, batch_size, shuffle=True):
    '''
    Data-preparation helper for the PEMS-style datasets.

    Loads the train/val/test splits via generate_data (x is z-score
    normalized by training statistics, y is kept as real values) and wraps
    each split in a DataLoader for the spatio-temporal models
    (MSTGCN / ASTGCN); each sample contains all monitoring stations at once.

    NOTE(review): num_of_hours / num_of_days / num_of_weeks are accepted but
    unused here; kept for interface parity with load_graphdata_channel1.
    :param graph_signal_matrix_filename: str, directory holding
        {train,val,test}.npz (passed straight to generate_data)
    :param num_of_hours: int (unused)
    :param num_of_days: int (unused)
    :param num_of_weeks: int (unused)
    :param DEVICE: torch.device the tensors are moved to
    :param batch_size: int
    :param shuffle: bool, shuffle the training loader
    :return:
        three DataLoaders plus target tensors and normalization stats:
        x tensors: (B, N_nodes, in_feature, T_input)
        target tensors: (B, N_nodes, T_output)
        mean, std: scalars from the training inputs
    '''
    # (stale commented-out preprocessed-npz loading code removed in review)
    train_x, train_target, val_x, val_target, test_x, test_target, mean, std = generate_data(graph_signal_matrix_filename)
    # ------- train_loader -------
    train_x_tensor = torch.from_numpy(train_x).type(torch.FloatTensor).to(DEVICE)  # (B, N, F, T)
    train_target_tensor = torch.from_numpy(train_target).type(torch.FloatTensor).to(DEVICE)  # (B, N, T)
    train_dataset = torch.utils.data.TensorDataset(train_x_tensor, train_target_tensor)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle)
    # ------- val_loader -------
    val_x_tensor = torch.from_numpy(val_x).type(torch.FloatTensor).to(DEVICE)  # (B, N, F, T)
    val_target_tensor = torch.from_numpy(val_target).type(torch.FloatTensor).to(DEVICE)  # (B, N, T)
    val_dataset = torch.utils.data.TensorDataset(val_x_tensor, val_target_tensor)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
    # ------- test_loader -------
    test_x_tensor = torch.from_numpy(test_x).type(torch.FloatTensor).to(DEVICE)  # (B, N, F, T)
    test_target_tensor = torch.from_numpy(test_target).type(torch.FloatTensor).to(DEVICE)  # (B, N, T)
    test_dataset = torch.utils.data.TensorDataset(test_x_tensor, test_target_tensor)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    # print
    print('train:', train_x_tensor.size(), train_target_tensor.size())
    print('val:', val_x_tensor.size(), val_target_tensor.size())
    print('test:', test_x_tensor.size(), test_target_tensor.size())
    return train_loader, train_target_tensor, val_loader, val_target_tensor, test_loader, test_target_tensor, mean, std
def load_graphdata_channel1(graph_signal_matrix_filename, num_of_hours, num_of_days, num_of_weeks, DEVICE, batch_size, shuffle=True):
    '''
    Data-preparation helper for the PEMS datasets (preprocessed-npz variant).

    Loads the *_astcgn.npz file produced by the preprocessing script, whose
    name encodes the hour/day/week history configuration. In the stored file
    x is max-min normalized while y holds real values; only the first input
    feature channel is kept. Each split is wrapped in a DataLoader for the
    spatio-temporal models (MSTGCN / ASTGCN); each sample contains all
    monitoring stations at once.
    :param graph_signal_matrix_filename: str, path of the raw graph-signal
        file; the preprocessed filename is derived from it
    :param num_of_hours: int, recent-hour history per sample
    :param num_of_days: int, same-time-of-day history
    :param num_of_weeks: int, same-time-of-week history
    :param DEVICE: torch.device the tensors are moved to
    :param batch_size: int
    :param shuffle: bool, shuffle the training loader
    :return:
        three DataLoaders plus target tensors and normalization stats:
        x tensors: (B, N_nodes, 1, T_input); targets: (B, N_nodes, T_output)
        mean, std: (1, 1, 1, 1) first-channel slices of the stored stats
    '''
    file = os.path.basename(graph_signal_matrix_filename).split('.')[0]
    dirpath = os.path.dirname(graph_signal_matrix_filename)
    # preprocessed file name encodes the history configuration
    filename = os.path.join(dirpath,
                            file + '_r' + str(num_of_hours) + '_d' + str(num_of_days) + '_w' + str(num_of_weeks)) + '_astcgn'
    print('load file:', filename)
    file_data = np.load(filename + '.npz')
    train_x = file_data['train_x']  # e.g. (10181, 307, 3, 12)
    train_x = train_x[:, :, 0:1, :]  # keep only the first feature channel
    train_target = file_data['train_target']  # (10181, 307, 12)
    val_x = file_data['val_x']
    val_x = val_x[:, :, 0:1, :]
    val_target = file_data['val_target']
    test_x = file_data['test_x']
    test_x = test_x[:, :, 0:1, :]
    test_target = file_data['test_target']
    mean = file_data['mean'][:, :, 0:1, :]  # (1, 1, 3, 1) -> first channel only
    std = file_data['std'][:, :, 0:1, :]  # (1, 1, 3, 1) -> first channel only
    # ------- train_loader -------
    train_x_tensor = torch.from_numpy(train_x).type(torch.FloatTensor).to(DEVICE)  # (B, N, F, T)
    train_target_tensor = torch.from_numpy(train_target).type(torch.FloatTensor).to(DEVICE)  # (B, N, T)
    train_dataset = torch.utils.data.TensorDataset(train_x_tensor, train_target_tensor)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle)
    # ------- val_loader -------
    val_x_tensor = torch.from_numpy(val_x).type(torch.FloatTensor).to(DEVICE)  # (B, N, F, T)
    val_target_tensor = torch.from_numpy(val_target).type(torch.FloatTensor).to(DEVICE)  # (B, N, T)
    val_dataset = torch.utils.data.TensorDataset(val_x_tensor, val_target_tensor)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
    # ------- test_loader -------
    test_x_tensor = torch.from_numpy(test_x).type(torch.FloatTensor).to(DEVICE)  # (B, N, F, T)
    test_target_tensor = torch.from_numpy(test_target).type(torch.FloatTensor).to(DEVICE)  # (B, N, T)
    test_dataset = torch.utils.data.TensorDataset(test_x_tensor, test_target_tensor)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    # print
    print('train:', train_x_tensor.size(), train_target_tensor.size())
    print('val:', val_x_tensor.size(), val_target_tensor.size())
    print('test:', test_x_tensor.size(), test_target_tensor.size())
    return train_loader, train_target_tensor, val_loader, val_target_tensor, test_loader, test_target_tensor, mean, std
def compute_val_loss_mstgcn(net, val_loader, criterion, sw, epoch, limit=None):
    '''
    Compute the mean loss over the validation set and log it to tensorboard.
    :param net: model
    :param val_loader: torch.utils.data.utils.DataLoader
    :param criterion: torch.nn.MSELoss
    :param sw: tensorboardX.SummaryWriter
    :param epoch: int, current epoch (used as the logging step)
    :param limit: int, optional cap on the number of validation batches
    :return: val_loss, mean of the per-batch losses
    '''
    net.train(False)  # ensure dropout layers are in evaluation mode
    with torch.no_grad():
        nb_batches = len(val_loader)
        batch_losses = []  # one loss value per batch
        for batch_index, (encoder_inputs, labels) in enumerate(val_loader):
            loss = criterion(net(encoder_inputs), labels)
            batch_losses.append(loss.item())
            if batch_index % 100 == 0:
                print('validation batch %s / %s, loss: %.2f' % (batch_index + 1, nb_batches, loss.item()))
            if (limit is not None) and batch_index >= limit:
                break
        validation_loss = sum(batch_losses) / len(batch_losses)
        sw.add_scalar('validation_loss', validation_loss, epoch)
    return validation_loss
def evaluate_on_test_mstgcn(net, test_loader, test_target_tensor, sw, epoch, _mean, _std):
    '''
    Compute MAE, RMSE and MAPE of the predictions for every horizon step
    on the testing set and log them to the summary writer.

    :param net: model
    :param test_loader: torch.utils.data.DataLoader
    :param test_target_tensor: torch.Tensor, ground-truth targets
    :param sw: summary writer with add_scalar, or None to skip logging
    :param epoch: int, current epoch
    :param _mean: normalization mean (unused in this function)
    :param _std: normalization std (unused in this function)
    '''
    net.train(False)  # evaluation mode
    with torch.no_grad():
        num_batches = len(test_loader)
        targets = test_target_tensor.cpu().numpy()
        batch_outputs = []  # model output of every batch
        for batch_index, (encoder_inputs, labels) in enumerate(test_loader):
            batch_outputs.append(net(encoder_inputs).detach().cpu().numpy())
            if batch_index % 100 == 0:
                print('predicting testing set batch %s / %s' % (batch_index + 1, num_batches))
        prediction = np.concatenate(batch_outputs, 0)
        # score each prediction step separately along the last axis
        for step in range(prediction.shape[2]):
            assert targets.shape[0] == prediction.shape[0]
            print('current epoch: %s, predict %s points' % (epoch, step))
            mae = mean_absolute_error(targets[:, :, step], prediction[:, :, step])
            rmse = mean_squared_error(targets[:, :, step], prediction[:, :, step]) ** 0.5
            mape = masked_mape_np(targets[:, :, step], prediction[:, :, step], 0)
            print('MAE: %.2f' % (mae))
            print('RMSE: %.2f' % (rmse))
            print('MAPE: %.2f' % (mape))
            print()
            if sw:
                sw.add_scalar('MAE_%s_points' % (step), mae, epoch)
                sw.add_scalar('RMSE_%s_points' % (step), rmse, epoch)
                sw.add_scalar('MAPE_%s_points' % (step), mape, epoch)
def predict_and_save_results_mstgcn(net, data_loader, data_target_tensor, global_step, _mean, _std, params_path, type):
    '''
    Run the model over `data_loader`, save the de-normalized inputs, the
    predictions and the targets to an .npz file under `params_path`, then
    print MAE/RMSE/MAPE per horizon step plus overall scores.

    :param net: nn.Module
    :param data_loader: torch.utils.data.DataLoader
    :param data_target_tensor: torch.Tensor, ground-truth targets
    :param global_step: int, used in the output file name
    :param _mean: normalization mean used to de-normalize the saved inputs
    :param _std: normalization std used to de-normalize the saved inputs
    :param params_path: the path for saving the results
    :param type: str, split tag used in the output file name (e.g. 'test')
    '''
    net.train(False)  # evaluation mode
    with torch.no_grad():
        targets = data_target_tensor.cpu().numpy()
        num_batches = len(data_loader)
        outputs_list = []  # model output of every batch
        inputs_list = []   # first input feature channel of every batch
        for batch_index, (encoder_inputs, labels) in enumerate(data_loader):
            inputs_list.append(encoder_inputs[:, :, 0:1].cpu().numpy())
            outputs_list.append(net(encoder_inputs).detach().cpu().numpy())
            if batch_index % 100 == 0:
                print('predicting data set batch %s / %s' % (batch_index + 1, num_batches))
        inputs = re_normalization(np.concatenate(inputs_list, 0), _mean, _std)
        prediction = np.concatenate(outputs_list, 0)
        print('input:', inputs.shape)
        print('prediction:', prediction.shape)
        print('data_target_tensor:', targets.shape)
        output_filename = os.path.join(params_path, 'output_epoch_%s_%s' % (global_step, type))
        np.savez(output_filename, input=inputs, prediction=prediction, data_target_tensor=targets)
        # per-step error metrics
        excel_list = []
        for step in range(prediction.shape[2]):
            assert targets.shape[0] == prediction.shape[0]
            print('current epoch: %s, predict %s points' % (global_step, step))
            mae = mean_absolute_error(targets[:, :, step], prediction[:, :, step])
            rmse = mean_squared_error(targets[:, :, step], prediction[:, :, step]) ** 0.5
            mape = masked_mape_np(targets[:, :, step], prediction[:, :, step], 0)
            print('MAE: %.2f' % (mae))
            print('RMSE: %.2f' % (rmse))
            print('MAPE: %.2f' % (mape))
            excel_list.extend([mae, rmse, mape])
        # overall scores across all horizon steps
        mae = mean_absolute_error(targets.reshape(-1, 1), prediction.reshape(-1, 1))
        rmse = mean_squared_error(targets.reshape(-1, 1), prediction.reshape(-1, 1)) ** 0.5
        mape = masked_mape_np(targets.reshape(-1, 1), prediction.reshape(-1, 1), 0)
        print('all MAE: %.2f' % (mae))
        print('all RMSE: %.2f' % (rmse))
        print('all MAPE: %.2f' % (mape))
        excel_list.extend([mae, rmse, mape])
        print(excel_list)
| 18,213 | 34.996047 | 208 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/STSGCN/main.py | # -*- coding:utf-8 -*-
import setproctitle
setproctitle.setproctitle("STSGCN@lifuxian")
import time
import json
import argparse
import numpy as np
import mxnet as mx
from utils import (construct_model, generate_data,
masked_mae_np, masked_mape_np, masked_mse_np)
# ---- command-line arguments ----
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, help='configuration file')
parser.add_argument("--test", action="store_true", help="test program")
parser.add_argument("--plot", help="plot network graph", action="store_true")
parser.add_argument("--save", action="store_true", help="save model")
args = parser.parse_args()
config_filename = args.config
with open(config_filename, 'r') as f:
    config = json.loads(f.read())
print(json.dumps(config, sort_keys=True, indent=4))
# build the symbolic STSGCN network described by the config
net = construct_model(config)
batch_size = config['batch_size']
num_of_vertices = config['num_of_vertices']
graph_signal_matrix_filename = config['graph_signal_matrix_filename']
# 'ctx' is either a list of GPU ids (multi-GPU) or a single GPU id
if isinstance(config['ctx'], list):
    ctx = [mx.gpu(i) for i in config['ctx']]
elif isinstance(config['ctx'], int):
    ctx = mx.gpu(config['ctx'])
# one NDArrayIter per split; only the training split (idx 0) carries labels
# and is shuffled, ground-truth y of val/test is kept aside for evaluation
loaders = []
true_values = []
for idx, (x, y) in enumerate(generate_data(graph_signal_matrix_filename)):
    if args.test:
        # --test: truncate each split to 100 samples for a quick smoke run
        x = x[: 100]
        y = y[: 100]
    y = y.squeeze(axis=-1)
    print(x.shape, y.shape)
    loaders.append(
        mx.io.NDArrayIter(
            x, y if idx == 0 else None,
            batch_size=batch_size,
            shuffle=(idx == 0),
            label_name='label'
        )
    )
    if idx == 0:
        training_samples = x.shape[0]
    else:
        true_values.append(y)
train_loader, val_loader, test_loader = loaders
val_y, test_y = true_values
global_epoch = 1
global_train_steps = training_samples // batch_size + 1
all_info = []  # one record per validation improvement, see training()
epochs = config['epochs']
# wrap the symbol into an executable module and bind input/label shapes
mod = mx.mod.Module(
    net,
    data_names=['data'],
    label_names=['label'],
    context=ctx
)
mod.bind(
    data_shapes=[(
        'data',
        (batch_size, config['points_per_hour'], num_of_vertices, 1)
    ), ],
    label_shapes=[(
        'label',
        (batch_size, config['points_per_hour'], num_of_vertices)
    )]
)
mod.init_params(initializer=mx.init.Xavier(magnitude=0.0003))
# polynomial learning-rate decay with a one-epoch warmup
lr_sch = mx.lr_scheduler.PolyScheduler(
    max_update=global_train_steps * epochs * config['max_update_factor'],
    base_lr=config['learning_rate'],
    pwr=2,
    warmup_steps=global_train_steps
)
mod.init_optimizer(
    optimizer=config['optimizer'],
    optimizer_params=(('lr_scheduler', lr_sch),)
)
# count trainable parameters
num_of_parameters = 0
for param_name, param_value in mod.get_params()[0].items():
    num_of_parameters += np.prod(param_value.shape)
print("Number of Parameters: {}".format(num_of_parameters), flush=True)
# track RMSE/MAE of the prediction output during training
metric = mx.metric.create(['RMSE', 'MAE'], output_names=['pred_output'])
if args.plot:
    graph = mx.viz.plot_network(net)
    graph.format = 'png'
    graph.render('graph')
def training(epochs):
    """Train `mod` for at most `epochs` epochs with early stopping.

    Uses module-level state: `mod`, `metric`, `train_loader`, `val_loader`,
    `test_loader`, `val_y`, `test_y`, `all_info` and `global_epoch`.
    Whenever the validation loss improves, the test set is evaluated and a
    record [epoch, train_mae, val_loss, mae, mape, rmse, per_horizon_scores]
    is appended to `all_info`.
    """
    global global_epoch
    lowest_val_loss = 1e6
    tolerance = 50  # early-stopping patience, in epochs
    cnt_temp = 0    # epochs since the last validation improvement
    for epoch in range(epochs):
        t = time.time()
        info = [global_epoch]
        train_loader.reset()
        metric.reset()
        # one full pass over the training set
        for idx, databatch in enumerate(train_loader):
            mod.forward_backward(databatch)
            mod.update_metric(metric, databatch.label)
            mod.update()
        metric_values = dict(zip(*metric.get()))
        print('training: Epoch: %s, RMSE: %.2f, MAE: %.2f, time: %.2f s' % (
            global_epoch, metric_values['rmse'], metric_values['mae'],
            time.time() - t), flush=True)
        info.append(metric_values['mae'])
        # validation pass: masked MAE between predictions and val_y
        val_loader.reset()
        prediction = mod.predict(val_loader)[1].asnumpy()
        loss = masked_mae_np(val_y, prediction, 0)
        print('validation: Epoch: %s, loss: %.2f, time: %.2f s' % (
            global_epoch, loss, time.time() - t), flush=True)
        info.append(loss)
        if loss < lowest_val_loss:
            # validation improved: evaluate on the test set, one horizon at a time
            test_loader.reset()
            prediction = mod.predict(test_loader)[1].asnumpy()
            tmp_info = []
            for idx in range(config['num_for_predict']):
                y, x = test_y[:, idx : idx + 1, :], prediction[:, idx : idx + 1, :]
                tmp_info.append((
                    masked_mae_np(y, x, 0),
                    masked_mape_np(y, x, 0),
                    masked_mse_np(y, x, 0) ** 0.5
                ))
            # report the scores of the last horizon step
            mae, mape, rmse = tmp_info[-1]
            print('test: Epoch: {}, MAE: {:.2f}, MAPE: {:.2f}, RMSE: {:.2f}, '
                  'time: {:.2f}s'.format(
                      global_epoch, mae, mape, rmse, time.time() - t))
            print(flush=True)
            info.extend((mae, mape, rmse))
            info.append(tmp_info)
            all_info.append(info)
            lowest_val_loss = loss
            cnt_temp = 0
        else:
            cnt_temp += 1
        if cnt_temp < tolerance:
            global_epoch += 1
        else:
            print('earlystopping at epoch ', epoch)
            break
if args.test:
    epochs = 5
training(epochs)
# each record in all_info is
# [epoch, train_mae, val_loss, mae, mape, rmse, per_horizon_scores];
# pick the record with the lowest validation loss (index 2)
the_best = min(all_info, key=lambda x: x[2])
print('step: {}\ntraining loss: {:.2f}\nvalidation loss: {:.2f}\n'.format(*the_best))
# report (MAE, MAPE, RMSE) at horizons 3, 6 and 12
for i in [2,5,11]:
    print('Horizon ' + str(i+1)+':')
    print('test\tMAE\t\tMAPE\t\tRMSE')
    print(the_best[6][i])
if args.save:
    mod.save_checkpoint('STSGCN', epochs)
| 6,547 | 32.238579 | 830 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/STSGCN/utils.py |
import os
import numpy as np
import mxnet as mx
import pickle
def load_pickle(pickle_file):
    """Unpickle `pickle_file`, falling back to latin1 decoding for legacy
    Python-2 pickles; re-raise (after printing) on any other failure."""
    try:
        with open(pickle_file, 'rb') as handle:
            return pickle.load(handle)
    except UnicodeDecodeError:
        # pickle written by Python 2: retry with latin1 byte mapping
        with open(pickle_file, 'rb') as handle:
            return pickle.load(handle, encoding='latin1')
    except Exception as err:
        print('Unable to load data ', pickle_file, ':', err)
        raise
def load_adj(pkl_filename):
    """Return only the adjacency matrix from a (sensor_ids, id_map, adj) pickle."""
    _, _, adj_matrix = load_pickle(pkl_filename)
    return adj_matrix
def construct_model(config):
    """Build the STSGCN training symbol described by `config`.

    Loads the raw adjacency matrix, expands it into the localized 3N x 3N
    spatial-temporal adjacency, optionally embeds the input with a first
    fully-connected layer, assembles the full network symbol, and
    shape-checks the prediction head before returning it.
    """
    from models.stsgcn import stsgcn
    module_type = config['module_type']
    act_type = config['act_type']
    temporal_emb = config['temporal_emb']
    spatial_emb = config['spatial_emb']
    use_mask = config['use_mask']
    batch_size = config['batch_size']
    num_of_vertices = config['num_of_vertices']
    num_of_features = config['num_of_features']
    points_per_hour = config['points_per_hour']
    num_for_predict = config['num_for_predict']
    adj_filename = config['adj_filename']
    id_filename = config['id_filename']
    # ignore a configured id file that does not exist on disk
    if id_filename is not None:
        if not os.path.exists(id_filename):
            id_filename = None
    adj = load_adj(adj_filename)
    print('Original A: ', adj.shape)
    # expand the (N, N) adjacency to the localized (3N, 3N) graph
    adj_mx = construct_adj(adj, 3)
    print("The shape of localized adjacency matrix: {}".format(
        adj_mx.shape), flush=True)
    data = mx.sym.var("data")
    label = mx.sym.var("label")
    # constant, non-trainable adjacency symbol (gradient blocked)
    adj = mx.sym.Variable('adj', shape=adj_mx.shape,
                          init=mx.init.Constant(value=adj_mx.tolist()))
    adj = mx.sym.BlockGrad(adj)
    # learnable mask initialized with the adjacency's sparsity pattern
    mask_init_value = mx.init.Constant(value=(adj_mx != 0)
                                      .astype('float32').tolist())
    filters = config['filters']
    first_layer_embedding_size = config['first_layer_embedding_size']
    if first_layer_embedding_size:
        # embed the raw input features with FC + ReLU
        data = mx.sym.Activation(
            mx.sym.FullyConnected(
                data,
                flatten=False,
                num_hidden=first_layer_embedding_size
            ),
            act_type='relu'
        )
    else:
        first_layer_embedding_size = num_of_features
    net = stsgcn(
        data, adj, label,
        points_per_hour, num_of_vertices, first_layer_embedding_size,
        filters, module_type, act_type,
        use_mask, mask_init_value, temporal_emb, spatial_emb,
        prefix="", rho=1, predict_length=12
    )
    # sanity check: the prediction output must have shape (B, T', N)
    assert net.infer_shape(
        data=(batch_size, points_per_hour, num_of_vertices, 1),
        label=(batch_size, num_for_predict, num_of_vertices)
    )[1][1] == (batch_size, num_for_predict, num_of_vertices)
    return net
def get_adjacency_matrix(distance_df_filename, num_of_vertices,
                         type_='connectivity', id_filename=None):
    '''
    Build an adjacency matrix from an edge-list csv file.

    Parameters
    ----------
    distance_df_filename: str, path of the csv file that contains the edges,
        one "from,to,distance" triple per line after a header line
    num_of_vertices: int, the number of vertices
    type_: str, {connectivity, distance}
        'connectivity' sets A[i, j] = 1 for every edge,
        'distance' sets A[i, j] = 1 / distance
    id_filename: str or None, optional file with one raw sensor id per line,
        used to remap ids to consecutive indices
        (NOTE: this branch always builds a connectivity matrix, `type_`
        is ignored there — kept as in the original behavior)

    Returns
    ----------
    A: np.ndarray, adjacency matrix, shape (num_of_vertices, num_of_vertices)
    '''
    import csv
    A = np.zeros((int(num_of_vertices), int(num_of_vertices)),
                 dtype=np.float32)
    if id_filename:
        # remap raw sensor ids to consecutive indices
        with open(id_filename, 'r') as f:
            id_dict = {int(i): idx
                       for idx, i in enumerate(f.read().strip().split('\n'))}
        with open(distance_df_filename, 'r') as f:
            f.readline()  # skip the header line
            reader = csv.reader(f)
            for row in reader:
                if len(row) != 3:
                    continue
                i, j, distance = int(row[0]), int(row[1]), float(row[2])
                A[id_dict[i], id_dict[j]] = 1
                A[id_dict[j], id_dict[i]] = 1
        return A
    with open(distance_df_filename, 'r') as f:
        f.readline()  # skip the header line
        reader = csv.reader(f)
        for row in reader:
            if len(row) != 3:
                continue
            i, j, distance = int(row[0]), int(row[1]), float(row[2])
            if type_ == 'connectivity':
                A[i, j] = 1
                A[j, i] = 1
            # BUGFIX: this compared the builtin `type` (never equal to a
            # string), so type_='distance' always fell into the error branch
            elif type_ == 'distance':
                A[i, j] = 1 / distance
                A[j, i] = 1 / distance
            else:
                raise ValueError("type_ error, must be "
                                 "connectivity or distance!")
    return A
def construct_adj(A, steps):
    '''
    Build the localized spatial-temporal adjacency matrix.

    Parameters
    ----------
    A: np.ndarray, adjacency matrix, shape is (N, N)
    steps: int, number of consecutive time steps glued together

    Returns
    ----------
    np.ndarray, shape is (N * steps, N * steps): block-diagonal copies of A,
    every vertex linked to its own copy in the adjacent time step, and
    self loops on the diagonal.
    '''
    N = len(A)
    size = N * steps
    adj = np.zeros((size, size))
    # copy A onto each diagonal block (one block per time step)
    for step in range(steps):
        lo, hi = step * N, (step + 1) * N
        adj[lo:hi, lo:hi] = A
    # connect every vertex with itself in the neighbouring time step
    nodes = np.arange(N)
    for step in range(steps - 1):
        adj[step * N + nodes, (step + 1) * N + nodes] = 1
        adj[(step + 1) * N + nodes, step * N + nodes] = 1
    # self loops
    np.fill_diagonal(adj, 1)
    return adj
def generate_from_train_val_test(data, transformer):
    """Yield normalized (x, y) pairs for the 'train', 'val' and 'test' keys
    of `data`; the mean/std used for normalization are taken from the first
    split processed (train) and reused for the other two."""
    mean = None
    std = None
    for split in ('train', 'val', 'test'):
        x, y = generate_seq(data[split], 12, 12)
        if transformer:
            x = transformer(x)
            y = transformer(y)
        mean = x.mean() if mean is None else mean
        std = x.std() if std is None else std
        yield (x - mean) / std, y
def generate_from_data(data, length, transformer):
    """Yield normalized (x, y) pairs for a 60/20/20 train/val/test split of
    `data['data']`; normalization statistics come from the first (training)
    split and are reused for the other two."""
    mean = None
    std = None
    split_train, split_val = int(length * 0.6), int(length * 0.8)
    bounds = ((0, split_train),
              (split_train, split_val),
              (split_val, length))
    for start, stop in bounds:
        x, y = generate_seq(data['data'][start: stop], 12, 12)
        if transformer:
            x = transformer(x)
            y = transformer(y)
        mean = x.mean() if mean is None else mean
        std = x.std() if std is None else std
        yield (x - mean) / std, y
def generate_data(dataset_dir):
    """Yield (normalized x, y) for the train/val/test .npz files found in
    `dataset_dir`, keeping only the first feature channel; normalization
    statistics are computed from the first file (train) and reused."""
    mean = None
    std = None
    for split in ('train', 'val', 'test'):
        arrays = np.load(os.path.join(dataset_dir, split + '.npz'))
        x = arrays['x'][..., :1]
        y = arrays['y'][..., :1]
        if mean is None or std is None:
            # both are None together: set them from the training split
            mean, std = x.mean(), x.std()
        yield (x - mean) / std, y
def generate_seq(data, train_length, pred_length):
    """Slide a window of train_length + pred_length over the time axis of
    `data`, keep only the first feature channel, and split each window into
    its (input, target) halves along the time axis.
    NOTE: np.split with 2 parts requires train_length == pred_length."""
    window = train_length + pred_length
    num_windows = data.shape[0] - window + 1
    stacked = np.stack(
        [data[start: start + window] for start in range(num_windows)],
        axis=0)
    seq = stacked[:, :, :, 0: 1]  # first feature channel only
    return np.split(seq, 2, axis=1)
def mask_np(array, null_val):
    """Return a float32 mask that is 1 where `array` holds a valid value and
    0 where it equals the null marker `null_val` (NaN-aware)."""
    if np.isnan(null_val):
        # BUGFIX: the mask must test `array` for NaNs element-wise; the
        # original tested the scalar null_val and returned a scalar 0.0
        return (~np.isnan(array)).astype('float32')
    else:
        return np.not_equal(array, null_val).astype('float32')
def masked_mape_np(y_true, y_pred, null_val=np.nan):
    """Mean absolute percentage error (in percent), down-weighting entries of
    `y_true` equal to `null_val` via the mask produced by mask_np."""
    with np.errstate(divide='ignore', invalid='ignore'):
        weights = mask_np(y_true, null_val)
        weights /= weights.mean()
        pct_err = np.abs((y_pred - y_true) / y_true)
        pct_err = np.nan_to_num(weights * pct_err)
    return np.mean(pct_err) * 100
def masked_mse_np(y_true, y_pred, null_val=np.nan):
    """Mean squared error, down-weighting entries of `y_true` equal to
    `null_val` via the mask produced by mask_np."""
    weights = mask_np(y_true, null_val)
    weights /= weights.mean()
    sq_err = np.square(y_true - y_pred)
    return np.mean(np.nan_to_num(weights * sq_err))
def masked_mae_np(y_true, y_pred, null_val=np.nan):
    """Mean absolute error, down-weighting entries of `y_true` equal to
    `null_val` via the mask produced by mask_np."""
    weights = mask_np(y_true, null_val)
    weights /= weights.mean()
    abs_err = np.abs(y_true - y_pred)
    return np.mean(np.nan_to_num(weights * abs_err))
| 7,732 | 27.747212 | 78 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/STSGCN/load_params.py | # -*- coding:utf-8 -*-
import mxnet as mx
# restore the symbol plus trained/auxiliary parameters saved by
# mod.save_checkpoint('STSGCN', ...) at epoch 200
sym, arg_params, aux_params = mx.model.load_checkpoint('STSGCN', 200)
print(type(arg_params), type(aux_params))
| 157 | 18.75 | 69 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/STSGCN/models/stsgcn.py | # -*- coding:utf-8 -*-
import mxnet as mx
def position_embedding(data,
                       input_length, num_of_vertices, embedding_size,
                       temporal=True, spatial=True,
                       init=mx.init.Xavier(magnitude=0.0003), prefix=""):
    '''
    Add learnable temporal and/or spatial position embeddings to the input.

    Parameters
    ----------
    data: mx.sym.var, shape is (B, T, N, C)
    input_length: int, length of time series, T
    num_of_vertices: int, N
    embedding_size: int, C
    temporal, spatial: bool, whether to add each type of embedding
    init: mx.initializer.Initializer
    prefix: str, variable-name prefix

    Returns
    ----------
    data: output shape is (B, T, N, C)
    '''
    if temporal:
        # (1, T, 1, C): broadcast over batch and vertices
        t_emb = mx.sym.var(
            "{}_t_emb".format(prefix),
            shape=(1, input_length, 1, embedding_size),
            init=init
        )
        data = mx.sym.broadcast_add(data, t_emb)
    if spatial:
        # (1, 1, N, C): broadcast over batch and time
        v_emb = mx.sym.var(
            "{}_v_emb".format(prefix),
            shape=(1, 1, num_of_vertices, embedding_size),
            init=init
        )
        data = mx.sym.broadcast_add(data, v_emb)
    return data
def gcn_operation(data, adj,
                  num_of_filter, num_of_features, num_of_vertices,
                  activation, prefix=""):
    '''
    One graph convolution: aggregate over the localized adjacency, then a
    fully connected projection with either a GLU or a ReLU non-linearity.

    Parameters
    ----------
    data: mx.sym.var, shape is (3N, B, C)
    adj: mx.sym.var, shape is (3N, 3N)
    num_of_filter: int, C'
    num_of_features: int, C (not used in the body, kept for the interface)
    num_of_vertices: int, N
    activation: str, {'GLU', 'relu'}
    prefix: str

    Returns
    ----------
    output shape is (3N, B, C')
    '''
    assert activation in {'GLU', 'relu'}
    # neighborhood aggregation, shape stays (3N, B, C)
    aggregated = mx.sym.dot(adj, data)
    if activation == 'relu':
        # FC projection to C' followed by ReLU, shape (3N, B, C')
        projected = mx.sym.FullyConnected(
            aggregated,
            flatten=False,
            num_hidden=num_of_filter
        )
        return mx.sym.Activation(projected, activation)
    # GLU: project to 2C', then gate one half by the sigmoid of the other
    projected = mx.sym.FullyConnected(
        aggregated,
        flatten=False,
        num_hidden=2 * num_of_filter
    )
    value, gate = mx.sym.split(projected, num_outputs=2, axis=2)
    # shape (3N, B, C')
    return value * mx.sym.sigmoid(gate)
def stsgcm(data, adj,
           filters, num_of_features, num_of_vertices,
           activation, prefix=""):
    '''
    STSGCM: stacked graph convolutions, each output cropped to the middle
    time step's vertices and combined by an element-wise max.

    Parameters
    ----------
    data: mx.sym.var, shape is (3N, B, C)
    adj: mx.sym.var, shape is (3N, 3N)
    filters: list[int], list of C'
    num_of_features: int, C
    num_of_vertices: int, N
    activation: str, {'GLU', 'relu'}
    prefix: str

    Returns
    ----------
    output shape is (N, B, C')
    '''
    layer_outputs = []
    current = data
    in_features = num_of_features
    for layer_idx, out_features in enumerate(filters):
        current = gcn_operation(
            current, adj,
            out_features, in_features, num_of_vertices,
            activation=activation,
            prefix="{}_gcn_{}".format(prefix, layer_idx)
        )
        layer_outputs.append(current)
        in_features = out_features
    # crop rows [N, 2N) (the middle time step) of every layer output and
    # stack them with a leading axis of size len(filters)
    cropped = [
        mx.sym.expand_dims(
            mx.sym.slice(
                out,
                begin=(num_of_vertices, None, None),
                end=(2 * num_of_vertices, None, None)
            ), 0
        ) for out in layer_outputs
    ]
    # element-wise max over the stacked layer outputs, shape (N, B, C')
    return mx.sym.max(mx.sym.concat(*cropped, dim=0), axis=0)
def stsgcl(data, adj,
           T, num_of_vertices, num_of_features, filters,
           module_type, activation, temporal_emb=True, spatial_emb=True,
           prefix=""):
    '''
    STSGCL: dispatch to the 'individual' or the 'sharing' STSGCM variant.

    Parameters
    ----------
    data: mx.sym.var, shape is (B, T, N, C)
    adj: mx.sym.var, shape is (3N, 3N)
    T: int, length of time series
    num_of_vertices: int, N
    num_of_features: int, C
    filters: list[int], list of C'
    module_type: str, {'sharing', 'individual'}
    activation: str, {'GLU', 'relu'}
    temporal_emb, spatial_emb: bool
    prefix: str

    Returns
    ----------
    output shape is (B, T-2, N, C')
    '''
    assert module_type in {'sharing', 'individual'}
    layer_fn = (sthgcn_layer_individual if module_type == 'individual'
                else sthgcn_layer_sharing)
    return layer_fn(
        data, adj,
        T, num_of_vertices, num_of_features, filters,
        activation, temporal_emb, spatial_emb, prefix
    )
def sthgcn_layer_individual(data, adj,
                            T, num_of_vertices, num_of_features, filters,
                            activation, temporal_emb=True, spatial_emb=True,
                            prefix=""):
    '''
    STSGCL built from multiple individual (non-shared) STSGCMs: each of the
    T-2 sliding windows of 3 consecutive time steps is processed by its own
    STSGCM instance, and the results are concatenated along the time axis.

    Parameters
    ----------
    data: mx.sym.var, shape is (B, T, N, C)
    adj: mx.sym.var, shape is (3N, 3N)
    T: int, length of time series, T
    num_of_vertices: int, N
    num_of_features: int, C
    filters: list[int], list of C'
    activation: str, {'GLU', 'relu'}
    temporal_emb, spatial_emb: bool
    prefix: str
    Returns
    ----------
    output shape is (B, T-2, N, C')
    '''
    # add position embeddings, shape stays (B, T, N, C)
    data = position_embedding(data, T, num_of_vertices, num_of_features,
                              temporal_emb, spatial_emb,
                              prefix="{}_emb".format(prefix))
    need_concat = []
    for i in range(T - 2):
        # window of 3 consecutive time steps, shape is (B, 3, N, C)
        t = mx.sym.slice(data, begin=(None, i, None, None),
                         end=(None, i + 3, None, None))
        # fold the 3 time steps into the vertex axis, shape is (B, 3N, C)
        t = mx.sym.reshape(t, (-1, 3 * num_of_vertices, num_of_features))
        # shape is (3N, B, C)
        t = mx.sym.transpose(t, (1, 0, 2))
        # per-window STSGCM, shape is (N, B, C')
        t = stsgcm(
            t, adj, filters, num_of_features, num_of_vertices,
            activation=activation,
            prefix="{}_stsgcm_{}".format(prefix, i)
        )
        # shape is (B, N, C')
        t = mx.sym.swapaxes(t, 0, 1)
        # shape is (B, 1, N, C')
        need_concat.append(mx.sym.expand_dims(t, axis=1))
    # concatenate the T-2 windows along the time axis, shape is (B, T-2, N, C')
    return mx.sym.concat(*need_concat, dim=1)
def sthgcn_layer_sharing(data, adj,
                         T, num_of_vertices, num_of_features, filters,
                         activation, temporal_emb=True, spatial_emb=True,
                         prefix=""):
    '''
    STSGCL built around ONE shared STSGCM: all T-2 sliding windows are
    stacked along the batch axis, run through a single STSGCM, and the
    result is reshaped back to separate time steps.

    Parameters
    ----------
    data: mx.sym.var, shape is (B, T, N, C)
    adj: mx.sym.var, shape is (3N, 3N)
    T: int, length of time series, T
    num_of_vertices: int, N
    num_of_features: int, C
    filters: list[int], list of C'
    activation: str, {'GLU', 'relu'}
    temporal_emb, spatial_emb: bool
    prefix: str
    Returns
    ----------
    output shape is (B, T-2, N, C')
    '''
    # add position embeddings, shape stays (B, T, N, C)
    data = position_embedding(data, T, num_of_vertices, num_of_features,
                              temporal_emb, spatial_emb,
                              prefix="{}_emb".format(prefix))
    need_concat = []
    for i in range(T - 2):
        # window of 3 consecutive time steps, shape is (B, 3, N, C)
        t = mx.sym.slice(data, begin=(None, i, None, None),
                         end=(None, i + 3, None, None))
        # fold the 3 time steps into the vertex axis, shape is (B, 3N, C)
        t = mx.sym.reshape(t, (-1, 3 * num_of_vertices, num_of_features))
        # shape is (3N, B, C)
        t = mx.sym.swapaxes(t, 0, 1)
        need_concat.append(t)
    # stack all windows along the batch axis, shape is (3N, (T-2)*B, C)
    t = mx.sym.concat(*need_concat, dim=1)
    # one shared STSGCM over all windows, shape is (N, (T-2)*B, C')
    t = stsgcm(
        t, adj, filters, num_of_features, num_of_vertices,
        activation=activation,
        prefix="{}_stsgcm".format(prefix)
    )
    # unfold windows back out of the batch axis, shape is (N, T - 2, B, C)
    t = t.reshape((num_of_vertices, T - 2, -1, filters[-1]))
    # shape is (B, T - 2, N, C)
    return mx.sym.swapaxes(t, 0, 2)
def output_layer(data, num_of_vertices, input_length, num_of_features,
                 num_of_filters=128, predict_length=12):
    '''
    Map the final feature maps to a prediction sequence via two per-vertex
    fully connected layers.

    Parameters
    ----------
    data: mx.sym.var, shape is (B, T, N, C)
    num_of_vertices: int, N
    input_length: int, length of time series, T
    num_of_features: int, C
    num_of_filters: int, C', hidden width of the first FC layer
    predict_length: int, length of predicted time series, T'

    Returns
    ----------
    output shape is (B, T', N)
    '''
    # (B, N, T, C)
    x = mx.sym.swapaxes(data, 1, 2)
    # flatten time and features per vertex: (B, N, T * C)
    x = mx.sym.reshape(
        x, (-1, num_of_vertices, input_length * num_of_features)
    )
    # hidden projection with ReLU: (B, N, C')
    x = mx.sym.FullyConnected(
        x,
        flatten=False,
        num_hidden=num_of_filters
    )
    x = mx.sym.Activation(x, 'relu')
    # per-vertex prediction head: (B, N, T')
    x = mx.sym.FullyConnected(
        x,
        flatten=False,
        num_hidden=predict_length
    )
    # (B, T', N)
    return mx.sym.swapaxes(x, 1, 2)
def huber_loss(data, label, rho=1):
    '''
    Element-wise Huber loss: quadratic for absolute errors below `rho`,
    linear above it.

    Parameters
    ----------
    data: mx.sym.var, shape is (B, T', N)
    label: mx.sym.var, shape is (B, T', N)
    rho: float, transition point between the two regimes

    Returns
    ----------
    loss: mx.sym
    '''
    abs_err = mx.sym.abs(data - label)
    quadratic = (0.5 / rho) * mx.sym.square(abs_err)
    linear = abs_err - 0.5 * rho
    return mx.sym.MakeLoss(mx.sym.where(abs_err > rho, linear, quadratic))
def weighted_loss(data, label, input_length, rho=1):
    '''
    Huber loss with linearly decaying weights over the horizon: the first
    prediction step is weighted T', the last is weighted 1.

    Parameters
    ----------
    data: mx.sym.var, shape is (B, T', N)
    label: mx.sym.var, shape is (B, T', N)
    input_length: int, T'
    rho: float

    Returns
    ----------
    agg_loss: mx.sym
    '''
    # weights T', T'-1, ..., 1 arranged with shape (1, T', 1)
    step_weights = mx.sym.flip(mx.sym.arange(1, input_length + 1), axis=0)
    step_weights = mx.sym.expand_dims(step_weights, axis=0)
    step_weights = mx.sym.expand_dims(step_weights, axis=-1)
    return mx.sym.broadcast_mul(
        huber_loss(data, label, rho),
        step_weights
    )
def stsgcn(data, adj, label,
           input_length, num_of_vertices, num_of_features,
           filter_list, module_type, activation,
           use_mask=True, mask_init_value=None,
           temporal_emb=True, spatial_emb=True,
           prefix="", rho=1, predict_length=12):
    '''
    Assemble the full STSGCN: one STSGCL per entry of `filter_list` (each
    layer shrinks the temporal axis by 2), then one output head per
    prediction step, returning a group of (huber loss, gradient-blocked
    prediction).

    data shape is (B, T, N, C)
    adj shape is (3N, 3N)
    label shape is (B, T, N)
    '''
    if use_mask:
        if mask_init_value is None:
            raise ValueError("mask init value is None!")
        # learnable mask that modulates the localized adjacency
        mask = mx.sym.var("{}_mask".format(prefix),
                          shape=(3 * num_of_vertices, 3 * num_of_vertices),
                          init=mask_init_value)
        adj = mask * adj
    for idx, filters in enumerate(filter_list):
        data = stsgcl(data, adj, input_length, num_of_vertices,
                      num_of_features, filters, module_type,
                      activation=activation,
                      temporal_emb=temporal_emb,
                      spatial_emb=spatial_emb,
                      prefix="{}_stsgcl_{}".format(prefix, idx))
        input_length -= 2  # each STSGCL consumes two time steps
        num_of_features = filters[-1]
    # one output head per horizon step, each producing (B, 1, N)
    need_concat = []
    for i in range(predict_length):
        need_concat.append(
            output_layer(
                data, num_of_vertices, input_length, num_of_features,
                num_of_filters=128, predict_length=1
            )
        )
    data = mx.sym.concat(*need_concat, dim=1)
    loss = huber_loss(data, label, rho=rho)
    # expose the loss for training and the prediction (gradients blocked)
    return mx.sym.Group([loss, mx.sym.BlockGrad(data, name='pred')])
Traffic-Benchmark | Traffic-Benchmark-master/methods/STSGCN/test/test_stsgcn.py | # -*- coding:utf-8 -*-
import sys
import mxnet as mx
# make the repo root importable so `models` and `utils` resolve
sys.path.append('.')
# shared fixtures for the symbol shape-inference tests below
num_of_vertices = 358
batch_size = 16
filter_ = [3, 3, 3]
filter_list = [[3, 3, 3], [6, 6, 6], [9, 9, 9]]
predict_length = 12
# free symbolic placeholders reused by every test
data = mx.sym.var('data')
adj = mx.sym.var('adj')
label = mx.sym.var('label')
def test_position_embedding():
    """Shape inference must be identity-preserving for every embedding combo."""
    from models.stsgcn import position_embedding
    shape = (batch_size, 12, num_of_vertices, 32)
    for use_temporal in (True, False):
        for use_spatial in (True, False):
            sym = position_embedding(
                data, 12, num_of_vertices, 32,
                use_temporal, use_spatial
            )
            assert sym.infer_shape(data=shape)[1][0] == shape
def test_gcn_operation():
    """One graph convolution must map C=32 to C'=64 for both activations."""
    from models.stsgcn import gcn_operation
    in_shape = (3 * num_of_vertices, batch_size, 32)
    adj_shape = (3 * num_of_vertices, 3 * num_of_vertices)
    for act in ('GLU', 'relu'):
        sym = gcn_operation(
            data, adj, 64, 32, num_of_vertices, act, "gcn_operation"
        )
        out_shape = sym.infer_shape(data=in_shape, adj=adj_shape)[1][0]
        assert out_shape == (3 * num_of_vertices, batch_size, 64)
def test_stsgcm():
    """An STSGCM must crop 3N rows down to N and emit filter_[-1] channels."""
    from models.stsgcn import stsgcm
    for act in ('GLU', 'relu'):
        sym = stsgcm(
            data, adj,
            filter_, 32, num_of_vertices,
            act, "stsgcm"
        )
        out_shape = sym.infer_shape(
            data=(3 * num_of_vertices, batch_size, 32),
            adj=(3 * num_of_vertices, 3 * num_of_vertices)
        )[1][0]
        assert out_shape == (num_of_vertices, batch_size, filter_[-1])
def test_stsgcl():
    """Exhaustive sweep over module types, activations and embedding flags:
    one STSGCL must map T=12 down to T-2=10 time steps."""
    from models.stsgcn import stsgcl
    for module_type in ('sharing', 'individual'):
        for activation in ('GLU', 'relu'):
            for temporal_emb in (True, False):
                for spatial_emb in (True, False):
                    net = stsgcl(
                        data, adj, 12, num_of_vertices, 32, filter_,
                        module_type, activation,
                        temporal_emb, spatial_emb, "sthgcl"
                    )
                    assert net.infer_shape(
                        data=(batch_size, 12, num_of_vertices, 32),
                        adj=(3 * num_of_vertices, 3 * num_of_vertices)
                    )[1][0] == (
                        batch_size, 10, num_of_vertices, filter_[-1])
def test_output_layer():
    """The output head must produce a (B, T', N) prediction."""
    from models.stsgcn import output_layer
    sym = output_layer(data, num_of_vertices, 12, 32,
                       num_of_filters=128, predict_length=predict_length)
    inferred = sym.infer_shape(
        data=(batch_size, 12, num_of_vertices, 32)
    )[1][0]
    assert inferred == (batch_size, predict_length, num_of_vertices)
def test_huber_loss():
    """The element-wise huber loss must preserve the input shape."""
    from models.stsgcn import huber_loss
    shape = (batch_size, 12, num_of_vertices)
    sym = huber_loss(data, label, rho=1)
    assert sym.infer_shape(data=shape, label=shape)[1][0] == shape
def test_weighted_loss():
    """The horizon-weighted huber loss must preserve the input shape."""
    from models.stsgcn import weighted_loss
    shape = (batch_size, 12, num_of_vertices)
    sym = weighted_loss(data, label, 12, rho=1)
    assert sym.infer_shape(data=shape, label=shape)[1][0] == shape
def test_stsgcn():
    """Full-network shape inference across all option combinations; the
    first output of the grouped symbol (the loss) must match the label
    shape (B, 12, N)."""
    from models.stsgcn import stsgcn
    import numpy as np
    # random dense mask initializer (values irrelevant for shape inference)
    mask_init_value = mx.init.Constant(value=np.random.uniform(
        size=(3 * num_of_vertices, 3 * num_of_vertices)).tolist())
    for module_type in ('sharing', 'individual'):
        for activation in ('GLU', 'relu'):
            for temporal_emb in (True, False):
                for spatial_emb in (True, False):
                    for use_mask in (True, False):
                        net = stsgcn(
                            data, adj, label,
                            12, num_of_vertices, 32, filter_list,
                            module_type, activation,
                            use_mask, mask_init_value,
                            temporal_emb, spatial_emb,
                            prefix="stsgcn", rho=1, predict_length=12
                        )
                        assert net.infer_shape(
                            data=(batch_size, 12, num_of_vertices, 32),
                            adj=(3 * num_of_vertices, 3 * num_of_vertices),
                            label=(batch_size, 12, num_of_vertices)
                        )[1][0] == (batch_size, 12, num_of_vertices)
def test_model_construction():
    """Every config file under config/PEMS08 must build without errors."""
    import json
    import os
    from utils import construct_model
    config_folder = 'config/PEMS08'
    for entry in os.listdir(config_folder):
        with open(os.path.join(config_folder, entry), 'r') as f:
            cfg = json.loads(f.read().strip())
        # point the config at the checked-in PEMS08 adjacency file
        cfg['adj_filename'] = 'data/PEMS08/PEMS08.csv'
        cfg['id_filename'] = None
        construct_model(cfg)
| 4,947 | 34.342857 | 75 | py |
FL-MRCM | FL-MRCM-main/main_fl_mr.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import copy
import numpy as np
import torch
import os
from utils.options import args_parser
from models.recon_Update import LocalUpdate
from models.Fed import FedAvg
from models.test import evaluator_normal as evaluator
from data.mri_data import SliceData, DataTransform
from data.subsample import create_mask_for_mask_type
from models.unet_model import UnetModel
from tensorboardX import SummaryWriter
import pathlib
# Entry point for plain federated MRI reconstruction (FL-MR): each site trains
# a local copy of a shared U-Net, and the server averages the weights (FedAvg)
# once per communication round.
if __name__ == '__main__':
# Avoid HDF5 file-locking problems on shared/network filesystems.
os.environ["HDF5_USE_FILE_LOCKING"] = 'FALSE'
# parse args
args = args_parser()
# Placeholder roots for the four data-collection sites; replace before use.
path_dict = {'B': pathlib.Path('Dataset dir B'),
'F': pathlib.Path('Dataset dir F'),
'H': pathlib.Path('Dataset dir H'),
'I': pathlib.Path('Dataset dir I')}
rate_dict = {'B': 1.0,'F': 1.0,'H': 1.0, 'I': 1.0} # control the sample rate for each dataset
args.device = torch.device('cuda:{}'.format(args.gpu[0]) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
writer = SummaryWriter(log_dir=args.save_dir/ 'summary')
def save_networks(net, epoch, local=False, local_no = None):
"""Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
local (bool) -- when True, a per-client file name '%s_C%s_net.pth' is used
local_no -- client index embedded in the file name when local is True
"""
if local:
save_filename = '%s_C%s_net.pth' % (epoch,local_no)
else:
save_filename = '%s_net.pth' % (epoch)
save_path = os.path.join(args.save_dir, save_filename)
# DataParallel models are unwrapped via .module before saving so checkpoints
# are loadable without the wrapper; the net is moved back to the device after
# the CPU round-trip.
if len(args.gpu) > 1 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.to(args.device)
else:
torch.save(net.cpu().state_dict(), save_path)
net.to(args.device)
# data loader
def _create_dataset(data_path,data_transform, data_partition, sequence, sample_rate=None):
# Build a SliceData set rooted at <data_path>/<data_partition>.
dataset = SliceData(
root=data_path / data_partition,
transform=data_transform,
sample_rate=sample_rate,
challenge=args.challenge,
sequence =sequence
)
return dataset
# load dataset and split users
if args.dataset == 'mri':
mask = create_mask_for_mask_type(args.mask_type, args.center_fractions,
args.accelerations)
# Training transform uses a random mask seed; validation is deterministic.
train_data_transform = DataTransform(args.resolution, args.challenge, mask, use_seed=False)
val_data_transform = DataTransform(args.resolution, args.challenge, mask)
datasets_list = []
if args.phase == 'train':
# One local training set per federated client/site.
for data in args.train_datasets:
dataset_train = _create_dataset(path_dict[data]/args.sequence,train_data_transform, 'train', args.sequence,rate_dict[data])
datasets_list.append(dataset_train)
dataset_val = _create_dataset(path_dict[args.test_dataset]/args.sequence,val_data_transform, 'val', args.sequence, args.val_sample_rate)
else:
exit('Error: unrecognized dataset')
assert (len(datasets_list)==args.num_users)
# build model
if args.model == 'unet':
net_glob = UnetModel(
in_chans=1,
out_chans=1,
chans=32,
num_pool_layers=4,
drop_prob=0.0
).to(args.device)
else:
exit('Error: unrecognized model')
print(net_glob)
net_glob.train()
# copy weights
if len(args.gpu) > 1:
net_glob = torch.nn.DataParallel(net_glob, args.gpu)
w_glob = net_glob.module.state_dict()
else:
w_glob = net_glob.state_dict()
# training
loss_train = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
if args.phase == 'train':
start_epoch = -1
# Optionally resume; the round index is parsed from the checkpoint file name
# (format '<epoch>_net.pth', see save_networks above).
if args.continues:
if len(args.gpu) > 1:
net_glob.module.load_state_dict(torch.load(args.checkpoint))
else:
net_glob.load_state_dict(torch.load(args.checkpoint))
print('Load checkpoint :', args.checkpoint)
start_epoch = int(args.checkpoint.split('/')[-1].split('_')[0])
# One iteration of this loop == one federated communication round.
for iter in range(start_epoch+1,args.epochs):
w_locals, loss_locals = [], []
for idx, dataset_train in enumerate(datasets_list):
local = LocalUpdate(args=args, device=args.device, dataset=dataset_train)
# global update
# Each client trains a deep copy of the current global model.
w, loss, _ = local.train(net=copy.deepcopy(net_glob).to(args.device),epoch=iter, idx=idx, writer=writer)
w_locals.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
# update global weights
w_glob = FedAvg(w_locals)
# copy weight to net_glob
net_glob.load_state_dict(w_glob)
# print loss
loss_avg = np.sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
print('saving the model at the end of epoch %d' % (iter))
save_networks(net_glob, iter)
print('Evaluation ...')
# evaluator_normal expects a forward that returns only the image.
validation = evaluator(dataset_val, args, writer,args.device)
validation.evaluate_recon(net_glob,iter)
writer.close()
| 5,441 | 37.595745 | 148 | py |
FL-MRCM | FL-MRCM-main/main_test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import torch
import os
from utils.options import args_parser
from models.test import test_save_result, test_save_vector
from data.mri_data import SliceData, DataTransform
from data.subsample import create_mask_for_mask_type
from models.unet_model import UnetModel, UnetModel_ad_da
import pathlib
# Test-time entry point: load a trained checkpoint (FL-MR U-Net or FL-MRCM
# ad_da U-Net), run it over the held-out test split, and save metrics + HDF5
# reconstructions via test_save_result.
if __name__ == '__main__':
# Avoid HDF5 file-locking problems on shared/network filesystems.
os.environ["HDF5_USE_FILE_LOCKING"] = 'FALSE'
# parse args
args = args_parser()
# Placeholder roots for the four data-collection sites; replace before use.
path_dict = {'B': pathlib.Path('Dataset dir B'),
'F': pathlib.Path('Dataset dir F'),
'H': pathlib.Path('Dataset dir H'),
'I': pathlib.Path('Dataset dir I')}
args.device = torch.device('cuda:{}'.format(args.gpu[0]) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
# data loader
def _create_dataset(data_path,data_transform, data_partition, sequence, sample_rate=None):
# Build a SliceData set rooted at <data_path>/<data_partition>;
# falls back to args.sample_rate when no explicit rate is given.
sample_rate = sample_rate or args.sample_rate
dataset = SliceData(
root=data_path / data_partition,
transform=data_transform,
sample_rate=sample_rate,
challenge=args.challenge,
sequence=sequence
)
return dataset
# load dataset and split users
if args.dataset == 'mri':
mask = create_mask_for_mask_type(args.mask_type, args.center_fractions,
args.accelerations)
val_data_transform = DataTransform(args.resolution, args.challenge, mask)
# Full test split (sample rate 1.0) of the chosen target dataset.
dataset_val = _create_dataset(path_dict[args.test_dataset]/args.sequence,val_data_transform, 'test', args.sequence,1.0)
else:
exit('Error: unrecognized dataset')
if args.model == 'unet': # for fl_mr
net_glob = UnetModel(
in_chans=1,
out_chans=1,
chans=32,
num_pool_layers=4,
drop_prob=0.0
).to(args.device)
elif args.model == 'unet_ad_da': # for fl_mrcm
net_glob = UnetModel_ad_da(
in_chans=1,
out_chans=1,
chans=32,
num_pool_layers=4,
drop_prob=0.0
).to(args.device)
else:
exit('Error: unrecognized model')
print(net_glob)
# copy weights
if len(args.gpu) > 1:
net_glob = torch.nn.DataParallel(net_glob, args.gpu)
if args.phase == 'test':
# testing
net_glob.eval()
# Checkpoints were saved without the DataParallel wrapper, so load through
# .module when one is present.
if len(args.gpu) > 1:
net_glob.module.load_state_dict(torch.load(args.checkpoint))
else:
net_glob.load_state_dict(torch.load(args.checkpoint))
print('Load checkpoint for test:', args.checkpoint)
# Run inference, compute NMSE/SSIM/PSNR, and dump HDF5 volumes to save_dir.
test_save_result(net_glob, dataset_val, args)
| 2,799 | 32.73494 | 127 | py |
FL-MRCM | FL-MRCM-main/main_fl_mrcm.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
import copy
import numpy as np
import torch
import os
from utils.options import args_parser
from models.recon_Update import LocalUpdate_ad_da
from models.Fed import FedAvg
from models.test import evaluator
from data.mri_data import SliceData, DataTransform
from data.subsample import create_mask_for_mask_type
from models.unet_model import UnetModel_ad_da, Feature_discriminator
from tensorboardX import SummaryWriter
import pathlib
# Entry point for FL-MRCM: federated MRI reconstruction with cross-site
# modality/domain alignment. Each client keeps its own generator copy (G_s)
# and feature discriminator (FD); the server averages generator weights.
if __name__ == '__main__':
# Avoid HDF5 file-locking problems on shared/network filesystems.
os.environ["HDF5_USE_FILE_LOCKING"] = 'FALSE'
# parse args
args = args_parser()
# Placeholder roots for the four data-collection sites; replace before use.
path_dict = {'B': pathlib.Path('Dataset dir B'),
'F': pathlib.Path('Dataset dir F'),
'H': pathlib.Path('Dataset dir H'),
'I': pathlib.Path('Dataset dir I')}
rate_dict = {'B': 1.0, 'F': 1.0, 'H': 1.0, 'I': 1.0} # control the sample rate for each dataset
print(rate_dict)
args.device = torch.device('cuda:{}'.format(args.gpu[0]) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
writer = SummaryWriter(log_dir=args.save_dir/ 'summary')
def save_networks(net, epoch, local=False, local_no = None):
"""Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
local (bool) -- when True, a per-discriminator name '%s_net_D_%s.pth' is used
local_no -- discriminator index embedded in the file name when local is True
"""
if local:
save_filename = '%s_net_D_%s.pth' % (epoch,local_no)
else:
save_filename = '%s_net.pth' % (epoch)
save_path = os.path.join(args.save_dir, save_filename)
# Unwrap DataParallel via .module so checkpoints load without the wrapper.
if len(args.gpu) > 1 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.to(args.device)
else:
torch.save(net.cpu().state_dict(), save_path)
net.to(args.device)
# data loader
def _create_dataset(data_path,data_transform, data_partition, sequence, sample_rate=None, seed=42):
# Build a SliceData set rooted at <data_path>/<data_partition>.
dataset = SliceData(
root=data_path / data_partition,
transform=data_transform,
sample_rate=sample_rate,
challenge=args.challenge,
sequence=sequence,
seed=seed
)
return dataset
# load dataset and split users
if args.dataset == 'mri':
mask = create_mask_for_mask_type(args.mask_type, args.center_fractions,
args.accelerations)
# Training transform uses a random mask seed; validation is deterministic.
train_data_transform = DataTransform(args.resolution, args.challenge, mask, use_seed=False)
val_data_transform = DataTransform(args.resolution, args.challenge, mask)
datasets_list = []
if args.phase == 'train':
# One local training set per federated client/site.
for data in args.train_datasets:
dataset_train = _create_dataset(path_dict[data]/args.sequence,train_data_transform, 'train', args.sequence,rate_dict[data], args.seed)
datasets_list.append(dataset_train)
dataset_val = _create_dataset(path_dict[args.test_dataset]/args.sequence,val_data_transform, 'val', args.sequence, args.val_sample_rate)
else:
exit('Error: unrecognized dataset')
# Make each client's target-domain dataset have the same number of samples as
# that client's training dataset (examples are repeated cyclically as needed),
# so the zip() in LocalUpdate_ad_da.train pairs every source batch with one.
target_datasets_list=[]
for dataset in datasets_list:
tmp_list = []
dataset_target_domain = _create_dataset(path_dict[args.test_dataset] / args.sequence, train_data_transform,'train', args.sequence,rate_dict[args.test_dataset])
while len(tmp_list)<len(dataset.examples):
for sample in dataset_target_domain.examples:
tmp_list.append(sample)
if len(tmp_list) ==len(dataset.examples):
break
dataset_target_domain.examples = tmp_list
target_datasets_list.append(dataset_target_domain)
assert (len(datasets_list)==args.num_users)
# build model
if args.model == 'unet':
net_glob = UnetModel_ad_da(
in_chans=1,
out_chans=1,
chans=32,
num_pool_layers=4,
drop_prob=0.0
).to(args.device)
else:
exit('Error: unrecognized model')
print(net_glob)
net_glob.train()
# Per-client generator copies (G_s) and feature discriminators (FD).
G_s = []
FD = []
for i in range(args.num_users):
if len(args.gpu) > 1:
G_s.append(torch.nn.DataParallel(UnetModel_ad_da(in_chans=1, out_chans=1, chans=32, num_pool_layers=4,
drop_prob=0.0).to(args.device),args.gpu))
FD.append(torch.nn.DataParallel(Feature_discriminator().to(args.device),args.gpu))
else:
G_s.append(UnetModel_ad_da(in_chans=1,out_chans=1,chans=32,num_pool_layers=4,drop_prob=0.0).to(args.device))
FD.append(Feature_discriminator().to(args.device))
# setting optimizer
opt_g_s = []
opt_FD= []
for i in range(args.num_users):
opt_g_s.append(torch.optim.RMSprop(G_s[i].parameters(), lr=args.lr))
# Discriminators are trained with a 10x larger learning rate.
opt_FD.append(torch.optim.RMSprop(FD[i].parameters(), lr=args.lr*10))
# copy weights
if len(args.gpu) > 1:
net_glob = torch.nn.DataParallel(net_glob, args.gpu)
w_glob = net_glob.state_dict()
else:
w_glob = net_glob.state_dict()
# initialize parameters: start every client generator from the global weights
for G in G_s:
for net, net_cardinal in zip(G.named_parameters(), net_glob.named_parameters()):
net[1].data = net_cardinal[1].data.clone()
# training
if args.phase == 'train':
start_epoch = -1
# Optionally resume generator + all discriminators; the round index is parsed
# from the checkpoint file name (format '<epoch>_net.pth').
if args.continues:
if len(args.gpu) > 1:
net_glob.module.load_state_dict(torch.load(args.checkpoint))
print('Load checkpoint :', args.checkpoint)
for i, net_d in enumerate(FD):
path = args.checkpoint.split('.')[0]+'_D_%s.pth'%(i)
print('Load checkpoint :', path)
net_d.module.load_state_dict(torch.load(path))
start_epoch = int(args.checkpoint.split('/')[-1].split('_')[0])
else:
net_glob.load_state_dict(torch.load(args.checkpoint))
print('Load checkpoint :', args.checkpoint)
for i, net_d in enumerate(FD):
path = args.checkpoint.split('.')[0] + '_D_%s.pth' % (i)
print('Load checkpoint :', path)
net_d.load_state_dict(torch.load(path))
start_epoch = int(args.checkpoint.split('/')[-1].split('_')[0])
# One iteration of this loop == one federated communication round.
for iter in range(start_epoch+1,args.epochs):
w_locals, loss_locals = [], []
for idx, dataset_train in enumerate(datasets_list):
flag = args.train_datasets[idx] == args.test_dataset # for disable adv loss for target dataset
local = LocalUpdate_ad_da(args=args, device=args.device, dataset=dataset_train,
dataset_target = target_datasets_list[idx], optimizer=opt_g_s[idx],optimizer_fd=opt_FD[idx],flag=flag)
# models communication
G_s[idx].load_state_dict(net_glob.state_dict())
# global update
w, loss = local.train(net=G_s[idx],net_fd=FD[idx] ,epoch=iter, idx=idx, writer=writer)
w_locals.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
# update global weights
w_glob = FedAvg(w_locals)
# copy weight to net_glob
net_glob.load_state_dict(w_glob)
# print loss
loss_avg = np.sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
print('saving the model at the end of epoch %d' % (iter))
save_networks(net_glob, iter)
for i, net_d in enumerate(FD):
save_networks(net_d, iter, local=True, local_no=i)
print('Evaluation ...')
# evaluator expects an ad_da forward that returns (image, latent).
validation = evaluator(dataset_val, args, writer,args.device)
validation.evaluate_recon(net_glob,iter)
writer.close()
| 8,221 | 43.443243 | 167 | py |
FL-MRCM | FL-MRCM-main/models/test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @python: 3.6
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from collections import defaultdict
import numpy as np
from utils import evaluate
import h5py
from tqdm import tqdm
def test_save_result(net_g, datatest, args):
"""Run reconstruction on a test set, compute per-volume NMSE/SSIM/PSNR,
and save reconstructions / ground truth / zero-filled inputs as HDF5.
Args:
net_g: trained network; for args.model == 'unet_ad_da' its forward returns
(image, latent), otherwise just the image.
datatest: slice dataset yielding (input, target, mean, std, norm, fname, slice).
args: parsed options; uses args.bs, args.model, args.save_dir.
NOTE(review): tensors are moved with .cuda() (not args.device), so this
function requires a CUDA-capable machine.
"""
net_g.eval()
# testing
data_loader = DataLoader(datatest, batch_size=args.bs, pin_memory=True)
test_logs =[]
with torch.no_grad():
for idx, batch in enumerate(data_loader):
input, target, mean, std,_, fname, slice = batch
if args.model == 'unet_ad_da':
output, _ = net_g(input.cuda())
else:
output = net_g(input.cuda())
# sum up batch loss
test_loss = F.l1_loss(output, target.cuda())
# Broadcast the per-slice normalization stats over H and W so the images
# can be un-normalized back to their original intensity range.
mean = mean.unsqueeze(1).unsqueeze(2).cuda()
std = std.unsqueeze(1).unsqueeze(2).cuda()
test_logs.append({
'fname': fname,
'slice': slice,
'output': (output * std + mean).cpu().detach().numpy(),
'target': (target.cuda() * std + mean).cpu().numpy(),
'input': (input.cuda() * std + mean).cpu().numpy(),
'loss': test_loss.cpu().detach().numpy(),
})
losses = []
# Group per-slice results by source filename so metrics are computed per volume.
outputs = defaultdict(list)
targets = defaultdict(list)
inputs = defaultdict(list)
for log in test_logs:
losses.append(log['loss'])
for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
outputs[fname].append((slice, log['output'][i]))
targets[fname].append((slice, log['target'][i]))
inputs[fname].append((slice, log['input'][i]))
print('loss len: ',len(losses))
metrics = dict(val_loss=losses, nmse=[], ssim=[], psnr=[])
outputs_save = defaultdict(list)
targets_save = defaultdict(list)
# Re-assemble each volume with slices sorted by slice index.
for fname in outputs:
outputs_save[fname] = np.stack([out for _, out in sorted(outputs[fname])])
targets_save[fname] = np.stack([tgt for _, tgt in sorted(targets[fname])])
inputs[fname] = np.stack([tgt for _, tgt in sorted(inputs[fname])])
for fname in outputs:
output = np.stack([out for _, out in sorted(outputs[fname])])
target = np.stack([tgt for _, tgt in sorted(targets[fname])])
metrics['nmse'].append(evaluate.nmse(target, output))
metrics['ssim'].append(evaluate.ssim(target, output))
metrics['psnr'].append(evaluate.psnr(target, output))
# Average each metric over volumes (val_loss is averaged over batches).
metrics = {metric: np.mean(values) for metric, values in metrics.items()}
print(metrics, '\n')
save_reconstructions(outputs_save, args.save_dir /'reconstructions')
save_reconstructions(targets_save, args.save_dir /'gt')
save_reconstructions(inputs, args.save_dir /'input')
#
def test_save_vector(net_g, datatest, args):
"""Extract the latent vector produced by an ad_da network for every slice
and save the per-volume stacks of vectors to HDF5 (via save_reconstructions).
Args:
net_g: network whose forward returns (image, latent_vector).
datatest: slice dataset yielding (input, target, mean, std, norm, fname, slice).
args: parsed options; uses args.local_bs and args.save_dir.
NOTE(review): tensors are moved with .cuda(), so a GPU is required.
"""
net_g.eval()
# testing
data_loader = DataLoader(datatest, batch_size=args.local_bs, pin_memory=True)
test_logs =[]
with torch.no_grad():
for idx, batch in tqdm(enumerate(data_loader)):
input, target, mean, std, _, fname, slice = batch
output, vector = net_g(input.cuda())
test_logs.append({
'fname': fname,
'slice': slice,
'vector': vector.cpu().detach().numpy()
})
# Group vectors by source filename, then stack them sorted by slice index.
vectors = defaultdict(list)
for log in test_logs:
for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
vectors[fname].append((slice, log['vector'][i]))
for fname in vectors:
vectors[fname] = np.stack([tgt for _, tgt in sorted(vectors[fname])])
print('save the result ...', '\n')
save_reconstructions(vectors, args.save_dir /'vector')
def save_reconstructions(reconstructions, out_dir):
    """Write one HDF5 file per input filename, in leaderboard-submission layout.

    Each entry of ``reconstructions`` maps an input filename to an array of
    shape (num_slices, height, width); the array is stored at
    ``out_dir / filename`` under the dataset name ``'reconstruction'``.

    Args:
        reconstructions (dict[str, np.array]): filename -> stacked volume.
        out_dir (pathlib.Path): destination directory (created if missing).
    """
    out_dir.mkdir(exist_ok=True, parents=True)
    for fname in reconstructions:
        volume = reconstructions[fname]
        with h5py.File(out_dir / fname, 'w') as hf:
            hf.create_dataset('reconstruction', data=volume)
class evaluator(object):
# Validation helper for ad_da models, i.e. networks whose forward returns
# (reconstructed image, latent vector). Computes per-volume NMSE/SSIM/PSNR
# over a held-out set and logs the averages to TensorBoard.
def __init__(self, datatest, args, writer,device):
# Args:
#   datatest: slice dataset yielding (input, target, mean, std, norm, fname, slice).
#   args: parsed options; only args.bs is used here.
#   writer: TensorBoard SummaryWriter, or None to skip logging.
#   device: torch device to run inference on.
self.args = args
self.device = device
self.writer = writer
self.data_loader = DataLoader(datatest, batch_size=args.bs, pin_memory=False)
def evaluate_recon(self,net_g, epoch=None):
# Evaluate net_g on the whole validation loader; epoch is only used as the
# x-axis value for the TensorBoard scalars.
net_g.eval()
# testing
test_logs = []
with torch.no_grad():
for idx, batch in enumerate(self.data_loader):
input, target, mean, std,_, fname, slice = batch
# ad_da forward: discard the latent, keep the reconstruction.
output,_ = net_g(input.to(self.device))
# sum up batch loss
test_loss = F.l1_loss(output, target.to(self.device))
# Broadcast per-slice normalization stats over H and W to un-normalize.
mean = mean.unsqueeze(1).unsqueeze(2).to(self.device)
std = std.unsqueeze(1).unsqueeze(2).to(self.device)
test_logs.append({
'fname': fname,
'slice': slice,
'output': (output * std + mean).cpu().detach().numpy(),
'target': (target.to(self.device) * std + mean).cpu().numpy(),
'loss': test_loss.cpu().detach().numpy(),
})
losses = []
# Group per-slice results by source filename to score per volume.
outputs = defaultdict(list)
targets = defaultdict(list)
for log in test_logs:
losses.append(log['loss'])
for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
outputs[fname].append((slice, log['output'][i]))
targets[fname].append((slice, log['target'][i]))
print('loss len: ', len(losses))
metrics = dict(val_loss=losses, nmse=[], ssim=[], psnr=[])
for fname in outputs:
# Stack each volume's slices sorted by slice index.
output = np.stack([out for _, out in sorted(outputs[fname])])
target = np.stack([tgt for _, tgt in sorted(targets[fname])])
metrics['nmse'].append(evaluate.nmse(target, output))
metrics['ssim'].append(evaluate.ssim(target, output))
metrics['psnr'].append(evaluate.psnr(target, output))
metrics = {metric: np.mean(values) for metric, values in metrics.items()}
print(metrics, '\n')
print('No. Slices: ', len(outputs))
if self.writer != None:
self.writer.add_scalar('Dev_Loss/NMSE', metrics['nmse'], epoch)
self.writer.add_scalar('Dev_Loss/SSIM', metrics['ssim'], epoch)
self.writer.add_scalar('Dev_Loss/PSNR', metrics['psnr'], epoch)
torch.cuda.empty_cache()
class evaluator_normal(object):
# Same validation logic as `evaluator`, but for plain models whose forward
# returns only the reconstructed image (e.g. UnetModel). Computes per-volume
# NMSE/SSIM/PSNR and logs the averages to TensorBoard.
def __init__(self, datatest, args, writer,device):
# Args:
#   datatest: slice dataset yielding (input, target, mean, std, norm, fname, slice).
#   args: parsed options; only args.bs is used here.
#   writer: TensorBoard SummaryWriter, or None to skip logging.
#   device: torch device to run inference on.
self.args = args
self.device = device
self.writer = writer
self.data_loader = DataLoader(datatest, batch_size=args.bs, pin_memory=False)
def evaluate_recon(self,net_g, epoch=None):
# Evaluate net_g on the whole validation loader; epoch is only used as the
# x-axis value for the TensorBoard scalars.
net_g.eval()
# testing
test_logs = []
with torch.no_grad():
for idx, batch in enumerate(self.data_loader):
input, target, mean, std,_, fname, slice = batch
output = net_g(input.to(self.device))
# sum up batch loss
test_loss = F.l1_loss(output, target.to(self.device))
# Broadcast per-slice normalization stats over H and W to un-normalize.
mean = mean.unsqueeze(1).unsqueeze(2).to(self.device)
std = std.unsqueeze(1).unsqueeze(2).to(self.device)
test_logs.append({
'fname': fname,
'slice': slice,
'output': (output * std + mean).cpu().detach().numpy(),
'target': (target.to(self.device) * std + mean).cpu().numpy(),
'loss': test_loss.cpu().detach().numpy(),
})
losses = []
# Group per-slice results by source filename to score per volume.
outputs = defaultdict(list)
targets = defaultdict(list)
for log in test_logs:
losses.append(log['loss'])
for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
outputs[fname].append((slice, log['output'][i]))
targets[fname].append((slice, log['target'][i]))
print('loss len: ', len(losses))
metrics = dict(val_loss=losses, nmse=[], ssim=[], psnr=[])
for fname in outputs:
# Stack each volume's slices sorted by slice index.
output = np.stack([out for _, out in sorted(outputs[fname])])
target = np.stack([tgt for _, tgt in sorted(targets[fname])])
metrics['nmse'].append(evaluate.nmse(target, output))
metrics['ssim'].append(evaluate.ssim(target, output))
metrics['psnr'].append(evaluate.psnr(target, output))
metrics = {metric: np.mean(values) for metric, values in metrics.items()}
print(metrics, '\n')
print('No. Slices: ', len(outputs))
if self.writer != None:
self.writer.add_scalar('Dev_Loss/NMSE', metrics['nmse'], epoch)
self.writer.add_scalar('Dev_Loss/SSIM', metrics['ssim'], epoch)
self.writer.add_scalar('Dev_Loss/PSNR', metrics['psnr'], epoch)
torch.cuda.empty_cache()
| 9,725 | 44.877358 | 91 | py |
FL-MRCM | FL-MRCM-main/models/unet_model.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class ConvBlock(nn.Module):
    """
    Two identical stages of Conv2d(3x3, pad 1, no bias) -> InstanceNorm2d ->
    LeakyReLU(0.2) -> Dropout2d, applied back to back. Spatial size is
    preserved; only the channel count changes (in_chans -> out_chans).
    """

    def __init__(self, in_chans, out_chans, drop_prob):
        """
        Args:
            in_chans (int): Number of channels in the input.
            out_chans (int): Number of channels in the output.
            drop_prob (float): Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob
        # Build both stages with a loop; only the first conv changes channels.
        stages = []
        for c_in, c_out in ((in_chans, out_chans), (out_chans, out_chans)):
            stages.append(nn.Conv2d(c_in, c_out, kernel_size=3, padding=1, bias=False))
            stages.append(nn.InstanceNorm2d(c_out))
            stages.append(nn.LeakyReLU(negative_slope=0.2, inplace=True))
            stages.append(nn.Dropout2d(drop_prob))
        # Keep everything in one nn.Sequential so state_dict keys stay
        # 'layers.<i>.*', matching existing checkpoints.
        self.layers = nn.Sequential(*stages)

    def forward(self, input):
        """
        Args:
            input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]
        Returns:
            (torch.Tensor): Output tensor of shape [batch_size, self.out_chans, height, width]
        """
        return self.layers(input)

    def __repr__(self):
        return f'ConvBlock(in_chans={self.in_chans}, out_chans={self.out_chans}, ' \
            f'drop_prob={self.drop_prob})'
class TransposeConvBlock(nn.Module):
    """
    A single up-sampling stage: ConvTranspose2d(k=2, s=2, no bias) ->
    InstanceNorm2d -> LeakyReLU(0.2). Doubles the spatial resolution and maps
    in_chans channels to out_chans.
    """

    def __init__(self, in_chans, out_chans):
        """
        Args:
            in_chans (int): Number of channels in the input.
            out_chans (int): Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        ops = [
            nn.ConvTranspose2d(in_chans, out_chans, kernel_size=2, stride=2, bias=False),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        ]
        # One nn.Sequential keeps state_dict keys as 'layers.<i>.*' for
        # checkpoint compatibility.
        self.layers = nn.Sequential(*ops)

    def forward(self, input):
        """
        Args:
            input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]
        Returns:
            (torch.Tensor): Output tensor of shape [batch_size, self.out_chans, 2*height, 2*width]
        """
        return self.layers(input)

    def __repr__(self):
        # NOTE(review): the label says 'ConvBlock' in the original; preserved
        # byte-for-byte so printed model summaries do not change.
        return f'ConvBlock(in_chans={self.in_chans}, out_chans={self.out_chans})'
class UnetModel(nn.Module):
"""
PyTorch implementation of a U-Net model.
This is based on:
Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks
for biomedical image segmentation. In International Conference on Medical image
computing and computer-assisted intervention, pages 234–241. Springer, 2015.
"""
def __init__(self, in_chans, out_chans, chans, num_pool_layers, drop_prob):
"""
Args:
in_chans (int): Number of channels in the input to the U-Net model.
out_chans (int): Number of channels in the output to the U-Net model.
chans (int): Number of output channels of the first convolution layer.
num_pool_layers (int): Number of down-sampling and up-sampling layers.
drop_prob (float): Dropout probability.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.chans = chans
self.num_pool_layers = num_pool_layers
self.drop_prob = drop_prob
# Encoder: num_pool_layers ConvBlocks, doubling channels at each stage.
self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
ch = chans
for i in range(num_pool_layers - 1):
self.down_sample_layers += [ConvBlock(ch, ch * 2, drop_prob)]
ch *= 2
# Bottleneck at the coarsest resolution, with ch * 2 channels.
self.conv = ConvBlock(ch, ch * 2, drop_prob)
# Decoder: transpose-conv upsampling followed by a ConvBlock applied to the
# concatenation with the matching encoder (skip) feature map; the final
# stage ends with a 1x1 conv producing out_chans channels.
self.up_conv = nn.ModuleList()
self.up_transpose_conv = nn.ModuleList()
for i in range(num_pool_layers - 1):
self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]
self.up_conv += [ConvBlock(ch * 2, ch, drop_prob)]
ch //= 2
self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]
self.up_conv += [
nn.Sequential(
ConvBlock(ch * 2, ch, drop_prob),
nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
)]
def forward(self, input):
"""
Args:
input (torch.Tensor): Input tensor of shape [batch_size, height, width];
a singleton channel dimension is added internally (unsqueeze below),
so this forward effectively assumes in_chans == 1.
Returns:
(torch.Tensor): Output tensor of shape [batch_size, height, width]
(the channel dimension is squeezed out before returning).
"""
stack = []
output = input.unsqueeze(1)
# Apply down-sampling layers; keep each pre-pool feature map for the skips.
for i, layer in enumerate(self.down_sample_layers):
output = layer(output)
stack.append(output)
output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
output = self.conv(output)
# Apply up-sampling layers, consuming the skip stack in reverse order.
for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
downsample_layer = stack.pop()
output = transpose_conv(output)
# Reflect pad on the right/bottom if needed to handle odd input dimensions.
padding = [0, 0, 0, 0]
if output.shape[-1] != downsample_layer.shape[-1]:
padding[1] = 1 # Padding right
if output.shape[-2] != downsample_layer.shape[-2]:
padding[3] = 1 # Padding bottom
if sum(padding) != 0:
output = F.pad(output, padding, "reflect")
output = torch.cat([output, downsample_layer], dim=1)
output = conv(output)
return output.squeeze(1)
class UnetModel_ad_da(nn.Module):
"""
PyTorch implementation of a U-Net model.
This is based on:
Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks
for biomedical image segmentation. In International Conference on Medical image
computing and computer-assisted intervention, pages 234–241. Springer, 2015.
This variant additionally returns a flattened bottleneck feature vector `z`
for adversarial domain alignment (consumed by Feature_discriminator).
"""
def __init__(self, in_chans, out_chans, chans, num_pool_layers, drop_prob):
"""
Args:
in_chans (int): Number of channels in the input to the U-Net model.
out_chans (int): Number of channels in the output to the U-Net model.
chans (int): Number of output channels of the first convolution layer.
num_pool_layers (int): Number of down-sampling and up-sampling layers.
drop_prob (float): Dropout probability.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.chans = chans
self.num_pool_layers = num_pool_layers
self.drop_prob = drop_prob
# Encoder: num_pool_layers ConvBlocks, doubling channels at each stage.
self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
ch = chans
for i in range(num_pool_layers - 1):
self.down_sample_layers += [ConvBlock(ch, ch * 2, drop_prob)]
ch *= 2
# Bottleneck at the coarsest resolution, with ch * 2 channels.
self.conv = ConvBlock(ch, ch * 2, drop_prob)
# Decoder mirrors the encoder; final stage ends with a 1x1 conv.
self.up_conv = nn.ModuleList()
self.up_transpose_conv = nn.ModuleList()
for i in range(num_pool_layers - 1):
self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]
self.up_conv += [ConvBlock(ch * 2, ch, drop_prob)]
ch //= 2
self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]
self.up_conv += [
nn.Sequential(
ConvBlock(ch * 2, ch, drop_prob),
nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
)]
def forward(self, input):
"""
Args:
input (torch.Tensor): Input tensor of shape [batch_size, height, width];
a singleton channel dimension is added internally (unsqueeze below).
Returns:
(torch.Tensor, torch.Tensor): reconstruction of shape
[batch_size, height, width], and latent `z` of shape
[batch_size, bottleneck_chans * 4] (bottleneck pooled to 2x2 and
flattened; with chans=32 and 4 pool layers this is 2048, matching
Feature_discriminator's default input_dim).
"""
stack = []
output = input.unsqueeze(1)
# Apply down-sampling layers; keep each pre-pool feature map for the skips.
for i, layer in enumerate(self.down_sample_layers):
output = layer(output)
stack.append(output)
output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
output = self.conv(output)
# Domain-alignment latent: bottleneck pooled to 2x2, flattened per sample.
z = F.adaptive_avg_pool2d(output,2).view(output.shape[0],-1)
# Apply up-sampling layers, consuming the skip stack in reverse order.
for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
downsample_layer = stack.pop()
output = transpose_conv(output)
# Reflect pad on the right/bottom if needed to handle odd input dimensions.
padding = [0, 0, 0, 0]
if output.shape[-1] != downsample_layer.shape[-1]:
padding[1] = 1 # Padding right
if output.shape[-2] != downsample_layer.shape[-2]:
padding[3] = 1 # Padding bottom
if sum(padding) != 0:
output = F.pad(output, padding, "reflect")
output = torch.cat([output, downsample_layer], dim=1)
output = conv(output)
return output.squeeze(1), z
class Feature_discriminator(nn.Module):
def __init__(self, input_dim=2048):
super(Feature_discriminator, self).__init__()
self.fc1 = nn.Linear(input_dim, 256)
self.fc2 = nn.Linear(256, 2)
def forward(self, x):
x = F.leaky_relu(self.fc1(x), 0.2)
x = F.leaky_relu(self.fc2(x), 0.2)
return x
| 10,036 | 35.234657 | 98 | py |
FL-MRCM | FL-MRCM-main/models/Fed.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import copy
import torch
from torch import nn
def FedAvg(w):
    """Federated averaging of client model weights.

    Args:
        w (list[dict]): client state_dicts with identical keys and tensor
            shapes.

    Returns:
        dict: a fresh state_dict whose every tensor is the element-wise mean
        over all clients. The input dicts are left unmodified (the
        accumulation happens on a deep copy of the first one).
    """
    num_clients = len(w)
    averaged = copy.deepcopy(w[0])
    for key in averaged.keys():
        # Accumulate the remaining clients in place on the copy, then divide.
        for client in range(1, num_clients):
            averaged[key] += w[client][key]
        averaged[key] = torch.div(averaged[key], num_clients)
    return averaged
| 322 | 18 | 46 | py |
FL-MRCM | FL-MRCM-main/models/recon_Update.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
import time
import numpy as np
from torch.autograd import Variable
from torch.nn import functional as F
class LocalUpdate(object):
# One federated client for plain FL-MR: wraps a local slice dataset and runs
# args.local_ep epochs of supervised L1 reconstruction training on a copy of
# the global model, then hands the updated weights back for FedAvg.
def __init__(self, args, device, dataset=None):
self.args = args
self.device = device
# Image-domain reconstruction loss.
self.loss_func = nn.L1Loss().to(device)
self.selected_clients = []
self.ldr_train = DataLoader(dataset, batch_size=self.args.local_bs, shuffle=True, pin_memory=True)
def train(self, net, epoch, idx, writer):
# Train `net` locally for args.local_ep epochs.
# Args:
#   net: model copy updated in place by this client.
#   epoch (int): global communication round (used for LR schedule/logging).
#   idx (int): client index (selects the dataset name for the TB tag).
#   writer: TensorBoard SummaryWriter.
# Returns:
#   (state_dict, mean-per-epoch loss, net) — callers may ignore the third.
net.train()
# train and update
optimizer = torch.optim.RMSprop(net.parameters(), lr=self.args.lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.1)
epoch_loss = []
for iter in range(self.args.local_ep):
batch_loss = []
iter_data_time = time.time()
for batch_idx, batch in enumerate(self.ldr_train):
# Batch layout (matches SliceData/DataTransform): normalized input image,
# target image, per-slice mean/std, norm factor, filename, slice index.
input, target, mean, std, norm, fname, slice = batch
net.zero_grad()
output = net(input.to(self.device))
loss = self.loss_func(output, target.to(self.device))
loss.backward()
optimizer.step()
# NOTE(review): scheduler.step(epoch) is invoked once per *batch* with the
# global round number; the explicit-epoch form of step() is deprecated in
# newer PyTorch. Re-applying the same epoch each batch makes this act as a
# per-round LR lookup rather than a per-step decay — confirm intent before
# changing.
scheduler.step(epoch)
if self.args.verbose and batch_idx % 10 == 0:
print('Update Epoch: {} Local: {} idx: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, iter, idx, batch_idx * len(input), len(self.ldr_train.dataset),
100. * batch_idx / len(self.ldr_train), loss.detach().item()))
t_comp = (time.time() - iter_data_time)
iter_data_time = time.time()
print('itr time: ',t_comp)
print('lr: ',optimizer.param_groups[0]['lr'])
batch_loss.append(loss.detach().item())
epoch_loss.append(sum(batch_loss)/len(batch_loss))
# Log the client's mean training loss under its dataset name.
writer.add_scalar('TrainLoss/L1/'+ self.args.train_datasets[idx], sum(epoch_loss) / len(epoch_loss), epoch)
torch.cuda.empty_cache()
return net.state_dict(), sum(epoch_loss) / len(epoch_loss), net
class LocalUpdate_ad_da(object):
    # v1
    # Local client update with adversarial domain adaptation: a feature
    # discriminator (net_fd) learns to tell source-domain latents from
    # target-domain latents, while the reconstruction network is trained to
    # fool it, pushing the two feature distributions together.
    def __init__(self, args, device, dataset, dataset_target,optimizer,optimizer_fd,flag):
        # args: experiment options (reads local_bs, local_ep, verbose, train_datasets)
        # dataset / dataset_target: source- and target-domain training sets
        # optimizer / optimizer_fd: optimizers for the main net and the
        #   feature discriminator, respectively
        # flag: when truthy the adversarial branch is skipped (plain L1 training)
        self.args = args
        self.device = device
        self.loss_func = nn.L1Loss().to(device)
        self.adv_loss = nn.BCEWithLogitsLoss().to(device)
        self.selected_clients = []
        self.ldr_train = DataLoader(dataset, batch_size=self.args.local_bs, shuffle=True, pin_memory=True)
        self.target_domain = DataLoader(dataset_target, batch_size=self.args.local_bs, shuffle=True, pin_memory=True)
        self.optimizer = optimizer
        self.optimizer_fd = optimizer_fd
        self.flag = flag
    def nmse_loss(self,output,target,std, mean, norm):
        # MSE computed after undoing instance normalization (x * std + mean)
        # and rescaling by the volume norm, i.e. an NMSE-style loss in the
        # original intensity range. (Not used inside train() itself.)
        return F.mse_loss((output * std + mean) / norm, (target * std + mean) / norm)
    def set_requires_grad(self, nets, requires_grad=False):
        """Set requires_grad for all the given networks to avoid unnecessary computations.
        Parameters:
            nets (network list)   -- a single network or a list of networks
            requires_grad (bool)  -- whether the networks require gradients or not
        """
        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad
    def train(self, net, net_fd, epoch, idx, writer):
        # Runs self.args.local_ep local epochs of adversarial training on this
        # client and returns (state_dict, mean total loss) for aggregation.
        net.train()
        net_fd.train()
        # train and update
        optimizer = self.optimizer
        optimizer_fd = self.optimizer_fd
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.1)
        epoch_loss = []
        epoch_loss_L1 = []
        epoch_loss_adv_g = []
        epoch_loss_adv_d = []
        for iter in range(self.args.local_ep):
            batch_loss = []
            batch_loss_L1 = []
            batch_loss_adv_g = []
            batch_loss_adv_d = []
            iter_data_time = time.time()
            # Iterate source and target loaders in lockstep; zip() stops at
            # the shorter of the two loaders.
            for batch_idx, (batch, batch_t) in enumerate(zip(self.ldr_train,self.target_domain)):
                input, target, mean, std, norm, fname, slice = batch
                input_t,target_t, _,_,_,_,_ = batch_t
                net.zero_grad()
                net_fd.zero_grad()
                # Forward both domains; z / z_t are the latent features fed
                # to the domain discriminator.
                output,z = net(input.to(self.device))
                output_t, z_t = net(input_t.to(self.device))
                mean = mean.unsqueeze(1).unsqueeze(2).to(self.device)
                std = std.unsqueeze(1).unsqueeze(2).to(self.device)
                norm = norm.unsqueeze(1).unsqueeze(2).to(self.device)
                # prepare labels for fd: one-hot [1, 0] = source, [0, 1] = target
                src_domain_code = np.repeat(np.array([[*([1]), *([0])]]), input.shape[0], axis=0)
                tgt_domain_code = np.repeat(np.array([[*([0]), *([1])]]), input_t.shape[0], axis=0)
                src_domain_code = Variable(torch.FloatTensor(src_domain_code).to(self.device), requires_grad=False)
                tgt_domain_code = Variable(torch.FloatTensor(tgt_domain_code).to(self.device), requires_grad=False)
                # update DF for source site
                df_loss = 0
                if not self.flag:
                    # Discriminator step: latents are detached so only net_fd
                    # updates; it is trained against the flipped codes (1 - code).
                    self.set_requires_grad(net_fd, True)
                    src_domain_pred = net_fd(z.detach())
                    tgt_domain_pred = net_fd(z_t.detach())
                    # adv loss for DF
                    df_loss_src = self.adv_loss(src_domain_pred, 1 - src_domain_code)
                    df_loss_tgt = self.adv_loss(tgt_domain_pred, 1 - tgt_domain_code)
                    df_loss = ((df_loss_src + df_loss_tgt)/2)
                    df_loss.backward()
                    optimizer_fd.step()
                    df_loss = df_loss.detach().item()
                    batch_loss_adv_d.append(df_loss)
                # update Recon
                # Generator step: freeze the discriminator and train the
                # reconstruction net to make it predict the un-flipped codes.
                self.set_requires_grad(net_fd, False)
                loss_adv_g = 0
                if not self.flag:
                    # adv for recon net
                    src_domain_pred = net_fd(z)
                    tgt_domain_pred = net_fd(z_t)
                    df_loss_src = self.adv_loss(src_domain_pred, src_domain_code)
                    df_loss_tgt = self.adv_loss(tgt_domain_pred, tgt_domain_code)
                    loss_adv_g =((df_loss_src+df_loss_tgt)/2)
                    batch_loss_adv_g.append(loss_adv_g.detach().item())
                L1_loss = self.loss_func(output, target.to(self.device))
                batch_loss_L1.append(L1_loss.detach().item())
                if not self.flag:
                    loss = L1_loss + loss_adv_g
                else:
                    loss = L1_loss
                batch_loss.append(loss.detach().item())
                loss.backward()
                optimizer.step()
                # NOTE(review): stepping an epoch-based StepLR once per batch
                # with the same `epoch` looks unintended -- confirm.
                scheduler.step(epoch)
                if self.args.verbose and batch_idx % 10 == 0:
                    print('Update Epoch: {} Local: {} idx: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                        epoch, iter, idx, batch_idx * len(input), len(self.ldr_train.dataset),
                        100. * batch_idx / len(self.ldr_train), loss.detach().item()))
                    t_comp = (time.time() - iter_data_time)
                    iter_data_time = time.time()
                    print('itr time: ',t_comp)
                    print('lr: ',optimizer.param_groups[0]['lr'])
            epoch_loss.append(sum(batch_loss)/len(batch_loss))
            epoch_loss_L1.append(sum(batch_loss_L1)/len(batch_loss_L1))
            epoch_loss_adv_g.append(sum(batch_loss_adv_g)/len(batch_loss_adv_g))
            epoch_loss_adv_d.append(sum(batch_loss_adv_d)/len(batch_loss_adv_d))
        # Log per-dataset averages for this communication round.
        writer.add_scalar('TrainLoss/L1/'+ self.args.train_datasets[idx], sum(epoch_loss_L1) / len(epoch_loss_L1), epoch)
        writer.add_scalar('TrainLoss/total_loss/' + self.args.train_datasets[idx], sum(epoch_loss) / len(epoch_loss),epoch)
        writer.add_scalar('TrainLoss/adv_g/' + self.args.train_datasets[idx], sum(epoch_loss_adv_g) / len(epoch_loss_adv_g), epoch)
        writer.add_scalar('TrainLoss/adv_d/' + self.args.train_datasets[idx], sum(epoch_loss_adv_d) / len(epoch_loss_adv_d), epoch)
        torch.cuda.empty_cache()
        return net.state_dict(), sum(epoch_loss) / len(epoch_loss)
| 8,565 | 46.588889 | 131 | py |
FL-MRCM | FL-MRCM-main/utils/preprocess_datasets_brats.py |
import os
import h5py
import pathlib
from data import transforms
import numpy as np
import torch
import nibabel as nib
from tqdm import tqdm
def mkdir(folder):
    """Create *folder* (including parents) if it does not already exist."""
    if os.path.exists(folder):
        return
    os.makedirs(folder)
def main():
    """Convert BraTS NIfTI volumes into fastMRI-style HDF5 files.

    Every case folder under ``root_dir`` is scanned for the requested
    ``sequence`` volume, which is zero-padded to at least 256x256 in-plane,
    transposed to a (slices, H, W) float32 stack, and written together with a
    synthetic k-space (imaginary channel set to zero) to ``root_out_dir``.
    Cases whose output file already exists are skipped.
    """
    root_dir ='path to /MICCAI_BraTS2020_ValidationData'
    root_out_dir = 'path to /BraTS2020/T1/val'
    sequence = 't1'
    data_dir = pathlib.Path(os.path.join(root_dir))
    out_dir = pathlib.Path(os.path.join(root_out_dir))
    mkdir(out_dir)
    list_shape = []
    list_final_shape = []
    for data_file in tqdm(data_dir.iterdir()):
        fname = os.path.basename(data_file)
        idx = fname
        if not os.path.isfile(os.path.join(out_dir, fname) + '.h5'):
            print('%s processing' % os.path.join(out_dir, fname))
        else:
            continue  # already converted
        nii_path = os.path.join(data_dir, fname, fname + '_' + sequence + '.nii.gz')
        try:
            img = nib.load(nii_path)
        except Exception:  # narrowed from bare `except:` so Ctrl-C still interrupts
            print('No file: ', nii_path)
            continue
        img_np = np.array(img.dataobj)
        list_shape.append(img_np.shape)
        pad_0 = (0, 0)
        pad_1 = (0, 0)
        # make img to 256 x 256, you may ignore or change this step in your need
        if img_np.shape[0] < 256:
            pad_n = int((256 - img_np.shape[0]) / 2)
            pad_0 = (pad_n, 256 - pad_n - img_np.shape[0])
        if img_np.shape[1] < 256:
            pad_n = int((256 - img_np.shape[1]) / 2)
            pad_1 = (pad_n, 256 - pad_n - img_np.shape[1])
        img_np = np.pad(img_np, (pad_0, pad_1, (0, 0)))
        # slice axis first: (H, W, S) -> (S, H, W)
        img_np = np.transpose(img_np, (2, 0, 1)).astype(np.float32)
        list_final_shape.append(img_np.shape)
        img_tensor = transforms.to_tensor(img_np).unsqueeze(-1)
        fake_imag = torch.zeros_like(img_tensor)  # make imaginary channel as zero
        img_tensor_complex = torch.cat((img_tensor, fake_imag), dim=-1)  # construct complex image
        kspace = transforms.fft2(img_tensor_complex)
        kspace = transforms.to_numpy(kspace)
        max_val = np.max(img_np)  # renamed from `max`: do not shadow the builtin
        norm = np.linalg.norm(img_np)
        acquisition = 'str'
        # construct fastMRI style data format; pass mode 'a' explicitly --
        # h5py >= 3.0 defaults to read-only 'r', which breaks file creation
        with h5py.File(os.path.join(out_dir, idx + '.h5'), 'a') as data:
            data.create_dataset('reconstruction_esc', data=img_np)
            data.create_dataset('kspace', data=kspace)
            data.attrs['max'] = max_val
            data.attrs['norm'] = norm
            data.attrs['acquisition'] = acquisition
            data.flush()
    print('input shapes', set(list_shape))
    print('final shapes', set(list_final_shape))
# Script entry point: run the NIfTI -> fastMRI-style HDF5 conversion.
if __name__ == '__main__':
    main()
FL-MRCM | FL-MRCM-main/utils/sampling.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import numpy as np
from torchvision import datasets, transforms
from collections import OrderedDict
def mnist_iid(dataset, num_users):
    """Split MNIST uniformly at random into ``num_users`` disjoint index sets.

    Args:
        dataset: any sized dataset (only ``len()`` is used).
        num_users: number of clients to partition over.
    Returns:
        dict mapping client id -> set of sample indices.
    """
    per_user = int(len(dataset) / num_users)
    remaining = list(range(len(dataset)))
    assignment = {}
    for user in range(num_users):
        chosen = set(np.random.choice(remaining, per_user, replace=False))
        assignment[user] = chosen
        remaining = list(set(remaining) - chosen)
    return assignment
def mnist_noniid(dataset, num_users):
    """Partition MNIST non-IID: each client receives 2 label-sorted shards.

    The 60k samples are ordered by label, cut into 200 shards of 300 images,
    and every client is handed 2 randomly drawn shards, so each client only
    sees a small subset of the digit classes.

    Args:
        dataset: object exposing ``train_labels`` (a tensor with ``.numpy()``).
        num_users: number of clients.
    Returns:
        dict mapping client id -> int64 numpy array of sample indices.
    """
    num_shards, num_imgs = 200, 300
    available_shards = list(range(num_shards))
    sample_ids = np.arange(num_shards * num_imgs)
    labels = dataset.train_labels.numpy()
    # Order sample indices by their label so each shard is class-homogeneous.
    stacked = np.vstack((sample_ids, labels))
    stacked = stacked[:, stacked[1, :].argsort()]
    sorted_ids = stacked[0, :]
    dict_users = {uid: np.array([], dtype='int64') for uid in range(num_users)}
    # divide and assign: 2 shards per client, drawn without replacement
    for uid in range(num_users):
        picked = set(np.random.choice(available_shards, 2, replace=False))
        available_shards = list(set(available_shards) - picked)
        for shard in picked:
            shard_ids = sorted_ids[shard * num_imgs:(shard + 1) * num_imgs]
            dict_users[uid] = np.concatenate((dict_users[uid], shard_ids), axis=0)
    return dict_users
def cifar_iid(dataset, num_users):
    """Split CIFAR10 uniformly at random into ``num_users`` disjoint index sets.

    Args:
        dataset: any sized dataset (only ``len()`` is used).
        num_users: number of clients.
    Returns:
        dict mapping client id -> set of sample indices.
    """
    share = int(len(dataset) / num_users)
    pool = [sample for sample in range(len(dataset))]
    partition = {}
    for client in range(num_users):
        drawn = np.random.choice(pool, share, replace=False)
        partition[client] = set(drawn)
        pool = list(set(pool) - partition[client])
    return partition
def recon_iid(dataset, num_users):
    """Split a reconstruction dataset IID at the *volume* level.

    Slices are first grouped by the volume (file) they belong to; whole
    volumes are then dealt out uniformly at random so that every slice of a
    given volume ends up with the same client.

    Args:
        dataset: dataset whose ``examples`` entries are tuples starting with a
            path-like object (``example[0].name`` identifies the volume).
        num_users: number of clients.
    Returns:
        dict mapping client id -> set of slice indices.
    """
    # Group slice indices by volume name, preserving first-seen order.
    slices_by_volume = OrderedDict()
    for slice_idx, example in enumerate(dataset.examples):
        volume = example[0].name
        slices_by_volume.setdefault(volume, []).append(slice_idx)
    volume_names = list(slices_by_volume.keys())
    # Assign volume-level ids uniformly at random.
    per_user = int(len(volume_names) / num_users)
    remaining = [i for i in range(len(volume_names))]
    volume_assignment = {}
    for user in range(num_users):
        volume_assignment[user] = set(np.random.choice(remaining, per_user, replace=False))
        remaining = list(set(remaining) - volume_assignment[user])
    # Expand each client's volumes into slice-level ids.
    dict_users = {}
    for user in range(num_users):
        slice_ids = []
        for vol_idx in volume_assignment[user]:
            slice_ids += slices_by_volume[volume_names[vol_idx]]
        dict_users[user] = set(slice_ids)
    return dict_users
# Manual smoke test: download MNIST and build a non-IID split for 100 clients.
if __name__ == '__main__':
    dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.1307,), (0.3081,))
                                   ]))
    num = 100
    d = mnist_noniid(dataset_train, num)
| 3,579 | 31.844037 | 106 | py |
FL-MRCM | FL-MRCM-main/data/volume_sampler.py | import torch
import numpy as np
from torch.utils.data import Sampler
import torch.distributed as dist
class VolumeSampler(Sampler):
    """Distributed sampler that keeps every slice of a volume on one node.

    Behaves like pytorch's DistributedSampler except that the unit of
    assignment is the volume: dataset examples are tuples ``(fname, ...)``
    and all examples sharing a ``fname`` are routed to the same rank.
    """

    def __init__(self, dataset):
        """
        Args:
            dataset: Dataset used for sampling.
        """
        self.dataset = dataset
        self.world_size = dist.get_world_size()
        self.rank = dist.get_rank()
        self.epoch = 0
        # Deterministic volume -> rank split, identical on every node.
        self.all_volume_names = np.array(sorted(list({example[0] for example in self.dataset.examples})))
        self.all_volumes_split = np.array_split(self.all_volume_names, self.world_size)
        # Volumes owned by this rank, and the example indices mapping to them.
        self.volumes = self.all_volumes_split[self.rank]
        owned = [i for i, example in enumerate(self.dataset.examples) if example[0] in self.volumes]
        self.total_size = len(self.dataset.examples)
        self.indices = np.array(owned)
        self.num_samples = len(self.indices)

    def __iter__(self):
        # Deterministic shuffle, re-seeded per epoch via set_epoch().
        gen = torch.Generator()
        gen.manual_seed(self.epoch)
        order = torch.randperm(self.num_samples, generator=gen).tolist()
        return iter(self.indices[order].tolist())

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
| 1,738 | 30.618182 | 105 | py |
FL-MRCM | FL-MRCM-main/data/mri_data.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pathlib
import random
import numpy as np
import h5py
from torch.utils.data import Dataset
from data import transforms
import torch
class SliceData(Dataset):
    """
    A PyTorch Dataset that provides access to MR image slices stored as
    fastMRI-style HDF5 volumes (one file per volume, one example per slice).
    """

    def __init__(self, root, transform, challenge, sequence, sample_rate, seed=42):
        """
        Args:
            root (pathlib.Path): Path to the dataset.
            transform (callable): A callable object that pre-processes the raw data into
                appropriate form. The transform function should take 'kspace', 'target',
                'attributes', 'filename', and 'slice' as inputs. 'target' may be null
                for test data.
            challenge (str): "singlecoil" or "multicoil" depending on which challenge to use.
            sequence: accepted but never read; kept for caller compatibility.
            sample_rate (float, optional): A float between 0 and 1. This controls what fraction
                of the volumes should be loaded.
            seed (int): seed for the shuffle used when subsampling volumes.
        """
        if challenge not in ('singlecoil', 'multicoil'):
            raise ValueError('challenge should be either "singlecoil" or "multicoil"')
        self.transform = transform
        self.recons_key = 'reconstruction_esc' if challenge == 'singlecoil' \
            else 'reconstruction_rss'
        # Dataset name taken from the path layout, e.g.
        # .../BraTS2020/T1/val -> 'BraTS2020'.
        dataset_name = root.parts[-3]
        self.examples = []
        files = list(pathlib.Path(root).iterdir())
        print('Loading dataset :', root)
        random.seed(seed)
        # Optionally keep only a random fraction of the volumes.
        if sample_rate < 1:
            random.shuffle(files)
            num_files = round(len(files) * sample_rate)
            files = files[:num_files]
        for fname in sorted(files):
            data = h5py.File(fname, 'r')
            # Padding attributes are filled with None here; downstream code
            # receives them via attrs in __getitem__.
            padding_left = None
            padding_right = None
            kspace = data['kspace']
            num_slices = kspace.shape[0]
            num_start = 0
            # BraTS volumes: restrict to slices 14..134, skipping the largely
            # empty boundary slices.
            if dataset_name == 'BraTS2020':
                num_start = 14
                num_slices = 135
            self.examples += [(fname, slice, padding_left, padding_right) for slice in range(num_start, num_slices)]

    def __len__(self):
        # One example per (volume, slice) pair.
        return len(self.examples)

    def __getitem__(self, i):
        fname, slice, padding_left, padding_right = self.examples[i]
        # Re-open the HDF5 file per access (safe with multi-worker loaders).
        with h5py.File(fname, 'r') as data:
            kspace = data['kspace'][slice]
            mask = np.asarray(data['mask']) if 'mask' in data else None
            target = data[self.recons_key][slice] if self.recons_key in data else None
            attrs = dict(data.attrs)
            attrs['padding_left'] = padding_left
            attrs['padding_right'] = padding_right
            return self.transform(kspace, mask, target, attrs, fname.name, slice)
class DataTransform:
    """
    Data Transformer for training U-Net models: mask k-space, reconstruct a
    zero-filled image, crop, take magnitude, and normalize input and target.
    """

    def __init__(self, resolution, which_challenge, mask_func=None, use_seed=True):
        """
        Args:
            mask_func (common.subsample.MaskFunc): A function that can create a mask of
                appropriate shape. If None, the fully sampled k-space is used.
            resolution (int): Resolution of the image.
            which_challenge (str): Either "singlecoil" or "multicoil" denoting the dataset.
            use_seed (bool): If true, this class computes a pseudo random number generator seed
                from the filename. This ensures that the same mask is used for all the slices of
                a given volume every time.
        """
        if which_challenge not in ('singlecoil', 'multicoil'):
            raise ValueError(
                f'Challenge should either be "singlecoil" or "multicoil"')
        self.mask_func = mask_func
        self.resolution = resolution
        self.which_challenge = which_challenge
        self.use_seed = use_seed

    def __call__(self, kspace, mask, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
                data or (rows, cols, 2) for single coil data.
            mask (numpy.array): Mask from the test dataset. The incoming value
                is never read here; it is overwritten when mask_func is set.
            target (numpy.array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object.
            fname (str): File name
            slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                image (torch.Tensor): Zero-filled input image.
                target (torch.Tensor): Target image converted to a torch Tensor.
                mean (float): Mean value used for normalization.
                std (float): Standard deviation value used for normalization.
                norm (float32): Volume norm taken from the file attributes.
                fname (str): File name, passed through.
                slice (int): Slice index, passed through.
        """
        kspace = transforms.to_tensor(kspace)
        # Apply mask
        if self.mask_func:
            # Seeding with the filename keeps the mask identical for every
            # slice of the same volume.
            seed = None if not self.use_seed else tuple(map(ord, fname))
            masked_kspace, mask = transforms.apply_mask(
                kspace, self.mask_func, seed)
        else:
            masked_kspace = kspace
        # Inverse Fourier Transform to get zero filled solution
        image = transforms.ifft2(masked_kspace)
        # Crop input image to given resolution if larger
        smallest_width = min(self.resolution, image.shape[-2])
        smallest_height = min(self.resolution, image.shape[-3])
        if target is not None:
            # Never crop beyond what the target provides.
            smallest_width = min(smallest_width, target.shape[-1])
            smallest_height = min(smallest_height, target.shape[-2])
        crop_size = (smallest_height, smallest_width)
        image = transforms.complex_center_crop(image, crop_size)
        # Absolute value
        image = transforms.complex_abs(image)
        # Apply Root-Sum-of-Squares if multicoil data
        if self.which_challenge == 'multicoil':
            image = transforms.root_sum_of_squares(image)
        # Normalize input
        image, mean, std = transforms.normalize_instance(image, eps=1e-11)
        image = image.clamp(-6, 6)  # clip extreme outliers
        # Normalize target with the *input's* statistics so both share a scale
        if target is not None:
            target = transforms.to_tensor(target)
            target = transforms.center_crop(target, crop_size)
            target = transforms.normalize(target, mean, std, eps=1e-11)
            target = target.clamp(-6, 6)
        else:
            target = torch.Tensor([0])
        return image, target, mean, std, attrs['norm'].astype(np.float32), fname, slice
| 6,487 | 40.063291 | 116 | py |
FL-MRCM | FL-MRCM-main/data/subsample.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def create_mask_for_mask_type(mask_type_str, center_fractions, accelerations):
    """Factory for mask generators.

    Args:
        mask_type_str: either ``'random'`` or ``'equispaced'``.
        center_fractions: fractions of retained low-frequency columns.
        accelerations: matching undersampling factors.
    Returns:
        A MaskFunc subclass instance of the requested type.
    Raises:
        Exception: when ``mask_type_str`` names an unknown mask type.
    """
    factories = {
        'random': RandomMaskFunc,
        'equispaced': EquispacedMaskFunc,
    }
    if mask_type_str not in factories:
        raise Exception(f"{mask_type_str} not supported")
    return factories[mask_type_str](center_fractions, accelerations)
class MaskFunc():
    """Base class for k-space undersampling mask generators.

    Stores paired (center_fraction, acceleration) options and picks one pair
    uniformly at random per call.
    """

    def __init__(self, center_fractions, accelerations):
        """
        Args:
            center_fractions (List[float]): Fraction of low-frequency columns to
                be retained. One entry is chosen uniformly per mask.
            accelerations (List[int]): Amount of under-sampling, paired 1:1
                with ``center_fractions``.
        Raises:
            ValueError: when the two option lists differ in length.
        """
        if len(center_fractions) != len(accelerations):
            raise ValueError('Number of center fractions should match number of accelerations')
        self.center_fractions = center_fractions
        self.accelerations = accelerations
        self.rng = np.random.RandomState()

    def choose_acceleration(self):
        # Draw one (center_fraction, acceleration) pair uniformly at random.
        pick = self.rng.randint(0, len(self.accelerations))
        return self.center_fractions[pick], self.accelerations[pick]
class RandomMaskFunc(MaskFunc):
    """
    RandomMaskFunc creates a sub-sampling mask of a given shape.
    The mask selects a subset of columns from the input k-space data. If the k-space data has N
    columns, the mask picks out:
        1. N_low_freqs = (N * center_fraction) columns in the center corresponding to
           low-frequencies
        2. The other columns are selected uniformly at random with a probability equal to:
           prob = (N / acceleration - N_low_freqs) / (N - N_low_freqs).
    This ensures that the expected number of columns selected is equal to (N / acceleration)
    It is possible to use multiple center_fractions and accelerations, in which case one possible
    (center_fraction, acceleration) is chosen uniformly at random each time the RandomMaskFunc object is
    called.
    For example, if accelerations = [4, 8] and center_fractions = [0.08, 0.04], then there
    is a 50% probability that 4-fold acceleration with 8% center fraction is selected and a 50%
    probability that 8-fold acceleration with 4% center fraction is selected.
    """

    def __init__(self, center_fractions, accelerations):
        """
        Args:
            center_fractions (List[float]): Fraction of low-frequency columns to be retained.
                If multiple values are provided, then one of these numbers is chosen uniformly
                each time.
            accelerations (List[int]): Amount of under-sampling. This should have the same length
                as center_fractions. If multiple values are provided, then one of these is chosen
                uniformly each time. An acceleration of 4 retains 25% of the columns, but they may
                not be spaced evenly.
        """
        # NOTE(review): repeats MaskFunc.__init__ verbatim instead of
        # delegating via super().__init__; behaviour is identical.
        if len(center_fractions) != len(accelerations):
            raise ValueError('Number of center fractions should match number of accelerations')
        self.center_fractions = center_fractions
        self.accelerations = accelerations
        self.rng = np.random.RandomState()

    def __call__(self, shape, seed=None):
        """
        Args:
            shape (iterable[int]): The shape of the mask to be created. The shape should have
                at least 3 dimensions. Samples are drawn along the second last dimension.
            seed (int, optional): Seed for the random number generator. Setting the seed
                ensures the same mask is generated each time for the same shape.
        Returns:
            torch.Tensor: A mask of the specified shape.
        """
        if len(shape) < 3:
            raise ValueError('Shape should have 3 or more dimensions')
        self.rng.seed(seed)
        num_cols = shape[-2]
        center_fraction, acceleration = self.choose_acceleration()
        # Create the mask: Bernoulli-sample columns so the expected number of
        # kept columns (center band included) equals num_cols / acceleration.
        num_low_freqs = int(round(num_cols * center_fraction))
        prob = (num_cols / acceleration - num_low_freqs) / (num_cols - num_low_freqs)
        mask = self.rng.uniform(size=num_cols) < prob
        # Always keep the centered band of low-frequency columns.
        pad = (num_cols - num_low_freqs + 1) // 2
        mask[pad:pad + num_low_freqs] = True
        # Reshape the mask so it broadcasts along the sampled (second-last) axis.
        mask_shape = [1 for _ in shape]
        mask_shape[-2] = num_cols
        mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
        return mask
class EquispacedMaskFunc(MaskFunc):
    """
    EquispacedMaskFunc creates a sub-sampling mask of a given shape.
    The mask selects a subset of columns from the input k-space data. If the k-space data has N
    columns, the mask picks out:
        1. N_low_freqs = (N * center_fraction) columns in the center corresponding to
           low-frequencies
        2. The other columns are selected with equal spacing at a proportion that reaches the
           desired acceleration rate taking into consideration the number of low frequencies. This
           ensures that the expected number of columns selected is equal to (N / acceleration)
    It is possible to use multiple center_fractions and accelerations, in which case one possible
    (center_fraction, acceleration) is chosen uniformly at random each time the EquispacedMaskFunc
    object is called.
    """

    def __call__(self, shape, seed):
        """
        Args:
            shape (iterable[int]): The shape of the mask to be created. The shape should have
                at least 3 dimensions. Samples are drawn along the second last dimension.
            seed (int, optional): Seed for the random number generator. Setting the seed
                ensures the same mask is generated each time for the same shape.
        Returns:
            torch.Tensor: A mask of the specified shape.
        """
        if len(shape) < 3:
            raise ValueError('Shape should have 3 or more dimensions')
        self.rng.seed(seed)
        center_fraction, acceleration = self.choose_acceleration()
        num_cols = shape[-2]
        num_low_freqs = int(round(num_cols * center_fraction))
        # Create the mask: start with only the fully-sampled center band.
        mask = np.zeros(num_cols, dtype=np.float32)
        pad = (num_cols - num_low_freqs + 1) // 2
        mask[pad:pad + num_low_freqs] = True
        # Determine acceleration rate by adjusting for the number of low frequencies,
        # so the overall kept fraction still works out to 1 / acceleration.
        adjusted_accel = (acceleration * (num_low_freqs - num_cols)) / (num_low_freqs * acceleration - num_cols)
        # Random phase offset of the equispaced comb.
        offset = self.rng.randint(0, round(adjusted_accel))
        accel_samples = np.arange(offset, num_cols - 1, adjusted_accel)
        # NOTE(review): np.uint is a platform-dependent numpy alias -- confirm
        # it is still available in the numpy version pinned by this project.
        accel_samples = np.around(accel_samples).astype(np.uint)
        mask[accel_samples] = True
        # Reshape the mask so it broadcasts along the sampled (second-last) axis.
        mask_shape = [1 for _ in shape]
        mask_shape[-2] = num_cols
        mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
        return mask
| 7,422 | 42.409357 | 112 | py |
FL-MRCM | FL-MRCM-main/data/transforms.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def to_tensor(data):
    """Convert a numpy array to a PyTorch tensor.

    Complex arrays become real tensors with the real and imaginary parts
    stacked along a new last dimension; real arrays pass through unchanged.

    Args:
        data (np.array): Input numpy array
    Returns:
        torch.Tensor: PyTorch version of data
    """
    if not np.iscomplexobj(data):
        return torch.from_numpy(data)
    stacked = np.stack((data.real, data.imag), axis=-1)
    return torch.from_numpy(stacked)
def to_numpy(data):
    """Convert a PyTorch tensor back to a numpy array.

    Tensors whose last dimension has size 2 are interpreted as stacked
    real/imag channels and collapsed into a complex64 array; any other tensor
    is converted directly.

    Args:
        data (torch.Tensor): Input torch tensor
    Returns:
        np.array numpy array
    """
    if data.shape[-1] != 2:
        return data.numpy()
    result = np.zeros(data.shape[:-1], dtype=np.complex64)
    result.real = data[..., 0].numpy()
    result.imag = data[..., 1].numpy()
    return result
def apply_mask(data, mask_func, seed=None):
    """Subsample k-space by elementwise multiplication with a generated mask.

    Args:
        data (torch.Tensor): k-space with at least 3 dimensions; dims -3/-2
            are spatial and the last dimension has size 2 (complex pairs).
        mask_func (callable): ``(shape, seed) -> mask`` tensor factory.
        seed (int or 1-d array_like, optional): forwarded to ``mask_func``.
    Returns:
        (tuple): (masked k-space, mask).
    """
    mask_shape = np.array(data.shape)
    mask_shape[:-3] = 1  # collapse batch dims so the mask broadcasts
    mask = mask_func(mask_shape, seed)
    masked = data * mask
    return masked, mask
def fft2(data, normalized=True):
    """
    Apply centered 2 dimensional Fast Fourier Transform.

    Ported off the legacy callable ``torch.fft(...)``, which was removed in
    PyTorch 1.8 in favour of the ``torch.fft`` module; the math is unchanged.

    Args:
        data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
            -3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
            assumed to be batch dimensions.
        normalized (bool): If True, use orthonormal ("ortho") scaling, matching
            the old ``normalized=True`` behaviour.
    Returns:
        torch.Tensor: The FFT of the input.
    """
    assert data.size(-1) == 2
    # view_as_complex needs a contiguous last dimension.
    c = torch.view_as_complex(data.contiguous())
    c = torch.fft.ifftshift(c, dim=(-2, -1))
    c = torch.fft.fftn(c, dim=(-2, -1), norm="ortho" if normalized else "backward")
    c = torch.fft.fftshift(c, dim=(-2, -1))
    return torch.view_as_real(c)
def rfft2(data):
    """
    Apply centered 2 dimensional Fast Fourier Transform to a real input.

    Ported off the legacy ``torch.rfft(..., onesided=False)`` call removed in
    PyTorch 1.8; the full (two-sided) complex spectrum of the real input is
    returned as stacked real/imaginary channels.

    Args:
        data (torch.Tensor): Real input; dimensions -2 & -1 are the spatial
            dimensions, all other dimensions are batch dimensions.
    Returns:
        torch.Tensor: The FFT of the input, with a trailing dimension of size 2.
    """
    shifted = torch.fft.ifftshift(data, dim=(-2, -1))
    # fftn on a real tensor yields the two-sided complex spectrum, matching
    # the old onesided=False behaviour; "ortho" matches normalized=True.
    freq = torch.fft.fftn(shifted, dim=(-2, -1), norm="ortho")
    freq = torch.fft.fftshift(freq, dim=(-2, -1))
    return torch.view_as_real(freq)
def ifft2(data, normalized=True):
    """
    Apply centered 2-dimensional Inverse Fast Fourier Transform.

    Ported off the legacy callable ``torch.ifft(...)``, which was removed in
    PyTorch 1.8 in favour of the ``torch.fft`` module; the math is unchanged.

    Args:
        data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
            -3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
            assumed to be batch dimensions.
        normalized (bool): If True, use orthonormal ("ortho") scaling, matching
            the old ``normalized=True`` behaviour ("backward" = 1/n scaling).
    Returns:
        torch.Tensor: The IFFT of the input.
    """
    assert data.size(-1) == 2
    # view_as_complex needs a contiguous last dimension.
    c = torch.view_as_complex(data.contiguous())
    c = torch.fft.ifftshift(c, dim=(-2, -1))
    c = torch.fft.ifftn(c, dim=(-2, -1), norm="ortho" if normalized else "backward")
    c = torch.fft.fftshift(c, dim=(-2, -1))
    return torch.view_as_real(c)
def irfft2(data):
    """
    Apply centered 2-dimensional Inverse Fast Fourier Transform, returning a
    real tensor.

    Ported off the legacy ``torch.irfft(..., onesided=False)`` call removed in
    PyTorch 1.8: with a two-sided spectrum that call equals the real part of
    the complex inverse FFT.

    Args:
        data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
            -3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
            assumed to be batch dimensions.
    Returns:
        torch.Tensor: The real-valued IFFT of the input (no trailing pair dim).
    """
    assert data.size(-1) == 2
    c = torch.view_as_complex(data.contiguous())
    c = torch.fft.ifftshift(c, dim=(-2, -1))
    real = torch.fft.ifftn(c, dim=(-2, -1), norm="ortho").real
    return torch.fft.fftshift(real, dim=(-2, -1))
def complex_to_mag_phase(data):
    """Convert an (N, 2, H, W) real/imag tensor to (N, 2, H, W) mag/phase.

    Channel 0 of the input holds the real part and channel 1 the imaginary
    part; the output carries magnitude in channel 0 and phase (radians,
    via atan2) in channel 1.
    """
    assert data.size(-3) == 2
    real_part = data[:, 0, :, :]
    imag_part = data[:, 1, :, :]
    magnitude = (data ** 2).sum(dim=-3).sqrt()
    angle = torch.atan2(imag_part, real_part)
    return torch.stack((magnitude, angle), dim=-3)
def mag_phase_to_complex(data):
    """Convert an (N, 2, H, W) mag/phase tensor back to real/imag channels.

    Inverse of ``complex_to_mag_phase``: channel 0 is magnitude, channel 1 is
    phase in radians; the result stacks (mag*cos, mag*sin) along dim -3.
    """
    assert data.size(-3) == 2
    magnitude = data[:, 0, :, :]
    angle = data[:, 1, :, :]
    real_part = magnitude * torch.cos(angle)
    imag_part = magnitude * torch.sin(angle)
    return torch.stack((real_part, imag_part), dim=-3)
def partial_fourier(data):
    """Unimplemented placeholder for a partial-Fourier reconstruction step.

    NOTE(review): the body was never written -- calling this returns ``None``
    regardless of input.
    """
def complex_abs(data):
    """Magnitude of a complex tensor stored as paired real/imag channels.

    The complex pair may live in the last dimension (size 2) or the third
    from last (size 2); the last dimension takes precedence when both match.

    Args:
        data (torch.Tensor): complex valued tensor in stacked form.
    Returns:
        torch.Tensor: Absolute value of data
    """
    assert data.size(-1) == 2 or data.size(-3) == 2
    if data.size(-1) == 2:
        complex_dim = -1
    else:
        complex_dim = -3
    return (data ** 2).sum(dim=complex_dim).sqrt()
def root_sum_of_squares(data, dim=0):
    """Root-Sum-of-Squares reduction along ``dim`` (e.g. coil combination).

    Args:
        data (torch.Tensor): The input tensor
        dim (int): The dimension along which to apply the RSS transform
    Returns:
        torch.Tensor: The RSS value
    """
    squared_total = (data ** 2).sum(dim)
    return torch.sqrt(squared_total)
def center_crop(data, shape):
    """Center-crop the trailing two dimensions of a real tensor.

    Args:
        data (torch.Tensor): tensor with at least 2 dimensions; cropping is
            applied along the last two.
        shape (int, int): target (height, width); each entry must be positive
            and no larger than the corresponding input dimension.
    Returns:
        torch.Tensor: The center cropped image
    """
    assert 0 < shape[0] <= data.shape[-2]
    assert 0 < shape[1] <= data.shape[-1]
    top = (data.shape[-2] - shape[0]) // 2
    left = (data.shape[-1] - shape[1]) // 2
    return data[..., top:top + shape[0], left:left + shape[1]]
def complex_center_crop(data, shape):
    """Center-crop a stacked-complex tensor along its spatial dimensions.

    Args:
        data (torch.Tensor): tensor with at least 3 dimensions; cropping is
            applied along dims -3 and -2, while the trailing real/imag pair
            dimension (size 2) is kept intact.
        shape (int, int): target (height, width); each entry must be positive
            and no larger than the corresponding input dimension.
    Returns:
        torch.Tensor: The center cropped image
    """
    assert 0 < shape[0] <= data.shape[-3]
    assert 0 < shape[1] <= data.shape[-2]
    top = (data.shape[-3] - shape[0]) // 2
    left = (data.shape[-2] - shape[1]) // 2
    return data[..., top:top + shape[0], left:left + shape[1], :]
def normalize(data, mean, stddev, eps=0.):
    """Return ``(data - mean) / (stddev + eps)``.

    Args:
        data (torch.Tensor): Input data to be normalized
        mean (float): Mean value
        stddev (float): Standard deviation
        eps (float): Added to stddev to prevent dividing by zero
    Returns:
        torch.Tensor: Normalized tensor
    """
    shifted = data - mean
    return shifted / (stddev + eps)
def normalize_instance(data, eps=0.):
    """Standardize ``data`` by its own statistics.

    Computes the mean and (unbiased) std over all elements and applies
    ``(data - mean) / (std + eps)``.

    Args:
        data (torch.Tensor): Input data to be normalized
        eps (float): Added to stddev to prevent dividing by zero
    Returns:
        (normalized tensor, mean, std)
    """
    mu = data.mean()
    sigma = data.std()
    return (data - mu) / (sigma + eps), mu, sigma
def normalize_volume(data, mean, std, eps=0.):
    """Normalize with externally supplied (volume-level) statistics.

    Applies ``(data - mean) / (std + eps)`` and echoes the statistics back so
    the caller can undo the normalization later.

    Args:
        data (torch.Tensor): Input data to be normalized
        mean: mean of whole volume
        std: std of whole volume
        eps (float): Added to stddev to prevent dividing by zero
    Returns:
        (normalized tensor, mean, std)
    """
    scaled = (data - mean) / (std + eps)
    return scaled, mean, std
def normalize_complex(data, eps=0.):
    """Center and scale a stacked-complex tensor by its magnitude statistics.

    Each sample is shifted by a phase-preserving "complex mean" (the sample
    rescaled to the average magnitude) so the centered data has zero average
    magnitude, then divided by the magnitude std.

    Args:
        data (torch.Tensor): stacked real/imag tensor (*, 2)
        eps (float): Added to stddev to prevent dividing by zero
    Returns:
        (normalized complex tensor (*, 2), magnitude mean, magnitude std)
    """
    # Magnitude, dispatched exactly like the module's complex_abs helper.
    assert data.size(-1) == 2 or data.size(-3) == 2
    if data.size(-1) == 2:
        mag = (data ** 2).sum(dim=-1).sqrt()
    else:
        mag = (data ** 2).sum(dim=-3).sqrt()
    mag_mean = mag.mean()
    mag_std = mag.std()
    scale = mag_mean / mag
    centered_real = data[..., 0] * scale
    centered_imag = data[..., 1] * scale
    complex_mean = torch.stack((centered_real, centered_imag), dim=-1)
    return (data - complex_mean) / (mag_std + eps), mag_mean, mag_std
# Helper functions
def roll(x, shift, dim):
    """Circularly shift ``x`` by ``shift`` positions along ``dim``.

    Mirrors ``np.roll`` for PyTorch tensors; ``shift`` and ``dim`` may be
    matching sequences, in which case the axes are rolled one at a time.
    """
    if isinstance(shift, (tuple, list)):
        assert len(shift) == len(dim)
        rolled = x
        for amount, axis in zip(shift, dim):
            rolled = roll(rolled, amount, axis)
        return rolled
    size = x.size(dim)
    amount = shift % size
    if amount == 0:
        return x
    head = x.narrow(dim, 0, size - amount)
    tail = x.narrow(dim, size - amount, amount)
    return torch.cat((tail, head), dim=dim)
def fftshift(x, dim=None):
    """Move the zero-frequency component to the center of the tensor.

    Torch analogue of ``np.fft.fftshift``; ``dim`` may be None (all axes),
    an int, or a sequence of ints.
    """
    if dim is None:
        dim = tuple(range(x.dim()))
        shift = [size // 2 for size in x.shape]
    elif isinstance(dim, int):
        shift = x.shape[dim] // 2
    else:
        shift = [x.shape[axis] // 2 for axis in dim]
    return roll(x, shift, dim)
def ifftshift(x, dim=None):
    """Inverse of :func:`fftshift`.

    Torch analogue of ``np.fft.ifftshift``; ``dim`` may be None (all axes),
    an int, or a sequence of ints.
    """
    if dim is None:
        dim = tuple(range(x.dim()))
        shift = [(size + 1) // 2 for size in x.shape]
    elif isinstance(dim, int):
        shift = (x.shape[dim] + 1) // 2
    else:
        shift = [(x.shape[axis] + 1) // 2 for axis in dim]
    return roll(x, shift, dim)
| 11,406 | 28.705729 | 115 | py |
HabitatDyn | HabitatDyn-main/metric_cal.py | import argparse
import logging
import os
import numpy as np
import torch
import torch.utils.data.dataloader as dataloader
from PIL import Image
from torch.utils.data import Dataset
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
from utils.common import safe_mkdir
from utils.meter import AverageValueMeter
from utils.metrics import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser(
description='PyTorch Metrics Calculator', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--gt_data', metavar='DIR',
help='path to ground truth dataset', required=True)
parser.add_argument('--pred_data', metavar='DIR',
help='path to predicted dataset', required=True)
parser.add_argument('--flag', type=int, required=True,
help='''which kind HabitatDyn data to do metric evalutation:
flag: 0: calculate All
1: calculate Single class
2: calculate Multi class
3: calculate Speed 1
4: calculate Speed 2
5: calculate Speed 3
6: calculate Human classes
7: calculate toy car/robot classes
8: calculate dog/cat classes''')
args = parser.parse_args()
class MetricDataset(Dataset):
    """Pairs of (ground-truth, predicted) mask images for metric evaluation.

    Args:
        pred_data: root dir of the predicted masks.
        gt_data: root dir of the HabitatDyn ground truth.
        flag: which HabitatDyn subset to evaluate:
            0: all          1: single class     2: multi class
            3: speed 1      4: speed 2          5: speed 3
            6: human        7: toy car/robot    8: dog/cat
    """

    def __init__(self, pred_data, gt_data, flag):
        self.pred_data = pred_data
        self.gt_data = gt_data
        self.flag = flag
        self.data = []
        # Dispatch table instead of a nine-way if/elif chain.
        loaders = {
            0: self.load_all,
            1: self.load_single_class,
            2: self.load_multi_class,
            3: self.load_speed_1,
            4: self.load_speed_2,
            5: self.load_speed_3,
            6: self.load_human_class,
            7: self.load_car_robot_class,
            8: self.load_dog_cat_class,
        }
        self.scene_names = loaders[flag]()
        for scene_name in self.scene_names:
            gt_folder = os.path.join(
                self.gt_data, 'habitat_sim_DAVIS/Annotations/480p', scene_name)
            pred_folder = os.path.join(self.pred_data, scene_name)
            for filename in os.listdir(gt_folder):
                if filename.endswith('.png') or filename.endswith('.jpg'):
                    gt_path = os.path.join(gt_folder, filename)
                    # Prediction files drop the first character of the
                    # ground-truth file name.
                    pred_path = os.path.join(pred_folder, filename[1:])
                    if not os.path.isfile(pred_path):
                        continue
                    self.data.append((gt_path, pred_path))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        """Return (pred, gt) masks binarized to {0, 1} as uint8 tensors."""
        gt_path, pred_path = self.data[index]
        pred_img = Image.open(pred_path).convert('L')
        binary_pred_img = pred_img.point(lambda x: 0 if x == 0 else 1)
        gt_img = Image.open(gt_path).convert('L')
        binary_gt_img = gt_img.point(lambda x: 0 if x == 0 else 1)
        return (torch.from_numpy(np.array(binary_pred_img)),
                torch.from_numpy(np.array(binary_gt_img)))

    def _read_mapping(self):
        # Each row of the mapping file is "<scene_id>\t<scene_name>".
        txt_file = os.path.join(self.gt_data, 'video_name_mapping.txt')
        return np.genfromtxt(txt_file, delimiter='\t', dtype='str')

    def load_all(self):
        """All scene ids listed in the mapping file."""
        return [row[0] for row in self._read_mapping()]

    def load_single_class(self):
        # Single-class scenes are the first 36 of every group of 54.
        return [row[0] for row in self._read_mapping()
                if int(row[0]) % 54 < 36]

    def load_multi_class(self):
        # Multi-class scenes are the last 18 of every group of 54.
        return [row[0] for row in self._read_mapping()
                if int(row[0]) % 54 >= 36]

    def _load_by_speed(self, speed):
        # Scene names encode the speed as a trailing "_<speed>" suffix.
        return [row[0] for row in self._read_mapping()
                if row[1].split('_')[-1] == speed]

    def load_speed_1(self):
        return self._load_by_speed('1')

    def load_speed_2(self):
        return self._load_by_speed('2')

    def load_speed_3(self):
        return self._load_by_speed('3')

    def _load_single_class_by_names(self, keywords):
        # Restrict to single-class scenes whose name contains any keyword.
        return [row[0] for row in self._read_mapping()
                if int(row[0]) % 54 < 36
                and any(k in row[1] for k in keywords)]

    def load_human_class(self):
        return self._load_single_class_by_names(
            ('angry_girl', 'ferbibliotecario'))

    def load_car_robot_class(self):
        return self._load_single_class_by_names(('robot', 'toy_car'))

    def load_dog_cat_class(self):
        return self._load_single_class_by_names(('shiba', 'cat'))
def main():
    """Evaluate predicted masks against HabitatDyn ground truth.

    Builds the subset selected by ``args.flag``, accumulates per-image IoU,
    precision and recall, prints the means and appends them to
    ./detection_results/logfile.
    """
    flag_names = {0: 'calculate All',
                  1: 'calculate Single class',
                  2: 'calculate Multi class',
                  3: 'calculate Speed 1',
                  4: 'calculate Speed 2',
                  5: 'calculate Speed 3',
                  6: 'calculate Human classes',
                  7: 'calculate toy car/robot classes',
                  8: 'calculate dog/cat classe'}
    print(flag_names[args.flag])
    gt_data = args.gt_data
    pred_data = args.pred_data
    flag = args.flag
    metric_data = MetricDataset(pred_data, gt_data, flag)
    metric_dataloader = dataloader.DataLoader(metric_data, batch_size=256)
    iou_meter = AverageValueMeter()
    precision_meter = AverageValueMeter()
    recall_meter = AverageValueMeter()
    for pred, gt in tqdm(metric_dataloader):
        # Tensor.to() is NOT in-place: the returned tensor must be kept,
        # otherwise the transfer to ``device`` is silently discarded and
        # everything keeps running on the CPU.
        pred = pred.to(device)
        gt = gt.to(device)
        curr_iou = iou(pred, gt)
        # Meters accumulate (sum, count) so .mean is a per-image average.
        iou_meter.add(torch.sum(curr_iou).cpu(
        ).detach().numpy(), curr_iou.shape[0])
        precision, recall, f1 = prf_metrics(pred, gt)
        precision_meter.add(torch.sum(precision).cpu(
        ).detach().numpy(), precision.shape[0])
        recall_meter.add(torch.sum(recall).cpu(
        ).detach().numpy(), recall.shape[0])
    print("final IOU mean", iou_meter.mean)
    print("final mean precision ", precision_meter.mean)
    print("final mean recall", recall_meter.mean)
    # save logging
    safe_mkdir('./detection_results')
    logging.basicConfig(level=logging.DEBUG, filename="./detection_results/logfile", filemode="a+",
                        format="%(asctime)-15s %(levelname)-8s %(message)s")
    logging.info(f"final IOU mean {iou_meter.mean}")
    logging.info(f"final mean precision {precision_meter.mean}")
    logging.info(f"final mean recall {recall_meter.mean}")
    # TODO: subdirect and exp name for each call or a speration line for each logging entry
    # TODO: demo: example dataset


if __name__ == "__main__":
    main()
| 9,689 | 36.55814 | 101 | py |
HabitatDyn | HabitatDyn-main/dist_cal.py | import json
import logging
import os
import cv2
import argparse
import matplotlib.pyplot as plt
import numpy as np
import scipy.spatial.distance as sci_dis
import torch
import yaml
from sklearn.cluster import DBSCAN
from sklearn.neighbors import LocalOutlierFactor
from tqdm import tqdm
import utils.distance_estimation as dis
from config.default import Config as CN
from utils.common import intersect2d, safe_mkdir, union2d
from utils.create_training_data import (get_pixel_number_from_name,
gt_obj_points)
parser = argparse.ArgumentParser(
description='extract pose/location info of top-down view using dectection results')
# TODO add config.yaml argument
parser.add_argument('--exp_name', default=None,
help='save results to dist_eval_results/exp_name')
parser.add_argument('--habitatDyn_data', metavar='DIR',
help='path to habitatDyn dataset', required=True)
parser.add_argument('--mask_data', metavar='DIR',
help='path to moving object detection output mask', required=True)
args = parser.parse_args()
# set up the parameters
USE_GT = False
SHOW_IMG = False
ADD_TRAIN_GT = True
match_rate_th = 0
# open and load config file
with open("config/test.yaml", "r") as yamlfile:
    config_yaml = yaml.load(yamlfile, Loader=yaml.FullLoader)
    print("Read successful")
print(config_yaml)
config = CN(config_yaml)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
map_size = config.SEMANTIC_ANTICIPATOR.map_size
map_scale = config.SEMANTIC_ANTICIPATOR.map_scale
# change the setting according to the dataset
# camera height of 1.25m has a pitch value pi/8, 0.20m has a pitch value 0
projection = dis.GTEgoMap(
    map_size=map_size, map_scale=map_scale, camera_pitch=np.pi/8)
imgh = config.SEMANTIC_ANTICIPATOR.imgh
imgw = config.SEMANTIC_ANTICIPATOR.imgw
pad = 0.1
# font settings
# font type
font = cv2.FONT_HERSHEY_SIMPLEX
# fontScale
fontScale = 0.5
# Blue color in BGR
color = (255, 0, 0)
# Line thickness of 2 px
thickness = 1
start_clip = 0
# Per-cell metric x/z coordinates of the top-down ego map (origin at the
# camera, x to the right, z into the scene).
x_matrix = (np.arange(map_size) - np.int32(map_size/2) -
            1)[np.newaxis]*map_scale*(np.ones(map_size)[:, np.newaxis])
x_tensor = torch.tensor(x_matrix[np.newaxis, np.newaxis]).to(device)
z_matrix = -(map_size - np.arange(map_size) -
             1)[:, np.newaxis]*np.ones(map_size)[np.newaxis, :]*map_scale
z_tensor = torch.tensor(z_matrix[np.newaxis, np.newaxis]).to(device)
img_dataset_list = [args.habitatDyn_data]
pred_list = [args.mask_data]
# Auto-number the experiment directory when --exp_name is not given.
# NOTE(review): os.listdir('./dist_eval_results') raises FileNotFoundError
# on a fresh checkout — the directory is only created later (safe_mkdir
# inside the clip loop); confirm it exists before the first run.
if not args.exp_name:
    dir_count = sum(os.path.isdir(os.path.join('./dist_eval_results/', i))
                    for i in os.listdir(f'./dist_eval_results'))
    exp_name = f"exp_{dir_count:03d}"
else:
    exp_name = args.exp_name
for i in range(len(img_dataset_list)):
# the habitatDyn sub dataset to be evluated on distance estimation
dataset_name = img_dataset_list[i].split("/")[-1]
data_root = img_dataset_list[i]
root_file = os.path.join(data_root, 'habitat_sim_DAVIS/Annotations/')
img_path = os.path.join(data_root, 'habitat_sim_DAVIS/JPEGImages/480p')
clips_name = os.listdir(root_file + '480p')
clips_name = sorted(clips_name)
frame_list = os.listdir(root_file + '480p/0000')
frame_list = sorted(frame_list)
for clip in tqdm(clips_name[start_clip:], desc="Clip", position=0):
meta_file_location = os.path.join(data_root, f'stats_info/480p/{clip}')
with open(os.path.join(meta_file_location, 'semantic_id_to_name.json')) as f:
id_to_name = json.load(f)
im_folder = ''
focal = ''
safe_mkdir('dist_eval_results')
if USE_GT:
record_save_path = f'dist_eval_results/{exp_name}/{dataset_name}/use_gt/{focal}{im_folder}/'
else:
record_save_path = f'dist_eval_results/{exp_name}/{dataset_name}/use_pre/{focal}{im_folder}/'
safe_mkdir(record_save_path)
logging.basicConfig(level=logging.DEBUG, filename=record_save_path + "logfile", filemode="a+",
format="%(asctime)-15s %(levelname)-8s %(message)s")
eval_record = []
error = []
right_pixel = 0
wrong_pixel = 0
wrong_object = 0
timecost = 0
eval_time = 0
for frame in tqdm(frame_list, desc="Frame", position=1, leave=False):
fram_num = int(frame.split('.')[0])
pose_camera = np.load(
os.path.join(meta_file_location, 'camera_spec.npy'), allow_pickle=True).tolist()
pose_ped = np.load(os.path.join(meta_file_location,
'peds_infos.npy'), allow_pickle=True).tolist()
location_camera = pose_camera['position']
location_ped = pose_ped['positions']
ground_truth = np.uint8(cv2.imread(
root_file + '480p_objectID/' + clip + '/' + frame)[:, :, 0])
depth_img = cv2.imread(
root_file + '480p_depth/' + clip + '/' + frame)
depth_array = depth_img[:, :, 0]/255.
ground_truth_points = {}
gt_mask = ground_truth > 0
object_ids = np.unique(ground_truth)
# validation object:
valid_object_ids = []
for object_id in object_ids[1:]:
location = location_ped[object_id-1][fram_num]
dis_gt_2 = location[0]*location[0] + location[2]*location[2]
dis_gt = np.sqrt(dis_gt_2)
object_mask = ground_truth == object_id
object_name = id_to_name[str(object_id)]
pixel_th = get_pixel_number_from_name(object_name)
if dis_gt > 1:
beta = 0.5
if dis_gt < 1:
beta = 0.2
if np.sum(object_mask)*dis_gt_2 > beta*pixel_th:
valid_object_ids.append(object_id)
if USE_GT:
# mask_img = cv2.imread(
# root_file + '480p_colored/' + clip + '/' + frame)
# mask_img_1d = np.sum(mask_img, axis=2)
# mask = mask_img_1d > 0
raise(NotImplementedError)
else:
mask_file_path = args.mask_data
mask_img = cv2.imread(os.path.join(
mask_file_path, f'{clip}/{frame[1:]}'))
mask = np.sum(mask_img, axis=2) > 0
# if all the objects are not detected
if np.all(mask == False):
if not np.all(gt_mask == False):
for object_id in valid_object_ids:
obj_points, ref_position = gt_obj_points(pose_ped, pose_camera,
fram_num, object_id, map_size, map_scale, id_to_name)
record = {
'clip': clip,
'frame': fram_num,
'object_id': object_id,
'obj': id_to_name[str(object_id)],
'detected': False,
'pre_location': None,
'gt_location': ref_position,
'intersect': 0,
'union': len(obj_points)
}
#print(f'!!!object detect failed, record:{record} \n')
eval_record.append(record)
continue
ego_map_masked = projection.get_observation(depth_array*mask)
dilation_kernel = np.ones((5, 5))
dilation_mask = cv2.dilate(ego_map_masked[:, :, 0], dilation_kernel, iterations=2,
).astype(np.float32)
points = np.array(np.where(ego_map_masked[:, :, 0] > 0.3)).T
if points.shape[0] < 2:
# if all the objects are not detected
if not np.all(gt_mask == False):
for object_id in valid_object_ids:
obj_points, ref_position = gt_obj_points(pose_ped, pose_camera,
fram_num, object_id, map_size, map_scale, id_to_name)
record = {
'clip': clip,
'frame': fram_num,
'object_id': object_id,
'obj': id_to_name[str(object_id)],
'detected': False,
'pre_location': None,
'gt_location': ref_position,
'intersect': 0,
'union': len(obj_points)
}
#print(f'!!!object detect failed, record:{record} \n')
eval_record.append(record)
continue
clf1 = LocalOutlierFactor(n_neighbors=np.min(
[int(points.shape[0]/5+1), 50]), contamination=0.3)
y_pred = clf1.fit_predict(points)
if SHOW_IMG:
plt.figure(figsize=(10, 10))
plt.xlim([0, map_size])
plt.ylim([0, map_size])
plt.axis('off')
plt.scatter(points[:, 1], points[:, 0], color="k", s=1.0)
plt.scatter(points[y_pred > 0, 1],
points[y_pred > 0, 0], color="r", s=1.0)
plt.show()
clustering = DBSCAN(eps=3, min_samples=2).fit(points[y_pred > 0])
relativ_average_cors = {}
valid_clusters = []
# matching cluster with ground truth labels in top-down view
# TODO: clear all "_o" variable
for lable in np.unique(clustering.labels_):
o_cluster_points = points[y_pred > 0][np.where(
clustering.labels_ == lable)]
other_cluster_points = points[y_pred > 0][np.where(
clustering.labels_ != lable)]
if o_cluster_points.shape[0] > 2:
valid_clusters.append(lable)
shift_centre = []
cluster_points = points[y_pred > 0][np.where(
clustering.labels_ == lable)] - [map_size, int(map_size/2)+1]
cluster_points = cluster_points*map_scale
cluster_centre = np.average(cluster_points, axis=0)
x_pt_o = cluster_centre[1]
z_pt_o = cluster_centre[0]
cluster_score = 0
cluster_score_o = 0
x_pt = x_pt_o
z_pt = z_pt_o
cluster_ext = o_cluster_points
ext_points_img = None
imagine_scores = np.zeros((4, 9))
cluster_points[:, 1] = - cluster_points[:, 1]
cluster_points = cluster_points*map_scale
cluster_centre = np.average(cluster_points, axis=0)
relativ_average_cors[lable] = {
'orignal_points': cluster_ext,
'cluster_points': cluster_points,
'cluster_centre': [x_pt, z_pt],
'cluster_centre_o': [x_pt_o, z_pt_o],
'cluster_centre_no_shift': shift_centre,
'cluster_score': cluster_score,
'cluster_score_o': cluster_score_o,
'cluster_o': o_cluster_points,
'imagination_score': imagine_scores,
}
# calculate ground truth points for each label and stored in ground_truth_points dict
for object_id in object_ids[1:]:
mask = ground_truth == object_id
ego_map_gt_object = projection.get_observation(
depth_array*mask)
points = np.array(np.where(ego_map_gt_object[:, :, 0] > 0)).T
obj_points, ref_position = gt_obj_points(pose_ped, pose_camera,
fram_num, object_id, map_size, map_scale, id_to_name)
if points.shape[0] > 0:
clf = LocalOutlierFactor(n_neighbors=np.min(
[int(points.shape[0]/5+1), 50]), contamination=0.2)
y_pred = clf.fit_predict(points)
location = pose_ped['positions'][object_id-1][fram_num]
rotation = pose_camera['agent_orient'][fram_num]
obj_heading = pose_ped['orientations'][object_id-1][fram_num]
ground_truth_points[object_id] = {
'mask_points': points[y_pred > 0],
'gt_points': np.array(obj_points),
'ref_position': ref_position,
'location': location,
'rob_rotation': rotation,
'obj_heading': obj_heading
}
# find the match between estimated data and ground truth data
# here the 'label' is the label of DBSCAN cluster, so check whether is just numbering or meaningful label for habitatDyn
id2lable = {}
for object_id in valid_object_ids:
if object_id in ground_truth_points.keys():
best_match = {
'lable': -1,
'points_match': 0
} # -1 no match, 0 pints match
for lable in valid_clusters:
points_pre = relativ_average_cors[lable]['orignal_points']
points_gt = union2d(
ground_truth_points[object_id]['mask_points'], ground_truth_points[object_id]['gt_points'])
points_matched = intersect2d(points_pre, points_gt)
match_rate = points_matched.shape[0]/points_gt.shape[0]
if points_matched.shape[0] > best_match['points_match'] and match_rate > match_rate_th:
best_match['lable'] = lable
best_match['points_match'] = points_matched.shape[0]
id2lable[object_id] = best_match['lable']
# rename
rocorded_pre_label = []
for object_id in id2lable.keys():
# if no cluster for certain gt object_id found
if id2lable[object_id] == -1:
record = {
'clip': clip,
'frame': fram_num,
'object_id': object_id,
'obj': id_to_name[str(object_id)],
'detected': False,
'pre_location': None,
'gt_location': ground_truth_points[object_id]['ref_position'],
'intersect': 0,
'union': len(ground_truth_points[object_id]['gt_points']),
'location': ground_truth_points[object_id]['location'],
'rotation': ground_truth_points[object_id]['rob_rotation'],
'obj_heading': ground_truth_points[object_id]['obj_heading']
}
# print(record)
eval_record.append(record)
else:
points_pre = relativ_average_cors[id2lable[object_id]
]['orignal_points']
points_pre_o = relativ_average_cors[id2lable[object_id]]['cluster_o']
points_gt = ground_truth_points[object_id]['gt_points']
points_match_o = intersect2d(points_pre_o, points_gt)
points_matched = intersect2d(points_pre, points_gt)
x = relativ_average_cors[id2lable[object_id]
]['cluster_centre'][0]
z = relativ_average_cors[id2lable[object_id]
]['cluster_centre'][1]
x_object_map_coor = x/map_scale + int(map_size/2) + 1
z_object_map_coor = z/map_scale + map_size
distance_ct = sci_dis.cdist(points_pre, np.array(
[z_object_map_coor, x_object_map_coor])[np.newaxis])
r = max(distance_ct)
record = {
'clip': clip,
'frame': fram_num,
'object_id': object_id,
'lable': lable,
'obj': id_to_name[str(object_id)],
'score': relativ_average_cors[lable]['cluster_score'],
'score_o': relativ_average_cors[lable]['cluster_score_o'],
'detected': True,
'pre_location': [x, z],
'pre_location_o': relativ_average_cors[lable]['cluster_centre_o'],
'no_shift_location': relativ_average_cors[id2lable[object_id]]['cluster_centre_no_shift'],
'gt_location': ground_truth_points[object_id]['ref_position'],
'intersect': points_matched.shape[0],
'intersect_o': len(points_match_o),
'union': len(points_gt),
'detected_size': len(points_pre),
'detected_size_0': len(points_pre_o),
'location': ground_truth_points[object_id]['location'],
'rotation': ground_truth_points[object_id]['rob_rotation'],
'obj_heading': ground_truth_points[object_id]['obj_heading'],
'r': r,
'imagination_score': relativ_average_cors[id2lable[object_id]]['imagination_score'],
}
# print(record)
right_pixel += points_matched.shape[0]
eval_record.append(record)
error.append(
np.array(record['pre_location']) - np.array(record['gt_location']))
rocorded_pre_label.append(id2lable[object_id])
# if a predicted cluster not matched(e.g moving object segmentation model predicted 2 cluster for same moving object_id, the smaller cluster will be ignored by before ops)
for lable in valid_clusters:
if lable not in rocorded_pre_label:
points_pre = relativ_average_cors[lable]['orignal_points']
points_pre_o = relativ_average_cors[lable]['cluster_o']
best_match = {
'object_id': -1,
'points_match': 0
} # -1 no match, 0 pints match
for object_id in ground_truth_points.keys():
points_gt = union2d(
ground_truth_points[object_id]['mask_points'], ground_truth_points[object_id]['gt_points'])
points_matched = intersect2d(points_pre, points_gt)
if points_matched.shape[0] > best_match['points_match']:
best_match['object_id'] = lable
best_match['points_match'] = points_matched.shape[0]
alpha = best_match['points_match']/len(points_pre)
x = relativ_average_cors[lable]['cluster_centre'][0]
z = relativ_average_cors[lable]['cluster_centre'][1]
x_object_map_coor = x/map_scale + int(map_size/2) + 1
z_object_map_coor = z/map_scale + map_size
distance_ct = sci_dis.cdist(points_pre, np.array(
[z_object_map_coor, x_object_map_coor])[np.newaxis])
r = max(distance_ct)
if alpha < 0.1:
record = {
'clip': clip,
'frame': fram_num,
'lable': lable,
'obj': 'unknown',
'score': relativ_average_cors[lable]['cluster_score'],
'score_o': relativ_average_cors[lable]['cluster_score_o'],
'detected': True,
'pre_location': [x, z],
'gt_location': [0, 0],
'intersect': best_match['points_match'],
'union': len(points_pre),
'detected_size': len(points_pre),
'detected_size_0': len(points_pre_o),
'gt_ob_id': valid_object_ids,
'r': r,
'imagination_score': relativ_average_cors[lable]['imagination_score'],
}
if len(valid_object_ids) > 0:
wrong_pixel += len(points_pre)
wrong_object += 1
eval_record.append(record)
if len(error) > 0:
mse = np.mean(np.array(error)[:, 0] ** 2) + \
np.mean(np.array(error)[:, 1] ** 2)
print(record_save_path)
print(f'clip:{clip} error: {mse} detected: {len(error)} \
\n pixel for obejct : {right_pixel}, wrong pixel: {wrong_pixel} \
\n wrong_object: {wrong_object} timecost: {timecost/(eval_time+0.001)}')
logging.info(f'clip:{clip} error: {mse} detected: {len(error)} \
\n pixel for obejct : {right_pixel}, wrong pixel: {wrong_pixel} \
\n wrong_object: {wrong_object} timecost: {timecost/(eval_time+0.001)}')
np.save(record_save_path + clip + '.npy', eval_record)
| 22,013 | 47.170678 | 183 | py |
HabitatDyn | HabitatDyn-main/utils/common.py | import pathlib
import numpy as np
import math
import numbers
import torch
from torch import nn
from torch.nn import functional as F
def safe_mkdir(path):
    """Create ``path`` (with parents) if needed, ignoring OS-level failures.

    Only OSError (e.g. permission denied, read-only FS) is swallowed; the
    previous bare ``except`` also hid programming errors and
    KeyboardInterrupt.
    """
    try:
        pathlib.Path(path).mkdir(parents=True, exist_ok=True)
    except OSError:
        pass
def intersect2d(A, B):
    '''
    Row-wise intersection of two 2-D point arrays.
    A and B have shape (n, m): n points with m coordinates each. Returns the
    rows present in both, as an (k, m) array.
    '''
    n_rows, n_cols = A.shape
    # View each row as a single structured element so np.intersect1d can
    # compare whole rows at once.
    row_dtype = {'names': ['f{}'.format(i) for i in range(n_cols)],
                 'formats': n_cols * [A.dtype]}
    common = np.intersect1d(A.view(row_dtype), B.view(row_dtype))
    return common.view(A.dtype).reshape(-1, n_cols)
def union2d(A, B):
    '''
    Row-wise union of two 2-D point arrays.
    A and B have shape (n, m): n points with m coordinates each. Returns
    each distinct row once, in sorted order.
    '''
    n_rows, n_cols = A.shape
    # View each row as a single structured element so np.union1d can
    # deduplicate whole rows at once.
    row_dtype = {'names': ['f{}'.format(i) for i in range(n_cols)],
                 'formats': n_cols * [A.dtype]}
    merged = np.union1d(A.view(row_dtype), B.view(row_dtype))
    return merged.view(A.dtype).reshape(-1, n_cols)
import math
import numbers
import torch
from torch import nn
from torch.nn import functional as F
class GaussianSmoothing(nn.Module):
    """
    Apply gaussian smoothing on a
    1d, 2d or 3d tensor. Filtering is performed separately for each channel
    in the input using a depthwise convolution.
    Arguments:
        channels (int, sequence): Number of channels of the input tensors. Output will
            have this number of channels as well.
        kernel_size (int, sequence): Size of the gaussian kernel.
        sigma (float, sequence): Standard deviation of the gaussian kernel.
        dim (int, optional): The number of dimensions of the data.
            Default value is 2 (spatial).
    """
    def __init__(self, channels, kernel_size, sigma, dim=2):
        super(GaussianSmoothing, self).__init__()
        # Broadcast scalar kernel_size/sigma to one value per spatial dim.
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        # The gaussian kernel is the product of the
        # gaussian function of each dimension.
        kernel = 1
        meshgrids = torch.meshgrid(
            [
                torch.arange(size, dtype=torch.float32)
                for size in kernel_size
            ]
        )
        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2
            kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \
                torch.exp(-((mgrid - mean) / std) ** 2 / 2)
        # Make sure sum of values in gaussian kernel equals 1.
        kernel = kernel / torch.sum(kernel)
        # Reshape to depthwise convolutional weight
        # shape (channels, 1, *kernel_size): with groups=channels below this
        # filters every channel independently with the same kernel.
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
        # register_buffer so the kernel follows .to(device)/.cuda() but is
        # not a trainable parameter.
        self.register_buffer('weight', kernel)
        self.groups = channels
        if dim == 1:
            self.conv = F.conv1d
        elif dim == 2:
            self.conv = F.conv2d
        elif dim == 3:
            self.conv = F.conv3d
        else:
            raise RuntimeError(
                'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
            )
    def forward(self, input):
        """
        Apply gaussian filter to input.
        Note: no padding is applied here, so the output is smaller than the
        input by kernel_size - 1 per spatial dimension.
        Arguments:
            input (torch.Tensor): Input to apply gaussian filter on.
        Returns:
            filtered (torch.Tensor): Filtered output.
        """
return self.conv(input, weight=self.weight, groups=self.groups) | 3,632 | 32.330275 | 86 | py |
HabitatDyn | HabitatDyn-main/utils/metrics.py | import torch
import torch.nn.functional as F
import torch.nn as nn
def iou(pred_mask, gt_mask):
    """Calculates the per-image IoU of two batches of binary masks.

    Args:
        pred_mask: A torch.Tensor of shape (batch_size, height, width).
        gt_mask: A torch.Tensor of shape (batch_size, height, width).

    Returns:
        A 1-D torch.Tensor with one IoU value for every image whose ground
        truth contains at least one foreground pixel; empty-GT images are
        excluded from the result.
    """
    pred_mask = pred_mask.bool()
    gt_mask = gt_mask.bool()
    intersection = torch.sum(pred_mask & gt_mask, dim=[1, 2])
    # True union is |P or G|. The previous |P| + |G| double-counted the
    # intersection, capping a perfect prediction at IoU 0.5.
    union = torch.sum(pred_mask | gt_mask, dim=[1, 2])
    # Images with no ground-truth object are filtered out below; give them a
    # dummy union of 1 to avoid division by zero.
    has_object = torch.sum(gt_mask, dim=[1, 2]) > 0
    union = torch.where(has_object, union, torch.ones_like(union))
    iou = intersection / union
    # Filter with the ground-truth mask (not nonzero()) so that legitimate
    # IoU == 0 results are kept — the old iou[iou.nonzero()] silently
    # dropped zero-overlap predictions and inflated the mean. This also
    # matches the filtering used by prf_metrics.
    return iou[has_object]
def prf_metrics(pred, target):
    """Compute per-image precision, recall and F1 for binary masks.

    Args:
        pred: predicted masks, shape (batch, H, W).
        target: ground-truth masks, same shape.

    Returns:
        (precision, recall, f1) tensors restricted to images whose ground
        truth contains at least one foreground pixel.
    """
    assert pred.shape == target.shape
    pred = pred.bool()
    target = target.bool()
    # Used below to drop images without any ground-truth object.
    has_object = torch.sum(target, dim=[1, 2])
    tp = (pred & target).sum(dim=(1, 2)).float()
    fp = (pred & ~target).sum(dim=(1, 2)).float()
    fn = (~pred & target).sum(dim=(1, 2)).float()
    eps = 1e-12
    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    keep = has_object.bool()
    return precision[keep], recall[keep], f1[keep]
| 1,792 | 27.015625 | 87 | py |
HabitatDyn | HabitatDyn-main/utils/meter.py | import numpy as np
class Meter(object):
    """Interface for online statistics trackers.

    Subclasses keep running statistics in constant memory and implement
    ``reset``, ``add`` and ``value``; the base class is a no-op.
    """
    def reset(self):
        """Restore the meter to its default state."""
        pass
    def add(self, value):
        """Record a new observation.
        Args:
            value: Next result to include.
        """
        pass
    def value(self):
        """Return the meter's current value."""
        pass
class AverageValueMeter(Meter):
    """Running mean / standard deviation accumulator.

    ``add(value, n)`` interprets ``value`` as the sum of ``n`` samples, so a
    caller can push one pre-summed batch at a time; ``value()`` yields the
    current (mean, std) pair.
    """
    def __init__(self):
        super(AverageValueMeter, self).__init__()
        self.reset()
        self.val = 0
    def add(self, value, n=1):
        self.val = value
        self.sum += value
        self.var += value * value
        self.n += n
        if self.n == 0:
            self.mean, self.std = np.nan, np.nan
        elif self.n == 1:
            # "0.0 +" forces a scalar copy in case value is a tensor/array view.
            self.mean = 0.0 + self.sum
            self.std = np.inf
            self.mean_old = self.mean
            self.m_s = 0.0
        else:
            # Welford-style incremental mean / variance update.
            updated = self.mean_old + (value - n * self.mean_old) / float(self.n)
            self.m_s += (value - self.mean_old) * (value - updated)
            self.mean_old = updated
            self.mean = updated
            self.std = np.sqrt(self.m_s / (self.n - 1.0))
    def value(self):
        return self.mean, self.std
    def reset(self):
        self.n = 0
        self.sum = 0.0
        self.var = 0.0
        self.val = 0.0
        self.mean = np.nan
        self.mean_old = 0.0
        self.m_s = 0.0
self.std = np.nan | 1,655 | 26.147541 | 87 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_bo/gen_latent.py | import sys
sys.path.append('../')
import torch
import torch.nn as nn
from optparse import OptionParser
from tqdm import tqdm
import rdkit
from rdkit.Chem import Descriptors
from rdkit.Chem import MolFromSmiles, MolToSmiles
from rdkit.Chem import rdmolops
import numpy as np
from fast_jtnn import *
from fast_jtnn import sascorer
import networkx as nx
import os
def scorer(smiles):
    """Compute penalized-logP components for a list of SMILES strings.

    For each molecule: logP of the canonicalized SMILES, negated synthetic
    accessibility (SA) score, and a penalty for rings longer than 6 atoms.
    The target is the sum of the three components after z-scoring each one
    over this batch.

    Returns:
        tuple: (SA_scores, logP_values, cycle_scores, targets)
    """
    canonical = [MolToSmiles(MolFromSmiles(s), isomericSmiles=True)
                 for s in smiles]
    logP_values = [Descriptors.MolLogP(MolFromSmiles(s)) for s in canonical]
    SA_scores = [-sascorer.calculateScore(MolFromSmiles(s)) for s in canonical]
    cycle_scores = []
    for s in canonical:
        rings = nx.cycle_basis(
            nx.Graph(
                rdmolops.GetAdjacencyMatrix(MolFromSmiles(s))))
        longest = max((len(r) for r in rings), default=0)
        # Only rings larger than 6 atoms are penalized.
        cycle_scores.append(-max(longest - 6, 0))

    def _standardize(xs):
        # z-score over the current batch.
        xs = np.array(xs)
        return (xs - np.mean(xs)) / np.std(xs)

    targets = (_standardize(SA_scores) +
               _standardize(logP_values) +
               _standardize(cycle_scores))
    return (SA_scores,
            logP_values,
            cycle_scores,
            targets)
def main_gen_latent(data_path, vocab_path,
                    model_path, output_path='./',
                    hidden_size=450, latent_size=56,
                    depthT=20, depthG=3, batch_size=100):
    """Encode every SMILES in ``data_path`` with a trained JTNN-VAE and write
    the latent vectors plus penalized-logP score components as text files
    into ``output_path``.

    Args:
        data_path: text file with one SMILES string per line.
        vocab_path: JTNN vocabulary file (one fragment per line).
        model_path: trained JTNNVAE state dict; loaded onto CUDA.
        output_path: output directory (created if missing).
        hidden_size, latent_size, depthT, depthG: must match the trained model.
        batch_size: molecules encoded per forward pass.
    """
    with open(data_path) as f:
        smiles = f.readlines()
    if os.path.isdir(output_path) is False:
        os.makedirs(output_path)
    for i in range(len(smiles)):
        smiles[i] = smiles[i].strip()
    vocab = [x.strip("\r\n ") for x in open(vocab_path)]
    vocab = Vocab(vocab)
    model = JTNNVAE(vocab, hidden_size, latent_size, depthT, depthG)
    model.load_state_dict(torch.load(model_path))
    model = model.cuda()
    model.eval()
    # Encode in batches under no_grad to keep GPU memory bounded.
    with torch.no_grad():
        latent_points = []
        for i in tqdm(range(0, len(smiles), batch_size)):
            batch = smiles[i:i + batch_size]
            mol_vec = model.encode_from_smiles(batch)
            latent_points.append(mol_vec.data.cpu().numpy())
    latent_points = np.vstack(latent_points)
    SA_scores, logP_values, cycle_scores, targets = scorer(smiles)
    np.savetxt(
        os.path.join(output_path, 'latent_features.txt'), latent_points)
    np.savetxt(
        os.path.join(output_path, 'targets.txt'), targets)
    np.savetxt(
        os.path.join(output_path, 'logP_values.txt'), np.array(logP_values))
    np.savetxt(
        os.path.join(output_path, 'SA_scores.txt'), np.array(SA_scores))
    np.savetxt(
        os.path.join(output_path, 'cycle_scores.txt'), np.array(cycle_scores))
if __name__ == '__main__':
    # Silence rdkit's verbose logging; only critical messages remain.
    lg = rdkit.RDLogger.logger()
    lg.setLevel(rdkit.RDLogger.CRITICAL)
    parser = OptionParser()
    parser.add_option("-a", "--data", dest="data_path")
    parser.add_option("-v", "--vocab", dest="vocab_path")
    parser.add_option("-m", "--model", dest="model_path")
    parser.add_option("-o", "--output", dest="output_path", default='./')
    parser.add_option("-w", "--hidden", dest="hidden_size", default=450)
    parser.add_option("-l", "--latent", dest="latent_size", default=56)
    parser.add_option("-t", "--depthT", dest="depthT", default=20)
    parser.add_option("-g", "--depthG", dest="depthG", default=3)
    opts, args = parser.parse_args()
    # Defaults are ints but CLI-supplied values arrive as strings;
    # normalize everything to int.
    hidden_size = int(opts.hidden_size)
    latent_size = int(opts.latent_size)
    depthT = int(opts.depthT)
    depthG = int(opts.depthG)
    main_gen_latent(opts.data_path, opts.vocab_path,
                    opts.model_path, output_path=opts.output_path,
                    hidden_size=hidden_size, latent_size=latent_size,
depthT=depthT, depthG=depthG)
| 4,431 | 32.074627 | 78 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_bo/run_bo.py | import sys
sys.path.append('../')
import pickle
import gzip
import scipy.stats as sps
import numpy as np
import os
import rdkit
from rdkit.Chem import MolFromSmiles, MolToSmiles
from rdkit.Chem import Descriptors
import torch
import torch.nn as nn
from fast_jtnn import create_var, JTNNVAE, Vocab, sascorer
from fast_jtnn.sparse_gp import SparseGP
import networkx as nx
from rdkit.Chem import rdmolops
from tqdm import tqdm
from optparse import OptionParser
import joblib
# We define the functions used to load and save objects
def save_object(obj, filename):
    """Persist *obj* to *filename* using joblib's serializer."""
    joblib.dump(obj, filename)
def load_object(filename):
    """Load and return an object previously saved with :func:`save_object`."""
    return joblib.load(filename)
def save_object_old(obj, filename):
    """Serialize *obj* with pickle and write it gzip-compressed to *filename*.

    Legacy counterpart of :func:`save_object`; kept for files written by
    older runs. The `with` block closes the file, so the previous explicit
    `dest.close()` inside it was redundant and has been removed.
    """
    payload = pickle.dumps(obj)
    with gzip.GzipFile(filename, 'wb') as dest:
        dest.write(payload)
def load_object_old(filename):
    """Read a gzip-compressed pickle file and return the deserialized object."""
    with gzip.GzipFile(filename, 'rb') as source:
        return pickle.loads(source.read())
def main_bo(vocab_path,
            model_path,
            save_dir,
            descriptor_path,
            sampling=60,
            iterations=2,
            epochs=2,
            hidden_size=450,
            latent_size=56,
            depthT=20,
            depthG=3,
            random_seed=1):
    """Run Bayesian optimization over the JT-VAE latent space.

    Loads precomputed latent features / targets from ``descriptor_path``,
    fits a sparse GP, greedily selects ``sampling`` new latent points per
    iteration, decodes them to SMILES, scores them with the penalized-logP
    objective, and augments the GP training set. Valid SMILES and scores
    are pickled into ``save_dir`` per iteration.

    NOTE(review): ``epochs`` is accepted for CLI compatibility but unused here.
    """
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    vocab = [x.strip("\r\n ") for x in open(vocab_path)]
    vocab = Vocab(vocab)
    model = JTNNVAE(vocab, hidden_size, latent_size, depthT, depthG)
    model.load_state_dict(torch.load(model_path))
    model = model.cuda()

    # We load the random seed
    np.random.seed(random_seed)

    # Paths of the precomputed descriptor files
    latent_feature = os.path.join(descriptor_path, './latent_features.txt')
    target = os.path.join(descriptor_path, './targets.txt')
    logp_value = os.path.join(descriptor_path, './logP_values.txt')
    sa_score = os.path.join(descriptor_path, './SA_scores.txt')
    cycle_score = os.path.join(descriptor_path, './cycle_scores.txt')

    # We load the data (y is minued!)
    X = np.loadtxt(latent_feature)
    y = -np.loadtxt(target)
    y = y.reshape((-1, 1))

    n = X.shape[0]
    permutation = np.random.choice(n, n, replace=False)
    # BUGFIX: np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    split = int(np.round(0.9 * n))
    X_train = X[permutation, :][0:split, :]
    X_test = X[permutation, :][split:, :]
    y_train = y[permutation][0:split]
    y_test = y[permutation][split:]

    np.random.seed(random_seed)
    logP_values = np.loadtxt(logp_value)
    SA_scores = np.loadtxt(sa_score)
    cycle_scores = np.loadtxt(cycle_score)

    iteration = 0
    while iteration < iterations:
        # We fit the GP
        np.random.seed(iteration * random_seed)
        M = 500
        sgp = SparseGP(X_train, 0 * X_train, y_train, M)
        sgp.train_via_ADAM(X_train,
                           0 * X_train,
                           y_train,
                           X_test,
                           X_test * 0,
                           y_test,
                           minibatch_size=10 * M,
                           max_iterations=5,
                           learning_rate=0.001)

        pred, uncert = sgp.predict(X_test, 0 * X_test)
        error = np.sqrt(np.mean((pred - y_test)**2))
        testll = np.mean(sps.norm.logpdf(pred - y_test, scale=np.sqrt(uncert)))
        print('Test RMSE: ', error)
        print('Test ll: ', testll)

        pred, uncert = sgp.predict(X_train, 0 * X_train)
        error = np.sqrt(np.mean((pred - y_train)**2))
        trainll = np.mean(
            sps.norm.logpdf(pred - y_train, scale=np.sqrt(uncert)))
        print('Train RMSE: ', error)
        print('Train ll: ', trainll)

        # We pick the next `sampling` inputs by batched greedy EI
        next_inputs = sgp.batched_greedy_ei(sampling,
                                            np.min(X_train, 0),
                                            np.max(X_train, 0))
        valid_smiles = []
        new_features = []
        for i in tqdm(range(sampling)):
            all_vec = next_inputs[i].reshape((1, -1))
            tree_vec, mol_vec = np.hsplit(all_vec, 2)
            tree_vec = create_var(torch.from_numpy(tree_vec).float())
            mol_vec = create_var(torch.from_numpy(mol_vec).float())
            tree_vecs, _ = model.rsample(tree_vec, model.T_mean, model.T_var)
            mol_vecs, _ = model.rsample(mol_vec, model.G_mean, model.G_var)
            s = model.decode(tree_vecs, mol_vecs, prob_decode=False)
            if s is not None:
                valid_smiles.append(s)
                new_features.append(all_vec)
        print(len(valid_smiles), "molecules are found")
        # BUGFIX: keep only the features of molecules that actually decoded.
        # The previous code overwrote new_features with *all* sampled points,
        # which desynchronized X_train and y_train whenever a decode failed.
        new_features = (np.vstack(new_features)
                        if new_features else np.zeros((0, X_train.shape[1])))

        save_object(
            valid_smiles,
            os.path.join(save_dir, "valid_smiles{}.pkl".format(iteration))
        )

        scores = []
        for i in range(len(valid_smiles)):
            current_log_P_value = Descriptors.MolLogP(
                MolFromSmiles(valid_smiles[i]))
            current_SA_score = -sascorer.calculateScore(
                MolFromSmiles(valid_smiles[i]))
            cycle_list = nx.cycle_basis(
                nx.Graph(rdmolops.GetAdjacencyMatrix(
                    MolFromSmiles(valid_smiles[i]))))
            if len(cycle_list) == 0:
                cycle_length = 0
            else:
                cycle_length = max([len(j) for j in cycle_list])
            # Only rings larger than 6 atoms are penalized.
            if cycle_length <= 6:
                cycle_length = 0
            else:
                cycle_length = cycle_length - 6
            current_cycle_score = -cycle_length

            # Normalize each component against the training-set statistics.
            current_SA_score_normalized = (
                current_SA_score - np.mean(
                    SA_scores)) / np.std(SA_scores)
            current_log_P_value_normalized = (
                current_log_P_value - np.mean(
                    logP_values)) / np.std(logP_values)
            current_cycle_score_normalized = (
                current_cycle_score - np.mean(
                    cycle_scores)) / np.std(cycle_scores)

            score = (current_SA_score_normalized +
                     current_log_P_value_normalized +
                     current_cycle_score_normalized)
            scores.append(-score)  # target is always minused

        print(valid_smiles)
        print(scores)

        save_object(
            scores,
            os.path.join(save_dir, "scores{}.pkl".format(iteration))
        )

        if len(new_features) > 0:
            X_train = np.concatenate([X_train, new_features], 0)
            y_train = np.concatenate([y_train, np.array(scores)[:, None]], 0)

        iteration += 1
if __name__ == '__main__':
    # Silence RDKit warnings for the whole run.
    logger = rdkit.RDLogger.logger()
    logger.setLevel(rdkit.RDLogger.CRITICAL)

    parser = OptionParser()
    parser.add_option("-v", "--vocab", dest="vocab_path")
    parser.add_option("-m", "--model", dest="model_path")
    parser.add_option("-o", "--save_dir", dest="save_dir")
    parser.add_option("-f", "--descriptors", dest="descriptor_path")
    parser.add_option("-b", "--sampling", dest="sampling", default=60)
    parser.add_option("-i", "--iteration", dest="iteration", default=2)
    parser.add_option("-e", "--epochs", dest="epochs", default=2)
    parser.add_option("-w", "--hidden", dest="hidden_size", default=450)
    parser.add_option("-l", "--latent", dest="latent_size", default=56)
    parser.add_option("-t", "--depthT", dest="depthT", default=20)
    parser.add_option("-g", "--depthG", dest="depthG", default=3)
    parser.add_option("-r", "--seed", dest="random_seed", default=1)
    opts, args = parser.parse_args()

    # Option values arrive as strings; coerce the numeric ones inline.
    main_bo(opts.vocab_path,
            opts.model_path,
            opts.save_dir,
            opts.descriptor_path,
            int(opts.sampling),
            int(opts.iteration),
            int(opts.epochs),
            int(opts.hidden_size),
            int(opts.latent_size),
            int(opts.depthT),
            int(opts.depthG),
            int(opts.random_seed))
| 8,400 | 32.738956 | 79 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_jtnn/jtnn_enc.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import deque
from .mol_tree import Vocab, MolTree
from .nnutils import create_var, index_select_ND
class JTNNEncoder(nn.Module):
    """Tree message-passing encoder over junction trees.

    Messages flow along tree edges through a GraphGRU; each tree is
    summarized by its root node's output vector.
    """

    def __init__(self, hidden_size, depth, embedding):
        super(JTNNEncoder, self).__init__()
        self.hidden_size = hidden_size
        self.depth = depth
        self.embedding = embedding
        self.outputNN = nn.Sequential(
            nn.Linear(2 * hidden_size, hidden_size),
            nn.ReLU()
        )
        self.GRU = GraphGRU(hidden_size, hidden_size, depth=depth)

    def forward(self, fnode, fmess, node_graph, mess_graph, scope):
        """Run message passing; return per-tree root vectors and all edge messages."""
        fnode = create_var(fnode)
        fmess = create_var(fmess)
        node_graph = create_var(node_graph)
        mess_graph = create_var(mess_graph)
        messages = create_var(torch.zeros(mess_graph.size(0), self.hidden_size))

        fnode = self.embedding(fnode)
        fmess = index_select_ND(fnode, 0, fmess)
        messages = self.GRU(messages, fmess, mess_graph)

        mess_nei = index_select_ND(messages, 0, node_graph)
        node_vecs = torch.cat([fnode, mess_nei.sum(dim=1)], dim=-1)
        node_vecs = self.outputNN(node_vecs)

        # Root is the first node of every tree; its vector represents the tree.
        # (An unused max_len computation was removed here — dead code.)
        batch_vecs = []
        for st, le in scope:
            batch_vecs.append(node_vecs[st])
        tree_vecs = torch.stack(batch_vecs, dim=0)
        return tree_vecs, messages

    @staticmethod
    def tensorize(tree_batch):
        """Flatten a batch of trees into one node list plus per-tree (start, length) scopes."""
        node_batch = []
        scope = []
        for tree in tree_batch:
            scope.append((len(node_batch), len(tree.nodes)))
            node_batch.extend(tree.nodes)
        return JTNNEncoder.tensorize_nodes(node_batch, scope)

    @staticmethod
    def tensorize_nodes(node_batch, scope):
        """Build (fnode, fmess, node_graph, mess_graph, scope) and the edge->index dict.

        Message index 0 is reserved as padding; node indices (node.idx) must
        already be assigned batch-wide.
        """
        messages, mess_dict = [None], {}
        fnode = []
        for x in node_batch:
            fnode.append(x.wid)
            for y in x.neighbors:
                mess_dict[(x.idx, y.idx)] = len(messages)
                messages.append((x, y))

        node_graph = [[] for i in range(len(node_batch))]
        mess_graph = [[] for i in range(len(messages))]
        fmess = [0] * len(messages)

        for x, y in messages[1:]:
            mid1 = mess_dict[(x.idx, y.idx)]
            fmess[mid1] = x.idx
            node_graph[y.idx].append(mid1)
            # Predecessor messages: every (y,z) with z != x feeds into (x,y).
            for z in y.neighbors:
                if z.idx == x.idx:
                    continue
                mid2 = mess_dict[(y.idx, z.idx)]
                mess_graph[mid2].append(mid1)

        # Pad the adjacency lists to rectangular shape (at least width 1).
        max_len = max([len(t) for t in node_graph] + [1])
        for t in node_graph:
            pad_len = max_len - len(t)
            t.extend([0] * pad_len)

        max_len = max([len(t) for t in mess_graph] + [1])
        for t in mess_graph:
            pad_len = max_len - len(t)
            t.extend([0] * pad_len)

        mess_graph = torch.LongTensor(mess_graph)
        node_graph = torch.LongTensor(node_graph)
        fmess = torch.LongTensor(fmess)
        fnode = torch.LongTensor(fnode)
        return (fnode, fmess, node_graph, mess_graph, scope), mess_dict
class GraphGRU(nn.Module):
    """GRU-style recurrence unrolled over tree message edges for `depth` steps."""

    def __init__(self, input_size, hidden_size, depth):
        super(GraphGRU, self).__init__()
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.depth = depth

        self.W_z = nn.Linear(input_size + hidden_size, hidden_size)
        self.W_r = nn.Linear(input_size, hidden_size, bias=False)
        self.U_r = nn.Linear(hidden_size, hidden_size)
        self.W_h = nn.Linear(input_size + hidden_size, hidden_size)

    def forward(self, h, x, mess_graph):
        """Iteratively refine message states `h` given inputs `x` and the message graph."""
        mask = torch.ones(h.size(0), 1)
        mask[0] = 0  # first vector is padding
        mask = create_var(mask)
        for it in range(self.depth):
            h_nei = index_select_ND(h, 0, mess_graph)
            sum_h = h_nei.sum(dim=1)
            z_input = torch.cat([x, sum_h], dim=1)
            # torch.sigmoid / torch.tanh replace the deprecated
            # F.sigmoid / F.tanh aliases from torch.nn.functional.
            z = torch.sigmoid(self.W_z(z_input))

            r_1 = self.W_r(x).view(-1, 1, self.hidden_size)
            r_2 = self.U_r(h_nei)
            r = torch.sigmoid(r_1 + r_2)

            gated_h = r * h_nei
            sum_gated_h = gated_h.sum(dim=1)
            h_input = torch.cat([x, sum_gated_h], dim=1)
            pre_h = torch.tanh(self.W_h(h_input))
            h = (1.0 - z) * sum_h + z * pre_h
            h = h * mask  # keep the padding message at zero

        return h
| 4,473 | 32.893939 | 80 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_jtnn/datautils.py | import torch
from torch.utils.data import Dataset, DataLoader
from .mol_tree import MolTree
import numpy as np
from .jtnn_enc import JTNNEncoder
from .mpn import MPN
from .jtmpn import JTMPN
import pickle as pickle
import os, random
class PairTreeFolder(object):
    """Streams tensorized batches of (tree0, tree1) pairs from pickled files in a folder."""

    def __init__(self, data_folder, vocab, batch_size, num_workers=4, shuffle=True, y_assm=True, replicate=None):
        self.data_folder = data_folder
        self.data_files = [fn for fn in os.listdir(data_folder)]
        self.batch_size = batch_size
        self.vocab = vocab
        self.num_workers = num_workers
        self.y_assm = y_assm
        self.shuffle = shuffle

        if replicate is not None:  # replicate is an int
            self.data_files = self.data_files * replicate

    def __iter__(self):
        for name in self.data_files:
            path = os.path.join(self.data_folder, name)
            with open(path, 'rb') as handle:
                data = pickle.load(handle)

            if self.shuffle:
                random.shuffle(data)  # shuffle before batching

            batches = [data[start:start + self.batch_size]
                       for start in range(0, len(data), self.batch_size)]
            # Drop a trailing partial batch.
            if len(batches[-1]) < self.batch_size:
                batches.pop()

            dataset = PairTreeDataset(batches, self.vocab, self.y_assm)
            loader = DataLoader(dataset, batch_size=1, shuffle=False,
                                collate_fn=lambda x: x[0])
            for batch in loader:
                yield batch

            # Free the file's data before moving to the next one.
            del data, batches, dataset, loader
class MolTreeFolder(object):
    """Streams tensorized MolTree batches from pickled files in a folder."""

    def __init__(self, data_folder, vocab, batch_size, num_workers=4, shuffle=True, assm=True, replicate=None):
        self.data_folder = data_folder
        self.data_files = [fn for fn in os.listdir(data_folder)]
        self.batch_size = batch_size
        self.vocab = vocab
        self.num_workers = num_workers
        self.shuffle = shuffle
        self.assm = assm

        if replicate is not None:  # replicate is an int
            self.data_files = self.data_files * replicate

    def __iter__(self):
        for name in self.data_files:
            path = os.path.join(self.data_folder, name)
            with open(path, 'rb') as handle:
                data = pickle.load(handle)

            if self.shuffle:
                random.shuffle(data)  # shuffle before batching

            batches = [data[start:start + self.batch_size]
                       for start in range(0, len(data), self.batch_size)]
            # Drop a trailing partial batch.
            if len(batches[-1]) < self.batch_size:
                batches.pop()

            dataset = MolTreeDataset(batches, self.vocab, self.assm)
            loader = DataLoader(dataset, batch_size=1, shuffle=False,
                                collate_fn=lambda x: x[0])
            for batch in loader:
                yield batch

            # Free the file's data before moving to the next one.
            del data, batches, dataset, loader
class PairTreeDataset(Dataset):
    """Dataset over pre-built batches of tree pairs; each item is one tensorized batch."""

    def __init__(self, data, vocab, y_assm):
        self.data = data
        self.vocab = vocab
        self.y_assm = y_assm

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        batch0, batch1 = list(zip(*self.data[idx]))
        x_side = tensorize(batch0, self.vocab, assm=False)
        y_side = tensorize(batch1, self.vocab, assm=self.y_assm)
        return x_side, y_side
class MolTreeDataset(Dataset):
    """Dataset over pre-built MolTree batches; each item is one tensorized batch."""

    def __init__(self, data, vocab, assm=True):
        self.data = data
        self.vocab = vocab
        self.assm = assm

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return tensorize(self.data[idx], self.vocab, assm=self.assm)
def tensorize(tree_batch, vocab, assm=True):
    """Convert a batch of MolTrees into tensor holders for the tree and graph encoders.

    When `assm` is False only the encoder inputs are returned; otherwise the
    assembly candidates for the decoder are tensorized as well.
    """
    set_batch_nodeID(tree_batch, vocab)
    smiles_batch = [tree.smiles for tree in tree_batch]
    jtenc_holder, mess_dict = JTNNEncoder.tensorize(tree_batch)
    mpn_holder = MPN.tensorize(smiles_batch)

    if assm is False:
        return tree_batch, jtenc_holder, mpn_holder

    cands = []
    batch_idx = []
    for i, mol_tree in enumerate(tree_batch):
        for node in mol_tree.nodes:
            # Leaf node's attachment is determined by the neighboring node's attachment.
            if node.is_leaf or len(node.cands) == 1:
                continue
            cands.extend((cand, mol_tree.nodes, node) for cand in node.cands)
            batch_idx.extend([i] * len(node.cands))

    jtmpn_holder = JTMPN.tensorize(cands, mess_dict)
    batch_idx = torch.LongTensor(batch_idx)
    return tree_batch, jtenc_holder, mpn_holder, (jtmpn_holder, batch_idx)
def set_batch_nodeID(mol_batch, vocab):
    """Assign a running batch-wide index and a vocab word id to every tree node."""
    counter = 0
    for mol_tree in mol_batch:
        for node in mol_tree.nodes:
            node.idx = counter
            node.wid = vocab.get_index(node.smiles)
            counter += 1
| 4,697 | 32.798561 | 131 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_jtnn/nnutils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def create_var(tensor, requires_grad=None):
    """Wrap *tensor* as an autograd Variable and move it to the default GPU.

    NOTE(review): Variable is a legacy no-op wrapper in modern PyTorch; this
    helper also hard-codes .cuda(), so a GPU must be available.
    """
    if requires_grad is None:
        return Variable(tensor).cuda()
    return Variable(tensor, requires_grad=requires_grad).cuda()
def index_select_ND(source, dim, index):
    """Gather rows of `source` along `dim` using a multi-dimensional `index`.

    The result has shape index.shape + source.shape[1:].
    """
    flat = source.index_select(dim, index.view(-1))
    return flat.view(index.size() + source.size()[1:])
def avg_pool(all_vecs, scope, dim):
    """Sum `all_vecs` along `dim` and divide by each scope entry's length."""
    lengths = create_var(torch.Tensor([pair[1] for pair in scope]))
    return all_vecs.sum(dim=dim) / lengths.unsqueeze(-1)
def stack_pad_tensor(tensor_list):
    """Stack 2-D tensors on a new batch dim, zero-padding dim 0 to the max length.

    BUGFIX: the previous version overwrote the caller's list entries with the
    padded tensors (tensor_list[i] = ...); this version leaves *tensor_list*
    untouched and builds a fresh list instead.
    """
    max_len = max(t.size(0) for t in tensor_list)
    padded = [F.pad(t, (0, 0, 0, max_len - t.size(0))) for t in tensor_list]
    return torch.stack(padded, dim=0)
#3D padded tensor to 2D matrix, with padded zeros removed
def flatten_tensor(tensor, scope):
    """Concatenate the first `length` rows of every batch entry, dropping padding."""
    assert tensor.size(0) == len(scope)
    pieces = [tensor[i, :tup[1]] for i, tup in enumerate(scope)]
    return torch.cat(pieces, dim=0)
#2D matrix to 3D padded tensor
def inflate_tensor(tensor, scope):
    """Slice `tensor` by (start, length) scope entries and zero-pad to the max length."""
    max_len = max(le for _, le in scope)
    padded = []
    for st, le in scope:
        chunk = tensor[st:st + le]
        padded.append(F.pad(chunk, (0, 0, 0, max_len - le)))
    return torch.stack(padded, dim=0)
def GRU(x, h_nei, W_z, W_r, U_r, W_h):
    """One gated-recurrent update combining inputs with neighbor hidden states.

    Args:
        x: (batch, hidden) input features.
        h_nei: (batch, num_neighbors, hidden) neighbor hidden vectors.
        W_z, W_r, U_r, W_h: linear layers for the update/reset/candidate gates.

    Returns:
        (batch, hidden) updated hidden state.
    """
    hidden_size = x.size()[-1]
    sum_h = h_nei.sum(dim=1)
    z_input = torch.cat([x, sum_h], dim=1)
    # torch.sigmoid / torch.tanh replace the deprecated F.sigmoid / F.tanh.
    z = torch.sigmoid(W_z(z_input))

    r_1 = W_r(x).view(-1, 1, hidden_size)
    r_2 = U_r(h_nei)
    r = torch.sigmoid(r_1 + r_2)

    gated_h = r * h_nei
    sum_gated_h = gated_h.sum(dim=1)
    h_input = torch.cat([x, sum_gated_h], dim=1)
    pre_h = torch.tanh(W_h(h_input))
    new_h = (1.0 - z) * sum_h + z * pre_h
    return new_h
| 2,042 | 29.492537 | 67 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_jtnn/mpn.py | import torch
import torch.nn as nn
import rdkit.Chem as Chem
import torch.nn.functional as F
from .nnutils import *
from .chemutils import get_mol
# Element vocabulary for the atom one-hot encoding; symbols not in the list
# fall through to the trailing 'unknown' slot (see onek_encoding_unk).
ELEM_LIST = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca', 'Fe', 'Al', 'I', 'B', 'K', 'Se', 'Zn', 'H', 'Cu', 'Mn', 'unknown']

# Atom feature width: element one-hot + degree (6) + formal charge (5)
# + chiral tag (4) + aromatic flag (1); matches atom_features below.
ATOM_FDIM = len(ELEM_LIST) + 6 + 5 + 4 + 1
# Bond feature width: 4 bond-type flags + in-ring flag, plus stereo one-hot (6).
BOND_FDIM = 5 + 6
# Fixed width of the per-atom / per-bond neighbor lists built in MPN.tensorize.
MAX_NB = 6
def onek_encoding_unk(x, allowable_set):
    """One-hot encode *x* over *allowable_set*; unknown values map to the last slot."""
    target = x if x in allowable_set else allowable_set[-1]
    return [target == option for option in allowable_set]
def atom_features(atom):
    """Build the per-atom feature vector: element, degree, formal charge, chirality, aromaticity."""
    features = (onek_encoding_unk(atom.GetSymbol(), ELEM_LIST)
                + onek_encoding_unk(atom.GetDegree(), [0, 1, 2, 3, 4, 5])
                + onek_encoding_unk(atom.GetFormalCharge(), [-1, -2, 1, 2, 0])
                + onek_encoding_unk(int(atom.GetChiralTag()), [0, 1, 2, 3])
                + [atom.GetIsAromatic()])
    return torch.Tensor(features)
def bond_features(bond):
    """Build the per-bond feature vector: bond type, ring membership, stereo code."""
    bond_type = bond.GetBondType()
    type_feats = [bond_type == Chem.rdchem.BondType.SINGLE,
                  bond_type == Chem.rdchem.BondType.DOUBLE,
                  bond_type == Chem.rdchem.BondType.TRIPLE,
                  bond_type == Chem.rdchem.BondType.AROMATIC,
                  bond.IsInRing()]
    stereo_feats = onek_encoding_unk(int(bond.GetStereo()), [0, 1, 2, 3, 4, 5])
    return torch.Tensor(type_feats + stereo_feats)
class MPN(nn.Module):
    """Bond-centered message-passing network producing one vector per molecule."""

    def __init__(self, hidden_size, depth):
        super(MPN, self).__init__()
        self.hidden_size = hidden_size
        self.depth = depth

        self.W_i = nn.Linear(ATOM_FDIM + BOND_FDIM, hidden_size, bias=False)
        self.W_h = nn.Linear(hidden_size, hidden_size, bias=False)
        self.W_o = nn.Linear(ATOM_FDIM + hidden_size, hidden_size)

    def forward(self, fatoms, fbonds, agraph, bgraph, scope):
        """Run `depth` rounds of bond message passing, then mean-pool atoms per molecule."""
        fatoms = create_var(fatoms)
        fbonds = create_var(fbonds)
        agraph = create_var(agraph)
        bgraph = create_var(bgraph)

        binput = self.W_i(fbonds)
        message = F.relu(binput)

        for i in range(self.depth - 1):
            nei_message = index_select_ND(message, 0, bgraph)
            nei_message = nei_message.sum(dim=1)
            nei_message = self.W_h(nei_message)
            message = F.relu(binput + nei_message)

        nei_message = index_select_ND(message, 0, agraph)
        nei_message = nei_message.sum(dim=1)
        ainput = torch.cat([fatoms, nei_message], dim=1)
        atom_hiddens = F.relu(self.W_o(ainput))

        # Mean-pool each molecule's atom vectors according to its scope slice.
        # (An unused max_len computation was removed here — dead code.)
        batch_vecs = []
        for st, le in scope:
            batch_vecs.append(atom_hiddens[st:st + le].mean(dim=0))
        mol_vecs = torch.stack(batch_vecs, dim=0)
        return mol_vecs

    @staticmethod
    def tensorize(mol_batch):
        """Build batched atom/bond feature tensors and neighbor index tables from SMILES.

        Bond index 0 is reserved as padding (hence the leading zero feature row
        and the (-1, -1) sentinel bond).
        """
        padding = torch.zeros(ATOM_FDIM + BOND_FDIM)
        fatoms, fbonds = [], [padding]  # Ensure bond is 1-indexed
        in_bonds, all_bonds = [], [(-1, -1)]  # Ensure bond is 1-indexed
        scope = []
        total_atoms = 0

        for smiles in mol_batch:
            mol = get_mol(smiles)
            #mol = Chem.MolFromSmiles(smiles)
            n_atoms = mol.GetNumAtoms()
            for atom in mol.GetAtoms():
                fatoms.append(atom_features(atom))
                in_bonds.append([])

            for bond in mol.GetBonds():
                a1 = bond.GetBeginAtom()
                a2 = bond.GetEndAtom()
                x = a1.GetIdx() + total_atoms
                y = a2.GetIdx() + total_atoms

                # Each chemical bond becomes two directed messages (x->y, y->x).
                b = len(all_bonds)
                all_bonds.append((x, y))
                fbonds.append(torch.cat([fatoms[x], bond_features(bond)], 0))
                in_bonds[y].append(b)

                b = len(all_bonds)
                all_bonds.append((y, x))
                fbonds.append(torch.cat([fatoms[y], bond_features(bond)], 0))
                in_bonds[x].append(b)

            scope.append((total_atoms, n_atoms))
            total_atoms += n_atoms

        total_bonds = len(all_bonds)
        fatoms = torch.stack(fatoms, 0)
        fbonds = torch.stack(fbonds, 0)
        agraph = torch.zeros(total_atoms, MAX_NB).long()
        bgraph = torch.zeros(total_bonds, MAX_NB).long()

        for a in range(total_atoms):
            for i, b in enumerate(in_bonds[a]):
                agraph[a, i] = b

        # bgraph[b1] lists incoming messages that do not reverse bond b1.
        for b1 in range(1, total_bonds):
            x, y = all_bonds[b1]
            for i, b2 in enumerate(in_bonds[x]):
                if all_bonds[b2][0] != y:
                    bgraph[b1, i] = b2

        return (fatoms, fbonds, agraph, bgraph, scope)
| 4,469 | 34.47619 | 171 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_jtnn/jtnn_vae.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .mol_tree import Vocab, MolTree
from .nnutils import create_var, flatten_tensor, avg_pool
from .jtnn_enc import JTNNEncoder
from .jtnn_dec import JTNNDecoder
from .mpn import MPN
from .jtmpn import JTMPN
from .datautils import tensorize
from .chemutils import enum_assemble, set_atommap, copy_edit_mol, attach_mols
import rdkit
import rdkit.Chem as Chem
import copy, math
class JTNNVAE(nn.Module):
    """Junction-Tree VAE: encodes a molecule into separate tree and graph
    latent vectors and decodes latent samples back to SMILES."""

    def __init__(self, vocab, hidden_size, latent_size, depthT, depthG):
        super(JTNNVAE, self).__init__()
        self.vocab = vocab
        self.hidden_size = hidden_size
        self.latent_size = latent_size = latent_size // 2 #Tree and Mol has two vectors

        self.jtnn = JTNNEncoder(hidden_size, depthT, nn.Embedding(vocab.size(), hidden_size))
        self.decoder = JTNNDecoder(vocab, hidden_size, latent_size, nn.Embedding(vocab.size(), hidden_size))

        self.jtmpn = JTMPN(hidden_size, depthG)
        self.mpn = MPN(hidden_size, depthG)

        self.A_assm = nn.Linear(latent_size, hidden_size, bias=False)
        # NOTE(review): size_average is deprecated in recent PyTorch;
        # reduction='sum' is the modern spelling of the same behavior.
        self.assm_loss = nn.CrossEntropyLoss(size_average=False)

        # Mean / (negated-abs) variance heads for the tree and graph posteriors.
        self.T_mean = nn.Linear(hidden_size, latent_size)
        self.T_var = nn.Linear(hidden_size, latent_size)
        self.G_mean = nn.Linear(hidden_size, latent_size)
        self.G_var = nn.Linear(hidden_size, latent_size)

    def encode(self, jtenc_holder, mpn_holder):
        """Encode tensorized inputs; returns (tree_vecs, tree_messages, mol_vecs)."""
        tree_vecs, tree_mess = self.jtnn(*jtenc_holder)
        mol_vecs = self.mpn(*mpn_holder)
        return tree_vecs, tree_mess, mol_vecs

    def encode_from_smiles(self, smiles_list):
        """Encode raw SMILES strings; returns concatenated [tree | mol] vectors."""
        tree_batch = [MolTree(s) for s in smiles_list]
        _, jtenc_holder, mpn_holder = tensorize(tree_batch, self.vocab, assm=False)
        tree_vecs, _, mol_vecs = self.encode(jtenc_holder, mpn_holder)
        return torch.cat([tree_vecs, mol_vecs], dim=-1)

    def encode_latent(self, jtenc_holder, mpn_holder):
        """Return the posterior means and log-variances (concatenated tree/mol)."""
        tree_vecs, _ = self.jtnn(*jtenc_holder)
        mol_vecs = self.mpn(*mpn_holder)
        tree_mean = self.T_mean(tree_vecs)
        mol_mean = self.G_mean(mol_vecs)
        tree_var = -torch.abs(self.T_var(tree_vecs))
        mol_var = -torch.abs(self.G_var(mol_vecs))
        return torch.cat([tree_mean, mol_mean], dim=1), torch.cat([tree_var, mol_var], dim=1)

    def rsample(self, z_vecs, W_mean, W_var):
        """Reparameterized sample from the posterior; returns (sample, KL loss)."""
        batch_size = z_vecs.size(0)
        z_mean = W_mean(z_vecs)
        z_log_var = -torch.abs(W_var(z_vecs)) #Following Mueller et al.
        kl_loss = -0.5 * torch.sum(1.0 + z_log_var - z_mean * z_mean - torch.exp(z_log_var)) / batch_size
        epsilon = create_var(torch.randn_like(z_mean))
        z_vecs = z_mean + torch.exp(z_log_var / 2) * epsilon
        return z_vecs, kl_loss

    def sample_prior(self, prob_decode=False):
        """Decode a molecule from latent vectors drawn from the standard normal prior."""
        z_tree = torch.randn(1, self.latent_size).cuda()
        z_mol = torch.randn(1, self.latent_size).cuda()
        return self.decode(z_tree, z_mol, prob_decode)

    def forward(self, x_batch, beta):
        """Training step: returns (total loss, KL, word acc, topo acc, assm acc).

        `beta` weighs the KL term against the reconstruction losses.
        """
        x_batch, x_jtenc_holder, x_mpn_holder, x_jtmpn_holder = x_batch
        x_tree_vecs, x_tree_mess, x_mol_vecs = self.encode(x_jtenc_holder, x_mpn_holder)
        z_tree_vecs,tree_kl = self.rsample(x_tree_vecs, self.T_mean, self.T_var)
        z_mol_vecs,mol_kl = self.rsample(x_mol_vecs, self.G_mean, self.G_var)

        kl_div = tree_kl + mol_kl
        word_loss, topo_loss, word_acc, topo_acc = self.decoder(x_batch, z_tree_vecs)
        assm_loss, assm_acc = self.assm(x_batch, x_jtmpn_holder, z_mol_vecs, x_tree_mess)

        return word_loss + topo_loss + assm_loss + beta * kl_div, kl_div.item(), word_acc, topo_acc, assm_acc

    def assm(self, mol_batch, jtmpn_holder, x_mol_vecs, x_tree_mess):
        """Attachment-prediction loss: score each assembly candidate against the mol vector."""
        jtmpn_holder,batch_idx = jtmpn_holder
        fatoms,fbonds,agraph,bgraph,scope = jtmpn_holder
        batch_idx = create_var(batch_idx)

        cand_vecs = self.jtmpn(fatoms, fbonds, agraph, bgraph, scope, x_tree_mess)

        x_mol_vecs = x_mol_vecs.index_select(0, batch_idx)
        x_mol_vecs = self.A_assm(x_mol_vecs) #bilinear
        scores = torch.bmm(
            x_mol_vecs.unsqueeze(1),
            cand_vecs.unsqueeze(-1)
        ).squeeze()

        cnt,tot,acc = 0,0,0
        all_loss = []
        for i,mol_tree in enumerate(mol_batch):
            comp_nodes = [node for node in mol_tree.nodes if len(node.cands) > 1 and not node.is_leaf]
            cnt += len(comp_nodes)
            for node in comp_nodes:
                label = node.cands.index(node.label)
                ncand = len(node.cands)
                cur_score = scores.narrow(0, tot, ncand)
                tot += ncand

                if cur_score.data[label] >= cur_score.max().item():
                    acc += 1

                label = create_var(torch.LongTensor([label]))
                all_loss.append( self.assm_loss(cur_score.view(1,-1), label) )

        all_loss = sum(all_loss) / len(mol_batch)
        return all_loss, acc * 1.0 / cnt

    def decode(self, x_tree_vecs, x_mol_vecs, prob_decode):
        """Decode a single latent pair into a SMILES string (or None on failure)."""
        #currently do not support batch decoding
        assert x_tree_vecs.size(0) == 1 and x_mol_vecs.size(0) == 1

        pred_root,pred_nodes = self.decoder.decode(x_tree_vecs, prob_decode)
        if len(pred_nodes) == 0: return None
        elif len(pred_nodes) == 1: return pred_root.smiles

        #Mark nid & is_leaf & atommap
        for i,node in enumerate(pred_nodes):
            node.nid = i + 1
            node.is_leaf = (len(node.neighbors) == 1)
            if len(node.neighbors) > 1:
                set_atommap(node.mol, node.nid)

        scope = [(0, len(pred_nodes))]
        jtenc_holder,mess_dict = JTNNEncoder.tensorize_nodes(pred_nodes, scope)
        _,tree_mess = self.jtnn(*jtenc_holder)
        tree_mess = (tree_mess, mess_dict) #Important: tree_mess is a matrix, mess_dict is a python dict

        x_mol_vecs = self.A_assm(x_mol_vecs).squeeze() #bilinear

        cur_mol = copy_edit_mol(pred_root.mol)
        # global_amap[nid] maps a node's local atom indices to indices in cur_mol.
        global_amap = [{}] + [{} for node in pred_nodes]
        global_amap[1] = {atom.GetIdx():atom.GetIdx() for atom in cur_mol.GetAtoms()}

        cur_mol,_ = self.dfs_assemble(tree_mess, x_mol_vecs, pred_nodes, cur_mol, global_amap, [], pred_root, None, prob_decode, check_aroma=True)
        if cur_mol is None:
            # Retry without the aromaticity constraint, accepting a partial result.
            cur_mol = copy_edit_mol(pred_root.mol)
            global_amap = [{}] + [{} for node in pred_nodes]
            global_amap[1] = {atom.GetIdx():atom.GetIdx() for atom in cur_mol.GetAtoms()}
            cur_mol,pre_mol = self.dfs_assemble(tree_mess, x_mol_vecs, pred_nodes, cur_mol, global_amap, [], pred_root, None, prob_decode, check_aroma=False)
            if cur_mol is None: cur_mol = pre_mol

        if cur_mol is None:
            return None

        cur_mol = cur_mol.GetMol()
        set_atommap(cur_mol)
        # Round-trip through SMILES to canonicalize / validate the molecule.
        cur_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cur_mol))
        return Chem.MolToSmiles(cur_mol) if cur_mol is not None else None

    def dfs_assemble(self, y_tree_mess, x_mol_vecs, all_nodes, cur_mol, global_amap, fa_amap, cur_node, fa_node, prob_decode, check_aroma):
        """Depth-first assembly with backtracking.

        Tries candidate attachments for cur_node's children in score order and
        recurses; returns (assembled_mol_or_None, best_partial_mol).
        """
        fa_nid = fa_node.nid if fa_node is not None else -1
        prev_nodes = [fa_node] if fa_node is not None else []

        children = [nei for nei in cur_node.neighbors if nei.nid != fa_nid]
        # Attach singleton (one-atom) neighbors first, then larger ones by size.
        neighbors = [nei for nei in children if nei.mol.GetNumAtoms() > 1]
        neighbors = sorted(neighbors, key=lambda x:x.mol.GetNumAtoms(), reverse=True)
        singletons = [nei for nei in children if nei.mol.GetNumAtoms() == 1]
        neighbors = singletons + neighbors

        cur_amap = [(fa_nid,a2,a1) for nid,a1,a2 in fa_amap if nid == cur_node.nid]
        cands,aroma_score = enum_assemble(cur_node, neighbors, prev_nodes, cur_amap)
        if len(cands) == 0 or (sum(aroma_score) < 0 and check_aroma):
            return None, cur_mol

        cand_smiles,cand_amap = list(zip(*cands))
        aroma_score = torch.Tensor(aroma_score).cuda()
        cands = [(smiles, all_nodes, cur_node) for smiles in cand_smiles]

        if len(cands) > 1:
            jtmpn_holder = JTMPN.tensorize(cands, y_tree_mess[1])
            fatoms,fbonds,agraph,bgraph,scope = jtmpn_holder
            cand_vecs = self.jtmpn(fatoms, fbonds, agraph, bgraph, scope, y_tree_mess[0])
            scores = torch.mv(cand_vecs, x_mol_vecs) + aroma_score
        else:
            scores = torch.Tensor([1.0])

        if prob_decode:
            probs = F.softmax(scores.view(1,-1), dim=1).squeeze() + 1e-7 #prevent prob = 0
            cand_idx = torch.multinomial(probs, probs.numel())
        else:
            _,cand_idx = torch.sort(scores, descending=True)

        backup_mol = Chem.RWMol(cur_mol)
        pre_mol = cur_mol
        for i in range(cand_idx.numel()):
            # Restore the molecule before trying the next candidate attachment.
            cur_mol = Chem.RWMol(backup_mol)
            pred_amap = cand_amap[cand_idx[i].item()]
            new_global_amap = copy.deepcopy(global_amap)

            for nei_id,ctr_atom,nei_atom in pred_amap:
                if nei_id == fa_nid:
                    continue
                new_global_amap[nei_id][nei_atom] = new_global_amap[cur_node.nid][ctr_atom]

            cur_mol = attach_mols(cur_mol, children, [], new_global_amap) #father is already attached
            new_mol = cur_mol.GetMol()
            new_mol = Chem.MolFromSmiles(Chem.MolToSmiles(new_mol))

            if new_mol is None: continue

            has_error = False
            for nei_node in children:
                if nei_node.is_leaf: continue
                tmp_mol, tmp_mol2 = self.dfs_assemble(y_tree_mess, x_mol_vecs, all_nodes, cur_mol, new_global_amap, pred_amap, nei_node, cur_node, prob_decode, check_aroma)
                if tmp_mol is None:
                    has_error = True
                    if i == 0: pre_mol = tmp_mol2
                    break
                cur_mol = tmp_mol

            if not has_error: return cur_mol, cur_mol

        return None, pre_mol
| 10,015 | 43.318584 | 172 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_jtnn/jtprop_vae.py | import torch
import torch.nn as nn
from .mol_tree import Vocab, MolTree
from .nnutils import create_var
from .jtnn_enc import JTNNEncoder
from .jtnn_dec import JTNNDecoder
from .mpn import MPN, mol2graph
from .jtmpn import JTMPN
from .chemutils import enum_assemble, set_atommap, copy_edit_mol, attach_mols, atom_equal, decode_stereo
import rdkit
import rdkit.Chem as Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
import copy, math
def set_batch_nodeID(mol_batch, vocab):
    """Assign a running batch-wide index and a vocab word id to every tree node.

    NOTE(review): duplicate of the helper in datautils.py.
    """
    counter = 0
    for mol_tree in mol_batch:
        for node in mol_tree.nodes:
            node.idx = counter
            node.wid = vocab.get_index(node.smiles)
            counter += 1
class JTPropVAE(nn.Module):
def __init__(self, vocab, hidden_size, latent_size, depth):
super(JTPropVAE, self).__init__()
self.vocab = vocab
self.hidden_size = hidden_size
self.latent_size = latent_size
self.depth = depth
self.embedding = nn.Embedding(vocab.size(), hidden_size)
self.jtnn = JTNNEncoder(vocab, hidden_size, self.embedding)
self.jtmpn = JTMPN(hidden_size, depth)
self.mpn = MPN(hidden_size, depth)
self.decoder = JTNNDecoder(vocab, hidden_size, latent_size / 2, self.embedding)
self.T_mean = nn.Linear(hidden_size, latent_size / 2)
self.T_var = nn.Linear(hidden_size, latent_size / 2)
self.G_mean = nn.Linear(hidden_size, latent_size / 2)
self.G_var = nn.Linear(hidden_size, latent_size / 2)
self.propNN = nn.Sequential(
nn.Linear(self.latent_size, self.hidden_size),
nn.Tanh(),
nn.Linear(self.hidden_size, 1)
)
self.prop_loss = nn.MSELoss()
self.assm_loss = nn.CrossEntropyLoss(size_average=False)
self.stereo_loss = nn.CrossEntropyLoss(size_average=False)
def encode(self, mol_batch):
set_batch_nodeID(mol_batch, self.vocab)
root_batch = [mol_tree.nodes[0] for mol_tree in mol_batch]
tree_mess,tree_vec = self.jtnn(root_batch)
smiles_batch = [mol_tree.smiles for mol_tree in mol_batch]
mol_vec = self.mpn(mol2graph(smiles_batch))
return tree_mess, tree_vec, mol_vec
def encode_latent_mean(self, smiles_list):
mol_batch = [MolTree(s) for s in smiles_list]
for mol_tree in mol_batch:
mol_tree.recover()
_, tree_vec, mol_vec = self.encode(mol_batch)
tree_mean = self.T_mean(tree_vec)
mol_mean = self.G_mean(mol_vec)
return torch.cat([tree_mean,mol_mean], dim=1)
def forward(self, mol_batch, beta=0):
batch_size = len(mol_batch)
mol_batch, prop_batch = list(zip(*mol_batch))
tree_mess, tree_vec, mol_vec = self.encode(mol_batch)
tree_mean = self.T_mean(tree_vec)
tree_log_var = -torch.abs(self.T_var(tree_vec)) #Following Mueller et al.
mol_mean = self.G_mean(mol_vec)
mol_log_var = -torch.abs(self.G_var(mol_vec)) #Following Mueller et al.
z_mean = torch.cat([tree_mean,mol_mean], dim=1)
z_log_var = torch.cat([tree_log_var,mol_log_var], dim=1)
kl_loss = -0.5 * torch.sum(1.0 + z_log_var - z_mean * z_mean - torch.exp(z_log_var)) / batch_size
epsilon = create_var(torch.randn(batch_size, self.latent_size / 2), False)
tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
epsilon = create_var(torch.randn(batch_size, self.latent_size / 2), False)
mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon
word_loss, topo_loss, word_acc, topo_acc = self.decoder(mol_batch, tree_vec)
assm_loss, assm_acc = self.assm(mol_batch, mol_vec, tree_mess)
stereo_loss, stereo_acc = self.stereo(mol_batch, mol_vec)
all_vec = torch.cat([tree_vec, mol_vec], dim=1)
prop_label = create_var(torch.Tensor(prop_batch))
prop_loss = self.prop_loss(self.propNN(all_vec).squeeze(), prop_label)
loss = word_loss + topo_loss + assm_loss + 2 * stereo_loss + beta * kl_loss + prop_loss
return loss, kl_loss.data[0], word_acc, topo_acc, assm_acc, stereo_acc, prop_loss.data[0]
def assm(self, mol_batch, mol_vec, tree_mess):
    """Graph-assembly loss.

    Scores every candidate attachment of each non-leaf, multi-candidate
    node against the molecule latent vector and applies cross-entropy on
    the ground-truth candidate. Returns (mean loss, accuracy).
    """
    cands = []
    batch_idx = []
    for i, mol_tree in enumerate(mol_batch):
        for node in mol_tree.nodes:
            # Leaf node's attachment is determined by neighboring node's attachment
            if node.is_leaf or len(node.cands) == 1:
                continue
            cands.extend([(cand, mol_tree.nodes, node) for cand in node.cand_mols])
            batch_idx.extend([i] * len(node.cands))

    cand_vec = self.jtmpn(cands, tree_mess)
    cand_vec = self.G_mean(cand_vec)

    batch_idx = create_var(torch.LongTensor(batch_idx))
    mol_vec = mol_vec.index_select(0, batch_idx)

    # Batched dot products: (N,1,d) x (N,d,1) -> (N,) scores.
    # latent_size // 2: integer division — `/` yields a float under Python 3
    # and breaks .view().
    mol_vec = mol_vec.view(-1, 1, self.latent_size // 2)
    cand_vec = cand_vec.view(-1, self.latent_size // 2, 1)
    scores = torch.bmm(mol_vec, cand_vec).squeeze()

    cnt, tot, acc = 0, 0, 0
    all_loss = []
    for i, mol_tree in enumerate(mol_batch):
        comp_nodes = [node for node in mol_tree.nodes if len(node.cands) > 1 and not node.is_leaf]
        cnt += len(comp_nodes)
        for node in comp_nodes:
            label = node.cands.index(node.label)
            ncand = len(node.cands)
            cur_score = scores.narrow(0, tot, ncand)
            tot += ncand
            # .item(): 0-dim tensor indexing via .data[0] fails on PyTorch >= 0.4.
            if cur_score[label].item() >= cur_score.max().item():
                acc += 1
            label = create_var(torch.LongTensor([label]))
            all_loss.append(self.assm_loss(cur_score.view(1, -1), label))

    all_loss = sum(all_loss) / len(mol_batch)
    return all_loss, acc * 1.0 / cnt
def stereo(self, mol_batch, mol_vec):
    """Stereochemistry loss.

    For every molecule with multiple stereoisomer candidates, scores each
    candidate by cosine similarity to the molecule latent vector and
    applies cross-entropy on the true 3-D SMILES.
    Returns (mean loss, accuracy); (zero loss, 1.0) when no molecule in
    the batch has stereo alternatives.

    NOTE: appends `smiles3D` to `mol_tree.stereo_cands` in place when it
    is missing — the candidate list on the input trees is mutated.
    """
    stereo_cands, batch_idx = [], []
    labels = []
    for i, mol_tree in enumerate(mol_batch):
        cands = mol_tree.stereo_cands
        if len(cands) == 1:
            continue  # nothing to disambiguate
        if mol_tree.smiles3D not in cands:
            cands.append(mol_tree.smiles3D)
        stereo_cands.extend(cands)
        batch_idx.extend([i] * len(cands))
        labels.append((cands.index(mol_tree.smiles3D), len(cands)))

    if len(labels) == 0:
        return create_var(torch.zeros(1)), 1.0

    batch_idx = create_var(torch.LongTensor(batch_idx))
    stereo_cands = self.mpn(mol2graph(stereo_cands))
    stereo_cands = self.G_mean(stereo_cands)
    stereo_labels = mol_vec.index_select(0, batch_idx)
    scores = torch.nn.CosineSimilarity()(stereo_cands, stereo_labels)

    st, acc = 0, 0
    all_loss = []
    for label, le in labels:
        cur_scores = scores.narrow(0, st, le)
        # .item(): 0-dim tensor indexing via .data[0] fails on PyTorch >= 0.4.
        if cur_scores[label].item() >= cur_scores.max().item():
            acc += 1
        label = create_var(torch.LongTensor([label]))
        all_loss.append(self.stereo_loss(cur_scores.view(1, -1), label))
        st += le
    all_loss = sum(all_loss) / len(labels)
    return all_loss, acc * 1.0 / len(labels)
def reconstruct(self, smiles, prob_decode=False):
    """Encode a SMILES string and decode it back (stochastic reconstruction).

    Returns the decoded SMILES, or None when decoding fails.
    """
    mol_tree = MolTree(smiles)
    mol_tree.recover()
    _, tree_vec, mol_vec = self.encode([mol_tree])

    tree_mean = self.T_mean(tree_vec)
    tree_log_var = -torch.abs(self.T_var(tree_vec))  # Following Mueller et al.
    mol_mean = self.G_mean(mol_vec)
    mol_log_var = -torch.abs(self.G_var(mol_vec))  # Following Mueller et al.

    # latent_size // 2: torch.randn needs an int size — `/` produces a float
    # under Python 3 and raises a TypeError.
    epsilon = create_var(torch.randn(1, self.latent_size // 2), False)
    tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
    epsilon = create_var(torch.randn(1, self.latent_size // 2), False)
    mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon
    return self.decode(tree_vec, mol_vec, prob_decode)
def sample_prior(self, prob_decode=False):
    """Sample a molecule from the standard-normal prior.

    Returns the decoded SMILES, or None when decoding fails.
    """
    # latent_size // 2: each half of the latent space (tree / graph). Integer
    # division — `/` would pass a float size to torch.randn under Python 3.
    tree_vec = create_var(torch.randn(1, self.latent_size // 2), False)
    mol_vec = create_var(torch.randn(1, self.latent_size // 2), False)
    return self.decode(tree_vec, mol_vec, prob_decode)
def optimize(self, smiles, sim_cutoff, lr=2.0, num_iter=20):
    """Gradient ascent on the property predictor in latent space.

    Starting from the latent mean of `smiles`, takes `num_iter` gradient
    steps that increase the predicted property, then binary-searches the
    visited trajectory for the furthest point whose decoded molecule stays
    within `sim_cutoff` Tanimoto similarity of the input.

    Returns (new_smiles, similarity); falls back to (smiles, 1.0) when
    decoding fails or the best candidate violates the similarity cutoff.
    """
    mol_tree = MolTree(smiles)
    mol_tree.recover()
    _, tree_vec, mol_vec = self.encode([mol_tree])
    mol = Chem.MolFromSmiles(smiles)
    fp1 = AllChem.GetMorganFingerprint(mol, 2)

    tree_mean = self.T_mean(tree_vec)
    tree_log_var = -torch.abs(self.T_var(tree_vec))  # Following Mueller et al.
    mol_mean = self.G_mean(mol_vec)
    mol_log_var = -torch.abs(self.G_var(mol_vec))  # Following Mueller et al.
    mean = torch.cat([tree_mean, mol_mean], dim=1)
    log_var = torch.cat([tree_log_var, mol_log_var], dim=1)
    cur_vec = create_var(mean.data, True)  # requires_grad=True for autograd.grad

    visited = []
    for step in range(num_iter):
        prop_val = self.propNN(cur_vec).squeeze()
        grad = torch.autograd.grad(prop_val, cur_vec)[0]
        cur_vec = cur_vec.data + lr * grad.data
        cur_vec = create_var(cur_vec, True)
        visited.append(cur_vec)

    # Binary search the trajectory for the furthest still-similar point.
    l, r = 0, num_iter - 1
    while l < r - 1:
        mid = (l + r) // 2  # integer division: `/` yields a float index under Python 3
        new_vec = visited[mid]
        tree_vec, mol_vec = torch.chunk(new_vec, 2, dim=1)
        new_smiles = self.decode(tree_vec, mol_vec, prob_decode=False)
        if new_smiles is None:
            r = mid - 1
            continue
        new_mol = Chem.MolFromSmiles(new_smiles)
        fp2 = AllChem.GetMorganFingerprint(new_mol, 2)
        sim = DataStructs.TanimotoSimilarity(fp1, fp2)
        if sim < sim_cutoff:
            r = mid - 1
        else:
            l = mid

    tree_vec, mol_vec = torch.chunk(visited[l], 2, dim=1)
    new_smiles = self.decode(tree_vec, mol_vec, prob_decode=False)
    if new_smiles is None:
        return smiles, 1.0
    new_mol = Chem.MolFromSmiles(new_smiles)
    fp2 = AllChem.GetMorganFingerprint(new_mol, 2)
    sim = DataStructs.TanimotoSimilarity(fp1, fp2)
    if sim >= sim_cutoff:
        return new_smiles, sim
    else:
        return smiles, 1.0
def decode(self, tree_vec, mol_vec, prob_decode):
    """Decode latent vectors into a SMILES string (or None on failure).

    Decodes the junction tree first, assembles the molecular graph from it,
    then picks the stereoisomer whose embedding best matches `mol_vec`.
    """
    pred_root, pred_nodes = self.decoder.decode(tree_vec, prob_decode)

    #Mark nid & is_leaf & atommap
    for i, node in enumerate(pred_nodes):
        node.nid = i + 1
        node.is_leaf = (len(node.neighbors) == 1)
        if len(node.neighbors) > 1:
            set_atommap(node.mol, node.nid)

    tree_mess = self.jtnn([pred_root])[0]

    cur_mol = copy_edit_mol(pred_root.mol)
    global_amap = [{}] + [{} for node in pred_nodes]
    global_amap[1] = {atom.GetIdx(): atom.GetIdx() for atom in cur_mol.GetAtoms()}

    cur_mol = self.dfs_assemble(tree_mess, mol_vec, pred_nodes, cur_mol, global_amap, [], pred_root, None, prob_decode)
    if cur_mol is None:
        return None

    cur_mol = cur_mol.GetMol()
    set_atommap(cur_mol)
    cur_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cur_mol))
    if cur_mol is None:
        return None

    smiles2D = Chem.MolToSmiles(cur_mol)
    stereo_cands = decode_stereo(smiles2D)
    if len(stereo_cands) == 1:
        return stereo_cands[0]
    stereo_vecs = self.mpn(mol2graph(stereo_cands))
    stereo_vecs = self.G_mean(stereo_vecs)
    scores = nn.CosineSimilarity()(stereo_vecs, mol_vec)
    _, max_id = scores.max(dim=0)
    # .item(): max_id is a 0-dim tensor; `.data[0]` fails on PyTorch >= 0.4.
    return stereo_cands[max_id.item()]
def dfs_assemble(self, tree_mess, mol_vec, all_nodes, cur_mol, global_amap, fa_amap, cur_node, fa_node, prob_decode):
    """Recursively attach the children of `cur_node` onto `cur_mol`.

    Candidate attachments are scored against `mol_vec` and tried in score
    order (or sampled when `prob_decode`); the first candidate whose whole
    subtree assembles into a chemically valid molecule is kept.
    Returns the assembled RWMol, or None when no candidate works.
    """
    fa_nid = fa_node.nid if fa_node is not None else -1
    prev_nodes = [fa_node] if fa_node is not None else []

    children = [nei for nei in cur_node.neighbors if nei.nid != fa_nid]
    neighbors = [nei for nei in children if nei.mol.GetNumAtoms() > 1]
    neighbors = sorted(neighbors, key=lambda x: x.mol.GetNumAtoms(), reverse=True)
    singletons = [nei for nei in children if nei.mol.GetNumAtoms() == 1]
    neighbors = singletons + neighbors  # singletons first, then larger cliques

    cur_amap = [(fa_nid, a2, a1) for nid, a1, a2 in fa_amap if nid == cur_node.nid]
    cands = enum_assemble(cur_node, neighbors, prev_nodes, cur_amap)
    if len(cands) == 0:
        return None
    cand_smiles, cand_mols, cand_amap = list(zip(*cands))

    cands = [(candmol, all_nodes, cur_node) for candmol in cand_mols]

    cand_vecs = self.jtmpn(cands, tree_mess)
    cand_vecs = self.G_mean(cand_vecs)
    mol_vec = mol_vec.squeeze()
    scores = torch.mv(cand_vecs, mol_vec) * 20

    if prob_decode:
        probs = nn.Softmax()(scores.view(1, -1)).squeeze() + 1e-5  # prevent prob = 0
        cand_idx = torch.multinomial(probs, probs.numel())
    else:
        _, cand_idx = torch.sort(scores, descending=True)

    backup_mol = Chem.RWMol(cur_mol)
    for i in range(cand_idx.numel()):
        cur_mol = Chem.RWMol(backup_mol)  # restart from a clean copy per candidate
        # .item(): cand_idx[i] is a 0-dim tensor; `.data[0]` fails on PyTorch >= 0.4.
        pred_amap = cand_amap[cand_idx[i].item()]
        new_global_amap = copy.deepcopy(global_amap)

        for nei_id, ctr_atom, nei_atom in pred_amap:
            if nei_id == fa_nid:
                continue
            new_global_amap[nei_id][nei_atom] = new_global_amap[cur_node.nid][ctr_atom]

        cur_mol = attach_mols(cur_mol, children, [], new_global_amap)  #father is already attached
        new_mol = cur_mol.GetMol()
        new_mol = Chem.MolFromSmiles(Chem.MolToSmiles(new_mol))

        if new_mol is None:
            continue

        result = True
        for nei_node in children:
            if nei_node.is_leaf:
                continue
            cur_mol = self.dfs_assemble(tree_mess, mol_vec, all_nodes, cur_mol, new_global_amap, pred_amap, nei_node, cur_node, prob_decode)
            if cur_mol is None:
                result = False
                break
        if result:
            return cur_mol

    return None
| 14,781 | 40.757062 | 144 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_jtnn/jtmpn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .nnutils import create_var, index_select_ND
from .chemutils import get_mol
import rdkit.Chem as Chem
# Atom symbols recognized by the featurizer; anything else maps to 'unknown'.
ELEM_LIST = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca', 'Fe', 'Al', 'I', 'B', 'K', 'Se', 'Zn', 'H', 'Cu', 'Mn', 'unknown']
# Atom feature width: element one-hot + degree (6) + formal charge (5) + aromatic flag.
ATOM_FDIM = len(ELEM_LIST) + 6 + 5 + 1
# Bond feature width: 4 bond-type flags + in-ring flag.
BOND_FDIM = 5
# Maximum incoming messages per atom/bond (padding width of agraph/bgraph).
MAX_NB = 15
def onek_encoding_unk(x, allowable_set):
    """One-hot encode `x` over `allowable_set`.

    Values not in the set are mapped to the last slot (the 'unknown' entry).
    Returns a list of booleans the same length as `allowable_set`.
    """
    if x in allowable_set:
        return [item == x for item in allowable_set]
    fallback = allowable_set[-1]
    return [item == fallback for item in allowable_set]
def atom_features(atom):
    """Build the per-atom feature vector.

    Concatenates one-hot encodings of element symbol, degree and formal
    charge with an aromaticity flag, as a torch.Tensor of width ATOM_FDIM.
    """
    feats = onek_encoding_unk(atom.GetSymbol(), ELEM_LIST)
    feats = feats + onek_encoding_unk(atom.GetDegree(), [0, 1, 2, 3, 4, 5])
    feats = feats + onek_encoding_unk(atom.GetFormalCharge(), [-1, -2, 1, 2, 0])
    feats = feats + [atom.GetIsAromatic()]
    return torch.Tensor(feats)
def bond_features(bond):
    """Build the per-bond feature vector: bond-type flags plus ring membership."""
    bt = bond.GetBondType()
    flags = [
        bt == Chem.rdchem.BondType.SINGLE,
        bt == Chem.rdchem.BondType.DOUBLE,
        bt == Chem.rdchem.BondType.TRIPLE,
        bt == Chem.rdchem.BondType.AROMATIC,
        bond.IsInRing(),
    ]
    return torch.Tensor(flags)
class JTMPN(nn.Module):
    """Message-passing network over candidate attachment subgraphs.

    Unlike a plain molecular MPN, incoming messages may also come from the
    junction tree (`tree_message`); tree-message rows are indexed first, so
    graph-bond indices are offset by the number of tree messages.
    """

    def __init__(self, hidden_size, depth):
        super(JTMPN, self).__init__()
        self.hidden_size = hidden_size
        self.depth = depth  # number of message-passing iterations

        self.W_i = nn.Linear(ATOM_FDIM + BOND_FDIM, hidden_size, bias=False)  # bond input
        self.W_h = nn.Linear(hidden_size, hidden_size, bias=False)            # message update
        self.W_o = nn.Linear(ATOM_FDIM + hidden_size, hidden_size)            # atom readout

    def forward(self, fatoms, fbonds, agraph, bgraph, scope, tree_message): #tree_message[0] == vec(0)
        """Run `depth` rounds of message passing and pool per candidate.

        `scope` lists (start_atom, n_atoms) spans into the flattened atom
        axis; each candidate vector is the mean of its atoms' hidden states.
        """
        fatoms = create_var(fatoms)
        fbonds = create_var(fbonds)
        agraph = create_var(agraph)
        bgraph = create_var(bgraph)

        binput = self.W_i(fbonds)
        graph_message = F.relu(binput)

        for i in range(self.depth - 1):
            # Concatenate so bgraph indices address [tree msgs | graph msgs].
            message = torch.cat([tree_message, graph_message], dim=0)
            nei_message = index_select_ND(message, 0, bgraph)
            nei_message = nei_message.sum(dim=1) #assuming tree_message[0] == vec(0)
            nei_message = self.W_h(nei_message)
            graph_message = F.relu(binput + nei_message)

        message = torch.cat([tree_message, graph_message], dim=0)
        nei_message = index_select_ND(message, 0, agraph)
        nei_message = nei_message.sum(dim=1)
        ainput = torch.cat([fatoms, nei_message], dim=1)
        atom_hiddens = F.relu(self.W_o(ainput))

        mol_vecs = []
        for st, le in scope:
            # Mean-pool this candidate's atoms.
            mol_vec = atom_hiddens.narrow(0, st, le).sum(dim=0) / le
            mol_vecs.append(mol_vec)

        mol_vecs = torch.stack(mol_vecs, dim=0)
        return mol_vecs

    @staticmethod
    def tensorize(cand_batch, mess_dict):
        """Flatten a batch of (smiles, all_nodes, ctr_node) candidates into
        the tensors `forward` expects.

        Bond indices in agraph/bgraph are offset by `total_mess` so index 0
        stays reserved for the zero-padding message; tree-message indices
        from `mess_dict` are inserted unchanged (they are < total_mess).
        """
        fatoms, fbonds = [], []
        in_bonds, all_bonds = [], []
        total_atoms = 0
        total_mess = len(mess_dict) + 1 #must include vec(0) padding
        scope = []

        for smiles, all_nodes, ctr_node in cand_batch:
            mol = Chem.MolFromSmiles(smiles)
            Chem.Kekulize(mol) #The original jtnn version kekulizes. Need to revisit why it is necessary
            n_atoms = mol.GetNumAtoms()
            ctr_bid = ctr_node.idx

            for atom in mol.GetAtoms():
                fatoms.append( atom_features(atom) )
                in_bonds.append([])

            for bond in mol.GetBonds():
                a1 = bond.GetBeginAtom()
                a2 = bond.GetEndAtom()
                x = a1.GetIdx() + total_atoms
                y = a2.GetIdx() + total_atoms
                #Here x_nid,y_nid could be 0
                x_nid, y_nid = a1.GetAtomMapNum(), a2.GetAtomMapNum()
                x_bid = all_nodes[x_nid - 1].idx if x_nid > 0 else -1
                y_bid = all_nodes[y_nid - 1].idx if y_nid > 0 else -1

                bfeature = bond_features(bond)

                # Each bond is stored twice, once per direction.
                b = total_mess + len(all_bonds) #bond idx offseted by total_mess
                all_bonds.append((x, y))
                fbonds.append( torch.cat([fatoms[x], bfeature], 0) )
                in_bonds[y].append(b)

                b = total_mess + len(all_bonds)
                all_bonds.append((y, x))
                fbonds.append( torch.cat([fatoms[y], bfeature], 0) )
                in_bonds[x].append(b)

                # Atoms belonging to two different tree nodes also receive the
                # corresponding inter-node tree messages.
                if x_bid >= 0 and y_bid >= 0 and x_bid != y_bid:
                    if (x_bid, y_bid) in mess_dict:
                        mess_idx = mess_dict[(x_bid, y_bid)]
                        in_bonds[y].append(mess_idx)
                    if (y_bid, x_bid) in mess_dict:
                        mess_idx = mess_dict[(y_bid, x_bid)]
                        in_bonds[x].append(mess_idx)

            scope.append((total_atoms, n_atoms))
            total_atoms += n_atoms

        total_bonds = len(all_bonds)
        fatoms = torch.stack(fatoms, 0)
        fbonds = torch.stack(fbonds, 0)
        agraph = torch.zeros(total_atoms, MAX_NB).long()
        bgraph = torch.zeros(total_bonds, MAX_NB).long()

        for a in range(total_atoms):
            for i, b in enumerate(in_bonds[a]):
                agraph[a, i] = b

        for b1 in range(total_bonds):
            x, y = all_bonds[b1]
            for i, b2 in enumerate(in_bonds[x]): #b2 is offseted by total_mess
                # Exclude the reverse of b1 itself from its incoming messages.
                if b2 < total_mess or all_bonds[b2 - total_mess][0] != y:
                    bgraph[b1, i] = b2

        return (fatoms, fbonds, agraph, bgraph, scope)
| 5,387 | 37.76259 | 184 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_jtnn/jtnn_dec.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .mol_tree import Vocab, MolTree, MolTreeNode
from .nnutils import create_var, GRU
from .chemutils import enum_assemble, set_atommap
import copy
# Maximum neighbors considered during tree message passing (padding width).
MAX_NB = 15
# Hard cap on decoding steps to prevent unbounded tree expansion.
MAX_DECODE_LEN = 100
class JTNNDecoder(nn.Module):
    """Junction-tree decoder.

    Expands a tree node-by-node from a latent tree vector, predicting at
    each step which vocabulary clique ("word") to attach and whether to
    stop expanding and backtrack.
    """

    def __init__(self, vocab, hidden_size, latent_size, embedding):
        super(JTNNDecoder, self).__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab.size()
        self.vocab = vocab
        self.embedding = embedding  # shared clique-label embedding

        #GRU Weights
        self.W_z = nn.Linear(2 * hidden_size, hidden_size)
        self.U_r = nn.Linear(hidden_size, hidden_size, bias=False)
        self.W_r = nn.Linear(hidden_size, hidden_size)
        self.W_h = nn.Linear(2 * hidden_size, hidden_size)

        #Word Prediction Weights
        self.W = nn.Linear(hidden_size + latent_size, hidden_size)

        #Stop Prediction Weights
        self.U = nn.Linear(hidden_size + latent_size, hidden_size)
        self.U_i = nn.Linear(2 * hidden_size, hidden_size)

        #Output Weights
        self.W_o = nn.Linear(hidden_size, self.vocab_size)
        self.U_o = nn.Linear(hidden_size, 1)

        #Loss Functions (summed over the batch; normalized in forward)
        # NOTE(review): size_average=False is deprecated in modern PyTorch —
        # equivalent to reduction='sum'; confirm against the pinned torch version.
        self.pred_loss = nn.CrossEntropyLoss(size_average=False)
        self.stop_loss = nn.BCEWithLogitsLoss(size_average=False)

    def aggregate(self, hiddens, contexts, x_tree_vecs, mode):
        """Score hidden states against their tree-latent contexts.

        `mode` selects the word-prediction ('word') or stop-prediction
        ('stop') head; `contexts` maps each hidden row to its batch index
        in `x_tree_vecs`.
        """
        if mode == 'word':
            V, V_o = self.W, self.W_o
        elif mode == 'stop':
            V, V_o = self.U, self.U_o
        else:
            raise ValueError('aggregate mode is wrong')

        tree_contexts = x_tree_vecs.index_select(0, contexts)
        input_vec = torch.cat([hiddens, tree_contexts], dim=-1)
        output_vec = F.relu( V(input_vec) )
        return V_o(output_vec)

    def forward(self, mol_batch, x_tree_vecs):
        """Teacher-forced decoding over a batch of MolTrees.

        Replays each tree's DFS trace, accumulating hidden states and
        targets for word and stop prediction. Returns
        (word loss, stop loss, word accuracy, stop accuracy).
        """
        pred_hiddens, pred_contexts, pred_targets = [], [], []
        stop_hiddens, stop_contexts, stop_targets = [], [], []
        traces = []
        for mol_tree in mol_batch:
            s = []
            dfs(s, mol_tree.nodes[0], -1)
            traces.append(s)
            for node in mol_tree.nodes:
                node.neighbors = []  # rebuilt incrementally while replaying the trace

        #Predict Root
        batch_size = len(mol_batch)
        pred_hiddens.append(create_var(torch.zeros(len(mol_batch), self.hidden_size)))
        pred_targets.extend([mol_tree.nodes[0].wid for mol_tree in mol_batch])
        pred_contexts.append( create_var( torch.LongTensor(list(range(batch_size))) ) )

        max_iter = max([len(tr) for tr in traces])
        padding = create_var(torch.zeros(self.hidden_size), False)
        h = {}  # (src_node_idx, dst_node_idx) -> message vector

        for t in range(max_iter):
            prop_list = []
            batch_list = []
            # Gather the t-th trace step of every tree that is still active.
            for i, plist in enumerate(traces):
                if t < len(plist):
                    prop_list.append(plist[t])
                    batch_list.append(i)

            cur_x = []
            cur_h_nei, cur_o_nei = [], []

            for node_x, real_y, _ in prop_list:
                #Neighbors for message passing (target not included)
                cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors if node_y.idx != real_y.idx]
                pad_len = MAX_NB - len(cur_nei)
                cur_h_nei.extend(cur_nei)
                cur_h_nei.extend([padding] * pad_len)

                #Neighbors for stop prediction (all neighbors)
                cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors]
                pad_len = MAX_NB - len(cur_nei)
                cur_o_nei.extend(cur_nei)
                cur_o_nei.extend([padding] * pad_len)

                #Current clique embedding
                cur_x.append(node_x.wid)

            #Clique embedding
            cur_x = create_var(torch.LongTensor(cur_x))
            cur_x = self.embedding(cur_x)

            #Message passing
            cur_h_nei = torch.stack(cur_h_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
            new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)

            #Node Aggregate
            cur_o_nei = torch.stack(cur_o_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
            cur_o = cur_o_nei.sum(dim=1)

            #Gather targets
            pred_target, pred_list = [], []
            stop_target = []
            for i, m in enumerate(prop_list):
                node_x, node_y, direction = m
                x, y = node_x.idx, node_y.idx
                h[(x, y)] = new_h[i]
                node_y.neighbors.append(node_x)
                if direction == 1:
                    # Forward edge: the model must also predict node_y's word.
                    pred_target.append(node_y.wid)
                    pred_list.append(i)
                stop_target.append(direction)

            #Hidden states for stop prediction
            cur_batch = create_var(torch.LongTensor(batch_list))
            stop_hidden = torch.cat([cur_x, cur_o], dim=1)
            stop_hiddens.append( stop_hidden )
            stop_contexts.append( cur_batch )
            stop_targets.extend( stop_target )

            #Hidden states for clique prediction
            if len(pred_list) > 0:
                batch_list = [batch_list[i] for i in pred_list]
                cur_batch = create_var(torch.LongTensor(batch_list))
                pred_contexts.append( cur_batch )

                cur_pred = create_var(torch.LongTensor(pred_list))
                pred_hiddens.append( new_h.index_select(0, cur_pred) )
                pred_targets.extend( pred_target )

        #Last stop at root
        cur_x, cur_o_nei = [], []
        for mol_tree in mol_batch:
            node_x = mol_tree.nodes[0]
            cur_x.append(node_x.wid)
            cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors]
            pad_len = MAX_NB - len(cur_nei)
            cur_o_nei.extend(cur_nei)
            cur_o_nei.extend([padding] * pad_len)

        cur_x = create_var(torch.LongTensor(cur_x))
        cur_x = self.embedding(cur_x)
        cur_o_nei = torch.stack(cur_o_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
        cur_o = cur_o_nei.sum(dim=1)

        stop_hidden = torch.cat([cur_x, cur_o], dim=1)
        stop_hiddens.append( stop_hidden )
        stop_contexts.append( create_var( torch.LongTensor(list(range(batch_size))) ) )
        stop_targets.extend( [0] * len(mol_batch) )

        #Predict next clique
        pred_contexts = torch.cat(pred_contexts, dim=0)
        pred_hiddens = torch.cat(pred_hiddens, dim=0)
        pred_scores = self.aggregate(pred_hiddens, pred_contexts, x_tree_vecs, 'word')
        pred_targets = create_var(torch.LongTensor(pred_targets))

        pred_loss = self.pred_loss(pred_scores, pred_targets) / len(mol_batch)
        _, preds = torch.max(pred_scores, dim=1)
        pred_acc = torch.eq(preds, pred_targets).float()
        pred_acc = torch.sum(pred_acc) / pred_targets.nelement()

        #Predict stop
        stop_contexts = torch.cat(stop_contexts, dim=0)
        stop_hiddens = torch.cat(stop_hiddens, dim=0)
        stop_hiddens = F.relu( self.U_i(stop_hiddens) )
        stop_scores = self.aggregate(stop_hiddens, stop_contexts, x_tree_vecs, 'stop')
        stop_scores = stop_scores.squeeze(-1)
        stop_targets = create_var(torch.Tensor(stop_targets))

        stop_loss = self.stop_loss(stop_scores, stop_targets) / len(mol_batch)
        stops = torch.ge(stop_scores, 0).float()
        stop_acc = torch.eq(stops, stop_targets).float()
        stop_acc = torch.sum(stop_acc) / stop_targets.nelement()

        return pred_loss, stop_loss, pred_acc.item(), stop_acc.item()

    def decode(self, x_tree_vecs, prob_decode):
        """Autoregressively decode a single junction tree (batch size 1).

        Greedy when `prob_decode` is False, sampled otherwise. Returns
        (root node, all nodes in creation order).
        """
        assert x_tree_vecs.size(0) == 1

        stack = []
        init_hiddens = create_var( torch.zeros(1, self.hidden_size) )
        zero_pad = create_var(torch.zeros(1, 1, self.hidden_size))
        contexts = create_var( torch.LongTensor(1).zero_() )

        #Root Prediction
        root_score = self.aggregate(init_hiddens, contexts, x_tree_vecs, 'word')
        _, root_wid = torch.max(root_score, dim=1)
        root_wid = root_wid.item()

        root = MolTreeNode(self.vocab.get_smiles(root_wid))
        root.wid = root_wid
        root.idx = 0
        stack.append( (root, self.vocab.get_slots(root.wid)) )

        all_nodes = [root]
        h = {}
        for step in range(MAX_DECODE_LEN):
            node_x, fa_slot = stack[-1]
            cur_h_nei = [ h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors ]
            if len(cur_h_nei) > 0:
                cur_h_nei = torch.stack(cur_h_nei, dim=0).view(1, -1, self.hidden_size)
            else:
                cur_h_nei = zero_pad

            cur_x = create_var(torch.LongTensor([node_x.wid]))
            cur_x = self.embedding(cur_x)

            #Predict stop
            cur_h = cur_h_nei.sum(dim=1)
            stop_hiddens = torch.cat([cur_x, cur_h], dim=1)
            stop_hiddens = F.relu( self.U_i(stop_hiddens) )
            stop_score = self.aggregate(stop_hiddens, contexts, x_tree_vecs, 'stop')

            if prob_decode:
                backtrack = (torch.bernoulli( torch.sigmoid(stop_score) ).item() == 0)
            else:
                backtrack = (stop_score.item() < 0)

            if not backtrack: #Forward: Predict next clique
                new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
                pred_score = self.aggregate(new_h, contexts, x_tree_vecs, 'word')

                if prob_decode:
                    sort_wid = torch.multinomial(F.softmax(pred_score, dim=1).squeeze(), 5)
                else:
                    _, sort_wid = torch.sort(pred_score, dim=1, descending=True)
                    sort_wid = sort_wid.data.squeeze()

                # Try the top-5 candidate words; take the first chemically
                # compatible one.
                next_wid = None
                for wid in sort_wid[:5]:
                    slots = self.vocab.get_slots(wid)
                    node_y = MolTreeNode(self.vocab.get_smiles(wid))
                    if have_slots(fa_slot, slots) and can_assemble(node_x, node_y):
                        next_wid = wid
                        next_slots = slots
                        break

                if next_wid is None:
                    backtrack = True #No more children can be added
                else:
                    node_y = MolTreeNode(self.vocab.get_smiles(next_wid))
                    node_y.wid = next_wid
                    node_y.idx = len(all_nodes)
                    node_y.neighbors.append(node_x)
                    h[(node_x.idx, node_y.idx)] = new_h[0]
                    stack.append( (node_y, next_slots) )
                    all_nodes.append(node_y)

            if backtrack: #Backtrack, use if instead of else
                if len(stack) == 1:
                    break #At root, terminate

                node_fa, _ = stack[-2]
                cur_h_nei = [ h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors if node_y.idx != node_fa.idx ]
                if len(cur_h_nei) > 0:
                    cur_h_nei = torch.stack(cur_h_nei, dim=0).view(1, -1, self.hidden_size)
                else:
                    cur_h_nei = zero_pad

                new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
                h[(node_x.idx, node_fa.idx)] = new_h[0]
                node_fa.neighbors.append(node_x)
                stack.pop()

        return root, all_nodes
"""
Helper Functions:
"""
def dfs(stack, x, fa_idx):
    """Record a depth-first traversal of the tree rooted at `x`.

    Appends (parent, child, 1) on the way down and (child, parent, 0) on
    the way back up, skipping the edge back to the parent `fa_idx`.
    """
    children = [nei for nei in x.neighbors if nei.idx != fa_idx]
    for child in children:
        stack.append((x, child, 1))   # forward edge: expand child
        dfs(stack, child, x.idx)
        stack.append((child, x, 0))   # backward edge: backtrack
def have_slots(fa_slots, ch_slots):
    """Return True when a father/child clique pair share a compatible slot.

    Slots are (atom_symbol, charge, num_H) triples. Carbon slots only match
    when the combined hydrogen count allows a bond (h1 + h2 >= 4).
    Mutates the slot lists: when exactly one slot matched in a two-slot
    (non-ring) clique, that slot is consumed (popped).
    """
    if len(fa_slots) > 2 and len(ch_slots) > 2:
        return True  # both are multi-slot rings: always considered compatible
    matches = [
        (i, j)
        for i, (a1, c1, h1) in enumerate(fa_slots)
        for j, (a2, c2, h2) in enumerate(ch_slots)
        if a1 == a2 and c1 == c2 and (a1 != "C" or h1 + h2 >= 4)
    ]
    if not matches:
        return False

    fa_match, ch_match = list(zip(*matches))
    if len(set(fa_match)) == 1 and 1 < len(fa_slots) <= 2:  # never remove atom from ring
        fa_slots.pop(fa_match[0])
    if len(set(ch_match)) == 1 and 1 < len(ch_slots) <= 2:  # never remove atom from ring
        ch_slots.pop(ch_match[0])
    return True
def can_assemble(node_x, node_y):
    """Check whether `node_y` can attach to `node_x` alongside node_x's
    existing neighbors, i.e. at least one valid assembly exists.

    Mutates nid/is_leaf and the atom maps on the involved nodes as a side
    effect of setting up the enumeration.
    """
    node_x.nid = 1
    node_x.is_leaf = False
    set_atommap(node_x.mol, node_x.nid)

    neis = node_x.neighbors + [node_y]
    for i, nei in enumerate(neis):
        nei.nid = i + 2
        nei.is_leaf = len(nei.neighbors) <= 1
        if nei.is_leaf:
            set_atommap(nei.mol, 0)
        else:
            set_atommap(nei.mol, nei.nid)

    # Singletons first, then larger fragments by decreasing atom count.
    singletons = [nei for nei in neis if nei.mol.GetNumAtoms() == 1]
    multis = sorted(
        [nei for nei in neis if nei.mol.GetNumAtoms() > 1],
        key=lambda n: n.mol.GetNumAtoms(),
        reverse=True,
    )
    cands, aroma_scores = enum_assemble(node_x, singletons + multis)
    return len(cands) > 0  # and sum(aroma_scores) >= 0
if __name__ == "__main__":
    # Smoke test: build a junction tree for each SMILES and print its DFS
    # traversal as (parent_smiles, child_smiles, direction) triples, where
    # direction 1 = expand child, 0 = backtrack to parent.
    smiles = ["O=C1[C@@H]2C=C[C@@H](C=CC2)C1(c1ccccc1)c1ccccc1","O=C([O-])CC[C@@]12CCCC[C@]1(O)OC(=O)CC2", "ON=C1C[C@H]2CC3(C[C@@H](C1)c1ccccc12)OCCO3", "C[C@H]1CC(=O)[C@H]2[C@@]3(O)C(=O)c4cccc(O)c4[C@@H]4O[C@@]43[C@@H](O)C[C@]2(O)C1", 'Cc1cc(NC(=O)CSc2nnc3c4ccccc4n(C)c3n2)ccc1Br', 'CC(C)(C)c1ccc(C(=O)N[C@H]2CCN3CCCc4cccc2c43)cc1', "O=c1c2ccc3c(=O)n(-c4nccs4)c(=O)c4ccc(c(=O)n1-c1nccs1)c2c34", "O=C(N1CCc2c(F)ccc(F)c2C1)C1(O)Cc2ccccc2C1"]
    for s in smiles:
        print(s)
        tree = MolTree(s)
        for i,node in enumerate(tree.nodes):
            node.idx = i

        stack = []
        dfs(stack, tree.nodes[0], -1)
        for x,y,d in stack:
            # x/y are MolTreeNode objects; print their clique SMILES.
            print((x.smiles, y.smiles, d))
        print('------------------------------')
| 13,820 | 38.945087 | 440 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_molopt/pretrain.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from torch.autograd import Variable
import math, random, sys
from optparse import OptionParser
from collections import deque

from jtnn import *
import rdkit

# Pretraining script for JTPropVAE (beta=0: no KL term yet).
# Converted from Python 2 (`xrange`, bare print statements) to Python 3 —
# the original mixes both styles and is a SyntaxError under Python 3.

lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)

parser = OptionParser()
parser.add_option("-t", "--train", dest="train_path")
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-p", "--prop", dest="prop_path")
parser.add_option("-s", "--save_dir", dest="save_path")
parser.add_option("-b", "--batch", dest="batch_size", default=40)
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
opts, args = parser.parse_args()

vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)

batch_size = int(opts.batch_size)
hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)

model = JTPropVAE(vocab, hidden_size, latent_size, depth)

for param in model.parameters():
    if param.dim() == 1:
        nn.init.constant(param, 0)
    else:
        nn.init.xavier_normal(param)

model = model.cpu()
print ("Model #Params: %dK" % (sum([x.nelement() for x in model.parameters()]) / 1000,))

optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()

dataset = PropDataset(opts.train_path, opts.prop_path)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=lambda x: x)

MAX_EPOCH = 3
PRINT_ITER = 20

for epoch in range(MAX_EPOCH):  # xrange -> range (Python 3)
    word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0

    for it, batch in enumerate(dataloader):
        # Make sure the ground-truth label is among each node's candidates.
        for mol_tree, _ in batch:
            for node in mol_tree.nodes:
                if node.label not in node.cands:
                    node.cands.append(node.label)
                    node.cand_mols.append(node.label_mol)

        model.zero_grad()
        loss, kl_div, wacc, tacc, sacc, dacc, pacc = model(batch, beta=0)
        loss.sum().backward()
        optimizer.step()

        word_acc += wacc
        topo_acc += tacc
        assm_acc += sacc
        steo_acc += dacc
        prop_acc += pacc

        if (it + 1) % PRINT_ITER == 0:
            word_acc = word_acc / PRINT_ITER * 100
            topo_acc = topo_acc / PRINT_ITER * 100
            assm_acc = assm_acc / PRINT_ITER * 100
            steo_acc = steo_acc / PRINT_ITER * 100
            prop_acc = prop_acc / PRINT_ITER
            print("KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f, Prop: %.4f" % (kl_div, word_acc, topo_acc, assm_acc, steo_acc, prop_acc))
            word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
            sys.stdout.flush()

    scheduler.step()
    print("learning rate: %.6f" % scheduler.get_lr()[0])
    torch.save(model.state_dict(), opts.save_path + "/model.iter-" + str(epoch))
| 3,110 | 32.095745 | 149 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_molopt/vaetrain.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from torch.autograd import Variable
import math, random, sys
from optparse import OptionParser
from collections import deque

from jtnn import *
import rdkit

# Joint property/VAE training script with KL annealing checkpoint saves.
# Converted from Python 2 (`xrange`, bare print statements) to Python 3 —
# the original is a SyntaxError under Python 3.

lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)

parser = OptionParser()
parser.add_option("-t", "--train", dest="train_path")
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-p", "--prop", dest="prop_path")
parser.add_option("-s", "--save_dir", dest="save_path")
parser.add_option("-m", "--model", dest="model_path", default=None)
parser.add_option("-b", "--batch", dest="batch_size", default=40)
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
parser.add_option("-z", "--beta", dest="beta", default=1.0)
parser.add_option("-q", "--lr", dest="lr", default=1e-3)
opts, args = parser.parse_args()

vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)

batch_size = int(opts.batch_size)
hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)
beta = float(opts.beta)
lr = float(opts.lr)

model = JTPropVAE(vocab, hidden_size, latent_size, depth)

# Warm-start from a checkpoint when provided, otherwise initialize weights.
if opts.model_path is not None:
    model.load_state_dict(torch.load(opts.model_path))
else:
    for param in model.parameters():
        if param.dim() == 1:
            nn.init.constant(param, 0)
        else:
            nn.init.xavier_normal(param)

model = model.cuda()
print("Model #Params: %dK" % (sum([x.nelement() for x in model.parameters()]) / 1000,))

optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()

dataset = PropDataset(opts.train_path, opts.prop_path)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=lambda x: x, drop_last=True)

MAX_EPOCH = 6
PRINT_ITER = 20

for epoch in range(MAX_EPOCH):  # xrange -> range (Python 3)
    word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0

    for it, batch in enumerate(dataloader):
        # Make sure the ground-truth label is among each node's candidates.
        for mol_tree, _ in batch:
            for node in mol_tree.nodes:
                if node.label not in node.cands:
                    node.cands.append(node.label)
                    node.cand_mols.append(node.label_mol)

        model.zero_grad()
        loss, kl_div, wacc, tacc, sacc, dacc, pacc = model(batch, beta)
        loss.sum().backward()
        optimizer.step()

        word_acc += wacc
        topo_acc += tacc
        assm_acc += sacc
        steo_acc += dacc
        prop_acc += pacc

        if (it + 1) % PRINT_ITER == 0:
            word_acc = word_acc / PRINT_ITER * 100
            topo_acc = topo_acc / PRINT_ITER * 100
            assm_acc = assm_acc / PRINT_ITER * 100
            steo_acc = steo_acc / PRINT_ITER * 100
            prop_acc /= PRINT_ITER
            print("KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f, Prop: %.4f" % (kl_div, word_acc, topo_acc, assm_acc, steo_acc, prop_acc))
            word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
            sys.stdout.flush()

        if (it + 1) % 1500 == 0:  # Fast annealing
            scheduler.step()
            print("learning rate: %.6f" % scheduler.get_lr()[0])
            torch.save(model.state_dict(), opts.save_path + "/model.iter-%d-%d" % (epoch, it + 1))

    scheduler.step()
    print("learning rate: %.6f" % scheduler.get_lr()[0])
    torch.save(model.state_dict(), opts.save_path + "/model.iter-" + str(epoch))
| 3,695 | 33.542056 | 149 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_molopt/optimize.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import math, random, sys
from optparse import OptionParser
from collections import deque

import rdkit
import rdkit.Chem as Chem
from rdkit.Chem import Descriptors
import sascorer

from jtnn import *

# Optimize each test molecule in latent space for penalized logP
# (MolLogP minus synthetic-accessibility score) while keeping Tanimoto
# similarity to the input above --sim.
# Converted from Python 2 print statements to Python 3 print() — the
# original is a SyntaxError under Python 3.

lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)

parser = OptionParser()
parser.add_option("-t", "--test", dest="test_path")
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-m", "--model", dest="model_path")
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
parser.add_option("-s", "--sim", dest="cutoff", default=0.0)
opts, args = parser.parse_args()

vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)

hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)
sim_cutoff = float(opts.cutoff)

model = JTPropVAE(vocab, hidden_size, latent_size, depth)
model.load_state_dict(torch.load(opts.model_path))
model = model.cuda()

data = []
with open(opts.test_path) as f:
    for line in f:
        s = line.strip("\r\n ").split()[0]
        data.append(s)

res = []
for smiles in data:
    mol = Chem.MolFromSmiles(smiles)
    score = Descriptors.MolLogP(mol) - sascorer.calculateScore(mol)

    new_smiles, sim = model.optimize(smiles, sim_cutoff=sim_cutoff, lr=2, num_iter=80)
    new_mol = Chem.MolFromSmiles(new_smiles)
    new_score = Descriptors.MolLogP(new_mol) - sascorer.calculateScore(new_mol)
    res.append((new_score - score, sim, score, new_score, smiles, new_smiles))
    print(new_score - score, sim, score, new_score, smiles, new_smiles)

print(sum([x[0] for x in res]), sum([x[1] for x in res]))
| 1,845 | 29.766667 | 85 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/pretrain.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from torch.autograd import Variable

import math, random, sys
from optparse import OptionParser
from collections import deque

from jtnn import Vocab, JTNNVAE, MoleculeDataset

import rdkit

# Silence RDKit's verbose logging.
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)

# Pre-training options (beta=0, i.e. plain autoencoder, no KL term).
parser = OptionParser()
parser.add_option("-t", "--train", dest="train_path")
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-s", "--save_dir", dest="save_path")
parser.add_option("-b", "--batch", dest="batch_size", default=1000)
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
opts,args = parser.parse_args()

# Cluster vocabulary, one SMILES per line.
vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)

batch_size = int(opts.batch_size)
hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)

model = JTNNVAE(vocab, hidden_size, latent_size, depth)

# Initialize biases to zero, weight matrices with Xavier.
# NOTE(review): nn.init.constant / nn.init.xavier_normal are the deprecated
# spellings; modern torch uses the trailing-underscore variants (constant_,
# xavier_normal_).
for param in model.parameters():
    if param.dim() == 1:
        nn.init.constant(param, 0)
    else:
        nn.init.xavier_normal(param)

#model = model.cuda()
print("Model #Params: %dK" % (sum([x.nelement() for x in model.parameters()]) / 1000,))

optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
# NOTE(review): this advances the LR schedule once before any training step;
# modern torch also warns when scheduler.step() precedes optimizer.step().
scheduler.step()

dataset = MoleculeDataset(opts.train_path)

MAX_EPOCH = 3
PRINT_ITER = 20

# collate_fn=lambda x:x keeps each batch as a plain list of MolTree objects.
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=lambda x:x, drop_last=True)

for epoch in range(MAX_EPOCH):
    word_acc,topo_acc,assm_acc,steo_acc = 0,0,0,0

    for it, batch in enumerate(dataloader):
        # Make sure the ground-truth attachment is always among the candidates.
        for mol_tree in batch:
            for node in mol_tree.nodes:
                if node.label not in node.cands:
                    node.cands.append(node.label)
                    node.cand_mols.append(node.label_mol)

        model.zero_grad()
        loss, kl_div, wacc, tacc, sacc, dacc = model(batch, beta=0)
        loss.sum().backward()
        optimizer.step()

        # Running accuracies: word (label), topology, assembly, stereo.
        word_acc += wacc
        topo_acc += tacc
        assm_acc += sacc
        steo_acc += dacc

        if (it + 1) % PRINT_ITER == 0:
            word_acc = word_acc / PRINT_ITER * 100
            topo_acc = topo_acc / PRINT_ITER * 100
            assm_acc = assm_acc / PRINT_ITER * 100
            steo_acc = steo_acc / PRINT_ITER * 100

            print("KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f" % (kl_div, word_acc, topo_acc, assm_acc, steo_acc))
            word_acc,topo_acc,assm_acc,steo_acc = 0,0,0,0
            sys.stdout.flush()

    # Decay the learning rate once per epoch and checkpoint the model.
    scheduler.step()
    print("learning rate: %.6f" % scheduler.get_lr()[0])
    torch.save(model.state_dict(), opts.save_path + "/model.iter-" + str(epoch))
| 2,973 | 31.326087 | 128 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/sample.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd import Variable

import math, random, sys
from optparse import OptionParser
from collections import deque

import rdkit
import rdkit.Chem as Chem
from rdkit.Chem import Draw

from jtnn import *

# Silence RDKit's verbose logging.
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)

# Options: number of samples, vocab/model paths, and the architecture
# hyper-parameters the checkpoint was trained with.
parser = OptionParser()
parser.add_option("-n", "--nsample", dest="nsample")
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-m", "--model", dest="model_path")
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
opts,args = parser.parse_args()

vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)

hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)
nsample = int(opts.nsample)

model = JTNNVAE(vocab, hidden_size, latent_size, depth)

# Tolerate checkpoints that lack some (newly added) parameters: fall back to
# the model's freshly initialized values for the missing keys.
load_dict = torch.load(opts.model_path)
missing = {k: v for k, v in model.state_dict().items() if k not in load_dict}
load_dict.update(missing)
model.load_state_dict(load_dict)
model = model.cuda()

# Sample nsample molecules from the prior, one SMILES per line.
torch.manual_seed(0)
for i in range(nsample):  # fix: xrange was removed in Python 3
    print(model.sample_prior(prob_decode=True))
| 1,368 | 28.76087 | 77 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/vaetrain.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from torch.autograd import Variable
import math, random, sys
from optparse import OptionParser
from collections import deque
from jtnn import *
import rdkit
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
parser = OptionParser()
parser.add_option("-t", "--train", dest="train_path")
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-s", "--save_dir", dest="save_path")
parser.add_option("-m", "--model", dest="model_path", default=None)
parser.add_option("-b", "--batch", dest="batch_size", default=40)
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
parser.add_option("-z", "--beta", dest="beta", default=1.0)
parser.add_option("-q", "--lr", dest="lr", default=1e-3)
opts,args = parser.parse_args()
vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)
batch_size = int(opts.batch_size)
hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)
beta = float(opts.beta)
lr = float(opts.lr)
anneal = float(opts.anneal)
model = JTNNVAE(vocab, hidden_size, latent_size, depth)
if opts.model_path is not None:
model.load_state_dict(torch.load(opts.model_path))
else:
for param in model.parameters():
if param.dim() == 1:
nn.init.constant(param, 0)
else:
nn.init.xavier_normal(param)
model = model.cuda()
print "Model #Params: %dK" % (sum([x.nelement() for x in model.parameters()]) / 1000,)
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
dataset = MoleculeDataset(opts.train_path)
MAX_EPOCH = 7
PRINT_ITER = 20
for epoch in xrange(MAX_EPOCH):
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=lambda x:x, drop_last=True)
word_acc,topo_acc,assm_acc,steo_acc = 0,0,0,0
for it, batch in enumerate(dataloader):
for mol_tree in batch:
for node in mol_tree.nodes:
if node.label not in node.cands:
node.cands.append(node.label)
node.cand_mols.append(node.label_mol)
model.zero_grad()
loss, kl_div, wacc, tacc, sacc, dacc = model(batch, beta)
loss.backward()
optimizer.step()
word_acc += wacc
topo_acc += tacc
assm_acc += sacc
steo_acc += dacc
if (it + 1) % PRINT_ITER == 0:
word_acc = word_acc / PRINT_ITER * 100
topo_acc = topo_acc / PRINT_ITER * 100
assm_acc = assm_acc / PRINT_ITER * 100
steo_acc = steo_acc / PRINT_ITER * 100
print "KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f" % (kl_div, word_acc, topo_acc, assm_acc, steo_acc)
word_acc,topo_acc,assm_acc,steo_acc = 0,0,0,0
sys.stdout.flush()
if (it + 1) % 1500 == 0: #Fast annealing
scheduler.step()
print "learning rate: %.6f" % scheduler.get_lr()[0]
torch.save(model.state_dict(), opts.save_path + "/model.iter-%d-%d" % (epoch, it + 1))
scheduler.step()
print "learning rate: %.6f" % scheduler.get_lr()[0]
torch.save(model.state_dict(), opts.save_path + "/model.iter-" + str(epoch))
| 3,544 | 32.443396 | 127 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/reconstruct.py | import torch
import torch.nn as nn
from torch.autograd import Variable

import math, random, sys
from optparse import OptionParser
from collections import deque

import rdkit
import rdkit.Chem as Chem

from jtnn import *

# Silence RDKit's verbose logging.
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)

# Reconstruction-accuracy evaluation: encode each test molecule and check
# whether decoding reproduces the canonical isomeric SMILES exactly.
parser = OptionParser()
parser.add_option("-t", "--test", dest="test_path")
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-m", "--model", dest="model_path")
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
opts,args = parser.parse_args()

vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)

hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)

model = JTNNVAE(vocab, hidden_size, latent_size, depth)
model.load_state_dict(torch.load(opts.model_path))
model = model.cuda()

# One SMILES per line; first whitespace-separated token only.
data = []
with open(opts.test_path) as f:
    for line in f:
        s = line.strip("\r\n ").split()[0]
        data.append(s)

acc = 0.0
tot = 0
for smiles in data:
    mol = Chem.MolFromSmiles(smiles)
    # Canonicalize with stereochemistry so the comparison is well-defined.
    smiles3D = Chem.MolToSmiles(mol, isomericSmiles=True)

    dec_smiles = model.reconstruct(smiles3D)
    if dec_smiles == smiles3D:
        acc += 1
    tot += 1
    # NOTE(review): Python 2 print statement — running accuracy after each molecule.
    print acc / tot

# Disabled alternative: multi-sample reconstruction evaluation (kept as a
# no-op string literal by the original authors).
"""
dec_smiles = model.recon_eval(smiles3D)
tot += len(dec_smiles)
for s in dec_smiles:
    if s == smiles3D:
        acc += 1
print acc / tot
"""
| 1,586 | 24.190476 | 68 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/draw_nei.py | import torch
import torch.nn as nn
from torch.autograd import Variable

import math, random, sys
from optparse import OptionParser

import rdkit
import rdkit.Chem as Chem
from rdkit.Chem import Draw
import numpy as np

from jtnn import *

# Silence RDKit's verbose logging.
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)

# Visualize the latent neighborhood of a seed molecule: decode a 13x13 grid
# of latent points around it along two random orthogonal directions.
parser = OptionParser()
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-m", "--model", dest="model_path")
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
opts,args = parser.parse_args()

vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)

hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)

model = JTNNVAE(vocab, hidden_size, latent_size, depth)
model.load_state_dict(torch.load(opts.model_path))
#model = model.cuda()

# Two random orthonormal directions in latent space (Gram-Schmidt on y).
np.random.seed(0)
x = np.random.randn(latent_size)
x /= np.linalg.norm(x)

y = np.random.randn(latent_size)
y -= y.dot(x) * x
y /= np.linalg.norm(y)

# Seed molecule: encode to its posterior mean latent vector.
#z0 = "CN1C(C2=CC(NC3C[C@H](C)C[C@@H](C)C3)=CN=C2)=NN=C1"
z0 = "COC1=CC(OC)=CC([C@@H]2C[NH+](CCC(F)(F)F)CC2)=C1"
z0 = model.encode_latent_mean([z0]).squeeze()
z0 = z0.data.cpu().numpy()

delta = 1
nei_mols = []
for dx in range(-6,7):
    for dy in range(-6,7):
        # Offset the seed latent and split it into (tree, graph) halves.
        z = z0 + x * delta * dx + y * delta * dy
        tree_z, mol_z = torch.Tensor(z).unsqueeze(0).chunk(2, dim=1)
        tree_z, mol_z = create_var(tree_z), create_var(mol_z)
        nei_mols.append( model.decode(tree_z, mol_z, prob_decode=False) )

nei_mols = [Chem.MolFromSmiles(s) for s in nei_mols]
img = Draw.MolsToGridImage(nei_mols, molsPerRow=13, subImgSize=(200,200), useSVG=True)
# NOTE(review): Python 2 print statement — emits the SVG markup to stdout.
print img
| 1,797 | 27.539683 | 86 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/jtnn/jtnn_enc.py | import torch
import torch.nn as nn
from collections import deque
from mol_tree import Vocab, MolTree
from nnutils import create_var, GRU
MAX_NB = 8
class JTNNEncoder(nn.Module):
    """Junction-tree encoder: GRU message passing along tree edges.

    Messages are propagated level by level following the schedule from
    get_prop_order (bottom-up, then top-down); the per-root summary
    vectors come from node_aggregate.

    Fix applied: ``xrange`` (Python 2 only) replaced with ``range``.
    """

    def __init__(self, vocab, hidden_size, embedding=None):
        super(JTNNEncoder, self).__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab.size()
        self.vocab = vocab

        if embedding is None:
            self.embedding = nn.Embedding(self.vocab_size, hidden_size)
        else:
            self.embedding = embedding

        # Tree-GRU gate parameters, shared across all edges.
        self.W_z = nn.Linear(2 * hidden_size, hidden_size)
        self.W_r = nn.Linear(hidden_size, hidden_size, bias=False)
        self.U_r = nn.Linear(hidden_size, hidden_size)
        self.W_h = nn.Linear(2 * hidden_size, hidden_size)
        # Output projection combining node embedding with summed messages.
        self.W = nn.Linear(2 * hidden_size, hidden_size)

    def forward(self, root_batch):
        """Run message passing for a batch of tree roots.

        Returns (h, root_vecs): the dict of directed edge messages keyed by
        (src_idx, dst_idx), and one summary vector per root.
        """
        orders = []
        for root in root_batch:
            order = get_prop_order(root)
            orders.append(order)

        h = {}
        max_depth = max([len(x) for x in orders])
        padding = create_var(torch.zeros(self.hidden_size), False)

        for t in range(max_depth):  # fix: xrange removed in Python 3
            prop_list = []
            for order in orders:
                if t < len(order):
                    prop_list.extend(order[t])

            cur_x = []
            cur_h_nei = []
            for node_x, node_y in prop_list:
                x, y = node_x.idx, node_y.idx
                cur_x.append(node_x.wid)

                # Inbound messages to x, excluding the one coming from y.
                h_nei = []
                for node_z in node_x.neighbors:
                    z = node_z.idx
                    if z == y: continue
                    h_nei.append(h[(z, x)])

                # Pad each node's neighbor list to a fixed width of MAX_NB.
                pad_len = MAX_NB - len(h_nei)
                h_nei.extend([padding] * pad_len)
                cur_h_nei.extend(h_nei)

            cur_x = create_var(torch.LongTensor(cur_x))
            cur_x = self.embedding(cur_x)
            cur_h_nei = torch.cat(cur_h_nei, dim=0).view(-1, MAX_NB, self.hidden_size)

            new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
            for i, m in enumerate(prop_list):
                x, y = m[0].idx, m[1].idx
                h[(x, y)] = new_h[i]

        root_vecs = node_aggregate(root_batch, h, self.embedding, self.W)
        return h, root_vecs
"""
Helper functions
"""
def get_prop_order(root):
    """Build the message-passing schedule for a tree rooted at *root*.

    BFS assigns every node a depth. Edges are grouped per depth level, once
    in the parent->child direction and once child->parent. The returned list
    is the child->parent levels deepest-first (bottom-up phase) followed by
    the parent->child levels shallowest-first (top-down phase).
    """
    root.depth = 0
    down_levels, up_levels = [], []
    seen = set([root.idx])
    frontier = deque([root])

    while frontier:
        node = frontier.popleft()
        for nei in node.neighbors:
            if nei.idx in seen:
                continue
            seen.add(nei.idx)
            nei.depth = node.depth + 1
            frontier.append(nei)

            # Grow the level lists lazily as deeper nodes appear.
            while len(down_levels) < nei.depth:
                down_levels.append([])
                up_levels.append([])
            down_levels[nei.depth - 1].append((node, nei))
            up_levels[nei.depth - 1].append((nei, node))

    return up_levels[::-1] + down_levels
def node_aggregate(nodes, h, embedding, W):
    """Summarize each node as ReLU(W([embedding ; sum of inbound messages])).

    *h* maps directed edges (src_idx, dst_idx) to message vectors; each
    node's neighbor slots are zero-padded up to MAX_NB before summing.
    """
    hidden_size = embedding.embedding_dim
    zero_pad = create_var(torch.zeros(hidden_size), False)

    wids, msgs = [], []
    for node in nodes:
        wids.append(node.wid)
        inbound = [h[(nei.idx, node.idx)] for nei in node.neighbors]
        inbound.extend([zero_pad] * (MAX_NB - len(inbound)))
        msgs.extend(inbound)

    msgs = torch.cat(msgs, dim=0).view(-1, MAX_NB, hidden_size)
    summed = msgs.sum(dim=1)

    emb = embedding(create_var(torch.LongTensor(wids)))
    node_vec = torch.cat([emb, summed], dim=1)
    return nn.ReLU()(W(node_vec))
| 3,664 | 30.324786 | 84 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/jtnn/datautils.py | from torch.utils.data import Dataset
from mol_tree import MolTree
import numpy as np
class MoleculeDataset(Dataset):
    """Dataset over a SMILES file (one molecule per line).

    Each item is a MolTree with labels recovered and assembly candidates
    enumerated, ready for training.
    """

    def __init__(self, data_file):
        smiles_list = []
        with open(data_file) as f:
            for line in f:
                # Keep only the first whitespace-separated token.
                smiles_list.append(line.strip("\r\n ").split()[0])
        self.data = smiles_list

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        tree = MolTree(self.data[idx])
        tree.recover()
        tree.assemble()
        return tree
class PropDataset(Dataset):
    """Dataset pairing SMILES (one per line) with scalar property targets.

    *prop_file* is parsed with numpy.loadtxt; row i is the target for the
    i-th molecule in *data_file*.
    """

    def __init__(self, data_file, prop_file):
        self.prop_data = np.loadtxt(prop_file)
        smiles_list = []
        with open(data_file) as f:
            for line in f:
                smiles_list.append(line.strip("\r\n ").split()[0])
        self.data = smiles_list

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        tree = MolTree(self.data[idx])
        tree.recover()
        tree.assemble()
        return tree, self.prop_data[idx]
| 985 | 24.947368 | 70 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/jtnn/nnutils.py | import torch
import torch.nn as nn
from torch.autograd import Variable
def create_var(tensor, requires_grad=None):
    """Wrap *tensor* in an autograd Variable.

    When *requires_grad* is None the Variable default (no gradient) is used;
    otherwise the flag is forwarded explicitly.
    """
    if requires_grad is None:
        return Variable(tensor)
    return Variable(tensor, requires_grad=requires_grad)
def index_select_ND(source, dim, index):
    """Gather slices of *source* along *dim* using an arbitrarily-shaped index.

    The flattened index selects rows, and the result is reshaped to
    index.shape + source.shape[1:].
    """
    flat = source.index_select(dim, index.view(-1))
    return flat.view(index.size() + source.size()[1:])
def GRU(x, h_nei, W_z, W_r, U_r, W_h):
    """One tree-GRU step: combine input x with padded neighbor messages h_nei.

    x: (batch, hidden); h_nei: (batch, MAX_NB, hidden). Returns the new
    message of shape (batch, hidden).
    """
    hidden_size = x.size()[-1]
    sigmoid = nn.Sigmoid()

    # Update gate from x and the plain sum of neighbor messages.
    sum_h = h_nei.sum(dim=1)
    z = sigmoid(W_z(torch.cat([x, sum_h], dim=1)))

    # Per-neighbor reset gates, then a gated sum.
    r = sigmoid(W_r(x).view(-1, 1, hidden_size) + U_r(h_nei))
    sum_gated_h = (r * h_nei).sum(dim=1)

    pre_h = nn.Tanh()(W_h(torch.cat([x, sum_gated_h], dim=1)))
    return (1.0 - z) * sum_h + z * pre_h
| 968 | 25.916667 | 60 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/jtnn/mpn.py | import torch
import torch.nn as nn
import rdkit.Chem as Chem
import torch.nn.functional as F
from nnutils import *
from chemutils import get_mol
# Feature vocabulary and fixed feature dimensions for the graph MPN.
ELEM_LIST = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca', 'Fe', 'Al', 'I', 'B', 'K', 'Se', 'Zn', 'H', 'Cu', 'Mn', 'unknown']

ATOM_FDIM = len(ELEM_LIST) + 6 + 5 + 4 + 1  # element + degree + charge + chirality + aromatic
BOND_FDIM = 5 + 6                            # bond type/ring + stereo
MAX_NB = 6

def onek_encoding_unk(x, allowable_set):
    """One-hot encode *x* over *allowable_set*; unknown values map to the
    last slot.

    Fix: return an explicit list of bools. Under Python 3, ``map`` returns
    a lazy iterator, which broke the ``+`` concatenations in the feature
    builders (atom_features / bond_features).
    """
    if x not in allowable_set:
        x = allowable_set[-1]
    return [x == s for s in allowable_set]
def atom_features(atom):
    """Featurize an RDKit atom as a float tensor of length ATOM_FDIM.

    Features: element one-hot, degree, formal charge, chiral tag, aromatic
    flag. Fix: wrap each encoding in list() so the ``+`` concatenation also
    works when onek_encoding_unk yields a lazy ``map`` (Python 3).
    """
    return torch.Tensor(
        list(onek_encoding_unk(atom.GetSymbol(), ELEM_LIST))
        + list(onek_encoding_unk(atom.GetDegree(), [0, 1, 2, 3, 4, 5]))
        + list(onek_encoding_unk(atom.GetFormalCharge(), [-1, -2, 1, 2, 0]))
        + list(onek_encoding_unk(int(atom.GetChiralTag()), [0, 1, 2, 3]))
        + [atom.GetIsAromatic()]
    )
def bond_features(bond):
    """Featurize an RDKit bond as a float tensor of length BOND_FDIM.

    Features: bond type (single/double/triple/aromatic), in-ring flag, and a
    one-hot stereo code. Fix: wrap the stereo encoding in list() so the
    ``+`` concatenation works under Python 3 (map is lazy there).
    """
    bt = bond.GetBondType()
    stereo = int(bond.GetStereo())
    fbond = [bt == Chem.rdchem.BondType.SINGLE, bt == Chem.rdchem.BondType.DOUBLE, bt == Chem.rdchem.BondType.TRIPLE, bt == Chem.rdchem.BondType.AROMATIC, bond.IsInRing()]
    fstereo = list(onek_encoding_unk(stereo, [0, 1, 2, 3, 4, 5]))
    return torch.Tensor(fbond + fstereo)
def mol2graph(mol_batch):
    """Convert a batch of SMILES into batched tensors for the MPN.

    Returns (fatoms, fbonds, agraph, bgraph, scope):
      fatoms  - atom features, one row per atom across the whole batch;
      fbonds  - directed-edge features (source atom features ++ bond features);
      agraph  - for each atom, indices of its inbound directed edges;
      bgraph  - for each directed edge, indices of the edges feeding into it
                (excluding the reverse edge);
      scope   - (start, length) atom ranges delimiting each molecule.
    Edge index 0 is reserved as padding, hence the 1-indexing.

    Fix applied: ``xrange`` (Python 2 only) replaced with ``range``.
    """
    padding = torch.zeros(ATOM_FDIM + BOND_FDIM)
    fatoms, fbonds = [], [padding]  # Ensure bond is 1-indexed
    in_bonds, all_bonds = [], [(-1, -1)]  # Ensure bond is 1-indexed
    scope = []
    total_atoms = 0

    for smiles in mol_batch:
        mol = get_mol(smiles)
        #mol = Chem.MolFromSmiles(smiles)
        n_atoms = mol.GetNumAtoms()
        for atom in mol.GetAtoms():
            fatoms.append(atom_features(atom))
            in_bonds.append([])

        for bond in mol.GetBonds():
            a1 = bond.GetBeginAtom()
            a2 = bond.GetEndAtom()
            x = a1.GetIdx() + total_atoms
            y = a2.GetIdx() + total_atoms

            # Each chemical bond becomes two directed edges (x->y and y->x).
            b = len(all_bonds)
            all_bonds.append((x, y))
            fbonds.append(torch.cat([fatoms[x], bond_features(bond)], 0))
            in_bonds[y].append(b)

            b = len(all_bonds)
            all_bonds.append((y, x))
            fbonds.append(torch.cat([fatoms[y], bond_features(bond)], 0))
            in_bonds[x].append(b)

        scope.append((total_atoms, n_atoms))
        total_atoms += n_atoms

    total_bonds = len(all_bonds)
    fatoms = torch.stack(fatoms, 0)
    fbonds = torch.stack(fbonds, 0)
    agraph = torch.zeros(total_atoms, MAX_NB).long()
    bgraph = torch.zeros(total_bonds, MAX_NB).long()

    for a in range(total_atoms):  # fix: xrange removed in Python 3
        for i, b in enumerate(in_bonds[a]):
            agraph[a, i] = b

    for b1 in range(1, total_bonds):  # fix: xrange removed in Python 3
        x, y = all_bonds[b1]
        for i, b2 in enumerate(in_bonds[x]):
            # Exclude the reverse edge y->x when collecting predecessors.
            if all_bonds[b2][0] != y:
                bgraph[b1, i] = b2

    return fatoms, fbonds, agraph, bgraph, scope
class MPN(nn.Module):
    """Message passing network over molecular graphs.

    Runs *depth* rounds of directed-edge message passing, then mean-pools
    atom states per molecule to produce one vector per input SMILES.

    Fix applied: ``xrange`` (Python 2 only) replaced with ``range``.
    """

    def __init__(self, hidden_size, depth):
        super(MPN, self).__init__()
        self.hidden_size = hidden_size
        self.depth = depth

        self.W_i = nn.Linear(ATOM_FDIM + BOND_FDIM, hidden_size, bias=False)
        self.W_h = nn.Linear(hidden_size, hidden_size, bias=False)
        self.W_o = nn.Linear(ATOM_FDIM + hidden_size, hidden_size)

    def forward(self, mol_graph):
        """mol_graph is the 5-tuple produced by mol2graph."""
        fatoms, fbonds, agraph, bgraph, scope = mol_graph
        fatoms = create_var(fatoms)
        fbonds = create_var(fbonds)
        agraph = create_var(agraph)
        bgraph = create_var(bgraph)

        binput = self.W_i(fbonds)
        message = nn.ReLU()(binput)

        for i in range(self.depth - 1):  # fix: xrange removed in Python 3
            nei_message = index_select_ND(message, 0, bgraph)
            nei_message = nei_message.sum(dim=1)
            nei_message = self.W_h(nei_message)
            # Skip connection to the initial edge features.
            message = nn.ReLU()(binput + nei_message)

        # Aggregate inbound edge messages into atom states.
        nei_message = index_select_ND(message, 0, agraph)
        nei_message = nei_message.sum(dim=1)
        ainput = torch.cat([fatoms, nei_message], dim=1)
        atom_hiddens = nn.ReLU()(self.W_o(ainput))

        # Mean-pool atom states per molecule using the (start, length) scope.
        mol_vecs = []
        for st, le in scope:
            mol_vec = atom_hiddens.narrow(0, st, le).sum(dim=0) / le
            mol_vecs.append(mol_vec)

        mol_vecs = torch.stack(mol_vecs, dim=0)
        return mol_vecs
| 4,279 | 33.24 | 171 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/jtnn/jtnn_vae.py | import torch
import torch.nn as nn
from mol_tree import Vocab, MolTree
from nnutils import create_var
from jtnn_enc import JTNNEncoder
from jtnn_dec import JTNNDecoder
from mpn import MPN, mol2graph
from jtmpn import JTMPN
from chemutils import enum_assemble, set_atommap, copy_edit_mol, attach_mols, atom_equal, decode_stereo
import rdkit
import rdkit.Chem as Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
import copy, math
def set_batch_nodeID(mol_batch, vocab):
    """Assign every tree node a unique batch-wide index and its vocab id.

    Mutates the nodes in place: node.idx becomes a running counter over the
    whole batch, node.wid the vocabulary index of the node's SMILES.
    """
    counter = 0
    for tree in mol_batch:
        for node in tree.nodes:
            node.idx = counter
            node.wid = vocab.get_index(node.smiles)
            counter += 1
class JTNNVAE(nn.Module):
    """Junction Tree VAE: encodes a molecule into a (tree, graph) latent pair
    and decodes by generating the tree scaffold first, then assembling the
    substructures and resolving stereochemistry.

    Python 3 fixes applied throughout: the latent split uses integer
    division (``latent_size // 2`` — true division produced a float and
    crashed nn.Linear / torch.randn) and ``xrange`` is replaced by ``range``.
    torch-0.x idioms (``.data[0]``, ``size_average``) are kept as-is and
    flagged; they need ``.item()`` / ``reduction='sum'`` on modern torch.
    """

    def __init__(self, vocab, hidden_size, latent_size, depth):
        super(JTNNVAE, self).__init__()
        self.vocab = vocab
        self.hidden_size = hidden_size
        self.latent_size = latent_size
        self.depth = depth

        self.embedding = nn.Embedding(vocab.size(), hidden_size)
        self.jtnn = JTNNEncoder(vocab, hidden_size, self.embedding)
        self.jtmpn = JTMPN(hidden_size, depth)
        self.mpn = MPN(hidden_size, depth)
        self.decoder = JTNNDecoder(vocab, hidden_size, latent_size // 2, self.embedding)

        # Separate (mean, log-var) heads for the tree half and the graph half.
        self.T_mean = nn.Linear(hidden_size, latent_size // 2)
        self.T_var = nn.Linear(hidden_size, latent_size // 2)
        self.G_mean = nn.Linear(hidden_size, latent_size // 2)
        self.G_var = nn.Linear(hidden_size, latent_size // 2)

        # NOTE(review): size_average is deprecated; equivalent is reduction='sum'.
        self.assm_loss = nn.CrossEntropyLoss(size_average=False)
        self.stereo_loss = nn.CrossEntropyLoss(size_average=False)

    def encode(self, mol_batch):
        """Encode MolTrees -> (tree messages, tree vectors, graph vectors)."""
        set_batch_nodeID(mol_batch, self.vocab)
        root_batch = [mol_tree.nodes[0] for mol_tree in mol_batch]
        tree_mess, tree_vec = self.jtnn(root_batch)

        smiles_batch = [mol_tree.smiles for mol_tree in mol_batch]
        mol_vec = self.mpn(mol2graph(smiles_batch))
        return tree_mess, tree_vec, mol_vec

    def encode_latent_mean(self, smiles_list):
        """Posterior means (no sampling) for raw SMILES strings."""
        mol_batch = [MolTree(s) for s in smiles_list]
        for mol_tree in mol_batch:
            mol_tree.recover()

        _, tree_vec, mol_vec = self.encode(mol_batch)
        tree_mean = self.T_mean(tree_vec)
        mol_mean = self.G_mean(mol_vec)
        return torch.cat([tree_mean, mol_mean], dim=1)

    def forward(self, mol_batch, beta=0):
        """Training step. Returns (loss, kl, word_acc, topo_acc, assm_acc, stereo_acc)."""
        batch_size = len(mol_batch)

        tree_mess, tree_vec, mol_vec = self.encode(mol_batch)

        tree_mean = self.T_mean(tree_vec)
        tree_log_var = -torch.abs(self.T_var(tree_vec))  # Following Mueller et al.
        mol_mean = self.G_mean(mol_vec)
        mol_log_var = -torch.abs(self.G_var(mol_vec))  # Following Mueller et al.

        z_mean = torch.cat([tree_mean, mol_mean], dim=1)
        z_log_var = torch.cat([tree_log_var, mol_log_var], dim=1)
        kl_loss = -0.5 * torch.sum(1.0 + z_log_var - z_mean * z_mean - torch.exp(z_log_var)) / batch_size

        # Reparameterization trick, separately for the tree and graph halves.
        epsilon = create_var(torch.randn(batch_size, self.latent_size // 2), False)
        tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
        epsilon = create_var(torch.randn(batch_size, self.latent_size // 2), False)
        mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon

        word_loss, topo_loss, word_acc, topo_acc = self.decoder(mol_batch, tree_vec)
        assm_loss, assm_acc = self.assm(mol_batch, mol_vec, tree_mess)
        stereo_loss, stereo_acc = self.stereo(mol_batch, mol_vec)

        all_vec = torch.cat([tree_vec, mol_vec], dim=1)
        loss = word_loss + topo_loss + assm_loss + 2 * stereo_loss + beta * kl_loss
        # NOTE(review): .data[0] is torch-0.x; modern torch needs .item().
        return loss, kl_loss.data[0], word_acc, topo_acc, assm_acc, stereo_acc

    def assm(self, mol_batch, mol_vec, tree_mess):
        """Attachment-prediction loss: score candidate attachments per node."""
        cands = []
        batch_idx = []
        for i, mol_tree in enumerate(mol_batch):
            for node in mol_tree.nodes:
                # Leaf node's attachment is determined by neighboring node's attachment
                if node.is_leaf or len(node.cands) == 1: continue
                cands.extend([(cand, mol_tree.nodes, node) for cand in node.cand_mols])
                batch_idx.extend([i] * len(node.cands))

        cand_vec = self.jtmpn(cands, tree_mess)
        cand_vec = self.G_mean(cand_vec)

        batch_idx = create_var(torch.LongTensor(batch_idx))
        mol_vec = mol_vec.index_select(0, batch_idx)

        # Candidate scores are dot products with the molecule latent vector.
        mol_vec = mol_vec.view(-1, 1, self.latent_size // 2)
        cand_vec = cand_vec.view(-1, self.latent_size // 2, 1)
        scores = torch.bmm(mol_vec, cand_vec).squeeze()

        cnt, tot, acc = 0, 0, 0
        all_loss = []
        for i, mol_tree in enumerate(mol_batch):
            comp_nodes = [node for node in mol_tree.nodes if len(node.cands) > 1 and not node.is_leaf]
            cnt += len(comp_nodes)
            for node in comp_nodes:
                label = node.cands.index(node.label)
                ncand = len(node.cands)
                cur_score = scores.narrow(0, tot, ncand)
                tot += ncand

                if cur_score.data[label] >= cur_score.max().data[0]:
                    acc += 1

                label = create_var(torch.LongTensor([label]))
                all_loss.append(self.assm_loss(cur_score.view(1, -1), label))

        #all_loss = torch.stack(all_loss).sum() / len(mol_batch)
        all_loss = sum(all_loss) / len(mol_batch)
        return all_loss, acc * 1.0 / cnt

    def stereo(self, mol_batch, mol_vec):
        """Stereoisomer-ranking loss via cosine similarity to the latent vector."""
        stereo_cands, batch_idx = [], []
        labels = []
        for i, mol_tree in enumerate(mol_batch):
            cands = mol_tree.stereo_cands
            if len(cands) == 1: continue
            if mol_tree.smiles3D not in cands:
                cands.append(mol_tree.smiles3D)
            stereo_cands.extend(cands)
            batch_idx.extend([i] * len(cands))
            labels.append((cands.index(mol_tree.smiles3D), len(cands)))

        # No molecule in the batch has more than one stereoisomer.
        if len(labels) == 0:
            return create_var(torch.Tensor(0)), 1.0

        batch_idx = create_var(torch.LongTensor(batch_idx))
        stereo_cands = self.mpn(mol2graph(stereo_cands))
        stereo_cands = self.G_mean(stereo_cands)
        stereo_labels = mol_vec.index_select(0, batch_idx)
        scores = torch.nn.CosineSimilarity()(stereo_cands, stereo_labels)

        st, acc = 0, 0
        all_loss = []
        for label, le in labels:
            cur_scores = scores.narrow(0, st, le)
            if cur_scores.data[label] >= cur_scores.max().data[0]:
                acc += 1
            label = create_var(torch.LongTensor([label]))
            all_loss.append(self.stereo_loss(cur_scores.view(1, -1), label))
            st += le
        #all_loss = torch.cat(all_loss).sum() / len(labels)
        all_loss = sum(all_loss) / len(labels)
        return all_loss, acc * 1.0 / len(labels)

    def reconstruct(self, smiles, prob_decode=False):
        """Encode one molecule, sample its posterior once, and decode."""
        mol_tree = MolTree(smiles)
        mol_tree.recover()
        _, tree_vec, mol_vec = self.encode([mol_tree])

        tree_mean = self.T_mean(tree_vec)
        tree_log_var = -torch.abs(self.T_var(tree_vec))  # Following Mueller et al.
        mol_mean = self.G_mean(mol_vec)
        mol_log_var = -torch.abs(self.G_var(mol_vec))  # Following Mueller et al.

        epsilon = create_var(torch.randn(1, self.latent_size // 2), False)
        tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
        epsilon = create_var(torch.randn(1, self.latent_size // 2), False)
        mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon
        return self.decode(tree_vec, mol_vec, prob_decode)

    def recon_eval(self, smiles):
        """Stochastic reconstruction: 10 posterior samples x 10 decodes each."""
        mol_tree = MolTree(smiles)
        mol_tree.recover()
        _, tree_vec, mol_vec = self.encode([mol_tree])

        tree_mean = self.T_mean(tree_vec)
        tree_log_var = -torch.abs(self.T_var(tree_vec))  # Following Mueller et al.
        mol_mean = self.G_mean(mol_vec)
        mol_log_var = -torch.abs(self.G_var(mol_vec))  # Following Mueller et al.

        all_smiles = []
        for i in range(10):  # fix: xrange removed in Python 3
            epsilon = create_var(torch.randn(1, self.latent_size // 2), False)
            tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
            epsilon = create_var(torch.randn(1, self.latent_size // 2), False)
            mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon
            for j in range(10):  # fix: xrange removed in Python 3
                new_smiles = self.decode(tree_vec, mol_vec, prob_decode=True)
                all_smiles.append(new_smiles)
        return all_smiles

    def sample_prior(self, prob_decode=False):
        """Decode one molecule from a standard-normal latent sample."""
        tree_vec = create_var(torch.randn(1, self.latent_size // 2), False)
        mol_vec = create_var(torch.randn(1, self.latent_size // 2), False)
        return self.decode(tree_vec, mol_vec, prob_decode)

    def sample_eval(self):
        """Decode the same prior sample 100 times with stochastic decoding."""
        tree_vec = create_var(torch.randn(1, self.latent_size // 2), False)
        mol_vec = create_var(torch.randn(1, self.latent_size // 2), False)
        all_smiles = []
        for i in range(100):  # fix: xrange removed in Python 3
            s = self.decode(tree_vec, mol_vec, prob_decode=True)
            all_smiles.append(s)
        return all_smiles

    def decode(self, tree_vec, mol_vec, prob_decode):
        """Decode a latent pair into a SMILES string (or None on failure)."""
        pred_root, pred_nodes = self.decoder.decode(tree_vec, prob_decode)

        # Mark nid & is_leaf & atommap
        for i, node in enumerate(pred_nodes):
            node.nid = i + 1
            node.is_leaf = (len(node.neighbors) == 1)
            if len(node.neighbors) > 1:
                set_atommap(node.mol, node.nid)

        tree_mess = self.jtnn([pred_root])[0]

        cur_mol = copy_edit_mol(pred_root.mol)
        global_amap = [{}] + [{} for node in pred_nodes]
        global_amap[1] = {atom.GetIdx(): atom.GetIdx() for atom in cur_mol.GetAtoms()}

        cur_mol = self.dfs_assemble(tree_mess, mol_vec, pred_nodes, cur_mol, global_amap, [], pred_root, None, prob_decode)
        if cur_mol is None:
            return None

        cur_mol = cur_mol.GetMol()
        set_atommap(cur_mol)
        cur_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cur_mol))
        if cur_mol is None: return None

        # Pick the stereoisomer whose graph vector is closest to mol_vec.
        smiles2D = Chem.MolToSmiles(cur_mol)
        stereo_cands = decode_stereo(smiles2D)
        if len(stereo_cands) == 1:
            return stereo_cands[0]
        stereo_vecs = self.mpn(mol2graph(stereo_cands))
        stereo_vecs = self.G_mean(stereo_vecs)
        scores = nn.CosineSimilarity()(stereo_vecs, mol_vec)
        _, max_id = scores.max(dim=0)
        return stereo_cands[max_id.data[0]]

    def dfs_assemble(self, tree_mess, mol_vec, all_nodes, cur_mol, global_amap, fa_amap, cur_node, fa_node, prob_decode):
        """Depth-first assembly: try candidate attachments in score order,
        backtracking when a candidate cannot be completed."""
        fa_nid = fa_node.nid if fa_node is not None else -1
        prev_nodes = [fa_node] if fa_node is not None else []

        children = [nei for nei in cur_node.neighbors if nei.nid != fa_nid]
        # Singletons (single-atom clusters) are attached first.
        neighbors = [nei for nei in children if nei.mol.GetNumAtoms() > 1]
        neighbors = sorted(neighbors, key=lambda x: x.mol.GetNumAtoms(), reverse=True)
        singletons = [nei for nei in children if nei.mol.GetNumAtoms() == 1]
        neighbors = singletons + neighbors

        cur_amap = [(fa_nid, a2, a1) for nid, a1, a2 in fa_amap if nid == cur_node.nid]
        cands = enum_assemble(cur_node, neighbors, prev_nodes, cur_amap)
        if len(cands) == 0:
            return None
        cand_smiles, cand_mols, cand_amap = zip(*cands)

        cands = [(candmol, all_nodes, cur_node) for candmol in cand_mols]

        cand_vecs = self.jtmpn(cands, tree_mess)
        cand_vecs = self.G_mean(cand_vecs)
        mol_vec = mol_vec.squeeze()
        scores = torch.mv(cand_vecs, mol_vec) * 20

        if prob_decode:
            # Sample a candidate ordering instead of the greedy argsort.
            probs = nn.Softmax()(scores.view(1, -1)).squeeze() + 1e-5  # prevent prob = 0
            cand_idx = torch.multinomial(probs, probs.numel())
        else:
            _, cand_idx = torch.sort(scores, descending=True)

        backup_mol = Chem.RWMol(cur_mol)
        for i in range(cand_idx.numel()):  # fix: xrange removed in Python 3
            cur_mol = Chem.RWMol(backup_mol)
            pred_amap = cand_amap[cand_idx[i].data[0]]
            new_global_amap = copy.deepcopy(global_amap)

            for nei_id, ctr_atom, nei_atom in pred_amap:
                if nei_id == fa_nid:
                    continue
                new_global_amap[nei_id][nei_atom] = new_global_amap[cur_node.nid][ctr_atom]

            cur_mol = attach_mols(cur_mol, children, [], new_global_amap)  # father is already attached
            new_mol = cur_mol.GetMol()
            new_mol = Chem.MolFromSmiles(Chem.MolToSmiles(new_mol))

            if new_mol is None: continue

            result = True
            for nei_node in children:
                if nei_node.is_leaf: continue
                cur_mol = self.dfs_assemble(tree_mess, mol_vec, all_nodes, cur_mol, new_global_amap, pred_amap, nei_node, cur_node, prob_decode)
                if cur_mol is None:
                    result = False
                    break
            if result: return cur_mol

        return None
| 13,071 | 40.897436 | 144 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/jtnn/jtprop_vae.py | import torch
import torch.nn as nn
from mol_tree import Vocab, MolTree
from nnutils import create_var
from jtnn_enc import JTNNEncoder
from jtnn_dec import JTNNDecoder
from mpn import MPN, mol2graph
from jtmpn import JTMPN
from chemutils import enum_assemble, set_atommap, copy_edit_mol, attach_mols, atom_equal, decode_stereo
import rdkit
import rdkit.Chem as Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
import copy, math
def set_batch_nodeID(mol_batch, vocab):
    """Give every node across the batch a unique index and its vocab word id.

    node.idx is a running counter over all trees; node.wid is the vocabulary
    index of the node's cluster SMILES. Nodes are mutated in place.
    """
    next_id = 0
    for tree in mol_batch:
        for node in tree.nodes:
            node.idx = next_id
            node.wid = vocab.get_index(node.smiles)
            next_id += 1
class JTPropVAE(nn.Module):
    """Junction-tree VAE with an auxiliary property-prediction head.

    Encodes each molecule as a (tree, graph) latent pair, decodes it back,
    and regresses a scalar property from the concatenated latent vector so
    the latent space can be gradient-optimized for that property.

    Ported to Python 3: integer division for latent sizes, ``.item()``
    instead of ``.data[0]``, ``range`` instead of ``xrange``, and
    ``reduction='sum'`` instead of the removed ``size_average=False``.
    """

    def __init__(self, vocab, hidden_size, latent_size, depth):
        super(JTPropVAE, self).__init__()
        self.vocab = vocab
        self.hidden_size = hidden_size
        self.latent_size = latent_size
        self.depth = depth

        self.embedding = nn.Embedding(vocab.size(), hidden_size)
        self.jtnn = JTNNEncoder(vocab, hidden_size, self.embedding)
        self.jtmpn = JTMPN(hidden_size, depth)
        self.mpn = MPN(hidden_size, depth)
        # Half of the latent vector encodes the tree, half the graph; use
        # integer division so nn.Linear receives an int under Python 3.
        self.decoder = JTNNDecoder(vocab, hidden_size, latent_size // 2, self.embedding)

        self.T_mean = nn.Linear(hidden_size, latent_size // 2)
        self.T_var = nn.Linear(hidden_size, latent_size // 2)
        self.G_mean = nn.Linear(hidden_size, latent_size // 2)
        self.G_var = nn.Linear(hidden_size, latent_size // 2)

        # Property regressor over the full latent vector.
        self.propNN = nn.Sequential(
                nn.Linear(self.latent_size, self.hidden_size),
                nn.Tanh(),
                nn.Linear(self.hidden_size, 1)
        )
        self.prop_loss = nn.MSELoss()
        # reduction='sum' is the modern spelling of size_average=False.
        self.assm_loss = nn.CrossEntropyLoss(reduction='sum')
        self.stereo_loss = nn.CrossEntropyLoss(reduction='sum')

    def encode(self, mol_batch):
        """Encode a batch of MolTrees into (tree messages, tree vecs, graph vecs)."""
        set_batch_nodeID(mol_batch, self.vocab)
        root_batch = [mol_tree.nodes[0] for mol_tree in mol_batch]
        tree_mess, tree_vec = self.jtnn(root_batch)

        smiles_batch = [mol_tree.smiles for mol_tree in mol_batch]
        mol_vec = self.mpn(mol2graph(smiles_batch))
        return tree_mess, tree_vec, mol_vec

    def encode_latent_mean(self, smiles_list):
        """Return the posterior mean latent vector for each SMILES (no sampling)."""
        mol_batch = [MolTree(s) for s in smiles_list]
        for mol_tree in mol_batch:
            mol_tree.recover()

        _, tree_vec, mol_vec = self.encode(mol_batch)
        tree_mean = self.T_mean(tree_vec)
        mol_mean = self.G_mean(mol_vec)
        return torch.cat([tree_mean, mol_mean], dim=1)

    def forward(self, mol_batch, beta=0):
        """Compute the training loss on a batch of (MolTree, property) pairs.

        Returns (loss, kl, word_acc, topo_acc, assm_acc, stereo_acc, prop_loss)
        with kl and prop_loss as Python floats.
        """
        batch_size = len(mol_batch)
        mol_batch, prop_batch = zip(*mol_batch)

        tree_mess, tree_vec, mol_vec = self.encode(mol_batch)

        tree_mean = self.T_mean(tree_vec)
        tree_log_var = -torch.abs(self.T_var(tree_vec))  # Following Mueller et al.
        mol_mean = self.G_mean(mol_vec)
        mol_log_var = -torch.abs(self.G_var(mol_vec))  # Following Mueller et al.

        z_mean = torch.cat([tree_mean, mol_mean], dim=1)
        z_log_var = torch.cat([tree_log_var, mol_log_var], dim=1)
        kl_loss = -0.5 * torch.sum(1.0 + z_log_var - z_mean * z_mean - torch.exp(z_log_var)) / batch_size

        # Reparameterization trick, sampled independently for each latent half.
        epsilon = create_var(torch.randn(batch_size, self.latent_size // 2), False)
        tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
        epsilon = create_var(torch.randn(batch_size, self.latent_size // 2), False)
        mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon

        word_loss, topo_loss, word_acc, topo_acc = self.decoder(mol_batch, tree_vec)
        assm_loss, assm_acc = self.assm(mol_batch, mol_vec, tree_mess)
        stereo_loss, stereo_acc = self.stereo(mol_batch, mol_vec)

        all_vec = torch.cat([tree_vec, mol_vec], dim=1)
        prop_label = create_var(torch.Tensor(prop_batch))
        prop_loss = self.prop_loss(self.propNN(all_vec).squeeze(), prop_label)

        loss = word_loss + topo_loss + assm_loss + 2 * stereo_loss + beta * kl_loss + prop_loss
        return loss, kl_loss.item(), word_acc, topo_acc, assm_acc, stereo_acc, prop_loss.item()

    def assm(self, mol_batch, mol_vec, tree_mess):
        """Graph-assembly loss: score each candidate attachment against the graph latent."""
        cands = []
        batch_idx = []
        for i, mol_tree in enumerate(mol_batch):
            for node in mol_tree.nodes:
                # Leaf node's attachment is determined by neighboring node's attachment
                if node.is_leaf or len(node.cands) == 1:
                    continue
                cands.extend([(cand, mol_tree.nodes, node) for cand in node.cand_mols])
                batch_idx.extend([i] * len(node.cands))

        cand_vec = self.jtmpn(cands, tree_mess)
        cand_vec = self.G_mean(cand_vec)

        batch_idx = create_var(torch.LongTensor(batch_idx))
        mol_vec = mol_vec.index_select(0, batch_idx)

        # Batched dot product between each candidate and its molecule latent.
        mol_vec = mol_vec.view(-1, 1, self.latent_size // 2)
        cand_vec = cand_vec.view(-1, self.latent_size // 2, 1)
        scores = torch.bmm(mol_vec, cand_vec).squeeze()

        cnt, tot, acc = 0, 0, 0
        all_loss = []
        for i, mol_tree in enumerate(mol_batch):
            comp_nodes = [node for node in mol_tree.nodes if len(node.cands) > 1 and not node.is_leaf]
            cnt += len(comp_nodes)
            for node in comp_nodes:
                label = node.cands.index(node.label)
                ncand = len(node.cands)
                cur_score = scores.narrow(0, tot, ncand)
                tot += ncand

                if cur_score[label].item() >= cur_score.max().item():
                    acc += 1

                label = create_var(torch.LongTensor([label]))
                all_loss.append(self.assm_loss(cur_score.view(1, -1), label))

        all_loss = sum(all_loss) / len(mol_batch)
        return all_loss, acc * 1.0 / cnt

    def stereo(self, mol_batch, mol_vec):
        """Stereoisomer-selection loss: pick the reference 3D SMILES among candidates."""
        stereo_cands, batch_idx = [], []
        labels = []
        for i, mol_tree in enumerate(mol_batch):
            cands = mol_tree.stereo_cands
            if len(cands) == 1:
                continue
            if mol_tree.smiles3D not in cands:
                cands.append(mol_tree.smiles3D)
            stereo_cands.extend(cands)
            batch_idx.extend([i] * len(cands))
            labels.append((cands.index(mol_tree.smiles3D), len(cands)))

        if len(labels) == 0:
            # No molecule in the batch has more than one stereoisomer.
            return create_var(torch.Tensor(0)), 1.0

        batch_idx = create_var(torch.LongTensor(batch_idx))
        stereo_cands = self.mpn(mol2graph(stereo_cands))
        stereo_cands = self.G_mean(stereo_cands)
        stereo_labels = mol_vec.index_select(0, batch_idx)
        scores = torch.nn.CosineSimilarity()(stereo_cands, stereo_labels)

        st, acc = 0, 0
        all_loss = []
        for label, le in labels:
            cur_scores = scores.narrow(0, st, le)
            if cur_scores[label].item() >= cur_scores.max().item():
                acc += 1
            label = create_var(torch.LongTensor([label]))
            all_loss.append(self.stereo_loss(cur_scores.view(1, -1), label))
            st += le

        all_loss = sum(all_loss) / len(labels)
        return all_loss, acc * 1.0 / len(labels)

    def reconstruct(self, smiles, prob_decode=False):
        """Encode a SMILES string, sample the posterior once, and decode it back."""
        mol_tree = MolTree(smiles)
        mol_tree.recover()
        _, tree_vec, mol_vec = self.encode([mol_tree])

        tree_mean = self.T_mean(tree_vec)
        tree_log_var = -torch.abs(self.T_var(tree_vec))  # Following Mueller et al.
        mol_mean = self.G_mean(mol_vec)
        mol_log_var = -torch.abs(self.G_var(mol_vec))  # Following Mueller et al.

        epsilon = create_var(torch.randn(1, self.latent_size // 2), False)
        tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
        epsilon = create_var(torch.randn(1, self.latent_size // 2), False)
        mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon
        return self.decode(tree_vec, mol_vec, prob_decode)

    def sample_prior(self, prob_decode=False):
        """Decode a molecule from a standard-normal latent sample."""
        tree_vec = create_var(torch.randn(1, self.latent_size // 2), False)
        mol_vec = create_var(torch.randn(1, self.latent_size // 2), False)
        return self.decode(tree_vec, mol_vec, prob_decode)

    def optimize(self, smiles, sim_cutoff, lr=2.0, num_iter=20):
        """Gradient-ascend the property predictor in latent space, then
        binary-search the visited path for the furthest point whose decoded
        molecule stays within sim_cutoff Tanimoto similarity of the input.

        Returns (new_smiles, similarity); falls back to (smiles, 1.0) when no
        acceptable improvement is found.
        """
        mol_tree = MolTree(smiles)
        mol_tree.recover()
        _, tree_vec, mol_vec = self.encode([mol_tree])

        mol = Chem.MolFromSmiles(smiles)
        fp1 = AllChem.GetMorganFingerprint(mol, 2)

        tree_mean = self.T_mean(tree_vec)
        tree_log_var = -torch.abs(self.T_var(tree_vec))  # Following Mueller et al.
        mol_mean = self.G_mean(mol_vec)
        mol_log_var = -torch.abs(self.G_var(mol_vec))  # Following Mueller et al.
        mean = torch.cat([tree_mean, mol_mean], dim=1)
        log_var = torch.cat([tree_log_var, mol_log_var], dim=1)
        cur_vec = create_var(mean.data, True)

        # Gradient ascent on the predicted property; record every step.
        visited = []
        for step in range(num_iter):
            prop_val = self.propNN(cur_vec).squeeze()
            grad = torch.autograd.grad(prop_val, cur_vec)[0]
            cur_vec = cur_vec.data + lr * grad.data
            cur_vec = create_var(cur_vec, True)
            visited.append(cur_vec)

        # Binary search assumes similarity decreases monotonically along the path.
        l, r = 0, num_iter - 1
        while l < r - 1:
            mid = (l + r) // 2  # integer midpoint (Py3 '/' would give a float index)
            new_vec = visited[mid]
            tree_vec, mol_vec = torch.chunk(new_vec, 2, dim=1)
            new_smiles = self.decode(tree_vec, mol_vec, prob_decode=False)
            if new_smiles is None:
                r = mid - 1
                continue
            new_mol = Chem.MolFromSmiles(new_smiles)
            fp2 = AllChem.GetMorganFingerprint(new_mol, 2)
            sim = DataStructs.TanimotoSimilarity(fp1, fp2)
            if sim < sim_cutoff:
                r = mid - 1
            else:
                l = mid

        tree_vec, mol_vec = torch.chunk(visited[l], 2, dim=1)
        new_smiles = self.decode(tree_vec, mol_vec, prob_decode=False)
        if new_smiles is None:
            return smiles, 1.0
        new_mol = Chem.MolFromSmiles(new_smiles)
        fp2 = AllChem.GetMorganFingerprint(new_mol, 2)
        sim = DataStructs.TanimotoSimilarity(fp1, fp2)
        if sim >= sim_cutoff:
            return new_smiles, sim
        else:
            return smiles, 1.0

    def decode(self, tree_vec, mol_vec, prob_decode):
        """Decode latent (tree_vec, mol_vec) into a SMILES string, or None on failure."""
        pred_root, pred_nodes = self.decoder.decode(tree_vec, prob_decode)

        # Mark nid & is_leaf & atommap
        for i, node in enumerate(pred_nodes):
            node.nid = i + 1
            node.is_leaf = (len(node.neighbors) == 1)
            if len(node.neighbors) > 1:
                set_atommap(node.mol, node.nid)

        tree_mess = self.jtnn([pred_root])[0]

        cur_mol = copy_edit_mol(pred_root.mol)
        global_amap = [{}] + [{} for node in pred_nodes]
        global_amap[1] = {atom.GetIdx(): atom.GetIdx() for atom in cur_mol.GetAtoms()}

        cur_mol = self.dfs_assemble(tree_mess, mol_vec, pred_nodes, cur_mol, global_amap, [], pred_root, None, prob_decode)
        if cur_mol is None:
            return None

        cur_mol = cur_mol.GetMol()
        set_atommap(cur_mol)
        cur_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cur_mol))
        if cur_mol is None:
            return None

        smiles2D = Chem.MolToSmiles(cur_mol)
        stereo_cands = decode_stereo(smiles2D)
        if len(stereo_cands) == 1:
            return stereo_cands[0]
        # Pick the stereoisomer whose graph encoding best matches mol_vec.
        stereo_vecs = self.mpn(mol2graph(stereo_cands))
        stereo_vecs = self.G_mean(stereo_vecs)
        scores = nn.CosineSimilarity()(stereo_vecs, mol_vec)
        _, max_id = scores.max(dim=0)
        return stereo_cands[max_id.item()]

    def dfs_assemble(self, tree_mess, mol_vec, all_nodes, cur_mol, global_amap, fa_amap, cur_node, fa_node, prob_decode):
        """Recursively attach children of cur_node to cur_mol, trying candidate
        attachments in score order; returns the assembled RWMol or None."""
        fa_nid = fa_node.nid if fa_node is not None else -1
        prev_nodes = [fa_node] if fa_node is not None else []

        children = [nei for nei in cur_node.neighbors if nei.nid != fa_nid]
        neighbors = [nei for nei in children if nei.mol.GetNumAtoms() > 1]
        neighbors = sorted(neighbors, key=lambda x: x.mol.GetNumAtoms(), reverse=True)
        singletons = [nei for nei in children if nei.mol.GetNumAtoms() == 1]
        neighbors = singletons + neighbors

        cur_amap = [(fa_nid, a2, a1) for nid, a1, a2 in fa_amap if nid == cur_node.nid]
        cands = enum_assemble(cur_node, neighbors, prev_nodes, cur_amap)
        if len(cands) == 0:
            return None

        cand_smiles, cand_mols, cand_amap = zip(*cands)
        cands = [(candmol, all_nodes, cur_node) for candmol in cand_mols]

        cand_vecs = self.jtmpn(cands, tree_mess)
        cand_vecs = self.G_mean(cand_vecs)
        mol_vec = mol_vec.squeeze()
        scores = torch.mv(cand_vecs, mol_vec) * 20

        if prob_decode:
            # Softmax requires an explicit dim in current torch.
            probs = nn.Softmax(dim=1)(scores.view(1, -1)).squeeze() + 1e-5  # prevent prob = 0
            cand_idx = torch.multinomial(probs, probs.numel())
        else:
            _, cand_idx = torch.sort(scores, descending=True)

        backup_mol = Chem.RWMol(cur_mol)
        for i in range(cand_idx.numel()):
            cur_mol = Chem.RWMol(backup_mol)
            pred_amap = cand_amap[cand_idx[i].item()]
            new_global_amap = copy.deepcopy(global_amap)

            for nei_id, ctr_atom, nei_atom in pred_amap:
                if nei_id == fa_nid:
                    continue
                new_global_amap[nei_id][nei_atom] = new_global_amap[cur_node.nid][ctr_atom]

            cur_mol = attach_mols(cur_mol, children, [], new_global_amap)  # father is already attached
            new_mol = cur_mol.GetMol()
            new_mol = Chem.MolFromSmiles(Chem.MolToSmiles(new_mol))
            if new_mol is None:
                continue

            result = True
            for nei_node in children:
                if nei_node.is_leaf:
                    continue
                cur_mol = self.dfs_assemble(tree_mess, mol_vec, all_nodes, cur_mol, new_global_amap, pred_amap, nei_node, cur_node, prob_decode)
                if cur_mol is None:
                    result = False
                    break
            if result:
                return cur_mol

        return None
| 14,765 | 40.711864 | 144 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/jtnn/jtmpn.py | import torch
import torch.nn as nn
from nnutils import create_var, index_select_ND
from chemutils import get_mol
#from mpn import atom_features, bond_features, ATOM_FDIM, BOND_FDIM
import rdkit.Chem as Chem
ELEM_LIST = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca', 'Fe', 'Al', 'I', 'B', 'K', 'Se', 'Zn', 'H', 'Cu', 'Mn', 'unknown']
ATOM_FDIM = len(ELEM_LIST) + 6 + 5 + 1
BOND_FDIM = 5
MAX_NB = 10
def onek_encoding_unk(x, allowable_set):
    """One-hot encode x over allowable_set, mapping unknown values to the last slot.

    Returns a list of booleans so callers can concatenate encodings with `+`;
    the original returned a lazy `map` object, which cannot be concatenated
    under Python 3 and broke atom_features.
    """
    if x not in allowable_set:
        x = allowable_set[-1]
    return [x == s for s in allowable_set]
def atom_features(atom):
    """Build the per-atom feature vector: element one-hot, degree one-hot,
    formal-charge one-hot, and an aromaticity flag.

    Each one-hot encoding is materialized with list() so concatenation with
    `+` works whether onek_encoding_unk returns a list or a lazy map
    (the Python 2 original relied on map returning a list).
    """
    return torch.Tensor(list(onek_encoding_unk(atom.GetSymbol(), ELEM_LIST))
            + list(onek_encoding_unk(atom.GetDegree(), [0,1,2,3,4,5]))
            + list(onek_encoding_unk(atom.GetFormalCharge(), [-1,-2,1,2,0]))
            + [atom.GetIsAromatic()])
def bond_features(bond):
    """Return a 5-dim bond feature vector: bond-type one-hot
    (single/double/triple/aromatic) plus an in-ring flag."""
    btype = bond.GetBondType()
    flags = [
        btype == Chem.rdchem.BondType.SINGLE,
        btype == Chem.rdchem.BondType.DOUBLE,
        btype == Chem.rdchem.BondType.TRIPLE,
        btype == Chem.rdchem.BondType.AROMATIC,
        bond.IsInRing(),
    ]
    return torch.Tensor(flags)
class JTMPN(nn.Module):
    """Message passing network over candidate attachment subgraphs.

    Atom-level message passing that also injects junction-tree messages
    (tree_mess) at atoms where tree nodes attach, producing one vector per
    candidate molecule. Ported to Python 3 (items() / range).
    """

    def __init__(self, hidden_size, depth):
        super(JTMPN, self).__init__()
        self.hidden_size = hidden_size
        self.depth = depth

        self.W_i = nn.Linear(ATOM_FDIM + BOND_FDIM, hidden_size, bias=False)
        self.W_h = nn.Linear(hidden_size, hidden_size, bias=False)
        self.W_o = nn.Linear(ATOM_FDIM + hidden_size, hidden_size)

    def forward(self, cand_batch, tree_mess):
        """Encode each (mol, all_nodes, ctr_node) candidate.

        Returns a (num_candidates, hidden_size) tensor of mean-pooled atom
        hidden states.
        """
        fatoms, fbonds = [], []
        in_bonds, all_bonds = [], []
        mess_dict, all_mess = {}, [create_var(torch.zeros(self.hidden_size))]  # Ensure index 0 is vec(0)
        total_atoms = 0
        scope = []

        # iteritems() was Python 2 only; items() is the Python 3 equivalent.
        for e, vec in tree_mess.items():
            mess_dict[e] = len(all_mess)
            all_mess.append(vec)

        for mol, all_nodes, ctr_node in cand_batch:
            n_atoms = mol.GetNumAtoms()
            ctr_bid = ctr_node.idx

            for atom in mol.GetAtoms():
                fatoms.append(atom_features(atom))
                in_bonds.append([])

            for bond in mol.GetBonds():
                a1 = bond.GetBeginAtom()
                a2 = bond.GetEndAtom()
                x = a1.GetIdx() + total_atoms
                y = a2.GetIdx() + total_atoms
                # Here x_nid,y_nid could be 0
                x_nid, y_nid = a1.GetAtomMapNum(), a2.GetAtomMapNum()
                x_bid = all_nodes[x_nid - 1].idx if x_nid > 0 else -1
                y_bid = all_nodes[y_nid - 1].idx if y_nid > 0 else -1

                bfeature = bond_features(bond)

                b = len(all_mess) + len(all_bonds)  # bond idx offseted by len(all_mess)
                all_bonds.append((x, y))
                fbonds.append(torch.cat([fatoms[x], bfeature], 0))
                in_bonds[y].append(b)

                b = len(all_mess) + len(all_bonds)
                all_bonds.append((y, x))
                fbonds.append(torch.cat([fatoms[y], bfeature], 0))
                in_bonds[x].append(b)

                if x_bid >= 0 and y_bid >= 0 and x_bid != y_bid:
                    # Inject junction-tree messages across the tree edge, if present.
                    if (x_bid, y_bid) in mess_dict:
                        mess_idx = mess_dict[(x_bid, y_bid)]
                        in_bonds[y].append(mess_idx)
                    if (y_bid, x_bid) in mess_dict:
                        mess_idx = mess_dict[(y_bid, x_bid)]
                        in_bonds[x].append(mess_idx)

            scope.append((total_atoms, n_atoms))
            total_atoms += n_atoms

        total_bonds = len(all_bonds)
        total_mess = len(all_mess)
        fatoms = torch.stack(fatoms, 0)
        fbonds = torch.stack(fbonds, 0)
        agraph = torch.zeros(total_atoms, MAX_NB).long()
        bgraph = torch.zeros(total_bonds, MAX_NB).long()
        tree_message = torch.stack(all_mess, dim=0)

        for a in range(total_atoms):
            for i, b in enumerate(in_bonds[a]):
                agraph[a, i] = b

        for b1 in range(total_bonds):
            x, y = all_bonds[b1]
            for i, b2 in enumerate(in_bonds[x]):  # b2 is offseted by len(all_mess)
                if b2 < total_mess or all_bonds[b2 - total_mess][0] != y:
                    bgraph[b1, i] = b2

        fatoms = create_var(fatoms)
        fbonds = create_var(fbonds)
        agraph = create_var(agraph)
        bgraph = create_var(bgraph)

        binput = self.W_i(fbonds)
        graph_message = nn.ReLU()(binput)

        for i in range(self.depth - 1):
            message = torch.cat([tree_message, graph_message], dim=0)
            nei_message = index_select_ND(message, 0, bgraph)
            nei_message = nei_message.sum(dim=1)
            nei_message = self.W_h(nei_message)
            graph_message = nn.ReLU()(binput + nei_message)

        message = torch.cat([tree_message, graph_message], dim=0)
        nei_message = index_select_ND(message, 0, agraph)
        nei_message = nei_message.sum(dim=1)
        ainput = torch.cat([fatoms, nei_message], dim=1)
        atom_hiddens = nn.ReLU()(self.W_o(ainput))

        mol_vecs = []
        for st, le in scope:
            # Mean-pool atom hidden states per candidate molecule.
            mol_vec = atom_hiddens.narrow(0, st, le).sum(dim=0) / le
            mol_vecs.append(mol_vec)

        mol_vecs = torch.stack(mol_vecs, dim=0)
        return mol_vecs
| 5,326 | 37.323741 | 184 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molvae/jtnn/jtnn_dec.py | import torch
import torch.nn as nn
from mol_tree import Vocab, MolTree, MolTreeNode
from nnutils import create_var, GRU
from chemutils import enum_assemble
import copy
MAX_NB = 8
MAX_DECODE_LEN = 100
class JTNNDecoder(nn.Module):
    """Autoregressive junction-tree decoder.

    At each step predicts whether to expand a new child clique (topological
    "stop" prediction) and, if so, which vocabulary clique to attach (label
    prediction). Ported to Python 3: range, .item(), reduction='sum',
    explicit Softmax dim, and int word ids from sorted/sampled tensors.
    """

    def __init__(self, vocab, hidden_size, latent_size, embedding=None):
        super(JTNNDecoder, self).__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab.size()
        self.vocab = vocab

        if embedding is None:
            self.embedding = nn.Embedding(self.vocab_size, hidden_size)
        else:
            self.embedding = embedding

        # GRU Weights
        self.W_z = nn.Linear(2 * hidden_size, hidden_size)
        self.U_r = nn.Linear(hidden_size, hidden_size, bias=False)
        self.W_r = nn.Linear(hidden_size, hidden_size)
        self.W_h = nn.Linear(2 * hidden_size, hidden_size)

        # Feature Aggregate Weights
        self.W = nn.Linear(latent_size + hidden_size, hidden_size)
        self.U = nn.Linear(latent_size + 2 * hidden_size, hidden_size)

        # Output Weights
        self.W_o = nn.Linear(hidden_size, self.vocab_size)
        self.U_s = nn.Linear(hidden_size, 1)

        # Loss Functions; reduction='sum' replaces the removed size_average=False.
        self.pred_loss = nn.CrossEntropyLoss(reduction='sum')
        self.stop_loss = nn.BCEWithLogitsLoss(reduction='sum')

    def get_trace(self, node):
        """Return the DFS edge trace rooted at node as (smiles, smiles, direction) triples."""
        super_root = MolTreeNode("")
        super_root.idx = -1
        trace = []
        dfs(trace, node, super_root)
        return [(x.smiles, y.smiles, z) for x, y, z in trace]

    def forward(self, mol_batch, mol_vec):
        """Teacher-forced decoding over a batch of MolTrees.

        Returns (pred_loss, stop_loss, pred_acc, stop_acc); accuracies are
        Python floats.
        """
        super_root = MolTreeNode("")
        super_root.idx = -1

        # Initialize
        pred_hiddens, pred_mol_vecs, pred_targets = [], [], []
        stop_hiddens, stop_targets = [], []
        traces = []
        for mol_tree in mol_batch:
            s = []
            dfs(s, mol_tree.nodes[0], super_root)
            traces.append(s)
            for node in mol_tree.nodes:
                node.neighbors = []

        # Predict Root
        pred_hiddens.append(create_var(torch.zeros(len(mol_batch), self.hidden_size)))
        pred_targets.extend([mol_tree.nodes[0].wid for mol_tree in mol_batch])
        pred_mol_vecs.append(mol_vec)

        max_iter = max([len(tr) for tr in traces])
        padding = create_var(torch.zeros(self.hidden_size), False)
        h = {}

        for t in range(max_iter):
            prop_list = []
            batch_list = []
            for i, plist in enumerate(traces):
                if t < len(plist):
                    prop_list.append(plist[t])
                    batch_list.append(i)

            cur_x = []
            cur_h_nei, cur_o_nei = [], []

            for node_x, real_y, _ in prop_list:
                # Neighbors for message passing (target not included)
                cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors if node_y.idx != real_y.idx]
                pad_len = MAX_NB - len(cur_nei)
                cur_h_nei.extend(cur_nei)
                cur_h_nei.extend([padding] * pad_len)

                # Neighbors for stop prediction (all neighbors)
                cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors]
                pad_len = MAX_NB - len(cur_nei)
                cur_o_nei.extend(cur_nei)
                cur_o_nei.extend([padding] * pad_len)

                # Current clique embedding
                cur_x.append(node_x.wid)

            # Clique embedding
            cur_x = create_var(torch.LongTensor(cur_x))
            cur_x = self.embedding(cur_x)

            # Message passing
            cur_h_nei = torch.stack(cur_h_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
            new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)

            # Node Aggregate
            cur_o_nei = torch.stack(cur_o_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
            cur_o = cur_o_nei.sum(dim=1)

            # Gather targets
            pred_target, pred_list = [], []
            stop_target = []
            for i, m in enumerate(prop_list):
                node_x, node_y, direction = m
                x, y = node_x.idx, node_y.idx
                h[(x, y)] = new_h[i]
                node_y.neighbors.append(node_x)
                if direction == 1:
                    pred_target.append(node_y.wid)
                    pred_list.append(i)
                stop_target.append(direction)

            # Hidden states for stop prediction
            cur_batch = create_var(torch.LongTensor(batch_list))
            cur_mol_vec = mol_vec.index_select(0, cur_batch)
            stop_hidden = torch.cat([cur_x, cur_o, cur_mol_vec], dim=1)
            stop_hiddens.append(stop_hidden)
            stop_targets.extend(stop_target)

            # Hidden states for clique prediction
            if len(pred_list) > 0:
                batch_list = [batch_list[i] for i in pred_list]
                cur_batch = create_var(torch.LongTensor(batch_list))
                pred_mol_vecs.append(mol_vec.index_select(0, cur_batch))

                cur_pred = create_var(torch.LongTensor(pred_list))
                pred_hiddens.append(new_h.index_select(0, cur_pred))
                pred_targets.extend(pred_target)

        # Last stop at root
        cur_x, cur_o_nei = [], []
        for mol_tree in mol_batch:
            node_x = mol_tree.nodes[0]
            cur_x.append(node_x.wid)
            cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors]
            pad_len = MAX_NB - len(cur_nei)
            cur_o_nei.extend(cur_nei)
            cur_o_nei.extend([padding] * pad_len)

        cur_x = create_var(torch.LongTensor(cur_x))
        cur_x = self.embedding(cur_x)
        cur_o_nei = torch.stack(cur_o_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
        cur_o = cur_o_nei.sum(dim=1)

        stop_hidden = torch.cat([cur_x, cur_o, mol_vec], dim=1)
        stop_hiddens.append(stop_hidden)
        stop_targets.extend([0] * len(mol_batch))

        # Predict next clique
        pred_hiddens = torch.cat(pred_hiddens, dim=0)
        pred_mol_vecs = torch.cat(pred_mol_vecs, dim=0)
        pred_vecs = torch.cat([pred_hiddens, pred_mol_vecs], dim=1)
        pred_vecs = nn.ReLU()(self.W(pred_vecs))
        pred_scores = self.W_o(pred_vecs)
        pred_targets = create_var(torch.LongTensor(pred_targets))

        pred_loss = self.pred_loss(pred_scores, pred_targets) / len(mol_batch)
        _, preds = torch.max(pred_scores, dim=1)
        pred_acc = torch.eq(preds, pred_targets).float()
        pred_acc = torch.sum(pred_acc) / pred_targets.nelement()

        # Predict stop
        stop_hiddens = torch.cat(stop_hiddens, dim=0)
        stop_vecs = nn.ReLU()(self.U(stop_hiddens))
        stop_scores = self.U_s(stop_vecs).squeeze()
        stop_targets = create_var(torch.Tensor(stop_targets))

        stop_loss = self.stop_loss(stop_scores, stop_targets) / len(mol_batch)
        stops = torch.ge(stop_scores, 0).float()
        stop_acc = torch.eq(stops, stop_targets).float()
        stop_acc = torch.sum(stop_acc) / stop_targets.nelement()

        return pred_loss, stop_loss, pred_acc.item(), stop_acc.item()

    def decode(self, mol_vec, prob_decode):
        """Decode a junction tree from mol_vec; returns (root, all_nodes)."""
        stack, trace = [], []
        init_hidden = create_var(torch.zeros(1, self.hidden_size))
        zero_pad = create_var(torch.zeros(1, 1, self.hidden_size))

        # Root Prediction
        root_hidden = torch.cat([init_hidden, mol_vec], dim=1)
        root_hidden = nn.ReLU()(self.W(root_hidden))
        root_score = self.W_o(root_hidden)
        _, root_wid = torch.max(root_score, dim=1)
        root_wid = root_wid.item()

        root = MolTreeNode(self.vocab.get_smiles(root_wid))
        root.wid = root_wid
        root.idx = 0
        stack.append((root, self.vocab.get_slots(root.wid)))

        all_nodes = [root]
        h = {}
        for step in range(MAX_DECODE_LEN):
            node_x, fa_slot = stack[-1]
            cur_h_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors]
            if len(cur_h_nei) > 0:
                cur_h_nei = torch.stack(cur_h_nei, dim=0).view(1, -1, self.hidden_size)
            else:
                cur_h_nei = zero_pad

            cur_x = create_var(torch.LongTensor([node_x.wid]))
            cur_x = self.embedding(cur_x)

            # Predict stop
            cur_h = cur_h_nei.sum(dim=1)
            stop_hidden = torch.cat([cur_x, cur_h, mol_vec], dim=1)
            stop_hidden = nn.ReLU()(self.U(stop_hidden))
            stop_score = nn.Sigmoid()(self.U_s(stop_hidden) * 20).squeeze()

            if prob_decode:
                # .item() handles the 0-dim tensor that squeeze() produces.
                backtrack = (torch.bernoulli(1.0 - stop_score.data).item() == 1)
            else:
                backtrack = (stop_score.item() < 0.5)

            if not backtrack:  # Forward: Predict next clique
                new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
                pred_hidden = torch.cat([new_h, mol_vec], dim=1)
                pred_hidden = nn.ReLU()(self.W(pred_hidden))
                pred_score = nn.Softmax(dim=1)(self.W_o(pred_hidden) * 20)
                # tolist() yields int word ids, usable as vocab indices.
                if prob_decode:
                    sort_wid = torch.multinomial(pred_score.data.squeeze(), 5).tolist()
                else:
                    _, sort_wid = torch.sort(pred_score, dim=1, descending=True)
                    sort_wid = sort_wid.data.squeeze().tolist()

                next_wid = None
                for wid in sort_wid[:5]:
                    slots = self.vocab.get_slots(wid)
                    node_y = MolTreeNode(self.vocab.get_smiles(wid))
                    if have_slots(fa_slot, slots) and can_assemble(node_x, node_y):
                        next_wid = wid
                        next_slots = slots
                        break

                if next_wid is None:
                    backtrack = True  # No more children can be added
                else:
                    node_y = MolTreeNode(self.vocab.get_smiles(next_wid))
                    node_y.wid = next_wid
                    node_y.idx = step + 1
                    node_y.neighbors.append(node_x)
                    h[(node_x.idx, node_y.idx)] = new_h[0]
                    stack.append((node_y, next_slots))
                    all_nodes.append(node_y)

            if backtrack:  # Backtrack, use if instead of else
                if len(stack) == 1:
                    break  # At root, terminate

                node_fa, _ = stack[-2]
                cur_h_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors if node_y.idx != node_fa.idx]
                if len(cur_h_nei) > 0:
                    cur_h_nei = torch.stack(cur_h_nei, dim=0).view(1, -1, self.hidden_size)
                else:
                    cur_h_nei = zero_pad

                new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
                h[(node_x.idx, node_fa.idx)] = new_h[0]
                node_fa.neighbors.append(node_x)
                stack.pop()

        return root, all_nodes
"""
Helper Functions:
"""
def dfs(stack, x, fa):
    """Record a depth-first traversal of the tree rooted at x (entered from fa).

    Appends (parent, child, 1) on the way down and (child, parent, 0) on the
    way back up; the edge leading back to fa is skipped.
    """
    children = [y for y in x.neighbors if y.idx != fa.idx]
    for child in children:
        stack.append((x, child, 1))
        dfs(stack, child, x)
        stack.append((child, x, 0))
def have_slots(fa_slots, ch_slots):
    """Check whether a parent clique (fa_slots) and a child clique (ch_slots)
    share a compatible attachment atom.

    Slots are (atom_symbol, formal_charge, num_hydrogens) triples. When a
    small (non-ring) clique matches through exactly one slot, that slot is
    consumed by popping it from the list in place.
    """
    if len(fa_slots) > 2 and len(ch_slots) > 2:
        return True

    matches = []
    for i, (a1, c1, h1) in enumerate(fa_slots):
        for j, (a2, c2, h2) in enumerate(ch_slots):
            # Same element and charge; carbons additionally need enough hydrogens.
            if a1 == a2 and c1 == c2 and (a1 != "C" or h1 + h2 >= 4):
                matches.append((i, j))

    if not matches:
        return False

    fa_match, ch_match = zip(*matches)
    if len(set(fa_match)) == 1 and 1 < len(fa_slots) <= 2:  # never remove atom from ring
        fa_slots.pop(fa_match[0])
    if len(set(ch_match)) == 1 and 1 < len(ch_slots) <= 2:  # never remove atom from ring
        ch_slots.pop(ch_match[0])

    return True
def can_assemble(node_x, node_y):
    """Return True if node_y can be attached to node_x alongside x's existing
    neighbors, i.e. enum_assemble finds at least one valid candidate."""
    candidate_neighbors = node_x.neighbors + [node_y]
    for position, nei in enumerate(candidate_neighbors):
        nei.nid = position

    # Singletons first, then larger cliques by decreasing atom count.
    multi_atom = [nei for nei in candidate_neighbors if nei.mol.GetNumAtoms() > 1]
    multi_atom.sort(key=lambda nei: nei.mol.GetNumAtoms(), reverse=True)
    single_atom = [nei for nei in candidate_neighbors if nei.mol.GetNumAtoms() == 1]
    ordered = single_atom + multi_atom

    return len(enum_assemble(node_x, ordered)) > 0
| 12,422 | 37.580745 | 118 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molopt/pretrain.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from torch.autograd import Variable
import math, random, sys
from optparse import OptionParser
from collections import deque
from jtnn import *
import rdkit
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
# Pretraining script for JTPropVAE (beta=0, i.e. plain autoencoding plus the
# property head). Ported to Python 3: print() calls, range(), and the in-place
# nn.init.constant_/xavier_normal_ initializers that replaced the removed
# non-underscore aliases.
parser = OptionParser()
parser.add_option("-t", "--train", dest="train_path")
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-p", "--prop", dest="prop_path")
parser.add_option("-s", "--save_dir", dest="save_path")
parser.add_option("-b", "--batch", dest="batch_size", default=40)
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
opts, args = parser.parse_args()

vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)

batch_size = int(opts.batch_size)
hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)

model = JTPropVAE(vocab, hidden_size, latent_size, depth)

# Zero biases, Xavier-init weight matrices.
for param in model.parameters():
    if param.dim() == 1:
        nn.init.constant_(param, 0)
    else:
        nn.init.xavier_normal_(param)
model = model.cpu()
print("Model #Params: %dK" % (sum([x.nelement() for x in model.parameters()]) / 1000,))

optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()

dataset = PropDataset(opts.train_path, opts.prop_path)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=lambda x: x)

MAX_EPOCH = 3
PRINT_ITER = 20

for epoch in range(MAX_EPOCH):
    word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0

    for it, batch in enumerate(dataloader):
        for mol_tree, _ in batch:
            for node in mol_tree.nodes:
                # Make sure the ground-truth attachment is among the candidates.
                if node.label not in node.cands:
                    node.cands.append(node.label)
                    node.cand_mols.append(node.label_mol)

        model.zero_grad()
        loss, kl_div, wacc, tacc, sacc, dacc, pacc = model(batch, beta=0)
        loss.sum().backward()
        optimizer.step()

        word_acc += wacc
        topo_acc += tacc
        assm_acc += sacc
        steo_acc += dacc
        prop_acc += pacc

        if (it + 1) % PRINT_ITER == 0:
            word_acc = word_acc / PRINT_ITER * 100
            topo_acc = topo_acc / PRINT_ITER * 100
            assm_acc = assm_acc / PRINT_ITER * 100
            steo_acc = steo_acc / PRINT_ITER * 100
            prop_acc = prop_acc / PRINT_ITER
            print("KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f, Prop: %.4f" % (kl_div, word_acc, topo_acc, assm_acc, steo_acc, prop_acc))
            word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
            sys.stdout.flush()

    scheduler.step()
    print("learning rate: %.6f" % scheduler.get_lr()[0])
    torch.save(model.state_dict(), opts.save_path + "/model.iter-" + str(epoch))
| 3,110 | 32.095745 | 149 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molopt/vaetrain.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from torch.autograd import Variable
import math, random, sys
from optparse import OptionParser
from collections import deque
from jtnn import *
import rdkit
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
# Full VAE training script with KL weight beta and fast LR annealing.
# Ported to Python 3: print() calls, range(), and the in-place
# nn.init.constant_/xavier_normal_ initializers.
parser = OptionParser()
parser.add_option("-t", "--train", dest="train_path")
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-p", "--prop", dest="prop_path")
parser.add_option("-s", "--save_dir", dest="save_path")
parser.add_option("-m", "--model", dest="model_path", default=None)
parser.add_option("-b", "--batch", dest="batch_size", default=40)
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
parser.add_option("-z", "--beta", dest="beta", default=1.0)
parser.add_option("-q", "--lr", dest="lr", default=1e-3)
opts, args = parser.parse_args()

vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)

batch_size = int(opts.batch_size)
hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)
beta = float(opts.beta)
lr = float(opts.lr)

model = JTPropVAE(vocab, hidden_size, latent_size, depth)

if opts.model_path is not None:
    # Resume from a pretrained checkpoint.
    model.load_state_dict(torch.load(opts.model_path))
else:
    # Fresh model: zero biases, Xavier-init weight matrices.
    for param in model.parameters():
        if param.dim() == 1:
            nn.init.constant_(param, 0)
        else:
            nn.init.xavier_normal_(param)

model = model.cuda()
print("Model #Params: %dK" % (sum([x.nelement() for x in model.parameters()]) / 1000,))

optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()

dataset = PropDataset(opts.train_path, opts.prop_path)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=lambda x: x, drop_last=True)

MAX_EPOCH = 6
PRINT_ITER = 20

for epoch in range(MAX_EPOCH):
    word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0

    for it, batch in enumerate(dataloader):
        for mol_tree, _ in batch:
            for node in mol_tree.nodes:
                # Make sure the ground-truth attachment is among the candidates.
                if node.label not in node.cands:
                    node.cands.append(node.label)
                    node.cand_mols.append(node.label_mol)

        model.zero_grad()
        loss, kl_div, wacc, tacc, sacc, dacc, pacc = model(batch, beta)
        loss.sum().backward()
        optimizer.step()

        word_acc += wacc
        topo_acc += tacc
        assm_acc += sacc
        steo_acc += dacc
        prop_acc += pacc

        if (it + 1) % PRINT_ITER == 0:
            word_acc = word_acc / PRINT_ITER * 100
            topo_acc = topo_acc / PRINT_ITER * 100
            assm_acc = assm_acc / PRINT_ITER * 100
            steo_acc = steo_acc / PRINT_ITER * 100
            prop_acc /= PRINT_ITER
            print("KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f, Prop: %.4f" % (kl_div, word_acc, topo_acc, assm_acc, steo_acc, prop_acc))
            word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
            sys.stdout.flush()

        if (it + 1) % 1500 == 0:  # Fast annealing
            scheduler.step()
            print("learning rate: %.6f" % scheduler.get_lr()[0])
            torch.save(model.state_dict(), opts.save_path + "/model.iter-%d-%d" % (epoch, it + 1))

    scheduler.step()
    print("learning rate: %.6f" % scheduler.get_lr()[0])
    torch.save(model.state_dict(), opts.save_path + "/model.iter-" + str(epoch))
| 3,695 | 33.542056 | 149 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/molopt/optimize.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import math, random, sys
from optparse import OptionParser
from collections import deque
import rdkit
import rdkit.Chem as Chem
from rdkit.Chem import Descriptors
import sascorer
from jtnn import *
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
# Latent-space molecule optimization script. Ported to Python 3: print()
# calls replace the Python 2 print statements (output remains
# space-separated and therefore identical).
parser = OptionParser()
parser.add_option("-t", "--test", dest="test_path")
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-m", "--model", dest="model_path")
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
parser.add_option("-s", "--sim", dest="cutoff", default=0.0)
opts, args = parser.parse_args()

vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)

hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)
sim_cutoff = float(opts.cutoff)

model = JTPropVAE(vocab, hidden_size, latent_size, depth)
model.load_state_dict(torch.load(opts.model_path))
model = model.cuda()

# First whitespace-separated token of each line is the SMILES string.
data = []
with open(opts.test_path) as f:
    for line in f:
        s = line.strip("\r\n ").split()[0]
        data.append(s)

res = []
for smiles in data:
    mol = Chem.MolFromSmiles(smiles)
    # Penalized logP: logP minus synthetic-accessibility score.
    score = Descriptors.MolLogP(mol) - sascorer.calculateScore(mol)

    new_smiles, sim = model.optimize(smiles, sim_cutoff=sim_cutoff, lr=2, num_iter=80)
    new_mol = Chem.MolFromSmiles(new_smiles)
    new_score = Descriptors.MolLogP(new_mol) - sascorer.calculateScore(new_mol)

    res.append((new_score - score, sim, score, new_score, smiles, new_smiles))
    print(new_score - score, sim, score, new_score, smiles, new_smiles)

print(sum([x[0] for x in res]), sum([x[1] for x in res]))
| 1,845 | 29.766667 | 85 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/bo/gen_latent.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from optparse import OptionParser
import rdkit
from rdkit.Chem import Descriptors
from rdkit.Chem import MolFromSmiles, MolToSmiles
from rdkit.Chem import rdmolops
import sascorer
import numpy as np
from jtnn import *
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
# Encode a dataset of SMILES into JTNN-VAE latent vectors and write the
# latent features plus the (normalized) target components to text files for
# downstream Bayesian optimization.
# Fixed: Python 2 `xrange` (a NameError on Python 3) replaced by `range`.
parser = OptionParser()
parser.add_option("-a", "--data", dest="data_path")
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-m", "--model", dest="model_path")
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
opts,args = parser.parse_args()

with open(opts.data_path) as f:
    smiles = f.readlines()
for i in range(len(smiles)):
    smiles[ i ] = smiles[ i ].strip()

vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)

batch_size = 100
hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)

model = JTNNVAE(vocab, hidden_size, latent_size, depth)
model.load_state_dict(torch.load(opts.model_path))
model = model.cuda()

# Canonicalize every SMILES via an RDKit round-trip.
smiles_rdkit = []
for i in range(len(smiles)):
    smiles_rdkit.append(MolToSmiles(MolFromSmiles(smiles[ i ]), isomericSmiles=True))

logP_values = []
for i in range(len(smiles)):
    logP_values.append(Descriptors.MolLogP(MolFromSmiles(smiles_rdkit[ i ])))

# Synthetic-accessibility scores are negated (higher is better downstream).
SA_scores = []
for i in range(len(smiles)):
    SA_scores.append(-sascorer.calculateScore(MolFromSmiles(smiles_rdkit[ i ])))

import networkx as nx

# Penalize rings larger than 6 atoms: score is -(longest cycle length - 6),
# 0 for molecules whose largest ring has at most 6 atoms.
cycle_scores = []
for i in range(len(smiles)):
    cycle_list = nx.cycle_basis(nx.Graph(rdmolops.GetAdjacencyMatrix(MolFromSmiles(smiles_rdkit[ i ]))))
    if len(cycle_list) == 0:
        cycle_length = 0
    else:
        cycle_length = max([ len(j) for j in cycle_list ])
    if cycle_length <= 6:
        cycle_length = 0
    else:
        cycle_length = cycle_length - 6
    cycle_scores.append(-cycle_length)

# Standardize each component over the dataset before summing them.
SA_scores_normalized = (np.array(SA_scores) - np.mean(SA_scores)) / np.std(SA_scores)
logP_values_normalized = (np.array(logP_values) - np.mean(logP_values)) / np.std(logP_values)
cycle_scores_normalized = (np.array(cycle_scores) - np.mean(cycle_scores)) / np.std(cycle_scores)

# Encode in mini-batches to bound GPU memory use.
latent_points = []
for i in range(0, len(smiles), batch_size):
    batch = smiles[i:i+batch_size]
    mol_vec = model.encode_latent_mean(batch)
    latent_points.append(mol_vec.data.cpu().numpy())

# We store the results
latent_points = np.vstack(latent_points)
np.savetxt('latent_features.txt', latent_points)

targets = SA_scores_normalized + logP_values_normalized + cycle_scores_normalized
np.savetxt('targets.txt', targets)
np.savetxt('logP_values.txt', np.array(logP_values))
np.savetxt('SA_scores.txt', np.array(SA_scores))
np.savetxt('cycle_scores.txt', np.array(cycle_scores))
| 2,922 | 31.120879 | 104 | py |
FastJTNNpy3 | FastJTNNpy3-master/Old/bo/run_bo.py | import pickle
import gzip
from sparse_gp import SparseGP
import scipy.stats as sps
import numpy as np
import os.path
import rdkit
from rdkit.Chem import MolFromSmiles, MolToSmiles
from rdkit.Chem import Descriptors
import torch
import torch.nn as nn
from jtnn import create_var, JTNNVAE, Vocab
from optparse import OptionParser
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
# We define the functions used to load and save objects
def save_object(obj, filename):
    """Serialize *obj* with pickle and write it gzip-compressed to *filename*.

    Fixed: the original called ``dest.close()`` after the ``with`` block had
    already closed the file; the context manager alone handles cleanup.
    """
    with gzip.GzipFile(filename, 'wb') as dest:
        dest.write(pickle.dumps(obj))
def load_object(filename):
    """Load and return a pickled object from gzip-compressed *filename*.

    Fixed: the original called ``source.close()`` after the ``with`` block
    had already closed the file; the context manager alone handles cleanup.
    """
    with gzip.GzipFile(filename, 'rb') as source:
        return pickle.loads(source.read())
# Bayesian optimization over the JTNN-VAE latent space: fit a sparse GP to
# (latent vector, target) pairs, propose 60 new latent points per iteration
# by batched greedy expected improvement, decode and score them, and fold
# the results back into the training set for 5 iterations.
# Fixed: Python 2 `print` statements -> print() calls, `xrange` -> `range`,
# and `np.int` (removed in NumPy 1.24) -> builtin `int`.
parser = OptionParser()
parser.add_option("-v", "--vocab", dest="vocab_path")
parser.add_option("-m", "--model", dest="model_path")
parser.add_option("-o", "--save_dir", dest="save_dir")
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=56)
parser.add_option("-d", "--depth", dest="depth", default=3)
parser.add_option("-r", "--seed", dest="random_seed", default=None)
opts,args = parser.parse_args()

vocab = [x.strip("\r\n ") for x in open(opts.vocab_path)]
vocab = Vocab(vocab)

hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)
random_seed = int(opts.random_seed)

model = JTNNVAE(vocab, hidden_size, latent_size, depth)
model.load_state_dict(torch.load(opts.model_path))
model = model.cuda()

# We load the random seed
np.random.seed(random_seed)

# We load the data (y is minued!)
X = np.loadtxt('latent_features.txt')
y = -np.loadtxt('targets.txt')
y = y.reshape((-1, 1))

# Shuffled 90/10 train/test split.
n = X.shape[ 0 ]
permutation = np.random.choice(n, n, replace = False)
X_train = X[ permutation, : ][ 0 : int(np.round(0.9 * n)), : ]
X_test = X[ permutation, : ][ int(np.round(0.9 * n)) :, : ]
y_train = y[ permutation ][ 0 : int(np.round(0.9 * n)) ]
y_test = y[ permutation ][ int(np.round(0.9 * n)) : ]

np.random.seed(random_seed)

# Per-component statistics are reused to normalize newly decoded molecules.
logP_values = np.loadtxt('logP_values.txt')
SA_scores = np.loadtxt('SA_scores.txt')
cycle_scores = np.loadtxt('cycle_scores.txt')
SA_scores_normalized = (np.array(SA_scores) - np.mean(SA_scores)) / np.std(SA_scores)
logP_values_normalized = (np.array(logP_values) - np.mean(logP_values)) / np.std(logP_values)
cycle_scores_normalized = (np.array(cycle_scores) - np.mean(cycle_scores)) / np.std(cycle_scores)

iteration = 0
while iteration < 5:
    # We fit the GP
    np.random.seed(iteration * random_seed)
    M = 500
    sgp = SparseGP(X_train, 0 * X_train, y_train, M)
    sgp.train_via_ADAM(X_train, 0 * X_train, y_train, X_test, X_test * 0, y_test, minibatch_size = 10 * M, max_iterations = 100, learning_rate = 0.001)

    pred, uncert = sgp.predict(X_test, 0 * X_test)
    error = np.sqrt(np.mean((pred - y_test)**2))
    testll = np.mean(sps.norm.logpdf(pred - y_test, scale = np.sqrt(uncert)))
    print('Test RMSE: ', error)
    print('Test ll: ', testll)

    pred, uncert = sgp.predict(X_train, 0 * X_train)
    error = np.sqrt(np.mean((pred - y_train)**2))
    trainll = np.mean(sps.norm.logpdf(pred - y_train, scale = np.sqrt(uncert)))
    print('Train RMSE: ', error)
    print('Train ll: ', trainll)

    # We pick the next 60 inputs
    next_inputs = sgp.batched_greedy_ei(60, np.min(X_train, 0), np.max(X_train, 0))
    valid_smiles = []
    new_features = []
    for i in range(60):
        all_vec = next_inputs[i].reshape((1,-1))
        # The latent vector is the concatenation of a tree half and a graph half.
        tree_vec,mol_vec = np.hsplit(all_vec, 2)
        tree_vec = create_var(torch.from_numpy(tree_vec).float())
        mol_vec = create_var(torch.from_numpy(mol_vec).float())
        s = model.decode(tree_vec, mol_vec, prob_decode=False)
        if s is not None:
            valid_smiles.append(s)
            new_features.append(all_vec)
    print(len(valid_smiles), "molecules are found")
    valid_smiles = valid_smiles[:50]
    new_features = next_inputs[:50]
    new_features = np.vstack(new_features)
    save_object(valid_smiles, opts.save_dir + "/valid_smiles{}.dat".format(iteration))

    import sascorer
    import networkx as nx
    from rdkit.Chem import rdmolops

    # Score each decoded molecule with the same normalized penalized-logP
    # objective used to build the initial dataset.
    scores = []
    for i in range(len(valid_smiles)):
        current_log_P_value = Descriptors.MolLogP(MolFromSmiles(valid_smiles[ i ]))
        current_SA_score = -sascorer.calculateScore(MolFromSmiles(valid_smiles[ i ]))
        cycle_list = nx.cycle_basis(nx.Graph(rdmolops.GetAdjacencyMatrix(MolFromSmiles(valid_smiles[ i ]))))
        if len(cycle_list) == 0:
            cycle_length = 0
        else:
            cycle_length = max([ len(j) for j in cycle_list ])
        if cycle_length <= 6:
            cycle_length = 0
        else:
            cycle_length = cycle_length - 6
        current_cycle_score = -cycle_length
        current_SA_score_normalized = (current_SA_score - np.mean(SA_scores)) / np.std(SA_scores)
        current_log_P_value_normalized = (current_log_P_value - np.mean(logP_values)) / np.std(logP_values)
        current_cycle_score_normalized = (current_cycle_score - np.mean(cycle_scores)) / np.std(cycle_scores)
        score = current_SA_score_normalized + current_log_P_value_normalized + current_cycle_score_normalized
        scores.append(-score) #target is always minused

    print(valid_smiles)
    print(scores)

    save_object(scores, opts.save_dir + "/scores{}.dat".format(iteration))

    if len(new_features) > 0:
        X_train = np.concatenate([ X_train, new_features ], 0)
        y_train = np.concatenate([ y_train, np.array(scores)[ :, None ] ], 0)

    iteration += 1
| 5,700 | 35.082278 | 151 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_molvae/sample.py | import sys
sys.path.append('../')
import torch
import torch.nn as nn
import math, random, sys
import argparse
from fast_jtnn import *
import rdkit
def load_model(vocab, model_path, hidden_size=450, latent_size=56, depthT=20, depthG=3):
    """Construct a JTNN-VAE, load weights from *model_path*, move it to the
    GPU, seed torch's RNG to 0, and return the model.

    *vocab* is a path to a vocabulary file with one token per line.
    """
    tokens = [line.strip("\r\n ") for line in open(vocab)]
    model = JTNNVAE(Vocab(tokens), hidden_size, latent_size, depthT, depthG)
    model.load_state_dict(torch.load(model_path))
    model = model.cuda()
    torch.manual_seed(0)
    return model
def main_sample(vocab, output_file, model_path, nsample, hidden_size=450, latent_size=56, depthT=20, depthG=3):
    """Sample *nsample* molecules from the prior of a trained JTNN-VAE and
    write one result per line to *output_file*.

    *vocab* is a path to a vocabulary file with one token per line.
    """
    tokens = [line.strip("\r\n ") for line in open(vocab)]
    model = JTNNVAE(Vocab(tokens), hidden_size, latent_size, depthT, depthG)
    model.load_state_dict(torch.load(model_path))
    model = model.cuda()
    torch.manual_seed(0)
    with open(output_file, 'w') as out_file:
        for _ in range(nsample):
            out_file.write(str(model.sample_prior())+'\n')
if __name__ == '__main__':
    # Silence RDKit logging below the CRITICAL level.
    lg = rdkit.RDLogger.logger()
    lg.setLevel(rdkit.RDLogger.CRITICAL)
    # CLI: required I/O paths plus architecture hyperparameters, which must
    # match the checkpoint being loaded.
    parser = argparse.ArgumentParser()
    parser.add_argument('--nsample', type=int, required=True)
    parser.add_argument('--vocab', required=True)
    parser.add_argument('--model', required=True)
    parser.add_argument('--output_file', required=True)
    parser.add_argument('--hidden_size', type=int, default=450)
    parser.add_argument('--latent_size', type=int, default=56)
    parser.add_argument('--depthT', type=int, default=20)
    parser.add_argument('--depthG', type=int, default=3)
    args = parser.parse_args()
    main_sample(args.vocab, args.output_file, args.model, args.nsample, args.hidden_size, args.latent_size, args.depthT, args.depthG)
FastJTNNpy3 | FastJTNNpy3-master/fast_molvae/preprocess.py | import sys
sys.path.append('../')
import torch
import torch.nn as nn
from multiprocessing import Pool
import numpy as np
import os
from tqdm import tqdm
import math, random, sys
from optparse import OptionParser
import pickle
from fast_jtnn import *
import rdkit
def tensorize(smiles, assm=True):
    """Build a MolTree for one SMILES string and strip the RDKit molecule
    handles so the result stays small when pickled.

    When *assm* is True, assembly candidates are enumerated and each node's
    ground-truth label is guaranteed to appear among its candidates.
    """
    tree = MolTree(smiles)
    tree.recover()
    if assm:
        tree.assemble()
        for node in tree.nodes:
            if node.label not in node.cands:
                node.cands.append(node.label)
    # Drop the RDKit mol objects before pickling.
    del tree.mol
    for node in tree.nodes:
        del node.mol
    return tree
def convert(train_path, pool, num_splits, output_path):
    """Tensorize every SMILES in *train_path* (one per line, first token
    used) in parallel with *pool*, split the results into *num_splits*
    pickles, and write them as tensors-<i>.pkl under *output_path*.

    Fixed: directory creation now uses ``os.makedirs(..., exist_ok=True)``
    instead of the roundabout ``os.path.join(output_path, './')`` plus an
    ``is False`` check.
    """
    # Silence RDKit logging below the CRITICAL level.
    lg = rdkit.RDLogger.logger()
    lg.setLevel(rdkit.RDLogger.CRITICAL)
    os.makedirs(output_path, exist_ok=True)
    with open(train_path) as f:
        data = [line.strip("\r\n ").split()[0] for line in f]
    print('Input File read')
    print('Tensorizing .....')
    all_data = pool.map(tensorize, data)
    all_data_split = np.array_split(all_data, num_splits)
    print('Tensorizing Complete')
    for split_id in tqdm(range(num_splits)):
        with open(os.path.join(output_path, 'tensors-%d.pkl' % split_id), 'wb') as f:
            pickle.dump(all_data_split[split_id], f)
    return True
def main_preprocess(train_path, output_path, num_splits=10, njobs=os.cpu_count()):
    """Programmatic entry point: tensorize *train_path* into *num_splits*
    pickle shards under *output_path* using a pool of *njobs* workers."""
    worker_pool = Pool(njobs)
    convert(train_path, worker_pool, num_splits, output_path)
    return True
if __name__ == "__main__":
    # Silence RDKit logging below the CRITICAL level.
    lg = rdkit.RDLogger.logger()
    lg.setLevel(rdkit.RDLogger.CRITICAL)
    # optparse values arrive as strings, hence the explicit int() casts below.
    parser = OptionParser()
    parser.add_option("-t", "--train", dest="train_path")
    parser.add_option("-n", "--split", dest="nsplits", default=10)
    parser.add_option("-j", "--jobs", dest="njobs", default=8)
    parser.add_option("-o", "--output", dest="output_path")
    opts, args = parser.parse_args()
    opts.njobs = int(opts.njobs)
    pool = Pool(opts.njobs)
    num_splits = int(opts.nsplits)
    convert(opts.train_path, pool, num_splits, opts.output_path)
| 2,146 | 27.25 | 85 | py |
FastJTNNpy3 | FastJTNNpy3-master/fast_molvae/vae_train.py | import sys
sys.path.append('../')
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from torch.autograd import Variable
import math, random, sys
import numpy as np
import argparse
from collections import deque
import pickle as pickle
from fast_jtnn import *
import rdkit
from tqdm import tqdm
import os
def main_vae_train(train,
                   vocab,
                   save_dir,
                   load_epoch=0,
                   hidden_size=450,
                   batch_size=32,
                   latent_size=56,
                   depthT=20,
                   depthG=3,
                   lr=1e-3,
                   clip_norm=50.0,
                   beta=0.0,
                   step_beta=0.002,
                   max_beta=1.0,
                   warmup=40000,
                   epoch=20,
                   anneal_rate=0.9,
                   anneal_iter=40000,
                   kl_anneal_iter=2000,
                   print_iter=50,
                   save_iter=5000):
    """Train a JTNN-VAE on the tensorized dataset at *train*.

    *vocab* is a path to a vocabulary file (one token per line).  The KL
    weight *beta* stays fixed until *warmup* steps, then grows by
    *step_beta* every *kl_anneal_iter* steps up to *max_beta*.  Checkpoints
    are written under *save_dir* every *save_iter* steps and at the end of
    every epoch; the learning rate decays by *anneal_rate* every
    *anneal_iter* steps.  Returns the trained model.
    """
    vocab = [x.strip("\r\n ") for x in open(vocab)]
    vocab = Vocab(vocab)
    model = JTNNVAE(vocab, int(hidden_size), int(latent_size), int(depthT), int(depthG)).cuda()
    print(model)
    # Zero-init biases / 1-D params, Xavier-init weight matrices.
    for param in model.parameters():
        if param.dim() == 1:
            nn.init.constant_(param, 0)
        else:
            nn.init.xavier_normal_(param)
    if os.path.isdir(save_dir) is False:
        os.makedirs(save_dir)
    # Optionally resume from an epoch checkpoint (overwrites the init above).
    if load_epoch > 0:
        model.load_state_dict(torch.load(save_dir + "/model.epoch-" + str(load_epoch)))
    print("Model #Params: %dK" % (sum([x.nelement() for x in model.parameters()]) / 1000,))
    optimizer = optim.Adam(model.parameters(), lr=lr)
    scheduler = lr_scheduler.ExponentialLR(optimizer, anneal_rate)
    # NOTE(review): scheduler.step() is called once before any optimizer.step(),
    # which applies one decay immediately — presumably intentional; confirm.
    scheduler.step()
    param_norm = lambda m: math.sqrt(sum([p.norm().item() ** 2 for p in m.parameters()]))
    grad_norm = lambda m: math.sqrt(sum([p.grad.norm().item() ** 2 for p in m.parameters() if p.grad is not None]))
    # NOTE(review): total_step starts at load_epoch (a step counter seeded
    # with an epoch number) — looks inconsistent; verify against checkpoints.
    total_step = load_epoch
    beta = beta
    # Running sums of [KL, word acc %, topo acc %, assembly acc %].
    meters = np.zeros(4)
    for epoch in tqdm(range(epoch)):
        loader = MolTreeFolder(train, vocab, batch_size)#, num_workers=4)
        for batch in loader:
            total_step += 1
            try:
                model.zero_grad()
                loss, kl_div, wacc, tacc, sacc = model(batch, beta)
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), clip_norm)
                optimizer.step()
            except Exception as e:
                # Best effort: skip batches that fail (e.g. OOM / bad data)
                # rather than aborting the whole run.
                print(e)
                continue
            meters = meters + np.array([kl_div, wacc * 100, tacc * 100, sacc * 100])
            if total_step % print_iter == 0:
                # Report averages over the last print_iter steps, then reset.
                meters /= print_iter
                print("[%d] Beta: %.3f, KL: %.2f, Word: %.2f, Topo: %.2f, Assm: %.2f, PNorm: %.2f, GNorm: %.2f" % (total_step, beta, meters[0], meters[1], meters[2], meters[3], param_norm(model), grad_norm(model)))
                sys.stdout.flush()
                meters *= 0
            if total_step % save_iter == 0:
                torch.save(model.state_dict(), save_dir + "/model.iter-" + str(total_step))
            if total_step % anneal_iter == 0:
                scheduler.step()
                print("learning rate: %.6f" % scheduler.get_lr()[0])
            # KL annealing: start increasing beta only after warmup steps.
            if total_step % kl_anneal_iter == 0 and total_step >= warmup:
                beta = min(max_beta, beta + step_beta)
        # torch.save(model.state_dict(), save_dir + "/model.epoch-" + str(epoch))
        # Per-epoch checkpoint (filename uses the loop's epoch index).
        torch.save(model.state_dict(), save_dir + "/model.epoch-" + str(epoch))
    return model
if __name__ == '__main__':
    # Silence RDKit logging below the CRITICAL level.
    lg = rdkit.RDLogger.logger()
    lg.setLevel(rdkit.RDLogger.CRITICAL)
    # CLI mirrors main_vae_train's signature; defaults match it exactly.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', required=True)
    parser.add_argument('--vocab', required=True)
    parser.add_argument('--save_dir', required=True)
    parser.add_argument('--load_epoch', type=int, default=0)
    parser.add_argument('--hidden_size', type=int, default=450)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--latent_size', type=int, default=56)
    parser.add_argument('--depthT', type=int, default=20)
    parser.add_argument('--depthG', type=int, default=3)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--clip_norm', type=float, default=50.0)
    parser.add_argument('--beta', type=float, default=0.0)
    parser.add_argument('--step_beta', type=float, default=0.002)
    parser.add_argument('--max_beta', type=float, default=1.0)
    parser.add_argument('--warmup', type=int, default=40000)
    parser.add_argument('--epoch', type=int, default=20)
    parser.add_argument('--anneal_rate', type=float, default=0.9)
    parser.add_argument('--anneal_iter', type=int, default=40000)
    parser.add_argument('--kl_anneal_iter', type=int, default=2000)
    parser.add_argument('--print_iter', type=int, default=50)
    parser.add_argument('--save_iter', type=int, default=5000)
    args = parser.parse_args()
    print(args)
    # Arguments are passed positionally in the same order as the signature.
    main_vae_train(args.train,
                   args.vocab,
                   args.save_dir,
                   args.load_epoch,
                   args.hidden_size,
                   args.batch_size,
                   args.latent_size,
                   args.depthT,
                   args.depthG,
                   args.lr,
                   args.clip_norm,
                   args.beta,
                   args.step_beta,
                   args.max_beta,
                   args.warmup,
                   args.epoch,
                   args.anneal_rate,
                   args.anneal_iter,
                   args.kl_anneal_iter,
                   args.print_iter,
                   args.save_iter)
| 5,667 | 33.351515 | 214 | py |
ctcdecode | ctcdecode-master/setup.py | #!/usr/bin/env python
import glob
import multiprocessing.pool
import os
import tarfile
import urllib.request
import warnings
from setuptools import distutils, find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, include_paths
def download_extract(url, dl_path):
    """Download *url* to *dl_path* unless it is already present, then extract
    the archive into third_party/ unless a matching directory already exists.

    Fixed: the "# Already downloaded" comment was attached to the branch
    taken when the file is NOT yet downloaded; the tarfile is now closed via
    a context manager even if extraction raises.
    """
    if not os.path.isfile(dl_path):
        # Not downloaded yet -- fetch the archive.
        urllib.request.urlretrieve(url, dl_path)
    if dl_path.endswith(".tar.gz") and os.path.isdir(dl_path[: -len(".tar.gz")]):
        # Already extracted
        return
    with tarfile.open(dl_path) as tar:
        tar.extractall("third_party/")
# Download/Extract openfst, boost
download_extract(
    "https://github.com/parlance/ctcdecode/releases/download/v1.0/openfst-1.6.7.tar.gz",
    "third_party/openfst-1.6.7.tar.gz",
)
download_extract(
    "https://github.com/parlance/ctcdecode/releases/download/v1.0/boost_1_67_0.tar.gz",
    "third_party/boost_1_67_0.tar.gz",
)
# Warn early when the git submodules have not been checked out, instead of
# failing later with a confusing compiler error.
# Fixed: loop variable renamed from `file`, which shadowed the builtin.
for required_file in ["third_party/kenlm/setup.py", "third_party/ThreadPool/ThreadPool.h"]:
    if not os.path.exists(required_file):
        warnings.warn("File `{}` does not appear to be present. Did you forget `git submodule update`?".format(required_file))
# Does gcc compile with this header and library?
def compile_test(header, library):
    """Return True if a trivial program that force-includes *header* and
    links against *library* compiles on this machine (build feature probe)."""
    dummy_path = os.path.join(os.path.dirname(__file__), "dummy")
    # Build a bash one-liner: `-include` the header, link the library,
    # compile an empty main() read from stdin via <<< (a bashism, hence the
    # explicit `bash -c`), write to dummy_path, and discard all output.
    # The nested single/double quoting here is load-bearing.
    command = (
        'bash -c "g++ -include '
        + header
        + " -l"
        + library
        + " -x c++ - <<<'int main() {}' -o "
        + dummy_path
        + " >/dev/null 2>/dev/null && rm "
        + dummy_path
        + ' 2>/dev/null"'
    )
    # os.system returns the shell's exit status; 0 means compile+cleanup succeeded.
    return os.system(command) == 0
# Base flags: optimize, pin KenLM's maximum n-gram order, C++14, and
# position-independent code for the shared extension.
compile_args = ["-O3", "-DKENLM_MAX_ORDER=6", "-std=c++14", "-fPIC"]
ext_libs = []
# Enable each compression backend only when its header and library are
# available on the build machine (probed via compile_test above).
if compile_test("zlib.h", "z"):
    compile_args.append("-DHAVE_ZLIB")
    ext_libs.append("z")
if compile_test("bzlib.h", "bz2"):
    compile_args.append("-DHAVE_BZLIB")
    ext_libs.append("bz2")
if compile_test("lzma.h", "lzma"):
    compile_args.append("-DHAVE_XZLIB")
    ext_libs.append("lzma")
# Vendored third-party sources compiled straight into the extension.
third_party_libs = ["kenlm", "openfst-1.6.7/src/include", "ThreadPool", "boost_1_67_0", "utf8"]
compile_args.extend(["-DINCLUDE_KENLM", "-DKENLM_MAX_ORDER=6"])
lib_sources = (
    glob.glob("third_party/kenlm/util/*.cc")
    + glob.glob("third_party/kenlm/lm/*.cc")
    + glob.glob("third_party/kenlm/util/double-conversion/*.cc")
    + glob.glob("third_party/openfst-1.6.7/src/lib/*.cc")
)
# Skip standalone binaries and unit tests shipped with the vendored code.
lib_sources = [fn for fn in lib_sources if not (fn.endswith("main.cc") or fn.endswith("test.cc"))]
third_party_includes = [os.path.realpath(os.path.join("third_party", lib)) for lib in third_party_libs]
ctc_sources = glob.glob("ctcdecode/src/*.cpp")
extension = CppExtension(
    name="ctcdecode._ext.ctc_decode",
    # NOTE(review): `package` and `with_cuda` are not documented CppExtension
    # keyword arguments -- presumably ignored; verify against
    # torch.utils.cpp_extension before relying on them.
    package=True,
    with_cuda=False,
    sources=ctc_sources + lib_sources,
    include_dirs=third_party_includes + include_paths(),
    libraries=ext_libs,
    extra_compile_args=compile_args,
    language="c++",
)
# monkey-patch for parallel compilation
# See: https://stackoverflow.com/a/13176803
def parallelCCompile(
    self,
    sources,
    output_dir=None,
    macros=None,
    include_dirs=None,
    debug=0,
    extra_preargs=None,
    extra_postargs=None,
    depends=None,
):
    """Drop-in replacement for distutils' CCompiler.compile that compiles
    object files on a thread pool (one worker per CPU) instead of serially.

    Signature and return value mirror the distutils original exactly.
    """
    # those lines are copied from distutils.ccompiler.CCompiler directly
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
        output_dir, macros, include_dirs, sources, depends, extra_postargs
    )
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
    # parallel code
    def _single_compile(obj):
        # Objects missing from `build` are up to date; skip them silently.
        try:
            src, ext = build[obj]
        except KeyError:
            return
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
    # convert to list, imap is evaluated on-demand
    thread_pool = multiprocessing.pool.ThreadPool(os.cpu_count())
    list(thread_pool.imap(_single_compile, objects))
    return objects
# hack compile to support parallel compiling
# Globally replaces distutils' serial compile with the threaded version above.
distutils.ccompiler.CCompiler.compile = parallelCCompile
setup(
    name="ctcdecode",
    version="1.0.3",
    description="CTC Decoder for PyTorch based on Paddle Paddle's implementation",
    url="https://github.com/parlance/ctcdecode",
    author="Ryan Leary",
    author_email="ryanleary@gmail.com",
    # Exclude the build files.
    packages=find_packages(exclude=["build"]),
    ext_modules=[extension],
    # BuildExtension wires in the PyTorch-aware C++ build steps.
    cmdclass={"build_ext": BuildExtension},
)
| 4,428 | 29.756944 | 117 | py |
ctcdecode | ctcdecode-master/tests/test_decode.py | """Test decoders."""
from __future__ import absolute_import, division, print_function
import os
import unittest
import ctcdecode
import torch
class TestDecoders(unittest.TestCase):
    """Tests for ctcdecode's offline and online CTC beam-search decoders,
    using a 7-symbol vocabulary and two hand-written probability tables."""

    def setUp(self):
        # Vocabulary: apostrophe, space, a-d, with "_" used as the CTC blank.
        self.vocab_list = ["'", " ", "a", "b", "c", "d", "_"]
        self.beam_size = 20
        # Two 6-timestep x 7-symbol probability tables fed to the decoders.
        self.probs_seq1 = [
            [0.06390443, 0.21124858, 0.27323887, 0.06870235, 0.0361254, 0.18184413, 0.16493624],
            [0.03309247, 0.22866108, 0.24390638, 0.09699597, 0.31895462, 0.0094893, 0.06890021],
            [0.218104, 0.19992557, 0.18245131, 0.08503348, 0.14903535, 0.08424043, 0.08120984],
            [0.12094152, 0.19162472, 0.01473646, 0.28045061, 0.24246305, 0.05206269, 0.09772094],
            [0.1333387, 0.00550838, 0.00301669, 0.21745861, 0.20803985, 0.41317442, 0.01946335],
            [0.16468227, 0.1980699, 0.1906545, 0.18963251, 0.19860937, 0.04377724, 0.01457421],
        ]
        self.probs_seq2 = [
            [0.08034842, 0.22671944, 0.05799633, 0.36814645, 0.11307441, 0.04468023, 0.10903471],
            [0.09742457, 0.12959763, 0.09435383, 0.21889204, 0.15113123, 0.10219457, 0.20640612],
            [0.45033529, 0.09091417, 0.15333208, 0.07939558, 0.08649316, 0.12298585, 0.01654384],
            [0.02512238, 0.22079203, 0.19664364, 0.11906379, 0.07816055, 0.22538587, 0.13483174],
            [0.17928453, 0.06065261, 0.41153005, 0.1172041, 0.11880313, 0.07113197, 0.04139363],
            [0.15882358, 0.1235788, 0.23376776, 0.20510435, 0.00279306, 0.05294827, 0.22298418],
        ]
        # Expected transcripts.  greedy_result is not referenced by the tests
        # in this file; beam_search_result[2] is the LM-rescored expectation.
        self.greedy_result = ["ac'bdc", "b'da"]
        self.beam_search_result = ["acdc", "b'a", "a a"]
def convert_to_string(self, tokens, vocab, seq_len):
return "".join([vocab[x] for x in tokens[0:seq_len]])
    def test_beam_search_decoder_1(self):
        """Offline beam search on probs_seq1 (no LM) yields the expected transcript."""
        probs_seq = torch.FloatTensor([self.probs_seq1])
        decoder = ctcdecode.CTCBeamDecoder(
            self.vocab_list, beam_width=self.beam_size, blank_id=self.vocab_list.index("_")
        )
        beam_result, beam_scores, timesteps, out_seq_len = decoder.decode(probs_seq)
        # Take the top beam of the first (only) batch element.
        output_str = self.convert_to_string(beam_result[0][0], self.vocab_list, out_seq_len[0][0])
        self.assertEqual(output_str, self.beam_search_result[0])
    def test_beam_search_decoder_2(self):
        """Offline beam search on probs_seq2 (no LM) yields the expected transcript."""
        probs_seq = torch.FloatTensor([self.probs_seq2])
        decoder = ctcdecode.CTCBeamDecoder(
            self.vocab_list, beam_width=self.beam_size, blank_id=self.vocab_list.index("_")
        )
        beam_result, beam_scores, timesteps, out_seq_len = decoder.decode(probs_seq)
        output_str = self.convert_to_string(beam_result[0][0], self.vocab_list, out_seq_len[0][0])
        self.assertEqual(output_str, self.beam_search_result[1])
    def test_beam_search_decoder_3(self):
        """Beam search on probs_seq2 with the test.arpa language model changes
        the result to the LM-rescored expectation."""
        lm_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test.arpa")
        probs_seq = torch.FloatTensor([self.probs_seq2])
        decoder = ctcdecode.CTCBeamDecoder(
            self.vocab_list, beam_width=self.beam_size, blank_id=self.vocab_list.index("_"), model_path=lm_path
        )
        beam_result, beam_scores, timesteps, out_seq_len = decoder.decode(probs_seq)
        output_str = self.convert_to_string(beam_result[0][0], self.vocab_list, out_seq_len[0][0])
        self.assertEqual(output_str, self.beam_search_result[2])
    def test_beam_search_decoder_batch(self):
        """Batched decoding of both sequences matches the per-sequence results."""
        probs_seq = torch.FloatTensor([self.probs_seq1, self.probs_seq2])
        decoder = ctcdecode.CTCBeamDecoder(
            self.vocab_list, beam_width=self.beam_size, blank_id=self.vocab_list.index("_"), num_processes=24
        )
        beam_results, beam_scores, timesteps, out_seq_len = decoder.decode(probs_seq)
        output_str1 = self.convert_to_string(beam_results[0][0], self.vocab_list, out_seq_len[0][0])
        output_str2 = self.convert_to_string(beam_results[1][0], self.vocab_list, out_seq_len[1][0])
        self.assertEqual(output_str1, self.beam_search_result[0])
        self.assertEqual(output_str2, self.beam_search_result[1])
        del decoder
    def test_beam_search_decoder_batch_log(self):
        """Batched decoding with log-probability inputs (log_probs_input=True)
        matches the linear-probability results."""
        probs_seq = torch.FloatTensor([self.probs_seq1, self.probs_seq2]).log()
        decoder = ctcdecode.CTCBeamDecoder(
            self.vocab_list,
            beam_width=self.beam_size,
            blank_id=self.vocab_list.index("_"),
            log_probs_input=True,
            num_processes=24,
        )
        beam_results, beam_scores, timesteps, out_seq_len = decoder.decode(probs_seq)
        output_str1 = self.convert_to_string(beam_results[0][0], self.vocab_list, out_seq_len[0][0])
        output_str2 = self.convert_to_string(beam_results[1][0], self.vocab_list, out_seq_len[1][0])
        self.assertEqual(output_str1, self.beam_search_result[0])
        self.assertEqual(output_str2, self.beam_search_result[1])
    def test_online_decoder_decoding(self):
        """Online decoding with an LM, whole sequence in one call per stream,
        matches the offline LM-rescored result for both streams."""
        lm_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test.arpa")
        decoder = ctcdecode.OnlineCTCBeamDecoder(
            self.vocab_list,
            beam_width=self.beam_size,
            blank_id=self.vocab_list.index("_"),
            log_probs_input=True,
            num_processes=24,
            model_path=lm_path,
        )
        # One DecoderState per stream carries the decoder's running context.
        state1 = ctcdecode.DecoderState(decoder)
        state2 = ctcdecode.DecoderState(decoder)
        probs_seq = torch.FloatTensor([self.probs_seq2, self.probs_seq2]).log()
        # Mark every stream as ended so results are finalized in this call.
        is_eos_s = [True for _ in range(len(probs_seq))]
        beam_results, beam_scores, timesteps, out_seq_len = decoder.decode(probs_seq, [state1, state2], is_eos_s)
        output_str1 = self.convert_to_string(beam_results[0][0], self.vocab_list, out_seq_len[0][0])
        output_str2 = self.convert_to_string(beam_results[1][0], self.vocab_list, out_seq_len[1][0])
        self.assertEqual(output_str1, self.beam_search_result[2])
        self.assertEqual(output_str2, self.beam_search_result[2])
    def test_online_decoder_decoding_no_lm(self):
        """Online decoding without an LM matches the offline beam results."""
        decoder = ctcdecode.OnlineCTCBeamDecoder(
            self.vocab_list,
            beam_width=self.beam_size,
            blank_id=self.vocab_list.index("_"),
            log_probs_input=True,
            num_processes=24,
        )
        state1 = ctcdecode.DecoderState(decoder)
        state2 = ctcdecode.DecoderState(decoder)
        probs_seq = torch.FloatTensor([self.probs_seq1, self.probs_seq2]).log()
        # Mark every stream as ended so results are finalized in this call.
        is_eos_s = [True for _ in range(len(probs_seq))]
        beam_results, beam_scores, timesteps, out_seq_len = decoder.decode(
            probs_seq, [state1, state2], is_eos_s
        )
        output_str1 = self.convert_to_string(beam_results[0][0], self.vocab_list, out_seq_len[0][0])
        output_str2 = self.convert_to_string(beam_results[1][0], self.vocab_list, out_seq_len[1][0])
        self.assertEqual(output_str1, self.beam_search_result[0])
        self.assertEqual(output_str2, self.beam_search_result[1])
    def test_online_decoder_decoding_with_two_calls(self):
        """Streaming with an LM: feeding timesteps [0,2) then [2,6) through the
        same state yields the same result as a single offline decode."""
        lm_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test.arpa")
        decoder = ctcdecode.OnlineCTCBeamDecoder(
            self.vocab_list,
            beam_width=self.beam_size,
            blank_id=self.vocab_list.index("_"),
            log_probs_input=True,
            num_processes=24,
            model_path=lm_path,
        )
        state1 = ctcdecode.DecoderState(decoder)
        probs_seq = torch.FloatTensor([self.probs_seq2]).log()
        # First chunk is not end-of-stream; the second call finalizes it.
        beam_results, beam_scores, timesteps, out_seq_len = decoder.decode(probs_seq[:, :2], [state1], [False])
        beam_results, beam_scores, timesteps, out_seq_len = decoder.decode(probs_seq[:, 2:], [state1], [True])
        output_str1 = self.convert_to_string(beam_results[0][0], self.vocab_list, out_seq_len[0][0])
        self.assertEqual(output_str1, self.beam_search_result[2])
def test_online_decoder_decoding_with_two_calls_no_lm(self):
decoder = ctcdecode.OnlineCTCBeamDecoder(
self.vocab_list,
beam_width=self.beam_size,
blank_id=self.vocab_list.index("_"),
log_probs_input=True,
num_processes=24,
)
state1 = ctcdecode.DecoderState(decoder)
state2 = ctcdecode.DecoderState(decoder)
probs_seq = torch.FloatTensor([self.probs_seq1, self.probs_seq2]).log()
beam_results, beam_scores, timesteps, out_seq_len = decoder.decode(
probs_seq[:, :2], [state1, state2], [False, False]
)
beam_results, beam_scores, timesteps, out_seq_len = decoder.decode(
probs_seq[:, 2:], [state1, state2], [True, True]
)
del state1, state2
size = beam_results.shape
output_str1 = self.convert_to_string(beam_results[0][0], self.vocab_list, out_seq_len[0][0])
output_str2 = self.convert_to_string(beam_results[1][0], self.vocab_list, out_seq_len[1][0])
self.assertEqual(output_str1, self.beam_search_result[0])
self.assertEqual(output_str2, self.beam_search_result[1])
    def test_online_decoder_decoding_with_a_lot_calls_no_lm_check_size(self):
        """Feed the same chunk 1000 times through one state and check that the
        reported output lengths never exceed the result buffer's width."""
        decoder = ctcdecode.OnlineCTCBeamDecoder(
            self.vocab_list,
            beam_width=self.beam_size,
            blank_id=self.vocab_list.index("_"),
            log_probs_input=True,
            num_processes=24,
        )
        state1 = ctcdecode.DecoderState(decoder)
        probs_seq = torch.FloatTensor([self.probs_seq1]).log()
        # NOTE(review): only one state is passed but is_eos_s has two entries
        # ([False, False] / [True, True]) -- presumably the extra flag is
        # ignored; confirm against OnlineCTCBeamDecoder.decode's contract.
        for i in range(1000):
            beam_results, beam_scores, timesteps, out_seq_len = decoder.decode(
                probs_seq, [state1], [False, False]
            )
        beam_results, beam_scores, timesteps, out_seq_len = decoder.decode(
            probs_seq, [state1], [True, True]
        )
        del state1
        # The widest hypothesis must fit inside the returned result tensor.
        self.assertGreaterEqual(beam_results.shape[2], out_seq_len.max())
if __name__ == "__main__":
    # Run this module's test suite when executed directly.
    unittest.main()
| 9,962 | 45.125 | 113 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.