code stringlengths 17 6.64M |
|---|
class GeniePathLayer(Layer):
    """Adaptive Path Layer from 'GeniePath: Graph Neural Networks with
    Adaptive Receptive Paths'.

    Adapted from shawnwang-tech/GeniePath-pytorch. Combines a breadth
    function (a GAT attention pass over neighbours) with a depth function
    (an LSTM over the stacked per-layer representations).
    """

    def __init__(self, placeholders, nodes, in_dim, dim, heads=1, name=None, **kwargs):
        super(GeniePathLayer, self).__init__(**kwargs)
        self.nodes = nodes                  # number of graph nodes
        self.in_dim = in_dim                # input feature dimension
        self.dim = dim                      # output / hidden dimension
        self.heads = heads                  # number of GAT attention heads
        self.placeholders = placeholders
        # NOTE(review): `name` is computed below but never stored or used --
        # looks like leftover from the tkipf/gcn Layer template; confirm.
        if (name is not None):
            name = ('/' + name)
        else:
            name = ''
        if self.logging:
            self._log_vars()

    def depth_forward(self, x, h, c):
        # Depth function: run an LSTM over x.
        # NOTE(review): `h` is used as the LSTM cell size (num_units), not as
        # a hidden-state tensor -- callers appear to pass lstm_hidden here;
        # confirm. `c` is ignored on input and overwritten by dynamic_rnn.
        with tf.variable_scope('lstm', reuse=tf.AUTO_REUSE):
            cell = tf.nn.rnn_cell.LSTMCell(num_units=h, state_is_tuple=True)
            (x, (c, h)) = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
        return (x, (c, h))

    def breadth_forward(self, x, bias_in):
        # Breadth function: one multi-head GAT pass followed by tanh.
        x = tf.tanh(GAT(self.dim, attn_drop=0, ffd_drop=0, bias_mat=bias_in, n_heads=self.heads).inference(x))
        return x

    def forward(self, x, bias_in, h, c):
        # One GeniePath step: breadth (attention) then depth (LSTM).
        # NOTE(review): depth_forward returns (x, (c, h)) but it is unpacked
        # here as (x, (h, c)) -- the state names are swapped; verify intent.
        x = self.breadth_forward(x, bias_in)
        (x, (h, c)) = self.depth_forward(x, h, c)
        x = x[0]
        return (x, (h, c))
|
class Model(object):
    """Base class for the TF graph models in this file (adapted from tkipf/gcn).

    Subclasses implement _build() to populate ``self.layers``; build() then
    chains the layers sequentially and indexes the scoped TF variables.
    Accepts only the keyword arguments ``name`` and ``logging``.
    """

    def __init__(self, **kwargs):
        for key in kwargs.keys():
            assert key in {'name', 'logging'}, ('Invalid keyword argument: ' + key)
        # Default the model name to the lower-cased class name.
        self.name = kwargs.get('name') or self.__class__.__name__.lower()
        self.logging = kwargs.get('logging', False)
        self.vars = {}
        self.layers = []
        self.activations = []
        self.inputs = None
        self.outputs = None
        self.dim1 = None
        self.dim2 = None
        self.adj = None

    def _build(self):
        raise NotImplementedError

    def build(self):
        """Wrapper for _build(): chain the layers and collect scoped variables."""
        with tf.variable_scope(self.name):
            self._build()
        # Feed each layer the previous layer's output, starting from inputs.
        self.activations.append(self.inputs)
        for layer in self.layers:
            self.activations.append(layer(self.activations[-1]))
        self.outputs = self.activations[-1]
        scoped = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
        self.vars = {var.name: var for var in scoped}

    def embedding(self):
        pass

    def save(self, sess=None):
        """Checkpoint this model's variables to tmp/<name>.ckpt."""
        if not sess:
            raise AttributeError('TensorFlow session not provided.')
        saver = tf.train.Saver(self.vars)
        save_path = saver.save(sess, 'tmp/%s.ckpt' % self.name)
        print('Model saved in file: %s' % save_path)

    def load(self, sess=None):
        """Restore this model's variables from tmp/<name>.ckpt."""
        if not sess:
            raise AttributeError('TensorFlow session not provided.')
        saver = tf.train.Saver(self.vars)
        save_path = 'tmp/%s.ckpt' % self.name
        saver.restore(sess, save_path)
        print('Model restored from file: %s' % save_path)
|
class GCN(Model):
    """Two-layer graph convolutional network.

    First layer uses renormalisation (norm=True) and dense inputs; the
    second maps to the output dimension without normalisation.
    """

    def __init__(self, placeholders, dim1, input_dim, output_dim, index=0, **kwargs):
        super(GCN, self).__init__(**kwargs)
        self.placeholders = placeholders
        self.inputs = placeholders['x']
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.dim1 = dim1        # hidden-layer width
        self.index = index      # which adjacency/view this GCN operates on
        self.build()

    def _build(self):
        # Options shared by both graph-convolution layers.
        shared = dict(placeholders=self.placeholders, index=self.index,
                      act=tf.nn.relu, dropout=0.0, logging=self.logging)
        self.layers.append(GraphConvolution(input_dim=self.input_dim,
                                            output_dim=self.dim1,
                                            sparse_inputs=False,
                                            norm=True, **shared))
        self.layers.append(GraphConvolution(input_dim=self.dim1,
                                            output_dim=self.output_dim,
                                            norm=False, **shared))

    def embedding(self):
        """Return the final-layer activations (the node embeddings)."""
        return self.outputs
|
def arg_parser():
    """Build and parse the command-line arguments for model training.

    Returns:
        argparse.Namespace with all training hyper-parameters.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='GAS', help="['Player2Vec', 'FdGars','GEM','SemiGNN','GAS','GeniePath']")
    parser.add_argument('--seed', type=int, default=123, help='Random seed.')
    parser.add_argument('--dataset_str', type=str, default='example', help="['dblp','example']")
    parser.add_argument('--epoch_num', type=int, default=30, help='Number of epochs to train.')
    parser.add_argument('--batch_size', type=int, default=1000)
    # BUG FIX: momentum is a float; type=int rejected values such as 0.9
    # passed on the command line.
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--learning_rate', type=float, default=0.001, help='the ratio of training set in whole dataset.')
    parser.add_argument('--hidden1', type=int, default=16, help='Number of units in GCN hidden layer 1.')
    parser.add_argument('--hidden2', type=int, default=16, help='Number of units in GCN hidden layer 2.')
    parser.add_argument('--gcn_output', type=int, default=4, help='gcn output size.')
    # BUG FIX: the option string was '--review_num sample' (contains a space),
    # which cannot be supplied on a command line and produced a dest with a
    # space in it, inaccessible as an attribute.
    parser.add_argument('--review_num_sample', type=int, default=7, help='review number.')
    parser.add_argument('--gcn_dim', type=int, default=5, help='gcn layer size.')
    parser.add_argument('--encoding1', type=int, default=64)
    parser.add_argument('--encoding2', type=int, default=64)
    parser.add_argument('--encoding3', type=int, default=64)
    parser.add_argument('--encoding4', type=int, default=64)
    parser.add_argument('--init_emb_size', type=int, default=4, help='initial node embedding size')
    parser.add_argument('--semi_encoding1', type=int, default=3, help='the first view attention layer unit number')
    parser.add_argument('--semi_encoding2', type=int, default=2, help='the second view attention layer unit number')
    parser.add_argument('--semi_encoding3', type=int, default=4, help='one-layer perceptron units')
    parser.add_argument('--Ul', type=int, default=8, help='labeled users number')
    parser.add_argument('--alpha', type=float, default=0.5, help='loss alpha')
    parser.add_argument('--lamtha', type=float, default=0.5, help='loss lamtha')
    parser.add_argument('--hop', type=int, default=1, help='hop number')
    parser.add_argument('--k', type=int, default=16, help='gem layer unit')
    parser.add_argument('--dim', type=int, default=128)
    parser.add_argument('--lstm_hidden', type=int, default=128, help='lstm_hidden unit')
    parser.add_argument('--heads', type=int, default=1, help='gat heads')
    parser.add_argument('--layer_num', type=int, default=4, help='geniePath layer num')
    args = parser.parse_args()
    return args
|
def set_env(args):
    """Reset the default TF graph and seed numpy/TF RNGs from args.seed."""
    tf.reset_default_graph()
    np.random.seed(args.seed)
    tf.set_random_seed(args.seed)
|
def get_data(ix, int_batch, train_size, data=None, labels=None):
    """Return one mini-batch, shifting the final batch back so it is full.

    Args:
        ix: requested start index of the batch.
        int_batch: batch size.
        train_size: total number of training samples.
        data: sequence of samples; defaults to the module-level
            ``train_data`` for backward compatibility with existing callers.
        labels: sequence of labels; defaults to the module-level
            ``train_label``.

    Returns:
        (batch_data, batch_labels): slices of exactly ``int_batch`` elements
        (the last batch is moved back to end at ``train_size``).
    """
    if data is None:
        data = train_data      # legacy module-level global
    if labels is None:
        labels = train_label   # legacy module-level global
    if (ix + int_batch) >= train_size:
        # Clamp: keep the batch full by ending exactly at train_size.
        ix = train_size - int_batch
        end = train_size
    else:
        end = ix + int_batch
    return (data[ix:end], labels[ix:end])
|
def load_data(args):
    """Load adjacency/feature/label data for the selected dataset and model.

    Dispatches on (args.dataset_str, args.model) and returns
    (adj_list, features, train_data, train_label, test_data, test_label,
    paras), where ``paras`` bundles the size information the model
    constructors need.
    """
    if args.dataset_str == 'dblp':
        (adj_list, features, train_data, train_label,
         test_data, test_label) = load_data_dblp('dataset/DBLP4057_GAT_with_idx_tra200_val_800.mat')
        paras = [features.shape[0], features.shape[1],
                 train_label.shape[1], len(train_data)]
    elif args.dataset_str == 'example' and args.model != 'GAS':
        if args.model == 'GEM':
            (adj_list, features, train_data, train_label,
             test_data, test_label) = load_example_gem()
        if args.model == 'SemiGNN':
            (adj_list, features, train_data, train_label,
             test_data, test_label) = load_example_semi()
        paras = [features.shape[0], features.shape[1],
                 train_label.shape[1], len(train_data)]
    elif args.dataset_str == 'example' and args.model == 'GAS':
        (adj_list, features, train_data, train_label,
         test_data, test_label) = load_data_gas()
        # GAS uses three feature matrices: review (r), user (u), item (i).
        emb_r = features[0].shape[1]
        emb_u = features[1].shape[1]
        emb_i = features[2].shape[1]
        paras = [features[0].shape[0], emb_r, emb_u, emb_i,
                 train_label.shape[1], len(train_data),
                 adj_list[0].shape[1] * (emb_r + emb_u),
                 adj_list[2].shape[1] * (emb_r + emb_i)]
    return (adj_list, features, train_data, train_label, test_data, test_label, paras)
|
def train(args, adj_list, features, train_data, train_label, test_data, test_label, paras):
    """Build the model selected by ``args.model``, train it, then evaluate.

    Args:
        args: parsed hyper-parameters (model name, epoch_num, batch_size, ...).
        adj_list: list of adjacency matrices, one per graph view/meta-path.
        features: node features (a list of matrices for the GAS model).
        train_data, train_label: training samples and labels.
        test_data, test_label: held-out samples and labels.
        paras: dataset size list produced by load_data().
    """
    with tf.Session() as sess:
        if (args.model == 'Player2Vec'):
            adj_data = [normalize_adj(adj) for adj in adj_list]
            meta_size = len(adj_list)
            net = Player2Vec(session=sess, class_size=paras[2], gcn_output1=args.hidden1, meta=meta_size, nodes=paras[0], embedding=paras[1], encoding=args.gcn_output)
        if (args.model == 'FdGars'):
            adj_data = [normalize_adj(adj) for adj in adj_list]
            meta_size = len(adj_list)
            net = FdGars(session=sess, class_size=paras[2], gcn_output1=args.hidden1, gcn_output2=args.hidden2, meta=meta_size, nodes=paras[0], embedding=paras[1], encoding=args.gcn_output)
        if (args.model == 'GAS'):
            adj_data = adj_list
            net = GAS(session=sess, nodes=paras[0], class_size=paras[4], embedding_r=paras[1], embedding_u=paras[2], embedding_i=paras[3], h_u_size=paras[6], h_i_size=paras[7], encoding1=args.encoding1, encoding2=args.encoding2, encoding3=args.encoding3, encoding4=args.encoding4, gcn_dim=args.gcn_dim)
        if (args.model == 'GEM'):
            adj_data = adj_list
            meta_size = len(adj_list)
            net = GEM(session=sess, class_size=paras[2], encoding=args.k, meta=meta_size, nodes=paras[0], embedding=paras[1], hop=args.hop)
        if (args.model == 'GeniePath'):
            adj_data = adj_list
            net = GeniePath(session=sess, out_dim=paras[2], dim=args.dim, lstm_hidden=args.lstm_hidden, nodes=paras[0], in_dim=paras[1], heads=args.heads, layer_num=args.layer_num, class_size=paras[2])
        if (args.model == 'SemiGNN'):
            adj_nodelists = [matrix_to_adjlist(adj, pad=False) for adj in adj_list]
            meta_size = len(adj_list)
            pairs = [random_walks(adj_nodelists[i], 2, 3) for i in range(meta_size)]
            net = SemiGNN(session=sess, class_size=paras[2], semi_encoding1=args.semi_encoding1, semi_encoding2=args.semi_encoding2, semi_encoding3=args.semi_encoding3, meta=meta_size, nodes=paras[0], init_emb_size=args.init_emb_size, ul=args.batch_size, alpha=args.alpha, lamtha=args.lamtha)
            adj_data = [pairs_to_matrix(p, paras[0]) for p in pairs]
            u_i = []
            u_j = []
            # NOTE(review): after this loop graph_label holds only the value
            # from the LAST view, while u_i/u_j concatenate all views --
            # confirm this asymmetry is intended.
            for (adj_nodelist, p) in zip(adj_nodelists, pairs):
                (u_i_t, u_j_t, graph_label) = get_negative_sampling(p, adj_nodelist)
                u_i.append(u_i_t)
                u_j.append(u_j_t)
            u_i = np.concatenate(np.array(u_i))
            u_j = np.concatenate(np.array(u_j))
        sess.run(tf.global_variables_initializer())
        # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is
        # its documented replacement for elapsed-time measurement.
        t_start = time.perf_counter()
        for epoch in range(args.epoch_num):
            train_loss = 0
            train_acc = 0
            count = 0
            for index in range(0, paras[3], args.batch_size):
                if (args.model == 'SemiGNN'):
                    (batch_data, batch_sup_label) = get_data(index, args.batch_size, paras[3])
                    (loss, acc, pred, prob) = net.train(adj_data, u_i, u_j, graph_label, batch_data, batch_sup_label, args.learning_rate, args.momentum)
                else:
                    (batch_data, batch_label) = get_data(index, args.batch_size, paras[3])
                    (loss, acc, pred, prob) = net.train(features, adj_data, batch_label, batch_data, args.learning_rate, args.momentum)
                print('batch loss: {:.4f}, batch acc: {:.4f}'.format(loss, acc))
                train_loss += loss
                train_acc += acc
                count += 1
            train_loss = (train_loss / count)
            train_acc = (train_acc / count)
            print('epoch{:d} : train_loss: {:.4f}, train_acc: {:.4f}'.format(epoch, train_loss, train_acc))
        t_end = time.perf_counter()
        print('train time=', '{:.5f}'.format((t_end - t_start)))
        print('Train end!')
        if (args.model == 'SemiGNN'):
            (test_acc, test_pred, test_probabilities, test_tags) = net.test(adj_data, u_i, u_j, graph_label, test_data, test_label, args.learning_rate, args.momentum)
        else:
            (test_acc, test_pred, test_probabilities, test_tags) = net.test(features, adj_data, test_label, test_data)
        print('test acc:', test_acc)
|
class Baseline(torch.nn.Module):
    """CNN + bidirectional-LSTM baseline for optical music recognition.

    The conv stack collapses the image height; its width becomes the RNN
    time axis. Two linear heads emit per-timestep log-probabilities over
    the note and length vocabularies.
    """

    @staticmethod
    def _conv_block(in_ch, out_ch, pool):
        """3x3 conv -> batch-norm -> LeakyReLU -> max-pool block."""
        return nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=(3, 3), padding=1),
            nn.BatchNorm2d(out_ch),
            nn.LeakyReLU(0.2, inplace=True),
            nn.MaxPool2d(kernel_size=pool, stride=pool))

    def __init__(self, params, num_notes, num_lengths):
        super(Baseline, self).__init__()
        self.params = params
        # Total down-sampling factors implied by the configured pooling sizes.
        self.width_reduction = 1
        self.height_reduction = 1
        for step in range(4):
            pool = params['conv_pooling_size'][step]
            self.height_reduction = self.height_reduction * pool[0]
            self.width_reduction = self.width_reduction * pool[1]
        filters = params['conv_filter_n']
        # b1 pools per the config; b2..b4 pool height only (width kept).
        self.b1 = self._conv_block(params['img_channels'], filters[0], params['conv_pooling_size'][0])
        self.b2 = self._conv_block(filters[0], filters[1], (2, 1))
        self.b3 = self._conv_block(filters[1], filters[2], (2, 1))
        self.b4 = self._conv_block(filters[2], filters[3], (2, 1))
        rnn_units = params['rnn_units']
        feature_dim = filters[-1] * (params['img_height'] / self.height_reduction)
        self.r1 = nn.LSTM(int(feature_dim), hidden_size=rnn_units,
                          num_layers=params['rnn_layers'], dropout=0.5, bidirectional=True)
        self.num_notes = num_notes
        self.num_lengths = num_lengths
        self.note_emb = nn.Linear(2 * rnn_units, self.num_notes + 1)
        self.length_emb = nn.Linear(2 * rnn_units, self.num_lengths + 1)
        self.sm = nn.LogSoftmax(dim=2)
        print('Vocab size:', (num_lengths + num_notes))

    def forward(self, x):
        """Encode the image batch and return (note_logits, length_logits)."""
        params = self.params
        batch = x.shape[0]
        in_width = x.shape[3]
        for block in (self.b1, self.b2, self.b3, self.b4):
            x = block(x)
        # (B, C, H, W) -> (W, B, H, C): width becomes the sequence axis.
        features = x.permute(3, 0, 2, 1)
        feature_dim = params['conv_filter_n'][-1] * (params['img_height'] // self.height_reduction)
        # The 2*2*2 factor compensates for b2..b4 pooling width by 1, not 2.
        feature_width = (8 * in_width) // self.width_reduction
        features = torch.reshape(features, (int(feature_width), batch, int(feature_dim)))
        rnn_out, _ = self.r1(features)
        note_logits = self.sm(self.note_emb(rnn_out))
        length_logits = self.sm(self.length_emb(rnn_out))
        return (note_logits, length_logits)
|
class RNNDecoder(torch.nn.Module):
    """CNN + BiLSTM encoder with an iterative chord decoder.

    For each of ``max_chord_stack`` steps the note/length heads see the RNN
    features concatenated with a learned embedding of the previous step's
    prediction, so vertically stacked chord notes are emitted one per step.
    """

    def __init__(self, params, num_notes, num_lengths, max_chord_stack):
        super(RNNDecoder, self).__init__()
        self.params = params
        # Total down-sampling factors implied by the configured pooling sizes.
        self.width_reduction = 1
        self.height_reduction = 1
        self.max_chord_stack = max_chord_stack
        for i in range(4):
            self.width_reduction = self.width_reduction * params['conv_pooling_size'][i][1]
            self.height_reduction = self.height_reduction * params['conv_pooling_size'][i][0]
        # b1 pools per the config; b2..b4 pool height only (width kept).
        self.b1 = nn.Sequential(
            nn.Conv2d(params['img_channels'], params['conv_filter_n'][0], kernel_size=(3, 3), padding=1),
            nn.BatchNorm2d(params['conv_filter_n'][0]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.MaxPool2d(kernel_size=params['conv_pooling_size'][0], stride=params['conv_pooling_size'][0]))
        self.b2 = nn.Sequential(
            nn.Conv2d(params['conv_filter_n'][0], params['conv_filter_n'][1], kernel_size=(3, 3), padding=1),
            nn.BatchNorm2d(params['conv_filter_n'][1]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1)))
        self.b3 = nn.Sequential(
            nn.Conv2d(params['conv_filter_n'][1], params['conv_filter_n'][2], kernel_size=(3, 3), padding=1),
            nn.BatchNorm2d(params['conv_filter_n'][2]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1)))
        self.b4 = nn.Sequential(
            nn.Conv2d(params['conv_filter_n'][2], params['conv_filter_n'][3], kernel_size=(3, 3), padding=1),
            nn.BatchNorm2d(params['conv_filter_n'][3]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1)))
        rnn_hidden_units = params['rnn_units'] * 2
        rnn_hidden_layers = params['rnn_layers']
        feature_dim = params['conv_filter_n'][-1] * (params['img_height'] / self.height_reduction)
        self.r1 = nn.LSTM(int(feature_dim), hidden_size=rnn_hidden_units,
                          num_layers=rnn_hidden_layers, dropout=0.5, bidirectional=True)
        self.num_notes = num_notes
        self.num_lengths = num_lengths
        self.hidden_size = 512  # size of the previous-prediction embedding
        self.note_emb = nn.Linear((2 * rnn_hidden_units) + self.hidden_size, self.num_notes + 1)
        self.length_emb = nn.Linear((2 * rnn_hidden_units) + self.hidden_size, self.num_lengths + 1)
        # Recurrences over the chord-stack steps (input + hidden projections).
        self.lin_note_h = nn.Linear(self.hidden_size, self.hidden_size)
        self.lin_note_i = nn.Linear(self.num_notes + 1, self.hidden_size)
        self.lin_len_h = nn.Linear(self.hidden_size, self.hidden_size)
        self.lin_len_i = nn.Linear(self.num_lengths + 1, self.hidden_size)
        self.sm = nn.LogSoftmax(dim=2)
        print('Vocab size:', (num_lengths + num_notes))

    def forward(self, x):
        """Encode the image batch and return (note_outs, length_outs), each a
        list of ``max_chord_stack`` per-timestep log-probability tensors."""
        params = self.params
        width_reduction = self.width_reduction
        height_reduction = self.height_reduction
        input_shape = x.shape
        x = self.b1(x)
        x = self.b2(x)
        x = self.b3(x)
        x = self.b4(x)
        # (B, C, H, W) -> (W, B, H, C): width becomes the sequence axis.
        features = x.permute(3, 0, 2, 1)
        feature_dim = params['conv_filter_n'][-1] * (params['img_height'] // height_reduction)
        # The 2*2*2 factor compensates for b2..b4 pooling width by 1, not 2.
        feature_width = ((2 * 2 * 2) * input_shape[3]) // width_reduction
        stack = (int(feature_width), input_shape[0], int(feature_dim))
        features = torch.reshape(features, stack)
        (rnn_out, _) = self.r1(features)
        # BUG FIX: the original created these with torch.zeros(...).cuda(),
        # which crashes on CPU-only machines; new_zeros allocates on
        # rnn_out's device with its dtype (identical behaviour on GPU).
        prev_pred_note = rnn_out.new_zeros((rnn_out.shape[0], rnn_out.shape[1], self.hidden_size))
        prev_pred_length = rnn_out.new_zeros((rnn_out.shape[0], rnn_out.shape[1], self.hidden_size))
        note_outs = []
        length_outs = []
        for _ in range(self.max_chord_stack):
            note_out = self.note_emb(torch.cat((rnn_out, prev_pred_note), 2))
            length_out = self.length_emb(torch.cat((rnn_out, prev_pred_length), 2))
            prev_pred_note = torch.tanh(self.lin_note_i(note_out) + self.lin_note_h(prev_pred_note))
            prev_pred_length = torch.tanh(self.lin_len_i(length_out) + self.lin_len_h(prev_pred_length))
            note_outs.append(self.sm(note_out))
            length_outs.append(self.sm(length_out))
        return (note_outs, length_outs)
|
class FlagDecoder(torch.nn.Module):
    """CNN + BiLSTM encoder with three per-timestep output heads:

    - note head: log-probabilities over (90 pitch slots x num_durs+1);
    - sym head: independent log-sigmoid scores for the non-pitch symbols;
    - acc head: log-probabilities over (90 pitch slots x num_accs+1).

    The constant 90 is the hard-coded number of pitch slots; the remaining
    ``num_notes + 1 - 90`` entries are treated as non-pitch symbols (so
    ``num_notes`` must be >= 90).
    """

    def __init__(self, params, num_notes, num_durs, num_accs):
        super(FlagDecoder, self).__init__()
        self.params = params
        # Total down-sampling factors implied by the configured pooling sizes.
        self.width_reduction = 1
        self.height_reduction = 1
        self.num_notes = num_notes
        self.num_durs = num_durs
        self.num_accs = num_accs
        for i in range(4):
            self.width_reduction = (self.width_reduction * params['conv_pooling_size'][i][1])
            self.height_reduction = (self.height_reduction * params['conv_pooling_size'][i][0])
        # Conv blocks: b1 pools per the config; b2..b4 pool height only.
        self.b1 = nn.Sequential(nn.Conv2d(params['img_channels'], params['conv_filter_n'][0], kernel_size=(3, 3), padding=1), nn.BatchNorm2d(params['conv_filter_n'][0]), nn.LeakyReLU(0.2, inplace=True), nn.MaxPool2d(kernel_size=params['conv_pooling_size'][0], stride=params['conv_pooling_size'][0]))
        self.b2 = nn.Sequential(nn.Conv2d(params['conv_filter_n'][0], params['conv_filter_n'][1], kernel_size=(3, 3), padding=1), nn.BatchNorm2d(params['conv_filter_n'][1]), nn.LeakyReLU(0.2, inplace=True), nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1)))
        self.b3 = nn.Sequential(nn.Conv2d(params['conv_filter_n'][1], params['conv_filter_n'][2], kernel_size=(3, 3), padding=1), nn.BatchNorm2d(params['conv_filter_n'][2]), nn.LeakyReLU(0.2, inplace=True), nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1)))
        self.b4 = nn.Sequential(nn.Conv2d(params['conv_filter_n'][2], params['conv_filter_n'][3], kernel_size=(3, 3), padding=1), nn.BatchNorm2d(params['conv_filter_n'][3]), nn.LeakyReLU(0.2, inplace=True), nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1)))
        rnn_hidden_units = params['rnn_units']
        rnn_hidden_layers = params['rnn_layers']
        feature_dim = (params['conv_filter_n'][(- 1)] * (params['img_height'] / self.height_reduction))
        self.r1 = nn.LSTM(int(feature_dim), hidden_size=rnn_hidden_units, num_layers=rnn_hidden_layers, dropout=0.5, bidirectional=True)
        intermediate_size = 512
        self.note_fc1 = nn.Linear((2 * rnn_hidden_units), intermediate_size)
        self.sym_fc1 = nn.Linear((2 * rnn_hidden_units), intermediate_size)
        self.note_emb = nn.Linear(intermediate_size, (90 * (self.num_durs + 1)))
        self.sym_emb = nn.Linear(intermediate_size, ((self.num_notes + 1) - 90))
        self.acc_emb = nn.Linear(intermediate_size, (90 * (self.num_accs + 1)))
        # dim=3 because the note/acc heads are reshaped to 4-D before softmax.
        self.sm = nn.LogSoftmax(dim=3)
        self.relu = nn.ReLU()
        print('Vocab size:', (num_durs + num_notes))

    def forward(self, x):
        """Encode the image batch; return (note_out, sym_out, acc_out)."""
        params = self.params
        width_reduction = self.width_reduction
        height_reduction = self.height_reduction
        input_shape = x.shape
        x = self.b1(x)
        x = self.b2(x)
        x = self.b3(x)
        x = self.b4(x)
        # (B, C, H, W) -> (W, B, H, C): width becomes the sequence axis.
        features = x.permute(3, 0, 2, 1)
        feature_dim = (params['conv_filter_n'][(- 1)] * (params['img_height'] // height_reduction))
        # The 2*2*2 factor compensates for b2..b4 pooling width by 1, not 2.
        feature_width = ((((2 * 2) * 2) * input_shape[3]) // width_reduction)
        stack = (int(feature_width), input_shape[0], int(feature_dim))
        features = torch.reshape(features, stack)
        (rnn_out, _) = self.r1(features)
        # Note head: per-slot duration distribution.
        note_out = self.relu(self.note_fc1(rnn_out))
        note_out = self.note_emb(note_out)
        note_out = note_out.reshape((note_out.shape[0], note_out.shape[1], 90, (self.num_durs + 1)))
        note_out = self.sm(note_out)
        # Symbol head: independent per-symbol scores; the epsilon keeps
        # log() finite when the sigmoid saturates at 0.
        sym_out = self.relu(self.sym_fc1(rnn_out))
        sym_out = self.sym_emb(sym_out)
        sym_out = torch.sigmoid(sym_out)
        sym_out = (sym_out + 1e-30)
        sym_out = torch.log(sym_out)
        # NOTE(review): this branch reuses note_fc1 rather than a dedicated
        # acc fc layer -- possibly intentional weight sharing with the note
        # head, possibly a copy-paste slip; confirm before changing.
        acc_out = self.relu(self.note_fc1(rnn_out))
        acc_out = self.acc_emb(acc_out)
        acc_out = acc_out.reshape((acc_out.shape[0], acc_out.shape[1], 90, (self.num_accs + 1)))
        acc_out = self.sm(acc_out)
        return (note_out, sym_out, acc_out)
|
def default_model_params():
    """Return the default CNN/RNN hyper-parameter dictionary."""
    return {
        'img_height': 128,
        'img_width': None,      # variable-width inputs
        'batch_size': 12,
        'img_channels': 1,
        'conv_blocks': 4,
        'conv_filter_n': [32, 64, 128, 256],
        'conv_filter_size': [[3, 3], [3, 3], [3, 3], [3, 3]],
        'conv_pooling_size': [[2, 2], [2, 2], [2, 2], [2, 2]],
        'rnn_units': 512,
        'rnn_layers': 2,
    }
|
def init_weights(m):
    """Xavier-initialise Conv2d/Linear weights and zero their biases.

    Intended for use with ``module.apply(init_weights)``.

    Args:
        m: any submodule; modules other than Conv2d/Linear are ignored.
    """
    if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
        torch.nn.init.xavier_uniform_(m.weight)
        # BUG FIX: layers built with bias=False have m.bias is None; the
        # original crashed on them with an AttributeError.
        if m.bias is not None:
            m.bias.data.fill_(0)
|
def save_model():
    """Persist model and optimiser state to models/latest_model<N>.pt.

    NOTE(review): relies on module-level globals ``model_num``, ``nn_model``
    and ``optimizer`` being defined elsewhere, and assumes the ``models/``
    directory already exists -- confirm both before reuse.
    """
    root_model_path = (('models/latest_model' + str(model_num)) + '.pt')
    model_dict = nn_model.state_dict()
    state_dict = {'model': model_dict, 'optimizer': optimizer.state_dict()}
    torch.save(state_dict, root_model_path)
    print('Saved model')
|
class Measure():
    """Converts one MusicXML <measure> element into 'semantic' token strings
    (one string per staff), e.g. 'clef-G2 + keySignature-CM + note-C4_quarter'.
    """

    def __init__(self, measure, num_staves, beats, beat_type):
        self.measure = measure          # the <measure> parse-tree element
        self.num_staves = num_staves    # staves in this part
        self.beats = beats              # current time-signature numerator
        self.beat_type = beat_type      # current time-signature denominator

    def parse_attributes(self, attributes):
        """Read a measure's <attributes> (key, time, clef, measure-style).

        Returns (sequence, skip, beats, beat_type) where ``sequence`` is a
        per-staff list of ' + '-joined tokens (or the sentinel string
        'percussion' when the key cannot be parsed) and ``skip`` is the
        number of following measures covered by a multirest.
        """
        sequence = ''
        skip = 0
        for attribute in attributes:
            if (attribute.tag == 'key'):
                try:
                    sequence += (('keySignature-' + self.num_sharps_flats_to_key(int(attribute[0].text))) + ' ')
                except ValueError:
                    # Non-numeric <fifths> -- treat as an unsupported part.
                    return ('percussion', skip, self.beats, self.beat_type)
            elif (attribute.tag == 'time'):
                try:
                    self.beats = int(attribute[0].text)
                    self.beat_type = int(attribute[1].text)
                except ValueError:
                    return ('percussion', skip, self.beats, self.beat_type)
                if ('symbol' in attribute.attrib):
                    if (attribute.attrib['symbol'] == 'cut'):
                        sequence += ('timeSignature-C/' + ' ')
                    elif (attribute.attrib['symbol'] == 'common'):
                        sequence += ('timeSignature-C' + ' ')
                    else:
                        sequence += (((('timeSignature-' + attribute[0].text) + '/') + attribute[1].text) + ' ')
                else:
                    sequence += (((('timeSignature-' + attribute[0].text) + '/') + attribute[1].text) + ' ')
            elif ((attribute.tag == 'clef') and (('number' not in attribute.attrib) or (attribute.attrib['number'] == '1'))):
                # Clef is prepended so it precedes key/time in the output.
                sequence = (((('clef-' + attribute[0].text) + attribute[1].text) + ' ') + sequence)
            elif (attribute.tag == 'measure-style'):
                (s, skip) = self.parse_measure_style(attribute)
                sequence += s
        # Join multiple attribute tokens with ' + ' separators.
        seq_split = sequence.split()
        if (len(seq_split) > 1):
            sequence = (' + '.join(seq_split) + ' ')
        sequence = [sequence for i in range(self.num_staves)]
        return (sequence, skip, self.beats, self.beat_type)

    def parse_note(self, note):
        """Read one <note> element.

        Returns (sequence, is_chord, voice, dur, is_grace, articulation);
        ``sequence`` is a per-staff token list, or the sentinel strings
        'multi-staff' (note belongs to staff > 1) or 'forward' (invisible
        note used only to advance time).
        """
        sequence = ['' for x in range(self.num_staves)]
        cur_rest = False
        (staff, voice, has_dot, is_chord, dur, is_grace, stem_down, articulation) = (0, 1, False, False, 0, False, True, '')
        # First pass: bookkeeping children (staff/voice/duration/flags).
        for e in note:
            if (e.tag == 'staff'):
                staff = (int(e.text) - 1)
                if (staff > 0):
                    # Only the first staff is converted.
                    return ('multi-staff', True, voice, dur, is_grace, articulation)
            if (e.tag == 'voice'):
                voice = int(e.text)
            if (e.tag == 'dot'):
                has_dot = True
            if (e.tag == 'chord'):
                is_chord = True
            if (e.tag == 'duration'):
                dur = int(e.text)
            if (e.tag == 'grace'):
                is_grace = True
            if (e.tag == 'stem'):
                stem_down = (False if (e.text == 'up') else True)
        # Invisible notes only advance time.
        if (('print-object' in note.attrib) and (note.attrib['print-object'] == 'no')):
            return ('forward', True, voice, dur, is_grace, articulation)
        pitch = ''
        alter = ''
        octave = ''
        # Second pass: accidental (affects the printed pitch token).
        for elem in note:
            if (elem.tag == 'accidental'):
                if (elem.text == 'sharp'):
                    alter = '#'
                elif (elem.text == 'flat'):
                    alter = 'b'
                elif (elem.text == 'natural'):
                    alter = 'N'
                elif (elem.text == 'double-sharp'):
                    alter = '##'
                elif (elem.text == 'flat-flat'):
                    alter = 'bb'
        # Third pass: pitch / rest / duration-type / notations.
        for elem in note:
            if (elem.tag == 'pitch'):
                for e in elem:
                    if (e.tag == 'step'):
                        pitch = e.text
                    elif (e.tag == 'alter'):
                        # <alter> is covered by the <accidental> pass above.
                        pass
                    elif (e.tag == 'octave'):
                        octave = e.text
                sequence[staff] += ((('note-' + pitch) + alter) + octave)
            if (elem.tag == 'rest'):
                if (('measure' in elem.attrib) and (elem.attrib['measure'] == 'yes')):
                    sequence[staff] += (self.rest_measure_to_note() + ' ')
                else:
                    sequence[staff] += 'rest'
                    cur_rest = True
            elif (elem.tag == 'type'):
                dot = ('. ' if has_dot else ' ')
                # Map MusicXML type names onto the semantic vocabulary.
                duration = ('sixteenth' if (elem.text == '16th') else ('thirty_second' if (elem.text == '32nd') else ('sixty_fourth' if (elem.text == '64th') else ('hundred_twenty_eighth' if (elem.text == '128th') else elem.text))))
                if cur_rest:
                    sequence[staff] += (('-' + duration) + dot)
                    cur_rest = False
                else:
                    sequence[staff] += (('_' + duration) + dot)
            elif (elem.tag == 'chord'):
                pass
            elif (elem.tag == 'notations'):
                articulation = self.parse_notations(elem, stem_down)
        return (sequence, is_chord, voice, dur, is_grace, articulation)

    def parse_direction(self, direction):
        """Read a <direction> element (dynamics/tempo) -- currently a no-op
        that returns empty per-staff sequences; the disabled logic is kept
        below as string literals.
        """
        sequence = ['' for x in range(self.num_staves)]
        "\n        staff = 0\n        for e in direction:\n            if e.tag == 'staff':\n                staff = int(e.text) - 1\n        "
        "\n        for elem in direction:\n\n            pass\n\n            if elem.tag == 'direction-type':\n\n                if elem[0].tag == 'dynamics':\n                    sequence[staff] += elem[0][0].tag + '-dynamic' + ' '\n\n                if elem[0].tag == 'words' and elem[0].text is not None:\n                    sequence[staff] += elem[0].text + '-dynamic' + ' '\n\n            elif elem.tag == 'sound':\n\n                if 'tempo' in elem.attrib:\n                    pass # don't show tempo for now\n                    #sequence[staff] += elem.attrib['tempo'] + '-tempo' + ' '\n        "
        return sequence

    def parse_notations(self, notation, stem_down):
        """Read a <notations> element and return its articulation tokens.

        ``stem_down`` is accepted (could disambiguate above/below placement)
        but currently unused.
        """
        sequence = ''
        has_fermata = False
        for n in notation:
            if (n.tag == 'tied'):
                if (n.attrib['type'] == 'start'):
                    pass
            elif (n.tag == 'slur'):
                pass
            elif (n.tag == 'articulations'):
                for articulation in n:
                    sequence += (articulation.tag + ' ')
            elif (n.tag == 'fermata'):
                has_fermata = True
        if has_fermata:
            sequence += 'fermata '
        return sequence

    def parse_measure_style(self, style):
        """Read a <measure-style> element (multirests).

        Returns (sequence, skip): the multirest token and how many measures
        it spans (0 when there is no multirest).
        """
        sequence = ''
        skip = 0
        for s in style:
            if (s.tag == 'multiple-rest'):
                sequence += (('multirest-' + s.text) + ' ')
                skip = int(s.text)
        return (sequence, skip)

    def num_sharps_flats_to_key(self, num):
        """Map a <fifths> count to a key name (>0 sharps, <0 flats).

        Raises KeyError for values outside [-7, 7].
        """
        mapping = {7: 'C#M', 6: 'F#M', 5: 'BM', 4: 'EM', 3: 'AM', 2: 'DM', 1: 'GM', 0: 'CM', (- 1): 'FM', (- 2): 'BbM', (- 3): 'EbM', (- 4): 'AbM', (- 5): 'DbM', (- 6): 'GbM', (- 7): 'CbM'}
        return mapping[num]

    def rest_measure_to_note(self):
        """Return the token for a whole-measure rest.

        NOTE(review): ``type_map`` is built but never consulted -- the method
        always returns 'rest-whole' regardless of the time signature; this
        looks unfinished, confirm intent.
        """
        type_map = {'1': 'whole', '2': 'half', '4': 'quarter', '8': 'eighth', '12': 'eighth.', '16': 'sixteenth', '32': 'thirthy_second', '48': 'thirthy_second.'}
        return 'rest-whole'
|
class MusicXML():
def __init__(self, input_file=None, output_file=None):
'\n Stores MusicXML file passed in \n '
self.input_file = input_file
self.output_file = output_file
self.key = ''
self.clef = ''
self.time = ''
self.beat = 4
self.beat_type = 4
self.polyphonic_page = False
self.get_width()
def get_width(self):
    """Read page width and left/right margins from the MusicXML <defaults>.

    Sets ``self.width`` (page width) and ``self.width_cutoff``
    (printable width + 1), used by get_sequences() to detect page breaks.

    NOTE(review): if <page-width> is absent, ``self.width`` is never set and
    the final line raises AttributeError; also the error message below
    mentions <score-partwise>/<part> although the check is for <defaults> --
    confirm both.
    """
    margins = 0
    with open(self.input_file, 'r', errors='ignore') as input_file:
        try:
            tree = ET.parse(input_file)
            root = tree.getroot()
        except:
            # Unparseable XML: leave width attributes untouched.
            return
    defaults_idx = (- 1)
    for (i, child) in enumerate(root):
        if (child.tag == 'defaults'):
            defaults_idx = i
            break
    if (defaults_idx == (- 1)):
        print('MusicXML file:', self.input_file, ' missing <score-partwise> or <part>')
        return
    margin_found = False
    for (i, e) in enumerate(root[defaults_idx]):
        if (e.tag == 'page-layout'):
            for c in e:
                if (c.tag == 'page-width'):
                    self.width = float(c.text)
                elif ((c.tag == 'page-margins') and (not margin_found)):
                    # Only the first page-margins element is honoured.
                    for k in c:
                        if (k.tag == 'left-margin'):
                            margins += float(k.text)
                        elif (k.tag == 'right-margin'):
                            margins += float(k.text)
                    margin_found = True
    self.width_cutoff = ((self.width - margins) + 1)
def write_sequences(self):
    """Write one '<stem>-<page>.semantic' file per page sequence.

    NOTE(review): the bare ``break`` below exits the loop on its first
    iteration, so the body never runs and no files are ever written -- it
    looks like a debugging leftover; confirm before relying on this method.
    """
    sequences = self.get_sequences()
    file_num = 0
    fname = self.output_file.split('.')[0]
    for seq in sequences:
        break
        file_num += 1
        if (seq == ''):
            continue
        with open((((fname + '-') + str(file_num)) + '.semantic'), 'w') as out_file:
            out_file.write('')
            out_file.write((seq + '\n'))
            out_file.write('')
            out_file.close()
def get_sequences(self):
    """Parse the MusicXML file into per-page symbol sequences.

    Only the first staff of the first <part> is converted. A new page is
    started when the accumulated measure widths exceed ``self.width_cutoff``
    or a <print> element carries a <system-layout> child.

    Returns:
        list of sequence strings, one per detected page (may be ['']
        when required elements are missing or parsing fails).
    """
    sequences = []
    new_score = True
    with open(self.input_file, 'r') as input_file:
        try:
            tree = ET.parse(input_file)
            root = tree.getroot()
        except:
            # Unparseable XML: nothing to convert.
            return sequences
    part_list_idx = (- 1)
    part_idx = (- 1)
    for (i, child) in enumerate(root):
        if (child.tag == 'part-list'):
            part_list_idx = i
        elif (child.tag == 'part'):
            # Remember only the FIRST <part>.
            part_idx = (i if (part_idx == (- 1)) else part_idx)
    if ((part_list_idx == (- 1)) or (part_idx == (- 1))):
        print('MusicXML file:', self.input_file, ' missing <part-list> or <part>')
        return ['']
    num_staves = 1
    try:
        # Look inside the first measure's first child for a staff count.
        for e in root[part_idx][0][0]:
            if (e.tag == 'staff-layout'):
                num_staves = int(e.attrib['number'])
    except IndexError:
        return ['']
    staves = ['' for x in range(num_staves)]
    r_iter = iter(root[part_idx])
    cur_width = 0.0
    page_num = 1
    new_page = False
    for (i, measure) in enumerate(r_iter):
        cur_width += float(measure.attrib['width'])
        child_elems = [e for e in measure]
        child_tags = [e.tag for e in child_elems]
        if ('print' in child_tags):
            print_children = [e.tag for e in list(iter(child_elems[child_tags.index('print')]))]
            if ('system-layout' in print_children):
                # NOTE(review): <system-layout> marks a new SYSTEM; it is
                # treated as a page break here -- confirm intent.
                new_page = True
        if ((cur_width > self.width_cutoff) or new_page):
            # Flush the finished page (first staff only) and reset.
            sequences.append(staves[0])
            staves = ['' for x in range(num_staves)]
            cur_width = int(float(measure.attrib['width']))
            page_num += 1
            if self.polyphonic_page:
                print(((self.input_file.split('\\')[(- 1)].split('.')[0] + '-') + str((page_num - 1))))
                self.polyphonic_page = False
        (measure_staves, skip) = self.read_measure(measure, num_staves, new_page, staves, new_score)
        new_score = False
        for j in range(num_staves):
            staves[j] += measure_staves[j]
        # Multirests cover the following (skip - 1) measures: consume them.
        for j in range((skip - 1)):
            next(r_iter)
        new_page = False
    if (cur_width > 0):
        # Flush the last (partial) page.
        sequences.append(staves[0])
        staves = ['' for x in range(num_staves)]
        cur_width = int(float(measure.attrib['width']))
    return sequences
    def read_measure(self, measure, num_staves, new_page, cur_staves, new_score):
        """Read one <measure> element and return its symbol sequences.

        :param measure: .xml element of the current measure being read
        :param num_staves: number of staves in the measure
        :param new_page: indicates if starting a new page
        :param cur_staves: rest of sequence so far from previous measures
        :param new_score: indicates if first measure of the score
        :returns: (staves, skip) where `staves` holds the per-staff symbol
            strings for this measure and `skip` is the number of measures
            consumed by a multirest (0 if none).
        """
        m = Measure(measure, num_staves, self.beat, self.beat_type)
        staves = ['' for _ in range(num_staves)]
        skip = 0
        # Per-voice symbol lists and matching per-symbol durations, used to
        # merge simultaneous voices into staff 0 at the end of the measure.
        voice_lines = dict()
        voice_durations = dict()
        forward_dur = []  # pending <forward> durations not yet assigned to a voice
        cur_voice = (- 1)
        # Attributes in effect at measure start, used for page/score headers.
        start_clef = self.clef
        start_key = self.key
        start_time = self.time
        # Percussion / tablature staves are not transcribed.
        if (('percussion' in self.clef) or ('TAB' in self.clef)):
            return (staves, 0)
        is_grace = False
        prev_grace = False
        for elem in measure:
            cur_elem = ['' for _ in range(num_staves)]
            is_chord = False
            if (elem.tag == 'attributes'):
                (cur_elem, skip, self.beat, self.beat_type) = m.parse_attributes(elem)
                if (('percussion' in cur_elem[0]) or ('TAB' in cur_elem[0]) or ('percussion' in cur_elem) or ('TAB' in cur_elem)):
                    self.clef = 'percussion'
                    return (['' for _ in range(num_staves)], 0)
            elif (elem.tag == 'note'):
                (cur_elem, is_chord, voice, duration, is_grace, _) = m.parse_note(elem)
                # Voice changed: flush any pending forward into the old voice.
                if ((cur_voice != voice) and (cur_voice != (- 1))):
                    if ((len(forward_dur) != 0) and (cur_voice in voice_lines)):
                        voice_lines[cur_voice].append('forward')
                        voice_durations[cur_voice].append(forward_dur[(- 1)])
                        del forward_dur[(- 1)]
                cur_voice = voice
                if (cur_elem == 'multi-staff'):
                    # Note belongs to a staff other than the first; skip it.
                    continue
                if (cur_elem == 'forward'):
                    # Invisible rest: accumulate its duration.
                    if (len(forward_dur) == 1):
                        forward_dur[0] += duration
                    else:
                        forward_dur.append(duration)
                else:
                    if (voice not in voice_lines):
                        voice_lines[voice] = []
                        voice_durations[voice] = []
                    if (len(forward_dur) != 0):
                        voice_lines[voice].append('+ ')
                        voice_lines[voice].append('forward')
                        voice_durations[voice].append(0)
                        voice_durations[voice].append(forward_dur[(- 1)])
                        del forward_dur[(- 1)]
                    # '+ ' separates symbol groups; chords and grace notes
                    # stay attached to the previous group.
                    if ((((cur_staves[0] != '') or (staves[0] != '')) and (not is_chord) and (cur_elem[0] != '')) and (not is_grace) and (not prev_grace)):
                        voice_lines[voice].append('+ ')
                        voice_durations[voice].append(0)
                        voice_lines[voice].append(cur_elem[0])
                        voice_durations[voice].append(duration)
                    elif (staves[0] != ''):
                        voice_lines[voice].append(cur_elem[0])
                        voice_durations[voice].append(0)
                    else:
                        voice_lines[voice].append('+ ')
                        voice_durations[voice].append(0)
                        voice_lines[voice].append(cur_elem[0])
                        voice_durations[voice].append(duration)
            elif (elem.tag == 'direction'):
                cur_elem = m.parse_direction(elem)
            elif (elem.tag == 'forward'):
                forward_dur.append(int(elem[0].text))
            elif (elem.tag == 'backup'):
                # <backup> rewinds time (start of another voice): flush any
                # pending forward and reset the current voice.
                if ((len(forward_dur) != 0) and (cur_voice in voice_lines)):
                    voice_lines[cur_voice].append('forward')
                    voice_durations[cur_voice].append(forward_dur[(- 1)])
                    del forward_dur[(- 1)]
                if (len(forward_dur) != 0):
                    forward_dur = []
                cur_voice = (- 1)
            # Append non-note symbols (attributes/directions) straight onto
            # the staff strings.
            for i in range(num_staves):
                if ('multirest' in cur_elem[0]):
                    pass
                if (cur_elem != 'forward'):
                    if (((cur_staves[i] != '') or (staves[i] != '')) and (not is_chord) and (cur_elem[i] != '')):
                        staves[i] += ('+ ' + cur_elem[i])
                    else:
                        staves[i] += cur_elem[i]
            # Mirror key/clef/time changes into every open voice line and
            # remember them as the currently-active attributes.
            for word in cur_elem[0].split():
                if ('key' in word):
                    if ((((cur_staves[0] != '') or (staves[0] != '')) and (not is_chord) and (cur_elem[0] != '')) and (self.key != '')):
                        for v in voice_lines.keys():
                            voice_durations[v].append(0)
                            voice_durations[v].append(0)
                            voice_lines[v].append('+ ')
                            voice_lines[v].append((word + ' '))
                    self.key = word
                    start_key = self.key
                if ('clef' in word):
                    if ((((cur_staves[0] != '') or (staves[0] != '')) and (not is_chord) and (cur_elem[0] != '')) and (self.clef != '')):
                        for v in voice_lines.keys():
                            voice_durations[v].append(0)
                            voice_durations[v].append(0)
                            voice_lines[v].append('+ ')
                            voice_lines[v].append((word + ' '))
                    self.clef = word
                    start_clef = self.clef
                if ('time' in word):
                    if ((((cur_staves[0] != '') or (staves[0] != '')) and (not is_chord) and (cur_elem[0] != '')) and (self.time != '')):
                        for v in voice_lines.keys():
                            voice_durations[v].append(0)
                            voice_durations[v].append(0)
                            voice_lines[v].append('+ ')
                            voice_lines[v].append((word + ' '))
                    self.time = word
                    start_time = self.time
            if (skip > 0):
                # Multirest: the remaining measures are skipped by the caller.
                break
            prev_grace = is_grace
        # Close the measure with a barline on the first (sorted) voice.
        if (len(voice_lines) > 0):
            key = sorted(voice_lines.keys())[0]
            voice_lines[key].append('+ ')
            voice_lines[key].append('barline ')
            voice_durations[key].append(0)
            voice_durations[key].append(0)
        # A remaining forward goes to the last voice if it still fits.
        if ((len(forward_dur) != 0) and (len(voice_lines) > 0)):
            keys = sorted(voice_lines.keys())
            max_sum = sum(voice_durations[keys[0]])
            if ((sum(voice_durations[keys[(- 1)]]) + forward_dur[(- 1)]) <= max_sum):
                voice_lines[keys[(- 1)]].append('forward')
                voice_durations[keys[(- 1)]].append(forward_dur[(- 1)])
                del forward_dur[(- 1)]
        for i in range(num_staves):
            staves[i] = (staves[i] + '+ barline ')
        # Polyphonic measure: merge all voices into staff 0 in time order.
        if (len(voice_lines) > 1):
            self.polyphonic_page = True
            keys = sorted(voice_lines.keys())
            # Pad shorter voices so that all voices span the same duration.
            max_sum = max([sum(voice_durations[k]) for k in keys])
            for k in keys:
                if (sum(voice_durations[k]) < max_sum):
                    voice_lines[k].append('')
                    voice_durations[k].append((max_sum - sum(voice_durations[k])))
            staves[0] = ''
            min_sum = 0
            total = 0
            voice_idxs = dict()
            voice_sums = dict()
            for voice in voice_lines:
                voice_idxs[voice] = 0
                voice_sums[voice] = 0
                total = max(total, sum(voice_durations[voice]))
            notes_to_add = []
            c = 0
            # Sweep time forward, always advancing the voice(s) furthest
            # behind; `c` guards against a merge that cannot make progress.
            while (min_sum < total):
                c += 1
                if (c >= 100):
                    print('Loop broken')
                    return (['' for x in range(num_staves)], 0)
                cur_sums = []
                for voice in voice_lines:
                    cur_sums.append(voice_sums[voice])
                min_sum = min(cur_sums)
                for voice in voice_lines:
                    if ((voice_sums[voice] == min_sum) and (voice_idxs[voice] < len(voice_lines[voice]))):
                        if (((len(notes_to_add) == 0) or ((notes_to_add[(- 1)] != '+ ') or (voice_lines[voice][voice_idxs[voice]] != '+ '))) and (voice_lines[voice][voice_idxs[voice]] != 'forward')):
                            notes_to_add.append(voice_lines[voice][voice_idxs[voice]])
                        voice_sums[voice] += voice_durations[voice][voice_idxs[voice]]
                        voice_idxs[voice] += 1
                        # Pull in the remainder of the current symbol group.
                        while ((voice_idxs[voice] < len(voice_lines[voice])) and (((voice_lines[voice][voice_idxs[voice]] != '+ ') and (notes_to_add[(- 1)] != '+ ')) or (voice_lines[voice][voice_idxs[voice]] == 'barline '))):
                            if (voice_lines[voice][voice_idxs[voice]] != 'forward'):
                                notes_to_add.append(voice_lines[voice][voice_idxs[voice]])
                            voice_sums[voice] += voice_durations[voice][voice_idxs[voice]]
                            voice_idxs[voice] += 1
                        min_sum = min(min_sum, voice_sums[voice])
            # Regroup merged symbols between '+ ' separators, sorting each
            # simultaneous group top-to-bottom on the staff.
            staff_zero = ''
            idx = 0
            while (idx < len(notes_to_add)):
                symbols = [notes_to_add[idx]]
                idx += 1
                while (('+ ' not in symbols) and (idx < len(notes_to_add))):
                    symbols.append(notes_to_add[idx])
                    idx += 1
                if ('+ ' in symbols):
                    symbols.remove('+ ')
                if (len(symbols) == 0):
                    continue
                symbols.sort(key=functools.cmp_to_key(self.compare_symbols))
                staff_zero += ('+ ' + ''.join(symbols))
            staves[0] = staff_zero
        # Prefix clef/key/time headers at the start of a score or a page.
        for i in range(num_staves):
            if (len(voice_lines) > 1):
                time_added = False
                key_added = False
                if new_score:
                    if ('time' not in ''.join(staves[i].split()[:5])):
                        staves[i] = ((start_time + ' ') + staves[i])
                        time_added = True
                if new_page:
                    if ('key' not in ''.join(staves[i].split()[:5])):
                        if time_added:
                            staves[i] = ((start_key + ' + ') + staves[i])
                        else:
                            staves[i] = ((start_key + ' ') + staves[i])
                        key_added = True
                    if ('clef' not in ''.join(staves[i].split()[:5])):
                        if (time_added or key_added):
                            staves[i] = ((start_clef + ' + ') + staves[i])
                        else:
                            staves[i] = ((start_clef + ' ') + staves[i])
            else:
                if new_score:
                    if ('time' not in ''.join(staves[i].split()[:5])):
                        staves[i] = ((start_time + ' ') + staves[i])
                if new_page:
                    if ('key' not in ''.join(staves[i].split()[:5])):
                        staves[i] = ((start_key + ' + ') + staves[i])
                    if ('clef' not in ''.join(staves[i].split()[:5])):
                        staves[i] = ((start_clef + ' + ') + staves[i])
        return (staves, skip)
def compare_symbols(self, a, b):
'\n Given a list of symbols, sort from how they would\n appear on a staff (top to bottom), assume rest/clef is on top\n '
ret_val = 0
if ('clef' in a):
ret_val = 1
return ret_val
if ('clef' in b):
ret_val = (- 1)
return ret_val
if (('note' in a) and ('note' in b)):
a_note = self.note_to_num(''.join(a.split('-')[1].split('_')[0][:(- 1)]))
a_oct = int(a.split('_')[0][(- 1)])
b_note = self.note_to_num(''.join(b.split('-')[1].split('_')[0][:(- 1)]))
b_oct = int(b.split('_')[0][(- 1)])
if (a_oct > b_oct):
ret_val = 1
elif (a_oct == b_oct):
ret_val = (1 if (a_note > b_note) else (- 1))
else:
ret_val = (- 1)
elif (('rest' in a) and ('rest' not in b)):
ret_val = 1
elif (('rest' in a) and ('rest' in b)):
ret_val = 0
else:
ret_val = (- 1)
return ret_val
def note_to_num(self, note):
'\n Converts note to num for purpose of sorting\n '
note_dict = {'Cb': 0, 'C': 1, 'C#': 2, 'Db': 2, 'D': 3, 'D#': 4, 'Eb': 4, 'E': 5, 'E#': 6, 'Fb': 6, 'F': 7, 'F#': 8, 'Gb': 8, 'G': 9, 'G#': 10, 'Ab': 10, 'A': 11, 'A#': 12, 'Bb': 12, 'B': 13, 'B#': 14}
try:
return note_dict[note]
except KeyError:
try:
return note_dict[note[:(- 1)]]
except KeyError:
print('Error with note dict?', note)
return 0
|
def main():
    """Strip <credit> and <rights> elements from every .musicxml file in a
    directory, rewriting each file in place.

    Files that fail to parse are deleted.  Prints the number of files
    rewritten.
    """
    num_files = 0
    parser = argparse.ArgumentParser()
    parser.add_argument('-input', dest='input', type=str, required=('-c' not in sys.argv), help='Path to the input directory with MusicXMLs.')
    args = parser.parse_args()
    for file_name in os.listdir(args.input):
        if not file_name.endswith('.musicxml'):
            continue
        input_file = os.path.join(args.input, file_name)
        try:
            doc = etree.parse(input_file)
        except Exception:
            # Fix: narrowed from a bare `except:`. Unparsable score: drop
            # it so downstream tooling never sees it.
            os.remove(input_file)
            continue
        # Remove engraving credits / rights notices anywhere in the tree.
        for xpath_expr in ('//credit', '//rights'):
            for elem in doc.xpath(xpath_expr):
                elem.getparent().remove(elem)
        # Fix: write via a context manager so the handle is always closed.
        with open(input_file, 'wb') as f:
            f.write(etree.tostring(doc))
        num_files += 1
    print('Total files:', num_files)
|
def main():
    """Delete image files that have no matching label (.semantic) file.

    Label file names may zero-pad the page number differently from the
    image name, so up to three padded variants are checked.  Prints the
    counts of missing / total / kept files.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-imgs', dest='imgs', type=str, required=True, help='Path to the directory with imgs.')
    parser.add_argument('-labels', dest='labels', type=str, required=True, help='Path to the directory with labels.')
    args = parser.parse_args()
    num_missing = 0
    num_total = 0
    for file_name in os.listdir(args.imgs):
        if not file_name.endswith('.png'):
            continue
        num_total += 1
        sample_id = file_name.split('-')[0]
        num = file_name.split('-')[1].split('.')[0]
        # Strip up to two leading zeros from the page number.
        if num.startswith('00'):
            num = num[2:]
        elif num.startswith('0'):
            num = num[1:]
        # Fix: probe existence with os.path.isfile over the three zero-
        # padded variants instead of the nested open()/close() try chains.
        candidates = (sample_id + '-' + pad + num + '.semantic' for pad in ('', '0', '00'))
        if not any(os.path.isfile(os.path.join(args.labels, c)) for c in candidates):
            num_missing += 1
            os.remove(os.path.join(args.imgs, file_name))
    print(num_missing, num_total, (num_total - num_missing))
|
def main():
    """Delete files in a directory whose base name is not in a list of
    polyphonic files.

    The list file contains one file name per line; names may zero-pad the
    page number differently, so padded variants are also matched.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-poly', dest='poly', type=str, required=True, help='File with list of polyphonic files')
    parser.add_argument('-dir', dest='dir', type=str, required=True, help='Path to the directory with labels or images.')
    args = parser.parse_args()
    # Fix: close the list file deterministically (it was left open).
    with open(args.poly, 'r') as f:
        p_files = set(s.split('.')[0].strip() for s in f.readlines())
    print(len(p_files))
    for file_name in os.listdir(args.dir):
        sem_name1 = file_name.split('.')[0]
        sample_id = file_name.split('-')[0]
        num = file_name.split('-')[1].split('.')[0]
        # Strip up to two leading zeros from the page number.
        if num.startswith('00'):
            num = num[2:]
        elif num.startswith('0'):
            num = num[1:]
        sem_name2 = (sample_id + '-0') + num
        sem_name3 = (sample_id + '-00') + num
        matching = (sem_name1 in p_files) or (sem_name2 in p_files) or (sem_name3 in p_files)
        if not matching:
            os.remove(os.path.join(args.dir, file_name))
|
def main():
    """Delete label (.semantic) files that contain no notes.

    Bug fix: the original removed the same path twice (``sem_name1`` is
    the listed ``file_name`` itself), so the second ``os.remove`` raised
    FileNotFoundError and cascaded into the fallback branches, crashing on
    the first sparse file.  Each sparse file is now removed exactly once,
    and the handle is closed via ``with`` on every path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-input', dest='input', type=str, required=('-c' not in sys.argv), help='Path to the directory with images and labels')
    args = parser.parse_args()
    sparse_count = 0
    for file_name in os.listdir(args.input):
        if not file_name.endswith('.semantic'):
            continue
        path = os.path.join(args.input, file_name)
        try:
            with open(path, 'r') as sem_file:
                seq = sem_file.read()
        except FileNotFoundError:
            # Listed but already gone (e.g. removed concurrently): skip.
            continue
        if 'note' not in seq:
            sparse_count += 1
            os.remove(path)
    print('Number of sparse files:', sparse_count)
|
def main():
    """Remove every first-page image (``*-1.png``, with any zero padding)
    from the given directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-input', dest='input', type=str, required=('-c' not in sys.argv), help='Path to the directory with images.')
    args = parser.parse_args()
    # endswith accepts a tuple: one call covers all padded variants.
    first_page_suffixes = ('-1.png', '-01.png', '-001.png')
    for file_name in os.listdir(args.input):
        if file_name.endswith(first_page_suffixes):
            os.remove(os.path.join(args.input, file_name))
|
class FdGars(keras.Model):
    """
    The FdGars model: a two-layer GCN node classifier.
    """

    def __init__(self, input_dim: int, nhid: int, output_dim: int, args: argparse.Namespace) -> None:
        """
        :param input_dim: the input feature dimension
        :param nhid: the output embedding dimension of the first GCN layer
        :param output_dim: the output embedding dimension of the last GCN layer
            (number of classes)
        :param args: additional parameters (weight_decay,
            num_features_nonzero, dropout)
        """
        super().__init__()
        self.input_dim = input_dim
        self.nhid = nhid
        self.output_dim = output_dim
        self.weight_decay = args.weight_decay
        self.num_features_nonzero = args.num_features_nonzero
        # Two stacked GCN layers: a sparse-input ReLU layer followed by a
        # linear layer producing per-class logits.
        self.layers_ = []
        self.layers_.append(GraphConvolution(input_dim=self.input_dim, output_dim=self.nhid, num_features_nonzero=self.num_features_nonzero, activation=tf.nn.relu, dropout=args.dropout, is_sparse_inputs=True, norm=True))
        self.layers_.append(GraphConvolution(input_dim=self.nhid, output_dim=self.output_dim, num_features_nonzero=self.num_features_nonzero, activation=(lambda x: x), dropout=args.dropout, norm=False))

    def call(self, inputs: list, training: bool = True) -> Tuple[(tf.Tensor, tf.Tensor)]:
        """
        Forward propagation
        :param inputs: (support, features, label, mask)
        :param training: whether in the training mode
        :returns: (masked cross-entropy loss plus L2 penalty, masked accuracy)
        """
        (support, x, label, mask) = inputs
        outputs = [x]
        # Fix: iterate the explicit layer list (self.layers_) instead of
        # Keras's implicit Model.layers property, consistent with the
        # weight-decay loop below (and with Player2Vec's GCN_layers usage).
        for layer in self.layers_:
            hidden = layer((outputs[(- 1)], support), training)
            outputs.append(hidden)
        output = outputs[(- 1)]
        # L2 weight decay is applied to the first layer's weights only.
        loss = tf.zeros([])
        for var in self.layers_[0].trainable_variables:
            loss += (self.weight_decay * tf.nn.l2_loss(var))
        loss += masked_softmax_cross_entropy(output, label, mask)
        acc = masked_accuracy(output, label, mask)
        return (loss, acc)
|
def FdGars_main(support: list, features: tf.SparseTensor, label: tf.Tensor, masks: list, args: argparse.ArgumentParser().parse_args()) -> None:
    """
    Main function to train, val and test the model

    :param support: a list of the sparse adjacency matrices
    :param features: node feature tuple for all nodes {coords, values, shape}
    :param label: the label tensor for all nodes
    :param masks: a list of mask tensors to obtain the train, val, test data
    :param args: additional parameters
    """
    model = FdGars(args.input_dim, args.nhid, args.output_dim, args)
    optimizer = optimizers.Adam(lr=args.lr)
    train_inputs = [support, features, label, masks[0]]
    val_inputs = [support, features, label, masks[1]]
    test_inputs = [support, features, label, masks[2]]
    for epoch in tqdm(range(args.epochs)):
        # One optimization step on the training mask.
        with tf.GradientTape() as tape:
            train_loss, train_acc = model(train_inputs)
        gradients = tape.gradient(train_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        # Evaluate on the validation mask with dropout disabled.
        val_loss, val_acc = model(val_inputs, training=False)
        if epoch % 10 == 0:
            print(f'train_loss: {train_loss:.4f}, train_acc: {train_acc:.4f},val_loss: {val_loss:.4f},val_acc: {val_acc:.4f}')
    # Final evaluation on the held-out test mask.
    _, test_acc = model(test_inputs, training=False)
    print(f'Test acc: {test_acc:.4f}')
|
class GAS(keras.Model):
    """
    The GAS model
    """

    def __init__(self, args: argparse.Namespace) -> None:
        """
        :param args: argument used by the GAS model
        """
        super().__init__()
        self.class_size = args.class_size
        self.reviews_num = args.reviews_num
        self.input_dim_i = args.input_dim_i
        self.input_dim_u = args.input_dim_u
        self.input_dim_r = args.input_dim_r
        self.input_dim_r_gcn = args.input_dim_r_gcn
        self.output_dim1 = args.output_dim1
        self.output_dim2 = args.output_dim2
        self.output_dim3 = args.output_dim3
        self.num_features_nonzero = args.num_features_nonzero
        self.gcn_dim = args.gcn_dim
        self.h_i_size = args.h_i_size
        self.h_u_size = args.h_u_size
        # Review embedding: concatenation of review, user and item features.
        self.r_agg_layer = ConcatenationAggregator(input_dim=((self.input_dim_r + self.input_dim_u) + self.input_dim_i), output_dim=self.output_dim1)
        # Attention aggregation producing user and item embeddings.
        self.iu_agg_layer = AttentionAggregator(input_dim1=self.h_u_size, input_dim2=self.h_i_size, input_dim3=self.input_dim_u, input_dim4=self.input_dim_i, output_dim=self.output_dim2, concat=True)
        # GCN over the review (comment) graph.
        self.r_gcn_layer = GraphConvolution(input_dim=self.input_dim_r_gcn, output_dim=self.output_dim3, num_features_nonzero=self.num_features_nonzero, activation=tf.nn.relu, dropout=args.dropout, is_sparse_inputs=True, norm=True)
        self.concat_layer = GASConcatenation()
        # Final linear classifier over the concatenated embeddings.
        self.x_init = tf.keras.initializers.GlorotUniform()
        self.u = tf.Variable(initial_value=self.x_init(shape=(((self.output_dim1 + (4 * self.output_dim2)) + self.output_dim3), self.class_size), dtype=tf.float32), trainable=True)

    def call(self, inputs: list, training: bool = True) -> Tuple[(tf.Tensor, tf.Tensor)]:
        """
        Forward propagation
        :param inputs: the information passed to next layers
        :param training: whether in the training mode
        :returns: (loss, accuracy) on the masked nodes
        """
        (support, r_support, features, r_feature, label, idx_mask) = inputs
        h_r = self.r_agg_layer((support, features))
        (h_u, h_i) = self.iu_agg_layer((support, features))
        # Fix: forward the caller's `training` flag instead of the
        # hard-coded True, so GCN dropout is disabled during evaluation.
        p_e = self.r_gcn_layer((r_feature, r_support), training=training)
        concat_vecs = [h_r, h_u, h_i, p_e]
        gas_out = self.concat_layer((support, concat_vecs))
        # Score only the masked (train/test) nodes.
        masked_data = tf.gather(gas_out, idx_mask)
        masked_label = tf.gather(label, idx_mask)
        logits = tf.nn.softmax(tf.matmul(masked_data, self.u))
        loss = (- tf.reduce_sum(tf.math.log(tf.nn.sigmoid((masked_label * logits)))))
        acc = accuracy(logits, masked_label)
        return (loss, acc)
|
def GAS_main(adj_list: list, r_support: list, features: tf.Tensor, r_feature: tf.SparseTensor, label: tf.Tensor, masks: list, args: argparse.ArgumentParser().parse_args()) -> None:
    """
    Main function to train and test the model

    :param adj_list:
        a list of Homogeneous graphs and a sparse adjacency matrices
    :param r_support: a sparse adjacency matrices
    :param features: node feature tuple for all nodes {coords, values, shape}
    :param r_feature: the feature of review
    :param label: the label tensor for all nodes
    :param masks: a list of mask tensors to obtain the train, val, test data
    :param args: additional parameters
    """
    model = GAS(args)
    optimizer = optimizers.Adam(lr=args.lr)
    train_inputs = [adj_list, r_support, features, r_feature, label, masks[0]]
    test_inputs = [adj_list, r_support, features, r_feature, label, masks[1]]
    for _ in tqdm(range(args.epochs)):
        with tf.GradientTape() as tape:
            train_loss, train_acc = model(train_inputs)
            print(f'train_loss: {train_loss:.4f}, train_acc: {train_acc:.4f}')
        gradients = tape.gradient(train_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # Final evaluation on the test mask.
    test_loss, test_acc = model(test_inputs)
    print(f'test_loss: {test_loss:.4f}, test_acc: {test_acc:.4f}')
|
class GEM(keras.Model):
    """The GEM model: multi-hop graph embedding over device-specific
    subgraphs, followed by a linear classifier.
    """

    def __init__(self, input_dim, output_dim, args):
        """
        :param input_dim: input feature dimension
        :param output_dim: node embedding dimension
        :param args: additional parameters (nodes_num, class_size,
            device_num, hop)
        """
        super().__init__()
        self.nodes_num = args.nodes_num
        self.class_size = args.class_size
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.device_num = args.device_num
        self.hop = args.hop
        # Initial (all-zero) node embeddings h_0, refined over `hop` steps.
        self.zero_init = tf.keras.initializers.Zeros()
        self.h_0 = tf.Variable(initial_value=self.zero_init(shape=(self.nodes_num, self.output_dim), dtype=tf.float32))
        # One GEM layer per hop: the first is kept separately so call()
        # can seed the chain with h_0; the rest live in layers_.
        self.layers_ = []
        self.input_layer = GEMLayer(self.nodes_num, self.input_dim, self.output_dim, self.device_num)
        for _ in range((self.hop - 1)):
            self.layers_.append(GEMLayer(self.nodes_num, self.input_dim, self.output_dim, self.device_num))
        self.x_init = tf.keras.initializers.GlorotUniform()
        # Classification projection from embedding space to class logits.
        self.u = tf.Variable(initial_value=self.x_init(shape=(self.output_dim, self.class_size), dtype=tf.float32), trainable=True)

    def call(self, inputs):
        """Forward propagation.

        :param inputs: (supports, x, label, idx_mask) — supports is a list
            of sparse adjacency tensors, x the feature tensor, label the
            label tensor, idx_mask the indices of the training nodes.
        :returns: (loss, accuracy) on the masked nodes.
        """
        (supports, x, label, idx_mask) = inputs
        # Propagate: h_0 -> input layer -> remaining (hop - 1) layers.
        outputs = [self.input_layer((x, supports, self.h_0))]
        for layer in self.layers_:
            hidden = layer((x, supports, outputs[(- 1)]))
            outputs.append(hidden)
        gem_out = outputs[(- 1)]
        # Score only the masked nodes.
        masked_data = tf.gather(gem_out, idx_mask)
        masked_label = tf.gather(label, idx_mask)
        logits = tf.nn.softmax(tf.matmul(masked_data, self.u))
        # NOTE(review): applies sigmoid on softmax outputs — unusual, but
        # kept as-is; the GAS model in this file uses the same formulation.
        loss = (- tf.reduce_sum(tf.math.log(tf.nn.sigmoid((masked_label * logits)))))
        acc = accuracy(logits, masked_label)
        return (loss, acc)
|
def GEM_main(supports: list, features: tf.SparseTensor, label: tf.Tensor, masks: list, args) -> None:
    """
    Train, validate and test the GEM model.

    :param supports: a list of the sparse adjacency matrix
    :param features: the feature of the sparse tensor for all nodes
    :param label: the label tensor for all nodes
    :param masks: a list of mask tensors to obtain the train-val-test data
    :param args: additional parameters
    """
    model = GEM(args.input_dim, args.output_dim, args)
    optimizer = optimizers.Adam(lr=args.lr)
    train_inputs = [supports, features, label, masks[0]]
    val_inputs = [supports, features, label, masks[1]]
    test_inputs = [supports, features, label, masks[2]]
    for _ in tqdm(range(args.epochs)):
        # Gradient step on the training mask.
        with tf.GradientTape() as tape:
            train_loss, train_acc = model(train_inputs)
        gradients = tape.gradient(train_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        # Per-epoch validation report.
        val_loss, val_acc = model(val_inputs)
        print(f'train_loss: {train_loss:.4f}, train_acc: {train_acc:.4f},val_loss: {val_loss:.4f},val_acc: {val_acc:.4f}')
    test_loss, test_acc = model(test_inputs)
    print(f'test_loss: {test_loss:.4f}, test_acc: {test_acc:.4f}')
|
class GraphConsis(keras.Model):
    """
    The GraphConsis model
    """

    def __init__(self, features_dim: int, internal_dim: int, num_layers: int, num_classes: int, num_relations: int) -> None:
        """
        :param int features_dim: input dimension
        :param int internal_dim: hidden layer dimension
        :param int num_layers: number of sample layer
        :param int num_classes: number of node classes
        :param int num_relations: number of relations
        """
        super().__init__()
        self.seq_layers = []
        # Attention vector over concatenated (node, relation) embeddings.
        self.attention_vec = tf.Variable(tf.random.uniform([(2 * internal_dim), 1], dtype=tf.float32))
        # One trainable embedding vector per relation.
        self.relation_vectors = tf.Variable(tf.random.uniform([num_relations, internal_dim], dtype=tf.float32))
        # Stack of mean aggregators; only the first consumes raw features.
        for i in range(1, (num_layers + 1)):
            layer_name = ('agg_lv' + str(i))
            input_dim = (internal_dim if (i > 1) else features_dim)
            aggregator_layer = ConsisMeanAggregator(input_dim, internal_dim, name=layer_name)
            self.seq_layers.append(aggregator_layer)
        self.classifier = tf.keras.layers.Dense(num_classes, activation=tf.nn.softmax, use_bias=False, kernel_initializer=init_fn, name='classifier')

    def call(self, minibatchs: namedtuple, features: tf.Tensor) -> tf.Tensor:
        """
        Forward propagation
        :param minibatchs: minibatch list of each relation
        :param features: 2d features of nodes
        """
        xs = []
        for (i, minibatch) in enumerate(minibatchs):
            # Fix: wrap features in tf.constant, not a fresh tf.Variable on
            # every forward call — consistent with GraphSage.call and
            # avoids allocating a new (untracked) variable per invocation.
            x = tf.gather(tf.constant(features, dtype=float), tf.squeeze(minibatch.src_nodes))
            for aggregator_layer in self.seq_layers:
                x = aggregator_layer(x, minibatch.dstsrc2srcs.pop(), minibatch.dstsrc2dsts.pop(), minibatch.dif_mats.pop(), tf.nn.embedding_lookup(self.relation_vectors, i), self.attention_vec)
            xs.append(x)
        # Sum relation-specific embeddings, L2-normalize, then classify.
        return self.classifier(tf.nn.l2_normalize(tf.reduce_sum(tf.stack(xs, 1), axis=1, keepdims=False), 1))
|
class GraphSage(tf.keras.Model):
    """GraphSage model: stacked mean aggregators followed by a softmax
    classifier over node embeddings.
    """

    def __init__(self, features_dim, internal_dim, num_layers, num_classes):
        """
        :param int features_dim: input dimension
        :param int internal_dim: hidden layer dimension
        :param int num_layers: number of sample layer
        :param int num_classes: number of node classes
        """
        super().__init__()
        self.seq_layers = []
        # One aggregator per hop; only the first consumes raw features.
        for depth in range(1, num_layers + 1):
            in_dim = features_dim if depth == 1 else internal_dim
            self.seq_layers.append(
                SageMeanAggregator(in_dim, internal_dim,
                                   name='agg_lv' + str(depth), activ=True))
        self.classifier = tf.keras.layers.Dense(num_classes,
                                                activation=tf.nn.softmax,
                                                use_bias=False,
                                                kernel_initializer=init_fn,
                                                name='classifier')

    def call(self, minibatch, features):
        """
        :param namedtuple minibatch: minibatch of target nodes
        :param tensor features: 2d features of nodes
        """
        # Start from the raw features of all sampled source nodes.
        hidden = tf.gather(tf.constant(features, dtype=float),
                           tf.squeeze(minibatch.src_nodes))
        # Aggregate one hop per layer, consuming the sampling plan in order.
        for agg in self.seq_layers:
            hidden = agg(hidden,
                         minibatch.dstsrc2srcs.pop(),
                         minibatch.dstsrc2dsts.pop(),
                         minibatch.dif_mats.pop())
        return self.classifier(hidden)
|
class Player2Vec(keras.Model):
    """
    The Player2Vec model: a shared two-layer GCN applied per view, with
    attention fusing the per-view embeddings.
    """

    def __init__(self, input_dim: int, nhid: int, output_dim: int, args: argparse.Namespace) -> None:
        """
        :param input_dim: the input feature dimension
        :param nhid: the output embedding dimension of the first GCN layer
        :param output_dim: the output embedding dimension of the last GCN layer
            (number of classes)
        :param args: additional parameters
        """
        super().__init__()
        self.input_dim = input_dim
        self.nodes = args.nodes
        self.nhid = nhid
        self.class_size = args.class_size
        self.train_size = args.train_size
        self.output_dim = output_dim
        # num_meta: number of meta-path views fused by the attention layer.
        self.num_meta = args.num_meta
        self.weight_decay = args.weight_decay
        self.num_features_nonzero = args.num_features_nonzero
        # Shared two-layer GCN applied independently to every view.
        self.GCN_layers = []
        self.GCN_layers.append(GraphConvolution(input_dim=self.input_dim, output_dim=self.nhid, num_features_nonzero=self.num_features_nonzero, activation=tf.nn.relu, dropout=args.dropout, is_sparse_inputs=True, norm=True))
        self.GCN_layers.append(GraphConvolution(input_dim=self.nhid, output_dim=self.output_dim, num_features_nonzero=self.num_features_nonzero, activation=(lambda x: x), dropout=args.dropout, norm=False))
        # Attention that fuses the flattened per-view GCN outputs.
        self.att_layer = AttentionLayer(input_dim=output_dim, num_nodes=self.nodes, attention_size=self.num_meta, v_type='tanh')

    def call(self, inputs: list, training: bool=True) -> Tuple[(tf.Tensor, tf.Tensor)]:
        """
        Forward propagation
        :param inputs: (supports, features, label, mask)
        :param training: whether in the training mode
        :returns: (masked cross-entropy loss plus L2 penalty, masked accuracy)
        """
        (supports, x, label, mask) = inputs
        outputs = []
        # Run the shared GCN stack once per view (one support per view).
        for i in range(len(supports)):
            output = [x]
            for layer in self.GCN_layers:
                hidden = layer((output[(- 1)], [supports[i]]), training)
                output.append(hidden)
            output = output[(- 1)]
            outputs.append(output)
        # Stack per-view embeddings (flattened per view) and fuse them.
        outputs = tf.reshape(outputs, [len(supports), (self.nodes * self.output_dim)])
        outputs = self.att_layer(inputs=outputs)
        outputs = tf.reshape(outputs, [self.nodes, self.output_dim])
        # L2 weight decay over both the GCN and the attention weights.
        loss = tf.zeros([])
        for layer in self.GCN_layers:
            for var in layer.trainable_variables:
                loss += (self.weight_decay * tf.nn.l2_loss(var))
        for var in self.att_layer.trainable_variables:
            loss += (self.weight_decay * tf.nn.l2_loss(var))
        loss += masked_softmax_cross_entropy(outputs, label, mask)
        acc = masked_accuracy(outputs, label, mask)
        return (loss, acc)
|
def Player2Vec_main(support: list, features: tf.SparseTensor, label: tf.Tensor, masks: list, args: argparse.ArgumentParser().parse_args()) -> None:
    """
    Main function to train, val and test the model

    :param support: a list of the sparse adjacency matrices
    :param features: node feature tuple for all nodes {coords, values, shape}
    :param label: the label tensor for all nodes
    :param masks: a list of mask tensors to obtain the train, val, test data
    :param args: additional parameters
    """
    model = Player2Vec(args.input_dim, args.nhid, args.output_dim, args)
    optimizer = optimizers.Adam(lr=args.lr)
    train_inputs = [support, features, label, masks[0]]
    val_inputs = [support, features, label, masks[1]]
    test_inputs = [support, features, label, masks[2]]
    for epoch in tqdm(range(args.epochs)):
        # One optimization step on the training mask.
        with tf.GradientTape() as tape:
            train_loss, train_acc = model(train_inputs)
        gradients = tape.gradient(train_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        # Per-epoch validation report.
        val_loss, val_acc = model(val_inputs)
        print(f'Epoch: {epoch:d}, train_loss: {train_loss:.4f}, train_acc: {train_acc:.4f},val_loss: {val_loss:.4f}, val_acc: {val_acc:.4f}')
    test_loss, test_acc = model(test_inputs)
    print(f'test_loss: {test_loss:.4f}, test_acc: {test_acc:.4f}')
|
class SemiGNN(keras.Model):
    """
    The SemiGNN model: per-view node attention, view-level attention, an
    MLP projection, and a joint supervised + graph (skip-gram style) loss.
    """

    def __init__(self, nodes: int, class_size: int, semi_encoding1: int, semi_encoding2: int, semi_encoding3: int, init_emb_size: int, view_num: int, alpha: float) -> None:
        """
        :param nodes: total nodes number
        :param class_size: number of node classes
        :param semi_encoding1: the first view attention layer unit number
        :param semi_encoding2: the second view attention layer unit number
        :param semi_encoding3: MLP layer unit number
        :param init_emb_size: the initial node embedding
        :param view_num: number of views
        :param alpha: the coefficient of loss function
        """
        super().__init__()
        self.nodes = nodes
        self.class_size = class_size
        self.semi_encoding1 = semi_encoding1
        self.semi_encoding2 = semi_encoding2
        self.semi_encoding3 = semi_encoding3
        self.init_emb_size = init_emb_size
        self.view_num = view_num
        self.alpha = alpha
        # Trainable initial node embeddings shared by all views.
        self.x_init = tf.keras.initializers.GlorotUniform()
        self.emb = tf.Variable(initial_value=self.x_init(shape=(self.nodes, self.init_emb_size), dtype=tf.float32), trainable=True)
        # One node-attention layer per view.
        self.node_att_layer = []
        for _ in range(view_num):
            self.node_att_layer.append(NodeAttention(input_dim=init_emb_size))
        encoding = [self.semi_encoding1, self.semi_encoding2]
        self.view_att_layer = ViewAttention(layer_size=len(encoding), view_num=self.view_num, encoding=encoding)
        # Final MLP projection and classification matrix theta.
        self.olp = tf.keras.layers.Dense(self.semi_encoding3)
        self.theta = tf.Variable(initial_value=self.x_init(shape=(self.semi_encoding3, self.class_size), dtype=tf.float32), trainable=True)

    def call(self, inputs: list, training: bool=True) -> Tuple[(tf.Tensor, tf.Tensor)]:
        """
        Forward propagation
        :param inputs: (adj_data, u_i, u_j, graph_label, label, idx_mask)
        :param training: whether in the training mode
        :returns: (combined supervised + graph loss, accuracy on the mask)
        """
        (adj_data, u_i, u_j, graph_label, label, idx_mask) = inputs
        # Node-level attention per view over the shared embeddings.
        h1 = []
        for v in range(self.view_num):
            h = self.node_att_layer[v]([self.emb, adj_data[v]])
            h = tf.reshape(h, [self.nodes, self.emb.shape[1]])
            h1.append(h)
        h1 = tf.concat(h1, 0)
        h1 = tf.reshape(h1, [self.view_num, self.nodes, self.init_emb_size])
        # View-level attention fuses the per-view representations.
        h2 = self.view_att_layer(h1)
        a_u = self.olp(h2)
        # Supervised loss on the masked (labeled) nodes.
        masked_data = tf.gather(a_u, idx_mask)
        masked_label = tf.gather(label, idx_mask)
        logits = tf.matmul(masked_data, self.theta)
        loss1 = ((- (1 / len(idx_mask))) * tf.reduce_sum((masked_label * tf.math.log(tf.nn.softmax(logits)))))
        # Unsupervised graph loss on sampled (u_i, u_j) pairs with +1/-1
        # labels (negative sampling).
        u_i_embedding = tf.nn.embedding_lookup(a_u, tf.cast(u_i, dtype=tf.int32))
        u_j_embedding = tf.nn.embedding_lookup(a_u, tf.cast(u_j, dtype=tf.int32))
        inner_product = tf.reduce_sum((u_i_embedding * u_j_embedding), axis=1)
        loss2 = (- tf.reduce_mean(tf.math.log_sigmoid((graph_label * inner_product))))
        # alpha balances the supervised and graph objectives.
        loss = ((self.alpha * loss1) + ((1 - self.alpha) * loss2))
        acc = accuracy(logits, masked_label)
        return (loss, acc)
|
def SemiGNN_main(adj_list: list, label: tf.Tensor, masks: list, args: argparse.Namespace) -> None:
    """
    Main function to train and test the SemiGNN model.

    :param adj_list: a list of the sparse adjacency matrices
    :param label: the label tensor for all nodes
    :param masks: a list of mask tensors to obtain the train, val, test data
    :param args: parsed command-line arguments (argparse.Namespace)
    """
    # NOTE: the previous annotation `argparse.ArgumentParser().parse_args()`
    # was *evaluated at function-definition time*, building a parser and
    # consuming sys.argv on import; argparse.Namespace is the intended type.
    model = SemiGNN(args.nodes, args.class_size, args.semi_encoding1,
                    args.semi_encoding2, args.semi_encoding3,
                    args.init_emb_size, args.view_num, args.alpha)
    optimizer = optimizers.Adam(lr=args.lr)

    # Build per-view random-walk pairs and the negative-sampling triples.
    adj_nodelists = [matrix_to_adjlist(adj, pad=False) for adj in adj_list]
    pairs = [random_walks(adj_nodelists[i], 2, 3) for i in range(args.view_num)]
    adj_data = [pairs_to_matrix(p, args.nodes) for p in pairs]
    u_i = []
    u_j = []
    graph_label = []
    for (adj_nodelist, p) in zip(adj_nodelists, pairs):
        (u_i_t, u_j_t, graph_label_t) = get_negative_sampling(p, adj_nodelist)
        u_i.append(u_i_t)
        u_j.append(u_j_t)
        graph_label.append(graph_label_t)
    # Concatenate the per-view arrays directly; wrapping the list in
    # np.array first would build a ragged object array whenever views yield
    # different numbers of sampled pairs.
    u_i = np.concatenate(u_i)
    u_j = np.concatenate(u_j)
    graph_label = tf.convert_to_tensor(np.concatenate(graph_label), dtype=tf.float32)

    for epoch in range(args.epochs):
        with tf.GradientTape() as tape:
            (train_loss, train_acc) = model([adj_data, u_i, u_j, graph_label, label, masks[0]])
            print(f'train_loss: {train_loss:.4f}, train_acc: {train_acc:.4f}')
        grads = tape.gradient(train_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

    (test_loss, test_acc) = model([adj_data, u_i, u_j, graph_label, label, masks[1]])
    print(f'test_loss: {test_loss:.4f}, test_acc: {test_acc:.4f}')
|
def sparse_dropout(x: tf.SparseTensor, rate: float, noise_shape: int) -> tf.SparseTensor:
    """
    Dropout for sparse tensors.

    :param x: the input sparse tensor
    :param rate: the dropout rate
    :param noise_shape: the number of non-zero entries (shape of the noise)
    """
    # floor(uniform[0,1) + keep_prob) is 1 with probability keep_prob.
    keep_scores = tf.random.uniform(noise_shape) + (1 - rate)
    keep_mask = tf.cast(tf.floor(keep_scores), dtype=tf.bool)
    survivors = tf.sparse.retain(x, keep_mask)
    # Rescale so the expected value is unchanged.
    return survivors * (1.0 / (1 - rate))
|
def dot(x: tf.Tensor, y: tf.Tensor, sparse: bool=False) -> tf.Tensor:
    """
    Wrapper for matrix multiplication (sparse vs dense).

    :param x: first tensor
    :param y: second tensor
    :param sparse: whether the first tensor is of type tf.SparseTensor
    """
    if sparse:
        return tf.sparse.sparse_dense_matmul(x, y)
    return tf.matmul(x, y)
|
class GraphConvolution(layers.Layer):
    """
    Graph convolution layer.
    Source: https://github.com/dragen1860/GCN-TF2/blob/master/layers.py

    :param input_dim: the input feature dimension
    :param output_dim: the output dimension (number of classes)
    :param num_features_nonzero: the node feature dimension
    :param dropout: the dropout rate
    :param is_sparse_inputs: whether the input feature/adj are sparse matrices
    :param activation: the activation function
    :param norm: whether adding L2-normalization to the output
    :param bias: whether adding a bias term to the output
    :param featureless: whether the input has features
    """

    def __init__(self, input_dim: int, output_dim: int, num_features_nonzero: int,
                 dropout: float=0.0, is_sparse_inputs: bool=False,
                 activation: Callable[[tf.Tensor], tf.Tensor]=tf.nn.relu,
                 norm: bool=False, bias: bool=False, featureless: bool=False,
                 **kwargs: Optional) -> None:
        super(GraphConvolution, self).__init__(**kwargs)
        self.dropout = dropout
        self.activation = activation
        self.is_sparse_inputs = is_sparse_inputs
        self.featureless = featureless
        # Keep the boolean flag and the bias variable in *separate*
        # attributes: the original stored the weight tensor into self.bias,
        # which made the later `if self.bias:` truthiness check raise on a
        # multi-element Variable whenever bias=True.
        self.use_bias = bias
        self.norm = norm
        self.num_features_nonzero = num_features_nonzero
        self.weights_ = []
        # A single support weight is created (matching the original code);
        # call() indexes weights_ per support, so len(support_) must be 1.
        for i in range(1):
            w = self.add_weight('weight' + str(i), [input_dim, output_dim], dtype=tf.float32)
            self.weights_.append(w)
        self.bias = None
        if self.use_bias:
            self.bias = self.add_weight('bias', [output_dim], dtype=tf.float32)

    def call(self, inputs: Tuple[(tf.Tensor, tf.Tensor)], training: bool=True) -> tf.Tensor:
        """
        Forward propagation.

        :param inputs: an (x, support_) pair: node features and the list of
            sparse adjacency supports
        :param training: whether in the training mode (enables dropout)
        """
        (x, support_) = inputs
        if (training is not False) and self.is_sparse_inputs:
            x = sparse_dropout(x, self.dropout, self.num_features_nonzero)
        elif training is not False:
            x = tf.nn.dropout(x, self.dropout)
        # One propagated term per support; featureless inputs skip XW.
        supports = list()
        for i in range(len(support_)):
            if not self.featureless:
                pre_sup = dot(x, self.weights_[i], sparse=self.is_sparse_inputs)
            else:
                pre_sup = self.weights_[i]
            support = dot(support_[i], pre_sup, sparse=True)
            supports.append(support)
        output = tf.add_n(supports)
        # Batch-normalize over all but the last axis (no learned scale/offset).
        axis = list(range(len(output.get_shape()) - 1))
        (mean, variance) = tf.nn.moments(output, axis)
        scale = None
        offset = None
        variance_epsilon = 0.001
        output = tf.nn.batch_normalization(output, mean, variance, offset, scale, variance_epsilon)
        if self.use_bias:
            output += self.bias
        if self.norm:
            return tf.nn.l2_normalize(self.activation(output), axis=None, epsilon=1e-12)
        return self.activation(output)
|
class AttentionLayer(layers.Layer):
    """ AttentionLayer is a function f : hkey x Hval -> hval which maps
    a feature vector hkey and the set of candidates' feature vectors
    Hval to a weighted sum of elements in Hval.

    :param input_dim: the input dimension
    :param num_nodes: the number of nodes
    :param attention_size: the number of meta_paths
    :param v_type: activation function type ('relu' or 'tanh')
    :param bias: whether to add a bias term
    """

    def __init__(self, input_dim: int, num_nodes: int, attention_size: int,
                 v_type: str='relu', bias: bool=True, **kwargs: Optional) -> None:
        super().__init__(**kwargs)
        self.w_omega = tf.Variable(tf.random.uniform([num_nodes * input_dim, attention_size]))
        self.u_omega = tf.Variable(tf.random.uniform([attention_size]))
        self.bias = bias
        self.activation = v_type
        if self.bias:
            self.b_omega = tf.Variable(tf.random.uniform([attention_size]))

    def call(self, inputs: tf.Tensor, return_weights: bool=False,
             joint_type: str='weighted_sum', multi_view: bool=True) -> Union[(tf.Tensor, Tuple[(tf.Tensor, tf.Tensor)])]:
        """
        Obtain attention values between different meta-paths.

        :param inputs: the information passed to next layers
        :param return_weights: whether to also return the attention weights
        :param joint_type: 'weighted_sum' or 'concatenation'
        :param multi_view: whether the input stacks multiple views
        :raises ValueError: if joint_type is not a recognised option
        """
        if multi_view:
            inputs = tf.expand_dims(inputs, 0)
        v = tf.tensordot(inputs, self.w_omega, axes=1)
        if self.bias:
            v += self.b_omega
        if self.activation == 'tanh':
            v = tf.tanh(v)
        if self.activation == 'relu':
            v = tf.nn.relu(v)
        vu = tf.tensordot(v, self.u_omega, axes=1, name='vu')
        alpha = tf.nn.softmax(vu)
        if joint_type == 'weighted_sum':
            output = tf.reduce_sum(inputs * tf.expand_dims(alpha, -1), 1)
        elif joint_type == 'concatenation':
            output = tf.concat(inputs * tf.expand_dims(alpha, -1), 2)
        else:
            # The original silently fell through to a NameError on `output`;
            # fail fast with a descriptive message instead.
            raise ValueError(f'unknown joint_type: {joint_type!r}')
        if not return_weights:
            return output
        else:
            return (output, alpha)
|
class NodeAttention(layers.Layer):
    """ Node level attention for SemiGNN.

    :param input_dim: the input dimension
    """

    def __init__(self, input_dim: int, **kwargs: Optional) -> None:
        super().__init__(**kwargs)
        # Projection vector used to score every node embedding.
        self.H_v = tf.Variable(tf.random.normal([input_dim, 1], stddev=0.1))

    def call(self, inputs: list, return_weights: bool=False) -> Union[(tf.Tensor, Tuple[(tf.Tensor, tf.Tensor)])]:
        """
        Obtain attention value between nodes.

        :param inputs: an [embedding, dense adjacency matrix] pair
        :param return_weights: whether to also return the attention weights
        """
        (emb, adj) = inputs
        # Convert the dense adjacency matrix into a tf.SparseTensor holding
        # only its non-zero entries.
        zero = tf.constant(0, dtype=tf.float32)
        where = tf.not_equal(adj, zero)
        indices = tf.where(where)
        values = tf.gather_nd(adj, indices)
        adj = tf.SparseTensor(indices=indices, values=values, dense_shape=adj.shape)
        # Score each node with H_v and weight the sparse adjacency by the
        # scores (sparse * dense keeps the sparsity pattern).
        v = (tf.cast(adj, tf.float32) * tf.squeeze(tf.tensordot(emb, self.H_v, axes=1)))
        # Sparse softmax normalises scores over each node's neighbours.
        alpha = tf.sparse.softmax(v)
        # Aggregate neighbour embeddings with the attention weights.
        output = tf.sparse.sparse_dense_matmul(alpha, emb)
        if (not return_weights):
            return output
        else:
            return (output, alpha)
|
class ViewAttention(layers.Layer):
    """ View level attention implementation for SemiGNN
    (Eq. (3) and Eq. (4) in the paper).

    :param encoding: a list of MLP encoding sizes for each view
    :param layer_size: the number of dense layers in each view's MLP
    :param view_num: the number of views
    """

    def __init__(self, encoding: list, layer_size: int, view_num: int, **kwargs: Optional) -> None:
        super().__init__(**kwargs)
        self.encoding = encoding
        self.view_num = view_num
        self.layer_size = layer_size
        # One small ReLU MLP per view.
        self.dense_layers = []
        for _ in range(view_num):
            view_mlp = Sequential()
            for depth in range(layer_size):
                view_mlp.add(tf.keras.layers.Dense(encoding[depth], activation='relu'))
            self.dense_layers.append(view_mlp)
        # One scoring vector phi per view.
        self.phi = [tf.Variable(tf.random.normal([encoding[-1], 1], stddev=0.1))
                    for _ in range(view_num)]

    def call(self, inputs: tf.Tensor, return_weights=False) -> Union[(tf.Tensor, Tuple[(tf.Tensor, tf.Tensor)])]:
        """
        Obtain attention values between different views.

        :param inputs: per-view node embeddings, shape [view_num, nodes, dim]
        :param return_weights: whether to also return the attention weights
        """
        encoded = []
        scores = []
        for view_idx in range(self.view_num):
            h_v = self.dense_layers[view_idx](inputs[view_idx])
            encoded.append(h_v)
            scores.append(tf.matmul(h_v, self.phi[view_idx]))
        num_nodes = inputs[0].shape[0]
        last_dim = self.encoding[-1]
        h = tf.reshape(tf.concat(encoded, 0), [self.view_num, num_nodes, last_dim])
        h_phi = tf.reshape(tf.concat(scores, 0), [self.view_num, num_nodes])
        # Softmax over the view axis, then broadcast back over features.
        alpha = tf.nn.softmax(h_phi, axis=0)
        alpha = tf.repeat(tf.expand_dims(alpha, axis=2), last_dim, axis=-1)
        output = tf.reshape(alpha * h, [num_nodes, last_dim * self.view_num])
        if return_weights:
            return (output, alpha)
        return output
|
def scaled_dot_product_attention(q: tf.Tensor, k: tf.Tensor, v: tf.Tensor) -> Tuple[(tf.Tensor, tf.Tensor)]:
    """
    Scaled dot-product attention within one embedding.

    :param q: query embedding
    :param k: key embedding
    :param v: value embedding (after aggregating neighbour features)
    :return: (attended output, attention weights)
    """
    key_dim = tf.cast(tf.shape(k)[-1], tf.float32)
    logits = tf.matmul(q, k, transpose_b=True) / tf.math.sqrt(key_dim)
    # Constant shift kept from the original implementation; softmax is
    # shift-invariant, so the weights are unchanged by it.
    logits += 1
    weights = tf.nn.softmax(logits, axis=-1)
    attended = tf.matmul(weights, v)
    return (attended, weights)
|
class ConcatenationAggregator(layers.Layer):
    """This layer equals to the equation (3) in
    paper 'Spam Review Detection with Graph Convolutional Networks.'
    """

    def __init__(self, input_dim, output_dim, dropout=0.0, act=tf.nn.relu, concat=False, **kwargs):
        """
        :param input_dim: the dimension of input
        :param output_dim: the dimension of output
        """
        super(ConcatenationAggregator, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.dropout = dropout
        self.act = act
        self.concat = concat
        self.con_agg_weights = self.add_weight('con_agg_weights', [input_dim, output_dim], dtype=tf.float32)

    def call(self, inputs):
        """
        :param inputs: an (adj_list, features) pair, where features holds the
            review / user / item vector matrices
        """
        (adj_list, features) = inputs
        # Dropout on each node-type feature matrix (same order as before so
        # the RNG stream is consumed identically).
        review_vecs = tf.nn.dropout(features[0], self.dropout)
        user_vecs = tf.nn.dropout(features[1], self.dropout)
        item_vecs = tf.nn.dropout(features[2], self.dropout)
        # Item/user vectors attached to every review; the transpose /
        # shuffle / transpose sequence shuffles the feature columns.
        item_of_review = tf.nn.embedding_lookup(item_vecs, tf.cast(adj_list[5], dtype=tf.int32))
        item_of_review = tf.transpose(tf.random.shuffle(tf.transpose(item_of_review)))
        user_of_review = tf.nn.embedding_lookup(user_vecs, tf.cast(adj_list[4], dtype=tf.int32))
        user_of_review = tf.transpose(tf.random.shuffle(tf.transpose(user_of_review)))
        combined = tf.concat([review_vecs, user_of_review, item_of_review], axis=1)
        return self.act(dot(combined, self.con_agg_weights, sparse=False))
|
class SageMeanAggregator(layers.Layer):
    """ GraphSAGE Mean Aggregation Layer
    Parts of this code file were originally forked from
    https://github.com/subbyte/graphsage-tf2
    """

    def __init__(self, src_dim, dst_dim, activ=True, **kwargs):
        """
        :param int src_dim: input dimension
        :param int dst_dim: output dimension
        :param bool activ: apply ReLU if True, identity otherwise
        """
        super().__init__(**kwargs)
        self.activ_fn = tf.nn.relu if activ else tf.identity
        # Pass an initializer *instance*: handing over the bare GlorotUniform
        # class makes Keras invoke the class itself with (shape, dtype=...),
        # which is not a valid constructor signature on most TF versions.
        self.w = self.add_weight(name=(kwargs['name'] + '_weight'),
                                 shape=(src_dim * 2, dst_dim),
                                 dtype=tf.float32,
                                 initializer=GlorotUniform(),
                                 trainable=True)

    def call(self, dstsrc_features, dstsrc2src, dstsrc2dst, dif_mat):
        """
        :param tensor dstsrc_features: the embedding from the previous layer
        :param tensor dstsrc2src: 1d index mapping (prepared by minibatch generator)
        :param tensor dstsrc2dst: 1d index mapping (prepared by minibatch generator)
        :param tensor dif_mat: 2d diffusion matrix (prepared by minibatch generator)
        """
        dst_features = tf.gather(dstsrc_features, dstsrc2dst)
        src_features = tf.gather(dstsrc_features, dstsrc2src)
        # Mean-aggregate the sampled neighbourhood via the diffusion matrix,
        # then concatenate with the destination node's own features.
        aggregated_features = tf.matmul(dif_mat, src_features)
        concatenated_features = tf.concat([aggregated_features, dst_features], 1)
        x = tf.matmul(concatenated_features, self.w)
        return self.activ_fn(x)
|
class ConsisMeanAggregator(SageMeanAggregator):
    """ GraphConsis Mean Aggregation Layer, inherits SageMeanAggregator.
    Parts of this code file were originally forked from
    https://github.com/subbyte/graphsage-tf2
    """

    def __init__(self, src_dim, dst_dim, **kwargs):
        """
        :param int src_dim: input dimension
        :param int dst_dim: output dimension
        """
        super().__init__(src_dim, dst_dim, activ=False, **kwargs)

    def __call__(self, dstsrc_features, dstsrc2src, dstsrc2dst, dif_mat, relation_vec, attention_vec):
        """
        :param tensor dstsrc_features: the embedding from the previous layer
        :param tensor dstsrc2src: 1d index mapping (prepared by minibatch generator)
        :param tensor dstsrc2dst: 1d index mapping (prepared by minibatch generator)
        :param tensor dif_mat: 2d diffusion matrix (prepared by minibatch generator)
        :param tensor relation_vec: 1d corresponding relation vector
        :param tensor attention_vec: 1d layer-shared attention weights vector
        """
        # Plain GraphSAGE mean aggregation first (no activation).
        features = super().__call__(dstsrc_features, dstsrc2src, dstsrc2dst, dif_mat)
        # Relation-aware attention: score [h || relation] against the shared
        # attention vector, then rescale every aggregated feature by it.
        tiled_relation = tf.tile([relation_vec], [features.shape[0], 1])
        score = tf.matmul(tf.concat([features, tiled_relation], 1), attention_vec)
        score = tf.tile(score, [1, features.shape[-1]])
        return tf.multiply(score, features)
|
class AttentionAggregator(layers.Layer):
    """This layer equals to equation (5) and equation (8) in
    paper 'Spam Review Detection with Graph Convolutional Networks.'
    """

    def __init__(self, input_dim1, input_dim2, input_dim3, input_dim4, output_dim,
                 dropout=0.0, bias=False, act=tf.nn.relu, concat=False, **kwargs):
        """
        :param input_dim1: input dimension in user layer
        :param input_dim2: input dimension in item layer
        :param input_dim3: input dimension of center user vectors
        :param input_dim4: input dimension of center item vectors
        :param output_dim: output dimension
        """
        super(AttentionAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        # Record dimensions *before* creating weights: the bias branch below
        # reads self.output_dim, which the original code assigned only after
        # the branch (AttributeError whenever bias=True).
        self.input_dim1 = input_dim1
        self.input_dim2 = input_dim2
        self.output_dim = output_dim
        self.user_neigh_weights = self.add_weight('user_neigh_weights', [input_dim1, output_dim], dtype=tf.float32)
        self.item_neigh_weights = self.add_weight('item_neigh_weights', [input_dim2, output_dim], dtype=tf.float32)
        self.center_user_weights = self.add_weight('center_user_weights', [input_dim3, output_dim], dtype=tf.float32)
        self.center_item_weights = self.add_weight('center_item_weights', [input_dim4, output_dim], dtype=tf.float32)
        if self.bias:
            self.user_bias = self.add_weight('user_bias', [self.output_dim], dtype=tf.float32)
            self.item_bias = self.add_weight('item_bias', [self.output_dim], dtype=tf.float32)

    def call(self, inputs):
        """
        :param inputs: an (adj_list, features) pair with review / user / item
            vector matrices
        """
        (adj_list, features) = inputs
        review_vecs = tf.nn.dropout(features[0], self.dropout)
        user_vecs = tf.nn.dropout(features[1], self.dropout)
        item_vecs = tf.nn.dropout(features[2], self.dropout)
        # Neighbour lookups; transpose/shuffle/transpose shuffles columns.
        ur = tf.nn.embedding_lookup(review_vecs, tf.cast(adj_list[0], dtype=tf.int32))
        ur = tf.transpose(tf.random.shuffle(tf.transpose(ur)))
        ri = tf.nn.embedding_lookup(item_vecs, tf.cast(adj_list[1], dtype=tf.int32))
        ri = tf.transpose(tf.random.shuffle(tf.transpose(ri)))
        ir = tf.nn.embedding_lookup(review_vecs, tf.cast(adj_list[2], dtype=tf.int32))
        ir = tf.transpose(tf.random.shuffle(tf.transpose(ir)))
        ru = tf.nn.embedding_lookup(user_vecs, tf.cast(adj_list[3], dtype=tf.int32))
        ru = tf.transpose(tf.random.shuffle(tf.transpose(ru)))
        concate_user_vecs = tf.concat([ur, ri], axis=2)
        concate_item_vecs = tf.concat([ir, ru], axis=2)
        # Flatten the per-neighbour axis into the feature axis.
        s1 = tf.shape(concate_user_vecs)
        s2 = tf.shape(concate_item_vecs)
        concate_user_vecs = tf.reshape(concate_user_vecs, [s1[0], s1[1] * s1[2]])
        concate_item_vecs = tf.reshape(concate_item_vecs, [s2[0], s2[1] * s2[2]])
        # Self-attention over the center vectors, applied to the neighbours.
        (concate_user_vecs, _) = scaled_dot_product_attention(q=user_vecs, k=user_vecs, v=concate_user_vecs)
        (concate_item_vecs, _) = scaled_dot_product_attention(q=item_vecs, k=item_vecs, v=concate_item_vecs)
        user_output = dot(concate_user_vecs, self.user_neigh_weights, sparse=False)
        item_output = dot(concate_item_vecs, self.item_neigh_weights, sparse=False)
        if self.bias:
            user_output += self.user_bias
            item_output += self.item_bias
        user_output = self.act(user_output)
        item_output = self.act(item_output)
        if self.concat:
            # Optionally concatenate the transformed center vectors.
            user_vecs = dot(user_vecs, self.center_user_weights, sparse=False)
            item_vecs = dot(item_vecs, self.center_item_weights, sparse=False)
            user_output = tf.concat([user_vecs, user_output], axis=1)
            item_output = tf.concat([item_vecs, item_output], axis=1)
        return (user_output, item_output)
|
class GASConcatenation(layers.Layer):
    """GCN-based Anti-Spam (GAS) layer that concatenates the comment
    embedding learned by GCN from the Comment Graph with the other
    embeddings learned in previous operations.
    """

    def __init__(self, **kwargs):
        super(GASConcatenation, self).__init__(**kwargs)

    def __call__(self, inputs):
        """
        :param inputs: an (adj_list, concat_vecs) pair
        """
        (adj_list, concat_vecs) = inputs
        # Item and user embeddings attached to each review.
        item_neighbors = tf.nn.embedding_lookup(concat_vecs[2], tf.cast(adj_list[5], dtype=tf.int32))
        user_neighbors = tf.nn.embedding_lookup(concat_vecs[1], tf.cast(adj_list[4], dtype=tf.int32))
        return tf.concat([item_neighbors, concat_vecs[0], user_neighbors, concat_vecs[3]], axis=1)
|
class GEMLayer(layers.Layer):
    """
    This layer equals to the equation (8) in paper 'Heterogeneous Graph
    Neural Networks for Malicious Account Detection.'
    """

    def __init__(self, nodes_num, input_dim, output_dim, device_num, **kwargs):
        super(GEMLayer, self).__init__(**kwargs)
        self.nodes_num = nodes_num
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.device_num = device_num
        self.W = self.add_weight('weight', [input_dim, output_dim], dtype=tf.float32)
        self.V = self.add_weight('V', [output_dim, output_dim], dtype=tf.float32)
        # Per-device attention logits, softmax-normalised in call().
        self.alpha = self.add_weight('alpha', [self.device_num, 1], dtype=tf.float32)

    def call(self, inputs):
        """
        :param inputs: an (x, support_, h) triple where x is the sparse node
            feature tensor, support_ is a list of sparse adjacency tensors
            (one per device type) and h is the previous hidden state
        """
        (x, support_, h) = inputs
        # Feature projection XW (x is sparse).
        projected = dot(x, self.W, sparse=True)
        # One propagated term A_d h V per device type.
        per_device = [dot(dot(support_[d], h, sparse=True), self.V, sparse=False)
                      for d in range(self.device_num)]
        stacked = tf.reshape(tf.concat(per_device, 0),
                             [self.device_num, self.nodes_num * self.output_dim])
        stacked = tf.transpose(stacked, [1, 0])
        # Softmax-weighted sum over devices, reshaped back to node-matrix form.
        attended = tf.reshape(tf.matmul(stacked, tf.nn.softmax(self.alpha)),
                              [self.nodes_num, self.output_dim])
        return tf.nn.relu(projected + attended)
|
def load_example_data(meta: bool=False, data: str='dblp') -> Tuple[(list, np.array, list, np.array)]:
    """
    Loading a small handcrafted data for testing.

    :param meta: if True: it loads a HIN with two meta-graphs,
        if False: it loads a homogeneous graph
    :param data: the example data type, 'dblp' or 'yelp'
    """
    # Use the builtin float/int dtypes: the np.float / np.int aliases were
    # deprecated in NumPy 1.20 and removed in 1.24.
    features = np.array([[1, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 1], [1, 0, 1, 1, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 1]], dtype=float)
    if meta:
        rownetworks = [np.array([[1, 0, 0, 1, 0, 1, 1, 1], [1, 0, 0, 1, 1, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1, 1, 0], [0, 1, 1, 1, 0, 1, 0, 0], [1, 0, 0, 1, 1, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1, 1, 0]], dtype=float), np.array([[1, 0, 0, 0, 0, 1, 1, 1], [0, 1, 0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 1], [1, 1, 0, 1, 1, 0, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1], [1, 0, 0, 1, 1, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1]], dtype=float)]
    else:
        rownetworks = [np.array([[1, 0, 0, 1, 0, 1, 1, 1], [1, 0, 0, 1, 1, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1, 1, 0], [0, 1, 1, 1, 0, 1, 0, 0], [1, 0, 0, 1, 1, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1, 1, 0]], dtype=float)]
    if data == 'dblp':
        # One-hot labels for DBLP-style examples.
        y = np.array([[0, 1], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1]], dtype=float)
    else:
        # Integer labels for Yelp-style examples.
        y = np.array([0, 1, 1, 1, 1, 1, 1, 0], dtype=int)
    index = range(len(y))
    (X_train, X_test, y_train, y_test) = train_test_split(index, y, test_size=0.375, random_state=48, shuffle=True)
    (X_train, X_val, y_train, y_val) = train_test_split(X_train, y_train, test_size=0.2, random_state=48, shuffle=True)
    split_ids = [X_train, y_train, X_val, y_val, X_test, y_test]
    return (rownetworks, features, split_ids, y)
|
def load_data_dblp(path: str='dataset/DBLP4057_GAT_with_idx_tra200_val_800.mat', train_size: int=0.8, meta: bool=True) -> Tuple[(list, np.array, list, np.array)]:
    """
    The data loader to load the DBLP heterogeneous information network data.
    source: https://github.com/Jhy1993/HAN

    :param path: the local path of the dataset file
    :param train_size: the percentage of training data
    :param meta: if True: it loads a HIN with three meta-graphs,
        if False: it loads a homogeneous APA meta-graph
    """
    data = sio.loadmat(path)
    truelabels = data['label']
    features = data['features'].astype(float)
    n_nodes = features.shape[0]
    # Subtract the identity to drop self-loops from each meta-graph.
    if meta:
        rownetworks = [data[key] - np.eye(n_nodes) for key in ('net_APA', 'net_APCPA', 'net_APTPA')]
    else:
        rownetworks = [data['net_APA'] - np.eye(n_nodes)]
    y = truelabels
    index = np.arange(len(y))
    # Stratified train/test split, then carve a validation set out of train.
    (X_train, X_test, y_train, y_test) = train_test_split(index, y, stratify=y, test_size=(1 - train_size), random_state=48, shuffle=True)
    (X_train, X_val, y_train, y_val) = train_test_split(X_train, y_train, stratify=y_train, test_size=0.2, random_state=48, shuffle=True)
    split_ids = [X_train, y_train, X_val, y_val, X_test, y_test]
    return (rownetworks, features, split_ids, np.array(y))
|
def load_data_yelp(path: str='dataset/YelpChi.mat', train_size: int=0.8, meta: bool=True) -> Tuple[(list, np.array, list, np.array)]:
    """
    The data loader to load the Yelp heterogeneous information network data.
    source: http://odds.cs.stonybrook.edu/yelpchi-dataset

    :param path: the local path of the dataset file
    :param train_size: the percentage of training data
    :param meta: if True: it loads a HIN with three meta-graphs,
        if False: it loads a homogeneous rur meta-graph
    """
    data = sio.loadmat(path)
    truelabels = data['label']
    features = data['features'].astype(float)
    # Labels are stored as a 1xN matrix; flatten to a plain list.
    truelabels = truelabels.tolist()[0]
    if meta:
        rownetworks = [data['net_rur'], data['net_rsr'], data['net_rtr']]
    else:
        rownetworks = [data['net_rur']]
    y = truelabels
    index = np.arange(len(y))
    # Stratified train/test split, then carve a validation set out of train.
    (X_train, X_test, y_train, y_test) = train_test_split(index, y, stratify=y, test_size=(1 - train_size), random_state=48, shuffle=True)
    (X_train, X_val, y_train, y_val) = train_test_split(X_train, y_train, stratify=y_train, test_size=0.2, random_state=48, shuffle=True)
    split_ids = [X_train, y_train, X_val, y_val, X_test, y_test]
    return (rownetworks, features, split_ids, np.array(y))
|
def load_example_semi():
    """
    The data loader to load the example data for SemiGNN.

    :return: (adjacency matrices, features, [train_idx, test_idx], labels)
    """
    features = np.array([[1, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 1], [1, 0, 1, 1, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1]])
    # Two handcrafted views over the same 8 nodes.
    view_one = np.array([[1, 0, 0, 1, 0, 1, 1, 1], [1, 0, 0, 1, 1, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1, 1, 0], [0, 1, 1, 1, 0, 1, 0, 0], [1, 0, 0, 1, 1, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1, 1, 0]])
    view_two = np.array([[1, 0, 0, 0, 0, 1, 1, 1], [0, 1, 0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 1], [1, 1, 0, 1, 1, 0, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1], [1, 0, 0, 1, 1, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1]])
    rownetworks = [view_one, view_two]
    y = np.array([[0, 1], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1]])
    index = range(len(y))
    (X_train, X_test, y_train, y_test) = train_test_split(index, y, stratify=y, test_size=0.2, random_state=48, shuffle=True)
    split_ids = [X_train, X_test]
    return (rownetworks, features, split_ids, y)
|
def load_data_gas():
    """
    The data loader to load the example data for GAS.

    :return: (adjacency structures, features, [train_idx, test_idx], labels)
    """
    # Bipartite neighbour lists, padded to equal row length.
    user_review_adj = pad_adjlist([[0, 1], [2], [3], [5], [4, 6]])
    user_item_adj = pad_adjlist([[0, 1], [0], [0], [2], [1, 2]])
    item_review_adj = pad_adjlist([[0, 2, 3], [1, 4], [5, 6]])
    item_user_adj = pad_adjlist([[0, 1, 2], [0, 4], [3, 4]])
    # One-to-one mappings: the item / user of each review.
    review_item_adj = [0, 1, 0, 0, 1, 2, 2]
    review_user_adj = [0, 0, 1, 2, 4, 3, 4]
    review_vecs = np.array([[1, 0, 0, 1, 0], [1, 0, 0, 1, 1], [1, 0, 0, 0, 0], [0, 1, 0, 0, 1], [0, 1, 1, 1, 0], [0, 0, 1, 1, 1], [1, 1, 0, 1, 1]])
    user_vecs = np.array([[1, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 1]])
    item_vecs = np.array([[1, 0, 1, 1, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1]])
    features = [review_vecs, user_vecs, item_vecs]
    # Homogeneous review-review (comment) graph.
    homo_adj = [[1, 0, 0, 0, 1, 1, 1], [1, 0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1, 0], [1, 0, 1, 0, 0, 1, 0], [0, 1, 1, 1, 1, 0, 0], [0, 1, 1, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0, 0]]
    adjs = [user_review_adj, user_item_adj, item_review_adj, item_user_adj, review_user_adj, review_item_adj, homo_adj]
    y = np.array([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0], [1, 0], [0, 1]])
    index = range(len(y))
    (X_train, X_test, y_train, y_test) = train_test_split(index, y, stratify=y, test_size=0.4, random_state=48, shuffle=True)
    split_ids = [X_train, X_test]
    return (adjs, features, split_ids, y)
|
def masked_softmax_cross_entropy(preds: tf.Tensor, labels: tf.Tensor, mask: tf.Tensor) -> tf.Tensor:
    """
    Softmax cross-entropy loss with masking.

    :param preds: the last layer logits of the input data
    :param labels: the labels of the input data
    :param mask: the mask for train/val/test data
    """
    per_node_loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
    mask = tf.cast(mask, dtype=tf.float32)
    # Normalise the mask by its sum (guarding against an all-zero mask).
    mask /= tf.maximum(tf.reduce_sum(mask), tf.constant([1.0]))
    return tf.reduce_mean(per_node_loss * mask)
|
def masked_accuracy(preds: tf.Tensor, labels: tf.Tensor, mask: tf.Tensor) -> tf.Tensor:
    """
    Accuracy with masking.

    :param preds: the class prediction probabilities of the input data
    :param labels: the labels of the input data
    :param mask: the mask for train/val/test data
    """
    hits = tf.cast(tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1)), tf.float32)
    mask = tf.cast(mask, dtype=tf.float32)
    # Rescale so the mean over all nodes equals the mean over masked nodes.
    mask /= tf.reduce_mean(mask)
    return tf.reduce_mean(hits * mask)
|
def accuracy(preds: tf.Tensor, labels: tf.Tensor) -> tf.Tensor:
    """
    Accuracy.

    :param preds: the class prediction probabilities of the input data
    :param labels: the labels of the input data
    """
    hits = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))
    return tf.reduce_sum(tf.cast(hits, tf.float32)) / preds.shape[0]
|
def _report_clustering(title, y, y_pred):
    """Print ACC/NMI/ARI (rounded to 5 decimals) for one clustering result."""
    acc = np.round(cluster_acc(y, y_pred), 5)
    nmi = np.round(metrics.normalized_mutual_info_score(y, y_pred), 5)
    ari = np.round(metrics.adjusted_rand_score(y, y_pred), 5)
    print(title)
    print('=' * 80)
    print(acc)
    print(nmi)
    print(ari)
    print('=' * 80)

def eval_other_methods(x, y, names=None):
    """
    Cluster the raw features with GMM / K-Means / Spectral Clustering, then
    repeat on a manifold embedding of the features (selected via the global
    `args.manifold_learner`), printing ACC/NMI/ARI for each combination.

    :param x: the raw feature matrix
    :param y: the ground-truth cluster labels
    :param names: optional mapping from label ids to display names (plots)
    """
    # --- clustering on the raw features ------------------------------------
    gmm = mixture.GaussianMixture(covariance_type='full', n_components=args.n_clusters, random_state=0)
    gmm.fit(x)
    y_pred = gmm.predict_proba(x).argmax(1)
    _report_clustering(args.dataset + ' | GMM clustering on raw data', y, y_pred)

    y_pred = KMeans(n_clusters=args.n_clusters, random_state=0).fit_predict(x)
    _report_clustering(args.dataset + ' | K-Means clustering on raw data', y, y_pred)

    sc = SpectralClustering(n_clusters=args.n_clusters, random_state=0, affinity='nearest_neighbors')
    y_pred = sc.fit_predict(x)
    _report_clustering(args.dataset + ' | Spectral Clustering on raw data', y, y_pred)

    # --- manifold embedding -------------------------------------------------
    if args.manifold_learner == 'UMAP':
        md = float(args.umap_min_dist)
        hle = umap.UMAP(random_state=0, metric=args.umap_metric, n_components=args.umap_dim, n_neighbors=args.umap_neighbors, min_dist=md).fit_transform(x)
    elif args.manifold_learner == 'LLE':
        from sklearn.manifold import LocallyLinearEmbedding
        hle = LocallyLinearEmbedding(n_components=args.umap_dim, n_neighbors=args.umap_neighbors).fit_transform(x)
    elif args.manifold_learner == 'tSNE':
        hle = TSNE(n_components=args.umap_dim, n_jobs=16, random_state=0, verbose=0).fit_transform(x)
    elif args.manifold_learner == 'isomap':
        hle = Isomap(n_components=args.umap_dim, n_neighbors=5).fit_transform(x)
    else:
        # Fail fast instead of hitting a NameError on `hle` further down.
        raise ValueError(f'unknown manifold learner: {args.manifold_learner!r}')

    # --- clustering on the embedding ----------------------------------------
    gmm = mixture.GaussianMixture(covariance_type='full', n_components=args.n_clusters, random_state=0)
    gmm.fit(hle)
    y_pred = gmm.predict_proba(hle).argmax(1)
    _report_clustering(args.dataset + ' | GMM clustering on ' + str(args.manifold_learner) + ' embedding', y, y_pred)
    if args.visualize:
        plot(hle, y, 'UMAP', names)
        (y_pred_viz, _, _) = best_cluster_fit(y, y_pred)
        plot(hle, y_pred_viz, 'UMAP-predicted', names)
        return

    y_pred = KMeans(n_clusters=args.n_clusters, random_state=0).fit_predict(hle)
    _report_clustering(args.dataset + ' | K-Means ' + str(args.manifold_learner) + ' embedding', y, y_pred)

    sc = SpectralClustering(n_clusters=args.n_clusters, random_state=0, affinity='nearest_neighbors')
    y_pred = sc.fit_predict(hle)
    _report_clustering(args.dataset + ' | Spectral Clustering on ' + str(args.manifold_learner) + ' embedding', y, y_pred)
|
def cluster_manifold_in_embedding(hl, y, label_names=None):
    """
    Manifold-learn the autoencoded embedding, cluster it and report
    ACC/NMI/ARI (the N2D pipeline).

    :param hl: the autoencoder's hidden-layer embedding
    :param y: the ground-truth labels
    :param label_names: optional mapping from label ids to display names
    :return: (y_pred, acc, nmi, ari)
    :raises ValueError: for an unknown manifold learner or cluster option
    """
    # Manifold-learning step, selected by the global args.
    if args.manifold_learner == 'UMAP':
        md = float(args.umap_min_dist)
        hle = umap.UMAP(random_state=0, metric=args.umap_metric, n_components=args.umap_dim, n_neighbors=args.umap_neighbors, min_dist=md).fit_transform(hl)
    elif args.manifold_learner == 'LLE':
        hle = LocallyLinearEmbedding(n_components=args.umap_dim, n_neighbors=args.umap_neighbors).fit_transform(hl)
    elif args.manifold_learner == 'tSNE':
        hle = TSNE(n_components=args.umap_dim, n_jobs=16, random_state=0, verbose=0).fit_transform(hl)
    elif args.manifold_learner == 'isomap':
        hle = Isomap(n_components=args.umap_dim, n_neighbors=5).fit_transform(hl)
    else:
        # Previously fell through to a NameError on `hle`.
        raise ValueError(f'unknown manifold learner: {args.manifold_learner!r}')
    # Clustering step.
    if args.cluster == 'GMM':
        gmm = mixture.GaussianMixture(covariance_type='full', n_components=args.n_clusters, random_state=0)
        gmm.fit(hle)
        y_pred = gmm.predict_proba(hle).argmax(1)
    elif args.cluster == 'KM':
        km = KMeans(init='k-means++', n_clusters=args.n_clusters, random_state=0, n_init=20)
        y_pred = km.fit_predict(hle)
    elif args.cluster == 'SC':
        sc = SpectralClustering(n_clusters=args.n_clusters, random_state=0, affinity='nearest_neighbors')
        y_pred = sc.fit_predict(hle)
    else:
        # Previously fell through to a NameError on `y_pred`.
        raise ValueError(f'unknown clustering method: {args.cluster!r}')
    y_pred = np.asarray(y_pred)
    y = np.asarray(y)
    acc = np.round(cluster_acc(y, y_pred), 5)
    nmi = np.round(metrics.normalized_mutual_info_score(y, y_pred), 5)
    ari = np.round(metrics.adjusted_rand_score(y, y_pred), 5)
    print(args.dataset + ' | ' + args.manifold_learner + ' on autoencoded embedding with ' + args.cluster + ' - N2D')
    print('=' * 80)
    print(acc)
    print(nmi)
    print(ari)
    print('=' * 80)
    if args.visualize:
        plot(hle, y, 'n2d', label_names)
        (y_pred_viz, _, _) = best_cluster_fit(y, y_pred)
        plot(hle, y_pred_viz, 'n2d-predicted', label_names)
    return (y_pred, acc, nmi, ari)
|
def best_cluster_fit(y_true, y_pred):
    """Align predicted cluster ids to true labels via the Hungarian method.

    Builds the cluster-vs-label contingency table and solves the optimal
    assignment. Returns (best_fit, assignment, contingency) where best_fit is
    y_pred remapped through the assignment (predictions with no matched row
    are skipped, as in the original implementation).
    """
    y_true = y_true.astype(np.int64)
    n_labels = (max(y_pred.max(), y_true.max()) + 1)
    contingency = np.zeros((n_labels, n_labels), dtype=np.int64)
    for (pred, true) in zip(y_pred, y_true):
        contingency[(pred, true)] += 1
    # Maximize agreement by minimizing (max - counts).
    assignment = linear_assignment((contingency.max() - contingency))
    best_fit = []
    for pred in y_pred:
        for row in assignment:
            if (row[0] == pred):
                best_fit.append(row[1])
    return (best_fit, assignment, contingency)
|
def cluster_acc(y_true, y_pred):
    """Clustering accuracy under the optimal cluster-to-label assignment."""
    (_, assignment, contingency) = best_cluster_fit(y_true, y_pred)
    matched = sum((contingency[(row, col)] for (row, col) in assignment))
    return ((matched * 1.0) / y_pred.size)
|
def plot(x, y, plot_id, names=None):
    """Scatter-plot the first two embedding dimensions, colored by label.

    Only the first 5000 points are used, to keep the figure readable. Also
    dumps the plotted points (plus labels) to <save_dir>/<dataset>.csv and
    saves the figure to <save_dir>/<dataset>-<plot_id>.png.

    x: 2-D embedding array; y: per-point labels; names: optional mapping
    from label id to display name.
    """
    viz_df = pd.DataFrame(data=x[:5000])
    viz_df['Label'] = y[:5000]
    if (names is not None):
        viz_df['Label'] = viz_df['Label'].map(names)
    viz_df.to_csv((((args.save_dir + '/') + args.dataset) + '.csv'))
    plt.subplots(figsize=(8, 5))
    sns.scatterplot(x=0, y=1, hue='Label', legend='full', hue_order=sorted(viz_df['Label'].unique()), palette=sns.color_palette('hls', n_colors=args.n_clusters), alpha=0.5, data=viz_df)
    # Legend is spread horizontally above the axes; the first legend entry is
    # the hue title, blanked out on the next line.
    l = plt.legend(bbox_to_anchor=((- 0.1), 1.0, 1.1, 0.5), loc='lower left', markerfirst=True, mode='expand', borderaxespad=0, ncol=(args.n_clusters + 1), handletextpad=0.01)
    l.texts[0].set_text('')
    plt.ylabel('')
    plt.xlabel('')
    plt.tight_layout()
    plt.savefig((((((args.save_dir + '/') + args.dataset) + '-') + plot_id) + '.png'), dpi=300)
    plt.clf()
|
def autoencoder(dims, act='relu'):
    """Build a symmetric fully-connected autoencoder.

    dims[0] is the input width, dims[-1] the bottleneck width; the decoder
    mirrors the encoder. The bottleneck and reconstruction layers are linear.
    Returns an uncompiled Keras Model mapping input -> reconstruction.
    """
    depth = (len(dims) - 1)
    inputs = Input(shape=(dims[0],), name='input')
    layer = inputs
    # Encoder: hidden layers use the given activation.
    for idx in range((depth - 1)):
        layer = Dense(dims[(idx + 1)], activation=act, name=('encoder_%d' % idx))(layer)
    layer = Dense(dims[(- 1)], name=('encoder_%d' % (depth - 1)))(layer)
    # Decoder mirrors the encoder widths in reverse.
    for idx in reversed(range(1, depth)):
        layer = Dense(dims[idx], activation=act, name=('decoder_%d' % idx))(layer)
    layer = Dense(dims[0], name='decoder_0')(layer)
    return Model(inputs=inputs, outputs=layer)
|
def visualize_graph(graph, path, og_set=None):
    """Render a networkx digraph to an image file using python-igraph.

    graph: networkx graph; path: output image path; og_set: optional set of
    node ids — edges whose source AND target are both in the set are red.
    """
    # Rebuild the graph in igraph from the networkx edge list; only the first
    # two fields (source, target) of each edgelist entry are used.
    g = ig.Graph(len(graph), list(zip(*list(zip(*nx.to_edgelist(graph)))[:2])), directed=True)
    layout = g.layout('kk')
    visual_style = {'vertex_size': 10, 'vertex_color': '#AAAAFF', 'edge_width': 1, 'arrow_size': 0.01, 'vertex_label': range(g.vcount()), 'layout': layout}
    if (og_set is not None):
        red_edges = g.es.select(_source_in=og_set, _target_in=og_set)
        red_edges['color'] = 'red'
    # NOTE(review): 'hovermode' looks like a plotly argument, not a documented
    # igraph.plot() option — confirm the backend accepts (or ignores) it.
    ig.plot(g, path, **visual_style, bbox=(1000, 1000), margin=120, hovermode='closest')
|
def remap_graph(graph):
    """Relabel nodes to consecutive integers (in node-iteration order).

    Returns a new nx.DiGraph built from the edges only — NOTE(review): nodes
    without any incident edge are therefore dropped from the result (same as
    the original behavior; confirm this is intended for isolated functions).
    """
    node_mapping = {node: idx for (idx, node) in enumerate(graph.nodes())}
    remapped_graph = nx.DiGraph()
    for (src, dst) in graph.edges():
        remapped_graph.add_edge(node_mapping[src], node_mapping[dst])
    return remapped_graph
|
def create_fcg_graph(dex_path):
    """Extract the function call graph (FCG) of one APK and save it as an edge list.

    The output path mirrors the input path with apk_files -> graph_files and
    .apk -> .edgelist. APKs whose edge list already exists are skipped, and a
    PNG rendering is saved next to the edge list. Extraction failures are
    printed and swallowed so a batch run can continue.
    """
    save_path = dex_path.replace('apk_files', 'graph_files').replace('.apk', '.edgelist')
    if (not os.path.exists(save_path)):
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        try:
            # Androguard analysis: a=APK, d=DalvikVMFormat list, dx=Analysis.
            (a, d, dx) = AnalyzeAPK(dex_path)
            cg = dx.get_call_graph()
            cg_stripped = remap_graph(cg)
            visualize_graph(cg_stripped, path=save_path.replace('.edgelist', '.png'))
            if (len(cg_stripped) > 0):
                os.makedirs(os.path.dirname(save_path), exist_ok=True)
                with open(save_path, 'w') as file:
                    # SNAP-style comment header describing the graph; the file
                    # basename is assumed to be the APK's SHA-256.
                    file.write('# Directed graph (each unordered pair of nodes is saved once)\n')
                    file.write('# Function call graph of malicious Android APK\n')
                    file.write('# SHA-256: {}\n'.format(save_path.rsplit('/', 1)[1].replace('.edgelist', '')))
                    file.write('# Nodes: {}, Edges: {}\n'.format(len(cg_stripped), len(list(cg_stripped.edges))))
                    file.write('# FromNodeId\tToNodeId\n')
                    for edge in cg_stripped.edges():
                        file.write('{}\t{}\n'.format(edge[0], edge[1]))
        except Exception as e:
            # Best-effort batch processing: report the failing APK and move on.
            print('Error extracting FCG', e, dex_path)
|
def main():
    """Extract a function call graph for every APK under ./apk_files."""
    print('Constructing FCG Dataset')
    apk_paths = glob(os.path.join(os.getcwd(), 'apk_files/*.apk'))
    Parallel(n_jobs=1)((delayed(create_fcg_graph)(path) for path in tqdm(apk_paths)))
|
def run_method(idx, args, file, method):
    """Load the graph in the representation `method` expects and embed it.

    Dispatches on `method`: karateclub-style methods consume a networkx graph
    (largest connected component), SLAQ-style methods a sparse adjacency
    matrix, and 'nog' just the node/edge counts. Exits the process for an
    unknown method name.
    """
    karate_methods = ('sf', 'ldp', 'fgsd', 'feather', 'geo_scattering', 'g2v')
    slaq_methods = ('lsd', 'lsd_slaq', 'vnge_slaq', 'vnge')
    if (method in karate_methods):
        graph = process_file_karate(file)
        if (method == 'sf'):
            return sf(graph, args['n_eigen'])
        if (method == 'ldp'):
            return ldp(graph)
        if (method == 'fgsd'):
            return fgsd(graph)
        if (method == 'feather'):
            return feather(graph, order=args['order'])
        if (method == 'geo_scattering'):
            return geo_scattering(graph, order=args['order'])
        return g2v_document(idx, graph)
    if (method in slaq_methods):
        adjacency = process_file_slaq(file)
        if (method == 'lsd'):
            return netlsd_naive(adjacency)
        if (method == 'lsd_slaq'):
            return netlsd(adjacency, lanczos_steps=args['n_steps'], nvectors=args['n_vectors'])
        if (method == 'vnge_slaq'):
            return vnge(adjacency, lanczos_steps=args['n_steps'], nvectors=args['n_vectors'])
        return vnge_naive(adjacency)
    if (method == 'nog'):
        graph = process_file_nog(file)
        return np.array([graph.number_of_nodes(), graph.number_of_edges()], dtype=np.int64)
    print('Method {} not implemented'.format(method))
    exit(1)
|
def get_kernel_embedding(args, train_files, val_files, test_files):
    """Fit a Weisfeiler-Lehman subtree kernel on the train graphs and transform all splits.

    Returns (x_train, x_val, x_test) kernel feature matrices; val/test are
    transformed in chunks via kernel_transform().
    """
    print('\n******Running WL Kernel on train set******')
    gk = GraphKernel(kernel=[{'name': 'weisfeiler_lehman', 'n_iter': args['n_iter']}, 'subtree_wl'], normalize=True, n_jobs=args['n_cores'])
    train_graphs = Parallel(n_jobs=args['n_cores'])((delayed(process_file_grakel)(f) for f in tqdm(train_files)))
    x_train = gk.fit_transform(train_graphs)
    print('\n******Running WL Kernel on val set******')
    x_val = kernel_transform(args, val_files, gk)
    print('\n******Running WL Kernel on test set******')
    x_test = kernel_transform(args, test_files, gk)
    return (x_train, x_val, x_test)
|
def get_embedding(args, files, run_type):
    """Embed every file with run_method() in parallel; return a 2-D array.

    run_type is only used in the progress banner ('train'/'val'/'test').
    """
    print('\n******Running {} on {} set******'.format(args['method'], run_type))
    rows = Parallel(n_jobs=args['n_cores'])((delayed(run_method)(i, args, f, args['method']) for (i, f) in enumerate(tqdm(files))))
    embedding = np.asarray(rows)
    # Scalar-per-graph methods yield a 1-D array; reshape to a column vector.
    if (embedding.ndim == 1):
        embedding = embedding.reshape((- 1), 1)
    return embedding
|
def warn(*args, **kwargs):
    """No-op stub; presumably assigned over warnings.warn to silence library
    warnings — confirm at the assignment site (not visible in this chunk)."""
    pass
|
def run_experiment(args_og, files_train, files_val, files_test, y_train, y_val, y_test, train_ratios, wl_train_ratios):
    """Embed the three splits with args_og['method'] and classify at each train ratio.

    Works on a deep copy of args_og so per-ratio mutations don't leak back to
    the caller. For non-WL methods the embedding is computed once and the
    train split is truncated per ratio; for 'wl' the kernel must be refit per
    ratio, so the whole embedding step is repeated. Returns a list of
    (args_snapshot, val_score, test_score) tuples.
    """
    args = copy.deepcopy(args_og)
    result = []
    if (args['method'] != 'wl'):
        # Embed once; the timing covers all three splits.
        start = time.time()
        (x_train, x_val, x_test) = (get_embedding(args, files_train, run_type='train'), get_embedding(args, files_val, run_type='val'), get_embedding(args, files_test, run_type='test'))
        end = time.time()
        for ratio in train_ratios:
            args['train_ratio'] = ratio
            split_point = int((ratio * x_train.shape[0]))
            (val_score, test_score) = classify(args, x_train[:split_point], x_val, x_test, y_train[:split_point], y_val, y_test, run_time=(end - start))
            result.append((args, val_score, test_score))
    else:
        # WL kernel: the kernel is fit on the (truncated) train set, so the
        # embedding — and its timing — must be recomputed for every ratio.
        for ratio in wl_train_ratios:
            args['train_ratio'] = ratio
            split_point = int((args['train_ratio'] * len(files_train)))
            start = time.time()
            (x_train, x_val, x_test) = get_kernel_embedding(args, files_train[:split_point], files_val, files_test)
            end = time.time()
            (val_score, test_score) = classify(args, x_train, x_val, x_test, y_train[:split_point], y_val, y_test, run_time=(end - start))
            result.append((args, val_score, test_score))
    return result
|
def run_param_search():
    """Hyper-parameter grid search over embedding methods for both label groups.

    For each group ('type', 'family') and each method, sweeps that method's
    hyper-parameter grid via run_experiment() and prints one summary line per
    (args, val_score, test_score) result.
    """
    from config import args
    args.update({'metric': 'macro-F1', 'train_ratio': 1.0, 'val_ratio': 0.1, 'test_ratio': 0.2, 'malnet_tiny': False})
    groups = ['type', 'family']
    results = []
    for group in groups:
        args['group'] = group
        (files_train, files_val, files_test, y_train, y_val, y_test, label_dict) = get_split_info(args)
        args['class_labels'] = list(label_dict.keys())
        args['class_indexes'] = list(label_dict.values())
        for method in ['vnge_slaq', 'lsd_slaq', 'nog', 'feather', 'ldp']:
            args['method'] = method
            if (method == 'wl'):
                # WL kernel: sweep WL iterations; train on a 0.1 subsample.
                for n_iter in [2, 5, 10]:
                    (args['n_iter'], args['order'], args['n_eigen'], args['n_vectors'], args['n_steps']) = (n_iter, 0, 0, 0, 0)
                    result = run_experiment(args, files_train, files_val, files_test, y_train, y_val, y_test, train_ratios=[1.0], wl_train_ratios=[0.1])
                    results.extend(result)
            elif ((method == 'feather') or (method == 'geo_scattering')):
                for order in [4, 5, 6]:
                    (args['order'], args['n_iter'], args['n_eigen'], args['n_vectors'], args['n_steps']) = (order, 0, 0, 0, 0)
                    result = run_experiment(args, files_train, files_val, files_test, y_train, y_val, y_test, train_ratios=[1.0], wl_train_ratios=[1.0])
                    results.extend(result)
            elif (method == 'sf'):
                for n_eigen in [100, 200, 300]:
                    (args['n_eigen'], args['order'], args['n_iter'], args['n_vectors'], args['n_steps']) = (n_eigen, 0, 0, 0, 0)
                    result = run_experiment(args, files_train, files_val, files_test, y_train, y_val, y_test, train_ratios=[1.0], wl_train_ratios=[1.0])
                    results.extend(result)
            elif ((method == 'vnge_slaq') or (method == 'lsd_slaq')):
                for (n_vectors, n_steps) in [(10, 10), (15, 15), (20, 20)]:
                    (args['n_vectors'], args['n_steps'], args['order'], args['n_iter'], args['n_eigen']) = (n_vectors, n_steps, 0, 0, 0)
                    result = run_experiment(args, files_train, files_val, files_test, y_train, y_val, y_test, train_ratios=[1.0], wl_train_ratios=[1.0])
                    results.extend(result)
            else:
                # Methods with no hyper-parameters: zero out every grid field.
                (args['n_eigen'], args['order'], args['n_iter'], args['n_vectors'], args['n_steps']) = (0, 0, 0, 0, 0)
                result = run_experiment(args, files_train, files_val, files_test, y_train, y_val, y_test, train_ratios=[1.0], wl_train_ratios=[1.0])
                results.extend(result)
    for (args_r, val_score, test_score) in results:
        # Bug fix: 'order' previously read from the live `args` dict (so every
        # row printed its final value) instead of the per-result snapshot args_r.
        print('Method={}, malnet_tiny={}, group={}, train_ratio={}, n_iter={}, order={}, n_eigen={}, n_vectors={}, n_steps={}, val_{}={}, test_{}={}'.format(args_r['method'], args_r['malnet_tiny'], args_r['group'], args_r['train_ratio'], args_r['n_iter'], args_r['order'], args_r['n_eigen'], args_r['n_vectors'], args_r['n_steps'], args_r['metric'], val_score, args_r['metric'], test_score))
|
def run_best_params():
    """Run every embedding method once with its best-known hyper-parameters.

    Uses the 'type' group with fixed split ratios and prints one summary line
    per (args, val_score, test_score) result.
    """
    from config import args
    args.update({'metric': 'macro-F1', 'group': 'type', 'train_ratio': 1.0, 'val_ratio': 0.1, 'test_ratio': 0.2, 'malnet_tiny': False})
    (files_train, files_val, files_test, y_train, y_val, y_test, label_dict) = get_split_info(args)
    args['class_labels'] = list(label_dict.keys())
    args['class_indexes'] = list(label_dict.values())
    results = []
    for method in ['vnge_slaq', 'lsd_slaq', 'geo_scattering', 'sf', 'lsd', 'wl', 'nog', 'feather', 'ldp']:
        args['method'] = method
        if (method == 'wl'):
            for n_iter in [2]:
                (args['n_iter'], args['order'], args['n_eigen'], args['n_vectors'], args['n_steps']) = (n_iter, 0, 0, 0, 0)
                result = run_experiment(args, files_train, files_val, files_test, y_train, y_val, y_test, train_ratios=[1.0], wl_train_ratios=[1.0])
                results.extend(result)
        elif ((method == 'feather') or (method == 'geo_scattering')):
            for order in [4]:
                (args['order'], args['n_iter'], args['n_eigen'], args['n_vectors'], args['n_steps']) = (order, 0, 0, 0, 0)
                result = run_experiment(args, files_train, files_val, files_test, y_train, y_val, y_test, train_ratios=[1.0], wl_train_ratios=[1.0])
                results.extend(result)
        elif (method == 'sf'):
            for n_eigen in [100]:
                (args['n_eigen'], args['order'], args['n_iter'], args['n_vectors'], args['n_steps']) = (n_eigen, 0, 0, 0, 0)
                result = run_experiment(args, files_train, files_val, files_test, y_train, y_val, y_test, train_ratios=[1.0], wl_train_ratios=[1.0])
                results.extend(result)
        elif ((method == 'vnge_slaq') or (method == 'lsd_slaq')):
            for (n_vectors, n_steps) in [(10, 10)]:
                (args['n_vectors'], args['n_steps'], args['order'], args['n_iter'], args['n_eigen']) = (n_vectors, n_steps, 0, 0, 0)
                result = run_experiment(args, files_train, files_val, files_test, y_train, y_val, y_test, train_ratios=[1.0], wl_train_ratios=[1.0])
                results.extend(result)
        else:
            # Methods with no hyper-parameters: zero out every grid field.
            (args['n_eigen'], args['order'], args['n_iter'], args['n_vectors'], args['n_steps']) = (0, 0, 0, 0, 0)
            result = run_experiment(args, files_train, files_val, files_test, y_train, y_val, y_test, train_ratios=[1.0], wl_train_ratios=[1.0])
            results.extend(result)
    for (args_r, val_score, test_score) in results:
        # Bug fix: 'order' previously read from the live `args` dict (so every
        # row printed its final value) instead of the per-result snapshot args_r.
        print('Method={}, malnet_tiny={}, group={}, train_ratio={}, n_iter={}, order={}, n_eigen={}, n_vectors={}, n_steps={}, val_{}={}, test_{}={}'.format(args_r['method'], args_r['malnet_tiny'], args_r['group'], args_r['train_ratio'], args_r['n_iter'], args_r['order'], args_r['n_eigen'], args_r['n_vectors'], args_r['n_steps'], args_r['metric'], val_score, args_r['metric'], test_score))
|
def ldp(graph):
    """Local Degree Profile embedding of a single graph (karateclub LDP)."""
    embedder = LDP()
    embedder._check_graphs([graph])
    return embedder._calculate_ldp(graph)
|
def feather(graph, order=5):
    """FEATHER graph-level embedding (karateclub FeatherGraph) of one graph."""
    embedder = FeatherGraph(order=order)
    embedder._set_seed()
    embedder._check_graphs([graph])
    return embedder._calculate_feather(graph)
|
def ige(graph, max_deg):
    """Invariant Graph Embedding (karateclub IGE) of one graph.

    max_deg is fixed externally so embeddings are comparable across graphs.
    """
    embedder = IGE()
    embedder._set_seed()
    embedder._check_graphs([graph])
    embedder.max_deg = max_deg
    return embedder._calculate_invariant_embedding(graph)
|
def fgsd(graph):
    """FGSD spectral-distance embedding (karateclub FGSD) of one graph."""
    embedder = FGSD()
    embedder._set_seed()
    embedder._check_graphs([graph])
    return embedder._calculate_fgsd(graph)
|
def lsd(graph):
    """NetLSD heat-trace signature (karateclub NetLSD) of one graph."""
    embedder = NetLSD()
    embedder._set_seed()
    embedder._check_graphs([graph])
    return embedder._calculate_netlsd(graph)
|
def sf(graph, n_eigenvalues=128):
    """Spectral Features embedding (karateclub SF) of one graph."""
    embedder = SF(dimensions=n_eigenvalues)
    embedder._set_seed()
    embedder._check_graphs([graph])
    return embedder._calculate_sf(graph)
|
def geo_scattering(graph, order=4):
    """Geometric scattering embedding (karateclub GeoScattering) of one graph."""
    embedder = GeoScattering(order=order)
    embedder._set_seed()
    embedder._check_graphs([graph])
    return embedder._calculate_geoscattering(graph)
|
def g2v_document(idx, graph):
    """Build the WL-hashing TaggedDocument for one graph (Graph2Vec corpus entry).

    Bug fix: gensim's TaggedDocument expects `tags` to be a LIST of tags.
    Passing the bare string str(idx) made gensim treat each character as a
    separate tag, so the model.docvecs[str(i)] lookup in g2v() failed for any
    idx >= 10. Wrapping in a list restores one tag per document.
    """
    model = Graph2Vec()
    model._set_seed()
    model._check_graphs([graph])
    document = WeisfeilerLehmanHashing(graph, model.wl_iterations, model.attributed, model.erase_base_features)
    return TaggedDocument(words=document.get_graph_features(), tags=[str(idx)])
|
def g2v(documents):
    """Train Doc2Vec on the WL documents and return one vector per document,
    in document order (documents are tagged by their index as a string)."""
    from tqdm import tqdm
    model = Doc2Vec(documents)
    vectors = []
    for i in tqdm(range(len(documents))):
        vectors.append(model.docvecs[str(i)])
    return np.array(vectors)
|
def warn(*args, **kwargs):
    """No-op warnings stub (duplicate of the earlier `warn` definition in this
    file); presumably assigned over warnings.warn to suppress warnings —
    confirm at the assignment site."""
    pass
|
def process_file_karate(file):
    """Load an edge list as an undirected graph restricted to its largest
    connected component, with nodes relabeled to consecutive integers
    (the input format karateclub embedders expect)."""
    g = nx.read_edgelist(file)
    largest_cc = max(nx.connected_components(g), key=len)
    g = g.subgraph(largest_cc)
    return nx.convert_node_labels_to_integers(g)
|
def process_file_nog(file):
    """Read an edge list as an undirected networkx graph, with no other processing."""
    graph = nx.read_edgelist(file)
    return graph
|
def process_file_slaq(file):
    """Load an edge list and return its binarized float32 CSR adjacency matrix
    (the input format the SLAQ estimators expect)."""
    graph = nx.convert_node_labels_to_integers(nx.read_edgelist(file))
    adjacency = nx.to_scipy_sparse_matrix(graph, dtype=np.float32, format='csr')
    # Force every stored entry to exactly 1 (unweighted adjacency).
    adjacency.data = np.ones_like(adjacency.data)
    return adjacency
|
def process_file_grakel(file):
    """Load the largest connected component of an edge list and wrap it as a
    grakel Graph with a constant node label 'a' (required by the WL kernel)."""
    g = nx.read_edgelist(file)
    largest_cc = max(nx.connected_components(g), key=len)
    g = nx.convert_node_labels_to_integers(g.subgraph(largest_cc))
    nx.set_node_attributes(g, 'a', 'label')
    (wrapped,) = graph_from_networkx([g], node_labels_tag='label', as_Graph=True)
    return wrapped
|
def chunker(seq, size):
    """Yield consecutive slices of `seq` of length `size` (last may be shorter)."""
    for start in range(0, len(seq), size):
        yield seq[start:(start + size)]
|
def kernel_transform(args, files, gk):
    """Apply a fitted graph kernel to `files` in chunks of 1000; return the
    stacked feature matrix.

    Fixes over the original: the loop variable no longer shadows the `files`
    parameter; the progress bar advances by the actual chunk length (the old
    `update(chunk_size)` overshot on the final partial chunk); chunks are
    collected and concatenated once instead of repeatedly (the old pattern was
    quadratic and left `embedding` unbound for empty input).
    """
    chunk_size = 1000
    pbar = tqdm(total=len(files))
    pieces = []
    for chunk in chunker(files, chunk_size):
        graphs = Parallel(n_jobs=args['n_cores'])((delayed(process_file_grakel)(f) for f in chunk))
        pieces.append(gk.transform(graphs))
        pbar.update(len(chunk))
    pbar.close()
    return np.concatenate(pieces, axis=0)
|
class MalnetDataset(Dataset):
    """PyTorch Geometric dataset over pre-converted MalNet graphs.

    Assumes convert_files_pytorch() has already saved one data_<idx>.pt per
    raw file; __getitem__ loads that file and attaches the label on the fly.
    """
    def __init__(self, args, root, files, labels, transform=None, pre_transform=None):
        # files: raw edge-list paths, index-aligned with labels (one int each).
        self.args = args
        self.files = files
        self.labels = labels
        self.num_classes = len(np.unique(labels))
        super(MalnetDataset, self).__init__(root, transform, pre_transform)
    @property
    def raw_file_names(self):
        return self.files
    @property
    def processed_file_names(self):
        # NOTE(review): the .pt files live one level above the default
        # 'processed' dir — confirm this matches where process_file() saves.
        return glob((self.processed_dir.replace('/processed', '') + '/*.pt'))
    def download(self):
        # Data is prepared offline; nothing to download.
        pass
    def __len__(self):
        return len(self.files)
    def __getitem__(self, idx):
        # Load the pre-converted graph tensor and attach its class label.
        x = torch.load((self.processed_dir.replace('/processed', '') + '/data_{}.pt'.format(idx)))
        x.y = self.labels[idx]
        return x
|
def model_search(gpu, malnet_tiny, group, metric, epochs, model, K, num_layers, hidden_dim, lr, dropout, train_ratio):
    """Train/evaluate one GNN configuration, pinned to one GPU.

    Returns (args, val_score, test_score, param_count, run_time).
    """
    from config import args
    args.update({'gpu': gpu, 'batch_size': 64, 'node_feature': 'ldp', 'directed_graph': True, 'remove_isolates': False, 'lcc_only': False, 'add_self_loops': True, 'model': model, 'K': K, 'hidden_dim': hidden_dim, 'num_layers': num_layers, 'metric': metric, 'lr': lr, 'dropout': dropout, 'epochs': epochs, 'group': group, 'train_ratio': train_ratio, 'malnet_tiny': malnet_tiny})
    # CUDA env vars are set BEFORE importing gnn — presumably so they take
    # effect before torch initializes CUDA; keep this ordering.
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args['gpu'])
    from gnn import run_experiment
    (val_score, test_score, param_count, run_time) = run_experiment(args)
    return (args, val_score, test_score, param_count, run_time)
|
def preprocess_search(gpu, epochs, node_feature, directed_graph, remove_isolates, lcc_only, add_self_loops, model='gcn', K=0, hidden_dim=32, num_layers=3, lr=0.0001, dropout=0):
    """Evaluate one graph-preprocessing configuration (fixed GCN model) on malnet_tiny.

    Returns (args, val_score, test_score, param_count, run_time).

    NOTE(review): unlike model_search(), this calls run_experiment with three
    positional arguments (args, args['group'], gpu) — confirm gnn's
    run_experiment accepts this signature.
    """
    from config import args
    args.update({'gpu': gpu, 'batch_size': 128, 'node_feature': node_feature, 'directed_graph': directed_graph, 'remove_isolates': remove_isolates, 'lcc_only': lcc_only, 'add_self_loops': add_self_loops, 'model': model, 'K': K, 'hidden_dim': hidden_dim, 'num_layers': num_layers, 'lr': lr, 'dropout': dropout, 'epochs': epochs, 'group': 'type', 'train_ratio': 1.0, 'malnet_tiny': True})
    from gnn import run_experiment
    (val_score, test_score, param_count, run_time) = run_experiment(args, args['group'], gpu)
    return (args, val_score, test_score, param_count, run_time)
|
def search_all_preprocess():
    """Sweep each preprocessing knob independently (one Parallel batch per knob),
    holding the other knobs at a fixed baseline."""
    epochs = 1000
    gpus = [0, 1, 2, 3, 4, 5, 6, 7]
    # Sweep node feature type.
    Parallel(n_jobs=len(gpus))((delayed(preprocess_search)(gpus[idx], epochs, node_feature=feature, directed_graph=True, remove_isolates=True, lcc_only=False, add_self_loops=False) for (idx, feature) in enumerate(tqdm(['ldp', 'constant', 'degree']))))
    # Sweep directed vs undirected.
    Parallel(n_jobs=len(gpus))((delayed(preprocess_search)(gpus[idx], epochs, node_feature='constant', directed_graph=directed, remove_isolates=True, lcc_only=False, add_self_loops=False) for (idx, directed) in enumerate(tqdm([True, False]))))
    # Sweep isolated-node removal.
    Parallel(n_jobs=len(gpus))((delayed(preprocess_search)(gpus[idx], epochs, node_feature='constant', directed_graph=True, remove_isolates=isolates, lcc_only=False, add_self_loops=False) for (idx, isolates) in enumerate(tqdm([True, False]))))
    # Sweep largest-connected-component-only (undirected here).
    Parallel(n_jobs=len(gpus))((delayed(preprocess_search)(gpus[idx], epochs, node_feature='constant', directed_graph=False, remove_isolates=True, lcc_only=lcc, add_self_loops=False) for (idx, lcc) in enumerate(tqdm([True, False]))))
    # Sweep self-loop insertion.
    Parallel(n_jobs=len(gpus))((delayed(preprocess_search)(gpus[idx], epochs, node_feature='constant', directed_graph=True, remove_isolates=True, lcc_only=False, add_self_loops=self_loops) for (idx, self_loops) in enumerate(tqdm([True, False]))))
|
def search_all_models():
    """Run model_search() over the cartesian product of GNN hyper-parameters,
    round-robining the jobs across the listed GPUs, then print a summary line
    per configuration."""
    gpus = [2]
    models = ['gin']
    layers = [5]
    hidden_dims = [64]
    learning_rates = [0.0001]
    dropouts = [0]
    epochs = 500
    metric = 'macro-F1'
    groups = ['family']
    malnet_tiny = False
    train_ratios = [1.0]
    combinations = list(itertools.product(*[groups, models, layers, hidden_dims, learning_rates, dropouts, train_ratios]))
    # One parallel worker per configuration; GPU chosen round-robin by index.
    results = Parallel(n_jobs=len(combinations))((delayed(model_search)(gpus[(idx % len(gpus))], malnet_tiny, group, metric, epochs, model=model, K=0, num_layers=num_layers, hidden_dim=hidden_dim, lr=lr, dropout=dropout, train_ratio=train_ratio) for (idx, (group, model, num_layers, hidden_dim, lr, dropout, train_ratio)) in enumerate(tqdm(combinations))))
    for (args, val_score, test_score, param_count, run_time) in results:
        print('Tiny={}, group={}, train_ratio={}, model={}, epochs={}, run time={} seconds, # parameters={}, layers={}, hidden_dims={}, learning_rate={}, dropout={}, val_score={}, test_score={}'.format(args['malnet_tiny'], args['group'], args['train_ratio'], args['model'], args['epochs'], run_time, param_count, args['num_layers'], args['hidden_dim'], args['lr'], args['dropout'], val_score, test_score))
|
def run_best_models():
    """Re-run the best-known GNN configurations (hard-coded tuples of
    model, K, num_layers, hidden_dim, lr, dropout) and print their scores."""
    epochs = 500
    gpus = [2, 3, 4, 5]
    metric = 'macro-F1'
    group = 'family'
    malnet_tiny = True
    # Each entry: (model, K, num_layers, hidden_dim, lr, dropout).
    combinations = [['gin', 0, 3, 64, 0.001, 0.5]]
    results = Parallel(n_jobs=len(combinations))((delayed(model_search)(gpus[(idx % len(gpus))], malnet_tiny, group, metric, epochs, model=model, K=K, num_layers=num_layers, hidden_dim=hidden_dim, lr=lr, dropout=dropout) for (idx, (model, K, num_layers, hidden_dim, lr, dropout)) in enumerate(tqdm(combinations))))
    for (args, val_score, test_score, param_count, run_time) in results:
        print('Tiny={}, group={}, train_ratio={}, model={}, epochs={}, run time={} seconds, # parameters={}, layers={}, hidden_dims={}, learning_rate={}, dropout={}, val_score={}, test_score={}'.format(args['malnet_tiny'], args['group'], args['train_ratio'], args['model'], args['epochs'], run_time, param_count, args['num_layers'], args['hidden_dim'], args['lr'], args['dropout'], val_score, test_score))
|
class GIN(torch.nn.Module):
    """Graph Isomorphism Network: num_layers+1 GINConv layers, global sum
    pooling, then a 2-layer classifier head with dropout."""

    def __init__(self, args):
        super(GIN, self).__init__()
        self.args = args
        self.layers = torch.nn.ModuleList()
        for layer_idx in range((args['num_layers'] + 1)):
            in_dim = (args['num_features'] if (layer_idx == 0) else args['hidden_dim'])
            # Each GINConv wraps a 2-layer MLP.
            mlp = Sequential(Linear(in_dim, args['hidden_dim']), ReLU(), Linear(args['hidden_dim'], args['hidden_dim']))
            self.layers.append(GINConv(mlp))
        self.fc1 = Linear(args['hidden_dim'], args['hidden_dim'])
        self.fc2 = Linear(args['hidden_dim'], args['num_classes'])

    def forward(self, x, edge_index, batch):
        for conv in self.layers:
            x = F.relu(conv(x, edge_index))
        x = global_add_pool(x, batch)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=self.args['dropout'], training=self.training)
        return F.log_softmax(self.fc2(x), dim=(- 1))
|
class MLP(torch.nn.Module):
    """Graph-level MLP baseline: sum-pool node features first, then a stack of
    linear layers (ReLU between layers, dropout before the final one)."""

    def __init__(self, args):
        super(MLP, self).__init__()
        self.args = args
        self.layers = torch.nn.ModuleList()
        for layer_idx in range((args['num_layers'] + 1)):
            in_dim = (args['num_features'] if (layer_idx == 0) else args['hidden_dim'])
            out_dim = (args['num_classes'] if (layer_idx == args['num_layers']) else args['hidden_dim'])
            self.layers.append(Linear(in_dim, out_dim))

    def forward(self, x, edge_index, batch):
        # edge_index is accepted for interface parity but unused (no message passing).
        x = global_add_pool(x, batch)
        last = (len(self.layers) - 1)
        for (idx, fc) in enumerate(self.layers):
            if (idx == last):
                x = F.dropout(x, p=self.args['dropout'], training=self.training)
                x = fc(x)
            else:
                x = F.relu(fc(x))
        return F.log_softmax(x, dim=(- 1))
|
class GraphSAGE(torch.nn.Module):
    """GraphSAGE with jumping-knowledge style concatenation: the outputs of all
    conv layers are concatenated (no ReLU between convs, as in the original),
    sum-pooled, then classified by a 2-layer head."""

    def __init__(self, args):
        super().__init__()
        self.args = args
        self.layers = torch.nn.ModuleList()
        n_layers = (args['num_layers'] + 1)
        for layer_idx in range(n_layers):
            in_dim = (args['num_features'] if (layer_idx == 0) else args['hidden_dim'])
            self.layers.append(SAGEConv(in_dim, args['hidden_dim']))
        # The head consumes the concatenation of every layer's output.
        self.fc1 = torch.nn.Linear((n_layers * args['hidden_dim']), args['hidden_dim'])
        self.fc2 = torch.nn.Linear(args['hidden_dim'], args['num_classes'])

    def forward(self, x, edge_index, batch):
        layer_outputs = []
        for conv in self.layers:
            x = conv(x, edge_index)
            layer_outputs.append(x)
        x = global_add_pool(torch.cat(layer_outputs, dim=1), batch)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=self.args['dropout'], training=self.training)
        return F.log_softmax(self.fc2(x), dim=(- 1))
|
class GCN(torch.nn.Module):
    """Graph Convolutional Network: num_layers+1 GCNConv layers with ReLU,
    global sum pooling, then a 2-layer classifier head with dropout."""

    def __init__(self, args):
        super(GCN, self).__init__()
        self.args = args
        self.layers = torch.nn.ModuleList()
        for layer_idx in range((args['num_layers'] + 1)):
            in_dim = (args['num_features'] if (layer_idx == 0) else args['hidden_dim'])
            self.layers.append(GCNConv(in_dim, args['hidden_dim']))
        self.fc1 = Linear(args['hidden_dim'], args['hidden_dim'])
        self.fc2 = Linear(args['hidden_dim'], args['num_classes'])

    def forward(self, x, edge_index, batch):
        for conv in self.layers:
            x = F.relu(conv(x, edge_index))
        x = global_add_pool(x, batch)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=self.args['dropout'], training=self.training)
        return F.log_softmax(self.fc2(x), dim=(- 1))
|
class SGC(torch.nn.Module):
    """Stack of SGConv layers (each applying K propagation steps, no cached
    propagation, no added self-loops), global sum pooling, then a 2-layer
    classifier head with dropout."""

    def __init__(self, args):
        super(SGC, self).__init__()
        self.args = args
        self.layers = torch.nn.ModuleList()
        for layer_idx in range((args['num_layers'] + 1)):
            in_dim = (args['num_features'] if (layer_idx == 0) else args['hidden_dim'])
            self.layers.append(SGConv(in_dim, args['hidden_dim'], K=args['K'], add_self_loops=False, cached=False))
        self.fc1 = Linear(args['hidden_dim'], args['hidden_dim'])
        self.fc2 = Linear(args['hidden_dim'], args['num_classes'])

    def forward(self, x, edge_index, batch):
        for conv in self.layers:
            x = F.relu(conv(x, edge_index))
        x = global_add_pool(x, batch)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=self.args['dropout'], training=self.training)
        # dim=1 kept as in the original (equivalent to dim=-1 for 2-D input).
        return F.log_softmax(self.fc2(x), dim=1)
|
def process_file(args, idx, file, processed_dir, pre_transform):
    """Convert one edge-list file into a PyG Data object saved as data_<idx>.pt.

    Honors the preprocessing flags in args: directed_graph, lcc_only,
    remove_isolates, add_self_loops; applies pre_transform last if given.
    """
    if args['directed_graph']:
        graph = nx.read_edgelist(file, create_using=nx.DiGraph)
    else:
        graph = nx.read_edgelist(file)
    if args['lcc_only']:
        # NOTE(review): nx.connected_components is undefined for directed
        # graphs — this raises if directed_graph and lcc_only are both True.
        graph = graph.subgraph(sorted(nx.connected_components(graph), key=len, reverse=True)[0])
    data = from_networkx(graph)
    if args['remove_isolates']:
        data = T.RemoveIsolatedNodes()(data)
    if args['add_self_loops']:
        data = T.AddSelfLoops()(data)
    if (pre_transform is not None):
        data = pre_transform(data)
    # NOTE(review): assumes processed_dir ends with a path separator — confirm
    # callers, and that this matches MalnetDataset's lookup path.
    torch.save(data, (processed_dir + 'data_{}.pt'.format(idx)))
|
def convert_files_pytorch(args, files, processed_dir, pre_transform):
    """Convert raw edge-list files into saved PyG Data objects in parallel,
    skipping the work entirely when one .pt file already exists per input."""
    already_converted = (len(glob((processed_dir + '*.pt'))) == len(files))
    if not already_converted:
        os.makedirs(processed_dir, exist_ok=True)
        Parallel(n_jobs=args['n_cores'])((delayed(process_file)(args, i, f, processed_dir, pre_transform) for (i, f) in enumerate(tqdm(files))))
|
# NOTE(review): the three lines below are scraped web-page residue, not code;
# commented out so they cannot be mistaken for Python source.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.