code stringlengths 17 6.64M |
|---|
def train(args, adj_list, features, train_data, train_label, test_data, test_label, paras):
    """Train the GAS model and evaluate it on the test split.

    Args:
        args: parsed CLI arguments (epoch_num, batch_size, learning_rate, ...).
        adj_list: adjacency data fed to the model at every step.
        features: node feature matrix.
        train_data, train_label: training indices and one-hot labels.
        test_data, test_label: test indices and one-hot labels.
        paras: packed size list produced by the GAS data loader; paras[3] is
            used as the training-set size here while the remaining entries
            configure the network — assumes the GAS loader packs at least 8
            entries (paras[4], paras[6], paras[7] are read) — TODO confirm.
    """
    with tf.Session() as sess:
        adj_data = adj_list
        net = GAS(session=sess, nodes=paras[0], class_size=paras[4],
                  embedding_r=paras[1], embedding_u=paras[2], embedding_i=paras[3],
                  h_u_size=paras[6], h_i_size=paras[7],
                  encoding1=args.encoding1, encoding2=args.encoding2,
                  encoding3=args.encoding3, encoding4=args.encoding4,
                  gcn_dim=args.gcn_dim)
        sess.run(tf.global_variables_initializer())
        # BUG FIX: time.clock() was removed in Python 3.8; perf_counter()
        # is the recommended monotonic replacement.
        t_start = time.perf_counter()
        for epoch in range(args.epoch_num):
            train_loss = 0
            train_acc = 0
            count = 0
            for index in range(0, paras[3], args.batch_size):
                batch_data, batch_label = get_data(index, args.batch_size, paras[3])
                loss, acc, pred, prob = net.train(features, adj_data, batch_label,
                                                  batch_data, args.learning_rate,
                                                  args.momentum)
                print('batch loss: {:.4f}, batch acc: {:.4f}'.format(loss, acc))
                train_loss += loss
                train_acc += acc
                count += 1
            train_loss = train_loss / count
            train_acc = train_acc / count
            print('epoch{:d} : train_loss: {:.4f}, train_acc: {:.4f}'.format(epoch, train_loss, train_acc))
        t_end = time.perf_counter()
        print('train time=', '{:.5f}'.format(t_end - t_start))
        print('Train end!')
        test_acc, test_pred, test_probabilities, test_tags = net.test(features, adj_data, test_label, test_data)
        print('test acc:', test_acc)
|
class GEM(Algorithm):
    """Graph Embeddings for Malicious accounts (GEM) model, TF1 static graph.

    Stacks `hop` GEMLayer propagation steps over `meta` adjacency views,
    then classifies batch nodes with a linear layer trained under a
    sigmoid cross-entropy loss plus L2 regularization.
    """

    def __init__(self, session, nodes, class_size, meta, embedding, encoding, hop):
        self.nodes = nodes            # number of graph nodes
        self.meta = meta              # number of meta (view) adjacency matrices
        self.class_size = class_size  # label dimension
        self.embedding = embedding    # input feature dimension
        self.encoding = encoding      # hidden embedding dimension
        self.hop = hop                # number of propagation layers
        self.placeholders = {
            'a': tf.placeholder(tf.float32, [self.meta, self.nodes, self.nodes], 'adj'),
            'x': tf.placeholder(tf.float32, [self.nodes, self.embedding], 'nxf'),
            'batch_index': tf.placeholder(tf.int32, [None], 'index'),
            't': tf.placeholder(tf.float32, [None, self.class_size], 'labels'),
            'lr': tf.placeholder(tf.float32, [], 'learning_rate'),
            'mom': tf.placeholder(tf.float32, [], 'momentum'),
            'num_features_nonzero': tf.placeholder(tf.int32),
        }
        loss, probabilities = self.forward_propagation()
        self.loss, self.probabilities = loss, probabilities
        self.l2 = tf.contrib.layers.apply_regularization(
            tf.contrib.layers.l2_regularizer(0.01), tf.trainable_variables())
        # Threshold the per-class sigmoid outputs at 0.5 to get hard predictions.
        x = tf.ones_like(self.probabilities)
        y = tf.zeros_like(self.probabilities)
        self.pred = tf.where(self.probabilities > 0.5, x=x, y=y)
        print(self.pred.shape)
        self.correct_prediction = tf.equal(self.pred, self.placeholders['t'])
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, 'float'))
        print('Forward propagation finished.')
        self.sess = session
        self.optimizer = tf.train.AdamOptimizer(self.placeholders['lr'])
        gradients = self.optimizer.compute_gradients(self.loss + self.l2)
        # Clip gradients to [-5, 5] to stabilize training.
        capped_gradients = [(tf.clip_by_value(grad, -5.0, 5.0), var)
                            for grad, var in gradients if grad is not None]
        self.train_op = self.optimizer.apply_gradients(capped_gradients)
        self.init = tf.global_variables_initializer()
        print('Backward propagation finished.')

    def forward_propagation(self):
        """Build the embedding and classification sub-graphs.

        Returns:
            (loss, probabilities): scalar sigmoid cross-entropy loss and the
            per-class sigmoid probabilities for the current batch.
        """
        with tf.variable_scope('gem_embedding'):
            h = tf.get_variable(name='init_embedding', shape=[self.nodes, self.encoding],
                                initializer=tf.contrib.layers.xavier_initializer())
            for i in range(0, self.hop):
                f = GEMLayer(self.placeholders, self.nodes, self.meta, self.embedding, self.encoding)
                gem_out = f(inputs=h)
                h = tf.reshape(gem_out, [self.nodes, self.encoding])
            print('GEM embedding over!')
        with tf.variable_scope('classification'):
            # Select the embeddings of the batch nodes via one-hot matmul.
            batch_data = tf.matmul(tf.one_hot(self.placeholders['batch_index'], self.nodes), h)
            W = tf.get_variable(name='weights', shape=[self.encoding, self.class_size],
                                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.get_variable(name='bias', shape=[1, self.class_size],
                                initializer=tf.zeros_initializer())
            # BUG FIX: removed a no-op tf.transpose(batch_data, perm=[0, 1])
            # whose result was discarded, and an unused variable 'u' that was
            # never read yet still inflated the L2 regularization term.
            logits = tf.matmul(batch_data, W) + b
            loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.placeholders['t'], logits=logits)
        return loss, tf.nn.sigmoid(logits)

    def train(self, x, a, t, b, learning_rate=0.01, momentum=0.9):
        """Run one optimization step; returns (loss, acc, pred, prob)."""
        feed_dict = utils.construct_feed_dict(x, a, t, b, learning_rate, momentum, self.placeholders)
        outs = self.sess.run(
            [self.train_op, self.loss, self.accuracy, self.pred, self.probabilities],
            feed_dict=feed_dict)
        loss = outs[1]
        acc = outs[2]
        pred = outs[3]
        prob = outs[4]
        return loss, acc, pred, prob

    def test(self, x, a, t, b, learning_rate=0.01, momentum=0.9):
        """Evaluate without updating weights; returns (acc, pred, probabilities, tags)."""
        feed_dict = utils.construct_feed_dict(x, a, t, b, learning_rate, momentum, self.placeholders)
        acc, pred, probabilities, tags = self.sess.run(
            [self.accuracy, self.pred, self.probabilities, self.correct_prediction],
            feed_dict=feed_dict)
        return acc, pred, probabilities, tags
|
def arg_parser(argv=None):
    """Parse command-line arguments for GEM training.

    Args:
        argv: optional list of argument strings; None preserves the default
            argparse behaviour of reading sys.argv, so existing callers work.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=123, help='Random seed.')
    parser.add_argument('--dataset_str', type=str, default='dblp', help="['dblp','example']")
    parser.add_argument('--epoch_num', type=int, default=30, help='Number of epochs to train.')
    parser.add_argument('--batch_size', type=int, default=1000)
    # BUG FIX: momentum was declared type=int, which rejects float values
    # such as the 0.9 default when passed on the command line.
    parser.add_argument('--momentum', type=float, default=0.9)
    # BUG FIX: help text was copy-pasted from an unrelated option; also add
    # explicit types so CLI-supplied values are not left as strings.
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate.')
    parser.add_argument('--hop', type=int, default=1, help='hop number')
    parser.add_argument('--k', type=int, default=16, help='gem layer unit')
    args = parser.parse_args(argv)
    return args
|
def set_env(args):
    """Reset the default TF graph and seed NumPy/TensorFlow for reproducibility."""
    tf.reset_default_graph()
    seed = args.seed
    np.random.seed(seed)
    tf.set_random_seed(seed)
|
def get_data(ix, int_batch, train_size, data=None, labels=None):
    """Return one mini-batch, clamping the final batch to the data size.

    When the window would run past the end, it is slid back so the last
    batch is always full-sized (some examples are repeated across batches).

    Args:
        ix: start index of the batch.
        int_batch: batch size.
        train_size: total number of training examples.
        data, labels: optional sequences to slice; default to the
            module-level ``train_data`` / ``train_label`` globals, keeping
            the original call signature backward-compatible.

    Returns:
        (batch_data, batch_labels) slices of length int_batch.
    """
    if data is None:
        data = train_data
    if labels is None:
        labels = train_label
    if ix + int_batch >= train_size:
        # Slide the window back so the final batch is still full-sized.
        ix = train_size - int_batch
        end = train_size
    else:
        end = ix + int_batch
    return data[ix:end], labels[ix:end]
|
def load_data(args):
    """Load the requested dataset and derive size parameters.

    Returns:
        (adj_list, features, train_data, train_label, test_data, test_label,
        paras) where paras = [node_size, node_embedding, class_size, train_size].

    Raises:
        ValueError: if args.dataset_str names an unknown dataset (previously
            this fell through to a confusing UnboundLocalError).
    """
    if args.dataset_str == 'dblp':
        adj_list, features, train_data, train_label, test_data, test_label = load_data_dblp()
    elif args.dataset_str == 'example':
        adj_list, features, train_data, train_label, test_data, test_label = load_example_gem()
    else:
        raise ValueError('Unknown dataset_str: {!r}'.format(args.dataset_str))
    node_size = features.shape[0]
    node_embedding = features.shape[1]
    class_size = train_label.shape[1]
    train_size = len(train_data)
    paras = [node_size, node_embedding, class_size, train_size]
    return adj_list, features, train_data, train_label, test_data, test_label, paras
|
def train(args, adj_list, features, train_data, train_label, test_data, test_label, paras):
    """Train the GEM model and evaluate it on the test split.

    Args:
        args: parsed CLI arguments (epoch_num, batch_size, k, hop, ...).
        adj_list: list of adjacency views; its length is the meta size.
        features: node feature matrix.
        train_data, train_label: training indices and one-hot labels.
        test_data, test_label: test indices and one-hot labels.
        paras: [node_size, node_embedding, class_size, train_size]
            as produced by load_data().
    """
    with tf.Session() as sess:
        adj_data = adj_list
        meta_size = len(adj_list)
        net = GEM(session=sess, class_size=paras[2], encoding=args.k,
                  meta=meta_size, nodes=paras[0], embedding=paras[1], hop=args.hop)
        sess.run(tf.global_variables_initializer())
        # BUG FIX: time.clock() was removed in Python 3.8; use perf_counter().
        t_start = time.perf_counter()
        for epoch in range(args.epoch_num):
            train_loss = 0
            train_acc = 0
            count = 0
            for index in range(0, paras[3], args.batch_size):
                batch_data, batch_label = get_data(index, args.batch_size, paras[3])
                loss, acc, pred, prob = net.train(features, adj_data, batch_label,
                                                  batch_data, args.learning_rate,
                                                  args.momentum)
                print('batch loss: {:.4f}, batch acc: {:.4f}'.format(loss, acc))
                train_loss += loss
                train_acc += acc
                count += 1
            train_loss = train_loss / count
            train_acc = train_acc / count
            print('epoch{:d} : train_loss: {:.4f}, train_acc: {:.4f}'.format(epoch, train_loss, train_acc))
        t_end = time.perf_counter()
        print('train time=', '{:.5f}'.format(t_end - t_start))
        print('Train end!')
        test_acc, test_pred, test_probabilities, test_tags = net.test(features, adj_data, test_label, test_data)
        print('test acc:', test_acc)
|
def arg_parser(argv=None):
    """Parse command-line arguments for GeniePath training.

    Args:
        argv: optional list of argument strings; None preserves the default
            argparse behaviour of reading sys.argv, so existing callers work.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=123, help='Random seed.')
    parser.add_argument('--dataset_str', type=str, default='dblp', help="['dblp','example']")
    parser.add_argument('--epoch_num', type=int, default=30, help='Number of epochs to train.')
    parser.add_argument('--batch_size', type=int, default=1000)
    # BUG FIX: momentum was declared type=int, which rejects float values
    # such as the 0.9 default when passed on the command line.
    parser.add_argument('--momentum', type=float, default=0.9)
    # BUG FIX: help text was copy-pasted from an unrelated option; also add
    # explicit types so CLI-supplied values are not left as strings.
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate.')
    parser.add_argument('--dim', type=int, default=128)
    parser.add_argument('--lstm_hidden', type=int, default=128, help='lstm_hidden unit')
    parser.add_argument('--heads', type=int, default=1, help='gat heads')
    parser.add_argument('--layer_num', type=int, default=4, help='geniePath layer num')
    args = parser.parse_args(argv)
    return args
|
def set_env(args):
    """Clear any existing default graph, then seed both RNGs from args.seed."""
    tf.reset_default_graph()
    np.random.seed(args.seed)
    tf.set_random_seed(args.seed)
|
def get_data(ix, int_batch, train_size, data=None, labels=None):
    """Return one mini-batch, clamping the final batch to the data size.

    When the window would run past the end, it is slid back so the last
    batch is always full-sized (some examples are repeated across batches).

    Args:
        ix: start index of the batch.
        int_batch: batch size.
        train_size: total number of training examples.
        data, labels: optional sequences to slice; default to the
            module-level ``train_data`` / ``train_label`` globals, keeping
            the original call signature backward-compatible.

    Returns:
        (batch_data, batch_labels) slices of length int_batch.
    """
    if data is None:
        data = train_data
    if labels is None:
        labels = train_label
    if ix + int_batch >= train_size:
        # Slide the window back so the final batch is still full-sized.
        ix = train_size - int_batch
        end = train_size
    else:
        end = ix + int_batch
    return data[ix:end], labels[ix:end]
|
def load_data(args):
    """Load the dblp dataset and derive size parameters.

    Returns:
        (adj_list, features, train_data, train_label, test_data, test_label,
        paras) where paras = [node_size, node_embedding, class_size, train_size].

    Raises:
        ValueError: if args.dataset_str is not 'dblp' (previously any other
            value fell through to a confusing UnboundLocalError).
    """
    if args.dataset_str == 'dblp':
        adj_list, features, train_data, train_label, test_data, test_label = load_data_dblp()
    else:
        raise ValueError('Unknown dataset_str: {!r}'.format(args.dataset_str))
    node_size = features.shape[0]
    node_embedding = features.shape[1]
    class_size = train_label.shape[1]
    train_size = len(train_data)
    paras = [node_size, node_embedding, class_size, train_size]
    return adj_list, features, train_data, train_label, test_data, test_label, paras
|
def train(args, adj_list, features, train_data, train_label, test_data, test_label, paras):
    """Train the GeniePath model and evaluate it on the test split.

    Args:
        args: parsed CLI arguments (epoch_num, batch_size, dim, heads, ...).
        adj_list: adjacency data fed to the model at every step.
        features: node feature matrix.
        train_data, train_label: training indices and one-hot labels.
        test_data, test_label: test indices and one-hot labels.
        paras: [node_size, node_embedding, class_size, train_size]
            as produced by load_data().
    """
    with tf.Session() as sess:
        adj_data = adj_list
        net = GeniePath(session=sess, out_dim=paras[2], dim=args.dim,
                        lstm_hidden=args.lstm_hidden, nodes=paras[0],
                        in_dim=paras[1], heads=args.heads,
                        layer_num=args.layer_num, class_size=paras[2])
        sess.run(tf.global_variables_initializer())
        # BUG FIX: time.clock() was removed in Python 3.8; use perf_counter().
        t_start = time.perf_counter()
        for epoch in range(args.epoch_num):
            train_loss = 0
            train_acc = 0
            count = 0
            for index in range(0, paras[3], args.batch_size):
                batch_data, batch_label = get_data(index, args.batch_size, paras[3])
                loss, acc, pred, prob = net.train(features, adj_data, batch_label,
                                                  batch_data, args.learning_rate,
                                                  args.momentum)
                print('batch loss: {:.4f}, batch acc: {:.4f}'.format(loss, acc))
                train_loss += loss
                train_acc += acc
                count += 1
            train_loss = train_loss / count
            train_acc = train_acc / count
            print('epoch{:d} : train_loss: {:.4f}, train_acc: {:.4f}'.format(epoch, train_loss, train_acc))
        t_end = time.perf_counter()
        print('train time=', '{:.5f}'.format(t_end - t_start))
        print('Train end!')
        test_acc, test_pred, test_probabilities, test_tags = net.test(features, adj_data, test_label, test_data)
        print('test acc:', test_acc)
|
class MeanAggregator(Layer):
    """
    Aggregates via mean followed by matmul and non-linearity.
    """

    def __init__(self, input_dim, output_dim, neigh_input_dim=None,
                 dropout=0.0, bias=False, act=tf.nn.relu, name=None,
                 concat=False, **kwargs):
        super(MeanAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        # BUG FIX: input_dim/output_dim were assigned after the bias branch
        # below, so zeros([self.output_dim]) raised AttributeError whenever
        # bias=True. Assign them up front.
        self.input_dim = input_dim
        self.output_dim = output_dim
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        if name is not None:
            name = '/' + name
        else:
            name = ''
        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([neigh_input_dim, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        """Mean-pool neighbor vectors, project self/neighbor parts, combine."""
        self_vecs, neigh_vecs = inputs
        neigh_vecs = tf.nn.dropout(neigh_vecs, 1 - self.dropout)
        self_vecs = tf.nn.dropout(self_vecs, 1 - self.dropout)
        neigh_means = tf.reduce_mean(neigh_vecs, axis=1)
        from_neighs = tf.matmul(neigh_means, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        if not self.concat:
            output = tf.add_n([from_self, from_neighs])
        else:
            output = tf.concat([from_self, from_neighs], axis=1)
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
class HeteMeanAggregator(Layer):
    """
    Aggregates via mean followed by matmul and non-linearity.
    (Heterogeneous-graph variant of MeanAggregator.)
    """

    def __init__(self, input_dim, output_dim, neigh_input_dim=None,
                 dropout=0.0, bias=False, act=tf.nn.relu, name=None,
                 concat=False, **kwargs):
        # BUG FIX: called super(MeanAggregator, self).__init__, which raises
        # TypeError because HeteMeanAggregator is not a MeanAggregator
        # subclass; use the correct class in the super() call.
        super(HeteMeanAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        # BUG FIX: input_dim/output_dim were assigned after the bias branch
        # below, so zeros([self.output_dim]) raised AttributeError whenever
        # bias=True. Assign them up front.
        self.input_dim = input_dim
        self.output_dim = output_dim
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        if name is not None:
            name = '/' + name
        else:
            name = ''
        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([neigh_input_dim, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        """Mean-pool neighbor vectors, project self/neighbor parts, combine."""
        self_vecs, neigh_vecs = inputs
        neigh_vecs = tf.nn.dropout(neigh_vecs, 1 - self.dropout)
        self_vecs = tf.nn.dropout(self_vecs, 1 - self.dropout)
        neigh_means = tf.reduce_mean(neigh_vecs, axis=1)
        from_neighs = tf.matmul(neigh_means, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        if not self.concat:
            output = tf.add_n([from_self, from_neighs])
        else:
            output = tf.concat([from_self, from_neighs], axis=1)
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
class GCNAggregator(Layer):
    """
    Aggregates via mean followed by matmul and non-linearity.
    Same matmul parameters are used self vector and neighbor vectors.
    """

    def __init__(self, input_dim, output_dim, neigh_input_dim=None,
                 dropout=0.0, bias=False, act=tf.nn.relu, name=None,
                 concat=False, **kwargs):
        super(GCNAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        # BUG FIX: input_dim/output_dim were assigned after the bias branch
        # below, so zeros([self.output_dim]) raised AttributeError whenever
        # bias=True. Assign them up front.
        self.input_dim = input_dim
        self.output_dim = output_dim
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        if name is not None:
            name = '/' + name
        else:
            name = ''
        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['weights'] = glorot([neigh_input_dim, output_dim], name='neigh_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        """GCN-style mean over self + neighbors, then a single shared matmul."""
        self_vecs, neigh_vecs = inputs
        neigh_vecs = tf.nn.dropout(neigh_vecs, 1 - self.dropout)
        self_vecs = tf.nn.dropout(self_vecs, 1 - self.dropout)
        # Include the node's own vector in the mean (expand to a neighbor slot).
        means = tf.reduce_mean(tf.concat([neigh_vecs, tf.expand_dims(self_vecs, axis=1)], axis=1), axis=1)
        output = tf.matmul(means, self.vars['weights'])
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
class MaxPoolingAggregator(Layer):
    """ Aggregates via max-pooling over MLP functions.
    """

    def __init__(self, input_dim, output_dim, model_size='small', neigh_input_dim=None,
                 dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(MaxPoolingAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        # BUG FIX: input_dim/output_dim were assigned after the bias branch
        # below, so zeros([self.output_dim]) raised AttributeError whenever
        # bias=True. Assign them up front.
        self.input_dim = input_dim
        self.output_dim = output_dim
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        self.neigh_input_dim = neigh_input_dim
        if name is not None:
            name = '/' + name
        else:
            name = ''
        if model_size == 'small':
            hidden_dim = self.hidden_dim = 512
        elif model_size == 'big':
            hidden_dim = self.hidden_dim = 1024
        else:
            # Previously an unknown model_size left hidden_dim undefined
            # and failed later with a confusing NameError.
            raise ValueError('Unknown model_size: ' + str(model_size))
        self.mlp_layers = []
        self.mlp_layers.append(Dense(input_dim=neigh_input_dim, output_dim=hidden_dim,
                                     act=tf.nn.relu, dropout=dropout,
                                     sparse_inputs=False, logging=self.logging))
        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([hidden_dim, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        """Run each neighbor through the MLP, max-pool, project, combine."""
        self_vecs, neigh_vecs = inputs
        neigh_h = neigh_vecs
        dims = tf.shape(neigh_h)
        batch_size = dims[0]
        num_neighbors = dims[1]
        # Flatten (batch, neigh, feat) -> (batch*neigh, feat) for the MLP.
        h_reshaped = tf.reshape(neigh_h, (batch_size * num_neighbors, self.neigh_input_dim))
        for l in self.mlp_layers:
            h_reshaped = l(h_reshaped)
        neigh_h = tf.reshape(h_reshaped, (batch_size, num_neighbors, self.hidden_dim))
        neigh_h = tf.reduce_max(neigh_h, axis=1)
        from_neighs = tf.matmul(neigh_h, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        if not self.concat:
            output = tf.add_n([from_self, from_neighs])
        else:
            output = tf.concat([from_self, from_neighs], axis=1)
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
class MeanPoolingAggregator(Layer):
    """ Aggregates via mean-pooling over MLP functions.
    """

    def __init__(self, input_dim, output_dim, model_size='small', neigh_input_dim=None,
                 dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(MeanPoolingAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        # BUG FIX: input_dim/output_dim were assigned after the bias branch
        # below, so zeros([self.output_dim]) raised AttributeError whenever
        # bias=True. Assign them up front.
        self.input_dim = input_dim
        self.output_dim = output_dim
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        self.neigh_input_dim = neigh_input_dim
        if name is not None:
            name = '/' + name
        else:
            name = ''
        if model_size == 'small':
            hidden_dim = self.hidden_dim = 512
        elif model_size == 'big':
            hidden_dim = self.hidden_dim = 1024
        else:
            # Previously an unknown model_size left hidden_dim undefined
            # and failed later with a confusing NameError.
            raise ValueError('Unknown model_size: ' + str(model_size))
        self.mlp_layers = []
        self.mlp_layers.append(Dense(input_dim=neigh_input_dim, output_dim=hidden_dim,
                                     act=tf.nn.relu, dropout=dropout,
                                     sparse_inputs=False, logging=self.logging))
        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([hidden_dim, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        """Run each neighbor through the MLP, mean-pool, project, combine."""
        self_vecs, neigh_vecs = inputs
        neigh_h = neigh_vecs
        dims = tf.shape(neigh_h)
        batch_size = dims[0]
        num_neighbors = dims[1]
        # Flatten (batch, neigh, feat) -> (batch*neigh, feat) for the MLP.
        h_reshaped = tf.reshape(neigh_h, (batch_size * num_neighbors, self.neigh_input_dim))
        for l in self.mlp_layers:
            h_reshaped = l(h_reshaped)
        neigh_h = tf.reshape(h_reshaped, (batch_size, num_neighbors, self.hidden_dim))
        neigh_h = tf.reduce_mean(neigh_h, axis=1)
        from_neighs = tf.matmul(neigh_h, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        if not self.concat:
            output = tf.add_n([from_self, from_neighs])
        else:
            output = tf.concat([from_self, from_neighs], axis=1)
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
class TwoMaxLayerPoolingAggregator(Layer):
    """ Aggregates via pooling over two MLP functions.
    """

    def __init__(self, input_dim, output_dim, model_size='small', neigh_input_dim=None,
                 dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(TwoMaxLayerPoolingAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        # BUG FIX: input_dim/output_dim were assigned after the bias branch
        # below, so zeros([self.output_dim]) raised AttributeError whenever
        # bias=True. Assign them up front.
        self.input_dim = input_dim
        self.output_dim = output_dim
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        self.neigh_input_dim = neigh_input_dim
        if name is not None:
            name = '/' + name
        else:
            name = ''
        if model_size == 'small':
            hidden_dim_1 = self.hidden_dim_1 = 512
            hidden_dim_2 = self.hidden_dim_2 = 256
        elif model_size == 'big':
            hidden_dim_1 = self.hidden_dim_1 = 1024
            hidden_dim_2 = self.hidden_dim_2 = 512
        else:
            # Previously an unknown model_size left the hidden dims undefined
            # and failed later with a confusing NameError.
            raise ValueError('Unknown model_size: ' + str(model_size))
        self.mlp_layers = []
        self.mlp_layers.append(Dense(input_dim=neigh_input_dim, output_dim=hidden_dim_1,
                                     act=tf.nn.relu, dropout=dropout,
                                     sparse_inputs=False, logging=self.logging))
        self.mlp_layers.append(Dense(input_dim=hidden_dim_1, output_dim=hidden_dim_2,
                                     act=tf.nn.relu, dropout=dropout,
                                     sparse_inputs=False, logging=self.logging))
        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([hidden_dim_2, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        """Run each neighbor through the two-layer MLP, max-pool, combine."""
        self_vecs, neigh_vecs = inputs
        neigh_h = neigh_vecs
        dims = tf.shape(neigh_h)
        batch_size = dims[0]
        num_neighbors = dims[1]
        # Flatten (batch, neigh, feat) -> (batch*neigh, feat) for the MLP.
        h_reshaped = tf.reshape(neigh_h, (batch_size * num_neighbors, self.neigh_input_dim))
        for l in self.mlp_layers:
            h_reshaped = l(h_reshaped)
        neigh_h = tf.reshape(h_reshaped, (batch_size, num_neighbors, self.hidden_dim_2))
        neigh_h = tf.reduce_max(neigh_h, axis=1)
        from_neighs = tf.matmul(neigh_h, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        if not self.concat:
            output = tf.add_n([from_self, from_neighs])
        else:
            output = tf.concat([from_self, from_neighs], axis=1)
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
class SeqAggregator(Layer):
    """ Aggregates via a standard LSTM.
    """

    def __init__(self, input_dim, output_dim, model_size='small', neigh_input_dim=None,
                 dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(SeqAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        # BUG FIX: input_dim/output_dim were assigned after the bias branch
        # below, so zeros([self.output_dim]) raised AttributeError whenever
        # bias=True. Assign them up front.
        self.input_dim = input_dim
        self.output_dim = output_dim
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        self.neigh_input_dim = neigh_input_dim
        if name is not None:
            name = '/' + name
        else:
            name = ''
        if model_size == 'small':
            hidden_dim = self.hidden_dim = 128
        elif model_size == 'big':
            hidden_dim = self.hidden_dim = 256
        else:
            # Previously an unknown model_size left hidden_dim undefined
            # and failed later with a confusing NameError.
            raise ValueError('Unknown model_size: ' + str(model_size))
        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([hidden_dim, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()
        self.cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_dim)

    def _call(self, inputs):
        """Feed the neighbor sequence to an LSTM; use each row's last valid output."""
        self_vecs, neigh_vecs = inputs
        dims = tf.shape(neigh_vecs)
        batch_size = dims[0]
        initial_state = self.cell.zero_state(batch_size, tf.float32)
        # Sequence length = number of neighbor rows that are not all-zero
        # padding, clamped to at least 1 so dynamic_rnn always produces output.
        used = tf.sign(tf.reduce_max(tf.abs(neigh_vecs), axis=2))
        length = tf.reduce_sum(used, axis=1)
        length = tf.maximum(length, tf.constant(1.0))
        length = tf.cast(length, tf.int32)
        with tf.variable_scope(self.name) as scope:
            try:
                rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
                    self.cell, neigh_vecs, initial_state=initial_state,
                    dtype=tf.float32, time_major=False, sequence_length=length)
            except ValueError:
                # LSTM variables already exist (aggregator reused): share them.
                scope.reuse_variables()
                rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
                    self.cell, neigh_vecs, initial_state=initial_state,
                    dtype=tf.float32, time_major=False, sequence_length=length)
        batch_size = tf.shape(rnn_outputs)[0]
        max_len = tf.shape(rnn_outputs)[1]
        out_size = int(rnn_outputs.get_shape()[2])
        # Gather the output at position (length - 1) for every sequence.
        index = tf.range(0, batch_size) * max_len + (length - 1)
        flat = tf.reshape(rnn_outputs, [-1, out_size])
        neigh_h = tf.gather(flat, index)
        from_neighs = tf.matmul(neigh_h, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        # BUG FIX: removed a dead `output = tf.add_n(...)` assignment that was
        # unconditionally overwritten by the branch below.
        if not self.concat:
            output = tf.add_n([from_self, from_neighs])
        else:
            output = tf.concat([from_self, from_neighs], axis=1)
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
def uniform(shape, scale=0.05, name=None):
    """Uniform init in [-scale, scale]."""
    values = tf.random_uniform(shape, minval=-scale, maxval=scale, dtype=tf.float32)
    return tf.Variable(values, name=name)
|
def glorot(shape, name=None):
    """Glorot & Bengio (AISTATS 2010) init."""
    # Limit sqrt(6 / (fan_in + fan_out)) for a uniform Xavier initializer.
    limit = np.sqrt(6.0 / (shape[0] + shape[1]))
    values = tf.random_uniform(shape, minval=-limit, maxval=limit, dtype=tf.float32)
    return tf.Variable(values, name=name)
|
def zeros(shape, name=None):
    """All zeros."""
    return tf.Variable(tf.zeros(shape, dtype=tf.float32), name=name)
|
def ones(shape, name=None):
    """All ones."""
    return tf.Variable(tf.ones(shape, dtype=tf.float32), name=name)
|
def get_layer_uid(layer_name=''):
    """Helper function, assigns unique layer IDs."""
    # Start counting at 1 for each new layer name, then increment.
    next_uid = _LAYER_UIDS.get(layer_name, 0) + 1
    _LAYER_UIDS[layer_name] = next_uid
    return next_uid
|
class Layer(object):
    """Base layer class. Defines basic API for all layer objects.
    Implementation inspired by keras (http://keras.io).

    # Properties
        name: String, defines the variable scope of the layer.
        logging: Boolean, switches Tensorflow histogram logging on/off

    # Methods
        _call(inputs): Defines computation graph of layer
            (i.e. takes input, returns output)
        __call__(inputs): Wrapper for _call()
        _log_vars(): Log all variables
    """

    def __init__(self, **kwargs):
        allowed_kwargs = {'name', 'logging', 'model_size'}
        for kwarg in kwargs:
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        name = kwargs.get('name')
        if not name:
            # Auto-generate a unique name such as "dense_1".
            layer_type = self.__class__.__name__.lower()
            name = '{}_{}'.format(layer_type, get_layer_uid(layer_type))
        self.name = name
        self.vars = {}
        self.logging = kwargs.get('logging', False)
        self.sparse_inputs = False

    def _call(self, inputs):
        # Identity by default; subclasses override with real computation.
        return inputs

    def __call__(self, inputs):
        with tf.name_scope(self.name):
            if self.logging and not self.sparse_inputs:
                tf.summary.histogram(self.name + '/inputs', inputs)
            outputs = self._call(inputs)
            if self.logging:
                tf.summary.histogram(self.name + '/outputs', outputs)
            return outputs

    def _log_vars(self):
        for var in self.vars:
            tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
|
class Dense(Layer):
    """Dense layer."""

    def __init__(self, input_dim, output_dim, dropout=0.0, act=tf.nn.relu,
                 placeholders=None, bias=True, featureless=False,
                 sparse_inputs=False, **kwargs):
        super(Dense, self).__init__(**kwargs)
        self.dropout = dropout
        self.act = act
        self.featureless = featureless
        self.bias = bias
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.sparse_inputs = sparse_inputs
        if sparse_inputs:
            # Sparse inputs need the nonzero count from the feed dict.
            self.num_features_nonzero = placeholders['num_features_nonzero']
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = tf.get_variable(
                'weights', shape=(input_dim, output_dim), dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer(),
                regularizer=tf.contrib.layers.l2_regularizer(FLAGS.weight_decay))
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        """Apply dropout, the affine projection, and the activation."""
        dropped = tf.nn.dropout(inputs, 1 - self.dropout)
        projected = tf.matmul(dropped, self.vars['weights'])
        if self.bias:
            projected += self.vars['bias']
        return self.act(projected)
|
def masked_logit_cross_entropy(preds, labels, mask):
    """Logit cross-entropy loss with masking."""
    per_node = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)
    per_node = tf.reduce_sum(per_node, axis=1)
    # Normalize the mask so masked-out nodes contribute zero loss.
    weights = tf.cast(mask, dtype=tf.float32)
    weights /= tf.maximum(tf.reduce_sum(weights), tf.constant([1.0]))
    return tf.reduce_mean(per_node * weights)
|
def masked_softmax_cross_entropy(preds, labels, mask):
    """Softmax cross-entropy loss with masking."""
    per_node = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
    # Normalize the mask so masked-out nodes contribute zero loss.
    weights = tf.cast(mask, dtype=tf.float32)
    weights /= tf.maximum(tf.reduce_sum(weights), tf.constant([1.0]))
    return tf.reduce_mean(per_node * weights)
|
def masked_l2(preds, actuals, mask):
    """L2 loss with masking.

    BUG FIX: the original called tf.nn.l2(preds, actuals), which does not
    exist in any TensorFlow release and raised AttributeError at graph
    build time. Compute an explicit per-example squared-error reduction
    instead, mirroring the per-node shape used by the other masked losses.
    """
    loss = tf.reduce_sum(tf.square(preds - actuals), axis=1)
    mask = tf.cast(mask, dtype=tf.float32)
    mask /= tf.reduce_mean(mask)
    loss *= mask
    return tf.reduce_mean(loss)
|
def masked_accuracy(preds, labels, mask):
    """Accuracy with masking."""
    hits = tf.cast(tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1)), tf.float32)
    # Re-weight so the mean is taken only over the masked-in nodes.
    weights = tf.cast(mask, dtype=tf.float32)
    weights /= tf.reduce_mean(weights)
    return tf.reduce_mean(hits * weights)
|
class UniformNeighborSampler(Layer):
    """
    Uniformly samples neighbors.
    Assumes that adj lists are padded with random re-sampling
    """

    def __init__(self, adj_info, **kwargs):
        super(UniformNeighborSampler, self).__init__(**kwargs)
        self.adj_info = adj_info

    def _call(self, inputs):
        ids, num_samples = inputs
        # Look up the padded neighbor lists for the requested node ids.
        neigh_lists = tf.nn.embedding_lookup(self.adj_info, ids)
        # random_shuffle permutes along axis 0, so transpose to shuffle the
        # neighbor axis, then transpose back and keep the first num_samples.
        shuffled = tf.transpose(tf.random_shuffle(tf.transpose(neigh_lists)))
        return tf.slice(shuffled, [0, 0], [-1, num_samples])
|
class DistanceNeighborSampler(Layer):
    """Sampling neighbors based on the feature consistency.

    Neighbors are drawn with probability proportional to exp(-d), where d is
    the Euclidean feature distance to the center node, so feature-consistent
    neighbors are preferred; probabilities at or below eps are zeroed out.
    """

    def __init__(self, adj_info, **kwargs):
        super(DistanceNeighborSampler, self).__init__(**kwargs)
        self.adj_info = adj_info
        # Width of the padded neighbor lists (last axis of the adjacency table).
        self.num_neighs = adj_info.shape[(- 1)]

    def _call(self, inputs):
        # Minimum probability a neighbor must reach to stay sampleable.
        eps = 0.001
        (ids, num_samples, features, batch_size) = inputs
        # Neighbor id lists and feature vectors of the batch nodes.
        adj_lists = tf.gather(self.adj_info, ids)
        node_features = tf.gather(features, ids)
        feature_size = tf.shape(features)[(- 1)]
        # Tile each center node's features once per padded neighbor slot so
        # the distance below can be computed element-wise.
        node_feature_repeat = tf.tile(node_features, [1, self.num_neighs])
        node_feature_repeat = tf.reshape(node_feature_repeat, [batch_size, self.num_neighs, feature_size])
        neighbor_feature = tf.gather(features, adj_lists)
        # Euclidean feature distance, turned into an unnormalized similarity.
        distance = tf.sqrt(tf.reduce_sum(tf.square((node_feature_repeat - neighbor_feature)), (- 1)))
        prob = tf.exp((- distance))
        # Normalize into a categorical distribution over the padded neighbors.
        prob_sum = tf.reduce_sum(prob, (- 1), keepdims=True)
        prob_sum = tf.tile(prob_sum, [1, self.num_neighs])
        prob = tf.divide(prob, prob_sum)
        # Zero out near-zero probabilities so distant neighbors are never drawn.
        prob = tf.where((prob > eps), prob, (0 * prob))
        # tf.random.categorical expects log-probabilities (logits).
        samples_idx = tf.random.categorical(tf.math.log(prob), num_samples)
        selected = tf.batch_gather(adj_lists, samples_idx)
        return selected
|
class BipartiteEdgePredLayer(Layer):
def __init__(self, input_dim1, input_dim2, placeholders, dropout=False, act=tf.nn.sigmoid, loss_fn='xent', neg_sample_weights=1.0, bias=False, bilinear_weights=False, **kwargs):
'\n Basic class that applies skip-gram-like loss\n (i.e., dot product of node+target and node and negative samples)\n Args:\n bilinear_weights: use a bilinear weight for affinity calculation: u^T A v. If set to\n false, it is assumed that input dimensions are the same and the affinity will be \n based on dot product.\n '
super(BipartiteEdgePredLayer, self).__init__(**kwargs)
self.input_dim1 = input_dim1
self.input_dim2 = input_dim2
self.act = act
self.bias = bias
self.eps = 1e-07
self.margin = 0.1
self.neg_sample_weights = neg_sample_weights
self.bilinear_weights = bilinear_weights
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.0
self.output_dim = 1
with tf.variable_scope((self.name + '_vars')):
if bilinear_weights:
self.vars['weights'] = tf.get_variable('pred_weights', shape=(input_dim1, input_dim2), dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
if self.bias:
self.vars['bias'] = zeros([self.output_dim], name='bias')
if (loss_fn == 'xent'):
self.loss_fn = self._xent_loss
elif (loss_fn == 'skipgram'):
self.loss_fn = self._skipgram_loss
elif (loss_fn == 'hinge'):
self.loss_fn = self._hinge_loss
if self.logging:
self._log_vars()
def affinity(self, inputs1, inputs2):
' Affinity score between batch of inputs1 and inputs2.\n Args:\n inputs1: tensor of shape [batch_size x feature_size].\n '
if self.bilinear_weights:
prod = tf.matmul(inputs2, tf.transpose(self.vars['weights']))
self.prod = prod
result = tf.reduce_sum((inputs1 * prod), axis=1)
else:
result = tf.reduce_sum((inputs1 * inputs2), axis=1)
return result
def neg_cost(self, inputs1, neg_samples, hard_neg_samples=None):
' For each input in batch, compute the sum of its affinity to negative samples.\n\n Returns:\n Tensor of shape [batch_size x num_neg_samples]. For each node, a list of affinities to\n negative samples is computed.\n '
if self.bilinear_weights:
inputs1 = tf.matmul(inputs1, self.vars['weights'])
neg_aff = tf.matmul(inputs1, tf.transpose(neg_samples))
return neg_aff
def loss(self, inputs1, inputs2, neg_samples):
' negative sampling loss.\n Args:\n neg_samples: tensor of shape [num_neg_samples x input_dim2]. Negative samples for all\n inputs in batch inputs1.\n '
return self.loss_fn(inputs1, inputs2, neg_samples)
def _xent_loss(self, inputs1, inputs2, neg_samples, hard_neg_samples=None):
aff = self.affinity(inputs1, inputs2)
neg_aff = self.neg_cost(inputs1, neg_samples, hard_neg_samples)
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(aff), logits=aff)
negative_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(neg_aff), logits=neg_aff)
loss = (tf.reduce_sum(true_xent) + (self.neg_sample_weights * tf.reduce_sum(negative_xent)))
return loss
def _skipgram_loss(self, inputs1, inputs2, neg_samples, hard_neg_samples=None):
    """Skip-gram style objective: positive affinity minus the log-sum-exp
    over affinities to the negative samples.
    """
    pos_aff = self.affinity(inputs1, inputs2)
    neg_aff = self.neg_cost(inputs1, neg_samples, hard_neg_samples)
    # log-partition over the negative pool for each batch row.
    partition = tf.log(tf.reduce_sum(tf.exp(neg_aff), axis=1))
    return tf.reduce_sum(pos_aff - partition)
def _hinge_loss(self, inputs1, inputs2, neg_samples, hard_neg_samples=None):
    """Margin-based hinge loss between positive and negative affinities."""
    pos_aff = self.affinity(inputs1, inputs2)
    neg_aff = self.neg_cost(inputs1, neg_samples, hard_neg_samples)
    # Penalize any negative whose score comes within `margin` of the positive.
    diff = tf.nn.relu(tf.subtract(neg_aff, tf.expand_dims(pos_aff, 1) - self.margin), name='diff')
    loss = tf.reduce_sum(diff)
    self.neg_shape = tf.shape(neg_aff)
    return loss
def weights_norm(self):
    """Euclidean (Frobenius) norm of the bilinear prediction weights.

    Bug fix: the original called ``tf.nn.l2_norm``, which does not exist in
    TensorFlow and raised AttributeError whenever this method was invoked.
    ``tf.norm`` computes the intended L2 norm of the matrix.
    """
    return tf.norm(self.vars['weights'])
|
class SupervisedGraphconsis(models.SampleAndAggregate):
    """Implementation of supervised GraphConsis.

    For each relation, neighbors are sampled and aggregated GraphSAGE-style.
    Each per-relation embedding is concatenated with a learned relation
    vector, weighted by a learned attention score, summed over relations,
    l2-normalized, and fed to a dense classification layer.
    """

    def __init__(self, num_classes, placeholders, features, adj, degrees, layer_infos, concat=True, aggregator_type='mean', model_size='small', sigmoid_loss=False, identity_dim=0, num_re=3, **kwargs):
        """
        Args:
            num_classes: number of output classes.
            placeholders: Stanford TensorFlow placeholder object.
            features: Numpy array with node features.
            adj: Numpy array with adjacency lists (padded with random re-samples).
            degrees: Numpy array with node degrees.
            layer_infos: list with one entry per relation; each entry is a list
                of SAGEInfo namedtuples describing the recursive layers.
            concat: whether to concatenate during recursive iterations.
            aggregator_type: how to aggregate neighbor information.
            model_size: one of "small" and "big".
            sigmoid_loss: set to True if nodes can belong to multiple classes.
            identity_dim: context-embedding dimension (0 disables it).
            num_re: number of relations.
        """
        models.GeneralizedModel.__init__(self, **kwargs)
        aggregator_classes = {
            'mean': MeanAggregator,
            'seq': SeqAggregator,
            'meanpool': MeanPoolingAggregator,
            'maxpool': MaxPoolingAggregator,
            'gcn': GCNAggregator,
        }
        if aggregator_type not in aggregator_classes:
            # Bug fix: the original raised referencing the never-assigned
            # self.aggregator_cls, which produced an AttributeError instead
            # of the intended message.
            raise Exception('Unknown aggregator: ', aggregator_type)
        self.aggregator_cls = aggregator_classes[aggregator_type]
        self.inputs1 = placeholders['batch']
        self.model_size = model_size
        self.adj_info = adj
        # Optional trainable per-node context (identity) embeddings.
        if identity_dim > 0:
            self.embeds_context = tf.get_variable('node_embeddings', [features.shape[0], identity_dim])
        else:
            self.embeds_context = None
        if features is None:
            if identity_dim == 0:
                raise Exception('Must have a positive value for identity feature dimension if no input features given.')
            self.features = self.embeds_context
        else:
            self.features = tf.Variable(tf.constant(features, dtype=tf.float32), trainable=False)
            if self.embeds_context is not None:
                self.features = tf.concat([self.embeds_context, self.features], axis=1)
        self.degrees = degrees
        self.concat = concat
        self.num_classes = num_classes
        self.sigmoid_loss = sigmoid_loss
        # Per-layer dims; output dims are shared across relations, so the
        # stack of relation 0 is used as the template.
        self.dims = [(0 if features is None else features.shape[1]) + identity_dim]
        self.dims.extend([layer_infos[0][i].output_dim for i in range(len(layer_infos[0]))])
        self.batch_size = placeholders['batch_size']
        self.placeholders = placeholders
        self.layer_infos = layer_infos
        self.num_relations = num_re
        dim_mult = 2 if self.concat else 1
        # One trainable vector per relation, plus an attention vector applied
        # to [embedding ; relation vector] pairs.
        self.relation_vectors = tf.Variable(glorot([num_re, self.dims[-1] * dim_mult]), trainable=True, name='relation_vectors')
        self.attention_vec = tf.Variable(glorot([self.dims[-1] * dim_mult * 2, 1]))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
        self.build()

    def build(self):
        """Sample/aggregate per relation, attention-combine, classify."""
        samples1_list, support_sizes1_list = [], []
        for r_idx in range(self.num_relations):
            samples1, support_sizes1 = self.sample(self.inputs1, self.layer_infos[r_idx])
            samples1_list.append(samples1)
            support_sizes1_list.append(support_sizes1)
        num_samples = [layer_info.num_samples for layer_info in self.layer_infos[0]]
        self.outputs1_list = []
        dim_mult = 2 if self.concat else 1
        # Doubled because each embedding is concatenated with its relation vector.
        dim_mult = dim_mult * 2
        for r_idx in range(self.num_relations):
            # Bug fix: the original passed the loop-leftover `support_sizes1`
            # (always the last relation's) instead of this relation's sizes.
            outputs1, self.aggregators = self.aggregate(samples1_list[r_idx], [self.features], self.dims, num_samples, support_sizes1_list[r_idx], concat=self.concat, model_size=self.model_size)
            # NOTE(review): self.aggregators is overwritten on every relation,
            # so _loss() only regularizes the last relation's aggregator
            # weights — confirm whether that is intended.
            self.relation_batch = tf.tile([tf.nn.embedding_lookup(self.relation_vectors, r_idx)], [self.batch_size, 1])
            outputs1 = tf.concat([outputs1, self.relation_batch], 1)
            self.attention_weights = tf.matmul(outputs1, self.attention_vec)
            self.attention_weights = tf.tile(self.attention_weights, [1, dim_mult * self.dims[-1]])
            outputs1 = tf.multiply(self.attention_weights, outputs1)
            self.outputs1_list += [outputs1]
        # Sum the attention-weighted relation embeddings, then l2-normalize.
        self.outputs1 = tf.stack(self.outputs1_list, 1)
        self.outputs1 = tf.reduce_sum(self.outputs1, axis=1, keepdims=False)
        self.outputs1 = tf.nn.l2_normalize(self.outputs1, 1)
        self.node_pred = layers.Dense(dim_mult * self.dims[-1], self.num_classes, dropout=self.placeholders['dropout'], act=lambda x: x)
        self.node_preds = self.node_pred(self.outputs1)
        self._loss()
        grads_and_vars = self.optimizer.compute_gradients(self.loss)
        clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
                                  for grad, var in grads_and_vars]
        self.grad, _ = clipped_grads_and_vars[0]
        self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
        self.preds = self.predict()

    def _loss(self):
        """Weight decay on aggregator/classifier weights plus classification loss.

        Assumes self.loss is initialized (presumably to 0) by the models base
        class — TODO confirm against models.py.
        """
        for aggregator in self.aggregators:
            for var in aggregator.vars.values():
                self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
        for var in self.node_pred.vars.values():
            self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
        if self.sigmoid_loss:
            self.loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.node_preds, labels=self.placeholders['labels']))
        else:
            self.loss += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.node_preds, labels=self.placeholders['labels']))
        tf.summary.scalar('loss', self.loss)

    def predict(self):
        """Class probabilities: sigmoid for multi-label, softmax otherwise."""
        if self.sigmoid_loss:
            return tf.nn.sigmoid(self.node_preds)
        return tf.nn.softmax(self.node_preds)
|
def calc_f1(y_true, y_pred):
    """Return (micro-F1, macro-F1) for predictions against ground truth.

    In the multi-class case (FLAGS.sigmoid False) both arrays are argmax'd;
    in the multi-label case predictions are thresholded at 0.5.

    Bug fix: the sigmoid branch previously thresholded `y_pred` IN PLACE,
    mutating the caller's array — in evaluate()/incremental_evaluate() the
    same array was subsequently passed to calc_auc, so AUC was computed on
    binarized predictions instead of the raw scores. Thresholding now
    happens on a fresh array.
    """
    if not FLAGS.sigmoid:
        y_true = np.argmax(y_true, axis=1)
        y_pred = np.argmax(y_pred, axis=1)
    else:
        y_pred = (y_pred > 0.5).astype(np.asarray(y_pred).dtype)
    return (metrics.f1_score(y_true, y_pred, average='micro'),
            metrics.f1_score(y_true, y_pred, average='macro'))
|
def calc_auc(y_true, y_pred):
    """ROC-AUC of prediction scores against ground-truth labels."""
    score = metrics.roc_auc_score(y_true, y_pred)
    return score
|
def evaluate(sess, model, minibatch_iter, size=None):
    """Run one validation batch.

    Returns (loss, micro-F1, macro-F1, auc, elapsed seconds).
    """
    start = time.time()
    feed_dict_val, labels = minibatch_iter.node_val_feed_dict(size)
    preds, loss = sess.run([model.preds, model.loss], feed_dict=feed_dict_val)
    mic, mac = calc_f1(labels, preds)
    auc = calc_auc(labels, preds)
    return loss, mic, mac, auc, time.time() - start
|
def incremental_evaluate(sess, model, minibatch_iter, size, test=False):
    """Evaluate over the full validation (or test) split in minibatches.

    Args:
        sess: live TF session.
        model: model exposing .preds and .loss tensors.
        minibatch_iter: iterator exposing incremental_node_val_feed_dict.
        size: minibatch size.
        test: evaluate the test split instead of validation.
    Returns:
        (mean loss, micro-F1, macro-F1, auc, elapsed seconds).

    Cleanup: the original set `finished = False` twice; the duplicate is removed.
    """
    t_start = time.time()
    val_losses, val_preds, labels = [], [], []
    iter_num = 0
    finished = False
    while not finished:
        feed_dict_val, batch_labels, finished, _ = minibatch_iter.incremental_node_val_feed_dict(size, iter_num, test=test)
        preds, loss = sess.run([model.preds, model.loss], feed_dict=feed_dict_val)
        val_preds.append(preds)
        labels.append(batch_labels)
        val_losses.append(loss)
        iter_num += 1
    val_preds = np.vstack(val_preds)
    labels = np.vstack(labels)
    f1_mic, f1_mac = calc_f1(labels, val_preds)
    auc_score = calc_auc(labels, val_preds)
    return np.mean(val_losses), f1_mic, f1_mac, auc_score, time.time() - t_start
|
def construct_placeholders(num_classes):
    """Create the feed placeholders shared by the model and minibatch iterators."""
    return {
        'labels': tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
        'batch': tf.placeholder(tf.int32, shape=None, name='batch1'),
        'dropout': tf.placeholder_with_default(0.0, shape=(), name='dropout'),
        'batch_size': tf.placeholder(tf.int32, name='batch_size'),
    }
|
def _hete_layer_infos(sampler_list):
    """Build one SAGEInfo stack per relation sampler.

    Depth (1-3 layers) follows which of FLAGS.samples_2 / samples_3 are
    non-zero. This replaces five identical copies of the same if/elif
    ladder that the original duplicated per model type.
    """
    if FLAGS.samples_3 != 0:
        return [[SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1),
                 SAGEInfo('node', sampler, FLAGS.samples_2, FLAGS.dim_2),
                 SAGEInfo('node', sampler, FLAGS.samples_3, FLAGS.dim_2)]
                for sampler in sampler_list]
    if FLAGS.samples_2 != 0:
        return [[SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1),
                 SAGEInfo('node', sampler, FLAGS.samples_2, FLAGS.dim_2)]
                for sampler in sampler_list]
    return [[SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1)]
            for sampler in sampler_list]


# FLAGS.model -> (aggregator_type, concat) for SupervisedGraphconsis; matches
# the per-branch arguments of the original if/elif chain ('gcn' used
# concat=False, all others the default concat=True).
_MODEL_OPTIONS = {
    'graphsage_mean': ('mean', True),
    'gcn': ('gcn', False),
    'graphsage_seq': ('seq', True),
    'graphsage_maxpool': ('maxpool', True),
    'graphsage_meanpool': ('meanpool', True),
}


def train(train_data, test_data=None):
    """Train SupervisedGraphconsis on multi-relation graph data.

    Args:
        train_data: tuple (G, features, id_map, context_pairs, class_map, gs)
            where gs is the list of per-relation graphs.
        test_data: unused; kept for interface compatibility.
    """
    G = train_data[0]
    features = train_data[1]
    id_map = train_data[2]
    class_map = train_data[4]
    gs = train_data[5]  # one graph per relation
    num_relations = len(gs)
    # Multi-label maps store a list per node; single-label maps store a scalar.
    if isinstance(list(class_map.values())[0], list):
        num_classes = len(list(class_map.values())[0])
    else:
        num_classes = len(set(class_map.values()))
    if features is not None:
        # Pad with an all-zeros row used by padded neighbor slots.
        features = np.vstack([features, np.zeros((features.shape[1],))])
    context_pairs = train_data[3] if FLAGS.random_context else None
    placeholders = construct_placeholders(num_classes)
    minibatch_list = [NodeMinibatchIterator(g, id_map, placeholders, class_map, num_classes, batch_size=FLAGS.batch_size, max_degree=FLAGS.max_degree, context_pairs=context_pairs) for g in gs]
    minibatch_main = NodeMinibatchIterator(G, id_map, placeholders, class_map, num_classes, batch_size=FLAGS.batch_size, max_degree=FLAGS.max_degree, context_pairs=context_pairs)
    # One adjacency placeholder/variable per relation; relation 0 is the "main" one.
    adj_info_ph_list = [tf.placeholder(tf.int32, shape=minibatch_main.adj.shape) for i in range(num_relations)]
    adj_info_list = [tf.Variable(adj_info_ph, trainable=False, name='adj_info') for adj_info_ph in adj_info_ph_list]
    adj_info_main = adj_info_list[0]
    if FLAGS.model not in _MODEL_OPTIONS:
        raise Exception('Error: model name unrecognized.')
    aggregator_type, concat = _MODEL_OPTIONS[FLAGS.model]
    sampler_list = [DistanceNeighborSampler(adj_info) for adj_info in adj_info_list]
    hete_layer_infos = _hete_layer_infos(sampler_list)
    model = SupervisedGraphconsis(num_classes, placeholders, features, adj_info_main, minibatch_main.deg, layer_infos=hete_layer_infos, concat=concat, aggregator_type=aggregator_type, model_size=FLAGS.model_size, sigmoid_loss=FLAGS.sigmoid, identity_dim=FLAGS.context_dim, num_re=num_relations, logging=True)
    config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    merged = tf.summary.merge_all()
    sess.run(tf.global_variables_initializer(), feed_dict={adj_info_ph_list[i]: minibatch_list[i].adj for i in range(num_relations)})
    total_steps = 0
    avg_time = 0.0
    epoch_val_costs = []
    for epoch in range(FLAGS.epochs):
        minibatch_main.shuffle()
        iter = 0
        print('Epoch: %04d' % (epoch + 1))
        epoch_val_costs.append(0)
        while not minibatch_main.end():
            feed_dict, labels = minibatch_main.next_minibatch_feed_dict()
            feed_dict.update({placeholders['dropout']: FLAGS.dropout})
            t = time.time()
            outs = sess.run([merged, model.opt_op, model.loss, model.preds], feed_dict=feed_dict)
            train_cost = outs[2]
            # Periodic validation; iter 0 always validates, so val_* metrics
            # are defined before the first progress print below.
            if (iter % FLAGS.validate_iter) == 0:
                if FLAGS.validate_batch_size == -1:
                    ret = incremental_evaluate(sess, model, minibatch_main, FLAGS.batch_size)
                else:
                    ret = evaluate(sess, model, minibatch_main, FLAGS.validate_batch_size)
                val_cost, val_f1_mic, val_f1_mac, val_auc, duration = ret
                epoch_val_costs[-1] += val_cost
            # Running average of per-step wall-clock time.
            avg_time = ((avg_time * total_steps) + time.time() - t) / (total_steps + 1)
            if (total_steps % FLAGS.print_every) == 0:
                train_f1_mic, train_f1_mac = calc_f1(labels, outs[-1])
                print('Iter:', '%04d' % iter, 'train_loss=', '{:.5f}'.format(train_cost), 'train_f1_mic=', '{:.5f}'.format(train_f1_mic), 'train_f1_mac=', '{:.5f}'.format(train_f1_mac), 'val_loss=', '{:.5f}'.format(val_cost), 'val_f1_mic=', '{:.5f}'.format(val_f1_mic), 'val_f1_mac=', '{:.5f}'.format(val_f1_mac), 'time=', '{:.5f}'.format(avg_time))
            iter += 1
            total_steps += 1
            if total_steps > FLAGS.max_total_steps:
                break
        if total_steps > FLAGS.max_total_steps:
            break
    print('Optimization Finished!')
    ret = incremental_evaluate(sess, model, minibatch_main, FLAGS.batch_size)
    val_cost, val_f1_mic, val_f1_mac, val_auc, duration = ret
    print('Full validation stats:', 'loss=', '{:.5f}'.format(val_cost), 'f1_micro=', '{:.5f}'.format(val_f1_mic), 'f1_macro=', '{:.5f}'.format(val_f1_mac), 'auc=', '{:.5f}'.format(val_auc), 'time=', '{:.5f}'.format(duration))
|
def main(argv=None):
    """Entry point: load the multi-relation dataset and run training."""
    print('Loading training data..')
    relations = ['net_rur', 'net_rtr', 'net_rsr']
    dataset = load_data(FLAGS.train_prefix, FLAGS.file_name, relations, FLAGS.train_perc)
    print('Done loading training data..')
    train(dataset)
|
class MeanAggregator(Layer):
    """Aggregates via mean followed by matmul and non-linearity.

    Neighbor vectors are averaged, then self and averaged-neighbor vectors
    get separate linear transforms before being summed or concatenated.
    """

    def __init__(self, input_dim, output_dim, neigh_input_dim=None, dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(MeanAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        scope_suffix = '' if name is None else '/' + name
        with tf.variable_scope(self.name + scope_suffix + '_vars'):
            self.vars['neigh_weights'] = glorot([neigh_input_dim, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()
        self.input_dim = input_dim
        self.output_dim = output_dim

    def _call(self, inputs):
        self_vecs, neigh_vecs = inputs
        keep_prob = 1 - self.dropout
        neigh_vecs = tf.nn.dropout(neigh_vecs, keep_prob)
        self_vecs = tf.nn.dropout(self_vecs, keep_prob)
        # Average over the neighbor axis, then transform each source separately.
        neigh_means = tf.reduce_mean(neigh_vecs, axis=1)
        from_neighs = tf.matmul(neigh_means, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        if self.concat:
            output = tf.concat([from_self, from_neighs], axis=1)
        else:
            output = tf.add_n([from_self, from_neighs])
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
class GCNAggregator(Layer):
    """Aggregates via mean followed by matmul and non-linearity.

    Unlike MeanAggregator, a single weight matrix is shared by the self
    vector and the neighbor vectors (GCN-style).
    """

    def __init__(self, input_dim, output_dim, neigh_input_dim=None, dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(GCNAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        scope_suffix = '' if name is None else '/' + name
        with tf.variable_scope(self.name + scope_suffix + '_vars'):
            # Shared transform for self + neighbor vectors.
            self.vars['weights'] = glorot([neigh_input_dim, output_dim], name='neigh_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()
        self.input_dim = input_dim
        self.output_dim = output_dim

    def _call(self, inputs):
        self_vecs, neigh_vecs = inputs
        keep_prob = 1 - self.dropout
        neigh_vecs = tf.nn.dropout(neigh_vecs, keep_prob)
        self_vecs = tf.nn.dropout(self_vecs, keep_prob)
        # Mean over the neighborhood including the node itself.
        stacked = tf.concat([neigh_vecs, tf.expand_dims(self_vecs, axis=1)], axis=1)
        output = tf.matmul(tf.reduce_mean(stacked, axis=1), self.vars['weights'])
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
class MaxPoolingAggregator(Layer):
    """Aggregates via max-pooling over MLP functions.

    Every neighbor vector is pushed through a one-layer MLP; the element-wise
    maximum over neighbors is then transformed and combined with the self
    vector.
    """

    def __init__(self, input_dim, output_dim, model_size='small', neigh_input_dim=None, dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(MaxPoolingAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        scope_suffix = '' if name is None else '/' + name
        # Hidden width of the per-neighbor MLP.
        if model_size == 'small':
            self.hidden_dim = 512
        elif model_size == 'big':
            self.hidden_dim = 1024
        self.mlp_layers = [Dense(input_dim=neigh_input_dim, output_dim=self.hidden_dim, act=tf.nn.relu, dropout=dropout, sparse_inputs=False, logging=self.logging)]
        with tf.variable_scope(self.name + scope_suffix + '_vars'):
            self.vars['neigh_weights'] = glorot([self.hidden_dim, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.neigh_input_dim = neigh_input_dim

    def _call(self, inputs):
        self_vecs, neigh_vecs = inputs
        shape = tf.shape(neigh_vecs)
        # Flatten (batch, neigh, feat) -> (batch*neigh, feat) for the MLP.
        flat = tf.reshape(neigh_vecs, (shape[0] * shape[1], self.neigh_input_dim))
        for mlp in self.mlp_layers:
            flat = mlp(flat)
        pooled = tf.reduce_max(tf.reshape(flat, (shape[0], shape[1], self.hidden_dim)), axis=1)
        from_neighs = tf.matmul(pooled, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        if self.concat:
            output = tf.concat([from_self, from_neighs], axis=1)
        else:
            output = tf.add_n([from_self, from_neighs])
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
class MeanPoolingAggregator(Layer):
    """Aggregates via mean-pooling over MLP functions.

    Identical to MaxPoolingAggregator except the per-neighbor MLP outputs are
    averaged rather than max-pooled.
    """

    def __init__(self, input_dim, output_dim, model_size='small', neigh_input_dim=None, dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(MeanPoolingAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        scope_suffix = '' if name is None else '/' + name
        # Hidden width of the per-neighbor MLP.
        if model_size == 'small':
            self.hidden_dim = 512
        elif model_size == 'big':
            self.hidden_dim = 1024
        self.mlp_layers = [Dense(input_dim=neigh_input_dim, output_dim=self.hidden_dim, act=tf.nn.relu, dropout=dropout, sparse_inputs=False, logging=self.logging)]
        with tf.variable_scope(self.name + scope_suffix + '_vars'):
            self.vars['neigh_weights'] = glorot([self.hidden_dim, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.neigh_input_dim = neigh_input_dim

    def _call(self, inputs):
        self_vecs, neigh_vecs = inputs
        shape = tf.shape(neigh_vecs)
        # Flatten (batch, neigh, feat) -> (batch*neigh, feat) for the MLP.
        flat = tf.reshape(neigh_vecs, (shape[0] * shape[1], self.neigh_input_dim))
        for mlp in self.mlp_layers:
            flat = mlp(flat)
        pooled = tf.reduce_mean(tf.reshape(flat, (shape[0], shape[1], self.hidden_dim)), axis=1)
        from_neighs = tf.matmul(pooled, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        if self.concat:
            output = tf.concat([from_self, from_neighs], axis=1)
        else:
            output = tf.add_n([from_self, from_neighs])
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
class TwoMaxLayerPoolingAggregator(Layer):
    """Aggregates via pooling over two MLP functions.

    Like MaxPoolingAggregator but with a two-layer per-neighbor MLP before
    the max-pool.
    """

    def __init__(self, input_dim, output_dim, model_size='small', neigh_input_dim=None, dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(TwoMaxLayerPoolingAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        scope_suffix = '' if name is None else '/' + name
        # Two hidden widths for the stacked per-neighbor MLPs.
        if model_size == 'small':
            self.hidden_dim_1, self.hidden_dim_2 = 512, 256
        elif model_size == 'big':
            self.hidden_dim_1, self.hidden_dim_2 = 1024, 512
        self.mlp_layers = [
            Dense(input_dim=neigh_input_dim, output_dim=self.hidden_dim_1, act=tf.nn.relu, dropout=dropout, sparse_inputs=False, logging=self.logging),
            Dense(input_dim=self.hidden_dim_1, output_dim=self.hidden_dim_2, act=tf.nn.relu, dropout=dropout, sparse_inputs=False, logging=self.logging),
        ]
        with tf.variable_scope(self.name + scope_suffix + '_vars'):
            self.vars['neigh_weights'] = glorot([self.hidden_dim_2, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.neigh_input_dim = neigh_input_dim

    def _call(self, inputs):
        self_vecs, neigh_vecs = inputs
        shape = tf.shape(neigh_vecs)
        # Flatten (batch, neigh, feat) -> (batch*neigh, feat) for the MLPs.
        flat = tf.reshape(neigh_vecs, (shape[0] * shape[1], self.neigh_input_dim))
        for mlp in self.mlp_layers:
            flat = mlp(flat)
        pooled = tf.reduce_max(tf.reshape(flat, (shape[0], shape[1], self.hidden_dim_2)), axis=1)
        from_neighs = tf.matmul(pooled, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        if self.concat:
            output = tf.concat([from_self, from_neighs], axis=1)
        else:
            output = tf.add_n([from_self, from_neighs])
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
class SeqAggregator(Layer):
    """Aggregates via a standard LSTM.

    Neighbor vectors are fed as a sequence; the LSTM output at each
    sequence's last valid position is the neighborhood summary.
    """

    def __init__(self, input_dim, output_dim, model_size='small', neigh_input_dim=None, dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(SeqAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        if name is not None:
            name = '/' + name
        else:
            name = ''
        # LSTM hidden width by model size.
        if model_size == 'small':
            hidden_dim = self.hidden_dim = 128
        elif model_size == 'big':
            hidden_dim = self.hidden_dim = 256
        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([hidden_dim, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.neigh_input_dim = neigh_input_dim
        self.cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_dim)

    def _call(self, inputs):
        self_vecs, neigh_vecs = inputs
        dims = tf.shape(neigh_vecs)
        batch_size = dims[0]
        initial_state = self.cell.zero_state(batch_size, tf.float32)
        # Effective sequence length: neighbor rows that are not all-zero
        # padding, clamped to at least 1 so dynamic_rnn always runs a step.
        used = tf.sign(tf.reduce_max(tf.abs(neigh_vecs), axis=2))
        length = tf.reduce_sum(used, axis=1)
        length = tf.maximum(length, tf.constant(1.0))
        length = tf.cast(length, tf.int32)
        with tf.variable_scope(self.name) as scope:
            try:
                rnn_outputs, rnn_states = tf.nn.dynamic_rnn(self.cell, neigh_vecs, initial_state=initial_state, dtype=tf.float32, time_major=False, sequence_length=length)
            except ValueError:
                # Cell variables already exist (layer re-applied): reuse them.
                scope.reuse_variables()
                rnn_outputs, rnn_states = tf.nn.dynamic_rnn(self.cell, neigh_vecs, initial_state=initial_state, dtype=tf.float32, time_major=False, sequence_length=length)
        # Gather the output at each sequence's last valid timestep.
        batch_size = tf.shape(rnn_outputs)[0]
        max_len = tf.shape(rnn_outputs)[1]
        out_size = int(rnn_outputs.get_shape()[2])
        index = tf.range(0, batch_size) * max_len + (length - 1)
        flat = tf.reshape(rnn_outputs, [-1, out_size])
        neigh_h = tf.gather(flat, index)
        from_neighs = tf.matmul(neigh_h, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        # Cleanup: removed a dead `output = tf.add_n(...)` that the original
        # computed here and immediately overwrote in the if/else below.
        if not self.concat:
            output = tf.add_n([from_self, from_neighs])
        else:
            output = tf.concat([from_self, from_neighs], axis=1)
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
def uniform(shape, scale=0.05, name=None):
    """Uniform init in [-scale, scale]."""
    values = tf.random_uniform(shape, minval=-scale, maxval=scale, dtype=tf.float32)
    return tf.Variable(values, name=name)
|
def glorot(shape, name=None):
    """Glorot & Bengio (AISTATS 2010) init: uniform in +/- sqrt(6/(fan_in+fan_out))."""
    limit = np.sqrt(6.0 / (shape[0] + shape[1]))
    values = tf.random_uniform(shape, minval=-limit, maxval=limit, dtype=tf.float32)
    return tf.Variable(values, name=name)
|
def zeros(shape, name=None):
    """All-zeros variable initializer."""
    return tf.Variable(tf.zeros(shape, dtype=tf.float32), name=name)
|
def ones(shape, name=None):
    """All-ones variable initializer."""
    return tf.Variable(tf.ones(shape, dtype=tf.float32), name=name)
|
def get_layer_uid(layer_name=''):
    """Helper function, assigns unique layer IDs.

    Counts up from 1 per layer_name, stored in the module-level _LAYER_UIDS.
    """
    _LAYER_UIDS[layer_name] = _LAYER_UIDS.get(layer_name, 0) + 1
    return _LAYER_UIDS[layer_name]
|
class Layer(object):
    """Base layer class. Defines basic API for all layer objects.
    Implementation inspired by keras (http://keras.io).

    # Properties
        name: String, defines the variable scope of the layer.
        logging: Boolean, switches Tensorflow histogram logging on/off

    # Methods
        _call(inputs): Defines computation graph of layer
            (i.e. takes input, returns output)
        __call__(inputs): Wrapper for _call()
        _log_vars(): Log all variables
    """

    def __init__(self, **kwargs):
        allowed_kwargs = {'name', 'logging', 'model_size'}
        for kwarg in kwargs:
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        name = kwargs.get('name')
        if not name:
            # Auto-name: lowercase class name plus a per-class unique id.
            cls_tag = self.__class__.__name__.lower()
            name = cls_tag + '_' + str(get_layer_uid(cls_tag))
        self.name = name
        self.vars = {}
        self.logging = kwargs.get('logging', False)
        self.sparse_inputs = False

    def _call(self, inputs):
        # Identity by default; subclasses override with the real computation.
        return inputs

    def __call__(self, inputs):
        with tf.name_scope(self.name):
            if self.logging and not self.sparse_inputs:
                tf.summary.histogram(self.name + '/inputs', inputs)
            outputs = self._call(inputs)
            if self.logging:
                tf.summary.histogram(self.name + '/outputs', outputs)
            return outputs

    def _log_vars(self):
        for var_name in self.vars:
            tf.summary.histogram(self.name + '/vars/' + var_name, self.vars[var_name])
|
class Dense(Layer):
    """Dense (fully connected) layer with dropout, optional bias, and
    L2 weight regularization driven by FLAGS.weight_decay.
    """

    def __init__(self, input_dim, output_dim, dropout=0.0, act=tf.nn.relu, placeholders=None, bias=True, featureless=False, sparse_inputs=False, **kwargs):
        super(Dense, self).__init__(**kwargs)
        self.dropout = dropout
        self.act = act
        self.featureless = featureless
        self.bias = bias
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.sparse_inputs = sparse_inputs
        if sparse_inputs:
            # Needed for sparse-dropout bookkeeping.
            self.num_features_nonzero = placeholders['num_features_nonzero']
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = tf.get_variable('weights', shape=(input_dim, output_dim), dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(), regularizer=tf.contrib.layers.l2_regularizer(FLAGS.weight_decay))
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        dropped = tf.nn.dropout(inputs, 1 - self.dropout)
        output = tf.matmul(dropped, self.vars['weights'])
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
|
def masked_logit_cross_entropy(preds, labels, mask):
    """Logit (sigmoid) cross-entropy loss with masking."""
    per_node = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels), axis=1)
    mask = tf.cast(mask, dtype=tf.float32)
    # Normalize the mask so the masked mean averages over selected nodes only.
    mask /= tf.maximum(tf.reduce_sum(mask), tf.constant([1.0]))
    return tf.reduce_mean(per_node * mask)
|
def masked_softmax_cross_entropy(preds, labels, mask):
    """Softmax cross-entropy loss with masking."""
    per_node = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
    mask = tf.cast(mask, dtype=tf.float32)
    # Normalize the mask so the masked mean averages over selected nodes only.
    mask /= tf.maximum(tf.reduce_sum(mask), tf.constant([1.0]))
    return tf.reduce_mean(per_node * mask)
|
def masked_l2(preds, actuals, mask):
    """L2 loss with masking.

    Bug fix: the original called ``tf.nn.l2``, which does not exist in
    TensorFlow and crashed whenever this function was used. Replaced with
    the per-example sum of squared errors — presumably the intended
    behavior; confirm against callers if precise scaling matters.
    """
    loss = tf.reduce_sum(tf.square(preds - actuals), axis=1)
    mask = tf.cast(mask, dtype=tf.float32)
    mask /= tf.reduce_mean(mask)
    loss *= mask
    return tf.reduce_mean(loss)
|
def masked_accuracy(preds, labels, mask):
    """Accuracy with masking."""
    hits = tf.cast(tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1)), tf.float32)
    mask = tf.cast(mask, dtype=tf.float32)
    # Rescale so unmasked entries average to the masked-set accuracy.
    mask /= tf.reduce_mean(mask)
    return tf.reduce_mean(hits * mask)
|
class UniformNeighborSampler(Layer):
    """Uniformly samples neighbors.
    Assumes that adj lists are padded with random re-sampling.
    """

    def __init__(self, adj_info, **kwargs):
        super(UniformNeighborSampler, self).__init__(**kwargs)
        self.adj_info = adj_info

    def _call(self, inputs):
        ids, num_samples = inputs
        rows = tf.nn.embedding_lookup(self.adj_info, ids)
        # Shuffle along the neighbor axis (columns) via a double transpose,
        # then keep the first num_samples columns.
        shuffled = tf.transpose(tf.random_shuffle(tf.transpose(rows)))
        return tf.slice(shuffled, [0, 0], [-1, num_samples])
|
class BipartiteEdgePredLayer(Layer):
    """Skip-gram-like edge prediction layer.

    Scores (node, context) pairs by dot product (or a bilinear form) and
    supports cross-entropy, skip-gram, and hinge losses against negative
    samples.
    """

    def __init__(self, input_dim1, input_dim2, placeholders, dropout=False, act=tf.nn.sigmoid, loss_fn='xent', neg_sample_weights=1.0, bias=False, bilinear_weights=False, **kwargs):
        """
        Basic class that applies skip-gram-like loss
        (i.e., dot product of node+target and node and negative samples)

        Args:
            bilinear_weights: use a bilinear weight for affinity calculation:
                u^T A v. If set to false, it is assumed that input dimensions
                are the same and the affinity will be based on dot product.
        """
        super(BipartiteEdgePredLayer, self).__init__(**kwargs)
        self.input_dim1 = input_dim1
        self.input_dim2 = input_dim2
        self.act = act
        self.bias = bias
        self.eps = 1e-07
        self.margin = 0.1  # hinge-loss margin
        self.neg_sample_weights = neg_sample_weights
        self.bilinear_weights = bilinear_weights
        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.0
        # Output is a scalar affinity per pair.
        self.output_dim = 1
        with tf.variable_scope((self.name + '_vars')):
            # 'weights' only exists when the bilinear form is requested.
            if bilinear_weights:
                self.vars['weights'] = tf.get_variable('pred_weights', shape=(input_dim1, input_dim2), dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if (loss_fn == 'xent'):
            self.loss_fn = self._xent_loss
        elif (loss_fn == 'skipgram'):
            self.loss_fn = self._skipgram_loss
        elif (loss_fn == 'hinge'):
            self.loss_fn = self._hinge_loss
        else:
            # BUG FIX: previously an unknown loss_fn silently left
            # self.loss_fn undefined, deferring the failure to loss().
            raise ValueError('Unknown loss_fn: %s' % loss_fn)
        if self.logging:
            self._log_vars()

    def affinity(self, inputs1, inputs2):
        """Affinity score between batch of inputs1 and inputs2.

        Args:
            inputs1: tensor of shape [batch_size x feature_size].
        """
        if self.bilinear_weights:
            prod = tf.matmul(inputs2, tf.transpose(self.vars['weights']))
            self.prod = prod
            result = tf.reduce_sum((inputs1 * prod), axis=1)
        else:
            result = tf.reduce_sum((inputs1 * inputs2), axis=1)
        return result

    def neg_cost(self, inputs1, neg_samples, hard_neg_samples=None):
        """For each input in batch, compute the sum of its affinity to negative samples.

        Returns:
            Tensor of shape [batch_size x num_neg_samples]. For each node, a
            list of affinities to negative samples is computed.
        """
        if self.bilinear_weights:
            inputs1 = tf.matmul(inputs1, self.vars['weights'])
        neg_aff = tf.matmul(inputs1, tf.transpose(neg_samples))
        return neg_aff

    def loss(self, inputs1, inputs2, neg_samples):
        """Negative sampling loss.

        Args:
            neg_samples: tensor of shape [num_neg_samples x input_dim2].
                Negative samples for all inputs in batch inputs1.
        """
        return self.loss_fn(inputs1, inputs2, neg_samples)

    def _xent_loss(self, inputs1, inputs2, neg_samples, hard_neg_samples=None):
        # True pairs pushed toward label 1, negatives toward 0.
        aff = self.affinity(inputs1, inputs2)
        neg_aff = self.neg_cost(inputs1, neg_samples, hard_neg_samples)
        true_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(aff), logits=aff)
        negative_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(neg_aff), logits=neg_aff)
        loss = (tf.reduce_sum(true_xent) + (self.neg_sample_weights * tf.reduce_sum(negative_xent)))
        return loss

    def _skipgram_loss(self, inputs1, inputs2, neg_samples, hard_neg_samples=None):
        aff = self.affinity(inputs1, inputs2)
        neg_aff = self.neg_cost(inputs1, neg_samples, hard_neg_samples)
        # log-sum-exp over negatives.
        neg_cost = tf.log(tf.reduce_sum(tf.exp(neg_aff), axis=1))
        loss = tf.reduce_sum((aff - neg_cost))
        return loss

    def _hinge_loss(self, inputs1, inputs2, neg_samples, hard_neg_samples=None):
        aff = self.affinity(inputs1, inputs2)
        neg_aff = self.neg_cost(inputs1, neg_samples, hard_neg_samples)
        # Penalize negatives that come within `margin` of the true affinity.
        diff = tf.nn.relu(tf.subtract(neg_aff, (tf.expand_dims(aff, 1) - self.margin)), name='diff')
        loss = tf.reduce_sum(diff)
        self.neg_shape = tf.shape(neg_aff)
        return loss

    def weights_norm(self):
        # BUG FIX: tf.nn.l2_norm does not exist in TensorFlow; tf.nn.l2_loss
        # (0.5 * sum of squares) is the standard weight-regularization term.
        # NOTE(review): only valid when bilinear_weights=True, since
        # self.vars['weights'] is created only in that branch.
        return tf.nn.l2_loss(self.vars['weights'])
|
class SupervisedGraphsage(models.SampleAndAggregate):
    """Implementation of supervised GraphSAGE."""

    def __init__(self, num_classes, placeholders, features, adj, degrees, layer_infos, concat=True, aggregator_type='mean', model_size='small', sigmoid_loss=False, identity_dim=0, **kwargs):
        """
        Args:
            - placeholders: Stanford TensorFlow placeholder object.
            - features: Numpy array with node features.
            - adj: Numpy array with adjacency lists (padded with random re-samples)
            - degrees: Numpy array with node degrees.
            - layer_infos: List of SAGEInfo namedtuples that describe the parameters of all
              the recursive layers. See SAGEInfo definition above.
            - concat: whether to concatenate during recursive iterations
            - aggregator_type: how to aggregate neighbor information
            - model_size: one of "small" and "big"
            - sigmoid_loss: Set to true if nodes can belong to multiple classes
        """
        models.GeneralizedModel.__init__(self, **kwargs)
        # Resolve the aggregator name to its implementing class.
        if (aggregator_type == 'mean'):
            self.aggregator_cls = MeanAggregator
        elif (aggregator_type == 'seq'):
            self.aggregator_cls = SeqAggregator
        elif (aggregator_type == 'meanpool'):
            self.aggregator_cls = MeanPoolingAggregator
        elif (aggregator_type == 'maxpool'):
            self.aggregator_cls = MaxPoolingAggregator
        elif (aggregator_type == 'gcn'):
            self.aggregator_cls = GCNAggregator
        else:
            # NOTE(review): this references self.aggregator_cls before it is
            # assigned, so the raise itself would fail with AttributeError —
            # behavior inherited from the upstream GraphSAGE code.
            raise Exception('Unknown aggregator: ', self.aggregator_cls)
        self.inputs1 = placeholders['batch']
        self.model_size = model_size
        self.adj_info = adj
        # Optional trainable per-node "identity" embeddings of width identity_dim.
        if (identity_dim > 0):
            self.embeds = tf.get_variable('node_embeddings', [adj.get_shape().as_list()[0], identity_dim])
        else:
            self.embeds = None
        if (features is None):
            if (identity_dim == 0):
                raise Exception('Must have a positive value for identity feature dimension if no input features given.')
            self.features = self.embeds
        else:
            # Input features are frozen (trainable=False); identity embeddings,
            # if any, are concatenated alongside them.
            self.features = tf.Variable(tf.constant(features, dtype=tf.float32), trainable=False)
            if (not (self.embeds is None)):
                self.features = tf.concat([self.embeds, self.features], axis=1)
        self.degrees = degrees
        self.concat = concat
        self.num_classes = num_classes
        self.sigmoid_loss = sigmoid_loss
        # dims[0] = input feature width (+ identity dim); the rest follow layer_infos.
        self.dims = [((0 if (features is None) else features.shape[1]) + identity_dim)]
        self.dims.extend([layer_infos[i].output_dim for i in range(len(layer_infos))])
        self.batch_size = placeholders['batch_size']
        self.placeholders = placeholders
        self.layer_infos = layer_infos
        self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
        self.build()

    def build(self):
        """Assemble forward pass, loss, clipped-gradient train op, and predictions."""
        (samples1, support_sizes1) = self.sample(self.inputs1, self.layer_infos)
        num_samples = [layer_info.num_samples for layer_info in self.layer_infos]
        (self.outputs1, self.aggregators) = self.aggregate(samples1, [self.features], self.dims, num_samples, support_sizes1, concat=self.concat, model_size=self.model_size)
        # Concatenating self + neighbor representations doubles the width.
        dim_mult = (2 if self.concat else 1)
        self.outputs1 = tf.nn.l2_normalize(self.outputs1, 1)
        dim_mult = (2 if self.concat else 1)
        # Linear (identity-activation) classification head on top of the embeddings.
        self.node_pred = layers.Dense((dim_mult * self.dims[(- 1)]), self.num_classes, dropout=self.placeholders['dropout'], act=(lambda x: x))
        self.node_preds = self.node_pred(self.outputs1)
        self._loss()
        # Clip every gradient element into [-5, 5] before applying.
        grads_and_vars = self.optimizer.compute_gradients(self.loss)
        clipped_grads_and_vars = [((tf.clip_by_value(grad, (- 5.0), 5.0) if (grad is not None) else None), var) for (grad, var) in grads_and_vars]
        (self.grad, _) = clipped_grads_and_vars[0]
        self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
        self.preds = self.predict()

    def _loss(self):
        """Accumulate L2 weight decay plus the supervised classification loss.

        NOTE(review): self.loss is assumed to be initialized (to 0) by the
        base Model class defined elsewhere — confirm against models.py.
        """
        # Weight decay on all aggregator and prediction-head variables.
        for aggregator in self.aggregators:
            for var in aggregator.vars.values():
                self.loss += (FLAGS.weight_decay * tf.nn.l2_loss(var))
        for var in self.node_pred.vars.values():
            self.loss += (FLAGS.weight_decay * tf.nn.l2_loss(var))
        # Sigmoid for multi-label targets, softmax for single-label.
        if self.sigmoid_loss:
            self.loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.node_preds, labels=self.placeholders['labels']))
        else:
            self.loss += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.node_preds, labels=self.placeholders['labels']))
        tf.summary.scalar('loss', self.loss)

    def predict(self):
        """Return per-class probabilities matching the training loss type."""
        if self.sigmoid_loss:
            return tf.nn.sigmoid(self.node_preds)
        else:
            return tf.nn.softmax(self.node_preds)
|
def calc_f1(y_true, y_pred):
    """Return (micro-F1, macro-F1) for predictions vs. ground truth.

    With FLAGS.sigmoid (multi-label) the probabilities are thresholded at 0.5
    in place; otherwise argmax over the class axis selects a single label.
    """
    if FLAGS.sigmoid:
        # Multi-label: binarize the prediction matrix in place.
        y_pred[y_pred > 0.5] = 1
        y_pred[y_pred <= 0.5] = 0
    else:
        # Single-label: compare argmax class indices.
        y_true = np.argmax(y_true, axis=1)
        y_pred = np.argmax(y_pred, axis=1)
    micro = metrics.f1_score(y_true, y_pred, average='micro')
    macro = metrics.f1_score(y_true, y_pred, average='macro')
    return (micro, macro)
|
def evaluate(sess, model, minibatch_iter, size=None):
    """Run one validation pass; return (loss, micro-F1, macro-F1, seconds)."""
    start = time.time()
    feed_dict_val, labels = minibatch_iter.node_val_feed_dict(size)
    preds, loss = sess.run([model.preds, model.loss], feed_dict=feed_dict_val)
    mic, mac = calc_f1(labels, preds)
    return (loss, mic, mac, time.time() - start)
|
def log_dir():
    """Build (creating if needed) the run-specific log directory path."""
    dataset_tag = FLAGS.train_prefix.split('/')[-2]
    path = FLAGS.base_log_dir + '/sup-' + dataset_tag
    path += '/{model:s}_{model_size:s}_{lr:0.4f}/'.format(model=FLAGS.model, model_size=FLAGS.model_size, lr=FLAGS.learning_rate)
    if not os.path.exists(path):
        os.makedirs(path)
    return path
|
def incremental_evaluate(sess, model, minibatch_iter, size, test=False):
    """Evaluate the whole validation (or test) split in minibatches.

    Returns (mean loss, micro-F1, macro-F1, seconds elapsed).
    """
    start = time.time()
    losses = []
    pred_chunks = []
    label_chunks = []
    batch_num = 0
    done = False
    while not done:
        feed, batch_labels, done, _ = minibatch_iter.incremental_node_val_feed_dict(size, batch_num, test=test)
        batch_preds, batch_loss = sess.run([model.preds, model.loss], feed_dict=feed)
        pred_chunks.append(batch_preds)
        label_chunks.append(batch_labels)
        losses.append(batch_loss)
        batch_num += 1
    # Stitch the minibatch results back together before scoring.
    all_preds = np.vstack(pred_chunks)
    all_labels = np.vstack(label_chunks)
    mic, mac = calc_f1(all_labels, all_preds)
    return (np.mean(losses), mic, mac, time.time() - start)
|
def construct_placeholders(num_classes):
    """Create the feed placeholders used by the supervised GraphSAGE model."""
    return {
        'labels': tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
        'batch': tf.placeholder(tf.int32, shape=None, name='batch1'),
        'dropout': tf.placeholder_with_default(0.0, shape=(), name='dropout'),
        'batch_size': tf.placeholder(tf.int32, name='batch_size'),
    }
|
def train(train_data, test_data=None):
    """Train a supervised GraphSAGE model end-to-end.

    Args:
        train_data: tuple of (graph G, features, id_map, context_pairs,
            class_map) as produced by load_data.
        test_data: unused; evaluation data comes from the minibatch iterator.

    Builds the model selected by FLAGS.model, trains with periodic validation,
    then writes final validation/test stats to the log directory.
    """
    G = train_data[0]
    features = train_data[1]
    id_map = train_data[2]
    class_map = train_data[4]
    # Multi-label maps store lists; single-label maps store scalars.
    if isinstance(list(class_map.values())[0], list):
        num_classes = len(list(class_map.values())[0])
    else:
        num_classes = len(set(class_map.values()))
    if (not (features is None)):
        # Pad with a dummy all-zero feature row (used for padded/missing nodes).
        features = np.vstack([features, np.zeros((features.shape[1],))])
    context_pairs = (train_data[3] if FLAGS.random_context else None)
    placeholders = construct_placeholders(num_classes)
    minibatch = NodeMinibatchIterator(G, id_map, placeholders, class_map, num_classes, batch_size=FLAGS.batch_size, max_degree=FLAGS.max_degree, context_pairs=context_pairs)
    # The adjacency table lives in a TF variable so it can be swapped between
    # the train-graph and validation-graph versions (see tf.assign below).
    adj_info_ph = tf.placeholder(tf.int32, shape=minibatch.adj.shape)
    adj_info = tf.Variable(adj_info_ph, trainable=False, name='adj_info')
    # Instantiate the model variant selected by FLAGS.model.
    if (FLAGS.model == 'graphsage_mean'):
        sampler = UniformNeighborSampler(adj_info)
        # Depth depends on how many sample sizes are non-zero (1-3 layers).
        if (FLAGS.samples_3 != 0):
            layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo('node', sampler, FLAGS.samples_2, FLAGS.dim_2), SAGEInfo('node', sampler, FLAGS.samples_3, FLAGS.dim_2)]
        elif (FLAGS.samples_2 != 0):
            layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo('node', sampler, FLAGS.samples_2, FLAGS.dim_2)]
        else:
            layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1)]
        model = SupervisedGraphsage(num_classes, placeholders, features, adj_info, minibatch.deg, layer_infos, model_size=FLAGS.model_size, sigmoid_loss=FLAGS.sigmoid, identity_dim=FLAGS.identity_dim, logging=True)
    elif (FLAGS.model == 'gcn'):
        # GCN variant: no concat, so dimensions are doubled up front instead.
        sampler = UniformNeighborSampler(adj_info)
        layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, (2 * FLAGS.dim_1)), SAGEInfo('node', sampler, FLAGS.samples_2, (2 * FLAGS.dim_2))]
        model = SupervisedGraphsage(num_classes, placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type='gcn', model_size=FLAGS.model_size, concat=False, sigmoid_loss=FLAGS.sigmoid, identity_dim=FLAGS.identity_dim, logging=True)
    elif (FLAGS.model == 'graphsage_seq'):
        sampler = UniformNeighborSampler(adj_info)
        layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo('node', sampler, FLAGS.samples_2, FLAGS.dim_2)]
        model = SupervisedGraphsage(num_classes, placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type='seq', model_size=FLAGS.model_size, sigmoid_loss=FLAGS.sigmoid, identity_dim=FLAGS.identity_dim, logging=True)
    elif (FLAGS.model == 'graphsage_maxpool'):
        sampler = UniformNeighborSampler(adj_info)
        layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo('node', sampler, FLAGS.samples_2, FLAGS.dim_2)]
        model = SupervisedGraphsage(num_classes, placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type='maxpool', model_size=FLAGS.model_size, sigmoid_loss=FLAGS.sigmoid, identity_dim=FLAGS.identity_dim, logging=True)
    elif (FLAGS.model == 'graphsage_meanpool'):
        sampler = UniformNeighborSampler(adj_info)
        layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo('node', sampler, FLAGS.samples_2, FLAGS.dim_2)]
        model = SupervisedGraphsage(num_classes, placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type='meanpool', model_size=FLAGS.model_size, sigmoid_loss=FLAGS.sigmoid, identity_dim=FLAGS.identity_dim, logging=True)
    else:
        raise Exception('Error: model name unrecognized.')
    config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    merged = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(log_dir(), sess.graph)
    # adj_info gets its initial value through the placeholder feed.
    sess.run(tf.global_variables_initializer(), feed_dict={adj_info_ph: minibatch.adj})
    total_steps = 0
    avg_time = 0.0
    epoch_val_costs = []
    # Ops to swap the adjacency variable between train and validation graphs.
    train_adj_info = tf.assign(adj_info, minibatch.adj)
    val_adj_info = tf.assign(adj_info, minibatch.test_adj)
    for epoch in range(FLAGS.epochs):
        minibatch.shuffle()
        iter = 0
        print(('Epoch: %04d' % (epoch + 1)))
        epoch_val_costs.append(0)
        while (not minibatch.end()):
            (feed_dict, labels) = minibatch.next_minibatch_feed_dict()
            feed_dict.update({placeholders['dropout']: FLAGS.dropout})
            t = time.time()
            outs = sess.run([merged, model.opt_op, model.loss, model.preds], feed_dict=feed_dict)
            train_cost = outs[2]
            # Periodic validation: swap to the validation adjacency, evaluate,
            # then swap back before continuing training.
            if ((iter % FLAGS.validate_iter) == 0):
                sess.run(val_adj_info.op)
                if (FLAGS.validate_batch_size == (- 1)):
                    (val_cost, val_f1_mic, val_f1_mac, duration) = incremental_evaluate(sess, model, minibatch, FLAGS.batch_size)
                else:
                    (val_cost, val_f1_mic, val_f1_mac, duration) = evaluate(sess, model, minibatch, FLAGS.validate_batch_size)
                sess.run(train_adj_info.op)
                epoch_val_costs[(- 1)] += val_cost
            if ((total_steps % FLAGS.print_every) == 0):
                summary_writer.add_summary(outs[0], total_steps)
            # Running average of per-step wall-clock time.
            avg_time = ((((avg_time * total_steps) + time.time()) - t) / (total_steps + 1))
            if ((total_steps % FLAGS.print_every) == 0):
                (train_f1_mic, train_f1_mac) = calc_f1(labels, outs[(- 1)])
                print('Iter:', ('%04d' % iter), 'train_loss=', '{:.5f}'.format(train_cost), 'train_f1_mic=', '{:.5f}'.format(train_f1_mic), 'train_f1_mac=', '{:.5f}'.format(train_f1_mac), 'val_loss=', '{:.5f}'.format(val_cost), 'val_f1_mic=', '{:.5f}'.format(val_f1_mic), 'val_f1_mac=', '{:.5f}'.format(val_f1_mac), 'time=', '{:.5f}'.format(avg_time))
            iter += 1
            total_steps += 1
            if (total_steps > FLAGS.max_total_steps):
                break
        if (total_steps > FLAGS.max_total_steps):
            break
    print('Optimization Finished!')
    # Final full validation + test evaluation on the validation adjacency.
    sess.run(val_adj_info.op)
    (val_cost, val_f1_mic, val_f1_mac, duration) = incremental_evaluate(sess, model, minibatch, FLAGS.batch_size)
    print('Full validation stats:', 'loss=', '{:.5f}'.format(val_cost), 'f1_micro=', '{:.5f}'.format(val_f1_mic), 'f1_macro=', '{:.5f}'.format(val_f1_mac), 'time=', '{:.5f}'.format(duration))
    with open((log_dir() + 'val_stats.txt'), 'w') as fp:
        fp.write('loss={:.5f} f1_micro={:.5f} f1_macro={:.5f} time={:.5f}'.format(val_cost, val_f1_mic, val_f1_mac, duration))
    print("Writing test set stats to file (don't peak!)")
    (val_cost, val_f1_mic, val_f1_mac, duration) = incremental_evaluate(sess, model, minibatch, FLAGS.batch_size, test=True)
    with open((log_dir() + 'test_stats.txt'), 'w') as fp:
        fp.write('loss={:.5f} f1_micro={:.5f} f1_macro={:.5f}'.format(val_cost, val_f1_mic, val_f1_mac))
|
def main(argv=None):
    """Entry point: load the dataset named by FLAGS.train_prefix and train on it."""
    print('Loading training data..')
    data = load_data(FLAGS.train_prefix)
    print('Done loading training data..')
    train(data)
|
def calc_f1(y_true, y_pred):
    """Micro/macro F1 after argmax over the class axis (single-label).

    NOTE(review): this redefines the earlier calc_f1 in the same module;
    whichever is defined later wins at import time.
    """
    true_cls = np.argmax(y_true, axis=1)
    pred_cls = np.argmax(y_pred, axis=1)
    micro = metrics.f1_score(true_cls, pred_cls, average='micro')
    macro = metrics.f1_score(true_cls, pred_cls, average='macro')
    return (micro, macro)
|
def cal_acc(y_true, y_pred):
    """Accuracy after argmax over the class axis (single-label)."""
    true_cls = np.argmax(y_true, axis=1)
    pred_cls = np.argmax(y_pred, axis=1)
    return metrics.accuracy_score(true_cls, pred_cls)
|
class Model(object):
    """HACUD model: multi-meta-path GCN with feature and path attention,
    followed by fully-connected classification layers."""

    def __init__(self, data_config, pretrain_data, args):
        # data_config supplies graph sizes, normalized adjacencies and node
        # features; args carries the training hyper-parameters.
        self.model_type = 'hacud'
        self.adj_type = args.adj_type
        self.early_stop = args.early_stop
        self.pretrain_data = pretrain_data
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
        self.n_nodes = data_config['n_nodes']
        self.n_metapath = data_config['n_metapath']
        self.n_class = data_config['n_class']
        self.n_fold = args.n_fold
        self.n_fc = args.n_fc
        # NOTE(review): args.fc is eval'ed from a CLI string (e.g. "[32,16,8,4]");
        # eval on user input is unsafe in general — consider ast.literal_eval.
        self.fc = eval(args.fc)
        self.reg = args.reg
        self.norm_adj = data_config['norm_adj']
        self.features = data_config['features']
        self.f_dim = self.features.shape[1]
        self.lr = args.lr
        self.emb_dim = args.embed_size
        self.batch_size = args.batch_size
        self.verbose = args.verbose
        '\n        Create Placeholder for Input Data & Dropout.\n        '
        self.nodes = tf.placeholder(tf.int32, shape=(None,))
        '\n        Create Model Parameters (i.e., Initialize Weights).\n        '
        self.weights = self._init_weights()
        '\n        Compute Graph-based Representations of all nodes\n        '
        self.n_embeddings = self._create_embedding()
        '\n        Establish the representations of nodes in a batch.\n        '
        self.batch_embeddings = tf.nn.embedding_lookup(self.n_embeddings, self.nodes)
        self.label = tf.placeholder(tf.float32, shape=(None, self.n_class))
        self.pred_label = self.pred(self.batch_embeddings)
        self.loss = self.create_loss(self.pred_label, self.label)
        self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)

    def _init_weights(self):
        """Create all trainable weights, registered under tf.GraphKeys.WEIGHTS.

        Includes: input projection (W, b), fully-connected layers (W_n, b_n),
        per-meta-path fusion weights (W_rho, b_rho, W_f, b_f, z), and the
        shared feature-attention weights (W_f1/b_f1, W_f2/b_f2).
        """
        all_weights = dict()
        initializer = tf.contrib.layers.xavier_initializer()
        print('using xavier initialization')
        # Prepend the embedding size so fc[n] -> fc[n+1] chains from emb_dim.
        self.fc = ([self.emb_dim] + self.fc)
        all_weights['W'] = tf.Variable(initializer([self.f_dim, self.emb_dim]), name='W')
        all_weights['b'] = tf.Variable(initializer([1, self.emb_dim]), name='b')
        tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights['W'])
        tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights['b'])
        # Classification MLP weights.
        for n in range(self.n_fc):
            all_weights[('W_%d' % n)] = tf.Variable(initializer([self.fc[n], self.fc[(n + 1)]]), name=('W_%d' % n))
            all_weights[('b_%d' % n)] = tf.Variable(initializer([1, self.fc[(n + 1)]]), name=('b_%d' % n))
            tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights[('W_%d' % n)])
            tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights[('b_%d' % n)])
        # Per-meta-path projection, fusion, and path-attention weights.
        for n in range(self.n_metapath):
            all_weights[('W_rho_%d' % n)] = tf.Variable(initializer([self.f_dim, self.emb_dim]), name=('W_rho_%d' % n))
            all_weights[('b_rho_%d' % n)] = tf.Variable(initializer([1, self.emb_dim]), name=('b_rho_%d' % n))
            all_weights[('W_f_%d' % n)] = tf.Variable(initializer([(2 * self.emb_dim), self.emb_dim]), name=('W_f_%d' % n))
            all_weights[('b_f_%d' % n)] = tf.Variable(initializer([1, self.emb_dim]), name=('b_f_%d' % n))
            all_weights[('z_%d' % n)] = tf.Variable(initializer([1, (self.emb_dim * self.n_metapath)]), name=('z_%d' % n))
            tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights[('W_rho_%d' % n)])
            tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights[('b_rho_%d' % n)])
            tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights[('W_f_%d' % n)])
            tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights[('b_f_%d' % n)])
            tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights[('z_%d' % n)])
        # Shared feature-attention weights.
        all_weights['W_f1'] = tf.Variable(initializer([(2 * self.emb_dim), self.emb_dim]), name='W_f1')
        all_weights['b_f1'] = tf.Variable(initializer([1, self.emb_dim]), name='b_f1')
        tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights['W_f1'])
        tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights['b_f1'])
        all_weights['W_f2'] = tf.Variable(initializer([self.emb_dim, self.emb_dim]), name='W_f2')
        all_weights['b_f2'] = tf.Variable(initializer([1, self.emb_dim]), name='b_f2')
        tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights['W_f2'])
        tf.add_to_collection(tf.GraphKeys.WEIGHTS, all_weights['b_f2'])
        return all_weights

    def _split_A_hat(self, X):
        """Split a sparse adjacency matrix row-wise into n_fold sparse tensors
        (keeps each sparse-dense matmul small enough to fit in memory)."""
        A_fold_hat = []
        fold_len = (self.n_nodes // self.n_fold)
        for i_fold in range(self.n_fold):
            start = (i_fold * fold_len)
            # Last fold absorbs the remainder rows.
            if (i_fold == (self.n_fold - 1)):
                end = self.n_nodes
            else:
                end = ((i_fold + 1) * fold_len)
            A_fold_hat.append(self._convert_sp_mat_to_sp_tensor(X[start:end]))
        return A_fold_hat

    def _create_embedding(self):
        """Build node embeddings: per-meta-path graph convolution, feature
        fusion with attention, then attention-weighted aggregation across
        meta-paths. Returns an [n_nodes, emb_dim] tensor."""
        A_fold_hat = {}
        for n in range(self.n_metapath):
            A_fold_hat[('%d' % n)] = self._split_A_hat(self.norm_adj[n])
        embeddings = self.features
        embeddings = embeddings.astype(np.float32)
        # h: projected self features, shared across meta-paths.
        h = (tf.matmul(embeddings, self.weights['W']) + self.weights['b'])
        embed_u = {}
        h_u = {}
        f_u = {}
        v_u = {}
        alp_u = {}
        alp_hat = {}
        f_tilde = {}
        for n in range(self.n_metapath):
            ' Graph Convolution '
            # Propagate raw features along meta-path n, fold by fold.
            embed_u[('%d' % n)] = []
            for f in range(self.n_fold):
                embed_u[('%d' % n)].append(tf.sparse_tensor_dense_matmul(A_fold_hat[('%d' % n)][f], embeddings))
            embed_u[('%d' % n)] = tf.concat(embed_u[('%d' % n)], 0)
            ' Feature Fusion '
            # Project neighbor aggregate, then fuse with self features.
            h_u[('%d' % n)] = (tf.matmul(embed_u[('%d' % n)], self.weights[('W_rho_%d' % n)]) + self.weights[('b_rho_%d' % n)])
            f_u[('%d' % n)] = tf.nn.relu((tf.matmul(tf.concat([h, h_u[('%d' % n)]], 1), self.weights[('W_f_%d' % n)]) + self.weights[('b_f_%d' % n)]))
            ' Feature Attention '
            # Element-wise attention over the fused feature dimensions.
            v_u[('%d' % n)] = tf.nn.relu((tf.matmul(tf.concat([h, f_u[('%d' % n)]], 1), self.weights['W_f1']) + self.weights['b_f1']))
            alp_u[('%d' % n)] = tf.nn.relu((tf.matmul(v_u[('%d' % n)], self.weights['W_f2']) + self.weights['b_f2']))
            alp_hat[('%d' % n)] = tf.nn.softmax(alp_u[('%d' % n)], axis=1)
            f_tilde[('%d' % n)] = tf.multiply(alp_hat[('%d' % n)], f_u[('%d' % n)])
        ' Path Attention '
        # Concatenate all meta-path representations to score each path.
        f_c = []
        for n in range(self.n_metapath):
            f_c.append(f_tilde[('%d' % n)])
        f_c = tf.concat(f_c, 1)
        for n in range(self.n_metapath):
            if (n == 0):
                beta = tf.matmul(f_c, tf.transpose(self.weights[('z_%d' % n)]))
                f = f_tilde[('%d' % n)]
                f = tf.expand_dims(f, (- 1))
            else:
                beta = tf.concat([beta, tf.matmul(f_c, tf.transpose(self.weights[('z_%d' % n)]))], axis=1)
                f = tf.concat([f, tf.expand_dims(f_tilde[('%d' % n)], (- 1))], axis=2)
        # Softmax over meta-paths, then weighted sum of per-path embeddings.
        beta_u = tf.nn.softmax(beta, axis=1)
        beta_u = tf.transpose(tf.expand_dims(beta_u, 0), (1, 0, 2))
        e_u = tf.multiply(beta_u, f)
        e_u = tf.reduce_sum(e_u, axis=2)
        return e_u

    def pred(self, x):
        """Classification MLP: ReLU hidden layers, linear (logit) output layer."""
        for n in range(self.n_fc):
            if (n == (self.n_fc - 1)):
                x = (tf.matmul(x, self.weights[('W_%d' % n)]) + self.weights[('b_%d' % n)])
            else:
                x = tf.nn.relu((tf.matmul(x, self.weights[('W_%d' % n)]) + self.weights[('b_%d' % n)]))
        return x

    def create_ce_loss(self, x, y):
        """Summed softmax cross-entropy between logits x and labels y."""
        ce_loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=x, labels=y), 0)
        return ce_loss

    def create_reg_loss(self):
        """L2 regularization over every trainable variable."""
        reg_loss = tf.add_n([tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()])
        return reg_loss

    def create_loss(self, x, y):
        """Total loss = cross-entropy + reg * L2 penalty."""
        self.ce_loss = self.create_ce_loss(x, y)
        self.reg_loss = self.create_reg_loss()
        loss = (self.ce_loss + (self.reg * self.reg_loss))
        return loss

    def _convert_sp_mat_to_sp_tensor(self, X):
        """Convert a scipy sparse matrix to a tf.SparseTensor (float32)."""
        coo = X.tocoo().astype(np.float32)
        indices = np.mat([coo.row, coo.col]).transpose()
        return tf.SparseTensor(indices, coo.data, coo.shape)

    def train(self, sess, nodes, labels):
        """Run one optimization step; return (loss, ce_loss, reg_loss)."""
        (_, batch_loss, batch_ce_loss, reg_loss) = sess.run([self.opt, self.loss, self.ce_loss, self.reg_loss], feed_dict={self.nodes: nodes, self.label: labels})
        return (batch_loss, batch_ce_loss, reg_loss)

    def eval(self, sess, nodes, labels):
        """Evaluate without updating; return (loss, ce_loss, reg_loss, logits)."""
        (loss, ce_loss, reg_loss, pred_label) = sess.run([self.loss, self.ce_loss, self.reg_loss, self.pred_label], feed_dict={self.nodes: nodes, self.label: labels})
        return (loss, ce_loss, reg_loss, pred_label)
|
def parse_args():
    """Parse and return HACUD command-line arguments."""
    p = argparse.ArgumentParser(description='Run HACUD.')
    # Paths and dataset selection.
    p.add_argument('--weights_path', nargs='?', default='', help='Store model path.')
    p.add_argument('--data_path', nargs='?', default='../Data/', help='Input data path.')
    p.add_argument('--proj_path', nargs='?', default='', help='Project path.')
    p.add_argument('--dataset', nargs='?', default='dblp', help='Choose a dataset from {dblp, yelp}')
    p.add_argument('--pretrain', type=int, default=0, help='0: No pretrain, -1: Pretrain with the learned embeddings, 1:Pretrain with stored models.')
    # Training schedule and model capacity.
    p.add_argument('--verbose', type=int, default=1, help='Interval of evaluation.')
    p.add_argument('--epoch', type=int, default=500, help='Number of epoch.')
    p.add_argument('--embed_size', type=int, default=64, help='Embedding size.')
    p.add_argument('--batch_size', type=int, default=64, help='Batch size.')
    p.add_argument('--n_fold', type=int, default=20, help='number of fold.')
    p.add_argument('--n_fc', type=int, default=4, help='number of fully-connected layers.')
    p.add_argument('--fc', nargs='?', default='[32,16,8,4]', help='Output sizes of every layer')
    p.add_argument('--lr', type=float, default=0.001, help='Learning rate.')
    p.add_argument('--reg', type=float, default=0.001, help='Regularization ratio.')
    # Model/graph variants.
    p.add_argument('--model_type', nargs='?', default='ngcf', help='Specify the name of model (ngcf).')
    p.add_argument('--adj_type', nargs='?', default='norm', help='Specify the type of the adjacency (laplacian) matrix from {plain, norm, mean}.')
    p.add_argument('--alg_type', nargs='?', default='ngcf', help='Specify the type of the graph convolutional layer from {ngcf, gcn, gcmc}.')
    # Runtime / reporting switches.
    p.add_argument('--save_flag', type=int, default=0, help='0: Disable model saver, 1: Activate model saver')
    p.add_argument('--test_flag', nargs='?', default='part', help='Specify the test type from {part, full}, indicating whether the reference is done in mini-batch')
    p.add_argument('--gpu', nargs='?', default='0')
    p.add_argument('--early_stop', type=int, default=10)
    p.add_argument('--report', type=int, default=0, help='0: Disable performance report w.r.t. sparsity levels, 1: Show performance report w.r.t. sparsity levels')
    return p.parse_args()
|
class Player2Vec(Algorithm):
    """Player2Vec: one GCN per meta-path graph, an attention layer fusing the
    per-meta-path embeddings, and a sigmoid classifier on top."""

    def __init__(self, session, meta, nodes, class_size, gcn_output1, embedding, encoding):
        # meta: number of meta-path adjacency matrices; nodes: node count;
        # class_size: number of output classes; gcn_output1: GCN hidden width;
        # embedding: input feature width; encoding: per-node embedding width.
        self.meta = meta
        self.nodes = nodes
        self.class_size = class_size
        self.gcn_output1 = gcn_output1
        self.embedding = embedding
        self.encoding = encoding
        self.placeholders = {'a': tf.placeholder(tf.float32, [self.meta, self.nodes, self.nodes], 'adj'), 'x': tf.placeholder(tf.float32, [self.nodes, self.embedding], 'nxf'), 'batch_index': tf.placeholder(tf.int32, [None], 'index'), 't': tf.placeholder(tf.float32, [None, self.class_size], 'labels'), 'lr': tf.placeholder(tf.float32, [], 'learning_rate'), 'mom': tf.placeholder(tf.float32, [], 'momentum'), 'num_features_nonzero': tf.placeholder(tf.int32)}
        (loss, probabilities) = self.forward_propagation()
        (self.loss, self.probabilities) = (loss, probabilities)
        # L2 regularization over all trainable variables, added to the loss below.
        self.l2 = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(0.01), tf.trainable_variables())
        self.pred = tf.one_hot(tf.argmax(self.probabilities, 1), class_size)
        print(self.pred.shape)
        self.correct_prediction = tf.equal(tf.argmax(self.probabilities, 1), tf.argmax(self.placeholders['t'], 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, 'float'))
        print('Forward propagation finished.')
        self.sess = session
        # Adam with gradient clipping to [-5, 5].
        self.optimizer = tf.train.AdamOptimizer(self.placeholders['lr'])
        gradients = self.optimizer.compute_gradients((self.loss + self.l2))
        capped_gradients = [(tf.clip_by_value(grad, (- 5.0), 5.0), var) for (grad, var) in gradients if (grad is not None)]
        self.train_op = self.optimizer.apply_gradients(capped_gradients)
        self.init = tf.global_variables_initializer()
        print('Backward propagation finished.')

    def forward_propagation(self):
        """Build the forward graph; return (loss, per-class sigmoid probabilities)."""
        with tf.variable_scope('gcn'):
            # One GCN per meta-path; each produces a flattened [1, nodes*encoding] row.
            gcn_emb = []
            for i in range(self.meta):
                gcn_out = tf.reshape(GCN(self.placeholders, self.gcn_output1, self.embedding, self.encoding, index=i).embedding(), [1, (self.nodes * self.encoding)])
                gcn_emb.append(gcn_out)
            gcn_emb = tf.concat(gcn_emb, 0)
            assert (gcn_emb.shape == [self.meta, (self.nodes * self.encoding)])
            print('GCN embedding over!')
        with tf.variable_scope('attention'):
            # Attention across meta-paths, then reshape back to per-node embeddings.
            gat_out = AttentionLayer.attention(inputs=gcn_emb, attention_size=1, v_type='tanh')
            gat_out = tf.reshape(gat_out, [self.nodes, self.encoding])
            print('Embedding with attention over!')
        with tf.variable_scope('classification'):
            # Select the batch rows via one-hot matmul, then linear + sigmoid.
            batch_data = tf.matmul(tf.one_hot(self.placeholders['batch_index'], self.nodes), gat_out)
            W = tf.get_variable(name='weights', shape=[self.encoding, self.class_size], initializer=tf.contrib.layers.xavier_initializer())
            b = tf.get_variable(name='bias', shape=[1, self.class_size], initializer=tf.zeros_initializer())
            # NOTE(review): this transpose is a no-op (perm=[0, 1]) and its
            # result is discarded — kept byte-identical from the original.
            tf.transpose(batch_data, perm=[0, 1])
            logits = (tf.matmul(batch_data, W) + b)
            loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.placeholders['t'], logits=logits)
        return (loss, tf.nn.sigmoid(logits))

    def train(self, x, a, t, b, learning_rate=0.01, momentum=0.9):
        """One training step; return (loss, accuracy, one-hot preds, probabilities)."""
        feed_dict = utils.construct_feed_dict(x, a, t, b, learning_rate, momentum, self.placeholders)
        outs = self.sess.run([self.train_op, self.loss, self.accuracy, self.pred, self.probabilities], feed_dict=feed_dict)
        loss = outs[1]
        acc = outs[2]
        pred = outs[3]
        prob = outs[4]
        return (loss, acc, pred, prob)

    def test(self, x, a, t, b, learning_rate=0.01, momentum=0.9):
        """Evaluate without updating; return (accuracy, preds, probabilities, hit mask)."""
        feed_dict = utils.construct_feed_dict(x, a, t, b, learning_rate, momentum, self.placeholders)
        (acc, pred, probabilities, tags) = self.sess.run([self.accuracy, self.pred, self.probabilities, self.correct_prediction], feed_dict=feed_dict)
        return (acc, pred, probabilities, tags)
|
def arg_parser():
    """Parse and return Player2Vec command-line arguments.

    BUG FIXES vs. original:
    - ``--momentum`` was declared ``type=int`` with a float default (0.9),
      so any value passed on the command line crashed or truncated; now float.
    - ``--learning_rate`` had no type (values arrived as strings) and its
      help text was a copy-paste of an unrelated option; both fixed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=123, help='Random seed.')
    parser.add_argument('--dataset_str', type=str, default='dblp', help="['dblp','example']")
    parser.add_argument('--epoch_num', type=int, default=30, help='Number of epochs to train.')
    parser.add_argument('--batch_size', type=int, default=1000)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate.')
    parser.add_argument('--hidden1', default=16, help='Number of units in GCN hidden layer 1.')
    parser.add_argument('--hidden2', default=16, help='Number of units in GCN hidden layer 2.')
    parser.add_argument('--gcn_output', default=4, help='gcn output size.')
    args = parser.parse_args()
    return args
|
def set_env(args):
    """Make runs reproducible: seed numpy, reset the TF graph, seed TF."""
    np.random.seed(args.seed)
    tf.reset_default_graph()
    # Graph-level seed must be set after the reset to apply to the new graph.
    tf.set_random_seed(args.seed)
|
def get_data(ix, int_batch, train_size, data=None, labels=None):
    """Return one minibatch slice (data, labels) of the training set.

    Args:
        ix: start index of the batch.
        int_batch: batch size.
        train_size: total number of training examples.
        data: sequence to slice; defaults to the module-level ``train_data``
            (kept for backward compatibility with existing callers).
        labels: label sequence; defaults to the module-level ``train_label``.

    The final batch is shifted left so every batch has exactly ``int_batch``
    elements (the last few examples may be reused across batches).
    """
    if data is None:
        data = train_data
    if labels is None:
        labels = train_label
    if (ix + int_batch) >= train_size:
        # Clamp at 0 so a batch larger than the dataset can't go negative
        # and silently wrap around with a negative slice start.
        ix = max(train_size - int_batch, 0)
        end = train_size
    else:
        end = ix + int_batch
    return (data[ix:end], labels[ix:end])
|
def load_data(args):
    """Load the dataset selected by ``args.dataset_str``.

    Returns:
        (adj_list, features, train_data, train_label, test_data, test_label,
        paras) where paras = [node_size, node_embedding, class_size,
        train_size].

    Raises:
        ValueError: if the dataset name is not supported. (The original
        silently fell through and returned None, deferring the failure to
        the caller.)
    """
    if args.dataset_str != 'dblp':
        raise ValueError('Unsupported dataset_str: %s' % args.dataset_str)
    (adj_list, features, train_data, train_label, test_data, test_label) = load_data_dblp()
    node_size = features.shape[0]
    node_embedding = features.shape[1]
    class_size = train_label.shape[1]
    train_size = len(train_data)
    paras = [node_size, node_embedding, class_size, train_size]
    return (adj_list, features, train_data, train_label, test_data, test_label, paras)
|
def train(args, adj_list, features, train_data, train_label, test_data, test_label, paras):
    """Train and evaluate a Player2Vec model.

    Args:
        args: parsed hyper-parameters (epoch_num, batch_size, learning_rate, ...).
        adj_list: list of adjacency matrices, one per view/meta-graph.
        features: node feature matrix (nodes x embedding).
        train_data/train_label, test_data/test_label: batch index and label arrays.
        paras: [node_size, node_embedding, class_size, train_size].
    """
    with tf.Session() as sess:
        adj_data = [normalize_adj(adj) for adj in adj_list]
        meta_size = len(adj_list)
        net = Player2Vec(session=sess, class_size=paras[2], gcn_output1=args.hidden1,
                         meta=meta_size, nodes=paras[0], embedding=paras[1],
                         encoding=args.gcn_output)
        sess.run(tf.global_variables_initializer())
        # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is
        # the documented replacement for interval timing.
        t_start = time.perf_counter()
        for epoch in range(args.epoch_num):
            train_loss = 0
            train_acc = 0
            count = 0
            for index in range(0, paras[3], args.batch_size):
                batch_data, batch_label = get_data(index, args.batch_size, paras[3])
                loss, acc, pred, prob = net.train(features, adj_data, batch_label, batch_data,
                                                  args.learning_rate, args.momentum)
                print('batch loss: {:.4f}, batch acc: {:.4f}'.format(loss, acc))
                train_loss += loss
                train_acc += acc
                count += 1
            train_loss = train_loss / count
            train_acc = train_acc / count
            print('epoch{:d} : train_loss: {:.4f}, train_acc: {:.4f}'.format(epoch, train_loss, train_acc))
        t_end = time.perf_counter()
        print('train time=', '{:.5f}'.format(t_end - t_start))
        print('Train end!')
        test_acc, test_pred, test_probabilities, test_tags = net.test(features, adj_data, test_label, test_data)
        print('test acc:', test_acc)
|
class SemiGNN(Algorithm):
    """Semi-supervised attentive GNN with node-level and view-level attention.

    Combines a supervised softmax loss over labeled nodes with an
    unsupervised skip-gram-style graph loss over sampled node pairs,
    weighted by `alpha`; `lamtha` scales an L2 penalty on all trainables.
    """
    def __init__(self, session, nodes, class_size, semi_encoding1, semi_encoding2, semi_encoding3, init_emb_size, meta, ul, alpha, lamtha):
        # nodes: number of graph nodes; meta: number of views/meta-graphs.
        self.nodes = nodes
        self.meta = meta
        self.class_size = class_size
        # semi_encoding1/2: view-attention layer widths; semi_encoding3: MLP output width.
        self.semi_encoding1 = semi_encoding1
        self.semi_encoding2 = semi_encoding2
        self.semi_encoding3 = semi_encoding3
        self.init_emb_size = init_emb_size
        # ul: number of labeled users (used as 1/ul normalizer in loss1).
        self.ul = ul
        # alpha: supervised vs unsupervised loss trade-off; lamtha: L2 weight.
        self.alpha = alpha
        self.lamtha = lamtha
        # Graph inputs: 'a' stacks one nodes x nodes adjacency per view;
        # u_i/u_j are sampled node-pair indices for the unsupervised loss;
        # graph_label is +1/-1 for positive/negative pairs.
        self.placeholders = {'a': tf.placeholder(tf.float32, [self.meta, self.nodes, self.nodes], 'adj'), 'u_i': tf.placeholder(tf.float32, [None], 'u_i'), 'u_j': tf.placeholder(tf.float32, [None], 'u_j'), 'batch_index': tf.placeholder(tf.int32, [None], 'index'), 'sup_label': tf.placeholder(tf.float32, [None, self.class_size], 'sup_label'), 'graph_label': tf.placeholder(tf.float32, [None, 1], 'graph_label'), 'lr': tf.placeholder(tf.float32, [], 'learning_rate'), 'mom': tf.placeholder(tf.float32, [], 'momentum'), 'num_features_nonzero': tf.placeholder(tf.int32)}
        (loss, probabilities, pred) = self.forward_propagation()
        (self.loss, self.probabilities, self.pred) = (loss, probabilities, pred)
        # L2 regularization over every trainable variable, scaled by lamtha below.
        self.l2 = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(0.01), tf.trainable_variables())
        print(self.pred.shape)
        self.correct_prediction = tf.equal(tf.argmax(self.probabilities, 1), tf.argmax(self.placeholders['sup_label'], 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, 'float'))
        print('Forward propagation finished.')
        self.sess = session
        self.optimizer = tf.train.AdamOptimizer(self.placeholders['lr'])
        # Clip each gradient to [-5, 5] for stability before applying.
        gradients = self.optimizer.compute_gradients((self.loss + (self.lamtha * self.l2)))
        capped_gradients = [(tf.clip_by_value(grad, (- 5.0), 5.0), var) for (grad, var) in gradients if (grad is not None)]
        self.train_op = self.optimizer.apply_gradients(capped_gradients)
        self.init = tf.global_variables_initializer()
        print('Backward propagation finished.')
    def forward_propagation(self):
        """Build the graph: node attention per view -> view attention -> MLP -> joint loss.

        Returns:
            (loss, prob, pred) tensors.
        """
        with tf.variable_scope('node_level_attention', reuse=tf.AUTO_REUSE):
            h1 = []
            # One shared initial embedding table (reuse=AUTO_REUSE), attended
            # per view by the corresponding adjacency slice.
            for i in range(self.meta):
                emb = tf.get_variable(name='init_embedding', shape=[self.nodes, self.init_emb_size], initializer=tf.contrib.layers.xavier_initializer())
                h = AttentionLayer.node_attention(inputs=emb, adj=self.placeholders['a'][i])
                h = tf.reshape(h, [self.nodes, emb.shape[1]])
                h1.append(h)
            h1 = tf.concat(h1, 0)
            h1 = tf.reshape(h1, [self.meta, self.nodes, self.init_emb_size])
            print('Node_level attention over!')
        with tf.variable_scope('view_level_attention'):
            h2 = AttentionLayer.view_attention(inputs=h1, layer_size=2, meta=self.meta, encoding1=self.semi_encoding1, encoding2=self.semi_encoding2)
            h2 = tf.reshape(h2, [self.nodes, (self.semi_encoding2 * self.meta)])
            print('View_level attention over!')
        with tf.variable_scope('MLP'):
            # a_u: final per-node embedding used by both loss terms.
            a_u = tf.layers.dense(inputs=h2, units=self.semi_encoding3, activation=None)
        with tf.variable_scope('loss'):
            # Select the embeddings of the current labeled batch via one-hot matmul.
            labeled_a_u = tf.matmul(tf.one_hot(self.placeholders['batch_index'], self.nodes), a_u)
            theta = tf.get_variable(name='theta', shape=[self.semi_encoding3, self.class_size], initializer=tf.contrib.layers.xavier_initializer())
            logits = tf.matmul(labeled_a_u, theta)
            prob = tf.nn.sigmoid(logits)
            pred = tf.one_hot(tf.argmax(prob, 1), self.class_size)
            # Supervised cross-entropy over labeled nodes, normalized by ul.
            loss1 = ((- (1 / self.ul)) * tf.reduce_sum((self.placeholders['sup_label'] * tf.log(tf.nn.softmax(logits)))))
            # Unsupervised pairwise loss: sigmoid of signed inner products of
            # embeddings for sampled pairs (u_i, u_j).
            u_i_embedding = tf.nn.embedding_lookup(a_u, tf.cast(self.placeholders['u_i'], dtype=tf.int32))
            u_j_embedding = tf.nn.embedding_lookup(a_u, tf.cast(self.placeholders['u_j'], dtype=tf.int32))
            inner_product = tf.reduce_sum((u_i_embedding * u_j_embedding), axis=1)
            loss2 = (- tf.reduce_mean(tf.log_sigmoid((self.placeholders['graph_label'] * inner_product))))
            loss = ((self.alpha * loss1) + ((1 - self.alpha) * loss2))
        return (loss, prob, pred)
    def train(self, a, u_i, u_j, batch_graph_label, batch_data, batch_sup_label, learning_rate=0.01, momentum=0.9):
        """Run one optimization step; return (loss, acc, pred, prob) for the batch."""
        feed_dict = utils.construct_feed_dict_semi(a, u_i, u_j, batch_graph_label, batch_data, batch_sup_label, learning_rate, momentum, self.placeholders)
        outs = self.sess.run([self.train_op, self.loss, self.accuracy, self.pred, self.probabilities], feed_dict=feed_dict)
        loss = outs[1]
        acc = outs[2]
        pred = outs[3]
        prob = outs[4]
        return (loss, acc, pred, prob)
    def test(self, a, u_i, u_j, batch_graph_label, batch_data, batch_sup_label, learning_rate=0.01, momentum=0.9):
        """Evaluate without updating weights; return (acc, pred, prob, tags)."""
        feed_dict = utils.construct_feed_dict_semi(a, u_i, u_j, batch_graph_label, batch_data, batch_sup_label, learning_rate, momentum, self.placeholders)
        (acc, pred, probabilities, tags) = self.sess.run([self.accuracy, self.pred, self.probabilities, self.correct_prediction], feed_dict=feed_dict)
        return (acc, pred, probabilities, tags)
|
def arg_parser():
    """Build and parse command-line hyper-parameters for the SemiGNN driver.

    Returns:
        argparse.Namespace with correctly-typed hyper-parameters.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=123, help='Random seed.')
    parser.add_argument('--dataset_str', type=str, default='example', help="['dblp','example']")
    parser.add_argument('--epoch_num', type=int, default=30, help='Number of epochs to train.')
    parser.add_argument('--batch_size', type=int, default=1000)
    # BUG FIX: momentum was declared type=int although its default is 0.9;
    # any value passed on the command line would fail int() conversion.
    parser.add_argument('--momentum', type=float, default=0.9)
    # BUG FIX: learning_rate had no type (CLI values arrived as str) and a
    # copy-pasted, unrelated help string.
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate.')
    # These had no type either, so CLI overrides would have been strings.
    parser.add_argument('--init_emb_size', type=int, default=4, help='initial node embedding size')
    parser.add_argument('--semi_encoding1', type=int, default=3, help='the first view attention layer unit number')
    parser.add_argument('--semi_encoding2', type=int, default=2, help='the second view attention layer unit number')
    parser.add_argument('--semi_encoding3', type=int, default=4, help='one-layer perceptron units')
    parser.add_argument('--Ul', type=int, default=8, help='labeled users number')
    parser.add_argument('--alpha', type=float, default=0.5, help='loss alpha')
    parser.add_argument('--lamtha', type=float, default=0.5, help='loss lamtha')
    args = parser.parse_args()
    return args
|
def set_env(args):
    """Clear any previous TF graph and make numpy/TensorFlow runs reproducible."""
    tf.reset_default_graph()
    seed = args.seed
    np.random.seed(seed)
    tf.set_random_seed(seed)
|
def get_data(ix, int_batch, train_size, data=None, label=None):
    """Return one mini-batch slice [ix:end) of the training arrays.

    If the requested window would run past train_size, the final full-size
    batch ending at train_size is returned instead (original behavior).

    Args:
        ix: start index of the batch.
        int_batch: batch size.
        train_size: total number of training samples.
        data, label: optional sequences to slice; when omitted they default
            to the module-level `train_data` / `train_label` globals, which
            preserves the original (global-dependent) behavior.

    Returns:
        (batch_data, batch_label) tuple of slices.
    """
    if data is None:
        data = train_data
    if label is None:
        label = train_label
    if ix + int_batch >= train_size:
        ix = train_size - int_batch
        end = train_size
    else:
        end = ix + int_batch
    return data[ix:end], label[ix:end]
|
def load_data(args):
    """Load the dataset named by args.dataset_str and derive size parameters.

    Returns:
        (adj_list, features, train_data, train_label, test_data, test_label,
         paras) where paras = [node_size, node_embedding, class_size, train_size].

    Raises:
        ValueError: if args.dataset_str is not supported.  The original code
            silently fell through and crashed later with UnboundLocalError.
    """
    if args.dataset_str == 'example':
        adj_list, features, train_data, train_label, test_data, test_label = load_example_semi()
    else:
        raise ValueError('Unsupported dataset: {}'.format(args.dataset_str))
    node_size = features.shape[0]
    node_embedding = features.shape[1]
    class_size = train_label.shape[1]
    train_size = len(train_data)
    paras = [node_size, node_embedding, class_size, train_size]
    return adj_list, features, train_data, train_label, test_data, test_label, paras
|
def train(args, adj_list, features, train_data, train_label, test_data, test_label, paras):
    """Train and evaluate a SemiGNN model.

    Args:
        args: parsed hyper-parameters.
        adj_list: list of adjacency matrices, one per view.
        features: node feature matrix (not consumed by SemiGNN directly;
            kept for a uniform driver signature).
        train_data/train_label, test_data/test_label: index/label arrays.
        paras: [node_size, node_embedding, class_size, train_size].
    """
    with tf.Session() as sess:
        adj_nodelists = [matrix_to_adjlist(adj, pad=False) for adj in adj_list]
        meta_size = len(adj_list)
        pairs = [random_walks(adj_nodelists[i], 2, 3) for i in range(meta_size)]
        net = SemiGNN(session=sess, class_size=paras[2], semi_encoding1=args.semi_encoding1,
                      semi_encoding2=args.semi_encoding2, semi_encoding3=args.semi_encoding3,
                      meta=meta_size, nodes=paras[0], init_emb_size=args.init_emb_size,
                      ul=args.batch_size, alpha=args.alpha, lamtha=args.lamtha)
        adj_data = [pairs_to_matrix(p, paras[0]) for p in pairs]
        u_i = []
        u_j = []
        # NOTE(review): graph_label keeps only the value from the LAST view of
        # this loop, while u_i/u_j concatenate pairs from ALL views — this
        # looks inconsistent; confirm against the reference implementation.
        for adj_nodelist, p in zip(adj_nodelists, pairs):
            u_i_t, u_j_t, graph_label = get_negative_sampling(p, adj_nodelist)
            u_i.append(u_i_t)
            u_j.append(u_j_t)
        u_i = np.concatenate(np.array(u_i))
        u_j = np.concatenate(np.array(u_j))
        sess.run(tf.global_variables_initializer())
        # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is
        # the documented replacement for interval timing.
        t_start = time.perf_counter()
        for epoch in range(args.epoch_num):
            train_loss = 0
            train_acc = 0
            count = 0
            for index in range(0, paras[3], args.batch_size):
                batch_data, batch_sup_label = get_data(index, args.batch_size, paras[3])
                loss, acc, pred, prob = net.train(adj_data, u_i, u_j, graph_label, batch_data,
                                                  batch_sup_label, args.learning_rate, args.momentum)
                print('batch loss: {:.4f}, batch acc: {:.4f}'.format(loss, acc))
                train_loss += loss
                train_acc += acc
                count += 1
            train_loss = train_loss / count
            train_acc = train_acc / count
            print('epoch{:d} : train_loss: {:.4f}, train_acc: {:.4f}'.format(epoch, train_loss, train_acc))
        t_end = time.perf_counter()
        print('train time=', '{:.5f}'.format(t_end - t_start))
        print('Train end!')
        test_acc, test_pred, test_probabilities, test_tags = net.test(adj_data, u_i, u_j, graph_label, test_data, test_label, args.learning_rate, args.momentum)
        print('test acc:', test_acc)
|
class Algorithm(object):
    """Common base for the graph models: checkpoint save/load helpers and
    the forward_propagation hook subclasses override."""

    def __init__(self, **kwargs):
        self.nodes = None

    def forward_propagation(self):
        """Subclasses build their computation graph here."""
        pass

    def save(self, sess=None):
        """Persist all variables of `sess` to the fixed checkpoint path.

        Raises:
            AttributeError: if no session is supplied.
        """
        if not sess:
            raise AttributeError('TensorFlow session not provided.')
        saver = tf.train.Saver()
        save_path = saver.save(sess, 'tmp/%s.ckpt' % 'temp')
        print('Model saved in file: %s' % save_path)

    def load(self, sess=None):
        """Restore variables into `sess` from the fixed checkpoint path.

        Raises:
            AttributeError: if no session is supplied.
        """
        if not sess:
            raise AttributeError('TensorFlow session not provided.')
        saver = tf.train.Saver()
        save_path = 'tmp/%s.ckpt' % 'temp'
        saver.restore(sess, save_path)
        print('Model restored from file: %s' % save_path)
|
def uniform(shape, scale=0.05, name=None):
    """Create a variable initialized uniformly in [-scale, scale]."""
    values = tf.random_uniform(shape, minval=-scale, maxval=scale, dtype=tf.float32)
    return tf.Variable(values, name=name)
|
def glorot(shape, name=None):
    """Create a variable with Glorot & Bengio (AISTATS 2010) uniform init."""
    limit = np.sqrt(6.0 / (shape[0] + shape[1]))
    values = tf.random_uniform(shape, minval=-limit, maxval=limit, dtype=tf.float32)
    return tf.Variable(values, name=name)
|
def zeros(shape, name=None):
    """Create an all-zeros float32 variable."""
    return tf.Variable(tf.zeros(shape, dtype=tf.float32), name=name)
|
def ones(shape, name=None):
    """Create an all-ones float32 variable."""
    return tf.Variable(tf.ones(shape, dtype=tf.float32), name=name)
|
def get_layer_uid(layer_name=''):
    """Assign a unique, incrementing ID per layer name (starting at 1)."""
    uid = _LAYER_UIDS.get(layer_name, 0) + 1
    _LAYER_UIDS[layer_name] = uid
    return uid
|
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for tf.SparseTensor inputs: keep each entry with probability
    keep_prob and rescale the survivors by 1/keep_prob."""
    keep_mask = tf.cast(tf.floor(keep_prob + tf.random_uniform(noise_shape)), dtype=tf.bool)
    retained = tf.sparse_retain(x, keep_mask)
    return retained * (1.0 / keep_prob)
|
def dot(x, y, sparse=False):
    """Matrix-multiply wrapper: sparse-dense matmul when `sparse`, else dense."""
    matmul = tf.sparse_tensor_dense_matmul if sparse else tf.matmul
    return matmul(x, y)
|
class Layer(object):
    """Base layer class. Defines basic API for all layer objects.
    Implementation inspired by keras (http://keras.io).

    # Properties
        name: String, defines the variable scope of the layer.
        logging: Boolean, switches Tensorflow histogram logging on/off

    # Methods
        _call(inputs): Defines computation graph of layer
            (i.e. takes input, returns output)
        __call__(inputs): Wrapper for _call()
        _log_vars(): Log all variables
    """
    def __init__(self, **kwargs):
        allowed_kwargs = {'name', 'logging'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        name = kwargs.get('name')
        if not name:
            # Auto-name from the class, e.g. "graphconvolution_1".
            layer = self.__class__.__name__.lower()
            name = layer + '_' + str(get_layer_uid(layer))
        self.name = name
        self.vars = {}
        self.logging = kwargs.get('logging', False)
        self.sparse_inputs = False

    # BUG FIX: _call was defined twice; the second definition (which took an
    # extra adj_info argument) silently shadowed the first, and its two-arg
    # signature did not match the single-argument self._call(inputs) call in
    # __call__, so the base implementation always raised TypeError.  Merged
    # into one definition with an optional adj_info, backward compatible
    # with callers of either form.
    def _call(self, inputs, adj_info=None):
        return inputs

    def __call__(self, inputs):
        """Apply the layer inside its name scope, with optional histogram logging."""
        with tf.name_scope(self.name):
            if self.logging and not self.sparse_inputs:
                tf.summary.histogram(self.name + '/inputs', inputs)
            outputs = self._call(inputs)
            if self.logging:
                tf.summary.histogram(self.name + '/outputs', outputs)
            return outputs

    def _log_vars(self):
        """Emit a histogram summary for every variable this layer created."""
        for var in self.vars:
            tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
|
class GraphConvolution(Layer):
    """Graph convolution layer: output = act(batch_norm(A[index] . (x W))).

    Applies dropout to the input, projects by a learned weight matrix,
    propagates through one adjacency support selected by `index`, then
    batch-normalizes (no learned scale/offset) before the activation.
    """
    def __init__(self, input_dim, output_dim, placeholders, index=0, dropout=0.0, sparse_inputs=False, act=tf.nn.relu, bias=False, featureless=False, norm=False, **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)
        # dropout is a drop *rate*; keep probability is 1 - dropout.
        self.dropout = dropout
        self.act = act
        # placeholders['a'] stacks one support per view; index selects which.
        self.support = placeholders['a']
        self.sparse_inputs = sparse_inputs
        # featureless: skip x entirely and use the weight matrix as pre_sup.
        self.featureless = featureless
        self.bias = bias
        # norm: additionally L2-normalize the activated output.
        self.norm = norm
        self.index = index
        # Needed by sparse_dropout when inputs are a SparseTensor.
        self.num_features_nonzero = placeholders['num_features_nonzero']
        with tf.variable_scope((self.name + '_vars')):
            # Loop kept from a multi-support original; only one weight here.
            for i in range(1):
                self.vars[('weights_' + str(i))] = glorot([input_dim, output_dim], name=('weights_' + str(i)))
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
        if self.logging:
            self._log_vars()
    def _call(self, inputs):
        """Forward pass; `inputs` is the node feature matrix (dense or sparse)."""
        x = inputs
        # Dropout (sparse-aware when inputs are a SparseTensor).
        if self.sparse_inputs:
            x = sparse_dropout(x, (1 - self.dropout), self.num_features_nonzero)
        else:
            x = tf.nn.dropout(x, (1 - self.dropout))
        supports = list()
        for i in range(1):
            if (not self.featureless):
                pre_sup = dot(x, self.vars[('weights_' + str(i))], sparse=self.sparse_inputs)
            else:
                pre_sup = self.vars[('weights_' + str(i))]
            # Propagate through the selected adjacency support.
            support = dot(self.support[self.index], pre_sup, sparse=False)
            supports.append(support)
        output = tf.add_n(supports)
        # Batch-normalize over all axes except the last (no scale/offset).
        axis = list(range((len(output.get_shape()) - 1)))
        (mean, variance) = tf.nn.moments(output, axis)
        scale = None
        offset = None
        variance_epsilon = 0.001
        output = tf.nn.batch_normalization(output, mean, variance, offset, scale, variance_epsilon)
        if self.bias:
            output += self.vars['bias']
        if self.norm:
            # NOTE(review): axis=None normalizes over the flattened tensor,
            # not per-row — confirm this is intended.
            return tf.nn.l2_normalize(self.act(output), axis=None, epsilon=1e-12)
        return self.act(output)
|
class AttentionLayer(Layer):
    """AttentionLayer is a function f : hkey x Hval -> hval which maps a
    feature vector hkey and the set of candidates' feature vectors Hval to
    a weighted sum of elements in Hval.

    All methods are stateless and are invoked on the class itself
    (e.g. AttentionLayer.node_attention(...)), so they are @staticmethod.
    """
    @staticmethod
    def attention(inputs, attention_size, v_type=None, return_weights=False, bias=True, joint_type='weighted_sum', multi_view=True):
        """Additive (Bahdanau-style) attention over axis 1 of `inputs`.

        Args:
            inputs: input tensor; when multi_view a leading axis is added.
            attention_size: width of the attention projection.
            v_type: optional nonlinearity name ('tanh' or 'relu').
            return_weights: also return the softmax weights.
            bias: add the projection bias.
            joint_type: 'weighted_sum' or 'concatenation' combination.
            multi_view: prepend a singleton batch axis before attending.
        """
        if multi_view:
            inputs = tf.expand_dims(inputs, 0)
        hidden_size = inputs.shape[-1].value
        w_omega = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
        b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
        u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
        with tf.name_scope('v'):
            v = tf.tensordot(inputs, w_omega, axes=1)
            if bias is True:
                v += b_omega
            # BUG FIX: these string comparisons used `is`, which tests object
            # identity and only worked via CPython string interning (and emits
            # a SyntaxWarning on modern Pythons); use == for value equality.
            if v_type == 'tanh':
                v = tf.tanh(v)
            if v_type == 'relu':
                v = tf.nn.relu(v)
        vu = tf.tensordot(v, u_omega, axes=1, name='vu')
        weights = tf.nn.softmax(vu, name='alphas')
        if joint_type == 'weighted_sum':
            output = tf.reduce_sum(inputs * tf.expand_dims(weights, -1), 1)
        if joint_type == 'concatenation':
            output = tf.concat(inputs * tf.expand_dims(weights, -1), 2)
        if not return_weights:
            return output
        else:
            return (output, weights)

    @staticmethod
    def node_attention(inputs, adj, return_weights=False):
        """Sparse neighbor attention: softmax over nonzero entries of `adj`,
        then aggregate neighbor features."""
        hidden_size = inputs.shape[-1].value
        H_v = tf.Variable(tf.random_normal([hidden_size, 1], stddev=0.1))
        # Convert the dense adjacency to sparse so the softmax runs only
        # over existing edges.
        zero = tf.constant(0, dtype=tf.float32)
        where = tf.not_equal(adj, zero)
        indices = tf.where(where)
        values = tf.gather_nd(adj, indices)
        adj = tf.SparseTensor(indices=indices, values=values, dense_shape=adj.shape)
        with tf.name_scope('v'):
            v = adj * tf.squeeze(tf.tensordot(inputs, H_v, axes=1))
        weights = tf.sparse_softmax(v, name='alphas')
        output = tf.sparse_tensor_dense_matmul(weights, inputs)
        if not return_weights:
            return output
        else:
            return (output, weights)

    @staticmethod
    def view_attention(inputs, encoding1, encoding2, layer_size, meta, return_weights=False):
        """Per-view dense encoders followed by a learned softmax over views."""
        h = inputs
        encoding = [encoding1, encoding2]
        for l in range(layer_size):
            v = []
            for i in range(meta):
                input = h[i]
                v_i = tf.layers.dense(inputs=input, units=encoding[l], activation=tf.nn.relu)
                v.append(v_i)
            h = v
        h = tf.concat(h, 0)
        h = tf.reshape(h, [meta, inputs[0].shape[0].value, encoding2])
        phi = tf.Variable(tf.random_normal([encoding2], stddev=0.1))
        weights = tf.nn.softmax(h * phi, name='alphas')
        output = tf.reshape(h * weights, [1, (inputs[0].shape[0] * encoding2) * meta])
        if not return_weights:
            return output
        else:
            return (output, weights)

    @staticmethod
    def scaled_dot_product_attention(q, k, v, mask):
        """Scaled dot-product attention: softmax(q.k^T / sqrt(dk)) . v."""
        qk = tf.matmul(q, k, transpose_b=True)
        dk = tf.cast(tf.shape(k)[-1], tf.float32)
        scaled_attention = qk / tf.math.sqrt(dk)
        if mask is not None:
            # NOTE(review): adding 1 does not mask anything; conventional
            # masking adds a large negative value at masked positions.
            # Callers in this file always pass mask=None, so kept as-is.
            scaled_attention += 1
        weights = tf.nn.softmax(scaled_attention, axis=-1)
        output = tf.matmul(weights, v)
        return (output, weights)
|
class ConcatenationAggregator(Layer):
    """This layer equals to the equation (3) in
    paper 'Spam Review Detection with Graph Convolutional Networks.'

    Concatenates each review vector with its user's and item's vectors and
    projects the result through a single learned weight matrix.
    """
    def __init__(self, input_dim, output_dim, review_item_adj, review_user_adj, review_vecs, user_vecs, item_vecs, dropout=0.0, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(ConcatenationAggregator, self).__init__(**kwargs)
        # Index vectors mapping each review to its item / user row.
        self.review_item_adj = review_item_adj
        self.review_user_adj = review_user_adj
        self.review_vecs = review_vecs
        self.user_vecs = user_vecs
        self.item_vecs = item_vecs
        # dropout is a drop *rate*; keep probability is 1 - dropout.
        self.dropout = dropout
        self.act = act
        self.concat = concat
        if (name is not None):
            name = ('/' + name)
        else:
            name = ''
        with tf.variable_scope(((self.name + name) + '_vars')):
            self.vars['con_agg_weights'] = glorot([input_dim, output_dim], name='con_agg_weights')
        if self.logging:
            self._log_vars()
        self.input_dim = input_dim
        self.output_dim = output_dim
    def _call(self, inputs):
        """Build act(W . concat(review, user, item)) for every review."""
        review_vecs = tf.nn.dropout(self.review_vecs, (1 - self.dropout))
        user_vecs = tf.nn.dropout(self.user_vecs, (1 - self.dropout))
        item_vecs = tf.nn.dropout(self.item_vecs, (1 - self.dropout))
        ri = tf.nn.embedding_lookup(item_vecs, tf.cast(self.review_item_adj, dtype=tf.int32))
        # NOTE(review): transpose/random_shuffle/transpose shuffles along the
        # second axis — presumably deliberate neighbor shuffling; confirm.
        ri = tf.transpose(tf.random_shuffle(tf.transpose(ri)))
        ru = tf.nn.embedding_lookup(user_vecs, tf.cast(self.review_user_adj, dtype=tf.int32))
        ru = tf.transpose(tf.random_shuffle(tf.transpose(ru)))
        concate_vecs = tf.concat([review_vecs, ru, ri], axis=1)
        output = tf.matmul(concate_vecs, self.vars['con_agg_weights'])
        return self.act(output)
|
class AttentionAggregator(Layer):
    """This layer equals to equation (5) and equation (8) in
    paper 'Spam Review Detection with Graph Convolutional Networks.'

    Aggregates review/item neighborhoods into user vectors and review/user
    neighborhoods into item vectors via scaled dot-product attention.
    """
    def __init__(self, input_dim1, input_dim2, output_dim, hid_dim, user_review_adj, user_item_adj, item_review_adj, item_user_adj, review_vecs, user_vecs, item_vecs, dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(AttentionAggregator, self).__init__(**kwargs)
        # dropout is a drop *rate*; keep probability is 1 - dropout.
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        # Adjacency index tensors mapping users<->reviews/items and items<->reviews/users.
        self.user_review_adj = user_review_adj
        self.user_item_adj = user_item_adj
        self.item_review_adj = item_review_adj
        self.item_user_adj = item_user_adj
        self.review_vecs = review_vecs
        self.user_vecs = user_vecs
        self.item_vecs = item_vecs
        if name is not None:
            name = '/' + name
        else:
            name = ''
        with tf.variable_scope((self.name + name) + '_vars'):
            self.vars['user_weights'] = glorot([input_dim1, hid_dim], name='user_weights')
            self.vars['item_weights'] = glorot([input_dim2, hid_dim], name='item_weights')
            # BUG FIX: the two concat weights were created with the copy-pasted
            # graph names 'user_weights'/'item_weights' (TF silently uniquified
            # them), which made checkpoints and summaries misleading.
            self.vars['concate_user_weights'] = glorot([hid_dim, output_dim], name='concate_user_weights')
            self.vars['concate_item_weights'] = glorot([hid_dim, output_dim], name='concate_item_weights')
            if self.bias:
                # BUG FIX: this read self.output_dim, which is not assigned
                # until after this block, so bias=True always crashed with
                # AttributeError. The bias is added to user_output/item_output,
                # which have hid_dim columns at that point, so size it hid_dim.
                self.vars['bias'] = zeros([hid_dim], name='bias')
        if self.logging:
            self._log_vars()
        self.input_dim1 = input_dim1
        self.input_dim2 = input_dim2
        self.output_dim = output_dim
    def _call(self, inputs):
        """Return (user_output, item_output) aggregated embeddings."""
        review_vecs = tf.nn.dropout(self.review_vecs, (1 - self.dropout))
        user_vecs = tf.nn.dropout(self.user_vecs, (1 - self.dropout))
        item_vecs = tf.nn.dropout(self.item_vecs, (1 - self.dropout))
        # Gather each node's neighbor vectors; the transpose/shuffle/transpose
        # shuffles neighbors along axis 1 (kept from the original).
        ur = tf.nn.embedding_lookup(review_vecs, tf.cast(self.user_review_adj, dtype=tf.int32))
        ur = tf.transpose(tf.random_shuffle(tf.transpose(ur)))
        ri = tf.nn.embedding_lookup(item_vecs, tf.cast(self.user_item_adj, dtype=tf.int32))
        ri = tf.transpose(tf.random_shuffle(tf.transpose(ri)))
        ir = tf.nn.embedding_lookup(review_vecs, tf.cast(self.item_review_adj, dtype=tf.int32))
        ir = tf.transpose(tf.random_shuffle(tf.transpose(ir)))
        ru = tf.nn.embedding_lookup(user_vecs, tf.cast(self.item_user_adj, dtype=tf.int32))
        ru = tf.transpose(tf.random_shuffle(tf.transpose(ru)))
        # Flatten each node's neighborhood into a single row vector.
        concate_user_vecs = tf.concat([ur, ri], axis=2)
        concate_item_vecs = tf.concat([ir, ru], axis=2)
        s1 = tf.shape(concate_user_vecs)
        s2 = tf.shape(concate_item_vecs)
        concate_user_vecs = tf.reshape(concate_user_vecs, [s1[0], s1[1] * s1[2]])
        concate_item_vecs = tf.reshape(concate_item_vecs, [s2[0], s2[1] * s2[2]])
        # Attend over neighborhoods with the node's own vector as query/key.
        concate_user_vecs, _ = AttentionLayer.scaled_dot_product_attention(q=user_vecs, k=user_vecs, v=concate_user_vecs, mask=None)
        concate_item_vecs, _ = AttentionLayer.scaled_dot_product_attention(q=item_vecs, k=item_vecs, v=concate_item_vecs, mask=None)
        user_output = tf.matmul(concate_user_vecs, self.vars['user_weights'])
        item_output = tf.matmul(concate_item_vecs, self.vars['item_weights'])
        if self.bias:
            user_output += self.vars['bias']
            item_output += self.vars['bias']
        user_output = self.act(user_output)
        item_output = self.act(item_output)
        if self.concat:
            user_output = tf.matmul(user_output, self.vars['concate_user_weights'])
            item_output = tf.matmul(item_output, self.vars['concate_item_weights'])
            user_output = tf.concat([user_vecs, user_output], axis=1)
            item_output = tf.concat([item_vecs, item_output], axis=1)
        return (user_output, item_output)
|
class GASConcatenation(Layer):
    """GCN-based Anti-Spam (GAS) layer concatenating, per review, the comment
    embedding learned by GCN from the Comment Graph with the other embeddings
    produced by previous operations."""

    def __init__(self, review_item_adj, review_user_adj, review_vecs, item_vecs, user_vecs, homo_vecs, name=None, **kwargs):
        super(GASConcatenation, self).__init__(**kwargs)
        # Index tensors mapping each review to its item / user row.
        self.review_item_adj = review_item_adj
        self.review_user_adj = review_user_adj
        # Embedding tables concatenated in _call.
        self.review_vecs = review_vecs
        self.user_vecs = user_vecs
        self.item_vecs = item_vecs
        self.homo_vecs = homo_vecs
        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        """Return [item | review | user | homo] rows, one per review."""
        item_side = tf.nn.embedding_lookup(self.item_vecs, tf.cast(self.review_item_adj, dtype=tf.int32))
        user_side = tf.nn.embedding_lookup(self.user_vecs, tf.cast(self.review_user_adj, dtype=tf.int32))
        return tf.concat([item_side, self.review_vecs, user_side, self.homo_vecs], axis=1)
|
class GEMLayer(Layer):
    """This layer equals to the equation (8) in
    paper 'Heterogeneous Graph Neural Networks for Malicious Account Detection.'

    h = sigmoid(x W + sum_d softmax(alpha)_d * (A_d h V)), combining the
    per-device propagated states with learned attention weights alpha.
    """
    def __init__(self, placeholders, nodes, device_num, embedding, encoding, name=None, **kwargs):
        super(GEMLayer, self).__init__(**kwargs)
        self.nodes = nodes
        self.devices_num = device_num
        self.encoding = encoding
        self.embedding = embedding
        self.placeholders = placeholders
        if name is not None:
            name = '/' + name
        else:
            name = ''
        with tf.variable_scope((self.name + name) + '_vars'):
            # W: feature projection, V: propagation projection,
            # alpha: per-device attention logits.
            self.vars['W'] = glorot([embedding, encoding], name='W')
            self.vars['V'] = glorot([encoding, encoding], name='V')
            # BUG FIX: alpha was created with the copy-pasted graph name 'V'
            # (TF silently uniquified it to 'V_1'), making checkpoints and
            # summaries misleading; name it 'alpha'.
            self.vars['alpha'] = glorot([self.devices_num, 1], name='alpha')
        if self.logging:
            self._log_vars()
    def _call(self, inputs):
        """One propagation step; `inputs` is the previous hidden state h."""
        h1 = tf.matmul(self.placeholders['x'], self.vars['W'])
        h2 = []
        for d in range(self.devices_num):
            # A_d . h . V for each device view d.
            ahv = tf.matmul(tf.matmul(self.placeholders['a'][d], inputs), self.vars['V'])
            h2.append(ahv)
        h2 = tf.concat(h2, 0)
        h2 = tf.reshape(h2, [self.devices_num, self.nodes * self.encoding])
        h2 = tf.transpose(h2, [1, 0])
        # Attention-weighted sum over devices via softmax(alpha).
        h2 = tf.reshape(tf.matmul(h2, tf.nn.softmax(self.vars['alpha'])), [self.nodes, self.encoding])
        h = tf.nn.sigmoid(h1 + h2)
        return h
|
class GAT(Layer):
    """This layer is adapted from PetarV-/GAT.

    Multi-head graph attention: `inference` averages `n_heads` independent
    attention heads built by `attn_head`.
    """
    def __init__(self, dim, attn_drop, ffd_drop, bias_mat, n_heads, name=None, **kwargs):
        super(GAT, self).__init__(**kwargs)
        # dim: per-head output feature size.
        self.dim = dim
        # attn_drop / ffd_drop: dropout rates on coefficients / features.
        self.attn_drop = attn_drop
        self.ffd_drop = ffd_drop
        # bias_mat: additive attention mask (large negatives at non-edges
        # — TODO confirm; it is added to the logits before softmax).
        self.bias_mat = bias_mat
        self.n_heads = n_heads
        if (name is not None):
            name = ('/' + name)
        else:
            name = ''
        if self.logging:
            self._log_vars()
    def attn_head(self, seq, out_sz, bias_mat, activation, in_drop=0.0, coef_drop=0.0, residual=False):
        """Build one attention head over the node sequence `seq`.

        Args:
            seq: node features, shape [batch, nodes, features].
            out_sz: output feature size of this head.
            bias_mat: additive mask applied to the attention logits.
            activation: output nonlinearity.
            in_drop / coef_drop: feature / coefficient dropout rates.
            residual: add a (projected) skip connection when True.
        """
        conv1d = tf.layers.conv1d
        with tf.name_scope('my_attn'):
            if (in_drop != 0.0):
                seq = tf.nn.dropout(seq, (1.0 - in_drop))
            # Shared linear transform of the node features.
            seq_fts = tf.layers.conv1d(seq, out_sz, 1, use_bias=False)
            # f_1 + f_2^T yields the pairwise attention logits.
            f_1 = tf.layers.conv1d(seq_fts, 1, 1)
            f_2 = tf.layers.conv1d(seq_fts, 1, 1)
            logits = (f_1 + tf.transpose(f_2, [0, 2, 1]))
            coefs = tf.nn.softmax((tf.nn.leaky_relu(logits) + bias_mat))
            if (coef_drop != 0.0):
                coefs = tf.nn.dropout(coefs, (1.0 - coef_drop))
            if (in_drop != 0.0):
                seq_fts = tf.nn.dropout(seq_fts, (1.0 - in_drop))
            vals = tf.matmul(coefs, seq_fts)
            ret = tf.contrib.layers.bias_add(vals)
            if residual:
                # Project the skip connection when dimensions differ.
                if (seq.shape[(- 1)] != ret.shape[(- 1)]):
                    ret = (ret + conv1d(seq, ret.shape[(- 1)], 1))
                else:
                    ret = (ret + seq)
            return activation(ret)
    def inference(self, inputs):
        """Average n_heads attention heads over `inputs` and return the logits."""
        out = []
        for i in range(self.n_heads):
            out.append(self.attn_head(inputs, bias_mat=self.bias_mat, out_sz=self.dim, activation=tf.nn.elu, in_drop=self.ffd_drop, coef_drop=self.attn_drop, residual=False))
        logits = (tf.add_n(out) / self.n_heads)
        return logits
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.