repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
BertGen | BertGen-master/common/utils/multi_task_dataloader.py | from functools import reduce
import operator
from typing import List
from torch.utils.data import DataLoader
import sys
INT_MAX = sys.maxsize
def prod(iterable):
    """Return the product of all elements in ``iterable`` (1 for an empty one).

    The input is materialized exactly once: the original implementation called
    ``len(list(iterable))`` and then reduced over ``iterable`` again, which
    exhausted one-shot iterators (e.g. generators) and made ``reduce`` fail.

    :param iterable: iterable of values supporting ``*``
    :return: product of the values, or 1 if the iterable is empty
    """
    items = list(iterable)
    return reduce(operator.mul, items) if items else 1
class MultiTaskDataLoader(object):
    """
    Multi-task DataLoader, the first dataloader is master dataloader.

    Each ``__next__`` draws one batch from every loader and concatenates the
    batch tuples into a single flat tuple. The master (first) loader defines
    the epoch length; the remaining loaders are restarted transparently when
    they run out of batches.
    """
    def __init__(self,
                 loaders: List[DataLoader]):
        # loaders[0] is the master loader; at least one slave loader required.
        assert len(loaders) > 1, "Less than 2 loader!"
        self.loaders = loaders
        self.iters = [iter(loader) for loader in loaders]
        self.lens = [len(loader) for loader in loaders]
        # Counts batches served since construction; used to derive the epoch
        # number passed to each slave loader's distributed sampler.
        self.global_idx_in_cycle = 0
    def __iter__(self):
        # Only the master iterator is re-created per epoch; slave iterators
        # keep their position and wrap around on their own inside __next__.
        if self.global_idx_in_cycle > 0:
            self.iters[0] = iter(self.loaders[0])
        return self
    def __next__(self):
        output_tuple = (*next(self.iters[0]), )
        for k, (loader, _iter) in enumerate(zip(self.loaders[1:], self.iters[1:])):
            # Re-seed distributed samplers once per full pass over the loader.
            if hasattr(loader.batch_sampler.sampler, 'set_epoch'):
                loader.batch_sampler.sampler.set_epoch(int(self.global_idx_in_cycle / self.lens[k+1]))
            try:
                output_tuple += (*next(_iter), )
            except StopIteration:
                # Slave loader exhausted: restart it and take its first batch.
                _iter = iter(loader)
                self.iters[k+1] = _iter
                output_tuple += (*next(_iter), )
        # Wrap the counter before it could overflow.
        if self.global_idx_in_cycle < INT_MAX - 1:
            self.global_idx_in_cycle += 1
        else:
            self.global_idx_in_cycle = 0
        return output_tuple
    def __len__(self):
        # The master loader defines the length of an epoch.
        return self.lens[0]
| 1,619 | 26.931034 | 102 | py |
BertGen | BertGen-master/common/utils/misc.py | import os
import numpy as np
import torch
import torch.nn.functional as F
import logging
def block_digonal_matrix(*blocks):
    """
    Assemble a block-diagonal matrix from the given 2-D tensors.

    :param blocks: one or more 2-D tensors placed along the diagonal;
        device and dtype are taken from the first block
    :return: tensor of shape (sum of block heights, sum of block widths),
        zero everywhere except on the diagonal blocks
    """
    assert len(blocks) > 0
    heights = [b.shape[0] for b in blocks]
    widths = [b.shape[1] for b in blocks]
    result = torch.zeros((sum(heights), sum(widths)),
                         device=blocks[0].device,
                         dtype=blocks[0].dtype)
    row_offset = 0
    col_offset = 0
    for b, h, w in zip(blocks, heights, widths):
        result[row_offset:(row_offset + h), col_offset:(col_offset + w)] = b
        row_offset += h
        col_offset += w
    return result
def print_and_log(string, logger=None):
    """Echo *string* to stdout and record it via *logger*.

    Falls back to the root ``logging`` module when no logger is given.
    """
    print(string)
    target = logging if logger is None else logger
    target.info(string)
def summary_parameters(model, logger=None):
    """
    Print a table of the model's trainable parameters, followed by the
    trainable / non-trainable / total parameter counts in millions.

    :param model: torch.nn.Module whose parameters are summarized
    :param logger: optional logger; falls back to the logging module
    :return: None
    """
    print_and_log('>> Trainable Parameters:', logger)
    rows = [(str(n), str(v.dtype), str(tuple(v.shape)), str(v.numel()))
            for n, v in model.named_parameters() if v.requires_grad]
    # Column widths: widest cell per column plus padding.
    col_widths = [max(len(cell) + 4 for cell in column) for column in zip(*rows)]
    row_format = '|' + '|'.join('{{:{}s}}'.format(width) for width in col_widths) + '|'
    separator = '-' * (sum(col_widths) + len(col_widths) + 1)
    print_and_log(separator, logger)
    print_and_log(row_format.format('Name', 'Dtype', 'Shape', '#Params'), logger)
    print_and_log(separator, logger)
    for row in rows:
        print_and_log(row_format.format(*row), logger)
    print_and_log(separator, logger)
    num_trainable = sum(v.numel() for v in model.parameters() if v.requires_grad)
    num_total = sum(v.numel() for v in model.parameters())
    num_frozen = num_total - num_trainable
    print_and_log('>> {:25s}\t{:.2f}\tM'.format('# TrainableParams:', num_trainable / (1.0 * 10 ** 6)), logger)
    print_and_log('>> {:25s}\t{:.2f}\tM'.format('# NonTrainableParams:', num_frozen / (1.0 * 10 ** 6)), logger)
    print_and_log('>> {:25s}\t{:.2f}\tM'.format('# TotalParams:', num_total / (1.0 * 10 ** 6)), logger)
def clip_grad(named_parameters, max_norm, logger=logging, std_verbose=False, log_verbose=False):
    """Clips gradient norm of an iterable of parameters.

    The norm is the 2-norm over all gradients taken together, as if they were
    concatenated into one vector; when it exceeds ``max_norm`` every gradient
    is rescaled in place by the same factor.

    :param named_parameters: iterable of (name, parameter) pairs
    :param max_norm: float or int, maximum allowed total gradient norm
    :param logger: logger used for verbose output (default: logging module)
    :param std_verbose: also print the per-parameter norms to stdout
    :param log_verbose: also log the per-parameter norms
    :raises ValueError: if any parameter's gradient norm is NaN
    :return: dict mapping parameter name -> (pre-clip) gradient norm
    """
    max_norm = float(max_norm)
    params_with_grad = [(name, param) for name, param in named_parameters if param.grad is not None]
    total_norm = 0
    param_to_norm = {}
    param_to_shape = {}
    for name, param in params_with_grad:
        grad_norm = param.grad.data.norm(2)
        total_norm += grad_norm ** 2
        param_to_norm[name] = grad_norm
        param_to_shape[name] = tuple(param.size())
        if np.isnan(grad_norm.item()):
            raise ValueError("the param {} was null.".format(name))
    total_norm = total_norm ** (1. / 2)
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef.item() < 1:
        logger.info('---Clip grad! Total norm: {:.3f}, clip coef: {:.3f}.'.format(total_norm, clip_coef))
        for _, param in params_with_grad:
            param.grad.data.mul_(clip_coef)
    if std_verbose:
        print('---Total norm {:.3f} clip coef {:.3f}-----------------'.format(total_norm, clip_coef))
        for name, norm in sorted(param_to_norm.items(), key=lambda x: -x[1]):
            print("{:<60s}: {:.3f}, ({}: {})".format(name, norm, np.prod(param_to_shape[name]), param_to_shape[name]))
        print('-------------------------------', flush=True)
    if log_verbose:
        logger.info('---Total norm {:.3f} clip coef {:.3f}-----------------'.format(total_norm, clip_coef))
        for name, norm in sorted(param_to_norm.items(), key=lambda x: -x[1]):
            logger.info("{:<60s}: {:.3f}, ({}: {})".format(name, norm, np.prod(param_to_shape[name]), param_to_shape[name]))
        logger.info('-------------------------------')
    return {name: norm.item() for name, norm in param_to_norm.items()}
def bn_fp16_half_eval(m):
    """Convert a BatchNorm module to fp16, but only when it is in eval mode.

    Intended for use with ``Module.apply`` during mixed-precision inference;
    modules that are not BatchNorm, or that are still training, are untouched.
    """
    class_name = str(m.__class__)
    if 'BatchNorm' in class_name and not m.training:
        m.half()
def soft_cross_entropy(input, target, reduction='mean'):
    """
    Cross-entropy loss between input logits and a soft (probability) target.

    Rows whose target does not sum to (approximately) 1 are treated as
    invalid and excluded; if every row is invalid, a zero scalar is returned.

    :param input: Tensor of logits, size (N, C)
    :param target: Tensor of soft labels, size (N, C)
    :param reduction: 'none', 'mean' or 'sum' (default 'mean')
    :raises ValueError: for any other reduction value
    :return: loss tensor (scalar for 'mean'/'sum', shape (N,) for 'none')
    """
    tolerance = 1.0e-1
    # A row is valid only if its target distribution sums to ~1.
    valid = (target.sum(1) - 1).abs() < tolerance
    if valid.sum().item() == 0:
        return input.new_zeros(())
    per_sample = (- F.log_softmax(input[valid], 1) * target[valid]).sum(1)
    if reduction == 'mean':
        return per_sample.mean(0)
    elif reduction == 'sum':
        return per_sample.sum()
    elif reduction == 'none':
        losses = input.new_zeros((input.shape[0], ))
        losses[valid] = per_sample
        return losses
    else:
        raise ValueError('Not support reduction type: {}.'.format(reduction))
| 5,958 | 36.71519 | 124 | py |
BertGen | BertGen-master/common/utils/flatten.py | import torch
class Flattener(torch.nn.Module):
    """Module that flattens everything after the batch dimension."""

    def __init__(self):
        """
        Flattens last 3 dimensions to make it only batch size, -1
        """
        super(Flattener, self).__init__()

    def forward(self, x):
        # Keep dim 0 (batch) and collapse the rest into one dimension.
        batch_size = x.size(0)
        return x.view(batch_size, -1)
| 269 | 19.769231 | 65 | py |
BertGen | BertGen-master/common/utils/bbox.py | import torch
def nonlinear_transform(ex_rois, gt_rois):
    """
    Compute bounding-box regression targets from ex_rois to gt_rois
    (center deltas normalized by the source size, log width/height ratios).

    :param ex_rois: [k, 4] source boxes ([x1, y1, x2, y2])
    :param gt_rois: [k, 4] matching ground-truth boxes ([x1, y1, x2, y2])
    :return: bbox_targets: [k, 4] = (dx, dy, dw, dh)
    """
    assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'
    # Source boxes in center/size form (inclusive pixel coordinates).
    src_w = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    src_h = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    src_cx = ex_rois[:, 0] + 0.5 * (src_w - 1.0)
    src_cy = ex_rois[:, 1] + 0.5 * (src_h - 1.0)
    # Ground-truth boxes in center/size form.
    dst_w = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    dst_h = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    dst_cx = gt_rois[:, 0] + 0.5 * (dst_w - 1.0)
    dst_cy = gt_rois[:, 1] + 0.5 * (dst_h - 1.0)
    # Deltas; small epsilons / clamps guard against degenerate boxes.
    dx = (dst_cx - src_cx) / (src_w + 1e-6)
    dy = (dst_cy - src_cy) / (src_h + 1e-6)
    dw = torch.log(dst_w / src_w.clamp(min=1e-6))
    dh = torch.log(dst_h / src_h.clamp(min=1e-6))
    return torch.cat(
        (dx.view(-1, 1), dy.view(-1, 1), dw.view(-1, 1), dh.view(-1, 1)), dim=-1)
def coordinate_embeddings(boxes, dim):
    """
    Sinusoidal coordinate embeddings for bounding boxes.

    :param boxes: [K, 6] ([x1, y1, x2, y2, w_image, h_image])
    :param dim: number of sin (and cos) frequencies per coordinate
    :return: [K, 4, 2 * dim] -- sin embeddings followed by cos embeddings
    """
    num_boxes = boxes.shape[0]
    img_w = boxes[:, 4]
    img_h = boxes[:, 5]
    # Convert (x1, y1, x2, y2) into (x_c, y_c, w, h).
    cxcywh = boxes.new_zeros((num_boxes, 4))
    cxcywh[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2
    cxcywh[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2
    cxcywh[:, 2] = boxes[:, 2] - boxes[:, 0]
    cxcywh[:, 3] = boxes[:, 3] - boxes[:, 1]
    # Positions normalized by the image size and scaled to [0, 100].
    pos = cxcywh.new_zeros((num_boxes, 4))
    pos[:, 0] = cxcywh[:, 0] / img_w * 100
    pos[:, 1] = cxcywh[:, 1] / img_h * 100
    pos[:, 2] = cxcywh[:, 2] / img_w * 100
    pos[:, 3] = cxcywh[:, 3] / img_h * 100
    # Geometric frequency ladder, as in transformer positional encodings.
    freq = 1000 ** (torch.arange(dim, dtype=boxes.dtype, device=boxes.device) / dim)
    scaled = pos.view((num_boxes, 4, 1)) / freq.view((1, 1, -1))
    return torch.cat((scaled.sin(), scaled.cos()), dim=-1)
def bbox_iou_py_vectorized(boxes, query_boxes):
    """
    Pairwise IoU between two sets of boxes (inclusive pixel coordinates).

    :param boxes: [n, 4] ([x1, y1, x2, y2])
    :param query_boxes: [k, 4] ([x1, y1, x2, y2])
    :return: [n, k] IoU matrix on the device of ``boxes``
    """
    n = boxes.shape[0]
    k = query_boxes.shape[0]
    # Expand both sets so every (box, query) pair sits on one row.
    idx_n, idx_k = torch.meshgrid([torch.arange(n), torch.arange(k)])
    idx_n = idx_n.contiguous().view(-1)
    idx_k = idx_k.contiguous().view(-1)
    b = boxes[idx_n]
    q = query_boxes[idx_k]
    left = torch.max(b[:, 0], q[:, 0])
    top = torch.max(b[:, 1], q[:, 1])
    right = torch.min(b[:, 2], q[:, 2])
    bottom = torch.min(b[:, 3], q[:, 3])
    # "+ 1" because the coordinates are inclusive pixel indices.
    inter = torch.clamp(right - left + 1, min=0) * torch.clamp(bottom - top + 1, min=0)
    area_b = (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1)
    area_q = (q[:, 2] - q[:, 0] + 1) * (q[:, 3] - q[:, 1] + 1)
    iou = inter / (area_b + area_q - inter)
    return iou.view(n, k).to(boxes.device)
| 3,289 | 33.631579 | 113 | py |
BertGen | BertGen-master/common/utils/load.py | import torch
import os
def smart_load_model_state_dict(model, state_dict):
    """Load ``state_dict`` into ``model``, tolerating a 'module.' prefix
    mismatch (DataParallel checkpoints vs. bare models and vice versa).

    :raises ValueError: if a key cannot be matched even after re-prefixing
    """
    model_keys = model.state_dict()
    remapped = {}
    for key, value in state_dict.items():
        if key not in model_keys:
            # Toggle the DataParallel 'module.' prefix and retry.
            if key.startswith('module.'):
                key = key[len('module.'):]
            else:
                key = 'module.' + key
        if key not in model_keys:
            print('********')
            print(key)
            raise ValueError('failed to match key of state dict smartly!')
        remapped[key] = value
    model.load_state_dict(remapped)
def smart_resume(model, optimizer, validation_monitor, config, model_prefix, logger):
    """Resume training from a saved checkpoint.

    Two modes, selected by the config:
    - ``config.TRAIN.RESUME``: load the checkpoint for epoch
      ``BEGIN_EPOCH - 1`` from ``model_prefix``.
    - ``config.TRAIN.AUTO_RESUME``: scan backwards from ``END_EPOCH`` for the
      newest existing checkpoint, load it and update ``BEGIN_EPOCH`` in place.

    Restores model weights (prefix-tolerantly), optimizer state and, when
    present, the validation monitor state.
    """
    if config.TRAIN.RESUME:
        # NOTE(review): prints a tuple on purpose? ('continue...', epoch)
        print(('continue training from ', config.TRAIN.BEGIN_EPOCH))
        # load model
        model_filename = '{}-{:04d}.model'.format(
            model_prefix, config.TRAIN.BEGIN_EPOCH - 1)
        check_point = torch.load(
            model_filename, map_location=lambda storage, loc: storage)
        # model.load_state_dict(check_point['state_dict'])
        smart_load_model_state_dict(model, check_point['state_dict'])
        optimizer.load_state_dict(check_point['optimizer'])
        if 'validation_monitor' in check_point:
            validation_monitor.load_state_dict(
                check_point['validation_monitor'])
            print(
                'Best Val {}: {}, Epoch: {}'.format(validation_monitor.host_metric_name,
                                                    validation_monitor.best_val,
                                                    validation_monitor.best_epoch)
            )
    elif config.TRAIN.AUTO_RESUME:
        # Walk backwards so the most recent checkpoint wins.
        for epoch in range(config.TRAIN.END_EPOCH, config.TRAIN.BEGIN_EPOCH, -1):
            model_filename = '{}-{:04d}.model'.format(model_prefix, epoch - 1)
            if os.path.exists(model_filename):
                config.TRAIN.BEGIN_EPOCH = epoch
                check_point = torch.load(
                    model_filename, map_location=lambda storage, loc: storage)
                # model.load_state_dict(check_point['state_dict'])
                smart_load_model_state_dict(model, check_point['state_dict'])
                # FM edit TODO: UNCOMMENT THIS! Only used for testing.
                optimizer.load_state_dict(check_point['optimizer'])
                if 'validation_monitor' in check_point:
                    validation_monitor.load_state_dict(
                        check_point['validation_monitor'])
                    print(
                        'Best Val {}: {}, Epoch: {}'.format(validation_monitor.host_metric_name,
                                                            validation_monitor.best_val,
                                                            validation_monitor.best_epoch)
                    )
                logger.info(
                    "Auto continue training from {0}".format(model_filename))
                print("Auto continue training from {0}".format(model_filename))
                break
        # TODO: remove - just for testing
        print('inside auto-resume --------------***********************')
def smart_partial_load_model_state_dict(model, state_dict):
    """Load the subset of ``state_dict`` whose keys match ``model``,
    tolerating a 'module.' prefix mismatch; unmatched keys are reported
    instead of raising.
    """
    model_state = model.state_dict()
    matched = {}
    unmatched_keys = []
    loaded_keys = []
    for key, value in state_dict.items():
        if key not in model_state:
            # Toggle the DataParallel 'module.' prefix and retry.
            if key.startswith('module.'):
                key = key[len('module.'):]
            else:
                key = 'module.' + key
        if key in model_state:
            matched[key] = value
            loaded_keys.append(key)
        else:
            unmatched_keys.append(key)
            # raise ValueError('failed to match key of state dict smartly!')
    untouched_keys = [
        k for k in model_state.keys() if k not in loaded_keys]
    print("[Partial Load] partial load state dict of keys: {}".format(
        matched.keys()))
    print("[Partial Load] non matched keys: {}".format(unmatched_keys))
    print("[Partial Load] non pretrain keys: {}".format(untouched_keys))
    model_state.update(matched)
    model.load_state_dict(model_state)
# FM: initialise with VL-BERT. Skip word embeddings, where vocabulary is changed to multilingual
def smart_skip_partial_load_model_state_dict(model, state_dict):
    """Partial, prefix-tolerant state-dict load that additionally skips the
    word-embedding / MLM-head weights (their vocabulary differs from the
    checkpoint's). Unmatched keys are reported instead of raising.
    """
    skip_keys = ('module.vlbert.mlm_head.predictions.decoder.weight',
                 'module.vlbert.mlm_head.predictions.bias',
                 'module.vlbert.word_embeddings.weight')
    model_state = model.state_dict()
    matched = {}
    unmatched_keys = []
    loaded_keys = []
    for key, value in state_dict.items():
        # FM: for partica
        if key in skip_keys:
            print('---------- skip :', key)
            continue
        if key not in model_state:
            # Toggle the DataParallel 'module.' prefix and retry.
            if key.startswith('module.'):
                key = key[len('module.'):]
            else:
                key = 'module.' + key
        if key in model_state:
            matched[key] = value
            loaded_keys.append(key)
        else:
            unmatched_keys.append(key)
            # raise ValueError('failed to match key of state dict smartly!')
    untouched_keys = [
        k for k in model_state.keys() if k not in loaded_keys]
    print("[Partial Load] partial load state dict of keys: {}".format(
        matched.keys()))
    print("[Partial Load] non matched keys: {}".format(unmatched_keys))
    print("[Partial Load] non pretrain keys: {}".format(untouched_keys))
    model_state.update(matched)
    model.load_state_dict(model_state)
# FM: hybrid parameter initialisation that uses M-BERT for initialisation
# and VL-BERT only for specific parameters
def smart_hybrid_partial_load_model_state_dict(model, state_dict):
    """Prefix-tolerant partial load that keeps only the visual-side VL-BERT
    parameters (heads, visual embeddings, positional/type embeddings);
    everything else in ``state_dict`` is reported as unmatched.
    """
    # Only keys containing one of these substrings are taken from the checkpoint.
    vlbert_substrings = ("vlbert.mvrc_head", "image_feature_extractor",
                         "vlbert.visual_ln", "end_embedding",
                         "token_type_embeddings", "position_embeddings",
                         "aux_text_visual_embedding", "object_mask_word_embedding",
                         "object_linguistic_embeddings")
    model_state = model.state_dict()
    matched = {}
    unmatched_keys = []
    loaded_keys = []
    for key, value in state_dict.items():
        if key not in model_state:
            # Toggle the DataParallel 'module.' prefix and retry.
            if key.startswith('module.'):
                key = key[len('module.'):]
            else:
                key = 'module.' + key
        if key in model_state and any(sub in key for sub in vlbert_substrings):
            matched[key] = value
            loaded_keys.append(key)
        else:
            unmatched_keys.append(key)
            # raise ValueError('failed to match key of state dict smartly!')
    untouched_keys = [
        k for k in model_state.keys() if k not in loaded_keys]
    print("[Partial Load] partial load state dict of keys: {}".format(
        matched.keys()))
    print("[Partial Load] non matched keys: {}".format(unmatched_keys))
    print("[Partial Load] non pretrain keys: {}".format(untouched_keys))
    model_state.update(matched)
    model.load_state_dict(model_state)
| 7,269 | 43.329268 | 153 | py |
BertGen | BertGen-master/common/utils/masked_softmax.py | import torch
def masked_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
    """Softmax over the non-masked entries of ``vector``.

    ``mask`` may be ``None`` (plain softmax), and may have fewer dimensions
    than ``vector``: it is unsqueezed at dim 1 until the ranks match, so it
    only needs to be broadcastable to ``vector``'s shape. Entries where the
    mask is 0 get probability exactly 0; if a slice is fully masked the
    result there is all zeros (beware of NaN if fed to a cross-entropy loss).
    """
    if mask is None:
        return torch.nn.functional.softmax(vector, dim=dim)
    mask = mask.type(vector.dtype)
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)
    # Zero the masked logits first to limit numerical error from large
    # out-of-mask values, then zero the masked probabilities and renormalize.
    probs = torch.nn.functional.softmax(vector * mask, dim=dim) * mask
    eps = 1e-7 if vector.dtype == torch.half else 1e-13
    return probs / (probs.sum(dim=dim, keepdim=True) + eps)
| 1,533 | 50.133333 | 111 | py |
BertGen | BertGen-master/common/utils/pad_sequence.py | import torch
def pad_sequence(sequence, lengths):
    """
    Split a packed sequence into a zero-padded batch tensor.

    :param sequence: [sum(lengths), ...] packed sequence
    :param lengths: per-row lengths [b1, b2, b3, ...] summing to sequence.shape[0]
    :return: [len(lengths), max(lengths), ...] tensor, zero-padded per row
    """
    padded = sequence.new_zeros(len(lengths), max(lengths), *sequence.shape[1:])
    offset = 0
    for row, length in enumerate(lengths):
        if length > 0:
            padded[row, :length] = sequence[offset:(offset + length)]
        offset += length
    return padded
| 480 | 25.722222 | 80 | py |
BertGen | BertGen-master/common/utils/clip_pad.py | import torch
def clip_pad_images(tensor, pad_shape, pad=0):
    """
    Clip or pad an image tensor to a target spatial size.

    :param tensor: [c, H, W]
    :param pad_shape: [c, h, w] -- only the spatial entries (indices 1 and 2)
        are read; the channel count is taken from ``tensor``
    :param pad: fill value for the padded area
    :return: [c, h, w]
    """
    if not isinstance(tensor, torch.Tensor):
        tensor = torch.as_tensor(tensor)
    src_h, src_w = tensor.shape[1:]
    dst_h = pad_shape[1]
    dst_w = pad_shape[2]
    out = torch.zeros((tensor.shape[0], dst_h, dst_w), dtype=tensor.dtype) + pad
    copy_h = min(dst_h, src_h)
    copy_w = min(dst_w, src_w)
    out[:, :copy_h, :copy_w] = tensor[:, :copy_h, :copy_w]
    return out
def clip_pad_boxes(tensor, pad_length, pad=0):
    """
    Clip or pad a box tensor along the first dimension.

    :param tensor: [k, d] boxes
    :param pad_length: target number of rows K
    :param pad: fill value for padded rows
    :return: [K, d]
    """
    if not isinstance(tensor, torch.Tensor):
        tensor = torch.as_tensor(tensor)
    num_boxes = tensor.shape[0]
    box_dim = tensor.shape[1]
    out = torch.zeros((pad_length, box_dim), dtype=tensor.dtype) + pad
    keep = min(num_boxes, pad_length)
    out[:keep, :] = tensor[:keep, :]
    return out
def clip_pad_1d(tensor, pad_length, pad=0):
    """Clip or pad a 1-D tensor to exactly ``pad_length`` elements,
    filling any extra positions with ``pad``."""
    if not isinstance(tensor, torch.Tensor):
        tensor = torch.as_tensor(tensor)
    out = torch.zeros((pad_length, ), dtype=tensor.dtype) + pad
    keep = min(tensor.shape[0], pad_length)
    out[:keep] = tensor[:keep]
    return out
def clip_pad_2d(tensor, pad_shape, pad=0):
    """Clip or pad a 2-D tensor to exactly ``pad_shape``, filling any
    extra positions with ``pad``."""
    if not isinstance(tensor, torch.Tensor):
        tensor = torch.as_tensor(tensor)
    out = torch.zeros(*pad_shape, dtype=tensor.dtype) + pad
    keep_rows = min(tensor.shape[0], pad_shape[0])
    keep_cols = min(tensor.shape[1], pad_shape[1])
    out[:keep_rows, :keep_cols] = tensor[:keep_rows, :keep_cols]
    return out
| 1,738 | 28.982759 | 93 | py |
BertGen | BertGen-master/common/utils/mask.py | from skimage.draw import polygon
import torch
def generate_instance_mask(seg_polys, box, mask_size=(14, 14), dtype=torch.float32, copy=True):
    """
    Generate instance mask from polygons.

    Each polygon is translated into the box's frame, rescaled to the mask
    resolution, rasterized with ``skimage.draw.polygon`` and OR-ed into the
    output mask.

    :param seg_polys: iterable of torch.Tensor, each (N, 2), the (x, y)
        vertices of one foreground polygon
    :param box: array-like, (4, ), (xmin, ymin, xmax, ymax), instance bounding box
    :param mask_size: tuple, (mask_height, mask_weight)
    :param dtype: data type of generated mask
    :param copy: whether copy each polygon to a new tensor first (avoids
        mutating the caller's tensors)
    :return: torch.Tensor, of mask_size, instance mask

    NOTE(review): w_ratio is derived from mask_size[0] but applied to x
    coordinates, and the clamps use the opposite index -- for non-square
    mask_size the axes appear transposed; verify before using non-square masks.
    """
    mask = torch.zeros(mask_size, dtype=dtype)
    w_ratio = float(mask_size[0]) / (box[2] - box[0] + 1)
    h_ratio = float(mask_size[1]) / (box[3] - box[1] + 1)
    for seg_poly in seg_polys:
        if copy:
            seg_poly = seg_poly.detach().clone()
        seg_poly = seg_poly.type(torch.float32)
        # Shift into the box frame and scale to mask resolution.
        seg_poly[:, 0] = (seg_poly[:, 0] - box[0]) * w_ratio
        seg_poly[:, 1] = (seg_poly[:, 1] - box[1]) * h_ratio
        # polygon() takes (rows, cols) == (y, x).
        rr, cc = polygon(seg_poly[:, 1].clamp(min=0, max=mask_size[1] - 1),
                         seg_poly[:, 0].clamp(min=0, max=mask_size[0] - 1))
        mask[rr, cc] = 1
    return mask
| 1,282 | 33.675676 | 106 | py |
BertGen | BertGen-master/LanguageGeneration/evaluate_translation.py | ######
# Author: Faidon Mitzalis
# Date: June 2020
# Comments: Use results of testing to evaluate performance of MT translation task
######
import json
import torch
import operator
import sacrebleu
import unidecode
import sys
# Usage: python evaluate_translation.py <model_name> <results_json> <lang>
#   <lang> == "second" scores hypotheses against the German captions,
#   anything else scores them against the English captions.
model = sys.argv[1]
filepath = sys.argv[2]
lang = sys.argv[3]
with open(filepath) as json_file:
    data = json.load(json_file)
references = []
hypotheses = []
#******************************************************
# Step 1: Read json to dictionaries
# json file to nested dictionary (each caption with all images)
# Everything is lower-cased before scoring.
for p in data:
    if lang == "second":
        reference = (p['caption_de'].lower())
        hypothesis = p['generated_sentence'].lower()
    else:
        reference = (p['caption_en'].lower())
        hypothesis = p['generated_sentence'].lower()
    references.append(reference)
    hypotheses.append(hypothesis)
#******************************************************
# # Step 2: Get BLEU score (corpus-level, via sacrebleu)
bleu = sacrebleu.corpus_bleu(hypotheses, [references])
print(bleu.score)
# Step 3: Dump references / hypotheses so other metrics can be computed later.
with open("./checkpoints/generated_text/"+ model+'_ref.txt', 'w') as f:
    for ref in references:
        f.write("%s\n" % ref)
with open("./checkpoints/generated_text/"+ model+'_hyp.txt', 'w') as f:
    for hyp in hypotheses:
        f.write("%s\n" % hyp) | 1,398 | 28.765957 | 81 | py |
BertGen | BertGen-master/LanguageGeneration/train_end2end.py | import _init_paths
import os
import argparse
import torch
import subprocess
from LanguageGeneration.function.config import config, update_config
from LanguageGeneration.function.train import train_net
def parse_args():
    """Parse command-line arguments, apply the config file, and (for slurm
    jobs) export the torch.distributed rendezvous environment variables.

    :return: (args, config); ``config`` is the module-level config object,
        updated in place from ``--cfg`` and ``--model-dir``
    """
    parser = argparse.ArgumentParser('Train Cognition Network')
    parser.add_argument('--cfg', type=str, help='path to config file')
    parser.add_argument('--model-dir', type=str,
                        help='root path to store checkpoint')
    parser.add_argument('--log-dir', type=str, help='tensorboard log dir')
    parser.add_argument('--dist', help='whether to use distributed training',
                        default=False, action='store_true')
    parser.add_argument('--slurm', help='whether this is a slurm job',
                        default=False, action='store_true')
    parser.add_argument('--do-test', help='whether to generate csv result on test set',
                        default=False, action='store_true')
    parser.add_argument('--cudnn-off', help='disable cudnn',
                        default=False, action='store_true')
    args = parser.parse_args()
    if args.cfg is not None:
        update_config(args.cfg)
    if args.model_dir is not None:
        config.OUTPUT_PATH = os.path.join(args.model_dir, config.OUTPUT_PATH)
    if args.slurm:
        # Derive the distributed rendezvous from the slurm allocation:
        # one process per GPU, master address resolved from the node list.
        proc_id = int(os.environ['SLURM_PROCID'])
        ntasks = int(os.environ['SLURM_NTASKS'])
        node_list = os.environ['SLURM_NODELIST']
        num_gpus = torch.cuda.device_count()
        addr = subprocess.getoutput(
            'scontrol show hostname {} | head -n1'.format(node_list))
        os.environ['MASTER_PORT'] = str(29500)
        os.environ['MASTER_ADDR'] = addr
        os.environ['WORLD_SIZE'] = str(ntasks)
        os.environ['RANK'] = str(proc_id)
        os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
    return args, config
def main():
    """Entry point: parse CLI args / config and launch training."""
    args, config = parse_args()
    rank, model = train_net(args, config)
if __name__ == '__main__':
    main()
| 1,997 | 34.678571 | 87 | py |
BertGen | BertGen-master/LanguageGeneration/function/val.py | from collections import namedtuple
import torch
from common.trainer import to_cuda
@torch.no_grad()
def do_validation(net, val_loader, metrics, label_index_in_batch):
    """Run one evaluation pass over ``val_loader``, accumulating ``metrics``.

    Gradients are disabled for the whole pass and the network is put in
    eval mode. ``label_index_in_batch`` is accepted for interface
    compatibility but not used here.
    """
    net.eval()
    metrics.reset()
    for minibatch in val_loader:
        minibatch = to_cuda(minibatch)
        model_outputs, _ = net(*minibatch)
        metrics.update(model_outputs)
| 349 | 22.333333 | 66 | py |
BertGen | BertGen-master/LanguageGeneration/function/test.py | ######
# Author: Faidon Mitzalis
# Date: June 2020
# Comments: Run model with all caption-image pairs in the dataset
######
import os
import pprint
import shutil
import json
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn.functional as F
from common.utils.load import smart_load_model_state_dict
from common.trainer import to_cuda
from common.utils.create_logger import create_logger
from LanguageGeneration.data.build import make_dataloader
from LanguageGeneration.modules import *
@torch.no_grad()
def test_net(args, config, ckpt_path=None, save_path=None, save_name=None):
    """Run inference over the test split and write the generated sentences.

    :param args: parsed CLI args (only ``args.cfg`` is used, for naming)
    :param config: experiment config
    :param ckpt_path: checkpoint to load; defaults to the '-best.model' of
        the matching training run
    :param save_path: output directory; defaults to the logger's test path
    :param save_name: basename for the result file; defaults to the cfg name
    :return: path of the written result txt file
    """
    print('test net...')
    pprint.pprint(args)
    pprint.pprint(config)
    device_ids = [int(d) for d in config.GPUS.split(',')]
    # os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
    # cudnn disabled / made deterministic for reproducible test output
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    if ckpt_path is None:
        _, train_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
                                             split='train')
        model_prefix = os.path.join(train_output_path, config.MODEL_PREFIX)
        ckpt_path = '{}-best.model'.format(model_prefix)
        print('Use best checkpoint {}...'.format(ckpt_path))
    if save_path is None:
        logger, test_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TEST_IMAGE_SET,
                                                 split='test')
        save_path = test_output_path
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # Keep a copy of the evaluated checkpoint next to the results.
    shutil.copy2(ckpt_path,
                 os.path.join(save_path, '{}_test_ckpt_{}.model'.format(config.MODEL_PREFIX, config.DATASET.TASK)))
    # ************
    # Step 1: Select model architecture and preload trained model
    # NOTE(review): eval() on a config string -- configs must be trusted input.
    model = eval(config.MODULE)(config)
    if len(device_ids) > 1:
        model = torch.nn.DataParallel(model, device_ids=device_ids).cuda()
    else:
        torch.cuda.set_device(device_ids[0])
        model = model.cuda()
    checkpoint = torch.load(
        ckpt_path, map_location=lambda storage, loc: storage)
    smart_load_model_state_dict(model, checkpoint['state_dict'])
    # ************
    # Step 2: Create dataloader to include all caption-image pairs
    test_loader = make_dataloader(config, mode='test', distributed=False)
    test_dataset = test_loader.dataset
    test_database = test_dataset.database
    # ************
    # Step 3: Run all pairs through model for inference
    generated_sentences = []
    captions_en = []
    captions_de = []
    model.eval()
    cur_id = 0
    for nbatch, batch in zip(trange(len(test_loader)), test_loader):
        bs = test_loader.batch_sampler.batch_size if test_loader.batch_sampler is not None else test_loader.batch_size
        # image_ids.extend([test_database[id]['image_index'] for id in range(cur_id, min(cur_id + bs, len(test_database)))])
        # if 'flickr8k' not in config.DATASET.DATASET_PATH:
        #     captions_en.extend([test_database[id]['caption_en'] for id in range(cur_id, min(cur_id + bs, len(test_database)))])
        #     captions_de.extend([test_database[id]['caption_de'] for id in range(cur_id, min(cur_id + bs, len(test_database)))])
        batch = to_cuda(batch)
        output = model(*batch)
        generated_sentences.extend((output[0]['generated_sentences']))
        cur_id += bs
        # break
    # exit()
    # TODO: remove this is just for checking
    # if nbatch>900:
    # break
    # ************
    # Step 3: Store all logit results in file for later evalution
    # if 'flickr8k' not in config.DATASET.DATASET_PATH:
    #     result = [{'generated_sentence': c_id, 'caption_en': caption_en, 'caption_de': caption_de}
    #               for c_id, caption_en, caption_de in zip(generated_sentences, captions_en, captions_de)]
    # else:
    #     result = [{'generated_sentence': c_id, 'caption_de': caption_de}
    #               for c_id, caption_de in zip(generated_sentences, captions_de)]
    cfg_name = os.path.splitext(os.path.basename(args.cfg))[0]
    result_json_path = os.path.join(save_path, '{}.txt'.format(cfg_name if save_name is None else save_name
                                                               ))
    # One generated sentence per line (plain text, despite the variable name).
    with open(result_json_path, 'w') as f:
        for item in generated_sentences:
            f.write('%s\n' % item)
        # json.dump(result, f)
    print('result json saved to {}.'.format(result_json_path))
    return result_json_path
| 4,571 | 40.944954 | 129 | py |
BertGen | BertGen-master/LanguageGeneration/function/vis.py | import os
import pprint
import shutil
import inspect
import random
import math
from tqdm import trange
import numpy as np
import torch
import torch.nn
import torch.distributed as distributed
from torch.nn.parallel import DistributedDataParallel as DDP
from common.utils.load import smart_partial_load_model_state_dict
from common.trainer import to_cuda
from common.utils.multi_task_dataloader import MultiTaskDataLoader
from LanguageGeneration.data.build import make_dataloader, make_dataloaders
from LanguageGeneration.modules import *
from common.utils.create_logger import makedirsExist
def vis_net(args, config, save_dir):
    """Build the model (optionally distributed), load pretrained weights,
    then run :func:`vis` over the validation loader to dump attention maps.

    :param args: parsed CLI args (``args.dist`` / ``args.slurm`` control the
        distributed setup)
    :param config: experiment config; mutated in place (``config.GPUS``)
    :param save_dir: directory the visualization arrays are written to
    :return: (rank, model) -- rank is None in the non-distributed branch
    """
    pprint.pprint(config)
    if args.dist:
        # Distributed branch: one process per GPU, NCCL rendezvous from env.
        # NOTE(review): eval() on a config string -- configs must be trusted.
        model = eval(config.MODULE)(config)
        local_rank = int(os.environ.get('LOCAL_RANK') or 0)
        config.GPUS = str(local_rank)
        torch.cuda.set_device(local_rank)
        master_address = os.environ['MASTER_ADDR']
        master_port = int(os.environ['MASTER_PORT'] or 23456)
        world_size = int(os.environ['WORLD_SIZE'] or 1)
        rank = int(os.environ['RANK'] or 0)
        if args.slurm:
            distributed.init_process_group(backend='nccl')
        else:
            distributed.init_process_group(
                backend='nccl',
                init_method='tcp://{}:{}'.format(master_address, master_port),
                world_size=world_size,
                rank=rank,
                group_name='mtorch')
        print(
            f'native distributed, size: {world_size}, rank: {rank}, local rank: {local_rank}')
        torch.cuda.set_device(local_rank)
        config.GPUS = str(local_rank)
        model = model.cuda()
        model = DDP(model, device_ids=[local_rank], output_device=local_rank)
        if isinstance(config.DATASET, list):
            # Multiple datasets: wrap the per-dataset loaders.
            val_loaders = make_dataloaders(config,
                                           mode='val',
                                           distributed=True,
                                           num_replicas=world_size,
                                           rank=rank)
            val_loader = MultiTaskDataLoader(val_loaders)
        else:
            val_loader = make_dataloader(config,
                                         mode='val',
                                         distributed=True,
                                         num_replicas=world_size,
                                         rank=rank)
    else:
        model = eval(config.MODULE)(config)
        num_gpus = len(config.GPUS.split(','))
        rank = None
        # model
        if num_gpus > 1:
            model = torch.nn.DataParallel(
                model, device_ids=[int(d) for d in config.GPUS.split(',')]).cuda()
        else:
            torch.cuda.set_device(int(config.GPUS))
            model.cuda()
        # loader
        if isinstance(config.DATASET, list):
            val_loaders = make_dataloaders(
                config, mode='val', distributed=False)
            val_loader = MultiTaskDataLoader(val_loaders)
        else:
            val_loader = make_dataloader(config, mode='val', distributed=False)
    # partial load pretrain state dict, optionally remapping key prefixes
    # given as 'old_prefix->new_prefix' entries in the config
    if config.NETWORK.PARTIAL_PRETRAIN != "":
        pretrain_state_dict = torch.load(
            config.NETWORK.PARTIAL_PRETRAIN, map_location=lambda storage, loc: storage)['state_dict']
        prefix_change = [prefix_change.split(
            '->') for prefix_change in config.NETWORK.PARTIAL_PRETRAIN_PREFIX_CHANGES]
        if len(prefix_change) > 0:
            pretrain_state_dict_parsed = {}
            for k, v in pretrain_state_dict.items():
                no_match = True
                for pretrain_prefix, new_prefix in prefix_change:
                    if k.startswith(pretrain_prefix):
                        k = new_prefix + k[len(pretrain_prefix):]
                        pretrain_state_dict_parsed[k] = v
                        no_match = False
                        break
                if no_match:
                    pretrain_state_dict_parsed[k] = v
            pretrain_state_dict = pretrain_state_dict_parsed
        smart_partial_load_model_state_dict(model, pretrain_state_dict)
    # broadcast parameter and optimizer state from rank 0 before training start
    if args.dist:
        for v in model.state_dict().values():
            distributed.broadcast(v, src=0)
    vis(model, val_loader, save_dir, rank=rank,
        world_size=world_size if args.dist else 1)
    return rank, model
def vis(model, loader, save_dir, rank=None, world_size=1):
    """Run inference over ``loader`` and dump per-image attention maps.

    For every sample the attention probabilities returned by the model are
    saved to ``<save_dir>/attention_probs/<image_id>.npy``.  Hidden-state and
    cosine-similarity dumps exist in the code but are currently disabled
    (see the commented-out ``np.save`` calls below).

    :param model: model whose forward output dict contains 'attention_probs'
        and 'hidden_states'
    :param loader: dataloader yielding already-collated batches
    :param save_dir: root directory for the ``.npy`` dumps
    :param rank: distributed rank (None when not distributed); currently unused
        except via the commented-out offset logic
    :param world_size: number of distributed processes; currently unused
    """
    attention_dir = os.path.join(save_dir, 'attention_probs')
    hidden_dir = os.path.join(save_dir, 'hidden_states')
    cos_dir = os.path.join(save_dir, 'cos_similarity')
    # if not os.path.exists(hidden_dir):
    #     makedirsExist(hidden_dir)
    # if not os.path.exists(cos_dir):
    #     makedirsExist(cos_dir)
    if not os.path.exists(attention_dir):
        makedirsExist(attention_dir)
    # offset = 0
    # if rank is not None:
    #     num_samples = int(math.ceil(len(loader.dataset) * 1.0 / world_size))
    #     offset = num_samples * rank
    # index = offset
    model.eval()
    for i, data in zip(trange(len(loader)), loader):
        # for i, data in enumerate(loader):
        data = to_cuda(data)
        output = model(*data)
        for _i, (attention_probs, hidden_states) in enumerate(zip(output['attention_probs'], output['hidden_states'])):
            # NOTE(review): assumes the third batch tensor carries the dataset
            # index in its last position — confirm against the collate function.
            index = int(data[2][_i][-1])
            if hasattr(loader.dataset, 'ids'):
                image_id = loader.dataset.ids[index]
            else:
                # Fall back to deriving the id from the image path
                # ('dir/<image_id>.jpg' -> '<image_id>').
                image_id = loader.dataset.database[index]['image'].split(
                    '/')[1].split('.')[0]
            attention_probs_arr = attention_probs.detach().cpu().numpy()
            # hidden_states_arr / cos_similarity_arr are computed but unused
            # while their np.save calls below remain commented out.
            hidden_states_arr = hidden_states.detach().cpu().numpy()
            cos_similarity_arr = (hidden_states @ hidden_states.transpose(
                1, 2)).detach().cpu().numpy()
            np.save(os.path.join(attention_dir, '{}.npy'.format(
                image_id)), attention_probs_arr)
            # np.save(os.path.join(hidden_dir, '{}.npy'.format(image_id)), hidden_states_arr)
            # np.save(os.path.join(cos_dir, '{}.npy'.format(image_id)), cos_similarity_arr)
        # index = (index + 1) % len(loader.dataset)
| 6,305 | 40.486842 | 119 | py |
BertGen | BertGen-master/LanguageGeneration/function/train.py | import os
import pprint
import shutil
import inspect
import random
from tensorboardX import SummaryWriter
import numpy as np
import torch
import torch.nn
import torch.optim as optim
import torch.distributed as distributed
from torch.nn.parallel import DistributedDataParallel as DDP
from common.utils.create_logger import create_logger
from common.utils.misc import summary_parameters, bn_fp16_half_eval
from common.utils.load import smart_resume, smart_partial_load_model_state_dict, smart_hybrid_partial_load_model_state_dict, smart_skip_partial_load_model_state_dict
from common.trainer import train
from common.metrics.composite_eval_metric import CompositeEvalMetric
from common.metrics import pretrain_metrics
from common.callbacks.batch_end_callbacks.speedometer import Speedometer
from common.callbacks.epoch_end_callbacks.validation_monitor import ValidationMonitor
from common.callbacks.epoch_end_callbacks.checkpoint import Checkpoint
from common.lr_scheduler import WarmupMultiStepLR
from common.nlp.bert.optimization import AdamW, WarmupLinearSchedule
from common.utils.multi_task_dataloader import MultiTaskDataLoader
from LanguageGeneration.data.build import make_dataloader, make_dataloaders
from LanguageGeneration.modules import *
from LanguageGeneration.function.val import do_validation
try:
from apex import amp
from apex.parallel import DistributedDataParallel as Apex_DDP
except ImportError:
pass
# raise ImportError("Please install apex from https://www.github.com/nvidia/apex if you want to use fp16.")
def train_net(args, config):
    """Set up logging, data, model, optimizer and LR schedule, then train.

    Two execution modes, selected by ``args.dist``:
    * distributed: one process per GPU, NCCL process group, DDP (or Apex DDP
      when FP16 is on);
    * single-process: optionally ``torch.nn.DataParallel`` over the GPUs
      listed in ``config.GPUS``.

    :param args: parsed command-line args (uses .cfg, .log_dir, .dist,
        .slurm, .cudnn_off)
    :param config: experiment config loaded from ``args.cfg``
    :return: tuple ``(rank, model)``; rank is None in non-distributed mode
    """
    # setup logger
    logger, final_output_path = create_logger(config.OUTPUT_PATH,
                                              args.cfg,
                                              config.DATASET[0].TRAIN_IMAGE_SET if isinstance(config.DATASET, list)
                                              else config.DATASET.TRAIN_IMAGE_SET,
                                              split='train')
    model_prefix = os.path.join(final_output_path, config.MODEL_PREFIX)
    if args.log_dir is None:
        args.log_dir = os.path.join(final_output_path, 'tensorboard_logs')
    pprint.pprint(args)
    logger.info('training args:{}\n'.format(args))
    pprint.pprint(config)
    logger.info('training config:{}\n'.format(pprint.pformat(config)))
    # manually set random seed
    if config.RNG_SEED > -1:
        random.seed(config.RNG_SEED)
        np.random.seed(config.RNG_SEED)
        torch.random.manual_seed(config.RNG_SEED)
        torch.cuda.manual_seed_all(config.RNG_SEED)
    # cudnn
    torch.backends.cudnn.benchmark = False
    if args.cudnn_off:
        torch.backends.cudnn.enabled = False
    if args.dist:
        # ---- distributed setup: one process per GPU ----
        model = eval(config.MODULE)(config)
        local_rank = int(os.environ.get('LOCAL_RANK') or 0)
        config.GPUS = str(local_rank)
        torch.cuda.set_device(local_rank)
        master_address = os.environ['MASTER_ADDR']
        # NOTE(review): the two reassignments below override both the
        # MASTER_PORT env var and each other — only 10008 is ever used.
        master_port = int(os.environ['MASTER_PORT'] or 23456)
        master_port = int(9994)
        master_port = int(10008)
        world_size = int(os.environ['WORLD_SIZE'] or 1)
        rank = int(os.environ['RANK'] or 0)
        if args.slurm:
            # Under SLURM the launcher provides the rendezvous information.
            distributed.init_process_group(backend='nccl')
        else:
            distributed.init_process_group(
                backend='nccl',
                init_method='tcp://{}:{}'.format(master_address, master_port),
                world_size=world_size,
                rank=rank,
                group_name='mtorch')
        print(
            f'native distributed, size: {world_size}, rank: {rank}, local rank: {local_rank}')
        torch.cuda.set_device(local_rank)
        config.GPUS = str(local_rank)
        model = model.cuda()
        # When FP16 is enabled, wrapping is deferred to Apex DDP further below.
        if not config.TRAIN.FP16:
            model = DDP(model, find_unused_parameters=True, device_ids=[
                        local_rank], output_device=local_rank)
        if rank == 0:
            # Only rank 0 logs the parameter summary and archives the config.
            summary_parameters(model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model,
                               logger)
            shutil.copy(args.cfg, final_output_path)
            shutil.copy(inspect.getfile(
                eval(config.MODULE)), final_output_path)
        writer = None
        if args.log_dir is not None:
            # Each rank writes its own tensorboard stream.
            tb_log_dir = os.path.join(args.log_dir, 'rank{}'.format(rank))
            if not os.path.exists(tb_log_dir):
                os.makedirs(tb_log_dir)
            writer = SummaryWriter(log_dir=tb_log_dir)
        # A list of DATASET entries means multi-task training.
        if isinstance(config.DATASET, list):
            train_loaders_and_samplers = make_dataloaders(config,
                                                          mode='train',
                                                          distributed=True,
                                                          num_replicas=world_size,
                                                          rank=rank,
                                                          expose_sampler=True)
            val_loaders = make_dataloaders(config,
                                           mode='val',
                                           distributed=True,
                                           num_replicas=world_size,
                                           rank=rank)
            train_loader = MultiTaskDataLoader(
                [loader for loader, _ in train_loaders_and_samplers])
            val_loader = MultiTaskDataLoader(val_loaders)
            # Only the first (master) sampler is used for epoch shuffling.
            train_sampler = train_loaders_and_samplers[0][1]
        else:
            train_loader, train_sampler = make_dataloader(config,
                                                          mode='train',
                                                          distributed=True,
                                                          num_replicas=world_size,
                                                          rank=rank,
                                                          expose_sampler=True)
            val_loader = make_dataloader(config,
                                         mode='val',
                                         distributed=True,
                                         num_replicas=world_size,
                                         rank=rank)
        # Effective global batch size scales the base learning rate below.
        batch_size = world_size * (sum(config.TRAIN.BATCH_IMAGES)
                                   if isinstance(config.TRAIN.BATCH_IMAGES, list)
                                   else config.TRAIN.BATCH_IMAGES)
        if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
            batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
        base_lr = config.TRAIN.LR * batch_size
        # Per-parameter-group LR multipliers for names matching LR_MULT keys;
        # all remaining parameters go into a default group.
        optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
                                         'lr': base_lr * _lr_mult}
                                        for _k, _lr_mult in config.TRAIN.LR_MULT]
        optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
                                                        if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
        if config.TRAIN.OPTIMIZER == 'SGD':
            optimizer = optim.SGD(optimizer_grouped_parameters,
                                  lr=config.TRAIN.LR * batch_size,
                                  momentum=config.TRAIN.MOMENTUM,
                                  weight_decay=config.TRAIN.WD)
        elif config.TRAIN.OPTIMIZER == 'Adam':
            optimizer = optim.Adam(optimizer_grouped_parameters,
                                   lr=config.TRAIN.LR * batch_size,
                                   weight_decay=config.TRAIN.WD)
        elif config.TRAIN.OPTIMIZER == 'AdamW':
            optimizer = AdamW(optimizer_grouped_parameters,
                              lr=config.TRAIN.LR * batch_size,
                              betas=(0.9, 0.999),
                              eps=1e-6,
                              weight_decay=config.TRAIN.WD,
                              correct_bias=True)
        else:
            raise ValueError('Not support optimizer {}!'.format(
                config.TRAIN.OPTIMIZER))
        total_gpus = world_size
    else:
        # ---- single-process setup (optionally DataParallel) ----
        #os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
        model = eval(config.MODULE)(config)
        summary_parameters(model, logger)
        shutil.copy(args.cfg, final_output_path)
        shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
        num_gpus = len(config.GPUS.split(','))
        assert num_gpus <= 1 or (not config.TRAIN.FP16), "Not support fp16 with torch.nn.DataParallel. " \
                                                         "Please use amp.parallel.DistributedDataParallel instead."
        total_gpus = num_gpus
        rank = None
        writer = SummaryWriter(
            log_dir=args.log_dir) if args.log_dir is not None else None
        # model
        if num_gpus > 1:
            model = torch.nn.DataParallel(
                model, device_ids=[int(d) for d in config.GPUS.split(',')]).cuda()
        else:
            torch.cuda.set_device(int(config.GPUS))
            model.cuda()
        # loader
        if isinstance(config.DATASET, list):
            train_loaders = make_dataloaders(
                config, mode='train', distributed=False)
            val_loaders = make_dataloaders(
                config, mode='val', distributed=False)
            train_loader = MultiTaskDataLoader(train_loaders)
            val_loader = MultiTaskDataLoader(val_loaders)
        else:
            train_loader = make_dataloader(
                config, mode='train', distributed=False)
            val_loader = make_dataloader(config, mode='val', distributed=False)
        train_sampler = None
        # Same LR scaling / parameter-group logic as the distributed branch.
        batch_size = num_gpus * (sum(config.TRAIN.BATCH_IMAGES) if isinstance(config.TRAIN.BATCH_IMAGES, list)
                                 else config.TRAIN.BATCH_IMAGES)
        if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
            batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
        base_lr = config.TRAIN.LR * batch_size
        optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
                                         'lr': base_lr * _lr_mult}
                                        for _k, _lr_mult in config.TRAIN.LR_MULT]
        optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
                                                        if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
        if config.TRAIN.OPTIMIZER == 'SGD':
            optimizer = optim.SGD(optimizer_grouped_parameters,
                                  lr=config.TRAIN.LR * batch_size,
                                  momentum=config.TRAIN.MOMENTUM,
                                  weight_decay=config.TRAIN.WD)
        elif config.TRAIN.OPTIMIZER == 'Adam':
            optimizer = optim.Adam(optimizer_grouped_parameters,
                                   lr=config.TRAIN.LR * batch_size,
                                   weight_decay=config.TRAIN.WD)
        elif config.TRAIN.OPTIMIZER == 'AdamW':
            optimizer = AdamW(optimizer_grouped_parameters,
                              lr=config.TRAIN.LR * batch_size,
                              betas=(0.9, 0.999),
                              eps=1e-6,
                              weight_decay=config.TRAIN.WD,
                              correct_bias=True)
        else:
            raise ValueError('Not support optimizer {}!'.format(
                config.TRAIN.OPTIMIZER))
    # partial load pretrain state dict
    if config.NETWORK.PARTIAL_PRETRAIN != "":
        pretrain_state_dict = torch.load(
            config.NETWORK.PARTIAL_PRETRAIN, map_location=lambda storage, loc: storage)['state_dict']
        # PARTIAL_PRETRAIN_PREFIX_CHANGES entries look like 'old.prefix->new.prefix'.
        prefix_change = [prefix_change.split(
            '->') for prefix_change in config.NETWORK.PARTIAL_PRETRAIN_PREFIX_CHANGES]
        if len(prefix_change) > 0:
            pretrain_state_dict_parsed = {}
            for k, v in pretrain_state_dict.items():
                no_match = True
                for pretrain_prefix, new_prefix in prefix_change:
                    if k.startswith(pretrain_prefix):
                        k = new_prefix + k[len(pretrain_prefix):]
                        pretrain_state_dict_parsed[k] = v
                        no_match = False
                        break
                if no_match:
                    # Keys without a matching prefix are kept unchanged.
                    pretrain_state_dict_parsed[k] = v
            pretrain_state_dict = pretrain_state_dict_parsed
        # FM edit: introduce alternative initialisations
        if config.NETWORK.INITIALISATION == 'hybrid':
            smart_hybrid_partial_load_model_state_dict(
                model, pretrain_state_dict)
        elif config.NETWORK.INITIALISATION == 'skip':
            smart_skip_partial_load_model_state_dict(
                model, pretrain_state_dict)
        else:
            smart_partial_load_model_state_dict(model, pretrain_state_dict)
    # metrics
    metric_kwargs = {'allreduce': args.dist,
                     'num_replicas': world_size if args.dist else 1}
    train_metrics_list = []
    val_metrics_list = []
    if config.NETWORK.WITH_REL_LOSS:
        train_metrics_list.append(
            pretrain_metrics.RelationshipAccuracy(**metric_kwargs))
        val_metrics_list.append(
            pretrain_metrics.RelationshipAccuracy(**metric_kwargs))
    if config.NETWORK.WITH_MLM_LOSS:
        if config.MODULE == 'ResNetVLBERTForPretrainingMultitask':
            # Fixed three-dataset metric layout for this legacy module.
            train_metrics_list.append(
                pretrain_metrics.MLMAccuracyDataset1(**metric_kwargs))
            train_metrics_list.append(
                pretrain_metrics.MLMAccuracyDataset2(**metric_kwargs))
            train_metrics_list.append(
                pretrain_metrics.MLMAccuracyDataset3(**metric_kwargs))
            val_metrics_list.append(
                pretrain_metrics.MLMAccuracyDataset1(**metric_kwargs))
            val_metrics_list.append(
                pretrain_metrics.MLMAccuracyDataset2(**metric_kwargs))
            val_metrics_list.append(
                pretrain_metrics.MLMAccuracyDataset3(**metric_kwargs))
        elif config.MODULE == 'BERTGENMultitaskTraining':
            # One MLM-accuracy metric per dataset (BATCH_IMAGES may be a
            # scalar, in which case a single metric is used).
            num_metric = 1
            try:
                num_metric = len(config.TRAIN.BATCH_IMAGES)
            except:
                num_metric = 1
            for i in range(num_metric):
                train_metrics_list.append(pretrain_metrics.MLMAccuracyGlobal(
                    **metric_kwargs, eval_name=str(i)))
                val_metrics_list.append(pretrain_metrics.MLMAccuracyGlobal(
                    **metric_kwargs, eval_name=str(i)))
        else:
            train_metrics_list.append(
                pretrain_metrics.MLMAccuracy(**metric_kwargs))
            val_metrics_list.append(
                pretrain_metrics.MLMAccuracy(**metric_kwargs))
    if config.NETWORK.WITH_MVRC_LOSS:
        train_metrics_list.append(
            pretrain_metrics.MVRCAccuracy(**metric_kwargs))
        val_metrics_list.append(pretrain_metrics.MVRCAccuracy(**metric_kwargs))
    for output_name, display_name in config.TRAIN.LOSS_LOGGERS:
        train_metrics_list.append(pretrain_metrics.LossLogger(
            output_name, display_name=display_name, **metric_kwargs))
        val_metrics_list.append(pretrain_metrics.LossLogger(
            output_name, display_name=display_name, **metric_kwargs))
    train_metrics = CompositeEvalMetric()
    val_metrics = CompositeEvalMetric()
    for child_metric in train_metrics_list:
        train_metrics.add(child_metric)
    for child_metric in val_metrics_list:
        val_metrics.add(child_metric)
    # epoch end callbacks
    epoch_end_callbacks = []
    if (rank is None) or (rank == 0):
        # Only rank 0 writes checkpoints.
        epoch_end_callbacks = [Checkpoint(
            model_prefix, config.CHECKPOINT_FREQUENT)]
    host_metric_name = 'MLMAcc' if not config.MODULE == 'ResNetVLBERTForPretrainingMultitask' else 'MLMAccWVC'
    validation_monitor = ValidationMonitor(do_validation, val_loader, val_metrics,
                                           host_metric_name=host_metric_name)
    # optimizer initial lr before
    for group in optimizer.param_groups:
        group.setdefault('initial_lr', group['lr'])
    # resume/auto-resume
    if rank is None or rank == 0:
        smart_resume(model, optimizer, validation_monitor,
                     config, model_prefix, logger)
    if args.dist:
        # Propagate the resumed epoch from rank 0 to all ranks.
        begin_epoch = torch.tensor(config.TRAIN.BEGIN_EPOCH).cuda()
        distributed.broadcast(begin_epoch, src=0)
        config.TRAIN.BEGIN_EPOCH = begin_epoch.item()
    # batch end callbacks
    batch_size = len(config.GPUS.split(',')) * (sum(config.TRAIN.BATCH_IMAGES)
                                                if isinstance(config.TRAIN.BATCH_IMAGES, list)
                                                else config.TRAIN.BATCH_IMAGES)
    batch_end_callbacks = [Speedometer(batch_size, config.LOG_FREQUENT,
                                       batches_per_epoch=len(train_loader),
                                       epochs=config.TRAIN.END_EPOCH - config.TRAIN.BEGIN_EPOCH)]
    # setup lr step and lr scheduler
    if config.TRAIN.LR_SCHEDULE == 'plateau':
        print("Warning: not support resuming on plateau lr schedule!")
        lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                                  mode='max',
                                                                  factor=config.TRAIN.LR_FACTOR,
                                                                  patience=1,
                                                                  verbose=True,
                                                                  threshold=1e-4,
                                                                  threshold_mode='rel',
                                                                  cooldown=2,
                                                                  min_lr=0,
                                                                  eps=1e-8)
    elif config.TRAIN.LR_SCHEDULE == 'triangle':
        # FM edit: i am using here the new optimizer so that i can tweek the LR when i resume a model (not the best solution)
        # but this fix was so that i can extend the number of epochs.
        lr_scheduler = WarmupLinearSchedule(optimizer,
                                            config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
                                            t_total=int(
                                                config.TRAIN.END_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS),
                                            last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
    elif config.TRAIN.LR_SCHEDULE == 'step':
        # LR_STEP is given in epochs; convert to optimizer-step iterations.
        lr_iters = [int(epoch * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS)
                    for epoch in config.TRAIN.LR_STEP]
        lr_scheduler = WarmupMultiStepLR(optimizer, milestones=lr_iters, gamma=config.TRAIN.LR_FACTOR,
                                         warmup_factor=config.TRAIN.WARMUP_FACTOR,
                                         warmup_iters=config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
                                         warmup_method=config.TRAIN.WARMUP_METHOD,
                                         last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
    else:
        raise ValueError("Not support lr schedule: {}.".format(
            config.TRAIN.LR_SCHEDULE))
    # broadcast parameter and optimizer state from rank 0 before training start
    if args.dist:
        for v in model.state_dict().values():
            distributed.broadcast(v, src=0)
        # for v in optimizer.state_dict().values():
        #     distributed.broadcast(v, src=0)
        best_epoch = torch.tensor(validation_monitor.best_epoch).cuda()
        best_val = torch.tensor(validation_monitor.best_val).cuda()
        distributed.broadcast(best_epoch, src=0)
        distributed.broadcast(best_val, src=0)
        validation_monitor.best_epoch = best_epoch.item()
        validation_monitor.best_val = best_val.item()
    # apex: amp fp16 mixed-precision training
    if config.TRAIN.FP16:
        # model.apply(bn_fp16_half_eval)
        model, optimizer = amp.initialize(model, optimizer,
                                          opt_level='O2',
                                          keep_batchnorm_fp32=False,
                                          loss_scale=config.TRAIN.FP16_LOSS_SCALE,
                                          max_loss_scale=128.0,
                                          min_loss_scale=128.0)
        if args.dist:
            # FP16 path uses Apex DDP instead of the native DDP wrapper above.
            model = Apex_DDP(model, delay_allreduce=True)
    train(model, optimizer, lr_scheduler, train_loader, train_sampler, train_metrics,
          config.TRAIN.BEGIN_EPOCH, config.TRAIN.END_EPOCH, logger,
          rank=rank, batch_end_callbacks=batch_end_callbacks, epoch_end_callbacks=epoch_end_callbacks,
          writer=writer, validation_monitor=validation_monitor, fp16=config.TRAIN.FP16,
          clip_grad_norm=config.TRAIN.CLIP_GRAD_NORM,
          gradient_accumulate_steps=config.TRAIN.GRAD_ACCUMULATE_STEPS)
    return rank, model
| 21,500 | 50.192857 | 165 | py |
BertGen | BertGen-master/LanguageGeneration/modules/bertgen_multitask_training.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_bert import VisualLinguisticBertForPretraining
from common.utils.misc import soft_cross_entropy
BERT_WEIGHTS_NAME = 'pytorch_model.bin'
class BERTGENMultitaskTraining(Module):
    """BERTGEN model for multi-task masked-language-model training.

    ``forward`` consumes the flat argument tuple produced by
    MultiTaskDataLoader: for each dataset either 8 tensors (visual datasets:
    image, boxes, im_info, text, relationship_label, mlm_labels, mvrc_ops,
    mvrc_labels) or 3 tensors (text-only datasets: text, relationship_label,
    mlm_labels).  The sub-batches are merged into one padded batch, run
    through VL-BERT once, and the combined MLM loss is returned together
    with per-dataset logits/labels/losses for metric reporting.
    """

    def __init__(self, config):
        """Build the visual backbone, embedding tables, tokenizer and VL-BERT.

        :param config: full experiment config; model options are read from
            ``config.NETWORK`` and the number of datasets is inferred from
            ``config.TRAIN.BATCH_IMAGES``.
        """
        super(BERTGENMultitaskTraining, self).__init__(config)
        # Constructs/initialises model elements
        self.image_feature_extractor = FastRCNN(config,
                                                average_pool=True,
                                                final_dim=config.NETWORK.IMAGE_FINAL_DIM,
                                                enable_cnn_reg_loss=False)
        # Single learned [IMG] token embedding shared by all object positions.
        self.object_linguistic_embeddings = nn.Embedding(
            1, config.NETWORK.VLBERT.hidden_size)
        if config.NETWORK.IMAGE_FEAT_PRECOMPUTED or (not config.NETWORK.MASK_RAW_PIXELS):
            # Replacement feature vector for masked regions (2048-d to match
            # the precomputed Faster R-CNN feature size).
            self.object_mask_visual_embedding = nn.Embedding(1, 2048)
        if config.NETWORK.WITH_MVRC_LOSS:
            self.object_mask_word_embedding = nn.Embedding(
                1, config.NETWORK.VLBERT.hidden_size)
        self.aux_text_visual_embedding = nn.Embedding(
            1, config.NETWORK.VLBERT.hidden_size)
        self.image_feature_bn_eval = config.NETWORK.IMAGE_FROZEN_BN
        self.tokenizer = BertTokenizer.from_pretrained(
            config.NETWORK.BERT_MODEL_NAME)
        # BATCH_IMAGES may be a scalar (single dataset) or a list.
        try:
            self.num_datasets = len(config.TRAIN.BATCH_IMAGES)
        except:
            self.num_datasets = 1
        # Can specify pre-trained model or use the downloaded pretrained model specific in .yaml file
        language_pretrained_model_path = None
        if config.NETWORK.BERT_PRETRAINED != '':
            # FM edit: just use path of pretrained model
            language_pretrained_model_path = config.NETWORK.BERT_PRETRAINED
        elif os.path.isdir(config.NETWORK.BERT_MODEL_NAME):
            weight_path = os.path.join(
                config.NETWORK.BERT_MODEL_NAME, BERT_WEIGHTS_NAME)
            if os.path.isfile(weight_path):
                language_pretrained_model_path = weight_path
        if language_pretrained_model_path is None:
            print("Warning: no pretrained language model found, training from scratch!!!")
        self.vlbert = VisualLinguisticBertForPretraining(
            config.NETWORK.VLBERT,
            language_pretrained_model_path=None if config.NETWORK.VLBERT.from_scratch else language_pretrained_model_path,
            with_rel_head=config.NETWORK.WITH_REL_LOSS,
            with_mlm_head=config.NETWORK.WITH_MLM_LOSS,
            with_mvrc_head=config.NETWORK.WITH_MVRC_LOSS,
        )
        # init weights
        self.init_weight()
        self.fix_params()

    def init_weight(self):
        """Initialise embedding tables and the visual backbone weights."""
        if self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED or (not self.config.NETWORK.MASK_RAW_PIXELS):
            self.object_mask_visual_embedding.weight.data.fill_(0.0)
        if self.config.NETWORK.WITH_MVRC_LOSS:
            self.object_mask_word_embedding.weight.data.normal_(mean=0.0,
                                                                std=self.config.NETWORK.VLBERT.initializer_range)
        self.aux_text_visual_embedding.weight.data.normal_(
            mean=0.0, std=self.config.NETWORK.VLBERT.initializer_range)
        self.image_feature_extractor.init_weight()
        if self.object_linguistic_embeddings is not None:
            self.object_linguistic_embeddings.weight.data.normal_(mean=0.0,
                                                                  std=self.config.NETWORK.VLBERT.initializer_range)

    def train(self, mode=True):
        """Set train/eval mode, keeping frozen BN layers in eval mode."""
        super(BERTGENMultitaskTraining, self).train(mode)
        # turn some frozen layers to eval mode
        if self.image_feature_bn_eval:
            self.image_feature_extractor.bn_eval()

    def fix_params(self):
        # Hook for freezing parameters; intentionally a no-op in this model.
        pass

    def _collect_obj_reps(self, span_tags, object_reps):
        """
        Collect span-level object representations
        :param span_tags: [batch_size, ..leading_dims.., L]
        :param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
        :return: tensor shaped like span_tags with a trailing obj_dim axis
        """
        # In case there were masked values here
        span_tags_fixed = torch.clamp(span_tags, min=0)
        row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
        row_id_broadcaster = torch.arange(
            0, row_id.shape[0], step=1, device=row_id.device)[:, None]
        # Add extra dimensions to the row broadcaster so it matches row_id
        leading_dims = len(span_tags.shape) - 2
        for i in range(leading_dims):
            row_id_broadcaster = row_id_broadcaster[..., None]
        row_id += row_id_broadcaster
        return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)

    def forward(self,
                *args):
        """Merge per-dataset sub-batches, run VL-BERT, and compute MLM loss.

        :param args: flat tuple of tensors from MultiTaskDataLoader; each
            dataset contributes 8 tensors (visual) or 3 tensors (text-only).
        :return: (outputs, loss) where outputs holds per-dataset
            'mlm_logits_<i>' / 'mlm_label_<i>' / 'mlm_loss_<i>' entries.
        """
        num_datasets = self.num_datasets
        image_list = []
        boxes_list = []
        boxes_mask_list = []
        im_info_list = []
        text_list = []
        relationship_label_list = []
        mlm_labels_list = []
        mvrc_ops_list = []
        mvrc_labels_list = []
        has_visual = []
        max_global_len = 0
        max_global_text_len = 0
        total_examples = 0
        ###########################################
        # Step 1 - Loop through all to get sizes
        ref = 0
        vis_i = 0
        for i in range(num_datasets):
            # NOTE(review): a None first tensor is taken to mean a visual
            # dataset (image absent because features are precomputed in the
            # boxes tensor) — confirm against the collate convention.
            if args[ref] is None:
                has_visual.append(True)
            else:
                has_visual.append(False)
            if has_visual[i]:
                image_list.append(args[ref])
                boxes_list.append(args[ref+1])
                # Valid boxes are marked by a non-sentinel first coordinate.
                boxes_mask_list.append((args[ref+1])[:, :, 0] > -1.5)
                im_info_list.append(args[ref+2])
                text_list.append(args[ref+3])
                relationship_label_list.append(args[ref+4])
                mlm_labels_list.append(args[ref+5])
                mvrc_ops_list.append(args[ref+6])
                mvrc_labels_list.append(args[ref+7])
                vis_len = int(boxes_mask_list[vis_i].sum(1).max().item())
                if vis_len > max_global_len:
                    max_global_len = vis_len
                text_len = text_list[i].shape[1]
                if text_len > max_global_text_len:
                    max_global_text_len = text_len
                ref += 8
                vis_i += 1
            else:
                text_list.append(args[ref])
                relationship_label_list.append(args[ref+1])
                mlm_labels_list.append(args[ref+2])
                text_len = text_list[i].shape[1]
                if text_len > max_global_text_len:
                    max_global_text_len = text_len
                ref += 3
            total_examples += text_list[i].shape[0]
        ################################################
        # Step 2 - Loop through datasets
        cur_start = 0
        cur_stop = 0
        vis_i = 0
        box_features_list = []
        obj_reps_list = []
        text_tags_list = []
        text_visual_embeddings_list = []
        object_linguistic_embeddings_list = []
        object_vl_embeddings_list = []
        for i in range(num_datasets):
            if has_visual[i]:
                # Trim every visual tensor to the longest box sequence seen.
                boxes_mask_list[vis_i] = boxes_mask_list[vis_i][:,
                                                                :max_global_len]
                boxes_list[vis_i] = boxes_list[vis_i][:, :max_global_len]
                mvrc_ops_list[vis_i] = mvrc_ops_list[vis_i][:, :max_global_len]
                mvrc_labels_list[vis_i] = mvrc_labels_list[vis_i][:,
                                                                  :max_global_len]
                if self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED:
                    # Columns 4: of the boxes tensor carry precomputed
                    # features; masked regions get the mask embedding.
                    box_features_list.append(boxes_list[vis_i][:, :, 4:])
                    box_features_list[vis_i][mvrc_ops_list[vis_i] ==
                                             1] = self.object_mask_visual_embedding.weight[0]
                    boxes_list[vis_i][:, :, 4:] = box_features_list[vis_i]
                obj_reps_list.append(self.image_feature_extractor(images=image_list[vis_i],
                                                                  boxes=boxes_list[vis_i],
                                                                  box_mask=boxes_mask_list[vis_i],
                                                                  im_info=im_info_list[vis_i],
                                                                  classes=None,
                                                                  segms=None,
                                                                  mvrc_ops=mvrc_ops_list[vis_i],
                                                                  mask_visual_embed=self.object_mask_visual_embedding.weight[
                                                                      0]
                                                                  if (not self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED)
                                                                  and (not self.config.NETWORK.MASK_RAW_PIXELS)
                                                                  else None))
            ############################################
            # prepare text
            # text_input_ids = text
            # size of sub-batch
            cur_stop += text_list[i].shape[0]
            # creates a text_tags tensor of the same shape as text tensor
            text_tags_list.append(text_list[i].new_zeros(text_list[i].shape))
            if has_visual[i]:
                text_visual_embeddings_list.append(self._collect_obj_reps(
                    text_tags_list[i], obj_reps_list[vis_i]['obj_reps']))
                # linguistic embedding for visual uses [IMG] embedding for all (apart from masked visual)
                object_linguistic_embeddings_list.append(self.object_linguistic_embeddings(
                    boxes_list[vis_i].new_zeros(
                        (boxes_list[vis_i].shape[0], boxes_list[vis_i].shape[1])).long()
                ))
                object_vl_embeddings_list.append(torch.cat(
                    (obj_reps_list[vis_i]['obj_reps'], object_linguistic_embeddings_list[vis_i]), -1))
            # Initialise in first pass
            # NOTE(review): assumes dataset 0 is visual — the *_multi buffers
            # below index text_visual_embeddings_list[vis_i], which is empty
            # if the first dataset is text-only.
            if i == 0:
                text_input_ids_multi = text_list[i].new_zeros(
                    (total_examples, max_global_text_len))
                text_visual_embeddings_multi = text_visual_embeddings_list[vis_i].new_zeros((total_examples,
                                                                                             max_global_text_len,
                                                                                             text_visual_embeddings_list[vis_i].shape[-1]))
                object_vl_embeddings_multi = object_vl_embeddings_list[vis_i].new_zeros((total_examples, max_global_len,
                                                                                         object_vl_embeddings_list[vis_i].shape[-1]))
                box_mask_multi = boxes_mask_list[vis_i].new_zeros(
                    (total_examples, max_global_len))
            # Concatenates the sub-batches from all dataloaders
            text_input_ids_multi[cur_start:cur_stop,
                                 :text_list[i].shape[1]] = text_list[i]
            if has_visual[i]:
                text_visual_embeddings_multi[cur_start:cur_stop, :text_visual_embeddings_list[vis_i].shape[1]] \
                    = text_visual_embeddings_list[vis_i]
                object_vl_embeddings_multi[cur_start:cur_stop,
                                           :object_vl_embeddings_list[vis_i].shape[1], :] = object_vl_embeddings_list[vis_i]
                box_mask_multi[cur_start:cur_stop,
                               :boxes_mask_list[vis_i].shape[1]] = boxes_mask_list[vis_i]
            cur_start = cur_stop
            # TODO: fix to increment if non_visual
            if has_visual[i]:
                vis_i += 1
        # add final
        text_token_type_ids_multi = text_input_ids_multi.new_zeros(
            text_input_ids_multi.shape)
        # Token id 0 is padding; everything else is attended to.
        text_mask_multi = (text_input_ids_multi > 0)
        ###########################################
        relationship_logits_multi, mlm_logits_multi, mvrc_logits_multi = self.vlbert(text_input_ids_multi,
                                                                                     text_token_type_ids_multi,
                                                                                     text_visual_embeddings_multi,
                                                                                     text_mask_multi,
                                                                                     object_vl_embeddings_multi,
                                                                                     box_mask_multi)
        ###########################################
        ###########################################
        outputs = {}
        # loss
        # relationship_loss = im_info_list.new_zeros(())
        # mlm_loss = im_info_list.new_zeros(())
        # mvrc_loss = im_info.new_zeros(())
        mlm_logits_list = []
        mlm_loss_list = []
        outputs_dict = {}
        mlm_labels_dataset_list = []
        # NOTE(review): im_info_list is only populated by visual datasets,
        # so this line requires at least one visual dataset in the mix.
        loss = im_info_list[-1].new_zeros(())
        if self.config.NETWORK.WITH_REL_LOSS:
            # NOTE(review): `text_input_ids` and `relationship_label` are not
            # defined in this forward — this branch would raise NameError if
            # WITH_REL_LOSS were enabled for this module.
            relationship_logits = relationship_logits_multi[:text_input_ids.shape[0]]
            relationship_loss = F.cross_entropy(
                relationship_logits, relationship_label)
        if self.config.NETWORK.WITH_MLM_LOSS:
            # -1 marks positions that do not contribute to the MLM loss.
            mlm_labels_multi = mlm_labels_list[0].new_zeros((total_examples, max_global_text_len)).fill_(
                -1)
            cur_start = 0
            cur_stop = 0
            for i in range(num_datasets):
                cur_stop += mlm_labels_list[i].shape[0]
                mlm_labels_multi[cur_start:cur_stop,
                                 :mlm_labels_list[i].shape[1]] = mlm_labels_list[i]
                # compute individual losses for reporting metrics
                mlm_loss_list.append(F.cross_entropy(
                    mlm_logits_multi[cur_start:cur_stop].view(
                        (-1, mlm_logits_multi[cur_start:cur_stop].shape[-1])),
                    mlm_labels_multi[cur_start:cur_stop].view(-1),
                    ignore_index=-1
                ))
                # collect data for metrics
                outputs_dict['mlm_logits_' +
                             str(i)] = mlm_logits_multi[cur_start:cur_stop]
                outputs_dict['mlm_label_' +
                             str(i)] = mlm_labels_multi[cur_start:cur_stop]
                outputs_dict['mlm_loss_'+str(i)] = mlm_loss_list[i]
                cur_start = cur_stop
            # USE combined loss for backpropagation - only use per dataset for reporting metrics
            mlm_loss = (F.cross_entropy(
                mlm_logits_multi.view((-1, mlm_logits_multi.shape[-1])),
                mlm_labels_multi.view(-1),
                ignore_index=-1
            ))
        # # calculate total loss
        outputs.update(outputs_dict)
        # NOTE(review): `mlm_loss` is only bound when WITH_MLM_LOSS is true;
        # this module therefore assumes the MLM loss is always enabled.
        loss = mlm_loss.mean()
        return outputs, loss
| 15,497 | 44.988131 | 139 | py |
BertGen | BertGen-master/LanguageGeneration/modules/bertgen_generate_mt.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_bert import VisualLinguisticBertForPretraining
from common.utils.misc import soft_cross_entropy
BERT_WEIGHTS_NAME = 'pytorch_model.bin'
class BERTGENGenerateMT(Module):
    def __init__(self, config):
        """Build the generation model: visual backbone, embeddings, VL-BERT.

        Unlike the training module, the pretrained checkpoint path is formed
        as '<BERT_PRETRAINED>-<epoch>.model' from BERT_PRETRAINED and
        BERT_PRETRAINED_EPOCH, and ``config.OUTPUT_PATH`` is remembered as
        ``self.model_path``.

        :param config: full experiment config; model options come from
            ``config.NETWORK``.
        """
        super(BERTGENGenerateMT,
              self).__init__(config)
        self.image_feature_extractor = FastRCNN(config,
                                                average_pool=True,
                                                final_dim=config.NETWORK.IMAGE_FINAL_DIM,
                                                enable_cnn_reg_loss=False)
        # Single learned [IMG] token embedding shared by all object positions.
        self.object_linguistic_embeddings = nn.Embedding(
            1, config.NETWORK.VLBERT.hidden_size)
        if config.NETWORK.IMAGE_FEAT_PRECOMPUTED:
            # Replacement feature for masked regions (2048-d to match the
            # precomputed Faster R-CNN feature size).
            self.object_mask_visual_embedding = nn.Embedding(1, 2048)
        if config.NETWORK.WITH_MVRC_LOSS:
            self.object_mask_word_embedding = nn.Embedding(
                1, config.NETWORK.VLBERT.hidden_size)
        self.aux_text_visual_embedding = nn.Embedding(
            1, config.NETWORK.VLBERT.hidden_size)
        self.image_feature_bn_eval = config.NETWORK.IMAGE_FROZEN_BN
        self.tokenizer = BertTokenizer.from_pretrained(
            config.NETWORK.BERT_MODEL_NAME)
        # Resolve the pretrained language-model weights, if any.
        language_pretrained_model_path = None
        if config.NETWORK.BERT_PRETRAINED != '':
            language_pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.BERT_PRETRAINED,
                                                                      config.NETWORK.BERT_PRETRAINED_EPOCH)
        elif os.path.isdir(config.NETWORK.BERT_MODEL_NAME):
            weight_path = os.path.join(
                config.NETWORK.BERT_MODEL_NAME, BERT_WEIGHTS_NAME)
            if os.path.isfile(weight_path):
                language_pretrained_model_path = weight_path
        if language_pretrained_model_path is None:
            print("Warning: no pretrained language model found, training from scratch!!!")
        self.vlbert = VisualLinguisticBertForPretraining(
            config.NETWORK.VLBERT,
            language_pretrained_model_path=None if config.NETWORK.VLBERT.from_scratch else language_pretrained_model_path,
            with_rel_head=config.NETWORK.WITH_REL_LOSS,
            with_mlm_head=config.NETWORK.WITH_MLM_LOSS,
            with_mvrc_head=config.NETWORK.WITH_MVRC_LOSS,
        )
        self.model_path = config.OUTPUT_PATH
        # init weights
        self.init_weight()
        self.fix_params()
def init_weight(self):
if self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED:
self.object_mask_visual_embedding.weight.data.fill_(0.0)
if self.config.NETWORK.WITH_MVRC_LOSS:
self.object_mask_word_embedding.weight.data.normal_(
mean=0.0, std=self.config.NETWORK.VLBERT.initializer_range)
self.image_feature_extractor.init_weight()
if self.object_linguistic_embeddings is not None:
self.object_linguistic_embeddings.weight.data.normal_(mean=0.0,
std=self.config.NETWORK.VLBERT.initializer_range)
def train(self, mode=True):
super(BERTGENGenerateMT, self).train(mode)
# turn some frozen layers to eval mode
if self.image_feature_bn_eval:
self.image_feature_extractor.bn_eval()
    def fix_params(self):
        # Hook for freezing parameters; intentionally a no-op in this model.
        pass
def _collect_obj_reps(self, span_tags, object_reps):
"""
Collect span-level object representations
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
:return:
"""
# In case there were masked values here
span_tags_fixed = torch.clamp(span_tags, min=0)
row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
row_id_broadcaster = torch.arange(
0, row_id.shape[0], step=1, device=row_id.device)[:, None]
# Add extra diminsions to the row broadcaster so it matches row_id
leading_dims = len(span_tags.shape) - 2
for i in range(leading_dims):
row_id_broadcaster = row_id_broadcaster[..., None]
row_id += row_id_broadcaster
return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
def forward(self,
text,
relationship_label,
mlm_labels):
###########################################
############################################
# prepare text
text_input_ids = text
text_tags = text.new_zeros(text.shape)
text_token_type_ids = text.new_zeros(text.shape)
# ***** FM edit: blank out visual embeddings for translation retrieval task
text_visual_embeddings = text_input_ids.new_zeros(
(text_input_ids.shape[0], text_input_ids.shape[1], 768), dtype=torch.float)
# text_visual_embeddings[:] = self.aux_text_visual_embedding.weight[0]
# ****** FM edit: blank visual embeddings (use known dimensions)
object_vl_embeddings = text_input_ids.new_zeros(
(text_input_ids.shape[0], 1, 1536), dtype=torch.float)
# FM edit: No auxiliary text is used for text only
# add auxiliary text - Concatenates the batches from the two dataloaders
# The visual features for the text only corpus is just the embedding of the aux_visual_embedding (only one embedding)
max_text_len = text_input_ids.shape[1]
text_token_type_ids = text_input_ids.new_zeros(text_input_ids.shape)
text_mask = (text_input_ids > 0)
# FM: Edit: set to zero to ignore vision
box_mask = text_input_ids.new_zeros(
(text_input_ids.shape[0], 1), dtype=torch.uint8)
###########################################
# Visual Linguistic BERT
# #loop here for test mode:
generated = []
stop = [False]*text.shape[0]
curr_len = 0
max_len = 300
while not all(stop) and curr_len <= max_len:
relationship_logits, mlm_logits, mvrc_logits = self.vlbert(text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
box_mask)
# Ignore special tokens
# mlm_logits[:, :, 0] = -10000000
# mlm_logits[:, :, 2:100] = -10000000
# mlm_logits[:, :, 101:104] = -10000000
answers = torch.topk(mlm_logits[mlm_labels == 103], k=1, dim=1)
# Get size of each tensor
position_tensor = torch.arange(mlm_labels.shape[1])
position_tensor = position_tensor.repeat(
mlm_labels.shape[0]).view(mlm_labels.shape[0], -1)
indeces = position_tensor[mlm_labels == 103]
# 1. Update mlm_labels:
mlm_labels_new = mlm_labels.new_zeros(
mlm_labels.shape[0], mlm_labels.shape[1]+1)
mlm_labels_new = mlm_labels_new - 1
mlm_labels_new[torch.arange(mlm_labels.shape[0]), indeces+1] = 103
mlm_labels = mlm_labels_new
# 2. Update text_input_ids:
text_input_ids_new = text_input_ids.new_zeros(
text_input_ids.shape[0], text_input_ids.shape[1]+1)
text_input_ids_new[:, :-1] = text_input_ids
text_input_ids_new[torch.arange(
text_input_ids.shape[0]), indeces] = answers[1][:, 0]
text_input_ids_new[torch.arange(text_input_ids.shape[0]), indeces+1] = (
self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
text_input_ids_new[torch.arange(text_input_ids.shape[0]), indeces+2] = (
self.tokenizer.convert_tokens_to_ids(['[PAD]'])[0])
text_input_ids_new[torch.arange(text_input_ids.shape[0]), indeces+3] = (
self.tokenizer.convert_tokens_to_ids(['[SEP]'])[0])
text_input_ids = text_input_ids_new
# 3. Update text_token_type_ids:
text_token_type_ids = text_token_type_ids.new_zeros(
text_token_type_ids.shape[0], text_token_type_ids.shape[1]+1)
# 4. Update text_input_ids:
text_visual_embeddings_new = text_visual_embeddings.new_zeros(
text_visual_embeddings.shape[0], text_visual_embeddings.shape[1]+1, text_visual_embeddings.shape[2])
text_visual_embeddings_new = text_visual_embeddings_new.transpose(
0, 1)
text_visual_embeddings_new[:] = text_visual_embeddings[:, 0, :]
text_visual_embeddings = text_visual_embeddings_new.transpose(0, 1)
# 5. Update text_mask:
text_mask = (text_input_ids > 0)
# 6. Add to generated
for nid, row in enumerate(answers[1]):
if curr_len == 0:
generated.append([])
for ele in row:
if not stop[nid]:
if self.tokenizer.ids_to_tokens[ele.item()] == '[STOP]':
stop[nid] = True
else:
generated[nid].append(
self.tokenizer.ids_to_tokens[ele.item()])
curr_len += 1
# Join in sentences
generated_sentences = []
for sentence in generated:
new_sentence = ' '.join(sentence)
generated_sentences.append(new_sentence.replace(' ##', ''))
###########################################
outputs = {}
if self.config.NETWORK.WITH_REL_LOSS:
relationship_loss = F.cross_entropy(
relationship_logits, relationship_label)
if self.config.NETWORK.WITH_MLM_LOSS:
mlm_logits_padded = mlm_logits.new_zeros(
(*mlm_labels.shape, mlm_logits.shape[-1])).fill_(-10000.0)
mlm_logits_padded[:, :mlm_logits.shape[1]] = mlm_logits
mlm_logits = mlm_logits_padded
if self.config.NETWORK.MLM_LOSS_NORM_IN_BATCH_FIRST:
mlm_loss = F.cross_entropy(mlm_logits.transpose(1, 2),
mlm_labels,
ignore_index=-1, reduction='none')
num_mlm = (mlm_labels != -1).sum(1,
keepdim=True).to(dtype=mlm_loss.dtype)
num_has_mlm = (num_mlm != 0).sum().to(dtype=mlm_loss.dtype)
mlm_loss = (mlm_loss / (num_mlm + 1e-4)).sum() / \
(num_has_mlm + 1e-4)
else:
mlm_loss = F.cross_entropy(mlm_logits.view((-1, mlm_logits.shape[-1])),
mlm_labels.view(-1),
ignore_index=-1)
outputs.update({
'relationship_logits': relationship_logits if self.config.NETWORK.WITH_REL_LOSS else None,
'relationship_label': relationship_label if self.config.NETWORK.WITH_REL_LOSS else None,
'mlm_logits': mlm_logits if self.config.NETWORK.WITH_MLM_LOSS else None,
'mlm_label': mlm_labels if self.config.NETWORK.WITH_MLM_LOSS else None,
'mvrc_logits': mvrc_logits if self.config.NETWORK.WITH_MVRC_LOSS else None,
'mvrc_label': mvrc_labels if self.config.NETWORK.WITH_MVRC_LOSS else None,
'mlm_loss': mlm_loss,
'generated_sentences': generated_sentences
})
loss = mlm_loss.mean()
return outputs, loss
| 12,103 | 45.733591 | 125 | py |
BertGen | BertGen-master/LanguageGeneration/modules/bertgen_generate_image_only.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_bert import VisualLinguisticBertForPretraining
from common.utils.misc import soft_cross_entropy
BERT_WEIGHTS_NAME = 'pytorch_model.bin'
class BERTGENGenerateImageOnly(Module):
    """BERTGEN caption generator driven by image regions only.

    Extracts Fast R-CNN region features, feeds them to VL-BERT together with
    the tokens generated so far, and decodes autoregressively through the
    masked-LM head until every sentence in the batch emits [STOP] or a
    length cap is reached.
    """
    def __init__(self, config):
        """Build sub-modules from ``config.NETWORK`` and locate BERT weights."""
        super(BERTGENGenerateImageOnly,
              self).__init__(config)
        # region feature extractor (average-pools Fast R-CNN features per box)
        self.image_feature_extractor = FastRCNN(config,
                                                average_pool=True,
                                                final_dim=config.NETWORK.IMAGE_FINAL_DIM,
                                                enable_cnn_reg_loss=False)
        # single learned "object" token embedding shared by all boxes
        self.object_linguistic_embeddings = nn.Embedding(
            1, config.NETWORK.VLBERT.hidden_size)
        if config.NETWORK.IMAGE_FEAT_PRECOMPUTED:
            # placeholder visual feature substituted for masked regions
            self.object_mask_visual_embedding = nn.Embedding(1, 2048)
        if config.NETWORK.WITH_MVRC_LOSS:
            # word embedding marking a masked region (MVRC task)
            self.object_mask_word_embedding = nn.Embedding(
                1, config.NETWORK.VLBERT.hidden_size)
        # visual embedding used for auxiliary text-only samples
        self.aux_text_visual_embedding = nn.Embedding(
            1, config.NETWORK.VLBERT.hidden_size)
        self.image_feature_bn_eval = config.NETWORK.IMAGE_FROZEN_BN
        self.tokenizer = BertTokenizer.from_pretrained(
            config.NETWORK.BERT_MODEL_NAME)
        # locate pretrained language-model weights, if any
        language_pretrained_model_path = None
        if config.NETWORK.BERT_PRETRAINED != '':
            language_pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.BERT_PRETRAINED,
                                                                      config.NETWORK.BERT_PRETRAINED_EPOCH)
        elif os.path.isdir(config.NETWORK.BERT_MODEL_NAME):
            weight_path = os.path.join(
                config.NETWORK.BERT_MODEL_NAME, BERT_WEIGHTS_NAME)
            if os.path.isfile(weight_path):
                language_pretrained_model_path = weight_path
        if language_pretrained_model_path is None:
            print("Warning: no pretrained language model found, training from scratch!!!")
        self.vlbert = VisualLinguisticBertForPretraining(
            config.NETWORK.VLBERT,
            language_pretrained_model_path=None if config.NETWORK.VLBERT.from_scratch else language_pretrained_model_path,
            with_rel_head=config.NETWORK.WITH_REL_LOSS,
            with_mlm_head=config.NETWORK.WITH_MLM_LOSS,
            with_mvrc_head=config.NETWORK.WITH_MVRC_LOSS,
        )
        # init weights
        self.init_weight()
        self.fix_params()
    def init_weight(self):
        """Initialize embeddings and the image feature extractor."""
        if self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED:
            self.object_mask_visual_embedding.weight.data.fill_(0.0)
        if self.config.NETWORK.WITH_MVRC_LOSS:
            self.object_mask_word_embedding.weight.data.normal_(
                mean=0.0, std=self.config.NETWORK.VLBERT.initializer_range)
        self.image_feature_extractor.init_weight()
        if self.object_linguistic_embeddings is not None:
            self.object_linguistic_embeddings.weight.data.normal_(mean=0.0,
                                                                  std=self.config.NETWORK.VLBERT.initializer_range)
    def train(self, mode=True):
        """Switch train/eval mode; frozen BatchNorm layers stay in eval."""
        super(BERTGENGenerateImageOnly, self).train(mode)
        # turn some frozen layers to eval mode
        if self.image_feature_bn_eval:
            self.image_feature_extractor.bn_eval()
    def fix_params(self):
        """Hook for freezing parameters; intentionally a no-op here."""
        pass
    def _collect_obj_reps(self, span_tags, object_reps):
        """
        Collect span-level object representations
        :param span_tags: [batch_size, ..leading_dims.., L]
        :param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
        :return: [batch_size, ..leading_dims.., L, obj_dim]
        """
        # In case there were masked values here
        span_tags_fixed = torch.clamp(span_tags, min=0)
        row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
        row_id_broadcaster = torch.arange(
            0, row_id.shape[0], step=1, device=row_id.device)[:, None]
        # Add extra diminsions to the row broadcaster so it matches row_id
        leading_dims = len(span_tags.shape) - 2
        for i in range(leading_dims):
            row_id_broadcaster = row_id_broadcaster[..., None]
        row_id += row_id_broadcaster
        return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
    def forward(self,
                image,
                boxes,
                im_info,
                text,
                relationship_label,
                mlm_labels,
                mvrc_ops,
                mvrc_labels):
        """Generate one caption per image via iterative masked-LM decoding.

        The [MASK] generation slot is marked by the value 103 in
        ``mlm_labels``; each loop iteration fills the slot with the top-1
        prediction and shifts it one position right, until every sentence
        emits [STOP] or 150 tokens have been produced.

        :return: (outputs dict incl. 'generated_sentences', scalar loss)
        """
        ###########################################
        # visual feature extraction
        images = image
        # valid boxes have x1 > -1.5 (padding boxes are filled with -2)
        box_mask = (boxes[:, :, 0] > -1.5)
        origin_len = boxes.shape[1]
        # trim padding boxes to the longest valid run in the batch
        max_len = int(box_mask.sum(1).max().item())
        box_mask = box_mask[:, :max_len]
        boxes = boxes[:, :max_len]
        mvrc_ops = mvrc_ops[:, :max_len]
        mvrc_labels = mvrc_labels[:, :max_len]
        if self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED:
            # columns 4: of `boxes` carry the precomputed region features;
            # masked regions (mvrc_ops == 1) get the placeholder embedding
            box_features = boxes[:, :, 4:]
            box_features[mvrc_ops ==
                         1] = self.object_mask_visual_embedding.weight[0]
            boxes[:, :, 4:] = box_features
        obj_reps = self.image_feature_extractor(images=images,
                                                boxes=boxes,
                                                box_mask=box_mask,
                                                im_info=im_info,
                                                classes=None,
                                                segms=None,
                                                mvrc_ops=mvrc_ops,
                                                mask_visual_embed=None)
        ############################################
        # prepare text
        text_input_ids = text
        # all tokens are tagged 0 -> they all receive the whole-image feature
        text_tags = text.new_zeros(text.shape)
        text_token_type_ids = text.new_zeros(text.shape)
        text_mask = (text_input_ids > 0)
        text_visual_embeddings = self._collect_obj_reps(
            text_tags, obj_reps['obj_reps'])
        object_linguistic_embeddings = self.object_linguistic_embeddings(
            boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
        )
        if self.config.NETWORK.WITH_MVRC_LOSS:
            object_linguistic_embeddings[mvrc_ops ==
                                         1] = self.object_mask_word_embedding.weight[0]
        # per-box input = [visual feature ; object token embedding]
        object_vl_embeddings = torch.cat(
            (obj_reps['obj_reps'], object_linguistic_embeddings), -1)
        ###########################################
        # Visual Linguistic BERT
        # #loop here for test mode:
        generated = []
        stop = [False]*text.shape[0]
        curr_len = 0
        max_len = 150
        while not all(stop) and curr_len <= max_len:
            relationship_logits, mlm_logits, mvrc_logits = self.vlbert(text_input_ids,
                                                                       text_token_type_ids,
                                                                       text_visual_embeddings,
                                                                       text_mask,
                                                                       object_vl_embeddings,
                                                                       box_mask)
            # Ignore special tokens
            # top-1 token at each sentence's [MASK] slot (marked by 103)
            answers = torch.topk(mlm_logits[mlm_labels == 103], k=1, dim=1)
            # Get size of each tensor
            position_tensor = torch.arange(mlm_labels.shape[1])
            position_tensor = position_tensor.repeat(
                mlm_labels.shape[0]).view(mlm_labels.shape[0], -1)
            indeces = position_tensor[mlm_labels == 103]
            # 1. Update mlm_labels: grow by one and shift the 103 marker right
            mlm_labels_new = mlm_labels.new_zeros(
                mlm_labels.shape[0], mlm_labels.shape[1]+1)
            mlm_labels_new = mlm_labels_new - 1
            mlm_labels_new[torch.arange(mlm_labels.shape[0]), indeces+1] = 103
            mlm_labels = mlm_labels_new
            # 2. Update text_input_ids: write prediction, re-append
            #    [MASK][PAD][SEP] after it
            text_input_ids_new = text_input_ids.new_zeros(
                text_input_ids.shape[0], text_input_ids.shape[1]+1)
            text_input_ids_new[:, :-1] = text_input_ids
            text_input_ids_new[torch.arange(
                text_input_ids.shape[0]), indeces] = answers[1][:, 0]
            text_input_ids_new[torch.arange(text_input_ids.shape[0]), indeces+1] = (
                self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
            text_input_ids_new[torch.arange(text_input_ids.shape[0]), indeces+2] = (
                self.tokenizer.convert_tokens_to_ids(['[PAD]'])[0])
            text_input_ids_new[torch.arange(text_input_ids.shape[0]), indeces+3] = (
                self.tokenizer.convert_tokens_to_ids(['[SEP]'])[0])
            text_input_ids = text_input_ids_new
            # 3. Update text_token_type_ids: grow (still all zeros)
            text_token_type_ids = text_token_type_ids.new_zeros(
                text_token_type_ids.shape[0], text_token_type_ids.shape[1]+1)
            # 4. Update text_input_ids: grow visual embeddings, replicating
            #    the whole-image feature into every position
            text_visual_embeddings_new = text_visual_embeddings.new_zeros(
                text_visual_embeddings.shape[0], text_visual_embeddings.shape[1]+1, text_visual_embeddings.shape[2])
            text_visual_embeddings_new = text_visual_embeddings_new.transpose(
                0, 1)
            text_visual_embeddings_new[:] = text_visual_embeddings[:, 0, :]
            text_visual_embeddings = text_visual_embeddings_new.transpose(0, 1)
            # 5. Update text_mask:
            text_mask = (text_input_ids > 0)
            # 6. Append generated words from each sentence in the batch to list - terminate if all [STOP]
            for nid, row in enumerate(answers[1]):
                if curr_len == 0:
                    generated.append([])
                for ele in row:
                    # try:
                    if not stop[nid]:
                        if self.tokenizer.ids_to_tokens[ele.item()] == '[STOP]':
                            stop[nid] = True
                        else:
                            # print('generated: ', ele.item())
                            generated[nid].append(
                                self.tokenizer.ids_to_tokens[ele.item()])
                    # except:
                    #     generated[nid].append(self.tokenizer.ids_to_tokens[100])
            curr_len += 1
        # Join in sentences (merge '##' word-piece continuations)
        generated_sentences = []
        for sentence in generated:
            new_sentence = ' '.join(sentence)
            generated_sentences.append(new_sentence.replace(' ##', ''))
        ###########################################
        outputs = {}
        # loss (zero-initialized so disabled losses contribute nothing)
        relationship_loss = im_info.new_zeros(())
        mlm_loss = im_info.new_zeros(())
        mvrc_loss = im_info.new_zeros(())
        if self.config.NETWORK.WITH_REL_LOSS:
            relationship_loss = F.cross_entropy(
                relationship_logits, relationship_label)
        if self.config.NETWORK.WITH_MLM_LOSS:
            # pad logits along the sequence axis so they line up with the
            # grown mlm_labels; padded positions get a large negative logit
            mlm_logits_padded = mlm_logits.new_zeros(
                (*mlm_labels.shape, mlm_logits.shape[-1])).fill_(-10000.0)
            mlm_logits_padded[:, :mlm_logits.shape[1]] = mlm_logits
            mlm_logits = mlm_logits_padded
            if self.config.NETWORK.MLM_LOSS_NORM_IN_BATCH_FIRST:
                # normalize per sentence first, then across sentences
                mlm_loss = F.cross_entropy(mlm_logits.transpose(1, 2),
                                           mlm_labels,
                                           ignore_index=-1, reduction='none')
                num_mlm = (mlm_labels != -1).sum(1,
                                                 keepdim=True).to(dtype=mlm_loss.dtype)
                num_has_mlm = (num_mlm != 0).sum().to(dtype=mlm_loss.dtype)
                mlm_loss = (mlm_loss / (num_mlm + 1e-4)).sum() / \
                    (num_has_mlm + 1e-4)
            else:
                mlm_loss = F.cross_entropy(mlm_logits.view((-1, mlm_logits.shape[-1])),
                                           mlm_labels.view(-1),
                                           ignore_index=-1)
        outputs.update({
            'relationship_logits': relationship_logits if self.config.NETWORK.WITH_REL_LOSS else None,
            'relationship_label': relationship_label if self.config.NETWORK.WITH_REL_LOSS else None,
            'mlm_logits': mlm_logits if self.config.NETWORK.WITH_MLM_LOSS else None,
            'mlm_label': mlm_labels if self.config.NETWORK.WITH_MLM_LOSS else None,
            'mvrc_logits': mvrc_logits if self.config.NETWORK.WITH_MVRC_LOSS else None,
            'mvrc_label': mvrc_labels if self.config.NETWORK.WITH_MVRC_LOSS else None,
            'relationship_loss': relationship_loss,
            'mlm_loss': mlm_loss,
            'mvrc_loss': mvrc_loss,
            'generated_sentences': generated_sentences
        })
        loss = relationship_loss.mean() + mlm_loss.mean() + mvrc_loss.mean()
        return outputs, loss
| 13,238 | 45.452632 | 122 | py |
BertGen | BertGen-master/LanguageGeneration/modules/bertgen_global_generate_mmt.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_bert import VisualLinguisticBertForPretraining
from common.utils.misc import soft_cross_entropy
BERT_WEIGHTS_NAME = 'pytorch_model.bin'
class BERTGENGenerateMMT(Module):
    """BERTGEN generator for multimodal machine translation (MMT).

    Combines Fast R-CNN region features with the source/partial-target
    tokens in VL-BERT and decodes the translation autoregressively through
    the masked-LM head until every sentence emits [STOP] or a length cap
    is reached.
    """
    def __init__(self, config):
        """Build sub-modules from ``config.NETWORK`` and locate BERT weights."""
        super(BERTGENGenerateMMT, self).__init__(config)
        # region feature extractor (average-pools Fast R-CNN features per box)
        self.image_feature_extractor = FastRCNN(config,
                                                average_pool=True,
                                                final_dim=config.NETWORK.IMAGE_FINAL_DIM,
                                                enable_cnn_reg_loss=False)
        # single learned "object" token embedding shared by all boxes
        self.object_linguistic_embeddings = nn.Embedding(
            1, config.NETWORK.VLBERT.hidden_size)
        if config.NETWORK.IMAGE_FEAT_PRECOMPUTED:
            # placeholder visual feature substituted for masked regions
            self.object_mask_visual_embedding = nn.Embedding(1, 2048)
        if config.NETWORK.WITH_MVRC_LOSS:
            # word embedding marking a masked region (MVRC task)
            self.object_mask_word_embedding = nn.Embedding(
                1, config.NETWORK.VLBERT.hidden_size)
        # visual embedding used for auxiliary text-only samples
        self.aux_text_visual_embedding = nn.Embedding(
            1, config.NETWORK.VLBERT.hidden_size)
        self.image_feature_bn_eval = config.NETWORK.IMAGE_FROZEN_BN
        self.tokenizer = BertTokenizer.from_pretrained(
            config.NETWORK.BERT_MODEL_NAME)
        # locate pretrained language-model weights, if any
        language_pretrained_model_path = None
        if config.NETWORK.BERT_PRETRAINED != '':
            language_pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.BERT_PRETRAINED,
                                                                      config.NETWORK.BERT_PRETRAINED_EPOCH)
        elif os.path.isdir(config.NETWORK.BERT_MODEL_NAME):
            weight_path = os.path.join(
                config.NETWORK.BERT_MODEL_NAME, BERT_WEIGHTS_NAME)
            if os.path.isfile(weight_path):
                language_pretrained_model_path = weight_path
        if language_pretrained_model_path is None:
            print("Warning: no pretrained language model found, training from scratch!!!")
        self.vlbert = VisualLinguisticBertForPretraining(
            config.NETWORK.VLBERT,
            language_pretrained_model_path=None if config.NETWORK.VLBERT.from_scratch else language_pretrained_model_path,
            with_rel_head=config.NETWORK.WITH_REL_LOSS,
            with_mlm_head=config.NETWORK.WITH_MLM_LOSS,
            with_mvrc_head=config.NETWORK.WITH_MVRC_LOSS,
        )
        # init weights
        self.init_weight()
        self.fix_params()
    def init_weight(self):
        """Initialize embeddings and the image feature extractor."""
        if self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED:
            self.object_mask_visual_embedding.weight.data.fill_(0.0)
        if self.config.NETWORK.WITH_MVRC_LOSS:
            self.object_mask_word_embedding.weight.data.normal_(
                mean=0.0, std=self.config.NETWORK.VLBERT.initializer_range)
        self.image_feature_extractor.init_weight()
        if self.object_linguistic_embeddings is not None:
            self.object_linguistic_embeddings.weight.data.normal_(mean=0.0,
                                                                  std=self.config.NETWORK.VLBERT.initializer_range)
    def train(self, mode=True):
        """Switch train/eval mode; frozen BatchNorm layers stay in eval."""
        super(BERTGENGenerateMMT, self).train(mode)
        # turn some frozen layers to eval mode
        if self.image_feature_bn_eval:
            self.image_feature_extractor.bn_eval()
    def fix_params(self):
        """Hook for freezing parameters; intentionally a no-op here."""
        pass
    def _collect_obj_reps(self, span_tags, object_reps):
        """
        Collect span-level object representations
        :param span_tags: [batch_size, ..leading_dims.., L]
        :param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
        :return: [batch_size, ..leading_dims.., L, obj_dim]
        """
        # In case there were masked values here
        span_tags_fixed = torch.clamp(span_tags, min=0)
        row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
        row_id_broadcaster = torch.arange(
            0, row_id.shape[0], step=1, device=row_id.device)[:, None]
        # Add extra diminsions to the row broadcaster so it matches row_id
        leading_dims = len(span_tags.shape) - 2
        for i in range(leading_dims):
            row_id_broadcaster = row_id_broadcaster[..., None]
        row_id += row_id_broadcaster
        return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
    def forward(self,
                image,
                boxes,
                im_info,
                text,
                relationship_label,
                mlm_labels,
                mvrc_ops,
                mvrc_labels):
        """Generate one translation per sample via iterative MLM decoding.

        The [MASK] generation slot is marked by the value 103 in
        ``mlm_labels``; each loop iteration fills the slot with the top-1
        prediction and shifts it one position right, until every sentence
        emits [STOP] or 150 tokens have been produced.

        :return: (outputs dict incl. 'generated_sentences', scalar loss)
        """
        ###########################################
        # visual feature extraction
        images = image
        # valid boxes have x1 > -1.5 (padding boxes are filled with -2)
        box_mask = (boxes[:, :, 0] > -1.5)
        origin_len = boxes.shape[1]
        # trim padding boxes to the longest valid run in the batch
        max_len = int(box_mask.sum(1).max().item())
        box_mask = box_mask[:, :max_len]
        boxes = boxes[:, :max_len]
        mvrc_ops = mvrc_ops[:, :max_len]
        mvrc_labels = mvrc_labels[:, :max_len]
        if self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED:
            # columns 4: of `boxes` carry the precomputed region features;
            # masked regions (mvrc_ops == 1) get the placeholder embedding
            box_features = boxes[:, :, 4:]
            box_features[mvrc_ops ==
                         1] = self.object_mask_visual_embedding.weight[0]
            boxes[:, :, 4:] = box_features
        obj_reps = self.image_feature_extractor(images=images,
                                                boxes=boxes,
                                                box_mask=box_mask,
                                                im_info=im_info,
                                                classes=None,
                                                segms=None,
                                                mvrc_ops=mvrc_ops,
                                                mask_visual_embed=None)
        ############################################
        # prepare text
        text_input_ids = text
        # all tokens are tagged 0 -> they all receive the whole-image feature
        text_tags = text.new_zeros(text.shape)
        text_token_type_ids = text.new_zeros(text.shape)
        text_mask = (text_input_ids > 0)
        text_visual_embeddings = self._collect_obj_reps(
            text_tags, obj_reps['obj_reps'])
        object_linguistic_embeddings = self.object_linguistic_embeddings(
            boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
        )
        if self.config.NETWORK.WITH_MVRC_LOSS:
            object_linguistic_embeddings[mvrc_ops ==
                                         1] = self.object_mask_word_embedding.weight[0]
        # per-box input = [visual feature ; object token embedding]
        object_vl_embeddings = torch.cat(
            (obj_reps['obj_reps'], object_linguistic_embeddings), -1)
        ###########################################
        # Visual Linguistic BERT
        # #loop here for test mode:
        generated = []
        stop = [False]*text.shape[0]
        curr_len = 0
        max_len = 150
        while not all(stop) and curr_len <= max_len:
            relationship_logits, mlm_logits, mvrc_logits = self.vlbert(text_input_ids,
                                                                       text_token_type_ids,
                                                                       text_visual_embeddings,
                                                                       text_mask,
                                                                       object_vl_embeddings,
                                                                       box_mask)
            # top-1 token at each sentence's [MASK] slot (marked by 103)
            answers = torch.topk(mlm_logits[mlm_labels == 103], k=1, dim=1)
            # Get size of each tensor
            position_tensor = torch.arange(mlm_labels.shape[1])
            position_tensor = position_tensor.repeat(
                mlm_labels.shape[0]).view(mlm_labels.shape[0], -1)
            indeces = position_tensor[mlm_labels == 103]
            # 1. Update mlm_labels: grow by one and shift the 103 marker right
            mlm_labels_new = mlm_labels.new_zeros(
                mlm_labels.shape[0], mlm_labels.shape[1]+1)
            mlm_labels_new = mlm_labels_new - 1
            mlm_labels_new[torch.arange(mlm_labels.shape[0]), indeces+1] = 103
            mlm_labels = mlm_labels_new
            # 2. Update text_input_ids: write prediction, re-append
            #    [MASK][PAD][SEP] after it
            text_input_ids_new = text_input_ids.new_zeros(
                text_input_ids.shape[0], text_input_ids.shape[1]+1)
            text_input_ids_new[:, :-1] = text_input_ids
            text_input_ids_new[torch.arange(
                text_input_ids.shape[0]), indeces] = answers[1][:, 0]
            text_input_ids_new[torch.arange(text_input_ids.shape[0]), indeces+1] = (
                self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
            text_input_ids_new[torch.arange(text_input_ids.shape[0]), indeces+2] = (
                self.tokenizer.convert_tokens_to_ids(['[PAD]'])[0])
            text_input_ids_new[torch.arange(text_input_ids.shape[0]), indeces+3] = (
                self.tokenizer.convert_tokens_to_ids(['[SEP]'])[0])
            text_input_ids = text_input_ids_new
            # 3. Update text_token_type_ids: grow (still all zeros)
            text_token_type_ids = text_token_type_ids.new_zeros(
                text_token_type_ids.shape[0], text_token_type_ids.shape[1]+1)
            # 4. Update text_input_ids: grow visual embeddings, replicating
            #    the whole-image feature into every position
            text_visual_embeddings_new = text_visual_embeddings.new_zeros(
                text_visual_embeddings.shape[0], text_visual_embeddings.shape[1]+1, text_visual_embeddings.shape[2])
            text_visual_embeddings_new = text_visual_embeddings_new.transpose(
                0, 1)
            text_visual_embeddings_new[:] = text_visual_embeddings[:, 0, :]
            text_visual_embeddings = text_visual_embeddings_new.transpose(0, 1)
            # 5. Update text_mask:
            text_mask = (text_input_ids > 0)
            # 6. Append generated words from each sentence in the batch to list - terminate if all [STOP]
            for nid, row in enumerate(answers[1]):
                if curr_len == 0:
                    generated.append([])
                for ele in row:
                    # try:
                    if not stop[nid]:
                        if self.tokenizer.ids_to_tokens[ele.item()] == '[STOP]':
                            stop[nid] = True
                        else:
                            # print('generated: ', ele.item())
                            generated[nid].append(
                                self.tokenizer.ids_to_tokens[ele.item()])
                    # except:
                    #     generated[nid].append(self.tokenizer.ids_to_tokens[100])
            curr_len += 1
        # Join in sentences (merge '##' word-piece continuations)
        generated_sentences = []
        for sentence in generated:
            new_sentence = ' '.join(sentence)
            generated_sentences.append(new_sentence.replace(' ##', ''))
        ###########################################
        outputs = {}
        # loss (zero-initialized so disabled losses contribute nothing)
        relationship_loss = im_info.new_zeros(())
        mlm_loss = im_info.new_zeros(())
        mvrc_loss = im_info.new_zeros(())
        if self.config.NETWORK.WITH_REL_LOSS:
            relationship_loss = F.cross_entropy(
                relationship_logits, relationship_label)
        if self.config.NETWORK.WITH_MLM_LOSS:
            # pad logits along the sequence axis so they line up with the
            # grown mlm_labels; padded positions get a large negative logit
            mlm_logits_padded = mlm_logits.new_zeros(
                (*mlm_labels.shape, mlm_logits.shape[-1])).fill_(-10000.0)
            mlm_logits_padded[:, :mlm_logits.shape[1]] = mlm_logits
            mlm_logits = mlm_logits_padded
            if self.config.NETWORK.MLM_LOSS_NORM_IN_BATCH_FIRST:
                # normalize per sentence first, then across sentences
                mlm_loss = F.cross_entropy(mlm_logits.transpose(1, 2),
                                           mlm_labels,
                                           ignore_index=-1, reduction='none')
                num_mlm = (mlm_labels != -1).sum(1,
                                                 keepdim=True).to(dtype=mlm_loss.dtype)
                num_has_mlm = (num_mlm != 0).sum().to(dtype=mlm_loss.dtype)
                mlm_loss = (mlm_loss / (num_mlm + 1e-4)).sum() / \
                    (num_has_mlm + 1e-4)
            else:
                mlm_loss = F.cross_entropy(mlm_logits.view((-1, mlm_logits.shape[-1])),
                                           mlm_labels.view(-1),
                                           ignore_index=-1)
        outputs.update({
            'relationship_logits': relationship_logits if self.config.NETWORK.WITH_REL_LOSS else None,
            'relationship_label': relationship_label if self.config.NETWORK.WITH_REL_LOSS else None,
            'mlm_logits': mlm_logits if self.config.NETWORK.WITH_MLM_LOSS else None,
            'mlm_label': mlm_labels if self.config.NETWORK.WITH_MLM_LOSS else None,
            'mvrc_logits': mvrc_logits if self.config.NETWORK.WITH_MVRC_LOSS else None,
            'mvrc_label': mvrc_labels if self.config.NETWORK.WITH_MVRC_LOSS else None,
            'relationship_loss': relationship_loss,
            'mlm_loss': mlm_loss,
            'mvrc_loss': mvrc_loss,
            'generated_sentences': generated_sentences
        })
        loss = relationship_loss.mean() + mlm_loss.mean() + mvrc_loss.mean()
        return outputs, loss
| 13,172 | 45.221053 | 122 | py |
BertGen | BertGen-master/LanguageGeneration/data/collate_batch.py | import torch
from common.utils.clip_pad import *
class BatchCollator(object):
    """Collate function: pads each field to the batch-wide maximum size and
    stacks the samples into batch tensors.

    Field names and their order come from ``dataset.data_names``; images,
    boxes, text and the label fields each use their own padding value.
    """
    def __init__(self, dataset, append_ind=False):
        # append_ind: when True, append each sample's index within the batch
        # as an extra int64 tensor at the end of the sample tuple
        self.dataset = dataset
        self.test_mode = self.dataset.test_mode
        self.data_names = self.dataset.data_names
        self.append_ind = append_ind
    def __call__(self, batch):
        """Pad and stack a list of sample tuples into one tuple of tensors."""
        if not isinstance(batch, list):
            batch = list(batch)
        if 'image' in self.data_names:
            if batch[0][self.data_names.index('image')] is not None:
                # per-dimension maximum over all images in the batch
                max_shape = tuple(max(s) for s in zip(*[data[self.data_names.index('image')].shape for data in batch]))
                image_none = False
            else:
                image_none = True
        if 'boxes' in self.data_names:
            max_boxes = max([data[self.data_names.index('boxes')].shape[0] for data in batch])
        if 'text' in self.data_names:
            max_text_length = max([len(data[self.data_names.index('text')]) for data in batch])
        if 'text_en' in self.data_names:
            max_text_length_en = max([len(data[self.data_names.index('text_en')]) for data in batch])
        if 'text_de' in self.data_names:
            max_text_length_de = max([len(data[self.data_names.index('text_de')]) for data in batch])
        for i, ibatch in enumerate(batch):
            out = {}
            if 'image' in self.data_names:
                if image_none:
                    out['image'] = None
                else:
                    image = ibatch[self.data_names.index('image')]
                    out['image'] = clip_pad_images(image, max_shape, pad=0)
            if 'boxes' in self.data_names:
                boxes = ibatch[self.data_names.index('boxes')]
                # pad value -2 marks invalid boxes (models test x1 > -1.5)
                out['boxes'] = clip_pad_boxes(boxes, max_boxes, pad=-2)
            if 'text' in self.data_names:
                text = ibatch[self.data_names.index('text')]
                out['text'] = clip_pad_1d(text, max_text_length, pad=0)
            if 'mlm_labels' in self.data_names:
                mlm_labels = ibatch[self.data_names.index('mlm_labels')]
                # -1 padding is the ignore_index of the MLM loss
                out['mlm_labels'] = clip_pad_1d(mlm_labels, max_text_length, pad=-1)
            #****************
            # FM edit: added for MT decoder encoder
            if 'text_en' in self.data_names:
                text_en = ibatch[self.data_names.index('text_en')]
                out['text_en'] = clip_pad_1d(text_en, max_text_length_en, pad=0)
            if 'mlm_labels_en' in self.data_names:
                mlm_labels_en = ibatch[self.data_names.index('mlm_labels_en')]
                out['mlm_labels_en'] = clip_pad_1d(mlm_labels_en, max_text_length_en, pad=-1)
            if 'text_de' in self.data_names:
                text_de = ibatch[self.data_names.index('text_de')]
                out['text_de'] = clip_pad_1d(text_de, max_text_length_de, pad=0)
            if 'mlm_labels_de' in self.data_names:
                mlm_labels_de = ibatch[self.data_names.index('mlm_labels_de')]
                out['mlm_labels_de'] = clip_pad_1d(mlm_labels_de, max_text_length_de, pad=-1)
            #****************
            if 'mvrc_ops' in self.data_names:
                mvrc_ops = ibatch[self.data_names.index('mvrc_ops')]
                out['mvrc_ops'] = clip_pad_1d(mvrc_ops, max_boxes, pad=0)
            if 'mvrc_labels' in self.data_names:
                mvrc_labels = ibatch[self.data_names.index('mvrc_labels')]
                out['mvrc_labels'] = clip_pad_boxes(mvrc_labels, max_boxes, pad=0)
            # remaining fields pass through unpadded as tensors
            other_names = [data_name for data_name in self.data_names if data_name not in out]
            for name in other_names:
                out[name] = torch.as_tensor(ibatch[self.data_names.index(name)])
            batch[i] = tuple(out[data_name] for data_name in self.data_names)
            if self.append_ind:
                batch[i] += (torch.tensor(i, dtype=torch.int64),)
        out_tuple = ()
        for items in zip(*batch):
            if items[0] is None:
                # field is absent for the whole batch (e.g. image=None)
                out_tuple += (None,)
            else:
                out_tuple += (torch.stack(tuple(items), dim=0), )
        return out_tuple
| 4,167 | 42.416667 | 119 | py |
BertGen | BertGen-master/LanguageGeneration/data/build.py | import torch.utils.data
from .datasets import *
from . import samplers
from .transforms.build import build_transforms
from .collate_batch import BatchCollator
import pprint
from copy import deepcopy
# FM: Added mutli30k to available datasets
# Registry mapping config DATASET names to their dataset classes; register
# any new dataset here so it can be selected via cfg.DATASET.DATASET.
DATASET_CATALOGS = {
    'multi30k': Multi30kDataset,
    'multi30k_image_only': Multi30kDatasetImageOnly,
    'multi30k_image_only_COCO': Multi30kDatasetImageOnlyCOCO,
    'multi30k_image_only5x': Multi30kDatasetImageOnly5x,
    'multi30k_decoder': Multi30kDatasetDecoder,
    'multi30k_no_vision': Multi30kDatasetNoVision,
    'multi30k_taskB': Multi30kTaskBDataset,
    'parallel_text': ParallelTextDataset
}
def build_dataset(dataset_name, *args, **kwargs):
    """Instantiate the dataset class registered under ``dataset_name``.

    All positional and keyword arguments are forwarded to the dataset
    constructor. Raises AssertionError for an unknown name.
    """
    assert dataset_name in DATASET_CATALOGS, "dataset not in catalogs"
    dataset_cls = DATASET_CATALOGS[dataset_name]
    return dataset_cls(*args, **kwargs)
def make_data_sampler(dataset, shuffle, distributed, num_replicas, rank):
    """Build an index sampler over ``dataset``.

    Distributed runs get the project's DistributedSampler (shards indices
    across ``num_replicas`` processes); otherwise a plain random or
    sequential sampler, depending on ``shuffle``.
    """
    if distributed:
        return samplers.DistributedSampler(
            dataset, shuffle=shuffle, num_replicas=num_replicas, rank=rank)
    sampler_cls = (torch.utils.data.sampler.RandomSampler
                   if shuffle
                   else torch.utils.data.sampler.SequentialSampler)
    return sampler_cls(dataset)
def make_batch_data_sampler(dataset, sampler, aspect_grouping, batch_size):
    """Wrap an index ``sampler`` into batches of ``batch_size``.

    With ``aspect_grouping`` enabled, samples sharing the same
    ``dataset.group_ids`` entry (aspect-ratio group) are batched together;
    otherwise plain fixed-size batching is used. The final short batch is
    kept in both cases.
    """
    if not aspect_grouping:
        return torch.utils.data.sampler.BatchSampler(
            sampler, batch_size, drop_last=False)
    return samplers.GroupedBatchSampler(
        sampler, dataset.group_ids, batch_size, drop_uneven=False)
def make_dataloader(cfg, dataset=None, mode='train', distributed=False, num_replicas=None, rank=None,
                    expose_sampler=False):
    """Build a DataLoader for one dataset described by ``cfg``.

    :param cfg: experiment config; DATASET/TRAIN/VAL/TEST sections are read
        depending on ``mode``
    :param dataset: optional pre-built dataset; built from cfg when None
    :param mode: one of 'train', 'val', 'test'
    :param distributed: shard sampling across ``num_replicas`` processes
    :param expose_sampler: when True, also return the index sampler
    :return: DataLoader, or (DataLoader, sampler) if ``expose_sampler``
    """
    assert mode in ['train', 'val', 'test']
    # per-mode settings: annotation file, image set, batching and shuffling
    if mode == 'train':
        ann_file = cfg.DATASET.TRAIN_ANNOTATION_FILE
        image_set = cfg.DATASET.TRAIN_IMAGE_SET
        aspect_grouping = cfg.TRAIN.ASPECT_GROUPING
        num_gpu = len(cfg.GPUS.split(','))
        # BATCH_IMAGES is per GPU; scale to the total batch size
        batch_size = cfg.TRAIN.BATCH_IMAGES * num_gpu
        shuffle = cfg.TRAIN.SHUFFLE
        num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
    elif mode == 'val':
        ann_file = cfg.DATASET.VAL_ANNOTATION_FILE
        image_set = cfg.DATASET.VAL_IMAGE_SET
        aspect_grouping = False
        num_gpu = len(cfg.GPUS.split(','))
        batch_size = cfg.VAL.BATCH_IMAGES * num_gpu
        shuffle = cfg.VAL.SHUFFLE
        num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
    else:
        ann_file = cfg.DATASET.TEST_ANNOTATION_FILE
        image_set = cfg.DATASET.TEST_IMAGE_SET
        aspect_grouping = False
        num_gpu = len(cfg.GPUS.split(','))
        batch_size = cfg.TEST.BATCH_IMAGES * num_gpu
        shuffle = cfg.TEST.SHUFFLE
        num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
    transform = build_transforms(cfg, mode)
    if dataset is None:
        # only rank 0 writes the dataset cache to disk (cache_db)
        dataset = build_dataset(dataset_name=cfg.DATASET.DATASET, ann_file=ann_file, image_set=image_set,
                                seq_len=cfg.DATASET.SEQ_LEN, min_seq_len=cfg.DATASET.MIN_SEQ_LEN,
                                with_precomputed_visual_feat=cfg.NETWORK.IMAGE_FEAT_PRECOMPUTED,
                                mask_raw_pixels=cfg.NETWORK.MASK_RAW_PIXELS,
                                with_rel_task=cfg.NETWORK.WITH_REL_LOSS,
                                with_mlm_task=cfg.NETWORK.WITH_MLM_LOSS,
                                with_mvrc_task=cfg.NETWORK.WITH_MVRC_LOSS,
                                answer_vocab_file=cfg.DATASET.ANSWER_VOCAB_FILE,
                                root_path=cfg.DATASET.ROOT_PATH, data_path=cfg.DATASET.DATASET_PATH,
                                test_mode=(mode == 'test'), transform=transform,
                                zip_mode=cfg.DATASET.ZIP_MODE, cache_mode=cfg.DATASET.CACHE_MODE,
                                cache_db=True if (
                                    rank is None or rank == 0) else False,
                                ignore_db_cache=cfg.DATASET.IGNORE_DB_CACHE,
                                add_image_as_a_box=cfg.DATASET.ADD_IMAGE_AS_A_BOX,
                                aspect_grouping=aspect_grouping,
                                mask_size=(cfg.DATASET.MASK_SIZE,
                                           cfg.DATASET.MASK_SIZE),
                                pretrained_model_name=cfg.NETWORK.BERT_MODEL_NAME,
                                task_name=cfg.DATASET.TASK_NAME,
                                lang=cfg.DATASET.LANG)
    sampler = make_data_sampler(
        dataset, shuffle, distributed, num_replicas, rank)
    batch_sampler = make_batch_data_sampler(
        dataset, sampler, aspect_grouping, batch_size)
    collator = BatchCollator(
        dataset=dataset, append_ind=cfg.DATASET.APPEND_INDEX)
    dataloader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_sampler=batch_sampler,
                                             num_workers=num_workers,
                                             pin_memory=False,
                                             collate_fn=collator)
    if expose_sampler:
        return dataloader, sampler
    return dataloader
def make_dataloaders(cfg, mode='train', distributed=False, num_replicas=None, rank=None, expose_sampler=False):
    """Build one dataloader per dataset entry in a multi-dataset config.

    ``cfg.DATASET`` is a sequence of per-dataset configs and the BATCH_IMAGES
    settings are parallel lists; for each dataset a copy of ``cfg`` is
    specialised (single DATASET, scalar batch sizes) and handed to
    :func:`make_dataloader`.  Returns the list of its results, in order.
    """
    loaders = []
    for idx, dataset_cfg in enumerate(cfg.DATASET):
        # Specialise a private copy of the config for this dataset.
        per_ds_cfg = deepcopy(cfg)
        per_ds_cfg.DATASET = dataset_cfg
        per_ds_cfg.TRAIN.BATCH_IMAGES = cfg.TRAIN.BATCH_IMAGES[idx]
        per_ds_cfg.VAL.BATCH_IMAGES = cfg.VAL.BATCH_IMAGES[idx]
        per_ds_cfg.TEST.BATCH_IMAGES = cfg.TEST.BATCH_IMAGES[idx]
        loaders.append(
            make_dataloader(per_ds_cfg,
                            mode=mode,
                            distributed=distributed,
                            num_replicas=num_replicas,
                            rank=rank,
                            expose_sampler=expose_sampler)
        )
    return loaders
| 6,042 | 41.258741 | 111 | py |
BertGen | BertGen-master/LanguageGeneration/data/datasets/multi30k_no_vision.py | import random
import os
import time
import json
import jsonlines
from PIL import Image
import base64
import numpy as np
import logging
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from common.utils.zipreader import ZipReader
from common.utils.create_logger import makedirsExist
from copy import deepcopy
class Multi30kDatasetNoVision(Dataset):
    """Text-only Multi30k dataset (no visual stream).

    Serves paired EN/DE captions for translation-style generation.  In
    training mode every caption of the target language is expanded into one
    sample per token prefix with the final token masked, so the model is
    trained to predict the next token left-to-right through the MLM head.
    In test mode a single ``[MASK] [PAD]`` stub is emitted so decoding can
    start from scratch.
    """

    def __init__(self, ann_file, image_set, root_path, data_path, seq_len=64,
                 with_precomputed_visual_feat=False, mask_raw_pixels=True,
                 with_rel_task=True, with_mlm_task=True, with_mvrc_task=True,
                 transform=None, test_mode=False,
                 zip_mode=False, cache_mode=False, cache_db=False, ignore_db_cache=True,
                 tokenizer=None, pretrained_model_name=None,
                 add_image_as_a_box=False,
                 aspect_grouping=False, task_name="None", lang="second", **kwargs):
        """
        Conceptual Captions Dataset

        :param ann_file: annotation jsonl file
        :param image_set: image folder name, e.g., 'vcr1images'
        :param root_path: root path to cache database loaded from annotation file
        :param data_path: path to vcr dataset
        :param transform: transform
        :param test_mode: test mode means no labels available
        :param zip_mode: reading images and metadata in zip archive
        :param cache_mode: cache whole dataset to RAM first, then __getitem__ read them from RAM
        :param ignore_db_cache: ignore previous cached database, reload it from annotation file
        :param tokenizer: default is BertTokenizer from pytorch_pretrained_bert
        :param add_image_as_a_box: add whole image as a box
        :param aspect_grouping: whether to group images via their aspect
        :param task_name: task-prefix token prepended to every text sequence
        :param lang: 'second' generates German (EN visible as source);
            any other value generates English (DE visible as source)
        :param kwargs:
        """
        super(Multi30kDatasetNoVision, self).__init__()

        assert not cache_mode, 'currently not support cache mode!'
        # NOTE(review): test_mode is deliberately allowed here (inference).

        # Annotation file per split.
        annot = {'train': 'train_frcnn.json',
                 'val': 'val_frcnn.json',
                 'test2015': 'test_frcnn.json',
                 }

        self.seq_len = seq_len
        self.with_rel_task = with_rel_task
        self.with_mlm_task = with_mlm_task
        self.with_mvrc_task = with_mvrc_task
        self.data_path = data_path
        self.root_path = root_path
        self.ann_file = os.path.join(data_path, annot[image_set])
        self.with_precomputed_visual_feat = with_precomputed_visual_feat
        self.mask_raw_pixels = mask_raw_pixels
        self.image_set = image_set
        self.transform = transform
        self.test_mode = test_mode
        self.zip_mode = zip_mode
        self.cache_mode = cache_mode
        self.cache_db = cache_db
        self.ignore_db_cache = ignore_db_cache
        self.aspect_grouping = aspect_grouping
        self.cache_dir = os.path.join(root_path, 'cache')
        self.add_image_as_a_box = add_image_as_a_box
        if not os.path.exists(self.cache_dir):
            makedirsExist(self.cache_dir)
        self.tokenizer = tokenizer if tokenizer is not None \
            else BertTokenizer.from_pretrained(
                'bert-base-uncased' if pretrained_model_name is None else pretrained_model_name,
                cache_dir=self.cache_dir, do_lower_case=False)
        self.zipreader = ZipReader()

        # Task name is prepended as a prefix token to each sequence.
        self.task_name = task_name
        self.lang = lang

        # Load the raw annotation entries (one JSON object per line).
        self.simple_database = list(jsonlines.open(self.ann_file))

        if not self.test_mode:
            # Expand each caption into one entry per token prefix.  For a
            # caption of T tokens this yields T entries (prefixes of length
            # 1..T, each to be last-token-masked in __getitem__) plus one
            # final entry whose extra '[STOP]' token teaches the model when
            # to end generation.
            self.database = []
            db_pos = 0
            for entry in self.simple_database:
                if self.lang == "second":
                    caption_tokens_de = self.tokenizer.tokenize(
                        entry['caption_de'])
                    for pos, item in enumerate(caption_tokens_de):
                        self.database.append(deepcopy(entry))
                        self.database[db_pos]['caption_de'] = deepcopy(
                            caption_tokens_de[:pos+1])
                        db_pos += 1
                    self.database.append(deepcopy(self.database[db_pos-1]))
                    self.database[db_pos]['caption_de'] = self.database[db_pos]['caption_de'] + ['[STOP]']
                    db_pos += 1
                else:
                    caption_tokens_en = self.tokenizer.tokenize(
                        entry['caption_en'])
                    for pos, item in enumerate(caption_tokens_en):
                        self.database.append(deepcopy(entry))
                        self.database[db_pos]['caption_en'] = deepcopy(
                            caption_tokens_en[:pos+1])
                        db_pos += 1
                    self.database.append(deepcopy(self.database[db_pos-1]))
                    self.database[db_pos]['caption_en'] = self.database[db_pos]['caption_en'] + ['[STOP]']
                    db_pos += 1
            print('***********************')
            print('The dataset length is: ', len(self.database))
            print('Task: ', self.task_name)
            print('Lang: ', self.lang)
        else:
            self.database = self.simple_database

        if self.aspect_grouping:
            assert False, "not support aspect grouping currently!"
            self.group_ids = self.group_aspect(self.database)

        print('mask_raw_pixels: ', self.mask_raw_pixels)

    @property
    def data_names(self):
        """Field names matching the tuple returned by ``__getitem__``."""
        return ['text',
                'relationship_label', 'mlm_labels']

    def __getitem__(self, index):
        """Return ``(text_ids, relationship_label, mlm_labels)`` for one sample."""
        idb = self.database[index]

        # Task #1: Caption-Image Relationship Prediction.
        # With prob. 0.5 (when enabled) substitute a mismatched caption pair
        # drawn from a different entry and label the pair 0.
        # NOTE(review): loops forever if the database has a single entry.
        _p = random.random()
        if _p < 0.5 or (not self.with_rel_task):
            relationship_label = 1
            caption_en = idb['caption_en']
            caption_de = idb['caption_de']
        else:
            relationship_label = 0
            rand_index = random.randrange(0, len(self.database))
            while rand_index == index:
                rand_index = random.randrange(0, len(self.database))
            caption_en = self.database[rand_index]['caption_en']
            caption_de = self.database[rand_index]['caption_de']

        # Task #2: Masked Language Modeling - adapted for two languages.
        if self.with_mlm_task:
            if not self.test_mode:
                if self.lang == "second":
                    # Source (EN) is fully visible: no MLM labels.
                    caption_tokens_en = self.tokenizer.tokenize(caption_en)
                    mlm_labels_en = [-1] * len(caption_tokens_en)
                    # Target (DE) is a pre-tokenized prefix; mask its last
                    # token and use it as the label.  Copy the list first:
                    # the previous in-place '[MASK]' assignment mutated the
                    # cached database entry, corrupting it for later epochs.
                    caption_tokens_de = list(caption_de)
                    mlm_labels_de = [-1] * (len(caption_tokens_de)-1)
                    try:
                        mlm_labels_de.append(
                            self.tokenizer.vocab[caption_tokens_de[-1]])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        mlm_labels_de.append(self.tokenizer.vocab["[UNK]"])
                        # Fixed: previously referenced undefined `sub_token`,
                        # raising NameError on any OOV token.
                        logging.warning(
                            "Cannot find sub_token '{}' in vocab. Using [UNK] instead".format(caption_tokens_de[-1]))
                    caption_tokens_de[-1] = '[MASK]'
                else:
                    # Source (DE) is fully visible: no MLM labels.
                    caption_tokens_de = self.tokenizer.tokenize(caption_de)
                    mlm_labels_de = [-1] * len(caption_tokens_de)
                    # Target (EN): mask the last token of the prefix (copy
                    # first, see note above).
                    caption_tokens_en = list(caption_en)
                    mlm_labels_en = [-1] * (len(caption_tokens_en)-1)
                    try:
                        mlm_labels_en.append(
                            self.tokenizer.vocab[caption_tokens_en[-1]])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        mlm_labels_en.append(self.tokenizer.vocab["[UNK]"])
                        logging.warning(
                            "Cannot find sub_token '{}' in vocab. Using [UNK] instead".format(caption_tokens_en[-1]))
                    caption_tokens_en[-1] = '[MASK]'
            else:
                if self.lang == "second":
                    # Inference: start generation from a '[MASK] [PAD]' stub.
                    caption_tokens_en = self.tokenizer.tokenize(caption_en)
                    mlm_labels_en = [-1] * len(caption_tokens_en)
                    caption_tokens_de = self.tokenizer.tokenize(caption_de)
                    # 103 is the [MASK] id in the standard BERT vocab --
                    # TODO(review): confirm it equals self.tokenizer.vocab['[MASK]'].
                    mlm_labels_de = [103] + [-1]
                    caption_tokens_de = ['[MASK]'] + ['[PAD]']
                else:
                    caption_tokens_de = self.tokenizer.tokenize(caption_de)
                    mlm_labels_de = [-1] * len(caption_tokens_de)
                    caption_tokens_en = self.tokenizer.tokenize(caption_en)
                    mlm_labels_en = [103] + [-1]
                    caption_tokens_en = ['[MASK]'] + ['[PAD]']
        else:
            caption_tokens_en = self.tokenizer.tokenize(caption_en)
            caption_tokens_de = self.tokenizer.tokenize(caption_de)
            mlm_labels_en = [-1] * len(caption_tokens_en)
            mlm_labels_de = [-1] * len(caption_tokens_de)

        # Assemble: [task] [CLS] source [SEP] target [SEP]; -1 labels are
        # ignored by the loss.
        if self.lang == "second":
            text_tokens = [self.task_name] + ['[CLS]'] + \
                caption_tokens_en + ['[SEP]'] + caption_tokens_de + ['[SEP]']
            mlm_labels = [-1] + [-1] + mlm_labels_en + \
                [-1] + mlm_labels_de + [-1]
        else:
            text_tokens = [self.task_name] + ['[CLS]'] + \
                caption_tokens_de + ['[SEP]'] + caption_tokens_en + ['[SEP]']
            mlm_labels = [-1] + [-1] + mlm_labels_de + \
                [-1] + mlm_labels_en + [-1]

        text = self.tokenizer.convert_tokens_to_ids(text_tokens)

        # Truncate to max sequence length, always keeping the final token.
        if len(text) > self.seq_len:
            text_len_keep = len(text)
            while (text_len_keep) > self.seq_len and (text_len_keep > 0):
                text_len_keep -= 1
            if text_len_keep < 2:
                text_len_keep = 2
            text = text[:(text_len_keep - 1)] + [text[-1]]

        return text, relationship_label, mlm_labels

    def random_word_wwm(self, tokens):
        """Whole-word masking: mask each word (all its wordpieces) with
        probability 0.15 and return ``(masked_tokens, labels)``; unmasked
        positions get label -1."""
        output_tokens = []
        output_label = []

        for i, token in enumerate(tokens):
            sub_tokens = self.tokenizer.wordpiece_tokenizer.tokenize(token)
            prob = random.random()
            # mask token with 15% probability
            if prob < 0.15:
                # FM edit: always replace with [MASK]; the BERT-style
                # 80/10/10 randomization is intentionally disabled here.
                for sub_token in sub_tokens:
                    output_tokens.append("[MASK]")
                # append current token to output (we will predict these later)
                for sub_token in sub_tokens:
                    try:
                        output_label.append(self.tokenizer.vocab[sub_token])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        output_label.append(self.tokenizer.vocab["[UNK]"])
                        logging.warning(
                            "Cannot find sub_token '{}' in vocab. Using [UNK] instead".format(sub_token))
            else:
                for sub_token in sub_tokens:
                    # no masking token (will be ignored by loss function later)
                    output_tokens.append(sub_token)
                    output_label.append(-1)

        return output_tokens, output_label

    def random_mask_region(self, regions_cls_scores):
        """Mask each region with probability 0.15 * 0.9 and return
        ``(ops, labels)``; op 1 means "replace appearance feature by MASK",
        unmasked regions get an all-zero label vector."""
        num_regions, num_classes = regions_cls_scores.shape
        output_op = []
        output_label = []
        for k, cls_scores in enumerate(regions_cls_scores):
            prob = random.random()
            # mask region with 15% probability
            if prob < 0.15:
                prob /= 0.15
                if prob < 0.9:
                    # 90% randomly replace appearance feature by "MASK"
                    output_op.append(1)
                else:
                    # -> rest 10% randomly keep current appearance feature
                    output_op.append(0)
                # append class of region to output (we will predict these later)
                output_label.append(cls_scores)
            else:
                # no masking region (will be ignored by loss function later)
                output_op.append(0)
                output_label.append(np.zeros_like(cls_scores))

        return output_op, output_label

    @staticmethod
    def b64_decode(string):
        """Decode a base64 string into raw bytes."""
        return base64.decodebytes(string.encode())

    @staticmethod
    def group_aspect(database):
        """Assign each entry to group 0 (landscape) or 1 (portrait)."""
        print('grouping aspect...')
        t = time.time()

        # get shape of all images
        widths = torch.as_tensor([idb['width'] for idb in database])
        heights = torch.as_tensor([idb['height'] for idb in database])

        # group
        group_ids = torch.zeros(len(database))
        horz = widths >= heights
        # Fixed: `1 - horz` raises on bool tensors in modern torch.
        vert = ~horz
        group_ids[horz] = 0
        group_ids[vert] = 1

        print('Done (t={:.2f}s)'.format(time.time() - t))

        return group_ids

    def __len__(self):
        return len(self.database)

    def _load_image(self, path):
        """Load an RGB image, transparently handling zip-archive paths."""
        if '.zip@' in path:
            return self.zipreader.imread(path).convert('RGB')
        else:
            return Image.open(path).convert('RGB')

    def _load_json(self, path):
        """Load a JSON file, transparently handling zip-archive paths."""
        if '.zip@' in path:
            f = self.zipreader.read(path)
            return json.loads(f.decode())
        else:
            with open(path, 'r') as f:
                return json.load(f)
| 15,683 | 42.325967 | 106 | py |
BertGen | BertGen-master/LanguageGeneration/data/datasets/multi30k_image_only_COCO.py | import random
import os
import time
import json
import jsonlines
from PIL import Image
import base64
import numpy as np
import logging
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from common.utils.zipreader import ZipReader
from common.utils.create_logger import makedirsExist
from copy import deepcopy
class Multi30kDatasetImageOnlyCOCO(Dataset):
    """Image-conditioned caption-generation dataset (COCO-style features).

    Unlike the translation variants, only the target-language caption is fed
    to the model alongside Faster-RCNN region boxes: the text side is
    ``[task] [CLS] [SEP] caption [SEP]`` with no source caption.  In training
    mode each caption is expanded into one sample per token prefix with the
    final token masked; in test mode every 5th annotation entry is kept
    (presumably one per image with 5 captions each -- verify against the
    annotation file) and decoding starts from a ``[MASK] [PAD]`` stub.
    """

    def __init__(self, ann_file, image_set, root_path, data_path, seq_len=64,
                 with_precomputed_visual_feat=False, mask_raw_pixels=True,
                 with_rel_task=True, with_mlm_task=True, with_mvrc_task=True,
                 transform=None, test_mode=False,
                 zip_mode=False, cache_mode=False, cache_db=False, ignore_db_cache=True,
                 tokenizer=None, pretrained_model_name=None,
                 add_image_as_a_box=False,
                 aspect_grouping=False, task_name="None", lang="second", **kwargs):
        """
        Conceptual Captions Dataset

        :param ann_file: annotation jsonl file
        :param image_set: image folder name, e.g., 'vcr1images'
        :param root_path: root path to cache database loaded from annotation file
        :param data_path: path to vcr dataset
        :param transform: transform
        :param test_mode: test mode means no labels available
        :param zip_mode: reading images and metadata in zip archive
        :param cache_mode: cache whole dataset to RAM first, then __getitem__ read them from RAM
        :param ignore_db_cache: ignore previous cached database, reload it from annotation file
        :param tokenizer: default is BertTokenizer from pytorch_pretrained_bert
        :param add_image_as_a_box: add whole image as a box
        :param aspect_grouping: whether to group images via their aspect
        :param task_name: task-prefix token prepended to every text sequence
        :param lang: 'second' generates German captions, anything else English
        :param kwargs:
        """
        super(Multi30kDatasetImageOnlyCOCO, self).__init__()

        assert not cache_mode, 'currently not support cache mode!'
        # NOTE(review): test_mode is deliberately allowed here (inference).

        # Annotation file per split.
        annot = {'train': 'train_frcnn.json',
                 'val': 'val_frcnn.json',
                 'test2015': 'test_frcnn.json',
                 }

        self.seq_len = seq_len
        self.with_rel_task = with_rel_task
        self.with_mlm_task = with_mlm_task
        self.with_mvrc_task = with_mvrc_task
        self.data_path = data_path
        self.root_path = root_path
        self.ann_file = os.path.join(data_path, annot[image_set])
        self.with_precomputed_visual_feat = with_precomputed_visual_feat
        self.mask_raw_pixels = mask_raw_pixels
        self.image_set = image_set
        self.transform = transform
        self.test_mode = test_mode
        self.zip_mode = zip_mode
        self.cache_mode = cache_mode
        self.cache_db = cache_db
        self.ignore_db_cache = ignore_db_cache
        self.aspect_grouping = aspect_grouping
        self.cache_dir = os.path.join(root_path, 'cache')
        self.add_image_as_a_box = add_image_as_a_box
        if not os.path.exists(self.cache_dir):
            makedirsExist(self.cache_dir)
        self.tokenizer = tokenizer if tokenizer is not None \
            else BertTokenizer.from_pretrained(
                'bert-base-uncased' if pretrained_model_name is None else pretrained_model_name,
                cache_dir=self.cache_dir, do_lower_case=False)
        self.zipreader = ZipReader()

        # Task name is prepended as a prefix token to each sequence.
        self.task_name = task_name
        self.lang = lang

        # Load the raw annotation entries (one JSON object per line).
        self.simple_database = list(jsonlines.open(self.ann_file))
        if not self.zip_mode:
            # Rewrite zip-internal paths to plain filesystem paths.
            for i, idb in enumerate(self.simple_database):
                self.simple_database[i]['frcnn'] = idb['frcnn'].replace('.zip@', '')\
                    .replace('.0', '').replace('.1', '').replace('.2', '').replace('.3', '')
                self.simple_database[i]['image'] = idb['image'].replace(
                    '.zip@', '')

        # Normalise the frcnn archive name for the test split.
        # NOTE(review): old_id == image_id, so the second replace is a no-op
        # as written; kept for compatibility with the original pipeline.
        for i, idb in enumerate(self.simple_database):
            idb['frcnn'] = idb['frcnn'].replace(
                "test_2016_flickr_frcnn.zip", "test_frcnn.zip")
            old_id = idb['frcnn'].split('/')[1].split('.')[0]
            image_id = old_id
            self.simple_database[i]['frcnn'] = idb['frcnn'].replace(
                old_id, image_id)

        if not self.test_mode:
            # Expand each caption into one entry per token prefix, plus a
            # final entry carrying an extra '[STOP]' token so the model
            # learns when to stop generating.
            self.database = []
            db_pos = 0
            for entry in self.simple_database:
                if self.lang == "second":
                    caption_tokens_de = self.tokenizer.tokenize(
                        entry['caption_de'])
                    for pos, item in enumerate(caption_tokens_de):
                        self.database.append(deepcopy(entry))
                        self.database[db_pos]['caption_de'] = deepcopy(
                            caption_tokens_de[:pos+1])
                        db_pos += 1
                    self.database.append(deepcopy(self.database[db_pos-1]))
                    self.database[db_pos]['caption_de'] = self.database[db_pos]['caption_de'] + ['[STOP]']
                    db_pos += 1
                else:
                    caption_tokens_en = self.tokenizer.tokenize(
                        entry['caption_en'])
                    for pos, item in enumerate(caption_tokens_en):
                        self.database.append(deepcopy(entry))
                        self.database[db_pos]['caption_en'] = deepcopy(
                            caption_tokens_en[:pos+1])
                        db_pos += 1
                    self.database.append(deepcopy(self.database[db_pos-1]))
                    self.database[db_pos]['caption_en'] = self.database[db_pos]['caption_en'] + ['[STOP]']
                    db_pos += 1
            print('***********************')
            print('The dataset length is: ', len(self.database))
            print('Task: ', self.task_name)
            print('Lang: ', self.lang)
        else:
            # Keep every 5th entry (one caption per image).
            self.database = self.simple_database[::5]

        if self.aspect_grouping:
            assert False, "not support aspect grouping currently!"
            self.group_ids = self.group_aspect(self.database)

        print('mask_raw_pixels: ', self.mask_raw_pixels)

    @property
    def data_names(self):
        """Field names matching the tuple returned by ``__getitem__``."""
        return ['image', 'boxes', 'im_info', 'text',
                'relationship_label', 'mlm_labels', 'mvrc_ops', 'mvrc_labels']

    def __getitem__(self, index):
        """Return one training/inference sample:
        ``(image, boxes, im_info, text, relationship_label, mlm_labels,
        mvrc_ops, mvrc_labels)``."""
        idb = self.database[index]

        # --- image data ---------------------------------------------------
        # Boxes are always available from the precomputed Faster-RCNN json.
        frcnn_data = self._load_json(
            os.path.join(self.data_path, idb['frcnn']))
        boxes = np.frombuffer(self.b64_decode(frcnn_data['boxes']),
                              dtype=np.float32).reshape((frcnn_data['num_boxes'], -1))
        # Placeholder class scores (uniform ones over 1601 classes); real
        # scores are not shipped with the COCO-style features.
        boxes_cls_scores = np.ones((boxes.shape[0], 1601))
        boxes_max_conf = boxes_cls_scores.max(axis=1)
        # Sort boxes by confidence, descending (a no-op for uniform scores,
        # kept for parity with the other dataset variants).
        inds = np.argsort(boxes_max_conf)[::-1]
        boxes = boxes[inds]
        boxes_cls_scores = boxes_cls_scores[inds]
        boxes = torch.as_tensor(boxes)

        # Load precomputed features or the whole image depending on setup.
        if self.with_precomputed_visual_feat:
            image = None
            w0, h0 = frcnn_data['image_w'], frcnn_data['image_h']
            boxes_features = np.frombuffer(self.b64_decode(frcnn_data['features']),
                                           dtype=np.float32).reshape((frcnn_data['num_boxes'], -1))
            boxes_features = boxes_features[inds]
            boxes_features = torch.as_tensor(boxes_features)
        else:
            try:
                image = self._load_image(
                    os.path.join(self.data_path, idb['image']))
                w0, h0 = image.size
            except Exception:
                # Best-effort: fall back to a zero image of the recorded size.
                print("Failed to load image {}, use zero image!".format(
                    idb['image']))
                image = None
                w0, h0 = frcnn_data['image_w'], frcnn_data['image_h']

        # Append the whole image as an extra box (attended by text tokens).
        if self.add_image_as_a_box:
            image_box = torch.as_tensor([[0.0, 0.0, w0 - 1.0, h0 - 1.0]])
            boxes = torch.cat((image_box, boxes), dim=0)
            if self.with_precomputed_visual_feat:
                image_box_feat = boxes_features.mean(dim=0, keepdim=True)
                boxes_features = torch.cat(
                    (image_box_feat, boxes_features), dim=0)

        # transform
        im_info = torch.tensor([w0, h0, 1.0, 1.0, index])
        if self.transform is not None:
            image, boxes, _, im_info = self.transform(
                image, boxes, None, im_info)

        if image is None and (not self.with_precomputed_visual_feat):
            w = int(im_info[0].item())
            h = int(im_info[1].item())
            image = im_info.new_zeros((3, h, w), dtype=torch.float)

        # Clamp boxes to the (possibly transformed) image extent.
        w = im_info[0].item()
        h = im_info[1].item()
        boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=w-1)
        boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=h-1)

        # Task #1: Caption-Image Relationship Prediction.
        # NOTE(review): loops forever if the database has a single entry.
        _p = random.random()
        if _p < 0.5 or (not self.with_rel_task):
            relationship_label = 1
            if self.lang == "second":
                caption_de = idb['caption_de']
            else:
                caption_en = idb['caption_en']
        else:
            relationship_label = 0
            rand_index = random.randrange(0, len(self.database))
            while rand_index == index:
                rand_index = random.randrange(0, len(self.database))
            if self.lang == "second":
                caption_de = self.database[rand_index]['caption_de']
            else:
                caption_en = self.database[rand_index]['caption_en']

        # Task #2: Masked Language Modeling - adapted for two languages.
        if self.with_mlm_task:
            if not self.test_mode:
                if self.lang == "second":
                    # The caption is a pre-tokenized prefix; mask its last
                    # token and use it as the label.  Copy the list first:
                    # the previous in-place '[MASK]' assignment mutated the
                    # cached database entry, corrupting it for later epochs.
                    caption_tokens_de = list(caption_de)
                    mlm_labels_de = [-1] * (len(caption_tokens_de)-1)
                    try:
                        mlm_labels_de.append(
                            self.tokenizer.vocab[caption_tokens_de[-1]])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        mlm_labels_de.append(self.tokenizer.vocab["[UNK]"])
                        # Fixed: previously referenced undefined `sub_token`,
                        # raising NameError on any OOV token.
                        logging.warning(
                            "Cannot find sub_token '{}' in vocab. Using [UNK] instead".format(caption_tokens_de[-1]))
                    caption_tokens_de[-1] = '[MASK]'
                else:
                    caption_tokens_en = list(caption_en)
                    mlm_labels_en = [-1] * (len(caption_tokens_en)-1)
                    try:
                        mlm_labels_en.append(
                            self.tokenizer.vocab[caption_tokens_en[-1]])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        mlm_labels_en.append(self.tokenizer.vocab["[UNK]"])
                        logging.warning(
                            "Cannot find sub_token '{}' in vocab. Using [UNK] instead".format(caption_tokens_en[-1]))
                    caption_tokens_en[-1] = '[MASK]'
            else:
                if self.lang == "second":
                    # Inference: start generation from a '[MASK] [PAD]' stub.
                    # 103 is the [MASK] id in the standard BERT vocab --
                    # TODO(review): confirm it equals self.tokenizer.vocab['[MASK]'].
                    caption_tokens_de = self.tokenizer.tokenize(caption_de)
                    mlm_labels_de = [103] + [-1]
                    caption_tokens_de = ['[MASK]'] + ['[PAD]']
                else:
                    caption_tokens_en = self.tokenizer.tokenize(caption_en)
                    mlm_labels_en = [103] + [-1]
                    caption_tokens_en = ['[MASK]'] + ['[PAD]']
        else:
            if self.lang == "second":
                caption_tokens_de = self.tokenizer.tokenize(caption_de)
                mlm_labels_de = [-1] * len(caption_tokens_de)
            else:
                caption_tokens_en = self.tokenizer.tokenize(caption_en)
                mlm_labels_en = [-1] * len(caption_tokens_en)

        # Assemble: [task] [CLS] [SEP] caption [SEP] -- image-only variant,
        # so no source caption appears on the text side.
        if self.lang == "second":
            text_tokens = [self.task_name] + ['[CLS]'] + \
                ['[SEP]'] + caption_tokens_de + ['[SEP]']
            mlm_labels = [-1] + [-1] + [-1] + mlm_labels_de + [-1]
        else:
            text_tokens = [self.task_name] + ['[CLS]'] + \
                ['[SEP]'] + caption_tokens_en + ['[SEP]']
            mlm_labels = [-1] + [-1] + [-1] + mlm_labels_en + [-1]

        # Task #3: Masked Visual Region Classification.
        if self.with_mvrc_task:
            if self.add_image_as_a_box:
                mvrc_ops, mvrc_labels = self.random_mask_region(
                    boxes_cls_scores)
                # Never mask the whole-image box (prepend a no-op).
                mvrc_ops = [0] + mvrc_ops
                mvrc_labels = [np.zeros_like(
                    boxes_cls_scores[0])] + mvrc_labels
                num_real_boxes = boxes.shape[0] - 1
                num_masked_boxes = 0
                if self.with_precomputed_visual_feat:
                    # Recompute the whole-image feature as the mean of the
                    # *unmasked* region features.
                    boxes_features[0] *= num_real_boxes
                    for mvrc_op, box_feat in zip(mvrc_ops, boxes_features):
                        if mvrc_op == 1:
                            num_masked_boxes += 1
                            boxes_features[0] -= box_feat
                    boxes_features[0] /= (num_real_boxes -
                                          num_masked_boxes + 1e-5)
            else:
                mvrc_ops, mvrc_labels = self.random_mask_region(
                    boxes_cls_scores)
            assert len(mvrc_ops) == boxes.shape[0], \
                "Error: mvrc_ops have length {}, expected {}!".format(
                    len(mvrc_ops), boxes.shape[0])
            assert len(mvrc_labels) == boxes.shape[0], \
                "Error: mvrc_labels have length {}, expected {}!".format(
                    len(mvrc_labels), boxes.shape[0])
        else:
            mvrc_ops = [0] * boxes.shape[0]
            mvrc_labels = [np.zeros_like(boxes_cls_scores[0])] * boxes.shape[0]

        # Zero out pixels of masked RoIs so the backbone cannot see them.
        if (not self.with_precomputed_visual_feat) and self.mask_raw_pixels:
            for mvrc_op, box in zip(mvrc_ops, boxes):
                if mvrc_op == 1:
                    x1, y1, x2, y2 = box
                    image[:, int(y1):(int(y2)+1), int(x1):(int(x2)+1)] = 0

        mvrc_labels = np.stack(mvrc_labels, axis=0)

        text = self.tokenizer.convert_tokens_to_ids(text_tokens)

        if self.with_precomputed_visual_feat:
            boxes = torch.cat((boxes, boxes_features), dim=1)

        # Truncate text + boxes jointly to max sequence length, trimming the
        # longer side first and always keeping the final text token.
        if len(text) + len(boxes) > self.seq_len:
            text_len_keep = len(text)
            box_len_keep = len(boxes)
            while (text_len_keep + box_len_keep) > self.seq_len and (text_len_keep > 0) and (box_len_keep > 0):
                if box_len_keep > text_len_keep:
                    box_len_keep -= 1
                else:
                    text_len_keep -= 1
            if text_len_keep < 2:
                text_len_keep = 2
            if box_len_keep < 1:
                box_len_keep = 1
            boxes = boxes[:box_len_keep]
            text = text[:(text_len_keep - 1)] + [text[-1]]
            mlm_labels = mlm_labels[:(text_len_keep - 1)] + [mlm_labels[-1]]
            mvrc_ops = mvrc_ops[:box_len_keep]
            mvrc_labels = mvrc_labels[:box_len_keep]

        return image, boxes, im_info, text, relationship_label, mlm_labels, mvrc_ops, mvrc_labels

    def random_word_wwm(self, tokens):
        """Whole-word masking: mask each word (all its wordpieces) with
        probability 0.15 and return ``(masked_tokens, labels)``; unmasked
        positions get label -1."""
        output_tokens = []
        output_label = []

        for i, token in enumerate(tokens):
            sub_tokens = self.tokenizer.wordpiece_tokenizer.tokenize(token)
            prob = random.random()
            # mask token with 15% probability
            if prob < 0.15:
                # FM edit: always replace with [MASK]; the BERT-style
                # 80/10/10 randomization is intentionally disabled here.
                for sub_token in sub_tokens:
                    output_tokens.append("[MASK]")
                # append current token to output (we will predict these later)
                for sub_token in sub_tokens:
                    try:
                        output_label.append(self.tokenizer.vocab[sub_token])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        output_label.append(self.tokenizer.vocab["[UNK]"])
                        logging.warning(
                            "Cannot find sub_token '{}' in vocab. Using [UNK] instead".format(sub_token))
            else:
                for sub_token in sub_tokens:
                    # no masking token (will be ignored by loss function later)
                    output_tokens.append(sub_token)
                    output_label.append(-1)

        return output_tokens, output_label

    def random_mask_region(self, regions_cls_scores):
        """Mask each region with probability 0.15 * 0.9 and return
        ``(ops, labels)``; op 1 means "replace appearance feature by MASK",
        unmasked regions get an all-zero label vector."""
        num_regions, num_classes = regions_cls_scores.shape
        output_op = []
        output_label = []
        for k, cls_scores in enumerate(regions_cls_scores):
            prob = random.random()
            # mask region with 15% probability
            if prob < 0.15:
                prob /= 0.15
                if prob < 0.9:
                    # 90% randomly replace appearance feature by "MASK"
                    output_op.append(1)
                else:
                    # -> rest 10% randomly keep current appearance feature
                    output_op.append(0)
                # append class of region to output (we will predict these later)
                output_label.append(cls_scores)
            else:
                # no masking region (will be ignored by loss function later)
                output_op.append(0)
                output_label.append(np.zeros_like(cls_scores))

        return output_op, output_label

    @staticmethod
    def b64_decode(string):
        """Decode a base64 string into raw bytes."""
        return base64.decodebytes(string.encode())

    @staticmethod
    def group_aspect(database):
        """Assign each entry to group 0 (landscape) or 1 (portrait)."""
        print('grouping aspect...')
        t = time.time()

        # get shape of all images
        widths = torch.as_tensor([idb['width'] for idb in database])
        heights = torch.as_tensor([idb['height'] for idb in database])

        # group
        group_ids = torch.zeros(len(database))
        horz = widths >= heights
        # Fixed: `1 - horz` raises on bool tensors in modern torch.
        vert = ~horz
        group_ids[horz] = 0
        group_ids[vert] = 1

        print('Done (t={:.2f}s)'.format(time.time() - t))

        return group_ids

    def __len__(self):
        return len(self.database)

    def _load_image(self, path):
        """Load an RGB image, transparently handling zip-archive paths."""
        if '.zip@' in path:
            return self.zipreader.imread(path).convert('RGB')
        else:
            return Image.open(path).convert('RGB')

    def _load_json(self, path):
        """Load a JSON file, transparently handling zip-archive paths."""
        if '.zip@' in path:
            f = self.zipreader.read(path)
            return json.loads(f.decode())
        else:
            with open(path, 'r') as f:
                return json.load(f)
| 21,270 | 42.948347 | 111 | py |
BertGen | BertGen-master/LanguageGeneration/data/datasets/multi30k_image_only_5x.py | import random
import os
import time
import json
import jsonlines
from PIL import Image
import base64
import numpy as np
import logging
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from common.utils.zipreader import ZipReader
from common.utils.create_logger import makedirsExist
from copy import deepcopy
class Multi30kDatasetImageOnly5x(Dataset):
    def __init__(self, ann_file, image_set, root_path, data_path, seq_len=64,
                 with_precomputed_visual_feat=False, mask_raw_pixels=True,
                 with_rel_task=True, with_mlm_task=True, with_mvrc_task=True,
                 transform=None, test_mode=False,
                 zip_mode=False, cache_mode=False, cache_db=False, ignore_db_cache=True,
                 tokenizer=None, pretrained_model_name=None,
                 add_image_as_a_box=False,
                 aspect_grouping=False, **kwargs):
        """
        Conceptual Captions Dataset

        :param ann_file: annotation jsonl file
        :param image_set: image folder name, e.g., 'vcr1images'
        :param root_path: root path to cache database loaded from annotation file
        :param data_path: path to vcr dataset
        :param transform: transform
        :param test_mode: test mode means no labels available
        :param zip_mode: reading images and metadata in zip archive
        :param cache_mode: cache whole dataset to RAM first, then __getitem__ read them from RAM
        :param ignore_db_cache: ignore previous cached database, reload it from annotation file
        :param tokenizer: default is BertTokenizer from pytorch_pretrained_bert
        :param add_image_as_a_box: add whole image as a box
        :param aspect_grouping: whether to group images via their aspect
        :param kwargs:
        """
        super(Multi30kDatasetImageOnly5x, self).__init__()

        assert not cache_mode, 'currently not support cache mode!'

        # FM edit: commented out to allow testin
        # assert not test_mode

        # Annotation file per split; train split uses the 5-captions-per-image
        # variant ("5x"), hence the class name.
        annot = {'train': 'train_frcnn_5captions_both.json',
                 'val': 'val_frcnn.json',
                 'test2015': 'test_frcnn.json',
                 'test2018': 'test_frcnn2018.json'}

        self.seq_len = seq_len
        self.with_rel_task = with_rel_task
        self.with_mlm_task = with_mlm_task
        self.with_mvrc_task = with_mvrc_task
        self.data_path = data_path
        self.root_path = root_path
        self.ann_file = os.path.join(data_path, annot[image_set])
        self.with_precomputed_visual_feat = with_precomputed_visual_feat
        self.mask_raw_pixels = mask_raw_pixels
        self.image_set = image_set
        self.transform = transform
        self.test_mode = test_mode
        self.zip_mode = zip_mode
        self.cache_mode = cache_mode
        self.cache_db = cache_db
        self.ignore_db_cache = ignore_db_cache
        self.aspect_grouping = aspect_grouping
        self.cache_dir = os.path.join(root_path, 'cache')
        self.add_image_as_a_box = add_image_as_a_box
        if not os.path.exists(self.cache_dir):
            makedirsExist(self.cache_dir)
        # NOTE(review): unlike the other Multi30k dataset variants, this one
        # does not pass do_lower_case=False to the tokenizer -- confirm
        # whether that difference is intentional.
        self.tokenizer = tokenizer if tokenizer is not None \
            else BertTokenizer.from_pretrained(
                'bert-base-uncased' if pretrained_model_name is None else pretrained_model_name,
                cache_dir=self.cache_dir)
        self.zipreader = ZipReader()

        # FM: Customise for multi30k dataset
        # Load the raw annotation entries (one JSON object per line).
        self.simple_database = list(jsonlines.open(self.ann_file))
        if not self.zip_mode:
            # Rewrite zip-internal paths to plain filesystem paths.
            for i, idb in enumerate(self.simple_database):
                self.simple_database[i]['frcnn'] = idb['frcnn'].replace('.zip@', '')\
                    .replace('.0', '').replace('.1', '').replace('.2', '').replace('.3', '')
                self.simple_database[i]['image'] = idb['image'].replace('.zip@', '')

        if not self.test_mode:
            # Expand each German caption into one entry per token prefix
            # (the last token of each prefix is masked later in __getitem__)
            # plus a final entry carrying an extra '[STOP]' token.
            self.database = []
            db_pos = 0
            # create [MASK] every time
            for entry in self.simple_database:
                caption_tokens_de = self.tokenizer.tokenize(entry['caption_de'])
                # repeat each entry multiple times - MASK the last word in each case
                for pos, item in enumerate(caption_tokens_de):
                    self.database.append(deepcopy(entry))
                    self.database[db_pos]['caption_de'] = deepcopy(caption_tokens_de[:pos+1])
                    db_pos += 1
                # add one last entry with last token [STOP]
                self.database.append(deepcopy(self.database[db_pos-1]))
                self.database[db_pos]['caption_de'] = self.database[db_pos]['caption_de'] + ['[STOP]']
                db_pos += 1
        else:
            self.database = self.simple_database

        if self.aspect_grouping:
            # Unreachable by design: aspect grouping is not supported yet.
            assert False, "not support aspect grouping currently!"
            self.group_ids = self.group_aspect(self.database)

        print('mask_raw_pixels: ', self.mask_raw_pixels)
@property
def data_names(self):
return ['image', 'boxes', 'im_info', 'text',
'relationship_label', 'mlm_labels', 'mvrc_ops', 'mvrc_labels']
def __getitem__(self, index):
idb = self.database[index]
# image data
# IN ALL CASES: boxes and cls scores are available for each image
frcnn_data = self._load_json(os.path.join(self.data_path, idb['frcnn']))
boxes = np.frombuffer(self.b64_decode(frcnn_data['boxes']),
dtype=np.float32).reshape((frcnn_data['num_boxes'], -1))
boxes_cls_scores = np.frombuffer(self.b64_decode(frcnn_data['classes']),
dtype=np.float32).reshape((frcnn_data['num_boxes'], -1))
boxes_max_conf = boxes_cls_scores.max(axis=1)
inds = np.argsort(boxes_max_conf)[::-1]
boxes = boxes[inds]
boxes_cls_scores = boxes_cls_scores[inds]
boxes = torch.as_tensor(boxes)
# load precomputed features or the whole image depending on setup
if self.with_precomputed_visual_feat:
image = None
w0, h0 = frcnn_data['image_w'], frcnn_data['image_h']
boxes_features = np.frombuffer(self.b64_decode(frcnn_data['features']),
dtype=np.float32).reshape((frcnn_data['num_boxes'], -1))
boxes_features = boxes_features[inds]
boxes_features = torch.as_tensor(boxes_features)
else:
try:
image = self._load_image(os.path.join(self.data_path, idb['image']))
w0, h0 = image.size
except:
print("Failed to load image {}, use zero image!".format(idb['image']))
image = None
w0, h0 = frcnn_data['image_w'], frcnn_data['image_h']
# append whole image to tensor of boxes (used for all linguistic tokens)
if self.add_image_as_a_box:
image_box = torch.as_tensor([[0.0, 0.0, w0 - 1.0, h0 - 1.0]])
boxes = torch.cat((image_box, boxes), dim=0)
if self.with_precomputed_visual_feat:
image_box_feat = boxes_features.mean(dim=0, keepdim=True)
boxes_features = torch.cat((image_box_feat, boxes_features), dim=0)
# transform
im_info = torch.tensor([w0, h0, 1.0, 1.0, index])
if self.transform is not None:
image, boxes, _, im_info = self.transform(image, boxes, None, im_info)
if image is None and (not self.with_precomputed_visual_feat):
w = int(im_info[0].item())
h = int(im_info[1].item())
image = im_info.new_zeros((3, h, w), dtype=torch.float)
# clamp boxes
w = im_info[0].item()
h = im_info[1].item()
boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=w-1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=h-1)
# Task #1: Caption-Image Relationship Prediction
_p = random.random()
if _p < 0.5 or (not self.with_rel_task):
relationship_label = 1
caption_en = idb['caption_en']
caption_de = idb['caption_de']
else:
relationship_label = 0
rand_index = random.randrange(0, len(self.database))
while rand_index == index:
rand_index = random.randrange(0, len(self.database))
caption_en =self.database[rand_index]['caption_en']
caption_de =self.database[rand_index]['caption_de']
# Task #2: Masked Language Modeling - Adapted for two languages
if self.with_mlm_task:
if not self.test_mode:
# FM: removing joining of caption - split into two languages
caption_tokens_en = self.tokenizer.tokenize(caption_en)
mlm_labels_en = [-1] * len(caption_tokens_en)
# FM edit: Mask always the last token
caption_tokens_de = caption_de
mlm_labels_de = [-1] * (len(caption_tokens_de)-1)
try:
mlm_labels_de.append(self.tokenizer.vocab[caption_tokens_de[-1]])
except KeyError:
# For unknown words (should not occur with BPE vocab)
mlm_labels_de.append(self.tokenizer.vocab["[UNK]"])
logging.warning("Cannot find sub_token '{}' in vocab. Using [UNK] insetad".format(sub_token))
caption_tokens_de[-1] = '[MASK]'
else:
# FM TODO: fix inference
caption_tokens_en = self.tokenizer.tokenize(caption_en)
mlm_labels_en = [-1] * len(caption_tokens_en)
# FM edit: add [MASK] to start guessing caption
caption_tokens_de = self.tokenizer.tokenize(caption_de)
# FM edit: add label from vocabulary
mlm_labels_de = [103] + [-1]
caption_tokens_de = ['[MASK]'] + ['[PAD]']
else:
caption_tokens_en = self.tokenizer.tokenize(caption_en)
caption_tokens_de = self.tokenizer.tokenize(caption_de)
mlm_labels_en = [-1] * len(caption_tokens_en)
mlm_labels_de = [-1] * len(caption_tokens_de)
# remove english caption altogether - image captioning task
text_tokens = ['[CLS]'] + ['[SEP]'] + caption_tokens_de + ['[SEP]']
mlm_labels = [-1] + [-1] + mlm_labels_de + [-1]
# Task #3: Masked Visual Region Classification
if self.with_mvrc_task:
if self.add_image_as_a_box:
mvrc_ops, mvrc_labels = self.random_mask_region(boxes_cls_scores)
mvrc_ops = [0] + mvrc_ops
mvrc_labels = [np.zeros_like(boxes_cls_scores[0])] + mvrc_labels
num_real_boxes = boxes.shape[0] - 1
num_masked_boxes = 0
if self.with_precomputed_visual_feat:
boxes_features[0] *= num_real_boxes
for mvrc_op, box_feat in zip(mvrc_ops, boxes_features):
if mvrc_op == 1:
num_masked_boxes += 1
boxes_features[0] -= box_feat
boxes_features[0] /= (num_real_boxes - num_masked_boxes + 1e-5)
else:
mvrc_ops, mvrc_labels = self.random_mask_region(boxes_cls_scores)
assert len(mvrc_ops) == boxes.shape[0], \
"Error: mvrc_ops have length {}, expected {}!".format(len(mvrc_ops), boxes.shape[0])
assert len(mvrc_labels) == boxes.shape[0], \
"Error: mvrc_labels have length {}, expected {}!".format(len(mvrc_labels), boxes.shape[0])
else:
mvrc_ops = [0] * boxes.shape[0]
mvrc_labels = [np.zeros_like(boxes_cls_scores[0])] * boxes.shape[0]
# zero out pixels of masked RoI
if (not self.with_precomputed_visual_feat) and self.mask_raw_pixels:
for mvrc_op, box in zip(mvrc_ops, boxes):
if mvrc_op == 1:
x1, y1, x2, y2 = box
image[:, int(y1):(int(y2)+1), int(x1):(int(x2)+1)] = 0
# store labels for masked regions
mvrc_labels = np.stack(mvrc_labels, axis=0)
text = self.tokenizer.convert_tokens_to_ids(text_tokens)
if self.with_precomputed_visual_feat:
boxes = torch.cat((boxes, boxes_features), dim=1)
# truncate seq to max len
if len(text) + len(boxes) > self.seq_len:
text_len_keep = len(text)
box_len_keep = len(boxes)
while (text_len_keep + box_len_keep) > self.seq_len and (text_len_keep > 0) and (box_len_keep > 0):
if box_len_keep > text_len_keep:
box_len_keep -= 1
else:
text_len_keep -= 1
if text_len_keep < 2:
text_len_keep = 2
if box_len_keep < 1:
box_len_keep = 1
boxes = boxes[:box_len_keep]
text = text[:(text_len_keep - 1)] + [text[-1]]
mlm_labels = mlm_labels[:(text_len_keep - 1)] + [mlm_labels[-1]]
mvrc_ops = mvrc_ops[:box_len_keep]
mvrc_labels = mvrc_labels[:box_len_keep]
return image, boxes, im_info, text, relationship_label, mlm_labels, mvrc_ops, mvrc_labels
    def random_word_wwm(self, tokens):
        """Whole-word masking for the MLM task.

        Each input token is split into wordpieces; with probability 0.15 ALL
        of its wordpieces are replaced by [MASK] and their vocab ids become
        the labels, otherwise the wordpieces pass through with label -1
        (ignored by the loss). Note: unlike standard BERT, selected words are
        ALWAYS masked (the 10% random / 10% keep branches are commented out).

        :param tokens: list of whitespace-level tokens
        :return: (output_tokens, output_label) — wordpiece tokens and per-piece
                 vocab-id labels (-1 for unmasked pieces)
        """
        output_tokens = []
        output_label = []

        for i, token in enumerate(tokens):
            sub_tokens = self.tokenizer.wordpiece_tokenizer.tokenize(token)
            prob = random.random()
            # mask token with 15% probability
            if prob < 0.15:
                # prob /= 0.15
                # FM edit: always leave as mask
                # 80% randomly change token to mask token
                # if prob < 0.8:
                for sub_token in sub_tokens:
                    output_tokens.append("[MASK]")
                # 10% randomly change token to random token
                # elif prob < 0.9:
                #     for sub_token in sub_tokens:
                #         output_tokens.append(random.choice(list(self.tokenizer.vocab.keys())))
                # # -> rest 10% randomly keep current token
                # else:
                #     for sub_token in sub_tokens:
                #         output_tokens.append(sub_token)
                # append current token to output (we will predict these later)
                for sub_token in sub_tokens:
                    try:
                        output_label.append(self.tokenizer.vocab[sub_token])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        output_label.append(self.tokenizer.vocab["[UNK]"])
                        logging.warning("Cannot find sub_token '{}' in vocab. Using [UNK] insetad".format(sub_token))
            else:
                for sub_token in sub_tokens:
                    # no masking token (will be ignored by loss function later)
                    output_tokens.append(sub_token)
                    output_label.append(-1)

        ## if no word masked, random choose a word to mask
        # if all([l_ == -1 for l_ in output_label]):
        #     choosed = random.randrange(0, len(output_label))
        #     output_label[choosed] = self.tokenizer.vocab[tokens[choosed]]

        return output_tokens, output_label
def random_mask_region(self, regions_cls_scores):
num_regions, num_classes = regions_cls_scores.shape
output_op = []
output_label = []
for k, cls_scores in enumerate(regions_cls_scores):
prob = random.random()
# mask region with 15% probability
if prob < 0.15:
prob /= 0.15
if prob < 0.9:
# 90% randomly replace appearance feature by "MASK"
output_op.append(1)
else:
# -> rest 10% randomly keep current appearance feature
output_op.append(0)
# append class of region to output (we will predict these later)
output_label.append(cls_scores)
else:
# no masking region (will be ignored by loss function later)
output_op.append(0)
output_label.append(np.zeros_like(cls_scores))
# # if no region masked, random choose a region to mask
# if all([op == 0 for op in output_op]):
# choosed = random.randrange(0, len(output_op))
# output_op[choosed] = 1
# output_label[choosed] = regions_cls_scores[choosed]
return output_op, output_label
@staticmethod
def b64_decode(string):
return base64.decodebytes(string.encode())
@staticmethod
def group_aspect(database):
print('grouping aspect...')
t = time.time()
# get shape of all images
widths = torch.as_tensor([idb['width'] for idb in database])
heights = torch.as_tensor([idb['height'] for idb in database])
# group
group_ids = torch.zeros(len(database))
horz = widths >= heights
vert = 1 - horz
group_ids[horz] = 0
group_ids[vert] = 1
print('Done (t={:.2f}s)'.format(time.time() - t))
return group_ids
def __len__(self):
return len(self.database)
def _load_image(self, path):
if '.zip@' in path:
return self.zipreader.imread(path).convert('RGB')
else:
return Image.open(path).convert('RGB')
def _load_json(self, path):
if '.zip@' in path:
f = self.zipreader.read(path)
return json.loads(f.decode())
else:
with open(path, 'r') as f:
return json.load(f)
| 17,998 | 43.115196 | 117 | py |
BertGen | BertGen-master/LanguageGeneration/data/datasets/multi30k.py | import random
import os
import time
import json
import jsonlines
from PIL import Image
import base64
import numpy as np
import logging
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from common.utils.zipreader import ZipReader
from common.utils.create_logger import makedirsExist
from copy import deepcopy
class Multi30kDataset(Dataset):
    def __init__(self, ann_file, image_set, root_path, data_path, seq_len=64,
                 with_precomputed_visual_feat=False, mask_raw_pixels=True,
                 with_rel_task=True, with_mlm_task=True, with_mvrc_task=True,
                 transform=None, test_mode=False,
                 zip_mode=False, cache_mode=False, cache_db=False, ignore_db_cache=True,
                 tokenizer=None, pretrained_model_name=None,
                 add_image_as_a_box=False,
                 aspect_grouping=False, task_name="None", lang="second", **kwargs):
        """
        Multi30k translation/generation dataset.

        In training mode every caption is expanded into one database entry per
        token prefix (the last token of each prefix is later masked for
        next-token prediction), plus a final entry ending in [STOP].

        :param ann_file: annotation jsonl file
        :param image_set: split name, key into the `annot` mapping below
        :param root_path: root path to cache database loaded from annotation file
        :param data_path: path to the dataset (annotations, frcnn files, images)
        :param seq_len: maximum combined length of text tokens + boxes
        :param transform: transform
        :param test_mode: test mode means no labels available
        :param zip_mode: reading images and metadata in zip archive
        :param cache_mode: cache whole dataset to RAM first, then __getitem__ read them from RAM
        :param ignore_db_cache: ignore previous cached database, reload it from annotation file
        :param tokenizer: default is BertTokenizer from pytorch_pretrained_bert
        :param add_image_as_a_box: add whole image as a box
        :param aspect_grouping: whether to group images via their aspect
        :param task_name: special prefix token identifying the task
        :param lang: "second" -> generate caption_de, anything else -> caption_en
        :param kwargs:
        """
        super(Multi30kDataset, self).__init__()

        assert not cache_mode, 'currently not support cache mode!'
        # FM edit: commented out to allow testin
        # assert not test_mode

        # map split name -> annotation file within data_path
        annot = {'train': 'train_frcnn.json',
                 'val': 'val_frcnn.json',
                 'valshuffled': 'val_frcnn_shuffled.json',
                 'test2015': 'test_frcnn.json',
                 'test2015shuffled1': 'test_frcnn_shuffled_1.json',
                 'test2015shuffled2': 'test_frcnn_shuffled_2.json',
                 }

        self.seq_len = seq_len
        self.with_rel_task = with_rel_task
        self.with_mlm_task = with_mlm_task
        self.with_mvrc_task = with_mvrc_task
        self.data_path = data_path
        self.root_path = root_path
        self.ann_file = os.path.join(data_path, annot[image_set])
        self.with_precomputed_visual_feat = with_precomputed_visual_feat
        self.mask_raw_pixels = mask_raw_pixels
        self.image_set = image_set
        self.transform = transform
        self.test_mode = test_mode
        self.zip_mode = zip_mode
        self.cache_mode = cache_mode
        self.cache_db = cache_db
        self.ignore_db_cache = ignore_db_cache
        self.aspect_grouping = aspect_grouping
        self.cache_dir = os.path.join(root_path, 'cache')
        self.add_image_as_a_box = add_image_as_a_box
        if not os.path.exists(self.cache_dir):
            makedirsExist(self.cache_dir)
        # cased tokenizer (do_lower_case=False) — German captions are cased
        self.tokenizer = tokenizer if tokenizer is not None \
            else BertTokenizer.from_pretrained(
            'bert-base-uncased' if pretrained_model_name is None else pretrained_model_name,
            cache_dir=self.cache_dir, do_lower_case=False)

        self.zipreader = ZipReader()

        # FM: define task name to add prefix
        self.task_name = task_name
        self.lang = lang

        # FM: Customise for multi30k dataset
        self.simple_database = list(jsonlines.open(self.ann_file))
        if not self.zip_mode:
            # strip zip-archive markers from paths when reading plain files
            for i, idb in enumerate(self.simple_database):
                self.simple_database[i]['frcnn'] = idb['frcnn'].replace('.zip@', '')\
                    .replace('.0', '').replace('.1', '').replace('.2', '').replace('.3', '')
                self.simple_database[i]['image'] = idb['image'].replace(
                    '.zip@', '')

        # FM: TODO correct this
        for i, idb in enumerate(self.simple_database):
            # correct address:
            idb['frcnn'] = idb['frcnn'].replace(
                "test_2016_flickr_frcnn.zip", "test_frcnn.zip")
            # zero-pad the numeric image id to 8 characters
            old_id = idb['frcnn'].split('/')[1].split('.')[0]
            image_id = old_id
            while len(image_id) < 8:
                image_id = '0'+image_id
            self.simple_database[i]['frcnn'] = idb['frcnn'].replace(
                old_id, image_id)

        if not self.test_mode:
            self.database = []
            db_pos = 0
            # create [MASK] every time
            # expand each caption into one entry per token prefix, so every
            # next-token position becomes a training sample
            for entry in self.simple_database:
                if self.lang == "second":
                    caption_tokens_de = self.tokenizer.tokenize(
                        entry['caption_de'])
                    # repeat each entry multiple times - MASK the last word in each case
                    for pos, item in enumerate(caption_tokens_de):
                        self.database.append(deepcopy(entry))
                        self.database[db_pos]['caption_de'] = deepcopy(
                            caption_tokens_de[:pos+1])
                        db_pos += 1
                    # add one last entry with last token [STOP]
                    self.database.append(deepcopy(self.database[db_pos-1]))
                    self.database[db_pos]['caption_de'] = self.database[db_pos]['caption_de'] + ['[STOP]']
                    db_pos += 1
                else:
                    caption_tokens_en = self.tokenizer.tokenize(
                        entry['caption_en'])
                    # repeat each entry multiple times - MASK the last word in each case
                    for pos, item in enumerate(caption_tokens_en):
                        self.database.append(deepcopy(entry))
                        self.database[db_pos]['caption_en'] = deepcopy(
                            caption_tokens_en[:pos+1])
                        db_pos += 1
                    # add one last entry with last token [STOP]
                    self.database.append(deepcopy(self.database[db_pos-1]))
                    self.database[db_pos]['caption_en'] = self.database[db_pos]['caption_en'] + ['[STOP]']
                    db_pos += 1
            print('***********************')
            print('The dataset length is: ', len(self.database))
            print('Task: ', self.task_name)
            print('Lang: ', self.lang)
        else:
            self.database = self.simple_database

        if self.aspect_grouping:
            # NOTE(review): unconditionally disabled — the line below is unreachable
            assert False, "not support aspect grouping currently!"
            self.group_ids = self.group_aspect(self.database)

        print('mask_raw_pixels: ', self.mask_raw_pixels)
@property
def data_names(self):
return ['image', 'boxes', 'im_info', 'text',
'relationship_label', 'mlm_labels', 'mvrc_ops', 'mvrc_labels']
def __getitem__(self, index):
idb = self.database[index]
# image data
# IN ALL CASES: boxes and cls scores are available for each image
frcnn_data = self._load_json(
os.path.join(self.data_path, idb['frcnn']))
boxes = np.frombuffer(self.b64_decode(frcnn_data['boxes']),
dtype=np.float32).reshape((frcnn_data['num_boxes'], -1))
boxes_cls_scores = np.frombuffer(self.b64_decode(frcnn_data['classes']),
dtype=np.float32).reshape((frcnn_data['num_boxes'], -1))
boxes_max_conf = boxes_cls_scores.max(axis=1)
inds = np.argsort(boxes_max_conf)[::-1]
boxes = boxes[inds]
boxes_cls_scores = boxes_cls_scores[inds]
boxes = torch.as_tensor(boxes)
# load precomputed features or the whole image depending on setup
if self.with_precomputed_visual_feat:
image = None
w0, h0 = frcnn_data['image_w'], frcnn_data['image_h']
boxes_features = np.frombuffer(self.b64_decode(frcnn_data['features']),
dtype=np.float32).reshape((frcnn_data['num_boxes'], -1))
boxes_features = boxes_features[inds]
boxes_features = torch.as_tensor(boxes_features)
else:
try:
image = self._load_image(
os.path.join(self.data_path, idb['image']))
w0, h0 = image.size
except:
print("Failed to load image {}, use zero image!".format(
idb['image']))
image = None
w0, h0 = frcnn_data['image_w'], frcnn_data['image_h']
# append whole image to tensor of boxes (used for all linguistic tokens)
if self.add_image_as_a_box:
image_box = torch.as_tensor([[0.0, 0.0, w0 - 1.0, h0 - 1.0]])
boxes = torch.cat((image_box, boxes), dim=0)
if self.with_precomputed_visual_feat:
image_box_feat = boxes_features.mean(dim=0, keepdim=True)
boxes_features = torch.cat(
(image_box_feat, boxes_features), dim=0)
# transform
im_info = torch.tensor([w0, h0, 1.0, 1.0, index])
if self.transform is not None:
image, boxes, _, im_info = self.transform(
image, boxes, None, im_info)
if image is None and (not self.with_precomputed_visual_feat):
w = int(im_info[0].item())
h = int(im_info[1].item())
image = im_info.new_zeros((3, h, w), dtype=torch.float)
# clamp boxes
w = im_info[0].item()
h = im_info[1].item()
boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=w-1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=h-1)
# Task #1: Caption-Image Relationship Prediction
_p = random.random()
if _p < 0.5 or (not self.with_rel_task):
relationship_label = 1
caption_en = idb['caption_en']
caption_de = idb['caption_de']
else:
relationship_label = 0
rand_index = random.randrange(0, len(self.database))
while rand_index == index:
rand_index = random.randrange(0, len(self.database))
caption_en = self.database[rand_index]['caption_en']
caption_de = self.database[rand_index]['caption_de']
# Task #2: Masked Language Modeling - Adapted for two languages
if self.with_mlm_task:
if not self.test_mode:
if self.lang == "second":
# FM: removing joining of caption - split into two languages
caption_tokens_en = self.tokenizer.tokenize(caption_en)
mlm_labels_en = [-1] * len(caption_tokens_en)
# FM edit: Mask always the last token
caption_tokens_de = caption_de
mlm_labels_de = [-1] * (len(caption_tokens_de)-1)
try:
mlm_labels_de.append(
self.tokenizer.vocab[caption_tokens_de[-1]])
except KeyError:
# For unknown words (should not occur with BPE vocab)
mlm_labels_de.append(self.tokenizer.vocab["[UNK]"])
logging.warning(
"Cannot find sub_token '{}' in vocab. Using [UNK] insetad".format(sub_token))
caption_tokens_de[-1] = '[MASK]'
else:
# FM: removing joining of caption - split into two languages
caption_tokens_de = self.tokenizer.tokenize(caption_de)
mlm_labels_de = [-1] * len(caption_tokens_de)
# FM edit: Mask always the last token
caption_tokens_en = caption_en
mlm_labels_en = [-1] * (len(caption_tokens_en)-1)
try:
mlm_labels_en.append(
self.tokenizer.vocab[caption_tokens_en[-1]])
except KeyError:
# For unknown words (should not occur with BPE vocab)
mlm_labels_en.append(self.tokenizer.vocab["[UNK]"])
logging.warning(
"Cannot find sub_token '{}' in vocab. Using [UNK] insetad".format(sub_token))
caption_tokens_en[-1] = '[MASK]'
else:
if self.lang == "second":
# FM TODO: fix inference
caption_tokens_en = self.tokenizer.tokenize(caption_en)
mlm_labels_en = [-1] * len(caption_tokens_en)
# FM edit: add [MASK] to start guessing caption
caption_tokens_de = self.tokenizer.tokenize(caption_de)
# FM edit: add label from vocabulary
mlm_labels_de = [103] + [-1]
caption_tokens_de = ['[MASK]'] + ['[PAD]']
else:
# FM TODO: fix inference
caption_tokens_de = self.tokenizer.tokenize(caption_de)
mlm_labels_de = [-1] * len(caption_tokens_de)
# FM edit: add [MASK] to start guessing caption
caption_tokens_en = self.tokenizer.tokenize(caption_en)
# FM edit: add label from vocabulary
mlm_labels_en = [103] + [-1]
caption_tokens_en = ['[MASK]'] + ['[PAD]']
else:
caption_tokens_en = self.tokenizer.tokenize(caption_en)
caption_tokens_de = self.tokenizer.tokenize(caption_de)
mlm_labels_en = [-1] * len(caption_tokens_en)
mlm_labels_de = [-1] * len(caption_tokens_de)
if self.lang == "second":
text_tokens = [self.task_name] + ['[CLS]'] + \
caption_tokens_en + ['[SEP]'] + caption_tokens_de + ['[SEP]']
mlm_labels = [-1] + [-1] + mlm_labels_en + \
[-1] + mlm_labels_de + [-1]
else:
text_tokens = [self.task_name] + ['[CLS]'] + \
caption_tokens_de + ['[SEP]'] + caption_tokens_en + ['[SEP]']
mlm_labels = [-1] + [-1] + mlm_labels_de + \
[-1] + mlm_labels_en + [-1]
# Task #3: Masked Visual Region Classification
if self.with_mvrc_task:
if self.add_image_as_a_box:
mvrc_ops, mvrc_labels = self.random_mask_region(
boxes_cls_scores)
mvrc_ops = [0] + mvrc_ops
mvrc_labels = [np.zeros_like(
boxes_cls_scores[0])] + mvrc_labels
num_real_boxes = boxes.shape[0] - 1
num_masked_boxes = 0
if self.with_precomputed_visual_feat:
boxes_features[0] *= num_real_boxes
for mvrc_op, box_feat in zip(mvrc_ops, boxes_features):
if mvrc_op == 1:
num_masked_boxes += 1
boxes_features[0] -= box_feat
boxes_features[0] /= (num_real_boxes -
num_masked_boxes + 1e-5)
else:
mvrc_ops, mvrc_labels = self.random_mask_region(
boxes_cls_scores)
assert len(mvrc_ops) == boxes.shape[0], \
"Error: mvrc_ops have length {}, expected {}!".format(
len(mvrc_ops), boxes.shape[0])
assert len(mvrc_labels) == boxes.shape[0], \
"Error: mvrc_labels have length {}, expected {}!".format(
len(mvrc_labels), boxes.shape[0])
else:
mvrc_ops = [0] * boxes.shape[0]
mvrc_labels = [np.zeros_like(boxes_cls_scores[0])] * boxes.shape[0]
# zero out pixels of masked RoI
if (not self.with_precomputed_visual_feat) and self.mask_raw_pixels:
for mvrc_op, box in zip(mvrc_ops, boxes):
if mvrc_op == 1:
x1, y1, x2, y2 = box
image[:, int(y1):(int(y2)+1), int(x1):(int(x2)+1)] = 0
# store labels for masked regions
mvrc_labels = np.stack(mvrc_labels, axis=0)
text = self.tokenizer.convert_tokens_to_ids(text_tokens)
if self.with_precomputed_visual_feat:
boxes = torch.cat((boxes, boxes_features), dim=1)
# truncate seq to max len
if len(text) + len(boxes) > self.seq_len:
text_len_keep = len(text)
box_len_keep = len(boxes)
while (text_len_keep + box_len_keep) > self.seq_len and (text_len_keep > 0) and (box_len_keep > 0):
if box_len_keep > text_len_keep:
box_len_keep -= 1
else:
text_len_keep -= 1
if text_len_keep < 2:
text_len_keep = 2
if box_len_keep < 1:
box_len_keep = 1
boxes = boxes[:box_len_keep]
text = text[:(text_len_keep - 1)] + [text[-1]]
mlm_labels = mlm_labels[:(text_len_keep - 1)] + [mlm_labels[-1]]
mvrc_ops = mvrc_ops[:box_len_keep]
mvrc_labels = mvrc_labels[:box_len_keep]
return image, boxes, im_info, text, relationship_label, mlm_labels, mvrc_ops, mvrc_labels
    def random_word_wwm(self, tokens):
        """Whole-word masking for the MLM task.

        Each input token is split into wordpieces; with probability 0.15 ALL
        of its wordpieces are replaced by [MASK] and their vocab ids become
        the labels, otherwise the wordpieces pass through with label -1
        (ignored by the loss). Note: unlike standard BERT, selected words are
        ALWAYS masked (the 10% random / 10% keep branches are commented out).

        :param tokens: list of whitespace-level tokens
        :return: (output_tokens, output_label) — wordpiece tokens and per-piece
                 vocab-id labels (-1 for unmasked pieces)
        """
        output_tokens = []
        output_label = []

        for i, token in enumerate(tokens):
            sub_tokens = self.tokenizer.wordpiece_tokenizer.tokenize(token)
            prob = random.random()
            # mask token with 15% probability
            if prob < 0.15:
                # prob /= 0.15
                # FM edit: always leave as mask
                # 80% randomly change token to mask token
                # if prob < 0.8:
                for sub_token in sub_tokens:
                    output_tokens.append("[MASK]")
                # 10% randomly change token to random token
                # elif prob < 0.9:
                #     for sub_token in sub_tokens:
                #         output_tokens.append(random.choice(list(self.tokenizer.vocab.keys())))
                # # -> rest 10% randomly keep current token
                # else:
                #     for sub_token in sub_tokens:
                #         output_tokens.append(sub_token)
                # append current token to output (we will predict these later)
                for sub_token in sub_tokens:
                    try:
                        output_label.append(self.tokenizer.vocab[sub_token])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        output_label.append(self.tokenizer.vocab["[UNK]"])
                        logging.warning(
                            "Cannot find sub_token '{}' in vocab. Using [UNK] insetad".format(sub_token))
            else:
                for sub_token in sub_tokens:
                    # no masking token (will be ignored by loss function later)
                    output_tokens.append(sub_token)
                    output_label.append(-1)

        # if no word masked, random choose a word to mask
        # if all([l_ == -1 for l_ in output_label]):
        #     choosed = random.randrange(0, len(output_label))
        #     output_label[choosed] = self.tokenizer.vocab[tokens[choosed]]

        return output_tokens, output_label
def random_mask_region(self, regions_cls_scores):
num_regions, num_classes = regions_cls_scores.shape
output_op = []
output_label = []
for k, cls_scores in enumerate(regions_cls_scores):
prob = random.random()
# mask region with 15% probability
if prob < 0.15:
prob /= 0.15
if prob < 0.9:
# 90% randomly replace appearance feature by "MASK"
output_op.append(1)
else:
# -> rest 10% randomly keep current appearance feature
output_op.append(0)
# append class of region to output (we will predict these later)
output_label.append(cls_scores)
else:
# no masking region (will be ignored by loss function later)
output_op.append(0)
output_label.append(np.zeros_like(cls_scores))
# # if no region masked, random choose a region to mask
# if all([op == 0 for op in output_op]):
# choosed = random.randrange(0, len(output_op))
# output_op[choosed] = 1
# output_label[choosed] = regions_cls_scores[choosed]
return output_op, output_label
@staticmethod
def b64_decode(string):
return base64.decodebytes(string.encode())
@staticmethod
def group_aspect(database):
print('grouping aspect...')
t = time.time()
# get shape of all images
widths = torch.as_tensor([idb['width'] for idb in database])
heights = torch.as_tensor([idb['height'] for idb in database])
# group
group_ids = torch.zeros(len(database))
horz = widths >= heights
vert = 1 - horz
group_ids[horz] = 0
group_ids[vert] = 1
print('Done (t={:.2f}s)'.format(time.time() - t))
return group_ids
def __len__(self):
return len(self.database)
def _load_image(self, path):
if '.zip@' in path:
return self.zipreader.imread(path).convert('RGB')
else:
return Image.open(path).convert('RGB')
def _load_json(self, path):
if '.zip@' in path:
f = self.zipreader.read(path)
return json.loads(f.decode())
else:
with open(path, 'r') as f:
return json.load(f)
| 22,174 | 43.707661 | 111 | py |
BertGen | BertGen-master/LanguageGeneration/data/datasets/multi30k_image_only.py | import random
import os
import time
import json
import jsonlines
from PIL import Image
import base64
import numpy as np
import logging
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from common.utils.zipreader import ZipReader
from common.utils.create_logger import makedirsExist
from copy import deepcopy
class Multi30kDatasetImageOnly(Dataset):
    def __init__(self, ann_file, image_set, root_path, data_path, seq_len=64,
                 with_precomputed_visual_feat=False, mask_raw_pixels=True,
                 with_rel_task=True, with_mlm_task=True, with_mvrc_task=True,
                 transform=None, test_mode=False,
                 zip_mode=False, cache_mode=False, cache_db=False, ignore_db_cache=True,
                 tokenizer=None, pretrained_model_name=None,
                 add_image_as_a_box=False,
                 aspect_grouping=False, task_name="None", lang="second", **kwargs):
        """
        Multi30k image-only (captioning) dataset.

        In training mode every caption is expanded into one database entry per
        token prefix (the last token of each prefix is later masked for
        next-token prediction), plus a final entry ending in [STOP]. In test
        mode the database is subsampled to one caption per image depending on
        ``task_name``.

        :param ann_file: annotation jsonl file
        :param image_set: split name, key into the `annot` mapping below
        :param root_path: root path to cache database loaded from annotation file
        :param data_path: path to the dataset (annotations, frcnn files, images)
        :param seq_len: maximum combined length of text tokens + boxes
        :param transform: transform
        :param test_mode: test mode means no labels available
        :param zip_mode: reading images and metadata in zip archive
        :param cache_mode: cache whole dataset to RAM first, then __getitem__ read them from RAM
        :param ignore_db_cache: ignore previous cached database, reload it from annotation file
        :param tokenizer: default is BertTokenizer from pytorch_pretrained_bert
        :param add_image_as_a_box: add whole image as a box
        :param aspect_grouping: whether to group images via their aspect
        :param task_name: special prefix token identifying the task (e.g. '[TO_TU]')
        :param lang: "second" -> generate caption_de, anything else -> caption_en
        :param kwargs:
        """
        super(Multi30kDatasetImageOnly, self).__init__()

        assert not cache_mode, 'currently not support cache mode!'
        # FM edit: commented out to allow testin
        # assert not test_mode

        # map split name -> annotation file within data_path
        annot = {'train': 'train_frcnn.json',
                 'val': 'val_frcnn.json',
                 'test2015': 'test_frcnn.json',
                 'test2018': 'test_frcnn2018.json',
                 }

        self.seq_len = seq_len
        self.with_rel_task = with_rel_task
        self.with_mlm_task = with_mlm_task
        self.with_mvrc_task = with_mvrc_task
        self.data_path = data_path
        self.root_path = root_path
        self.ann_file = os.path.join(data_path, annot[image_set])
        self.with_precomputed_visual_feat = with_precomputed_visual_feat
        self.mask_raw_pixels = mask_raw_pixels
        self.image_set = image_set
        self.transform = transform
        self.test_mode = test_mode
        self.zip_mode = zip_mode
        self.cache_mode = cache_mode
        self.cache_db = cache_db
        self.ignore_db_cache = ignore_db_cache
        self.aspect_grouping = aspect_grouping
        self.cache_dir = os.path.join(root_path, 'cache')
        self.add_image_as_a_box = add_image_as_a_box
        if not os.path.exists(self.cache_dir):
            makedirsExist(self.cache_dir)
        # cased tokenizer (do_lower_case=False) — target captions are cased
        self.tokenizer = tokenizer if tokenizer is not None \
            else BertTokenizer.from_pretrained(
            'bert-base-uncased' if pretrained_model_name is None else pretrained_model_name,
            cache_dir=self.cache_dir, do_lower_case=False)

        self.zipreader = ZipReader()

        # FM: define task name to add prefix
        self.task_name = task_name
        self.lang = lang

        # FM: Customise for multi30k dataset
        self.simple_database = list(jsonlines.open(self.ann_file))
        if not self.zip_mode:
            # strip zip-archive markers from paths when reading plain files
            for i, idb in enumerate(self.simple_database):
                self.simple_database[i]['frcnn'] = idb['frcnn'].replace('.zip@', '')\
                    .replace('.0', '').replace('.1', '').replace('.2', '').replace('.3', '')
                self.simple_database[i]['image'] = idb['image'].replace(
                    '.zip@', '')

        # FM: TODO correct this
        for i, idb in enumerate(self.simple_database):
            # correct address:
            idb['frcnn'] = idb['frcnn'].replace(
                "test_2016_flickr_frcnn.zip", "test_frcnn.zip")
            # zero-pad the numeric image id to 8 characters
            old_id = idb['frcnn'].split('/')[1].split('.')[0]
            image_id = old_id
            while len(image_id) < 8:
                image_id = '0'+image_id
            self.simple_database[i]['frcnn'] = idb['frcnn'].replace(
                old_id, image_id)

        if not self.test_mode:
            self.database = []
            db_pos = 0
            # create [MASK] every time
            # expand each caption into one entry per token prefix, so every
            # next-token position becomes a training sample
            for entry in self.simple_database:
                if self.lang == "second":
                    caption_tokens_de = self.tokenizer.tokenize(
                        entry['caption_de'])
                    # repeat each entry multiple times - MASK the last word in each case
                    for pos, item in enumerate(caption_tokens_de):
                        self.database.append(deepcopy(entry))
                        self.database[db_pos]['caption_de'] = deepcopy(
                            caption_tokens_de[:pos+1])
                        db_pos += 1
                    # add one last entry with last token [STOP]
                    self.database.append(deepcopy(self.database[db_pos-1]))
                    self.database[db_pos]['caption_de'] = self.database[db_pos]['caption_de'] + ['[STOP]']
                    db_pos += 1
                else:
                    caption_tokens_en = self.tokenizer.tokenize(
                        entry['caption_en'])
                    # repeat each entry multiple times - MASK the last word in each case
                    for pos, item in enumerate(caption_tokens_en):
                        self.database.append(deepcopy(entry))
                        self.database[db_pos]['caption_en'] = deepcopy(
                            caption_tokens_en[:pos+1])
                        db_pos += 1
                    # add one last entry with last token [STOP]
                    self.database.append(deepcopy(self.database[db_pos-1]))
                    self.database[db_pos]['caption_en'] = self.database[db_pos]['caption_en'] + ['[STOP]']
                    db_pos += 1
            print('***********************')
            print('The dataset length is: ', len(self.database))
            print('Task: ', self.task_name)
            print('Lang: ', self.lang)
        else:
            # ignore multiple in turkish (2xcaption), english/german(5xcaption)
            if self.task_name == '[TO_TU]':
                self.database = self.simple_database[::2]
            elif self.task_name == '[TO_FR]':
                self.database = self.simple_database
            else:
                self.database = self.simple_database[::5]

        if self.aspect_grouping:
            # NOTE(review): unconditionally disabled — the line below is unreachable
            assert False, "not support aspect grouping currently!"
            self.group_ids = self.group_aspect(self.database)

        print('mask_raw_pixels: ', self.mask_raw_pixels)
@property
def data_names(self):
return ['image', 'boxes', 'im_info', 'text',
'relationship_label', 'mlm_labels', 'mvrc_ops', 'mvrc_labels']
def __getitem__(self, index):
idb = self.database[index]
# image data
# IN ALL CASES: boxes and cls scores are available for each image
frcnn_data = self._load_json(
os.path.join(self.data_path, idb['frcnn']))
boxes = np.frombuffer(self.b64_decode(frcnn_data['boxes']),
dtype=np.float32).reshape((frcnn_data['num_boxes'], -1))
boxes_cls_scores = np.frombuffer(self.b64_decode(frcnn_data['classes']),
dtype=np.float32).reshape((frcnn_data['num_boxes'], -1))
boxes_max_conf = boxes_cls_scores.max(axis=1)
inds = np.argsort(boxes_max_conf)[::-1]
boxes = boxes[inds]
boxes_cls_scores = boxes_cls_scores[inds]
boxes = torch.as_tensor(boxes)
# load precomputed features or the whole image depending on setup
if self.with_precomputed_visual_feat:
image = None
w0, h0 = frcnn_data['image_w'], frcnn_data['image_h']
boxes_features = np.frombuffer(self.b64_decode(frcnn_data['features']),
dtype=np.float32).reshape((frcnn_data['num_boxes'], -1))
boxes_features = boxes_features[inds]
boxes_features = torch.as_tensor(boxes_features)
else:
try:
image = self._load_image(
os.path.join(self.data_path, idb['image']))
w0, h0 = image.size
except:
print("Failed to load image {}, use zero image!".format(
idb['image']))
image = None
w0, h0 = frcnn_data['image_w'], frcnn_data['image_h']
# append whole image to tensor of boxes (used for all linguistic tokens)
if self.add_image_as_a_box:
image_box = torch.as_tensor([[0.0, 0.0, w0 - 1.0, h0 - 1.0]])
boxes = torch.cat((image_box, boxes), dim=0)
if self.with_precomputed_visual_feat:
image_box_feat = boxes_features.mean(dim=0, keepdim=True)
boxes_features = torch.cat(
(image_box_feat, boxes_features), dim=0)
# transform
im_info = torch.tensor([w0, h0, 1.0, 1.0, index])
if self.transform is not None:
image, boxes, _, im_info = self.transform(
image, boxes, None, im_info)
if image is None and (not self.with_precomputed_visual_feat):
w = int(im_info[0].item())
h = int(im_info[1].item())
image = im_info.new_zeros((3, h, w), dtype=torch.float)
# clamp boxes
w = im_info[0].item()
h = im_info[1].item()
boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=w-1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=h-1)
# Task #1: Caption-Image Relationship Prediction
_p = random.random()
if _p < 0.5 or (not self.with_rel_task):
relationship_label = 1
if self.lang == "second":
caption_de = idb['caption_de']
else:
caption_en = idb['caption_en']
else:
relationship_label = 0
rand_index = random.randrange(0, len(self.database))
while rand_index == index:
rand_index = random.randrange(0, len(self.database))
if self.lang == "second":
caption_de = self.database[rand_index]['caption_de']
else:
caption_en = self.database[rand_index]['caption_en']
# Task #2: Masked Language Modeling - Adapted for two languages
if self.with_mlm_task:
if not self.test_mode:
if self.lang == "second":
# FM edit: Mask always the last token
caption_tokens_de = caption_de
mlm_labels_de = [-1] * (len(caption_tokens_de)-1)
try:
mlm_labels_de.append(
self.tokenizer.vocab[caption_tokens_de[-1]])
except KeyError:
# For unknown words (should not occur with BPE vocab)
mlm_labels_de.append(self.tokenizer.vocab["[UNK]"])
logging.warning(
"Cannot find sub_token '{}' in vocab. Using [UNK] insetad".format(sub_token))
caption_tokens_de[-1] = '[MASK]'
else:
# FM edit: Mask always the last token
caption_tokens_en = caption_en
mlm_labels_en = [-1] * (len(caption_tokens_en)-1)
try:
mlm_labels_en.append(
self.tokenizer.vocab[caption_tokens_en[-1]])
except KeyError:
# For unknown words (should not occur with BPE vocab)
mlm_labels_en.append(self.tokenizer.vocab["[UNK]"])
logging.warning(
"Cannot find sub_token '{}' in vocab. Using [UNK] insetad".format(sub_token))
caption_tokens_en[-1] = '[MASK]'
else:
if self.lang == "second":
# FM edit: add [MASK] to start guessing caption
caption_tokens_de = self.tokenizer.tokenize(caption_de)
# FM edit: add label from vocabulary
mlm_labels_de = [103] + [-1]
caption_tokens_de = ['[MASK]'] + ['[PAD]']
else:
# FM edit: add [MASK] to start guessing caption
caption_tokens_en = self.tokenizer.tokenize(caption_en)
# FM edit: add label from vocabulary
mlm_labels_en = [103] + [-1]
caption_tokens_en = ['[MASK]'] + ['[PAD]']
else:
if self.lang == "second":
caption_tokens_de = self.tokenizer.tokenize(caption_de)
mlm_labels_de = [-1] * len(caption_tokens_de)
else:
caption_tokens_en = self.tokenizer.tokenize(caption_en)
mlm_labels_en = [-1] * len(caption_tokens_en)
if self.lang == "second":
text_tokens = [self.task_name] + ['[CLS]'] + \
['[SEP]'] + caption_tokens_de + ['[SEP]']
mlm_labels = [-1] + [-1] + [-1] + mlm_labels_de + [-1]
else:
text_tokens = [self.task_name] + ['[CLS]'] + \
['[SEP]'] + caption_tokens_en + ['[SEP]']
mlm_labels = [-1] + [-1] + [-1] + mlm_labels_en + [-1]
# Task #3: Masked Visual Region Classification
if self.with_mvrc_task:
if self.add_image_as_a_box:
mvrc_ops, mvrc_labels = self.random_mask_region(
boxes_cls_scores)
mvrc_ops = [0] + mvrc_ops
mvrc_labels = [np.zeros_like(
boxes_cls_scores[0])] + mvrc_labels
num_real_boxes = boxes.shape[0] - 1
num_masked_boxes = 0
if self.with_precomputed_visual_feat:
boxes_features[0] *= num_real_boxes
for mvrc_op, box_feat in zip(mvrc_ops, boxes_features):
if mvrc_op == 1:
num_masked_boxes += 1
boxes_features[0] -= box_feat
boxes_features[0] /= (num_real_boxes -
num_masked_boxes + 1e-5)
else:
mvrc_ops, mvrc_labels = self.random_mask_region(
boxes_cls_scores)
assert len(mvrc_ops) == boxes.shape[0], \
"Error: mvrc_ops have length {}, expected {}!".format(
len(mvrc_ops), boxes.shape[0])
assert len(mvrc_labels) == boxes.shape[0], \
"Error: mvrc_labels have length {}, expected {}!".format(
len(mvrc_labels), boxes.shape[0])
else:
mvrc_ops = [0] * boxes.shape[0]
mvrc_labels = [np.zeros_like(boxes_cls_scores[0])] * boxes.shape[0]
# zero out pixels of masked RoI
if (not self.with_precomputed_visual_feat) and self.mask_raw_pixels:
for mvrc_op, box in zip(mvrc_ops, boxes):
if mvrc_op == 1:
x1, y1, x2, y2 = box
image[:, int(y1):(int(y2)+1), int(x1):(int(x2)+1)] = 0
# store labels for masked regions
mvrc_labels = np.stack(mvrc_labels, axis=0)
text = self.tokenizer.convert_tokens_to_ids(text_tokens)
if self.with_precomputed_visual_feat:
boxes = torch.cat((boxes, boxes_features), dim=1)
# truncate seq to max len
if len(text) + len(boxes) > self.seq_len:
text_len_keep = len(text)
box_len_keep = len(boxes)
while (text_len_keep + box_len_keep) > self.seq_len and (text_len_keep > 0) and (box_len_keep > 0):
if box_len_keep > text_len_keep:
box_len_keep -= 1
else:
text_len_keep -= 1
if text_len_keep < 2:
text_len_keep = 2
if box_len_keep < 1:
box_len_keep = 1
boxes = boxes[:box_len_keep]
text = text[:(text_len_keep - 1)] + [text[-1]]
mlm_labels = mlm_labels[:(text_len_keep - 1)] + [mlm_labels[-1]]
mvrc_ops = mvrc_ops[:box_len_keep]
mvrc_labels = mvrc_labels[:box_len_keep]
return image, boxes, im_info, text, relationship_label, mlm_labels, mvrc_ops, mvrc_labels
    def random_word_wwm(self, tokens):
        """Whole-word masking for MLM pretraining.

        Each word is masked with probability 0.15; a masked word has *all* of
        its wordpieces replaced by '[MASK]' and labeled with their vocab ids.
        Unmasked wordpieces are labeled -1 (ignored by the loss).

        Returns:
            (output_tokens, output_label): the (possibly masked) wordpiece
            sequence and a parallel list of label ids.
        """
        output_tokens = []
        output_label = []
        for i, token in enumerate(tokens):
            sub_tokens = self.tokenizer.wordpiece_tokenizer.tokenize(token)
            prob = random.random()
            # mask token with 15% probability
            if prob < 0.15:
                # prob /= 0.15
                # FM edit: always leave as mask
                # 80% randomly change token to mask token
                # if prob < 0.8:
                for sub_token in sub_tokens:
                    output_tokens.append("[MASK]")
                # 10% randomly change token to random token
                # elif prob < 0.9:
                #     for sub_token in sub_tokens:
                #         output_tokens.append(random.choice(list(self.tokenizer.vocab.keys())))
                # # -> rest 10% randomly keep current token
                # else:
                #     for sub_token in sub_tokens:
                #         output_tokens.append(sub_token)
                # append current token to output (we will predict these later)
                for sub_token in sub_tokens:
                    try:
                        output_label.append(self.tokenizer.vocab[sub_token])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        output_label.append(self.tokenizer.vocab["[UNK]"])
                        logging.warning(
                            "Cannot find sub_token '{}' in vocab. Using [UNK] insetad".format(sub_token))
            else:
                for sub_token in sub_tokens:
                    # no masking token (will be ignored by loss function later)
                    output_tokens.append(sub_token)
                    output_label.append(-1)
        # if no word masked, random choose a word to mask
        # if all([l_ == -1 for l_ in output_label]):
        #     choosed = random.randrange(0, len(output_label))
        #     output_label[choosed] = self.tokenizer.vocab[tokens[choosed]]
        return output_tokens, output_label
def random_mask_region(self, regions_cls_scores):
num_regions, num_classes = regions_cls_scores.shape
output_op = []
output_label = []
for k, cls_scores in enumerate(regions_cls_scores):
prob = random.random()
# mask region with 15% probability
if prob < 0.15:
prob /= 0.15
if prob < 0.9:
# 90% randomly replace appearance feature by "MASK"
output_op.append(1)
else:
# -> rest 10% randomly keep current appearance feature
output_op.append(0)
# append class of region to output (we will predict these later)
output_label.append(cls_scores)
else:
# no masking region (will be ignored by loss function later)
output_op.append(0)
output_label.append(np.zeros_like(cls_scores))
# # if no region masked, random choose a region to mask
# if all([op == 0 for op in output_op]):
# choosed = random.randrange(0, len(output_op))
# output_op[choosed] = 1
# output_label[choosed] = regions_cls_scores[choosed]
return output_op, output_label
@staticmethod
def b64_decode(string):
return base64.decodebytes(string.encode())
@staticmethod
def group_aspect(database):
print('grouping aspect...')
t = time.time()
# get shape of all images
widths = torch.as_tensor([idb['width'] for idb in database])
heights = torch.as_tensor([idb['height'] for idb in database])
# group
group_ids = torch.zeros(len(database))
horz = widths >= heights
vert = 1 - horz
group_ids[horz] = 0
group_ids[vert] = 1
print('Done (t={:.2f}s)'.format(time.time() - t))
return group_ids
    def __len__(self):
        """Return the number of samples in the (expanded) database."""
        return len(self.database)
def _load_image(self, path):
if '.zip@' in path:
return self.zipreader.imread(path).convert('RGB')
else:
return Image.open(path).convert('RGB')
def _load_json(self, path):
if '.zip@' in path:
f = self.zipreader.read(path)
return json.loads(f.decode())
else:
with open(path, 'r') as f:
return json.load(f)
| 21,632 | 42.969512 | 111 | py |
BertGen | BertGen-master/LanguageGeneration/data/samplers/grouped_batch_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import torch
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
class GroupedBatchSampler(BatchSampler):
    """
    Wraps another sampler to yield a mini-batch of indices.
    It enforces that elements from the same group should appear in groups of batch_size.
    It also tries to provide mini-batches which follows an ordering which is
    as close as possible to the ordering from the original sampler.
    Arguments:
        sampler (Sampler): Base sampler.
        batch_size (int): Size of mini-batch.
        drop_uneven (bool): If ``True``, the sampler will drop the batches whose
            size is less than ``batch_size``
    """
    def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
        if not isinstance(sampler, Sampler):
            raise ValueError(
                "sampler should be an instance of "
                "torch.utils.data.Sampler, but got sampler={}".format(sampler)
            )
        self.sampler = sampler
        # one group id per dataset element; batches never mix group ids
        self.group_ids = torch.as_tensor(group_ids)
        assert self.group_ids.dim() == 1
        self.batch_size = batch_size
        self.drop_uneven = drop_uneven
        # distinct group ids, sorted ascending
        self.groups = torch.unique(self.group_ids).sort(0)[0]
        # set by __len__ so a following __iter__ can reuse its batches
        self._can_reuse_batches = False
    def _prepare_batches(self):
        """Build one epoch's list of batches (lists of dataset indices)."""
        dataset_size = len(self.group_ids)
        # get the sampled indices from the sampler
        sampled_ids = torch.as_tensor(list(self.sampler))
        # potentially not all elements of the dataset were sampled
        # by the sampler (e.g., DistributedSampler).
        # construct a tensor which contains -1 if the element was
        # not sampled, and a non-negative number indicating the
        # order where the element was sampled.
        # for example. if sampled_ids = [3, 1] and dataset_size = 5,
        # the order is [-1, 1, -1, 0, -1]
        order = torch.full((dataset_size,), -1, dtype=torch.int64)
        order[sampled_ids] = torch.arange(len(sampled_ids))
        # get a mask with the elements that were sampled
        mask = order >= 0
        # find the elements that belong to each individual cluster
        clusters = [(self.group_ids == i) & mask for i in self.groups]
        # get relative order of the elements inside each cluster
        # that follows the order from the sampler
        relative_order = [order[cluster] for cluster in clusters]
        # with the relative order, find the absolute order in the
        # sampled space
        permutation_ids = [s[s.sort()[1]] for s in relative_order]
        # permute each cluster so that they follow the order from
        # the sampler
        permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
        # splits each cluster in batch_size, and merge as a list of tensors
        splits = [c.split(self.batch_size) for c in permuted_clusters]
        merged = tuple(itertools.chain.from_iterable(splits))
        # now each batch internally has the right order, but
        # they are grouped by clusters. Find the permutation between
        # different batches that brings them as close as possible to
        # the order that we have in the sampler. For that, we will consider the
        # ordering as coming from the first element of each batch, and sort
        # correspondingly
        first_element_of_batch = [t[0].item() for t in merged]
        # get and inverse mapping from sampled indices and the position where
        # they occur (as returned by the sampler)
        inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())}
        # from the first element in each batch, get a relative ordering
        first_index_of_batch = torch.as_tensor(
            [inv_sampled_ids_map[s] for s in first_element_of_batch]
        )
        # permute the batches so that they approximately follow the order
        # from the sampler
        permutation_order = first_index_of_batch.sort(0)[1].tolist()
        # finally, permute the batches
        batches = [merged[i].tolist() for i in permutation_order]
        if self.drop_uneven:
            # drop trailing batches that are smaller than batch_size
            kept = []
            for batch in batches:
                if len(batch) == self.batch_size:
                    kept.append(batch)
            batches = kept
        return batches
    def __iter__(self):
        """Yield batches, reusing the ones precomputed by __len__ if any."""
        if self._can_reuse_batches:
            batches = self._batches
            self._can_reuse_batches = False
        else:
            batches = self._prepare_batches()
        self._batches = batches
        return iter(batches)
    def __len__(self):
        """Number of batches; computes and caches the batches on first call."""
        if not hasattr(self, "_batches"):
            self._batches = self._prepare_batches()
            self._can_reuse_batches = True
        return len(self._batches)
| 4,846 | 40.42735 | 88 | py |
BertGen | BertGen-master/LanguageGeneration/data/samplers/distributed.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class DistributedSampler(Sampler):
    """Restrict data loading to a per-process subset of the dataset.

    Each of ``num_replicas`` processes draws a contiguous, equally sized slice
    of the (optionally shuffled) index sequence; the sequence is padded with
    its own head so every rank receives exactly the same number of samples.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        shuffle (optional): Shuffle indices (deterministically per epoch).
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # every rank receives ceil(N / num_replicas) samples
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # seed by epoch so every rank shuffles identically
            gen = torch.Generator()
            gen.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            indices = list(range(len(self.dataset)))

        # pad with leading indices so the total divides evenly across ranks
        indices += indices[: (self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # contiguous slice for this rank
        start = self.num_samples * self.rank
        chunk = indices[start: start + self.num_samples]
        assert len(chunk) == self.num_samples

        return iter(chunk)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # controls the shuffle seed; call once per epoch before iterating
        self.epoch = epoch
BertGen | BertGen-master/LanguageGeneration/data/transforms/transforms.py | import random
import numpy as np
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
    """Chain several (image, boxes, masks, im_info) transforms into one."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, boxes, masks, im_info):
        state = (image, boxes, masks, im_info)
        for transform in self.transforms:
            state = transform(*state)
        return state

    def __repr__(self):
        body = "".join("\n    {0}".format(t) for t in self.transforms)
        return self.__class__.__name__ + "(" + body + "\n)"
class Resize(object):
    """Resize image and boxes so the short side equals ``min_size``,
    shrinking further if the long side would exceed ``max_size``."""

    def __init__(self, min_size, max_size):
        self.min_size = min_size
        self.max_size = max_size

    # modified from torchvision to add support for max size
    def get_size(self, image_size):
        """Return the target (width, height) preserving aspect ratio."""
        w, h = image_size
        target = self.min_size
        cap = self.max_size
        if cap is not None:
            short = float(min((w, h)))
            long_side = float(max((w, h)))
            # shrink the target if the scaled long side would exceed the cap
            if long_side / short * target > cap:
                target = int(cap * short / long_side)
        if (w <= h and w == target) or (h <= w and h == target):
            return (w, h)
        if w < h:
            return (target, int(target * h / w))
        return (int(target * w / h), target)

    def __call__(self, image, boxes, masks, im_info):
        original = im_info[:2]
        new_w, new_h = self.get_size(original)
        if image is not None:
            image = F.resize(image, (new_h, new_w))
        ratios = [new_w * 1.0 / original[0], new_h * 1.0 / original[1]]
        if boxes is not None:
            # scale box coordinates by the per-axis resize ratios
            boxes[:, [0, 2]] *= ratios[0]
            boxes[:, [1, 3]] *= ratios[1]
        im_info[0], im_info[1] = (new_w, new_h)
        im_info[2], im_info[3] = ratios
        return image, boxes, masks, im_info
class RandomHorizontalFlip(object):
    """Horizontally flip image, boxes and masks with probability ``prob``."""

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image, boxes, masks, im_info):
        # exactly one RNG draw per call, flip only when it lands below prob
        if random.random() >= self.prob:
            return image, boxes, masks, im_info
        w, h = im_info[:2]
        if image is not None:
            image = F.hflip(image)
        if boxes is not None:
            # mirror x-coordinates: new x1/x2 come from old x2/x1
            boxes[:, [0, 2]] = w - 1 - boxes[:, [2, 0]]
        if masks is not None:
            masks = torch.as_tensor(masks.numpy()[:, :, ::-1].tolist())
        return image, boxes, masks, im_info
class ToTensor(object):
    """Convert a PIL image to a tensor; pass None (and the rest) through."""

    def __call__(self, image, boxes, masks, im_info):
        if image is None:
            return image, boxes, masks, im_info
        return F.to_tensor(image), boxes, masks, im_info
class Normalize(object):
    """Optionally reorder RGB->BGR and scale to 0-255, then mean/std normalize."""

    def __init__(self, mean, std, to_bgr255=True):
        self.mean = mean
        self.std = std
        self.to_bgr255 = to_bgr255

    def __call__(self, image, boxes, masks, im_info):
        if image is None:
            return image, boxes, masks, im_info
        if self.to_bgr255:
            # channel reorder to BGR plus 0-255 range (Caffe-style inputs)
            image = image[[2, 1, 0]] * 255
        image = F.normalize(image, mean=self.mean, std=self.std)
        return image, boxes, masks, im_info
class FixPadding(object):
    """Pad an image to a fixed (min_size, max_size) canvas for determinacy.

    The orientation of the canvas follows the image: landscape images get a
    (min_size x max_size) canvas, portrait images the transpose. The fill
    value is ``pad``.
    """

    def __init__(self, min_size, max_size, pad=0):
        self.min_size = min_size
        self.max_size = max_size
        self.pad = pad

    def __call__(self, image, boxes, masks, im_info):
        if image is None:
            return image, boxes, masks, im_info
        c, h, w = image.shape
        if h <= w:
            target_h, target_w = self.min_size, self.max_size
        else:
            target_h, target_w = self.max_size, self.min_size
        # fill a fixed-size canvas and paste the image in the top-left corner
        canvas = image.new_full((c, target_h, target_w), self.pad)
        canvas[:, :h, :w] = image
        return canvas, boxes, masks, im_info
| 3,944 | 29.820313 | 88 | py |
BertGen | BertGen-master/scripts/launch.py | r"""
`torch.distributed.launch` is a module that spawns up multiple distributed
training processes on each of the training nodes.
The utility can be used for single-node distributed training, in which one or
more processes per node will be spawned. The utility can be used for either
CPU training or GPU training. If the utility is used for GPU training,
each distributed process will be operating on a single GPU. This can achieve
well-improved single-node training performance. It can also be used in
multi-node distributed training, by spawning up multiple processes on each node
for well-improved multi-node distributed training performance as well.
This will especially be beneficial for systems with multiple Infiniband
interfaces that have direct-GPU support, since all of them can be utilized for
aggregated communication bandwidth.
In both cases of single-node distributed training or multi-node distributed
training, this utility will launch the given number of processes per node
(``--nproc_per_node``). If used for GPU training, this number needs to be less
or equal to the number of GPUs on the current system (``nproc_per_node``),
and each process will be operating on a single GPU from *GPU 0 to
GPU (nproc_per_node - 1)*.
**How to use this module:**
1. Single-Node multi-process distributed training
::
>>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other
arguments of your training script)
2. Multi-Node multi-process distributed training: (e.g. two nodes)
Node 1: *(IP: 192.168.1.1, and has a free port: 1234)*
::
>>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
--nnodes=2 --node_rank=0 --master_addr="192.168.1.1"
--master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
and all other arguments of your training script)
Node 2:
::
>>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
--nnodes=2 --node_rank=1 --master_addr="192.168.1.1"
--master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
and all other arguments of your training script)
3. To look up what optional arguments this module offers:
::
>>> python -m torch.distributed.launch --help
**Important Notices:**
1. This utility and multi-process distributed (single-node or
multi-node) GPU training currently only achieves the best performance using
the NCCL distributed backend. Thus NCCL backend is the recommended backend to
use for GPU training.
2. In your training program, you must parse the command-line argument:
``--local_rank=LOCAL_PROCESS_RANK``, which will be provided by this module.
If your training program uses GPUs, you should ensure that your code only
runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by:
Parsing the local_rank argument
::
>>> import argparse
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument("--local_rank", type=int)
>>> args = parser.parse_args()
Set your device to local rank using either
::
>>> torch.cuda.set_device(args.local_rank) # before your code runs
or
::
>>> with torch.cuda.device(args.local_rank):
>>> # your code to run
3. In your training program, you are supposed to call the following function
at the beginning to start the distributed backend. You need to make sure that
the init_method uses ``env://``, which is the only supported ``init_method``
by this module.
::
torch.distributed.init_process_group(backend='YOUR BACKEND',
init_method='env://')
4. In your training program, you can either use regular distributed functions
or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your
training program uses GPUs for training and you would like to use
:func:`torch.nn.parallel.DistributedDataParallel` module,
here is how to configure it.
::
model = torch.nn.parallel.DistributedDataParallel(model,
                                                      device_ids=[args.local_rank],
                                                      output_device=args.local_rank)
Please ensure that ``device_ids`` argument is set to be the only GPU device id
that your code will be operating on. This is generally the local rank of the
process. In other words, the ``device_ids`` needs to be ``[args.local_rank]``,
and ``output_device`` needs to be ``args.local_rank`` in order to use this
utility
5. Another way to pass ``local_rank`` to the subprocesses via environment variable
``LOCAL_RANK``. This behavior is enabled when you launch the script with
``--use_env=True``. You must adjust the subprocess example above to replace
``args.local_rank`` with ``os.environ['LOCAL_RANK']``; the launcher
will not pass ``--local_rank`` when you specify this flag.
.. warning::
``local_rank`` is NOT globally unique: it is only unique per process
on a machine. Thus, don't use it to decide if you should, e.g.,
write to a networked filesystem. See
https://github.com/pytorch/pytorch/issues/12042 for an example of
how things can go wrong if you don't do this correctly.
"""
import sys
import subprocess
import os
import socket
from argparse import ArgumentParser, REMAINDER
import torch
def parse_args():
    """
    Parse the launcher's command line options.

    @retval argparse.Namespace with the launcher options, the training script
            path, and the remaining arguments to forward to that script.
    """
    # BUG FIX: user-facing help text had typos ("utilty", "communciation").
    parser = ArgumentParser(description="PyTorch distributed training launch "
                                        "helper utility that will spawn up "
                                        "multiple distributed processes")

    # Optional arguments for the launch helper
    parser.add_argument("--nnodes", type=int, default=1,
                        help="The number of nodes to use for distributed "
                             "training")
    parser.add_argument("--node_rank", type=int, default=0,
                        help="The rank of the node for multi-node distributed "
                             "training")
    parser.add_argument("--nproc_per_node", type=int, default=1,
                        help="The number of processes to launch on each node, "
                             "for GPU training, this is recommended to be set "
                             "to the number of GPUs in your system so that "
                             "each process can be bound to a single GPU.")
    parser.add_argument("--master_addr", default="127.0.0.1", type=str,
                        help="Master node (rank 0)'s address, should be either "
                             "the IP address or the hostname of node 0, for "
                             "single node multi-proc training, the "
                             "--master_addr can simply be 127.0.0.1")
    parser.add_argument("--master_port", default=29500, type=int,
                        help="Master node (rank 0)'s free port that needs to "
                             "be used for communication during distributed "
                             "training")

    # positional
    parser.add_argument("training_script", type=str,
                        help="The full path to the single GPU training "
                             "program/script to be launched in parallel, "
                             "followed by all the arguments for the "
                             "training script")

    # rest from the training program (captured verbatim)
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Spawn one training process per local rank and wait for all of them."""
    args = parse_args()

    # world size in terms of number of processes
    world_size = args.nproc_per_node * args.nnodes

    # set PyTorch distributed related environmental variables
    env = os.environ.copy()
    env["MASTER_ADDR"] = args.master_addr
    env["MASTER_PORT"] = str(args.master_port)
    env["WORLD_SIZE"] = str(world_size)

    children = []
    for local_rank in range(args.nproc_per_node):
        # each process's global rank
        rank = args.nproc_per_node * args.node_rank + local_rank
        env["RANK"] = str(rank)
        env["LOCAL_RANK"] = str(local_rank)

        # spawn the worker with the training script's own args plus --dist
        cmd = [sys.executable, "-u", args.training_script]
        cmd += args.training_script_args + ["--dist"]
        children.append(subprocess.Popen(cmd, env=env))

    for child in children:
        child.wait()
        if child.returncode != 0:
            raise subprocess.CalledProcessError(returncode=child.returncode,
                                                cmd=child.args)
# Entry point: parse the CLI options and spawn the worker processes.
if __name__ == "__main__":
    main()
| 9,500 | 46.268657 | 95 | py |
paperdata | paperdata-master/test_nn.py | import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import BatchNormalization, Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import SGD
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
nsample = 10000
ndim = 2
nhidden1 = nhidden2 = 1000
x = np.random.normal(0, 1, (nsample, ndim))
y = x[:, 0] ** 2 + x[:, 1] ** 2 + np.sin(x[:, 1])
x_train, x_test, pb_train, pb_test = train_test_split(x, y)
model = Sequential()
model.add(Dense(nhidden1, input_shape=(x_train.shape[1],), activation="sigmoid"))
model.add(BatchNormalization())
model.add(Dense(nhidden2, activation="sigmoid"))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(1))
opt = SGD(0.000001)
model.compile(loss='mse',
optimizer=opt,
metrics=['mse'])
model.summary()
history = model.fit(x_train, pb_train,
batch_size=100,
epochs=1000,
validation_split=0.1,
initial_epoch=0,
validation_data=(x_test, pb_test))
| 1,170 | 27.560976 | 81 | py |
SparkNet | SparkNet-master/scripts/put_imagenet_on_s3.py | # Script to upload the imagenet dataset to Amazon S3 or another remote file
# system (have to change the function upload_file to support more storage
# systems).
import boto3
import urllib
import tarfile, io
import argparse
import random
import PIL.Image
import collections
# Command-line interface for the upload script.
parser = argparse.ArgumentParser()
parser.add_argument("s3_bucket", help="Bucket to which imagenet data is uploaded", type=str)
parser.add_argument("--train_tar_file", help="Path to the ILSVRC2012_img_train.tar file", type=str)
parser.add_argument("--val_tar_file", help="Path to the ILSVRC2012_img_val.tar file", type=str)
parser.add_argument("--num_train_chunks", help="Number of train .tar files generated", type=int, default=1000)
parser.add_argument("--num_val_chunks", help="Number of val .tar files generated", type=int, default=50)
parser.add_argument("--new_width", help="Width to resize images to", type=int, default=-1)
parser.add_argument("--new_height", help="Height to resize images to", type=int, default=-1)
args = parser.parse_args()

# Download the Caffe ILSVRC12 label bundle and pull out the train/val label
# files. NOTE: this script targets Python 2 (urllib.urlretrieve, print
# statements below).
url = "http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz"
urllib.urlretrieve(url, "caffe_ilsvrc12.tar.gz")
tar = tarfile.open("caffe_ilsvrc12.tar.gz")
train_label_file = tar.extractfile("train.txt")
val_label_file = tar.extractfile("val.txt")

# Resize only when both target dimensions were given on the command line.
new_image_size = None
if args.new_width != -1 and args.new_height != -1:
    new_image_size = (args.new_width, args.new_height)

s3 = boto3.client('s3')
"""Change this function if you want to upload to HDFS or local storage"""
def upload_file(targetname, stream):
print "starting to upload", targetname, "to bucket", args.s3_bucket
s3.put_object(Bucket=args.s3_bucket, Key=targetname, Body=stream)
print "finished uploading", targetname, "to bucket", args.s3_bucket
def split_label_file(label_file, num_chunks):
    """Shuffle "<filename> <label>" lines and deal them round-robin into chunks.

    Args:
        label_file: open file-like object with one "<name> <label>" pair per line.
        num_chunks: number of chunk lists to produce.

    Returns:
        A list of `num_chunks` lists of [filename, label] pairs.
    """
    lines = label_file.readlines()
    # BUG FIX: `map(...)` is a lazy iterator on Python 3, which breaks
    # random.shuffle (no len/indexing); a list comprehension behaves
    # identically on Python 2 and also works on Python 3.
    split_lines = [line.split() for line in lines]
    random.shuffle(split_lines)
    num_images = len(split_lines)
    shuffled_lists = [[] for _ in range(num_chunks)]
    for i in range(num_images):
        shuffled_lists[i % num_chunks].append(split_lines[i])
    return shuffled_lists
def resize_and_add_image(next_file, file_name, imgfile, new_size=None):
    """Re-encode (and optionally resize) an image, then append it to a tar.

    Args:
        next_file: open tarfile.TarFile in write mode.
        file_name: member name for the image inside the tar.
        imgfile: file-like object with the source image bytes.
        new_size: optional (width, height) to resize to before encoding.
    """
    image = PIL.Image.open(imgfile)
    if new_size is not None:
        image = image.resize(new_size, PIL.Image.ANTIALIAS)
    # re-encode as JPEG into an in-memory buffer
    buf = io.BytesIO()
    image.save(buf, format='JPEG')
    buf.seek(0)
    entry = tarfile.TarInfo(name=file_name)
    entry.size = len(buf.getvalue())
    next_file.addfile(entry, fileobj=buf)
def process_val_files(val_tar_file, val_label_file, num_chunks):
    """Shard the validation tar into `num_chunks` shuffled tars and upload them.

    Args:
        val_tar_file: path to ILSVRC2012_img_val.tar.
        val_label_file: open file with "<filename> <label>" lines.
        num_chunks: number of output shards.
    """
    val_file = tarfile.open(val_tar_file)
    chunks = split_label_file(val_label_file, num_chunks)
    for i, chunk in enumerate(chunks):
        output = io.BytesIO() # process validation files in memory
        next_file = tarfile.open(mode="w", fileobj=output)
        for file_name, label in chunk:
            imgfile = val_file.extractfile(file_name)
            resize_and_add_image(next_file, file_name, imgfile, new_size=new_image_size)
        # BUG FIX: close the archive before uploading so tarfile flushes its
        # buffers and writes the end-of-archive blocks; the original uploaded
        # a tar without the terminating blocks.
        next_file.close()
        output.seek(0)
        upload_file("ILSVRC2012_img_val/val." + str(i).zfill(3) + ".tar", output)
def build_index(train_tar_file):
    """Index a tar-of-tars: map image name -> (open sub-tar, header offset).

    Every member of the outer tar is itself a tar archive; each inner member's
    byte offset is recorded together with the (still open) inner handle so a
    caller can seek back to any image later.

    :return: (index dict, list of open inner tarfile handles to close later)
    """
    index = {}
    open_subtars = []
    outer = tarfile.open(train_tar_file)
    for member in outer.getmembers():
        inner = tarfile.open(fileobj=outer.extractfile(member.name))
        open_subtars.append(inner)
        entry = inner.next()
        while entry is not None:
            # Record where this member's header starts inside the sub-tar.
            index[entry.name] = (inner, entry.offset)
            entry = inner.next()
    return index, open_subtars
def process_train_files(train_tar_file, train_label_file, num_chunks):
    """Re-shard the ILSVRC training tar-of-tars into `num_chunks` shuffled tars.

    Uses the index from build_index() to seek straight to each image inside
    its per-class sub-tar instead of re-scanning the archives, then uploads
    each in-memory shard via upload_file().
    """
    chunks = split_label_file(train_label_file, num_chunks)
    index, filehandles = build_index(train_tar_file)
    for i, chunk in enumerate(chunks):
        output = io.BytesIO()  # process training files in memory
        next_file = tarfile.open(mode="w", fileobj=output)
        for file_name, label in chunk:
            # Label lines look like "<class-folder>/<image-name>" plus a label.
            (folder, img_name) = file_name.split('/')
            (file_handle, offset) = index[img_name]
            # Rewind the already-open sub-tar to this member's header so the
            # next() call below re-reads exactly this entry. The assignment to
            # `offset` before next() is essential — do not reorder.
            file_handle.offset = offset
            imgfile = file_handle.extractfile(file_handle.next())
            resize_and_add_image(next_file, img_name, imgfile, new_size=new_image_size)
        output.seek(0)
        upload_file("ILSVRC2012_img_train/train." + str(i).zfill(5) + ".tar", output)
    # Only now close the inner tar handles kept open by build_index().
    for handle in filehandles:
        handle.close()
if __name__ == "__main__":
    # Upload the raw label files first so they sit alongside the shards.
    upload_file("train.txt", train_label_file.read())
    train_label_file.seek(0)  # make it possible to read from this file again
    upload_file("val.txt", val_label_file.read())
    val_label_file.seek(0)  # make it possible to read from this file again
    # Re-shard whichever archives were supplied on the command line.
    if args.train_tar_file is not None:
        process_train_files(args.train_tar_file, train_label_file, 1000)
    if args.val_tar_file is not None:
        process_val_files(args.val_tar_file, val_label_file, 50)
| 5,062 | 42.273504 | 110 | py |
neat-ml | neat-ml-main/neat_ml/link_prediction/mlp_model.py | """MLP model."""
import os
import pickle
from warnings import warn
try:
import tensorflow as tf # type: ignore
HAVE_TF = True
except ModuleNotFoundError:
print("Tensorflow not found. MLP model compilation may fail!")
HAVE_TF = False
from .model import Model
class MLPModel(Model):
    """Keras multi-layer-perceptron link-prediction model driven by a YAML config.

    The config names the Keras model class (``classifier_type``) and, under
    ``parameters.tf_keras_params``, the layer stack plus compile and fit
    options consumed by :meth:`compile` and :meth:`fit`.
    """

    def __init__(self, config, outdir: str = None) -> None:
        """Make an MLP model.

        :param config: The classifier config
        :param outdir: Output path., defaults to None
        :return: The model
        """
        super().__init__(outdir=outdir)
        self.config = config
        model_type = config["classifier_type"]
        if not HAVE_TF:
            warn(
                "Tensorflow not available - specified method \
                may not be accessible!"
            )
        model_class = self.dynamically_import_class(model_type)
        # Instantiate each configured layer, preserving config order.
        model_layers = []
        for layer in config["parameters"]["tf_keras_params"]["layers_config"][
            "layers"
        ]:
            layer_type = layer["type"]
            layer_class = self.dynamically_import_class(layer_type)
            parameters = layer["parameters"]
            layer_instance = layer_class(**parameters)  # type: ignore
            model_layers.append(layer_instance)
        # Build the (Sequential-style) model and stack the layers onto it.
        model_instance = model_class()  # type: ignore
        for one_layer in model_layers:
            model_instance.add(one_layer)
        self.model = model_instance

    def compile(self):
        """Compile model."""
        model_compile_parameters = self.config["parameters"]["tf_keras_params"]
        # Default to an empty list rather than None: the loop below would
        # raise TypeError iterating None when metrics_config is absent.
        metrics = (
            model_compile_parameters["metrics_config"]["metrics"]
            if "metrics_config" in model_compile_parameters
            else []
        )
        metrics_class_list = []
        for m in metrics:
            if m["type"].startswith("tensorflow.keras"):
                # tf.keras metric classes are instantiated with optional
                # "name"/"curve" keyword arguments from the config.
                m_class = self.dynamically_import_class(m["type"])
                m_parameters = {}
                if "name" in m:
                    m_parameters["name"] = m["name"]
                if "curve" in m:
                    m_parameters["curve"] = m["curve"]
                m_instance = m_class(**m_parameters)
                metrics_class_list.append(m_instance)
            else:
                # Plain string metrics (e.g. "accuracy") pass through as-is.
                metrics_class_list.append([m["type"]])
        self.model.compile(
            loss=model_compile_parameters["loss"],
            optimizer=model_compile_parameters["optimizer"],
            metrics=metrics_class_list,
        )

    def fit(self, train_data, train_labels):
        """Take a model, generated from 'make_model' and call 'fit'.

        :param train_data: Training data for fitting.
        :param train_labels: Validation data for fitting.
        :return: The model object
        """
        try:
            classifier_params = self.config["parameters"]["tf_keras_params"][
                "fit_config"
            ]
        except KeyError:
            classifier_params = {}
        callback_list = []
        if "callbacks_list" in classifier_params:
            for callback in classifier_params["callbacks_list"]["callbacks"]:
                c_class = self.dynamically_import_class(callback["type"])
                c_params = (
                    callback["parameters"] if "parameters" in callback else {}
                )
                c_instance = c_class(**c_params)
                callback_list.append(c_instance)
            # Remove the callback config before splatting the remaining fit
            # options into model.fit(). The original deleted the non-existent
            # key "callbacks", raising KeyError whenever callbacks were
            # actually configured.
            del classifier_params["callbacks_list"]
        history = self.model.fit(
            train_data,
            train_labels,
            **classifier_params,
            callbacks=callback_list
        )
        return history

    def save(self) -> None:
        """Save the Keras model plus a pickled copy of this wrapper object."""
        self.model.save(os.path.join(self.outdir, self.config["outfile"]))
        fn, ext = os.path.splitext(self.config["outfile"])
        model_outfile = fn + "_custom" + ext
        with open(os.path.join(self.outdir, model_outfile), "wb") as f:
            pickle.dump(self, f)

    def load(self, path: str) -> tuple:  # type: ignore
        """Load model.

        Returns () when TensorFlow is unavailable, otherwise a
        (keras_model, wrapper) pair. (The original annotation ``-> tuple()``
        evaluated to an empty-tuple *instance*, not the tuple type.)
        """
        if not HAVE_TF:
            warn("Tensorflow not available - cannot load model.")
            return ()
        fn, ext = os.path.splitext(path)
        custom_model_filename = fn + "_custom" + ext
        generic_model_object = tf.keras.models.load_model(path)
        with open(custom_model_filename, "rb") as mf2:
            custom_model_object = pickle.load(mf2)
        return generic_model_object, custom_model_object
| 4,566 | 33.338346 | 79 | py |
neat-ml | neat-ml-main/tests/test_link_prediction.py | """Test link prediction."""
import os
import pathlib
from unittest import TestCase
import numpy as np
import pandas as pd
from grape import Graph
try:
from keras.engine.sequential import Sequential
HAVE_KERAS = True
except ModuleNotFoundError:
print("Keras not found - will not test related functions.")
HAVE_KERAS = False
from neat_ml.link_prediction.grape_model import GrapeModel
from neat_ml.link_prediction.mlp_model import MLPModel
from neat_ml.link_prediction.sklearn_model import SklearnModel
from neat_ml.run_classifier.run_classifier import get_custom_model_path
from neat_ml.yaml_helper.yaml_helper import YamlHelper
class TestLinkPrediction(TestCase):
    """Test link prediction: model construction, fit, save and load."""

    @classmethod
    def setUpClass(cls) -> None:
        """Set up."""
        pass

    def setUp(self) -> None:
        """Set up per-test fixtures: configs, the three model kinds, paths."""
        self.yaml_file = "tests/resources/test.yaml"
        self.embed_file = "tests/resources/test_link_prediction/test_embeddings_test_yaml.csv"  # noqa E501
        self.embed_snippet_file = "tests/resources/test_link_prediction/test_embeddings_test_yaml_SNIPPET.csv"  # noqa E501
        self.yhelp = YamlHelper(self.yaml_file)
        self.test_model_path = "tests/resources/test_output_data_dir/"
        self.test_load_path = "tests/resources/test_link_prediction/"
        self.sklearn_model = SklearnModel(
            (self.yhelp.classifiers())[0], self.test_model_path
        )
        self.tf_model = MLPModel(
            (self.yhelp.classifiers())[1], self.test_model_path
        )
        self.grape_model = GrapeModel(
            (self.yhelp.classifiers())[2], self.test_model_path
        )
        self.sklearn_outfile = ((self.yhelp.classifiers())[0])["outfile"]
        self.generic_tf_outfile = ((self.yhelp.classifiers())[1])["outfile"]
        self.custom_tf_outfile = get_custom_model_path(self.generic_tf_outfile)
        self.grape_outfile = ((self.yhelp.classifiers())[2])["outfile"]
        self.training_graph_args = {
            "directed": False,
            "node_path": "tests/resources/test_graphs/pos_train_nodes.tsv",
            "edge_path": "tests/resources/test_graphs/pos_train_edges.tsv",
            "verbose": True,
            "nodes_column": "id",
            "node_list_node_types_column": "category",
            "default_node_type": "biolink:NamedThing",
            "sources_column": "subject",
            "destinations_column": "object",
            "default_edge_type": "biolink:related_to",
        }

    def assert_is_file(self, path):
        """Assert if path is a file."""
        if not pathlib.Path(path).resolve().is_file():
            raise AssertionError("File does not exist: %s" % str(path))

    def test_sklearn_save(self) -> None:
        """Test saving model using sklearn."""
        model_object = self.sklearn_model
        # Need to have a fitted model here
        embed_contents = pd.read_csv(
            self.embed_snippet_file, index_col=0, header=None
        )
        # np.bool was removed in NumPy 1.24; the builtin bool is the
        # correct dtype for randint here.
        dummy_labels = np.random.randint(
            0, high=2, size=(embed_contents.shape[0],), dtype=bool
        )
        model_object.fit(embed_contents, dummy_labels)
        model_object.save()
        self.assert_is_file(
            os.path.join(self.test_model_path, self.sklearn_outfile)
        )

    def test_tf_save(self) -> None:
        """Test saving model in tensorflow."""
        model_object = self.tf_model
        model_object.save()
        self.assert_is_file(
            os.path.join(self.test_model_path, self.generic_tf_outfile)
        )
        self.assert_is_file(
            os.path.join(self.test_model_path, self.custom_tf_outfile)
        )

    # Note that the load tests *do not* use the files created by
    # the save tests above, so they may remain independent.
    def test_sklearn_load(self) -> None:
        """Test sklearn loading."""
        out_fn = os.path.join(self.test_load_path, self.sklearn_outfile)
        model_object = self.sklearn_model.load(out_fn)
        self.assertEqual(type(model_object), SklearnModel)

    def test_sklearn_fit(self) -> None:
        """Test sklearn fitting."""
        model_object = self.sklearn_model
        result, _ = model_object.make_train_valid_data(
            embedding_file=self.embed_file,
            training_graph_args=self.training_graph_args,
            edge_method="Average",
        )
        fit_out = model_object.fit(*result)
        self.assertEqual(str(fit_out), "LogisticRegression()")

    def test_tf_load(self) -> None:
        """Test tensorflow load."""
        out_fn = os.path.join(self.test_load_path, self.generic_tf_outfile)
        (
            generic_model_object,
            customized_model_object,
        ) = self.tf_model.load(out_fn)
        if HAVE_KERAS:
            self.assertEqual(type(generic_model_object), Sequential)
            self.assertEqual(type(customized_model_object), MLPModel)

    def test_sklearn_make_link_prediction_data(self) -> None:
        """Test sklearn link predication data generation."""
        model_object = self.sklearn_model
        result = model_object.make_train_valid_data(
            embedding_file=self.embed_file,
            training_graph_args=self.training_graph_args,
            edge_method="Average",
        )
        # result contains tuple of tuples of 2-dim arrays
        self.assertEqual(result[0][0].ndim, 2)

    def test_grape_fit(self) -> None:
        """Test grape's Ensmallen model fitting."""
        model_object = self.grape_model
        graph_in = Graph.from_csv(**self.training_graph_args)
        model_object.fit(graph_in)
        self.assertTrue(model_object.is_fit)
        output = model_object.predict_proba(graph_in)
        self.assertGreaterEqual(len(output), 470000)
| 5,797 | 34.570552 | 123 | py |
neat-ml | neat-ml-main/input_data/analyze_GO_pos_neg_edges.py | from ensmallen import Graph # type: ignore
from embiggen import GraphTransformer # type: ignore
import numpy as np
import pandas as pd
import yaml
import os
import tensorflow as tf
go_yaml_file = "go.yaml"
os.chdir("..")  # paths to files are from root dir
with open(go_yaml_file, 'r') as stream:
    go_yaml = yaml.load(stream, Loader=yaml.FullLoader)

# Locate the trained neural-network classifier in the config and load it.
mlp_file = [c for c in go_yaml['classifier']['classifiers'] if c['type'] == 'neural network'][0]['model']['outfile']
mlp = tf.keras.models.load_model(os.path.join("output_data", mlp_file))

# GO node id -> name lookup used to label predictions.
node_data = pd.read_csv('input_data/go_nodes.tsv', sep='\t')
node_data = node_data.filter(['id', 'name'])

#
# positive validation edges
#
pos_graph_args = go_yaml['graph_data']['graph']
pos_graph_args['directed'] = True
pos_graph_args['edge_path'] = go_yaml['graph_data']['pos_validation']['edge_path']
pos_validation_graph = Graph.from_unsorted_csv(**pos_graph_args)
pos_edges = list(zip(pos_validation_graph.get_source_names(),
                     pos_validation_graph.get_destination_names()))
pos_edge_transform = GraphTransformer(go_yaml['classifier']['edge_method'])
pos_edge_transform.fit(
    np.load(os.path.join("output_data", go_yaml['embeddings']['embedding_file_name'])))
pos_edges_to_eval_emb = pos_edge_transform.transform(pos_validation_graph)
pos_valid_predict = mlp.predict(pos_edges_to_eval_emb, batch_size=1048)
# Positive edges sorted ascending: worst-scored true edges first.
pos_valid_predict_sorted = pd.DataFrame({
    "pred": pos_valid_predict.flatten(),
    "subject": [t[0] for t in pos_edges],
    "object": [t[1] for t in pos_edges]
}).sort_values(by=["pred"], ascending=True)
# add GO CURIE names
pos_valid_predict_sorted = \
    pos_valid_predict_sorted.\
    merge(how='left',
          right=node_data,
          left_on='subject',
          right_on='id').drop('id', axis=1).\
    merge(how='left',
          right=node_data,
          left_on='object',
          right_on='id').drop('id', axis=1)
pos_valid_predict_sorted = \
    pos_valid_predict_sorted.rename(columns={'name_x': 'subject_name',
                                             'name_y': 'object_name'})
pos_valid_predict_sorted.to_csv(os.path.join("output_data", "pos_sco_edges.tsv"),
                                sep='\t', index=False)

#
# negative validation edges
#
# NOTE: pos_graph_args and neg_graph_args alias the same dict inside go_yaml;
# that is harmless here because the positive graph was already constructed.
neg_graph_args = go_yaml['graph_data']['graph']
neg_graph_args['directed'] = True
neg_graph_args['edge_path'] = go_yaml['graph_data']['neg_validation']['edge_path']
# Fixed: the original called EnsmallenGraph.from_unsorted_csv, but
# EnsmallenGraph is never imported (the import is `from ensmallen import
# Graph`), which raised NameError; the positive section correctly uses Graph.
neg_validation_graph = Graph.from_unsorted_csv(**neg_graph_args)
neg_edges = list(zip(neg_validation_graph.get_source_names(),
                     neg_validation_graph.get_destination_names()))
neg_edge_transform = GraphTransformer(go_yaml['classifier']['edge_method'])
neg_edge_transform.fit(
    np.load(os.path.join("output_data", go_yaml['embeddings']['embedding_file_name'])))
neg_edges_to_eval_emb = neg_edge_transform.transform(neg_validation_graph)
neg_valid_predict = mlp.predict(neg_edges_to_eval_emb, batch_size=1048)
# Negative edges sorted descending: best-scored non-edges first.
neg_valid_predict_sorted = pd.DataFrame({
    "pred": neg_valid_predict.flatten(),
    "subject": [t[0] for t in neg_edges],
    "object": [t[1] for t in neg_edges]
}).sort_values(by=["pred"], ascending=False)
neg_valid_predict_sorted = \
    neg_valid_predict_sorted.\
    merge(how='left',
          right=node_data,
          left_on='subject',
          right_on='id').drop('id', axis=1).\
    merge(how='left',
          right=node_data,
          left_on='object',
          right_on='id').drop('id', axis=1)
neg_valid_predict_sorted = \
    neg_valid_predict_sorted.rename(columns={'name_x': 'subject_name',
                                             'name_y': 'object_name'})
neg_valid_predict_sorted.to_csv(os.path.join("output_data", "neg_sco_edges.tsv"),
                                sep='\t', index=False)
| 3,854 | 38.336735 | 116 | py |
simulacra-aesthetic-models | simulacra-aesthetic-models-master/simulacra_compute_embeddings.py | #!/usr/bin/env python3
"""Precomputes CLIP embeddings for Simulacra Aesthetic Captions."""
import argparse
import os
from pathlib import Path
import sqlite3
from PIL import Image
import torch
from torch import multiprocessing as mp
from torch.utils import data
import torchvision.transforms as transforms
from tqdm import tqdm
from CLIP import clip
class SimulacraDataset(data.Dataset):
    """Simulacra dataset

    Reads all (generation id, index, relative path, mean rating) rows from
    the SQLite database once at construction time; images are loaded lazily
    per item.

    Args:
        images_dir: directory
        db: path to the SQLite ratings database
        transform: preprocessing and augmentation of the training images
    """

    def __init__(self, images_dir, db, transform=None):
        self.images_dir = Path(images_dir)
        self.transform = transform
        self.conn = sqlite3.connect(db)
        query = 'SELECT generations.id, images.idx, paths.path, AVG(ratings.rating) FROM images JOIN generations ON images.gid=generations.id JOIN ratings ON images.id=ratings.iid JOIN paths ON images.id=paths.iid GROUP BY images.id'
        # One row per image, with its ratings averaged.
        self.ratings = list(self.conn.execute(query))

    def __len__(self):
        return len(self.ratings)

    def __getitem__(self, key):
        gen_id, img_idx, rel_path, mean_rating = self.ratings[key]
        image = Image.open(self.images_dir / rel_path).convert('RGB')
        if self.transform:
            image = self.transform(image)
        return image, torch.tensor(mean_rating)
def main():
    """Parse arguments, CLIP-embed every rated image, and save the tensors."""
    p = argparse.ArgumentParser(description=__doc__)
    # Fixed help text: it previously duplicated "the CLIP model".
    p.add_argument('--batch-size', '-bs', type=int, default=10,
                   help='the batch size')
    p.add_argument('--clip-model', type=str, default='ViT-B/16',
                   help='the CLIP model')
    p.add_argument('--db', type=str, required=True,
                   help='the database location')
    p.add_argument('--device', type=str,
                   help='the device to use')
    p.add_argument('--images-dir', type=str, required=True,
                   help='the dataset images directory')
    p.add_argument('--num-workers', type=int, default=8,
                   help='the number of data loader workers')
    p.add_argument('--output', type=str, required=True,
                   help='the output file')
    p.add_argument('--start-method', type=str, default='spawn',
                   choices=['fork', 'forkserver', 'spawn'],
                   help='the multiprocessing start method')
    args = p.parse_args()

    mp.set_start_method(args.start_method)

    if args.device:
        # Fixed: the original did torch.device(device), referencing the
        # not-yet-defined local `device` (NameError); the user-supplied
        # device string lives in args.device.
        device = torch.device(args.device)
    else:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Using device:', device)

    clip_model, clip_tf = clip.load(args.clip_model, device=device, jit=False)
    clip_model = clip_model.eval().requires_grad_(False)

    dataset = SimulacraDataset(args.images_dir, args.db, transform=clip_tf)
    loader = data.DataLoader(dataset, args.batch_size, num_workers=args.num_workers)

    # Embed batch by batch, keeping results on the CPU.
    embeds, ratings = [], []
    for batch in tqdm(loader):
        images_batch, ratings_batch = batch
        embeds.append(clip_model.encode_image(images_batch.to(device)).cpu())
        ratings.append(ratings_batch.clone())

    obj = {'clip_model': args.clip_model,
           'embeds': torch.cat(embeds),
           'ratings': torch.cat(ratings)}
    torch.save(obj, args.output)


if __name__ == '__main__':
    main()
| 3,327 | 33.309278 | 256 | py |
simulacra-aesthetic-models | simulacra-aesthetic-models-master/rank_images.py | import os
from argparse import ArgumentParser
from tqdm import tqdm
from PIL import Image
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
import torch
from simulacra_fit_linear_model import AestheticMeanPredictionLinearModel
from CLIP import clip
# Command-line interface: a directory to scan plus how many images to report.
parser = ArgumentParser()
parser.add_argument("directory")
# type=int is required: without it a user-supplied -t value arrives as a str
# and the `len(scores) < args.top_n` comparison below raises TypeError.
parser.add_argument("-t", "--top-n", type=int, default=50)
args = parser.parse_args()
# Pick a device and load the frozen CLIP image encoder.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
clip_model_name = 'ViT-B/16'
clip_model = clip.load(clip_model_name, jit=False, device=device)[0]
clip_model.eval().requires_grad_(False)
# CLIP's published image-normalisation constants.
normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                                 std=[0.26862954, 0.26130258, 0.27577711])
# 512 is embed dimension for ViT-B/16 CLIP
model = AestheticMeanPredictionLinearModel(512)
model.load_state_dict(
    torch.load("models/sac_public_2022_06_29_vit_b_16_linear.pth")
)
model = model.to(device)
def get_filepaths(parentpath, filepaths):
    """Recursively flatten a directory listing into a list of file paths.

    `filepaths` are entry names relative to `parentpath`; directories are
    descended into, regular files are collected (EAFP: a failed listdir with
    NotADirectoryError marks a file).
    """
    collected = []
    for entry in filepaths:
        candidate = os.path.join(parentpath, entry)
        try:
            children = os.listdir(candidate)
        except NotADirectoryError:
            collected.append(candidate)
        else:
            collected.extend(get_filepaths(candidate, children))
    return collected
# Walk the directory tree and keep a running top-N list of (score, path).
filepaths = get_filepaths(args.directory, os.listdir(args.directory))
scores = []
for path in tqdm(filepaths):
    # This is obviously a flawed way to check for an image but this is just
    # a demo script anyway.
    if path[-4:] not in (".png", ".jpg"):
        continue
    # Preprocess exactly as CLIP expects: resize, center-crop, normalise.
    img = Image.open(path).convert('RGB')
    img = TF.resize(img, 224, transforms.InterpolationMode.LANCZOS)
    img = TF.center_crop(img, (224,224))
    img = TF.to_tensor(img).to(device)
    img = normalize(img)
    clip_image_embed = F.normalize(
        clip_model.encode_image(img[None, ...]).float(),
        dim=-1)
    score = model(clip_image_embed)
    if len(scores) < args.top_n:
        # Still filling the list: keep it sorted ascending by score.
        scores.append((score.item(),path))
        scores.sort()
    else:
        # NOTE(review): this compares a float to a 1x1 tensor, which works
        # elementwise in torch; scores[0] is the current minimum. The first
        # branch sorts whole tuples (ties broken by path) while this one
        # sorts by score only — presumably unintentional but harmless.
        if scores[0][0] < score:
            scores.append((score.item(),path))
            scores.sort(key=lambda x: x[0])
            scores = scores[1:]
# Report the survivors in ascending score order.
for score, path in scores:
    print(f"{score}: {path}")
| 2,334 | 32.357143 | 75 | py |
simulacra-aesthetic-models | simulacra-aesthetic-models-master/simulacra_fit_linear_model.py | #!/usr/bin/env python3
"""Fits a linear aesthetic model to precomputed CLIP embeddings."""
import argparse
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
import torch
from torch import nn
from torch.nn import functional as F
class AestheticMeanPredictionLinearModel(nn.Module):
    """Linear head predicting a mean aesthetic rating from a CLIP embedding.

    The embedding is L2-normalised and rescaled by sqrt(dim) so each feature
    has roughly unit magnitude before the affine projection to a scalar.
    """

    def __init__(self, feats_in):
        super().__init__()
        # Single affine map from the embedding to one rating value.
        self.linear = nn.Linear(feats_in, 1)

    def forward(self, input):
        scale = input.shape[-1] ** 0.5
        normalised = F.normalize(input, dim=-1)
        return self.linear(normalised * scale)
def main():
    """Fit a ridge regression on CLIP embeddings and export it as a torch model."""
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument('input', type=str, help='the input feature vectors')
    p.add_argument('output', type=str, help='the output model')
    p.add_argument('--val-size', type=float, default=0.1, help='the validation set size')
    p.add_argument('--seed', type=int, default=0, help='the random seed')
    args = p.parse_args()

    train_set = torch.load(args.input, map_location='cpu')
    # Normalise and rescale by sqrt(dim) to match the forward pass of
    # AestheticMeanPredictionLinearModel, so the fitted coefficients transfer.
    X = F.normalize(train_set['embeds'].float(), dim=-1).numpy()
    X *= X.shape[-1] ** 0.5
    y = train_set['ratings'].numpy()
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=args.val_size, random_state=args.seed)

    regression = Ridge()
    regression.fit(X_train, y_train)
    score_train = regression.score(X_train, y_train)
    score_val = regression.score(X_val, y_val)
    print(f'Score on train: {score_train:g}')
    print(f'Score on val: {score_val:g}')

    # Copy the ridge coefficients into an equivalent torch module so
    # inference does not need scikit-learn.
    model = AestheticMeanPredictionLinearModel(X_train.shape[1])
    with torch.no_grad():
        model.linear.weight.copy_(torch.tensor(regression.coef_))
        model.linear.bias.copy_(torch.tensor(regression.intercept_))
    torch.save(model.state_dict(), args.output)


if __name__ == '__main__':
    main()
| 1,834 | 33.622642 | 108 | py |
oilmm | oilmm-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# oilmm documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions used by this project: API autodocumentation, doctest
# running, TODO lists, coverage reporting, MathJax rendering, source links
# and Google/NumPy-style docstring parsing.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.mathjax',
              'sphinx.ext.viewcode',
              'sphinx.ext.napoleon']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'oilmm'
copyright = u'2019, Wessel Bruinsma'
author = u'Wessel Bruinsma'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on language = None and recommends 'en'.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}


# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'doc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'oilmm.tex', u'OILMM Documentation',
     u'Wessel Bruinsma', 'manual'),
]


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'oilmm', u'OILMM Documentation',
     [author], 1)
]


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'oilmm', u'OILMM Documentation',
     author, 'oilmm', 'Implementation of the Orthogonal Linear Mixing Model',
     'Miscellaneous'),
]
| 5,270 | 30.562874 | 79 | py |
oilmm | oilmm-master/experiments/temperature_igp.py | import lab.torch as B
import numpy as np
import torch
import wbml.plot
from stheno import Matern52
from varz import Vars
from varz.torch import minimise_l_bfgs_b
from wbml.data.cmip5 import load
from wbml.experiment import WorkingDirectory
from oilmm import IGP, Normaliser
if __name__ == "__main__":
    # Jitter added to covariance matrices for numerical stability.
    B.epsilon = 1e-8
    wbml.out.report_time = True
    wd = WorkingDirectory("_experiments", "temperature_igp")

    loc, temp, _ = load()

    # Smooth and subsample temperature data.
    temp = temp.rolling(window=31, center=True, min_periods=1, win_type="hamming")
    temp = temp.mean().iloc[::31, :]

    # Create train and test splits: inputs are days since the first date.
    x = np.array([(day - temp.index[0]).days for day in temp.index])
    y = np.array(temp)

    # Divide into training and test set.
    x_train = x[:250]
    y_train = y[:250]
    x_test = x[250:350]
    y_test = y[250:350]

    # Perform normalisation.
    normaliser = Normaliser(y_train)
    y_train_norm = normaliser.normalise(y_train)

    # Determine initialisation of spatial length scales.
    scales_init = np.maximum(0.2 * np.array(loc.max() - loc.min()), 1)

    # Convert to PyTorch.
    loc = torch.tensor(np.array(loc))

    # One independent GP per output: m equals the number of outputs p.
    p = B.shape(y)[1]
    m = p

    vs = Vars(torch.float64)

    def construct_model(vs):
        """Build an IGP whose per-output kernel is a long-term Matern-5/2
        component plus a yearly periodic Matern-5/2 component."""
        kernels = [
            vs.pos(0.5, name=f"{i}/k_var")
            * Matern52().stretch(vs.bnd(2 * 30, name=f"{i}/k_scale"))
            + vs.pos(0.5, name=f"{i}/k_per_var")
            * (Matern52().stretch(vs.bnd(1.0, name=f"{i}/k_per_scale")).periodic(365))
            for i in range(m)
        ]
        noises = vs.pos(1e-2 * B.ones(m), name="noises")
        return IGP(kernels, noises)

    def objective(vs):
        """Negative log-likelihood of the normalised training data."""
        return -construct_model(vs).logpdf(
            torch.tensor(x_train), torch.tensor(y_train_norm)
        )

    minimise_l_bfgs_b(objective, vs, trace=True, iters=1000)

    # Print variables.
    vs.print()

    # Predict on the held-out window with the fitted model.
    model = construct_model(vs)
    model = model.condition(torch.tensor(x_train), torch.tensor(y_train_norm))
    means, lowers, uppers = B.to_numpy(model.predict(x_test))

    # Compute RMSE in the original (unnormalised) units.
    wbml.out.kv("RMSE", B.mean((normaliser.unnormalise(means) - y_test) ** 2) ** 0.5)

    # Compute LML, correcting for the normalisation's change of variables.
    lml = -objective(vs) + normaliser.normalise_logdet(y_train)
    wbml.out.kv("LML", lml / B.length(y_train))

    # Compute PPLP (posterior predictive log-probability) per data point.
    logprob = model.logpdf(
        torch.tensor(x_test), torch.tensor(normaliser.normalise(y_test))
    )
    logdet = normaliser.normalise_logdet(y_test)
    pplp = logprob + logdet
    wbml.out.kv("PPLP", pplp / B.length(y_test))
| 2,601 | 27.911111 | 86 | py |
oilmm | oilmm-master/experiments/timing.py | import time
import lab.torch as B
import numpy as np
import torch
import wbml.plot
from matrix import Dense, Diagonal
from oilmm import OILMM
from stheno import Matern52
from varz import Vars
from wbml.data.cmip5 import load
from wbml.experiment import WorkingDirectory
if __name__ == "__main__":
    # Jitter for numerical stability of covariance computations.
    B.epsilon = 1e-8
    wd = WorkingDirectory("_experiments", "timing")

    loc, temp, _ = load()

    # Smooth and subsample temperature data.
    temp = temp.rolling(window=31, center=True, min_periods=1, win_type="hamming")
    temp = temp.mean().iloc[::31, :]

    x = np.array([(day - temp.index[0]).days for day in temp.index])
    y = np.array(temp)
    p = B.shape(y)[1]

    def construct_model(vs, m):
        """Build an OILMM with m latent processes, timing how long the
        construction of the orthogonal mixing matrix takes."""
        kernels = [
            vs.pos(0.5, name=f"{i}/k_var")
            * Matern52().stretch(vs.bnd(2 * 30, name=f"{i}/k_scale"))
            + vs.pos(0.5, name=f"{i}/k_per_var")
            * (Matern52().stretch(vs.bnd(1.0, name=f"{i}/k_per_scale")).periodic(365))
            for i in range(m)
        ]
        noise = vs.pos(1e-2, name="noise")
        latent_noises = vs.pos(1e-2 * B.ones(m), name="latent_noises")
        # Construct orthogonal matrix and time it.
        time_h_start = time.time()
        u = Dense(vs.orth(shape=(p, m), name="u"))
        s_sqrt = Diagonal(vs.pos(shape=(m,), name="s_sqrt"))
        dur_h = time.time() - time_h_start
        return OILMM(kernels, u, s_sqrt, noise, latent_noises), dur_h

    # Grid of data sizes (n) and latent-process counts (m) to benchmark.
    ns = [100, 200, 300]
    ms = [5, 25, 50, 75, 100, 125, 150, 175, 200, 225, 247]
    for n in ns:
        for m in ms:
            hs = []
            totals = []
            percs = []
            vs = Vars(torch.float64)
            # 21 repetitions; the first is a warm-up.
            for _ in range(21):
                time_total_start = time.time()
                model, dur_h = construct_model(vs, m)
                model.logpdf(torch.tensor(x[:n]), torch.tensor(y[:n]))
                dur_total = time.time() - time_total_start
                totals.append(dur_total)
                hs.append(dur_h)
                percs.append(dur_h / dur_total * 100)
            # Discard the first run.
            # NOTE(review): only `percs` drops the warm-up run; `totals` and
            # `hs` still include it — confirm whether that is intentional.
            percs = percs[1:]
            wbml.out.kv("n", n)
            wbml.out.kv("m", m)
            # Report mean and twice the standard error for each quantity.
            with wbml.out.Section("Total"):
                wbml.out.kv("Mean", np.mean(totals))
                wbml.out.kv("Error", 2 * np.std(totals) / np.sqrt(len(totals)))
            with wbml.out.Section("H"):
                wbml.out.kv("Mean", np.mean(hs))
                wbml.out.kv("Error", 2 * np.std(hs) / np.sqrt(len(hs)))
            with wbml.out.Section("Percentage"):
                wbml.out.kv("Mean", np.mean(percs))
                wbml.out.kv("Error", 2 * np.std(percs) / np.sqrt(len(percs)))
| 2,722 | 32.207317 | 86 | py |
oilmm | oilmm-master/experiments/simulators.py | import argparse
import lab.torch as B
import numpy as np
import torch
import wbml.plot
from matrix import Dense, Diagonal, Kronecker
from oilmm import OILMM, Normaliser
from stheno.torch import Matern52 as Mat52
from varz import Vars
from varz.torch import minimise_l_bfgs_b
from wbml.data.cmip5 import load
from wbml.experiment import WorkingDirectory
# Experiment script: fit an OILMM to climate-simulator data, with a mixing
# matrix that is the Kronecker product of a simulator part and a spatial part.
if __name__ == "__main__":
    # Parse script arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i", type=int, default=1_000, help="Number of optimisation iterations."
    )
    parser.add_argument("-n", type=int, default=10_000, help="Number of time points.")
    parser.add_argument(
        "-mr", type=int, default=10, help="Number of latent processes for space."
    )
    parser.add_argument(
        "-ms", type=int, default=5, help="Number of latent processes for simulators."
    )
    parser.add_argument(
        "--separable", action="store_true", help="Use a separable model."
    )
    args = parser.parse_args()
    # Determine suffix.
    if args.separable:
        suffix = "_separable"
    else:
        suffix = ""
    B.epsilon = 1e-8
    wbml.out.report_time = True
    wd = WorkingDirectory("_experiments", "simulators", log=f"log{suffix}.txt")
    # Load data.
    loc, temp, sims = load()
    # NOTE(review): this comprehension just copies the dict unchanged; possibly
    # a leftover from subsetting the simulators — confirm intent.
    sims = {k: v for k, v in list(sims.items())}
    x_data = np.array([(day - temp.index[0]).days for day in temp.index[: args.n]])
    y_data = np.concatenate([sim.to_numpy()[: args.n] for sim in sims.values()], axis=1)
    wbml.out.out("Data loaded")
    # Normalise training data.
    normaliser = Normaliser(y_data)
    y_data = normaliser.normalise(y_data)
    # Determine initialisation of spatial length scales.
    scales_init = 0.5 * np.array(loc.max() - loc.min())
    # Convert to PyTorch.
    loc = torch.tensor(np.array(loc), dtype=torch.float64)
    x_data = torch.tensor(x_data, dtype=torch.float64)
    y_data = torch.tensor(y_data, dtype=torch.float64)
    # Determine number of latent processes.
    m_r = args.mr
    m_s = args.ms
    m = m_r * m_s
    # Determine number of outputs.
    p_s = len(sims)
    p_r = loc.shape[0]
    p = p_s * p_r
    # Compute inducing point locations, assuming that inputs are time.
    n_ind = int(args.n / 60)  # One inducing point per two months.
    x_ind_init = B.linspace(x_data.min(), x_data.max(), n_ind)
    # Determine initialisation for covariance between sims: an equicorrelation
    # matrix with correlation `rho`, truncated to its top `m_s` eigenpairs.
    rho = 0.5
    u, s, _ = B.svd((1 - rho) * B.eye(p_s) + rho * B.ones(p_s, p_s))
    u_s_init = u[:, :m_s]
    s_sqrt_s_init = B.sqrt(s[:m_s])
    vs = Vars(torch.float64)
    def construct_model(vs):
        """Build the OILMM from the variable container `vs`."""
        if args.separable:
            # Copy same kernel `m` times.
            kernel = [Mat52().stretch(vs.bnd(6 * 30, lower=60, name="k_scale"))]
            kernels = kernel * m
        else:
            # Parametrise different kernels.
            kernels = [
                Mat52().stretch(vs.bnd(6 * 30, lower=60, name=f"{i}/k_scale"))
                for i in range(m)
            ]
        noise = vs.bnd(1e-2, name="noise")
        latent_noises = vs.bnd(1e-2 * B.ones(m), name="latent_noises")
        # Construct component of the mixing matrix over simulators.
        u = vs.orth(init=u_s_init, shape=(p_s, m_s), name="sims/u")
        s_sqrt = vs.bnd(init=s_sqrt_s_init, shape=(m_s,), name="sims/s_sqrt")
        u_s = Dense(u)
        s_sqrt_s = Diagonal(s_sqrt)
        # Construct components of the mixing matrix over space from a
        # covariance.
        scales = vs.bnd(init=scales_init, name="space/scales")
        k = Mat52().stretch(scales)
        u, s, _ = B.svd(B.dense(k(loc)))
        u_r = Dense(u[:, :m_r])
        s_sqrt_r = Diagonal(B.sqrt(s[:m_r]))
        # Compose.
        s_sqrt = Kronecker(s_sqrt_s, s_sqrt_r)
        u = Kronecker(u_s, u_r)
        return OILMM(kernels, u, s_sqrt, noise, latent_noises)
    def objective(vs):
        """Negative log-marginal-likelihood with trainable inducing inputs."""
        x_ind = vs.unbounded(x_ind_init, name="x_ind")
        return -construct_model(vs).logpdf(x_data, y_data, x_ind=x_ind)
    minimise_l_bfgs_b(objective, vs, trace=True, iters=args.i)
    # Print variables.
    vs.print()
    def cov_to_corr(k):
        """Convert a covariance matrix to the corresponding correlation matrix."""
        std = B.sqrt(B.diag(k))
        return k / std[:, None] / std[None, :]
    # Compute correlations between simulators.
    u = Dense(vs["sims/u"])
    s_sqrt = Diagonal(vs["sims/s_sqrt"])
    k = u @ s_sqrt @ s_sqrt @ u.T
    # NOTE(review): `std` below is unused; `cov_to_corr` recomputes it.
    std = B.sqrt(B.diag(k))
    corr_learned = cov_to_corr(k)
    # Compute empirical correlations.
    all_obs = np.concatenate(
        [sim.to_numpy()[: args.n].reshape(-1, 1) for sim in sims.values()], axis=1
    )
    corr_empirical = cov_to_corr(np.cov(all_obs.T))
    # Compute predictions for latent processes.
    model = construct_model(vs)
    model = model.condition(x_data, y_data, x_ind=vs["x_ind"])
    x_proj, y_proj, _, _ = model.project(x_data, y_data)
    means, lowers, uppers = model.model.predict(x_proj)
    # Save for processing.
    wd.save(
        B.to_numpy(
            {
                "n": args.n,
                "m": m,
                "p": p,
                "m_r": m_r,
                "m_s": m_s,
                "x_proj": x_proj,
                "y_proj": y_proj,
                "means": means,
                "lowers": lowers,
                "uppers": uppers,
                "learned_parameters": {name: vs[name] for name in vs.names},
                "corr_learned": corr_learned,
                "corr_empirical": corr_empirical,
            }
        ),
        f"results_mr{m_r}_ms{m_s}{suffix}.pickle",
    )
| 5,569 | 31.011494 | 88 | py |
oilmm | oilmm-master/experiments/temperature.py | import argparse
import lab.torch as B
import numpy as np
import torch
import wbml.plot
from matrix import Dense, Diagonal
from oilmm import OILMM, Normaliser
from stheno import Matern52
from varz import Vars
from varz.torch import minimise_l_bfgs_b
from wbml.data.cmip5 import load
from wbml.experiment import WorkingDirectory
# Experiment script: fit an OILMM to smoothed/subsampled temperature data and
# report RMSE, log-marginal-likelihood and posterior predictive log-probability.
if __name__ == "__main__":
    # Parse arguments of script.
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", type=int, default=13 * 19)
    args = parser.parse_args()
    B.epsilon = 1e-6
    wbml.out.report_time = True
    wd = WorkingDirectory("_experiments", f"temperature_{args.m}")
    loc, temp, _ = load()
    # Smooth and subsample temperature data.
    temp = temp.rolling(window=31, center=True, min_periods=1, win_type="hamming")
    temp = temp.mean().iloc[::31, :]
    # Create train and test splits
    x = np.array([(day - temp.index[0]).days for day in temp.index])
    y = np.array(temp)
    # Divide into training and test set.
    x_train = x[:250]
    y_train = y[:250]
    x_test = x[250:350]
    y_test = y[250:350]
    # Perform normalisation.
    normaliser = Normaliser(y_train)
    y_train_norm = normaliser.normalise(y_train)
    # Determine initialisation of spatial length scales.
    scales_init = np.maximum(0.2 * np.array(loc.max() - loc.min()), 1)
    # Convert to PyTorch.
    loc = torch.tensor(np.array(loc))
    # NOTE(review): `p` is not used below — possibly leftover; confirm.
    p = B.shape(y)[1]
    m = args.m
    vs = Vars(torch.float64)
    def construct_model(vs):
        """Build the OILMM from the variable container `vs`."""
        # One smooth + periodic (yearly) kernel per latent process.
        kernels = [
            vs.pos(0.5, name=f"{i}/k_var")
            * Matern52().stretch(vs.bnd(2 * 30, name=f"{i}/k_scale"))
            + vs.pos(0.5, name=f"{i}/k_per_var")
            * (Matern52().stretch(vs.bnd(1.0, name=f"{i}/k_per_scale")).periodic(365))
            for i in range(m)
        ]
        latent_noises = vs.pos(1e-2 * B.ones(m), name="latent_noises")
        noise = vs.pos(1e-2, name="noise")
        # Construct components of mixing matrix from a covariance over
        # outputs.
        variance = vs.pos(1, name="h/variance")
        scales = vs.pos(init=scales_init, name="h/scales")
        k = variance * Matern52().stretch(scales)
        u, s, _ = B.svd(B.dense(B.reg(k(loc))))
        u = Dense(u[:, :m])
        s_sqrt = Diagonal(B.sqrt(s[:m]))
        return OILMM(kernels, u, s_sqrt, noise, latent_noises)
    def objective(vs):
        """Negative log-marginal-likelihood of the normalised training data."""
        return -construct_model(vs).logpdf(
            torch.tensor(x_train), torch.tensor(y_train_norm)
        )
    # Perform optimisation.
    minimise_l_bfgs_b(objective, vs, trace=True, iters=1000)
    # Print variables.
    vs.print()
    # Predict.
    model = construct_model(vs)
    model = model.condition(torch.tensor(x_train), torch.tensor(y_train_norm))
    means, lowers, uppers = B.to_numpy(model.predict(x_test))
    # Compute RMSE.
    wbml.out.kv("RMSE", B.mean((normaliser.unnormalise(means) - y_test) ** 2) ** 0.5)
    # Compute LML. The normalisation log-determinant corrects the density for
    # the change of variables introduced by the normaliser.
    lml = -objective(vs) + normaliser.normalise_logdet(y_train)
    wbml.out.kv("LML", lml / B.length(y_train))
    # Compute PPLP.
    logprob = model.logpdf(
        torch.tensor(x_test), torch.tensor(normaliser.normalise(y_test))
    )
    logdet = normaliser.normalise_logdet(y_test)
    pplp = logprob + logdet
    wbml.out.kv("PPLP", pplp / B.length(y_test))
| 3,302 | 29.869159 | 86 | py |
oilmm | oilmm-master/oilmm/jax.py | # noinspection PyUnresolvedReferences
import stheno.jax
# noinspection PyUnresolvedReferences
from . import *
| 111 | 17.666667 | 37 | py |
oilmm | oilmm-master/oilmm/torch.py | # noinspection PyUnresolvedReferences
import stheno.torch
# noinspection PyUnresolvedReferences
from . import *
| 113 | 18 | 37 | py |
stellargraph | stellargraph-master/stellargraph/losses.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public API of this module.
__all__ = [
    "graph_log_likelihood",
    "SelfAdversarialNegativeSampling",
]
import tensorflow as tf
from .core.experimental import experimental
@experimental(reason="lack of unit tests", issues=[804])
def graph_log_likelihood(batch_adj, wys_output):
    """
    Compute the graph log likelihood loss function as in https://arxiv.org/abs/1710.09599.
    Unlike most Keras losses, this does not compare predictions against targets directly:
    ``wys_output`` packs both the embedding outer-product scores and the attentive expected
    random walks, and rows of the adjacency matrix in ``batch_adj`` are used to measure how
    well the node embeddings capture the graph structure.
    .. seealso: The :class:`.WatchYourStep` model, for which this loss function is designed.
    Args:
        batch_adj: tensor with shape ``batch_rows x 1 x num_nodes`` containing rows of the adjacency matrix
        wys_output: tensor with shape ``batch_rows x 2 x num_nodes``, the concatenation of the
            attentive expected random walk (``batch_rows x 1 x num_nodes``) and the embedding
            outer product scores (``batch_rows x 1 x num_nodes``).
    Returns:
        the graph log likelihood loss for the batch
    """
    # Split the stacked output back into its two components.
    walk_counts = tf.gather(wys_output, [0], axis=1)
    logits = tf.gather(wys_output, [1], axis=1)
    # 1.0 exactly where there is no edge in this row of the adjacency matrix.
    non_edge_mask = tf.cast(batch_adj == 0, "float32")
    log_sig = tf.math.log_sigmoid(logits)
    # log(1 - sigmoid(x)) == log_sigmoid(x) - x, which is numerically stable.
    log_one_minus_sig = log_sig - logits
    per_entry = -(walk_counts * log_sig + non_edge_mask * log_one_minus_sig)
    total = tf.math.reduce_sum(tf.abs(per_entry))
    return tf.expand_dims(total, 0)
class SelfAdversarialNegativeSampling(tf.keras.losses.Loss):
    """
    Computes the self-adversarial binary cross entropy for negative sampling, from [1].
    [1] Z. Sun, Z.-H. Deng, J.-Y. Nie, and J. Tang, “RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space,” `arXiv:1902.10197 <http://arxiv.org/abs/1902.10197>`_
    Args:
        temperature (float, optional): a scaling factor for the weighting of negative samples
    """
    def __init__(
        self, temperature=1.0, name="self_adversarial_negative_sampling",
    ):
        # Scaling applied to the sigmoid scores before the softmax weighting.
        self._temperature = temperature
        super().__init__(name=name)
    def call(self, labels, logit_scores):
        """
        Args:
            labels: tensor of integer labels for each row, either 1 for a true sample, or any value <= 0 for negative samples. Negative samples with identical labels are combined for the softmax normalisation.
            logit_scores: tensor of scores for each row in logits
        Returns:
            tensor of per-example losses, reduced (mean) over the last axis
        """
        scores = tf.math.sigmoid(logit_scores)
        # NOTE(review): the check is against int32 but the cast is to int64 —
        # inconsistent, though unsorted_segment_sum accepts both; confirm intent.
        if labels.dtype != tf.int32:
            labels = tf.cast(labels, tf.int64)
        # Negate so the negative-sample labels (<= 0) become valid segment ids
        # (>= 0); the true samples (label 1) map to segment -1 and are ignored
        # by the segment sum.
        flipped_labels = -labels
        exp_scores = tf.math.exp(self._temperature * scores)
        # Softmax denominator per group of negative samples.
        sums = tf.math.unsorted_segment_sum(
            exp_scores, flipped_labels, tf.reduce_max(flipped_labels) + 1
        )
        denoms = tf.gather(sums, tf.maximum(flipped_labels, 0))
        # adversarial sampling shouldn't influence the gradient/update
        negative_weights = tf.stop_gradient(exp_scores / denoms)
        # True samples: plain -log σ(score); negatives: weighted -log σ(-score).
        loss_elems = tf.where(
            labels > 0,
            -tf.math.log_sigmoid(logit_scores),
            -tf.math.log_sigmoid(-logit_scores) * negative_weights,
        )
        return tf.reduce_mean(loss_elems, axis=-1)
| 4,094 | 36.916667 | 209 | py |
stellargraph | stellargraph-master/stellargraph/calibration.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Calibration for classification, binary and multi-class, models.
"""
__all__ = [
"IsotonicCalibration",
"TemperatureCalibration",
"expected_calibration_error",
"plot_reliability_diagram",
]
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.isotonic import IsotonicRegression
from sklearn.linear_model import LogisticRegression
def expected_calibration_error(prediction_probabilities, accuracy, confidence):
    """
    Calculate the expected calibration error as defined in
    the paper On Calibration of Modern Neural Networks, C. Guo, et. al., ICML, 2017.
    It is assumed that, for a validation dataset, the prediction probabilities have
    been calculated for each point in the dataset and are given in
    ``prediction_probabilities``.
    .. seealso::
        `Examples using calibration <https://stellargraph.readthedocs.io/en/stable/demos/calibration/index.html>`__.
        Related functionality: :func:`.plot_reliability_diagram`, :class:`.IsotonicCalibration`, :class:`.TemperatureCalibration`.
    Args:
        prediction_probabilities (numpy array): The predicted probabilities.
        accuracy (numpy array): Per-bin proportion of correctly classified samples;
            the i-th entry corresponds to the i-th bin.
        confidence (numpy array): Per-bin average prediction probability over all
            samples assigned to that bin.
    Returns:
        float: The expected calibration error.
    """
    # All three inputs must be numpy arrays; reject anything else up front with
    # a message naming the offending parameter.
    for parameter_name, value in (
        ("prediction_probabilities", prediction_probabilities),
        ("accuracy", accuracy),
        ("confidence", confidence),
    ):
        if not isinstance(value, np.ndarray):
            raise ValueError(
                "Parameter {} must be type numpy.ndarray but given object of type {}".format(
                    parameter_name, type(value).__name__
                )
            )
    if len(accuracy) != len(confidence):
        raise ValueError(
            "Arrays accuracy and confidence should have the same size but instead received {} and {} respectively.".format(
                len(accuracy), len(confidence)
            )
        )
    n_bins = len(accuracy)
    n_samples = len(prediction_probabilities)
    # Per-bin sample counts over the unit interval; only the counts are needed.
    bin_counts = np.histogram(a=prediction_probabilities, range=(0, 1), bins=n_bins)[0]
    # Weighted sum of the per-bin |accuracy - confidence| gaps.
    ece = 0
    for bin_count, gap in zip(bin_counts, np.abs(accuracy - confidence)):
        ece = ece + (bin_count / n_samples) * gap
    return ece
def plot_reliability_diagram(calibration_data, predictions, ece=None, filename=None):
    """
    Plot a reliability diagram: one calibration curve per class on the top axes, plus a
    histogram of the prediction probabilities on the bottom axes.
    .. seealso::
        `Examples using calibration <https://stellargraph.readthedocs.io/en/stable/demos/calibration/index.html>`__.
        Related functionality: :func:`.expected_calibration_error`, :class:`.IsotonicCalibration`, :class:`.TemperatureCalibration`.
    Args:
        calibration_data (list): The calibration data as a list of 2-tuples of
            :class:`numpy.ndarray`, each holding the fraction of positives and the mean
            predicted values for the true and predicted class labels.
        predictions (np.ndarray): The probabilistic predictions of the classifier for each
            sample in the dataset used for diagnosing miscalibration.
        ece (None or list of float): If not None, the expected calibration error for each
            class, shown in the plot title.
        filename (str or None): If not None, the figure is saved on disk in the given filename.
    """
    if not isinstance(calibration_data, list):
        raise ValueError(
            "Parameter calibration_data should be list of 2-tuples but received type {}".format(
                type(calibration_data).__name__
            )
        )
    if not isinstance(predictions, np.ndarray):
        raise ValueError(
            "Parameter predictions should be of type numpy.ndarray but received type {}".format(
                type(predictions).__name__
            )
        )
    if ece is not None and not isinstance(ece, list):
        raise ValueError(
            "Parameter ece should be None or list of floating point numbers but received type {}".format(
                type(ece).__name__
            )
        )
    if filename is not None and not isinstance(filename, str):
        raise ValueError(
            "Parameter filename should be None or str type but received type {}".format(
                type(filename).__name__
            )
        )
    figure = plt.figure(figsize=(12, 8))
    curve_axes = plt.subplot2grid((6, 1), (0, 0), rowspan=3)
    hist_axes = plt.subplot2grid((6, 1), (4, 0))
    if ece is not None:
        ece_text = ",".join(format(e, " 0.4f") for e in ece)
    for class_idx, (positive_fraction, mean_predicted) in enumerate(calibration_data):
        curve_axes.plot(mean_predicted, positive_fraction, "s-", alpha=1.0)
        if ece is not None:
            curve_axes.set_title("Calibration Curve (ECE={})".format(ece_text))
        curve_axes.set_xlabel("Mean Predicted Value", fontsize=16)
        curve_axes.set_ylabel("Fraction of Positives", fontsize=16)
        # Diagonal of perfect calibration.
        curve_axes.plot([0, 1], [0, 1], "g--")
        hist_axes.hist(
            predictions[:, class_idx], range=(0, 1), bins=10, histtype="step", lw=2
        )
        hist_axes.set_xlabel("Bin", fontsize=16)
        hist_axes.set_ylabel("Count", fontsize=16)
    if filename is not None:
        figure.savefig(filename, bbox_inches="tight")
class TemperatureCalibration(object):
    """
    A class for temperature calibration for binary and multi-class classification problems.
    For binary classification, Platt Scaling is used for calibration. Platt Scaling was
    proposed in the paper Probabilistic outputs for support vector machines and comparisons to regularized
    likelihood methods, J. C. Platt, Advances in large margin classifiers, 10(3): 61-74, 1999.
    For multi-class classification, Temperature Calibration is used. It is an extension of Platt Scaling
    and it was proposed in the paper On Calibration of Modern Neural Networks, C. Guo et. al., ICML, 2017.
    In Temperature Calibration, a classifier's non-probabilistic outputs, i.e., logits, are
    scaled by a trainable parameter called Temperature. The softmax is applied to the rescaled
    logits to calculate the probabilistic output. As noted in the cited paper, Temperature
    Scaling does not change the maximum of the softmax function so the classifier's prediction
    remain the same.
    .. seealso::
        `Examples using calibration <https://stellargraph.readthedocs.io/en/stable/demos/calibration/index.html>`__.
        Related functionality: :func:`.expected_calibration_error`, :func:`.plot_reliability_diagram`, :class:`.IsotonicCalibration`.
    """
    def __init__(self, epochs=1000):
        # Maximum number of optimisation epochs for temperature scaling.
        self.epochs = epochs
        # Number of classes; determined from the training data in `fit`.
        self.n_classes = None
        self.temperature = 1.0  # default is no scaling
        # Per-epoch training history rows: [train_cost, (val_cost,) temperature].
        self.history = []
        self.early_stopping = False
        self.lr = None  # The logistic regression model for Platt scaling
    def _fit_temperature_scaling(self, x_train, y_train, x_val=None, y_val=None):
        """
        Train the calibration model using Temperature Scaling.
        If validation data is given, then training stops when the validation accuracy starts increasing.
        Args:
            x_train (numpy array): The training data that should be a classifier's non-probabilistic outputs. It should
                have shape (N, C) where N is the number of samples and C is the number of classes.
            y_train (numpy array): The training data class labels. It should have shape (N, C) where N is the number
                of samples and C is the number of classes and the class labels are one-hot encoded.
            x_val (numpy array or None): The validation data used for early stopping. It should have shape (M, C) where
                M is the number of validation samples and C is the number of classes and the class labels are one-hot
                encoded.
            y_val (numpy array or None): The validation data class labels. It should have shape (M, C) where M is the
                number of validation samples and C is the number of classes and the class labels are one-hot encoded.
        """
        # The single trainable temperature parameter, initialised to 1 (no scaling).
        T = tf.Variable(tf.ones(shape=(1,)), name="T")
        def cost(T, x, y):
            # Cross-entropy of the temperature-scaled logits.
            scaled_logits = tf.multiply(name="z", x=x, y=1.0 / T)
            cost_value = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=scaled_logits, labels=y)
            )
            return cost_value
        def grad(T, x, y):
            # Cost together with its gradient with respect to T.
            with tf.GradientTape() as tape:
                cost_value = cost(T, x, y)
            return cost_value, tape.gradient(cost_value, T)
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        for epoch in range(self.epochs):
            train_cost, grads = grad(T, x_train, y_train)
            optimizer.apply_gradients(zip([grads], [T]))
            if self.early_stopping:
                val_cost = cost(T, x_val, y_val)
                # Stop as soon as the validation cost increases over the
                # previous epoch's value (history column 1 holds val cost).
                if (len(self.history) > 0) and (val_cost > self.history[-1][1]):
                    break
                else:  # keep going
                    self.history.append([train_cost, val_cost, T.numpy()[0]])
            else:
                self.history.append([train_cost, T.numpy()[0]])
        self.history = np.array(self.history)
        # The temperature is the last column of the last history row.
        self.temperature = self.history[-1, -1]
    def _fit_platt_scaling(self, x_train, y_train):
        """
        Helper method for calibration of a binary classifier using Platt Scaling.
        Args:
            x_train (numpy array): The training data that should be a classifier's non-probabilistic outputs. It
                should have shape (N,) where N is the number of training samples.
            y_train (numpy array): The training data class labels. It should have shape (N,) where N is the number
                of training samples.
        """
        self.lr = LogisticRegression(fit_intercept=True, verbose=False)
        self.lr.fit(x_train, y_train)
    def fit(self, x_train, y_train, x_val=None, y_val=None):
        """
        Train the calibration model.
        For temperature scaling of a multi-class classifier, If validation data is given, then
        training stops when the validation accuracy starts increasing. Validation data are ignored for Platt scaling
        Args:
            x_train (numpy array): The training data that should be a classifier's non-probabilistic outputs. For
                calibrating a binary classifier it should have shape (N,) where N is the number of training samples.
                For calibrating a multi-class classifier, it should have shape (N, C) where N is the number of samples
                and C is the number of classes.
            y_train (numpy array): The training data class labels. For
                calibrating a binary classifier it should have shape (N,) where N is the number of training samples.
                For calibrating a multi-class classifier, it should have shape (N, C) where N is the number of samples
                and C is the number of classes and the class labels are one-hot encoded.
            x_val (numpy array or None): The validation data used only for calibrating multi-class classification
                models. It should have shape (M, C) where M is the number of validation samples and C is the number of
                classes and the class labels are one-hot encoded.
                that should be the classifier's non-probabilistic outputs.
            y_val (numpy array or None): The validation data class labels used only for calibrating multi-class
                classification models. It should have shape (M, C) where M is the number of validation samples and C
                is the number of classes and the class labels are one-hot encoded.
        """
        if not isinstance(x_train, np.ndarray) or not isinstance(y_train, np.ndarray):
            raise ValueError("x_train and y_train must be numpy arrays")
        if (x_val is not None and y_val is None) or (
            x_val is None and y_val is not None
        ):
            raise ValueError(
                "Either both x_val and y_val should be None or both should be numpy arrays."
            )
        if x_val is not None and y_val is not None:
            if not isinstance(x_val, np.ndarray) or not isinstance(y_val, np.ndarray):
                # NOTE(review): this message names x_train/y_train but the check
                # is on x_val/y_val — the message text looks wrong.
                raise ValueError("x_train and y_train must be numpy arrays")
            self.early_stopping = True
            print(
                "Using Early Stopping based on performance evaluated on given validation set."
            )
        # 1-D logits indicate a binary problem; otherwise one column per class.
        if len(x_train.shape) == 1:
            self.n_classes = 1
        else:
            self.n_classes = x_train.shape[1]
        if self.n_classes > 1:
            self._fit_temperature_scaling(x_train, y_train, x_val, y_val)
        else:
            self._fit_platt_scaling(x_train.reshape(-1, 1), y_train.reshape(-1, 1))
    def plot_training_history(self):
        """
        Helper function for plotting the training history.
        """
        fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, figsize=(12, 5))
        ax1.plot(self.history[:, 0], label="Training")
        if self.history.shape[1] == 3:  # has validation cost
            ax1.plot(self.history[:, 1], label="Validation")
        ax1.set_title("Cost")
        ax1.set_xlabel("Epoch")
        ax1.set_ylabel("Cost")
        ax1.legend(loc="upper right")
        ax2.plot(self.history[:, -1])
        ax2.set_title("Temperature")
        ax2.set_xlabel("Epoch")
        ax2.set_ylabel("Temperature")
    def predict(self, x):
        """
        This method calibrates the given data using the learned temperature. It
        scales each logit by the temperature, exponentiates the results, and finally
        normalizes the scaled values such that their sum is 1.
        Args:
            x (numpy.ndarray): The logits. For binary classification problems, it should have dimensionality (N,) where
                N is the number of samples to calibrate. For multi-class problems, it should have dimensionality (N, C)
                where C is the number of classes.
        Returns:
            numpy array: The calibrated probabilities.
        """
        if not isinstance(x, np.ndarray):
            raise ValueError(
                "x should be numpy.ndarray but received {}".format(type(x).__name__)
            )
        if len(x.shape) > 1 and x.shape[1] != self.n_classes:
            # NOTE(review): the message reports len(x) (number of samples) where
            # the dimensionality x.shape[1] seems intended — confirm.
            raise ValueError(
                "Expecting input vector of dimensionality {} but received {}".format(
                    self.n_classes, len(x)
                )
            )
        x_ = x
        if self.n_classes == 1:
            # Binary case: Platt scaling via the fitted logistic regression;
            # column 1 is the probability of the positive class.
            return self.lr.predict_proba(X=x)[:, 1].reshape(-1, 1)
        else:
            # Multi-class case: softmax over the temperature-scaled logits.
            scaled_prediction = x_ / self.temperature
            return np.exp(scaled_prediction) / np.sum(
                np.exp(scaled_prediction), axis=-1, keepdims=True
            )
class IsotonicCalibration(object):
    """
    Isotonic regression calibration for the probabilistic outputs of a binary or
    multi-class classifier. One isotonic regression model is fitted per class; for
    multi-class problems the per-class calibrated scores are renormalised to sum to one.
    .. seealso::
        `Examples using calibration <https://stellargraph.readthedocs.io/en/stable/demos/calibration/index.html>`__.
        Related functionality: :func:`.expected_calibration_error`, :func:`.plot_reliability_diagram`, :class:`.TemperatureCalibration`.
    """
    def __init__(self):
        # Number of classes; determined from the training data in `fit`.
        self.n_classes = None
        # One fitted IsotonicRegression per class.
        self.regressors = []
    def fit(self, x_train, y_train):
        """
        Train one isotonic regression model per class on the given data.
        Args:
            x_train (numpy array): The classifier's probabilistic outputs, of shape N × C
                where N is the number of training samples and C the number of classes.
            y_train (numpy array): The training class labels: shape (N,) for binary
                problems, or one-hot encoded shape (N, C) for multi-class ones.
        """
        if not isinstance(x_train, np.ndarray) or not isinstance(y_train, np.ndarray):
            raise ValueError(
                "x_train and y_train should be type numpy.ndarray but received {} and {}".format(
                    type(x_train).__name__, type(y_train).__name__
                )
            )
        # 1-D input indicates a binary problem; otherwise one column per class.
        self.n_classes = 1 if x_train.ndim == 1 else x_train.shape[1]
        if self.n_classes == 1:
            self.regressors.append(IsotonicRegression(out_of_bounds="clip"))
            # A binary problem may still arrive as an (N, 1) array.
            inputs = x_train.reshape(-1) if x_train.ndim > 1 else x_train
            self.regressors[-1].fit(X=inputs.astype(np.double), y=y_train)
        else:
            for class_idx in range(self.n_classes):
                self.regressors.append(IsotonicRegression(out_of_bounds="clip"))
                self.regressors[-1].fit(
                    X=x_train[:, class_idx].astype(np.double), y=y_train[:, class_idx]
                )
    def predict(self, x):
        """
        Calibrate the given classifier outputs.
        For multi-class classification, each class's probabilities are first scaled by
        the corresponding isotonic regression model and then normalised to sum to 1.
        Args:
            x (numpy array): The values to calibrate: shape (N,) for binary problems, or
                (N, C) for multi-class ones, where N is the number of samples and C the
                number of classes.
        Returns:
            numpy array: The calibrated probabilities with shape (N, C).
        """
        if not isinstance(x, np.ndarray):
            raise ValueError(
                "x should be numpy.ndarray but received {}".format(type(x).__name__)
            )
        if self.n_classes > 1 and x.shape[1] != self.n_classes:
            raise ValueError(
                "Expecting input vector of dimensionality {} but received {}".format(
                    self.n_classes, len(x)
                )
            )
        if self.n_classes == 1:
            x = x.reshape(-1, 1)
        # One calibrated column per class, then stack back to (N, C).
        per_class = [
            self.regressors[class_idx].transform(T=x[:, class_idx])
            for class_idx in range(self.n_classes)
        ]
        calibrated = np.transpose(np.array(per_class))
        if self.n_classes > 1:
            calibrated = calibrated / np.sum(calibrated, axis=-1, keepdims=True)
        return calibrated
| 20,128 | 41.918977 | 135 | py |
stellargraph | stellargraph-master/stellargraph/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Names exported from the top-level `stellargraph` package.
__all__ = [
    "data",
    "datasets",
    "calibration",
    "ensemble",
    "interpretability",
    "losses",
    "layer",
    "mapper",
    "utils",
    "custom_keras_layers",
    "StellarDiGraph",
    "StellarGraph",
    "GraphSchema",
    "__version__",
]
# Version
from .version import __version__
# Import modules
from stellargraph import (
data,
calibration,
datasets,
ensemble,
interpretability,
losses,
layer,
mapper,
utils,
)
# Top-level imports
from stellargraph.core.graph import StellarGraph, StellarDiGraph
from stellargraph.core.indexed_array import IndexedArray
from stellargraph.core.schema import GraphSchema
import warnings
# Custom layers for keras deserialization (this is computed from a manual list to make it clear
# what's included)
# the `link_inference` module is shadowed in `sg.layer` by the `link_inference` function, so these
# layers need to be manually imported
from .layer.link_inference import (
LinkEmbedding as _LinkEmbedding,
LeakyClippedLinear as _LeakyClippedLinear,
)
custom_keras_layers = {
class_.__name__: class_
for class_ in [
layer.GraphConvolution,
layer.ClusterGraphConvolution,
layer.GraphAttention,
layer.GraphAttentionSparse,
layer.SqueezedSparseConversion,
layer.graphsage.MeanAggregator,
layer.graphsage.MaxPoolingAggregator,
layer.graphsage.MeanPoolingAggregator,
layer.graphsage.AttentionalAggregator,
layer.hinsage.MeanHinAggregator,
layer.rgcn.RelationalGraphConvolution,
layer.ppnp.PPNPPropagationLayer,
layer.appnp.APPNPPropagationLayer,
layer.misc.GatherIndices,
layer.deep_graph_infomax.DGIDiscriminator,
layer.deep_graph_infomax.DGIReadout,
layer.graphsage.GraphSAGEAggregator,
layer.knowledge_graph.ComplExScore,
layer.knowledge_graph.DistMultScore,
layer.knowledge_graph.RotatEScore,
layer.knowledge_graph.RotHEScore,
layer.preprocessing_layer.GraphPreProcessingLayer,
layer.preprocessing_layer.SymmetricGraphPreProcessingLayer,
layer.watch_your_step.AttentiveWalk,
layer.sort_pooling.SortPooling,
layer.gcn_lstm.FixedAdjacencyGraphConvolution,
_LinkEmbedding,
_LeakyClippedLinear,
]
}
"""
A dictionary of the ``tensorflow.keras`` layers defined by StellarGraph.
When Keras models using StellarGraph layers are saved, they can be loaded by passing this value to
the ``custom_objects`` parameter to model loading functions like
``tensorflow.keras.models.load_model``.
Example::
import stellargraph as sg
from tensorflow import keras
keras.models.load_model("/path/to/model", custom_objects=sg.custom_keras_layers)
"""
def _top_level_deprecation_warning(name, path):
    # All of the top-level deprecation shims funnel through here so the message
    # format stays consistent.
    message = (
        f"'{name}' is no longer available at the top-level. "
        f"Please use 'stellargraph.{path}.{name}' instead."
    )
    # stacklevel=3 attributes the warning to the caller of the shim rather than
    # to this helper or the shim itself.
    warnings.warn(message, DeprecationWarning, stacklevel=3)
# Backwards-compatible shims: these names used to live at the top level. Each
# emits a DeprecationWarning and delegates to the new location.
def expected_calibration_error(*args, **kwargs):
    _top_level_deprecation_warning("expected_calibration_error", "calibration")
    return calibration.expected_calibration_error(*args, **kwargs)
def plot_reliability_diagram(*args, **kwargs):
    _top_level_deprecation_warning("plot_reliability_diagram", "calibration")
    return calibration.plot_reliability_diagram(*args, **kwargs)
def Ensemble(*args, **kwargs):
    _top_level_deprecation_warning("Ensemble", "ensemble")
    return ensemble.Ensemble(*args, **kwargs)
def BaggingEnsemble(*args, **kwargs):
    _top_level_deprecation_warning("BaggingEnsemble", "ensemble")
    return ensemble.BaggingEnsemble(*args, **kwargs)
def TemperatureCalibration(*args, **kwargs):
    _top_level_deprecation_warning("TemperatureCalibration", "calibration")
    return calibration.TemperatureCalibration(*args, **kwargs)
def IsotonicCalibration(*args, **kwargs):
    _top_level_deprecation_warning("IsotonicCalibration", "calibration")
    return calibration.IsotonicCalibration(*args, **kwargs)
| 4,691 | 29.868421 | 98 | py |
stellargraph | stellargraph-master/stellargraph/ensemble.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Ensembles of graph neural network models, GraphSAGE, GCN, GAT, and HinSAGE, with optional bootstrap sampling of the
training data (implemented in the BaggingEnsemble class).
"""
from stellargraph.layer import *
__all__ = ["Ensemble", "BaggingEnsemble"]
import numpy as np
from tensorflow import keras as K
from tensorflow.keras.callbacks import EarlyStopping
import stellargraph as sg
class Ensemble(object):
    """
    The Ensemble class can be used to create ensembles of stellargraph's graph neural network algorithms including
    GCN, GraphSAGE, GAT, and HinSAGE. Ensembles can be used for training classification and regression problems for
    node attribute inference and link prediction.

    The Ensemble class can be used to create Naive ensembles.

    Naive ensembles add model diversity by random initialisation of the models' weights (before training) to
    different values. Each model in the ensemble is trained on the same training set of examples.

    .. seealso::

       Example using ensembles: `node classification <https://stellargraph.readthedocs.io/en/stable/demos/ensembles/ensemble-node-classification-example.html>`__.

       Related functionality: :class:`.BaggingEnsemble` for bootstrap sampling while training, in addition to random initialisation.
    """

    def __init__(self, model, n_estimators=3, n_predictions=3):
        """
        Args:
            model: A keras model.
            n_estimators (int): The number of estimators (aka models) in the ensemble.
            n_predictions (int): The number of predictions per query point per estimator
        """
        if not isinstance(model, K.Model):
            raise ValueError(
                "({}) model must be a Keras model received object of type {}".format(
                    type(self).__name__, type(model).__name__
                )
            )
        if n_estimators <= 0 or not isinstance(n_estimators, int):
            raise ValueError(
                "({}) n_estimators must be positive integer but received {}".format(
                    type(self).__name__, n_estimators
                )
            )
        if n_predictions <= 0 or not isinstance(n_predictions, int):
            raise ValueError(
                "({}) n_predictions must be positive integer but received {}".format(
                    type(self).__name__, n_predictions
                )
            )

        self.metrics_names = (
            None  # It will be set when the self.compile() method is called
        )
        self.models = []
        self.history = []
        self.n_estimators = n_estimators
        self.n_predictions = n_predictions
        # Patience (in epochs) used when fit(..., use_early_stopping=True).
        # NOTE: the attribute name keeps its historical spelling ("stoppping")
        # because callers may set it directly.
        self.early_stoppping_patience = 10

        # Create the ensemble from the given base model
        self._init_models(model)

    def _init_models(self, model):
        """
        This method creates an ensemble of models by cloning the given base model self.n_estimators times.

        All models have the same architecture but their weights are initialised with different (random) values.

        Args:
            model: A Keras model that is the base model for the ensemble.
        """
        # first copy is the given model
        self.models.append(model)
        # now clone the model self.n_estimators-1 times; clone_model copies the
        # architecture only, so each clone starts with freshly initialised weights.
        for _ in range(self.n_estimators - 1):
            self.models.append(K.models.clone_model(model))

    def layers(self, indx=None):
        """
        This method returns the layer objects for the model specified by the value of ``indx``.

        Args:
            indx (None or int): The index (starting at 0) of the model to return the layers for.
                If it is None, then the layers for the 0-th (or first) model are returned.

        Returns:
            list: The layers for the specified model.
        """
        if indx is not None and not isinstance(indx, (int,)):
            raise ValueError(
                "({}) indx should be None or integer type but received type {}".format(
                    type(self).__name__, type(indx).__name__
                )
            )
        if isinstance(indx, (int,)) and indx < 0:
            raise ValueError(
                "({}) indx must be greater than or equal to zero but received {}".format(
                    type(self).__name__, indx
                )
            )

        if indx is None and len(self.models) > 0:
            # Default is to return the layers for the first model
            return self.models[0].layers

        if len(self.models) > indx:
            return self.models[indx].layers
        else:
            # Error because index is out of bounds
            raise ValueError(
                "({}) indx {} is out of range 0 to {}".format(
                    type(self).__name__, indx, len(self.models)
                )
            )

    def compile(
        self,
        optimizer,
        loss=None,
        metrics=None,
        loss_weights=None,
        sample_weight_mode=None,
        weighted_metrics=None,
    ):
        """
        Method for configuring the model for training. It is a wrapper of the `keras.models.Model.compile` method for
        all models in the ensemble.

        For detailed descriptions of Keras-specific parameters consult the Keras documentation
        at https://keras.io/models/sequential/

        Args:
            optimizer (Keras optimizer or str): (Keras-specific parameter) The optimizer to use given either as an
                instance of a Keras optimizer or a string naming the optimiser of choice.
            loss (Keras function or str): (Keras-specific parameter) The loss function or string indicating the
                type of loss to use.
            metrics (list or dict): (Keras-specific parameter) List of metrics to be evaluated by each model in
                the ensemble during training and testing. It should be a list for a model with a single output. To
                specify different metrics for different outputs of a multi-output model, you could also pass a
                dictionary.
            loss_weights (None or list): (Keras-specific parameter) Optional list or dictionary specifying scalar
                coefficients (Python floats) to weight the loss contributions of different model outputs. The loss value
                that will be minimized by the model will then be the weighted sum of all individual losses, weighted by
                the loss_weights coefficients. If a list, it is expected to have a 1:1 mapping to the model's outputs.
                If a tensor, it is expected to map output names (strings) to scalar coefficients.
            sample_weight_mode (None, str, list, or dict): (Keras-specific parameter) If you need to do
                timestep-wise sample weighting (2D weights), set this to "temporal". None defaults to sample-wise
                weights (1D). If the model has multiple outputs, you can use a different sample_weight_mode on
                each output by passing a dictionary or a list of modes.
            weighted_metrics (list): (Keras-specific parameter) List of metrics to be evaluated and weighted by
                sample_weight or class_weight during training and testing.
        """
        for model in self.models:
            model.compile(
                optimizer=optimizer,
                loss=loss,
                metrics=metrics,
                loss_weights=loss_weights,
                sample_weight_mode=sample_weight_mode,
                weighted_metrics=weighted_metrics,
            )

        self.metrics_names = self.models[0].metrics_names  # assumes all models are same

    def fit(
        self,
        generator,
        steps_per_epoch=None,
        epochs=1,
        verbose=1,
        validation_data=None,
        validation_steps=None,
        class_weight=None,
        max_queue_size=10,
        workers=1,
        use_multiprocessing=False,
        shuffle=True,
        initial_epoch=0,
        use_early_stopping=False,
        early_stopping_monitor="val_loss",
    ):
        """
        This method trains the ensemble on the data specified by the generator. If validation data are given, then the
        training metrics are evaluated on these data and results printed on screen if verbose level is greater than 0.

        The method trains each model in the ensemble in series for the number of epochs specified. Training can
        also stop early with the best model as evaluated on the validation data, if use_early_stopping is set to True.

        For detailed descriptions of Keras-specific parameters consult the Keras documentation
        at https://keras.io/models/sequential/

        Args:
            generator: The generator object for training data. It should be one of type
                NodeSequence, LinkSequence, SparseFullBatchSequence, or FullBatchSequence.
            steps_per_epoch (None or int): (Keras-specific parameter) If not None, it specifies the number of steps
                to yield from the generator before declaring one epoch finished and starting a new epoch.
            epochs (int): (Keras-specific parameter) The number of training epochs.
            verbose (int): (Keras-specific parameter) The verbosity mode that should be 0 , 1, or 2 meaning silent,
                progress bar, and one line per epoch respectively.
            validation_data: A generator for validation data that is optional (None). If not None then, it should
                be one of type NodeSequence, LinkSequence, SparseFullBatchSequence, or FullBatchSequence.
            validation_steps (None or int): (Keras-specific parameter) If validation_generator is not None, then it
                specifies the number of steps to yield from the generator before stopping at the end of every epoch.
            class_weight (None or dict): (Keras-specific parameter) If not None, it should be a dictionary
                mapping class indices (integers) to a weight (float) value, used for weighting the loss function (during
                training only). This can be useful to tell the model to "pay more attention" to samples from an
                under-represented class.
            max_queue_size (int): (Keras-specific parameter) The maximum size for the generator queue.
            workers (int): (Keras-specific parameter) The maximum number of workers to use.
            use_multiprocessing (bool): (Keras-specific parameter) If True then use process based threading.
            shuffle (bool): (Keras-specific parameter) If True, then it shuffles the order of batches at the
                beginning of each training epoch.
            initial_epoch (int): (Keras-specific parameter) Epoch at which to start training (useful for resuming a
                previous training run).
            use_early_stopping (bool): If set to True, then early stopping is used when training each model
                in the ensemble. The default is False.
            early_stopping_monitor (str): The quantity to monitor for early stopping, e.g., 'val_loss',
                'val_weighted_acc'. It should be a valid Keras metric.

        Returns:
            list: It returns a list of Keras History objects each corresponding to one trained model in the ensemble.
        """
        if not isinstance(
            generator,
            (
                sg.mapper.NodeSequence,
                sg.mapper.LinkSequence,
                sg.mapper.FullBatchSequence,
                sg.mapper.SparseFullBatchSequence,
            ),
        ):
            # BUG FIX: the previous message mentioned a non-existent train_data
            # parameter and omitted SparseFullBatchSequence even though the
            # isinstance check above accepts it.
            raise ValueError(
                "({}) generator must be one of type NodeSequence, LinkSequence, "
                "FullBatchSequence, or SparseFullBatchSequence "
                "but received object of type {}".format(
                    type(self).__name__, type(generator).__name__
                )
            )

        self.history = []

        for model in self.models:
            # Build a fresh callback per model so no early-stopping state
            # (wait counter, best weights) can leak between fits; this also
            # matches BaggingEnsemble.fit.
            es_callback = None
            if use_early_stopping and validation_data is not None:
                es_callback = [
                    EarlyStopping(
                        monitor=early_stopping_monitor,
                        patience=self.early_stoppping_patience,
                        restore_best_weights=True,
                    )
                ]
            self.history.append(
                model.fit(
                    generator,
                    steps_per_epoch=steps_per_epoch,
                    epochs=epochs,
                    verbose=verbose,
                    callbacks=es_callback,
                    validation_data=validation_data,
                    validation_steps=validation_steps,
                    class_weight=class_weight,
                    max_queue_size=max_queue_size,
                    workers=workers,
                    use_multiprocessing=use_multiprocessing,
                    shuffle=shuffle,
                    initial_epoch=initial_epoch,
                )
            )

        return self.history

    def fit_generator(self, *args, **kwargs):
        """
        Deprecated: use :meth:`fit`.
        """
        warnings.warn(
            "'fit_generator' has been replaced by 'fit', to match tensorflow.keras.Model",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.fit(*args, **kwargs)

    def evaluate(
        self,
        generator,
        test_data=None,
        test_targets=None,
        max_queue_size=10,
        workers=1,
        use_multiprocessing=False,
        verbose=0,
    ):
        """
        Evaluates the ensemble on a data (node or link) generator. It makes `n_predictions` for each data point for each
        of the `n_estimators` and returns the mean and standard deviation of the predictions.

        For detailed descriptions of Keras-specific parameters consult the Keras documentation
        at https://keras.io/models/sequential/

        Args:
            generator: The generator object that, if test_data is not None, should be one of type
                GraphSAGENodeGenerator, HinSAGENodeGenerator, FullBatchNodeGenerator, GraphSAGELinkGenerator,
                or HinSAGELinkGenerator. However, if test_data is None, then generator should be one of type
                NodeSequence, LinkSequence, or FullBatchSequence.
            test_data (None or iterable): If not None, then it is an iterable, e.g. list, that specifies the node IDs
                to evaluate the model on.
            test_targets (None or iterable): If not None, then it is an iterable, e.g. list, that specifies the target
                values for the test_data.
            max_queue_size (int): (Keras-specific parameter) The maximum size for the generator queue.
            workers (int): (Keras-specific parameter) The maximum number of workers to use.
            use_multiprocessing (bool): (Keras-specific parameter) If True then use process based threading.
            verbose (int): (Keras-specific parameter) The verbosity mode that should be 0 or 1 with the former turning
                verbosity off and the latter on.

        Returns:
            tuple: The mean and standard deviation of the model metrics for the given data.
        """
        if test_data is not None and not isinstance(
            generator,
            (
                sg.mapper.GraphSAGENodeGenerator,
                sg.mapper.HinSAGENodeGenerator,
                sg.mapper.FullBatchNodeGenerator,
                sg.mapper.GraphSAGELinkGenerator,
                sg.mapper.HinSAGELinkGenerator,
            ),
        ):
            raise ValueError(
                "({}) generator parameter must be of type GraphSAGENodeGenerator, HinSAGENodeGenerator, FullBatchNodeGenerator, "
                "GraphSAGELinkGenerator, or HinSAGELinkGenerator. Received type {}".format(
                    type(self).__name__, type(generator).__name__
                )
            )
        elif test_data is None and not isinstance(
            # BUG FIX: this branch must only apply when no raw test_data was
            # supplied. Previously it was a bare `elif not isinstance(...)`,
            # which also rejected the valid combination of a node/link
            # *generator* plus test_data (a generator is never a Sequence, so
            # the elif always raised for that case).
            generator,
            (
                sg.mapper.NodeSequence,
                sg.mapper.LinkSequence,
                sg.mapper.FullBatchSequence,
                sg.mapper.SparseFullBatchSequence,
            ),
        ):
            raise ValueError(
                "({}) If test_data is None, generator must be one of type NodeSequence, "
                "LinkSequence, FullBatchSequence, or SparseFullBatchSequence "
                "but received object of type {}".format(
                    type(self).__name__, type(generator).__name__
                )
            )
        if test_data is not None and test_targets is None:
            raise ValueError("({}) test_targets not given.".format(type(self).__name__))

        data_generator = generator
        if test_data is not None:
            data_generator = generator.flow(test_data, test_targets)

        test_metrics = []
        for model in self.models:
            tm = []
            # Average the metrics over n_predictions repeated evaluations of
            # the same model (relevant for stochastic samplers/dropout).
            for _ in range(self.n_predictions):
                tm.append(
                    model.evaluate(
                        data_generator,
                        max_queue_size=max_queue_size,
                        workers=workers,
                        use_multiprocessing=use_multiprocessing,
                        verbose=verbose,
                    )  # Keras evaluate_generator returns a scalar
                )
            test_metrics.append(np.mean(tm, axis=0))

        # Return the mean and standard deviation of the metrics
        return np.mean(test_metrics, axis=0), np.std(test_metrics, axis=0)

    def evaluate_generator(self, *args, **kwargs):
        """
        Deprecated: use :meth:`evaluate`.
        """
        warnings.warn(
            "'evaluate_generator' has been replaced by 'evaluate', to match tensorflow.keras.Model",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.evaluate(*args, **kwargs)

    def predict(
        self,
        generator,
        predict_data=None,
        summarise=False,
        output_layer=None,
        max_queue_size=10,
        workers=1,
        use_multiprocessing=False,
        verbose=0,
    ):
        """
        This method generates predictions for the data produced by the given generator or alternatively the data
        given in parameter predict_data.

        For detailed descriptions of Keras-specific parameters consult the Keras documentation
        at https://keras.io/models/sequential/

        Args:
            generator: The generator object that, if predict_data is None, should be one of type
                GraphSAGENodeGenerator, HinSAGENodeGenerator, FullBatchNodeGenerator, GraphSAGELinkGenerator,
                or HinSAGELinkGenerator. However, if predict_data is not None, then generator should be one of type
                NodeSequence, LinkSequence, SparseFullBatchSequence, or FullBatchSequence.
            predict_data (None or iterable): If not None, then it is an iterable, e.g. list, that specifies the node IDs
                to make predictions for. If generator is of type FullBatchNodeGenerator then predict_data should be all
                the nodes in the graph since full batch approaches such as GCN and GAT can only be used to make
                predictions for all graph nodes.
            summarise (bool): If True, then the mean of the predictions over self.n_estimators and
                self.n_predictions are returned for each query point. If False, then all predictions are returned.
            output_layer (None or int): If not None, then the predictions are the outputs of the layer specified.
                The default is the model's output layer.
            max_queue_size (int): (Keras-specific parameter) The maximum size for the generator queue.
            workers (int): (Keras-specific parameter) The maximum number of workers to use.
            use_multiprocessing (bool): (Keras-specific parameter) If True then use process based threading.
            verbose (int): (Keras-specific parameter) The verbosity mode that should be 0 or 1 with the former turning
                verbosity off and the latter on.

        Returns:
            numpy array: The predictions. It will have shape ``M × K × N × F`` if ``summarise`` is set to ``False``, or ``N × F``
            otherwise. ``M`` is the number of estimators in the ensemble; ``K`` is the number of predictions per query
            point; ``N`` is the number of query points; and ``F`` is the output dimensionality of the specified layer
            determined by the shape of the output layer.
        """
        data_generator = generator
        if predict_data is not None:
            if not isinstance(
                generator,
                (
                    sg.mapper.GraphSAGENodeGenerator,
                    sg.mapper.HinSAGENodeGenerator,
                    sg.mapper.FullBatchNodeGenerator,
                ),
            ):
                raise ValueError(
                    "({}) generator parameter must be of type GraphSAGENodeGenerator, HinSAGENodeGenerator, or FullBatchNodeGenerator. Received type {}".format(
                        type(self).__name__, type(generator).__name__
                    )
                )
            data_generator = generator.flow(predict_data)
        elif not isinstance(
            generator,
            (
                sg.mapper.NodeSequence,
                sg.mapper.LinkSequence,
                sg.mapper.FullBatchSequence,
                sg.mapper.SparseFullBatchSequence,
            ),
        ):
            raise ValueError(
                "({}) If x is None, generator must be one of type NodeSequence, "
                "LinkSequence, SparseFullBatchSequence, or FullBatchSequence.".format(
                    type(self).__name__
                )
            )

        predictions = []

        if output_layer is not None:
            # Build truncated models that expose the requested layer's output.
            predict_models = [
                K.Model(inputs=model.input, outputs=model.layers[output_layer].output)
                for model in self.models
            ]
        else:
            predict_models = self.models

        for model in predict_models:
            model_predictions = []
            for _ in range(self.n_predictions):
                model_predictions.append(
                    model.predict(
                        data_generator,
                        max_queue_size=max_queue_size,
                        workers=workers,
                        use_multiprocessing=use_multiprocessing,
                        verbose=verbose,
                    )
                )
            # add to predictions list
            predictions.append(model_predictions)

        predictions = np.array(predictions)

        # BUG FIX: use truthiness instead of `summarise is True` so that any
        # truthy value (e.g. numpy.bool_) triggers summarisation as documented.
        if summarise:
            # average the predictions across models and predictions per query point
            predictions = np.mean(predictions, axis=(0, 1))

        return predictions

    def predict_generator(self, *args, **kwargs):
        """
        Deprecated: use :meth:`predict`.
        """
        warnings.warn(
            "'predict_generator' has been replaced by 'predict', to match tensorflow.keras.Model",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.predict(*args, **kwargs)
#
#
#
class BaggingEnsemble(Ensemble):
    """
    The BaggingEnsemble class can be used to create ensembles of stellargraph's graph neural network algorithms
    including GCN, GraphSAGE, GAT, and HinSAGE. Ensembles can be used for training classification and regression
    problems for node attribute inference and link prediction.

    This class can be used to create Bagging ensembles.

    Bagging ensembles add model diversity in two ways: (1) by random initialisation of the models' weights (before
    training) to different values; and (2) by bootstrap sampling of the training data for each model. That is, each
    model in the ensemble is trained on a random subset of the training examples, sampled with replacement from the
    original training data.

    .. seealso::

       `Examples using ensembles <https://stellargraph.readthedocs.io/en/stable/demos/ensembles/index.html>`__.

       Related functionality: :class:`.Ensemble` for only random initialisation.
    """

    def __init__(self, model, n_estimators=3, n_predictions=3):
        """
        Args:
            model: A keras model.
            n_estimators (int): The number of estimators (aka models) in the ensemble.
            n_predictions (int): The number of predictions per query point per estimator
        """
        super().__init__(
            model=model, n_estimators=n_estimators, n_predictions=n_predictions
        )

    def fit(
        self,
        generator,
        train_data,
        train_targets,
        steps_per_epoch=None,
        epochs=1,
        verbose=1,
        validation_data=None,
        validation_steps=None,
        class_weight=None,
        max_queue_size=10,
        workers=1,
        use_multiprocessing=False,
        shuffle=True,
        initial_epoch=0,
        bag_size=None,
        use_early_stopping=False,
        early_stopping_monitor="val_loss",
    ):
        """
        This method trains the ensemble on the data given in train_data and train_targets. If validation data are
        also given, then the training metrics are evaluated on these data and results printed on screen if verbose
        level is greater than 0.

        The method trains each model in the ensemble in series for the number of epochs specified. Training can
        also stop early with the best model as evaluated on the validation data, if use_early_stopping is enabled.

        Each model in the ensemble is trained using a bootstrapped sample of the data (the train data are re-sampled
        with replacement.) The number of bootstrap samples can be specified via the bag_size parameter; by default,
        the number of bootstrap samples equals the number of training points.

        For detailed descriptions of Keras-specific parameters consult the Keras documentation
        at https://keras.io/models/sequential/

        Args:
            generator: The generator object for training data. It should be one of type
                GraphSAGENodeGenerator, HinSAGENodeGenerator, FullBatchNodeGenerator, GraphSAGELinkGenerator,
                or HinSAGELinkGenerator.
            train_data (iterable): It is an iterable, e.g. list, that specifies the data
                to train the model with.
            train_targets (iterable): It is an iterable, e.g. list, that specifies the target
                values for the train data.
            steps_per_epoch (None or int): (Keras-specific parameter) If not None, it specifies the number of steps
                to yield from the generator before declaring one epoch finished and starting a new epoch.
            epochs (int): (Keras-specific parameter) The number of training epochs.
            verbose (int): (Keras-specific parameter) The verbosity mode that should be 0 , 1, or 2 meaning silent,
                progress bar, and one line per epoch respectively.
            validation_data: A generator for validation data that is optional (None). If not None then, it should
                be one of type GraphSAGENodeGenerator, HinSAGENodeGenerator, FullBatchNodeGenerator,
                GraphSAGELinkGenerator, or HinSAGELinkGenerator.
            validation_steps (None or int): (Keras-specific parameter) If validation_generator is not None, then it
                specifies the number of steps to yield from the generator before stopping at the end of every epoch.
            class_weight (None or dict): (Keras-specific parameter) If not None, it should be a dictionary
                mapping class indices (integers) to a weight (float) value, used for weighting the loss function (during
                training only). This can be useful to tell the model to "pay more attention" to samples from an
                under-represented class.
            max_queue_size (int): (Keras-specific parameter) The maximum size for the generator queue.
            workers (int): (Keras-specific parameter) The maximum number of workers to use.
            use_multiprocessing (bool): (Keras-specific parameter) If True then use process based threading.
            shuffle (bool): (Keras-specific parameter) If True, then it shuffles the order of batches at the
                beginning of each training epoch.
            initial_epoch (int): (Keras-specific parameter) Epoch at which to start training (useful for resuming a
                previous training run).
            bag_size (None or int): The number of samples in a bootstrap sample. If None and bagging is used, then
                the number of samples is equal to the number of training points.
            use_early_stopping (bool): If set to True, then early stopping is used when training each model
                in the ensemble. The default is False.
            early_stopping_monitor (str): The quantity to monitor for early stopping, e.g., 'val_loss',
                'val_weighted_acc'. It should be a valid Keras metric.

        Returns:
            list: It returns a list of Keras History objects each corresponding to one trained model in the ensemble.
        """
        if not isinstance(
            generator,
            (
                sg.mapper.GraphSAGENodeGenerator,
                sg.mapper.HinSAGENodeGenerator,
                sg.mapper.FullBatchNodeGenerator,
                sg.mapper.GraphSAGELinkGenerator,
                sg.mapper.HinSAGELinkGenerator,
            ),
        ):
            raise ValueError(
                "({}) generator parameter must be of type GraphSAGENodeGenerator, HinSAGENodeGenerator, "
                "FullBatchNodeGenerator, GraphSAGELinkGenerator, or HinSAGELinkGenerator if you want to use Bagging. "
                "Received type {}".format(type(self).__name__, type(generator).__name__)
            )
        if bag_size is not None and (bag_size > len(train_data) or bag_size <= 0):
            raise ValueError(
                "({}) bag_size must be positive and less than or equal to the number of training points ({})".format(
                    type(self).__name__, len(train_data)
                )
            )
        if train_targets is None:
            raise ValueError(
                "({}) If train_data is given then train_targets must be given as well.".format(
                    type(self).__name__
                )
            )

        # BUG FIX: the bootstrap sampling below indexes train_data/train_targets
        # with an array of indices (numpy "fancy" indexing), which fails for the
        # plain Python lists that the docstring promises to accept. Converting
        # with np.asarray is a no-op for numpy array inputs and makes list
        # inputs work as documented.
        train_data = np.asarray(train_data)
        train_targets = np.asarray(train_targets)

        self.history = []

        num_points_per_bag = bag_size if bag_size is not None else len(train_data)

        # Prepare the training data for each model. Use sampling with replacement to create len(self.models)
        # datasets.
        for model in self.models:
            di_index = np.random.choice(
                len(train_data), size=num_points_per_bag
            )  # sample with replacement
            di_train = train_data[di_index]
            di_targets = train_targets[di_index]

            di_gen = generator.flow(di_train, di_targets)

            # A fresh callback per model so no early-stopping state leaks
            # between the individual fits.
            es_callback = None
            if use_early_stopping and validation_data is not None:
                es_callback = [
                    EarlyStopping(
                        monitor=early_stopping_monitor,
                        patience=self.early_stoppping_patience,
                        restore_best_weights=True,
                    )
                ]

            self.history.append(
                model.fit(
                    di_gen,
                    steps_per_epoch=steps_per_epoch,
                    epochs=epochs,
                    verbose=verbose,
                    callbacks=es_callback,
                    validation_data=validation_data,
                    validation_steps=validation_steps,
                    class_weight=class_weight,
                    max_queue_size=max_queue_size,
                    workers=workers,
                    use_multiprocessing=use_multiprocessing,
                    shuffle=shuffle,
                    initial_epoch=initial_epoch,
                )
            )

        return self.history

    def fit_generator(self, *args, **kwargs):
        """
        Deprecated: use :meth:`fit`.
        """
        warnings.warn(
            "'fit_generator' has been replaced by 'fit', to match tensorflow.keras.Model",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.fit(*args, **kwargs)
| 33,345 | 44.184282 | 162 | py |
stellargraph | stellargraph-master/stellargraph/mapper/sliding.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"SlidingFeaturesNodeGenerator",
"SlidingFeaturesNodeSequence",
]
import numpy as np
from . import Generator
from tensorflow.keras.utils import Sequence
from ..core.validation import require_integer_in_range
class SlidingFeaturesNodeGenerator(Generator):
    """
    A data generator for graphs whose node features are ordered sequences, yielding sliding
    windows taken across each node's feature sequence.

    .. seealso:: Model using this generator: :class:`.GCN_LSTM`.

    Args:
        G (StellarGraph): a graph instance where the node features are ordered sequence data
        window_size (int): the number of sequence points included in the sliding window.
        batch_size (int, optional): the number of sliding windows to include in each batch.
    """

    def __init__(self, G, window_size, batch_size=1):
        require_integer_in_range(window_size, "window_size", min_val=1)
        require_integer_in_range(batch_size, "batch_size", min_val=1)

        self.graph = G

        # Only homogeneous graphs are supported; this raises otherwise.
        single_type = G.unique_node_type(
            "G: expected a graph with a single node type, found a graph with node types: %(found)s"
        )
        self._features = G.node_features(node_type=single_type)

        # A third axis on the feature matrix indicates multivariate sequence
        # data; record how many variates there are (None for univariate).
        self.variates = (
            self._features.shape[2] if len(self._features.shape) == 3 else None
        )

        self.window_size = window_size
        self._batch_size = batch_size

    def num_batch_dims(self):
        # Batches produced by this generator carry a single leading batch axis.
        return 1

    def flow(self, sequence_iloc_slice, target_distance=None):
        """
        Create a Keras sequence of sliding windows (and optional prediction targets) over the
        selected section of the node features.

        Both univariate features (one vector per node) and multivariate features (one tensor
        per node) are supported; slicing and indexing always happen along the first feature
        axis.

        Args:
            sequence_iloc_slice (slice):
                The range of feature positions from which windows are drawn, given as a
                ``slice`` object (the object form of ``a:b``, i.e. ``slice(a, b)``;
                ``slice(None, b)`` corresponds to ``[:b]``). The start is inclusive and the
                end exclusive. For instance, with feature vectors of length 10 and
                ``window_size = 3``, ``slice(None, None)`` yields 7 windows (``0:3``,
                ``1:4``, ...) while ``slice(4, 7)`` yields the single window ``4:7``.
                A train/test split can be expressed by choosing a boundary index::

                    train_end = int(0.8 * sequence_length)
                    train_gen = sliding_generator.flow(slice(None, train_end))
                    test_gen = sliding_generator.flow(slice(train_end, None))

            target_distance (int, optional):
                If given, the element this far beyond the end of each window is returned as a
                supervised training target; targets always stay inside the range selected by
                ``sequence_iloc_slice``. For example, ``flow(slice(4, 9), target_distance=1)``
                yields the window ``4:7`` with the feature at index 7 as its target, and the
                window ``5:8`` with the feature at index 8.

        Returns:
            A Keras sequence that yields batches of sliced windows of features, and,
            optionally, selected target values.
        """
        return SlidingFeaturesNodeSequence(
            self._features,
            self.window_size,
            self._batch_size,
            sequence_iloc_slice,
            target_distance,
        )
class SlidingFeaturesNodeSequence(Sequence):
    # Keras sequence yielding batches of sliding windows over node feature
    # sequences, with optional per-window supervised targets. Constructed via
    # SlidingFeaturesNodeGenerator.flow.

    def __init__(
        self, features, window_size, batch_size, sequence_iloc_slice, target_distance
    ):
        # Validate arguments in a fixed order so callers see consistent errors.
        if target_distance is not None:
            require_integer_in_range(target_distance, "target_distance", min_val=1)

        if not isinstance(sequence_iloc_slice, slice):
            raise TypeError(
                f"sequence_iloc_slice: expected a slice(...) object, found {type(sequence_iloc_slice).__name__}"
            )

        if sequence_iloc_slice.step not in (None, 1):
            raise TypeError(
                f"sequence_iloc_slice: expected a slice object with a step = 1, found step = {sequence_iloc_slice.step}"
            )

        # Restrict to the requested section along the sequence axis.
        self._features = features[:, sequence_iloc_slice, ...]

        feat_shape = self._features.shape
        self._num_nodes = feat_shape[0]
        self._num_sequence_samples = feat_shape[1]
        self._num_sequence_variates = feat_shape[2:]

        self._window_size = window_size
        self._target_distance = target_distance
        self._batch_size = batch_size

        # Each window needs window_size samples plus (optionally) the reach to
        # its target; everything beyond that slides.
        span = (
            window_size if target_distance is None else window_size + target_distance
        )
        self._num_windows = self._num_sequence_samples - span + 1

        # Not even one full window fits in the selected data: report precisely
        # what was requested versus what was available.
        if self._num_windows <= 0:
            target_str = (
                ""
                if target_distance is None
                else f" + target_distance={target_distance}"
            )

            total_sequence_samples = features.shape[1]
            start, stop, step = sequence_iloc_slice.indices(total_sequence_samples)
            # non-trivial steps aren't supported at the moment, so this doesn't need to be included
            # in the message
            assert step == 1

            raise ValueError(
                f"expected at least one sliding window of features, found a total window of size {span} (window_size={window_size}{target_str}) which is larger than the {self._num_sequence_samples} selected feature sample(s) (sequence_iloc_slice selected from {start} to {stop} in the sequence axis of length {total_sequence_samples})"
            )

    def __len__(self):
        # Ceiling division: the final batch may hold fewer than _batch_size windows.
        return (self._num_windows + self._batch_size - 1) // self._batch_size

    def __getitem__(self, batch_num):
        lo = batch_num * self._batch_size
        hi = min(lo + self._batch_size, self._num_windows)

        # Stack the windows for this batch: one slice of width _window_size per
        # starting position.
        batch_feats = np.stack(
            [self._features[:, s : s + self._window_size, ...] for s in range(lo, hi)]
        )

        n_in_batch = hi - lo
        assert (
            batch_feats.shape
            == (n_in_batch, self._num_nodes, self._window_size)
            + self._num_sequence_variates
        )

        if self._target_distance is None:
            batch_targets = None
        else:
            # The target for a window starting at s sits target_distance - 1
            # positions past the window's end.
            offset = self._window_size + self._target_distance - 1
            batch_targets = np.stack(
                [self._features[:, s + offset, ...] for s in range(lo, hi)]
            )
            assert (
                batch_targets.shape
                == (n_in_batch, self._num_nodes) + self._num_sequence_variates
            )

        return [batch_feats], batch_targets
| 8,487 | 39.807692 | 343 | py |
stellargraph | stellargraph-master/stellargraph/mapper/graphwave_generator.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import backend as K
import numpy as np
from ..core import StellarGraph
from ..core.validation import require_integer_in_range
from .base import Generator
from scipy.sparse.linalg import eigs
from scipy.sparse import diags
class GraphWaveGenerator(Generator):
    """
    Implementation of the GraphWave structural embedding algorithm from the paper: "Learning Structural Node Embeddings
    via Diffusion Wavelets" (https://arxiv.org/pdf/1710.10321.pdf)
    This class is minimally initialized with a StellarGraph object. Calling the flow function will return a TensorFlow
    DataSet that contains the GraphWave embeddings.
    This implementation differs from the paper by removing the automatic method of calculating scales. This method was
    found to not work well in practice, and replicating the results of the paper requires manually specifying much
    larger scales than those automatically calculated.
    .. seealso:: Example using this generator: `unsupervised representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/graphwave-embeddings.html>`__.
    Args:
        G (StellarGraph): the StellarGraph object.
        scales (iterable of floats): the wavelet scales to use. Smaller values embed smaller scale structural
            features, and larger values embed larger structural features.
        degree: the degree of the Chebyshev polynomial to use. Higher degrees yield more accurate results but at a
            higher computational cost. According to [1], the default value of 20 is accurate enough for most
            applications.
    [1] D. I. Shuman, P. Vandergheynst, and P. Frossard, “Chebyshev Polynomial Approximation for Distributed Signal
    Processing,” https://arxiv.org/abs/1105.1891
    """
    def __init__(self, G, scales=(5, 10), degree=20):
        if not isinstance(G, StellarGraph):
            raise TypeError("G must be a StellarGraph object.")
        # Check that there is only a single node type
        _ = G.unique_node_type(
            "G: expected a graph with a single node type, found a graph with node types: %(found)s"
        )
        require_integer_in_range(degree, "degree", min_val=1)
        # Create sparse adjacency matrix:
        adj = G.to_adjacency_matrix().tocoo()
        # Function to map node IDs to indices for quicker node index lookups
        self._node_lookup = G.node_ids_to_ilocs
        # unnormalized graph laplacian: L = D - A, kept in COO form for the
        # sparse-tensor conversion below
        degree_mat = diags(np.asarray(adj.sum(1)).ravel())
        laplacian = degree_mat - adj
        laplacian = laplacian.tocoo()
        self.scales = np.array(scales).astype(np.float32)
        # the largest eigenvalue bounds the spectral domain used when fitting the
        # Chebyshev approximation of the heat kernel below
        max_eig = eigs(laplacian, k=1, return_eigenvectors=False)
        self.max_eig = np.real(max_eig).astype(np.float32)[0]
        # one row of Chebyshev coefficients per scale, approximating exp(-s * x)
        # over [0, max_eig]
        coeffs = [
            np.polynomial.chebyshev.Chebyshev.interpolate(
                lambda x: np.exp(-s * x), domain=[0, self.max_eig], deg=degree
            ).coef.astype(np.float32)
            for s in scales
        ]
        self.coeffs = tf.convert_to_tensor(np.stack(coeffs, axis=0))
        # the laplacian as a TensorFlow SparseTensor, used by flow() to evaluate
        # the Chebyshev recurrence via sparse matrix-vector products
        self.laplacian = tf.sparse.SparseTensor(
            indices=np.column_stack((laplacian.row, laplacian.col)),
            values=laplacian.data.astype(np.float32),
            dense_shape=laplacian.shape,
        )
    def num_batch_dims(self):
        # embeddings are batched along a single leading dimension
        return 1
    def flow(
        self,
        node_ids,
        sample_points,
        batch_size,
        targets=None,
        shuffle=False,
        seed=None,
        repeat=False,
        num_parallel_calls=1,
    ):
        """
        Creates a TensorFlow DataSet object of GraphWave embeddings.
        The dimension of the embeddings are `2 * len(scales) * len(sample_points)`.
        Args:
            node_ids: an iterable of node ids for the nodes of interest
                (e.g., training, validation, or test set nodes)
            sample_points: a 1D array of points at which to sample the characteristic function. This should be of the
                form: `sample_points=np.linspace(0, max_val, number_of_samples)` and is graph dependent.
            batch_size (int): the number of node embeddings to include in a batch.
            targets: a 1D or 2D array of numeric node targets with shape ``(len(node_ids),)``
                or ``(len(node_ids), target_size)``
            shuffle (bool): indicates whether to shuffle the dataset after each epoch
            seed (int,optional): the random seed to use for shuffling the dataset
            repeat (bool): indicates whether iterating through the DataSet will continue infinitely or stop after one
                full pass.
            num_parallel_calls (int): number of threads to use.
        Returns:
            A ``tf.data.Dataset`` yielding batches of embeddings (paired with
            ``targets`` when they are supplied).
        """
        require_integer_in_range(batch_size, "batch_size", min_val=1)
        require_integer_in_range(num_parallel_calls, "num_parallel_calls", min_val=1)
        if not isinstance(shuffle, bool):
            raise TypeError(f"shuffle: expected bool, found {type(shuffle).__name__}")
        if not isinstance(repeat, bool):
            raise TypeError(f"repeat: expected bool, found {type(repeat).__name__}")
        # the "time" points at which the characteristic function is sampled
        ts = tf.convert_to_tensor(sample_points.astype(np.float32))
        def _map_func(x):
            return _empirical_characteristic_function(
                _chebyshev(x, self.laplacian, self.coeffs, self.max_eig), ts,
            )
        node_idxs = self._node_lookup(node_ids)
        # calculates the columns of U exp(-scale * eigenvalues) U^T on the fly
        # empirically calculate the characteristic function for each column of U exp(-scale * eigenvalues) U^T
        # (the dataset below yields one sparse one-hot row per requested node)
        dataset = tf.data.Dataset.from_tensor_slices(
            tf.sparse.SparseTensor(
                indices=np.stack([np.arange(len(node_ids)), node_idxs], axis=1),
                dense_shape=(len(node_ids), self.laplacian.shape[0]),
                values=np.ones(len(node_ids), dtype=np.float32),
            )
        ).map(_map_func, num_parallel_calls=num_parallel_calls)
        if targets is not None:
            target_dataset = tf.data.Dataset.from_tensor_slices(targets)
            dataset = tf.data.Dataset.zip((dataset, target_dataset))
        # cache embeddings in memory for performance
        dataset = dataset.cache()
        if shuffle:
            dataset = dataset.shuffle(buffer_size=len(node_ids), seed=seed)
        if repeat:
            return dataset.batch(batch_size).repeat()
        else:
            return dataset.batch(batch_size)
def _empirical_characteristic_function(samples, ts):
    """
    Estimate the characteristic function for the wavelet spread of a single node.

    Args:
        samples (Tensor): wavelet distribution samples at each scale, shape ``(scales, ns)``.
        ts (Tensor): the "time" points at which to sample the characteristic function, shape ``(nt,)``.

    Returns:
        embedding (Tensor): the flat GraphWave node embedding, shape ``(2 * nt * scales,)``.
    """
    # broadcast (1, scales, ns) against (nt, 1, 1) to form t * psi with shape (nt, scales, ns)
    broadcast_samples = tf.expand_dims(samples, axis=0)
    broadcast_ts = tf.expand_dims(tf.expand_dims(ts, axis=-1), axis=-1)
    t_psi = broadcast_samples * broadcast_ts
    # empirical expectation over the sample axis: (nt, scales, ns) -> (nt, scales)
    real_part = tf.math.reduce_mean(tf.math.cos(t_psi), axis=2)
    imag_part = tf.math.reduce_mean(tf.math.sin(t_psi), axis=2)
    # concatenate the real and imaginary parts and flatten into one vector
    return K.flatten(tf.concat([real_part, imag_part], axis=0))
def _chebyshev(one_hot_encoded_col, laplacian, coeffs, max_eig):
    """
    Calculate one column of the Chebyshev approximation of exp(-scale * laplacian) for
    all scales using the approach from: https://arxiv.org/abs/1105.1891 (see
    equations (7)-(11) for more info).

    Args:
        one_hot_encoded_col (SparseTensor): a sparse tensor indicating which column (node) to calculate.
        laplacian (SparseTensor): the unnormalized graph laplacian
        coeffs: the Chebyshev coefficients for exp(-scale * x) for each scale in the shape (num_scales, deg)
        max_eig: the largest eigenvalue of the laplacian

    Returns:
        (num_scales, num_nodes) tensor of the wavelets for each scale for the specified node.
    """
    half_spectrum = max_eig / 2

    # Chebyshev polynomials are defined on [-1, 1], so the laplacian is shifted to
    # y = (L / half_spectrum) - I. y is only ever applied to vectors, so it is
    # modelled as a linear operator instead of being materialized.
    def apply_shifted(vector):
        return K.dot(laplacian, vector) / half_spectrum - vector

    # a dense one-hot column selector: this lets us compute the filtered laplacian
    # (psi in the paper) one column at a time using only matrix-vector products
    selector = tf.reshape(
        tf.sparse.to_dense(one_hot_encoded_col), shape=(laplacian.shape[0], 1)
    )
    # three-term recurrence: T_0 = f, T_1 = y f, T_k = 2 y T_{k-1} - T_{k-2}
    polys = [selector, apply_shifted(selector)]
    for _ in range(coeffs.shape[1] - 2):
        polys.append(2 * apply_shifted(polys[-1]) - polys[-2])
    # note: difference to the paper. the 0th coefficient is not halved here because its
    # automatically halved by numpy
    stacked_polys = K.squeeze(tf.stack(polys, axis=0), axis=-1)
    return tf.matmul(coeffs, stacked_polys)
| 9,979 | 39.734694 | 179 | py |
stellargraph | stellargraph-master/stellargraph/mapper/knowledge_graph.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import numpy as np
import pandas as pd
from tensorflow.keras.utils import Sequence
from ..globalvar import SOURCE, TARGET, TYPE_ATTR_NAME
from ..random import random_state, SeededPerBatch
from .base import Generator
from ..core.validation import comma_sep, require_integer_in_range
class KGTripleGenerator(Generator):
    """
    A data generator for working with triple-based knowledge graph models, like ComplEx.
    This requires a StellarGraph that contains all nodes/entities and every edge/relation type that
    will be trained or predicted upon. The graph does not need to contain the edges/triples that are
    used for training or prediction.
    .. seealso::
        Models using this generator: :class:`.ComplEx`, :class:`.DistMult`, :class:`.RotatE`, :class:`.RotE`, :class:`.RotH`.
        Example using this generator (see individual models for more): `link prediction with ComplEx <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/complex-link-prediction.html>`__.
    Args:
        G (StellarGraph): the graph containing all nodes, and all edge types.
        batch_size (int): the size of the batches to generate
    """
    def __init__(self, G, batch_size):
        self.G = G
        if not isinstance(batch_size, int):
            raise TypeError(
                f"batch_size: expected int, found {type(batch_size).__name__}"
            )
        self.batch_size = batch_size
    def num_batch_dims(self):
        # batches are indexed along a single leading dimension
        return 1
    def flow(
        self,
        edges,
        negative_samples=None,
        sample_strategy="uniform",
        shuffle=False,
        seed=None,
    ):
        """
        Create a Keras Sequence yielding the edges/triples in ``edges``, potentially with some negative
        edges.
        The negative edges are sampled using the "local closed world assumption", where a
        source/subject or a target/object is randomly mutated.
        Args:
            edges: the edges/triples to feed into a knowledge graph model.
            negative_samples (int, optional): the number of negative samples to generate for each positive edge.
            sample_strategy (str, optional): the sampling strategy to use for negative sampling, if ``negative_samples`` is not None. Supported values:
                ``uniform``
                    Uniform sampling, where a negative edge is created from a positive edge in
                    ``edges`` by replacing the source or destination entity with a uniformly sampled
                    random entity in the graph (without verifying if the edge exists in the graph: for
                    sparse graphs, this is unlikely). Each element in a batch is labelled as 1
                    (positive) or 0 (negative). An appropriate loss function is
                    :class:`tensorflow.keras.losses.BinaryCrossentropy` (probably with
                    ``from_logits=True``).
                ``self-adversarial``
                    Self-adversarial sampling from [1], where each edge is sampled in the same manner
                    as ``uniform`` sampling. Each element in a batch is labelled as 1 (positive) or an
                    integer in ``[0, -batch_size)`` (negative). An appropriate loss function is
                    :class:`stellargraph.losses.SelfAdversarialNegativeSampling`.
            shuffle (bool, optional): whether to shuffle the triples after each epoch.
            seed (int, optional): random seed for shuffling and negative sampling.
        [1] Z. Sun, Z.-H. Deng, J.-Y. Nie, and J. Tang, “RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space,” `arXiv:1902.10197 <http://arxiv.org/abs/1902.10197>`_, Feb. 2019.
        Returns:
            A Keras sequence that can be passed to the ``fit`` and ``predict`` method of knowledge-graph models.
        """
        if isinstance(edges, pd.DataFrame):
            sources = edges[SOURCE]
            rels = edges[TYPE_ATTR_NAME]
            targets = edges[TARGET]
        else:
            raise TypeError(
                f"edges: expected pandas.DataFrame; found {type(edges).__name__}"
            )
        if negative_samples is not None:
            require_integer_in_range(negative_samples, "negative_samples", min_val=0)
            supported_strategies = ["uniform", "self-adversarial"]
            if sample_strategy not in supported_strategies:
                raise ValueError(
                    f"sample_strategy: expected one of {comma_sep(supported_strategies)}, found {sample_strategy!r}"
                )
        # convert user-facing IDs/type names into the integer ilocs used internally
        source_ilocs = self.G.node_ids_to_ilocs(sources)
        rel_ilocs = self.G.edge_type_names_to_ilocs(rels)
        target_ilocs = self.G.node_ids_to_ilocs(targets)
        return KGTripleSequence(
            max_node_iloc=self.G.number_of_nodes(),
            source_ilocs=source_ilocs,
            rel_ilocs=rel_ilocs,
            target_ilocs=target_ilocs,
            batch_size=self.batch_size,
            shuffle=shuffle,
            negative_samples=negative_samples,
            sample_strategy=sample_strategy,
            seed=seed,
        )
class KGTripleSequence(Sequence):
    """
    A Keras Sequence yielding batches of knowledge-graph triples as integer ilocs,
    with optional uniform or self-adversarial negative sampling.

    Instances are created by :meth:`KGTripleGenerator.flow`; see that method for the
    meaning of the sampling strategies and the labels they produce.

    Args:
        max_node_iloc (int): the number of nodes in the graph; negative entities are
            drawn uniformly from ``[0, max_node_iloc)``.
        source_ilocs: iloc of the source/subject entity of each positive triple.
        rel_ilocs: iloc of the relation/edge type of each positive triple.
        target_ilocs: iloc of the target/object entity of each positive triple.
        batch_size (int): the number of positive triples per batch.
        shuffle (bool): if True, shuffle the triple order after each epoch.
        negative_samples (int, optional): the number of negative triples to generate
            per positive triple, or None to disable negative sampling.
        sample_strategy (str): "uniform" or "self-adversarial"; controls the labels
            attached to the negative samples.
        seed (int, optional): random seed for shuffling and negative sampling.
    """

    def __init__(
        self,
        *,
        max_node_iloc,
        source_ilocs,
        rel_ilocs,
        target_ilocs,
        batch_size,
        shuffle,
        negative_samples,
        sample_strategy,
        seed,
    ):
        self.max_node_iloc = max_node_iloc
        num_edges = len(source_ilocs)
        # use the smallest integer dtype that can index every edge, to save memory
        self.indices = np.arange(num_edges, dtype=np.min_scalar_type(num_edges))
        self.source_ilocs = np.asarray(source_ilocs)
        self.rel_ilocs = np.asarray(rel_ilocs)
        self.target_ilocs = np.asarray(target_ilocs)
        self.negative_samples = negative_samples
        self.sample_strategy = sample_strategy
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        _, self._global_rs = random_state(seed)
        # a separate deterministic RNG per batch, so negative sampling is
        # reproducible even if batches are computed out of order or in parallel
        self._batch_sampler = SeededPerBatch(
            np.random.RandomState, self._global_rs.randint(2 ** 32, dtype=np.uint32)
        )

    def __len__(self):
        # number of batches per epoch; the last batch may be smaller
        return int(np.ceil(len(self.indices) / self.batch_size))

    def __getitem__(self, batch_num):
        """
        Return batch ``batch_num`` as ``((s_iloc, r_iloc, o_iloc), targets)``;
        ``targets`` is omitted entirely when negative sampling is disabled.
        """
        start = self.batch_size * batch_num
        end = start + self.batch_size
        indices = self.indices[start:end]
        s_iloc = self.source_ilocs[indices]
        r_iloc = self.rel_ilocs[indices]
        o_iloc = self.target_ilocs[indices]
        positive_count = len(s_iloc)
        targets = None
        if self.negative_samples is not None:
            # the first `positive_count` triples stay positive; each of the
            # `negative_samples` repetitions after them is corrupted below
            s_iloc = np.tile(s_iloc, 1 + self.negative_samples)
            r_iloc = np.tile(r_iloc, 1 + self.negative_samples)
            o_iloc = np.tile(o_iloc, 1 + self.negative_samples)
            negative_count = self.negative_samples * positive_count
            assert len(s_iloc) == positive_count + negative_count
            rng = self._batch_sampler[batch_num]
            # local closed world assumption: corrupt either the source or the
            # target entity of each repeated triple, chosen with probability 1/2
            # FIXME (#882): this sampling may be able to be optimised to a slice-write
            change_source = rng.random(size=negative_count) < 0.5
            source_changes = change_source.sum()
            new_nodes = rng.randint(self.max_node_iloc, size=negative_count)
            s_iloc[positive_count:][change_source] = new_nodes[:source_changes]
            o_iloc[positive_count:][~change_source] = new_nodes[source_changes:]
            if self.sample_strategy == "uniform":
                targets = np.repeat(
                    np.array([1, 0], dtype=np.float32), [positive_count, negative_count]
                )
            elif self.sample_strategy == "self-adversarial":
                # the negative samples are labelled with an arbitrary within-batch integer <= 0, based on
                # which positive edge they came from.
                targets = np.tile(
                    np.arange(0, -positive_count, -1), 1 + self.negative_samples
                )
                # the positive examples are labelled with 1
                targets[:positive_count] = 1
            else:
                # validated by KGTripleGenerator.flow, but guard against direct
                # construction (bug fix: this previously referenced an undefined
                # local name `sample_strategy`, raising NameError instead)
                raise ValueError(f"unknown sample_strategy: {self.sample_strategy!r}")
            assert len(targets) == len(s_iloc)
        assert len(s_iloc) == len(r_iloc) == len(o_iloc)
        if targets is None:
            return ((s_iloc, r_iloc, o_iloc),)
        return (s_iloc, r_iloc, o_iloc), targets

    def on_epoch_end(self):
        # reshuffle the triple order between epochs when requested
        if self.shuffle:
            self._global_rs.shuffle(self.indices)
| 8,924 | 36.5 | 208 | py |
stellargraph | stellargraph-master/stellargraph/mapper/adjacency_generators.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import backend as K
import numpy as np
from ..core import StellarGraph
from ..core.validation import require_integer_in_range
from ..core.utils import normalize_adj
from .base import Generator
class AdjacencyPowerGenerator(Generator):
    """
    A data generator for use with the Watch Your Step algorithm [1]. It calculates and returns the first ``num_powers``
    of the adjacency matrix row by row.
    .. seealso::
        Model using this generator: :class:`.WatchYourStep`.
        Example using this generator: `unsupervised representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/watch-your-step-embeddings.html>`__
    Args:
        G (StellarGraph): a machine-learning StellarGraph-type graph
        num_powers (int): the number of adjacency powers to calculate. Defaults
            to 10 as this value was found to perform well by the authors of the paper.
        weighted (bool, optional): if True, use the edge weights from ``G``; if False, treat the
            graph as unweighted.
    """
    def __init__(self, G, num_powers=10, weighted=False):
        if not isinstance(G, StellarGraph):
            raise TypeError("G must be a StellarGraph object.")
        require_integer_in_range(num_powers, "num_powers", min_val=1)
        Aadj = G.to_adjacency_matrix(weighted=weighted)
        # helper: convert a scipy sparse matrix into a tf.sparse.SparseTensor
        # holding that matrix's TRANSPOSE (tensorflow sparse-dense products
        # require the sparse operand first, so transposes are stored)
        def tfify(matrix):
            matrix = matrix.tocoo(copy=False)
            return tf.sparse.SparseTensor(
                # construct the transpose
                indices=np.column_stack([matrix.col, matrix.row]),
                values=matrix.data.astype(np.float32),
                dense_shape=matrix.shape,
            )
        self.Aadj_T = tfify(Aadj)
        self.transition_matrix_T = tfify(normalize_adj(Aadj, symmetric=False))
        self.num_powers = num_powers
    def num_batch_dims(self):
        # batches are indexed along a single leading dimension
        return 1
    def flow(self, batch_size, num_parallel_calls=1):
        """
        Creates the `tensorflow.data.Dataset` object for training node embeddings from powers of the adjacency matrix.
        Args:
            batch_size (int): the number of rows of the adjacency powers to include in each batch.
            num_parallel_calls (int): the number of threads to use for preprocessing of batches.
        Returns:
            A `tensorflow.data.Dataset` object for training node embeddings from powers of the adjacency matrix.
        """
        require_integer_in_range(batch_size, "batch_size", min_val=1)
        require_integer_in_range(num_parallel_calls, "num_parallel_calls", min_val=1)
        # one one-hot row selector per node of the graph
        row_dataset = tf.data.Dataset.from_tensor_slices(
            tf.sparse.eye(int(self.Aadj_T.shape[0]))
        )
        # each node's row of the first num_powers powers of the transition matrix
        adj_powers_dataset = row_dataset.map(
            lambda ohe_rows: _partial_powers(
                ohe_rows, self.transition_matrix_T, num_powers=self.num_powers
            ),
            num_parallel_calls=num_parallel_calls,
        )
        row_index_dataset = tf.data.Dataset.range(self.Aadj_T.shape[0])
        row_index_adj_powers_dataset = tf.data.Dataset.zip(
            (row_index_dataset, adj_powers_dataset)
        )
        # the corresponding raw adjacency row for each node
        batch_adj_dataset = row_dataset.map(
            lambda ohe_rows: _select_row_from_sparse_tensor(ohe_rows, self.Aadj_T),
            num_parallel_calls=num_parallel_calls,
        )
        # combined element: ((row index, transition powers), adjacency row)
        training_dataset = tf.data.Dataset.zip(
            (row_index_adj_powers_dataset, batch_adj_dataset)
        ).batch(batch_size)
        return training_dataset.repeat()
def _partial_powers(one_hot_encoded_row, Aadj_T, num_powers):
    """
    Compute the row specified by ``one_hot_encoded_row`` of the first ``num_powers``
    powers of the adjacency matrix.

    Args:
        one_hot_encoded_row: one-hot-encoded row selecting a node.
        Aadj_T: the transpose of the adjacency matrix.
        num_powers (int): the number of adjacency powers to compute.

    Returns:
        A matrix of shape ``(num_powers, Aadj_T.shape[1])`` containing the selected
        row of each of the first ``num_powers`` powers of the adjacency matrix.
    """
    # tensorflow requires the sparse operand first, so the transpose is used:
    # row . A^k is evaluated as transpose(A^T . transpose(previous)), repeatedly
    current_row = tf.reshape(
        tf.sparse.to_dense(one_hot_encoded_row), shape=(1, Aadj_T.shape[1])
    )
    rows_per_power = []
    for _ in range(num_powers):
        current_row = K.transpose(K.dot(Aadj_T, K.transpose(current_row)))
        rows_per_power.append(current_row)
    return K.squeeze(tf.stack(rows_per_power, axis=1), axis=0)
def _select_row_from_sparse_tensor(one_hot_encoded_row, sp_tensor_T):
    """
    Gather the row selected by ``one_hot_encoded_row`` from the input sparse matrix.

    Args:
        one_hot_encoded_row: one-hot-encoded row selecting a node.
        sp_tensor_T: the transpose of the sparse matrix to select from.

    Returns:
        The specified row of the (untransposed) matrix, as a dense ``(1, n)`` tensor.
    """
    # densify the selector so it can take part in the sparse-dense product below
    dense_selector = tf.reshape(
        tf.sparse.to_dense(one_hot_encoded_row), shape=(1, sp_tensor_T.shape[1])
    )
    # the sparse operand must come first: M^T . selector^T == (selector . M)^T
    selected_row_T = K.dot(sp_tensor_T, K.transpose(dense_selector))
    return K.transpose(selected_row_T)
| 5,798 | 34.359756 | 174 | py |
stellargraph | stellargraph-master/stellargraph/mapper/sampled_node_generators.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mappers to provide input data for the graph models in layers.
"""
__all__ = [
"GraphSAGENodeGenerator",
"HinSAGENodeGenerator",
"Attri2VecNodeGenerator",
"Node2VecNodeGenerator",
"DirectedGraphSAGENodeGenerator",
]
import warnings
import operator
import random
import abc
import warnings
import numpy as np
import itertools as it
import networkx as nx
import scipy.sparse as sps
from tensorflow.keras import backend as K
from functools import reduce
from tensorflow.keras.utils import Sequence
from collections import defaultdict
from ..data import (
SampledBreadthFirstWalk,
SampledHeterogeneousBreadthFirstWalk,
DirectedBreadthFirstNeighbours,
)
from ..core.graph import StellarGraph, GraphSchema
from ..core.utils import is_real_iterable
from ..core.validation import comma_sep
from . import NodeSequence, Generator
from ..random import SeededPerBatch
class BatchedNodeGenerator(Generator):
    """
    Abstract base class for graph data generators.
    The supplied graph should be a StellarGraph object that is ready for
    machine learning.
    Do not use this base class: use a subclass specific to the method.
    Args:
        G (StellarGraph): The machine-learning ready graph.
        batch_size (int): Size of batch to return.
        schema (GraphSchema): [Optional] Schema for the graph, for heterogeneous graphs.
        use_node_features (bool): [Optional] If True (default), validate that the
            graph has node features suitable for machine learning.
    """
    def __init__(self, G, batch_size, schema=None, use_node_features=True):
        if not isinstance(G, StellarGraph):
            raise TypeError("Graph must be a StellarGraph or StellarDiGraph object.")
        self.graph = G
        self.batch_size = batch_size
        # This is a node generator and requires a model with one root node per query
        self.multiplicity = 1
        # We need a schema for compatibility with HinSAGE
        if schema is None:
            self.schema = G.create_graph_schema()
        elif isinstance(schema, GraphSchema):
            self.schema = schema
        else:
            raise TypeError("Schema must be a GraphSchema object")
        # We will need real node types here; subclasses populate this
        self.head_node_types = None
        # Create sampler for GraphSAGE; subclasses populate this
        self.sampler = None
        # Check if the graph has features
        if use_node_features:
            G.check_graph_for_ml()
    @abc.abstractmethod
    def sample_features(self, head_nodes, batch_num):
        """Collect the features of the sampled neighbourhoods of ``head_nodes``."""
        pass
    def num_batch_dims(self):
        # batches are indexed along a single leading dimension
        return 1
    def flow(self, node_ids, targets=None, shuffle=False, seed=None):
        """
        Creates a generator/sequence object for training or evaluation
        with the supplied node ids and numeric targets.
        The node IDs are the nodes to train or inference on: the embeddings
        calculated for these nodes are passed to the downstream task. These
        are a subset of the nodes in the graph.
        The targets are an array of numeric targets corresponding to the
        supplied node_ids to be used by the downstream task. They should
        be given in the same order as the list of node IDs.
        If they are not specified (for example, for use in prediction),
        the targets will not be available to the downstream task.
        Note that the shuffle argument should be True for training and
        False for prediction.
        Args:
            node_ids: an iterable of node IDs
            targets: a 2D array of numeric targets with shape
                ``(len(node_ids), target_size)``
            shuffle (bool): If True the node_ids will be shuffled at each
                epoch, if False the node_ids will be processed in order.
        Returns:
            A NodeSequence object to use with StellarGraph models
            in Keras methods ``fit``, ``evaluate``,
            and ``predict``
        """
        if self.head_node_types is not None:
            expected_node_type = self.head_node_types[0]
        else:
            expected_node_type = None
        node_ilocs = self.graph.node_ids_to_ilocs(node_ids)
        # validate that every supplied node has the type this generator expects
        node_types = self.graph.node_type(node_ilocs, use_ilocs=True)
        invalid = node_ilocs[node_types != expected_node_type]
        if len(invalid) > 0:
            raise ValueError(
                f"node_ids: expected all nodes to be of type {expected_node_type}, "
                f"found some nodes with wrong type: {comma_sep(invalid, stringify=format)}"
            )
        return NodeSequence(
            self.sample_features,
            self.batch_size,
            node_ilocs,
            targets,
            shuffle=shuffle,
            seed=seed,
        )
    def flow_from_dataframe(self, node_targets, shuffle=False):
        """
        Creates a generator/sequence object for training or evaluation
        with the supplied node ids and numeric targets.
        Args:
            node_targets: a Pandas DataFrame of numeric targets indexed
                by the node ID for that target.
            shuffle (bool): If True the node_ids will be shuffled at each
                epoch, if False the node_ids will be processed in order.
        Returns:
            A NodeSequence object to use with StellarGraph models
            in Keras methods ``fit``, ``evaluate``,
            and ``predict``
        """
        return self.flow(node_targets.index, node_targets.values, shuffle=shuffle)
class GraphSAGENodeGenerator(BatchedNodeGenerator):
    """
    A data generator for node prediction with Homogeneous GraphSAGE models
    At minimum, supply the StellarGraph, the batch size, and the number of
    node samples for each layer of the GraphSAGE model.
    The supplied graph should be a StellarGraph object with node features.
    Use the :meth:`flow` method supplying the nodes and (optionally) targets
    to get an object that can be used as a Keras data generator.
    Example::
        G_generator = GraphSAGENodeGenerator(G, 50, [10,10])
        train_data_gen = G_generator.flow(train_node_ids, train_node_labels)
        test_data_gen = G_generator.flow(test_node_ids)
    .. seealso::
        Model using this generator: :class:`.GraphSAGE`.
        Some examples using this generator (see the model for more):
        - `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/graphsage-node-classification.html>`__
        - `unsupervised representation learning via Deep Graph Infomax <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/deep-graph-infomax-embeddings.html>`__
        Related functionality:
        - :class:`.Neo4jGraphSAGENodeGenerator` for using :class:`.GraphSAGE` with Neo4j
        - :class:`.CorruptedGenerator` for unsupervised training using :class:`.DeepGraphInfomax`
        - :class:`.GraphSAGELinkGenerator` for link prediction, unsupervised training using random walks and related tasks
        - :class:`.DirectedGraphSAGENodeGenerator` for directed graphs
        - :class:`.HinSAGENodeGenerator` for heterogeneous graphs
    Args:
        G (StellarGraph): The machine-learning ready graph.
        batch_size (int): Size of batch to return.
        num_samples (list): The number of samples per layer (hop) to take.
        seed (int): [Optional] Random seed for the node sampler.
        weighted (bool, optional): If True, sample neighbours using the edge weights in the graph.
    """
    def __init__(
        self, G, batch_size, num_samples, seed=None, name=None, weighted=False
    ):
        super().__init__(G, batch_size)
        self.num_samples = num_samples
        self.head_node_types = self.schema.node_types
        self.name = name
        self.weighted = weighted
        # Check that there is only a single node type for GraphSAGE
        if len(self.head_node_types) > 1:
            warnings.warn(
                "running homogeneous GraphSAGE on a graph with multiple node types",
                RuntimeWarning,
                stacklevel=2,
            )
        # Create sampler for GraphSAGE: one deterministically-seeded sampler per
        # batch, so sampling is reproducible even when batches run out of order
        self._samplers = SeededPerBatch(
            lambda s: SampledBreadthFirstWalk(G, graph_schema=self.schema, seed=s),
            seed=seed,
        )
    def sample_features(self, head_nodes, batch_num):
        """
        Sample neighbours recursively from the head nodes, collect the features of the
        sampled nodes, and return these as a list of feature arrays for the GraphSAGE
        algorithm.
        Args:
            head_nodes: An iterable of head nodes to perform sampling on.
            batch_num (int): Batch number
        Returns:
            A list of the same length as ``num_samples`` of collected features from
            the sampled nodes of shape:
            ``(len(head_nodes), num_sampled_at_layer, feature_size)``
            where ``num_sampled_at_layer`` is the cumulative product of ``num_samples``
            for that layer.
        """
        node_samples = self._samplers[batch_num].run(
            nodes=head_nodes, n=1, n_size=self.num_samples, weighted=self.weighted
        )
        # (the previously computed-but-unused `num_full_samples` total has been removed)
        # Reshape node samples to sensible format
        def get_levels(loc, lsize, samples_per_hop, walks):
            # recursively slice each flat walk into the nodes sampled at each hop
            end_loc = loc + lsize
            walks_at_level = list(it.chain(*[w[loc:end_loc] for w in walks]))
            if len(samples_per_hop) < 1:
                return [walks_at_level]
            return [walks_at_level] + get_levels(
                end_loc, lsize * samples_per_hop[0], samples_per_hop[1:], walks
            )
        nodes_per_hop = get_levels(0, 1, self.num_samples, node_samples)
        node_type = self.head_node_types[0]
        # Get features for sampled nodes
        batch_feats = [
            self.graph.node_features(layer_nodes, node_type, use_ilocs=True)
            for layer_nodes in nodes_per_hop
        ]
        # Resize features to (batch_size, n_neighbours, feature_size); the middle
        # dimension must be literal 0 (not -1) for empty arrays, since reshape
        # cannot infer a dimension when the total size is zero
        batch_feats = [
            np.reshape(a, (len(head_nodes), -1 if np.size(a) > 0 else 0, a.shape[1]))
            for a in batch_feats
        ]
        return batch_feats
    def default_corrupt_input_index_groups(self):
        # everything can be shuffled together
        return [list(range(len(self.num_samples) + 1))]
class DirectedGraphSAGENodeGenerator(BatchedNodeGenerator):
"""
A data generator for node prediction with homogeneous GraphSAGE models
on directed graphs.
At minimum, supply the StellarDiGraph, the batch size, and the number of
node samples (separately for in-nodes and out-nodes)
for each layer of the GraphSAGE model.
The supplied graph should be a StellarDiGraph object with node features.
Use the :meth:`flow` method supplying the nodes and (optionally) targets
to get an object that can be used as a Keras data generator.
Example::
G_generator = DirectedGraphSAGENodeGenerator(G, 50, [10,5], [5,1])
train_data_gen = G_generator.flow(train_node_ids, train_node_labels)
test_data_gen = G_generator.flow(test_node_ids)
.. seealso::
Model using this generator: :class:`.DirectedGraphSAGE`.
Example using this generator: `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/directed-graphsage-node-classification.html>`__.
Related functionality:
- :class:`.Neo4jDirectedGraphSAGENodeGenerator` for using :class:`.DirectedGraphSAGE` with Neo4j
- :class:`.CorruptedGenerator` for unsupervised training using :class:`.DeepGraphInfomax`
- :class:`.DirectedGraphSAGELinkGenerator` for link prediction and related tasks
- :class:`.GraphSAGENodeGenerator` for undirected graphs
- :class:`.HinSAGENodeGenerator` for heterogeneous graphs
Args:
G (StellarDiGraph): The machine-learning ready graph.
batch_size (int): Size of batch to return.
in_samples (list): The number of in-node samples per layer (hop) to take.
out_samples (list): The number of out-node samples per layer (hop) to take.
seed (int): [Optional] Random seed for the node sampler.
weighted (bool, optional): If True, sample neighbours using the edge weights in the graph.
"""
def __init__(
self,
G,
batch_size,
in_samples,
out_samples,
seed=None,
name=None,
weighted=False,
):
super().__init__(G, batch_size)
# TODO Add checks for in- and out-nodes sizes
self.in_samples = in_samples
self.out_samples = out_samples
self.head_node_types = self.schema.node_types
self.name = name
self.weighted = weighted
# Check that there is only a single node type for GraphSAGE
if len(self.head_node_types) > 1:
warnings.warn(
"running homogeneous GraphSAGE on a graph with multiple node types",
RuntimeWarning,
stacklevel=2,
)
# Create sampler for GraphSAGE
self.sampler = DirectedBreadthFirstNeighbours(
G, graph_schema=self.schema, seed=seed
)
def _max_slots(self):
max_hops = len(self.in_samples)
return 2 ** (max_hops + 1) - 1
    def sample_features(self, head_nodes, batch_num):
        """
        Sample neighbours recursively from the head nodes, collect the features of the
        sampled nodes, and return these as a list of feature arrays for the GraphSAGE
        algorithm.

        Args:
            head_nodes: An iterable of head nodes to perform sampling on.
            batch_num (int): Batch number

        Returns:
            A list of feature tensors from the sampled nodes at each layer, each of shape:
            ``(len(head_nodes), num_sampled_at_layer, feature_size)``
            where ``num_sampled_at_layer`` is the total number (cumulative product)
            of nodes sampled at the given number of hops from each head node,
            given the sequence of in/out directions.
        """
        node_samples = self.sampler.run(
            nodes=head_nodes,
            n=1,
            in_size=self.in_samples,
            out_size=self.out_samples,
            weighted=self.weighted,
        )

        # Reshape node samples to sensible format
        # Each 'slot' represents the list of nodes sampled from some neighbourhood, and will have a corresponding
        # NN input layer. Every hop potentially generates both in-nodes and out-nodes, held separately,
        # and thus the slot (or directed hop sequence) structure forms a binary tree.
        node_type = self.head_node_types[0]

        max_slots = self._max_slots()
        features = [None] * max_slots  # flattened binary tree

        for slot in range(max_slots):
            # Flatten this slot's neighbourhoods across all head nodes so the
            # features can be fetched in a single lookup call.
            nodes_in_slot = list(it.chain(*[sample[slot] for sample in node_samples]))
            features_for_slot = self.graph.node_features(
                nodes_in_slot, node_type, use_ilocs=True
            )
            # A slot with no sampled nodes must keep an explicit 0-length
            # middle axis: reshape cannot infer -1 from an empty array.
            resize = -1 if np.size(features_for_slot) > 0 else 0
            features[slot] = np.reshape(
                features_for_slot, (len(head_nodes), resize, features_for_slot.shape[1])
            )

        return features
def default_corrupt_input_index_groups(self):
# everything can be shuffled together
return [list(range(self._max_slots()))]
class HinSAGENodeGenerator(BatchedNodeGenerator):
    """Keras-compatible data mapper for Heterogeneous GraphSAGE (HinSAGE)

    At minimum, supply the StellarGraph, the batch size, and the number of
    node samples for each layer of the HinSAGE model.

    The supplied graph should be a StellarGraph object with node features for all node types.

    Use the :meth:`flow` method supplying the nodes and (optionally) targets
    to get an object that can be used as a Keras data generator.

    Note that the shuffle argument should be True for training and
    False for prediction.

    .. seealso::

       Model using this generator: :class:`.HinSAGE`.

       Example using this generator: `unsupervised representation learning via Deep Graph Infomax <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/deep-graph-infomax-embeddings.html>`_.

       Related functionality:

       - :class:`.CorruptedGenerator` for unsupervised training using :class:`.DeepGraphInfomax`
       - :class:`.HinSAGELinkGenerator` for link prediction and related tasks
       - :class:`.GraphSAGENodeGenerator` for homogeneous graphs
       - :class:`.DirectedGraphSAGENodeGenerator` for directed homogeneous graphs

    Args:
        G (StellarGraph): The machine-learning ready graph
        batch_size (int): Size of batch to return
        num_samples (list): The number of samples per layer (hop) to take
        head_node_type (str, optional): The node type that will be given to the generator using the
            `flow` method, the model will expect this node type. This does not need to be specified
            if ``G`` has only one node type.
        schema (GraphSchema, optional): Graph schema for G.
        seed (int, optional): Random seed for the node sampler

    Example::

        G_generator = HinSAGENodeGenerator(G, 50, [10,10])
        train_data_gen = G_generator.flow(train_node_ids, train_node_labels)
        test_data_gen = G_generator.flow(test_node_ids)
    """

    def __init__(
        self,
        G,
        batch_size,
        num_samples,
        head_node_type=None,
        schema=None,
        seed=None,
        name=None,
    ):
        super().__init__(G, batch_size, schema=schema)
        self.num_samples = num_samples
        self.name = name

        # The head node type
        if head_node_type is None:
            # infer the head node type, if this is a homogeneous-node graph
            head_node_type = G.unique_node_type(
                "head_node_type: expected a head node type because G has more than one node type, found node types: %(found)s"
            )

        if head_node_type not in self.schema.node_types:
            raise KeyError("Supplied head node type must exist in the graph")

        self.head_node_types = [head_node_type]

        # Create sampling schema
        # `_sampling_schema[0]` is a per-head-node-type layout of
        # (node_type, sample-list indices) pairs; it drives both feature
        # collection in `sample_features` and the corruption grouping below.
        self._sampling_schema = self.schema.sampling_layout(
            self.head_node_types, self.num_samples
        )
        self._type_adjacency_list = self.schema.type_adjacency_list(
            self.head_node_types, len(self.num_samples)
        )

        # Create sampler for HinSAGE
        self.sampler = SampledHeterogeneousBreadthFirstWalk(
            G, graph_schema=self.schema, seed=seed
        )

    def sample_features(self, head_nodes, batch_num):
        """
        Sample neighbours recursively from the head nodes, collect the features of the
        sampled nodes, and return these as a list of feature arrays for the GraphSAGE
        algorithm.

        Args:
            head_nodes: An iterable of head nodes to perform sampling on.
            batch_num (int): Batch number

        Returns:
            A list of the same length as ``num_samples`` of collected features from
            the sampled nodes of shape:
            ``(len(head_nodes), num_sampled_at_layer, feature_size)``
            where ``num_sampled_at_layer`` is the cumulative product of ``num_samples``
            for that layer.
        """
        # Get sampled nodes
        node_samples = self.sampler.run(nodes=head_nodes, n=1, n_size=self.num_samples)

        # Reshape node samples to the required format for the HinSAGE model
        # This requires grouping the sampled nodes by edge type and in order
        # Concatenate, per node type, the sample lists selected by the
        # sampling schema (the empty-list initializer keeps `reduce` safe
        # when there are no samples for a type).
        nodes_by_type = [
            (
                nt,
                reduce(
                    operator.concat,
                    (samples[ks] for samples in node_samples for ks in indices),
                    [],
                ),
            )
            for nt, indices in self._sampling_schema[0]
        ]

        # Get features
        batch_feats = [
            self.graph.node_features(layer_nodes, nt, use_ilocs=True)
            for nt, layer_nodes in nodes_by_type
        ]

        # Resize features to (batch_size, n_neighbours, feature_size)
        # An empty layer keeps an explicit 0-length middle axis, since
        # reshape cannot infer -1 from an empty array.
        batch_feats = [
            np.reshape(a, (len(head_nodes), -1 if np.size(a) > 0 else 0, a.shape[1]))
            for a in batch_feats
        ]

        return batch_feats

    def default_corrupt_input_index_groups(self):
        # every sample of a given node type can be grouped together
        indices_per_nt = defaultdict(list)
        for tensor_idx, (nt, _) in enumerate(self._sampling_schema[0]):
            indices_per_nt[nt].append(tensor_idx)

        # ensure there's a consistent order both within each group, and across groups, ensure the
        # shuffling is deterministic (at least with respect to the model)
        return sorted(sorted(idx) for idx in indices_per_nt.values())
class Attri2VecNodeGenerator(BatchedNodeGenerator):
    """
    A node feature generator for node representation prediction with the
    attri2vec model.

    Supply the machine-learning-ready :class:`.StellarGraph` (which must have
    node features) and the batch size; then call :meth:`flow` with the node
    IDs of interest to obtain an object usable as a Keras data generator.

    Example::

        G_generator = Attri2VecNodeGenerator(G, 50)
        data_gen = G_generator.flow(node_ids)

    .. seealso::

       Model using this generator: :class:`.Attri2Vec`.

       An example using this generator (see the model for more): `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/attri2vec-node-classification.html>`__.

       Related functionality: :class:`.Attri2VecLinkGenerator` for training, link prediction and related tasks.

    Args:
        G (StellarGraph): The machine-learning ready graph.
        batch_size (int): Size of batch to return.
        name (str or None): Name of the generator (optional).
    """

    def __init__(self, G, batch_size, name=None):
        super().__init__(G, batch_size)
        self.name = name

    def sample_features(self, head_nodes, batch_num):
        """
        Look up the content features of the head nodes for the attri2vec
        algorithm.

        Args:
            head_nodes: An iterable of head nodes (as ilocs).
            batch_num (int): Batch number (unused here).

        Returns:
            A feature array with one row per head node.
        """
        return self.graph.node_features(head_nodes, use_ilocs=True)

    def flow(self, node_ids):
        """
        Create a generator/sequence object for node representation prediction
        with the supplied node IDs.

        The node IDs are the nodes to inference on: the embeddings calculated
        for these nodes are passed to the downstream task. They may be a
        subset or all of the nodes in the graph.

        Args:
            node_ids: an iterable of node IDs.

        Returns:
            A NodeSequence object to use with the Attri2Vec model
            in the Keras method ``predict``.
        """
        ilocs = self.graph.node_ids_to_ilocs(node_ids)
        return NodeSequence(self.sample_features, self.batch_size, ilocs, shuffle=False)

    def flow_from_dataframe(self, node_ids):
        """
        As :meth:`flow`, but taking the node IDs from the index of the
        supplied DataFrame.

        Args:
            node_ids: a Pandas DataFrame whose index holds the node IDs.

        Returns:
            A NodeSequence object to use with the Attri2Vec model
            in the Keras method ``predict``.
        """
        return self.flow(node_ids.index)
class Node2VecNodeGenerator(BatchedNodeGenerator):
    """
    A data generator for node representation prediction with Node2Vec models.

    Supply the machine-learning-ready :class:`.StellarGraph` and the batch
    size; then call :meth:`flow` with the node IDs of interest to obtain an
    object usable as a Keras data generator. The model does not require node
    features for nodes in the graph.

    Example::

        G_generator = Node2VecNodeGenerator(G, 50)
        data_gen = G_generator.flow(node_ids)

    .. seealso::

       Model using this generator: :class:`.Node2Vec`.

       An example using this generator (see the model for more): `unsupervised representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/keras-node2vec-embeddings.html>`_.

       Related functionality: :class:`.Node2VecLinkGenerator` for training, link prediction, and related tasks.

    Args:
        G (StellarGraph): The machine-learning ready graph.
        batch_size (int): Size of batch to return.
        name (str or None): Name of the generator (optional).
    """

    def __init__(self, G, batch_size, name=None):
        # Node2Vec learns embeddings from node identity alone, so the base
        # generator is told not to require node features.
        super().__init__(G, batch_size, use_node_features=False)
        self.name = name

    def sample_features(self, head_nodes, batch_num):
        """
        Return the IDs of the head nodes themselves as the "feature" array
        for the Node2Vec algorithm.

        Args:
            head_nodes: An iterable of head nodes.

        Returns:
            An array whose elements are the head-node IDs.
        """
        return np.array(head_nodes)

    def flow(self, node_ids):
        """
        Create a generator/sequence object for node representation prediction
        with the supplied node IDs. Use this with a trained ``Node2Vec`` model
        to transform node IDs into node embeddings; for training, see
        ``Node2VecLinkGenerator`` instead.

        The node IDs are the nodes to inference on: the embeddings calculated
        for these nodes are passed to the downstream task. They may be a
        subset or all of the nodes in the graph.

        Args:
            node_ids: an iterable of node IDs.

        Returns:
            A NodeSequence object to use with the Node2Vec model
            in the Keras method ``predict``.
        """
        ilocs = self.graph.node_ids_to_ilocs(node_ids)
        return NodeSequence(self.sample_features, self.batch_size, ilocs, shuffle=False)

    def flow_from_dataframe(self, node_ids):
        """
        As :meth:`flow`, but taking the node IDs from the index of the
        supplied DataFrame.

        Args:
            node_ids: a Pandas DataFrame whose index holds the node IDs.

        Returns:
            A NodeSequence object to use with the Node2Vec model
            in the Keras method ``predict``.
        """
        return self.flow(node_ids.index)
| 28,026 | 35.926219 | 201 | py |
stellargraph | stellargraph-master/stellargraph/mapper/padded_graph_generator.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..core.graph import StellarGraph
from ..core.utils import is_real_iterable, normalize_adj
from ..random import random_state
import numpy as np
from tensorflow.keras.utils import Sequence
from .base import Generator
class PaddedGraphGenerator(Generator):
    """
    A data generator for use with graph classification algorithms.

    The supplied graphs should be :class:`.StellarGraph` objects with node features.
    Use the :meth:`flow` method supplying the graph indexes and (optionally) targets
    to get an object that can be used as a Keras data generator.

    This generator supplies the features arrays and the adjacency matrices to a mini-batch Keras
    graph classification model. Differences in the number of nodes are resolved by padding each
    batch of features and adjacency matrices, and supplying a boolean mask indicating which are
    valid and which are padding.

    .. seealso::

       Models using this generator: :class:`.GCNSupervisedGraphClassification`, :class:`.DeepGraphCNN`.

       Examples using this generator:

       - `graph classification with GCN <https://stellargraph.readthedocs.io/en/stable/demos/graph-classification/gcn-supervised-graph-classification.html>`__
       - `graph classification with Deep Graph CNN <https://stellargraph.readthedocs.io/en/stable/demos/graph-classification/dgcnn-graph-classification.html>`__
       - `unsupervised graph representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/gcn-unsupervised-graph-embeddings.html>`__

    Args:
        graphs (list): a collection of StellarGraph objects
        name (str): an optional name of the generator
    """

    def __init__(self, graphs, name=None):
        # Common node-feature dimensionality across all graphs; discovered
        # (and enforced) by _check_graphs below.
        self.node_features_size = None
        self._check_graphs(graphs)
        self.graphs = graphs
        self.name = name

    def _check_graphs(self, graphs):
        """Validate that every graph is a non-empty, single-node-type
        StellarGraph whose node features all have the same dimensionality."""
        for graph in graphs:
            if not isinstance(graph, StellarGraph):
                raise TypeError(
                    f"graphs: expected every element to be a StellarGraph object, found {type(graph).__name__}."
                )

            if graph.number_of_nodes() == 0:
                # an empty graph has no information at all and breaks things like mean pooling, so
                # let's disallow them
                raise ValueError(
                    "graphs: expected every graph to be non-empty, found graph with no nodes"
                )

            # Check that there is only a single node type for GAT or GCN
            node_type = graph.unique_node_type(
                "graphs: expected only graphs with a single node type, found a graph with node types: %(found)s"
            )

            graph.check_graph_for_ml()

            # we require that all graphs have node features of the same dimensionality
            f_dim = graph.node_feature_sizes()[node_type]
            if self.node_features_size is None:
                self.node_features_size = f_dim
            elif self.node_features_size != f_dim:
                # FIX: the two adjacent string literals previously concatenated
                # without a separating space ("...dimensions,found 4 vs 5");
                # also "all graph" -> "all graphs".
                raise ValueError(
                    "graphs: expected node features for all graphs to have the same dimensions, "
                    f"found {self.node_features_size} vs {f_dim}"
                )

    def num_batch_dims(self):
        # a batch is indexed by a single leading axis
        return 1

    def flow(
        self,
        graphs,
        targets=None,
        symmetric_normalization=True,
        weighted=False,
        batch_size=1,
        name=None,
        shuffle=False,
        seed=None,
    ):
        """
        Creates a generator/sequence object for training, evaluation, or prediction
        with the supplied graph indexes and targets.

        Args:
            graphs (iterable): an iterable of graph indexes in self.graphs or an iterable of :class:`.StellarGraph` objects
                for the graphs of interest (e.g., training, validation, or test set nodes).
            targets (2d array, optional): a 2D array of numeric graph targets with shape ``(len(graphs),
                len(targets))``.
            symmetric_normalization (bool, optional): The type of normalization to be applied on the graph adjacency
                matrices. If True, the adjacency matrix is left and right multiplied by the inverse square root of the
                degree matrix; otherwise, the adjacency matrix is only left multiplied by the inverse of the degree
                matrix.
            weighted (bool, optional): if True, use the edge weights from ``G``; if False, treat the
                graph as unweighted.
            batch_size (int, optional): The batch size.
            name (str, optional): An optional name for the returned generator object.
            shuffle (bool, optional): If True the node IDs will be shuffled at the end of each epoch.
            seed (int, optional): Random seed to use in the sequence object.

        Returns:
            A :class:`.PaddedGraphSequence` object to use with Keras methods :meth:`fit`, :meth:`evaluate`, and :meth:`predict`
        """
        if targets is not None:
            # Check targets is an iterable
            if not is_real_iterable(targets):
                raise TypeError(
                    f"targets: expected an iterable or None object, found {type(targets).__name__}"
                )

            # Check targets correct shape
            if len(targets) != len(graphs):
                raise ValueError(
                    f"expected targets to be the same length as node_ids, found {len(targets)} vs {len(graphs)}"
                )

        if not isinstance(batch_size, int):
            raise TypeError(
                f"expected batch_size to be integer type, found {type(batch_size).__name__}"
            )

        if batch_size <= 0:
            raise ValueError(
                f"expected batch_size to be strictly positive integer, found {batch_size}"
            )

        # Normalise the input to a 2D array: each row is one dataset example,
        # which may consist of several graphs (e.g. pairs for comparison tasks).
        graphs_array = np.asarray(graphs)
        if len(graphs_array.shape) == 1:
            graphs_array = graphs_array[:, None]
        elif len(graphs_array.shape) != 2:
            raise ValueError(
                f"graphs: expected a shape of length 1 or 2, found shape {graphs_array.shape}"
            )

        flat_graphs = graphs_array.ravel()
        if isinstance(flat_graphs[0], StellarGraph):
            # caller passed graphs directly: validate them and index them positionally
            self._check_graphs(flat_graphs)
            graphs = flat_graphs
            selected_ilocs = np.arange(len(graphs)).reshape(graphs_array.shape)
        else:
            # caller passed indexes into self.graphs
            selected_ilocs = graphs_array
            graphs = self.graphs

        return PaddedGraphSequence(
            graphs=graphs,
            selected_ilocs=selected_ilocs,
            targets=targets,
            symmetric_normalization=symmetric_normalization,
            weighted=weighted,
            batch_size=batch_size,
            name=name,
            shuffle=shuffle,
            seed=seed,
        )
class PaddedGraphSequence(Sequence):
    """
    A Keras-compatible data generator for training and evaluating graph classification models.

    Use this class with the Keras methods :meth:`keras.Model.fit`,
    :meth:`keras.Model.evaluate`, and :meth:`keras.Model.predict`.
    This class should be created using the `.flow(...)` method of
    :class:`.PaddedGraphGenerator`.

    Args:
        graphs (list): The graphs as StellarGraph objects.
        selected_ilocs (array): an array of indices into ``graphs``, of shape N x K for some N and K.
        targets (np.ndarray, optional): An optional array of graph targets of size (N x C),
            where N is the number of selected graph ilocs and C is the target size (e.g., number of classes.)
        normalize (bool, optional): Specifies whether the adjacency matrix for each graph should
            be normalized or not. The default is True.
        symmetric_normalization (bool, optional): Use symmetric normalization if True, that is left and right multiply
            the adjacency matrix by the inverse square root of the degree matrix; otherwise left multiply the adjacency
            matrix by the inverse of the degree matrix. This parameter is ignored if normalize=False.
        batch_size (int, optional): The batch size. It defaults to 1.
        name (str, optional): An optional name for this generator object.
        shuffle (bool, optional): If True the node IDs will be shuffled at the end of each epoch.
        seed (int, optional): Random seed.
    """

    def __init__(
        self,
        graphs,
        selected_ilocs,
        targets=None,
        normalize=True,
        symmetric_normalization=True,
        weighted=False,
        batch_size=1,
        name=None,
        shuffle=False,
        seed=None,
    ):
        self.name = name
        self.graphs = np.asanyarray(graphs)

        if not isinstance(selected_ilocs, np.ndarray):
            # FIX: this message was previously a plain string missing the `f`
            # prefix, so `{type(...)}` was never interpolated.
            raise TypeError(
                f"selected_ilocs: expected a NumPy array, found {type(selected_ilocs).__name__}"
            )
        if not len(selected_ilocs.shape) == 2:
            # FIX: same missing `f` prefix as above.
            raise ValueError(
                f"selected_ilocs: expected a NumPy array of rank 2, found shape {selected_ilocs.shape}"
            )

        # each row of the input corresponds to a single dataset example, but we want to handle
        # columns as bulk operations, and iterating over the major axis is easier
        self.selected_ilocs = selected_ilocs.transpose()
        self.normalize_adj = normalize
        self.targets = targets
        self.batch_size = batch_size

        if targets is not None:
            if len(selected_ilocs) != len(targets):
                raise ValueError(
                    "expected the number of target values and the number of graph ilocs to be the same length,"
                    f"found {len(selected_ilocs)} graph ilocs and {len(targets)} targets."
                )
            self.targets = np.asanyarray(targets)

        adjacencies = [graph.to_adjacency_matrix(weighted=weighted) for graph in graphs]
        if self.normalize_adj:
            self.normalized_adjs = [
                normalize_adj(
                    adj, symmetric=symmetric_normalization, add_self_loops=True,
                )
                for adj in adjacencies
            ]
        else:
            # FIX: this previously assigned to `self.normalize_adjs` (typo),
            # leaving `self.normalized_adjs` unset and causing an
            # AttributeError on the line below whenever normalize=False.
            self.normalized_adjs = adjacencies
        # object array of sparse matrices, so batches can be selected by fancy indexing
        self.normalized_adjs = np.asanyarray(self.normalized_adjs)

        _, self._np_rs = random_state(seed)
        self.shuffle = shuffle
        self.on_epoch_end()

    def _epoch_size(self):
        # number of dataset examples (columns, after the transpose in __init__)
        return self.selected_ilocs.shape[1]

    def __len__(self):
        # number of batches per epoch
        return int(np.ceil(self._epoch_size() / self.batch_size))

    def _pad_graphs(self, graphs, adj_graphs, max_nodes):
        """Pad each graph's features/adjacency/mask up to ``max_nodes`` rows
        (and columns, for the adjacency) and stack them into batch arrays."""
        # pad adjacency and feature matrices to equal the size of those from the largest graph
        features = [
            np.pad(
                graph.node_features(),
                pad_width=((0, max_nodes - graph.number_of_nodes()), (0, 0)),
            )
            for graph in graphs
        ]
        features = np.stack(features)

        # NOTE(review): `resize` mutates the sparse matrices stored in
        # self.normalized_adjs in place. This looks benign because max_nodes
        # always covers each graph's own nodes, so no entries are dropped —
        # but it does grow the stored matrices across batches.
        for adj in adj_graphs:
            adj.resize((max_nodes, max_nodes))
        adj_graphs = np.stack([adj.toarray() for adj in adj_graphs])

        # FIX: `np.bool` was a deprecated alias for the builtin `bool`,
        # removed in NumPy 1.24; use `bool` directly.
        masks = np.full((len(graphs), max_nodes), fill_value=False, dtype=bool)
        for index, graph in enumerate(graphs):
            masks[index, : graph.number_of_nodes()] = True

        # features is array of dimensionality
        #   batch size x N x F
        # masks is array of dimensionality
        #   batch size x N
        # adj_graphs is array of dimensionality
        #   batch size x N x N
        # graph_targets is array of dimensionality
        #   batch size x C
        # where N is the maximum number of nodes for largest graph in the batch, F is
        # the node feature dimensionality, and C is the number of target classes
        return [features, masks, adj_graphs]

    def __getitem__(self, index):
        batch_start, batch_end = index * self.batch_size, (index + 1) * self.batch_size
        batch_ilocs = self.selected_ilocs[:, batch_start:batch_end]

        graphs = self.graphs[batch_ilocs]
        adj_graphs = self.normalized_adjs[batch_ilocs]

        # The number of nodes for the largest graph in the batch. We are going to pad with 0 rows and columns
        # the adjacency and node feature matrices (only the rows in this case) to equal in size the adjacency and
        # feature matrices of the largest graph.
        max_nodes = max(graph.number_of_nodes() for graph in graphs.ravel())

        graph_targets = None
        if self.targets is not None:
            graph_targets = self.targets[batch_start:batch_end]

        # pad each column of examples separately, then flatten into one input list
        padded = [
            self._pad_graphs(g, adj, max_nodes) for g, adj in zip(graphs, adj_graphs)
        ]
        return [output for arrays in padded for output in arrays], graph_targets

    def on_epoch_end(self):
        """
        Shuffle all graphs at the end of each epoch
        """
        if self.shuffle:
            indexes = self._np_rs.permutation(self._epoch_size())
            self.selected_ilocs = self.selected_ilocs[:, indexes]
            if self.targets is not None:
                self.targets = self.targets[indexes]
| 13,859 | 39.645161 | 160 | py |
stellargraph | stellargraph-master/stellargraph/mapper/corrupted.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tensorflow.keras.utils import Sequence
from . import Generator
from ..core.validation import comma_sep
def _validate_indices(corrupt_index_groups):
# specific type check because the iteration order needs to be controlled/consistent
if not isinstance(corrupt_index_groups, (list, tuple)):
raise TypeError(
f"corrupt_index_groups: expected list or tuple, found {type(corrupt_index_groups).__name__}"
)
all_seen = {}
for group_idx, group in enumerate(corrupt_index_groups):
if not isinstance(group, (list, tuple)):
raise TypeError(
f"corrupt_index_groups: expected each group to be a list or tuple, found {type(group).__name__} for group number {group_idx}"
)
if len(group) == 0:
raise ValueError(
f"corrupt_index_groups: expected each group to have at least one index, found empty group number {group_idx}"
)
for elem in group:
earlier_idx = all_seen.get(elem)
if earlier_idx is not None:
raise ValueError(
f"corrupt_index_groups: expected each index to appear at most once, found two occurrences of {elem} (in group numbers {earlier_idx} and {group_idx})"
)
all_seen[elem] = group_idx
if not isinstance(elem, int) or elem < 0:
raise TypeError(
f"corrupt_index_groups: expected each index to be a non-negative integer, found {type(elem).__name__} ({elem!r}) in group number {group_idx}"
)
class CorruptedGenerator(Generator):
    """
    Keras compatible data generator that wraps a :class:`.Generator` and yields corrupted
    data for training Deep Graph Infomax.

    .. seealso::

       Model using this generator: :class:`.DeepGraphInfomax`.

       Examples using this generator:

       - `unsupervised representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/deep-graph-infomax-embeddings.html>`__
       - `semi-supervised node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/gcn-deep-graph-infomax-fine-tuning-node-classification.html>`__

       Generators that support corruption natively: :class:`.FullBatchNodeGenerator`, :class:`.RelationalFullBatchNodeGenerator`, :class:`.GraphSAGENodeGenerator`, :class:`.DirectedGraphSAGENodeGenerator`, :class:`.HinSAGENodeGenerator`, :class:`.ClusterNodeGenerator`.

    Args:
        base_generator (Generator): the uncorrupted Generator object.
        corrupt_index_groups (list of list of int, optional): an explicit list of which input
            tensors should be shuffled to create the corrupted inputs. Each "group" is a
            non-empty list of indices into the tensors yielded by the base generator; the
            tensors in a group are flattened to rank 2 (keeping the last, node-feature axis),
            concatenated, shuffled, and split back to their original shapes, independently per
            group. No index may appear in more than one group. (Only optional when
            ``base_generator`` supplies a default via ``default_corrupt_input_index_groups``;
            otherwise this parameter must be given.)
    """

    def __init__(self, base_generator, *, corrupt_index_groups=None):
        if not isinstance(base_generator, Generator):
            raise TypeError(
                f"base_generator: expected a Generator subclass, found {type(base_generator).__name__}"
            )

        if corrupt_index_groups is None:
            # fall back to the wrapped generator's own notion of corruption
            corrupt_index_groups = base_generator.default_corrupt_input_index_groups()
            if corrupt_index_groups is None:
                # this is a TypeError because most cases of this will be types that _statically_ don't
                # support corruption, not ones that sometimes support corruption and sometimes don't
                raise TypeError(
                    f"base_generator: expected a Generator that supports corruption if 'corrupt_index_groups' is not passed, found {type(base_generator).__name__}"
                )

        _validate_indices(corrupt_index_groups)

        self.base_generator = base_generator
        self.corrupt_index_groups = corrupt_index_groups

    def num_batch_dims(self):
        # defer entirely to the wrapped generator
        return self.base_generator.num_batch_dims()

    def flow(self, *args, **kwargs):
        """
        Create the corrupted :class:`Sequence` object for training Deep Graph Infomax.

        Args:
            args: positional arguments forwarded to ``self.base_generator.flow(...)``
            kwargs: keyword arguments forwarded to ``self.base_generator.flow(...)``
        """
        base_flow = self.base_generator.flow(*args, **kwargs)
        return CorruptedSequence(
            base_flow, self.corrupt_index_groups, self.base_generator.num_batch_dims(),
        )
class CorruptedSequence(Sequence):
    """
    Keras compatible data generator that wraps a Keras Sequence and provides corrupted
    data for training Deep Graph Infomax.

    Args:
        base_sequence: the uncorrupted Sequence object.
        corrupt_index_groups: the groups among which nodes will be shuffled (see :class:`.CorruptedGenerator` for more details)
        num_batch_dims: the number of axes that are "batch" dimensions
    """

    def __init__(self, base_sequence, corrupt_index_groups, num_batch_dims):
        self.corrupt_index_groups = corrupt_index_groups
        self.base_sequence = base_sequence
        self.num_batch_dims = num_batch_dims

    def __len__(self):
        # one corrupted batch per base batch
        return len(self.base_sequence)

    def __getitem__(self, index):
        """Return ``(corrupted inputs + original inputs, targets)`` for batch
        ``index``, where targets mark corrupted vs. original per element."""
        inputs, _ = self.base_sequence[index]

        def corrupt_group(group_idx, group):
            """Shuffle node features across all tensors in one group,
            returning corrupted tensors with the original shapes."""
            try:
                feats_orig = [inputs[idx] for idx in group]
            except IndexError:
                # Provide a better error for indices being out of bounds (doing it earlier/outside
                # `__getitem__` would require evaluating the base generator beforehand/non-lazily)
                invalid = [idx for idx in group if idx >= len(inputs)]
                raise ValueError(
                    f"corrupt_index_groups (group number {group_idx}): expected valid indices among the {len(inputs)} input tensors, found some too large: {comma_sep(invalid)}"
                )

            # this assumes that the input satisfies: last axis holds features for individual nodes;
            # all earlier axes are just arranging those nodes. In particular, a node shouldn't have
            # its features spread across multiple non-last axes, although it can appear more
            # than once.
            feature_dim = feats_orig[0].shape[-1]
            # FIX: `np.product` was a deprecated alias of `np.prod`, removed
            # in NumPy 2.0; `np.prod` is the supported spelling.
            nodes_per_input = [np.prod(feat.shape[:-1]) for feat in feats_orig]
            sections = np.cumsum(nodes_per_input)

            # flatten every tensor to (num_nodes, feature_dim), shuffle all
            # rows together, then split and restore the original shapes
            feats_rank_2 = [feat.reshape(-1, feature_dim) for feat in feats_orig]
            all_feats_shuffled = np.concatenate(feats_rank_2, axis=0)
            # NOTE(review): uses the global NumPy RNG, so corruption is only
            # reproducible via np.random.seed — confirm that's intended.
            np.random.shuffle(all_feats_shuffled)
            feats_rank_2_shuffled = np.split(all_feats_shuffled, sections[:-1])

            return (
                shuf.reshape(orig.shape)
                for shuf, orig in zip(feats_rank_2_shuffled, feats_orig)
            )

        shuffled_feats = [
            corrupted
            for group_idx, group in enumerate(self.corrupt_index_groups)
            for corrupted in corrupt_group(group_idx, group)
        ]

        # create the appropriate labels
        # we assume the smallest batch shape is the correct output shape
        # e.g. for fullbatch methods the correct output shape is (1, num_output_nodes) not (1, num_nodes_in_graph)
        # this is true for all current methods but might have to be re-evaluated in the future
        output_batch_shape = min(inp.shape[: self.num_batch_dims] for inp in inputs)
        targets = np.broadcast_to([np.float32(1), 0], (*output_batch_shape, 2))

        return shuffled_feats + inputs, targets
| 8,936 | 45.546875 | 269 | py |
stellargraph | stellargraph-master/stellargraph/mapper/mini_batch_node_generators.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mappers to provide input data for the graph models in layers.
"""
__all__ = ["ClusterNodeGenerator", "ClusterNodeSequence"]
import random
import copy
import numpy as np
import networkx as nx
from tensorflow.keras.utils import Sequence
from scipy import sparse
from ..core.graph import StellarGraph
from ..core.utils import is_real_iterable, normalize_adj
from ..connector.neo4j.graph import Neo4jStellarGraph
from .base import Generator
class ClusterNodeGenerator(Generator):
    """
    A data generator for use with GCN, GAT and APPNP models on homogeneous graphs, see [1].

    The supplied graph G should be a StellarGraph object with node features.
    Use the :meth:`flow` method supplying the nodes and (optionally) targets
    to get an object that can be used as a Keras data generator.

    This generator will supply the features array and the adjacency matrix to a
    mini-batch Keras graph ML model.

    [1] `W. Chiang, X. Liu, S. Si, Y. Li, S. Bengio, C. Hsieh, 2019 <https://arxiv.org/abs/1905.07953>`_.

    .. seealso::

       Models using this generator: :class:`.GCN`, :class:`.GAT`, :class:`.APPNP`.

       Examples using this generator:

       - `Cluster-GCN node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/cluster-gcn-node-classification.html>`__
       - `Cluster-GCN node classification with Neo4j <https://stellargraph.readthedocs.io/en/stable/demos/connector/neo4j/cluster-gcn-on-cora-neo4j-example.html>`__
       - `unsupervised representation learning with Deep Graph Infomax <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/deep-graph-infomax-embeddings.html>`__

    Args:
        G (StellarGraph): a machine-learning StellarGraph-type graph
        clusters (int or list, optional): If int, it indicates the number of clusters (default is 1, corresponding to the entire graph).
            If `clusters` is greater than 1, then nodes are randomly assigned to a cluster.
            If list, then it should be a list of lists of node IDs, such that each list corresponds to a cluster of nodes
            in `G`. The clusters should be non-overlapping.
        q (int, optional): The number of clusters to combine for each mini-batch (default is 1).
            The total number of clusters must be divisible by `q`.
        lam (float, optional): The mixture coefficient for adjacency matrix normalisation (default is 0.1).
            Valid values are in the interval [0, 1].
        weighted (bool, optional): if True, use the edge weights from ``G``; if False, treat the
            graph as unweighted.
        name (str, optional): Name for the node generator.
    """
    def __init__(self, G, clusters=1, q=1, lam=0.1, weighted=False, name=None):
        if not isinstance(G, (StellarGraph, Neo4jStellarGraph)):
            raise TypeError("Graph must be a StellarGraph or StellarDiGraph object.")
        self.graph = G
        self.name = name
        self.q = q  # The number of clusters to sample per mini-batch
        self.lam = lam
        self.clusters = clusters
        self.method = "cluster_gcn"
        # one node per sample (as opposed to 2 for link-style generators)
        self.multiplicity = 1
        self.use_sparse = False
        self.weighted = weighted
        # A user-supplied list of clusters fixes k directly; an int requests a
        # random split into that many clusters (performed further below).
        if isinstance(clusters, list):
            self.k = len(clusters)
        elif isinstance(clusters, int):
            if clusters <= 0:
                raise ValueError(
                    "{}: clusters must be greater than 0.".format(type(self).__name__)
                )
            self.k = clusters
        else:
            raise TypeError(
                "{}: clusters must be either int or list type.".format(
                    type(self).__name__
                )
            )
        # Some error checking on the given parameter values
        if not isinstance(lam, float):
            raise TypeError("{}: lam must be a float type.".format(type(self).__name__))
        if lam < 0 or lam > 1:
            raise ValueError(
                "{}: lam must be in the range [0, 1].".format(type(self).__name__)
            )
        if not isinstance(q, int):
            raise TypeError("{}: q must be integer type.".format(type(self).__name__))
        if q <= 0:
            raise ValueError(
                "{}: q must be greater than 0.".format(type(self).__name__)
            )
        # q clusters are merged per mini-batch, so the cluster count must divide evenly
        if self.k % q != 0:
            raise ValueError(
                "{}: the number of clusters must be exactly divisible by q.".format(
                    type(self).__name__
                )
            )
        self.node_list = list(G.nodes())
        # if graph is a StellarGraph check that the graph has features
        G.check_graph_for_ml(expensive_check=False)
        # Check that there is only a single node type
        _ = G.unique_node_type(
            "G: expected a graph with a single node type, found a graph with node types: %(found)s"
        )
        if isinstance(clusters, int):
            # We are not given graph clusters.
            # We are going to split the graph into self.k random clusters
            all_nodes = list(G.nodes())
            random.shuffle(all_nodes)
            cluster_size = len(all_nodes) // self.k
            self.clusters = [
                all_nodes[i : i + cluster_size]
                for i in range(0, len(all_nodes), cluster_size)
            ]
            if len(self.clusters) > self.k:
                # for the case that the number of nodes is not exactly divisible by k, we combine
                # the last cluster with the second last one
                self.clusters[-2].extend(self.clusters[-1])
                del self.clusters[-1]
        print(f"Number of clusters {self.k}")
        for i, c in enumerate(self.clusters):
            print(f"{i} cluster has size {len(c)}")
        # Store the features of one node to allow graph ML models to peak at the feature dimension
        # FIXME 1621: store feature_dimension here instead of features. This must also update ClusterGCN, and all
        # fullbactch methods and generators
        self.features = G.node_features(self.node_list[:1])
    def num_batch_dims(self):
        # Outputs carry a leading dummy batch axis plus the per-cluster node axis,
        # i.e. shapes like (1, nodes_in_cluster, ...), hence two batch dimensions.
        return 2
    def flow(self, node_ids, targets=None, name=None):
        """
        Creates a generator/sequence object for training, evaluation, or prediction
        with the supplied node ids and numeric targets.

        Args:
            node_ids (iterable): an iterable of node ids for the nodes of interest
                (e.g., training, validation, or test set nodes)
            targets (2d array, optional): a 2D array of numeric node targets with shape ``(len(node_ids),
                target_size)``
            name (str, optional): An optional name for the returned generator object.

        Returns:
            A :class:`ClusterNodeSequence` object to use with :class:`.GCN`, :class:`.GAT` or :class:`.APPNP` in Keras
            methods :meth:`fit`, :meth:`evaluate`, and :meth:`predict`.
        """
        if targets is not None:
            # Check targets is an iterable
            if not is_real_iterable(targets):
                raise TypeError(
                    "{}: Targets must be an iterable or None".format(
                        type(self).__name__
                    )
                )
            # Check targets correct shape
            if len(targets) != len(node_ids):
                raise ValueError(
                    "{}: Targets must be the same length as node_ids".format(
                        type(self).__name__
                    )
                )
        return ClusterNodeSequence(
            self.graph,
            self.clusters,
            targets=targets,
            node_ids=node_ids,
            q=self.q,
            lam=self.lam,
            weighted=self.weighted,
            name=name,
        )
    def default_corrupt_input_index_groups(self):
        # For Deep Graph Infomax: the node-features tensor (input 0) is the one
        # to shuffle when producing "corrupted" inputs.
        return [[0]]
class ClusterNodeSequence(Sequence):
    """
    A Keras-compatible data generator for node inference using ClusterGCN model.
    Use this class with the Keras methods :meth:`keras.Model.fit`,
    :meth:`keras.Model.evaluate`, and
    :meth:`keras.Model.predict`.

    This class should be created using the :meth:`flow` method of
    :class:`.ClusterNodeGenerator`.

    Args:
        graph (StellarGraph): The graph
        clusters (list): A list of lists such that each sub-list indicates the nodes in a cluster.
            The length of this list, len(clusters) indicates the number of batches in one epoch.
        targets (np.ndarray, optional): An optional array of node targets of size (N x C),
            where C is the target size (e.g., number of classes for one-hot class targets)
        node_ids (iterable, optional): The node IDs for the target nodes. Required if targets is not None.
        normalize_adj (bool, optional): Specifies whether the adjacency matrix for each mini-batch should
            be normalized or not. The default is True.
        q (int, optional): The number of subgraphs to combine for each batch. The default value is
            1 such that the generator treats each subgraph as a batch.
        lam (float, optional): The mixture coefficient for adjacency matrix normalisation (the
            'diagonal enhancement' method). Valid values are in the interval [0, 1] and the default value is 0.1.
        name (str, optional): An optional name for this generator object.
    """
    def __init__(
        self,
        graph,
        clusters,
        targets=None,
        node_ids=None,
        normalize_adj=True,
        q=1,
        lam=0.1,
        weighted=False,
        name=None,
    ):
        self.name = name
        self.clusters = list()
        # pristine copy of the supplied clusters; on_epoch_end rebuilds
        # self.clusters from it (shuffled, and merged q-at-a-time when q > 1)
        self.clusters_original = copy.deepcopy(clusters)
        self.graph = graph
        self.node_list = list(graph.nodes())
        self.normalize_adj = normalize_adj
        self.q = q
        self.lam = lam
        self.weighted = weighted
        # node_order records the order target nodes were emitted across one epoch,
        # so callers can match predictions back to node IDs
        self.node_order = list()
        self._node_order_in_progress = list()
        # per-batch buffer: batch index -> target nodes emitted in that batch
        self.__node_buffer = dict()
        self.target_ids = list()
        if len(clusters) % self.q != 0:
            raise ValueError(
                "The number of clusters should be exactly divisible by q. However, {} number of clusters is not exactly divisible by {}.".format(
                    len(clusters), q
                )
            )
        if node_ids is not None:
            self.target_ids = list(node_ids)
        if targets is not None:
            if node_ids is None:
                raise ValueError(
                    "Since targets is not None, node_ids must be given and cannot be None."
                )
            if len(node_ids) != len(targets):
                raise ValueError(
                    "When passed together targets and indices should be the same length."
                )
            self.targets = np.asanyarray(targets)
            # target node ID -> row index into self.targets, for O(1) lookups
            self.target_node_lookup = dict(
                zip(self.target_ids, range(len(self.target_ids)))
            )
        else:
            self.targets = None
        # build the initial (shuffled) batch order
        self.on_epoch_end()
    def __len__(self):
        # one batch per group of q clusters
        num_batches = len(self.clusters_original) // self.q
        return num_batches
    def _diagonal_enhanced_normalization(self, adj_cluster):
        # Cluster-GCN normalization is:
        # A~ + λdiag(A~) where A~ = N(A + I) with normalization factor N = (D + I)^(-1)
        #
        # Expands to:
        # NA + NI + λN(diag(A) + I) =
        # NA + N(I + λ(diag(A) + I)) =
        # NA + λN(diag(A) + (1 + 1/λ)I))
        #
        # (This could potentially become a layer, to benefit from a GPU.)
        degrees = np.asarray(adj_cluster.sum(axis=1)).ravel()
        normalization = 1 / (degrees + 1)
        # NA: multiply rows manually
        norm_adj = adj_cluster.multiply(normalization[:, None]).toarray()
        # λN(diag(A) + (1 + 1/λ)I): work with the diagonals directly
        diag = np.diag(norm_adj)
        diag_addition = (
            normalization * self.lam * (adj_cluster.diagonal() + (1 + 1 / self.lam))
        )
        np.fill_diagonal(norm_adj, diag + diag_addition)
        return norm_adj
    def __getitem__(self, index):
        # The next batch should be the adjacency matrix for the cluster and the corresponding feature vectors
        # and targets if available.
        cluster = self.clusters[index]
        adj_cluster = self.graph.to_adjacency_matrix(cluster, weighted=self.weighted)
        if self.normalize_adj:
            adj_cluster = self._diagonal_enhanced_normalization(adj_cluster)
        else:
            adj_cluster = adj_cluster.toarray()
        g_node_list = list(cluster)
        # Determine the target nodes that exist in this cluster
        # (set intersection does not preserve order, so the emitted order is
        # recorded via __node_buffer / node_order)
        target_nodes_in_cluster = np.asanyarray(
            list(set(g_node_list).intersection(self.target_ids))
        )
        self.__node_buffer[index] = target_nodes_in_cluster
        # Dictionary to store node indices for quicker node index lookups
        node_lookup = dict(zip(g_node_list, range(len(g_node_list))))
        # The list of indices of the target nodes in self.node_list
        target_node_indices = np.array(
            [node_lookup[n] for n in target_nodes_in_cluster]
        )
        if index == (len(self.clusters_original) // self.q) - 1:
            # last batch: flush the per-batch buffer into node_order for this epoch
            self.__node_buffer_dict_to_list()
        cluster_targets = None
        # slice out the targets (if any) for the target nodes present in this cluster
        if self.targets is not None:
            # Dictionary to store node indices for quicker node index lookups
            # The list of indices of the target nodes in self.node_list
            cluster_target_indices = np.array(
                [self.target_node_lookup[n] for n in target_nodes_in_cluster],
                dtype=np.int64,
            )
            cluster_targets = self.targets[cluster_target_indices]
            cluster_targets = cluster_targets.reshape((1,) + cluster_targets.shape)
        features = self.graph.node_features(g_node_list)
        # add the leading dummy batch axis expected by the full-batch/cluster models
        features = np.reshape(features, (1,) + features.shape)
        adj_cluster = adj_cluster.reshape((1,) + adj_cluster.shape)
        target_node_indices = target_node_indices[np.newaxis, :]
        return [features, target_node_indices, adj_cluster], cluster_targets
    def __node_buffer_dict_to_list(self):
        # concatenate per-batch target-node arrays in batch order
        self.node_order = []
        for k, v in self.__node_buffer.items():
            self.node_order.extend(v)
    def on_epoch_end(self):
        """
        Shuffle all nodes at the end of each epoch
        """
        if self.q > 1:
            # combine clusters: shuffle the original clusters, then merge
            # consecutive groups of q into a single batch-cluster
            cluster_indices = list(range(len(self.clusters_original)))
            random.shuffle(cluster_indices)
            self.clusters = []
            for i in range(0, len(cluster_indices) - 1, self.q):
                cc = cluster_indices[i : i + self.q]
                tmp = []
                for l in cc:
                    tmp.extend(list(self.clusters_original[l]))
                self.clusters.append(tmp)
        else:
            self.clusters = copy.deepcopy(self.clusters_original)
        # reset the buffer so node_order is rebuilt for the new epoch
        self.__node_buffer = dict()
        random.shuffle(self.clusters)
| 15,819 | 38.255583 | 173 | py |
stellargraph | stellargraph-master/stellargraph/mapper/full_batch_generators.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mappers to provide input data for the graph models in layers.
"""
__all__ = [
"FullBatchGenerator",
"FullBatchNodeGenerator",
"FullBatchLinkGenerator",
"RelationalFullBatchNodeGenerator",
]
import warnings
import operator
import random
import numpy as np
import itertools as it
import networkx as nx
import scipy.sparse as sps
from tensorflow.keras import backend as K
from functools import reduce
from tensorflow.keras.utils import Sequence
from . import (
Generator,
FullBatchSequence,
SparseFullBatchSequence,
RelationalFullBatchNodeSequence,
GraphSAGENodeGenerator,
DirectedGraphSAGENodeGenerator,
)
from ..core.graph import StellarGraph
from ..core.utils import is_real_iterable
from ..core.utils import GCN_Aadj_feats_op, PPNP_Aadj_feats_op
from ..core.validation import comma_sep
class FullBatchGenerator(Generator):
    """
    Shared implementation behind :class:`FullBatchNodeGenerator` and
    :class:`FullBatchLinkGenerator`: computes the (optionally preprocessed)
    feature matrix and adjacency matrix for an entire homogeneous graph.

    This class is abstract: subclasses must set ``multiplicity`` (the number of
    nodes per sample, e.g. 1 for node tasks, 2 for link tasks); instantiating it
    directly raises ``TypeError``.
    """
    # sentinel: concrete subclasses override this with 1 or 2
    multiplicity = None
    def __init__(
        self,
        G,
        name=None,
        method="gcn",
        k=1,
        sparse=True,
        transform=None,
        teleport_probability=0.1,
        weighted=False,
    ):
        # ``multiplicity`` is only defined on concrete subclasses, so None here
        # means this abstract class was instantiated directly
        if self.multiplicity is None:
            raise TypeError(
                "Can't instantiate abstract class 'FullBatchGenerator', please"
                "instantiate either 'FullBatchNodeGenerator' or 'FullBatchLinkGenerator'"
            )
        if not isinstance(G, StellarGraph):
            raise TypeError("Graph must be a StellarGraph or StellarDiGraph object.")
        self.graph = G
        self.name = name
        self.k = k
        self.teleport_probability = teleport_probability
        self.method = method
        # Check if the graph has features
        G.check_graph_for_ml()
        # Check that there is only a single node type for GAT or GCN
        node_type = G.unique_node_type(
            "G: expected a graph with a single node type, found a graph with node types: %(found)s"
        )
        # Create sparse adjacency matrix:
        # Use the node orderings the same as in the graph features
        self.node_list = G.nodes()
        self.Aadj = G.to_adjacency_matrix(weighted=weighted)
        # Power-user feature: make the generator yield dense adjacency matrix instead
        # of the default sparse one.
        # If sparse is specified, check that the backend is tensorflow
        if sparse and K.backend() != "tensorflow":
            warnings.warn(
                "Sparse adjacency matrices are only supported in tensorflow."
                " Falling back to using a dense adjacency matrix."
            )
            self.use_sparse = False
        else:
            self.use_sparse = sparse
        # Get the features for the nodes
        self.features = G.node_features(node_type=node_type)
        if transform is not None:
            # a user-supplied transform replaces all built-in preprocessing
            if callable(transform):
                self.features, self.Aadj = transform(
                    features=self.features, A=self.Aadj
                )
            else:
                raise ValueError("argument 'transform' must be a callable.")
        elif self.method in ["gcn", "sgc"]:
            self.features, self.Aadj = GCN_Aadj_feats_op(
                features=self.features, A=self.Aadj, k=self.k, method=self.method
            )
        elif self.method in ["gat", "self_loops"]:
            # set every diagonal entry to exactly 1: adds self-loops without
            # doubling any that already exist
            self.Aadj = self.Aadj + sps.diags(
                np.ones(self.Aadj.shape[0]) - self.Aadj.diagonal()
            )
        elif self.method in ["ppnp"]:
            # the personalized-PageRank propagation matrix is dense, so a sparse
            # representation is rejected up front
            if self.use_sparse:
                raise ValueError(
                    "sparse: method='ppnp' requires 'sparse=False', found 'sparse=True' "
                    "(consider using the APPNP model for sparse support)"
                )
            self.features, self.Aadj = PPNP_Aadj_feats_op(
                features=self.features,
                A=self.Aadj,
                teleport_probability=self.teleport_probability,
            )
        elif self.method in [None, "none"]:
            pass
        else:
            raise ValueError(
                "Undefined method for adjacency matrix transformation. "
                "Accepted: 'gcn' (default), 'sgc', and 'self_loops'."
            )
    def num_batch_dims(self):
        # outputs are shaped (1, num_samples, ...): a dummy batch axis plus the
        # sample axis, hence two batch dimensions
        return 2
    def flow(self, node_ids, targets=None, use_ilocs=False):
        """
        Creates a generator/sequence object for training or evaluation
        with the supplied node ids and numeric targets.

        Args:
            node_ids: an iterable of node ids for the nodes of interest
                (e.g., training, validation, or test set nodes)
            targets: a 1D or 2D array of numeric node targets with shape ``(len(node_ids),)``
                or ``(len(node_ids), target_size)``
            use_ilocs (bool): if True, node_ids are represented by ilocs,
                otherwise node_ids need to be transformed into ilocs

        Returns:
            A NodeSequence object to use with GCN or GAT models
            in Keras methods :meth:`fit`, :meth:`evaluate`,
            and :meth:`predict`
        """
        if targets is not None:
            # Check targets is an iterable
            if not is_real_iterable(targets):
                raise TypeError("Targets must be an iterable or None")
            # Check targets correct shape
            if len(targets) != len(node_ids):
                raise TypeError("Targets must be the same length as node_ids")
        # find the indices of the nodes, handling both multiplicity 1 [node, node, ...] and 2
        # [(source, target), ...]
        node_ids = np.asarray(node_ids)
        if use_ilocs:
            node_indices = node_ids
        else:
            # flatten so node_ids_to_ilocs sees a 1D array regardless of multiplicity
            flat_node_ids = node_ids.reshape(-1)
            flat_node_indices = self.graph.node_ids_to_ilocs(flat_node_ids)
            # back to the original shape
            node_indices = flat_node_indices.reshape(node_ids.shape)
        if self.use_sparse:
            return SparseFullBatchSequence(
                self.features, self.Aadj, targets, node_indices
            )
        else:
            return FullBatchSequence(self.features, self.Aadj, targets, node_indices)
class FullBatchNodeGenerator(FullBatchGenerator):
    """
    Generator that feeds an entire homogeneous graph (node features plus
    adjacency matrix) to full-batch node models such as GCN, GAT, SGC and
    (A)PPNP.

    The supplied graph ``G`` must be a :class:`StellarGraph` with node
    features. Call :meth:`flow` with the node IDs of interest (and optional
    targets) to obtain a Keras-compatible sequence; the adjacency matrix is
    yielded sparse by default, or dense when ``sparse=False``.

    The ``method`` argument selects the adjacency preprocessing expected by
    the downstream algorithm:

    * ``method='gcn'``: GCN normalisation, Eq. 8 of [1].
    * ``method='sgc'``: k-th order smoothed adjacency for Simple Graph
      Convolution, Eq. 8 of [2].
    * ``method='self_loops'`` or ``method='gat'``: sets the diagonal to one
      (adds self-loops), as used by GAT [3].
    * ``method='ppnp'``: personalised PageRank matrix, Eq. 2 of [4].

    [1] `Kipf and Welling, 2017 <https://arxiv.org/abs/1609.02907>`_.

    [2] `Wu et al. 2019 <https://arxiv.org/abs/1902.07153>`_.

    [3] `Veličković et al., 2018 <https://arxiv.org/abs/1710.10903>`_.

    [4] `Klicpera et al., 2018 <https://arxiv.org/abs/1810.05997>`_.

    Example::

        G_generator = FullBatchNodeGenerator(G)
        train_flow = G_generator.flow(node_ids, node_targets)

        # Fetch the data from train_flow, and feed into a Keras model:
        x_inputs, y_train = train_flow[0]
        model.fit(x=x_inputs, y=y_train)

        # Alternatively, use the generator itself with model.fit:
        model.fit(train_flow, epochs=num_epochs)

    .. seealso::

       Models using this generator: :class:`.GCN`, :class:`.GAT`, :class:`.APPNP`, :class:`.PPNP`.

       Example using this generator (see individual models for more): `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/gcn-node-classification.html>`__.

       Related generators:

       - :class:`.ClusterNodeGenerator` for scalable/inductive training
       - :class:`.CorruptedGenerator` for unsupervised training with :class:`.DeepGraphInfomax`
       - :class:`.FullBatchLinkGenerator` for link prediction and similar tasks
       - :class:`.RelationalFullBatchNodeGenerator` for multiple edge types, with :class:`.RGCN`
       - :class:`.PaddedGraphGenerator` for graph classification

    Args:
        G (StellarGraph): a machine-learning StellarGraph-type graph
        name (str): an optional name for the generator
        method (str): adjacency preprocessing; one of ``gcn`` (default), ``sgc``,
            ``self_loops``, or ``none``
        k (None or int): smoothing order for ``method='sgc'``; a positive integer
        transform (callable): optional override taking ``(features, Aadj)`` and
            returning the transformed pair
        sparse (bool): if True (default) yield a sparse adjacency matrix,
            otherwise a dense one
        teleport_probability (float): restart ("teleport") probability in
            [0.0, 1.0] for ``method='ppnp'``
        weighted (bool, optional): if True, use the edge weights from ``G``;
            if False, treat the graph as unweighted
    """

    # each sample is a single node (link generators use 2)
    multiplicity = 1

    def flow(self, node_ids, targets=None, use_ilocs=False):
        """
        Build a Keras-compatible sequence for the given head nodes.

        Args:
            node_ids: iterable of node IDs (or ilocs, see ``use_ilocs``) for the
                nodes of interest, e.g. training, validation or test nodes
            targets: optional 1D or 2D numeric array with one row per entry of
                ``node_ids``
            use_ilocs (bool): when True, ``node_ids`` are already graph ilocs
                and no ID-to-iloc translation is performed

        Returns:
            A sequence object usable with GCN or GAT models in the Keras
            methods :meth:`fit`, :meth:`evaluate`, and :meth:`predict`.
        """
        # validation and sequence construction are shared with the link
        # generator and live in FullBatchGenerator.flow
        return super().flow(node_ids, targets=targets, use_ilocs=use_ilocs)

    def default_corrupt_input_index_groups(self):
        # Deep Graph Infomax corrupts the node-feature tensor, which is the
        # first input yielded by this generator.
        features_input_index = 0
        return [[features_input_index]]
class FullBatchLinkGenerator(FullBatchGenerator):
    """
    Generator that feeds an entire homogeneous graph (node features plus
    adjacency matrix) to full-batch models — GCN, GAT, SGC, (A)PPNP — for
    link-level tasks.

    The supplied graph ``G`` must be a :class:`StellarGraph` with node
    features. Call :meth:`flow` with the links of interest as ``(src, dst)``
    node-ID tuples (and optional targets) to obtain a Keras-compatible
    sequence; the adjacency matrix is yielded sparse by default, or dense
    when ``sparse=False``.

    The ``method`` argument selects the adjacency preprocessing expected by
    the downstream algorithm:

    * ``method='gcn'``: GCN normalisation, Eq. 8 of [1].
    * ``method='sgc'``: k-th order smoothed adjacency for Simple Graph
      Convolution, Eq. 8 of [2].
    * ``method='self_loops'`` or ``method='gat'``: sets the diagonal to one
      (adds self-loops), as used by GAT [3].
    * ``method='ppnp'``: personalised PageRank matrix, Eq. 2 of [4].

    [1] `Kipf and Welling, 2017 <https://arxiv.org/abs/1609.02907>`_.

    [2] `Wu et al. 2019 <https://arxiv.org/abs/1902.07153>`_.

    [3] `Veličković et al., 2018 <https://arxiv.org/abs/1710.10903>`_.

    [4] `Klicpera et al., 2018 <https://arxiv.org/abs/1810.05997>`_.

    Example::

        G_generator = FullBatchLinkGenerator(G)
        train_flow = G_generator.flow([(1,2), (3,4), (5,6)], [0, 1, 1])

        # Fetch the data from train_flow, and feed into a Keras model:
        x_inputs, y_train = train_flow[0]
        model.fit(x=x_inputs, y=y_train)

        # Alternatively, use the generator itself with model.fit:
        model.fit(train_flow, epochs=num_epochs)

    .. seealso::

       Models using this generator: :class:`.GCN`, :class:`.GAT`, :class:`.APPNP`, :class:`.PPNP`.

       Example using this generator: `link classification with GCN <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/gcn-link-prediction.html>`__.

       Related generator: :class:`.FullBatchNodeGenerator` for node classification and similar tasks.

    Args:
        G (StellarGraph): a machine-learning StellarGraph-type graph
        name (str): an optional name for the generator
        method (str): adjacency preprocessing; one of ``gcn`` (default), ``sgc``,
            ``self_loops``, or ``none``
        k (None or int): smoothing order for ``method='sgc'``; a positive integer
        transform (callable): optional override taking ``(features, Aadj)`` and
            returning the transformed pair
        sparse (bool): if True (default) yield a sparse adjacency matrix,
            otherwise a dense one
        teleport_probability (float): restart ("teleport") probability in
            [0.0, 1.0] for ``method='ppnp'``
        weighted (bool, optional): if True, use the edge weights from ``G``;
            if False, treat the graph as unweighted
    """

    # each sample is a pair of nodes (a link)
    multiplicity = 2

    def flow(self, link_ids, targets=None, use_ilocs=False):
        """
        Build a Keras-compatible sequence for the given links.

        Args:
            link_ids: iterable of ``(src, dst)`` node-ID tuples, or an array of
                shape ``(N_links, 2)``, specifying the links of interest
            targets: optional 1D or 2D numeric array with one row per link
            use_ilocs (bool): when True, the node identifiers in ``link_ids``
                are already graph ilocs and no translation is performed

        Returns:
            A sequence object usable with GCN or GAT models in the Keras
            methods :meth:`fit`, :meth:`evaluate`, and :meth:`predict`.
        """
        # validation and sequence construction are shared with the node
        # generator and live in FullBatchGenerator.flow
        return super().flow(link_ids, targets=targets, use_ilocs=use_ilocs)
class RelationalFullBatchNodeGenerator(Generator):
    """
    A data generator for use with full-batch models on relational graphs e.g. RGCN.

    The supplied graph G should be a StellarGraph or StellarDiGraph object with node features.
    Use the :meth:`flow` method supplying the nodes and (optionally) targets
    to get an object that can be used as a Keras data generator.

    This generator will supply the features array and the adjacency matrix to a
    full-batch Keras graph ML model.  There is a choice to supply either a list of sparse
    adjacency matrices (the default) or a list of dense adjacency matrices, with the `sparse`
    argument.

    For these algorithms the adjacency matrices require preprocessing and the default option is to
    normalize each row of the adjacency matrix so that it sums to 1.
    For customization a transformation (callable) can be passed that
    operates on the node features and adjacency matrix.

    Example::

        G_generator = RelationalFullBatchNodeGenerator(G)
        train_data_gen = G_generator.flow(node_ids, node_targets)

        # Fetch the data from train_data_gen, and feed into a Keras model:
        # Alternatively, use the generator itself with model.fit:
        model.fit(train_gen, epochs=num_epochs, ...)

    .. seealso::

       Model using this generator: :class:`.RGCN`.

       Examples using this generator:

       - `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/rgcn-node-classification.html>`__
       - `unsupervised representation learning with Deep Graph Infomax <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/deep-graph-infomax-embeddings.html>`__

       Related generators:

       - :class:`.FullBatchNodeGenerator` for graphs with one edge type
       - :class:`.CorruptedGenerator` for unsupervised training with :class:`.DeepGraphInfomax`

    Args:
        G (StellarGraph): a machine-learning StellarGraph-type graph
        name (str): an optional name of the generator
        transform (callable): an optional function to apply on features and adjacency matrix
            the function takes ``(features, Aadj)`` as arguments.
        sparse (bool): If True (default) a list of sparse adjacency matrices is used,
            if False a list of dense adjacency matrices is used.
        weighted (bool, optional): if True, use the edge weights from ``G``; if False, treat the
            graph as unweighted.
    """
    def __init__(self, G, name=None, sparse=True, transform=None, weighted=False):
        if not isinstance(G, StellarGraph):
            raise TypeError("Graph must be a StellarGraph object.")
        self.graph = G
        self.name = name
        self.use_sparse = sparse
        # one node per sample
        self.multiplicity = 1
        # Check if the graph has features
        G.check_graph_for_ml()
        # extract node, feature, and edge type info from G
        node_types = list(G.node_types)
        if len(node_types) != 1:
            raise ValueError(
                f"G: expected one node type, found {comma_sep(sorted(node_types))}",
            )
        self.features = G.node_features(node_type=node_types[0])
        # create a list of adjacency matrices - one adj matrix for each edge type
        # an adjacency matrix is created for each edge type from all edges of that type
        self.As = []
        for edge_type in G.edge_types:
            # note that A is the transpose of the standard adjacency matrix
            # this is to aggregate features from incoming nodes
            A = G.to_adjacency_matrix(
                edge_type=edge_type, weighted=weighted
            ).transpose()
            if transform is None:
                # normalize here and replace zero row sums with 1
                # to avoid harmless divide by zero warnings
                # NOTE(review): maximum(..., 1) also clamps weighted row sums in
                # (0, 1) up to 1, leaving those rows un-normalized — confirm this
                # is intended for weighted graphs
                d = sps.diags(
                    np.float_power(np.ravel(np.maximum(A.sum(axis=1), 1)), -1), 0
                )
                A = d.dot(A)
            else:
                self.features, A = transform(self.features, A)
            # COO format is what the downstream sequence/model consumes
            A = A.tocoo()
            self.As.append(A)
    def num_batch_dims(self):
        # outputs are shaped (1, num_nodes, ...): a dummy batch axis plus the node axis
        return 2
    def flow(self, node_ids, targets=None):
        """
        Creates a generator/sequence object for training or evaluation
        with the supplied node ids and numeric targets.

        Args:
            node_ids: and iterable of node ids for the nodes of interest
                (e.g., training, validation, or test set nodes)
            targets: a 2D array of numeric node targets with shape ``(len(node_ids), target_size)``

        Returns:
            A NodeSequence object to use with RGCN models
            in Keras methods :meth:`fit`, :meth:`evaluate`,
            and :meth:`predict`
        """
        if targets is not None:
            # Check targets is an iterable
            if not is_real_iterable(targets):
                raise TypeError("Targets must be an iterable or None")
            # Check targets correct shape
            if len(targets) != len(node_ids):
                raise TypeError("Targets must be the same length as node_ids")
        node_indices = self.graph.node_ids_to_ilocs(node_ids)
        return RelationalFullBatchNodeSequence(
            self.features, self.As, self.use_sparse, targets, node_indices
        )
    def default_corrupt_input_index_groups(self):
        # For Deep Graph Infomax: corrupt (shuffle) the node-features input,
        # which is the first tensor yielded by this generator.
        return [[0]]
| 21,497 | 39.562264 | 197 | py |
stellargraph | stellargraph-master/stellargraph/mapper/sequences.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sequences to provide input to Keras
"""
__all__ = [
"NodeSequence",
"LinkSequence",
"OnDemandLinkSequence",
"FullBatchSequence",
"SparseFullBatchSequence",
"RelationalFullBatchNodeSequence",
]
import warnings
import operator
import random
import collections
import numpy as np
import itertools as it
import networkx as nx
import scipy.sparse as sps
from tensorflow.keras import backend as K
from functools import reduce
from tensorflow.keras.utils import Sequence
from ..data.unsupervised_sampler import UnsupervisedSampler
from ..core.utils import is_real_iterable
from ..random import random_state
from scipy import sparse
from ..core.experimental import experimental
class NodeSequence(Sequence):
    """Keras-compatible data generator for node inference models.

    Yields batches of ``(features, targets)`` for use with the Keras methods
    :meth:`keras.Model.fit`, :meth:`keras.Model.evaluate`, and
    :meth:`keras.Model.predict`. Instances should be created via the
    `.flow(...)` method of :class:`.GraphSAGENodeGenerator`,
    :class:`.DirectedGraphSAGENodeGenerator`, :class:`.HinSAGENodeGenerator`
    or :class:`.Attri2VecNodeGenerator`, which supply the sampling function.

    Args:
        sample_function (Callable): A function that returns features for supplied head nodes.
        batch_size (int): Number of head nodes per batch.
        ids (list): A list of the node_ids to be used as head-nodes in the downstream task.
        targets (list, optional): A list of targets or labels to be used in the downstream task.
        shuffle (bool): If True (default) the ids will be randomly shuffled every epoch.
        seed (int, optional): Random seed for the per-epoch shuffle.
    """

    def __init__(
        self, sample_function, batch_size, ids, targets=None, shuffle=True, seed=None
    ):
        # IDs must be a real (non-string) iterable of node identifiers.
        if not is_real_iterable(ids):
            raise TypeError("IDs must be an iterable or numpy array of graph node IDs")

        # Targets, when given, must be iterable and align 1:1 with the ids.
        if targets is None:
            self.targets = None
        else:
            if not is_real_iterable(targets):
                raise TypeError("Targets must be None or an iterable or numpy array ")
            if len(ids) != len(targets):
                raise ValueError(
                    "The length of the targets must be the same as the length of the ids"
                )
            self.targets = np.asanyarray(targets)

        # The sampling function does the actual graph feature collection.
        if not isinstance(sample_function, collections.abc.Callable):
            raise TypeError(
                "({}) The sampling function expects a callable function.".format(
                    type(self).__name__
                )
            )
        self._sample_function = sample_function

        self.ids = list(ids)
        self.data_size = len(self.ids)
        self.shuffle = shuffle
        self.batch_size = batch_size
        self._rs, _ = random_state(seed)

        # Build (and possibly shuffle) the initial index permutation.
        self.on_epoch_end()

    def __len__(self):
        """Number of batches per epoch (last batch may be smaller)."""
        return int(np.ceil(self.data_size / self.batch_size))

    def __getitem__(self, batch_num):
        """
        Generate one batch of data.

        Args:
            batch_num (int): number of a batch

        Returns:
            batch_feats (list): Node features for nodes and neighbours sampled from a
                batch of the supplied IDs
            batch_targets (list): Targets/labels for the batch, or None.
        """
        first = self.batch_size * batch_num
        if first >= self.data_size:
            raise IndexError("Mapper: batch_num larger than length of data")

        # Indices (into self.ids) that make up this batch.
        selected = self.indices[first : first + self.batch_size]

        # Head (root) nodes and their aligned targets.
        head_ids = [self.ids[idx] for idx in selected]
        batch_targets = self.targets[selected] if self.targets is not None else None

        # Delegate the feature collection to the supplied sampling function.
        batch_feats = self._sample_function(head_ids, batch_num)
        return batch_feats, batch_targets

    def on_epoch_end(self):
        """Re-shuffle all head (root) nodes at the end of each epoch."""
        self.indices = list(range(self.data_size))
        if self.shuffle:
            self._rs.shuffle(self.indices)
class LinkSequence(Sequence):
    """
    Keras-compatible data generator to use with Keras methods :meth:`keras.Model.fit`,
    :meth:`keras.Model.evaluate`, and :meth:`keras.Model.predict`.

    This class generates data samples for link inference models
    and should be created using the :meth:`flow` method of
    :class:`.GraphSAGELinkGenerator` or :class:`.HinSAGELinkGenerator` or
    :class:`.Attri2VecLinkGenerator`.

    Args:
        sample_function (Callable): A function that returns features for supplied head nodes.
        batch_size (int): Number of links per batch.
        ids (iterable): Link IDs to batch, each link id being a tuple of (src, dst) node ids.
        targets (list, optional): A list of targets or labels to be used in the downstream task.
        shuffle (bool): If True (default) the ids will be randomly shuffled every epoch.
        seed (int, optional): Random seed
    """

    def __init__(
        self, sample_function, batch_size, ids, targets=None, shuffle=True, seed=None
    ):
        # Check that ids is an iterable
        if not is_real_iterable(ids):
            raise TypeError("IDs must be an iterable or numpy array of graph node IDs")

        # Check targets is iterable & has the correct length.
        # (A second, unreachable ids/targets length check that followed this
        # branch has been removed: this branch already raises on mismatch.)
        if targets is not None:
            if not is_real_iterable(targets):
                raise TypeError("Targets must be None or an iterable or numpy array ")
            if len(ids) != len(targets):
                raise ValueError(
                    "The length of the targets must be the same as the length of the ids"
                )
            self.targets = np.asanyarray(targets)
        else:
            self.targets = None

        # Store the generator to draw samples from graph
        if isinstance(sample_function, collections.abc.Callable):
            self._sample_features = sample_function
        else:
            raise TypeError(
                "({}) The sampling function expects a callable function.".format(
                    type(self).__name__
                )
            )

        self.batch_size = batch_size
        self.ids = list(ids)
        self.data_size = len(self.ids)
        self.shuffle = shuffle
        self._rs, _ = random_state(seed)

        # Shuffle the IDs to begin
        self.on_epoch_end()

    def __len__(self):
        """Denotes the number of batches per epoch (last batch may be smaller)."""
        return int(np.ceil(self.data_size / self.batch_size))

    def __getitem__(self, batch_num):
        """
        Generate one batch of data.

        Args:
            batch_num (int): number of a batch

        Returns:
            batch_feats (list): Node features for nodes and neighbours sampled from a
                batch of the supplied IDs
            batch_targets (list): Targets/labels for the batch, or None.
        """
        start_idx = self.batch_size * batch_num
        end_idx = start_idx + self.batch_size

        if start_idx >= self.data_size:
            raise IndexError("Mapper: batch_num larger than length of data")

        # The ID indices for this batch
        batch_indices = self.indices[start_idx:end_idx]

        # Get head (root) nodes for links
        head_ids = [self.ids[ii] for ii in batch_indices]

        # Get targets for nodes (numpy fancy indexing keeps alignment)
        batch_targets = None if self.targets is None else self.targets[batch_indices]

        # Get node features for batch of link ids
        batch_feats = self._sample_features(head_ids, batch_num)

        return batch_feats, batch_targets

    def on_epoch_end(self):
        """
        Shuffle all link IDs at the end of each epoch
        """
        self.indices = list(range(self.data_size))
        if self.shuffle:
            self._rs.shuffle(self.indices)
class OnDemandLinkSequence(Sequence):
    """
    Keras-compatible data generator to use with Keras methods :meth:`keras.Model.fit`,
    :meth:`keras.Model.evaluate`, and :meth:`keras.Model.predict`.

    This class generates data samples for link inference models
    and should be created using the :meth:`flow` method of
    :class:`.GraphSAGELinkGenerator` or :class:`.Attri2VecLinkGenerator`.

    Args:
        sample_function (Callable): A function that returns features for supplied head nodes.
        batch_size (int): Number of links per batch.
        walker (UnsupervisedSampler): An object that encapsulates the neighbourhood sampling
            of a graph; generates batches of positive and negative samples.
        shuffle (bool): If True (default), regenerate the batches each epoch.
    """

    def __init__(self, sample_function, batch_size, walker, shuffle=True):
        # Store the generator to draw samples from graph
        if isinstance(sample_function, collections.abc.Callable):
            self._sample_features = sample_function
        else:
            raise TypeError(
                "({}) The sampling function expects a callable function.".format(
                    type(self).__name__
                )
            )

        if not isinstance(walker, UnsupervisedSampler):
            raise TypeError(
                "({}) UnsupervisedSampler is required.".format(type(self).__name__)
            )

        self.batch_size = batch_size
        self.walker = walker
        self.shuffle = shuffle
        # FIXME(#681): all batches are created at once, so this is no longer "on demand"
        self._batches = self._create_batches()
        self.length = len(self._batches)
        self.data_size = sum(len(batch[0]) for batch in self._batches)

    def __getitem__(self, batch_num):
        """
        Generate one batch of data.

        Args:
            batch_num (int): number of a batch

        Returns:
            batch_feats (list): Node features for nodes and neighbours sampled from a
                batch of the supplied IDs
            batch_targets (list): Targets/labels for the batch.
        """
        if batch_num >= self.__len__():
            # Fixed misspelled error message ("esstaimted" -> "estimated").
            raise IndexError(
                "Mapper: batch_num larger than number of estimated batches for this epoch."
            )

        # Get head nodes and labels (precomputed by the walker)
        head_ids, batch_targets = self._batches[batch_num]

        # Obtain features for head ids
        batch_feats = self._sample_features(head_ids, batch_num)

        return batch_feats, batch_targets

    def __len__(self):
        """Denotes the number of batches per epoch"""
        return self.length

    def _create_batches(self):
        # Delegate batch construction to the unsupervised sampler.
        return self.walker.run(self.batch_size)

    def on_epoch_end(self):
        """
        Regenerate all batches (fresh random walks) at the end of each epoch.
        """
        if self.shuffle:
            self._batches = self._create_batches()
def _full_batch_array_and_reshape(array, propagate_none=False):
"""
Args:
array: an array-like object
propagate_none: if True, return None when array is None
Returns:
array as a numpy array with an extra first dimension (batch dimension) equal to 1
"""
# if it's ok, just short-circuit on None (e.g. for target arrays, that may or may not exist)
if propagate_none and array is None:
return None
as_np = np.asanyarray(array)
return np.reshape(as_np, (1,) + as_np.shape)
class FullBatchSequence(Sequence):
    """
    Keras-compatible data generator for node inference models
    that require full-batch training (e.g., GCN, GAT).

    Use this class with the Keras methods :meth:`keras.Model.fit`,
    :meth:`keras.Model.evaluate`, and :meth:`keras.Model.predict`.
    This class should be created using the `.flow(...)` method of
    :class:`.FullBatchNodeGenerator`.

    Args:
        features (np.ndarray): An array of node features of size (N x F),
            where N is the number of nodes in the graph, F is the node feature size
        A (np.ndarray or sparse matrix): An adjacency matrix of the graph of size (N x N).
        targets (np.ndarray, optional): An optional array of node targets of size (N x C),
            where C is the target size (e.g., number of classes for one-hot class targets)
        indices (np.ndarray, optional): Array of indices to the feature and adjacency matrix
            of the targets. Required if targets is not None.
    """

    use_sparse = False

    def __init__(self, features, A, targets=None, indices=None):
        if targets is not None:
            # Previously a missing `indices` crashed with TypeError on len();
            # raise a clear error instead.
            if indices is None:
                raise ValueError("indices must be supplied when targets are provided.")
            if len(indices) != len(targets):
                raise ValueError(
                    "When passed together targets and indices should be the same length."
                )

        # Convert sparse matrix to dense (sps.issparse guarantees .toarray exists,
        # so the previous redundant hasattr check was dropped):
        if sps.issparse(A):
            self.A_dense = _full_batch_array_and_reshape(A.toarray())
        elif isinstance(A, (np.ndarray, np.matrix)):
            self.A_dense = _full_batch_array_and_reshape(A)
        else:
            raise TypeError(
                "Expected input matrix to be either a Scipy sparse matrix or a Numpy array."
            )

        # Reshape all inputs to have batch dimension of 1. (The original also
        # pre-assigned features/target_indices with np.asanyarray and then
        # immediately overwrote them; those dead assignments were removed.)
        self.features = _full_batch_array_and_reshape(features)
        self.target_indices = _full_batch_array_and_reshape(indices)
        self.inputs = [self.features, self.target_indices, self.A_dense]

        self.targets = _full_batch_array_and_reshape(targets, propagate_none=True)

    def __len__(self):
        # The whole graph is a single batch.
        return 1

    def __getitem__(self, index):
        return self.inputs, self.targets
class SparseFullBatchSequence(Sequence):
    """
    Keras-compatible data generator for node inference models
    that require full-batch training (e.g., GCN, GAT).

    Use this class with the Keras methods :meth:`keras.Model.fit`,
    :meth:`keras.Model.evaluate`, and :meth:`keras.Model.predict`.
    This class uses sparse matrix representations to send data to the models,
    and only works with the Keras tensorflow backend. For any other backends,
    use the :class:`.FullBatchSequence` class.
    This class should be created using the `.flow(...)` method of
    :class:`.FullBatchNodeGenerator`.

    Args:
        features (np.ndarray): An array of node features of size (N x F),
            where N is the number of nodes in the graph, F is the node feature size
        A (sparse matrix): An adjacency matrix of the graph of size (N x N).
        targets (np.ndarray, optional): An optional array of node targets of size (N x C),
            where C is the target size (e.g., number of classes for one-hot class targets)
        indices (np.ndarray, optional): Array of indices to the feature and adjacency matrix
            of the targets. Required if targets is not None.
    """

    use_sparse = True

    def __init__(self, features, A, targets=None, indices=None):
        if targets is not None and len(indices) != len(targets):
            raise ValueError(
                "When passed together targets and indices should be the same length."
            )

        # COO layout exposes the row/column coordinate arrays directly.
        if not sps.isspmatrix(A):
            raise ValueError("Adjacency matrix not in expected sparse format")
        coo = A.tocoo()

        # Split the matrix into an (1, nnz, 2) index array and a (1, nnz) value array.
        row_col = np.hstack((coo.row[:, None], coo.col[:, None]))
        self.A_indices = np.expand_dims(row_col, 0).astype("int64")
        self.A_values = np.expand_dims(coo.data, 0)

        # All remaining inputs get a batch dimension of 1.
        self.target_indices = _full_batch_array_and_reshape(indices)
        self.features = _full_batch_array_and_reshape(features)
        self.inputs = [
            self.features,
            self.target_indices,
            self.A_indices,
            self.A_values,
        ]

        self.targets = _full_batch_array_and_reshape(targets, propagate_none=True)

    def __len__(self):
        # The whole graph is a single batch.
        return 1

    def __getitem__(self, index):
        return self.inputs, self.targets
class RelationalFullBatchNodeSequence(Sequence):
    """
    Keras-compatible data generator for node inference models on relational graphs
    that require full-batch training (e.g., RGCN).

    Use this class with the Keras methods :meth:`keras.Model.fit`,
    :meth:`keras.Model.evaluate`, and :meth:`keras.Model.predict`.
    This class uses either dense or sparse representations to send data to the models.
    This class should be created using the `.flow(...)` method of
    :class:`.RelationalFullBatchNodeGenerator`.

    Args:
        features (np.ndarray): An array of node features of size (N x F),
            where N is the number of nodes in the graph, F is the node feature size
        As (list of sparse matrices): A list of length R of adjacency matrices of the
            graph of size (N x N) where R is the number of relationships in the graph.
        use_sparse (bool): If True, pass adjacency information as index/value pairs;
            otherwise pass dense adjacency arrays.
        targets (np.ndarray, optional): An optional array of node targets of size (N x C),
            where C is the target size (e.g., number of classes for one-hot class targets)
        indices (np.ndarray, optional): Array of indices to the feature and adjacency matrix
            of the targets. Required if targets is not None.
    """

    def __init__(self, features, As, use_sparse, targets=None, indices=None):
        if targets is not None and len(indices) != len(targets):
            raise ValueError(
                "When passed together targets and indices should be the same length."
            )

        self.use_sparse = use_sparse

        if self.use_sparse:
            # One (1, nnz, 2) coordinate array and one (1, nnz) value array per relation.
            index_arrays = []
            value_arrays = []
            for adj in As:
                coords = np.hstack((adj.row[:, None], adj.col[:, None])).astype(
                    np.int64
                )
                index_arrays.append(np.expand_dims(coords, 0))
                value_arrays.append(np.expand_dims(adj.data, 0))
            self.A_indices = index_arrays
            self.A_values = value_arrays
            # Model inputs expect all index arrays followed by all value arrays.
            self.As = self.A_indices + self.A_values
        else:
            # Dense path: each adjacency matrix gets a batch dimension of 1.
            self.As = [np.expand_dims(adj.todense(), 0) for adj in As]

        # Make sure all remaining inputs are numpy arrays with batch dimension of 1.
        self.target_indices = _full_batch_array_and_reshape(indices)
        self.features = _full_batch_array_and_reshape(features)
        self.inputs = [self.features, self.target_indices] + self.As

        self.targets = _full_batch_array_and_reshape(targets, propagate_none=True)

    def __len__(self):
        # The whole graph is a single batch.
        return 1

    def __getitem__(self, index):
        return self.inputs, self.targets
| 20,050 | 36.548689 | 107 | py |
stellargraph | stellargraph-master/stellargraph/mapper/sampled_link_generators.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generators that create batches of data from a machine-learning ready graph
for link prediction/link attribute inference problems using GraphSAGE, HinSAGE and Attri2Vec.
"""
__all__ = [
"GraphSAGELinkGenerator",
"HinSAGELinkGenerator",
"Attri2VecLinkGenerator",
"Node2VecLinkGenerator",
"DirectedGraphSAGELinkGenerator",
]
import random
import operator
import numpy as np
import itertools as it
import operator
import collections
import abc
import warnings
from functools import reduce
from tensorflow import keras
from ..core.graph import StellarGraph, GraphSchema
from ..data import (
SampledBreadthFirstWalk,
SampledHeterogeneousBreadthFirstWalk,
UniformRandomWalk,
UnsupervisedSampler,
DirectedBreadthFirstNeighbours,
)
from ..core.utils import is_real_iterable
from . import LinkSequence, OnDemandLinkSequence
from ..random import SeededPerBatch
from .base import Generator
class BatchedLinkGenerator(Generator):
    """
    Abstract base class for link generators.

    Subclasses implement :meth:`sample_features` to collect the model inputs
    for a batch of (source, target) node pairs.

    Args:
        G (StellarGraph): A machine-learning ready graph.
        batch_size (int): Number of links to include in each batch.
        schema (GraphSchema, optional): Schema describing the graph; inferred
            from ``G`` when not supplied.
        use_node_features (bool): If True (default), check that ``G`` has node
            features suitable for machine learning.
    """

    def __init__(self, G, batch_size, schema=None, use_node_features=True):
        if not isinstance(G, StellarGraph):
            raise TypeError("Graph must be a StellarGraph or StellarDiGraph object.")

        self.graph = G
        self.batch_size = batch_size

        # This is a link generator and requires a model with two root nodes per query
        self.multiplicity = 2

        # We need a schema for compatibility with HinSAGE
        if schema is None:
            self.schema = G.create_graph_schema()
        elif isinstance(schema, GraphSchema):
            self.schema = schema
        else:
            raise TypeError("Schema must be a GraphSchema object")

        # Subclasses may restrict the node types allowed at each end of a link
        self.head_node_types = None

        # Sampler (set by subclasses that need one)
        self.sampler = None

        # Check if the graph has features
        if use_node_features:
            G.check_graph_for_ml()

    @abc.abstractmethod
    def sample_features(self, head_links, batch_num):
        """Collect model inputs for ``head_links``; implemented by subclasses."""
        pass

    def num_batch_dims(self):
        """Number of leading batch dimensions in the generated data (always 1)."""
        return 1

    def flow(self, link_ids, targets=None, shuffle=False, seed=None):
        """
        Creates a generator/sequence object for training or evaluation
        with the supplied link ids and numeric targets.

        The link IDs are the links to train or inference on. They are
        a subset of the links in the graph. The targets are an array of
        numeric targets corresponding to the supplied link_ids, given in
        the same order. If targets are not specified (for example, for use
        in prediction), they will not be available to the downstream task.

        Note that the shuffle argument should be True for training and
        False for prediction.

        Args:
            link_ids: an iterable of tuples of node IDs as (source, target),
                or an :class:`.UnsupervisedSampler` that generates samples on demand
            targets: a 2D array of numeric targets with shape
                ``(len(link_ids), target_size)``
            shuffle (bool): If True the links will be shuffled at each
                epoch, if False the links will be processed in order.
            seed (int, optional): Random seed

        Returns:
            A LinkSequence or OnDemandLinkSequence object to use with
            StellarGraph models in Keras methods ``fit``, ``evaluate``,
            and ``predict``
        """
        if self.head_node_types is not None:
            expected_src_type = self.head_node_types[0]
            expected_dst_type = self.head_node_types[1]

        # Pass sampler to on-demand link sequence generation
        if isinstance(link_ids, UnsupervisedSampler):
            return OnDemandLinkSequence(self.sample_features, self.batch_size, link_ids)

        # Otherwise pass iterable (check?) to standard LinkSequence
        elif isinstance(link_ids, collections.abc.Iterable):
            # Materialise once so that single-pass iterators/generators survive
            # both the validation loop below and the iloc conversion afterwards.
            link_ids = list(link_ids)

            # Check all IDs are actually in the graph and are of expected type
            for link in link_ids:
                if len(link) != 2:
                    raise KeyError("Expected link IDs to be a tuple of length 2")
                src, dst = link
                try:
                    node_type_src = self.graph.node_type(src)
                except KeyError:
                    raise KeyError(
                        f"Node ID {src} supplied to generator not found in graph"
                    )
                try:
                    node_type_dst = self.graph.node_type(dst)
                except KeyError:
                    raise KeyError(
                        f"Node ID {dst} supplied to generator not found in graph"
                    )
                if self.head_node_types is not None and (
                    node_type_src != expected_src_type
                    or node_type_dst != expected_dst_type
                ):
                    raise ValueError(
                        f"Node pair ({src}, {dst}) not of expected type ({expected_src_type}, {expected_dst_type})"
                    )

            # Convert external node ids to internal ilocs for the samplers
            link_ids = [self.graph.node_ids_to_ilocs(ids) for ids in link_ids]

            return LinkSequence(
                self.sample_features,
                self.batch_size,
                link_ids,
                targets=targets,
                shuffle=shuffle,
                seed=seed,
            )
        else:
            raise TypeError(
                "Argument to .flow not recognised. "
                "Please pass a list of samples or a UnsupervisedSampler object."
            )

    def flow_from_dataframe(self, link_targets, shuffle=False):
        """
        Creates a generator/sequence object for training or evaluation
        with links and targets supplied as a Pandas DataFrame.

        Args:
            link_targets: a Pandas DataFrame of links specified by
                'source' and 'target' and an optional target label
                specified by 'label'.
            shuffle (bool): If True the links will be shuffled at each
                epoch, if False the links will be processed in order.

        Returns:
            A LinkSequence object to use with StellarGraph models
            in Keras methods ``fit``, ``evaluate``, and ``predict``
        """
        # Bug fix: select both columns with a list of labels. The original
        # indexed with a bare tuple key, link_targets["source", "target"],
        # which raises KeyError for ordinary (non-MultiIndex) DataFrames.
        return self.flow(
            link_targets[["source", "target"]].values,
            link_targets["label"].values,
            shuffle=shuffle,
        )
class GraphSAGELinkGenerator(BatchedLinkGenerator):
    """
    A data generator for link prediction with Homogeneous GraphSAGE models

    At minimum, supply the StellarGraph, the batch size, and the number of
    node samples for each layer of the GraphSAGE model.
    The supplied graph should be a StellarGraph object with node features.
    Use the :meth:`flow` method supplying the nodes and (optionally) targets,
    or an UnsupervisedSampler instance that generates node samples on demand,
    to get an object that can be used as a Keras data generator.

    Example::

        G_generator = GraphSageLinkGenerator(G, 50, [10,10])
        train_data_gen = G_generator.flow(edge_ids)

    .. seealso::

       Model using this generator: :class:`.GraphSAGE`.

       Some examples using this generator (see the model for more):

       - `link prediction <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/graphsage-link-prediction.html>`__
       - `unsupervised representation learning via random walks <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/graphsage-unsupervised-sampler-embeddings.html>`__

       Related functionality:

       - :class:`.UnsupervisedSampler` for unsupervised training using random walks
       - :class:`.GraphSAGENodeGenerator` for node classification and related tasks
       - :class:`.DirectedGraphSAGELinkGenerator` for directed graphs
       - :class:`.HinSAGELinkGenerator` for heterogeneous graphs

    Args:
        G (StellarGraph): A machine-learning ready graph.
        batch_size (int): Size of batch of links to return.
        num_samples (list): List of number of neighbour node samples per GraphSAGE layer (hop) to take.
        seed (int or str), optional: Random seed for the sampling methods.
        weighted (bool, optional): If True, sample neighbours using the edge weights in the graph.
    """

    def __init__(
        self, G, batch_size, num_samples, seed=None, name=None, weighted=False
    ):
        super().__init__(G, batch_size)

        self.num_samples = num_samples
        self.name = name
        self.weighted = weighted

        # Check that there is only a single node type for GraphSAGE
        if len(self.schema.node_types) > 1:
            warnings.warn(
                "running homogeneous GraphSAGE on a graph with multiple node types",
                RuntimeWarning,
                stacklevel=2,
            )

        self.head_node_types = self.schema.node_types * 2

        self._graph = G
        # One seeded BFS sampler per batch number, for reproducible sampling.
        self._samplers = SeededPerBatch(
            lambda s: SampledBreadthFirstWalk(
                self._graph, graph_schema=self.schema, seed=s
            ),
            seed=seed,
        )

    def sample_features(self, head_links, batch_num):
        """
        Sample neighbours recursively from the head nodes, collect the features of the
        sampled nodes, and return these as a list of feature arrays for the GraphSAGE
        algorithm.

        Args:
            head_links: An iterable of edges to perform sampling for.
            batch_num (int): Batch number

        Returns:
            A list of the same length as ``num_samples`` of collected features from
            the sampled nodes of shape:
            ``(len(head_nodes), num_sampled_at_layer, feature_size)``
            where ``num_sampled_at_layer`` is the cumulative product of `num_samples`
            for that layer.
        """
        node_type = self.head_node_types[0]
        head_size = len(head_links)

        # (An unused computation of the total per-head sample count,
        # `num_full_samples = np.sum(np.cumprod(self.num_samples))`, was removed.)

        # Reshape node samples to sensible format: slice each flat walk into
        # per-hop segments (recursively, one segment per layer).
        def get_levels(loc, lsize, samples_per_hop, walks):
            end_loc = loc + lsize
            walks_at_level = list(it.chain(*[w[loc:end_loc] for w in walks]))
            if len(samples_per_hop) < 1:
                return [walks_at_level]
            return [walks_at_level] + get_levels(
                end_loc, lsize * samples_per_hop[0], samples_per_hop[1:], walks
            )

        # Get sampled nodes for the subgraphs for the edges where each edge is a tuple
        # of 2 nodes, so we are extracting 2 head nodes per edge
        batch_feats = []
        for hns in zip(*head_links):
            node_samples = self._samplers[batch_num].run(
                nodes=hns, n=1, n_size=self.num_samples, weighted=self.weighted
            )

            nodes_per_hop = get_levels(0, 1, self.num_samples, node_samples)

            # Get features for the sampled nodes
            batch_feats.append(
                [
                    self.graph.node_features(layer_nodes, node_type, use_ilocs=True,)
                    for layer_nodes in nodes_per_hop
                ]
            )

        # Resize features to (batch_size, n_neighbours, feature_size)
        # and re-pack features into a list where source, target feats alternate
        # This matches the GraphSAGE link model with (node_src, node_dst) input sockets:
        batch_feats = [
            np.reshape(feats, (head_size, -1, feats.shape[1]))
            for ab in zip(*batch_feats)
            for feats in ab
        ]

        return batch_feats
class HinSAGELinkGenerator(BatchedLinkGenerator):
    """
    A data generator for link prediction with Heterogeneous HinSAGE models.

    At minimum, supply the StellarGraph, the batch size, and the number of
    node samples for each layer of the HinSAGE model.
    The supplied graph should be a StellarGraph object with node features for all node types.
    Use the :meth:`flow` method supplying the nodes and (optionally) targets
    to get an object that can be used as a Keras data generator.
    The generator should be given the ``(src, dst)`` node types via ``head_node_types``.

    * It's possible to do link prediction on a graph where that link type is completely
      removed from the graph (e.g., "same_as" links in ER)

    .. seealso::

       Model using this generator: :class:`.HinSAGE`.

       Example using this generator: `link prediction <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/hinsage-link-prediction.html>`__.

       Related functionality:

       - :class:`.UnsupervisedSampler` for unsupervised training using random walks
       - :class:`.HinSAGENodeGenerator` for node classification and related tasks
       - :class:`.GraphSAGELinkGenerator` for homogeneous graphs
       - :class:`.DirectedGraphSAGELinkGenerator` for directed homogeneous graphs

    Args:
        G (StellarGraph): A machine-learning ready graph.
        batch_size (int): Size of batch of links to return.
        num_samples (list): List of number of neighbour node samples per HinSAGE layer (hop) to take.
        head_node_types (list, optional): List of the types (str) of the two head nodes forming the
            node pair. This does not need to be specified if ``G`` has only one node type.
        schema (GraphSchema, optional): Graph schema; inferred from ``G`` if omitted.
        seed (int or str, optional): Random seed for the sampling methods.
        name (str, optional): Name of the generator.

    Example::

        G_generator = HinSAGELinkGenerator(G, 50, [10,10])
        data_gen = G_generator.flow(edge_ids)
    """

    def __init__(
        self,
        G,
        batch_size,
        num_samples,
        head_node_types=None,
        schema=None,
        seed=None,
        name=None,
    ):
        super().__init__(G, batch_size, schema)
        self.num_samples = num_samples
        self.name = name
        # This is a link generator and requires two nodes per query
        if head_node_types is None:
            # infer the head node types, if this is a homogeneous-node graph
            node_type = G.unique_node_type(
                "head_node_types: expected a pair of head node types because G has more than one node type, found node types: %(found)s"
            )
            head_node_types = [node_type, node_type]
        self.head_node_types = head_node_types
        if len(self.head_node_types) != 2:
            raise ValueError(
                "The head_node_types should be of length 2 for a link generator"
            )
        # Create sampling schema: for each of the two head node types this
        # describes which slots of the sampled walks belong to which node type.
        self._sampling_schema = self.schema.sampling_layout(
            self.head_node_types, self.num_samples
        )
        self._type_adjacency_list = self.schema.type_adjacency_list(
            self.head_node_types, len(self.num_samples)
        )
        # The sampler used to generate random samples of neighbours
        self.sampler = SampledHeterogeneousBreadthFirstWalk(
            G, graph_schema=self.schema, seed=seed
        )

    def _get_features(self, node_samples, head_size, use_ilocs=False):
        """
        Collect features from sampled nodes.

        Args:
            node_samples: A list of ``(node_type, node_ids)`` pairs.
            head_size: The number of head nodes (typically the batch size).
            use_ilocs (bool): If True, ``node_ids`` are internal node ilocs.

        Returns:
            A list of numpy arrays that store the features for each head
            node.
        """
        # Note the if there are no samples for a node a zero array is returned.
        # Fetch features per node type (note that we can have a different
        # feature size for each node type).
        batch_feats = [
            self.graph.node_features(layer_nodes, nt, use_ilocs=use_ilocs)
            for nt, layer_nodes in node_samples
        ]
        # Resize features to (batch_size, n_neighbours, feature_size)
        batch_feats = [np.reshape(a, (head_size, -1, a.shape[1])) for a in batch_feats]
        return batch_feats

    def sample_features(self, head_links, batch_num):
        """
        Sample neighbours recursively from the head nodes, collect the features of the
        sampled nodes, and return these as a list of feature arrays for the HinSAGE
        algorithm.

        Args:
            head_links (list): An iterable of edges to perform sampling for.
            batch_num (int): Batch number

        Returns:
            A list of the same length as `num_samples` of collected features from
            the sampled nodes of shape: ``(len(head_nodes), num_sampled_at_layer, feature_size)``
            where ``num_sampled_at_layer`` is the cumulative product of `num_samples`
            for that layer.
        """
        nodes_by_type = []
        for ii in range(2):
            # Extract head nodes from edges: each edge is a tuple of 2 nodes, so we are extracting 2 head nodes per edge
            head_nodes = [e[ii] for e in head_links]

            # Get sampled nodes for the subgraphs starting from the (src, dst) head nodes
            # nodes_samples is list of two lists: [[samples for src], [samples for dst]]
            node_samples = self.sampler.run(
                nodes=head_nodes, n=1, n_size=self.num_samples
            )

            # Reshape node samples to the required format for the HinSAGE model
            # This requires grouping the sampled nodes by edge type and in order.
            # For each (node_type, slot indices) entry of the sampling schema,
            # concatenate the walk segments at those slots across all walks.
            nodes_by_type.append(
                [
                    (
                        nt,
                        reduce(
                            operator.concat,
                            (samples[ks] for samples in node_samples for ks in indices),
                            [],
                        ),
                    )
                    for nt, indices in self._sampling_schema[ii]
                ]
            )

        # Interlace the two lists, nodes_by_type[0] (for src head nodes) and nodes_by_type[1] (for dst head nodes):
        # for each node type, concatenate the src samples followed by the dst samples.
        nodes_by_type = [
            tuple((ab[0][0], reduce(operator.concat, (ab[0][1], ab[1][1]))))
            for ab in zip(nodes_by_type[0], nodes_by_type[1])
        ]

        batch_feats = self._get_features(nodes_by_type, len(head_links), use_ilocs=True)

        return batch_feats
class Attri2VecLinkGenerator(BatchedLinkGenerator):
    """
    A data generator for context node prediction with the attri2vec model.

    At minimum, supply the StellarGraph and the batch size.
    The supplied graph should be a StellarGraph object with node features.
    Use the :meth:`flow` method supplying the nodes and targets,
    or an UnsupervisedSampler instance that generates node samples on demand,
    to get an object that can be used as a Keras data generator.

    Example::

        G_generator = Attri2VecLinkGenerator(G, 50)
        train_data_gen = G_generator.flow(edge_ids, edge_labels)

    .. seealso::

       Model using this generator: :class:`.Attri2Vec`.

       An example using this generator (see the model for more): `link prediction <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/attri2vec-link-prediction.html>`__.

       Related functionality:

       - :class:`.UnsupervisedSampler` for unsupervised training using random walks
       - :class:`.Attri2VecNodeGenerator` for node classification and related tasks

    Args:
        G (StellarGraph): A machine-learning ready graph.
        batch_size (int): Size of batch of links to return.
        name, optional: Name of generator.
    """

    def __init__(self, G, batch_size, name=None):
        super().__init__(G, batch_size)
        self.name = name

    def sample_features(self, head_links, batch_num):
        """
        Sample content features of the target nodes and the ids of the context nodes
        and return these as a list of feature arrays for the attri2vec algorithm.

        Args:
            head_links: An iterable of edges to perform sampling for.
            batch_num (int): Batch number

        Returns:
            A list of feature arrays, with each element being the feature of a
            target node and the id of the corresponding context node.
        """
        target_ids = [head_link[0] for head_link in head_links]
        context_ids = [head_link[1] for head_link in head_links]
        target_feats = self.graph.node_features(target_ids, use_ilocs=True)
        # A single np.array conversion suffices; the original wrapped the
        # context ids in np.array twice, producing a redundant extra copy.
        context_feats = np.array(context_ids)
        batch_feats = [target_feats, context_feats]
        return batch_feats
class Node2VecLinkGenerator(BatchedLinkGenerator):
    """
    A data generator for context node prediction with Node2Vec models.

    At minimum, supply the StellarGraph and the batch size. The supplied graph
    should be a StellarGraph object that is ready for machine learning.
    Currently the model does not require node features for nodes in the graph.

    Use the :meth:`flow` method supplying the nodes and targets, or an
    UnsupervisedSampler instance that generates node samples on demand, to get
    an object that can be used as a Keras data generator.

    Example::

        G_generator = Node2VecLinkGenerator(G, 50)
        data_gen = G_generator.flow(edge_ids, edge_labels)

    .. seealso::

       Model using this generator: :class:`.Node2Vec`.

       An example using this generator (see the model for more): `unsupervised representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/keras-node2vec-embeddings.html>`__.

       Related functionality: :class:`.Node2VecNodeGenerator` for node classification and related tasks.

    Args:
        G (StellarGraph): A machine-learning ready graph.
        batch_size (int): Size of batch of links to return.
        name (str or None): Name of the generator (optional).
    """

    def __init__(self, G, batch_size, name=None):
        # Node2Vec embeds node ids directly, so node features are not required.
        super().__init__(G, batch_size, use_node_features=False)
        self.name = name

    def sample_features(self, head_links, batch_num):
        """
        Sample the ids of the target and context nodes and return these as a
        list of feature arrays for the Node2Vec algorithm.

        Args:
            head_links: An iterable of (target, context) edges to perform sampling for.
            batch_num (int): Batch number (part of the generator API; unused here).

        Returns:
            A list of arrays, one per column of the input pairs: the ids of the
            sampled target nodes and of the context nodes.
        """
        # Transpose the (target, context) pairs into one array per column.
        return list(map(np.array, zip(*head_links)))
class DirectedGraphSAGELinkGenerator(BatchedLinkGenerator):
    """
    A data generator for link prediction with directed Homogeneous GraphSAGE models

    At minimum, supply the StellarDiGraph, the batch size, and the number of
    node samples (separately for in-nodes and out-nodes) for each layer of the GraphSAGE model.

    The supplied graph should be a StellarDiGraph object with node features.

    Use the :meth:`flow` method supplying the nodes and (optionally) targets,
    or an UnsupervisedSampler instance that generates node samples on demand,
    to get an object that can be used as a Keras data generator.

    Example::

        G_generator = DirectedGraphSageLinkGenerator(G, 50, [10,10], [10,10])
        train_data_gen = G_generator.flow(edge_ids)

    .. seealso::

       Model using this generator: :class:`.GraphSAGE`.

       Related functionality:

       - :class:`.UnsupervisedSampler` for unsupervised training using random walks
       - :class:`.DirectedGraphSAGENodeGenerator` for node classification and related tasks
       - :class:`.GraphSAGELinkGenerator` for undirected graphs
       - :class:`.HinSAGELinkGenerator` for heterogeneous graphs

    Args:
        G (StellarGraph): A machine-learning ready graph.
        batch_size (int): Size of batch of links to return.
        in_samples (list): The number of in-node samples per layer (hop) to take.
        out_samples (list): The number of out-node samples per layer (hop) to take.
        seed (int or str), optional: Random seed for the sampling methods.
        name, optional: Name of generator.
        weighted (bool, optional): If True, sample neighbours using the edge weights in the graph.
    """

    def __init__(
        self,
        G,
        batch_size,
        in_samples,
        out_samples,
        seed=None,
        name=None,
        weighted=False,
    ):
        super().__init__(G, batch_size)
        self.in_samples = in_samples
        self.out_samples = out_samples
        self._name = name
        self.weighted = weighted

        # Check that there is only a single node type for GraphSAGE
        if len(self.schema.node_types) > 1:
            warnings.warn(
                "running homogeneous GraphSAGE on a graph with multiple node types",
                RuntimeWarning,
                stacklevel=2,
            )

        # Both ends of every link share the (single) node type, hence "* 2".
        self.head_node_types = self.schema.node_types * 2

        self._graph = G
        # One sampler per batch, each seeded deterministically from `seed`, so
        # sampling is reproducible across epochs/workers.
        self._samplers = SeededPerBatch(
            lambda s: DirectedBreadthFirstNeighbours(
                self._graph, graph_schema=self.schema, seed=s
            ),
            seed=seed,
        )

    def sample_features(self, head_links, batch_num):
        """
        Sample neighbours recursively from the head links, collect the features of the
        sampled nodes, and return these as a list of feature arrays for the GraphSAGE
        algorithm.

        Args:
            head_links: An iterable of head links to perform sampling on.
            batch_num (int): Batch number, used to pick the per-batch seeded sampler.

        Returns:
            A list of feature tensors from the sampled nodes at each layer, each of shape:
            ``(len(head_nodes), num_sampled_at_layer, feature_size)``
            where ``num_sampled_at_layer`` is the total number (cumulative product)
            of nodes sampled at the given number of hops from each head node,
            given the sequence of in/out directions.
        """
        batch_feats = []
        # zip(*head_links) iterates the two "columns": first all source nodes,
        # then all target nodes of the batch's links.
        for hns in zip(*head_links):
            node_samples = self._samplers[batch_num].run(
                nodes=hns,
                n=1,
                in_size=self.in_samples,
                out_size=self.out_samples,
                weighted=self.weighted,
            )

            # Reshape node samples to sensible format
            # Each 'slot' represents the list of nodes sampled from some neighbourhood, and will have a corresponding
            # NN input layer. Every hop potentially generates both in-nodes and out-nodes, held separately,
            # and thus the slot (or directed hop sequence) structure forms a binary tree.
            node_type = self.head_node_types[0]

            max_hops = len(self.in_samples)
            # A full binary tree of depth `max_hops` has 2^(h+1) - 1 slots.
            max_slots = 2 ** (max_hops + 1) - 1
            features = [None] * max_slots  # flattened binary tree

            for slot in range(max_slots):
                # Flatten the per-head-node samples for this slot into one list
                # so features can be looked up in a single call.
                nodes_in_slot = [
                    element for sample in node_samples for element in sample[slot]
                ]
                features_for_slot = self.graph.node_features(
                    nodes_in_slot, node_type, use_ilocs=True,
                )
                # Restore the (head_node, samples_per_head, feature) structure.
                features[slot] = np.reshape(
                    features_for_slot, (len(hns), -1, features_for_slot.shape[1])
                )

            # Get features for the sampled nodes
            batch_feats.append(features)

        # Resize features to (batch_size, n_neighbours, feature_size)
        # and re-pack features into a list where source, target feats alternate
        # This matches the GraphSAGE link model with (node_src, node_dst) input sockets:
        batch_feats = [feats for ab in zip(*batch_feats) for feats in ab]

        return batch_feats
| 28,318 | 37.012081 | 202 | py |
stellargraph | stellargraph-master/stellargraph/interpretability/saliency_maps/integrated_gradients.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The vanilla gradients may not work well for the graph setting. The main reason is that when you compute the vanilla gradients,
you only get the direction of changing at the current state of the graph (i.e., the adjacency matrix and feature matrix). However,
even though the feature values and entries in the adjacency matrix are not continous values, the model (e.g., GCN or GAT) learns
a continous function which may not be linear when a feature or edge value changes discretely. Let's take ReLU(x) as an example, when x
changes from 0 to 1, the output of the function changes from 0 to 1 as well. However, when you compute the gradient of the function
at x = 0, you will get grad(ReLU(x = 0)) = 0 which is obviously not what we want.
Integrated gradients approximates Shapley values by integrating partial gradients w.r.t input features from reference input to the
actual input. Therefore, it could solve the problem we described above and give much better accuracy. It was initially proposed in the paper
"Axiomatic attribution for deep neuron networks" published in ICML'17.
"""
import numpy as np
from tensorflow.keras import backend as K
from scipy.sparse import csr_matrix
import tensorflow as tf
from stellargraph.mapper import SparseFullBatchSequence, FullBatchSequence
class IntegratedGradients:
    """
    A SaliencyMask class that implements the integrated gradients method.

    Attributions are computed by averaging the model's gradients at a number of
    points along the straight-line path from a baseline input to the actual
    input, scaled by the input-minus-baseline difference (Sundararajan et al.,
    "Axiomatic Attribution for Deep Networks", ICML 2017).
    """

    def __init__(self, model, generator):
        """
        Args:
            model (Keras model object): The differentiable graph model object.
                For a dense model, the model.input should contain two tensors:
                    - features: The placeholder of the feature matrix.
                    - adj: The placeholder of the adjacency matrix.
                For a sparse model, the model.input should contain three tensors:
                    - features: The placeholder of the feature matrix.
                    - adj_index: The placeholder of the adjacency matrix.
                    - adj_values: The placeholder of the adjacency matrix.
                The model.output (Keras tensor) is the tensor of model prediction output.
                This is typically the logit or softmax output.
            generator: The FullBatchSequence or SparseFullBatchSequence holding
                the features and adjacency matrix that the attributions are
                computed for.

        Raises:
            TypeError: if the generator type does not match its sparse/dense flag.
            RuntimeError: if the model does not have the expected number of inputs.
        """
        # Set sparse flag from the generator
        self._is_sparse = generator.use_sparse

        if self._is_sparse:
            if not isinstance(generator, SparseFullBatchSequence):
                raise TypeError(
                    "The generator supplied has to be an object of SparseFullBatchSequence for sparse adjacency matrix."
                )
            if len(model.input) != 4:
                raise RuntimeError(
                    "Keras model for sparse adjacency is expected to have four inputs"
                )
            # Sparse adjacency is carried as (indices, values) pairs.
            self._adj = generator.A_values
            self._adj_inds = generator.A_indices
        else:
            if not isinstance(generator, FullBatchSequence):
                raise TypeError(
                    "The generator supplied has to be an object of FullBatchSequence for dense adjacency matrix."
                )
            if len(model.input) != 3:
                raise RuntimeError(
                    "Keras model for dense adjacency is expected to have three inputs"
                )
            self._adj = generator.A_dense

        # Extract features from generator
        self._features = generator.features
        self._model = model

    def get_integrated_node_masks(
        self, node_idx, class_of_interest, features_baseline=None, steps=20,
    ):
        """
        Compute integrated gradients measuring the importance of each node
        feature to the prediction score of ``class_of_interest`` for node
        ``node_idx``.

        Args:
            node_idx: the index of the node to calculate gradients for.
            class_of_interest: the index for the class probability that the gradients will be calculated for.
            features_baseline: For integrated gradients, X_baseline is the reference X to start with. Generally we should set
                X_baseline to a all-zero matrix with the size of the original feature matrix.
            steps (int): The number of values we need to interpolate. Generally steps = 20 should give good enough results.

        Returns
            (Numpy array): Integrated gradients for the node features.
        """
        if features_baseline is None:
            features_baseline = np.zeros(self._features.shape)

        features_diff = self._features - features_baseline
        total_gradients = np.zeros(self._features.shape)

        # Riemann approximation of the path integral: average the gradients at
        # `steps` interpolation points from the baseline towards the input.
        # The alpha = 0 endpoint is skipped and the sum is divided by `steps`,
        # matching get_integrated_link_masks; previously the sum was neither
        # averaged nor endpoint-corrected, which scaled the attributions by
        # roughly `steps` and violated the completeness axiom.
        for alpha in np.linspace(1.0 / steps, 1.0, steps):
            features_step = features_baseline + alpha * features_diff

            if self._is_sparse:
                model_input = [
                    features_step,
                    np.array([[node_idx]]),
                    self._adj_inds,
                    self._adj,
                ]
            else:
                model_input = [features_step, np.array([[node_idx]]), self._adj]

            model_input = [tf.convert_to_tensor(x) for x in model_input]

            grads = self._compute_gradients(
                model_input, class_of_interest, wrt=model_input[0]
            )
            total_gradients += grads.numpy()

        # Drop the leading batch dimension of the full-batch input.
        return np.squeeze(total_gradients * features_diff / steps, 0)

    def get_integrated_link_masks(
        self,
        node_idx,
        class_of_interest,
        non_exist_edge=False,
        adj_baseline=None,
        steps=20,
    ):
        """
        Compute integrated gradients measuring the importance of each edge to
        the prediction score of ``class_of_interest`` for node ``node_idx``.

        Args:
            node_idx: the index of the node to calculate gradients for.
            class_of_interest: the index for the class probability that the gradients will be calculated for.
            non_exist_edge (bool): Setting to True allows the function to get the importance for non-exist edges.
                This is useful when we want to understand adding which edges could change the current predictions.
                But the results for existing edges are not reliable. Simiarly, setting to False ((A_baseline = all zero matrix))
                could only accurately measure the importance of existing edges.
            adj_baseline: For integrated gradients, adj_baseline is the reference adjacency matrix to start with. Generally
                we should set A_baseline to an all-zero matrix or all-one matrix with the size of the original
                A_baseline matrix.
            steps (int): The number of values we need to interpolate. Generally steps = 20 should give good enough results.

        Returns
            (Numpy array): Integrated gradients for the links.
        """
        if adj_baseline is None:
            # All-ones baseline probes edge additions; all-zeros probes removals.
            if non_exist_edge:
                adj_baseline = np.ones(self._adj.shape)
            else:
                adj_baseline = np.zeros(self._adj.shape)

        adj_diff = self._adj - adj_baseline
        total_gradients = np.zeros_like(self._adj)

        for alpha in np.linspace(1.0 / steps, 1.0, steps):
            adj_step = adj_baseline + alpha * adj_diff

            if self._is_sparse:
                model_input = [
                    self._features,
                    np.array([[node_idx]]),
                    self._adj_inds,
                    adj_step,
                ]
            else:
                model_input = [
                    self._features,
                    np.array([[node_idx]]),
                    adj_step,
                ]

            model_input = [tf.convert_to_tensor(x) for x in model_input]

            grads = self._compute_gradients(
                model_input, class_of_interest, wrt=model_input[-1]
            )
            total_gradients += grads.numpy()

        if self._is_sparse:
            # Re-assemble the flat (indices, values) representation into a
            # sparse matrix so the result has the adjacency matrix's shape.
            total_gradients = csr_matrix(
                (total_gradients[0], (self._adj_inds[0, :, 0], self._adj_inds[0, :, 1]))
            )
            adj_diff = csr_matrix(
                (adj_diff[0], (self._adj_inds[0, :, 0], self._adj_inds[0, :, 1]))
            )
            total_gradients = total_gradients.multiply(adj_diff) / steps
        else:
            total_gradients = np.squeeze(
                np.multiply(total_gradients, adj_diff) / steps, 0
            )

        return total_gradients

    def get_node_importance(
        self, node_idx, class_of_interest, steps=20,
    ):
        """
        The importance of the node is defined as the sum of all the feature importance of the node.

        Args:
            node_idx: the index of the node to calculate gradients for.
            class_of_interest: the index for the class probability that the gradients will be calculated for.
            steps (int): The number of values we need to interpolate. Generally steps = 20 should give good enough results.

        return (float): Importance score for the node.
        """
        gradients = self.get_integrated_node_masks(
            node_idx, class_of_interest, steps=steps,
        )
        # Collapse the feature axis to get one importance value per node.
        return np.sum(gradients, axis=-1)

    def _compute_gradients(self, model_input, class_of_interest, wrt):
        """
        Gradient of the class-of-interest score w.r.t. the ``wrt`` input tensor.
        """
        class_of_interest = tf.convert_to_tensor(class_of_interest)

        with tf.GradientTape() as tape:
            tape.watch(wrt)
            output = self._model(model_input)
            # output has shape (1, 1, n_classes) for a single full-batch query node.
            cost_value = K.gather(output[0, 0], class_of_interest)

        gradients = tape.gradient(cost_value, wrt)
        return gradients
| 9,865 | 40.628692 | 140 | py |
stellargraph | stellargraph-master/stellargraph/interpretability/saliency_maps/saliency_gat.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tensorflow.keras import backend as K
import scipy.sparse as sp
from stellargraph.mapper import FullBatchSequence
import tensorflow as tf
class GradientSaliencyGAT:
    """
    Class to compute the saliency maps based on the vanilla gradient w.r.t the adjacency and the feature matrix.

    Args:
        model (Keras model object): The differentiable graph model object.
            model.input should contain two tensors:
                - features (Numpy array): The placeholder of the feature matrix.
                - adj (Numpy array): The placeholder of the adjacency matrix.
            model.output (Keras tensor): The tensor of model prediction output.
                This is typically the logit or softmax output.
    """

    def __init__(self, model, generator):
        """
        Args:
            model (Keras model object): The Keras GAT model.
            generator (FullBatchSequence object): The generator from which we extract the feature and adjacency matirx.

        Raises:
            TypeError: if ``generator`` is not a FullBatchSequence.
        """
        # The placeholders for features and adjacency matrix (model input):
        if not isinstance(generator, FullBatchSequence):
            raise TypeError(
                "The generator supplied has to be an object of FullBatchSequence."
            )
        self.model = model

        # Collect the special non-trainable variables the GAT layers expose for
        # integrated-gradient computation, so they can be set per call.
        self.deltas = []
        self.non_exist_edges = []
        for var in model.non_trainable_weights:
            if "ig_delta" in var.name:
                self.deltas.append(var)
            if "ig_non_exist_edge" in var.name:
                self.non_exist_edges.append(var)

        # Only the adjacency input is inspected (to detect sparsity); the other
        # two inputs are unpacked for clarity.
        features_t, output_indices_t, adj_t = model.input

        self.A = generator.A_dense
        self.X = generator.features

        self.is_sparse = K.is_sparse(adj_t)

    def compute_node_gradients(self, node_mask_tensors):
        """
        Compute the gradient of the class-of-interest score w.r.t. the feature matrix.

        Args:
            node_mask_tensors (list): ``[X, output_indices, A, alpha (unused), class_of_interest]``.

        Returns:
            Tensor of gradients with the same shape as the feature matrix.
        """
        for i, x in enumerate(node_mask_tensors):
            if not isinstance(x, tf.Tensor):
                node_mask_tensors[i] = tf.convert_to_tensor(x)

        X_val, out_indices, A_val, _, class_of_interest = node_mask_tensors
        model_input = [X_val, out_indices, A_val]

        with tf.GradientTape() as tape:
            tape.watch(X_val)
            output = self.model(model_input)
            # output has shape (1, 1, n_classes) for a single query node.
            cost_value = K.gather(output[0, 0], class_of_interest)

        node_gradients = tape.gradient(cost_value, X_val)
        return node_gradients

    def compute_link_gradients(self, link_mask_tensors):
        """
        Compute the gradient of the class-of-interest score w.r.t. the adjacency matrix.

        Args:
            link_mask_tensors (list): ``[X, output_indices, A, alpha (unused), class_of_interest]``.

        Returns:
            Tensor of gradients with the same shape as the adjacency matrix.
        """
        for i, x in enumerate(link_mask_tensors):
            if not isinstance(x, tf.Tensor):
                link_mask_tensors[i] = tf.convert_to_tensor(x)

        X_val, out_indices, A_val, _, class_of_interest = link_mask_tensors
        model_input = [X_val, out_indices, A_val]

        with tf.GradientTape() as tape:
            tape.watch(A_val)
            output = self.model(model_input)
            if self.is_sparse:
                cost_value = (
                    K.gather(K.gather(output, out_indices), class_of_interest),
                )
            else:
                cost_value = K.gather(output[0, 0], class_of_interest)

        link_gradients = tape.gradient(cost_value, A_val)
        return link_gradients

    def set_ig_values(self, delta_value, edge_value):
        """
        Set values of the integrated gradient parameters in all layers of the model.

        Args:
            delta_value: Value of the `delta` parameter
            edge_value: Value of the `non_exist_edges` parameter
        """
        for delta_var in self.deltas:
            K.set_value(delta_var, delta_value)
        for edge_var in self.non_exist_edges:
            K.set_value(edge_var, edge_value)

    def get_node_masks(self, node_id, class_of_interest, X_val=None, A_val=None):
        """
        This function computes the saliency maps (gradients) which measure the importance of each feature to the
        prediction score of 'class_of_interest' for node 'node_id'.

        Args:
            node_id (int): The node ID in the StellarGraph object.
            class_of_interest (int): The class of interest for which the saliency maps are computed.
            X_val (Numpy array): The feature matrix, we do not directly get it from generator to support the integrated gradients computation.
            A_val (Numpy array): The adjacency matrix, we do not directly get it from generator to support the integrated gradients computation.

        Returns:
            gradients (Numpy array): Returns a vanilla gradient mask for the nodes.
        """
        out_indices = np.array([[node_id]])

        if X_val is None:
            X_val = self.X
        if A_val is None:
            A_val = self.A

        # Execute the function to compute the gradient; delta = 1, edge = 0
        # makes the model behave exactly as the plain GAT.
        self.set_ig_values(1.0, 0.0)
        if self.is_sparse and not sp.issparse(A_val):
            A_val = sp.lil_matrix(A_val)
        gradients = self.compute_node_gradients(
            [X_val, out_indices, A_val, 0, class_of_interest]
        )
        # Strip the leading batch dimension.
        return gradients[0]

    def get_link_masks(
        self, alpha, node_id, class_of_interest, non_exist_edge, X_val=None, A_val=None
    ):
        """
        This function computes the saliency maps (gradients) which measure the importance of each edge to the
        prediction score of 'class_of_interest' for node 'node_id'.

        Args:
            alpha (float): The path position parameter to support integrated gradient computation.
            node_id (int): The node ID in the StellarGraph object.
            class_of_interest (int): The class of interest for which the saliency maps are computed.
            non_exist_edge (bool): Setting to True allows the function to get the importance for non-exist edges. This is useful when we want to understand
                adding which edges could change the current predictions. But the results for existing edges are not reliable. Simiarly, setting to False ((A_baseline = all zero matrix))
                could only accurately measure the importance of existing edges.
            X_val (Numpy array): The feature matrix, we do not directly get it from generator to support the integrated gradients computation.
            A_val (Numpy array): The adjacency matrix, we do not directly get it from generator to support the integrated gradients computation.

        Returns:
            gradients (Numpy array): Returns a vanilla gradient mask for the links.
        """
        out_indices = np.array([[node_id]])

        if X_val is None:
            X_val = self.X
        if A_val is None:
            A_val = self.A

        # Execute the function to compute the gradient
        self.set_ig_values(alpha, non_exist_edge)

        if self.is_sparse and not sp.issparse(A_val):
            A_val = sp.lil_matrix(A_val)

        gradients = self.compute_link_gradients(
            [X_val, out_indices, A_val, 0, class_of_interest]
        )
        # Strip the leading batch dimension.
        return gradients[0]

    def get_node_importance(self, node_id, class_of_interest, X_val=None, A_val=None):
        """
        For nodes, the saliency mask we get gives us the importance of each features. For visualization purpose, we may
        want to see a summary of the importance for the node. The importance of each node can be defined as the sum of
        all the partial gradients w.r.t its features.

        Args:
            node_id (int): The node ID in the StellarGraph object.
            class_of_interest (int): The class of interest for which the saliency maps are computed.
            X_val (Numpy array): The feature matrix, we do not directly get it from generator to support the integrated gradients computation.
            A_val (Numpy array): The adjacency matrix, we do not directly get it from generator to support the integrated gradients computation.

        Returns:
            (Numpy array): One importance score per node.
        """
        if X_val is None:
            X_val = self.X
        if A_val is None:
            A_val = self.A

        # Bug fix: the arguments were previously passed positionally in the
        # wrong order (X_val, A_val, node_id, class_of_interest), so the feature
        # matrix was used as the node ID. Also, get_node_masks already strips
        # the batch dimension, so no extra ``[0]`` indexing is needed before
        # summing over the feature axis.
        gradients = self.get_node_masks(
            node_id, class_of_interest, X_val=X_val, A_val=A_val
        )
        return np.sum(gradients, axis=1)
| 9,216 | 43.960976 | 185 | py |
stellargraph | stellargraph-master/stellargraph/interpretability/saliency_maps/integrated_gradients_gat.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The vanilla gradients may not work well for the graph setting. The main reason is that when you compute the vanilla gradients,
you only get the direction of changing at the current state of the graph (i.e., the adjacency matrix and feature matrix). However,
even though the feature values and entries in the adjacency matrix are not continous values, the model (e.g., GCN or GAT) learns
a continous function which may not be linear when a feature or edge value changes discretely. Let's take ReLU(x) as an example, when x
changes from 0 to 1, the output of the function changes from 0 to 1 as well. However, when you compute the gradient of the function
at x = 0, you will get grad(ReLU(x = 0)) = 0 which is obviously not what we want.
Integrated gradients approximates Shapley values by integrating partial gradients w.r.t input features from reference input to the
actual input. Therefore, it could solve the problem we described above and give much better accuracy. It was initially proposed in the paper
"Axiomatic attribution for deep neuron networks" published in ICML'17.
"""
import numpy as np
from .saliency_gat import GradientSaliencyGAT
import scipy.sparse as sp
from tensorflow.keras import backend as K
class IntegratedGradientsGAT(GradientSaliencyGAT):
    """
    A SaliencyMask class that implements the integrated gradients method for GAT
    models, averaging vanilla gradients along the path from a baseline input to
    the actual input (Sundararajan et al., ICML 2017).
    """

    def __init__(self, model, generator, node_list):
        """
        Args:
            model (Keras model object): The differentiable GAT model.
            generator (FullBatchSequence object): The generator holding the feature and adjacency matrix.
            node_list: Node IDs in the order used by the generator, for mapping a node ID to its row index.
        """
        self.node_list = list(node_list)
        super().__init__(model, generator)

    def get_integrated_node_masks(
        self,
        node_id,
        class_of_interest,
        X_baseline=None,
        steps=20,
        non_exist_feature=False,
    ):
        """
        This function computes the integrated gradients which measure the importance of each feature to the prediction score of 'class_of_interest'
        for node 'node_id'.

        Args:
            node_id (int): The node ID in the StellarGraph object.
            class_of_interest (int): The class of interest for which the saliency maps are computed.
            X_baseline: For integrated gradients, X_baseline is the reference X to start with. Generally we should set X_baseline to a all-zero
                matrix with the size of the original feature matrix for existing features.
            steps (int): The number of values we need to interpolate. Generally steps = 20 should give good enough results.
            non_exist_feature (bool): Setting it to True allows to compute the importance of features that are 0.

        return (Numpy array): Integrated gradients for the node features.
        """
        node_idx = self.node_list.index(node_id)

        X_val = self.X
        if X_baseline is None:
            if not non_exist_feature:
                X_baseline = np.zeros(X_val.shape)
            else:
                # Importance of absent (zero) features: integrate from the
                # current features towards an all-ones feature matrix instead.
                X_baseline = X_val
                X_val = np.ones_like(X_val)

        X_diff = X_val - X_baseline

        total_gradients = np.zeros(X_val.shape)

        # Riemann approximation of the path integral: average the gradients at
        # `steps` interpolation points. The sum is divided by `steps`, matching
        # get_link_importance; previously it was not averaged, which scaled the
        # attributions by `steps` and broke the completeness axiom.
        for alpha in np.linspace(1.0 / steps, 1, steps):
            X_step = X_baseline + alpha * X_diff

            total_gradients += super().get_node_masks(
                node_idx, class_of_interest, X_val=X_step
            )

        return np.squeeze(total_gradients * X_diff / steps, 0)

    def get_link_importance(
        self, node_id, class_of_interest, steps=20, non_exist_edge=False
    ):
        """
        This function computes the integrated gradients which measure the importance of each edge to the prediction score of 'class_of_interest'
        for node 'node_id'.

        Args:
            node_id (int): The node ID in the StellarGraph object.
            class_of_interest (int): The class of interest for which the saliency maps are computed.
            steps (int): The number of values we need to interpolate. Generally steps = 20 should give good enough results.
            non_exist_edge (bool): Setting to True allows the function to get the importance for non-exist edges. This is useful when we want to understand
                adding which edges could change the current predictions. But the results for existing edges are not reliable. Simiarly, setting to False ((A_baseline = all zero matrix))
                could only accurately measure the importance of existing edges.

        return (Numpy array): shape the same with A_val. Integrated gradients for the links.
        """
        node_idx = self.node_list.index(node_id)

        A_val = self.A
        total_gradients = np.zeros(A_val.shape)
        # For non-existent edges integrate over the complement of A (excluding
        # self-loops); otherwise over the existing edges themselves.
        A_diff = (
            A_val
            if not non_exist_edge
            else (np.ones_like(A_val) - np.eye(A_val.shape[0]) - A_val)
        )
        for alpha in np.linspace(1.0 / steps, 1.0, steps):
            if self.is_sparse:
                A_val = sp.lil_matrix(A_val)
            tmp = super().get_link_masks(
                alpha, node_idx, class_of_interest, int(non_exist_edge)
            )
            if self.is_sparse:
                # Expand the sparse gradient values back to a dense matrix so
                # they can be accumulated.
                tmp = sp.csr_matrix(
                    (tmp, A_val.indices, A_val.indptr), shape=A_val.shape
                ).toarray()
            total_gradients += tmp
        return np.squeeze(np.multiply(total_gradients, A_diff) / steps, 0)

    def get_node_importance(self, node_id, class_of_interest, steps=20):
        """
        The importance of the node is defined as the sum of all the feature importance of the node.

        Args:
            node_id (int): The node ID in the StellarGraph object.
            class_of_interest (int): The class of interest for which the saliency maps are computed.
            steps (int): The number of values we need to interpolate. Generally steps = 20 should give good enough results.

        return (float): Importance score for the node.
        """
        gradients = self.get_integrated_node_masks(
            node_id, class_of_interest, steps=steps
        )
        # Collapse the feature axis to get one importance value per node.
        return np.sum(gradients, axis=1)
| 6,306 | 45.036496 | 181 | py |
stellargraph | stellargraph-master/stellargraph/utils/history.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
__all__ = ["plot_history"]
def plot_history(history, individual_figsize=(7, 4), return_figure=False, **kwargs):
    """
    Plot the training history of one or more models.

    This creates a column of plots, with one plot for each metric recorded during training, with the
    plot showing the metric vs. epoch. If multiple models have been trained (that is, a list of
    histories is passed in), each metric plot includes multiple train and validation series.

    Validation data is optional (it is detected by metrics with names starting with ``val_``).

    Args:
        history: the training history, as returned by :meth:`tf.keras.Model.fit`
        individual_figsize (tuple of numbers): the size of the plot for each metric
        return_figure (bool): if True, then the figure object with the plots is returned, None otherwise.
        kwargs: additional arguments to pass to :meth:`matplotlib.pyplot.subplots`

    Returns:
        :class:`matplotlib.figure.Figure`: The figure object with the plots if ``return_figure=True``, None otherwise
    """
    # Pin the colours so that every "train" series shares one colour and every
    # "validation" series another, even when several histories are overlaid.
    # These are the matplotlib defaults for a single train/validation pair.
    cycle_colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
    train_color = cycle_colors[0]
    validation_color = cycle_colors[1]

    histories = history if isinstance(history, list) else [history]

    def base_metric(name):
        # "val_accuracy" -> "accuracy"; names without the prefix are unchanged.
        if name.startswith("val_"):
            return name[len("val_") :]
        return name

    metric_names = sorted({base_metric(m) for m in histories[0].history.keys()})

    width, height = individual_figsize

    # One subplot per metric, stacked vertically so epochs stay aligned
    # (squeeze=False keeps indexing uniform even for a single metric).
    fig, axes = plt.subplots(
        len(metric_names),
        1,
        squeeze=False,
        sharex="col",
        figsize=(width, len(metric_names) * height),
        **kwargs,
    )

    found_validation = False
    for axis, metric in zip(axes[:, 0], metric_names):
        for record in histories:
            # summarize history for this metric
            axis.plot(record.history[metric], c=train_color)
            validation_series = record.history.get("val_" + metric)
            if validation_series is not None:
                axis.plot(validation_series, c=validation_color)
                found_validation = True
        axis.set_ylabel(metric, fontsize="x-large")

    # don't be redundant: only include legend on the top plot
    legend_labels = ["train"]
    if found_validation:
        legend_labels.append("validation")
    axes[0, 0].legend(legend_labels, loc="best", fontsize="x-large")

    # ... and only label "epoch" on the bottom
    axes[-1, 0].set_xlabel("epoch", fontsize="x-large")

    # minimise whitespace
    fig.tight_layout()

    return fig if return_figure else None
| 3,633 | 35.707071 | 117 | py |
stellargraph | stellargraph-master/stellargraph/data/explorer.py | # -*- coding: utf-8 -*-
#
# Copyright 2017-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"UniformRandomWalk",
"BiasedRandomWalk",
"UniformRandomMetaPathWalk",
"SampledBreadthFirstWalk",
"SampledHeterogeneousBreadthFirstWalk",
"TemporalRandomWalk",
"DirectedBreadthFirstNeighbours",
]
import numpy as np
import warnings
from collections import defaultdict, deque
from scipy import stats
from scipy.special import softmax
from ..core.schema import GraphSchema
from ..core.graph import StellarGraph
from ..core.utils import is_real_iterable
from ..core.validation import require_integer_in_range, comma_sep
from ..random import random_state
from abc import ABC, abstractmethod
def _default_if_none(value, default, name, ensure_not_none=True):
value = value if value is not None else default
if ensure_not_none and value is None:
raise ValueError(
f"{name}: expected a value to be specified in either `__init__` or `run`, found None in both"
)
return value
class RandomWalk(ABC):
    """
    Abstract base class for random walk implementations.

    Concrete subclasses implement ``run``, which takes an iterable of node IDs
    and returns a list of walks; each walk is a list of node IDs whose first
    element is the walk's root node.
    """

    def __init__(self, graph, seed=None):
        if not isinstance(graph, StellarGraph):
            raise TypeError("Graph must be a StellarGraph or StellarDiGraph.")

        self.graph = graph
        self._random_state, self._np_random_state = random_state(seed)

    def _get_random_state(self, seed):
        """
        Args:
            seed: The optional seed value for a given run.

        Returns:
            The random state as determined by the seed.
        """
        if seed is not None:
            # a per-run seed was supplied: validate it and build fresh generators
            require_integer_in_range(seed, "seed", min_val=0)
            return random_state(seed)

        # no per-run seed: reuse the generators created at construction time
        return self._random_state, self._np_random_state

    @staticmethod
    def _validate_walk_params(nodes, n, length):
        # `nodes` must be iterable; an empty iterable only warns, it isn't an error
        if not is_real_iterable(nodes):
            raise ValueError(f"nodes: expected an iterable, found: {nodes}")
        if len(nodes) == 0:
            warnings.warn(
                "No root node IDs given. An empty list will be returned as a result.",
                RuntimeWarning,
                stacklevel=3,
            )

        require_integer_in_range(n, "n", min_val=1)
        require_integer_in_range(length, "length", min_val=1)

    @abstractmethod
    def run(self, nodes, **kwargs):
        pass
class GraphWalk(object):
    """
    Base class for exploring graphs.

    Holds the graph, its schema and seeded random generators, and provides the
    parameter-validation and neighbour-sampling helpers shared by the concrete
    walk/sampler subclasses.
    """

    def __init__(self, graph, graph_schema=None, seed=None):
        # graph: the StellarGraph/StellarDiGraph to traverse (validated below)
        # graph_schema: optional pre-built GraphSchema; derived from the graph when falsy
        # seed: optional non-negative int seeding the random generators
        self.graph = graph

        # Initialize the random state
        self._check_seed(seed)
        self._random_state, self._np_random_state = random_state(seed)

        # We require a StellarGraph for this
        if not isinstance(graph, StellarGraph):
            raise TypeError("Graph must be a StellarGraph or StellarDiGraph.")

        if not graph_schema:
            self.graph_schema = self.graph.create_graph_schema()
        else:
            self.graph_schema = graph_schema

        if type(self.graph_schema) is not GraphSchema:
            self._raise_error(
                "The parameter graph_schema should be either None or of type GraphSchema."
            )

    def get_adjacency_types(self):
        """
        Return a dict of adjacency lists keyed by edge type, building and
        caching it on the instance (``self.adj_types``) on first use.
        """
        # Allow additional info for heterogeneous graphs.
        adj = getattr(self, "adj_types", None)
        if not adj:
            # Create a dict of adjacency lists per edge type, for faster neighbour sampling from graph in SampledHeteroBFS:
            self.adj_types = adj = self.graph._adjacency_types(
                self.graph_schema, use_ilocs=True
            )
        return adj

    def _check_seed(self, seed):
        # A valid seed is either None or a non-negative integer.
        if seed is not None:
            if type(seed) != int:
                self._raise_error(
                    "The random number generator seed value, seed, should be integer type or None."
                )
            if seed < 0:
                self._raise_error(
                    "The random number generator seed value, seed, should be non-negative integer or None."
                )

    def _get_random_state(self, seed):
        """
        Args:
            seed: The optional seed value for a given run.

        Returns:
            The random state as determined by the seed.
        """
        if seed is None:
            # Use the class's random state
            return self._random_state, self._np_random_state
        # seed the random number generators
        return random_state(seed)

    def neighbors(self, node):
        # Convenience wrapper: neighbour ilocs of `node`.
        return self.graph.neighbor_arrays(node, use_ilocs=True)

    def run(self, *args, **kwargs):
        """
        To be overridden by subclasses. It is the main entry point for performing random walks on the given
        graph.

        It should return the sequences of nodes in each random walk.
        """
        raise NotImplementedError

    def _raise_error(self, msg):
        # Prefix the message with the concrete class name to aid debugging.
        raise ValueError("({}) {}".format(type(self).__name__, msg))

    def _check_common_parameters(self, nodes, n, length, seed):
        """
        Checks that the parameter values are valid or raises ValueError exceptions with a message indicating the
        parameter (the first one encountered in the checks) with invalid value.

        Args:
            nodes: <list> A list of root node ids from which to commence the random walks.
            n: <int> Number of walks per node id.
            length: <int> Maximum length of each walk.
            seed: <int> Random number generator seed.
        """
        self._check_nodes(nodes)
        self._check_repetitions(n)
        self._check_length(length)
        self._check_seed(seed)

    def _check_nodes(self, nodes):
        # `nodes` must be a (possibly empty) iterable; emptiness only warns.
        if nodes is None:
            self._raise_error("A list of root node IDs was not provided.")
        if not is_real_iterable(nodes):
            self._raise_error("Nodes parameter should be an iterable of node IDs.")
        if (
            len(nodes) == 0
        ):  # this is not an error but maybe a warning should be printed to inform the caller
            warnings.warn(
                "No root node IDs given. An empty list will be returned as a result.",
                RuntimeWarning,
                stacklevel=3,
            )

    def _check_repetitions(self, n):
        # `n` must be a positive int (bool would pass `type(n) != int`? no: bool is not int type here).
        if type(n) != int:
            self._raise_error(
                "The number of walks per root node, n, should be integer type."
            )
        if n <= 0:
            self._raise_error(
                "The number of walks per root node, n, should be a positive integer."
            )

    def _check_length(self, length):
        # `length` must be a positive int.
        if type(length) != int:
            self._raise_error("The walk length, length, should be integer type.")
        if length <= 0:
            # Technically, length 0 should be okay, but by consensus is invalid.
            self._raise_error("The walk length, length, should be a positive integer.")

    # For neighbourhood sampling
    def _check_sizes(self, n_size):
        # `n_size` must be a non-empty list of non-negative ints (one per hop).
        err_msg = "The neighbourhood size must be a list of non-negative integers."
        if not isinstance(n_size, list):
            self._raise_error(err_msg)
        if len(n_size) == 0:
            # Technically, length 0 should be okay, but by consensus it is invalid.
            self._raise_error("The neighbourhood size list should not be empty.")
        for d in n_size:
            if type(d) != int or d < 0:
                self._raise_error(err_msg)

    def _sample_neighbours_untyped(
        self, neigh_func, py_and_np_rs, cur_node, size, weighted
    ):
        """
        Sample ``size`` neighbours of ``cur_node`` without checking node types or edge types, optionally
        using edge weights.

        Args:
            neigh_func: graph method returning a node's neighbour arrays (e.g.
                ``neighbor_arrays``, ``in_node_arrays``, ``out_node_arrays``).
            py_and_np_rs: pair (Python random state, numpy random state) as
                returned by ``_get_random_state``.
            cur_node: node iloc to sample from, or the -1 "missing node" sentinel.
            size: number of neighbours to sample (with replacement).
            weighted: if True, sample proportionally to edge weights.

        Returns:
            A numpy array of ``size`` sampled neighbour ilocs, or an array of
            -1 sentinels when there is nothing to sample from.
        """
        if cur_node != -1:
            neighbours = neigh_func(
                cur_node, use_ilocs=True, include_edge_weight=weighted
            )
            if weighted:
                neighbours, weights = neighbours
        else:
            neighbours = []

        if len(neighbours) > 0:
            if weighted:
                # sample following the edge weights
                idx = naive_weighted_choices(py_and_np_rs[1], weights, size=size)
                if idx is not None:
                    return neighbours[idx]
            else:
                # uniform sample; for small-to-moderate `size`s (< 100 is typical for GraphSAGE), random
                # has less overhead than np.random
                return np.array(py_and_np_rs[0].choices(neighbours, k=size))

        # no neighbours (e.g. isolated node, cur_node == -1 or all weights 0), so propagate the -1 sentinel
        return np.full(size, -1)
class UniformRandomWalk(RandomWalk):
    """
    Performs uniform random walks on the given graph.

    .. seealso::

       Related functionality:

       - :class:`.UnsupervisedSampler` for transforming random walks into links for unsupervised training of link prediction models
       - Other random walks: :class:`.BiasedRandomWalk`, :class:`.UniformRandomMetaPathWalk`, :class:`.TemporalRandomWalk`.

    Args:
        graph (StellarGraph): Graph to traverse
        n (int, optional): Total number of random walks per root node
        length (int, optional): Maximum length of each random walk
        seed (int, optional): Random number generator seed
    """

    def __init__(self, graph, n=None, length=None, seed=None):
        super().__init__(graph, seed=seed)
        self.n = n
        self.length = length

    def run(self, nodes, *, n=None, length=None, seed=None):
        """
        Perform a random walk starting from the root nodes. Optional parameters default to using the
        values passed in during construction.

        Args:
            nodes (list): The root nodes as a list of node IDs
            n (int, optional): Total number of random walks per root node
            length (int, optional): Maximum length of each random walk
            seed (int, optional): Random number generator seed

        Returns:
            List of lists of nodes ids for each of the random walks
        """
        n = _default_if_none(n, self.n, "n")
        length = _default_if_none(length, self.length, "length")
        self._validate_walk_params(nodes, n, length)
        rs, _ = self._get_random_state(seed)

        root_ilocs = self.graph.node_ids_to_ilocs(nodes)

        # n independent walks per root node, in root order
        walks = []
        for iloc in root_ilocs:
            for _ in range(n):
                walks.append(self._walk(rs, iloc, length))
        return walks

    def _walk(self, rs, start_node, length):
        # grow the path one uniformly-chosen neighbour at a time, terminating
        # early when a dead end (node with no neighbours) is reached
        path = [start_node]
        while len(path) < length:
            candidates = self.graph.neighbor_arrays(path[-1], use_ilocs=True)
            if len(candidates) == 0:
                break
            path.append(rs.choice(candidates))

        return list(self.graph.node_ilocs_to_ids(path))
def naive_weighted_choices(rs, weights, size=None):
    """
    Select indices at random, weighted by the iterator `weights` of
    arbitrary (non-negative) floats. That is, `x` will be returned
    with probability `weights[x]/sum(weights)`.

    For doing a single sample with arbitrary weights, this is much (5x
    or more) faster than numpy.random.choice, because the latter
    requires a lot of preprocessing (normalized probabilties), and
    does a lot of conversions/checks/preprocessing internally.

    Args:
        rs: numpy random state providing ``random()``.
        weights: iterable of non-negative weights.
        size: number of indices to draw, or None for a single scalar index.

    Returns:
        The sampled index (or array of indices when ``size`` is given), or
        None when all weights sum to zero.
    """
    cumulative = np.cumsum(weights)
    total = cumulative[-1]
    if total == 0:
        # all weights were zero (probably), so we shouldn't choose anything
        return None

    # draw uniform value(s) in [0, total) and locate them in the cumulative sums
    draws = rs.random() if size is None else rs.random(size)
    return np.searchsorted(cumulative, draws * total, side="left")
class BiasedRandomWalk(RandomWalk):
    """
    Performs biased second order random walks (like those used in Node2Vec algorithm
    https://snap.stanford.edu/node2vec/) controlled by the values of two parameters p and q.

    .. seealso::

       Related functionality:

       - :class:`.UnsupervisedSampler` for transforming random walks into links for unsupervised training of link prediction models
       - :class:`.Node2Vec`, :class:`.Node2VecNodeGenerator` and :class:`.Node2VecLinkGenerator` for training a Node2Vec using only StellarGraph
       - Other random walks: :class:`.UniformRandomWalk`, :class:`.UniformRandomMetaPathWalk`, :class:`.TemporalRandomWalk`.

    Args:
        graph (StellarGraph): Graph to traverse
        n (int, optional): Total number of random walks per root node
        length (int, optional): Maximum length of each random walk
        p (float, optional): Defines probability, 1/p, of returning to source node
        q (float, optional): Defines probability, 1/q, for moving to a node away from the source node
        weighted (bool, optional): Indicates whether the walk is unweighted or weighted
        seed (int, optional): Random number generator seed
    """

    def __init__(
        self, graph, n=None, length=None, p=1.0, q=1.0, weighted=False, seed=None,
    ):
        super().__init__(graph, seed=seed)
        self.n = n
        self.length = length
        self.p = p
        self.q = q
        self.weighted = weighted
        self._checked_weights = False

        if weighted:
            self._check_weights_valid()

    def _check_weights_valid(self):
        """
        Validate (at most once per instance) that every edge weight in the
        graph is non-negative and finite.

        Raises:
            ValueError: if any edge weight is negative, NaN or infinite.
        """
        if self._checked_weights:
            # we only need to check the weights once, either in the constructor or in run, whichever
            # sets `weighted=True` first
            return

        # Check that all edge weights are greater than or equal to 0.
        source, target, _, weights = self.graph.edge_arrays(
            include_edge_weight=True, use_ilocs=True
        )
        (invalid,) = np.where((weights < 0) | ~np.isfinite(weights))
        if len(invalid) > 0:

            def format(idx):
                s = source[idx]
                t = target[idx]
                w = weights[idx]
                return f"{s!r} to {t!r} (weight = {w})"

            raise ValueError(
                f"graph: expected all edge weights to be non-negative and finite, found some negative or infinite: {comma_sep(invalid, stringify=format)}"
            )

        self._checked_weights = True

    def run(
        self, nodes, *, n=None, length=None, p=None, q=None, seed=None, weighted=None
    ):
        """
        Perform a random walk starting from the root nodes. Optional parameters default to using the
        values passed in during construction.

        Args:
            nodes (list): The root nodes as a list of node IDs
            n (int, optional): Total number of random walks per root node
            length (int, optional): Maximum length of each random walk
            p (float, optional): Defines probability, 1/p, of returning to source node
            q (float, optional): Defines probability, 1/q, for moving to a node away from the source node
            seed (int, optional): Random number generator seed; default is None
            weighted (bool, optional): Indicates whether the walk is unweighted or weighted

        Returns:
            List of lists of nodes ids for each of the random walks
        """
        n = _default_if_none(n, self.n, "n")
        length = _default_if_none(length, self.length, "length")
        p = _default_if_none(p, self.p, "p")
        q = _default_if_none(q, self.q, "q")
        weighted = _default_if_none(weighted, self.weighted, "weighted")
        self._validate_walk_params(nodes, n, length)
        self._check_weights(p, q, weighted)
        rs, _ = self._get_random_state(seed)

        nodes = self.graph.node_ids_to_ilocs(nodes)

        if weighted:
            self._check_weights_valid()

        weight_dtype = self.graph._edges.weights.dtype
        # `np.cast[...]` was deprecated in NumPy 1.25 and removed in NumPy 2.0;
        # `np.dtype(...).type` is the equivalent scalar constructor on all versions.
        cast_func = np.dtype(weight_dtype).type
        # node2vec transition biases: 1/p for returning to the previous node,
        # 1/q for moving further from it; computed in the edge-weight dtype so
        # the in-place `weights *=` updates below don't change dtype.
        ip = cast_func(1.0 / p)
        iq = cast_func(1.0 / q)

        if np.isinf(ip):
            raise ValueError(
                f"p: value ({p}) is too small. It must be possible to represent 1/p in {weight_dtype}, but this value overflows to infinity."
            )
        if np.isinf(iq):
            raise ValueError(
                f"q: value ({q}) is too small. It must be possible to represent 1/q in {weight_dtype}, but this value overflows to infinity."
            )

        walks = []
        for node in nodes:  # iterate over root nodes
            for walk_number in range(n):  # generate n walks per root node
                # the walk starts at the root
                walk = [node]

                previous_node = None
                previous_node_neighbours = []

                current_node = node

                for _ in range(length - 1):
                    # select one of the neighbours using the
                    # appropriate transition probabilities
                    if weighted:
                        neighbours, weights = self.graph.neighbor_arrays(
                            current_node, include_edge_weight=True, use_ilocs=True
                        )
                    else:
                        neighbours = self.graph.neighbor_arrays(
                            current_node, use_ilocs=True
                        )
                        weights = np.ones(neighbours.shape, dtype=weight_dtype)
                    if len(neighbours) == 0:
                        break

                    # second-order biasing: boost the edge back to the previous
                    # node by 1/p, leave edges to the previous node's
                    # neighbours unchanged, and scale all others by 1/q
                    mask = neighbours == previous_node
                    weights[mask] *= ip
                    mask |= np.isin(neighbours, previous_node_neighbours)
                    weights[~mask] *= iq

                    choice = naive_weighted_choices(rs, weights)
                    if choice is None:
                        # all transition weights were zero: nowhere to go
                        break

                    previous_node = current_node
                    previous_node_neighbours = neighbours
                    current_node = neighbours[choice]
                    walk.append(current_node)

                walks.append(list(self.graph.node_ilocs_to_ids(walk)))

        return walks

    def _check_weights(self, p, q, weighted):
        """
        Checks that the parameter values are valid or raises ValueError exceptions with a message indicating the
        parameter (the first one encountered in the checks) with invalid value.

        Args:
            p: <float> The backward walk 'penalty' factor.
            q: <float> The forward walk 'penalty' factor.
            weighted: <False or True> Indicates whether the walk is unweighted or weighted.
        """
        if p <= 0.0:
            raise ValueError(f"p: expected positive numeric value, found {p}")

        if q <= 0.0:
            raise ValueError(f"q: expected positive numeric value, found {q}")

        if type(weighted) != bool:
            raise ValueError(f"weighted: expected boolean value, found {weighted}")
class UniformRandomMetaPathWalk(RandomWalk):
    """
    For heterogeneous graphs, it performs uniform random walks based on given metapaths. Optional
    parameters default to using the values passed in during construction.

    .. seealso::

       Examples using this random walk:

       - `Metapath2Vec link prediction <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/metapath2vec-link-prediction.html>`__
       - `Metapath2Vec unsupervised representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/metapath2vec-embeddings.html>`__

       Related functionality:

       - :class:`.UnsupervisedSampler` for transforming random walks into links for unsupervised training of link prediction models
       - Other random walks: :class:`.UniformRandomWalk`, :class:`.BiasedRandomWalk`, :class:`.TemporalRandomWalk`.

    Args:
        graph (StellarGraph): Graph to traverse
        n (int, optional): Total number of random walks per root node
        length (int, optional): Maximum length of each random walk
        metapaths (list of list, optional): List of lists of node labels that specify a metapath schema, e.g.,
            [['Author', 'Paper', 'Author'], ['Author, 'Paper', 'Venue', 'Paper', 'Author']] specifies two metapath
            schemas of length 3 and 5 respectively.
        seed (int, optional): Random number generator seed
    """

    def __init__(
        self, graph, n=None, length=None, metapaths=None, seed=None,
    ):
        super().__init__(graph, seed=seed)
        self.n = n
        self.length = length
        self.metapaths = metapaths

    def run(self, nodes, *, n=None, length=None, metapaths=None, seed=None):
        """
        Performs metapath-driven uniform random walks on heterogeneous graphs.

        Args:
            nodes (list): The root nodes as a list of node IDs
            n (int, optional): Total number of random walks per root node
            length (int, optional): Maximum length of each random walk
            metapaths (list of list, optional): List of lists of node labels that specify a metapath schema, e.g.,
                [['Author', 'Paper', 'Author'], ['Author, 'Paper', 'Venue', 'Paper', 'Author']] specifies two metapath
                schemas of length 3 and 5 respectively.
            seed (int, optional): Random number generator seed; default is None

        Returns:
            List of lists of nodes ids for each of the random walks generated
        """
        n = _default_if_none(n, self.n, "n")
        length = _default_if_none(length, self.length, "length")
        metapaths = _default_if_none(metapaths, self.metapaths, "metapaths")
        self._validate_walk_params(nodes, n, length)
        self._check_metapath_values(metapaths)
        rs, _ = self._get_random_state(seed)

        nodes = self.graph.node_ids_to_ilocs(nodes)

        walks = []

        for node in nodes:
            # retrieve node type
            label = self.graph.node_type(node, use_ilocs=True)
            # only metapaths starting with this root's node type are applicable
            filtered_metapaths = [
                metapath
                for metapath in metapaths
                if len(metapath) > 0 and metapath[0] == label
            ]

            for metapath in filtered_metapaths:
                # augment metapath to be length long
                # if (
                #     len(metapath) == 1
                # ):  # special case for random walks like in a homogeneous graphs
                #     metapath = metapath * length
                # else:
                metapath = metapath[1:] * ((length // (len(metapath) - 1)) + 1)
                for _ in range(n):
                    walk = (
                        []
                    )  # holds the walk data for this walk; first node is the starting node
                    current_node = node
                    for d in range(length):
                        walk.append(current_node)
                        # d+1 can also be used to index metapath to retrieve the node type for the next step in the walk
                        neighbours = self.graph.neighbor_arrays(
                            current_node, use_ilocs=True
                        )
                        # filter these by node type
                        neighbour_types = self.graph.node_type(
                            neighbours, use_ilocs=True
                        )
                        neighbours = [
                            neigh
                            for neigh, neigh_type in zip(neighbours, neighbour_types)
                            if neigh_type == metapath[d]
                        ]

                        if len(neighbours) == 0:
                            # if no neighbours of the required type as dictated by the metapath exist, then stop.
                            break
                        # select one of the neighbours uniformly at random
                        current_node = rs.choice(
                            neighbours
                        )  # the next node in the walk

                    walks.append(
                        list(self.graph.node_ilocs_to_ids(walk))
                    )  # store the walk

        return walks

    def _check_metapath_values(self, metapaths):
        """
        Checks that the parameter values are valid or raises ValueError exceptions with a message indicating the
        parameter (the first one encountered in the checks) with invalid value.

        Args:
            metapaths: <list> List of lists of node labels that specify a metapath schema, e.g.,
                [['Author', 'Paper', 'Author'], ['Author, 'Paper', 'Venue', 'Paper', 'Author']] specifies two metapath
                schemas of length 3 and 5 respectively.
        """

        def raise_error(msg):
            raise ValueError(f"metapaths: {msg}, found {metapaths}")

        if type(metapaths) != list:
            raise_error("expected list of lists.")
        for metapath in metapaths:
            if type(metapath) != list:
                raise_error("expected each metapath to be a list of node labels")
            if len(metapath) < 2:
                raise_error("expected each metapath to specify at least two node types")

            for node_label in metapath:
                if type(node_label) != str:
                    raise_error("expected each node type in metapaths to be a string")
            if metapath[0] != metapath[-1]:
                raise_error(
                    "expected the first and last node type in a metapath to be the same"
                )
class SampledBreadthFirstWalk(GraphWalk):
    """
    Breadth First Walk that generates a sampled number of paths from a starting node.
    It can be used to extract a random sub-graph starting from a set of initial nodes.
    """

    def run(self, nodes, n_size, n=1, seed=None, weighted=False):
        """
        Performs a sampled breadth-first walk starting from the root nodes.

        Args:
            nodes (list): A list of root node ids such that from each node a BFWs will be generated up to the
                given depth. The depth of each of the walks is inferred from the length of the ``n_size``
                list parameter.
            n_size (list of int): The number of neighbouring nodes to expand at each depth of the walk.
                Sampling of neighbours is always done with replacement regardless of the node degree and
                number of neighbours requested.
            n (int): Number of walks per node id.
            seed (int, optional): Random number generator seed; Default is None.
            weighted (bool, optional): If True, sample neighbours using the edge weights in the graph.

        Returns:
            A list of lists such that each list element is a sequence of ids corresponding to a BFW.
        """
        self._check_sizes(n_size)
        self._check_common_parameters(nodes, n, len(n_size), seed)
        py_and_np_rs = self._get_random_state(seed)

        max_hops = len(n_size)  # how deep the expansion goes
        walks = []

        for root in nodes:
            # perform n independent bounded BFS expansions from each root
            for _ in range(n):
                frontier = deque([(root, 0)])  # pending (node, depth) pairs
                walk = []

                while frontier:
                    node_iloc, hop = frontier.popleft()
                    walk.append(node_iloc)

                    next_hop = hop + 1  # depth of this node's sampled neighbours
                    if next_hop > max_hops:
                        # past the requested depth: record but don't expand
                        continue

                    sampled = self._sample_neighbours_untyped(
                        self.graph.neighbor_arrays,
                        py_and_np_rs,
                        node_iloc,
                        n_size[hop],
                        weighted,
                    )

                    # queue the sampled neighbours for later expansion
                    frontier.extend((s, next_hop) for s in sampled)

                walks.append(walk)

        return walks
class SampledHeterogeneousBreadthFirstWalk(GraphWalk):
    """
    Breadth First Walk for heterogeneous graphs that generates a sampled number of paths from a starting node.
    It can be used to extract a random sub-graph starting from a set of initial nodes.
    """

    def run(self, nodes, n_size, n=1, seed=None):
        """
        Performs a sampled breadth-first walk starting from the root nodes.

        Args:
            nodes (list): A list of root node ids such that from each node n BFWs will be generated
                with the number of samples per hop specified in n_size.
            n_size (list of int): The number of neighbouring nodes to expand at each depth of the walk.
                Sampling of neighbours with replacement is always used regardless of the node degree and
                number of neighbours requested.
            n (int, default 1): Number of walks per node id.
            seed (int, optional): Random number generator seed; default is None

        Returns:
            A list of lists such that each list element is a sequence of ids corresponding to a sampled Heterogeneous
            BFW.
        """
        self._check_sizes(n_size)
        self._check_common_parameters(nodes, n, len(n_size), seed)
        rs, _ = self._get_random_state(seed)

        adj = self.get_adjacency_types()

        walks = []
        d = len(n_size)  # depth of search

        for node in nodes:  # iterate over root nodes
            for _ in range(n):  # do n bounded breadth first walks from each root node
                # use a deque for the BFS frontier: popleft() is O(1), unlike list.pop(0)
                q = deque()
                walk = list()  # the list of sampled node-lists in the subgraph of node
                # Start the walk by adding the head node, and node type to the frontier queue q
                node_type = self.graph.node_type(node, use_ilocs=True)
                q.append((node, node_type, 0))

                # add the root node to the walks
                walk.append([node])
                while len(q) > 0:
                    # remove the first element in the queue
                    current_node, current_node_type, depth = q.popleft()
                    depth = depth + 1  # the depth of the neighbouring nodes

                    # consider the subgraph up to and including depth d from root node
                    if depth <= d:
                        # Find edge types for current node type
                        current_edge_types = self.graph_schema.schema[current_node_type]

                        # Create samples of neighbours for all edge types
                        for et in current_edge_types:
                            neigh_et = adj[et][current_node]

                            # If there are no neighbours of this type then we return None
                            # in the place of the nodes that would have been sampled
                            # YT update: with the new way to get neigh_et from adj[et][current_node], len(neigh_et) is always > 0.
                            # In case of no neighbours of the current node for et, neigh_et == [None],
                            # and samples automatically becomes [None]*n_size[depth-1]
                            if len(neigh_et) > 0:
                                samples = rs.choices(neigh_et, k=n_size[depth - 1])
                            else:  # defensive fallback; see comment above, this branch should be unreachable
                                _size = n_size[depth - 1]
                                samples = [-1] * _size

                            walk.append(samples)
                            q.extend(
                                (sampled_node, et.n2, depth)
                                for sampled_node in samples
                            )

                # finished i-th walk from node so add it to the list of walks as a list
                walks.append(walk)

        return walks
class DirectedBreadthFirstNeighbours(GraphWalk):
    """
    Breadth First sampler that generates the composite of a number of sampled paths from a starting node.
    It can be used to extract a random sub-graph starting from a set of initial nodes.
    """

    def __init__(self, graph, graph_schema=None, seed=None):
        super().__init__(graph, graph_schema, seed)
        if not graph.is_directed():
            self._raise_error("Graph must be directed")

    def run(self, nodes, in_size, out_size, n=1, seed=None, weighted=False):
        """
        Performs a sampled breadth-first walk starting from the root nodes.

        Args:
            nodes (list): A list of root node ids such that from each node n BFWs will be generated up to the
                given depth d.
            in_size (list of int): The number of in-directed nodes to sample with replacement at each depth of the walk.
            out_size (list of int): The number of out-directed nodes to sample with replacement at each depth of the walk.
            n (int, default 1): Number of walks per node id.
            seed (int, optional): Random number generator seed; default is None
            weighted (bool, optional): If True, sample neighbours using the edge weights in the graph.

        Returns:
            A list of multi-hop neighbourhood samples. Each sample expresses multiple undirected walks, but the in-node
            neighbours and out-node neighbours are sampled separately. Each sample has the format:

                [[node]
                 [in_1...in_n]  [out_1...out_m]
                 [in_1.in_1...in_n.in_p] [in_1.out_1...in_n.out_q]
                    [out_1.in_1...out_m.in_p] [out_1.out_1...out_m.out_q]
                 [in_1.in_1.in_1...in_n.in_p.in_r] [in_1.in_1.out_1...in_n.in_p.out_s] ...
                 ...]

            where a single, undirected walk might be, for example:

                [node out_i  out_i.in_j  out_i.in_j.in_k ...]
        """
        self._check_neighbourhood_sizes(in_size, out_size)
        self._check_common_parameters(nodes, n, len(in_size), seed)
        py_and_np_rs = self._get_random_state(seed)

        max_hops = len(in_size)
        # A binary tree is a graph of nodes; however, we wish to avoid overusing the term 'node'.
        # Consider that each binary tree node carries some information.
        # We uniquely and deterministically number every node in the tree, so we
        # can represent the information stored in the tree via a flattened list of 'slots'.
        # Each slot (and corresponding binary tree node) now has a unique index in the flattened list.
        # In-samples of the tree node at slot i land in slot 2*i + 1, out-samples in slot 2*i + 2.
        max_slots = 2 ** (max_hops + 1) - 1

        samples = []

        for node in nodes:  # iterate over root nodes
            for _ in range(n):  # do n bounded breadth first walks from each root node
                # use a deque for the BFS frontier: popleft() is O(1), unlike list.pop(0)
                q = deque()
                # the list of sampled node-lists:
                sample = [[] for _ in range(max_slots)]
                # Add node to queue as (node, depth, slot)
                q.append((node, 0, 0))

                while len(q) > 0:
                    # remove the first element in the queue
                    cur_node, cur_depth, cur_slot = q.popleft()
                    sample[cur_slot].append(cur_node)  # add to the walk
                    depth = cur_depth + 1  # the depth of the neighbouring nodes

                    # consider the subgraph up to and including max_hops from root node
                    if depth > max_hops:
                        continue

                    # get in-nodes
                    neighbours = self._sample_neighbours_untyped(
                        self.graph.in_node_arrays,
                        py_and_np_rs,
                        cur_node,
                        in_size[cur_depth],
                        weighted,
                    )
                    # add them to the back of the queue
                    slot = 2 * cur_slot + 1
                    q.extend(
                        [(sampled_node, depth, slot) for sampled_node in neighbours]
                    )

                    # get out-nodes
                    neighbours = self._sample_neighbours_untyped(
                        self.graph.out_node_arrays,
                        py_and_np_rs,
                        cur_node,
                        out_size[cur_depth],
                        weighted,
                    )
                    # add them to the back of the queue
                    slot = slot + 1
                    q.extend(
                        [(sampled_node, depth, slot) for sampled_node in neighbours]
                    )

                # finished multi-hop neighbourhood sampling
                samples.append(sample)

        return samples

    def _check_neighbourhood_sizes(self, in_size, out_size):
        """
        Checks that the parameter values are valid or raises ValueError exceptions with a message indicating the
        parameter (the first one encountered in the checks) with invalid value.

        Args:
            in_size: <list> The number of in-directed nodes to sample at each depth of the walk.
            out_size: <list> The number of out-directed nodes to sample at each depth of the walk.
        """
        self._check_sizes(in_size)
        self._check_sizes(out_size)
        if len(in_size) != len(out_size):
            self._raise_error(
                "The number of hops for the in and out neighbourhoods must be the same."
            )
class TemporalRandomWalk(GraphWalk):
"""
Performs temporal random walks on the given graph. The graph should contain numerical edge
weights that correspond to the time at which the edge was created. Exact units are not relevant
for the algorithm, only the relative differences (e.g. seconds, days, etc).
.. seealso::
Example using this random walk: `link prediction with CTDNE <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/ctdne-link-prediction.html>`__
Related functionality: other random walks: :class:`.UniformRandomWalk`, :class:`.BiasedRandomWalk`, :class:`.UniformRandomMetaPathWalk`.
Args:
graph (StellarGraph): Graph to traverse
cw_size (int, optional): Size of context window. Also used as the minimum walk length,
since a walk must generate at least 1 context window for it to be useful.
max_walk_length (int, optional): Maximum length of each random walk. Should be greater
than or equal to the context window size.
initial_edge_bias (str, optional): Distribution to use when choosing a random
initial temporal edge to start from. Available options are:
* None (default) - The initial edge is picked from a uniform distribution.
* "exponential" - Heavily biased towards more recent edges.
walk_bias (str, optional): Distribution to use when choosing a random
neighbour to walk through. Available options are:
* None (default) - Neighbours are picked from a uniform distribution.
* "exponential" - Exponentially decaying probability, resulting in a bias towards shorter time gaps.
p_walk_success_threshold (float, optional): Lower bound for the proportion of successful
(i.e. longer than minimum length) walks. If the 95% percentile of the
estimated proportion is less than the provided threshold, a RuntimeError
will be raised. The default value of 0.01 means an error is raised if less than 1%
of the attempted random walks are successful. This parameter exists to catch any
potential situation where too many unsuccessful walks can cause an infinite or very
slow loop.
seed (int, optional): Random number generator seed.
"""
def __init__(
self,
graph,
cw_size=None,
max_walk_length=80,
initial_edge_bias=None,
walk_bias=None,
p_walk_success_threshold=0.01,
seed=None,
):
super().__init__(graph, graph_schema=None, seed=seed)
self.cw_size = cw_size
self.max_walk_length = max_walk_length
self.initial_edge_bias = initial_edge_bias
self.walk_bias = walk_bias
self.p_walk_success_threshold = p_walk_success_threshold
    def run(
        self,
        num_cw,
        cw_size=None,
        max_walk_length=None,
        initial_edge_bias=None,
        walk_bias=None,
        p_walk_success_threshold=None,
        seed=None,
    ):
        """
        Perform a time respecting random walk starting from randomly selected temporal edges.

        Optional parameters default to using the values passed in during construction.

        Args:
            num_cw (int): Total number of context windows to generate. For comparable
                results to most other random walks, this should be a multiple of the number
                of nodes in the graph.
            cw_size (int, optional): Size of context window. Also used as the minimum walk length,
                since a walk must generate at least 1 context window for it to be useful.
            max_walk_length (int, optional): Maximum length of each random walk. Should be greater
                than or equal to the context window size.
            initial_edge_bias (str, optional): Distribution to use when choosing a random
                initial temporal edge to start from. Available options are:

                * None (default) - The initial edge is picked from a uniform distribution.
                * "exponential" - Heavily biased towards more recent edges.

            walk_bias (str, optional): Distribution to use when choosing a random
                neighbour to walk through. Available options are:

                * None (default) - Neighbours are picked from a uniform distribution.
                * "exponential" - Exponentially decaying probability, resulting in a bias towards shorter time gaps.

            p_walk_success_threshold (float, optional): Lower bound for the proportion of successful
                (i.e. longer than minimum length) walks. If the 95% percentile of the
                estimated proportion is less than the provided threshold, a RuntimeError
                will be raised. The default value of 0.01 means an error is raised if less than 1%
                of the attempted random walks are successful. This parameter exists to catch any
                potential situation where too many unsuccessful walks can cause an infinite or very
                slow loop.
            seed (int, optional): Random number generator seed; default is None.

        Returns:
            List of lists of node ids for each of the random walks.
        """
        # Fall back to the values given at construction when no per-call override is supplied.
        cw_size = _default_if_none(cw_size, self.cw_size, "cw_size")
        max_walk_length = _default_if_none(
            max_walk_length, self.max_walk_length, "max_walk_length"
        )
        initial_edge_bias = _default_if_none(
            initial_edge_bias,
            self.initial_edge_bias,
            "initial_edge_bias",
            ensure_not_none=False,
        )
        walk_bias = _default_if_none(
            walk_bias, self.walk_bias, "walk_bias", ensure_not_none=False
        )
        p_walk_success_threshold = _default_if_none(
            p_walk_success_threshold,
            self.p_walk_success_threshold,
            "p_walk_success_threshold",
        )
        # A context window needs at least a (target, context) pair of nodes.
        if cw_size < 2:
            raise ValueError(
                f"cw_size: context window size should be greater than 1, found {cw_size}"
            )
        # A walk shorter than the window can never produce a context window.
        if max_walk_length < cw_size:
            raise ValueError(
                f"max_walk_length: maximum walk length should not be less than the context window size, found {max_walk_length}"
            )
        _, np_rs = self._get_random_state(seed)
        walks = []
        num_cw_curr = 0
        # NOTE(review): the edge "weight" slot is interpreted as the edge's timestamp here —
        # confirm that graphs used with this walker store times as edge weights.
        sources, targets, _, times = self.graph.edge_arrays(include_edge_weight=True)
        # Biases for choosing the first edge of each walk; None means uniform sampling.
        edge_biases = self._temporal_biases(
            times, None, bias_type=initial_edge_bias, is_forward=False,
        )
        successes = 0
        failures = 0
        def not_progressing_enough():
            # Estimate the probability p of a walk being long enough; the 95% percentile is used to
            # be more stable with respect to randomness. This uses Beta(1, 1) as the prior, since
            # it's uniform on p
            posterior = stats.beta.ppf(0.95, 1 + successes, 1 + failures)
            return posterior < p_walk_success_threshold
        # loop runs until we have enough context windows in total
        while num_cw_curr < num_cw:
            first_edge_index = self._sample(len(times), edge_biases, np_rs)
            src = sources[first_edge_index]
            dst = targets[first_edge_index]
            t = times[first_edge_index]
            # Cap the walk so we do not generate more context windows than still needed:
            # a walk of length L yields L - cw_size + 1 windows.
            remaining_length = num_cw - num_cw_curr + cw_size - 1
            walk = self._walk(
                src, dst, t, min(max_walk_length, remaining_length), walk_bias, np_rs
            )
            if len(walk) >= cw_size:
                walks.append(walk)
                # Each extra node beyond the first window adds one more context window.
                num_cw_curr += len(walk) - cw_size + 1
                successes += 1
            else:
                # Walk hit a temporal dead-end before reaching the minimum useful length.
                failures += 1
                if not_progressing_enough():
                    raise RuntimeError(
                        f"Discarded {failures} walks out of {failures + successes}. "
                        "Too many temporal walks are being discarded for being too short. "
                        f"Consider using a smaller context window size (currently cw_size={cw_size})."
                    )
        return walks
def _sample(self, n, biases, np_rs):
if biases is not None:
assert len(biases) == n
return naive_weighted_choices(np_rs, biases)
else:
return np_rs.choice(n)
def _exp_biases(self, times, t_0, decay):
# t_0 assumed to be smaller than all time values
return softmax(t_0 - np.array(times) if decay else np.array(times) - t_0)
def _temporal_biases(self, times, time, bias_type, is_forward):
if bias_type is None:
# default to uniform random sampling
return None
# time is None indicates we should obtain the minimum available time for t_0
t_0 = time if time is not None else min(times)
if bias_type == "exponential":
# exponential decay bias needs to be reversed if looking backwards in time
return self._exp_biases(times, t_0, decay=is_forward)
else:
raise ValueError("Unsupported bias type")
def _step(self, node, time, bias_type, np_rs):
"""
Perform 1 temporal step from a node. Returns None if a dead-end is reached.
"""
neighbours, times = self.graph.neighbor_arrays(node, include_edge_weight=True)
neighbours = neighbours[times > time]
times = times[times > time]
if len(neighbours) > 0:
biases = self._temporal_biases(times, time, bias_type, is_forward=True)
chosen_neighbour_index = self._sample(len(neighbours), biases, np_rs)
assert chosen_neighbour_index is not None, "biases should never be all zero"
next_node = neighbours[chosen_neighbour_index]
next_time = times[chosen_neighbour_index]
return next_node, next_time
else:
return None
def _walk(self, src, dst, t, length, bias_type, np_rs):
walk = [src, dst]
node, time = dst, t
for _ in range(length - 2):
result = self._step(node, time=time, bias_type=bias_type, np_rs=np_rs)
if result is not None:
node, time = result
walk.append(node)
else:
break
return walk
| 50,782 | 41.603188 | 468 | py |
stellargraph | stellargraph-master/stellargraph/data/unsupervised_sampler.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["UnsupervisedSampler"]
import numpy as np
from stellargraph.core.utils import is_real_iterable
from stellargraph.core.graph import StellarGraph
from stellargraph.data.explorer import UniformRandomWalk
from stellargraph.random import random_state
def _warn_if_ignored(value, default, name):
if value != default:
raise ValueError(
f"walker, {name}: cannot specify both 'walker' and '{name}'. Please use one or the other."
)
class UnsupervisedSampler:
    """
    The UnsupervisedSampler is responsible for sampling walks in the given graph
    and returning positive and negative samples w.r.t. those walks, on demand.

    The positive samples are all the (target, context) pairs from the walks and the negative
    samples are contexts generated for each target based on a sampling distribution.

    By default, a UniformRandomWalk is used, but a custom `walker` can be specified instead. An
    error will be raised if other parameters are specified along with a custom `walker`.

    .. seealso::

       Built-in classes for ``walker``: :class:`.UniformRandomWalk`, :class:`.BiasedRandomWalk`,
       :class:`.UniformRandomMetaPathWalk`.

    Args:
        G (StellarGraph): A stellargraph with features.
        nodes (iterable, optional): The root nodes from which individual walks start.
            If not provided, all nodes in the graph are used.
        length (int): Length of the walks for the default UniformRandomWalk walker. Length must
            be at least 2.
        number_of_walks (int): Number of walks from each root node for the default
            UniformRandomWalk walker.
        seed (int, optional): Random seed for the default UniformRandomWalk walker.
        walker (RandomWalk, optional): A RandomWalk object to use instead of the default
            UniformRandomWalk walker.
    """

    def __init__(
        self, G, nodes=None, length=2, number_of_walks=1, seed=None, walker=None,
    ):
        if not isinstance(G, StellarGraph):
            raise ValueError(
                "({}) Graph must be a StellarGraph or StellarDigraph object.".format(
                    type(self).__name__
                )
            )
        else:
            self.graph = G

        # Instantiate the walker class used to generate random walks in the graph.
        if walker is not None:
            # A custom walker carries its own configuration, so the walk-related
            # arguments must be left at their defaults; otherwise they'd be ignored.
            _warn_if_ignored(length, 2, "length")
            _warn_if_ignored(number_of_walks, 1, "number_of_walks")
            _warn_if_ignored(seed, None, "seed")
            self.walker = walker
        else:
            self.walker = UniformRandomWalk(
                G, n=number_of_walks, length=length, seed=seed
            )

        # Define the root nodes for the walks.
        # If no root nodes are provided, default to using all nodes as root nodes.
        if nodes is None:
            self.nodes = list(G.nodes())
        elif is_real_iterable(nodes):  # check whether the nodes provided are valid.
            self.nodes = list(nodes)
        else:
            raise ValueError("nodes parameter should be an iterable of node IDs.")

        # Require walks of at least length two because to create a sample pair we need at least two nodes.
        if length < 2:
            raise ValueError(
                "({}) For generating (target,context) samples, walk length has to be at least 2".format(
                    type(self).__name__
                )
            )
        else:
            self.length = length

        if number_of_walks < 1:
            raise ValueError(
                "({}) At least 1 walk from each head node has to be done".format(
                    type(self).__name__
                )
            )
        else:
            self.number_of_walks = number_of_walks

        # Setup an internal random state with the given seed.
        _, self.np_random = random_state(seed)

    def run(self, batch_size):
        """
        This method returns a batch_size number of positive and negative samples from the graph.

        A random walk is generated from each root node, which are transformed into positive context
        pairs, and the same number of negative pairs are generated from a global node sampling
        distribution. The resulting list of context pairs are shuffled and converted to batches of
        size ``batch_size``.

        Currently the global node sampling distribution for the negative pairs is the degree
        distribution to the 3/4 power. This is the same used in node2vec
        (https://snap.stanford.edu/node2vec/).

        Args:
            batch_size (int): The number of samples to generate for each batch.
                This must be an even number.

        Returns:
            List of batches, where each batch is a tuple of (list context pairs, list of labels)
        """
        self._check_parameter_values(batch_size)

        all_nodes = list(self.graph.nodes(use_ilocs=True))
        # Use the sampling distribution as per node2vec: degree ** 0.75, normalised.
        degrees = self.graph.node_degrees(use_ilocs=True)
        sampling_distribution = np.array([degrees[n] ** 0.75 for n in all_nodes])
        sampling_distribution_norm = sampling_distribution / np.sum(
            sampling_distribution
        )

        walks = self.walker.run(nodes=self.nodes)

        # first item in each walk is the target/head node
        targets = [walk[0] for walk in walks]

        positive_pairs = np.array(
            [
                (target, positive_context)
                for target, walk in zip(targets, walks)
                for positive_context in walk[1:]
            ]
        )
        positive_pairs = self.graph.node_ids_to_ilocs(positive_pairs.flatten()).reshape(
            positive_pairs.shape
        )

        # One negative context per positive pair, sharing the positive pair's target.
        negative_samples = self.np_random.choice(
            all_nodes, size=len(positive_pairs), p=sampling_distribution_norm
        )
        negative_pairs = np.column_stack((positive_pairs[:, 0], negative_samples))

        pairs = np.concatenate((positive_pairs, negative_pairs), axis=0)
        labels = np.repeat([1, 0], len(positive_pairs))

        # shuffle indices - note this doesn't ensure an equal number of positive/negative examples in
        # each batch, just an equal number overall
        indices = self.np_random.permutation(len(pairs))

        batch_indices = [
            indices[i : i + batch_size] for i in range(0, len(indices), batch_size)
        ]

        return [(pairs[i], labels[i]) for i in batch_indices]

    def _check_parameter_values(self, batch_size):
        """
        Checks that the parameter values are valid or raises ValueError exceptions with a message indicating the
        parameter (the first one encountered in the checks) with invalid value.

        Args:
            batch_size: <int> number of samples to generate in each call of generator

        Raises:
            ValueError: if ``batch_size`` is missing, non-positive, or odd.
            TypeError: if ``batch_size`` is not an integer.
        """
        if (
            batch_size is None
        ):  # must provide a batch size since this is an indicator of how many samples to return
            raise ValueError(
                "({}) The batch_size must be provided to generate samples for each batch in the epoch".format(
                    type(self).__name__
                )
            )

        # must be a true integer; bool is excluded explicitly since it subclasses int
        if not isinstance(batch_size, int) or isinstance(batch_size, bool):
            raise TypeError(
                "({}) The batch_size must be positive integer.".format(
                    type(self).__name__
                )
            )

        if batch_size < 1:  # must be greater than 0
            raise ValueError(
                "({}) The batch_size must be positive integer.".format(
                    type(self).__name__
                )
            )

        if (
            batch_size % 2 != 0
        ):  # should be even since we generate 1 negative sample for each positive one.
            raise ValueError(
                "({}) The batch_size must be an even integer since equal number of positive and negative samples are generated in each batch.".format(
                    type(self).__name__
                )
            )
| 9,902 | 41.87013 | 410 | py |
stellargraph | stellargraph-master/stellargraph/layer/gcn_lstm.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import activations, initializers, constraints, regularizers
from tensorflow.keras.layers import Input, Layer, Dropout, LSTM, Dense, Permute, Reshape
from ..mapper import SlidingFeaturesNodeGenerator
from ..core.experimental import experimental
from ..core.utils import calculate_laplacian
class FixedAdjacencyGraphConvolution(Layer):
    """
    Graph Convolution (GCN) Keras layer whose adjacency matrix is fixed at construction time.

    The implementation is based on https://github.com/tkipf/keras-gcn.

    Original paper: Semi-Supervised Classification with Graph Convolutional Networks. Thomas N. Kipf,
    Max Welling, International Conference on Learning Representations (ICLR), 2017
    https://github.com/tkipf/gcn

    Notes:
      - The inputs are 3 dimensional tensors: batch size, sequence length, and number of nodes.
      - This class assumes that a simple unweighted or weighted adjacency matrix is passed to it,
        the normalized Laplacian matrix is calculated within the class.

    Args:
        units (int): dimensionality of output feature vectors
        A (N x N): weighted/unweighted adjacency matrix
        activation (str or func): nonlinear activation applied to layer's output to obtain output features
        use_bias (bool): toggles an optional bias
        kernel_initializer (str or func, optional): The initialiser to use for the weights.
        kernel_regularizer (str or func, optional): The regulariser to use for the weights.
        kernel_constraint (str or func, optional): The constraint to use for the weights.
        bias_initializer (str or func, optional): The initialiser to use for the bias.
        bias_regularizer (str or func, optional): The regulariser to use for the bias.
        bias_constraint (str or func, optional): The constraint to use for the bias.
    """
    def __init__(
        self,
        units,
        A,
        activation=None,
        use_bias=True,
        input_dim=None,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer="zeros",
        bias_regularizer=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Mirror the classic Keras convention of accepting input_dim as an
        # alternative to input_shape.
        if "input_shape" not in kwargs and input_dim is not None:
            kwargs["input_shape"] = (input_dim,)
        self.units = units
        # The raw adjacency is normalised once here; build() bakes the result
        # into a non-trainable layer weight.
        self.adj = calculate_laplacian(A)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)
        super().__init__(**kwargs)
    def get_config(self):
        """
        Gets class configuration for Keras serialization.
        Used by Keras model serialization.

        Returns:
            A dictionary that contains the config of the layer

        NOTE(review): the adjacency matrix ``A`` is not included in the config,
        so a layer restored via ``from_config`` would lack it — confirm whether
        serialization round-trips are expected to work for this layer.
        """
        config = {
            "units": self.units,
            "use_bias": self.use_bias,
            "activation": activations.serialize(self.activation),
            "kernel_initializer": initializers.serialize(self.kernel_initializer),
            "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "bias_constraint": constraints.serialize(self.bias_constraint),
        }
        base_config = super().get_config()
        return {**base_config, **config}
    def compute_output_shape(self, input_shapes):
        """
        Computes the output shape of the layer.

        Args:
            input_shapes (tuple of int): shape of the batched node-feature input.
                Shape tuples can include None for free dimensions, instead of an integer.

        Returns:
            An output shape tuple: the last (feature) dimension is replaced by ``units``.
        """
        feature_shape = input_shapes
        return feature_shape[0], feature_shape[1], self.units
    def build(self, input_shapes):
        """
        Builds the layer

        Args:
            input_shapes (list of int): shapes of the layer's inputs (the batches of node features)
        """
        _batch_dim, n_nodes, features = input_shapes
        # The normalised adjacency is stored as a frozen (trainable=False) weight
        # so it is saved/restored together with the model weights.
        self.A = self.add_weight(
            name="A",
            shape=(n_nodes, n_nodes),
            trainable=False,
            initializer=initializers.constant(self.adj),
        )
        # Dense kernel mapping input features to `units` outputs.
        self.kernel = self.add_weight(
            shape=(features, self.units),
            initializer=self.kernel_initializer,
            name="kernel",
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                # ensure the per-node bias can be broadcast across each feature
                shape=(n_nodes, 1),
                initializer=self.bias_initializer,
                name="bias",
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        else:
            self.bias = None
        self.built = True
    def call(self, features):
        """
        Applies the layer.

        Args:
            features (ndarray): node features (size B x N x F), where B is the batch size, F = TV is
                the feature size (consisting of the sequence length and the number of variates), and
                N is the number of nodes in the graph.

        Returns:
            Keras Tensor that represents the output of the layer.
        """
        # Calculate the layer operation of GCN: aggregate neighbour features via
        # the fixed (normalised) adjacency, then apply the dense kernel.
        # shape = B x F x N
        nodes_last = tf.transpose(features, [0, 2, 1])
        neighbours = K.dot(nodes_last, self.A)
        # shape = B x N x F
        h_graph = tf.transpose(neighbours, [0, 2, 1])
        # shape = B x N x units
        output = K.dot(h_graph, self.kernel)
        # Add optional bias & apply activation
        if self.bias is not None:
            # bias has shape (N, 1): one learnt offset per node, broadcast over units
            output += self.bias
        output = self.activation(output)
        return output
@experimental(
    reason="Lack of unit tests and code refinement", issues=[1132, 1526, 1564]
)
class GCN_LSTM:
    """
    GCN_LSTM is a univariate timeseries forecasting method. The architecture comprises of a stack of N1 Graph Convolutional layers followed by N2 LSTM layers, a Dropout layer, and a Dense layer.

    This main components of GNN architecture is inspired by: T-GCN: A Temporal Graph Convolutional Network for Traffic Prediction (https://arxiv.org/abs/1811.05320).
    The implementation of the above paper is based on one graph convolution layer stacked with a GRU layer.

    The StellarGraph implementation is built as a stack of the following set of layers:

    1. User specified no. of Graph Convolutional layers
    2. User specified no. of LSTM layers
    3. 1 Dense layer
    4. 1 Dropout layer.

    The last two layers consistently showed better performance and regularization experimentally.

    .. seealso::

       Example using GCN_LSTM: `spatio-temporal time-series prediction <https://stellargraph.readthedocs.io/en/stable/demos/time-series/gcn-lstm-time-series.html>`__.

       Appropriate data generator: :class:`.SlidingFeaturesNodeGenerator`.

       Related model: :class:`.GCN` for graphs without time-series node features.

    Args:
        seq_len: No. of LSTM cells
        adj: unweighted/weighted adjacency matrix of [no.of nodes by no. of nodes dimension
        gc_layer_sizes (list of int): Output sizes of Graph Convolution layers in the stack.
        lstm_layer_sizes (list of int): Output sizes of LSTM layers in the stack.
        generator (SlidingFeaturesNodeGenerator): A generator instance.
        bias (bool): If True, a bias vector is learnt for each layer in the GCN model.
        dropout (float): Dropout rate applied to input features of each GCN layer.
        gc_activations (list of str or func): Activations applied to each layer's output; defaults to ``['relu', ..., 'relu']``.
        lstm_activations (list of str or func): Activations applied to each layer's output; defaults to ``['tanh', ..., 'tanh']``.
        kernel_initializer (str or func, optional): The initialiser to use for the weights of each layer.
        kernel_regularizer (str or func, optional): The regulariser to use for the weights of each layer.
        kernel_constraint (str or func, optional): The constraint to use for the weights of each layer.
        bias_initializer (str or func, optional): The initialiser to use for the bias of each layer.
        bias_regularizer (str or func, optional): The regulariser to use for the bias of each layer.
        bias_constraint (str or func, optional): The constraint to use for the bias of each layer.
    """
    def __init__(
        self,
        seq_len,
        adj,
        gc_layer_sizes,
        lstm_layer_sizes,
        gc_activations=None,
        generator=None,
        lstm_activations=None,
        bias=True,
        dropout=0.5,
        kernel_initializer=None,
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer=None,
        bias_regularizer=None,
        bias_constraint=None,
    ):
        # A generator is mutually exclusive with explicit (seq_len, adj): it
        # supplies the adjacency, window size, and variate count itself.
        if generator is not None:
            if not isinstance(generator, SlidingFeaturesNodeGenerator):
                raise ValueError(
                    f"generator: expected a SlidingFeaturesNodeGenerator, found {type(generator).__name__}"
                )
            if seq_len is not None or adj is not None:
                raise ValueError(
                    "expected only one of generator and (seq_len, adj) to be specified, found multiple"
                )
            adj = generator.graph.to_adjacency_matrix(weighted=True).todense()
            seq_len = generator.window_size
            variates = generator.variates
        else:
            variates = None
        super(GCN_LSTM, self).__init__()
        n_gc_layers = len(gc_layer_sizes)
        n_lstm_layers = len(lstm_layer_sizes)
        self.lstm_layer_sizes = lstm_layer_sizes
        self.gc_layer_sizes = gc_layer_sizes
        self.bias = bias
        self.dropout = dropout
        self.adj = adj
        self.n_nodes = adj.shape[0]
        self.n_features = seq_len
        self.seq_len = seq_len
        # variates is only known when a generator was supplied; without one the
        # model treats the input as univariate (variates == 1).
        self.multivariate_input = variates is not None
        self.variates = variates if self.multivariate_input else 1
        self.outputs = self.n_nodes * self.variates
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)
        # Activation function for each gcn layer: a count mismatch is an error.
        if gc_activations is None:
            gc_activations = ["relu"] * n_gc_layers
        elif len(gc_activations) != n_gc_layers:
            raise ValueError(
                "Invalid number of activations; require one function per graph convolution layer"
            )
        self.gc_activations = gc_activations
        # Activation function for each lstm layer.
        # NOTE(review): unlike gc_activations above, a *short* lstm_activations
        # list is silently padded with "tanh" rather than rejected — confirm
        # this asymmetry is intentional.
        if lstm_activations is None:
            lstm_activations = ["tanh"] * n_lstm_layers
        elif len(lstm_activations) != n_lstm_layers:
            padding_size = n_lstm_layers - len(lstm_activations)
            if padding_size > 0:
                lstm_activations = lstm_activations + ["tanh"] * padding_size
            else:
                raise ValueError(
                    "Invalid number of activations; require one function per lstm layer"
                )
        self.lstm_activations = lstm_activations
        # Each GC layer outputs `variates * layer_size` so the variate dimension
        # can be recovered by reshaping in __call__.
        self._gc_layers = [
            FixedAdjacencyGraphConvolution(
                units=self.variates * layer_size,
                A=self.adj,
                activation=activation,
                kernel_initializer=self.kernel_initializer,
                kernel_regularizer=self.kernel_regularizer,
                kernel_constraint=self.kernel_constraint,
                bias_initializer=self.bias_initializer,
                bias_regularizer=self.bias_regularizer,
                bias_constraint=self.bias_constraint,
            )
            for layer_size, activation in zip(self.gc_layer_sizes, self.gc_activations)
        ]
        # All LSTM layers return sequences except the last, which emits a single
        # vector per batch element for the decoder.
        self._lstm_layers = [
            LSTM(layer_size, activation=activation, return_sequences=True)
            for layer_size, activation in zip(
                self.lstm_layer_sizes[:-1], self.lstm_activations
            )
        ]
        self._lstm_layers.append(
            LSTM(
                self.lstm_layer_sizes[-1],
                activation=self.lstm_activations[-1],
                return_sequences=False,
            )
        )
        self._decoder_layer = Dense(self.outputs, activation="sigmoid")
    def __call__(self, x):
        """Apply the GCN stack, LSTM stack, dropout and decoder to input tensors ``x``."""
        x_in, out_indices = x
        h_layer = x_in
        if not self.multivariate_input:
            # normalize to always have a final variate dimension, with V = 1 if it doesn't exist
            # shape = B x N x T x 1
            h_layer = tf.expand_dims(h_layer, axis=-1)
        # flatten variates into sequences, for convolution
        # shape B x N x (TV)
        h_layer = Reshape((self.n_nodes, self.seq_len * self.variates))(h_layer)
        for layer in self._gc_layers:
            h_layer = layer(h_layer)
        # return the layer to its natural multivariate tensor form
        # shape B x N x T' x V (where T' is the sequence length of the last GC)
        h_layer = Reshape((self.n_nodes, -1, self.variates))(h_layer)
        # put time dimension first for LSTM layers
        # shape B x T' x N x V
        h_layer = Permute((2, 1, 3))(h_layer)
        # flatten the variates across all nodes, shape B x T' x (N V)
        h_layer = Reshape((-1, self.n_nodes * self.variates))(h_layer)
        for layer in self._lstm_layers:
            h_layer = layer(h_layer)
        h_layer = Dropout(self.dropout)(h_layer)
        h_layer = self._decoder_layer(h_layer)
        if self.multivariate_input:
            # flatten things out to the multivariate shape
            # shape B x N x V
            h_layer = Reshape((self.n_nodes, self.variates))(h_layer)
        return h_layer
    def in_out_tensors(self):
        """
        Builds a GCN model for node feature prediction

        Returns:
            tuple: ``(x_inp, x_out)``, where ``x_inp`` is the Keras input tensor
            for the node features and ``x_out`` is a tensor of the GCN model output.

        NOTE(review): an ``out_indices_t`` input tensor is constructed and passed
        through ``self(...)`` but only the feature input (``x_inp[0]``) is
        returned to the caller — confirm this is intended (class is experimental).
        """
        # Inputs for features
        if self.multivariate_input:
            shape = (None, self.n_nodes, self.n_features, self.variates)
        else:
            shape = (None, self.n_nodes, self.n_features)
        x_t = Input(batch_shape=shape)
        # Indices to gather for model output
        out_indices_t = Input(batch_shape=(None, self.n_nodes), dtype="int32")
        x_inp = [x_t, out_indices_t]
        x_out = self(x_inp)
        return x_inp[0], x_out
| 16,243 | 38.523114 | 196 | py |
stellargraph | stellargraph-master/stellargraph/layer/hinsage.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Heterogeneous GraphSAGE and compatible aggregator layers
"""
__all__ = ["HinSAGE", "MeanHinAggregator"]
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K, Input
from tensorflow.keras.layers import Lambda, Dropout, Reshape
from tensorflow.keras.utils import Sequence
from tensorflow.keras import activations, initializers, regularizers, constraints
from typing import List, Callable, Tuple, Dict, Union, AnyStr
import itertools as it
import operator as op
import warnings
from .misc import deprecated_model_function
from ..mapper import HinSAGENodeGenerator, HinSAGELinkGenerator
# Type alias: HinSAGE aggregators are ordinary Keras layers.
HinSAGEAggregator = Layer
class MeanHinAggregator(HinSAGEAggregator):
"""Mean Aggregator for HinSAGE implemented with Keras base layer
Args:
output_dim (int): Output dimension
bias (bool): Use bias in layer or not (Default False)
act (Callable or str): name of the activation function to use (must be a Keras
activation function), or alternatively, a TensorFlow operation.
kernel_initializer (str or func): The initialiser to use for the weights
kernel_regularizer (str or func): The regulariser to use for the weights
kernel_constraint (str or func): The constraint to use for the weights
bias_initializer (str or func): The initialiser to use for the bias
bias_regularizer (str or func): The regulariser to use for the bias
bias_constraint (str or func): The constraint to use for the bias
"""
def __init__(
self,
output_dim: int = 0,
bias: bool = False,
act: Union[Callable, AnyStr] = "relu",
kernel_initializer="glorot_uniform",
kernel_regularizer=None,
kernel_constraint=None,
bias_initializer="zeros",
bias_regularizer=None,
bias_constraint=None,
**kwargs,
):
self.output_dim = output_dim
if output_dim % 2 != 0:
raise ValueError("The output_dim must be a multiple of two.")
self.half_output_dim = output_dim // 2
self.has_bias = bias
self.act = activations.get(act)
self.nr = None
self.w_neigh = []
self.w_self = None
self.bias = None
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_initializer = initializers.get(bias_initializer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.bias_constraint = constraints.get(bias_constraint)
super().__init__(**kwargs)
def get_config(self):
"""
Gets class configuration for Keras serialization
"""
config = {
"output_dim": self.output_dim,
"bias": self.has_bias,
"act": activations.serialize(self.act),
"kernel_initializer": initializers.serialize(self.kernel_initializer),
"kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"bias_initializer": initializers.serialize(self.bias_initializer),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
base_config = super().get_config()
return {**base_config, **config}
def build(self, input_shape):
"""
Builds layer
Args:
input_shape (list of list of int): Shape of input per neighbour type.
"""
# Weight matrix for each type of neighbour
# If there are no neighbours (input_shape[x][2]) for an input
# then do not create weights as they are not used.
self.nr = len(input_shape) - 1
self.w_neigh = [
self.add_weight(
name="w_neigh_" + str(r),
shape=(int(input_shape[1 + r][3]), self.half_output_dim),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
)
if input_shape[1 + r][2] > 0
else None
for r in range(self.nr)
]
# Weight matrix for self
self.w_self = self.add_weight(
name="w_self",
shape=(int(input_shape[0][2]), self.half_output_dim),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
)
# Optional bias
if self.has_bias:
self.bias = self.add_weight(
name="bias",
shape=[self.output_dim],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
)
super().build(input_shape)
    def call(self, x, **kwargs):
        """
        Apply mean aggregation on input tensors, x.

        Args:
            x: List of Keras Tensors with the following elements

            - x[0]: tensor of self features shape (n_batch, n_head, n_feat)
            - x[1+r]: tensors of neighbour features each of shape (n_batch, n_head, n_neighbour[r], n_feat[r])

        Returns:
            Keras Tensor representing the aggregated embeddings in the input.
        """
        # Calculate the mean vectors over the neighbours of each relation (edge) type
        neigh_agg_by_relation = []
        for r in range(self.nr):
            # The neighbour input tensors for relation r
            z = x[1 + r]
            # If there are neighbours aggregate over them (mean over axis 2,
            # the neighbour axis, then project with the per-relation weight)
            if z.shape[2] > 0:
                z_agg = K.dot(K.mean(z, axis=2), self.w_neigh[r])
            # Otherwise add a synthetic zero vector
            else:
                z_shape = K.shape(z)
                w_shape = self.half_output_dim
                z_agg = tf.zeros((z_shape[0], z_shape[1], w_shape))
            neigh_agg_by_relation.append(z_agg)
        # Calculate the self vector shape (n_batch, n_head, n_out_self)
        from_self = K.dot(x[0], self.w_self)
        # Sum the contributions from all neighbour averages shape (n_batch, n_head, n_out_neigh)
        # NOTE(review): divides by self.nr — assumes at least one relation type
        # exists (nr >= 1); nr == 0 would raise ZeroDivisionError.
        from_neigh = sum(neigh_agg_by_relation) / self.nr
        # Concatenate self + neighbour features, shape (n_batch, n_head, n_out)
        total = K.concatenate(
            [from_self, from_neigh], axis=2
        )  # YT: this corresponds to concat=Partial
        # TODO: implement concat=Full and concat=False
        return self.act((total + self.bias) if self.has_bias else total)
def compute_output_shape(self, input_shape):
"""
Computes the output shape of the layer.
Assumes that the layer will be built to match that input shape provided.
Args:
input_shape (tuple of int)
Shape tuples can include `None` for free dimensions, instead of an integer.
Returns:
An input shape tuple.
"""
return input_shape[0][0], input_shape[0][1], self.output_dim
def _require_without_generator(value, name):
if value is not None:
return value
else:
raise ValueError(
f"{name}: expected a value for 'input_neighbor_tree', 'n_samples', 'input_dim', and "
f"'multiplicity' when 'generator' is not provided, found {name}=None."
)
class HinSAGE:
    """
    Implementation of the GraphSAGE algorithm extended for heterogeneous graphs with Keras layers.
    To use this class as a Keras model, the features and graph should be supplied using the
    :class:`.HinSAGENodeGenerator` class for node inference models or the
    :class:`.HinSAGELinkGenerator` class for link inference models. The `.in_out_tensors` method should
    be used to create a Keras model from the `GraphSAGE` object.
    Currently the class supports node or link prediction models which are built depending on whether
    a `HinSAGENodeGenerator` or `HinSAGELinkGenerator` object is specified.
    The models are built for a single node or link type. For example if you have nodes of types 'A' and 'B'
    you can build a link model for only a single pair of node types, for example ('A', 'B'), which should
    be specified in the `HinSAGELinkGenerator`.
    If you feed links into the model that do not have these node types (in correct order) an error will be
    raised.
    Examples:
        Creating a two-level GrapSAGE node classification model on nodes of type 'A' with hidden node sizes of 8 and 4
        and 10 neighbours sampled at each layer using an existing :class:`.StellarGraph` object `G`
        containing the graph and node features::
            generator = HinSAGENodeGenerator(
                G, batch_size=50, num_samples=[10,10], head_node_type='A'
            )
            gat = HinSAGE(
                layer_sizes=[8, 4],
                activations=["relu","softmax"],
                generator=generator,
            )
            x_inp, predictions = gat.in_out_tensors()
    Creating a two-level GrapSAGE link classification model on nodes pairs of type ('A', 'B')
    with hidden node sizes of 8 and 4 and 5 neighbours sampled at each layer::
        generator = HinSAGELinkGenerator(
            G, batch_size=50, num_samples=[5,5], head_node_types=('A','B')
        )
        gat = HinSAGE(
            layer_sizes=[8, 4],
            activations=["relu","softmax"],
            generator=generator,
        )
        x_inp, predictions = gat.in_out_tensors()
    Note that passing a `NodeSequence` or `LinkSequence` object from the `generator.flow(...)` method
    as the `generator=` argument is now deprecated and the base generator object should be passed instead.
    .. seealso::
       Examples using HinSAGE:
       - `link prediction <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/hinsage-link-prediction.html>`__
       - `unsupervised representation learning with Deep Graph Infomax <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/deep-graph-infomax-embeddings.html>`__
       Appropriate data generators: :class:`.HinSAGENodeGenerator`, :class:`.HinSAGELinkGenerator`.
       Related models:
       - :class:`.GraphSAGE` for homogeneous graphs
       - :class:`.DirectedGraphSAGE` for homogeneous directed graphs
       - :class:`.DeepGraphInfomax` for unsupervised training
       Aggregators: :class:`.MeanHinAggregator`.
       The `Heterogeneous GraphSAGE (HinSAGE) <https://stellargraph.readthedocs.io/en/stable/hinsage.html>`__ explanatory document has more theoretical details.
    Args:
        layer_sizes (list): Hidden feature dimensions for each layer
        generator (HinSAGENodeGenerator or HinSAGELinkGenerator):
            If specified, required model arguments such as the number of samples
            will be taken from the generator object. See note below.
        aggregator (HinSAGEAggregator): The HinSAGE aggregator to use; defaults to the `MeanHinAggregator`.
        bias (bool): If True (default), a bias vector is learnt for each layer.
        dropout (float): The dropout supplied to each layer; defaults to no dropout.
        normalize (str): The normalization used after each layer; defaults to L2 normalization.
        activations (list): Activations applied to each layer's output;
            defaults to ``['relu', ..., 'relu', 'linear']``.
        kernel_initializer (str or func, optional): The initialiser to use for the weights of each layer.
        kernel_regularizer (str or func, optional): The regulariser to use for the weights of each layer.
        kernel_constraint (str or func, optional): The constraint to use for the weights of each layer.
        bias_initializer (str or func, optional): The initialiser to use for the bias of each layer.
        bias_regularizer (str or func, optional): The regulariser to use for the bias of each layer.
        bias_constraint (str or func, optional): The constraint to use for the bias of each layer.
        n_samples (list, optional): The number of samples per layer in the model.
        input_neighbor_tree (list of tuple, optional): A list of (node_type, [children]) tuples that
            specify the subtree to be created by the HinSAGE model.
        input_dim (dict, optional): The input dimensions for each node type as a dictionary of the form
            ``{node_type: feature_size}``.
        multiplicity (int, optional): The number of nodes to process at a time. This is 1 for a node
            inference and 2 for link inference (currently no others are supported).
    .. note::
        The values for ``n_samples``, ``input_neighbor_tree``, ``input_dim``, and ``multiplicity`` are
        obtained from the provided ``generator`` by default. The additional keyword arguments for these
        parameters provide an alternative way to specify them if a generator cannot be supplied.
    """
    def __init__(
        self,
        layer_sizes,
        generator=None,
        aggregator=None,
        bias=True,
        dropout=0.0,
        normalize="l2",
        activations=None,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer="zeros",
        bias_regularizer=None,
        bias_constraint=None,
        n_samples=None,
        input_neighbor_tree=None,
        input_dim=None,
        multiplicity=None,
    ):
        # Set the aggregator layer used in the model
        if aggregator is None:
            self._aggregator = MeanHinAggregator
        elif issubclass(aggregator, Layer):
            self._aggregator = aggregator
        else:
            raise TypeError("Aggregator should be a subclass of Keras Layer")
        # Set the normalization layer used in the model
        if normalize == "l2":
            self._normalization = Lambda(lambda x: K.l2_normalize(x, axis=-1))
        elif normalize is None or normalize == "none" or normalize == "None":
            self._normalization = Lambda(lambda x: x)
        else:
            raise ValueError(
                "Normalization should be either 'l2' or 'none'; received '{}'".format(
                    normalize
                )
            )
        # Get the sampling tree, input_dim, and num_samples from the generator
        # if no generator these must be supplied in kwargs
        if generator is not None:
            self._get_sizes_from_generator(generator)
        else:
            self.subtree_schema = _require_without_generator(
                input_neighbor_tree, "input_neighbor_tree"
            )
            self.n_samples = _require_without_generator(n_samples, "n_samples")
            self.input_dims = _require_without_generator(input_dim, "input_dim")
            self.multiplicity = _require_without_generator(multiplicity, "multiplicity")
        # Set parameters for the model
        self.n_layers = len(self.n_samples)
        self.bias = bias
        self.dropout = dropout
        # Neighbourhood info per layer: each successive tree is a truncation of
        # the previous one (only schema entries that still have children).
        self.neigh_trees = self._eval_neigh_tree_per_layer(
            [li for li in self.subtree_schema if len(li[1]) > 0]
        )
        # Depth of each input tensor i.e. number of hops from root nodes:
        # counts in how many of the per-layer trees entry i still appears.
        self._depths = [
            self.n_layers
            + 1
            - sum([1 for li in [self.subtree_schema] + self.neigh_trees if i < len(li)])
            for i in range(len(self.subtree_schema))
        ]
        # Dict of {node type: dimension} per layer; layer 0 uses the (dict of)
        # input feature sizes, later layers broadcast the scalar layer size
        # across all node types present at that layer.
        self.dims = [
            dim
            if isinstance(dim, dict)
            else {k: dim for k, _ in ([self.subtree_schema] + self.neigh_trees)[layer]}
            for layer, dim in enumerate([self.input_dims] + layer_sizes)
        ]
        # Activation function for each layer
        if activations is None:
            activations = ["relu"] * (self.n_layers - 1) + ["linear"]
        elif len(activations) != self.n_layers:
            raise ValueError(
                "Invalid number of activations; require one function per layer"
            )
        self.activations = activations
        # Aggregator functions for each layer: one aggregator instance per
        # (layer, output node type) pair.
        self._aggs = [
            {
                node_type: self._aggregator(
                    output_dim,
                    bias=self.bias,
                    act=self.activations[layer],
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer,
                    kernel_constraint=kernel_constraint,
                    bias_initializer=bias_initializer,
                    bias_regularizer=bias_regularizer,
                    bias_constraint=bias_constraint,
                )
                for node_type, output_dim in self.dims[layer + 1].items()
            }
            for layer in range(self.n_layers)
        ]
    def _get_sizes_from_generator(self, generator):
        """
        Sets n_samples and input_feature_size from the generator.
        Args:
            generator: The supplied generator.
        """
        if not isinstance(generator, (HinSAGELinkGenerator, HinSAGENodeGenerator)):
            errmsg = "Generator should be an instance of HinSAGELinkGenerator or HinSAGENodeGenerator"
            if isinstance(generator, (NodeSequence, LinkSequence)):
                errmsg = (
                    "Passing a Sequence object as the generator to HinSAGE is no longer supported. "
                    + errmsg
                )
            raise TypeError(errmsg)
        self.n_samples = generator.num_samples
        self.subtree_schema = generator.schema.type_adjacency_list(
            generator.head_node_types, len(self.n_samples)
        )
        self.input_dims = generator.graph.node_feature_sizes()
        self.multiplicity = generator.multiplicity
    @staticmethod
    def _eval_neigh_tree_per_layer(input_tree):
        """
        Function to evaluate the neighbourhood tree structure for every layer. The tree
        structure at each layer is a truncated version of the previous layer.
        Args:
            input_tree: Neighbourhood tree for the input batch
        Returns:
            List of neighbourhood trees
        """
        # Keep only entries whose children are all still present, then recurse
        # until the tree stops shrinking.
        reduced = [
            li
            for li in input_tree
            if all(li_neigh < len(input_tree) for li_neigh in li[1])
        ]
        return (
            [input_tree]
            if len(reduced) == 0
            else [input_tree] + HinSAGE._eval_neigh_tree_per_layer(reduced)
        )
    def __call__(self, xin: List):
        """
        Apply aggregator layers
        Args:
            xin (list of Tensor): Batch input features
        Returns:
            Output tensor
        """
        def apply_layer(x: List, layer: int):
            """
            Compute the list of output tensors for a single HinSAGE layer
            Args:
                x (List[Tensor]): Inputs to the layer
                layer (int): Layer index
            Returns:
                Outputs of applying the aggregators as a list of Tensors
            """
            layer_out = []
            for i, (node_type, neigh_indices) in enumerate(self.neigh_trees[layer]):
                # The shape of the head node is used for reshaping the neighbour inputs
                head_shape = K.int_shape(x[i])[1]
                # Apply dropout and reshape neighbours per node per layer
                neigh_list = [
                    Dropout(self.dropout)(
                        Reshape(
                            (
                                head_shape,
                                self.n_samples[self._depths[i]],
                                self.dims[layer][self.subtree_schema[neigh_index][0]],
                            )
                        )(x[neigh_index])
                    )
                    for neigh_index in neigh_indices
                ]
                # Apply dropout to head inputs
                x_head = Dropout(self.dropout)(x[i])
                # Apply aggregator to head node and reshaped neighbour nodes
                layer_out.append(self._aggs[layer][node_type]([x_head] + neigh_list))
            return layer_out
        # Form HinSAGE layers iteratively
        self.layer_tensors = []
        h_layer = xin
        for layer in range(0, self.n_layers):
            h_layer = apply_layer(h_layer, layer)
            self.layer_tensors.append(h_layer)
        # Remove neighbourhood dimension from output tensors
        # note that at this point h_layer contains the output tensor of the top (last applied) layer of the stack
        # (only head tensors, with a singleton "head" dimension, survive)
        h_layer = [
            Reshape(K.int_shape(x)[2:])(x) for x in h_layer if K.int_shape(x)[1] == 1
        ]
        # Return final layer output tensor with optional normalization;
        # a single tensor is returned for node models, a list for link models.
        return (
            self._normalization(h_layer[0])
            if len(h_layer) == 1
            else [self._normalization(xi) for xi in h_layer]
        )
    def _input_shapes(self) -> List[Tuple[int, int]]:
        """
        Returns the input shapes for the tensors of the supplied neighbourhood type tree
        Returns:
            A list of tuples giving the shape (number of nodes, feature size) for
            the corresponding item in the neighbourhood type tree (self.subtree_schema)
        """
        # Cumulative product of samples per hop: number of sampled nodes at each depth.
        neighbor_sizes = list(it.accumulate([1] + self.n_samples, op.mul))
        def get_shape(stree, cnode, level=0):
            # Recursively collect {schema index: (n_nodes, feature_size)} for
            # the subtree rooted at cnode.
            adj = stree[cnode][1]
            size_dict = {
                cnode: (neighbor_sizes[level], self.input_dims[stree[cnode][0]])
            }
            if len(adj) > 0:
                size_dict.update(
                    {
                        k: s
                        for a in adj
                        for k, s in get_shape(stree, a, level + 1).items()
                    }
                )
            return size_dict
        input_shapes = dict()
        for ii in range(len(self.subtree_schema)):
            input_shapes_ii = get_shape(self.subtree_schema, ii)
            # Update input_shapes if input_shapes_ii.keys() are not already in input_shapes.keys():
            if (
                len(set(input_shapes_ii.keys()).intersection(set(input_shapes.keys())))
                == 0
            ):
                input_shapes.update(input_shapes_ii)
        return [input_shapes[ii] for ii in range(len(self.subtree_schema))]
    def in_out_tensors(self):
        """
        Builds a HinSAGE model for node or link/node pair prediction, depending on the generator used to construct
        the model (whether it is a node or link/node pair generator).
        Returns:
            tuple: ``(x_inp, x_out)``, where ``x_inp`` is a list of Keras input tensors
            for the specified HinSAGE model (either node or link/node pair model) and ``x_out`` contains
            model output tensor(s) of shape (batch_size, layer_sizes[-1]).
        """
        # Create tensor inputs
        x_inp = [Input(shape=s) for s in self._input_shapes()]
        # Output from HinSAGE model
        x_out = self(x_inp)
        return x_inp, x_out
    def default_model(self, flatten_output=True):
        # Deprecated alias for in_out_tensors(); flatten_output is ignored.
        warnings.warn(
            "The .default_model() method is deprecated. Please use .in_out_tensors() method instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.in_out_tensors()
    # Deprecated alias: HinSAGE(...).build() -> in_out_tensors()
    build = deprecated_model_function(in_out_tensors, "build")
| 24,550 | 38.726537 | 173 | py |
stellargraph | stellargraph-master/stellargraph/layer/graphsage.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GraphSAGE and compatible aggregator layers
"""
__all__ = [
"GraphSAGE",
"MeanAggregator",
"MaxPoolingAggregator",
"MeanPoolingAggregator",
"AttentionalAggregator",
"DirectedGraphSAGE",
]
import warnings
import numpy as np
from tensorflow.keras.layers import Layer
from tensorflow.keras import Input
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Lambda, Dropout, Reshape, LeakyReLU
from tensorflow.keras.utils import Sequence
from tensorflow.keras import activations, initializers, constraints, regularizers
from typing import List, Tuple, Callable, AnyStr, Union
from ..mapper import (
GraphSAGENodeGenerator,
GraphSAGELinkGenerator,
DirectedGraphSAGENodeGenerator,
DirectedGraphSAGELinkGenerator,
NodeSequence,
LinkSequence,
)
from .misc import deprecated_model_function
from ..connector.neo4j.mapper import (
Neo4jGraphSAGENodeGenerator,
Neo4jDirectedGraphSAGENodeGenerator,
)
class GraphSAGEAggregator(Layer):
    """
    Base class for GraphSAGE aggregators.

    Subclasses implement :meth:`group_aggregate` (and optionally override
    :meth:`_build_group_weights` / :meth:`calculate_group_sizes`) to define how
    each input "group" (the head nodes plus one group per sampled
    neighbourhood) is reduced and projected.

    Args:
        output_dim (int): Output dimension
        bias (bool): Optional flag indicating whether (True) or not (False; default)
            a bias term should be included.
        act (Callable or str): name of the activation function to use (must be a
            Keras activation function), or alternatively, a TensorFlow operation.
        kernel_initializer (str or func): The initialiser to use for the weights
        kernel_regularizer (str or func): The regulariser to use for the weights
        kernel_constraint (str or func): The constraint to use for the weights
        bias_initializer (str or func): The initialiser to use for the bias
        bias_regularizer (str or func): The regulariser to use for the bias
        bias_constraint (str or func): The constraint to use for the bias
    """
    def __init__(
        self,
        output_dim: int = 0,
        bias: bool = False,
        act: Union[Callable, AnyStr] = "relu",
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer="zeros",
        bias_regularizer=None,
        bias_constraint=None,
        **kwargs,
    ):
        self.output_dim = output_dim
        self.has_bias = bias
        self.act = activations.get(act)
        super().__init__(**kwargs)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)
        # These will be filled in at build time
        self.bias = None
        self.w_self = None
        self.w_group = None
        self.weight_dims = None
        self.included_weight_groups = None
    def get_config(self):
        """
        Gets class configuration for Keras serialization
        """
        config = {
            "output_dim": self.output_dim,
            "bias": self.has_bias,
            "act": activations.serialize(self.act),
            "kernel_initializer": initializers.serialize(self.kernel_initializer),
            "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "bias_constraint": constraints.serialize(self.bias_constraint),
        }
        base_config = super().get_config()
        return {**base_config, **config}
    def calculate_group_sizes(self, input_shape):
        """
        Calculates the output size for each input group. The results are stored in two variables:

        * ``self.included_weight_groups``: if the corresponding entry is True then the input group
          is valid and should be used.
        * ``self.weight_dims``: the size of the output from this group.

        Args:
            input_shape (list of list of int): Shape of input tensors for self
                and neighbour features
        """
        # If the neighbours are zero-dimensional for any of the shapes
        # in the input, do not use the input group in the model.
        # XXX Ignore batch size, since test dim != 0 evaluates to None!!
        self.included_weight_groups = [
            all(dim != 0 for dim in group_shape[1:]) for group_shape in input_shape
        ]
        # The total number of enabled input groups
        num_groups = np.sum(self.included_weight_groups)
        if num_groups < 1:
            raise ValueError(
                "There must be at least one input with a non-zero neighbourhood dimension"
            )
        # Calculate the dimensionality of each group, and put remainder into the first group
        # with non-zero dimensions, which should be the head node group.
        group_output_dim = self.output_dim // num_groups
        remainder_dim = self.output_dim - num_groups * group_output_dim
        weight_dims = []
        for g in self.included_weight_groups:
            if g:
                group_dim = group_output_dim + remainder_dim
                remainder_dim = 0
            else:
                group_dim = 0
            weight_dims.append(group_dim)
        self.weight_dims = weight_dims
    def build(self, input_shape):
        """
        Builds the weight tensor corresponding to the features
        of the initial nodes in sampled random walks.
        Optionally builds the weight tensor(s) corresponding
        to sampled neighbourhoods, if required.
        Optionally builds the bias tensor, if requested.

        Args:
            input_shape (list of list of int): Shape of input tensors for self
                and neighbour features
        """
        if not isinstance(input_shape, list):
            raise ValueError(
                "Expected a list of inputs, not {}".format(type(input_shape).__name__)
            )
        # Configure bias vector, if used.
        if self.has_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.output_dim,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
            )
        # Calculate weight size for each input group
        self.calculate_group_sizes(input_shape)
        # Configure weights for input groups, if used.
        # Groups that are disabled keep a None placeholder so that indexing
        # by group number stays consistent in group_aggregate().
        w_group = [None] * len(input_shape)
        for ii, g_shape in enumerate(input_shape):
            if self.included_weight_groups[ii]:
                weight = self._build_group_weights(
                    g_shape, self.weight_dims[ii], group_idx=ii
                )
                w_group[ii] = weight
        self.w_group = w_group
        # Signal that the build has completed.
        super().build(input_shape)
    def _build_group_weights(self, in_shape, out_size, group_idx=0):
        """
        Builds the weight tensor(s) corresponding to the features of the input groups.

        Args:
            in_shape (list of int): Shape of input tensor for single group
            out_size (int): The size of the output vector for this group
            group_idx (int): The index of the input group

        Returns:
            A single dense weight of shape ``(feature_size, out_size)``.
        """
        weight = self.add_weight(
            shape=(int(in_shape[-1]), out_size),
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            name=f"weight_g{group_idx}",
        )
        return weight
    def aggregate_neighbours(self, x_neigh, group_idx: int = 0):
        """
        Override with a method to aggregate tensors over neighbourhood.

        Args:
            x_neigh: The input tensor representing the sampled neighbour nodes.
            group_idx: Optional neighbourhood index used for multi-dimensional hops.

        Returns:
            A tensor aggregation of the input nodes features.
        """
        raise NotImplementedError(
            "The GraphSAGEAggregator base class should not be directly instantiated"
        )
    def call(self, inputs, **kwargs):
        """
        Apply aggregator on the input tensors, `inputs`

        Args:
            inputs: List of Keras tensors

        Returns:
            Keras Tensor representing the aggregated embeddings in the input.
        """
        # If a neighbourhood dimension exists for the group, aggregate over the neighbours
        # otherwise create a simple layer.
        sources = []
        for ii, x in enumerate(inputs):
            # If the group is included, apply aggregation and collect the output tensor
            # otherwise, this group is ignored
            if self.included_weight_groups[ii]:
                x_agg = self.group_aggregate(x, group_idx=ii)
                sources.append(x_agg)
        # Concatenate outputs from all groups
        # TODO: Generalize to sum a subset of groups.
        h_out = K.concatenate(sources, axis=2)
        # Optionally add bias
        if self.has_bias:
            h_out = h_out + self.bias
        # Finally, apply activation
        return self.act(h_out)
    def compute_output_shape(self, input_shape):
        """
        Computes the output shape of the layer.
        Assumes that the layer will be built to match that input shape provided.

        Args:
            input_shape (tuple of int)
                Shape tuples can include None for free dimensions, instead of an integer.

        Returns:
            The output shape calculated from the input shape, this is of the form
            (batch_num, head_num, output_dim)
        """
        return input_shape[0][0], input_shape[0][1], self.output_dim
    def group_aggregate(self, x_neigh, group_idx=0):
        """
        Override with a method to aggregate tensors over the neighbourhood for each group.

        Args:
            x_neigh (tf.Tensor): : The input tensor representing the sampled neighbour nodes.
            group_idx (int, optional): Group index.

        Returns:
            :class:`tensorflow.Tensor`: A tensor aggregation of the input nodes features.
        """
        raise NotImplementedError(
            "The GraphSAGEAggregator base class should not be directly instantiated"
        )
class MeanAggregator(GraphSAGEAggregator):
    """
    GraphSAGE mean aggregator, implemented as a Keras layer.

    Args:
        output_dim (int): Output dimension
        bias (bool): Optional bias
        act (Callable or str): name of the activation function to use (must be a
            Keras activation function), or alternatively, a TensorFlow operation.
    """
    def group_aggregate(self, x_group, group_idx=0):
        """
        Mean-aggregate a group's tensor and project it to this group's output size.

        Args:
            x_group (tf.Tensor): The input tensor representing the sampled neighbour nodes.
            group_idx (int, optional): Group index.

        Returns:
            :class:`tensorflow.Tensor`: A tensor aggregation of the input nodes features.
        """
        # Group 0 holds the head ("self") nodes, which have no neighbour axis
        # to average over; every other group is reduced over its neighbours.
        pooled = x_group if group_idx == 0 else K.mean(x_group, axis=2)
        # Dense projection with the per-group weight built by the base class.
        return K.dot(pooled, self.w_group[group_idx])
class MaxPoolingAggregator(GraphSAGEAggregator):
    """
    Max Pooling Aggregator for GraphSAGE implemented with Keras base layer

    Implements the aggregator of Eq. (3) in Hamilton et al. (2017)

    Args:
        output_dim (int): Output dimension
        bias (bool): Optional bias
        act (Callable or str): name of the activation function to use (must be a
            Keras activation function), or alternatively, a TensorFlow operation.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # TODO: These should be user parameters
        # Hidden pooling layer: same width as the output, ReLU activated.
        self.hidden_dim = self.output_dim
        self.hidden_act = activations.get("relu")
    def _build_group_weights(self, in_shape, out_size, group_idx=0):
        """
        Builds the weight tensor(s) corresponding to the features of the input groups.

        Args:
            in_shape (list of int): Shape of input tensor for single group
            out_size (int): The size of the output vector for this group
            group_idx (int): The index of the input group

        Returns:
            For the head group (index 0) a single dense weight; for neighbour
            groups a list ``[w_group, w_pool, b_pool]`` (output projection and
            the hidden pooling layer's weight and bias).
        """
        if group_idx == 0:
            weights = self.add_weight(
                name=f"w_g{group_idx}",
                shape=(int(in_shape[-1]), out_size),
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
                trainable=True,
            )
        else:
            w_group = self.add_weight(
                name=f"w_g{group_idx}",
                shape=(self.hidden_dim, out_size),
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
                trainable=True,
            )
            w_pool = self.add_weight(
                name=f"w_pool_g{group_idx}",
                shape=(int(in_shape[-1]), self.hidden_dim),
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
                trainable=True,
            )
            b_pool = self.add_weight(
                name=f"b_pool_g{group_idx}",
                shape=(self.hidden_dim,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
            )
            weights = [w_group, w_pool, b_pool]
        return weights
    def group_aggregate(self, x_group, group_idx=0):
        """
        Aggregates the group tensors by max-pooling of neighbours

        Args:
            x_group (tf.Tensor): : The input tensor representing the sampled neighbour nodes.
            group_idx (int, optional): Group index.

        Returns:
            :class:`tensorflow.Tensor`: A tensor aggregation of the input nodes features.
        """
        if group_idx == 0:
            # Do not aggregate features for head nodes
            x_agg = K.dot(x_group, self.w_group[0])
        else:
            w_g, w_pool, b_pool = self.w_group[group_idx]
            # Pass neighbour features through a dense layer with w_pool, b_pool
            xw_neigh = self.hidden_act(K.dot(x_group, w_pool) + b_pool)
            # Take max of this tensor over neighbour dimension
            x_agg = K.max(xw_neigh, axis=2)
            # Final output is a dense layer over the aggregated tensor
            x_agg = K.dot(x_agg, w_g)
        return x_agg
class MeanPoolingAggregator(GraphSAGEAggregator):
    """
    Mean Pooling Aggregator for GraphSAGE implemented with Keras base layer

    Implements the aggregator of Eq. (3) in Hamilton et al. (2017), with max pooling replaced with mean pooling

    Args:
        output_dim (int): Output dimension
        bias (bool): Optional bias
        act (Callable or str): name of the activation function to use (must be a
            Keras activation function), or alternatively, a TensorFlow operation.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # TODO: These should be user parameters
        # Hidden pooling layer: same width as the output, ReLU activated.
        self.hidden_dim = self.output_dim
        self.hidden_act = activations.get("relu")
    def _build_group_weights(self, in_shape, out_size, group_idx=0):
        """
        Builds the weight tensor(s) corresponding to the features of the input groups.

        Args:
            in_shape (list of int): Shape of input tensor for single group
            out_size (int): The size of the output vector for this group
            group_idx (int): The index of the input group

        Returns:
            For the head group (index 0) a single dense weight; for neighbour
            groups a list ``[w_group, w_pool, b_pool]`` (output projection and
            the hidden pooling layer's weight and bias).
        """
        if group_idx == 0:
            weights = self.add_weight(
                name=f"w_g{group_idx}",
                shape=(int(in_shape[-1]), out_size),
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
                trainable=True,
            )
        else:
            w_group = self.add_weight(
                name=f"w_g{group_idx}",
                shape=(self.hidden_dim, out_size),
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
                trainable=True,
            )
            w_pool = self.add_weight(
                name=f"w_pool_g{group_idx}",
                shape=(int(in_shape[-1]), self.hidden_dim),
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
                trainable=True,
            )
            b_pool = self.add_weight(
                name=f"b_pool_g{group_idx}",
                shape=(self.hidden_dim,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
            )
            weights = [w_group, w_pool, b_pool]
        return weights
    def group_aggregate(self, x_group, group_idx=0):
        """
        Aggregates the group tensors by mean-pooling of neighbours

        Args:
            x_group (tf.Tensor): : The input tensor representing the sampled neighbour nodes.
            group_idx (int, optional): Group index.

        Returns:
            :class:`tensorflow.Tensor`: A tensor aggregation of the input nodes features.
        """
        if group_idx == 0:
            # Do not aggregate features for head nodes
            x_agg = K.dot(x_group, self.w_group[0])
        else:
            w_g, w_pool, b_pool = self.w_group[group_idx]
            # Pass neighbour features through a dense layer with w_pool, b_pool
            xw_neigh = self.hidden_act(K.dot(x_group, w_pool) + b_pool)
            # Take mean of this tensor over neighbour dimension
            x_agg = K.mean(xw_neigh, axis=2)
            # Final output is a dense layer over the aggregated tensor
            x_agg = K.dot(x_agg, w_g)
        return x_agg
class AttentionalAggregator(GraphSAGEAggregator):
"""
Attentional Aggregator for GraphSAGE implemented with Keras base layer
Implements the aggregator of Veličković et al. "Graph Attention Networks" ICLR 2018
Args:
output_dim (int): Output dimension
bias (bool): Optional bias
act (Callable or str): name of the activation function to use (must be a
Keras activation function), or alternatively, a TensorFlow operation.
"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # TODO: How can we expose these options to the user?
        # Attention hidden size matches the output size; the attention scores
        # use a LeakyReLU with slope 0.2 (the value used in the GAT paper).
        self.hidden_dim = self.output_dim
        self.attn_act = LeakyReLU(0.2)
def _build_group_weights(self, in_shape, out_size, group_idx=0):
"""
Builds the weight tensor(s) corresponding to the features of the input groups.
Args:
in_shape (list of int): Shape of input tensor for single group
out_size (int): The size of the output vector for this group
group_idx (int): The index of the input group
"""
if group_idx == 0:
if out_size > 0:
weights = self.add_weight(
name=f"w_self",
shape=(int(in_shape[-1]), out_size),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
)
else:
weights = None
else:
w_g = self.add_weight(
name=f"w_g{group_idx}",
shape=(int(in_shape[-1]), out_size),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
)
w_attn_s = self.add_weight(
name=f"w_attn_s{group_idx}",
shape=(out_size, 1),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
)
w_attn_g = self.add_weight(
name=f"w_attn_g{group_idx}",
shape=(out_size, 1),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
)
weights = [w_g, w_attn_s, w_attn_g]
return weights
def calculate_group_sizes(self, input_shape):
"""
Calculates the output size for each input group.
The results are stored in two variables:
* self.included_weight_groups: if the corresponding entry is True then the input group
is valid and should be used.
* self.weight_sizes: the size of the output from this group.
The AttentionalAggregator is implemented to not use the first (head node) group. This makes
the implementation different from other aggregators.
Args:
input_shape (list of list of int): Shape of input tensors for self
and neighbour features
"""
# If the neighbours are zero-dimensional for any of the shapes
# in the input, do not use the input group in the model.
# XXX Ignore batch size, since dim != 0 results in None!!
self.included_weight_groups = [
all(dim != 0 for dim in group_shape[1:]) for group_shape in input_shape
]
# The total number of enabled input groups
num_groups = np.sum(self.included_weight_groups) - 1
# We do not assign any features to the head node group, unless this is the only group.
if num_groups == 0:
weight_dims = [self.output_dim] + [0] * (len(input_shape) - 1)
else:
# Calculate the dimensionality of each group, and put remainder into the first group
# with non-zero dimensions.
group_output_dim = self.output_dim // num_groups
remainder_dim = self.output_dim - num_groups * group_output_dim
weight_dims = [0]
for g in self.included_weight_groups[1:]:
if g:
group_dim = group_output_dim + remainder_dim
remainder_dim = 0
else:
group_dim = 0
weight_dims.append(group_dim)
self.weight_dims = weight_dims
def call(self, inputs, **kwargs):
"""
Apply aggregator on the input tensors, `inputs`
Args:
inputs (List[Tensor]): Tensors giving self and neighbour features
x[0]: self Tensor (batch_size, head size, feature_size)
x[k>0]: group Tensors for neighbourhood (batch_size, head size, neighbours, feature_size)
Returns:
Keras Tensor representing the aggregated embeddings in the input.
"""
# We require the self group to be included to calculate attention
if not self.included_weight_groups[0]:
raise ValueError("The head node group must have non-zero dimension")
# If a neighbourhood dimension exists for the group, aggregate over the neighbours
# otherwise create a simple layer.
x_self = inputs[0]
group_sources = []
for ii, x_g in enumerate(inputs[1:]):
group_idx = ii + 1
if not self.included_weight_groups[group_idx]:
continue
# Get the weights for this group
w_g, w_attn_s, w_attn_g = self.w_group[group_idx]
# Group transform for self & neighbours
xw_self = K.expand_dims(K.dot(x_self, w_g), axis=2)
xw_neigh = K.dot(x_g, w_g)
# Concatenate self vector to neighbour vectors
# Shape is (n_b, n_h, n_neigh+1, n_out[ii])
xw_all = K.concatenate([xw_self, xw_neigh], axis=2)
# Calculate group attention
attn_self = K.dot(xw_self, w_attn_s) # (n_b, n_h, 1)
attn_neigh = K.dot(xw_all, w_attn_g) # (n_b, n_h, n_neigh+1, 1)
# Add self and neighbour attn and apply activation
# Note: This broadcasts to (n_b, n_h, n_neigh + 1, 1)
attn_u = self.attn_act(attn_self + attn_neigh)
# Attn coefficients, softmax over the neighbours
attn = K.softmax(attn_u, axis=2)
# Multiply attn coefficients by neighbours (and self) and aggregate
h_out = K.sum(attn * xw_all, axis=2)
group_sources.append(h_out)
# If there are no groups with features built, fallback to a MLP on the head node features
if not group_sources:
group_sources = [K.dot(x_self, self.w_group[0])]
# Concatenate or sum the outputs from all groups
h_out = K.concatenate(group_sources, axis=2)
if self.has_bias:
h_out = h_out + self.bias
return self.act(h_out)
def _require_without_generator(value, name):
if value is not None:
return value
else:
raise ValueError(
f"{name}: expected a value for 'n_samples', 'input_dim', and 'multiplicity' when "
f"'generator' is not provided, found {name}=None."
)
class GraphSAGE:
    """
    Implementation of the GraphSAGE algorithm of Hamilton et al. with Keras layers.
    see: http://snap.stanford.edu/graphsage/

    The model minimally requires specification of the layer sizes as a list of int
    corresponding to the feature dimensions for each hidden layer and a generator object.

    Different neighbour node aggregators can also be specified with the ``aggregator``
    argument, which should be the aggregator class,
    either :class:`.MeanAggregator`, :class:`.MeanPoolingAggregator`,
    :class:`.MaxPoolingAggregator`, or :class:`.AttentionalAggregator`.

    To use this class as a Keras model, the features and graph should be supplied using the
    :class:`.GraphSAGENodeGenerator` class for node inference models or the
    :class:`.GraphSAGELinkGenerator` class for link inference models. The `.in_out_tensors` method should
    be used to create a Keras model from the `GraphSAGE` object.

    Examples:
        Creating a two-level GrapSAGE node classification model with hidden node sizes of 8 and 4
        and 10 neighbours sampled at each layer using an existing :class:`.StellarGraph` object `G`
        containing the graph and node features::

            generator = GraphSAGENodeGenerator(G, batch_size=50, num_samples=[10,10])
            gat = GraphSAGE(
                    layer_sizes=[8, 4],
                    activations=["relu","softmax"],
                    generator=generator,
                )
            x_inp, predictions = gat.in_out_tensors()

    Note that passing a `NodeSequence` or `LinkSequence` object from the `generator.flow(...)` method
    as the `generator=` argument is now deprecated and the base generator object should be passed instead.

    .. seealso::

       Examples using GraphSAGE:

       - node classification: `natively <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/graphsage-node-classification.html>`__, `via Neo4j <https://stellargraph.readthedocs.io/en/stable/demos/connector/neo4j/undirected-graphsage-on-cora-neo4j-example.html>`__
       - `link prediction <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/graphsage-link-prediction.html>`__
       - unsupervised representation learning: `via random walks <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/graphsage-unsupervised-sampler-embeddings.html>`__, `via Deep Graph Infomax <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/deep-graph-infomax-embeddings.html>`__
       - calibrating models: `node classification <https://stellargraph.readthedocs.io/en/stable/demos/calibration/calibration-node-classification.html>`__, `link prediction <https://stellargraph.readthedocs.io/en/stable/demos/calibration/calibration-link-prediction.html>`__
       - ensemble models: `node classification <https://stellargraph.readthedocs.io/en/stable/demos/ensembles/ensemble-node-classification-example.html>`__, `link prediction <https://stellargraph.readthedocs.io/en/stable/demos/ensembles/ensemble-link-prediction-example.html>`__
       - `comparison of link prediction algorithms <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/homogeneous-comparison-link-prediction.html>`__

       Appropriate data generators: :class:`.GraphSAGENodeGenerator`, :class:`.Neo4jGraphSAGENodeGenerator`, :class:`.GraphSAGELinkGenerator`.

       Related models:

       - :class:`.DirectedGraphSAGE` for a generalisation to directed graphs
       - :class:`.HinSAGE` for a generalisation to heterogeneous graphs
       - :class:`.DeepGraphInfomax` for unsupervised training

       Aggregators: :class:`.MeanAggregator`, :class:`.MeanPoolingAggregator`, :class:`.MaxPoolingAggregator`, :class:`.AttentionalAggregator`.

    Args:
        layer_sizes (list): Hidden feature dimensions for each layer.
        generator (GraphSAGENodeGenerator or GraphSAGELinkGenerator):
            If specified `n_samples` and `input_dim` will be extracted from this object.
        aggregator (class): The GraphSAGE aggregator to use; defaults to the `MeanAggregator`.
        bias (bool): If True (default), a bias vector is learnt for each layer.
        dropout (float): The dropout supplied to each layer; defaults to no dropout.
        normalize (str or None): The normalization used after each layer; defaults to L2 normalization.
        activations (list): Activations applied to each layer's output;
            defaults to ``['relu', ..., 'relu', 'linear']``.
        kernel_initializer (str or func, optional): The initialiser to use for the weights of each layer.
        kernel_regularizer (str or func, optional): The regulariser to use for the weights of each layer.
        kernel_constraint (str or func, optional): The constraint to use for the weights of each layer.
        bias_initializer (str or func, optional): The initialiser to use for the bias of each layer.
        bias_regularizer (str or func, optional): The regulariser to use for the bias of each layer.
        bias_constraint (str or func, optional): The constraint to use for the bias of each layer.
        n_samples (list, optional): The number of samples per layer in the model.
        input_dim (int, optional): The dimensions of the node features used as input to the model.
        multiplicity (int, optional): The number of nodes to process at a time. This is 1 for a node inference
            and 2 for link inference (currently no others are supported).

    .. note::
        The values for ``n_samples``, ``input_dim``, and ``multiplicity`` are obtained from the provided
        ``generator`` by default. The additional keyword arguments for these parameters provide an
        alternative way to specify them if a generator cannot be supplied.
    """

    def __init__(
        self,
        layer_sizes,
        generator=None,
        aggregator=None,
        bias=True,
        dropout=0.0,
        normalize="l2",
        activations=None,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer="zeros",
        bias_regularizer=None,
        bias_constraint=None,
        n_samples=None,
        input_dim=None,
        multiplicity=None,
    ):
        # Model parameters
        self.layer_sizes = layer_sizes
        self.max_hops = len(layer_sizes)
        self.bias = bias
        self.dropout = dropout

        # Set the normalization layer used in the model
        if normalize == "l2":
            self._normalization = Lambda(lambda x: K.l2_normalize(x, axis=-1))
        elif normalize is None or normalize == "none" or normalize == "None":
            self._normalization = Lambda(lambda x: x)
        else:
            raise ValueError(
                "Normalization should be either 'l2' or 'none'; received '{}'".format(
                    normalize
                )
            )

        # Get the input_dim and num_samples
        if generator is not None:
            self._get_sizes_from_generator(generator)
        else:
            self.n_samples = _require_without_generator(n_samples, "n_samples")
            self.input_feature_size = _require_without_generator(input_dim, "input_dim")
            self.multiplicity = _require_without_generator(multiplicity, "multiplicity")

            # Check the number of samples and the layer sizes are consistent
            if len(self.n_samples) != self.max_hops:
                raise ValueError(
                    f"n_samples: expected one sample size for each of the {self.max_hops} layers, "
                    f"found {len(self.n_samples)} sample sizes"
                )

        # Feature dimensions for each layer
        self.dims = [self.input_feature_size] + layer_sizes

        # Compute size of each sampled neighbourhood
        self._compute_neighbourhood_sizes()

        # Set the aggregator layer used in the model
        if aggregator is None:
            self._aggregator = MeanAggregator
        elif issubclass(aggregator, Layer):
            self._aggregator = aggregator
        else:
            raise TypeError("Aggregator should be a subclass of Keras Layer")

        # Activation function for each layer
        if activations is None:
            activations = ["relu"] * (self.max_hops - 1) + ["linear"]
        elif len(activations) != self.max_hops:
            raise ValueError(
                "Invalid number of activations; require one function per layer"
            )
        self.activations = activations

        # Aggregator functions for each layer
        self._aggs = [
            self._aggregator(
                output_dim=self.layer_sizes[layer],
                bias=self.bias,
                act=self.activations[layer],
                kernel_initializer=kernel_initializer,
                kernel_regularizer=kernel_regularizer,
                kernel_constraint=kernel_constraint,
                bias_initializer=bias_initializer,
                bias_regularizer=bias_regularizer,
                bias_constraint=bias_constraint,
            )
            for layer in range(self.max_hops)
        ]

    def _get_sizes_from_generator(self, generator):
        """
        Sets n_samples and input_feature_size from the generator.

        Args:
            generator: The supplied generator.
        """
        if not isinstance(
            generator,
            (
                GraphSAGENodeGenerator,
                GraphSAGELinkGenerator,
                Neo4jGraphSAGENodeGenerator,
            ),
        ):
            errmsg = "Generator should be an instance of GraphSAGENodeGenerator or GraphSAGELinkGenerator"
            if isinstance(generator, (NodeSequence, LinkSequence)):
                errmsg = (
                    "Passing a Sequence object as the generator to GraphSAGE is no longer supported. "
                    + errmsg
                )
            raise TypeError(errmsg)

        self.n_samples = generator.num_samples
        # Check the number of samples and the layer sizes are consistent
        if len(self.n_samples) != self.max_hops:
            raise ValueError(
                "Mismatched lengths: neighbourhood sample sizes {} versus layer sizes {}".format(
                    self.n_samples, self.layer_sizes
                )
            )

        self.multiplicity = generator.multiplicity
        feature_sizes = generator.graph.node_feature_sizes()
        if len(feature_sizes) > 1:
            raise RuntimeError(
                "GraphSAGE called on graph with more than one node type."
            )
        self.input_feature_size = feature_sizes.popitem()[1]

    def _compute_neighbourhood_sizes(self):
        """
        Computes the total (cumulative product) number of nodes
        sampled at each neighbourhood.

        Each hop samples from the neighbours of the previous nodes.
        """

        def size_at(i):
            # np.prod of an empty slice is 1, so hop 0 is the single head node.
            # Note: uses np.prod rather than the np.product alias, which was
            # deprecated and removed in NumPy 2.0.
            return np.prod(self.n_samples[:i], dtype=int)

        self.neighbourhood_sizes = [size_at(i) for i in range(self.max_hops + 1)]

    def __call__(self, xin: List):
        """
        Apply aggregator layers

        Args:
            xin (list of Tensor): Batch input features

        Returns:
            Output tensor
        """

        def apply_layer(x: List, num_hops: int):
            """
            Compute the list of output tensors for a single GraphSAGE layer

            Args:
                x (List[Tensor]): Inputs to the layer
                num_hops (int): Layer index to construct

            Returns:
                Outputs of applying the aggregators as a list of Tensors
            """
            layer_out = []
            for i in range(self.max_hops - num_hops):
                head_shape = K.int_shape(x[i])[1]

                # Reshape neighbours per node per layer
                neigh_in = Dropout(self.dropout)(
                    Reshape((head_shape, self.n_samples[i], self.dims[num_hops]))(
                        x[i + 1]
                    )
                )

                # Apply aggregator to head node and neighbour nodes
                layer_out.append(
                    self._aggs[num_hops]([Dropout(self.dropout)(x[i]), neigh_in])
                )

            return layer_out

        if not isinstance(xin, list):
            raise TypeError("Input features to GraphSAGE must be a list")

        if len(xin) != self.max_hops + 1:
            raise ValueError(
                "Length of input features should equal the number of GraphSAGE layers plus one"
            )

        # Form GraphSAGE layers iteratively
        h_layer = xin
        for layer in range(0, self.max_hops):
            h_layer = apply_layer(h_layer, layer)

        # Remove neighbourhood dimension from output tensors of the stack
        # note that at this point h_layer contains the output tensor of the top (last applied) layer of the stack
        h_layer = [
            Reshape(K.int_shape(x)[2:])(x) if K.int_shape(x)[1] == 1 else x
            for x in h_layer
        ]

        return (
            self._normalization(h_layer[0])
            if len(h_layer) == 1
            else [self._normalization(xi) for xi in h_layer]
        )

    def _node_model(self):
        """
        Builds a GraphSAGE model for node prediction

        Returns:
            tuple: (x_inp, x_out) where ``x_inp`` is a list of Keras input tensors
            for the specified GraphSAGE model and ``x_out`` is the Keras tensor
            for the GraphSAGE model output.
        """
        # Create tensor inputs for neighbourhood sampling
        x_inp = [
            Input(shape=(s, self.input_feature_size)) for s in self.neighbourhood_sizes
        ]

        # Output from GraphSAGE model
        x_out = self(x_inp)

        # Returns inputs and outputs
        return x_inp, x_out

    def _link_model(self):
        """
        Builds a GraphSAGE model for link or node pair prediction

        Returns:
            tuple: (x_inp, x_out) where ``x_inp`` is a list of Keras input tensors for (src, dst) node pairs
            (where (src, dst) node inputs alternate),
            and ``x_out`` is a list of output tensors for (src, dst) nodes in the node pairs
        """
        # Expose input and output sockets of the model, for source and destination nodes:
        x_inp_src, x_out_src = self._node_model()
        x_inp_dst, x_out_dst = self._node_model()
        # re-pack into a list where (source, target) inputs alternate, for link inputs:
        x_inp = [x for ab in zip(x_inp_src, x_inp_dst) for x in ab]
        # same for outputs:
        x_out = [x_out_src, x_out_dst]
        return x_inp, x_out

    def in_out_tensors(self, multiplicity=None):
        """
        Builds a GraphSAGE model for node or link/node pair prediction, depending on the generator used to construct
        the model (whether it is a node or link/node pair generator).

        Returns:
            tuple: ``(x_inp, x_out)``, where ``x_inp`` is a list of Keras input tensors
            for the specified GraphSAGE model (either node or link/node pair model) and ``x_out`` contains
            model output tensor(s) of shape (batch_size, layer_sizes[-1])
        """
        if multiplicity is None:
            multiplicity = self.multiplicity

        if multiplicity == 1:
            return self._node_model()
        elif multiplicity == 2:
            return self._link_model()
        else:
            raise RuntimeError(
                "Currently only multiplicities of 1 and 2 are supported. Consider using node_model or "
                "link_model method explicitly to build node or link prediction model, respectively."
            )

    def default_model(self, flatten_output=True):
        warnings.warn(
            "The .default_model() method is deprecated. Please use .in_out_tensors() method instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.in_out_tensors()

    node_model = deprecated_model_function(_node_model, "node_model")
    link_model = deprecated_model_function(_link_model, "link_model")
    build = deprecated_model_function(in_out_tensors, "build")
class DirectedGraphSAGE(GraphSAGE):
    """
    Implementation of a directed version of the GraphSAGE algorithm of Hamilton et al. with Keras layers.
    see: http://snap.stanford.edu/graphsage/

    The model minimally requires specification of the layer sizes as a list of int
    corresponding to the feature dimensions for each hidden layer and a generator object.

    Different neighbour node aggregators can also be specified with the ``aggregator``
    argument, which should be the aggregator class,
    either :class:`.MeanAggregator`, :class:`.MeanPoolingAggregator`,
    :class:`.MaxPoolingAggregator`, or :class:`.AttentionalAggregator`.

    .. seealso::

       Examples using Directed GraphSAGE:

       - `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/directed-graphsage-node-classification.html>`__
       - `node classification with Neo4j <https://stellargraph.readthedocs.io/en/stable/demos/connector/neo4j/directed-graphsage-on-cora-neo4j-example.html>`__

       Appropriate data generators: :class:`.DirectedGraphSAGENodeGenerator`, :class:`.Neo4jDirectedGraphSAGENodeGenerator`, :class:`.DirectedGraphSAGELinkGenerator`.

       Related models:

       - :class:`.GraphSAGE` for undirected graphs
       - :class:`.HinSAGE` for undirected heterogeneous graphs

       Aggregators: :class:`.MeanAggregator`, :class:`.MeanPoolingAggregator`, :class:`.MaxPoolingAggregator`, :class:`.AttentionalAggregator`.

    Args:
        layer_sizes (list): Hidden feature dimensions for each layer.
        generator (DirectedGraphSAGENodeGenerator):
            If specified `n_samples` and `input_dim` will be extracted from this object.
        aggregator (class, optional): The GraphSAGE aggregator to use; defaults to the `MeanAggregator`.
        bias (bool, optional): If True (default), a bias vector is learnt for each layer.
        dropout (float, optional): The dropout supplied to each layer; defaults to no dropout.
        normalize (str, optional): The normalization used after each layer; defaults to L2 normalization.
        kernel_initializer (str or func, optional): The initialiser to use for the weights of each layer.
        kernel_regularizer (str or func, optional): The regulariser to use for the weights of each layer.
        kernel_constraint (str or func, optional): The constraint to use for the weights of each layer.
        bias_initializer (str or func, optional): The initialiser to use for the bias of each layer.
        bias_regularizer (str or func, optional): The regulariser to use for the bias of each layer.
        bias_constraint (str or func, optional): The constraint to use for the bias of each layer.

    Notes::
        If a generator is not specified, then additional keyword arguments must be supplied:

        * in_samples (list): The number of in-node samples per layer in the model.
        * out_samples (list): The number of out-node samples per layer in the model.
        * input_dim (int): The dimensions of the node features used as input to the model.
        * multiplicity (int): The number of nodes to process at a time. This is 1 for a node inference
          and 2 for link inference (currently no others are supported).

        Passing a `NodeSequence` or `LinkSequence` object from the `generator.flow(...)` method
        as the `generator=` argument is now deprecated and the base generator object should be passed instead.
    """

    def _get_sizes_from_generator(self, generator):
        """
        Sets in_samples, out_samples and input_feature_size from the generator.

        Args:
            generator: The supplied generator.
        """
        if not isinstance(
            generator,
            (
                DirectedGraphSAGENodeGenerator,
                DirectedGraphSAGELinkGenerator,
                Neo4jDirectedGraphSAGENodeGenerator,
            ),
        ):
            errmsg = "Generator should be an instance of DirectedGraphSAGENodeGenerator"
            if isinstance(generator, (NodeSequence, LinkSequence)):
                errmsg = (
                    "Passing a Sequence object as the generator to DirectedGraphSAGE is no longer supported. "
                    + errmsg
                )
            raise TypeError(errmsg)

        self.in_samples = generator.in_samples
        if len(self.in_samples) != self.max_hops:
            raise ValueError(
                "Mismatched lengths: in-node sample sizes {} versus layer sizes {}".format(
                    self.in_samples, self.layer_sizes
                )
            )
        self.out_samples = generator.out_samples
        if len(self.out_samples) != self.max_hops:
            raise ValueError(
                "Mismatched lengths: out-node sample sizes {} versus layer sizes {}".format(
                    self.out_samples, self.layer_sizes
                )
            )

        feature_sizes = generator.graph.node_feature_sizes()
        if len(feature_sizes) > 1:
            raise RuntimeError(
                "DirectedGraphSAGE called on graph with more than one node type."
            )
        self.input_feature_size = feature_sizes.popitem()[1]
        self.multiplicity = generator.multiplicity

    def _get_sizes_from_keywords(self, **kwargs):
        """
        Sets in_samples, out_samples and input_feature_size from the keywords.

        Args:
            kwargs: The additional keyword arguments.
        """
        try:
            self.in_samples = kwargs["in_samples"]
            self.out_samples = kwargs["out_samples"]
            self.input_feature_size = kwargs["input_dim"]
            self.multiplicity = kwargs["multiplicity"]
        except KeyError:
            raise KeyError(
                "If generator is not provided, in_samples, out_samples, "
                "input_dim, and multiplicity must be specified."
            )

        if len(self.in_samples) != self.max_hops:
            raise ValueError(
                "Mismatched lengths: in-node sample sizes {} versus layer sizes {}".format(
                    self.in_samples, self.layer_sizes
                )
            )
        if len(self.out_samples) != self.max_hops:
            raise ValueError(
                "Mismatched lengths: out-node sample sizes {} versus layer sizes {}".format(
                    self.out_samples, self.layer_sizes
                )
            )

    def _compute_neighbourhood_sizes(self):
        """
        Computes the total (cumulative product) number of nodes
        sampled at each neighbourhood.

        Each hop has to sample separately from both the in-nodes
        and the out-nodes of the previous nodes.
        This gives rise to a binary tree of directed neighbourhoods.
        """
        self.max_slots = 2 ** (self.max_hops + 1) - 1
        # Slot ii+1 in the binary tree is addressed by the binary representation
        # of its (1-based) index: after the leading 1, a "0" digit means an
        # in-node hop and a "1" digit means an out-node hop.
        # Note: uses np.prod rather than the np.product alias, which was
        # deprecated and removed in NumPy 2.0.
        self.neighbourhood_sizes = [1] + [
            np.prod(
                [
                    self.in_samples[kk] if d == "0" else self.out_samples[kk]
                    for kk, d in enumerate(np.binary_repr(ii + 1)[1:])
                ]
            )
            for ii in range(1, self.max_slots)
        ]

    def __call__(self, xin: List):
        """
        Apply aggregator layers

        Args:
            xin (list of Tensor): Batch input features

        Returns:
            Output tensor
        """

        def aggregate_neighbours(tree: List, stage: int):
            # compute the number of slots with children in the binary tree
            num_slots = (len(tree) - 1) // 2
            new_tree = [None] * num_slots
            for slot in range(num_slots):
                # get parent nodes
                num_head_nodes = K.int_shape(tree[slot])[1]
                parent = Dropout(self.dropout)(tree[slot])
                # find in-nodes
                child_slot = 2 * slot + 1
                size = (
                    self.neighbourhood_sizes[child_slot] // num_head_nodes
                    if num_head_nodes > 0
                    else 0
                )
                in_child = Dropout(self.dropout)(
                    Reshape((num_head_nodes, size, self.dims[stage]))(tree[child_slot])
                )
                # find out-nodes
                child_slot = child_slot + 1
                size = (
                    self.neighbourhood_sizes[child_slot] // num_head_nodes
                    if num_head_nodes > 0
                    else 0
                )
                out_child = Dropout(self.dropout)(
                    Reshape((num_head_nodes, size, self.dims[stage]))(tree[child_slot])
                )
                # aggregate neighbourhoods
                new_tree[slot] = self._aggs[stage]([parent, in_child, out_child])
            return new_tree

        if not isinstance(xin, list):
            raise TypeError("Input features to GraphSAGE must be a list")

        if len(xin) != self.max_slots:
            raise ValueError(
                "Number of input tensors does not match number of GraphSAGE layers"
            )

        # Combine GraphSAGE layers in stages
        stage_tree = xin
        for stage in range(self.max_hops):
            stage_tree = aggregate_neighbours(stage_tree, stage)
        out_layer = stage_tree[0]

        # Remove neighbourhood dimension from output tensors of the stack
        if K.int_shape(out_layer)[1] == 1:
            out_layer = Reshape(K.int_shape(out_layer)[2:])(out_layer)
        return self._normalization(out_layer)
| 52,721 | 39.123288 | 307 | py |
stellargraph | stellargraph-master/stellargraph/layer/knowledge_graph.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import activations, initializers, constraints, regularizers
from tensorflow.keras.layers import Input, Layer, Lambda, Dropout, Reshape, Embedding
from .misc import deprecated_model_function
from ..mapper.knowledge_graph import KGTripleGenerator, KGTripleSequence
from ..core.experimental import experimental
from ..core.validation import require_integer_in_range, comma_sep
from ..utils.hyperbolic import *
class KGModel:
    """
    Base class for knowledge-graph embedding models.

    Wires together node/entity and edge-type/relation :class:`Embedding` layers
    (created by the supplied :class:`KGScore` instance) with that instance's
    scoring function, and provides ranking-based evaluation of triples.

    Args:
        generator (KGTripleGenerator): generator for the graph being modelled.
        scoring (KGScore): the scoring function, which also decides how many
            embedding layers are created and how they are interpreted.
        embedding_dimension (int): requested embedding dimension (>= 1); its
            exact meaning depends on the scoring function.
        embeddings_initializer: initializer for the embedding layers.
        embeddings_regularizer: regularizer for the embedding layers.
    """

    def __init__(
        self,
        generator,
        scoring,
        embedding_dimension,
        *,
        embeddings_initializer,
        embeddings_regularizer,
    ):
        if not isinstance(generator, KGTripleGenerator):
            raise TypeError(
                f"generator: expected KGTripleGenerator, found {type(generator).__name__}"
            )
        if not isinstance(scoring, KGScore):
            raise TypeError(
                f"scoring: expected KGScore subclass, found {type(scoring).__name__}"
            )
        require_integer_in_range(embedding_dimension, "embedding_dimension", min_val=1)

        graph = generator.G
        self.num_nodes = graph.number_of_nodes()
        self.num_edge_types = len(graph._edges.types)
        self._scoring = scoring

        # The scoring function decides how many embedding layers exist and
        # what they mean; we only validate their general shape below.
        embeddings = scoring.embeddings(
            self.num_nodes,
            self.num_edge_types,
            embedding_dimension,
            embeddings_initializer,
            embeddings_regularizer,
        )
        self._validate_embeddings(embeddings)
        self._node_embs, self._edge_type_embs = embeddings

    def _validate_embeddings(self, embeddings):
        """
        Check that ``embeddings`` is a pair of lists of Keras Embedding layers,
        raising a descriptive ValueError otherwise.
        """

        def error(found):
            raise ValueError(
                f"scoring: expected 'embeddings' method to return two lists of tf.keras.layers.Embedding layers, found {found}"
            )

        if len(embeddings) != 2:
            error(f"a sequence of length {len(embeddings)}")

        a, b = embeddings
        if not all(isinstance(x, list) for x in embeddings):
            error(f"a pair with types ({type(a).__name__}, {type(b).__name__})")

        if not all(isinstance(x, Embedding) for x in a + b):
            a_types = comma_sep(a, stringify=lambda x: type(x).__name__)
            b_types = comma_sep(b, stringify=lambda x: type(x).__name__)
            error(f"a pair of lists containing types ([{a_types}], [{b_types}])")

        # all good!
        return

    def embedding_arrays(self):
        """
        Retrieve each separate set of embeddings for nodes/entities and edge types/relations in this model.

        Returns:
            A tuple of lists of numpy arrays: the first element contains the embeddings for nodes/entities (for each element, ``shape
            = number of nodes × k``), the second element contains the embeddings for edge types/relations
            (``shape = number of edge types x k``), where ``k`` is some notion of the embedding
            dimension for each layer. The type of the embeddings depends on the specific scoring function chosen.
        """
        node = [e.embeddings.numpy() for e in self._node_embs]
        edge_type = [e.embeddings.numpy() for e in self._edge_type_embs]
        # Let the scoring function re-interpret the raw weights (e.g. pair up
        # real/imaginary parts into complex arrays).
        return self._scoring.embeddings_to_numpy(node, edge_type)

    def embeddings(self):
        """
        Retrieve the embeddings for nodes/entities and edge types/relations in this model, if there's only one set of embeddings for each of nodes and edge types.

        Returns:
            A tuple of numpy arrays: the first element is the embeddings for nodes/entities (``shape
            = number of nodes × k``), the second element is the embeddings for edge types/relations
            (``shape = number of edge types x k``), where ``k`` is some notion of the embedding
            dimension. The type of the embeddings depends on the specific scoring function chosen.
        """
        node, edge_type = self.embedding_arrays()
        # NOTE(review): the `and` here means a model with multiple node arrays
        # but a single edge-type array (or vice versa) is not rejected and the
        # extra arrays are silently dropped — confirm this leniency is intended.
        if len(node) != 1 and len(edge_type) != 1:
            raise ValueError(
                f"embeddings: expected a single embedding array for nodes and for edge types from embedding_arrays, found {len(node)} node and {len(edge_type)} edge type arrays; use embedding_arrays to retrieve the lists instead"
            )
        return node[0], edge_type[0]

    def __call__(self, x):
        """
        Apply embedding layers to the source, relation and object input "ilocs" (sequential integer
        labels for the nodes and edge types).

        Args:
            x (list): list of 3 tensors (each batch size x 1) storing the ilocs of the subject,
                relation and object elements for each edge in the batch.
        """
        s_iloc, r_iloc, o_iloc = x

        # Subject and object share the node embedding layers; the relation has
        # its own. Each iloc is looked up in every layer of its group.
        sequenced = [
            (s_iloc, self._node_embs),
            (r_iloc, self._edge_type_embs),
            (o_iloc, self._node_embs),
        ]
        inp = [
            emb_layer(ilocs)
            for ilocs, emb_layers in sequenced
            for emb_layer in emb_layers
        ]
        return self._scoring(inp)

    def in_out_tensors(self):
        """
        Builds a knowledge graph model.

        Returns:
            A tuple of (list of input tensors, tensor for ComplEx model score outputs)
        """
        s_iloc = Input(shape=1)
        r_iloc = Input(shape=1)
        o_iloc = Input(shape=1)

        x_inp = [s_iloc, r_iloc, o_iloc]
        x_out = self(x_inp)

        return x_inp, x_out

    def rank_edges_against_all_nodes(
        self, test_data, known_edges_graph, tie_breaking="random"
    ):
        """
        Returns the ranks of the true edges in ``test_data``, when scored against all other similar
        edges.

        For each input edge ``E = (s, r, o)``, the score of the *modified-object* edge ``(s, r, n)``
        is computed for every node ``n`` in the graph, and similarly the score of the
        *modified-subject* edge ``(n, r, o)``.

        This computes "raw" and "filtered" ranks:

        raw
          The score of each edge is ranked against all of the modified-object and modified-subject
          ones, for instance, if ``E = ("a", "X", "b")`` has score 3.14, and only one
          modified-object edge has a higher score (e.g. ``F = ("a", "X", "c")``), then the raw
          modified-object rank for ``E`` will be 2; if all of the ``(n, "X", "b")`` edges have score
          less than 3.14, then the raw modified-subject rank for ``E`` will be 1.

        filtered
          The score of each edge is ranked against only the unknown modified-object and
          modified-subject edges. An edge is considered known if it is in ``known_edges_graph``
          which should typically hold every edge in the dataset (that is everything from the train,
          test and validation sets, if the data has been split). For instance, continuing the raw
          example, if the higher-scoring edge ``F`` is in the graph, then it will be ignored, giving
          a filtered modified-object rank for ``E`` of 1. (If ``F`` was not in the graph, the
          filtered modified-object rank would be 2.)

        Args:
            test_data: the output of :meth:`KGTripleGenerator.flow` on some test triples

            known_edges_graph (StellarGraph):
                a graph instance containing all known edges/triples

            tie_breaking ('random', 'top' or 'bottom'):
                How to rank true edges that tie with modified-object or modified-subject ones, see
                `Sun et al. "A Re-evaluation of Knowledge Graph Completion Methods"
                <http://arxiv.org/abs/1911.03903>`_

        Returns:
            A numpy array of integer raw ranks. It has shape ``N × 2``, where N is the number of
            test triples in ``test_data``; the first column (``array[:, 0]``) holds the
            modified-object ranks, and the second (``array[:, 1]``) holds the modified-subject
            ranks.
        """
        if not isinstance(test_data, KGTripleSequence):
            # bug fix: this message was previously a plain (non-f) string, so
            # the {type(...)} placeholder was printed literally.
            raise TypeError(
                f"test_data: expected KGTripleSequence; found {type(test_data).__name__}"
            )

        num_nodes = known_edges_graph.number_of_nodes()

        node_embs, edge_type_embs = self.embedding_arrays()
        extra_data = self._scoring.bulk_scoring_data(node_embs, edge_type_embs)

        raws = []
        filtereds = []

        # run through the batches and compute the ranks for each one
        num_tested = 0
        for ((subjects, rels, objects),) in test_data:
            num_tested += len(subjects)

            # batch_size x k
            ss = [e[subjects, :] for e in node_embs]
            rs = [e[rels, :] for e in edge_type_embs]
            # renamed from `os` to avoid shadowing the stdlib module name
            objs = [e[objects, :] for e in node_embs]

            mod_o_pred, mod_s_pred = self._scoring.bulk_scoring(
                node_embs, extra_data, ss, rs, objs,
            )

            mod_o_raw, mod_o_filt = _ranks_from_score_columns(
                mod_o_pred,
                true_modified_node_ilocs=objects,
                unmodified_node_ilocs=subjects,
                true_rel_ilocs=rels,
                modified_object=True,
                known_edges_graph=known_edges_graph,
                tie_breaking=tie_breaking,
            )
            mod_s_raw, mod_s_filt = _ranks_from_score_columns(
                mod_s_pred,
                true_modified_node_ilocs=subjects,
                true_rel_ilocs=rels,
                modified_object=False,
                unmodified_node_ilocs=objects,
                known_edges_graph=known_edges_graph,
                tie_breaking=tie_breaking,
            )

            raws.append(np.column_stack((mod_o_raw, mod_s_raw)))
            filtereds.append(np.column_stack((mod_o_filt, mod_s_filt)))

        # make one big array
        raw = np.concatenate(raws)
        filtered = np.concatenate(filtereds)
        # for each edge, there should be an pair of raw ranks
        assert raw.shape == filtered.shape == (num_tested, 2)

        return raw, filtered
class KGScore(abc.ABC):
    """
    Interface for the per-algorithm scoring used by knowledge graph models: how embedding
    layers are created, how their raw weights are converted to NumPy for inference, and how
    scores are computed both as tensors (training) and in bulk NumPy form (ranking).
    """

    @abc.abstractmethod
    def embeddings(
        self, num_nodes, num_edge_types, dimension, initializer, regularizer
    ):
        """
        Create the embedding layer(s) appropriate for this scoring.

        Args:
            num_nodes: the number of nodes in this graph.
            num_edge_types: the number of edge types/relations in this graph.
            dimension: the requested embedding dimension, for whatever that means for this scoring.
            initializer: the initializer to use for embeddings, when required.
            regularizer: the regularizer to use for embeddings, when required.

        Returns:
            A pair of lists of :class:`tensorflow.keras.layers.Embedding` layers, for nodes
            and for edge types respectively.
        """
        ...

    def embeddings_to_numpy(self, node_embs, edge_type_embs):
        """
        Convert raw embedding arrays into their "semantic" form, such as complex numbers
        instead of interleaved real numbers. The default is a pass-through.

        Args:
            node_embs: ``num_nodes × k`` array of all node embeddings.
            edge_type_embs: ``num_edge_types × k`` array of all edge type/relation embeddings.

        Returns:
            Model-specific NumPy arrays corresponding to some useful view of the embedding
            vectors.
        """
        return node_embs, edge_type_embs

    def bulk_scoring_data(self, node_embs, edge_type_embs):
        """
        Pre-compute data that helps bulk ranking; ``None`` by default, meaning no extra data.
        """
        return None

    @abc.abstractmethod
    def bulk_scoring(
        self, node_embs, extra_data, s_embs, r_embs, o_embs,
    ):
        """
        Compute a batch of modified-object and modified-subject scores for ranking.

        Args:
            node_embs: ``num_nodes × k`` array of all node embeddings.
            extra_data: the return value of :meth:`bulk_scoring_data`.
            s_embs: ``batch_size × k`` embeddings for the true source nodes.
            r_embs: ``batch_size × k`` embeddings for the true edge types/relations.
            o_embs: ``batch_size × k`` embeddings for the true object nodes.

        Returns:
            A pair of ``num_nodes × batch_size`` NumPy arrays: first the scores of the
            modified-object edges, then the scores of the modified-subject edges.
        """
        ...

    # deliberately not required to be a Keras Layer: a model, or any other combination of
    # individual layers, works too — the instance is applied by calling it
    @abc.abstractmethod
    def __call__(self, inputs):
        """
        Apply this scoring mechanism to values selected from the embedding layers.

        Args:
            inputs: a list of tensors selected from each of the embedding layers, concatenated
                like ``[source, source, ..., edge type, edge type, ..., object, object, ...]``
        """
        ...
def _numpy_complex(arrays):
emb = 1j * arrays[1]
emb += arrays[0]
return emb
class ComplExScore(Layer, KGScore):
    """
    ComplEx scoring Keras layer.

    Original Paper: Complex Embeddings for Simple Link Prediction, Théo Trouillon, Johannes Welbl,
    Sebastian Riedel, Éric Gaussier and Guillaume Bouchard, ICML
    2016. http://jmlr.org/proceedings/papers/v48/trouillon16.pdf

    This combines subject, relation and object embeddings into a score of the likelihood of the
    link.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def embeddings(
        self, num_nodes, num_edge_types, dimension, initializer, regularizer
    ):
        # helper: one real-valued Embedding layer of the requested dimension
        def embed(count):
            return Embedding(
                count,
                dimension,
                embeddings_initializer=initializer,
                embeddings_regularizer=regularizer,
            )

        # ComplEx generates embeddings in C, which we model as separate real and imaginary
        # embeddings
        nodes = [embed(num_nodes), embed(num_nodes)]
        edge_types = [embed(num_edge_types), embed(num_edge_types)]
        return nodes, edge_types

    def embeddings_to_numpy(self, node_embs, edge_type_embs):
        # merge each [real, imaginary] array pair into a single complex-valued array
        return (
            [_numpy_complex(node_embs)],
            [_numpy_complex(edge_type_embs)],
        )

    def bulk_scoring_data(self, node_embs, edge_type_embs):
        # pre-compute the conjugated node embeddings once; `bulk_scoring` reuses them for
        # every modified-object batch
        return node_embs[0].conj()

    def bulk_scoring(
        self, node_embs, node_embs_conj, s_embs, r_embs, o_embs,
    ):
        # each argument is a single-element list of complex arrays (see `embeddings_to_numpy`)
        node_embs = node_embs[0]
        s_embs = s_embs[0]
        r_embs = r_embs[0]
        o_embs = o_embs[0]
        # modified-object scores: Re(sum(s * r * conj(n))) for every node n at once
        mod_o_pred = np.inner(node_embs_conj, s_embs * r_embs).real
        # modified-subject scores: Re(sum(n * r * conj(o))) for every node n at once
        mod_s_pred = np.inner(node_embs, r_embs * o_embs.conj()).real
        return mod_o_pred, mod_s_pred

    def build(self, input_shape):
        # no weights of its own: the trainable parameters live in the Embedding layers
        self.built = True

    def call(self, inputs):
        """
        Applies the layer.

        Args:
            inputs: a list of 6 tensors (``shape = batch size × 1 × embedding dimension k``), where
                the three consecutive pairs represent real and imaginary parts of the subject,
                relation and object embeddings, respectively, that is, ``inputs == [Re(subject),
                Im(subject), Re(relation), ...]``
        """
        s_re, s_im, r_re, r_im, o_re, o_im = inputs

        # triple product summed over the embedding axis
        def inner(r, s, o):
            return tf.reduce_sum(r * s * o, axis=2)

        # expansion of Re(<w_r, e_s, conjugate(e_o)>)
        score = (
            inner(r_re, s_re, o_re)
            + inner(r_re, s_im, o_im)
            + inner(r_im, s_re, o_im)
            - inner(r_im, s_im, o_re)
        )
        return score
class ComplEx(KGModel):
    """
    Embedding layers plus a :class:`ComplExScore` layer, implementing the ComplEx knowledge
    graph embedding algorithm of http://jmlr.org/proceedings/papers/v48/trouillon16.pdf

    .. seealso::

       Example using ComplEx: `link prediction <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/complex-link-prediction.html>`__

       Related models: other knowledge graph models, see :class:`.KGTripleGenerator` for a full list.

       Appropriate data generator: :class:`.KGTripleGenerator`.

    Args:
        generator (KGTripleGenerator): A generator of triples to feed into the model.
        embedding_dimension (int): the dimension of the embedding (that is, a vector in
            ``C^embedding_dimension`` is learnt for each node and each link type)
        embeddings_initializer (str or func, optional): The initialiser to use for the embeddings
            (the default of random normal values matches the paper's reference implementation).
        embeddings_regularizer (str or func, optional): The regularizer to use for the embeddings.
    """

    def __init__(
        self,
        generator,
        embedding_dimension,
        embeddings_initializer="normal",
        embeddings_regularizer=None,
    ):
        # KGModel owns all of the model wiring; this class only selects the scoring layer
        scoring = ComplExScore()
        super().__init__(
            generator,
            scoring,
            embedding_dimension=embedding_dimension,
            embeddings_initializer=embeddings_initializer,
            embeddings_regularizer=embeddings_regularizer,
        )

    # `.build()` is the deprecated name for `.in_out_tensors()`
    build = deprecated_model_function(KGModel.in_out_tensors, "build")
class DistMultScore(Layer, KGScore):
    """
    DistMult scoring Keras layer.

    Original Paper: Embedding Entities and Relations for Learning and Inference in Knowledge
    Bases. Bishan Yang, Wen-tau Yih, Xiaodong He, Jianfeng Gao, Li Deng. ICLR 2015

    Combines the subject, relation and object embeddings into a single link-likelihood score,
    via a bilinear form with a diagonal relation matrix.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def embeddings(
        self, num_nodes, num_edge_types, dimension, initializer, regularizer
    ):
        # DistMult uses a single real-valued embedding per node and per edge type.
        # FIXME(#980,https://github.com/tensorflow/tensorflow/issues/33755): embeddings can't
        # use constraints to be normalized: per section 4 in the paper, the embeddings should be
        # normalised to have unit norm.
        def embed(count):
            return Embedding(
                count,
                dimension,
                embeddings_initializer=initializer,
                embeddings_regularizer=regularizer,
            )

        return [embed(num_nodes)], [embed(num_edge_types)]

    def bulk_scoring(
        self, all_n_embs, _extra_data, s_embs, r_embs, o_embs,
    ):
        # every argument is a single-element list of arrays; unwrap them first
        nodes = all_n_embs[0]
        subjects = s_embs[0]
        rels = r_embs[0]
        objects = o_embs[0]
        # scores of (s, r, n) for every node n, then scores of (n, r, o) for every node n
        mod_o_pred = np.inner(nodes, subjects * rels)
        mod_s_pred = np.inner(nodes, rels * objects)
        return mod_o_pred, mod_s_pred

    def build(self, input_shape):
        # no weights of its own: the trainable parameters live in the Embedding layers
        self.built = True

    def call(self, inputs):
        """
        Applies the layer.

        Args:
            inputs: a list of 3 tensors (``shape = batch size × 1 × embedding dimension``)
                holding the subject, relation and object embeddings, in that order.
        """
        y_e1, m_r, y_e2 = inputs
        # y_e1^T M_r y_e2 where M_r = diag(m_r): elementwise product, summed over embeddings
        return tf.reduce_sum(y_e1 * m_r * y_e2, axis=2)
class DistMult(KGModel):
    """
    Embedding layers plus a :class:`DistMultScore` layer, implementing the DistMult knowledge
    graph embedding algorithm as in https://arxiv.org/pdf/1412.6575.pdf

    .. seealso::

       Example using DistMult: `link prediction <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/distmult-link-prediction.html>`__

       Related models: other knowledge graph models, see :class:`.KGTripleGenerator` for a full list.

       Appropriate data generator: :class:`.KGTripleGenerator`.

    Args:
        generator (KGTripleGenerator): A generator of triples to feed into the model.
        embedding_dimension (int): the dimension of the embedding (that is, a vector in
            ``R^embedding_dimension`` is learnt for each node and each link type)
        embeddings_initializer (str or func, optional): The initialiser to use for the embeddings.
        embeddings_regularizer (str or func, optional): The regularizer to use for the embeddings.
    """

    def __init__(
        self,
        generator,
        embedding_dimension,
        embeddings_initializer="uniform",
        embeddings_regularizer=None,
    ):
        # KGModel owns all of the model wiring; this class only selects the scoring layer
        scoring = DistMultScore()
        super().__init__(
            generator,
            scoring,
            embedding_dimension=embedding_dimension,
            embeddings_initializer=embeddings_initializer,
            embeddings_regularizer=embeddings_regularizer,
        )

    # `.build()` is the deprecated name for `.in_out_tensors()`
    build = deprecated_model_function(KGModel.in_out_tensors, "build")
class RotatEScore(Layer, KGScore):
    # RotatE scoring layer: relations are modelled as rotations in the complex plane (unit
    # modulus, stored as just a phase) and the score is a margin minus the norm of the
    # difference between the rotated subject and the object. See the `RotatE` model below.
    def __init__(self, margin, norm_order, **kwargs):
        super().__init__(**kwargs)
        # margin: constant offset added to every score; norm_order: `ord` of the vector norm
        self._margin = margin
        self._norm_order = norm_order

    def embeddings(
        self, num_nodes, num_edge_types, dimension, initializer, regularizer
    ):
        # helper: one real-valued Embedding layer; `reg` overridable for the phase embedding
        def embed(count, reg=regularizer):
            return Embedding(
                count,
                dimension,
                embeddings_initializer=initializer,
                embeddings_regularizer=reg,
            )

        # RotatE generates embeddings in C, which we model as separate real and imaginary
        # embeddings for node types, and just the phase for edge types (since they have |x| = 1)
        nodes = [embed(num_nodes), embed(num_nodes)]
        # it doesn't make sense to regularize the phase, because it's circular
        edge_types = [embed(num_edge_types, reg=None)]
        return nodes, edge_types

    def embeddings_to_numpy(self, node_embs, edge_type_embs):
        # nodes: merge [real, imaginary] into a complex array; edge types: phase -> cos + i·sin
        nodes = _numpy_complex(node_embs)
        edge_types = 1j * np.sin(edge_type_embs[0])
        edge_types += np.cos(edge_type_embs[0])
        return [nodes], [edge_types]

    def bulk_scoring(
        self, all_n_embs, _extra_data, s_embs, r_embs, o_embs,
    ):
        # each argument is a single-element list of complex arrays (see `embeddings_to_numpy`)
        all_n_embs = all_n_embs[0]
        s_embs = s_embs[0]
        r_embs = r_embs[0]
        o_embs = o_embs[0]
        # (the margin is a fixed offset that doesn't affect relative ranks)
        # modified-object: -||s∘r - n|| broadcast over every node n -> num_nodes × batch_size
        mod_o_pred = -np.linalg.norm(
            (s_embs * r_embs)[None, :, :] - all_n_embs[:, None, :],
            ord=self._norm_order,
            axis=2,
        )
        # modified-subject: -||n∘r - o|| broadcast over every node n
        mod_s_pred = -np.linalg.norm(
            all_n_embs[:, None, :] * r_embs[None, :, :] - o_embs[None, :, :],
            ord=self._norm_order,
            axis=2,
        )
        return mod_o_pred, mod_s_pred

    def get_config(self):
        # include the constructor arguments so the layer round-trips through serialization
        return {
            **super().get_config(),
            "margin": self._margin,
            "norm_order": self._norm_order,
        }

    def call(self, inputs):
        # subject real/imaginary parts, relation phase, object real/imaginary parts
        s_re, s_im, r_phase, o_re, o_im = inputs
        # reconstruct the unit-modulus relation embedding from its phase
        r_re = tf.math.cos(r_phase)
        r_im = tf.math.sin(r_phase)
        # expansion of s◦r - t
        re = s_re * r_re - s_im * r_im - o_re
        im = s_re * r_im + s_im * r_re - o_im
        # norm the vector: -|| ... ||_p
        return self._margin - tf.norm(
            tf.sqrt(re * re + im * im), ord=self._norm_order, axis=2
        )
@experimental(reason="demo and documentation is missing", issues=[1549, 1550])
class RotatE(KGModel):
    """
    Embedding layers plus a :class:`RotatEScore` layer, implementing the RotatE algorithm of
    https://arxiv.org/abs/1902.10197, where each relation acts as a rotation in the complex
    plane.

    .. seealso::

       Related models: other knowledge graph models, see :class:`.KGTripleGenerator` for a full list.

       Appropriate data generator: :class:`.KGTripleGenerator`.

    Args:
        generator (KGTripleGenerator): A generator of triples to feed into the model.
        embedding_dimension (int): the dimension of the embedding.
        margin (float, optional): the constant offset included in every score; the default
            comes from the paper's code
            (https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding).
        norm_order (int, optional): the order of the norm used in the score; the default also
            comes from the paper's code.
        embeddings_initializer (str or func, optional): The initialiser to use for the embeddings.
        embeddings_regularizer (str or func, optional): The regularizer to use for the embeddings.
    """

    def __init__(
        self,
        generator,
        embedding_dimension,
        margin=12.0,
        norm_order=2,
        embeddings_initializer="normal",
        embeddings_regularizer=None,
    ):
        scoring = RotatEScore(margin=margin, norm_order=norm_order)
        super().__init__(
            generator,
            scoring,
            embedding_dimension=embedding_dimension,
            embeddings_initializer=embeddings_initializer,
            embeddings_regularizer=embeddings_regularizer,
        )
class RotHEScore(Layer, KGScore):
    # Shared scoring layer for RotH (hyperbolic=True) and RotE (hyperbolic=False): rotate the
    # subject embedding, translate it by the relation embedding, and score by the negated
    # squared distance to the object embedding, plus per-node biases.
    def __init__(self, hyperbolic):
        self._hyperbolic = hyperbolic
        if self._hyperbolic:
            # Poincaré-ball versions of the three geometric primitives; the curvature `c`
            # is learnt (see `build`/`_curvature`)
            self._convert = lambda c, v: poincare_ball_exp(c, None, v)
            self._add = poincare_ball_mobius_add
            self._squared_distance = lambda c, v, w: tf.square(
                poincare_ball_distance(c, v, w)
            )
        else:
            # Euclidean versions of the same primitives; the curvature argument is ignored
            self._convert = lambda _c, v: v
            self._add = lambda _c, v, w: v + w
            self._squared_distance = lambda _c, v, w: tf.reduce_sum(
                tf.math.squared_difference(v, w), axis=-1
            )
        super().__init__()

    def embeddings(
        self, num_nodes, num_edge_types, dimension, initializer, regularizer
    ):
        if dimension % 2 != 0:
            # `_rotate` works on (even, odd) coordinate pairs, so the dimension must be even
            raise ValueError(
                f"embedding_dimension: expected an even integer, found {dimension}"
            )

        def embed(count, dim=dimension):
            return Embedding(
                count,
                dim,
                embeddings_initializer=initializer,
                embeddings_regularizer=regularizer,
            )

        # nodes: an embedding vector plus a scalar bias; edge types: a translation vector
        # plus one rotation angle per coordinate pair
        nodes = [embed(num_nodes), embed(num_nodes, 1)]
        edge_types = [embed(num_edge_types), embed(num_edge_types, dimension // 2)]
        return nodes, edge_types

    def build(self, input_shapes):
        if self._hyperbolic:
            # raw parameter; `_curvature` maps it through softplus to keep curvature positive
            self.curvature_prime = self.add_weight(shape=(1,), name="curvature_prime")
        else:
            self.curvature_prime = None
        super().build(input_shapes)

    def _curvature(self):
        assert self.built
        if not self._hyperbolic:
            return tf.constant([0.0])
        return tf.math.softplus(self.curvature_prime)

    def _rotate(self, theta, emb):
        # output shape: broadcast of the angle and embedding shapes
        shape = tf.maximum(tf.shape(theta), tf.shape(emb))
        # manual rotation matrix
        cos = tf.math.cos(theta)
        sin = tf.math.sin(theta)
        # rotate each (even, odd) coordinate pair by its angle, then re-interleave the halves
        evens = cos * emb[..., ::2] - sin * emb[..., 1::2]
        odds = sin * emb[..., ::2] + cos * emb[..., 1::2]
        return tf.reshape(tf.stack([evens, odds], axis=-1), shape)

    def call(self, inputs):
        # subject embedding/bias, relation translation/angles, object embedding/bias
        e_s, b_s, r_r, theta_r, e_o, b_o = inputs
        curvature = self._curvature()
        # biases are stored as size-1 embeddings; drop the trailing axis
        b_s = tf.squeeze(b_s, axis=-1)
        b_o = tf.squeeze(b_o, axis=-1)
        eh_s = self._convert(curvature, e_s)
        rh_r = self._convert(curvature, r_r)
        eh_o = self._convert(curvature, e_o)
        rotated_s = self._rotate(theta_r, eh_s)
        d = self._squared_distance(
            curvature, self._add(curvature, rotated_s, rh_r), eh_o
        )
        return -d + b_s + b_o

    def bulk_scoring(
        self, all_n_embs, _extra_data, s_embs, r_embs, o_embs,
    ):
        curvature = self._curvature()
        # reshape everything so the arithmetic broadcasts to num_nodes × batch_size (× dim)
        e_all, b_all = all_n_embs
        e_all = e_all[:, None, :]
        b_all = b_all[:, None, 0]
        e_s, b_s = s_embs
        e_s = e_s[None, :, :]
        b_s = b_s[None, :, 0]
        r_r, theta_r = r_embs
        r_r = r_r[None, :, :]
        theta_r = theta_r[None, :, :]
        e_o, b_o = o_embs
        e_o = e_o[None, :, :]
        b_o = b_o[None, :, 0]
        eh_s = self._convert(curvature, e_s)
        rh_r = self._convert(curvature, r_r)
        rotated_s = self._rotate(theta_r, eh_s)
        d_mod_o = self._squared_distance(
            curvature, self._add(curvature, rotated_s, rh_r), e_all
        )
        mod_o_pred = -d_mod_o + b_s + b_all
        # drop references to the large modified-object intermediates before building the
        # (equally large) modified-subject ones — presumably to reduce peak memory
        del eh_s, d_mod_o, rotated_s
        eh_o = self._convert(curvature, e_o)
        eh_all = self._convert(curvature, e_all)
        rotated_all = self._rotate(theta_r, eh_all)
        d_mod_s = self._squared_distance(
            curvature, self._add(curvature, rotated_all, rh_r), e_o
        )
        mod_s_pred = -d_mod_s + b_all + b_o
        # eager tensors -> NumPy, as the ranking code expects arrays
        return mod_o_pred.numpy(), mod_s_pred.numpy()
@experimental(reason="demo is missing", issues=[1664])
class RotH(KGModel):
    """
    Embedding layers and a RotH scoring layer implementing the RotH knowledge graph embedding
    algorithm of https://arxiv.org/abs/2005.00545: triples are scored by rotating and
    translating the subject embedding in hyperbolic (Poincaré-ball) space and measuring its
    distance to the object embedding.

    .. seealso::

       Related models:

       - other knowledge graph models, see :class:`.KGTripleGenerator` for a full list
       - :class:`.RotE` for the Euclidean version of this hyperbolic model

       Appropriate data generator: :class:`.KGTripleGenerator`.

    Args:
        generator (KGTripleGenerator): A generator of triples to feed into the model.
        embedding_dimension (int): the dimension of the embeddings; must be even. Each node
            learns a vector in ``R^embedding_dimension`` plus a scalar bias, and each edge
            type learns a pair of vectors in ``R^embedding_dimension`` and
            ``R^(embedding_dimension / 2)``.
        embeddings_initializer (str or func, optional): The initialiser to use for the embeddings.
        embeddings_regularizer (str or func, optional): The regularizer to use for the embeddings.
    """

    def __init__(
        self,
        generator,
        embedding_dimension,
        embeddings_initializer="normal",
        embeddings_regularizer=None,
    ):
        # hyperbolic=True selects the Poincaré-ball variant of the shared scoring layer
        scoring = RotHEScore(hyperbolic=True)
        super().__init__(
            generator,
            scoring,
            embedding_dimension=embedding_dimension,
            embeddings_initializer=embeddings_initializer,
            embeddings_regularizer=embeddings_regularizer,
        )
@experimental(reason="demo is missing", issues=[1664])
class RotE(KGModel):
    """
    Embedding layers and a RotE scoring layer implementing the RotE knowledge graph embedding
    algorithm of https://arxiv.org/pdf/2005.00545.pdf: the Euclidean counterpart of RotH,
    scoring triples by rotating and translating the subject embedding and measuring its
    distance to the object embedding.

    .. seealso::

       Related models:

       - other knowledge graph models, see :class:`.KGTripleGenerator` for a full list
       - :class:`.RotH` for the hyperbolic version of this Euclidean model

       Appropriate data generator: :class:`.KGTripleGenerator`.

    Args:
        generator (KGTripleGenerator): A generator of triples to feed into the model.
        embedding_dimension (int): the dimension of the embeddings; must be even. Each node
            learns a vector in ``R^embedding_dimension`` plus a scalar bias, and each edge
            type learns a pair of vectors in ``R^embedding_dimension`` and
            ``R^(embedding_dimension / 2)``.
        embeddings_initializer (str or func, optional): The initialiser to use for the embeddings.
        embeddings_regularizer (str or func, optional): The regularizer to use for the embeddings.
    """

    def __init__(
        self,
        generator,
        embedding_dimension,
        embeddings_initializer="normal",
        embeddings_regularizer=None,
    ):
        # hyperbolic=False selects the Euclidean variant of the shared scoring layer
        scoring = RotHEScore(hyperbolic=False)
        super().__init__(
            generator,
            scoring,
            embedding_dimension=embedding_dimension,
            embeddings_initializer=embeddings_initializer,
            embeddings_regularizer=embeddings_regularizer,
        )
def _ranks_from_comparisons(greater, greater_equal, tie_breaking):
strict = 1 + greater.sum(axis=0)
# with_ties - strict = the number of elements exactly equal (including the true edge itself)
with_ties = greater_equal.sum(axis=0)
if tie_breaking == "top":
return strict
elif tie_breaking == "bottom":
return with_ties
elif tie_breaking == "random":
return np.random.randint(strict, with_ties + 1)
else:
raise ValueError(
f"tie_breaking: expected 'top', 'bottom' or 'random', found {tie_breaking!r}"
)
def _ranks_from_score_columns(
    pred,
    *,
    true_modified_node_ilocs,
    unmodified_node_ilocs,
    true_rel_ilocs,
    modified_object,
    known_edges_graph,
    tie_breaking,
):
    """
    Compute the raw and filtered ranks of a set of true edges ``E = (s, r, o)`` against all
    mutations of one end of them, e.g. ``E' = (s, r, n)`` for "modified-object".

    The raw rank is the total number of edges scored higher than the true edge ``E``, and the
    filtered rank is the total number of unknown edges (not in ``known_edges_graph``).

    Args:
        pred: a 2D array: each column represents the scores for a single true edge and its
            mutations, where the row indicates the ``n`` in ``E'`` (e.g. row 0 corresponds to ``n``
            = node with iloc 0)
        true_modified_node_ilocs: an array of ilocs of the actual node that was modified, that is,
            ``o`` for modified-object and ``s`` for modified-subject; index ``i`` corresponds to
            the iloc for column ``pred[:, i]``.
        unmodified_node_ilocs: similar to ``true_modified_node_ilocs``, except for the other end of
            the edge: the node that was not modified.
        true_rel_ilocs: similar to ``true_modified_node_ilocs``, except for the relationship type of
            the edge (``r``).
        modified_object (bool): whether the object was modified (``True``), or the subject
            (``False``)
        known_edges_graph (StellarGraph): a graph containing all the known edges that should be
            ignored when computing filtered ranks
        tie_breaking (str): how to rank candidates whose score ties with the true edge's;
            passed straight through to ``_ranks_from_comparisons`` ('top', 'bottom' or 'random').

    Returns:
        a tuple of raw ranks and filtered ranks, each is an array of integers >= 1 where index ``i``
        corresponds to the rank of the true edge among all of the scores in column ``pred[:, i]``.
    """
    batch_size = len(true_modified_node_ilocs)
    assert pred.shape == (known_edges_graph.number_of_nodes(), batch_size)
    assert unmodified_node_ilocs.shape == true_rel_ilocs.shape == (batch_size,)
    # the score of the true edge, for each edge in the batch (this indexes in lock-step,
    # i.e. [pred[true_modified_node_ilocs[0], range(batch_size)[0]], ...])
    true_scores = pred[true_modified_node_ilocs, range(batch_size)]
    # for each column, compare all the scores against the score of the true edge
    greater = pred > true_scores
    greater_equal = pred >= true_scores
    # the raw rank is the number of elements scored higher than the true edge
    raw_rank = _ranks_from_comparisons(greater, greater_equal, tie_breaking)
    # the filtered rank is the number of unknown elements scored higher, where an element is
    # known if the edge (s, r, n) (for modified-object) or (n, r, o) (for modified-subject)
    # exists in known_edges_graph.
    if modified_object:
        neigh_func = known_edges_graph.out_nodes
    else:
        neigh_func = known_edges_graph.in_nodes
    for batch_column, (unmodified, r) in enumerate(
        zip(unmodified_node_ilocs, true_rel_ilocs)
    ):
        # nodes that form a known edge with this (unmodified, r) pair count as neither
        # strictly-greater nor tied, so they don't contribute to the filtered rank
        this_neighs = neigh_func(unmodified, edge_types=[r], use_ilocs=True)
        greater[this_neighs, batch_column] = False
        greater_equal[this_neighs, batch_column] = False
    # the actual elements should be counted as equal, whether or not it was a known edge or not
    greater_equal[true_modified_node_ilocs, range(batch_size)] = True
    filtered_rank = _ranks_from_comparisons(greater, greater_equal, tie_breaking)
    assert raw_rank.shape == filtered_rank.shape == (batch_size,)
    return raw_rank, filtered_rank
| 36,707 | 35.671329 | 229 | py |
stellargraph | stellargraph-master/stellargraph/layer/sort_pooling.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras.layers import Layer
from ..core.experimental import experimental
from ..core.validation import require_integer_in_range
class SortPooling(Layer):
    """
    Sort Pooling Keras layer.

    Note that sorting is performed using only the last column of the input tensor as stated in [1], "For convenience,
    we set the last graph convolution to have one channel and only used this single channel for sorting."

    [1] An End-to-End Deep Learning Architecture for Graph Classification, M. Zhang, Z. Cui, M. Neumann, and
    Y. Chen, AAAI-18, https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/viewPaper/17146

    .. seealso:: The :class:`.DeepGraphCNN` model uses this class for graph classification.

    Args:
        k (int): The number of rows of output tensor.
        flatten_output (bool): If True then the output tensor is reshaped to vector for each element in the batch.
    """

    def __init__(self, k, flatten_output=False):
        super().__init__()
        require_integer_in_range(k, "k", min_val=1)
        # the layer only sorts, pads and truncates — it has no trainable weights
        self.trainable = False
        self.k = k
        self.flatten_output = flatten_output

    def get_config(self):
        """
        Gets class configuration for Keras serialization. Used by Keras model serialization.

        Returns:
            A dictionary that contains the config of the layer
        """
        return {"k": self.k, "flatten_output": self.flatten_output}

    def compute_output_shape(self, input_shapes):
        """
        Computes the output shape of the layer.

        Args:
            input_shapes (tuple of int): shape of the input; entries can be None for free
                dimensions, instead of an integer.

        Returns:
            An output shape tuple.
        """
        if self.flatten_output:
            return input_shapes[0], self.k * input_shapes[2], 1
        else:
            return input_shapes[0], self.k, input_shapes[2]

    def _sort_tensor_with_mask(self, inputs):
        # Sort one graph's rows by the last feature column (descending), then zero-pad back
        # to the original row count.
        # NOTE(review): the argsort indices are computed on the masked subset but applied to
        # the unmasked tensor — this assumes the masked-out rows are trailing padding rows;
        # confirm against the batch generators that produce the mask.
        embeddings, mask = inputs[0], inputs[1]
        masked_sorted_embeddings = tf.gather(
            embeddings,
            tf.argsort(
                tf.boolean_mask(embeddings, mask)[..., -1],
                axis=0,
                direction="DESCENDING",
            ),
        )
        # re-pad with zero rows so every graph in the batch keeps the same number of rows
        embeddings = tf.pad(
            masked_sorted_embeddings,
            [
                [0, (tf.shape(embeddings)[0] - tf.shape(masked_sorted_embeddings)[0])],
                [0, 0],
            ],
        )
        return embeddings

    def call(self, embeddings, mask):
        """
        Applies the layer.

        Args:
            embeddings (tensor): the node features (size B x N x Sum F_i)
                where B is the batch size, N is the number of nodes in the largest graph in the batch, and
                F_i is the dimensionality of node features output from the i-th convolutional layer.
            mask (tensor): a boolean mask (size B x N)

        Returns:
            Keras Tensor that represents the output of the layer.
        """
        # sort each graph in the batch independently
        outputs = tf.map_fn(
            self._sort_tensor_with_mask, (embeddings, mask), dtype=embeddings.dtype
        )
        # padding or truncation based on the value of self.k and the graph size (number of nodes)
        outputs_shape = tf.shape(outputs)
        outputs = tf.cond(
            tf.math.less(outputs_shape, self.k)[1],
            true_fn=lambda: tf.pad(
                outputs, [[0, 0], [0, (self.k - outputs_shape)[1]], [0, 0]]
            ),
            false_fn=lambda: outputs[:, : self.k, :],
        )
        if self.flatten_output:
            # (B, k, F) -> (B, k * F, 1): one column vector per graph
            outputs = tf.reshape(
                outputs, [outputs_shape[0], embeddings.shape[-1] * self.k, 1]
            )
        return outputs
| 4,421 | 32.5 | 117 | py |
stellargraph | stellargraph-master/stellargraph/layer/node2vec.py | # -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
node2vec
"""
__all__ = ["Node2Vec"]
from tensorflow.keras import Input
from tensorflow.keras.layers import Reshape, Embedding
import math
from tensorflow import keras
import warnings
from .misc import deprecated_model_function
from ..mapper import Node2VecLinkGenerator, Node2VecNodeGenerator
def _require_without_generator(value, name):
if value is not None:
return value
else:
raise ValueError(
f"{name}: expected a value for 'node_num' and 'multiplicity' when "
f"'generator' is not provided, found {name}=None."
)
class Node2Vec:
    """
    Implementation of the Node2Vec algorithm of A. Grover and J. Leskovec with Keras layers.
    see: https://snap.stanford.edu/node2vec/

    The model minimally requires specification of the embedding size and a generator object.

    .. seealso::

       Examples using Node2Vec:

       - `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/keras-node2vec-node-classification.html>`__
       - `unsupervised representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/keras-node2vec-embeddings.html>`__
       - `comparison of link prediction algorithms <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/homogeneous-comparison-link-prediction.html>`__
       - using Gensim Word2Vec, not this class: `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/node2vec-node-classification.html>`__, `node classification with edge weights <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/node2vec-weighted-node-classification.html>`__, `link prediction <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/node2vec-link-prediction.html>`__, `unsupervised representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/node2vec-embeddings.html>`__.

       Appropriate data generators: :class:`.Node2VecNodeGenerator`, :class:`.Node2VecLinkGenerator`.

       Related functionality: :class:`.BiasedRandomWalk` does the underlying random walks.

    Args:
        emb_size (int): The dimension of node embeddings.
        generator (Sequence): A NodeSequence or LinkSequence.
        node_num (int, optional): The number of nodes in the given graph. Required (along with
            ``multiplicity``) when ``generator`` is not provided.
        multiplicity (int, optional): The number of nodes to process at a time. This is 1 for a
            node inference and 2 for link inference (currently no others are supported).
    """

    def __init__(self, emb_size, generator=None, node_num=None, multiplicity=None):
        # Get node_num and multiplicity from the generator if it is given; otherwise both
        # must be supplied explicitly.
        self.generator = generator
        if generator is not None:
            self._get_sizes_from_generator(generator)
        else:
            self.input_node_num = _require_without_generator(node_num, "node_num")
            self.multiplicity = _require_without_generator(multiplicity, "multiplicity")

        # Model parameters
        self.emb_size = emb_size

        # Initialise the target embedding layer (the input-to-hidden table)
        target_embedding_initializer = keras.initializers.RandomUniform(
            minval=-1.0, maxval=1.0
        )
        self.target_embedding = Embedding(
            self.input_node_num,
            self.emb_size,
            input_length=1,
            name="target_embedding",
            embeddings_initializer=target_embedding_initializer,
        )

        # Initialise the context embedding layer (the hidden-to-output table)
        context_embedding_initializer = keras.initializers.TruncatedNormal(
            stddev=1.0 / math.sqrt(self.emb_size * 1.0)
        )
        self.context_embedding = Embedding(
            self.input_node_num,
            self.emb_size,
            input_length=1,
            name="context_embedding",
            embeddings_initializer=context_embedding_initializer,
        )

    def _get_sizes_from_generator(self, generator):
        """
        Sets node_num and multiplicity from the generator.

        Args:
            generator: The supplied generator.

        Raises:
            TypeError: if the generator is not a Node2Vec generator.
            ValueError: if the graph is heterogeneous (more than one node type).
        """
        if not isinstance(generator, (Node2VecNodeGenerator, Node2VecLinkGenerator)):
            raise TypeError(
                "Generator should be an instance of Node2VecNodeGenerator or Node2VecLinkGenerator"
            )
        self.multiplicity = generator.multiplicity
        self.input_node_num = generator.graph.number_of_nodes()
        if len(list(generator.graph.node_types)) > 1:
            raise ValueError("Node2Vec called on graph with more than one node type.")

    def __call__(self, xin, embedding):
        """
        Construct node representations from node ids through a look-up table.

        Args:
            xin (Keras Tensor): Batch input node ids.
            embedding (str): "target" for target_embedding, "context" for context_embedding

        Returns:
            Output tensor of shape ``(batch size, emb_size)``.

        Raises:
            ValueError: if ``embedding`` is neither "target" nor "context".
        """
        if embedding == "target":
            h_layer = self.target_embedding(xin)
        elif embedding == "context":
            h_layer = self.context_embedding(xin)
        else:
            raise ValueError(
                'wrong embedding argument is supplied: {}, should be "target" or "context"'.format(
                    embedding
                )
            )
        # drop the input_length=1 axis: (batch, 1, emb_size) -> (batch, emb_size)
        h_layer = Reshape((self.emb_size,))(h_layer)
        return h_layer

    def _node_model(self, embedding="target"):
        """
        Builds a Node2Vec model for node prediction.

        Args:
            embedding (str): "target" for target_embedding, "context" for context_embedding

        Returns:
            tuple: ``(x_inp, x_out)`` where ``x_inp`` is a Keras input tensor
            for the Node2Vec model and ``x_out`` is the Keras tensor
            for the Node2Vec model output.
        """
        # Create tensor inputs
        x_inp = Input(shape=(1,))
        # Output from Node2Vec model
        x_out = self(x_inp, embedding)
        return x_inp, x_out

    def _link_model(self):
        """
        Builds a Node2Vec model for link or node pair prediction.

        Returns:
            tuple: (x_inp, x_out) where ``x_inp`` is a list of Keras input tensors for (src, dst) nodes in the node pairs
            and ``x_out`` is a list of output tensors for (src, dst) nodes in the node pairs.
        """
        # Expose input and output sockets of the model: the source uses the target table and
        # the destination uses the context table, as in word2vec skip-gram training.
        x_inp_src, x_out_src = self._node_model("target")
        x_inp_dst, x_out_dst = self._node_model("context")
        x_inp = [x_inp_src, x_inp_dst]
        x_out = [x_out_src, x_out_dst]
        return x_inp, x_out

    def in_out_tensors(self, multiplicity=None):
        """
        Builds a Node2Vec model for node or link/node pair prediction, depending on the generator used to construct
        the model (whether it is a node or link/node pair generator).

        Args:
            multiplicity (int, optional): 1 for a node model, 2 for a link/node pair model;
                defaults to the multiplicity the instance was constructed with.

        Returns:
            tuple: ``(x_inp, x_out)``, where ``x_inp`` contains Keras input tensor(s)
            for the specified Node2Vec model (either node or link/node pair model) and ``x_out`` contains
            model output tensor(s) of shape (batch_size, self.emb_size)
        """
        if multiplicity is None:
            multiplicity = self.multiplicity
        # bug fix: these checks previously read self.multiplicity, silently ignoring an
        # explicitly passed `multiplicity` argument
        if multiplicity == 1:
            return self._node_model()
        elif multiplicity == 2:
            return self._link_model()
        else:
            raise ValueError("Currently only multiplicities of 1 and 2 are supported.")

    def default_model(self, flatten_output=True):
        # deprecated alias retained for backwards compatibility; `flatten_output` is unused
        warnings.warn(
            "The .default_model() method is deprecated. Please use .in_out_tensors() method instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.build()

    node_model = deprecated_model_function(_node_model, "node_model")
    link_model = deprecated_model_function(_link_model, "link_model")
    build = deprecated_model_function(in_out_tensors, "build")
| 8,653 | 38.158371 | 593 | py |
stellargraph | stellargraph-master/stellargraph/layer/rgcn.py | # -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer, Lambda, Dropout, Input
from tensorflow.keras import activations, initializers, constraints, regularizers
from .misc import SqueezedSparseConversion, deprecated_model_function, GatherIndices
from ..mapper.full_batch_generators import RelationalFullBatchNodeGenerator
class RelationalGraphConvolution(Layer):
    """
    Relational Graph Convolution (RGCN) Keras layer.

    Original paper: Modeling Relational Data with Graph Convolutional Networks.
    Thomas N. Kipf, Michael Schlichtkrull (2017). https://arxiv.org/pdf/1703.06103.pdf

    Notes:
      - The inputs are tensors with a batch dimension of 1:
        Keras requires this batch dimension, and for full-batch methods
        we only have a single "batch".

      - There are 1 + R inputs required (where R is the number of relationships): the node features,
        and a normalized adjacency matrix for each relationship

    .. seealso:: :class:`.RGCN` combines several of these layers.

    Args:
        units (int): dimensionality of output feature vectors
        num_relationships (int): the number of relationships in the graph
        num_bases (int): the number of basis matrices to use for parameterizing the weight matrices as described in
            the paper; defaults to 0. ``num_bases < 0`` triggers the default behaviour of ``num_bases = 0``
        activation (str or func): nonlinear activation applied to layer's output to obtain output features
        use_bias (bool): toggles an optional bias
        final_layer (bool): Deprecated, use ``tf.gather`` or :class:`.GatherIndices`
        kernel_initializer (str or func): The initialiser to use for the self kernel and also relational kernels if ``num_bases=0``.
        kernel_regularizer (str or func): The regulariser to use for the self kernel and also relational kernels if ``num_bases=0``.
        kernel_constraint (str or func): The constraint to use for the self kernel and also relational kernels if ``num_bases=0``.
        basis_initializer (str or func): The initialiser to use for the basis matrices.
        basis_regularizer (str or func): The regulariser to use for the basis matrices.
        basis_constraint (str or func): The constraint to use for the basis matrices.
        coefficient_initializer (str or func): The initialiser to use for the coefficients.
        coefficient_regularizer (str or func): The regulariser to use for the coefficients.
        coefficient_constraint (str or func): The constraint to use for the coefficients.
        bias_initializer (str or func): The initialiser to use for the bias.
        bias_regularizer (str or func): The regulariser to use for the bias.
        bias_constraint (str or func): The constraint to use for the bias.
        input_dim (int, optional): the size of the input shape, if known.
        kwargs: any additional arguments to pass to :class:`tensorflow.keras.layers.Layer`
    """

    def __init__(
        self,
        units,
        num_relationships,
        num_bases=0,
        activation=None,
        use_bias=True,
        final_layer=None,
        input_dim=None,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer="zeros",
        bias_regularizer=None,
        bias_constraint=None,
        basis_initializer="glorot_uniform",
        basis_regularizer=None,
        basis_constraint=None,
        coefficient_initializer="glorot_uniform",
        coefficient_regularizer=None,
        coefficient_constraint=None,
        **kwargs
    ):
        if "input_shape" not in kwargs and input_dim is not None:
            kwargs["input_shape"] = (input_dim,)

        # Initialise the base Layer exactly once. (A second, duplicate
        # super().__init__(**kwargs) call at the end of this method has been
        # removed: re-running Layer.__init__ would reset base-layer state.)
        super().__init__(**kwargs)

        if not isinstance(num_bases, int):
            raise TypeError("num_bases should be an int")

        if not isinstance(units, int):
            raise TypeError("units should be an int")

        if units <= 0:
            raise ValueError("units should be positive")

        if not isinstance(num_relationships, int):
            raise TypeError("num_relationships should be an int")

        if num_relationships <= 0:
            raise ValueError("num_relationships should be positive")

        self.units = units
        self.num_relationships = num_relationships
        self.num_bases = num_bases
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)
        self.basis_initializer = initializers.get(basis_initializer)
        self.basis_regularizer = regularizers.get(basis_regularizer)
        self.basis_constraint = constraints.get(basis_constraint)
        self.coefficient_initializer = initializers.get(coefficient_initializer)
        self.coefficient_regularizer = regularizers.get(coefficient_regularizer)
        self.coefficient_constraint = constraints.get(coefficient_constraint)

        if final_layer is not None:
            raise ValueError(
                "'final_layer' is no longer supported, use 'tf.gather' or 'GatherIndices' separately"
            )

    def get_config(self):
        """
        Gets class configuration for Keras serialization.
        Used by Keras model serialization.

        Returns:
            A dictionary that contains the config of the layer
        """
        config = {
            "units": self.units,
            "use_bias": self.use_bias,
            "activation": activations.serialize(self.activation),
            "kernel_initializer": initializers.serialize(self.kernel_initializer),
            "basis_initializer": initializers.serialize(self.basis_initializer),
            "coefficient_initializer": initializers.serialize(
                self.coefficient_initializer
            ),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
            "basis_regularizer": regularizers.serialize(self.basis_regularizer),
            "coefficient_regularizer": regularizers.serialize(
                self.coefficient_regularizer
            ),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "basis_constraint": constraints.serialize(self.basis_constraint),
            "coefficient_constraint": constraints.serialize(
                self.coefficient_constraint
            ),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "num_relationships": self.num_relationships,
            "num_bases": self.num_bases,
        }

        base_config = super().get_config()
        return {**base_config, **config}

    def compute_output_shape(self, input_shapes):
        """
        Computes the output shape of the layer.

        Args:
            input_shapes (list of tuple of int): the node-feature shape
                followed by one adjacency-matrix shape per relationship.
                Shape tuples can include None for free dimensions,
                instead of an integer.

        Returns:
            An output shape tuple.
        """
        # Only the feature shape determines the output shape. The previous
        # two-element unpacking (`feature_shape, A_shape = input_shapes`)
        # raised a ValueError whenever there was more than one relationship.
        feature_shape = input_shapes[0]

        batch_dim = feature_shape[0]
        out_dim = feature_shape[1]

        return batch_dim, out_dim, self.units

    def build(self, input_shapes):
        """
        Builds the layer

        Args:
            input_shapes (list of int): shapes of the layer's inputs
                (node features, node_indices, and adjacency matrices)
        """
        feat_shape = input_shapes[0]
        input_dim = int(feat_shape[-1])

        if self.num_bases > 0:
            # creates a kernel for each edge type/relationship in the graph
            # each kernel is a linear combination of basis matrices
            # the basis matrices are shared for all edge types/relationships
            # each edge type has a different set of learnable coefficients

            # initialize the shared basis matrices
            self.bases = self.add_weight(
                shape=(input_dim, self.units, self.num_bases),
                initializer=self.basis_initializer,
                name="bases",
                regularizer=self.basis_regularizer,
                constraint=self.basis_constraint,
            )

            # initialize the coefficients for each edge type/relationship
            self.coefficients = [
                self.add_weight(
                    shape=(self.num_bases,),
                    initializer=self.coefficient_initializer,
                    name="coeff",
                    regularizer=self.coefficient_regularizer,
                    constraint=self.coefficient_constraint,
                )
                for _ in range(self.num_relationships)
            ]

            # To support eager TF the relational_kernels need to be explicitly calculated
            # each time the layer is called
            self.relational_kernels = None

        else:
            self.bases = None
            self.coefficients = None
            self.relational_kernels = [
                self.add_weight(
                    shape=(input_dim, self.units),
                    name="relational_kernels",
                    initializer=self.kernel_initializer,
                    regularizer=self.kernel_regularizer,
                    constraint=self.kernel_constraint,
                )
                for _ in range(self.num_relationships)
            ]

        self.self_kernel = self.add_weight(
            shape=(input_dim, self.units),
            name="self_kernel",
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )

        if self.use_bias:
            self.bias = self.add_weight(
                shape=(self.units,),
                initializer=self.bias_initializer,
                name="bias",
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        else:
            self.bias = None

        self.built = True

    def call(self, inputs):
        """
        Applies the layer.

        Args:
            inputs (list): a list of 1 + R input tensors that includes
                node features (size 1 x N x F),
                and a graph adjacency matrix (size N x N) for each relationship.
                R is the number of relationships in the graph (edge type),
                N is the number of nodes in the graph, and
                F is the dimensionality of node features.

        Returns:
            Keras Tensor that represents the output of the layer.
        """
        features, *As = inputs
        batch_dim, n_nodes, _ = K.int_shape(features)

        if batch_dim != 1:
            raise ValueError(
                "Currently full-batch methods only support a batch dimension of one"
            )

        # Remove singleton batch dimension
        features = K.squeeze(features, 0)

        # Self-connection term of the RGCN update
        output = K.dot(features, self.self_kernel)

        if self.relational_kernels is None:
            # explicitly calculate the relational kernels if basis matrices are used
            relational_kernels = [
                tf.einsum("ijk,k->ij", self.bases, coeff) for coeff in self.coefficients
            ]
        else:
            relational_kernels = self.relational_kernels

        # Aggregate neighbour messages per relationship: A_r @ X @ W_r
        for i in range(self.num_relationships):
            h_graph = K.dot(As[i], features)
            output += K.dot(h_graph, relational_kernels[i])

        # Add optional bias & apply activation
        if self.bias is not None:
            output += self.bias
        output = self.activation(output)

        # Add batch dimension back if we removed it
        if batch_dim == 1:
            output = K.expand_dims(output, 0)

        return output
class RGCN:
    """
    A stack of Relational Graph Convolutional layers that implement a relational graph
    convolution neural network model as in https://arxiv.org/pdf/1703.06103.pdf

    The model minimally requires specification of the layer sizes as a list of int
    corresponding to the feature dimensions for each hidden layer,
    activation functions for each hidden layers, and a generator object.

    To use this class as a Keras model, the features and preprocessed adjacency matrix
    should be supplied using the :class:`.RelationalFullBatchNodeGenerator` class.
    The generator object should be instantiated as follows::

        generator = RelationalFullBatchNodeGenerator(G)

    Note that currently the RGCN class is compatible with both sparse and dense adjacency
    matrices and the :class:`.RelationalFullBatchNodeGenerator` will default to sparse.

    Notes:
      - The inputs are tensors with a batch dimension of 1. These are provided by the \
        :class:`.RelationalFullBatchNodeGenerator` object.

      - The nodes provided to the :meth:`.RelationalFullBatchNodeGenerator.flow` method are
        used by the final layer to select the predictions for those nodes in order.
        However, the intermediate layers before the final layer order the nodes
        in the same way as the adjacency matrix.

    Examples:
        Creating a RGCN node classification model from an existing :class:`.StellarGraph`
        object ``G``::

            generator = RelationalFullBatchNodeGenerator(G)
            rgcn = RGCN(
                    layer_sizes=[32, 4],
                    activations=["elu","softmax"],
                    bases=10,
                    generator=generator,
                    dropout=0.5
                )
            x_inp, predictions = rgcn.in_out_tensors()

    .. seealso::

       Examples using RGCN:

       - `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/rgcn-node-classification.html>`__
       - `unsupervised representation learning with Deep Graph Infomax <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/deep-graph-infomax-embeddings.html>`__

       Appropriate data generator: :class:`.RelationalFullBatchNodeGenerator`.

       Related model: :class:`.GCN` is a specialisation for a single edge type.

       :class:`.RelationalGraphConvolution` is the base layer out of which an RGCN model is built.

    Args:
        layer_sizes (list of int): Output sizes of RGCN layers in the stack.
        generator (RelationalFullBatchNodeGenerator): The generator instance.
        num_bases (int): Specifies number of basis matrices to use for the weight matrices of the RGCN layer
            as in the paper. Defaults to 0 which specifies that no basis decomposition is used.
        bias (bool): If True, a bias vector is learnt for each layer in the RGCN model.
        dropout (float): Dropout rate applied to input features of each RGCN layer.
        activations (list of str or func): Activations applied to each layer's output;
            defaults to ``['relu', ..., 'relu']``.
        kernel_initializer (str or func, optional): The initialiser to use for the weights of each layer.
        kernel_regularizer (str or func, optional): The regulariser to use for the weights of each layer.
        kernel_constraint (str or func, optional): The constraint to use for the weights of each layer.
        bias_initializer (str or func, optional): The initialiser to use for the bias of each layer.
        bias_regularizer (str or func, optional): The regulariser to use for the bias of each layer.
        bias_constraint (str or func, optional): The constraint to use for the bias of each layer.
    """

    def __init__(
        self,
        layer_sizes,
        generator,
        bias=True,
        num_bases=0,
        dropout=0.0,
        activations=None,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer="zeros",
        bias_regularizer=None,
        bias_constraint=None,
    ):
        if not isinstance(generator, RelationalFullBatchNodeGenerator):
            raise TypeError(
                "Generator should be an instance of RelationalFullBatchNodeGenerator"
            )

        n_layers = len(layer_sizes)
        self.layer_sizes = layer_sizes
        self.bias = bias
        self.num_bases = num_bases
        self.dropout = dropout

        # Copy required information from generator
        self.multiplicity = generator.multiplicity
        self.n_nodes = generator.features.shape[0]
        self.n_features = generator.features.shape[1]
        self.n_edge_types = len(generator.As)

        # Check if the generator is producing a sparse matrix
        self.use_sparse = generator.use_sparse

        # Activation function for each layer; default to ReLU throughout.
        # (Assigned only after validation/defaulting — previously the raw
        # argument, possibly None, was also stored before this check.)
        if activations is None:
            activations = ["relu"] * n_layers
        elif len(activations) != n_layers:
            raise ValueError(
                "Invalid number of activations; require one function per layer"
            )
        self.activations = activations

        # Initialize a stack of RGCN layers: Dropout then convolution, per layer.
        self._layers = []
        for ii in range(n_layers):
            self._layers.append(Dropout(self.dropout))
            self._layers.append(
                RelationalGraphConvolution(
                    self.layer_sizes[ii],
                    num_relationships=len(generator.As),
                    num_bases=self.num_bases,
                    activation=self.activations[ii],
                    use_bias=self.bias,
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer,
                    kernel_constraint=kernel_constraint,
                    bias_initializer=bias_initializer,
                    bias_regularizer=bias_regularizer,
                    bias_constraint=bias_constraint,
                )
            )

    def __call__(self, x):
        """
        Apply a stack of RGCN layers to the inputs.
        The input tensors are expected to be a list of the following:
        [Node features shape (1, N, F), Output indices (1, Z)] +
        [Adjacency indices for each relationship (1, E, 2) for _ in range(R)]
        [Adjacency values for each relationship (1, E) for _ in range(R)]

        where N is the number of nodes, F the number of input features,
        E is the number of edges, Z the number of output nodes,
        R is the number of relationships in the graph (edge types).

        Args:
            x (Tensor): input tensors

        Returns:
            Output tensor
        """
        x_in, out_indices, *As = x

        # Currently we require the batch dimension to be one for full-batch methods
        batch_dim, n_nodes, _ = K.int_shape(x_in)
        if batch_dim != 1:
            raise ValueError(
                "Currently full-batch methods only support a batch dimension of one"
            )

        # Convert input indices & values to sparse matrices
        if self.use_sparse:
            # The first R tensors are index tensors, the next R are value tensors.
            As_indices = As[: self.n_edge_types]
            As_values = As[self.n_edge_types :]

            Ainput = [
                SqueezedSparseConversion(
                    shape=(n_nodes, n_nodes), dtype=As_values[i].dtype
                )([As_indices[i], As_values[i]])
                for i in range(self.n_edge_types)
            ]

        # Otherwise, create dense matrices from input tensor
        else:
            Ainput = [Lambda(lambda A: K.squeeze(A, 0))(A_) for A_ in As]

        h_layer = x_in
        for layer in self._layers:
            if isinstance(layer, RelationalGraphConvolution):
                # For an RGCN layer add the adjacency matrices
                h_layer = layer([h_layer] + Ainput)
            else:
                # For other (non-graph) layers only supply the input tensor
                h_layer = layer(h_layer)

        # only return data for the requested nodes
        h_layer = GatherIndices(batch_dims=1)([h_layer, out_indices])

        return h_layer

    def _node_model(self):
        """
        Builds a RGCN model for node prediction

        Returns:
            tuple: ``(x_inp, x_out)``, where
            ``x_inp`` is a list of Keras input tensors for the RGCN model (containing node features,
            node indices, and the indices and values for the sparse adjacency matrices for each relationship),
            and ``x_out`` is a Keras tensor for the RGCN model output.
        """
        # Inputs for features & target indices
        x_t = Input(batch_shape=(1, self.n_nodes, self.n_features))
        out_indices_t = Input(batch_shape=(1, None), dtype="int32")

        # Create inputs for sparse or dense matrices
        if self.use_sparse:
            # Placeholders for the sparse adjacency matrix
            A_indices_t = [
                Input(batch_shape=(1, None, 2), dtype="int64")
                for i in range(self.n_edge_types)
            ]
            A_values_t = [
                Input(batch_shape=(1, None)) for i in range(self.n_edge_types)
            ]
            A_placeholders = A_indices_t + A_values_t

        else:
            # Placeholders for the dense adjacency matrix
            A_placeholders = [
                Input(batch_shape=(1, self.n_nodes, self.n_nodes))
                for i in range(self.n_edge_types)
            ]

        x_inp = [x_t, out_indices_t] + A_placeholders
        x_out = self(x_inp)

        # Flatten output by removing singleton batch dimension
        if x_out.shape[0] == 1:
            self.x_out_flat = Lambda(lambda x: K.squeeze(x, 0))(x_out)
        else:
            self.x_out_flat = x_out

        return x_inp, x_out

    def in_out_tensors(self, multiplicity=None):
        """
        Builds a RGCN model for node prediction. Link/node pair prediction will added in the future.

        Returns:
            tuple: ``(x_inp, x_out)``, where ``x_inp`` is a list of Keras input tensors
            for the specified RGCN model and ``x_out`` contains
            model output tensor(s) of shape ``(batch_size, layer_sizes[-1])``
        """
        if multiplicity is None:
            multiplicity = self.multiplicity

        if multiplicity == 1:
            return self._node_model()

        else:
            raise NotImplementedError(
                "Currently only node prediction is supported for RGCN."
            )

    # Deprecated alias: emits a DeprecationWarning and forwards to in_out_tensors.
    build = deprecated_model_function(in_out_tensors, "build")
| 23,809 | 39.424448 | 173 | py |
stellargraph | stellargraph-master/stellargraph/layer/deep_graph_infomax.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import GCN, GAT, APPNP, PPNP, GraphSAGE, DirectedGraphSAGE
from .misc import deprecated_model_function
from ..mapper import CorruptedGenerator
from tensorflow.keras.layers import Input, Lambda, Layer, GlobalAveragePooling1D
import tensorflow as tf
from tensorflow.keras import backend as K
import warnings
import numpy as np
__all__ = ["DeepGraphInfomax", "DGIDiscriminator"]
class DGIDiscriminator(Layer):
    """
    This Layer computes the Discriminator function for Deep Graph Infomax
    (https://arxiv.org/pdf/1809.10341.pdf): each node feature vector is scored
    against the graph summary vector through a learnt bilinear form.

    .. seealso:: :class:`.DeepGraphInfomax` uses this layer.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def build(self, input_shapes):
        feature_dim = input_shapes[0][-1]
        summary_dim = input_shapes[1][-1]

        # Bilinear weight matrix relating node features to the summary vector.
        self.kernel = self.add_weight(
            shape=(feature_dim, summary_dim),
            initializer="glorot_uniform",
            name="kernel",
            regularizer=None,
            constraint=None,
        )
        self.built = True

    def call(self, inputs):
        """
        Applies the layer to the inputs.

        Args:
            inputs: a list or tuple of tensors with shapes ``[(1, N, F), (1, F)]`` for full batch methods and shapes
                ``[(B, F), (F,)]`` for sampled node methods, containing the node features and a summary feature vector.
                Where ``N`` is the number of nodes in the graph, ``F`` is the feature dimension, and ``B`` is the batch size.

        Returns:
            a Tensor with shape ``(1, N)`` for full batch methods and shape ``(B,)`` for sampled node methods.
        """
        features, summary = inputs

        # score_i = features_i · (W @ summary): a bilinear form per node.
        transformed_summary = tf.linalg.matvec(self.kernel, summary)
        return tf.linalg.matvec(features, transformed_summary)
class DGIReadout(Layer):
    """
    This Layer computes the Readout function for Deep Graph Infomax
    (https://arxiv.org/pdf/1809.10341.pdf): the sigmoid of the mean node
    feature vector, giving a single summary vector for the graph.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def build(self, input_shapes):
        self.built = True

    def call(self, node_feats):
        """
        Applies the layer to the inputs.

        Args:
            node_feats: a tensor containing the batch node features from the base model. This has shape `(1, N, F)`
                for full batch methods and shape `(B, F)` for sampled node methods. Where `N` is the number of nodes
                in the graph, `F` is the feature dimension, and `B` is the batch size.

        Returns:
            a Tensor with shape `(1, F)` for full batch methods and shape `(F,)` for sampled node methods.
        """
        # Mean over the node axis, then squash into (0, 1).
        mean_feats = tf.reduce_mean(node_feats, axis=-2)
        return tf.math.sigmoid(mean_feats)
class DeepGraphInfomax:
    """
    A class to wrap stellargraph models for Deep Graph Infomax unsupervised training
    (https://arxiv.org/pdf/1809.10341.pdf).

    .. seealso::

       Examples using Deep Graph Infomax:

       - `unsupervised representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/deep-graph-infomax-embeddings.html>`__
       - `semi-supervised node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/gcn-deep-graph-infomax-fine-tuning-node-classification.html>`__

       Appropriate data generator: :class:`.CorruptedGenerator`.

    Args:
        base_model: the base stellargraph model class
        corrupted_generator (CorruptedGenerator, optional): the generator that
            produces (corrupted, original) input batches; declares which input
            tensors get corrupted. Omitting it is deprecated.
    """

    def __init__(self, base_model, corrupted_generator=None):
        if corrupted_generator is None:
            # Legacy path: infer which model inputs should be corrupted from
            # the base model's concrete type rather than from the generator.
            warnings.warn(
                "The 'corrupted_generator' parameter should be set to an instance of `CorruptedGenerator`, because the support for specific algorithms is being replaced by a more general form",
                DeprecationWarning,
                stacklevel=2,
            )
            if isinstance(base_model, (GCN, GAT, APPNP, PPNP)):
                # Full-batch models: only the node-feature input (index 0).
                self._corruptible_inputs_idxs = [0]
            elif isinstance(base_model, DirectedGraphSAGE):
                # One feature input per sampling slot of the directed tree.
                # NOTE: DirectedGraphSAGE must be checked before GraphSAGE —
                # it is (per the imports here) handled as a distinct case.
                self._corruptible_inputs_idxs = np.arange(base_model.max_slots)
            elif isinstance(base_model, GraphSAGE):
                # One feature input per hop, plus the head nodes themselves.
                self._corruptible_inputs_idxs = np.arange(base_model.max_hops + 1)
            else:
                raise TypeError(
                    f"base_model: expected GCN, GAT, APPNP, PPNP, GraphSAGE,"
                    f"or DirectedGraphSAGE, found {type(base_model).__name__}"
                )
        elif not isinstance(corrupted_generator, CorruptedGenerator):
            raise TypeError(
                f"corrupted_generator: expected a CorruptedGenerator, found {type(corrupted_generator).__name__}"
            )
        else:
            # Preferred path: the generator declares exactly which input
            # groups it corrupts; flatten the groups into one index list.
            self._corruptible_inputs_idxs = [
                idx
                for group in corrupted_generator.corrupt_index_groups
                for idx in group
            ]

        self.base_model = base_model
        # NOTE(review): _node_feats is never read or written elsewhere in
        # this class — presumably legacy state; confirm before removing.
        self._node_feats = None
        self._discriminator = DGIDiscriminator()

    def in_out_tensors(self):
        """
        A function to create the the Keras inputs and outputs for a Deep Graph Infomax model for unsupervised training.

        Note that the :func:`tensorflow.nn.sigmoid_cross_entropy_with_logits` loss must be used with this model.

        Example::

            dg_infomax = DeepGraphInfoMax(...)
            x_in, x_out = dg_infomax.in_out_tensors()
            model = Model(inputs=x_in, outputs=x_out)
            model.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits, ...)

        Returns:
            input and output layers for use with a Keras model
        """
        x_inp, node_feats = self.base_model.in_out_tensors()

        # New Input tensors mirroring the shapes of the corruptible inputs;
        # these carry the shuffled/corrupted features.
        x_corr = [
            Input(batch_shape=x_inp[i].shape) for i in self._corruptible_inputs_idxs
        ]

        # shallow copy normal inputs and replace corruptible inputs with new inputs
        x_in_corr = x_inp.copy()
        for i, x in zip(self._corruptible_inputs_idxs, x_corr):
            x_in_corr[i] = x

        # Run the base model a second time on the corrupted inputs (shared weights).
        node_feats_corr = self.base_model(x_in_corr)

        # Summary of the *uncorrupted* graph; both score sets compare against it.
        summary = DGIReadout()(node_feats)

        scores = self._discriminator([node_feats, summary])
        scores_corrupted = self._discriminator([node_feats_corr, summary])

        # Stack (positive, negative) scores along a trailing axis for the
        # sigmoid cross-entropy loss. Inputs are ordered corrupted-first.
        x_out = tf.stack([scores, scores_corrupted], axis=-1)

        return x_corr + x_inp, x_out

    def embedding_model(self):
        """
        Deprecated: use ``base_model.in_out_tensors`` instead. Deep Graph Infomax just trains the base model,
        and the model behaves as usual after training.
        """
        warnings.warn(
            f"The 'embedding_model' method is deprecated, use 'base_model.in_out_tensors' instead.",
            DeprecationWarning,
            stacklevel=2,
        )

        # these tensors should link into the weights that get trained by `build`
        x_emb_in, x_emb_out = self.base_model.in_out_tensors()

        # squeeze out batch dim of full batch models
        if len(x_emb_out.shape) == 3:
            squeeze_layer = Lambda(lambda x: K.squeeze(x, axis=0), name="squeeze")
            x_emb_out = squeeze_layer(x_emb_out)

        return x_emb_in, x_emb_out

    # Deprecated alias: emits a DeprecationWarning and forwards to in_out_tensors.
    build = deprecated_model_function(in_out_tensors, "build")
| 7,877 | 34.972603 | 193 | py |
stellargraph | stellargraph-master/stellargraph/layer/misc.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K
import warnings
class SqueezedSparseConversion(Layer):
    """
    Converts Keras tensors containing indices and values to a tensorflow sparse
    tensor. The input tensors are expected to have a batch dimension of 1 which
    will be removed before conversion to a matrix.

    This only works with a tensorflow Keras backend.

    Example::

        A_indices = Input(batch_shape=(1, None, 2), dtype="int64")
        A_values = Input(batch_shape=(1, None))
        Ainput = TFSparseConversion(shape=(N, N))([A_indices, A_values])

    Args:
        shape (list of int): The shape of the sparse matrix to create
        axis (int, optional): The singleton axis to squeeze from the input
            tensors; pass ``None`` to skip squeezing.
        dtype (str or tf.dtypes.DType): Data type for the created sparse matrix
    """

    def __init__(self, shape, axis=0, dtype=None):
        super().__init__(dtype=dtype)

        self.trainable = False
        self.supports_masking = True
        self.matrix_shape = shape
        self.axis = axis

        # Check backend
        if K.backend() != "tensorflow":
            raise RuntimeError(
                "SqueezedSparseConversion only supports the TensorFlow backend"
            )

    def get_config(self):
        # Include 'axis' so serializing and re-creating the layer round-trips
        # correctly; previously only shape and dtype were recorded, silently
        # resetting a non-default axis on deserialization.
        config = {"shape": self.matrix_shape, "axis": self.axis, "dtype": self.dtype}
        return config

    def compute_output_shape(self, input_shapes):
        return tuple(self.matrix_shape)

    def call(self, inputs):
        """
        Creates a TensorFlow `SparseTensor` from the inputs

        Args:
            inputs (list): Two input tensors containing
                matrix indices (size 1 x E x 2) of type int64, and
                matrix values (size 1 x E),
                where E is the number of non-zero entries in the matrix.

        Returns:
            TensorFlow SparseTensor that represents the converted sparse matrix.
        """
        # Here we squeeze the specified axis
        if self.axis is not None:
            indices = K.squeeze(inputs[0], self.axis)
            values = K.squeeze(inputs[1], self.axis)
        else:
            indices = inputs[0]
            values = inputs[1]

        if self.dtype is not None:
            values = K.cast(values, self.dtype)

        # Import tensorflow here so that the backend check will work without
        # tensorflow installed.
        import tensorflow as tf

        # Build sparse tensor for the matrix
        output = tf.SparseTensor(
            indices=indices, values=values, dense_shape=self.matrix_shape
        )
        return output
class GatherIndices(Layer):
    """
    Gathers slices from a data tensor, based on an indices tensor
    (``tf.gather`` in Layer form).

    Args:
        axis (int or Tensor): the data axis to gather from.
        batch_dims (int): the number of batch dimensions in the data and indices.
    """

    def __init__(self, axis=None, batch_dims=0, **kwargs):
        super().__init__(**kwargs)
        self._axis = axis
        self._batch_dims = batch_dims

    def get_config(self):
        base = super().get_config()
        base.update(axis=self._axis, batch_dims=self._batch_dims)
        return base

    def compute_output_shape(self, input_shapes):
        data_shape, indices_shape = input_shapes
        gather_axis = self._axis if self._axis is not None else self._batch_dims
        # per https://www.tensorflow.org/api_docs/python/tf/gather
        prefix = data_shape[:gather_axis]
        middle = indices_shape[self._batch_dims :]
        suffix = data_shape[gather_axis + 1 :]
        return prefix + middle + suffix

    def call(self, inputs):
        """
        Args:
            inputs (list): a pair of tensors, corresponding to the ``params`` and ``indices``
                parameters to ``tf.gather``.
        """
        params, idx = inputs
        return tf.gather(params, idx, axis=self._axis, batch_dims=self._batch_dims)
def deprecated_model_function(function, old_name):
    """
    Wrap ``function`` under a deprecated name: the returned wrapper emits a
    DeprecationWarning mentioning ``old_name`` and then delegates.

    Args:
        function (callable): the replacement implementation to delegate to.
        old_name (str): the deprecated public name, used in the warning text.

    Returns:
        callable: a wrapper with the same call signature as ``function``.
    """
    # Local import keeps this module's top-level imports unchanged.
    import functools

    # functools.wraps preserves __name__, __module__, __wrapped__ etc. so the
    # deprecated alias introspects like the real method (previously every
    # alias showed up as '_function_wrapper').
    @functools.wraps(function)
    def _function_wrapper(*args, **kwargs):
        warnings.warn(
            f"The '{old_name}' method is deprecated, use 'in_out_tensors' instead.",
            DeprecationWarning,
            stacklevel=2,
        )

        return function(*args, **kwargs)

    # Keep the explicit deprecation note rather than the docstring that
    # functools.wraps copies from the wrapped function.
    _function_wrapper.__doc__ = """Deprecated: use :meth:`in_out_tensors`."""
    return _function_wrapper
| 4,945 | 31.539474 | 97 | py |
stellargraph | stellargraph-master/stellargraph/layer/graph_classification.py | # -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import backend as K
from .misc import deprecated_model_function
from ..mapper import PaddedGraphGenerator
from .gcn import GraphConvolution
from .sort_pooling import SortPooling
from tensorflow.keras.layers import Input, Dropout, GlobalAveragePooling1D
class GCNSupervisedGraphClassification:
    """
    A stack of :class:`.GraphConvolution` layers together with a Keras `GlobalAveragePooling1D` layer (by default)
    that implement a supervised graph classification network using the GCN convolution operator
    (https://arxiv.org/abs/1609.02907).

    The model minimally requires specification of the GCN layer sizes as a list of int
    corresponding to the feature dimensions for each hidden layer,
    activation functions for each hidden layers, and a generator object.

    To use this class as a Keras model, the features and preprocessed adjacency matrix
    should be supplied using the :class:`.PaddedGraphGenerator` class.

    Examples:
        Creating a graph classification model from a list of :class:`.StellarGraph`
        objects (``graphs``). We also add two fully connected dense layers using the last one for binary classification
        with `softmax` activation::

            generator = PaddedGraphGenerator(graphs)
            model = GCNSupervisedGraphClassification(
                layer_sizes=[32, 32],
                activations=["elu","elu"],
                generator=generator,
                dropout=0.5
            )
            x_inp, x_out = model.in_out_tensors()
            predictions = Dense(units=8, activation='relu')(x_out)
            predictions = Dense(units=2, activation='softmax')(predictions)

    .. seealso::

       Examples using GCN graph classification:

       - `graph classification <https://stellargraph.readthedocs.io/en/stable/demos/graph-classification/gcn-supervised-graph-classification.html>`__
       - `unsupervised representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/gcn-unsupervised-graph-embeddings.html>`__

       Appropriate data generator: :class:`.PaddedGraphGenerator`.

       Related models:

       - :class:`.DeepGraphCNN` for a specialisation using :class:`.SortPooling`
       - :class:`.GCN` for predictions for individual nodes or links

    Args:
        layer_sizes (list of int): list of output sizes of the graph GCN layers in the stack.
        activations (list of str): list of activations applied to each GCN layer's output.
        generator (PaddedGraphGenerator): an instance of :class:`.PaddedGraphGenerator` class constructed on the graphs used for
            training.
        bias (bool, optional): toggles an optional bias in graph convolutional layers.
        dropout (float, optional): dropout rate applied to input features of each GCN layer.
        pooling (callable, optional): a Keras layer or function that takes two arguments and returns
            a tensor representing the embeddings for each graph in the batch. Arguments:

            - embeddings tensor argument with shape ``batch size × nodes × output size``, where
              ``nodes`` is the maximum number of nodes of a graph in the batch and ``output size``
              is the size of the final graph convolutional layer, or, if ``pool_all_layers``, the
              sum of the sizes of each graph convolutional layers.

            - ``mask`` tensor named argument of booleans with shape ``batch size × nodes``. ``True``
              values indicate which rows of the embeddings argument are valid, and all other rows
              (corresponding to ``mask == False``) must be ignored.

            The returned tensor can have any shape ``batch size``, ``batch size × N1``, ``batch size
            × N1 × N2``, ..., as long as the ``N1``, ``N2``, ... are constant across all graphs:
            they must not depend on the ``nodes`` dimension or on the number of ``True`` values in
            ``mask``. ``pooling`` defaults to mean pooling via ``GlobalAveragePooling1D``.
        pool_all_layers (bool, optional): which layers to pass to the pooling method: if ``True``,
            pass the concatenation of the output of every GCN layer, otherwise pass only the output
            of the last GCN layer.
        kernel_initializer (str or func, optional): The initialiser to use for the weights of each graph
            convolutional layer.
        kernel_regularizer (str or func, optional): The regulariser to use for the weights of each graph
            convolutional layer.
        kernel_constraint (str or func, optional): The constraint to use for the weights of each layer graph
            convolutional.
        bias_initializer (str or func, optional): The initialiser to use for the bias of each layer graph
            convolutional.
        bias_regularizer (str or func, optional): The regulariser to use for the bias of each layer graph
            convolutional.
        bias_constraint (str or func, optional): The constraint to use for the bias of each layer graph
            convolutional.
    """

    def __init__(
        self,
        layer_sizes,
        activations,
        generator,
        bias=True,
        dropout=0.0,
        pooling=None,
        pool_all_layers=False,
        kernel_initializer=None,
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer=None,
        bias_regularizer=None,
        bias_constraint=None,
    ):
        # batches must be padded to a common node count, so only PaddedGraphGenerator works here
        if not isinstance(generator, PaddedGraphGenerator):
            raise TypeError(
                f"generator: expected instance of PaddedGraphGenerator, found {type(generator).__name__}"
            )
        # every GCN layer needs a matching activation
        if len(layer_sizes) != len(activations):
            raise ValueError(
                "expected the number of layers to be the same as the number of activations,"
                f"found {len(layer_sizes)} layer sizes vs {len(activations)} activations"
            )
        self.layer_sizes = layer_sizes
        self.activations = activations
        self.bias = bias
        self.dropout = dropout
        self.generator = generator
        # default pooling is a masked mean over the node dimension
        if pooling is not None:
            self.pooling = pooling
        else:
            self.pooling = GlobalAveragePooling1D(data_format="channels_last")
        self.pool_all_layers = pool_all_layers

        # Initialize a stack of GraphConvolution layers, each preceded by Dropout
        n_layers = len(self.layer_sizes)
        self._layers = []
        for ii in range(n_layers):
            l = self.layer_sizes[ii]
            a = self.activations[ii]
            self._layers.append(Dropout(self.dropout))
            self._layers.append(
                GraphConvolution(
                    l,
                    activation=a,
                    use_bias=self.bias,
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer,
                    kernel_constraint=kernel_constraint,
                    bias_initializer=bias_initializer,
                    bias_regularizer=bias_regularizer,
                    bias_constraint=bias_constraint,
                )
            )

    def __call__(self, x):
        """
        Apply a stack of :class:`.GraphConvolution` layers to the inputs.
        The input tensors are expected to be a list of the following:
        [
            Node features shape (batch size, N, F),
            Mask (batch size, N ),
            Adjacency matrices (batch size, N, N),
        ]
        where N is the number of nodes and F the number of input features

        Args:
            x (Tensor): input tensors

        Returns:
            Output tensor
        """
        x_in, mask, As = x
        h_layer = x_in

        # record the output of every GraphConvolution layer, so that they can all be
        # concatenated for pooling when ``pool_all_layers`` is set
        gcn_layers = []
        for layer in self._layers:
            if isinstance(layer, GraphConvolution):
                # graph layers also need the (preprocessed) adjacency matrices
                h_layer = layer([h_layer, As])
                gcn_layers.append(h_layer)
            else:
                # For other (non-graph) layers only supply the input tensor
                h_layer = layer(h_layer)

        if self.pool_all_layers:
            h_layer = tf.concat(gcn_layers, axis=-1)

        # mask to ignore the padded values
        h_layer = self.pooling(h_layer, mask=mask)
        return h_layer

    def in_out_tensors(self):
        """
        Builds a Graph Classification model.

        Returns:
            tuple: ``(x_inp, x_out)``, where ``x_inp`` is a list of two input tensors for the
            Graph Classification model (containing node features and normalized adjacency matrix),
            and ``x_out`` is a tensor for the Graph Classification model output.
        """
        # inputs match the batches produced by PaddedGraphGenerator: padded node
        # features, the validity mask, and padded adjacency matrices
        x_t = Input(shape=(None, self.generator.node_features_size))
        mask = Input(shape=(None,), dtype=tf.bool)
        A_m = Input(shape=(None, None))

        x_inp = [x_t, mask, A_m]
        x_out = self(x_inp)

        return x_inp, x_out

    # backwards-compatible alias: calling ``build`` warns and delegates to ``in_out_tensors``
    build = deprecated_model_function(in_out_tensors, "build")
class DeepGraphCNN(GCNSupervisedGraphClassification):
    """
    A stack of :class:`.GraphConvolution` layers together with a `SortPooling` layer
    that implement a supervised graph classification network (DGCNN) using the GCN convolution operator
    (https://arxiv.org/abs/1609.02907).

    The DGCNN model was introduced in the paper, "An End-to-End Deep Learning Architecture for Graph Classification" by
    M. Zhang, Z. Cui, M. Neumann, and Y. Chen, AAAI 2018, https://www.cse.wustl.edu/~muhan/papers/AAAI_2018_DGCNN.pdf

    The model minimally requires specification of the GCN layer sizes as a list of int corresponding to the feature
    dimensions for each hidden layer, activation functions for each hidden layer, a generator object, and the number of
    output nodes for the :class:`.SortPooling` layer.

    To use this class as a Keras model, the features and preprocessed adjacency matrix should be supplied using the
    :class:`.PaddedGraphGenerator` class.

    Examples:
        Creating a graph classification model from a list of :class:`.StellarGraph`
        objects (``graphs``). We also add two one-dimensional convolutional layers, a max pooling layer, and two fully
        connected dense layers one with dropout one used for binary classification::

            generator = PaddedGraphGenerator(graphs)
            model = DeepGraphCNN(
                layer_sizes=[32, 32, 32, 1],
                activations=["tanh","tanh", "tanh", "tanh"],
                generator=generator,
                k=30
            )
            x_inp, x_out = model.in_out_tensors()

            x_out = Conv1D(filters=16, kernel_size=97, strides=97)(x_out)
            x_out = MaxPool1D(pool_size=2)(x_out)
            x_out = Conv1D(filters=32, kernel_size=5, strides=1)(x_out)
            x_out = Flatten()(x_out)
            x_out = Dense(units=128, activation="relu")(x_out)
            x_out = Dropout(rate=0.5)(x_out)
            predictions = Dense(units=1, activation="sigmoid")(x_out)

            model = Model(inputs=x_inp, outputs=predictions)

    .. seealso::

       Example using DGCNN: `graph classification <https://stellargraph.readthedocs.io/en/stable/demos/graph-classification/gcn-supervised-graph-classification.html>`__.

       Appropriate data generator: :class:`.PaddedGraphGenerator`.

       Related models:

       - :class:`.GCNSupervisedGraphClassification` for the general form, supporting more customisation
       - :class:`.GCN` for predictions for individual nodes or links

    Args:
        layer_sizes (list of int): list of output sizes of the graph GCN layers in the stack.
        activations (list of str): list of activations applied to each GCN layer's output.
        k (int): size (number of rows) of output tensor.
        generator (PaddedGraphGenerator): an instance of :class:`.PaddedGraphGenerator` class constructed on the graphs used for
            training.
        bias (bool, optional): toggles an optional bias in graph convolutional layers.
        dropout (float, optional): dropout rate applied to input features of each GCN layer.
        kernel_initializer (str or func, optional): The initialiser to use for the weights of each graph
            convolutional layer.
        kernel_regularizer (str or func, optional): The regulariser to use for the weights of each graph
            convolutional layer.
        kernel_constraint (str or func, optional): The constraint to use for the weights of each layer graph
            convolutional.
        bias_initializer (str or func, optional): The initialiser to use for the bias of each layer graph
            convolutional.
        bias_regularizer (str or func, optional): The regulariser to use for the bias of each layer graph
            convolutional.
        bias_constraint (str or func, optional): The constraint to use for the bias of each layer graph
            convolutional.
    """

    def __init__(
        self,
        layer_sizes,
        activations,
        k,
        generator,
        bias=True,
        dropout=0.0,
        kernel_initializer=None,
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer=None,
        bias_regularizer=None,
        bias_constraint=None,
    ):
        # DGCNN is the general GCN graph-classification stack specialised to pool
        # with SortPooling, applied to the concatenation of every layer's output
        super().__init__(
            layer_sizes=layer_sizes,
            activations=activations,
            generator=generator,
            bias=bias,
            dropout=dropout,
            pooling=SortPooling(k=k, flatten_output=True),
            pool_all_layers=True,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            kernel_constraint=kernel_constraint,
            bias_initializer=bias_initializer,
            bias_regularizer=bias_regularizer,
            bias_constraint=bias_constraint,
        )
| 14,568 | 42.489552 | 169 | py |
stellargraph | stellargraph-master/stellargraph/layer/graph_attention.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of Graph Attention Network (GAT) layer, and GAT class that is a stack of GAT layers
"""
__all__ = ["GraphAttention", "GraphAttentionSparse", "GAT"]
import warnings
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import activations, constraints, initializers, regularizers
from tensorflow.keras.layers import Input, Layer, Dropout, LeakyReLU, Lambda, Reshape
from ..mapper import FullBatchNodeGenerator, FullBatchGenerator, ClusterNodeGenerator
from .misc import SqueezedSparseConversion, deprecated_model_function, GatherIndices
class GraphAttention(Layer):
    """
    Graph Attention (GAT) layer. The base implementation is taken from
    https://github.com/danielegrattarola/keras-gat,
    with some modifications added for ease of use.

    Based on the original paper: Graph Attention Networks. P. Veličković et al. ICLR 2018 https://arxiv.org/abs/1710.10903

    Notes:
      - The inputs are tensors with a batch dimension of 1:
        Keras requires this batch dimension, and for full-batch methods
        we only have a single "batch".

      - There are two inputs required, the node features,
        and the graph adjacency matrix

      - This does not add self loops to the adjacency matrix, you should preprocess
        the adjacency matrix to add self-loops

    .. seealso:: :class:`.GAT` combines several of these layers, and :class:`.GraphAttentionSparse` supports a sparse adjacency matrix.

    Args:
        units (int): dimensionality of output feature vectors
        attn_heads (int): number of attention heads
        attn_heads_reduction (str): reduction applied to output features of each attention head, ``concat`` or ``average``.
            ``average`` should be applied in the final prediction layer of the model (Eq. 6 of the paper).
        in_dropout_rate (float): dropout rate applied to features
        attn_dropout_rate (float): dropout rate applied to attention coefficients
        activation (str): nonlinear activation applied to layer's output to obtain output features (eq. 4 of the GAT paper)
        final_layer (bool): Deprecated, use ``tf.gather`` or :class:`.GatherIndices`
        use_bias (bool): toggles an optional bias
        saliency_map_support (bool): If calculating saliency maps using the tools in
            stellargraph.interpretability.saliency_maps this should be True. Otherwise this should be False (default).
        kernel_initializer (str or func, optional): The initialiser to use for the head weights.
        kernel_regularizer (str or func, optional): The regulariser to use for the head weights.
        kernel_constraint (str or func, optional): The constraint to use for the head weights.
        bias_initializer (str or func, optional): The initialiser to use for the head bias.
        bias_regularizer (str or func, optional): The regulariser to use for the head bias.
        bias_constraint (str or func, optional): The constraint to use for the head bias.
        attn_kernel_initializer (str or func, optional): The initialiser to use for the attention weights.
        attn_kernel_regularizer (str or func, optional): The regulariser to use for the attention weights.
        attn_kernel_constraint (str or func, optional): The constraint to use for the attention weights.
    """

    def __init__(
        self,
        units,
        attn_heads=1,
        attn_heads_reduction="concat",  # {'concat', 'average'}
        in_dropout_rate=0.0,
        attn_dropout_rate=0.0,
        activation="relu",
        use_bias=True,
        final_layer=None,
        saliency_map_support=False,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer="zeros",
        bias_regularizer=None,
        bias_constraint=None,
        attn_kernel_initializer="glorot_uniform",
        attn_kernel_regularizer=None,
        attn_kernel_constraint=None,
        **kwargs,
    ):
        if attn_heads_reduction not in {"concat", "average"}:
            raise ValueError(
                "{}: Possible heads reduction methods: concat, average; received {}".format(
                    type(self).__name__, attn_heads_reduction
                )
            )

        self.units = units  # Number of output features (F' in the paper)
        self.attn_heads = attn_heads  # Number of attention heads (K in the paper)
        self.attn_heads_reduction = attn_heads_reduction  # Eq. 5 and 6 in the paper
        self.in_dropout_rate = in_dropout_rate  # dropout rate for node features
        self.attn_dropout_rate = attn_dropout_rate  # dropout rate for attention coefs
        self.activation = activations.get(activation)  # Eq. 4 in the paper
        self.use_bias = use_bias

        # the 'final_layer' behaviour was removed; fail loudly rather than silently ignore it
        if final_layer is not None:
            raise ValueError(
                "'final_layer' is not longer supported, use 'tf.gather' or 'GatherIndices' separately"
            )

        self.saliency_map_support = saliency_map_support
        # Populated by build()
        self.kernels = []  # Layer kernels for attention heads
        self.biases = []  # Layer biases for attention heads
        self.attn_kernels = []  # Attention kernels for attention heads

        # output width depends on how the heads are combined
        if attn_heads_reduction == "concat":
            # Output will have shape (..., K * F')
            self.output_dim = self.units * self.attn_heads
        else:
            # Output will have shape (..., F')
            self.output_dim = self.units

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)

        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)

        self.attn_kernel_initializer = initializers.get(attn_kernel_initializer)
        self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)
        self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)

        super().__init__(**kwargs)

    def get_config(self):
        """
        Gets class configuration for Keras serialization
        """
        config = {
            "units": self.units,
            "attn_heads": self.attn_heads,
            "attn_heads_reduction": self.attn_heads_reduction,
            "in_dropout_rate": self.in_dropout_rate,
            "attn_dropout_rate": self.attn_dropout_rate,
            "activation": activations.serialize(self.activation),
            "use_bias": self.use_bias,
            "saliency_map_support": self.saliency_map_support,
            "kernel_initializer": initializers.serialize(self.kernel_initializer),
            "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "attn_kernel_initializer": initializers.serialize(
                self.attn_kernel_initializer
            ),
            "attn_kernel_regularizer": regularizers.serialize(
                self.attn_kernel_regularizer
            ),
            "attn_kernel_constraint": constraints.serialize(
                self.attn_kernel_constraint
            ),
        }
        base_config = super().get_config()
        return {**base_config, **config}

    def compute_output_shape(self, input_shapes):
        """
        Computes the output shape of the layer.
        Assumes the following inputs:

        Args:
            input_shapes (tuple of int)
                Shape tuples can include None for free dimensions, instead of an integer.

        Returns:
            An input shape tuple.
        """
        feature_shape, *As_shapes = input_shapes

        # feature shape is (batch, nodes, features): the node dimension is preserved,
        # only the feature dimension changes (to output_dim)
        batch_dim = feature_shape[0]
        out_dim = feature_shape[1]

        return batch_dim, out_dim, self.output_dim

    def build(self, input_shapes):
        """
        Builds the layer

        Args:
            input_shapes (list of int): shapes of the layer's inputs (node features and adjacency matrix)
        """
        feat_shape = input_shapes[0]
        input_dim = int(feat_shape[-1])

        # Variables to support integrated gradients; not trained, only set by the
        # saliency-map tooling
        self.delta = self.add_weight(
            name="ig_delta", shape=(), trainable=False, initializer=initializers.ones()
        )
        self.non_exist_edge = self.add_weight(
            name="ig_non_exist_edge",
            shape=(),
            trainable=False,
            initializer=initializers.zeros(),
        )

        # Initialize weights for each attention head
        for head in range(self.attn_heads):
            # Layer kernel
            kernel = self.add_weight(
                shape=(input_dim, self.units),
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
                name="kernel_{}".format(head),
            )
            self.kernels.append(kernel)

            # # Layer bias
            if self.use_bias:
                bias = self.add_weight(
                    shape=(self.units,),
                    initializer=self.bias_initializer,
                    regularizer=self.bias_regularizer,
                    constraint=self.bias_constraint,
                    name="bias_{}".format(head),
                )
                self.biases.append(bias)

            # Attention kernels: one for the "self" node and one for the neighbours
            attn_kernel_self = self.add_weight(
                shape=(self.units, 1),
                initializer=self.attn_kernel_initializer,
                regularizer=self.attn_kernel_regularizer,
                constraint=self.attn_kernel_constraint,
                name="attn_kernel_self_{}".format(head),
            )
            attn_kernel_neighs = self.add_weight(
                shape=(self.units, 1),
                initializer=self.attn_kernel_initializer,
                regularizer=self.attn_kernel_regularizer,
                constraint=self.attn_kernel_constraint,
                name="attn_kernel_neigh_{}".format(head),
            )
            self.attn_kernels.append([attn_kernel_self, attn_kernel_neighs])
        self.built = True

    def call(self, inputs):
        """
        Creates the layer as a Keras graph.

        Note that the inputs are tensors with a batch dimension of 1:
        Keras requires this batch dimension, and for full-batch methods
        we only have a single "batch".

        There are two inputs required, the node features,
        and the graph adjacency matrix

        Notes:
            This does not add self loops to the adjacency matrix.

        Args:
            inputs (list): list of inputs with 2 items:
                node features (size 1 x N x F),
                graph adjacency matrix (size N x N),
                where N is the number of nodes in the graph,
                F is the dimensionality of node features
        """
        X = inputs[0]  # Node features (1 x N x F)
        A = inputs[1]  # Adjacency matrix (1 X N x N)
        N = K.int_shape(A)[-1]  # only used in the saliency-map branch below

        batch_dim, n_nodes, _ = K.int_shape(X)
        if batch_dim != 1:
            raise ValueError(
                "Currently full-batch methods only support a batch dimension of one"
            )

        else:
            # Remove singleton batch dimension
            X = K.squeeze(X, 0)
            A = K.squeeze(A, 0)

        outputs = []
        for head in range(self.attn_heads):
            kernel = self.kernels[head]  # W in the paper (F x F')
            attention_kernel = self.attn_kernels[
                head
            ]  # Attention kernel a in the paper (2F' x 1)

            # Compute inputs to attention network
            features = K.dot(X, kernel)  # (N x F')

            # Compute feature combinations
            # Note: [[a_1], [a_2]]^T [[Wh_i], [Wh_2]] = [a_1]^T [Wh_i] + [a_2]^T [Wh_j]
            attn_for_self = K.dot(
                features, attention_kernel[0]
            )  # (N x 1), [a_1]^T [Wh_i]
            attn_for_neighs = K.dot(
                features, attention_kernel[1]
            )  # (N x 1), [a_2]^T [Wh_j]

            # Attention head a(Wh_i, Wh_j) = a^T [[Wh_i], [Wh_j]]
            dense = attn_for_self + K.transpose(
                attn_for_neighs
            )  # (N x N) via broadcasting

            # Add nonlinearity
            dense = LeakyReLU(alpha=0.2)(dense)

            # Mask values before activation (Vaswani et al., 2017)
            # YT: this only works for 'binary' A, not for 'weighted' A!
            # YT: if A does not have self-loops, the node itself will be masked, so A should have self-loops
            # YT: this is ensured by setting the diagonal elements of A tensor to 1 above
            if not self.saliency_map_support:
                # push attention logits for non-edges towards -inf so softmax ignores them
                mask = -10e9 * (1.0 - A)
                dense += mask
                dense = K.softmax(dense)  # (N x N), Eq. 3 of the paper
            else:
                # dense = dense - tf.reduce_max(dense)
                # GAT with support for saliency calculations: a differentiable,
                # max-subtracted softmax parameterised by the integrated-gradients
                # variables ``delta`` and ``non_exist_edge`` set in build()
                W = (self.delta * A) * K.exp(
                    dense - K.max(dense, axis=1, keepdims=True)
                ) * (1 - self.non_exist_edge) + self.non_exist_edge * (
                    A + self.delta * (tf.ones((N, N)) - A) + tf.eye(N)
                ) * K.exp(
                    dense - K.max(dense, axis=1, keepdims=True)
                )
                dense = W / K.sum(W, axis=1, keepdims=True)

            # Apply dropout to features and attention coefficients
            dropout_feat = Dropout(self.in_dropout_rate)(features)  # (N x F')
            dropout_attn = Dropout(self.attn_dropout_rate)(dense)  # (N x N)

            # Linear combination with neighbors' features [YT: see Eq. 4]
            node_features = K.dot(dropout_attn, dropout_feat)  # (N x F')

            if self.use_bias:
                node_features = K.bias_add(node_features, self.biases[head])

            # Add output of attention head to final output
            outputs.append(node_features)

        # Aggregate the heads' output according to the reduction method
        if self.attn_heads_reduction == "concat":
            output = K.concatenate(outputs)  # (N x KF')
        else:
            output = K.mean(K.stack(outputs), axis=0)  # N x F')

        # Nonlinear activation function
        output = self.activation(output)

        # Add batch dimension back if we removed it
        if batch_dim == 1:
            output = K.expand_dims(output, 0)
        return output
class GraphAttentionSparse(GraphAttention):
    """
    Graph Attention (GAT) layer, base implementation taken from https://github.com/danielegrattarola/keras-gat,
    some modifications added for ease of use.

    Based on the original paper: Graph Attention Networks. P. Veličković et al. ICLR 2018 https://arxiv.org/abs/1710.10903

    Notes:
      - The inputs are tensors with a batch dimension of 1:
        Keras requires this batch dimension, and for full-batch methods
        we only have a single "batch".

      - There are two inputs required, the node features
        and the graph adjacency matrix (as a sparse tensor)

      - This does not add self loops to the adjacency matrix, you should preprocess
        the adjacency matrix to add self-loops

    .. seealso:: :class:`.GAT` combines several of these layers, and :class:`.GraphAttention` supports a dense adjacency matrix.

    Args:
        units (int): dimensionality of output feature vectors
        attn_heads (int): number of attention heads
        attn_heads_reduction (str): reduction applied to output features of each attention head, ``concat`` or ``average``.
            ``average`` should be applied in the final prediction layer of the model (Eq. 6 of the paper).
        in_dropout_rate (float): dropout rate applied to features
        attn_dropout_rate (float): dropout rate applied to attention coefficients
        activation (str): nonlinear activation applied to layer's output to obtain output features (eq. 4 of the GAT paper)
        final_layer (bool): Deprecated, use ``tf.gather`` or :class:`.GatherIndices`
        use_bias (bool): toggles an optional bias
        saliency_map_support (bool): If calculating saliency maps using the tools in
            stellargraph.interpretability.saliency_maps this should be True. Otherwise this should be False (default).
        kernel_initializer (str or func, optional): The initialiser to use for the head weights.
        kernel_regularizer (str or func, optional): The regulariser to use for the head weights.
        kernel_constraint (str or func, optional): The constraint to use for the head weights.
        bias_initializer (str or func, optional): The initialiser to use for the head bias.
        bias_regularizer (str or func, optional): The regulariser to use for the head bias.
        bias_constraint (str or func, optional): The constraint to use for the head bias.
        attn_kernel_initializer (str or func, optional): The initialiser to use for the attention weights.
        attn_kernel_regularizer (str or func, optional): The regulariser to use for the attention weights.
        attn_kernel_constraint (str or func, optional): The constraint to use for the attention weights.
    """

    def call(self, inputs, **kwargs):
        """
        Creates the layer as a Keras graph

        Notes:
            This does not add self loops to the adjacency matrix.

        Args:
            inputs (list): list of inputs with 2 items:
                node features (size b x N x F),
                sparse graph adjacency matrix (size N x N),
                where N is the number of nodes in the graph,
                F is the dimensionality of node features
        """
        X = inputs[0]  # Node features (1 x N x F)
        A_sparse = inputs[1]  # Adjacency matrix (1 x N x N)

        if not isinstance(A_sparse, tf.SparseTensor):
            raise TypeError("A is not sparse")

        # Get undirected graph edges (E x 2)
        A_indices = A_sparse.indices

        batch_dim, n_nodes, _ = K.int_shape(X)
        if batch_dim != 1:
            raise ValueError(
                "Currently full-batch methods only support a batch dimension of one"
            )
        else:
            # Remove singleton batch dimension
            X = K.squeeze(X, 0)

        outputs = []
        for head in range(self.attn_heads):
            kernel = self.kernels[head]  # W in the paper (F x F')
            attention_kernel = self.attn_kernels[
                head
            ]  # Attention kernel a in the paper (2F' x 1)

            # Compute inputs to attention network
            features = K.dot(X, kernel)  # (N x F')

            # Compute feature combinations
            # Note: [[a_1], [a_2]]^T [[Wh_i], [Wh_j]] = [a_1]^T [Wh_i] + [a_2]^T [Wh_j]
            attn_for_self = K.dot(
                features, attention_kernel[0]
            )  # (N x 1), [a_1]^T [Wh_i]
            attn_for_neighs = K.dot(
                features, attention_kernel[1]
            )  # (N x 1), [a_2]^T [Wh_j]

            # Create sparse attention vector (All non-zero values of the matrix):
            # attention is only computed along the existing edges in A_indices
            sparse_attn_self = tf.gather(
                K.reshape(attn_for_self, [-1]), A_indices[:, 0], axis=0
            )
            sparse_attn_neighs = tf.gather(
                K.reshape(attn_for_neighs, [-1]), A_indices[:, 1], axis=0
            )
            attn_values = sparse_attn_self + sparse_attn_neighs

            # Add nonlinearity
            attn_values = LeakyReLU(alpha=0.2)(attn_values)

            # Apply dropout to features and attention coefficients
            dropout_feat = Dropout(self.in_dropout_rate)(features)  # (N x F')
            dropout_attn = Dropout(self.attn_dropout_rate)(attn_values)  # (N x N)

            # Convert to sparse matrix
            sparse_attn = tf.sparse.SparseTensor(
                A_indices, values=dropout_attn, dense_shape=[n_nodes, n_nodes]
            )

            # Apply softmax to get attention coefficients
            sparse_attn = tf.sparse.softmax(sparse_attn)  # (N x N), Eq. 3 of the paper

            # Linear combination with neighbors' features [YT: see Eq. 4]
            node_features = tf.sparse.sparse_dense_matmul(
                sparse_attn, dropout_feat
            )  # (N x F')

            if self.use_bias:
                node_features = K.bias_add(node_features, self.biases[head])

            # Add output of attention head to final output
            outputs.append(node_features)

        # Aggregate the heads' output according to the reduction method
        if self.attn_heads_reduction == "concat":
            output = K.concatenate(outputs)  # (N x KF')
        else:
            output = K.mean(K.stack(outputs), axis=0)  # N x F')

        output = self.activation(output)

        # Add batch dimension back if we removed it
        if batch_dim == 1:
            output = K.expand_dims(output, 0)
        return output
def _require_without_generator(value, name):
if value is not None:
return value
else:
raise ValueError(
f"{name}: expected a value for 'input_dim', 'node_num' and 'multiplicity' when "
f"'generator' is not provided, found {name}=None."
)
class GAT:
"""
A stack of Graph Attention (GAT) layers with aggregation of multiple attention heads,
Eqs 5-6 of the GAT paper https://arxiv.org/abs/1710.10903
To use this class as a Keras model, the features and preprocessed adjacency matrix
should be supplied using:
- the :class:`.FullBatchNodeGenerator` class for node inference
- the :class:`.ClusterNodeGenerator` class for scalable/inductive node inference using the Cluster-GCN training procedure (https://arxiv.org/abs/1905.07953)
- the :class:`.FullBatchLinkGenerator` class for link inference
To have the appropriate preprocessing the generator object should be instantiated
with the `method='gat'` argument.
Examples:
Creating a GAT node classification model from an existing :class:`.StellarGraph` object `G`::
generator = FullBatchNodeGenerator(G, method="gat")
gat = GAT(
layer_sizes=[8, 4],
activations=["elu","softmax"],
attn_heads=8,
generator=generator,
in_dropout=0.5,
attn_dropout=0.5,
)
x_inp, predictions = gat.in_out_tensors()
Notes:
- The inputs are tensors with a batch dimension of 1. These are provided by the \
:class:`.FullBatchNodeGenerator` object.
- This does not add self loops to the adjacency matrix, you should preprocess
the adjacency matrix to add self-loops, using the ``method='gat'`` argument
of the :class:`.FullBatchNodeGenerator`.
- The nodes provided to the :meth:`.FullBatchNodeGenerator.flow` method are
used by the final layer to select the predictions for those nodes in order.
However, the intermediate layers before the final layer order the nodes
in the same way as the adjacency matrix.
.. seealso::
Examples using GAT:
- `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/gat-node-classification.html>`__
- `unsupervised representation learning with Deep Graph Infomax <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/deep-graph-infomax-embeddings.html>`__
- `interpreting GAT predictions <https://stellargraph.readthedocs.io/en/stable/demos/interpretability/gat-node-link-importance.html>`__
- `ensemble model for node classification <https://stellargraph.readthedocs.io/en/stable/demos/ensembles/ensemble-node-classification-example.html>`__
Appropriate data generators: :class:`.FullBatchNodeGenerator`, :class:`.FullBatchLinkGenerator`, :class:`.ClusterNodeGenerator`.
Related models:
- Other full-batch models: see the documentation of :class:`.FullBatchNodeGenerator` for a full list
- :class:`.DeepGraphInfomax` for unsupervised training
:class:`.GraphAttention` and :class:`.GraphAttentionSparse` are the base layers out of which a GAT model is built.
Args:
layer_sizes (list of int): list of output sizes of GAT layers in the stack. The length of this list defines
the number of GraphAttention layers in the stack.
generator (FullBatchNodeGenerator): an instance of FullBatchNodeGenerator class constructed on the graph of interest
attn_heads (int or list of int): number of attention heads in GraphAttention layers. The options are:
- a single integer: the passed value of ``attn_heads`` will be applied to all GraphAttention layers in the stack, except the last layer (for which the number of attn_heads will be set to 1).
- a list of integers: elements of the list define the number of attention heads in the corresponding layers in the stack.
attn_heads_reduction (list of str or None): reductions applied to output features of each attention head,
for all layers in the stack. Valid entries in the list are: ``concat``, ``average``.
If None is passed, the default reductions are applied: ``concat`` reduction to all layers in the stack
except the final layer, ``average`` reduction to the last layer (Eqs. 5-6 of the GAT paper).
bias (bool): toggles an optional bias in GAT layers
in_dropout (float): dropout rate applied to input features of each GAT layer
attn_dropout (float): dropout rate applied to attention maps
normalize (str or None): normalization applied to the final output features of the GAT layers stack. Default is None.
activations (list of str): list of activations applied to each layer's output; defaults to ``['elu', ..., 'elu']``.
saliency_map_support (bool): If calculating saliency maps using the tools in
stellargraph.interpretability.saliency_maps this should be True. Otherwise this should be False (default).
multiplicity (int, optional): The number of nodes to process at a time. This is 1 for a node
inference and 2 for link inference (currently no others are supported).
num_nodes (int, optional): The number of nodes in the given graph.
num_features (int, optional): The dimensions of the node features used as input to the model.
kernel_initializer (str or func, optional): The initialiser to use for the weights of each layer.
kernel_regularizer (str or func, optional): The regulariser to use for the weights of each layer.
kernel_constraint (str or func, optional): The constraint to use for the weights of each layer.
bias_initializer (str or func, optional): The initialiser to use for the bias of each layer.
bias_regularizer (str or func, optional): The regulariser to use for the bias of each layer.
bias_constraint (str or func, optional): The constraint to use for the bias of each layer.
attn_kernel_initializer (str or func, optional): The initialiser to use for the attention weights.
attn_kernel_regularizer (str or func, optional): The regulariser to use for the attention weights.
attn_kernel_constraint (str or func, optional): The constraint to use for the attention bias.
.. note::
The values for ``multiplicity``, ``num_nodes``, and ``num_features`` are obtained from the
provided ``generator`` by default. The additional keyword arguments for these parameters
provide an alternative way to specify them if a generator cannot be supplied.
"""
def __init__(
self,
layer_sizes,
generator=None,
attn_heads=1,
attn_heads_reduction=None,
bias=True,
in_dropout=0.0,
attn_dropout=0.0,
normalize=None,
activations=None,
saliency_map_support=False,
multiplicity=1,
num_nodes=None,
num_features=None,
kernel_initializer="glorot_uniform",
kernel_regularizer=None,
kernel_constraint=None,
bias_initializer="zeros",
bias_regularizer=None,
bias_constraint=None,
attn_kernel_initializer="glorot_uniform",
attn_kernel_regularizer=None,
attn_kernel_constraint=None,
):
self.bias = bias
self.in_dropout = in_dropout
self.attn_dropout = attn_dropout
self.generator = generator
self.saliency_map_support = saliency_map_support
# Check layer_sizes (must be list of int):
# check type:
if not isinstance(layer_sizes, list):
raise TypeError(
"{}: layer_sizes should be a list of integers; received type {} instead.".format(
type(self).__name__, type(layer_sizes).__name__
)
)
# check that values are valid:
elif not all([isinstance(s, int) and s > 0 for s in layer_sizes]):
raise ValueError(
"{}: all elements in layer_sizes should be positive integers!".format(
type(self).__name__
)
)
self.layer_sizes = layer_sizes
n_layers = len(layer_sizes)
# Check attn_heads (must be int or list of int):
if isinstance(attn_heads, list):
# check the length
if not len(attn_heads) == n_layers:
raise ValueError(
"{}: length of attn_heads list ({}) should match the number of GAT layers ({})".format(
type(self).__name__, len(attn_heads), n_layers
)
)
# check that values in the list are valid
if not all([isinstance(a, int) and a > 0 for a in attn_heads]):
raise ValueError(
"{}: all elements in attn_heads should be positive integers!".format(
type(self).__name__
)
)
self.attn_heads = attn_heads # (list of int as passed by the user)
elif isinstance(attn_heads, int):
self.attn_heads = list()
for l, _ in enumerate(layer_sizes):
# number of attention heads for layer l: attn_heads (int) for all but the last layer (for which it's set to 1)
self.attn_heads.append(attn_heads if l < n_layers - 1 else 1)
else:
raise TypeError(
"{}: attn_heads should be an integer or a list of integers!".format(
type(self).__name__
)
)
# Check attn_heads_reduction (list of str, or None):
if attn_heads_reduction is None:
# set default head reductions, see eqs 5-6 of the GAT paper
self.attn_heads_reduction = ["concat"] * (n_layers - 1) + ["average"]
else:
# user-specified list of head reductions (valid entries are 'concat' and 'average')
# check type (must be a list of str):
if not isinstance(attn_heads_reduction, list):
raise TypeError(
"{}: attn_heads_reduction should be a string; received type {} instead.".format(
type(self).__name__, type(attn_heads_reduction).__name__
)
)
# check length of attn_heads_reduction list:
if not len(attn_heads_reduction) == len(layer_sizes):
raise ValueError(
"{}: length of attn_heads_reduction list ({}) should match the number of GAT layers ({})".format(
type(self).__name__, len(attn_heads_reduction), n_layers
)
)
# check that list elements are valid:
if all(
[ahr.lower() in {"concat", "average"} for ahr in attn_heads_reduction]
):
self.attn_heads_reduction = attn_heads_reduction
else:
raise ValueError(
"{}: elements of attn_heads_reduction list should be either 'concat' or 'average'!".format(
type(self).__name__
)
)
# Check activations (list of str):
# check type:
if activations is None:
activations = ["elu"] * n_layers
if not isinstance(activations, list):
raise TypeError(
"{}: activations should be a list of strings; received {} instead".format(
type(self).__name__, type(activations).__name__
)
)
# check length:
if not len(activations) == n_layers:
raise ValueError(
"{}: length of activations list ({}) should match the number of GAT layers ({})".format(
type(self).__name__, len(activations), n_layers
)
)
self.activations = activations
# Check generator and configure sparse adjacency matrix
if generator is None:
self.use_sparse = False
self.multiplicity = _require_without_generator(multiplicity, "multiplicity")
self.n_nodes = _require_without_generator(num_nodes, "num_nodes")
self.n_features = _require_without_generator(num_features, "num_features")
else:
if not isinstance(generator, (FullBatchGenerator, ClusterNodeGenerator)):
raise TypeError(
f"Generator should be a instance of FullBatchNodeGenerator, "
f"FullBatchLinkGenerator or ClusterNodeGenerator"
)
# Copy required information from generator
self.use_sparse = generator.use_sparse
self.multiplicity = generator.multiplicity
self.n_features = generator.features.shape[1]
if isinstance(generator, FullBatchGenerator):
self.n_nodes = generator.features.shape[0]
else:
self.n_nodes = None
# Set the normalization layer used in the model
if normalize == "l2":
self._normalization = Lambda(lambda x: K.l2_normalize(x, axis=2))
elif normalize is None or str(normalize).lower() in {"none", "linear"}:
self._normalization = Lambda(lambda x: x)
else:
raise ValueError(
"Normalization should be either 'l2' or None (also allowed as 'none'); received '{}'".format(
normalize
)
)
# Switch between sparse or dense model
if self.use_sparse:
self._gat_layer = GraphAttentionSparse
else:
self._gat_layer = GraphAttention
# Initialize a stack of GAT layers
self._layers = []
n_layers = len(self.layer_sizes)
for ii in range(n_layers):
# Dropout on input node features before each GAT layer
self._layers.append(Dropout(self.in_dropout))
# GraphAttention layer
self._layers.append(
self._gat_layer(
units=self.layer_sizes[ii],
attn_heads=self.attn_heads[ii],
attn_heads_reduction=self.attn_heads_reduction[ii],
in_dropout_rate=self.in_dropout,
attn_dropout_rate=self.attn_dropout,
activation=self.activations[ii],
use_bias=self.bias,
saliency_map_support=self.saliency_map_support,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
kernel_constraint=kernel_constraint,
bias_initializer=bias_initializer,
bias_regularizer=bias_regularizer,
bias_constraint=bias_constraint,
attn_kernel_initializer=attn_kernel_initializer,
attn_kernel_regularizer=attn_kernel_regularizer,
attn_kernel_constraint=attn_kernel_constraint,
)
)
def __call__(self, inputs):
"""
Apply a stack of GAT layers to the input x_inp
Args:
x_inp (Tensor): input of the 1st GAT layer in the stack
Returns: Output tensor of the GAT layers stack
"""
if not isinstance(inputs, list):
raise TypeError(f"inputs: expected list, found {type(inputs).__name__}")
x_in, out_indices, *As = inputs
# Currently we require the batch dimension to be one for full-batch methods
batch_dim, n_nodes, _ = K.int_shape(x_in)
if batch_dim != 1:
raise ValueError(
"Currently full-batch methods only support a batch dimension of one"
)
# Convert input indices & values to a sparse matrix
if self.use_sparse:
A_indices, A_values = As
Ainput = [
SqueezedSparseConversion(shape=(n_nodes, n_nodes))(
[A_indices, A_values]
)
]
# Otherwise, create dense matrix from input tensor
else:
Ainput = As
# TODO: Support multiple matrices?
if len(Ainput) != 1:
raise NotImplementedError(
"The GAT method currently only accepts a single matrix"
)
# Remove singleton batch dimension
h_layer = x_in
for layer in self._layers:
if isinstance(layer, self._gat_layer):
# For a GAT layer add the matrix
h_layer = layer([h_layer] + Ainput)
else:
# For other (non-graph) layers only supply the input tensor
h_layer = layer(h_layer)
# print("Hlayer:", h_layer)
# only return data for the requested nodes
h_layer = GatherIndices(batch_dims=1)([h_layer, out_indices])
return self._normalization(h_layer)
def in_out_tensors(self, multiplicity=None):
"""
Builds a GAT model for node or link prediction
Returns:
tuple: ``(x_inp, x_out)``, where ``x_inp`` is a list of Keras/TensorFlow
input tensors for the model and ``x_out`` is a tensor of the model output.
"""
# Inputs for features
x_t = Input(batch_shape=(1, self.n_nodes, self.n_features))
# If not specified use multiplicity from instanciation
if multiplicity is None:
multiplicity = self.multiplicity
# Indices to gather for model output
if multiplicity == 1:
out_indices_t = Input(batch_shape=(1, None), dtype="int32")
else:
out_indices_t = Input(batch_shape=(1, None, multiplicity), dtype="int32")
# Create inputs for sparse or dense matrices
if self.use_sparse:
# Placeholders for the sparse adjacency matrix
A_indices_t = Input(batch_shape=(1, None, 2), dtype="int64")
A_values_t = Input(batch_shape=(1, None))
A_placeholders = [A_indices_t, A_values_t]
else:
# Placeholders for the dense adjacency matrix
A_m = Input(batch_shape=(1, self.n_nodes, self.n_nodes))
A_placeholders = [A_m]
# TODO: Support multiple matrices
x_inp = [x_t, out_indices_t] + A_placeholders
x_out = self(x_inp)
# Flatten output by removing singleton batch dimension
if x_out.shape[0] == 1:
self.x_out_flat = Lambda(lambda x: K.squeeze(x, 0))(x_out)
else:
self.x_out_flat = x_out
return x_inp, x_out
def _link_model(self):
if self.multiplicity != 2:
warnings.warn(
"Link model requested but a generator not supporting links was supplied."
)
return self.in_out_tensors(multiplicity=2)
def _node_model(self):
if self.multiplicity != 1:
warnings.warn(
"Node model requested but a generator not supporting nodes was supplied."
)
return self.in_out_tensors(multiplicity=1)
node_model = deprecated_model_function(_node_model, "node_model")
link_model = deprecated_model_function(_link_model, "link_model")
build = deprecated_model_function(in_out_tensors, "build")
| 41,810 | 42.781152 | 202 | py |
stellargraph | stellargraph-master/stellargraph/layer/gcn.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import activations, initializers, constraints, regularizers
from tensorflow.keras.layers import Input, Layer, Lambda, Dropout, Reshape
from ..mapper import FullBatchGenerator, ClusterNodeGenerator
from .misc import SqueezedSparseConversion, deprecated_model_function, GatherIndices
from .preprocessing_layer import GraphPreProcessingLayer
class GraphConvolution(Layer):
    """
    Graph Convolution (GCN) Keras layer.
    The implementation is based on https://github.com/tkipf/keras-gcn.
    Original paper: Semi-Supervised Classification with Graph Convolutional Networks. Thomas N. Kipf, Max Welling,
    International Conference on Learning Representations (ICLR), 2017 https://github.com/tkipf/gcn
    Notes:
      - The batch axis represents independent graphs to be convolved with this GCN kernel (for
        instance, for full-batch node prediction on a single graph, its dimension should be 1).
      - If the adjacency matrix is dense, both it and the features should have a batch axis, with
        equal batch dimension.
      - If the adjacency matrix is sparse, it should not have a batch axis, and the batch
        dimension of the features must be 1.
      - There are two inputs required, the node features,
        and the normalized graph Laplacian matrix
      - This class assumes that the normalized Laplacian matrix is passed as
        input to the Keras methods.
    .. seealso:: :class:`.GCN` combines several of these layers.
    Args:
        units (int): dimensionality of output feature vectors
        activation (str or func): nonlinear activation applied to layer's output to obtain output features
        use_bias (bool): toggles an optional bias
        final_layer (bool): Deprecated, use ``tf.gather`` or :class:`.GatherIndices`
        kernel_initializer (str or func, optional): The initialiser to use for the weights.
        kernel_regularizer (str or func, optional): The regulariser to use for the weights.
        kernel_constraint (str or func, optional): The constraint to use for the weights.
        bias_initializer (str or func, optional): The initialiser to use for the bias.
        bias_regularizer (str or func, optional): The regulariser to use for the bias.
        bias_constraint (str or func, optional): The constraint to use for the bias.
    """
    def __init__(
        self,
        units,
        activation=None,
        use_bias=True,
        final_layer=None,
        input_dim=None,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer="zeros",
        bias_regularizer=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Translate the legacy ``input_dim`` argument into Keras' ``input_shape``.
        if "input_shape" not in kwargs and input_dim is not None:
            kwargs["input_shape"] = (input_dim,)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        # ``final_layer`` was removed; fail loudly instead of silently ignoring it.
        if final_layer is not None:
            raise ValueError(
                "'final_layer' is not longer supported, use 'tf.gather' or 'GatherIndices' separately"
            )
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)
        super().__init__(**kwargs)
    def get_config(self):
        """
        Gets class configuration for Keras serialization.
        Used by Keras model serialization.
        Returns:
            A dictionary that contains the config of the layer
        """
        # NOTE: key names here must stay in sync with the ``__init__`` parameters
        # for round-trip (de)serialization to work.
        config = {
            "units": self.units,
            "use_bias": self.use_bias,
            "activation": activations.serialize(self.activation),
            "kernel_initializer": initializers.serialize(self.kernel_initializer),
            "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "bias_constraint": constraints.serialize(self.bias_constraint),
        }
        base_config = super().get_config()
        return {**base_config, **config}
    def compute_output_shape(self, input_shapes):
        """
        Computes the output shape of the layer.
        Assumes the following inputs:
        Args:
            input_shapes (tuple of int)
                Shape tuples can include None for free dimensions, instead of an integer.
        Returns:
            An output shape tuple ``(batch, nodes, units)``.
        """
        feature_shape, *As_shapes = input_shapes
        batch_dim = feature_shape[0]
        out_dim = feature_shape[1]
        # the last axis is replaced by this layer's output size
        return batch_dim, out_dim, self.units
    def build(self, input_shapes):
        """
        Builds the layer
        Args:
            input_shapes (list of int): shapes of the layer's inputs (node features and adjacency matrix)
        """
        feat_shape = input_shapes[0]
        input_dim = int(feat_shape[-1])
        # weight matrix mapping input features to ``units`` output features
        self.kernel = self.add_weight(
            shape=(input_dim, self.units),
            initializer=self.kernel_initializer,
            name="kernel",
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                shape=(self.units,),
                initializer=self.bias_initializer,
                name="bias",
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        else:
            self.bias = None
        self.built = True
    def call(self, inputs):
        """
        Applies the layer.
        Args:
            inputs (list): a list of 3 input tensors that includes
                node features (size 1 x N x F),
                graph adjacency matrix (size N x N),
                where N is the number of nodes in the graph, and
                F is the dimensionality of node features.
        Returns:
            Keras Tensor that represents the output of the layer.
        """
        features, *As = inputs
        # Calculate the layer operation of GCN
        A = As[0]
        if K.is_sparse(A):
            # FIXME(#1222): batch_dot doesn't support sparse tensors, so we special case them to
            # only work with a single batch element (and the adjacency matrix without a batch
            # dimension)
            if features.shape[0] != 1:
                raise ValueError(
                    f"features: expected batch dimension = 1 when using sparse adjacency matrix in GraphConvolution, found features batch dimension {features.shape[0]}"
                )
            if len(A.shape) != 2:
                raise ValueError(
                    f"adjacency: expected a single adjacency matrix when using sparse adjacency matrix in GraphConvolution (tensor of rank 2), found adjacency tensor of rank {len(A.shape)}"
                )
            # drop the singleton batch axis so the sparse-dense product is 2-D,
            # then restore it afterwards
            features_sq = K.squeeze(features, axis=0)
            h_graph = K.dot(A, features_sq)
            h_graph = K.expand_dims(h_graph, axis=0)
        else:
            # dense adjacency: batched matrix product A @ features
            h_graph = K.batch_dot(A, features)
        output = K.dot(h_graph, self.kernel)
        # Add optional bias & apply activation
        if self.bias is not None:
            output += self.bias
        output = self.activation(output)
        return output
class GCN:
    """
    A stack of Graph Convolutional layers that implement a graph convolution network model
    as in https://arxiv.org/abs/1609.02907
    The model minimally requires specification of the layer sizes as a list of int
    corresponding to the feature dimensions for each hidden layer,
    activation functions for each hidden layers, and a generator object.
    To use this class as a Keras model, the features and preprocessed adjacency matrix
    should be supplied using:
    - the :class:`.FullBatchNodeGenerator` class for node inference
    - the :class:`.ClusterNodeGenerator` class for scalable/inductive node inference using the Cluster-GCN training procedure (https://arxiv.org/abs/1905.07953)
    - the :class:`.FullBatchLinkGenerator` class for link inference
    To have the appropriate preprocessing the generator object should be instantiated
    with the ``method='gcn'`` argument.
    Note that currently the GCN class is compatible with both sparse and dense adjacency
    matrices and the :class:`.FullBatchNodeGenerator` will default to sparse.
    Example:
        Creating a GCN node classification model from an existing :class:`.StellarGraph`
        object ``G``::
            generator = FullBatchNodeGenerator(G, method="gcn")
            gcn = GCN(
                    layer_sizes=[32, 4],
                    activations=["elu","softmax"],
                    generator=generator,
                    dropout=0.5
                )
            x_inp, predictions = gcn.in_out_tensors()
    Notes:
      - The inputs are tensors with a batch dimension of 1. These are provided by the \
        :class:`.FullBatchNodeGenerator` object.
      - This assumes that the normalized Laplacian matrix is provided as input to
        Keras methods. When using the :class:`.FullBatchNodeGenerator` specify the
        ``method='gcn'`` argument to do this preprocessing.
      - The nodes provided to the :meth:`.FullBatchNodeGenerator.flow` method are
        used by the final layer to select the predictions for those nodes in order.
        However, the intermediate layers before the final layer order the nodes
        in the same way as the adjacency matrix.
    .. seealso::
       Examples using GCN:
       - `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/gcn-node-classification.html>`__
       - `node classification trained with Cluster-GCN <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/cluster-gcn-node-classification.html>`__
       - `node classification with Neo4j and Cluster-GCN <https://stellargraph.readthedocs.io/en/stable/demos/connector/neo4j/cluster-gcn-on-cora-neo4j-example.html>`__
       - `semi-supervised node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/gcn-deep-graph-infomax-fine-tuning-node-classification.html>`__
       - `link prediction <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/gcn-link-prediction.html>`__
       - `unsupervised representation learning with Deep Graph Infomax <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/deep-graph-infomax-embeddings.html>`__
       - interpreting GCN predictions: `dense <https://stellargraph.readthedocs.io/en/stable/demos/interpretability/gcn-node-link-importance.html>`__, `sparse <https://stellargraph.readthedocs.io/en/stable/demos/interpretability/gcn-sparse-node-link-importance.html>`__
       - `ensemble model for node classification <https://stellargraph.readthedocs.io/en/stable/demos/ensembles/ensemble-node-classification-example.html>`__
       - `comparison of link prediction algorithms <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/homogeneous-comparison-link-prediction.html>`__
       Appropriate data generators: :class:`.FullBatchNodeGenerator`, :class:`.FullBatchLinkGenerator`, :class:`.ClusterNodeGenerator`.
       Related models:
       - Other full-batch models: see the documentation of :class:`.FullBatchNodeGenerator` for a full list
       - :class:`.RGCN` for a generalisation to multiple edge types
       - :class:`.GCNSupervisedGraphClassification` for graph classification by pooling the output of GCN
       - :class:`.GCN_LSTM` for time-series and sequence prediction, incorporating the graph structure via GCN
       - :class:`.DeepGraphInfomax` for unsupervised training
       :class:`.GraphConvolution` is the base layer out of which a GCN model is built.
    Args:
        layer_sizes (list of int): Output sizes of GCN layers in the stack.
        generator (FullBatchNodeGenerator): The generator instance.
        bias (bool): If True, a bias vector is learnt for each layer in the GCN model.
        dropout (float): Dropout rate applied to input features of each GCN layer.
        activations (list of str or func): Activations applied to each layer's output;
            defaults to ``['relu', ..., 'relu']``.
        kernel_initializer (str or func, optional): The initialiser to use for the weights of each layer.
        kernel_regularizer (str or func, optional): The regulariser to use for the weights of each layer.
        kernel_constraint (str or func, optional): The constraint to use for the weights of each layer.
        bias_initializer (str or func, optional): The initialiser to use for the bias of each layer.
        bias_regularizer (str or func, optional): The regulariser to use for the bias of each layer.
        bias_constraint (str or func, optional): The constraint to use for the bias of each layer.
        squeeze_output_batch (bool, optional): if True, remove the batch dimension when the batch size is 1. If False, leave the batch dimension.
    """
    def __init__(
        self,
        layer_sizes,
        generator,
        bias=True,
        dropout=0.0,
        activations=None,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer="zeros",
        bias_regularizer=None,
        bias_constraint=None,
        squeeze_output_batch=True,
    ):
        if not isinstance(generator, (FullBatchGenerator, ClusterNodeGenerator)):
            raise TypeError(
                f"Generator should be a instance of FullBatchNodeGenerator, "
                f"FullBatchLinkGenerator or ClusterNodeGenerator"
            )
        n_layers = len(layer_sizes)
        self.layer_sizes = layer_sizes
        # NOTE(review): this assignment is overwritten after validation below
        self.activations = activations
        self.bias = bias
        self.dropout = dropout
        self.squeeze_output_batch = squeeze_output_batch
        # Copy required information from generator
        self.method = generator.method
        self.multiplicity = generator.multiplicity
        self.n_features = generator.features.shape[1]
        self.use_sparse = generator.use_sparse
        # only full-batch generators know the total node count up front
        if isinstance(generator, FullBatchGenerator):
            self.n_nodes = generator.features.shape[0]
        else:
            self.n_nodes = None
        # no generator-side preprocessing: normalize inside the model instead
        if self.method == "none":
            self.graph_norm_layer = GraphPreProcessingLayer(num_of_nodes=self.n_nodes)
        # Activation function for each layer
        if activations is None:
            activations = ["relu"] * n_layers
        elif len(activations) != n_layers:
            raise ValueError(
                "Invalid number of activations; require one function per layer"
            )
        self.activations = activations
        # Initialize a stack of GCN layers, each preceded by input dropout
        self._layers = []
        for ii in range(n_layers):
            self._layers.append(Dropout(self.dropout))
            self._layers.append(
                GraphConvolution(
                    self.layer_sizes[ii],
                    activation=self.activations[ii],
                    use_bias=self.bias,
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer,
                    kernel_constraint=kernel_constraint,
                    bias_initializer=bias_initializer,
                    bias_regularizer=bias_regularizer,
                    bias_constraint=bias_constraint,
                )
            )
    def __call__(self, x):
        """
        Apply a stack of GCN layers to the inputs.
        The input tensors are expected to be a list of the following:
        [
            Node features shape (1, N, F),
            Adjacency indices (1, E, 2),
            Adjacency values (1, E),
            Output indices (1, O)
        ]
        where N is the number of nodes, F the number of input features,
              E is the number of edges, O the number of output nodes.
        Args:
            x (Tensor): input tensors
        Returns:
            Output tensor
        """
        x_in, out_indices, *As = x
        # Currently we require the batch dimension to be one for full-batch methods
        batch_dim, n_nodes, _ = K.int_shape(x_in)
        if batch_dim != 1:
            raise ValueError(
                "Currently full-batch methods only support a batch dimension of one"
            )
        # Convert input indices & values to a sparse matrix
        if self.use_sparse:
            A_indices, A_values = As
            Ainput = [
                SqueezedSparseConversion(
                    shape=(n_nodes, n_nodes), dtype=A_values.dtype
                )([A_indices, A_values])
            ]
        else:
            Ainput = As
            # TODO: Support multiple matrices?
            if len(Ainput) != 1:
                raise NotImplementedError(
                    "The GCN method currently only accepts a single matrix"
                )
        h_layer = x_in
        if self.method == "none":
            # For GCN, if no preprocessing has been done, we apply the preprocessing layer to perform that.
            Ainput = [self.graph_norm_layer(Ainput[0])]
        for layer in self._layers:
            if isinstance(layer, GraphConvolution):
                # For a GCN layer add the matrix
                h_layer = layer([h_layer] + Ainput)
            else:
                # For other (non-graph) layers only supply the input tensor
                h_layer = layer(h_layer)
        # only return data for the requested nodes
        h_layer = GatherIndices(batch_dims=1)([h_layer, out_indices])
        return h_layer
    def in_out_tensors(self, multiplicity=None):
        """
        Builds a GCN model for node or link prediction
        Returns:
            tuple: ``(x_inp, x_out)``, where ``x_inp`` is a list of Keras/TensorFlow
            input tensors for the GCN model and ``x_out`` is a tensor of the GCN model output.
        """
        # Inputs for features
        x_t = Input(batch_shape=(1, self.n_nodes, self.n_features))
        # If not specified use multiplicity from instanciation
        if multiplicity is None:
            multiplicity = self.multiplicity
        # Indices to gather for model output: one per node, or a pair per link
        if multiplicity == 1:
            out_indices_t = Input(batch_shape=(1, None), dtype="int32")
        else:
            out_indices_t = Input(batch_shape=(1, None, multiplicity), dtype="int32")
        # Create inputs for sparse or dense matrices
        if self.use_sparse:
            # Placeholders for the sparse adjacency matrix
            A_indices_t = Input(batch_shape=(1, None, 2), dtype="int64")
            A_values_t = Input(batch_shape=(1, None))
            A_placeholders = [A_indices_t, A_values_t]
        else:
            # Placeholders for the dense adjacency matrix
            A_m = Input(batch_shape=(1, self.n_nodes, self.n_nodes))
            A_placeholders = [A_m]
        # TODO: Support multiple matrices
        x_inp = [x_t, out_indices_t] + A_placeholders
        x_out = self(x_inp)
        # Flatten output by removing singleton batch dimension
        if self.squeeze_output_batch and x_out.shape[0] == 1:
            self.x_out_flat = Lambda(lambda x: K.squeeze(x, 0))(x_out)
        else:
            self.x_out_flat = x_out
        return x_inp, x_out
    def _link_model(self):
        # Deprecated entry point; warn if the generator was not built for links.
        if self.multiplicity != 2:
            warnings.warn(
                "Link model requested but a generator not supporting links was supplied."
            )
        return self.in_out_tensors(multiplicity=2)
    def _node_model(self):
        # Deprecated entry point; warn if the generator was not built for nodes.
        if self.multiplicity != 1:
            warnings.warn(
                "Node model requested but a generator not supporting nodes was supplied."
            )
        return self.in_out_tensors(multiplicity=1)
    # Backwards-compatible aliases; each emits a deprecation warning when called.
    node_model = deprecated_model_function(_node_model, "node_model")
    link_model = deprecated_model_function(_link_model, "link_model")
    build = deprecated_model_function(in_out_tensors, "build")
| 21,242 | 40.982213 | 269 | py |
stellargraph | stellargraph-master/stellargraph/layer/link_inference.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Link inference functions for link classification (including link prediction) and
link attribute inference (regression)
"""
from typing import AnyStr, Optional, List, Tuple
import tensorflow as tf
from tensorflow.keras.layers import (
Layer,
Concatenate,
Dense,
Lambda,
Multiply,
Average,
Reshape,
Activation,
)
from tensorflow.keras import backend as K
import warnings
class LeakyClippedLinear(Layer):
    """
    Leaky Clipped Linear Unit.
    Acts as the identity between ``low`` and ``high``; outside that band the
    response continues with a reduced slope of ``alpha``.
    Args:
        low (float): Lower threshold
        high (float): Upper threshold
        alpha (float): The slope of the function below low or above high.
    """
    def __init__(
        self, low: float = 1.0, high: float = 5.0, alpha: float = 0.1, **kwargs
    ):
        super().__init__(**kwargs)
        self.supports_masking = True
        # gamma = 1 - alpha: subtracting gamma * (distance outside the band)
        # from the identity leaves a residual slope of alpha in the tails.
        self.gamma = K.cast_to_floatx(1 - alpha)
        self.lo = K.cast_to_floatx(low)
        self.hi = K.cast_to_floatx(high)
    def call(self, x, mask=None):
        """Softly clip ``x`` to ``[lo, hi]`` with leaky tails of slope ``alpha``."""
        below = K.relu(self.lo - x)
        above = K.relu(x - self.hi)
        return x + self.gamma * below - self.gamma * above
    def get_config(self):
        """Return the serializable configuration of this layer."""
        base_config = super().get_config()
        extra = {
            "alpha": float(1 - self.gamma),
            "low": float(self.lo),
            "high": float(self.hi),
        }
        return {**base_config, **extra}
    def compute_output_shape(self, input_shape):
        # Element-wise transformation: shape is unchanged.
        return input_shape
class LinkEmbedding(Layer):
    """
    Defines an edge inference function that takes source, destination node embeddings
    (node features) as input, and returns a numeric vector of output_dim size.
    This class takes as input as either:
     * A list of two tensors of shape (N, M) being the embeddings for each of the nodes in the link,
       where N is the number of links, and M is the node embedding size.
     * A single tensor of shape (..., N, 2, M) where the axis second from last indexes the nodes
       in the link and N is the number of links and M the embedding size.
    Examples:
        Consider two tensors containing the source and destination embeddings of size M::
            x_src = tf.constant(x_src, shape=(1, M), dtype="float32")
            x_dst = tf.constant(x_dst, shape=(1, M), dtype="float32")
            li = LinkEmbedding(method="ip", activation="sigmoid")([x_src, x_dst])
    .. seealso::
       Examples using this class:
       - `GCN link prediction <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/gcn-link-prediction.html>`__
       - `comparison of link prediction algorithms <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/homogeneous-comparison-link-prediction.html>`__
       Related functions: :func:`.link_inference`, :func:`.link_classification`, :func:`.link_regression`.
    Args:
        axis (int): If a single tensor is supplied this is the axis that indexes the node
            embeddings so that the indices 0 and 1 give the node embeddings to be combined.
            This is ignored if two tensors are supplied as a list.
        activation (str), optional: activation function applied to the output, one of "softmax", "sigmoid", etc.,
            or any activation function supported by Keras, see https://keras.io/activations/ for more information.
        method (str), optional: Name of the method of combining ``(src,dst)`` node features or embeddings into edge embeddings.
            One of:
             * ``concat`` -- concatenation,
             * ``ip`` or ``dot`` -- inner product, :math:`ip(u,v) = sum_{i=1..d}{u_i*v_i}`,
             * ``mul`` or ``hadamard`` -- element-wise multiplication, :math:`h(u,v)_i = u_i*v_i`,
             * ``l1`` -- L1 operator, :math:`l_1(u,v)_i = |u_i-v_i|`,
             * ``l2`` -- L2 operator, :math:`l_2(u,v)_i = (u_i-v_i)^2`,
             * ``avg`` -- average, :math:`avg(u,v) = (u+v)/2`.
            For all methods except ``ip`` or ``dot`` a dense layer is applied on top of the combined
            edge embedding to transform to a vector of size ``output_dim``.
    """
    def __init__(
        self,
        method: AnyStr = "ip",
        axis: Optional[int] = -2,
        activation: Optional[AnyStr] = "linear",
        **kwargs
    ):
        super().__init__(**kwargs)
        # method names are matched case-insensitively
        self.method = method.lower()
        self.axis = axis
        self.activation = tf.keras.activations.get(activation)
    def get_config(self):
        """Return the serializable configuration of this layer (for Keras)."""
        config = {
            "activation": tf.keras.activations.serialize(self.activation),
            "method": self.method,
            "axis": self.axis,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def call(self, x):
        """
        Apply the layer to the node embeddings in x. These embeddings are either:
        * A list of two tensors of shape (N, M) being the embeddings for each of the nodes in the link,
          where N is the number of links, and M is the node embedding size.
        * A single tensor of shape (..., N, 2, M) where the axis second from last indexes the nodes
          in the link and N is the number of links and M the embedding size.
        Raises:
            ValueError: if a list input does not have exactly two entries, or a
                tensor input does not have size 2 along ``self.axis``.
            TypeError: if ``x`` is neither a list/tuple nor a Tensor.
            NotImplementedError: if ``self.method`` is not a recognised combiner.
        """
        # Currently GraphSAGE & HinSage output a list of two tensors being the embeddings
        # for each of the nodes in the link. However, GCN, GAT & other full-batch methods
        # return a tensor of shape (1, N, 2, M).
        # Detect and support both inputs
        if isinstance(x, (list, tuple)):
            if len(x) != 2:
                raise ValueError("Expecting a list of length 2 for link embedding")
            x0, x1 = x
        elif isinstance(x, tf.Tensor):
            # NOTE(review): this requires a statically-known size of 2 along
            # self.axis; a dynamic (None) dimension would fail the int() call.
            if int(x.shape[self.axis]) != 2:
                raise ValueError(
                    "Expecting a tensor of shape 2 along specified axis for link embedding"
                )
            x0, x1 = tf.unstack(x, axis=self.axis)
        else:
            raise TypeError("Expected a list, tuple, or Tensor as input")
        # Apply different ways to combine the node embeddings to a link embedding.
        if self.method in ["ip", "dot"]:
            out = tf.reduce_sum(x0 * x1, axis=-1, keepdims=True)
        elif self.method == "l1":
            # l1(u,v)_i = |u_i - v_i| - vector of the same size as u,v
            out = tf.abs(x0 - x1)
        elif self.method == "l2":
            # l2(u,v)_i = (u_i - v_i)^2 - vector of the same size as u,v
            out = tf.square(x0 - x1)
        elif self.method in ["mul", "hadamard"]:
            out = tf.multiply(x0, x1)
        elif self.method == "concat":
            out = Concatenate()([x0, x1])
        elif self.method == "avg":
            out = Average()([x0, x1])
        else:
            # Bug fix: previously referenced undefined names `name` and
            # `edge_embedding_method`, raising NameError instead of this message.
            raise NotImplementedError(
                "{}: the requested method '{}' is not known/not implemented".format(
                    type(self).__name__, self.method
                )
            )
        # Apply activation function
        out = self.activation(out)
        return out
def link_inference(
    output_dim: int = 1,
    output_act: AnyStr = "linear",
    edge_embedding_method: AnyStr = "ip",
    clip_limits: Optional[Tuple[float]] = None,
    name: AnyStr = "link_inference",
):
    """
    Build an edge inference function mapping (source, destination) node embeddings
    to a numeric vector of ``output_dim`` entries.

    The returned function accepts either a list of two ``(N, M)`` tensors (one per link
    endpoint) or a single ``(..., N, 2, M)`` tensor, where ``N`` is the number of links
    and ``M`` the embedding size. The output is flattened to shape ``(N, output_dim)``.

    Args:
        output_dim (int): desired dimensionality of the output (forced to 1 for the
            inner-product methods).
        output_act (str), optional: activation applied to the output; any Keras activation name.
        edge_embedding_method (str), optional: how the two node embeddings are combined --
            one of ``concat``, ``ip``/``dot``, ``mul``/``hadamard``, ``l1``, ``l2``, ``avg``.
            For all methods except ``ip``/``dot``, a dense layer maps the combined embedding
            to ``output_dim`` values.
        clip_limits (Tuple[float]): optional (lower, upper) thresholds for a trailing
            LeakyClippedLinear unit; omitted when None.
        name (str): name of the defined function, used for logging.

    Returns:
        A function taking the edge tensors described above and returning a vector of
        ``output_dim`` length (e.g. edge class probabilities or attribute predictions).
    """
    is_inner_product = edge_embedding_method in ["ip", "dot"]

    # Inner product collapses each edge to a single scalar, so any other output_dim
    # cannot be honoured.
    if is_inner_product and output_dim != 1:
        warnings.warn(
            "For inner product link method the output_dim will be ignored as it is fixed to be 1.",
            stacklevel=2,
        )
        output_dim = 1

    def edge_function(x):
        embedding = LinkEmbedding(activation="linear", method=edge_embedding_method)(x)

        if is_inner_product:
            # Already scalar per edge; only the output activation is needed.
            out = Activation(output_act)(embedding)
        else:
            # Dense layer converts the combined link embedding to the desired output size.
            out = Dense(output_dim, activation=output_act)(embedding)

        # Flatten to (N, output_dim).
        out = Reshape((output_dim,))(out)

        if clip_limits:
            out = LeakyClippedLinear(
                low=clip_limits[0], high=clip_limits[1], alpha=0.1
            )(out)

        return out

    print(
        "{}: using '{}' method to combine node embeddings into edge embeddings".format(
            name, edge_embedding_method
        )
    )
    return edge_function
def link_classification(
    output_dim: int = 1,
    output_act: AnyStr = "sigmoid",
    edge_embedding_method: AnyStr = "ip",
):
    """
    Build a function predicting a binary or multi-class edge classification output
    from (source, destination) node embeddings.

    The returned function accepts either a list of two ``(N, M)`` tensors (one per link
    endpoint) or a single ``(..., N, 2, M)`` tensor, where ``N`` is the number of links
    and ``M`` the embedding size; its output is flattened before being returned.

    This is a thin wrapper over :func:`.link_inference` with a classification-friendly
    default activation.

    Args:
        output_dim (int): desired dimensionality of the classifier output.
        output_act (str), optional: activation applied to the output; any Keras activation name.
        edge_embedding_method (str), optional: how the two node embeddings are combined --
            one of ``concat``, ``ip``/``dot``, ``mul``/``hadamard``, ``l1``, ``l2``, ``avg``.

    Returns:
        A function taking the edge tensors described above and returning logits of
        ``output_dim`` length (e.g. edge class probabilities).
    """
    # Delegate all work to link_inference, only fixing the logged name.
    return link_inference(
        output_dim=output_dim,
        output_act=output_act,
        edge_embedding_method=edge_embedding_method,
        name="link_classification",
    )
def link_regression(
    output_dim: int = 1,
    clip_limits: Optional[Tuple[float]] = None,
    edge_embedding_method: AnyStr = "ip",
):
    """
    Build a function predicting a numeric edge regression output vector/scalar from
    (source, destination) node embeddings.

    The returned function accepts either a list of two ``(N, M)`` tensors (one per link
    endpoint) or a single ``(..., N, 2, M)`` tensor, where ``N`` is the number of links
    and ``M`` the embedding size; its output is flattened before being returned.

    This is a thin wrapper over :func:`.link_inference` with a linear output activation.

    Args:
        output_dim (int): desired dimensionality of the regression output.
        clip_limits (tuple): optional (lower, upper) thresholds for a trailing
            LeakyClippedLinear unit; omitted when None.
        edge_embedding_method (str), optional: how the two node embeddings are combined --
            one of ``concat``, ``ip``/``dot``, ``mul``/``hadamard``, ``l1``, ``l2``, ``avg``.

    Returns:
        A function taking the edge tensors described above and returning a numeric value
        (e.g. the predicted edge attribute) built according to ``edge_embedding_method``.
    """
    # Delegate to link_inference with a linear output, passing the clip limits through.
    return link_inference(
        output_dim=output_dim,
        output_act="linear",
        edge_embedding_method=edge_embedding_method,
        clip_limits=clip_limits,
        name="link_regression",
    )
| 17,566 | 43.138191 | 407 | py |
stellargraph | stellargraph-master/stellargraph/layer/cluster_gcn.py | # -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.keras import backend as K
from tensorflow.keras import activations, initializers, constraints, regularizers
from tensorflow.keras.layers import Input, Layer, Lambda, Dropout, Reshape
from .misc import deprecated_model_function, GatherIndices
from ..mapper import ClusterNodeGenerator
from .gcn import GraphConvolution, GCN
import warnings
class ClusterGraphConvolution(GraphConvolution):
    """
    Deprecated: use :class:`.GraphConvolution`.

    This class is a backwards-compatibility shim only: it forwards all arguments to
    :class:`.GraphConvolution` unchanged after emitting a ``DeprecationWarning``.
    """
    def __init__(self, *args, **kwargs):
        # stacklevel=2 attributes the warning to the caller, not this wrapper.
        warnings.warn(
            "ClusterGraphConvolution has been replaced by GraphConvolution without functionality change",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
class ClusterGCN(GCN):
    """
    Deprecated: use :class:`stellargraph.layer.GCN` with :class:`stellargraph.mapper.ClusterNodeGenerator`.

    This class is a backwards-compatibility shim: it emits a ``DeprecationWarning`` and
    then delegates to :class:`.GCN`, translating this class's historical parameter order
    into GCN's keyword arguments.
    """
    def __init__(
        self,
        # the parameter order is slightly different between this and GCN, so the *args,
        # **kwargs trick doesn't work
        layer_sizes,
        activations,
        generator,
        bias=True,
        dropout=0.0,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        bias_initializer="zeros",
        bias_regularizer=None,
        bias_constraint=None,
    ):
        # stacklevel=2 attributes the warning to the caller, not this wrapper.
        warnings.warn(
            "ClusterGCN has been replaced by GCN with little functionality change (the GCN class removes the batch dimension in some cases)",
            DeprecationWarning,
            stacklevel=2,
        )
        # Forward everything to GCN by keyword; every parameter maps 1:1.
        super().__init__(
            layer_sizes=layer_sizes,
            generator=generator,
            bias=bias,
            dropout=dropout,
            activations=activations,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            kernel_constraint=kernel_constraint,
            bias_initializer=bias_initializer,
            bias_regularizer=bias_regularizer,
            bias_constraint=bias_constraint,
            # for compatibility: the old ClusterGCN kept the batch dimension in its output.
            squeeze_output_batch=False,
        )
| 2,770 | 32.792683 | 141 | py |
stellargraph | stellargraph-master/stellargraph/layer/preprocessing_layer.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing as a layer in GCN. This is to ensure that the GCN model is differentiable in an end-to-end manner.
"""
from tensorflow.keras import backend as K
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow import keras
import numpy as np
class SymmetricGraphPreProcessingLayer(Layer):
    """
    This class implements the preprocessing of adjacency matrices in GCN. We implement it in tensorflow so that
    while computing the saliency maps, we are able to calculate the gradients in an end-to-end way.
    We currently only support this for tensorflow backend.

    Args:
        num_of_nodes (int pair): The number of nodes in the graph.
    """

    def __init__(self, num_of_nodes, **kwargs):
        # The output is a dense (num_of_nodes, num_of_nodes) adjacency matrix.
        self.output_dims = (num_of_nodes, num_of_nodes)
        super().__init__(**kwargs)

    def build(self, input_shape):
        super().build(input_shape)

    def call(self, adj):
        """
        The adjacency matrix preprocessing in tensorflow.
        This function applies the matrix transformations on the adjacency matrix, which are required by GCN.
        GCN requires that the input adjacency matrix should be symmetric, with self-loops, and normalized.

        Args:
            adj (Numpy array): the adjacency matrix to transform.

        Returns:
            The tensor of the transformed adjacency matrix.
        """
        # Build a symmetric adjacency matrix: take the elementwise maximum of A and A^T,
        # expressed with where/multiply so the operation stays differentiable.
        adj_T = tf.transpose(adj)
        adj = (
            adj
            + tf.multiply(
                adj_T, tf.where(adj_T > adj, tf.ones_like(adj), tf.zeros_like(adj))
            )
            - tf.multiply(
                adj, tf.where(adj_T > adj, tf.ones_like(adj), tf.zeros_like(adj))
            )
        )
        # Add self loops (set every diagonal entry to exactly 1).
        # Bug fix: the TF1-only aliases tf.diag / tf.diag_part / tf.rsqrt were removed in
        # TensorFlow 2 and raised AttributeError here; use the tf.linalg / tf.math
        # equivalents, matching the sibling GraphPreProcessingLayer below.
        adj = adj + tf.linalg.diag(tf.ones(adj.shape[0]) - tf.linalg.diag_part(adj))
        # Symmetric normalization: D^{-1/2} A D^{-1/2}.
        rowsum = tf.reduce_sum(adj, 1)
        d_mat_inv_sqrt = tf.linalg.diag(tf.math.rsqrt(rowsum))
        adj_normalized = tf.matmul(tf.matmul(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)
        return adj_normalized
class GraphPreProcessingLayer(Layer):
    """
    Preprocessing of GCN adjacency matrices as a differentiable TensorFlow layer, so that
    saliency-map gradients can be computed end-to-end through the preprocessing step.
    We currently only support this for tensorflow backend.

    Args:
        num_of_nodes (int pair): The number of nodes in the graph.
    """

    def __init__(self, num_of_nodes, **kwargs):
        # The output is a dense (num_of_nodes, num_of_nodes) adjacency matrix.
        self.output_dims = (num_of_nodes, num_of_nodes)
        super().__init__(**kwargs)

    def build(self, input_shape):
        super().build(input_shape)

    def call(self, adj):
        """
        Apply the GCN adjacency-matrix transformations in TensorFlow: add self-loops,
        then symmetrically normalize. (GCN requires both.)

        Args:
            adj (Numpy array): the adjacency matrix to transform.

        Returns:
            The tensor of the transformed adjacency matrix.

        Raises:
            RuntimeError: if ``adj`` is a sparse tensor (not supported).
        """
        if K.is_sparse(adj):
            raise RuntimeError(
                "TensorFlow adjacency matrix normalization not implemented for sparse matrices."
            )

        # Add self loops: force every diagonal entry to exactly 1.
        self_loop_fill = tf.ones(adj.shape[0]) - tf.linalg.diag_part(adj)
        adj_with_loops = adj + tf.linalg.diag(self_loop_fill)

        # Symmetric normalization: D^{-1/2} A D^{-1/2}.
        degree = tf.reduce_sum(adj_with_loops, 1)
        inv_sqrt_degree = tf.linalg.diag(tf.math.rsqrt(degree))
        return tf.matmul(tf.matmul(inv_sqrt_degree, adj_with_loops), inv_sqrt_degree)
| 4,424 | 35.270492 | 112 | py |
stellargraph | stellargraph-master/stellargraph/layer/watch_your_step.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras.layers import Layer, Embedding, Input, Lambda, Concatenate, Dense
from tensorflow.keras import backend as K
from tensorflow.keras import initializers, constraints, regularizers
import numpy as np
import warnings
from ..mapper.adjacency_generators import AdjacencyPowerGenerator
from ..core.validation import require_integer_in_range
from .misc import deprecated_model_function
class AttentiveWalk(Layer):
    """
    The graph attention layer from Watch Your Step: Learning Node Embeddings via Graph Attention
    https://arxiv.org/pdf/1710.09599.pdf.

    A single trainable attention vector (softmax-normalised in :meth:`call`) weights each
    adjacency power when forming the expected random walk.

    Args:
        walk_length (int): the length of the random walks. Equivalent to the number of adjacency powers used. Defaults
            to `10` as this value was found to perform well by the authors of the paper.
        attention_initializer (str or func, optional): The initialiser to use for the attention weights.
        attention_regularizer (str or func, optional): The regulariser to use for the attention weights.
        attention_constraint (str or func, optional): The constraint to use for the attention weights.
        input_dim (tuple of int, optional): The shape of the input to the layer.
    """

    def __init__(
        self,
        walk_length=10,
        attention_initializer="glorot_uniform",
        attention_regularizer=None,
        attention_constraint=None,
        input_dim=None,
        **kwargs,
    ):
        # Translate the convenience `input_dim` argument into Keras's `input_shape`,
        # without clobbering an explicitly supplied one.
        if input_dim is not None and "input_shape" not in kwargs:
            kwargs["input_shape"] = input_dim

        self.walk_length = walk_length
        # Resolve initializer/regularizer/constraint identifiers to objects up front.
        self.attention_initializer = initializers.get(attention_initializer)
        self.attention_regularizer = regularizers.get(attention_regularizer)
        self.attention_constraint = constraints.get(attention_constraint)
        super().__init__(**kwargs)

    def get_config(self):
        base_config = super().get_config()
        return {
            **base_config,
            "walk_length": self.walk_length,
            "attention_initializer": initializers.serialize(self.attention_initializer),
            "attention_regularizer": regularizers.serialize(self.attention_regularizer),
            "attention_constraint": constraints.serialize(self.attention_constraint),
        }

    def compute_output_shape(self, input_shapes):
        return (input_shapes[0][-1],)

    compute_output_shape.__doc__ = Layer.compute_output_shape.__doc__

    def build(self, input_shapes):
        # One trainable coefficient per adjacency power.
        self.attention_weights = self.add_weight(
            shape=(self.walk_length,),
            initializer=self.attention_initializer,
            name="attention_weights",
            regularizer=self.attention_regularizer,
            constraint=self.attention_constraint,
        )
        self.built = True

    build.__doc__ = Layer.build.__doc__

    def call(self, partial_powers):
        """
        Applies the layer and calculates the expected random walks.

        Args:
            partial_powers: num_rows rows of the first num_powers powers of adjacency matrix with shape
                (num_rows, num_powers, num_nodes)

        Returns:
            Tensor that represents the expected random walks starting from nodes corresponding to the input rows of
            shape (num_rows, num_nodes)
        """
        # Normalise the per-power coefficients into a probability distribution, then take
        # the weighted sum over the power axis: (rows, powers, nodes) -> (rows, nodes).
        coefficients = K.softmax(self.attention_weights)
        return tf.einsum("ijk,j->ik", partial_powers, coefficients)
class WatchYourStep:
    """
    Implementation of the node embeddings as in Watch Your Step: Learning Node Embeddings via Graph Attention
    https://arxiv.org/pdf/1710.09599.pdf.
    This model requires specification of the number of random walks starting from each node, and the embedding dimension
    to use for the node embeddings.
    .. seealso::
       Example using Watch Your Step: `unsupervised representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/watch-your-step-embeddings.html>`__
       Appropriate data generator: :class:`.AdjacencyPowerGenerator`.
       Appropriate loss function: :func:`.graph_log_likelihood`.
    Args:
        generator (AdjacencyPowerGenerator): the generator
        num_walks (int): the number of random walks starting at each node to use when calculating the expected random
            walks. Defaults to `80` as this value was found to perform well by the authors of the paper.
        embedding dimension (int): the dimension to use for the node embeddings (must be an even number).
        attention_initializer (str or func, optional): The initialiser to use for the attention weights.
        attention_regularizer (str or func, optional): The regulariser to use for the attention weights.
        attention_constraint (str or func, optional): The constraint to use for the attention weights.
        embeddings_initializer (str or func, optional): The initialiser to use for the embeddings.
        embeddings_regularizer (str or func, optional): The regulariser to use for the embeddings.
        embeddings_constraint (str or func, optional): The constraint to use for the embeddings.
    """
    def __init__(
        self,
        generator,
        num_walks=80,
        embedding_dimension=64,
        attention_initializer="glorot_uniform",
        attention_regularizer=None,
        attention_constraint=None,
        embeddings_initializer="uniform",
        embeddings_regularizer=None,
        embeddings_constraint=None,
    ):
        if not isinstance(generator, AdjacencyPowerGenerator):
            raise TypeError(
                "generator should be an instance of AdjacencyPowerGenerator."
            )
        require_integer_in_range(num_walks, "num_walks", min_val=1)
        require_integer_in_range(embedding_dimension, "embedding_dimension", min_val=2)
        self.num_walks = num_walks
        self.num_powers = generator.num_powers
        self.n_nodes = int(generator.Aadj_T.shape[0])
        # The embedding is split into equal left/right halves (see embeddings()), so the
        # total dimension must be even; odd values are rounded down with a warning.
        if embedding_dimension % 2 != 0:
            warnings.warn(
                f"embedding_dimension: expected even number, found odd number ({embedding_dimension}). It will be rounded down to {embedding_dimension - 1}.",
                stacklevel=2,
            )
            embedding_dimension -= 1
        self.embedding_dimension = embedding_dimension
        # Left half of each node's embedding, looked up per batch row.
        self._left_embedding = Embedding(
            self.n_nodes,
            int(self.embedding_dimension / 2),
            input_length=None,
            embeddings_initializer=embeddings_initializer,
            embeddings_regularizer=embeddings_regularizer,
            embeddings_constraint=embeddings_constraint,
        )
        # Right half of every node's embedding, stored as a Dense kernel so the dot
        # product against all nodes is a single matmul (see in_out_tensors()).
        self._right_embedding = Dense(
            self.n_nodes,
            use_bias=False,
            kernel_initializer=embeddings_initializer,
            kernel_regularizer=embeddings_regularizer,
            kernel_constraint=embeddings_constraint,
        )
        # Attention over adjacency powers, shared by all batches.
        self._attentive_walk = AttentiveWalk(
            walk_length=self.num_powers,
            attention_constraint=attention_constraint,
            attention_regularizer=attention_regularizer,
            attention_initializer=attention_initializer,
        )
    def embeddings(self):
        """
        This function returns the embeddings from a model with Watch Your Step embeddings.

        The left-embedding rows and the (transposed) right-embedding kernel columns are
        concatenated per node, giving shape (n_nodes, embedding_dimension).

        Returns:
            embeddings (np.array): a numpy array of the model's embeddings.
        """
        embeddings = np.hstack(
            [
                self._left_embedding.embeddings.numpy(),
                self._right_embedding.kernel.numpy().transpose(),
            ]
        )
        return embeddings
    def in_out_tensors(self):
        """
        This function builds the layers for a keras model.

        returns:
            A tuple of (inputs, outputs) to use with a keras model. Inputs are the batch's
            row node IDs and the corresponding rows of the adjacency powers; the output
            stacks the expected-walk and outer-product tensors along axis 1.
        """
        input_rows = Input(batch_shape=(None,), name="row_node_ids", dtype="int64")
        input_powers = Input(batch_shape=(None, self.num_powers, self.n_nodes))
        vectors_left = self._left_embedding(input_rows)
        # all right embeddings are used in every batch. to avoid unnecessary lookups the right embeddings are stored
        # in a dense layer to enable efficient dot product between the left vectors in the current batch and all right
        # vectors
        outer_product = self._right_embedding(vectors_left)
        expected_walk = self.num_walks * self._attentive_walk(input_powers)
        # layer to add batch dimension of 1 to output
        expander = Lambda(lambda x: K.expand_dims(x, axis=1))
        output = Concatenate(axis=1)([expander(expected_walk), expander(outer_product)])
        return [input_rows, input_powers], output
    build = deprecated_model_function(in_out_tensors, "build")
| 9,442 | 38.676471 | 175 | py |
stellargraph | stellargraph-master/stellargraph/layer/attri2vec.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
attri2vec
"""
__all__ = ["Attri2Vec"]
from tensorflow.keras import Input
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Lambda, Reshape, Embedding
import warnings
from .misc import deprecated_model_function
from ..mapper import Attri2VecLinkGenerator, Attri2VecNodeGenerator
def _require_without_generator(value, name):
if value is not None:
return value
else:
raise ValueError(
f"{name}: expected a value for 'input_dim', 'node_num' and 'multiplicity' when "
f"'generator' is not provided, found {name}=None."
)
class Attri2Vec:
"""
Implementation of the attri2vec algorithm of Zhang et al. with Keras layers.
see: https://arxiv.org/abs/1901.04095.
The model minimally requires specification of the layer sizes as a list of int
corresponding to the feature dimensions for each hidden layer and a generator object.
.. seealso::
Examples using Attri2Vec:
- `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/attri2vec-node-classification.html>`__
- `link prediction <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/attri2vec-link-prediction.html>`__
- `unsupervised representation learning <https://stellargraph.readthedocs.io/en/stable/demos/embeddings/attri2vec-embeddings.html>`__
- `comparison of link prediction algorithms <https://stellargraph.readthedocs.io/en/stable/demos/link-prediction/homogeneous-comparison-link-prediction.html>`__
Appropriate data generators: :class:`.Attri2VecNodeGenerator`, :class:`.Attri2VecLinkGenerator`.
Args:
layer_sizes (list): Hidden feature dimensions for each layer.
generator (Sequence): A NodeSequence or LinkSequence.
bias (bool): If True a bias vector is learnt for each layer in the attri2vec model, default to False.
activation (str): The activation function of each layer in the attri2vec model, which takes values from ``linear``, ``relu`` and ``sigmoid`` (default).
normalize ("l2" or None): The normalization used after each layer, default to None.
input_dim (int, optional): The dimensions of the node features used as input to the model.
node_num (int, optional): The number of nodes in the given graph.
multiplicity (int, optional): The number of nodes to process at a time. This is 1 for a node
inference and 2 for link inference (currently no others are supported).
.. note::
The values for ``input_dim``, ``node_num``, and ``multiplicity`` are obtained from the
provided ``generator`` by default. The additional keyword arguments for these parameters
provide an alternative way to specify them if a generator cannot be supplied.
"""
def __init__(
self,
layer_sizes,
generator=None,
bias=False,
activation="sigmoid",
normalize=None,
input_dim=None,
node_num=None,
multiplicity=None,
):
if activation == "linear" or activation == "relu" or activation == "sigmoid":
self.activation = activation
else:
raise ValueError(
"Activation should be either 'linear', 'relu' or 'sigmoid'; received '{}'".format(
activation
)
)
if normalize == "l2":
self._normalization = Lambda(lambda x: K.l2_normalize(x, axis=-1))
elif normalize is None:
self._normalization = Lambda(lambda x: x)
else:
raise ValueError(
"Normalization should be either 'l2' or None; received '{}'".format(
normalize
)
)
# Get the model parameters from the generator or the keyword arguments
if generator is not None:
self._get_sizes_from_generator(generator)
else:
self.input_node_num = _require_without_generator(node_num, "node_num")
self.input_feature_size = _require_without_generator(input_dim, "input_dim")
self.multiplicity = _require_without_generator(multiplicity, "multiplicity")
# Model parameters
self.n_layers = len(layer_sizes)
self.bias = bias
# Feature dimensions for each layer
self.dims = [self.input_feature_size] + layer_sizes
# store the trainable layers
self._layers = [
Dense(layer_size, activation=self.activation, use_bias=self.bias)
for layer_size in layer_sizes
]
if self.multiplicity == 1:
self._output_embedding = None
else:
self._output_embedding = Embedding(
self.input_node_num,
layer_sizes[-1],
input_length=1,
name="output_embedding",
)
def _get_sizes_from_generator(self, generator):
"""
Sets node_num and input_feature_size from the generator.
Args:
generator: The supplied generator.
"""
if not isinstance(generator, (Attri2VecNodeGenerator, Attri2VecLinkGenerator)):
raise TypeError(
"Generator should be an instance of Attri2VecNodeGenerator or Attri2VecLinkGenerator"
)
self.multiplicity = generator.multiplicity
self.input_node_num = generator.graph.number_of_nodes()
feature_sizes = generator.graph.node_feature_sizes()
if len(feature_sizes) > 1:
raise RuntimeError(
"Attri2Vec called on graph with more than one node type."
)
self.input_feature_size = feature_sizes.popitem()[1]
def __call__(self, xin):
"""
Construct node representations from node attributes through deep neural network
Args:
xin (Keras Tensor): Batch input features
Returns:
Output tensor
"""
# Form Attri2Vec layers iteratively
h_layer = xin
for layer in self._layers:
h_layer = self._normalization(layer(h_layer))
return h_layer
def _node_model(self):
"""
Builds a Attri2Vec model for node representation prediction.
Returns:
tuple: ``(x_inp, x_out)`` where ``x_inp`` is a Keras input tensor
for the Attri2Vec model and ``x_out`` is the Keras tensor
for the Attri2Vec model output.
"""
# Create tensor inputs
x_inp = Input(shape=(self.input_feature_size,))
# Output from Attri2Vec model
x_out = self(x_inp)
return x_inp, x_out
def _link_model(self):
"""
Builds a Attri2Vec model for context node prediction.
Returns:
tuple: (x_inp, x_out) where ``x_inp`` is a list of Keras input tensors for (src, dst) nodes in the node pairs
and ``x_out`` is a list of output tensors for (src, dst) nodes in the node pairs
"""
# Expose input and output sockets of the model, for source node:
x_inp_src, x_out_src = self._node_model()
# Expose input and out sockets of the model, for target node:
x_inp_dst = Input(shape=(1,))
assert isinstance(self._output_embedding, Embedding)
x_out_dst = self._output_embedding(x_inp_dst)
x_out_dst = Reshape((self.dims[self.n_layers],))(x_out_dst)
x_inp = [x_inp_src, x_inp_dst]
x_out = [x_out_src, x_out_dst]
return x_inp, x_out
def in_out_tensors(self, multiplicity=None):
"""
Builds a Attri2Vec model for node or link/node pair prediction, depending on the generator used to construct
the model (whether it is a node or link/node pair generator).
Returns:
tuple: ``(x_inp, x_out)``, where ``x_inp`` is a list of Keras input tensors
for the specified Attri2Vec model (either node or link/node pair model) and ``x_out`` contains
model output tensor(s) of shape ``(batch_size, layer_sizes[-1])``
"""
if multiplicity is None:
multiplicity = self.multiplicity
if multiplicity == 1:
return self._node_model()
elif multiplicity == 2:
return self._link_model()
else:
raise RuntimeError(
"Currently only multiplicities of 1 and 2 are supported. Consider using node_model or "
"link_model method explicitly to build node or link prediction model, respectively."
)
    def default_model(self, flatten_output=True):
        # Deprecated entry point. `flatten_output` is accepted for backwards
        # compatibility but ignored; the call simply delegates to
        # `in_out_tensors()` after warning.
        warnings.warn(
            "The .default_model() method is deprecated. Please use .in_out_tensors() method instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.in_out_tensors()

    # Deprecated public aliases that warn and delegate to the underscored
    # implementations above.
    node_model = deprecated_model_function(_node_model, "node_model")
    link_model = deprecated_model_function(_link_model, "link_model")
    build = deprecated_model_function(in_out_tensors, "build")
| 9,722 | 36.832685 | 167 | py |
stellargraph | stellargraph-master/stellargraph/layer/appnp.py | # -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tensorflow.keras.layers import Dense, Lambda, Dropout, Input, Layer, InputLayer
import tensorflow.keras.backend as K
from ..mapper import FullBatchGenerator, ClusterNodeGenerator
from .preprocessing_layer import GraphPreProcessingLayer
from .misc import SqueezedSparseConversion, deprecated_model_function, GatherIndices
class APPNPPropagationLayer(Layer):
    """
    Implementation of Approximate Personalized Propagation of Neural Predictions (PPNP)
    as in https://arxiv.org/abs/1810.05997.

    Notes:
      - The inputs are tensors with a batch dimension of 1:
        Keras requires this batch dimension, and for full-batch methods
        we only have a single "batch".

      - There are two inputs required, the node features,
        and the normalized graph Laplacian matrix

      - This class assumes that the normalized Laplacian matrix is passed as
        input to the Keras methods.

    .. seealso:: :class:`.APPNP` combines several of these layers.

    Args:
        units (int): dimensionality of output feature vectors
        final_layer (bool): Deprecated, use ``tf.gather`` or :class:`.GatherIndices`
        teleport_probability: "probability" of returning to the starting node in the propagation step as described in
            the paper (alpha in the paper)
        input_dim (int, optional): the size of the input shape, if known.
        kwargs: any additional arguments to pass to :class:`tensorflow.keras.layers.Layer`
    """

    def __init__(
        self,
        units,
        teleport_probability=0.1,
        final_layer=None,
        input_dim=None,
        **kwargs,
    ):
        # Let Keras know the input shape if the caller specified it explicitly.
        if "input_shape" not in kwargs and input_dim is not None:
            kwargs["input_shape"] = (input_dim,)

        super().__init__(**kwargs)

        self.units = units
        self.teleport_probability = teleport_probability
        # `final_layer` was removed from the API; fail loudly rather than ignore it.
        if final_layer is not None:
            raise ValueError(
                "'final_layer' is not longer supported, use 'tf.gather' or 'GatherIndices' separately"
            )

    def get_config(self):
        """
        Gets class configuration for Keras serialization.
        Used by Keras model serialization.

        Returns:
            A dictionary that contains the config of the layer
        """
        config = {
            "units": self.units,
            "teleport_probability": self.teleport_probability,
        }

        base_config = super().get_config()
        return {**base_config, **config}

    def compute_output_shape(self, input_shapes):
        """
        Computes the output shape of the layer.
        Assumes the following inputs:

        Args:
            input_shapes (tuple of int)
                Shape tuples can include None for free dimensions, instead of an integer.

        Returns:
            An input shape tuple.
        """
        feature_shape, *As_shapes = input_shapes

        batch_dim = feature_shape[0]
        out_dim = feature_shape[1]

        # Output keeps (batch, nodes) of the features, with `units` channels.
        return batch_dim, out_dim, self.units

    def build(self, input_shapes):
        """
        Builds the layer

        Args:
            input_shapes (list of int): shapes of the layer's inputs (node features and adjacency matrix)
        """
        # No trainable weights: this layer only mixes already-computed features.
        self.built = True

    def call(self, inputs):
        """
        Applies the layer.

        Args:
            inputs (list): a list of 3 input tensors that includes
                propagated node features (size 1 x N x F),
                node features (size 1 x N x F),
                graph adjacency matrix (size N x N),
                where N is the number of nodes in the graph, and
                F is the dimensionality of node features.

        Returns:
            Keras Tensor that represents the output of the layer.
        """
        propagated_features, features, *As = inputs
        batch_dim, n_nodes, _ = K.int_shape(features)
        if batch_dim != 1:
            raise ValueError(
                "Currently full-batch methods only support a batch dimension of one"
            )

        # Propagate the node features
        A = As[0]
        if K.is_sparse(A):
            # Sparse matmul needs 2-D operands: drop the batch axis, multiply,
            # then restore it.
            propagated_features = K.squeeze(propagated_features, 0)
            propagated_features = K.dot(A, propagated_features)
            propagated_features = K.expand_dims(propagated_features, 0)
        else:
            propagated_features = K.batch_dot(A, propagated_features)

        # output = (1 - alpha) * A @ h  +  alpha * h0, with alpha the teleport
        # probability and h0 the un-propagated node features.
        output = (1 - self.teleport_probability) * propagated_features
        output += self.teleport_probability * features

        return output
class APPNP:
    """
    Implementation of Approximate Personalized Propagation of Neural Predictions (APPNP)
    as in https://arxiv.org/abs/1810.05997.

    The model minimally requires specification of the fully connected layer sizes as a list of int
    corresponding to the feature dimensions for each hidden layer,
    activation functions for each hidden layers, and a generator object.

    To use this class as a Keras model, the features and preprocessed adjacency matrix
    should be supplied using:

    - the :class:`.FullBatchNodeGenerator` class for node inference
    - the :class:`.ClusterNodeGenerator` class for scalable/inductive node inference using the Cluster-GCN training procedure (https://arxiv.org/abs/1905.07953)
    - the :class:`.FullBatchLinkGenerator` class for link inference

    To have the appropriate preprocessing the generator object should be instantiated
    with the `method='gcn'` argument.

    Example:
        Building an APPNP node model::

            generator = FullBatchNodeGenerator(G, method="gcn")
            ppnp = APPNP(
                layer_sizes=[64, 64, 1],
                activations=['relu', 'relu', 'relu'],
                generator=generator,
                dropout=0.5
            )
            x_in, x_out = ppnp.in_out_tensors()

    Notes:
      - The inputs are tensors with a batch dimension of 1. These are provided by the \
        :class:`.FullBatchNodeGenerator` object.

      - This assumes that the normalized Laplacian matrix is provided as input to
        Keras methods. When using the :class:`.FullBatchNodeGenerator` specify the
        ``method='gcn'`` argument to do this preprocessing.

      - The nodes provided to the :meth:`.FullBatchNodeGenerator.flow` method are
        used by the final layer to select the predictions for those nodes in order.
        However, the intermediate layers before the final layer order the nodes
        in the same way as the adjacency matrix.

      - The size of the final fully connected layer must be equal to the number of classes to predict.

    .. seealso::

       Example using APPNP: `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/ppnp-node-classification.html>`__.

       Appropriate data generators: :class:`.FullBatchNodeGenerator`, :class:`.FullBatchLinkGenerator`, :class:`.ClusterNodeGenerator`.

       :class:`.APPNPPropagationLayer` is the base layer out of which an APPNP model is built.

    Args:
        layer_sizes (list of int): list of output sizes of fully connected layers in the stack
        activations (list of str): list of activations applied to each fully connected layer's output
        generator (FullBatchNodeGenerator): an instance of FullBatchNodeGenerator class constructed on the graph of interest
        bias (bool): toggles an optional bias in fully connected layers
        dropout (float): dropout rate applied to input features of each layer
        kernel_regularizer (str): normalization applied to the kernels of fully connected layers
        teleport_probability: "probability" of returning to the starting node in the propagation step as described in
            the paper (alpha in the paper)
        approx_iter: number of iterations to approximate PPNP as described in the paper (K in the paper)
    """

    def __init__(
        self,
        layer_sizes,
        generator,
        activations,
        bias=True,
        dropout=0.0,
        teleport_probability=0.1,
        kernel_regularizer=None,
        approx_iter=10,
    ):
        # NOTE(review): the message below has a typo ("a instance") and a
        # pointless f-prefix; fix separately since this is runtime text.
        if not isinstance(generator, (FullBatchGenerator, ClusterNodeGenerator)):
            raise TypeError(
                f"Generator should be a instance of FullBatchNodeGenerator, "
                f"FullBatchLinkGenerator or ClusterNodeGenerator"
            )

        if not len(layer_sizes) == len(activations):
            raise ValueError(
                "The number of layers should equal the number of activations"
            )

        if not isinstance(approx_iter, int) or approx_iter <= 0:
            raise ValueError("approx_iter should be a positive integer")

        if (teleport_probability > 1.0) or (teleport_probability < 0.0):
            raise ValueError(
                "teleport_probability should be between 0 and 1 (inclusive)"
            )

        self.layer_sizes = layer_sizes
        self.teleport_probability = teleport_probability
        self.activations = activations
        self.bias = bias
        self.dropout = dropout
        self.kernel_regularizer = kernel_regularizer
        self.support = 1
        self.approx_iter = approx_iter

        # Copy required information from generator
        self.method = generator.method
        self.multiplicity = generator.multiplicity
        self.n_features = generator.features.shape[1]
        self.use_sparse = generator.use_sparse

        # Cluster generators feed variable-sized subgraphs, so the node count
        # is unknown up front.
        if isinstance(generator, FullBatchGenerator):
            self.n_nodes = generator.features.shape[0]
        else:
            self.n_nodes = None

        if self.method == "none":
            self.graph_norm_layer = GraphPreProcessingLayer(num_of_nodes=self.n_nodes)

        self._feature_layers = []

        # Initialize a stack of fully connected layers (the MLP that computes
        # the initial node predictions, before any propagation).
        n_layers = len(self.layer_sizes)
        for ii in range(n_layers):
            l = self.layer_sizes[ii]
            a = self.activations[ii]
            self._feature_layers.append(Dropout(self.dropout))
            self._feature_layers.append(
                Dense(
                    l,
                    activation=a,
                    use_bias=self.bias,
                    kernel_regularizer=self.kernel_regularizer,
                )
            )

        # One (Dropout, propagation) pair per power-iteration step (K steps).
        self._propagate_layers = []
        feature_dim = self.layer_sizes[-1]
        for ii in range(approx_iter):
            self._propagate_layers.append(Dropout(self.dropout))
            self._propagate_layers.append(
                APPNPPropagationLayer(
                    feature_dim, teleport_probability=self.teleport_probability,
                )
            )

    def _run(self, x, feature_layers):
        # Inputs arrive as [features, output indices, adjacency tensor(s)].
        x_in, out_indices, *As = x

        # Currently we require the batch dimension to be one for full-batch methods
        batch_dim, n_nodes, _ = K.int_shape(x_in)
        if batch_dim != 1:
            raise ValueError(
                "Currently full-batch methods only support a batch dimension of one"
            )

        # Convert input indices & values to a sparse matrix
        if self.use_sparse:
            A_indices, A_values = As
            Ainput = [
                SqueezedSparseConversion(
                    shape=(n_nodes, n_nodes), dtype=A_values.dtype
                )([A_indices, A_values])
            ]

        # Otherwise, create dense matrix from input tensor
        else:
            Ainput = As

        # TODO: Support multiple matrices?
        if len(Ainput) != 1:
            raise NotImplementedError(
                "The APPNP method currently only accepts a single matrix"
            )

        h_layer = x_in

        # Initial predictions from the MLP only.
        for layer in feature_layers:
            h_layer = layer(h_layer)

        # The MLP output is re-injected at every propagation step as the
        # "teleport" target (h0 in the APPNP paper).
        feature_layer = h_layer

        for layer in self._propagate_layers:
            if isinstance(layer, APPNPPropagationLayer):
                h_layer = layer([h_layer, feature_layer] + Ainput)
            else:
                # For other (non-graph) layers only supply the input tensor
                h_layer = layer(h_layer)

        # only return data for the requested nodes
        h_layer = GatherIndices(batch_dims=1)([h_layer, out_indices])

        return h_layer

    def __call__(self, x):
        """
        Apply APPNP to the inputs.

        The input tensors are expected to be a list of the following:
        [
            Node features shape (1, N, F),
            Output indices (1, O),
            Adjacency indices (1, E, 2),
            Adjacency values (1, E),
        ]
        where N is the number of nodes, F the number of input features,
        E is the number of edges, O the number of output nodes.
        (For a dense adjacency, the last two entries are replaced by a single
        (1, N, N) matrix.)

        Args:
            x (Tensor): input tensors

        Returns:
            Output tensor
        """
        return self._run(x, feature_layers=self._feature_layers)

    def _tensors(self, multiplicity, feature_layers):

        # Inputs for features
        x_t = Input(batch_shape=(1, self.n_nodes, self.n_features))

        # If not specified use multiplicity from instantiation
        if multiplicity is None:
            multiplicity = self.multiplicity

        # Indices to gather for model output
        if multiplicity == 1:
            out_indices_t = Input(batch_shape=(1, None), dtype="int32")
        else:
            out_indices_t = Input(batch_shape=(1, None, multiplicity), dtype="int32")

        # Create inputs for sparse or dense matrices
        if self.use_sparse:
            # Placeholders for the sparse adjacency matrix
            A_indices_t = Input(batch_shape=(1, None, 2), dtype="int64")
            A_values_t = Input(batch_shape=(1, None))
            A_placeholders = [A_indices_t, A_values_t]
        else:
            # Placeholders for the dense adjacency matrix
            A_m = Input(batch_shape=(1, self.n_nodes, self.n_nodes))
            A_placeholders = [A_m]

        # TODO: Support multiple matrices
        x_inp = [x_t, out_indices_t] + A_placeholders
        x_out = self._run(x_inp, feature_layers=feature_layers)

        return x_inp, x_out

    def in_out_tensors(self, multiplicity=None):
        """
        Builds an APPNP model for node or link prediction

        Returns:
            tuple: ``(x_inp, x_out)``, where ``x_inp`` is a list of Keras/TensorFlow
            input tensors for the model and ``x_out`` is a tensor of the model output.
        """
        x_inp, x_out = self._tensors(
            multiplicity=multiplicity, feature_layers=self._feature_layers
        )

        # Flatten output by removing singleton batch dimension
        if x_out.shape[0] == 1:
            self.x_out_flat = Lambda(lambda x: K.squeeze(x, 0))(x_out)
        else:
            self.x_out_flat = x_out

        return x_inp, x_out

    def _link_model(self):
        if self.multiplicity != 2:
            warnings.warn(
                "Link model requested but a generator not supporting links was supplied."
            )
        return self.in_out_tensors(multiplicity=2)

    def _node_model(self):
        if self.multiplicity != 1:
            warnings.warn(
                "Node model requested but a generator not supporting nodes was supplied."
            )
        return self.in_out_tensors(multiplicity=1)

    def propagate_model(self, base_model):
        """
        Propagates a trained model using personalised PageRank.

        Args:
            base_model (keras Model): trained model with node features as input, predicted classes as output

        Returns:
            tuple: ``(x_inp, x_out)``, where ``x_inp`` is a list of two Keras input tensors
            for the APPNP model (containing node features and graph adjacency),
            and ``x_out`` is a Keras tensor for the APPNP model output.
        """
        if self.multiplicity != 1:
            raise RuntimeError(
                "APPNP does not currently support propagating a link model"
            )

        # Strip the InputLayers and reuse the remaining trained layers as the
        # feature MLP inside a fresh propagation graph.
        feature_layers = [
            layer for layer in base_model.layers if not isinstance(layer, InputLayer)
        ]

        return self._tensors(multiplicity=1, feature_layers=feature_layers)

    # Deprecated public aliases that warn and delegate.
    node_model = deprecated_model_function(_node_model, "node_model")
    link_model = deprecated_model_function(_link_model, "link_model")
    build = deprecated_model_function(in_out_tensors, "build")
| 16,999 | 36.037037 | 160 | py |
stellargraph | stellargraph-master/stellargraph/layer/ppnp.py | # -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tensorflow.keras.layers import Dense, Lambda, Layer, Dropout, Input
import tensorflow.keras.backend as K
import tensorflow as tf
import numpy as np
from .misc import SqueezedSparseConversion, GatherIndices
from ..mapper import FullBatchNodeGenerator
from .misc import deprecated_model_function
from .preprocessing_layer import GraphPreProcessingLayer
class PPNPPropagationLayer(Layer):
    """
    Implementation of Personalized Propagation of Neural Predictions (PPNP)
    as in https://arxiv.org/abs/1810.05997.

    Notes:
      - The inputs are tensors with a batch dimension of 1:
        Keras requires this batch dimension, and for full-batch methods
        we only have a single "batch".

      - There are two inputs required, the node features,
        and the graph personalized page rank matrix

      - This class assumes that the personalized page rank matrix (specified in paper) matrix is passed as
        input to the Keras methods.

    .. seealso:: :class:`.PPNP` combines several of these layers.

    Args:
        units (int): dimensionality of output feature vectors
        final_layer (bool): Deprecated, use ``tf.gather`` or :class:`.GatherIndices`
        input_dim (int, optional): the size of the input shape, if known.
        kwargs: any additional arguments to pass to :class:`tensorflow.keras.layers.Layer`
    """

    def __init__(self, units, final_layer=None, input_dim=None, **kwargs):
        # Let Keras know the input shape if the caller specified it explicitly.
        if "input_shape" not in kwargs and input_dim is not None:
            kwargs["input_shape"] = (input_dim,)

        super().__init__(**kwargs)

        self.units = units
        # `final_layer` was removed from the API; fail loudly rather than ignore it.
        if final_layer is not None:
            raise ValueError(
                "'final_layer' is not longer supported, use 'tf.gather' or 'GatherIndices' separately"
            )

    def get_config(self):
        """
        Gets class configuration for Keras serialization.
        Used by Keras model serialization.

        Returns:
            A dictionary that contains the config of the layer
        """
        config = {"units": self.units}

        base_config = super().get_config()
        return {**base_config, **config}

    def compute_output_shape(self, input_shapes):
        """
        Computes the output shape of the layer.
        Assumes the following inputs:

        Args:
            input_shapes (tuple of int)
                Shape tuples can include None for free dimensions, instead of an integer.

        Returns:
            An input shape tuple.
        """
        feature_shape, *As_shapes = input_shapes

        batch_dim = feature_shape[0]
        out_dim = feature_shape[1]

        # Output keeps (batch, nodes) of the features, with `units` channels.
        return batch_dim, out_dim, self.units

    def build(self, input_shapes):
        """
        Builds the layer

        Args:
            input_shapes (list of int): shapes of the layer's inputs (node features and adjacency matrix)
        """
        # No trainable weights: this layer only multiplies by the PPR matrix.
        self.built = True

    def call(self, inputs):
        """
        Applies the layer.

        Args:
            inputs (list): a list of 3 input tensors that includes
                node features (size 1 x N x F),
                graph personalized page rank matrix (size N x N),
                where N is the number of nodes in the graph, and
                F is the dimensionality of node features.

        Returns:
            Keras Tensor that represents the output of the layer.
        """
        features, *As = inputs
        batch_dim, n_nodes, _ = K.int_shape(features)
        if batch_dim != 1:
            raise ValueError(
                "Currently full-batch methods only support a batch dimension of one"
            )

        # Remove singleton batch dimension
        features = K.squeeze(features, 0)

        # Propagate the features: one exact multiplication by the (dense)
        # personalized PageRank matrix.
        A = As[0]
        output = K.dot(A, features)

        # Add batch dimension back if we removed it
        if batch_dim == 1:
            output = K.expand_dims(output, 0)

        return output
class PPNP:
    """
    Implementation of Personalized Propagation of Neural Predictions (PPNP)
    as in https://arxiv.org/abs/1810.05997.

    The model minimally requires specification of the fully connected layer sizes as a list of int
    corresponding to the feature dimensions for each hidden layer,
    activation functions for each hidden layers, and a generator object.

    To use this class as a Keras model, the features and preprocessed adjacency matrix
    should be supplied using the :class:`.FullBatchNodeGenerator` class. To have the appropriate
    preprocessing the generator object should be instantiated as follows::

        generator = FullBatchNodeGenerator(G, method="ppnp")

    Notes:
      - The inputs are tensors with a batch dimension of 1. These are provided by the \
        :class:`.FullBatchNodeGenerator` object.

      - This assumes that the personalized page rank matrix is provided as input to
        Keras methods. When using the :class:`.FullBatchNodeGenerator` specify the
        ``method='ppnp'`` argument to do this preprocessing.

      - ``method='ppnp'`` requires that ``use_sparse=False`` and generates a dense personalized page rank matrix

      - The nodes provided to the :meth:`FullBatchNodeGenerator.flow` method are
        used by the final layer to select the predictions for those nodes in order.
        However, the intermediate layers before the final layer order the nodes
        in the same way as the adjacency matrix.

      - The size of the final fully connected layer must be equal to the number of classes to predict.

    .. seealso::

       Example using PPNP: `node classification <https://stellargraph.readthedocs.io/en/stable/demos/node-classification/ppnp-node-classification.html>`__.

       Appropriate data generators: :class:`.FullBatchNodeGenerator`, :class:`.FullBatchLinkGenerator`.

       :class:`.PPNPPropagationLayer` is the base layer out of which a PPNP model is built.

    Args:
        layer_sizes (list of int): list of output sizes of fully connected layers in the stack
        activations (list of str): list of activations applied to each fully connected layer's output
        generator (FullBatchNodeGenerator): an instance of FullBatchNodeGenerator class constructed on the graph of interest
        bias (bool): toggles an optional bias in fully connected layers
        dropout (float): dropout rate applied to input features of each layer
        kernel_regularizer (str): normalization applied to the kernels of fully connected layers
    """

    def __init__(
        self,
        layer_sizes,
        generator,
        activations,
        bias=True,
        dropout=0.0,
        kernel_regularizer=None,
    ):
        if not isinstance(generator, FullBatchNodeGenerator):
            # fixed grammar: "a instance" -> "an instance"
            raise TypeError("Generator should be an instance of FullBatchNodeGenerator")

        if not len(layer_sizes) == len(activations):
            raise ValueError(
                "The number of layers should equal the number of activations"
            )

        self.layer_sizes = layer_sizes
        self.activations = activations
        self.bias = bias
        self.dropout = dropout
        self.kernel_regularizer = kernel_regularizer
        self.support = 1

        # Copy required information from generator
        self.method = generator.method
        self.multiplicity = generator.multiplicity
        self.n_nodes = generator.features.shape[0]
        self.n_features = generator.features.shape[1]

        # Check if the generator is producing a sparse matrix
        self.use_sparse = generator.use_sparse

        # Initialize a stack of fully connected layers (the prediction MLP),
        # each preceded by dropout.
        n_layers = len(self.layer_sizes)
        self._layers = []
        for ii in range(n_layers):
            l = self.layer_sizes[ii]
            a = self.activations[ii]
            self._layers.append(Dropout(self.dropout))
            self._layers.append(
                Dense(
                    l,
                    activation=a,
                    use_bias=self.bias,
                    kernel_regularizer=self.kernel_regularizer,
                )
            )
        # Final step: one exact propagation by the personalized PageRank matrix.
        self._layers.append(Dropout(self.dropout))
        self._layers.append(PPNPPropagationLayer(self.layer_sizes[-1]))

    def __call__(self, x):
        """
        Apply PPNP to the inputs.

        The input tensors are expected to be a list of the following:
        [
            Node features shape (1, N, F),
            Output indices (1, O),
            Adjacency indices (1, E, 2),
            Adjacency values (1, E),
        ]
        where N is the number of nodes, F the number of input features,
        E is the number of edges, O the number of output nodes.
        (For a dense adjacency, the last two entries are replaced by a single
        (1, N, N) matrix.)

        Args:
            x (Tensor): input tensors

        Returns:
            Output tensor
        """
        x_in, out_indices, *As = x

        # Currently we require the batch dimension to be one for full-batch methods
        batch_dim, n_nodes, _ = K.int_shape(x_in)
        if batch_dim != 1:
            raise ValueError(
                "Currently full-batch methods only support a batch dimension of one"
            )

        # Convert input indices & values to a sparse matrix
        if self.use_sparse:
            A_indices, A_values = As
            Ainput = [
                SqueezedSparseConversion(
                    shape=(n_nodes, n_nodes), dtype=A_values.dtype
                )([A_indices, A_values])
            ]

        # Otherwise, create dense matrix from input tensor
        else:
            Ainput = [Lambda(lambda A: K.squeeze(A, 0))(A) for A in As]

        # TODO: Support multiple matrices?
        if len(Ainput) != 1:
            # fixed copy-paste bug: the message previously said "APPNP"
            raise NotImplementedError(
                "The PPNP method currently only accepts a single matrix"
            )

        h_layer = x_in
        for layer in self._layers:
            if isinstance(layer, PPNPPropagationLayer):
                # Graph layers need the adjacency/PPR matrix as well
                h_layer = layer([h_layer] + Ainput)
            else:
                h_layer = layer(h_layer)

        # only return data for the requested nodes
        h_layer = GatherIndices(batch_dims=1)([h_layer, out_indices])
        return h_layer

    def in_out_tensors(self, multiplicity=None):
        """
        Builds a PPNP model for node or link prediction

        Returns:
            tuple: ``(x_inp, x_out)``, where ``x_inp`` is a list of Keras/TensorFlow
            input tensors for the model and ``x_out`` is a tensor of the model output.
        """
        # Inputs for features
        x_t = Input(batch_shape=(1, self.n_nodes, self.n_features))

        # If not specified use multiplicity from instantiation
        if multiplicity is None:
            multiplicity = self.multiplicity

        # Indices to gather for model output
        if multiplicity == 1:
            out_indices_t = Input(batch_shape=(1, None), dtype="int32")
        else:
            out_indices_t = Input(batch_shape=(1, None, multiplicity), dtype="int32")

        # Create inputs for sparse or dense matrices
        if self.use_sparse:
            # Placeholders for the sparse adjacency matrix
            A_indices_t = Input(batch_shape=(1, None, 2), dtype="int64")
            A_values_t = Input(batch_shape=(1, None))
            A_placeholders = [A_indices_t, A_values_t]
        else:
            # Placeholders for the dense adjacency matrix
            A_m = Input(batch_shape=(1, self.n_nodes, self.n_nodes))
            A_placeholders = [A_m]

        # TODO: Support multiple matrices
        x_inp = [x_t, out_indices_t] + A_placeholders
        x_out = self(x_inp)

        # Flatten output by removing singleton batch dimension
        if x_out.shape[0] == 1:
            self.x_out_flat = Lambda(lambda x: K.squeeze(x, 0))(x_out)
        else:
            self.x_out_flat = x_out

        return x_inp, x_out

    def _link_model(self):
        if self.multiplicity != 2:
            warnings.warn(
                "Link model requested but a generator not supporting links was supplied."
            )
        return self.in_out_tensors(multiplicity=2)

    def _node_model(self):
        if self.multiplicity != 1:
            warnings.warn(
                "Node model requested but a generator not supporting nodes was supplied."
            )
        return self.in_out_tensors(multiplicity=1)

    # Deprecated public aliases that warn and delegate.
    node_model = deprecated_model_function(_node_model, "node_model")
    link_model = deprecated_model_function(_link_model, "link_model")
    build = deprecated_model_function(in_out_tensors, "build")
| 13,177 | 34.712737 | 155 | py |
stellargraph | stellargraph-master/scripts/demo_indexing.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import argparse
import contextlib
import difflib
import enum
import glob
import itertools
import json
import nbformat
import os.path
import re
import subprocess
import sys
import textwrap
# Spaces of indentation per nesting level in generated HTML.
HTML_INDENT = 2
# Default text for a table cell that only carries a link.
LINK_DEFAULT_TEXT = "demo"
# Text rendered for a plain `True` cell value.
TRUE_TEXT = "yes"
DOC_URL_BASE = "https://stellargraph.readthedocs.io/en/stable"
AUTOGENERATED_PROMPT = (
    f"autogenerated by {__file__}, edit that file instead of this location"
)
# Marker separating hand-written content from the autogenerated docs links.
DOCS_LINK_SEPARATOR = "\n<!-- DOCS LINKS -->\n"
class LinkKind(enum.Enum):
    # Link to a demo index directory (rendered as README.md / index).
    index = 1
    # Link to an individual notebook file.
    notebook = 2
class HtmlBuilder:
    """Accumulates lines of HTML, with optional pretty-printed indentation.

    Args:
        indent (int, optional): spaces per nesting level; ``None`` joins all
            fragments without newlines or indentation.
    """

    def __init__(self, indent=None):
        # rendered lines (the last line may be extended via one_line=True)
        self.html = []
        self.indent_amount = indent
        self.indent_level = 0
        self.add_count = 0

    def add(self, data, one_line=False):
        """Append ``data``: onto the previous line if ``one_line``, else as a
        new line indented to the current nesting level."""
        self.add_count += 1
        if one_line:
            self.html[-1] += data
        else:
            if self.indent_amount:
                indent = " " * (self.indent_amount * self.indent_level)
                data = indent + data
            self.html.append(data)

    @contextlib.contextmanager
    def element(self, name, attrs=None, only_with_attrs=False):
        """Open (and automatically) close an HTML element.

        Args:
            name (str): tag name
            attrs (dict, optional): attribute name/value pairs for the tag
            only_with_attrs (bool): if True and ``attrs`` is empty, emit no
                tag at all (the body is still rendered)
        """
        # was a mutable default argument (attrs={}); normalize None to {}
        attrs = attrs or {}
        if only_with_attrs and not attrs:
            yield
            return

        attrs_str = " ".join(f"{name}='{value}'" for name, value in attrs.items())
        if attrs_str:
            attrs_str = " " + attrs_str

        self.add(f"<{name}{attrs_str}>")
        self.indent_level += 1
        initial_len = len(self.html)
        try:
            yield
        finally:
            self.indent_level -= 1
            # if the body added nothing, keep open and close tags on one line
            self.add(f"</{name}>", one_line=len(self.html) == initial_len)

    def string(self):
        """Join the accumulated fragments into the final HTML string."""
        sep = "" if self.indent_amount is None else "\n"
        return sep.join(self.html)
class T:
    """A single table cell: display text plus an optional link and hover text.

    Args:
        text (str, optional): visible cell text; defaults to the standard link
            text when only a link is given
        link (str, optional): link target (suffix is format-dependent)
        details (str, optional): longer hover/expanded description
        kind (LinkKind): whether ``link`` points at an index or a notebook
    """

    def __init__(self, text=None, link=None, details=None, kind=LinkKind.notebook):
        if text is None and link is None:
            raise ValueError("must specify at least one of 'text' and 'link'")

        self.text = LINK_DEFAULT_TEXT if text is None else text
        self.link = link
        self.details = details
        self.kind = kind
        assert isinstance(kind, LinkKind)

    @staticmethod
    def textify(inp):
        """Normalise a raw cell value (None/bool/str/list/T) into T instances."""
        if not inp:
            return None
        if inp is True:
            return T(TRUE_TEXT)
        if isinstance(inp, list):
            return [T.textify(element) for element in inp]
        if isinstance(inp, T):
            return inp
        return T(inp)
class Format(abc.ABC):
    """Base class for an output format (e.g. HTML, RST) of the demo table."""

    def __init__(self, file_name, separator):
        self.file_name = file_name
        self.separator = separator

    @abc.abstractmethod
    def render(self, headings, algorithms):
        """Render the table of algorithms to a string."""
        ...

    @abc.abstractmethod
    def index_suffix(self, checking):
        """Suffix appended to links of kind ``index``."""
        ...

    # was `@abc.abstractproperty` (deprecated, and wrong here: this is called
    # as a method, `self.notebook_suffix(checking)`, and subclasses override
    # it with plain methods)
    @abc.abstractmethod
    def notebook_suffix(self, checking):
        """Suffix appended to links of kind ``notebook``."""
        ...

    def link(self, t, checking=False):
        """Return the full link target for ``t``, or None if it has no link."""
        if t.link is None:
            return None

        # T.__init__ asserts `kind` is a LinkKind, so this if/elif is exhaustive
        if t.kind is LinkKind.index:
            suffix = self.index_suffix(checking)
        elif t.kind is LinkKind.notebook:
            suffix = self.notebook_suffix(checking)

        return f"{t.link}{suffix}"
class Html(Format):
    """Renders the algorithm table as an HTML ``<table>``."""

    def index_suffix(self, checking):
        return "/README.md"

    def notebook_suffix(self, checking):
        return ".ipynb"

    def render(self, headings, algorithms):
        out = HtmlBuilder(indent=2)
        out.add(f"<!-- {AUTOGENERATED_PROMPT} -->")
        with out.element("table"):
            # header row
            with out.element("tr"):
                for col in headings:
                    with out.element("th"):
                        out.add(self._render_t(col), one_line=True)
            # one row per algorithm
            for row in algorithms:
                with out.element("tr"):
                    for col in headings:
                        with out.element("td"):
                            self._render_cell(out, row.columns[col])
        return out.string()

    def _render_t(self, t):
        """Render one T as an HTML fragment (span for hover text, a for link)."""
        fragment = HtmlBuilder()
        hover = {"title": t.details} if t.details else {}
        target = self.link(t)
        anchor = {"href": target} if target else {}
        # add a span if we need details (hover) text, and a link if there's a link
        with fragment.element("span", hover, only_with_attrs=True):
            with fragment.element("a", anchor, only_with_attrs=True):
                fragment.add(t.text)
        return fragment.string()

    def _render_cell(self, html, cell, one_line=True):
        """Render a cell value (None, a T, or a list of them) into ``html``."""
        if not cell:
            return
        if isinstance(cell, list):
            # multiple elements? space them out
            for part in cell:
                self._render_cell(html, part, one_line=False)
        else:
            html.add(self._render_t(cell), one_line=one_line)
class Rst(Format):
    """Renders the algorithm table as an RST ``list-table`` directive."""

    def index_suffix(self, checking):
        # when checking links, we need to write out the exact filename
        return "/index.rst" if checking else "/index"

    def notebook_suffix(self, checking):
        return ".nblink" if checking else ""

    def render(self, headings, algorithms):
        lines = [".. list-table::", " :header-rows: 1", ""]
        new_row = " *"
        new_item = " -"

        # header row
        lines.append(new_row)
        for col in headings:
            lines.append(f"{new_item} {self._render_t(col)}")

        # one row per algorithm
        for row in algorithms:
            lines.append(new_row)
            for col in headings:
                rendered = self._render_cell(row.columns[col])
                lines.append(f"{new_item} {rendered}" if rendered else new_item)

        return "\n".join(lines)

    def _render_t(self, t):
        target = self.link(t)
        # RST doesn't support the title, directly, but CSS gives us a bit more control to display
        # longer column headings
        label = t.details if t.details else t.text
        if target:
            return f":any:`{label} <{target}>`"
        return label

    def _render_cell(self, cell):
        """Render a cell value (None, a T, or a list of them) to RST text."""
        if not cell:
            return ""
        if isinstance(cell, list):
            return ", ".join(self._render_cell(part) for part in cell)
        return self._render_t(cell)
def find_links(element, fmt):
    """Yield ``(raw_link, rendered_link)`` for every linked T reachable from
    ``element`` (which may be None, a T, a list, or an Algorithm)."""
    # traverse over the collection(s) to find all the links in T's
    if element is None:
        return
    if isinstance(element, T):
        rendered = fmt.link(element, checking=True)
        if rendered:
            yield (element.link, rendered)
    elif isinstance(element, list):
        for child in element:
            yield from find_links(child, fmt)
    elif isinstance(element, Algorithm):
        for child in element.columns.values():
            yield from find_links(child, fmt)
    else:
        raise ValueError(f"unsupported element in link finding {element!r}")
def link_is_valid_relative(link, base_dir):
    """True if ``link`` is absent, or is a relative, non-HTTP path that exists
    on disk relative to ``base_dir``."""
    if link is None:
        # no link at all is trivially fine
        return True
    if os.path.isabs(link) or link.lower().startswith("http"):
        # absolute paths and web URLs are both disallowed: links must be
        # relative so they resolve from the rendered docs and the repo alike
        return False
    return os.path.exists(os.path.join(base_dir, link))
# Columns
def index_link(*args, **kwargs):
    # A T whose link targets a demo directory's index page (LinkKind.index),
    # so the format decides the exact suffix (e.g. "/index.rst" vs "/index").
    return T(*args, **kwargs, kind=LinkKind.index)
# Column headers for the algorithm feature matrix.  Short ``text`` keeps the
# rendered table narrow; ``details`` provides the longer description, and
# ``index_link`` columns point at the matching demo index page.
ALGORITHM = T("Algorithm")
HETEROGENEOUS = T("Heter.", details="Heterogeneous")
DIRECTED = T("Dir.", details="Directed")
WEIGHTED = T("EW", details="Edge weights")
TEMPORAL = T("T", details="Time-varying, temporal")
FEATURES = T("NF", details="Node features")
NC = index_link("NC", link="node-classification", details="Node classification")
LP = index_link("LP", link="link-prediction", details="Link prediction")
RL = index_link("Unsup.", link="embeddings", details="Unsupervised")
INDUCTIVE = T("Ind.", details="Inductive")
GC = index_link("GC", link="graph-classification", details="Graph classification")
# Display order of the columns in the rendered table.
COLUMNS = [
    ALGORITHM,
    HETEROGENEOUS,
    DIRECTED,
    WEIGHTED,
    TEMPORAL,
    FEATURES,
    NC,
    LP,
    RL,
    INDUCTIVE,
    GC,
]
class Algorithm:
    """One row of the feature matrix: the algorithm name plus a value per column.

    Each column value may be None (blank cell), True, a string, a T, or a list
    of these; every value is normalised through ``T.textify``.
    """

    def __init__(
        self,
        algorithm,
        *,
        heterogeneous=None,
        directed=None,
        weighted=None,
        temporal=None,
        features=None,
        nc=None,
        interpretability_nc=None,
        lp=None,
        rl=None,
        inductive=None,
        gc=None,
    ):
        # NOTE: interpretability_nc is accepted here but has no entry below —
        # it is recorded in the data definitions without a rendered column.
        raw = [
            (ALGORITHM, algorithm),
            (HETEROGENEOUS, heterogeneous),
            (DIRECTED, directed),
            (WEIGHTED, weighted),
            (TEMPORAL, temporal),
            (FEATURES, features),
            (NC, nc),
            (LP, lp),
            (RL, rl),
            (INDUCTIVE, inductive),
            (GC, gc),
        ]
        # normalise each cell, keyed by its column-header T
        self.columns = {header: T.textify(value) for header, value in raw}
# Shared cell for algorithms that only support heterogeneity in edge types.
HETEROGENEOUS_EDGE = T("yes, edges", details="multiple edges types")
def rl_us(link=None):
    # Unsupervised representation learning via UnsupervisedSampler.
    return T("US", link=link, details="UnsupervisedSampler")
def rl_dgi(link="embeddings/deep-graph-infomax-embeddings"):
    # Unsupervised representation learning via Deep Graph Infomax.
    return T("DGI", link=link, details="DeepGraphInfomax")
def via_rl(link=None):
    # Task supported indirectly: feed unsupervised embeddings to a downstream model.
    return T("via unsup.", link=link, details="via embedding vectors",)
# The data behind the feature matrix: one Algorithm entry per row, in display
# order.  Cells with a T(link=...) point at the demo notebook demonstrating
# that capability; plain strings/True render as unlinked text.
ALGORITHMS = [
    Algorithm(
        T("GCN", details="Graph Convolutional Network (GCN)"),
        heterogeneous="see RGCN",
        features=True,
        weighted=True,
        temporal="see T-GCN",
        nc=T(link="node-classification/gcn-node-classification"),
        interpretability_nc=T(link="interpretability/gcn-node-link-importance"),
        lp=T(link="link-prediction/gcn-link-prediction"),
        rl=[rl_us(), rl_dgi()],
        inductive="via Cluster-GCN",
        gc=T(link="graph-classification/gcn-supervised-graph-classification"),
    ),
    Algorithm(
        "Cluster-GCN",
        features=True,
        weighted=True,
        nc=T(link="node-classification/cluster-gcn-node-classification"),
        lp=True,
        inductive=True,
        rl=rl_dgi(),
    ),
    Algorithm(
        T("RGCN", details="Relational GCN (RGCN)"),
        heterogeneous=HETEROGENEOUS_EDGE,
        features=True,
        weighted=True,
        nc=T(link="node-classification/rgcn-node-classification"),
        lp=True,
        rl=rl_dgi(),
    ),
    Algorithm(
        T("T-GCN", details="Temporal GCN (T-GCN), implemented as GCN-LSTM"),
        features="time series, sequence",
        temporal="node features",
        nc=T(link="time-series/gcn-lstm-time-series"),
    ),
    Algorithm(
        T("GAT", details="Graph ATtention Network (GAT)"),
        features=True,
        weighted=True,
        nc=T(link="node-classification/gat-node-classification"),
        interpretability_nc=T(link="interpretability/gat-node-link-importance"),
        lp=True,
        rl=[rl_us(), rl_dgi()],
        inductive="via Cluster-GCN",
    ),
    Algorithm(
        T("SGC", details="Simplified Graph Convolution (SGC)"),
        features=True,
        weighted=True,
        nc=T(link="node-classification/sgc-node-classification"),
        lp=True,
    ),
    Algorithm(
        T("PPNP", details="Personalized Propagation of Neural Predictions (PPNP)"),
        features=True,
        weighted=True,
        nc=T(link="node-classification/ppnp-node-classification"),
        lp=True,
        rl=[rl_us(), rl_dgi(link=None)],
    ),
    Algorithm(
        T("APPNP", details="Approximate PPNP (APPNP)"),
        features=True,
        weighted=True,
        nc=T(link="node-classification/ppnp-node-classification"),
        lp=True,
        rl=[rl_us(), rl_dgi()],
        inductive="via Cluster-GCN",
    ),
    Algorithm(
        "GraphWave",
        nc=via_rl(),
        lp=via_rl(),
        rl=T(link="embeddings/graphwave-embeddings"),
    ),
    Algorithm(
        "Attri2Vec",
        features=True,
        nc=T(link="node-classification/attri2vec-node-classification"),
        lp=T(link="link-prediction/attri2vec-link-prediction"),
        rl=T(link="embeddings/attri2vec-embeddings"),
        inductive=True,
    ),
    Algorithm(
        "GraphSAGE",
        heterogeneous="see HinSAGE",
        directed=T(link="node-classification/directed-graphsage-node-classification"),
        weighted=True,
        features=True,
        nc=T(link="node-classification/graphsage-node-classification"),
        lp=T(link="link-prediction/graphsage-link-prediction"),
        rl=[
            rl_us(link="embeddings/graphsage-unsupervised-sampler-embeddings"),
            rl_dgi(),
        ],
        inductive=T(link="node-classification/graphsage-inductive-node-classification"),
    ),
    Algorithm(
        "HinSAGE",
        heterogeneous=True,
        features=True,
        nc=True,
        lp=T(link="link-prediction/hinsage-link-prediction"),
        rl=rl_dgi(),
        inductive=True,
    ),
    Algorithm(
        "Node2Vec",
        weighted=T(link="node-classification/node2vec-weighted-node-classification"),
        nc=[
            T(text="via", details="via embedding vectors",),
            T(
                text="keras",
                link="node-classification/keras-node2vec-node-classification",
                details="keras layer",
            ),
            T(text="gensim", link="node-classification/node2vec-node-classification",),
        ],
        lp=via_rl(link="link-prediction/node2vec-link-prediction"),
        rl=[
            T(
                text="keras",
                link="embeddings/keras-node2vec-embeddings",
                details="keras layer",
            ),
            T(text="gensim", link="embeddings/node2vec-embeddings"),
        ],
    ),
    Algorithm(
        "Metapath2Vec",
        heterogeneous=True,
        nc=via_rl(),
        lp=via_rl(link="link-prediction/metapath2vec-link-prediction"),
        rl=T(link="embeddings/metapath2vec-embeddings"),
    ),
    Algorithm(
        T("CTDNE", details="Continuous-Time Dynamic Network Embeddings"),
        temporal=True,
        nc=via_rl(),
        lp=via_rl(link="link-prediction/ctdne-link-prediction"),
        rl=True,
    ),
    Algorithm(
        "Watch Your Step",
        weighted=True,
        nc=via_rl(link="embeddings/watch-your-step-embeddings"),
        lp=via_rl(),
        rl=T(link="embeddings/watch-your-step-embeddings"),
    ),
    Algorithm(
        "ComplEx",
        heterogeneous=HETEROGENEOUS_EDGE,
        directed=True,
        nc=via_rl(),
        lp=T(link="link-prediction/complex-link-prediction"),
        rl=True,
    ),
    Algorithm(
        "DistMult",
        heterogeneous=HETEROGENEOUS_EDGE,
        directed=True,
        nc=via_rl(),
        lp=T(link="link-prediction/distmult-link-prediction"),
        rl=True,
    ),
    Algorithm(
        T("DGCNN", details="Deep Graph CNN"),
        features=True,
        weighted=True,
        gc=T(link="graph-classification/dgcnn-graph-classification"),
    ),
]
# The files whose autogenerated table section is compared/overwritten, each
# paired with the separator text that brackets the generated content.
FILES = [
    # a RST comment is a directive with an unknown type, like an empty string
    Rst("docs/demos/index.rst", "\n..\n DEMO TABLE MARKER\n"),
    Html("demos/README.md", "\n<!-- DEMO TABLE MARKER -->\n"),
]
def tables(action):
    """Regenerate (or verify) the algorithm feature table in each file in FILES.

    Args:
        action (str): "compare" to check the committed tables against the
            freshly generated ones, or "overwrite" to rewrite them in place.

    Exits the process via ``error`` when any table link fails to resolve.
    """
    # (the unused `compare` local that used to be computed here was removed;
    # `action` is forwarded directly to separate_compare_overwrite)
    for file_fmt in FILES:
        new_table = file_fmt.render(COLUMNS, ALGORITHMS)
        file_name = file_fmt.file_name
        separator = file_fmt.separator
        base_dir = os.path.dirname(file_name)
        # every link in the table must resolve relative to the file it lives in
        invalid_links = [
            (written, rendered)
            for written, rendered in itertools.chain(
                find_links(COLUMNS, file_fmt), find_links(ALGORITHMS, file_fmt)
            )
            if not link_is_valid_relative(rendered, base_dir)
        ]
        if invalid_links:
            formatted = "\n".join(
                f"- `{written}` (missing target: `{base_dir}/{rendered}`)"
                for written, rendered in invalid_links
            )
            error(
                f"expected all links in algorithm specifications in `{__file__}` to be relative links that are valid starting at `{base_dir}`, but found {len(invalid_links)} invalid:\n\n{formatted}",
                edit_fixit=True,
            )
        separate_compare_overwrite(
            file_name, separator, action=action, new_middle=new_table, label="table"
        )
# Matches a Markdown level-1 heading; group 1 is the title text.
TITLE_RE = re.compile("^# (.*)")
def demo_listing_table(root):
    """Yield the lines of a Markdown table listing every notebook under ``root``.

    Each row links the demo title to the rendered docs page and the raw
    notebook file.  Temporarily chdir's into ``root`` so glob results are
    relative paths; the original cwd is restored when the generator finishes.
    """
    repo_dir = os.getcwd()
    os.chdir(root)
    try:
        yield "| Demo | Source |"
        yield "|---|---|"
        # sort the demos to get a consistent order, independent of the file system traversal order
        for demo in sorted(glob.iglob("**/*.ipynb", recursive=True)):
            if ".ipynb_checkpoint" in demo:
                continue
            notebook = nbformat.read(demo, as_version=4)
            markdown = "".join(notebook.cells[0].source)
            # assumes every demo's first cell starts with a `# Title` heading
            # — TODO confirm; title will be None (and title[1] raise) otherwise
            title = TITLE_RE.match(markdown)
            text = title[1]
            demo_html = demo.replace(".ipynb", ".html")
            url = os.path.join(DOC_URL_BASE, root, demo_html)
            # this looks better if the two links are separated (hence ; and the explicit new line),
            # and the "open here" doesn't get split across lines (hence non-breaking space)
            yield f"| [{text}]({url}) | [source]({demo}) |"
    finally:
        os.chdir(repo_dir)
def demo_indexing(action):
    """Regenerate (or verify) the autogenerated docs-link section in each
    demo directory's README.

    Args:
        action (str): "compare" or "overwrite", forwarded to
            separate_compare_overwrite.
    """
    root_dir = "demos/"
    for directory in glob.iglob("demos/**/", recursive=True):
        readme = os.path.join(directory, "README.md")
        if not os.path.exists(readme):
            # FIXME(#1139): some demos directories don't have a README
            continue
        # every demo README should have a matching Sphinx index page
        index = os.path.join("docs", directory, "index.rst")
        if not os.path.exists(index):
            error(
                f"expected each demo README to match a docs 'index.rst' file, found `{readme}` without corresponding `{index}`"
            )
        link = f"{DOC_URL_BASE}/{directory}"
        if directory != root_dir:
            # the root readme already has the detailed table in it, so don't include the full list
            # of demos there.
            listing = "\n".join(demo_listing_table(directory))
            suffix = "The demo titles link to the latest, nicely rendered version. The 'source' links will open the demo in the application in which this README is being viewed, such as Jupyter Lab (ready for execution)."
        else:
            listing = ""
            suffix = ""
        # note: the f-string below is deliberately flush-left — its contents are
        # written verbatim into the README
        new_contents = f"""\
<!-- {AUTOGENERATED_PROMPT} -->
These demos are displayed with detailed descriptions in the documentation: {link}
{listing}
{suffix}"""
        separate_compare_overwrite(
            readme,
            DOCS_LINK_SEPARATOR,
            action=action,
            new_middle=new_contents,
            label="docs link",
        )
def separate_compare_overwrite(file_name, separator, action, new_middle, label):
    """Split ``file_name`` on ``separator`` and compare or replace the middle part.

    The file must contain the separator exactly twice (giving prefix / middle /
    suffix).  With action == "compare", a diff is printed and the process exits
    via ``error`` when the middle differs from ``new_middle``; with
    action == "overwrite", the middle is replaced in place.

    Args:
        file_name (str): path of the file to process.
        separator (str): text bracketing the autogenerated section.
        action (str): "compare" or "overwrite".
        new_middle (str): freshly generated content for the middle section.
        label (str): human-readable name of the section, for error messages.
    """
    with open(file_name, "r+") as f:
        file_contents = f.read()
        parts = file_contents.split(separator)
        if len(parts) != 3:
            code_block = textwrap.indent(separator.strip(), " ")
            error(
                f"expected exactly two instances of the separator on their own lines in `{file_name}`, found {len(parts) - 1} instances. Separator should be:\n\n{code_block}"
            )
        prefix, current_middle, suffix = parts
        if action == "compare" and new_middle != current_middle:
            diff = difflib.unified_diff(
                current_middle.splitlines(keepends=True),
                new_middle.splitlines(keepends=True),
                fromfile=file_name,
                tofile="autogenerated expected contents",
            )
            sys.stdout.writelines(diff)
            error(
                f"existing {label} in `{file_name}` differs to generated {label}; was it edited manually?",
                edit_fixit=True,
            )
        elif action == "overwrite":
            # rewrite the whole file: rewind, write, and truncate in case the
            # new contents are shorter than the old
            f.seek(0)
            f.write("".join([prefix, separator, new_middle, separator, suffix]))
            # delete any remaining content
            f.truncate()
def error(message, edit_fixit=False):
    """Report a documentation-generation failure and exit with status 1.

    Prints to stderr, attempts a Buildkite error annotation (silently skipped
    when no agent binary is available) and, when ``edit_fixit`` is set, appends
    instructions for regenerating the content.
    """
    formatted = f"Error while generating information for documentation: {message}"
    if edit_fixit:
        formatted += f"\n\nTo fix, edit `{__file__}` as appropriate and run it like `python {__file__} --action=overwrite` to overwrite existing information with updated form."
    print(formatted, file=sys.stderr)
    annotate_command = [
        "buildkite-agent",
        "annotate",
        "--style=error",
        "--context=demo_indexing",
        formatted,
    ]
    try:
        subprocess.run(annotate_command)
    except FileNotFoundError:
        # buildkite-agent isn't installed, so this is presumably a local run
        # rather than CI: skip the annotation silently
        pass
    sys.exit(1)
def main():
    """Parse the ``--action`` flag and run both generation passes."""
    description = "Edits or compares the table of all algorithms and their demos in `demos/README.md` and `docs/demos/index.rst`"
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "--action",
        choices=["compare", "overwrite"],
        default="compare",
        help="whether to compare the tables against what would be generated, or to overwrite them table with new ones (default: %(default)s)",
    )
    action = parser.parse_args().action
    tables(action)
    demo_indexing(action)
# Script entry point: `python <this file> [--action=compare|overwrite]`.
if __name__ == "__main__":
    main()
| 22,242 | 29.469863 | 221 | py |
stellargraph | stellargraph-master/tests/test_ensemble.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytest
import numpy as np
import tensorflow as tf
from stellargraph import StellarGraph
from stellargraph.layer import (
GraphSAGE,
GCN,
GAT,
HinSAGE,
link_classification,
link_regression,
)
from stellargraph.mapper import (
GraphSAGENodeGenerator,
FullBatchNodeGenerator,
HinSAGENodeGenerator,
GraphSAGELinkGenerator,
HinSAGELinkGenerator,
)
from stellargraph.ensemble import Ensemble, BaggingEnsemble
from tensorflow.keras import layers, Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import categorical_crossentropy, binary_crossentropy
# FIXME (#535): Consider using graph fixtures
def example_graph_1(feature_size=None):
nlist = [1, 2, 3, 4, 5, 6]
if feature_size is not None:
features = np.ones((len(nlist), feature_size))
else:
features = []
elist = [(1, 2), (2, 3), (1, 4), (3, 2), (5, 6), (1, 5)]
return StellarGraph(
pd.DataFrame(features, index=nlist),
pd.DataFrame(elist, columns=["source", "target"]),
)
def create_graphSAGE_model(graph, link_prediction=False):
    """Build a small GraphSAGE model for testing.

    Args:
        graph: the StellarGraph to build the model over.
        link_prediction (bool): when True, build a binary link-classification
            model with a link generator; otherwise a 2-class node
            classification model with a node generator.

    Returns:
        tuple: (base GraphSAGE model, Keras model, generator, training flow)

    Cleanup: removed the commented-out duplicate of the flow construction and
    hoisted the duplicated in_out_tensors/Model construction out of the
    branches.
    """
    if link_prediction:
        # We are going to train on the original graph
        generator = GraphSAGELinkGenerator(graph, batch_size=2, num_samples=[2, 2])
        edge_ids_train = np.array([[1, 2], [2, 3], [1, 3]])
        train_gen = generator.flow(edge_ids_train, np.array([1, 1, 0]))
    else:
        generator = GraphSAGENodeGenerator(graph, batch_size=2, num_samples=[2, 2])
        train_gen = generator.flow([1, 2], np.array([[1, 0], [0, 1]]))
    base_model = GraphSAGE(
        layer_sizes=[8, 8], generator=generator, bias=True, dropout=0.5
    )
    # Expose input and output sockets of graphsage; for link prediction the
    # outputs are the source and destination node embeddings.
    x_inp, x_out = base_model.in_out_tensors()
    if link_prediction:
        prediction = link_classification(
            output_dim=1, output_act="relu", edge_embedding_method="ip"
        )(x_out)
    else:
        prediction = layers.Dense(units=2, activation="softmax")(x_out)
    keras_model = Model(inputs=x_inp, outputs=prediction)
    return base_model, keras_model, generator, train_gen
def create_HinSAGE_model(graph, link_prediction=False):
    """Build a small HinSAGE model for testing.

    Args:
        graph: the StellarGraph to build the model over.
        link_prediction (bool): when True, build a link-regression model (note:
            regression here, unlike the classification head used by
            create_graphSAGE_model); otherwise a 2-class node classifier.

    Returns:
        tuple: (base HinSAGE model, Keras model, generator, training flow)
    """
    if link_prediction:
        generator = HinSAGELinkGenerator(
            graph,
            batch_size=2,
            num_samples=[2, 1],
            head_node_types=["default", "default"],
        )
        edge_ids_train = np.array([[1, 2], [2, 3], [1, 3]])
        train_gen = generator.flow(edge_ids_train, np.array([1, 1, 0]))
    else:
        generator = HinSAGENodeGenerator(
            graph, batch_size=2, num_samples=[2, 2], head_node_type="default"
        )
        train_gen = generator.flow([1, 2], np.array([[1, 0], [0, 1]]))
    base_model = HinSAGE(
        layer_sizes=[8, 8], generator=generator, bias=True, dropout=0.5
    )
    if link_prediction:
        # Define input and output sockets of hinsage:
        x_inp, x_out = base_model.in_out_tensors()
        # Final estimator layer
        prediction = link_regression(edge_embedding_method="ip")(x_out)
    else:
        x_inp, x_out = base_model.in_out_tensors()
        prediction = layers.Dense(units=2, activation="softmax")(x_out)
    keras_model = Model(inputs=x_inp, outputs=prediction)
    return base_model, keras_model, generator, train_gen
def create_GCN_model(graph):
    """Build a two-layer full-batch GCN node classifier over ``graph``."""
    generator = FullBatchNodeGenerator(graph)
    labels = np.array([[1, 0], [0, 1]])
    train_gen = generator.flow([1, 2], labels)
    gcn = GCN(
        layer_sizes=[8, 2],
        generator=generator,
        bias=True,
        dropout=0.5,
        activations=["elu", "softmax"],
    )
    model_inputs, model_outputs = gcn.in_out_tensors()
    keras_model = Model(inputs=model_inputs, outputs=model_outputs)
    return gcn, keras_model, generator, train_gen
def create_GAT_model(graph):
    """Build a three-layer full-batch (dense) GAT node classifier over ``graph``."""
    generator = FullBatchNodeGenerator(graph, sparse=False)
    labels = np.array([[1, 0], [0, 1]])
    train_gen = generator.flow([1, 2], labels)
    gat = GAT(
        layer_sizes=[8, 8, 2],
        generator=generator,
        bias=True,
        in_dropout=0.5,
        attn_dropout=0.5,
        activations=["elu", "elu", "softmax"],
        normalize=None,
    )
    model_inputs, model_outputs = gat.in_out_tensors()
    keras_model = Model(inputs=model_inputs, outputs=model_outputs)
    return gat, keras_model, generator, train_gen
#
# Test for class Ensemble instance creation with invalid parameters given.
#
def test_ensemble_init_parameters():
    """Invalid constructor arguments must raise ValueError; valid arguments
    must populate the ensemble's models and counters.

    Cleanup: the Ensemble and BaggingEnsemble halves of this test were exact
    duplicates, now expressed as a loop over both classes; the redundant
    pre-loop create_graphSAGE_model call (whose results were immediately
    overwritten) was removed.
    """
    tf.keras.backend.clear_session()
    graph = example_graph_1(feature_size=10)
    # base_model, keras_model, generator, train_gen
    gnn_models = [
        create_graphSAGE_model(graph),
        create_HinSAGE_model(graph),
        create_graphSAGE_model(graph, link_prediction=True),
        create_HinSAGE_model(graph, link_prediction=True),
        create_GCN_model(graph),
        create_GAT_model(graph),
    ]
    for gnn_model in gnn_models:
        base_model = gnn_model[0]
        keras_model = gnn_model[1]
        # both ensemble classes share the same constructor validation
        for ensemble_cls in (Ensemble, BaggingEnsemble):
            # a raw StellarGraph layer object (not a Keras model) is rejected
            with pytest.raises(ValueError):
                ensemble_cls(base_model, n_estimators=3, n_predictions=3)
            # n_predictions must be a positive integer
            for n_predictions in (0, -3, 1.7):
                with pytest.raises(ValueError):
                    ensemble_cls(keras_model, n_estimators=1, n_predictions=n_predictions)
            # n_estimators must be a positive integer
            for n_estimators in (0, -8, 2.5):
                with pytest.raises(ValueError):
                    ensemble_cls(keras_model, n_estimators=n_estimators, n_predictions=11)
            # valid arguments populate the ensemble
            ens = ensemble_cls(keras_model, n_estimators=7, n_predictions=10)
            assert len(ens.models) == 7
            assert ens.n_estimators == 7
            assert ens.n_predictions == 10
def test_compile():
    """compile() must reject a missing optimizer, for both ensemble types.

    Cleanup: the Ensemble/BaggingEnsemble duplication is expressed as a loop.
    """
    tf.keras.backend.clear_session()
    graph = example_graph_1(feature_size=10)
    # base_model, keras_model, generator, train_gen
    gnn_models = [
        create_graphSAGE_model(graph),
        create_HinSAGE_model(graph),
        create_graphSAGE_model(graph, link_prediction=True),
        create_HinSAGE_model(graph, link_prediction=True),
        create_GCN_model(graph),
        create_GAT_model(graph),
    ]
    for gnn_model in gnn_models:
        keras_model = gnn_model[1]
        # the optimizer check is shared between Ensemble and BaggingEnsemble
        for ensemble_cls in (Ensemble, BaggingEnsemble):
            ens = ensemble_cls(keras_model, n_estimators=2, n_predictions=5)
            with pytest.raises(ValueError):  # must specify the optimizer to use
                ens.compile(
                    optimizer=None, loss=categorical_crossentropy, weighted_metrics=["acc"]
                )
def test_Ensemble_fit():
    """Ensemble.fit accepts a generator flow and rejects a raw generator object."""
    tf.keras.backend.clear_session()
    graph = example_graph_1(feature_size=10)
    # base_model, keras_model, generator, train_gen
    gnn_models = [
        create_graphSAGE_model(graph),
        create_HinSAGE_model(graph),
        create_GCN_model(graph),
        create_GAT_model(graph),
    ]
    for gnn_model in gnn_models:
        keras_model = gnn_model[1]
        generator = gnn_model[2]
        train_gen = gnn_model[3]
        ens = Ensemble(keras_model, n_estimators=2, n_predictions=1)
        ens.compile(
            optimizer=Adam(), loss=categorical_crossentropy, weighted_metrics=["acc"]
        )
        # the happy path: fitting on a flow (Sequence) works
        ens.fit(train_gen, epochs=1, verbose=0, shuffle=False)
        # passing the generator object itself (rather than a flow) must raise
        with pytest.raises(ValueError):
            ens.fit(
                generator=generator,  # wrong type
                epochs=10,
                validation_data=train_gen,
                verbose=0,
                shuffle=False,
            )
def test_BaggingEnsemble_fit():
    """BaggingEnsemble.fit requires a raw generator plus training data/targets,
    and validates the bag_size and data arguments."""
    tf.keras.backend.clear_session()
    train_data = np.array([1, 2])
    train_targets = np.array([[1, 0], [0, 1]])
    graph = example_graph_1(feature_size=10)
    # base_model, keras_model, generator, train_gen
    gnn_models = [
        create_graphSAGE_model(graph),
        create_HinSAGE_model(graph),
        create_GCN_model(graph),
        create_GAT_model(graph),
    ]
    for gnn_model in gnn_models:
        keras_model = gnn_model[1]
        generator = gnn_model[2]
        train_gen = gnn_model[3]
        ens = BaggingEnsemble(keras_model, n_estimators=2, n_predictions=1)
        ens.compile(
            optimizer=Adam(), loss=categorical_crossentropy, weighted_metrics=["acc"]
        )
        # the happy path: raw generator + data + targets
        ens.fit(
            generator=generator,
            train_data=train_data,
            train_targets=train_targets,
            epochs=1,
            validation_data=train_gen,
            verbose=0,
            shuffle=False,
        )
        # This is a BaggingEnsemble so the generator in the below call is of the wrong type.
        with pytest.raises(ValueError):
            ens.fit(
                train_gen,
                train_data=train_data,
                train_targets=train_targets,
                epochs=10,
                verbose=0,
                shuffle=False,
            )
        with pytest.raises(ValueError):
            ens.fit(
                generator=generator,
                train_data=train_data,
                train_targets=None,  # Should not be None
                epochs=10,
                validation_data=train_gen,
                verbose=0,
                shuffle=False,
            )
        # both data and targets missing is also invalid
        with pytest.raises(ValueError):
            ens.fit(
                generator=generator,
                train_data=None,
                train_targets=None,
                epochs=10,
                validation_data=None,
                verbose=0,
                shuffle=False,
            )
        with pytest.raises(ValueError):
            ens.fit(
                generator=generator,
                train_data=train_data,
                train_targets=train_targets,
                epochs=10,
                validation_data=None,
                verbose=0,
                shuffle=False,
                bag_size=-1,  # should be positive integer smaller than or equal to len(train_data) or None
            )
        with pytest.raises(ValueError):
            ens.fit(
                generator=generator,
                train_data=train_data,
                train_targets=train_targets,
                epochs=10,
                validation_data=None,
                verbose=0,
                shuffle=False,
                bag_size=10,  # larger than the number of training points
            )
def test_evaluate():
    """evaluate() rejects mixed generator/data arguments and returns matching
    mean and std 1-D metric arrays, for both ensemble types.

    Cleanup: the Ensemble and BaggingEnsemble halves were verbatim duplicates,
    now run as a loop over both classes.
    """
    tf.keras.backend.clear_session()
    test_data = np.array([3, 4, 5])
    test_targets = np.array([[1, 0], [0, 1], [0, 1]])
    graph = example_graph_1(feature_size=5)
    # base_model, keras_model, generator, train_gen
    gnn_models = [
        create_graphSAGE_model(graph),
        create_HinSAGE_model(graph),
        create_GCN_model(graph),
        create_GAT_model(graph),
    ]
    for gnn_model in gnn_models:
        keras_model = gnn_model[1]
        generator = gnn_model[2]
        # both classes share evaluate()'s argument validation and return shape
        for ensemble_cls in (Ensemble, BaggingEnsemble):
            ens = ensemble_cls(keras_model, n_estimators=2, n_predictions=1)
            ens.compile(
                optimizer=Adam(), loss=categorical_crossentropy, weighted_metrics=["acc"]
            )
            # Check that passing invalid parameters is handled correctly. We will
            # not check error handling for those parameters that Keras will be
            # responsible for.
            with pytest.raises(ValueError):
                ens.evaluate(
                    generator=generator, test_data=test_data, test_targets=test_targets
                )
            with pytest.raises(ValueError):
                ens.evaluate(
                    generator=generator,
                    test_data=test_data,
                    test_targets=None,  # must give test_targets
                )
            with pytest.raises(ValueError):
                ens.evaluate(
                    generator=generator.flow(test_data, test_targets),
                    test_data=test_data,
                    test_targets=test_targets,
                )
            # We won't train the model; the initial random weights are enough to
            # exercise the evaluate method.
            test_metrics_mean, test_metrics_std = ens.evaluate(
                generator.flow(test_data, test_targets)
            )
            assert len(test_metrics_mean) == len(test_metrics_std)
            assert len(test_metrics_mean.shape) == 1
            assert len(test_metrics_std.shape) == 1
def test_predict():
    """predict() rejects mixed arguments and returns correctly-shaped outputs,
    summarised and raw, for both ensemble types.

    Cleanup: the Ensemble and BaggingEnsemble halves were verbatim duplicates,
    now run as a loop over both classes.
    """
    tf.keras.backend.clear_session()
    test_data = np.array([4, 5, 6])
    test_targets = np.array([[1, 0], [0, 1], [0, 1]])
    graph = example_graph_1(feature_size=2)
    # base_model, keras_model, generator, train_gen
    gnn_models = [
        create_graphSAGE_model(graph),
        create_HinSAGE_model(graph),
        create_GCN_model(graph),
        create_GAT_model(graph),
    ]
    for i, gnn_model in enumerate(gnn_models):
        keras_model = gnn_model[1]
        generator = gnn_model[2]
        # GCN and GAT (indices > 1) are full-batch so the batch dimension is 1
        full_batch = i > 1
        for ensemble_cls in (Ensemble, BaggingEnsemble):
            ens = ensemble_cls(keras_model, n_estimators=2, n_predictions=2)
            ens.compile(
                optimizer=Adam(), loss=categorical_crossentropy, weighted_metrics=["acc"]
            )
            test_gen = generator.flow(test_data)
            # giving both a flow and raw data must raise
            with pytest.raises(ValueError):
                ens.predict(generator=test_gen, predict_data=test_data)
            # We won't train the model; the initial random weights are enough to
            # exercise predict()'s output shapes.
            test_predictions = ens.predict(test_gen, summarise=True)
            print("test_predictions shape {}".format(test_predictions.shape))
            if full_batch:
                assert len(test_predictions) == 1
                assert test_predictions.shape[1] == test_targets.shape[0]
            else:
                assert len(test_predictions) == len(test_data)
            assert test_predictions.shape[-1] == test_targets.shape[-1]
            # raw predictions: one entry per estimator per prediction
            test_predictions = ens.predict(test_gen, summarise=False)
            assert test_predictions.shape[0] == ens.n_estimators
            assert test_predictions.shape[1] == ens.n_predictions
            if full_batch:
                assert test_predictions.shape[2] == 1
            else:
                assert test_predictions.shape[2] == len(test_data)
            assert test_predictions.shape[-1] == test_targets.shape[-1]
#
# Tests for link prediction that can't be combined easily with the node attribute inference workflow above.
#
def test_evaluate_link_prediction():
    """evaluate() argument validation and output shapes for link-prediction
    models, for both ensemble types.

    Cleanup: the Ensemble and BaggingEnsemble halves were verbatim duplicates,
    now run as a loop over both classes.
    """
    tf.keras.backend.clear_session()
    edge_ids_test = np.array([[1, 2], [2, 3], [1, 3]])
    edge_labels_test = np.array([1, 1, 0])
    graph = example_graph_1(feature_size=4)
    # base_model, keras_model, generator, train_gen
    gnn_models = [
        create_graphSAGE_model(graph, link_prediction=True),
        create_HinSAGE_model(graph, link_prediction=True),
    ]
    for gnn_model in gnn_models:
        keras_model = gnn_model[1]
        generator = gnn_model[2]
        # identical behaviour is expected from both ensemble types
        for ensemble_cls in (Ensemble, BaggingEnsemble):
            ens = ensemble_cls(keras_model, n_estimators=2, n_predictions=1)
            ens.compile(
                optimizer=Adam(), loss=binary_crossentropy, weighted_metrics=["acc"]
            )
            # Check that passing invalid parameters is handled correctly. We will
            # not check error handling for those parameters that Keras will be
            # responsible for.
            with pytest.raises(ValueError):
                ens.evaluate(
                    generator=generator,
                    test_data=edge_ids_test,
                    test_targets=edge_labels_test,
                )
            with pytest.raises(ValueError):
                ens.evaluate(
                    generator=generator,
                    test_data=edge_labels_test,
                    test_targets=None,  # must give test_targets
                )
            with pytest.raises(ValueError):
                ens.evaluate(
                    generator=generator.flow(edge_ids_test, edge_labels_test),
                    test_data=edge_ids_test,
                    test_targets=edge_labels_test,
                )
            # untrained (random) weights are enough to exercise evaluate()
            test_metrics_mean, test_metrics_std = ens.evaluate(
                generator.flow(edge_ids_test, edge_labels_test)
            )
            assert len(test_metrics_mean) == len(test_metrics_std)
            assert len(test_metrics_mean.shape) == 1
            assert len(test_metrics_std.shape) == 1
def test_predict_link_prediction():
    """predict() argument validation and output shapes for link-prediction
    models, for both ensemble types.

    Cleanup: the Ensemble and BaggingEnsemble halves were verbatim duplicates,
    now run as a loop over both classes.
    """
    tf.keras.backend.clear_session()
    edge_ids_test = np.array([[1, 2], [2, 3], [1, 3]])
    graph = example_graph_1(feature_size=2)
    # base_model, keras_model, generator, train_gen
    gnn_models = [
        create_graphSAGE_model(graph, link_prediction=True),
        create_HinSAGE_model(graph, link_prediction=True),
    ]
    for gnn_model in gnn_models:
        keras_model = gnn_model[1]
        generator = gnn_model[2]
        for ensemble_cls in (Ensemble, BaggingEnsemble):
            ens = ensemble_cls(keras_model, n_estimators=2, n_predictions=1)
            ens.compile(
                optimizer=Adam(), loss=binary_crossentropy, weighted_metrics=["acc"]
            )
            test_gen = generator.flow(edge_ids_test)
            # giving both a flow and raw data must raise
            with pytest.raises(ValueError):
                ens.predict(generator=test_gen, predict_data=edge_ids_test)
            # untrained (random) weights are enough to exercise predict()
            test_predictions = ens.predict(test_gen, summarise=True)
            print("test_predictions shape {}".format(test_predictions.shape))
            assert len(test_predictions) == len(edge_ids_test)
            assert test_predictions.shape[1] == 1
            test_predictions = ens.predict(test_gen, summarise=False)
            assert test_predictions.shape[0] == ens.n_estimators
            assert test_predictions.shape[1] == ens.n_predictions
            assert test_predictions.shape[2] == len(edge_ids_test)
            assert test_predictions.shape[3] == 1
def test_deprecated_methods():
    """
    The deprecated ``*_generator`` methods on Ensemble/BaggingEnsemble should emit a
    DeprecationWarning and behave the same as their non-deprecated replacements.
    """
    # start from a fresh TF session so state from earlier tests doesn't leak in
    tf.keras.backend.clear_session()
    train_data = np.array([1, 2])
    train_targets = np.array([[1, 0], [0, 1]])
    graph = example_graph_1(feature_size=2)
    _, keras_model, gen, train_gen = create_GAT_model(graph)
    ensemble = Ensemble(keras_model, n_estimators=1, n_predictions=1)
    bagging = BaggingEnsemble(keras_model, n_estimators=1, n_predictions=1)
    models = [ensemble, bagging]
    for model in models:
        model.compile(optimizer=Adam(), loss=binary_crossentropy)
    # check that each of the generator methods gives a warning, and also seems to behave like the
    # non-deprecated method
    with pytest.warns(DeprecationWarning, match="'fit_generator' .* 'fit'"):
        ens_history = ensemble.fit_generator(train_gen, epochs=2, verbose=0)
    # one history object per estimator; one loss entry per epoch
    assert len(ens_history) == 1
    assert len(ens_history[0].history["loss"]) == 2
    with pytest.warns(DeprecationWarning, match="'fit_generator' .* 'fit'"):
        bag_history = bagging.fit_generator(
            gen, train_data, train_targets, epochs=2, verbose=0
        )
    assert len(bag_history) == 1
    assert len(bag_history[0].history["loss"]) == 2
    for model in models:
        with pytest.warns(
            DeprecationWarning, match="'evaluate_generator' .* 'evaluate'"
        ):
            eval_result = model.evaluate_generator(train_gen, verbose=0)
        # deprecated and replacement methods should produce identical results
        np.testing.assert_array_equal(eval_result, model.evaluate(train_gen, verbose=0))
        with pytest.warns(DeprecationWarning, match="'predict_generator' .* 'predict'"):
            pred_result = model.predict_generator(train_gen, verbose=0)
        np.testing.assert_array_equal(pred_result, model.predict(train_gen, verbose=0))
| 28,207 | 32.822542 | 112 | py |
stellargraph | stellargraph-master/tests/interpretability/test_saliency_maps_gat.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from stellargraph.interpretability.saliency_maps import IntegratedGradientsGAT
import numpy as np
from stellargraph.layer import GAT
from stellargraph.mapper import FullBatchNodeGenerator
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import categorical_crossentropy
import networkx as nx
from tensorflow.keras import backend as K
from ..test_utils.graphs import example_graph_1_saliency_maps as example_graph_1
def create_GAT_model(graph):
    """
    Build a tiny two-layer GAT with saliency-map support over ``graph``.

    Returns:
        tuple: (gat, keras_model, generator, train_gen)
    """
    node_gen = FullBatchNodeGenerator(graph, sparse=False, method=None)
    labels = np.array([[1, 0], [0, 1]])
    flow = node_gen.flow([0, 1], labels)
    gat = GAT(
        layer_sizes=[2, 2],
        generator=node_gen,
        bias=False,
        in_dropout=0,
        attn_dropout=0,
        activations=["elu", "softmax"],
        normalize=None,
        saliency_map_support=True,
    )
    # force a deterministic "ones" initialisation on every GAT layer
    for gat_layer in gat._layers:
        gat_layer._initializer = "ones"
    inputs, outputs = gat.in_out_tensors()
    return gat, Model(inputs=inputs, outputs=outputs), node_gen, flow
def get_ego_node_num(graph, target_idx):
    """Return the number of nodes in the radius-2 ego network around ``target_idx``."""
    ego = nx.ego_graph(graph, target_idx, radius=2)
    return len(ego)
def test_ig_saliency_map():
    """
    Integrated-gradients link/node importance for a fixed-weight GAT model: checks the
    non-trainable IG variables and counts of non-zero importances.
    """
    graph = example_graph_1(feature_size=4)
    base_model, keras_model_gat, generator, train_gen = create_GAT_model(graph)
    keras_model_gat.compile(
        optimizer=Adam(lr=0.1), loss=categorical_crossentropy, weighted_metrics=["acc"]
    )
    # fixed weights so the importances below are deterministic
    weights = [
        np.array(
            [
                [0.47567585, 0.7989239],
                [0.33588523, 0.19814175],
                [0.15685713, 0.43643117],
                [0.7725941, 0.68441933],
            ]
        ),
        np.array([[0.71832293], [0.8542117]]),
        np.array([[0.46560588], [0.8165422]]),
        np.array(1.0),
        np.array(0.0),
        np.array([[0.4391179, 0.595691], [0.06000895, 0.2613866]]),
        np.array([[0.43496376], [0.02840129]]),
        np.array([[0.33972418], [0.22352563]]),
        np.array(1.0),
        np.array(0.0),
    ]
    keras_model_gat.set_weights(weights)
    # sanity check to make sure that the values of delta and non_exist_edges are not trainable
    # the expected value should be delta = 1.0 and non_exist_edges = 0.0
    for var in keras_model_gat.non_trainable_weights:
        if "ig_delta" in var.name:
            assert K.get_value(var) == 1.0
        if "ig_non_exist_edge" in var.name:
            assert K.get_value(var) == 0.0
    ig_saliency = IntegratedGradientsGAT(
        keras_model_gat, train_gen, generator.node_list
    )
    target_id = 0
    class_of_interest = 0
    ig_link_importance = ig_saliency.get_link_importance(
        target_id, class_of_interest, steps=200
    )
    print(ig_link_importance)
    ig_link_importance_ref = np.array(
        [
            [4.759e-11, 4.759e-11, 4.759e-11, 0, 0],
            [-1.442e-10, -1.442e-10, 0, 0, 0],
            [1.183e-10, 0, 1.183e-10, 1.183e-10, 0],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0],
        ]
    )
    # Check the number of non-zero elements in the node importance matrix. We expect to see the number be same with the number of nodes in the ego network.
    # NOTE(review): `pytest.approx(...)` returns an ApproxBase object, which is always
    # truthy, so this assert can never fail. The intended form is
    # `assert actual == pytest.approx(expected)` — confirm before tightening.
    assert pytest.approx(
        np.sum(np.ma.masked_array(ig_link_importance, mask=train_gen.A_dense)), 0
    )
    # TODO: write a better comparison test with larger floating point values
    # commented out test because of floating point errors
    # assert ig_link_importance == pytest.approx(ig_link_importance_ref, abs=1e-11)
    non_zero_edge_importance = np.sum(np.abs(ig_link_importance) > 1e-11)
    assert 8 == non_zero_edge_importance
    ig_node_importance = ig_saliency.get_node_importance(
        target_id, class_of_interest, steps=200
    )
    print(ig_node_importance)
    # NOTE(review): same vacuous-approx pattern as above — the second argument is
    # interpreted as the `rel` tolerance, not a comparison target.
    assert pytest.approx(ig_node_importance, np.array([-13.06, -9.32, -7.46, -3.73, 0]))
    non_zero_node_importance = np.sum(np.abs(ig_node_importance) > 1e-5)
    assert 4 == non_zero_node_importance
| 4,669 | 35.484375 | 155 | py |
stellargraph | stellargraph-master/tests/interpretability/test_saliency_maps_gcn.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from stellargraph.interpretability.saliency_maps import *
import numpy as np
from stellargraph.layer import GCN
from stellargraph.mapper import FullBatchNodeGenerator
from tensorflow.keras import Model, regularizers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import categorical_crossentropy
from ..test_utils.graphs import example_graph_1_saliency_maps as example_graph_1
def create_GCN_model_dense(graph):
    """
    Build a small two-layer GCN over dense full-batch inputs.

    Returns:
        tuple: (gcn, keras_model, generator, train_gen)
    """
    node_gen = FullBatchNodeGenerator(graph, sparse=False, method="gcn")
    flow = node_gen.flow([0, 1], np.array([[1, 0], [0, 1]]))
    gcn = GCN(
        layer_sizes=[2, 2],
        activations=["elu", "elu"],
        generator=node_gen,
        dropout=0.3,
        kernel_regularizer=regularizers.l2(5e-4),
    )
    # deterministic "ones" initialisation for each GCN layer
    for gcn_layer in gcn._layers:
        gcn_layer._initializer = "ones"
    inputs, outputs = gcn.in_out_tensors()
    return gcn, Model(inputs=inputs, outputs=outputs), node_gen, flow
def create_GCN_model_sparse(graph):
    """
    Build a small two-layer GCN over sparse full-batch inputs.

    Returns:
        tuple: (gcn, keras_model, generator, train_gen)
    """
    labels = np.array([[1, 0], [0, 1]])
    node_gen = FullBatchNodeGenerator(graph, sparse=True, method="gcn")
    flow = node_gen.flow([0, 1], labels)
    gcn = GCN(
        layer_sizes=[2, 2],
        activations=["elu", "elu"],
        generator=node_gen,
        dropout=0.3,
        kernel_regularizer=regularizers.l2(5e-4),
    )
    # deterministic "ones" initialisation for each GCN layer
    for sparse_layer in gcn._layers:
        sparse_layer._initializer = "ones"
    model_in, model_out = gcn.in_out_tensors()
    keras_model = Model(inputs=model_in, outputs=model_out)
    return gcn, keras_model, node_gen, flow
def test_ig_saliency_map():
    """
    Integrated-gradients node/link importance for a fixed-weight GCN: the dense and
    sparse model variants should agree, and both should match the reference values.
    """
    graph = example_graph_1(feature_size=4)
    base_model, keras_model_gcn, generator, train_gen = create_GCN_model_dense(graph)
    (
        base_model_sp,
        keras_model_gcn_sp,
        generator_sp,
        train_gen_sp,
    ) = create_GCN_model_sparse(graph)
    keras_model_gcn.compile(
        optimizer=Adam(lr=0.1), loss=categorical_crossentropy, weighted_metrics=["acc"]
    )
    keras_model_gcn_sp.compile(
        optimizer=Adam(lr=0.1), loss=categorical_crossentropy, weighted_metrics=["acc"]
    )
    # identical fixed weights for both variants so their outputs are comparable
    weights = [
        np.array(
            [
                [0.43979216, -0.205199],
                [0.774606, 0.9521842],
                [-0.7586646, -0.41291213],
                [-0.80931616, 0.8148985],
            ],
            dtype="float32",
        ),
        np.array([0.0, 0.0], dtype="float32"),
        np.array([[1.0660936, -0.48291892], [1.2134176, 1.1863097]], dtype="float32"),
        np.array([0.0, 0.0], dtype="float32"),
    ]
    keras_model_gcn.set_weights(weights)
    keras_model_gcn_sp.set_weights(weights)
    ig_dense = IntegratedGradients(keras_model_gcn, train_gen)
    ig_sparse = IntegratedGradients(keras_model_gcn_sp, train_gen_sp)
    target_idx = 0
    class_of_interest = 0
    ig_node_importance_dense = ig_dense.get_node_importance(
        target_idx, class_of_interest, steps=50
    )
    ig_node_importance_sp = ig_sparse.get_node_importance(
        target_idx, class_of_interest, steps=50
    )
    ig_node_importance_ref = np.array([20.91, 18.29, 11.98, 5.98, 0])
    # NOTE(review): `assert pytest.approx(a, b)` is always truthy — approx objects must
    # be compared with `==` (`assert a == pytest.approx(b)`); as written these four
    # asserts can never fail. Confirm the reference values before tightening.
    assert pytest.approx(ig_node_importance_dense, ig_node_importance_ref)
    assert pytest.approx(ig_node_importance_dense, ig_node_importance_sp)
    ig_link_importance_nz_ref = np.array(
        [0.2563, 0.2759, 0.2423, 0.0926, 0.1134, 0.0621, 0.0621, 0.0621]
    )
    ig_link_importance_dense = ig_dense.get_integrated_link_masks(
        target_idx, class_of_interest, adj_baseline=None, steps=50
    )
    # compare only the non-zero entries of the link-importance matrices
    ig_link_importance_dense_nz = ig_link_importance_dense[
        np.nonzero(ig_link_importance_dense)
    ]
    ig_link_importance_sp = ig_sparse.get_integrated_link_masks(
        target_idx, class_of_interest, adj_baseline=None, steps=50
    )
    ig_link_importance_sp_nz = ig_link_importance_sp[np.nonzero(ig_link_importance_sp)]
    assert pytest.approx(ig_link_importance_dense_nz, ig_link_importance_nz_ref)
    assert pytest.approx(ig_link_importance_dense_nz, ig_link_importance_sp_nz)
def test_saliency_init_parameters():
    """
    IntegratedGradients should reject a model/generator pair that disagrees on sparse
    vs. dense inputs, failing with either a TypeError (wrong generator type) or a
    RuntimeError (wrong number of model inputs).
    """
    graph = example_graph_1(feature_size=4)
    base_model, keras_model_gcn, generator, train_gen = create_GCN_model_dense(graph)
    (
        base_model_sp,
        keras_model_gcn_sp,
        generator_sp,
        train_gen_sp,
    ) = create_GCN_model_sparse(graph)
    keras_model_gcn.compile(
        optimizer=Adam(lr=0.1), loss=categorical_crossentropy, weighted_metrics=["acc"]
    )
    keras_model_gcn_sp.compile(
        optimizer=Adam(lr=0.1), loss=categorical_crossentropy, weighted_metrics=["acc"]
    )
    # BUGFIX: the original `with pytest.raises(TypeError) and pytest.raises(RuntimeError)`
    # only checked for RuntimeError, because `A and B` evaluates to its second operand.
    # A tuple of exception types accepts either, matching the documented intent.
    with pytest.raises((TypeError, RuntimeError)):
        IntegratedGradients(keras_model_gcn, train_gen_sp)
    with pytest.raises((TypeError, RuntimeError)):
        IntegratedGradients(keras_model_gcn_sp, train_gen)
| 5,711 | 33.618182 | 128 | py |
stellargraph | stellargraph-master/tests/test_utils/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
import stellargraph as sg
import numpy as np
ignore_stellargraph_experimental_mark = pytest.mark.filterwarnings(
r"ignore:StellarGraph\(nodes=..., edges=...\):stellargraph.core.experimental.ExperimentalWarning"
)
def model_save_load(tmpdir, sg_model):
    """
    Save a StellarGraph model via each supported Keras/TF saving entry point, reload it,
    and assert the reloaded model carries identical weights.

    Args:
        tmpdir: pytest ``tmpdir`` fixture used as the save location.
        sg_model: a StellarGraph model object exposing ``in_out_tensors``.
    """
    model = tf.keras.Model(*sg_model.in_out_tensors())
    saving_functions = [
        tf.keras.models.save_model,
        tf.keras.Model.save,
        tf.saved_model.save,
    ]
    loading_functions = [
        tf.keras.models.load_model,
        # tf.saved_model.load doesn't restore the Keras Model object
    ]
    # each saving function writes to its own numbered subdirectory of tmpdir
    # NOTE(review): the inner loop rebinds `func`, shadowing the outer loop variable;
    # harmless here (outer `func` is not used after the inner loop) but worth renaming.
    for i, func in enumerate(saving_functions):
        saved_dir = str(tmpdir.join(str(i)))
        func(model, str(saved_dir))
        for func in loading_functions:
            # custom_keras_layers provides the StellarGraph layer classes for deserialisation
            loaded = func(saved_dir, sg.custom_keras_layers)
            orig_weights = model.get_weights()
            new_weights = loaded.get_weights()
            assert len(orig_weights) == len(new_weights)
            for orig, new in zip(orig_weights, new_weights):
                np.testing.assert_array_equal(orig, new)
    # clear the tensorflow session to free memory
    tf.keras.backend.clear_session()
def flaky_xfail_mark(exception, issue_numbers):
    """
    Build an ``xfail`` mark for a test that occasionally fails with ``exception``,
    linking the failure to one or more GitHub issues in ``issue_numbers``.
    """
    nums = [issue_numbers] if isinstance(issue_numbers, int) else list(issue_numbers)
    if not nums:
        raise ValueError(
            "at least one issue must be specified when marking a test as flaky"
        )
    links = " ".join(
        f"<https://github.com/stellargraph/stellargraph/issues/{num}>" for num in nums
    )
    return pytest.mark.xfail(raises=exception, reason=f"flaky: {links}")
| 2,432 | 31.013158 | 101 | py |
stellargraph | stellargraph-master/tests/reproducibility/test_deep_graph_infomax.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .fixtures import assert_reproducible
from stellargraph.layer import DeepGraphInfomax, GCN, APPNP, GAT, PPNP
from stellargraph.mapper import FullBatchNodeGenerator, CorruptedGenerator
from ..test_utils.graphs import example_graph_random
from .. import require_gpu
import tensorflow as tf
import pytest
import numpy as np
def dgi(generator, gen, model_type):
    """Train a Deep Graph Infomax model over ``model_type`` with fixed seeds; return it."""
    tf.random.set_seed(1234)
    np.random.seed(1234)
    base = model_type(generator=generator, activations=["relu"], layer_sizes=[4])
    x_in, x_out = DeepGraphInfomax(base).in_out_tensors()
    trained = tf.keras.Model(x_in, x_out)
    trained.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits, optimizer="Adam")
    trained.fit(gen)
    return trained
@pytest.mark.parametrize("model_type", [GCN, APPNP, GAT, PPNP])
@pytest.mark.parametrize("sparse", [False, True])
@pytest.mark.skipif(require_gpu, reason="tf on GPU is non-deterministic")
def test_dgi(model_type, sparse):
    """DGI training should be bitwise reproducible for every supported base model."""
    if sparse and model_type is PPNP:
        pytest.skip("PPNP doesn't support sparse=True")
    graph = example_graph_random()
    full_batch = FullBatchNodeGenerator(graph, sparse=sparse)
    corrupted_flow = CorruptedGenerator(full_batch).flow(graph.nodes())
    assert_reproducible(lambda: dgi(full_batch, corrupted_flow, model_type))
| 1,937 | 31.847458 | 81 | py |
stellargraph | stellargraph-master/tests/reproducibility/test_graphsage.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import random
import tensorflow as tf
from stellargraph.data.unsupervised_sampler import UnsupervisedSampler
from stellargraph.mapper.sampled_node_generators import GraphSAGENodeGenerator
from stellargraph.mapper.sampled_link_generators import GraphSAGELinkGenerator
from stellargraph.layer.graphsage import GraphSAGE
from stellargraph.layer.link_inference import link_classification
from stellargraph.random import set_seed
from ..test_utils import flaky_xfail_mark
from ..test_utils.graphs import petersen_graph
from .fixtures import assert_reproducible
from .. import require_gpu
def unsup_gs_model(num_samples, generator, optimizer, bias, dropout, normalize):
    """Build and compile an unsupervised GraphSAGE link model (sigmoid inner product)."""
    depth = len(num_samples)
    graphsage = GraphSAGE(
        layer_sizes=[50] * depth,
        generator=generator,
        bias=bias,
        dropout=dropout,
        normalize=normalize,
    )
    # node-pair inputs feed an inner-product link classifier
    x_inp, x_out = graphsage.in_out_tensors()
    classifier = link_classification(
        output_dim=1, output_act="sigmoid", edge_embedding_method="ip"
    )
    model = tf.keras.Model(inputs=x_inp, outputs=classifier(x_out))
    model.compile(optimizer=optimizer, loss=tf.keras.losses.binary_crossentropy)
    return model
def unsup_gs(
    g,
    num_samples,
    optimizer,
    batch_size=4,
    epochs=4,
    bias=True,
    dropout=0.0,
    normalize="l2",
    number_of_walks=1,
    walk_length=5,
    seed=0,
    shuffle=True,
):
    """
    Train an unsupervised GraphSAGE model on random-walk node pairs with all RNGs
    seeded, and return the trained Keras model (used by reproducibility tests).
    """
    # seed stellargraph and tensorflow RNGs; `random` is seeded only when shuffling —
    # presumably the only path that draws from it (TODO confirm)
    set_seed(seed)
    tf.random.set_seed(seed)
    if shuffle:
        random.seed(seed)
    nodes = list(g.nodes())
    unsupervised_samples = UnsupervisedSampler(
        g, nodes=nodes, length=walk_length, number_of_walks=number_of_walks
    )
    generator = GraphSAGELinkGenerator(g, batch_size, num_samples)
    train_gen = generator.flow(unsupervised_samples)
    model = unsup_gs_model(num_samples, generator, optimizer, bias, dropout, normalize)
    model.fit(
        train_gen,
        epochs=epochs,
        verbose=1,
        use_multiprocessing=False,
        workers=4,
        shuffle=shuffle,
    )
    return model
def gs_nai_model(num_samples, generator, targets, optimizer, bias, dropout, normalize):
    """Build and compile a GraphSAGE node classifier with a softmax output head."""
    graphsage = GraphSAGE(
        layer_sizes=[50] * len(num_samples),
        generator=generator,
        bias=bias,
        dropout=dropout,
        normalize=normalize,
    )
    # dense softmax head sized by the target (one-hot) dimensionality
    x_inp, x_out = graphsage.in_out_tensors()
    head = tf.keras.layers.Dense(units=targets.shape[1], activation="softmax")
    model = tf.keras.Model(inputs=x_inp, outputs=head(x_out))
    model.compile(optimizer=optimizer, loss=tf.keras.losses.categorical_crossentropy)
    return model
def gs_nai(
    g,
    targets,
    num_samples,
    optimizer,
    batch_size=4,
    epochs=4,
    bias=True,
    dropout=0.0,
    normalize="l2",
    seed=0,
    shuffle=True,
):
    """
    Train a GraphSAGE node classifier with all RNGs seeded and return the trained
    model (used by reproducibility tests).
    """
    set_seed(seed)
    tf.random.set_seed(seed)
    if shuffle:
        random.seed(seed)
    nodes = list(g.nodes())
    generator = GraphSAGENodeGenerator(g, batch_size, num_samples)
    # BUGFIX: respect the `shuffle` argument. The original hard-coded shuffle=True here,
    # so with shuffle=False the flow still shuffled — using an RNG that was never seeded
    # on that path — likely the source of the flaky reproducibility failures.
    train_gen = generator.flow(nodes, targets, shuffle=shuffle)
    model = gs_nai_model(
        num_samples, generator, targets, optimizer, bias, dropout, normalize
    )
    model.fit(
        train_gen,
        epochs=epochs,
        verbose=1,
        use_multiprocessing=False,
        workers=4,
        shuffle=shuffle,
    )
    return model
def gs_link_pred_model(num_samples, generator, optimizer, bias, dropout, normalize):
    """Build and compile a supervised GraphSAGE link-prediction model (relu inner product)."""
    hop_count = len(num_samples)
    graphsage = GraphSAGE(
        layer_sizes=[50] * hop_count,
        generator=generator,
        bias=bias,
        dropout=dropout,
        normalize=normalize,
    )
    # node-pair inputs feed an inner-product link score
    x_inp, x_out = graphsage.in_out_tensors()
    score = link_classification(
        output_dim=1, output_act="relu", edge_embedding_method="ip"
    )(x_out)
    model = tf.keras.Model(inputs=x_inp, outputs=score)
    model.compile(optimizer=optimizer, loss=tf.keras.losses.binary_crossentropy)
    return model
def gs_link_prediction(
    g,
    edge_ids,
    edge_labels,
    num_samples,
    optimizer,
    batch_size=4,
    epochs=4,
    bias=True,
    dropout=0.0,
    normalize="l2",
    seed=0,
    shuffle=True,
):
    """
    Train a GraphSAGE link-prediction model with all RNGs seeded and return the
    trained model (used by reproducibility tests).
    """
    set_seed(seed)
    tf.random.set_seed(seed)
    if shuffle:
        random.seed(seed)
    generator = GraphSAGELinkGenerator(g, batch_size, num_samples)
    # BUGFIX: respect the `shuffle` argument. The original hard-coded shuffle=True here,
    # so with shuffle=False the flow still shuffled — using an RNG that was never seeded
    # on that path — likely the source of the flaky reproducibility failures.
    train_gen = generator.flow(edge_ids, edge_labels, shuffle=shuffle)
    model = gs_link_pred_model(
        num_samples, generator, optimizer, bias, dropout, normalize
    )
    model.fit(
        train_gen,
        epochs=epochs,
        verbose=1,
        use_multiprocessing=False,
        workers=4,
        shuffle=shuffle,
    )
    return model
@pytest.mark.parametrize("shuffle", [True, False])
@flaky_xfail_mark(AssertionError, 1115)
def test_unsupervised(petersen_graph, shuffle):
    """Unsupervised GraphSAGE training should be reproducible run-to-run."""
    def build_model():
        return unsup_gs(
            petersen_graph,
            [2, 2],
            tf.optimizers.Adam(1e-3),
            epochs=4,
            walk_length=2,
            batch_size=4,
            shuffle=shuffle,
        )
    assert_reproducible(build_model)
@pytest.mark.parametrize("shuffle", [True, False])
@flaky_xfail_mark(AssertionError, 1115)
@pytest.mark.skipif(require_gpu, reason="tf on GPU is non-deterministic")
def test_nai(petersen_graph, shuffle):
    """GraphSAGE node classification training should be reproducible run-to-run."""
    targets = np.random.rand(len(petersen_graph.nodes()), 10)
    def build_model():
        return gs_nai(
            petersen_graph, targets, [2, 2], tf.optimizers.Adam(1e-3), shuffle=shuffle
        )
    assert_reproducible(build_model)
@flaky_xfail_mark(AssertionError, [970, 990])
@pytest.mark.parametrize("shuffle", [True, False])
def test_link_prediction(petersen_graph, shuffle):
    """GraphSAGE link prediction training should be reproducible run-to-run."""
    n_examples = 10
    edge_ids = np.random.choice(petersen_graph.nodes(), size=(n_examples, 2))
    edge_labels = np.random.choice([0, 1], size=n_examples)
    def build_model():
        return gs_link_prediction(
            petersen_graph,
            edge_ids,
            edge_labels,
            [2, 2],
            tf.optimizers.Adam(1e-3),
            shuffle=shuffle,
        )
    assert_reproducible(build_model)
| 7,036 | 26.488281 | 93 | py |
stellargraph | stellargraph-master/tests/reproducibility/fixtures.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
def models_equals(model1, model2):
    """Assert that both models hold identical weight arrays (same count, same values)."""
    first = model1.get_weights()
    second = model2.get_weights()
    assert len(first) == len(second)
    for idx in range(len(first)):
        np.testing.assert_array_equal(first[idx], second[idx])
def assert_reproducible(func, num_iter=1):
    """
    Assert that repeated calls to ``func`` build Keras models with identical weights.

    Args:
        func (callable): zero-argument callable that builds (and trains) a model.
        num_iter (int): how many extra models to build and compare against the first.
    """
    reference = func()
    for _ in range(num_iter):
        models_equals(reference, func())
    # clear the tensorflow session to free memory
    tf.keras.backend.clear_session()
| 1,356 | 29.155556 | 88 | py |
stellargraph | stellargraph-master/tests/layer/test_graph_classification.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from stellargraph.layer.graph_classification import *
from stellargraph.layer import SortPooling
from stellargraph.mapper import PaddedGraphGenerator, FullBatchNodeGenerator
import pytest
from ..test_utils.graphs import example_graph_random
from .. import test_utils
# Shared module-level fixtures: three random graphs of differing sizes (6, 5 and 3
# nodes) with 4-dimensional node features, and a padded-batch generator over them.
graphs = [
    example_graph_random(feature_size=4, n_nodes=6),
    example_graph_random(feature_size=4, n_nodes=5),
    example_graph_random(feature_size=4, n_nodes=3),
]
generator = PaddedGraphGenerator(graphs=graphs)
def test_init():
    """Constructor should store layer config and validate generator type and list lengths."""
    model = GCNSupervisedGraphClassification(
        layer_sizes=[16], activations=["relu"], generator=generator
    )
    assert len(model.layer_sizes) == 1
    assert len(model.activations) == 1
    assert model.layer_sizes[0] == 16
    assert model.activations[0] == "relu"
    # the generator must be a PaddedGraphGenerator — None and other generator types fail
    with pytest.raises(
        TypeError, match="generator: expected.*PaddedGraphGenerator, found NoneType"
    ):
        GCNSupervisedGraphClassification(
            layer_sizes=[16], activations=["relu"], generator=None
        )
    with pytest.raises(
        TypeError,
        match="generator: expected.*PaddedGraphGenerator, found FullBatchNodeGenerator",
    ):
        GCNSupervisedGraphClassification(
            layer_sizes=[16],
            activations=["relu"],
            generator=FullBatchNodeGenerator(graphs[0]),
        )
    # layer_sizes and activations must be the same length — checked in both directions
    with pytest.raises(
        ValueError,
        match="expected.*number of layers.*same as.*number of activations,found 2.*vs.*1",
    ):
        GCNSupervisedGraphClassification(
            layer_sizes=[16, 32], activations=["relu"], generator=generator
        )
    with pytest.raises(
        ValueError,
        match="expected.*number of layers.*same as.*number of activations,found 1.*vs.*2",
    ):
        GCNSupervisedGraphClassification(
            layer_sizes=[32], activations=["relu", "elu"], generator=generator
        )
def test_in_out_tensors():
    """in_out_tensors should expose three inputs and a 2-D output sized by the last layer."""
    model = GCNSupervisedGraphClassification(
        layer_sizes=[16, 8], activations=["relu", "relu"], generator=generator
    )
    inputs, output = model.in_out_tensors()
    assert len(inputs) == 3
    assert len(inputs[0].shape) == 3
    assert inputs[0].shape[-1] == 4  # the node feature dimensionality
    assert len(output.shape) == 2
    assert output.shape[-1] == 8  # the final layer size
def test_stateful():
    """
    Two Keras models built from the same GCNSupervisedGraphClassification instance
    should share weights: training either one must be reflected in both.
    """
    layer_sizes = [16, 2]
    activations = ["elu", "elu"]
    targets = np.array([[0, 1], [0, 1], [1, 0]])
    train_graphs = [0, 1, 2]
    gcn_graph_model = GCNSupervisedGraphClassification(
        generator=generator, activations=activations, layer_sizes=layer_sizes
    )
    train_gen = generator.flow(graphs=train_graphs, targets=targets)
    # both Keras models are built from the same underlying layers
    model_1 = tf.keras.Model(*gcn_graph_model.in_out_tensors())
    model_2 = tf.keras.Model(*gcn_graph_model.in_out_tensors())
    # check embeddings are equal before training
    embeddings_1 = model_1.predict(train_gen)
    embeddings_2 = model_2.predict(train_gen)
    np.testing.assert_array_equal(embeddings_1, embeddings_2)
    model_1.compile(loss=tf.nn.softmax_cross_entropy_with_logits, optimizer="Adam")
    model_1.fit(train_gen)
    # check embeddings are still equal after training one model
    embeddings_1 = model_1.predict(train_gen)
    embeddings_2 = model_2.predict(train_gen)
    np.testing.assert_array_equal(embeddings_1, embeddings_2)
    model_2.compile(loss=tf.nn.softmax_cross_entropy_with_logits, optimizer="Adam")
    model_2.fit(train_gen)
    # check embeddings are still equal after training both models
    embeddings_1 = model_1.predict(train_gen)
    embeddings_2 = model_2.predict(train_gen)
    np.testing.assert_array_equal(embeddings_1, embeddings_2)
@pytest.mark.parametrize("pooling", ["default", "custom"])
def test_pooling(pooling):
    """
    With zero GCN layers, the model output is exactly the pooled node features, so the
    default (mean) pooling and a custom masked-sum pooling can be checked numerically.
    """
    # no GCN layers, to just test the pooling directly
    if pooling == "default":
        gcn_graph_model = GCNSupervisedGraphClassification(
            layer_sizes=[], activations=[], generator=generator
        )
        def expected_values(array):
            # default pooling averages node features within each graph
            return array.mean(axis=0)
    else:
        # shift the features to make it a bit more interesting
        shift = 10
        def shifted_sum_pooling(tensor, mask):
            # zero out the padded nodes before summing over the node axis
            mask_floats = tf.expand_dims(tf.cast(mask, tf.float32), axis=-1)
            return tf.math.reduce_sum(tf.multiply(mask_floats, shift + tensor), axis=1)
        gcn_graph_model = GCNSupervisedGraphClassification(
            layer_sizes=[],
            activations=[],
            generator=generator,
            pooling=shifted_sum_pooling,
        )
        def expected_values(array):
            return (shift + array).sum(axis=0)
    train_graphs = [0, 1, 2]
    train_gen = generator.flow(graphs=train_graphs, batch_size=2, shuffle=False)
    model = tf.keras.Model(*gcn_graph_model.in_out_tensors())
    predictions = model.predict(train_gen)
    assert predictions.shape == (3, 4)
    # recompute the expected pooled values directly from each graph's node features
    expected = np.vstack(
        [
            expected_values(graphs[iloc].node_features(node_type="n-0"))
            for iloc in train_graphs
        ]
    )
    np.testing.assert_almost_equal(predictions, expected)
def test_pool_all_layers():
    """With pool_all_layers=True the output concatenates every layer's pooled features."""
    sizes = [5, 7, 11, 1]
    graph_model = GCNSupervisedGraphClassification(
        layer_sizes=sizes,
        activations=["relu"] * 4,
        generator=generator,
        pool_all_layers=True,
    )
    flow = generator.flow(graphs=[0, 1, 2], batch_size=2)
    keras_model = tf.keras.Model(*graph_model.in_out_tensors())
    predictions = keras_model.predict(flow)
    assert predictions.shape == (3, sum(sizes))
def test_dgcnn_smoke():
    """
    DeepGraphCNN is implemented entirely in terms of GCNSupervisedGraphClassification,
    so it suffices to validate the composition and the output shape.
    """
    dgcnn = DeepGraphCNN(
        layer_sizes=[2, 3, 4],
        activations=["relu", "relu", "relu"],
        # one graph is perfect, one graph requires padding and one requires truncation
        k=5,
        generator=generator,
    )
    # implementation expectations: sort pooling over all layers of a graph-classifier
    assert isinstance(dgcnn, GCNSupervisedGraphClassification)
    assert isinstance(dgcnn.pooling, SortPooling)
    assert dgcnn.pool_all_layers == True
    # output shape is (graphs, k * total layer sizes, 1)
    keras_model = tf.keras.Model(*dgcnn.in_out_tensors())
    predictions = keras_model.predict(generator.flow([0, 1, 2]))
    assert predictions.shape == (3, (2 + 3 + 4) * 5, 1)
def test_save_load(tmpdir):
    """The graph classification model should survive a Keras save/load round trip."""
    sg_model = GCNSupervisedGraphClassification(
        layer_sizes=[16, 8], activations=["relu", "relu"], generator=generator
    )
    test_utils.model_save_load(tmpdir, sg_model)
| 7,433 | 31.181818 | 99 | py |
stellargraph | stellargraph-master/tests/layer/test_ppnp.py | # -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from stellargraph.layer import PPNP
from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator
from stellargraph import StellarGraph
from stellargraph.core.utils import PPNP_Aadj_feats_op
import networkx as nx
import pandas as pd
import numpy as np
from tensorflow import keras
import pytest
from ..test_utils.graphs import create_graph_features
from .. import test_utils
def test_PPNP_edge_cases():
    """
    PPNP should reject sparse generators, mismatched layer/activation lengths, and
    non-FullBatchNodeGenerator generators with informative errors.
    """
    G, features = create_graph_features()
    adj = G.to_adjacency_matrix()
    # exercises the preprocessing op; the transformed values themselves are unused below
    features, adj = PPNP_Aadj_feats_op(features, adj)
    # BUGFIX: the original used try/except blocks that left `error` unbound (NameError)
    # or stale when the expected exception was not raised; pytest.raises reports a
    # clean failure in that case.
    with pytest.raises(ValueError):
        FullBatchNodeGenerator(G, sparse=True, method="ppnp")
    generator = FullBatchNodeGenerator(G, sparse=False, method="ppnp")
    with pytest.raises(ValueError) as exc_info:
        PPNP([2, 2], generator=generator, activations=["relu"], dropout=0.5)
    assert str(exc_info.value) == "The number of layers should equal the number of activations"
    with pytest.raises(TypeError) as exc_info:
        PPNP([2], generator=[0, 1], activations=["relu"], dropout=0.5)
    assert str(exc_info.value) == "Generator should be a instance of FullBatchNodeGenerator"
def test_PPNP_apply_dense():
    """Predictions via raw dense arrays and via the generator flow should agree."""
    G, features = create_graph_features()
    adj = G.to_adjacency_matrix()
    features, adj = PPNP_Aadj_feats_op(features, adj)
    adj = adj[None, :, :]  # add the leading batch dimension the model expects
    generator = FullBatchNodeGenerator(G, sparse=False, method="ppnp")
    ppnpModel = PPNP([2], generator=generator, activations=["relu"], dropout=0.5)
    x_in, x_out = ppnpModel.in_out_tensors()
    model = keras.Model(inputs=x_in, outputs=x_out)
    # predict with raw dense arrays (batched features, output indices, adjacency)
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, adj])
    assert preds_1.shape == (1, 2, 2)
    # predict via the generator flow; results should match the raw-array path
    preds_2 = model.predict(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)
    assert preds_1 == pytest.approx(preds_2)
def test_PPNP_save_load(tmpdir):
    """A two-layer PPNP model must survive a save/load round trip."""
    graph, _ = create_graph_features()
    gen = FullBatchNodeGenerator(graph, sparse=False)
    test_utils.model_save_load(tmpdir, PPNP([2, 3], gen, ["relu", "relu"]))
| 2,904 | 32.390805 | 88 | py |
stellargraph | stellargraph-master/tests/layer/test_deep_graph_infomax.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from stellargraph.layer import *
from stellargraph.mapper import *
from ..test_utils.graphs import example_graph_random
from .. import require_gpu, test_utils
import tensorflow as tf
import pytest
import numpy as np
def _model_data(model_type, sparse):
    """Construct a (generator, model, nodes) triple for ``model_type``.

    Skips the current test when ``sparse=True`` is requested for a model
    class that does not accept a sparse adjacency input.
    """
    emb_dim = 16
    # only these model classes can consume a sparse adjacency matrix
    sparse_support = (GCN, APPNP, GAT, RGCN)
    if sparse and model_type not in sparse_support:
        pytest.skip(f"{model_type.__name__} doesn't support/use sparse=True")
    if model_type in (GCN, APPNP, GAT, PPNP):
        # full-batch homogeneous models: one generator over the whole graph
        G = example_graph_random()
        generator = FullBatchNodeGenerator(G, sparse=sparse)
        model = model_type(
            generator=generator, activations=["relu"], layer_sizes=[emb_dim]
        )
        nodes = G.nodes()
    elif model_type is GraphSAGE:
        G = example_graph_random()
        generator = GraphSAGENodeGenerator(G, batch_size=5, num_samples=[2, 3])
        model = GraphSAGE(generator=generator, layer_sizes=[4, emb_dim])
        nodes = G.nodes()
    elif model_type is DirectedGraphSAGE:
        # directed sampling takes separate in- and out-neighbourhood sizes
        G = example_graph_random(is_directed=True)
        generator = DirectedGraphSAGENodeGenerator(
            G, batch_size=5, in_samples=[2, 3], out_samples=[4, 1]
        )
        model = DirectedGraphSAGE(generator=generator, layer_sizes=[4, emb_dim])
        nodes = G.nodes()
    elif model_type is HinSAGE:
        # heterogeneous case: two node types, predictions only for "n-1"
        head_node_type = "n-1"
        node_types = 2
        G = example_graph_random(
            {nt: nt + 3 for nt in range(node_types)},
            node_types=node_types,
            edge_types=2,
        )
        generator = HinSAGENodeGenerator(
            G, batch_size=5, num_samples=[2, 2], head_node_type=head_node_type
        )
        model = HinSAGE(generator=generator, layer_sizes=[4, emb_dim])
        nodes = G.nodes(node_type=head_node_type)
    elif model_type is RGCN:
        # relational model: multiple edge types
        G = example_graph_random(10, edge_types=3)
        generator = RelationalFullBatchNodeGenerator(G, sparse=sparse)
        model = RGCN([4, emb_dim], generator)
        nodes = G.nodes()
    return generator, model, nodes
@pytest.mark.parametrize(
    "model_type", [GCN, APPNP, GAT, PPNP, GraphSAGE, DirectedGraphSAGE, HinSAGE, RGCN]
)
@pytest.mark.parametrize("sparse", [False, True])
def test_dgi(model_type, sparse):
    """Deep Graph Infomax trains on a corrupted generator and the base model
    then yields embeddings of the expected shape."""
    generator, base_model, nodes = _model_data(model_type, sparse)

    corrupted = CorruptedGenerator(generator)
    infomax = DeepGraphInfomax(base_model, corrupted)
    dgi_model = tf.keras.Model(*infomax.in_out_tensors())
    dgi_model.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits, optimizer="Adam")
    dgi_model.fit(corrupted.flow(nodes))

    embeddings = tf.keras.Model(*base_model.in_out_tensors()).predict(
        generator.flow(nodes)
    )

    # full-batch generators include a dummy batch dimension of 1
    full_batch = (FullBatchNodeGenerator, RelationalFullBatchNodeGenerator)
    if isinstance(generator, full_batch):
        assert embeddings.shape == (1, len(nodes), 16)
    else:
        assert embeddings.shape == (len(nodes), 16)
@pytest.mark.skipif(require_gpu, reason="tf on GPU is non-deterministic")
def test_dgi_stateful():
    """Embedding models built from the same DeepGraphInfomax share weights:
    they must stay equal before training, after training one Keras model,
    and after training a second one."""
    G = example_graph_random()
    emb_dim = 16
    generator = FullBatchNodeGenerator(G)
    corrupted_generator = CorruptedGenerator(generator)
    gen = corrupted_generator.flow(G.nodes())

    infomax = DeepGraphInfomax(
        GCN(generator=generator, activations=["relu"], layer_sizes=[emb_dim]),
        corrupted_generator,
    )

    def embeddings():
        # a fresh embedding model sharing infomax's current weights
        return tf.keras.Model(*infomax.embedding_model()).predict(
            generator.flow(G.nodes())
        )

    def check_consistent():
        np.testing.assert_array_equal(embeddings(), embeddings())

    model_1 = tf.keras.Model(*infomax.in_out_tensors())
    model_2 = tf.keras.Model(*infomax.in_out_tensors())

    # embeddings are equal before any training
    check_consistent()

    model_1.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits, optimizer="Adam")
    model_1.fit(gen)
    # still equal after training the first model
    check_consistent()

    model_2.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits, optimizer="Adam")
    model_2.fit(gen)
    # still equal after training both models
    check_consistent()
def test_dgi_deprecated_no_generator():
    """Omitting ``corrupted_generator`` is deprecated and must warn."""
    generator = FullBatchNodeGenerator(example_graph_random())
    gcn = GCN(generator=generator, activations=["relu"], layer_sizes=[4])
    with pytest.warns(
        DeprecationWarning, match="The 'corrupted_generator' parameter should be set"
    ):
        DeepGraphInfomax(gcn)
@pytest.mark.parametrize("model_type", [GCN, GraphSAGE])
def test_dgi_save_load(tmpdir, model_type):
    """DeepGraphInfomax must survive a save/load round trip."""
    generator, base_model, _nodes = _model_data(model_type, sparse=False)
    dgi = DeepGraphInfomax(base_model, CorruptedGenerator(generator))
    test_utils.model_save_load(tmpdir, dgi)
| 6,120 | 34.178161 | 86 | py |
stellargraph | stellargraph-master/tests/layer/test_misc.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GCN tests
"""
import tensorflow as tf
from tensorflow import keras
import numpy as np
import scipy.sparse as sps
import pytest
from stellargraph.layer.misc import *
from ..test_utils.graphs import create_graph_features, example_graph
from stellargraph.mapper import *
from stellargraph.layer import *
def sparse_matrix_example(N=10, density=0.1):
    """Return (indices, values, matrix) for a random ``N`` x ``N`` COO matrix."""
    mat = sps.rand(N, N, density=density, format="coo")
    indices = np.stack([mat.row, mat.col], axis=1)
    return indices, mat.data, mat
def test_squeezedsparseconversion():
    """SqueezedSparseConversion rebuilds a sparse matrix from batched
    index/value tensors; the model's product matches scipy's."""
    N = 10
    features = keras.Input(batch_shape=(1, N, 1), dtype="float32")
    indices_in = keras.Input(batch_shape=(1, None, 2), dtype="int64")
    values_in = keras.Input(batch_shape=(1, None), dtype="float32")

    sparse_mat = SqueezedSparseConversion(shape=(N, N), dtype=values_in.dtype)(
        [indices_in, values_in]
    )
    product = keras.layers.Lambda(
        lambda xin: K.expand_dims(K.dot(xin[0], K.squeeze(xin[1], 0)), 0)
    )([sparse_mat, features])
    model = keras.Model(inputs=[features, indices_in, values_in], outputs=product)

    x = np.random.randn(1, N, 1)
    A_indices, A_values, A = sparse_matrix_example(N)
    result = model.predict(
        [x, np.expand_dims(A_indices, 0), np.expand_dims(A_values, 0)]
    )

    np.testing.assert_allclose(
        result.squeeze(), A.dot(x.squeeze()), atol=1e-7, rtol=1e-5
    )
def test_squeezedsparseconversion_dtype():
    """An explicit dtype overrides that of the incoming values tensor."""
    N = 10
    features = keras.Input(batch_shape=(1, N, 1), dtype="float64")
    indices_in = keras.Input(batch_shape=(1, None, 2), dtype="int64")
    values_in = keras.Input(batch_shape=(1, None), dtype="float32")

    sparse_mat = SqueezedSparseConversion(shape=(N, N), dtype="float64")(
        [indices_in, values_in]
    )
    product = keras.layers.Lambda(
        lambda xin: K.expand_dims(K.dot(xin[0], K.squeeze(xin[1], 0)), 0)
    )([sparse_mat, features])
    model = keras.Model(inputs=[features, indices_in, values_in], outputs=product)

    x = np.random.randn(1, N, 1)
    A_indices, A_values, A = sparse_matrix_example(N)
    result = model.predict(
        [x, np.expand_dims(A_indices, 0), np.expand_dims(A_values, 0)]
    )

    assert sparse_mat.dtype == tf.dtypes.float64
    np.testing.assert_allclose(
        result.squeeze(), A.dot(x.squeeze()), atol=1e-7, rtol=1e-5
    )
def test_squeezedsparseconversion_axis():
    """With ``axis=None`` the layer accepts unbatched index/value inputs."""
    N = 10
    A_indices, A_values, A = sparse_matrix_example(N)
    nnz = len(A_indices)

    indices_in = keras.Input(batch_shape=(nnz, 2), dtype="int64")
    values_in = keras.Input(batch_shape=(nnz, 1), dtype="float32")

    # Keras reshapes everything to have ndim at least 2, so flatten the values
    flat_values = keras.layers.Lambda(lambda A: K.reshape(A, (-1,)))(values_in)
    sparse_mat = SqueezedSparseConversion(
        shape=(N, N), axis=None, dtype=flat_values.dtype
    )([indices_in, flat_values])

    ones = tf.ones((N, 1))
    row_sums = keras.layers.Lambda(lambda xin: K.dot(xin, ones))(sparse_mat)
    model = keras.Model(inputs=[indices_in, values_in], outputs=row_sums)

    result = model.predict([A_indices, A_values])
    np.testing.assert_allclose(result, A.sum(axis=1), atol=1e-7, rtol=1e-5)
def test_gather_indices():
    """GatherIndices must behave identically to ``tf.gather`` for every
    ``batch_dims``/``axis`` combination exercised here.

    Fixes: ``np.product`` is a deprecated alias removed in NumPy 2.0 —
    replaced with ``np.prod``; also drops an unused local layer instance.
    """
    batch_dim = 3
    data_in = keras.Input(batch_shape=(batch_dim, 5, 7))
    indices_in = keras.Input(batch_shape=(batch_dim, 11), dtype="int32")

    data = np.arange(np.prod(data_in.shape)).reshape(data_in.shape)
    indices = np.random.choice(range(min(data_in.shape)), indices_in.shape)

    # check that the layer acts the same as tf.gather
    def run(**kwargs):
        expected = tf.gather(data, indices, **kwargs)

        out = GatherIndices(**kwargs)([data_in, indices_in])
        model = keras.Model(inputs=[data_in, indices_in], outputs=out)
        pred = model.predict([data, indices])

        np.testing.assert_array_equal(pred, expected)

    # default settings
    run()
    # with a batch dimension
    run(batch_dims=1)
    # various other forms...
    run(axis=1)
    run(batch_dims=1, axis=2)
def _deprecated_test(sg_model):
    """Check that the deprecated build/node_model/link_model methods warn.

    Models lacking a particular method skip that check via the
    AttributeError handlers below.
    """
    with pytest.warns(DeprecationWarning):
        x_in, x_out = sg_model.build()
    try:
        # the private call is skipped for RGCN specifically (exact-class check)
        if type(sg_model) is not RGCN:
            x_in, x_out = sg_model._node_model()
        with pytest.warns(DeprecationWarning):
            x_in, x_out = sg_model.node_model()
    except AttributeError:
        # model has no node_model: nothing to check
        pass
    try:
        x_in, x_out = sg_model._link_model()
        with pytest.warns(DeprecationWarning):
            x_in, x_out = sg_model.link_model()
    except AttributeError:
        # model has no link_model: nothing to check
        pass
def test_deprecated_model_functions():
    """Every model class must emit deprecation warnings for its old
    build/node_model/link_model entry points (checked via _deprecated_test)."""
    G, _ = create_graph_features()
    # full batch models
    generator = FullBatchNodeGenerator(G)
    for model_type in [GCN, GAT, PPNP, APPNP]:
        sg_model = model_type(
            generator=generator, layer_sizes=[4], activations=["relu"]
        )
        _deprecated_test(sg_model)
    # test DeepGraphInfomax here because it needs a fullbatch model
    sg_model = DeepGraphInfomax(sg_model)
    _deprecated_test(sg_model)
    # models with layer_sizes and activations args
    # NOTE: generators and model_types are paired element-wise by the zip below
    generators = [
        ClusterNodeGenerator(G),
        HinSAGENodeGenerator(
            G, batch_size=1, num_samples=[2], head_node_type="default"
        ),
        GraphSAGENodeGenerator(G, batch_size=1, num_samples=[2]),
        RelationalFullBatchNodeGenerator(G),
        PaddedGraphGenerator([G]),
    ]
    model_types = [
        ClusterGCN,
        HinSAGE,
        GraphSAGE,
        RGCN,
        GCNSupervisedGraphClassification,
    ]
    for generator, model_type in zip(generators, model_types):
        sg_model = model_type(
            layer_sizes=[2], activations=["relu"], generator=generator
        )
        _deprecated_test(sg_model)
    # models with embedding_dimension arg
    # NOTE(review): Attri2Vec here is never reached — zip truncates to the 3
    # generators; Attri2Vec is exercised separately in the "outlier" section
    model_types = [WatchYourStep, DistMult, ComplEx, Attri2Vec]
    generators = [
        AdjacencyPowerGenerator(G),
        KGTripleGenerator(G, batch_size=1),
        KGTripleGenerator(G, batch_size=1),
    ]
    for generator, model_type in zip(generators, model_types):
        sg_model = model_type(generator=generator, embedding_dimension=2)
        _deprecated_test(sg_model)
    # outlier models that need to be treated separately
    generator = Attri2VecLinkGenerator(G, batch_size=1)
    sg_model = Attri2Vec(generator=generator, layer_sizes=[4], activation="sigmoid")
    _deprecated_test(sg_model)
    G = example_graph(feature_size=1, is_directed=True)
    generator = DirectedGraphSAGENodeGenerator(
        G, batch_size=1, in_samples=[2], out_samples=[2]
    )
    sg_model = DirectedGraphSAGE(
        generator=generator, layer_sizes=[4], activations=["relu"]
    )
    _deprecated_test(sg_model)
| 7,161 | 30.973214 | 85 | py |
stellargraph | stellargraph-master/tests/layer/test_gcn_lstm.py | # -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
from tensorflow.keras import Model
from stellargraph import StellarGraph, IndexedArray
from stellargraph.layer import GCN_LSTM
from stellargraph.layer import FixedAdjacencyGraphConvolution
from stellargraph.mapper import SlidingFeaturesNodeGenerator
from .. import test_utils
def get_timeseries_graph_data():
    """Random fixtures: features (10, 5, 4), targets (10, 5), adjacency (5, 5)."""
    x = np.random.rand(10, 5, 4)
    y = np.random.rand(10, 5)
    adjacency = np.random.randint(0, 5, size=(5, 5))
    return x, y, adjacency
def test_GraphConvolution_config():
    """get_config reflects explicit arguments plus defaults."""
    _, _, adjacency = get_timeseries_graph_data()
    layer = FixedAdjacencyGraphConvolution(units=10, A=adjacency, activation="relu")
    conf = layer.get_config()

    # explicitly supplied arguments
    assert conf["units"] == 10
    assert conf["activation"] == "relu"

    # defaults
    assert conf["use_bias"] == True
    assert conf["kernel_initializer"]["class_name"] == "GlorotUniform"
    assert conf["bias_initializer"]["class_name"] == "Zeros"
    for key in (
        "kernel_regularizer",
        "bias_regularizer",
        "kernel_constraint",
        "bias_constraint",
    ):
        assert conf[key] == None
def test_gcn_lstm_model_parameters():
    """Constructor arguments are stored; dropout defaults to 0.5."""
    fx, fy, adjacency = get_timeseries_graph_data()
    model = GCN_LSTM(
        seq_len=fx.shape[-2],
        adj=adjacency,
        gc_layer_sizes=[2, 2],
        gc_activations=["relu", "relu"],
        lstm_layer_sizes=[10],
        lstm_activations=["tanh"],
    )
    assert model.gc_activations == ["relu", "relu"]
    assert model.dropout == 0.5
    assert model.lstm_activations == ["tanh"]
    assert model.lstm_layer_sizes == [10]
    assert len(model.lstm_layer_sizes) == len(model.lstm_activations)
def test_gcn_lstm_activations():
    """Missing activations default to 'relu' (GC) and 'tanh' (LSTM)."""
    fx, fy, adjacency = get_timeseries_graph_data()

    # no activations given at all: both lists are filled with defaults
    model = GCN_LSTM(
        seq_len=fx.shape[-2],
        adj=adjacency,
        gc_layer_sizes=[10, 10, 10, 10, 10],
        lstm_layer_sizes=[8, 16, 32, 64],
    )
    assert model.gc_activations == ["relu"] * 5
    assert model.lstm_activations == ["tanh"] * 4

    # only GC activations given: the LSTM ones still default
    model = GCN_LSTM(
        seq_len=fx.shape[-2],
        adj=adjacency,
        gc_layer_sizes=[10],
        gc_activations=["relu"],
        lstm_layer_sizes=[8, 16, 32, 64],
    )
    assert model.lstm_activations == ["tanh"] * 4
def test_lstm_return_sequences():
    """Every LSTM layer except the last must return full sequences."""
    fx, fy, adjacency = get_timeseries_graph_data()
    model = GCN_LSTM(
        seq_len=fx.shape[-2],
        adj=adjacency,
        gc_layer_sizes=[16, 16, 16],
        gc_activations=["relu", "relu", "relu"],
        lstm_layer_sizes=[8, 16, 32],
        lstm_activations=["tanh"],
    )
    *hidden, last = model._lstm_layers
    assert all(layer.return_sequences for layer in hidden)
    assert not last.return_sequences
def test_gcn_lstm_layers():
    """One internal layer is created per configured size."""
    fx, fy, adjacency = get_timeseries_graph_data()
    model = GCN_LSTM(
        seq_len=fx.shape[-2],
        adj=adjacency,
        gc_layer_sizes=[8, 8, 16],
        gc_activations=["relu", "relu", "relu"],
        lstm_layer_sizes=[8, 16, 32],
        lstm_activations=["tanh"],
    )
    assert len(model._gc_layers) == len(model.gc_layer_sizes)
    assert len(model._lstm_layers) == len(model.lstm_layer_sizes)
def test_gcn_lstm_model_input_output():
    """in_out_tensors exposes tensors matching the data's node/feature dims."""
    fx, fy, adjacency = get_timeseries_graph_data()
    gcn_lstm = GCN_LSTM(
        seq_len=fx.shape[-1],
        adj=adjacency,
        gc_layer_sizes=[8, 8, 16],
        gc_activations=["relu", "relu", "relu"],
        lstm_layer_sizes=[8, 16, 32],
        lstm_activations=["tanh"],
    )
    x_input, x_output = gcn_lstm.in_out_tensors()
    assert (x_input.shape[1], x_input.shape[2]) == (fx.shape[1], fx.shape[2])
    assert x_output.shape[1] == fx.shape[-2]
def test_gcn_lstm_model():
    """The assembled Keras model compiles and trains for five epochs."""
    fx, fy, adjacency = get_timeseries_graph_data()
    gcn_lstm = GCN_LSTM(
        seq_len=fx.shape[-1],
        adj=adjacency,
        gc_layer_sizes=[8, 8, 16],
        gc_activations=["relu", "relu", "relu"],
        lstm_layer_sizes=[8, 16, 32],
        lstm_activations=["tanh"],
    )
    model = Model(*gcn_lstm.in_out_tensors())
    model.compile(optimizer="adam", loss="mae", metrics=["mse"])
    history = model.fit(fx, fy, epochs=5, batch_size=2, shuffle=True, verbose=0)
    assert history.params["epochs"] == 5
    assert len(history.history["loss"]) == 5
def test_gcn_lstm_model_prediction():
    """Prediction on a single sample yields one value per node (shape (1, 5))."""
    fx, fy, adjacency = get_timeseries_graph_data()
    gcn_lstm = GCN_LSTM(
        seq_len=fx.shape[-1],
        adj=adjacency,
        gc_layer_sizes=[8, 8, 16],
        gc_activations=["relu", "relu", "relu"],
        lstm_layer_sizes=[8, 16, 32],
        lstm_activations=["tanh"],
    )
    model = Model(*gcn_lstm.in_out_tensors())
    sample = np.random.rand(1, 5, 4)
    assert model.predict(sample).shape == (1, 5)
@pytest.fixture(params=["univariate", "multivariate"])
def arange_graph(request):
    """A 3-node ("a"-"b"-"c") graph whose features are normalized arange
    values, shaped (3, 7) or (3, 7, 11) depending on the variate param.

    Fixes: ``np.product`` is a deprecated alias removed in NumPy 2.0 —
    replaced with ``np.prod``.
    """
    shape = (3, 7, 11) if request.param == "multivariate" else (3, 7)
    total_elems = np.prod(shape)
    nodes = IndexedArray(
        np.arange(total_elems).reshape(shape) / total_elems, index=["a", "b", "c"]
    )
    edges = pd.DataFrame({"source": ["a", "b"], "target": ["b", "c"]})
    return StellarGraph(nodes, edges)
def test_gcn_lstm_generator(arange_graph):
    """GCN_LSTM built from a SlidingFeaturesNodeGenerator trains, predicts,
    and shares weights between models built from the same instance."""
    gen = SlidingFeaturesNodeGenerator(arange_graph, 2, batch_size=3)
    gcn_lstm = GCN_LSTM(None, None, [2], [4], generator=gen)

    model = Model(*gcn_lstm.in_out_tensors())
    model.compile("adam", loss="mse")
    model.fit(gen.flow(slice(0, 5), target_distance=1))

    # a second model from the same GCN_LSTM must predict identically
    preds_a = model.predict(gen.flow(slice(5, 7)))
    preds_b = Model(*gcn_lstm.in_out_tensors()).predict(gen.flow(slice(5, 7)))
    np.testing.assert_array_equal(preds_a, preds_b)
@pytest.mark.xfail(reason="FIXME #1681")
def test_gcn_lstm_save_load(tmpdir, arange_graph):
    """Save/load round trip (known broken, tracked by issue #1681)."""
    generator = SlidingFeaturesNodeGenerator(arange_graph, 2, batch_size=3)
    model = GCN_LSTM(None, None, [2], [4], generator=generator)
    test_utils.model_save_load(tmpdir, model)
| 7,206 | 31.031111 | 100 | py |
stellargraph | stellargraph-master/tests/layer/test_hinsage.py | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
HinSAGE tests
"""
import pytest
import numpy as np
import networkx as nx
from tensorflow import keras
from tensorflow.keras import regularizers
from stellargraph import StellarGraph
from stellargraph.layer.hinsage import *
from stellargraph.mapper import *
from ..test_utils.graphs import example_hin_1
from .. import test_utils
def test_mean_hin_agg_constructor():
    """Defaults: no bias, relu activation, output dim split in half."""
    aggregator = MeanHinAggregator(output_dim=2)
    assert aggregator.output_dim == 2
    assert aggregator.half_output_dim == 1
    assert not aggregator.has_bias
    assert aggregator.act.__name__ == "relu"
def test_mean_hin_agg_constructor_1():
    """Explicit bias/activation arguments round-trip through get_config."""
    agg = MeanHinAggregator(output_dim=2, bias=True, act=lambda x: x + 1)
    assert agg.output_dim == 2
    assert agg.half_output_dim == 1
    assert agg.has_bias
    assert agg.act(2) == 3

    # a lambda activation serializes under its generic name
    conf = agg.get_config()
    assert conf["output_dim"] == 2
    assert conf["bias"] == True
    assert conf["act"] == "<lambda>"

    # a named activation serializes as its string name
    agg = MeanHinAggregator(output_dim=2, bias=True, act="relu")
    assert agg.output_dim == 2
    assert agg.half_output_dim == 1
    assert agg.has_bias
    conf = agg.get_config()
    assert conf["output_dim"] == 2
    assert conf["bias"] == True
    assert conf["act"] == "relu"
def test_mean_hin_agg_apply():
    """One head node, two neighbour types; ones-kernels give [[[2, 8]]]."""
    aggregator = MeanHinAggregator(2, act=lambda z: z, kernel_initializer="ones")
    inputs = [
        keras.Input(shape=(1, 2)),
        keras.Input(shape=(1, 2, 2)),
        keras.Input(shape=(1, 2, 4)),
    ]
    model = keras.Model(inputs=inputs, outputs=aggregator(inputs))

    feats = [
        np.array([[[1, 1]]]),
        np.array([[[[2, 2], [2, 2]]]]),
        np.array([[[[3, 3, 3, 3], [3, 3, 3, 3]]]]),
    ]
    assert model.predict(feats) == pytest.approx(np.array([[[2, 8]]]))
def test_mean_hin_agg_apply_2():
    """Two aggregators applied to shared inputs give independent outputs."""
    agg_identity = MeanHinAggregator(2, act=lambda z: z, kernel_initializer="ones")
    agg_plus_one = MeanHinAggregator(2, act=lambda z: z + 1, kernel_initializer="ones")
    inputs = [
        keras.Input(shape=(1, 2)),
        keras.Input(shape=(1, 2, 2)),
        keras.Input(shape=(1, 2, 4)),
    ]
    outputs = [agg_identity(inputs, name="test"), agg_plus_one(inputs, name="test")]
    model = keras.Model(inputs=inputs, outputs=outputs)

    feats = [
        np.array([[[1, 1]]]),
        np.array([[[[2, 2], [2, 2]]]]),
        np.array([[[[3, 3, 3, 3], [3, 3, 3, 3]]]]),
    ]
    predictions = model.predict(feats)
    expected = [np.array([[[2, 8]]]), np.array([[[3, 9]]])]
    for got, want in zip(predictions, expected):
        assert got == pytest.approx(want)
def test_mean_hin_zero_neighbours():
    """With no neighbours only the self-kernel contributes; neighbour
    weights are never created."""
    agg = MeanHinAggregator(2, bias=False, act=lambda z: z, kernel_initializer="ones")
    inputs = [
        keras.Input(shape=(1, 2)),
        keras.Input(shape=(1, 0, 2)),
        keras.Input(shape=(1, 0, 4)),
    ]
    out = agg(inputs)

    # neighbour kernels are skipped entirely for empty neighbourhoods
    assert all(w is None for w in agg.w_neigh)

    model = keras.Model(inputs=inputs, outputs=out)
    feats = [np.array([[[1, 1]]]), np.zeros((1, 1, 0, 2)), np.zeros((1, 1, 0, 4))]
    assert model.predict(feats) == pytest.approx(np.array([[[2, 0]]]))
def test_hinsage_constructor():
    """HinSAGE accepts both per-type dict layer sizes and plain ints."""

    def build(layer_sizes):
        # fresh argument literals per call, matching the original test
        return HinSAGE(
            layer_sizes=layer_sizes,
            n_samples=[2, 2],
            input_neighbor_tree=[
                ("1", [1, 2]),
                ("1", [3, 4]),
                ("2", [5]),
                ("1", []),
                ("2", []),
                ("2", []),
            ],
            multiplicity=1,
            input_dim={"1": 2, "2": 2},
        )

    for sizes in ([{"1": 2, "2": 2}, {"1": 2}], [2, 2]):
        hs = build(sizes)
        assert hs.n_layers == 2
        assert hs.n_samples == [2, 2]
        assert hs.bias
def test_hinsage_constructor_with_agg():
    """An explicit aggregator class is accepted by the constructor."""
    hs = HinSAGE(
        layer_sizes=[{"1": 2, "2": 2}, {"1": 2}],
        n_samples=[2, 2],
        input_neighbor_tree=[
            ("1", [1, 2]),
            ("1", [3, 4]),
            ("2", [5]),
            ("1", []),
            ("2", []),
            ("2", []),
        ],
        multiplicity=1,
        input_dim={"1": 2, "2": 2},
        aggregator=MeanHinAggregator,
    )
    assert hs.n_layers == 2
    assert hs.n_samples == [2, 2]
    assert hs.bias
def test_hinsage_input_shapes():
    """Input shapes follow the neighbour tree: sample counts multiply per hop
    and the feature width is the node type's input_dim."""
    hs = HinSAGE(
        layer_sizes=[{"1": 2, "2": 2}, 2],
        n_samples=[2, 2],
        input_neighbor_tree=[
            ("1", [1, 2]),
            ("1", [3, 4]),
            ("2", [5]),
            ("1", []),
            ("2", []),
            ("2", []),
        ],
        multiplicity=1,
        input_dim={"1": 2, "2": 4},
    )
    expected_shapes = [(1, 2), (2, 2), (2, 4), (4, 2), (4, 4), (4, 4)]
    assert hs._input_shapes() == expected_shapes
def test_hinsage_constructor_wrong_normalisation():
    """An unrecognized ``normalize`` argument must raise ValueError."""
    with pytest.raises(ValueError):
        HinSAGE(
            layer_sizes=[{"1": 2, "2": 2}, {"1": 2}],
            n_samples=[2, 2],
            input_neighbor_tree=[
                ("1", [1, 2]),
                ("1", [3, 4]),
                ("2", [5]),
                ("1", []),
                ("2", []),
                ("2", []),
            ],
            multiplicity=1,
            input_dim={"1": 2, "2": 2},
            normalize="unknown",
        )
def test_hinsage_apply():
    """Apply a 2-layer HinSAGE to hand-built inputs and compare against a
    value computed by hand for ones-initialized kernels."""
    hs = HinSAGE(
        layer_sizes=[{"1": 2, "2": 2}, 2],
        n_samples=[2, 2],
        input_neighbor_tree=[
            ("1", [1, 2]),
            ("1", [3, 4]),
            ("2", [5]),
            ("1", []),
            ("2", []),
            ("2", []),
        ],
        multiplicity=1,
        input_dim={"1": 2, "2": 4},
        normalize="none",
        kernel_initializer="ones",
    )
    inp = [
        keras.Input(shape=(1, 2)),
        keras.Input(shape=(2, 2)),
        keras.Input(shape=(2, 4)),
        keras.Input(shape=(4, 2)),
        keras.Input(shape=(4, 4)),
        keras.Input(shape=(4, 4)),
    ]
    out = hs(inp)
    model = keras.Model(inputs=inp, outputs=out)
    # head node features, then hop-1 and hop-2 neighbour features following
    # the input_neighbor_tree above
    x = [
        np.array([[[1, 1]]]),
        np.array([[[2, 2], [2, 2]]]),
        np.array([[[4, 4, 4, 4], [4, 4, 4, 4]]]),
        np.array([[[3, 3], [3, 3], [3, 3], [3, 3]]]),
        np.array([[[6, 6, 6, 6], [6, 6, 6, 6], [6, 6, 6, 6], [6, 6, 6, 6]]]),
        np.array([[[9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9]]]),
    ]
    actual = model.predict(x)
    # hand-computed expectation for all-ones kernels, no normalization
    expected = np.array([[12, 35.5]])
    assert actual == pytest.approx(expected)
def test_hinsage_in_out_tensors():
    """Wire the model via in_out_tensors and check the prediction against the
    same hand-computed value used by the manual-input test."""
    hs = HinSAGE(
        layer_sizes=[2, 2],
        n_samples=[2, 2],
        input_neighbor_tree=[
            ("1", [1, 2]),
            ("1", [3, 4]),
            ("2", [5]),
            ("1", []),
            ("2", []),
            ("2", []),
        ],
        multiplicity=1,
        input_dim={"1": 2, "2": 4},
        normalize="none",
        kernel_initializer="ones",
    )
    xin, xout = hs.in_out_tensors()
    model = keras.Model(inputs=xin, outputs=xout)
    # head node features, then hop-1 and hop-2 neighbour features
    x = [
        np.array([[[1, 1]]]),
        np.array([[[2, 2], [2, 2]]]),
        np.array([[[4, 4, 4, 4], [4, 4, 4, 4]]]),
        np.array([[[3, 3], [3, 3], [3, 3], [3, 3]]]),
        np.array([[[6, 6, 6, 6], [6, 6, 6, 6], [6, 6, 6, 6], [6, 6, 6, 6]]]),
        np.array([[[9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9]]]),
    ]
    actual = model.predict(x)
    # hand-computed expectation for all-ones kernels, no normalization
    expected = np.array([[12, 35.5]])
    assert actual == pytest.approx(expected)
def test_hinsage_serialize():
    """Round-trip the model through Keras JSON serialization, load it back
    with all-ones weights, and check the hand-computed prediction."""
    hs = HinSAGE(
        layer_sizes=[2, 2],
        n_samples=[2, 2],
        input_neighbor_tree=[
            ("1", [1, 2]),
            ("1", [3, 4]),
            ("2", [5]),
            ("1", []),
            ("2", []),
            ("2", []),
        ],
        multiplicity=1,
        input_dim={"1": 2, "2": 4},
        normalize="none",
        bias=False,
    )
    xin, xout = hs.in_out_tensors()
    model = keras.Model(inputs=xin, outputs=xout)
    # Save model
    model_json = model.to_json()
    # Set all weights to one
    model_weights = [np.ones_like(w) for w in model.get_weights()]
    # Load model from json & set all weights
    model2 = keras.models.model_from_json(
        model_json, custom_objects={"MeanHinAggregator": MeanHinAggregator}
    )
    model2.set_weights(model_weights)
    # Test loaded model
    x = [
        np.array([[[1, 1]]]),
        np.array([[[2, 2], [2, 2]]]),
        np.array([[[4, 4, 4, 4], [4, 4, 4, 4]]]),
        np.array([[[3, 3], [3, 3], [3, 3], [3, 3]]]),
        np.array([[[6, 6, 6, 6], [6, 6, 6, 6], [6, 6, 6, 6], [6, 6, 6, 6]]]),
        np.array([[[9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9]]]),
    ]
    actual = model2.predict(x)
    # same hand-computed expectation as the direct-apply test
    expected = np.array([[12, 35.5]])
    assert actual == pytest.approx(expected)
def test_hinsage_zero_neighbours():
    """With zero samples at every hop, only the head features contribute."""
    hs = HinSAGE(
        layer_sizes=[2, 2],
        n_samples=[0, 0],
        input_neighbor_tree=[
            ("1", [1, 2]),
            ("1", [3, 4]),
            ("2", [5]),
            ("1", []),
            ("2", []),
            ("2", []),
        ],
        multiplicity=1,
        input_dim={"1": 2, "2": 4},
        normalize="none",
        kernel_initializer="ones",
    )
    x_inputs, x_output = hs.in_out_tensors()
    model = keras.Model(inputs=x_inputs, outputs=x_output)

    # head node features followed by empty neighbour slots of each width
    feats = [np.array([[[1.5, 1]]])] + [
        np.zeros((1, 0, dim)) for dim in (2, 4, 2, 4, 4)
    ]
    assert model.predict(feats) == pytest.approx(np.array([[2.5, 0]]))
def test_hinsage_aggregators():
    """Passing aggregator=MeanHinAggregator explicitly produces the same
    hand-computed output as the default configuration."""
    hs = HinSAGE(
        layer_sizes=[2, 2],
        n_samples=[2, 2],
        input_neighbor_tree=[
            ("1", [1, 2]),
            ("1", [3, 4]),
            ("2", [5]),
            ("1", []),
            ("2", []),
            ("2", []),
        ],
        multiplicity=1,
        input_dim={"1": 2, "2": 4},
        aggregator=MeanHinAggregator,
        normalize="none",
        kernel_initializer="ones",
    )
    xin, xout = hs.in_out_tensors()
    model = keras.Model(inputs=xin, outputs=xout)
    # head node features, then hop-1 and hop-2 neighbour features
    x = [
        np.array([[[1, 1]]]),
        np.array([[[2, 2], [2, 2]]]),
        np.array([[[4, 4, 4, 4], [4, 4, 4, 4]]]),
        np.array([[[3, 3], [3, 3], [3, 3], [3, 3]]]),
        np.array([[[6, 6, 6, 6], [6, 6, 6, 6], [6, 6, 6, 6], [6, 6, 6, 6]]]),
        np.array([[[9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9]]]),
    ]
    actual = model.predict(x)
    # hand-computed expectation for all-ones kernels, no normalization
    expected = np.array([[12, 35.5]])
    assert actual == pytest.approx(expected)
def test_hinsage_passing_activations():
    """Default activations are ['relu', 'linear']; unknown names or a
    wrong-length list raise ValueError."""

    def build(**extra):
        # fresh argument literals per call, matching the original test
        return HinSAGE(
            layer_sizes=[2, 2],
            n_samples=[2, 2],
            input_neighbor_tree=[
                ("1", [1, 2]),
                ("1", [3, 4]),
                ("2", [5]),
                ("1", []),
                ("2", []),
                ("2", []),
            ],
            multiplicity=1,
            input_dim={"1": 2, "2": 2},
            **extra,
        )

    # no activations supplied: the defaults kick in
    assert build().activations == ["relu", "linear"]

    # unknown activation names are rejected
    with pytest.raises(ValueError):
        build(activations=["fred", "wilma"])

    # one activation for two layers is rejected
    with pytest.raises(ValueError):
        build(activations=["relu"])

    # a correctly-sized list is stored as-is
    assert build(activations=["linear"] * 2).activations == ["linear"] * 2
def test_hinsage_regularisers():
    """A real regularizer instance is accepted; a bogus one raises ValueError."""

    def build(regularizer):
        # fresh argument literals per call, matching the original test
        return HinSAGE(
            layer_sizes=[2, 2],
            n_samples=[2, 2],
            input_neighbor_tree=[
                ("1", [1, 2]),
                ("1", [3, 4]),
                ("2", [5]),
                ("1", []),
                ("2", []),
                ("2", []),
            ],
            input_dim={"1": 2, "2": 4},
            multiplicity=1,
            normalize="none",
            kernel_initializer="ones",
            kernel_regularizer=regularizer,
        )

    build(regularizers.l2(0.01))
    with pytest.raises(ValueError):
        build("fred")
def test_hinsage_unitary_layer_size():
    """A layer size of 1 must be rejected with ValueError."""
    with pytest.raises(ValueError):
        HinSAGE(
            layer_sizes=[2, 1],
            n_samples=[2, 2],
            input_neighbor_tree=[
                ("1", [1, 2]),
                ("1", [3, 4]),
                ("2", [5]),
                ("1", []),
                ("2", []),
                ("2", []),
            ],
            input_dim={"1": 2, "2": 4},
            multiplicity=1,
            normalize="none",
            kernel_initializer="ones",
        )
def test_hinsage_from_generator():
    """Build HinSAGE from a HinSAGENodeGenerator and verify the model output
    against a numpy re-implementation of the ones-kernel aggregation."""
    G = example_hin_1({"A": 8, "B": 4})
    gen = HinSAGENodeGenerator(G, 1, [2, 2], "A")
    hs = HinSAGE(
        layer_sizes=[2, 2],
        generator=gen,
        normalize="none",
        kernel_initializer="ones",
        activations=["relu", "relu"],
    )
    xin, xout = hs.in_out_tensors()
    model = keras.Model(inputs=xin, outputs=xout)
    batch_feats = list(gen.flow([1, 2]))
    # manually calculate the output of HinSage. All kernels are tensors of 1s
    # the prediction nodes are type "A" : "A" nodes only have "B" neighbours, while "B" nodes have both "A" and "B"
    # neighbours.
    def transform_neighbours(neighs, dim):
        # sum each neighbour's features, then average over the neighbourhood
        return np.expand_dims(
            neighs.reshape(1, dim, int(neighs.shape[1] / dim), neighs.shape[2]).sum(
                axis=-1
            ),
            -1,
        ).mean(2)
    def hinsage_layer(head, neighs_by_type):
        # concatenate the head's own transform with the per-type neighbour mean
        head_trans = np.expand_dims(head.sum(axis=-1), -1)
        neigh_trans = sum(
            transform_neighbours(neigh, head.shape[1]) for neigh in neighs_by_type
        ) / len(neighs_by_type)
        return np.concatenate([head_trans, neigh_trans], axis=-1)
    for i, feats in enumerate(batch_feats):
        # 1st layer
        # aggregate for the prediction node
        layer_1_out = []
        head = feats[0][0]
        B_neighs = feats[0][1]
        layer_1_out.append(hinsage_layer(head, [B_neighs,]))
        # 1st layer
        # aggregate for the neighbour nodes
        head = feats[0][1]
        B_neighs = feats[0][2]
        A_neighs = feats[0][3]
        layer_1_out.append(hinsage_layer(head, [B_neighs, A_neighs]))
        # 2nd layer
        # aggregate for the prediction nodes
        layer_2_out = []
        head = layer_1_out[0]
        B_neighs = layer_1_out[1]
        layer_2_out.append(hinsage_layer(head, [B_neighs,]))
        actual = model.predict(batch_feats[i][0])
        assert np.isclose(layer_2_out[0], actual).all()
def test_kernel_and_bias_defaults():
    """Aggregator layers default to Glorot kernels, zero biases, and no
    regularizers or constraints."""
    graph = example_hin_1({"A": 8, "B": 4})
    generator = HinSAGENodeGenerator(graph, 1, [2, 2], "A")
    hs = HinSAGE(
        layer_sizes=[2, 2],
        generator=generator,
        normalize="none",
        activations=["relu", "relu"],
    )
    aggregators = [agg for per_type in hs._aggs for agg in per_type.values()]
    for agg in aggregators:
        assert isinstance(agg.kernel_initializer, tf.initializers.GlorotUniform)
        assert isinstance(agg.bias_initializer, tf.initializers.Zeros)
        assert agg.kernel_regularizer is None
        assert agg.bias_regularizer is None
        assert agg.kernel_constraint is None
        assert agg.bias_constraint is None
def test_hinsage_save_load(tmpdir):
    """A HinSAGE model should round-trip through Keras save/load."""
    graph = example_hin_1({"A": 8, "B": 4})
    generator = HinSAGENodeGenerator(graph, 1, [2, 2], "A")
    model = HinSAGE(
        layer_sizes=[2, 2],
        generator=generator,
        normalize="none",
        activations=["relu", "relu"],
    )
    test_utils.model_save_load(tmpdir, model)
| 17,737 | 26.122324 | 116 | py |
stellargraph | stellargraph-master/tests/layer/test_watch_your_step.py | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from stellargraph.layer import AttentiveWalk, WatchYourStep
import numpy as np
from ..test_utils.graphs import barbell
from stellargraph.mapper import AdjacencyPowerGenerator
from stellargraph.losses import graph_log_likelihood
import pytest
from tensorflow.keras import Model
from .. import test_utils
def test_AttentiveWalk_config():
    """get_config() should report the constructor argument and the layer's
    default attention initializer/regularizer/constraint settings."""
    layer = AttentiveWalk(walk_length=10)
    config = layer.get_config()
    assert config["walk_length"] == 10
    assert config["attention_initializer"]["class_name"] == "GlorotUniform"
    assert config["attention_regularizer"] is None
    assert config["attention_constraint"] is None
def test_AttentiveWalk():
    """With an all-ones attention vector, AttentiveWalk should reduce to a
    plain mean over the walk (second) axis."""
    partial_powers = np.random.random((2, 5, 31))
    layer = AttentiveWalk(walk_length=5, attention_initializer="ones")
    result = layer(partial_powers).numpy()
    expected = partial_powers.mean(axis=1)
    np.testing.assert_allclose(result, expected, rtol=1e-5, atol=1e-8)
def test_WatchYourStep_init(barbell):
    """Construction should record the generator's power count, the graph
    size, and the documented defaults for walks and embedding size."""
    power_gen = AdjacencyPowerGenerator(barbell, num_powers=5)
    model = WatchYourStep(power_gen)
    assert model.num_powers == 5
    assert model.n_nodes == len(barbell.nodes())
    assert model.num_walks == 80
    assert model.embedding_dimension == 64
def test_WatchYourStep_bad_init(barbell):
    """Invalid hyperparameters should be rejected with informative errors."""
    power_gen = AdjacencyPowerGenerator(barbell, num_powers=5)
    # (constructor kwargs, expected exception, expected message pattern)
    bad_cases = [
        ({"num_walks": 10.0}, TypeError, "num_walks: expected.* found float"),
        ({"num_walks": 0}, ValueError, "num_walks: expected.* found 0"),
        (
            {"embedding_dimension": 10.0},
            TypeError,
            "embedding_dimension: expected.* found float",
        ),
        (
            {"embedding_dimension": 1},
            ValueError,
            "embedding_dimension: expected.* found 1",
        ),
    ]
    for kwargs, exc_type, pattern in bad_cases:
        with pytest.raises(exc_type, match=pattern):
            WatchYourStep(power_gen, **kwargs)
@pytest.mark.parametrize("weighted", [False, True])
def test_WatchYourStep(barbell, weighted):
    """End-to-end smoke test: fit for one epoch, check the embedding shape,
    and verify in_out_tensors() keeps reusing the same trainable weights."""
    power_gen = AdjacencyPowerGenerator(barbell, num_powers=5, weighted=weighted)
    flow = power_gen.flow(batch_size=4)
    wys = WatchYourStep(power_gen)
    inputs, outputs = wys.in_out_tensors()
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer="adam", loss=graph_log_likelihood)
    model.fit(flow, epochs=1, steps_per_epoch=len(barbell.nodes()) // 4)
    embeddings = wys.embeddings()
    assert embeddings.shape == (len(barbell.nodes()), wys.embedding_dimension)
    # in_out_tensors() should always return tensors backed by the same
    # trainable weights, and thus give the same predictions.
    first = model.predict(flow, steps=8)
    second = Model(*wys.in_out_tensors()).predict(flow, steps=8)
    np.testing.assert_array_equal(first, second)
def test_WatchYourStep_embeddings(barbell):
    """Before any training, embeddings() should reflect the configured
    initializer verbatim (all ones here)."""
    power_gen = AdjacencyPowerGenerator(barbell, num_powers=5)
    wys = WatchYourStep(power_gen, embeddings_initializer="ones")
    inputs, outputs = wys.in_out_tensors()
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer="adam", loss=graph_log_likelihood)
    assert (wys.embeddings() == 1).all()
def test_WatchYourStep_save_load(tmpdir, barbell):
    """A WatchYourStep model should round-trip through Keras save/load."""
    power_gen = AdjacencyPowerGenerator(barbell, num_powers=5)
    test_utils.model_save_load(tmpdir, WatchYourStep(power_gen))
| 3,948 | 33.33913 | 98 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.