repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
ResiDualGAN-DRDG | ResiDualGAN-DRDG-main/core/datasets/dual_dataset.py | import torch.utils.data as D
import random
from PIL import Image
import numpy as np
class DualDataset(D.Dataset):
    """Unpaired two-domain dataset for dual/cycle-GAN style training.

    Each ``__getitem__`` call draws one *random* sample from dataset A and one
    from dataset B (the index argument is ignored), so A/B pairs are unpaired.
    Each dataset root must contain an ``all.txt`` file listing file names, an
    ``images/`` folder, and optionally a ``dsms/`` folder with same-named
    depth (DSM) files; a missing DSM is represented by an empty list.
    """

    def __init__(self, dsa_path, dsb_path, transform_imgs=None, transform_dsms=None, random_seed=666, in_memory=True):
        """
        :param dsa_path: root directory of dataset A
        :param dsb_path: root directory of dataset B
        :param transform_imgs: transform applied to both A and B images
        :param transform_dsms: transform applied to both A and B DSMs
        :param random_seed: seed for the *global* ``random`` module
        :param in_memory: if True, decode every image/DSM into RAM up front
        """
        super(DualDataset, self).__init__()
        self.dsa_path = dsa_path
        self.dsb_path = dsb_path
        self.transform_imgs = transform_imgs
        self.transform_dsms = transform_dsms
        self.in_memory = in_memory
        self.a_files = self._read_file_list(dsa_path)
        self.b_files = self._read_file_list(dsb_path)
        self.a_imgs = []
        self.b_imgs = []
        self.a_dsms = []
        self.b_dsms = []
        if in_memory:
            self.a_imgs, self.a_dsms = self._load_all(dsa_path, self.a_files)
            self.b_imgs, self.b_dsms = self._load_all(dsb_path, self.b_files)
        # NOTE: seeds the global random module, which affects other users of `random`.
        random.seed(random_seed)

    @staticmethod
    def _read_file_list(root):
        """Return the file names listed (one per line) in ``<root>/all.txt``."""
        with open(f"{root}/all.txt", "r") as f:
            return [line.strip() for line in f.readlines()]

    @staticmethod
    def _load_all(root, file_names):
        """Decode all images (and DSMs where present) under *root* into numpy arrays."""
        imgs, dsms = [], []
        for name in file_names:
            imgs.append(np.array(Image.open("{}/images/{}".format(root, name))))
            try:
                dsms.append(np.array(Image.open("{}/dsms/{}".format(root, name))))
            except OSError:
                # No readable DSM for this dataset/file; keep an empty placeholder.
                # (Was a bare `except:` that also hid real errors.)
                dsms.append([])
        return imgs, dsms

    def __getitem__(self, item):
        """Return ``(a_img, b_img, a_dsm, b_dsm)``; *item* is ignored, sampling is random."""
        a_index = random.randint(0, len(self.a_files) - 1)
        b_index = random.randint(0, len(self.b_files) - 1)
        if self.in_memory:
            a_img = self.a_imgs[a_index]
            a_dsm = self.a_dsms[a_index]
            b_img = self.b_imgs[b_index]
            b_dsm = self.b_dsms[b_index]
        else:
            a_file_name = self.a_files[a_index]
            b_file_name = self.b_files[b_index]
            a_img = Image.open("{}/images/{}".format(self.dsa_path, a_file_name))
            b_img = Image.open("{}/images/{}".format(self.dsb_path, b_file_name))
            try:
                a_dsm = Image.open("{}/dsms/{}".format(self.dsa_path, a_file_name))
                b_dsm = Image.open("{}/dsms/{}".format(self.dsb_path, b_file_name))
            except OSError:
                # Either DSM missing -> both fall back to empty placeholders,
                # matching the original single-try behavior.
                a_dsm = []
                b_dsm = []
        if self.transform_imgs:
            a_img = self.transform_imgs(a_img)
            b_img = self.transform_imgs(b_img)
        if self.transform_dsms:
            try:
                a_dsm = self.transform_dsms(a_dsm)
                b_dsm = self.transform_dsms(b_dsm)
            except Exception:
                # DSMs may be empty placeholders the transform cannot handle;
                # keep them untransformed in that case.
                pass
        return a_img, b_img, a_dsm, b_dsm

    def __len__(self):
        # Length of the longer domain so one epoch covers the bigger dataset.
        return max(len(self.a_files), len(self.b_files))
class TransferDataset(D.Dataset):
    """Dataset yielding ``(file_name, transformed image, label mask)`` triples.

    Expects ``<path>/all.txt`` listing file names, plus ``images/`` and
    ``labels/`` folders containing same-named files.  With ``in_memory=True``
    everything is decoded once in the constructor.
    """

    def __init__(self, path, transform, in_memory=False) -> None:
        super(TransferDataset, self).__init__()
        self.path = path
        self.in_memory = in_memory
        self.transform = transform
        with open(f"{self.path}/all.txt", mode="r") as f:
            self.files = [name.strip() for name in f.readlines()]
        self.len = len(self.files)
        self.items = []
        if self.in_memory:
            # Decode everything once so __getitem__ avoids per-call disk I/O.
            self.items = [self._load(name) for name in self.files]

    def _load(self, file_name):
        """Read one (file_name, image array, uint8 label array) triple from disk."""
        img = np.array(Image.open(f"{self.path}/images/{file_name}"))
        lbl = np.array(np.uint8(Image.open(f"{self.path}/labels/{file_name}")))
        return file_name, img, lbl

    def __getitem__(self, index):
        if self.in_memory:
            file_name, img, lbl = self.items[index]
        else:
            file_name, img, lbl = self._load(self.files[index])
        return file_name, self.transform(img), lbl

    def __len__(self):
        return self.len
ResiDualGAN-DRDG | ResiDualGAN-DRDG-main/core/utils/utils.py | import numpy as np
from torch import FloatTensor
from torch.autograd import Variable
import torch.autograd as autograd
import torch
import math
import segmentation_models_pytorch as smp
import logging
import sys
import os
import torch.nn as nn
def get_model(model_type, encoder_name="resnet34", encoder_weights="imagenet", in_channels=3, classes=6):
    """Build a CUDA segmentation model from segmentation_models_pytorch.

    :param model_type: "UNet", "DeepLabV3" or "DeepLabV3+"; anything else yields None
    :param encoder_name: backbone encoder, e.g. mobilenet_v2 or efficientnet-b7
    :param encoder_weights: pre-trained weights for encoder initialization
    :param in_channels: model input channels (1 for gray-scale images, 3 for RGB)
    :param classes: model output channels (number of classes in your dataset)
    :return: the constructed model moved to CUDA, or None for an unknown model_type
    """
    architectures = {
        "UNet": smp.Unet,
        "DeepLabV3": smp.DeepLabV3,
        "DeepLabV3+": smp.DeepLabV3Plus,
    }
    factory = architectures.get(model_type)
    if factory is None:
        return None
    return factory(
        encoder_name=encoder_name,
        encoder_weights=encoder_weights,
        in_channels=in_channels,
        classes=classes,
    ).cuda()
def weights_init_normal(m):
    """DCGAN-style weight initialisation, intended for ``model.apply(...)``.

    Conv-like layers get N(0, 0.02) weights; BatchNorm2d layers get N(1, 0.02)
    weights and zero bias.  Modules of any other type are left untouched.
    """
    classname = m.__class__.__name__
    try:
        if classname.find("Conv") != -1:
            torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
        elif classname.find("BatchNorm2d") != -1:
            torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
            torch.nn.init.constant_(m.bias.data, 0.0)
    except AttributeError:
        # A matching module without weight/bias tensors (e.g. affine=False
        # BatchNorm) is simply skipped.  The original bare `except:` swallowed
        # *every* error (including typos and KeyboardInterrupt).
        return
def adjust_param(cur_epoch, total_epoch):
    """Gaussian ramp-up weight: exp(-5 * (1 - t)^2) with t = cur_epoch / total_epoch.

    Rises smoothly from exp(-5) at epoch 0 up to 1.0 at the final epoch.
    """
    progress = float(cur_epoch) / total_epoch
    return math.exp(-5.0 * (1.0 - progress) ** 2)
def iou(output, target, n_classes=6):
    """Per-class IoU in percent, with the mean IoU appended as the last entry.

    :param output: logits tensor of shape (B, C, ...); argmax is taken over dim 1
    :param target: integer label tensor matching output's argmax shape
    :param n_classes: number of classes
    :return: numpy array of length n_classes + 1, values in percent
    """
    smooth = 1e-5
    ious = []
    output = output.argmax(dim=1)
    for cls in range(n_classes):
        pred_inds = output == cls
        target_inds = target == cls
        intersection = (pred_inds[target_inds]).sum()
        union = pred_inds.sum() + target_inds.sum() - intersection
        # smooth keeps absent classes at IoU 1 instead of 0/0
        ious.append((float(intersection)+smooth)/ (float(union) + smooth))
    # Mean over n_classes (was hard-coded /6, which broke any other class count).
    ious.append(sum(ious) / n_classes)
    return np.array(ious)*100
def tp(output, target, n_classes=6):
    """Per-class true-positive counts as a float64 array of length n_classes."""
    res = []
    for cls in range(n_classes):
        pred_inds = output == cls
        target_inds = target == cls
        res.append(float(pred_inds[target_inds].sum()))
    # np.float was removed in NumPy 1.24; use the concrete float64 dtype.
    return np.array(res, dtype=np.float64)
def fp(output, target, n_classes=6):
    """Per-class false-positive counts as a float64 array of length n_classes."""
    res = []
    for cls in range(n_classes):
        pred_inds = output == cls
        target_inds = target != cls
        res.append(float(pred_inds[target_inds].sum()))
    # np.float was removed in NumPy 1.24; use the concrete float64 dtype.
    return np.array(res, dtype=np.float64)
def fn(output, target, n_classes=6):
    """Per-class false-negative counts as a float64 array of length n_classes."""
    res = []
    for cls in range(n_classes):
        pred_inds = output != cls
        target_inds = target == cls
        res.append(float(pred_inds[target_inds].sum()))
    # np.float was removed in NumPy 1.24; use the concrete float64 dtype.
    return np.array(res, dtype=np.float64)
def tf(output, target, n_classes=6):
    """Per-class true-negative counts (name 'tf' kept for compatibility)."""
    res = []
    for cls in range(n_classes):
        pred_inds = output != cls
        target_inds = target != cls
        res.append(float(pred_inds[target_inds].sum()))
    # np.float was removed in NumPy 1.24; use the concrete float64 dtype.
    return np.array(res, dtype=np.float64)
def f1(output, target, n_classes=6):
    """Per-class F1 score in percent, with the mean appended as the last entry.

    :param output: logits tensor of shape (B, C, ...); argmax is taken over dim 1
    :param target: integer label tensor matching output's argmax shape
    :param n_classes: number of classes
    :return: numpy array of length n_classes + 1, values in percent
    """
    smooth = 1e-5
    output = output.argmax(dim=1)
    tps = tp(output, target, n_classes)  # computed once (was evaluated twice)
    f1 = (2 * tps + smooth) / \
        (2 * tps + fp(output, target, n_classes) + fn(output, target, n_classes) + smooth)
    # Mean over n_classes (was hard-coded /6, which broke any other class count).
    f1 = np.append(f1, np.sum(f1) / n_classes)
    return f1 * 100
def log_loss(epoch, time, loss, iou, f1, lr, file_path="./log.txt"):
    """Append one tab-separated training record to *file_path*."""
    record = "epoch={}\ttime={:.3f}\tloss={:.3f}\tiou={:.3f}\tf1={:.3f}\tlr={:.6f}\n".format(
        epoch, time, loss, iou, f1, lr)
    with open(file_path, 'a+') as f:
        f.write(record)
def up_lower_limit_str(data):
    """Format a sequence as "midpoint±half-range" with two decimals."""
    lo, hi = min(data), max(data)
    center = float(lo + hi) / 2
    spread = float(hi - lo) / 2
    return "{:.2f}±{:.2f}".format(center, spread)
class UnNormalize(object):
    """Invert a Normalize transform in place: t = t * std + mean.

    NOTE(review): the loop zips mean/std against dim 0 of the input, i.e. the
    *batch* dimension of a (B,C,H,W) tensor, so each mean/std entry is applied
    to one whole sample rather than one channel.  Behavior preserved as-is —
    confirm against callers whether per-channel was intended.
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """Un-normalize *tensor* in place and return it."""
        for m, s, sample in zip(self.mean, self.std, tensor):
            sample.mul_(s).add_(m)
        return tensor
def setup_logger(name, save_dir, filename="log.txt"):
    """Create a DEBUG-level logger writing to stdout and, optionally, a file.

    :param name: logger name (loggers are cached per name by the logging module)
    :param save_dir: directory for the log file; falsy to log to stdout only
    :param filename: log file name inside save_dir
    :return: the configured logger
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")

    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, filename))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
def update_avg(new_data, data, cnt):
    """Fold the cnt-th observation *new_data* into the running average *data*."""
    assert cnt != 0
    total = new_data + (cnt - 1) * data
    return float(total) / cnt
class BerHu(nn.Module):
    """Reverse Huber (BerHu) loss: L1 below a threshold c, scaled L2 above it.

    The threshold is set adaptively to c = 0.2 * max|pred - target| per batch.
    """

    def __init__(self) -> None:
        super().__init__()
        # Keeps the division finite when the maximum residual is zero.
        self.eps = 10e-5

    def forward(self, pred, target):
        residual = torch.abs(pred - target)
        c = 0.2 * torch.max(residual)
        small = residual <= c
        quadratic = (torch.square(residual) + c ** 2) / (2 * c + self.eps)
        total = residual[small].sum() + quadratic[~small].sum()
        return total / torch.numel(pred)
def _data_part(str):
if str == "all":
return True, False
elif str == "train":
return False, True
elif str == "test":
return False, False
else:
raise KeyError | 6,272 | 32.725806 | 113 | py |
ResiDualGAN-DRDG | ResiDualGAN-DRDG-main/core/utils/data_display.py | import os
import sys
import PIL
from matplotlib import pyplot as plt
import torch
from torchvision import transforms
from PIL import Image, ImageDraw
import numpy as np
from .utils import *
import albumentations as A
from ..datasets.seg_dataset import SegDataset
import segmentation_models_pytorch as smp
from ..models.residualgan import *
# from ..datasets.seg_dataset import TransferDataset
from torchvision.utils import save_image
def load_models(models, model_type, dsa, dsb):
    """Load trained segmentation checkpoints for the dsa->dsb transfer task.

    :param models: checkpoint directory names under ``./train/res/<dsa>2<dsb>/``
    :param model_type: architecture name passed to get_model
    :param dsa: source dataset name
    :param dsb: target dataset name
    :return: list of models with weights loaded (mapped onto cuda:0)
    """
    loaded = []
    for checkpoint_name in models:
        checkpoint_dir = "./train/res/{}2{}/{}".format(dsa, dsb, checkpoint_name)
        net = get_model(model_type)
        net.load_state_dict(torch.load("{}/model.pt".format(checkpoint_dir), map_location='cuda:0'))
        loaded.append(net)
    return loaded
def load_images(images, img_size, dataset):
    """Load (image, label) pairs by index from a SegDataset with random 448x448 crops.

    :param images: dataset indices to fetch
    :param img_size: currently unused (kept for interface compatibility)
    :param dataset: dataset folder name under ``./dataset/``
    :return: (imgs, lbls) parallel lists
    """
    trans = A.Compose([
        A.RandomCrop(448, 448)
    ])
    # Fixed NameError: the imported class is SegDataset ("SegDatatset" was a typo
    # that made this function crash on every call).
    dataset = SegDataset("dataset/{}".format(dataset), train=True, transform=trans)
    imgs = []
    lbls = []
    for image in images:
        cur_img, cur_lbl = dataset[image]
        imgs.append(cur_img)
        lbls.append(cur_lbl)
    return imgs, lbls
def show_imgs_and_lbls(models, images, dsa="PotsdamIRRG" ,dsb="Vaihingen", \
    figsize=(12,8), img_size=(448, 448), model_type="DeepLabV3+"):
    """
    Render a grid of [input image | ground truth | one column per model
    prediction] and save it to ./res.pdf.

    models: ["AdaptSegNet", "MUCSS"......] -- checkpoint folder names
    images: [1, 2, 3, ...] -- dataset indices

    NOTE(review): lbl_img() and lbl_img_from_tensor() are defined in this file
    with a required `palette` parameter but are called below without one, which
    raises TypeError at runtime; a palette argument needs to be threaded through.
    """
    plt.subplots_adjust(hspace=0.01, wspace=0.01)
    models = load_models(models, model_type, dsa, dsb)
    images, labels = load_images(images, img_size, dsb)
    rows = len(images)
    # One column per model plus the input-image and ground-truth columns.
    lines = len(models)+2
    for i, image in enumerate(images):
        print(i)
        plt.subplot(rows, lines, i*lines+1)
        img_pil = transforms.ToPILImage()(image).convert("RGB")
        plt.imshow(img_pil)
        plt.axis('off')
        plt.subplot(rows, lines, i*lines+2)
        plt.imshow(lbl_img(Image.fromarray(np.uint8(labels[i]))))  # NOTE(review): missing palette arg
        plt.axis('off')
        for j, model in enumerate(models):
            model.eval()
            plt.subplot(rows, lines, i*lines+j+3)
            cur_res = torch.argmax(model(image.unsqueeze(dim=0).cuda()), dim=1)
            res_img = lbl_img_from_tensor(cur_res)  # NOTE(review): missing palette arg
            plt.imshow(res_img)
            plt.axis('off')
    plt.savefig("./res.pdf", bbox_inches="tight", dpi=450)
def lbl_img(lbl_img, palette):
    """Attach *palette* to the label image (mutates it in place) and return it."""
    colored = lbl_img
    colored.putpalette(palette)
    return colored
def lbl_img_from_tensor(lbl_torch, palette):
    """Convert an integer label tensor to a palette-colored PIL image."""
    array = np.uint8(lbl_torch.squeeze().detach().cpu().numpy())
    return lbl_img(Image.fromarray(array), palette)
def img_from_tensor(img_torch):
    """Convert an image tensor to a PIL image, dropping singleton dimensions."""
    squeezed = img_torch.squeeze()
    return transforms.ToPILImage()(squeezed)
def show_one_image(img):
    """Display a single image on an 8x6 figure with the axis hidden."""
    plt.figure(figsize=(8, 6))
    plt.imshow(img)
    plt.axis("off")
    plt.show()
def show_two_images(img1, img2):
    """Display two images side by side on an 8x6 figure, axes hidden."""
    plt.figure(figsize=(8, 6))
    for position, img in ((121, img1), (122, img2)):
        plt.subplot(position)
        plt.axis("off")
        plt.imshow(img)
    plt.show()
| 2,955 | 26.37037 | 98 | py |
3D-IWGAN | 3D-IWGAN-master/scripts/global_variables.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import socket
# Global path / hyper-parameter configuration for the Render-for-CNN style
# synthetic rendering and viewpoint-estimation pipeline.  All values are
# module-level constants imported elsewhere; edit the executable paths and
# server maps below for your machine.
g_render4cnn_root_folder = os.path.dirname(os.path.abspath(__file__))
# ------------------------------------------------------------
# PATHS
# ------------------------------------------------------------
g_blender_executable_path = 'blender' #!! MODIFY if necessary
g_matlab_executable_path = 'matlab' # !! MODIFY if necessary
g_data_folder = os.path.abspath(os.path.join(g_render4cnn_root_folder, 'data'))
g_datasets_folder = os.path.abspath(os.path.join(g_render4cnn_root_folder, 'datasets'))
g_shapenet_root_folder = os.path.join(g_datasets_folder, 'shapenetcore')
g_pascal3d_root_folder = os.path.join(g_datasets_folder, 'pascal3d')
g_sun2012pascalformat_root_folder = os.path.join(g_datasets_folder, 'sun2012pascalformat')
# ------------------------------------------------------------
# RENDER FOR CNN PIPELINE
# ------------------------------------------------------------
# (ShapeNet synset id, PASCAL class name) pairs for the 12 rendered categories.
g_shape_synset_name_pairs = [('02691156', 'aeroplane'),
                             ('02834778', 'bicycle'),
                             ('02858304', 'boat'),
                             ('02876657', 'bottle'),
                             ('02924116', 'bus'),
                             ('02958343', 'car'),
                             ('03001627', 'chair'),
                             ('04379243', 'diningtable'),
                             ('03790512', 'motorbike'),
                             ('04256520', 'sofa'),
                             ('04468005', 'train'),
                             ('03211117', 'tvmonitor')]
g_shape_synsets = [x[0] for x in g_shape_synset_name_pairs]
g_shape_names = [x[1] for x in g_shape_synset_name_pairs]
g_syn_images_folder = os.path.join(g_data_folder, 'syn_images')
g_syn_images_cropped_folder = os.path.join(g_data_folder, 'syn_images_cropped')
g_syn_images_bkg_overlaid_folder = os.path.join(g_data_folder, 'syn_images_cropped_bkg_overlaid')
g_syn_bkg_filelist = os.path.join(g_sun2012pascalformat_root_folder, 'filelist.txt')
g_syn_bkg_folder = os.path.join(g_sun2012pascalformat_root_folder, 'JPEGImages')
# Fraction of renders that get a cluttered (SUN2012) background overlay.
g_syn_cluttered_bkg_ratio = 0.8
g_blank_blend_file_path = os.path.join(g_render4cnn_root_folder, 'render_pipeline/blank.blend')
g_syn_images_num_per_category = 200000
g_syn_rendering_thread_num = 20
# Rendering is computational demanding. you may want to consider using multiple servers.
#g_hostname_synset_idx_map = {'<server1-hostname>': [0,1],
#                             '<server2-hostname>': [2,3,4],
#                             '<server3-hostname>': [5,6,7],
#                             '<server4-hostname>':[8,9],
#                             '<server5-hostname>':[10,11]}
# Default: this machine handles all 12 synsets.  NOTE(review): under Python 3,
# range(12) is a lazy range object, not a list; fine for iteration.
g_hostname_synset_idx_map = {socket.gethostname(): range(12)}
# Crop and overlay is IO-heavy, running on local FS is much faster
g_crop_hostname_synset_idx_map = {socket.gethostname(): range(12)}
g_overlay_hostname_synset_idx_map = {socket.gethostname(): range(12)}
# view and truncation distribution estimation
g_matlab_kde_folder = os.path.join(g_render4cnn_root_folder, 'render_pipeline/kde/matlab_kde_package')
g_view_statistics_folder = os.path.join(g_data_folder, 'view_statistics')
g_view_distribution_folder = os.path.join(g_data_folder, 'view_distribution')
g_view_distribution_files = dict(zip(g_shape_synsets, [os.path.join(g_view_distribution_folder, name+'.txt') for name in g_shape_names]))
g_truncation_statistics_folder = os.path.join(g_data_folder, 'truncation_statistics')
g_truncation_distribution_folder = os.path.join(g_data_folder, 'truncation_distribution')
g_truncation_distribution_files = dict(zip(g_shape_synsets, [os.path.join(g_truncation_distribution_folder, name+'.txt') for name in g_shape_names]))
# render_model_views
# Random-lighting ranges used by the Blender rendering script.
g_syn_light_num_lowbound = 0
g_syn_light_num_highbound = 6
g_syn_light_dist_lowbound = 8
g_syn_light_dist_highbound = 20
g_syn_light_azimuth_degree_lowbound = 0
g_syn_light_azimuth_degree_highbound = 360
g_syn_light_elevation_degree_lowbound = -90
g_syn_light_elevation_degree_highbound = 90
g_syn_light_energy_mean = 2
g_syn_light_energy_std = 2
g_syn_light_environment_energy_lowbound = 0
g_syn_light_environment_energy_highbound = 1
# ------------------------------------------------------------
# VIEW_ESTIMATION
# ------------------------------------------------------------
g_syn_images_lmdb_folder = os.path.join(g_data_folder, 'syn_lmdbs')
# NOTE(review): hard-coded absolute path specific to one machine; the commented
# expression is the portable default.
g_syn_images_lmdb_pathname_prefix = '/ShapeNetDL/projects/render4cnn/syn_lmdb' #os.path.join(g_syn_images_lmdb_folder, 'syn_lmdb')
g_syn_images_resize_dim = 227
g_images_resize_dim = 227
g_real_images_folder = os.path.join(g_data_folder, 'real_images')
g_real_images_voc12val_det_bbox_folder = os.path.join(g_real_images_folder, 'voc12val_det_bbox')
g_real_images_voc12val_easy_gt_bbox_folder = os.path.join(g_real_images_folder, 'voc12val_easy_gt_bbox')
g_real_images_voc12train_all_gt_bbox_folder = os.path.join(g_real_images_folder, 'voc12train_all_gt_bbox')
g_real_images_voc12train_flip = 1
g_real_images_voc12train_aug_n = 1
g_real_images_voc12train_jitter_IoU = 1
g_real_images_lmdb_folder = os.path.join(g_data_folder, 'real_lmdbs')
g_real_images_voc12train_all_gt_bbox_lmdb_prefix = os.path.join(g_real_images_lmdb_folder, 'voc12train_all_gt_bbox_lmdb')
g_detection_results_folder = os.path.join(g_data_folder, 'detection_results')
g_rcnn_detection_bbox_mat_filelist = os.path.join(g_detection_results_folder, 'bbox_mat_filelist.txt')
# testing
g_caffe_param_file = os.path.join(g_render4cnn_root_folder,'caffe_models', 'render4cnn_3dview.caffemodel')
g_caffe_deploy_file = os.path.join(g_render4cnn_root_folder, 'caffe_models', 'deploy.prototxt')
g_image_mean_file = os.path.join(g_render4cnn_root_folder, 'caffe_models', 'imagenet_mean.npy')
g_caffe_prob_keys = ['fc-azimuth','fc-elevation','fc-tilt']
g_test_batch_size = 64
| 5,852 | 52.697248 | 149 | py |
ConjugateGradient_GAN | ConjugateGradient_GAN-master/main.py | """
forked from https://github.com/juntang-zhuang/Adabelief-Optimizer/tree/update_0.2.0/PyTorch_Experiments/wgan/main.py
"""
from __future__ import print_function
import argparse
import os
import random
import wandb
import uuid
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.utils as vision_util
import torch.nn.functional as F
from tqdm import tqdm
from utils.metric import FloatMetric, TensorMetric
from utils.lib_version import print_libs_version
from utils.fid_score import calculate_fid_given_paths
from optimizers.set_optim import set_optimizers
from utils.set_model import set_models
from utils.log_utils import date_str, set_otf
from utils.data_utils import build_dataset
from utils.lr_scheduler import build_scheduler
# Command-line interface for the GAN/WGAN training script; all values can be
# overridden by a wandb sweep (see `opt = wandb.config` below).
parser = argparse.ArgumentParser()
# Problem Setting
parser.add_argument('--dataset', required=False, default='cifar10', choices=[
    'cifar10', 'mnist', 'lsun', 'celeba'], help='cifar10 | mnist | ( imagenet | folder | lfw | fake : Not Supported)')
parser.add_argument('--classes', default='bedroom',
                    help='comma separated list of classes for the lsun data set')
parser.add_argument('--dataroot', required=False, default='./',help='path to dataset')
parser.add_argument('--batchsize', type=int, default=64, help='input batch size')
parser.add_argument('--iters_budget', type=int, default=100000, help='number of iter to train form, default is 100K')
# GAN Specific Setting
parser.add_argument('--imagesize', type=int, default=64, help='the height / width of the input image to network')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ndf', type=int, default=64, help='dimension of discrim filters in first conv layer.')
parser.add_argument('--ngf', type=int, default=64, help='dimension of gen filters in first conv layer.')
parser.add_argument("--n_critic", type=int, default=1, help="number of training iter for discriminator per iter")
parser.add_argument("--clip_value", type=float, default=0.01, help="(only for WGAN) lower and upper clip value for disc. weights")
parser.add_argument('--model', default='GAN', choices=['GAN', 'WGAN'], help = 'GAN | (WGAN : Not Supported')
parser.add_argument('--SN', action='store_true', help = 'If you want to use SN when using GAN, set a flag.')
# Hardware Setting and for Reproduction
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--cuda', default=True)
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--manualSeed', type=int, help='manual seed')
# For Optimizer and Learning Rate
parser.add_argument('--scheduler_type', default='ConstLR', choices=['ConstLR', 'ExponentialLR', 'SqrtLR', 'TTScaleLR', 'StepDecayLR'], help='ConstLR, ExponentialLR, SqrtLR, TTScaleLR or StepDecayLR')
parser.add_argument('--lr_D', type=float, default=0.0002, help='learning rate for discrim, default=0.0002')
parser.add_argument('--lr_G', type=float, default=0.0002, help='learning rate for gen, default=0.0002')
parser.add_argument('--optimizer', default='adam', type=str, choices=['adam', 'momentum_sgd', 'sgd', 'cgd_dy', 'cgd_fr', 'cgd_hs', 'cgd_hz', 'cgd_fr_prp', 'cgd_hs_dy', 'cgd_prp'], help='Optimizer')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--beta2', default=0.999, type=float, help='Beta2')
parser.add_argument('--eps',default=1e-8, type=float, help='eps')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--beta_momentum_coeff', default=1.0, type=float, help='beta coefficient for conjugate gradient')
# Logger
parser.add_argument("--update_frequency", type=int, default=1000, help="number of iter frequency for logging")
parser.add_argument("--wandb_entity", type=str, default='XXXXXX', help="entitiy of wandb team")
parser.add_argument('--debug_mode', action = 'store_true')
args = parser.parse_args()
print_libs_version()
# ============= Wandb Setup =============
wandb_project_name = f"{args.scheduler_type}_{args.dataset}_{args.model}_{args.optimizer}"
exp_name_suffix = str(uuid.uuid4())
wandb_exp_name = f"{exp_name_suffix}" # example: XX823748291
wandb.init(config=args,
           project=wandb_project_name,
           name=wandb_exp_name,
           entity=args.wandb_entity)
# update hyperparams for reflecting wandb sweep
opt = wandb.config
print('Updated HyperParams:')
for k, v in sorted(opt.items()):
    print('\t{}: {}'.format(k, v))
# ============= Initialize and Determine Seeds =============
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
# ============= Log Dirs Setup =============
# Requires OUTPUT_DIR to be set in the environment (raises KeyError otherwise).
output_dir = os.environ['OUTPUT_DIR']
# NOTE(review): this rebinding shadows the imported date_str() helper; it only
# works because the function is called exactly once.
date_str = date_str()
opt.outf = set_otf(opt, wandb_project_name)
try:
    os.makedirs(output_dir + wandb_project_name)
except OSError:
    pass
try:
    os.makedirs(output_dir + opt.outf)
except OSError:
    pass
try:
    os.makedirs(output_dir + opt.outf + '/img')
except OSError:
    pass
# ============= Decide which device we want to run on =============
device = torch.device("cuda:0" if (torch.cuda.is_available() and opt.ngpu > 0) else "cpu")
print(f'device : {device}')
# ============= Build Data Loader =============
dataset, n_channel = build_dataset(opt)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchsize,
                                         shuffle=True,
                                         num_workers=int(opt.workers))
# ============= Weight Initialization =============
def weights_init(m):
    """DCGAN weight init for model.apply(): N(0,.02) convs; N(1,.02)/zero-bias batch-norms."""
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
# ============= Build Model and Loss =============
netD, netG = set_models(weights_init,
                        model=opt.model,
                        SN=opt.SN,
                        ngpu=int(opt.ngpu), nz=int(opt.nz), ngf=int(opt.ngf), ndf=int(opt.ndf), nc=n_channel)
criterion = nn.BCELoss()
# Fixed latent batch reused for every sample grid, so progress is comparable.
fixed_noise = torch.randn(opt.batchsize, int(opt.nz), 1, 1).cuda()
real_label = 1
fake_label = 0
# ============= Setup Optimizer and LR Scheduler =============
optimizerD = set_optimizers(opt.optimizer.lower(),
                            netD,
                            opt.lr_D,
                            opt.momentum,
                            opt.beta1, opt.beta2, opt.eps, # For Adaptive Optimizer
                            opt.beta_momentum_coeff
                            )
optimizerG = set_optimizers(opt.optimizer.lower(),
                            netG,
                            opt.lr_G,
                            opt.momentum,
                            opt.beta1, opt.beta2, opt.eps, # For Adaptive Optimizer
                            opt.beta_momentum_coeff
                            )
scheduler_D, scheduler_G = build_scheduler(opt, optimizerD, optimizerG)
# ============= Convert all training data into png format =============
# Real images are dumped once as PNGs; FID later compares this folder against
# freshly generated fakes.
real_folder = output_dir + f'all_real_imgs_{opt.dataset}'
if not os.path.exists(real_folder):
    os.mkdir(real_folder)
    for i in tqdm(range(len(dataset))):
        vision_util.save_image(dataset[i][0], real_folder + '/{}.png'.format(i), normalize=True)
fake_folder = output_dir + opt.outf +'/'+ f'all_fake_imgs_{opt.dataset}'
if not os.path.exists(fake_folder):
    os.mkdir(fake_folder)
# ============= Setup iter and Epochs =============
iters_per_epoch = int(len(dataset) / opt.batchsize) + 1
epochs = int(opt.iters_budget / iters_per_epoch) + 1
print("Iterations Budget:")
print("\tTotal Iterations: {}".format(opt.iters_budget))
print("\tBatch Size : {}".format(opt.batchsize))
print("\tData Size : {}".format(len(dataset)))
print("\tUpdate Iter Interval : {}".format(opt.update_frequency))
print("\tIterations per Epoch : {}".format(iters_per_epoch))
print("\tTotal Epochs : {}".format(epochs))
# ============= Training Loop =============
iter = 0  # NOTE(review): shadows the builtin iter()
for epoch in range(epochs):
    print('Epoch {}'.format(epoch))
    # Running averages, reset each epoch and after every logging flush.
    losses_D = TensorMetric('losses_D')
    losses_G = TensorMetric('losses_G')
    norms_D = FloatMetric('norms_D')
    norms_G = FloatMetric('norms_G')
    for i, data in enumerate(dataloader, 0):
        iter += 1
        real_imgs = data[0].cuda()
        batch_size = real_imgs.size(0)
        # For GAN
        label_real = torch.ones(batch_size).cuda()
        label_fake = torch.zeros(batch_size).cuda()
        # ============= Training Discriminator =============
        optimizerD.zero_grad()
        # Sample noise as netG input
        z = torch.randn(batch_size, int(opt.nz), 1, 1).cuda()
        # Generate a batch of images (detached: no generator gradient here)
        gen_imgs = netG(z).detach()
        # ============= Compute Adversarial Loss =============
        if opt.model == 'GAN':
            loss_D = 0.5 * (
                F.binary_cross_entropy(netD(real_imgs).squeeze(), label_real) +
                F.binary_cross_entropy(netD(gen_imgs).squeeze(), label_fake))
        elif opt.model == 'WGAN':
            loss_D = -torch.mean(netD(real_imgs)) + torch.mean(netD(gen_imgs))
        losses_D.update(loss_D)
        # ============= Compute Gradient and Backprop =============
        loss_D.backward()
        optimizerD.step()
        # Clip weights of netD (weight clipping enforces the WGAN Lipschitz constraint)
        if opt.model == 'WGAN':
            for p in netD.parameters():
                p.data.clamp_(-opt.clip_value, opt.clip_value)
        # ============= Compute Grad Norm =============
        norm_D = 0
        parameters_D = [p for p in netD.parameters() if p.grad is not None and p.requires_grad]
        for p in parameters_D:
            param_norm = p.grad.detach().data.norm(2)
            norm_D += param_norm.item() ** 2
        norm_D = norm_D ** 0.5
        norms_D.update(norm_D)
        # ============= Training Generator =============
        # Train the netG every n_critic iterations
        if i % opt.n_critic == 0:
            optimizerG.zero_grad()
            # Generate a batch of images (same z, now with generator gradients)
            gen_imgs = netG(z)
            # ============= Compute Adversarial Loss =============
            if opt.model == 'GAN':
                loss_G = F.binary_cross_entropy(netD(gen_imgs).squeeze(), label_real)
            elif opt.model == 'WGAN':
                # Adversarial loss
                loss_G = -torch.mean(netD(gen_imgs))
            losses_G.update(loss_G)
            # ============= Compute Gradient and Backprop =============
            loss_G.backward()
            optimizerG.step()
            # ============= Compute Grad Norm =============
            norm_G = 0
            parameters_G = [p for p in netG.parameters() if p.grad is not None and p.requires_grad]
            for p in parameters_G:
                param_norm = p.grad.detach().data.norm(2)
                norm_G += param_norm.item() ** 2
            norm_G = norm_G ** 0.5
            norms_G.update(norm_G)
        if iter % opt.update_frequency == 0:
            print(f'update and save at iteration: {iter} / epoch: {epoch}')
            vision_util.save_image(real_imgs,
                                   '%s/real_samples.png' % (output_dir + opt.outf + '/img'),
                                   normalize=True)
            fake = netG(fixed_noise)
            vision_util.save_image(fake.detach(),
                                   '%s/fake_samples_iter_%07d.png' % (output_dir + opt.outf + '/img', iter),
                                   normalize=True)
            # ============= FID =============
            fid_batch_size = 256
            fake_image_num_sample = 10000
            generation_loop_iter = int(fake_image_num_sample/fid_batch_size)
            # test netG, and calculate FID score
            netG.eval()
            # NOTE(review): this inner loop rebinds `i` from the enclosing
            # dataloader loop; harmless only because `i` is reassigned on the
            # next dataloader iteration before being read again.
            for i in range(generation_loop_iter):
                noise = torch.randn(fid_batch_size, int(opt.nz), 1, 1).cuda()
                fake = netG(noise)
                for j in range(fake.shape[0]):
                    # replace fake images which is reflected current status
                    vision_util.save_image(fake.detach()[j,...], fake_folder + '/{}.png'.format(j + i * fid_batch_size), normalize=True)
            netG.train()
            # calculate FID score
            fid_value = calculate_fid_given_paths([real_folder, fake_folder],
                                                  fid_batch_size//2,
                                                  cuda=True)
            print('FID: {}'.format(fid_value))
            # NOTE(review): loss_G / norm_G hold values from the most recent
            # generator step, which with n_critic > 1 may lag the current iter.
            wandb.log({
                'avg_losses_D' : losses_D.avg.item(),
                'avg_losses_G' : losses_G.avg.item(),
                'avg_norms_D' : norms_D.avg,
                'avg_norms_G' : norms_G.avg,
                'loss_D': loss_D,
                'loss_G': loss_G,
                'norm_D': norm_D,
                'norm_G': norm_G,
                'fid' : fid_value,
                'epoch': epoch,
                'iter': iter,
                'lr_D': scheduler_D.get_last_lr()[0],
                'lr_G': scheduler_G.get_last_lr()[0],
            })
            print(f'clear accumulated gradients and losses')
            losses_D = TensorMetric('losses_D')
            losses_G = TensorMetric('losses_G')
            norms_D = FloatMetric('norms_D')
            norms_G = FloatMetric('norms_G')
    # ============= Update Scheduler for Each Step =============
    # NOTE(review): despite the comment above, schedulers advance once per
    # *epoch* (this is outside the dataloader loop).
    scheduler_G.step()
    scheduler_D.step()
| 13,951 | 40.278107 | 199 | py |
ConjugateGradient_GAN | ConjugateGradient_GAN-master/optimizers/set_optim.py | from optimizers import *
import torch.optim as optim
import torch
import sys
import os
from util import build_optimizer, OptimizerSetting
def set_optimizers(optimizer, model, lr, momentum, beta1, beta2, eps, beta_momentum_coeff):
    """Build the optimizer selected by name for *model*'s parameters.

    :param optimizer: 'adam', 'sgd', 'momentum_sgd', 'rmsprop', or one of the
        conjugate-gradient variants 'cgd_fr', 'cgd_prp', 'cgd_hs', 'cgd_dy',
        'cgd_hs_dy', 'cgd_fr_prp', 'cgd_hz'
    :param lr: learning rate
    :param momentum: momentum for momentum_sgd
    :param beta1, beta2, eps: Adam hyper-parameters
    :param beta_momentum_coeff: beta coefficient for the conjugate-gradient methods
    :return: the constructed optimizer; an unrecognized name is returned
        unchanged (preserving the original fall-through behavior)
    """
    # Map cgd variant names to their beta update rules; collapses seven
    # near-identical elif branches from the original into one table.
    cgd_beta_rules = {
        'cgd_fr': 'FR',
        'cgd_prp': 'PRP',
        'cgd_hs': 'HS',
        'cgd_dy': 'DY',
        'cgd_hs_dy': 'HS_DY',
        'cgd_fr_prp': 'FR_PRP',
        'cgd_hz': 'HZ',
    }
    if optimizer == 'adam':
        return optim.Adam(model.parameters(), lr=lr, betas=(beta1, beta2), eps=eps)
    if optimizer == 'sgd':
        return torch.optim.SGD(model.parameters(), lr=lr)
    if optimizer == 'momentum_sgd':
        return torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)
    if optimizer == 'rmsprop':
        return torch.optim.RMSprop(model.parameters(), lr=lr)
    if optimizer in cgd_beta_rules:
        return build_optimizer(
            OptimizerSetting(name='cgd',
                             weight_decay = 0,
                             lr=lr,
                             beta_update_rule=cgd_beta_rules[optimizer],
                             beta_momentum_coeff = beta_momentum_coeff,
                             model=model))
    # Unknown name: return the argument unchanged, as the original did.
    return optimizer
ConjugateGradient_GAN | ConjugateGradient_GAN-master/utils/lr_scheduler.py | from torch.optim import lr_scheduler
import math
# ================
# Set LR Scheduler
# Reference https://github.com/christiancosgrove/pytorch-spectral-normalization-gan/blob/12dcf945a6359301d63d1e0da3708cd0f0590b19/main.py#L55
# ================
def build_scheduler(opt, optimizerD, optimizerG):
    """Build learning-rate schedulers for the discriminator and generator.

    Arguments:
        opt: options object; ``opt.scheduler_type`` selects the schedule
            ('ConstLR', 'ExponentialLR', 'SqrtLR', 'StepDecayLR' or 'TTScaleLR').
        optimizerD: optimizer of the discriminator.
        optimizerG: optimizer of the generator.

    Returns:
        (scheduler_D, scheduler_G) tuple of ``torch.optim.lr_scheduler``
        instances, each attached to its respective optimizer.

    Raises:
        ValueError: if ``opt.scheduler_type`` is not one of the known types.
    """
    if opt.scheduler_type == 'ConstLR':
        # Constant learning rate: the multiplier is always 1.
        scheduler_D = lr_scheduler.LambdaLR(optimizerD, lr_lambda=lambda x: 1.0)
        scheduler_G = lr_scheduler.LambdaLR(optimizerG, lr_lambda=lambda x: 1.0)
    elif opt.scheduler_type == 'ExponentialLR':
        scheduler_D = lr_scheduler.ExponentialLR(optimizerD, gamma=0.99999)
        scheduler_G = lr_scheduler.ExponentialLR(optimizerG, gamma=0.99999)
    elif opt.scheduler_type == 'SqrtLR':
        # lr_t = lr_0 / sqrt(t + 1)
        scheduler_D = lr_scheduler.LambdaLR(optimizerD, lr_lambda=lambda steps: 1 / math.sqrt(steps + 1))
        scheduler_G = lr_scheduler.LambdaLR(optimizerG, lr_lambda=lambda steps: 1 / math.sqrt(steps + 1))
    elif opt.scheduler_type == 'StepDecayLR':
        # ref: https://katsura-jp.hatenablog.com/entry/2019/01/30/183501#LambdaLR
        # (0.9)^5 = 0.59049, (0.9)^10 = 0.3486784401
        scheduler_D = lr_scheduler.StepLR(optimizerD, step_size=10000, gamma=0.9)
        # BUGFIX: the generator scheduler was previously attached to
        # optimizerD, so the generator learning rate was never decayed (and
        # the discriminator's was stepped by two schedulers at once).
        scheduler_G = lr_scheduler.StepLR(optimizerG, step_size=10000, gamma=0.9)
    elif opt.scheduler_type in ('TTScaleLR', 'TTScaleLR '):
        # Two-time-scale decay: lr_t = lr_0 * (t + 1)^(-eta).  The
        # trailing-space spelling is accepted for backward compatibility with
        # configs written against the original (typo'd) comparison.
        eta_a = 0.75
        eta_b = 0.75
        scheduler_D = lr_scheduler.LambdaLR(optimizerD, lr_lambda=lambda steps: (steps + 1) ** (-eta_a))
        scheduler_G = lr_scheduler.LambdaLR(optimizerG, lr_lambda=lambda steps: (steps + 1) ** (-eta_b))
    else:
        # Previously an unknown type fell through to the return statement and
        # crashed with UnboundLocalError; fail with an explicit message.
        raise ValueError('Unknown scheduler_type: {!r}'.format(opt.scheduler_type))
    return scheduler_D, scheduler_G
ConjugateGradient_GAN | ConjugateGradient_GAN-master/utils/set_model.py | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import torch.nn.functional as F
def set_models(weights_init, model="GAN", netG_path='', netD_path='', SN=False, ngpu=1 ,nz=100, ngf=64, ndf=64, nc=3):
    """Construct the generator/discriminator pair for the requested GAN variant.

    Arguments:
        weights_init: callable applied to every submodule of both networks.
        model: "GAN" (DCGAN, optional spectral norm) or "WGAN".
        netG_path, netD_path: optional paths to pretrained state dicts.
        SN: if True, use spectral normalization instead of BatchNorm in the
            GAN discriminator's middle conv layers.
        ngpu: number of GPUs (WGAN forward uses data_parallel when > 1).
        nz: latent-vector size; ngf/ndf: feature-map widths; nc: image channels.

    Returns:
        (netD, netG) tuple of CUDA models with ``weights_init`` applied.

    NOTE(review): any ``model`` value other than "GAN"/"WGAN" raises NameError
    at ``Generator(ngpu)`` below -- confirm whether an explicit error is wanted.
    """
    if model == "WGAN":
        class Generator(nn.Module):
            def __init__(self, ngpu):
                super(Generator, self).__init__()
                self.ngpu = ngpu
                self.main = nn.Sequential(
                    # input is Z, going into a convolution
                    nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
                    nn.BatchNorm2d(ngf * 8),
                    nn.ReLU(True),
                    # state size. (ngf*8) x 4 x 4
                    nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
                    nn.BatchNorm2d(ngf * 4),
                    nn.ReLU(True),
                    # state size. (ngf*4) x 8 x 8
                    nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
                    nn.BatchNorm2d(ngf * 2),
                    nn.ReLU(True),
                    # state size. (ngf*2) x 16 x 16
                    nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
                    nn.BatchNorm2d(ngf),
                    nn.ReLU(True),
                    # state size. (ngf) x 32 x 32
                    nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
                    nn.Tanh()
                    # state size. (nc) x 64 x 64
                )
            def forward(self, input):
                # Split the batch across GPUs only when running on CUDA with
                # more than one device.
                if input.is_cuda and self.ngpu > 1:
                    output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
                else:
                    output = self.main(input)
                return output
        class Discriminator(nn.Module):
            def __init__(self, ngpu):
                super(Discriminator, self).__init__()
                self.ngpu = ngpu
                self.main = nn.Sequential(
                    # input is (nc) x 64 x 64
                    nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                    nn.LeakyReLU(0.2, inplace=True),
                    # state size. (ndf) x 32 x 32
                    nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                    nn.BatchNorm2d(ndf * 2),
                    nn.LeakyReLU(0.2, inplace=True),
                    # state size. (ndf*2) x 16 x 16
                    nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                    nn.BatchNorm2d(ndf * 4),
                    nn.LeakyReLU(0.2, inplace=True),
                    # state size. (ndf*4) x 8 x 8
                    nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
                    nn.BatchNorm2d(ndf * 8),
                    nn.LeakyReLU(0.2, inplace=True),
                    # state size. (ndf*8) x 4 x 4
                    nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
                    # NOTE(review): a Sigmoid output is unusual for a WGAN
                    # critic (which is normally unbounded) -- confirm intended.
                    nn.Sigmoid()
                )
            def forward(self, input):
                if input.is_cuda and self.ngpu > 1:
                    output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
                else:
                    output = self.main(input)
                return output.view(-1, 1).squeeze(1)
    elif model == 'GAN':
        #ref https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html
        class Generator(nn.Module):
            def __init__(self, ngpu):
                super(Generator, self).__init__()
                self.ngpu = ngpu
                self.main = nn.Sequential(
                    # input is Z, going into a convolution
                    nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),
                    nn.BatchNorm2d(ngf * 8),
                    nn.ReLU(True),
                    # state size. (ngf*8) x 4 x 4
                    nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
                    nn.BatchNorm2d(ngf * 4),
                    nn.ReLU(True),
                    # state size. (ngf*4) x 8 x 8
                    nn.ConvTranspose2d( ngf * 4, ngf * 2, 4, 2, 1, bias=False),
                    nn.BatchNorm2d(ngf * 2),
                    nn.ReLU(True),
                    # state size. (ngf*2) x 16 x 16
                    nn.ConvTranspose2d( ngf * 2, ngf, 4, 2, 1, bias=False),
                    nn.BatchNorm2d(ngf),
                    nn.ReLU(True),
                    # state size. (ngf) x 32 x 32
                    nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
                    nn.Tanh()
                    # state size. (nc) x 64 x 64
                )
            def forward(self, input):
                return self.main(input)
        class Discriminator(nn.Module):
            def __init__(self, ngpu):
                super(Discriminator, self).__init__()
                self.ngpu = ngpu
                # input is (nc) x 64 x 64
                layers = [nn.Conv2d(nc, ndf, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True)]
                # state size. (ndf) x 32 x 32
                # With SN enabled, spectral norm replaces BatchNorm in each of
                # the three middle conv stages.
                if SN == True:
                    layers += [nn.utils.spectral_norm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False))]
                else:
                    layers += [nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 2)]
                layers += [nn.LeakyReLU(0.2, inplace=True)]
                # state size. (ndf*2) x 16 x 16
                if SN == True:
                    layers += [nn.utils.spectral_norm(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False))]
                else:
                    layers += [nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 4)]
                layers += [nn.LeakyReLU(0.2, inplace=True)]
                # state size. (ndf*4) x 8 x 8
                if SN == True:
                    layers += [nn.utils.spectral_norm(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False))]
                else:
                    layers += [nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 8)]
                layers += [nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),nn.Sigmoid()]
                self.main = nn.Sequential(*layers)
            def forward(self, input):
                return self.main(input)
    # Both networks are moved to GPU unconditionally (requires CUDA).
    netG = Generator(ngpu).cuda()
    netG.apply(weights_init)
    if netG_path != '':
        netG.load_state_dict(torch.load(netG_path))
    netD = Discriminator(ngpu).cuda()
    netD.apply(weights_init)
    if netD_path != '':
        netD.load_state_dict(torch.load(netD_path))
    num_params_gen = sum(p.numel() for p in netG.parameters() if p.requires_grad)
    num_params_disc = sum(p.numel() for p in netD.parameters() if p.requires_grad)
    print('Number of parameters for generator: %d and discriminator: %d' % (num_params_gen, num_params_disc))
    return netD, netG
| 7,074 | 44.352564 | 118 | py |
ConjugateGradient_GAN | ConjugateGradient_GAN-master/utils/inception.py | """
forked from https://github.com/mseitzer/pytorch-fid/blob/master/inception.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
try:
from torchvision.models.utils import load_state_dict_from_url, load_state_dict
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Inception weights ported to Pytorch from
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
import os
FID_WEIGHTS_PATH = os.environ['FID_WEIGHTS_PATH']
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps"""
    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3
    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,   # First max pooling features
        192: 1,  # Second max pooling featurs
        768: 2,  # Pre-aux classifier features
        2048: 3  # Final average pooling features
    }
    # NOTE(review): mutable default argument below; harmless here because
    # output_blocks is only read (sorted/max), never mutated.
    def __init__(self,
                 output_blocks=[DEFAULT_BLOCK_INDEX],
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 use_fid_inception=True):
        """Build pretrained InceptionV3
        Parameters
        ----------
        output_blocks : list of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1)
        requires_grad : bool
            If true, parameters of the model require gradients. Possibly useful
            for finetuning the network
        use_fid_inception : bool
            If true, uses the pretrained Inception model used in Tensorflow's
            FID implementation. If false, uses the pretrained Inception model
            available in torchvision. The FID Inception model has different
            weights and a slightly different structure from torchvision's
            Inception model. If you want to compute FID scores, you are
            strongly advised to set this parameter to true to get comparable
            results.
        """
        super(InceptionV3, self).__init__()
        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)
        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'
        self.blocks = nn.ModuleList()
        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = models.inception_v3(pretrained=True)
        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))
        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1,
                inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))
        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))
        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a,
                inception.Mixed_7b,
                inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))
        # Freeze (or unfreeze) every parameter in one pass.
        for param in self.parameters():
            param.requires_grad = requires_grad
    def forward(self, inp):
        """Get Inception feature maps
        Parameters
        ----------
        inp : torch.autograd.Variable
            Input tensor of shape Bx3xHxW. Values are expected to be in
            range (0, 1)
        Returns
        -------
        List of torch.autograd.Variable, corresponding to the selected output
        block, sorted ascending by index
        """
        outp = []
        x = inp
        if self.resize_input:
            x = F.interpolate(x,
                              size=(299, 299),
                              mode='bilinear',
                              align_corners=False)
        if self.normalize_input:
            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)
        # Run blocks in order, collecting the requested ones and stopping as
        # soon as the last needed block has been computed.
        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)
            if idx == self.last_needed_block:
                break
        return outp
def fid_inception_v3():
    """Build pretrained Inception model for FID computation
    The Inception model for FID computation uses a different set of weights
    and has a slightly different structure than torchvision's Inception.
    This method first constructs torchvision's Inception and then patches the
    necessary parts that are different in the FID Inception model.
    """
    inception = models.inception_v3(num_classes=1008,
                                    aux_logits=False,
                                    pretrained=False)
    # Replace the stock Inception blocks with the TF-compatible variants
    # defined below (patched pooling behavior).
    inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
    inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
    inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
    inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
    inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
    inception.Mixed_7b = FIDInceptionE_1(1280)
    inception.Mixed_7c = FIDInceptionE_2(2048)
    # Weights are loaded from a local file given by the FID_WEIGHTS_PATH
    # environment variable (read at module import time) instead of being
    # downloaded from the original FID_WEIGHTS_URL.
    # state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
    # inception.load_state_dict(state_dict)
    # state_dict = load_state_dict(FID_WEIGHTS_PATH)
    inception.load_state_dict(torch.load(FID_WEIGHTS_PATH))
    return inception
class FIDInceptionA(models.inception.InceptionA):
    """InceptionA block with TF-compatible average pooling for FID."""
    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)
    def forward(self, x):
        # Four parallel branches, concatenated along the channel axis.
        out_1x1 = self.branch1x1(x)
        out_5x5 = self.branch5x5_2(self.branch5x5_1(x))
        out_3x3dbl = self.branch3x3dbl_3(
            self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        # Patch: TensorFlow's average pool excludes the zero padding from the
        # average, hence count_include_pad=False.
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                              count_include_pad=False)
        out_pool = self.branch_pool(pooled)
        return torch.cat([out_1x1, out_5x5, out_3x3dbl, out_pool], 1)
class FIDInceptionC(models.inception.InceptionC):
    """InceptionC block with TF-compatible average pooling for FID."""
    def __init__(self, in_channels, channels_7x7):
        super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_7x7 = self.branch7x7_3(self.branch7x7_2(self.branch7x7_1(x)))
        # Double 7x7 branch: five stacked convolutions.
        dbl = self.branch7x7dbl_1(x)
        for stage in (self.branch7x7dbl_2, self.branch7x7dbl_3,
                      self.branch7x7dbl_4, self.branch7x7dbl_5):
            dbl = stage(dbl)
        # Patch: TensorFlow's average pool excludes the zero padding from the
        # average, hence count_include_pad=False.
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                              count_include_pad=False)
        out_pool = self.branch_pool(pooled)
        return torch.cat([out_1x1, out_7x7, dbl, out_pool], 1)
class FIDInceptionE_1(models.inception.InceptionE):
    """First InceptionE block with TF-compatible average pooling for FID."""
    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)
    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        # 3x3 branch forks into two sub-branches that are concatenated.
        stem_3x3 = self.branch3x3_1(x)
        out_3x3 = torch.cat(
            [self.branch3x3_2a(stem_3x3), self.branch3x3_2b(stem_3x3)], 1)
        # Double 3x3 branch forks the same way after two stacked convs.
        stem_dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_dbl = torch.cat(
            [self.branch3x3dbl_3a(stem_dbl), self.branch3x3dbl_3b(stem_dbl)], 1)
        # Patch: TensorFlow's average pool excludes the zero padding from the
        # average, hence count_include_pad=False.
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                              count_include_pad=False)
        out_pool = self.branch_pool(pooled)
        return torch.cat([out_1x1, out_3x3, out_dbl, out_pool], 1)
class FIDInceptionE_2(models.inception.InceptionE):
    """Second InceptionE block patched for FID computation."""
    def __init__(self, in_channels):
        super(FIDInceptionE_2, self).__init__(in_channels)
    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        stem_3x3 = self.branch3x3_1(x)
        out_3x3 = torch.cat(
            [self.branch3x3_2a(stem_3x3), self.branch3x3_2b(stem_3x3)], 1)
        stem_dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_dbl = torch.cat(
            [self.branch3x3dbl_3a(stem_dbl), self.branch3x3dbl_3b(stem_dbl)], 1)
        # Patch: the FID Inception model uses max pooling here instead of the
        # average pooling described in the paper; this mirrors that (likely
        # accidental) choice so results match the reference weights.
        out_pool = self.branch_pool(
            F.max_pool2d(x, kernel_size=3, stride=1, padding=1))
        return torch.cat([out_1x1, out_3x3, out_dbl, out_pool], 1)
ConjugateGradient_GAN | ConjugateGradient_GAN-master/utils/data_utils.py | import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
def build_dataset(opt):
    """Build the training dataset selected by ``opt.dataset``.

    Arguments:
        opt: options object providing ``dataset`` (one of 'imagenet',
            'folder', 'lfw', 'lsun', 'cifar10', 'celeba', 'mnist', 'fake'),
            ``dataroot``, ``imagesize`` and, for LSUN, ``classes``.

    Returns:
        (dataset, nc): the torchvision dataset and its number of image
        channels (3 for RGB datasets, 1 for MNIST).

    Raises:
        ValueError: if ``opt.dataset`` is not a known dataset name.
    """
    if opt.dataset in ['imagenet', 'folder', 'lfw']:
        # folder dataset
        dataset = datasets.ImageFolder(root=opt.dataroot,
                                       transform=transforms.Compose([
                                           transforms.Resize(opt.imagesize),
                                           transforms.CenterCrop(opt.imagesize),
                                           transforms.ToTensor(),
                                           transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                       ]))
        nc = 3
    elif opt.dataset == 'lsun':
        # Preparing LSUN:
        #   1st: pip install lmdb
        #   2nd: git clone https://github.com/fyu/lsun
        #   3rd: python3 download.py -c bedroom -o /groups1/gcb50275/lsun
        classes = [c + '_train' for c in opt.classes.split(',')]
        dataset = datasets.LSUN(root=opt.dataroot, classes=classes,
                                transform=transforms.Compose([
                                    transforms.Resize(opt.imagesize),
                                    transforms.CenterCrop(opt.imagesize),
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                ]))
        nc = 3
    elif opt.dataset == 'cifar10':
        dataset = datasets.CIFAR10(root=opt.dataroot, download=True,
                                   transform=transforms.Compose([
                                       transforms.Resize(opt.imagesize),
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                   ]))
        nc = 3
    elif opt.dataset == 'celeba':
        # CelebA is consumed as a plain image folder (resize only, no crop).
        dataset = datasets.ImageFolder(root=opt.dataroot,
                                       transform=transforms.Compose([
                                           transforms.Resize(opt.imagesize),
                                           transforms.ToTensor(),
                                           transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                       ]))
        nc = 3
    elif opt.dataset == 'mnist':
        dataset = datasets.MNIST(root=opt.dataroot, download=True,
                                 transform=transforms.Compose([
                                     transforms.Resize(opt.imagesize),
                                     transforms.ToTensor(),
                                     transforms.Normalize((0.5,), (0.5,)),
                                 ]))
        nc = 1
    elif opt.dataset == 'fake':
        dataset = datasets.FakeData(image_size=(3, opt.imagesize, opt.imagesize),
                                    transform=transforms.ToTensor())
        nc = 3
    else:
        # BUGFIX: an unknown dataset name previously fell through to
        # ``assert dataset`` and crashed with UnboundLocalError instead of a
        # clear error message.
        raise ValueError('Unknown dataset: %r' % (opt.dataset,))
    assert dataset
    return dataset, nc
| 2,828 | 41.223881 | 91 | py |
ConjugateGradient_GAN | ConjugateGradient_GAN-master/utils/lib_version.py | import sys
import torch
import torchvision
import numpy as np
import PIL
def print_libs_version():
    """Print the versions of the core libraries in the current environment."""
    print("Environment:")
    # (format template, value) pairs, printed in a fixed order.
    rows = [
        ("\tPython: {}", sys.version.split(" ")[0]),
        ("\tPyTorch: {}", torch.__version__),
        ("\tTorchvision: {}", torchvision.__version__),
        ("\tCUDA: {}", torch.version.cuda),
        ("\tCUDNN: {}", torch.backends.cudnn.version()),
        ("\tNumPy: {}", np.__version__),
        ("\tPIL: {}", PIL.__version__),
    ]
    for template, version in rows:
        print(template.format(version))
ConjugateGradient_GAN | ConjugateGradient_GAN-master/utils/fid_score.py | """
forked from https://github.com/mseitzer/pytorch-fid/blob/master/inception.py
"""
import os
import pathlib
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import torch
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from PIL import Image
try:
from tqdm import tqdm
except ImportError:
# If not tqdm is not available, provide a mock version of it
def tqdm(x): return x
from utils.inception import InceptionV3
def imread(filename):
    """
    Loads an image file into a (height, width, 3) uint8 ndarray.
    """
    image = Image.open(filename)
    pixels = np.asarray(image, dtype=np.uint8)
    # Keep only the first three channels (drops any alpha channel).
    return pixels[..., :3]
def get_activations(files, model, batch_size=50, dims=2048,
                    cuda=True, verbose=False):
    """Calculates the activations of the pool_3 layer for all images.
    Params:
    -- files       : List of image files paths
    -- model       : Instance of inception model
    -- batch_size  : Batch size of images for the model to process at once.
                     Make sure that the number of samples is a multiple of
                     the batch size, otherwise some samples are ignored. This
                     behavior is retained to match the original FID score
                     implementation.
    -- dims        : Dimensionality of features returned by Inception
    -- cuda        : If set to True, use GPU
    -- verbose     : If set to True and parameter out_step is given, the number
                     of calculated batches is reported.
    Returns:
    -- A numpy array of dimension (num images, dims) that contains the
       activations of the given tensor when feeding inception with the
       query tensor.
    """
    model.eval()
    if batch_size > len(files):
        print(('Warning: batch size is bigger than the data size. '
               'Setting batch size to data size'))
        batch_size = len(files)
    pred_arr = np.empty((len(files), dims))
    for i in range(0, len(files), batch_size):
    # for i in tqdm(range(0, len(files), batch_size)):
        # if verbose:
        #     print('\rPropagating batch %d/%d' % (i + 1, n_batches),
        #           end='', flush=True)
        start = i
        end = i + batch_size
        # NOTE(review): assumes every image decodes to HxWx3 uint8 (imread
        # drops alpha); grayscale inputs would break the transpose -- confirm.
        images = np.array([imread(str(f)).astype(np.float32)
                           for f in files[start:end]])
        # Reshape to (n_images, 3, height, width)
        images = images.transpose((0, 3, 1, 2))
        images /= 255
        batch = torch.from_numpy(images).type(torch.FloatTensor)
        if cuda:
            batch = batch.cuda()
        pred = model(batch)[0]
        # If model output is not scalar, apply global spatial average pooling.
        # This happens if you choose a dimensionality not equal 2048.
        if pred.size(2) != 1 or pred.size(3) != 1:
            pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
        pred_arr[start:end] = pred.cpu().data.numpy().reshape(pred.size(0), -1)
    if verbose:
        print(' done')
    return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    The Frechet distance between two multivariate Gaussians
    X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1, sigma1 : mean vector / covariance matrix of the first Gaussian
                     (e.g. activations of generated samples).
    -- mu2, sigma2 : mean vector / covariance matrix of the second Gaussian
                     (e.g. precomputed statistics of a reference set).
    -- eps         : diagonal offset added when the covariance product is
                     numerically singular.
    Returns:
    -- The Frechet Distance.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)
    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'
    mean_diff = mu1 - mu2
    # Product might be almost singular
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    # Numerical error might give slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            raise ValueError(
                'Imaginary component {}'.format(np.max(np.abs(covmean.imag))))
        covmean = covmean.real
    # Same summation order as the reference implementation.
    return (mean_diff.dot(mean_diff) + np.trace(sigma1) +
            np.trace(sigma2) - 2 * np.trace(covmean))
def calculate_activation_statistics(files, model, batch_size=50,
                                    dims=2048, cuda=True, verbose=False):
    """Compute the mean and covariance of Inception activations for ``files``.

    Params:
    -- files       : List of image file paths
    -- model       : Instance of inception model
    -- batch_size  : Batch size used when running images through the model
    -- dims        : Dimensionality of the Inception features
    -- cuda        : If set to True, use GPU
    -- verbose     : If set to True, report progress
    Returns:
    -- (mu, sigma) : sample mean (dims,) and covariance (dims, dims) of the
                     pool_3 activations, as used by the FID.
    """
    activations = get_activations(files, model, batch_size, dims, cuda, verbose)
    return np.mean(activations, axis=0), np.cov(activations, rowvar=False)
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
m, s = calculate_activation_statistics(files, model, batch_size,
dims, cuda)
return m, s
def calculate_fid_given_paths(paths, batch_size, cuda, dims=2048):
    """Calculates the FID of two paths.

    Params:
    -- paths      : sequence of two paths -- (real images, generated images);
                    each may be an image folder or a precomputed .npz file.
    -- batch_size : batch size used when running images through Inception.
    -- cuda       : if True, run the Inception model on GPU.
    -- dims       : dimensionality of the Inception features (default 2048).

    Returns:
    -- The FID value between the two image sets.

    Raises:
    -- RuntimeError : if either path does not exist.
    """
    for p in paths:
        if not os.path.exists(p):
            raise RuntimeError('Invalid path: %s' % p)
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
    model = InceptionV3([block_idx])
    if cuda:
        print('use CUDA')
        model.cuda()
    print(f'real-image folder: {paths[0]}')
    m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size,
                                         dims, cuda)
    # BUGFIX: this log line previously said 'real-image folder' for the
    # generated-image path as well, which made the output misleading.
    print(f'fake-image folder: {paths[1]}')
    m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size,
                                         dims, cuda)
    fid_value = calculate_frechet_distance(m1, s1, m2, s2)
    return fid_value
ConjugateGradient_GAN | ConjugateGradient_GAN-master/utils/metric.py | import torch
class TensorMetric(object):
    """Running average of scalar tensors, accumulated detached on the CPU."""
    def __init__(self, name):
        self.name = name
        self.sum = torch.tensor(0.)
        self.n = torch.tensor(0.)
    def update(self, val):
        # Detach and move to CPU so the metric never keeps a graph alive.
        self.sum = self.sum + val.detach().cpu()
        self.n = self.n + 1
    @property
    def avg(self):
        return self.sum / self.n
class FloatMetric(object):
    """Accumulates plain numeric values and exposes their running mean."""
    def __init__(self, name):
        self.name = name
        self.sum, self.n = 0, 0
    def update(self, val):
        self.sum, self.n = self.sum + val, self.n + 1
    @property
    def avg(self):
        return self.sum / self.n
ConjugateGradient_GAN | ConjugateGradient_GAN-master/jupyter/toy_example/set_optimizer.py | # coding: utf-8
import attr
import torch.optim as optim
from cg_optimizer import ConjugateGradientOptimizer
@attr.s
class OptimizerSetting:
    """Bag of optimizer hyperparameters consumed by ``build_optimizer``."""
    # Required fields (no defaults): optimizer name, learning rate,
    # weight decay and the parameters/model to optimize.
    name = attr.ib()
    lr = attr.ib()
    weight_decay = attr.ib()
    model = attr.ib()
    momentum = attr.ib(default=0.9) # sgd, sgd_nesterov
    eps = attr.ib(default=0.001) # adam, rmsprop (term added to the denominator to improve numerical stability )
    alpha = attr.ib(default=0.99) # rmsprop (smoothing constant)
    beta_1 = attr.ib(default=0.5) #adam
    beta_2 = attr.ib(default=0.999) #adam
    eta = attr.ib(default=0.001) # lars coefficient
    # kfac
    damping = attr.ib(default=0.001)
    # for cgd
    beta_update_rule = attr.ib(default='FR')
    beta_momentum_coeff = attr.ib(default=1)
    # NOTE(review): positional defaults below -- equivalent to default=2 and
    # default=200 respectively.
    mu = attr.ib(2)
    max_epoch = attr.ib(200)
def build_optimizer(setting: OptimizerSetting):
    """Instantiate the optimizer described by ``setting``.

    Supported names: 'vanilla_sgd', 'momentum_sgd', 'adam', 'cgd'.
    Raises ValueError for anything else.
    """
    name = setting.name
    # Standard Optimizer
    if name == 'vanilla_sgd':
        return optim.SGD(setting.model, lr=setting.lr,
                         weight_decay=setting.weight_decay)
    if name == 'momentum_sgd':
        trainable = filter(lambda p: p.requires_grad, setting.model)
        return optim.SGD(trainable, lr=setting.lr,
                         momentum=setting.momentum,
                         weight_decay=setting.weight_decay)
    if name == 'adam':
        return optim.Adam(setting.model,
                          lr=setting.lr,
                          betas=(setting.beta_1, setting.beta_2),
                          eps=setting.eps,
                          weight_decay=setting.weight_decay,
                          amsgrad=True)
    if name == 'cgd':
        return ConjugateGradientOptimizer(params=setting.model,
                                          lr=setting.lr,
                                          weight_decay=setting.weight_decay,
                                          beta_update_rule=setting.beta_update_rule,
                                          beta_momentum_coeff=setting.beta_momentum_coeff,
                                          mu=setting.mu)
    raise ValueError(
        'The selected optimizer is not supported for this trainer.')
| 2,082 | 35.54386 | 153 | py |
ConjugateGradient_GAN | ConjugateGradient_GAN-master/jupyter/toy_example/cg_optimizer.py | """ Conjugate Gradient method in PyTorch! """
import torch
from torch.optim.optimizer import Optimizer, required
class ConjugateGradientOptimizer(Optimizer):
    """
    Conjugate Gradient method
    Notation:
        d_buffer: update vector
        alpha_buffer: alpha
        beta_buffer: beta
    """
    def __init__(self, params, lr=required, weight_decay=0, beta_update_rule='FR', beta_momentum_coeff=1, mu=2):
        self.epoch = 0
        defaults = dict(lr=lr,
                        weight_decay=weight_decay,
                        mu=mu,
                        beta_update_rule=beta_update_rule,
                        beta_momentum_coeff=beta_momentum_coeff
                        )
        print(f'Initialized Conjugate Gradient as {beta_update_rule} Beta Update Rule Mode')
        print(f'beta_momentum_coeff: {beta_momentum_coeff}')
        super(ConjugateGradientOptimizer, self).__init__(params, defaults)
    # Compute the conjugate-gradient beta coefficient for one parameter
    # tensor, flattening the gradients first.  Every rule clips beta at 1/2
    # and returns 0 when its denominator is exactly zero.
    def calculate_beta(self, current_grad, prev_grad, d_update_v, beta_update_rule):
        current_grad = current_grad.view(-1)
        prev_grad = prev_grad.view(-1)
        d_update_v = d_update_v.view(-1)
        if beta_update_rule == 'FR':
            # Fletcher-Reeves: ||g_t||^2 / ||g_{t-1}||^2
            beta = 0
            denominator = float(torch.dot(prev_grad, prev_grad))
            if denominator != 0:
                numerator = float(torch.dot(current_grad, current_grad))
                beta = min(float(numerator/denominator), 1/2)
        elif beta_update_rule == 'PRP':
            # Polak-Ribiere-Polyak: g_t.(g_t - g_{t-1}) / ||g_{t-1}||^2
            beta = 0
            denominator = float(torch.dot(prev_grad, prev_grad))
            if denominator != 0:
                numerator = float(torch.dot(current_grad, current_grad - prev_grad))
                beta = min(float(numerator/denominator), 1/2)
        elif beta_update_rule == 'HS':
            # Hestenes-Stiefel
            beta = 0
            denominator = float(torch.dot(d_update_v, current_grad - prev_grad))
            if denominator != 0:
                numerator = float(torch.dot(current_grad, current_grad - prev_grad))
                beta = min(float(numerator/denominator), 1/2)
        elif beta_update_rule == 'DY':
            # Dai-Yuan
            beta = 0
            denominator = float(torch.dot(d_update_v, current_grad - prev_grad))
            if denominator != 0:
                numerator = float(torch.dot(current_grad, current_grad))
                beta = min(float(numerator/denominator), 1/2)
        elif beta_update_rule == 'HS_DY':
            # Hybrid: clamp between 0 and min(HS, DY)
            beta = 0
            denominator = float(torch.dot(d_update_v, current_grad - prev_grad))
            if denominator != 0:
                beta_hs = min(float(torch.dot(current_grad, current_grad - prev_grad))/denominator, 1/2)
                beta_dy = min(float(torch.dot(current_grad, current_grad))/denominator, 1/2)
                beta = max(0, min(beta_hs, beta_dy))
        elif beta_update_rule == 'FR_PRP':
            # Hybrid: clamp between 0 and min(FR, PRP)
            beta = 0
            denominator = float(torch.dot(prev_grad, prev_grad))
            if denominator != 0:
                beta_fr = min(float(torch.dot(current_grad, current_grad)) / denominator, 1/2)
                beta_prp = min(float(torch.dot(current_grad, current_grad - prev_grad)) / denominator, 1/2)
                beta = max(0, min(beta_fr, beta_prp))
        elif beta_update_rule == 'HZ':
            # Hager-Zhang: HS term with a mu-weighted correction
            mu = 0
            for group in self.param_groups:
                mu = group['mu']
            beta = 0
            denominator = float(torch.dot(d_update_v, current_grad - prev_grad))
            if denominator != 0:
                beta_hs = min(float(torch.dot(current_grad, current_grad - prev_grad)) / denominator, 1/2)
                numerator = float(torch.dot(current_grad - prev_grad, current_grad - prev_grad)) * float(torch.dot(current_grad, d_update_v))
                beta = min(beta_hs - mu * (numerator / (denominator** 2)), 1/2)
        return beta
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
            epoch: current epoch to calculate polynomial LR decay schedule.
                if None, uses self.epoch and increments it.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            params_with_grad = []
            current_grad_list = []
            prev_grad_list = []
            d_buffer_list = []
            beta_list = []
            weight_decay = group['weight_decay']
            lr = group['lr']
            beta_update_rule = group['beta_update_rule']
            beta_momentum_coeff = group['beta_momentum_coeff']
            # Gather gradients and per-parameter state (None on first step).
            for p in group['params']:
                if p.grad is not None:
                    params_with_grad.append(p)
                    current_grad_list.append(p.grad.data)
                    state = self.state[p]
                    if 'd_buffer' not in state:
                        d_buffer_list.append(None)
                    else:
                        d_buffer_list.append(state['d_buffer'])
                    if 'prev_grad' not in state:
                        prev_grad_list.append(None)
                    else:
                        prev_grad_list.append(state['prev_grad'])
            for i, param in enumerate(params_with_grad):
                d_update_v = d_buffer_list[i]
                current_grad = current_grad_list[i]
                prev_grad = prev_grad_list[i]
                if weight_decay != 0:
                    current_grad = current_grad.add(param, alpha=weight_decay)
                if prev_grad is None: # init
                    beta = 0
                else:
                    beta = self.calculate_beta(current_grad, prev_grad, d_update_v, beta_update_rule) * beta_momentum_coeff
                beta_list.append(beta)
                if d_update_v is None:
                    # NOTE(review): on the first step the applied direction is
                    # +grad (ascent); from the second step it is beta*d - g.
                    # Confirm this first-step behavior is intended.
                    d_update_v = torch.clone(current_grad).detach()
                    d_buffer_list[i] = d_update_v
                else:
                    # In-place update: d <- beta*d - g
                    d_update_v.mul_(beta).add_(current_grad, alpha=-1)
                alpha_buffer = lr
                param.data.add_(d_update_v, alpha=alpha_buffer)
            # Persist state for the next step.
            # NOTE(review): d_buffer was already updated in-place to
            # beta*d - g above, so this stores beta*(beta*d - g) - g --
            # confirm the double application of beta is intended.
            for p, d_buffer, beta in zip(params_with_grad, d_buffer_list, beta_list):
                state = self.state[p]
                state['prev_grad'] = p.grad
                state['d_buffer'] = d_buffer * beta - p.grad
        return loss
| 6,671 | 39.436364 | 141 | py |
spektral | spektral-master/spektral/models/gnn_explainer.py | import networkx as nx
import numpy as np
import tensorflow as tf
from scipy.sparse import csr_matrix
from spektral.layers import MessagePassing
from spektral.layers.convolutional.conv import Conv
from spektral.layers.ops import dot
from spektral.utils.sparse import sp_matrix_to_sp_tensor
class GNNExplainer:
    """
    The GNNExplainer model from the paper:
    > [GNNExplainer: Generating Explanations for Graph Neural Networks](https://arxiv.org/abs/1903.03894)<br>
    > Rex Ying, Dylan Bourgeois, Jiaxuan You, Marinka Zitnik and Jure Leskovec.
    The model can be used to explain the predictions for a single node or for an entire
    graph. In both cases, it returns the subgraph that mostly contributes to the
    prediction.
    **Arguments**
    - `model`: tf.keras.Model to explain;
    - `n_hops`: number of hops from which the GNN aggregates info. If `None`, then the
    number is inferred from the Conv and MessagePassing layers in the model.
    - `preprocess`: a preprocessing function to transform the adjacency matrix before
    giving it as input to the GNN; this is usually the same `preprocess` function of the
    Conv or MessagePassing layers used in the GNN (e.g., `GCNConv.preprocess`).
    - `graph_level`: if True, the GNN is assumed to be for graph-level prediction and
    the explanation is computed for the whole graph (and not just a node).
    - `verbose`: if True, print info during training;
    - `learning_rate`: learning rate when training the model;
    - `a_size_coef`: coefficient to control the number of edges of the subgraph that
    contributes to the prediction;
    - `x_size_coef`: coefficient to control the number of features of the subgraph
    that contributes to the prediction;
    - `a_entropy_coef`: coefficient to control the discretization of the adjacency
    mask;
    - `x_entropy_coef`: coefficient to control the discretization of the features
    mask;
    - `laplacian_coef`: coefficient to control the graph Laplacian loss;
    """
    def __init__(
        self,
        model,
        n_hops=None,
        preprocess=None,
        graph_level=False,
        verbose=False,
        learning_rate=0.01,
        a_size_coef=0.0005,
        x_size_coef=0.1,
        a_entropy_coef=0.1,
        x_entropy_coef=0.1,
        laplacian_coef=0.0,
    ):
        self.model = model
        # Automatically detect the number of hops from which the GNN aggregates info
        if n_hops is None:
            self.n_hops = 0
            for layer in model.layers:
                if isinstance(layer, (Conv, MessagePassing)):
                    self.n_hops += 1
            print(f"n_hops was automatically inferred to be {self.n_hops}")
        else:
            self.n_hops = n_hops
        self.preprocess = preprocess
        self.graph_level = graph_level
        self.verbose = verbose
        self.learning_rate = learning_rate
        self.a_size_coef = a_size_coef
        self.x_size_coef = x_size_coef
        self.a_entropy_coef = a_entropy_coef
        self.x_entropy_coef = x_entropy_coef
        self.laplacian_coef = laplacian_coef
    def explain_node(self, x, a, node_idx=None, epochs=100):
        """
        Train the GNNExplainer to explain the given graph.
        :param x: feature matrix of shape `(n_nodes, n_node_features)`;
        :param a: sparse adjacency matrix of shape `(n_nodes, n_nodes)`;
        :param node_idx: index of the node to explain. If `self.graph_level=True`, this
        is ignored;
        :param epochs: number of epochs to train for.
        :return:
        - `a_mask`: mask for the adjacency matrix;
        - `x_mask`: mask for the node features.
        """
        x = tf.cast(x, tf.float32)
        if node_idx is None:
            node_idx = 0
        # Get the computational graph
        if self.graph_level:
            # Graph-level task: the whole graph is the computational graph,
            # and all nodes belong to a single graph (index 0).
            self.comp_graph = tf.cast(a, tf.float32)
            self.i = tf.zeros(x.shape[0], dtype=tf.int32)
            self.y_pred = tf.argmax(self.model([x, a, self.i], training=False), axis=1)
        else:
            # Node-level task: restrict to the n_hops neighbourhood of node_idx,
            # which is all the GNN can see for that node.
            self.comp_graph = k_hop_sparse_subgraph(
                a, node_idx, self.n_hops, self.preprocess
            )
            self.y_pred = tf.argmax(self.model([x, a], training=False), axis=1)
        # Class predicted for the target node; the masks are trained to preserve it.
        self.node_pred = self.y_pred[node_idx]
        self.y_pred = tf.cast(self.y_pred, tf.float32)
        # Optimizer for training
        self.opt = tf.keras.optimizers.Adam(self.learning_rate)
        # Init the trainable masks:
        # x_mask has one weight per node feature (shared across nodes);
        # a_mask has one weight per non-zero entry of the computational graph.
        x_mask = tf.Variable(
            tf.random.normal((1, x.shape[1]), stddev=0.1),
            dtype=tf.float32,
            trainable=True,
        )
        a_mask = tf.Variable(
            tf.random.normal(
                self.comp_graph.values.shape, stddev=(2 / x.shape[0]) ** 0.5
            ),
            dtype=tf.float32,
            trainable=True,
        )
        # Training loop
        for i in range(epochs):
            losses = self._train_step(x, a_mask, x_mask, node_idx)
            if self.verbose:
                print(", ".join([f"{key}: {val}" for key, val in losses.items()]))
        return a_mask, x_mask
    @tf.function
    def _train_step(self, x, a_mask, x_mask, node_idx):
        # One gradient step on the masks only (the GNN's own weights are frozen:
        # the model is always called with training=False and the tape only
        # watches a_mask and x_mask).
        with tf.GradientTape() as tape:
            # Apply the sigmoid-squashed masks to the adjacency values and to x.
            masked_a = tf.sparse.map_values(
                tf.multiply, self.comp_graph, tf.nn.sigmoid(a_mask)
            )
            masked_x = x * tf.nn.sigmoid(x_mask)
            if self.graph_level:
                pred = self.model([masked_x, masked_a, self.i], training=False)[
                    0, self.node_pred
                ]
            else:
                pred = self.model([masked_x, masked_a], training=False)[
                    node_idx, self.node_pred
                ]
            loss, losses = self._explain_loss_fn(pred, a_mask, x_mask)
        grad = tape.gradient(loss, [a_mask, x_mask])
        self.opt.apply_gradients(zip(grad, [a_mask, x_mask]))
        return losses
    def _explain_loss_fn(self, y_pred, a_mask, x_mask):
        # Total loss = prediction term + size/entropy regularizers on both masks
        # (+ optional Laplacian smoothness term for node-level explanations).
        mask = tf.nn.sigmoid(a_mask)
        # Prediction loss
        pred_loss = -tf.math.log(y_pred + 1e-15)
        # Loss for A
        a_size_loss = self.a_size_coef * tf.reduce_sum(mask)
        entropy = -mask * tf.math.log(mask + 1e-15) - (1 - mask) * tf.math.log(
            1 - mask + 1e-15
        )
        a_entropy_loss = self.a_entropy_coef * tf.reduce_mean(entropy)
        # Graph Laplacian loss
        if self.graph_level:
            smoothness_loss = 0
        else:
            masked_a = tf.sparse.map_values(tf.multiply, self.comp_graph, mask)
            d = tf.linalg.diag(tf.sparse.reduce_sum(masked_a, axis=0))
            masked_a = tf.sparse.map_values(tf.multiply, masked_a, -1)
            laplacian = tf.sparse.add(d, masked_a)
            laplacian = tf.cast(laplacian, tf.float32)
            # Quadratic form y^T L y: penalizes explanations that are not
            # smooth w.r.t. the masked graph.
            quad_form = (
                tf.reshape(self.y_pred, (1, -1))
                @ laplacian
                @ tf.reshape(self.y_pred, (-1, 1))
            )
            smoothness_loss = self.laplacian_coef * quad_form
        # Feature loss
        mask = tf.nn.sigmoid(x_mask)
        x_size_loss = self.x_size_coef * tf.reduce_sum(mask)
        entropy = -mask * tf.math.log(mask + 1e-15) - (1 - mask) * tf.math.log(
            1 - mask + 1e-15
        )
        x_entropy_loss = self.x_entropy_coef * tf.reduce_mean(entropy)
        loss = (
            pred_loss
            + a_size_loss
            + a_entropy_loss
            + smoothness_loss
            + x_size_loss
            + x_entropy_loss
        )
        losses = {
            "pred_loss": pred_loss,
            "a_size_loss": a_size_loss,
            "a_entropy_loss": a_entropy_loss,
            "smoothness_loss": smoothness_loss,
            "x_size_loss": x_size_loss,
            "x_entropy_loss": x_entropy_loss,
        }
        return loss, losses
    def _explainer_cleaning(self, a_mask, x_mask, node_idx, a_thresh):
        # Turn the trained (real-valued) masks into a clean explanation:
        # binarize, threshold, symmetrize the subgraph, and rank the features.
        # Get the masks
        selected_adj_mask = tf.nn.sigmoid(a_mask)
        selected_feat_mask = tf.nn.sigmoid(x_mask)
        # convert into a binary matrix
        if self.preprocess is not None:
            comp_graph_values = tf.ones_like(self.comp_graph.values)
            self.comp_graph = tf.sparse.SparseTensor(
                self.comp_graph.indices, comp_graph_values, self.comp_graph.shape
            )
        # remove the edges which value is < a_thresh
        selected_adj_mask = tf.where(
            selected_adj_mask >= a_thresh, selected_adj_mask, 0
        )
        selected_subgraph = tf.sparse.map_values(
            tf.multiply, self.comp_graph, selected_adj_mask
        )
        is_nonzero = tf.not_equal(selected_subgraph.values, 0)
        selected_subgraph = tf.sparse.retain(selected_subgraph, is_nonzero)
        # impose the symmetry of the adj matrix
        selected_subgraph = (
            tf.sparse.add(selected_subgraph, tf.sparse.transpose(selected_subgraph)) / 2
        )
        if not self.graph_level:
            # get the final denoised subgraph centerd in the interested node
            selected_subgraph = k_hop_sparse_subgraph(
                selected_subgraph, node_idx, self.n_hops
            )
        # the the top_feat relevant feature ids
        selected_features = tf.argsort(
            tf.nn.sigmoid(selected_feat_mask), direction="DESCENDING"
        )[0]
        return selected_subgraph, selected_features
    def plot_subgraph(
        self, a_mask, x_mask, node_idx=None, a_thresh=0.1, return_features=False
    ):
        """
        Plot the subgraph computed by the GNNExplainer.
        **Arguments**
        :param a_mask: the mask for the adjacency matrix computed by `explain_node`;
        :param x_mask: the mask for the node features computed by `explain_node`;
        :param node_idx: the same node index that was given to `explain_node`;
        :param a_thresh: threshold to remove low-importance edges;
        :param return_features: if True, return indices to sort the nodes by their
        importance.
        :return: The subgraph computed by GNNExplainer in Networkx format. If
        `return_features=True`, also returns an indices to sort the nodes by their
        importance.
        """
        adj_mtx, top_ftrs = self._explainer_cleaning(a_mask, x_mask, node_idx, a_thresh)
        edge_list = adj_mtx.indices.numpy()
        weights = adj_mtx.values
        G = nx.Graph()
        for i, (n1, n2) in enumerate(edge_list):
            if weights[i] != 0:
                G.add_edge(n1, n2, w=weights[i].numpy())
        # take the largest component
        giant = max(nx.algorithms.components.connected_components(G), key=len)
        pos = nx.layout.spring_layout(G, k=0.04)
        nx.draw_networkx_nodes(G, pos=pos, node_size=30, nodelist=giant)
        nx.draw_networkx_edges(G, pos=pos, edge_color="grey", alpha=0.8)
        nx.draw_networkx_labels(
            G, pos=pos, font_color="black", font_size=10, verticalalignment="bottom"
        )
        if return_features:
            return G, top_ftrs
        else:
            return G
def k_hop_sparse_subgraph(a, node_idx, k, transformer=None):
    """
    Computes the subgraph containing all the neighbors of `node_idx` up to the k-th order.
    If `a` is not the binary adjacency matrix a `transformer` should be passed.
    **Arguments**
    - `a`: sparse `(n_nodes, n_nodes)` graph tensor;
    - `node_idx`: center node;
    - `k`: order of neighbor;
    - `transformer`: one of the functions from the `spektral.transforms` module,
    needed to convert the binary adjacency matrix into the correct format for the model;
    """
    if a.dtype != tf.float32:
        a = tf.cast(a, tf.float32)
    if transformer:
        # Work on the binary adjacency matrix; the transformer is re-applied
        # to the extracted subgraph at the end.
        a = binary_adj_converter(a)
    power_a = tf.sparse.eye(a.shape[0])
    # Indicator (row) vector of the nodes reachable in less than k hops,
    # starting from the center node itself.
    k_neighs = np.zeros(a.shape[0]).astype("float32").reshape(1, -1)
    k_neighs[0, node_idx] = 1
    for _ in range(k - 1):
        # power_a walks through A^1, ..., A^(k-1); its node_idx-th row marks
        # the nodes reachable in exactly that number of hops.
        power_a = dot(power_a, a)
        temp = tf.sparse.slice(power_a, start=[node_idx, 0], size=[1, power_a.shape[0]])
        k_neighs += tf.sparse.to_dense(temp)
    # Keep every edge with at least one endpoint in the neighbourhood
    # (rows selected via the column vector, columns via the row vector).
    comp_graph = tf.sparse.add(a * tf.reshape(k_neighs, (-1, 1)), a * k_neighs)
    is_nonzero = tf.not_equal(comp_graph.values, 0)
    comp_graph = tf.sparse.retain(comp_graph, is_nonzero)
    # Binarize the surviving edge weights.
    comp_graph = tf.sign(comp_graph)
    if transformer:
        comp_graph = sp_tensor_to_sp_matrix(comp_graph)
        comp_graph = transformer(comp_graph)
        return sp_matrix_to_sp_tensor(comp_graph)
    else:
        return comp_graph
def binary_adj_converter(a_in):
    """
    Returns the binary adjacency matrix of `a_in`: self-loops are dropped and
    every remaining stored entry is set to 1.
    **Arguments**
    - `a_in`: sparse `(n_nodes, n_nodes)` graph tensor;
    """
    indices = a_in.indices
    # Keep only off-diagonal entries (i.e. discard self-loops).
    off_diag = tf.not_equal(indices[:, 0], indices[:, 1])
    indices = tf.boolean_mask(indices, off_diag)
    ones = tf.ones(indices.shape[0], dtype=tf.float32)
    return tf.sparse.SparseTensor(indices, ones, a_in.shape)
def sp_tensor_to_sp_matrix(a):
    """
    Converts a sparse tensor into a scipy CSR matrix.
    **Arguments**
    - `a`: sparse `(n_nodes, n_nodes)` graph tensor;
    """
    rows = a.indices[:, 0]
    cols = a.indices[:, 1]
    return csr_matrix((a.values, (rows, cols)), shape=a.shape)
| 13,348 | 34.981132 | 109 | py |
spektral | spektral-master/spektral/models/general_gnn.py | from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import (
Activation,
Add,
BatchNormalization,
Concatenate,
Dense,
Dropout,
PReLU,
)
from spektral.layers import GeneralConv
from spektral.layers.pooling import global_pool
def get_act(identifier):
    """Return the activation layer for `identifier`: "prelu" maps to a PReLU
    layer, anything else is wrapped in a keras `Activation` layer."""
    return PReLU() if identifier == "prelu" else Activation(identifier)
class GeneralGNN(Model):
    r"""
    This model implements the GNN architecture from the paper
    > [Design Space for Graph Neural Networks](https://arxiv.org/abs/2011.08843)<br>
    > Jiaxuan You, Rex Ying, Jure Leskovec
    **Mode**: single, disjoint, mixed.
    The default parameters of the model are selected according to the best
    results obtained in the paper, and should provide a good performance on
    many node-level and graph-level tasks, without modifications.
    The defaults are as follows:
    - 256 hidden channels
    - 4 message passing layers
    - 2 pre-processing layers
    - 2 post-processing layers
    - Skip connections with concatenation
    - Batch normalization
    - No dropout
    - PReLU activations
    - Sum aggregation in the message-passing layers
    - Global sum pooling (not from the paper)
    The GNN uses the [`GeneralConv` layer](/layers/convolution/#generalconv)
    for message passing, and has a pre- and a post-processing MLP for the node
    features.
    Message-passing layers also have optional skip connections, which can be
    implemented as sum or concatenation.
    The dense layers of the pre-processing and post-processing MLPs compute the
    following update of the node features:
    $$
    \h_i = \mathrm{Act} \left( \mathrm{Dropout} \left( \mathrm{BN}
    \left( \x_i \W + \b \right) \right) \right)
    $$
    Message-passing layers compute:
    $$
    \h_i = \mathrm{Agg} \left( \left\{ \mathrm{Act} \left( \mathrm{Dropout}
    \left( \mathrm{BN} \left( \x_j \W + \b \right) \right) \right),
    j \in \mathcal{N}(i) \right\} \right)
    $$
    **Arguments**
    - `output`: int, the number of output units;
    - `activation`: the activation function of the output layer.
    - `hidden`: int, the number of hidden units for all layers except the output
    one;
    - `message_passing`: int, the nummber of message-passing layers;
    - `pre_process`: int, the number of layers in the pre-processing MLP;
    - `post_process`: int, the number of layers in the post-processing MLP;
    - `connectivity`: the type of skip connection. Can be: None, 'sum' or 'cat';
    - `batch_norm`: bool, whether to use batch normalization;
    - `dropout`: float, dropout rate;
    - `aggregate`: string or callable, an aggregation function. Supported
    aggregations: 'sum', 'mean', 'max', 'min', 'prod'.
    - `hidden_activation`: activation function in the hidden layers. The PReLU
    activation can be used by passing `hidden_activation='prelu'`.
    - `pool`: string or None, the global pooling function. If None, no global
    pooling is applied (e.g., for node-level learning). Supported pooling methods:
    'sum', 'avg', 'max', 'attn', 'attn_sum', 'sort'
    (see `spektral.layers.pooling.global_pool`).
    """
    def __init__(
        self,
        output,
        activation=None,
        hidden=256,
        message_passing=4,
        pre_process=2,
        post_process=2,
        connectivity="cat",
        batch_norm=True,
        dropout=0.0,
        aggregate="sum",
        hidden_activation="prelu",
        pool="sum",
    ):
        super().__init__()
        # Stored verbatim so that get_config() can round-trip the model.
        self.config = {
            "output": output,
            "activation": activation,
            "hidden": hidden,
            "message_passing": message_passing,
            "pre_process": pre_process,
            "post_process": post_process,
            "connectivity": connectivity,
            "batch_norm": batch_norm,
            "dropout": dropout,
            "aggregate": aggregate,
            "hidden_activation": hidden_activation,
            "pool": pool,
        }
        # Connectivity function
        if connectivity is None:
            self.connectivity = None
        elif connectivity == "sum":
            self.connectivity = Add()
        elif connectivity == "cat":
            self.connectivity = Concatenate()
        else:
            # Bug fix: the original message contained a literal "{}" that was
            # never formatted with the offending value.
            raise ValueError(
                f"Unknown connectivity: {connectivity}. Available: None, sum, cat."
            )
        # Global pooling
        if pool is not None:
            self.pool = global_pool.get(pool)()
        else:
            self.pool = None
        # Neural blocks
        self.pre = MLP(
            hidden,
            hidden,
            pre_process,
            batch_norm,
            dropout,
            hidden_activation,
            hidden_activation,
        )
        self.gnn = [
            GeneralConv(hidden, batch_norm, dropout, aggregate, hidden_activation)
            for _ in range(message_passing)
        ]
        self.post = MLP(
            output,
            hidden,
            post_process,
            batch_norm,
            dropout,
            hidden_activation,
            activation,
        )
    def call(self, inputs):
        # Inputs are (x, a) or (x, a, i), where i are the disjoint-mode graph
        # indices used by the global pooling layer.
        if len(inputs) == 2:
            x, a = inputs
            i = None
        else:
            x, a, i = inputs
        # Pre-process
        out = self.pre(x)
        # Message passing (with optional skip connections)
        for layer in self.gnn:
            z = layer([out, a])
            if self.connectivity is not None:
                out = self.connectivity([z, out])
            else:
                out = z
        # Global pooling
        if self.pool is not None:
            out = self.pool([out] + ([i] if i is not None else []))
        # Post-process
        out = self.post(out)
        return out
    def get_config(self):
        return self.config
class MLP(Model):
    """Stack of Dense -> (BatchNorm) -> Dropout -> activation blocks.
    The last block uses `output` units and `final_activation`; all earlier
    blocks use `hidden` units and `activation`.
    """
    def __init__(
        self,
        output,
        hidden=256,
        layers=2,
        batch_norm=True,
        dropout=0.0,
        activation="prelu",
        final_activation=None,
    ):
        super().__init__()
        # Stored verbatim so that get_config() can round-trip the model.
        self.config = {
            "output": output,
            "hidden": hidden,
            "layers": layers,
            "batch_norm": batch_norm,
            "dropout": dropout,
            "activation": activation,
            "final_activation": final_activation,
        }
        self.batch_norm = batch_norm
        self.dropout_rate = dropout
        self.mlp = Sequential()
        for layer_idx in range(layers):
            is_last = layer_idx == layers - 1
            # Linear
            self.mlp.add(Dense(output if is_last else hidden))
            # Batch norm
            if self.batch_norm:
                self.mlp.add(BatchNormalization())
            # Dropout
            self.mlp.add(Dropout(self.dropout_rate))
            # Activation
            self.mlp.add(get_act(final_activation if is_last else activation))
    def call(self, inputs):
        return self.mlp(inputs)
    def get_config(self):
        return self.config
| 6,997 | 29.426087 | 85 | py |
spektral | spektral-master/spektral/models/gcn.py | import tensorflow as tf
from spektral.layers.convolutional import gcn_conv
class GCN(tf.keras.Model):
    """
    A two-layer graph convolutional network with dropout, with its default
    hyperparameters taken from the paper:
    > [Semi-Supervised Classification with Graph Convolutional Networks](https://arxiv.org/abs/1609.02907)<br>
    > Thomas N. Kipf and Max Welling
    **Mode**: single, disjoint, mixed, batch.
    **Input**
    - Node features of shape `([batch], n_nodes, n_node_features)`
    - Weighted adjacency matrix of shape `([batch], n_nodes, n_nodes)`
    **Output**
    - Softmax predictions with shape `([batch], n_nodes, n_labels)`.
    **Arguments**
    - `n_labels`: number of channels in output;
    - `channels`: number of channels in first GCNConv layer;
    - `activation`: activation of the first GCNConv layer;
    - `output_activation`: activation of the second GCNConv layer;
    - `use_bias`: whether to add a learnable bias to the two GCNConv layers;
    - `dropout_rate`: `rate` used in `Dropout` layers;
    - `l2_reg`: l2 regularization strength;
    - `**kwargs`: passed to `Model.__init__`.
    """
    def __init__(
        self,
        n_labels,
        channels=16,
        activation="relu",
        output_activation="softmax",
        use_bias=False,
        dropout_rate=0.5,
        l2_reg=2.5e-4,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.n_labels = n_labels
        self.channels = channels
        self.activation = activation
        self.output_activation = output_activation
        self.use_bias = use_bias
        self.dropout_rate = dropout_rate
        self.l2_reg = l2_reg
        regularizer = tf.keras.regularizers.l2(l2_reg)
        # First block: dropout followed by an L2-regularized GCN layer.
        self._d0 = tf.keras.layers.Dropout(dropout_rate)
        self._gcn0 = gcn_conv.GCNConv(
            channels,
            activation=activation,
            kernel_regularizer=regularizer,
            use_bias=use_bias,
        )
        # Second block: dropout followed by the output GCN layer.
        self._d1 = tf.keras.layers.Dropout(dropout_rate)
        self._gcn1 = gcn_conv.GCNConv(
            n_labels, activation=output_activation, use_bias=use_bias
        )
    def get_config(self):
        return {
            "n_labels": self.n_labels,
            "channels": self.channels,
            "activation": self.activation,
            "output_activation": self.output_activation,
            "use_bias": self.use_bias,
            "dropout_rate": self.dropout_rate,
            "l2_reg": self.l2_reg,
        }
    def call(self, inputs):
        if len(inputs) == 3:
            x, a, _ = inputs  # DisjointLoader also yields graph indices; unused here
        else:
            x, a = inputs
        hidden = self._gcn0([self._d0(x), a])
        return self._gcn1([self._d1(hidden), a])
| 2,705 | 29.75 | 110 | py |
spektral | spektral-master/spektral/datasets/qm9.py | import os
import os.path as osp
import numpy as np
from joblib import Parallel, delayed
from tensorflow.keras.utils import get_file
from tqdm import tqdm
from spektral.data import Dataset, Graph
from spektral.utils import label_to_one_hot, sparse
from spektral.utils.io import load_csv, load_sdf
ATOM_TYPES = [1, 6, 7, 8, 9]
BOND_TYPES = [1, 2, 3, 4]
class QM9(Dataset):
    """
    The QM9 chemical data set of small molecules.
    In this dataset, nodes represent atoms and edges represent chemical bonds.
    There are 5 possible atom types (H, C, N, O, F) and 4 bond types (single,
    double, triple, aromatic).
    Node features represent the chemical properties of each atom and include:
    - The atomic number, one-hot encoded;
    - The atom's position in the X, Y, and Z dimensions;
    - The atomic charge;
    - The mass difference from the monoisotope;
    The edge features represent the type of chemical bond between two atoms,
    one-hot encoded.
    Each graph has an 19-dimensional label for regression.
    **Arguments**
    - `amount`: int, load this many molecules instead of the full dataset
    (useful for debugging).
    - `n_jobs`: number of CPU cores to use for reading the data (-1, to use all
    available cores).
    """
    url = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/gdb9.tar.gz"
    def __init__(self, amount=None, n_jobs=1, **kwargs):
        self.amount = amount
        self.n_jobs = n_jobs
        super().__init__(**kwargs)
    def download(self):
        # Download and unpack the archive, then delete the tarball to save space.
        get_file(
            "qm9.tar.gz",
            self.url,
            extract=True,
            cache_dir=self.path,
            cache_subdir=self.path,
        )
        os.remove(osp.join(self.path, "qm9.tar.gz"))
    def read(self):
        # Parse the SDF file into Graphs; optionally only the first `amount`.
        print("Loading QM9 dataset.")
        sdf_file = osp.join(self.path, "gdb9.sdf")
        data = load_sdf(sdf_file, amount=self.amount) # Internal SDF format
        def read_mol(mol):
            # One molecule -> (node features, adjacency, edge features).
            x = np.array([atom_to_feature(atom) for atom in mol["atoms"]])
            a, e = mol_to_adj(mol)
            return x, a, e
        # Molecules are independent, so parsing is parallelized over n_jobs cores.
        data = Parallel(n_jobs=self.n_jobs)(
            delayed(read_mol)(mol) for mol in tqdm(data, ncols=80)
        )
        x_list, a_list, e_list = list(zip(*data))
        # Load labels
        labels_file = osp.join(self.path, "gdb9.sdf.csv")
        labels = load_csv(labels_file)
        labels = labels.set_index("mol_id").values
        if self.amount is not None:
            # Keep the labels aligned with the truncated molecule list.
            labels = labels[: self.amount]
        return [
            Graph(x=x, a=a, e=e, y=y)
            for x, a, e, y in zip(x_list, a_list, e_list, labels)
        ]
def atom_to_feature(atom):
    """Build the feature vector of a single atom: one-hot atomic number,
    (x, y, z) coordinates, charge, and mass difference from the monoisotope."""
    one_hot_type = label_to_one_hot(atom["atomic_num"], ATOM_TYPES)
    scalars = [atom["charge"], atom["iso"]]
    return np.concatenate((one_hot_type, atom["coords"], scalars), -1)
def mol_to_adj(mol):
    """Build the (adjacency, edge features) pair of a molecule.
    Every bond is inserted in both directions so the adjacency is symmetric;
    edge features are the one-hot encoded bond types."""
    rows, cols, bond_types = [], [], []
    for bond in mol["bonds"]:
        i, j = bond["start_atom"], bond["end_atom"]
        rows.extend((i, j))
        cols.extend((j, i))
        bond_types.extend([bond["type"]] * 2)
    return sparse.edge_index_to_matrix(
        edge_index=np.array((rows, cols)).T,
        edge_weight=np.ones_like(rows),
        edge_features=label_to_one_hot(bond_types, BOND_TYPES),
    )
| 3,361 | 28.491228 | 80 | py |
spektral | spektral-master/spektral/datasets/qm7.py | import os.path as osp
import numpy as np
import scipy.sparse as sp
from scipy.io import loadmat
from tensorflow.keras.utils import get_file
from spektral.data import Dataset, Graph
from spektral.utils import sparse
class QM7(Dataset):
    """
    The QM7b dataset of molecules from the paper:
    > [MoleculeNet: A Benchmark for Molecular Machine Learning](https://arxiv.org/abs/1703.00564)<br>
    > Zhenqin Wu et al.
    The dataset has no node features.
    Edges and edge features are obtained from the Coulomb matrices of the
    molecules.
    Each graph has a 14-dimensional label for regression.
    """
    url = "http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/qm7b.mat"
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def download(self):
        # Fetch the raw .mat archive into the dataset folder.
        get_file(
            "qm7b.mat",
            self.url,
            extract=True,
            cache_dir=self.path,
            cache_subdir=self.path,
        )
    def read(self):
        # Parse qm7b.mat into a list of Graphs, one per molecule.
        print("Loading QM7 dataset.")
        mat_file = osp.join(self.path, "qm7b.mat")
        data = loadmat(mat_file)
        coulomb_matrices = data["X"]  # one Coulomb matrix per molecule
        labels = data["T"]  # 14 regression targets per molecule
        output = []
        for i in range(len(coulomb_matrices)):
            # Non-zero Coulomb entries define the edges; their values are the
            # (scalar) edge features.
            row, col, values = sp.find(coulomb_matrices[i])
            edge_index = np.array([row, col]).T
            # Bug fix: pass the Coulomb values as `edge_features` so that
            # `edge_index_to_matrix` keeps them aligned with the (reordered)
            # edges of `a`. The previous implementation overwrote the returned
            # `e` with the unsorted values, breaking the edge/feature
            # correspondence.
            a, e = sparse.edge_index_to_matrix(
                edge_index, np.ones_like(values), values[:, None]
            )
            y = labels[i]
            output.append(Graph(a=a, e=e, y=y))
        return output
| 1,562 | 25.948276 | 101 | py |
spektral | spektral-master/spektral/datasets/mnist.py | import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import kneighbors_graph
from tensorflow.keras.datasets import mnist as m
from spektral.data import Dataset, Graph
MNIST_SIZE = 28
class MNIST(Dataset):
    """
    The MNIST images used as node features for a grid graph, as described by
    [Defferrard et al. (2016)](https://arxiv.org/abs/1606.09375).
    This dataset is a graph signal classification task, where graphs are
    represented in mixed mode: one adjacency matrix, many instances of node
    features.
    For efficiency, the adjacency matrix is stored in a special attribute of the
    dataset and the Graphs only contain the node features.
    You can access the adjacency matrix via the `a` attribute.
    The node features of each graph are the MNIST digits vectorized and rescaled
    to [0, 1].
    Two nodes are connected if they are neighbours on the grid.
    Labels represent the MNIST class associated to each sample.
    **Note:** the last 10000 samples are the default test set of the MNIST
    dataset.
    **Arguments**
    - `p_flip`: if >0, then edges are randomly flipped from 0 to 1 or vice versa
    with that probability.
    - `k`: number of neighbours of each node.
    """
    def __init__(self, p_flip=0.0, k=8, **kwargs):
        # The grid adjacency shared by all samples is built in read() and
        # stored here (mixed mode).
        self.a = None
        self.k = k
        self.p_flip = p_flip
        super().__init__(**kwargs)
    def read(self):
        # Build the k-NN grid graph, optionally perturbed by flipping a
        # fraction p_flip of its entries.
        self.a = _mnist_grid_graph(self.k)
        self.a = _flip_random_edges(self.a, self.p_flip)
        (x_train, y_train), (x_test, y_test) = m.load_data()
        # Stack train and test; the last 10000 samples are MNIST's test split.
        x = np.vstack((x_train, x_test))
        x = x / 255.0  # rescale pixel values to [0, 1]
        y = np.concatenate((y_train, y_test), 0)
        # Each image becomes a (784, 1) signal over the grid nodes.
        x = x.reshape(-1, MNIST_SIZE**2, 1)
        return [Graph(x=x_, y=y_) for x_, y_ in zip(x, y)]
def _grid_coordinates(side):
M = side**2
x = np.linspace(0, 1, side, dtype=np.float32)
y = np.linspace(0, 1, side, dtype=np.float32)
xx, yy = np.meshgrid(x, y)
z = np.empty((M, 2), np.float32)
z[:, 0] = xx.reshape(M)
z[:, 1] = yy.reshape(M)
return z
def _get_adj_from_data(X, k, **kwargs):
    """Return the symmetrized k-nearest-neighbour graph of X as a CSR matrix
    (an edge is kept if it appears in either direction)."""
    knn = kneighbors_graph(X, k, **kwargs).toarray()
    return sp.csr_matrix(np.maximum(knn, knn.T))
def _mnist_grid_graph(k):
    """Adjacency matrix of the 28x28 pixel grid, where each pixel is linked to
    its k nearest neighbours (Euclidean distance, no self-loops)."""
    coords = _grid_coordinates(MNIST_SIZE)
    return _get_adj_from_data(
        coords, k, mode="connectivity", metric="euclidean", include_self=False
    )
def _flip_random_edges(A, p_swap):
if not A.shape[0] == A.shape[1]:
raise ValueError("A must be a square matrix.")
dtype = A.dtype
A = sp.lil_matrix(A).astype(bool)
n_elem = A.shape[0] ** 2
n_elem_to_flip = round(p_swap * n_elem)
unique_idx = np.random.choice(n_elem, replace=False, size=n_elem_to_flip)
row_idx = unique_idx // A.shape[0]
col_idx = unique_idx % A.shape[0]
idxs = np.stack((row_idx, col_idx)).T
for i in idxs:
i = tuple(i)
A[i] = np.logical_not(A[i])
A = A.tocsr().astype(dtype)
A.eliminate_zeros()
return A
| 3,018 | 28.598039 | 80 | py |
spektral | spektral-master/spektral/layers/base.py | import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import backend as K
from tensorflow.keras import constraints, initializers, regularizers
from tensorflow.keras.layers import Layer
from tensorflow.python.framework import smart_cond
from spektral.layers import ops
class Disjoint2Batch(Layer):
    r"""Utility layer that converts a disjoint graph batch to dense (batch)
    mode by zero-padding the node features and adjacency matrices.
    **Mode**: disjoint.
    **This layer expects a sparse adjacency matrix.**
    **Input**
    - Node features of shape `(n_nodes, n_node_features)`;
    - Binary adjacency matrix of shape `(n_nodes, n_nodes)`;
    - Graph IDs of shape `(n_nodes, )`;
    **Output**
    - Batched node features of shape `(batch, N_max, n_node_features)`;
    - Batched adjacency matrix of shape `(batch, N_max, N_max)`;
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def build(self, input_shape):
        assert len(input_shape) >= 2
        self.built = True
    def call(self, inputs, **kwargs):
        x, a, i = inputs
        dense_x = ops.disjoint_signal_to_batch(x, i)
        dense_a = ops.disjoint_adjacency_to_batch(a, i)
        # The conversion ops lose the static channel dimension; restore it so
        # that downstream layers can build their weights.
        dense_x.set_shape((None, None, x.shape[-1]))
        dense_a.set_shape((None, None, None))
        return dense_x, dense_a
class GraphMasking(Layer):
    """
    A layer that starts the propagation of masks in a model.
    It expects node features whose rightmost channel is a binary validity mask
    (1 for real nodes, 0 for zero-padding), as produced e.g. by a
    `data.BatchLoader` with `mask=True` or any manual zero-padding scheme.
    The layer strips that channel from the features and propagates it as a
    Keras mask to all subsequent layers:
    ```python
    print(x.shape) # shape (batch, n_nodes, n_node_features + 1)
    mask = x[..., -1:] # shape (batch, n_nodes, 1)
    x_new = x[..., :-1] # shape (batch, n_nodes, n_node_features)
    ```
    """
    def compute_mask(self, inputs, mask=None):
        # The mask is the rightmost feature channel.
        features = inputs[0] if isinstance(inputs, list) else inputs
        return features[..., -1:]
    def call(self, inputs, **kwargs):
        # Strip the mask channel from the node features.
        if isinstance(inputs, list):
            inputs[0] = inputs[0][..., :-1]
            return inputs
        return inputs[..., :-1]
class InnerProduct(Layer):
    r"""
    Computes the inner product between elements of a 2d Tensor:
    $$
    \langle \x, \x \rangle = \x\x^\top.
    $$
    **Mode**: single.
    **Input**
    - Tensor of shape `(n_nodes, n_features)`;
    **Output**
    - Tensor of shape `(n_nodes, n_nodes)`.
    :param trainable_kernel: add a trainable square matrix between the inner
    product (e.g., `X @ W @ X.T`);
    :param activation: activation function;
    :param kernel_initializer: initializer for the weights;
    :param kernel_regularizer: regularization applied to the kernel;
    :param kernel_constraint: constraint applied to the kernel;
    """
    def __init__(
        self,
        trainable_kernel=False,
        activation=None,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.trainable_kernel = trainable_kernel
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
    def build(self, input_shape):
        # The optional kernel must be square (F x F) so that X @ W @ X.T is
        # well-defined.
        if self.trainable_kernel:
            features_dim = input_shape[-1]
            self.kernel = self.add_weight(
                shape=(features_dim, features_dim),
                name="kernel",
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
            )
        self.built = True
    def call(self, inputs):
        if self.trainable_kernel:
            output = K.dot(K.dot(inputs, self.kernel), K.transpose(inputs))
        else:
            output = K.dot(inputs, K.transpose(inputs))
        if self.activation is not None:
            output = self.activation(output)
        return output
    def get_config(self):
        # NOTE(review): this stores the deserialized objects (not
        # `activations.serialize(...)` etc.); confirm that round-tripping
        # through `from_config` works before relying on model serialization.
        config = {
            "trainable_kernel": self.trainable_kernel,
            "activation": self.activation,
            "kernel_initializer": self.kernel_initializer,
            "kernel_regularizer": self.kernel_regularizer,
            "kernel_constraint": self.kernel_constraint,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
class MinkowskiProduct(Layer):
    r"""
    Computes the hyperbolic inner product between elements of a rank 2 Tensor:
    $$
    \langle \x, \x \rangle = \x \,
    \begin{pmatrix}
    \I_{d \times d} & 0 \\
    0 & -1
    \end{pmatrix} \, \x^\top.
    $$
    **Mode**: single.
    **Input**
    - Tensor of shape `(n_nodes, n_features)`;
    **Output**
    - Tensor of shape `(n_nodes, n_nodes)`.
    :param activation: activation function;
    """
    def __init__(self, activation=None, **kwargs):
        super().__init__(**kwargs)
        self.activation = activations.get(activation)
    def build(self, input_shape):
        assert len(input_shape) >= 2
        self.built = True
    def call(self, inputs):
        # Build the Minkowski metric diag(1, ..., 1, -1) of size F x F.
        # NOTE(review): F is a scalar tf.Tensor here; np.eye(F) works eagerly
        # (scalar tensors support __index__) but may fail if this layer is
        # traced in graph mode -- confirm.
        F = tf.shape(inputs)[-1]
        minkowski_prod_mat = np.eye(F)
        minkowski_prod_mat[-1, -1] = -1.0
        minkowski_prod_mat = K.constant(minkowski_prod_mat)
        output = K.dot(inputs, minkowski_prod_mat)
        output = K.dot(output, K.transpose(inputs))
        # Clip to the valid range for the hyperboloid model (products <= -1)
        # for numerical stability.
        output = K.clip(output, -10e9, -1.0)
        if self.activation is not None:
            output = self.activation(output)
        return output
    def get_config(self):
        config = {"activation": self.activation}
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
class SparseDropout(Layer):
    """Applies Dropout to the input.
    Dropout consists in randomly setting
    a fraction `rate` of input units to 0 at each update during training time,
    which helps prevent overfitting.
    This version operates on `tf.sparse.SparseTensor` inputs: only the stored
    (non-zero) values are dropped and rescaled.
    Arguments:
    rate: Float between 0 and 1. Fraction of the input units to drop.
    noise_shape: 1D integer tensor representing the shape of the
    binary dropout mask that will be multiplied with the input.
    For instance, if your inputs have shape
    `(batch_size, timesteps, features)` and
    you want the dropout mask to be the same for all timesteps,
    you can use `noise_shape=(batch_size, 1, features)`.
    seed: A Python integer to use as random seed.
    Call arguments:
    inputs: Input tensor (of any rank).
    training: Python boolean indicating whether the layer should behave in
    training mode (adding dropout) or in inference mode (doing nothing).
    """
    def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
        super().__init__(**kwargs)
        self.rate = rate
        self.noise_shape = noise_shape
        self.seed = seed
        self.supports_masking = True
    @staticmethod
    def _get_noise_shape(inputs):
        # One mask entry per stored (non-zero) value of the SparseTensor.
        return tf.shape(inputs.values)
    def call(self, inputs, training=None):
        if training is None:
            training = K.learning_phase()
        def dropped_inputs():
            return self.sparse_dropout(
                inputs,
                noise_shape=self._get_noise_shape(inputs),
                seed=self.seed,
                rate=self.rate,
            )
        # Apply dropout only in training mode; identity function otherwise.
        output = smart_cond.smart_cond(training, dropped_inputs, lambda: inputs)
        return output
    def get_config(self):
        config = {"rate": self.rate, "noise_shape": self.noise_shape, "seed": self.seed}
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
    @staticmethod
    def sparse_dropout(x, rate, noise_shape=None, seed=None):
        # Drop each stored value with probability `rate`, then rescale the
        # survivors by 1 / (1 - rate) (inverted dropout).
        random_tensor = tf.random.uniform(noise_shape, seed=seed, dtype=x.dtype)
        keep_prob = 1 - rate
        scale = 1 / keep_prob
        keep_mask = random_tensor >= rate
        output = tf.sparse.retain(x, keep_mask)
        # output = output * scale # gradient issues with automatic broadcasting
        output = output * tf.reshape(
            tf.convert_to_tensor(scale, dtype=output.dtype), (1,) * output.shape.ndims
        )
        return output
| 8,948 | 31.075269 | 88 | py |
spektral | spektral-master/spektral/layers/pooling/global_pool.py | import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import constraints, initializers, regularizers
from tensorflow.keras.layers import Dense, Layer
from spektral.layers import ops
class GlobalPool(Layer):
    """
    Base class for global graph pooling layers.

    Subclasses assign ``pooling_op`` (a segment-wise reduction used in
    disjoint mode) and ``batch_pooling_op`` (an axis-wise reduction used in
    single and batch mode). The data mode is inferred from the input shapes
    at build time.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
        self.pooling_op = None
        self.batch_pooling_op = None

    def build(self, input_shape):
        # A 2-element list of shapes means inputs are [X, I]: disjoint mode.
        if isinstance(input_shape, list) and len(input_shape) == 2:
            self.data_mode = "disjoint"
        elif len(input_shape) == 2:
            self.data_mode = "single"
        else:
            self.data_mode = "batch"
        super().build(input_shape)

    def call(self, inputs):
        if self.data_mode == "disjoint":
            X, I = inputs[0], inputs[1]
            # Flatten a (n_nodes, 1) index tensor to (n_nodes,).
            if K.ndim(I) == 2:
                I = I[:, 0]
            return self.pooling_op(X, I)
        # Single/batch mode: reduce over the node axis; keep it only in
        # single mode so the output is (1, F).
        return self.batch_pooling_op(
            inputs, axis=-2, keepdims=(self.data_mode == "single")
        )

    def compute_output_shape(self, input_shape):
        if self.data_mode == "single":
            return (1,) + input_shape[-1:]
        if self.data_mode == "batch":
            return input_shape[:-2] + input_shape[-1:]
        # Disjoint mode: input_shape is a list of shapes for X and I.
        return input_shape[0]
class GlobalSumPool(GlobalPool):
    """
    Global sum pooling: reduces a graph to a single vector by summing its
    node features.

    **Mode**: single, disjoint, mixed, batch.

    **Input**
    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);

    **Output**
    - Pooled node features of shape `(batch, n_node_features)` (in single
    mode the shape is `(1, n_node_features)`).

    **Arguments**
    None.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Segment-wise sum for disjoint mode, axis-wise sum otherwise.
        self.pooling_op = tf.math.segment_sum
        self.batch_pooling_op = tf.reduce_sum
class GlobalAvgPool(GlobalPool):
    """
    Global average pooling: reduces a graph to a single vector by averaging
    its node features.

    **Mode**: single, disjoint, mixed, batch.

    **Input**
    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);

    **Output**
    - Pooled node features of shape `(batch, n_node_features)` (in single
    mode the shape is `(1, n_node_features)`).

    **Arguments**
    None.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Segment-wise mean for disjoint mode, axis-wise mean otherwise.
        self.pooling_op = tf.math.segment_mean
        self.batch_pooling_op = tf.reduce_mean
class GlobalMaxPool(GlobalPool):
    """
    Global max pooling: reduces a graph to a single vector by taking the
    feature-wise maximum over its nodes.

    **Mode**: single, disjoint, mixed, batch.

    **Input**
    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);

    **Output**
    - Pooled node features of shape `(batch, n_node_features)` (in single
    mode the shape is `(1, n_node_features)`).

    **Arguments**
    None.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Segment-wise max for disjoint mode, axis-wise max otherwise.
        self.pooling_op = tf.math.segment_max
        self.batch_pooling_op = tf.reduce_max
class GlobalAttentionPool(GlobalPool):
    r"""
    A gated attention global pooling layer from the paper
    > [Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493)<br>
    > Yujia Li et al.

    This layer computes:
    $$
        \X' = \sum\limits_{i=1}^{N} (\sigma(\X \W_1 + \b_1) \odot (\X \W_2 + \b_2))_i
    $$
    where \(\sigma\) is the sigmoid activation function.

    **Mode**: single, disjoint, mixed, batch.

    **Input**
    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);

    **Output**
    - Pooled node features of shape `(batch, channels)` (if single mode,
    shape will be `(1, channels)`).

    **Arguments**
    - `channels`: integer, number of output channels;
    - `bias_initializer`: initializer for the bias vectors;
    - `kernel_regularizer`: regularization applied to the kernel matrices;
    - `bias_regularizer`: regularization applied to the bias vectors;
    - `kernel_constraint`: constraint applied to the kernel matrices;
    - `bias_constraint`: constraint applied to the bias vectors.
    """

    def __init__(
        self,
        channels,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.channels = channels
        # Resolve string/dict identifiers into concrete Keras objects.
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

    def build(self, input_shape):
        super().build(input_shape)
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
        )
        # Linear transform of the node features (X W_2 + b_2).
        self.features_layer = Dense(
            self.channels, name="features_layer", **layer_kwargs
        )
        # Sigmoid gate (sigma(X W_1 + b_1)).
        self.attention_layer = Dense(
            self.channels, activation="sigmoid", name="attn_layer", **layer_kwargs
        )
        self.built = True

    def call(self, inputs):
        if self.data_mode == "disjoint":
            X, I = inputs
            if K.ndim(I) == 2:
                I = I[:, 0]
        else:
            X = inputs
        inputs_linear = self.features_layer(X)
        attn = self.attention_layer(X)
        # Gate the transformed features, then sum over the node axis.
        masked_inputs = inputs_linear * attn
        if self.data_mode in {"single", "batch"}:
            output = K.sum(masked_inputs, axis=-2, keepdims=self.data_mode == "single")
        else:
            output = tf.math.segment_sum(masked_inputs, I)
        return output

    def compute_output_shape(self, input_shape):
        if self.data_mode == "single":
            return (1,) + (self.channels,)
        elif self.data_mode == "batch":
            return input_shape[:-2] + (self.channels,)
        else:
            output_shape = input_shape[0]
            output_shape = output_shape[:-1] + (self.channels,)
            return output_shape

    def get_config(self):
        """Return the layer configuration.

        Initializers, regularizers, and constraints are serialized (rather
        than stored as the raw objects produced by ``get`` in ``__init__``)
        so the config is JSON-serializable and model saving / ``from_config``
        round-trips work.
        """
        config = {
            "channels": self.channels,
            "kernel_initializer": initializers.serialize(self.kernel_initializer),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "bias_constraint": constraints.serialize(self.bias_constraint),
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
class GlobalAttnSumPool(GlobalPool):
    r"""
    A node-attention global pooling layer. Pools a graph by learning attention
    coefficients to sum node features.

    This layer computes:
    $$
        \alpha = \textrm{softmax}( \X \a); \\
        \X' = \sum\limits_{i=1}^{N} \alpha_i \cdot \X_i
    $$
    where \(\a \in \mathbb{R}^F\) is a trainable vector. Note that the softmax
    is applied across nodes, and not across features.

    **Mode**: single, disjoint, mixed, batch.

    **Input**
    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);

    **Output**
    - Pooled node features of shape `(batch, n_node_features)` (if single mode, shape will
    be `(1, n_node_features)`).

    **Arguments**
    - `attn_kernel_initializer`: initializer for the attention weights;
    - `attn_kernel_regularizer`: regularization applied to the attention kernel
    matrix;
    - `attn_kernel_constraint`: constraint applied to the attention kernel
    matrix;
    """

    def __init__(
        self,
        attn_kernel_initializer="glorot_uniform",
        attn_kernel_regularizer=None,
        attn_kernel_constraint=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Resolve string/dict identifiers into concrete Keras objects.
        self.attn_kernel_initializer = initializers.get(attn_kernel_initializer)
        self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)
        self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)

    def build(self, input_shape):
        assert len(input_shape) >= 2
        if isinstance(input_shape, list) and len(input_shape) == 2:
            self.data_mode = "disjoint"
            F = input_shape[0][-1]
        else:
            if len(input_shape) == 2:
                self.data_mode = "single"
            else:
                self.data_mode = "batch"
            F = input_shape[-1]
        # Attention kernels
        self.attn_kernel = self.add_weight(
            shape=(F, 1),
            initializer=self.attn_kernel_initializer,
            regularizer=self.attn_kernel_regularizer,
            constraint=self.attn_kernel_constraint,
            name="attn_kernel",
        )
        self.built = True

    def call(self, inputs):
        if self.data_mode == "disjoint":
            X, I = inputs
            if K.ndim(I) == 2:
                I = I[:, 0]
        else:
            X = inputs
        # One scalar score per node: X @ a, squeezed to (..., n_nodes).
        attn_coeff = K.dot(X, self.attn_kernel)
        attn_coeff = K.squeeze(attn_coeff, -1)
        if self.data_mode == "single":
            attn_coeff = K.softmax(attn_coeff)
            output = K.dot(attn_coeff[None, ...], X)
        elif self.data_mode == "batch":
            attn_coeff = K.softmax(attn_coeff)
            output = K.batch_dot(attn_coeff, X)
        else:
            # Disjoint mode: softmax within each graph segment, then
            # weighted sum per graph.
            attn_coeff = ops.unsorted_segment_softmax(attn_coeff, I, K.shape(X)[0])
            output = attn_coeff[:, None] * X
            output = tf.math.segment_sum(output, I)
        return output

    def get_config(self):
        """Return the layer configuration.

        Initializer, regularizer, and constraint are serialized (rather than
        stored as the raw objects produced by ``get`` in ``__init__``) so the
        config is JSON-serializable and model saving / ``from_config``
        round-trips work.
        """
        config = {
            "attn_kernel_initializer": initializers.serialize(
                self.attn_kernel_initializer
            ),
            "attn_kernel_regularizer": regularizers.serialize(
                self.attn_kernel_regularizer
            ),
            "attn_kernel_constraint": constraints.serialize(
                self.attn_kernel_constraint
            ),
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
class SortPool(Layer):
    r"""
    A SortPool layer as described by
    [Zhang et al](https://www.cse.wustl.edu/~muhan/papers/AAAI_2018_DGCNN.pdf).
    This layers takes a graph signal \(\mathbf{X}\) and returns the topmost k
    rows according to the last column.
    If \(\mathbf{X}\) has less than k rows, the result is zero-padded to k.

    **Mode**: single, disjoint, batch.

    **Input**
    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);

    **Output**
    - Pooled node features of shape `(batch, k, n_node_features)` (if single mode, shape will
    be `(1, k, n_node_features)`).

    **Arguments**
    - `k`: integer, number of nodes to keep;
    """

    def __init__(self, k, **kwargs):
        super().__init__(**kwargs)
        # Coerce to int so float/str inputs fail early and consistently.
        k = int(k)
        if k <= 0:
            raise ValueError("K must be a positive integer")
        self.k = k

    def build(self, input_shape):
        # A 2-element list of shapes means inputs are [X, I]: disjoint mode.
        if isinstance(input_shape, list) and len(input_shape) == 2:
            self.data_mode = "disjoint"
            self.F = input_shape[0][-1]
        else:
            if len(input_shape) == 2:
                self.data_mode = "single"
            else:
                self.data_mode = "batch"
            self.F = input_shape[-1]

    def call(self, inputs):
        if self.data_mode == "disjoint":
            X, I = inputs
            # Convert the disjoint node set into a dense (batch, N, F) tensor
            # so the batched sort below applies uniformly.
            X = ops.disjoint_signal_to_batch(X, I)
        else:
            X = inputs
            if self.data_mode == "single":
                # Add a dummy batch axis; removed again after pooling.
                X = tf.expand_dims(X, 0)

        N = tf.shape(X)[-2]
        # Sort nodes by their last feature, descending.
        sort_perm = tf.argsort(X[..., -1], direction="DESCENDING")
        X_sorted = tf.gather(X, sort_perm, axis=-2, batch_dims=1)

        def truncate():
            # More than k nodes: keep the top k rows.
            _X_out = X_sorted[..., : self.k, :]
            return _X_out

        def pad():
            # Fewer than k nodes: zero-pad the node axis up to k.
            padding = [[0, 0], [0, self.k - N], [0, 0]]
            _X_out = tf.pad(X_sorted, padding)
            return _X_out

        # N is dynamic, so the branch must be chosen at runtime via tf.cond.
        X_out = tf.cond(tf.less_equal(self.k, N), truncate, pad)

        if self.data_mode == "single":
            X_out = tf.squeeze(X_out, [0])
            X_out.set_shape((self.k, self.F))
        elif self.data_mode == "batch" or self.data_mode == "disjoint":
            X_out.set_shape((None, self.k, self.F))

        return X_out

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = {
            "k": self.k,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        if self.data_mode == "single":
            return self.k, self.F
        elif self.data_mode == "batch" or self.data_mode == "disjoint":
            # NOTE(review): in disjoint mode input_shape is a list of shapes,
            # so input_shape[0] is the shape of X rather than a batch size —
            # confirm intended output shape for that mode.
            return input_shape[0], self.k, self.F
# Registry mapping string identifiers to global pooling layer classes.
layers = {
    "sum": GlobalSumPool,
    "avg": GlobalAvgPool,
    "max": GlobalMaxPool,
    "attn": GlobalAttentionPool,
    "attn_sum": GlobalAttnSumPool,
    "sort": SortPool,
}


def get(identifier):
    """Look up a global pooling layer class by its string identifier.

    Raises ValueError if the identifier is not in the registry.
    """
    if identifier in layers:
        return layers[identifier]
    raise ValueError(
        "Unknown identifier {}. Available: {}".format(
            identifier, list(layers.keys())
        )
    )
| 14,218 | 29.91087 | 93 | py |
spektral | spektral-master/spektral/layers/pooling/asym_cheeger_cut_pool.py | import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from spektral.layers import ops
from spektral.layers.pooling.src import SRCPool
class AsymCheegerCutPool(SRCPool):
    r"""
    An Asymmetric Cheeger Cut Pooling layer from the paper
    > [Total Variation Graph Neural Networks](https://arxiv.org/abs/2211.06218)<br>
    > Jonas Berg Hansen and Filippo Maria Bianchi

    **Mode**: single, batch.

    This layer learns a soft clustering of the input graph as follows:
    $$
    \begin{align}
        \S &= \textrm{MLP}(\X); \\
        \X' &= \S^\top \X \\
        \A' &= \S^\top \A \S; \\
    \end{align}
    $$
    where \(\textrm{MLP}\) is a multi-layer perceptron with softmax output.

    The layer includes two auxiliary loss terms/components:
    A graph total variation component given by
    $$
        L_\text{GTV} = \frac{1}{2E} \sum_{k=1}^K \sum_{i=1}^N \sum_{j=i}^N a_{i,j} |s_{i,k} - s_{j,k}|,
    $$
    where \(E\) is the number of edges/links, \(K\) is the number of clusters or output nodes, and \(N\) is the number of nodes.

    An asymmetrical norm component given by
    $$
        L_\text{AN} = \frac{N(K - 1) - \sum_{k=1}^K ||\s_{:,k} - \textrm{quant}_{K-1} (\s_{:,k})||_{1, K-1}}{N(K-1)},
    $$

    The layer can be used without a supervised loss to compute node clustering by
    minimizing the two auxiliary losses.

    **Input**
    - Node features of shape `(batch, n_nodes_in, n_node_features)`;
    - Adjacency matrix of shape `(batch, n_nodes_in, n_nodes_in)`;

    **Output**
    - Reduced node features of shape `(batch, n_nodes_out, n_node_features)`;
    - If `return_selection=True`, the selection matrix of shape
    `(batch, n_nodes_in, n_nodes_out)`.

    **Arguments**
    - `k`: number of output nodes;
    - `mlp_hidden`: list of integers, number of hidden units for each hidden layer in
    the MLP used to compute cluster assignments (if `None`, the MLP has only one output
    layer);
    - `mlp_activation`: activation for the MLP layers;
    - `totvar_coeff`: coefficient for graph total variation loss component;
    - `balance_coeff`: coefficient for asymmetric norm loss component;
    - `return_selection`: boolean, whether to return the selection matrix;
    - `use_bias`: use bias in the MLP;
    - `kernel_initializer`: initializer for the weights of the MLP;
    - `bias_regularizer`: regularization applied to the bias of the MLP;
    - `kernel_constraint`: constraint applied to the weights of the MLP;
    - `bias_constraint`: constraint applied to the bias of the MLP;
    """

    def __init__(
        self,
        k,
        mlp_hidden=None,
        mlp_activation="relu",
        totvar_coeff=1.0,
        balance_coeff=1.0,
        return_selection=False,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            k=k,
            mlp_hidden=mlp_hidden,
            mlp_activation=mlp_activation,
            return_selection=return_selection,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.k = k
        self.mlp_hidden = mlp_hidden if mlp_hidden else []
        self.mlp_activation = mlp_activation
        # Coefficients weighting the two auxiliary losses added in select().
        self.totvar_coeff = totvar_coeff
        self.balance_coeff = balance_coeff

    def build(self, input_shape):
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
        )
        # MLP producing the soft cluster assignment S: hidden layers with the
        # configured activation, then a softmax layer with k outputs.
        self.mlp = Sequential(
            [
                Dense(channels, self.mlp_activation, **layer_kwargs)
                for channels in self.mlp_hidden
            ]
            + [Dense(self.k, "softmax", **layer_kwargs)]
        )

        super().build(input_shape)

    def call(self, inputs, mask=None):
        # get_inputs/pool are inherited from SRCPool (select-reduce-connect).
        x, a, i = self.get_inputs(inputs)
        return self.pool(x, a, i, mask=mask)

    def select(self, x, a, i, mask=None):
        """Compute the selection matrix S and register the auxiliary losses."""
        s = self.mlp(x)
        if mask is not None:
            s *= mask[0]

        # Total variation loss
        tv_loss = self.totvar_loss(a, s)
        if K.ndim(a) == 3:
            # Batch mode: average the per-graph losses.
            tv_loss = K.mean(tv_loss)
        self.add_loss(self.totvar_coeff * tv_loss)

        # Asymmetric l1-norm loss
        bal_loss = self.balance_loss(s)
        if K.ndim(a) == 3:
            bal_loss = K.mean(bal_loss)
        self.add_loss(self.balance_coeff * bal_loss)

        return s

    def reduce(self, x, s, **kwargs):
        # X' = S^T X
        return ops.modal_dot(s, x, transpose_a=True)

    def connect(self, a, s, **kwargs):
        # A' = S^T A S
        a_pool = ops.matmul_at_b_a(s, a)

        return a_pool

    def reduce_index(self, i, s, **kwargs):
        # Each input graph is reduced to exactly k output nodes, so repeat
        # each graph ID k times.
        i_mean = tf.math.segment_mean(i, i)
        i_pool = ops.repeat(i_mean, tf.ones_like(i_mean) * self.k)

        return i_pool

    def totvar_loss(self, a, s):
        """Graph total variation of S, normalized by twice the edge count."""
        if K.is_sparse(a):
            # Sparse path: iterate only over stored edges (i, j).
            index_i = a.indices[:, 0]
            index_j = a.indices[:, 1]

            n_edges = tf.cast(len(a.values), dtype=s.dtype)

            loss = tf.math.reduce_sum(
                a.values[:, tf.newaxis]
                * tf.math.abs(tf.gather(s, index_i) - tf.gather(s, index_j)),
                axis=(-2, -1),
            )

        else:
            # Dense path: pairwise |s_i - s_j| weighted by a_ij.
            n_edges = tf.cast(tf.math.count_nonzero(a, axis=(-2, -1)), dtype=s.dtype)
            n_nodes = tf.shape(a)[-1]
            if K.ndim(a) == 3:
                loss = tf.math.reduce_sum(
                    a
                    * tf.math.reduce_sum(
                        tf.math.abs(
                            s[:, tf.newaxis, ...]
                            - tf.repeat(s[..., tf.newaxis, :], n_nodes, axis=-2)
                        ),
                        axis=-1,
                    ),
                    axis=(-2, -1),
                )
            else:
                loss = tf.math.reduce_sum(
                    a
                    * tf.math.reduce_sum(
                        tf.math.abs(
                            s - tf.repeat(s[..., tf.newaxis, :], n_nodes, axis=-2)
                        ),
                        axis=-1,
                    ),
                    axis=(-2, -1),
                )

        loss *= 1 / (2 * n_edges)

        return loss

    def balance_loss(self, s):
        """Asymmetric l1-norm balance term measuring cluster-size skew."""
        n_nodes = tf.cast(tf.shape(s, out_type=tf.int32)[-2], s.dtype)

        # k-quantile
        idx = tf.cast(tf.math.floor(n_nodes / self.k) + 1, dtype=tf.int32)
        med = tf.math.top_k(tf.linalg.matrix_transpose(s), k=idx).values[..., -1]

        # Asymmetric l1-norm: positive deviations weighted by (k - 1),
        # negative deviations by 1.
        if K.ndim(s) == 2:
            loss = s - med
        else:
            loss = s - med[:, tf.newaxis, ...]
        loss = (tf.cast(loss >= 0, loss.dtype) * (self.k - 1) * loss) + (
            tf.cast(loss < 0, loss.dtype) * loss * -1.0
        )
        loss = tf.math.reduce_sum(loss, axis=(-2, -1))
        loss = 1 / (n_nodes * (self.k - 1)) * (n_nodes * (self.k - 1) - loss)

        return loss

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = {
            "k": self.k,
            "mlp_hidden": self.mlp_hidden,
            "mlp_activation": self.mlp_activation,
            "totvar_coeff": self.totvar_coeff,
            "balance_coeff": self.balance_coeff,
        }
        base_config = super().get_config()
        return {**base_config, **config}
| 8,006 | 32.642857 | 128 | py |
spektral | spektral-master/spektral/layers/pooling/dmon_pool.py | import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from spektral.layers import ops
from spektral.layers.pooling.src import SRCPool
class DMoNPool(SRCPool):
    r"""
    The DMoN pooling layer from the paper
    > [Graph Clustering with Graph Neural Networks](https://arxiv.org/abs/2006.16904)<br>
    > Anton Tsitsulin et al.

    **Mode**: single, batch.

    This layer learns a soft clustering of the input graph as follows:
    $$
    \begin{align}
        \C &= \textrm{MLP}(\X); \\
        \X' &= \C^\top \X \\
        \A' &= \C^\top \A \C; \\
    \end{align}
    $$
    where \(\textrm{MLP}\) is a multi-layer perceptron with softmax output.

    Two auxiliary loss terms are also added to the model: the modularity loss
    $$
        L_m = - \frac{1}{2m} \mathrm{Tr}(\C^\top \A \C - \C^\top \d^\top \d \C)
    $$
    and the collapse regularization loss
    $$
        L_c = \frac{\sqrt{k}}{n} \left\|
            \sum_i \C_i^\top
        \right\|_F -1.
    $$

    This layer is based on the original implementation found
    [here](https://github.com/google-research/google-research/blob/master/graph_embedding/dmon/dmon.py).

    **Input**
    - Node features of shape `(batch, n_nodes_in, n_node_features)`;
    - Symmetrically normalized adjacency matrix of shape
    `(batch, n_nodes_in, n_nodes_in)`;

    **Output**
    - Reduced node features of shape `(batch, n_nodes_out, n_node_features)`;
    - Reduced adjacency matrix of shape `(batch, n_nodes_out, n_nodes_out)`;
    - If `return_selection=True`, the selection matrix of shape
    `(batch, n_nodes_in, n_nodes_out)`.

    **Arguments**
    - `k`: number of output nodes;
    - `mlp_hidden`: list of integers, number of hidden units for each hidden layer in
    the MLP used to compute cluster assignments (if `None`, the MLP has only one output
    layer);
    - `mlp_activation`: activation for the MLP layers;
    - `collapse_regularization`: strength of the collapse regularization;
    - `return_selection`: boolean, whether to return the selection matrix;
    - `use_bias`: use bias in the MLP;
    - `kernel_initializer`: initializer for the weights of the MLP;
    - `bias_initializer`: initializer for the bias of the MLP;
    - `kernel_regularizer`: regularization applied to the weights of the MLP;
    - `bias_regularizer`: regularization applied to the bias of the MLP;
    - `kernel_constraint`: constraint applied to the weights of the MLP;
    - `bias_constraint`: constraint applied to the bias of the MLP;
    """

    def __init__(
        self,
        k,
        mlp_hidden=None,
        mlp_activation="relu",
        return_selection=False,
        collapse_regularization=0.1,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            k=k,
            mlp_hidden=mlp_hidden,
            mlp_activation=mlp_activation,
            return_selection=return_selection,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.k = k
        self.mlp_hidden = mlp_hidden if mlp_hidden is not None else []
        self.mlp_activation = mlp_activation
        # Weight of the collapse regularization loss added in select().
        self.collapse_regularization = collapse_regularization

    def build(self, input_shape):
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
        )
        # MLP producing the soft cluster assignment C: hidden layers with the
        # configured activation, then a softmax layer with k outputs.
        self.mlp = Sequential(
            [
                Dense(channels, self.mlp_activation, **layer_kwargs)
                for channels in self.mlp_hidden
            ]
            + [Dense(self.k, "softmax", **layer_kwargs)]
        )

        super().build(input_shape)

    def call(self, inputs, mask=None):
        # get_inputs/pool are inherited from SRCPool (select-reduce-connect).
        x, a, i = self.get_inputs(inputs)
        return self.pool(x, a, i, mask=mask)

    def select(self, x, a, i, mask=None):
        """Compute the selection matrix C and add the collapse loss."""
        s = self.mlp(x)
        if mask is not None:
            s *= mask[0]

        # Collapse loss
        col_loss = self.collapse_loss(a, s)
        if K.ndim(a) == 3:
            # Batch mode: average the per-graph losses.
            col_loss = K.mean(col_loss)
        self.add_loss(self.collapse_regularization * col_loss)

        return s

    def reduce(self, x, s, **kwargs):
        # X' = C^T X
        return ops.modal_dot(s, x, transpose_a=True)

    def connect(self, a, s, **kwargs):
        # A' = C^T A C; the modularity loss needs the pooled adjacency, so it
        # is registered here rather than in select().
        a_pool = ops.matmul_at_b_a(s, a)

        # Modularity loss
        mod_loss = self.modularity_loss(a, s, a_pool)
        if K.ndim(a) == 3:
            mod_loss = K.mean(mod_loss)
        self.add_loss(mod_loss)

        return a_pool

    def reduce_index(self, i, s, **kwargs):
        # Each input graph is reduced to exactly k output nodes, so repeat
        # each graph ID k times.
        i_mean = tf.math.segment_mean(i, i)
        i_pool = ops.repeat(i_mean, tf.ones_like(i_mean) * self.k)

        return i_pool

    def modularity_loss(self, a, s, a_pool):
        """Spectral modularity loss -Tr(C^T A C - C^T d^T d C) / 2m."""
        if K.is_sparse(a):
            n_edges = tf.cast(len(a.values), dtype=s.dtype)

            degrees = tf.sparse.reduce_sum(a, axis=-1)
            degrees = tf.reshape(degrees, (-1, 1))
        else:
            n_edges = tf.cast(tf.math.count_nonzero(a, axis=(-2, -1)), dtype=s.dtype)
            degrees = tf.reduce_sum(a, axis=-1, keepdims=True)

        # Null-model term (C^T d)(d^T C) / 2m.
        normalizer_left = tf.matmul(s, degrees, transpose_a=True)
        normalizer_right = tf.matmul(degrees, s, transpose_a=True)

        if K.ndim(s) == 3:
            normalizer = (
                ops.modal_dot(normalizer_left, normalizer_right)
                / 2
                / tf.reshape(n_edges, [tf.shape(n_edges)[0]] + [1] * 2)
            )
        else:
            normalizer = ops.modal_dot(normalizer_left, normalizer_right) / 2 / n_edges
        loss = -tf.linalg.trace(a_pool - normalizer) / 2 / n_edges

        return loss

    def collapse_loss(self, a, s):
        """Collapse regularization: sqrt(k)/n * ||sum_i C_i||_F - 1."""
        cluster_sizes = tf.math.reduce_sum(s, axis=-2)
        n_nodes = tf.cast(tf.shape(a)[-1], s.dtype)
        loss = (
            tf.norm(cluster_sizes, axis=-1)
            / n_nodes
            * tf.sqrt(tf.cast(self.k, s.dtype))
            - 1
        )

        return loss

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = {
            "collapse_regularization": self.collapse_regularization,
            "k": self.k,
            "mlp_hidden": self.mlp_hidden,
            "mlp_activation": self.mlp_activation,
        }
        base_config = super().get_config()
        return {**base_config, **config}
| 6,994 | 32.151659 | 104 | py |
spektral | spektral-master/spektral/layers/pooling/diff_pool.py | import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import backend as K
from spektral.layers import ops
from spektral.layers.pooling.src import SRCPool
class DiffPool(SRCPool):
    r"""
    A DiffPool layer from the paper
    > [Hierarchical Graph Representation Learning with Differentiable Pooling](https://arxiv.org/abs/1806.08804)<br>
    > Rex Ying et al.

    **Mode**: single, batch.

    This layer learns a soft clustering of the input graph as follows:
    $$
    \begin{align}
        \Z &= \textrm{GNN}_{embed}(\A, \X); \\
        \S &= \textrm{GNN}_{pool}(\A, \X); \\
        \X' &= \S^\top \Z; \\
        \A' &= \S^\top \A \S; \\
    \end{align}
    $$
    where:
    $$
        \textrm{GNN}_{\square}(\A, \X) = \D^{-1/2} \A \D^{-1/2} \X \W_{\square}.
    $$
    The number of output channels of \(\textrm{GNN}_{embed}\) is controlled by the
    `channels` parameter.

    Two auxiliary loss terms are also added to the model: the link prediction loss
    $$
        L_{LP} = \big\| \A - \S\S^\top \big\|_F
    $$
    and the entropy loss
    $$
        L_{E} - \frac{1}{N} \sum\limits_{i = 1}^{N} \S \log (\S).
    $$

    The layer can be used without a supervised loss to compute node clustering by
    minimizing the two auxiliary losses.

    **Input**
    - Node features of shape `(batch, n_nodes_in, n_node_features)`;
    - Adjacency matrix of shape `(batch, n_nodes_in, n_nodes_in)`;

    **Output**
    - Reduced node features of shape `(batch, n_nodes_out, channels)`;
    - Reduced adjacency matrix of shape `(batch, n_nodes_out, n_nodes_out)`;
    - If `return_selection=True`, the selection matrix of shape
    `(batch, n_nodes_in, n_nodes_out)`.

    **Arguments**
    - `k`: number of output nodes;
    - `channels`: number of output channels (if `None`, the number of output channels is
    the same as the input);
    - `return_selection`: boolean, whether to return the selection matrix;
    - `activation`: activation to apply after reduction;
    - `kernel_initializer`: initializer for the weights;
    - `kernel_regularizer`: regularization applied to the weights;
    - `kernel_constraint`: constraint applied to the weights;
    """

    def __init__(
        self,
        k,
        channels=None,
        return_selection=False,
        activation=None,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        **kwargs,
    ):
        super().__init__(
            return_selection=return_selection,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            kernel_constraint=kernel_constraint,
            **kwargs,
        )
        self.k = k
        self.channels = channels
        self.activation = activations.get(activation)

    def build(self, input_shape):
        in_channels = input_shape[0][-1]
        if self.channels is None:
            # Default: keep the input feature dimensionality.
            self.channels = in_channels

        # Weights of GNN_embed (produces Z).
        self.kernel_emb = self.add_weight(
            shape=(in_channels, self.channels),
            name="kernel_emb",
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )

        # Weights of GNN_pool (produces S with k output clusters).
        self.kernel_pool = self.add_weight(
            shape=(in_channels, self.k),
            name="kernel_pool",
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )

        super().build(input_shape)

    def call(self, inputs, mask=None):
        x, a, i = self.get_inputs(inputs)

        # Graph filter for GNNs: symmetrically normalized A + I.
        # NOTE(review): self.n_nodes is presumably set by the SRCPool base
        # (not visible here) — confirm.
        if K.is_sparse(a):
            i_n = tf.sparse.eye(self.n_nodes, dtype=a.dtype)
            a_ = tf.sparse.add(a, i_n)
        else:
            i_n = tf.eye(self.n_nodes, dtype=a.dtype)
            a_ = a + i_n
        fltr = ops.normalize_A(a_)

        output = self.pool(x, a, i, fltr=fltr, mask=mask)
        return output

    def select(self, x, a, i, fltr=None, mask=None):
        """Compute the selection matrix S and add the auxiliary losses."""
        s = ops.modal_dot(fltr, K.dot(x, self.kernel_pool))
        s = activations.softmax(s, axis=-1)
        if mask is not None:
            s *= mask[0]

        # Auxiliary losses
        lp_loss = self.link_prediction_loss(a, s)
        entr_loss = self.entropy_loss(s)
        if K.ndim(x) == 3:
            # Batch mode: average the per-graph losses.
            lp_loss = K.mean(lp_loss)
            entr_loss = K.mean(entr_loss)
        self.add_loss(lp_loss)
        self.add_loss(entr_loss)

        return s

    def reduce(self, x, s, fltr=None):
        # Z = activation(filter @ X @ W_emb); X' = S^T Z.
        z = ops.modal_dot(fltr, K.dot(x, self.kernel_emb))
        z = self.activation(z)

        return ops.modal_dot(s, z, transpose_a=True)

    def connect(self, a, s, **kwargs):
        # A' = S^T A S
        return ops.matmul_at_b_a(s, a)

    def reduce_index(self, i, s, **kwargs):
        # Each input graph is reduced to exactly k output nodes, so repeat
        # each graph ID k times.
        i_mean = tf.math.segment_mean(i, i)
        i_pool = ops.repeat(i_mean, tf.ones_like(i_mean) * self.k)

        return i_pool

    @staticmethod
    def link_prediction_loss(a, s):
        """Frobenius norm of A - S S^T."""
        s_gram = ops.modal_dot(s, s, transpose_b=True)
        if K.is_sparse(a):
            lp_loss = tf.sparse.add(a, -s_gram)
        else:
            lp_loss = a - s_gram
        lp_loss = tf.norm(lp_loss, axis=(-1, -2))
        return lp_loss

    @staticmethod
    def entropy_loss(s):
        """Mean per-node entropy of the soft assignments S."""
        # epsilon guards against log(0) for hard assignments.
        entr = tf.negative(
            tf.reduce_sum(tf.multiply(s, K.log(s + K.epsilon())), axis=-1)
        )
        entr_loss = K.mean(entr, axis=-1)
        return entr_loss

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = {"k": self.k, "channels": self.channels}
        base_config = super().get_config()
        return {**base_config, **config}
| 5,706 | 30.357143 | 116 | py |
spektral | spektral-master/spektral/layers/pooling/sag_pool.py | import tensorflow as tf
from tensorflow.keras import backend as K
from spektral.layers import ops
from spektral.layers.pooling.topk_pool import TopKPool
class SAGPool(TopKPool):
    r"""
    A self-attention graph pooling layer from the paper
    > [Self-Attention Graph Pooling](https://arxiv.org/abs/1904.08082)<br>
    > Junhyun Lee et al.

    **Mode**: single, disjoint.

    This layer computes:
    $$
        \y = \textrm{GNN}(\A, \X); \;\;\;\;
        \i = \textrm{rank}(\y, K); \;\;\;\;
        \X' = (\X \odot \textrm{tanh}(\y))_\i; \;\;\;\;
        \A' = \A_{\i, \i}
    $$
    where \(\textrm{rank}(\y, K)\) returns the indices of the top K values of \(\y\) and
    $$
        \textrm{GNN}(\A, \X) = \A \X \W.
    $$
    \(K\) is defined for each graph as a fraction of the number of nodes, controlled by
    the `ratio` argument.

    The gating operation \(\textrm{tanh}(\y)\) (Cangea et al.) can be replaced with a
    sigmoid (Gao & Ji).

    **Input**
    - Node features of shape `(n_nodes_in, n_node_features)`;
    - Adjacency matrix of shape `(n_nodes_in, n_nodes_in)`;
    - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);

    **Output**
    - Reduced node features of shape `(ratio * n_nodes_in, n_node_features)`;
    - Reduced adjacency matrix of shape `(ratio * n_nodes_in, ratio * n_nodes_in)`;
    - Reduced graph IDs of shape `(ratio * n_nodes_in, )` (only in disjoint mode);
    - If `return_selection=True`, the selection mask of shape `(ratio * n_nodes_in, )`.
    - If `return_score=True`, the scoring vector of shape `(n_nodes_in, )`

    **Arguments**
    - `ratio`: float between 0 and 1, ratio of nodes to keep in each graph;
    - `return_selection`: boolean, whether to return the selection mask;
    - `return_score`: boolean, whether to return the node scoring vector;
    - `sigmoid_gating`: boolean, use a sigmoid activation for gating instead of a
    tanh;
    - `kernel_initializer`: initializer for the weights;
    - `kernel_regularizer`: regularization applied to the weights;
    - `kernel_constraint`: constraint applied to the weights;
    """

    def __init__(
        self,
        ratio,
        return_selection=False,
        return_score=False,
        sigmoid_gating=False,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        **kwargs,
    ):
        # Everything is delegated to TopKPool; SAGPool only changes how the
        # node scores y are computed (see call below).
        super().__init__(
            ratio,
            return_selection=return_selection,
            return_score=return_score,
            sigmoid_gating=sigmoid_gating,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            kernel_constraint=kernel_constraint,
            **kwargs,
        )

    def call(self, inputs):
        x, a, i = self.get_inputs(inputs)

        # Graph filter for GNN: symmetrically normalized A + I.
        # NOTE(review): self.n_nodes and self.kernel are presumably set by
        # the TopKPool/SRCPool base classes (not visible here) — confirm.
        if K.is_sparse(a):
            i_n = tf.sparse.eye(self.n_nodes, dtype=a.dtype)
            a_ = tf.sparse.add(a, i_n)
        else:
            i_n = tf.eye(self.n_nodes, dtype=a.dtype)
            a_ = a + i_n
        fltr = ops.normalize_A(a_)

        # Score nodes with a one-layer GNN instead of TopKPool's projection.
        y = ops.modal_dot(fltr, K.dot(x, self.kernel))
        output = self.pool(x, a, i, y=y)
        if self.return_score:
            output.append(y)

        return output
| 3,287 | 31.554455 | 88 | py |
spektral | spektral-master/spektral/layers/pooling/topk_pool.py | import tensorflow as tf
from tensorflow.keras import backend as K
from spektral.layers import ops
from spektral.layers.pooling.src import SRCPool
class TopKPool(SRCPool):
    r"""
    A gPool/Top-K layer from the papers

    > [Graph U-Nets](https://arxiv.org/abs/1905.05178)<br>
    > Hongyang Gao and Shuiwang Ji

    and

    > [Towards Sparse Hierarchical Graph Classifiers](https://arxiv.org/abs/1811.01287)<br>
    > Cătălina Cangea et al.

    **Mode**: single, disjoint.

    This layer computes:
    $$
    \y = \frac{\X\p}{\|\p\|}; \;\;\;\;
    \i = \textrm{rank}(\y, K); \;\;\;\;
    \X' = (\X \odot \textrm{tanh}(\y))_\i; \;\;\;\;
    \A' = \A_{\i, \i}
    $$
    where \(\textrm{rank}(\y, K)\) returns the indices of the top K values of
    \(\y\), and \(\p\) is a learnable parameter vector of size \(F\).
    \(K\) is defined for each graph as a fraction of the number of nodes,
    controlled by the `ratio` argument.
    The gating operation \(\textrm{tanh}(\y)\) (Cangea et al.) can be replaced with a
    sigmoid (Gao & Ji).

    **Input**

    - Node features of shape `(n_nodes_in, n_node_features)`;
    - Adjacency matrix of shape `(n_nodes_in, n_nodes_in)`;
    - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);

    **Output**

    - Reduced node features of shape `(ratio * n_nodes_in, n_node_features)`;
    - Reduced adjacency matrix of shape `(ratio * n_nodes_in, ratio * n_nodes_in)`;
    - Reduced graph IDs of shape `(ratio * n_nodes_in, )` (only in disjoint mode);
    - If `return_selection=True`, the selection mask of shape `(ratio * n_nodes_in, )`.
    - If `return_score=True`, the scoring vector of shape `(n_nodes_in, )`

    **Arguments**

    - `ratio`: float between 0 and 1, ratio of nodes to keep in each graph;
    - `return_selection`: boolean, whether to return the selection mask;
    - `return_score`: boolean, whether to return the node scoring vector;
    - `sigmoid_gating`: boolean, use a sigmoid activation for gating instead of a
    tanh;
    - `kernel_initializer`: initializer for the weights;
    - `kernel_regularizer`: regularization applied to the weights;
    - `kernel_constraint`: constraint applied to the weights;
    """

    def __init__(
        self,
        ratio,
        return_selection=False,
        return_score=False,
        sigmoid_gating=False,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        **kwargs,
    ):
        super().__init__(
            return_selection=return_selection,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            kernel_constraint=kernel_constraint,
            **kwargs,
        )
        self.ratio = ratio  # fraction of nodes kept in each graph
        self.return_score = return_score
        self.sigmoid_gating = sigmoid_gating
        # Gating non-linearity applied to the scores in reduce():
        # sigmoid (Gao & Ji) or tanh (Cangea et al.)
        self.gating_op = K.sigmoid if self.sigmoid_gating else K.tanh

    def build(self, input_shape):
        self.n_nodes = input_shape[0][0]
        # Trainable projection vector p of shape (F, 1) used to score nodes
        self.kernel = self.add_weight(
            shape=(input_shape[0][-1], 1),
            name="kernel",
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        x, a, i = self.get_inputs(inputs)
        # Node scores: y = X p / ||p||
        y = K.dot(x, K.l2_normalize(self.kernel))
        output = self.pool(x, a, i, y=y)
        if self.return_score:
            output.append(y)
        return output

    def select(self, x, a, i, y=None):
        # Single mode: treat all nodes as one segment
        # (segment_top_k casts the segment IDs to int32 itself)
        if i is None:
            i = tf.zeros(self.n_nodes)
        # Indices of the top-K scoring nodes of each graph
        s = segment_top_k(y[:, 0], i, self.ratio)

        return tf.sort(s)

    def reduce(self, x, s, y=None):
        # Gate the features with the squashed scores (keeps the layer
        # differentiable w.r.t. the scoring vector), then keep selected rows
        x_pool = tf.gather(x * self.gating_op(y), s)

        return x_pool

    def get_outputs(self, x_pool, a_pool, i_pool, s):
        output = [x_pool, a_pool]
        if i_pool is not None:
            output.append(i_pool)
        if self.return_selection:
            # Convert sparse indices to boolean mask
            s = tf.scatter_nd(s[:, None], tf.ones_like(s), (self.n_nodes,))
            output.append(s)

        return output

    def get_config(self):
        config = {
            "ratio": self.ratio,
        }
        base_config = super().get_config()
        return {**base_config, **config}
def segment_top_k(x, i, ratio):
    """
    Returns indices to get the top K values in x segment-wise, according to
    the segments defined in I. K is not fixed, but it is defined as a ratio of
    the number of elements in each segment.

    :param x: a rank 1 Tensor;
    :param i: a rank 1 Tensor with segment IDs for x;
    :param ratio: float, ratio of elements to keep for each segment;
    :return: a rank 1 Tensor containing the indices to get the top K values of
    each segment in x.
    """
    i = tf.cast(i, tf.int32)
    n = tf.shape(i)[0]
    # Segment sizes (assumes i is sorted, as required by tf.math.segment_sum)
    n_nodes = tf.math.segment_sum(tf.ones_like(i), i)
    batch_size = tf.shape(n_nodes)[0]
    n_nodes_max = tf.reduce_max(n_nodes)
    # Offset of each segment's first element in the flat x
    cumulative_n_nodes = tf.concat(
        (tf.zeros(1, dtype=n_nodes.dtype), tf.cumsum(n_nodes)[:-1]), 0
    )
    # Scatter x into a dense (batch_size, n_nodes_max) matrix; pad the slots
    # with no corresponding node with a very negative value so padding never
    # ranks in the top K
    index = tf.range(n)
    index = (index - tf.gather(cumulative_n_nodes, i)) + (i * n_nodes_max)
    dense_x = tf.zeros(batch_size * n_nodes_max, dtype=x.dtype) - 1e20
    dense_x = tf.tensor_scatter_nd_update(dense_x, index[:, None], x)
    dense_x = tf.reshape(dense_x, (batch_size, n_nodes_max))

    # Row-wise descending sort, then map the row-local positions back to
    # indices into the flat x by adding each segment's offset
    perm = tf.argsort(dense_x, direction="DESCENDING")
    perm = perm + cumulative_n_nodes[:, None]
    perm = tf.reshape(perm, (-1,))

    # Per-segment K = ceil(ratio * segment_size)
    k = tf.cast(tf.math.ceil(ratio * tf.cast(n_nodes, tf.float32)), i.dtype)

    # This costs more memory
    # to_rep = tf.tile(tf.constant([1., 0.]), (batch_size,))
    # rep_times = tf.reshape(tf.concat((k[:, None], (n_nodes_max - k)[:, None]), -1), (-1,))
    # mask = ops.repeat(to_rep, rep_times)
    # perm = tf.boolean_mask(perm, mask)

    # This is slower
    # Keep only the first k[b] entries of every row b of perm
    r_range = tf.ragged.range(k).flat_values
    r_delta = ops.repeat(tf.range(batch_size) * n_nodes_max, k)
    mask = r_range + r_delta
    perm = tf.gather(perm, mask)

    return perm
| 6,197 | 32.868852 | 92 | py |
spektral | spektral-master/spektral/layers/pooling/la_pool.py | import tensorflow as tf
from scipy import sparse
from tensorflow.keras import backend as K
from spektral.layers import ops
from spektral.layers.pooling.src import SRCPool
class LaPool(SRCPool):
    r"""
    A Laplacian pooling (LaPool) layer from the paper

    > [Towards Interpretable Sparse Graph Representation Learning with Laplacian Pooling](https://arxiv.org/abs/1905.11577)<br>
    > Emmanuel Noutahi et al.

    **Mode**: disjoint.

    This layer computes a soft clustering of the graph by first identifying a set of
    leaders, and then assigning every remaining node to the cluster of the closest
    leader:
    $$
    \V = \|\L\X\|_d; \\
    \i = \{ i \mid \V_i > \V_j, \forall j \in \mathcal{N}(i) \} \\
    \S^\top = \textrm{SparseMax}\left( \beta \frac{\X\X_{\i}^\top}{\|\X\|\|\X_{\i}\|} \right)
    $$
    \(\beta\) is a regularization vector that is applied element-wise to the selection
    matrix.
    If `shortest_path_reg=True`, it is equal to the inverse of the shortest path between
    each node and its corresponding leader (this can be expensive since it runs on CPU).
    Otherwise it is equal to 1.
    The reduction and connection are computed as \(\X' = \S\X\) and
    \(\A' = \S^\top\A\S\), respectively.

    Note that the number of nodes in the output graph depends on the input node features.

    **Input**

    - Node features of shape `(n_nodes_in, n_node_features)`;
    - Adjacency matrix of shape `(n_nodes_in, n_nodes_in)`;

    **Output**

    - Reduced node features of shape `(n_nodes_out, channels)`;
    - Reduced adjacency matrix of shape `(n_nodes_out, n_nodes_out)`;
    - If `return_selection=True`, the selection matrix of shape
    `(n_nodes_in, n_nodes_out)`.

    **Arguments**

    - `shortest_path_reg`: boolean, apply the shortest path regularization described in
    the paper (can be expensive);
    - `return_selection`: boolean, whether to return the selection matrix;
    """

    def __init__(self, shortest_path_reg=True, return_selection=False, **kwargs):
        super().__init__(return_selection=return_selection, **kwargs)
        self.shortest_path_reg = shortest_path_reg

    def call(self, inputs, **kwargs):
        x, a, i = self.get_inputs(inputs)

        # Select leaders: nodes whose Laplacian-filtered feature norm is >=
        # that of all their neighbors (local maxima of V = ||L X||)
        lap = laplacian(a)
        v = ops.modal_dot(lap, x)
        v = tf.norm(v, axis=-1, keepdims=1)

        row = a.indices[:, 0]
        col = a.indices[:, 1]
        leader_check = tf.cast(tf.gather(v, row) >= tf.gather(v, col), tf.int32)
        # Product over a node's edges: 1 only if the check holds for every neighbor
        leader_mask = ops.scatter_prod(leader_check[:, 0], row, self.n_nodes)
        leader_mask = tf.cast(leader_mask, tf.bool)

        return self.pool(x, a, i, leader_mask=leader_mask)

    def select(self, x, a, i, leader_mask=None):
        # Cosine similarity between each node and the leaders of its graph
        if i is None:
            i = tf.zeros(self.n_nodes, dtype=tf.int32)
        cosine_similarity = sparse_cosine_similarity(x, self.n_nodes, leader_mask, i)

        # Shortest path regularization
        if self.shortest_path_reg:

            def shortest_path(a_):
                return sparse.csgraph.shortest_path(a_, directed=False)

            # Runs on CPU via numpy_function (not differentiable) -- this is
            # the expensive option flagged in the class docstring
            np_fn_input = tf.sparse.to_dense(a) if K.is_sparse(a) else a
            beta = 1 / tf.numpy_function(shortest_path, [np_fn_input], tf.float64)
            # Unreachable pairs have infinite path length -> weight 0
            beta = tf.where(tf.math.is_inf(beta), tf.zeros_like(beta), beta)
            beta = tf.boolean_mask(beta, leader_mask, axis=1)
            beta = tf.cast(
                tf.ensure_shape(beta, cosine_similarity.shape), cosine_similarity.dtype
            )
        else:
            beta = 1.0

        # NOTE(review): the paper/docstring describe a SparseMax here, but the
        # implementation uses a sparse softmax -- confirm this is intended.
        s = tf.sparse.softmax(cosine_similarity)
        s = beta * tf.sparse.to_dense(s)

        # Leaders end up entirely in their own cluster
        kronecker_delta = tf.boolean_mask(
            tf.eye(self.n_nodes, dtype=s.dtype), leader_mask, axis=1
        )

        # Create clustering
        s = tf.where(leader_mask[:, None], kronecker_delta, s)

        return s

    def reduce(self, x, s, **kwargs):
        # X' = S^T X
        return ops.modal_dot(s, x, transpose_a=True)

    def connect(self, a, s, **kwargs):
        # A' = S^T A S
        return ops.matmul_at_b_a(s, a)

    def reduce_index(self, i, s, leader_mask=None):
        # One output node per leader: keep the leaders' batch indices
        i_pool = tf.boolean_mask(i, leader_mask)

        return i_pool

    def get_config(self):
        config = {"shortest_path_reg": self.shortest_path_reg}
        base_config = super().get_config()
        return {**base_config, **config}
def laplacian(a):
    """Return the (combinatorial) graph Laplacian L = D - A.

    Works for both dense and SparseTensor adjacency matrices; the degree
    matrix is always built as a sparse batch so the final sum is sparse-safe.
    """
    degree = ops.degree_matrix(a, return_sparse_batch=True)
    # Negate A without densifying it when it is sparse
    neg_a = a.__mul__(-1) if K.is_sparse(a) else -a
    return tf.sparse.add(degree, neg_a)
def reduce_sum(x, **kwargs):
    """Sum the entries of `x`, dispatching on dense vs. sparse input.

    Any keyword arguments (e.g. `axis`) are forwarded unchanged to the
    underlying TensorFlow reduction.
    """
    summer = tf.sparse.reduce_sum if K.is_sparse(x) else tf.reduce_sum
    return summer(x, **kwargs)
def sparse_cosine_similarity(x, n_nodes, mask, i):
    """
    Computes a block-diagonal SparseTensor of cosine similarities between
    nodes and the leaders of their own graph.

    :param x: Tensor of shape `(n_nodes, F)` with the node features;
    :param n_nodes: number of nodes across the whole (disjoint) batch;
    :param mask: boolean Tensor of shape `(n_nodes, )` marking leader nodes;
    :param i: Tensor of integers with shape `(n_nodes, )`, the graph ID of
    each node (assumed sorted, as required by `tf.math.segment_sum`).
    :return: SparseTensor of dense shape `(n_nodes, K)` where `K` is the total
    number of leaders; entry `(u, j)` is the cosine similarity between node
    `u` and the `j`-th leader, and is only present when both belong to the
    same graph (block-diagonal structure).
    """
    mask = tf.cast(mask, tf.int32)
    leader_idx = tf.where(mask)

    # Number of nodes in each graph
    ns = tf.math.segment_sum(tf.ones_like(i), i)
    # Number of leaders in each graph
    ks = tf.math.segment_sum(mask, i)

    # s will be a block-diagonal matrix where entry i,j is the cosine
    # similarity between node i and leader j.
    # The code below creates the indices of the sparse block-diagonal matrix

    # Row indices of the block-diagonal S
    # Each graph's node range is repeated once per leader of that graph
    starts = tf.cumsum(ns) - ns
    starts = tf.repeat(starts, ks)
    stops = tf.cumsum(ns)
    stops = tf.repeat(stops, ks)
    index_n = tf.ragged.range(starts, stops).flat_values

    # Column indices of the block-diagonal S
    # index_k addresses leaders in the flat node numbering (to gather their
    # features); index_k_for_s addresses them in the compact 0..K-1 numbering
    # used for the columns of S
    index_k = tf.repeat(leader_idx, tf.repeat(ns, ks))
    index_k_for_s = tf.repeat(tf.range(tf.reduce_sum(ks)), tf.repeat(ns, ks))

    # Make index int64
    index_n = tf.cast(index_n, tf.int64)
    index_k = tf.cast(index_k, tf.int64)
    index_k_for_s = tf.cast(index_k_for_s, tf.int64)

    # Compute similarity between nodes and leaders
    x_n = tf.gather(x, index_n)
    x_n_norm = tf.norm(x_n, axis=-1)
    x_k = tf.gather(x, index_k)
    x_k_norm = tf.norm(x_k, axis=-1)
    values = tf.reduce_sum(x_n * x_k, -1) / (x_n_norm * x_k_norm)

    # Create a sparse tensor for S
    indices = tf.stack((index_n, index_k_for_s), 1)
    s = tf.SparseTensor(
        values=values, indices=indices, dense_shape=(n_nodes, tf.reduce_sum(ks))
    )
    s = tf.sparse.reorder(s)

    return s
| 6,279 | 32.404255 | 127 | py |
spektral | spektral-master/spektral/layers/pooling/mincut_pool.py | import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from spektral.layers import ops
from spektral.layers.pooling.src import SRCPool
class MinCutPool(SRCPool):
    r"""
    A MinCut pooling layer from the paper

    > [Spectral Clustering with Graph Neural Networks for Graph Pooling](https://arxiv.org/abs/1907.00481)<br>
    > Filippo Maria Bianchi et al.

    **Mode**: single, batch.

    This layer learns a soft clustering of the input graph as follows:
    $$
    \begin{align}
    \S &= \textrm{MLP}(\X); \\
    \X' &= \S^\top \X \\
    \A' &= \S^\top \A \S; \\
    \end{align}
    $$
    where \(\textrm{MLP}\) is a multi-layer perceptron with softmax output.
    Two auxiliary loss terms are also added to the model: the minimum cut loss
    $$
    L_c = - \frac{ \mathrm{Tr}(\S^\top \A \S) }{ \mathrm{Tr}(\S^\top \D \S) }
    $$
    and the orthogonality loss
    $$
    L_o = \left\|
    \frac{\S^\top \S}{\| \S^\top \S \|_F}
    - \frac{\I_K}{\sqrt{K}}
    \right\|_F.
    $$
    The layer can be used without a supervised loss to compute node clustering by
    minimizing the two auxiliary losses.

    **Input**

    - Node features of shape `(batch, n_nodes_in, n_node_features)`;
    - Symmetrically normalized adjacency matrix of shape
    `(batch, n_nodes_in, n_nodes_in)`;

    **Output**

    - Reduced node features of shape `(batch, n_nodes_out, n_node_features)`;
    - Reduced adjacency matrix of shape `(batch, n_nodes_out, n_nodes_out)`;
    - If `return_selection=True`, the selection matrix of shape
    `(batch, n_nodes_in, n_nodes_out)`.

    **Arguments**

    - `k`: number of output nodes;
    - `mlp_hidden`: list of integers, number of hidden units for each hidden layer in
    the MLP used to compute cluster assignments (if `None`, the MLP has only one output
    layer);
    - `mlp_activation`: activation for the MLP layers;
    - `return_selection`: boolean, whether to return the selection matrix;
    - `use_bias`: use bias in the MLP;
    - `kernel_initializer`: initializer for the weights of the MLP;
    - `bias_initializer`: initializer for the bias of the MLP;
    - `kernel_regularizer`: regularization applied to the weights of the MLP;
    - `bias_regularizer`: regularization applied to the bias of the MLP;
    - `kernel_constraint`: constraint applied to the weights of the MLP;
    - `bias_constraint`: constraint applied to the bias of the MLP;
    """

    def __init__(
        self,
        k,
        mlp_hidden=None,
        mlp_activation="relu",
        return_selection=False,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            return_selection=return_selection,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.k = k  # number of output nodes (clusters)
        self.mlp_hidden = mlp_hidden if mlp_hidden is not None else []
        self.mlp_activation = mlp_activation

    def build(self, input_shape):
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
        )
        # MLP producing the soft cluster assignments S (softmax over k clusters)
        self.mlp = Sequential(
            [
                Dense(channels, self.mlp_activation, **layer_kwargs)
                for channels in self.mlp_hidden
            ]
            + [Dense(self.k, "softmax", **layer_kwargs)]
        )

        super().build(input_shape)

    def call(self, inputs, mask=None):
        x, a, i = self.get_inputs(inputs)
        return self.pool(x, a, i, mask=mask)

    def select(self, x, a, i, mask=None):
        s = self.mlp(x)
        if mask is not None:
            # Zero the assignments of padded (masked-out) nodes
            s *= mask[0]

        # Orthogonality loss
        ortho_loss = self.orthogonality_loss(s)
        if K.ndim(a) == 3:
            # Batch mode: average the per-graph losses
            ortho_loss = K.mean(ortho_loss)
        self.add_loss(ortho_loss)

        return s

    def reduce(self, x, s, **kwargs):
        # X' = S^T X
        return ops.modal_dot(s, x, transpose_a=True)

    def connect(self, a, s, **kwargs):
        # A' = S^T A S
        a_pool = ops.matmul_at_b_a(s, a)

        # MinCut loss
        cut_loss = self.mincut_loss(a, s, a_pool)
        if K.ndim(a) == 3:
            cut_loss = K.mean(cut_loss)
        self.add_loss(cut_loss)

        # Post-processing of A: zero the diagonal and renormalize
        a_pool = tf.linalg.set_diag(
            a_pool, tf.zeros(K.shape(a_pool)[:-1], dtype=a_pool.dtype)
        )
        a_pool = ops.normalize_A(a_pool)

        return a_pool

    def reduce_index(self, i, s, **kwargs):
        # Every graph in the batch is reduced to exactly k nodes
        i_mean = tf.math.segment_mean(i, i)
        i_pool = ops.repeat(i_mean, tf.ones_like(i_mean) * self.k)

        return i_pool

    def orthogonality_loss(self, s):
        # L_o = || S^T S / ||S^T S||_F - I_k / sqrt(k) ||_F
        ss = ops.modal_dot(s, s, transpose_a=True)
        i_s = tf.eye(self.k, dtype=ss.dtype)
        ortho_loss = tf.norm(
            ss / tf.norm(ss, axis=(-1, -2), keepdims=True) - i_s / tf.norm(i_s),
            axis=(-1, -2),
        )
        return ortho_loss

    @staticmethod
    def mincut_loss(a, s, a_pool):
        # L_c = -Tr(S^T A S) / Tr(S^T D S)
        num = tf.linalg.trace(a_pool)
        d = ops.degree_matrix(a)
        den = tf.linalg.trace(ops.matmul_at_b_a(s, d))
        cut_loss = -(num / den)
        return cut_loss

    def get_config(self):
        config = {
            "k": self.k,
            "mlp_hidden": self.mlp_hidden,
            "mlp_activation": self.mlp_activation,
        }
        base_config = super().get_config()
        return {**base_config, **config}
| 6,106 | 31.142105 | 110 | py |
spektral | spektral-master/spektral/layers/pooling/just_balance_pool.py | import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from spektral.layers import ops
from spektral.layers.pooling.src import SRCPool
class JustBalancePool(SRCPool):
    r"""
    The Just Balance pooling layer from the paper

    > [Simplifying Clustering with Graph Neural Networks](https://arxiv.org/abs/2207.08779)<br>
    > Filippo Maria Bianchi

    **Mode**: single, batch.

    This layer learns a soft clustering of the input graph as follows:
    $$
    \begin{align}
    \S &= \textrm{MLP}(\X); \\
    \X' &= \S^\top \X \\
    \A' &= \S^\top \A \S; \\
    \end{align}
    $$
    where \(\textrm{MLP}\) is a multi-layer perceptron with softmax output.

    The layer adds the following auxiliary loss to the model
    $$
    L = - \mathrm{Tr}(\sqrt{ \S^\top \S })
    $$

    The layer can be used without a supervised loss to compute node clustering by
    minimizing the auxiliary loss.

    The layer is originally designed to be used in conjunction with a
    [GCNConv](https://graphneural.network/layers/convolution/#gcnconv)
    layer operating on the following connectivity matrix
    $$
    \tilde{\A} = \I - \delta (\I - \D^{-1/2} \A \D^{-1/2})
    $$

    **Input**

    - Node features of shape `(batch, n_nodes_in, n_node_features)`;
    - Connectivity matrix of shape
    `(batch, n_nodes_in, n_nodes_in)`;

    **Output**

    - Reduced node features of shape `(batch, n_nodes_out, n_node_features)`;
    - Reduced adjacency matrix of shape `(batch, n_nodes_out, n_nodes_out)`;
    - If `return_selection=True`, the selection matrix of shape
    `(batch, n_nodes_in, n_nodes_out)`.

    **Arguments**

    - `k`: number of output nodes;
    - `mlp_hidden`: list of integers, number of hidden units for each hidden layer in
    the MLP used to compute cluster assignments (if `None`, the MLP has only one output
    layer);
    - `mlp_activation`: activation for the MLP layers;
    - `normalized_loss`: boolean, whether to normalize the auxiliary loss in [0,1];
    - `return_selection`: boolean, whether to return the selection matrix;
    - `kernel_initializer`: initializer for the weights of the MLP;
    - `bias_initializer`: initializer for the bias of the MLP;
    - `kernel_regularizer`: regularization applied to the weights of the MLP;
    - `bias_regularizer`: regularization applied to the bias of the MLP;
    - `kernel_constraint`: constraint applied to the weights of the MLP;
    - `bias_constraint`: constraint applied to the bias of the MLP;
    """

    def __init__(
        self,
        k,
        mlp_hidden=None,
        mlp_activation="relu",
        normalized_loss=False,
        return_selection=False,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            return_selection=return_selection,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.k = k  # number of output nodes (clusters)
        # NOTE(review): MinCutPool uses `mlp_hidden if mlp_hidden is not None
        # else []` here; truthiness vs. None-check only differs for falsy
        # non-None inputs (e.g. []), where the result is the same -- benign.
        self.mlp_hidden = mlp_hidden if mlp_hidden else []
        self.mlp_activation = mlp_activation
        self.normalized_loss = normalized_loss

    def build(self, input_shape):
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
        )
        # MLP producing the soft cluster assignments S (softmax over k clusters)
        self.mlp = Sequential(
            [
                Dense(channels, self.mlp_activation, **layer_kwargs)
                for channels in self.mlp_hidden
            ]
            + [Dense(self.k, "softmax", **layer_kwargs)]
        )

        super().build(input_shape)

    def call(self, inputs, mask=None):
        x, a, i = self.get_inputs(inputs)
        return self.pool(x, a, i, mask=mask)

    def select(self, x, a, i, mask=None):
        s = self.mlp(x)
        if mask is not None:
            # Zero the assignments of padded (masked-out) nodes
            s *= mask[0]

        self.add_loss(self.balance_loss(s))

        return s

    def reduce(self, x, s, **kwargs):
        # X' = S^T X
        return ops.modal_dot(s, x, transpose_a=True)

    def connect(self, a, s, **kwargs):
        # A' = S^T A S
        a_pool = ops.matmul_at_b_a(s, a)

        # Post-processing of A: zero the diagonal and renormalize
        a_pool = tf.linalg.set_diag(
            a_pool, tf.zeros(K.shape(a_pool)[:-1], dtype=a_pool.dtype)
        )
        a_pool = ops.normalize_A(a_pool)

        return a_pool

    def reduce_index(self, i, s, **kwargs):
        # Every graph in the batch is reduced to exactly k nodes
        i_mean = tf.math.segment_mean(i, i)
        i_pool = ops.repeat(i_mean, tf.ones_like(i_mean) * self.k)

        return i_pool

    def balance_loss(self, s):
        # L = -Tr(sqrt(S^T S)), optionally normalized by sqrt(n * k)
        ss = ops.modal_dot(s, s, transpose_a=True)
        loss = -tf.linalg.trace(tf.math.sqrt(ss))

        if self.normalized_loss:
            n = float(tf.shape(s, out_type=tf.int32)[-2])
            c = float(tf.shape(s, out_type=tf.int32)[-1])
            loss = loss / tf.math.sqrt(n * c)

        return loss

    def get_config(self):
        config = {
            "k": self.k,
            "mlp_hidden": self.mlp_hidden,
            "mlp_activation": self.mlp_activation,
            "normalized_loss": self.normalized_loss,
        }
        base_config = super().get_config()
        return {**base_config, **config}
| 5,777 | 31.829545 | 95 | py |
spektral | spektral-master/spektral/layers/pooling/src.py | import inspect
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from spektral.utils.keras import (
deserialize_kwarg,
is_keras_kwarg,
is_layer_kwarg,
serialize_kwarg,
)
class SRCPool(Layer):
    r"""
    A general class for graph pooling layers based on the "Select, Reduce,
    Connect" framework presented in:

    > [Understanding Pooling in Graph Neural Networks.](https://arxiv.org/abs/2110.05292)<br>
    > Daniele Grattarola et al.

    This layer computes:
    $$
    \begin{align}
    & \mathcal{S} = \left\{\mathcal{S}_k\right\}_{k=1:K} = \textrm{Sel}(\mathcal{G}) \\
    & \mathcal{X}'=\left\{\textrm{Red}( \mathcal{G}, \mathcal{S}_k )\right\}_{k=1:K} \\
    & \mathcal{E}'=\left\{\textrm{Con}( \mathcal{G}, \mathcal{S}_k, \mathcal{S}_l )\right\}_{k,L=1:K} \\
    \end{align}
    $$
    Where \(\textrm{Sel}\) is a node equivariant selection function that computes
    the supernode assignments \(\mathcal{S}_k\), \(\textrm{Red}\) is a
    permutation-invariant function to reduce the supernodes into the new node
    attributes, and \(\textrm{Con}\) is a permutation-invariant connection
    function that computes the link between the pooled nodes.

    By extending this class, it is possible to create any pooling layer in the
    SRC formalism.

    **Input**

    - `x`: Tensor of shape `([batch], N, F)` representing node features;
    - `a`: Tensor or SparseTensor of shape `([batch], N, N)` representing the
    adjacency matrix;
    - `i`: (optional) Tensor of integers with shape `(N, )` representing the
    batch index;

    **Output**

    - `x_pool`: Tensor of shape `([batch], K, F)`, representing the node
    features of the output. `K` is the number of output nodes and depends on the
    specific pooling strategy;
    - `a_pool`: Tensor or SparseTensor of shape `([batch], K, K)` representing
    the adjacency matrix of the output;
    - `i_pool`: (only if i was given as input) Tensor of integers with shape
    `(K, )` representing the batch index of the output;
    - `s`: (if `return_selection=True`) Tensor or SparseTensor representing the
    supernode assignments;

    **API**

    - `pool(x, a, i, **kwargs)`: pools the graph and returns the reduced node
    features and adjacency matrix. If the batch index `i` is not `None`, a
    reduced version of `i` will be returned as well.
    Any given `kwargs` will be passed as keyword arguments to `select()`,
    `reduce()` and `connect()` if any matching key is found.
    The mandatory arguments of `pool()` **must** be computed in `call()` by
    calling `self.get_inputs(inputs)`.
    - `select(x, a, i, **kwargs)`: computes supernode assignments mapping the
    nodes of the input graph to the nodes of the output.
    - `reduce(x, s, **kwargs)`: reduces the supernodes to form the nodes of the
    pooled graph.
    - `connect(a, s, **kwargs)`: connects the reduced supernodes.
    - `reduce_index(i, s, **kwargs)`: helper function to reduce the batch index
    (only called if `i` is given as input).

    When overriding any function of the API, it is possible to access the
    true number of nodes of the input (`n_nodes`) as a Tensor in the instance variable
    `self.n_nodes` (this is populated by `self.get_inputs()` at the beginning of
    `call()`).

    **Arguments**:

    - `return_selection`: if `True`, the Tensor used to represent supernode assignments
    will be returned with `x_pool`, `a_pool`, and `i_pool`;
    """

    def __init__(self, return_selection=False, **kwargs):
        # kwargs for the Layer class are handled automatically
        super().__init__(**{k: v for k, v in kwargs.items() if is_keras_kwarg(k)})
        self.supports_masking = True
        self.return_selection = return_selection

        # *_regularizer, *_constraint, *_initializer, activation, and use_bias
        # are dealt with automatically if passed to the constructor
        self.kwargs_keys = []
        for key in kwargs:
            if is_layer_kwarg(key):
                attr = kwargs[key]
                attr = deserialize_kwarg(key, attr)
                self.kwargs_keys.append(key)
                setattr(self, key, attr)

        # Signature of the SRC functions
        self.sel_signature = inspect.signature(self.select).parameters
        self.red_signature = inspect.signature(self.reduce).parameters
        self.con_signature = inspect.signature(self.connect).parameters
        self.i_red_signature = inspect.signature(self.reduce_index).parameters

        self._n_nodes = None

    def build(self, input_shape):
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        # Always start the call() method with get_inputs(inputs) to set self.n_nodes
        x, a, i = self.get_inputs(inputs)
        return self.pool(x, a, i, **kwargs)

    def pool(self, x, a, i, **kwargs):
        """
        This is the core method of the SRC class, which runs a full pass of
        selection, reduction and connection.
        It is usually not necessary to modify this function. Any previous/shared
        operations should be done in `call()` and their results can be passed to
        the three SRC functions via keyword arguments (any kwargs given to this
        function will be matched to the signature of `select()`, `reduce()` and
        `connect()` and propagated as input to the three functions).
        Any pooling logic should go in the SRC functions themselves.

        :param x: Tensor of shape `([batch], N, F)`;
        :param a: Tensor or SparseTensor of shape `([batch], N, N)`;
        :param i: only in single/disjoint mode, Tensor of integers with shape
        `(N, )`; otherwise, `None`;
        :param kwargs: additional keyword arguments for `select()`, `reduce()`
        and `connect()`. Any matching kwargs will be passed to each of the three
        functions.
        :return:
            - `x_pool`: Tensor of shape `([batch], K, F)`, where `K` is the
            number of output nodes and depends on the pooling strategy;
            - `a_pool`: Tensor or SparseTensor of shape `([batch], K, K)`;
            - `i_pool`: (only if `i` is not `None`) Tensor of integers with shape
            `(K, )`;
        """
        # Select
        sel_kwargs = self._get_kwargs(x, a, i, self.sel_signature, kwargs)
        s = self.select(x, a, i, **sel_kwargs)

        # Reduce
        red_kwargs = self._get_kwargs(x, a, i, self.red_signature, kwargs)
        x_pool = self.reduce(x, s, **red_kwargs)

        # Index reduce
        i_red_kwargs = self._get_kwargs(x, a, i, self.i_red_signature, kwargs)
        i_pool = self.reduce_index(i, s, **i_red_kwargs) if i is not None else None

        # Connect
        con_kwargs = self._get_kwargs(x, a, i, self.con_signature, kwargs)
        a_pool = self.connect(a, s, **con_kwargs)

        return self.get_outputs(x_pool, a_pool, i_pool, s)

    def select(self, x, a, i, **kwargs):
        """
        Selection function. Given the graph, computes the supernode assignments
        that will eventually be mapped to the `K` nodes of the pooled graph.
        Supernode assignments are usually represented as a dense matrix of shape
        `(N, K)` or sparse indices of shape `(K, )`.

        :param x: Tensor of shape `([batch], N, F)`;
        :param a: Tensor or SparseTensor (depending on the implementation of the
        SRC functions) of shape `([batch], N, N)`;
        :param i: Tensor of integers with shape `(N, )` or `None`;
        :param kwargs: additional keyword arguments.
        :return: Tensor representing supernode assignments.
        """
        # Default: identity selection that keeps every node.
        # Fix: the previous `tf.range(tf.shape(i))` passed a rank-1 tensor as
        # the scalar `limit` argument of tf.range (a runtime error) and also
        # crashed when `i` was None in single mode. `self.n_nodes` is always
        # populated by get_inputs() at the beginning of call().
        return tf.range(self.n_nodes)

    def reduce(self, x, s, **kwargs):
        """
        Reduction function. Given a selection, reduces the supernodes to form
        the nodes of the new graph.

        :param x: Tensor of shape `([batch], N, F)`;
        :param s: Tensor representing supernode assignments, as computed by
        `select()`;
        :param kwargs: additional keyword arguments; when overriding this
        function, any keyword argument defined explicitly as `key=None` will be
        automatically filled in when calling `pool(key=value)`.
        :return: Tensor of shape `([batch], K, F)` representing the node attributes of
        the pooled graph.
        """
        return tf.gather(x, s)

    def connect(self, a, s, **kwargs):
        """
        Connection function. Given a selection, connects the nodes of the pooled
        graphs.

        :param a: Tensor or SparseTensor of shape `([batch], N, N)`;
        :param s: Tensor representing supernode assignments, as computed by
        `select()`;
        :param kwargs: additional keyword arguments; when overriding this
        function, any keyword argument defined explicitly as `key=None` will be
        automatically filled in when calling `pool(key=value)`.
        :return: Tensor or SparseTensor of shape `([batch], K, K)` representing
        the adjacency matrix of the pooled graph.
        """
        return sparse_connect(a, s, self.n_nodes)

    def reduce_index(self, i, s, **kwargs):
        """
        Helper function to reduce the batch index `i`. Given a selection,
        returns a new batch index for the pooled graph. This is only called by
        `pool()` when `i` is given as input to the layer.

        :param i: Tensor of integers with shape `(N, )`;
        :param s: Tensor representing supernode assignments, as computed by
        `select()`.
        :param kwargs: additional keyword arguments; when overriding this
        function, any keyword argument defined explicitly as `key=None` will be
        automatically filled in when calling `pool(key=value)`.
        :return: Tensor of integers of shape `(K, )`.
        """
        return tf.gather(i, s)

    @staticmethod
    def _get_kwargs(x, a, i, signature, kwargs):
        # Match the caller-supplied kwargs (plus x/a/i) against the signature
        # of one SRC function, filling only its optional (default) parameters
        output = {}
        for k in signature.keys():
            if signature[k].default is inspect.Parameter.empty or k == "kwargs":
                pass
            elif k == "x":
                output[k] = x
            elif k == "a":
                output[k] = a
            elif k == "i":
                output[k] = i
            elif k in kwargs:
                output[k] = kwargs[k]
            else:
                raise ValueError("Missing key {} for signature {}".format(k, signature))

        return output

    def get_inputs(self, inputs):
        # Unpack [x, a] or [x, a, i] and record the true number of nodes
        if len(inputs) == 3:
            x, a, i = inputs
            if K.ndim(i) == 2:
                i = i[:, 0]
            assert K.ndim(i) == 1, "i must have rank 1"
        elif len(inputs) == 2:
            x, a = inputs
            i = None
        else:
            raise ValueError(
                "Expected 2 or 3 inputs tensors (x, a, i), got {}.".format(len(inputs))
            )

        self.n_nodes = tf.shape(x)[-2]

        return x, a, i

    def get_outputs(self, x_pool, a_pool, i_pool, s):
        output = [x_pool, a_pool]
        if i_pool is not None:
            output.append(i_pool)
        if self.return_selection:
            output.append(s)

        return output

    def get_config(self):
        config = {
            "return_selection": self.return_selection,
        }
        for key in self.kwargs_keys:
            config[key] = serialize_kwarg(key, getattr(self, key))
        base_config = super().get_config()
        return {**base_config, **config}

    def compute_mask(self, inputs, mask=None):
        # After pooling all nodes are always valid
        return None

    @property
    def n_nodes(self):
        if self._n_nodes is None:
            raise ValueError(
                "self.n_nodes has not been defined. Have you called "
                "self.get_inputs(inputs) at the beginning of call()?"
            )
        return self._n_nodes

    @n_nodes.setter
    def n_nodes(self, value):
        self._n_nodes = value

    @n_nodes.deleter
    def n_nodes(self):
        self._n_nodes = None
def sparse_connect(A, S, N):
    """
    Extracts the subgraph of A induced by the selected nodes.

    :param A: SparseTensor of dense shape `(N, N)`;
    :param S: rank-1 Tensor with the indices of the selected nodes;
    :param N: the number of nodes of A.
    :return: SparseTensor of dense shape `(K, K)` (with `K = len(S)`)
    containing only the edges of A whose endpoints were both selected,
    re-indexed to the new node numbering.
    """
    N_sel = tf.cast(tf.shape(S), tf.int64)[0]

    # m maps old node index -> new index in 0..K-1, or -1 for dropped nodes
    m = tf.scatter_nd(S[:, None], tf.range(N_sel) + 1, (N,)) - 1
    row, col = A.indices[:, 0], A.indices[:, 1]
    r_mask = tf.gather(m, row)
    c_mask = tf.gather(m, col)
    # Keep only edges whose endpoints are both selected
    mask_total = (r_mask >= 0) & (c_mask >= 0)
    r_new = tf.boolean_mask(r_mask, mask_total)
    c_new = tf.boolean_mask(c_mask, mask_total)
    v_new = tf.boolean_mask(A.values, mask_total)

    output = tf.SparseTensor(
        values=v_new, indices=tf.stack((r_new, c_new), 1), dense_shape=(N_sel, N_sel)
    )
    return tf.sparse.reorder(output)
| 12,671 | 39.101266 | 112 | py |
spektral | spektral-master/spektral/layers/convolutional/diffusion_conv.py | import tensorflow as tf
import tensorflow.keras.layers as layers
from spektral.layers.convolutional.conv import Conv
from spektral.utils import normalized_adjacency
class DiffuseFeatures(layers.Layer):
    r"""
    Utility layer computing a single output channel of the diffusion
    convolution, following
    [https://arxiv.org/abs/1707.01926](https://arxiv.org/abs/1707.01926).

    **Input**

    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Normalized adjacency or attention coef. matrix \(\hat \A \) of shape
    `([batch], n_nodes, n_nodes)`; use `DiffusionConv.preprocess` to normalize.

    **Output**

    - Node features with the same shape as the input, but with the last
    dimension changed to \(1\).

    **Arguments**

    - `num_diffusion_steps`: how many diffusion steps to consider (\(K\) in
    the paper);
    - `kernel_initializer`: initializer for the weights;
    - `kernel_regularizer`: regularization applied to the kernel vector;
    - `kernel_constraint`: constraint applied to the kernel vector;
    """

    def __init__(
        self,
        num_diffusion_steps,
        kernel_initializer,
        kernel_regularizer,
        kernel_constraint,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # theta has one trainable coefficient per diffusion step.
        self.K = num_diffusion_steps
        self.kernel_initializer = kernel_initializer
        self.kernel_regularizer = kernel_regularizer
        self.kernel_constraint = kernel_constraint

    def build(self, input_shape):
        # Polynomial coefficients theta in R^K.
        self.kernel = self.add_weight(
            shape=(self.K,),
            name="kernel",
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )

    def call(self, inputs):
        x, a = inputs
        # Diffusion matrix sum_k theta_k * a^k; tf.math.polyval expects the
        # coefficients as a list of tensors, hence the unstack.
        diff_mat = tf.math.polyval(tf.unstack(self.kernel), a)
        # Diffuse the features and collapse all channels into one (eq. 3 in
        # the paper): ([batch], n_nodes, F) -> ([batch], n_nodes).
        h = tf.math.reduce_sum(tf.matmul(diff_mat, x), axis=-1)
        # Restore the trailing singleton channel dimension.
        return tf.expand_dims(h, -1)
class DiffusionConv(Conv):
    r"""
    A diffusion convolution operator from the paper

    > [Diffusion Convolutional Recurrent Neural Network: Data-Driven Traffic
    Forecasting](https://arxiv.org/abs/1707.01926)<br>
    > Yaguang Li et al.

    **Mode**: single, disjoint, mixed, batch.

    **This layer expects a dense adjacency matrix.**

    Each of the `channels` output channels is computed by an independent
    `DiffuseFeatures` filter, i.e., a degree-\(K\) polynomial of the
    row-normalized adjacency matrix applied to the node features.

    **Input**

    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Normalized adjacency or attention coef. matrix \(\hat \A \) of shape
    `([batch], n_nodes, n_nodes)`; use `DiffusionConv.preprocess` to normalize.

    **Output**

    - Node features with the same shape as the input, but with the last
    dimension changed to `channels`.

    **Arguments**

    - `channels`: number of output channels;
    - `K`: number of diffusion steps;
    - `activation`: activation function \(\sigma\) (\(\tanh\) by default);
    - `kernel_initializer`: initializer for the weights;
    - `kernel_regularizer`: regularization applied to the weights;
    - `kernel_constraint`: constraint applied to the weights;
    """

    def __init__(
        self,
        channels,
        K=6,
        activation="tanh",
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        **kwargs,
    ):
        super().__init__(
            activation=activation,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            kernel_constraint=kernel_constraint,
            **kwargs,
        )
        self.channels = channels
        # Store K + 1 so that powers 0..K of the adjacency are included.
        self.K = K + 1

    def build(self, input_shape):
        # One single-channel diffusion filter per output channel.
        self.filters = [
            DiffuseFeatures(
                num_diffusion_steps=self.K,
                kernel_initializer=self.kernel_initializer,
                kernel_regularizer=self.kernel_regularizer,
                kernel_constraint=self.kernel_constraint,
            )
            for _ in range(self.channels)
        ]

    def apply_filters(self, x, a):
        # Each filter yields a ([batch], n_nodes, 1) tensor; concatenating
        # along the last axis gives the ([batch], n_nodes, channels) signal.
        return tf.concat([diffusion((x, a)) for diffusion in self.filters], -1)

    def call(self, inputs, mask=None):
        x, a = inputs
        output = self.apply_filters(x, a)
        if mask is not None:
            output *= mask[0]
        return self.activation(output)

    @property
    def config(self):
        # Undo the +1 applied in __init__ so the config round-trips.
        return {"channels": self.channels, "K": self.K - 1}

    @staticmethod
    def preprocess(a):
        return normalized_adjacency(a)
| 5,944 | 31.664835 | 98 | py |
spektral | spektral-master/spektral/layers/convolutional/xenet_conv.py | from collections.abc import Iterable
import tensorflow as tf
from tensorflow.keras.layers import Concatenate, Dense, Multiply, PReLU, ReLU
from tensorflow.python.ops import gen_sparse_ops
from spektral.layers.convolutional.conv import Conv
from spektral.layers.convolutional.message_passing import MessagePassing
class XENetConv(MessagePassing):
    r"""
    A XENet convolutional layer from the paper

    > [XENet: Using a new graph convolution to accelerate the timeline for protein design on quantum computers](https://www.biorxiv.org/content/10.1101/2021.05.05.442729v1)<br>
    > Jack B. Maguire, Daniele Grattarola, Eugene Klyshko, Vikram Khipple Mulligan, Hans Melo

    **Mode**: single, disjoint, mixed.

    **This layer expects a sparse adjacency matrix.**

    For a version of this layer that supports batch mode, you can use
    `spektral.layers.XENetDenseConv` as a drop-in replacement.

    This layer computes for each node \(i\):
    $$
        \s_{ij} = \text{PReLU} \left( (\x_{i} \| \x_{j} \| \e_{ij} \| \e_{ji}) \W^{(s)} + \b^{(s)} \right) \\
        \s^{(\text{out})}_{i} = \sum\limits_{j \in \mathcal{N}(i)} \s_{ij} \\
        \s^{(\text{in})}_{i} = \sum\limits_{j \in \mathcal{N}(i)} \s_{ji} \\
        \x_{i}' = \sigma\left( (\x_{i} \| \s^{(\text{out})}_{i} \| \s^{(\text{in})}_{i}) \W^{(n)} + \b^{(n)} \right) \\
        \e_{ij}' = \sigma\left( \s_{ij} \W^{(e)} + \b^{(e)} \right)
    $$

    **Input**

    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Binary adjacency matrices of shape `([batch], n_nodes, n_nodes)`;
    - Edge features of shape `(num_edges, n_edge_features)`;

    **Output**

    - Node features with the same shape of the input, but the last dimension
    changed to `node_channels`.
    - Edge features with the same shape of the input, but the last dimension
    changed to `edge_channels`.

    **Arguments**

    - `stack_channels`: integer or list of integers, number of channels for the hidden layers;
    - `node_channels`: integer, number of output channels for the nodes;
    - `edge_channels`: integer, number of output channels for the edges;
    - `attention`: whether to use attention when aggregating the stacks;
    - `node_activation`: activation function for nodes;
    - `edge_activation`: activation function for edges;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        stack_channels,
        node_channels,
        edge_channels,
        attention: bool = True,
        node_activation=None,
        edge_activation=None,
        aggregate: str = "sum",
        use_bias: bool = True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            aggregate=aggregate,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.stack_channels = stack_channels
        self.node_channels = node_channels
        self.edge_channels = edge_channels
        self.attention = attention
        self.node_activation = node_activation
        self.edge_activation = edge_activation

    def build(self, input_shape):
        """Create the stack MLP, the node/edge output models and, optionally,
        the attention gates."""
        assert len(input_shape) == 3  # X, A, E, right?
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
        )

        # MLP applied to the per-edge stacks: ReLU on hidden layers, PReLU on
        # the last one (as in the paper).
        self.stack_models = []
        self.stack_model_acts = []
        if isinstance(self.stack_channels, Iterable):
            assert len(self.stack_channels) > 0
            for count, value in enumerate(self.stack_channels):
                self.stack_models.append(Dense(value, **layer_kwargs))
                if count != len(self.stack_channels) - 1:
                    self.stack_model_acts.append(ReLU())
                else:
                    self.stack_model_acts.append(PReLU())
        else:
            self.stack_models.append(Dense(self.stack_channels, **layer_kwargs))
            self.stack_model_acts.append(PReLU())

        self.node_model = Dense(
            self.node_channels, activation=self.node_activation, **layer_kwargs
        )
        self.edge_model = Dense(
            self.edge_channels, activation=self.edge_activation, **layer_kwargs
        )

        if self.attention:
            # Sigmoid gates weighting each stack before aggregation.
            self.incoming_att_sigmoid = Dense(1, activation="sigmoid")
            self.incoming_att_multiply = Multiply()
            self.outgoing_att_sigmoid = Dense(1, activation="sigmoid")
            self.outgoing_att_multiply = Multiply()

        self.built = True

    def call(self, inputs, **kwargs):
        """Run one round of message passing; returns (new_x, new_e)."""
        x, a, e = self.get_inputs(inputs)
        x_out, e_out = self.propagate(x, a, e)
        return x_out, e_out

    def message(self, x, e=None):
        """Compute the per-edge stack s_ij from node and edge features."""
        x_i = self.get_targets(x)  # Features of self
        x_j = self.get_sources(x)  # Features of neighbours

        # Features of outgoing edges are simply the edge features
        e_ij = e

        # Features of incoming edges j -> i are obtained by transposing the edge features.
        # Since TF does not allow transposing sparse matrices with rank > 2, we instead
        # re-order a tf.range(n_edges) and use it as indices to re-order the edge
        # features.
        # The following two instructions are the sparse equivalent of
        #     tf.transpose(E, perm=(1, 0, 2))
        # where E has shape (N, N, S).
        reorder_idx = gen_sparse_ops.sparse_reorder(
            tf.stack([self.index_targets, self.index_sources], axis=-1),
            tf.range(tf.shape(e)[0]),
            (self.n_nodes, self.n_nodes),
        )[1]
        e_ji = tf.gather(e, reorder_idx)

        # Concatenate the features and feed to first MLP
        stack_ij = tf.concat(
            [x_i, x_j, e_ij, e_ji], axis=-1
        )  # Shape: (n_edges, F + F + S + S)
        for stack_conv in range(0, len(self.stack_models)):
            stack_ij = self.stack_models[stack_conv](stack_ij)
            stack_ij = self.stack_model_acts[stack_conv](stack_ij)

        return stack_ij

    def aggregate(self, messages, x=None):
        """Aggregate incoming/outgoing stacks per node (optionally gated by
        attention) and concatenate them with the node features."""
        if self.attention:
            incoming_att = self.incoming_att_sigmoid(messages)
            incoming = self.incoming_att_multiply([incoming_att, messages])
            incoming = self.agg(incoming, self.index_targets, self.n_nodes)
            outgoing_att = self.outgoing_att_sigmoid(messages)
            outgoing = self.outgoing_att_multiply([outgoing_att, messages])
            outgoing = self.agg(outgoing, self.index_sources, self.n_nodes)
        else:
            incoming = self.agg(messages, self.index_targets, self.n_nodes)
            outgoing = self.agg(messages, self.index_sources, self.n_nodes)
        # Also forward the raw stacks so update() can compute the new edges.
        return tf.concat([x, incoming, outgoing], axis=-1), messages

    def update(self, embeddings):
        """Apply the output models to get the new node and edge features."""
        x_new, stack_ij = embeddings
        return self.node_model(x_new), self.edge_model(stack_ij)

    @property
    def config(self):
        return {
            "stack_channels": self.stack_channels,
            "node_channels": self.node_channels,
            "edge_channels": self.edge_channels,
            "attention": self.attention,
            "node_activation": self.node_activation,
            "edge_activation": self.edge_activation,
        }
class XENetConvBatch(Conv):
    r"""
    A batch-mode version of `XENetConv`.

    **Mode**: batch.

    **This layer expects a dense adjacency matrix.**

    **Input**

    - Node features of shape `(batch, n_nodes, n_node_features)`;
    - Binary adjacency matrices of shape `(batch, n_nodes, n_nodes)`;
    - Edge features of shape `(batch, n_nodes, n_nodes, n_edge_features)`;

    **Output**

    - Node features with the last dimension changed to `node_channels`;
    - Edge features with the last dimension changed to `edge_channels`.

    **Arguments**

    - `stack_channels`: integer or list of integers, number of channels for the hidden layers;
    - `node_channels`: integer, number of output channels for the nodes;
    - `edge_channels`: integer, number of output channels for the edges;
    - `attention`: whether to use attention when aggregating the stacks;
    - `node_activation`: activation function for nodes;
    - `edge_activation`: activation function for edges;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        stack_channels,
        node_channels,
        edge_channels,
        attention: bool = True,
        node_activation=None,
        edge_activation=None,
        aggregate: str = "sum",
        use_bias: bool = True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            aggregate=aggregate,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.stack_channels = stack_channels
        self.node_channels = node_channels
        self.edge_channels = edge_channels
        self.attention = attention
        self.node_activation = node_activation
        self.edge_activation = edge_activation

    def build(self, input_shape):
        """Create the stack MLP, the node/edge output models and, optionally,
        the attention gates."""
        assert len(input_shape) == 3  # X, A, E, right?
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
            dtype=self.dtype,
        )

        # MLP applied to the per-edge stacks: ReLU on hidden layers, PReLU on
        # the last one. PReLU slopes are shared over the two node axes.
        self.stack_models = []
        self.stack_model_acts = []
        if isinstance(self.stack_channels, Iterable):
            assert len(self.stack_channels) > 0
            for count, value in enumerate(self.stack_channels):
                self.stack_models.append(Dense(value, **layer_kwargs))
                if count != len(self.stack_channels) - 1:
                    self.stack_model_acts.append(ReLU())
                else:
                    self.stack_model_acts.append(PReLU(shared_axes=[1, 2]))
        else:
            self.stack_models.append(Dense(self.stack_channels, **layer_kwargs))
            self.stack_model_acts.append(PReLU(shared_axes=[1, 2]))

        self.node_model = Dense(
            self.node_channels, activation=self.node_activation, **layer_kwargs
        )
        self.edge_model = Dense(
            self.edge_channels, activation=self.edge_activation, **layer_kwargs
        )

        if self.attention:
            # Sigmoid gates weighting each stack before aggregation.
            self.incoming_att_sigmoid = Dense(1, activation="sigmoid")
            self.incoming_att_multiply = Multiply()
            self.outgoing_att_sigmoid = Dense(1, activation="sigmoid")
            self.outgoing_att_multiply = Multiply()

        self.built = True

    def call(self, inputs):
        """Compute the new node and edge features; returns (x, e)."""
        x, a, e = inputs
        assert len(x.shape) == 3
        assert len(e.shape) == 4

        # Build the (batch, N, N, 2F + 2S) stack tensor by broadcasting node
        # features over both node axes and pairing e_ij with e_ji.
        N = tf.shape(x)[1]
        xi_shape = [-1, N, 1, x.shape[2]]
        xj_shape = [-1, 1, N, x.shape[2]]
        xi = tf.reshape(x, xi_shape)  # b n 1 f
        xj = tf.reshape(x, xj_shape)  # b 1 n f
        xi = tf.repeat(xi, N, axis=2)
        xj = tf.repeat(xj, N, axis=1)
        e_transpose = tf.transpose(e, perm=[0, 2, 1, 3])
        stack = tf.concat([xi, xj, e, e_transpose], axis=-1)
        for i in range(0, len(self.stack_models)):
            stack = self.stack_models[i](stack)
            stack = self.stack_model_acts[i](stack)

        e_mask_shape = [-1, tf.shape(a)[1], tf.shape(a)[2], 1]
        e_mask = tf.reshape(a, e_mask_shape)
        # zero-out elements that aren't edges in the adjacency matrix
        stack = Multiply()([stack, e_mask])

        # Aggregate stacks per node: sum over axis -2 collects incoming edges,
        # sum over axis -3 collects outgoing ones.
        if self.attention:
            att1 = self.incoming_att_sigmoid(stack)
            incoming_e = self.incoming_att_multiply([stack, att1])
            incoming_e = tf.keras.backend.sum(incoming_e, axis=-2, keepdims=False)
            att2 = self.outgoing_att_sigmoid(stack)
            outgoing_e = self.outgoing_att_multiply([stack, att2])
            outgoing_e = tf.keras.backend.sum(outgoing_e, axis=-3, keepdims=False)
        else:
            incoming_e = tf.keras.backend.sum(stack, axis=-2, keepdims=False)
            outgoing_e = tf.keras.backend.sum(stack, axis=-3, keepdims=False)

        final_stack = Concatenate(axis=-1)([x, incoming_e, outgoing_e])
        x = self.node_model(final_stack)
        e = self.edge_model(stack)
        # zero-out elements that aren't edges in the adjacency matrix
        e = Multiply()([e, e_mask])
        return x, e

    @property
    def config(self):
        return {
            "stack_channels": self.stack_channels,
            "node_channels": self.node_channels,
            "edge_channels": self.edge_channels,
            "attention": self.attention,
            "node_activation": self.node_activation,
            "edge_activation": self.edge_activation,
        }
| 13,762 | 36.603825 | 178 | py |
spektral | spektral-master/spektral/layers/convolutional/cheb_conv.py | from tensorflow.keras import backend as KB
from spektral.layers import ops
from spektral.layers.convolutional.conv import Conv
from spektral.utils import normalized_laplacian, rescale_laplacian
class ChebConv(Conv):
    r"""
    A Chebyshev convolutional layer from the paper

    > [Convolutional Neural Networks on Graphs with Fast Localized Spectral
    Filtering](https://arxiv.org/abs/1606.09375)<br>
    > Michaël Defferrard et al.

    **Mode**: single, disjoint, mixed, batch.

    This layer computes a degree-\(K - 1\) polynomial filter of the rescaled
    Laplacian \(\tilde \L\):
    $$
        \X' = \sum \limits_{k=0}^{K - 1} \T^{(k)} \W^{(k)} + \b^{(k)},
    $$
    where the Chebyshev terms follow the recurrence
    $$
        \T^{(0)} = \X, \quad
        \T^{(1)} = \tilde \L \X, \quad
        \T^{(k)} = 2 \tilde \L \T^{(k - 1)} - \T^{(k - 2)},
    $$
    with \(\tilde \L = \frac{2}{\lambda_{max}} (\I - \D^{-1/2} \A \D^{-1/2}) - \I\).

    **Input**

    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Rescaled normalized Laplacian of shape `([batch], n_nodes, n_nodes)`;
    can be computed with `ChebConv.preprocess` (or
    `spektral.utils.convolution.chebyshev_filter`).

    **Output**

    - Node features with the same shape of the input, but with the last
    dimension changed to `channels`.

    **Arguments**

    - `channels`: number of output channels;
    - `K`: order of the Chebyshev polynomials;
    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        channels,
        K=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.channels = channels
        self.K = K

    def build(self, input_shape):
        assert len(input_shape) >= 2
        n_node_features = input_shape[0][-1]
        # One (F, channels) weight matrix per polynomial order.
        self.kernel = self.add_weight(
            shape=(self.K, n_node_features, self.channels),
            initializer=self.kernel_initializer,
            name="kernel",
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                shape=(self.channels,),
                initializer=self.bias_initializer,
                name="bias",
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        self.built = True

    def call(self, inputs, mask=None):
        x, a = inputs

        # Chebyshev recurrence: T_0 = x, T_1 = a x, T_k = 2 a T_{k-1} - T_{k-2}
        t_prev, t_curr = None, x
        output = KB.dot(t_curr, self.kernel[0])
        for k in range(1, self.K):
            if k == 1:
                t_next = ops.modal_dot(a, x)
            else:
                t_next = 2 * ops.modal_dot(a, t_curr) - t_prev
            output += KB.dot(t_next, self.kernel[k])
            t_prev, t_curr = t_curr, t_next

        if self.use_bias:
            output = KB.bias_add(output, self.bias)
        if mask is not None:
            output *= mask[0]
        return self.activation(output)

    @property
    def config(self):
        return {"channels": self.channels, "K": self.K}

    @staticmethod
    def preprocess(a):
        # Normalized Laplacian rescaled to [-1, 1] for the Chebyshev basis.
        return rescale_laplacian(normalized_laplacian(a))
| 4,462 | 29.993056 | 91 | py |
spektral | spektral-master/spektral/layers/convolutional/appnp_conv.py | from tensorflow.keras import activations
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
from spektral.layers import ops
from spektral.layers.convolutional.conv import Conv
from spektral.utils import gcn_filter
class APPNPConv(Conv):
    r"""
    The APPNP operator from the paper

    > [Predict then Propagate: Graph Neural Networks meet Personalized PageRank](https://arxiv.org/abs/1810.05997)<br>
    > Johannes Klicpera et al.

    **Mode**: single, disjoint, mixed, batch.

    This layer first transforms the node features with an MLP and then runs
    \(K\) steps of personalized-PageRank propagation:
    $$
        \Z^{(0)} = \textrm{MLP}(\X); \\
        \Z^{(K)} = (1 - \alpha) \hat \D^{-1/2} \hat \A \hat \D^{-1/2} \Z^{(K - 1)} +
        \alpha \Z^{(0)},
    $$
    where \(\alpha\) is the teleport probability and \(K\) is given by the
    `propagations` argument.

    **Input**

    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Modified Laplacian of shape `([batch], n_nodes, n_nodes)`; can be computed with
    `spektral.utils.convolution.gcn_filter`.

    **Output**

    - Node features with the same shape as the input, but with the last
    dimension changed to `channels`.

    **Arguments**

    - `channels`: number of output channels;
    - `alpha`: teleport probability during propagation;
    - `propagations`: number of propagation steps;
    - `mlp_hidden`: list of integers, number of hidden units for each hidden
    layer in the MLP (if None, the MLP has only the output layer);
    - `mlp_activation`: activation for the MLP layers;
    - `dropout_rate`: dropout rate for Laplacian and MLP layers;
    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        channels,
        alpha=0.2,
        propagations=1,
        mlp_hidden=None,
        mlp_activation="relu",
        dropout_rate=0.0,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.channels = channels
        self.mlp_hidden = mlp_hidden if mlp_hidden else []
        self.alpha = alpha
        self.propagations = propagations
        self.mlp_activation = activations.get(mlp_activation)
        self.dropout_rate = dropout_rate

    def build(self, input_shape):
        assert len(input_shape) >= 2
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
            dtype=self.dtype,
        )
        # Dropout + Dense for every hidden layer, then a linear output layer.
        self.mlp = Sequential()
        for n_units in self.mlp_hidden:
            self.mlp.add(Dropout(self.dropout_rate))
            self.mlp.add(Dense(n_units, self.mlp_activation, **layer_kwargs))
        self.mlp.add(Dense(self.channels, "linear", **layer_kwargs))
        self.built = True

    def call(self, inputs, mask=None):
        x, a = inputs
        # Z^(0) = MLP(x), then K steps of propagation with teleport alpha.
        z0 = self.mlp(x)
        z = z0
        for _ in range(self.propagations):
            z = (1 - self.alpha) * ops.modal_dot(a, z) + self.alpha * z0
        if mask is not None:
            z *= mask[0]
        return self.activation(z)

    @property
    def config(self):
        return {
            "channels": self.channels,
            "alpha": self.alpha,
            "propagations": self.propagations,
            "mlp_hidden": self.mlp_hidden,
            "mlp_activation": activations.serialize(self.mlp_activation),
            "dropout_rate": self.dropout_rate,
        }

    @staticmethod
    def preprocess(a):
        return gcn_filter(a)
| 5,098 | 33.452703 | 118 | py |
spektral | spektral-master/spektral/layers/convolutional/agnn_conv.py | import tensorflow as tf
from tensorflow.keras import backend as K
from spektral.layers import ops
from spektral.layers.convolutional.message_passing import MessagePassing
class AGNNConv(MessagePassing):
    r"""
    An Attention-based Graph Neural Network (AGNN) from the paper

    > [Attention-based Graph Neural Network for Semi-supervised Learning](https://arxiv.org/abs/1803.03735)<br>
    > Kiran K. Thekumparampil et al.

    **Mode**: single, disjoint, mixed.

    **This layer expects a sparse adjacency matrix.**

    This layer computes \(\X' = \P\X\), where the propagation matrix is a
    softmax over cosine similarities of neighbouring node features:
    $$
        \P_{ij} = \frac{
            \exp \left( \beta \cos \left( \x_i, \x_j \right) \right)
        }{
            \sum\limits_{k \in \mathcal{N}(i) \cup \{ i \}}
            \exp \left( \beta \cos \left( \x_i, \x_k \right) \right)
        }
    $$
    and \(\beta\) is a trainable parameter.

    **Input**

    - Node features of shape `(n_nodes, n_node_features)`;
    - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.

    **Output**

    - Node features with the same shape of the input.

    **Arguments**

    - `trainable`: boolean, if True, then beta is a trainable parameter.
    Otherwise, beta is fixed to 1;
    - `activation`: activation function;
    """

    def __init__(self, trainable=True, aggregate="sum", activation=None, **kwargs):
        super().__init__(aggregate=aggregate, activation=activation, **kwargs)
        self.trainable = trainable

    def build(self, input_shape):
        assert len(input_shape) >= 2
        if self.trainable:
            # Learnable attention temperature beta.
            self.beta = self.add_weight(shape=(1,), initializer="ones", name="beta")
        else:
            self.beta = tf.cast(1.0, self.dtype)
        self.built = True

    def call(self, inputs, **kwargs):
        x, a, _ = self.get_inputs(inputs)
        # Pre-compute unit-norm features so the message step can take plain
        # dot products as cosine similarities.
        x_norm = K.l2_normalize(x, axis=-1)
        return self.activation(self.propagate(x, a, x_norm=x_norm))

    def message(self, x, x_norm=None):
        neighbors = self.get_sources(x)
        cos_sim = tf.reduce_sum(
            self.get_targets(x_norm) * self.get_sources(x_norm), axis=-1
        )
        att = self.beta * cos_sim
        # In mixed mode the edge axis must come first for the segment softmax.
        if len(att.shape) == 2:
            att = tf.transpose(att)
        att = ops.unsorted_segment_softmax(att, self.index_targets, self.n_nodes)
        if len(att.shape) == 2:
            att = tf.transpose(att)
        return att[..., None] * neighbors

    @property
    def config(self):
        return {
            "trainable": self.trainable,
        }
| 2,666 | 28.633333 | 111 | py |
spektral | spektral-master/spektral/layers/convolutional/arma_conv.py | from tensorflow.keras import activations
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dropout
from spektral.layers import ops
from spektral.layers.convolutional.conv import Conv
from spektral.utils import normalized_adjacency
class ARMAConv(Conv):
r"""
An Auto-Regressive Moving Average convolutional layer (ARMA) from the paper
> [Graph Neural Networks with convolutional ARMA filters](https://arxiv.org/abs/1901.01343)<br>
> Filippo Maria Bianchi et al.
**Mode**: single, disjoint, mixed, batch.
This layer computes:
$$
\X' = \frac{1}{K} \sum\limits_{k=1}^K \bar\X_k^{(T)},
$$
where \(K\) is the order of the ARMA\(_K\) filter, and where:
$$
\bar \X_k^{(t + 1)} =
\sigma \left(\tilde \A \bar \X^{(t)} \W^{(t)} + \X \V^{(t)} \right)
$$
is a recursive approximation of an ARMA\(_1\) filter, where
\( \bar \X^{(0)} = \X \)
and
$$
\tilde \A = \D^{-1/2} \A \D^{-1/2}.
$$
**Input**
- Node features of shape `([batch], n_nodes, n_node_features)`;
- Normalized and rescaled Laplacian of shape `([batch], n_nodes, n_nodes)`; can be
computed with `spektral.utils.convolution.normalized_laplacian` and
`spektral.utils.convolution.rescale_laplacian`.
**Output**
- Node features with the same shape as the input, but with the last
dimension changed to `channels`.
**Arguments**
- `channels`: number of output channels;
- `order`: order of the full ARMA\(_K\) filter, i.e., the number of parallel
stacks in the layer;
- `iterations`: number of iterations to compute each ARMA\(_1\) approximation;
- `share_weights`: share the weights in each ARMA\(_1\) stack.
- `gcn_activation`: activation function to compute each ARMA\(_1\)
stack;
- `dropout_rate`: dropout rate for skip connection;
- `activation`: activation function;
- `use_bias`: bool, add a bias vector to the output;
- `kernel_initializer`: initializer for the weights;
- `bias_initializer`: initializer for the bias vector;
- `kernel_regularizer`: regularization applied to the weights;
- `bias_regularizer`: regularization applied to the bias vector;
- `activity_regularizer`: regularization applied to the output;
- `kernel_constraint`: constraint applied to the weights;
- `bias_constraint`: constraint applied to the bias vector.
"""
def __init__(
self,
channels,
order=1,
iterations=1,
share_weights=False,
gcn_activation="relu",
dropout_rate=0.0,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
self.channels = channels
self.iterations = iterations
self.order = order
self.share_weights = share_weights
self.gcn_activation = activations.get(gcn_activation)
self.dropout_rate = dropout_rate
def build(self, input_shape):
assert len(input_shape) >= 2
F = input_shape[0][-1]
# Create weights for parallel stacks
# self.kernels[k][i] refers to the k-th stack, i-th iteration
self.kernels = []
for k in range(self.order):
kernel_stack = []
current_shape = F
for i in range(self.iterations):
kernel_stack.append(
self.create_weights(
current_shape, F, self.channels, "ARMA_GCS_{}{}".format(k, i)
)
)
current_shape = self.channels
if self.share_weights and i == 1:
# No need to continue because all weights will be shared
break
self.kernels.append(kernel_stack)
self.dropout = Dropout(self.dropout_rate, dtype=self.dtype)
self.built = True
def call(self, inputs, mask=None):
x, a = inputs
output = []
for k in range(self.order):
output_k = x
for i in range(self.iterations):
output_k = self.gcs([output_k, x, a], k, i)
output.append(output_k)
output = K.stack(output, axis=-1)
output = K.mean(output, axis=-1)
if mask is not None:
output *= mask[0]
output = self.activation(output)
return output
def create_weights(self, input_dim, input_dim_skip, channels, name):
"""
Creates a set of weights for a GCN with skip connections.
:param input_dim: dimension of the input space
:param input_dim_skip: dimension of the input space for the skip connection
:param channels: dimension of the output space
:param name: name of the layer
:return:
- kernel_1, from input space of the layer to output space
- kernel_2, from input space of the skip connection to output space
- bias, bias vector on the output space if use_bias=True, None otherwise.
"""
kernel_1 = self.add_weight(
shape=(input_dim, channels),
name=name + "_kernel_1",
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
kernel_2 = self.add_weight(
shape=(input_dim_skip, channels),
name=name + "_kernel_2",
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
bias = None
if self.use_bias:
bias = self.add_weight(
shape=(channels,),
name=name + "_bias",
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
return kernel_1, kernel_2, bias
def gcs(self, inputs, stack, iteration):
"""
Creates a graph convolutional layer with a skip connection.
:param inputs: list of input Tensors, namely
- input node features
- input node features for the skip connection
- normalized adjacency matrix;
:param stack: int, current stack (used to retrieve kernels);
:param iteration: int, current iteration (used to retrieve kernels);
:return: output node features.
"""
x, x_skip, a = inputs
itr = 1 if self.share_weights and iteration >= 1 else iteration
kernel_1, kernel_2, bias = self.kernels[stack][itr]
output = K.dot(x, kernel_1)
output = ops.modal_dot(a, output)
skip = K.dot(x_skip, kernel_2)
skip = self.dropout(skip)
output += skip
if self.use_bias:
output = K.bias_add(output, bias)
output = self.gcn_activation(output)
return output
@property
def config(self):
return {
"channels": self.channels,
"iterations": self.iterations,
"order": self.order,
"share_weights": self.share_weights,
"gcn_activation": activations.serialize(self.gcn_activation),
"dropout_rate": self.dropout_rate,
}
    @staticmethod
    def preprocess(a):
        # Symmetrically-normalized adjacency: D^{-1/2} A D^{-1/2}.
        return normalized_adjacency(a, symmetric=True)
| 8,048 | 34.148472 | 99 | py |
spektral | spektral-master/spektral/layers/convolutional/general_conv.py | import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras.layers import BatchNormalization, Dropout, PReLU
from spektral.layers.convolutional.message_passing import MessagePassing
class GeneralConv(MessagePassing):
    r"""
    A general convolutional layer from the paper
    > [Design Space for Graph Neural Networks](https://arxiv.org/abs/2011.08843)<br>
    > Jiaxuan You et al.
    **Mode**: single, disjoint, mixed.
    **This layer expects a sparse adjacency matrix.**
    This layer computes:
    $$
        \x_i' = \mathrm{Agg} \left( \left\{ \mathrm{Act} \left( \mathrm{Dropout}
        \left( \mathrm{BN} \left( \x_j \W + \b \right) \right) \right),
        j \in \mathcal{N}(i) \right\} \right)
    $$
    where \( \mathrm{Agg} \) is an aggregation function for the messages,
    \( \mathrm{Act} \) is an activation function, \( \mathrm{Dropout} \)
    applies dropout to the node features, and \( \mathrm{BN} \) applies batch
    normalization to the node features.
    This layer supports the PReLU activation via the 'prelu' keyword.
    The default parameters of this layer are selected according to the best
    results obtained in the paper, and should provide a good performance on
    many node-level and graph-level tasks, without modifications.
    The defaults are as follows:
    - 256 channels
    - Batch normalization
    - No dropout
    - PReLU activation
    - Sum aggregation
    If you are uncertain about which layers to use for your GNN, this is a
    safe choice. Check out the original paper for more specific configurations.
    **Input**
    - Node features of shape `(n_nodes, n_node_features)`;
    - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.
    **Output**
    - Node features with the same shape of the input, but the last dimension
    changed to `channels`.
    **Arguments**
    - `channels`: integer, number of output channels;
    - `batch_norm`: bool, whether to use batch normalization;
    - `dropout`: float, dropout rate;
    - `aggregate`: string or callable, an aggregation function. Supported
    aggregations: 'sum', 'mean', 'max', 'min', 'prod'.
    - `activation`: activation function. This layer also supports the
    advanced activation PReLU by passing `activation='prelu'`.
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        channels=256,
        batch_norm=True,
        dropout=0.0,
        aggregate="sum",
        activation="prelu",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # The activation is applied manually in call() (before propagation),
        # so the base class gets activation=None.
        super().__init__(
            aggregate=aggregate,
            activation=None,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.channels = channels
        self.dropout_rate = dropout
        self.use_batch_norm = batch_norm
        # PReLU is an advanced activation implemented as a Keras layer, so it
        # cannot be retrieved with activations.get(). The `"prelu" in kwargs`
        # branch restores it when the layer is rebuilt via from_config().
        if activation == "prelu" or "prelu" in kwargs:
            self.activation = PReLU()
        else:
            self.activation = activations.get(activation)

    def build(self, input_shape):
        input_dim = input_shape[0][-1]
        self.dropout = Dropout(self.dropout_rate)
        if self.use_batch_norm:
            self.batch_norm = BatchNormalization()
        self.kernel = self.add_weight(
            shape=(input_dim, self.channels),
            initializer=self.kernel_initializer,
            name="kernel",
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                shape=(self.channels,),
                initializer=self.bias_initializer,
                name="bias",
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        self.built = True

    def call(self, inputs, **kwargs):
        x, a, _ = self.get_inputs(inputs)

        # TODO: a = add_self_loops(a)

        # Transform node features, then aggregate over the neighborhood.
        x = tf.matmul(x, self.kernel)
        if self.use_bias:
            x = tf.nn.bias_add(x, self.bias)
        if self.use_batch_norm:
            x = self.batch_norm(x)
        x = self.dropout(x)
        x = self.activation(x)
        return self.propagate(x, a)

    @property
    def config(self):
        # Include batch_norm and dropout so that get_config()/from_config()
        # round-trips preserve the full layer configuration (previously these
        # two settings were silently dropped on serialization).
        config = {
            "channels": self.channels,
            "batch_norm": self.use_batch_norm,
            "dropout": self.dropout_rate,
        }
        if self.activation.__class__.__name__ == "PReLU":
            # PReLU cannot be serialized with activations.serialize(); store a
            # flag that __init__ recognizes instead.
            config["prelu"] = True
        return config
| 5,435 | 32.975 | 84 | py |
spektral | spektral-master/spektral/layers/convolutional/gin_conv.py | import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras.layers import BatchNormalization, Dense
from tensorflow.keras.models import Sequential
from spektral.layers import ops
from spektral.layers.convolutional.message_passing import MessagePassing
class GINConv(MessagePassing):
    r"""
    A Graph Isomorphism Network (GIN) from the paper
    > [How Powerful are Graph Neural Networks?](https://arxiv.org/abs/1810.00826)<br>
    > Keyulu Xu et al.
    **Mode**: single, disjoint, mixed.
    **This layer expects a sparse adjacency matrix.**
    This layer computes for each node \(i\):
    $$
        \x_i' = \textrm{MLP}\big( (1 + \epsilon) \cdot \x_i + \sum\limits_{j
        \in \mathcal{N}(i)} \x_j \big)
    $$
    where \(\textrm{MLP}\) is a multi-layer perceptron.
    **Input**
    - Node features of shape `(n_nodes, n_node_features)`;
    - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.
    **Output**
    - Node features with the same shape of the input, but the last dimension
    changed to `channels`.
    **Arguments**
    - `channels`: integer, number of output channels;
    - `epsilon`: unnamed parameter, see the original paper and the equation
    above.
    By setting `epsilon=None`, the parameter will be learned (default behaviour).
    If given as a value, the parameter will stay fixed.
    - `mlp_hidden`: list of integers, number of hidden units for each hidden
    layer in the MLP (if None, the MLP has only the output layer);
    - `mlp_activation`: activation for the MLP layers;
    - `mlp_batchnorm`: apply batch normalization after every hidden layer of the MLP;
    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        channels,
        epsilon=None,
        mlp_hidden=None,
        mlp_activation="relu",
        mlp_batchnorm=True,
        aggregate="sum",
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            aggregate=aggregate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.channels = channels
        self.epsilon = epsilon
        self.mlp_hidden = mlp_hidden if mlp_hidden else []
        self.mlp_activation = activations.get(mlp_activation)
        self.mlp_batchnorm = mlp_batchnorm

    def build(self, input_shape):
        assert len(input_shape) >= 2
        # All MLP layers share the layer's initializers/regularizers/constraints.
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
        )
        self.mlp = Sequential()
        for channels in self.mlp_hidden:
            self.mlp.add(Dense(channels, self.mlp_activation, **layer_kwargs))
            if self.mlp_batchnorm:
                self.mlp.add(BatchNormalization())
        # Output layer of the MLP uses the layer's own activation and bias.
        self.mlp.add(
            Dense(
                self.channels, self.activation, use_bias=self.use_bias, **layer_kwargs
            )
        )
        if self.epsilon is None:
            # Learnable epsilon, initialized to 0 as in the paper.
            self.eps = self.add_weight(shape=(1,), initializer="zeros", name="eps")
        else:
            # If epsilon is given, keep it constant
            self.eps = tf.cast(self.epsilon, self.dtype)
        self.one = tf.cast(1, self.dtype)
        self.built = True

    def call(self, inputs, **kwargs):
        x, a, _ = self.get_inputs(inputs)
        # (1 + eps) * x_i + sum_{j in N(i)} x_j, then transform with the MLP.
        output = self.mlp((self.one + self.eps) * x + self.propagate(x, a))
        return output

    @property
    def config(self):
        return {
            "channels": self.channels,
            "epsilon": self.epsilon,
            "mlp_hidden": self.mlp_hidden,
            # Serialize the activation (stored as a callable) so get_config()
            # round-trips through JSON, consistently with the other layers in
            # this package.
            "mlp_activation": activations.serialize(self.mlp_activation),
            "mlp_batchnorm": self.mlp_batchnorm,
        }
class GINConvBatch(GINConv):
    r"""
    A batch-mode version of GINConv.
    **Mode**: batch.
    **This layer expects a dense adjacency matrix.**
    """

    def call(self, inputs, **kwargs):
        x, a = inputs
        # (1 + eps) * x_i + sum_j A_ij x_j, then transform with the MLP.
        aggregated = ops.modal_dot(a, x)
        return self.mlp((self.one + self.eps) * x + aggregated)
| 5,345 | 32.4125 | 86 | py |
spektral | spektral-master/spektral/layers/convolutional/graphsage_conv.py | from tensorflow.keras import backend as K
from spektral.layers import ops
from spektral.layers.convolutional.message_passing import MessagePassing
class GraphSageConv(MessagePassing):
    r"""
    A GraphSAGE layer from the paper
    > [Inductive Representation Learning on Large Graphs](https://arxiv.org/abs/1706.02216)<br>
    > William L. Hamilton et al.
    **Mode**: single, disjoint, mixed.
    **This layer expects a sparse adjacency matrix.**
    This layer computes:
    $$
        \X' = \big[ \textrm{AGGREGATE}(\X) \| \X \big] \W + \b; \\
        \X' = \frac{\X'}{\|\X'\|}
    $$
    where \( \textrm{AGGREGATE} \) is a function to aggregate a node's
    neighbourhood. The supported aggregation methods are: sum, mean,
    max, min, and product.
    **Input**
    - Node features of shape `(n_nodes, n_node_features)`;
    - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.
    **Output**
    - Node features with the same shape as the input, but with the last
    dimension changed to `channels`.
    **Arguments**
    - `channels`: number of output channels;
    - `aggregate_op`: str, aggregation method to use (`'sum'`, `'mean'`,
    `'max'`, `'min'`, `'prod'`);
    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """
    def __init__(
        self,
        channels,
        aggregate="mean",
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            aggregate=aggregate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.channels = channels
    def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[0][-1]
        # The kernel acts on [x || aggregated(x)], hence the 2 * input_dim rows.
        self.kernel = self.add_weight(
            shape=(2 * input_dim, self.channels),
            initializer=self.kernel_initializer,
            name="kernel",
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                shape=(self.channels,),
                initializer=self.bias_initializer,
                name="bias",
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        self.built = True
    def call(self, inputs):
        x, a, _ = self.get_inputs(inputs)
        # Include each node in its own neighbourhood before aggregating.
        a = ops.add_self_loops(a)
        aggregated = self.propagate(x, a)
        # Concatenate self features with the aggregated neighbourhood, project,
        # then L2-normalize each node embedding (as prescribed in the paper).
        output = K.concatenate([x, aggregated])
        output = K.dot(output, self.kernel)
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        output = K.l2_normalize(output, axis=-1)
        if self.activation is not None:
            output = self.activation(output)
        return output
    @property
    def config(self):
        return {"channels": self.channels}
| 3,941 | 31.04878 | 95 | py |
spektral | spektral-master/spektral/layers/convolutional/edge_conv.py | from tensorflow.keras import activations
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from spektral.layers.convolutional.message_passing import MessagePassing
class EdgeConv(MessagePassing):
    r"""
    An edge convolutional layer from the paper
    > [Dynamic Graph CNN for Learning on Point Clouds](https://arxiv.org/abs/1801.07829)<br>
    > Yue Wang et al.
    **Mode**: single, disjoint, mixed.
    **This layer expects a sparse adjacency matrix.**
    This layer computes for each node \(i\):
    $$
        \x_i' = \sum\limits_{j \in \mathcal{N}(i)} \textrm{MLP}\big( \x_i \|
        \x_j - \x_i \big)
    $$
    where \(\textrm{MLP}\) is a multi-layer perceptron.
    **Input**
    - Node features of shape `(n_nodes, n_node_features)`;
    - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.
    **Output**
    - Node features with the same shape of the input, but the last dimension
    changed to `channels`.
    **Arguments**
    - `channels`: integer, number of output channels;
    - `mlp_hidden`: list of integers, number of hidden units for each hidden
    layer in the MLP (if None, the MLP has only the output layer);
    - `mlp_activation`: activation for the MLP layers;
    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        channels,
        mlp_hidden=None,
        mlp_activation="relu",
        aggregate="sum",
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            aggregate=aggregate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.channels = channels
        self.mlp_hidden = mlp_hidden if mlp_hidden else []
        self.mlp_activation = activations.get(mlp_activation)

    def build(self, input_shape):
        assert len(input_shape) >= 2
        # All MLP layers share the layer's initializers/regularizers/constraints.
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
            dtype=self.dtype,
        )
        self.mlp = Sequential(
            [
                Dense(channels, self.mlp_activation, **layer_kwargs)
                for channels in self.mlp_hidden
            ]
            + [
                Dense(
                    self.channels,
                    self.activation,
                    use_bias=self.use_bias,
                    **layer_kwargs,
                )
            ]
        )
        self.built = True

    def message(self, x, **kwargs):
        # Message from j to i: MLP([x_i || x_j - x_i]).
        x_i = self.get_targets(x)
        x_j = self.get_sources(x)
        return self.mlp(K.concatenate((x_i, x_j - x_i)))

    @property
    def config(self):
        return {
            "channels": self.channels,
            "mlp_hidden": self.mlp_hidden,
            # Serialize the activation (stored as a callable) so get_config()
            # round-trips through JSON, consistently with the other layers in
            # this package.
            "mlp_activation": activations.serialize(self.mlp_activation),
        }
| 4,206 | 31.612403 | 92 | py |
spektral | spektral-master/spektral/layers/convolutional/gated_graph_conv.py | import tensorflow as tf
from tensorflow.keras.layers import GRUCell
from spektral.layers.convolutional.message_passing import MessagePassing
class GatedGraphConv(MessagePassing):
    r"""
    A gated graph convolutional layer from the paper
    > [Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493)<br>
    > Yujia Li et al.
    **Mode**: single, disjoint, mixed.
    **This layer expects a sparse adjacency matrix.**
    This layer computes \(\x_i' = \h^{(L)}_i\) where:
    $$
    \begin{align}
        & \h^{(0)}_i = \x_i \| \mathbf{0} \\
        & \m^{(l)}_i = \sum\limits_{j \in \mathcal{N}(i)} \h^{(l - 1)}_j \W \\
        & \h^{(l)}_i = \textrm{GRU} \left(\m^{(l)}_i, \h^{(l - 1)}_i \right) \\
    \end{align}
    $$
    where \(\textrm{GRU}\) is a gated recurrent unit cell.
    **Input**
    - Node features of shape `(n_nodes, n_node_features)`; note that
    `n_node_features` must be smaller or equal than `channels`.
    - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.
    **Output**
    - Node features with the same shape of the input, but the last dimension
    changed to `channels`.
    **Arguments**
    - `channels`: integer, number of output channels;
    - `n_layers`: integer, number of iterations with the GRU cell;
    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        channels,
        n_layers,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.channels = channels
        self.n_layers = n_layers

    def build(self, input_shape):
        assert len(input_shape) >= 2
        # The input features are zero-padded up to `channels` in call(), so
        # they must not exceed it. Validate here to fail with a clear message
        # instead of an opaque negative-padding error from tf.pad at runtime.
        input_dim = input_shape[0][-1]
        if input_dim is not None and input_dim > self.channels:
            raise ValueError(
                "channels ({}) must be greater than or equal to the number of "
                "input node features ({})".format(self.channels, input_dim)
            )
        # One (channels, channels) kernel per message-passing iteration.
        self.kernel = self.add_weight(
            name="kernel",
            shape=(self.n_layers, self.channels, self.channels),
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        self.rnn = GRUCell(
            self.channels,
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            activity_regularizer=self.activity_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
            use_bias=self.use_bias,
            dtype=self.dtype,
        )
        self.built = True

    def call(self, inputs):
        x, a, _ = self.get_inputs(inputs)
        # h^(0) = x padded with zeros up to `channels` features.
        F = tf.shape(x)[-1]
        to_pad = self.channels - F
        ndims = len(x.shape) - 1
        output = tf.pad(x, [[0, 0]] * ndims + [[0, to_pad]])
        for i in range(self.n_layers):
            # m^(l) = aggregate neighbours' transformed states, then update
            # the node state with the GRU cell.
            m = tf.matmul(output, self.kernel[i])
            m = self.propagate(m, a)
            output = self.rnn(m, [output])[0]
        output = self.activation(output)
        return output

    @property
    def config(self):
        return {
            "channels": self.channels,
            "n_layers": self.n_layers,
        }
| 4,254 | 32.242188 | 82 | py |
spektral | spektral-master/spektral/layers/convolutional/ecc_conv.py | import warnings
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from spektral.layers import ops
from spektral.layers.convolutional.conv import Conv
from spektral.layers.ops import modes
class ECCConv(Conv):
    r"""
    An edge-conditioned convolutional layer (ECC) from the paper
    > [Dynamic Edge-Conditioned Filters in Convolutional Neural Networks on
    Graphs](https://arxiv.org/abs/1704.02901)<br>
    > Martin Simonovsky and Nikos Komodakis
    **Mode**: single, disjoint, batch, mixed.
    **In single, disjoint, and mixed mode, this layer expects a sparse adjacency
    matrix. If a dense adjacency is given as input, it will be automatically
    cast to sparse, which might be expensive.**
    This layer computes:
    $$
        \x_i' = \x_{i} \W_{\textrm{root}} + \sum\limits_{j \in \mathcal{N}(i)}
        \x_{j} \textrm{MLP}(\e_{j \rightarrow i}) + \b
    $$
    where \(\textrm{MLP}\) is a multi-layer perceptron that outputs an
    edge-specific weight as a function of edge attributes.
    **Input**
    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Binary adjacency matrices of shape `([batch], n_nodes, n_nodes)`;
    - Edge features. In single mode, shape `(num_edges, n_edge_features)`; in
    batch mode, shape `(batch, n_nodes, n_nodes, n_edge_features)`.
    **Output**
    - node features with the same shape of the input, but the last dimension
    changed to `channels`.
    **Arguments**
    - `channels`: integer, number of output channels;
    - `kernel_network`: a list of integers representing the hidden neurons of
    the kernel-generating network;
    - 'root': if False, the layer will not consider the root node for computing
    the message passing (first term in equation above), but only the neighbours.
    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """
    def __init__(
        self,
        channels,
        kernel_network=None,
        root=True,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.channels = channels
        self.kernel_network = kernel_network
        self.root = root
    def build(self, input_shape):
        # F: input feature size; F_: output feature size (channels).
        F = input_shape[0][-1]
        F_ = self.channels
        # The kernel-generating network: an MLP that maps each edge's
        # attributes to a flattened (F, F_) weight matrix for that edge.
        self.kernel_network_layers = []
        if self.kernel_network is not None:
            for i, l in enumerate(self.kernel_network):
                self.kernel_network_layers.append(
                    Dense(
                        l,
                        name="FGN_{}".format(i),
                        activation="relu",
                        use_bias=self.use_bias,
                        kernel_initializer=self.kernel_initializer,
                        bias_initializer=self.bias_initializer,
                        kernel_regularizer=self.kernel_regularizer,
                        bias_regularizer=self.bias_regularizer,
                        kernel_constraint=self.kernel_constraint,
                        bias_constraint=self.bias_constraint,
                        dtype=self.dtype,
                    )
                )
        # Output layer of the MLP produces F * F_ values per edge.
        self.kernel_network_layers.append(
            Dense(F_ * F, dtype=self.dtype, name="FGN_out")
        )
        if self.root:
            # Separate linear transform applied to each node's own features.
            self.root_kernel = self.add_weight(
                name="root_kernel",
                shape=(F, F_),
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
            )
        if self.use_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.channels,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        self.built = True
    def call(self, inputs, mask=None):
        x, a, e = inputs
        # Parameters
        N = tf.shape(x)[-2]
        F = tf.shape(x)[-1]
        F_ = self.channels
        # Filter network
        # Run the edge attributes through the kernel-generating MLP.
        kernel_network = e
        for layer in self.kernel_network_layers:
            kernel_network = layer(kernel_network)
        # Convolution
        mode = ops.autodetect_mode(x, a)
        if mode == modes.BATCH:
            # Batch mode: e has shape (batch, N, N, S); build one (F_, F)
            # kernel per (i, j) pair, mask by the adjacency, and contract.
            kernel = K.reshape(kernel_network, (-1, N, N, F_, F))
            output = kernel * a[..., None, None]
            output = tf.einsum("abcde,ace->abd", output, x)
        else:
            # Enforce sparse representation
            # NOTE(review): the two warning fragments below concatenate
            # without a separating space in the emitted message.
            if not K.is_sparse(a):
                warnings.warn(
                    "Casting dense adjacency matrix to SparseTensor."
                    "This can be an expensive operation. "
                )
                a = tf.sparse.from_dense(a)
            # Sparse modes: one (F, F_) kernel per edge; mixed mode carries an
            # extra leading batch dimension.
            target_shape = (-1, F, F_)
            if mode == modes.MIXED:
                target_shape = (tf.shape(x)[0],) + target_shape
            kernel = tf.reshape(kernel_network, target_shape)
            # Gather source-node features per edge, transform them with the
            # per-edge kernel, then scatter-sum the messages onto the targets.
            index_targets = a.indices[:, 1]
            index_sources = a.indices[:, 0]
            messages = tf.gather(x, index_sources, axis=-2)
            messages = tf.einsum("...ab,...abc->...ac", messages, kernel)
            output = ops.scatter_sum(messages, index_targets, N)
        if self.root:
            # Add the root-node term: x_i @ W_root.
            output += K.dot(x, self.root_kernel)
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if mask is not None:
            output *= mask[0]
        output = self.activation(output)
        return output
    @property
    def config(self):
        return {
            "channels": self.channels,
            "kernel_network": self.kernel_network,
            "root": self.root,
        }
| 6,994 | 34.871795 | 82 | py |
spektral | spektral-master/spektral/layers/convolutional/gcn_conv.py | from tensorflow.keras import backend as K
from spektral.layers import ops
from spektral.layers.convolutional.conv import Conv
from spektral.utils import gcn_filter
class GCNConv(Conv):
    r"""
    A graph convolutional layer (GCN) from the paper
    > [Semi-Supervised Classification with Graph Convolutional Networks](https://arxiv.org/abs/1609.02907)<br>
    > Thomas N. Kipf and Max Welling
    **Mode**: single, disjoint, mixed, batch.
    This layer computes:
    $$
        \X' = \hat \D^{-1/2} \hat \A \hat \D^{-1/2} \X \W + \b
    $$
    where \( \hat \A = \A + \I \) is the adjacency matrix with added self-loops
    and \(\hat\D\) is its degree matrix.
    **Input**
    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Modified Laplacian of shape `([batch], n_nodes, n_nodes)`; can be computed with
    `spektral.utils.convolution.gcn_filter`.
    **Output**
    - Node features with the same shape as the input, but with the last
    dimension changed to `channels`.
    **Arguments**
    - `channels`: number of output channels;
    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        channels,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.channels = channels

    def build(self, input_shape):
        assert len(input_shape) >= 2
        n_features = input_shape[0][-1]
        # Dense projection from the input feature space to `channels`.
        self.kernel = self.add_weight(
            name="kernel",
            shape=(n_features, self.channels),
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.channels,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        self.built = True

    def call(self, inputs, mask=None):
        x, a = inputs
        # Project node features, then propagate over the normalized adjacency.
        h = ops.modal_dot(a, K.dot(x, self.kernel))
        if self.use_bias:
            h = K.bias_add(h, self.bias)
        if mask is not None:
            h *= mask[0]
        return self.activation(h)

    @property
    def config(self):
        return dict(channels=self.channels)

    @staticmethod
    def preprocess(a):
        # Apply the GCN normalization: D^{-1/2} (A + I) D^{-1/2}.
        return gcn_filter(a)
| 3,695 | 30.322034 | 110 | py |
spektral | spektral-master/spektral/layers/convolutional/gtv_conv.py | import tensorflow as tf
from tensorflow.keras import backend as K
from spektral.layers import ops
from spektral.layers.convolutional.conv import Conv
class GTVConv(Conv):
r"""
A graph total variation convolutional layer (GTVConv) from the paper
> [Total Variation Graph Neural Networks](https://arxiv.org/abs/2211.06218)<br>
> Jonas Berg Hansen and Filippo Maria Bianchi
**Mode**: single, disjoint, batch.
This layer computes
$$
\X' = \sigma\left[\left(\I - \delta\L_\hat{\mathbf{\Gamma}}\right) \tilde{\X} \right]
$$
where
$$
\begin{align}
\tilde{\X} &= \X \W\\[5pt]
\L_\hat{\mathbf{\Gamma}} &= \D_\mathbf{\hat{\Gamma}} - \hat{\mathbf{\Gamma}}\\[5pt]
[\hat{\mathbf{\Gamma}}]_{ij} &= \frac{[\mathbf{A}]_{ij}}{\max\{||\tilde{\x}_i-\tilde{\x}_j||_1, \epsilon\}}\\
\end{align}
$$
**Input**
- Node features of shape `(batch, n_nodes, n_node_features)`;
- Adjacency matrix of shape `(batch, n_nodes, n_nodes)`;
**Output**
- Node features with the same shape as the input, but with the last
dimension changed to `channels`.
**Arguments**
- `channels`: number of output channels;
- `delta_coeff`: step size for gradient descent of GTV
- `epsilon`: small number used to numerically stabilize the computation of new adjacency weights
- `activation`: activation function;
- `use_bias`: bool, add a bias vector to the output;
- `kernel_initializer`: initializer for the weights;
- `bias_initializer`: initializer for the bias vector;
- `kernel_regularizer`: regularization applied to the weights;
- `bias_regularizer`: regularization applied to the bias vector;
- `activity_regularizer`: regularization applied to the output;
- `kernel_constraint`: constraint applied to the weights;
- `bias_constraint`: constraint applied to the bias vector.
"""
def __init__(
self,
channels,
delta_coeff=1.0,
epsilon=0.001,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
self.channels = channels
self.delta_coeff = delta_coeff
self.epsilon = epsilon
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[0][-1]
self.kernel = self.add_weight(
shape=(input_dim, self.channels),
initializer=self.kernel_initializer,
name="kernel",
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.channels,),
initializer=self.bias_initializer,
name="bias",
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
self.built = True
def call(self, inputs, mask=None):
x, a = inputs
mode = ops.autodetect_mode(x, a)
x = K.dot(x, self.kernel)
if mode == ops.modes.SINGLE:
output = self._call_single(x, a)
elif mode == ops.modes.BATCH:
output = self._call_batch(x, a)
if self.use_bias:
output = K.bias_add(output, self.bias)
if mask is not None:
output *= mask[0]
output = self.activation(output)
return output
    def _call_single(self, x, a):
        """Single/disjoint-mode propagation.

        ``x``: node features already multiplied by the kernel in ``call()``,
        shape ``(n_nodes, channels)``; ``a``: adjacency matrix of shape
        ``(n_nodes, n_nodes)``, sparse or dense.
        """
        if K.is_sparse(a):
            index_i = a.indices[:, 0]
            index_j = a.indices[:, 1]
            n_nodes = tf.shape(a, out_type=index_i.dtype)[0]
            # Compute absolute differences between neighbouring nodes
            abs_diff = tf.math.abs(
                tf.transpose(tf.gather(x, index_i))
                - tf.transpose(tf.gather(x, index_j))
            )
            abs_diff = tf.math.reduce_sum(abs_diff, axis=0)
            # Compute new adjacency matrix; epsilon keeps the division
            # well-defined when neighbouring features coincide.
            gamma = tf.sparse.map_values(
                tf.multiply, a, 1 / tf.math.maximum(abs_diff, self.epsilon)
            )
            # Compute degree matrix from gamma matrix
            d_gamma = tf.sparse.SparseTensor(
                tf.stack([tf.range(n_nodes)] * 2, axis=1),
                tf.sparse.reduce_sum(gamma, axis=-1),
                [n_nodes, n_nodes],
            )
            # Compute Laplacian: L = D_gamma - Gamma
            l = tf.sparse.add(d_gamma, tf.sparse.map_values(tf.multiply, gamma, -1.0))
            # Compute adjusted Laplacian: L_adjusted = I - delta*L
            l = tf.sparse.add(
                tf.sparse.eye(n_nodes, dtype=x.dtype),
                tf.sparse.map_values(tf.multiply, l, -self.delta_coeff),
            )
            # Aggregate features with adjusted Laplacian
            output = ops.modal_dot(l, x)
        else:
            n_nodes = tf.shape(a)[-1]
            # Dense pairwise L1 distances between all node feature vectors.
            abs_diff = tf.math.abs(x[:, tf.newaxis, :] - x)
            abs_diff = tf.reduce_sum(abs_diff, axis=-1)
            gamma = a / tf.math.maximum(abs_diff, self.epsilon)
            degrees = tf.math.reduce_sum(gamma, axis=-1)
            # Laplacian L = D_gamma - Gamma, built via its diagonal.
            l = -gamma
            l = tf.linalg.set_diag(l, degrees - tf.linalg.diag_part(gamma))
            # Adjusted Laplacian: I - delta * L
            l = tf.eye(n_nodes, dtype=x.dtype) - self.delta_coeff * l
            output = tf.matmul(l, x)
        return output
def _call_batch(self, x, a):
n_nodes = tf.shape(a)[-1]
abs_diff = tf.reduce_sum(
tf.math.abs(tf.expand_dims(x, 2) - tf.expand_dims(x, 1)), axis=-1
)
gamma = a / tf.math.maximum(abs_diff, self.epsilon)
degrees = tf.math.reduce_sum(gamma, axis=-1)
l = -gamma
l = tf.linalg.set_diag(l, degrees - tf.linalg.diag_part(gamma))
l = tf.eye(n_nodes, dtype=x.dtype) - self.delta_coeff * l
output = tf.matmul(l, x)
return output
@property
def config(self):
return {
"channels": self.channels,
"delta_coeff": self.delta_coeff,
"epsilon": self.epsilon,
}
| 6,767 | 30.774648 | 121 | py |
spektral | spektral-master/spektral/layers/convolutional/gcs_conv.py | from tensorflow.keras import backend as K
from spektral.layers import ops
from spektral.layers.convolutional.conv import Conv
from spektral.utils import normalized_adjacency
class GCSConv(Conv):
    r"""
    A `GraphConv` layer with a trainable skip connection.

    **Mode**: single, disjoint, mixed, batch.

    This layer computes:
    $$
        \Z' = \D^{-1/2} \A \D^{-1/2} \X \W_1 + \X \W_2 + \b
    $$
    where \( \A \) has no self-loops.

    **Input**

    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - Normalized adjacency matrix of shape `([batch], n_nodes, n_nodes)`;
    can be computed with `spektral.utils.convolution.normalized_adjacency`.

    **Output**

    - Node features with the same shape as the input, except for the last
    dimension, which becomes `channels`.

    **Arguments**

    - `channels`: number of output channels;
    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        channels,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.channels = channels

    def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[0][-1]
        # Both kernels share shape and hyper-parameters: one transforms the
        # propagated features, the other feeds the skip connection.
        shared = dict(
            shape=(input_dim, self.channels),
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        self.kernel_1 = self.add_weight(name="kernel_1", **shared)
        self.kernel_2 = self.add_weight(name="kernel_2", **shared)
        if self.use_bias:
            self.bias = self.add_weight(
                shape=(self.channels,),
                initializer=self.bias_initializer,
                name="bias",
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        self.built = True

    def call(self, inputs, mask=None):
        x, a = inputs
        # Propagate the transformed features over the normalized adjacency,
        # then add the trainable skip connection on the raw features.
        propagated = ops.modal_dot(a, K.dot(x, self.kernel_1))
        output = propagated + K.dot(x, self.kernel_2)
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if mask is not None:
            output *= mask[0]
        return self.activation(output)

    @property
    def config(self):
        return {"channels": self.channels}

    @staticmethod
    def preprocess(a):
        return normalized_adjacency(a)
| 3,852 | 29.824 | 89 | py |
spektral | spektral-master/spektral/layers/convolutional/crystal_conv.py | from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from spektral.layers.convolutional.message_passing import MessagePassing
class CrystalConv(MessagePassing):
    r"""
    A crystal graph convolutional layer from the paper

    > [Crystal Graph Convolutional Neural Networks for an Accurate and
    Interpretable Prediction of Material Properties](https://arxiv.org/abs/1710.10324)<br>
    > Tian Xie and Jeffrey C. Grossman

    **Mode**: single, disjoint, mixed.

    **This layer expects a sparse adjacency matrix.**

    This layer computes:
    $$
        \x_i' = \x_i + \sum\limits_{j \in \mathcal{N}(i)} \sigma \left( \z_{ij}
        \W^{(f)} + \b^{(f)} \right) \odot \g \left( \z_{ij} \W^{(s)} + \b^{(s)}
        \right)
    $$
    where \(\z_{ij} = \x_i \| \x_j \| \e_{ji} \), \(\sigma\) is a sigmoid
    activation, and \(g\) is the activation function (defined by the
    `activation` argument).

    **Input**

    - Node features of shape `(n_nodes, n_node_features)`;
    - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.
    - Edge features of shape `(num_edges, n_edge_features)`.

    **Output**

    - Node features with the same shape of the input.

    **Arguments**

    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        aggregate="sum",
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            aggregate=aggregate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )

    def build(self, input_shape):
        assert len(input_shape) >= 2
        # Both dense branches share every hyper-parameter except the
        # activation: `dense_f` gates with a sigmoid, `dense_s` transforms
        # with the layer's own activation.
        shared = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
            dtype=self.dtype,
        )
        width = input_shape[0][-1]
        self.dense_f = Dense(width, activation="sigmoid", **shared)
        self.dense_s = Dense(width, activation=self.activation, **shared)
        self.built = True

    def message(self, x, e=None):
        # z_ij = x_i || x_j || e_ji
        parts = [self.get_targets(x), self.get_sources(x)]
        if e is not None:
            parts.append(e)
        z = K.concatenate(parts, axis=-1)
        # Sigmoid-gated transformation of the concatenated features.
        return self.dense_s(z) * self.dense_f(z)

    def update(self, embeddings, x=None):
        # Residual connection: x_i' = x_i + aggregated messages.
        return x + embeddings
| 3,725 | 32.567568 | 90 | py |
spektral | spektral-master/spektral/layers/convolutional/censnet_conv.py | import tensorflow as tf
from spektral.layers import ops
from spektral.layers.convolutional.conv import Conv
from spektral.utils.convolution import gcn_filter, incidence_matrix, line_graph
class CensNetConv(Conv):
    r"""
    A CensNet convolutional layer from the paper
    > [Co-embedding of Nodes and Edges with Graph Neural Networks](https://arxiv.org/abs/2010.13242)<br>
    > Xiaodong Jiang et al.
    This implements both the node and edge propagation rules as a single layer.
    **Mode**: single, disjoint, batch.
    **Input**
    - Node features of shape `([batch], n_nodes, n_node_features)`;
    - A tuple containing:
        - Modified Laplacian of shape `([batch], n_nodes, n_nodes)`; can be
        computed with `spektral.utils.convolution.gcn_filter`.
        - Modified line graph Laplacian of shape `([batch], n_edges, n_edges)`;
        can be computed with `spektral.utils.convolution.line_graph` and
        `spektral.utils.convolution.gcn_filter`.
        - Incidence matrix of shape `([batch], n_nodes, n_edges)`; can be
        computed with `spektral.utils.convolution.incidence_matrix`.
    - Edge features of shape `([batch], n_edges, n_edge_features)`;
    **Output**
    - Node features with the same shape as the input, but with the last
    dimension changed to `node_channels`.
    - Edge features with the same shape as the input, but with the last
    dimension changed to `edge_channels`.
    **Arguments**
    - `node_channels`: number of output channels for the node features;
    - `edge_channels`: number of output channels for the edge features;
    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `node_initializer`: initializer for the node feature weights (P_n);
    - `edge_initializer`: initializer for the edge feature weights (P_e);
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `edge_regularizer`: regularization applied to the edge feature weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `edge_constraint`: constraint applied to the edge feature weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """
    def __init__(
        self,
        node_channels,
        edge_channels,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        node_initializer="glorot_uniform",
        edge_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        node_regularizer=None,
        edge_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        node_constraint=None,
        edge_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.node_channels = node_channels
        self.edge_channels = edge_channels
        # Hyper-parameters for the P_n / P_e weight vectors; double-underscore
        # names are mangled, keeping them private to this class.
        self.__node_initializer = tf.keras.initializers.get(node_initializer)
        self.__node_regularizer = tf.keras.regularizers.get(node_regularizer)
        self.__node_constraint = tf.keras.constraints.get(node_constraint)
        self.__edge_initializer = tf.keras.initializers.get(edge_initializer)
        self.__edge_regularizer = tf.keras.regularizers.get(edge_regularizer)
        self.__edge_constraint = tf.keras.constraints.get(edge_constraint)
    def build(self, input_shape):
        assert len(input_shape) >= 2
        node_features_shape, _, edge_features_shape = input_shape
        num_input_node_features = node_features_shape[-1]
        num_input_edge_features = edge_features_shape[-1]
        # Main transformation kernels for the node and edge propagation rules.
        self.node_kernel = self.add_weight(
            shape=(num_input_node_features, self.node_channels),
            initializer=self.kernel_initializer,
            name="node_kernel",
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        self.edge_kernel = self.add_weight(
            shape=(num_input_edge_features, self.edge_channels),
            initializer=self.kernel_initializer,
            name="edge_kernel",
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        # Add separate weight vectors for the edge features for the node and
        # edge feature vectors. (These are P_n and P_e in the paper.)
        self.node_weights = self.add_weight(
            shape=(num_input_node_features, 1),
            initializer=self.__node_initializer,
            name="node_weights",
            regularizer=self.__node_regularizer,
            constraint=self.__node_constraint,
        )
        self.edge_weights = self.add_weight(
            shape=(num_input_edge_features, 1),
            initializer=self.__edge_initializer,
            name="edge_weights",
            regularizer=self.__edge_regularizer,
            constraint=self.__edge_constraint,
        )
        if self.use_bias:
            self.node_bias = self.add_weight(
                shape=(self.node_channels,),
                initializer=self.bias_initializer,
                name="node_bias",
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
            self.edge_bias = self.add_weight(
                shape=(self.edge_channels,),
                initializer=self.bias_initializer,
                name="edge_bias",
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        self.built = True
    def _bias_and_activation(self, pre_activation, *, bias_weights, mask=None):
        """
        Applies the bias, activation, and mask, if necessary.
        :param pre_activation: The layer output, pre-activation.
        :param bias_weights: The weights to use for the bias.
        :param mask: The mask to use.
        :return: The biased, activated, and masked output.
        """
        if self.use_bias:
            # Apply the bias if needed.
            pre_activation = tf.nn.bias_add(pre_activation, bias_weights)
        if mask is not None:
            pre_activation *= mask[0]
        return self.activation(pre_activation)
    def _propagate_nodes(self, inputs, mask=None):
        """
        Performs the node feature propagation step.
        :param inputs: All the inputs to the layer.
        :param mask: The mask to use.
        :return: The propagated node features.
        """
        node_features, (laplacian, _, incidence), edge_features = inputs
        # Scalar weight per edge: e @ P_e, projected to the node space via
        # T diag(.) T^T where T is the incidence matrix.
        weighted_edge_features = tf.matmul(edge_features, self.edge_weights)
        # Remove the extra 1-dimension.
        weighted_edge_features = tf.squeeze(weighted_edge_features, axis=[-1])
        weighted_edge_features = tf.linalg.diag(weighted_edge_features)
        weighted_edge_features = ops.modal_dot(incidence, weighted_edge_features)
        weighted_edge_features = ops.modal_dot(
            weighted_edge_features, incidence, transpose_b=True
        )
        # Weight the node Laplacian element-wise by the aggregated edge features.
        node_adjacency = weighted_edge_features * laplacian
        output = ops.modal_dot(node_adjacency, node_features)
        output = ops.modal_dot(output, self.node_kernel)
        return self._bias_and_activation(output, bias_weights=self.node_bias, mask=mask)
    def _propagate_edges(self, inputs, mask=None):
        """
        Performs the edge feature propagation step.
        :param inputs: All the inputs to the layer.
        :param mask: The mask to use.
        :return: The propagated edge features.
        """
        node_features, (_, laplacian, incidence), edge_features = inputs
        # Scalar weight per node: x @ P_n, projected to the edge space via
        # T^T diag(.) T (the mirror image of the node step).
        weighted_node_features = tf.matmul(node_features, self.node_weights)
        # Remove the extra 1-dimension.
        weighted_node_features = tf.squeeze(weighted_node_features, axis=[-1])
        weighted_node_features = tf.linalg.diag(weighted_node_features)
        weighted_node_features = ops.modal_dot(
            incidence, weighted_node_features, transpose_a=True
        )
        weighted_node_features = ops.modal_dot(weighted_node_features, incidence)
        # Weight the line-graph Laplacian element-wise by the node features.
        edge_adjacency = weighted_node_features * laplacian
        output = ops.modal_dot(edge_adjacency, edge_features)
        output = ops.modal_dot(output, self.edge_kernel)
        return self._bias_and_activation(output, bias_weights=self.edge_bias, mask=mask)
    def call(self, inputs, mask=None):
        # Node and edge embeddings are computed independently from the same
        # inputs and returned as a pair.
        node_features = self._propagate_nodes(inputs, mask=mask)
        edge_features = self._propagate_edges(inputs, mask=mask)
        return node_features, edge_features
    @property
    def config(self):
        # Get configuration for sub-components.
        node_reg = tf.keras.regularizers.serialize(self.__node_regularizer)
        node_init = tf.keras.initializers.serialize(self.__node_initializer)
        node_constraint = tf.keras.constraints.serialize(self.__node_constraint)
        edge_reg = tf.keras.regularizers.serialize(self.__edge_regularizer)
        edge_init = tf.keras.initializers.serialize(self.__edge_initializer)
        edge_constraint = tf.keras.constraints.serialize(self.__edge_constraint)
        return dict(
            node_channels=self.node_channels,
            edge_channels=self.edge_channels,
            node_regularizer=node_reg,
            node_initializer=node_init,
            node_constraint=node_constraint,
            edge_regularizer=edge_reg,
            edge_initializer=edge_init,
            edge_constraint=edge_constraint,
        )
    @staticmethod
    def preprocess(adjacency):
        # Build the three matrices expected as the layer's second input:
        # node Laplacian, line-graph Laplacian, and incidence matrix.
        laplacian = gcn_filter(adjacency)
        incidence = incidence_matrix(adjacency)
        edge_laplacian = gcn_filter(line_graph(incidence).numpy())
        return laplacian, edge_laplacian, incidence
| 10,489 | 39.346154 | 104 | py |
spektral | spektral-master/spektral/layers/convolutional/conv.py | import warnings
from functools import wraps
import tensorflow as tf
from tensorflow.keras.layers import Layer
from spektral.utils.keras import (
deserialize_kwarg,
is_keras_kwarg,
is_layer_kwarg,
serialize_kwarg,
)
class Conv(Layer):
    r"""
    A general class for convolutional layers.

    Extend this class to implement GNN layers that rely on standard matrix
    multiplication rather than the gather-scatter scheme of MessagePassing.
    This makes it possible to support dense inputs, batch and mixed modes,
    or other non-standard processing; no checks are performed on the inputs,
    for maximum flexibility.

    Subclasses must implement the `call(self, inputs)` and `config(self)`
    methods.

    **Arguments**:

    - ``**kwargs`: additional keyword arguments specific to Keras' Layers, like
    regularizers, initializers, constraints, etc.
    """

    def __init__(self, **kwargs):
        keras_kwargs = {k: v for k, v in kwargs.items() if is_keras_kwarg(k)}
        super().__init__(**keras_kwargs)
        self.supports_masking = True
        # Remember which layer-specific kwargs were given, so that
        # get_config() can serialize them back later.
        self.kwargs_keys = []
        for key, value in kwargs.items():
            if not is_layer_kwarg(key):
                continue
            self.kwargs_keys.append(key)
            setattr(self, key, deserialize_kwarg(key, value))
        # Wrap call() so that input dtypes are checked and harmonized.
        self.call = check_dtypes_decorator(self.call)

    def build(self, input_shape):
        self.built = True

    def call(self, inputs):
        raise NotImplementedError

    def get_config(self):
        keras_config = {
            key: serialize_kwarg(key, getattr(self, key)) for key in self.kwargs_keys
        }
        return {**super().get_config(), **keras_config, **self.config}

    @property
    def config(self):
        return {}

    @staticmethod
    def preprocess(a):
        return a
def check_dtypes_decorator(call):
    """Wrap a layer's ``call`` so its inputs first pass through ``check_dtypes``."""

    @wraps(call)
    def _inner_check_dtypes(inputs, **kwargs):
        return call(check_dtypes(inputs), **kwargs)

    return _inner_check_dtypes
def check_dtypes(inputs):
    """Harmonize the dtypes of a ``(x, a)`` or ``(x, a, e)`` input list.

    If any element is not tensor-like (has no ``dtype``), or the list does not
    have length 2 or 3, the inputs are returned untouched. Otherwise, an
    integer adjacency matrix paired with float node features is cast to the
    features' dtype, with a warning.
    """
    if any(not hasattr(value, "dtype") for value in inputs):
        # Not a list of valid tensors: pass through unchanged.
        return inputs
    if len(inputs) == 2:
        x, a = inputs
        e = None
    elif len(inputs) == 3:
        x, a, e = inputs
    else:
        return inputs
    int_adjacency = a.dtype in (tf.int32, tf.int64)
    float_features = x.dtype in (tf.float16, tf.float32, tf.float64)
    if int_adjacency and float_features:
        warnings.warn(
            f"The adjacency matrix of dtype {a.dtype} is incompatible with the dtype "
            f"of the node features {x.dtype} and has been automatically cast to "
            f"{x.dtype}."
        )
        a = tf.cast(a, x.dtype)
    return [tensor for tensor in (x, a, e) if tensor is not None]
| 2,918 | 26.280374 | 86 | py |
spektral | spektral-master/spektral/layers/convolutional/tag_conv.py | from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from spektral.layers.convolutional.message_passing import MessagePassing
from spektral.utils import normalized_adjacency
class TAGConv(MessagePassing):
    r"""
    A Topology Adaptive Graph Convolutional layer (TAG) from the paper

    > [Topology Adaptive Graph Convolutional Networks](https://arxiv.org/abs/1710.10370)<br>
    > Jian Du et al.

    **Mode**: single, disjoint, mixed.

    **This layer expects a sparse adjacency matrix.**

    This layer computes:
    $$
        \Z = \sum\limits_{k=0}^{K} \D^{-1/2}\A^k\D^{-1/2}\X\W^{(k)}
    $$

    **Input**

    - Node features of shape `(n_nodes, n_node_features)`;
    - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.

    **Output**

    - Node features with the same shape of the input, but the last dimension
    changed to `channels`.

    **Arguments**

    - `channels`: integer, number of output channels;
    - `K`: the order of the layer (i.e., the layer will consider a K-hop
    neighbourhood for each node);
    - `activation`: activation function;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the weights;
    - `bias_constraint`: constraint applied to the bias vector.
    """

    def __init__(
        self,
        channels,
        K=3,
        aggregate="sum",
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            aggregate=aggregate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.channels = channels
        self.K = K
        # Linear readout applied to the concatenation [x || Ax || ... || A^K x];
        # this implements the per-hop weights W^(k) in one dense layer.
        self.linear = Dense(
            channels,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
        )

    def build(self, input_shape):
        assert len(input_shape) >= 2
        self.built = True

    def call(self, inputs, **kwargs):
        x, a, _ = self.get_inputs(inputs)
        edge_weight = a.values
        # Collect x, A x, A^2 x, ..., A^K x by repeatedly propagating the
        # *previous* hop. (Bug fix: propagating the original `x` at every
        # iteration made all K terms identical to A x.)
        output = [x]
        for _ in range(self.K):
            output.append(self.propagate(output[-1], a, edge_weight=edge_weight))
        output = K.concatenate(output)
        return self.linear(output)

    def message(self, x, edge_weight=None):
        # Scale each neighbour's features by the corresponding edge weight.
        x_j = self.get_sources(x)
        return edge_weight[:, None] * x_j

    @property
    def config(self):
        return {
            "channels": self.channels,
        }

    @staticmethod
    def preprocess(a):
        return normalized_adjacency(a)
| 3,772 | 29.92623 | 92 | py |
spektral | spektral-master/spektral/layers/convolutional/message_passing.py | import inspect
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from spektral.layers.ops.scatter import deserialize_scatter, serialize_scatter
from spektral.utils.keras import (
deserialize_kwarg,
is_keras_kwarg,
is_layer_kwarg,
serialize_kwarg,
)
class MessagePassing(Layer):
    r"""
    A general class for message passing networks from the paper
    > [Neural Message Passing for Quantum Chemistry](https://arxiv.org/abs/1704.01212)<br>
    > Justin Gilmer et al.
    **Mode**: single, disjoint.
    **This layer and all of its extensions expect a sparse adjacency matrix.**
    This layer computes:
    $$
        \x_i' = \gamma \left( \x_i, \square_{j \in \mathcal{N}(i)} \,
        \phi \left(\x_i, \x_j, \e_{j \rightarrow i} \right) \right),
    $$
    where \( \gamma \) is a differentiable update function, \( \phi \) is a
    differentiable message function, \( \square \) is a permutation-invariant
    function to aggregate the messages (like the sum or the average), and
    \(\E_{ij}\) is the edge attribute of edge j-i.
    By extending this class, it is possible to create any message-passing layer
    in single/disjoint mode.
    **API**
    ```python
    propagate(x, a, e=None, **kwargs)
    ```
    Propagates the messages and computes embeddings for each node in the graph. <br>
    Any `kwargs` will be forwarded as keyword arguments to `message()`,
    `aggregate()` and `update()`.
    ```python
    message(x, **kwargs)
    ```
    Computes messages, equivalent to \(\phi\) in the definition. <br>
    Any extra keyword argument of this function will be populated by
    `propagate()` if a matching keyword is found. <br>
    The `get_sources` and `get_targets` built-in methods can be used to automatically
    retrieve the node attributes of nodes that are sending (sources) or receiving
    (targets) a message.
    If you need direct access to the edge indices, you can use the `index_sources` and
    `index_targets` attributes.
    ```python
    aggregate(messages, **kwargs)
    ```
    Aggregates the messages, equivalent to \(\square\) in the definition. <br>
    The behaviour of this function can also be controlled using the `aggregate`
    keyword in the constructor of the layer (supported aggregations: sum, mean,
    max, min, prod). <br>
    Any extra keyword argument of this function will be populated by
    `propagate()` if a matching keyword is found.
    ```python
    update(embeddings, **kwargs)
    ```
    Updates the aggregated messages to obtain the final node embeddings,
    equivalent to \(\gamma\) in the definition. <br>
    Any extra keyword argument of this function will be populated by
    `propagate()` if a matching keyword is found.
    **Arguments**:
    - `aggregate`: string or callable, an aggregation function. This flag can be
    used to control the behaviour of `aggregate()` without re-implementing it.
    Supported aggregations: 'sum', 'mean', 'max', 'min', 'prod'.
    If callable, the function must have the signature `foo(updates, indices, n_nodes)`
    and return a rank 2 tensor with shape `(n_nodes, ...)`.
    - `kwargs`: additional keyword arguments specific to Keras' Layers, like
    regularizers, initializers, constraints, etc.
    """
    def __init__(self, aggregate="sum", **kwargs):
        super().__init__(**{k: v for k, v in kwargs.items() if is_keras_kwarg(k)})
        # Remember layer-specific kwargs so get_config() can serialize them.
        self.kwargs_keys = []
        for key in kwargs:
            if is_layer_kwarg(key):
                attr = kwargs[key]
                attr = deserialize_kwarg(key, attr)
                self.kwargs_keys.append(key)
                setattr(self, key, attr)
        # Cache each hook's signature so propagate() can auto-fill its
        # keyword arguments by name.
        self.msg_signature = inspect.signature(self.message).parameters
        self.agg_signature = inspect.signature(self.aggregate).parameters
        self.upd_signature = inspect.signature(self.update).parameters
        self.agg = deserialize_scatter(aggregate)
    def call(self, inputs, **kwargs):
        x, a, e = self.get_inputs(inputs)
        return self.propagate(x, a, e)
    def build(self, input_shape):
        self.built = True
    def propagate(self, x, a, e=None, **kwargs):
        # Edge endpoints are read from the sparse adjacency's indices; these
        # attributes are also part of the public API for subclasses.
        self.n_nodes = tf.shape(x)[-2]
        self.index_targets = a.indices[:, 1]  # Nodes receiving the message
        self.index_sources = a.indices[:, 0]  # Nodes sending the message (ie neighbors)
        # Message
        msg_kwargs = self.get_kwargs(x, a, e, self.msg_signature, kwargs)
        messages = self.message(x, **msg_kwargs)
        # Aggregate
        agg_kwargs = self.get_kwargs(x, a, e, self.agg_signature, kwargs)
        embeddings = self.aggregate(messages, **agg_kwargs)
        # Update
        upd_kwargs = self.get_kwargs(x, a, e, self.upd_signature, kwargs)
        output = self.update(embeddings, **upd_kwargs)
        return output
    def message(self, x, **kwargs):
        # Default phi: each source node sends its own features.
        return self.get_sources(x)
    def aggregate(self, messages, **kwargs):
        # Default square: scatter-aggregate messages onto their target nodes.
        return self.agg(messages, self.index_targets, self.n_nodes)
    def update(self, embeddings, **kwargs):
        # Default gamma: identity.
        return embeddings
    def get_targets(self, x):
        return tf.gather(x, self.index_targets, axis=-2)
    def get_sources(self, x):
        return tf.gather(x, self.index_sources, axis=-2)
    def get_kwargs(self, x, a, e, signature, kwargs):
        # Fill the hook's keyword arguments by name: x/a/e come from the
        # inputs, everything else from the kwargs given to propagate().
        output = {}
        for k in signature.keys():
            if signature[k].default is inspect.Parameter.empty or k == "kwargs":
                pass
            elif k == "x":
                output[k] = x
            elif k == "a":
                output[k] = a
            elif k == "e":
                output[k] = e
            elif k in kwargs:
                output[k] = kwargs[k]
            else:
                raise ValueError("Missing key {} for signature {}".format(k, signature))
        return output
    @staticmethod
    def get_inputs(inputs):
        """
        Parses the inputs lists and returns a tuple (x, a, e) with node features,
        adjacency matrix and edge features. In the inputs only contain x and a, then
        e=None is returned.
        """
        if len(inputs) == 3:
            x, a, e = inputs
            assert K.ndim(e) in (2, 3), "E must have rank 2 or 3"
        elif len(inputs) == 2:
            x, a = inputs
            e = None
        else:
            raise ValueError(
                "Expected 2 or 3 inputs tensors (X, A, E), got {}.".format(len(inputs))
            )
        assert K.ndim(x) in (2, 3), "X must have rank 2 or 3"
        assert K.is_sparse(a), "A must be a SparseTensor"
        assert K.ndim(a) == 2, "A must have rank 2"
        return x, a, e
    def get_config(self):
        mp_config = {"aggregate": serialize_scatter(self.agg)}
        keras_config = {}
        for key in self.kwargs_keys:
            keras_config[key] = serialize_kwarg(key, getattr(self, key))
        base_config = super().get_config()
        return {**base_config, **keras_config, **mp_config, **self.config}
    @property
    def config(self):
        return {}
    @staticmethod
    def preprocess(a):
        return a
| 7,175 | 34.176471 | 90 | py |
spektral | spektral-master/spektral/layers/convolutional/gat_conv.py | import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import constraints, initializers, regularizers
from tensorflow.keras.layers import Dropout
from spektral.layers import ops
from spektral.layers.convolutional.conv import Conv
from spektral.layers.ops import modes
class GATConv(Conv):
r"""
A Graph Attention layer (GAT) from the paper
> [Graph Attention Networks](https://arxiv.org/abs/1710.10903)<br>
> Petar Veličković et al.
**Mode**: single, disjoint, mixed, batch.
**This layer expects dense inputs when working in batch mode.**
This layer computes a convolution similar to `layers.GraphConv`, but
uses the attention mechanism to weight the adjacency matrix instead of
using the normalized Laplacian:
$$
\X' = \mathbf{\alpha}\X\W + \b
$$
where
$$
\mathbf{\alpha}_{ij} =\frac{ \exp\left(\mathrm{LeakyReLU}\left(
\a^{\top} [(\X\W)_i \, \| \, (\X\W)_j]\right)\right)}{\sum\limits_{k
\in \mathcal{N}(i) \cup \{ i \}} \exp\left(\mathrm{LeakyReLU}\left(
\a^{\top} [(\X\W)_i \, \| \, (\X\W)_k]\right)\right)}
$$
where \(\a \in \mathbb{R}^{2F'}\) is a trainable attention kernel.
Dropout is also applied to \(\alpha\) before computing \(\Z\).
Parallel attention heads are computed in parallel and their results are
aggregated by concatenation or average.
**Input**
- Node features of shape `([batch], n_nodes, n_node_features)`;
- Binary adjacency matrix of shape `([batch], n_nodes, n_nodes)`;
**Output**
- Node features with the same shape as the input, but with the last
dimension changed to `channels`;
- if `return_attn_coef=True`, a list with the attention coefficients for
each attention head. Each attention coefficient matrix has shape
`([batch], n_nodes, n_nodes)`.
**Arguments**
- `channels`: number of output channels;
- `attn_heads`: number of attention heads to use;
- `concat_heads`: bool, whether to concatenate the output of the attention
heads instead of averaging;
- `dropout_rate`: internal dropout rate for attention coefficients;
- `return_attn_coef`: if True, return the attention coefficients for
the given input (one n_nodes x n_nodes matrix for each head).
- `add_self_loops`: if True, add self loops to the adjacency matrix.
- `activation`: activation function;
- `use_bias`: bool, add a bias vector to the output;
- `kernel_initializer`: initializer for the weights;
- `attn_kernel_initializer`: initializer for the attention weights;
- `bias_initializer`: initializer for the bias vector;
- `kernel_regularizer`: regularization applied to the weights;
- `attn_kernel_regularizer`: regularization applied to the attention kernels;
- `bias_regularizer`: regularization applied to the bias vector;
- `activity_regularizer`: regularization applied to the output;
- `kernel_constraint`: constraint applied to the weights;
- `attn_kernel_constraint`: constraint applied to the attention kernels;
- `bias_constraint`: constraint applied to the bias vector.
"""
def __init__(
self,
channels,
attn_heads=1,
concat_heads=True,
dropout_rate=0.5,
return_attn_coef=False,
add_self_loops=True,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
attn_kernel_initializer="glorot_uniform",
kernel_regularizer=None,
bias_regularizer=None,
attn_kernel_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
attn_kernel_constraint=None,
**kwargs,
):
super().__init__(
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
self.channels = channels
self.attn_heads = attn_heads
self.concat_heads = concat_heads
self.dropout_rate = dropout_rate
self.return_attn_coef = return_attn_coef
self.add_self_loops = add_self_loops
self.attn_kernel_initializer = initializers.get(attn_kernel_initializer)
self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)
self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)
if concat_heads:
self.output_dim = self.channels * self.attn_heads
else:
self.output_dim = self.channels
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[0][-1]
self.kernel = self.add_weight(
name="kernel",
shape=[input_dim, self.attn_heads, self.channels],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
self.attn_kernel_self = self.add_weight(
name="attn_kernel_self",
shape=[self.channels, self.attn_heads, 1],
initializer=self.attn_kernel_initializer,
regularizer=self.attn_kernel_regularizer,
constraint=self.attn_kernel_constraint,
)
self.attn_kernel_neighs = self.add_weight(
name="attn_kernel_neigh",
shape=[self.channels, self.attn_heads, 1],
initializer=self.attn_kernel_initializer,
regularizer=self.attn_kernel_regularizer,
constraint=self.attn_kernel_constraint,
)
if self.use_bias:
self.bias = self.add_weight(
shape=[self.output_dim],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
name="bias",
)
self.dropout = Dropout(self.dropout_rate, dtype=self.dtype)
self.built = True
def call(self, inputs, mask=None):
x, a = inputs
mode = ops.autodetect_mode(x, a)
if mode == modes.SINGLE and K.is_sparse(a):
output, attn_coef = self._call_single(x, a)
else:
if K.is_sparse(a):
a = tf.sparse.to_dense(a)
output, attn_coef = self._call_dense(x, a)
if self.concat_heads:
shape = tf.concat(
(tf.shape(output)[:-2], [self.attn_heads * self.channels]), axis=0
)
output = tf.reshape(output, shape)
else:
output = tf.reduce_mean(output, axis=-2)
if self.use_bias:
output += self.bias
if mask is not None:
output *= mask[0]
output = self.activation(output)
if self.return_attn_coef:
return output, attn_coef
else:
return output
def _call_single(self, x, a):
# Reshape kernels for efficient message-passing
kernel = tf.reshape(self.kernel, (-1, self.attn_heads * self.channels))
attn_kernel_self = ops.transpose(self.attn_kernel_self, (2, 1, 0))
attn_kernel_neighs = ops.transpose(self.attn_kernel_neighs, (2, 1, 0))
# Prepare message-passing
indices = a.indices
N = tf.shape(x, out_type=indices.dtype)[-2]
if self.add_self_loops:
indices = ops.add_self_loops_indices(indices, N)
targets, sources = indices[:, 1], indices[:, 0]
# Update node features
x = K.dot(x, kernel)
x = tf.reshape(x, (-1, self.attn_heads, self.channels))
# Compute attention
attn_for_self = tf.reduce_sum(x * attn_kernel_self, -1)
attn_for_self = tf.gather(attn_for_self, targets)
attn_for_neighs = tf.reduce_sum(x * attn_kernel_neighs, -1)
attn_for_neighs = tf.gather(attn_for_neighs, sources)
attn_coef = attn_for_self + attn_for_neighs
attn_coef = tf.nn.leaky_relu(attn_coef, alpha=0.2)
attn_coef = ops.unsorted_segment_softmax(attn_coef, targets, N)
attn_coef = self.dropout(attn_coef)
attn_coef = attn_coef[..., None]
# Update representation
output = attn_coef * tf.gather(x, sources)
output = tf.math.unsorted_segment_sum(output, targets, N)
return output, attn_coef
def _call_dense(self, x, a):
shape = tf.shape(a)[:-1]
if self.add_self_loops:
a = tf.linalg.set_diag(a, tf.ones(shape, a.dtype))
x = tf.einsum("...NI , IHO -> ...NHO", x, self.kernel)
attn_for_self = tf.einsum("...NHI , IHO -> ...NHO", x, self.attn_kernel_self)
attn_for_neighs = tf.einsum(
"...NHI , IHO -> ...NHO", x, self.attn_kernel_neighs
)
attn_for_neighs = tf.einsum("...ABC -> ...CBA", attn_for_neighs)
attn_coef = attn_for_self + attn_for_neighs
attn_coef = tf.nn.leaky_relu(attn_coef, alpha=0.2)
mask = tf.where(a == 0.0, -10e9, 0.0)
mask = tf.cast(mask, dtype=attn_coef.dtype)
attn_coef += mask[..., None, :]
attn_coef = tf.nn.softmax(attn_coef, axis=-1)
attn_coef_drop = self.dropout(attn_coef)
output = tf.einsum("...NHM , ...MHI -> ...NHI", attn_coef_drop, x)
return output, attn_coef
@property
def config(self):
return {
"channels": self.channels,
"attn_heads": self.attn_heads,
"concat_heads": self.concat_heads,
"dropout_rate": self.dropout_rate,
"return_attn_coef": self.return_attn_coef,
"attn_kernel_initializer": initializers.serialize(
self.attn_kernel_initializer
),
"attn_kernel_regularizer": regularizers.serialize(
self.attn_kernel_regularizer
),
"attn_kernel_constraint": constraints.serialize(
self.attn_kernel_constraint
),
}
| 10,279 | 36.933579 | 85 | py |
spektral | spektral-master/spektral/layers/ops/modes.py | import tensorflow as tf
from tensorflow.keras import backend as K
SINGLE = 1 # Single mode rank(x) = 2, rank(a) = 2
DISJOINT = SINGLE # Disjoint mode rank(x) = 2, rank(a) = 2
BATCH = 3 # Batch mode rank(x) = 3, rank(a) = 3
MIXED = 4 # Mixed mode rank(x) = 3, rank(a) = 2
def disjoint_signal_to_batch(X, I):
"""
Converts a disjoint graph signal to batch node by zero-padding.
:param X: Tensor, node features of shape (nodes, features).
:param I: Tensor, graph IDs of shape `(n_nodes, )`;
:return batch: Tensor, batched node features of shape (batch, N_max, n_node_features)
"""
I = tf.cast(I, tf.int32)
num_nodes = tf.math.segment_sum(tf.ones_like(I), I)
start_index = tf.cumsum(num_nodes, exclusive=True)
n_graphs = tf.shape(num_nodes)[0]
max_n_nodes = tf.reduce_max(num_nodes)
batch_n_nodes = tf.shape(I)[0]
feature_dim = tf.shape(X)[-1]
index = tf.range(batch_n_nodes)
index = (index - tf.gather(start_index, I)) + (I * max_n_nodes)
dense = tf.zeros((n_graphs * max_n_nodes, feature_dim), dtype=X.dtype)
dense = tf.tensor_scatter_nd_update(dense, index[..., None], X)
batch = tf.reshape(dense, (n_graphs, max_n_nodes, feature_dim))
return batch
def disjoint_adjacency_to_batch(A, I):
"""
Converts a disjoint adjacency matrix to batch node by zero-padding.
:param A: Tensor, binary adjacency matrix of shape `(n_nodes, n_nodes)`;
:param I: Tensor, graph IDs of shape `(n_nodes, )`;
:return: Tensor, batched adjacency matrix of shape `(batch, N_max, N_max)`;
"""
I = tf.cast(I, tf.int64)
indices = A.indices
values = A.values
i_nodes, j_nodes = indices[:, 0], indices[:, 1]
graph_sizes = tf.math.segment_sum(tf.ones_like(I), I)
max_n_nodes = tf.reduce_max(graph_sizes)
n_graphs = tf.shape(graph_sizes)[0]
offset = tf.gather(I, i_nodes)
offset = tf.gather(tf.cumsum(graph_sizes, exclusive=True), offset)
relative_j_nodes = j_nodes - offset
relative_i_nodes = i_nodes - offset
spaced_i_nodes = tf.gather(I, i_nodes) * max_n_nodes + relative_i_nodes
new_indices = tf.transpose(tf.stack([spaced_i_nodes, relative_j_nodes]))
n_graphs = tf.cast(n_graphs, new_indices.dtype)
max_n_nodes = tf.cast(max_n_nodes, new_indices.dtype)
dense_adjacency = tf.scatter_nd(
new_indices, values, (n_graphs * max_n_nodes, max_n_nodes)
)
batch = tf.reshape(dense_adjacency, (n_graphs, max_n_nodes, max_n_nodes))
return batch
def autodetect_mode(x, a):
    """
    Returns a code that identifies the data mode from the given node features
    and adjacency matrix(s).
    The output of this function can be used as follows:
    ```py
    from spektral.layers.ops import modes
    mode = modes.autodetect_mode(x, a)
    if mode == modes.SINGLE:
        print('Single!')
    elif mode == modes.BATCH:
        print('Batch!')
    elif mode == modes.MIXED:
        print('Mixed!')
    ```
    :param x: Tensor or SparseTensor representing the node features
    :param a: Tensor or SparseTensor representing the adjacency matrix(s)
    :return: mode of operation as an integer code.
    :raise ValueError: if the rank combination matches no known mode.
    """
    x_ndim = K.ndim(x)
    a_ndim = K.ndim(a)
    if x_ndim == 2 and a_ndim == 2:
        return SINGLE
    elif x_ndim == 3 and a_ndim == 3:
        return BATCH
    elif x_ndim == 3 and a_ndim == 2:
        return MIXED
    else:
        # Fixed: the two string literals used to concatenate without a
        # separating space ("...ranks {} and {}respectively.").
        raise ValueError(
            "Unknown mode for inputs x, a with ranks {} and {} "
            "respectively.".format(x_ndim, a_ndim)
        )
| 3,567 | 32.345794 | 89 | py |
spektral | spektral-master/spektral/layers/ops/graph.py | import tensorflow as tf
from tensorflow.keras import backend as K
from . import ops
def normalize_A(A):
    """
    Computes symmetric normalization of A, dealing with sparse A and batch
    mode automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :return: Tensor or SparseTensor of rank k.
    """
    # sqrt of the degree vector, as a column; epsilon avoids division by 0.
    sqrt_deg = tf.sqrt(degrees(A))[:, None] + K.epsilon()
    if K.ndim(A) == 3:
        perm = (0, 2, 1)
    else:
        perm = (1, 0)
    # D^(-1/2) A D^(-1/2), written as two broadcasting divisions.
    return (A / sqrt_deg) / ops.transpose(sqrt_deg, perm=perm)
def degrees(A):
    """
    Computes the degrees of each node in A, dealing with sparse A and batch
    mode automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :return: Tensor or SparseTensor of rank k - 1.
    """
    # Row sums give the (out-)degree; pick the reducer matching A's storage.
    reducer = tf.sparse.reduce_sum if K.is_sparse(A) else tf.reduce_sum
    return reducer(A, axis=-1)
def degree_matrix(A, return_sparse_batch=False):
"""
Computes the degree matrix of A, deals with sparse A and batch mode
automatically.
:param A: Tensor or SparseTensor with rank k = {2, 3}.
:param return_sparse_batch: if operating in batch mode, return a
SparseTensor. Note that the sparse degree Tensor returned by this function
cannot be used for sparse matrix multiplication afterwards.
:return: SparseTensor of rank k.
"""
D = degrees(A)
batch_mode = K.ndim(D) == 2
N = tf.shape(D)[-1]
batch_size = tf.shape(D)[0] if batch_mode else 1
inner_index = tf.tile(tf.stack([tf.range(N)] * 2, axis=1), (batch_size, 1))
if batch_mode:
if return_sparse_batch:
outer_index = ops.repeat(
tf.range(batch_size), tf.ones(batch_size) * tf.cast(N, tf.float32)
)
indices = tf.concat([outer_index[:, None], inner_index], 1)
dense_shape = (batch_size, N, N)
else:
return tf.linalg.diag(D)
else:
indices = inner_index
dense_shape = (N, N)
indices = tf.cast(indices, tf.int64)
values = tf.reshape(D, (-1,))
return tf.SparseTensor(indices, values, dense_shape)
| 2,116 | 29.242857 | 82 | py |
spektral | spektral-master/spektral/layers/ops/matmul.py | import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.python.ops.linalg.sparse import sparse as tfsp
from . import ops
def dot(a, b):
"""
Computes a @ b, for a, b of the same rank (both 2 or both 3).
If the rank is 2, then the innermost dimension of `a` must match the
outermost dimension of `b`.
If the rank is 3, the first dimension of `a` and `b` must be equal and the
function computes a batch matmul.
Supports both dense and sparse multiplication (including sparse-sparse).
:param a: Tensor or SparseTensor with rank 2 or 3.
:param b: Tensor or SparseTensor with same rank as b.
:return: Tensor or SparseTensor with rank 2 or 3.
"""
a_ndim = K.ndim(a)
b_ndim = K.ndim(b)
assert a_ndim == b_ndim, "Expected equal ranks, got {} and {}" "".format(
a_ndim, b_ndim
)
a_is_sparse = K.is_sparse(a)
b_is_sparse = K.is_sparse(b)
# Handle cases: rank 2 sparse-dense, rank 2 dense-sparse
# In these cases we can use the faster sparse-dense matmul of tf.sparse
if a_ndim == 2:
if a_is_sparse and not b_is_sparse:
return tf.sparse.sparse_dense_matmul(a, b)
if not a_is_sparse and b_is_sparse:
return ops.transpose(
tf.sparse.sparse_dense_matmul(ops.transpose(b), ops.transpose(a))
)
# Handle cases: rank 2 sparse-sparse, rank 3 sparse-dense,
# rank 3 dense-sparse, rank 3 sparse-sparse
# In these cases we can use the tfsp.CSRSparseMatrix implementation (slower,
# but saves memory)
if a_is_sparse:
a = tfsp.CSRSparseMatrix(a)
if b_is_sparse:
b = tfsp.CSRSparseMatrix(b)
if a_is_sparse or b_is_sparse:
out = tfsp.matmul(a, b)
if hasattr(out, "to_sparse_tensor"):
return out.to_sparse_tensor()
else:
return out
# Handle case: rank 2 dense-dense, rank 3 dense-dense
# Here we use the standard dense operation
return tf.matmul(a, b)
def mixed_mode_dot(a, b):
"""
Computes the equivalent of `tf.einsum('ij,bjk->bik', a, b)`, but
works for both dense and sparse inputs.
:param a: Tensor or SparseTensor with rank 2.
:param b: Tensor or SparseTensor with rank 3.
:return: Tensor or SparseTensor with rank 3.
"""
a_shp = tf.shape(a)
b_shp = tf.shape(b)
b_t = ops.transpose(b, (1, 2, 0))
b_t = ops.reshape(b_t, tf.stack((b_shp[1], -1)))
output = dot(a, b_t)
output = ops.reshape(output, tf.stack((a_shp[0], b_shp[2], -1)))
output = ops.transpose(output, (2, 0, 1))
return output
def modal_dot(a, b, transpose_a=False, transpose_b=False):
"""
Computes the matrix multiplication of a and b, handling the data modes
automatically.
This is a wrapper to standard matmul operations, for a and b with rank 2
or 3, that:
- Supports automatic broadcasting of the "batch" dimension if the two inputs
have different ranks.
- Supports any combination of dense and sparse inputs.
This op is useful for multiplying matrices that represent batches of graphs
in the different modes, for which the adjacency matrices may or may not be
sparse and have different ranks from the node attributes.
Additionally, it can also support the case where we have many adjacency
matrices and only one graph signal (which is uncommon, but may still happen).
If you know a-priori the type and shape of the inputs, it may be faster to
use the built-in functions of TensorFlow directly instead.
Examples:
- `a` rank 2, `b` rank 2 -> `a @ b`
- `a` rank 3, `b` rank 3 -> `[a[i] @ b[i] for i in range(len(a))]`
- `a` rank 2, `b` rank 3 -> `[a @ b[i] for i in range(len(b))]`
- `a` rank 3, `b` rank 2 -> `[a[i] @ b for i in range(len(a))]`
:param a: Tensor or SparseTensor with rank 2 or 3;
:param b: Tensor or SparseTensor with rank 2 or 3;
:param transpose_a: transpose the innermost 2 dimensions of `a`;
:param transpose_b: transpose the innermost 2 dimensions of `b`;
:return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
"""
a_ndim = K.ndim(a)
b_ndim = K.ndim(b)
assert a_ndim in (2, 3), "Expected a of rank 2 or 3, got {}".format(a_ndim)
assert b_ndim in (2, 3), "Expected b of rank 2 or 3, got {}".format(b_ndim)
if transpose_a:
perm = None if a_ndim == 2 else (0, 2, 1)
a = ops.transpose(a, perm)
if transpose_b:
perm = None if b_ndim == 2 else (0, 2, 1)
b = ops.transpose(b, perm)
if a_ndim == b_ndim:
# ...ij,...jk->...ik
return dot(a, b)
elif a_ndim == 2:
# ij,bjk->bik
return mixed_mode_dot(a, b)
else: # a_ndim == 3
# bij,jk->bik
if not K.is_sparse(a) and not K.is_sparse(b):
# Immediately fallback to standard dense matmul, no need to reshape
return tf.matmul(a, b)
# If either input is sparse, we use dot(a, b)
# This implementation is faster than using rank 3 sparse matmul with tfsp
a_shape = tf.shape(a)
b_shape = tf.shape(b)
a_flat = ops.reshape(a, (-1, a_shape[2]))
output = dot(a_flat, b)
return ops.reshape(output, (-1, a_shape[1], b_shape[1]))
def matmul_at_b_a(a, b):
    """
    Computes a.T @ b @ a, for a, b with rank 2 or 3.
    Supports automatic broadcasting of the "batch" dimension if the two inputs
    have different ranks, and any combination of dense and sparse inputs.
    :param a: Tensor or SparseTensor with rank 2 or 3.
    :param b: Tensor or SparseTensor with rank 2 or 3.
    :return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
    """
    # Contract with a on the left (transposed), then with a on the right.
    return modal_dot(modal_dot(a, b, transpose_a=True), a)
def matrix_power(a, k):
    """
    If a is a square matrix, computes a^k. If a is a rank 3 Tensor of square
    matrices, computes the exponent of each inner matrix.
    Note that for k <= 1 this returns `a` unchanged (only k - 1
    multiplications are performed).
    :param a: Tensor or SparseTensor with rank 2 or 3. The innermost two
    dimensions must be the same.
    :param k: int, the exponent to which to raise the matrices.
    :return: Tensor or SparseTensor with same rank as the input.
    """
    power = a
    remaining = k - 1
    while remaining > 0:
        power = modal_dot(a, power)
        remaining -= 1
    return power
| 6,354 | 33.726776 | 81 | py |
spektral | spektral-master/spektral/layers/ops/ops.py | import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
def transpose(a, perm=None, name=None):
    """
    Transposes a according to perm, dealing automatically with sparsity.
    :param a: Tensor or SparseTensor with rank k.
    :param perm: permutation indices of size k.
    :param name: name for the operation.
    :return: Tensor or SparseTensor with rank k.
    """
    if perm is None:
        # Default to swapping the two axes of a rank-2 input; being explicit
        # ensures the static shape is always preserved.
        perm = (1, 0)
    if K.is_sparse(a):
        return tf.sparse.transpose(a, perm=perm, name=name)
    return tf.transpose(a, perm=perm, name=name)
def reshape(a, shape=None, name=None):
    """
    Reshapes a according to shape, dealing automatically with sparsity.
    :param a: Tensor or SparseTensor.
    :param shape: new shape.
    :param name: name for the operation.
    :return: Tensor or SparseTensor.
    """
    if K.is_sparse(a):
        return tf.sparse.reshape(a, shape=shape, name=name)
    return tf.reshape(a, shape=shape, name=name)
def repeat(x, repeats):
    """
    Repeats elements of a Tensor (equivalent to np.repeat, but only for 1D
    tensors).
    :param x: rank 1 Tensor;
    :param repeats: rank 1 Tensor with same shape as x, the number of
    repetitions for each element;
    :return: rank 1 Tensor, of shape `(sum(repeats), )`.
    """
    # Tile every element up to the maximum repetition count, then keep only
    # the first `repeats[i]` copies of element i via a boolean mask.
    column = tf.expand_dims(x, 1)
    width = tf.reduce_max(repeats)
    tiled = tf.tile(column, [1, width])
    keep = tf.less(tf.range(width), tf.expand_dims(repeats, 1))
    return tf.reshape(tf.boolean_mask(tiled, keep), [-1])
def segment_top_k(x, I, ratio):
"""
Returns indices to get the top K values in x segment-wise, according to
the segments defined in I. K is not fixed, but it is defined as a ratio of
the number of elements in each segment.
:param x: a rank 1 Tensor;
:param I: a rank 1 Tensor with segment IDs for x;
:param ratio: float, ratio of elements to keep for each segment;
:return: a rank 1 Tensor containing the indices to get the top K values of
each segment in x.
"""
rt = tf.RaggedTensor.from_value_rowids(x, I)
row_lengths = rt.row_lengths()
dense = rt.to_tensor(default_value=-np.inf)
indices = tf.cast(tf.argsort(dense, direction="DESCENDING"), tf.int64)
row_starts = tf.cast(rt.row_starts(), tf.int64)
indices = indices + tf.expand_dims(row_starts, 1)
row_lengths = tf.cast(
tf.math.ceil(ratio * tf.cast(row_lengths, tf.float32)), tf.int32
)
return tf.RaggedTensor.from_tensor(indices, row_lengths).values
def indices_to_mask(indices, shape, dtype=tf.bool):
"""
Return mask with true values at indices of the given shape.
This can be used as an inverse to tf.where.
:param indices: [nnz, k] or [nnz] Tensor indices of True values.
:param shape: [k] or [] (scalar) Tensor shape/size of output.
:param dtype: dtype of the output.
:return: Tensor of given shape and dtype.
"""
indices = tf.convert_to_tensor(indices, dtype_hint=tf.int64)
if indices.shape.ndims == 1:
assert isinstance(shape, int) or shape.shape.ndims == 0
indices = tf.expand_dims(indices, axis=1)
if isinstance(shape, int):
shape = tf.TensorShape([shape])
else:
shape = tf.expand_dims(shape, axis=0)
else:
indices.shape.assert_has_rank(2)
assert indices.dtype.is_integer
nnz = tf.shape(indices)[0]
indices = tf.cast(indices, tf.int64)
shape = tf.cast(shape, tf.int64)
return tf.scatter_nd(indices, tf.ones((nnz,), dtype=dtype), shape)
| 3,729 | 34.52381 | 78 | py |
spektral | spektral-master/spektral/utils/keras.py | from tensorflow.keras import activations, constraints, initializers, regularizers
LAYER_KWARGS = {"activation", "use_bias"}
KERAS_KWARGS = {
"trainable",
"name",
"dtype",
"dynamic",
"input_dim",
"input_shape",
"batch_input_shape",
"batch_size",
"weights",
"activity_regularizer",
"autocast",
"implementation",
}
def is_layer_kwarg(key):
    """Return True if `key` is a Spektral-layer kwarg rather than a core
    Keras one (either listed in LAYER_KWARGS or carrying an initializer/
    regularizer/constraint suffix)."""
    if key in KERAS_KWARGS:
        return False
    return key in LAYER_KWARGS or key.endswith(
        ("_initializer", "_regularizer", "_constraint")
    )
def is_keras_kwarg(key):
return key in KERAS_KWARGS
def deserialize_kwarg(key, attr):
    """Deserialize `attr` with the Keras getter that matches the suffix (or
    name) of `key`; unrecognized keys are passed through unchanged."""
    getters = {
        "_initializer": initializers.get,
        "_regularizer": regularizers.get,
        "_constraint": constraints.get,
    }
    for suffix, getter in getters.items():
        if key.endswith(suffix):
            return getter(attr)
    if key == "activation":
        return activations.get(attr)
    return attr
def serialize_kwarg(key, attr):
if key.endswith("_initializer"):
return initializers.serialize(attr)
if key.endswith("_regularizer"):
return regularizers.serialize(attr)
if key.endswith("_constraint"):
return constraints.serialize(attr)
if key == "activation":
return activations.serialize(attr)
if key == "use_bias":
return attr
| 1,372 | 23.517857 | 81 | py |
spektral | spektral-master/examples/other/explain_graph_predictions.py | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.optimizers import Adam
from spektral.data import DisjointLoader
from spektral.datasets import TUDataset
from spektral.models import GeneralGNN, GNNExplainer
# Config
learning_rate = 1e-2
batch_size = 32
epochs = 100
# Load data
data = TUDataset("PROTEINS")
# Train/test split
np.random.shuffle(data)
split = int(0.8 * len(data))
data_tr, data_te = data[:split], data[split:]
# Data loaders
loader_tr = DisjointLoader(data_tr, batch_size=batch_size, epochs=epochs)
loader_te = DisjointLoader(data_te, batch_size=batch_size)
# Create model
model = GeneralGNN(data.n_labels, activation="softmax")
optimizer = Adam(learning_rate)
loss_fn = CategoricalCrossentropy()
# Training function
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_on_batch(inputs, target):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
loss = loss_fn(target, predictions) + sum(model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
acc = tf.reduce_mean(categorical_accuracy(target, predictions))
return loss, acc
# Evaluation function
def evaluate(loader):
step = 0
results = []
for batch in loader:
step += 1
inputs, target = batch
predictions = model(inputs, training=False)
loss = loss_fn(target, predictions)
acc = tf.reduce_mean(categorical_accuracy(target, predictions))
results.append((loss, acc, len(target))) # Keep track of batch size
if step == loader.steps_per_epoch:
results = np.array(results)
return np.average(results[:, :-1], 0, weights=results[:, -1])
# Train model
epoch = step = 0
results = []
for batch in loader_tr:
step += 1
loss, acc = train_on_batch(*batch)
results.append((loss, acc))
if step == loader_tr.steps_per_epoch:
step = 0
epoch += 1
results_te = evaluate(loader_te)
print(
"Epoch {} - Train loss: {:.3f} - Train acc: {:.3f} - "
"Test loss: {:.3f} - Test acc: {:.3f}".format(
epoch, *np.mean(results, 0), *results_te
)
)
# Set up explainer
(x, a, i), y = next(loader_te)
cut_idx = (i == 0).sum()
x_exp = x[:cut_idx]
a_exp = tf.sparse.slice(a, start=[0, 0], size=[cut_idx, cut_idx])
explainer = GNNExplainer(model, graph_level=True, verbose=True)
# Explain prediction for one graph
adj_mask, feat_mask = explainer.explain_node(x=x_exp, a=a_exp)
# Plot the result
G = explainer.plot_subgraph(adj_mask, feat_mask)
plt.show()
| 2,857 | 29.084211 | 86 | py |
spektral | spektral-master/examples/other/explain_node_predictions.py | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import GCNConv
from spektral.models import GNNExplainer
from spektral.models.gcn import GCN
from spektral.transforms import AdjToSpTensor, LayerPreprocess
from spektral.utils import gcn_filter
# Config
learning_rate = 1e-2
seed = 0
epochs = 50
patience = 10
data = "cora"
tf.random.set_seed(seed=seed) # make weight initialization reproducible
# Load data
dataset = Citation(data, normalize_x=True, transforms=[LayerPreprocess(GCNConv)])
# We convert the binary masks to sample weights so that we can compute the
# average loss over the nodes (following original implementation by
# Kipf & Welling)
def mask_to_weights(mask):
    """Turn a binary node mask into sample weights: every selected node gets
    weight 1 / (number of selected nodes), unselected nodes get 0."""
    n_selected = np.count_nonzero(mask)
    return mask.astype(np.float32) / n_selected
weights_tr, weights_va, weights_te = (
mask_to_weights(mask)
for mask in (dataset.mask_tr, dataset.mask_va, dataset.mask_te)
)
model = GCN(n_labels=dataset.n_labels)
model.compile(
optimizer=Adam(learning_rate),
loss=CategoricalCrossentropy(reduction="sum"),
weighted_metrics=["acc"],
)
# Train model
loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
loader_va = SingleLoader(dataset, sample_weights=weights_va)
model.fit(
loader_tr.load(),
steps_per_epoch=loader_tr.steps_per_epoch,
validation_data=loader_va.load(),
validation_steps=loader_va.steps_per_epoch,
epochs=epochs,
callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)],
)
# Set up explainer
dataset.apply(AdjToSpTensor())
x_exp, a_exp = dataset[0].x, dataset[0].a
explainer = GNNExplainer(model, preprocess=gcn_filter, verbose=True)
# Explain prediction for one node
node_idx = 1000
adj_mask, feat_mask = explainer.explain_node(x=x_exp, a=a_exp, node_idx=node_idx)
# Plot the result
G = explainer.plot_subgraph(adj_mask, feat_mask, node_idx)
plt.show()
| 2,137 | 28.694444 | 81 | py |
spektral | spektral-master/examples/other/node_clustering_mincut.py | """
This example implements the experiments for node clustering on citation networks
from the paper:
Mincut pooling in Graph Neural Networks (https://arxiv.org/abs/1907.00481)
Filippo Maria Bianchi, Daniele Grattarola, Cesare Alippi
"""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.metrics.cluster import (
completeness_score,
homogeneity_score,
v_measure_score,
)
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tqdm import tqdm
from spektral.datasets.citation import Cora
from spektral.layers.convolutional import GCSConv
from spektral.layers.pooling import MinCutPool
from spektral.utils.convolution import normalized_adjacency
from spektral.utils.sparse import sp_matrix_to_sp_tensor
@tf.function
def train_step(inputs):
with tf.GradientTape() as tape:
_, S_pool = model(inputs, training=True)
loss = sum(model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
opt.apply_gradients(zip(gradients, model.trainable_variables))
return model.losses[0], model.losses[1], S_pool
np.random.seed(1)
epochs = 5000 # Training iterations
lr = 5e-4 # Learning rate
################################################################################
# LOAD DATASET
################################################################################
dataset = Cora()
adj, x, y = dataset[0].a, dataset[0].x, dataset[0].y
a_norm = normalized_adjacency(adj)
a_norm = sp_matrix_to_sp_tensor(a_norm)
F = dataset.n_node_features
y = np.argmax(y, axis=-1)
n_clusters = y.max() + 1
################################################################################
# MODEL
################################################################################
x_in = Input(shape=(F,), name="X_in")
a_in = Input(shape=(None,), name="A_in", sparse=True)
x_1 = GCSConv(16, activation="elu")([x_in, a_in])
x_1, a_1, s_1 = MinCutPool(n_clusters, return_selection=True)([x_1, a_in])
model = Model([x_in, a_in], [x_1, s_1])
################################################################################
# TRAINING
################################################################################
# Setup
inputs = [x, a_norm]
opt = tf.keras.optimizers.Adam(learning_rate=lr)
# Fit model
loss_history = []
nmi_history = []
for _ in tqdm(range(epochs)):
outs = train_step(inputs)
outs = [o.numpy() for o in outs]
loss_history.append((outs[0], outs[1], (outs[0] + outs[1])))
s_out = np.argmax(outs[2], axis=-1)
nmi_history.append(v_measure_score(y, s_out))
loss_history = np.array(loss_history)
################################################################################
# RESULTS
################################################################################
_, s_out = model(inputs, training=False)
s_out = np.argmax(s_out, axis=-1)
hom = homogeneity_score(y, s_out)
com = completeness_score(y, s_out)
nmi = v_measure_score(y, s_out)
print("Homogeneity: {:.3f}; Completeness: {:.3f}; NMI: {:.3f}".format(hom, com, nmi))
# Plots
plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.plot(loss_history[:, 0], label="Ortho. loss")
plt.plot(loss_history[:, 1], label="MinCUT loss")
plt.plot(loss_history[:, 2], label="Total loss")
plt.legend()
plt.ylabel("Loss")
plt.xlabel("Iteration")
plt.subplot(122)
plt.plot(nmi_history, label="NMI")
plt.legend()
plt.ylabel("NMI")
plt.xlabel("Iteration")
plt.show()
| 3,447 | 30.345455 | 85 | py |
spektral | spektral-master/examples/other/graph_signal_classification_mnist.py | import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.metrics import sparse_categorical_accuracy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from spektral.data import MixedLoader
from spektral.datasets.mnist import MNIST
from spektral.layers import GCNConv, GlobalSumPool
from spektral.utils.sparse import sp_matrix_to_sp_tensor
# Parameters
batch_size = 32 # Batch size
epochs = 1000 # Number of training epochs
patience = 10 # Patience for early stopping
l2_reg = 5e-4 # Regularization rate for l2
# Load data
data = MNIST()
# The adjacency matrix is stored as an attribute of the dataset.
# Create filter for GCN and convert to sparse tensor.
data.a = GCNConv.preprocess(data.a)
data.a = sp_matrix_to_sp_tensor(data.a)
# Train/valid/test split
data_tr, data_te = data[:-10000], data[-10000:]
np.random.shuffle(data_tr)
data_tr, data_va = data_tr[:-10000], data_tr[-10000:]
# We use a MixedLoader since the dataset is in mixed mode
loader_tr = MixedLoader(data_tr, batch_size=batch_size, epochs=epochs)
loader_va = MixedLoader(data_va, batch_size=batch_size)
loader_te = MixedLoader(data_te, batch_size=batch_size)
# Build model
class Net(Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.conv1 = GCNConv(32, activation="elu", kernel_regularizer=l2(l2_reg))
self.conv2 = GCNConv(32, activation="elu", kernel_regularizer=l2(l2_reg))
self.flatten = GlobalSumPool()
self.fc1 = Dense(512, activation="relu")
self.fc2 = Dense(10, activation="softmax") # MNIST has 10 classes
def call(self, inputs):
x, a = inputs
x = self.conv1([x, a])
x = self.conv2([x, a])
output = self.flatten(x)
output = self.fc1(output)
output = self.fc2(output)
return output
# Create model
model = Net()
optimizer = Adam()
loss_fn = SparseCategoricalCrossentropy()
# Training function
@tf.function
def train_on_batch(inputs, target):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
loss = loss_fn(target, predictions) + sum(model.losses)
acc = tf.reduce_mean(sparse_categorical_accuracy(target, predictions))
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss, acc
# Evaluation function
def evaluate(loader):
    """Run the model over one epoch of ``loader``.

    Returns the batch-size-weighted mean ``[loss, accuracy]`` as a numpy array.
    """
    stats = []
    for step, (inputs, target) in enumerate(loader, start=1):
        predictions = model(inputs, training=False)
        batch_loss = loss_fn(target, predictions)
        batch_acc = tf.reduce_mean(sparse_categorical_accuracy(target, predictions))
        stats.append((batch_loss, batch_acc, len(target)))  # Keep track of batch size
        if step == loader.steps_per_epoch:
            stats = np.array(stats)
            # Weight each batch by its size so partial batches count correctly.
            return np.average(stats[:, :-1], 0, weights=stats[:, -1])
# Setup training
best_val_loss = 99999
current_patience = patience
step = 0
# Training loop
results_tr = []
for batch in loader_tr:
step += 1
# Training step
inputs, target = batch
loss, acc = train_on_batch(inputs, target)
results_tr.append((loss, acc, len(target)))
if step == loader_tr.steps_per_epoch:
results_va = evaluate(loader_va)
if results_va[0] < best_val_loss:
best_val_loss = results_va[0]
current_patience = patience
results_te = evaluate(loader_te)
else:
current_patience -= 1
if current_patience == 0:
print("Early stopping")
break
# Print results
results_tr = np.array(results_tr)
results_tr = np.average(results_tr[:, :-1], 0, weights=results_tr[:, -1])
print(
"Train loss: {:.4f}, acc: {:.4f} | "
"Valid loss: {:.4f}, acc: {:.4f} | "
"Test loss: {:.4f}, acc: {:.4f}".format(
*results_tr, *results_va, *results_te
)
)
# Reset epoch
results_tr = []
step = 0
| 4,254 | 30.058394 | 81 | py |
spektral | spektral-master/examples/other/node_clustering_tvgnn.py | """
This example implements the node clustering experiment on citation networks
from the paper:
Total Variation Graph Neural Networks (https://arxiv.org/abs/2211.06218)
Jonas Berg Hansen and Filippo Maria Bianchi
"""
import numpy as np
import tensorflow as tf
from sklearn.metrics.cluster import (
completeness_score,
homogeneity_score,
normalized_mutual_info_score,
)
from tensorflow.keras import Model
from tqdm import tqdm
from spektral.datasets import DBLP
from spektral.datasets.citation import Citation
from spektral.layers import AsymCheegerCutPool, GTVConv
from spektral.utils.sparse import sp_matrix_to_sp_tensor
tf.random.set_seed(1)
################################
# CONFIG/HYPERPARAMETERS
################################
dataset_id = "cora"
mp_channels = 512
mp_layers = 2
mp_activation = "elu"
delta_coeff = 0.311
epsilon = 1e-3
mlp_hidden_channels = 256
mlp_hidden_layers = 1
mlp_activation = "relu"
totvar_coeff = 0.785
balance_coeff = 0.514
learning_rate = 1e-3
epochs = 500
################################
# LOAD DATASET
################################
if dataset_id in ["cora", "citeseer", "pubmed"]:
dataset = Citation(dataset_id, normalize_x=True)
elif dataset_id == "dblp":
dataset = DBLP(normalize_x=True)
X = dataset.graphs[0].x
A = dataset.graphs[0].a
Y = dataset.graphs[0].y
y = np.argmax(Y, axis=-1)
n_clust = Y.shape[-1]
################################
# MODEL
################################
class ClusteringModel(Model):
    """
    Stacks the given message-passing layers and returns the soft cluster
    assignments produced by the pooling layer.
    """

    def __init__(self, aggr, pool):
        super().__init__()
        self.mp = aggr
        self.pool = pool

    def call(self, inputs):
        x, a = inputs
        h = x
        for layer in self.mp:
            h = layer([h, a])
        # The pooling layer returns (x_pool, a_pool, selection); only the
        # selection matrix is needed for node clustering.
        _, _, s_pool = self.pool([h, a])
        return s_pool
# Define the message-passing layers
# Build the stack of message-passing layers.
MP_layers = [
    GTVConv(
        mp_channels,
        delta_coeff=delta_coeff,
        # Use the configured `epsilon` (defined above, same value 1e-3)
        # instead of a hard-coded literal, so the config section stays
        # the single source of truth.
        epsilon=epsilon,
        activation=mp_activation,
    )
    for _ in range(mp_layers)
]
# Define the pooling layer
pool_layer = AsymCheegerCutPool(
n_clust,
mlp_hidden=[mlp_hidden_channels for _ in range(mlp_hidden_layers)],
mlp_activation=mlp_activation,
totvar_coeff=totvar_coeff,
balance_coeff=balance_coeff,
return_selection=True,
)
# Instantiate model and optimizer
model = ClusteringModel(aggr=MP_layers, pool=pool_layer)
opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
################################
# TRAINING
################################
@tf.function(input_signature=None)
def train_step(model, inputs):
    """One unsupervised step: minimise the model's auxiliary losses."""
    with tf.GradientTape() as tape:
        # The forward pass serves only to populate ``model.losses``
        # (the auxiliary terms added by the pooling layer).
        model(inputs, training=True)
        total_loss = sum(model.losses)
    grads = tape.gradient(total_loss, model.trainable_variables)
    opt.apply_gradients(zip(grads, model.trainable_variables))
    return model.losses
A = sp_matrix_to_sp_tensor(A)
inputs = [X, A]
loss_history = []
# Training loop
for _ in tqdm(range(epochs)):
outs = train_step(model, inputs)
################################
# INFERENCE/RESULTS
################################
S_ = model(inputs, training=False)
s_out = np.argmax(S_, axis=-1)
nmi = normalized_mutual_info_score(y, s_out)
hom = homogeneity_score(y, s_out)
com = completeness_score(y, s_out)
print("Homogeneity: {:.3f}; Completeness: {:.3f}; NMI: {:.3f}".format(hom, com, nmi))
| 3,374 | 23.816176 | 85 | py |
spektral | spektral-master/examples/graph_prediction/ogbg-mol-hiv_ecc.py | """
This example shows how to perform molecule classification with the
[Open Graph Benchmark](https://ogb.stanford.edu) `mol-hiv` dataset, using a
simple ECC-based GNN in disjoint mode. The model does not perform really well
but should give you a starting point if you want to implement a more
sophisticated one.
"""
import numpy as np
import tensorflow as tf
from ogb.graphproppred import Evaluator, GraphPropPredDataset
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from spektral.data import DisjointLoader
from spektral.datasets import OGB
from spektral.layers import ECCConv, GlobalSumPool
################################################################################
# Config
################################################################################
learning_rate = 1e-3 # Learning rate
epochs = 10 # Number of training epochs
batch_size = 32 # Batch size
################################################################################
# Load data
################################################################################
dataset_name = "ogbg-molhiv"
ogb_dataset = GraphPropPredDataset(name=dataset_name)
dataset = OGB(ogb_dataset)
# Parameters
F = dataset.n_node_features # Dimension of node features
S = dataset.n_edge_features # Dimension of edge features
n_out = dataset.n_labels # Dimension of the target
# Train/test split
idx = ogb_dataset.get_idx_split()
idx_tr, idx_va, idx_te = idx["train"], idx["valid"], idx["test"]
dataset_tr = dataset[idx_tr]
dataset_va = dataset[idx_va]
dataset_te = dataset[idx_te]
loader_tr = DisjointLoader(dataset_tr, batch_size=batch_size, epochs=epochs)
loader_te = DisjointLoader(dataset_te, batch_size=batch_size, epochs=1)
################################################################################
# Build model
################################################################################
X_in = Input(shape=(F,))
A_in = Input(shape=(None,), sparse=True)
E_in = Input(shape=(S,))
I_in = Input(shape=(), dtype=tf.int64)
X_1 = ECCConv(32, activation="relu")([X_in, A_in, E_in])
X_2 = ECCConv(32, activation="relu")([X_1, A_in, E_in])
X_3 = GlobalSumPool()([X_2, I_in])
output = Dense(n_out, activation="sigmoid")(X_3)
model = Model(inputs=[X_in, A_in, E_in, I_in], outputs=output)
optimizer = Adam(learning_rate)
loss_fn = BinaryCrossentropy()
################################################################################
# Fit model
################################################################################
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
    """Run one optimization step and return the training loss for the batch."""
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        # Add the regularization terms collected in model.losses.
        loss = loss_fn(target, predictions) + sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
step = loss = 0
for batch in loader_tr:
step += 1
loss += train_step(*batch)
if step == loader_tr.steps_per_epoch:
step = 0
print("Loss: {}".format(loss / loader_tr.steps_per_epoch))
loss = 0
################################################################################
# Evaluate model
################################################################################
print("Testing model")
evaluator = Evaluator(name=dataset_name)
y_true = []
y_pred = []
for batch in loader_te:
inputs, target = batch
p = model(inputs, training=False)
y_true.append(target)
y_pred.append(p.numpy())
y_true = np.vstack(y_true)
y_pred = np.vstack(y_pred)
model_loss = loss_fn(y_true, y_pred)
ogb_score = evaluator.eval({"y_true": y_true, "y_pred": y_pred})
print(
"Done. Test loss: {:.4f}. ROC-AUC: {:.2f}".format(model_loss, ogb_score["rocauc"])
)
| 3,971 | 34.783784 | 86 | py |
spektral | spektral-master/examples/graph_prediction/qm9_ecc_batch.py | """
This example shows how to perform regression of molecular properties with the
QM9 database, using a GNN based on edge-conditioned convolutions in batch mode.
"""
import numpy as np
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from spektral.data import BatchLoader
from spektral.datasets import QM9
from spektral.layers import ECCConv, GlobalSumPool, GraphMasking
################################################################################
# Config
################################################################################
learning_rate = 1e-3 # Learning rate
epochs = 10 # Number of training epochs
batch_size = 32 # Batch size
################################################################################
# Load data
################################################################################
dataset = QM9(amount=1000) # Set amount=None to train on whole dataset
# Parameters
F = dataset.n_node_features # Dimension of node features
S = dataset.n_edge_features # Dimension of edge features
n_out = dataset.n_labels # Dimension of the target
# Train/test split
idxs = np.random.permutation(len(dataset))
split = int(0.9 * len(dataset))
idx_tr, idx_te = np.split(idxs, [split])
dataset_tr, dataset_te = dataset[idx_tr], dataset[idx_te]
################################################################################
# Build model
################################################################################
class Net(Model):
    """Batch-mode ECC regressor: mask padded nodes, apply two
    edge-conditioned convolutions, sum-pool, and project to the targets."""

    def __init__(self):
        super().__init__()
        self.masking = GraphMasking()
        self.conv1 = ECCConv(32, activation="relu")
        self.conv2 = ECCConv(32, activation="relu")
        self.global_pool = GlobalSumPool()
        self.dense = Dense(n_out)

    def call(self, inputs):
        x, a, e = inputs
        h = self.masking(x)
        for conv in (self.conv1, self.conv2):
            h = conv([h, a, e])
        return self.dense(self.global_pool(h))
model = Net()
optimizer = Adam(learning_rate)
model.compile(optimizer=optimizer, loss="mse")
################################################################################
# Fit model
################################################################################
loader_tr = BatchLoader(dataset_tr, batch_size=batch_size, mask=True)
model.fit(loader_tr.load(), steps_per_epoch=loader_tr.steps_per_epoch, epochs=epochs)
################################################################################
# Evaluate model
################################################################################
print("Testing model")
loader_te = BatchLoader(dataset_te, batch_size=batch_size, mask=True)
loss = model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)
print("Done. Test loss: {}".format(loss))
| 2,883 | 35.506329 | 85 | py |
spektral | spektral-master/examples/graph_prediction/custom_dataset.py | """
This example shows how to define your own dataset and use it to train a
non-trivial GNN with message-passing and pooling layers.
The script also shows how to implement fast training and evaluation functions
in disjoint mode, with early stopping and accuracy monitoring.
The dataset that we create is a simple synthetic task in which we have random
graphs with randomly-colored nodes. The goal is to classify each graph with the
color that occurs the most on its nodes. For example, given a graph with 2
colors and 3 nodes:
x = [[1, 0],
[1, 0],
[0, 1]],
the corresponding target will be [1, 0].
"""
import numpy as np
import scipy.sparse as sp
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from spektral.data import Dataset, DisjointLoader, Graph
from spektral.layers import GCSConv, GlobalAvgPool
from spektral.transforms.normalize_adj import NormalizeAdj
################################################################################
# Config
################################################################################
learning_rate = 1e-2 # Learning rate
epochs = 400 # Number of training epochs
es_patience = 10 # Patience for early stopping
batch_size = 32 # Batch size
################################################################################
# Load data
################################################################################
class MyDataset(Dataset):
    """
    A dataset of random colored graphs.
    The task is to classify each graph with the color which occurs the most in
    its nodes.
    The graphs have `n_colors` colors, of at least `n_min` and at most `n_max`
    nodes connected with probability `p`.
    """

    def __init__(self, n_samples, n_colors=3, n_min=10, n_max=100, p=0.1, **kwargs):
        self.n_samples = n_samples
        self.n_colors = n_colors
        self.n_min = n_min
        self.n_max = n_max
        self.p = p
        super().__init__(**kwargs)

    def read(self):
        # Dataset.read must return a list of Graph objects.
        return [self._random_graph() for _ in range(self.n_samples)]

    def _random_graph(self):
        """Build one random colored graph with a one-hot majority-color label."""
        n = np.random.randint(self.n_min, self.n_max)
        colors = np.random.randint(0, self.n_colors, size=n)

        # One-hot node features encoding each node's color.
        x = np.zeros((n, self.n_colors))
        x[np.arange(n), colors] = 1

        # Symmetric random adjacency with edge probability p.
        a = np.random.rand(n, n) <= self.p
        a = np.maximum(a, a.T).astype(int)
        a = sp.csr_matrix(a)

        # Label: the color that occurs most often among the nodes.
        y = np.zeros((self.n_colors,))
        y[np.argmax(x.sum(0))] = 1

        return Graph(x=x, a=a, y=y)
data = MyDataset(1000, transforms=NormalizeAdj())
# Train/valid/test split
idxs = np.random.permutation(len(data))
split_va, split_te = int(0.8 * len(data)), int(0.9 * len(data))
idx_tr, idx_va, idx_te = np.split(idxs, [split_va, split_te])
data_tr = data[idx_tr]
data_va = data[idx_va]
data_te = data[idx_te]
# Data loaders
loader_tr = DisjointLoader(data_tr, batch_size=batch_size, epochs=epochs)
loader_va = DisjointLoader(data_va, batch_size=batch_size)
loader_te = DisjointLoader(data_te, batch_size=batch_size)
################################################################################
# Build model
################################################################################
class Net(Model):
    """Three GCS convolutions, average pooling per graph, softmax head."""

    def __init__(self):
        super().__init__()
        self.conv1 = GCSConv(32, activation="relu")
        self.conv2 = GCSConv(32, activation="relu")
        self.conv3 = GCSConv(32, activation="relu")
        self.global_pool = GlobalAvgPool()
        self.dense = Dense(data.n_labels, activation="softmax")

    def call(self, inputs):
        x, a, i = inputs
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv([x, a])
        # ``i`` maps each node to its graph within the disjoint batch.
        return self.dense(self.global_pool([x, i]))
model = Net()
optimizer = Adam(learning_rate=learning_rate)
loss_fn = CategoricalCrossentropy()
################################################################################
# Fit model
################################################################################
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
    """Run one optimization step and return (loss, accuracy) for the batch."""
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        # Add the regularization terms collected in model.losses.
        loss = loss_fn(target, predictions) + sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    acc = tf.reduce_mean(categorical_accuracy(target, predictions))
    return loss, acc
def evaluate(loader):
    """Evaluate the model over one epoch of ``loader``.

    Returns the batch-size-weighted average ``[loss, accuracy]``.
    """
    output = []
    step = 0
    while step < loader.steps_per_epoch:
        step += 1
        # next() is the idiomatic way to advance an iterator (vs. __next__()).
        inputs, target = next(loader)
        pred = model(inputs, training=False)
        outs = (
            loss_fn(target, pred),
            tf.reduce_mean(categorical_accuracy(target, pred)),
            len(target),  # Keep track of batch size
        )
        output.append(outs)
        if step == loader.steps_per_epoch:
            output = np.array(output)
            # Weight each batch by its size so partial batches count correctly.
            return np.average(output[:, :-1], 0, weights=output[:, -1])
epoch = step = 0
best_val_loss = np.inf
best_weights = None
patience = es_patience
results = []
for batch in loader_tr:
step += 1
loss, acc = train_step(*batch)
results.append((loss, acc))
if step == loader_tr.steps_per_epoch:
step = 0
epoch += 1
# Compute validation loss and accuracy
val_loss, val_acc = evaluate(loader_va)
print(
"Ep. {} - Loss: {:.3f} - Acc: {:.3f} - Val loss: {:.3f} - Val acc: {:.3f}".format(
epoch, *np.mean(results, 0), val_loss, val_acc
)
)
# Check if loss improved for early stopping
if val_loss < best_val_loss:
best_val_loss = val_loss
patience = es_patience
print("New best val_loss {:.3f}".format(val_loss))
best_weights = model.get_weights()
else:
patience -= 1
if patience == 0:
print("Early stopping (best val_loss: {})".format(best_val_loss))
break
results = []
################################################################################
# Evaluate model
################################################################################
model.set_weights(best_weights) # Load best model
test_loss, test_acc = evaluate(loader_te)
print("Done. Test loss: {:.4f}. Test acc: {:.2f}".format(test_loss, test_acc))
| 6,894 | 33.133663 | 94 | py |
spektral | spektral-master/examples/graph_prediction/tud_mincut.py | import numpy as np
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from spektral.data import BatchLoader
from spektral.datasets import TUDataset
from spektral.layers import GCSConv, GlobalSumPool, GraphMasking, MinCutPool
################################################################################
# Config
################################################################################
learning_rate = 1e-3 # Learning rate
epochs = 10 # Number of training epochs
batch_size = 32 # Batch size
################################################################################
# Load data
################################################################################
dataset = TUDataset("PROTEINS", clean=True)
# Parameters
N = max(g.n_nodes for g in dataset)
F = dataset.n_node_features # Dimension of node features
S = dataset.n_edge_features # Dimension of edge features
n_out = dataset.n_labels # Dimension of the target
# Train/test split
idxs = np.random.permutation(len(dataset))
split_va, split_te = int(0.8 * len(dataset)), int(0.9 * len(dataset))
idx_tr, idx_va, idx_te = np.split(idxs, [split_va, split_te])
dataset_tr = dataset[idx_tr]
dataset_va = dataset[idx_va]
dataset_te = dataset[idx_te]
loader_tr = BatchLoader(dataset_tr, batch_size=batch_size, mask=True)
loader_va = BatchLoader(dataset_va, batch_size=batch_size, mask=True)
loader_te = BatchLoader(dataset_te, batch_size=batch_size, mask=True)
################################################################################
# Build model
################################################################################
class Net(Model):
    """GCS conv -> MinCut pooling -> GCS conv -> global sum pool -> logits."""

    def __init__(self):
        super().__init__()
        self.mask = GraphMasking()
        self.conv1 = GCSConv(32, activation="relu")
        self.pool = MinCutPool(N // 2)  # pool down to half the padded node count
        self.conv2 = GCSConv(32, activation="relu")
        self.global_pool = GlobalSumPool()
        self.dense1 = Dense(n_out)

    def call(self, inputs):
        x, a = inputs
        h = self.mask(x)
        h = self.conv1([h, a])
        h_pool, a_pool = self.pool([h, a])
        h_pool = self.conv2([h_pool, a_pool])
        return self.dense1(self.global_pool(h_pool))
model = Net()
opt = Adam(learning_rate=learning_rate)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["acc"])
################################################################################
# Fit model
################################################################################
model.fit(
loader_tr.load(),
steps_per_epoch=loader_tr.steps_per_epoch,
epochs=epochs,
validation_data=loader_va,
validation_steps=loader_va.steps_per_epoch,
callbacks=[EarlyStopping(patience=10, restore_best_weights=True)],
)
################################################################################
# Evaluate model
################################################################################
print("Testing model")
loss, acc = model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)
print("Done. Test loss: {}. Test acc: {}".format(loss, acc))
| 3,272 | 35.775281 | 80 | py |
spektral | spektral-master/examples/graph_prediction/general_gnn.py | """
This example implements the model from the paper
> [Design Space for Graph Neural Networks](https://arxiv.org/abs/2011.08843)<br>
> Jiaxuan You, Rex Ying, Jure Leskovec
using the PROTEINS dataset.
The configuration at the top of the file is the best one identified in the
paper, and should work well for many different datasets without changes.
Note: the results reported in the paper are averaged over 3 random repetitions
with an 80/20 split.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.optimizers import Adam
from spektral.data import DisjointLoader
from spektral.datasets import TUDataset
from spektral.models import GeneralGNN
physical_devices = tf.config.list_physical_devices("GPU")
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
################################################################################
# Config
################################################################################
batch_size = 32
learning_rate = 0.01
epochs = 400
################################################################################
# Load data
################################################################################
data = TUDataset("PROTEINS")
# Train/test split
np.random.shuffle(data)
split = int(0.8 * len(data))
data_tr, data_te = data[:split], data[split:]
# Data loaders
loader_tr = DisjointLoader(data_tr, batch_size=batch_size, epochs=epochs)
loader_te = DisjointLoader(data_te, batch_size=batch_size)
################################################################################
# Build model
################################################################################
model = GeneralGNN(data.n_labels, activation="softmax")
optimizer = Adam(learning_rate)
loss_fn = CategoricalCrossentropy()
################################################################################
# Fit model
################################################################################
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
    """Run one optimization step and return (loss, accuracy) for the batch."""
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        # Add the regularization terms collected in model.losses.
        loss = loss_fn(target, predictions) + sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    acc = tf.reduce_mean(categorical_accuracy(target, predictions))
    return loss, acc
def evaluate(loader):
    """Evaluate the model over one epoch of ``loader``.

    Returns the batch-size-weighted average ``[loss, accuracy]``.
    """
    output = []
    step = 0
    while step < loader.steps_per_epoch:
        step += 1
        # next() is the idiomatic way to advance an iterator (vs. __next__()).
        inputs, target = next(loader)
        pred = model(inputs, training=False)
        outs = (
            loss_fn(target, pred),
            tf.reduce_mean(categorical_accuracy(target, pred)),
            len(target),  # Keep track of batch size
        )
        output.append(outs)
        if step == loader.steps_per_epoch:
            output = np.array(output)
            # Weight each batch by its size so partial batches count correctly.
            return np.average(output[:, :-1], 0, weights=output[:, -1])
epoch = step = 0
results = []
for batch in loader_tr:
step += 1
loss, acc = train_step(*batch)
results.append((loss, acc))
if step == loader_tr.steps_per_epoch:
step = 0
epoch += 1
results_te = evaluate(loader_te)
print(
"Ep. {} - Loss: {:.3f} - Acc: {:.3f} - Test loss: {:.3f} - Test acc: {:.3f}".format(
epoch, *np.mean(results, 0), *results_te
)
)
results = []
################################################################################
# Evaluate model
################################################################################
results_te = evaluate(loader_te)
print("Final results - Loss: {:.3f} - Acc: {:.3f}".format(*results_te))
| 3,934 | 34.133929 | 96 | py |
spektral | spektral-master/examples/graph_prediction/tud_gin.py | """
This example shows how to perform graph classification with a simple Graph
Isomorphism Network.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from spektral.data import DisjointLoader
from spektral.datasets import TUDataset
from spektral.layers import GINConv, GlobalAvgPool
################################################################################
# Config
################################################################################
learning_rate = 1e-3 # Learning rate
channels = 128 # Hidden units
layers = 3 # GIN layers
epochs = 10 # Number of training epochs
batch_size = 32 # Batch size
################################################################################
# Load data
################################################################################
dataset = TUDataset("PROTEINS", clean=True)
# Parameters
F = dataset.n_node_features # Dimension of node features
n_out = dataset.n_labels # Dimension of the target
# Train/test split
idxs = np.random.permutation(len(dataset))
split = int(0.9 * len(dataset))
idx_tr, idx_te = np.split(idxs, [split])
dataset_tr, dataset_te = dataset[idx_tr], dataset[idx_te]
loader_tr = DisjointLoader(dataset_tr, batch_size=batch_size, epochs=epochs)
loader_te = DisjointLoader(dataset_te, batch_size=batch_size, epochs=1)
################################################################################
# Build model
################################################################################
class GIN0(Model):
    """GIN with fixed epsilon=0: ``n_layers`` GIN convolutions, average
    pooling over each graph, then a small MLP classifier."""

    def __init__(self, channels, n_layers):
        super().__init__()

        def make_conv():
            return GINConv(channels, epsilon=0, mlp_hidden=[channels, channels])

        self.conv1 = make_conv()
        # The remaining n_layers - 1 convolutions.
        self.convs = [make_conv() for _ in range(1, n_layers)]
        self.pool = GlobalAvgPool()
        self.dense1 = Dense(channels, activation="relu")
        self.dropout = Dropout(0.5)
        self.dense2 = Dense(n_out, activation="softmax")

    def call(self, inputs):
        x, a, i = inputs
        h = self.conv1([x, a])
        for conv in self.convs:
            h = conv([h, a])
        h = self.pool([h, i])  # ``i`` assigns each node to a graph in the batch
        h = self.dropout(self.dense1(h))
        return self.dense2(h)
# Build model
model = GIN0(channels, layers)
optimizer = Adam(learning_rate)
loss_fn = CategoricalCrossentropy()
################################################################################
# Fit model
################################################################################
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
    """Run one optimization step and return (loss, accuracy) for the batch."""
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        # Add the regularization terms collected in model.losses.
        loss = loss_fn(target, predictions) + sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    acc = tf.reduce_mean(categorical_accuracy(target, predictions))
    return loss, acc
epoch = step = 0
results = []
for batch in loader_tr:
step += 1
loss, acc = train_step(*batch)
results.append((loss, acc))
if step == loader_tr.steps_per_epoch:
step = 0
epoch += 1
print("Ep. {} - Loss: {}. Acc: {}".format(epoch, *np.mean(results, 0)))
results = []
################################################################################
# Evaluate model
################################################################################
results = []
for batch in loader_te:
inputs, target = batch
predictions = model(inputs, training=False)
results.append(
(
loss_fn(target, predictions),
tf.reduce_mean(categorical_accuracy(target, predictions)),
)
)
print("Done. Test loss: {}. Test acc: {}".format(*np.mean(results, 0)))
| 4,168 | 33.741667 | 86 | py |
spektral | spektral-master/examples/graph_prediction/qm9_ecc.py | """
This example shows how to perform regression of molecular properties with the
QM9 database, using a simple GNN in disjoint mode.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.optimizers import Adam
from spektral.data import DisjointLoader
from spektral.datasets import QM9
from spektral.layers import ECCConv, GlobalSumPool
################################################################################
# Config
################################################################################
learning_rate = 1e-3 # Learning rate
epochs = 10 # Number of training epochs
batch_size = 32 # Batch size
################################################################################
# Load data
################################################################################
dataset = QM9(amount=1000) # Set amount=None to train on whole dataset
# Parameters
F = dataset.n_node_features # Dimension of node features
S = dataset.n_edge_features # Dimension of edge features
n_out = dataset.n_labels # Dimension of the target
# Train/test split
idxs = np.random.permutation(len(dataset))
split = int(0.9 * len(dataset))
idx_tr, idx_te = np.split(idxs, [split])
dataset_tr, dataset_te = dataset[idx_tr], dataset[idx_te]
loader_tr = DisjointLoader(dataset_tr, batch_size=batch_size, epochs=epochs)
loader_te = DisjointLoader(dataset_te, batch_size=batch_size, epochs=1)
################################################################################
# Build model
################################################################################
class Net(Model):
    """Two ECC convolutions, per-graph sum pooling, then a linear head."""

    def __init__(self):
        super().__init__()
        self.conv1 = ECCConv(32, activation="relu")
        self.conv2 = ECCConv(32, activation="relu")
        self.global_pool = GlobalSumPool()
        self.dense = Dense(n_out)

    def call(self, inputs):
        x, a, e, i = inputs
        h = self.conv1([x, a, e])
        h = self.conv2([h, a, e])
        # ``i`` maps each node to its graph within the disjoint batch.
        return self.dense(self.global_pool([h, i]))
model = Net()
optimizer = Adam(learning_rate)
loss_fn = MeanSquaredError()
################################################################################
# Fit model
################################################################################
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
    """Run one optimization step and return the training loss for the batch."""
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        # Add the regularization terms collected in model.losses.
        loss = loss_fn(target, predictions) + sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
step = loss = 0
for batch in loader_tr:
step += 1
loss += train_step(*batch)
if step == loader_tr.steps_per_epoch:
step = 0
print("Loss: {}".format(loss / loader_tr.steps_per_epoch))
loss = 0
################################################################################
# Evaluate model
################################################################################
print("Testing model")
loss = 0
for batch in loader_te:
inputs, target = batch
predictions = model(inputs, training=False)
loss += loss_fn(target, predictions)
loss /= loader_te.steps_per_epoch
print("Done. Test loss: {}".format(loss))
| 3,524 | 33.223301 | 86 | py |
spektral | spektral-master/examples/node_prediction/citation_gat_custom.py | """
This script is an extension of the citation_gcn_custom.py script.
It shows how to train GAT (with the same experimental setting of the original
paper), using faster training and test functions.
"""
import tensorflow as tf
from tensorflow.keras.layers import Dropout, Input
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from spektral.datasets.citation import Cora
from spektral.layers import GATConv
from spektral.transforms import AdjToSpTensor, LayerPreprocess
from spektral.utils import tic, toc
tf.random.set_seed(0)
# Load data
dataset = Cora(normalize_x=True, transforms=[LayerPreprocess(GATConv), AdjToSpTensor()])
graph = dataset[0]
x, a, y = graph.x, graph.a, graph.y
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te
l2_reg = 2.5e-4
# Define model
x_in = Input(shape=(dataset.n_node_features,))
a_in = Input(shape=(None,), sparse=True)
x_1 = Dropout(0.6)(x_in)
x_1 = GATConv(
8,
attn_heads=8,
concat_heads=True,
dropout_rate=0.6,
activation="elu",
kernel_regularizer=l2(l2_reg),
attn_kernel_regularizer=l2(l2_reg),
bias_regularizer=l2(l2_reg),
)([x_1, a_in])
x_2 = Dropout(0.6)(x_1)
x_2 = GATConv(
dataset.n_labels,
attn_heads=1,
concat_heads=False,
dropout_rate=0.6,
activation="softmax",
kernel_regularizer=l2(l2_reg),
attn_kernel_regularizer=l2(l2_reg),
bias_regularizer=l2(l2_reg),
)([x_2, a_in])
# Build model
model = Model(inputs=[x_in, a_in], outputs=x_2)
optimizer = Adam(learning_rate=5e-3)
loss_fn = CategoricalCrossentropy()
# Training step
@tf.function
def train():
    """One gradient step on the training nodes; returns the training loss."""
    with tf.GradientTape() as tape:
        predictions = model([x, a], training=True)
        # Cross-entropy restricted to the training mask.
        loss = loss_fn(y[mask_tr], predictions[mask_tr])
        # Add the regularization losses collected by the model (L2 on the GAT
        # kernels, attention kernels, and biases — see the layer definitions).
        loss += sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
@tf.function
def evaluate():
    """Compute (losses, accuracies) for the train/val/test masks in one pass."""
    preds = model([x, a], training=False)
    # Regularization term is identical for every split, so compute it once.
    reg = sum(model.losses)
    losses = []
    accuracies = []
    for mask in (mask_tr, mask_va, mask_te):
        losses.append(loss_fn(y[mask], preds[mask]) + reg)
        accuracies.append(
            tf.reduce_mean(categorical_accuracy(y[mask], preds[mask]))
        )
    return losses, accuracies
best_val_loss = 99999
best_test_acc = 0
current_patience = patience = 100
epochs = 999999
tic()
for epoch in range(1, epochs + 1):
    train()
    # FIX: the results were previously unpacked as `l, a`, which rebound the
    # module-level adjacency `a` captured by train()/evaluate(). It only worked
    # because tf.function captures tensors at first trace; use distinct names
    # so the globals the graph functions close over are never shadowed.
    losses, accs = evaluate()
    print(
        "Loss tr: {:.4f}, Acc tr: {:.4f}, "
        "Loss va: {:.4f}, Acc va: {:.4f}, "
        "Loss te: {:.4f}, Acc te: {:.4f}".format(
            losses[0], accs[0], losses[1], accs[1], losses[2], accs[2]
        )
    )
    if losses[1] < best_val_loss:
        # New best validation loss: remember the test accuracy and reset patience.
        best_val_loss = losses[1]
        best_test_acc = accs[2]
        current_patience = patience
        print("Improved")
    else:
        current_patience -= 1
        if current_patience == 0:
            print("Test accuracy: {}".format(best_test_acc))
            break
toc("GAT ({} epochs)".format(epoch))
| 3,234 | 28.144144 | 88 | py |
spektral | spektral-master/examples/node_prediction/citation_gcn.py | """
This example implements the experiments on citation networks from the paper:
Semi-Supervised Classification with Graph Convolutional Networks (https://arxiv.org/abs/1609.02907)
Thomas N. Kipf, Max Welling
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import GCNConv
from spektral.models.gcn import GCN
from spektral.transforms import LayerPreprocess
learning_rate = 1e-2
seed = 0
epochs = 200
patience = 10
data = "cora"
tf.random.set_seed(seed=seed) # make weight initialization reproducible
# Load data
dataset = Citation(data, normalize_x=True, transforms=[LayerPreprocess(GCNConv)])
# We convert the binary masks to sample weights so that we can compute the
# average loss over the nodes (following original implementation by
# Kipf & Welling)
def mask_to_weights(mask):
    """Turn a boolean node mask into float32 sample weights that sum to 1."""
    n_selected = np.count_nonzero(mask)
    return np.asarray(mask, dtype=np.float32) / n_selected
weights_tr, weights_va, weights_te = (
mask_to_weights(mask)
for mask in (dataset.mask_tr, dataset.mask_va, dataset.mask_te)
)
model = GCN(n_labels=dataset.n_labels)
model.compile(
optimizer=Adam(learning_rate),
loss=CategoricalCrossentropy(reduction="sum"),
weighted_metrics=["acc"],
)
# Train model
loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
loader_va = SingleLoader(dataset, sample_weights=weights_va)
model.fit(
loader_tr.load(),
steps_per_epoch=loader_tr.steps_per_epoch,
validation_data=loader_va.load(),
validation_steps=loader_va.steps_per_epoch,
epochs=epochs,
callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)],
)
# Evaluate model
print("Evaluating model.")
loader_te = SingleLoader(dataset, sample_weights=weights_te)
eval_results = model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)
print("Done.\n" "Test loss: {}\n" "Test accuracy: {}".format(*eval_results))
| 2,097 | 30.313433 | 99 | py |
spektral | spektral-master/examples/node_prediction/citation_cheby.py | """
This example implements the experiments on citation networks from the paper:
Semi-Supervised Classification with Graph Convolutional Networks (https://arxiv.org/abs/1609.02907)
Thomas N. Kipf, Max Welling
using the convolutional layers described in:
Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering (https://arxiv.org/abs/1606.09375)
Michaël Defferrard, Xavier Bresson, Pierre Vandergheynst
"""
import numpy as np
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dropout, Input
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import ChebConv
from spektral.transforms import LayerPreprocess
# Load data
dataset = Citation("cora", transforms=[LayerPreprocess(ChebConv)])
# We convert the binary masks to sample weights so that we can compute the
# average loss over the nodes (following original implementation by
# Kipf & Welling)
def mask_to_weights(mask):
    """Turn a boolean node mask into float32 sample weights that sum to 1.

    Cast explicitly to float32: without the cast a boolean mask divided by an
    int yields float64, which is inconsistent with the identical helper in the
    other citation examples and with Keras' default float type.
    """
    return mask.astype(np.float32) / np.count_nonzero(mask)
weights_tr, weights_va, weights_te = (
mask_to_weights(mask)
for mask in (dataset.mask_tr, dataset.mask_va, dataset.mask_te)
)
# Parameters
channels = 16 # Number of channels in the first layer
K = 2 # Max degree of the Chebyshev polynomials
dropout = 0.5 # Dropout rate for the features
l2_reg = 2.5e-4 # L2 regularization rate
learning_rate = 1e-2 # Learning rate
epochs = 200 # Number of training epochs
patience = 10 # Patience for early stopping
a_dtype = dataset[0].a.dtype # Only needed for TF 2.1
N = dataset.n_nodes # Number of nodes in the graph
F = dataset.n_node_features # Original size of node features
n_out = dataset.n_labels # Number of classes
# Model definition
x_in = Input(shape=(F,))
a_in = Input((N,), sparse=True, dtype=a_dtype)
do_1 = Dropout(dropout)(x_in)
gc_1 = ChebConv(
channels, K=K, activation="relu", kernel_regularizer=l2(l2_reg), use_bias=False
)([do_1, a_in])
do_2 = Dropout(dropout)(gc_1)
gc_2 = ChebConv(n_out, K=K, activation="softmax", use_bias=False)([do_2, a_in])
# Build model
model = Model(inputs=[x_in, a_in], outputs=gc_2)
optimizer = Adam(learning_rate=learning_rate)
model.compile(
optimizer=optimizer,
loss=CategoricalCrossentropy(reduction="sum"), # To compute mean
weighted_metrics=["acc"],
)
model.summary()
# Train model
loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
loader_va = SingleLoader(dataset, sample_weights=weights_va)
model.fit(
loader_tr.load(),
steps_per_epoch=loader_tr.steps_per_epoch,
validation_data=loader_va.load(),
validation_steps=loader_va.steps_per_epoch,
epochs=epochs,
callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)],
)
# Evaluate model
print("Evaluating model.")
loader_te = SingleLoader(dataset, sample_weights=weights_te)
eval_results = model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)
print("Done.\n" "Test loss: {}\n" "Test accuracy: {}".format(*eval_results))
| 3,207 | 33.494624 | 113 | py |
spektral | spektral-master/examples/node_prediction/citation_arma.py | """
This example implements the experiments on citation networks from the paper:
Graph Neural Networks with convolutional ARMA filters (https://arxiv.org/abs/1901.01343)
Filippo Maria Bianchi, Daniele Grattarola, Cesare Alippi, Lorenzo Livi
"""
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dropout, Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import ARMAConv
from spektral.transforms import LayerPreprocess
# Load data
dataset = Citation("cora", transforms=[LayerPreprocess(ARMAConv)])
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te
# Parameters
channels = 16 # Number of channels in the first layer
iterations = 1 # Number of iterations to approximate each ARMA(1)
order = 2 # Order of the ARMA filter (number of parallel stacks)
share_weights = True # Share weights in each ARMA stack
dropout_skip = 0.75 # Dropout rate for the internal skip connection of ARMA
dropout = 0.5 # Dropout rate for the features
l2_reg = 5e-5 # L2 regularization rate
learning_rate = 1e-2 # Learning rate
epochs = 20000 # Number of training epochs
patience = 100 # Patience for early stopping
a_dtype = dataset[0].a.dtype # Only needed for TF 2.1
N = dataset.n_nodes # Number of nodes in the graph
F = dataset.n_node_features # Original size of node features
n_out = dataset.n_labels # Number of classes
# Model definition
x_in = Input(shape=(F,))
a_in = Input((N,), sparse=True, dtype=a_dtype)
gc_1 = ARMAConv(
channels,
iterations=iterations,
order=order,
share_weights=share_weights,
dropout_rate=dropout_skip,
activation="elu",
gcn_activation="elu",
kernel_regularizer=l2(l2_reg),
)([x_in, a_in])
gc_2 = Dropout(dropout)(gc_1)
gc_2 = ARMAConv(
n_out,
iterations=1,
order=1,
share_weights=share_weights,
dropout_rate=dropout_skip,
activation="softmax",
gcn_activation=None,
kernel_regularizer=l2(l2_reg),
)([gc_2, a_in])
# Build model
model = Model(inputs=[x_in, a_in], outputs=gc_2)
optimizer = Adam(learning_rate=learning_rate)
model.compile(
optimizer=optimizer, loss="categorical_crossentropy", weighted_metrics=["acc"]
)
model.summary()
# Train model
loader_tr = SingleLoader(dataset, sample_weights=mask_tr)
loader_va = SingleLoader(dataset, sample_weights=mask_va)
model.fit(
loader_tr.load(),
steps_per_epoch=loader_tr.steps_per_epoch,
validation_data=loader_va.load(),
validation_steps=loader_va.steps_per_epoch,
epochs=epochs,
callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)],
)
# Evaluate model
print("Evaluating model.")
loader_te = SingleLoader(dataset, sample_weights=mask_te)
eval_results = model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)
print("Done.\n" "Test loss: {}\n" "Test accuracy: {}".format(*eval_results))
| 3,057 | 32.604396 | 88 | py |
spektral | spektral-master/examples/node_prediction/citation_gcn_custom.py | """
This script is a proof of concept to train GCN as fast as possible and with as
little lines of code as possible.
It uses a custom training function instead of the standard Keras fit(), and
can train GCN for 200 epochs in a few tenths of a second (~0.20 on a GTX 1050).
"""
import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from spektral.datasets.citation import Cora
from spektral.layers import GCNConv
from spektral.models.gcn import GCN
from spektral.transforms import AdjToSpTensor, LayerPreprocess
from spektral.utils import tic, toc
tf.random.set_seed(seed=0) # make weight initialization reproducible
# Load data
dataset = Cora(normalize_x=True, transforms=[LayerPreprocess(GCNConv), AdjToSpTensor()])
graph = dataset[0]
x, a, y = graph.x, graph.a, graph.y
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te
model = GCN(n_labels=dataset.n_labels)
optimizer = Adam(learning_rate=1e-2)
loss_fn = CategoricalCrossentropy()
# Training step
@tf.function
def train():
    """One gradient step on the training nodes; returns the training loss."""
    with tf.GradientTape() as tape:
        predictions = model([x, a], training=True)
        # Cross-entropy restricted to the training mask.
        loss = loss_fn(y[mask_tr], predictions[mask_tr])
        # Add any regularization losses registered by the model.
        loss += sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
# Time the execution of 200 epochs of training
train() # Warm up to ignore tracing times when timing
tic()
for epoch in range(1, 201):
loss = train()
toc("Spektral - GCN (200 epochs)")
print(f"Final loss = {loss}")
| 1,637 | 32.428571 | 88 | py |
spektral | spektral-master/examples/node_prediction/citation_simple_gc.py | """
This example implements the experiments on citation networks from the paper:
Simplifying Graph Convolutional Networks (https://arxiv.org/abs/1902.07153)
Felix Wu, Tianyi Zhang, Amauri Holanda de Souza Jr., Christopher Fifty, Tao Yu, Kilian Q. Weinberger
To implement it, we define a custom transform for the adjacency matrix. A
transform is simply a callable object that takes a Graph as input and returns
a Graph.
"""
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import GCNConv
from spektral.transforms import LayerPreprocess
class SGCN:
    """Graph transform that precomputes the K-step propagation matrix of SGC.

    Replaces ``graph.a`` with its K-th matrix power, as in "Simplifying Graph
    Convolutional Networks" (Wu et al., 2019).
    """

    def __init__(self, K):
        # Number of propagation steps (matrix power exponent).
        self.K = K

    def __call__(self, graph):
        a = graph.a
        out = a
        # FIX: the previous loop did `out = out.dot(out)`, squaring at each
        # step and therefore computing a^(2^(K-1)) instead of a^K. The two
        # coincide for K == 2 (the value used in this example) but diverge for
        # K > 2; multiply by the original matrix instead.
        for _ in range(self.K - 1):
            out = out.dot(a)
        out.sort_indices()
        graph.a = out
        return graph
# Load data
K = 2 # Propagation steps for SGCN
dataset = Citation("cora", transforms=[LayerPreprocess(GCNConv), SGCN(K)])
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te
# Parameters
l2_reg = 5e-6 # L2 regularization rate
learning_rate = 0.2 # Learning rate
epochs = 20000 # Number of training epochs
patience = 200 # Patience for early stopping
a_dtype = dataset[0].a.dtype # Only needed for TF 2.1
N = dataset.n_nodes # Number of nodes in the graph
F = dataset.n_node_features # Original size of node features
n_out = dataset.n_labels # Number of classes
# Model definition
x_in = Input(shape=(F,))
a_in = Input((N,), sparse=True, dtype=a_dtype)
output = GCNConv(
n_out, activation="softmax", kernel_regularizer=l2(l2_reg), use_bias=False
)([x_in, a_in])
# Build model
model = Model(inputs=[x_in, a_in], outputs=output)
optimizer = Adam(learning_rate=learning_rate)
model.compile(
optimizer=optimizer, loss="categorical_crossentropy", weighted_metrics=["acc"]
)
model.summary()
# Train model
loader_tr = SingleLoader(dataset, sample_weights=mask_tr)
loader_va = SingleLoader(dataset, sample_weights=mask_va)
model.fit(
loader_tr.load(),
steps_per_epoch=loader_tr.steps_per_epoch,
validation_data=loader_va.load(),
validation_steps=loader_va.steps_per_epoch,
epochs=epochs,
callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)],
)
# Evaluate model
print("Evaluating model.")
loader_te = SingleLoader(dataset, sample_weights=mask_te)
eval_results = model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)
print("Done.\n" "Test loss: {}\n" "Test accuracy: {}".format(*eval_results))
| 2,774 | 31.267442 | 100 | py |
spektral | spektral-master/examples/node_prediction/ogbn-arxiv_gcn.py | """
This example implements the same GCN example for node classification provided
with the [Open Graph Benchmark](https://ogb.stanford.edu).
See https://github.com/snap-stanford/ogb/blob/master/examples/nodeproppred/arxiv/gnn.py
for the reference implementation.
"""
import numpy as np
import tensorflow as tf
from ogb.nodeproppred import Evaluator, NodePropPredDataset
from tensorflow.keras.layers import BatchNormalization, Dropout, Input
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from spektral.datasets.ogb import OGB
from spektral.layers import GCNConv
from spektral.transforms import AdjToSpTensor, GCNFilter
# Load data
dataset_name = "ogbn-arxiv"
ogb_dataset = NodePropPredDataset(dataset_name)
dataset = OGB(ogb_dataset, transforms=[GCNFilter(), AdjToSpTensor()])
graph = dataset[0]
x, adj, y = graph.x, graph.a, graph.y
# Parameters
channels = 256 # Number of channels for GCN layers
dropout = 0.5 # Dropout rate for the features
learning_rate = 1e-2 # Learning rate
epochs = 200 # Number of training epochs
N = dataset.n_nodes # Number of nodes in the graph
F = dataset.n_node_features # Original size of node features
n_out = ogb_dataset.num_classes # OGB labels are sparse indices
# Data splits
idx = ogb_dataset.get_idx_split()
idx_tr, idx_va, idx_te = idx["train"], idx["valid"], idx["test"]
mask_tr = np.zeros(N, dtype=bool)
mask_va = np.zeros(N, dtype=bool)
mask_te = np.zeros(N, dtype=bool)
mask_tr[idx_tr] = True
mask_va[idx_va] = True
mask_te[idx_te] = True
masks = [mask_tr, mask_va, mask_te]
# Model definition
x_in = Input(shape=(F,))
a_in = Input((N,), sparse=True)
x_1 = GCNConv(channels, activation="relu")([x_in, a_in])
x_1 = BatchNormalization()(x_1)
x_1 = Dropout(dropout)(x_1)
x_2 = GCNConv(channels, activation="relu")([x_1, a_in])
x_2 = BatchNormalization()(x_2)
x_2 = Dropout(dropout)(x_2)
x_3 = GCNConv(n_out, activation="softmax")([x_2, a_in])
# Build model
model = Model(inputs=[x_in, a_in], outputs=x_3)
optimizer = Adam(learning_rate=learning_rate)
loss_fn = SparseCategoricalCrossentropy()
model.summary()
# Training function
@tf.function
def train(inputs, target, mask):
    """One optimization step on the nodes selected by ``mask``; returns the loss."""
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        # Gather the masked nodes before computing the (sparse-label) loss,
        # and add any regularization losses registered by the model.
        loss = loss_fn(target[mask], predictions[mask]) + sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
# Evaluation with OGB
evaluator = Evaluator(dataset_name)
def evaluate(x, a, y, model, masks, evaluator):
    """Score the model with the OGB evaluator.

    Returns the "acc" metric for the (train, validation, test) masks.
    """
    logits = model([x, a], training=False)
    # OGB expects sparse label indices of shape (N, 1).
    preds = logits.numpy().argmax(-1)[:, None]
    tr_mask, va_mask, te_mask = masks
    accs = tuple(
        evaluator.eval({"y_true": y[m], "y_pred": preds[m]})["acc"]
        for m in (tr_mask, va_mask, te_mask)
    )
    return accs
# Train model
for i in range(1, 1 + epochs):
tr_loss = train([x, adj], y, mask_tr)
tr_acc, va_acc, te_acc = evaluate(x, adj, y, model, masks, evaluator)
print(
"Ep. {} - Loss: {:.3f} - Acc: {:.3f} - Val acc: {:.3f} - Test acc: "
"{:.3f}".format(i, tr_loss, tr_acc, va_acc, te_acc)
)
# Evaluate model
print("Evaluating model.")
tr_acc, va_acc, te_acc = evaluate(x, adj, y, model, masks, evaluator)
print("Done! - Test acc: {:.3f}".format(te_acc))
| 3,535 | 33 | 87 | py |
spektral | spektral-master/examples/node_prediction/citation_gat.py | """
This example implements the experiments on citation networks from the paper:
Graph Attention Networks (https://arxiv.org/abs/1710.10903)
Petar Veličković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Liò, Yoshua Bengio
"""
import numpy as np
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dropout, Input
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow.random import set_seed
from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import GATConv
from spektral.transforms import LayerPreprocess
set_seed(0)
# Load data
dataset = Citation("cora", normalize_x=True, transforms=[LayerPreprocess(GATConv)])
def mask_to_weights(mask):
return mask.astype(np.float32) / np.count_nonzero(mask)
weights_tr, weights_va, weights_te = (
mask_to_weights(mask)
for mask in (dataset.mask_tr, dataset.mask_va, dataset.mask_te)
)
# Parameters
channels = 8 # Number of channels in each head of the first GAT layer
n_attn_heads = 8 # Number of attention heads in first GAT layer
dropout = 0.6 # Dropout rate for the features and adjacency matrix
l2_reg = 2.5e-4 # L2 regularization rate
learning_rate = 5e-3 # Learning rate
epochs = 20000 # Number of training epochs
patience = 100 # Patience for early stopping
N = dataset.n_nodes # Number of nodes in the graph
F = dataset.n_node_features # Original size of node features
n_out = dataset.n_labels # Number of classes
# Model definition
x_in = Input(shape=(F,))
a_in = Input((N,), sparse=True)
do_1 = Dropout(dropout)(x_in)
gc_1 = GATConv(
channels,
attn_heads=n_attn_heads,
concat_heads=True,
dropout_rate=dropout,
activation="elu",
kernel_regularizer=l2(l2_reg),
attn_kernel_regularizer=l2(l2_reg),
bias_regularizer=l2(l2_reg),
)([do_1, a_in])
do_2 = Dropout(dropout)(gc_1)
gc_2 = GATConv(
n_out,
attn_heads=1,
concat_heads=False,
dropout_rate=dropout,
activation="softmax",
kernel_regularizer=l2(l2_reg),
attn_kernel_regularizer=l2(l2_reg),
bias_regularizer=l2(l2_reg),
)([do_2, a_in])
# Build model
model = Model(inputs=[x_in, a_in], outputs=gc_2)
optimizer = Adam(learning_rate=learning_rate)
model.compile(
optimizer=optimizer,
loss=CategoricalCrossentropy(reduction="sum"),
weighted_metrics=["acc"],
)
model.summary()
# Train model
loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
loader_va = SingleLoader(dataset, sample_weights=weights_va)
model.fit(
loader_tr.load(),
steps_per_epoch=loader_tr.steps_per_epoch,
validation_data=loader_va.load(),
validation_steps=loader_va.steps_per_epoch,
epochs=epochs,
callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)],
)
# Evaluate model
print("Evaluating model.")
loader_te = SingleLoader(dataset, sample_weights=weights_te)
eval_results = model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)
print("Done.\n" "Test loss: {}\n" "Test accuracy: {}".format(*eval_results))
| 3,212 | 30.194175 | 95 | py |
spektral | spektral-master/tests/test_layers/pooling/core.py | import numpy as np
import scipy.sparse as sp
import tensorflow as tf
from tensorflow.keras import Input, Model
from spektral.utils.sparse import sp_matrix_to_sp_tensor
from tests.test_layers.convolutional.core import _test_get_config
tf.keras.backend.set_floatx("float64")
MODES = {
"SINGLE": 0,
"BATCH": 1,
"DISJOINT": 2,
}
batch_size = 3
N1, N2, N3 = 4, 5, 2
N = N1 + N2 + N3
F = 7
def _check_output_and_model_output_shapes(true_shape, model_shape):
assert len(true_shape) == len(model_shape)
for i in range(len(true_shape)):
assert len(true_shape[i]) == len(model_shape[i])
for j in range(len(true_shape[i])):
assert model_shape[i][j] in {true_shape[i][j], None}
def _check_number_of_nodes(N_pool_expected, N_pool_true):
assert N_pool_expected == N_pool_true or N_pool_true is None
def _test_single_mode(layer, sparse=False, **kwargs):
    """Run `layer` in single mode (one graph) and check the pooled shapes.

    `sparse` selects a SparseTensor adjacency; `kwargs` are forwarded to the
    layer constructor and may contain "ratio" or "k" to fix the expected
    number of pooled nodes.
    """
    A = np.ones((N, N))
    X = np.random.normal(size=(N, F))
    A_in = Input(shape=(None,), sparse=sparse)
    X_in = Input(shape=(F,))
    inputs = [X_in, A_in]
    if sparse:
        input_data = [X, sp_matrix_to_sp_tensor(A)]
    else:
        input_data = [X, A]
    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)
    output = model(input_data)
    X_pool, A_pool, mask = output
    # Derive the expected number of pooled nodes from the layer kwargs.
    if "ratio" in kwargs.keys():
        N_exp = kwargs["ratio"] * N
        N_pool_expected = int(np.ceil(N_exp))
    elif "k" in kwargs.keys():
        N_pool_expected = int(kwargs["k"])
    else:
        N_pool_expected = None
    N_pool_true = A_pool.shape[-1]
    assert N_pool_true > 0
    if N_pool_expected is not None:
        _check_number_of_nodes(N_pool_expected, N_pool_true)
        assert X_pool.shape == (N_pool_expected, F)
        assert A_pool.shape == (N_pool_expected, N_pool_expected)
    # The model's symbolic output shapes must be consistent with the concrete ones.
    output_shape = [o.shape for o in output]
    _check_output_and_model_output_shapes(output_shape, model.output_shape)
def _test_batch_mode(layer, **kwargs):
    """Run `layer` in batch mode on dense inputs and check the pooled shapes."""
    adj = np.ones((batch_size, N, N))
    feats = np.random.normal(size=(batch_size, N, F))
    x_in = Input(shape=(N, F))
    a_in = Input(shape=(N, N))
    model_inputs = [x_in, a_in]
    pool = layer(**kwargs)
    model = Model(model_inputs, pool(model_inputs))
    out = model([feats, adj])
    X_pool, A_pool, mask = out
    # Derive the expected number of pooled nodes from the layer kwargs.
    if "ratio" in kwargs:
        expected_nodes = int(np.ceil(kwargs["ratio"] * N))
    elif "k" in kwargs:
        expected_nodes = int(kwargs["k"])
    else:
        expected_nodes = None
    actual_nodes = A_pool.shape[-1]
    if expected_nodes is not None:
        _check_number_of_nodes(expected_nodes, actual_nodes)
        assert X_pool.shape == (batch_size, expected_nodes, F)
        assert A_pool.shape == (batch_size, expected_nodes, expected_nodes)
    # The model's symbolic output shapes must be consistent with the concrete ones.
    _check_output_and_model_output_shapes(
        [o.shape for o in out], model.output_shape
    )
def _test_disjoint_mode(layer, sparse=False, **kwargs):
    """Run `layer` in disjoint mode (three graphs in one block-diagonal batch)
    and check the pooled shapes, including the pooled graph-index vector."""
    # Block-diagonal adjacency of three all-ones graphs with N1, N2, N3 nodes.
    A = sp.block_diag(
        [np.ones((N1, N1)), np.ones((N2, N2)), np.ones((N3, N3))]
    ).todense()
    X = np.random.normal(size=(N, F))
    # Graph-membership index: node i belongs to graph I[i].
    I = np.array([0] * N1 + [1] * N2 + [2] * N3).astype(int)
    A_in = Input(shape=(None,), sparse=sparse)
    X_in = Input(shape=(F,))
    I_in = Input(shape=(), dtype=tf.int32)
    inputs = [X_in, A_in, I_in]
    if sparse:
        input_data = [X, sp_matrix_to_sp_tensor(A), I]
    else:
        input_data = [X, A, I]
    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)
    output = model(input_data)
    X_pool, A_pool, I_pool, s = output
    # A "ratio" applies per graph, so ceil each component before summing.
    if "ratio" in kwargs.keys():
        N_pool_expected = int(
            np.ceil(kwargs["ratio"] * N1)
            + np.ceil(kwargs["ratio"] * N2)
            + np.ceil(kwargs["ratio"] * N3)
        )
    elif "k" in kwargs.keys():
        N_pool_expected = int(kwargs["k"])
    else:
        N_pool_expected = None
    N_pool_true = A_pool.shape[0]
    if N_pool_expected is not None:
        _check_number_of_nodes(N_pool_expected, N_pool_true)
        assert X_pool.shape == (N_pool_expected, F)
        assert A_pool.shape == (N_pool_expected, N_pool_expected)
        assert I_pool.shape == (N_pool_expected,)
    # The model's symbolic output shapes must be consistent with the concrete ones.
    output_shape = [o.shape for o in output]
    _check_output_and_model_output_shapes(output_shape, model.output_shape)
def run_layer(config):
    """Exercise a pooling layer in every data mode listed in ``config``.

    ``config`` holds the layer class, its constructor kwargs, the modes to
    test, and whether dense and/or sparse adjacencies are supported.
    """
    layer = config["layer"]
    kwargs = config["kwargs"]
    for mode in config["modes"]:
        if mode == MODES["SINGLE"]:
            if config["dense"]:
                _test_single_mode(layer, **kwargs)
            if config["sparse"]:
                _test_single_mode(layer, sparse=True, **kwargs)
        elif mode == MODES["BATCH"]:
            _test_batch_mode(layer, **kwargs)
        elif mode == MODES["DISJOINT"]:
            if config["dense"]:
                _test_disjoint_mode(layer, **kwargs)
            if config["sparse"]:
                _test_disjoint_mode(layer, sparse=True, **kwargs)
    # Serialization round-trip check is mode-independent; run it once.
    _test_get_config(layer, **kwargs)
| 5,234 | 29.086207 | 85 | py |
spektral | spektral-master/tests/test_layers/pooling/test_global_pooling.py | import numpy as np
import tensorflow as tf
from tensorflow.keras import Input, Model
from spektral.layers import (
GlobalAttentionPool,
GlobalAttnSumPool,
GlobalAvgPool,
GlobalMaxPool,
GlobalSumPool,
SortPool,
)
from tests.test_layers.convolutional.core import _test_get_config
tf.keras.backend.set_floatx("float64")
batch_size = 32
N = 11
F = 7
def _check_output_and_model_output_shapes(true_shape, model_shape):
    """Compare a concrete output shape against the model's symbolic one.

    Dimensions equal to the node count N or to batch_size may be reported as
    None by Keras (dynamic dimensions); every other dimension must match exactly.
    """
    assert len(true_shape) == len(model_shape)
    for expected, got in zip(true_shape, model_shape):
        if expected in (N, batch_size):
            assert got in {expected, None}
        else:
            assert got == expected
def _test_single_mode(layer, **kwargs):
    """Run a global-pooling `layer` in single mode and check the output shape.

    A "target_shape" kwarg overrides the default expectation of
    (1, channels-or-F); all other kwargs go to the layer constructor.
    """
    X = np.random.normal(size=(N, F))
    if "target_shape" in kwargs:
        target_output_shape = kwargs.pop("target_shape")
    else:
        target_output_shape = (1, kwargs.get("channels", F))
    X_in = Input(shape=(F,))
    layer_instance = layer(**kwargs)
    output = layer_instance(X_in)
    model = Model(X_in, output)
    output = model(X)
    assert output.shape == target_output_shape
    # compute_output_shape must agree with what the layer actually produced.
    assert output.shape == layer_instance.compute_output_shape(X.shape)
    _check_output_and_model_output_shapes(output.shape, model.output_shape)
def _test_batch_mode(layer, **kwargs):
    """Run a global-pooling `layer` in batch mode and check the output shape.

    A "target_shape" kwarg overrides the default expectation of
    (batch_size, channels-or-F); all other kwargs go to the layer constructor.
    """
    X = np.random.normal(size=(batch_size, N, F))
    if "target_shape" in kwargs:
        target_output_shape = kwargs.pop("target_shape")
    else:
        target_output_shape = (batch_size, kwargs.get("channels", F))
    X_in = Input(shape=(N, F))
    layer_instance = layer(**kwargs)
    output = layer_instance(X_in)
    model = Model(X_in, output)
    output = model(X)
    assert output.shape == target_output_shape
    # compute_output_shape must agree with what the layer actually produced.
    assert output.shape == layer_instance.compute_output_shape(X.shape)
    _check_output_and_model_output_shapes(output.shape, model.output_shape)
def _test_disjoint_mode(layer, **kwargs):
    """Run a global-pooling `layer` in disjoint mode and check the output shape.

    Nodes of batch_size graphs are stacked along the first axis, with the
    graph-membership index I marking which graph each node belongs to.
    """
    X = np.random.normal(size=(batch_size * N, F))
    I = np.repeat(np.arange(batch_size), N).astype(int)
    if "target_shape" in kwargs:
        target_output_shape = kwargs.pop("target_shape")
    else:
        target_output_shape = (batch_size, kwargs.get("channels", F))
    X_in = Input(shape=(F,))
    I_in = Input(shape=(), dtype=I.dtype)
    layer_instance = layer(**kwargs)
    output = layer_instance([X_in, I_in])
    model = Model([X_in, I_in], output)
    output = model([X, I])
    assert output.shape == target_output_shape
    # The leading (batch) dimension of compute_output_shape is dynamic in
    # disjoint mode, so only the trailing dimensions are compared.
    assert (
        output.shape[1:] == layer_instance.compute_output_shape([X.shape, I.shape])[1:]
    )
    _check_output_and_model_output_shapes(output.shape, model.output_shape)
# Each test below exercises one global-pooling layer in single, batch, and
# disjoint mode, then checks that its config round-trips.
def test_global_sum_pool():
    _test_single_mode(GlobalSumPool)
    _test_batch_mode(GlobalSumPool)
    _test_disjoint_mode(GlobalSumPool)
    _test_get_config(GlobalSumPool)
def test_global_avg_pool():
    _test_single_mode(GlobalAvgPool)
    _test_batch_mode(GlobalAvgPool)
    _test_disjoint_mode(GlobalAvgPool)
    _test_get_config(GlobalAvgPool)
def test_global_max_pool():
    _test_single_mode(GlobalMaxPool)
    _test_batch_mode(GlobalMaxPool)
    _test_disjoint_mode(GlobalMaxPool)
    _test_get_config(GlobalMaxPool)
def test_global_node_attention_pool():
    _test_single_mode(GlobalAttnSumPool)
    _test_batch_mode(GlobalAttnSumPool)
    _test_disjoint_mode(GlobalAttnSumPool)
    _test_get_config(GlobalAttnSumPool)
def test_global_attention_pool():
    # Use an output size different from the input feature size so the test
    # verifies that the layer actually projects to `channels`.
    F_ = 10
    assert F_ != F
    _test_single_mode(GlobalAttentionPool, channels=F_)
    _test_batch_mode(GlobalAttentionPool, channels=F_)
    _test_disjoint_mode(GlobalAttentionPool, channels=F_)
    _test_get_config(GlobalAttentionPool, channels=F_)
def test_sort_pool():
    # k smaller than the number of nodes: plain top-k selection.
    k = 6
    _test_single_mode(SortPool, k=k, target_shape=(k, F))
    _test_batch_mode(SortPool, k=k, target_shape=(batch_size, k, F))
    _test_disjoint_mode(SortPool, k=k, target_shape=(batch_size, k, F))
    _test_get_config(SortPool, k=k)
def test_sort_pool_padded():
    # k larger than the number of nodes exercises the zero-padding path.
    k = N + 2
    _test_single_mode(SortPool, k=k, target_shape=(k, F))
    _test_batch_mode(SortPool, k=k, target_shape=(batch_size, k, F))
    _test_disjoint_mode(SortPool, k=k, target_shape=(batch_size, k, F))
    _test_get_config(SortPool, k=k)
| 4,319 | 30.304348 | 87 | py |
spektral | spektral-master/tests/test_layers/convolutional/test_censnet_conv.py | import enum
import networkx as nx
import numpy as np
import pytest
from core import A, F, S, batch_size
from tensorflow.keras import Input, Model
from spektral.layers import CensNetConv
NODE_CHANNELS = 8
"""
Number of node output channels to use for testing.
"""
EDGE_CHANNELS = 10
"""
Number of edge output channels to use for testing.
"""
@enum.unique
class Modes(enum.IntEnum):
    """The data modes a layer can be exercised in during testing."""

    # Explicit values match what enum.auto() would have assigned (1, 2, 3).
    SINGLE = 1
    BATCH = 2
    MIXED = 3
@pytest.fixture()
def random_graph_descriptors():
    """
    Creates a random graph to use, and computes its various descriptors.
    :return: The normalized graph laplacian, the normalized line graph
        laplacian, and the incidence matrix, all as dense numpy arrays.
    """
    # Fixed seed makes the graph (11 nodes, 50 edges) deterministic across runs.
    graph = nx.dense_gnm_random_graph(11, 50, seed=1337)
    line_graph = nx.line_graph(graph)
    node_laplacian = np.array(nx.normalized_laplacian_matrix(graph).todense())
    edge_laplacian = np.array(nx.normalized_laplacian_matrix(line_graph).todense())
    incidence = np.array(nx.incidence_matrix(graph).todense())
    return node_laplacian, edge_laplacian, incidence
@pytest.mark.parametrize("mode", [Modes.SINGLE, Modes.BATCH, Modes.MIXED])
def test_smoke(random_graph_descriptors, mode):
    """
    Tests that we can create a model with the layer, and it processes
    input data without crashing.

    :param random_graph_descriptors: Descriptors for the graph to use when
        testing.
    :param mode: The data mode to use for this test.
    """
    # Arrange.
    node_laplacian, edge_laplacian, incidence = random_graph_descriptors
    # Create node and edge features.
    node_feature_shape = (node_laplacian.shape[0], F)
    edge_feature_shape = (edge_laplacian.shape[0], S)
    if mode != Modes.SINGLE:
        # Add batch dimensions.
        node_feature_shape = (batch_size,) + node_feature_shape
        edge_feature_shape = (batch_size,) + edge_feature_shape
    if mode == Modes.BATCH:
        # Only BATCH mode batches the graph descriptors too; in MIXED mode
        # all instances share the single, un-batched graph structure.
        node_laplacian = np.stack([node_laplacian] * batch_size)
        edge_laplacian = np.stack([edge_laplacian] * batch_size)
        incidence = np.stack([incidence] * batch_size)
    node_features = np.random.normal(size=node_feature_shape)
    edge_features = np.random.normal(size=edge_feature_shape)
    # Create the model.
    node_input = Input(shape=node_features.shape[1:])
    laplacian_input = Input(shape=node_laplacian.shape[1:])
    edge_laplacian_input = Input(shape=edge_laplacian.shape[1:])
    incidence_input = Input(shape=incidence.shape[1:])
    edge_input = Input(shape=edge_features.shape[1:])
    # The layer takes (nodes, (laplacian, edge laplacian, incidence), edges)
    # and returns transformed node and edge features.
    next_nodes, next_edges = CensNetConv(
        NODE_CHANNELS, EDGE_CHANNELS, activation="relu"
    )(
        (
            node_input,
            (laplacian_input, edge_laplacian_input, incidence_input),
            edge_input,
        )
    )
    model = Model(
        inputs=(
            node_input,
            edge_input,
            laplacian_input,
            edge_laplacian_input,
            incidence_input,
        ),
        outputs=(next_nodes, next_edges),
    )
    # Act.
    # Run the model.
    got_next_nodes, got_next_edges = model(
        [node_features, edge_features, node_laplacian, edge_laplacian, incidence]
    )
    # Assert.
    # Make sure that the output shapes are correct.
    got_node_shape = got_next_nodes.numpy().shape
    got_edge_shape = got_next_edges.numpy().shape
    assert got_node_shape == node_features.shape[:-1] + (NODE_CHANNELS,)
    assert got_edge_shape == edge_features.shape[:-1] + (EDGE_CHANNELS,)
@pytest.mark.parametrize("mode", [Modes.SINGLE, Modes.BATCH, Modes.MIXED])
@pytest.mark.parametrize(("num_nodes", "num_edges"), [(5, 5), (4, 10), (1, 1)])
def test_output_shape(mode, num_nodes, num_edges):
    """
    Tests that we can compute the output shape correctly.

    :param mode: The data mode to use for this test.
    :param num_nodes: The number of nodes to use for the input.
    :param num_edges: The number of edges to use for the input.
    """
    # Arrange.
    # Create valid-looking input shapes.
    node_features_shape = (num_nodes, F)
    edge_features_shape = (num_edges, S)
    node_laplacian_shape = (num_nodes, num_nodes)
    edge_laplacian_shape = (num_edges, num_edges)
    incidence_shape = (num_nodes, num_edges)
    if mode != Modes.SINGLE:
        # Features always gain a batch axis outside SINGLE mode.
        node_features_shape = (batch_size,) + node_features_shape
        edge_features_shape = (batch_size,) + edge_features_shape
    if mode == Modes.BATCH:
        # Graph descriptors are batched only in BATCH mode (MIXED shares them).
        node_laplacian_shape = (batch_size,) + node_laplacian_shape
        edge_laplacian_shape = (batch_size,) + edge_laplacian_shape
        incidence_shape = (batch_size,) + incidence_shape
    input_shape = (
        node_features_shape,
        (node_laplacian_shape, edge_laplacian_shape, incidence_shape),
        edge_features_shape,
    )
    # Create the layer.
    layer = CensNetConv(NODE_CHANNELS, EDGE_CHANNELS)
    # Act.
    got_output_shape = layer.compute_output_shape(input_shape)
    # Assert.
    # The output shape should be the same as the input, but with the correct
    # channel numbers.
    expected_node_feature_shape = node_features_shape[:-1] + (NODE_CHANNELS,)
    expected_edge_feature_shape = edge_features_shape[:-1] + (EDGE_CHANNELS,)
    assert got_output_shape == (
        expected_node_feature_shape,
        expected_edge_feature_shape,
    )
def test_get_config_round_trip():
    """
    Tests that it is possible to serialize a layer using `get_config()`,
    and then re-instantiate an identical one.
    """
    original = CensNetConv(NODE_CHANNELS, EDGE_CHANNELS)
    # Serialize, then rebuild a layer from the serialized form.
    rebuilt = CensNetConv(**original.get_config())
    # Both channel counts must survive the round trip.
    assert (rebuilt.node_channels, rebuilt.edge_channels) == (
        original.node_channels,
        original.edge_channels,
    )
def test_preprocess_smoke():
    """
    Tests that the preprocessing functionality does not crash.
    """
    # We only care that this call completes without raising.
    descriptors = CensNetConv.preprocess(A)
    node_laplacian, edge_laplacian, incidence = descriptors
| 6,138 | 30.64433 | 83 | py |
spektral | spektral-master/tests/test_layers/convolutional/core.py | import itertools
import numpy as np
import tensorflow as tf
from tensorflow.keras import Input, Model
from spektral.utils.sparse import sp_matrix_to_sp_tensor
# All test tensors use float64 to reduce precision-related flakiness.
tf.keras.backend.set_floatx("float64")
# Data modes exercised by run_layer (see below).
MODES = {
    "SINGLE": 0,
    "BATCH": 1,
    "MIXED": 2,
}
batch_size = 32  # graphs per batch in batch/mixed mode
N = 11  # nodes per test graph
F = 7  # node feature dimension
S = 3  # edge feature dimension
A = np.ones((N, N))  # dense all-ones adjacency
X = np.random.normal(size=(N, F))  # node features for one graph
E = np.random.normal(size=(N, N, S))  # edge features, dense (N, N, S) layout
E_single = np.random.normal(size=(N * N, S))  # edge features, flattened layout
def _test_single_mode(layer, sparse=False, edges=False, **kwargs):
    """Build a one-layer model in single mode and check its output shape."""
    x_in = Input(shape=(F,))
    a_in = Input(shape=(None,), sparse=sparse)
    adjacency = sp_matrix_to_sp_tensor(A) if sparse else A
    symbolic = [x_in, a_in]
    concrete = [X, adjacency]
    if edges:
        symbolic.append(Input(shape=(S,)))
        concrete.append(E_single)
    out = layer(**kwargs)(symbolic)
    model = Model(symbolic, out)
    result = model(concrete)
    assert result.shape == (N, kwargs["channels"])
def _test_batch_mode(layer, edges=False, **kwargs):
    """Build a one-layer model in batch mode and check its output shape."""
    symbolic = [Input(shape=(N, F)), Input(shape=(N, N))]
    concrete = [np.stack([X] * batch_size), np.stack([A] * batch_size)]
    if edges:
        symbolic.append(Input(shape=(N, N, S)))
        concrete.append(np.stack([E] * batch_size))
    out = layer(**kwargs)(symbolic)
    model = Model(symbolic, out)
    result = model(concrete)
    assert result.shape == (batch_size, N, kwargs["channels"])
def _test_mixed_mode(layer, sparse=False, edges=False, **kwargs):
    """Build a one-layer model in mixed mode (shared adjacency, batched x)."""
    symbolic = [Input(shape=(N, F)), Input(shape=(N,), sparse=sparse)]
    adjacency = sp_matrix_to_sp_tensor(A) if sparse else A
    concrete = [np.stack([X] * batch_size), adjacency]
    if edges:
        symbolic.append(Input(shape=(N * N, S)))
        concrete.append(np.stack([E_single] * batch_size))
    out = layer(**kwargs)(symbolic)
    model = Model(symbolic, out)
    result = model(concrete)
    assert result.shape == (batch_size, N, kwargs["channels"])
def _test_get_config(layer, **kwargs):
    """Check that a layer can be rebuilt from its own get_config() output."""

    def _strip_names(cfg):
        # Auto-generated layer names never round-trip, so drop them before
        # comparing. Advanced activation objects (e.g. in GeneralConv)
        # carry their own nested auto-generated name.
        cfg.pop("name")
        activation = cfg.get("activation")
        if isinstance(activation, dict) and "class_name" in activation:
            activation["config"].pop("name")

    config = layer(**kwargs).get_config()
    config_new = layer(**config).get_config()
    _strip_names(config)
    _strip_names(config_new)
    assert config_new == config
def _test_preprocess(layer):
    """Preprocessing must keep the adjacency matrix shape unchanged."""
    processed = layer.preprocess(A)
    assert processed.shape == A.shape
def _get_input_from_dtypes(dtypes, sparse=False):
    """Build dummy [x, a] (plus e when a third dtype is given) test inputs."""
    assert len(dtypes) >= 2
    node_features = np.ones((3, 1), dtype=dtypes[0])
    adjacency = np.ones((3, 3), dtype=dtypes[1])
    if sparse:
        adjacency = sp_matrix_to_sp_tensor(adjacency)
    result = [node_features, adjacency]
    if len(dtypes) > 2:
        # A third dtype means the caller also wants edge attributes.
        result.append(np.ones((3 * 3, 1), dtype=dtypes[2]))
    return result
def _test_dtype_compatibility(layer, sparse=False, edges=False, **kwargs):
    """Sweep all dtype combinations through `layer` and require that at least
    20% of them run without raising. Appends the layer's pass ratio to
    dtype_ratio.txt as a side effect."""
    total = positive = 0
    dtypes_all = ["int32", "int64", "float16", "float32", "float64"]
    # One dtype each for x and a, plus one for e when edges are used.
    for dtypes in itertools.product(*([dtypes_all] * (3 if edges else 2))):
        total += 1
        # The layer itself is built with the node-feature dtype.
        layer_instance = layer(**kwargs, dtype=dtypes[0])
        inputs = _get_input_from_dtypes(dtypes, sparse=sparse)
        try:
            layer_instance(inputs)
            print(
                "sparse" if sparse else "dense",
                "".join([f"{dtype:10s}" for dtype in dtypes]),
                "OK",
            )
            positive += 1
        except Exception as e:
            # Incompatible combinations are expected; just record them.
            print(
                "sparse" if sparse else "dense",
                "".join([f"{dtype:10s}" for dtype in dtypes]),
                "FAIL",
                # e
            )
            # raise e
            pass
    print(f"RATIO: {positive / total}")
    with open("dtype_ratio.txt", "a") as f:
        f.write(f"{layer.__name__:20s} {positive / total:.2f}\n")
    assert positive / total >= 0.2
def run_layer(config):
    """
    Test a convolutional layer in every data mode it declares support for.

    `config` keys:
      - "layer": the layer class under test.
      - "modes": list of data modes (values of the global MODES dict).
      - "kwargs": keyword arguments for the layer (including mandatory ones).
      - "dense"/"sparse": whether the layer supports dense/sparse adjacency.
      - "edges": whether the layer supports edge attributes.

    For every supported mode a simple one-layer model is built and run,
    checking that it does not crash and that the output shape is correct;
    the get_config() round-trip, preprocess(), and dtype compatibility are
    verified as well.
    """
    layer = config["layer"]
    kwargs = config["kwargs"]
    edges = config.get("edges", False)

    def _dense_and_sparse(check):
        # Run `check` once per adjacency representation the layer supports.
        if config["dense"]:
            check(layer, sparse=False, edges=edges, **kwargs)
        if config["sparse"]:
            check(layer, sparse=True, edges=edges, **kwargs)

    for mode in config["modes"]:
        if mode == MODES["SINGLE"]:
            _dense_and_sparse(_test_single_mode)
        elif mode == MODES["BATCH"]:
            _test_batch_mode(layer, edges=edges, **kwargs)
        elif mode == MODES["MIXED"]:
            _dense_and_sparse(_test_mixed_mode)
    _test_get_config(layer, **kwargs)
    _test_preprocess(layer)
    # Force deterministic "zeros" initializers for the dtype sweep
    # (note: this mutates the caller's config["kwargs"]).
    for key in kwargs:
        if key.endswith("_initializer"):
            kwargs[key] = "zeros"
    kwargs["kernel_initializer"] = "zeros"
    _dense_and_sparse(_test_dtype_compatibility)
| 7,676 | 28.413793 | 87 | py |
spektral | spektral-master/tests/test_layers/convolutional/test_xenet_conv.py | import numpy as np
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from spektral.layers import XENetConv, XENetConvBatch
from spektral.utils.sparse import sp_matrix_to_sp_tensor
# Not using these tests because they assume certain behaviors that we
# don't follow
"""
dense_config = {
"layer": layers.XENetDenseConv,
"modes": [MODES["BATCH"],],
"kwargs": {"kernel_network": [8], "stack_channels": [2, 4], "node_channels": 64, "edge_channels": 16, "channels" : 64 },
"dense": True,
"sparse": True,
"edges": True,
}
sparse_config = {
"layer": layers.XENetConv,
"modes": [MODES["SINGLE"], MODES["MIXED"]],
"kwargs": {"kernel_network": [8], "stack_channels": [2, 4], "node_channels": 64, "edge_channels": 16, "channels": 64 },
"dense": True,
"sparse": True,
"edges": True,
}
"""
def test_sparse_model_sizes():
    """
    Sanity-check that sparse-mode XENetConv models contain exactly the
    number of trainable parameters we expect for several configurations.
    """
    n_nodes, n_node_feats, n_edge_feats = 5, 4, 3
    X_in = Input(shape=(n_node_feats,), name="X_in")
    A_in = Input(shape=(None,), name="A_in", sparse=True)
    E_in = Input(shape=(n_edge_feats,), name="E_in")
    x = np.ones(shape=(n_nodes, n_node_feats))
    a = sp_matrix_to_sp_tensor(np.ones(shape=(n_nodes, n_nodes)))
    e = np.ones(shape=(n_nodes * n_nodes, n_edge_feats))

    def check_param_count(stack_channels, attention, expected_size):
        X, E = XENetConv(stack_channels, 10, 20, attention)([X_in, A_in, E_in])
        model = Model(inputs=[X_in, A_in, E_in], outputs=[X, E])
        model.compile(optimizer="adam", loss="mean_squared_error")
        print(model.count_params())
        assert model.count_params() == expected_size
        # Run a forward pass too, purely for test coverage.
        model([x, a, e])

    # stack (4+4+3+3+1)*5 + node (4+5+5+1)*10 + edge (5+1)*20 + prelu 5 = 350
    check_param_count([5], False, 350)
    # An int stack spec must behave exactly like a one-element list.
    check_param_count(5, False, 350)
    # Attention adds (5+1)*1 * 2 = 12 parameters on top of the 350.
    check_param_count(5, True, 362)
    # Two stack layers: 750 + 255 + 12 + 150 + 120 + 5 = 1292.
    check_param_count([50, 5], True, 1292)
def test_dense_model_sizes():
    """Same parameter-count checks as the sparse test, in batch/dense mode."""
    n_nodes, n_node_feats, n_edge_feats = 5, 4, 3
    X_in = Input(shape=(n_nodes, n_node_feats), name="X_in")
    A_in = Input(shape=(n_nodes, n_nodes), sparse=False, name="A_in")
    E_in = Input(shape=(n_nodes, n_nodes, n_edge_feats), name="E_in")
    x = np.ones(shape=(1, n_nodes, n_node_feats))
    a = np.ones(shape=(1, n_nodes, n_nodes))
    e = np.ones(shape=(1, n_nodes, n_nodes, n_edge_feats))
    # Knock out one symmetric edge so the adjacency is not all-ones.
    a[0][1][2] = 0
    a[0][2][1] = 0

    def check_param_count(stack_channels, attention, expected_size):
        X, E = XENetConvBatch(stack_channels, 10, 20, attention)([X_in, A_in, E_in])
        model = Model(inputs=[X_in, A_in, E_in], outputs=[X, E])
        model.compile(optimizer="adam", loss="mean_squared_error")
        print(model.count_params())
        assert model.count_params() == expected_size
        # Run a prediction too, purely for test coverage.
        model.predict([x, a, e])

    # stack (4+4+3+3+1)*5 + node (4+5+5+1)*10 + edge (5+1)*20 + prelu 5 = 350
    check_param_count([5], False, 350)
    # An int stack spec must behave exactly like a one-element list.
    check_param_count(5, False, 350)
    # Attention adds (5+1)*1 * 2 = 12 parameters on top of the 350.
    check_param_count(5, True, 362)
    # Two stack layers: 750 + 255 + 12 + 150 + 120 + 5 = 1292.
    check_param_count([50, 5], True, 1292)
def test_dense_model_unknown_size():
    """Dense-mode parameter counts when the node dimension is unknown (None)."""
    n_node_feats, n_edge_feats = 4, 3
    # Build the symbolic inputs with an undefined node count...
    X_in = Input(shape=(None, n_node_feats), name="X_in")
    A_in = Input(shape=(None, None), sparse=False, name="A_in")
    E_in = Input(shape=(None, None, n_edge_feats), name="E_in")
    # ...but feed concrete 5-node data through the model.
    n_nodes = 5
    x = np.ones(shape=(1, n_nodes, n_node_feats))
    a = np.ones(shape=(1, n_nodes, n_nodes))
    e = np.ones(shape=(1, n_nodes, n_nodes, n_edge_feats))
    # Knock out one symmetric edge so the adjacency is not all-ones.
    a[0][1][2] = 0
    a[0][2][1] = 0

    def check_param_count(stack_channels, attention, expected_size):
        X, E = XENetConvBatch(stack_channels, 10, 20, attention)([X_in, A_in, E_in])
        model = Model(inputs=[X_in, A_in, E_in], outputs=[X, E])
        model.compile(optimizer="adam", loss="mean_squared_error")
        print(model.count_params())
        assert model.count_params() == expected_size
        # Run a prediction too, purely for test coverage.
        model.predict([x, a, e])

    # stack (4+4+3+3+1)*5 + node (4+5+5+1)*10 + edge (5+1)*20 + prelu 5 = 350
    check_param_count([5], False, 350)
    # An int stack spec must behave exactly like a one-element list.
    check_param_count(5, False, 350)
    # Attention adds (5+1)*1 * 2 = 12 parameters on top of the 350.
    check_param_count(5, True, 362)
    # Two stack layers: 750 + 255 + 12 + 150 + 120 + 5 = 1292.
    check_param_count([50, 5], True, 1292)
# Allow running the whole suite directly, without pytest.
if __name__ == "__main__":
    test_sparse_model_sizes()
    test_dense_model_sizes()
    test_dense_model_unknown_size()
| 6,662 | 32.822335 | 124 | py |
spektral | spektral-master/tests/test_models/core.py | import numpy as np
import scipy.sparse as sp
import tensorflow as tf
from spektral.data import Dataset, Graph, loaders
# All test tensors use float64 to reduce precision-related flakiness.
tf.keras.backend.set_floatx("float64")
# Data modes exercised by run_model (see below).
MODES = {"SINGLE": 0, "BATCH": 1, "MIXED": 2, "DISJOINT": 3}
batch_size = 16  # graphs per batch outside single mode
n_nodes = 11  # nodes per test graph
n_node_features = 7  # node feature dimension
n_edge_features = 3  # edge feature dimension
def _get_graph(n_nodes, n_features, n_edge_features=None, sparse=False):
    """Create one random Graph, with optional edge features / sparse adjacency."""
    x = np.random.rand(n_nodes, n_features)
    # Random binary adjacency, stored as float32.
    a = np.random.randint(0, 2, (n_nodes, n_nodes)).astype("f4")
    e = None
    if n_edge_features is not None:
        # One attribute row per non-zero adjacency entry.
        e = np.random.rand(np.count_nonzero(a), n_edge_features)
    if sparse:
        a = sp.csr_matrix(a)
    return Graph(x=x, a=a, e=e)
class TestDataset(Dataset):
    # Minimal Dataset wrapper around a pre-built list of graphs.
    def __init__(self, graphs):
        # Stash the graphs before calling super().__init__(), which is
        # presumably what triggers read() — TODO confirm against
        # spektral.data.Dataset.
        self.graphs = graphs
        super().__init__()

    def read(self):
        # Hand back the pre-built graphs unchanged.
        return self.graphs
def _test_single_mode(model, sparse=False, edges=False, **kwargs):
    """Run one forward pass of `model` in single mode; any exception fails."""
    graph = _get_graph(
        n_nodes=n_nodes,
        n_features=n_node_features,
        n_edge_features=n_edge_features if edges else None,
        sparse=sparse,
    )
    loader = loaders.SingleLoader(TestDataset([graph]), epochs=1)
    inputs = list(loader)[0]
    model(**kwargs)(inputs)
def _test_disjoint_mode(model, sparse=False, edges=False, **kwargs):
    """Run one forward pass of `model` in disjoint mode; any exception fails."""
    graphs = [
        _get_graph(
            n_nodes=n_nodes,
            n_features=n_node_features,
            n_edge_features=n_edge_features if edges else None,
            sparse=sparse,
        )
        for _ in range(batch_size)
    ]
    loader = loaders.DisjointLoader(TestDataset(graphs), epochs=1, batch_size=batch_size)
    model(**kwargs)(next(loader))
def _test_batch_mode(model, edges=False, **kwargs):
    """Run one forward pass of `model` in batch mode; any exception fails."""
    graphs = [
        _get_graph(
            n_nodes=n_nodes,
            n_features=n_node_features,
            n_edge_features=n_edge_features if edges else None,
        )
        for _ in range(batch_size)
    ]
    loader = loaders.BatchLoader(TestDataset(graphs), epochs=1, batch_size=batch_size)
    model(**kwargs)(next(loader))
def _test_mixed_mode(model, sparse=False, edges=False, **kwargs):
    # Run one forward pass of `model` in mixed mode (shared adjacency,
    # batched features); any exception fails the test.
    graphs = []
    for i in range(batch_size):
        graph = _get_graph(
            n_nodes=n_nodes,
            n_features=n_node_features,
            n_edge_features=n_edge_features if edges else None,
            sparse=sparse,
        )
        if i == 0:
            # Move the first graph's adjacency onto the dataset itself;
            # presumably MixedLoader reads the shared structure from
            # dataset.a. NOTE(review): the remaining graphs keep their own
            # adjacency — confirm MixedLoader ignores per-graph `a`.
            a = graph.a
            graph.a = None
        graphs.append(graph)
    dataset = TestDataset(graphs)
    dataset.a = a
    loader = loaders.MixedLoader(dataset, epochs=1, batch_size=batch_size)
    inputs = loader.__next__()
    model_instance = model(**kwargs)
    output = model_instance(inputs)
def _test_get_config(layer, **kwargs):
    """Check that `layer` can be re-instantiated from its own get_config()."""

    def _drop_activation_name(cfg):
        # Advanced activation layers (e.g. in GeneralConv) serialize with an
        # auto-generated name that never round-trips; strip it before comparing.
        activation = cfg.get("activation")
        if isinstance(activation, dict) and "class_name" in activation:
            activation["config"].pop("name")

    config = layer(**kwargs).get_config()
    config_new = layer(**config).get_config()
    _drop_activation_name(config)
    _drop_activation_name(config_new)
    assert config_new == config
def run_model(config):
    """
    Test a model class in every data mode it declares support for.

    `config` keys:
      - "model": the model class under test.
      - "modes": list of data modes (values of the global MODES dict).
      - "kwargs": keyword arguments for the model (including mandatory ones).
      - "dense"/"sparse": whether the model supports dense/sparse adjacency.
      - "edges": whether the model supports edge attributes.

    For each supported mode one forward pass is run (a crash fails the
    test), and the get_config() round-trip is verified.
    """
    model = config["model"]
    kwargs = config["kwargs"]
    edges = config.get("edges", False)

    def _dense_and_sparse(check):
        # Run `check` once per adjacency representation the model supports.
        if config["dense"]:
            check(model, sparse=False, edges=edges, **kwargs)
        if config["sparse"]:
            check(model, sparse=True, edges=edges, **kwargs)

    for mode in config["modes"]:
        if mode == MODES["SINGLE"]:
            _dense_and_sparse(_test_single_mode)
        elif mode == MODES["BATCH"]:
            _test_batch_mode(model, edges=edges, **kwargs)
        elif mode == MODES["MIXED"]:
            _dense_and_sparse(_test_mixed_mode)
        elif mode == MODES["DISJOINT"]:
            _dense_and_sparse(_test_disjoint_mode)
    _test_get_config(model, **kwargs)
| 6,747 | 28.858407 | 87 | py |
NLI4CT | NLI4CT-main/pipeline/task1_entailment.py | import torch
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from transformers import Trainer, TrainingArguments
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from prepare_data import generate_nli_data
# Locations of the NLI4CT data splits.
TRAIN_PATH = "data/train.json"
DEV_PATH = "data/dev.json"
TEST_PATH = "data/test.json"
#Torch dataset used in the models. Consists of encodings of training instances and of labels.
#One training instance is: BERT_TOKENIZER("claim [SEP] evidence_text").
class CtDataset(torch.utils.data.Dataset):
    """Torch dataset pairing tokenizer encodings with entailment labels.

    One instance corresponds to BERT_TOKENIZER("claim [SEP] evidence_text").
    """

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        # Slice every tokenizer field (input_ids, attention_mask, ...) at idx
        # and attach the gold label under the key the HF Trainer expects.
        sample = {name: torch.tensor(field[idx]) for name, field in self.encodings.items()}
        sample['labels'] = torch.tensor(self.labels[idx])
        return sample

    def __len__(self):
        return len(self.labels)
# Candidate pretrained NLI checkpoints considered for fine-tuning.
models = ["ynie/xlnet-large-cased-snli_mnli_fever_anli_R1_R2_R3-nli",
          "ynie/albert-xxlarge-v2-snli_mnli_fever_anli_R1_R2_R3-nli",
          "MoritzLaurer/DeBERTa-v3-large-mnli-fever-anli-ling-wanli",
          "microsoft/deberta-v2-xlarge-mnli",
          "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli"]
#Compute the metrics (accuracy, precision, recall, F1) for a give prediction.
def compute_metrics(pred):
    """Return accuracy/precision/recall/F1 for a HF Trainer EvalPrediction."""
    gold = pred.label_ids
    # Predicted class = argmax over the output logits.
    guessed = pred.predictions.argmax(-1)
    return {
        "accuracy": accuracy_score(gold, guessed),
        # NOTE(review): precision/recall use sklearn's default (binary)
        # averaging while F1 is weighted — confirm this mix is intended.
        "precision": precision_score(gold, guessed),
        "recall": recall_score(gold, guessed),
        "f1": f1_score(gold, guessed, average="weighted"),
    }
#Training loop.
def train(model_name):
    """Fine-tune a pretrained NLI checkpoint on the NLI4CT entailment task.

    :param model_name: HuggingFace hub id of the pretrained model, e.g.
        "MoritzLaurer/DeBERTa-v3-large-mnli-fever-anli-ling-wanli".

    Side effects: writes training checkpoints under ``finetuned-model``
    and saves the final model to ``model-nli``.
    """
    #Load the models. Adjust max instance length to fit your machine.
    tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=1024)
    model = AutoModelForSequenceClassification.from_pretrained(model_name,
                    num_labels=2, ignore_mismatched_sizes=True)
    #Generate joint claim+[SEP]+evidence data.
    joint_train, labels_train = generate_nli_data(TRAIN_PATH)
    joint_dev, labels_dev = generate_nli_data(DEV_PATH)
    #Tokenize the data.
    encoded_train = tokenizer(joint_train, return_tensors='pt',
                    truncation_strategy='only_first', add_special_tokens=True, padding=True)
    encoded_dev = tokenizer(joint_dev, return_tensors='pt',
                    truncation_strategy='only_first', add_special_tokens=True, padding=True)
    #Convert data into datasets
    train_dataset = CtDataset(encoded_train, labels_train)
    dev_dataset = CtDataset(encoded_dev, labels_dev)
    #Define the batch size to fit your GPU memory.
    batch_size = 16
    #BUG FIX: this previously read `len(train_data["claims"])`, but
    #`train_data` is undefined in this scope (NameError at runtime).
    #Log once per epoch based on the number of training instances,
    #matching task2_evidence.py.
    logging_steps = len(joint_train) // batch_size
    output_name = "finetuned-model"
    training_args = TrainingArguments(output_dir=output_name,
                    per_device_train_batch_size=batch_size,
                    per_device_eval_batch_size=batch_size,
                    #for faster training time
                    dataloader_pin_memory=True,
                    dataloader_num_workers=4,
                    gradient_accumulation_steps=2,
                    fp16=True,
                    #training hyperparameters
                    num_train_epochs=5,
                    learning_rate=5e-6,
                    weight_decay=0.01,
                    warmup_ratio=0.06,
                    #other parameters
                    evaluation_strategy="epoch",
                    save_strategy="no",
                    disable_tqdm=False,
                    logging_steps=logging_steps,
                    push_to_hub=False)
    trainer = Trainer(model=model, args=training_args,
                    compute_metrics=compute_metrics,
                    train_dataset=train_dataset,
                    eval_dataset=dev_dataset,
                    tokenizer=tokenizer)
    #Start the training process.
    trainer.train()
    #Save the fine-tuned NLI (textual entailment) model.
    trainer.save_model("model-nli")
NLI4CT | NLI4CT-main/pipeline/task2_evidence.py | import torch
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from transformers import Trainer, TrainingArguments
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from prepare_data import generate_evidence_data
# Locations of the NLI4CT data splits.
TRAIN_PATH = "data/train.json"
DEV_PATH = "data/dev.json"
TEST_PATH = "data/test.json"
#Torch dataset used in the models. Consists of encodings of training instances and of labels.
#One training instance is: BERT_TOKENIZER("claim [SEP] evidence_text").
class CtDataset(torch.utils.data.Dataset):
    """Pairs tokenizer encodings with binary evidence-selection labels."""

    def __init__(self, encodings, labels):
        self.encodings = encodings  # dict of tokenizer fields, one row per instance
        self.labels = labels  # one int label per instance

    def __getitem__(self, idx):
        # Tensorize each tokenizer field and attach the gold label under
        # the key the HF Trainer expects.
        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
        item['labels'] = torch.tensor(self.labels[idx])
        return item

    def __len__(self):
        return len(self.labels)
# Candidate pretrained NLI checkpoints considered for fine-tuning.
models = ["ynie/xlnet-large-cased-snli_mnli_fever_anli_R1_R2_R3-nli",
          "ynie/albert-xxlarge-v2-snli_mnli_fever_anli_R1_R2_R3-nli",
          "MoritzLaurer/DeBERTa-v3-large-mnli-fever-anli-ling-wanli",
          "microsoft/deberta-v2-xlarge-mnli",
          "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli"]
#Compute the metrics (accuracy, precision, recall, F1) for a give prediction.
def compute_metrics(pred):
    """Return accuracy/precision/recall/F1 for a HF Trainer EvalPrediction."""
    labels = pred.label_ids
    # Predicted class = argmax over the output logits.
    preds = pred.predictions.argmax(-1)
    # NOTE(review): F1 uses weighted averaging while precision/recall use
    # sklearn's default (binary) averaging — confirm this mix is intended.
    f1 = f1_score(labels, preds, average="weighted")
    acc = accuracy_score(labels, preds)
    prec = precision_score(labels, preds)
    recall = recall_score(labels, preds)
    return {"accuracy": acc, "precision" : prec, "recall" : recall, "f1": f1}
#Training loop.
def train(model_name):
    """Fine-tune `model_name` as a binary evidence-selection classifier.

    :param model_name: HuggingFace hub id of the pretrained model.

    Side effects: writes training checkpoints under ``finetuned-model``;
    the final model is saved by the ``trainer.save_model`` call below.
    """
    #Load the models. Adjust max instance length to fit your machine.
    tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=128)
    model = AutoModelForSequenceClassification.from_pretrained(model_name,
                    num_labels=2, ignore_mismatched_sizes=True)
    #Generate joint claim+[SEP]+candidate_sentence data
    joint_train, labels_train = generate_evidence_data(TRAIN_PATH)
    joint_dev, labels_dev = generate_evidence_data(DEV_PATH)
    #Tokenize the data.
    encoded_train = tokenizer(joint_train, return_tensors='pt',
                    truncation_strategy='only_first', add_special_tokens=True, padding=True)
    encoded_dev = tokenizer(joint_dev, return_tensors='pt',
                    truncation_strategy='only_first', add_special_tokens=True, padding=True)
    #Convert data into datasets
    train_dataset = CtDataset(encoded_train, labels_train)
    dev_dataset = CtDataset(encoded_dev, labels_dev)
    #Define the batch size to fit your GPU memory.
    batch_size = 16
    #Log once per epoch.
    logging_steps = len(joint_train) // batch_size
    output_name = f"finetuned-model"
    training_args = TrainingArguments(output_dir=output_name,
                    per_device_train_batch_size=batch_size,
                    per_device_eval_batch_size=batch_size,
                    #for faster training time
                    dataloader_pin_memory=True,
                    dataloader_num_workers=4,
                    gradient_accumulation_steps=2,
                    fp16=True,
                    #training hyperparameters
                    num_train_epochs=5,
                    learning_rate=5e-6,
                    weight_decay=0.01,
                    warmup_ratio=0.06,
                    #other parameters
                    evaluation_strategy="epoch",
                    save_strategy="no",
                    disable_tqdm=False,
                    logging_steps=logging_steps,
                    push_to_hub=False)
    trainer = Trainer(model=model, args=training_args,
                    compute_metrics=compute_metrics,
                    train_dataset=train_dataset,
                    eval_dataset=dev_dataset,
                    tokenizer=tokenizer)
    #Start the training process.
    trainer.train()
    #Save the fine-tuned evidence-selection model.
trainer.save_model("model-evidence_selection") | 4,437 | 40.092593 | 97 | py |
NLI4CT | NLI4CT-main/joint/main.py | import torch
import torch.nn as nn
from sklearn.metrics import f1_score, precision_score, recall_score
from tqdm import tqdm
from torch.utils.data import DataLoader
from transformers import AutoModel, AutoTokenizer, get_cosine_schedule_with_warmup, AdamW
from model import ModelForSequenceClassification
from prepare_joint import generate_joint_data, generate_masks
# Locations of the NLI4CT data splits.
TRAIN_PATH = "data/train.json"
DEV_PATH = "data/dev.json"
TEST_PATH = "data/test.json"
#DeBERTa-v3-large, additionally fine-tuned on popular NLI datasets.
DEBERTA_PATH = "MoritzLaurer/DeBERTa-v3-large-mnli-fever-anli-ling-wanli"
# Hard-coded to the first CUDA GPU; there is no CPU fallback.
device = torch.device('cuda:0')
'''
Torch dataset used for the model.
encoded: DeBERTa-encoded representation of a training instance (claim + all sentences)
labels: evidence labels
nlis: NLI/entailment labels
'''
class CtDataset(torch.utils.data.Dataset):
    """Dataset of encoded instances with evidence and NLI labels.

    `encodings` holds the tokenizer output for each instance, `labels`
    the per-sentence evidence labels, and `nlis` the entailment labels.
    """

    def __init__(self, encodings, labels, nlis):
        self.encoded = encodings
        self.labels = labels
        self.nlis = nlis

    def __getitem__(self, idx):
        # Tensorize the tokenizer fields; the evidence and NLI labels are
        # returned as-is (they are padded/collated later at batch time).
        sample = {k: torch.tensor(v[idx]) for k, v in self.encoded.items()}
        sample['labels'] = self.labels[idx]
        sample['nli'] = self.nlis[idx]
        return sample

    def __len__(self):
        return len(self.labels)
def batch_evidence_label(labels, padding_idx = 2):
    """Pad per-instance evidence labels into a (batch, max_len) LongTensor.

    :param labels: iterable of per-instance label sequences; elements are
        int-convertible (ints or "0"/"1" strings).
    :param padding_idx: value used to fill positions past each sequence.
    :return: (padded LongTensor, list of unpadded int label lists).
    """
    longest = max(len(row) for row in labels)
    padded = torch.full((len(labels), longest), padding_idx, dtype=torch.float)
    as_lists = []
    for i, row in enumerate(labels):
        ints = [int(v) for v in row]
        for j, v in enumerate(ints):
            padded[i, j] = v
        as_lists.append(ints)
    return padded.long(), as_lists
def batch_sentence_mask(masks):
    """Stack per-instance sentence masks into one (batch, max_sents, L) tensor.

    Masks with fewer sentence rows than the longest one in the batch are
    padded with all-zero rows.

    :param masks: list of 2-D tensors, each (n_sentences_i, mask_len).
    :return: tensor of shape (len(masks), max_i n_sentences_i, mask_len).
    """
    # The original computed the max in a manual loop and initialized the
    # result list twice; max() does the same in one pass.
    max_sentences = max(m.size(0) for m in masks)
    padded = []
    for m in masks:
        if m.size(0) < max_sentences:
            # Pad missing sentence rows with zeros.
            m = torch.cat((m, torch.zeros((max_sentences - m.size(0), m.size(1)))))
        padded.append(m.view(1, m.size(0), m.size(1)))
    return torch.cat(padded)
def token_idx_by_sentence(input_ids, sep_token_id, model_name):
    """
    Compute the token indices matrix of the BERT output.

    input_ids: (batch_size, paragraph_len)
    batch_indices, indices_by_batch, mask: (batch_size, N_sentence, N_token)
    bert_out: (batch_size, paragraph_len, BERT_dim)
    bert_out[batch_indices, indices_by_batch, :]: (batch_size, N_sentence, N_token, BERT_dim)
    """
    padding_idx = -1
    # Boolean map of separator-token positions; sentence boundaries are
    # defined by consecutive separator tokens.
    sep_tokens = (input_ids == sep_token_id).bool()
    paragraph_lens = torch.sum(sep_tokens,1).numpy().tolist()
    # Absolute position indices, one row per batch element.
    indices = torch.arange(sep_tokens.size(-1)).unsqueeze(0).expand(sep_tokens.size(0),-1)
    # Per-paragraph lists of separator positions.
    sep_indices = torch.split(indices[sep_tokens],paragraph_lens)
    paragraph_lens = []
    all_word_indices = []
    for paragraph in sep_indices:
        if "large" in model_name:
            # "large" checkpoints presumably carry one extra leading
            # separator that must be skipped — TODO confirm per tokenizer.
            paragraph = paragraph[1:]
        # One index range per sentence: the tokens strictly between
        # consecutive separators (the final pair is excluded, hence -2).
        word_indices = [torch.arange(paragraph[i]+1, paragraph[i+1]+1) for i in range(paragraph.size(0)-2)]
        paragraph_lens.append(len(word_indices))
        all_word_indices.extend(word_indices)
    # First pad sentences to a common token length...
    indices_by_sentence = nn.utils.rnn.pad_sequence(all_word_indices, batch_first=True, padding_value=padding_idx)
    indices_by_sentence_split = torch.split(indices_by_sentence,paragraph_lens)
    # ...then pad paragraphs to a common sentence count.
    indices_by_batch = nn.utils.rnn.pad_sequence(indices_by_sentence_split, batch_first=True, padding_value=padding_idx)
    batch_indices = torch.arange(sep_tokens.size(0)).unsqueeze(-1).unsqueeze(-1).expand(-1,indices_by_batch.size(1),indices_by_batch.size(-1))
    # Padding slots hold -1, so >= 0 marks real token positions.
    mask = (indices_by_batch>=0)
    return batch_indices.long(), indices_by_batch.long(), mask.long()
#Function for evaluating the model output.
def evaluation(model, dataset, data_masks):
    """Evaluate `model` on `dataset` and return macro-averaged scores.

    Args:
        model: joint evidence-selection + NLI model.
        dataset: CtDataset yielding encodings, evidence-label strings ("labels")
            and NLI class ids ("nli").
        data_masks: per-instance sentence masks, in the same order as `dataset`
            (shuffle must stay False so the slicing below stays aligned).

    Returns:
        (nli_f1, nli_precision, nli_recall,
         evidence_f1, evidence_precision, evidence_recall)
    """
    model.eval()
    evidence_predictions = list()
    evidence_labels = list()
    nli_preds = list()
    nli_labels = list()
    batch_size = 4
    with torch.no_grad():
        # Use the batch_size variable instead of a second hard-coded 4 so the
        # DataLoader and the data_masks slicing can never drift apart.
        for i, batch in enumerate(tqdm(DataLoader(dataset, batch_size=batch_size, shuffle=False))):
            input_ids = batch['input_ids']
            attention_mask = batch['attention_mask']
            # NOTE(review): sep id 102 is BERT's [SEP]; train() uses 2
            # (DeBERTa/RoBERTa style) -- confirm which tokenizer is in use here.
            transformation_indices = token_idx_by_sentence(input_ids, 102, "bert")
            transformation_indices = [tensor.to(device) for tensor in transformation_indices]
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            padded_evidence_label, evidence_label = batch_evidence_label(batch["labels"], padding_idx = 2)
            sentence_masks = batch_sentence_mask(data_masks[i*batch_size:i*batch_size+batch_size])
            sentence_masks = sentence_masks.to(device)
            nli_label = batch["nli"].to(device)
            evidence_out, evidence_preds, evidence_loss, nli_out, nli_loss = \
                model(input_ids, attention_mask, nli_label=nli_label,
                      evidence_label = padded_evidence_label.to(device),
                      transformation_indices=transformation_indices)
            batch_labels = batch["labels"]
            # A sentence is predicted as evidence when its class-1 probability
            # exceeds 0.5.
            batch_selected = (torch.softmax(evidence_out, dim=2)[:,:,1] > 0.5).tolist()
            for idx in range(len(batch_selected)):
                selected = [1 if l else 0 for l in batch_selected[idx]]
                evidence_predictions.extend(selected)
                true = [1 if c=="1" else 0 for c in batch_labels[idx]]
                evidence_labels.extend(true)
                # Truncation can make the number of candidate sentences differ
                # from the number of labels; pad the shorter side with zeros so
                # the sklearn metrics below see equal-length lists.
                if len(evidence_labels) > len(evidence_predictions):
                    miss = len(evidence_labels) - len(evidence_predictions)
                    evidence_predictions.extend([0] * miss)
                elif len(evidence_labels) < len(evidence_predictions):
                    miss = len(evidence_predictions) - len(evidence_labels)
                    evidence_labels.extend([0] * miss)
            nli_labels.extend(nli_label.cpu().numpy().tolist())
            nli_preds.extend(nli_out)
    nli_f1 = f1_score(nli_labels,nli_preds, average="macro")
    nli_precision = precision_score(nli_labels,nli_preds,average="macro")
    nli_recall = recall_score(nli_labels,nli_preds,average="macro")
    evidence_f1 = f1_score(evidence_labels,evidence_predictions,average="macro")
    evidence_precision = precision_score(evidence_labels,evidence_predictions,average="macro")
    evidence_recall = recall_score(evidence_labels,evidence_predictions,average="macro")
    return nli_f1, nli_precision, nli_recall, evidence_f1, evidence_precision, evidence_recall
#Main training loop.
def train():
    """Train the joint evidence-selection + NLI model, checkpointing whichever
    epoch scores best (nli_f1 * evidence_f1) on the dev set."""
    #Load the base model.
    deberta = AutoModel.from_pretrained(DEBERTA_PATH)
    deberta = deberta.to(device)
    #Instantiate the developed model.
    model = ModelForSequenceClassification(deberta)
    model.to(device)
    # Separate learning rates: encoder at 1e-5, task heads at 5e-6.
    settings = [{'params': model.deberta.parameters(), 'lr': 1e-5}]
    for module in model.extra_modules:
        settings.append({'params': module.parameters(), 'lr': 5e-6})
    #Load the tokenizer.
    tokenizer = AutoTokenizer.from_pretrained(DEBERTA_PATH, model_max_length=512)
    #Prepare and generate all data for the model.
    joint_train, nli_labels_train, evidence_labels_train = generate_joint_data(TRAIN_PATH)
    joint_dev, nli_labels_dev, evidence_labels_dev = generate_joint_data(TEST_PATH)
    encoded_train = tokenizer(joint_train, return_tensors='pt',
                truncation_strategy='only_first', add_special_tokens=True, padding=True)
    encoded_dev = tokenizer(joint_dev, return_tensors='pt',
                truncation_strategy='only_first', add_special_tokens=True, padding=True)
    train_masks = generate_masks(encoded_train)
    dev_masks = generate_masks(encoded_dev)
    train_dataset = CtDataset(encoded_train, evidence_labels_train, nli_labels_train)
    dev_dataset = CtDataset(encoded_dev, evidence_labels_dev, nli_labels_dev)
    #Hyperparameters -- defined BEFORE the scheduler, which needs `epochs`.
    #(Previously `epochs` was referenced before assignment -> NameError.)
    epochs = 5
    batch_size = 1
    update_step = 10
    NUM_ACCUMULATION_STEPS = 4
    prev_performance = 0
    optimizer = torch.optim.AdamW(settings)
    scheduler = get_cosine_schedule_with_warmup(optimizer, 0, epochs)
    model.train()
    #Main training loop.
    for epoch in range(epochs):
        model.train()
        tq = tqdm(DataLoader(train_dataset, batch_size=batch_size, shuffle=False))
        # Gradients are zeroed only after an optimizer step: zeroing at the top
        # of every iteration (as before) discarded the accumulated gradients
        # and made NUM_ACCUMULATION_STEPS a no-op.
        optimizer.zero_grad()
        for i, batch in enumerate(tq):
            input_ids = batch['input_ids']
            attention_mask = batch['attention_mask']
            transformation_indices = token_idx_by_sentence(input_ids, 2, "bert")
            transformation_indices = [tensor.to(device) for tensor in transformation_indices]
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            padded_evidence_label, evidence_label = batch_evidence_label(batch["labels"], padding_idx = 2)
            sentence_masks = batch_sentence_mask(train_masks[i*batch_size:i*batch_size+batch_size])
            sentence_masks = sentence_masks.to(device)
            nli_label = batch["nli"].to(device)
            # sentence_masks is passed as a keyword: passing it positionally
            # bound it to forward()'s `nli_label` parameter, so the explicit
            # nli_label keyword raised "got multiple values" at call time.
            evidence_out, evidence_preds, evidence_loss, nli_out, nli_loss = \
                model(input_ids, attention_mask, sentence_masks=sentence_masks,
                      nli_label=nli_label,
                      evidence_label = padded_evidence_label.to(device),
                      transformation_indices=transformation_indices)
            evidence_loss *= 6. #LOSS RATIO
            loss = evidence_loss + nli_loss
            # Scale so accumulated gradients average over the virtual batch.
            loss = loss / NUM_ACCUMULATION_STEPS
            try:
                loss.backward()
            except Exception:
                # Best-effort: skip batches whose backward pass fails (e.g.
                # OOM) after clearing any partial gradients.
                optimizer.zero_grad()
                continue
            if ((i + 1) % NUM_ACCUMULATION_STEPS == 0) or (i + 1 == len(train_dataset)):
                optimizer.step()
                optimizer.zero_grad()
            if i % update_step == update_step - 1:
                print(f'Epoch {epoch}, iter {i}, loss: {round(loss.item(), 4)}')
        scheduler.step()
        train_score = evaluation(model, train_dataset, train_masks)
        print(f'Epoch {epoch}, train nli f1 p r: %.4f, %.4f, %.4f, evidence f1 p r: %.4f, %.4f, %.4f' % train_score)
        dev_score = evaluation(model, dev_dataset, dev_masks)
        print(f'Epoch {epoch}, dev nli f1 p r: %.4f, %.4f, %.4f, evidence f1 p r: %.4f, %.4f, %.4f' % dev_score)
        # Model selection: product of NLI f1 and evidence f1 on dev.
        dev_perf = dev_score[0] * dev_score[3]
        print(dev_perf)
        if dev_perf >= prev_performance:
            torch.save(model.state_dict(), "checkpoint.model")
            prev_performance = dev_perf
            print("New model saved.")
        else:
            print("Skip saving model.")
| 11,141 | 39.369565 | 142 | py |
NLI4CT | NLI4CT-main/joint/prepare_joint.py | import json
import pandas as pd
TRAIN_DATA = "data/train.json"
def generate_multi_data(file_path):
    """Load claims plus labels and build joint claim+CTR instances.

    Each instance concatenates the claim with every candidate clinical-trial
    sentence: "claim [SEP] sent_1 [SEP] ... [SEP] sent_n [SEP] " (note the
    trailing separator).  For "Comparison" claims the secondary trial's
    sentences follow the primary's.

    Args:
        file_path: path to the claims json file (e.g. data/train.json).

    Returns:
        (joint_data, nli_labels, evidence_labels) where evidence_labels[i] is
        a string of '0'/'1' flags, one per candidate sentence of instance i.
    """
    df = pd.read_json(file_path)
    df = df.transpose()
    #Extract the claims and NLI labels (Entailment/Contradiction).
    claims = df.Statement.tolist()
    nli_labels = df.Label.tolist()
    primary_indices = df.Primary_evidence_index.tolist()
    secondary_indices = df.Secondary_evidence_index.tolist()
    primary_cts = df.Primary_id.tolist()
    secondary_cts = df.Secondary_id.tolist()
    types = df.Type.tolist()
    sections = df.Section_id.tolist()

    def _load_section(ct_id, section):
        """Return the sentence list of one section of one clinical trial report."""
        with open("data/CTs/" + ct_id + ".json", 'r') as f:
            return json.load(f)[section]

    #Process the clinical trial report files.
    primary_evidence_sentences = list()
    secondary_evidence_sentences = list()
    for idx in range(len(claims)):
        primary_evidence_sentences.append(_load_section(primary_cts[idx], sections[idx]))
        if types[idx] == "Comparison":
            secondary_evidence_sentences.append(_load_section(secondary_cts[idx], sections[idx]))
        else:
            secondary_evidence_sentences.append(list())
    #Generate the joint data instances and labels of evidence sentences.
    joint_data = list()
    evidence_labels = list()
    for claim_id in range(len(claims)):
        #One data instance is a concatenation of the claim and all sentences
        #from the CTR(s), each followed by " [SEP] " (including a trailing one).
        parts = [claims[claim_id]]
        flags = []
        primary_sents = primary_evidence_sentences[claim_id]
        parts.extend(primary_sents)
        flags.extend(1 if sid in primary_indices[claim_id] else 0
                     for sid in range(len(primary_sents)))
        if types[claim_id] == "Comparison":
            secondary_sents = secondary_evidence_sentences[claim_id]
            parts.extend(secondary_sents)
            flags.extend(1 if sid in secondary_indices[claim_id] else 0
                         for sid in range(len(secondary_sents)))
        joint_data.append(" [SEP] ".join(parts) + " [SEP] ")
        #Evidence string of 0's and 1's: whether the i-th candidate sentence
        #is or is not evidence.
        evidence_labels.append("".join(str(f) for f in flags))
    return joint_data, nli_labels, evidence_labels
import torch
class CtDataset(torch.utils.data.Dataset):
    """Torch dataset pairing tokenized inputs with evidence and NLI labels."""

    def __init__(self, encodings, labels, stances):
        # encodings: tokenizer output (dict of per-instance sequences);
        # labels: evidence-label strings; stances: NLI class ids.
        self.encoded = encodings
        self.labels = labels
        self.stances = stances

    def __getitem__(self, idx):
        """Return one instance: tensorized encoding fields plus both labels."""
        sample = {}
        for field, values in self.encoded.items():
            sample[field] = torch.tensor(values[idx])
        sample['labels'] = self.labels[idx]
        sample['stance'] = self.stances[idx]
        return sample

    def __len__(self):
        """Number of instances in the dataset."""
        return len(self.labels)
| 3,410 | 36.076087 | 94 | py |
NLI4CT | NLI4CT-main/joint/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class ClassificationHead(nn.Module):
    """Head for sentence-level classification tasks: a tanh-activated hidden
    layer followed by a linear projection to `n_labels` logits, with dropout
    applied before each linear layer."""

    def __init__(self, hidden_dim, n_labels, hidden_dropout_prob = 0.1):
        super().__init__()
        # Construction order (dense before out_proj) is kept so parameter
        # initialisation and checkpoint layouts match the original.
        self.dense = nn.Linear(hidden_dim, hidden_dim)
        self.dropout = nn.Dropout(hidden_dropout_prob)
        self.out_proj = nn.Linear(hidden_dim, n_labels)

    def forward(self, x, **kwargs):
        """Map (..., hidden_dim) features to (..., n_labels) logits."""
        hidden = self.dense(self.dropout(x))
        hidden = self.dropout(torch.tanh(hidden))
        return self.out_proj(hidden)
#Applies a linear weighting / self-attention layer.
class WordAttention(nn.Module):
    """
    Attention-pool token representations into one vector per sentence.

    x: (BATCH_SIZE, N_sentence, N_token, INPUT_SIZE)
    token_mask: (batch_size, N_sentence, N_token) -- 1/True for real tokens
    out: (BATCH_SIZE, N_sentence, INPUT_SIZE)
    mask: (BATCH_SIZE, N_sentence)
    """
    def __init__(self, INPUT_SIZE, PROJ_SIZE, dropout = 0.0):
        super(WordAttention, self).__init__()
        self.activation = torch.tanh
        self.att_proj = nn.Linear(INPUT_SIZE, PROJ_SIZE)
        self.dropout = nn.Dropout(dropout)
        self.att_scorer = nn.Linear(PROJ_SIZE, 1)

    def forward(self, x, token_mask):
        """Return (sentence_reps, sentence_mask); masked tokens get no weight."""
        proj_input = self.att_proj(self.dropout(x.view(-1, x.size(-1))))
        proj_input = self.dropout(self.activation(proj_input))
        raw_att_scores = self.att_scorer(proj_input).squeeze(-1).view(x.size(0),x.size(1),x.size(2)) # (Batch_size, N_sentence, N_token)
        # ~token_mask.bool() accepts both integer and bool masks; the previous
        # (1 - token_mask).bool() raised on bool tensors in modern PyTorch
        # (subtraction is not defined for bool).
        att_scores = F.softmax(raw_att_scores.masked_fill(~token_mask.bool(), float('-inf')), dim=-1)
        # Fully-masked sentences yield all-NaN softmax rows; zero them out.
        att_scores = torch.where(torch.isnan(att_scores), torch.zeros_like(att_scores), att_scores)
        batch_att_scores = att_scores.view(-1, att_scores.size(-1)) # (Batch_size * N_sentence, N_token)
        # Weighted sum of token vectors per sentence.
        out = torch.bmm(batch_att_scores.unsqueeze(1), x.view(-1, x.size(2), x.size(3))).squeeze(1)
        # (Batch_size * N_sentence, INPUT_SIZE)
        out = out.view(x.size(0), x.size(1), x.size(-1))
        # A sentence counts as present iff its first token is unmasked.
        mask = token_mask[:,:,0]
        return out, mask
class DynamicSentenceAttention(nn.Module):
    """
    Attention-pool sentence representations into a paragraph representation,
    restricted to sentences that are both unmasked and flagged valid.

    input: (BATCH_SIZE, N_sentence, INPUT_SIZE)
    output: (BATCH_SIZE, INPUT_SIZE)
    """
    def __init__(self, INPUT_SIZE, PROJ_SIZE, REC_HID_SIZE = None, dropout = 0.1):
        super(DynamicSentenceAttention, self).__init__()
        self.activation = torch.tanh
        self.att_proj = nn.Linear(INPUT_SIZE, PROJ_SIZE)
        self.dropout = nn.Dropout(dropout)
        if REC_HID_SIZE is None:
            self.contextualized = False
            self.att_scorer = nn.Linear(PROJ_SIZE, 2)
        else:
            # Optional LSTM-contextualised scorer variant (kept for checkpoint
            # compatibility; forward() relies on externally supplied scores).
            self.contextualized = True
            self.lstm = nn.LSTM(PROJ_SIZE, REC_HID_SIZE, bidirectional = False, batch_first = True)
            self.att_scorer = nn.Linear(REC_HID_SIZE, 2)

    def forward(self, sentence_reps, sentence_mask, att_scores, valid_scores):
        """Weighted sum of sentence_reps over unmasked valid sentences.

        sentence_reps: (BATCH_SIZE, N_sentence, INPUT_SIZE)
        sentence_mask, att_scores, valid_scores: (BATCH_SIZE, N_sentence)
        returns: (BATCH_SIZE, INPUT_SIZE)
        """
        keep = torch.logical_and(sentence_mask, valid_scores)
        if sentence_reps.size(0) == 0:
            # Degenerate empty batch: return the (empty) first-sentence slice.
            return sentence_reps[:, 0, :]
        # -1e4 (rather than -inf) keeps the softmax finite even when no
        # sentence survives the mask.
        weights = F.softmax(att_scores.masked_fill((~keep).bool(), -1e4), dim=-1)
        return torch.bmm(weights.unsqueeze(1), sentence_reps).squeeze(1)
#The final joint model used for the tasks.
class ModelForSequenceClassification(nn.Module):
    """Joint model: per-sentence evidence classification plus paragraph-level
    NLI classification on top of a shared DeBERTa encoder."""

    def __init__(self, base_model, hidden_dim=1024, n_labels=2):
        super().__init__()
        #DeBERTa-v3-large hidden size is 1024.
        #We use DeBERTa as the base model for encoding the data instances.
        self.deberta = base_model
        # Pools token vectors into one vector per sentence.
        self.word_attention = WordAttention(hidden_dim, hidden_dim, dropout=0.0)
        # Per-sentence evidence / non-evidence classifier.
        self.evidence_linear = ClassificationHead(hidden_dim=hidden_dim,
                        n_labels=n_labels, hidden_dropout_prob=0.0)
        # Label index 2 is the evidence-label padding value and is ignored.
        self.evidence_criterion = nn.CrossEntropyLoss(ignore_index=2)
        self.nli_criterion = nn.CrossEntropyLoss()
        # Pools sentence vectors (restricted to evidence) into a paragraph vector.
        self.sentence_attention = DynamicSentenceAttention(hidden_dim, hidden_dim, dropout=0.0)
        # 3-way NLI classifier on the paragraph representation.
        self.nli_linear = ClassificationHead(hidden_dim, 3, hidden_dropout_prob = 0.0)
        # Modules optimized with their own learning rate (see train()'s settings).
        self.extra_modules = [
            self.sentence_attention,
            self.nli_linear,
            self.evidence_linear,
            self.nli_criterion,
            self.evidence_criterion,
            self.word_attention
        ]

    def select_valid(self, token_reps, token_mask, valid_sentences):
        """Keep only token representations of sentences flagged valid, falling
        back to the first non-claim sentence when nothing is selected."""
        # token_reps: (BATCH_SIZE, N_sentence, N_token, INPUT_SIZE)
        # token_mask: (BATCH_SIZE, N_sentence, N_token)
        # valid_sentences: (BATCH_SIZE, N_sentence)
        #valid_sentences = evidence_out[:,:,1] > evidence_out[:,:,0] # Only consider sentences predicted as evidences
        # Sentence 0 is the claim, hence the [:,1:,...] slicing below.
        if valid_sentences.size(1) > token_reps[:,1:,:,:].size(1):
            valid_sentences = valid_sentences[:, :token_reps[:,1:,:,:].size(1)]
        evidence_reps = token_reps[:,1:,:,:][valid_sentences]
        evidence_token_mask = token_mask[:,1:,:][valid_sentences]
        # NOTE(review): the .view(1, ...) reshapes assume an effective batch
        # size of 1 after the boolean selection -- confirm this method is only
        # used with batch_size == 1.
        evidence_reps = evidence_reps.view(1, evidence_reps.size(0), evidence_reps.size(1), evidence_reps.size(2))
        evidence_token_mask = evidence_token_mask.view(1, evidence_token_mask.size(0), evidence_token_mask.size(1))
        if len(evidence_reps.shape) == 3 or evidence_reps.size(1) == 0:
            evidence_reps = token_reps[:,1,:,:].unsqueeze(1) # First sentence is claim; second is dummy
            evidence_token_mask = token_mask[:,1,:].unsqueeze(1)
        return evidence_reps, evidence_token_mask

    def forward(
        self,
        encoded,
        attention_mask,
        nli_label,
        evidence_label,
        transformation_indices,
        sample_p = 1,
        #return_features=True,
        **kwargs
    ):
        """Run both tasks; returns
        (evidence_out, evidence_preds, evidence_loss, nli_out, nli_loss).

        With probability `sample_p` the NLI pooling attends over the
        *predicted* evidence sentences, otherwise over the gold-labelled ones
        (scheduled-sampling style).
        """
        batch_indices, indices_by_batch, mask = transformation_indices # (batch_size, N_sep, N_token)
        # (Batch_size, N_sep, BERT_DIM), (Batch_size, N_sep)
        deberta_out = self.deberta(encoded, attention_mask)[0] # (BATCH_SIZE, sequence_len, BERT_DIM)
        # Regroup the flat token sequence into per-sentence token blocks.
        deberta_tokens = deberta_out[batch_indices, indices_by_batch, :]
        #represent sentences as weighted self-attention reps
        sentence_reps, sentence_mask = self.word_attention(deberta_tokens, mask)
        #logits of linear predictor
        evidence_out = self.evidence_linear(sentence_reps)
        ## New linear
        att_scores = evidence_out[:,:,1] # (BATCH_SIZE, N_sentence)
        if bool(torch.rand(1) < sample_p): # Choose sentence according to predicted evidence
            valid_scores = evidence_out[:,:,1] > evidence_out[:,:,0]
        else:
            valid_scores = evidence_label == 1 # Ground truth
            valid_scores = valid_scores[:,:mask.size(1)]
        paragraph_rep = self.sentence_attention(sentence_reps, sentence_mask, att_scores, valid_scores)
        # (BATCH_SIZE, BERT_DIM)
        nli_out = self.nli_linear(paragraph_rep) # (Batch_size, 3)
        #for loss calculation
        # Drop labels of sentences lost to encoder-side truncation.
        if evidence_label.size(1) > evidence_out.size(1):
            evidence_label = evidence_label[:,:evidence_out.size(1)]
        evidence_loss = self.evidence_criterion(evidence_out.view(-1, 2),
                                                  evidence_label.reshape(-1)) # ignore index 2
        # NOTE(review): softmax over dim=1 (the sentence axis) differs from the
        # dim=2 class-axis softmax used by evaluation() -- confirm intended.
        evidence_preds = (torch.softmax(evidence_out, dim=1)[:, 1] > 0.5).nonzero().flatten()
        nli_loss = self.nli_criterion(nli_out, nli_label)
        nli_out = torch.argmax(nli_out.cpu(), dim=-1).detach().numpy().tolist()
        return evidence_out, evidence_preds, evidence_loss, nli_out, nli_loss

    def evaluate(
        self,
        encoded,
        attention_mask,
        transformation_indices,
        **kwargs
    ):
        """Label-free inference path; returns (evidence_out, evidence_preds, nli_out)."""
        batch_indices, indices_by_batch, mask = transformation_indices # (batch_size, N_sep, N_token)
        deberta_out = self.deberta(encoded, attention_mask)[0] # (BATCH_SIZE, sequence_len, BERT_DIM)
        deberta_tokens = deberta_out[batch_indices, indices_by_batch, :]
        #represent sentences as weighted self-attention reps
        sentence_reps, sentence_mask = self.word_attention(deberta_tokens, mask)
        #logits of linear predictor
        evidence_out = self.evidence_linear(sentence_reps)
        att_scores = evidence_out[:,:,1] # (BATCH_SIZE, N_sentence)
        valid_scores = evidence_out[:,:,1] > evidence_out[:,:,0]
        paragraph_rep = self.sentence_attention(sentence_reps, sentence_mask, att_scores, valid_scores)
        # (BATCH_SIZE, BERT_DIM)
        evidence_preds = (torch.softmax(evidence_out, dim=1)[:, 1] > 0.5).nonzero().flatten()
        nli_out = self.nli_linear(paragraph_rep) # (Batch_size, 3)
        nli_out = torch.argmax(nli_out.cpu(), dim=-1).detach().numpy().tolist()
        # NOTE(review): switches the encoder back into train mode on exit --
        # verify callers expect this side effect.
        self.deberta.train()
        return evidence_out, evidence_preds, nli_out
| 9,675 | 42.390135 | 136 | py |
lm-evaluation-harness | lm-evaluation-harness-master/setup.py | from setuptools import setup, find_packages
from setuptools.command.install import install
# The long description shown on PyPI comes straight from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Development-only dependencies. NOTE: this was previously written as
# `(["black<=21.12b0", ...],)` -- a 1-tuple wrapping the list -- so
# extras_require={"dev": dev_requires} exposed a tuple-of-list instead of a
# flat requirement list; the stray parentheses/comma are removed here.
dev_requires = ["black<=21.12b0", "coverage<=6.2", "mock>=4.0.3", "pytest"]

install_requires = [
    "datasets>=2.0.0",
    "codecarbon",
    "nltk==3.6",
    "openai==0.13.0",
    "pycountry==20.7.3",
    "pytablewriter==0.58.0",
    "rouge-score==0.0.4",
    "sacrebleu==1.5.0",
    "scikit-learn>=0.24.1",
    "sqlitedict==1.6.0",
    "torch>=1.9",
    "tqdm-multiprocess==0.0.11",
    "accelerate@git+https://github.com/huggingface/accelerate@main",
    "transformers@git+https://github.com/huggingface/transformers@main",
    "promptsource@git+https://github.com/bigscience-workshop/promptsource@eval-hackathon",
]
dependency_links = []
class PostInstall(install):
    """setuptools `install` command that also fetches nltk data post-install."""

    @staticmethod
    def post_install():
        """Post installation `nltk` downloads."""
        import nltk

        nltk.download("popular")

    def run(self):
        # Run the normal installation first, then schedule the nltk download.
        install.run(self)
        self.execute(
            PostInstall.post_install, [], msg="Running post installation tasks"
        )
# Package metadata. The custom cmdclass routes `install` through PostInstall
# so the nltk data download runs right after installation.
setup(
    name="lm_eval",
    version="0.2.0",
    author="Leo Gao & EleutherAI",
    description="A framework for evaluating autoregressive language models",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/EleutherAI/lm-evaluation-harness",
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.8",
    install_requires=install_requires,
    dependency_links=dependency_links,
    extras_require={"dev": dev_requires},
    cmdclass={"install": PostInstall},
)
| 1,863 | 27.676923 | 90 | py |
lm-evaluation-harness | lm-evaluation-harness-master/scripts/make_gpt2_test_cases.py | import transformers
import torch
import torch.nn.functional as F
import random
from lm_eval.api.utils import set_seed
# Passages used to build regression fixtures: for each one a random split
# point is drawn and the summed log-likelihood of the suffix under GPT-2 is
# recorded, so tests can later assert identical scores.
data = [
    "A multilayer perceptron (MLP) is a class of feedforward artificial neural network (ANN)",
    "The term MLP is used ambiguously, sometimes loosely to any feedforward ANN, sometimes strictly to refer to networks composed of multiple layers of perceptrons (with threshold activation); see § Terminology",
    'Multilayer perceptrons are sometimes colloquially referred to as "vanilla" neural networks, especially when they have a single hidden layer.[1]',
    "An MLP consists of at least three layers of nodes: an input layer, a hidden layer and an output layer. Except for the input nodes, each node is a neuron that uses a nonlinear activation function.",
    "MLP utilizes a supervised learning technique called backpropagation for training.[2][3] Its multiple layers and non-linear activation distinguish MLP from a linear perceptron. It can distinguish data that is not linearly separable.[4]",
    "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. ",
    "Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.",
    "A multilayer perceptron (MLP) is a class of feedforward artificial neural network (ANN)",
    "Hello World",
]
model = transformers.GPT2LMHeadModel.from_pretrained("gpt2")
tok = transformers.GPT2Tokenizer.from_pretrained("gpt2")
tgs = []
# Fix the RNG so the chosen split points (hence the fixtures) are reproducible.
set_seed()
for dat in data:
    tokens = tok.encode(dat, return_tensors="pt")
    # Random split point: context = tokens[:ind + 1], continuation = the rest.
    ind = random.randrange(len(tokens[0]) - 1)
    logits = F.log_softmax(model(tokens)[0], dim=-1)[:, :-1] # [batch, seq, vocab]
    # Log-likelihood of each actual next token under the model.
    res = torch.gather(logits, 2, tokens[:, 1:].unsqueeze(-1)).squeeze(-1)[0]
    # Sum over the continuation only.
    tgs.append(float(res[ind:].sum()))
    # Emit a ready-to-paste ("context", "continuation") fixture tuple.
    print(
        r'("""'
        + tok.decode(tokens[0, : ind + 1])
        + r'""", """'
        + tok.decode(tokens[0, ind + 1 :])
        + r'"""), '
    )
print(tgs)
| 3,587 | 78.733333 | 1,100 | py |
lm-evaluation-harness | lm-evaluation-harness-master/tests/test_models_huggingface.py | import unittest.mock as mock
import logging
import pytest
import lm_eval.models
from lm_eval.api.utils import set_seed
logger = logging.getLogger(__name__)
# Only use cpu to avoid non-deterministic CUDA settings.
# See: https://pytorch.org/docs/stable/notes/randomness.html
_DEVICE = "cpu"
@pytest.mark.parametrize(
    "stop_sequences,test_input,expected",
    [
        (["not"], "i like", "i like to say that I'm not"),
        (["say that"], "i like", "i like to say that"),
        (["great"], "big science is", "big science is a great"),
        (
            ["<|endoftext|>"],
            "big science has",
            "big science has been done in the past, but it's not the same as the science of the past. It",
        ),
    ],
)
def test_causal_stop_sequences(stop_sequences, test_input, expected):
    """Greedy generation from the causal LM must halt at each stop sequence,
    reproducing the pinned gpt2 outputs exactly."""
    set_seed()
    causal_model = lm_eval.models.get_model(
        "hf-causal", pretrained="gpt2", device=_DEVICE
    )
    inputs = causal_model.tok_encode_batch([test_input])
    generations = causal_model._model_generate(
        inputs=inputs,
        max_tokens=20,
        stop=stop_sequences,
    )
    # Decode the single-element batch and compare the full text (prompt +
    # continuation) against the expected regression string.
    generations = causal_model.tok_decode(generations)[0]
    assert test_input + generations == expected
@pytest.mark.parametrize(
    "stop_sequences,test_input,expected",
    [
        (["better"], "big science is ", "big science is a great way to get a better"),
        (
            ["the"],
            "big science is ",
            "big science is a great way to get a better understanding of the",
        ),
        (
            ["."],
            "The quick brown fox jumps over the lazy ",
            "The quick brown fox jumps over the lazy fox.",
        ),
        (
            ["</s>"],
            "big science is ",
            "big science is a great way to get a better understanding of the world.",
        ),
    ],
)
def test_seq2seq_stop_sequences(stop_sequences, test_input, expected):
    """Greedy generation from the seq2seq LM must halt at each stop sequence,
    reproducing the pinned t5-small-lm-adapt outputs exactly."""
    seq2seq_model = lm_eval.models.get_model(
        "hf-seq2seq", pretrained="google/t5-small-lm-adapt", device=_DEVICE
    )
    inputs = seq2seq_model.tok_encode_batch([test_input])
    generations = seq2seq_model._model_generate(
        inputs=inputs,
        max_tokens=20,
        stop=stop_sequences,
    )
    # Decode the single-element batch; the expected value includes the prompt.
    generations = seq2seq_model.tok_decode(generations)[0]
    assert test_input + generations == expected
def test_causal_model():
    """End-to-end regression test of the causal-LM API: loglikelihood values,
    is_greedy flags, empty-context handling, and greedy_until generation, all
    pinned against gpt2 reference numbers."""
    set_seed()
    causal_model = lm_eval.models.get_model(
        "hf-causal",
        pretrained="gpt2",
        device=_DEVICE,
    )
    # Each loglikelihood result is (logprob, is_greedy); the first five are
    # unpacked for direct checks and the rest compared against `targets`.
    (
        (ll_dog, ig_dog),
        (ll_cat, ig_cat),
        (_, ll_max_0),
        (_, ll_max_1),
        (_, ll_max_2),
        *vals,
    ) = causal_model.loglikelihood(
        [
            ("The quick brown fox jumps over the lazy", " dog"),
            ("The quick brown fox jumps over the lazy", " cat"),
            ("The quick brown fox jumps over the lazy", ", lazy dog"),
            ("The quick brown fox jumps over the lazy", ", lazy fox"),
            (
                "The quick brown fox jumps over the lazy",
                ", lazy fox and they both fall to the ground",
            ),
            (
                """A mult""",
                """ilayer perceptron (MLP) is a class of feedforward artificial neural network (ANN)""",
            ),
            (
                """The term MLP is used ambiguously, sometimes loosely to any feedforward ANN, sometimes strictly to refer to networks composed of multiple layers of perceptrons""",
                """ (with threshold activation); see § Terminology""",
            ),
            (
                """Multilayer perceptrons are sometimes coll""",
                """oquially referred to as "vanilla" neural networks, especially when they have a single hidden layer.[1]""",
            ),
            (
                """An MLP consists of at least three layers of nodes: an input layer, a hidden layer and an output layer. Except for the input nodes, each node is a neuron that uses a nonlinear""",
                """ activation function.""",
            ),
            (
                """MLP utilizes a supervised""",
                """ learning technique called backpropagation for training.[2][3] Its multiple layers and non-linear activation distinguish MLP from a linear perceptron. It can distinguish data that is not linearly separable.[4]""",
            ),
            (
                """Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic""",
                """ in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. """,
            ),
            (
                """Specifically, we train GPT-3, an autoregressive language model with 175""",
                """ billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.""",
            ),
            (
                """A mult""",
                """ilayer perceptron (MLP) is a class of feedforward artificial neural network (ANN)""",
            ),
            ("""Hello""", """ World"""),
        ]
    )
    # " dog" must be more likely than " cat", and the is_greedy flags must
    # match whether the continuation is gpt2's actual greedy decode.
    assert ll_dog > ll_cat
    assert not ig_cat
    assert not ll_max_0
    assert ll_max_1
    assert ll_max_2
    # Reference logprobs produced by scripts/make_gpt2_test_cases.py.
    targets = [
        -61.60536193847656,
        -56.57843780517578,
        -62.131004333496094,
        -9.799489974975586,
        -153.96334838867188,
        -341.222900390625,
        -731.1475830078125,
        -61.60536193847656,
        -8.682319641113281,
    ]
    for (pred, _), tgt in zip(vals, targets):
        assert pred == pytest.approx(tgt, rel=1e-3)
    # Test empty context
    causal_model.loglikelihood([("", "test")])
    request_args = {
        "stop_sequences": [".", "\n", "'"],
        "max_generation_length": None,
        "num_fewshot": 1,
    }
    (gen,) = causal_model.greedy_until(
        [("The quick brown fox jumps over the lazy", request_args)]
    )
    assert gen == ", lazy fox and they both fall to the ground"
def test_causal_model_perplexity():
    """Rolling loglikelihood of a fixed string must match the pinned gpt2
    per-token logprobs, both unrestricted and with a short context window
    forced via a mocked `max_length`."""
    set_seed()
    causal_model = lm_eval.models.get_model_from_args_string(
        model_api_name="hf-causal", model_args=f"device={_DEVICE},pretrained=gpt2"
    )
    test_string = "We study empirical scaling laws for language model performance on the cross-entropy loss."
    perplexity = causal_model.loglikelihood_rolling([(test_string,)])[0]
    # Reference: sum of per-token logprobs over the full string.
    tgt = sum(
        [
            -4.9599953,
            -8.069298,
            -8.308624,
            -10.178513,
            -8.906924,
            -1.9318912,
            -7.745445,
            -7.146077,
            -5.2072,
            -3.5882986,
            -1.9957212,
            -8.044922,
            -0.20841774,
            -5.1096807,
            -0.099879116,
            -8.888423,
            -4.6180487,
        ]
    )
    assert perplexity == pytest.approx(tgt, rel=1e-3)
    # Limit the model context to 5 tokens and re-check: the rolling windows
    # change, so the per-token logprobs (and their sum) differ.
    with mock.patch.object(
        lm_eval.models.huggingface.AutoCausalLM,
        "max_length",
        new_callable=mock.PropertyMock,
    ) as mock_max_length:
        mock_max_length.return_value = 5
        causal_model = lm_eval.models.get_model_from_args_string(
            model_api_name="hf-causal", model_args=f"device={_DEVICE},pretrained=gpt2"
        )
        perplexity = causal_model.loglikelihood_rolling([(test_string,)])[0]
        logger.info(perplexity)
        tgt = sum(
            [
                -4.96001,
                -8.069275,
                -8.308612,
                -10.178482,
                -8.90691,
                -4.037338,
                -8.09261,
                -11.662385,
                -10.206891,
                -4.425003,
                -2.2563353,
                -7.909143,
                -1.9304147,
                -7.3610134,
                -2.3120654,
                -7.3229,
                -2.1643813,
            ]
        )
        assert perplexity == pytest.approx(tgt, rel=1e-3)
def test_seq2seq_model():
    """Regression-test loglikelihood and greedy generation of a seq2seq (T5) model.

    Targets are pinned values from a known-good run of google/t5-small-lm-adapt;
    requires downloading the model from the HuggingFace hub.
    """
    seq2seq_model = lm_eval.models.get_model(
        "hf-seq2seq",
        pretrained="google/t5-small-lm-adapt",
        device=_DEVICE,
    )
    llhs = seq2seq_model.loglikelihood(
        [
            ("The quick brown fox jumps over the lazy", " dog"),
            ("The quick brown fox jumps over the lazy", " cat"),
            ("The quick brown fox jumps over the lazy", ", lazy dog"),
            ("The quick brown fox jumps over the lazy", "<pad> fox."),
            (
                "The quick brown fox jumps over the lazy",
                ", lazy fox and they both fall to the ground",
            ),
            (
                """A mult""",
                """ilayer perceptron (MLP) is a class of feedforward artificial neural network (ANN)""",
            ),
            (
                """The term MLP is used ambiguously, sometimes loosely to any feedforward ANN, sometimes strictly to refer to networks composed of multiple layers of perceptrons""",
                """ (with threshold activation); see § Terminology""",
            ),
            (
                """Multilayer perceptrons are sometimes coll""",
                """oquially referred to as "vanilla" neural networks, especially when they have a single hidden layer.[1]""",
            ),
            (
                """An MLP consists of at least three layers of nodes: an input layer, a hidden layer and an output layer. Except for the input nodes, each node is a neuron that uses a nonlinear""",
                """ activation function.""",
            ),
            (
                """MLP utilizes a supervised""",
                """ learning technique called backpropagation for training.[2][3] Its multiple layers and non-linear activation distinguish MLP from a linear perceptron. It can distinguish data that is not linearly separable.[4]""",
            ),
            (
                """Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic""",
                """ in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. """,
            ),
            (
                """Specifically, we train GPT-3, an autoregressive language model with 175""",
                """ billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.""",
            ),
            (
                """A mult""",
                """ilayer perceptron (MLP) is a class of feedforward artificial neural network (ANN)""",
            ),
            ("""Hello""", """ World"""),
        ]
    )
    # First five requests are checked qualitatively; the rest against pinned targets.
    (
        (ll_dog, ig_dog),
        (ll_cat, ig_cat),
        (_, ll_max_0),
        (_, ll_max_1),
        (_, ll_max_2),
        *vals,
    ) = llhs
    assert ll_dog > ll_cat
    assert not ig_cat
    # Pinned loglikelihoods for the remaining nine requests (same order as above).
    targets = [
        -118.2639,
        -70.3217,
        -116.2367,
        -16.5411,
        -227.1213,
        -393.8974,
        -851.3747,
        -118.2639,
        -19.8556,
    ]
    for (pred, _), tgt in zip(vals, targets):
        assert pred == pytest.approx(tgt, rel=1e-3)
    # Test empty context
    seq2seq_model.loglikelihood([("", "test")])
    request_args = {
        "stop_sequences": [".", "\n"],
        "max_generation_length": 20,
        "num_fewshot": 1,
    }
    (gen,) = seq2seq_model.greedy_until(
        [("The quick brown fox jumps over the lazy", request_args)]
    )
    assert gen == "fox"
| 14,178 | 42.360856 | 1,045 | py |
lm-evaluation-harness | lm-evaluation-harness-master/tests/test_utils.py | import torch
from lm_eval.api.utils import (
get_rolling_token_windows,
make_disjoint_window,
select_continuation_from_batch_left_padding,
split_and_pad_windows,
)
# noinspection DuplicatedCode
def test_get_rolling_token_windows_v1():
    """34 tokens, max_seq_len=10, context_len=1: minimal-overlap tiling."""
    gold = [
        ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
        ([9, 10, 11, 12, 13, 14, 15, 16, 17, 18], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
        ([19, 20, 21, 22, 23, 24, 25, 26, 27, 28], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]),
        ([23, 24, 25, 26, 27, 28, 29, 30, 31, 32], [30, 31, 32, 33]),
    ]
    tokens = list(range(34))
    windows = list(
        get_rolling_token_windows(
            token_list=tokens,
            prefix_token=-100,
            max_seq_len=10,
            context_len=1,
        )
    )
    # Every token must be predicted exactly once across all windows.
    assert sum(len(preds) for _, preds in windows) == len(tokens)
    assert windows == gold
# noinspection DuplicatedCode
def test_get_rolling_token_windows_v2():
    """34 tokens, max_seq_len=10, context_len=8: later windows predict 3 tokens each."""
    # First window predicts all 10 tokens; each following window slides by 3.
    gold = [([-100] + list(range(9)), list(range(10)))]
    gold += [
        (list(range(3 * k + 2, 3 * k + 12)), [3 * k + 10, 3 * k + 11, 3 * k + 12])
        for k in range(8)
    ]
    tokens = list(range(34))
    windows = list(
        get_rolling_token_windows(
            token_list=tokens,
            prefix_token=-100,
            max_seq_len=10,
            context_len=8,
        )
    )
    # Every token must be predicted exactly once across all windows.
    assert sum(len(preds) for _, preds in windows) == len(tokens)
    assert windows == gold
# noinspection DuplicatedCode
def test_get_rolling_token_windows_v3():
    """34 tokens, max_seq_len=10, context_len=10: one new prediction per window."""
    # First window predicts all 10 tokens; then a full 10-token context per
    # single-token prediction.
    gold = [([-100] + list(range(9)), list(range(10)))]
    gold += [(list(range(k, k + 10)), [k + 10]) for k in range(24)]
    tokens = list(range(34))
    windows = list(
        get_rolling_token_windows(
            token_list=tokens,
            prefix_token=-100,
            max_seq_len=10,
            context_len=10,
        )
    )
    # Every token must be predicted exactly once across all windows.
    assert sum(len(preds) for _, preds in windows) == len(tokens)
    assert windows == gold
# noinspection DuplicatedCode
def test_get_rolling_token_windows_v4():
    """30 tokens, max_seq_len=10, context_len=10: no partial final window needed."""
    gold = [([-100] + list(range(9)), list(range(10)))]
    gold += [(list(range(k, k + 10)), [k + 10]) for k in range(20)]
    tokens = list(range(30))
    windows = list(
        get_rolling_token_windows(
            token_list=tokens,
            prefix_token=-100,
            max_seq_len=10,
            context_len=10,
        )
    )
    # Every token must be predicted exactly once across all windows.
    assert sum(len(preds) for _, preds in windows) == len(tokens)
    assert windows == gold
# noinspection DuplicatedCode
def test_get_rolling_token_windows_v5():
    """30 tokens, max_seq_len=10, context_len=1: the sequence tiles exactly."""
    gold = [
        ([-100] + list(range(9)), list(range(10))),
        (list(range(9, 19)), list(range(10, 20))),
        (list(range(19, 29)), list(range(20, 30))),
    ]
    tokens = list(range(30))
    windows = list(
        get_rolling_token_windows(
            token_list=tokens,
            prefix_token=-100,
            max_seq_len=10,
            context_len=1,
        )
    )
    # Every token must be predicted exactly once across all windows.
    assert sum(len(preds) for _, preds in windows) == len(tokens)
    assert windows == gold
# noinspection DuplicatedCode
def test_get_rolling_token_windows_v6():
    """9 tokens, max_seq_len=2, context_len=1: tiny windows plus a partial tail."""
    gold = [
        ([-100, 0], [0, 1]),
        ([1, 2], [2, 3]),
        ([3, 4], [4, 5]),
        ([5, 6], [6, 7]),
        ([6, 7], [8]),
    ]
    tokens = list(range(9))
    windows = list(
        get_rolling_token_windows(
            token_list=tokens,
            prefix_token=-100,
            max_seq_len=2,
            context_len=1,
        )
    )
    # Every token must be predicted exactly once across all windows.
    assert sum(len(preds) for _, preds in windows) == len(tokens)
    assert windows == gold
def test_get_rolling_token_windows_empty():
    """An empty token list must produce no windows at all."""
    windows = get_rolling_token_windows(
        token_list=[],
        prefix_token=-100,
        max_seq_len=2,
        context_len=1,
    )
    assert list(windows) == []
def test_make_disjoint_window():
    """The context must be trimmed so it no longer overlaps the continuation."""
    trimmed = make_disjoint_window(([1, 2, 3, 4, 5], [2, 3, 4, 5, 6]))
    assert trimmed == ([1], [2, 3, 4, 5, 6])
    trimmed = make_disjoint_window(([1, 2, 3, 4, 5], [4, 5, 6]))
    assert trimmed == ([1, 2, 3], [4, 5, 6])
def test_pad_windows():
    """Split rolling windows into contexts/continuations and pad to uniform shape."""
    token_list = [100, 19, 3, 9, 794, 7142, 81, 1327, 5]
    # Disjoint (context, continuation) windows over `token_list`:
    #   ([1], [100, 19, 3, 9]), ([9], [794, 7142, 81, 1327]), ([], [5])
    windows = [
        make_disjoint_window(w)
        for w in get_rolling_token_windows(
            token_list=token_list,
            prefix_token=1,
            max_seq_len=4,
            context_len=1,
        )
    ]
    expected_contexts = [[1], [9], [1]]
    expected_continuations = [
        [100, 19, 3, 9],
        [794, 7142, 81, 1327],
        [5, 1, 1, 1],
    ]
    padded = split_and_pad_windows(windows, pad_token_id=1, max_seq_len=4)
    assert padded == (expected_contexts, expected_continuations)
def test_select_continuation_from_batch_1():
    """Continuations start right after the longest (left-padded) context."""
    generations = torch.tensor(
        [
            [0, 0, 1, 2, 3, 11, 22, 33, 44, 55],  # sentence 1
            [1, 2, 3, 4, 5, 11, 22, 33, 0, 0],  # sentence 2
        ]
    )
    expected = torch.tensor(
        [
            [11, 22, 33, 44, 55],  # sentence 1
            [11, 22, 33, 0, 0],  # sentence 2
        ]
    )
    selected = select_continuation_from_batch_left_padding(generations, 5)
    assert torch.equal(selected, expected)
| 9,180 | 31.101399 | 87 | py |
lm-evaluation-harness | lm-evaluation-harness-master/lm_eval/api/utils.py | import collections
import pathlib
import re
import sys
import torch
from typing import Callable, Final, Iterable, List, Optional, Tuple, Union
from collections.abc import MutableMapping
from transformers import set_seed as transformers_set_seed
# General Utils
class ExitCodeError(Exception):
    # NOTE(review): no call sites are visible in this module — presumably raised
    # when an external command exits with a non-zero status; confirm at call sites.
    pass
# Reproducibility utils
# Default RNG seed used across the harness when callers do not supply one.
DEFAULT_SEED: Final[int] = 1234
def set_seed(seed: Optional[int] = DEFAULT_SEED):
    """Seed the RNGs by delegating to `transformers.set_seed` for reproducibility."""
    transformers_set_seed(seed)
# Token Utils
def general_detokenize(s: str) -> str:
s = s.replace(" n't", "n't")
s = s.replace(" )", ")")
s = s.replace("( ", "(")
s = s.replace('" ', '"')
s = s.replace(' "', '"')
s = re.sub(r" (['.,])", r"\1", s)
return s
def get_rolling_token_windows(
    token_list: List[int], prefix_token: int, max_seq_len: int, context_len: int
) -> Iterable[Tuple[List[int], List[int]]]:
    """Yield `(input_tokens, pred_tokens)` windows that tile `token_list`.

    Each token of `token_list` appears in exactly one `pred_tokens` slice, so
    scoring only the last `len(pred_tokens)` logits of every window scores the
    full sequence exactly once.

    Args:
        token_list:
            List of tokens to be predicted.
        prefix_token:
            Dummy token (e.g. <eos>) so the first token has something to
            condition on.
        max_seq_len:
            The maximum sequence length of the model, or a chosen limit.
        context_len:
            Minimum context carried into each window (at least 1), letting
            each prediction window condition on some prior tokens.

    Returns:
        Generator of `(input_tokens, pred_tokens)` tuples.
    """
    assert 1 <= context_len <= max_seq_len
    total = len(token_list)
    if total == 0:
        return
    # The first window predicts every token it covers, conditioned on the prefix.
    head = min(max_seq_len, total)
    yield [prefix_token] + token_list[: head - 1], token_list[:head]
    # Each later window contributes at most `stride` new predictions
    # (+1 because inputs are offset one position from their predictions).
    stride = max_seq_len - context_len + 1
    done = head
    while done < total:
        step = min(stride, total - done)
        end = done + step
        yield (
            token_list[end - max_seq_len - 1 : end - 1],
            token_list[end - step : end],
        )
        done = end
def split_and_pad_windows(
    windows: List[Tuple[str, str]], pad_token_id: int, max_seq_len: int
) -> Tuple[List[int], List[int]]:
    """Unzip rolling `(context, continuation)` windows and pad them to shape.

    Contexts are padded (with `pad_token_id`) up to the length of the final
    window's context — an empty final context receives a single pad token —
    and the final continuation is padded out to `max_seq_len` whenever there
    is more than one window.

    Example (pad_token_id=1, max_seq_len=3):
        [([1], [23, 19, 3]), ([43], [2, 4])]
        -> ([[1], [43]], [[23, 19, 3], [2, 4, 1]])

    Args:
        windows:
            Rolling `(context, continuation)` token windows, e.g. from
            `get_rolling_token_windows`.
        pad_token_id:
            The token id used for padding.
        max_seq_len:
            The maximum sequence length of the model, or a chosen limit.

    Returns:
        A `(contexts, continuations)` pair of padded lists.
    """
    contexts, continuations = (list(seq) for seq in zip(*windows))
    # --- contexts ---
    final_ctx_len = len(contexts[-1])
    if final_ctx_len == 0:
        # Empty final context: represent it with a single pad token.
        contexts[-1] += [pad_token_id]
    elif final_ctx_len > 1:
        for ctx in contexts[:-1]:
            ctx += [pad_token_id] * (final_ctx_len - len(ctx))
    # --- continuations ---
    final_cont = continuations[-1]
    if len(continuations) > 1 and len(final_cont) < max_seq_len:
        continuations[-1] = final_cont + [pad_token_id] * (
            max_seq_len - len(final_cont)
        )
    return contexts, continuations
def make_disjoint_window(pair):
    """Trim the context of a `(context, continuation)` window so the two parts
    no longer overlap (`get_rolling_token_windows` output overlaps by design).
    """
    context, continuation = pair
    # The context overlaps the continuation by all but its last token.
    overlap = len(continuation) - 1
    return context[:-overlap], continuation
def select_continuation_from_batch_left_padding(
    generations: Union[List[List[int]], torch.Tensor], max_context_size: int
):
    """Select the continuation from the batch, removing prompts of different lengths.

    Args:
        generations (Union[List[List[int]], torch.Tensor]):
            A tensor or list-of-lists of shape [batch_size, sequence length].
        max_context_size (int):
            The size of the biggest context; generations will proceed from that
            index.

    Example:
        PAD PAD Continue : The dog chased the cat [every day of the week]
        Riddle me this   : The dog chased the cat [yesterday] PAD PAD PAD PAD
    Output:
        [every day of the week]
        [yesterday] PAD PAD PAD PAD

    Returns:
        The input with the first `max_context_size` positions of every row removed.
    """
    try:
        return generations[:, max_context_size:]
    except TypeError:
        # Bug fix: plain Python nested lists (part of the declared input type)
        # do not support 2-D slicing — slice each row instead.
        return [row[max_context_size:] for row in generations]
# Container Utils
class Reorderer:
    # Reorders a list of requests by a sort key `fn` so that similar items end
    # up adjacent (useful for batching). Items whose keys compare equal are
    # collapsed to a single representative; `get_original` fans each
    # representative's result back out to every original position.
    def __init__(self, arr, fn):
        self.size = len(arr)
        # Pair every item with its original index so order can be restored later.
        arr = list(enumerate(arr))
        # Bucket (index, item) pairs by key; each bucket keeps one representative
        # item plus the indices of all items that shared its key.
        arr = group(arr, lambda x: fn(x[1]))
        arr = [([y[0] for y in x], x[0][1]) for x in arr]
        arr.sort(key=lambda x: fn(x[1]))
        self.arr = arr
    def get_reordered(self):
        # One representative item per distinct key, sorted by `fn`.
        return [x[1] for x in self.arr]
    def get_original(self, newarr):
        # Map the per-representative results in `newarr` (same order as
        # `get_reordered`) back to a list aligned with the original input.
        res = [None] * self.size
        cov = [False] * self.size
        for (inds, _), v in zip(self.arr, newarr):
            for ind in inds:
                res[ind] = v
                cov[ind] = True
        # Every original position must have received a result.
        assert all(cov)
        return res
def flatten(
    d: Union[dict, MutableMapping],
    parent_key: str = "",
    sep: str = "_",
) -> dict:
    """Recursively flatten a nested mapping into a single-level dict whose keys
    join the nesting path with `sep` (e.g. {"a": {"b": 1}} -> {"a_b": 1}).
    """
    # Adapted from: https://stackoverflow.com/a/6027615
    flat = {}
    for key, value in d.items():
        compound_key = f"{parent_key}{sep}{key}" if parent_key else key
        if isinstance(value, MutableMapping):
            flat.update(flatten(value, compound_key, sep=sep))
        else:
            flat[compound_key] = value
    return flat
def join_iters(iterables: Iterable) -> List:
    """Lazily concatenate an iterable of iterables into one flat stream."""
    for iterable in iterables:
        for item in iterable:
            yield item
def chunks(iterable: Iterable, n: int) -> List:
    """Yield successive lists of up to `n` items from `iterable`; the final
    chunk may be shorter."""
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) < n:
            continue
        yield batch
        batch = []
    # Emit whatever is left over (possibly fewer than `n` items).
    if batch:
        yield batch
def group(arr: Iterable, fn: Callable) -> List:
    """Bucket the items of `arr` by key `fn`, returning the buckets in
    first-seen key order."""
    buckets = {}
    for item in arr:
        buckets.setdefault(fn(item), []).append(item)
    return list(buckets.values())
# CLI utils
def cli_template_names(
    task_name: str, template_names: str, template_idx: int = None
) -> List[str]:
    """Returns a selection of template names for a given task and comma-
    separated string of template names.
    Example:
        cli_template_names("task", "A,B,C") -> ["A", "B", "C"]
    Args:
        task_name (str):
            Name of the task from which to retrieve template names.
        template_names (str):
            A string of template names separated by a comma if multiple names
            are given.
            General Selectors:
                "all_templates":
                    Returns all templates for the task.
                "original_templates":
                    Returns all templates with formatting that matches the
                    original task design.
        template_idx (int, optional, defaults to None):
            If given, returns only the template at the given index.
    Raises:
        ValueError: If "original_templates" is requested but the task has none.
    Returns:
        A list of template names.
    """
    # Imported lazily here — presumably to avoid a circular import with
    # `lm_eval.tasks`; confirm before hoisting to module level.
    import lm_eval.tasks
    if template_names == "all_templates":
        selections = lm_eval.tasks.list_templates(task_name)
    elif template_names == "original_templates":
        templates = lm_eval.tasks.get_templates(task_name)
        selections = []
        for name in templates.all_template_names:
            # Keep only templates flagged as faithful to the original task design.
            if templates[name].metadata.original_task is True:
                selections.append(name)
        if not selections:
            raise ValueError(f"No original task templates found for {task_name}")
    else:
        selections = template_names.split(",")
    if template_idx is not None:
        selections = [selections[template_idx]]
    return selections
def parse_cli_args_string(args: str) -> dict:
    """Parse a string of the form "arg1=val1,arg2=val2" into a kwargs dict,
    casting each value to a builtin type (bool/int/float) where possible.
    """
    # Strip only leading whitespace: trailing whitespace may be a meaningful
    # part of the final value.
    spec = args.lstrip()
    if not spec:
        return {}
    parsed = {}
    for assignment in spec.split(","):
        # Split on the first '=' only, so values may themselves contain '='.
        key, raw_value = assignment.split("=", 1)
        parsed[key] = str_to_builtin_type(raw_value)
    return parsed
def str_to_builtin_type(s: str) -> str:
    """Best-effort cast of a string to bool, int, or float — in that order —
    falling back to the string itself when nothing matches.
    """
    for cast in (to_bool, int, float):
        try:
            return cast(s)
        except ValueError:
            continue
    return s
# https://stackoverflow.com/questions/7019283/automatically-type-cast-parameters-in-python
# https://stackoverflow.com/questions/7019283/automatically-type-cast-parameters-in-python
def to_bool(s: str):
    """Convert "True"/"true" or "False"/"false" to bool; raise ValueError otherwise."""
    if s in ("True", "true"):
        return True
    if s in ("False", "false"):
        return False
    raise ValueError(f"The input `{s}` is not of boolean form.")
# Test utils
def find_test_root(*, start_path: pathlib.Path) -> pathlib.Path:
    """Search upward in the directory tree to a maximum of three layers
    to find and return the package root (containing the 'tests' folder).

    Args:
        start_path: Directory (or file path) to start searching from.

    Raises:
        FileNotFoundError: If no package root is found within the search limit.
    """
    cur_path = start_path.resolve()
    max_layers = 3
    for _ in range(max_layers):
        # The package root is identified by the presence of the version tests.
        if (cur_path / "tests" / "test_version_stable.py").exists():
            return cur_path
        cur_path = cur_path.parent.resolve()
    # Bug fix: the original concatenated "upwards" + f"of ..." with no space,
    # producing the garbled message "... upwardsof <path>".
    raise FileNotFoundError(
        f"Unable to find package root within {max_layers} layers upwards of {start_path}"
    )
def run_task_tests(*, task_list: List[str]):
    """Locate the package root and run the version-stability tests for the
    given tasks via pytest, raising ValueError on any failure."""
    import pytest

    package_root = find_test_root(start_path=pathlib.Path(__file__))
    # `-k` selects tests whose names match any of the requested tasks.
    pytest_args = [
        f"{package_root}/tests/test_version_stable.py",
        f"--rootdir={package_root}",
        "-k",
        " or ".join(task_list),
    ]
    sys.path.append(str(package_root))
    pytest_return_val = pytest.main(pytest_args)
    if pytest_return_val:
        raise ValueError(
            f"Not all tests for the specified tasks ({task_list}) ran successfully! Error code: {pytest_return_val}"
        )
| 10,944 | 29.572626 | 116 | py |
lm-evaluation-harness | lm-evaluation-harness-master/lm_eval/api/model.py | import abc
import hashlib
import json
import os
import torch
import torch.nn.functional as F
from tqdm import tqdm
from typing import Iterable, List, Optional, Tuple, Union
from transformers import BatchEncoding
from lm_eval.api import utils
class LM(abc.ABC):
    """Abstract interface implemented by every language model the harness evaluates."""
    def __init__(self):
        # No-op hook by default; `CachingLM` installs a real one via
        # `set_cache_hook` so partial results can be persisted.
        self.cache_hook = CacheHook(None)
    @abc.abstractmethod
    def loglikelihood(
        self, requests: List[Tuple[str, str]]
    ) -> List[Tuple[float, bool]]:
        """Compute log-likelihood of generating a continuation from a context.
        Downstream tasks should attempt to use loglikelihood instead of other
        LM calls whenever possible.
        Args:
            requests (List[Tuple[str, str]]):
                A list of pairs (context, continuation):
                context (str):
                    Context string. Implementations of LM must be able to handle
                    an empty context string.
                continuation (str):
                    The continuation over which log likelihood will be calculated.
                    If there is a word boundary, the space should be in the
                    continuation. For example, context="hello" continuation=" world"
                    is correct.
        Returns:
            A list of pairs (logprob, isgreedy):
                logprob (float):
                    The log probability of `continuation`.
                isgreedy (bool):
                    Whether `continuation` would be generated by greedy
                    sampling from `context`.
        """
        pass
    @abc.abstractmethod
    def loglikelihood_rolling(self, requests: List[Tuple[str, str]]) -> List[float]:
        """Compute full log-likelihood of a string, with no truncation, for perplexity computation
        - We will use the full max context length of the model.
        - For inputs that exceed the max context length, we divide the tokenized string into chunks of up to
        the max context length.
        - IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations
        which may simply concatenate multiple documents together.
        - IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into
        multiple chunks, the last input will still a full-sized context.
        Example:
            Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ]
            Prefix: EOT
            Max context length: 4
            Resulting input/prediction pairs:
                INPUT: EOT 0 1 2
                PRED: 0 1 2 3
                INPUT: 3 4 5 6
                PRED: 4 5 6 7
                INPUT: 5 6 7 8
                PRED: 8 9
        Observe that:
            1. Each token is predicted exactly once
            2. For the last pair, we provide the full context, but only score the last two tokens
        Args:
            requests (List[Tuple[str, str]]):
                A list of paired strings.
                string (str):
                    String for which we are computing per-token loglikelihood.
        Returns:
            A list of logprobs on the `continuation`.
        """
        pass
    @abc.abstractmethod
    def greedy_until(self, requests: List[Tuple[str, dict]]) -> List[str]:
        """Generate greedily until a stopping sequence or max generation length.
        Args:
            requests (List[Tuple[str, dict]]):
                A list of pairs (context, args):
                context (str):
                    Context string.
                args (dict):
                    A dictionary of generation arguments in the form:
                    {
                        stop_sequences: str,
                        max_generation_length: int,
                        num_fewshot: int
                    }
        Returns:
            A list of strings continuation:
                continuation: str
                    The generated continuation.
        """
        pass
    def set_cache_hook(self, cache_hook: "CacheHook"):
        """Install the hook used to persist partial results (see `CachingLM`)."""
        self.cache_hook = cache_hook
# Accepted in-memory representations of a tokenized sequence across model APIs.
TokenSequence = Union[List[int], torch.LongTensor, torch.Tensor, BatchEncoding]
class TokenLM(LM):
    """A language model that assumes inputs, and possibly outputs, are
    tokenized text as opposed to language model APIs that only support
    string-based input and output systems.
    """
    @abc.abstractmethod
    def tok_encode(self, string: str):
        """Encode `string` into a sequence of token ids."""
        pass
    @abc.abstractmethod
    def tok_decode(self, tokens: Iterable[int]) -> List[str]:
        """Decode token ids back into text."""
        pass
    @property
    @abc.abstractmethod
    def eot_token(self) -> str:
        """The end-of-text token string."""
        pass
    @property
    @abc.abstractmethod
    def eot_token_id(self) -> int:
        """The token id of the end-of-text token."""
        pass
    @property
    @abc.abstractmethod
    def max_gen_toks(self) -> int:
        """The maximum number of tokens to generate - not including context."""
        pass
    @property
    @abc.abstractmethod
    def max_length(self) -> int:
        """The maximum sequence length of the model."""
        pass
    @property
    @abc.abstractmethod
    def batch_size(self) -> int:
        """Batch size used when calling the underlying model."""
        pass
    @property
    @abc.abstractmethod
    def device(self) -> Union[int, str, torch.device]:
        """Device on which model inputs are placed."""
        pass
    def loglikelihood(
        self, requests: List[Tuple[str, str]]
    ) -> List[Tuple[float, bool]]:
        """Tokenize each (context, continuation) pair and defer scoring to
        `_loglikelihood_tokens`. An empty context is replaced by the
        end-of-text token so the model always has something to condition on.
        """
        new_requests = []
        for context, continuation in requests:
            if context == "":
                # End of text as context
                context_enc = [self.eot_token_id]
            else:
                context_enc = self.tok_encode(context)
            continuation_enc = self.tok_encode(continuation)
            new_requests.append(
                ((context, continuation), context_enc, continuation_enc)
            )
        return self._loglikelihood_tokens(new_requests)
    def loglikelihood_rolling(self, requests: List[Tuple[str, str]]) -> List[float]:
        """Score each string in full by tiling it with disjoint rolling windows
        (see `utils.get_rolling_token_windows`) and summing the per-window
        loglikelihoods.
        """
        # TODO: Implement caching once we've confirmed the perplexity implementation
        # TODO: Automatic batch size detection for vectorization
        loglikelihoods = []
        for (string,) in tqdm(requests):
            rolling_token_windows = list(
                map(
                    utils.make_disjoint_window,
                    utils.get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.eot_token_id,
                        max_seq_len=self.max_length,
                        context_len=1,
                    ),
                )
            )
            # `None` cache key disables partial caching inside _loglikelihood_tokens.
            rolling_token_windows = [(None,) + x for x in rolling_token_windows]
            # TODO: Extract out this call so it only gets called once and
            # also somehow figure out partial caching for that.
            string_nll = self._loglikelihood_tokens(
                rolling_token_windows, disable_tqdm=True
            )
            # Discard `is_greedy`
            string_nll = [x[0] for x in string_nll]
            string_nll = sum(string_nll)
            loglikelihoods.append(string_nll)
        return loglikelihoods
    def _loglikelihood_tokens(
        self,
        requests: List[Tuple[Tuple[str, str], TokenSequence, TokenSequence]],
        disable_tqdm: Optional[bool] = False,
    ) -> List[Tuple[float, bool]]:
        """Helper method for computing log-likelihood of generating a
        continuation from a context that have both been tokenized/encoded.
        Args:
            requests (List[Tuple[Tuple[str, str], TokenSequence, TokenSequence]]):
                A list of pairs ((context, continuation), context_enc, continuation_enc):
                context (str):
                    Context string. Implementations of LM must be able to handle
                    an empty context string.
                continuation (str):
                    The continuation over which log likelihood will be calculated.
                    If there is a word boundary, the space should be in the
                    continuation. For example, context="hello" continuation=" world"
                    is correct.
                context_enc (TokenSequence):
                    The tokenized/encoded context.
                continuation_enc (TokenSequence):
                    The tokenized/encoded continuation.
            disable_tqdm (bool, optional, defaults to False):
                Whether to disable `tqdm` progress bar.
        Returns:
            A list of pairs (logprob, isgreedy):
                logprob (float):
                    The log probability of `continuation`.
                isgreedy (float):
                    Whether `continuation` would be generated by greedy sampling from `context`.
        """
        def _collate(x):
            # The negative sign on len(tokens) sorts descending - this has a few advantages:
            # - Time estimates will always be over not underestimates, which is more useful for planning
            # - To know the size of a batch when going through the list, you know the first one is always the batch
            # padded context length. this is useful to simplify the batching logic and more importantly to make
            # automatic adaptive batches much easier to implement
            # - Any OOMs will happen right away rather than near the end
            tokens = x[1] + x[2]
            return -len(tokens), tuple(tokens)
        # TODO: Automatic (variable) batch size detection for vectorization
        # TODO: Implement some kind of efficient-request-middleware that lumps together requests with the same context
        results = []
        reorder = utils.Reorderer(requests, _collate)
        for chunk in utils.chunks(
            tqdm(reorder.get_reordered(), disable=disable_tqdm), self.batch_size
        ):
            inputs = []
            input_lens = []
            cont_tokens_list = []
            padding_length = None
            # Because vectorizing is annoying, we first convert each (context, continuation) pair to padded
            # tensors, then we pack them together into a batch, call the model, and then pick it all apart
            # again because vectorizing is annoying
            for _, context_enc, continuation_enc in chunk:
                # sanity check
                assert len(context_enc) > 0
                assert len(continuation_enc) > 0
                assert len(continuation_enc) <= self.max_length
                # How this all works:
                #          CTX      CONT
                # inp 0 1 2 3|4 5 6 7 8 9   <- last token is deleted by inp[:, :-1]
                # gpt2    \               \
                # logits   1 2 3|4 5 6 7 8 9   <- the ctx half gets tossed out by the
                # cont_tokens      4 5 6 7 8 9      [:, -len(continuation_enc):, :self.vocab_size] slice
                # When too long to fit in context, truncate from the left
                _full_enc = context_enc + continuation_enc
                input = torch.tensor(
                    _full_enc[-(self.max_length + 1) :][:-1],
                    dtype=torch.long,
                ).to(self.device)
                (input_len,) = input.shape
                # Since in _collate we make sure length is descending, the longest is always the first one.
                padding_length = (
                    padding_length if padding_length is not None else input_len
                )
                # Pad length from seq to padding_length
                input = torch.cat(
                    [
                        input,  # [seq]
                        torch.zeros(padding_length - input_len, dtype=torch.long).to(
                            input.device
                        ),  # [padding_length - seq]
                    ],
                    dim=0,
                )
                inputs.append(input.unsqueeze(0))  # [1, padding_length]
                cont_tokens_list.append(continuation_enc)
                input_lens.append(input_len)
            batched_inputs = torch.cat(inputs, dim=0)  # [batch, padding_length]
            multi_logits = F.log_softmax(
                self._model_call(batched_inputs), dim=-1
            ).cpu()  # [batch, padding_length, vocab]
            for (cache_key, _, _), logits, input, input_len, cont_tokens in zip(
                chunk, multi_logits, inputs, input_lens, cont_tokens_list
            ):
                # Slice to original seq length
                cont_len = len(cont_tokens)
                # [1, seq, vocab]
                logits = logits[input_len - cont_len : input_len].unsqueeze(0)
                # Check if per-token argmax is exactly equal to continuation
                greedy_tokens = logits.argmax(dim=-1)
                # [1, seq]
                cont_tokens = torch.tensor(cont_tokens, dtype=torch.long).unsqueeze(0)
                max_equal = (greedy_tokens == cont_tokens).all()
                # Obtain logprobs at the corresponding continuation token indices
                # last_token_slice = logits[:, -1, :].squeeze(0).tolist()
                # [1, seq]
                logits = torch.gather(logits, 2, cont_tokens.unsqueeze(-1)).squeeze(-1)
                # Answer: (log prob, is-exact-match)
                answer = (float(logits.sum()), bool(max_equal))
                # Partial caching
                if cache_key is not None:
                    self.cache_hook.add_partial("loglikelihood", cache_key, answer)
                results.append(answer)
        return reorder.get_original(results)
    @abc.abstractmethod
    def _model_call(
        self, inputs: TokenSequence, labels: Optional[TokenSequence] = None
    ) -> TokenSequence:
        """
        Args:
            inputs (TokenSequence):
                A list of strings or torch tensor of shape [batch, sequence]
                the size of sequence may vary from call to call.
            labels (TokenSequence, optional, defaults to None):
                A list of strings or torch tensor of shape [batch, sequence]
                useful for sequence-to-sequence language models.
        Returns:
            A list of ints or torch tensor of shape [batch, sequence, vocab]
            with the logits returned from the model.
        """
        pass
    @abc.abstractmethod
    def _model_generate(
        self, inputs: TokenSequence, max_tokens: int, stop: Optional[List[str]] = None
    ) -> Union[TokenSequence, List[str]]:
        """
        Args:
            inputs (TokenSequence):
                A list of strings/ints or torch tensor of shape [batch, sequence]
                the size of sequence may vary from call to call.
            max_tokens (int):
                The maximum number of tokens to generate.
            stop (List[str], optional, defaults to None):
                A list of stopping sequences. If provided, the generation will
                stop when any string sequence in the list is encountered.
        Returns:
            A list of ints/strings or a torch tensor of shape [batch, sequence, vocab]
            with continuation tokens/string of the inputs.
        """
        pass
def hash_args(attr, args):
    """Return a stable SHA-256 hex digest identifying an ``(attr, args)`` request.

    The attribute name and request arguments are serialized to JSON so that
    equal requests always map to the same cache key.
    """
    payload = json.dumps([attr, *args])
    return hashlib.sha256(payload.encode("utf-8")).hexdigest()
class CachingLM:
    """Transparent caching proxy: serves cached LM results, computes the rest."""

    def __init__(self, lm: LM, cache_db: str):
        """LM wrapper that returns cached results if they exist, and uses the underlying LM if not.
        Args:
            lm (LM):
                The underlying LM to use.
            cache_db (str):
                Path to the `cache` database.
        """
        from sqlitedict import SqliteDict

        self.lm = lm
        if os.path.dirname(cache_db):
            os.makedirs(os.path.dirname(cache_db), exist_ok=True)
        self.cache_db = cache_db
        self.dbdict = SqliteDict(cache_db, autocommit=True)
        # Add hook to lm
        lm.set_cache_hook(self.get_cache_hook())

    def __getattr__(self, attr):
        # Only invoked for names not found on CachingLM itself, i.e. LM request
        # method names such as "loglikelihood"; each is proxied through the cache.
        def fn(requests):
            res = []
            remaining_reqs = []
            # Figure out which ones are cached and which ones are new
            for req in requests:
                hsh = hash_args(attr, req)
                if hsh in self.dbdict:
                    ob = self.dbdict[hsh]
                    assert ob is not None
                    res.append(ob)
                else:
                    # None is a placeholder that is filled in below, in order.
                    res.append(None)
                    remaining_reqs.append(req)
            # Actually run the LM on the requests that do not have cached results
            rem_res = getattr(self.lm, attr)(remaining_reqs)
            # Stick the new ones back into the list and also cache any of the new ones
            resptr = 0
            for req, r in zip(remaining_reqs, rem_res):
                # Advance to the next placeholder slot.
                while res[resptr] is not None:
                    resptr += 1
                res[resptr] = r
                # Caching
                hsh = hash_args(attr, req)
                self.dbdict[hsh] = r
            self.dbdict.commit()
            return res

        return fn

    def get_cache_hook(self):
        return CacheHook(self)
class CacheHook:
    """Lightweight handle letting an LM write partial results into a cache DB."""

    def __init__(self, cachinglm: CachingLM):
        """Bind to the database of ``cachinglm``; a ``None`` argument yields a no-op hook."""
        self.dbdict = None if cachinglm is None else cachinglm.dbdict

    def add_partial(self, attr, req, res):
        """Store ``res`` under the cache key derived from ``attr`` and ``req``."""
        if self.dbdict is not None:
            self.dbdict[hash_args(attr, req)] = res
| 17,599 | 37.681319 | 119 | py |
lm-evaluation-harness | lm-evaluation-harness-master/lm_eval/models/huggingface.py | import math
import torch
import torch.nn.functional as F
import transformers
from typing import List, Mapping, NewType, Optional, Tuple, Union
from tqdm import tqdm
from lm_eval.api import utils
from lm_eval.api.model import TokenLM, TokenSequence
# Type alias for `accelerate`-style device maps: module name -> device id/name.
_DeviceMapping = NewType("DeviceMapping", Mapping[str, Union[int, str, torch.device]])
def _get_accelerate_args(
device_map_option: Optional[str] = "auto",
max_memory_per_gpu: Optional[Union[int, str]] = None,
max_cpu_memory: Optional[Union[int, str]] = None,
offload_folder: Optional[str] = "./offload",
) -> dict:
"""Returns the kwargs needed to apply `accelerate` in `AutoModel.from_pretrained`."""
max_memory = {}
if max_memory_per_gpu is not None:
max_memory_per_gpu_map = {
device_idx: max_memory_per_gpu
for device_idx in range(torch.cuda.device_count())
}
max_memory.update(max_memory_per_gpu_map)
if max_cpu_memory is not None:
max_memory["cpu"] = max_cpu_memory
args = {}
if max_memory:
args["max_memory"] = max_memory
args["device_map"] = device_map_option
args["offload_folder"] = offload_folder
return args
def _get_dtype(
dtype: Union[str, torch.dtype], config: Optional[transformers.AutoConfig] = None
) -> torch.dtype:
"""Converts `dtype` from `str` to torch.dtype when possible."""
if dtype is None and config is not None:
_torch_dtype = config.torch_dtype
elif isinstance(dtype, str) and dtype != "auto":
# Convert `str` args torch dtype: `float16` -> `torch.float16`
_torch_dtype = getattr(torch, dtype)
else:
_torch_dtype = dtype
return _torch_dtype
class HuggingFaceAutoLM(TokenLM):
    """Shared base for HuggingFace `transformers` evaluation adapters.

    Subclasses select the concrete auto-model class (causal vs. seq2seq) via
    `AUTO_MODEL_CLASS` and implement the `_model_call`/`_model_generate`
    primitives declared on `TokenLM`.
    """

    AUTO_CONFIG_CLASS: transformers.AutoConfig = transformers.AutoConfig
    AUTO_TOKENIZER_CLASS: transformers.AutoTokenizer = transformers.AutoTokenizer
    AUTO_MODEL_CLASS: transformers.AutoModel = None
    # Default max sequence length setting for when no `max_length` is provided
    # or no max length config setting is found in the model or tokenizer.
    _DEFAULT_MAX_LENGTH: int = 2048

    def __init__(
        self,
        pretrained: str,
        tokenizer: Optional[str] = None,
        subfolder: Optional[str] = None,
        revision: Optional[str] = "main",
        batch_size: Optional[int] = 1,
        max_gen_toks: Optional[int] = 256,
        max_length: Optional[int] = None,
        add_special_tokens: Optional[bool] = None,
        use_accelerate: Optional[bool] = False,
        device_map_option: Optional[str] = "auto",
        max_memory_per_gpu: Optional[Union[int, str]] = None,
        max_cpu_memory: Optional[Union[int, str]] = None,
        offload_folder: Optional[str] = "./offload",
        dtype: Optional[Union[str, torch.dtype]] = None,
        device: Optional[Union[int, str]] = "cuda",
    ):
        """Initializes a HuggingFace `AutoModel` and `AutoTokenizer` for evaluation.
        Args:
            pretrained (str):
                The HuggingFace Hub model ID name or the path to a pre-trained
                model to load. This is effectively the `pretrained_model_name_or_path`
                argument of `from_pretrained` in the HuggingFace `transformers` API.
            add_special_tokens (bool, optional, defaults to True):
                Whether to add special tokens to the input sequences. If `None`, the
                default value will be set to `True` for seq2seq models (e.g. T5) and
                `False` for causal models.
                WARNING: Evaluating causal models with `add_special_tokens=True` is
                currently __not__ supported.
            > Large model loading `accelerate` arguments
            use_accelerate (bool, optional, defaults to False):
                If True, uses the `accelerate` library to load a large model across
                multiple devices.
            device_map_option (str, optional, defaults to "auto"):
                The device map option to use when loading the model with
                `accelerate`.
                Options:
                    "auto", "balanced", "balanced_low_0", "sequential"
                See the `accelerate` docs for more details on these options:
                https://huggingface.co/docs/accelerate/v0.12.0/en/usage_guides/big_modeling#designing-a-device-map
            max_memory_per_gpu (Union[int, str], optional, defaults to None):
                The maximum memory available for each GPU in bytes as `int` or in
                the format f"{significand}{unit_symbol}" where {unit_symbol} is
                any of ["GB", "MB", "GIB", "MIB"]. Refer to the `max_memory` arg in
                the "Parameters for big model inference" section of the following
                docs:
                https://huggingface.co/docs/transformers/v4.20.1/en/main_classes/model#large-model-loading
            max_cpu_memory (Union[int, str], optional, defaults to None):
                The maximum available CPU RAM in bytes as `int` or in the format
                f"{significand}{unit_symbol}" where {unit_symbol} is any of
                ["GB", "MB", "GIB", "MIB"]. Refer to the `max_memory` arg in the
                "Parameters for big model inference" section of the following docs:
                https://huggingface.co/docs/transformers/v4.20.1/en/main_classes/model#large-model-loading
            offload_folder (str, optional, defaults to "./offload"):
                The folder to offload weights into if `device_map` contains any
                "disk" value.
            dtype (Union[str, torch.dtype], optional, defaults to None):):
                Converts the model weights to `dtype`, if specified. Strings get
                converted to `torch.dtype` objects (e.g. `float16` -> `torch.float16`).
                Use `dtype="auto"` to derive the type from the model’s weights.
        """
        super().__init__()
        assert isinstance(pretrained, str)
        assert isinstance(device, str)
        assert isinstance(batch_size, int)
        if (
            add_special_tokens is not None
            and self.AUTO_MODEL_CLASS is transformers.AutoModelForCausalLM
        ):
            # TODO: Support evaluating causal models with special tokens. Currently,
            # this is not possible because the `_loglikelihood_tokens()` method for
            # causal LMs makes a no-special-tokens assumption given that contexts
            # and labels/continuations are tokenized separately without special
            # tokens, concatenated, and then processed as inputs.
            assert (
                not add_special_tokens
            ), "Evaluating causal models with `add_special_tokens=True` is currently not supported."
        self._batch_size = batch_size  # TODO: Adaptive batch size
        self._max_gen_toks = max_gen_toks
        self._max_length = max_length
        self._config = self.AUTO_CONFIG_CLASS.from_pretrained(
            pretrained,
            revision=revision + ("/" + subfolder if subfolder is not None else ""),
        )
        self._add_special_tokens = add_special_tokens
        self.tokenizer = self._create_auto_tokenizer(
            pretrained=pretrained,
            revision=revision,
            subfolder=subfolder,
            tokenizer=tokenizer,
        )
        self.tokenizer.model_max_length = self.max_length
        accelerate_kwargs = {}
        if use_accelerate:
            accelerate_kwargs = _get_accelerate_args(
                device_map_option,
                max_memory_per_gpu,
                max_cpu_memory,
                offload_folder,
            )
        self.model = self._create_auto_model(
            pretrained=pretrained,
            revision=revision,
            subfolder=subfolder,
            torch_dtype=_get_dtype(dtype, self._config),
            **accelerate_kwargs,
        )
        self.model.eval()
        # Evaluation only: disable autograd globally to save memory/time.
        torch.set_grad_enabled(False)
        self._device = device
        if use_accelerate and "lm_head" in self.model.hf_device_map:
            # `accelerate` can place `lm_head` weights on a different device than
            # the user specified one so we force `self._device` to be the same as
            # `lm_head`'s.
            self._device = self.model.hf_device_map["lm_head"]
        if not use_accelerate:
            self.model.to(self._device)

    def _create_auto_model(
        self,
        *,
        pretrained: str,
        revision: str,
        subfolder: str,
        device_map: Optional[Union[str, _DeviceMapping]] = None,
        max_memory: Optional[dict] = None,
        offload_folder: Optional[str] = None,
        torch_dtype: Optional[Union[str, torch.dtype]] = None,
    ) -> transformers.AutoModel:
        """Returns a pre-trained pytorch model from a pre-trained model configuration."""
        model = self.AUTO_MODEL_CLASS.from_pretrained(
            pretrained,
            revision=revision + ("/" + subfolder if subfolder is not None else ""),
            device_map=device_map,
            max_memory=max_memory,
            offload_folder=offload_folder,
            torch_dtype=torch_dtype,
        )
        return model

    def _create_auto_tokenizer(
        self,
        *,
        pretrained: str,
        revision: str,
        subfolder: str,
        tokenizer: Optional[str] = None,
    ) -> transformers.PreTrainedTokenizer:
        """Returns a pre-trained tokenizer from a pre-trained tokenizer configuration."""
        tokenizer = self.AUTO_TOKENIZER_CLASS.from_pretrained(
            pretrained if tokenizer is None else tokenizer,
            revision=revision + ("/" + subfolder if subfolder is not None else ""),
        )
        # Reuse EOS as the pad token: many checkpoints ship without one.
        tokenizer.pad_token = tokenizer.eos_token
        return tokenizer

    @property
    def add_special_tokens(self) -> bool:
        """Whether to include special tokens in encoded text. This should be
        determined by whether or not the model was trained with special tokens.
        TODO: Remove these conditionals once HuggingFace supports a way to
        check whether or not an arbitrary model was trained with special tokens.
        """
        if self._add_special_tokens is not None:
            return self._add_special_tokens
        elif self.AUTO_MODEL_CLASS is transformers.AutoModelForCausalLM:
            return False
        elif self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM:
            return True
        else:
            raise ValueError(
                "Could not determine `add_special_tokens` value from the model "
                "class. Set to `True` or `False` depending on whether the model "
                "was pre-trained with special tokens."
            )

    @property
    def eot_token(self) -> str:
        # End-of-text marker, also used for padding and as a default stop string.
        return self.tokenizer.eos_token

    @property
    def eot_token_id(self) -> int:
        return self.tokenizer.eos_token_id

    @property
    def max_gen_toks(self) -> int:
        return self._max_gen_toks

    @property
    def max_length(self) -> int:
        """Return the maximum sequence length of the model.
        NOTE: Different model configurations have different max sequence length
        attribute names.
        - n_positions: (CTRLConfig)
        - max_position_embeddings: (BartConfig, RoFormerConfig)
        - n_ctx: (GPT2Config)
        NOTE: For relative position encoded models you should specify the max
        sequence length of the model in the constructor via `max_length`.
        """
        if self._max_length is not None:
            return self._max_length
        # Try to get the sequence length from the model config.
        seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
        for attr in seqlen_config_attrs:
            if hasattr(self._config, attr):
                return getattr(self._config, attr)
        if hasattr(self.tokenizer, "model_max_length"):
            return self.tokenizer.model_max_length
        return self._DEFAULT_MAX_LENGTH

    @property
    def batch_size(self) -> int:
        # TODO: Add adaptive batch size.
        return self._batch_size  # * gpus

    @property
    def device(self) -> Union[int, str, torch.device]:
        return self._device

    def tok_encode(self, string: str) -> TokenSequence:
        # TODO: Merge `tok_encode_batch` here.
        return self.tokenizer.encode(string, add_special_tokens=self.add_special_tokens)

    def tok_encode_batch(self, strings: List[str]) -> TokenSequence:
        return self.tokenizer(
            strings,
            padding=True,
            add_special_tokens=self.add_special_tokens,
            return_tensors="pt",
        )

    def tok_decode(self, tokens: torch.LongTensor) -> List[str]:
        return self.tokenizer.batch_decode(tokens, skip_special_tokens=True)

    def greedy_until(self, requests: List[Tuple[str, dict]]) -> List[str]:
        """Greedily generate a continuation per request, truncated at stop strings."""

        def _collate(x):
            # Sort key: (token length, text) groups similar lengths per batch.
            tokens = self.tok_encode(x[0])
            return len(tokens), x[0]

        results = []
        reorder = utils.Reorderer(requests, _collate)
        for chunk in utils.chunks(
            tqdm(reorder.get_reordered(), disable=False), self.batch_size
        ):
            context = [c[0] for c in chunk]
            # Generation settings are taken from the chunk's first request;
            # presumably all requests in a chunk share them — TODO confirm.
            request_args = chunk[0][1]
            stop_sequences = request_args["stop_sequences"]
            max_generation_length = request_args["max_generation_length"]
            num_fewshot = request_args["num_fewshot"]
            assert (
                isinstance(max_generation_length, int) or max_generation_length is None
            )
            assert isinstance(stop_sequences, list) or stop_sequences is None
            assert isinstance(num_fewshot, int) or num_fewshot is None
            # TODO: Find a better way to handle stop sequences for 0-shot.
            if stop_sequences is None or num_fewshot == 0:
                until = [self.eot_token]
            else:
                until = stop_sequences + [self.eot_token]
            if max_generation_length is None:
                max_tokens = self.max_gen_toks
            else:
                max_tokens = max_generation_length
            token_context = self.tok_encode_batch(context)
            responses = self._model_generate(
                inputs=token_context,
                max_tokens=max_tokens,
                stop=until,
            )
            responses = self.tok_decode(responses.tolist())
            for response in responses:
                # Ensure the generated responses do not contain the stop sequences.
                for term in until:
                    response = response.split(term)[0]
                # partial caching
                # NOTE(review): `context` here is the whole chunk's context list,
                # so every response in this chunk shares one cache key; this looks
                # like it should be the per-response context — confirm upstream.
                self.cache_hook.add_partial("greedy_until", (context, until), response)
                results.append(response)
        return reorder.get_original(results)
class AutoCausalLM(HuggingFaceAutoLM):
    """Causal language modeling.
    You can find a set of supported models in the HF documentation:
    https://huggingface.co/docs/transformers/main/model_doc/auto#transformers.AutoModelForCausalLM
    """

    AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM

    def _create_auto_tokenizer(
        self,
        *,
        pretrained: str,
        revision: str,
        subfolder: str,
        tokenizer: Optional[str] = None,
    ) -> transformers.PreTrainedTokenizer:
        """Create the tokenizer and force left padding, so the prompt stays
        flush against the newly generated tokens (required for decoder-only
        batch generation)."""
        tokenizer = super()._create_auto_tokenizer(
            pretrained=pretrained,
            revision=revision,
            subfolder=subfolder,
            tokenizer=tokenizer,
        )
        tokenizer.padding_side = "left"
        return tokenizer

    def _model_call(
        self, inputs: TokenSequence, labels: Optional[TokenSequence] = None
    ) -> TokenSequence:
        # `labels` is unused for causal LMs: context+continuation arrive as a
        # single concatenated sequence and are scored from the returned logits.
        return self.model(inputs)["logits"]

    def _model_generate(
        self,
        inputs: transformers.BatchEncoding,
        max_tokens: int,
        stop: Optional[List[str]] = None,
    ) -> TokenSequence:
        # Ensure that the context does not encroach into the `space`
        # for the generation.
        input_ids = inputs["input_ids"][:, self.max_gen_toks - self.max_length :]
        attention_mask = inputs["attention_mask"][
            :, self.max_gen_toks - self.max_length :
        ]
        input_ids = input_ids.to(self.device)
        attention_mask = attention_mask.to(self.device)
        stopping_criteria = stop_sequences_criteria(
            self.tokenizer, stop, input_ids.shape[1], input_ids.shape[0]
        )
        generations = self.model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            # GPT style models require the `generate` `max_length` arg to include the
            # context length, so we instead set `max_new_tokens` which is the number
            # of new tokens to generate, excluding the current number of tokens.
            max_new_tokens=max_tokens,
            stopping_criteria=stopping_criteria,
            do_sample=False,
        )
        # Strip the (left-padded) prompt so only the continuation is returned.
        return utils.select_continuation_from_batch_left_padding(
            generations, max_context_size=inputs["input_ids"].size(1)
        )
class AutoSeq2SeqLM(HuggingFaceAutoLM):
    """Seq2Seq language modeling.
    You can find a set of supported models in the following documentation:
    https://huggingface.co/docs/transformers/main/model_doc/auto#transformers.AutoModelForSeq2SeqLM
    """

    AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM

    @property
    def max_length(self) -> int:
        """Return the maximum sequence length of the model.
        TODO: Currently only works for relative position encoded Seq2Seq models.
        """
        if self._max_length is not None:
            return self._max_length
        return self._DEFAULT_MAX_LENGTH

    def loglikelihood(
        self, requests: List[Tuple[str, str]]
    ) -> List[Tuple[float, bool]]:
        """Score each (context, continuation) pair: context feeds the encoder,
        continuation is the decoder target."""
        new_requests = []
        for chunk in utils.chunks(requests, self.batch_size):
            context, continuation = zip(*chunk)
            # Fill empty contexts with the EOT token.
            context = [
                f"{self.eot_token}" if len(text) == 0 else text for text in context
            ]
            context_enc = self.tok_encode_batch(context)
            for key in context_enc:
                # Keep only the last `max_length` positions.
                context_enc[key] = context_enc[key][:, -self.max_length :]
            # Remove leading whitespace introduced by the default
            # `text_target_separator` since the context and continuation
            # will not be concatenated as a single (decoder) input.
            continuation = [text.lstrip() for text in continuation]
            continuation_enc = self.tok_encode_batch(list(continuation))
            for key in continuation_enc:
                continuation_enc[key] = continuation_enc[key][:, -self.max_length :]
            new_requests.append(
                ((context, continuation), context_enc, continuation_enc)
            )
        return self._loglikelihood_tokens(new_requests)

    def loglikelihood_rolling(self, requests: List[Tuple[str, str]]) -> List[float]:
        """Full-text log-likelihood via rolling windows over each string."""
        loglikelihoods = []
        for (string,) in tqdm(requests):
            rolling_token_windows = list(
                map(
                    utils.make_disjoint_window,
                    utils.get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.eot_token_id,
                        max_seq_len=self.max_length,
                        context_len=1,
                    ),
                )
            )
            contexts, conts = utils.split_and_pad_windows(
                rolling_token_windows,
                pad_token_id=self.eot_token_id,
                max_seq_len=self.max_length,
            )
            # Manually create BatchEncoding tensors with attention masks as
            # expected by `self._model_call` in `self._loglikelihood_tokens`.
            contexts_enc = torch.Tensor(contexts).long()
            contexts_enc = transformers.tokenization_utils_base.BatchEncoding(
                {
                    "input_ids": contexts_enc,
                    "attention_mask": (contexts_enc != self.eot_token_id).long(),
                }
            )
            conts_enc = torch.Tensor(conts).long()
            conts_enc = transformers.tokenization_utils_base.BatchEncoding(
                {
                    "input_ids": conts_enc,
                    "attention_mask": (conts_enc != self.eot_token_id).long(),
                }
            )
            # TODO: Extract out this call so it only gets called once and also
            # somehow figure out partial caching for.
            rolling_token_windows_request = [
                ((contexts, conts), contexts_enc, conts_enc)
            ]
            string_nll = self._loglikelihood_tokens(
                rolling_token_windows_request, disable_tqdm=True
            )
            string_nll = [x[0] for x in string_nll]  # discard is_greedy
            string_nll = sum(string_nll)
            loglikelihoods.append(string_nll)
        return loglikelihoods

    def _loglikelihood_tokens(
        self,
        requests: List[Tuple[Tuple[str, str], TokenSequence, TokenSequence]],
        disable_tqdm: Optional[bool] = False,
    ) -> List[Tuple[float, bool]]:
        """Compute (sum log-prob, is-exact-greedy-match) per request batch."""
        results = []
        for chunk in tqdm(
            requests, total=math.ceil(len(requests)), disable=disable_tqdm
        ):
            cache_keys, inputs_tokens, targets_tokens = chunk
            inputs_tokens = inputs_tokens.to(self.device)
            targets_tokens = targets_tokens.to(self.device)
            outputs = self._model_call(inputs=inputs_tokens, labels=targets_tokens)
            log_softmaxes = F.log_softmax(outputs.logits, dim=-1)
            output_iterator = zip(
                zip(cache_keys[0], cache_keys[1]),
                log_softmaxes,
                targets_tokens["input_ids"],
                targets_tokens["attention_mask"],
            )
            for cache_key, log_softmax, target_tokens, target_mask in output_iterator:
                # Ignore padding: score only the real target positions.
                length = target_mask.sum()
                log_softmax = log_softmax[:length]
                target_tokens = target_tokens[:length]
                greedy_tokens = log_softmax.argmax(dim=-1)
                max_equal = (greedy_tokens == target_tokens).all()
                target_logits = torch.gather(
                    log_softmax, 1, target_tokens.unsqueeze(-1)
                ).squeeze(-1)
                answer = (float(target_logits.sum()), bool(max_equal))
                results.append(answer)
                if cache_key is not None:
                    self.cache_hook.add_partial("loglikelihood", cache_key, answer)
        return results

    def _model_call(
        self, inputs: TokenSequence, labels: Optional[TokenSequence] = None
    ) -> TokenSequence:
        return self.model(**inputs, labels=labels["input_ids"])

    def _model_generate(
        self,
        inputs: transformers.BatchEncoding,
        max_tokens: int,
        stop: Optional[List[str]] = None,
    ) -> TokenSequence:
        input_ids = inputs["input_ids"][:, -self.max_length :].to(self.device)
        attention_mask = inputs["attention_mask"][:, -self.max_length :].to(self.device)
        # Generate one token to calculate the number of start tokens prepended to decoder_input_ids
        # (leaving this here in case the below assumption is violated in the future)
        # one_tok_gen = self.model.generate(
        #    input_ids=torch.zeros((1, 1), dtype=torch.int),
        #    min_length=2,
        #    max_new_tokens=1,
        # ).squeeze()
        # initial_decoder_input_length = len(one_tok_gen) - 1
        # Assume that there will always only be one token in the decoder inputs, assumption holds for existing HF models
        stopping_criteria = stop_sequences_criteria(
            self.tokenizer, stop, 1, input_ids.shape[0]
        )
        generations = self.model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            max_new_tokens=max_tokens,
            stopping_criteria=stopping_criteria,
            do_sample=False,
        )
        return generations
class MultiTokenEOSCriteria(transformers.StoppingCriteria):
    """Stopping criterion that fires once every batch row has emitted a given
    (possibly multi-token) stop string."""

    def __init__(
        self,
        sequence: str,
        tokenizer: transformers.PreTrainedTokenizer,
        initial_decoder_input_length: int,
        batch_size: int,
    ):
        self.initial_decoder_input_length = initial_decoder_input_length
        # One "finished" flag per batch row; generation halts when all are True.
        self.done_tracker = [False for _ in range(batch_size)]
        self.sequence = sequence
        self.sequence_ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.sequence_id_len = len(self.sequence_ids)
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        # For efficiency, decode only a trailing window of the generated part
        # as wide as the stop sequence itself.
        generated_ids = input_ids[:, self.initial_decoder_input_length :]
        lookback_ids_batch = generated_ids[:, -self.sequence_id_len :]
        lookback_tokens_batch = self.tokenizer.batch_decode(lookback_ids_batch)
        for row, already_done in enumerate(self.done_tracker):
            if not already_done:
                self.done_tracker[row] = self.sequence in lookback_tokens_batch[row]
        return all(self.done_tracker)
def stop_sequences_criteria(
    tokenizer: transformers.PreTrainedTokenizer,
    stop_sequences: List[str],
    initial_decoder_input_length: int,
    batch_size: int,
) -> transformers.StoppingCriteriaList:
    """Build a StoppingCriteriaList with one MultiTokenEOSCriteria per stop string."""
    criteria = [
        MultiTokenEOSCriteria(
            sequence, tokenizer, initial_decoder_input_length, batch_size
        )
        for sequence in stop_sequences
    ]
    return transformers.StoppingCriteriaList(criteria)
| 26,068 | 39.860502 | 120 | py |
GradAug | GradAug-main/train_cifar.py | import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from models.wideresnet_randwidth import WideResNet_randwidth
from models.pyramidnet_randwidth import PyramidNet_randwidth
import models.resnet_randdepth as resnet_randdepth
from utils.setlogger import get_logger
from utils.config import FLAGS
import numpy as np
import random
# Best top-1 validation accuracy seen so far across epochs (updated in main()).
best_prec1 = 0
# All checkpoints and the training log are written under FLAGS.log_dir.
logpath = FLAGS.log_dir
if not os.path.exists(logpath):
    os.makedirs(logpath)
logger = get_logger(os.path.join(logpath, 'train.log'))
def main():
    """Entry point: build the CIFAR data pipeline, model, and optimizer from
    the global FLAGS config, then train and validate epoch by epoch while
    tracking the best top-1/top-5 accuracy."""
    global best_prec1
    # Data loading code
    normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
                                     std=[x/255.0 for x in [63.0, 62.1, 66.7]])
    transform_train = transforms.Compose([
        transforms.ToTensor(),
        # Reflection-pad by 4 pixels on each side before the random 32x32 crop.
        transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
                                          (4,4,4,4),mode='reflect').squeeze()),
        transforms.ToPILImage(),
        transforms.RandomCrop(32),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])
    kwargs = {'num_workers': FLAGS.workers, 'pin_memory': True}
    assert(FLAGS.dataset == 'cifar10' or FLAGS.dataset == 'cifar100')
    train_loader = torch.utils.data.DataLoader(
        datasets.__dict__[FLAGS.dataset.upper()]('../data', train=True, download=True,
                                                 transform=transform_train),
        batch_size=FLAGS.batch_size, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        datasets.__dict__[FLAGS.dataset.upper()]('../data', train=False, transform=transform_test),
        batch_size=FLAGS.batch_size, shuffle=True, **kwargs)
    # create model
    if FLAGS.model == 'wideresnet':
        model = WideResNet_randwidth(depth=FLAGS.depth, num_classes=FLAGS.dataset == 'cifar10' and 10 or 100,
                                     widen_factor=FLAGS.widen_factor, dropRate=0)
    elif FLAGS.model == 'pyramidnet':
        model = PyramidNet_randwidth(dataset=FLAGS.dataset, depth=200, alpha=240, num_classes=100, bottleneck=True)
    elif FLAGS.model == 'resnet_randdepth':
        model = resnet_randdepth.resnet110_cifar(num_classes=FLAGS.dataset == 'cifar10' and 10 or 100)
    else:
        raise NotImplementedError('model type not implemented.')
    # get the number of model parameters
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    # for training on multiple GPUs.
    # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
    model = torch.nn.DataParallel(model).cuda()
    # optionally resume from a checkpoint
    if FLAGS.resume:
        if os.path.isfile(FLAGS.resume):
            print("=> loading checkpoint '{}'".format(FLAGS.resume))
            checkpoint = torch.load(FLAGS.resume)
            FLAGS.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(FLAGS.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(FLAGS.resume))
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), FLAGS.lr,
                                momentum=FLAGS.momentum, nesterov=FLAGS.nesterov,
                                weight_decay=FLAGS.weight_decay)
    # cosine learning rate
    scheduler = get_lr_scheduler(optimizer, train_loader)
    if FLAGS.test_only:
        ckpt = torch.load(FLAGS.pretrained)
        model.load_state_dict(ckpt['state_dict'], strict=True)
        print('Load pretrained weights from ', FLAGS.pretrained)
        acc1, acc5 = validate(val_loader, model, criterion, 0)
        print('Top-1 and 5 accuracy:', acc1, acc5)
        return
    # BUGFIX: initialize best_prec5 so it is always bound. Previously it was
    # assigned only when an epoch improved on best_prec1, which raised a
    # NameError at checkpoint/log time when resuming from a checkpoint whose
    # best_prec1 was never beaten.
    best_prec5 = 0
    for epoch in range(FLAGS.start_epoch, FLAGS.epochs):
        # train for one epoch
        train(train_loader, model, criterion, optimizer, scheduler, epoch)
        # evaluate on validation set
        prec1, prec5 = validate(val_loader, model, criterion, epoch)
        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        if is_best:
            best_prec5 = prec5
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'best_prec5': best_prec5,
            'optimizer': optimizer.state_dict(),
        }, is_best)
        logger.info('Best accuracy: Top1:{} Top5:{}'.format(best_prec1, best_prec5))
def train(train_loader, model, criterion, optimizer, scheduler, epoch):
    """Train for one epoch on the training set.

    GradAug scheme: the full-size network is trained with cross-entropy, then
    several random sub-networks (random width or random depth) are trained at
    random input resolutions to match the full network's output via KL
    divergence. All backward passes accumulate into the same gradients before
    a single optimizer step.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - end)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        optimizer.zero_grad()
        # GradAug training
        if FLAGS.min_width > 0: # randwidth
            # Sub-network widths: the minimum width plus (num_subnet - 1)
            # widths sampled uniformly from [min_width, max_width].
            max_width = FLAGS.max_width
            min_width = FLAGS.min_width
            width_mult_list = [min_width]
            sampled_width = list(np.random.uniform(min_width, max_width, FLAGS.num_subnet-1))
            width_mult_list.extend(sampled_width)
            model.apply(lambda m: setattr(m, 'width_mult', max_width))
        else: # randdepth
            model.apply(lambda m: setattr(m, 'fullnet', True))
        # Full network forward/backward with the hard-label CE loss.
        # NOTE(review): `input` is already on GPU; the extra .cuda() here is redundant.
        max_output = model(input.cuda(non_blocking=True))
        loss = criterion(max_output, target)
        loss.backward()
        prec1, prec5 = accuracy(max_output.data, target, topk=(1, 5))
        losses.update(loss.data.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))
        # Detach so the sub-network KD losses do not backprop into the
        # full-network forward graph.
        max_output_detach = max_output.detach()
        if FLAGS.min_width > 0: # randwidth
            for width_mult in sorted(width_mult_list, reverse=True):
                model.apply(
                    lambda m: setattr(m, 'width_mult', width_mult))
                # Random input resolution per sub-network.
                resolution = FLAGS.resos[random.randint(0, len(FLAGS.resos)-1)]
                output = model(F.interpolate(input, (resolution, resolution), mode='bilinear', align_corners=True))
                loss = torch.nn.KLDivLoss(reduction='batchmean')(F.log_softmax(output, dim=1), F.softmax(max_output_detach, dim=1))
                loss.backward()
        else: # randdepth
            model.apply(lambda m: setattr(m, 'fullnet', False))
            for k in range(3):
                resolution = FLAGS.resos[random.randint(0, len(FLAGS.resos) - 1)]
                output = model(F.interpolate(input, (resolution, resolution), mode='bilinear', align_corners=True))
                loss = torch.nn.KLDivLoss(reduction='batchmean')(F.log_softmax(output, dim=1),
                                                                 F.softmax(max_output_detach, dim=1))
                loss.backward()
        # compute gradient and do SGD step
        optimizer.step()
        # Per-iteration LR schedule (milestones are expressed in iterations).
        scheduler.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % FLAGS.print_freq == 0:
            logger.info('Epoch: [{0}][{1}/{2}]\t'
                        'LR:{3: .4f}\t'
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                        'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'
                        'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                            epoch, i, len(train_loader), optimizer.param_groups[0]['lr'], batch_time=batch_time, data_time=data_time,
                            loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion, epoch):
    """Perform validation on the validation set.

    Evaluates the full-width network (width_mult forced to 1.0) and returns
    the average top-1 and top-5 accuracy in percent.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    # Always validate the full-width network.
    model.apply(lambda m: setattr(m, 'width_mult', 1.0))
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        target = target.cuda(non_blocking=True)
        input = input.cuda(non_blocking=True)
        # compute output
        with torch.no_grad():
            output = model(input)
            loss = criterion(output, target)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % FLAGS.print_freq == 0:
            logger.info('Test (on val set): [{0}/{1}][{2}/{3}]\t'
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                        'Top 1-err {top1.val:.4f} ({top1.avg:.4f})\t'
                        'Top 5-err {top5.val:.4f} ({top5.avg:.4f})'.format(
                            epoch, FLAGS.epochs, i, len(val_loader), batch_time=batch_time, loss=losses,
                            top1=top1, top5=top5))
    logger.info('* Epoch: [{0}/{1}]\t Top 1-err {top1.avg:.3f} Top 5-err {top5.avg:.3f}\t Test Loss {loss.avg:.3f}'.format(
        epoch, FLAGS.epochs, top1=top1, top5=top5, loss=losses))
    return top1.avg, top5.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Saves checkpoint to disk.

    Args:
        state: dict holding model/optimizer state plus bookkeeping fields.
        is_best: when True, also mirror the checkpoint to 'model_best.pth.tar'.
        filename: checkpoint file name inside the log directory.
    """
    directory = logpath
    if not os.path.exists(directory):
        os.makedirs(directory)
    # os.path.join works whether or not the configured log dir has a trailing
    # slash; the previous string concatenation silently mangled the path
    # ("logdircheckpoint.pth.tar") when it did not.
    filepath = os.path.join(directory, filename)
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(filepath, os.path.join(logpath, 'model_best.pth.tar'))
class AverageMeter(object):
    """Tracks the latest value, running sum, count, and mean of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: class scores/logits of shape [batch, num_classes].
        target: ground-truth class indices of shape [batch].
        topk: iterable of k values to report.
    Returns:
        List of 0-dim tensors, one per k, each the top-k accuracy in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): slicing rows of the transposed
        # tensor yields a non-contiguous tensor, and view() raises a
        # RuntimeError on non-contiguous input in recent PyTorch versions.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def get_lr_scheduler(optimizer, trainloader):
    """Build the LR scheduler selected by ``FLAGS.lr_scheduler``.

    Milestones/periods are expressed in iterations (epochs * batches per
    epoch) because the caller steps the scheduler once per batch.

    Raises:
        NotImplementedError: if ``FLAGS.lr_scheduler`` is not recognized.
    """
    if FLAGS.lr_scheduler == 'multistep':
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=[150*len(trainloader), 225*len(trainloader)], gamma=0.1)
    elif FLAGS.lr_scheduler == 'cosine':
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, FLAGS.epochs*len(trainloader))
    else:
        # BUG FIX: the original did `raise NotImplemented(...)`.
        # NotImplemented is a sentinel value, not an exception class, so that
        # line raised TypeError instead of the intended error.
        raise NotImplementedError('LR scheduler not implemented.')
    return lr_scheduler
# Script entry point: run training/evaluation only when executed directly.
if __name__ == '__main__':
    main()
| 11,920 | 37.33119 | 131 | py |
GradAug | GradAug-main/train.py | import os
import shutil
import time
import importlib
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import utils.mytransforms as mytransforms
import numpy as np
import random
from utils.config import FLAGS
from utils.setlogger import get_logger
saved_path = FLAGS.log_dir
if not os.path.exists(saved_path):
os.makedirs(saved_path)
logger = get_logger(os.path.join(saved_path, 'train.log'))
best_acc1 = 0
best_acc5 = 0
def main():
    """Entry point: build ImageNet-style loaders, the width-adjustable model,
    optimizer and scheduler, then run the train/validate loop (or a one-off
    evaluation when FLAGS.test_only is set)."""
    global best_acc1, best_acc5
    # Dataset is expected in ImageFolder layout: <dataset_dir>/{train,val}/<class>/*
    traindir = os.path.join(FLAGS.dataset_dir, 'train')
    valdir = os.path.join(FLAGS.dataset_dir, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # Color jitter + PCA lighting noise augmentations for training.
    jittering = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4)
    lighting = mytransforms.Lighting(alphastd=0.1)
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        jittering,
        lighting,
        transforms.ToTensor(),
        normalize,
    ])
    train_dataset = datasets.ImageFolder(
        traindir,
        transform=train_transform
    )
    train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=FLAGS.batch_size, shuffle=(train_sampler is None),
        num_workers=FLAGS.workers, pin_memory=True, sampler=train_sampler)
    # Validation uses the standard resize-256 / center-crop-224 pipeline.
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=FLAGS.batch_size//2, shuffle=False,
        num_workers=FLAGS.workers, pin_memory=True)
    numberofclass = FLAGS.num_classes
    # The model module (e.g. models.resnet_randwidth) is chosen by name at runtime.
    model_lib = importlib.import_module(FLAGS.model)
    model = model_lib.Model(depth=FLAGS.depth, num_classes=numberofclass)
    model = torch.nn.DataParallel(model).cuda()
    print(model)
    print('the number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), FLAGS.lr,
                                momentum=FLAGS.momentum,
                                weight_decay=FLAGS.weight_decay, nesterov=FLAGS.nesterov)
    lr_scheduler = get_lr_scheduler(optimizer, train_loader)
    if FLAGS.test_only:
        # Evaluation-only path: load a checkpoint and report accuracy.
        ckpt = torch.load(FLAGS.pretrained)
        model.load_state_dict(ckpt['model'], strict=True)
        print('Load pretrained weights from ', FLAGS.pretrained)
        acc1, acc5, _ = validate(val_loader, model, criterion, 0)
        print('Top-1 and 5 accuracy:', acc1, acc5)
        return
    for epoch in range(0, FLAGS.epochs):
        # train for one epoch
        train_loss = train(train_loader, model, criterion, optimizer, epoch, lr_scheduler)
        # evaluate on validation set
        acc1, acc5, val_loss = validate(val_loader, model, criterion, epoch)
        # remember best prec@1 and save checkpoint
        is_best = acc1 >= best_acc1
        best_acc1 = max(acc1, best_acc1)
        if is_best:
            best_acc5 = acc5
        print('Current best accuracy (top-1 and 5 accuracy):', best_acc1, best_acc5)
        save_checkpoint({
            'epoch': epoch,
            # 'arch': FLAGS.net_type,
            'state_dict': model.state_dict(),
            'best_acc1': best_acc1,
            'best_acc5': best_acc5,
            'optimizer': optimizer.state_dict(),
        }, is_best)
    print('Best accuracy (top-1 and 5 accuracy):', best_acc1, best_acc5)
def train(train_loader, model, criterion, optimizer, epoch, lr_scheduler):
    """Train for one epoch with the GradAug scheme.

    Per batch: (1) forward/backward the full-width network on full-resolution
    input with cross-entropy; (2) for several randomly sampled sub-network
    widths, forward at a randomly chosen resolution and backward a KL
    distillation loss against the detached full-network output. All gradients
    accumulate into the same buffers (zero_grad is called once per batch) and
    a single optimizer step applies them. Returns the average CE loss.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    current_LR = get_learning_rate(optimizer)[0]
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        input = input.cuda()
        target = target.cuda()
        optimizer.zero_grad()
        # first do max_width and max_resolution
        # `width_mult` is an attribute every RW* layer reads in forward().
        max_width = FLAGS.max_width
        model.apply(lambda m: setattr(m, 'width_mult', max_width))
        max_output = model(input)
        loss = torch.mean(criterion(max_output, target))
        loss.backward()
        # Detach so sub-network losses do not backprop through the teacher pass.
        max_output_detach = max_output.detach()
        # measure accuracy and record loss (reported metrics are for the full net)
        acc1, acc5 = accuracy(max_output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1.item(), input.size(0))
        top5.update(acc5.item(), input.size(0))
        # do other widths and resolution
        # Always include the minimum width plus (num_subnet - 1) random widths.
        min_width = FLAGS.min_width
        width_mult_list = [min_width]
        sampled_width = list(np.random.uniform(min_width, max_width, FLAGS.num_subnet-1))
        width_mult_list.extend(sampled_width)
        for width_mult in sorted(width_mult_list, reverse=True):
            model.apply(
                lambda m: setattr(m, 'width_mult', width_mult))
            # Random input resolution from FLAGS.resos for this sub-network.
            idx = random.randint(0, len(FLAGS.resos) - 1)
            output = model(F.interpolate(input, (FLAGS.resos[idx], FLAGS.resos[idx]), mode='bilinear', align_corners=True))
            # Distill the full-width network's predictions into the sub-network.
            loss = torch.nn.KLDivLoss(reduction='batchmean')(F.log_softmax(output, dim=1),
                                                             F.softmax(max_output_detach, dim=1))
            loss.backward()
        # One step applies the accumulated full-net + sub-net gradients.
        optimizer.step()
        lr_scheduler.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % FLAGS.print_freq == 0:
            logger.info('Epoch: [{0}/{1}][{2}/{3}]\t'
                        'LR: {LR:.6f}\t'
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                        'Top 1-acc {top1.val:.4f} ({top1.avg:.4f})\t'
                        'Top 5-acc {top5.val:.4f} ({top5.avg:.4f})'.format(
                epoch, FLAGS.epochs, i, len(train_loader), LR=current_LR, batch_time=batch_time,
                data_time=data_time, loss=losses, top1=top1, top5=top5))
    logger.info('* Epoch: [{0}/{1}]\t Top 1-acc {top1.avg:.3f} Top 5-acc {top5.avg:.3f}\t Train Loss {loss.avg:.3f}'.format(
        epoch, FLAGS.epochs, top1=top1, top5=top5, loss=losses))
    return losses.avg
def validate(val_loader, model, criterion, epoch):
    """Evaluate the full-width network on the validation set.

    Returns (top1_avg, top5_avg, loss_avg).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode and select the full-width configuration
    model.eval()
    model.apply(lambda m: setattr(m, 'width_mult', 1.0))
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        target = target.cuda()
        # FIX: model.eval() alone does not disable autograd; without
        # torch.no_grad() every forward builds a graph and wastes GPU memory.
        # (The sibling validate() in this repo already wraps the forward.)
        with torch.no_grad():
            output = model(input)
            loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1.item(), input.size(0))
        top5.update(acc5.item(), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % FLAGS.print_freq == 0:
            logger.info('Test (on val set): [{0}/{1}][{2}/{3}]\t'
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                        'Top 1-acc {top1.val:.4f} ({top1.avg:.4f})\t'
                        'Top 5-acc {top5.val:.4f} ({top5.avg:.4f})'.format(
                epoch, FLAGS.epochs, i, len(val_loader), batch_time=batch_time, loss=losses,
                top1=top1, top5=top5))
    logger.info('* Epoch: [{0}/{1}]\t Top 1-acc {top1.avg:.3f} Top 5-acc {top5.avg:.3f}\t Test Loss {loss.avg:.3f}'.format(
        epoch, FLAGS.epochs, top1=top1, top5=top5, loss=losses))
    return top1.avg, top5.avg, losses.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Write *state* into FLAGS.log_dir; mirror it as model_best.pth.tar
    when *is_best* is true. (FLAGS.log_dir is concatenated, so it is
    expected to end in '/'.)"""
    out_dir = FLAGS.log_dir
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    ckpt_path = out_dir + filename
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, out_dir + 'model_best.pth.tar')
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        """Reset every tracked statistic to zero."""
        for name in ('val', 'avg', 'sum', 'count'):
            setattr(self, name, 0)

    def update(self, val, n=1):
        """Accumulate *val* with weight *n* and update the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def get_lr_scheduler(optimizer, trainloader):
    """Create the learning-rate scheduler named by FLAGS.lr_scheduler.

    Schedulers are stepped per iteration, so the cosine period is
    epochs * batches-per-epoch.

    Raises:
        NotImplementedError: for an unrecognized scheduler name.
    """
    scheduler_name = FLAGS.lr_scheduler
    if scheduler_name == 'multistep':
        return torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=FLAGS.multistep_lr_milestones,
            gamma=FLAGS.multistep_lr_gamma)
    if scheduler_name == 'cosine':
        total_iters = FLAGS.epochs * len(trainloader)
        return torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, total_iters)
    raise NotImplementedError('LR scheduler not implemented.')
# def adjust_learning_rate(optimizer, epoch):
# """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
# if args.dataset.startswith('cifar'):
# lr = args.lr * (0.1 ** (epoch // (args.epochs * 0.5))) * (0.1 ** (epoch // (args.epochs * 0.75)))
# elif args.dataset == ('imagenet'):
# if args.epochs == 300:
# lr = args.lr * (0.1 ** (epoch // 75))
# else:
# lr = args.lr * (0.1 ** (epoch // 30))
#
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
def get_learning_rate(optimizer):
    """Return the current learning rate of every parameter group."""
    return [group['lr'] for group in optimizer.param_groups]
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main() | 10,863 | 33.820513 | 125 | py |
GradAug | GradAug-main/models/randwidth_ops.py | # These operations are based on the implementation of https://github.com/JiahuiYu/slimmable_networks
import torch.nn as nn
from utils.config import FLAGS
def make_divisible(v, divisor=1, min_value=1):
    """Round *v* to the nearest multiple of *divisor*, never below
    *min_value* and never more than 10% below the original value.

    Passing min_value=None uses *divisor* itself as the floor (the
    MobileNet reference-implementation convention).
    """
    floor = divisor if min_value is None else min_value
    rounded = int(v + divisor / 2) // divisor * divisor
    result = max(floor, rounded)
    # Guard against rounding down by more than 10%.
    if result < 0.9 * v:
        result += divisor
    return result
class RWConv2d(nn.Conv2d):
    """Conv2d whose channel counts shrink at run time with `width_mult`.

    The layer is allocated at the widest configuration (FLAGS.max_width);
    `forward` slices weight/bias down to the channel counts implied by the
    externally-assigned `width_mult` attribute (set via ``model.apply``).

    us: whether the (input, output) channel dims scale with width_mult.
    ratio: channel-granularity divisors passed through make_divisible.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, depthwise=False, bias=True,
                 us=[True, True], ratio=[1, 1]):
        # Allocate parameters for the maximum width so every narrower
        # configuration is a prefix slice of the same tensors.
        in_channels_max = in_channels
        out_channels_max = out_channels
        if us[0]:
            in_channels_max = int(make_divisible(
                in_channels
                * FLAGS.max_width
                / ratio[0]) * ratio[0])
        if us[1]:
            out_channels_max = int(make_divisible(
                out_channels
                * FLAGS.max_width
                / ratio[1]) * ratio[1])
        groups = in_channels_max if depthwise else 1
        super(RWConv2d, self).__init__(
            in_channels_max, out_channels_max,
            kernel_size, stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias)
        self.depthwise = depthwise
        self.in_channels_basic = in_channels
        self.out_channels_basic = out_channels
        self.width_mult = None  # assigned externally before forward()
        self.us = us
        self.ratio = ratio

    def forward(self, input):
        # Active channel counts for the current width multiplier.
        in_channels = self.in_channels_basic
        out_channels = self.out_channels_basic
        if self.us[0]:
            in_channels = int(make_divisible(
                self.in_channels_basic
                * self.width_mult
                / self.ratio[0]) * self.ratio[0])
        if self.us[1]:
            out_channels = int(make_divisible(
                self.out_channels_basic
                * self.width_mult
                / self.ratio[1]) * self.ratio[1])
        self.groups = in_channels if self.depthwise else 1
        weight = self.weight[:out_channels, :in_channels, :, :]
        if self.bias is not None:
            bias = self.bias[:out_channels]
        else:
            bias = self.bias
        y = nn.functional.conv2d(
            input, weight, bias, self.stride, self.padding,
            self.dilation, self.groups)
        if getattr(FLAGS, 'conv_averaged', False):
            # BUG FIX: the original referenced `self.in_channels_list`, an
            # attribute this class never defines (leftover from the slimmable
            # networks code), so enabling FLAGS.conv_averaged raised
            # AttributeError. Scale by max/active channels instead:
            # self.in_channels holds the allocated maximum (set by super().__init__).
            y = y * (self.in_channels / in_channels)
        return y
class RWLinear(nn.Linear):
    """Linear layer that slices its parameters to the active width at run time.

    Allocated at FLAGS.max_width; `forward` uses the prefix of weight/bias
    matching the externally-assigned `width_mult` attribute.
    us: whether the (input, output) feature dims scale with width_mult.
    """

    def __init__(self, in_features, out_features, bias=True, us=[True, True]):
        in_features_max = in_features
        out_features_max = out_features
        if us[0]:
            in_features_max = make_divisible(
                in_features * FLAGS.max_width)
        if us[1]:
            out_features_max = make_divisible(
                out_features * FLAGS.max_width)
        super(RWLinear, self).__init__(
            in_features_max, out_features_max, bias=bias)
        self.in_features_basic = in_features
        self.out_features_basic = out_features
        self.width_mult = None  # assigned externally before forward()
        self.us = us

    def forward(self, input):
        in_features = self.in_features_basic
        out_features = self.out_features_basic
        if self.us[0]:
            in_features = make_divisible(
                self.in_features_basic * self.width_mult)
        if self.us[1]:
            out_features = make_divisible(
                self.out_features_basic * self.width_mult)
        weight = self.weight[:out_features, :in_features]
        if self.bias is not None:
            # BUG FIX: the original sliced with `self.out_features` (the
            # allocated maximum), so the bias length would not match the
            # sliced weight whenever us[1] scales the output dim. Slice with
            # the active `out_features` computed above.
            bias = self.bias[:out_features]
        else:
            bias = self.bias
        return nn.functional.linear(input, weight, bias)
class RWBatchNorm2d(nn.BatchNorm2d):
    def __init__(self, num_features, ratio=1):
        # Allocate BN statistics/affine parameters at the widest
        # configuration, rounded to the channel granularity `ratio`.
        num_features_max = int(make_divisible(
            num_features * FLAGS.max_width / ratio) * ratio)
        super(RWBatchNorm2d, self).__init__(
            num_features_max, affine=True, track_running_stats=True)
        self.num_features_basic = num_features  # channel count at width 1.0
        self.ratio = ratio
        self.width_mult = None  # assigned externally via model.apply(...)
        self.ignore_model_profiling = True  # flag read by profiling tools, not by this class
def forward(self, input):
weight = self.weight
bias = self.bias
c = int(make_divisible(
self.num_features_basic * self.width_mult / self.ratio) * self.ratio)
if self.width_mult == 1.0:
y = nn.functional.batch_norm(
input,
self.running_mean[:c],
self.running_var[:c],
weight[:c],
bias[:c],
self.training,
self.momentum,
self.eps)
else:
y = nn.functional.batch_norm(
input,
None,
None,
weight[:c],
bias[:c],
self.training,
self.momentum,
self.eps)
return y | 5,110 | 34.992958 | 100 | py |
GradAug | GradAug-main/models/resnet_randwidth.py | import torch.nn as nn
import math
from models.randwidth_ops import RWConv2d, RWLinear, RWBatchNorm2d, make_divisible
from utils.config import FLAGS
class Block(nn.Module):
    """ResNet bottleneck block built from width-adjustable (RW*) layers.

    inp/outp are the block's input/output channel counts at the maximum
    width; `tmp_ratio` is the channel-granularity ratio applied to the
    block's input side (0.25 for the first stage, which follows the stem).
    A projection shortcut is added whenever stride != 1 or inp != outp.
    """
    def __init__(self, inp, outp, stride, tmp_ratio=1.0):
        super(Block, self).__init__()
        assert stride in [1, 2]
        # midp = [i // 4 for i in outp]
        # Bottleneck mid-width = outp / 4 (classic ResNet expansion of 4).
        midp = make_divisible(outp // 4)
        expand_ratio = 0.25
        layers = [
            # 1x1 reduce -> 3x3 (carries the stride) -> 1x1 expand
            RWConv2d(inp, midp, 1, 1, 0, bias=False, ratio=[tmp_ratio, expand_ratio]),
            RWBatchNorm2d(midp, ratio=expand_ratio),
            nn.ReLU(inplace=True),
            RWConv2d(midp, midp, 3, stride, 1, bias=False, ratio=[expand_ratio, expand_ratio]),
            RWBatchNorm2d(midp, ratio=expand_ratio),
            nn.ReLU(inplace=True),
            RWConv2d(midp, outp, 1, 1, 0, bias=False, ratio=[expand_ratio, 1]),
            RWBatchNorm2d(outp),
        ]
        self.body = nn.Sequential(*layers)
        self.residual_connection = stride == 1 and inp == outp
        if not self.residual_connection:
            # 1x1 projection shortcut to match shape when identity won't.
            self.shortcut = nn.Sequential(
                RWConv2d(inp, outp, 1, stride=stride, bias=False, ratio=[tmp_ratio, 1]),
                RWBatchNorm2d(outp),
            )
        self.post_relu = nn.ReLU(inplace=True)
    def forward(self, x):
        # Post-activation residual: ReLU applied after the addition.
        if self.residual_connection:
            res = self.body(x)
            res += x
        else:
            res = self.body(x)
            res += self.shortcut(x)
        res = self.post_relu(res)
        return res
class Model(nn.Module):
    """Width-adjustable ResNet (50/101/152) built from RW* layers.

    All layers are allocated at FLAGS.max_width; the active width is chosen
    at run time by assigning a `width_mult` attribute to every RW* module
    (via ``model.apply``). The final classifier keeps a fixed output dim
    (us=[True, False]) so logits always have `num_classes` entries.
    """
    def __init__(self, depth=50, num_classes=1000, input_size=224):
        super(Model, self).__init__()
        self.features = []
        # head
        assert input_size % 32 == 0
        # setting of inverted residual blocks
        # Blocks per stage for each supported depth.
        self.block_setting_dict = {
            # : [stage1, stage2, stage3, stage4]
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
        }
        init_channel = 64
        width_mult = FLAGS.max_width # upper bound
        channels = make_divisible(init_channel * width_mult)
        self.block_setting = self.block_setting_dict[depth]
        feats = [64, 128, 256, 512]
        # channels = [
        #     int(64 * width_mult) for width_mult in FLAGS.width_mult_list]
        # Stem: 7x7/2 conv (input channels fixed at 3, so us=[False, True])
        # followed by 3x3/2 max-pool.
        self.features.append(
            nn.Sequential(
                RWConv2d(
                    3, channels, 7, 2, 3,
                    bias=False, us=[False, True], ratio=[1, 0.25]),
                RWBatchNorm2d(channels, ratio=0.25),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(3, 2, 1),
            )
        )
        # body
        for stage_id, n in enumerate(self.block_setting):
            # Stage output channels = base feats * 4 (bottleneck expansion).
            outp = make_divisible(feats[stage_id] * width_mult * 4)
            # outp = [
            #     int(feats[stage_id] * width_mult * 4)
            #     for width_mult in FLAGS.width_mult_list]
            for i in range(n):
                # First block of stages 2-4 downsamples; the very first stage
                # keeps stride 1 and uses the stem's 0.25 channel granularity.
                if i == 0 and stage_id != 0:
                    self.features.append(Block(channels, outp, 2))
                elif i == 0 and stage_id == 0:
                    self.features.append(Block(channels, outp, 1, tmp_ratio=0.25))
                else:
                    self.features.append(Block(channels, outp, 1))
                channels = outp
        # NOTE(review): avg_pool_size is computed but unused — pooling below
        # is adaptive, so input_size only matters for the assert above.
        avg_pool_size = input_size // 32
        self.features.append(nn.AdaptiveAvgPool2d((1, 1)))
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)
        # classifier
        self.outp = channels
        self.classifier = nn.Sequential(
            RWLinear(self.outp, num_classes, us=[True, False])
        )
        if FLAGS.reset_parameters:
            self.reset_parameters()
    def forward(self, x):
        # Features end with global average pooling, so x is (N, C, 1, 1);
        # flatten to (N, C) before the classifier.
        x = self.features(x)
        last_dim = x.size()[1]
        x = x.view(-1, last_dim)
        x = self.classifier(x)
        return x
    def reset_parameters(self):
        # He-style init for convs, constant init for BN affine params,
        # small normal init for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                if m.affine:
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
| 4,581 | 33.451128 | 95 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.